diff --git a/Documentation/ABI/testing/configfs-stp-policy-p_sys-t b/Documentation/ABI/testing/configfs-stp-policy-p_sys-t new file mode 100644 index 0000000000000..b290d1c00dcff --- /dev/null +++ b/Documentation/ABI/testing/configfs-stp-policy-p_sys-t @@ -0,0 +1,41 @@ +What: /config/stp-policy/:p_sys-t.//uuid +Date: June 2018 +KernelVersion: 4.19 +Description: + UUID source identifier string, RW. + Default value is randomly generated at the mkdir time. + Data coming from trace sources that use this will be + tagged with this UUID in the MIPI SyS-T packet stream, to + allow the decoder to discern between different sources + within the same master/channel range, and identify the + higher level decoders that may be needed for each source. + +What: /config/stp-policy/:p_sys-t.//do_len +Date: June 2018 +KernelVersion: 4.19 +Description: + Include payload length in the MIPI SyS-T header, boolean. + If enabled, the SyS-T protocol encoder will include payload + length in each packet's metadata. This is normally redundant + if the underlying transport protocol supports marking message + boundaries (which STP does), so this is off by default. + +What: /config/stp-policy/:p_sys-t.//ts_interval +Date: June 2018 +KernelVersion: 4.19 +Description: + Time interval in milliseconds. Include a timestamp in the + MIPI SyS-T packet metadata, if this many milliseconds have + passed since the previous packet from this source. Zero is + the default and stands for "never send the timestamp". + +What: /config/stp-policy/:p_sys-t.//clocksync_interval +Date: June 2018 +KernelVersion: 4.19 +Description: + Time interval in milliseconds. Send a CLOCKSYNC packet if + this many milliseconds have passed since the previous + CLOCKSYNC packet from this source. Zero is the default and + stands for "never send the CLOCKSYNC". It makes sense to + use this option with sources that generate constant and/or + periodic data, like stm_heartbeat. 
diff --git a/Documentation/ABI/testing/sysfs-bus-mei b/Documentation/ABI/testing/sysfs-bus-mei index 6bd45346ac7e4..6b11be387ab6d 100644 --- a/Documentation/ABI/testing/sysfs-bus-mei +++ b/Documentation/ABI/testing/sysfs-bus-mei @@ -26,3 +26,31 @@ KernelVersion: 4.3 Contact: Tomas Winkler Description: Stores mei client protocol version Format: %d + +What: /sys/bus/mei/devices/.../max_conn +Date: Sep 2018 +KernelVersion: 4.19 +Contact: Tomas Winkler +Description: Stores mei client maximum number of connections + Format: %d + +What: /sys/bus/mei/devices/.../fixed +Date: Sep 2018 +KernelVersion: 4.19 +Contact: Tomas Winkler +Description: Stores mei client fixed address, if any + Format: %d + +What: /sys/bus/mei/devices/.../vtag +Date: Sep 2018 +KernelVersion: 4.19 +Contact: Tomas Winkler +Description: Stores mei client vm tag support status + Format: %d + +What: /sys/bus/mei/devices/.../max_len +Date: Sep 2018 +KernelVersion: 4.19 +Contact: Tomas Winkler +Description: Stores mei client maximum message length + Format: %d diff --git a/Documentation/ABI/testing/sysfs-class-mei b/Documentation/ABI/testing/sysfs-class-mei index 17d7444a23973..cb135822f2c03 100644 --- a/Documentation/ABI/testing/sysfs-class-mei +++ b/Documentation/ABI/testing/sysfs-class-mei @@ -65,3 +65,18 @@ Description: Display the ME firmware version. :.... There can be up to three such blocks for different FW components. + +What: /sys/class/mei/meiN/dev_state +Date: Sep 2019 +KernelVersion: 4.19 +Contact: Tomas Winkler +Description: Display the ME device state. 
+ + The device state can have the following values: + INITIALIZING + INIT_CLIENTS + ENABLED + RESETTING + DISABLED + POWER_DOWN + POWER_UP diff --git a/Documentation/ABI/testing/sysfs-class-rpmb b/Documentation/ABI/testing/sysfs-class-rpmb new file mode 100644 index 0000000000000..d0540be7db199 --- /dev/null +++ b/Documentation/ABI/testing/sysfs-class-rpmb @@ -0,0 +1,57 @@ +What: /sys/class/rpmb/ +Date: Jul 2018 +KernelVersion: 4.18 +Contact: Tomas Winkler +Description: + The rpmb/ class sub-directory belongs to the RPMB device class. + + Few storage technologies such as EMMC, UFS, and NVMe support + Replay Protected Memory Block (RPMB) hardware partition with + common protocol and similar frame layout. + Such a partition provides authenticated and replay protected access, + hence suitable as secure storage. + +What: /sys/class/rpmb/rpmbN/ +Date: Jul 2018 +KernelVersion: 4.18 +Contact: Tomas Winkler +Description: + The /sys/class/rpmb/rpmbN directory is created for + each registered RPMB device. + +What: /sys/class/rpmb/rpmbN/type +Date: Jul 2018 +KernelVersion: 4.18 +Contact: Tomas Winkler +Description: + The /sys/class/rpmb/rpmbN/type file contains the device + underlying storage type technology: EMMC, UFS, NVMe. + In case of a simulated device it will have the :SIM suffix, + e.g. EMMC:SIM. + +What: /sys/class/rpmb/rpmbN/id +Date: Jul 2018 +KernelVersion: 4.18 +Contact: Tomas Winkler +Description: + The /sys/class/rpmb/rpmbN/id file contains a unique device id + in binary form as defined by the underlying storage device. + In case of multiple RPMB devices a user can determine the correct + device. + The content can be parsed according to the storage device type. + +What: /sys/class/rpmb/rpmbN/wr_cnt_max +Date: Jul 2018 +KernelVersion: 4.18 +Contact: Tomas Winkler +Description: + The /sys/class/rpmb/rpmbN/wr_cnt_max file contains + number of blocks that can be reliably written in a single request. 
+ +What: /sys/class/rpmb/rpmbN/rd_cnt_max +Date: Jul 2018 +KernelVersion: 4.18 +Contact: Tomas Winkler +Description: + The /sys/class/rpmb/rpmbN/rd_cnt_max file contains + number of blocks that can be read in a single request. diff --git a/Documentation/admin-guide/LSM/index.rst b/Documentation/admin-guide/LSM/index.rst index c980dfe9abf17..d3d8af1740426 100644 --- a/Documentation/admin-guide/LSM/index.rst +++ b/Documentation/admin-guide/LSM/index.rst @@ -17,11 +17,16 @@ MAC extensions, other extensions can be built using the LSM to provide specific changes to system operation when these tweaks are not available in the core functionality of Linux itself. -Without a specific LSM built into the kernel, the default LSM will be the -Linux capabilities system. Most LSMs choose to extend the capabilities -system, building their checks on top of the defined capability hooks. -For more details on capabilities, see ``capabilities(7)`` in the Linux -man-pages project. +The Linux capabilities modules will always be included. For more details +on capabilities, see ``capabilities(7)`` in the Linux man-pages project. + +Security modules that do not use the security data blobs maintained +by the LSM infrastructure are considered "minor" modules. These may be +included at compile time and stacked explicitly. Security modules that +use the LSM maintained security blobs are considered "major" modules. +These may only be stacked if the CONFIG_LSM_STACKED configuration +option is used. If this is chosen all of the security modules selected +will be used. A list of the active security modules can be found by reading ``/sys/kernel/security/lsm``. This is a comma separated list, and @@ -30,6 +35,14 @@ order in which checks are made. The capability module will always be first, followed by any "minor" modules (e.g. Yama) and then the one "major" module (e.g. SELinux) if there is one configured. 
+Process attributes associated with "major" security modules should +be accessed and maintained using the special files in ``/proc/.../attr``. +A security module may maintain a module specific subdirectory there, +named after the module. ``/proc/.../attr/smack`` is provided by the Smack +security module and contains all its special files. The files directly +in ``/proc/.../attr`` remain as legacy interfaces for modules that provide +subdirectories. + .. toctree:: :maxdepth: 1 diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt index 92eb1f42240d7..c09f2b41676d2 100644 --- a/Documentation/admin-guide/kernel-parameters.txt +++ b/Documentation/admin-guide/kernel-parameters.txt @@ -741,6 +741,10 @@ Format: , See also Documentation/input/devices/joystick-parport.rst + dev_sec_info.param_addr= + [BOOT] address of automotive bootloader (abl) + security parameters. + ddebug_query= [KNL,DYNAMIC_DEBUG] Enable debug messages at early boot time. See Documentation/admin-guide/dynamic-debug-howto.rst for @@ -1063,7 +1067,7 @@ earlyprintk=serial[,0x...[,baudrate]] earlyprintk=ttySn[,baudrate] earlyprintk=dbgp[debugController#] - earlyprintk=pciserial,bus:device.function[,baudrate] + earlyprintk=pciserial[,force],bus:device.function[,baudrate] earlyprintk=xdbc[xhciController#] earlyprintk is useful when the kernel crashes before @@ -1095,6 +1099,10 @@ The sclp output can only be used on s390. + The optional "force" to "pciserial" enables use of a + PCI device even when its classcode is not of the + UART class. + edac_report= [HW,EDAC] Control how to report EDAC event Format: {"on" | "off" | "force"} on: enable EDAC to report H/W event. May be overridden @@ -2069,6 +2077,9 @@ off Disables hypervisor mitigations and doesn't emit any warnings. + It also drops the swap size and available + RAM limit restriction on both hypervisor and + bare metal. Default is 'flush'. 
@@ -3053,6 +3064,14 @@ timeout < 0: reboot immediately Format: + panic_print= Bitmask for printing system info when panic happens. + User can choose a combination of the following bits: + bit 0: print all tasks info + bit 1: print system memory info + bit 2: print timer info + bit 3: print locks info if CONFIG_LOCKDEP is on + bit 4: print ftrace buffer + panic_on_warn panic() instead of WARN(). Useful to cause kdump on a WARN(). @@ -3899,6 +3918,9 @@ reboot_cpu is s[mp]#### with #### being the processor to be used for rebooting. + reboot_panic= [KNL] + Same as reboot parameter above but only in case of panic. + relax_domain_level= [KNL, SMP] Set scheduler's default relax_domain_level. See Documentation/cgroup-v1/cpusets.txt. @@ -4165,9 +4187,13 @@ spectre_v2= [X86] Control mitigation of Spectre variant 2 (indirect branch speculation) vulnerability. + The default operation protects the kernel from + user space attacks. - on - unconditionally enable - off - unconditionally disable + on - unconditionally enable, implies + spectre_v2_user=on + off - unconditionally disable, implies + spectre_v2_user=off auto - kernel detects whether your CPU model is vulnerable @@ -4177,6 +4203,12 @@ CONFIG_RETPOLINE configuration option, and the compiler with which the kernel was built. + Selecting 'on' will also enable the mitigation + against user space to user space task attacks. + + Selecting 'off' will disable both the kernel and + the user space protections. + Specific mitigations can also be selected manually: retpoline - replace indirect branches @@ -4186,6 +4218,48 @@ Not specifying this option is equivalent to spectre_v2=auto. + spectre_v2_user= + [X86] Control mitigation of Spectre variant 2 + (indirect branch speculation) vulnerability between + user space tasks + + on - Unconditionally enable mitigations. Is + enforced by spectre_v2=on + + off - Unconditionally disable mitigations. 
Is + enforced by spectre_v2=off + + prctl - Indirect branch speculation is enabled, + but mitigation can be enabled via prctl + per thread. The mitigation control state + is inherited on fork. + + prctl,ibpb + - Like "prctl" above, but only STIBP is + controlled per thread. IBPB is issued + always when switching between different user + space processes. + + seccomp + - Same as "prctl" above, but all seccomp + threads will enable the mitigation unless + they explicitly opt out. + + seccomp,ibpb + - Like "seccomp" above, but only STIBP is + controlled per thread. IBPB is issued + always when switching between different + user space processes. + + auto - Kernel selects the mitigation depending on + the available CPU features and vulnerability. + + Default mitigation: + If CONFIG_SECCOMP=y then "seccomp", otherwise "prctl" + + Not specifying this option is equivalent to + spectre_v2_user=auto. + spec_store_bypass_disable= [HW] Control Speculative Store Bypass (SSB) Disable mitigation (Speculative Store Bypass vulnerability) @@ -4683,6 +4757,8 @@ prevent spurious wakeup); n = USB_QUIRK_DELAY_CTRL_MSG (Device needs a pause after every control message); + o = USB_QUIRK_HUB_SLOW_RESET (Hub needs extra + delay after resetting its port); Example: quirks=0781:5580:bk,0a5c:5834:gij usbhid.mousepoll= diff --git a/Documentation/admin-guide/l1tf.rst b/Documentation/admin-guide/l1tf.rst index bae52b845de0b..9f5924f81f894 100644 --- a/Documentation/admin-guide/l1tf.rst +++ b/Documentation/admin-guide/l1tf.rst @@ -405,6 +405,9 @@ time with the option "l1tf=". The valid arguments for this option are: off Disables hypervisor mitigations and doesn't emit any warnings. + It also drops the swap size and available RAM limit restrictions + on both hypervisor and bare metal. + ============ ============================================================= The default is 'flush'. For details about L1D flushing see :ref:`l1d_flush`. 
@@ -576,7 +579,8 @@ Default mitigations The kernel default mitigations for vulnerable processors are: - PTE inversion to protect against malicious user space. This is done - unconditionally and cannot be controlled. + unconditionally and cannot be controlled. The swap storage is limited + to ~16TB. - L1D conditional flushing on VMENTER when EPT is enabled for a guest. diff --git a/Documentation/admin-guide/security-bugs.rst b/Documentation/admin-guide/security-bugs.rst index 30491d91e93d7..30187d49dc2c7 100644 --- a/Documentation/admin-guide/security-bugs.rst +++ b/Documentation/admin-guide/security-bugs.rst @@ -26,23 +26,35 @@ information is helpful. Any exploit code is very helpful and will not be released without consent from the reporter unless it has already been made public. -Disclosure ----------- - -The goal of the Linux kernel security team is to work with the bug -submitter to understand and fix the bug. We prefer to publish the fix as -soon as possible, but try to avoid public discussion of the bug itself -and leave that to others. - -Publishing the fix may be delayed when the bug or the fix is not yet -fully understood, the solution is not well-tested or for vendor -coordination. However, we expect these delays to be short, measurable in -days, not weeks or months. A release date is negotiated by the security -team working with the bug submitter as well as vendors. However, the -kernel security team holds the final say when setting a timeframe. The -timeframe varies from immediate (esp. if it's already publicly known bug) -to a few weeks. As a basic default policy, we expect report date to -release date to be on the order of 7 days. +Disclosure and embargoed information +------------------------------------ + +The security list is not a disclosure channel. For that, see Coordination +below. + +Once a robust fix has been developed, the release process starts. Fixes +for publicly known bugs are released immediately. 
+ +Although our preference is to release fixes for publicly undisclosed bugs +as soon as they become available, this may be postponed at the request of +the reporter or an affected party for up to 7 calendar days from the start +of the release process, with an exceptional extension to 14 calendar days +if it is agreed that the criticality of the bug requires more time. The +only valid reason for deferring the publication of a fix is to accommodate +the logistics of QA and large scale rollouts which require release +coordination. + +Whilst embargoed information may be shared with trusted individuals in +order to develop a fix, such information will not be published alongside +the fix or on any other disclosure channel without the permission of the +reporter. This includes but is not limited to the original bug report +and followup discussions (if any), exploits, CVE information or the +identity of the reporter. + +In other words our only interest is in getting bugs fixed. All other +information submitted to the security list and any followup discussions +of the report are treated confidentially even after the embargo has been +lifted, in perpetuity. Coordination ------------ @@ -68,7 +80,7 @@ may delay the bug handling. If a reporter wishes to have a CVE identifier assigned ahead of public disclosure, they will need to contact the private linux-distros list, described above. When such a CVE identifier is known before a patch is provided, it is desirable to mention it in the commit -message, though. +message if the reporter agrees. 
Non-disclosure agreements ------------------------- diff --git a/Documentation/conf.py b/Documentation/conf.py index b691af4831fad..a57272e7820c7 100644 --- a/Documentation/conf.py +++ b/Documentation/conf.py @@ -403,6 +403,8 @@ 'The kernel development community', 'manual'), ('userspace-api/index', 'userspace-api.tex', 'The Linux kernel user-space API guide', 'The kernel development community', 'manual'), + ('rpmb/index', 'rpmb.tex', 'Linux RPMB Subsystem Documentation', + 'The kernel development community', 'manual'), ] # The name of an image file (relative to this directory) to place at the top of diff --git a/Documentation/devicetree/bindings/net/can/holt_hi311x.txt b/Documentation/devicetree/bindings/net/can/holt_hi311x.txt index 903a78da65be2..3a9926f999370 100644 --- a/Documentation/devicetree/bindings/net/can/holt_hi311x.txt +++ b/Documentation/devicetree/bindings/net/can/holt_hi311x.txt @@ -17,7 +17,7 @@ Example: reg = <1>; clocks = <&clk32m>; interrupt-parent = <&gpio4>; - interrupts = <13 IRQ_TYPE_EDGE_RISING>; + interrupts = <13 IRQ_TYPE_LEVEL_HIGH>; vdd-supply = <®5v0>; xceiver-supply = <®5v0>; }; diff --git a/Documentation/devicetree/bindings/trusty/trusty-fiq-debugger.txt b/Documentation/devicetree/bindings/trusty/trusty-fiq-debugger.txt new file mode 100644 index 0000000000000..18329d39487eb --- /dev/null +++ b/Documentation/devicetree/bindings/trusty/trusty-fiq-debugger.txt @@ -0,0 +1,8 @@ +Trusty fiq debugger interface + +Provides a single fiq for the fiq debugger. + +Required properties: +- compatible: compatible = "android,trusty-fiq-v1-*"; where * is a serial port. + +Must be a child of the node that provides fiq support ("android,trusty-fiq-v1"). 
diff --git a/Documentation/devicetree/bindings/trusty/trusty-fiq.txt b/Documentation/devicetree/bindings/trusty/trusty-fiq.txt new file mode 100644 index 0000000000000..de810b955bc93 --- /dev/null +++ b/Documentation/devicetree/bindings/trusty/trusty-fiq.txt @@ -0,0 +1,8 @@ +Trusty fiq interface + +Trusty provides fiq emulation. + +Required properties: +- compatible: "android,trusty-fiq-v1" + +Must be a child of the node that provides the trusty std/fast call interface. diff --git a/Documentation/devicetree/bindings/trusty/trusty-irq.txt b/Documentation/devicetree/bindings/trusty/trusty-irq.txt new file mode 100644 index 0000000000000..5aefeb8e536fa --- /dev/null +++ b/Documentation/devicetree/bindings/trusty/trusty-irq.txt @@ -0,0 +1,67 @@ +Trusty irq interface + +Trusty requires non-secure irqs to be forwarded to the secure OS. + +Required properties: +- compatible: "android,trusty-irq-v1" + +Optional properties: + +- interrupt-templates: is an optional property that works together + with "interrupt-ranges" to specify secure side to kernel IRQs mapping. + + It is a list of entries, each one of which defines a group of interrupts + having common properties, and has the following format: + < phandle irq_id_pos [templ_data]> + phandle - phandle of interrupt controller this template is for + irq_id_pos - the position of irq id in interrupt specifier array + for interrupt controller referenced by phandle. + templ_data - is an array of u32 values (could be empty) in the same + format as interrupt specifier for interrupt controller + referenced by phandle but with omitted irq id field. + +- interrupt-ranges: list of entries that specifies secure side to kernel + IRQs mapping. 
+ + Each entry in the "interrupt-ranges" list has the following format: + + beg - first entry in this range + end - last entry in this range + templ_idx - index of entry in "interrupt-templates" property + that must be used as a template for all interrupts + in this range + +Example: +{ + gic: interrupt-controller@50041000 { + compatible = "arm,gic-400"; + #interrupt-cells = <3>; + interrupt-controller; + ... + }; + ... + IPI: interrupt-controller { + compatible = "android,CustomIPI"; + #interrupt-cells = <1>; + interrupt-controller; + }; + ... + trusty { + compatible = "android,trusty-smc-v1"; + ranges; + #address-cells = <2>; + #size-cells = <2>; + + irq { + compatible = "android,trusty-irq-v1"; + interrupt-templates = <&IPI 0>, + <&gic 1 GIC_PPI 0>, + <&gic 1 GIC_SPI 0>; + interrupt-ranges = < 0 15 0>, + <16 31 1>, + <32 223 2>; + }; + } +} + +Must be a child of the node that provides the trusty std/fast call interface. diff --git a/Documentation/devicetree/bindings/trusty/trusty-smc.txt b/Documentation/devicetree/bindings/trusty/trusty-smc.txt new file mode 100644 index 0000000000000..1b39ad317c678 --- /dev/null +++ b/Documentation/devicetree/bindings/trusty/trusty-smc.txt @@ -0,0 +1,6 @@ +Trusty smc interface + +Trusty is running in secure mode on the same (arm) cpu(s) as the current os. + +Required properties: +- compatible: "android,trusty-smc-v1" diff --git a/Documentation/driver-api/index.rst b/Documentation/driver-api/index.rst index 6d9f2f9fe20ee..d602f6c05972a 100644 --- a/Documentation/driver-api/index.rst +++ b/Documentation/driver-api/index.rst @@ -53,6 +53,7 @@ available subsections can be seen below. slimbus soundwire/index fpga/index + rpmb/index .. 
only:: subproject and html diff --git a/Documentation/driver-api/rpmb/conf.py b/Documentation/driver-api/rpmb/conf.py new file mode 100644 index 0000000000000..15430a0b3a089 --- /dev/null +++ b/Documentation/driver-api/rpmb/conf.py @@ -0,0 +1,5 @@ +# -*- coding: utf-8; mode: python -*- + +project = "Linux RPMB Subsystem" + +tags.add("subproject") diff --git a/Documentation/driver-api/rpmb/index.rst b/Documentation/driver-api/rpmb/index.rst new file mode 100644 index 0000000000000..3813a44ad06e7 --- /dev/null +++ b/Documentation/driver-api/rpmb/index.rst @@ -0,0 +1,18 @@ +.. SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause + +============================================== +Replay Protected Memory Block (RPMB) subsystem +============================================== + +.. toctree:: + + introduction + simulation-device.rst + rpmb-tool.rst + +.. only:: subproject + + Indices + ======= + + * :ref:`genindex` diff --git a/Documentation/driver-api/rpmb/introduction.rst b/Documentation/driver-api/rpmb/introduction.rst new file mode 100644 index 0000000000000..403cbcf6e142c --- /dev/null +++ b/Documentation/driver-api/rpmb/introduction.rst @@ -0,0 +1,98 @@ +.. SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause + +============= +Introduction: +============= + +Few storage technologies such as EMMC, UFS, and NVMe support RPMB +hardware partition with common protocol and frame layout. +The RPMB partition `cannot` be accessed via the standard block layer, +but by a set of specific commands: + +WRITE, READ, GET_WRITE_COUNTER, and PROGRAM_KEY. + +The commands and the data are embedded within :c:type:`rpmb_frame `. + +An RPMB partition provides authenticated and replay protected access, +hence it is suitable as secure storage. + +In-kernel API +------------- +The RPMB layer aims to provide an in-kernel API for Trusted Execution +Environment (TEE) devices that are capable of securely computing the block +frame signature. 
In case a TEE device wishes to store replay protected +data, it creates an RPMB frame with the requested data and computes HMAC of +the frame, then it requests the storage device via the RPMB layer to store +the data. + +The layer provides an API, :c:func:`rpmb_cmd_seq()`, for issuing a sequence +of raw RPMB protocol frames, which is close to the functionality provided +by the eMMC multi ioctl interface. + +.. c:function:: int rpmb_cmd_seq(struct rpmb_dev *rdev, u8 target, struct rpmb_cmd *cmds, u32 ncmds); + + +A TEE driver can claim the RPMB interface, for example, via +:c:func:`class_interface_register`: + +.. code-block:: c + + struct class_interface tee_rpmb_intf = { + .class = &rpmb_class; + .add_dev = rpmb_add_device; + .remove_dev = rpmb_remove_device; + } + class_interface_register(&tee_rpmb_intf); + + +RPMB device registration +---------------------------- + +A storage device registers its RPMB hardware (eMMC) partition or RPMB +W-LUN (UFS) with the RPMB layer :c:func:`rpmb_dev_register` providing +an implementation for the :c:func:`rpmb_cmd_seq()` handler. The interface +enables sending a sequence of RPMB standard frames. + +.. code-block:: c + + struct rpmb_ops mmc_rpmb_dev_ops = { + .cmd_seq = mmc_blk_rpmb_cmd_seq, + .type = RPMB_TYPE_EMMC, + ... + } + rpmb_dev_register(disk_to_dev(part_md->disk), &mmc_rpmb_dev_ops); + + +User space API +-------------- + +A parallel user space API is provided via /dev/rpmbX character +device with three IOCTL commands. +- First ``RPMB_IOC_VER_CMD``, returns the driver protocol version, +- second ``RPMB_IOC_CAP_CMD`` returns the capability structure, +- last ``RPMB_IOC_SEQ_CMD`` where the whole RPMB sequence, + including ``RESULT_READ``, is supplied by the caller. +https://android.googlesource.com/trusty/app/storage/ + .. 
code-block:: c + + struct rpmb_ioc_req_cmd ireq; + int ret; + + ireq.req_type = RPMB_WRITE_DATA; + rpmb_ioc_cmd_set(ireq.icmd, RPMB_F_WRITE, frames_in, cnt_in); + rpmb_ioc_cmd_set(ireq.ocmd, 0, frames_out, cnt_out); + + ret = ioctl(fd, RPMB_IOC_REQ_CMD, &ireq); + + +API +--- +.. kernel-doc:: include/linux/rpmb.h + +.. kernel-doc:: drivers/char/rpmb/core.c + +.. kernel-doc:: include/uapi/linux/rpmb.h + +.. kernel-doc:: drivers/char/rpmb/cdev.c + diff --git a/Documentation/driver-api/rpmb/rpmb-tool.rst b/Documentation/driver-api/rpmb/rpmb-tool.rst new file mode 100644 index 0000000000000..3f4eed84542a6 --- /dev/null +++ b/Documentation/driver-api/rpmb/rpmb-tool.rst @@ -0,0 +1,19 @@ +.. SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause +========== +RPMB Tool +========== + +There is a sample rpmb tool under tools/rpmb/ directory that exercises +the RPMB devices via RPMB character devices interface (/dev/rpmbX) + +.. code-block:: none + + rpmb [-v] [-r|-s] + + rpmb get-info + rpmb program-key + rpmb write-counter [KEY_FILE] + rpmb write-blocks
+ rpmb read-blocks
[KEY_FILE] + + rpmb -v/--verbose: runs in verbose mode diff --git a/Documentation/driver-api/rpmb/simulation-device.rst b/Documentation/driver-api/rpmb/simulation-device.rst new file mode 100644 index 0000000000000..21b7bc8bc39d8 --- /dev/null +++ b/Documentation/driver-api/rpmb/simulation-device.rst @@ -0,0 +1,21 @@ +.. SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause + +====================== +RPMB Simulation Device +====================== + +The RPMB partition simulation device is a virtual device that +provides simulation of the RPMB protocol and uses kernel memory +as storage. + +This driver cannot promise any real security; it is suitable for testing +of the RPMB subsystem itself and it was mostly found useful for testing of +RPMB applications prior to RPMB key provisioning/programming, as +the RPMB key programming can be performed only once in the lifetime +of the storage device. + +Implementation: +--------------- + +.. kernel-doc:: drivers/char/rpmb/rpmb_sim.c + diff --git a/Documentation/filesystems/fscrypt.rst b/Documentation/filesystems/fscrypt.rst index 48b424de85bbc..cfbc18f0d9c98 --- a/Documentation/filesystems/fscrypt.rst +++ b/Documentation/filesystems/fscrypt.rst @@ -191,21 +191,11 @@ Currently, the following pairs of encryption modes are supported: - AES-256-XTS for contents and AES-256-CTS-CBC for filenames - AES-128-CBC for contents and AES-128-CTS-CBC for filenames -- Speck128/256-XTS for contents and Speck128/256-CTS-CBC for filenames It is strongly recommended to use AES-256-XTS for contents encryption. AES-128-CBC was added only for low-powered embedded devices with crypto accelerators such as CAAM or CESA that do not support XTS. -Similarly, Speck128/256 support was only added for older or low-end -CPUs which cannot do AES fast enough -- especially ARM CPUs which have -NEON instructions but not the Cryptography Extensions -- and for which -it would not otherwise be feasible to use encryption at all. 
It is -not recommended to use Speck on CPUs that have AES instructions. -Speck support is only available if it has been enabled in the crypto -API via CONFIG_CRYPTO_SPECK. Also, on ARM platforms, to get -acceptable performance CONFIG_CRYPTO_SPECK_NEON must be enabled. - New encryption modes can be added relatively easily, without changes to individual filesystems. However, authenticated encryption (AE) modes are not currently supported because of the difficulty of dealing diff --git a/Documentation/filesystems/overlayfs.txt b/Documentation/filesystems/overlayfs.txt index 51c136c821bfb..eef7d9d259e85 100644 --- a/Documentation/filesystems/overlayfs.txt +++ b/Documentation/filesystems/overlayfs.txt @@ -286,6 +286,12 @@ pointed by REDIRECT. This should not be possible on local system as setting "trusted." xattrs will require CAP_SYS_ADMIN. But it should be possible for untrusted layers like from a pen drive. +Note: redirect_dir={off|nofollow|follow(*)} conflicts with metacopy=on, and +results in an error. + +(*) redirect_dir=follow only conflicts with metacopy=on if upperdir=... is +given. + Sharing and copying layers -------------------------- diff --git a/Documentation/filesystems/proc.txt b/Documentation/filesystems/proc.txt index 22b4b00dee312..06ac6dda9b345 100644 --- a/Documentation/filesystems/proc.txt +++ b/Documentation/filesystems/proc.txt @@ -496,7 +496,9 @@ manner. The codes are the following: Note that there is no guarantee that every flag and associated mnemonic will be present in all further kernel releases. Things get changed, the flags may -be vanished or the reverse -- new added. +be vanished or the reverse -- new added. Interpretation of their meaning +might change in future as well. So each consumer of these flags has to +follow each specific kernel version for the exact semantic. This file is only present if the CONFIG_MMU kernel configuration option is enabled. 
diff --git a/Documentation/ioctl/ioctl-number.txt b/Documentation/ioctl/ioctl-number.txt index 13a7c999c04ab..ae2f081324f4c 100644 --- a/Documentation/ioctl/ioctl-number.txt +++ b/Documentation/ioctl/ioctl-number.txt @@ -324,6 +324,7 @@ Code Seq#(hex) Include File Comments 0xB3 00 linux/mmc/ioctl.h 0xB4 00-0F linux/gpio.h 0xB5 00-0F uapi/linux/rpmsg.h +0xB5 80-8F linux/uapi/linux/rpmb.h 0xB6 all linux/fpga-dfl.h 0xC0 00-0F linux/usb/iowarrior.h 0xCA 00-0F uapi/misc/cxl.h diff --git a/Documentation/media/uapi/cec/cec-ioc-receive.rst b/Documentation/media/uapi/cec/cec-ioc-receive.rst index e964074cd15b7..b25e48afaa087 100644 --- a/Documentation/media/uapi/cec/cec-ioc-receive.rst +++ b/Documentation/media/uapi/cec/cec-ioc-receive.rst @@ -16,10 +16,10 @@ CEC_RECEIVE, CEC_TRANSMIT - Receive or transmit a CEC message Synopsis ======== -.. c:function:: int ioctl( int fd, CEC_RECEIVE, struct cec_msg *argp ) +.. c:function:: int ioctl( int fd, CEC_RECEIVE, struct cec_msg \*argp ) :name: CEC_RECEIVE -.. c:function:: int ioctl( int fd, CEC_TRANSMIT, struct cec_msg *argp ) +.. c:function:: int ioctl( int fd, CEC_TRANSMIT, struct cec_msg \*argp ) :name: CEC_TRANSMIT Arguments @@ -272,6 +272,19 @@ View On' messages from initiator 0xf ('Unregistered') to destination 0 ('TV'). - The transmit failed after one or more retries. This status bit is mutually exclusive with :ref:`CEC_TX_STATUS_OK `. Other bits can still be set to explain which failures were seen. + * .. _`CEC-TX-STATUS-ABORTED`: + + - ``CEC_TX_STATUS_ABORTED`` + - 0x40 + - The transmit was aborted due to an HDMI disconnect, or the adapter + was unconfigured, or a transmit was interrupted, or the driver + returned an error when attempting to start a transmit. + * .. _`CEC-TX-STATUS-TIMEOUT`: + + - ``CEC_TX_STATUS_TIMEOUT`` + - 0x80 + - The transmit timed out. This should not normally happen and this + indicates a driver problem. .. 
tabularcolumns:: |p{5.6cm}|p{0.9cm}|p{11.0cm}| @@ -300,6 +313,14 @@ View On' messages from initiator 0xf ('Unregistered') to destination 0 ('TV'). - The message was received successfully but the reply was ``CEC_MSG_FEATURE_ABORT``. This status is only set if this message was the reply to an earlier transmitted message. + * .. _`CEC-RX-STATUS-ABORTED`: + + - ``CEC_RX_STATUS_ABORTED`` + - 0x08 + - The wait for a reply to an earlier transmitted message was aborted + because the HDMI cable was disconnected, the adapter was unconfigured + or the :ref:`CEC_TRANSMIT ` that waited for a + reply was interrupted. diff --git a/Documentation/media/uapi/v4l/biblio.rst b/Documentation/media/uapi/v4l/biblio.rst index 1cedcfc043273..386d6cf83e9ca 100644 --- a/Documentation/media/uapi/v4l/biblio.rst +++ b/Documentation/media/uapi/v4l/biblio.rst @@ -226,16 +226,6 @@ xvYCC :author: International Electrotechnical Commission (http://www.iec.ch) -.. _adobergb: - -AdobeRGB -======== - - -:title: Adobe© RGB (1998) Color Image Encoding Version 2005-05 - -:author: Adobe Systems Incorporated (http://www.adobe.com) - .. _oprgb: opRGB diff --git a/Documentation/media/uapi/v4l/colorspaces-defs.rst b/Documentation/media/uapi/v4l/colorspaces-defs.rst index 410907fe9415e..f24615544792b 100644 --- a/Documentation/media/uapi/v4l/colorspaces-defs.rst +++ b/Documentation/media/uapi/v4l/colorspaces-defs.rst @@ -51,8 +51,8 @@ whole range, 0-255, dividing the angular value by 1.41. The enum - See :ref:`col-rec709`. * - ``V4L2_COLORSPACE_SRGB`` - See :ref:`col-srgb`. - * - ``V4L2_COLORSPACE_ADOBERGB`` - - See :ref:`col-adobergb`. + * - ``V4L2_COLORSPACE_OPRGB`` + - See :ref:`col-oprgb`. * - ``V4L2_COLORSPACE_BT2020`` - See :ref:`col-bt2020`. * - ``V4L2_COLORSPACE_DCI_P3`` @@ -90,8 +90,8 @@ whole range, 0-255, dividing the angular value by 1.41. The enum - Use the Rec. 709 transfer function. * - ``V4L2_XFER_FUNC_SRGB`` - Use the sRGB transfer function. 
- * - ``V4L2_XFER_FUNC_ADOBERGB`` - - Use the AdobeRGB transfer function. + * - ``V4L2_XFER_FUNC_OPRGB`` + - Use the opRGB transfer function. * - ``V4L2_XFER_FUNC_SMPTE240M`` - Use the SMPTE 240M transfer function. * - ``V4L2_XFER_FUNC_NONE`` diff --git a/Documentation/media/uapi/v4l/colorspaces-details.rst b/Documentation/media/uapi/v4l/colorspaces-details.rst index b5d551b9cc8f8..09fabf4cd4126 100644 --- a/Documentation/media/uapi/v4l/colorspaces-details.rst +++ b/Documentation/media/uapi/v4l/colorspaces-details.rst @@ -290,15 +290,14 @@ Y' is clamped to the range [0…1] and Cb and Cr are clamped to the range 170M/BT.601. The Y'CbCr quantization is limited range. -.. _col-adobergb: +.. _col-oprgb: -Colorspace Adobe RGB (V4L2_COLORSPACE_ADOBERGB) +Colorspace opRGB (V4L2_COLORSPACE_OPRGB) =============================================== -The :ref:`adobergb` standard defines the colorspace used by computer -graphics that use the AdobeRGB colorspace. This is also known as the -:ref:`oprgb` standard. The default transfer function is -``V4L2_XFER_FUNC_ADOBERGB``. The default Y'CbCr encoding is +The :ref:`oprgb` standard defines the colorspace used by computer +graphics that use the opRGB colorspace. The default transfer function is +``V4L2_XFER_FUNC_OPRGB``. The default Y'CbCr encoding is ``V4L2_YCBCR_ENC_601``. The default Y'CbCr quantization is limited range. @@ -312,7 +311,7 @@ The chromaticities of the primary colors and the white reference are: .. tabularcolumns:: |p{4.4cm}|p{4.4cm}|p{8.7cm}| -.. flat-table:: Adobe RGB Chromaticities +.. 
flat-table:: opRGB Chromaticities :header-rows: 1 :stub-columns: 0 :widths: 1 1 2 diff --git a/Documentation/media/videodev2.h.rst.exceptions b/Documentation/media/videodev2.h.rst.exceptions index ca9f0edc579e6..e420a39f1ebfe 100644 --- a/Documentation/media/videodev2.h.rst.exceptions +++ b/Documentation/media/videodev2.h.rst.exceptions @@ -56,7 +56,8 @@ replace symbol V4L2_MEMORY_USERPTR :c:type:`v4l2_memory` # Documented enum v4l2_colorspace replace symbol V4L2_COLORSPACE_470_SYSTEM_BG :c:type:`v4l2_colorspace` replace symbol V4L2_COLORSPACE_470_SYSTEM_M :c:type:`v4l2_colorspace` -replace symbol V4L2_COLORSPACE_ADOBERGB :c:type:`v4l2_colorspace` +replace symbol V4L2_COLORSPACE_OPRGB :c:type:`v4l2_colorspace` +replace define V4L2_COLORSPACE_ADOBERGB :c:type:`v4l2_colorspace` replace symbol V4L2_COLORSPACE_BT2020 :c:type:`v4l2_colorspace` replace symbol V4L2_COLORSPACE_DCI_P3 :c:type:`v4l2_colorspace` replace symbol V4L2_COLORSPACE_DEFAULT :c:type:`v4l2_colorspace` @@ -69,7 +70,8 @@ replace symbol V4L2_COLORSPACE_SRGB :c:type:`v4l2_colorspace` # Documented enum v4l2_xfer_func replace symbol V4L2_XFER_FUNC_709 :c:type:`v4l2_xfer_func` -replace symbol V4L2_XFER_FUNC_ADOBERGB :c:type:`v4l2_xfer_func` +replace symbol V4L2_XFER_FUNC_OPRGB :c:type:`v4l2_xfer_func` +replace define V4L2_XFER_FUNC_ADOBERGB :c:type:`v4l2_xfer_func` replace symbol V4L2_XFER_FUNC_DCI_P3 :c:type:`v4l2_xfer_func` replace symbol V4L2_XFER_FUNC_DEFAULT :c:type:`v4l2_xfer_func` replace symbol V4L2_XFER_FUNC_NONE :c:type:`v4l2_xfer_func` diff --git a/Documentation/misc-devices/mei/dal/dal.rst b/Documentation/misc-devices/mei/dal/dal.rst new file mode 100644 index 0000000000000..53a7a9ec76721 --- /dev/null +++ b/Documentation/misc-devices/mei/dal/dal.rst @@ -0,0 +1,149 @@ +.. 
SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause + +Intel(R) Dynamic Application Loader (Intel(R) DAL) +=================================================== + +Introduction +============= + +The Intel (R) Dynamic Application Loader (Intel (R) DAL) is a +Trusted Execution Environment (TEE) which, as a part of the +Converged Security Engine (CSE) firmware, enables users to directly +access and run small portions of their code on part of the system's +root of trust. + +--- + +Onto this firmware, the DAL user installs a small Java applet, +called a Trusted Application (TA), or an applet. From the host +application that runs on the device's operating system, the below +interfaces are used to interact with the applet for running a small +function that needs to be run in a secure environment, outside of the +operating system. + +DAL exposes two interfaces to the operating system, which serve as +the communication channels between trusted applications and host based +applications. +One from the user space, called Intel (R) DAL Host Interface, or JHI, +need one from kernel space, called Intel (R) Management Engine Interface +and Dynamic Application Loader (Intel (R) MEI DAL), or KDI. +Only user space applications can install and uninstall TAs. Both kernel +and user space applications can communicate with installed TAs. + + +Intel(R) MEI DAL Linux Kernel Driver +===================================== +The Intel(R) Management Engine Interface and Dynamic Application Loader +(Intel(R) MEI DAL) is a kernel component that provides both user space +and kernel space communication interfaces with the DAL client in the +CSE firmware, enabling the direct usage of DAL by Linux kernel +components. 
+ +User Space Interface +--------------------- +DAL runs 3 processes: + * DAL Security Domains Manager (DAL SDM) + - manages the applets and security domains life cycles + * DAL Intel Virtual Machine (DAL IVM) + - the VM that runs the applets byte code + * DAL Launcher + - A place holder for future second VM and native applets + support. +For each one of them, the driver exposes a char device +called /dev/dal{i}, while i is 0-2 respectively. + +The user space interface serves as a transfer only channel between user +space applications and DAL FW; it allows sending raw messages from +user space to a DAL FW process, without any processing or modification +of the message data, and receiving back the raw messages which was +received from DAL FW. The messages are sent using the char device +'write' function, and received using the 'read' function in accordance. +Usually this interface is used by the JHI (for more information about +JHI search dynamic-application-loader-host-interface in github). + +Kernel Space Interface +----------------------- +The driver exposes API in file, to allow kernel space +clients communicating with Intel DAL. + +Below are the exposed APIs. + +dal_create_session - creates a session to an installed trusted + application. + Arguments: + session_handle: output param to hold the session handle + ta_id: trusted application (TA) id + acp_pkg ACP file of the TA + acp_pkg_len: ACP file length + init_param: init parameters to the session (optional) + init_param_len: length of the init parameters + + Returns: + 0 on success + <0 on system failure + >0 on DAL FW failure + +dal_send_and_receive - sends and receives data to and from + trusted application + Arguments: + session_handle: session handle + command_id: command id + input: message to be sent + input_len: sent message size + output: An output parameter to hold a pointer + to the buffer which will contain the received + message. 
+ This buffer is allocated by the driver and freed + by the user + output_len: An input and output parameter- + - input: the expected maximum length + of the received message. + - output: size of the received message + response_code: An output parameter to hold the return + value from the applet + + Returns: + 0 on success + <0 on system failure + >0 on DAL FW failure + +dal_close_session - closes a session with trusted application + Arguments: + session_handle: session handle + + Returns: + 0 on success + <0 on system failure + >0 on DAL FW failure + +dal_set_ta_exclusive_access - sets client to be owner of the TA, + so no one else (especially user space client) will be able + to open a session to it + Arguments: + ta_id: trusted application (TA) id + + Return: + 0 on success + -ENODEV when the device can't be found + -ENOMEM on memory allocation failure + -EPERM when TA is owned by another client + -EEXIST when TA is already owned by current client + +dal_unset_ta_exclusive_access - unsets client from owning TA + Arguments: + ta_id: trusted application (TA) id + + Return: + 0 on success + -ENODEV when the device can't be found + -ENOENT when TA wassn't found in exclusiveness TAs list + -EPERM when TA is owned by another client + +dal_get_version_info - return DAL driver version + Arguments: + version_info: output param to hold DAL driver version + information. + + Return: + 0 on success + -EINVAL on incorrect input diff --git a/Documentation/trace/stm.rst b/Documentation/trace/stm.rst index 2c22ddb7fd3ef..99f99963e5e75 100644 --- a/Documentation/trace/stm.rst +++ b/Documentation/trace/stm.rst @@ -1,3 +1,5 @@ +.. SPDX-License-Identifier: GPL-2.0 + =================== System Trace Module =================== @@ -53,12 +55,30 @@ under "user" directory from the example above and this new rule will be used for trace sources with the id string of "user/dummy". Trace sources have to open the stm class device's node and write their -trace data into its file descriptor. 
In order to identify themselves -to the policy, they need to do a STP_POLICY_ID_SET ioctl on this file -descriptor providing their id string. Otherwise, they will be -automatically allocated a master/channel pair upon first write to this -file descriptor according to the "default" rule of the policy, if such -exists. +trace data into its file descriptor. + +In order to find an appropriate policy node for a given trace source, +several mechanisms can be used. First, a trace source can explicitly +identify itself by calling an STP_POLICY_ID_SET ioctl on the character +device's file descriptor, providing their id string, before they write +any data there. Secondly, if they chose not to perform the explicit +identification (because you may not want to patch existing software +to do this), they can just start writing the data, at which point the +stm core will try to find a policy node with the name matching the +task's name (e.g., "syslogd") and if one exists, it will be used. +Thirdly, if the task name can't be found among the policy nodes, the +catch-all entry "default" will be used, if it exists. This entry also +needs to be created and configured by the system administrator or +whatever tools are taking care of the policy configuration. Finally, +if all the above steps failed, the write() to an stm file descriptor +will return a error (EINVAL). + +Previously, if no policy nodes were found for a trace source, the stm +class would silently fall back to allocating the first available +contiguous range of master/channels from the beginning of the device's +master/channel range. The new requirement for a policy node to exist +will help programmers and sysadmins identify gaps in configuration +and have better control over the un-identified sources. Some STM devices may allow direct mapping of the channel mmio regions to userspace for zero-copy writing. One mappable page (in terms of @@ -92,9 +112,9 @@ allocated for the device according to the policy configuration. 
If there's a node in the root of the policy directory that matches the stm_source device's name (for example, "console"), this node will be used to allocate master and channel numbers. If there's no such policy -node, the stm core will pick the first contiguous chunk of channels -within the first available master. Note that the node must exist -before the stm_source device is connected to its stm device. +node, the stm core will use the catch-all entry "default", if one +exists. If neither policy nodes exist, the write() to stm_source_link +will return an error. stm_console =========== diff --git a/Documentation/trace/sys-t.rst b/Documentation/trace/sys-t.rst new file mode 100644 index 0000000000000..3d8eb92735e9c --- /dev/null +++ b/Documentation/trace/sys-t.rst @@ -0,0 +1,62 @@ +.. SPDX-License-Identifier: GPL-2.0 + +=================== +MIPI SyS-T over STP +=================== + +The MIPI SyS-T protocol driver can be used with STM class devices to +generate standardized trace stream. Aside from being a standard, it +provides better trace source identification and timestamp correlation. + +In order to use the MIPI SyS-T protocol driver with your STM device, +first, you'll need CONFIG_STM_PROTO_SYS_T. + +Now, you can select which protocol driver you want to use when you create +a policy for your STM device, by specifying it in the policy name: + +# mkdir /config/stp-policy/dummy_stm.0:p_sys-t.my-policy/ + +In other words, the policy name format is extended like this: + + :. + +With Intel TH, therefore it can look like "0-sth:p_sys-t.my-policy". + +If the protocol name is omitted, the STM class will chose whichever +protocol driver was loaded first. 
+ +You can also double check that everything is working as expected by + +# cat /config/stp-policy/dummy_stm.0:p_sys-t.my-policy/protocol +p_sys-t + +Now, with the MIPI SyS-T protocol driver, each policy node in the +configfs gets a few additional attributes, which determine per-source +parameters specific to the protocol: + +# mkdir /config/stp-policy/dummy_stm.0:p_sys-t.my-policy/default +# ls /config/stp-policy/dummy_stm.0:p_sys-t.my-policy/default +channels +clocksync_interval +do_len +masters +ts_interval +uuid + +The most important one here is the "uuid", which determines the UUID +that will be used to tag all data coming from this source. It is +automatically generated when a new node is created, but it is likely +that you would want to change it. + +do_len switches on/off the additional "payload length" field in the +MIPI SyS-T message header. It is off by default as the STP already +marks message boundaries. + +ts_interval and clocksync_interval determine how much time in milliseconds +can pass before we need to include a protocol (not transport, aka STP) +timestamp in a message header or send a CLOCKSYNC packet, respectively. + +See Documentation/ABI/testing/configfs-stp-policy-p_sys-t for more +details. 
+ +* [1] https://www.mipi.org/specifications/sys-t diff --git a/Documentation/userspace-api/spec_ctrl.rst b/Documentation/userspace-api/spec_ctrl.rst index 32f3d55c54b75..c4dbe6f7cdae8 100644 --- a/Documentation/userspace-api/spec_ctrl.rst +++ b/Documentation/userspace-api/spec_ctrl.rst @@ -92,3 +92,12 @@ Speculation misfeature controls * prctl(PR_SET_SPECULATION_CTRL, PR_SPEC_STORE_BYPASS, PR_SPEC_ENABLE, 0, 0); * prctl(PR_SET_SPECULATION_CTRL, PR_SPEC_STORE_BYPASS, PR_SPEC_DISABLE, 0, 0); * prctl(PR_SET_SPECULATION_CTRL, PR_SPEC_STORE_BYPASS, PR_SPEC_FORCE_DISABLE, 0, 0); + +- PR_SPEC_INDIR_BRANCH: Indirect Branch Speculation in User Processes + (Mitigate Spectre V2 style attacks against user processes) + + Invocations: + * prctl(PR_GET_SPECULATION_CTRL, PR_SPEC_INDIRECT_BRANCH, 0, 0, 0); + * prctl(PR_SET_SPECULATION_CTRL, PR_SPEC_INDIRECT_BRANCH, PR_SPEC_ENABLE, 0, 0); + * prctl(PR_SET_SPECULATION_CTRL, PR_SPEC_INDIRECT_BRANCH, PR_SPEC_DISABLE, 0, 0); + * prctl(PR_SET_SPECULATION_CTRL, PR_SPEC_INDIRECT_BRANCH, PR_SPEC_FORCE_DISABLE, 0, 0); diff --git a/Documentation/virtual/00-INDEX b/Documentation/virtual/00-INDEX index af0d23968ee71..257aec22dbff0 100644 --- a/Documentation/virtual/00-INDEX +++ b/Documentation/virtual/00-INDEX @@ -9,3 +9,6 @@ kvm/ - Kernel Virtual Machine. See also http://linux-kvm.org uml/ - User Mode Linux, builds/runs Linux kernel as a userspace program. + +acrn/ + - ACRN Project. See also http://github.com/projectacrn/ diff --git a/Documentation/virtual/acrn/00-INDEX b/Documentation/virtual/acrn/00-INDEX new file mode 100644 index 0000000000000..5beb50eef9e1c --- /dev/null +++ b/Documentation/virtual/acrn/00-INDEX @@ -0,0 +1,8 @@ +00-INDEX + - this file. +index.rst + - Index. +vhm.rst + - virtio and hypervisor service module (VHM) APIs. +vbs.rst + - virtio and backend service (VBS) APIs. 
diff --git a/Documentation/virtual/acrn/conf.py b/Documentation/virtual/acrn/conf.py new file mode 100644 index 0000000000000..ed247df22700c --- /dev/null +++ b/Documentation/virtual/acrn/conf.py @@ -0,0 +1,5 @@ +# -*- coding: utf-8; mode: python -*- + +project = "ACRN Project" + +tags.add("subproject") diff --git a/Documentation/virtual/acrn/index.rst b/Documentation/virtual/acrn/index.rst new file mode 100644 index 0000000000000..3630d4fe32076 --- /dev/null +++ b/Documentation/virtual/acrn/index.rst @@ -0,0 +1,17 @@ +.. -*- coding: utf-8; mode: rst -*- + +============================= +ACRN Project +============================= + +.. toctree:: + + vbs.rst + vhm.rst + +.. only:: subproject + + Indices + ======= + + * :ref:`genindex` diff --git a/Documentation/virtual/acrn/vbs.rst b/Documentation/virtual/acrn/vbs.rst new file mode 100644 index 0000000000000..40a0683a1c0b9 --- /dev/null +++ b/Documentation/virtual/acrn/vbs.rst @@ -0,0 +1,20 @@ +================================ +Virtio and Backend Service (VBS) +================================ + +The Virtio and Backend Service (VBS) in part of ACRN Project. + +The VBS can be further divided into two parts: VBS in user space (VBS-U) +and VBS in kernel space (VBS-K). + +Example: +-------- +A reference driver for VBS-K can be found at :c:type:`struct vbs_rng`. + +.. kernel-doc:: drivers/vbs/vbs_rng.c + +APIs: +----- + +.. kernel-doc:: include/linux/vbs/vbs.h +.. kernel-doc:: include/linux/vbs/vq.h diff --git a/Documentation/virtual/acrn/vhm.rst b/Documentation/virtual/acrn/vhm.rst new file mode 100644 index 0000000000000..901cff492e2ba --- /dev/null +++ b/Documentation/virtual/acrn/vhm.rst @@ -0,0 +1,13 @@ +================================== +Virtio and Hypervisor Module (VHM) +================================== + +The Virtio and Hypervisor service Module (VHM) in part of ACRN Project. + +APIs: +----- + +.. kernel-doc:: include/linux/vhm/acrn_vhm_ioreq.h +.. kernel-doc:: include/linux/vhm/acrn_vhm_mm.h +.. 
kernel-doc:: include/linux/vhm/vhm_ioctl_defs.h +.. kernel-doc:: include/linux/vhm/vhm_vm_mngt.h diff --git a/Documentation/x86/x86_64/mm.txt b/Documentation/x86/x86_64/mm.txt index 5432a96d31ffd..05ef53d83a41e 100644 --- a/Documentation/x86/x86_64/mm.txt +++ b/Documentation/x86/x86_64/mm.txt @@ -4,8 +4,9 @@ Virtual memory map with 4 level page tables: 0000000000000000 - 00007fffffffffff (=47 bits) user space, different per mm hole caused by [47:63] sign extension ffff800000000000 - ffff87ffffffffff (=43 bits) guard hole, reserved for hypervisor -ffff880000000000 - ffffc7ffffffffff (=64 TB) direct mapping of all phys. memory -ffffc80000000000 - ffffc8ffffffffff (=40 bits) hole +ffff880000000000 - ffff887fffffffff (=39 bits) LDT remap for PTI +ffff888000000000 - ffffc87fffffffff (=64 TB) direct mapping of all phys. memory +ffffc88000000000 - ffffc8ffffffffff (=39 bits) hole ffffc90000000000 - ffffe8ffffffffff (=45 bits) vmalloc/ioremap space ffffe90000000000 - ffffe9ffffffffff (=40 bits) hole ffffea0000000000 - ffffeaffffffffff (=40 bits) virtual memory map (1TB) @@ -30,8 +31,9 @@ Virtual memory map with 5 level page tables: 0000000000000000 - 00ffffffffffffff (=56 bits) user space, different per mm hole caused by [56:63] sign extension ff00000000000000 - ff0fffffffffffff (=52 bits) guard hole, reserved for hypervisor -ff10000000000000 - ff8fffffffffffff (=55 bits) direct mapping of all phys. memory -ff90000000000000 - ff9fffffffffffff (=52 bits) LDT remap for PTI +ff10000000000000 - ff10ffffffffffff (=48 bits) LDT remap for PTI +ff11000000000000 - ff90ffffffffffff (=55 bits) direct mapping of all phys. 
memory +ff91000000000000 - ff9fffffffffffff (=3840 TB) hole ffa0000000000000 - ffd1ffffffffffff (=54 bits) vmalloc/ioremap space (12800 TB) ffd2000000000000 - ffd3ffffffffffff (=49 bits) hole ffd4000000000000 - ffd5ffffffffffff (=49 bits) virtual memory map (512TB) diff --git a/MAINTAINERS b/MAINTAINERS index b2f710eee67a7..334052824d9eb 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -12507,6 +12507,11 @@ L: linux-serial@vger.kernel.org S: Odd Fixes F: drivers/tty/serial/rp2.* +CARRIER BOARD COMMUNICAION DRIVER +M: G Jaya Kumaran, Vineetha +S: Supported +F: drivers/tty/cbc/* + ROHM MULTIFUNCTION BD9571MWV-M PMIC DEVICE DRIVERS M: Marek Vasut L: linux-kernel@vger.kernel.org @@ -12527,6 +12532,17 @@ F: include/net/rose.h F: include/uapi/linux/rose.h F: net/rose/ +RPMB SUBSYSTEM +M: Tomas Winkler +L: linux-kernel@vger.kernel.org +S: Supported +F: drivers/char/rpmb/* +F: include/uapi/linux/rpmb.h +F: include/linux/rpmb.h +F: Documentation/ABI/testing/sysfs-class-rpmb +F: Documentation/driver-api/rpmb.rst +F: tools/rpmb/ + RTL2830 MEDIA DRIVER M: Antti Palosaari L: linux-media@vger.kernel.org @@ -13769,6 +13785,7 @@ F: drivers/i2c/busses/i2c-stm32* STABLE BRANCH M: Greg Kroah-Hartman +M: Sasha Levin L: stable@vger.kernel.org S: Supported F: Documentation/process/stable-kernel-rules.rst @@ -16259,3 +16276,4 @@ T: git git://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git S: Buried alive in reporters F: * F: */ + diff --git a/Makefile b/Makefile index 69fa5c0310d83..39c4e7c3c13cf 100644 --- a/Makefile +++ b/Makefile @@ -1,7 +1,7 @@ # SPDX-License-Identifier: GPL-2.0 VERSION = 4 PATCHLEVEL = 19 -SUBLEVEL = 0 +SUBLEVEL = 19 EXTRAVERSION = NAME = "People's Front" @@ -482,18 +482,18 @@ endif ifeq ($(cc-name),clang) ifneq ($(CROSS_COMPILE),) -CLANG_TARGET := --target=$(notdir $(CROSS_COMPILE:%-=%)) +CLANG_FLAGS := --target=$(notdir $(CROSS_COMPILE:%-=%)) GCC_TOOLCHAIN_DIR := $(dir $(shell which $(LD))) -CLANG_PREFIX := --prefix=$(GCC_TOOLCHAIN_DIR) +CLANG_FLAGS += 
--prefix=$(GCC_TOOLCHAIN_DIR) GCC_TOOLCHAIN := $(realpath $(GCC_TOOLCHAIN_DIR)/..) endif ifneq ($(GCC_TOOLCHAIN),) -CLANG_GCC_TC := --gcc-toolchain=$(GCC_TOOLCHAIN) +CLANG_FLAGS += --gcc-toolchain=$(GCC_TOOLCHAIN) endif -KBUILD_CFLAGS += $(CLANG_TARGET) $(CLANG_GCC_TC) $(CLANG_PREFIX) -KBUILD_AFLAGS += $(CLANG_TARGET) $(CLANG_GCC_TC) $(CLANG_PREFIX) -KBUILD_CFLAGS += $(call cc-option, -no-integrated-as) -KBUILD_AFLAGS += $(call cc-option, -no-integrated-as) +CLANG_FLAGS += -no-integrated-as +KBUILD_CFLAGS += $(CLANG_FLAGS) +KBUILD_AFLAGS += $(CLANG_FLAGS) +export CLANG_FLAGS endif RETPOLINE_CFLAGS_GCC := -mindirect-branch=thunk-extern -mindirect-branch-register @@ -954,11 +954,6 @@ ifdef CONFIG_STACK_VALIDATION ifeq ($(has_libelf),1) objtool_target := tools/objtool FORCE else - ifdef CONFIG_UNWINDER_ORC - $(error "Cannot generate ORC metadata for CONFIG_UNWINDER_ORC=y, please install libelf-dev, libelf-devel or elfutils-libelf-devel") - else - $(warning "Cannot use CONFIG_STACK_VALIDATION=y, please install libelf-dev, libelf-devel or elfutils-libelf-devel") - endif SKIP_STACK_VALIDATION := 1 export SKIP_STACK_VALIDATION endif @@ -1115,6 +1110,14 @@ uapi-asm-generic: PHONY += prepare-objtool prepare-objtool: $(objtool_target) +ifeq ($(SKIP_STACK_VALIDATION),1) +ifdef CONFIG_UNWINDER_ORC + @echo "error: Cannot generate ORC metadata for CONFIG_UNWINDER_ORC=y, please install libelf-dev, libelf-devel or elfutils-libelf-devel" >&2 + @false +else + @echo "warning: Cannot use CONFIG_STACK_VALIDATION=y, please install libelf-dev, libelf-devel or elfutils-libelf-devel" >&2 +endif +endif # Generate some files # --------------------------------------------------------------------------- diff --git a/README.intel.lsm-stacking b/README.intel.lsm-stacking new file mode 100644 index 0000000000000..402d16eec5465 --- /dev/null +++ b/README.intel.lsm-stacking @@ -0,0 +1,17 @@ +Linux kernel security module stacking +============ + +This enables full stacking of Linux security modules. 
+It allows "major" security modules to be used at the same time. +The major security modules are: + SELinux + Smack + TOMOYO + AppArmor + +This is platform agnostic. + +This is not a bug fix. This removes a longstanding limitation +that the kernel can support only one major security module at +a time. There are no know bugs introduced by this code. + diff --git a/README.intel.optee b/README.intel.optee new file mode 100644 index 0000000000000..0945d23cc882d --- /dev/null +++ b/README.intel.optee @@ -0,0 +1,13 @@ +IP Block: OPTEE core driver +Platform Affect: (ARM64) IOTG KeemBay +BugFix: None + +This branch pulls in driver interface enhancement to OPTEE TEE driver to expose +an kernel internal TEE client interface to be used by other drivers. + +This patch was pushed to the upstream kernel mailinglist by Jens Wiklander (OPTEE maintainer) +nearly a year ago but no effort to include yet in any recently release. + +https://patchwork.kernel.org/patch/9845351/ + +CONFIG_TEE and CONFIG_OPTEE in defconfig must be enabled for driver to compile. 
diff --git a/arch/alpha/include/asm/termios.h b/arch/alpha/include/asm/termios.h index 6a8c53dec57e6..b7c77bb1bfd20 100644 --- a/arch/alpha/include/asm/termios.h +++ b/arch/alpha/include/asm/termios.h @@ -73,9 +73,15 @@ }) #define user_termios_to_kernel_termios(k, u) \ - copy_from_user(k, u, sizeof(struct termios)) + copy_from_user(k, u, sizeof(struct termios2)) #define kernel_termios_to_user_termios(u, k) \ + copy_to_user(u, k, sizeof(struct termios2)) + +#define user_termios_to_kernel_termios_1(k, u) \ + copy_from_user(k, u, sizeof(struct termios)) + +#define kernel_termios_to_user_termios_1(u, k) \ copy_to_user(u, k, sizeof(struct termios)) #endif /* _ALPHA_TERMIOS_H */ diff --git a/arch/alpha/include/uapi/asm/ioctls.h b/arch/alpha/include/uapi/asm/ioctls.h index 3729d92d3fa85..dc8c20ac7191f 100644 --- a/arch/alpha/include/uapi/asm/ioctls.h +++ b/arch/alpha/include/uapi/asm/ioctls.h @@ -32,6 +32,11 @@ #define TCXONC _IO('t', 30) #define TCFLSH _IO('t', 31) +#define TCGETS2 _IOR('T', 42, struct termios2) +#define TCSETS2 _IOW('T', 43, struct termios2) +#define TCSETSW2 _IOW('T', 44, struct termios2) +#define TCSETSF2 _IOW('T', 45, struct termios2) + #define TIOCSWINSZ _IOW('t', 103, struct winsize) #define TIOCGWINSZ _IOR('t', 104, struct winsize) #define TIOCSTART _IO('t', 110) /* start output, like ^Q */ diff --git a/arch/alpha/include/uapi/asm/termbits.h b/arch/alpha/include/uapi/asm/termbits.h index de6c8360fbe36..4575ba34a0eae 100644 --- a/arch/alpha/include/uapi/asm/termbits.h +++ b/arch/alpha/include/uapi/asm/termbits.h @@ -26,6 +26,19 @@ struct termios { speed_t c_ospeed; /* output speed */ }; +/* Alpha has identical termios and termios2 */ + +struct termios2 { + tcflag_t c_iflag; /* input mode flags */ + tcflag_t c_oflag; /* output mode flags */ + tcflag_t c_cflag; /* control mode flags */ + tcflag_t c_lflag; /* local mode flags */ + cc_t c_cc[NCCS]; /* control characters */ + cc_t c_line; /* line discipline (== c_cc[19]) */ + speed_t c_ispeed; /* input 
speed */ + speed_t c_ospeed; /* output speed */ +}; + /* Alpha has matching termios and ktermios */ struct ktermios { @@ -152,6 +165,7 @@ struct ktermios { #define B3000000 00034 #define B3500000 00035 #define B4000000 00036 +#define BOTHER 00037 #define CSIZE 00001400 #define CS5 00000000 @@ -169,6 +183,9 @@ struct ktermios { #define CMSPAR 010000000000 /* mark or space (stick) parity */ #define CRTSCTS 020000000000 /* flow control */ +#define CIBAUD 07600000 +#define IBSHIFT 16 + /* c_lflag bits */ #define ISIG 0x00000080 #define ICANON 0x00000100 diff --git a/arch/arc/Kconfig b/arch/arc/Kconfig index a045f30860477..ac69f307dcfee 100644 --- a/arch/arc/Kconfig +++ b/arch/arc/Kconfig @@ -26,6 +26,7 @@ config ARC select GENERIC_IRQ_SHOW select GENERIC_PCI_IOMAP select GENERIC_PENDING_IRQ if SMP + select GENERIC_SCHED_CLOCK select GENERIC_SMP_IDLE_THREAD select HAVE_ARCH_KGDB select HAVE_ARCH_TRACEHOOK @@ -111,7 +112,7 @@ endmenu choice prompt "ARC Instruction Set" - default ISA_ARCOMPACT + default ISA_ARCV2 config ISA_ARCOMPACT bool "ARCompact ISA" diff --git a/arch/arc/Makefile b/arch/arc/Makefile index 644815c0516e7..16e6cc22e25cc 100644 --- a/arch/arc/Makefile +++ b/arch/arc/Makefile @@ -6,7 +6,7 @@ # published by the Free Software Foundation. 
# -KBUILD_DEFCONFIG := nsim_700_defconfig +KBUILD_DEFCONFIG := nsim_hs_defconfig cflags-y += -fno-common -pipe -fno-builtin -mmedium-calls -D__linux__ cflags-$(CONFIG_ISA_ARCOMPACT) += -mA7 diff --git a/arch/arc/configs/axs101_defconfig b/arch/arc/configs/axs101_defconfig index 41bc08be6a3b4..020d4493edfd0 100644 --- a/arch/arc/configs/axs101_defconfig +++ b/arch/arc/configs/axs101_defconfig @@ -14,6 +14,7 @@ CONFIG_PERF_EVENTS=y # CONFIG_VM_EVENT_COUNTERS is not set # CONFIG_SLUB_DEBUG is not set # CONFIG_COMPAT_BRK is not set +CONFIG_ISA_ARCOMPACT=y CONFIG_MODULES=y CONFIG_MODULE_FORCE_LOAD=y CONFIG_MODULE_UNLOAD=y @@ -95,6 +96,7 @@ CONFIG_VFAT_FS=y CONFIG_NTFS_FS=y CONFIG_TMPFS=y CONFIG_NFS_FS=y +CONFIG_NFS_V3_ACL=y CONFIG_NLS_CODEPAGE_437=y CONFIG_NLS_ISO8859_1=y # CONFIG_ENABLE_WARN_DEPRECATED is not set diff --git a/arch/arc/configs/axs103_defconfig b/arch/arc/configs/axs103_defconfig index 1e1c4a8011b52..666314fffc601 100644 --- a/arch/arc/configs/axs103_defconfig +++ b/arch/arc/configs/axs103_defconfig @@ -94,6 +94,7 @@ CONFIG_VFAT_FS=y CONFIG_NTFS_FS=y CONFIG_TMPFS=y CONFIG_NFS_FS=y +CONFIG_NFS_V3_ACL=y CONFIG_NLS_CODEPAGE_437=y CONFIG_NLS_ISO8859_1=y # CONFIG_ENABLE_WARN_DEPRECATED is not set diff --git a/arch/arc/configs/axs103_smp_defconfig b/arch/arc/configs/axs103_smp_defconfig index 6b0c0cfd5c304..429832b8560b8 100644 --- a/arch/arc/configs/axs103_smp_defconfig +++ b/arch/arc/configs/axs103_smp_defconfig @@ -97,6 +97,7 @@ CONFIG_VFAT_FS=y CONFIG_NTFS_FS=y CONFIG_TMPFS=y CONFIG_NFS_FS=y +CONFIG_NFS_V3_ACL=y CONFIG_NLS_CODEPAGE_437=y CONFIG_NLS_ISO8859_1=y # CONFIG_ENABLE_WARN_DEPRECATED is not set diff --git a/arch/arc/configs/hsdk_defconfig b/arch/arc/configs/hsdk_defconfig index 1dec2b4bc5e6e..2a1d2cbfee1a2 100644 --- a/arch/arc/configs/hsdk_defconfig +++ b/arch/arc/configs/hsdk_defconfig @@ -65,6 +65,7 @@ CONFIG_EXT3_FS=y CONFIG_VFAT_FS=y CONFIG_TMPFS=y CONFIG_NFS_FS=y +CONFIG_NFS_V3_ACL=y CONFIG_NLS_CODEPAGE_437=y CONFIG_NLS_ISO8859_1=y # 
CONFIG_ENABLE_WARN_DEPRECATED is not set diff --git a/arch/arc/configs/nps_defconfig b/arch/arc/configs/nps_defconfig index 31ba224bbfb47..6e84060e7c90a 100644 --- a/arch/arc/configs/nps_defconfig +++ b/arch/arc/configs/nps_defconfig @@ -15,6 +15,7 @@ CONFIG_SYSCTL_SYSCALL=y CONFIG_EMBEDDED=y CONFIG_PERF_EVENTS=y # CONFIG_COMPAT_BRK is not set +CONFIG_ISA_ARCOMPACT=y CONFIG_KPROBES=y CONFIG_MODULES=y CONFIG_MODULE_FORCE_LOAD=y @@ -73,6 +74,7 @@ CONFIG_PROC_KCORE=y CONFIG_TMPFS=y # CONFIG_MISC_FILESYSTEMS is not set CONFIG_NFS_FS=y +CONFIG_NFS_V3_ACL=y CONFIG_ROOT_NFS=y CONFIG_DEBUG_INFO=y # CONFIG_ENABLE_WARN_DEPRECATED is not set diff --git a/arch/arc/configs/nsim_700_defconfig b/arch/arc/configs/nsim_700_defconfig index 8e0b8b134cd9e..219c2a65294b8 100644 --- a/arch/arc/configs/nsim_700_defconfig +++ b/arch/arc/configs/nsim_700_defconfig @@ -15,6 +15,7 @@ CONFIG_EMBEDDED=y CONFIG_PERF_EVENTS=y # CONFIG_SLUB_DEBUG is not set # CONFIG_COMPAT_BRK is not set +CONFIG_ISA_ARCOMPACT=y CONFIG_KPROBES=y CONFIG_MODULES=y # CONFIG_LBDAF is not set diff --git a/arch/arc/configs/nsimosci_defconfig b/arch/arc/configs/nsimosci_defconfig index f14eeff7d3084..35dfc6491a094 100644 --- a/arch/arc/configs/nsimosci_defconfig +++ b/arch/arc/configs/nsimosci_defconfig @@ -15,6 +15,7 @@ CONFIG_EMBEDDED=y CONFIG_PERF_EVENTS=y # CONFIG_SLUB_DEBUG is not set # CONFIG_COMPAT_BRK is not set +CONFIG_ISA_ARCOMPACT=y CONFIG_KPROBES=y CONFIG_MODULES=y # CONFIG_LBDAF is not set @@ -66,5 +67,6 @@ CONFIG_EXT2_FS_XATTR=y CONFIG_TMPFS=y # CONFIG_MISC_FILESYSTEMS is not set CONFIG_NFS_FS=y +CONFIG_NFS_V3_ACL=y # CONFIG_ENABLE_WARN_DEPRECATED is not set # CONFIG_ENABLE_MUST_CHECK is not set diff --git a/arch/arc/configs/nsimosci_hs_defconfig b/arch/arc/configs/nsimosci_hs_defconfig index 025298a483056..1638e5bc96724 100644 --- a/arch/arc/configs/nsimosci_hs_defconfig +++ b/arch/arc/configs/nsimosci_hs_defconfig @@ -65,5 +65,6 @@ CONFIG_EXT2_FS_XATTR=y CONFIG_TMPFS=y # CONFIG_MISC_FILESYSTEMS is not set 
CONFIG_NFS_FS=y +CONFIG_NFS_V3_ACL=y # CONFIG_ENABLE_WARN_DEPRECATED is not set # CONFIG_ENABLE_MUST_CHECK is not set diff --git a/arch/arc/configs/nsimosci_hs_smp_defconfig b/arch/arc/configs/nsimosci_hs_smp_defconfig index df7b77b13b823..11cfbdb0f4415 100644 --- a/arch/arc/configs/nsimosci_hs_smp_defconfig +++ b/arch/arc/configs/nsimosci_hs_smp_defconfig @@ -76,6 +76,7 @@ CONFIG_EXT2_FS_XATTR=y CONFIG_TMPFS=y # CONFIG_MISC_FILESYSTEMS is not set CONFIG_NFS_FS=y +CONFIG_NFS_V3_ACL=y # CONFIG_ENABLE_WARN_DEPRECATED is not set # CONFIG_ENABLE_MUST_CHECK is not set CONFIG_FTRACE=y diff --git a/arch/arc/configs/tb10x_defconfig b/arch/arc/configs/tb10x_defconfig index a7f65313f84a5..e71ade3cf9c80 100644 --- a/arch/arc/configs/tb10x_defconfig +++ b/arch/arc/configs/tb10x_defconfig @@ -19,6 +19,7 @@ CONFIG_KALLSYMS_ALL=y # CONFIG_AIO is not set CONFIG_EMBEDDED=y # CONFIG_COMPAT_BRK is not set +CONFIG_ISA_ARCOMPACT=y CONFIG_SLAB=y CONFIG_MODULES=y CONFIG_MODULE_FORCE_LOAD=y diff --git a/arch/arc/configs/vdk_hs38_defconfig b/arch/arc/configs/vdk_hs38_defconfig index db47c3541f159..1e59a2e9c602f 100644 --- a/arch/arc/configs/vdk_hs38_defconfig +++ b/arch/arc/configs/vdk_hs38_defconfig @@ -85,6 +85,7 @@ CONFIG_NTFS_FS=y CONFIG_TMPFS=y CONFIG_JFFS2_FS=y CONFIG_NFS_FS=y +CONFIG_NFS_V3_ACL=y CONFIG_NLS_CODEPAGE_437=y CONFIG_NLS_ISO8859_1=y # CONFIG_ENABLE_WARN_DEPRECATED is not set diff --git a/arch/arc/configs/vdk_hs38_smp_defconfig b/arch/arc/configs/vdk_hs38_smp_defconfig index a8ac5e917d9a5..b5c3f6c54b032 100644 --- a/arch/arc/configs/vdk_hs38_smp_defconfig +++ b/arch/arc/configs/vdk_hs38_smp_defconfig @@ -90,6 +90,7 @@ CONFIG_NTFS_FS=y CONFIG_TMPFS=y CONFIG_JFFS2_FS=y CONFIG_NFS_FS=y +CONFIG_NFS_V3_ACL=y CONFIG_NLS_CODEPAGE_437=y CONFIG_NLS_ISO8859_1=y # CONFIG_ENABLE_WARN_DEPRECATED is not set diff --git a/arch/arc/include/asm/io.h b/arch/arc/include/asm/io.h index c22b181e8206f..2f39d9b3886e4 100644 --- a/arch/arc/include/asm/io.h +++ b/arch/arc/include/asm/io.h @@ -12,6 
+12,7 @@ #include #include #include +#include #ifdef CONFIG_ISA_ARCV2 #include @@ -94,6 +95,42 @@ static inline u32 __raw_readl(const volatile void __iomem *addr) return w; } +/* + * {read,write}s{b,w,l}() repeatedly access the same IO address in + * native endianness in 8-, 16-, 32-bit chunks {into,from} memory, + * @count times + */ +#define __raw_readsx(t,f) \ +static inline void __raw_reads##f(const volatile void __iomem *addr, \ + void *ptr, unsigned int count) \ +{ \ + bool is_aligned = ((unsigned long)ptr % ((t) / 8)) == 0; \ + u##t *buf = ptr; \ + \ + if (!count) \ + return; \ + \ + /* Some ARC CPU's don't support unaligned accesses */ \ + if (is_aligned) { \ + do { \ + u##t x = __raw_read##f(addr); \ + *buf++ = x; \ + } while (--count); \ + } else { \ + do { \ + u##t x = __raw_read##f(addr); \ + put_unaligned(x, buf++); \ + } while (--count); \ + } \ +} + +#define __raw_readsb __raw_readsb +__raw_readsx(8, b) +#define __raw_readsw __raw_readsw +__raw_readsx(16, w) +#define __raw_readsl __raw_readsl +__raw_readsx(32, l) + #define __raw_writeb __raw_writeb static inline void __raw_writeb(u8 b, volatile void __iomem *addr) { @@ -126,6 +163,35 @@ static inline void __raw_writel(u32 w, volatile void __iomem *addr) } +#define __raw_writesx(t,f) \ +static inline void __raw_writes##f(volatile void __iomem *addr, \ + const void *ptr, unsigned int count) \ +{ \ + bool is_aligned = ((unsigned long)ptr % ((t) / 8)) == 0; \ + const u##t *buf = ptr; \ + \ + if (!count) \ + return; \ + \ + /* Some ARC CPU's don't support unaligned accesses */ \ + if (is_aligned) { \ + do { \ + __raw_write##f(*buf++, addr); \ + } while (--count); \ + } else { \ + do { \ + __raw_write##f(get_unaligned(buf++), addr); \ + } while (--count); \ + } \ +} + +#define __raw_writesb __raw_writesb +__raw_writesx(8, b) +#define __raw_writesw __raw_writesw +__raw_writesx(16, w) +#define __raw_writesl __raw_writesl +__raw_writesx(32, l) + /* * MMIO can also get buffered/optimized in micro-arch, so 
barriers needed * Based on ARM model for the typical use case @@ -141,10 +207,16 @@ static inline void __raw_writel(u32 w, volatile void __iomem *addr) #define readb(c) ({ u8 __v = readb_relaxed(c); __iormb(); __v; }) #define readw(c) ({ u16 __v = readw_relaxed(c); __iormb(); __v; }) #define readl(c) ({ u32 __v = readl_relaxed(c); __iormb(); __v; }) +#define readsb(p,d,l) ({ __raw_readsb(p,d,l); __iormb(); }) +#define readsw(p,d,l) ({ __raw_readsw(p,d,l); __iormb(); }) +#define readsl(p,d,l) ({ __raw_readsl(p,d,l); __iormb(); }) #define writeb(v,c) ({ __iowmb(); writeb_relaxed(v,c); }) #define writew(v,c) ({ __iowmb(); writew_relaxed(v,c); }) #define writel(v,c) ({ __iowmb(); writel_relaxed(v,c); }) +#define writesb(p,d,l) ({ __iowmb(); __raw_writesb(p,d,l); }) +#define writesw(p,d,l) ({ __iowmb(); __raw_writesw(p,d,l); }) +#define writesl(p,d,l) ({ __iowmb(); __raw_writesl(p,d,l); }) /* * Relaxed API for drivers which can handle barrier ordering themselves diff --git a/arch/arc/include/asm/perf_event.h b/arch/arc/include/asm/perf_event.h index 9185541035cc3..6958545390f0f 100644 --- a/arch/arc/include/asm/perf_event.h +++ b/arch/arc/include/asm/perf_event.h @@ -103,7 +103,8 @@ static const char * const arc_pmu_ev_hw_map[] = { /* counts condition */ [PERF_COUNT_HW_INSTRUCTIONS] = "iall", - [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = "ijmp", /* Excludes ZOL jumps */ + /* All jump instructions that are taken */ + [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = "ijmptak", [PERF_COUNT_ARC_BPOK] = "bpok", /* NP-NT, PT-T, PNT-NT */ #ifdef CONFIG_ISA_ARCV2 [PERF_COUNT_HW_BRANCH_MISSES] = "bpmp", diff --git a/arch/arc/lib/memset-archs.S b/arch/arc/lib/memset-archs.S index 62ad4bcb841aa..f230bb7092fdb 100644 --- a/arch/arc/lib/memset-archs.S +++ b/arch/arc/lib/memset-archs.S @@ -7,11 +7,39 @@ */ #include +#include -#undef PREALLOC_NOT_AVAIL +/* + * The memset implementation below is optimized to use prefetchw and prealloc + * instruction in case of CPU with 64B L1 data cache line 
(L1_CACHE_SHIFT == 6) + * If you want to implement optimized memset for other possible L1 data cache + * line lengths (32B and 128B) you should rewrite code carefully checking + * we don't call any prefetchw/prealloc instruction for L1 cache lines which + * don't belongs to memset area. + */ + +#if L1_CACHE_SHIFT == 6 + +.macro PREALLOC_INSTR reg, off + prealloc [\reg, \off] +.endm + +.macro PREFETCHW_INSTR reg, off + prefetchw [\reg, \off] +.endm + +#else + +.macro PREALLOC_INSTR +.endm + +.macro PREFETCHW_INSTR +.endm + +#endif ENTRY_CFI(memset) - prefetchw [r0] ; Prefetch the write location + PREFETCHW_INSTR r0, 0 ; Prefetch the first write location mov.f 0, r2 ;;; if size is zero jz.d [blink] @@ -48,11 +76,8 @@ ENTRY_CFI(memset) lpnz @.Lset64bytes ;; LOOP START -#ifdef PREALLOC_NOT_AVAIL - prefetchw [r3, 64] ;Prefetch the next write location -#else - prealloc [r3, 64] -#endif + PREALLOC_INSTR r3, 64 ; alloc next line w/o fetching + #ifdef CONFIG_ARC_HAS_LL64 std.ab r4, [r3, 8] std.ab r4, [r3, 8] @@ -85,7 +110,6 @@ ENTRY_CFI(memset) lsr.f lp_count, r2, 5 ;Last remaining max 124 bytes lpnz .Lset32bytes ;; LOOP START - prefetchw [r3, 32] ;Prefetch the next write location #ifdef CONFIG_ARC_HAS_LL64 std.ab r4, [r3, 8] std.ab r4, [r3, 8] diff --git a/arch/arc/mm/init.c b/arch/arc/mm/init.c index ba145065c579b..f890b2f9f82f0 100644 --- a/arch/arc/mm/init.c +++ b/arch/arc/mm/init.c @@ -138,7 +138,8 @@ void __init setup_arch_memory(void) */ memblock_add_node(low_mem_start, low_mem_sz, 0); - memblock_reserve(low_mem_start, __pa(_end) - low_mem_start); + memblock_reserve(CONFIG_LINUX_LINK_BASE, + __pa(_end) - CONFIG_LINUX_LINK_BASE); #ifdef CONFIG_BLK_DEV_INITRD if (initrd_start) diff --git a/arch/arm/boot/dts/am3517-evm.dts b/arch/arm/boot/dts/am3517-evm.dts index 1d158cfda15f2..c45aef8068700 100644 --- a/arch/arm/boot/dts/am3517-evm.dts +++ b/arch/arm/boot/dts/am3517-evm.dts @@ -227,7 +227,7 @@ vmmc-supply = <&vmmc_fixed>; bus-width = <4>; wp-gpios = <&gpio4 30 
GPIO_ACTIVE_HIGH>; /* gpio_126 */ - cd-gpios = <&gpio4 31 GPIO_ACTIVE_HIGH>; /* gpio_127 */ + cd-gpios = <&gpio4 31 GPIO_ACTIVE_LOW>; /* gpio_127 */ }; &mmc3 { diff --git a/arch/arm/boot/dts/am3517-som.dtsi b/arch/arm/boot/dts/am3517-som.dtsi index dae6e458e59fe..b1c988eed87c6 100644 --- a/arch/arm/boot/dts/am3517-som.dtsi +++ b/arch/arm/boot/dts/am3517-som.dtsi @@ -163,7 +163,7 @@ compatible = "ti,wl1271"; reg = <2>; interrupt-parent = <&gpio6>; - interrupts = <10 IRQ_TYPE_LEVEL_HIGH>; /* gpio_170 */ + interrupts = <10 IRQ_TYPE_EDGE_RISING>; /* gpio_170 */ ref-clock-frequency = <26000000>; tcxo-clock-frequency = <26000000>; }; diff --git a/arch/arm/boot/dts/bcm2837-rpi-3-b-plus.dts b/arch/arm/boot/dts/bcm2837-rpi-3-b-plus.dts index 4adb85e66be3f..93762244be7f4 100644 --- a/arch/arm/boot/dts/bcm2837-rpi-3-b-plus.dts +++ b/arch/arm/boot/dts/bcm2837-rpi-3-b-plus.dts @@ -31,7 +31,7 @@ wifi_pwrseq: wifi-pwrseq { compatible = "mmc-pwrseq-simple"; - reset-gpios = <&expgpio 1 GPIO_ACTIVE_HIGH>; + reset-gpios = <&expgpio 1 GPIO_ACTIVE_LOW>; }; }; diff --git a/arch/arm/boot/dts/bcm2837-rpi-3-b.dts b/arch/arm/boot/dts/bcm2837-rpi-3-b.dts index c318bcbc6ba7e..89e6fd547c757 100644 --- a/arch/arm/boot/dts/bcm2837-rpi-3-b.dts +++ b/arch/arm/boot/dts/bcm2837-rpi-3-b.dts @@ -26,7 +26,7 @@ wifi_pwrseq: wifi-pwrseq { compatible = "mmc-pwrseq-simple"; - reset-gpios = <&expgpio 1 GPIO_ACTIVE_HIGH>; + reset-gpios = <&expgpio 1 GPIO_ACTIVE_LOW>; }; }; diff --git a/arch/arm/boot/dts/dra7.dtsi b/arch/arm/boot/dts/dra7.dtsi index a0ddf497e8cdd..2cb45ddd2ae3b 100644 --- a/arch/arm/boot/dts/dra7.dtsi +++ b/arch/arm/boot/dts/dra7.dtsi @@ -354,7 +354,7 @@ ti,hwmods = "pcie1"; phys = <&pcie1_phy>; phy-names = "pcie-phy0"; - ti,syscon-unaligned-access = <&scm_conf1 0x14 2>; + ti,syscon-unaligned-access = <&scm_conf1 0x14 1>; status = "disabled"; }; }; diff --git a/arch/arm/boot/dts/exynos4210-origen.dts b/arch/arm/boot/dts/exynos4210-origen.dts index 2ab99f9f3d0ac..dd9ec05eb0f79 100644 --- 
a/arch/arm/boot/dts/exynos4210-origen.dts +++ b/arch/arm/boot/dts/exynos4210-origen.dts @@ -151,6 +151,8 @@ reg = <0x66>; interrupt-parent = <&gpx0>; interrupts = <4 IRQ_TYPE_NONE>, <3 IRQ_TYPE_NONE>; + pinctrl-names = "default"; + pinctrl-0 = <&max8997_irq>; max8997,pmic-buck1-dvs-voltage = <1350000>; max8997,pmic-buck2-dvs-voltage = <1100000>; @@ -288,6 +290,13 @@ }; }; +&pinctrl_1 { + max8997_irq: max8997-irq { + samsung,pins = "gpx0-3", "gpx0-4"; + samsung,pin-pud = ; + }; +}; + &sdhci_0 { bus-width = <4>; pinctrl-0 = <&sd0_clk &sd0_cmd &sd0_bus4 &sd0_cd>; diff --git a/arch/arm/boot/dts/exynos5250.dtsi b/arch/arm/boot/dts/exynos5250.dtsi index da163a40af159..b85527faa6ea4 100644 --- a/arch/arm/boot/dts/exynos5250.dtsi +++ b/arch/arm/boot/dts/exynos5250.dtsi @@ -54,62 +54,109 @@ device_type = "cpu"; compatible = "arm,cortex-a15"; reg = <0>; - clock-frequency = <1700000000>; clocks = <&clock CLK_ARM_CLK>; clock-names = "cpu"; - clock-latency = <140000>; - - operating-points = < - 1700000 1300000 - 1600000 1250000 - 1500000 1225000 - 1400000 1200000 - 1300000 1150000 - 1200000 1125000 - 1100000 1100000 - 1000000 1075000 - 900000 1050000 - 800000 1025000 - 700000 1012500 - 600000 1000000 - 500000 975000 - 400000 950000 - 300000 937500 - 200000 925000 - >; + operating-points-v2 = <&cpu0_opp_table>; #cooling-cells = <2>; /* min followed by max */ }; cpu@1 { device_type = "cpu"; compatible = "arm,cortex-a15"; reg = <1>; - clock-frequency = <1700000000>; clocks = <&clock CLK_ARM_CLK>; clock-names = "cpu"; - clock-latency = <140000>; - - operating-points = < - 1700000 1300000 - 1600000 1250000 - 1500000 1225000 - 1400000 1200000 - 1300000 1150000 - 1200000 1125000 - 1100000 1100000 - 1000000 1075000 - 900000 1050000 - 800000 1025000 - 700000 1012500 - 600000 1000000 - 500000 975000 - 400000 950000 - 300000 937500 - 200000 925000 - >; + operating-points-v2 = <&cpu0_opp_table>; #cooling-cells = <2>; /* min followed by max */ }; }; + cpu0_opp_table: opp_table0 { + 
compatible = "operating-points-v2"; + opp-shared; + + opp-200000000 { + opp-hz = /bits/ 64 <200000000>; + opp-microvolt = <925000>; + clock-latency-ns = <140000>; + }; + opp-300000000 { + opp-hz = /bits/ 64 <300000000>; + opp-microvolt = <937500>; + clock-latency-ns = <140000>; + }; + opp-400000000 { + opp-hz = /bits/ 64 <400000000>; + opp-microvolt = <950000>; + clock-latency-ns = <140000>; + }; + opp-500000000 { + opp-hz = /bits/ 64 <500000000>; + opp-microvolt = <975000>; + clock-latency-ns = <140000>; + }; + opp-600000000 { + opp-hz = /bits/ 64 <600000000>; + opp-microvolt = <1000000>; + clock-latency-ns = <140000>; + }; + opp-700000000 { + opp-hz = /bits/ 64 <700000000>; + opp-microvolt = <1012500>; + clock-latency-ns = <140000>; + }; + opp-800000000 { + opp-hz = /bits/ 64 <800000000>; + opp-microvolt = <1025000>; + clock-latency-ns = <140000>; + }; + opp-900000000 { + opp-hz = /bits/ 64 <900000000>; + opp-microvolt = <1050000>; + clock-latency-ns = <140000>; + }; + opp-1000000000 { + opp-hz = /bits/ 64 <1000000000>; + opp-microvolt = <1075000>; + clock-latency-ns = <140000>; + opp-suspend; + }; + opp-1100000000 { + opp-hz = /bits/ 64 <1100000000>; + opp-microvolt = <1100000>; + clock-latency-ns = <140000>; + }; + opp-1200000000 { + opp-hz = /bits/ 64 <1200000000>; + opp-microvolt = <1125000>; + clock-latency-ns = <140000>; + }; + opp-1300000000 { + opp-hz = /bits/ 64 <1300000000>; + opp-microvolt = <1150000>; + clock-latency-ns = <140000>; + }; + opp-1400000000 { + opp-hz = /bits/ 64 <1400000000>; + opp-microvolt = <1200000>; + clock-latency-ns = <140000>; + }; + opp-1500000000 { + opp-hz = /bits/ 64 <1500000000>; + opp-microvolt = <1225000>; + clock-latency-ns = <140000>; + }; + opp-1600000000 { + opp-hz = /bits/ 64 <1600000000>; + opp-microvolt = <1250000>; + clock-latency-ns = <140000>; + }; + opp-1700000000 { + opp-hz = /bits/ 64 <1700000000>; + opp-microvolt = <1300000>; + clock-latency-ns = <140000>; + }; + }; + soc: soc { sysram@2020000 { compatible = 
"mmio-sram"; diff --git a/arch/arm/boot/dts/exynos5422-odroidxu3-audio.dtsi b/arch/arm/boot/dts/exynos5422-odroidxu3-audio.dtsi index 03611d50c5a9e..e84544b220b9e 100644 --- a/arch/arm/boot/dts/exynos5422-odroidxu3-audio.dtsi +++ b/arch/arm/boot/dts/exynos5422-odroidxu3-audio.dtsi @@ -26,8 +26,7 @@ "Speakers", "SPKL", "Speakers", "SPKR"; - assigned-clocks = <&i2s0 CLK_I2S_RCLK_SRC>, - <&clock CLK_MOUT_EPLL>, + assigned-clocks = <&clock CLK_MOUT_EPLL>, <&clock CLK_MOUT_MAU_EPLL>, <&clock CLK_MOUT_USER_MAU_EPLL>, <&clock_audss EXYNOS_MOUT_AUDSS>, @@ -36,8 +35,7 @@ <&clock_audss EXYNOS_DOUT_AUD_BUS>, <&clock_audss EXYNOS_DOUT_I2S>; - assigned-clock-parents = <&clock_audss EXYNOS_SCLK_I2S>, - <&clock CLK_FOUT_EPLL>, + assigned-clock-parents = <&clock CLK_FOUT_EPLL>, <&clock CLK_MOUT_EPLL>, <&clock CLK_MOUT_MAU_EPLL>, <&clock CLK_MAU_EPLL>, @@ -48,7 +46,6 @@ <0>, <0>, <0>, - <0>, <196608001>, <(196608002 / 2)>, <196608000>; @@ -84,4 +81,6 @@ &i2s0 { status = "okay"; + assigned-clocks = <&i2s0 CLK_I2S_RCLK_SRC>; + assigned-clock-parents = <&clock_audss EXYNOS_SCLK_I2S>; }; diff --git a/arch/arm/boot/dts/exynos5422-odroidxu4.dts b/arch/arm/boot/dts/exynos5422-odroidxu4.dts index 4a30cc849b00a..122174ea9e0a3 100644 --- a/arch/arm/boot/dts/exynos5422-odroidxu4.dts +++ b/arch/arm/boot/dts/exynos5422-odroidxu4.dts @@ -33,8 +33,7 @@ compatible = "samsung,odroid-xu3-audio"; model = "Odroid-XU4"; - assigned-clocks = <&i2s0 CLK_I2S_RCLK_SRC>, - <&clock CLK_MOUT_EPLL>, + assigned-clocks = <&clock CLK_MOUT_EPLL>, <&clock CLK_MOUT_MAU_EPLL>, <&clock CLK_MOUT_USER_MAU_EPLL>, <&clock_audss EXYNOS_MOUT_AUDSS>, @@ -43,8 +42,7 @@ <&clock_audss EXYNOS_DOUT_AUD_BUS>, <&clock_audss EXYNOS_DOUT_I2S>; - assigned-clock-parents = <&clock_audss EXYNOS_SCLK_I2S>, - <&clock CLK_FOUT_EPLL>, + assigned-clock-parents = <&clock CLK_FOUT_EPLL>, <&clock CLK_MOUT_EPLL>, <&clock CLK_MOUT_MAU_EPLL>, <&clock CLK_MAU_EPLL>, @@ -55,7 +53,6 @@ <0>, <0>, <0>, - <0>, <196608001>, <(196608002 / 2)>, <196608000>; 
@@ -79,6 +76,8 @@ &i2s0 { status = "okay"; + assigned-clocks = <&i2s0 CLK_I2S_RCLK_SRC>; + assigned-clock-parents = <&clock_audss EXYNOS_SCLK_I2S>; }; &pwm { diff --git a/arch/arm/boot/dts/imx53-ppd.dts b/arch/arm/boot/dts/imx53-ppd.dts index cdb90bee7b4a2..f202396e3f2a8 100644 --- a/arch/arm/boot/dts/imx53-ppd.dts +++ b/arch/arm/boot/dts/imx53-ppd.dts @@ -55,7 +55,7 @@ }; chosen { - stdout-path = "&uart1:115200n8"; + stdout-path = "serial0:115200n8"; }; memory@70000000 { diff --git a/arch/arm/boot/dts/imx6sll.dtsi b/arch/arm/boot/dts/imx6sll.dtsi index 000e6136a9d6c..3e6ffaf5f104f 100644 --- a/arch/arm/boot/dts/imx6sll.dtsi +++ b/arch/arm/boot/dts/imx6sll.dtsi @@ -709,7 +709,7 @@ i2c1: i2c@21a0000 { #address-cells = <1>; #size-cells = <0>; - compatible = "fs,imx6sll-i2c", "fsl,imx21-i2c"; + compatible = "fsl,imx6sll-i2c", "fsl,imx21-i2c"; reg = <0x021a0000 0x4000>; interrupts = ; clocks = <&clks IMX6SLL_CLK_I2C1>; diff --git a/arch/arm/boot/dts/imx6ull-pinfunc.h b/arch/arm/boot/dts/imx6ull-pinfunc.h index fdc46bb09cc1a..3c12a6fb0b618 100644 --- a/arch/arm/boot/dts/imx6ull-pinfunc.h +++ b/arch/arm/boot/dts/imx6ull-pinfunc.h @@ -14,14 +14,23 @@ * The pin function ID is a tuple of * */ +/* signals common for i.MX6UL and i.MX6ULL */ +#undef MX6UL_PAD_UART5_TX_DATA__UART5_DTE_RX +#define MX6UL_PAD_UART5_TX_DATA__UART5_DTE_RX 0x00BC 0x0348 0x0644 0x0 0x6 +#undef MX6UL_PAD_UART5_RX_DATA__UART5_DCE_RX +#define MX6UL_PAD_UART5_RX_DATA__UART5_DCE_RX 0x00C0 0x034C 0x0644 0x0 0x7 +#undef MX6UL_PAD_ENET1_RX_EN__UART5_DCE_RTS +#define MX6UL_PAD_ENET1_RX_EN__UART5_DCE_RTS 0x00CC 0x0358 0x0640 0x1 0x5 +#undef MX6UL_PAD_ENET1_TX_DATA0__UART5_DTE_RTS +#define MX6UL_PAD_ENET1_TX_DATA0__UART5_DTE_RTS 0x00D0 0x035C 0x0640 0x1 0x6 +#undef MX6UL_PAD_CSI_DATA02__UART5_DCE_RTS +#define MX6UL_PAD_CSI_DATA02__UART5_DCE_RTS 0x01EC 0x0478 0x0640 0x8 0x7 + +/* signals for i.MX6ULL only */ #define MX6ULL_PAD_UART1_TX_DATA__UART5_DTE_RX 0x0084 0x0310 0x0644 0x9 0x4 #define 
MX6ULL_PAD_UART1_RX_DATA__UART5_DCE_RX 0x0088 0x0314 0x0644 0x9 0x5 #define MX6ULL_PAD_UART1_CTS_B__UART5_DCE_RTS 0x008C 0x0318 0x0640 0x9 0x3 #define MX6ULL_PAD_UART1_RTS_B__UART5_DTE_RTS 0x0090 0x031C 0x0640 0x9 0x4 -#define MX6ULL_PAD_UART5_TX_DATA__UART5_DTE_RX 0x00BC 0x0348 0x0644 0x0 0x6 -#define MX6ULL_PAD_UART5_RX_DATA__UART5_DCE_RX 0x00C0 0x034C 0x0644 0x0 0x7 -#define MX6ULL_PAD_ENET1_RX_EN__UART5_DCE_RTS 0x00CC 0x0358 0x0640 0x1 0x5 -#define MX6ULL_PAD_ENET1_TX_DATA0__UART5_DTE_RTS 0x00D0 0x035C 0x0640 0x1 0x6 #define MX6ULL_PAD_ENET2_RX_DATA0__EPDC_SDDO08 0x00E4 0x0370 0x0000 0x9 0x0 #define MX6ULL_PAD_ENET2_RX_DATA1__EPDC_SDDO09 0x00E8 0x0374 0x0000 0x9 0x0 #define MX6ULL_PAD_ENET2_RX_EN__EPDC_SDDO10 0x00EC 0x0378 0x0000 0x9 0x0 @@ -55,7 +64,6 @@ #define MX6ULL_PAD_CSI_DATA00__ESAI_TX_HF_CLK 0x01E4 0x0470 0x0000 0x9 0x0 #define MX6ULL_PAD_CSI_DATA01__ESAI_RX_HF_CLK 0x01E8 0x0474 0x0000 0x9 0x0 #define MX6ULL_PAD_CSI_DATA02__ESAI_RX_FS 0x01EC 0x0478 0x0000 0x9 0x0 -#define MX6ULL_PAD_CSI_DATA02__UART5_DCE_RTS 0x01EC 0x0478 0x0640 0x8 0x7 #define MX6ULL_PAD_CSI_DATA03__ESAI_RX_CLK 0x01F0 0x047C 0x0000 0x9 0x0 #define MX6ULL_PAD_CSI_DATA04__ESAI_TX_FS 0x01F4 0x0480 0x0000 0x9 0x0 #define MX6ULL_PAD_CSI_DATA05__ESAI_TX_CLK 0x01F8 0x0484 0x0000 0x9 0x0 diff --git a/arch/arm/boot/dts/imx7d-nitrogen7.dts b/arch/arm/boot/dts/imx7d-nitrogen7.dts index d8aac4a2d02a2..177d21fdeb288 100644 --- a/arch/arm/boot/dts/imx7d-nitrogen7.dts +++ b/arch/arm/boot/dts/imx7d-nitrogen7.dts @@ -86,13 +86,17 @@ compatible = "regulator-fixed"; regulator-min-microvolt = <3300000>; regulator-max-microvolt = <3300000>; - clocks = <&clks IMX7D_CLKO2_ROOT_DIV>; - clock-names = "slow"; regulator-name = "reg_wlan"; startup-delay-us = <70000>; gpio = <&gpio4 21 GPIO_ACTIVE_HIGH>; enable-active-high; }; + + usdhc2_pwrseq: usdhc2_pwrseq { + compatible = "mmc-pwrseq-simple"; + clocks = <&clks IMX7D_CLKO2_ROOT_DIV>; + clock-names = "ext_clock"; + }; }; &adc1 { @@ -375,6 +379,7 @@ bus-width = 
<4>; non-removable; vmmc-supply = <®_wlan>; + mmc-pwrseq = <&usdhc2_pwrseq>; cap-power-off-card; keep-power-in-suspend; status = "okay"; diff --git a/arch/arm/boot/dts/imx7d-pico.dtsi b/arch/arm/boot/dts/imx7d-pico.dtsi index 21973eb556719..f27b3849d3ff3 100644 --- a/arch/arm/boot/dts/imx7d-pico.dtsi +++ b/arch/arm/boot/dts/imx7d-pico.dtsi @@ -100,6 +100,19 @@ regulator-min-microvolt = <1800000>; regulator-max-microvolt = <1800000>; }; + + usdhc2_pwrseq: usdhc2_pwrseq { + compatible = "mmc-pwrseq-simple"; + clocks = <&clks IMX7D_CLKO2_ROOT_DIV>; + clock-names = "ext_clock"; + }; +}; + +&clks { + assigned-clocks = <&clks IMX7D_CLKO2_ROOT_SRC>, + <&clks IMX7D_CLKO2_ROOT_DIV>; + assigned-clock-parents = <&clks IMX7D_CKIL>; + assigned-clock-rates = <0>, <32768>; }; &i2c4 { @@ -199,12 +212,13 @@ &usdhc2 { /* Wifi SDIO */ pinctrl-names = "default"; - pinctrl-0 = <&pinctrl_usdhc2>; + pinctrl-0 = <&pinctrl_usdhc2 &pinctrl_wifi_clk>; no-1-8-v; non-removable; keep-power-in-suspend; wakeup-source; vmmc-supply = <®_ap6212>; + mmc-pwrseq = <&usdhc2_pwrseq>; status = "okay"; }; @@ -301,6 +315,12 @@ }; &iomuxc_lpsr { + pinctrl_wifi_clk: wificlkgrp { + fsl,pins = < + MX7D_PAD_LPSR_GPIO1_IO03__CCM_CLKO2 0x7d + >; + }; + pinctrl_wdog: wdoggrp { fsl,pins = < MX7D_PAD_LPSR_GPIO1_IO00__WDOG1_WDOG_B 0x74 diff --git a/arch/arm/boot/dts/logicpd-som-lv.dtsi b/arch/arm/boot/dts/logicpd-som-lv.dtsi index ac343330d0c83..98b682a8080cc 100644 --- a/arch/arm/boot/dts/logicpd-som-lv.dtsi +++ b/arch/arm/boot/dts/logicpd-som-lv.dtsi @@ -129,7 +129,7 @@ }; &mmc3 { - interrupts-extended = <&intc 94 &omap3_pmx_core2 0x46>; + interrupts-extended = <&intc 94 &omap3_pmx_core 0x136>; pinctrl-0 = <&mmc3_pins &wl127x_gpio>; pinctrl-names = "default"; vmmc-supply = <&wl12xx_vmmc>; diff --git a/arch/arm/boot/dts/logicpd-torpedo-37xx-devkit.dts b/arch/arm/boot/dts/logicpd-torpedo-37xx-devkit.dts index 9d5d53fbe9c0c..c39cf2ca54da8 100644 --- a/arch/arm/boot/dts/logicpd-torpedo-37xx-devkit.dts +++ 
b/arch/arm/boot/dts/logicpd-torpedo-37xx-devkit.dts @@ -35,7 +35,7 @@ * jumpering combinations for the long run. */ &mmc3 { - interrupts-extended = <&intc 94 &omap3_pmx_core2 0x46>; + interrupts-extended = <&intc 94 &omap3_pmx_core 0x136>; pinctrl-0 = <&mmc3_pins &mmc3_core2_pins>; pinctrl-names = "default"; vmmc-supply = <&wl12xx_vmmc>; diff --git a/arch/arm/boot/dts/qcom-apq8064-arrow-sd-600eval.dts b/arch/arm/boot/dts/qcom-apq8064-arrow-sd-600eval.dts index 76b56eafaab90..f714a20649d74 100644 --- a/arch/arm/boot/dts/qcom-apq8064-arrow-sd-600eval.dts +++ b/arch/arm/boot/dts/qcom-apq8064-arrow-sd-600eval.dts @@ -387,6 +387,11 @@ hpd-gpio = <&tlmm_pinmux 72 GPIO_ACTIVE_HIGH>; ports { + port@0 { + endpoint { + remote-endpoint = <&mdp_dtv_out>; + }; + }; port@1 { endpoint { remote-endpoint = <&hdmi_con>; diff --git a/arch/arm/boot/dts/rk3288-veyron.dtsi b/arch/arm/boot/dts/rk3288-veyron.dtsi index 2075120cfc4d7..d8bf939a3aff9 100644 --- a/arch/arm/boot/dts/rk3288-veyron.dtsi +++ b/arch/arm/boot/dts/rk3288-veyron.dtsi @@ -10,7 +10,11 @@ #include "rk3288.dtsi" / { - memory@0 { + /* + * The default coreboot on veyron devices ignores memory@0 nodes + * and would instead create another memory node. 
+ */ + memory { device_type = "memory"; reg = <0x0 0x0 0x0 0x80000000>; }; diff --git a/arch/arm/boot/dts/sama5d2.dtsi b/arch/arm/boot/dts/sama5d2.dtsi index 61f68e5c48e96..b405992eb6016 100644 --- a/arch/arm/boot/dts/sama5d2.dtsi +++ b/arch/arm/boot/dts/sama5d2.dtsi @@ -308,7 +308,7 @@ 0x1 0x0 0x60000000 0x10000000 0x2 0x0 0x70000000 0x10000000 0x3 0x0 0x80000000 0x10000000>; - clocks = <&mck>; + clocks = <&h32ck>; status = "disabled"; nand_controller: nand-controller { diff --git a/arch/arm/boot/dts/socfpga_arria10.dtsi b/arch/arm/boot/dts/socfpga_arria10.dtsi index a4dcb68f4322e..b4dd3846e8cc9 100644 --- a/arch/arm/boot/dts/socfpga_arria10.dtsi +++ b/arch/arm/boot/dts/socfpga_arria10.dtsi @@ -613,7 +613,7 @@ status = "disabled"; }; - sdr: sdr@ffc25000 { + sdr: sdr@ffcfb100 { compatible = "altr,sdr-ctl", "syscon"; reg = <0xffcfb100 0x80>; }; diff --git a/arch/arm/boot/dts/sun8i-a83t-bananapi-m3.dts b/arch/arm/boot/dts/sun8i-a83t-bananapi-m3.dts index c7ce4158d6c8b..f250b20af4937 100644 --- a/arch/arm/boot/dts/sun8i-a83t-bananapi-m3.dts +++ b/arch/arm/boot/dts/sun8i-a83t-bananapi-m3.dts @@ -309,8 +309,8 @@ ®_dldo3 { regulator-always-on; - regulator-min-microvolt = <2500000>; - regulator-max-microvolt = <2500000>; + regulator-min-microvolt = <3300000>; + regulator-max-microvolt = <3300000>; regulator-name = "vcc-pd"; }; diff --git a/arch/arm/boot/dts/vf610m4-colibri.dts b/arch/arm/boot/dts/vf610m4-colibri.dts index 41ec66a969907..ca62495587602 100644 --- a/arch/arm/boot/dts/vf610m4-colibri.dts +++ b/arch/arm/boot/dts/vf610m4-colibri.dts @@ -50,8 +50,8 @@ compatible = "fsl,vf610m4"; chosen { - bootargs = "console=ttyLP2,115200 clk_ignore_unused init=/linuxrc rw"; - stdout-path = "&uart2"; + bootargs = "clk_ignore_unused init=/linuxrc rw"; + stdout-path = "serial2:115200"; }; memory@8c000000 { diff --git a/arch/arm/configs/imx_v6_v7_defconfig b/arch/arm/configs/imx_v6_v7_defconfig index 7eca43ff69bbe..f4c2e993bba3a 100644 --- a/arch/arm/configs/imx_v6_v7_defconfig 
+++ b/arch/arm/configs/imx_v6_v7_defconfig @@ -409,6 +409,7 @@ CONFIG_ZISOFS=y CONFIG_UDF_FS=m CONFIG_MSDOS_FS=m CONFIG_VFAT_FS=y +CONFIG_TMPFS_POSIX_ACL=y CONFIG_JFFS2_FS=y CONFIG_UBIFS_FS=y CONFIG_NFS_FS=y diff --git a/arch/arm/crypto/Kconfig b/arch/arm/crypto/Kconfig index 925d1364727a5..b8e69fe282b8d 100644 --- a/arch/arm/crypto/Kconfig +++ b/arch/arm/crypto/Kconfig @@ -121,10 +121,4 @@ config CRYPTO_CHACHA20_NEON select CRYPTO_BLKCIPHER select CRYPTO_CHACHA20 -config CRYPTO_SPECK_NEON - tristate "NEON accelerated Speck cipher algorithms" - depends on KERNEL_MODE_NEON - select CRYPTO_BLKCIPHER - select CRYPTO_SPECK - endif diff --git a/arch/arm/crypto/Makefile b/arch/arm/crypto/Makefile index 8de542c48adea..bd5bceef0605f 100644 --- a/arch/arm/crypto/Makefile +++ b/arch/arm/crypto/Makefile @@ -10,7 +10,6 @@ obj-$(CONFIG_CRYPTO_SHA1_ARM_NEON) += sha1-arm-neon.o obj-$(CONFIG_CRYPTO_SHA256_ARM) += sha256-arm.o obj-$(CONFIG_CRYPTO_SHA512_ARM) += sha512-arm.o obj-$(CONFIG_CRYPTO_CHACHA20_NEON) += chacha20-neon.o -obj-$(CONFIG_CRYPTO_SPECK_NEON) += speck-neon.o ce-obj-$(CONFIG_CRYPTO_AES_ARM_CE) += aes-arm-ce.o ce-obj-$(CONFIG_CRYPTO_SHA1_ARM_CE) += sha1-arm-ce.o @@ -54,7 +53,6 @@ ghash-arm-ce-y := ghash-ce-core.o ghash-ce-glue.o crct10dif-arm-ce-y := crct10dif-ce-core.o crct10dif-ce-glue.o crc32-arm-ce-y:= crc32-ce-core.o crc32-ce-glue.o chacha20-neon-y := chacha20-neon-core.o chacha20-neon-glue.o -speck-neon-y := speck-neon-core.o speck-neon-glue.o ifdef REGENERATE_ARM_CRYPTO quiet_cmd_perl = PERL $@ diff --git a/arch/arm/crypto/speck-neon-core.S b/arch/arm/crypto/speck-neon-core.S deleted file mode 100644 index 57caa742016ed..0000000000000 --- a/arch/arm/crypto/speck-neon-core.S +++ /dev/null @@ -1,434 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * NEON-accelerated implementation of Speck128-XTS and Speck64-XTS - * - * Copyright (c) 2018 Google, Inc - * - * Author: Eric Biggers - */ - -#include - - .text - .fpu neon - - // arguments - ROUND_KEYS .req r0 // 
const {u64,u32} *round_keys - NROUNDS .req r1 // int nrounds - DST .req r2 // void *dst - SRC .req r3 // const void *src - NBYTES .req r4 // unsigned int nbytes - TWEAK .req r5 // void *tweak - - // registers which hold the data being encrypted/decrypted - X0 .req q0 - X0_L .req d0 - X0_H .req d1 - Y0 .req q1 - Y0_H .req d3 - X1 .req q2 - X1_L .req d4 - X1_H .req d5 - Y1 .req q3 - Y1_H .req d7 - X2 .req q4 - X2_L .req d8 - X2_H .req d9 - Y2 .req q5 - Y2_H .req d11 - X3 .req q6 - X3_L .req d12 - X3_H .req d13 - Y3 .req q7 - Y3_H .req d15 - - // the round key, duplicated in all lanes - ROUND_KEY .req q8 - ROUND_KEY_L .req d16 - ROUND_KEY_H .req d17 - - // index vector for vtbl-based 8-bit rotates - ROTATE_TABLE .req d18 - - // multiplication table for updating XTS tweaks - GF128MUL_TABLE .req d19 - GF64MUL_TABLE .req d19 - - // current XTS tweak value(s) - TWEAKV .req q10 - TWEAKV_L .req d20 - TWEAKV_H .req d21 - - TMP0 .req q12 - TMP0_L .req d24 - TMP0_H .req d25 - TMP1 .req q13 - TMP2 .req q14 - TMP3 .req q15 - - .align 4 -.Lror64_8_table: - .byte 1, 2, 3, 4, 5, 6, 7, 0 -.Lror32_8_table: - .byte 1, 2, 3, 0, 5, 6, 7, 4 -.Lrol64_8_table: - .byte 7, 0, 1, 2, 3, 4, 5, 6 -.Lrol32_8_table: - .byte 3, 0, 1, 2, 7, 4, 5, 6 -.Lgf128mul_table: - .byte 0, 0x87 - .fill 14 -.Lgf64mul_table: - .byte 0, 0x1b, (0x1b << 1), (0x1b << 1) ^ 0x1b - .fill 12 - -/* - * _speck_round_128bytes() - Speck encryption round on 128 bytes at a time - * - * Do one Speck encryption round on the 128 bytes (8 blocks for Speck128, 16 for - * Speck64) stored in X0-X3 and Y0-Y3, using the round key stored in all lanes - * of ROUND_KEY. 'n' is the lane size: 64 for Speck128, or 32 for Speck64. - * - * The 8-bit rotates are implemented using vtbl instead of vshr + vsli because - * the vtbl approach is faster on some processors and the same speed on others. 
- */ -.macro _speck_round_128bytes n - - // x = ror(x, 8) - vtbl.8 X0_L, {X0_L}, ROTATE_TABLE - vtbl.8 X0_H, {X0_H}, ROTATE_TABLE - vtbl.8 X1_L, {X1_L}, ROTATE_TABLE - vtbl.8 X1_H, {X1_H}, ROTATE_TABLE - vtbl.8 X2_L, {X2_L}, ROTATE_TABLE - vtbl.8 X2_H, {X2_H}, ROTATE_TABLE - vtbl.8 X3_L, {X3_L}, ROTATE_TABLE - vtbl.8 X3_H, {X3_H}, ROTATE_TABLE - - // x += y - vadd.u\n X0, Y0 - vadd.u\n X1, Y1 - vadd.u\n X2, Y2 - vadd.u\n X3, Y3 - - // x ^= k - veor X0, ROUND_KEY - veor X1, ROUND_KEY - veor X2, ROUND_KEY - veor X3, ROUND_KEY - - // y = rol(y, 3) - vshl.u\n TMP0, Y0, #3 - vshl.u\n TMP1, Y1, #3 - vshl.u\n TMP2, Y2, #3 - vshl.u\n TMP3, Y3, #3 - vsri.u\n TMP0, Y0, #(\n - 3) - vsri.u\n TMP1, Y1, #(\n - 3) - vsri.u\n TMP2, Y2, #(\n - 3) - vsri.u\n TMP3, Y3, #(\n - 3) - - // y ^= x - veor Y0, TMP0, X0 - veor Y1, TMP1, X1 - veor Y2, TMP2, X2 - veor Y3, TMP3, X3 -.endm - -/* - * _speck_unround_128bytes() - Speck decryption round on 128 bytes at a time - * - * This is the inverse of _speck_round_128bytes(). 
- */ -.macro _speck_unround_128bytes n - - // y ^= x - veor TMP0, Y0, X0 - veor TMP1, Y1, X1 - veor TMP2, Y2, X2 - veor TMP3, Y3, X3 - - // y = ror(y, 3) - vshr.u\n Y0, TMP0, #3 - vshr.u\n Y1, TMP1, #3 - vshr.u\n Y2, TMP2, #3 - vshr.u\n Y3, TMP3, #3 - vsli.u\n Y0, TMP0, #(\n - 3) - vsli.u\n Y1, TMP1, #(\n - 3) - vsli.u\n Y2, TMP2, #(\n - 3) - vsli.u\n Y3, TMP3, #(\n - 3) - - // x ^= k - veor X0, ROUND_KEY - veor X1, ROUND_KEY - veor X2, ROUND_KEY - veor X3, ROUND_KEY - - // x -= y - vsub.u\n X0, Y0 - vsub.u\n X1, Y1 - vsub.u\n X2, Y2 - vsub.u\n X3, Y3 - - // x = rol(x, 8); - vtbl.8 X0_L, {X0_L}, ROTATE_TABLE - vtbl.8 X0_H, {X0_H}, ROTATE_TABLE - vtbl.8 X1_L, {X1_L}, ROTATE_TABLE - vtbl.8 X1_H, {X1_H}, ROTATE_TABLE - vtbl.8 X2_L, {X2_L}, ROTATE_TABLE - vtbl.8 X2_H, {X2_H}, ROTATE_TABLE - vtbl.8 X3_L, {X3_L}, ROTATE_TABLE - vtbl.8 X3_H, {X3_H}, ROTATE_TABLE -.endm - -.macro _xts128_precrypt_one dst_reg, tweak_buf, tmp - - // Load the next source block - vld1.8 {\dst_reg}, [SRC]! - - // Save the current tweak in the tweak buffer - vst1.8 {TWEAKV}, [\tweak_buf:128]! - - // XOR the next source block with the current tweak - veor \dst_reg, TWEAKV - - /* - * Calculate the next tweak by multiplying the current one by x, - * modulo p(x) = x^128 + x^7 + x^2 + x + 1. - */ - vshr.u64 \tmp, TWEAKV, #63 - vshl.u64 TWEAKV, #1 - veor TWEAKV_H, \tmp\()_L - vtbl.8 \tmp\()_H, {GF128MUL_TABLE}, \tmp\()_H - veor TWEAKV_L, \tmp\()_H -.endm - -.macro _xts64_precrypt_two dst_reg, tweak_buf, tmp - - // Load the next two source blocks - vld1.8 {\dst_reg}, [SRC]! - - // Save the current two tweaks in the tweak buffer - vst1.8 {TWEAKV}, [\tweak_buf:128]! - - // XOR the next two source blocks with the current two tweaks - veor \dst_reg, TWEAKV - - /* - * Calculate the next two tweaks by multiplying the current ones by x^2, - * modulo p(x) = x^64 + x^4 + x^3 + x + 1. 
- */ - vshr.u64 \tmp, TWEAKV, #62 - vshl.u64 TWEAKV, #2 - vtbl.8 \tmp\()_L, {GF64MUL_TABLE}, \tmp\()_L - vtbl.8 \tmp\()_H, {GF64MUL_TABLE}, \tmp\()_H - veor TWEAKV, \tmp -.endm - -/* - * _speck_xts_crypt() - Speck-XTS encryption/decryption - * - * Encrypt or decrypt NBYTES bytes of data from the SRC buffer to the DST buffer - * using Speck-XTS, specifically the variant with a block size of '2n' and round - * count given by NROUNDS. The expanded round keys are given in ROUND_KEYS, and - * the current XTS tweak value is given in TWEAK. It's assumed that NBYTES is a - * nonzero multiple of 128. - */ -.macro _speck_xts_crypt n, decrypting - push {r4-r7} - mov r7, sp - - /* - * The first four parameters were passed in registers r0-r3. Load the - * additional parameters, which were passed on the stack. - */ - ldr NBYTES, [sp, #16] - ldr TWEAK, [sp, #20] - - /* - * If decrypting, modify the ROUND_KEYS parameter to point to the last - * round key rather than the first, since for decryption the round keys - * are used in reverse order. - */ -.if \decrypting -.if \n == 64 - add ROUND_KEYS, ROUND_KEYS, NROUNDS, lsl #3 - sub ROUND_KEYS, #8 -.else - add ROUND_KEYS, ROUND_KEYS, NROUNDS, lsl #2 - sub ROUND_KEYS, #4 -.endif -.endif - - // Load the index vector for vtbl-based 8-bit rotates -.if \decrypting - ldr r12, =.Lrol\n\()_8_table -.else - ldr r12, =.Lror\n\()_8_table -.endif - vld1.8 {ROTATE_TABLE}, [r12:64] - - // One-time XTS preparation - - /* - * Allocate stack space to store 128 bytes worth of tweaks. For - * performance, this space is aligned to a 16-byte boundary so that we - * can use the load/store instructions that declare 16-byte alignment. - * For Thumb2 compatibility, don't do the 'bic' directly on 'sp'. 
- */ - sub r12, sp, #128 - bic r12, #0xf - mov sp, r12 - -.if \n == 64 - // Load first tweak - vld1.8 {TWEAKV}, [TWEAK] - - // Load GF(2^128) multiplication table - ldr r12, =.Lgf128mul_table - vld1.8 {GF128MUL_TABLE}, [r12:64] -.else - // Load first tweak - vld1.8 {TWEAKV_L}, [TWEAK] - - // Load GF(2^64) multiplication table - ldr r12, =.Lgf64mul_table - vld1.8 {GF64MUL_TABLE}, [r12:64] - - // Calculate second tweak, packing it together with the first - vshr.u64 TMP0_L, TWEAKV_L, #63 - vtbl.u8 TMP0_L, {GF64MUL_TABLE}, TMP0_L - vshl.u64 TWEAKV_H, TWEAKV_L, #1 - veor TWEAKV_H, TMP0_L -.endif - -.Lnext_128bytes_\@: - - /* - * Load the source blocks into {X,Y}[0-3], XOR them with their XTS tweak - * values, and save the tweaks on the stack for later. Then - * de-interleave the 'x' and 'y' elements of each block, i.e. make it so - * that the X[0-3] registers contain only the second halves of blocks, - * and the Y[0-3] registers contain only the first halves of blocks. - * (Speck uses the order (y, x) rather than the more intuitive (x, y).) 
- */ - mov r12, sp -.if \n == 64 - _xts128_precrypt_one X0, r12, TMP0 - _xts128_precrypt_one Y0, r12, TMP0 - _xts128_precrypt_one X1, r12, TMP0 - _xts128_precrypt_one Y1, r12, TMP0 - _xts128_precrypt_one X2, r12, TMP0 - _xts128_precrypt_one Y2, r12, TMP0 - _xts128_precrypt_one X3, r12, TMP0 - _xts128_precrypt_one Y3, r12, TMP0 - vswp X0_L, Y0_H - vswp X1_L, Y1_H - vswp X2_L, Y2_H - vswp X3_L, Y3_H -.else - _xts64_precrypt_two X0, r12, TMP0 - _xts64_precrypt_two Y0, r12, TMP0 - _xts64_precrypt_two X1, r12, TMP0 - _xts64_precrypt_two Y1, r12, TMP0 - _xts64_precrypt_two X2, r12, TMP0 - _xts64_precrypt_two Y2, r12, TMP0 - _xts64_precrypt_two X3, r12, TMP0 - _xts64_precrypt_two Y3, r12, TMP0 - vuzp.32 Y0, X0 - vuzp.32 Y1, X1 - vuzp.32 Y2, X2 - vuzp.32 Y3, X3 -.endif - - // Do the cipher rounds - - mov r12, ROUND_KEYS - mov r6, NROUNDS - -.Lnext_round_\@: -.if \decrypting -.if \n == 64 - vld1.64 ROUND_KEY_L, [r12] - sub r12, #8 - vmov ROUND_KEY_H, ROUND_KEY_L -.else - vld1.32 {ROUND_KEY_L[],ROUND_KEY_H[]}, [r12] - sub r12, #4 -.endif - _speck_unround_128bytes \n -.else -.if \n == 64 - vld1.64 ROUND_KEY_L, [r12]! - vmov ROUND_KEY_H, ROUND_KEY_L -.else - vld1.32 {ROUND_KEY_L[],ROUND_KEY_H[]}, [r12]! -.endif - _speck_round_128bytes \n -.endif - subs r6, r6, #1 - bne .Lnext_round_\@ - - // Re-interleave the 'x' and 'y' elements of each block -.if \n == 64 - vswp X0_L, Y0_H - vswp X1_L, Y1_H - vswp X2_L, Y2_H - vswp X3_L, Y3_H -.else - vzip.32 Y0, X0 - vzip.32 Y1, X1 - vzip.32 Y2, X2 - vzip.32 Y3, X3 -.endif - - // XOR the encrypted/decrypted blocks with the tweaks we saved earlier - mov r12, sp - vld1.8 {TMP0, TMP1}, [r12:128]! - vld1.8 {TMP2, TMP3}, [r12:128]! - veor X0, TMP0 - veor Y0, TMP1 - veor X1, TMP2 - veor Y1, TMP3 - vld1.8 {TMP0, TMP1}, [r12:128]! - vld1.8 {TMP2, TMP3}, [r12:128]! - veor X2, TMP0 - veor Y2, TMP1 - veor X3, TMP2 - veor Y3, TMP3 - - // Store the ciphertext in the destination buffer - vst1.8 {X0, Y0}, [DST]! - vst1.8 {X1, Y1}, [DST]! 
- vst1.8 {X2, Y2}, [DST]! - vst1.8 {X3, Y3}, [DST]! - - // Continue if there are more 128-byte chunks remaining, else return - subs NBYTES, #128 - bne .Lnext_128bytes_\@ - - // Store the next tweak -.if \n == 64 - vst1.8 {TWEAKV}, [TWEAK] -.else - vst1.8 {TWEAKV_L}, [TWEAK] -.endif - - mov sp, r7 - pop {r4-r7} - bx lr -.endm - -ENTRY(speck128_xts_encrypt_neon) - _speck_xts_crypt n=64, decrypting=0 -ENDPROC(speck128_xts_encrypt_neon) - -ENTRY(speck128_xts_decrypt_neon) - _speck_xts_crypt n=64, decrypting=1 -ENDPROC(speck128_xts_decrypt_neon) - -ENTRY(speck64_xts_encrypt_neon) - _speck_xts_crypt n=32, decrypting=0 -ENDPROC(speck64_xts_encrypt_neon) - -ENTRY(speck64_xts_decrypt_neon) - _speck_xts_crypt n=32, decrypting=1 -ENDPROC(speck64_xts_decrypt_neon) diff --git a/arch/arm/crypto/speck-neon-glue.c b/arch/arm/crypto/speck-neon-glue.c deleted file mode 100644 index f012c3ea998fb..0000000000000 --- a/arch/arm/crypto/speck-neon-glue.c +++ /dev/null @@ -1,288 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * NEON-accelerated implementation of Speck128-XTS and Speck64-XTS - * - * Copyright (c) 2018 Google, Inc - * - * Note: the NIST recommendation for XTS only specifies a 128-bit block size, - * but a 64-bit version (needed for Speck64) is fairly straightforward; the math - * is just done in GF(2^64) instead of GF(2^128), with the reducing polynomial - * x^64 + x^4 + x^3 + x + 1 from the original XEX paper (Rogaway, 2004: - * "Efficient Instantiations of Tweakable Blockciphers and Refinements to Modes - * OCB and PMAC"), represented as 0x1B. 
- */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -/* The assembly functions only handle multiples of 128 bytes */ -#define SPECK_NEON_CHUNK_SIZE 128 - -/* Speck128 */ - -struct speck128_xts_tfm_ctx { - struct speck128_tfm_ctx main_key; - struct speck128_tfm_ctx tweak_key; -}; - -asmlinkage void speck128_xts_encrypt_neon(const u64 *round_keys, int nrounds, - void *dst, const void *src, - unsigned int nbytes, void *tweak); - -asmlinkage void speck128_xts_decrypt_neon(const u64 *round_keys, int nrounds, - void *dst, const void *src, - unsigned int nbytes, void *tweak); - -typedef void (*speck128_crypt_one_t)(const struct speck128_tfm_ctx *, - u8 *, const u8 *); -typedef void (*speck128_xts_crypt_many_t)(const u64 *, int, void *, - const void *, unsigned int, void *); - -static __always_inline int -__speck128_xts_crypt(struct skcipher_request *req, - speck128_crypt_one_t crypt_one, - speck128_xts_crypt_many_t crypt_many) -{ - struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); - const struct speck128_xts_tfm_ctx *ctx = crypto_skcipher_ctx(tfm); - struct skcipher_walk walk; - le128 tweak; - int err; - - err = skcipher_walk_virt(&walk, req, true); - - crypto_speck128_encrypt(&ctx->tweak_key, (u8 *)&tweak, walk.iv); - - while (walk.nbytes > 0) { - unsigned int nbytes = walk.nbytes; - u8 *dst = walk.dst.virt.addr; - const u8 *src = walk.src.virt.addr; - - if (nbytes >= SPECK_NEON_CHUNK_SIZE && may_use_simd()) { - unsigned int count; - - count = round_down(nbytes, SPECK_NEON_CHUNK_SIZE); - kernel_neon_begin(); - (*crypt_many)(ctx->main_key.round_keys, - ctx->main_key.nrounds, - dst, src, count, &tweak); - kernel_neon_end(); - dst += count; - src += count; - nbytes -= count; - } - - /* Handle any remainder with generic code */ - while (nbytes >= sizeof(tweak)) { - le128_xor((le128 *)dst, (const le128 *)src, &tweak); - (*crypt_one)(&ctx->main_key, dst, dst); - le128_xor((le128 *)dst, (const le128 *)dst, 
&tweak); - gf128mul_x_ble(&tweak, &tweak); - - dst += sizeof(tweak); - src += sizeof(tweak); - nbytes -= sizeof(tweak); - } - err = skcipher_walk_done(&walk, nbytes); - } - - return err; -} - -static int speck128_xts_encrypt(struct skcipher_request *req) -{ - return __speck128_xts_crypt(req, crypto_speck128_encrypt, - speck128_xts_encrypt_neon); -} - -static int speck128_xts_decrypt(struct skcipher_request *req) -{ - return __speck128_xts_crypt(req, crypto_speck128_decrypt, - speck128_xts_decrypt_neon); -} - -static int speck128_xts_setkey(struct crypto_skcipher *tfm, const u8 *key, - unsigned int keylen) -{ - struct speck128_xts_tfm_ctx *ctx = crypto_skcipher_ctx(tfm); - int err; - - err = xts_verify_key(tfm, key, keylen); - if (err) - return err; - - keylen /= 2; - - err = crypto_speck128_setkey(&ctx->main_key, key, keylen); - if (err) - return err; - - return crypto_speck128_setkey(&ctx->tweak_key, key + keylen, keylen); -} - -/* Speck64 */ - -struct speck64_xts_tfm_ctx { - struct speck64_tfm_ctx main_key; - struct speck64_tfm_ctx tweak_key; -}; - -asmlinkage void speck64_xts_encrypt_neon(const u32 *round_keys, int nrounds, - void *dst, const void *src, - unsigned int nbytes, void *tweak); - -asmlinkage void speck64_xts_decrypt_neon(const u32 *round_keys, int nrounds, - void *dst, const void *src, - unsigned int nbytes, void *tweak); - -typedef void (*speck64_crypt_one_t)(const struct speck64_tfm_ctx *, - u8 *, const u8 *); -typedef void (*speck64_xts_crypt_many_t)(const u32 *, int, void *, - const void *, unsigned int, void *); - -static __always_inline int -__speck64_xts_crypt(struct skcipher_request *req, speck64_crypt_one_t crypt_one, - speck64_xts_crypt_many_t crypt_many) -{ - struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); - const struct speck64_xts_tfm_ctx *ctx = crypto_skcipher_ctx(tfm); - struct skcipher_walk walk; - __le64 tweak; - int err; - - err = skcipher_walk_virt(&walk, req, true); - - crypto_speck64_encrypt(&ctx->tweak_key, (u8 
*)&tweak, walk.iv); - - while (walk.nbytes > 0) { - unsigned int nbytes = walk.nbytes; - u8 *dst = walk.dst.virt.addr; - const u8 *src = walk.src.virt.addr; - - if (nbytes >= SPECK_NEON_CHUNK_SIZE && may_use_simd()) { - unsigned int count; - - count = round_down(nbytes, SPECK_NEON_CHUNK_SIZE); - kernel_neon_begin(); - (*crypt_many)(ctx->main_key.round_keys, - ctx->main_key.nrounds, - dst, src, count, &tweak); - kernel_neon_end(); - dst += count; - src += count; - nbytes -= count; - } - - /* Handle any remainder with generic code */ - while (nbytes >= sizeof(tweak)) { - *(__le64 *)dst = *(__le64 *)src ^ tweak; - (*crypt_one)(&ctx->main_key, dst, dst); - *(__le64 *)dst ^= tweak; - tweak = cpu_to_le64((le64_to_cpu(tweak) << 1) ^ - ((tweak & cpu_to_le64(1ULL << 63)) ? - 0x1B : 0)); - dst += sizeof(tweak); - src += sizeof(tweak); - nbytes -= sizeof(tweak); - } - err = skcipher_walk_done(&walk, nbytes); - } - - return err; -} - -static int speck64_xts_encrypt(struct skcipher_request *req) -{ - return __speck64_xts_crypt(req, crypto_speck64_encrypt, - speck64_xts_encrypt_neon); -} - -static int speck64_xts_decrypt(struct skcipher_request *req) -{ - return __speck64_xts_crypt(req, crypto_speck64_decrypt, - speck64_xts_decrypt_neon); -} - -static int speck64_xts_setkey(struct crypto_skcipher *tfm, const u8 *key, - unsigned int keylen) -{ - struct speck64_xts_tfm_ctx *ctx = crypto_skcipher_ctx(tfm); - int err; - - err = xts_verify_key(tfm, key, keylen); - if (err) - return err; - - keylen /= 2; - - err = crypto_speck64_setkey(&ctx->main_key, key, keylen); - if (err) - return err; - - return crypto_speck64_setkey(&ctx->tweak_key, key + keylen, keylen); -} - -static struct skcipher_alg speck_algs[] = { - { - .base.cra_name = "xts(speck128)", - .base.cra_driver_name = "xts-speck128-neon", - .base.cra_priority = 300, - .base.cra_blocksize = SPECK128_BLOCK_SIZE, - .base.cra_ctxsize = sizeof(struct speck128_xts_tfm_ctx), - .base.cra_alignmask = 7, - .base.cra_module = THIS_MODULE, 
- .min_keysize = 2 * SPECK128_128_KEY_SIZE, - .max_keysize = 2 * SPECK128_256_KEY_SIZE, - .ivsize = SPECK128_BLOCK_SIZE, - .walksize = SPECK_NEON_CHUNK_SIZE, - .setkey = speck128_xts_setkey, - .encrypt = speck128_xts_encrypt, - .decrypt = speck128_xts_decrypt, - }, { - .base.cra_name = "xts(speck64)", - .base.cra_driver_name = "xts-speck64-neon", - .base.cra_priority = 300, - .base.cra_blocksize = SPECK64_BLOCK_SIZE, - .base.cra_ctxsize = sizeof(struct speck64_xts_tfm_ctx), - .base.cra_alignmask = 7, - .base.cra_module = THIS_MODULE, - .min_keysize = 2 * SPECK64_96_KEY_SIZE, - .max_keysize = 2 * SPECK64_128_KEY_SIZE, - .ivsize = SPECK64_BLOCK_SIZE, - .walksize = SPECK_NEON_CHUNK_SIZE, - .setkey = speck64_xts_setkey, - .encrypt = speck64_xts_encrypt, - .decrypt = speck64_xts_decrypt, - } -}; - -static int __init speck_neon_module_init(void) -{ - if (!(elf_hwcap & HWCAP_NEON)) - return -ENODEV; - return crypto_register_skciphers(speck_algs, ARRAY_SIZE(speck_algs)); -} - -static void __exit speck_neon_module_exit(void) -{ - crypto_unregister_skciphers(speck_algs, ARRAY_SIZE(speck_algs)); -} - -module_init(speck_neon_module_init); -module_exit(speck_neon_module_exit); - -MODULE_DESCRIPTION("Speck block cipher (NEON-accelerated)"); -MODULE_LICENSE("GPL"); -MODULE_AUTHOR("Eric Biggers "); -MODULE_ALIAS_CRYPTO("xts(speck128)"); -MODULE_ALIAS_CRYPTO("xts-speck128-neon"); -MODULE_ALIAS_CRYPTO("xts(speck64)"); -MODULE_ALIAS_CRYPTO("xts-speck64-neon"); diff --git a/arch/arm/include/asm/pgtable-2level.h b/arch/arm/include/asm/pgtable-2level.h index 92fd2c8a9af06..12659ce5c1f38 100644 --- a/arch/arm/include/asm/pgtable-2level.h +++ b/arch/arm/include/asm/pgtable-2level.h @@ -10,7 +10,7 @@ #ifndef _ASM_PGTABLE_2LEVEL_H #define _ASM_PGTABLE_2LEVEL_H -#define __PAGETABLE_PMD_FOLDED +#define __PAGETABLE_PMD_FOLDED 1 /* * Hardware-wise, we have a two level page table structure, where the first diff --git a/arch/arm/kernel/ftrace.c b/arch/arm/kernel/ftrace.c index 
5617932a83dfa..ee673c09aa6c0 100644 --- a/arch/arm/kernel/ftrace.c +++ b/arch/arm/kernel/ftrace.c @@ -227,9 +227,7 @@ void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr, unsigned long frame_pointer) { unsigned long return_hooker = (unsigned long) &return_to_handler; - struct ftrace_graph_ent trace; unsigned long old; - int err; if (unlikely(atomic_read(¤t->tracing_graph_pause))) return; @@ -237,21 +235,8 @@ void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr, old = *parent; *parent = return_hooker; - trace.func = self_addr; - trace.depth = current->curr_ret_stack + 1; - - /* Only trace if the calling function expects to */ - if (!ftrace_graph_entry(&trace)) { + if (function_graph_enter(old, self_addr, frame_pointer, NULL)) *parent = old; - return; - } - - err = ftrace_push_return_trace(old, self_addr, &trace.depth, - frame_pointer, NULL); - if (err == -EBUSY) { - *parent = old; - return; - } } #ifdef CONFIG_DYNAMIC_FTRACE diff --git a/arch/arm/mach-imx/cpuidle-imx6sx.c b/arch/arm/mach-imx/cpuidle-imx6sx.c index 243a108a940b4..fd0053e47a151 100644 --- a/arch/arm/mach-imx/cpuidle-imx6sx.c +++ b/arch/arm/mach-imx/cpuidle-imx6sx.c @@ -110,7 +110,7 @@ int __init imx6sx_cpuidle_init(void) * except for power up sw2iso which need to be * larger than LDO ramp up time. 
*/ - imx_gpc_set_arm_power_up_timing(2, 1); + imx_gpc_set_arm_power_up_timing(0xf, 1); imx_gpc_set_arm_power_down_timing(1, 1); return cpuidle_register(&imx6sx_cpuidle_driver, NULL); diff --git a/arch/arm/mach-mmp/cputype.h b/arch/arm/mach-mmp/cputype.h index 446edaeb78a71..a96abcf521b4b 100644 --- a/arch/arm/mach-mmp/cputype.h +++ b/arch/arm/mach-mmp/cputype.h @@ -44,10 +44,12 @@ static inline int cpu_is_pxa910(void) #define cpu_is_pxa910() (0) #endif -#ifdef CONFIG_CPU_MMP2 +#if defined(CONFIG_CPU_MMP2) || defined(CONFIG_MACH_MMP2_DT) static inline int cpu_is_mmp2(void) { - return (((read_cpuid_id() >> 8) & 0xff) == 0x58); + return (((read_cpuid_id() >> 8) & 0xff) == 0x58) && + (((mmp_chip_id & 0xfff) == 0x410) || + ((mmp_chip_id & 0xfff) == 0x610)); } #else #define cpu_is_mmp2() (0) diff --git a/arch/arm/mach-omap1/board-ams-delta.c b/arch/arm/mach-omap1/board-ams-delta.c index dd28d2614d7fe..d10d8831f5274 100644 --- a/arch/arm/mach-omap1/board-ams-delta.c +++ b/arch/arm/mach-omap1/board-ams-delta.c @@ -726,6 +726,9 @@ static void modem_pm(struct uart_port *port, unsigned int state, unsigned old) struct modem_private_data *priv = port->private_data; int ret; + if (!priv) + return; + if (IS_ERR(priv->regulator)) return; diff --git a/arch/arm/mach-omap2/prm44xx.c b/arch/arm/mach-omap2/prm44xx.c index 7b95729e83594..38a1be6c3694f 100644 --- a/arch/arm/mach-omap2/prm44xx.c +++ b/arch/arm/mach-omap2/prm44xx.c @@ -351,7 +351,7 @@ static void omap44xx_prm_reconfigure_io_chain(void) * to occur, WAKEUPENABLE bits must be set in the pad mux registers, and * omap44xx_prm_reconfigure_io_chain() must be called. No return value. 
*/ -static void __init omap44xx_prm_enable_io_wakeup(void) +static void omap44xx_prm_enable_io_wakeup(void) { s32 inst = omap4_prmst_get_prm_dev_inst(); diff --git a/arch/arm/mm/cache-v7.S b/arch/arm/mm/cache-v7.S index 215df435bfb98..2149b47a0c5ac 100644 --- a/arch/arm/mm/cache-v7.S +++ b/arch/arm/mm/cache-v7.S @@ -360,14 +360,16 @@ v7_dma_inv_range: ALT_UP(W(nop)) #endif mcrne p15, 0, r0, c7, c14, 1 @ clean & invalidate D / U line + addne r0, r0, r2 tst r1, r3 bic r1, r1, r3 mcrne p15, 0, r1, c7, c14, 1 @ clean & invalidate D / U line -1: - mcr p15, 0, r0, c7, c6, 1 @ invalidate D / U line - add r0, r0, r2 cmp r0, r1 +1: + mcrlo p15, 0, r0, c7, c6, 1 @ invalidate D / U line + addlo r0, r0, r2 + cmplo r0, r1 blo 1b dsb st ret lr diff --git a/arch/arm/mm/cache-v7m.S b/arch/arm/mm/cache-v7m.S index 788486e830d3e..32aa2a2aa260c 100644 --- a/arch/arm/mm/cache-v7m.S +++ b/arch/arm/mm/cache-v7m.S @@ -73,9 +73,11 @@ /* * dcimvac: Invalidate data cache line by MVA to PoC */ -.macro dcimvac, rt, tmp - v7m_cacheop \rt, \tmp, V7M_SCB_DCIMVAC +.irp c,,eq,ne,cs,cc,mi,pl,vs,vc,hi,ls,ge,lt,gt,le,hs,lo +.macro dcimvac\c, rt, tmp + v7m_cacheop \rt, \tmp, V7M_SCB_DCIMVAC, \c .endm +.endr /* * dccmvau: Clean data cache line by MVA to PoU @@ -369,14 +371,16 @@ v7m_dma_inv_range: tst r0, r3 bic r0, r0, r3 dccimvacne r0, r3 + addne r0, r0, r2 subne r3, r2, #1 @ restore r3, corrupted by v7m's dccimvac tst r1, r3 bic r1, r1, r3 dccimvacne r1, r3 -1: - dcimvac r0, r3 - add r0, r0, r2 cmp r0, r1 +1: + dcimvaclo r0, r3 + addlo r0, r0, r2 + cmplo r0, r1 blo 1b dsb st ret lr diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c index 66566472c1538..1cb9c0f9b5d6e 100644 --- a/arch/arm/mm/dma-mapping.c +++ b/arch/arm/mm/dma-mapping.c @@ -830,7 +830,7 @@ static int __arm_dma_mmap(struct device *dev, struct vm_area_struct *vma, void *cpu_addr, dma_addr_t dma_addr, size_t size, unsigned long attrs) { - int ret; + int ret = -ENXIO; unsigned long nr_vma_pages = vma_pages(vma); unsigned 
long nr_pages = PAGE_ALIGN(size) >> PAGE_SHIFT; unsigned long pfn = dma_to_pfn(dev, dma_addr); diff --git a/arch/arm/mm/proc-v7.S b/arch/arm/mm/proc-v7.S index 6fe52819e0148..339eb17c9808e 100644 --- a/arch/arm/mm/proc-v7.S +++ b/arch/arm/mm/proc-v7.S @@ -112,7 +112,7 @@ ENTRY(cpu_v7_hvc_switch_mm) hvc #0 ldmfd sp!, {r0 - r3} b cpu_v7_switch_mm -ENDPROC(cpu_v7_smc_switch_mm) +ENDPROC(cpu_v7_hvc_switch_mm) #endif ENTRY(cpu_v7_iciallu_switch_mm) mov r3, #0 diff --git a/arch/arm/probes/kprobes/opt-arm.c b/arch/arm/probes/kprobes/opt-arm.c index b2aa9b32bff2b..2c118a6ab3587 100644 --- a/arch/arm/probes/kprobes/opt-arm.c +++ b/arch/arm/probes/kprobes/opt-arm.c @@ -247,7 +247,7 @@ int arch_prepare_optimized_kprobe(struct optimized_kprobe *op, struct kprobe *or } /* Copy arch-dep-instance from template. */ - memcpy(code, &optprobe_template_entry, + memcpy(code, (unsigned char *)optprobe_template_entry, TMPL_END_IDX * sizeof(kprobe_opcode_t)); /* Adjust buffer according to instruction. */ diff --git a/arch/arm64/Makefile b/arch/arm64/Makefile index 106039d25e2f7..35649ee8ad56b 100644 --- a/arch/arm64/Makefile +++ b/arch/arm64/Makefile @@ -18,7 +18,7 @@ ifeq ($(CONFIG_RELOCATABLE), y) # Pass --no-apply-dynamic-relocs to restore pre-binutils-2.27 behaviour # for relative relocs, since this leads to better Image compression # with the relocation offsets always being zero. 
-LDFLAGS_vmlinux += -pie -shared -Bsymbolic \ +LDFLAGS_vmlinux += -shared -Bsymbolic -z notext -z norelro \ $(call ld-option, --no-apply-dynamic-relocs) endif diff --git a/arch/arm64/boot/dts/altera/socfpga_stratix10.dtsi b/arch/arm64/boot/dts/altera/socfpga_stratix10.dtsi index d033da401c268..5089aa64088fc 100644 --- a/arch/arm64/boot/dts/altera/socfpga_stratix10.dtsi +++ b/arch/arm64/boot/dts/altera/socfpga_stratix10.dtsi @@ -137,6 +137,9 @@ reset-names = "stmmaceth", "stmmaceth-ocp"; clocks = <&clkmgr STRATIX10_EMAC0_CLK>; clock-names = "stmmaceth"; + tx-fifo-depth = <16384>; + rx-fifo-depth = <16384>; + snps,multicast-filter-bins = <256>; status = "disabled"; }; @@ -150,6 +153,9 @@ reset-names = "stmmaceth", "stmmaceth-ocp"; clocks = <&clkmgr STRATIX10_EMAC1_CLK>; clock-names = "stmmaceth"; + tx-fifo-depth = <16384>; + rx-fifo-depth = <16384>; + snps,multicast-filter-bins = <256>; status = "disabled"; }; @@ -163,6 +169,9 @@ reset-names = "stmmaceth", "stmmaceth-ocp"; clocks = <&clkmgr STRATIX10_EMAC2_CLK>; clock-names = "stmmaceth"; + tx-fifo-depth = <16384>; + rx-fifo-depth = <16384>; + snps,multicast-filter-bins = <256>; status = "disabled"; }; @@ -335,7 +344,7 @@ sysmgr: sysmgr@ffd12000 { compatible = "altr,sys-mgr", "syscon"; - reg = <0xffd12000 0x1000>; + reg = <0xffd12000 0x228>; }; /* Local timer */ diff --git a/arch/arm64/boot/dts/altera/socfpga_stratix10_socdk.dts b/arch/arm64/boot/dts/altera/socfpga_stratix10_socdk.dts index 6edc4fa9fd426..7c661753bfaf4 100644 --- a/arch/arm64/boot/dts/altera/socfpga_stratix10_socdk.dts +++ b/arch/arm64/boot/dts/altera/socfpga_stratix10_socdk.dts @@ -76,7 +76,7 @@ phy-mode = "rgmii"; phy-handle = <&phy0>; - max-frame-size = <3800>; + max-frame-size = <9000>; mdio0 { #address-cells = <1>; diff --git a/arch/arm64/boot/dts/marvell/armada-ap806.dtsi b/arch/arm64/boot/dts/marvell/armada-ap806.dtsi index 176e38d548727..ec0da5b3d7fd7 100644 --- a/arch/arm64/boot/dts/marvell/armada-ap806.dtsi +++ 
b/arch/arm64/boot/dts/marvell/armada-ap806.dtsi @@ -27,6 +27,23 @@ method = "smc"; }; + reserved-memory { + #address-cells = <2>; + #size-cells = <2>; + ranges; + + /* + * This area matches the mapping done with a + * mainline U-Boot, and should be updated by the + * bootloader. + */ + + psci-area@4000000 { + reg = <0x0 0x4000000 0x0 0x200000>; + no-map; + }; + }; + ap806 { #address-cells = <2>; #size-cells = <2>; diff --git a/arch/arm64/boot/dts/mediatek/mt7622-rfb1.dts b/arch/arm64/boot/dts/mediatek/mt7622-rfb1.dts index a747b7bf132d1..387be39d40cdd 100644 --- a/arch/arm64/boot/dts/mediatek/mt7622-rfb1.dts +++ b/arch/arm64/boot/dts/mediatek/mt7622-rfb1.dts @@ -17,8 +17,13 @@ model = "MediaTek MT7622 RFB1 board"; compatible = "mediatek,mt7622-rfb1", "mediatek,mt7622"; + aliases { + serial0 = &uart0; + }; + chosen { - bootargs = "earlycon=uart8250,mmio32,0x11002000 console=ttyS0,115200n1 swiotlb=512"; + stdout-path = "serial0:115200n8"; + bootargs = "earlycon=uart8250,mmio32,0x11002000 swiotlb=512"; }; cpus { diff --git a/arch/arm64/boot/dts/qcom/sdm845-mtp.dts b/arch/arm64/boot/dts/qcom/sdm845-mtp.dts index 6d651f3141937..6921f8dc5ebbc 100644 --- a/arch/arm64/boot/dts/qcom/sdm845-mtp.dts +++ b/arch/arm64/boot/dts/qcom/sdm845-mtp.dts @@ -31,6 +31,10 @@ status = "okay"; }; +&tlmm { + gpio-reserved-ranges = <0 4>, <81 4>; +}; + &uart9 { status = "okay"; }; diff --git a/arch/arm64/boot/dts/renesas/r8a7795.dtsi b/arch/arm64/boot/dts/renesas/r8a7795.dtsi index fb9d08ad7659d..c87eed77de2c1 100644 --- a/arch/arm64/boot/dts/renesas/r8a7795.dtsi +++ b/arch/arm64/boot/dts/renesas/r8a7795.dtsi @@ -662,7 +662,7 @@ clock-names = "fck", "brg_int", "scif_clk"; dmas = <&dmac1 0x35>, <&dmac1 0x34>, <&dmac2 0x35>, <&dmac2 0x34>; - dma-names = "tx", "rx"; + dma-names = "tx", "rx", "tx", "rx"; power-domains = <&sysc R8A7795_PD_ALWAYS_ON>; resets = <&cpg 518>; status = "disabled"; diff --git a/arch/arm64/boot/dts/renesas/r8a77980-condor.dts 
b/arch/arm64/boot/dts/renesas/r8a77980-condor.dts index 9f25c407dfd71..e830b6162375d 100644 --- a/arch/arm64/boot/dts/renesas/r8a77980-condor.dts +++ b/arch/arm64/boot/dts/renesas/r8a77980-condor.dts @@ -15,7 +15,7 @@ aliases { serial0 = &scif0; - ethernet0 = &avb; + ethernet0 = &gether; }; chosen { @@ -47,23 +47,6 @@ }; }; -&avb { - pinctrl-0 = <&avb_pins>; - pinctrl-names = "default"; - - phy-mode = "rgmii-id"; - phy-handle = <&phy0>; - renesas,no-ether-link; - status = "okay"; - - phy0: ethernet-phy@0 { - rxc-skew-ps = <1500>; - reg = <0>; - interrupt-parent = <&gpio1>; - interrupts = <17 IRQ_TYPE_LEVEL_LOW>; - }; -}; - &canfd { pinctrl-0 = <&canfd0_pins>; pinctrl-names = "default"; @@ -82,6 +65,23 @@ clock-frequency = <32768>; }; +&gether { + pinctrl-0 = <&gether_pins>; + pinctrl-names = "default"; + + phy-mode = "rgmii-id"; + phy-handle = <&phy0>; + renesas,no-ether-link; + status = "okay"; + + phy0: ethernet-phy@0 { + rxc-skew-ps = <1500>; + reg = <0>; + interrupt-parent = <&gpio4>; + interrupts = <23 IRQ_TYPE_LEVEL_LOW>; + }; +}; + &i2c0 { pinctrl-0 = <&i2c0_pins>; pinctrl-names = "default"; @@ -118,16 +118,17 @@ }; &pfc { - avb_pins: avb { - groups = "avb_mdio", "avb_rgmii"; - function = "avb"; - }; - canfd0_pins: canfd0 { groups = "canfd0_data_a"; function = "canfd0"; }; + gether_pins: gether { + groups = "gether_mdio_a", "gether_rgmii", + "gether_txcrefclk", "gether_txcrefclk_mega"; + function = "gether"; + }; + i2c0_pins: i2c0 { groups = "i2c0"; function = "i2c0"; diff --git a/arch/arm64/boot/dts/rockchip/rk3399-ficus.dts b/arch/arm64/boot/dts/rockchip/rk3399-ficus.dts index 8978d924eb83e..85cf0b6bdda9e 100644 --- a/arch/arm64/boot/dts/rockchip/rk3399-ficus.dts +++ b/arch/arm64/boot/dts/rockchip/rk3399-ficus.dts @@ -75,18 +75,6 @@ regulator-always-on; vin-supply = <&vcc_sys>; }; - - vdd_log: vdd-log { - compatible = "pwm-regulator"; - pwms = <&pwm2 0 25000 0>; - regulator-name = "vdd_log"; - regulator-min-microvolt = <800000>; - regulator-max-microvolt = 
<1400000>; - regulator-always-on; - regulator-boot-on; - vin-supply = <&vcc_sys>; - }; - }; &cpu_l0 { diff --git a/arch/arm64/boot/dts/rockchip/rk3399-puma-haikou.dts b/arch/arm64/boot/dts/rockchip/rk3399-puma-haikou.dts index e0d64f862322e..8ce4a79d9360f 100644 --- a/arch/arm64/boot/dts/rockchip/rk3399-puma-haikou.dts +++ b/arch/arm64/boot/dts/rockchip/rk3399-puma-haikou.dts @@ -153,7 +153,7 @@ }; &pcie0 { - ep-gpios = <&gpio4 RK_PC6 GPIO_ACTIVE_LOW>; + ep-gpios = <&gpio4 RK_PC6 GPIO_ACTIVE_HIGH>; num-lanes = <4>; pinctrl-names = "default"; pinctrl-0 = <&pcie_clkreqn_cpm>; diff --git a/arch/arm64/crypto/Kconfig b/arch/arm64/crypto/Kconfig index e3fdb0fd6f700..d51944ff9f91d 100644 --- a/arch/arm64/crypto/Kconfig +++ b/arch/arm64/crypto/Kconfig @@ -119,10 +119,4 @@ config CRYPTO_AES_ARM64_BS select CRYPTO_AES_ARM64 select CRYPTO_SIMD -config CRYPTO_SPECK_NEON - tristate "NEON accelerated Speck cipher algorithms" - depends on KERNEL_MODE_NEON - select CRYPTO_BLKCIPHER - select CRYPTO_SPECK - endif diff --git a/arch/arm64/crypto/Makefile b/arch/arm64/crypto/Makefile index bcafd016618ea..7bc4bda6d9c63 100644 --- a/arch/arm64/crypto/Makefile +++ b/arch/arm64/crypto/Makefile @@ -56,9 +56,6 @@ sha512-arm64-y := sha512-glue.o sha512-core.o obj-$(CONFIG_CRYPTO_CHACHA20_NEON) += chacha20-neon.o chacha20-neon-y := chacha20-neon-core.o chacha20-neon-glue.o -obj-$(CONFIG_CRYPTO_SPECK_NEON) += speck-neon.o -speck-neon-y := speck-neon-core.o speck-neon-glue.o - obj-$(CONFIG_CRYPTO_AES_ARM64) += aes-arm64.o aes-arm64-y := aes-cipher-core.o aes-cipher-glue.o diff --git a/arch/arm64/crypto/speck-neon-core.S b/arch/arm64/crypto/speck-neon-core.S deleted file mode 100644 index b14463438b096..0000000000000 --- a/arch/arm64/crypto/speck-neon-core.S +++ /dev/null @@ -1,352 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * ARM64 NEON-accelerated implementation of Speck128-XTS and Speck64-XTS - * - * Copyright (c) 2018 Google, Inc - * - * Author: Eric Biggers - */ - -#include - - .text 
- - // arguments - ROUND_KEYS .req x0 // const {u64,u32} *round_keys - NROUNDS .req w1 // int nrounds - NROUNDS_X .req x1 - DST .req x2 // void *dst - SRC .req x3 // const void *src - NBYTES .req w4 // unsigned int nbytes - TWEAK .req x5 // void *tweak - - // registers which hold the data being encrypted/decrypted - // (underscores avoid a naming collision with ARM64 registers x0-x3) - X_0 .req v0 - Y_0 .req v1 - X_1 .req v2 - Y_1 .req v3 - X_2 .req v4 - Y_2 .req v5 - X_3 .req v6 - Y_3 .req v7 - - // the round key, duplicated in all lanes - ROUND_KEY .req v8 - - // index vector for tbl-based 8-bit rotates - ROTATE_TABLE .req v9 - ROTATE_TABLE_Q .req q9 - - // temporary registers - TMP0 .req v10 - TMP1 .req v11 - TMP2 .req v12 - TMP3 .req v13 - - // multiplication table for updating XTS tweaks - GFMUL_TABLE .req v14 - GFMUL_TABLE_Q .req q14 - - // next XTS tweak value(s) - TWEAKV_NEXT .req v15 - - // XTS tweaks for the blocks currently being encrypted/decrypted - TWEAKV0 .req v16 - TWEAKV1 .req v17 - TWEAKV2 .req v18 - TWEAKV3 .req v19 - TWEAKV4 .req v20 - TWEAKV5 .req v21 - TWEAKV6 .req v22 - TWEAKV7 .req v23 - - .align 4 -.Lror64_8_table: - .octa 0x080f0e0d0c0b0a090007060504030201 -.Lror32_8_table: - .octa 0x0c0f0e0d080b0a090407060500030201 -.Lrol64_8_table: - .octa 0x0e0d0c0b0a09080f0605040302010007 -.Lrol32_8_table: - .octa 0x0e0d0c0f0a09080b0605040702010003 -.Lgf128mul_table: - .octa 0x00000000000000870000000000000001 -.Lgf64mul_table: - .octa 0x0000000000000000000000002d361b00 - -/* - * _speck_round_128bytes() - Speck encryption round on 128 bytes at a time - * - * Do one Speck encryption round on the 128 bytes (8 blocks for Speck128, 16 for - * Speck64) stored in X0-X3 and Y0-Y3, using the round key stored in all lanes - * of ROUND_KEY. 'n' is the lane size: 64 for Speck128, or 32 for Speck64. - * 'lanes' is the lane specifier: "2d" for Speck128 or "4s" for Speck64. 
- */ -.macro _speck_round_128bytes n, lanes - - // x = ror(x, 8) - tbl X_0.16b, {X_0.16b}, ROTATE_TABLE.16b - tbl X_1.16b, {X_1.16b}, ROTATE_TABLE.16b - tbl X_2.16b, {X_2.16b}, ROTATE_TABLE.16b - tbl X_3.16b, {X_3.16b}, ROTATE_TABLE.16b - - // x += y - add X_0.\lanes, X_0.\lanes, Y_0.\lanes - add X_1.\lanes, X_1.\lanes, Y_1.\lanes - add X_2.\lanes, X_2.\lanes, Y_2.\lanes - add X_3.\lanes, X_3.\lanes, Y_3.\lanes - - // x ^= k - eor X_0.16b, X_0.16b, ROUND_KEY.16b - eor X_1.16b, X_1.16b, ROUND_KEY.16b - eor X_2.16b, X_2.16b, ROUND_KEY.16b - eor X_3.16b, X_3.16b, ROUND_KEY.16b - - // y = rol(y, 3) - shl TMP0.\lanes, Y_0.\lanes, #3 - shl TMP1.\lanes, Y_1.\lanes, #3 - shl TMP2.\lanes, Y_2.\lanes, #3 - shl TMP3.\lanes, Y_3.\lanes, #3 - sri TMP0.\lanes, Y_0.\lanes, #(\n - 3) - sri TMP1.\lanes, Y_1.\lanes, #(\n - 3) - sri TMP2.\lanes, Y_2.\lanes, #(\n - 3) - sri TMP3.\lanes, Y_3.\lanes, #(\n - 3) - - // y ^= x - eor Y_0.16b, TMP0.16b, X_0.16b - eor Y_1.16b, TMP1.16b, X_1.16b - eor Y_2.16b, TMP2.16b, X_2.16b - eor Y_3.16b, TMP3.16b, X_3.16b -.endm - -/* - * _speck_unround_128bytes() - Speck decryption round on 128 bytes at a time - * - * This is the inverse of _speck_round_128bytes(). 
- */ -.macro _speck_unround_128bytes n, lanes - - // y ^= x - eor TMP0.16b, Y_0.16b, X_0.16b - eor TMP1.16b, Y_1.16b, X_1.16b - eor TMP2.16b, Y_2.16b, X_2.16b - eor TMP3.16b, Y_3.16b, X_3.16b - - // y = ror(y, 3) - ushr Y_0.\lanes, TMP0.\lanes, #3 - ushr Y_1.\lanes, TMP1.\lanes, #3 - ushr Y_2.\lanes, TMP2.\lanes, #3 - ushr Y_3.\lanes, TMP3.\lanes, #3 - sli Y_0.\lanes, TMP0.\lanes, #(\n - 3) - sli Y_1.\lanes, TMP1.\lanes, #(\n - 3) - sli Y_2.\lanes, TMP2.\lanes, #(\n - 3) - sli Y_3.\lanes, TMP3.\lanes, #(\n - 3) - - // x ^= k - eor X_0.16b, X_0.16b, ROUND_KEY.16b - eor X_1.16b, X_1.16b, ROUND_KEY.16b - eor X_2.16b, X_2.16b, ROUND_KEY.16b - eor X_3.16b, X_3.16b, ROUND_KEY.16b - - // x -= y - sub X_0.\lanes, X_0.\lanes, Y_0.\lanes - sub X_1.\lanes, X_1.\lanes, Y_1.\lanes - sub X_2.\lanes, X_2.\lanes, Y_2.\lanes - sub X_3.\lanes, X_3.\lanes, Y_3.\lanes - - // x = rol(x, 8) - tbl X_0.16b, {X_0.16b}, ROTATE_TABLE.16b - tbl X_1.16b, {X_1.16b}, ROTATE_TABLE.16b - tbl X_2.16b, {X_2.16b}, ROTATE_TABLE.16b - tbl X_3.16b, {X_3.16b}, ROTATE_TABLE.16b -.endm - -.macro _next_xts_tweak next, cur, tmp, n -.if \n == 64 - /* - * Calculate the next tweak by multiplying the current one by x, - * modulo p(x) = x^128 + x^7 + x^2 + x + 1. - */ - sshr \tmp\().2d, \cur\().2d, #63 - and \tmp\().16b, \tmp\().16b, GFMUL_TABLE.16b - shl \next\().2d, \cur\().2d, #1 - ext \tmp\().16b, \tmp\().16b, \tmp\().16b, #8 - eor \next\().16b, \next\().16b, \tmp\().16b -.else - /* - * Calculate the next two tweaks by multiplying the current ones by x^2, - * modulo p(x) = x^64 + x^4 + x^3 + x + 1. 
- */ - ushr \tmp\().2d, \cur\().2d, #62 - shl \next\().2d, \cur\().2d, #2 - tbl \tmp\().16b, {GFMUL_TABLE.16b}, \tmp\().16b - eor \next\().16b, \next\().16b, \tmp\().16b -.endif -.endm - -/* - * _speck_xts_crypt() - Speck-XTS encryption/decryption - * - * Encrypt or decrypt NBYTES bytes of data from the SRC buffer to the DST buffer - * using Speck-XTS, specifically the variant with a block size of '2n' and round - * count given by NROUNDS. The expanded round keys are given in ROUND_KEYS, and - * the current XTS tweak value is given in TWEAK. It's assumed that NBYTES is a - * nonzero multiple of 128. - */ -.macro _speck_xts_crypt n, lanes, decrypting - - /* - * If decrypting, modify the ROUND_KEYS parameter to point to the last - * round key rather than the first, since for decryption the round keys - * are used in reverse order. - */ -.if \decrypting - mov NROUNDS, NROUNDS /* zero the high 32 bits */ -.if \n == 64 - add ROUND_KEYS, ROUND_KEYS, NROUNDS_X, lsl #3 - sub ROUND_KEYS, ROUND_KEYS, #8 -.else - add ROUND_KEYS, ROUND_KEYS, NROUNDS_X, lsl #2 - sub ROUND_KEYS, ROUND_KEYS, #4 -.endif -.endif - - // Load the index vector for tbl-based 8-bit rotates -.if \decrypting - ldr ROTATE_TABLE_Q, .Lrol\n\()_8_table -.else - ldr ROTATE_TABLE_Q, .Lror\n\()_8_table -.endif - - // One-time XTS preparation -.if \n == 64 - // Load first tweak - ld1 {TWEAKV0.16b}, [TWEAK] - - // Load GF(2^128) multiplication table - ldr GFMUL_TABLE_Q, .Lgf128mul_table -.else - // Load first tweak - ld1 {TWEAKV0.8b}, [TWEAK] - - // Load GF(2^64) multiplication table - ldr GFMUL_TABLE_Q, .Lgf64mul_table - - // Calculate second tweak, packing it together with the first - ushr TMP0.2d, TWEAKV0.2d, #63 - shl TMP1.2d, TWEAKV0.2d, #1 - tbl TMP0.8b, {GFMUL_TABLE.16b}, TMP0.8b - eor TMP0.8b, TMP0.8b, TMP1.8b - mov TWEAKV0.d[1], TMP0.d[0] -.endif - -.Lnext_128bytes_\@: - - // Calculate XTS tweaks for next 128 bytes - _next_xts_tweak TWEAKV1, TWEAKV0, TMP0, \n - _next_xts_tweak TWEAKV2, TWEAKV1, TMP0, \n - 
_next_xts_tweak TWEAKV3, TWEAKV2, TMP0, \n - _next_xts_tweak TWEAKV4, TWEAKV3, TMP0, \n - _next_xts_tweak TWEAKV5, TWEAKV4, TMP0, \n - _next_xts_tweak TWEAKV6, TWEAKV5, TMP0, \n - _next_xts_tweak TWEAKV7, TWEAKV6, TMP0, \n - _next_xts_tweak TWEAKV_NEXT, TWEAKV7, TMP0, \n - - // Load the next source blocks into {X,Y}[0-3] - ld1 {X_0.16b-Y_1.16b}, [SRC], #64 - ld1 {X_2.16b-Y_3.16b}, [SRC], #64 - - // XOR the source blocks with their XTS tweaks - eor TMP0.16b, X_0.16b, TWEAKV0.16b - eor Y_0.16b, Y_0.16b, TWEAKV1.16b - eor TMP1.16b, X_1.16b, TWEAKV2.16b - eor Y_1.16b, Y_1.16b, TWEAKV3.16b - eor TMP2.16b, X_2.16b, TWEAKV4.16b - eor Y_2.16b, Y_2.16b, TWEAKV5.16b - eor TMP3.16b, X_3.16b, TWEAKV6.16b - eor Y_3.16b, Y_3.16b, TWEAKV7.16b - - /* - * De-interleave the 'x' and 'y' elements of each block, i.e. make it so - * that the X[0-3] registers contain only the second halves of blocks, - * and the Y[0-3] registers contain only the first halves of blocks. - * (Speck uses the order (y, x) rather than the more intuitive (x, y).) 
- */ - uzp2 X_0.\lanes, TMP0.\lanes, Y_0.\lanes - uzp1 Y_0.\lanes, TMP0.\lanes, Y_0.\lanes - uzp2 X_1.\lanes, TMP1.\lanes, Y_1.\lanes - uzp1 Y_1.\lanes, TMP1.\lanes, Y_1.\lanes - uzp2 X_2.\lanes, TMP2.\lanes, Y_2.\lanes - uzp1 Y_2.\lanes, TMP2.\lanes, Y_2.\lanes - uzp2 X_3.\lanes, TMP3.\lanes, Y_3.\lanes - uzp1 Y_3.\lanes, TMP3.\lanes, Y_3.\lanes - - // Do the cipher rounds - mov x6, ROUND_KEYS - mov w7, NROUNDS -.Lnext_round_\@: -.if \decrypting - ld1r {ROUND_KEY.\lanes}, [x6] - sub x6, x6, #( \n / 8 ) - _speck_unround_128bytes \n, \lanes -.else - ld1r {ROUND_KEY.\lanes}, [x6], #( \n / 8 ) - _speck_round_128bytes \n, \lanes -.endif - subs w7, w7, #1 - bne .Lnext_round_\@ - - // Re-interleave the 'x' and 'y' elements of each block - zip1 TMP0.\lanes, Y_0.\lanes, X_0.\lanes - zip2 Y_0.\lanes, Y_0.\lanes, X_0.\lanes - zip1 TMP1.\lanes, Y_1.\lanes, X_1.\lanes - zip2 Y_1.\lanes, Y_1.\lanes, X_1.\lanes - zip1 TMP2.\lanes, Y_2.\lanes, X_2.\lanes - zip2 Y_2.\lanes, Y_2.\lanes, X_2.\lanes - zip1 TMP3.\lanes, Y_3.\lanes, X_3.\lanes - zip2 Y_3.\lanes, Y_3.\lanes, X_3.\lanes - - // XOR the encrypted/decrypted blocks with the tweaks calculated earlier - eor X_0.16b, TMP0.16b, TWEAKV0.16b - eor Y_0.16b, Y_0.16b, TWEAKV1.16b - eor X_1.16b, TMP1.16b, TWEAKV2.16b - eor Y_1.16b, Y_1.16b, TWEAKV3.16b - eor X_2.16b, TMP2.16b, TWEAKV4.16b - eor Y_2.16b, Y_2.16b, TWEAKV5.16b - eor X_3.16b, TMP3.16b, TWEAKV6.16b - eor Y_3.16b, Y_3.16b, TWEAKV7.16b - mov TWEAKV0.16b, TWEAKV_NEXT.16b - - // Store the ciphertext in the destination buffer - st1 {X_0.16b-Y_1.16b}, [DST], #64 - st1 {X_2.16b-Y_3.16b}, [DST], #64 - - // Continue if there are more 128-byte chunks remaining - subs NBYTES, NBYTES, #128 - bne .Lnext_128bytes_\@ - - // Store the next tweak and return -.if \n == 64 - st1 {TWEAKV_NEXT.16b}, [TWEAK] -.else - st1 {TWEAKV_NEXT.8b}, [TWEAK] -.endif - ret -.endm - -ENTRY(speck128_xts_encrypt_neon) - _speck_xts_crypt n=64, lanes=2d, decrypting=0 -ENDPROC(speck128_xts_encrypt_neon) - 
-ENTRY(speck128_xts_decrypt_neon) - _speck_xts_crypt n=64, lanes=2d, decrypting=1 -ENDPROC(speck128_xts_decrypt_neon) - -ENTRY(speck64_xts_encrypt_neon) - _speck_xts_crypt n=32, lanes=4s, decrypting=0 -ENDPROC(speck64_xts_encrypt_neon) - -ENTRY(speck64_xts_decrypt_neon) - _speck_xts_crypt n=32, lanes=4s, decrypting=1 -ENDPROC(speck64_xts_decrypt_neon) diff --git a/arch/arm64/crypto/speck-neon-glue.c b/arch/arm64/crypto/speck-neon-glue.c deleted file mode 100644 index 6e233aeb4ff48..0000000000000 --- a/arch/arm64/crypto/speck-neon-glue.c +++ /dev/null @@ -1,282 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * NEON-accelerated implementation of Speck128-XTS and Speck64-XTS - * (64-bit version; based on the 32-bit version) - * - * Copyright (c) 2018 Google, Inc - */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -/* The assembly functions only handle multiples of 128 bytes */ -#define SPECK_NEON_CHUNK_SIZE 128 - -/* Speck128 */ - -struct speck128_xts_tfm_ctx { - struct speck128_tfm_ctx main_key; - struct speck128_tfm_ctx tweak_key; -}; - -asmlinkage void speck128_xts_encrypt_neon(const u64 *round_keys, int nrounds, - void *dst, const void *src, - unsigned int nbytes, void *tweak); - -asmlinkage void speck128_xts_decrypt_neon(const u64 *round_keys, int nrounds, - void *dst, const void *src, - unsigned int nbytes, void *tweak); - -typedef void (*speck128_crypt_one_t)(const struct speck128_tfm_ctx *, - u8 *, const u8 *); -typedef void (*speck128_xts_crypt_many_t)(const u64 *, int, void *, - const void *, unsigned int, void *); - -static __always_inline int -__speck128_xts_crypt(struct skcipher_request *req, - speck128_crypt_one_t crypt_one, - speck128_xts_crypt_many_t crypt_many) -{ - struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); - const struct speck128_xts_tfm_ctx *ctx = crypto_skcipher_ctx(tfm); - struct skcipher_walk walk; - le128 tweak; - int err; - - err = skcipher_walk_virt(&walk, req, true); 
- - crypto_speck128_encrypt(&ctx->tweak_key, (u8 *)&tweak, walk.iv); - - while (walk.nbytes > 0) { - unsigned int nbytes = walk.nbytes; - u8 *dst = walk.dst.virt.addr; - const u8 *src = walk.src.virt.addr; - - if (nbytes >= SPECK_NEON_CHUNK_SIZE && may_use_simd()) { - unsigned int count; - - count = round_down(nbytes, SPECK_NEON_CHUNK_SIZE); - kernel_neon_begin(); - (*crypt_many)(ctx->main_key.round_keys, - ctx->main_key.nrounds, - dst, src, count, &tweak); - kernel_neon_end(); - dst += count; - src += count; - nbytes -= count; - } - - /* Handle any remainder with generic code */ - while (nbytes >= sizeof(tweak)) { - le128_xor((le128 *)dst, (const le128 *)src, &tweak); - (*crypt_one)(&ctx->main_key, dst, dst); - le128_xor((le128 *)dst, (const le128 *)dst, &tweak); - gf128mul_x_ble(&tweak, &tweak); - - dst += sizeof(tweak); - src += sizeof(tweak); - nbytes -= sizeof(tweak); - } - err = skcipher_walk_done(&walk, nbytes); - } - - return err; -} - -static int speck128_xts_encrypt(struct skcipher_request *req) -{ - return __speck128_xts_crypt(req, crypto_speck128_encrypt, - speck128_xts_encrypt_neon); -} - -static int speck128_xts_decrypt(struct skcipher_request *req) -{ - return __speck128_xts_crypt(req, crypto_speck128_decrypt, - speck128_xts_decrypt_neon); -} - -static int speck128_xts_setkey(struct crypto_skcipher *tfm, const u8 *key, - unsigned int keylen) -{ - struct speck128_xts_tfm_ctx *ctx = crypto_skcipher_ctx(tfm); - int err; - - err = xts_verify_key(tfm, key, keylen); - if (err) - return err; - - keylen /= 2; - - err = crypto_speck128_setkey(&ctx->main_key, key, keylen); - if (err) - return err; - - return crypto_speck128_setkey(&ctx->tweak_key, key + keylen, keylen); -} - -/* Speck64 */ - -struct speck64_xts_tfm_ctx { - struct speck64_tfm_ctx main_key; - struct speck64_tfm_ctx tweak_key; -}; - -asmlinkage void speck64_xts_encrypt_neon(const u32 *round_keys, int nrounds, - void *dst, const void *src, - unsigned int nbytes, void *tweak); - -asmlinkage void 
speck64_xts_decrypt_neon(const u32 *round_keys, int nrounds, - void *dst, const void *src, - unsigned int nbytes, void *tweak); - -typedef void (*speck64_crypt_one_t)(const struct speck64_tfm_ctx *, - u8 *, const u8 *); -typedef void (*speck64_xts_crypt_many_t)(const u32 *, int, void *, - const void *, unsigned int, void *); - -static __always_inline int -__speck64_xts_crypt(struct skcipher_request *req, speck64_crypt_one_t crypt_one, - speck64_xts_crypt_many_t crypt_many) -{ - struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); - const struct speck64_xts_tfm_ctx *ctx = crypto_skcipher_ctx(tfm); - struct skcipher_walk walk; - __le64 tweak; - int err; - - err = skcipher_walk_virt(&walk, req, true); - - crypto_speck64_encrypt(&ctx->tweak_key, (u8 *)&tweak, walk.iv); - - while (walk.nbytes > 0) { - unsigned int nbytes = walk.nbytes; - u8 *dst = walk.dst.virt.addr; - const u8 *src = walk.src.virt.addr; - - if (nbytes >= SPECK_NEON_CHUNK_SIZE && may_use_simd()) { - unsigned int count; - - count = round_down(nbytes, SPECK_NEON_CHUNK_SIZE); - kernel_neon_begin(); - (*crypt_many)(ctx->main_key.round_keys, - ctx->main_key.nrounds, - dst, src, count, &tweak); - kernel_neon_end(); - dst += count; - src += count; - nbytes -= count; - } - - /* Handle any remainder with generic code */ - while (nbytes >= sizeof(tweak)) { - *(__le64 *)dst = *(__le64 *)src ^ tweak; - (*crypt_one)(&ctx->main_key, dst, dst); - *(__le64 *)dst ^= tweak; - tweak = cpu_to_le64((le64_to_cpu(tweak) << 1) ^ - ((tweak & cpu_to_le64(1ULL << 63)) ? 
- 0x1B : 0)); - dst += sizeof(tweak); - src += sizeof(tweak); - nbytes -= sizeof(tweak); - } - err = skcipher_walk_done(&walk, nbytes); - } - - return err; -} - -static int speck64_xts_encrypt(struct skcipher_request *req) -{ - return __speck64_xts_crypt(req, crypto_speck64_encrypt, - speck64_xts_encrypt_neon); -} - -static int speck64_xts_decrypt(struct skcipher_request *req) -{ - return __speck64_xts_crypt(req, crypto_speck64_decrypt, - speck64_xts_decrypt_neon); -} - -static int speck64_xts_setkey(struct crypto_skcipher *tfm, const u8 *key, - unsigned int keylen) -{ - struct speck64_xts_tfm_ctx *ctx = crypto_skcipher_ctx(tfm); - int err; - - err = xts_verify_key(tfm, key, keylen); - if (err) - return err; - - keylen /= 2; - - err = crypto_speck64_setkey(&ctx->main_key, key, keylen); - if (err) - return err; - - return crypto_speck64_setkey(&ctx->tweak_key, key + keylen, keylen); -} - -static struct skcipher_alg speck_algs[] = { - { - .base.cra_name = "xts(speck128)", - .base.cra_driver_name = "xts-speck128-neon", - .base.cra_priority = 300, - .base.cra_blocksize = SPECK128_BLOCK_SIZE, - .base.cra_ctxsize = sizeof(struct speck128_xts_tfm_ctx), - .base.cra_alignmask = 7, - .base.cra_module = THIS_MODULE, - .min_keysize = 2 * SPECK128_128_KEY_SIZE, - .max_keysize = 2 * SPECK128_256_KEY_SIZE, - .ivsize = SPECK128_BLOCK_SIZE, - .walksize = SPECK_NEON_CHUNK_SIZE, - .setkey = speck128_xts_setkey, - .encrypt = speck128_xts_encrypt, - .decrypt = speck128_xts_decrypt, - }, { - .base.cra_name = "xts(speck64)", - .base.cra_driver_name = "xts-speck64-neon", - .base.cra_priority = 300, - .base.cra_blocksize = SPECK64_BLOCK_SIZE, - .base.cra_ctxsize = sizeof(struct speck64_xts_tfm_ctx), - .base.cra_alignmask = 7, - .base.cra_module = THIS_MODULE, - .min_keysize = 2 * SPECK64_96_KEY_SIZE, - .max_keysize = 2 * SPECK64_128_KEY_SIZE, - .ivsize = SPECK64_BLOCK_SIZE, - .walksize = SPECK_NEON_CHUNK_SIZE, - .setkey = speck64_xts_setkey, - .encrypt = speck64_xts_encrypt, - .decrypt = 
speck64_xts_decrypt, - } -}; - -static int __init speck_neon_module_init(void) -{ - if (!(elf_hwcap & HWCAP_ASIMD)) - return -ENODEV; - return crypto_register_skciphers(speck_algs, ARRAY_SIZE(speck_algs)); -} - -static void __exit speck_neon_module_exit(void) -{ - crypto_unregister_skciphers(speck_algs, ARRAY_SIZE(speck_algs)); -} - -module_init(speck_neon_module_init); -module_exit(speck_neon_module_exit); - -MODULE_DESCRIPTION("Speck block cipher (NEON-accelerated)"); -MODULE_LICENSE("GPL"); -MODULE_AUTHOR("Eric Biggers "); -MODULE_ALIAS_CRYPTO("xts(speck128)"); -MODULE_ALIAS_CRYPTO("xts-speck128-neon"); -MODULE_ALIAS_CRYPTO("xts(speck64)"); -MODULE_ALIAS_CRYPTO("xts-speck64-neon"); diff --git a/arch/arm64/include/asm/assembler.h b/arch/arm64/include/asm/assembler.h index 0bcc98dbba565..f90f5d83b228a 100644 --- a/arch/arm64/include/asm/assembler.h +++ b/arch/arm64/include/asm/assembler.h @@ -378,27 +378,33 @@ alternative_endif * size: size of the region * Corrupts: kaddr, size, tmp1, tmp2 */ + .macro __dcache_op_workaround_clean_cache, op, kaddr +alternative_if_not ARM64_WORKAROUND_CLEAN_CACHE + dc \op, \kaddr +alternative_else + dc civac, \kaddr +alternative_endif + .endm + .macro dcache_by_line_op op, domain, kaddr, size, tmp1, tmp2 dcache_line_size \tmp1, \tmp2 add \size, \kaddr, \size sub \tmp2, \tmp1, #1 bic \kaddr, \kaddr, \tmp2 9998: - .if (\op == cvau || \op == cvac) -alternative_if_not ARM64_WORKAROUND_CLEAN_CACHE - dc \op, \kaddr -alternative_else - dc civac, \kaddr -alternative_endif - .elseif (\op == cvap) -alternative_if ARM64_HAS_DCPOP - sys 3, c7, c12, 1, \kaddr // dc cvap -alternative_else - dc cvac, \kaddr -alternative_endif + .ifc \op, cvau + __dcache_op_workaround_clean_cache \op, \kaddr + .else + .ifc \op, cvac + __dcache_op_workaround_clean_cache \op, \kaddr + .else + .ifc \op, cvap + sys 3, c7, c12, 1, \kaddr // dc cvap .else dc \op, \kaddr .endif + .endif + .endif add \kaddr, \kaddr, \tmp1 cmp \kaddr, \size b.lo 9998b diff --git 
a/arch/arm64/include/asm/ftrace.h b/arch/arm64/include/asm/ftrace.h index caa955f10e195..fac54fb050d00 100644 --- a/arch/arm64/include/asm/ftrace.h +++ b/arch/arm64/include/asm/ftrace.h @@ -56,6 +56,19 @@ static inline bool arch_trace_is_compat_syscall(struct pt_regs *regs) { return is_compat_task(); } + +#define ARCH_HAS_SYSCALL_MATCH_SYM_NAME + +static inline bool arch_syscall_match_sym_name(const char *sym, + const char *name) +{ + /* + * Since all syscall functions have __arm64_ prefix, we must skip it. + * However, as we described above, we decided to ignore compat + * syscalls, so we don't care about __arm64_compat_ prefix here. + */ + return !strcmp(sym + 8, name); +} #endif /* ifndef __ASSEMBLY__ */ #endif /* __ASM_FTRACE_H */ diff --git a/arch/arm64/include/asm/kvm_arm.h b/arch/arm64/include/asm/kvm_arm.h index aa45df752a166..8b284cbf8162f 100644 --- a/arch/arm64/include/asm/kvm_arm.h +++ b/arch/arm64/include/asm/kvm_arm.h @@ -24,6 +24,8 @@ /* Hyp Configuration Register (HCR) bits */ #define HCR_FWB (UL(1) << 46) +#define HCR_API (UL(1) << 41) +#define HCR_APK (UL(1) << 40) #define HCR_TEA (UL(1) << 37) #define HCR_TERR (UL(1) << 36) #define HCR_TLOR (UL(1) << 35) @@ -87,6 +89,7 @@ HCR_AMO | HCR_SWIO | HCR_TIDCP | HCR_RW | HCR_TLOR | \ HCR_FMO | HCR_IMO) #define HCR_VIRT_EXCP_MASK (HCR_VSE | HCR_VI | HCR_VF) +#define HCR_HOST_NVHE_FLAGS (HCR_RW | HCR_API | HCR_APK) #define HCR_HOST_VHE_FLAGS (HCR_RW | HCR_TGE | HCR_E2H) /* TCR_EL2 Registers bits */ @@ -104,7 +107,7 @@ TCR_EL2_ORGN0_MASK | TCR_EL2_IRGN0_MASK | TCR_EL2_T0SZ_MASK) /* VTCR_EL2 Registers bits */ -#define VTCR_EL2_RES1 (1 << 31) +#define VTCR_EL2_RES1 (1U << 31) #define VTCR_EL2_HD (1 << 22) #define VTCR_EL2_HA (1 << 21) #define VTCR_EL2_PS_MASK TCR_EL2_PS_MASK diff --git a/arch/arm64/include/asm/memory.h b/arch/arm64/include/asm/memory.h index b96442960aead..56562ff01076d 100644 --- a/arch/arm64/include/asm/memory.h +++ b/arch/arm64/include/asm/memory.h @@ -76,12 +76,17 @@ /* * KASAN requires 
1/8th of the kernel virtual address space for the shadow * region. KASAN can bloat the stack significantly, so double the (minimum) - * stack size when KASAN is in use. + * stack size when KASAN is in use, and then double it again if KASAN_EXTRA is + * on. */ #ifdef CONFIG_KASAN #define KASAN_SHADOW_SCALE_SHIFT 3 #define KASAN_SHADOW_SIZE (UL(1) << (VA_BITS - KASAN_SHADOW_SCALE_SHIFT)) +#ifdef CONFIG_KASAN_EXTRA +#define KASAN_THREAD_SHIFT 2 +#else #define KASAN_THREAD_SHIFT 1 +#endif /* CONFIG_KASAN_EXTRA */ #else #define KASAN_SHADOW_SIZE (0) #define KASAN_THREAD_SHIFT 0 diff --git a/arch/arm64/include/asm/percpu.h b/arch/arm64/include/asm/percpu.h index 9234013e759e5..21a81b59a0ccd 100644 --- a/arch/arm64/include/asm/percpu.h +++ b/arch/arm64/include/asm/percpu.h @@ -96,6 +96,7 @@ static inline unsigned long __percpu_##op(void *ptr, \ : [val] "Ir" (val)); \ break; \ default: \ + ret = 0; \ BUILD_BUG(); \ } \ \ @@ -125,6 +126,7 @@ static inline unsigned long __percpu_read(void *ptr, int size) ret = READ_ONCE(*(u64 *)ptr); break; default: + ret = 0; BUILD_BUG(); } @@ -194,6 +196,7 @@ static inline unsigned long __percpu_xchg(void *ptr, unsigned long val, : [val] "r" (val)); break; default: + ret = 0; BUILD_BUG(); } diff --git a/arch/arm64/include/asm/unistd.h b/arch/arm64/include/asm/unistd.h index e0d0f5b856e74..d52051879ffe2 100644 --- a/arch/arm64/include/asm/unistd.h +++ b/arch/arm64/include/asm/unistd.h @@ -40,8 +40,9 @@ * The following SVCs are ARM private. 
*/ #define __ARM_NR_COMPAT_BASE 0x0f0000 -#define __ARM_NR_compat_cacheflush (__ARM_NR_COMPAT_BASE+2) -#define __ARM_NR_compat_set_tls (__ARM_NR_COMPAT_BASE+5) +#define __ARM_NR_compat_cacheflush (__ARM_NR_COMPAT_BASE + 2) +#define __ARM_NR_compat_set_tls (__ARM_NR_COMPAT_BASE + 5) +#define __ARM_NR_COMPAT_END (__ARM_NR_COMPAT_BASE + 0x800) #define __NR_compat_syscalls 399 #endif diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c index e238b7932096d..93f69d82225de 100644 --- a/arch/arm64/kernel/cpufeature.c +++ b/arch/arm64/kernel/cpufeature.c @@ -848,15 +848,29 @@ static bool has_no_fpsimd(const struct arm64_cpu_capabilities *entry, int __unus } static bool has_cache_idc(const struct arm64_cpu_capabilities *entry, - int __unused) + int scope) { - return read_sanitised_ftr_reg(SYS_CTR_EL0) & BIT(CTR_IDC_SHIFT); + u64 ctr; + + if (scope == SCOPE_SYSTEM) + ctr = arm64_ftr_reg_ctrel0.sys_val; + else + ctr = read_cpuid_cachetype(); + + return ctr & BIT(CTR_IDC_SHIFT); } static bool has_cache_dic(const struct arm64_cpu_capabilities *entry, - int __unused) + int scope) { - return read_sanitised_ftr_reg(SYS_CTR_EL0) & BIT(CTR_DIC_SHIFT); + u64 ctr; + + if (scope == SCOPE_SYSTEM) + ctr = arm64_ftr_reg_ctrel0.sys_val; + else + ctr = read_cpuid_cachetype(); + + return ctr & BIT(CTR_DIC_SHIFT); } #ifdef CONFIG_UNMAP_KERNEL_AT_EL0 diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S index 09dbea221a274..8556876c91096 100644 --- a/arch/arm64/kernel/entry.S +++ b/arch/arm64/kernel/entry.S @@ -589,7 +589,7 @@ el1_undef: inherit_daif pstate=x23, tmp=x2 mov x0, sp bl do_undefinstr - ASM_BUG() + kernel_exit 1 el1_dbg: /* * Debug exception handling diff --git a/arch/arm64/kernel/ftrace.c b/arch/arm64/kernel/ftrace.c index 50986e388d2b2..57e962290df3a 100644 --- a/arch/arm64/kernel/ftrace.c +++ b/arch/arm64/kernel/ftrace.c @@ -216,8 +216,6 @@ void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr, { unsigned long return_hooker 
= (unsigned long)&return_to_handler; unsigned long old; - struct ftrace_graph_ent trace; - int err; if (unlikely(atomic_read(¤t->tracing_graph_pause))) return; @@ -229,18 +227,7 @@ void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr, */ old = *parent; - trace.func = self_addr; - trace.depth = current->curr_ret_stack + 1; - - /* Only trace if the calling function expects to */ - if (!ftrace_graph_entry(&trace)) - return; - - err = ftrace_push_return_trace(old, self_addr, &trace.depth, - frame_pointer, NULL); - if (err == -EBUSY) - return; - else + if (!function_graph_enter(old, self_addr, frame_pointer, NULL)) *parent = return_hooker; } diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S index b0853069702f7..651a06b1980f4 100644 --- a/arch/arm64/kernel/head.S +++ b/arch/arm64/kernel/head.S @@ -494,10 +494,9 @@ ENTRY(el2_setup) #endif /* Hyp configuration. */ - mov x0, #HCR_RW // 64-bit EL1 + mov_q x0, HCR_HOST_NVHE_FLAGS cbz x2, set_hcr - orr x0, x0, #HCR_TGE // Enable Host Extensions - orr x0, x0, #HCR_E2H + mov_q x0, HCR_HOST_VHE_FLAGS set_hcr: msr hcr_el2, x0 isb diff --git a/arch/arm64/kernel/hibernate.c b/arch/arm64/kernel/hibernate.c index 6b2686d54411f..29cdc99688f33 100644 --- a/arch/arm64/kernel/hibernate.c +++ b/arch/arm64/kernel/hibernate.c @@ -214,7 +214,7 @@ static int create_safe_exec_page(void *src_start, size_t length, } memcpy((void *)dst, src_start, length); - flush_icache_range(dst, dst + length); + __flush_icache_range(dst, dst + length); pgdp = pgd_offset_raw(allocator(mask), dst_addr); if (pgd_none(READ_ONCE(*pgdp))) { diff --git a/arch/arm64/kernel/image.h b/arch/arm64/kernel/image.h index a820ed07fb800..8da289dc843a0 100644 --- a/arch/arm64/kernel/image.h +++ b/arch/arm64/kernel/image.h @@ -75,16 +75,6 @@ __efistub_stext_offset = stext - _text; -/* - * Prevent the symbol aliases below from being emitted into the kallsyms - * table, by forcing them to be absolute symbols (which are conveniently - * ignored by 
scripts/kallsyms) rather than section relative symbols. - * The distinction is only relevant for partial linking, and only for symbols - * that are defined within a section declaration (which is not the case for - * the definitions below) so the resulting values will be identical. - */ -#define KALLSYMS_HIDE(sym) ABSOLUTE(sym) - /* * The EFI stub has its own symbol namespace prefixed by __efistub_, to * isolate it from the kernel proper. The following symbols are legally @@ -94,28 +84,28 @@ __efistub_stext_offset = stext - _text; * linked at. The routines below are all implemented in assembler in a * position independent manner */ -__efistub_memcmp = KALLSYMS_HIDE(__pi_memcmp); -__efistub_memchr = KALLSYMS_HIDE(__pi_memchr); -__efistub_memcpy = KALLSYMS_HIDE(__pi_memcpy); -__efistub_memmove = KALLSYMS_HIDE(__pi_memmove); -__efistub_memset = KALLSYMS_HIDE(__pi_memset); -__efistub_strlen = KALLSYMS_HIDE(__pi_strlen); -__efistub_strnlen = KALLSYMS_HIDE(__pi_strnlen); -__efistub_strcmp = KALLSYMS_HIDE(__pi_strcmp); -__efistub_strncmp = KALLSYMS_HIDE(__pi_strncmp); -__efistub_strrchr = KALLSYMS_HIDE(__pi_strrchr); -__efistub___flush_dcache_area = KALLSYMS_HIDE(__pi___flush_dcache_area); +__efistub_memcmp = __pi_memcmp; +__efistub_memchr = __pi_memchr; +__efistub_memcpy = __pi_memcpy; +__efistub_memmove = __pi_memmove; +__efistub_memset = __pi_memset; +__efistub_strlen = __pi_strlen; +__efistub_strnlen = __pi_strnlen; +__efistub_strcmp = __pi_strcmp; +__efistub_strncmp = __pi_strncmp; +__efistub_strrchr = __pi_strrchr; +__efistub___flush_dcache_area = __pi___flush_dcache_area; #ifdef CONFIG_KASAN -__efistub___memcpy = KALLSYMS_HIDE(__pi_memcpy); -__efistub___memmove = KALLSYMS_HIDE(__pi_memmove); -__efistub___memset = KALLSYMS_HIDE(__pi_memset); +__efistub___memcpy = __pi_memcpy; +__efistub___memmove = __pi_memmove; +__efistub___memset = __pi_memset; #endif -__efistub__text = KALLSYMS_HIDE(_text); -__efistub__end = KALLSYMS_HIDE(_end); -__efistub__edata = 
KALLSYMS_HIDE(_edata); -__efistub_screen_info = KALLSYMS_HIDE(screen_info); +__efistub__text = _text; +__efistub__end = _end; +__efistub__edata = _edata; +__efistub_screen_info = screen_info; #endif diff --git a/arch/arm64/kernel/kaslr.c b/arch/arm64/kernel/kaslr.c index f0e6ab8abe9c9..ba6b41790fcdf 100644 --- a/arch/arm64/kernel/kaslr.c +++ b/arch/arm64/kernel/kaslr.c @@ -14,6 +14,7 @@ #include #include +#include #include #include #include @@ -43,7 +44,7 @@ static __init u64 get_kaslr_seed(void *fdt) return ret; } -static __init const u8 *get_cmdline(void *fdt) +static __init const u8 *kaslr_get_cmdline(void *fdt) { static __initconst const u8 default_cmdline[] = CONFIG_CMDLINE; @@ -109,7 +110,7 @@ u64 __init kaslr_early_init(u64 dt_phys) * Check if 'nokaslr' appears on the command line, and * return 0 if that is the case. */ - cmdline = get_cmdline(fdt); + cmdline = kaslr_get_cmdline(fdt); str = strstr(cmdline, "nokaslr"); if (str == cmdline || (str > cmdline && *(str - 1) == ' ')) return 0; @@ -169,5 +170,8 @@ u64 __init kaslr_early_init(u64 dt_phys) module_alloc_base += (module_range * (seed & ((1 << 21) - 1))) >> 21; module_alloc_base &= PAGE_MASK; + __flush_dcache_area(&module_alloc_base, sizeof(module_alloc_base)); + __flush_dcache_area(&memstart_offset_seed, sizeof(memstart_offset_seed)); + return offset; } diff --git a/arch/arm64/kernel/perf_event.c b/arch/arm64/kernel/perf_event.c index e213f8e867f65..8a91ac067d44c 100644 --- a/arch/arm64/kernel/perf_event.c +++ b/arch/arm64/kernel/perf_event.c @@ -1274,6 +1274,7 @@ static struct platform_driver armv8_pmu_driver = { .driver = { .name = ARMV8_PMU_PDEV_NAME, .of_match_table = armv8_pmu_of_device_ids, + .suppress_bind_attrs = true, }, .probe = armv8_pmu_device_probe, }; diff --git a/arch/arm64/kernel/probes/kprobes.c b/arch/arm64/kernel/probes/kprobes.c index e78c3ef04d95d..b5a367d4bba6a 100644 --- a/arch/arm64/kernel/probes/kprobes.c +++ b/arch/arm64/kernel/probes/kprobes.c @@ -23,7 +23,9 @@ #include 
#include #include +#include #include +#include #include #include #include @@ -42,10 +44,21 @@ DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk); static void __kprobes post_kprobe_handler(struct kprobe_ctlblk *, struct pt_regs *); +static int __kprobes patch_text(kprobe_opcode_t *addr, u32 opcode) +{ + void *addrs[1]; + u32 insns[1]; + + addrs[0] = addr; + insns[0] = opcode; + + return aarch64_insn_patch_text(addrs, insns, 1); +} + static void __kprobes arch_prepare_ss_slot(struct kprobe *p) { /* prepare insn slot */ - p->ainsn.api.insn[0] = cpu_to_le32(p->opcode); + patch_text(p->ainsn.api.insn, p->opcode); flush_icache_range((uintptr_t) (p->ainsn.api.insn), (uintptr_t) (p->ainsn.api.insn) + @@ -118,15 +131,15 @@ int __kprobes arch_prepare_kprobe(struct kprobe *p) return 0; } -static int __kprobes patch_text(kprobe_opcode_t *addr, u32 opcode) +void *alloc_insn_page(void) { - void *addrs[1]; - u32 insns[1]; + void *page; - addrs[0] = (void *)addr; - insns[0] = (u32)opcode; + page = vmalloc_exec(PAGE_SIZE); + if (page) + set_memory_ro((unsigned long)page, 1); - return aarch64_insn_patch_text(addrs, insns, 1); + return page; } /* arm kprobe: install breakpoint in text */ diff --git a/arch/arm64/kernel/sys_compat.c b/arch/arm64/kernel/sys_compat.c index a6109825eeb97..010212d35700e 100644 --- a/arch/arm64/kernel/sys_compat.c +++ b/arch/arm64/kernel/sys_compat.c @@ -66,12 +66,11 @@ do_compat_cache_op(unsigned long start, unsigned long end, int flags) /* * Handle all unrecognised system calls. */ -long compat_arm_syscall(struct pt_regs *regs) +long compat_arm_syscall(struct pt_regs *regs, int scno) { siginfo_t info; - unsigned int no = regs->regs[7]; - switch (no) { + switch (scno) { /* * Flush a region from virtual address 'r0' to virtual address 'r1' * _exclusive_. 
There is no alignment requirement on either address; @@ -102,12 +101,12 @@ long compat_arm_syscall(struct pt_regs *regs) default: /* - * Calls 9f00xx..9f07ff are defined to return -ENOSYS + * Calls 0xf0xxx..0xf07ff are defined to return -ENOSYS * if not implemented, rather than raising SIGILL. This * way the calling program can gracefully determine whether * a feature is supported. */ - if ((no & 0xffff) <= 0x7ff) + if (scno < __ARM_NR_COMPAT_END) return -ENOSYS; break; } @@ -119,6 +118,6 @@ long compat_arm_syscall(struct pt_regs *regs) info.si_addr = (void __user *)instruction_pointer(regs) - (compat_thumb_mode(regs) ? 2 : 4); - arm64_notify_die("Oops - bad compat syscall(2)", regs, &info, no); + arm64_notify_die("Oops - bad compat syscall(2)", regs, &info, scno); return 0; } diff --git a/arch/arm64/kernel/syscall.c b/arch/arm64/kernel/syscall.c index 032d223128815..5610ac01c1ec0 100644 --- a/arch/arm64/kernel/syscall.c +++ b/arch/arm64/kernel/syscall.c @@ -13,16 +13,15 @@ #include #include -long compat_arm_syscall(struct pt_regs *regs); - +long compat_arm_syscall(struct pt_regs *regs, int scno); long sys_ni_syscall(void); -asmlinkage long do_ni_syscall(struct pt_regs *regs) +static long do_ni_syscall(struct pt_regs *regs, int scno) { #ifdef CONFIG_COMPAT long ret; if (is_compat_task()) { - ret = compat_arm_syscall(regs); + ret = compat_arm_syscall(regs, scno); if (ret != -ENOSYS) return ret; } @@ -47,7 +46,7 @@ static void invoke_syscall(struct pt_regs *regs, unsigned int scno, syscall_fn = syscall_table[array_index_nospec(scno, sc_nr)]; ret = __invoke_syscall(regs, syscall_fn); } else { - ret = do_ni_syscall(regs); + ret = do_ni_syscall(regs, scno); } regs->regs[0] = ret; diff --git a/arch/arm64/kernel/traps.c b/arch/arm64/kernel/traps.c index 039e9ff379cc4..b9da093e03417 100644 --- a/arch/arm64/kernel/traps.c +++ b/arch/arm64/kernel/traps.c @@ -310,10 +310,12 @@ static int call_undef_hook(struct pt_regs *regs) int (*fn)(struct pt_regs *regs, u32 instr) = NULL; 
void __user *pc = (void __user *)instruction_pointer(regs); - if (!user_mode(regs)) - return 1; - - if (compat_thumb_mode(regs)) { + if (!user_mode(regs)) { + __le32 instr_le; + if (probe_kernel_address((__force __le32 *)pc, instr_le)) + goto exit; + instr = le32_to_cpu(instr_le); + } else if (compat_thumb_mode(regs)) { /* 16-bit Thumb instruction */ __le16 instr_le; if (get_user(instr_le, (__le16 __user *)pc)) @@ -407,6 +409,7 @@ asmlinkage void __exception do_undefinstr(struct pt_regs *regs) return; force_signal_inject(SIGILL, ILL_ILLOPC, regs->pc); + BUG_ON(!user_mode(regs)); } void cpu_enable_cache_maint_trap(const struct arm64_cpu_capabilities *__unused) diff --git a/arch/arm64/kernel/vmlinux.lds.S b/arch/arm64/kernel/vmlinux.lds.S index 605d1b60469c2..74e469f8a8507 100644 --- a/arch/arm64/kernel/vmlinux.lds.S +++ b/arch/arm64/kernel/vmlinux.lds.S @@ -99,7 +99,8 @@ SECTIONS *(.discard) *(.discard.*) *(.interp .dynamic) - *(.dynsym .dynstr .hash) + *(.dynsym .dynstr .hash .gnu.hash) + *(.eh_frame) } . = KIMAGE_VADDR + TEXT_OFFSET; @@ -176,12 +177,12 @@ SECTIONS PERCPU_SECTION(L1_CACHE_BYTES) - .rela : ALIGN(8) { + .rela.dyn : ALIGN(8) { *(.rela .rela*) } - __rela_offset = ABSOLUTE(ADDR(.rela) - KIMAGE_VADDR); - __rela_size = SIZEOF(.rela); + __rela_offset = ABSOLUTE(ADDR(.rela.dyn) - KIMAGE_VADDR); + __rela_size = SIZEOF(.rela.dyn); . 
= ALIGN(SEGMENT_ALIGN); __initdata_end = .; diff --git a/arch/arm64/kvm/hyp/switch.c b/arch/arm64/kvm/hyp/switch.c index ca46153d79154..a1c32c1f2267f 100644 --- a/arch/arm64/kvm/hyp/switch.c +++ b/arch/arm64/kvm/hyp/switch.c @@ -157,7 +157,7 @@ static void __hyp_text __deactivate_traps_nvhe(void) mdcr_el2 |= MDCR_EL2_E2PB_MASK << MDCR_EL2_E2PB_SHIFT; write_sysreg(mdcr_el2, mdcr_el2); - write_sysreg(HCR_RW, hcr_el2); + write_sysreg(HCR_HOST_NVHE_FLAGS, hcr_el2); write_sysreg(CPTR_EL2_DEFAULT, cptr_el2); } diff --git a/arch/arm64/kvm/hyp/tlb.c b/arch/arm64/kvm/hyp/tlb.c index 131c7772703c2..c041eab3dce0f 100644 --- a/arch/arm64/kvm/hyp/tlb.c +++ b/arch/arm64/kvm/hyp/tlb.c @@ -15,14 +15,19 @@ * along with this program. If not, see . */ +#include + #include #include #include -static void __hyp_text __tlb_switch_to_guest_vhe(struct kvm *kvm) +static void __hyp_text __tlb_switch_to_guest_vhe(struct kvm *kvm, + unsigned long *flags) { u64 val; + local_irq_save(*flags); + /* * With VHE enabled, we have HCR_EL2.{E2H,TGE} = {1,1}, and * most TLB operations target EL2/EL0. 
In order to affect the @@ -37,7 +42,8 @@ static void __hyp_text __tlb_switch_to_guest_vhe(struct kvm *kvm) isb(); } -static void __hyp_text __tlb_switch_to_guest_nvhe(struct kvm *kvm) +static void __hyp_text __tlb_switch_to_guest_nvhe(struct kvm *kvm, + unsigned long *flags) { write_sysreg(kvm->arch.vttbr, vttbr_el2); isb(); @@ -48,7 +54,8 @@ static hyp_alternate_select(__tlb_switch_to_guest, __tlb_switch_to_guest_vhe, ARM64_HAS_VIRT_HOST_EXTN); -static void __hyp_text __tlb_switch_to_host_vhe(struct kvm *kvm) +static void __hyp_text __tlb_switch_to_host_vhe(struct kvm *kvm, + unsigned long flags) { /* * We're done with the TLB operation, let's restore the host's @@ -56,9 +63,12 @@ static void __hyp_text __tlb_switch_to_host_vhe(struct kvm *kvm) */ write_sysreg(0, vttbr_el2); write_sysreg(HCR_HOST_VHE_FLAGS, hcr_el2); + isb(); + local_irq_restore(flags); } -static void __hyp_text __tlb_switch_to_host_nvhe(struct kvm *kvm) +static void __hyp_text __tlb_switch_to_host_nvhe(struct kvm *kvm, + unsigned long flags) { write_sysreg(0, vttbr_el2); } @@ -70,11 +80,13 @@ static hyp_alternate_select(__tlb_switch_to_host, void __hyp_text __kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa) { + unsigned long flags; + dsb(ishst); /* Switch to requested VMID */ kvm = kern_hyp_va(kvm); - __tlb_switch_to_guest()(kvm); + __tlb_switch_to_guest()(kvm, &flags); /* * We could do so much better if we had the VA as well. 
@@ -117,36 +129,39 @@ void __hyp_text __kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa) if (!has_vhe() && icache_is_vpipt()) __flush_icache_all(); - __tlb_switch_to_host()(kvm); + __tlb_switch_to_host()(kvm, flags); } void __hyp_text __kvm_tlb_flush_vmid(struct kvm *kvm) { + unsigned long flags; + dsb(ishst); /* Switch to requested VMID */ kvm = kern_hyp_va(kvm); - __tlb_switch_to_guest()(kvm); + __tlb_switch_to_guest()(kvm, &flags); __tlbi(vmalls12e1is); dsb(ish); isb(); - __tlb_switch_to_host()(kvm); + __tlb_switch_to_host()(kvm, flags); } void __hyp_text __kvm_tlb_flush_local_vmid(struct kvm_vcpu *vcpu) { struct kvm *kvm = kern_hyp_va(kern_hyp_va(vcpu)->kvm); + unsigned long flags; /* Switch to requested VMID */ - __tlb_switch_to_guest()(kvm); + __tlb_switch_to_guest()(kvm, &flags); __tlbi(vmalle1); dsb(nsh); isb(); - __tlb_switch_to_host()(kvm); + __tlb_switch_to_host()(kvm, flags); } void __hyp_text __kvm_flush_vm_context(void) diff --git a/arch/arm64/lib/Makefile b/arch/arm64/lib/Makefile index 68755fd70dcf4..5df2d611b77d9 100644 --- a/arch/arm64/lib/Makefile +++ b/arch/arm64/lib/Makefile @@ -12,7 +12,7 @@ lib-y := clear_user.o delay.o copy_from_user.o \ # when supported by the CPU. Result and argument registers are handled # correctly, based on the function prototype. 
lib-$(CONFIG_ARM64_LSE_ATOMICS) += atomic_ll_sc.o -CFLAGS_atomic_ll_sc.o := -fcall-used-x0 -ffixed-x1 -ffixed-x2 \ +CFLAGS_atomic_ll_sc.o := -ffixed-x1 -ffixed-x2 \ -ffixed-x3 -ffixed-x4 -ffixed-x5 -ffixed-x6 \ -ffixed-x7 -fcall-saved-x8 -fcall-saved-x9 \ -fcall-saved-x10 -fcall-saved-x11 -fcall-saved-x12 \ diff --git a/arch/arm64/mm/cache.S b/arch/arm64/mm/cache.S index 0c22ede52f906..a194fd0e837fb 100644 --- a/arch/arm64/mm/cache.S +++ b/arch/arm64/mm/cache.S @@ -212,6 +212,9 @@ ENDPROC(__dma_clean_area) * - size - size in question */ ENTRY(__clean_dcache_area_pop) + alternative_if_not ARM64_HAS_DCPOP + b __clean_dcache_area_poc + alternative_else_nop_endif dcache_by_line_op cvap, sy, x0, x1, x2, x3 ret ENDPIPROC(__clean_dcache_area_pop) diff --git a/arch/arm64/mm/dma-mapping.c b/arch/arm64/mm/dma-mapping.c index 072c51fb07d73..c389f2bef938e 100644 --- a/arch/arm64/mm/dma-mapping.c +++ b/arch/arm64/mm/dma-mapping.c @@ -587,9 +587,9 @@ static void *__iommu_alloc_attrs(struct device *dev, size_t size, prot, __builtin_return_address(0)); if (addr) { - memset(addr, 0, size); if (!coherent) __dma_flush_area(page_to_virt(page), iosize); + memset(addr, 0, size); } else { iommu_dma_unmap_page(dev, *handle, iosize, 0, attrs); dma_release_from_contiguous(dev, page, diff --git a/arch/m68k/configs/amiga_defconfig b/arch/m68k/configs/amiga_defconfig index 1d5483f6e457b..93a3c3c0238ce 100644 --- a/arch/m68k/configs/amiga_defconfig +++ b/arch/m68k/configs/amiga_defconfig @@ -657,7 +657,6 @@ CONFIG_CRYPTO_SALSA20=m CONFIG_CRYPTO_SEED=m CONFIG_CRYPTO_SERPENT=m CONFIG_CRYPTO_SM4=m -CONFIG_CRYPTO_SPECK=m CONFIG_CRYPTO_TEA=m CONFIG_CRYPTO_TWOFISH=m CONFIG_CRYPTO_LZO=m diff --git a/arch/m68k/configs/apollo_defconfig b/arch/m68k/configs/apollo_defconfig index 52a0af127951f..e3d0efd6397d0 100644 --- a/arch/m68k/configs/apollo_defconfig +++ b/arch/m68k/configs/apollo_defconfig @@ -614,7 +614,6 @@ CONFIG_CRYPTO_SALSA20=m CONFIG_CRYPTO_SEED=m CONFIG_CRYPTO_SERPENT=m CONFIG_CRYPTO_SM4=m 
-CONFIG_CRYPTO_SPECK=m CONFIG_CRYPTO_TEA=m CONFIG_CRYPTO_TWOFISH=m CONFIG_CRYPTO_LZO=m diff --git a/arch/m68k/configs/atari_defconfig b/arch/m68k/configs/atari_defconfig index b3103e51268a3..75ac0c76e8849 100644 --- a/arch/m68k/configs/atari_defconfig +++ b/arch/m68k/configs/atari_defconfig @@ -635,7 +635,6 @@ CONFIG_CRYPTO_SALSA20=m CONFIG_CRYPTO_SEED=m CONFIG_CRYPTO_SERPENT=m CONFIG_CRYPTO_SM4=m -CONFIG_CRYPTO_SPECK=m CONFIG_CRYPTO_TEA=m CONFIG_CRYPTO_TWOFISH=m CONFIG_CRYPTO_LZO=m diff --git a/arch/m68k/configs/bvme6000_defconfig b/arch/m68k/configs/bvme6000_defconfig index fb7d651a4cabe..c6e492700188c 100644 --- a/arch/m68k/configs/bvme6000_defconfig +++ b/arch/m68k/configs/bvme6000_defconfig @@ -606,7 +606,6 @@ CONFIG_CRYPTO_SALSA20=m CONFIG_CRYPTO_SEED=m CONFIG_CRYPTO_SERPENT=m CONFIG_CRYPTO_SM4=m -CONFIG_CRYPTO_SPECK=m CONFIG_CRYPTO_TEA=m CONFIG_CRYPTO_TWOFISH=m CONFIG_CRYPTO_LZO=m diff --git a/arch/m68k/configs/hp300_defconfig b/arch/m68k/configs/hp300_defconfig index 6b37f5537c390..b00d1c477432d 100644 --- a/arch/m68k/configs/hp300_defconfig +++ b/arch/m68k/configs/hp300_defconfig @@ -616,7 +616,6 @@ CONFIG_CRYPTO_SALSA20=m CONFIG_CRYPTO_SEED=m CONFIG_CRYPTO_SERPENT=m CONFIG_CRYPTO_SM4=m -CONFIG_CRYPTO_SPECK=m CONFIG_CRYPTO_TEA=m CONFIG_CRYPTO_TWOFISH=m CONFIG_CRYPTO_LZO=m diff --git a/arch/m68k/configs/mac_defconfig b/arch/m68k/configs/mac_defconfig index c717bf8794492..85cac3770d898 100644 --- a/arch/m68k/configs/mac_defconfig +++ b/arch/m68k/configs/mac_defconfig @@ -638,7 +638,6 @@ CONFIG_CRYPTO_SALSA20=m CONFIG_CRYPTO_SEED=m CONFIG_CRYPTO_SERPENT=m CONFIG_CRYPTO_SM4=m -CONFIG_CRYPTO_SPECK=m CONFIG_CRYPTO_TEA=m CONFIG_CRYPTO_TWOFISH=m CONFIG_CRYPTO_LZO=m diff --git a/arch/m68k/configs/multi_defconfig b/arch/m68k/configs/multi_defconfig index 226c994ce794f..b3a5d1e99d277 100644 --- a/arch/m68k/configs/multi_defconfig +++ b/arch/m68k/configs/multi_defconfig @@ -720,7 +720,6 @@ CONFIG_CRYPTO_SALSA20=m CONFIG_CRYPTO_SEED=m CONFIG_CRYPTO_SERPENT=m 
CONFIG_CRYPTO_SM4=m -CONFIG_CRYPTO_SPECK=m CONFIG_CRYPTO_TEA=m CONFIG_CRYPTO_TWOFISH=m CONFIG_CRYPTO_LZO=m diff --git a/arch/m68k/configs/mvme147_defconfig b/arch/m68k/configs/mvme147_defconfig index b383327fd77a9..0ca22608453fd 100644 --- a/arch/m68k/configs/mvme147_defconfig +++ b/arch/m68k/configs/mvme147_defconfig @@ -606,7 +606,6 @@ CONFIG_CRYPTO_SALSA20=m CONFIG_CRYPTO_SEED=m CONFIG_CRYPTO_SERPENT=m CONFIG_CRYPTO_SM4=m -CONFIG_CRYPTO_SPECK=m CONFIG_CRYPTO_TEA=m CONFIG_CRYPTO_TWOFISH=m CONFIG_CRYPTO_LZO=m diff --git a/arch/m68k/configs/mvme16x_defconfig b/arch/m68k/configs/mvme16x_defconfig index 9783d3deb9e9d..8e3d10d12d9ca 100644 --- a/arch/m68k/configs/mvme16x_defconfig +++ b/arch/m68k/configs/mvme16x_defconfig @@ -606,7 +606,6 @@ CONFIG_CRYPTO_SALSA20=m CONFIG_CRYPTO_SEED=m CONFIG_CRYPTO_SERPENT=m CONFIG_CRYPTO_SM4=m -CONFIG_CRYPTO_SPECK=m CONFIG_CRYPTO_TEA=m CONFIG_CRYPTO_TWOFISH=m CONFIG_CRYPTO_LZO=m diff --git a/arch/m68k/configs/q40_defconfig b/arch/m68k/configs/q40_defconfig index a35d10ee10cb7..ff7e653ec7fac 100644 --- a/arch/m68k/configs/q40_defconfig +++ b/arch/m68k/configs/q40_defconfig @@ -629,7 +629,6 @@ CONFIG_CRYPTO_SALSA20=m CONFIG_CRYPTO_SEED=m CONFIG_CRYPTO_SERPENT=m CONFIG_CRYPTO_SM4=m -CONFIG_CRYPTO_SPECK=m CONFIG_CRYPTO_TEA=m CONFIG_CRYPTO_TWOFISH=m CONFIG_CRYPTO_LZO=m diff --git a/arch/m68k/configs/sun3_defconfig b/arch/m68k/configs/sun3_defconfig index 573bf922d4482..612cf46f6d0cb 100644 --- a/arch/m68k/configs/sun3_defconfig +++ b/arch/m68k/configs/sun3_defconfig @@ -607,7 +607,6 @@ CONFIG_CRYPTO_SALSA20=m CONFIG_CRYPTO_SEED=m CONFIG_CRYPTO_SERPENT=m CONFIG_CRYPTO_SM4=m -CONFIG_CRYPTO_SPECK=m CONFIG_CRYPTO_TEA=m CONFIG_CRYPTO_TWOFISH=m CONFIG_CRYPTO_LZO=m diff --git a/arch/m68k/configs/sun3x_defconfig b/arch/m68k/configs/sun3x_defconfig index efb27a7fcc559..a6a7bb6dc3fd5 100644 --- a/arch/m68k/configs/sun3x_defconfig +++ b/arch/m68k/configs/sun3x_defconfig @@ -608,7 +608,6 @@ CONFIG_CRYPTO_SALSA20=m CONFIG_CRYPTO_SEED=m 
CONFIG_CRYPTO_SERPENT=m CONFIG_CRYPTO_SM4=m -CONFIG_CRYPTO_SPECK=m CONFIG_CRYPTO_TEA=m CONFIG_CRYPTO_TWOFISH=m CONFIG_CRYPTO_LZO=m diff --git a/arch/m68k/include/asm/pgtable_mm.h b/arch/m68k/include/asm/pgtable_mm.h index 6181e4134483c..fe3ddd73a0ccb 100644 --- a/arch/m68k/include/asm/pgtable_mm.h +++ b/arch/m68k/include/asm/pgtable_mm.h @@ -55,12 +55,12 @@ */ #ifdef CONFIG_SUN3 #define PTRS_PER_PTE 16 -#define __PAGETABLE_PMD_FOLDED +#define __PAGETABLE_PMD_FOLDED 1 #define PTRS_PER_PMD 1 #define PTRS_PER_PGD 2048 #elif defined(CONFIG_COLDFIRE) #define PTRS_PER_PTE 512 -#define __PAGETABLE_PMD_FOLDED +#define __PAGETABLE_PMD_FOLDED 1 #define PTRS_PER_PMD 1 #define PTRS_PER_PGD 1024 #else diff --git a/arch/m68k/kernel/setup_mm.c b/arch/m68k/kernel/setup_mm.c index 5d3596c180f9f..de44899c0e617 100644 --- a/arch/m68k/kernel/setup_mm.c +++ b/arch/m68k/kernel/setup_mm.c @@ -165,8 +165,6 @@ static void __init m68k_parse_bootinfo(const struct bi_record *record) be32_to_cpu(m->addr); m68k_memory[m68k_num_memory].size = be32_to_cpu(m->size); - memblock_add(m68k_memory[m68k_num_memory].addr, - m68k_memory[m68k_num_memory].size); m68k_num_memory++; } else pr_warn("%s: too many memory chunks\n", diff --git a/arch/m68k/mm/motorola.c b/arch/m68k/mm/motorola.c index 4e17ecb5928aa..2eb2b31fb16a6 100644 --- a/arch/m68k/mm/motorola.c +++ b/arch/m68k/mm/motorola.c @@ -228,6 +228,7 @@ void __init paging_init(void) min_addr = m68k_memory[0].addr; max_addr = min_addr + m68k_memory[0].size; + memblock_add(m68k_memory[0].addr, m68k_memory[0].size); for (i = 1; i < m68k_num_memory;) { if (m68k_memory[i].addr < min_addr) { printk("Ignoring memory chunk at 0x%lx:0x%lx before the first chunk\n", @@ -238,6 +239,7 @@ void __init paging_init(void) (m68k_num_memory - i) * sizeof(struct m68k_mem_info)); continue; } + memblock_add(m68k_memory[i].addr, m68k_memory[i].size); addr = m68k_memory[i].addr + m68k_memory[i].size; if (addr > max_addr) max_addr = addr; diff --git 
a/arch/microblaze/include/asm/pgtable.h b/arch/microblaze/include/asm/pgtable.h index 7b650ab14fa08..2ca598534cc7b 100644 --- a/arch/microblaze/include/asm/pgtable.h +++ b/arch/microblaze/include/asm/pgtable.h @@ -63,7 +63,7 @@ extern int mem_init_done; #include -#define __PAGETABLE_PMD_FOLDED +#define __PAGETABLE_PMD_FOLDED 1 #ifdef __KERNEL__ #ifndef __ASSEMBLY__ diff --git a/arch/microblaze/kernel/ftrace.c b/arch/microblaze/kernel/ftrace.c index d57563c58a26b..224eea40e1ee8 100644 --- a/arch/microblaze/kernel/ftrace.c +++ b/arch/microblaze/kernel/ftrace.c @@ -22,8 +22,7 @@ void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr) { unsigned long old; - int faulted, err; - struct ftrace_graph_ent trace; + int faulted; unsigned long return_hooker = (unsigned long) &return_to_handler; @@ -63,18 +62,8 @@ void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr) return; } - err = ftrace_push_return_trace(old, self_addr, &trace.depth, 0, NULL); - if (err == -EBUSY) { + if (function_graph_enter(old, self_addr, 0, NULL)) *parent = old; - return; - } - - trace.func = self_addr; - /* Only trace if the calling function expects to */ - if (!ftrace_graph_entry(&trace)) { - current->curr_ret_stack--; - *parent = old; - } } #endif /* CONFIG_FUNCTION_GRAPH_TRACER */ diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig index 35511999156af..201caf226b47b 100644 --- a/arch/mips/Kconfig +++ b/arch/mips/Kconfig @@ -794,6 +794,7 @@ config SIBYTE_SWARM select SYS_SUPPORTS_HIGHMEM select SYS_SUPPORTS_LITTLE_ENDIAN select ZONE_DMA32 if 64BIT + select SWIOTLB if ARCH_DMA_ADDR_T_64BIT && PCI config SIBYTE_LITTLESUR bool "Sibyte BCM91250C2-LittleSur" @@ -814,6 +815,7 @@ config SIBYTE_SENTOSA select SYS_HAS_CPU_SB1 select SYS_SUPPORTS_BIG_ENDIAN select SYS_SUPPORTS_LITTLE_ENDIAN + select SWIOTLB if ARCH_DMA_ADDR_T_64BIT && PCI config SIBYTE_BIGSUR bool "Sibyte BCM91480B-BigSur" @@ -826,6 +828,7 @@ config SIBYTE_BIGSUR select SYS_SUPPORTS_HIGHMEM select 
SYS_SUPPORTS_LITTLE_ENDIAN select ZONE_DMA32 if 64BIT + select SWIOTLB if ARCH_DMA_ADDR_T_64BIT && PCI config SNI_RM bool "SNI RM200/300/400" @@ -3149,6 +3152,7 @@ config MIPS32_O32 config MIPS32_N32 bool "Kernel support for n32 binaries" depends on 64BIT + select ARCH_WANT_COMPAT_IPC_PARSE_VERSION select COMPAT select MIPS32_COMPAT select SYSVIPC_COMPAT if SYSVIPC diff --git a/arch/mips/bcm47xx/setup.c b/arch/mips/bcm47xx/setup.c index 6054d49e608ee..fe3773539effe 100644 --- a/arch/mips/bcm47xx/setup.c +++ b/arch/mips/bcm47xx/setup.c @@ -173,6 +173,31 @@ void __init plat_mem_setup(void) pm_power_off = bcm47xx_machine_halt; } +#ifdef CONFIG_BCM47XX_BCMA +static struct device * __init bcm47xx_setup_device(void) +{ + struct device *dev; + int err; + + dev = kzalloc(sizeof(*dev), GFP_KERNEL); + if (!dev) + return NULL; + + err = dev_set_name(dev, "bcm47xx_soc"); + if (err) { + pr_err("Failed to set SoC device name: %d\n", err); + kfree(dev); + return NULL; + } + + err = dma_coerce_mask_and_coherent(dev, DMA_BIT_MASK(32)); + if (err) + pr_err("Failed to set SoC DMA mask: %d\n", err); + + return dev; +} +#endif + /* * This finishes bus initialization doing things that were not possible without * kmalloc. Make sure to call it late enough (after mm_init). 
@@ -183,6 +208,10 @@ void __init bcm47xx_bus_setup(void) if (bcm47xx_bus_type == BCM47XX_BUS_TYPE_BCMA) { int err; + bcm47xx_bus.bcma.dev = bcm47xx_setup_device(); + if (!bcm47xx_bus.bcma.dev) + panic("Failed to setup SoC device\n"); + err = bcma_host_soc_init(&bcm47xx_bus.bcma); if (err) panic("Failed to initialize BCMA bus (err %d)", err); @@ -235,6 +264,8 @@ static int __init bcm47xx_register_bus_complete(void) #endif #ifdef CONFIG_BCM47XX_BCMA case BCM47XX_BUS_TYPE_BCMA: + if (device_register(bcm47xx_bus.bcma.dev)) + pr_err("Failed to register SoC device\n"); bcma_bus_register(&bcm47xx_bus.bcma.bus); break; #endif diff --git a/arch/mips/boot/compressed/calc_vmlinuz_load_addr.c b/arch/mips/boot/compressed/calc_vmlinuz_load_addr.c index 37fe58c19a90f..542c3ede97222 100644 --- a/arch/mips/boot/compressed/calc_vmlinuz_load_addr.c +++ b/arch/mips/boot/compressed/calc_vmlinuz_load_addr.c @@ -13,6 +13,7 @@ #include #include #include +#include "../../../../include/linux/sizes.h" int main(int argc, char *argv[]) { @@ -45,11 +46,11 @@ int main(int argc, char *argv[]) vmlinuz_load_addr = vmlinux_load_addr + vmlinux_size; /* - * Align with 16 bytes: "greater than that used for any standard data - * types by a MIPS compiler." -- See MIPS Run Linux (Second Edition). + * Align with 64KB: KEXEC needs load sections to be aligned to PAGE_SIZE, + * which may be as large as 64KB depending on the kernel configuration. 
*/ - vmlinuz_load_addr += (16 - vmlinux_size % 16); + vmlinuz_load_addr += (SZ_64K - vmlinux_size % SZ_64K); printf("0x%llx\n", vmlinuz_load_addr); diff --git a/arch/mips/cavium-octeon/executive/cvmx-helper.c b/arch/mips/cavium-octeon/executive/cvmx-helper.c index 75108ec669ebc..3ddbb98dff848 100644 --- a/arch/mips/cavium-octeon/executive/cvmx-helper.c +++ b/arch/mips/cavium-octeon/executive/cvmx-helper.c @@ -67,7 +67,7 @@ void (*cvmx_override_pko_queue_priority) (int pko_port, void (*cvmx_override_ipd_port_setup) (int ipd_port); /* Port count per interface */ -static int interface_port_count[5]; +static int interface_port_count[9]; /** * Return the number of interfaces the chip has. Each interface @@ -286,7 +286,8 @@ static cvmx_helper_interface_mode_t __cvmx_get_mode_cn7xxx(int interface) case 3: return CVMX_HELPER_INTERFACE_MODE_LOOP; case 4: - return CVMX_HELPER_INTERFACE_MODE_RGMII; + /* TODO: Implement support for AGL (RGMII). */ + return CVMX_HELPER_INTERFACE_MODE_DISABLED; default: return CVMX_HELPER_INTERFACE_MODE_DISABLED; } diff --git a/arch/mips/configs/cavium_octeon_defconfig b/arch/mips/configs/cavium_octeon_defconfig index 490b12af103c1..c52d0efacd146 100644 --- a/arch/mips/configs/cavium_octeon_defconfig +++ b/arch/mips/configs/cavium_octeon_defconfig @@ -140,6 +140,7 @@ CONFIG_RTC_CLASS=y CONFIG_RTC_DRV_DS1307=y CONFIG_STAGING=y CONFIG_OCTEON_ETHERNET=y +CONFIG_OCTEON_USB=y # CONFIG_IOMMU_SUPPORT is not set CONFIG_RAS=y CONFIG_EXT4_FS=y diff --git a/arch/mips/include/asm/atomic.h b/arch/mips/include/asm/atomic.h index d4ea7a5b60cf4..9e805317847d8 100644 --- a/arch/mips/include/asm/atomic.h +++ b/arch/mips/include/asm/atomic.h @@ -306,7 +306,7 @@ static __inline__ long atomic64_fetch_##op##_relaxed(long i, atomic64_t * v) \ { \ long result; \ \ - if (kernel_uses_llsc && R10000_LLSC_WAR) { \ + if (kernel_uses_llsc) { \ long temp; \ \ __asm__ __volatile__( \ diff --git a/arch/mips/include/asm/cpu-info.h b/arch/mips/include/asm/cpu-info.h index 
a41059d47d31c..ed7ffe4e63a32 100644 --- a/arch/mips/include/asm/cpu-info.h +++ b/arch/mips/include/asm/cpu-info.h @@ -50,7 +50,7 @@ struct guest_info { #define MIPS_CACHE_PINDEX 0x00000020 /* Physically indexed cache */ struct cpuinfo_mips { - unsigned long asid_cache; + u64 asid_cache; #ifdef CONFIG_MIPS_ASID_BITS_VARIABLE unsigned long asid_mask; #endif diff --git a/arch/mips/include/asm/mach-loongson64/irq.h b/arch/mips/include/asm/mach-loongson64/irq.h index 3644b68c0cccd..be9f727a93280 100644 --- a/arch/mips/include/asm/mach-loongson64/irq.h +++ b/arch/mips/include/asm/mach-loongson64/irq.h @@ -10,7 +10,7 @@ #define MIPS_CPU_IRQ_BASE 56 #define LOONGSON_UART_IRQ (MIPS_CPU_IRQ_BASE + 2) /* UART */ -#define LOONGSON_HT1_IRQ (MIPS_CPU_IRQ_BASE + 3) /* HT1 */ +#define LOONGSON_BRIDGE_IRQ (MIPS_CPU_IRQ_BASE + 3) /* CASCADE */ #define LOONGSON_TIMER_IRQ (MIPS_CPU_IRQ_BASE + 7) /* CPU Timer */ #define LOONGSON_HT1_CFG_BASE loongson_sysconf.ht_control_base diff --git a/arch/mips/include/asm/mach-loongson64/mmzone.h b/arch/mips/include/asm/mach-loongson64/mmzone.h index c9f7e231e66bb..59c8b11c090ee 100644 --- a/arch/mips/include/asm/mach-loongson64/mmzone.h +++ b/arch/mips/include/asm/mach-loongson64/mmzone.h @@ -21,6 +21,7 @@ #define NODE3_ADDRSPACE_OFFSET 0x300000000000UL #define pa_to_nid(addr) (((addr) & 0xf00000000000) >> NODE_ADDRSPACE_SHIFT) +#define nid_to_addrbase(nid) ((nid) << NODE_ADDRSPACE_SHIFT) #define LEVELS_PER_SLICE 128 diff --git a/arch/mips/include/asm/mmu.h b/arch/mips/include/asm/mmu.h index 0740be7d5d4ac..24d6b42345fb8 100644 --- a/arch/mips/include/asm/mmu.h +++ b/arch/mips/include/asm/mmu.h @@ -7,7 +7,7 @@ #include typedef struct { - unsigned long asid[NR_CPUS]; + u64 asid[NR_CPUS]; void *vdso; atomic_t fp_mode_switching; diff --git a/arch/mips/include/asm/mmu_context.h b/arch/mips/include/asm/mmu_context.h index 94414561de0e7..a589585be21be 100644 --- a/arch/mips/include/asm/mmu_context.h +++ b/arch/mips/include/asm/mmu_context.h @@ -76,14 
+76,14 @@ extern unsigned long pgd_current[]; * All unused by hardware upper bits will be considered * as a software asid extension. */ -static unsigned long asid_version_mask(unsigned int cpu) +static inline u64 asid_version_mask(unsigned int cpu) { unsigned long asid_mask = cpu_asid_mask(&cpu_data[cpu]); - return ~(asid_mask | (asid_mask - 1)); + return ~(u64)(asid_mask | (asid_mask - 1)); } -static unsigned long asid_first_version(unsigned int cpu) +static inline u64 asid_first_version(unsigned int cpu) { return ~asid_version_mask(cpu) + 1; } @@ -102,14 +102,12 @@ static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk) static inline void get_new_mmu_context(struct mm_struct *mm, unsigned long cpu) { - unsigned long asid = asid_cache(cpu); + u64 asid = asid_cache(cpu); if (!((asid += cpu_asid_inc()) & cpu_asid_mask(&cpu_data[cpu]))) { if (cpu_has_vtag_icache) flush_icache_all(); local_flush_tlb_all(); /* start new asid cycle */ - if (!asid) /* fix version if needed */ - asid = asid_first_version(cpu); } cpu_context(cpu, mm) = asid_cache(cpu) = asid; diff --git a/arch/mips/include/asm/mmzone.h b/arch/mips/include/asm/mmzone.h index f085fba41da50..b826b8473e956 100644 --- a/arch/mips/include/asm/mmzone.h +++ b/arch/mips/include/asm/mmzone.h @@ -7,7 +7,18 @@ #define _ASM_MMZONE_H_ #include -#include + +#ifdef CONFIG_NEED_MULTIPLE_NODES +# include +#endif + +#ifndef pa_to_nid +#define pa_to_nid(addr) 0 +#endif + +#ifndef nid_to_addrbase +#define nid_to_addrbase(nid) 0 +#endif #ifdef CONFIG_DISCONTIGMEM diff --git a/arch/mips/include/asm/pgtable-64.h b/arch/mips/include/asm/pgtable-64.h index 0036ea0c71735..93a9dce31f255 100644 --- a/arch/mips/include/asm/pgtable-64.h +++ b/arch/mips/include/asm/pgtable-64.h @@ -265,6 +265,11 @@ static inline int pmd_bad(pmd_t pmd) static inline int pmd_present(pmd_t pmd) { +#ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT + if (unlikely(pmd_val(pmd) & _PAGE_HUGE)) + return pmd_val(pmd) & _PAGE_PRESENT; +#endif + return 
pmd_val(pmd) != (unsigned long) invalid_pte_table; } diff --git a/arch/mips/include/asm/processor.h b/arch/mips/include/asm/processor.h index 49d6046ca1d0c..c373eb6050402 100644 --- a/arch/mips/include/asm/processor.h +++ b/arch/mips/include/asm/processor.h @@ -81,7 +81,7 @@ extern unsigned int vced_count, vcei_count; #endif -#define VDSO_RANDOMIZE_SIZE (TASK_IS_32BIT_ADDR ? SZ_1M : SZ_256M) +#define VDSO_RANDOMIZE_SIZE (TASK_IS_32BIT_ADDR ? SZ_1M : SZ_64M) extern unsigned long mips_stack_top(void); #define STACK_TOP mips_stack_top() diff --git a/arch/mips/include/asm/r4kcache.h b/arch/mips/include/asm/r4kcache.h index 7f12d7e27c94e..e5190126080ee 100644 --- a/arch/mips/include/asm/r4kcache.h +++ b/arch/mips/include/asm/r4kcache.h @@ -20,6 +20,7 @@ #include #include #include +#include #include /* for uaccess_kernel() */ extern void (*r4k_blast_dcache)(void); @@ -747,4 +748,25 @@ __BUILD_BLAST_CACHE_RANGE(s, scache, Hit_Writeback_Inv_SD, , ) __BUILD_BLAST_CACHE_RANGE(inv_d, dcache, Hit_Invalidate_D, , ) __BUILD_BLAST_CACHE_RANGE(inv_s, scache, Hit_Invalidate_SD, , ) +/* Currently, this is very specific to Loongson-3 */ +#define __BUILD_BLAST_CACHE_NODE(pfx, desc, indexop, hitop, lsize) \ +static inline void blast_##pfx##cache##lsize##_node(long node) \ +{ \ + unsigned long start = CAC_BASE | nid_to_addrbase(node); \ + unsigned long end = start + current_cpu_data.desc.waysize; \ + unsigned long ws_inc = 1UL << current_cpu_data.desc.waybit; \ + unsigned long ws_end = current_cpu_data.desc.ways << \ + current_cpu_data.desc.waybit; \ + unsigned long ws, addr; \ + \ + for (ws = 0; ws < ws_end; ws += ws_inc) \ + for (addr = start; addr < end; addr += lsize * 32) \ + cache##lsize##_unroll32(addr|ws, indexop); \ +} + +__BUILD_BLAST_CACHE_NODE(s, scache, Index_Writeback_Inv_SD, Hit_Writeback_Inv_SD, 16) +__BUILD_BLAST_CACHE_NODE(s, scache, Index_Writeback_Inv_SD, Hit_Writeback_Inv_SD, 32) +__BUILD_BLAST_CACHE_NODE(s, scache, Index_Writeback_Inv_SD, Hit_Writeback_Inv_SD, 64) 
+__BUILD_BLAST_CACHE_NODE(s, scache, Index_Writeback_Inv_SD, Hit_Writeback_Inv_SD, 128) + #endif /* _ASM_R4KCACHE_H */ diff --git a/arch/mips/include/asm/syscall.h b/arch/mips/include/asm/syscall.h index 0170602a1e4e3..6cf8ffb5367ec 100644 --- a/arch/mips/include/asm/syscall.h +++ b/arch/mips/include/asm/syscall.h @@ -73,7 +73,7 @@ static inline unsigned long mips_get_syscall_arg(unsigned long *arg, #ifdef CONFIG_64BIT case 4: case 5: case 6: case 7: #ifdef CONFIG_MIPS32_O32 - if (test_thread_flag(TIF_32BIT_REGS)) + if (test_tsk_thread_flag(task, TIF_32BIT_REGS)) return get_user(*arg, (int *)usp + n); else #endif diff --git a/arch/mips/kernel/crash.c b/arch/mips/kernel/crash.c index d455363d51c3d..4c07a43a3242a 100644 --- a/arch/mips/kernel/crash.c +++ b/arch/mips/kernel/crash.c @@ -36,6 +36,9 @@ static void crash_shutdown_secondary(void *passed_regs) if (!cpu_online(cpu)) return; + /* We won't be sent IPIs any more. */ + set_cpu_online(cpu, false); + local_irq_disable(); if (!cpumask_test_cpu(cpu, &cpus_in_crash)) crash_save_cpu(regs, cpu); diff --git a/arch/mips/kernel/ftrace.c b/arch/mips/kernel/ftrace.c index 7f3dfdbc3657e..b122cbb4aad18 100644 --- a/arch/mips/kernel/ftrace.c +++ b/arch/mips/kernel/ftrace.c @@ -322,7 +322,6 @@ void prepare_ftrace_return(unsigned long *parent_ra_addr, unsigned long self_ra, unsigned long fp) { unsigned long old_parent_ra; - struct ftrace_graph_ent trace; unsigned long return_hooker = (unsigned long) &return_to_handler; int faulted, insns; @@ -369,12 +368,6 @@ void prepare_ftrace_return(unsigned long *parent_ra_addr, unsigned long self_ra, if (unlikely(faulted)) goto out; - if (ftrace_push_return_trace(old_parent_ra, self_ra, &trace.depth, fp, - NULL) == -EBUSY) { - *parent_ra_addr = old_parent_ra; - return; - } - /* * Get the recorded ip of the current mcount calling site in the * __mcount_loc section, which will be used to filter the function @@ -382,13 +375,10 @@ void prepare_ftrace_return(unsigned long *parent_ra_addr, 
unsigned long self_ra, */ insns = core_kernel_text(self_ra) ? 2 : MCOUNT_OFFSET_INSNS + 1; - trace.func = self_ra - (MCOUNT_INSN_SIZE * insns); + self_ra -= (MCOUNT_INSN_SIZE * insns); - /* Only trace if the calling function expects to */ - if (!ftrace_graph_entry(&trace)) { - current->curr_ret_stack--; + if (function_graph_enter(old_parent_ra, self_ra, fp, NULL)) *parent_ra_addr = old_parent_ra; - } return; out: ftrace_graph_stop(); diff --git a/arch/mips/kernel/machine_kexec.c b/arch/mips/kernel/machine_kexec.c index 8b574bcd39ba8..4b3726e4fe3ac 100644 --- a/arch/mips/kernel/machine_kexec.c +++ b/arch/mips/kernel/machine_kexec.c @@ -118,6 +118,9 @@ machine_kexec(struct kimage *image) *ptr = (unsigned long) phys_to_virt(*ptr); } + /* Mark offline BEFORE disabling local irq. */ + set_cpu_online(smp_processor_id(), false); + /* * we do not want to be bothered. */ diff --git a/arch/mips/kernel/vdso.c b/arch/mips/kernel/vdso.c index 48a9c6b90e079..9df3ebdc7b0f7 100644 --- a/arch/mips/kernel/vdso.c +++ b/arch/mips/kernel/vdso.c @@ -126,8 +126,8 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp) /* Map delay slot emulation page */ base = mmap_region(NULL, STACK_TOP, PAGE_SIZE, - VM_READ|VM_WRITE|VM_EXEC| - VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC, + VM_READ | VM_EXEC | + VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC, 0, NULL); if (IS_ERR_VALUE(base)) { ret = base; diff --git a/arch/mips/lantiq/irq.c b/arch/mips/lantiq/irq.c index f0bc3312ed110..c4ef1c31e0c4f 100644 --- a/arch/mips/lantiq/irq.c +++ b/arch/mips/lantiq/irq.c @@ -224,9 +224,11 @@ static struct irq_chip ltq_eiu_type = { .irq_set_type = ltq_eiu_settype, }; -static void ltq_hw_irqdispatch(int module) +static void ltq_hw_irq_handler(struct irq_desc *desc) { + int module = irq_desc_get_irq(desc) - 2; u32 irq; + int hwirq; irq = ltq_icu_r32(module, LTQ_ICU_IM0_IOSR); if (irq == 0) @@ -237,7 +239,8 @@ static void ltq_hw_irqdispatch(int module) * other bits might be bogus */ irq = __fls(irq); - 
do_IRQ((int)irq + MIPS_CPU_IRQ_CASCADE + (INT_NUM_IM_OFFSET * module)); + hwirq = irq + MIPS_CPU_IRQ_CASCADE + (INT_NUM_IM_OFFSET * module); + generic_handle_irq(irq_linear_revmap(ltq_domain, hwirq)); /* if this is a EBU irq, we need to ack it or get a deadlock */ if ((irq == LTQ_ICU_EBU_IRQ) && (module == 0) && LTQ_EBU_PCC_ISTAT) @@ -245,49 +248,6 @@ static void ltq_hw_irqdispatch(int module) LTQ_EBU_PCC_ISTAT); } -#define DEFINE_HWx_IRQDISPATCH(x) \ - static void ltq_hw ## x ## _irqdispatch(void) \ - { \ - ltq_hw_irqdispatch(x); \ - } -DEFINE_HWx_IRQDISPATCH(0) -DEFINE_HWx_IRQDISPATCH(1) -DEFINE_HWx_IRQDISPATCH(2) -DEFINE_HWx_IRQDISPATCH(3) -DEFINE_HWx_IRQDISPATCH(4) - -#if MIPS_CPU_TIMER_IRQ == 7 -static void ltq_hw5_irqdispatch(void) -{ - do_IRQ(MIPS_CPU_TIMER_IRQ); -} -#else -DEFINE_HWx_IRQDISPATCH(5) -#endif - -static void ltq_hw_irq_handler(struct irq_desc *desc) -{ - ltq_hw_irqdispatch(irq_desc_get_irq(desc) - 2); -} - -asmlinkage void plat_irq_dispatch(void) -{ - unsigned int pending = read_c0_status() & read_c0_cause() & ST0_IM; - int irq; - - if (!pending) { - spurious_interrupt(); - return; - } - - pending >>= CAUSEB_IP; - while (pending) { - irq = fls(pending) - 1; - do_IRQ(MIPS_CPU_IRQ_BASE + irq); - pending &= ~BIT(irq); - } -} - static int icu_map(struct irq_domain *d, unsigned int irq, irq_hw_number_t hw) { struct irq_chip *chip = <q_irq_type; @@ -343,28 +303,10 @@ int __init icu_of_init(struct device_node *node, struct device_node *parent) for (i = 0; i < MAX_IM; i++) irq_set_chained_handler(i + 2, ltq_hw_irq_handler); - if (cpu_has_vint) { - pr_info("Setting up vectored interrupts\n"); - set_vi_handler(2, ltq_hw0_irqdispatch); - set_vi_handler(3, ltq_hw1_irqdispatch); - set_vi_handler(4, ltq_hw2_irqdispatch); - set_vi_handler(5, ltq_hw3_irqdispatch); - set_vi_handler(6, ltq_hw4_irqdispatch); - set_vi_handler(7, ltq_hw5_irqdispatch); - } - ltq_domain = irq_domain_add_linear(node, (MAX_IM * INT_NUM_IM_OFFSET) + MIPS_CPU_IRQ_CASCADE, 
&irq_domain_ops, 0); -#ifndef CONFIG_MIPS_MT_SMP - set_c0_status(IE_IRQ0 | IE_IRQ1 | IE_IRQ2 | - IE_IRQ3 | IE_IRQ4 | IE_IRQ5); -#else - set_c0_status(IE_SW0 | IE_SW1 | IE_IRQ0 | IE_IRQ1 | - IE_IRQ2 | IE_IRQ3 | IE_IRQ4 | IE_IRQ5); -#endif - /* tell oprofile which irq to use */ ltq_perfcount_irq = irq_create_mapping(ltq_domain, LTQ_PERF_IRQ); diff --git a/arch/mips/loongson64/loongson-3/irq.c b/arch/mips/loongson64/loongson-3/irq.c index cbeb20f9fc95c..5605061f5f981 100644 --- a/arch/mips/loongson64/loongson-3/irq.c +++ b/arch/mips/loongson64/loongson-3/irq.c @@ -96,51 +96,8 @@ void mach_irq_dispatch(unsigned int pending) } } -static struct irqaction cascade_irqaction = { - .handler = no_action, - .flags = IRQF_NO_SUSPEND, - .name = "cascade", -}; - -static inline void mask_loongson_irq(struct irq_data *d) -{ - clear_c0_status(0x100 << (d->irq - MIPS_CPU_IRQ_BASE)); - irq_disable_hazard(); - - /* Workaround: UART IRQ may deliver to any core */ - if (d->irq == LOONGSON_UART_IRQ) { - int cpu = smp_processor_id(); - int node_id = cpu_logical_map(cpu) / loongson_sysconf.cores_per_node; - int core_id = cpu_logical_map(cpu) % loongson_sysconf.cores_per_node; - u64 intenclr_addr = smp_group[node_id] | - (u64)(&LOONGSON_INT_ROUTER_INTENCLR); - u64 introuter_lpc_addr = smp_group[node_id] | - (u64)(&LOONGSON_INT_ROUTER_LPC); - - *(volatile u32 *)intenclr_addr = 1 << 10; - *(volatile u8 *)introuter_lpc_addr = 0x10 + (1<irq == LOONGSON_UART_IRQ) { - int cpu = smp_processor_id(); - int node_id = cpu_logical_map(cpu) / loongson_sysconf.cores_per_node; - int core_id = cpu_logical_map(cpu) % loongson_sysconf.cores_per_node; - u64 intenset_addr = smp_group[node_id] | - (u64)(&LOONGSON_INT_ROUTER_INTENSET); - u64 introuter_lpc_addr = smp_group[node_id] | - (u64)(&LOONGSON_INT_ROUTER_LPC); - - *(volatile u32 *)intenset_addr = 1 << 10; - *(volatile u8 *)introuter_lpc_addr = 0x10 + (1<irq - MIPS_CPU_IRQ_BASE)); - irq_enable_hazard(); -} +static inline void mask_loongson_irq(struct 
irq_data *d) { } +static inline void unmask_loongson_irq(struct irq_data *d) { } /* For MIPS IRQs which shared by all cores */ static struct irq_chip loongson_irq_chip = { @@ -183,12 +140,11 @@ void __init mach_init_irq(void) chip->irq_set_affinity = plat_set_irq_affinity; irq_set_chip_and_handler(LOONGSON_UART_IRQ, - &loongson_irq_chip, handle_level_irq); - - /* setup HT1 irq */ - setup_irq(LOONGSON_HT1_IRQ, &cascade_irqaction); + &loongson_irq_chip, handle_percpu_irq); + irq_set_chip_and_handler(LOONGSON_BRIDGE_IRQ, + &loongson_irq_chip, handle_percpu_irq); - set_c0_status(STATUSF_IP2 | STATUSF_IP6); + set_c0_status(STATUSF_IP2 | STATUSF_IP3 | STATUSF_IP6); } #ifdef CONFIG_HOTPLUG_CPU diff --git a/arch/mips/math-emu/dsemul.c b/arch/mips/math-emu/dsemul.c index 5450f4d1c920e..e2d46cb93ca98 100644 --- a/arch/mips/math-emu/dsemul.c +++ b/arch/mips/math-emu/dsemul.c @@ -214,8 +214,9 @@ int mips_dsemul(struct pt_regs *regs, mips_instruction ir, { int isa16 = get_isa16_mode(regs->cp0_epc); mips_instruction break_math; - struct emuframe __user *fr; - int err, fr_idx; + unsigned long fr_uaddr; + struct emuframe fr; + int fr_idx, ret; /* NOP is easy */ if (ir == 0) @@ -250,27 +251,31 @@ int mips_dsemul(struct pt_regs *regs, mips_instruction ir, fr_idx = alloc_emuframe(); if (fr_idx == BD_EMUFRAME_NONE) return SIGBUS; - fr = &dsemul_page()[fr_idx]; /* Retrieve the appropriately encoded break instruction */ break_math = BREAK_MATH(isa16); /* Write the instructions to the frame */ if (isa16) { - err = __put_user(ir >> 16, - (u16 __user *)(&fr->emul)); - err |= __put_user(ir & 0xffff, - (u16 __user *)((long)(&fr->emul) + 2)); - err |= __put_user(break_math >> 16, - (u16 __user *)(&fr->badinst)); - err |= __put_user(break_math & 0xffff, - (u16 __user *)((long)(&fr->badinst) + 2)); + union mips_instruction _emul = { + .halfword = { ir >> 16, ir } + }; + union mips_instruction _badinst = { + .halfword = { break_math >> 16, break_math } + }; + + fr.emul = _emul.word; + fr.badinst 
= _badinst.word; } else { - err = __put_user(ir, &fr->emul); - err |= __put_user(break_math, &fr->badinst); + fr.emul = ir; + fr.badinst = break_math; } - if (unlikely(err)) { + /* Write the frame to user memory */ + fr_uaddr = (unsigned long)&dsemul_page()[fr_idx]; + ret = access_process_vm(current, fr_uaddr, &fr, sizeof(fr), + FOLL_FORCE | FOLL_WRITE); + if (unlikely(ret != sizeof(fr))) { MIPS_FPU_EMU_INC_STATS(errors); free_emuframe(fr_idx, current->mm); return SIGBUS; @@ -282,10 +287,7 @@ int mips_dsemul(struct pt_regs *regs, mips_instruction ir, atomic_set(¤t->thread.bd_emu_frame, fr_idx); /* Change user register context to execute the frame */ - regs->cp0_epc = (unsigned long)&fr->emul | isa16; - - /* Ensure the icache observes our newly written frame */ - flush_cache_sigtramp((unsigned long)&fr->emul); + regs->cp0_epc = fr_uaddr | isa16; return 0; } diff --git a/arch/mips/mm/c-r3k.c b/arch/mips/mm/c-r3k.c index 3466fcdae0ca2..01848cdf20741 100644 --- a/arch/mips/mm/c-r3k.c +++ b/arch/mips/mm/c-r3k.c @@ -245,7 +245,7 @@ static void r3k_flush_cache_page(struct vm_area_struct *vma, pmd_t *pmdp; pte_t *ptep; - pr_debug("cpage[%08lx,%08lx]\n", + pr_debug("cpage[%08llx,%08lx]\n", cpu_context(smp_processor_id(), mm), addr); /* No ASID => no such page in the cache. 
*/ diff --git a/arch/mips/mm/c-r4k.c b/arch/mips/mm/c-r4k.c index a9ef057c79fe4..05a539d3a5970 100644 --- a/arch/mips/mm/c-r4k.c +++ b/arch/mips/mm/c-r4k.c @@ -459,11 +459,28 @@ static void r4k_blast_scache_setup(void) r4k_blast_scache = blast_scache128; } +static void (*r4k_blast_scache_node)(long node); + +static void r4k_blast_scache_node_setup(void) +{ + unsigned long sc_lsize = cpu_scache_line_size(); + + if (current_cpu_type() != CPU_LOONGSON3) + r4k_blast_scache_node = (void *)cache_noop; + else if (sc_lsize == 16) + r4k_blast_scache_node = blast_scache16_node; + else if (sc_lsize == 32) + r4k_blast_scache_node = blast_scache32_node; + else if (sc_lsize == 64) + r4k_blast_scache_node = blast_scache64_node; + else if (sc_lsize == 128) + r4k_blast_scache_node = blast_scache128_node; +} + static inline void local_r4k___flush_cache_all(void * args) { switch (current_cpu_type()) { case CPU_LOONGSON2: - case CPU_LOONGSON3: case CPU_R4000SC: case CPU_R4000MC: case CPU_R4400SC: @@ -480,6 +497,11 @@ static inline void local_r4k___flush_cache_all(void * args) r4k_blast_scache(); break; + case CPU_LOONGSON3: + /* Use get_ebase_cpunum() for both NUMA=y/n */ + r4k_blast_scache_node(get_ebase_cpunum() >> 2); + break; + case CPU_BMIPS5000: r4k_blast_scache(); __sync(); @@ -840,10 +862,14 @@ static void r4k_dma_cache_wback_inv(unsigned long addr, unsigned long size) preempt_disable(); if (cpu_has_inclusive_pcaches) { - if (size >= scache_size) - r4k_blast_scache(); - else + if (size >= scache_size) { + if (current_cpu_type() != CPU_LOONGSON3) + r4k_blast_scache(); + else + r4k_blast_scache_node(pa_to_nid(addr)); + } else { blast_scache_range(addr, addr + size); + } preempt_enable(); __sync(); return; @@ -877,9 +903,12 @@ static void r4k_dma_cache_inv(unsigned long addr, unsigned long size) preempt_disable(); if (cpu_has_inclusive_pcaches) { - if (size >= scache_size) - r4k_blast_scache(); - else { + if (size >= scache_size) { + if (current_cpu_type() != CPU_LOONGSON3) + 
r4k_blast_scache(); + else + r4k_blast_scache_node(pa_to_nid(addr)); + } else { /* * There is no clearly documented alignment requirement * for the cache instruction on MIPS processors and @@ -1918,6 +1947,7 @@ void r4k_cache_init(void) r4k_blast_scache_page_setup(); r4k_blast_scache_page_indexed_setup(); r4k_blast_scache_setup(); + r4k_blast_scache_node_setup(); #ifdef CONFIG_EVA r4k_blast_dcache_user_page_setup(); r4k_blast_icache_user_page_setup(); diff --git a/arch/mips/pci/msi-octeon.c b/arch/mips/pci/msi-octeon.c index 2a5bb849b10ef..288b58b00dc84 100644 --- a/arch/mips/pci/msi-octeon.c +++ b/arch/mips/pci/msi-octeon.c @@ -369,7 +369,9 @@ int __init octeon_msi_initialize(void) int irq; struct irq_chip *msi; - if (octeon_dma_bar_type == OCTEON_DMA_BAR_TYPE_PCIE) { + if (octeon_dma_bar_type == OCTEON_DMA_BAR_TYPE_INVALID) { + return 0; + } else if (octeon_dma_bar_type == OCTEON_DMA_BAR_TYPE_PCIE) { msi_rcv_reg[0] = CVMX_PEXP_NPEI_MSI_RCV0; msi_rcv_reg[1] = CVMX_PEXP_NPEI_MSI_RCV1; msi_rcv_reg[2] = CVMX_PEXP_NPEI_MSI_RCV2; diff --git a/arch/mips/pci/pci-legacy.c b/arch/mips/pci/pci-legacy.c index f1e92bf743c27..3c3b1e6abb535 100644 --- a/arch/mips/pci/pci-legacy.c +++ b/arch/mips/pci/pci-legacy.c @@ -127,8 +127,12 @@ static void pcibios_scanbus(struct pci_controller *hose) if (pci_has_flag(PCI_PROBE_ONLY)) { pci_bus_claim_resources(bus); } else { + struct pci_bus *child; + pci_bus_size_bridges(bus); pci_bus_assign_resources(bus); + list_for_each_entry(child, &bus->children, node) + pcie_bus_configure_settings(child); } pci_bus_add_devices(bus); } diff --git a/arch/mips/ralink/mt7620.c b/arch/mips/ralink/mt7620.c index 41b71c4352c25..c1ce6f43642bc 100644 --- a/arch/mips/ralink/mt7620.c +++ b/arch/mips/ralink/mt7620.c @@ -84,7 +84,7 @@ static struct rt2880_pmx_func pcie_rst_grp[] = { }; static struct rt2880_pmx_func nd_sd_grp[] = { FUNC("nand", MT7620_GPIO_MODE_NAND, 45, 15), - FUNC("sd", MT7620_GPIO_MODE_SD, 45, 15) + FUNC("sd", MT7620_GPIO_MODE_SD, 47, 13) }; 
static struct rt2880_pmx_group mt7620a_pinmux_data[] = { diff --git a/arch/mips/sibyte/common/Makefile b/arch/mips/sibyte/common/Makefile index b3d6bf23a6620..3ef3fb6581369 100644 --- a/arch/mips/sibyte/common/Makefile +++ b/arch/mips/sibyte/common/Makefile @@ -1,4 +1,5 @@ obj-y := cfe.o +obj-$(CONFIG_SWIOTLB) += dma.o obj-$(CONFIG_SIBYTE_BUS_WATCHER) += bus_watcher.o obj-$(CONFIG_SIBYTE_CFE_CONSOLE) += cfe_console.o obj-$(CONFIG_SIBYTE_TBPROF) += sb_tbprof.o diff --git a/arch/mips/sibyte/common/dma.c b/arch/mips/sibyte/common/dma.c new file mode 100644 index 0000000000000..eb47a94f3583e --- /dev/null +++ b/arch/mips/sibyte/common/dma.c @@ -0,0 +1,14 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* + * DMA support for Broadcom SiByte platforms. + * + * Copyright (c) 2018 Maciej W. Rozycki + */ + +#include +#include + +void __init plat_swiotlb_setup(void) +{ + swiotlb_init(1); +} diff --git a/arch/nds32/include/asm/pgtable.h b/arch/nds32/include/asm/pgtable.h index d3e19a55cf530..9f52db930c004 100644 --- a/arch/nds32/include/asm/pgtable.h +++ b/arch/nds32/include/asm/pgtable.h @@ -4,7 +4,7 @@ #ifndef _ASMNDS32_PGTABLE_H #define _ASMNDS32_PGTABLE_H -#define __PAGETABLE_PMD_FOLDED +#define __PAGETABLE_PMD_FOLDED 1 #include #include diff --git a/arch/nds32/kernel/ftrace.c b/arch/nds32/kernel/ftrace.c index a0a9679ad5dee..8a41372551ff3 100644 --- a/arch/nds32/kernel/ftrace.c +++ b/arch/nds32/kernel/ftrace.c @@ -211,29 +211,15 @@ void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr, unsigned long frame_pointer) { unsigned long return_hooker = (unsigned long)&return_to_handler; - struct ftrace_graph_ent trace; unsigned long old; - int err; if (unlikely(atomic_read(¤t->tracing_graph_pause))) return; old = *parent; - trace.func = self_addr; - trace.depth = current->curr_ret_stack + 1; - - /* Only trace if the calling function expects to */ - if (!ftrace_graph_entry(&trace)) - return; - - err = ftrace_push_return_trace(old, self_addr, &trace.depth, - 
frame_pointer, NULL); - - if (err == -EBUSY) - return; - - *parent = return_hooker; + if (!function_graph_enter(old, self_addr, frame_pointer, NULL)) + *parent = return_hooker; } noinline void ftrace_graph_caller(void) diff --git a/arch/parisc/Makefile b/arch/parisc/Makefile index 5ce030266e7d0..253d7ca714724 100644 --- a/arch/parisc/Makefile +++ b/arch/parisc/Makefile @@ -71,6 +71,13 @@ ifdef CONFIG_MLONGCALLS KBUILD_CFLAGS_KERNEL += -mlong-calls endif +# Without this, "ld -r" results in .text sections that are too big (> 0x40000) +# for branches to reach stubs. And multiple .text sections trigger a warning +# when creating the sysfs module information section. +ifndef CONFIG_64BIT +KBUILD_CFLAGS_MODULE += -ffunction-sections +endif + # select which processor to optimise for cflags-$(CONFIG_PA7000) += -march=1.1 -mschedule=7100 cflags-$(CONFIG_PA7200) += -march=1.1 -mschedule=7200 diff --git a/arch/parisc/include/asm/pgtable.h b/arch/parisc/include/asm/pgtable.h index fa6b7c78f18a9..ff0860b2b21ab 100644 --- a/arch/parisc/include/asm/pgtable.h +++ b/arch/parisc/include/asm/pgtable.h @@ -117,7 +117,7 @@ static inline void purge_tlb_entries(struct mm_struct *mm, unsigned long addr) #if CONFIG_PGTABLE_LEVELS == 3 #define BITS_PER_PMD (PAGE_SHIFT + PMD_ORDER - BITS_PER_PMD_ENTRY) #else -#define __PAGETABLE_PMD_FOLDED +#define __PAGETABLE_PMD_FOLDED 1 #define BITS_PER_PMD 0 #endif #define PTRS_PER_PMD (1UL << BITS_PER_PMD) diff --git a/arch/parisc/kernel/entry.S b/arch/parisc/kernel/entry.S index 242c5ab656113..d2f92273fe376 100644 --- a/arch/parisc/kernel/entry.S +++ b/arch/parisc/kernel/entry.S @@ -186,7 +186,7 @@ bv,n 0(%r3) nop .word 0 /* checksum (will be patched) */ - .word PA(os_hpmc) /* address of handler */ + .word 0 /* address of handler */ .word 0 /* length of handler */ .endm diff --git a/arch/parisc/kernel/ftrace.c b/arch/parisc/kernel/ftrace.c index 6fa8535d3cceb..e46a4157a8948 100644 --- a/arch/parisc/kernel/ftrace.c +++ b/arch/parisc/kernel/ftrace.c @@ 
-30,7 +30,6 @@ static void __hot prepare_ftrace_return(unsigned long *parent, unsigned long self_addr) { unsigned long old; - struct ftrace_graph_ent trace; extern int parisc_return_to_handler; if (unlikely(ftrace_graph_is_dead())) @@ -41,19 +40,9 @@ static void __hot prepare_ftrace_return(unsigned long *parent, old = *parent; - trace.func = self_addr; - trace.depth = current->curr_ret_stack + 1; - - /* Only trace if the calling function expects to */ - if (!ftrace_graph_entry(&trace)) - return; - - if (ftrace_push_return_trace(old, self_addr, &trace.depth, - 0, NULL) == -EBUSY) - return; - - /* activate parisc_return_to_handler() as return point */ - *parent = (unsigned long) &parisc_return_to_handler; + if (!function_graph_enter(old, self_addr, 0, NULL)) + /* activate parisc_return_to_handler() as return point */ + *parent = (unsigned long) &parisc_return_to_handler; } #endif /* CONFIG_FUNCTION_GRAPH_TRACER */ diff --git a/arch/parisc/kernel/hpmc.S b/arch/parisc/kernel/hpmc.S index 781c3b9a3e46a..fde6541155645 100644 --- a/arch/parisc/kernel/hpmc.S +++ b/arch/parisc/kernel/hpmc.S @@ -85,7 +85,7 @@ END(hpmc_pim_data) .import intr_save, code .align 16 -ENTRY_CFI(os_hpmc) +ENTRY(os_hpmc) .os_hpmc: /* @@ -302,7 +302,6 @@ os_hpmc_6: b . nop .align 16 /* make function length multiple of 16 bytes */ -ENDPROC_CFI(os_hpmc) .os_hpmc_end: diff --git a/arch/parisc/kernel/traps.c b/arch/parisc/kernel/traps.c index 68f10f87073da..abeb5321a83fc 100644 --- a/arch/parisc/kernel/traps.c +++ b/arch/parisc/kernel/traps.c @@ -802,7 +802,8 @@ void __init initialize_ivt(const void *iva) * the Length/4 words starting at Address is zero. 
*/ - /* Compute Checksum for HPMC handler */ + /* Setup IVA and compute checksum for HPMC handler */ + ivap[6] = (u32)__pa(os_hpmc); length = os_hpmc_size; ivap[7] = length; diff --git a/arch/parisc/mm/init.c b/arch/parisc/mm/init.c index 74842d28a7a16..aae9b0d71c1e1 100644 --- a/arch/parisc/mm/init.c +++ b/arch/parisc/mm/init.c @@ -494,12 +494,8 @@ static void __init map_pages(unsigned long start_vaddr, pte = pte_mkhuge(pte); } - if (address >= end_paddr) { - if (force) - break; - else - pte_val(pte) = 0; - } + if (address >= end_paddr) + break; set_pte(pg_table, pte); diff --git a/arch/powerpc/Makefile b/arch/powerpc/Makefile index 11a1acba164a1..c4c03992ee828 100644 --- a/arch/powerpc/Makefile +++ b/arch/powerpc/Makefile @@ -160,8 +160,17 @@ else CFLAGS-$(CONFIG_GENERIC_CPU) += -mcpu=powerpc64 endif +ifdef CONFIG_FUNCTION_TRACER +CC_FLAGS_FTRACE := -pg ifdef CONFIG_MPROFILE_KERNEL - CC_FLAGS_FTRACE := -pg -mprofile-kernel +CC_FLAGS_FTRACE += -mprofile-kernel +endif +# Work around gcc code-gen bugs with -pg / -fno-omit-frame-pointer in gcc <= 4.8 +# https://gcc.gnu.org/bugzilla/show_bug.cgi?id=44199 +# https://gcc.gnu.org/bugzilla/show_bug.cgi?id=52828 +ifneq ($(cc-name),clang) +CC_FLAGS_FTRACE += $(call cc-ifversion, -lt, 0409, -mno-sched-epilog) +endif endif CFLAGS-$(CONFIG_TARGET_CPU_BOOL) += $(call cc-option,-mcpu=$(CONFIG_TARGET_CPU)) @@ -229,16 +238,15 @@ ifdef CONFIG_6xx KBUILD_CFLAGS += -mcpu=powerpc endif -# Work around a gcc code-gen bug with -fno-omit-frame-pointer. -ifdef CONFIG_FUNCTION_TRACER -KBUILD_CFLAGS += -mno-sched-epilog -endif - cpu-as-$(CONFIG_4xx) += -Wa,-m405 cpu-as-$(CONFIG_ALTIVEC) += $(call as-option,-Wa$(comma)-maltivec) cpu-as-$(CONFIG_E200) += -Wa,-me200 cpu-as-$(CONFIG_E500) += -Wa,-me500 -cpu-as-$(CONFIG_PPC_BOOK3S_64) += -Wa,-mpower4 + +# When using '-many -mpower4' gas will first try and find a matching power4 +# mnemonic and failing that it will allow any valid mnemonic that GAS knows +# about. 
GCC will pass -many to GAS when assembling, clang does not. +cpu-as-$(CONFIG_PPC_BOOK3S_64) += -Wa,-mpower4 -Wa,-many cpu-as-$(CONFIG_PPC_E500MC) += $(call as-option,-Wa$(comma)-me500mc) KBUILD_AFLAGS += $(cpu-as-y) @@ -404,36 +412,9 @@ archprepare: checkbin # to stdout and these checks are run even on install targets. TOUT := .tmp_gas_check -# Check gcc and binutils versions: -# - gcc-3.4 and binutils-2.14 are a fatal combination -# - Require gcc 4.0 or above on 64-bit -# - gcc-4.2.0 has issues compiling modules on 64-bit +# Check toolchain versions: +# - gcc-4.6 is the minimum kernel-wide version so nothing required. checkbin: - @if test "$(cc-name)" != "clang" \ - && test "$(cc-version)" = "0304" ; then \ - if ! /bin/echo mftb 5 | $(AS) -v -mppc -many -o $(TOUT) >/dev/null 2>&1 ; then \ - echo -n '*** ${VERSION}.${PATCHLEVEL} kernels no longer build '; \ - echo 'correctly with gcc-3.4 and your version of binutils.'; \ - echo '*** Please upgrade your binutils or downgrade your gcc'; \ - false; \ - fi ; \ - fi - @if test "$(cc-name)" != "clang" \ - && test "$(cc-version)" -lt "0400" \ - && test "x${CONFIG_PPC64}" = "xy" ; then \ - echo -n "Sorry, GCC v4.0 or above is required to build " ; \ - echo "the 64-bit powerpc kernel." ; \ - false ; \ - fi - @if test "$(cc-name)" != "clang" \ - && test "$(cc-fullversion)" = "040200" \ - && test "x${CONFIG_MODULES}${CONFIG_PPC64}" = "xyy" ; then \ - echo -n '*** GCC-4.2.0 cannot compile the 64-bit powerpc ' ; \ - echo 'kernel with modules enabled.' 
; \ - echo -n '*** Please use a different GCC version or ' ; \ - echo 'disable kernel modules' ; \ - false ; \ - fi @if test "x${CONFIG_CPU_LITTLE_ENDIAN}" = "xy" \ && $(LD) --version | head -1 | grep ' 2\.24$$' >/dev/null ; then \ echo -n '*** binutils 2.24 miscompiles weak symbols ' ; \ diff --git a/arch/powerpc/boot/Makefile b/arch/powerpc/boot/Makefile index 0fb96c26136f6..25e3184f11f78 100644 --- a/arch/powerpc/boot/Makefile +++ b/arch/powerpc/boot/Makefile @@ -55,6 +55,11 @@ BOOTAFLAGS := -D__ASSEMBLY__ $(BOOTCFLAGS) -traditional -nostdinc BOOTARFLAGS := -cr$(KBUILD_ARFLAGS) +ifdef CONFIG_CC_IS_CLANG +BOOTCFLAGS += $(CLANG_FLAGS) +BOOTAFLAGS += $(CLANG_FLAGS) +endif + ifdef CONFIG_DEBUG_INFO BOOTCFLAGS += -g endif diff --git a/arch/powerpc/boot/crt0.S b/arch/powerpc/boot/crt0.S index dcf2f15e67971..9b9d17437373b 100644 --- a/arch/powerpc/boot/crt0.S +++ b/arch/powerpc/boot/crt0.S @@ -15,7 +15,7 @@ RELA = 7 RELACOUNT = 0x6ffffff9 - .text + .data /* A procedure descriptor used when booting this as a COFF file. * When making COFF, this comes first in the link and we're * linked at 0x500000. @@ -23,6 +23,8 @@ RELACOUNT = 0x6ffffff9 .globl _zimage_start_opd _zimage_start_opd: .long 0x500000, 0, 0, 0 + .text + b _zimage_start #ifdef __powerpc64__ .balign 8 @@ -47,8 +49,10 @@ p_end: .long _end p_pstack: .long _platform_stack_top #endif - .weak _zimage_start .globl _zimage_start + /* Clang appears to require the .weak directive to be after the symbol + * is defined. 
See https://bugs.llvm.org/show_bug.cgi?id=38921 */ + .weak _zimage_start _zimage_start: .globl _zimage_start_lib _zimage_start_lib: diff --git a/arch/powerpc/include/asm/io.h b/arch/powerpc/include/asm/io.h index e0331e7545685..b855f56489acc 100644 --- a/arch/powerpc/include/asm/io.h +++ b/arch/powerpc/include/asm/io.h @@ -285,19 +285,13 @@ extern void _memcpy_toio(volatile void __iomem *dest, const void *src, * their hooks, a bitfield is reserved for use by the platform near the * top of MMIO addresses (not PIO, those have to cope the hard way). * - * This bit field is 12 bits and is at the top of the IO virtual - * addresses PCI_IO_INDIRECT_TOKEN_MASK. + * The highest address in the kernel virtual space are: * - * The kernel virtual space is thus: + * d0003fffffffffff # with Hash MMU + * c00fffffffffffff # with Radix MMU * - * 0xD000000000000000 : vmalloc - * 0xD000080000000000 : PCI PHB IO space - * 0xD000080080000000 : ioremap - * 0xD0000fffffffffff : end of ioremap region - * - * Since the top 4 bits are reserved as the region ID, we use thus - * the next 12 bits and keep 4 bits available for the future if the - * virtual address space is ever to be extended. + * The top 4 bits are reserved as the region ID on hash, leaving us 8 bits + * that can be used for the field. 
* * The direct IO mapping operations will then mask off those bits * before doing the actual access, though that only happen when @@ -309,8 +303,8 @@ extern void _memcpy_toio(volatile void __iomem *dest, const void *src, */ #ifdef CONFIG_PPC_INDIRECT_MMIO -#define PCI_IO_IND_TOKEN_MASK 0x0fff000000000000ul -#define PCI_IO_IND_TOKEN_SHIFT 48 +#define PCI_IO_IND_TOKEN_SHIFT 52 +#define PCI_IO_IND_TOKEN_MASK (0xfful << PCI_IO_IND_TOKEN_SHIFT) #define PCI_FIX_ADDR(addr) \ ((PCI_IO_ADDR)(((unsigned long)(addr)) & ~PCI_IO_IND_TOKEN_MASK)) #define PCI_GET_ADDR_TOKEN(addr) \ diff --git a/arch/powerpc/include/asm/mmu-8xx.h b/arch/powerpc/include/asm/mmu-8xx.h index 4f547752ae795..193f53116c7ae 100644 --- a/arch/powerpc/include/asm/mmu-8xx.h +++ b/arch/powerpc/include/asm/mmu-8xx.h @@ -34,20 +34,12 @@ * respectively NA for All or X for Supervisor and no access for User. * Then we use the APG to say whether accesses are according to Page rules or * "all Supervisor" rules (Access to all) - * We also use the 2nd APG bit for _PAGE_ACCESSED when having SWAP: - * When that bit is not set access is done iaw "all user" - * which means no access iaw page rules. - * Therefore, we define 4 APG groups. lsb is _PMD_USER, 2nd is _PAGE_ACCESSED - * 0x => No access => 11 (all accesses performed as user iaw page definition) - * 10 => No user => 01 (all accesses performed according to page definition) - * 11 => User => 00 (all accesses performed as supervisor iaw page definition) + * Therefore, we define 2 APG groups. lsb is _PMD_USER + * 0 => No user => 01 (all accesses performed according to page definition) + * 1 => User => 00 (all accesses performed as supervisor iaw page definition) * We define all 16 groups so that all other bits of APG can take any value */ -#ifdef CONFIG_SWAP -#define MI_APG_INIT 0xf4f4f4f4 -#else #define MI_APG_INIT 0x44444444 -#endif /* The effective page number register. When read, contains the information * about the last instruction TLB miss. 
When MI_RPN is written, bits in @@ -115,20 +107,12 @@ * Supervisor and no access for user and NA for ALL. * Then we use the APG to say whether accesses are according to Page rules or * "all Supervisor" rules (Access to all) - * We also use the 2nd APG bit for _PAGE_ACCESSED when having SWAP: - * When that bit is not set access is done iaw "all user" - * which means no access iaw page rules. - * Therefore, we define 4 APG groups. lsb is _PMD_USER, 2nd is _PAGE_ACCESSED - * 0x => No access => 11 (all accesses performed as user iaw page definition) - * 10 => No user => 01 (all accesses performed according to page definition) - * 11 => User => 00 (all accesses performed as supervisor iaw page definition) + * Therefore, we define 2 APG groups. lsb is _PMD_USER + * 0 => No user => 01 (all accesses performed according to page definition) + * 1 => User => 00 (all accesses performed as supervisor iaw page definition) * We define all 16 groups so that all other bits of APG can take any value */ -#ifdef CONFIG_SWAP -#define MD_APG_INIT 0xf4f4f4f4 -#else #define MD_APG_INIT 0x44444444 -#endif /* The effective page number register. When read, contains the information * about the last instruction TLB miss. 
When MD_RPN is written, bits in @@ -180,12 +164,6 @@ */ #define SPRN_M_TW 799 -/* APGs */ -#define M_APG0 0x00000000 -#define M_APG1 0x00000020 -#define M_APG2 0x00000040 -#define M_APG3 0x00000060 - #ifdef CONFIG_PPC_MM_SLICES #include #define SLICE_ARRAY_SIZE (1 << (32 - SLICE_LOW_SHIFT - 1)) diff --git a/arch/powerpc/include/asm/mpic.h b/arch/powerpc/include/asm/mpic.h index fad8ddd697ac4..0abf2e7fd2226 100644 --- a/arch/powerpc/include/asm/mpic.h +++ b/arch/powerpc/include/asm/mpic.h @@ -393,7 +393,14 @@ extern struct bus_type mpic_subsys; #define MPIC_REGSET_TSI108 MPIC_REGSET(1) /* Tsi108/109 PIC */ /* Get the version of primary MPIC */ +#ifdef CONFIG_MPIC extern u32 fsl_mpic_primary_get_version(void); +#else +static inline u32 fsl_mpic_primary_get_version(void) +{ + return 0; +} +#endif /* Allocate the controller structure and setup the linux irq descs * for the range if interrupts passed in. No HW initialization is diff --git a/arch/powerpc/kernel/Makefile b/arch/powerpc/kernel/Makefile index 3b66f2c19c84e..eac18790d1b15 100644 --- a/arch/powerpc/kernel/Makefile +++ b/arch/powerpc/kernel/Makefile @@ -5,6 +5,9 @@ CFLAGS_ptrace.o += -DUTS_MACHINE='"$(UTS_MACHINE)"' +# Disable clang warning for using setjmp without setjmp.h header +CFLAGS_crash.o += $(call cc-disable-warning, builtin-requires-header) + subdir-ccflags-$(CONFIG_PPC_WERROR) := -Werror ifdef CONFIG_PPC64 @@ -22,10 +25,10 @@ CFLAGS_prom.o += $(DISABLE_LATENT_ENTROPY_PLUGIN) ifdef CONFIG_FUNCTION_TRACER # Do not trace early boot code -CFLAGS_REMOVE_cputable.o = -mno-sched-epilog $(CC_FLAGS_FTRACE) -CFLAGS_REMOVE_prom_init.o = -mno-sched-epilog $(CC_FLAGS_FTRACE) -CFLAGS_REMOVE_btext.o = -mno-sched-epilog $(CC_FLAGS_FTRACE) -CFLAGS_REMOVE_prom.o = -mno-sched-epilog $(CC_FLAGS_FTRACE) +CFLAGS_REMOVE_cputable.o = $(CC_FLAGS_FTRACE) +CFLAGS_REMOVE_prom_init.o = $(CC_FLAGS_FTRACE) +CFLAGS_REMOVE_btext.o = $(CC_FLAGS_FTRACE) +CFLAGS_REMOVE_prom.o = $(CC_FLAGS_FTRACE) endif obj-y := cputable.o ptrace.o 
syscalls.o \ diff --git a/arch/powerpc/kernel/eeh.c b/arch/powerpc/kernel/eeh.c index 6ebba3e48b012..c72767a5327ad 100644 --- a/arch/powerpc/kernel/eeh.c +++ b/arch/powerpc/kernel/eeh.c @@ -169,6 +169,11 @@ static size_t eeh_dump_dev_log(struct eeh_dev *edev, char *buf, size_t len) int n = 0, l = 0; char buffer[128]; + if (!pdn) { + pr_warn("EEH: Note: No error log for absent device.\n"); + return 0; + } + n += scnprintf(buf+n, len-n, "%04x:%02x:%02x.%01x\n", pdn->phb->global_number, pdn->busno, PCI_SLOT(pdn->devfn), PCI_FUNC(pdn->devfn)); diff --git a/arch/powerpc/kernel/head_8xx.S b/arch/powerpc/kernel/head_8xx.S index 6582f824d6206..81d4574d1f377 100644 --- a/arch/powerpc/kernel/head_8xx.S +++ b/arch/powerpc/kernel/head_8xx.S @@ -353,13 +353,14 @@ _ENTRY(ITLBMiss_cmp) #if defined(ITLB_MISS_KERNEL) || defined(CONFIG_HUGETLB_PAGE) mtcr r12 #endif - -#ifdef CONFIG_SWAP - rlwinm r11, r10, 31, _PAGE_ACCESSED >> 1 -#endif /* Load the MI_TWC with the attributes for this "segment." */ mtspr SPRN_MI_TWC, r11 /* Set segment attributes */ +#ifdef CONFIG_SWAP + rlwinm r11, r10, 32-5, _PAGE_PRESENT + and r11, r11, r10 + rlwimi r10, r11, 0, _PAGE_PRESENT +#endif li r11, RPN_PATTERN | 0x200 /* The Linux PTE won't go exactly into the MMU TLB. * Software indicator bits 20 and 23 must be clear. @@ -470,14 +471,22 @@ _ENTRY(DTLBMiss_jmp) * above. */ rlwimi r11, r10, 0, _PAGE_GUARDED -#ifdef CONFIG_SWAP - /* _PAGE_ACCESSED has to be set. We use second APG bit for that, 0 - * on that bit will represent a Non Access group - */ - rlwinm r11, r10, 31, _PAGE_ACCESSED >> 1 -#endif mtspr SPRN_MD_TWC, r11 + /* Both _PAGE_ACCESSED and _PAGE_PRESENT has to be set. + * We also need to know if the insn is a load/store, so: + * Clear _PAGE_PRESENT and load that which will + * trap into DTLB Error with store bit set accordinly. 
+ */ + /* PRESENT=0x1, ACCESSED=0x20 + * r11 = ((r10 & PRESENT) & ((r10 & ACCESSED) >> 5)); + * r10 = (r10 & ~PRESENT) | r11; + */ +#ifdef CONFIG_SWAP + rlwinm r11, r10, 32-5, _PAGE_PRESENT + and r11, r11, r10 + rlwimi r10, r11, 0, _PAGE_PRESENT +#endif /* The Linux PTE won't go exactly into the MMU TLB. * Software indicator bits 24, 25, 26, and 27 must be * set. All other Linux PTE bits control the behavior @@ -637,8 +646,8 @@ InstructionBreakpoint: */ DTLBMissIMMR: mtcr r12 - /* Set 512k byte guarded page and mark it valid and accessed */ - li r10, MD_PS512K | MD_GUARDED | MD_SVALID | M_APG2 + /* Set 512k byte guarded page and mark it valid */ + li r10, MD_PS512K | MD_GUARDED | MD_SVALID mtspr SPRN_MD_TWC, r10 mfspr r10, SPRN_IMMR /* Get current IMMR */ rlwinm r10, r10, 0, 0xfff80000 /* Get 512 kbytes boundary */ @@ -656,8 +665,8 @@ _ENTRY(dtlb_miss_exit_2) DTLBMissLinear: mtcr r12 - /* Set 8M byte page and mark it valid and accessed */ - li r11, MD_PS8MEG | MD_SVALID | M_APG2 + /* Set 8M byte page and mark it valid */ + li r11, MD_PS8MEG | MD_SVALID mtspr SPRN_MD_TWC, r11 rlwinm r10, r10, 0, 0x0f800000 /* 8xx supports max 256Mb RAM */ ori r10, r10, 0xf0 | MD_SPS16K | _PAGE_PRIVILEGED | _PAGE_DIRTY | \ @@ -675,8 +684,8 @@ _ENTRY(dtlb_miss_exit_3) #ifndef CONFIG_PIN_TLB_TEXT ITLBMissLinear: mtcr r12 - /* Set 8M byte page and mark it valid,accessed */ - li r11, MI_PS8MEG | MI_SVALID | M_APG2 + /* Set 8M byte page and mark it valid */ + li r11, MI_PS8MEG | MI_SVALID mtspr SPRN_MI_TWC, r11 rlwinm r10, r10, 0, 0x0f800000 /* 8xx supports max 256Mb RAM */ ori r10, r10, 0xf0 | MI_SPS16K | _PAGE_PRIVILEGED | _PAGE_DIRTY | \ @@ -960,7 +969,7 @@ initial_mmu: ori r8, r8, MI_EVALID /* Mark it valid */ mtspr SPRN_MI_EPN, r8 li r8, MI_PS8MEG /* Set 8M byte page */ - ori r8, r8, MI_SVALID | M_APG2 /* Make it valid, APG 2 */ + ori r8, r8, MI_SVALID /* Make it valid */ mtspr SPRN_MI_TWC, r8 li r8, MI_BOOTINIT /* Create RPN for address 0 */ mtspr SPRN_MI_RPN, r8 /* Store TLB entry 
*/ @@ -987,7 +996,7 @@ initial_mmu: ori r8, r8, MD_EVALID /* Mark it valid */ mtspr SPRN_MD_EPN, r8 li r8, MD_PS512K | MD_GUARDED /* Set 512k byte page */ - ori r8, r8, MD_SVALID | M_APG2 /* Make it valid and accessed */ + ori r8, r8, MD_SVALID /* Make it valid */ mtspr SPRN_MD_TWC, r8 mr r8, r9 /* Create paddr for TLB */ ori r8, r8, MI_BOOTINIT|0x2 /* Inhibit cache -- Cort */ diff --git a/arch/powerpc/kernel/legacy_serial.c b/arch/powerpc/kernel/legacy_serial.c index 33b34a58fc62f..5b9dce17f0c92 100644 --- a/arch/powerpc/kernel/legacy_serial.c +++ b/arch/powerpc/kernel/legacy_serial.c @@ -372,6 +372,8 @@ void __init find_legacy_serial_ports(void) /* Now find out if one of these is out firmware console */ path = of_get_property(of_chosen, "linux,stdout-path", NULL); + if (path == NULL) + path = of_get_property(of_chosen, "stdout-path", NULL); if (path != NULL) { stdout = of_find_node_by_path(path); if (stdout) @@ -595,8 +597,10 @@ static int __init check_legacy_serial_console(void) /* We are getting a weird phandle from OF ... */ /* ... 
So use the full path instead */ name = of_get_property(of_chosen, "linux,stdout-path", NULL); + if (name == NULL) + name = of_get_property(of_chosen, "stdout-path", NULL); if (name == NULL) { - DBG(" no linux,stdout-path !\n"); + DBG(" no stdout-path !\n"); return -ENODEV; } prom_stdout = of_find_node_by_path(name); diff --git a/arch/powerpc/kernel/mce_power.c b/arch/powerpc/kernel/mce_power.c index 3497c8329c1d7..3022d67f0c48b 100644 --- a/arch/powerpc/kernel/mce_power.c +++ b/arch/powerpc/kernel/mce_power.c @@ -89,6 +89,13 @@ static void flush_and_reload_slb(void) static void flush_erat(void) { +#ifdef CONFIG_PPC_BOOK3S_64 + if (!early_cpu_has_feature(CPU_FTR_ARCH_300)) { + flush_and_reload_slb(); + return; + } +#endif + /* PPC_INVALIDATE_ERAT can only be used on ISA v3 and newer */ asm volatile(PPC_INVALIDATE_ERAT : : :"memory"); } diff --git a/arch/powerpc/kernel/module.c b/arch/powerpc/kernel/module.c index 77371c9ef3d8f..2d861a36662ed 100644 --- a/arch/powerpc/kernel/module.c +++ b/arch/powerpc/kernel/module.c @@ -74,6 +74,14 @@ int module_finalize(const Elf_Ehdr *hdr, (void *)sect->sh_addr + sect->sh_size); #endif /* CONFIG_PPC64 */ +#ifdef PPC64_ELF_ABI_v1 + sect = find_section(hdr, sechdrs, ".opd"); + if (sect != NULL) { + me->arch.start_opd = sect->sh_addr; + me->arch.end_opd = sect->sh_addr + sect->sh_size; + } +#endif /* PPC64_ELF_ABI_v1 */ + #ifdef CONFIG_PPC_BARRIER_NOSPEC sect = find_section(hdr, sechdrs, "__spec_barrier_fixup"); if (sect != NULL) diff --git a/arch/powerpc/kernel/module_64.c b/arch/powerpc/kernel/module_64.c index b8d61e019d061..8661eea78503f 100644 --- a/arch/powerpc/kernel/module_64.c +++ b/arch/powerpc/kernel/module_64.c @@ -360,11 +360,6 @@ int module_frob_arch_sections(Elf64_Ehdr *hdr, else if (strcmp(secstrings+sechdrs[i].sh_name,"__versions")==0) dedotify_versions((void *)hdr + sechdrs[i].sh_offset, sechdrs[i].sh_size); - else if (!strcmp(secstrings + sechdrs[i].sh_name, ".opd")) { - me->arch.start_opd = sechdrs[i].sh_addr; - 
me->arch.end_opd = sechdrs[i].sh_addr + - sechdrs[i].sh_size; - } /* We don't handle .init for the moment: rename to _init */ while ((p = strstr(secstrings + sechdrs[i].sh_name, ".init"))) @@ -685,7 +680,14 @@ int apply_relocate_add(Elf64_Shdr *sechdrs, case R_PPC64_REL32: /* 32 bits relative (used by relative exception tables) */ - *(u32 *)location = value - (unsigned long)location; + /* Convert value to relative */ + value -= (unsigned long)location; + if (value + 0x80000000 > 0xffffffff) { + pr_err("%s: REL32 %li out of range!\n", + me->name, (long int)value); + return -ENOEXEC; + } + *(u32 *)location = value; break; case R_PPC64_TOCSAVE: diff --git a/arch/powerpc/kernel/msi.c b/arch/powerpc/kernel/msi.c index dab616a33b8db..f2197654be070 100644 --- a/arch/powerpc/kernel/msi.c +++ b/arch/powerpc/kernel/msi.c @@ -34,5 +34,10 @@ void arch_teardown_msi_irqs(struct pci_dev *dev) { struct pci_controller *phb = pci_bus_to_host(dev->bus); - phb->controller_ops.teardown_msi_irqs(dev); + /* + * We can be called even when arch_setup_msi_irqs() returns -ENOSYS, + * so check the pointer again. 
+ */ + if (phb->controller_ops.teardown_msi_irqs) + phb->controller_ops.teardown_msi_irqs(dev); } diff --git a/arch/powerpc/kernel/security.c b/arch/powerpc/kernel/security.c index f6f469fc4073e..1b395b85132be 100644 --- a/arch/powerpc/kernel/security.c +++ b/arch/powerpc/kernel/security.c @@ -22,7 +22,7 @@ enum count_cache_flush_type { COUNT_CACHE_FLUSH_SW = 0x2, COUNT_CACHE_FLUSH_HW = 0x4, }; -static enum count_cache_flush_type count_cache_flush_type; +static enum count_cache_flush_type count_cache_flush_type = COUNT_CACHE_FLUSH_NONE; bool barrier_nospec_enabled; static bool no_nospec; diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c index 6a501b25dd85f..faf00222b324c 100644 --- a/arch/powerpc/kernel/setup_64.c +++ b/arch/powerpc/kernel/setup_64.c @@ -243,13 +243,19 @@ static void cpu_ready_for_interrupts(void) } /* - * Fixup HFSCR:TM based on CPU features. The bit is set by our - * early asm init because at that point we haven't updated our - * CPU features from firmware and device-tree. Here we have, - * so let's do it. + * Set HFSCR:TM based on CPU features: + * In the special case of TM no suspend (P9N DD2.1), Linux is + * told TM is off via the dt-ftrs but told to (partially) use + * it via OPAL_REINIT_CPUS_TM_SUSPEND_DISABLED. So HFSCR[TM] + * will be off from dt-ftrs but we need to turn it on for the + * no suspend case. 
*/ - if (cpu_has_feature(CPU_FTR_HVMODE) && !cpu_has_feature(CPU_FTR_TM_COMP)) - mtspr(SPRN_HFSCR, mfspr(SPRN_HFSCR) & ~HFSCR_TM); + if (cpu_has_feature(CPU_FTR_HVMODE)) { + if (cpu_has_feature(CPU_FTR_TM_COMP)) + mtspr(SPRN_HFSCR, mfspr(SPRN_HFSCR) | HFSCR_TM); + else + mtspr(SPRN_HFSCR, mfspr(SPRN_HFSCR) & ~HFSCR_TM); + } /* Set IR and DR in PACA MSR */ get_paca()->kernel_msr = MSR_KERNEL; diff --git a/arch/powerpc/kernel/signal_32.c b/arch/powerpc/kernel/signal_32.c index e6474a45cef50..fd59fef9931bf 100644 --- a/arch/powerpc/kernel/signal_32.c +++ b/arch/powerpc/kernel/signal_32.c @@ -848,7 +848,23 @@ static long restore_tm_user_regs(struct pt_regs *regs, /* If TM bits are set to the reserved value, it's an invalid context */ if (MSR_TM_RESV(msr_hi)) return 1; - /* Pull in the MSR TM bits from the user context */ + + /* + * Disabling preemption, since it is unsafe to be preempted + * with MSR[TS] set without recheckpointing. + */ + preempt_disable(); + + /* + * CAUTION: + * After regs->MSR[TS] being updated, make sure that get_user(), + * put_user() or similar functions are *not* called. These + * functions can generate page faults which will cause the process + * to be de-scheduled with MSR[TS] set but without calling + * tm_recheckpoint(). This can cause a bug. + * + * Pull in the MSR TM bits from the user context + */ regs->msr = (regs->msr & ~MSR_TS_MASK) | (msr_hi & MSR_TS_MASK); /* Now, recheckpoint. This loads up all of the checkpointed (older) * registers, including FP and V[S]Rs. 
After recheckpointing, the @@ -873,6 +889,8 @@ static long restore_tm_user_regs(struct pt_regs *regs, } #endif + preempt_enable(); + return 0; } #endif diff --git a/arch/powerpc/kernel/signal_64.c b/arch/powerpc/kernel/signal_64.c index 83d51bf586c7e..bbd1c73243d79 100644 --- a/arch/powerpc/kernel/signal_64.c +++ b/arch/powerpc/kernel/signal_64.c @@ -467,20 +467,6 @@ static long restore_tm_sigcontexts(struct task_struct *tsk, if (MSR_TM_RESV(msr)) return -EINVAL; - /* pull in MSR TS bits from user context */ - regs->msr = (regs->msr & ~MSR_TS_MASK) | (msr & MSR_TS_MASK); - - /* - * Ensure that TM is enabled in regs->msr before we leave the signal - * handler. It could be the case that (a) user disabled the TM bit - * through the manipulation of the MSR bits in uc_mcontext or (b) the - * TM bit was disabled because a sufficient number of context switches - * happened whilst in the signal handler and load_tm overflowed, - * disabling the TM bit. In either case we can end up with an illegal - * TM state leading to a TM Bad Thing when we return to userspace. - */ - regs->msr |= MSR_TM; - /* pull in MSR LE from user context */ regs->msr = (regs->msr & ~MSR_LE) | (msr & MSR_LE); @@ -572,6 +558,34 @@ static long restore_tm_sigcontexts(struct task_struct *tsk, tm_enable(); /* Make sure the transaction is marked as failed */ tsk->thread.tm_texasr |= TEXASR_FS; + + /* + * Disabling preemption, since it is unsafe to be preempted + * with MSR[TS] set without recheckpointing. + */ + preempt_disable(); + + /* pull in MSR TS bits from user context */ + regs->msr = (regs->msr & ~MSR_TS_MASK) | (msr & MSR_TS_MASK); + + /* + * Ensure that TM is enabled in regs->msr before we leave the signal + * handler. 
It could be the case that (a) user disabled the TM bit + * through the manipulation of the MSR bits in uc_mcontext or (b) the + * TM bit was disabled because a sufficient number of context switches + * happened whilst in the signal handler and load_tm overflowed, + * disabling the TM bit. In either case we can end up with an illegal + * TM state leading to a TM Bad Thing when we return to userspace. + * + * CAUTION: + * After regs->MSR[TS] being updated, make sure that get_user(), + * put_user() or similar functions are *not* called. These + * functions can generate page faults which will cause the process + * to be de-scheduled with MSR[TS] set but without calling + * tm_recheckpoint(). This can cause a bug. + */ + regs->msr |= MSR_TM; + /* This loads the checkpointed FP/VEC state, if used */ tm_recheckpoint(&tsk->thread); @@ -585,6 +599,8 @@ static long restore_tm_sigcontexts(struct task_struct *tsk, regs->msr |= MSR_VEC; } + preempt_enable(); + return err; } #endif diff --git a/arch/powerpc/kernel/trace/Makefile b/arch/powerpc/kernel/trace/Makefile index d22d8bafb6434..d868ba42032f2 100644 --- a/arch/powerpc/kernel/trace/Makefile +++ b/arch/powerpc/kernel/trace/Makefile @@ -7,7 +7,7 @@ subdir-ccflags-$(CONFIG_PPC_WERROR) := -Werror ifdef CONFIG_FUNCTION_TRACER # do not trace tracer code -CFLAGS_REMOVE_ftrace.o = -mno-sched-epilog $(CC_FLAGS_FTRACE) +CFLAGS_REMOVE_ftrace.o = $(CC_FLAGS_FTRACE) endif obj32-$(CONFIG_FUNCTION_TRACER) += ftrace_32.o diff --git a/arch/powerpc/kernel/trace/ftrace.c b/arch/powerpc/kernel/trace/ftrace.c index 4bfbb54dee517..19ef4f5866b6a 100644 --- a/arch/powerpc/kernel/trace/ftrace.c +++ b/arch/powerpc/kernel/trace/ftrace.c @@ -697,7 +697,6 @@ int ftrace_disable_ftrace_graph_caller(void) */ unsigned long prepare_ftrace_return(unsigned long parent, unsigned long ip) { - struct ftrace_graph_ent trace; unsigned long return_hooker; if (unlikely(ftrace_graph_is_dead())) @@ -708,18 +707,8 @@ unsigned long prepare_ftrace_return(unsigned long 
parent, unsigned long ip) return_hooker = ppc_function_entry(return_to_handler); - trace.func = ip; - trace.depth = current->curr_ret_stack + 1; - - /* Only trace if the calling function expects to */ - if (!ftrace_graph_entry(&trace)) - goto out; - - if (ftrace_push_return_trace(parent, ip, &trace.depth, 0, - NULL) == -EBUSY) - goto out; - - parent = return_hooker; + if (!function_graph_enter(parent, ip, 0, NULL)) + parent = return_hooker; out: return parent; } diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c index c85adb8582713..8689a02b7df87 100644 --- a/arch/powerpc/kernel/traps.c +++ b/arch/powerpc/kernel/traps.c @@ -767,12 +767,17 @@ void machine_check_exception(struct pt_regs *regs) if (check_io_access(regs)) goto bail; - die("Machine check", regs, SIGBUS); - /* Must die if the interrupt is not recoverable */ if (!(regs->msr & MSR_RI)) nmi_panic(regs, "Unrecoverable Machine check"); + if (!nested) + nmi_exit(); + + die("Machine check", regs, SIGBUS); + + return; + bail: if (!nested) nmi_exit(); diff --git a/arch/powerpc/kvm/trace.h b/arch/powerpc/kvm/trace.h index 491b0f715d6bc..ea1d7c8083190 100644 --- a/arch/powerpc/kvm/trace.h +++ b/arch/powerpc/kvm/trace.h @@ -6,8 +6,6 @@ #undef TRACE_SYSTEM #define TRACE_SYSTEM kvm -#define TRACE_INCLUDE_PATH . -#define TRACE_INCLUDE_FILE trace /* * Tracepoint for guest mode entry. @@ -120,4 +118,10 @@ TRACE_EVENT(kvm_check_requests, #endif /* _TRACE_KVM_H */ /* This part must be outside protection */ +#undef TRACE_INCLUDE_PATH +#undef TRACE_INCLUDE_FILE + +#define TRACE_INCLUDE_PATH . +#define TRACE_INCLUDE_FILE trace + #include diff --git a/arch/powerpc/kvm/trace_booke.h b/arch/powerpc/kvm/trace_booke.h index ac640e81fdc5f..3837842986aa4 100644 --- a/arch/powerpc/kvm/trace_booke.h +++ b/arch/powerpc/kvm/trace_booke.h @@ -6,8 +6,6 @@ #undef TRACE_SYSTEM #define TRACE_SYSTEM kvm_booke -#define TRACE_INCLUDE_PATH . 
-#define TRACE_INCLUDE_FILE trace_booke #define kvm_trace_symbol_exit \ {0, "CRITICAL"}, \ @@ -218,4 +216,11 @@ TRACE_EVENT(kvm_booke_queue_irqprio, #endif /* This part must be outside protection */ + +#undef TRACE_INCLUDE_PATH +#undef TRACE_INCLUDE_FILE + +#define TRACE_INCLUDE_PATH . +#define TRACE_INCLUDE_FILE trace_booke + #include diff --git a/arch/powerpc/kvm/trace_hv.h b/arch/powerpc/kvm/trace_hv.h index bcfe8a987f6a9..8a1e3b0047f19 100644 --- a/arch/powerpc/kvm/trace_hv.h +++ b/arch/powerpc/kvm/trace_hv.h @@ -9,8 +9,6 @@ #undef TRACE_SYSTEM #define TRACE_SYSTEM kvm_hv -#define TRACE_INCLUDE_PATH . -#define TRACE_INCLUDE_FILE trace_hv #define kvm_trace_symbol_hcall \ {H_REMOVE, "H_REMOVE"}, \ @@ -497,4 +495,11 @@ TRACE_EVENT(kvmppc_run_vcpu_exit, #endif /* _TRACE_KVM_HV_H */ /* This part must be outside protection */ + +#undef TRACE_INCLUDE_PATH +#undef TRACE_INCLUDE_FILE + +#define TRACE_INCLUDE_PATH . +#define TRACE_INCLUDE_FILE trace_hv + #include diff --git a/arch/powerpc/kvm/trace_pr.h b/arch/powerpc/kvm/trace_pr.h index 2f9a8829552b9..46a46d328fbf2 100644 --- a/arch/powerpc/kvm/trace_pr.h +++ b/arch/powerpc/kvm/trace_pr.h @@ -8,8 +8,6 @@ #undef TRACE_SYSTEM #define TRACE_SYSTEM kvm_pr -#define TRACE_INCLUDE_PATH . -#define TRACE_INCLUDE_FILE trace_pr TRACE_EVENT(kvm_book3s_reenter, TP_PROTO(int r, struct kvm_vcpu *vcpu), @@ -257,4 +255,11 @@ TRACE_EVENT(kvm_exit, #endif /* _TRACE_KVM_H */ /* This part must be outside protection */ + +#undef TRACE_INCLUDE_PATH +#undef TRACE_INCLUDE_FILE + +#define TRACE_INCLUDE_PATH . 
+#define TRACE_INCLUDE_FILE trace_pr + #include diff --git a/arch/powerpc/mm/8xx_mmu.c b/arch/powerpc/mm/8xx_mmu.c index cf77d755246db..5d53684c2ebd7 100644 --- a/arch/powerpc/mm/8xx_mmu.c +++ b/arch/powerpc/mm/8xx_mmu.c @@ -79,7 +79,7 @@ void __init MMU_init_hw(void) for (; i < 32 && mem >= LARGE_PAGE_SIZE_8M; i++) { mtspr(SPRN_MD_CTR, ctr | (i << 8)); mtspr(SPRN_MD_EPN, (unsigned long)__va(addr) | MD_EVALID); - mtspr(SPRN_MD_TWC, MD_PS8MEG | MD_SVALID | M_APG2); + mtspr(SPRN_MD_TWC, MD_PS8MEG | MD_SVALID); mtspr(SPRN_MD_RPN, addr | flags | _PAGE_PRESENT); addr += LARGE_PAGE_SIZE_8M; mem -= LARGE_PAGE_SIZE_8M; diff --git a/arch/powerpc/mm/dump_linuxpagetables.c b/arch/powerpc/mm/dump_linuxpagetables.c index 876e2a3c79f20..8464c2c01c0ca 100644 --- a/arch/powerpc/mm/dump_linuxpagetables.c +++ b/arch/powerpc/mm/dump_linuxpagetables.c @@ -19,6 +19,7 @@ #include #include #include +#include #include #include #include @@ -418,12 +419,13 @@ static void walk_pagetables(struct pg_state *st) unsigned int i; unsigned long addr; + addr = st->start_address; + /* * Traverse the linux pagetable structure and dump pages that are in * the hash pagetable. 
*/ - for (i = 0; i < PTRS_PER_PGD; i++, pgd++) { - addr = KERN_VIRT_START + i * PGDIR_SIZE; + for (i = 0; i < PTRS_PER_PGD; i++, pgd++, addr += PGDIR_SIZE) { if (!pgd_none(*pgd) && !pgd_huge(*pgd)) /* pgd exists */ walk_pud(st, pgd, addr); @@ -472,9 +474,14 @@ static int ptdump_show(struct seq_file *m, void *v) { struct pg_state st = { .seq = m, - .start_address = KERN_VIRT_START, .marker = address_markers, }; + + if (radix_enabled()) + st.start_address = PAGE_OFFSET; + else + st.start_address = KERN_VIRT_START; + /* Traverse kernel page tables */ walk_pagetables(&st); note_page(&st, 0, 0, 0); diff --git a/arch/powerpc/mm/hash_native_64.c b/arch/powerpc/mm/hash_native_64.c index 729f02df8290c..aaa28fd918fe4 100644 --- a/arch/powerpc/mm/hash_native_64.c +++ b/arch/powerpc/mm/hash_native_64.c @@ -115,6 +115,8 @@ static void tlbiel_all_isa300(unsigned int num_sets, unsigned int is) tlbiel_hash_set_isa300(0, is, 0, 2, 1); asm volatile("ptesync": : :"memory"); + + asm volatile(PPC_INVALIDATE_ERAT "; isync" : : :"memory"); } void hash__tlbiel_all(unsigned int action) @@ -140,8 +142,6 @@ void hash__tlbiel_all(unsigned int action) tlbiel_all_isa206(POWER7_TLB_SETS, is); else WARN(1, "%s called on pre-POWER7 CPU\n", __func__); - - asm volatile(PPC_INVALIDATE_ERAT "; isync" : : :"memory"); } static inline unsigned long ___tlbie(unsigned long vpn, int psize, diff --git a/arch/powerpc/mm/hugetlbpage.c b/arch/powerpc/mm/hugetlbpage.c index e87f9ef9115b4..7296a42eb62e4 100644 --- a/arch/powerpc/mm/hugetlbpage.c +++ b/arch/powerpc/mm/hugetlbpage.c @@ -19,6 +19,7 @@ #include #include #include +#include #include #include #include @@ -112,6 +113,8 @@ static int __hugepte_alloc(struct mm_struct *mm, hugepd_t *hpdp, for (i = i - 1 ; i >= 0; i--, hpdp--) *hpdp = __hugepd(0); kmem_cache_free(cachep, new); + } else { + kmemleak_ignore(new); } spin_unlock(ptl); return 0; diff --git a/arch/powerpc/mm/init_64.c b/arch/powerpc/mm/init_64.c index 7a9886f98b0c1..a5091c0347475 100644 --- 
a/arch/powerpc/mm/init_64.c +++ b/arch/powerpc/mm/init_64.c @@ -188,15 +188,20 @@ int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node, pr_debug("vmemmap_populate %lx..%lx, node %d\n", start, end, node); for (; start < end; start += page_size) { - void *p; + void *p = NULL; int rc; if (vmemmap_populated(start, page_size)) continue; + /* + * Allocate from the altmap first if we have one. This may + * fail due to alignment issues when using 16MB hugepages, so + * fall back to system memory if the altmap allocation fail. + */ if (altmap) p = altmap_alloc_block_buf(page_size, altmap); - else + if (!p) p = vmemmap_alloc_block_buf(page_size, node); if (!p) return -ENOMEM; @@ -255,8 +260,15 @@ void __ref vmemmap_free(unsigned long start, unsigned long end, { unsigned long page_size = 1 << mmu_psize_defs[mmu_vmemmap_psize].shift; unsigned long page_order = get_order(page_size); + unsigned long alt_start = ~0, alt_end = ~0; + unsigned long base_pfn; start = _ALIGN_DOWN(start, page_size); + if (altmap) { + alt_start = altmap->base_pfn; + alt_end = altmap->base_pfn + altmap->reserve + + altmap->free + altmap->alloc + altmap->align; + } pr_debug("vmemmap_free %lx...%lx\n", start, end); @@ -280,8 +292,9 @@ void __ref vmemmap_free(unsigned long start, unsigned long end, page = pfn_to_page(addr >> PAGE_SHIFT); section_base = pfn_to_page(vmemmap_section_start(start)); nr_pages = 1 << page_order; + base_pfn = PHYS_PFN(addr); - if (altmap) { + if (base_pfn >= alt_start && base_pfn < alt_end) { vmem_altmap_free(altmap, nr_pages); } else if (PageReserved(page)) { /* allocated from bootmem */ diff --git a/arch/powerpc/mm/numa.c b/arch/powerpc/mm/numa.c index 055b211b71266..5500e4edabc63 100644 --- a/arch/powerpc/mm/numa.c +++ b/arch/powerpc/mm/numa.c @@ -1179,7 +1179,7 @@ static long vphn_get_associativity(unsigned long cpu, switch (rc) { case H_FUNCTION: - printk(KERN_INFO + printk_once(KERN_INFO "VPHN is not supported. 
Disabling polling...\n"); stop_topology_update(); break; diff --git a/arch/powerpc/mm/slice.c b/arch/powerpc/mm/slice.c index 205fe557ca109..4f213ba33491b 100644 --- a/arch/powerpc/mm/slice.c +++ b/arch/powerpc/mm/slice.c @@ -61,6 +61,13 @@ static void slice_print_mask(const char *label, const struct slice_mask *mask) { #endif +static inline bool slice_addr_is_low(unsigned long addr) +{ + u64 tmp = (u64)addr; + + return tmp < SLICE_LOW_TOP; +} + static void slice_range_to_mask(unsigned long start, unsigned long len, struct slice_mask *ret) { @@ -70,7 +77,7 @@ static void slice_range_to_mask(unsigned long start, unsigned long len, if (SLICE_NUM_HIGH) bitmap_zero(ret->high_slices, SLICE_NUM_HIGH); - if (start < SLICE_LOW_TOP) { + if (slice_addr_is_low(start)) { unsigned long mend = min(end, (unsigned long)(SLICE_LOW_TOP - 1)); @@ -78,7 +85,7 @@ static void slice_range_to_mask(unsigned long start, unsigned long len, - (1u << GET_LOW_SLICE_INDEX(start)); } - if ((start + len) > SLICE_LOW_TOP) { + if (SLICE_NUM_HIGH && !slice_addr_is_low(end)) { unsigned long start_index = GET_HIGH_SLICE_INDEX(start); unsigned long align_end = ALIGN(end, (1UL << SLICE_HIGH_SHIFT)); unsigned long count = GET_HIGH_SLICE_INDEX(align_end) - start_index; @@ -133,7 +140,7 @@ static void slice_mask_for_free(struct mm_struct *mm, struct slice_mask *ret, if (!slice_low_has_vma(mm, i)) ret->low_slices |= 1u << i; - if (high_limit <= SLICE_LOW_TOP) + if (slice_addr_is_low(high_limit - 1)) return; for (i = 0; i < GET_HIGH_SLICE_INDEX(high_limit); i++) @@ -182,7 +189,7 @@ static bool slice_check_range_fits(struct mm_struct *mm, unsigned long end = start + len - 1; u64 low_slices = 0; - if (start < SLICE_LOW_TOP) { + if (slice_addr_is_low(start)) { unsigned long mend = min(end, (unsigned long)(SLICE_LOW_TOP - 1)); @@ -192,7 +199,7 @@ static bool slice_check_range_fits(struct mm_struct *mm, if ((low_slices & available->low_slices) != low_slices) return false; - if (SLICE_NUM_HIGH && ((start + len) > 
SLICE_LOW_TOP)) { + if (SLICE_NUM_HIGH && !slice_addr_is_low(end)) { unsigned long start_index = GET_HIGH_SLICE_INDEX(start); unsigned long align_end = ALIGN(end, (1UL << SLICE_HIGH_SHIFT)); unsigned long count = GET_HIGH_SLICE_INDEX(align_end) - start_index; @@ -303,7 +310,7 @@ static bool slice_scan_available(unsigned long addr, int end, unsigned long *boundary_addr) { unsigned long slice; - if (addr < SLICE_LOW_TOP) { + if (slice_addr_is_low(addr)) { slice = GET_LOW_SLICE_INDEX(addr); *boundary_addr = (slice + end) << SLICE_LOW_SHIFT; return !!(available->low_slices & (1u << slice)); @@ -706,7 +713,7 @@ unsigned int get_slice_psize(struct mm_struct *mm, unsigned long addr) VM_BUG_ON(radix_enabled()); - if (addr < SLICE_LOW_TOP) { + if (slice_addr_is_low(addr)) { psizes = mm->context.low_slices_psize; index = GET_LOW_SLICE_INDEX(addr); } else { diff --git a/arch/powerpc/mm/tlb_nohash.c b/arch/powerpc/mm/tlb_nohash.c index 15fe5f0c8665b..ae5d568e267f6 100644 --- a/arch/powerpc/mm/tlb_nohash.c +++ b/arch/powerpc/mm/tlb_nohash.c @@ -503,6 +503,9 @@ static void setup_page_sizes(void) for (psize = 0; psize < MMU_PAGE_COUNT; ++psize) { struct mmu_psize_def *def = &mmu_psize_defs[psize]; + if (!def->shift) + continue; + if (tlb1ps & (1U << (def->shift - 10))) { def->flags |= MMU_PAGE_SIZE_DIRECT; diff --git a/arch/powerpc/platforms/powermac/Makefile b/arch/powerpc/platforms/powermac/Makefile index f2839eed0f897..561a67d65e4d4 100644 --- a/arch/powerpc/platforms/powermac/Makefile +++ b/arch/powerpc/platforms/powermac/Makefile @@ -3,7 +3,7 @@ CFLAGS_bootx_init.o += -fPIC ifdef CONFIG_FUNCTION_TRACER # Do not trace early boot code -CFLAGS_REMOVE_bootx_init.o = -mno-sched-epilog $(CC_FLAGS_FTRACE) +CFLAGS_REMOVE_bootx_init.o = $(CC_FLAGS_FTRACE) endif obj-y += pic.o setup.o time.o feature.o pci.o \ diff --git a/arch/powerpc/platforms/powernv/memtrace.c b/arch/powerpc/platforms/powernv/memtrace.c index 51dc398ae3f7a..a29fdf8a2e56e 100644 --- 
a/arch/powerpc/platforms/powernv/memtrace.c +++ b/arch/powerpc/platforms/powernv/memtrace.c @@ -90,17 +90,15 @@ static bool memtrace_offline_pages(u32 nid, u64 start_pfn, u64 nr_pages) walk_memory_range(start_pfn, end_pfn, (void *)MEM_OFFLINE, change_memblock_state); - lock_device_hotplug(); - remove_memory(nid, start_pfn << PAGE_SHIFT, nr_pages << PAGE_SHIFT); - unlock_device_hotplug(); return true; } static u64 memtrace_alloc_node(u32 nid, u64 size) { - u64 start_pfn, end_pfn, nr_pages; + u64 start_pfn, end_pfn, nr_pages, pfn; u64 base_pfn; + u64 bytes = memory_block_size_bytes(); if (!node_spanned_pages(nid)) return 0; @@ -113,8 +111,21 @@ static u64 memtrace_alloc_node(u32 nid, u64 size) end_pfn = round_down(end_pfn - nr_pages, nr_pages); for (base_pfn = end_pfn; base_pfn > start_pfn; base_pfn -= nr_pages) { - if (memtrace_offline_pages(nid, base_pfn, nr_pages) == true) + if (memtrace_offline_pages(nid, base_pfn, nr_pages) == true) { + /* + * Remove memory in memory block size chunks so that + * iomem resources are always split to the same size and + * we never try to remove memory that spans two iomem + * resources. 
+ */ + lock_device_hotplug(); + end_pfn = base_pfn + nr_pages; + for (pfn = base_pfn; pfn < end_pfn; pfn += bytes>> PAGE_SHIFT) { + remove_memory(nid, pfn << PAGE_SHIFT, bytes); + } + unlock_device_hotplug(); return base_pfn << PAGE_SHIFT; + } } return 0; diff --git a/arch/powerpc/xmon/Makefile b/arch/powerpc/xmon/Makefile index 1bc3abb237cda..9d7d8e6d705c4 100644 --- a/arch/powerpc/xmon/Makefile +++ b/arch/powerpc/xmon/Makefile @@ -1,14 +1,17 @@ # SPDX-License-Identifier: GPL-2.0 # Makefile for xmon -subdir-ccflags-$(CONFIG_PPC_WERROR) := -Werror +# Disable clang warning for using setjmp without setjmp.h header +subdir-ccflags-y := $(call cc-disable-warning, builtin-requires-header) + +subdir-ccflags-$(CONFIG_PPC_WERROR) += -Werror GCOV_PROFILE := n UBSAN_SANITIZE := n # Disable ftrace for the entire directory ORIG_CFLAGS := $(KBUILD_CFLAGS) -KBUILD_CFLAGS = $(subst -mno-sched-epilog,,$(subst $(CC_FLAGS_FTRACE),,$(ORIG_CFLAGS))) +KBUILD_CFLAGS = $(subst $(CC_FLAGS_FTRACE),,$(ORIG_CFLAGS)) ccflags-$(CONFIG_PPC64) := $(NO_MINIMAL_TOC) diff --git a/arch/powerpc/xmon/xmon.c b/arch/powerpc/xmon/xmon.c index 4264aedc7775a..dd6badc31f458 100644 --- a/arch/powerpc/xmon/xmon.c +++ b/arch/powerpc/xmon/xmon.c @@ -75,6 +75,9 @@ static int xmon_gate; #define xmon_owner 0 #endif /* CONFIG_SMP */ +#ifdef CONFIG_PPC_PSERIES +static int set_indicator_token = RTAS_UNKNOWN_SERVICE; +#endif static unsigned long in_xmon __read_mostly = 0; static int xmon_on = IS_ENABLED(CONFIG_XMON_DEFAULT); @@ -358,7 +361,6 @@ static inline void disable_surveillance(void) #ifdef CONFIG_PPC_PSERIES /* Since this can't be a module, args should end up below 4GB. */ static struct rtas_args args; - int token; /* * At this point we have got all the cpus we can into @@ -367,11 +369,11 @@ static inline void disable_surveillance(void) * If we did try to take rtas.lock there would be a * real possibility of deadlock. 
*/ - token = rtas_token("set-indicator"); - if (token == RTAS_UNKNOWN_SERVICE) + if (set_indicator_token == RTAS_UNKNOWN_SERVICE) return; - rtas_call_unlocked(&args, token, 3, 1, NULL, SURVEILLANCE_TOKEN, 0, 0); + rtas_call_unlocked(&args, set_indicator_token, 3, 1, NULL, + SURVEILLANCE_TOKEN, 0, 0); #endif /* CONFIG_PPC_PSERIES */ } @@ -3672,6 +3674,14 @@ static void xmon_init(int enable) __debugger_iabr_match = xmon_iabr_match; __debugger_break_match = xmon_break_match; __debugger_fault_handler = xmon_fault_handler; + +#ifdef CONFIG_PPC_PSERIES + /* + * Get the token here to avoid trying to get a lock + * during the crash, causing a deadlock. + */ + set_indicator_token = rtas_token("set-indicator"); +#endif } else { __debugger = NULL; __debugger_ipi = NULL; diff --git a/arch/riscv/Makefile b/arch/riscv/Makefile index 61ec42405ec96..110be14e61226 100644 --- a/arch/riscv/Makefile +++ b/arch/riscv/Makefile @@ -82,4 +82,8 @@ core-y += arch/riscv/kernel/ arch/riscv/mm/ libs-y += arch/riscv/lib/ +PHONY += vdso_install +vdso_install: + $(Q)$(MAKE) $(build)=arch/riscv/kernel/vdso $@ + all: vmlinux diff --git a/arch/riscv/include/asm/module.h b/arch/riscv/include/asm/module.h index 349df33808c42..cd2af4b013e38 100644 --- a/arch/riscv/include/asm/module.h +++ b/arch/riscv/include/asm/module.h @@ -8,6 +8,7 @@ #define MODULE_ARCH_VERMAGIC "riscv" +struct module; u64 module_emit_got_entry(struct module *mod, u64 val); u64 module_emit_plt_entry(struct module *mod, u64 val); diff --git a/arch/riscv/include/asm/uaccess.h b/arch/riscv/include/asm/uaccess.h index 473cfc84e412f..8c3e3e3c8be12 100644 --- a/arch/riscv/include/asm/uaccess.h +++ b/arch/riscv/include/asm/uaccess.h @@ -400,13 +400,13 @@ extern unsigned long __must_check __asm_copy_from_user(void *to, static inline unsigned long raw_copy_from_user(void *to, const void __user *from, unsigned long n) { - return __asm_copy_to_user(to, from, n); + return __asm_copy_from_user(to, from, n); } static inline unsigned long 
raw_copy_to_user(void __user *to, const void *from, unsigned long n) { - return __asm_copy_from_user(to, from, n); + return __asm_copy_to_user(to, from, n); } extern long strncpy_from_user(char *dest, const char __user *src, long count); diff --git a/arch/riscv/kernel/ftrace.c b/arch/riscv/kernel/ftrace.c index 1157b6b52d259..c433f6d3dd64f 100644 --- a/arch/riscv/kernel/ftrace.c +++ b/arch/riscv/kernel/ftrace.c @@ -132,7 +132,6 @@ void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr, { unsigned long return_hooker = (unsigned long)&return_to_handler; unsigned long old; - struct ftrace_graph_ent trace; int err; if (unlikely(atomic_read(¤t->tracing_graph_pause))) @@ -144,17 +143,8 @@ void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr, */ old = *parent; - trace.func = self_addr; - trace.depth = current->curr_ret_stack + 1; - - if (!ftrace_graph_entry(&trace)) - return; - - err = ftrace_push_return_trace(old, self_addr, &trace.depth, - frame_pointer, parent); - if (err == -EBUSY) - return; - *parent = return_hooker; + if (function_graph_enter(old, self_addr, frame_pointer, parent)) + *parent = return_hooker; } #ifdef CONFIG_DYNAMIC_FTRACE diff --git a/arch/riscv/kernel/module.c b/arch/riscv/kernel/module.c index 3303ed2cd4193..7dd308129b40f 100644 --- a/arch/riscv/kernel/module.c +++ b/arch/riscv/kernel/module.c @@ -21,7 +21,7 @@ static int apply_r_riscv_32_rela(struct module *me, u32 *location, Elf_Addr v) { if (v != (u32)v) { pr_err("%s: value %016llx out of range for 32-bit field\n", - me->name, v); + me->name, (long long)v); return -EINVAL; } *location = v; @@ -102,7 +102,7 @@ static int apply_r_riscv_pcrel_hi20_rela(struct module *me, u32 *location, if (offset != (s32)offset) { pr_err( "%s: target %016llx can not be addressed by the 32-bit offset from PC = %p\n", - me->name, v, location); + me->name, (long long)v, location); return -EINVAL; } @@ -144,7 +144,7 @@ static int apply_r_riscv_hi20_rela(struct module *me, u32 
*location, if (IS_ENABLED(CMODEL_MEDLOW)) { pr_err( "%s: target %016llx can not be addressed by the 32-bit offset from PC = %p\n", - me->name, v, location); + me->name, (long long)v, location); return -EINVAL; } @@ -188,7 +188,7 @@ static int apply_r_riscv_got_hi20_rela(struct module *me, u32 *location, } else { pr_err( "%s: can not generate the GOT entry for symbol = %016llx from PC = %p\n", - me->name, v, location); + me->name, (long long)v, location); return -EINVAL; } @@ -212,7 +212,7 @@ static int apply_r_riscv_call_plt_rela(struct module *me, u32 *location, } else { pr_err( "%s: target %016llx can not be addressed by the 32-bit offset from PC = %p\n", - me->name, v, location); + me->name, (long long)v, location); return -EINVAL; } } @@ -234,7 +234,7 @@ static int apply_r_riscv_call_rela(struct module *me, u32 *location, if (offset != fill_v) { pr_err( "%s: target %016llx can not be addressed by the 32-bit offset from PC = %p\n", - me->name, v, location); + me->name, (long long)v, location); return -EINVAL; } diff --git a/arch/s390/boot/compressed/Makefile b/arch/s390/boot/compressed/Makefile index 04609478d18b9..b375c6c5ae7b1 100644 --- a/arch/s390/boot/compressed/Makefile +++ b/arch/s390/boot/compressed/Makefile @@ -20,7 +20,7 @@ KBUILD_CFLAGS := $(KBUILD_CFLAGS_DECOMPRESSOR) OBJECTS := $(addprefix $(obj)/,$(obj-y)) LDFLAGS_vmlinux := --oformat $(LD_BFD) -e startup -T -$(obj)/vmlinux: $(obj)/vmlinux.lds $(objtree)/arch/s390/boot/startup.a $(OBJECTS) +$(obj)/vmlinux: $(obj)/vmlinux.lds $(objtree)/arch/s390/boot/startup.a $(OBJECTS) FORCE $(call if_changed,ld) # extract required uncompressed vmlinux symbols and adjust them to reflect offsets inside vmlinux.bin @@ -51,17 +51,17 @@ suffix-$(CONFIG_KERNEL_LZMA) := .lzma suffix-$(CONFIG_KERNEL_LZO) := .lzo suffix-$(CONFIG_KERNEL_XZ) := .xz -$(obj)/vmlinux.bin.gz: $(vmlinux.bin.all-y) +$(obj)/vmlinux.bin.gz: $(vmlinux.bin.all-y) FORCE $(call if_changed,gzip) -$(obj)/vmlinux.bin.bz2: $(vmlinux.bin.all-y) 
+$(obj)/vmlinux.bin.bz2: $(vmlinux.bin.all-y) FORCE $(call if_changed,bzip2) -$(obj)/vmlinux.bin.lz4: $(vmlinux.bin.all-y) +$(obj)/vmlinux.bin.lz4: $(vmlinux.bin.all-y) FORCE $(call if_changed,lz4) -$(obj)/vmlinux.bin.lzma: $(vmlinux.bin.all-y) +$(obj)/vmlinux.bin.lzma: $(vmlinux.bin.all-y) FORCE $(call if_changed,lzma) -$(obj)/vmlinux.bin.lzo: $(vmlinux.bin.all-y) +$(obj)/vmlinux.bin.lzo: $(vmlinux.bin.all-y) FORCE $(call if_changed,lzo) -$(obj)/vmlinux.bin.xz: $(vmlinux.bin.all-y) +$(obj)/vmlinux.bin.xz: $(vmlinux.bin.all-y) FORCE $(call if_changed,xzkern) LDFLAGS_piggy.o := -r --format binary --oformat $(LD_BFD) -T diff --git a/arch/s390/defconfig b/arch/s390/defconfig index f40600eb17628..5134c71a4937b 100644 --- a/arch/s390/defconfig +++ b/arch/s390/defconfig @@ -221,7 +221,6 @@ CONFIG_CRYPTO_SALSA20=m CONFIG_CRYPTO_SEED=m CONFIG_CRYPTO_SERPENT=m CONFIG_CRYPTO_SM4=m -CONFIG_CRYPTO_SPECK=m CONFIG_CRYPTO_TEA=m CONFIG_CRYPTO_TWOFISH=m CONFIG_CRYPTO_DEFLATE=m diff --git a/arch/s390/include/asm/mmu_context.h b/arch/s390/include/asm/mmu_context.h index 0717ee76885d6..09b61d0e491f6 100644 --- a/arch/s390/include/asm/mmu_context.h +++ b/arch/s390/include/asm/mmu_context.h @@ -45,8 +45,6 @@ static inline int init_new_context(struct task_struct *tsk, mm->context.asce_limit = STACK_TOP_MAX; mm->context.asce = __pa(mm->pgd) | _ASCE_TABLE_LENGTH | _ASCE_USER_BITS | _ASCE_TYPE_REGION3; - /* pgd_alloc() did not account this pud */ - mm_inc_nr_puds(mm); break; case -PAGE_SIZE: /* forked 5-level task, set new asce with new_mm->pgd */ @@ -62,9 +60,6 @@ static inline int init_new_context(struct task_struct *tsk, /* forked 2-level compat task, set new asce with new mm->pgd */ mm->context.asce = __pa(mm->pgd) | _ASCE_TABLE_LENGTH | _ASCE_USER_BITS | _ASCE_TYPE_SEGMENT; - /* pgd_alloc() did not account this pmd */ - mm_inc_nr_pmds(mm); - mm_inc_nr_puds(mm); } crst_table_init((unsigned long *) mm->pgd, pgd_entry_type(mm)); return 0; @@ -94,8 +89,6 @@ static inline void 
switch_mm(struct mm_struct *prev, struct mm_struct *next, { int cpu = smp_processor_id(); - if (prev == next) - return; S390_lowcore.user_asce = next->context.asce; cpumask_set_cpu(cpu, &next->context.cpu_attach_mask); /* Clear previous user-ASCE from CR1 and CR7 */ @@ -107,7 +100,8 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next, __ctl_load(S390_lowcore.vdso_asce, 7, 7); clear_cpu_flag(CIF_ASCE_SECONDARY); } - cpumask_clear_cpu(cpu, &prev->context.cpu_attach_mask); + if (prev != next) + cpumask_clear_cpu(cpu, &prev->context.cpu_attach_mask); } #define finish_arch_post_lock_switch finish_arch_post_lock_switch diff --git a/arch/s390/include/asm/pgalloc.h b/arch/s390/include/asm/pgalloc.h index f0f9bcf94c037..5ee733720a571 100644 --- a/arch/s390/include/asm/pgalloc.h +++ b/arch/s390/include/asm/pgalloc.h @@ -36,11 +36,11 @@ static inline void crst_table_init(unsigned long *crst, unsigned long entry) static inline unsigned long pgd_entry_type(struct mm_struct *mm) { - if (mm->context.asce_limit <= _REGION3_SIZE) + if (mm_pmd_folded(mm)) return _SEGMENT_ENTRY_EMPTY; - if (mm->context.asce_limit <= _REGION2_SIZE) + if (mm_pud_folded(mm)) return _REGION3_ENTRY_EMPTY; - if (mm->context.asce_limit <= _REGION1_SIZE) + if (mm_p4d_folded(mm)) return _REGION2_ENTRY_EMPTY; return _REGION1_ENTRY_EMPTY; } diff --git a/arch/s390/include/asm/pgtable.h b/arch/s390/include/asm/pgtable.h index 0e7cb0dc9c33b..de05466ce50c5 100644 --- a/arch/s390/include/asm/pgtable.h +++ b/arch/s390/include/asm/pgtable.h @@ -485,6 +485,24 @@ static inline int is_module_addr(void *addr) _REGION_ENTRY_PROTECT | \ _REGION_ENTRY_NOEXEC) +static inline bool mm_p4d_folded(struct mm_struct *mm) +{ + return mm->context.asce_limit <= _REGION1_SIZE; +} +#define mm_p4d_folded(mm) mm_p4d_folded(mm) + +static inline bool mm_pud_folded(struct mm_struct *mm) +{ + return mm->context.asce_limit <= _REGION2_SIZE; +} +#define mm_pud_folded(mm) mm_pud_folded(mm) + +static inline bool 
mm_pmd_folded(struct mm_struct *mm) +{ + return mm->context.asce_limit <= _REGION3_SIZE; +} +#define mm_pmd_folded(mm) mm_pmd_folded(mm) + static inline int mm_has_pgste(struct mm_struct *mm) { #ifdef CONFIG_PGSTE diff --git a/arch/s390/include/asm/tlb.h b/arch/s390/include/asm/tlb.h index 457b7ba0fbb66..b31c779cf5817 100644 --- a/arch/s390/include/asm/tlb.h +++ b/arch/s390/include/asm/tlb.h @@ -136,7 +136,7 @@ static inline void pte_free_tlb(struct mmu_gather *tlb, pgtable_t pte, static inline void pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd, unsigned long address) { - if (tlb->mm->context.asce_limit <= _REGION3_SIZE) + if (mm_pmd_folded(tlb->mm)) return; pgtable_pmd_page_dtor(virt_to_page(pmd)); tlb_remove_table(tlb, pmd); @@ -152,7 +152,7 @@ static inline void pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd, static inline void p4d_free_tlb(struct mmu_gather *tlb, p4d_t *p4d, unsigned long address) { - if (tlb->mm->context.asce_limit <= _REGION1_SIZE) + if (mm_p4d_folded(tlb->mm)) return; tlb_remove_table(tlb, p4d); } @@ -167,7 +167,7 @@ static inline void p4d_free_tlb(struct mmu_gather *tlb, p4d_t *p4d, static inline void pud_free_tlb(struct mmu_gather *tlb, pud_t *pud, unsigned long address) { - if (tlb->mm->context.asce_limit <= _REGION2_SIZE) + if (mm_pud_folded(tlb->mm)) return; tlb_remove_table(tlb, pud); } diff --git a/arch/s390/kernel/early.c b/arch/s390/kernel/early.c index 5b28b434f8a15..e7e6608b996c6 100644 --- a/arch/s390/kernel/early.c +++ b/arch/s390/kernel/early.c @@ -64,10 +64,10 @@ static noinline __init void detect_machine_type(void) if (stsi(vmms, 3, 2, 2) || !vmms->count) return; - /* Running under KVM? 
If not we assume z/VM */ + /* Detect known hypervisors */ if (!memcmp(vmms->vm[0].cpi, "\xd2\xe5\xd4", 3)) S390_lowcore.machine_flags |= MACHINE_FLAG_KVM; - else + else if (!memcmp(vmms->vm[0].cpi, "\xa9\x61\xe5\xd4", 4)) S390_lowcore.machine_flags |= MACHINE_FLAG_VM; } diff --git a/arch/s390/kernel/ftrace.c b/arch/s390/kernel/ftrace.c index 84be7f02d0c21..39b13d71a8fe6 100644 --- a/arch/s390/kernel/ftrace.c +++ b/arch/s390/kernel/ftrace.c @@ -203,22 +203,13 @@ device_initcall(ftrace_plt_init); */ unsigned long prepare_ftrace_return(unsigned long parent, unsigned long ip) { - struct ftrace_graph_ent trace; - if (unlikely(ftrace_graph_is_dead())) goto out; if (unlikely(atomic_read(¤t->tracing_graph_pause))) goto out; ip -= MCOUNT_INSN_SIZE; - trace.func = ip; - trace.depth = current->curr_ret_stack + 1; - /* Only trace if the calling function expects to. */ - if (!ftrace_graph_entry(&trace)) - goto out; - if (ftrace_push_return_trace(parent, ip, &trace.depth, 0, - NULL) == -EBUSY) - goto out; - parent = (unsigned long) return_to_handler; + if (!function_graph_enter(parent, ip, 0, NULL)) + parent = (unsigned long) return_to_handler; out: return parent; } diff --git a/arch/s390/kernel/perf_cpum_cf.c b/arch/s390/kernel/perf_cpum_cf.c index cc085e2d2ce99..d5523adeddbf4 100644 --- a/arch/s390/kernel/perf_cpum_cf.c +++ b/arch/s390/kernel/perf_cpum_cf.c @@ -346,6 +346,8 @@ static int __hw_perf_event_init(struct perf_event *event) break; case PERF_TYPE_HARDWARE: + if (is_sampling_event(event)) /* No sampling support */ + return -ENOENT; ev = attr->config; /* Count user space (problem-state) only */ if (!attr->exclude_user && attr->exclude_kernel) { @@ -373,7 +375,7 @@ static int __hw_perf_event_init(struct perf_event *event) return -ENOENT; if (ev > PERF_CPUM_CF_MAX_CTR) - return -EINVAL; + return -ENOENT; /* Obtain the counter set to which the specified counter belongs */ set = get_counter_set(ev); diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c index 
c637c12f9e37c..a0097f8bada8e 100644 --- a/arch/s390/kernel/setup.c +++ b/arch/s390/kernel/setup.c @@ -882,6 +882,8 @@ void __init setup_arch(char **cmdline_p) pr_info("Linux is running under KVM in 64-bit mode\n"); else if (MACHINE_IS_LPAR) pr_info("Linux is running natively in 64-bit mode\n"); + else + pr_info("Linux is running as a guest in 64-bit mode\n"); /* Have one command line that is parsed and saved in /proc/cmdline */ /* boot_command_line has been already set up in early.c */ diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c index 2f8f7d7dd9a83..da02f4087d61f 100644 --- a/arch/s390/kernel/smp.c +++ b/arch/s390/kernel/smp.c @@ -371,9 +371,13 @@ void smp_call_online_cpu(void (*func)(void *), void *data) */ void smp_call_ipl_cpu(void (*func)(void *), void *data) { + struct lowcore *lc = pcpu_devices->lowcore; + + if (pcpu_devices[0].address == stap()) + lc = &S390_lowcore; + pcpu_delegate(&pcpu_devices[0], func, data, - pcpu_devices->lowcore->panic_stack - - PANIC_FRAME_OFFSET + PAGE_SIZE); + lc->panic_stack - PANIC_FRAME_OFFSET + PAGE_SIZE); } int smp_find_processor_id(u16 address) @@ -1152,7 +1156,11 @@ static ssize_t __ref rescan_store(struct device *dev, { int rc; + rc = lock_device_hotplug_sysfs(); + if (rc) + return rc; rc = smp_rescan_cpus(); + unlock_device_hotplug(); return rc ? 
rc : count; } static DEVICE_ATTR_WO(rescan); diff --git a/arch/s390/kernel/sthyi.c b/arch/s390/kernel/sthyi.c index 0859cde36f752..888cc2f166db7 100644 --- a/arch/s390/kernel/sthyi.c +++ b/arch/s390/kernel/sthyi.c @@ -183,17 +183,19 @@ static void fill_hdr(struct sthyi_sctns *sctns) static void fill_stsi_mac(struct sthyi_sctns *sctns, struct sysinfo_1_1_1 *sysinfo) { + sclp_ocf_cpc_name_copy(sctns->mac.infmname); + if (*(u64 *)sctns->mac.infmname != 0) + sctns->mac.infmval1 |= MAC_NAME_VLD; + if (stsi(sysinfo, 1, 1, 1)) return; - sclp_ocf_cpc_name_copy(sctns->mac.infmname); - memcpy(sctns->mac.infmtype, sysinfo->type, sizeof(sctns->mac.infmtype)); memcpy(sctns->mac.infmmanu, sysinfo->manufacturer, sizeof(sctns->mac.infmmanu)); memcpy(sctns->mac.infmpman, sysinfo->plant, sizeof(sctns->mac.infmpman)); memcpy(sctns->mac.infmseq, sysinfo->sequence, sizeof(sctns->mac.infmseq)); - sctns->mac.infmval1 |= MAC_ID_VLD | MAC_NAME_VLD; + sctns->mac.infmval1 |= MAC_ID_VLD; } static void fill_stsi_par(struct sthyi_sctns *sctns, diff --git a/arch/s390/kernel/vdso32/Makefile b/arch/s390/kernel/vdso32/Makefile index c5c856f320bca..04dd3e2c3bd9b 100644 --- a/arch/s390/kernel/vdso32/Makefile +++ b/arch/s390/kernel/vdso32/Makefile @@ -36,7 +36,7 @@ UBSAN_SANITIZE := n $(obj)/vdso32_wrapper.o : $(obj)/vdso32.so # link rule for the .so file, .lds has to be first -$(obj)/vdso32.so.dbg: $(src)/vdso32.lds $(obj-vdso32) +$(obj)/vdso32.so.dbg: $(src)/vdso32.lds $(obj-vdso32) FORCE $(call if_changed,vdso32ld) # strip rule for the .so file @@ -45,12 +45,12 @@ $(obj)/%.so: $(obj)/%.so.dbg FORCE $(call if_changed,objcopy) # assembly rules for the .S files -$(obj-vdso32): %.o: %.S +$(obj-vdso32): %.o: %.S FORCE $(call if_changed_dep,vdso32as) # actual build commands quiet_cmd_vdso32ld = VDSO32L $@ - cmd_vdso32ld = $(CC) $(c_flags) -Wl,-T $^ -o $@ + cmd_vdso32ld = $(CC) $(c_flags) -Wl,-T $(filter %.lds %.o,$^) -o $@ quiet_cmd_vdso32as = VDSO32A $@ cmd_vdso32as = $(CC) $(a_flags) -c -o $@ $< diff 
--git a/arch/s390/kernel/vdso64/Makefile b/arch/s390/kernel/vdso64/Makefile index 15b1ceafc4c18..ddebc26cd9494 100644 --- a/arch/s390/kernel/vdso64/Makefile +++ b/arch/s390/kernel/vdso64/Makefile @@ -36,7 +36,7 @@ UBSAN_SANITIZE := n $(obj)/vdso64_wrapper.o : $(obj)/vdso64.so # link rule for the .so file, .lds has to be first -$(obj)/vdso64.so.dbg: $(src)/vdso64.lds $(obj-vdso64) +$(obj)/vdso64.so.dbg: $(src)/vdso64.lds $(obj-vdso64) FORCE $(call if_changed,vdso64ld) # strip rule for the .so file @@ -45,12 +45,12 @@ $(obj)/%.so: $(obj)/%.so.dbg FORCE $(call if_changed,objcopy) # assembly rules for the .S files -$(obj-vdso64): %.o: %.S +$(obj-vdso64): %.o: %.S FORCE $(call if_changed_dep,vdso64as) # actual build commands quiet_cmd_vdso64ld = VDSO64L $@ - cmd_vdso64ld = $(CC) $(c_flags) -Wl,-T $^ -o $@ + cmd_vdso64ld = $(CC) $(c_flags) -Wl,-T $(filter %.lds %.o,$^) -o $@ quiet_cmd_vdso64as = VDSO64A $@ cmd_vdso64as = $(CC) $(a_flags) -c -o $@ $< diff --git a/arch/s390/mm/pgalloc.c b/arch/s390/mm/pgalloc.c index 76d89ee8b4288..814f26520aa2c 100644 --- a/arch/s390/mm/pgalloc.c +++ b/arch/s390/mm/pgalloc.c @@ -101,6 +101,7 @@ int crst_table_upgrade(struct mm_struct *mm, unsigned long end) mm->context.asce_limit = _REGION1_SIZE; mm->context.asce = __pa(mm->pgd) | _ASCE_TABLE_LENGTH | _ASCE_USER_BITS | _ASCE_TYPE_REGION2; + mm_inc_nr_puds(mm); } else { crst_table_init(table, _REGION1_ENTRY_EMPTY); pgd_populate(mm, (pgd_t *) table, (p4d_t *) pgd); diff --git a/arch/s390/numa/numa.c b/arch/s390/numa/numa.c index 5bd374491f946..6c151b42e65db 100644 --- a/arch/s390/numa/numa.c +++ b/arch/s390/numa/numa.c @@ -54,6 +54,7 @@ int __node_distance(int a, int b) { return mode->distance ? 
mode->distance(a, b) : 0; } +EXPORT_SYMBOL(__node_distance); int numa_debug_enabled; diff --git a/arch/s390/pci/pci_clp.c b/arch/s390/pci/pci_clp.c index 19b2d2a9b43db..eeb7450db18c0 100644 --- a/arch/s390/pci/pci_clp.c +++ b/arch/s390/pci/pci_clp.c @@ -436,7 +436,7 @@ int clp_get_state(u32 fid, enum zpci_state *state) struct clp_state_data sd = {fid, ZPCI_FN_STATE_RESERVED}; int rc; - rrb = clp_alloc_block(GFP_KERNEL); + rrb = clp_alloc_block(GFP_ATOMIC); if (!rrb) return -ENOMEM; diff --git a/arch/sh/kernel/ftrace.c b/arch/sh/kernel/ftrace.c index 96dd9f7da2506..1b04270e5460e 100644 --- a/arch/sh/kernel/ftrace.c +++ b/arch/sh/kernel/ftrace.c @@ -321,8 +321,7 @@ int ftrace_disable_ftrace_graph_caller(void) void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr) { unsigned long old; - int faulted, err; - struct ftrace_graph_ent trace; + int faulted; unsigned long return_hooker = (unsigned long)&return_to_handler; if (unlikely(ftrace_graph_is_dead())) @@ -365,18 +364,7 @@ void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr) return; } - err = ftrace_push_return_trace(old, self_addr, &trace.depth, 0, NULL); - if (err == -EBUSY) { + if (function_graph_enter(old, self_addr, 0, NULL)) __raw_writel(old, parent); - return; - } - - trace.func = self_addr; - - /* Only trace if the calling function expects to */ - if (!ftrace_graph_entry(&trace)) { - current->curr_ret_stack--; - __raw_writel(old, parent); - } } #endif /* CONFIG_FUNCTION_GRAPH_TRACER */ diff --git a/arch/sparc/include/asm/switch_to_64.h b/arch/sparc/include/asm/switch_to_64.h index 4ff29b1406a9b..b1d4e2e3210fb 100644 --- a/arch/sparc/include/asm/switch_to_64.h +++ b/arch/sparc/include/asm/switch_to_64.h @@ -67,6 +67,7 @@ do { save_and_clear_fpu(); \ } while(0) void synchronize_user_stack(void); -void fault_in_user_windows(void); +struct pt_regs; +void fault_in_user_windows(struct pt_regs *); #endif /* __SPARC64_SWITCH_TO_64_H */ diff --git a/arch/sparc/kernel/ftrace.c 
b/arch/sparc/kernel/ftrace.c index 915dda4ae4120..684b84ce397f7 100644 --- a/arch/sparc/kernel/ftrace.c +++ b/arch/sparc/kernel/ftrace.c @@ -126,20 +126,11 @@ unsigned long prepare_ftrace_return(unsigned long parent, unsigned long frame_pointer) { unsigned long return_hooker = (unsigned long) &return_to_handler; - struct ftrace_graph_ent trace; if (unlikely(atomic_read(¤t->tracing_graph_pause))) return parent + 8UL; - trace.func = self_addr; - trace.depth = current->curr_ret_stack + 1; - - /* Only trace if the calling function expects to */ - if (!ftrace_graph_entry(&trace)) - return parent + 8UL; - - if (ftrace_push_return_trace(parent, self_addr, &trace.depth, - frame_pointer, NULL) == -EBUSY) + if (function_graph_enter(parent, self_addr, frame_pointer, NULL)) return parent + 8UL; return return_hooker; diff --git a/arch/sparc/kernel/process_64.c b/arch/sparc/kernel/process_64.c index 6c086086ca8fa..59eaf6227af1d 100644 --- a/arch/sparc/kernel/process_64.c +++ b/arch/sparc/kernel/process_64.c @@ -36,6 +36,7 @@ #include #include #include +#include #include #include @@ -521,7 +522,12 @@ static void stack_unaligned(unsigned long sp) force_sig_fault(SIGBUS, BUS_ADRALN, (void __user *) sp, 0, current); } -void fault_in_user_windows(void) +static const char uwfault32[] = KERN_INFO \ + "%s[%d]: bad register window fault: SP %08lx (orig_sp %08lx) TPC %08lx O7 %08lx\n"; +static const char uwfault64[] = KERN_INFO \ + "%s[%d]: bad register window fault: SP %016lx (orig_sp %016lx) TPC %08lx O7 %016lx\n"; + +void fault_in_user_windows(struct pt_regs *regs) { struct thread_info *t = current_thread_info(); unsigned long window; @@ -534,9 +540,9 @@ void fault_in_user_windows(void) do { struct reg_window *rwin = &t->reg_window[window]; int winsize = sizeof(struct reg_window); - unsigned long sp; + unsigned long sp, orig_sp; - sp = t->rwbuf_stkptrs[window]; + orig_sp = sp = t->rwbuf_stkptrs[window]; if (test_thread_64bit_stack(sp)) sp += STACK_BIAS; @@ -547,8 +553,16 @@ void 
fault_in_user_windows(void) stack_unaligned(sp); if (unlikely(copy_to_user((char __user *)sp, - rwin, winsize))) + rwin, winsize))) { + if (show_unhandled_signals) + printk_ratelimited(is_compat_task() ? + uwfault32 : uwfault64, + current->comm, current->pid, + sp, orig_sp, + regs->tpc, + regs->u_regs[UREG_I7]); goto barf; + } } while (window--); } set_thread_wsaved(0); @@ -556,8 +570,7 @@ void fault_in_user_windows(void) barf: set_thread_wsaved(window + 1); - user_exit(); - do_exit(SIGILL); + force_sig(SIGSEGV, current); } asmlinkage long sparc_do_fork(unsigned long clone_flags, diff --git a/arch/sparc/kernel/rtrap_64.S b/arch/sparc/kernel/rtrap_64.S index 4073e2b87dd0e..29aa34f11720c 100644 --- a/arch/sparc/kernel/rtrap_64.S +++ b/arch/sparc/kernel/rtrap_64.S @@ -39,6 +39,7 @@ __handle_preemption: wrpr %g0, RTRAP_PSTATE_IRQOFF, %pstate __handle_user_windows: + add %sp, PTREGS_OFF, %o0 call fault_in_user_windows 661: wrpr %g0, RTRAP_PSTATE, %pstate /* If userspace is using ADI, it could potentially pass diff --git a/arch/sparc/kernel/signal32.c b/arch/sparc/kernel/signal32.c index 44d379db3f648..4c5b3fcbed94c 100644 --- a/arch/sparc/kernel/signal32.c +++ b/arch/sparc/kernel/signal32.c @@ -371,7 +371,11 @@ static int setup_frame32(struct ksignal *ksig, struct pt_regs *regs, get_sigframe(ksig, regs, sigframe_size); if (invalid_frame_pointer(sf, sigframe_size)) { - do_exit(SIGILL); + if (show_unhandled_signals) + pr_info("%s[%d] bad frame in setup_frame32: %08lx TPC %08lx O7 %08lx\n", + current->comm, current->pid, (unsigned long)sf, + regs->tpc, regs->u_regs[UREG_I7]); + force_sigsegv(ksig->sig, current); return -EINVAL; } @@ -501,7 +505,11 @@ static int setup_rt_frame32(struct ksignal *ksig, struct pt_regs *regs, get_sigframe(ksig, regs, sigframe_size); if (invalid_frame_pointer(sf, sigframe_size)) { - do_exit(SIGILL); + if (show_unhandled_signals) + pr_info("%s[%d] bad frame in setup_rt_frame32: %08lx TPC %08lx O7 %08lx\n", + current->comm, current->pid, (unsigned 
long)sf, + regs->tpc, regs->u_regs[UREG_I7]); + force_sigsegv(ksig->sig, current); return -EINVAL; } diff --git a/arch/sparc/kernel/signal_64.c b/arch/sparc/kernel/signal_64.c index 48366e5eb5b26..e9de1803a22e0 100644 --- a/arch/sparc/kernel/signal_64.c +++ b/arch/sparc/kernel/signal_64.c @@ -370,7 +370,11 @@ setup_rt_frame(struct ksignal *ksig, struct pt_regs *regs) get_sigframe(ksig, regs, sf_size); if (invalid_frame_pointer (sf)) { - do_exit(SIGILL); /* won't return, actually */ + if (show_unhandled_signals) + pr_info("%s[%d] bad frame in setup_rt_frame: %016lx TPC %016lx O7 %016lx\n", + current->comm, current->pid, (unsigned long)sf, + regs->tpc, regs->u_regs[UREG_I7]); + force_sigsegv(ksig->sig, current); return -EINVAL; } diff --git a/arch/sparc/kernel/systbls_64.S b/arch/sparc/kernel/systbls_64.S index bb68c805b8918..ff9389a1c9f3f 100644 --- a/arch/sparc/kernel/systbls_64.S +++ b/arch/sparc/kernel/systbls_64.S @@ -47,9 +47,9 @@ sys_call_table32: .word sys_recvfrom, sys_setreuid16, sys_setregid16, sys_rename, compat_sys_truncate /*130*/ .word compat_sys_ftruncate, sys_flock, compat_sys_lstat64, sys_sendto, sys_shutdown .word sys_socketpair, sys_mkdir, sys_rmdir, compat_sys_utimes, compat_sys_stat64 -/*140*/ .word sys_sendfile64, sys_nis_syscall, compat_sys_futex, sys_gettid, compat_sys_getrlimit +/*140*/ .word sys_sendfile64, sys_getpeername, compat_sys_futex, sys_gettid, compat_sys_getrlimit .word compat_sys_setrlimit, sys_pivot_root, sys_prctl, sys_pciconfig_read, sys_pciconfig_write -/*150*/ .word sys_nis_syscall, sys_inotify_init, sys_inotify_add_watch, sys_poll, sys_getdents64 +/*150*/ .word sys_getsockname, sys_inotify_init, sys_inotify_add_watch, sys_poll, sys_getdents64 .word compat_sys_fcntl64, sys_inotify_rm_watch, compat_sys_statfs, compat_sys_fstatfs, sys_oldumount /*160*/ .word compat_sys_sched_setaffinity, compat_sys_sched_getaffinity, sys_getdomainname, sys_setdomainname, sys_nis_syscall .word sys_quotactl, sys_set_tid_address, 
compat_sys_mount, compat_sys_ustat, sys_setxattr diff --git a/arch/sparc/mm/init_64.c b/arch/sparc/mm/init_64.c index f396048a0d680..39822f611c015 100644 --- a/arch/sparc/mm/init_64.c +++ b/arch/sparc/mm/init_64.c @@ -1383,6 +1383,7 @@ int __node_distance(int from, int to) } return numa_latency[from][to]; } +EXPORT_SYMBOL(__node_distance); static int __init find_best_numa_node_for_mlgroup(struct mdesc_mlgroup *grp) { diff --git a/arch/um/os-Linux/skas/process.c b/arch/um/os-Linux/skas/process.c index c94c3bd70ccd7..df4a985716eba 100644 --- a/arch/um/os-Linux/skas/process.c +++ b/arch/um/os-Linux/skas/process.c @@ -610,6 +610,11 @@ int start_idle_thread(void *stack, jmp_buf *switch_buf) fatal_sigsegv(); } longjmp(*switch_buf, 1); + + /* unreachable */ + printk(UM_KERN_ERR "impossible long jump!"); + fatal_sigsegv(); + return 0; } void initial_thread_cb_skas(void (*proc)(void *), void *arg) diff --git a/arch/x86/Kbuild b/arch/x86/Kbuild index 0038a2d10a7a5..466219296cd6f 100644 --- a/arch/x86/Kbuild +++ b/arch/x86/Kbuild @@ -7,6 +7,8 @@ obj-$(CONFIG_KVM) += kvm/ # Xen paravirtualization support obj-$(CONFIG_XEN) += xen/ +obj-$(CONFIG_ACRN) += acrn/ + # Hyper-V paravirtualization support obj-$(subst m,y,$(CONFIG_HYPERV)) += hyperv/ diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig index 1a0be022f91d8..6cb07933d4063 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig @@ -441,10 +441,6 @@ config RETPOLINE branches. Requires a compiler with -mindirect-branch=thunk-extern support for full protection. The kernel may run slower. - Without compiler support, at least indirect branches in assembler - code are eliminated. Since this includes the syscall entry path, - it is not entirely pointless. - config INTEL_RDT bool "Intel Resource Director Technology support" default n @@ -782,6 +778,8 @@ config QUEUED_LOCK_STAT behavior of paravirtualized queued spinlocks and report them on debugfs. 
+source "arch/x86/acrn/Kconfig" + source "arch/x86/xen/Kconfig" config KVM_GUEST @@ -1005,13 +1003,7 @@ config NR_CPUS to the kernel image. config SCHED_SMT - bool "SMT (Hyperthreading) scheduler support" - depends on SMP - ---help--- - SMT scheduler support improves the CPU scheduler's decision making - when dealing with Intel Pentium 4 chips with HyperThreading at a - cost of slightly increased overhead in some places. If unsure say - N here. + def_bool y if SMP config SCHED_MC def_bool y @@ -1491,6 +1483,14 @@ config X86_DIRECT_GBPAGES supports them), so don't confuse the user by printing that we have them enabled. +config X86_CPA_STATISTICS + bool "Enable statistic for Change Page Attribute" + depends on DEBUG_FS + ---help--- + Expose statistics about the Change Page Attribute mechanims, which + helps to determine the effectivness of preserving large and huge + page mappings when mapping protections are changed. + config ARCH_HAS_MEM_ENCRYPT def_bool y diff --git a/arch/x86/Makefile b/arch/x86/Makefile index 8f6e7eb8ae9fc..b84f61bc5e7af 100644 --- a/arch/x86/Makefile +++ b/arch/x86/Makefile @@ -223,9 +223,7 @@ KBUILD_CFLAGS += -fno-asynchronous-unwind-tables # Avoid indirect branches in kernel to deal with Spectre ifdef CONFIG_RETPOLINE -ifneq ($(RETPOLINE_CFLAGS),) - KBUILD_CFLAGS += $(RETPOLINE_CFLAGS) -DRETPOLINE -endif + KBUILD_CFLAGS += $(RETPOLINE_CFLAGS) endif archscripts: scripts_basic @@ -302,6 +300,13 @@ ifndef CC_HAVE_ASM_GOTO @echo Compiler lacks asm-goto support. @exit 1 endif +ifdef CONFIG_RETPOLINE +ifeq ($(RETPOLINE_CFLAGS),) + @echo "You are building kernel with non-retpoline compiler." >&2 + @echo "Please update your compiler." 
>&2 + @false +endif +endif archclean: $(Q)rm -rf $(objtree)/arch/i386 diff --git a/arch/x86/acrn/Kconfig b/arch/x86/acrn/Kconfig new file mode 100644 index 0000000000000..ce0abc8cdcadf --- /dev/null +++ b/arch/x86/acrn/Kconfig @@ -0,0 +1,12 @@ +# SPDX-License-Identifier: GPL-2.0 +# +# This Kconfig describes ACRN options +# + +config ACRN + bool "Enable services run on ACRN hypervisor" + depends on X86_64 + depends on PARAVIRT + help + This option is needed if were to run ACRN services linux on top of + ACRN hypervisor. diff --git a/arch/x86/acrn/Makefile b/arch/x86/acrn/Makefile new file mode 100644 index 0000000000000..d961d8c5ee938 --- /dev/null +++ b/arch/x86/acrn/Makefile @@ -0,0 +1,2 @@ +# SPDX-License-Identifier: GPL-2.0 +obj-$(CONFIG_ACRN) += acrn.o diff --git a/arch/x86/acrn/acrn.c b/arch/x86/acrn/acrn.c new file mode 100644 index 0000000000000..6b01b27794a18 --- /dev/null +++ b/arch/x86/acrn/acrn.c @@ -0,0 +1,110 @@ +/* + * ACRN hypervisor support + * + * Copyright (C) 2017 Intel Corporation. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + * Jason Chen CJ + * + */ +#include +#include +#include +#include +#include + +static uint32_t __init acrn_detect(void) +{ + return hypervisor_cpuid_base("ACRNACRNACRN\0\0", 0); +} + +static void __init acrn_init_platform(void) +{ + alloc_intr_gate(HYPERVISOR_CALLBACK_VECTOR, + acrn_hv_callback_vector); +} + +static void acrn_pin_vcpu(int cpu) +{ + /* do nothing here now */ +} + +static bool acrn_x2apic_available(void) +{ + /* ACRN supports x2apic emulation */ + return true; +} + +static void __init acrn_init_mem_mapping(void) +{ + /* do nothing here now */ +} + + +static void (*acrn_intr_handler)(void); +/* + * Handler for ACRN_HV_CALLBACK. 
+ */ +__visible void acrn_hv_vector_handler(struct pt_regs *regs) +{ + struct pt_regs *old_regs = set_irq_regs(regs); + + entering_ack_irq(); +#ifdef CONFIG_X86 + inc_irq_stat(irq_hv_callback_count); +#endif + + if (acrn_intr_handler) + acrn_intr_handler(); + + exiting_irq(); + set_irq_regs(old_regs); +} + +void acrn_setup_intr_irq(void (*handler)(void)) +{ + acrn_intr_handler = handler; +} + +void acrn_remove_intr_irq(void) +{ + acrn_intr_handler = NULL; +} + +const struct hypervisor_x86 x86_hyper_acrn = { + .name = "ACRN", + .detect = acrn_detect, + .type = X86_HYPER_ACRN, + .init.init_platform = acrn_init_platform, + .runtime.pin_vcpu = acrn_pin_vcpu, + .init.x2apic_available = acrn_x2apic_available, + .init.init_mem_mapping = acrn_init_mem_mapping, +}; +EXPORT_SYMBOL(x86_hyper_acrn); +EXPORT_SYMBOL(acrn_setup_intr_irq); +EXPORT_SYMBOL(acrn_remove_intr_irq); diff --git a/arch/x86/boot/compressed/eboot.c b/arch/x86/boot/compressed/eboot.c index 1458b1700fc7e..544ac4fafd112 100644 --- a/arch/x86/boot/compressed/eboot.c +++ b/arch/x86/boot/compressed/eboot.c @@ -1,3 +1,4 @@ + /* ----------------------------------------------------------------------- * * Copyright 2011 Intel Corporation; author Matt Fleming @@ -634,37 +635,54 @@ static efi_status_t alloc_e820ext(u32 nr_desc, struct setup_data **e820ext, return status; } +static efi_status_t allocate_e820(struct boot_params *params, + struct setup_data **e820ext, + u32 *e820ext_size) +{ + unsigned long map_size, desc_size, buff_size; + struct efi_boot_memmap boot_map; + efi_memory_desc_t *map; + efi_status_t status; + __u32 nr_desc; + + boot_map.map = ↦ + boot_map.map_size = &map_size; + boot_map.desc_size = &desc_size; + boot_map.desc_ver = NULL; + boot_map.key_ptr = NULL; + boot_map.buff_size = &buff_size; + + status = efi_get_memory_map(sys_table, &boot_map); + if (status != EFI_SUCCESS) + return status; + + nr_desc = buff_size / desc_size; + + if (nr_desc > ARRAY_SIZE(params->e820_table)) { + u32 nr_e820ext = 
nr_desc - ARRAY_SIZE(params->e820_table); + + status = alloc_e820ext(nr_e820ext, e820ext, e820ext_size); + if (status != EFI_SUCCESS) + return status; + } + + return EFI_SUCCESS; +} + struct exit_boot_struct { struct boot_params *boot_params; struct efi_info *efi; - struct setup_data *e820ext; - __u32 e820ext_size; }; static efi_status_t exit_boot_func(efi_system_table_t *sys_table_arg, struct efi_boot_memmap *map, void *priv) { - static bool first = true; const char *signature; __u32 nr_desc; efi_status_t status; struct exit_boot_struct *p = priv; - if (first) { - nr_desc = *map->buff_size / *map->desc_size; - if (nr_desc > ARRAY_SIZE(p->boot_params->e820_table)) { - u32 nr_e820ext = nr_desc - - ARRAY_SIZE(p->boot_params->e820_table); - - status = alloc_e820ext(nr_e820ext, &p->e820ext, - &p->e820ext_size); - if (status != EFI_SUCCESS) - return status; - } - first = false; - } - signature = efi_is_64bit() ? EFI64_LOADER_SIGNATURE : EFI32_LOADER_SIGNATURE; memcpy(&p->efi->efi_loader_signature, signature, sizeof(__u32)); @@ -687,8 +705,8 @@ static efi_status_t exit_boot(struct boot_params *boot_params, void *handle) { unsigned long map_sz, key, desc_size, buff_size; efi_memory_desc_t *mem_map; - struct setup_data *e820ext; - __u32 e820ext_size; + struct setup_data *e820ext = NULL; + __u32 e820ext_size = 0; efi_status_t status; __u32 desc_version; struct efi_boot_memmap map; @@ -702,8 +720,10 @@ static efi_status_t exit_boot(struct boot_params *boot_params, void *handle) map.buff_size = &buff_size; priv.boot_params = boot_params; priv.efi = &boot_params->efi_info; - priv.e820ext = NULL; - priv.e820ext_size = 0; + + status = allocate_e820(boot_params, &e820ext, &e820ext_size); + if (status != EFI_SUCCESS) + return status; /* Might as well exit boot services now */ status = efi_exit_boot_services(sys_table, handle, &map, &priv, @@ -711,9 +731,6 @@ static efi_status_t exit_boot(struct boot_params *boot_params, void *handle) if (status != EFI_SUCCESS) return status; - 
e820ext = priv.e820ext; - e820ext_size = priv.e820ext_size; - /* Historic? */ boot_params->alt_mem_k = 32 * 1024; @@ -738,6 +755,7 @@ efi_main(struct efi_config *c, struct boot_params *boot_params) struct desc_struct *desc; void *handle; efi_system_table_t *_table; + unsigned long cmdline_paddr; efi_early = c; @@ -755,6 +773,15 @@ efi_main(struct efi_config *c, struct boot_params *boot_params) else setup_boot_services32(efi_early); + /* + * make_boot_params() may have been called before efi_main(), in which + * case this is the second time we parse the cmdline. This is ok, + * parsing the cmdline multiple times does not have side-effects. + */ + cmdline_paddr = ((u64)hdr->cmd_line_ptr | + ((u64)boot_params->ext_cmd_line_ptr << 32)); + efi_parse_options((char *)cmdline_paddr); + /* * If the boot loader gave us a value for secure_boot then we use that, * otherwise we ask the BIOS. diff --git a/arch/x86/boot/tools/build.c b/arch/x86/boot/tools/build.c index d4e6cd4577e5d..bf0e824003584 100644 --- a/arch/x86/boot/tools/build.c +++ b/arch/x86/boot/tools/build.c @@ -391,6 +391,13 @@ int main(int argc, char ** argv) die("Unable to mmap '%s': %m", argv[2]); /* Number of 16-byte paragraphs, including space for a 4-byte CRC */ sys_size = (sz + 15 + 4) / 16; +#ifdef CONFIG_EFI_STUB + /* + * COFF requires minimum 32-byte alignment of sections, and + * adding a signature is problematic without that alignment. + */ + sys_size = (sys_size + 1) & ~1; +#endif /* Patch the setup code with the appropriate size parameters */ buf[0x1f1] = setup_sectors-1; diff --git a/arch/x86/configs/android_test_defconfig b/arch/x86/configs/android_test_defconfig new file mode 100644 index 0000000000000..82ef1999e2a75 --- /dev/null +++ b/arch/x86/configs/android_test_defconfig @@ -0,0 +1,6285 @@ +# +# Automatically generated file; DO NOT EDIT. 
+# Linux/x86_64 4.19.0 Kernel Configuration +# + +# +# Compiler: x86_64-poky-linux-gcc (GCC) 7.3.0 +# +CONFIG_CC_IS_GCC=y +CONFIG_GCC_VERSION=70300 +CONFIG_CLANG_VERSION=0 +CONFIG_IRQ_WORK=y +CONFIG_BUILDTIME_EXTABLE_SORT=y +CONFIG_THREAD_INFO_IN_TASK=y + +# +# General setup +# +CONFIG_INIT_ENV_ARG_LIMIT=32 +# CONFIG_COMPILE_TEST is not set +CONFIG_LOCALVERSION="-quilt-2e5dc0ac" +CONFIG_LOCALVERSION_AUTO=y +CONFIG_BUILD_SALT="" +CONFIG_HAVE_KERNEL_GZIP=y +CONFIG_HAVE_KERNEL_BZIP2=y +CONFIG_HAVE_KERNEL_LZMA=y +CONFIG_HAVE_KERNEL_XZ=y +CONFIG_HAVE_KERNEL_LZO=y +CONFIG_HAVE_KERNEL_LZ4=y +CONFIG_KERNEL_GZIP=y +# CONFIG_KERNEL_BZIP2 is not set +# CONFIG_KERNEL_LZMA is not set +# CONFIG_KERNEL_XZ is not set +# CONFIG_KERNEL_LZO is not set +# CONFIG_KERNEL_LZ4 is not set +CONFIG_DEFAULT_HOSTNAME="localhost" +CONFIG_SWAP=y +# CONFIG_SYSVIPC is not set +CONFIG_POSIX_MQUEUE=y +CONFIG_POSIX_MQUEUE_SYSCTL=y +CONFIG_CROSS_MEMORY_ATTACH=y +# CONFIG_USELIB is not set +CONFIG_AUDIT=y +CONFIG_HAVE_ARCH_AUDITSYSCALL=y +CONFIG_AUDITSYSCALL=y +CONFIG_AUDIT_WATCH=y +CONFIG_AUDIT_TREE=y + +# +# IRQ subsystem +# +CONFIG_GENERIC_IRQ_PROBE=y +CONFIG_GENERIC_IRQ_SHOW=y +CONFIG_GENERIC_IRQ_EFFECTIVE_AFF_MASK=y +CONFIG_GENERIC_PENDING_IRQ=y +CONFIG_GENERIC_IRQ_MIGRATION=y +CONFIG_IRQ_DOMAIN=y +CONFIG_IRQ_DOMAIN_HIERARCHY=y +CONFIG_GENERIC_MSI_IRQ=y +CONFIG_GENERIC_MSI_IRQ_DOMAIN=y +CONFIG_GENERIC_IRQ_MATRIX_ALLOCATOR=y +CONFIG_GENERIC_IRQ_RESERVATION_MODE=y +CONFIG_IRQ_FORCED_THREADING=y +CONFIG_SPARSE_IRQ=y +# CONFIG_GENERIC_IRQ_DEBUGFS is not set +CONFIG_CLOCKSOURCE_WATCHDOG=y +CONFIG_ARCH_CLOCKSOURCE_DATA=y +CONFIG_CLOCKSOURCE_VALIDATE_LAST_CYCLE=y +CONFIG_GENERIC_TIME_VSYSCALL=y +CONFIG_GENERIC_CLOCKEVENTS=y +CONFIG_GENERIC_CLOCKEVENTS_BROADCAST=y +CONFIG_GENERIC_CLOCKEVENTS_MIN_ADJUST=y +CONFIG_GENERIC_CMOS_UPDATE=y + +# +# Timers subsystem +# +CONFIG_TICK_ONESHOT=y +CONFIG_NO_HZ_COMMON=y +# CONFIG_HZ_PERIODIC is not set +CONFIG_NO_HZ_IDLE=y +# CONFIG_NO_HZ_FULL is not set 
+CONFIG_NO_HZ=y +CONFIG_HIGH_RES_TIMERS=y +# CONFIG_ANDROID_AUTO_SUSPEND_BEHAVIOR is not set +# CONFIG_PREEMPT_NONE is not set +# CONFIG_PREEMPT_VOLUNTARY is not set +CONFIG_PREEMPT=y +CONFIG_PREEMPT_COUNT=y + +# +# CPU/Task time and stats accounting +# +CONFIG_TICK_CPU_ACCOUNTING=y +# CONFIG_VIRT_CPU_ACCOUNTING_GEN is not set +# CONFIG_IRQ_TIME_ACCOUNTING is not set +CONFIG_BSD_PROCESS_ACCT=y +# CONFIG_BSD_PROCESS_ACCT_V3 is not set +CONFIG_TASKSTATS=y +CONFIG_TASK_DELAY_ACCT=y +CONFIG_TASK_XACCT=y +CONFIG_TASK_IO_ACCOUNTING=y +# CONFIG_CPU_ISOLATION is not set + +# +# RCU Subsystem +# +CONFIG_PREEMPT_RCU=y +# CONFIG_RCU_EXPERT is not set +CONFIG_SRCU=y +CONFIG_TREE_SRCU=y +CONFIG_TASKS_RCU=y +CONFIG_RCU_STALL_COMMON=y +CONFIG_RCU_NEED_SEGCBLIST=y +CONFIG_BUILD_BIN2C=y +CONFIG_IKCONFIG=y +CONFIG_IKCONFIG_PROC=y +CONFIG_LOG_BUF_SHIFT=18 +CONFIG_LOG_CPU_MAX_BUF_SHIFT=12 +CONFIG_PRINTK_SAFE_LOG_BUF_SHIFT=13 +CONFIG_HAVE_UNSTABLE_SCHED_CLOCK=y +CONFIG_ARCH_SUPPORTS_NUMA_BALANCING=y +CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH=y +CONFIG_ARCH_SUPPORTS_INT128=y +CONFIG_CGROUPS=y +CONFIG_PAGE_COUNTER=y +CONFIG_MEMCG=y +CONFIG_MEMCG_SWAP=y +CONFIG_MEMCG_SWAP_ENABLED=y +CONFIG_MEMCG_KMEM=y +# CONFIG_BLK_CGROUP is not set +CONFIG_CGROUP_SCHED=y +CONFIG_FAIR_GROUP_SCHED=y +# CONFIG_CFS_BANDWIDTH is not set +CONFIG_RT_GROUP_SCHED=y +# CONFIG_CGROUP_PIDS is not set +# CONFIG_CGROUP_RDMA is not set +CONFIG_CGROUP_FREEZER=y +# CONFIG_CGROUP_HUGETLB is not set +CONFIG_CPUSETS=y +CONFIG_PROC_PID_CPUSET=y +# CONFIG_CGROUP_DEVICE is not set +CONFIG_CGROUP_CPUACCT=y +# CONFIG_CGROUP_PERF is not set +CONFIG_CGROUP_BPF=y +CONFIG_CGROUP_DEBUG=y +CONFIG_SOCK_CGROUP_DATA=y +CONFIG_NAMESPACES=y +CONFIG_UTS_NS=y +CONFIG_IPC_NS=y +# CONFIG_USER_NS is not set +CONFIG_PID_NS=y +CONFIG_NET_NS=y +# CONFIG_CHECKPOINT_RESTORE is not set +CONFIG_SCHED_AUTOGROUP=y +# CONFIG_SCHED_TUNE is not set +# CONFIG_SYSFS_DEPRECATED is not set +CONFIG_RELAY=y +CONFIG_BLK_DEV_INITRD=y +CONFIG_INITRAMFS_SOURCE="" 
+CONFIG_RD_GZIP=y +CONFIG_RD_BZIP2=y +CONFIG_RD_LZMA=y +CONFIG_RD_XZ=y +CONFIG_RD_LZO=y +CONFIG_RD_LZ4=y +CONFIG_CC_OPTIMIZE_FOR_PERFORMANCE=y +# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set +CONFIG_SYSCTL=y +CONFIG_ANON_INODES=y +CONFIG_HAVE_UID16=y +CONFIG_SYSCTL_EXCEPTION_TRACE=y +CONFIG_HAVE_PCSPKR_PLATFORM=y +CONFIG_BPF=y +CONFIG_EXPERT=y +CONFIG_UID16=y +CONFIG_MULTIUSER=y +# CONFIG_SGETMASK_SYSCALL is not set +# CONFIG_SYSFS_SYSCALL is not set +# CONFIG_SYSCTL_SYSCALL is not set +# CONFIG_FHANDLE is not set +CONFIG_POSIX_TIMERS=y +CONFIG_PRINTK=y +CONFIG_PRINTK_NMI=y +CONFIG_BUG=y +CONFIG_ELF_CORE=y +CONFIG_PCSPKR_PLATFORM=y +CONFIG_BASE_FULL=y +CONFIG_FUTEX=y +CONFIG_FUTEX_PI=y +CONFIG_EPOLL=y +CONFIG_SIGNALFD=y +CONFIG_TIMERFD=y +CONFIG_EVENTFD=y +CONFIG_SHMEM=y +CONFIG_AIO=y +CONFIG_ADVISE_SYSCALLS=y +CONFIG_MEMBARRIER=y +CONFIG_KALLSYMS=y +CONFIG_KALLSYMS_ALL=y +CONFIG_KALLSYMS_ABSOLUTE_PERCPU=y +CONFIG_KALLSYMS_BASE_RELATIVE=y +CONFIG_BPF_SYSCALL=y +# CONFIG_USERFAULTFD is not set +CONFIG_ARCH_HAS_MEMBARRIER_SYNC_CORE=y +CONFIG_RSEQ=y +# CONFIG_DEBUG_RSEQ is not set +CONFIG_EMBEDDED=y +CONFIG_HAVE_PERF_EVENTS=y +# CONFIG_PC104 is not set + +# +# Kernel Performance Events And Counters +# +CONFIG_PERF_EVENTS=y +# CONFIG_DEBUG_PERF_USE_VMALLOC is not set +CONFIG_VM_EVENT_COUNTERS=y +# CONFIG_COMPAT_BRK is not set +CONFIG_SLAB=y +# CONFIG_SLUB is not set +# CONFIG_SLOB is not set +CONFIG_SLAB_MERGE_DEFAULT=y +CONFIG_SLAB_FREELIST_RANDOM=y +CONFIG_SYSTEM_DATA_VERIFICATION=y +CONFIG_PROFILING=y +CONFIG_64BIT=y +CONFIG_X86_64=y +CONFIG_X86=y +CONFIG_INSTRUCTION_DECODER=y +CONFIG_OUTPUT_FORMAT="elf64-x86-64" +CONFIG_ARCH_DEFCONFIG="arch/x86/configs/x86_64_defconfig" +CONFIG_LOCKDEP_SUPPORT=y +CONFIG_STACKTRACE_SUPPORT=y +CONFIG_MMU=y +CONFIG_ARCH_MMAP_RND_BITS_MIN=28 +CONFIG_ARCH_MMAP_RND_BITS_MAX=32 +CONFIG_ARCH_MMAP_RND_COMPAT_BITS_MIN=8 +CONFIG_ARCH_MMAP_RND_COMPAT_BITS_MAX=16 +CONFIG_GENERIC_ISA_DMA=y +CONFIG_GENERIC_BUG=y +CONFIG_GENERIC_BUG_RELATIVE_POINTERS=y 
+CONFIG_GENERIC_HWEIGHT=y +CONFIG_ARCH_MAY_HAVE_PC_FDC=y +CONFIG_RWSEM_XCHGADD_ALGORITHM=y +CONFIG_GENERIC_CALIBRATE_DELAY=y +CONFIG_ARCH_HAS_CPU_RELAX=y +CONFIG_ARCH_HAS_CACHE_LINE_SIZE=y +CONFIG_ARCH_HAS_FILTER_PGPROT=y +CONFIG_HAVE_SETUP_PER_CPU_AREA=y +CONFIG_NEED_PER_CPU_EMBED_FIRST_CHUNK=y +CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK=y +CONFIG_ARCH_HIBERNATION_POSSIBLE=y +CONFIG_ARCH_SUSPEND_POSSIBLE=y +CONFIG_ARCH_WANT_HUGE_PMD_SHARE=y +CONFIG_ARCH_WANT_GENERAL_HUGETLB=y +CONFIG_ZONE_DMA32=y +CONFIG_AUDIT_ARCH=y +CONFIG_ARCH_SUPPORTS_OPTIMIZED_INLINING=y +CONFIG_ARCH_SUPPORTS_DEBUG_PAGEALLOC=y +CONFIG_HAVE_INTEL_TXT=y +CONFIG_X86_64_SMP=y +CONFIG_ARCH_SUPPORTS_UPROBES=y +CONFIG_FIX_EARLYCON_MEM=y +CONFIG_PGTABLE_LEVELS=4 +CONFIG_CC_HAS_SANE_STACKPROTECTOR=y + +# +# Processor type and features +# +CONFIG_ZONE_DMA=y +CONFIG_SMP=y +CONFIG_X86_FEATURE_NAMES=y +# CONFIG_X86_X2APIC is not set +CONFIG_X86_MPPARSE=y +# CONFIG_GOLDFISH is not set +CONFIG_RETPOLINE=y +# CONFIG_INTEL_RDT is not set +# CONFIG_X86_EXTENDED_PLATFORM is not set +CONFIG_X86_INTEL_LPSS=y +# CONFIG_X86_AMD_PLATFORM_DEVICE is not set +CONFIG_IOSF_MBI=y +# CONFIG_IOSF_MBI_DEBUG is not set +CONFIG_X86_SUPPORTS_MEMORY_FAILURE=y +CONFIG_SCHED_OMIT_FRAME_POINTER=y +CONFIG_HYPERVISOR_GUEST=y +# CONFIG_PARAVIRT is not set +# CONFIG_JAILHOUSE_GUEST is not set +CONFIG_NO_BOOTMEM=y +# CONFIG_MK8 is not set +# CONFIG_MPSC is not set +CONFIG_MCORE2=y +# CONFIG_MATOM is not set +# CONFIG_GENERIC_CPU is not set +CONFIG_X86_INTERNODE_CACHE_SHIFT=6 +CONFIG_X86_L1_CACHE_SHIFT=6 +CONFIG_X86_INTEL_USERCOPY=y +CONFIG_X86_USE_PPRO_CHECKSUM=y +CONFIG_X86_P6_NOP=y +CONFIG_X86_TSC=y +CONFIG_X86_CMPXCHG64=y +CONFIG_X86_CMOV=y +CONFIG_X86_MINIMUM_CPU_FAMILY=64 +CONFIG_X86_DEBUGCTLMSR=y +# CONFIG_PROCESSOR_SELECT is not set +CONFIG_CPU_SUP_INTEL=y +CONFIG_CPU_SUP_AMD=y +CONFIG_CPU_SUP_CENTAUR=y +CONFIG_HPET_TIMER=y +CONFIG_HPET_EMULATE_RTC=y +CONFIG_DMI=y +CONFIG_GART_IOMMU=y +# CONFIG_CALGARY_IOMMU is not set +# CONFIG_MAXSMP 
is not set +CONFIG_NR_CPUS_RANGE_BEGIN=2 +CONFIG_NR_CPUS_RANGE_END=512 +CONFIG_NR_CPUS_DEFAULT=64 +CONFIG_NR_CPUS=32 +CONFIG_SCHED_SMT=y +CONFIG_SCHED_MC=y +CONFIG_SCHED_MC_PRIO=y +CONFIG_X86_LOCAL_APIC=y +CONFIG_X86_IO_APIC=y +CONFIG_X86_REROUTE_FOR_BROKEN_BOOT_IRQS=y +CONFIG_X86_MCE=y +# CONFIG_X86_MCELOG_LEGACY is not set +CONFIG_X86_MCE_INTEL=y +# CONFIG_X86_MCE_AMD is not set +CONFIG_X86_MCE_THRESHOLD=y +# CONFIG_X86_MCE_INJECT is not set +CONFIG_X86_THERMAL_VECTOR=y + +# +# Performance monitoring +# +CONFIG_PERF_EVENTS_INTEL_UNCORE=y +CONFIG_PERF_EVENTS_INTEL_RAPL=y +CONFIG_PERF_EVENTS_INTEL_CSTATE=y +# CONFIG_PERF_EVENTS_AMD_POWER is not set +CONFIG_X86_VSYSCALL_EMULATION=y +CONFIG_I8K=m +CONFIG_MICROCODE=y +CONFIG_MICROCODE_INTEL=y +# CONFIG_MICROCODE_AMD is not set +CONFIG_MICROCODE_OLD_INTERFACE=y +# CONFIG_X86_MSR is not set +CONFIG_X86_CPUID=y +# CONFIG_X86_5LEVEL is not set +CONFIG_X86_DIRECT_GBPAGES=y +# CONFIG_X86_CPA_STATISTICS is not set +CONFIG_ARCH_HAS_MEM_ENCRYPT=y +# CONFIG_AMD_MEM_ENCRYPT is not set +# CONFIG_NUMA is not set +CONFIG_ARCH_SPARSEMEM_ENABLE=y +CONFIG_ARCH_SPARSEMEM_DEFAULT=y +CONFIG_ARCH_SELECT_MEMORY_MODEL=y +CONFIG_ILLEGAL_POINTER_VALUE=0xdead000000000000 +# CONFIG_X86_PMEM_LEGACY is not set +CONFIG_X86_CHECK_BIOS_CORRUPTION=y +CONFIG_X86_BOOTPARAM_MEMORY_CORRUPTION_CHECK=y +CONFIG_X86_RESERVE_LOW=64 +CONFIG_MTRR=y +CONFIG_MTRR_SANITIZER=y +CONFIG_MTRR_SANITIZER_ENABLE_DEFAULT=1 +CONFIG_MTRR_SANITIZER_SPARE_REG_NR_DEFAULT=1 +CONFIG_X86_PAT=y +CONFIG_ARCH_USES_PG_UNCACHED=y +CONFIG_ARCH_RANDOM=y +CONFIG_X86_SMAP=y +CONFIG_X86_INTEL_UMIP=y +# CONFIG_X86_INTEL_MPX is not set +CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS=y +CONFIG_EFI=y +CONFIG_EFI_STUB=y +# CONFIG_EFI_MIXED is not set +CONFIG_SECCOMP=y +# CONFIG_HZ_100 is not set +# CONFIG_HZ_250 is not set +# CONFIG_HZ_300 is not set +CONFIG_HZ_1000=y +CONFIG_HZ=1000 +CONFIG_SCHED_HRTICK=y +# CONFIG_KEXEC is not set +# CONFIG_KEXEC_FILE is not set +# CONFIG_CRASH_DUMP is not set 
+CONFIG_PHYSICAL_START=0x1000000 +CONFIG_RELOCATABLE=y +CONFIG_RANDOMIZE_BASE=y +CONFIG_X86_NEED_RELOCS=y +CONFIG_PHYSICAL_ALIGN=0x1000000 +CONFIG_DYNAMIC_MEMORY_LAYOUT=y +CONFIG_RANDOMIZE_MEMORY=y +CONFIG_RANDOMIZE_MEMORY_PHYSICAL_PADDING=0x0 +CONFIG_HOTPLUG_CPU=y +# CONFIG_BOOTPARAM_HOTPLUG_CPU0 is not set +# CONFIG_DEBUG_HOTPLUG_CPU0 is not set +# CONFIG_COMPAT_VDSO is not set +# CONFIG_LEGACY_VSYSCALL_EMULATE is not set +CONFIG_LEGACY_VSYSCALL_NONE=y +# CONFIG_CMDLINE_BOOL is not set +# CONFIG_MODIFY_LDT_SYSCALL is not set +CONFIG_HAVE_LIVEPATCH=y +CONFIG_ARCH_HAS_ADD_PAGES=y +CONFIG_ARCH_ENABLE_MEMORY_HOTPLUG=y +CONFIG_ARCH_ENABLE_SPLIT_PMD_PTLOCK=y +CONFIG_ARCH_ENABLE_HUGEPAGE_MIGRATION=y +CONFIG_ARCH_ENABLE_THP_MIGRATION=y + +# +# Power management and ACPI options +# +CONFIG_SUSPEND=y +CONFIG_SUSPEND_FREEZER=y +# CONFIG_SUSPEND_SKIP_SYNC is not set +# CONFIG_HIBERNATION is not set +CONFIG_PM_SLEEP=y +CONFIG_PM_SLEEP_SMP=y +CONFIG_PM_AUTOSLEEP=y +CONFIG_PM_WAKELOCKS=y +CONFIG_PM_WAKELOCKS_LIMIT=100 +CONFIG_PM_WAKELOCKS_GC=y +CONFIG_PM=y +CONFIG_PM_DEBUG=y +CONFIG_PM_ADVANCED_DEBUG=y +# CONFIG_PM_TEST_SUSPEND is not set +CONFIG_PM_SLEEP_DEBUG=y +# CONFIG_DPM_WATCHDOG is not set +CONFIG_PM_TRACE=y +CONFIG_PM_TRACE_RTC=y +CONFIG_PM_CLK=y +CONFIG_WQ_POWER_EFFICIENT_DEFAULT=y +# CONFIG_ENERGY_MODEL is not set +CONFIG_ARCH_SUPPORTS_ACPI=y +CONFIG_ACPI=y +CONFIG_ACPI_LEGACY_TABLES_LOOKUP=y +CONFIG_ARCH_MIGHT_HAVE_ACPI_PDC=y +CONFIG_ACPI_SYSTEM_POWER_STATES_SUPPORT=y +# CONFIG_ACPI_DEBUGGER is not set +CONFIG_ACPI_SPCR_TABLE=y +CONFIG_ACPI_LPIT=y +CONFIG_ACPI_SLEEP=y +# CONFIG_ACPI_PROCFS_POWER is not set +CONFIG_ACPI_REV_OVERRIDE_POSSIBLE=y +# CONFIG_ACPI_EC_DEBUGFS is not set +CONFIG_ACPI_AC=y +CONFIG_ACPI_BATTERY=y +CONFIG_ACPI_BUTTON=y +CONFIG_ACPI_VIDEO=y +CONFIG_ACPI_FAN=y +# CONFIG_ACPI_TAD is not set +CONFIG_ACPI_DOCK=y +CONFIG_ACPI_CPU_FREQ_PSS=y +CONFIG_ACPI_PROCESSOR_CSTATE=y +CONFIG_ACPI_PROCESSOR_IDLE=y +CONFIG_ACPI_CPPC_LIB=y +CONFIG_ACPI_PROCESSOR=y 
+CONFIG_ACPI_HOTPLUG_CPU=y +# CONFIG_ACPI_PROCESSOR_AGGREGATOR is not set +CONFIG_ACPI_THERMAL=y +CONFIG_ARCH_HAS_ACPI_TABLE_UPGRADE=y +CONFIG_ACPI_TABLE_UPGRADE=y +# CONFIG_ACPI_DEBUG is not set +# CONFIG_ACPI_PCI_SLOT is not set +CONFIG_ACPI_CONTAINER=y +CONFIG_ACPI_HOTPLUG_IOAPIC=y +# CONFIG_ACPI_SBS is not set +# CONFIG_ACPI_HED is not set +# CONFIG_ACPI_CUSTOM_METHOD is not set +# CONFIG_ACPI_BGRT is not set +# CONFIG_ACPI_REDUCED_HARDWARE_ONLY is not set +# CONFIG_ACPI_NFIT is not set +CONFIG_HAVE_ACPI_APEI=y +CONFIG_HAVE_ACPI_APEI_NMI=y +CONFIG_ACPI_APEI=y +# CONFIG_ACPI_APEI_GHES is not set +# CONFIG_ACPI_APEI_PCIEAER is not set +# CONFIG_ACPI_APEI_EINJ is not set +# CONFIG_ACPI_APEI_ERST_DEBUG is not set +# CONFIG_DPTF_POWER is not set +CONFIG_PMIC_OPREGION=y +CONFIG_CRC_PMIC_OPREGION=y +CONFIG_BXT_WC_PMIC_OPREGION=y +# CONFIG_ACPI_CONFIGFS is not set +CONFIG_X86_PM_TIMER=y +# CONFIG_SFI is not set + +# +# CPU Frequency scaling +# +CONFIG_CPU_FREQ=y +CONFIG_CPU_FREQ_GOV_ATTR_SET=y +CONFIG_CPU_FREQ_GOV_COMMON=y +# CONFIG_CPU_FREQ_STAT is not set +# CONFIG_CPU_FREQ_TIMES is not set +# CONFIG_CPU_FREQ_DEFAULT_GOV_PERFORMANCE is not set +# CONFIG_CPU_FREQ_DEFAULT_GOV_POWERSAVE is not set +# CONFIG_CPU_FREQ_DEFAULT_GOV_USERSPACE is not set +CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND=y +# CONFIG_CPU_FREQ_DEFAULT_GOV_CONSERVATIVE is not set +# CONFIG_CPU_FREQ_DEFAULT_GOV_SCHEDUTIL is not set +CONFIG_CPU_FREQ_GOV_PERFORMANCE=y +CONFIG_CPU_FREQ_GOV_POWERSAVE=y +# CONFIG_CPU_FREQ_GOV_USERSPACE is not set +CONFIG_CPU_FREQ_GOV_ONDEMAND=y +# CONFIG_CPU_FREQ_GOV_CONSERVATIVE is not set +# CONFIG_CPU_FREQ_GOV_SCHEDUTIL is not set + +# +# CPU frequency scaling drivers +# +CONFIG_X86_INTEL_PSTATE=y +CONFIG_X86_PCC_CPUFREQ=y +CONFIG_X86_ACPI_CPUFREQ=y +CONFIG_X86_ACPI_CPUFREQ_CPB=y +# CONFIG_X86_POWERNOW_K8 is not set +# CONFIG_X86_AMD_FREQ_SENSITIVITY is not set +# CONFIG_X86_SPEEDSTEP_CENTRINO is not set +# CONFIG_X86_P4_CLOCKMOD is not set + +# +# shared options +# + +# +# CPU 
Idle +# +CONFIG_CPU_IDLE=y +CONFIG_CPU_IDLE_GOV_LADDER=y +CONFIG_CPU_IDLE_GOV_MENU=y +CONFIG_INTEL_IDLE=y + +# +# Bus options (PCI etc.) +# +CONFIG_PCI=y +CONFIG_PCI_DIRECT=y +CONFIG_PCI_MMCONFIG=y +CONFIG_PCI_DOMAINS=y +CONFIG_MMCONF_FAM10H=y +# CONFIG_PCI_CNB20LE_QUIRK is not set +CONFIG_PCIEPORTBUS=y +CONFIG_HOTPLUG_PCI_PCIE=y +CONFIG_PCIEAER=y +# CONFIG_PCIEAER_INJECT is not set +# CONFIG_PCIE_ECRC is not set +CONFIG_PCIEASPM=y +# CONFIG_PCIEASPM_DEBUG is not set +# CONFIG_PCIEASPM_DEFAULT is not set +CONFIG_PCIEASPM_POWERSAVE=y +# CONFIG_PCIEASPM_POWER_SUPERSAVE is not set +# CONFIG_PCIEASPM_PERFORMANCE is not set +CONFIG_PCIE_PME=y +# CONFIG_PCIE_DPC is not set +# CONFIG_PCIE_PTM is not set +CONFIG_PCI_MSI=y +CONFIG_PCI_MSI_IRQ_DOMAIN=y +CONFIG_PCI_QUIRKS=y +# CONFIG_PCI_DEBUG is not set +# CONFIG_PCI_STUB is not set +CONFIG_PCI_LOCKLESS_CONFIG=y +# CONFIG_PCI_IOV is not set +# CONFIG_PCI_PRI is not set +# CONFIG_PCI_PASID is not set +CONFIG_PCI_LABEL=y +CONFIG_HOTPLUG_PCI=y +CONFIG_HOTPLUG_PCI_ACPI=y +# CONFIG_HOTPLUG_PCI_ACPI_IBM is not set +# CONFIG_HOTPLUG_PCI_CPCI is not set +# CONFIG_HOTPLUG_PCI_SHPC is not set + +# +# PCI controller drivers +# + +# +# Cadence PCIe controllers support +# +# CONFIG_VMD is not set + +# +# DesignWare PCI Core Support +# +# CONFIG_PCIE_DW_PLAT_HOST is not set + +# +# PCI Endpoint +# +# CONFIG_PCI_ENDPOINT is not set + +# +# PCI switch controller drivers +# +# CONFIG_PCI_SW_SWITCHTEC is not set +# CONFIG_ISA_BUS is not set +CONFIG_ISA_DMA_API=y +CONFIG_AMD_NB=y +# CONFIG_PCCARD is not set +# CONFIG_RAPIDIO is not set +CONFIG_X86_SYSFB=y + +# +# Binary Emulations +# +CONFIG_IA32_EMULATION=y +# CONFIG_IA32_AOUT is not set +# CONFIG_X86_X32 is not set +CONFIG_COMPAT_32=y +CONFIG_COMPAT=y +CONFIG_COMPAT_FOR_U64_ALIGNMENT=y +CONFIG_X86_DEV_DMA_OPS=y +CONFIG_HAVE_GENERIC_GUP=y + +# +# Firmware Drivers +# +CONFIG_EDD=y +# CONFIG_EDD_OFF is not set +CONFIG_FIRMWARE_MEMMAP=y +CONFIG_DELL_RBU=y +CONFIG_DCDBAS=y +CONFIG_DMIID=y 
+CONFIG_DMI_SYSFS=y +CONFIG_DMI_SCAN_MACHINE_NON_EFI_FALLBACK=y +# CONFIG_ISCSI_IBFT_FIND is not set +# CONFIG_FW_CFG_SYSFS is not set +# CONFIG_GOOGLE_FIRMWARE is not set + +# +# EFI (Extensible Firmware Interface) Support +# +CONFIG_EFI_VARS=y +CONFIG_EFI_ESRT=y +# CONFIG_EFI_VARS_PSTORE is not set +# CONFIG_EFI_FAKE_MEMMAP is not set +CONFIG_EFI_RUNTIME_WRAPPERS=y +CONFIG_EFI_BOOTLOADER_CONTROL=y +# CONFIG_EFI_CAPSULE_LOADER is not set +# CONFIG_EFI_TEST is not set +# CONFIG_APPLE_PROPERTIES is not set +# CONFIG_RESET_ATTACK_MITIGATION is not set +CONFIG_UEFI_CPER=y +CONFIG_UEFI_CPER_X86=y + +# +# Tegra firmware driver +# +CONFIG_HAVE_KVM=y +CONFIG_VIRTUALIZATION=y +# CONFIG_KVM is not set +# CONFIG_VHOST_NET is not set +# CONFIG_VHOST_CROSS_ENDIAN_LEGACY is not set + +# +# General architecture-dependent options +# +CONFIG_HOTPLUG_SMT=y +# CONFIG_OPROFILE is not set +CONFIG_HAVE_OPROFILE=y +CONFIG_OPROFILE_NMI_TIMER=y +# CONFIG_KPROBES is not set +CONFIG_JUMP_LABEL=y +# CONFIG_STATIC_KEYS_SELFTEST is not set +CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS=y +CONFIG_ARCH_USE_BUILTIN_BSWAP=y +CONFIG_HAVE_IOREMAP_PROT=y +CONFIG_HAVE_KPROBES=y +CONFIG_HAVE_KRETPROBES=y +CONFIG_HAVE_OPTPROBES=y +CONFIG_HAVE_KPROBES_ON_FTRACE=y +CONFIG_HAVE_FUNCTION_ERROR_INJECTION=y +CONFIG_HAVE_NMI=y +CONFIG_HAVE_ARCH_TRACEHOOK=y +CONFIG_HAVE_DMA_CONTIGUOUS=y +CONFIG_GENERIC_SMP_IDLE_THREAD=y +CONFIG_ARCH_HAS_FORTIFY_SOURCE=y +CONFIG_ARCH_HAS_SET_MEMORY=y +CONFIG_HAVE_ARCH_THREAD_STRUCT_WHITELIST=y +CONFIG_ARCH_WANTS_DYNAMIC_TASK_STRUCT=y +CONFIG_HAVE_REGS_AND_STACK_ACCESS_API=y +CONFIG_HAVE_RSEQ=y +CONFIG_HAVE_CLK=y +CONFIG_HAVE_HW_BREAKPOINT=y +CONFIG_HAVE_MIXED_BREAKPOINTS_REGS=y +CONFIG_HAVE_USER_RETURN_NOTIFIER=y +CONFIG_HAVE_PERF_EVENTS_NMI=y +CONFIG_HAVE_HARDLOCKUP_DETECTOR_PERF=y +CONFIG_HAVE_PERF_REGS=y +CONFIG_HAVE_PERF_USER_STACK_DUMP=y +CONFIG_HAVE_ARCH_JUMP_LABEL=y +CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG=y +CONFIG_HAVE_CMPXCHG_LOCAL=y +CONFIG_HAVE_CMPXCHG_DOUBLE=y 
+CONFIG_ARCH_WANT_COMPAT_IPC_PARSE_VERSION=y +CONFIG_ARCH_WANT_OLD_COMPAT_IPC=y +CONFIG_HAVE_ARCH_SECCOMP_FILTER=y +CONFIG_SECCOMP_FILTER=y +CONFIG_HAVE_STACKPROTECTOR=y +CONFIG_CC_HAS_STACKPROTECTOR_NONE=y +CONFIG_STACKPROTECTOR=y +CONFIG_STACKPROTECTOR_STRONG=y +CONFIG_HAVE_ARCH_WITHIN_STACK_FRAMES=y +CONFIG_HAVE_CONTEXT_TRACKING=y +CONFIG_HAVE_VIRT_CPU_ACCOUNTING_GEN=y +CONFIG_HAVE_IRQ_TIME_ACCOUNTING=y +CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE=y +CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD=y +CONFIG_HAVE_ARCH_HUGE_VMAP=y +CONFIG_HAVE_ARCH_SOFT_DIRTY=y +CONFIG_HAVE_MOD_ARCH_SPECIFIC=y +CONFIG_MODULES_USE_ELF_RELA=y +CONFIG_HAVE_IRQ_EXIT_ON_IRQ_STACK=y +CONFIG_ARCH_HAS_ELF_RANDOMIZE=y +CONFIG_HAVE_ARCH_MMAP_RND_BITS=y +CONFIG_HAVE_EXIT_THREAD=y +CONFIG_ARCH_MMAP_RND_BITS=28 +CONFIG_HAVE_ARCH_MMAP_RND_COMPAT_BITS=y +CONFIG_ARCH_MMAP_RND_COMPAT_BITS=8 +CONFIG_HAVE_ARCH_COMPAT_MMAP_BASES=y +CONFIG_HAVE_COPY_THREAD_TLS=y +CONFIG_HAVE_STACK_VALIDATION=y +CONFIG_HAVE_RELIABLE_STACKTRACE=y +CONFIG_OLD_SIGSUSPEND3=y +CONFIG_COMPAT_OLD_SIGACTION=y +CONFIG_COMPAT_32BIT_TIME=y +CONFIG_HAVE_ARCH_VMAP_STACK=y +CONFIG_VMAP_STACK=y +CONFIG_ARCH_HAS_STRICT_KERNEL_RWX=y +CONFIG_STRICT_KERNEL_RWX=y +CONFIG_ARCH_HAS_STRICT_MODULE_RWX=y +CONFIG_STRICT_MODULE_RWX=y +CONFIG_ARCH_HAS_REFCOUNT=y +# CONFIG_REFCOUNT_FULL is not set +CONFIG_HAVE_ARCH_PREL32_RELOCATIONS=y + +# +# GCOV-based kernel profiling +# +# CONFIG_GCOV_KERNEL is not set +CONFIG_ARCH_HAS_GCOV_PROFILE_ALL=y +CONFIG_PLUGIN_HOSTCC="" +CONFIG_HAVE_GCC_PLUGINS=y +CONFIG_RT_MUTEXES=y +CONFIG_BASE_SMALL=0 +CONFIG_MODULES=y +# CONFIG_MODULE_FORCE_LOAD is not set +CONFIG_MODULE_UNLOAD=y +CONFIG_MODULE_FORCE_UNLOAD=y +CONFIG_MODVERSIONS=y +# CONFIG_MODULE_SRCVERSION_ALL is not set +CONFIG_MODULE_SIG=y +CONFIG_MODULE_SIG_FORCE=y +CONFIG_MODULE_SIG_ALL=y +# CONFIG_MODULE_SIG_SHA1 is not set +# CONFIG_MODULE_SIG_SHA224 is not set +# CONFIG_MODULE_SIG_SHA256 is not set +# CONFIG_MODULE_SIG_SHA384 is not set +CONFIG_MODULE_SIG_SHA512=y 
+CONFIG_MODULE_SIG_HASH="sha512" +# CONFIG_MODULE_COMPRESS is not set +# CONFIG_TRIM_UNUSED_KSYMS is not set +CONFIG_MODULES_TREE_LOOKUP=y +CONFIG_BLOCK=y +CONFIG_BLK_SCSI_REQUEST=y +CONFIG_BLK_DEV_BSG=y +# CONFIG_BLK_DEV_BSGLIB is not set +# CONFIG_BLK_DEV_INTEGRITY is not set +# CONFIG_BLK_DEV_ZONED is not set +# CONFIG_BLK_CMDLINE_PARSER is not set +# CONFIG_BLK_WBT is not set +CONFIG_BLK_DEBUG_FS=y +# CONFIG_BLK_SED_OPAL is not set + +# +# Partition Types +# +CONFIG_PARTITION_ADVANCED=y +# CONFIG_ACORN_PARTITION is not set +# CONFIG_AIX_PARTITION is not set +# CONFIG_OSF_PARTITION is not set +# CONFIG_AMIGA_PARTITION is not set +# CONFIG_ATARI_PARTITION is not set +# CONFIG_MAC_PARTITION is not set +CONFIG_MSDOS_PARTITION=y +# CONFIG_BSD_DISKLABEL is not set +# CONFIG_MINIX_SUBPARTITION is not set +# CONFIG_SOLARIS_X86_PARTITION is not set +# CONFIG_UNIXWARE_DISKLABEL is not set +# CONFIG_LDM_PARTITION is not set +# CONFIG_SGI_PARTITION is not set +# CONFIG_ULTRIX_PARTITION is not set +# CONFIG_SUN_PARTITION is not set +# CONFIG_KARMA_PARTITION is not set +CONFIG_EFI_PARTITION=y +# CONFIG_SYSV68_PARTITION is not set +# CONFIG_CMDLINE_PARTITION is not set +CONFIG_BLOCK_COMPAT=y +CONFIG_BLK_MQ_PCI=y +CONFIG_BLK_MQ_VIRTIO=y + +# +# IO Schedulers +# +CONFIG_IOSCHED_NOOP=y +CONFIG_IOSCHED_DEADLINE=y +CONFIG_IOSCHED_CFQ=y +# CONFIG_DEFAULT_DEADLINE is not set +CONFIG_DEFAULT_CFQ=y +# CONFIG_DEFAULT_NOOP is not set +CONFIG_DEFAULT_IOSCHED="cfq" +CONFIG_MQ_IOSCHED_DEADLINE=y +CONFIG_MQ_IOSCHED_KYBER=y +# CONFIG_IOSCHED_BFQ is not set +CONFIG_ASN1=y +CONFIG_UNINLINE_SPIN_UNLOCK=y +CONFIG_ARCH_SUPPORTS_ATOMIC_RMW=y +CONFIG_MUTEX_SPIN_ON_OWNER=y +CONFIG_RWSEM_SPIN_ON_OWNER=y +CONFIG_LOCK_SPIN_ON_OWNER=y +CONFIG_ARCH_USE_QUEUED_SPINLOCKS=y +CONFIG_QUEUED_SPINLOCKS=y +CONFIG_ARCH_USE_QUEUED_RWLOCKS=y +CONFIG_QUEUED_RWLOCKS=y +CONFIG_ARCH_HAS_SYNC_CORE_BEFORE_USERMODE=y +CONFIG_ARCH_HAS_SYSCALL_WRAPPER=y +CONFIG_FREEZER=y + +# +# Executable file formats +# 
+CONFIG_BINFMT_ELF=y +CONFIG_COMPAT_BINFMT_ELF=y +CONFIG_ELFCORE=y +CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS=y +CONFIG_BINFMT_SCRIPT=y +CONFIG_BINFMT_MISC=y +CONFIG_COREDUMP=y + +# +# Memory Management options +# +CONFIG_SELECT_MEMORY_MODEL=y +CONFIG_SPARSEMEM_MANUAL=y +CONFIG_SPARSEMEM=y +CONFIG_HAVE_MEMORY_PRESENT=y +CONFIG_SPARSEMEM_EXTREME=y +CONFIG_SPARSEMEM_VMEMMAP_ENABLE=y +CONFIG_SPARSEMEM_VMEMMAP=y +CONFIG_HAVE_MEMBLOCK=y +CONFIG_HAVE_MEMBLOCK_NODE_MAP=y +CONFIG_ARCH_DISCARD_MEMBLOCK=y +# CONFIG_MEMORY_HOTPLUG is not set +CONFIG_SPLIT_PTLOCK_CPUS=4 +CONFIG_COMPACTION=y +CONFIG_MIGRATION=y +CONFIG_PHYS_ADDR_T_64BIT=y +CONFIG_BOUNCE=y +CONFIG_VIRT_TO_BUS=y +CONFIG_MMU_NOTIFIER=y +CONFIG_KSM=y +CONFIG_DEFAULT_MMAP_MIN_ADDR=8192 +CONFIG_ARCH_SUPPORTS_MEMORY_FAILURE=y +# CONFIG_MEMORY_FAILURE is not set +CONFIG_TRANSPARENT_HUGEPAGE=y +# CONFIG_TRANSPARENT_HUGEPAGE_ALWAYS is not set +CONFIG_TRANSPARENT_HUGEPAGE_MADVISE=y +CONFIG_ARCH_WANTS_THP_SWAP=y +CONFIG_THP_SWAP=y +CONFIG_TRANSPARENT_HUGE_PAGECACHE=y +# CONFIG_CLEANCACHE is not set +# CONFIG_FRONTSWAP is not set +# CONFIG_CMA is not set +# CONFIG_ZPOOL is not set +# CONFIG_ZBUD is not set +CONFIG_ZSMALLOC=y +# CONFIG_PGTABLE_MAPPING is not set +# CONFIG_DEBUG_PANIC_ON_BAD_PAGE is not set +# CONFIG_ZSMALLOC_STAT is not set +CONFIG_GENERIC_EARLY_IOREMAP=y +# CONFIG_DEFERRED_STRUCT_PAGE_INIT is not set +# CONFIG_IDLE_PAGE_TRACKING is not set +CONFIG_ARCH_HAS_ZONE_DEVICE=y +CONFIG_FRAME_VECTOR=y +CONFIG_ARCH_USES_HIGH_VMA_FLAGS=y +CONFIG_ARCH_HAS_PKEYS=y +# CONFIG_PERCPU_STATS is not set +# CONFIG_GUP_BENCHMARK is not set +CONFIG_ARCH_HAS_PTE_SPECIAL=y +CONFIG_NET=y +CONFIG_COMPAT_NETLINK_MESSAGES=y +CONFIG_NET_INGRESS=y + +# +# Networking options +# +CONFIG_PACKET=y +# CONFIG_PACKET_DIAG is not set +CONFIG_UNIX=y +# CONFIG_UNIX_DIAG is not set +# CONFIG_TLS is not set +CONFIG_XFRM=y +CONFIG_XFRM_ALGO=y +CONFIG_XFRM_USER=y +# CONFIG_XFRM_INTERFACE is not set +# CONFIG_XFRM_SUB_POLICY is not set +# 
CONFIG_XFRM_MIGRATE is not set +# CONFIG_XFRM_STATISTICS is not set +CONFIG_XFRM_IPCOMP=y +CONFIG_NET_KEY=y +# CONFIG_NET_KEY_MIGRATE is not set +# CONFIG_XDP_SOCKETS is not set +CONFIG_INET=y +CONFIG_IP_MULTICAST=y +CONFIG_IP_ADVANCED_ROUTER=y +# CONFIG_IP_FIB_TRIE_STATS is not set +CONFIG_IP_MULTIPLE_TABLES=y +# CONFIG_IP_ROUTE_MULTIPATH is not set +# CONFIG_IP_ROUTE_VERBOSE is not set +# CONFIG_IP_PNP is not set +# CONFIG_NET_IPIP is not set +# CONFIG_NET_IPGRE_DEMUX is not set +CONFIG_NET_IP_TUNNEL=y +# CONFIG_IP_MROUTE is not set +CONFIG_SYN_COOKIES=y +CONFIG_NET_IPVTI=y +CONFIG_NET_UDP_TUNNEL=y +# CONFIG_NET_FOU is not set +# CONFIG_NET_FOU_IP_TUNNELS is not set +# CONFIG_INET_AH is not set +CONFIG_INET_ESP=y +# CONFIG_INET_ESP_OFFLOAD is not set +# CONFIG_INET_IPCOMP is not set +CONFIG_INET_TUNNEL=y +CONFIG_INET_XFRM_MODE_TRANSPORT=y +CONFIG_INET_XFRM_MODE_TUNNEL=y +# CONFIG_INET_XFRM_MODE_BEET is not set +CONFIG_INET_DIAG=y +CONFIG_INET_TCP_DIAG=y +# CONFIG_INET_UDP_DIAG is not set +# CONFIG_INET_RAW_DIAG is not set +CONFIG_INET_DIAG_DESTROY=y +CONFIG_TCP_CONG_ADVANCED=y +# CONFIG_TCP_CONG_BIC is not set +CONFIG_TCP_CONG_CUBIC=y +# CONFIG_TCP_CONG_WESTWOOD is not set +# CONFIG_TCP_CONG_HTCP is not set +# CONFIG_TCP_CONG_HSTCP is not set +# CONFIG_TCP_CONG_HYBLA is not set +# CONFIG_TCP_CONG_VEGAS is not set +# CONFIG_TCP_CONG_NV is not set +# CONFIG_TCP_CONG_SCALABLE is not set +# CONFIG_TCP_CONG_LP is not set +# CONFIG_TCP_CONG_VENO is not set +# CONFIG_TCP_CONG_YEAH is not set +# CONFIG_TCP_CONG_ILLINOIS is not set +# CONFIG_TCP_CONG_DCTCP is not set +# CONFIG_TCP_CONG_CDG is not set +# CONFIG_TCP_CONG_BBR is not set +CONFIG_DEFAULT_CUBIC=y +# CONFIG_DEFAULT_RENO is not set +CONFIG_DEFAULT_TCP_CONG="cubic" +CONFIG_TCP_MD5SIG=y +CONFIG_IPV6=y +CONFIG_IPV6_ROUTER_PREF=y +CONFIG_IPV6_ROUTE_INFO=y +CONFIG_IPV6_OPTIMISTIC_DAD=y +CONFIG_INET6_AH=y +CONFIG_INET6_ESP=y +# CONFIG_INET6_ESP_OFFLOAD is not set +CONFIG_INET6_IPCOMP=y +CONFIG_IPV6_MIP6=y +# 
CONFIG_IPV6_ILA is not set +CONFIG_INET6_XFRM_TUNNEL=y +CONFIG_INET6_TUNNEL=y +CONFIG_INET6_XFRM_MODE_TRANSPORT=y +CONFIG_INET6_XFRM_MODE_TUNNEL=y +CONFIG_INET6_XFRM_MODE_BEET=y +# CONFIG_INET6_XFRM_MODE_ROUTEOPTIMIZATION is not set +CONFIG_IPV6_VTI=y +CONFIG_IPV6_SIT=y +# CONFIG_IPV6_SIT_6RD is not set +CONFIG_IPV6_NDISC_NODETYPE=y +CONFIG_IPV6_TUNNEL=y +CONFIG_IPV6_MULTIPLE_TABLES=y +# CONFIG_IPV6_SUBTREES is not set +# CONFIG_IPV6_MROUTE is not set +# CONFIG_IPV6_SEG6_LWTUNNEL is not set +# CONFIG_IPV6_SEG6_HMAC is not set +# CONFIG_NETLABEL is not set +CONFIG_ANDROID_PARANOID_NETWORK=y +CONFIG_NETWORK_SECMARK=y +CONFIG_NET_PTP_CLASSIFY=y +# CONFIG_NETWORK_PHY_TIMESTAMPING is not set +CONFIG_NETFILTER=y +CONFIG_NETFILTER_ADVANCED=y + +# +# Core Netfilter Configuration +# +CONFIG_NETFILTER_INGRESS=y +CONFIG_NETFILTER_NETLINK=y +CONFIG_NETFILTER_FAMILY_ARP=y +# CONFIG_NETFILTER_NETLINK_ACCT is not set +CONFIG_NETFILTER_NETLINK_QUEUE=y +CONFIG_NETFILTER_NETLINK_LOG=y +# CONFIG_NETFILTER_NETLINK_OSF is not set +CONFIG_NF_CONNTRACK=y +# CONFIG_NF_LOG_NETDEV is not set +CONFIG_NETFILTER_CONNCOUNT=y +CONFIG_NF_CONNTRACK_MARK=y +CONFIG_NF_CONNTRACK_SECMARK=y +# CONFIG_NF_CONNTRACK_ZONES is not set +CONFIG_NF_CONNTRACK_PROCFS=y +CONFIG_NF_CONNTRACK_EVENTS=y +# CONFIG_NF_CONNTRACK_TIMEOUT is not set +# CONFIG_NF_CONNTRACK_TIMESTAMP is not set +# CONFIG_NF_CONNTRACK_LABELS is not set +CONFIG_NF_CT_PROTO_DCCP=y +CONFIG_NF_CT_PROTO_GRE=y +CONFIG_NF_CT_PROTO_SCTP=y +CONFIG_NF_CT_PROTO_UDPLITE=y +CONFIG_NF_CONNTRACK_AMANDA=y +CONFIG_NF_CONNTRACK_FTP=y +CONFIG_NF_CONNTRACK_H323=y +CONFIG_NF_CONNTRACK_IRC=y +CONFIG_NF_CONNTRACK_BROADCAST=y +CONFIG_NF_CONNTRACK_NETBIOS_NS=y +# CONFIG_NF_CONNTRACK_SNMP is not set +CONFIG_NF_CONNTRACK_PPTP=y +CONFIG_NF_CONNTRACK_SANE=y +CONFIG_NF_CONNTRACK_SIP=y +CONFIG_NF_CONNTRACK_TFTP=y +CONFIG_NF_CT_NETLINK=y +# CONFIG_NETFILTER_NETLINK_GLUE_CT is not set +CONFIG_NF_NAT=y +CONFIG_NF_NAT_NEEDED=y +CONFIG_NF_NAT_PROTO_DCCP=y 
+CONFIG_NF_NAT_PROTO_UDPLITE=y +CONFIG_NF_NAT_PROTO_SCTP=y +CONFIG_NF_NAT_AMANDA=y +CONFIG_NF_NAT_FTP=y +CONFIG_NF_NAT_IRC=y +CONFIG_NF_NAT_SIP=y +CONFIG_NF_NAT_TFTP=y +CONFIG_NF_NAT_REDIRECT=y +# CONFIG_NF_TABLES is not set +CONFIG_NETFILTER_XTABLES=y + +# +# Xtables combined modules +# +CONFIG_NETFILTER_XT_MARK=y +CONFIG_NETFILTER_XT_CONNMARK=y + +# +# Xtables targets +# +CONFIG_NETFILTER_XT_TARGET_AUDIT=m +# CONFIG_NETFILTER_XT_TARGET_CHECKSUM is not set +CONFIG_NETFILTER_XT_TARGET_CLASSIFY=y +CONFIG_NETFILTER_XT_TARGET_CONNMARK=y +CONFIG_NETFILTER_XT_TARGET_CONNSECMARK=y +# CONFIG_NETFILTER_XT_TARGET_CT is not set +# CONFIG_NETFILTER_XT_TARGET_DSCP is not set +# CONFIG_NETFILTER_XT_TARGET_HL is not set +# CONFIG_NETFILTER_XT_TARGET_HMARK is not set +CONFIG_NETFILTER_XT_TARGET_IDLETIMER=y +# CONFIG_NETFILTER_XT_TARGET_LED is not set +# CONFIG_NETFILTER_XT_TARGET_LOG is not set +CONFIG_NETFILTER_XT_TARGET_MARK=y +CONFIG_NETFILTER_XT_NAT=y +CONFIG_NETFILTER_XT_TARGET_NETMAP=y +CONFIG_NETFILTER_XT_TARGET_NFLOG=y +CONFIG_NETFILTER_XT_TARGET_NFQUEUE=y +# CONFIG_NETFILTER_XT_TARGET_NOTRACK is not set +# CONFIG_NETFILTER_XT_TARGET_RATEEST is not set +CONFIG_NETFILTER_XT_TARGET_REDIRECT=y +# CONFIG_NETFILTER_XT_TARGET_TEE is not set +CONFIG_NETFILTER_XT_TARGET_TPROXY=y +CONFIG_NETFILTER_XT_TARGET_TRACE=y +CONFIG_NETFILTER_XT_TARGET_SECMARK=y +CONFIG_NETFILTER_XT_TARGET_TCPMSS=y +# CONFIG_NETFILTER_XT_TARGET_TCPOPTSTRIP is not set + +# +# Xtables matches +# +# CONFIG_NETFILTER_XT_MATCH_ADDRTYPE is not set +CONFIG_NETFILTER_XT_MATCH_BPF=y +# CONFIG_NETFILTER_XT_MATCH_CGROUP is not set +# CONFIG_NETFILTER_XT_MATCH_CLUSTER is not set +CONFIG_NETFILTER_XT_MATCH_COMMENT=y +# CONFIG_NETFILTER_XT_MATCH_CONNBYTES is not set +# CONFIG_NETFILTER_XT_MATCH_CONNLABEL is not set +CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=y +CONFIG_NETFILTER_XT_MATCH_CONNMARK=y +CONFIG_NETFILTER_XT_MATCH_CONNTRACK=y +# CONFIG_NETFILTER_XT_MATCH_CPU is not set +# CONFIG_NETFILTER_XT_MATCH_DCCP is not set +# 
CONFIG_NETFILTER_XT_MATCH_DEVGROUP is not set +# CONFIG_NETFILTER_XT_MATCH_DSCP is not set +CONFIG_NETFILTER_XT_MATCH_ECN=y +# CONFIG_NETFILTER_XT_MATCH_ESP is not set +CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=y +CONFIG_NETFILTER_XT_MATCH_HELPER=y +CONFIG_NETFILTER_XT_MATCH_HL=y +# CONFIG_NETFILTER_XT_MATCH_IPCOMP is not set +CONFIG_NETFILTER_XT_MATCH_IPRANGE=y +CONFIG_NETFILTER_XT_MATCH_L2TP=y +CONFIG_NETFILTER_XT_MATCH_LENGTH=y +CONFIG_NETFILTER_XT_MATCH_LIMIT=y +CONFIG_NETFILTER_XT_MATCH_MAC=y +CONFIG_NETFILTER_XT_MATCH_MARK=y +# CONFIG_NETFILTER_XT_MATCH_MULTIPORT is not set +# CONFIG_NETFILTER_XT_MATCH_NFACCT is not set +# CONFIG_NETFILTER_XT_MATCH_OSF is not set +# CONFIG_NETFILTER_XT_MATCH_OWNER is not set +CONFIG_NETFILTER_XT_MATCH_POLICY=y +CONFIG_NETFILTER_XT_MATCH_PKTTYPE=y +CONFIG_NETFILTER_XT_MATCH_QTAGUID=y +CONFIG_NETFILTER_XT_MATCH_QUOTA=y +CONFIG_NETFILTER_XT_MATCH_QUOTA2=y +CONFIG_NETFILTER_XT_MATCH_QUOTA2_LOG=y +# CONFIG_NETFILTER_XT_MATCH_RATEEST is not set +# CONFIG_NETFILTER_XT_MATCH_REALM is not set +# CONFIG_NETFILTER_XT_MATCH_RECENT is not set +# CONFIG_NETFILTER_XT_MATCH_SCTP is not set +CONFIG_NETFILTER_XT_MATCH_SOCKET=y +CONFIG_NETFILTER_XT_MATCH_STATE=y +CONFIG_NETFILTER_XT_MATCH_STATISTIC=y +CONFIG_NETFILTER_XT_MATCH_STRING=y +# CONFIG_NETFILTER_XT_MATCH_TCPMSS is not set +CONFIG_NETFILTER_XT_MATCH_TIME=y +CONFIG_NETFILTER_XT_MATCH_U32=y +# CONFIG_IP_SET is not set +# CONFIG_IP_VS is not set + +# +# IP: Netfilter Configuration +# +CONFIG_NF_DEFRAG_IPV4=y +CONFIG_NF_SOCKET_IPV4=y +CONFIG_NF_TPROXY_IPV4=y +# CONFIG_NF_DUP_IPV4 is not set +# CONFIG_NF_LOG_ARP is not set +# CONFIG_NF_LOG_IPV4 is not set +CONFIG_NF_REJECT_IPV4=y +CONFIG_NF_NAT_IPV4=y +CONFIG_NF_NAT_MASQUERADE_IPV4=y +CONFIG_NF_NAT_PROTO_GRE=y +CONFIG_NF_NAT_PPTP=y +CONFIG_NF_NAT_H323=y +CONFIG_IP_NF_IPTABLES=y +CONFIG_IP_NF_MATCH_AH=y +CONFIG_IP_NF_MATCH_ECN=y +# CONFIG_IP_NF_MATCH_RPFILTER is not set +CONFIG_IP_NF_MATCH_TTL=y +CONFIG_IP_NF_FILTER=y +CONFIG_IP_NF_TARGET_REJECT=y 
+# CONFIG_IP_NF_TARGET_SYNPROXY is not set +CONFIG_IP_NF_NAT=y +CONFIG_IP_NF_TARGET_MASQUERADE=y +CONFIG_IP_NF_TARGET_NETMAP=y +CONFIG_IP_NF_TARGET_REDIRECT=y +CONFIG_IP_NF_MANGLE=y +# CONFIG_IP_NF_TARGET_CLUSTERIP is not set +# CONFIG_IP_NF_TARGET_ECN is not set +# CONFIG_IP_NF_TARGET_TTL is not set +CONFIG_IP_NF_RAW=y +CONFIG_IP_NF_SECURITY=y +CONFIG_IP_NF_ARPTABLES=y +CONFIG_IP_NF_ARPFILTER=y +CONFIG_IP_NF_ARP_MANGLE=y + +# +# IPv6: Netfilter Configuration +# +CONFIG_NF_SOCKET_IPV6=y +CONFIG_NF_TPROXY_IPV6=y +# CONFIG_NF_DUP_IPV6 is not set +CONFIG_NF_REJECT_IPV6=y +# CONFIG_NF_LOG_IPV6 is not set +CONFIG_NF_NAT_IPV6=y +CONFIG_NF_NAT_MASQUERADE_IPV6=y +CONFIG_IP6_NF_IPTABLES=y +# CONFIG_IP6_NF_MATCH_AH is not set +# CONFIG_IP6_NF_MATCH_EUI64 is not set +# CONFIG_IP6_NF_MATCH_FRAG is not set +# CONFIG_IP6_NF_MATCH_OPTS is not set +# CONFIG_IP6_NF_MATCH_HL is not set +CONFIG_IP6_NF_MATCH_IPV6HEADER=y +# CONFIG_IP6_NF_MATCH_MH is not set +CONFIG_IP6_NF_MATCH_RPFILTER=y +# CONFIG_IP6_NF_MATCH_RT is not set +# CONFIG_IP6_NF_MATCH_SRH is not set +# CONFIG_IP6_NF_TARGET_HL is not set +CONFIG_IP6_NF_FILTER=y +CONFIG_IP6_NF_TARGET_REJECT=y +# CONFIG_IP6_NF_TARGET_SYNPROXY is not set +CONFIG_IP6_NF_MANGLE=y +CONFIG_IP6_NF_RAW=y +# CONFIG_IP6_NF_SECURITY is not set +CONFIG_IP6_NF_NAT=y +CONFIG_IP6_NF_TARGET_MASQUERADE=y +# CONFIG_IP6_NF_TARGET_NPT is not set +CONFIG_NF_DEFRAG_IPV6=y +# CONFIG_BPFILTER is not set +# CONFIG_IP_DCCP is not set +# CONFIG_IP_SCTP is not set +# CONFIG_RDS is not set +# CONFIG_TIPC is not set +# CONFIG_ATM is not set +CONFIG_L2TP=y +# CONFIG_L2TP_DEBUGFS is not set +# CONFIG_L2TP_V3 is not set +# CONFIG_BRIDGE is not set +CONFIG_HAVE_NET_DSA=y +# CONFIG_NET_DSA is not set +CONFIG_VLAN_8021Q=y +# CONFIG_VLAN_8021Q_GVRP is not set +# CONFIG_VLAN_8021Q_MVRP is not set +# CONFIG_DECNET is not set +# CONFIG_LLC2 is not set +# CONFIG_ATALK is not set +# CONFIG_X25 is not set +# CONFIG_LAPB is not set +# CONFIG_PHONET is not set +# CONFIG_6LOWPAN is not 
set +# CONFIG_IEEE802154 is not set +CONFIG_NET_SCHED=y + +# +# Queueing/Scheduling +# +# CONFIG_NET_SCH_CBQ is not set +CONFIG_NET_SCH_HTB=y +# CONFIG_NET_SCH_HFSC is not set +# CONFIG_NET_SCH_PRIO is not set +# CONFIG_NET_SCH_MULTIQ is not set +# CONFIG_NET_SCH_RED is not set +# CONFIG_NET_SCH_SFB is not set +# CONFIG_NET_SCH_SFQ is not set +# CONFIG_NET_SCH_TEQL is not set +# CONFIG_NET_SCH_TBF is not set +# CONFIG_NET_SCH_CBS is not set +# CONFIG_NET_SCH_ETF is not set +# CONFIG_NET_SCH_GRED is not set +# CONFIG_NET_SCH_DSMARK is not set +# CONFIG_NET_SCH_NETEM is not set +# CONFIG_NET_SCH_DRR is not set +# CONFIG_NET_SCH_MQPRIO is not set +# CONFIG_NET_SCH_SKBPRIO is not set +# CONFIG_NET_SCH_CHOKE is not set +# CONFIG_NET_SCH_QFQ is not set +# CONFIG_NET_SCH_CODEL is not set +# CONFIG_NET_SCH_FQ_CODEL is not set +# CONFIG_NET_SCH_CAKE is not set +CONFIG_NET_SCH_FQ=y +# CONFIG_NET_SCH_HHF is not set +# CONFIG_NET_SCH_PIE is not set +# CONFIG_NET_SCH_INGRESS is not set +# CONFIG_NET_SCH_PLUG is not set +# CONFIG_NET_SCH_DEFAULT is not set + +# +# Classification +# +CONFIG_NET_CLS=y +# CONFIG_NET_CLS_BASIC is not set +# CONFIG_NET_CLS_TCINDEX is not set +# CONFIG_NET_CLS_ROUTE4 is not set +# CONFIG_NET_CLS_FW is not set +CONFIG_NET_CLS_U32=y +# CONFIG_CLS_U32_PERF is not set +# CONFIG_CLS_U32_MARK is not set +# CONFIG_NET_CLS_RSVP is not set +# CONFIG_NET_CLS_RSVP6 is not set +# CONFIG_NET_CLS_FLOW is not set +# CONFIG_NET_CLS_CGROUP is not set +# CONFIG_NET_CLS_BPF is not set +# CONFIG_NET_CLS_FLOWER is not set +# CONFIG_NET_CLS_MATCHALL is not set +CONFIG_NET_EMATCH=y +CONFIG_NET_EMATCH_STACK=32 +# CONFIG_NET_EMATCH_CMP is not set +# CONFIG_NET_EMATCH_NBYTE is not set +CONFIG_NET_EMATCH_U32=y +# CONFIG_NET_EMATCH_META is not set +# CONFIG_NET_EMATCH_TEXT is not set +# CONFIG_NET_EMATCH_CANID is not set +# CONFIG_NET_EMATCH_IPT is not set +CONFIG_NET_CLS_ACT=y +# CONFIG_NET_ACT_POLICE is not set +# CONFIG_NET_ACT_GACT is not set +# CONFIG_NET_ACT_MIRRED is not 
set +# CONFIG_NET_ACT_SAMPLE is not set +# CONFIG_NET_ACT_IPT is not set +# CONFIG_NET_ACT_NAT is not set +# CONFIG_NET_ACT_PEDIT is not set +# CONFIG_NET_ACT_SIMP is not set +# CONFIG_NET_ACT_SKBEDIT is not set +# CONFIG_NET_ACT_CSUM is not set +# CONFIG_NET_ACT_VLAN is not set +# CONFIG_NET_ACT_BPF is not set +# CONFIG_NET_ACT_CONNMARK is not set +# CONFIG_NET_ACT_SKBMOD is not set +# CONFIG_NET_ACT_IFE is not set +# CONFIG_NET_ACT_TUNNEL_KEY is not set +# CONFIG_NET_CLS_IND is not set +CONFIG_NET_SCH_FIFO=y +# CONFIG_DCB is not set +CONFIG_DNS_RESOLVER=y +# CONFIG_BATMAN_ADV is not set +# CONFIG_OPENVSWITCH is not set +# CONFIG_VSOCKETS is not set +# CONFIG_NETLINK_DIAG is not set +# CONFIG_MPLS is not set +# CONFIG_NET_NSH is not set +# CONFIG_HSR is not set +# CONFIG_NET_SWITCHDEV is not set +# CONFIG_NET_L3_MASTER_DEV is not set +# CONFIG_NET_NCSI is not set +CONFIG_RPS=y +CONFIG_RFS_ACCEL=y +CONFIG_XPS=y +# CONFIG_CGROUP_NET_PRIO is not set +# CONFIG_CGROUP_NET_CLASSID is not set +CONFIG_NET_RX_BUSY_POLL=y +CONFIG_BQL=y +# CONFIG_BPF_JIT is not set +# CONFIG_BPF_STREAM_PARSER is not set +CONFIG_NET_FLOW_LIMIT=y + +# +# Network testing +# +# CONFIG_NET_PKTGEN is not set +# CONFIG_HAMRADIO is not set +CONFIG_CAN=y +CONFIG_CAN_RAW=y +CONFIG_CAN_BCM=y +CONFIG_CAN_GW=y + +# +# CAN Device Drivers +# +# CONFIG_CAN_VCAN is not set +# CONFIG_CAN_VXCAN is not set +CONFIG_CAN_SLCAN=y +CONFIG_CAN_DEV=y +CONFIG_CAN_CALC_BITTIMING=y +# CONFIG_CAN_C_CAN is not set +# CONFIG_CAN_CC770 is not set +# CONFIG_CAN_IFI_CANFD is not set +# CONFIG_CAN_M_CAN is not set +# CONFIG_CAN_PEAK_PCIEFD is not set +# CONFIG_CAN_SJA1000 is not set +# CONFIG_CAN_SOFTING is not set + +# +# CAN SPI interfaces +# +# CONFIG_CAN_HI311X is not set +# CONFIG_CAN_MCP251X is not set + +# +# CAN USB interfaces +# +CONFIG_CAN_8DEV_USB=y +# CONFIG_CAN_EMS_USB is not set +# CONFIG_CAN_ESD_USB2 is not set +# CONFIG_CAN_GS_USB is not set +# CONFIG_CAN_KVASER_USB is not set +# CONFIG_CAN_MCBA_USB is not set 
+# CONFIG_CAN_PEAK_USB is not set +# CONFIG_CAN_UCAN is not set +# CONFIG_CAN_DEBUG_DEVICES is not set +CONFIG_BT=m +CONFIG_BT_BREDR=y +CONFIG_BT_RFCOMM=m +# CONFIG_BT_RFCOMM_TTY is not set +CONFIG_BT_BNEP=m +# CONFIG_BT_BNEP_MC_FILTER is not set +# CONFIG_BT_BNEP_PROTO_FILTER is not set +CONFIG_BT_HIDP=m +CONFIG_BT_HS=y +CONFIG_BT_LE=y +# CONFIG_BT_LEDS is not set +# CONFIG_BT_SELFTEST is not set +CONFIG_BT_DEBUGFS=y + +# +# Bluetooth device drivers +# +CONFIG_BT_INTEL=m +CONFIG_BT_BCM=m +CONFIG_BT_RTL=m +CONFIG_BT_HCIBTUSB=m +# CONFIG_BT_HCIBTUSB_AUTOSUSPEND is not set +CONFIG_BT_HCIBTUSB_BCM=y +CONFIG_BT_HCIBTUSB_RTL=y +# CONFIG_BT_HCIBTSDIO is not set +CONFIG_BT_HCIUART=m +CONFIG_BT_HCIUART_H4=y +# CONFIG_BT_HCIUART_BCSP is not set +# CONFIG_BT_HCIUART_ATH3K is not set +# CONFIG_BT_HCIUART_INTEL is not set +# CONFIG_BT_HCIUART_AG6XX is not set +# CONFIG_BT_HCIUART_MRVL is not set +# CONFIG_BT_HCIBCM203X is not set +# CONFIG_BT_HCIBPA10X is not set +# CONFIG_BT_HCIBFUSB is not set +# CONFIG_BT_HCIVHCI is not set +# CONFIG_BT_MRVL is not set +CONFIG_BT_ATH3K=m +# CONFIG_AF_RXRPC is not set +# CONFIG_AF_KCM is not set +CONFIG_FIB_RULES=y +CONFIG_WIRELESS=y +CONFIG_WIRELESS_EXT=y +CONFIG_WEXT_CORE=y +CONFIG_WEXT_PROC=y +CONFIG_WEXT_PRIV=y +CONFIG_CFG80211=m +# CONFIG_NL80211_TESTMODE is not set +# CONFIG_CFG80211_DEVELOPER_WARNINGS is not set +# CONFIG_CFG80211_CERTIFICATION_ONUS is not set +CONFIG_CFG80211_REQUIRE_SIGNED_REGDB=y +CONFIG_CFG80211_USE_KERNEL_REGDB_KEYS=y +CONFIG_CFG80211_DEFAULT_PS=y +# CONFIG_CFG80211_DEBUGFS is not set +CONFIG_CFG80211_CRDA_SUPPORT=y +# CONFIG_CFG80211_WEXT is not set +CONFIG_LIB80211=m +# CONFIG_LIB80211_DEBUG is not set +CONFIG_MAC80211=m +CONFIG_MAC80211_HAS_RC=y +CONFIG_MAC80211_RC_MINSTREL=y +CONFIG_MAC80211_RC_MINSTREL_HT=y +# CONFIG_MAC80211_RC_MINSTREL_VHT is not set +CONFIG_MAC80211_RC_DEFAULT_MINSTREL=y +CONFIG_MAC80211_RC_DEFAULT="minstrel_ht" +# CONFIG_MAC80211_MESH is not set +CONFIG_MAC80211_LEDS=y +# 
CONFIG_MAC80211_DEBUGFS is not set +# CONFIG_MAC80211_MESSAGE_TRACING is not set +# CONFIG_MAC80211_DEBUG_MENU is not set +CONFIG_MAC80211_STA_HASH_MAX_SIZE=0 +# CONFIG_WIMAX is not set +CONFIG_RFKILL=y +CONFIG_RFKILL_PM=y +CONFIG_RFKILL_LEDS=y +CONFIG_RFKILL_INPUT=y +CONFIG_RFKILL_GPIO=m +# CONFIG_NET_9P is not set +# CONFIG_CAIF is not set +# CONFIG_CEPH_LIB is not set +# CONFIG_NFC is not set +# CONFIG_PSAMPLE is not set +# CONFIG_NET_IFE is not set +# CONFIG_LWTUNNEL is not set +CONFIG_DST_CACHE=y +CONFIG_GRO_CELLS=y +# CONFIG_NET_DEVLINK is not set +CONFIG_MAY_USE_DEVLINK=y +# CONFIG_FAILOVER is not set +CONFIG_HAVE_EBPF_JIT=y + +# +# Device Drivers +# + +# +# Generic Driver Options +# +CONFIG_UEVENT_HELPER=y +CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" +# CONFIG_DEVTMPFS is not set +CONFIG_STANDALONE=y +CONFIG_PREVENT_FIRMWARE_BUILD=y + +# +# Firmware loader +# +CONFIG_FW_LOADER=y +CONFIG_EXTRA_FIRMWARE="" +CONFIG_FW_LOADER_USER_HELPER=y +CONFIG_FW_LOADER_USER_HELPER_FALLBACK=y +CONFIG_WANT_DEV_COREDUMP=y +CONFIG_ALLOW_DEV_COREDUMP=y +CONFIG_DEV_COREDUMP=y +# CONFIG_DEBUG_DRIVER is not set +CONFIG_DEBUG_DEVRES=y +# CONFIG_DEBUG_TEST_DRIVER_REMOVE is not set +# CONFIG_TEST_ASYNC_DRIVER_PROBE is not set +CONFIG_GENERIC_CPU_AUTOPROBE=y +CONFIG_GENERIC_CPU_VULNERABILITIES=y +CONFIG_REGMAP=y +CONFIG_REGMAP_I2C=y +CONFIG_REGMAP_SPI=y +CONFIG_REGMAP_IRQ=y +# CONFIG_REGMAP_SDW is not set +CONFIG_DMA_SHARED_BUFFER=y +# CONFIG_DMA_FENCE_TRACE is not set + +# +# Bus devices +# +CONFIG_DVC_TRACE_BUS=y +# CONFIG_DVC_TRACE_BUS_DEBUG is not set +CONFIG_CONNECTOR=y +CONFIG_PROC_EVENTS=y +# CONFIG_GNSS is not set +# CONFIG_MTD is not set +# CONFIG_OF is not set +CONFIG_ARCH_MIGHT_HAVE_PC_PARPORT=y +# CONFIG_PARPORT is not set +CONFIG_PNP=y +# CONFIG_PNP_DEBUG_MESSAGES is not set + +# +# Protocols +# +CONFIG_PNPACPI=y +CONFIG_BLK_DEV=y +# CONFIG_BLK_DEV_NULL_BLK is not set +# CONFIG_BLK_DEV_FD is not set +CONFIG_CDROM=y +# CONFIG_BLK_DEV_PCIESSD_MTIP32XX is not set 
+CONFIG_ZRAM=y +# CONFIG_ZRAM_WRITEBACK is not set +# CONFIG_ZRAM_MEMORY_TRACKING is not set +# CONFIG_BLK_DEV_DAC960 is not set +# CONFIG_BLK_DEV_UMEM is not set +CONFIG_BLK_DEV_LOOP=y +CONFIG_BLK_DEV_LOOP_MIN_COUNT=8 +# CONFIG_BLK_DEV_CRYPTOLOOP is not set +# CONFIG_BLK_DEV_DRBD is not set +# CONFIG_BLK_DEV_NBD is not set +# CONFIG_BLK_DEV_SKD is not set +# CONFIG_BLK_DEV_SX8 is not set +CONFIG_BLK_DEV_RAM=y +CONFIG_BLK_DEV_RAM_COUNT=16 +CONFIG_BLK_DEV_RAM_SIZE=16384 +# CONFIG_CDROM_PKTCDVD is not set +# CONFIG_ATA_OVER_ETH is not set +# CONFIG_VIRTIO_BLK is not set +# CONFIG_BLK_DEV_RBD is not set +# CONFIG_BLK_DEV_RSXX is not set + +# +# NVME Support +# +# CONFIG_BLK_DEV_NVME is not set +# CONFIG_NVME_FC is not set +# CONFIG_NVME_TARGET is not set + +# +# Misc devices +# +# CONFIG_AD525X_DPOT is not set +# CONFIG_DUMMY_IRQ is not set +# CONFIG_IBM_ASM is not set +# CONFIG_PHANTOM is not set +# CONFIG_SGI_IOC4 is not set +# CONFIG_TIFM_CORE is not set +# CONFIG_ICS932S401 is not set +# CONFIG_ENCLOSURE_SERVICES is not set +# CONFIG_HP_ILO is not set +# CONFIG_APDS9802ALS is not set +# CONFIG_ISL29003 is not set +# CONFIG_ISL29020 is not set +# CONFIG_SENSORS_TSL2550 is not set +# CONFIG_SENSORS_BH1770 is not set +# CONFIG_SENSORS_APDS990X is not set +# CONFIG_HMC6352 is not set +# CONFIG_DS1682 is not set +# CONFIG_USB_SWITCH_FSA9480 is not set +# CONFIG_LATTICE_ECP3_CONFIG is not set +# CONFIG_SRAM is not set +# CONFIG_PCI_ENDPOINT_TEST is not set +CONFIG_UID_SYS_STATS=y +# CONFIG_UID_SYS_STATS_DEBUG is not set +# CONFIG_C2PORT is not set + +# +# EEPROM support +# +# CONFIG_EEPROM_AT24 is not set +# CONFIG_EEPROM_AT25 is not set +# CONFIG_EEPROM_LEGACY is not set +# CONFIG_EEPROM_MAX6875 is not set +CONFIG_EEPROM_93CX6=m +# CONFIG_EEPROM_93XX46 is not set +# CONFIG_EEPROM_IDT_89HPESX is not set +# CONFIG_CB710_CORE is not set + +# +# Texas Instruments shared transport line discipline +# +# CONFIG_TI_ST is not set +# CONFIG_SENSORS_LIS3_I2C is not set +# 
CONFIG_ALTERA_STAPL is not set +CONFIG_INTEL_MEI=m +CONFIG_INTEL_MEI_ME=m +CONFIG_INTEL_MEI_TXE=m +# CONFIG_INTEL_MEI_SPD is not set +# CONFIG_INTEL_MEI_DAL is not set +# CONFIG_VMWARE_VMCI is not set + +# +# Intel MIC & related support +# + +# +# Intel MIC Bus Driver +# +# CONFIG_INTEL_MIC_BUS is not set + +# +# SCIF Bus Driver +# +# CONFIG_SCIF_BUS is not set + +# +# VOP Bus Driver +# +# CONFIG_VOP_BUS is not set + +# +# Intel MIC Host Driver +# + +# +# Intel MIC Card Driver +# + +# +# SCIF Driver +# + +# +# Intel MIC Coprocessor State Management (COSM) Drivers +# + +# +# VOP Driver +# +# CONFIG_GENWQE is not set +# CONFIG_ECHO is not set +# CONFIG_MISC_RTSX_PCI is not set +# CONFIG_MISC_RTSX_USB is not set +CONFIG_HAVE_IDE=y +# CONFIG_IDE is not set + +# +# SCSI device support +# +CONFIG_SCSI_MOD=y +# CONFIG_RAID_ATTRS is not set +CONFIG_SCSI=y +CONFIG_SCSI_DMA=y +# CONFIG_SCSI_MQ_DEFAULT is not set +CONFIG_SCSI_PROC_FS=y + +# +# SCSI support type (disk, tape, CD-ROM) +# +CONFIG_BLK_DEV_SD=y +# CONFIG_CHR_DEV_ST is not set +# CONFIG_CHR_DEV_OSST is not set +CONFIG_BLK_DEV_SR=y +CONFIG_BLK_DEV_SR_VENDOR=y +CONFIG_CHR_DEV_SG=y +# CONFIG_CHR_DEV_SCH is not set +CONFIG_SCSI_CONSTANTS=y +# CONFIG_SCSI_LOGGING is not set +# CONFIG_SCSI_SCAN_ASYNC is not set + +# +# SCSI Transports +# +CONFIG_SCSI_SPI_ATTRS=y +# CONFIG_SCSI_FC_ATTRS is not set +# CONFIG_SCSI_ISCSI_ATTRS is not set +# CONFIG_SCSI_SAS_ATTRS is not set +# CONFIG_SCSI_SAS_LIBSAS is not set +# CONFIG_SCSI_SRP_ATTRS is not set +CONFIG_SCSI_LOWLEVEL=y +# CONFIG_ISCSI_TCP is not set +# CONFIG_ISCSI_BOOT_SYSFS is not set +# CONFIG_SCSI_CXGB3_ISCSI is not set +# CONFIG_SCSI_CXGB4_ISCSI is not set +# CONFIG_SCSI_BNX2_ISCSI is not set +# CONFIG_BE2ISCSI is not set +# CONFIG_BLK_DEV_3W_XXXX_RAID is not set +# CONFIG_SCSI_HPSA is not set +# CONFIG_SCSI_3W_9XXX is not set +# CONFIG_SCSI_3W_SAS is not set +# CONFIG_SCSI_ACARD is not set +# CONFIG_SCSI_AACRAID is not set +# CONFIG_SCSI_AIC7XXX is not set +# 
CONFIG_SCSI_AIC79XX is not set +# CONFIG_SCSI_AIC94XX is not set +# CONFIG_SCSI_MVSAS is not set +# CONFIG_SCSI_MVUMI is not set +# CONFIG_SCSI_DPT_I2O is not set +# CONFIG_SCSI_ADVANSYS is not set +# CONFIG_SCSI_ARCMSR is not set +# CONFIG_SCSI_ESAS2R is not set +# CONFIG_MEGARAID_NEWGEN is not set +# CONFIG_MEGARAID_LEGACY is not set +# CONFIG_MEGARAID_SAS is not set +# CONFIG_SCSI_MPT3SAS is not set +# CONFIG_SCSI_MPT2SAS is not set +# CONFIG_SCSI_SMARTPQI is not set +CONFIG_SCSI_UFSHCD=y +CONFIG_SCSI_UFSHCD_PCI=y +# CONFIG_SCSI_UFS_DWC_TC_PCI is not set +# CONFIG_SCSI_UFSHCD_PLATFORM is not set +# CONFIG_SCSI_HPTIOP is not set +# CONFIG_SCSI_BUSLOGIC is not set +# CONFIG_VMWARE_PVSCSI is not set +# CONFIG_SCSI_SNIC is not set +# CONFIG_SCSI_DMX3191D is not set +# CONFIG_SCSI_GDTH is not set +# CONFIG_SCSI_ISCI is not set +# CONFIG_SCSI_IPS is not set +# CONFIG_SCSI_INITIO is not set +# CONFIG_SCSI_INIA100 is not set +# CONFIG_SCSI_STEX is not set +# CONFIG_SCSI_SYM53C8XX_2 is not set +# CONFIG_SCSI_IPR is not set +# CONFIG_SCSI_QLOGIC_1280 is not set +# CONFIG_SCSI_QLA_ISCSI is not set +# CONFIG_SCSI_DC395x is not set +# CONFIG_SCSI_AM53C974 is not set +# CONFIG_SCSI_WD719X is not set +# CONFIG_SCSI_DEBUG is not set +# CONFIG_SCSI_PMCRAID is not set +# CONFIG_SCSI_PM8001 is not set +# CONFIG_SCSI_VIRTIO is not set +# CONFIG_SCSI_DH is not set +# CONFIG_SCSI_OSD_INITIATOR is not set +CONFIG_ATA=y +CONFIG_ATA_VERBOSE_ERROR=y +CONFIG_ATA_ACPI=y +CONFIG_SATA_ZPODD=y +CONFIG_SATA_PMP=y + +# +# Controllers with non-SFF native interface +# +CONFIG_SATA_AHCI=y +CONFIG_SATA_MOBILE_LPM_POLICY=0 +CONFIG_SATA_AHCI_PLATFORM=y +# CONFIG_SATA_INIC162X is not set +# CONFIG_SATA_ACARD_AHCI is not set +# CONFIG_SATA_SIL24 is not set +CONFIG_ATA_SFF=y + +# +# SFF controllers with custom DMA interface +# +# CONFIG_PDC_ADMA is not set +# CONFIG_SATA_QSTOR is not set +# CONFIG_SATA_SX4 is not set +CONFIG_ATA_BMDMA=y + +# +# SATA SFF controllers with BMDMA +# +CONFIG_ATA_PIIX=y +# 
CONFIG_SATA_DWC is not set +# CONFIG_SATA_MV is not set +# CONFIG_SATA_NV is not set +# CONFIG_SATA_PROMISE is not set +# CONFIG_SATA_SIL is not set +# CONFIG_SATA_SIS is not set +# CONFIG_SATA_SVW is not set +# CONFIG_SATA_ULI is not set +# CONFIG_SATA_VIA is not set +# CONFIG_SATA_VITESSE is not set + +# +# PATA SFF controllers with BMDMA +# +# CONFIG_PATA_ALI is not set +CONFIG_PATA_AMD=y +# CONFIG_PATA_ARTOP is not set +# CONFIG_PATA_ATIIXP is not set +# CONFIG_PATA_ATP867X is not set +# CONFIG_PATA_CMD64X is not set +# CONFIG_PATA_CYPRESS is not set +# CONFIG_PATA_EFAR is not set +# CONFIG_PATA_HPT366 is not set +# CONFIG_PATA_HPT37X is not set +# CONFIG_PATA_HPT3X2N is not set +# CONFIG_PATA_HPT3X3 is not set +# CONFIG_PATA_IT8213 is not set +# CONFIG_PATA_IT821X is not set +# CONFIG_PATA_JMICRON is not set +# CONFIG_PATA_MARVELL is not set +# CONFIG_PATA_NETCELL is not set +# CONFIG_PATA_NINJA32 is not set +# CONFIG_PATA_NS87415 is not set +CONFIG_PATA_OLDPIIX=y +# CONFIG_PATA_OPTIDMA is not set +# CONFIG_PATA_PDC2027X is not set +# CONFIG_PATA_PDC_OLD is not set +# CONFIG_PATA_RADISYS is not set +# CONFIG_PATA_RDC is not set +CONFIG_PATA_SCH=y +# CONFIG_PATA_SERVERWORKS is not set +# CONFIG_PATA_SIL680 is not set +# CONFIG_PATA_SIS is not set +# CONFIG_PATA_TOSHIBA is not set +# CONFIG_PATA_TRIFLEX is not set +# CONFIG_PATA_VIA is not set +# CONFIG_PATA_WINBOND is not set + +# +# PIO-only SFF controllers +# +# CONFIG_PATA_CMD640_PCI is not set +CONFIG_PATA_MPIIX=y +# CONFIG_PATA_NS87410 is not set +# CONFIG_PATA_OPTI is not set +# CONFIG_PATA_PLATFORM is not set +# CONFIG_PATA_RZ1000 is not set + +# +# Generic fallback / legacy drivers +# +# CONFIG_PATA_ACPI is not set +CONFIG_ATA_GENERIC=y +# CONFIG_PATA_LEGACY is not set +CONFIG_MD=y +# CONFIG_BLK_DEV_MD is not set +CONFIG_BCACHE=m +# CONFIG_BCACHE_DEBUG is not set +# CONFIG_BCACHE_CLOSURES_DEBUG is not set +CONFIG_BLK_DEV_DM_BUILTIN=y +CONFIG_BLK_DEV_DM=y +# CONFIG_DM_MQ_DEFAULT is not set +# 
CONFIG_DM_DEBUG is not set +CONFIG_DM_BUFIO=y +# CONFIG_DM_DEBUG_BLOCK_MANAGER_LOCKING is not set +# CONFIG_DM_UNSTRIPED is not set +CONFIG_DM_CRYPT=y +CONFIG_DM_SNAPSHOT=y +# CONFIG_DM_THIN_PROVISIONING is not set +# CONFIG_DM_CACHE is not set +# CONFIG_DM_WRITECACHE is not set +# CONFIG_DM_ERA is not set +CONFIG_DM_MIRROR=y +# CONFIG_DM_LOG_USERSPACE is not set +# CONFIG_DM_RAID is not set +CONFIG_DM_ZERO=y +# CONFIG_DM_MULTIPATH is not set +# CONFIG_DM_DELAY is not set +CONFIG_DM_UEVENT=y +# CONFIG_DM_FLAKEY is not set +CONFIG_DM_VERITY=y +# CONFIG_DM_VERITY_HASH_PREFETCH_MIN_SIZE_128 is not set +CONFIG_DM_VERITY_HASH_PREFETCH_MIN_SIZE=1 +CONFIG_DM_VERITY_FEC=y +# CONFIG_DM_SWITCH is not set +# CONFIG_DM_LOG_WRITES is not set +# CONFIG_DM_INTEGRITY is not set +# CONFIG_DM_VERITY_AVB is not set +# CONFIG_DM_ANDROID_VERITY is not set +# CONFIG_DM_ANDROID_VERITY_AT_MOST_ONCE_DEFAULT_ENABLED is not set +# CONFIG_TARGET_CORE is not set +# CONFIG_FUSION is not set + +# +# IEEE 1394 (FireWire) support +# +# CONFIG_FIREWIRE is not set +# CONFIG_FIREWIRE_NOSY is not set +# CONFIG_MACINTOSH_DRIVERS is not set +CONFIG_NETDEVICES=y +CONFIG_MII=y +CONFIG_NET_CORE=y +# CONFIG_BONDING is not set +# CONFIG_DUMMY is not set +# CONFIG_EQUALIZER is not set +# CONFIG_NET_FC is not set +# CONFIG_IFB is not set +# CONFIG_NET_TEAM is not set +# CONFIG_MACVLAN is not set +# CONFIG_IPVLAN is not set +# CONFIG_VXLAN is not set +# CONFIG_GENEVE is not set +# CONFIG_GTP is not set +# CONFIG_MACSEC is not set +CONFIG_NETCONSOLE=y +# CONFIG_NETCONSOLE_DYNAMIC is not set +CONFIG_NETPOLL=y +CONFIG_NET_POLL_CONTROLLER=y +CONFIG_TUN=y +# CONFIG_TUN_VNET_CROSS_LE is not set +# CONFIG_VETH is not set +# CONFIG_VIRTIO_NET is not set +CONFIG_NLMON=m +# CONFIG_ARCNET is not set + +# +# CAIF transport drivers +# + +# +# Distributed Switch Architecture drivers +# +CONFIG_ETHERNET=y +CONFIG_NET_VENDOR_3COM=y +# CONFIG_VORTEX is not set +# CONFIG_TYPHOON is not set +CONFIG_NET_VENDOR_ADAPTEC=y +# 
CONFIG_ADAPTEC_STARFIRE is not set +# CONFIG_NET_VENDOR_AGERE is not set +CONFIG_NET_VENDOR_ALACRITECH=y +# CONFIG_SLICOSS is not set +CONFIG_NET_VENDOR_ALTEON=y +# CONFIG_ACENIC is not set +# CONFIG_ALTERA_TSE is not set +CONFIG_NET_VENDOR_AMAZON=y +# CONFIG_ENA_ETHERNET is not set +CONFIG_NET_VENDOR_AMD=y +# CONFIG_AMD8111_ETH is not set +# CONFIG_PCNET32 is not set +# CONFIG_AMD_XGBE is not set +CONFIG_NET_VENDOR_AQUANTIA=y +# CONFIG_AQTION is not set +# CONFIG_NET_VENDOR_ARC is not set +CONFIG_NET_VENDOR_ATHEROS=y +# CONFIG_ATL2 is not set +# CONFIG_ATL1 is not set +# CONFIG_ATL1E is not set +# CONFIG_ATL1C is not set +# CONFIG_ALX is not set +# CONFIG_NET_VENDOR_AURORA is not set +CONFIG_NET_VENDOR_BROADCOM=y +# CONFIG_B44 is not set +# CONFIG_BCMGENET is not set +# CONFIG_BNX2 is not set +# CONFIG_CNIC is not set +# CONFIG_TIGON3 is not set +# CONFIG_BNX2X is not set +# CONFIG_SYSTEMPORT is not set +# CONFIG_BNXT is not set +CONFIG_NET_VENDOR_BROCADE=y +# CONFIG_BNA is not set +CONFIG_NET_VENDOR_CADENCE=y +# CONFIG_MACB is not set +CONFIG_NET_VENDOR_CAVIUM=y +# CONFIG_THUNDER_NIC_PF is not set +# CONFIG_THUNDER_NIC_VF is not set +# CONFIG_THUNDER_NIC_BGX is not set +# CONFIG_THUNDER_NIC_RGX is not set +CONFIG_CAVIUM_PTP=y +# CONFIG_LIQUIDIO is not set +# CONFIG_LIQUIDIO_VF is not set +CONFIG_NET_VENDOR_CHELSIO=y +# CONFIG_CHELSIO_T1 is not set +# CONFIG_CHELSIO_T3 is not set +# CONFIG_CHELSIO_T4 is not set +# CONFIG_CHELSIO_T4VF is not set +CONFIG_NET_VENDOR_CISCO=y +# CONFIG_ENIC is not set +CONFIG_NET_VENDOR_CORTINA=y +# CONFIG_CX_ECAT is not set +# CONFIG_DNET is not set +CONFIG_NET_VENDOR_DEC=y +CONFIG_NET_TULIP=y +# CONFIG_DE2104X is not set +# CONFIG_TULIP is not set +# CONFIG_DE4X5 is not set +# CONFIG_WINBOND_840 is not set +# CONFIG_DM9102 is not set +# CONFIG_ULI526X is not set +CONFIG_NET_VENDOR_DLINK=y +# CONFIG_DL2K is not set +# CONFIG_SUNDANCE is not set +CONFIG_NET_VENDOR_EMULEX=y +# CONFIG_BE2NET is not set +CONFIG_NET_VENDOR_EZCHIP=y 
+CONFIG_NET_VENDOR_HP=y +# CONFIG_HP100 is not set +CONFIG_NET_VENDOR_HUAWEI=y +# CONFIG_HINIC is not set +CONFIG_NET_VENDOR_I825XX=y +CONFIG_NET_VENDOR_INTEL=y +CONFIG_E100=y +CONFIG_E1000=y +CONFIG_E1000E=y +CONFIG_E1000E_HWTS=y +CONFIG_IGB=m +CONFIG_IGB_HWMON=y +CONFIG_IGBVF=m +# CONFIG_IXGB is not set +# CONFIG_IXGBE is not set +CONFIG_IXGBEVF=y +# CONFIG_I40E is not set +# CONFIG_I40EVF is not set +# CONFIG_ICE is not set +# CONFIG_FM10K is not set +# CONFIG_JME is not set +# CONFIG_NET_VENDOR_MARVELL is not set +# CONFIG_NET_VENDOR_MELLANOX is not set +# CONFIG_NET_VENDOR_MICREL is not set +# CONFIG_NET_VENDOR_MICROCHIP is not set +CONFIG_NET_VENDOR_MICROSEMI=y +# CONFIG_NET_VENDOR_MYRI is not set +# CONFIG_FEALNX is not set +CONFIG_NET_VENDOR_NATSEMI=y +# CONFIG_NATSEMI is not set +# CONFIG_NS83820 is not set +CONFIG_NET_VENDOR_NETERION=y +# CONFIG_S2IO is not set +# CONFIG_VXGE is not set +CONFIG_NET_VENDOR_NETRONOME=y +# CONFIG_NFP is not set +CONFIG_NET_VENDOR_NI=y +CONFIG_NET_VENDOR_8390=y +CONFIG_NE2K_PCI=y +CONFIG_NET_VENDOR_NVIDIA=y +# CONFIG_FORCEDETH is not set +# CONFIG_NET_VENDOR_OKI is not set +# CONFIG_ETHOC is not set +CONFIG_NET_VENDOR_PACKET_ENGINES=y +# CONFIG_HAMACHI is not set +# CONFIG_YELLOWFIN is not set +# CONFIG_NET_VENDOR_QLOGIC is not set +# CONFIG_NET_VENDOR_QUALCOMM is not set +# CONFIG_NET_VENDOR_RDC is not set +# CONFIG_NET_VENDOR_REALTEK is not set +CONFIG_NET_VENDOR_RENESAS=y +# CONFIG_NET_VENDOR_ROCKER is not set +# CONFIG_NET_VENDOR_SAMSUNG is not set +# CONFIG_NET_VENDOR_SEEQ is not set +CONFIG_NET_VENDOR_SOLARFLARE=y +# CONFIG_SFC is not set +# CONFIG_SFC_FALCON is not set +# CONFIG_NET_VENDOR_SILAN is not set +# CONFIG_NET_VENDOR_SIS is not set +# CONFIG_NET_VENDOR_SMSC is not set +CONFIG_NET_VENDOR_SOCIONEXT=y +# CONFIG_NET_VENDOR_STMICRO is not set +# CONFIG_NET_VENDOR_SUN is not set +CONFIG_NET_VENDOR_SYNOPSYS=y +# CONFIG_DWC_XLGMAC is not set +# CONFIG_NET_VENDOR_TEHUTI is not set +# CONFIG_NET_VENDOR_TI is not set +# 
CONFIG_NET_VENDOR_VIA is not set +# CONFIG_NET_VENDOR_WIZNET is not set +# CONFIG_FDDI is not set +# CONFIG_HIPPI is not set +# CONFIG_NET_SB1000 is not set +CONFIG_MDIO_DEVICE=y +CONFIG_MDIO_BUS=y +# CONFIG_MDIO_BCM_UNIMAC is not set +CONFIG_MDIO_BITBANG=m +# CONFIG_MDIO_GPIO is not set +# CONFIG_MDIO_MSCC_MIIM is not set +# CONFIG_MDIO_THUNDER is not set +CONFIG_PHYLIB=y +# CONFIG_LED_TRIGGER_PHY is not set + +# +# MII PHY device drivers +# +# CONFIG_AMD_PHY is not set +# CONFIG_AQUANTIA_PHY is not set +# CONFIG_ASIX_PHY is not set +# CONFIG_AT803X_PHY is not set +# CONFIG_BCM7XXX_PHY is not set +# CONFIG_BCM87XX_PHY is not set +# CONFIG_BROADCOM_PHY is not set +# CONFIG_CICADA_PHY is not set +# CONFIG_CORTINA_PHY is not set +# CONFIG_DAVICOM_PHY is not set +# CONFIG_DP83822_PHY is not set +# CONFIG_DP83TC811_PHY is not set +# CONFIG_DP83848_PHY is not set +# CONFIG_DP83867_PHY is not set +# CONFIG_FIXED_PHY is not set +# CONFIG_ICPLUS_PHY is not set +# CONFIG_INTEL_XWAY_PHY is not set +# CONFIG_LSI_ET1011C_PHY is not set +# CONFIG_LXT_PHY is not set +# CONFIG_MARVELL_PHY is not set +# CONFIG_MARVELL_10G_PHY is not set +# CONFIG_MICREL_PHY is not set +# CONFIG_MICROCHIP_PHY is not set +# CONFIG_MICROCHIP_T1_PHY is not set +# CONFIG_MICROSEMI_PHY is not set +# CONFIG_NATIONAL_PHY is not set +# CONFIG_QSEMI_PHY is not set +# CONFIG_REALTEK_PHY is not set +# CONFIG_RENESAS_PHY is not set +# CONFIG_ROCKCHIP_PHY is not set +# CONFIG_SMSC_PHY is not set +# CONFIG_STE10XP is not set +# CONFIG_TERANETICS_PHY is not set +# CONFIG_VITESSE_PHY is not set +# CONFIG_XILINX_GMII2RGMII is not set +# CONFIG_MICREL_KS8995MA is not set +CONFIG_PPP=y +CONFIG_PPP_BSDCOMP=y +CONFIG_PPP_DEFLATE=y +CONFIG_PPP_FILTER=y +CONFIG_PPP_MPPE=y +# CONFIG_PPP_MULTILINK is not set +CONFIG_PPPOE=y +CONFIG_PPPOL2TP=y +# CONFIG_PPP_ASYNC is not set +# CONFIG_PPP_SYNC_TTY is not set +# CONFIG_SLIP is not set +CONFIG_SLHC=y +CONFIG_USB_NET_DRIVERS=y +CONFIG_USB_CATC=y +CONFIG_USB_KAWETH=y 
+CONFIG_USB_PEGASUS=y +CONFIG_USB_RTL8150=y +# CONFIG_USB_RTL8152 is not set +# CONFIG_USB_LAN78XX is not set +CONFIG_USB_USBNET=y +CONFIG_USB_NET_AX8817X=y +CONFIG_USB_NET_AX88179_178A=y +CONFIG_USB_NET_CDCETHER=y +CONFIG_USB_NET_CDC_EEM=y +CONFIG_USB_NET_CDC_NCM=y +CONFIG_USB_NET_HUAWEI_CDC_NCM=m +# CONFIG_USB_NET_CDC_MBIM is not set +CONFIG_USB_NET_DM9601=y +CONFIG_USB_NET_SR9700=m +# CONFIG_USB_NET_SR9800 is not set +CONFIG_USB_NET_SMSC75XX=y +CONFIG_USB_NET_SMSC95XX=y +# CONFIG_USB_NET_GL620A is not set +CONFIG_USB_NET_NET1080=y +# CONFIG_USB_NET_PLUSB is not set +CONFIG_USB_NET_MCS7830=y +# CONFIG_USB_NET_RNDIS_HOST is not set +CONFIG_USB_NET_CDC_SUBSET_ENABLE=y +CONFIG_USB_NET_CDC_SUBSET=y +# CONFIG_USB_ALI_M5632 is not set +# CONFIG_USB_AN2720 is not set +CONFIG_USB_BELKIN=y +CONFIG_USB_ARMLINUX=y +# CONFIG_USB_EPSON2888 is not set +# CONFIG_USB_KC2190 is not set +CONFIG_USB_NET_ZAURUS=y +# CONFIG_USB_NET_CX82310_ETH is not set +# CONFIG_USB_NET_KALMIA is not set +# CONFIG_USB_NET_QMI_WWAN is not set +# CONFIG_USB_HSO is not set +# CONFIG_USB_NET_INT51X1 is not set +CONFIG_USB_IPHETH=y +CONFIG_USB_SIERRA_NET=y +# CONFIG_USB_VL600 is not set +# CONFIG_USB_NET_CH9200 is not set +CONFIG_WLAN=y +# CONFIG_WIRELESS_WDS is not set +CONFIG_WLAN_VENDOR_ADMTEK=y +# CONFIG_ADM8211 is not set +CONFIG_WLAN_VENDOR_ATH=y +# CONFIG_ATH_DEBUG is not set +# CONFIG_ATH5K is not set +# CONFIG_ATH5K_PCI is not set +# CONFIG_ATH9K is not set +# CONFIG_ATH9K_HTC is not set +# CONFIG_CARL9170 is not set +# CONFIG_ATH6KL is not set +# CONFIG_AR5523 is not set +# CONFIG_WIL6210 is not set +# CONFIG_ATH10K is not set +# CONFIG_WCN36XX is not set +CONFIG_WLAN_VENDOR_ATMEL=y +# CONFIG_ATMEL is not set +# CONFIG_AT76C50X_USB is not set +CONFIG_WLAN_VENDOR_BROADCOM=y +# CONFIG_B43 is not set +# CONFIG_B43LEGACY is not set +# CONFIG_BRCMSMAC is not set +# CONFIG_BRCMFMAC is not set +CONFIG_WLAN_VENDOR_CISCO=y +# CONFIG_AIRO is not set +CONFIG_WLAN_VENDOR_INTEL=y +# CONFIG_IPW2100 is not 
set +# CONFIG_IPW2200 is not set +# CONFIG_IWL4965 is not set +# CONFIG_IWL3945 is not set +# CONFIG_IWLWIFI is not set +CONFIG_WLAN_VENDOR_INTERSIL=y +# CONFIG_HOSTAP is not set +# CONFIG_HERMES is not set +# CONFIG_P54_COMMON is not set +# CONFIG_PRISM54 is not set +CONFIG_WLAN_VENDOR_MARVELL=y +# CONFIG_LIBERTAS is not set +# CONFIG_LIBERTAS_THINFIRM is not set +# CONFIG_MWIFIEX is not set +# CONFIG_MWL8K is not set +CONFIG_WLAN_VENDOR_MEDIATEK=y +# CONFIG_MT7601U is not set +# CONFIG_MT76x0U is not set +# CONFIG_MT76x2E is not set +# CONFIG_MT76x2U is not set +CONFIG_WLAN_VENDOR_RALINK=y +# CONFIG_RT2X00 is not set +CONFIG_WLAN_VENDOR_REALTEK=y +# CONFIG_RTL8180 is not set +# CONFIG_RTL8187 is not set +CONFIG_RTL_CARDS=m +# CONFIG_RTL8192CE is not set +# CONFIG_RTL8192SE is not set +# CONFIG_RTL8192DE is not set +# CONFIG_RTL8723AE is not set +# CONFIG_RTL8723BE is not set +# CONFIG_RTL8188EE is not set +# CONFIG_RTL8192EE is not set +# CONFIG_RTL8821AE is not set +# CONFIG_RTL8192CU is not set +# CONFIG_RTL8XXXU is not set +CONFIG_WLAN_VENDOR_RSI=y +# CONFIG_RSI_91X is not set +CONFIG_WLAN_VENDOR_ST=y +# CONFIG_CW1200 is not set +CONFIG_WLAN_VENDOR_TI=y +# CONFIG_WL1251 is not set +# CONFIG_WL12XX is not set +# CONFIG_WL18XX is not set +# CONFIG_WLCORE is not set +CONFIG_WLAN_VENDOR_ZYDAS=y +# CONFIG_USB_ZD1201 is not set +# CONFIG_ZD1211RW is not set +CONFIG_WLAN_VENDOR_QUANTENNA=y +# CONFIG_QTNFMAC_PEARL_PCIE is not set +# CONFIG_MAC80211_HWSIM is not set +# CONFIG_USB_NET_RNDIS_WLAN is not set + +# +# Enable WiMAX (Networking options) to see the WiMAX drivers +# +# CONFIG_WAN is not set +# CONFIG_VMXNET3 is not set +# CONFIG_FUJITSU_ES is not set +# CONFIG_NETDEVSIM is not set +# CONFIG_NET_FAILOVER is not set +# CONFIG_ISDN is not set +# CONFIG_NVM is not set + +# +# Input device support +# +CONFIG_INPUT=y +CONFIG_INPUT_LEDS=y +CONFIG_INPUT_FF_MEMLESS=y +CONFIG_INPUT_POLLDEV=y +CONFIG_INPUT_SPARSEKMAP=y +# CONFIG_INPUT_MATRIXKMAP is not set + +# +# 
Userland interfaces +# +CONFIG_INPUT_MOUSEDEV=y +# CONFIG_INPUT_MOUSEDEV_PSAUX is not set +CONFIG_INPUT_MOUSEDEV_SCREEN_X=1024 +CONFIG_INPUT_MOUSEDEV_SCREEN_Y=768 +# CONFIG_INPUT_JOYDEV is not set +CONFIG_INPUT_EVDEV=y +# CONFIG_INPUT_EVBUG is not set + +# +# Input Device Drivers +# +CONFIG_INPUT_KEYBOARD=y +# CONFIG_KEYBOARD_ADC is not set +# CONFIG_KEYBOARD_ADP5588 is not set +# CONFIG_KEYBOARD_ADP5589 is not set +# CONFIG_KEYBOARD_ATKBD is not set +# CONFIG_KEYBOARD_QT1070 is not set +# CONFIG_KEYBOARD_QT2160 is not set +# CONFIG_KEYBOARD_DLINK_DIR685 is not set +# CONFIG_KEYBOARD_LKKBD is not set +CONFIG_KEYBOARD_GPIO=y +# CONFIG_KEYBOARD_GPIO_POLLED is not set +# CONFIG_KEYBOARD_TCA6416 is not set +# CONFIG_KEYBOARD_TCA8418 is not set +# CONFIG_KEYBOARD_MATRIX is not set +# CONFIG_KEYBOARD_LM8323 is not set +# CONFIG_KEYBOARD_LM8333 is not set +# CONFIG_KEYBOARD_MAX7359 is not set +# CONFIG_KEYBOARD_MCS is not set +# CONFIG_KEYBOARD_MPR121 is not set +# CONFIG_KEYBOARD_NEWTON is not set +# CONFIG_KEYBOARD_OPENCORES is not set +# CONFIG_KEYBOARD_SAMSUNG is not set +# CONFIG_KEYBOARD_STOWAWAY is not set +# CONFIG_KEYBOARD_SUNKBD is not set +# CONFIG_KEYBOARD_TM2_TOUCHKEY is not set +# CONFIG_KEYBOARD_XTKBD is not set +CONFIG_INPUT_MOUSE=y +# CONFIG_MOUSE_PS2 is not set +# CONFIG_MOUSE_SERIAL is not set +# CONFIG_MOUSE_APPLETOUCH is not set +# CONFIG_MOUSE_BCM5974 is not set +# CONFIG_MOUSE_CYAPA is not set +# CONFIG_MOUSE_ELAN_I2C is not set +# CONFIG_MOUSE_VSXXXAA is not set +# CONFIG_MOUSE_GPIO is not set +# CONFIG_MOUSE_SYNAPTICS_I2C is not set +# CONFIG_MOUSE_SYNAPTICS_USB is not set +CONFIG_INPUT_JOYSTICK=y +# CONFIG_JOYSTICK_ANALOG is not set +# CONFIG_JOYSTICK_A3D is not set +# CONFIG_JOYSTICK_ADI is not set +# CONFIG_JOYSTICK_COBRA is not set +# CONFIG_JOYSTICK_GF2K is not set +# CONFIG_JOYSTICK_GRIP is not set +# CONFIG_JOYSTICK_GRIP_MP is not set +# CONFIG_JOYSTICK_GUILLEMOT is not set +# CONFIG_JOYSTICK_INTERACT is not set +# 
CONFIG_JOYSTICK_SIDEWINDER is not set +# CONFIG_JOYSTICK_TMDC is not set +# CONFIG_JOYSTICK_IFORCE is not set +# CONFIG_JOYSTICK_WARRIOR is not set +# CONFIG_JOYSTICK_MAGELLAN is not set +# CONFIG_JOYSTICK_SPACEORB is not set +# CONFIG_JOYSTICK_SPACEBALL is not set +# CONFIG_JOYSTICK_STINGER is not set +# CONFIG_JOYSTICK_TWIDJOY is not set +# CONFIG_JOYSTICK_ZHENHUA is not set +# CONFIG_JOYSTICK_AS5011 is not set +# CONFIG_JOYSTICK_JOYDUMP is not set +# CONFIG_JOYSTICK_XPAD is not set +# CONFIG_JOYSTICK_PSXPAD_SPI is not set +# CONFIG_JOYSTICK_PXRC is not set +CONFIG_INPUT_TABLET=y +# CONFIG_TABLET_USB_ACECAD is not set +# CONFIG_TABLET_USB_AIPTEK is not set +# CONFIG_TABLET_USB_GTCO is not set +# CONFIG_TABLET_USB_HANWANG is not set +# CONFIG_TABLET_USB_KBTAB is not set +# CONFIG_TABLET_USB_PEGASUS is not set +# CONFIG_TABLET_SERIAL_WACOM4 is not set +CONFIG_INPUT_TOUCHSCREEN=y +CONFIG_TOUCHSCREEN_PROPERTIES=y +# CONFIG_TOUCHSCREEN_ADS7846 is not set +# CONFIG_TOUCHSCREEN_AD7877 is not set +# CONFIG_TOUCHSCREEN_AD7879 is not set +# CONFIG_TOUCHSCREEN_ADC is not set +CONFIG_TOUCHSCREEN_ATMEL_MXT=y +# CONFIG_TOUCHSCREEN_ATMEL_MXT_T37 is not set +# CONFIG_TOUCHSCREEN_AUO_PIXCIR is not set +# CONFIG_TOUCHSCREEN_BU21013 is not set +# CONFIG_TOUCHSCREEN_BU21029 is not set +# CONFIG_TOUCHSCREEN_CHIPONE_ICN8505 is not set +# CONFIG_TOUCHSCREEN_CY8CTMG110 is not set +# CONFIG_TOUCHSCREEN_CYTTSP_CORE is not set +CONFIG_TOUCHSCREEN_CYTTSP4_CORE=m +CONFIG_TOUCHSCREEN_CYTTSP4_I2C=m +CONFIG_TOUCHSCREEN_CYTTSP4_SPI=m +# CONFIG_TOUCHSCREEN_DYNAPRO is not set +# CONFIG_TOUCHSCREEN_HAMPSHIRE is not set +# CONFIG_TOUCHSCREEN_EETI is not set +# CONFIG_TOUCHSCREEN_EGALAX_SERIAL is not set +# CONFIG_TOUCHSCREEN_EXC3000 is not set +# CONFIG_TOUCHSCREEN_FUJITSU is not set +# CONFIG_TOUCHSCREEN_GOODIX is not set +# CONFIG_TOUCHSCREEN_HIDEEP is not set +# CONFIG_TOUCHSCREEN_ILI210X is not set +# CONFIG_TOUCHSCREEN_S6SY761 is not set +# CONFIG_TOUCHSCREEN_GUNZE is not set +# 
CONFIG_TOUCHSCREEN_EKTF2127 is not set +# CONFIG_TOUCHSCREEN_ELAN is not set +# CONFIG_TOUCHSCREEN_ELO is not set +# CONFIG_TOUCHSCREEN_WACOM_W8001 is not set +# CONFIG_TOUCHSCREEN_WACOM_I2C is not set +# CONFIG_TOUCHSCREEN_MAX11801 is not set +# CONFIG_TOUCHSCREEN_MCS5000 is not set +# CONFIG_TOUCHSCREEN_MMS114 is not set +# CONFIG_TOUCHSCREEN_MELFAS_MIP4 is not set +# CONFIG_TOUCHSCREEN_MTOUCH is not set +# CONFIG_TOUCHSCREEN_INEXIO is not set +# CONFIG_TOUCHSCREEN_MK712 is not set +# CONFIG_TOUCHSCREEN_PENMOUNT is not set +# CONFIG_TOUCHSCREEN_EDT_FT5X06 is not set +# CONFIG_TOUCHSCREEN_TOUCHRIGHT is not set +# CONFIG_TOUCHSCREEN_TOUCHWIN is not set +# CONFIG_TOUCHSCREEN_PIXCIR is not set +# CONFIG_TOUCHSCREEN_WDT87XX_I2C is not set +# CONFIG_TOUCHSCREEN_USB_COMPOSITE is not set +# CONFIG_TOUCHSCREEN_TOUCHIT213 is not set +# CONFIG_TOUCHSCREEN_TSC_SERIO is not set +# CONFIG_TOUCHSCREEN_TSC2004 is not set +# CONFIG_TOUCHSCREEN_TSC2005 is not set +# CONFIG_TOUCHSCREEN_TSC2007 is not set +# CONFIG_TOUCHSCREEN_RM_TS is not set +# CONFIG_TOUCHSCREEN_SILEAD is not set +# CONFIG_TOUCHSCREEN_SIS_I2C is not set +# CONFIG_TOUCHSCREEN_ST1232 is not set +# CONFIG_TOUCHSCREEN_STMFTS is not set +CONFIG_TOUCHSCREEN_SUR40=m +# CONFIG_TOUCHSCREEN_SURFACE3_SPI is not set +# CONFIG_TOUCHSCREEN_SX8654 is not set +# CONFIG_TOUCHSCREEN_TPS6507X is not set +# CONFIG_TOUCHSCREEN_ZET6223 is not set +CONFIG_TOUCHSCREEN_ZFORCE=m +# CONFIG_TOUCHSCREEN_ROHM_BU21023 is not set +CONFIG_INPUT_MISC=y +# CONFIG_INPUT_AD714X is not set +# CONFIG_INPUT_ARIZONA_HAPTICS is not set +# CONFIG_INPUT_BMA150 is not set +# CONFIG_INPUT_E3X0_BUTTON is not set +# CONFIG_INPUT_PCSPKR is not set +# CONFIG_INPUT_MMA8450 is not set +# CONFIG_INPUT_APANEL is not set +# CONFIG_INPUT_GP2A is not set +# CONFIG_INPUT_GPIO_BEEPER is not set +# CONFIG_INPUT_GPIO_DECODER is not set +# CONFIG_INPUT_ATLAS_BTNS is not set +# CONFIG_INPUT_ATI_REMOTE2 is not set +# CONFIG_INPUT_KEYSPAN_REMOTE is not set +# 
CONFIG_INPUT_KXTJ9 is not set +# CONFIG_INPUT_POWERMATE is not set +# CONFIG_INPUT_YEALINK is not set +# CONFIG_INPUT_CM109 is not set +# CONFIG_INPUT_REGULATOR_HAPTIC is not set +CONFIG_INPUT_UINPUT=y +# CONFIG_INPUT_GPIO is not set +# CONFIG_INPUT_PCF8574 is not set +# CONFIG_INPUT_PWM_BEEPER is not set +# CONFIG_INPUT_PWM_VIBRA is not set +# CONFIG_INPUT_GPIO_ROTARY_ENCODER is not set +# CONFIG_INPUT_ADXL34X is not set +# CONFIG_INPUT_IMS_PCU is not set +# CONFIG_INPUT_CMA3000 is not set +CONFIG_INPUT_SOC_BUTTON_ARRAY=y +# CONFIG_INPUT_DRV260X_HAPTICS is not set +# CONFIG_INPUT_DRV2665_HAPTICS is not set +# CONFIG_INPUT_DRV2667_HAPTICS is not set +# CONFIG_RMI4_CORE is not set + +# +# Hardware I/O ports +# +# CONFIG_SERIO is not set +CONFIG_ARCH_MIGHT_HAVE_PC_SERIO=y +# CONFIG_GAMEPORT is not set + +# +# Character devices +# +CONFIG_TTY=y +CONFIG_VT=y +CONFIG_CONSOLE_TRANSLATIONS=y +CONFIG_VT_CONSOLE=y +CONFIG_VT_CONSOLE_SLEEP=y +CONFIG_HW_CONSOLE=y +CONFIG_VT_HW_CONSOLE_BINDING=y +CONFIG_UNIX98_PTYS=y +# CONFIG_LEGACY_PTYS is not set +CONFIG_SERIAL_NONSTANDARD=y +# CONFIG_ROCKETPORT is not set +# CONFIG_CYCLADES is not set +# CONFIG_MOXA_INTELLIO is not set +# CONFIG_MOXA_SMARTIO is not set +# CONFIG_SYNCLINK is not set +# CONFIG_SYNCLINKMP is not set +# CONFIG_SYNCLINK_GT is not set +# CONFIG_NOZOMI is not set +# CONFIG_ISI is not set +# CONFIG_N_HDLC is not set +CONFIG_N_GSM=y +CONFIG_TRACE_ROUTER=y +CONFIG_TRACE_SINK=y +# CONFIG_CBC_LDISC is not set +# CONFIG_DEVMEM is not set +# CONFIG_DEVKMEM is not set + +# +# Serial drivers +# +CONFIG_SERIAL_EARLYCON=y +CONFIG_SERIAL_8250=y +CONFIG_SERIAL_8250_DEPRECATED_OPTIONS=y +CONFIG_SERIAL_8250_PNP=y +# CONFIG_SERIAL_8250_FINTEK is not set +CONFIG_SERIAL_8250_CONSOLE=y +CONFIG_SERIAL_8250_DMA=y +CONFIG_SERIAL_8250_PCI=y +CONFIG_SERIAL_8250_EXAR=y +CONFIG_SERIAL_8250_NR_UARTS=32 +CONFIG_SERIAL_8250_RUNTIME_UARTS=4 +CONFIG_SERIAL_8250_EXTENDED=y +CONFIG_SERIAL_8250_MANY_PORTS=y +CONFIG_SERIAL_8250_SHARE_IRQ=y 
+CONFIG_SERIAL_8250_DETECT_IRQ=y +CONFIG_SERIAL_8250_RSA=y +CONFIG_SERIAL_8250_DW=y +# CONFIG_SERIAL_8250_RT288X is not set +CONFIG_SERIAL_8250_LPSS=y +CONFIG_SERIAL_8250_MID=y +# CONFIG_SERIAL_8250_MOXA is not set + +# +# Non-8250 serial port support +# +# CONFIG_SERIAL_MAX3100 is not set +# CONFIG_SERIAL_MAX310X is not set +# CONFIG_SERIAL_UARTLITE is not set +CONFIG_SERIAL_CORE=y +CONFIG_SERIAL_CORE_CONSOLE=y +# CONFIG_SERIAL_JSM is not set +# CONFIG_SERIAL_SCCNXP is not set +# CONFIG_SERIAL_SC16IS7XX is not set +# CONFIG_SERIAL_ALTERA_JTAGUART is not set +# CONFIG_SERIAL_ALTERA_UART is not set +# CONFIG_SERIAL_IFX6X60 is not set +# CONFIG_SERIAL_ARC is not set +# CONFIG_SERIAL_RP2 is not set +# CONFIG_SERIAL_FSL_LPUART is not set +# CONFIG_SERIAL_DEV_BUS is not set +# CONFIG_TTY_PRINTK is not set +# CONFIG_VIRTIO_CONSOLE is not set +# CONFIG_IPMI_HANDLER is not set +CONFIG_HW_RANDOM=y +# CONFIG_HW_RANDOM_TIMERIOMEM is not set +CONFIG_HW_RANDOM_INTEL=y +# CONFIG_HW_RANDOM_AMD is not set +# CONFIG_HW_RANDOM_VIA is not set +# CONFIG_HW_RANDOM_VIRTIO is not set +CONFIG_NVRAM=y +# CONFIG_R3964 is not set +# CONFIG_APPLICOM is not set +# CONFIG_MWAVE is not set +# CONFIG_RAW_DRIVER is not set +# CONFIG_HPET is not set +# CONFIG_HANGCHECK_TIMER is not set +CONFIG_TCG_TPM=m +CONFIG_HW_RANDOM_TPM=y +CONFIG_TCG_TIS_CORE=m +CONFIG_TCG_TIS=m +# CONFIG_TCG_TIS_SPI is not set +CONFIG_TCG_TIS_I2C_ATMEL=m +CONFIG_TCG_TIS_I2C_INFINEON=m +CONFIG_TCG_TIS_I2C_NUVOTON=m +CONFIG_TCG_NSC=m +CONFIG_TCG_ATMEL=m +CONFIG_TCG_INFINEON=m +CONFIG_TCG_CRB=m +# CONFIG_TCG_VTPM_PROXY is not set +# CONFIG_TCG_TIS_ST33ZP24_I2C is not set +# CONFIG_TCG_TIS_ST33ZP24_SPI is not set +# CONFIG_TELCLOCK is not set +# CONFIG_DEVPORT is not set +# CONFIG_XILLYBUS is not set +CONFIG_RPMB=y +CONFIG_RPMB_INTF_DEV=y +CONFIG_RPMB_SIM=m +# CONFIG_VIRTIO_RPMB is not set +# CONFIG_RPMB_MUX is not set +CONFIG_RANDOM_TRUST_CPU=y + +# +# I2C support +# +CONFIG_I2C=y +CONFIG_ACPI_I2C_OPREGION=y 
+CONFIG_I2C_BOARDINFO=y +CONFIG_I2C_COMPAT=y +CONFIG_I2C_CHARDEV=y +CONFIG_I2C_MUX=y + +# +# Multiplexer I2C Chip support +# +# CONFIG_I2C_MUX_GPIO is not set +# CONFIG_I2C_MUX_LTC4306 is not set +# CONFIG_I2C_MUX_PCA9541 is not set +# CONFIG_I2C_MUX_PCA954x is not set +# CONFIG_I2C_MUX_REG is not set +# CONFIG_I2C_MUX_MLXCPLD is not set +# CONFIG_I2C_HELPER_AUTO is not set +CONFIG_I2C_SMBUS=y + +# +# I2C Algorithms +# +CONFIG_I2C_ALGOBIT=y +# CONFIG_I2C_ALGOPCF is not set +# CONFIG_I2C_ALGOPCA is not set + +# +# I2C Hardware Bus support +# + +# +# PC SMBus host controller drivers +# +# CONFIG_I2C_ALI1535 is not set +# CONFIG_I2C_ALI1563 is not set +# CONFIG_I2C_ALI15X3 is not set +# CONFIG_I2C_AMD756 is not set +# CONFIG_I2C_AMD8111 is not set +CONFIG_I2C_I801=y +CONFIG_I2C_ISCH=y +# CONFIG_I2C_ISMT is not set +# CONFIG_I2C_PIIX4 is not set +# CONFIG_I2C_NFORCE2 is not set +# CONFIG_I2C_SIS5595 is not set +# CONFIG_I2C_SIS630 is not set +# CONFIG_I2C_SIS96X is not set +# CONFIG_I2C_VIA is not set +# CONFIG_I2C_VIAPRO is not set + +# +# ACPI drivers +# +CONFIG_I2C_SCMI=y + +# +# I2C system bus drivers (mostly embedded / system-on-chip) +# +# CONFIG_I2C_CBUS_GPIO is not set +CONFIG_I2C_DESIGNWARE_CORE=y +CONFIG_I2C_DESIGNWARE_PLATFORM=y +# CONFIG_I2C_DESIGNWARE_SLAVE is not set +CONFIG_I2C_DESIGNWARE_PCI=y +CONFIG_I2C_DESIGNWARE_BAYTRAIL=y +# CONFIG_I2C_EMEV2 is not set +# CONFIG_I2C_GPIO is not set +# CONFIG_I2C_OCORES is not set +# CONFIG_I2C_PCA_PLATFORM is not set +# CONFIG_I2C_SIMTEC is not set +# CONFIG_I2C_XILINX is not set + +# +# External I2C/SMBus adapter drivers +# +# CONFIG_I2C_DIOLAN_U2C is not set +# CONFIG_I2C_PARPORT_LIGHT is not set +# CONFIG_I2C_ROBOTFUZZ_OSIF is not set +# CONFIG_I2C_TAOS_EVM is not set +# CONFIG_I2C_TINY_USB is not set + +# +# Other I2C/SMBus bus drivers +# +# CONFIG_I2C_MLXCPLD is not set +# CONFIG_I2C_STUB is not set +CONFIG_I2C_SLAVE=y +# CONFIG_I2C_SLAVE_EEPROM is not set +# CONFIG_I2C_DEBUG_CORE is not set +# 
CONFIG_I2C_DEBUG_ALGO is not set +# CONFIG_I2C_DEBUG_BUS is not set +CONFIG_SPI=y +# CONFIG_SPI_DEBUG is not set +CONFIG_SPI_MASTER=y +# CONFIG_SPI_MEM is not set + +# +# SPI Master Controller Drivers +# +# CONFIG_SPI_ALTERA is not set +# CONFIG_SPI_AXI_SPI_ENGINE is not set +# CONFIG_SPI_BITBANG is not set +# CONFIG_SPI_CADENCE is not set +# CONFIG_SPI_DESIGNWARE is not set +# CONFIG_SPI_GPIO is not set +# CONFIG_SPI_OC_TINY is not set +CONFIG_SPI_PXA2XX=y +CONFIG_SPI_PXA2XX_PCI=y +# CONFIG_SPI_ROCKCHIP is not set +# CONFIG_SPI_SC18IS602 is not set +# CONFIG_SPI_XCOMM is not set +# CONFIG_SPI_XILINX is not set +# CONFIG_SPI_ZYNQMP_GQSPI is not set + +# +# SPI Protocol Masters +# +# CONFIG_SPI_SPIDEV is not set +# CONFIG_SPI_LOOPBACK_TEST is not set +# CONFIG_SPI_TLE62X0 is not set +# CONFIG_SPI_SLAVE is not set +# CONFIG_SPMI is not set +# CONFIG_HSI is not set +CONFIG_PPS=y +# CONFIG_PPS_DEBUG is not set + +# +# PPS clients support +# +# CONFIG_PPS_CLIENT_KTIMER is not set +# CONFIG_PPS_CLIENT_LDISC is not set +# CONFIG_PPS_CLIENT_GPIO is not set + +# +# PPS generators support +# + +# +# PTP clock support +# +CONFIG_PTP_1588_CLOCK=y + +# +# Enable PHYLIB and NETWORK_PHY_TIMESTAMPING to see the additional clocks. 
+# +CONFIG_PINCTRL=y +CONFIG_PINMUX=y +CONFIG_PINCONF=y +CONFIG_GENERIC_PINCONF=y +# CONFIG_DEBUG_PINCTRL is not set +# CONFIG_PINCTRL_AMD is not set +# CONFIG_PINCTRL_MCP23S08 is not set +# CONFIG_PINCTRL_SX150X is not set +CONFIG_PINCTRL_BAYTRAIL=y +CONFIG_PINCTRL_CHERRYVIEW=y +CONFIG_PINCTRL_INTEL=y +CONFIG_PINCTRL_BROXTON=y +# CONFIG_PINCTRL_CANNONLAKE is not set +# CONFIG_PINCTRL_CEDARFORK is not set +# CONFIG_PINCTRL_DENVERTON is not set +# CONFIG_PINCTRL_GEMINILAKE is not set +# CONFIG_PINCTRL_ICELAKE is not set +# CONFIG_PINCTRL_LEWISBURG is not set +# CONFIG_PINCTRL_SUNRISEPOINT is not set +CONFIG_GPIOLIB=y +CONFIG_GPIOLIB_FASTPATH_LIMIT=512 +CONFIG_GPIO_ACPI=y +CONFIG_GPIOLIB_IRQCHIP=y +# CONFIG_DEBUG_GPIO is not set +CONFIG_GPIO_SYSFS=y + +# +# Memory mapped GPIO drivers +# +# CONFIG_GPIO_AMDPT is not set +# CONFIG_GPIO_DWAPB is not set +# CONFIG_GPIO_EXAR is not set +# CONFIG_GPIO_GENERIC_PLATFORM is not set +# CONFIG_GPIO_ICH is not set +CONFIG_GPIO_LYNXPOINT=y +# CONFIG_GPIO_MB86S7X is not set +# CONFIG_GPIO_MOCKUP is not set +# CONFIG_GPIO_VX855 is not set + +# +# Port-mapped I/O GPIO drivers +# +# CONFIG_GPIO_F7188X is not set +# CONFIG_GPIO_IT87 is not set +# CONFIG_GPIO_SCH is not set +# CONFIG_GPIO_SCH311X is not set +# CONFIG_GPIO_WINBOND is not set +# CONFIG_GPIO_WS16C48 is not set + +# +# I2C GPIO expanders +# +# CONFIG_GPIO_ADP5588 is not set +# CONFIG_GPIO_MAX7300 is not set +# CONFIG_GPIO_MAX732X is not set +# CONFIG_GPIO_PCA953X is not set +# CONFIG_GPIO_PCF857X is not set +# CONFIG_GPIO_TPIC2810 is not set + +# +# MFD GPIO expanders +# +CONFIG_GPIO_ARIZONA=y +# CONFIG_GPIO_CRYSTAL_COVE is not set +CONFIG_GPIO_WHISKEY_COVE=y + +# +# PCI GPIO expanders +# +# CONFIG_GPIO_AMD8111 is not set +# CONFIG_GPIO_BT8XX is not set +# CONFIG_GPIO_ML_IOH is not set +# CONFIG_GPIO_PCI_IDIO_16 is not set +# CONFIG_GPIO_PCIE_IDIO_24 is not set +# CONFIG_GPIO_RDC321X is not set + +# +# SPI GPIO expanders +# +# CONFIG_GPIO_MAX3191X is not set +# 
CONFIG_GPIO_MAX7301 is not set +# CONFIG_GPIO_MC33880 is not set +# CONFIG_GPIO_PISOSR is not set +# CONFIG_GPIO_XRA1403 is not set + +# +# USB GPIO expanders +# +# CONFIG_W1 is not set +# CONFIG_POWER_AVS is not set +# CONFIG_POWER_RESET is not set +CONFIG_POWER_SUPPLY=y +# CONFIG_POWER_SUPPLY_DEBUG is not set +# CONFIG_PDA_POWER is not set +CONFIG_GENERIC_ADC_BATTERY=m +# CONFIG_TEST_POWER is not set +# CONFIG_CHARGER_ADP5061 is not set +# CONFIG_BATTERY_DS2780 is not set +# CONFIG_BATTERY_DS2781 is not set +# CONFIG_BATTERY_DS2782 is not set +# CONFIG_BATTERY_SBS is not set +# CONFIG_CHARGER_SBS is not set +# CONFIG_MANAGER_SBS is not set +# CONFIG_BATTERY_BQ27XXX is not set +# CONFIG_BATTERY_MAX17040 is not set +CONFIG_BATTERY_MAX17042=y +CONFIG_CHARGER_ISP1704=m +# CONFIG_CHARGER_MAX8903 is not set +# CONFIG_CHARGER_LP8727 is not set +# CONFIG_CHARGER_GPIO is not set +# CONFIG_CHARGER_MANAGER is not set +# CONFIG_CHARGER_LTC3651 is not set +# CONFIG_CHARGER_BQ2415X is not set +# CONFIG_CHARGER_BQ24190 is not set +# CONFIG_CHARGER_BQ24257 is not set +# CONFIG_CHARGER_BQ24735 is not set +CONFIG_CHARGER_BQ25890=y +CONFIG_CHARGER_SMB347=y +# CONFIG_BATTERY_GAUGE_LTC2941 is not set +# CONFIG_CHARGER_RT9455 is not set +CONFIG_HWMON=y +# CONFIG_HWMON_DEBUG_CHIP is not set + +# +# Native drivers +# +# CONFIG_SENSORS_ABITUGURU is not set +# CONFIG_SENSORS_ABITUGURU3 is not set +# CONFIG_SENSORS_AD7314 is not set +# CONFIG_SENSORS_AD7414 is not set +# CONFIG_SENSORS_AD7418 is not set +# CONFIG_SENSORS_ADM1021 is not set +# CONFIG_SENSORS_ADM1025 is not set +# CONFIG_SENSORS_ADM1026 is not set +# CONFIG_SENSORS_ADM1029 is not set +# CONFIG_SENSORS_ADM1031 is not set +# CONFIG_SENSORS_ADM9240 is not set +# CONFIG_SENSORS_ADT7310 is not set +# CONFIG_SENSORS_ADT7410 is not set +# CONFIG_SENSORS_ADT7411 is not set +# CONFIG_SENSORS_ADT7462 is not set +# CONFIG_SENSORS_ADT7470 is not set +# CONFIG_SENSORS_ADT7475 is not set +# CONFIG_SENSORS_ASC7621 is not set +# 
CONFIG_SENSORS_K8TEMP is not set +# CONFIG_SENSORS_K10TEMP is not set +# CONFIG_SENSORS_FAM15H_POWER is not set +# CONFIG_SENSORS_APPLESMC is not set +# CONFIG_SENSORS_ASB100 is not set +# CONFIG_SENSORS_ASPEED is not set +# CONFIG_SENSORS_ATXP1 is not set +# CONFIG_SENSORS_DS620 is not set +# CONFIG_SENSORS_DS1621 is not set +CONFIG_SENSORS_DELL_SMM=m +# CONFIG_SENSORS_I5K_AMB is not set +# CONFIG_SENSORS_F71805F is not set +# CONFIG_SENSORS_F71882FG is not set +# CONFIG_SENSORS_F75375S is not set +# CONFIG_SENSORS_FSCHMD is not set +# CONFIG_SENSORS_FTSTEUTATES is not set +# CONFIG_SENSORS_GL518SM is not set +# CONFIG_SENSORS_GL520SM is not set +# CONFIG_SENSORS_G760A is not set +# CONFIG_SENSORS_G762 is not set +# CONFIG_SENSORS_HIH6130 is not set +CONFIG_SENSORS_IIO_HWMON=y +# CONFIG_SENSORS_I5500 is not set +CONFIG_SENSORS_CORETEMP=y +# CONFIG_SENSORS_IT87 is not set +# CONFIG_SENSORS_JC42 is not set +# CONFIG_SENSORS_POWR1220 is not set +# CONFIG_SENSORS_LINEAGE is not set +# CONFIG_SENSORS_LTC2945 is not set +# CONFIG_SENSORS_LTC2990 is not set +# CONFIG_SENSORS_LTC4151 is not set +# CONFIG_SENSORS_LTC4215 is not set +# CONFIG_SENSORS_LTC4222 is not set +# CONFIG_SENSORS_LTC4245 is not set +# CONFIG_SENSORS_LTC4260 is not set +# CONFIG_SENSORS_LTC4261 is not set +# CONFIG_SENSORS_MAX1111 is not set +# CONFIG_SENSORS_MAX16065 is not set +# CONFIG_SENSORS_MAX1619 is not set +# CONFIG_SENSORS_MAX1668 is not set +# CONFIG_SENSORS_MAX197 is not set +# CONFIG_SENSORS_MAX31722 is not set +# CONFIG_SENSORS_MAX6621 is not set +# CONFIG_SENSORS_MAX6639 is not set +# CONFIG_SENSORS_MAX6642 is not set +# CONFIG_SENSORS_MAX6650 is not set +# CONFIG_SENSORS_MAX6697 is not set +# CONFIG_SENSORS_MAX31790 is not set +# CONFIG_SENSORS_MCP3021 is not set +# CONFIG_SENSORS_TC654 is not set +# CONFIG_SENSORS_ADCXX is not set +# CONFIG_SENSORS_LM63 is not set +# CONFIG_SENSORS_LM70 is not set +# CONFIG_SENSORS_LM73 is not set +# CONFIG_SENSORS_LM75 is not set +# 
CONFIG_SENSORS_LM77 is not set +# CONFIG_SENSORS_LM78 is not set +# CONFIG_SENSORS_LM80 is not set +# CONFIG_SENSORS_LM83 is not set +# CONFIG_SENSORS_LM85 is not set +# CONFIG_SENSORS_LM87 is not set +# CONFIG_SENSORS_LM90 is not set +# CONFIG_SENSORS_LM92 is not set +# CONFIG_SENSORS_LM93 is not set +# CONFIG_SENSORS_LM95234 is not set +# CONFIG_SENSORS_LM95241 is not set +# CONFIG_SENSORS_LM95245 is not set +# CONFIG_SENSORS_PC87360 is not set +# CONFIG_SENSORS_PC87427 is not set +# CONFIG_SENSORS_NTC_THERMISTOR is not set +# CONFIG_SENSORS_NCT6683 is not set +# CONFIG_SENSORS_NCT6775 is not set +# CONFIG_SENSORS_NCT7802 is not set +# CONFIG_SENSORS_NCT7904 is not set +# CONFIG_SENSORS_NPCM7XX is not set +# CONFIG_SENSORS_PCF8591 is not set +# CONFIG_PMBUS is not set +# CONFIG_SENSORS_SHT15 is not set +# CONFIG_SENSORS_SHT21 is not set +# CONFIG_SENSORS_SHT3x is not set +# CONFIG_SENSORS_SHTC1 is not set +# CONFIG_SENSORS_SIS5595 is not set +# CONFIG_SENSORS_DME1737 is not set +# CONFIG_SENSORS_EMC1403 is not set +# CONFIG_SENSORS_EMC2103 is not set +# CONFIG_SENSORS_EMC6W201 is not set +# CONFIG_SENSORS_SMSC47M1 is not set +# CONFIG_SENSORS_SMSC47M192 is not set +# CONFIG_SENSORS_SMSC47B397 is not set +CONFIG_SENSORS_SCH56XX_COMMON=m +CONFIG_SENSORS_SCH5627=m +CONFIG_SENSORS_SCH5636=m +# CONFIG_SENSORS_STTS751 is not set +# CONFIG_SENSORS_SMM665 is not set +# CONFIG_SENSORS_ADC128D818 is not set +# CONFIG_SENSORS_ADS1015 is not set +# CONFIG_SENSORS_ADS7828 is not set +# CONFIG_SENSORS_ADS7871 is not set +# CONFIG_SENSORS_AMC6821 is not set +# CONFIG_SENSORS_INA209 is not set +# CONFIG_SENSORS_INA2XX is not set +# CONFIG_SENSORS_INA3221 is not set +# CONFIG_SENSORS_TC74 is not set +# CONFIG_SENSORS_THMC50 is not set +# CONFIG_SENSORS_TMP102 is not set +# CONFIG_SENSORS_TMP103 is not set +# CONFIG_SENSORS_TMP108 is not set +# CONFIG_SENSORS_TMP401 is not set +# CONFIG_SENSORS_TMP421 is not set +# CONFIG_SENSORS_VIA_CPUTEMP is not set +# CONFIG_SENSORS_VIA686A is 
not set +# CONFIG_SENSORS_VT1211 is not set +# CONFIG_SENSORS_VT8231 is not set +# CONFIG_SENSORS_W83773G is not set +# CONFIG_SENSORS_W83781D is not set +# CONFIG_SENSORS_W83791D is not set +# CONFIG_SENSORS_W83792D is not set +# CONFIG_SENSORS_W83793 is not set +# CONFIG_SENSORS_W83795 is not set +# CONFIG_SENSORS_W83L785TS is not set +# CONFIG_SENSORS_W83L786NG is not set +# CONFIG_SENSORS_W83627HF is not set +# CONFIG_SENSORS_W83627EHF is not set +# CONFIG_SENSORS_XGENE is not set + +# +# ACPI drivers +# +# CONFIG_SENSORS_ACPI_POWER is not set +# CONFIG_SENSORS_ATK0110 is not set +CONFIG_THERMAL=y +# CONFIG_THERMAL_STATISTICS is not set +CONFIG_THERMAL_EMERGENCY_POWEROFF_DELAY_MS=0 +CONFIG_THERMAL_HWMON=y +CONFIG_THERMAL_WRITABLE_TRIPS=y +CONFIG_THERMAL_DEFAULT_GOV_STEP_WISE=y +# CONFIG_THERMAL_DEFAULT_GOV_FAIR_SHARE is not set +# CONFIG_THERMAL_DEFAULT_GOV_USER_SPACE is not set +# CONFIG_THERMAL_DEFAULT_GOV_POWER_ALLOCATOR is not set +# CONFIG_THERMAL_GOV_FAIR_SHARE is not set +CONFIG_THERMAL_GOV_STEP_WISE=y +CONFIG_THERMAL_GOV_BANG_BANG=y +CONFIG_THERMAL_GOV_USER_SPACE=y +# CONFIG_THERMAL_GOV_POWER_ALLOCATOR is not set +# CONFIG_CLOCK_THERMAL is not set +# CONFIG_DEVFREQ_THERMAL is not set +# CONFIG_THERMAL_EMULATION is not set +CONFIG_INTEL_POWERCLAMP=y +CONFIG_X86_PKG_TEMP_THERMAL=y +CONFIG_INTEL_SOC_DTS_IOSF_CORE=y +CONFIG_INTEL_SOC_DTS_THERMAL=m + +# +# ACPI INT340X thermal drivers +# +CONFIG_INT340X_THERMAL=y +CONFIG_ACPI_THERMAL_REL=y +# CONFIG_INT3406_THERMAL is not set +# CONFIG_INTEL_BXT_PMIC_THERMAL is not set +# CONFIG_INTEL_PCH_THERMAL is not set +# CONFIG_GENERIC_ADC_THERMAL is not set + +# +# Trusty +# +CONFIG_TRUSTY=y +CONFIG_TRUSTY_LOG=y +CONFIG_TRUSTY_VIRTIO=y +CONFIG_TRUSTY_VIRTIO_IPC=y +CONFIG_TRUSTY_BACKUP_TIMER=m +CONFIG_WATCHDOG=y +CONFIG_WATCHDOG_CORE=y +# CONFIG_WATCHDOG_NOWAYOUT is not set +CONFIG_WATCHDOG_HANDLE_BOOT_ENABLED=y +# CONFIG_WATCHDOG_SYSFS is not set + +# +# Watchdog Device Drivers +# +# CONFIG_SOFT_WATCHDOG is not set +# 
CONFIG_WDAT_WDT is not set +# CONFIG_XILINX_WATCHDOG is not set +# CONFIG_ZIIRAVE_WATCHDOG is not set +# CONFIG_CADENCE_WATCHDOG is not set +# CONFIG_DW_WATCHDOG is not set +# CONFIG_MAX63XX_WATCHDOG is not set +# CONFIG_ACQUIRE_WDT is not set +# CONFIG_ADVANTECH_WDT is not set +# CONFIG_ALIM1535_WDT is not set +# CONFIG_ALIM7101_WDT is not set +# CONFIG_EBC_C384_WDT is not set +# CONFIG_F71808E_WDT is not set +# CONFIG_SP5100_TCO is not set +# CONFIG_SBC_FITPC2_WATCHDOG is not set +# CONFIG_EUROTECH_WDT is not set +# CONFIG_IB700_WDT is not set +# CONFIG_IBMASR is not set +# CONFIG_WAFER_WDT is not set +# CONFIG_I6300ESB_WDT is not set +# CONFIG_IE6XX_WDT is not set +CONFIG_ITCO_WDT=y +# CONFIG_ITCO_NO_NMI_INTR is not set +# CONFIG_ITCO_VENDOR_SUPPORT is not set +# CONFIG_IT8712F_WDT is not set +# CONFIG_IT87_WDT is not set +# CONFIG_HP_WATCHDOG is not set +# CONFIG_SC1200_WDT is not set +# CONFIG_PC87413_WDT is not set +# CONFIG_NV_TCO is not set +# CONFIG_60XX_WDT is not set +# CONFIG_CPU5_WDT is not set +# CONFIG_SMSC_SCH311X_WDT is not set +# CONFIG_SMSC37B787_WDT is not set +# CONFIG_VIA_WDT is not set +# CONFIG_W83627HF_WDT is not set +# CONFIG_W83877F_WDT is not set +# CONFIG_W83977F_WDT is not set +# CONFIG_MACHZ_WDT is not set +# CONFIG_SBC_EPX_C3_WATCHDOG is not set +# CONFIG_INTEL_MEI_WDT is not set +# CONFIG_NI903X_WDT is not set +# CONFIG_NIC7018_WDT is not set +# CONFIG_MEN_A21_WDT is not set + +# +# PCI-based Watchdog Cards +# +# CONFIG_PCIPCWATCHDOG is not set +# CONFIG_WDTPCI is not set + +# +# USB-based Watchdog Cards +# +# CONFIG_USBPCWATCHDOG is not set + +# +# Watchdog Pretimeout Governors +# +# CONFIG_WATCHDOG_PRETIMEOUT_GOV is not set +CONFIG_SSB_POSSIBLE=y +# CONFIG_SSB is not set +CONFIG_BCMA_POSSIBLE=y +CONFIG_BCMA=m +CONFIG_BCMA_HOST_PCI_POSSIBLE=y +CONFIG_BCMA_HOST_PCI=y +# CONFIG_BCMA_HOST_SOC is not set +CONFIG_BCMA_DRIVER_PCI=y +# CONFIG_BCMA_DRIVER_GMAC_CMN is not set +# CONFIG_BCMA_DRIVER_GPIO is not set +# CONFIG_BCMA_DEBUG is not 
set + +# +# Multifunction device drivers +# +CONFIG_MFD_CORE=y +# CONFIG_MFD_AS3711 is not set +# CONFIG_PMIC_ADP5520 is not set +# CONFIG_MFD_AAT2870_CORE is not set +# CONFIG_MFD_BCM590XX is not set +# CONFIG_MFD_BD9571MWV is not set +# CONFIG_MFD_AXP20X_I2C is not set +# CONFIG_MFD_CROS_EC is not set +# CONFIG_MFD_MADERA is not set +# CONFIG_PMIC_DA903X is not set +# CONFIG_MFD_DA9052_SPI is not set +# CONFIG_MFD_DA9052_I2C is not set +# CONFIG_MFD_DA9055 is not set +# CONFIG_MFD_DA9062 is not set +# CONFIG_MFD_DA9063 is not set +# CONFIG_MFD_DA9150 is not set +# CONFIG_MFD_DLN2 is not set +# CONFIG_MFD_MC13XXX_SPI is not set +# CONFIG_MFD_MC13XXX_I2C is not set +# CONFIG_HTC_PASIC3 is not set +# CONFIG_HTC_I2CPLD is not set +# CONFIG_MFD_INTEL_QUARK_I2C_GPIO is not set +CONFIG_LPC_ICH=y +CONFIG_LPC_SCH=y +CONFIG_INTEL_SOC_PMIC=y +CONFIG_INTEL_SOC_PMIC_BXTWC=y +# CONFIG_INTEL_SOC_PMIC_CHTWC is not set +# CONFIG_INTEL_SOC_PMIC_CHTDC_TI is not set +CONFIG_MFD_INTEL_LPSS=y +CONFIG_MFD_INTEL_LPSS_ACPI=y +CONFIG_MFD_INTEL_LPSS_PCI=y +# CONFIG_MFD_JANZ_CMODIO is not set +# CONFIG_MFD_KEMPLD is not set +# CONFIG_MFD_88PM800 is not set +# CONFIG_MFD_88PM805 is not set +# CONFIG_MFD_88PM860X is not set +# CONFIG_MFD_MAX14577 is not set +# CONFIG_MFD_MAX77693 is not set +# CONFIG_MFD_MAX77843 is not set +# CONFIG_MFD_MAX8907 is not set +# CONFIG_MFD_MAX8925 is not set +# CONFIG_MFD_MAX8997 is not set +# CONFIG_MFD_MAX8998 is not set +# CONFIG_MFD_MT6397 is not set +# CONFIG_MFD_MENF21BMC is not set +# CONFIG_EZX_PCAP is not set +# CONFIG_MFD_VIPERBOARD is not set +# CONFIG_MFD_RETU is not set +# CONFIG_MFD_PCF50633 is not set +# CONFIG_MFD_RDC321X is not set +# CONFIG_MFD_RT5033 is not set +# CONFIG_MFD_RC5T583 is not set +# CONFIG_MFD_SEC_CORE is not set +# CONFIG_MFD_SI476X_CORE is not set +# CONFIG_MFD_SM501 is not set +# CONFIG_MFD_SKY81452 is not set +# CONFIG_MFD_SMSC is not set +# CONFIG_ABX500_CORE is not set +# CONFIG_MFD_SYSCON is not set +# 
CONFIG_MFD_TI_AM335X_TSCADC is not set +# CONFIG_MFD_LP3943 is not set +# CONFIG_MFD_LP8788 is not set +# CONFIG_MFD_TI_LMU is not set +# CONFIG_MFD_PALMAS is not set +# CONFIG_TPS6105X is not set +# CONFIG_TPS65010 is not set +# CONFIG_TPS6507X is not set +# CONFIG_MFD_TPS65086 is not set +# CONFIG_MFD_TPS65090 is not set +# CONFIG_MFD_TPS68470 is not set +# CONFIG_MFD_TI_LP873X is not set +# CONFIG_MFD_TPS6586X is not set +# CONFIG_MFD_TPS65910 is not set +# CONFIG_MFD_TPS65912_I2C is not set +# CONFIG_MFD_TPS65912_SPI is not set +# CONFIG_MFD_TPS80031 is not set +# CONFIG_TWL4030_CORE is not set +# CONFIG_TWL6040_CORE is not set +# CONFIG_MFD_WL1273_CORE is not set +# CONFIG_MFD_LM3533 is not set +# CONFIG_MFD_VX855 is not set +CONFIG_MFD_ARIZONA=y +CONFIG_MFD_ARIZONA_I2C=m +# CONFIG_MFD_ARIZONA_SPI is not set +# CONFIG_MFD_CS47L24 is not set +# CONFIG_MFD_WM5102 is not set +CONFIG_MFD_WM5110=y +# CONFIG_MFD_WM8997 is not set +CONFIG_MFD_WM8998=y +# CONFIG_MFD_WM8400 is not set +# CONFIG_MFD_WM831X_I2C is not set +# CONFIG_MFD_WM831X_SPI is not set +# CONFIG_MFD_WM8350_I2C is not set +# CONFIG_MFD_WM8994 is not set +CONFIG_REGULATOR=y +# CONFIG_REGULATOR_DEBUG is not set +CONFIG_REGULATOR_FIXED_VOLTAGE=y +# CONFIG_REGULATOR_VIRTUAL_CONSUMER is not set +# CONFIG_REGULATOR_USERSPACE_CONSUMER is not set +# CONFIG_REGULATOR_88PG86X is not set +# CONFIG_REGULATOR_ACT8865 is not set +# CONFIG_REGULATOR_AD5398 is not set +# CONFIG_REGULATOR_ARIZONA_LDO1 is not set +# CONFIG_REGULATOR_ARIZONA_MICSUPP is not set +# CONFIG_REGULATOR_DA9210 is not set +# CONFIG_REGULATOR_DA9211 is not set +# CONFIG_REGULATOR_FAN53555 is not set +CONFIG_REGULATOR_GPIO=y +# CONFIG_REGULATOR_ISL9305 is not set +# CONFIG_REGULATOR_ISL6271A is not set +# CONFIG_REGULATOR_LP3971 is not set +# CONFIG_REGULATOR_LP3972 is not set +# CONFIG_REGULATOR_LP872X is not set +# CONFIG_REGULATOR_LP8755 is not set +# CONFIG_REGULATOR_LTC3589 is not set +# CONFIG_REGULATOR_LTC3676 is not set +# 
CONFIG_REGULATOR_MAX1586 is not set +# CONFIG_REGULATOR_MAX8649 is not set +# CONFIG_REGULATOR_MAX8660 is not set +# CONFIG_REGULATOR_MAX8952 is not set +# CONFIG_REGULATOR_MT6311 is not set +# CONFIG_REGULATOR_PFUZE100 is not set +# CONFIG_REGULATOR_PV88060 is not set +# CONFIG_REGULATOR_PV88080 is not set +# CONFIG_REGULATOR_PV88090 is not set +# CONFIG_REGULATOR_PWM is not set +# CONFIG_REGULATOR_TPS51632 is not set +# CONFIG_REGULATOR_TPS62360 is not set +# CONFIG_REGULATOR_TPS65023 is not set +# CONFIG_REGULATOR_TPS6507X is not set +# CONFIG_REGULATOR_TPS65132 is not set +# CONFIG_REGULATOR_TPS6524X is not set +CONFIG_RC_CORE=y +CONFIG_RC_MAP=y +# CONFIG_LIRC is not set +CONFIG_RC_DECODERS=y +CONFIG_IR_NEC_DECODER=y +CONFIG_IR_RC5_DECODER=y +CONFIG_IR_RC6_DECODER=y +CONFIG_IR_JVC_DECODER=y +CONFIG_IR_SONY_DECODER=y +CONFIG_IR_SANYO_DECODER=y +CONFIG_IR_SHARP_DECODER=y +CONFIG_IR_MCE_KBD_DECODER=y +CONFIG_IR_XMP_DECODER=y +# CONFIG_IR_IMON_DECODER is not set +# CONFIG_RC_DEVICES is not set +CONFIG_MEDIA_SUPPORT=y + +# +# Multimedia core support +# +CONFIG_MEDIA_CAMERA_SUPPORT=y +# CONFIG_MEDIA_ANALOG_TV_SUPPORT is not set +# CONFIG_MEDIA_DIGITAL_TV_SUPPORT is not set +CONFIG_MEDIA_RADIO_SUPPORT=y +# CONFIG_MEDIA_SDR_SUPPORT is not set +# CONFIG_MEDIA_CEC_SUPPORT is not set +CONFIG_MEDIA_CONTROLLER=y +CONFIG_VIDEO_DEV=y +CONFIG_VIDEO_V4L2_SUBDEV_API=y +CONFIG_VIDEO_V4L2=y +# CONFIG_VIDEO_ADV_DEBUG is not set +# CONFIG_VIDEO_FIXED_MINOR_RANGES is not set +# CONFIG_VIDEO_PCI_SKELETON is not set +CONFIG_V4L2_FWNODE=m + +# +# Media drivers +# +CONFIG_MEDIA_USB_SUPPORT=y + +# +# Webcam devices +# +CONFIG_USB_VIDEO_CLASS=y +CONFIG_USB_VIDEO_CLASS_INPUT_EVDEV=y +# CONFIG_USB_GSPCA is not set +# CONFIG_USB_PWC is not set +# CONFIG_VIDEO_CPIA2 is not set +# CONFIG_USB_ZR364XX is not set +# CONFIG_USB_STKWEBCAM is not set +# CONFIG_USB_S2255 is not set +# CONFIG_VIDEO_USBTV is not set + +# +# Webcam, TV (analog/digital) USB devices +# +# CONFIG_VIDEO_EM28XX is not set 
+CONFIG_MEDIA_PCI_SUPPORT=y + +# +# Media capture support +# +# CONFIG_VIDEO_SOLO6X10 is not set +# CONFIG_VIDEO_TW5864 is not set +# CONFIG_VIDEO_TW68 is not set +# CONFIG_VIDEO_TW686X is not set +CONFIG_VIDEO_INTEL_IPU=m +CONFIG_VIDEO_INTEL_IPU4=y +# CONFIG_VIDEO_INTEL_IPU4P is not set +CONFIG_VIDEO_INTEL_IPU_SOC=y +CONFIG_VIDEO_INTEL_IPU_FW_LIB=y +# CONFIG_VIDEO_INTEL_IPU_WERROR is not set +# CONFIG_VIDEO_INTEL_ICI is not set +# CONFIG_VIDEO_INTEL_UOS is not set +# CONFIG_VIDEO_INTEL_IPU_ACRN is not set +# CONFIG_VIDEO_IPU3_CIO2 is not set +CONFIG_V4L_PLATFORM_DRIVERS=y +# CONFIG_VIDEO_CAFE_CCIC is not set +# CONFIG_VIDEO_CADENCE is not set +# CONFIG_SOC_CAMERA is not set +# CONFIG_INTEL_IPU4_BXT_P_PDATA is not set +# CONFIG_INTEL_IPU4_BXT_GP_PDATA is not set +# CONFIG_INTEL_IPU4_AR023Z is not set +# CONFIG_INTEL_IPU4_OV13860 is not set +# CONFIG_INTEL_IPU4_OV9281 is not set +# CONFIG_INTEL_IPU4_OV10635 is not set +# CONFIG_INTEL_IPU4_AR0231AT is not set +# CONFIG_INTEL_IPU4_OV10640 is not set +# CONFIG_INTEL_IPU4_ADV7481 is not set +# CONFIG_INTEL_IPU4_ADV7481_EVAL is not set +CONFIG_INTEL_IPU4_ADV7481_I2C_ID=0 +# CONFIG_V4L_MEM2MEM_DRIVERS is not set +# CONFIG_V4L_TEST_DRIVERS is not set + +# +# Supported MMC/SDIO adapters +# +CONFIG_RADIO_ADAPTERS=y +# CONFIG_RADIO_SI470X is not set +# CONFIG_RADIO_SI4713 is not set +# CONFIG_USB_MR800 is not set +# CONFIG_USB_DSBR is not set +# CONFIG_RADIO_MAXIRADIO is not set +# CONFIG_RADIO_SHARK is not set +# CONFIG_RADIO_SHARK2 is not set +# CONFIG_USB_KEENE is not set +# CONFIG_USB_RAREMONO is not set +# CONFIG_USB_MA901 is not set +# CONFIG_RADIO_TEA5764 is not set +# CONFIG_RADIO_SAA7706H is not set +# CONFIG_RADIO_TEF6862 is not set +# CONFIG_RADIO_WL1273 is not set + +# +# Texas Instruments WL128x FM driver (ST based) +# +# CONFIG_CYPRESS_FIRMWARE is not set +CONFIG_VIDEOBUF2_CORE=y +CONFIG_VIDEOBUF2_V4L2=y +CONFIG_VIDEOBUF2_MEMOPS=y +CONFIG_VIDEOBUF2_DMA_CONTIG=m +CONFIG_VIDEOBUF2_VMALLOC=y 
+CONFIG_VIDEOBUF2_DMA_SG=m + +# +# Media ancillary drivers (tuners, sensors, i2c, spi, frontends) +# +# CONFIG_MEDIA_SUBDRV_AUTOSELECT is not set +CONFIG_MEDIA_ATTACH=y +CONFIG_VIDEO_IR_I2C=y + +# +# I2C Encoders, decoders, sensors and other helper chips +# + +# +# Audio decoders, processors and mixers +# +# CONFIG_VIDEO_TVAUDIO is not set +# CONFIG_VIDEO_TDA7432 is not set +# CONFIG_VIDEO_TDA9840 is not set +# CONFIG_VIDEO_TDA1997X is not set +# CONFIG_VIDEO_TEA6415C is not set +# CONFIG_VIDEO_TEA6420 is not set +# CONFIG_VIDEO_MSP3400 is not set +# CONFIG_VIDEO_CS3308 is not set +# CONFIG_VIDEO_CS5345 is not set +# CONFIG_VIDEO_CS53L32A is not set +# CONFIG_VIDEO_TLV320AIC23B is not set +# CONFIG_VIDEO_UDA1342 is not set +# CONFIG_VIDEO_WM8775 is not set +# CONFIG_VIDEO_WM8739 is not set +# CONFIG_VIDEO_VP27SMPX is not set +# CONFIG_VIDEO_SONY_BTF_MPX is not set + +# +# RDS decoders +# +# CONFIG_VIDEO_SAA6588 is not set + +# +# Video decoders +# +# CONFIG_VIDEO_ADV7180 is not set +# CONFIG_VIDEO_ADV7183 is not set +# CONFIG_VIDEO_ADV7604 is not set +# CONFIG_VIDEO_ADV7842 is not set +# CONFIG_VIDEO_BT819 is not set +# CONFIG_VIDEO_BT856 is not set +# CONFIG_VIDEO_BT866 is not set +# CONFIG_VIDEO_KS0127 is not set +# CONFIG_VIDEO_ML86V7667 is not set +# CONFIG_VIDEO_AD5820 is not set +# CONFIG_VIDEO_AK7375 is not set +# CONFIG_VIDEO_DW9714 is not set +# CONFIG_VIDEO_DW9807_VCM is not set +# CONFIG_VIDEO_SAA7110 is not set +# CONFIG_VIDEO_SAA711X is not set +# CONFIG_VIDEO_TC358743 is not set +# CONFIG_VIDEO_TVP514X is not set +# CONFIG_VIDEO_TVP5150 is not set +# CONFIG_VIDEO_TVP7002 is not set +# CONFIG_VIDEO_TW2804 is not set +# CONFIG_VIDEO_TW9903 is not set +# CONFIG_VIDEO_TW9906 is not set +# CONFIG_VIDEO_TW9910 is not set +# CONFIG_VIDEO_VPX3220 is not set + +# +# Video and audio decoders +# +# CONFIG_VIDEO_SAA717X is not set +# CONFIG_VIDEO_CX25840 is not set + +# +# Video encoders +# +# CONFIG_VIDEO_SAA7127 is not set +# CONFIG_VIDEO_SAA7185 is not set +# 
CONFIG_VIDEO_ADV7170 is not set +# CONFIG_VIDEO_ADV7175 is not set +# CONFIG_VIDEO_ADV7343 is not set +# CONFIG_VIDEO_ADV7393 is not set +# CONFIG_VIDEO_ADV7511 is not set +# CONFIG_VIDEO_AD9389B is not set +# CONFIG_VIDEO_AK881X is not set +# CONFIG_VIDEO_THS8200 is not set + +# +# Camera sensor devices +# +CONFIG_VIDEO_SMIAPP_PLL=m +# CONFIG_VIDEO_IMX258 is not set +# CONFIG_VIDEO_IMX274 is not set +# CONFIG_VIDEO_OV2640 is not set +# CONFIG_VIDEO_OV2659 is not set +# CONFIG_VIDEO_OV2680 is not set +# CONFIG_VIDEO_OV2685 is not set +# CONFIG_VIDEO_OV5647 is not set +# CONFIG_VIDEO_OV6650 is not set +# CONFIG_VIDEO_OV5670 is not set +# CONFIG_VIDEO_OV5695 is not set +# CONFIG_VIDEO_OV7251 is not set +# CONFIG_VIDEO_OV772X is not set +# CONFIG_VIDEO_OV7640 is not set +# CONFIG_VIDEO_OV7670 is not set +# CONFIG_VIDEO_OV7740 is not set +# CONFIG_VIDEO_OV9650 is not set +# CONFIG_VIDEO_OV13858 is not set +# CONFIG_VIDEO_VS6624 is not set +# CONFIG_VIDEO_MT9M032 is not set +# CONFIG_VIDEO_MT9M111 is not set +# CONFIG_VIDEO_MT9P031 is not set +# CONFIG_VIDEO_MT9T001 is not set +# CONFIG_VIDEO_MT9T112 is not set +# CONFIG_VIDEO_MT9V011 is not set +# CONFIG_VIDEO_MT9V032 is not set +# CONFIG_VIDEO_MT9V111 is not set +# CONFIG_VIDEO_SR030PC30 is not set +# CONFIG_VIDEO_NOON010PC30 is not set +# CONFIG_VIDEO_M5MOLS is not set +# CONFIG_VIDEO_RJ54N1 is not set +# CONFIG_VIDEO_S5K6AA is not set +# CONFIG_VIDEO_S5K6A3 is not set +# CONFIG_VIDEO_S5K4ECGX is not set +# CONFIG_VIDEO_S5K5BAF is not set +CONFIG_VIDEO_SMIAPP=m +# CONFIG_VIDEO_ET8EK8 is not set +# CONFIG_VIDEO_CRLMODULE is not set +# CONFIG_VIDEO_S5C73M3 is not set + +# +# Flash devices +# +# CONFIG_VIDEO_ADP1653 is not set +# CONFIG_VIDEO_LM3560 is not set +# CONFIG_VIDEO_LM3646 is not set + +# +# Video improvement chips +# +# CONFIG_VIDEO_UPD64031A is not set +# CONFIG_VIDEO_UPD64083 is not set + +# +# Audio/Video compression chips +# +# CONFIG_VIDEO_SAA6752HS is not set + +# +# SDR tuner chips +# + +# +# 
Miscellaneous helper chips +# +# CONFIG_VIDEO_THS7303 is not set +# CONFIG_VIDEO_M52790 is not set +# CONFIG_VIDEO_I2C is not set +# CONFIG_VIDEO_TI964 is not set +# CONFIG_VIDEO_MAX9286 is not set +# CONFIG_VIDEO_TI960 is not set + +# +# Sensors used on soc_camera driver +# + +# +# SPI helper chips +# +# CONFIG_VIDEO_GS1662 is not set + +# +# Media SPI Adapters +# +CONFIG_MEDIA_TUNER=y + +# +# Customize TV tuners +# +# CONFIG_MEDIA_TUNER_SIMPLE is not set +CONFIG_MEDIA_TUNER_TDA18250=m +# CONFIG_MEDIA_TUNER_TDA8290 is not set +# CONFIG_MEDIA_TUNER_TDA827X is not set +# CONFIG_MEDIA_TUNER_TDA18271 is not set +# CONFIG_MEDIA_TUNER_TDA9887 is not set +# CONFIG_MEDIA_TUNER_TEA5761 is not set +# CONFIG_MEDIA_TUNER_TEA5767 is not set +# CONFIG_MEDIA_TUNER_MSI001 is not set +# CONFIG_MEDIA_TUNER_MT20XX is not set +# CONFIG_MEDIA_TUNER_MT2060 is not set +# CONFIG_MEDIA_TUNER_MT2063 is not set +# CONFIG_MEDIA_TUNER_MT2266 is not set +# CONFIG_MEDIA_TUNER_MT2131 is not set +# CONFIG_MEDIA_TUNER_QT1010 is not set +# CONFIG_MEDIA_TUNER_XC2028 is not set +# CONFIG_MEDIA_TUNER_XC5000 is not set +# CONFIG_MEDIA_TUNER_XC4000 is not set +# CONFIG_MEDIA_TUNER_MXL5005S is not set +# CONFIG_MEDIA_TUNER_MXL5007T is not set +# CONFIG_MEDIA_TUNER_MC44S803 is not set +# CONFIG_MEDIA_TUNER_MAX2165 is not set +# CONFIG_MEDIA_TUNER_TDA18218 is not set +# CONFIG_MEDIA_TUNER_FC0011 is not set +# CONFIG_MEDIA_TUNER_FC0012 is not set +# CONFIG_MEDIA_TUNER_FC0013 is not set +# CONFIG_MEDIA_TUNER_TDA18212 is not set +# CONFIG_MEDIA_TUNER_E4000 is not set +# CONFIG_MEDIA_TUNER_FC2580 is not set +# CONFIG_MEDIA_TUNER_M88RS6000T is not set +# CONFIG_MEDIA_TUNER_TUA9001 is not set +# CONFIG_MEDIA_TUNER_SI2157 is not set +# CONFIG_MEDIA_TUNER_IT913X is not set +# CONFIG_MEDIA_TUNER_R820T is not set +# CONFIG_MEDIA_TUNER_MXL301RF is not set +# CONFIG_MEDIA_TUNER_QM1D1C0042 is not set +CONFIG_MEDIA_TUNER_QM1D1B0004=m + +# +# Customise DVB Frontends +# + +# +# Tools to develop new frontends +# + +# +# 
Graphics support +# +CONFIG_AGP=y +# CONFIG_AGP_AMD64 is not set +CONFIG_AGP_INTEL=y +# CONFIG_AGP_SIS is not set +# CONFIG_AGP_VIA is not set +CONFIG_INTEL_GTT=y +CONFIG_VGA_ARB=y +CONFIG_VGA_ARB_MAX_GPUS=16 +CONFIG_VGA_SWITCHEROO=y +CONFIG_DRM=y +CONFIG_DRM_MIPI_DSI=y +# CONFIG_DRM_DP_AUX_CHARDEV is not set +# CONFIG_DRM_DEBUG_MM is not set +# CONFIG_DRM_DEBUG_SELFTEST is not set +CONFIG_DRM_KMS_HELPER=y +CONFIG_DRM_KMS_FB_HELPER=y +CONFIG_DRM_FBDEV_EMULATION=y +CONFIG_DRM_FBDEV_OVERALLOC=100 +# CONFIG_DRM_LOAD_EDID_FIRMWARE is not set +# CONFIG_DRM_DP_CEC is not set +CONFIG_DRM_TTM=y + +# +# I2C encoder or helper chips +# +# CONFIG_DRM_I2C_CH7006 is not set +# CONFIG_DRM_I2C_SIL164 is not set +# CONFIG_DRM_I2C_NXP_TDA998X is not set +# CONFIG_DRM_I2C_NXP_TDA9950 is not set +# CONFIG_DRM_RADEON is not set +# CONFIG_DRM_AMDGPU is not set + +# +# ACP (Audio CoProcessor) Configuration +# + +# +# AMD Library routines +# +# CONFIG_DRM_NOUVEAU is not set +CONFIG_DRM_I915=y +# CONFIG_DRM_I915_ALPHA_SUPPORT is not set +CONFIG_DRM_I915_CAPTURE_ERROR=y +CONFIG_DRM_I915_COMPRESS_ERROR=y +# CONFIG_DRM_I915_MEMTRACK is not set +CONFIG_DRM_I915_USERPTR=y +# CONFIG_DRM_I915_GVT is not set + +# +# drm/i915 Debugging +# +# CONFIG_DRM_I915_WERROR is not set +# CONFIG_DRM_I915_DEBUG is not set +# CONFIG_DRM_I915_SW_FENCE_DEBUG_OBJECTS is not set +# CONFIG_DRM_I915_SW_FENCE_CHECK_DAG is not set +# CONFIG_DRM_I915_DEBUG_GUC is not set +# CONFIG_DRM_I915_SELFTEST is not set +# CONFIG_DRM_I915_LOW_LEVEL_TRACEPOINTS is not set +# CONFIG_DRM_I915_DEBUG_VBLANK_EVADE is not set +# CONFIG_DRM_VGEM is not set +# CONFIG_DRM_VKMS is not set +# CONFIG_DRM_VMWGFX is not set +# CONFIG_DRM_GMA500 is not set +# CONFIG_DRM_UDL is not set +# CONFIG_DRM_AST is not set +# CONFIG_DRM_MGAG200 is not set +# CONFIG_DRM_CIRRUS_QEMU is not set +# CONFIG_DRM_QXL is not set +CONFIG_DRM_BOCHS=y +# CONFIG_DRM_VIRTIO_GPU is not set +CONFIG_DRM_PANEL=y + +# +# Display Panels +# +# 
CONFIG_DRM_PANEL_RASPBERRYPI_TOUCHSCREEN is not set +CONFIG_DRM_BRIDGE=y +CONFIG_DRM_PANEL_BRIDGE=y + +# +# Display Interface Bridges +# +# CONFIG_DRM_ANALOGIX_ANX78XX is not set +# CONFIG_DRM_HISI_HIBMC is not set +# CONFIG_DRM_TINYDRM is not set +# CONFIG_DRM_LEGACY is not set +CONFIG_DRM_PANEL_ORIENTATION_QUIRKS=y + +# +# Frame buffer Devices +# +CONFIG_FB=y +# CONFIG_FIRMWARE_EDID is not set +CONFIG_FB_CMDLINE=y +CONFIG_FB_NOTIFY=y +CONFIG_FB_BOOT_VESA_SUPPORT=y +CONFIG_FB_CFB_FILLRECT=y +CONFIG_FB_CFB_COPYAREA=y +CONFIG_FB_CFB_IMAGEBLIT=y +CONFIG_FB_SYS_FILLRECT=y +CONFIG_FB_SYS_COPYAREA=y +CONFIG_FB_SYS_IMAGEBLIT=y +# CONFIG_FB_FOREIGN_ENDIAN is not set +CONFIG_FB_SYS_FOPS=y +CONFIG_FB_DEFERRED_IO=y +CONFIG_FB_MODE_HELPERS=y +CONFIG_FB_TILEBLITTING=y + +# +# Frame buffer hardware drivers +# +# CONFIG_FB_CIRRUS is not set +# CONFIG_FB_PM2 is not set +# CONFIG_FB_CYBER2000 is not set +# CONFIG_FB_ARC is not set +# CONFIG_FB_ASILIANT is not set +# CONFIG_FB_IMSTT is not set +# CONFIG_FB_VGA16 is not set +# CONFIG_FB_UVESA is not set +CONFIG_FB_VESA=y +CONFIG_FB_EFI=y +# CONFIG_FB_N411 is not set +# CONFIG_FB_HGA is not set +# CONFIG_FB_OPENCORES is not set +# CONFIG_FB_S1D13XXX is not set +# CONFIG_FB_NVIDIA is not set +# CONFIG_FB_RIVA is not set +# CONFIG_FB_I740 is not set +# CONFIG_FB_LE80578 is not set +# CONFIG_FB_MATROX is not set +# CONFIG_FB_RADEON is not set +# CONFIG_FB_ATY128 is not set +# CONFIG_FB_ATY is not set +# CONFIG_FB_S3 is not set +# CONFIG_FB_SAVAGE is not set +# CONFIG_FB_SIS is not set +# CONFIG_FB_VIA is not set +# CONFIG_FB_NEOMAGIC is not set +# CONFIG_FB_KYRO is not set +# CONFIG_FB_3DFX is not set +# CONFIG_FB_VOODOO1 is not set +# CONFIG_FB_VT8623 is not set +# CONFIG_FB_TRIDENT is not set +# CONFIG_FB_ARK is not set +# CONFIG_FB_PM3 is not set +# CONFIG_FB_CARMINE is not set +# CONFIG_FB_SMSCUFX is not set +# CONFIG_FB_UDL is not set +# CONFIG_FB_IBM_GXT4500 is not set +# CONFIG_FB_VIRTUAL is not set +# CONFIG_FB_METRONOME is not 
set +# CONFIG_FB_MB862XX is not set +# CONFIG_FB_BROADSHEET is not set +CONFIG_FB_SIMPLE=y +# CONFIG_FB_SM712 is not set +CONFIG_BACKLIGHT_LCD_SUPPORT=y +CONFIG_LCD_CLASS_DEVICE=m +# CONFIG_LCD_L4F00242T03 is not set +# CONFIG_LCD_LMS283GF05 is not set +# CONFIG_LCD_LTV350QV is not set +# CONFIG_LCD_ILI922X is not set +# CONFIG_LCD_ILI9320 is not set +# CONFIG_LCD_TDO24M is not set +# CONFIG_LCD_VGG2432A4 is not set +CONFIG_LCD_PLATFORM=m +# CONFIG_LCD_S6E63M0 is not set +# CONFIG_LCD_LD9040 is not set +# CONFIG_LCD_AMS369FG06 is not set +# CONFIG_LCD_LMS501KF03 is not set +# CONFIG_LCD_HX8357 is not set +# CONFIG_LCD_OTM3225A is not set +CONFIG_BACKLIGHT_CLASS_DEVICE=y +CONFIG_BACKLIGHT_GENERIC=m +# CONFIG_BACKLIGHT_PWM is not set +# CONFIG_BACKLIGHT_APPLE is not set +# CONFIG_BACKLIGHT_PM8941_WLED is not set +# CONFIG_BACKLIGHT_SAHARA is not set +# CONFIG_BACKLIGHT_ADP8860 is not set +# CONFIG_BACKLIGHT_ADP8870 is not set +CONFIG_BACKLIGHT_LM3630A=m +# CONFIG_BACKLIGHT_LM3639 is not set +# CONFIG_BACKLIGHT_LP855X is not set +CONFIG_BACKLIGHT_GPIO=m +CONFIG_BACKLIGHT_LV5207LP=m +CONFIG_BACKLIGHT_BD6107=m +# CONFIG_BACKLIGHT_ARCXCNN is not set +CONFIG_HDMI=y + +# +# Console display driver support +# +# CONFIG_VGA_CONSOLE is not set +CONFIG_DUMMY_CONSOLE=y +CONFIG_DUMMY_CONSOLE_COLUMNS=80 +CONFIG_DUMMY_CONSOLE_ROWS=25 +# CONFIG_FRAMEBUFFER_CONSOLE is not set +# CONFIG_LOGO is not set +CONFIG_SOUND=y +CONFIG_SOUND_OSS_CORE=y +CONFIG_SOUND_OSS_CORE_PRECLAIM=y +CONFIG_SND=y +CONFIG_SND_TIMER=y +CONFIG_SND_PCM=y +CONFIG_SND_HWDEP=m +CONFIG_SND_SEQ_DEVICE=y +CONFIG_SND_RAWMIDI=y +CONFIG_SND_COMPRESS_OFFLOAD=y +CONFIG_SND_JACK=y +CONFIG_SND_JACK_INPUT_DEV=y +CONFIG_SND_OSSEMUL=y +CONFIG_SND_MIXER_OSS=m +CONFIG_SND_PCM_OSS=m +CONFIG_SND_PCM_OSS_PLUGINS=y +CONFIG_SND_PCM_TIMER=y +CONFIG_SND_HRTIMER=y +CONFIG_SND_DYNAMIC_MINORS=y +CONFIG_SND_MAX_CARDS=32 +CONFIG_SND_SUPPORT_OLD_API=y +CONFIG_SND_PROC_FS=y +CONFIG_SND_VERBOSE_PROCFS=y +CONFIG_SND_VERBOSE_PRINTK=y +# 
CONFIG_SND_DEBUG is not set +CONFIG_SND_DMA_SGBUF=y +CONFIG_SND_SEQUENCER=m +# CONFIG_SND_SEQ_DUMMY is not set +# CONFIG_SND_SEQUENCER_OSS is not set +CONFIG_SND_SEQ_HRTIMER_DEFAULT=y +CONFIG_SND_SEQ_MIDI_EVENT=m +CONFIG_SND_SEQ_MIDI=m +CONFIG_SND_DRIVERS=y +# CONFIG_SND_PCSP is not set +CONFIG_SND_DUMMY=m +# CONFIG_SND_ALOOP is not set +# CONFIG_SND_VIRMIDI is not set +# CONFIG_SND_MTPAV is not set +# CONFIG_SND_SERIAL_U16550 is not set +# CONFIG_SND_MPU401 is not set +CONFIG_SND_PCI=y +# CONFIG_SND_AD1889 is not set +# CONFIG_SND_ALS300 is not set +# CONFIG_SND_ALS4000 is not set +# CONFIG_SND_ALI5451 is not set +# CONFIG_SND_ASIHPI is not set +# CONFIG_SND_ATIIXP is not set +# CONFIG_SND_ATIIXP_MODEM is not set +# CONFIG_SND_AU8810 is not set +# CONFIG_SND_AU8820 is not set +# CONFIG_SND_AU8830 is not set +# CONFIG_SND_AW2 is not set +# CONFIG_SND_AZT3328 is not set +# CONFIG_SND_BT87X is not set +# CONFIG_SND_CA0106 is not set +# CONFIG_SND_CMIPCI is not set +# CONFIG_SND_OXYGEN is not set +# CONFIG_SND_CS4281 is not set +# CONFIG_SND_CS46XX is not set +# CONFIG_SND_CTXFI is not set +# CONFIG_SND_DARLA20 is not set +# CONFIG_SND_GINA20 is not set +# CONFIG_SND_LAYLA20 is not set +# CONFIG_SND_DARLA24 is not set +# CONFIG_SND_GINA24 is not set +# CONFIG_SND_LAYLA24 is not set +# CONFIG_SND_MONA is not set +# CONFIG_SND_MIA is not set +# CONFIG_SND_ECHO3G is not set +# CONFIG_SND_INDIGO is not set +# CONFIG_SND_INDIGOIO is not set +# CONFIG_SND_INDIGODJ is not set +# CONFIG_SND_INDIGOIOX is not set +# CONFIG_SND_INDIGODJX is not set +# CONFIG_SND_EMU10K1 is not set +# CONFIG_SND_EMU10K1X is not set +# CONFIG_SND_ENS1370 is not set +# CONFIG_SND_ENS1371 is not set +# CONFIG_SND_ES1938 is not set +# CONFIG_SND_ES1968 is not set +# CONFIG_SND_FM801 is not set +# CONFIG_SND_HDSP is not set +# CONFIG_SND_HDSPM is not set +# CONFIG_SND_ICE1712 is not set +# CONFIG_SND_ICE1724 is not set +# CONFIG_SND_INTEL8X0 is not set +# CONFIG_SND_INTEL8X0M is not set +# 
CONFIG_SND_KORG1212 is not set +# CONFIG_SND_LOLA is not set +# CONFIG_SND_LX6464ES is not set +# CONFIG_SND_MAESTRO3 is not set +# CONFIG_SND_MIXART is not set +# CONFIG_SND_NM256 is not set +# CONFIG_SND_PCXHR is not set +# CONFIG_SND_RIPTIDE is not set +# CONFIG_SND_RME32 is not set +# CONFIG_SND_RME96 is not set +# CONFIG_SND_RME9652 is not set +# CONFIG_SND_SE6X is not set +# CONFIG_SND_SONICVIBES is not set +# CONFIG_SND_TRIDENT is not set +# CONFIG_SND_VIA82XX is not set +# CONFIG_SND_VIA82XX_MODEM is not set +# CONFIG_SND_VIRTUOSO is not set +# CONFIG_SND_VX222 is not set +# CONFIG_SND_YMFPCI is not set + +# +# HD-Audio +# +# CONFIG_SND_HDA_INTEL is not set +CONFIG_SND_HDA_CORE=m +CONFIG_SND_HDA_DSP_LOADER=y +CONFIG_SND_HDA_COMPONENT=y +CONFIG_SND_HDA_I915=y +CONFIG_SND_HDA_EXT_CORE=m +CONFIG_SND_HDA_PREALLOC_SIZE=64 +# CONFIG_SND_SPI is not set +CONFIG_SND_USB=y +CONFIG_SND_USB_AUDIO=m +# CONFIG_SND_USB_UA101 is not set +# CONFIG_SND_USB_USX2Y is not set +# CONFIG_SND_USB_CAIAQ is not set +# CONFIG_SND_USB_US122L is not set +# CONFIG_SND_USB_6FIRE is not set +CONFIG_SND_USB_HIFACE=m +# CONFIG_SND_BCD2000 is not set +# CONFIG_SND_USB_POD is not set +# CONFIG_SND_USB_PODHD is not set +# CONFIG_SND_USB_TONEPORT is not set +# CONFIG_SND_USB_VARIAX is not set +CONFIG_SND_SOC=y +CONFIG_SND_SOC_COMPRESS=y +CONFIG_SND_SOC_TOPOLOGY=y +CONFIG_SND_SOC_ACPI=y +# CONFIG_SND_SOC_AMD_ACP is not set +# CONFIG_SND_ATMEL_SOC is not set +# CONFIG_SND_DESIGNWARE_I2S is not set + +# +# SoC Audio for Freescale CPUs +# + +# +# Common SoC Audio options for Freescale CPUs: +# +# CONFIG_SND_SOC_FSL_ASRC is not set +# CONFIG_SND_SOC_FSL_SAI is not set +# CONFIG_SND_SOC_FSL_SSI is not set +# CONFIG_SND_SOC_FSL_SPDIF is not set +# CONFIG_SND_SOC_FSL_ESAI is not set +# CONFIG_SND_SOC_IMX_AUDMUX is not set +# CONFIG_SND_I2S_HI6210_I2S is not set +# CONFIG_SND_SOC_IMG is not set +CONFIG_SND_SOC_INTEL_SST_TOPLEVEL=y +CONFIG_SND_SST_IPC=y +CONFIG_SND_SST_IPC_ACPI=y 
+CONFIG_SND_SOC_INTEL_SST=m +# CONFIG_SND_SOC_INTEL_HASWELL is not set +CONFIG_SND_SST_ATOM_HIFI2_PLATFORM=y +# CONFIG_SND_SST_ATOM_HIFI2_PLATFORM_PCI is not set +CONFIG_SND_SST_ATOM_HIFI2_PLATFORM_ACPI=y +CONFIG_SND_SOC_INTEL_SKYLAKE=m +CONFIG_SND_SOC_ACPI_INTEL_MATCH=y +# CONFIG_SND_SOC_INTEL_CNL_FPGA is not set +# CONFIG_SND_SOC_SDW_AGGM1M2 is not set +CONFIG_SND_SOC_INTEL_MACH=y +# CONFIG_SND_SOC_INTEL_BYTCR_RT5640_MACH is not set +# CONFIG_SND_SOC_INTEL_BYTCR_RT5651_MACH is not set +# CONFIG_SND_SOC_INTEL_CHT_BSW_RT5672_MACH is not set +# CONFIG_SND_SOC_INTEL_CHT_BSW_RT5645_MACH is not set +# CONFIG_SND_SOC_INTEL_CHT_BSW_MAX98090_TI_MACH is not set +# CONFIG_SND_SOC_INTEL_CHT_BSW_NAU8824_MACH is not set +# CONFIG_SND_SOC_INTEL_BYT_CHT_DA7213_MACH is not set +# CONFIG_SND_SOC_INTEL_BYT_CHT_ES8316_MACH is not set +# CONFIG_SND_SOC_INTEL_BYT_CHT_NOCODEC_MACH is not set +# CONFIG_SND_SOC_INTEL_SKL_RT286_MACH is not set +# CONFIG_SND_SOC_INTEL_SKL_NAU88L25_SSM4567_MACH is not set +# CONFIG_SND_SOC_INTEL_SKL_NAU88L25_MAX98357A_MACH is not set +# CONFIG_SND_SOC_INTEL_BXT_DA7219_MAX98357A_MACH is not set +# CONFIG_SND_SOC_INTEL_BXT_RT298_MACH is not set +# CONFIG_SND_SOC_INTEL_KBL_RT5663_MAX98927_MACH is not set +# CONFIG_SND_SOC_INTEL_KBL_RT5663_RT5514_MAX98927_MACH is not set +# CONFIG_SND_SOC_INTEL_KBL_DA7219_MAX98357A_MACH is not set +# CONFIG_SND_SOC_INTEL_GLK_RT5682_MAX98357A_MACH is not set +# CONFIG_SND_SOC_INTEL_CNL_CS42L42_MACH is not set +# CONFIG_SND_SOC_INTEL_CNL_RT700_MACH is not set +# CONFIG_SND_SOC_INTEL_CNL_SVFPGA_MACH is not set +# CONFIG_SND_SOC_INTEL_CNL_RT274_MACH is not set +# CONFIG_SND_SOC_INTEL_ICL_RT274_MACH is not set +CONFIG_SND_SOC_INTEL_BXT_TDF8532_MACH=m +# CONFIG_SND_SOC_INTEL_BXT_ULL_MACH is not set +# CONFIG_SND_SOC_INTEL_KBLR_RT298_MACH is not set +# CONFIG_SND_SOC_INTEL_BXTP_IVI_RSE_MACH is not set +# CONFIG_SND_SOC_INTEL_BXTP_IVI_HU_MACH is not set +# CONFIG_SND_SOC_INTEL_BXTP_IVI_M3_MACH is not set +# 
CONFIG_SND_SOC_INTEL_BXTP_IVI_GENERIC_MACH is not set + +# +# STMicroelectronics STM32 SOC audio support +# +# CONFIG_SND_SOC_XTFPGA_I2S is not set +# CONFIG_ZX_TDM is not set +CONFIG_SND_SOC_I2C_AND_SPI=y + +# +# CODEC drivers +# +# CONFIG_SND_SOC_AC97_CODEC is not set +# CONFIG_SND_SOC_ADAU1701 is not set +# CONFIG_SND_SOC_ADAU1761_I2C is not set +# CONFIG_SND_SOC_ADAU1761_SPI is not set +# CONFIG_SND_SOC_ADAU7002 is not set +# CONFIG_SND_SOC_AK4104 is not set +# CONFIG_SND_SOC_AK4458 is not set +# CONFIG_SND_SOC_AK4554 is not set +# CONFIG_SND_SOC_AK4613 is not set +# CONFIG_SND_SOC_AK4642 is not set +# CONFIG_SND_SOC_AK5386 is not set +# CONFIG_SND_SOC_AK5558 is not set +# CONFIG_SND_SOC_ALC5623 is not set +# CONFIG_SND_SOC_BD28623 is not set +# CONFIG_SND_SOC_BT_SCO is not set +# CONFIG_SND_SOC_CS35L32 is not set +# CONFIG_SND_SOC_CS35L33 is not set +# CONFIG_SND_SOC_CS35L34 is not set +# CONFIG_SND_SOC_CS35L35 is not set +# CONFIG_SND_SOC_CS42L42 is not set +# CONFIG_SND_SOC_SVFPGA is not set +# CONFIG_SND_SOC_SVFPGA_SDW is not set +# CONFIG_SND_SOC_SVFPGA_I2C is not set +# CONFIG_SND_SOC_CS42L51_I2C is not set +# CONFIG_SND_SOC_CS42L52 is not set +# CONFIG_SND_SOC_CS42L56 is not set +# CONFIG_SND_SOC_CS42L73 is not set +# CONFIG_SND_SOC_CS4265 is not set +# CONFIG_SND_SOC_CS4270 is not set +# CONFIG_SND_SOC_CS4271_I2C is not set +# CONFIG_SND_SOC_CS4271_SPI is not set +# CONFIG_SND_SOC_CS42XX8_I2C is not set +# CONFIG_SND_SOC_CS43130 is not set +# CONFIG_SND_SOC_CS4349 is not set +# CONFIG_SND_SOC_CS53L30 is not set +# CONFIG_SND_SOC_ES7134 is not set +# CONFIG_SND_SOC_ES7241 is not set +# CONFIG_SND_SOC_ES8316 is not set +# CONFIG_SND_SOC_ES8328_I2C is not set +# CONFIG_SND_SOC_ES8328_SPI is not set +# CONFIG_SND_SOC_GTM601 is not set +# CONFIG_SND_SOC_INNO_RK3036 is not set +# CONFIG_SND_SOC_MAX98504 is not set +# CONFIG_SND_SOC_MAX9867 is not set +# CONFIG_SND_SOC_MAX98927 is not set +# CONFIG_SND_SOC_MAX98373 is not set +# CONFIG_SND_SOC_MAX9860 is not 
set +# CONFIG_SND_SOC_MSM8916_WCD_DIGITAL is not set +# CONFIG_SND_SOC_PCM1681 is not set +# CONFIG_SND_SOC_PCM1789_I2C is not set +# CONFIG_SND_SOC_PCM179X_I2C is not set +# CONFIG_SND_SOC_PCM179X_SPI is not set +# CONFIG_SND_SOC_PCM186X_I2C is not set +# CONFIG_SND_SOC_PCM186X_SPI is not set +# CONFIG_SND_SOC_PCM3168A_I2C is not set +# CONFIG_SND_SOC_PCM3168A_SPI is not set +# CONFIG_SND_SOC_PCM512x_I2C is not set +# CONFIG_SND_SOC_PCM512x_SPI is not set +# CONFIG_SND_SOC_RT5616 is not set +# CONFIG_SND_SOC_RT5631 is not set +# CONFIG_SND_SOC_RT700 is not set +# CONFIG_SND_SOC_RT700_SDW is not set +# CONFIG_SND_SOC_SGTL5000 is not set +# CONFIG_SND_SOC_SIMPLE_AMPLIFIER is not set +# CONFIG_SND_SOC_SIRF_AUDIO_CODEC is not set +# CONFIG_SND_SOC_SPDIF is not set +# CONFIG_SND_SOC_SSM2305 is not set +# CONFIG_SND_SOC_SSM2602_SPI is not set +# CONFIG_SND_SOC_SSM2602_I2C is not set +# CONFIG_SND_SOC_SSM4567 is not set +# CONFIG_SND_SOC_STA32X is not set +# CONFIG_SND_SOC_STA350 is not set +# CONFIG_SND_SOC_STI_SAS is not set +# CONFIG_SND_SOC_TAS2552 is not set +# CONFIG_SND_SOC_TAS5086 is not set +# CONFIG_SND_SOC_TAS571X is not set +# CONFIG_SND_SOC_TAS5720 is not set +# CONFIG_SND_SOC_TAS6424 is not set +# CONFIG_SND_SOC_TDA7419 is not set +CONFIG_SND_SOC_TDF8532=m +# CONFIG_SND_SOC_TFA9879 is not set +# CONFIG_SND_SOC_TLV320AIC23_I2C is not set +# CONFIG_SND_SOC_TLV320AIC23_SPI is not set +# CONFIG_SND_SOC_TLV320AIC31XX is not set +# CONFIG_SND_SOC_TLV320AIC32X4_I2C is not set +# CONFIG_SND_SOC_TLV320AIC32X4_SPI is not set +# CONFIG_SND_SOC_TLV320AIC3X is not set +# CONFIG_SND_SOC_TS3A227E is not set +# CONFIG_SND_SOC_TSCS42XX is not set +# CONFIG_SND_SOC_TSCS454 is not set +# CONFIG_SND_SOC_WM8510 is not set +# CONFIG_SND_SOC_WM8523 is not set +# CONFIG_SND_SOC_WM8524 is not set +# CONFIG_SND_SOC_WM8580 is not set +# CONFIG_SND_SOC_WM8711 is not set +# CONFIG_SND_SOC_WM8728 is not set +# CONFIG_SND_SOC_WM8731 is not set +# CONFIG_SND_SOC_WM8737 is not set +# 
CONFIG_SND_SOC_WM8741 is not set +# CONFIG_SND_SOC_WM8750 is not set +# CONFIG_SND_SOC_WM8753 is not set +# CONFIG_SND_SOC_WM8770 is not set +# CONFIG_SND_SOC_WM8776 is not set +# CONFIG_SND_SOC_WM8782 is not set +# CONFIG_SND_SOC_WM8804_I2C is not set +# CONFIG_SND_SOC_WM8804_SPI is not set +# CONFIG_SND_SOC_WM8903 is not set +# CONFIG_SND_SOC_WM8960 is not set +# CONFIG_SND_SOC_WM8962 is not set +# CONFIG_SND_SOC_WM8974 is not set +# CONFIG_SND_SOC_WM8978 is not set +# CONFIG_SND_SOC_WM8985 is not set +# CONFIG_SND_SOC_ZX_AUD96P22 is not set +# CONFIG_SND_SOC_MAX9759 is not set +# CONFIG_SND_SOC_MT6351 is not set +# CONFIG_SND_SOC_NAU8540 is not set +# CONFIG_SND_SOC_NAU8810 is not set +# CONFIG_SND_SOC_NAU8824 is not set +# CONFIG_SND_SOC_TPA6130A2 is not set +# CONFIG_SND_SIMPLE_CARD is not set +CONFIG_SND_X86=y +# CONFIG_HDMI_LPE_AUDIO is not set + +# +# HID support +# +CONFIG_HID=y +# CONFIG_HID_BATTERY_STRENGTH is not set +CONFIG_HIDRAW=y +CONFIG_UHID=y +CONFIG_HID_GENERIC=y + +# +# Special HID drivers +# +CONFIG_HID_A4TECH=y +# CONFIG_HID_ACCUTOUCH is not set +# CONFIG_HID_ACRUX is not set +CONFIG_HID_APPLE=y +# CONFIG_HID_APPLEIR is not set +# CONFIG_HID_ASUS is not set +# CONFIG_HID_AUREAL is not set +CONFIG_HID_BELKIN=y +# CONFIG_HID_BETOP_FF is not set +CONFIG_HID_CHERRY=y +CONFIG_HID_CHICONY=y +# CONFIG_HID_CORSAIR is not set +# CONFIG_HID_COUGAR is not set +# CONFIG_HID_PRODIKEYS is not set +# CONFIG_HID_CMEDIA is not set +# CONFIG_HID_CP2112 is not set +CONFIG_HID_CYPRESS=y +# CONFIG_HID_DRAGONRISE is not set +# CONFIG_HID_EMS_FF is not set +# CONFIG_HID_ELAN is not set +# CONFIG_HID_ELECOM is not set +CONFIG_HID_ELO=m +CONFIG_HID_EZKEY=y +# CONFIG_HID_GEMBIRD is not set +# CONFIG_HID_GFRM is not set +# CONFIG_HID_HOLTEK is not set +# CONFIG_HID_GOOGLE_HAMMER is not set +# CONFIG_HID_GT683R is not set +# CONFIG_HID_KEYTOUCH is not set +# CONFIG_HID_KYE is not set +# CONFIG_HID_UCLOGIC is not set +# CONFIG_HID_WALTOP is not set +# CONFIG_HID_GYRATION 
is not set +# CONFIG_HID_ICADE is not set +# CONFIG_HID_ITE is not set +# CONFIG_HID_JABRA is not set +# CONFIG_HID_TWINHAN is not set +CONFIG_HID_KENSINGTON=y +# CONFIG_HID_LCPOWER is not set +# CONFIG_HID_LED is not set +# CONFIG_HID_LENOVO is not set +CONFIG_HID_LOGITECH=y +# CONFIG_HID_LOGITECH_DJ is not set +# CONFIG_HID_LOGITECH_HIDPP is not set +CONFIG_LOGITECH_FF=y +# CONFIG_LOGIRUMBLEPAD2_FF is not set +# CONFIG_LOGIG940_FF is not set +CONFIG_LOGIWHEELS_FF=y +CONFIG_HID_MAGICMOUSE=m +# CONFIG_HID_MAYFLASH is not set +# CONFIG_HID_REDRAGON is not set +CONFIG_HID_MICROSOFT=y +CONFIG_HID_MONTEREY=y +CONFIG_HID_MULTITOUCH=y +# CONFIG_HID_NTI is not set +CONFIG_HID_NTRIG=m +# CONFIG_HID_ORTEK is not set +# CONFIG_HID_PANTHERLORD is not set +# CONFIG_HID_PENMOUNT is not set +# CONFIG_HID_PETALYNX is not set +# CONFIG_HID_PICOLCD is not set +# CONFIG_HID_PLANTRONICS is not set +# CONFIG_HID_PRIMAX is not set +# CONFIG_HID_RETRODE is not set +# CONFIG_HID_ROCCAT is not set +# CONFIG_HID_SAITEK is not set +# CONFIG_HID_SAMSUNG is not set +# CONFIG_HID_SONY is not set +# CONFIG_HID_SPEEDLINK is not set +# CONFIG_HID_STEAM is not set +# CONFIG_HID_STEELSERIES is not set +# CONFIG_HID_SUNPLUS is not set +# CONFIG_HID_RMI is not set +# CONFIG_HID_GREENASIA is not set +# CONFIG_HID_SMARTJOYPLUS is not set +# CONFIG_HID_TIVO is not set +# CONFIG_HID_TOPSEED is not set +# CONFIG_HID_THINGM is not set +# CONFIG_HID_THRUSTMASTER is not set +# CONFIG_HID_UDRAW_PS3 is not set +# CONFIG_HID_WACOM is not set +# CONFIG_HID_WIIMOTE is not set +CONFIG_HID_XINMO=m +# CONFIG_HID_ZEROPLUS is not set +# CONFIG_HID_ZYDACRON is not set +CONFIG_HID_SENSOR_HUB=m +# CONFIG_HID_SENSOR_CUSTOM_SENSOR is not set +# CONFIG_HID_ALPS is not set + +# +# USB HID support +# +CONFIG_USB_HID=y +CONFIG_HID_PID=y +CONFIG_USB_HIDDEV=y + +# +# I2C HID support +# +CONFIG_I2C_HID=m + +# +# Intel ISH HID support +# +# CONFIG_INTEL_ISH_HID is not set +CONFIG_USB_OHCI_LITTLE_ENDIAN=y +CONFIG_USB_SUPPORT=y 
+CONFIG_USB_COMMON=y +CONFIG_USB_ARCH_HAS_HCD=y +CONFIG_USB=y +CONFIG_USB_PCI=y +CONFIG_USB_ANNOUNCE_NEW_DEVICES=y + +# +# Miscellaneous USB options +# +# CONFIG_USB_DEFAULT_PERSIST is not set +CONFIG_USB_DYNAMIC_MINORS=y +CONFIG_USB_OTG=y +# CONFIG_USB_OTG_WHITELIST is not set +# CONFIG_USB_OTG_BLACKLIST_HUB is not set +CONFIG_USB_OTG_FSM=y +# CONFIG_USB_LEDS_TRIGGER_USBPORT is not set +CONFIG_USB_MON=y +# CONFIG_USB_WUSB_CBAF is not set + +# +# USB Host Controller Drivers +# +CONFIG_USB_C67X00_HCD=y +CONFIG_USB_XHCI_HCD=m +# CONFIG_USB_XHCI_DBGCAP is not set +CONFIG_USB_XHCI_PCI=m +CONFIG_USB_XHCI_PLATFORM=m +CONFIG_USB_EHCI_HCD=y +# CONFIG_USB_EHCI_ROOT_HUB_TT is not set +CONFIG_USB_EHCI_TT_NEWSCHED=y +CONFIG_USB_EHCI_PCI=y +# CONFIG_USB_EHCI_HCD_PLATFORM is not set +CONFIG_USB_OXU210HP_HCD=y +CONFIG_USB_ISP116X_HCD=y +CONFIG_USB_FOTG210_HCD=m +# CONFIG_USB_MAX3421_HCD is not set +CONFIG_USB_OHCI_HCD=y +# CONFIG_USB_OHCI_HCD_PCI is not set +# CONFIG_USB_OHCI_HCD_PLATFORM is not set +CONFIG_USB_UHCI_HCD=y +CONFIG_USB_SL811_HCD=y +# CONFIG_USB_SL811_HCD_ISO is not set +CONFIG_USB_R8A66597_HCD=y +# CONFIG_USB_HCD_BCMA is not set +# CONFIG_USB_HCD_TEST_MODE is not set + +# +# USB Device Class drivers +# +CONFIG_USB_ACM=y +CONFIG_USB_PRINTER=m +CONFIG_USB_WDM=m +# CONFIG_USB_TMC is not set + +# +# NOTE: USB_STORAGE depends on SCSI but BLK_DEV_SD may +# + +# +# also be needed; see USB_STORAGE Help for more info +# +CONFIG_USB_STORAGE=y +# CONFIG_USB_STORAGE_DEBUG is not set +CONFIG_USB_STORAGE_REALTEK=y +CONFIG_REALTEK_AUTOPM=y +# CONFIG_USB_STORAGE_DATAFAB is not set +# CONFIG_USB_STORAGE_FREECOM is not set +# CONFIG_USB_STORAGE_ISD200 is not set +# CONFIG_USB_STORAGE_USBAT is not set +# CONFIG_USB_STORAGE_SDDR09 is not set +# CONFIG_USB_STORAGE_SDDR55 is not set +# CONFIG_USB_STORAGE_JUMPSHOT is not set +# CONFIG_USB_STORAGE_ALAUDA is not set +# CONFIG_USB_STORAGE_ONETOUCH is not set +# CONFIG_USB_STORAGE_KARMA is not set +# CONFIG_USB_STORAGE_CYPRESS_ATACB is not 
set +# CONFIG_USB_STORAGE_ENE_UB6250 is not set +# CONFIG_USB_UAS is not set + +# +# USB Imaging devices +# +# CONFIG_USB_MDC800 is not set +# CONFIG_USB_MICROTEK is not set +# CONFIG_USBIP_CORE is not set +# CONFIG_USB_MUSB_HDRC is not set +CONFIG_USB_DWC3=m +CONFIG_USB_DWC3_ULPI=y +# CONFIG_USB_DWC3_HOST is not set +CONFIG_USB_DWC3_GADGET=y +# CONFIG_USB_DWC3_DUAL_ROLE is not set + +# +# Platform Glue Driver Support +# +CONFIG_USB_DWC3_PCI=m +CONFIG_USB_DWC3_HAPS=m +CONFIG_USB_DWC2=y +# CONFIG_USB_DWC2_HOST is not set + +# +# Gadget/Dual-role mode requires USB Gadget support to be enabled +# +CONFIG_USB_DWC2_PERIPHERAL=y +# CONFIG_USB_DWC2_DUAL_ROLE is not set +# CONFIG_USB_DWC2_PCI is not set +# CONFIG_USB_DWC2_DEBUG is not set +# CONFIG_USB_DWC2_TRACK_MISSED_SOFS is not set +# CONFIG_USB_CHIPIDEA is not set +# CONFIG_USB_ISP1760 is not set + +# +# USB port drivers +# +CONFIG_USB_SERIAL=y +CONFIG_USB_SERIAL_CONSOLE=y +CONFIG_USB_SERIAL_GENERIC=y +# CONFIG_USB_SERIAL_SIMPLE is not set +# CONFIG_USB_SERIAL_AIRCABLE is not set +CONFIG_USB_SERIAL_ARK3116=y +CONFIG_USB_SERIAL_BELKIN=y +CONFIG_USB_SERIAL_CH341=y +CONFIG_USB_SERIAL_WHITEHEAT=y +CONFIG_USB_SERIAL_DIGI_ACCELEPORT=y +CONFIG_USB_SERIAL_CP210X=y +# CONFIG_USB_SERIAL_CYPRESS_M8 is not set +# CONFIG_USB_SERIAL_EMPEG is not set +CONFIG_USB_SERIAL_FTDI_SIO=y +# CONFIG_USB_SERIAL_VISOR is not set +# CONFIG_USB_SERIAL_IPAQ is not set +# CONFIG_USB_SERIAL_IR is not set +# CONFIG_USB_SERIAL_EDGEPORT is not set +# CONFIG_USB_SERIAL_EDGEPORT_TI is not set +CONFIG_USB_SERIAL_F81232=y +# CONFIG_USB_SERIAL_F8153X is not set +# CONFIG_USB_SERIAL_GARMIN is not set +# CONFIG_USB_SERIAL_IPW is not set +# CONFIG_USB_SERIAL_IUU is not set +# CONFIG_USB_SERIAL_KEYSPAN_PDA is not set +# CONFIG_USB_SERIAL_KEYSPAN is not set +# CONFIG_USB_SERIAL_KLSI is not set +# CONFIG_USB_SERIAL_KOBIL_SCT is not set +CONFIG_USB_SERIAL_MCT_U232=y +# CONFIG_USB_SERIAL_METRO is not set +CONFIG_USB_SERIAL_MOS7720=y +CONFIG_USB_SERIAL_MOS7840=y +# 
CONFIG_USB_SERIAL_MXUPORT is not set +# CONFIG_USB_SERIAL_NAVMAN is not set +CONFIG_USB_SERIAL_PL2303=y +CONFIG_USB_SERIAL_OTI6858=y +# CONFIG_USB_SERIAL_QCAUX is not set +# CONFIG_USB_SERIAL_QUALCOMM is not set +CONFIG_USB_SERIAL_SPCP8X5=y +# CONFIG_USB_SERIAL_SAFE is not set +# CONFIG_USB_SERIAL_SIERRAWIRELESS is not set +# CONFIG_USB_SERIAL_SYMBOL is not set +CONFIG_USB_SERIAL_TI=y +# CONFIG_USB_SERIAL_CYBERJACK is not set +CONFIG_USB_SERIAL_XIRCOM=y +CONFIG_USB_SERIAL_WWAN=y +CONFIG_USB_SERIAL_OPTION=y +# CONFIG_USB_SERIAL_OMNINET is not set +# CONFIG_USB_SERIAL_OPTICON is not set +# CONFIG_USB_SERIAL_XSENS_MT is not set +# CONFIG_USB_SERIAL_WISHBONE is not set +CONFIG_USB_SERIAL_SSU100=y +# CONFIG_USB_SERIAL_QT2 is not set +# CONFIG_USB_SERIAL_UPD78F0730 is not set +# CONFIG_USB_SERIAL_DEBUG is not set + +# +# USB Miscellaneous drivers +# +# CONFIG_USB_EMI62 is not set +# CONFIG_USB_EMI26 is not set +# CONFIG_USB_ADUTUX is not set +# CONFIG_USB_SEVSEG is not set +# CONFIG_USB_RIO500 is not set +# CONFIG_USB_LEGOTOWER is not set +# CONFIG_USB_LCD is not set +# CONFIG_USB_CYPRESS_CY7C63 is not set +# CONFIG_USB_CYTHERM is not set +# CONFIG_USB_IDMOUSE is not set +# CONFIG_USB_FTDI_ELAN is not set +# CONFIG_USB_APPLEDISPLAY is not set +# CONFIG_USB_SISUSBVGA is not set +# CONFIG_USB_LD is not set +# CONFIG_USB_TRANCEVIBRATOR is not set +# CONFIG_USB_IOWARRIOR is not set +# CONFIG_USB_TEST is not set +# CONFIG_USB_EHSET_TEST_FIXTURE is not set +# CONFIG_USB_ISIGHTFW is not set +# CONFIG_USB_YUREX is not set +CONFIG_USB_EZUSB_FX2=y +# CONFIG_USB_HUB_USB251XB is not set +# CONFIG_USB_HSIC_USB3503 is not set +# CONFIG_USB_HSIC_USB4604 is not set +# CONFIG_USB_LINK_LAYER_TEST is not set +# CONFIG_USB_CHAOSKEY is not set + +# +# USB Physical Layer drivers +# +CONFIG_USB_PHY=y +CONFIG_NOP_USB_XCEIV=y +# CONFIG_USB_GPIO_VBUS is not set +# CONFIG_USB_ISP1301 is not set +CONFIG_USB_GADGET=y +# CONFIG_USB_GADGET_DEBUG is not set +# CONFIG_USB_GADGET_DEBUG_FILES is not set 
+# CONFIG_USB_GADGET_DEBUG_FS is not set +CONFIG_USB_GADGET_VBUS_DRAW=2 +CONFIG_USB_GADGET_STORAGE_NUM_BUFFERS=2 +# CONFIG_U_SERIAL_CONSOLE is not set + +# +# USB Peripheral Controller +# +# CONFIG_USB_FOTG210_UDC is not set +# CONFIG_USB_GR_UDC is not set +# CONFIG_USB_R8A66597 is not set +# CONFIG_USB_PXA27X is not set +# CONFIG_USB_MV_UDC is not set +# CONFIG_USB_MV_U3D is not set +# CONFIG_USB_M66592 is not set +# CONFIG_USB_BDC_UDC is not set +# CONFIG_USB_AMD5536UDC is not set +# CONFIG_USB_NET2272 is not set +# CONFIG_USB_NET2280 is not set +# CONFIG_USB_GOKU is not set +# CONFIG_USB_EG20T is not set +# CONFIG_USB_DUMMY_HCD is not set +CONFIG_USB_LIBCOMPOSITE=y +CONFIG_USB_F_ACM=y +CONFIG_USB_U_SERIAL=y +CONFIG_USB_U_ETHER=y +CONFIG_USB_F_SERIAL=y +CONFIG_USB_F_NCM=y +CONFIG_USB_F_RNDIS=y +CONFIG_USB_F_MASS_STORAGE=y +CONFIG_USB_F_FS=y +CONFIG_USB_F_MIDI=y +CONFIG_USB_F_HID=y +CONFIG_USB_F_AUDIO_SRC=y +CONFIG_USB_F_ACC=y +CONFIG_USB_F_DVCTRACE=y +CONFIG_USB_CONFIGFS=y +CONFIG_USB_CONFIGFS_SERIAL=y +CONFIG_USB_CONFIGFS_ACM=y +# CONFIG_USB_CONFIGFS_OBEX is not set +CONFIG_USB_CONFIGFS_NCM=y +# CONFIG_USB_CONFIGFS_ECM is not set +# CONFIG_USB_CONFIGFS_ECM_SUBSET is not set +CONFIG_USB_CONFIGFS_RNDIS=y +# CONFIG_USB_CONFIGFS_EEM is not set +CONFIG_USB_CONFIGFS_MASS_STORAGE=y +# CONFIG_USB_CONFIGFS_F_LB_SS is not set +CONFIG_USB_CONFIGFS_F_FS=y +CONFIG_USB_CONFIGFS_F_ACC=y +CONFIG_USB_CONFIGFS_F_AUDIO_SRC=y +CONFIG_USB_CONFIGFS_UEVENT=y +# CONFIG_USB_CONFIGFS_F_UAC1 is not set +# CONFIG_USB_CONFIGFS_F_UAC1_LEGACY is not set +# CONFIG_USB_CONFIGFS_F_UAC2 is not set +CONFIG_USB_CONFIGFS_F_MIDI=y +CONFIG_USB_CONFIGFS_F_HID=y +# CONFIG_USB_CONFIGFS_F_UVC is not set +# CONFIG_USB_CONFIGFS_F_PRINTER is not set +CONFIG_USB_CONFIGFS_F_DVCTRACE=y +# CONFIG_USB_ZERO is not set +# CONFIG_USB_AUDIO is not set +# CONFIG_USB_ETH is not set +# CONFIG_USB_G_NCM is not set +# CONFIG_USB_GADGETFS is not set +# CONFIG_USB_FUNCTIONFS is not set +# CONFIG_USB_MASS_STORAGE is not set 
+# CONFIG_USB_G_SERIAL is not set +# CONFIG_USB_MIDI_GADGET is not set +# CONFIG_USB_G_PRINTER is not set +# CONFIG_USB_CDC_COMPOSITE is not set +# CONFIG_USB_G_ACM_MS is not set +# CONFIG_USB_G_MULTI is not set +# CONFIG_USB_G_HID is not set +# CONFIG_USB_G_DBGP is not set +# CONFIG_USB_G_WEBCAM is not set +CONFIG_TYPEC=y +CONFIG_TYPEC_TCPM=y +CONFIG_TYPEC_TCPCI=y +# CONFIG_TYPEC_RT1711H is not set +# CONFIG_TYPEC_FUSB302 is not set +# CONFIG_TYPEC_WCOVE is not set +# CONFIG_TYPEC_UCSI is not set +# CONFIG_TYPEC_TPS6598X is not set + +# +# USB Type-C Multiplexer/DeMultiplexer Switch support +# +# CONFIG_TYPEC_MUX_PI3USB30532 is not set + +# +# USB Type-C Alternate Mode drivers +# +# CONFIG_TYPEC_DP_ALTMODE is not set +CONFIG_USB_ROLES_INTEL_XHCI=y +# CONFIG_USB_LED_TRIG is not set +CONFIG_USB_ULPI_BUS=y +CONFIG_USB_ROLE_SWITCH=y +# CONFIG_UWB is not set +CONFIG_MMC=y +CONFIG_MMC_BLOCK=y +CONFIG_MMC_BLOCK_MINORS=16 +# CONFIG_SDIO_UART is not set +# CONFIG_MMC_TEST is not set + +# +# MMC/SD/SDIO Host Controller Drivers +# +# CONFIG_MMC_DEBUG is not set +CONFIG_MMC_SDHCI=y +CONFIG_MMC_SDHCI_PCI=y +# CONFIG_MMC_RICOH_MMC is not set +CONFIG_MMC_SDHCI_ACPI=y +# CONFIG_MMC_SDHCI_PLTFM is not set +# CONFIG_MMC_WBSD is not set +# CONFIG_MMC_TIFM_SD is not set +# CONFIG_MMC_SPI is not set +# CONFIG_MMC_CB710 is not set +# CONFIG_MMC_VIA_SDMMC is not set +# CONFIG_MMC_VUB300 is not set +# CONFIG_MMC_USHC is not set +# CONFIG_MMC_USDHI6ROL0 is not set +CONFIG_MMC_CQHCI=y +# CONFIG_MMC_TOSHIBA_PCI is not set +# CONFIG_MMC_MTK is not set +# CONFIG_MEMSTICK is not set +CONFIG_NEW_LEDS=y +CONFIG_LEDS_CLASS=y +# CONFIG_LEDS_CLASS_FLASH is not set +# CONFIG_LEDS_BRIGHTNESS_HW_CHANGED is not set + +# +# LED drivers +# +# CONFIG_LEDS_APU is not set +# CONFIG_LEDS_LM3530 is not set +# CONFIG_LEDS_LM3642 is not set +# CONFIG_LEDS_PCA9532 is not set +# CONFIG_LEDS_GPIO is not set +# CONFIG_LEDS_LP3944 is not set +# CONFIG_LEDS_LP3952 is not set +# CONFIG_LEDS_LP5521 is not set +# 
CONFIG_LEDS_LP5523 is not set +# CONFIG_LEDS_LP5562 is not set +# CONFIG_LEDS_LP8501 is not set +# CONFIG_LEDS_PCA955X is not set +# CONFIG_LEDS_PCA963X is not set +# CONFIG_LEDS_DAC124S085 is not set +# CONFIG_LEDS_PWM is not set +# CONFIG_LEDS_REGULATOR is not set +# CONFIG_LEDS_BD2802 is not set +# CONFIG_LEDS_INTEL_SS4200 is not set +# CONFIG_LEDS_LT3593 is not set +# CONFIG_LEDS_TCA6507 is not set +# CONFIG_LEDS_TLC591XX is not set +# CONFIG_LEDS_LM355x is not set + +# +# LED driver for blink(1) USB RGB LED is under Special HID drivers (HID_THINGM) +# +# CONFIG_LEDS_BLINKM is not set +# CONFIG_LEDS_MLXCPLD is not set +# CONFIG_LEDS_MLXREG is not set +# CONFIG_LEDS_USER is not set +# CONFIG_LEDS_NIC78BX is not set + +# +# LED Triggers +# +CONFIG_LEDS_TRIGGERS=y +# CONFIG_LEDS_TRIGGER_TIMER is not set +# CONFIG_LEDS_TRIGGER_ONESHOT is not set +# CONFIG_LEDS_TRIGGER_DISK is not set +# CONFIG_LEDS_TRIGGER_HEARTBEAT is not set +# CONFIG_LEDS_TRIGGER_BACKLIGHT is not set +# CONFIG_LEDS_TRIGGER_CPU is not set +# CONFIG_LEDS_TRIGGER_ACTIVITY is not set +# CONFIG_LEDS_TRIGGER_GPIO is not set +# CONFIG_LEDS_TRIGGER_DEFAULT_ON is not set + +# +# iptables trigger is under Netfilter config (LED target) +# +# CONFIG_LEDS_TRIGGER_TRANSIENT is not set +# CONFIG_LEDS_TRIGGER_CAMERA is not set +# CONFIG_LEDS_TRIGGER_PANIC is not set +# CONFIG_LEDS_TRIGGER_NETDEV is not set +# CONFIG_ACCESSIBILITY is not set +# CONFIG_INFINIBAND is not set +CONFIG_EDAC_ATOMIC_SCRUB=y +CONFIG_EDAC_SUPPORT=y +# CONFIG_EDAC is not set +CONFIG_RTC_LIB=y +CONFIG_RTC_MC146818_LIB=y +CONFIG_RTC_CLASS=y +CONFIG_RTC_HCTOSYS=y +CONFIG_RTC_HCTOSYS_DEVICE="rtc0" +CONFIG_RTC_SYSTOHC=y +CONFIG_RTC_SYSTOHC_DEVICE="rtc0" +# CONFIG_RTC_DEBUG is not set +CONFIG_RTC_NVMEM=y + +# +# RTC interfaces +# +CONFIG_RTC_INTF_SYSFS=y +CONFIG_RTC_INTF_PROC=y +CONFIG_RTC_INTF_DEV=y +# CONFIG_RTC_INTF_DEV_UIE_EMUL is not set +# CONFIG_RTC_DRV_TEST is not set + +# +# I2C RTC drivers +# +# CONFIG_RTC_DRV_ABB5ZES3 is not set +# 
CONFIG_RTC_DRV_ABX80X is not set +# CONFIG_RTC_DRV_DS1307 is not set +# CONFIG_RTC_DRV_DS1374 is not set +# CONFIG_RTC_DRV_DS1672 is not set +# CONFIG_RTC_DRV_MAX6900 is not set +# CONFIG_RTC_DRV_RS5C372 is not set +# CONFIG_RTC_DRV_ISL1208 is not set +# CONFIG_RTC_DRV_ISL12022 is not set +# CONFIG_RTC_DRV_X1205 is not set +# CONFIG_RTC_DRV_PCF8523 is not set +# CONFIG_RTC_DRV_PCF85063 is not set +# CONFIG_RTC_DRV_PCF85363 is not set +# CONFIG_RTC_DRV_PCF8563 is not set +# CONFIG_RTC_DRV_PCF8583 is not set +# CONFIG_RTC_DRV_M41T80 is not set +# CONFIG_RTC_DRV_BQ32K is not set +# CONFIG_RTC_DRV_S35390A is not set +# CONFIG_RTC_DRV_FM3130 is not set +# CONFIG_RTC_DRV_RX8010 is not set +# CONFIG_RTC_DRV_RX8581 is not set +# CONFIG_RTC_DRV_RX8025 is not set +# CONFIG_RTC_DRV_EM3027 is not set +# CONFIG_RTC_DRV_RV8803 is not set + +# +# SPI RTC drivers +# +# CONFIG_RTC_DRV_M41T93 is not set +# CONFIG_RTC_DRV_M41T94 is not set +# CONFIG_RTC_DRV_DS1302 is not set +# CONFIG_RTC_DRV_DS1305 is not set +# CONFIG_RTC_DRV_DS1343 is not set +# CONFIG_RTC_DRV_DS1347 is not set +# CONFIG_RTC_DRV_DS1390 is not set +# CONFIG_RTC_DRV_MAX6916 is not set +# CONFIG_RTC_DRV_R9701 is not set +# CONFIG_RTC_DRV_RX4581 is not set +# CONFIG_RTC_DRV_RX6110 is not set +# CONFIG_RTC_DRV_RS5C348 is not set +# CONFIG_RTC_DRV_MAX6902 is not set +# CONFIG_RTC_DRV_PCF2123 is not set +# CONFIG_RTC_DRV_MCP795 is not set +CONFIG_RTC_I2C_AND_SPI=y + +# +# SPI and I2C RTC drivers +# +# CONFIG_RTC_DRV_DS3232 is not set +# CONFIG_RTC_DRV_PCF2127 is not set +# CONFIG_RTC_DRV_RV3029C2 is not set + +# +# Platform RTC drivers +# +CONFIG_RTC_DRV_CMOS=y +# CONFIG_RTC_DRV_DS1286 is not set +# CONFIG_RTC_DRV_DS1511 is not set +# CONFIG_RTC_DRV_DS1553 is not set +# CONFIG_RTC_DRV_DS1685_FAMILY is not set +# CONFIG_RTC_DRV_DS1742 is not set +# CONFIG_RTC_DRV_DS2404 is not set +# CONFIG_RTC_DRV_STK17TA8 is not set +# CONFIG_RTC_DRV_M48T86 is not set +# CONFIG_RTC_DRV_M48T35 is not set +# CONFIG_RTC_DRV_M48T59 is not 
set +# CONFIG_RTC_DRV_MSM6242 is not set +# CONFIG_RTC_DRV_BQ4802 is not set +# CONFIG_RTC_DRV_RP5C01 is not set +# CONFIG_RTC_DRV_V3020 is not set + +# +# on-CPU RTC drivers +# +# CONFIG_RTC_DRV_FTRTC010 is not set + +# +# HID Sensor RTC drivers +# +# CONFIG_RTC_DRV_HID_SENSOR_TIME is not set +CONFIG_DMADEVICES=y +# CONFIG_DMADEVICES_DEBUG is not set + +# +# DMA Devices +# +CONFIG_DMA_ENGINE=y +CONFIG_DMA_VIRTUAL_CHANNELS=y +CONFIG_DMA_ACPI=y +# CONFIG_ALTERA_MSGDMA is not set +CONFIG_INTEL_IDMA64=y +# CONFIG_INTEL_IOATDMA is not set +# CONFIG_QCOM_HIDMA_MGMT is not set +# CONFIG_QCOM_HIDMA is not set +CONFIG_DW_DMAC_CORE=y +CONFIG_DW_DMAC=y +CONFIG_DW_DMAC_PCI=y +CONFIG_HSU_DMA=y + +# +# DMA Clients +# +# CONFIG_ASYNC_TX_DMA is not set +# CONFIG_DMATEST is not set + +# +# DMABUF options +# +CONFIG_SYNC_FILE=y +# CONFIG_SW_SYNC is not set +# CONFIG_HYPER_DMABUF is not set +# CONFIG_AUXDISPLAY is not set +# CONFIG_UIO is not set +# CONFIG_VFIO is not set +CONFIG_VIRT_DRIVERS=y +# CONFIG_VBOXGUEST is not set +CONFIG_VIRTIO=y +CONFIG_VIRTIO_MENU=y +# CONFIG_VIRTIO_PCI is not set +# CONFIG_VIRTIO_BALLOON is not set +# CONFIG_VIRTIO_INPUT is not set +# CONFIG_VIRTIO_MMIO is not set + +# +# Microsoft Hyper-V guest support +# +# CONFIG_HYPERV is not set +CONFIG_STAGING=y +# CONFIG_PRISM2_USB is not set +# CONFIG_COMEDI is not set +CONFIG_RTL8192U=m +CONFIG_RTLLIB=m +CONFIG_RTLLIB_CRYPTO_CCMP=m +CONFIG_RTLLIB_CRYPTO_TKIP=m +CONFIG_RTLLIB_CRYPTO_WEP=m +CONFIG_RTL8192E=m +# CONFIG_RTL8723BS is not set +CONFIG_R8712U=m +# CONFIG_R8188EU is not set +# CONFIG_R8822BE is not set +# CONFIG_RTS5208 is not set +# CONFIG_VT6655 is not set +# CONFIG_VT6656 is not set + +# +# IIO staging drivers +# + +# +# Accelerometers +# +# CONFIG_ADIS16203 is not set +# CONFIG_ADIS16240 is not set + +# +# Analog to digital converters +# +# CONFIG_AD7606 is not set +# CONFIG_AD7780 is not set +# CONFIG_AD7816 is not set +# CONFIG_AD7192 is not set +# CONFIG_AD7280 is not set + +# +# Analog digital 
bi-direction converters +# +# CONFIG_ADT7316 is not set + +# +# Capacitance to digital converters +# +# CONFIG_AD7150 is not set +# CONFIG_AD7152 is not set +# CONFIG_AD7746 is not set + +# +# Direct Digital Synthesis +# +# CONFIG_AD9832 is not set +# CONFIG_AD9834 is not set + +# +# Network Analyzer, Impedance Converters +# +# CONFIG_AD5933 is not set + +# +# Active energy metering IC +# +# CONFIG_ADE7854 is not set + +# +# Resolver to digital converters +# +# CONFIG_AD2S90 is not set +# CONFIG_AD2S1210 is not set +# CONFIG_FB_SM750 is not set +# CONFIG_FB_XGI is not set + +# +# Speakup console speech +# +# CONFIG_SPEAKUP is not set +# CONFIG_STAGING_MEDIA is not set + +# +# Android +# +CONFIG_ASHMEM=y +# CONFIG_ANDROID_VSOC is not set +CONFIG_SYNC=y +CONFIG_ANDROID_FWDATA=y +# CONFIG_ION is not set +# CONFIG_ABL_BOOTLOADER_CONTROL is not set +# CONFIG_SEND_SLCAN_ENABLE is not set +# CONFIG_SBL_BOOTLOADER_CONTROL is not set +# CONFIG_VSBL_BOOTLOADER_CONTROL is not set +CONFIG_LTE_GDM724X=m +# CONFIG_DGNC is not set +# CONFIG_GS_FPGABOOT is not set +# CONFIG_UNISYSSPAR is not set +# CONFIG_FB_TFT is not set +# CONFIG_WILC1000_SDIO is not set +# CONFIG_WILC1000_SPI is not set +# CONFIG_MOST is not set +# CONFIG_KS7010 is not set +# CONFIG_GREYBUS is not set +# CONFIG_DRM_VBOXVIDEO is not set +# CONFIG_PI433 is not set +# CONFIG_MTK_MMC is not set + +# +# Gasket devices +# +# CONFIG_STAGING_GASKET_FRAMEWORK is not set +# CONFIG_XIL_AXIS_FIFO is not set +# CONFIG_EROFS_FS is not set +CONFIG_X86_PLATFORM_DEVICES=y +# CONFIG_ACER_WIRELESS is not set +# CONFIG_ACERHDF is not set +# CONFIG_ASUS_LAPTOP is not set +# CONFIG_DELL_SMBIOS is not set +# CONFIG_DELL_SMO8800 is not set +# CONFIG_DELL_RBTN is not set +# CONFIG_FUJITSU_LAPTOP is not set +# CONFIG_FUJITSU_TABLET is not set +# CONFIG_GPD_POCKET_FAN is not set +# CONFIG_HP_WIRELESS is not set +# CONFIG_PANASONIC_LAPTOP is not set +# CONFIG_COMPAL_LAPTOP is not set +# CONFIG_SONY_LAPTOP is not set +# 
CONFIG_THINKPAD_ACPI is not set +# CONFIG_SENSORS_HDAPS is not set +# CONFIG_INTEL_MENLOW is not set +# CONFIG_EEEPC_LAPTOP is not set +# CONFIG_ASUS_WIRELESS is not set +# CONFIG_ACPI_WMI is not set +# CONFIG_TOPSTAR_LAPTOP is not set +# CONFIG_TOSHIBA_BT_RFKILL is not set +# CONFIG_TOSHIBA_HAPS is not set +# CONFIG_ACPI_CMPC is not set +# CONFIG_INTEL_INT0002_VGPIO is not set +# CONFIG_INTEL_HID_EVENT is not set +# CONFIG_INTEL_VBTN is not set +# CONFIG_INTEL_IPS is not set +# CONFIG_INTEL_PMC_CORE is not set +# CONFIG_IBM_RTL is not set +# CONFIG_SAMSUNG_LAPTOP is not set +# CONFIG_INTEL_OAKTRAIL is not set +# CONFIG_SAMSUNG_Q10 is not set +# CONFIG_APPLE_GMUX is not set +CONFIG_INTEL_RST=y +# CONFIG_INTEL_SMARTCONNECT is not set +# CONFIG_PVPANIC is not set +CONFIG_INTEL_PMC_IPC=y +# CONFIG_INTEL_BXTWC_PMIC_TMU is not set +# CONFIG_SURFACE_PRO3_BUTTON is not set +# CONFIG_SURFACE_3_BUTTON is not set +CONFIG_INTEL_PUNIT_IPC=y +CONFIG_INTEL_TELEMETRY=y +# CONFIG_MLX_PLATFORM is not set +# CONFIG_INTEL_TURBO_MAX_3 is not set +# CONFIG_I2C_MULTI_INSTANTIATE is not set +# CONFIG_INTEL_PSTORE_PRAM is not set +CONFIG_PMC_ATOM=y +# CONFIG_CHROME_PLATFORMS is not set +# CONFIG_MELLANOX_PLATFORM is not set +CONFIG_CLKDEV_LOOKUP=y +CONFIG_HAVE_CLK_PREPARE=y +CONFIG_COMMON_CLK=y + +# +# Common Clock Framework +# +# CONFIG_COMMON_CLK_MAX9485 is not set +# CONFIG_COMMON_CLK_SI5351 is not set +# CONFIG_COMMON_CLK_SI544 is not set +# CONFIG_COMMON_CLK_CDCE706 is not set +# CONFIG_COMMON_CLK_CS2000_CP is not set +# CONFIG_COMMON_CLK_PWM is not set +# CONFIG_HWSPINLOCK is not set + +# +# Clock Source drivers +# +CONFIG_CLKEVT_I8253=y +CONFIG_I8253_LOCK=y +CONFIG_CLKBLD_I8253=y +CONFIG_MAILBOX=y +CONFIG_PCC=y +# CONFIG_ALTERA_MBOX is not set +CONFIG_IOMMU_API=y +CONFIG_IOMMU_SUPPORT=y + +# +# Generic IOMMU Pagetable Support +# +# CONFIG_IOMMU_DEBUGFS is not set +# CONFIG_IOMMU_DEFAULT_PASSTHROUGH is not set +CONFIG_IOMMU_IOVA=y +# CONFIG_AMD_IOMMU is not set +CONFIG_DMAR_TABLE=y 
+CONFIG_INTEL_IOMMU=y +# CONFIG_INTEL_IOMMU_SVM is not set +CONFIG_INTEL_IOMMU_DEFAULT_ON=y +CONFIG_INTEL_IOMMU_FLOPPY_WA=y +# CONFIG_IRQ_REMAP is not set + +# +# Remoteproc drivers +# +# CONFIG_REMOTEPROC is not set + +# +# Rpmsg drivers +# +# CONFIG_RPMSG_QCOM_GLINK_RPM is not set +# CONFIG_RPMSG_VIRTIO is not set + +# +# SOC (System On Chip) specific Drivers +# + +# +# Amlogic SoC drivers +# + +# +# Broadcom SoC drivers +# + +# +# NXP/Freescale QorIQ SoC drivers +# + +# +# i.MX SoC drivers +# + +# +# Qualcomm SoC drivers +# +# CONFIG_SOC_TI is not set + +# +# Xilinx SoC drivers +# +# CONFIG_XILINX_VCU is not set +CONFIG_PM_DEVFREQ=y + +# +# DEVFREQ Governors +# +CONFIG_DEVFREQ_GOV_SIMPLE_ONDEMAND=y +# CONFIG_DEVFREQ_GOV_PERFORMANCE is not set +# CONFIG_DEVFREQ_GOV_POWERSAVE is not set +# CONFIG_DEVFREQ_GOV_USERSPACE is not set +# CONFIG_DEVFREQ_GOV_PASSIVE is not set + +# +# DEVFREQ Drivers +# +# CONFIG_PM_DEVFREQ_EVENT is not set +CONFIG_EXTCON=y + +# +# Extcon Device Drivers +# +# CONFIG_EXTCON_ADC_JACK is not set +CONFIG_EXTCON_ARIZONA=y +CONFIG_EXTCON_GPIO=y +# CONFIG_EXTCON_INTEL_INT3496 is not set +# CONFIG_EXTCON_MAX3355 is not set +# CONFIG_EXTCON_RT8973A is not set +# CONFIG_EXTCON_SM5502 is not set +# CONFIG_EXTCON_USB_GPIO is not set +# CONFIG_MEMORY is not set +CONFIG_IIO=y +CONFIG_IIO_BUFFER=y +# CONFIG_IIO_BUFFER_CB is not set +# CONFIG_IIO_BUFFER_HW_CONSUMER is not set +CONFIG_IIO_KFIFO_BUF=y +CONFIG_IIO_TRIGGERED_BUFFER=y +# CONFIG_IIO_CONFIGFS is not set +CONFIG_IIO_TRIGGER=y +CONFIG_IIO_CONSUMERS_PER_TRIGGER=2 +# CONFIG_IIO_SW_DEVICE is not set +# CONFIG_IIO_SW_TRIGGER is not set + +# +# Accelerometers +# +# CONFIG_ADIS16201 is not set +# CONFIG_ADIS16209 is not set +# CONFIG_ADXL345_I2C is not set +# CONFIG_ADXL345_SPI is not set +CONFIG_BMA180=m +# CONFIG_BMA220 is not set +CONFIG_BMC150_ACCEL=y +CONFIG_BMC150_ACCEL_I2C=y +CONFIG_BMC150_ACCEL_SPI=y +# CONFIG_DA280 is not set +# CONFIG_DA311 is not set +# CONFIG_DMARD09 is not set +# 
CONFIG_DMARD10 is not set +CONFIG_HID_SENSOR_ACCEL_3D=m +# CONFIG_IIO_CROS_EC_ACCEL_LEGACY is not set +CONFIG_IIO_ST_ACCEL_3AXIS=y +CONFIG_IIO_ST_ACCEL_I2C_3AXIS=y +CONFIG_IIO_ST_ACCEL_SPI_3AXIS=y +# CONFIG_KXSD9 is not set +CONFIG_KXCJK1013=y +# CONFIG_MC3230 is not set +# CONFIG_MMA7455_I2C is not set +# CONFIG_MMA7455_SPI is not set +# CONFIG_MMA7660 is not set +# CONFIG_MMA8452 is not set +# CONFIG_MMA9551 is not set +# CONFIG_MMA9553 is not set +# CONFIG_MXC4005 is not set +# CONFIG_MXC6255 is not set +# CONFIG_SCA3000 is not set +# CONFIG_STK8312 is not set +# CONFIG_STK8BA50 is not set + +# +# Analog to digital converters +# +# CONFIG_AD7266 is not set +# CONFIG_AD7291 is not set +# CONFIG_AD7298 is not set +# CONFIG_AD7476 is not set +# CONFIG_AD7766 is not set +# CONFIG_AD7791 is not set +# CONFIG_AD7793 is not set +# CONFIG_AD7887 is not set +# CONFIG_AD7923 is not set +# CONFIG_AD799X is not set +# CONFIG_CC10001_ADC is not set +# CONFIG_HI8435 is not set +# CONFIG_HX711 is not set +# CONFIG_INA2XX_ADC is not set +# CONFIG_LTC2471 is not set +# CONFIG_LTC2485 is not set +# CONFIG_LTC2497 is not set +# CONFIG_MAX1027 is not set +# CONFIG_MAX11100 is not set +# CONFIG_MAX1118 is not set +# CONFIG_MAX1363 is not set +# CONFIG_MAX9611 is not set +CONFIG_MCP320X=m +# CONFIG_MCP3422 is not set +CONFIG_NAU7802=m +# CONFIG_TI_ADC081C is not set +# CONFIG_TI_ADC0832 is not set +# CONFIG_TI_ADC084S021 is not set +# CONFIG_TI_ADC12138 is not set +# CONFIG_TI_ADC108S102 is not set +# CONFIG_TI_ADC128S052 is not set +# CONFIG_TI_ADC161S626 is not set +# CONFIG_TI_ADS1015 is not set +# CONFIG_TI_ADS7950 is not set +# CONFIG_TI_TLC4541 is not set + +# +# Analog Front Ends +# + +# +# Amplifiers +# +# CONFIG_AD8366 is not set + +# +# Chemical Sensors +# +# CONFIG_ATLAS_PH_SENSOR is not set +# CONFIG_BME680 is not set +# CONFIG_CCS811 is not set +# CONFIG_IAQCORE is not set +# CONFIG_VZ89X is not set + +# +# Hid Sensor IIO Common +# +CONFIG_HID_SENSOR_IIO_COMMON=m 
+CONFIG_HID_SENSOR_IIO_TRIGGER=m + +# +# SSP Sensor Common +# +# CONFIG_IIO_SSP_SENSORHUB is not set +CONFIG_IIO_ST_SENSORS_I2C=y +CONFIG_IIO_ST_SENSORS_SPI=y +CONFIG_IIO_ST_SENSORS_CORE=y + +# +# Counters +# + +# +# Digital to analog converters +# +# CONFIG_AD5064 is not set +# CONFIG_AD5360 is not set +# CONFIG_AD5380 is not set +# CONFIG_AD5421 is not set +# CONFIG_AD5446 is not set +# CONFIG_AD5449 is not set +# CONFIG_AD5592R is not set +# CONFIG_AD5593R is not set +# CONFIG_AD5504 is not set +# CONFIG_AD5624R_SPI is not set +# CONFIG_LTC2632 is not set +# CONFIG_AD5686_SPI is not set +# CONFIG_AD5696_I2C is not set +# CONFIG_AD5755 is not set +# CONFIG_AD5758 is not set +# CONFIG_AD5761 is not set +# CONFIG_AD5764 is not set +# CONFIG_AD5791 is not set +# CONFIG_AD7303 is not set +# CONFIG_AD8801 is not set +# CONFIG_DS4424 is not set +# CONFIG_M62332 is not set +# CONFIG_MAX517 is not set +# CONFIG_MCP4725 is not set +# CONFIG_MCP4922 is not set +# CONFIG_TI_DAC082S085 is not set +# CONFIG_TI_DAC5571 is not set + +# +# IIO dummy driver +# + +# +# Frequency Synthesizers DDS/PLL +# + +# +# Clock Generator/Distribution +# +# CONFIG_AD9523 is not set + +# +# Phase-Locked Loop (PLL) frequency synthesizers +# +# CONFIG_ADF4350 is not set + +# +# Digital gyroscope sensors +# +# CONFIG_ADIS16080 is not set +# CONFIG_ADIS16130 is not set +# CONFIG_ADIS16136 is not set +# CONFIG_ADIS16260 is not set +# CONFIG_ADXRS450 is not set +CONFIG_BMG160=y +CONFIG_BMG160_I2C=y +CONFIG_BMG160_SPI=y +CONFIG_HID_SENSOR_GYRO_3D=m +# CONFIG_MPU3050_I2C is not set +CONFIG_IIO_ST_GYRO_3AXIS=y +CONFIG_IIO_ST_GYRO_I2C_3AXIS=y +CONFIG_IIO_ST_GYRO_SPI_3AXIS=y +# CONFIG_ITG3200 is not set + +# +# Health Sensors +# + +# +# Heart Rate Monitors +# +# CONFIG_AFE4403 is not set +# CONFIG_AFE4404 is not set +# CONFIG_MAX30100 is not set +# CONFIG_MAX30102 is not set + +# +# Humidity sensors +# +# CONFIG_AM2315 is not set +# CONFIG_DHT11 is not set +# CONFIG_HDC100X is not set +# 
CONFIG_HID_SENSOR_HUMIDITY is not set +# CONFIG_HTS221 is not set +# CONFIG_HTU21 is not set +# CONFIG_SI7005 is not set +# CONFIG_SI7020 is not set + +# +# Inertial measurement units +# +# CONFIG_ADIS16400 is not set +# CONFIG_ADIS16480 is not set +# CONFIG_BMI160_I2C is not set +# CONFIG_BMI160_SPI is not set +CONFIG_KMX61=y +# CONFIG_INV_MPU6050_I2C is not set +# CONFIG_INV_MPU6050_SPI is not set +# CONFIG_IIO_ST_LSM6DSX is not set + +# +# Light sensors +# +CONFIG_ACPI_ALS=y +# CONFIG_ADJD_S311 is not set +# CONFIG_AL3320A is not set +# CONFIG_APDS9300 is not set +# CONFIG_APDS9960 is not set +# CONFIG_BH1750 is not set +# CONFIG_BH1780 is not set +CONFIG_CM32181=y +CONFIG_CM3232=y +# CONFIG_CM3323 is not set +CONFIG_CM36651=m +# CONFIG_GP2AP020A00F is not set +# CONFIG_SENSORS_ISL29018 is not set +# CONFIG_SENSORS_ISL29028 is not set +# CONFIG_ISL29125 is not set +CONFIG_HID_SENSOR_ALS=m +# CONFIG_HID_SENSOR_PROX is not set +CONFIG_JSA1212=m +# CONFIG_RPR0521 is not set +# CONFIG_LTR501 is not set +# CONFIG_LV0104CS is not set +# CONFIG_MAX44000 is not set +# CONFIG_OPT3001 is not set +# CONFIG_PA12203001 is not set +# CONFIG_SI1133 is not set +# CONFIG_SI1145 is not set +# CONFIG_STK3310 is not set +# CONFIG_ST_UVIS25 is not set +# CONFIG_TCS3414 is not set +# CONFIG_TCS3472 is not set +# CONFIG_SENSORS_TSL2563 is not set +# CONFIG_TSL2583 is not set +# CONFIG_TSL2772 is not set +# CONFIG_TSL4531 is not set +# CONFIG_US5182D is not set +# CONFIG_VCNL4000 is not set +# CONFIG_VEML6070 is not set +# CONFIG_VL6180 is not set +# CONFIG_ZOPT2201 is not set + +# +# Magnetometer sensors +# +CONFIG_AK8975=m +CONFIG_AK09911=m +# CONFIG_BMC150_MAGN_I2C is not set +# CONFIG_BMC150_MAGN_SPI is not set +# CONFIG_MAG3110 is not set +CONFIG_HID_SENSOR_MAGNETOMETER_3D=m +# CONFIG_MMC35240 is not set +CONFIG_IIO_ST_MAGN_3AXIS=y +CONFIG_IIO_ST_MAGN_I2C_3AXIS=y +CONFIG_IIO_ST_MAGN_SPI_3AXIS=y +# CONFIG_SENSORS_HMC5843_I2C is not set +# CONFIG_SENSORS_HMC5843_SPI is not set + +# 
+# Multiplexers +# + +# +# Inclinometer sensors +# +CONFIG_HID_SENSOR_INCLINOMETER_3D=m +# CONFIG_HID_SENSOR_DEVICE_ROTATION is not set + +# +# Triggers - standalone +# +CONFIG_IIO_INTERRUPT_TRIGGER=y +CONFIG_IIO_SYSFS_TRIGGER=y + +# +# Digital potentiometers +# +# CONFIG_AD5272 is not set +# CONFIG_DS1803 is not set +# CONFIG_MAX5481 is not set +# CONFIG_MAX5487 is not set +# CONFIG_MCP4018 is not set +# CONFIG_MCP4131 is not set +# CONFIG_MCP4531 is not set +# CONFIG_TPL0102 is not set + +# +# Digital potentiostats +# +# CONFIG_LMP91000 is not set + +# +# Pressure sensors +# +# CONFIG_ABP060MG is not set +# CONFIG_BMP280 is not set +# CONFIG_HID_SENSOR_PRESS is not set +# CONFIG_HP03 is not set +# CONFIG_MPL115_I2C is not set +# CONFIG_MPL115_SPI is not set +# CONFIG_MPL3115 is not set +# CONFIG_MS5611 is not set +# CONFIG_MS5637 is not set +CONFIG_IIO_ST_PRESS=y +CONFIG_IIO_ST_PRESS_I2C=y +CONFIG_IIO_ST_PRESS_SPI=y +# CONFIG_T5403 is not set +# CONFIG_HP206C is not set +# CONFIG_ZPA2326 is not set + +# +# Lightning sensors +# +# CONFIG_AS3935 is not set + +# +# Proximity and distance sensors +# +# CONFIG_ISL29501 is not set +# CONFIG_LIDAR_LITE_V2 is not set +# CONFIG_RFD77402 is not set +# CONFIG_SRF04 is not set +# CONFIG_SX9500 is not set +# CONFIG_SRF08 is not set + +# +# Resolver to digital converters +# +# CONFIG_AD2S1200 is not set + +# +# Temperature sensors +# +# CONFIG_MAXIM_THERMOCOUPLE is not set +# CONFIG_HID_SENSOR_TEMP is not set +# CONFIG_MLX90614 is not set +# CONFIG_MLX90632 is not set +CONFIG_TMP006=m +# CONFIG_TMP007 is not set +# CONFIG_TSYS01 is not set +# CONFIG_TSYS02D is not set +# CONFIG_NTB is not set +# CONFIG_VME_BUS is not set +CONFIG_PWM=y +CONFIG_PWM_SYSFS=y +# CONFIG_PWM_CRC is not set +CONFIG_PWM_LPSS=y +CONFIG_PWM_LPSS_PCI=y +# CONFIG_PWM_LPSS_PLATFORM is not set +# CONFIG_PWM_PCA9685 is not set + +# +# IRQ chip support +# +CONFIG_ARM_GIC_MAX_NR=1 +# CONFIG_IPACK_BUS is not set +# CONFIG_RESET_CONTROLLER is not set +# 
CONFIG_FMC is not set + +# +# PHY Subsystem +# +CONFIG_GENERIC_PHY=y +# CONFIG_BCM_KONA_USB2_PHY is not set +# CONFIG_PHY_PXA_28NM_HSIC is not set +# CONFIG_PHY_PXA_28NM_USB2 is not set +# CONFIG_PHY_CPCAP_USB is not set +# CONFIG_PHY_QCOM_USB_HS is not set +# CONFIG_PHY_QCOM_USB_HSIC is not set +# CONFIG_PHY_SAMSUNG_USB2 is not set +# CONFIG_PHY_TUSB1210 is not set +CONFIG_POWERCAP=y +CONFIG_INTEL_RAPL=y +# CONFIG_IDLE_INJECT is not set +# CONFIG_MCB is not set + +# +# Performance monitor support +# +CONFIG_RAS=y +# CONFIG_THUNDERBOLT is not set + +# +# Android +# +CONFIG_ANDROID=y +CONFIG_ANDROID_BINDER_IPC=y +CONFIG_ANDROID_BINDER_DEVICES="binder,hwbinder,vndbinder" +# CONFIG_ANDROID_BINDER_IPC_SELFTEST is not set +# CONFIG_LIBNVDIMM is not set +CONFIG_DAX=y +# CONFIG_DEV_DAX is not set +CONFIG_NVMEM=y + +# +# HW tracing support +# +CONFIG_STM=y +# CONFIG_STM_PROTO_BASIC is not set +# CONFIG_STM_PROTO_SYS_T is not set +# CONFIG_STM_DUMMY is not set +CONFIG_STM_SOURCE_CONSOLE=y +# CONFIG_STM_SOURCE_HEARTBEAT is not set +CONFIG_INTEL_TH=y +CONFIG_INTEL_TH_PCI=y +# CONFIG_INTEL_TH_ACPI is not set +CONFIG_INTEL_TH_GTH=y +CONFIG_INTEL_TH_STH=y +CONFIG_INTEL_TH_MSU=y +CONFIG_INTEL_TH_MSU_DVC=y +# CONFIG_INTEL_TH_MSU_DVC_DEBUG is not set +CONFIG_INTEL_TH_PTI=y +# CONFIG_INTEL_TH_DEBUG is not set +# CONFIG_INTEL_TH_EARLY_PRINTK is not set +# CONFIG_FPGA is not set +CONFIG_PM_OPP=y +CONFIG_SDW=y +CONFIG_SDW_CNL=y +# CONFIG_SDW_MAXIM_SLAVE is not set +# CONFIG_UNISYS_VISORBUS is not set +# CONFIG_SIOX is not set +# CONFIG_SLIMBUS is not set +# CONFIG_LEGACY_ENERGY_MODEL_DT is not set + +# +# File systems +# +CONFIG_DCACHE_WORD_ACCESS=y +CONFIG_FS_IOMAP=y +# CONFIG_EXT2_FS is not set +# CONFIG_EXT3_FS is not set +CONFIG_EXT4_FS=y +CONFIG_EXT4_USE_FOR_EXT2=y +CONFIG_EXT4_FS_POSIX_ACL=y +CONFIG_EXT4_FS_SECURITY=y +CONFIG_EXT4_ENCRYPTION=y +CONFIG_EXT4_FS_ENCRYPTION=y +# CONFIG_EXT4_DEBUG is not set +CONFIG_JBD2=y +# CONFIG_JBD2_DEBUG is not set +CONFIG_FS_MBCACHE=y +# 
CONFIG_REISERFS_FS is not set +# CONFIG_JFS_FS is not set +# CONFIG_XFS_FS is not set +# CONFIG_GFS2_FS is not set +# CONFIG_OCFS2_FS is not set +# CONFIG_BTRFS_FS is not set +# CONFIG_NILFS2_FS is not set +# CONFIG_F2FS_FS is not set +# CONFIG_FS_DAX is not set +CONFIG_FS_POSIX_ACL=y +# CONFIG_EXPORTFS_BLOCK_OPS is not set +CONFIG_FILE_LOCKING=y +CONFIG_MANDATORY_FILE_LOCKING=y +CONFIG_FS_ENCRYPTION=y +CONFIG_FSNOTIFY=y +CONFIG_DNOTIFY=y +CONFIG_INOTIFY_USER=y +CONFIG_FANOTIFY=y +# CONFIG_FANOTIFY_ACCESS_PERMISSIONS is not set +CONFIG_QUOTA=y +CONFIG_QUOTA_NETLINK_INTERFACE=y +# CONFIG_PRINT_QUOTA_WARNING is not set +# CONFIG_QUOTA_DEBUG is not set +CONFIG_QUOTA_TREE=y +# CONFIG_QFMT_V1 is not set +CONFIG_QFMT_V2=y +CONFIG_QUOTACTL=y +CONFIG_QUOTACTL_COMPAT=y +# CONFIG_AUTOFS4_FS is not set +# CONFIG_AUTOFS_FS is not set +CONFIG_FUSE_FS=y +# CONFIG_CUSE is not set +# CONFIG_OVERLAY_FS is not set + +# +# Caches +# +# CONFIG_FSCACHE is not set + +# +# CD-ROM/DVD Filesystems +# +# CONFIG_ISO9660_FS is not set +# CONFIG_UDF_FS is not set + +# +# DOS/FAT/NT Filesystems +# +CONFIG_FAT_FS=y +CONFIG_MSDOS_FS=y +CONFIG_VFAT_FS=y +CONFIG_FAT_DEFAULT_CODEPAGE=437 +CONFIG_FAT_DEFAULT_IOCHARSET="iso8859-1" +# CONFIG_FAT_DEFAULT_UTF8 is not set +# CONFIG_NTFS_FS is not set + +# +# Pseudo filesystems +# +CONFIG_PROC_FS=y +# CONFIG_PROC_KCORE is not set +CONFIG_PROC_SYSCTL=y +CONFIG_PROC_PAGE_MONITOR=y +# CONFIG_PROC_CHILDREN is not set +CONFIG_PROC_UID=y +CONFIG_KERNFS=y +CONFIG_SYSFS=y +CONFIG_TMPFS=y +CONFIG_TMPFS_POSIX_ACL=y +CONFIG_TMPFS_XATTR=y +CONFIG_HUGETLBFS=y +CONFIG_HUGETLB_PAGE=y +CONFIG_MEMFD_CREATE=y +CONFIG_CONFIGFS_FS=y +CONFIG_EFIVAR_FS=y +CONFIG_MISC_FILESYSTEMS=y +# CONFIG_ORANGEFS_FS is not set +# CONFIG_ADFS_FS is not set +# CONFIG_AFFS_FS is not set +# CONFIG_ECRYPT_FS is not set +CONFIG_SDCARD_FS=y +# CONFIG_HFS_FS is not set +# CONFIG_HFSPLUS_FS is not set +# CONFIG_BEFS_FS is not set +# CONFIG_BFS_FS is not set +# CONFIG_EFS_FS is not set +# 
CONFIG_CRAMFS is not set +CONFIG_SQUASHFS=y +CONFIG_SQUASHFS_FILE_CACHE=y +# CONFIG_SQUASHFS_FILE_DIRECT is not set +# CONFIG_SQUASHFS_DECOMP_SINGLE is not set +CONFIG_SQUASHFS_DECOMP_MULTI=y +# CONFIG_SQUASHFS_DECOMP_MULTI_PERCPU is not set +CONFIG_SQUASHFS_XATTR=y +CONFIG_SQUASHFS_ZLIB=y +CONFIG_SQUASHFS_LZ4=y +CONFIG_SQUASHFS_LZO=y +CONFIG_SQUASHFS_XZ=y +# CONFIG_SQUASHFS_ZSTD is not set +# CONFIG_SQUASHFS_4K_DEVBLK_SIZE is not set +CONFIG_SQUASHFS_EMBEDDED=y +CONFIG_SQUASHFS_FRAGMENT_CACHE_SIZE=3 +# CONFIG_VXFS_FS is not set +# CONFIG_MINIX_FS is not set +# CONFIG_OMFS_FS is not set +# CONFIG_HPFS_FS is not set +# CONFIG_QNX4FS_FS is not set +# CONFIG_QNX6FS_FS is not set +# CONFIG_ROMFS_FS is not set +CONFIG_PSTORE=y +CONFIG_PSTORE_DEFLATE_COMPRESS=y +# CONFIG_PSTORE_LZO_COMPRESS is not set +# CONFIG_PSTORE_LZ4_COMPRESS is not set +# CONFIG_PSTORE_LZ4HC_COMPRESS is not set +# CONFIG_PSTORE_842_COMPRESS is not set +# CONFIG_PSTORE_ZSTD_COMPRESS is not set +CONFIG_PSTORE_COMPRESS=y +CONFIG_PSTORE_DEFLATE_COMPRESS_DEFAULT=y +CONFIG_PSTORE_COMPRESS_DEFAULT="deflate" +CONFIG_PSTORE_CONSOLE=y +# CONFIG_PSTORE_PMSG is not set +CONFIG_PSTORE_RAM=y +# CONFIG_SYSV_FS is not set +# CONFIG_UFS_FS is not set +CONFIG_NETWORK_FILESYSTEMS=y +# CONFIG_NFS_FS is not set +# CONFIG_NFSD is not set +# CONFIG_CEPH_FS is not set +CONFIG_CIFS=y +# CONFIG_CIFS_STATS2 is not set +CONFIG_CIFS_ALLOW_INSECURE_LEGACY=y +CONFIG_CIFS_WEAK_PW_HASH=y +# CONFIG_CIFS_UPCALL is not set +# CONFIG_CIFS_XATTR is not set +CONFIG_CIFS_DEBUG=y +# CONFIG_CIFS_DEBUG2 is not set +# CONFIG_CIFS_DEBUG_DUMP_KEYS is not set +# CONFIG_CIFS_DFS_UPCALL is not set +# CONFIG_CODA_FS is not set +# CONFIG_AFS_FS is not set +CONFIG_NLS=y +CONFIG_NLS_DEFAULT="utf8" +CONFIG_NLS_CODEPAGE_437=y +# CONFIG_NLS_CODEPAGE_737 is not set +# CONFIG_NLS_CODEPAGE_775 is not set +# CONFIG_NLS_CODEPAGE_850 is not set +# CONFIG_NLS_CODEPAGE_852 is not set +# CONFIG_NLS_CODEPAGE_855 is not set +# CONFIG_NLS_CODEPAGE_857 is not set +# 
CONFIG_NLS_CODEPAGE_860 is not set +# CONFIG_NLS_CODEPAGE_861 is not set +# CONFIG_NLS_CODEPAGE_862 is not set +# CONFIG_NLS_CODEPAGE_863 is not set +# CONFIG_NLS_CODEPAGE_864 is not set +# CONFIG_NLS_CODEPAGE_865 is not set +# CONFIG_NLS_CODEPAGE_866 is not set +# CONFIG_NLS_CODEPAGE_869 is not set +# CONFIG_NLS_CODEPAGE_936 is not set +# CONFIG_NLS_CODEPAGE_950 is not set +# CONFIG_NLS_CODEPAGE_932 is not set +# CONFIG_NLS_CODEPAGE_949 is not set +# CONFIG_NLS_CODEPAGE_874 is not set +# CONFIG_NLS_ISO8859_8 is not set +# CONFIG_NLS_CODEPAGE_1250 is not set +# CONFIG_NLS_CODEPAGE_1251 is not set +CONFIG_NLS_ASCII=y +CONFIG_NLS_ISO8859_1=y +# CONFIG_NLS_ISO8859_2 is not set +# CONFIG_NLS_ISO8859_3 is not set +# CONFIG_NLS_ISO8859_4 is not set +# CONFIG_NLS_ISO8859_5 is not set +# CONFIG_NLS_ISO8859_6 is not set +# CONFIG_NLS_ISO8859_7 is not set +# CONFIG_NLS_ISO8859_9 is not set +# CONFIG_NLS_ISO8859_13 is not set +# CONFIG_NLS_ISO8859_14 is not set +# CONFIG_NLS_ISO8859_15 is not set +# CONFIG_NLS_KOI8_R is not set +# CONFIG_NLS_KOI8_U is not set +# CONFIG_NLS_MAC_ROMAN is not set +# CONFIG_NLS_MAC_CELTIC is not set +# CONFIG_NLS_MAC_CENTEURO is not set +# CONFIG_NLS_MAC_CROATIAN is not set +# CONFIG_NLS_MAC_CYRILLIC is not set +# CONFIG_NLS_MAC_GAELIC is not set +# CONFIG_NLS_MAC_GREEK is not set +# CONFIG_NLS_MAC_ICELAND is not set +# CONFIG_NLS_MAC_INUIT is not set +# CONFIG_NLS_MAC_ROMANIAN is not set +# CONFIG_NLS_MAC_TURKISH is not set +CONFIG_NLS_UTF8=y +# CONFIG_DLM is not set + +# +# Security options +# +CONFIG_KEYS=y +CONFIG_KEYS_COMPAT=y +# CONFIG_PERSISTENT_KEYRINGS is not set +CONFIG_BIG_KEYS=y +# CONFIG_TRUSTED_KEYS is not set +# CONFIG_ENCRYPTED_KEYS is not set +# CONFIG_KEY_DH_OPERATIONS is not set +# CONFIG_SECURITY_DMESG_RESTRICT is not set +CONFIG_SECURITY_PERF_EVENTS_RESTRICT=y +CONFIG_SECURITY=y +CONFIG_SECURITY_WRITABLE_HOOKS=y +# CONFIG_SECURITY_STACKING is not set +# CONFIG_SECURITY_LSM_DEBUG is not set +CONFIG_SECURITYFS=y 
+CONFIG_SECURITY_NETWORK=y +# CONFIG_PAGE_TABLE_ISOLATION is not set +# CONFIG_SECURITY_NETWORK_XFRM is not set +# CONFIG_SECURITY_PATH is not set +# CONFIG_INTEL_TXT is not set +CONFIG_LSM_MMAP_MIN_ADDR=65536 +CONFIG_HAVE_HARDENED_USERCOPY_ALLOCATOR=y +CONFIG_HARDENED_USERCOPY=y +CONFIG_HARDENED_USERCOPY_FALLBACK=y +# CONFIG_HARDENED_USERCOPY_PAGESPAN is not set +# CONFIG_FORTIFY_SOURCE is not set +# CONFIG_STATIC_USERMODEHELPER is not set +CONFIG_SECURITY_SELINUX=y +CONFIG_SECURITY_SELINUX_BOOTPARAM=y +CONFIG_SECURITY_SELINUX_BOOTPARAM_VALUE=1 +CONFIG_SECURITY_SELINUX_DISABLE=y +CONFIG_SECURITY_SELINUX_DEVELOP=y +CONFIG_SECURITY_SELINUX_AVC_STATS=y +CONFIG_SECURITY_SELINUX_CHECKREQPROT_VALUE=1 +# CONFIG_SECURITY_SMACK is not set +# CONFIG_SECURITY_TOMOYO is not set +# CONFIG_SECURITY_APPARMOR is not set +# CONFIG_SECURITY_LOADPIN is not set +# CONFIG_SECURITY_YAMA is not set +# CONFIG_INTEGRITY is not set + +# +# Security Module Selection +# +CONFIG_DEFAULT_SECURITY_SELINUX=y +# CONFIG_DEFAULT_SECURITY_DAC is not set +CONFIG_DEFAULT_SECURITY="selinux" +CONFIG_CRYPTO=y + +# +# Crypto core or helper +# +CONFIG_CRYPTO_ALGAPI=y +CONFIG_CRYPTO_ALGAPI2=y +CONFIG_CRYPTO_AEAD=y +CONFIG_CRYPTO_AEAD2=y +CONFIG_CRYPTO_BLKCIPHER=y +CONFIG_CRYPTO_BLKCIPHER2=y +CONFIG_CRYPTO_HASH=y +CONFIG_CRYPTO_HASH2=y +CONFIG_CRYPTO_RNG=y +CONFIG_CRYPTO_RNG2=y +CONFIG_CRYPTO_RNG_DEFAULT=y +CONFIG_CRYPTO_AKCIPHER2=y +CONFIG_CRYPTO_AKCIPHER=y +CONFIG_CRYPTO_KPP2=y +CONFIG_CRYPTO_KPP=m +CONFIG_CRYPTO_ACOMP2=y +CONFIG_CRYPTO_RSA=y +# CONFIG_CRYPTO_DH is not set +CONFIG_CRYPTO_ECDH=m +CONFIG_CRYPTO_MANAGER=y +CONFIG_CRYPTO_MANAGER2=y +# CONFIG_CRYPTO_USER is not set +CONFIG_CRYPTO_MANAGER_DISABLE_TESTS=y +CONFIG_CRYPTO_GF128MUL=y +CONFIG_CRYPTO_NULL=y +CONFIG_CRYPTO_NULL2=y +# CONFIG_CRYPTO_PCRYPT is not set +CONFIG_CRYPTO_WORKQUEUE=y +CONFIG_CRYPTO_CRYPTD=y +# CONFIG_CRYPTO_MCRYPTD is not set +CONFIG_CRYPTO_AUTHENC=y +# CONFIG_CRYPTO_TEST is not set +CONFIG_CRYPTO_SIMD=y 
+CONFIG_CRYPTO_GLUE_HELPER_X86=y +CONFIG_CRYPTO_ENGINE=m + +# +# Authenticated Encryption with Associated Data +# +CONFIG_CRYPTO_CCM=y +CONFIG_CRYPTO_GCM=y +# CONFIG_CRYPTO_CHACHA20POLY1305 is not set +# CONFIG_CRYPTO_AEGIS128 is not set +# CONFIG_CRYPTO_AEGIS128L is not set +# CONFIG_CRYPTO_AEGIS256 is not set +# CONFIG_CRYPTO_AEGIS128_AESNI_SSE2 is not set +# CONFIG_CRYPTO_AEGIS128L_AESNI_SSE2 is not set +# CONFIG_CRYPTO_AEGIS256_AESNI_SSE2 is not set +# CONFIG_CRYPTO_MORUS640 is not set +# CONFIG_CRYPTO_MORUS640_SSE2 is not set +# CONFIG_CRYPTO_MORUS1280 is not set +# CONFIG_CRYPTO_MORUS1280_SSE2 is not set +# CONFIG_CRYPTO_MORUS1280_AVX2 is not set +CONFIG_CRYPTO_SEQIV=y +CONFIG_CRYPTO_ECHAINIV=y + +# +# Block modes +# +CONFIG_CRYPTO_CBC=y +# CONFIG_CRYPTO_CFB is not set +CONFIG_CRYPTO_CTR=y +CONFIG_CRYPTO_CTS=y +CONFIG_CRYPTO_ECB=y +CONFIG_CRYPTO_LRW=y +# CONFIG_CRYPTO_PCBC is not set +CONFIG_CRYPTO_XTS=y +# CONFIG_CRYPTO_KEYWRAP is not set + +# +# Hash modes +# +CONFIG_CRYPTO_CMAC=y +CONFIG_CRYPTO_HMAC=y +# CONFIG_CRYPTO_XCBC is not set +# CONFIG_CRYPTO_VMAC is not set + +# +# Digest +# +CONFIG_CRYPTO_CRC32C=y +CONFIG_CRYPTO_CRC32C_INTEL=y +CONFIG_CRYPTO_CRC32=y +# CONFIG_CRYPTO_CRC32_PCLMUL is not set +CONFIG_CRYPTO_CRCT10DIF=y +CONFIG_CRYPTO_CRCT10DIF_PCLMUL=y +CONFIG_CRYPTO_GHASH=y +# CONFIG_CRYPTO_POLY1305 is not set +# CONFIG_CRYPTO_POLY1305_X86_64 is not set +CONFIG_CRYPTO_MD4=y +CONFIG_CRYPTO_MD5=y +CONFIG_CRYPTO_MICHAEL_MIC=m +# CONFIG_CRYPTO_RMD128 is not set +# CONFIG_CRYPTO_RMD160 is not set +# CONFIG_CRYPTO_RMD256 is not set +# CONFIG_CRYPTO_RMD320 is not set +CONFIG_CRYPTO_SHA1=y +CONFIG_CRYPTO_SHA1_SSSE3=y +# CONFIG_CRYPTO_SHA256_SSSE3 is not set +# CONFIG_CRYPTO_SHA512_SSSE3 is not set +# CONFIG_CRYPTO_SHA1_MB is not set +# CONFIG_CRYPTO_SHA256_MB is not set +# CONFIG_CRYPTO_SHA512_MB is not set +CONFIG_CRYPTO_SHA256=y +CONFIG_CRYPTO_SHA512=y +# CONFIG_CRYPTO_SHA3 is not set +# CONFIG_CRYPTO_SM3 is not set +# CONFIG_CRYPTO_TGR192 is not set +# 
CONFIG_CRYPTO_WP512 is not set +# CONFIG_CRYPTO_GHASH_CLMUL_NI_INTEL is not set + +# +# Ciphers +# +CONFIG_CRYPTO_AES=y +# CONFIG_CRYPTO_AES_TI is not set +CONFIG_CRYPTO_AES_X86_64=y +CONFIG_CRYPTO_AES_NI_INTEL=y +# CONFIG_CRYPTO_ANUBIS is not set +CONFIG_CRYPTO_ARC4=y +CONFIG_CRYPTO_BLOWFISH=y +CONFIG_CRYPTO_BLOWFISH_COMMON=y +CONFIG_CRYPTO_BLOWFISH_X86_64=y +# CONFIG_CRYPTO_CAMELLIA is not set +# CONFIG_CRYPTO_CAMELLIA_X86_64 is not set +# CONFIG_CRYPTO_CAMELLIA_AESNI_AVX_X86_64 is not set +# CONFIG_CRYPTO_CAMELLIA_AESNI_AVX2_X86_64 is not set +# CONFIG_CRYPTO_CAST5 is not set +# CONFIG_CRYPTO_CAST5_AVX_X86_64 is not set +# CONFIG_CRYPTO_CAST6 is not set +# CONFIG_CRYPTO_CAST6_AVX_X86_64 is not set +CONFIG_CRYPTO_DES=y +# CONFIG_CRYPTO_DES3_EDE_X86_64 is not set +# CONFIG_CRYPTO_FCRYPT is not set +# CONFIG_CRYPTO_KHAZAD is not set +# CONFIG_CRYPTO_SALSA20 is not set +# CONFIG_CRYPTO_CHACHA20 is not set +# CONFIG_CRYPTO_CHACHA20_X86_64 is not set +# CONFIG_CRYPTO_SEED is not set +# CONFIG_CRYPTO_SERPENT is not set +# CONFIG_CRYPTO_SERPENT_SSE2_X86_64 is not set +# CONFIG_CRYPTO_SERPENT_AVX_X86_64 is not set +# CONFIG_CRYPTO_SERPENT_AVX2_X86_64 is not set +# CONFIG_CRYPTO_SM4 is not set +# CONFIG_CRYPTO_SPECK is not set +# CONFIG_CRYPTO_TEA is not set +CONFIG_CRYPTO_TWOFISH=y +CONFIG_CRYPTO_TWOFISH_COMMON=y +CONFIG_CRYPTO_TWOFISH_X86_64=y +CONFIG_CRYPTO_TWOFISH_X86_64_3WAY=y +CONFIG_CRYPTO_TWOFISH_AVX_X86_64=y + +# +# Compression +# +CONFIG_CRYPTO_DEFLATE=y +CONFIG_CRYPTO_LZO=y +# CONFIG_CRYPTO_842 is not set +CONFIG_CRYPTO_LZ4=y +CONFIG_CRYPTO_LZ4HC=y +# CONFIG_CRYPTO_ZSTD is not set + +# +# Random Number Generation +# +# CONFIG_CRYPTO_ANSI_CPRNG is not set +CONFIG_CRYPTO_DRBG_MENU=y +CONFIG_CRYPTO_DRBG_HMAC=y +# CONFIG_CRYPTO_DRBG_HASH is not set +# CONFIG_CRYPTO_DRBG_CTR is not set +CONFIG_CRYPTO_DRBG=y +CONFIG_CRYPTO_JITTERENTROPY=y +# CONFIG_CRYPTO_USER_API_HASH is not set +# CONFIG_CRYPTO_USER_API_SKCIPHER is not set +# CONFIG_CRYPTO_USER_API_RNG is not set 
+# CONFIG_CRYPTO_USER_API_AEAD is not set +CONFIG_CRYPTO_HASH_INFO=y +CONFIG_CRYPTO_HW=y +# CONFIG_CRYPTO_DEV_PADLOCK is not set +# CONFIG_CRYPTO_DEV_CCP is not set +# CONFIG_CRYPTO_DEV_QAT_DH895xCC is not set +# CONFIG_CRYPTO_DEV_QAT_C3XXX is not set +# CONFIG_CRYPTO_DEV_QAT_C62X is not set +# CONFIG_CRYPTO_DEV_QAT_DH895xCCVF is not set +# CONFIG_CRYPTO_DEV_QAT_C3XXXVF is not set +# CONFIG_CRYPTO_DEV_QAT_C62XVF is not set +# CONFIG_CRYPTO_DEV_NITROX_CNN55XX is not set +CONFIG_CRYPTO_DEV_VIRTIO=m +CONFIG_ASYMMETRIC_KEY_TYPE=y +CONFIG_ASYMMETRIC_PUBLIC_KEY_SUBTYPE=y +CONFIG_X509_CERTIFICATE_PARSER=y +CONFIG_PKCS7_MESSAGE_PARSER=y +CONFIG_PKCS7_TEST_KEY=y +# CONFIG_SIGNED_PE_FILE_VERIFICATION is not set + +# +# Certificates for signature checking +# +CONFIG_MODULE_SIG_KEY="certs/signing_key.pem" +CONFIG_SYSTEM_TRUSTED_KEYRING=y +CONFIG_SYSTEM_TRUSTED_KEYS="" +# CONFIG_SYSTEM_EXTRA_CERTIFICATE is not set +# CONFIG_SECONDARY_TRUSTED_KEYRING is not set +# CONFIG_SYSTEM_BLACKLIST_KEYRING is not set + +# +# Library routines +# +CONFIG_BITREVERSE=y +CONFIG_RATIONAL=y +CONFIG_GENERIC_STRNCPY_FROM_USER=y +CONFIG_GENERIC_STRNLEN_USER=y +CONFIG_GENERIC_NET_UTILS=y +CONFIG_GENERIC_FIND_FIRST_BIT=y +CONFIG_GENERIC_PCI_IOMAP=y +CONFIG_GENERIC_IOMAP=y +CONFIG_ARCH_USE_CMPXCHG_LOCKREF=y +CONFIG_ARCH_HAS_FAST_MULTIPLIER=y +CONFIG_CRC_CCITT=y +CONFIG_CRC16=y +CONFIG_CRC_T10DIF=y +CONFIG_CRC_ITU_T=y +CONFIG_CRC32=y +# CONFIG_CRC32_SELFTEST is not set +CONFIG_CRC32_SLICEBY8=y +# CONFIG_CRC32_SLICEBY4 is not set +# CONFIG_CRC32_SARWATE is not set +# CONFIG_CRC32_BIT is not set +CONFIG_CRC64=m +# CONFIG_CRC4 is not set +# CONFIG_CRC7 is not set +CONFIG_LIBCRC32C=y +CONFIG_CRC8=y +# CONFIG_RANDOM32_SELFTEST is not set +CONFIG_ZLIB_INFLATE=y +CONFIG_ZLIB_DEFLATE=y +CONFIG_LZO_COMPRESS=y +CONFIG_LZO_DECOMPRESS=y +CONFIG_LZ4_COMPRESS=y +CONFIG_LZ4HC_COMPRESS=y +CONFIG_LZ4_DECOMPRESS=y +CONFIG_XZ_DEC=y +CONFIG_XZ_DEC_X86=y +# CONFIG_XZ_DEC_POWERPC is not set +# CONFIG_XZ_DEC_IA64 is not set 
+# CONFIG_XZ_DEC_ARM is not set +# CONFIG_XZ_DEC_ARMTHUMB is not set +# CONFIG_XZ_DEC_SPARC is not set +CONFIG_XZ_DEC_BCJ=y +# CONFIG_XZ_DEC_TEST is not set +CONFIG_DECOMPRESS_GZIP=y +CONFIG_DECOMPRESS_BZIP2=y +CONFIG_DECOMPRESS_LZMA=y +CONFIG_DECOMPRESS_XZ=y +CONFIG_DECOMPRESS_LZO=y +CONFIG_DECOMPRESS_LZ4=y +CONFIG_GENERIC_ALLOCATOR=y +CONFIG_REED_SOLOMON=y +CONFIG_REED_SOLOMON_ENC8=y +CONFIG_REED_SOLOMON_DEC8=y +CONFIG_TEXTSEARCH=y +CONFIG_TEXTSEARCH_KMP=y +CONFIG_TEXTSEARCH_BM=y +CONFIG_TEXTSEARCH_FSM=y +CONFIG_INTERVAL_TREE=y +CONFIG_RADIX_TREE_MULTIORDER=y +CONFIG_ASSOCIATIVE_ARRAY=y +CONFIG_HAS_IOMEM=y +CONFIG_HAS_IOPORT_MAP=y +CONFIG_HAS_DMA=y +CONFIG_NEED_SG_DMA_LENGTH=y +CONFIG_NEED_DMA_MAP_STATE=y +CONFIG_ARCH_DMA_ADDR_T_64BIT=y +CONFIG_DMA_DIRECT_OPS=y +CONFIG_SWIOTLB=y +CONFIG_SGL_ALLOC=y +CONFIG_IOMMU_HELPER=y +CONFIG_CHECK_SIGNATURE=y +CONFIG_CPU_RMAP=y +CONFIG_DQL=y +CONFIG_GLOB=y +# CONFIG_GLOB_SELFTEST is not set +CONFIG_NLATTR=y +CONFIG_CLZ_TAB=y +CONFIG_CORDIC=m +# CONFIG_DDR is not set +# CONFIG_IRQ_POLL is not set +CONFIG_MPILIB=y +CONFIG_OID_REGISTRY=y +CONFIG_UCS2_STRING=y +CONFIG_FONT_SUPPORT=y +CONFIG_FONT_8x16=y +CONFIG_FONT_AUTOSELECT=y +CONFIG_SG_POOL=y +CONFIG_ARCH_HAS_SG_CHAIN=y +CONFIG_ARCH_HAS_PMEM_API=y +CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE=y +CONFIG_ARCH_HAS_UACCESS_MCSAFE=y +CONFIG_SBITMAP=y +# CONFIG_STRING_SELFTEST is not set + +# +# Kernel hacking +# + +# +# printk and dmesg options +# +CONFIG_PRINTK_TIME=y +CONFIG_CONSOLE_LOGLEVEL_DEFAULT=7 +CONFIG_CONSOLE_LOGLEVEL_QUIET=4 +CONFIG_MESSAGE_LOGLEVEL_DEFAULT=4 +# CONFIG_BOOT_PRINTK_DELAY is not set +CONFIG_DYNAMIC_DEBUG=y + +# +# Compile-time checks and compiler options +# +CONFIG_DEBUG_INFO=y +# CONFIG_DEBUG_INFO_REDUCED is not set +# CONFIG_DEBUG_INFO_SPLIT is not set +# CONFIG_DEBUG_INFO_DWARF4 is not set +# CONFIG_GDB_SCRIPTS is not set +CONFIG_ENABLE_MUST_CHECK=y +CONFIG_FRAME_WARN=2048 +# CONFIG_STRIP_ASM_SYMS is not set +# CONFIG_READABLE_ASM is not set +# 
CONFIG_UNUSED_SYMBOLS is not set +# CONFIG_PAGE_OWNER is not set +CONFIG_DEBUG_FS=y +# CONFIG_HEADERS_CHECK is not set +# CONFIG_DEBUG_SECTION_MISMATCH is not set +CONFIG_SECTION_MISMATCH_WARN_ONLY=y +CONFIG_FRAME_POINTER=y +CONFIG_STACK_VALIDATION=y +# CONFIG_DEBUG_FORCE_WEAK_PER_CPU is not set +CONFIG_MAGIC_SYSRQ=y +CONFIG_MAGIC_SYSRQ_DEFAULT_ENABLE=0x1 +CONFIG_MAGIC_SYSRQ_SERIAL=y +CONFIG_DEBUG_KERNEL=y + +# +# Memory Debugging +# +# CONFIG_PAGE_EXTENSION is not set +# CONFIG_DEBUG_PAGEALLOC is not set +# CONFIG_PAGE_POISONING is not set +# CONFIG_DEBUG_RODATA_TEST is not set +# CONFIG_DEBUG_OBJECTS is not set +# CONFIG_DEBUG_SLAB is not set +CONFIG_HAVE_DEBUG_KMEMLEAK=y +# CONFIG_DEBUG_KMEMLEAK is not set +# CONFIG_DEBUG_STACK_USAGE is not set +# CONFIG_DEBUG_VM is not set +CONFIG_ARCH_HAS_DEBUG_VIRTUAL=y +# CONFIG_DEBUG_VIRTUAL is not set +CONFIG_DEBUG_MEMORY_INIT=y +# CONFIG_DEBUG_PER_CPU_MAPS is not set +CONFIG_HAVE_DEBUG_STACKOVERFLOW=y +CONFIG_DEBUG_STACKOVERFLOW=y +CONFIG_HAVE_ARCH_KASAN=y +# CONFIG_KASAN is not set +CONFIG_ARCH_HAS_KCOV=y +CONFIG_CC_HAS_SANCOV_TRACE_PC=y +# CONFIG_KCOV is not set +# CONFIG_DEBUG_SHIRQ is not set + +# +# Debug Lockups and Hangs +# +# CONFIG_SOFTLOCKUP_DETECTOR is not set +CONFIG_HARDLOCKUP_CHECK_TIMESTAMP=y +# CONFIG_HARDLOCKUP_DETECTOR is not set +CONFIG_DETECT_HUNG_TASK=y +CONFIG_DEFAULT_HUNG_TASK_TIMEOUT=120 +# CONFIG_BOOTPARAM_HUNG_TASK_PANIC is not set +CONFIG_BOOTPARAM_HUNG_TASK_PANIC_VALUE=0 +# CONFIG_WQ_WATCHDOG is not set +# CONFIG_PANIC_ON_OOPS is not set +CONFIG_PANIC_ON_OOPS_VALUE=0 +CONFIG_PANIC_TIMEOUT=10 +CONFIG_SCHED_DEBUG=y +CONFIG_SCHED_INFO=y +CONFIG_SCHEDSTATS=y +CONFIG_SCHED_STACK_END_CHECK=y +# CONFIG_DEBUG_TIMEKEEPING is not set +CONFIG_DEBUG_PREEMPT=y + +# +# Lock Debugging (spinlocks, mutexes, etc...) 
+# +CONFIG_LOCK_DEBUGGING_SUPPORT=y +# CONFIG_PROVE_LOCKING is not set +# CONFIG_LOCK_STAT is not set +# CONFIG_DEBUG_RT_MUTEXES is not set +# CONFIG_DEBUG_SPINLOCK is not set +# CONFIG_DEBUG_MUTEXES is not set +# CONFIG_DEBUG_WW_MUTEX_SLOWPATH is not set +# CONFIG_DEBUG_RWSEMS is not set +# CONFIG_DEBUG_LOCK_ALLOC is not set +CONFIG_DEBUG_ATOMIC_SLEEP=y +# CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set +# CONFIG_LOCK_TORTURE_TEST is not set +# CONFIG_WW_MUTEX_SELFTEST is not set +CONFIG_STACKTRACE=y +# CONFIG_WARN_ALL_UNSEEDED_RANDOM is not set +# CONFIG_DEBUG_KOBJECT is not set +CONFIG_DEBUG_BUGVERBOSE=y +CONFIG_DEBUG_LIST=y +# CONFIG_DEBUG_PI_LIST is not set +# CONFIG_DEBUG_SG is not set +CONFIG_DEBUG_NOTIFIERS=y +CONFIG_DEBUG_CREDENTIALS=y + +# +# RCU Debugging +# +# CONFIG_RCU_PERF_TEST is not set +# CONFIG_RCU_TORTURE_TEST is not set +CONFIG_RCU_CPU_STALL_TIMEOUT=21 +# CONFIG_RCU_TRACE is not set +# CONFIG_RCU_EQS_DEBUG is not set +# CONFIG_DEBUG_WQ_FORCE_RR_CPU is not set +# CONFIG_DEBUG_BLOCK_EXT_DEVT is not set +# CONFIG_CPU_HOTPLUG_STATE_CONTROL is not set +# CONFIG_NOTIFIER_ERROR_INJECTION is not set +# CONFIG_FAULT_INJECTION is not set +CONFIG_LATENCYTOP=y +CONFIG_USER_STACKTRACE_SUPPORT=y +CONFIG_HAVE_FUNCTION_TRACER=y +CONFIG_HAVE_FUNCTION_GRAPH_TRACER=y +CONFIG_HAVE_DYNAMIC_FTRACE=y +CONFIG_HAVE_DYNAMIC_FTRACE_WITH_REGS=y +CONFIG_HAVE_FTRACE_MCOUNT_RECORD=y +CONFIG_HAVE_SYSCALL_TRACEPOINTS=y +CONFIG_HAVE_FENTRY=y +CONFIG_HAVE_C_RECORDMCOUNT=y +CONFIG_TRACING_SUPPORT=y +# CONFIG_FTRACE is not set +CONFIG_PROVIDE_OHCI1394_DMA_INIT=y +# CONFIG_DMA_API_DEBUG is not set +CONFIG_RUNTIME_TESTING_MENU=y +# CONFIG_LKDTM is not set +# CONFIG_TEST_LIST_SORT is not set +# CONFIG_TEST_SORT is not set +# CONFIG_BACKTRACE_SELF_TEST is not set +# CONFIG_RBTREE_TEST is not set +# CONFIG_INTERVAL_TREE_TEST is not set +# CONFIG_PERCPU_TEST is not set +# CONFIG_ATOMIC64_SELFTEST is not set +# CONFIG_TEST_HEXDUMP is not set +# CONFIG_TEST_STRING_HELPERS is not set +# 
CONFIG_TEST_KSTRTOX is not set +# CONFIG_TEST_PRINTF is not set +# CONFIG_TEST_BITMAP is not set +# CONFIG_TEST_BITFIELD is not set +# CONFIG_TEST_UUID is not set +# CONFIG_TEST_OVERFLOW is not set +# CONFIG_TEST_RHASHTABLE is not set +# CONFIG_TEST_HASH is not set +# CONFIG_TEST_IDA is not set +# CONFIG_TEST_LKM is not set +# CONFIG_TEST_USER_COPY is not set +# CONFIG_TEST_BPF is not set +# CONFIG_FIND_BIT_BENCHMARK is not set +# CONFIG_TEST_FIRMWARE is not set +# CONFIG_TEST_SYSCTL is not set +# CONFIG_TEST_UDELAY is not set +# CONFIG_TEST_STATIC_KEYS is not set +# CONFIG_TEST_KMOD is not set +# CONFIG_TEST_MEMCAT_P is not set +# CONFIG_MEMTEST is not set +# CONFIG_BUG_ON_DATA_CORRUPTION is not set +# CONFIG_SAMPLES is not set +CONFIG_HAVE_ARCH_KGDB=y +# CONFIG_KGDB is not set +CONFIG_ARCH_HAS_UBSAN_SANITIZE_ALL=y +# CONFIG_UBSAN is not set +CONFIG_ARCH_HAS_DEVMEM_IS_ALLOWED=y +CONFIG_TRACE_IRQFLAGS_SUPPORT=y +CONFIG_EARLY_PRINTK_USB=y +CONFIG_X86_VERBOSE_BOOTUP=y +CONFIG_EARLY_PRINTK=y +CONFIG_EARLY_PRINTK_DBGP=y +CONFIG_EARLY_PRINTK_EFI=y +# CONFIG_EARLY_PRINTK_USB_XDBC is not set +# CONFIG_X86_PTDUMP is not set +# CONFIG_EFI_PGT_DUMP is not set +# CONFIG_DEBUG_WX is not set +CONFIG_DOUBLEFAULT=y +# CONFIG_DEBUG_TLBFLUSH is not set +# CONFIG_IOMMU_DEBUG is not set +CONFIG_HAVE_MMIOTRACE_SUPPORT=y +CONFIG_IO_DELAY_TYPE_0X80=0 +CONFIG_IO_DELAY_TYPE_0XED=1 +CONFIG_IO_DELAY_TYPE_UDELAY=2 +CONFIG_IO_DELAY_TYPE_NONE=3 +CONFIG_IO_DELAY_0X80=y +# CONFIG_IO_DELAY_0XED is not set +# CONFIG_IO_DELAY_UDELAY is not set +# CONFIG_IO_DELAY_NONE is not set +CONFIG_DEFAULT_IO_DELAY_TYPE=0 +CONFIG_DEBUG_BOOT_PARAMS=y +# CONFIG_CPA_DEBUG is not set +CONFIG_OPTIMIZE_INLINING=y +# CONFIG_DEBUG_ENTRY is not set +# CONFIG_DEBUG_NMI_SELFTEST is not set +CONFIG_X86_DEBUG_FPU=y +# CONFIG_PUNIT_ATOM_DEBUG is not set +# CONFIG_UNWINDER_ORC is not set +CONFIG_UNWINDER_FRAME_POINTER=y +# CONFIG_UNWINDER_GUESS is not set diff --git a/arch/x86/configs/test_defconfig 
b/arch/x86/configs/test_defconfig new file mode 100644 index 0000000000000..8874941c42407 --- /dev/null +++ b/arch/x86/configs/test_defconfig @@ -0,0 +1,6498 @@ +# +# Automatically generated file; DO NOT EDIT. +# Linux/x86_64 4.19.0 Kernel Configuration +# + +# +# Compiler: x86_64-poky-linux-gcc (GCC) 7.3.0 +# +CONFIG_CC_IS_GCC=y +CONFIG_GCC_VERSION=70300 +CONFIG_CLANG_VERSION=0 +CONFIG_IRQ_WORK=y +CONFIG_BUILDTIME_EXTABLE_SORT=y +CONFIG_THREAD_INFO_IN_TASK=y + +# +# General setup +# +CONFIG_INIT_ENV_ARG_LIMIT=32 +# CONFIG_COMPILE_TEST is not set +CONFIG_LOCALVERSION="" +# CONFIG_LOCALVERSION_AUTO is not set +CONFIG_BUILD_SALT="" +CONFIG_HAVE_KERNEL_GZIP=y +CONFIG_HAVE_KERNEL_BZIP2=y +CONFIG_HAVE_KERNEL_LZMA=y +CONFIG_HAVE_KERNEL_XZ=y +CONFIG_HAVE_KERNEL_LZO=y +CONFIG_HAVE_KERNEL_LZ4=y +# CONFIG_KERNEL_GZIP is not set +# CONFIG_KERNEL_BZIP2 is not set +# CONFIG_KERNEL_LZMA is not set +CONFIG_KERNEL_XZ=y +# CONFIG_KERNEL_LZO is not set +# CONFIG_KERNEL_LZ4 is not set +CONFIG_DEFAULT_HOSTNAME="CannotLeaveINTEL" +CONFIG_SWAP=y +CONFIG_SYSVIPC=y +CONFIG_SYSVIPC_SYSCTL=y +CONFIG_POSIX_MQUEUE=y +CONFIG_POSIX_MQUEUE_SYSCTL=y +# CONFIG_CROSS_MEMORY_ATTACH is not set +# CONFIG_USELIB is not set +# CONFIG_AUDIT is not set +CONFIG_HAVE_ARCH_AUDITSYSCALL=y + +# +# IRQ subsystem +# +CONFIG_GENERIC_IRQ_PROBE=y +CONFIG_GENERIC_IRQ_SHOW=y +CONFIG_GENERIC_IRQ_EFFECTIVE_AFF_MASK=y +CONFIG_GENERIC_PENDING_IRQ=y +CONFIG_GENERIC_IRQ_MIGRATION=y +CONFIG_IRQ_DOMAIN=y +CONFIG_IRQ_DOMAIN_HIERARCHY=y +CONFIG_GENERIC_MSI_IRQ=y +CONFIG_GENERIC_MSI_IRQ_DOMAIN=y +CONFIG_GENERIC_IRQ_MATRIX_ALLOCATOR=y +CONFIG_GENERIC_IRQ_RESERVATION_MODE=y +CONFIG_IRQ_FORCED_THREADING=y +CONFIG_SPARSE_IRQ=y +# CONFIG_GENERIC_IRQ_DEBUGFS is not set +CONFIG_CLOCKSOURCE_WATCHDOG=y +CONFIG_ARCH_CLOCKSOURCE_DATA=y +CONFIG_CLOCKSOURCE_VALIDATE_LAST_CYCLE=y +CONFIG_GENERIC_TIME_VSYSCALL=y +CONFIG_GENERIC_CLOCKEVENTS=y +CONFIG_GENERIC_CLOCKEVENTS_BROADCAST=y +CONFIG_GENERIC_CLOCKEVENTS_MIN_ADJUST=y 
+CONFIG_GENERIC_CMOS_UPDATE=y + +# +# Timers subsystem +# +CONFIG_TICK_ONESHOT=y +CONFIG_NO_HZ_COMMON=y +# CONFIG_HZ_PERIODIC is not set +# CONFIG_NO_HZ_IDLE is not set +CONFIG_NO_HZ_FULL=y +CONFIG_NO_HZ=y +CONFIG_HIGH_RES_TIMERS=y +# CONFIG_PREEMPT_NONE is not set +CONFIG_PREEMPT_VOLUNTARY=y +# CONFIG_PREEMPT is not set + +# +# CPU/Task time and stats accounting +# +CONFIG_VIRT_CPU_ACCOUNTING=y +CONFIG_VIRT_CPU_ACCOUNTING_GEN=y +CONFIG_IRQ_TIME_ACCOUNTING=y +CONFIG_BSD_PROCESS_ACCT=y +CONFIG_BSD_PROCESS_ACCT_V3=y +CONFIG_TASKSTATS=y +CONFIG_TASK_DELAY_ACCT=y +CONFIG_TASK_XACCT=y +CONFIG_TASK_IO_ACCOUNTING=y +CONFIG_CPU_ISOLATION=y + +# +# RCU Subsystem +# +CONFIG_TREE_RCU=y +CONFIG_RCU_EXPERT=y +CONFIG_SRCU=y +CONFIG_TREE_SRCU=y +CONFIG_RCU_STALL_COMMON=y +CONFIG_RCU_NEED_SEGCBLIST=y +CONFIG_CONTEXT_TRACKING=y +# CONFIG_CONTEXT_TRACKING_FORCE is not set +CONFIG_RCU_FANOUT=32 +CONFIG_RCU_FANOUT_LEAF=16 +CONFIG_RCU_FAST_NO_HZ=y +CONFIG_RCU_NOCB_CPU=y +# CONFIG_IKCONFIG is not set +CONFIG_LOG_BUF_SHIFT=17 +CONFIG_LOG_CPU_MAX_BUF_SHIFT=12 +CONFIG_PRINTK_SAFE_LOG_BUF_SHIFT=13 +CONFIG_HAVE_UNSTABLE_SCHED_CLOCK=y +CONFIG_ARCH_SUPPORTS_NUMA_BALANCING=y +CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH=y +CONFIG_ARCH_SUPPORTS_INT128=y +# CONFIG_NUMA_BALANCING is not set +CONFIG_CGROUPS=y +CONFIG_PAGE_COUNTER=y +CONFIG_MEMCG=y +CONFIG_MEMCG_SWAP=y +CONFIG_MEMCG_SWAP_ENABLED=y +CONFIG_MEMCG_KMEM=y +CONFIG_BLK_CGROUP=y +# CONFIG_DEBUG_BLK_CGROUP is not set +CONFIG_CGROUP_WRITEBACK=y +CONFIG_CGROUP_SCHED=y +CONFIG_FAIR_GROUP_SCHED=y +CONFIG_CFS_BANDWIDTH=y +CONFIG_RT_GROUP_SCHED=y +CONFIG_CGROUP_PIDS=y +# CONFIG_CGROUP_RDMA is not set +CONFIG_CGROUP_FREEZER=y +CONFIG_CGROUP_HUGETLB=y +CONFIG_CPUSETS=y +CONFIG_PROC_PID_CPUSET=y +CONFIG_CGROUP_DEVICE=y +CONFIG_CGROUP_CPUACCT=y +CONFIG_CGROUP_PERF=y +CONFIG_CGROUP_BPF=y +# CONFIG_CGROUP_DEBUG is not set +CONFIG_SOCK_CGROUP_DATA=y +CONFIG_NAMESPACES=y +CONFIG_UTS_NS=y +CONFIG_IPC_NS=y +CONFIG_USER_NS=y +CONFIG_PID_NS=y +CONFIG_NET_NS=y 
+CONFIG_CHECKPOINT_RESTORE=y +CONFIG_SCHED_AUTOGROUP=y +# CONFIG_SYSFS_DEPRECATED is not set +CONFIG_RELAY=y +CONFIG_BLK_DEV_INITRD=y +CONFIG_INITRAMFS_SOURCE="" +CONFIG_RD_GZIP=y +# CONFIG_RD_BZIP2 is not set +# CONFIG_RD_LZMA is not set +CONFIG_RD_XZ=y +CONFIG_RD_LZO=y +CONFIG_RD_LZ4=y +CONFIG_CC_OPTIMIZE_FOR_PERFORMANCE=y +# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set +CONFIG_SYSCTL=y +CONFIG_ANON_INODES=y +CONFIG_HAVE_UID16=y +CONFIG_SYSCTL_EXCEPTION_TRACE=y +CONFIG_HAVE_PCSPKR_PLATFORM=y +CONFIG_BPF=y +CONFIG_EXPERT=y +# CONFIG_UID16 is not set +CONFIG_MULTIUSER=y +CONFIG_SGETMASK_SYSCALL=y +# CONFIG_SYSFS_SYSCALL is not set +CONFIG_SYSCTL_SYSCALL=y +CONFIG_FHANDLE=y +CONFIG_POSIX_TIMERS=y +CONFIG_PRINTK=y +CONFIG_PRINTK_NMI=y +CONFIG_BUG=y +CONFIG_ELF_CORE=y +# CONFIG_PCSPKR_PLATFORM is not set +CONFIG_BASE_FULL=y +CONFIG_FUTEX=y +CONFIG_FUTEX_PI=y +CONFIG_EPOLL=y +CONFIG_SIGNALFD=y +CONFIG_TIMERFD=y +CONFIG_EVENTFD=y +CONFIG_SHMEM=y +CONFIG_AIO=y +CONFIG_ADVISE_SYSCALLS=y +CONFIG_MEMBARRIER=y +CONFIG_KALLSYMS=y +CONFIG_KALLSYMS_ALL=y +CONFIG_KALLSYMS_ABSOLUTE_PERCPU=y +CONFIG_KALLSYMS_BASE_RELATIVE=y +CONFIG_BPF_SYSCALL=y +CONFIG_BPF_JIT_ALWAYS_ON=y +# CONFIG_USERFAULTFD is not set +CONFIG_ARCH_HAS_MEMBARRIER_SYNC_CORE=y +CONFIG_RSEQ=y +# CONFIG_DEBUG_RSEQ is not set +# CONFIG_EMBEDDED is not set +CONFIG_HAVE_PERF_EVENTS=y +# CONFIG_PC104 is not set + +# +# Kernel Performance Events And Counters +# +CONFIG_PERF_EVENTS=y +# CONFIG_DEBUG_PERF_USE_VMALLOC is not set +CONFIG_VM_EVENT_COUNTERS=y +# CONFIG_COMPAT_BRK is not set +CONFIG_SLAB=y +# CONFIG_SLUB is not set +# CONFIG_SLOB is not set +CONFIG_SLAB_MERGE_DEFAULT=y +CONFIG_SLAB_FREELIST_RANDOM=y +CONFIG_SYSTEM_DATA_VERIFICATION=y +CONFIG_PROFILING=y +CONFIG_TRACEPOINTS=y +CONFIG_64BIT=y +CONFIG_X86_64=y +CONFIG_X86=y +CONFIG_INSTRUCTION_DECODER=y +CONFIG_OUTPUT_FORMAT="elf64-x86-64" +CONFIG_ARCH_DEFCONFIG="arch/x86/configs/x86_64_defconfig" +CONFIG_LOCKDEP_SUPPORT=y +CONFIG_STACKTRACE_SUPPORT=y +CONFIG_MMU=y 
+CONFIG_ARCH_MMAP_RND_BITS_MIN=28 +CONFIG_ARCH_MMAP_RND_BITS_MAX=32 +CONFIG_ARCH_MMAP_RND_COMPAT_BITS_MIN=8 +CONFIG_ARCH_MMAP_RND_COMPAT_BITS_MAX=16 +CONFIG_GENERIC_ISA_DMA=y +CONFIG_GENERIC_BUG=y +CONFIG_GENERIC_BUG_RELATIVE_POINTERS=y +CONFIG_GENERIC_HWEIGHT=y +CONFIG_ARCH_MAY_HAVE_PC_FDC=y +CONFIG_RWSEM_XCHGADD_ALGORITHM=y +CONFIG_GENERIC_CALIBRATE_DELAY=y +CONFIG_ARCH_HAS_CPU_RELAX=y +CONFIG_ARCH_HAS_CACHE_LINE_SIZE=y +CONFIG_ARCH_HAS_FILTER_PGPROT=y +CONFIG_HAVE_SETUP_PER_CPU_AREA=y +CONFIG_NEED_PER_CPU_EMBED_FIRST_CHUNK=y +CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK=y +CONFIG_ARCH_HIBERNATION_POSSIBLE=y +CONFIG_ARCH_SUSPEND_POSSIBLE=y +CONFIG_ARCH_WANT_HUGE_PMD_SHARE=y +CONFIG_ARCH_WANT_GENERAL_HUGETLB=y +CONFIG_ZONE_DMA32=y +CONFIG_AUDIT_ARCH=y +CONFIG_ARCH_SUPPORTS_OPTIMIZED_INLINING=y +CONFIG_ARCH_SUPPORTS_DEBUG_PAGEALLOC=y +CONFIG_HAVE_INTEL_TXT=y +CONFIG_X86_64_SMP=y +CONFIG_ARCH_SUPPORTS_UPROBES=y +CONFIG_FIX_EARLYCON_MEM=y +CONFIG_PGTABLE_LEVELS=4 +CONFIG_CC_HAS_SANE_STACKPROTECTOR=y + +# +# Processor type and features +# +CONFIG_ZONE_DMA=y +CONFIG_SMP=y +CONFIG_X86_FEATURE_NAMES=y +CONFIG_X86_X2APIC=y +CONFIG_X86_MPPARSE=y +# CONFIG_GOLDFISH is not set +CONFIG_RETPOLINE=y +CONFIG_INTEL_RDT=y +# CONFIG_X86_EXTENDED_PLATFORM is not set +CONFIG_X86_INTEL_LPSS=y +# CONFIG_X86_AMD_PLATFORM_DEVICE is not set +CONFIG_IOSF_MBI=y +# CONFIG_IOSF_MBI_DEBUG is not set +CONFIG_X86_SUPPORTS_MEMORY_FAILURE=y +# CONFIG_SCHED_OMIT_FRAME_POINTER is not set +CONFIG_HYPERVISOR_GUEST=y +# CONFIG_PARAVIRT is not set +# CONFIG_JAILHOUSE_GUEST is not set +CONFIG_NO_BOOTMEM=y +# CONFIG_MK8 is not set +# CONFIG_MPSC is not set +CONFIG_MCORE2=y +# CONFIG_MATOM is not set +# CONFIG_GENERIC_CPU is not set +CONFIG_X86_INTERNODE_CACHE_SHIFT=6 +CONFIG_X86_L1_CACHE_SHIFT=6 +CONFIG_X86_INTEL_USERCOPY=y +CONFIG_X86_USE_PPRO_CHECKSUM=y +CONFIG_X86_P6_NOP=y +CONFIG_X86_TSC=y +CONFIG_X86_CMPXCHG64=y +CONFIG_X86_CMOV=y +CONFIG_X86_MINIMUM_CPU_FAMILY=64 +CONFIG_X86_DEBUGCTLMSR=y 
+CONFIG_PROCESSOR_SELECT=y +CONFIG_CPU_SUP_INTEL=y +CONFIG_CPU_SUP_AMD=y +# CONFIG_CPU_SUP_CENTAUR is not set +CONFIG_HPET_TIMER=y +CONFIG_HPET_EMULATE_RTC=y +CONFIG_DMI=y +# CONFIG_GART_IOMMU is not set +# CONFIG_CALGARY_IOMMU is not set +# CONFIG_MAXSMP is not set +CONFIG_NR_CPUS_RANGE_BEGIN=2 +CONFIG_NR_CPUS_RANGE_END=512 +CONFIG_NR_CPUS_DEFAULT=64 +CONFIG_NR_CPUS=320 +CONFIG_SCHED_SMT=y +CONFIG_SCHED_MC=y +CONFIG_SCHED_MC_PRIO=y +CONFIG_X86_LOCAL_APIC=y +CONFIG_X86_IO_APIC=y +# CONFIG_X86_REROUTE_FOR_BROKEN_BOOT_IRQS is not set +CONFIG_X86_MCE=y +CONFIG_X86_MCELOG_LEGACY=y +CONFIG_X86_MCE_INTEL=y +CONFIG_X86_MCE_AMD=y +CONFIG_X86_MCE_THRESHOLD=y +CONFIG_X86_MCE_INJECT=m +CONFIG_X86_THERMAL_VECTOR=y + +# +# Performance monitoring +# +CONFIG_PERF_EVENTS_INTEL_UNCORE=y +CONFIG_PERF_EVENTS_INTEL_RAPL=y +CONFIG_PERF_EVENTS_INTEL_CSTATE=y +CONFIG_PERF_EVENTS_AMD_POWER=m +# CONFIG_X86_VSYSCALL_EMULATION is not set +# CONFIG_I8K is not set +CONFIG_MICROCODE=y +CONFIG_MICROCODE_INTEL=y +# CONFIG_MICROCODE_AMD is not set +CONFIG_MICROCODE_OLD_INTERFACE=y +CONFIG_X86_MSR=y +CONFIG_X86_CPUID=y +# CONFIG_X86_5LEVEL is not set +CONFIG_X86_DIRECT_GBPAGES=y +# CONFIG_X86_CPA_STATISTICS is not set +CONFIG_ARCH_HAS_MEM_ENCRYPT=y +# CONFIG_AMD_MEM_ENCRYPT is not set +CONFIG_NUMA=y +# CONFIG_AMD_NUMA is not set +CONFIG_X86_64_ACPI_NUMA=y +CONFIG_NODES_SPAN_OTHER_NODES=y +# CONFIG_NUMA_EMU is not set +CONFIG_NODES_SHIFT=2 +CONFIG_ARCH_SPARSEMEM_ENABLE=y +CONFIG_ARCH_SPARSEMEM_DEFAULT=y +CONFIG_ARCH_SELECT_MEMORY_MODEL=y +# CONFIG_ARCH_MEMORY_PROBE is not set +CONFIG_ILLEGAL_POINTER_VALUE=0xdead000000000000 +# CONFIG_X86_PMEM_LEGACY is not set +CONFIG_X86_CHECK_BIOS_CORRUPTION=y +# CONFIG_X86_BOOTPARAM_MEMORY_CORRUPTION_CHECK is not set +CONFIG_X86_RESERVE_LOW=64 +CONFIG_MTRR=y +CONFIG_MTRR_SANITIZER=y +CONFIG_MTRR_SANITIZER_ENABLE_DEFAULT=1 +CONFIG_MTRR_SANITIZER_SPARE_REG_NR_DEFAULT=0 +CONFIG_X86_PAT=y +CONFIG_ARCH_USES_PG_UNCACHED=y +CONFIG_ARCH_RANDOM=y +CONFIG_X86_SMAP=y 
+CONFIG_X86_INTEL_UMIP=y +# CONFIG_X86_INTEL_MPX is not set +# CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS is not set +CONFIG_EFI=y +CONFIG_EFI_STUB=y +# CONFIG_EFI_MIXED is not set +CONFIG_SECCOMP=y +# CONFIG_HZ_100 is not set +# CONFIG_HZ_250 is not set +# CONFIG_HZ_300 is not set +CONFIG_HZ_1000=y +CONFIG_HZ=1000 +CONFIG_SCHED_HRTICK=y +# CONFIG_KEXEC is not set +# CONFIG_KEXEC_FILE is not set +# CONFIG_CRASH_DUMP is not set +CONFIG_PHYSICAL_START=0x100000 +CONFIG_RELOCATABLE=y +CONFIG_RANDOMIZE_BASE=y +CONFIG_X86_NEED_RELOCS=y +CONFIG_PHYSICAL_ALIGN=0x1000000 +CONFIG_DYNAMIC_MEMORY_LAYOUT=y +CONFIG_RANDOMIZE_MEMORY=y +CONFIG_RANDOMIZE_MEMORY_PHYSICAL_PADDING=0x1 +CONFIG_HOTPLUG_CPU=y +# CONFIG_BOOTPARAM_HOTPLUG_CPU0 is not set +# CONFIG_DEBUG_HOTPLUG_CPU0 is not set +# CONFIG_COMPAT_VDSO is not set +# CONFIG_LEGACY_VSYSCALL_EMULATE is not set +CONFIG_LEGACY_VSYSCALL_NONE=y +# CONFIG_CMDLINE_BOOL is not set +# CONFIG_MODIFY_LDT_SYSCALL is not set +CONFIG_HAVE_LIVEPATCH=y +CONFIG_ARCH_HAS_ADD_PAGES=y +CONFIG_ARCH_ENABLE_MEMORY_HOTPLUG=y +CONFIG_ARCH_ENABLE_MEMORY_HOTREMOVE=y +CONFIG_USE_PERCPU_NUMA_NODE_ID=y +CONFIG_ARCH_ENABLE_SPLIT_PMD_PTLOCK=y +CONFIG_ARCH_ENABLE_HUGEPAGE_MIGRATION=y +CONFIG_ARCH_ENABLE_THP_MIGRATION=y + +# +# Power management and ACPI options +# +CONFIG_SUSPEND=y +CONFIG_SUSPEND_FREEZER=y +# CONFIG_SUSPEND_SKIP_SYNC is not set +# CONFIG_HIBERNATION is not set +CONFIG_PM_SLEEP=y +CONFIG_PM_SLEEP_SMP=y +# CONFIG_PM_AUTOSLEEP is not set +# CONFIG_PM_WAKELOCKS is not set +CONFIG_PM=y +CONFIG_PM_DEBUG=y +CONFIG_PM_ADVANCED_DEBUG=y +# CONFIG_PM_TEST_SUSPEND is not set +CONFIG_PM_SLEEP_DEBUG=y +# CONFIG_DPM_WATCHDOG is not set +# CONFIG_PM_TRACE_RTC is not set +CONFIG_PM_CLK=y +# CONFIG_WQ_POWER_EFFICIENT_DEFAULT is not set +CONFIG_ARCH_SUPPORTS_ACPI=y +CONFIG_ACPI=y +CONFIG_ACPI_LEGACY_TABLES_LOOKUP=y +CONFIG_ARCH_MIGHT_HAVE_ACPI_PDC=y +CONFIG_ACPI_SYSTEM_POWER_STATES_SUPPORT=y +# CONFIG_ACPI_DEBUGGER is not set +CONFIG_ACPI_SPCR_TABLE=y 
+CONFIG_ACPI_LPIT=y +CONFIG_ACPI_SLEEP=y +# CONFIG_ACPI_PROCFS_POWER is not set +# CONFIG_ACPI_REV_OVERRIDE_POSSIBLE is not set +CONFIG_ACPI_EC_DEBUGFS=y +CONFIG_ACPI_AC=m +CONFIG_ACPI_BATTERY=m +CONFIG_ACPI_BUTTON=y +CONFIG_ACPI_VIDEO=y +CONFIG_ACPI_FAN=y +CONFIG_ACPI_TAD=y +# CONFIG_ACPI_DOCK is not set +CONFIG_ACPI_CPU_FREQ_PSS=y +CONFIG_ACPI_PROCESSOR_CSTATE=y +CONFIG_ACPI_PROCESSOR_IDLE=y +CONFIG_ACPI_CPPC_LIB=y +CONFIG_ACPI_PROCESSOR=y +CONFIG_ACPI_IPMI=m +CONFIG_ACPI_HOTPLUG_CPU=y +CONFIG_ACPI_PROCESSOR_AGGREGATOR=y +CONFIG_ACPI_THERMAL=y +CONFIG_ACPI_NUMA=y +CONFIG_ARCH_HAS_ACPI_TABLE_UPGRADE=y +# CONFIG_ACPI_TABLE_UPGRADE is not set +CONFIG_ACPI_DEBUG=y +# CONFIG_ACPI_PCI_SLOT is not set +CONFIG_ACPI_CONTAINER=y +CONFIG_ACPI_HOTPLUG_MEMORY=y +CONFIG_ACPI_HOTPLUG_IOAPIC=y +# CONFIG_ACPI_SBS is not set +CONFIG_ACPI_HED=m +# CONFIG_ACPI_CUSTOM_METHOD is not set +# CONFIG_ACPI_BGRT is not set +# CONFIG_ACPI_REDUCED_HARDWARE_ONLY is not set +CONFIG_ACPI_NFIT=m +CONFIG_HAVE_ACPI_APEI=y +CONFIG_HAVE_ACPI_APEI_NMI=y +CONFIG_ACPI_APEI=y +# CONFIG_ACPI_APEI_GHES is not set +# CONFIG_ACPI_APEI_EINJ is not set +# CONFIG_ACPI_APEI_ERST_DEBUG is not set +# CONFIG_DPTF_POWER is not set +# CONFIG_PMIC_OPREGION is not set +# CONFIG_ACPI_CONFIGFS is not set +CONFIG_X86_PM_TIMER=y +# CONFIG_SFI is not set + +# +# CPU Frequency scaling +# +CONFIG_CPU_FREQ=y +CONFIG_CPU_FREQ_GOV_ATTR_SET=y +CONFIG_CPU_FREQ_GOV_COMMON=y +CONFIG_CPU_FREQ_STAT=y +CONFIG_CPU_FREQ_DEFAULT_GOV_PERFORMANCE=y +# CONFIG_CPU_FREQ_DEFAULT_GOV_POWERSAVE is not set +# CONFIG_CPU_FREQ_DEFAULT_GOV_USERSPACE is not set +# CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND is not set +# CONFIG_CPU_FREQ_DEFAULT_GOV_CONSERVATIVE is not set +# CONFIG_CPU_FREQ_DEFAULT_GOV_SCHEDUTIL is not set +CONFIG_CPU_FREQ_GOV_PERFORMANCE=y +# CONFIG_CPU_FREQ_GOV_POWERSAVE is not set +# CONFIG_CPU_FREQ_GOV_USERSPACE is not set +CONFIG_CPU_FREQ_GOV_ONDEMAND=y +# CONFIG_CPU_FREQ_GOV_CONSERVATIVE is not set +# CONFIG_CPU_FREQ_GOV_SCHEDUTIL is 
not set + +# +# CPU frequency scaling drivers +# +CONFIG_X86_INTEL_PSTATE=y +# CONFIG_X86_PCC_CPUFREQ is not set +CONFIG_X86_ACPI_CPUFREQ=y +# CONFIG_X86_ACPI_CPUFREQ_CPB is not set +# CONFIG_X86_POWERNOW_K8 is not set +CONFIG_X86_AMD_FREQ_SENSITIVITY=m +# CONFIG_X86_SPEEDSTEP_CENTRINO is not set +# CONFIG_X86_P4_CLOCKMOD is not set + +# +# shared options +# + +# +# CPU Idle +# +CONFIG_CPU_IDLE=y +CONFIG_CPU_IDLE_GOV_LADDER=y +CONFIG_CPU_IDLE_GOV_MENU=y +CONFIG_INTEL_IDLE=y + +# +# Bus options (PCI etc.) +# +CONFIG_PCI=y +CONFIG_PCI_DIRECT=y +CONFIG_PCI_MMCONFIG=y +CONFIG_PCI_DOMAINS=y +CONFIG_MMCONF_FAM10H=y +# CONFIG_PCI_CNB20LE_QUIRK is not set +CONFIG_PCIEPORTBUS=y +CONFIG_HOTPLUG_PCI_PCIE=y +# CONFIG_PCIEAER is not set +CONFIG_PCIEASPM=y +# CONFIG_PCIEASPM_DEBUG is not set +# CONFIG_PCIEASPM_DEFAULT is not set +CONFIG_PCIEASPM_POWERSAVE=y +# CONFIG_PCIEASPM_POWER_SUPERSAVE is not set +# CONFIG_PCIEASPM_PERFORMANCE is not set +CONFIG_PCIE_PME=y +# CONFIG_PCIE_PTM is not set +CONFIG_PCI_MSI=y +CONFIG_PCI_MSI_IRQ_DOMAIN=y +CONFIG_PCI_QUIRKS=y +# CONFIG_PCI_DEBUG is not set +# CONFIG_PCI_REALLOC_ENABLE_AUTO is not set +CONFIG_PCI_STUB=y +# CONFIG_PCI_PF_STUB is not set +CONFIG_PCI_ATS=y +CONFIG_PCI_LOCKLESS_CONFIG=y +CONFIG_PCI_IOV=y +# CONFIG_PCI_PRI is not set +# CONFIG_PCI_PASID is not set +CONFIG_PCI_LABEL=y +CONFIG_HOTPLUG_PCI=y +CONFIG_HOTPLUG_PCI_ACPI=y +CONFIG_HOTPLUG_PCI_ACPI_IBM=m +# CONFIG_HOTPLUG_PCI_CPCI is not set +# CONFIG_HOTPLUG_PCI_SHPC is not set + +# +# PCI controller drivers +# + +# +# Cadence PCIe controllers support +# +# CONFIG_VMD is not set + +# +# DesignWare PCI Core Support +# +# CONFIG_PCIE_DW_PLAT_HOST is not set +# CONFIG_PCIE_DW_PLAT_EP is not set + +# +# PCI Endpoint +# +CONFIG_PCI_ENDPOINT=y +CONFIG_PCI_ENDPOINT_CONFIGFS=y +# CONFIG_PCI_EPF_TEST is not set + +# +# PCI switch controller drivers +# +CONFIG_PCI_SW_SWITCHTEC=m +# CONFIG_ISA_BUS is not set +CONFIG_ISA_DMA_API=y +CONFIG_AMD_NB=y +# CONFIG_PCCARD is not set +# 
CONFIG_RAPIDIO is not set +# CONFIG_X86_SYSFB is not set + +# +# Binary Emulations +# +CONFIG_IA32_EMULATION=y +# CONFIG_IA32_AOUT is not set +# CONFIG_X86_X32 is not set +CONFIG_COMPAT_32=y +CONFIG_COMPAT=y +CONFIG_COMPAT_FOR_U64_ALIGNMENT=y +CONFIG_SYSVIPC_COMPAT=y +CONFIG_X86_DEV_DMA_OPS=y +CONFIG_HAVE_GENERIC_GUP=y + +# +# Firmware Drivers +# +# CONFIG_EDD is not set +CONFIG_FIRMWARE_MEMMAP=y +# CONFIG_DELL_RBU is not set +CONFIG_DCDBAS=m +CONFIG_DMIID=y +CONFIG_DMI_SYSFS=y +CONFIG_DMI_SCAN_MACHINE_NON_EFI_FALLBACK=y +# CONFIG_ISCSI_IBFT_FIND is not set +CONFIG_FW_CFG_SYSFS=m +CONFIG_FW_CFG_SYSFS_CMDLINE=y +# CONFIG_GOOGLE_FIRMWARE is not set + +# +# EFI (Extensible Firmware Interface) Support +# +CONFIG_EFI_VARS=y +CONFIG_EFI_ESRT=y +CONFIG_EFI_VARS_PSTORE=y +# CONFIG_EFI_VARS_PSTORE_DEFAULT_DISABLE is not set +# CONFIG_EFI_FAKE_MEMMAP is not set +CONFIG_EFI_RUNTIME_WRAPPERS=y +CONFIG_EFI_BOOTLOADER_CONTROL=y +CONFIG_EFI_CAPSULE_LOADER=y +# CONFIG_EFI_TEST is not set +CONFIG_APPLE_PROPERTIES=y +# CONFIG_RESET_ATTACK_MITIGATION is not set +CONFIG_UEFI_CPER=y +CONFIG_UEFI_CPER_X86=y +CONFIG_EFI_DEV_PATH_PARSER=y + +# +# Tegra firmware driver +# +CONFIG_HAVE_KVM=y +CONFIG_HAVE_KVM_IRQCHIP=y +CONFIG_HAVE_KVM_IRQFD=y +CONFIG_HAVE_KVM_IRQ_ROUTING=y +CONFIG_HAVE_KVM_EVENTFD=y +CONFIG_KVM_MMIO=y +CONFIG_KVM_ASYNC_PF=y +CONFIG_HAVE_KVM_MSI=y +CONFIG_HAVE_KVM_CPU_RELAX_INTERCEPT=y +CONFIG_KVM_VFIO=y +CONFIG_KVM_GENERIC_DIRTYLOG_READ_PROTECT=y +CONFIG_KVM_COMPAT=y +CONFIG_HAVE_KVM_IRQ_BYPASS=y +CONFIG_VIRTUALIZATION=y +CONFIG_KVM=y +CONFIG_KVM_INTEL=y +CONFIG_KVM_AMD=m +CONFIG_KVM_MMU_AUDIT=y +CONFIG_VHOST_NET=y +CONFIG_VHOST_VSOCK=m +CONFIG_VHOST=y +# CONFIG_VHOST_CROSS_ENDIAN_LEGACY is not set + +# +# General architecture-dependent options +# +CONFIG_HOTPLUG_SMT=y +# CONFIG_OPROFILE is not set +CONFIG_HAVE_OPROFILE=y +CONFIG_OPROFILE_NMI_TIMER=y +# CONFIG_KPROBES is not set +CONFIG_JUMP_LABEL=y +# CONFIG_STATIC_KEYS_SELFTEST is not set 
+CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS=y +CONFIG_ARCH_USE_BUILTIN_BSWAP=y +CONFIG_USER_RETURN_NOTIFIER=y +CONFIG_HAVE_IOREMAP_PROT=y +CONFIG_HAVE_KPROBES=y +CONFIG_HAVE_KRETPROBES=y +CONFIG_HAVE_OPTPROBES=y +CONFIG_HAVE_KPROBES_ON_FTRACE=y +CONFIG_HAVE_FUNCTION_ERROR_INJECTION=y +CONFIG_HAVE_NMI=y +CONFIG_HAVE_ARCH_TRACEHOOK=y +CONFIG_HAVE_DMA_CONTIGUOUS=y +CONFIG_GENERIC_SMP_IDLE_THREAD=y +CONFIG_ARCH_HAS_FORTIFY_SOURCE=y +CONFIG_ARCH_HAS_SET_MEMORY=y +CONFIG_HAVE_ARCH_THREAD_STRUCT_WHITELIST=y +CONFIG_ARCH_WANTS_DYNAMIC_TASK_STRUCT=y +CONFIG_HAVE_REGS_AND_STACK_ACCESS_API=y +CONFIG_HAVE_RSEQ=y +CONFIG_HAVE_CLK=y +CONFIG_HAVE_HW_BREAKPOINT=y +CONFIG_HAVE_MIXED_BREAKPOINTS_REGS=y +CONFIG_HAVE_USER_RETURN_NOTIFIER=y +CONFIG_HAVE_PERF_EVENTS_NMI=y +CONFIG_HAVE_HARDLOCKUP_DETECTOR_PERF=y +CONFIG_HAVE_PERF_REGS=y +CONFIG_HAVE_PERF_USER_STACK_DUMP=y +CONFIG_HAVE_ARCH_JUMP_LABEL=y +CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG=y +CONFIG_HAVE_CMPXCHG_LOCAL=y +CONFIG_HAVE_CMPXCHG_DOUBLE=y +CONFIG_ARCH_WANT_COMPAT_IPC_PARSE_VERSION=y +CONFIG_ARCH_WANT_OLD_COMPAT_IPC=y +CONFIG_HAVE_ARCH_SECCOMP_FILTER=y +CONFIG_SECCOMP_FILTER=y +CONFIG_HAVE_STACKPROTECTOR=y +CONFIG_CC_HAS_STACKPROTECTOR_NONE=y +CONFIG_STACKPROTECTOR=y +CONFIG_STACKPROTECTOR_STRONG=y +CONFIG_HAVE_ARCH_WITHIN_STACK_FRAMES=y +CONFIG_HAVE_CONTEXT_TRACKING=y +CONFIG_HAVE_VIRT_CPU_ACCOUNTING_GEN=y +CONFIG_HAVE_IRQ_TIME_ACCOUNTING=y +CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE=y +CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD=y +CONFIG_HAVE_ARCH_HUGE_VMAP=y +CONFIG_HAVE_ARCH_SOFT_DIRTY=y +CONFIG_HAVE_MOD_ARCH_SPECIFIC=y +CONFIG_MODULES_USE_ELF_RELA=y +CONFIG_HAVE_IRQ_EXIT_ON_IRQ_STACK=y +CONFIG_ARCH_HAS_ELF_RANDOMIZE=y +CONFIG_HAVE_ARCH_MMAP_RND_BITS=y +CONFIG_HAVE_EXIT_THREAD=y +CONFIG_ARCH_MMAP_RND_BITS=28 +CONFIG_HAVE_ARCH_MMAP_RND_COMPAT_BITS=y +CONFIG_ARCH_MMAP_RND_COMPAT_BITS=8 +CONFIG_HAVE_ARCH_COMPAT_MMAP_BASES=y +CONFIG_HAVE_COPY_THREAD_TLS=y +CONFIG_HAVE_STACK_VALIDATION=y +CONFIG_HAVE_RELIABLE_STACKTRACE=y 
+CONFIG_OLD_SIGSUSPEND3=y +CONFIG_COMPAT_OLD_SIGACTION=y +CONFIG_COMPAT_32BIT_TIME=y +CONFIG_HAVE_ARCH_VMAP_STACK=y +CONFIG_VMAP_STACK=y +CONFIG_ARCH_HAS_STRICT_KERNEL_RWX=y +CONFIG_STRICT_KERNEL_RWX=y +CONFIG_ARCH_HAS_STRICT_MODULE_RWX=y +CONFIG_STRICT_MODULE_RWX=y +CONFIG_ARCH_HAS_REFCOUNT=y +CONFIG_REFCOUNT_FULL=y +CONFIG_HAVE_ARCH_PREL32_RELOCATIONS=y + +# +# GCOV-based kernel profiling +# +# CONFIG_GCOV_KERNEL is not set +CONFIG_ARCH_HAS_GCOV_PROFILE_ALL=y +CONFIG_PLUGIN_HOSTCC="" +CONFIG_HAVE_GCC_PLUGINS=y +CONFIG_RT_MUTEXES=y +CONFIG_BASE_SMALL=0 +CONFIG_MODULES=y +# CONFIG_MODULE_FORCE_LOAD is not set +CONFIG_MODULE_UNLOAD=y +# CONFIG_MODULE_FORCE_UNLOAD is not set +# CONFIG_MODVERSIONS is not set +# CONFIG_MODULE_SRCVERSION_ALL is not set +CONFIG_MODULE_SIG=y +CONFIG_MODULE_SIG_FORCE=y +CONFIG_MODULE_SIG_ALL=y +# CONFIG_MODULE_SIG_SHA1 is not set +# CONFIG_MODULE_SIG_SHA224 is not set +# CONFIG_MODULE_SIG_SHA256 is not set +# CONFIG_MODULE_SIG_SHA384 is not set +CONFIG_MODULE_SIG_SHA512=y +CONFIG_MODULE_SIG_HASH="sha512" +# CONFIG_MODULE_COMPRESS is not set +# CONFIG_TRIM_UNUSED_KSYMS is not set +CONFIG_MODULES_TREE_LOOKUP=y +CONFIG_BLOCK=y +CONFIG_BLK_SCSI_REQUEST=y +CONFIG_BLK_DEV_BSG=y +CONFIG_BLK_DEV_BSGLIB=y +CONFIG_BLK_DEV_INTEGRITY=y +CONFIG_BLK_DEV_ZONED=y +CONFIG_BLK_DEV_THROTTLING=y +# CONFIG_BLK_DEV_THROTTLING_LOW is not set +# CONFIG_BLK_CMDLINE_PARSER is not set +# CONFIG_BLK_WBT is not set +# CONFIG_BLK_CGROUP_IOLATENCY is not set +# CONFIG_BLK_DEBUG_FS is not set +CONFIG_BLK_SED_OPAL=y + +# +# Partition Types +# +CONFIG_PARTITION_ADVANCED=y +# CONFIG_ACORN_PARTITION is not set +# CONFIG_AIX_PARTITION is not set +# CONFIG_OSF_PARTITION is not set +# CONFIG_AMIGA_PARTITION is not set +# CONFIG_ATARI_PARTITION is not set +# CONFIG_MAC_PARTITION is not set +CONFIG_MSDOS_PARTITION=y +# CONFIG_BSD_DISKLABEL is not set +# CONFIG_MINIX_SUBPARTITION is not set +# CONFIG_SOLARIS_X86_PARTITION is not set +# CONFIG_UNIXWARE_DISKLABEL is not set +# 
CONFIG_LDM_PARTITION is not set +# CONFIG_SGI_PARTITION is not set +# CONFIG_ULTRIX_PARTITION is not set +# CONFIG_SUN_PARTITION is not set +# CONFIG_KARMA_PARTITION is not set +CONFIG_EFI_PARTITION=y +# CONFIG_SYSV68_PARTITION is not set +# CONFIG_CMDLINE_PARTITION is not set +CONFIG_BLOCK_COMPAT=y +CONFIG_BLK_MQ_PCI=y +CONFIG_BLK_MQ_VIRTIO=y +CONFIG_BLK_MQ_RDMA=y + +# +# IO Schedulers +# +CONFIG_IOSCHED_NOOP=y +CONFIG_IOSCHED_DEADLINE=y +CONFIG_IOSCHED_CFQ=y +CONFIG_CFQ_GROUP_IOSCHED=y +# CONFIG_DEFAULT_DEADLINE is not set +CONFIG_DEFAULT_CFQ=y +# CONFIG_DEFAULT_NOOP is not set +CONFIG_DEFAULT_IOSCHED="cfq" +CONFIG_MQ_IOSCHED_DEADLINE=y +CONFIG_MQ_IOSCHED_KYBER=y +CONFIG_IOSCHED_BFQ=y +CONFIG_BFQ_GROUP_IOSCHED=y +CONFIG_PREEMPT_NOTIFIERS=y +CONFIG_PADATA=y +CONFIG_ASN1=y +CONFIG_INLINE_SPIN_UNLOCK_IRQ=y +CONFIG_INLINE_READ_UNLOCK=y +CONFIG_INLINE_READ_UNLOCK_IRQ=y +CONFIG_INLINE_WRITE_UNLOCK=y +CONFIG_INLINE_WRITE_UNLOCK_IRQ=y +CONFIG_ARCH_SUPPORTS_ATOMIC_RMW=y +CONFIG_MUTEX_SPIN_ON_OWNER=y +CONFIG_RWSEM_SPIN_ON_OWNER=y +CONFIG_LOCK_SPIN_ON_OWNER=y +CONFIG_ARCH_USE_QUEUED_SPINLOCKS=y +CONFIG_QUEUED_SPINLOCKS=y +CONFIG_ARCH_USE_QUEUED_RWLOCKS=y +CONFIG_QUEUED_RWLOCKS=y +CONFIG_ARCH_HAS_SYNC_CORE_BEFORE_USERMODE=y +CONFIG_ARCH_HAS_SYSCALL_WRAPPER=y +CONFIG_FREEZER=y + +# +# Executable file formats +# +CONFIG_BINFMT_ELF=y +CONFIG_COMPAT_BINFMT_ELF=y +CONFIG_ELFCORE=y +CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS=y +CONFIG_BINFMT_SCRIPT=y +CONFIG_BINFMT_MISC=y +CONFIG_COREDUMP=y + +# +# Memory Management options +# +CONFIG_SELECT_MEMORY_MODEL=y +CONFIG_SPARSEMEM_MANUAL=y +CONFIG_SPARSEMEM=y +CONFIG_NEED_MULTIPLE_NODES=y +CONFIG_HAVE_MEMORY_PRESENT=y +CONFIG_SPARSEMEM_EXTREME=y +CONFIG_SPARSEMEM_VMEMMAP_ENABLE=y +CONFIG_SPARSEMEM_VMEMMAP=y +CONFIG_HAVE_MEMBLOCK=y +CONFIG_HAVE_MEMBLOCK_NODE_MAP=y +CONFIG_ARCH_DISCARD_MEMBLOCK=y +CONFIG_MEMORY_ISOLATION=y +CONFIG_HAVE_BOOTMEM_INFO_NODE=y +CONFIG_MEMORY_HOTPLUG=y +CONFIG_MEMORY_HOTPLUG_SPARSE=y 
+CONFIG_MEMORY_HOTPLUG_DEFAULT_ONLINE=y +CONFIG_MEMORY_HOTREMOVE=y +CONFIG_SPLIT_PTLOCK_CPUS=4 +CONFIG_MEMORY_BALLOON=y +# CONFIG_BALLOON_COMPACTION is not set +CONFIG_COMPACTION=y +CONFIG_MIGRATION=y +CONFIG_PHYS_ADDR_T_64BIT=y +CONFIG_BOUNCE=y +CONFIG_VIRT_TO_BUS=y +CONFIG_MMU_NOTIFIER=y +CONFIG_KSM=y +CONFIG_DEFAULT_MMAP_MIN_ADDR=4096 +CONFIG_ARCH_SUPPORTS_MEMORY_FAILURE=y +# CONFIG_MEMORY_FAILURE is not set +CONFIG_TRANSPARENT_HUGEPAGE=y +# CONFIG_TRANSPARENT_HUGEPAGE_ALWAYS is not set +CONFIG_TRANSPARENT_HUGEPAGE_MADVISE=y +CONFIG_ARCH_WANTS_THP_SWAP=y +CONFIG_THP_SWAP=y +CONFIG_TRANSPARENT_HUGE_PAGECACHE=y +# CONFIG_CLEANCACHE is not set +# CONFIG_FRONTSWAP is not set +# CONFIG_CMA is not set +# CONFIG_MEM_SOFT_DIRTY is not set +# CONFIG_ZPOOL is not set +# CONFIG_ZBUD is not set +CONFIG_ZSMALLOC=m +# CONFIG_PGTABLE_MAPPING is not set +# CONFIG_ZSMALLOC_STAT is not set +CONFIG_GENERIC_EARLY_IOREMAP=y +# CONFIG_DEFERRED_STRUCT_PAGE_INIT is not set +# CONFIG_IDLE_PAGE_TRACKING is not set +CONFIG_ARCH_HAS_ZONE_DEVICE=y +CONFIG_ZONE_DEVICE=y +CONFIG_ARCH_HAS_HMM=y +CONFIG_DEV_PAGEMAP_OPS=y +# CONFIG_HMM_MIRROR is not set +# CONFIG_DEVICE_PRIVATE is not set +# CONFIG_DEVICE_PUBLIC is not set +CONFIG_FRAME_VECTOR=y +# CONFIG_PERCPU_STATS is not set +# CONFIG_GUP_BENCHMARK is not set +CONFIG_ARCH_HAS_PTE_SPECIAL=y +CONFIG_NET=y +CONFIG_COMPAT_NETLINK_MESSAGES=y +CONFIG_NET_INGRESS=y +CONFIG_NET_EGRESS=y + +# +# Networking options +# +CONFIG_PACKET=y +CONFIG_PACKET_DIAG=y +CONFIG_UNIX=y +CONFIG_UNIX_DIAG=m +CONFIG_TLS=m +# CONFIG_TLS_DEVICE is not set +CONFIG_XFRM=y +CONFIG_XFRM_OFFLOAD=y +CONFIG_XFRM_ALGO=y +CONFIG_XFRM_USER=y +# CONFIG_XFRM_INTERFACE is not set +CONFIG_XFRM_SUB_POLICY=y +CONFIG_XFRM_MIGRATE=y +CONFIG_XFRM_STATISTICS=y +CONFIG_XFRM_IPCOMP=m +CONFIG_NET_KEY=m +CONFIG_NET_KEY_MIGRATE=y +CONFIG_SMC=m +CONFIG_SMC_DIAG=m +# CONFIG_XDP_SOCKETS is not set +CONFIG_INET=y +CONFIG_IP_MULTICAST=y +CONFIG_IP_ADVANCED_ROUTER=y +# CONFIG_IP_FIB_TRIE_STATS is not 
set +CONFIG_IP_MULTIPLE_TABLES=y +# CONFIG_IP_ROUTE_MULTIPATH is not set +# CONFIG_IP_ROUTE_VERBOSE is not set +CONFIG_IP_ROUTE_CLASSID=y +# CONFIG_IP_PNP is not set +CONFIG_NET_IPIP=m +CONFIG_NET_IPGRE_DEMUX=m +CONFIG_NET_IP_TUNNEL=m +CONFIG_NET_IPGRE=m +# CONFIG_NET_IPGRE_BROADCAST is not set +# CONFIG_IP_MROUTE is not set +CONFIG_SYN_COOKIES=y +# CONFIG_NET_IPVTI is not set +CONFIG_NET_UDP_TUNNEL=m +# CONFIG_NET_FOU is not set +# CONFIG_NET_FOU_IP_TUNNELS is not set +CONFIG_INET_AH=m +CONFIG_INET_ESP=m +CONFIG_INET_ESP_OFFLOAD=m +CONFIG_INET_IPCOMP=m +CONFIG_INET_XFRM_TUNNEL=m +CONFIG_INET_TUNNEL=m +CONFIG_INET_XFRM_MODE_TRANSPORT=m +CONFIG_INET_XFRM_MODE_TUNNEL=m +CONFIG_INET_XFRM_MODE_BEET=m +# CONFIG_INET_DIAG is not set +CONFIG_TCP_CONG_ADVANCED=y +CONFIG_TCP_CONG_BIC=m +CONFIG_TCP_CONG_CUBIC=m +# CONFIG_TCP_CONG_WESTWOOD is not set +# CONFIG_TCP_CONG_HTCP is not set +# CONFIG_TCP_CONG_HSTCP is not set +# CONFIG_TCP_CONG_HYBLA is not set +# CONFIG_TCP_CONG_VEGAS is not set +# CONFIG_TCP_CONG_NV is not set +# CONFIG_TCP_CONG_SCALABLE is not set +# CONFIG_TCP_CONG_LP is not set +# CONFIG_TCP_CONG_VENO is not set +# CONFIG_TCP_CONG_YEAH is not set +# CONFIG_TCP_CONG_ILLINOIS is not set +CONFIG_TCP_CONG_DCTCP=m +# CONFIG_TCP_CONG_CDG is not set +CONFIG_TCP_CONG_BBR=y +CONFIG_DEFAULT_BBR=y +# CONFIG_DEFAULT_RENO is not set +CONFIG_DEFAULT_TCP_CONG="bbr" +CONFIG_TCP_MD5SIG=y +CONFIG_IPV6=y +CONFIG_IPV6_ROUTER_PREF=y +CONFIG_IPV6_ROUTE_INFO=y +CONFIG_IPV6_OPTIMISTIC_DAD=y +CONFIG_INET6_AH=m +CONFIG_INET6_ESP=m +CONFIG_INET6_ESP_OFFLOAD=m +CONFIG_INET6_IPCOMP=m +CONFIG_IPV6_MIP6=m +# CONFIG_IPV6_ILA is not set +CONFIG_INET6_XFRM_TUNNEL=m +CONFIG_INET6_TUNNEL=m +CONFIG_INET6_XFRM_MODE_TRANSPORT=m +CONFIG_INET6_XFRM_MODE_TUNNEL=m +CONFIG_INET6_XFRM_MODE_BEET=m +CONFIG_INET6_XFRM_MODE_ROUTEOPTIMIZATION=m +# CONFIG_IPV6_VTI is not set +CONFIG_IPV6_SIT=m +# CONFIG_IPV6_SIT_6RD is not set +CONFIG_IPV6_NDISC_NODETYPE=y +CONFIG_IPV6_TUNNEL=m +CONFIG_IPV6_GRE=m +# 
CONFIG_IPV6_MULTIPLE_TABLES is not set +# CONFIG_IPV6_MROUTE is not set +# CONFIG_IPV6_SEG6_LWTUNNEL is not set +# CONFIG_IPV6_SEG6_HMAC is not set +CONFIG_NETLABEL=y +# CONFIG_NETWORK_SECMARK is not set +CONFIG_NET_PTP_CLASSIFY=y +CONFIG_NETWORK_PHY_TIMESTAMPING=y +CONFIG_NETFILTER=y +CONFIG_NETFILTER_ADVANCED=y +CONFIG_BRIDGE_NETFILTER=y + +# +# Core Netfilter Configuration +# +# CONFIG_NETFILTER_INGRESS is not set +CONFIG_NETFILTER_NETLINK=y +CONFIG_NETFILTER_FAMILY_BRIDGE=y +CONFIG_NETFILTER_FAMILY_ARP=y +CONFIG_NETFILTER_NETLINK_ACCT=y +CONFIG_NETFILTER_NETLINK_QUEUE=y +CONFIG_NETFILTER_NETLINK_LOG=y +CONFIG_NETFILTER_NETLINK_OSF=m +CONFIG_NF_CONNTRACK=m +CONFIG_NF_LOG_COMMON=m +# CONFIG_NF_LOG_NETDEV is not set +CONFIG_NETFILTER_CONNCOUNT=m +CONFIG_NF_CONNTRACK_MARK=y +CONFIG_NF_CONNTRACK_ZONES=y +# CONFIG_NF_CONNTRACK_PROCFS is not set +CONFIG_NF_CONNTRACK_EVENTS=y +# CONFIG_NF_CONNTRACK_TIMEOUT is not set +# CONFIG_NF_CONNTRACK_TIMESTAMP is not set +CONFIG_NF_CONNTRACK_LABELS=y +CONFIG_NF_CT_PROTO_DCCP=y +CONFIG_NF_CT_PROTO_GRE=m +CONFIG_NF_CT_PROTO_SCTP=y +CONFIG_NF_CT_PROTO_UDPLITE=y +# CONFIG_NF_CONNTRACK_AMANDA is not set +CONFIG_NF_CONNTRACK_FTP=m +CONFIG_NF_CONNTRACK_H323=m +CONFIG_NF_CONNTRACK_IRC=m +CONFIG_NF_CONNTRACK_BROADCAST=m +CONFIG_NF_CONNTRACK_NETBIOS_NS=m +CONFIG_NF_CONNTRACK_SNMP=m +CONFIG_NF_CONNTRACK_PPTP=m +CONFIG_NF_CONNTRACK_SANE=m +CONFIG_NF_CONNTRACK_SIP=m +CONFIG_NF_CONNTRACK_TFTP=m +CONFIG_NF_CT_NETLINK=m +# CONFIG_NETFILTER_NETLINK_GLUE_CT is not set +CONFIG_NF_NAT=m +CONFIG_NF_NAT_NEEDED=y +CONFIG_NF_NAT_PROTO_DCCP=y +CONFIG_NF_NAT_PROTO_UDPLITE=y +CONFIG_NF_NAT_PROTO_SCTP=y +CONFIG_NF_NAT_FTP=m +CONFIG_NF_NAT_IRC=m +CONFIG_NF_NAT_SIP=m +CONFIG_NF_NAT_TFTP=m +CONFIG_NF_NAT_REDIRECT=y +CONFIG_NF_TABLES=m +# CONFIG_NF_TABLES_SET is not set +CONFIG_NF_TABLES_INET=y +CONFIG_NF_TABLES_NETDEV=y +CONFIG_NFT_NUMGEN=m +CONFIG_NFT_CT=m +CONFIG_NFT_COUNTER=m +# CONFIG_NFT_CONNLIMIT is not set +CONFIG_NFT_LOG=m +CONFIG_NFT_LIMIT=m +# 
CONFIG_NFT_MASQ is not set +CONFIG_NFT_REDIR=m +CONFIG_NFT_NAT=m +# CONFIG_NFT_TUNNEL is not set +CONFIG_NFT_OBJREF=m +CONFIG_NFT_QUEUE=m +CONFIG_NFT_QUOTA=m +CONFIG_NFT_REJECT=m +CONFIG_NFT_REJECT_INET=m +CONFIG_NFT_COMPAT=m +CONFIG_NFT_HASH=m +CONFIG_NFT_FIB=m +CONFIG_NFT_FIB_INET=m +# CONFIG_NFT_SOCKET is not set +# CONFIG_NFT_OSF is not set +# CONFIG_NFT_TPROXY is not set +CONFIG_NF_DUP_NETDEV=m +CONFIG_NFT_DUP_NETDEV=m +CONFIG_NFT_FWD_NETDEV=m +CONFIG_NFT_FIB_NETDEV=m +CONFIG_NETFILTER_XTABLES=y + +# +# Xtables combined modules +# +CONFIG_NETFILTER_XT_MARK=y +CONFIG_NETFILTER_XT_CONNMARK=m +CONFIG_NETFILTER_XT_SET=m + +# +# Xtables targets +# +CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m +CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m +CONFIG_NETFILTER_XT_TARGET_CONNMARK=m +CONFIG_NETFILTER_XT_TARGET_CT=m +CONFIG_NETFILTER_XT_TARGET_DSCP=m +CONFIG_NETFILTER_XT_TARGET_HL=m +CONFIG_NETFILTER_XT_TARGET_HMARK=m +CONFIG_NETFILTER_XT_TARGET_IDLETIMER=m +# CONFIG_NETFILTER_XT_TARGET_LED is not set +CONFIG_NETFILTER_XT_TARGET_LOG=m +CONFIG_NETFILTER_XT_TARGET_MARK=m +CONFIG_NETFILTER_XT_NAT=m +CONFIG_NETFILTER_XT_TARGET_NETMAP=m +CONFIG_NETFILTER_XT_TARGET_NFLOG=m +CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m +CONFIG_NETFILTER_XT_TARGET_NOTRACK=m +CONFIG_NETFILTER_XT_TARGET_RATEEST=m +CONFIG_NETFILTER_XT_TARGET_REDIRECT=m +CONFIG_NETFILTER_XT_TARGET_TEE=m +CONFIG_NETFILTER_XT_TARGET_TPROXY=m +CONFIG_NETFILTER_XT_TARGET_TRACE=m +CONFIG_NETFILTER_XT_TARGET_TCPMSS=m +CONFIG_NETFILTER_XT_TARGET_TCPOPTSTRIP=m + +# +# Xtables matches +# +CONFIG_NETFILTER_XT_MATCH_ADDRTYPE=m +CONFIG_NETFILTER_XT_MATCH_BPF=m +# CONFIG_NETFILTER_XT_MATCH_CGROUP is not set +CONFIG_NETFILTER_XT_MATCH_CLUSTER=m +CONFIG_NETFILTER_XT_MATCH_COMMENT=m +CONFIG_NETFILTER_XT_MATCH_CONNBYTES=m +CONFIG_NETFILTER_XT_MATCH_CONNLABEL=m +CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=m +CONFIG_NETFILTER_XT_MATCH_CONNMARK=m +CONFIG_NETFILTER_XT_MATCH_CONNTRACK=m +CONFIG_NETFILTER_XT_MATCH_CPU=m +CONFIG_NETFILTER_XT_MATCH_DCCP=m 
+CONFIG_NETFILTER_XT_MATCH_DEVGROUP=m +CONFIG_NETFILTER_XT_MATCH_DSCP=m +CONFIG_NETFILTER_XT_MATCH_ECN=m +CONFIG_NETFILTER_XT_MATCH_ESP=m +CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=m +CONFIG_NETFILTER_XT_MATCH_HELPER=m +CONFIG_NETFILTER_XT_MATCH_HL=m +# CONFIG_NETFILTER_XT_MATCH_IPCOMP is not set +CONFIG_NETFILTER_XT_MATCH_IPRANGE=m +CONFIG_NETFILTER_XT_MATCH_IPVS=m +CONFIG_NETFILTER_XT_MATCH_L2TP=m +CONFIG_NETFILTER_XT_MATCH_LENGTH=m +CONFIG_NETFILTER_XT_MATCH_LIMIT=m +CONFIG_NETFILTER_XT_MATCH_MAC=m +CONFIG_NETFILTER_XT_MATCH_MARK=m +CONFIG_NETFILTER_XT_MATCH_MULTIPORT=m +CONFIG_NETFILTER_XT_MATCH_NFACCT=m +CONFIG_NETFILTER_XT_MATCH_OSF=m +CONFIG_NETFILTER_XT_MATCH_OWNER=m +CONFIG_NETFILTER_XT_MATCH_POLICY=m +CONFIG_NETFILTER_XT_MATCH_PHYSDEV=m +CONFIG_NETFILTER_XT_MATCH_PKTTYPE=m +CONFIG_NETFILTER_XT_MATCH_QUOTA=m +CONFIG_NETFILTER_XT_MATCH_RATEEST=m +CONFIG_NETFILTER_XT_MATCH_REALM=m +CONFIG_NETFILTER_XT_MATCH_RECENT=m +CONFIG_NETFILTER_XT_MATCH_SCTP=m +CONFIG_NETFILTER_XT_MATCH_SOCKET=m +CONFIG_NETFILTER_XT_MATCH_STATE=m +CONFIG_NETFILTER_XT_MATCH_STATISTIC=m +CONFIG_NETFILTER_XT_MATCH_STRING=m +CONFIG_NETFILTER_XT_MATCH_TCPMSS=m +CONFIG_NETFILTER_XT_MATCH_TIME=m +CONFIG_NETFILTER_XT_MATCH_U32=m +CONFIG_IP_SET=y +CONFIG_IP_SET_MAX=256 +CONFIG_IP_SET_BITMAP_IP=m +CONFIG_IP_SET_BITMAP_IPMAC=m +CONFIG_IP_SET_BITMAP_PORT=m +CONFIG_IP_SET_HASH_IP=y +CONFIG_IP_SET_HASH_IPMARK=m +CONFIG_IP_SET_HASH_IPPORT=m +CONFIG_IP_SET_HASH_IPPORTIP=m +CONFIG_IP_SET_HASH_IPPORTNET=m +CONFIG_IP_SET_HASH_IPMAC=m +CONFIG_IP_SET_HASH_MAC=m +CONFIG_IP_SET_HASH_NETPORTNET=m +CONFIG_IP_SET_HASH_NET=m +CONFIG_IP_SET_HASH_NETNET=m +CONFIG_IP_SET_HASH_NETPORT=m +CONFIG_IP_SET_HASH_NETIFACE=m +CONFIG_IP_SET_LIST_SET=m +CONFIG_IP_VS=m +CONFIG_IP_VS_IPV6=y +# CONFIG_IP_VS_DEBUG is not set +CONFIG_IP_VS_TAB_BITS=15 + +# +# IPVS transport protocol load balancing support +# +CONFIG_IP_VS_PROTO_TCP=y +CONFIG_IP_VS_PROTO_UDP=y +CONFIG_IP_VS_PROTO_AH_ESP=y +CONFIG_IP_VS_PROTO_ESP=y 
+CONFIG_IP_VS_PROTO_AH=y +CONFIG_IP_VS_PROTO_SCTP=y + +# +# IPVS scheduler +# +CONFIG_IP_VS_RR=m +CONFIG_IP_VS_WRR=m +CONFIG_IP_VS_LC=m +CONFIG_IP_VS_WLC=m +CONFIG_IP_VS_FO=m +CONFIG_IP_VS_OVF=m +CONFIG_IP_VS_LBLC=m +CONFIG_IP_VS_LBLCR=m +CONFIG_IP_VS_DH=m +CONFIG_IP_VS_SH=m +# CONFIG_IP_VS_MH is not set +CONFIG_IP_VS_SED=m +CONFIG_IP_VS_NQ=m + +# +# IPVS SH scheduler +# +CONFIG_IP_VS_SH_TAB_BITS=8 + +# +# IPVS MH scheduler +# +CONFIG_IP_VS_MH_TAB_INDEX=12 + +# +# IPVS application helper +# +CONFIG_IP_VS_FTP=m +CONFIG_IP_VS_NFCT=y +CONFIG_IP_VS_PE_SIP=m + +# +# IP: Netfilter Configuration +# +CONFIG_NF_DEFRAG_IPV4=m +CONFIG_NF_SOCKET_IPV4=m +CONFIG_NF_TPROXY_IPV4=m +CONFIG_NF_TABLES_IPV4=y +CONFIG_NFT_CHAIN_ROUTE_IPV4=m +CONFIG_NFT_REJECT_IPV4=m +CONFIG_NFT_DUP_IPV4=m +CONFIG_NFT_FIB_IPV4=m +CONFIG_NF_TABLES_ARP=y +CONFIG_NF_DUP_IPV4=m +# CONFIG_NF_LOG_ARP is not set +CONFIG_NF_LOG_IPV4=m +CONFIG_NF_REJECT_IPV4=y +CONFIG_NF_NAT_IPV4=m +CONFIG_NF_NAT_MASQUERADE_IPV4=y +CONFIG_NFT_CHAIN_NAT_IPV4=m +CONFIG_NFT_REDIR_IPV4=m +CONFIG_NF_NAT_SNMP_BASIC=m +CONFIG_NF_NAT_PROTO_GRE=m +CONFIG_NF_NAT_PPTP=m +CONFIG_NF_NAT_H323=m +CONFIG_IP_NF_IPTABLES=m +CONFIG_IP_NF_MATCH_AH=m +CONFIG_IP_NF_MATCH_ECN=m +CONFIG_IP_NF_MATCH_RPFILTER=m +CONFIG_IP_NF_MATCH_TTL=m +CONFIG_IP_NF_FILTER=m +CONFIG_IP_NF_TARGET_REJECT=m +# CONFIG_IP_NF_TARGET_SYNPROXY is not set +CONFIG_IP_NF_NAT=m +CONFIG_IP_NF_TARGET_MASQUERADE=m +CONFIG_IP_NF_TARGET_NETMAP=m +CONFIG_IP_NF_TARGET_REDIRECT=m +CONFIG_IP_NF_MANGLE=m +CONFIG_IP_NF_TARGET_CLUSTERIP=m +CONFIG_IP_NF_TARGET_ECN=m +CONFIG_IP_NF_TARGET_TTL=m +CONFIG_IP_NF_RAW=m +CONFIG_IP_NF_SECURITY=m +CONFIG_IP_NF_ARPTABLES=m +CONFIG_IP_NF_ARPFILTER=m +CONFIG_IP_NF_ARP_MANGLE=m + +# +# IPv6: Netfilter Configuration +# +CONFIG_NF_SOCKET_IPV6=m +CONFIG_NF_TPROXY_IPV6=m +CONFIG_NF_TABLES_IPV6=y +CONFIG_NFT_CHAIN_ROUTE_IPV6=m +# CONFIG_NFT_CHAIN_NAT_IPV6 is not set +CONFIG_NFT_REDIR_IPV6=m +CONFIG_NFT_REJECT_IPV6=m +CONFIG_NFT_DUP_IPV6=m +CONFIG_NFT_FIB_IPV6=m 
+CONFIG_NF_DUP_IPV6=m +CONFIG_NF_REJECT_IPV6=y +CONFIG_NF_LOG_IPV6=m +CONFIG_NF_NAT_IPV6=m +CONFIG_NF_NAT_MASQUERADE_IPV6=y +CONFIG_IP6_NF_IPTABLES=m +CONFIG_IP6_NF_MATCH_AH=m +CONFIG_IP6_NF_MATCH_EUI64=m +CONFIG_IP6_NF_MATCH_FRAG=m +CONFIG_IP6_NF_MATCH_OPTS=m +CONFIG_IP6_NF_MATCH_HL=m +CONFIG_IP6_NF_MATCH_IPV6HEADER=m +CONFIG_IP6_NF_MATCH_MH=m +CONFIG_IP6_NF_MATCH_RPFILTER=m +CONFIG_IP6_NF_MATCH_RT=m +CONFIG_IP6_NF_MATCH_SRH=m +CONFIG_IP6_NF_TARGET_HL=m +CONFIG_IP6_NF_FILTER=m +CONFIG_IP6_NF_TARGET_REJECT=m +# CONFIG_IP6_NF_TARGET_SYNPROXY is not set +CONFIG_IP6_NF_MANGLE=m +CONFIG_IP6_NF_RAW=m +# CONFIG_IP6_NF_SECURITY is not set +CONFIG_IP6_NF_NAT=m +CONFIG_IP6_NF_TARGET_MASQUERADE=m +# CONFIG_IP6_NF_TARGET_NPT is not set +CONFIG_NF_DEFRAG_IPV6=m +# CONFIG_NF_TABLES_BRIDGE is not set +CONFIG_BRIDGE_NF_EBTABLES=y +CONFIG_BRIDGE_EBT_BROUTE=y +CONFIG_BRIDGE_EBT_T_FILTER=y +CONFIG_BRIDGE_EBT_T_NAT=y +CONFIG_BRIDGE_EBT_802_3=y +CONFIG_BRIDGE_EBT_AMONG=y +CONFIG_BRIDGE_EBT_ARP=y +CONFIG_BRIDGE_EBT_IP=y +CONFIG_BRIDGE_EBT_IP6=y +CONFIG_BRIDGE_EBT_LIMIT=y +CONFIG_BRIDGE_EBT_MARK=y +CONFIG_BRIDGE_EBT_PKTTYPE=y +CONFIG_BRIDGE_EBT_STP=y +CONFIG_BRIDGE_EBT_VLAN=y +CONFIG_BRIDGE_EBT_ARPREPLY=y +CONFIG_BRIDGE_EBT_DNAT=y +CONFIG_BRIDGE_EBT_MARK_T=y +CONFIG_BRIDGE_EBT_REDIRECT=y +CONFIG_BRIDGE_EBT_SNAT=y +CONFIG_BRIDGE_EBT_LOG=y +CONFIG_BRIDGE_EBT_NFLOG=y +# CONFIG_BPFILTER is not set +# CONFIG_IP_DCCP is not set +# CONFIG_IP_SCTP is not set +# CONFIG_RDS is not set +# CONFIG_TIPC is not set +# CONFIG_ATM is not set +# CONFIG_L2TP is not set +CONFIG_STP=y +CONFIG_BRIDGE=y +CONFIG_BRIDGE_IGMP_SNOOPING=y +CONFIG_BRIDGE_VLAN_FILTERING=y +CONFIG_HAVE_NET_DSA=y +CONFIG_NET_DSA=m +# CONFIG_NET_DSA_LEGACY is not set +CONFIG_NET_DSA_TAG_BRCM=y +CONFIG_NET_DSA_TAG_BRCM_PREPEND=y +CONFIG_NET_DSA_TAG_DSA=y +CONFIG_NET_DSA_TAG_EDSA=y +CONFIG_VLAN_8021Q=m +# CONFIG_VLAN_8021Q_GVRP is not set +# CONFIG_VLAN_8021Q_MVRP is not set +# CONFIG_DECNET is not set +CONFIG_LLC=y +# CONFIG_LLC2 is not 
set +# CONFIG_ATALK is not set +# CONFIG_X25 is not set +# CONFIG_LAPB is not set +# CONFIG_PHONET is not set +# CONFIG_6LOWPAN is not set +# CONFIG_IEEE802154 is not set +CONFIG_NET_SCHED=y + +# +# Queueing/Scheduling +# +CONFIG_NET_SCH_CBQ=y +CONFIG_NET_SCH_HTB=m +CONFIG_NET_SCH_HFSC=m +# CONFIG_NET_SCH_PRIO is not set +CONFIG_NET_SCH_MULTIQ=y +CONFIG_NET_SCH_RED=m +# CONFIG_NET_SCH_SFB is not set +CONFIG_NET_SCH_SFQ=m +# CONFIG_NET_SCH_TEQL is not set +# CONFIG_NET_SCH_TBF is not set +CONFIG_NET_SCH_CBS=m +# CONFIG_NET_SCH_ETF is not set +# CONFIG_NET_SCH_GRED is not set +# CONFIG_NET_SCH_DSMARK is not set +# CONFIG_NET_SCH_NETEM is not set +# CONFIG_NET_SCH_DRR is not set +# CONFIG_NET_SCH_MQPRIO is not set +# CONFIG_NET_SCH_SKBPRIO is not set +# CONFIG_NET_SCH_CHOKE is not set +# CONFIG_NET_SCH_QFQ is not set +CONFIG_NET_SCH_CODEL=y +CONFIG_NET_SCH_FQ_CODEL=y +# CONFIG_NET_SCH_CAKE is not set +CONFIG_NET_SCH_FQ=y +# CONFIG_NET_SCH_HHF is not set +# CONFIG_NET_SCH_PIE is not set +CONFIG_NET_SCH_INGRESS=m +# CONFIG_NET_SCH_PLUG is not set +# CONFIG_NET_SCH_DEFAULT is not set + +# +# Classification +# +CONFIG_NET_CLS=y +CONFIG_NET_CLS_BASIC=m +CONFIG_NET_CLS_TCINDEX=m +CONFIG_NET_CLS_ROUTE4=m +CONFIG_NET_CLS_FW=m +CONFIG_NET_CLS_U32=m +# CONFIG_CLS_U32_PERF is not set +# CONFIG_CLS_U32_MARK is not set +# CONFIG_NET_CLS_RSVP is not set +# CONFIG_NET_CLS_RSVP6 is not set +CONFIG_NET_CLS_FLOW=m +CONFIG_NET_CLS_CGROUP=y +CONFIG_NET_CLS_BPF=y +# CONFIG_NET_CLS_FLOWER is not set +# CONFIG_NET_CLS_MATCHALL is not set +CONFIG_NET_EMATCH=y +CONFIG_NET_EMATCH_STACK=32 +# CONFIG_NET_EMATCH_CMP is not set +# CONFIG_NET_EMATCH_NBYTE is not set +# CONFIG_NET_EMATCH_U32 is not set +# CONFIG_NET_EMATCH_META is not set +# CONFIG_NET_EMATCH_TEXT is not set +CONFIG_NET_EMATCH_IPSET=m +CONFIG_NET_EMATCH_IPT=m +CONFIG_NET_CLS_ACT=y +CONFIG_NET_ACT_POLICE=m +CONFIG_NET_ACT_GACT=m +# CONFIG_GACT_PROB is not set +CONFIG_NET_ACT_MIRRED=m +CONFIG_NET_ACT_SAMPLE=m +CONFIG_NET_ACT_IPT=m 
+CONFIG_NET_ACT_NAT=m +CONFIG_NET_ACT_PEDIT=m +CONFIG_NET_ACT_SIMP=m +CONFIG_NET_ACT_SKBEDIT=m +CONFIG_NET_ACT_CSUM=m +CONFIG_NET_ACT_VLAN=m +CONFIG_NET_ACT_BPF=m +CONFIG_NET_ACT_CONNMARK=m +CONFIG_NET_ACT_SKBMOD=m +CONFIG_NET_ACT_IFE=m +CONFIG_NET_ACT_TUNNEL_KEY=m +CONFIG_NET_IFE_SKBMARK=m +CONFIG_NET_IFE_SKBPRIO=m +CONFIG_NET_IFE_SKBTCINDEX=m +# CONFIG_NET_CLS_IND is not set +CONFIG_NET_SCH_FIFO=y +CONFIG_DCB=y +CONFIG_DNS_RESOLVER=m +# CONFIG_BATMAN_ADV is not set +CONFIG_OPENVSWITCH=m +CONFIG_OPENVSWITCH_GRE=m +CONFIG_OPENVSWITCH_VXLAN=m +CONFIG_VSOCKETS=m +CONFIG_VSOCKETS_DIAG=m +CONFIG_VMWARE_VMCI_VSOCKETS=m +CONFIG_VIRTIO_VSOCKETS=m +CONFIG_VIRTIO_VSOCKETS_COMMON=m +# CONFIG_NETLINK_DIAG is not set +CONFIG_MPLS=y +CONFIG_NET_MPLS_GSO=m +# CONFIG_MPLS_ROUTING is not set +CONFIG_NET_NSH=m +# CONFIG_HSR is not set +CONFIG_NET_SWITCHDEV=y +CONFIG_NET_L3_MASTER_DEV=y +# CONFIG_NET_NCSI is not set +CONFIG_RPS=y +CONFIG_RFS_ACCEL=y +CONFIG_XPS=y +CONFIG_CGROUP_NET_PRIO=y +CONFIG_CGROUP_NET_CLASSID=y +CONFIG_NET_RX_BUSY_POLL=y +CONFIG_BQL=y +CONFIG_BPF_JIT=y +# CONFIG_BPF_STREAM_PARSER is not set +CONFIG_NET_FLOW_LIMIT=y + +# +# Network testing +# +# CONFIG_NET_PKTGEN is not set +# CONFIG_NET_DROP_MONITOR is not set +# CONFIG_HAMRADIO is not set +# CONFIG_CAN is not set +CONFIG_BT=m +CONFIG_BT_BREDR=y +CONFIG_BT_RFCOMM=m +# CONFIG_BT_RFCOMM_TTY is not set +CONFIG_BT_BNEP=m +CONFIG_BT_BNEP_MC_FILTER=y +CONFIG_BT_BNEP_PROTO_FILTER=y +CONFIG_BT_HIDP=m +CONFIG_BT_HS=y +CONFIG_BT_LE=y +# CONFIG_BT_LEDS is not set +# CONFIG_BT_SELFTEST is not set +# CONFIG_BT_DEBUGFS is not set + +# +# Bluetooth device drivers +# +CONFIG_BT_INTEL=m +CONFIG_BT_BCM=m +CONFIG_BT_RTL=m +CONFIG_BT_HCIBTUSB=m +CONFIG_BT_HCIBTUSB_AUTOSUSPEND=y +CONFIG_BT_HCIBTUSB_BCM=y +CONFIG_BT_HCIBTUSB_RTL=y +# CONFIG_BT_HCIBTSDIO is not set +# CONFIG_BT_HCIUART is not set +# CONFIG_BT_HCIBCM203X is not set +CONFIG_BT_HCIBPA10X=m +# CONFIG_BT_HCIBFUSB is not set +# CONFIG_BT_HCIVHCI is not set +# 
CONFIG_BT_MRVL is not set +# CONFIG_BT_ATH3K is not set +# CONFIG_BT_MTKUART is not set +# CONFIG_AF_RXRPC is not set +CONFIG_AF_KCM=m +CONFIG_STREAM_PARSER=m +CONFIG_FIB_RULES=y +CONFIG_WIRELESS=y +CONFIG_WIRELESS_EXT=y +CONFIG_WEXT_CORE=y +CONFIG_WEXT_PROC=y +CONFIG_WEXT_PRIV=y +CONFIG_CFG80211=m +# CONFIG_NL80211_TESTMODE is not set +# CONFIG_CFG80211_DEVELOPER_WARNINGS is not set +# CONFIG_CFG80211_CERTIFICATION_ONUS is not set +CONFIG_CFG80211_REQUIRE_SIGNED_REGDB=y +CONFIG_CFG80211_USE_KERNEL_REGDB_KEYS=y +CONFIG_CFG80211_DEFAULT_PS=y +# CONFIG_CFG80211_DEBUGFS is not set +CONFIG_CFG80211_CRDA_SUPPORT=y +# CONFIG_CFG80211_WEXT is not set +CONFIG_LIB80211=m +CONFIG_LIB80211_CRYPT_WEP=m +CONFIG_LIB80211_CRYPT_CCMP=m +# CONFIG_LIB80211_DEBUG is not set +CONFIG_MAC80211=m +CONFIG_MAC80211_HAS_RC=y +CONFIG_MAC80211_RC_MINSTREL=y +CONFIG_MAC80211_RC_MINSTREL_HT=y +CONFIG_MAC80211_RC_MINSTREL_VHT=y +CONFIG_MAC80211_RC_DEFAULT_MINSTREL=y +CONFIG_MAC80211_RC_DEFAULT="minstrel_ht" +# CONFIG_MAC80211_MESH is not set +CONFIG_MAC80211_LEDS=y +# CONFIG_MAC80211_DEBUGFS is not set +# CONFIG_MAC80211_MESSAGE_TRACING is not set +# CONFIG_MAC80211_DEBUG_MENU is not set +CONFIG_MAC80211_STA_HASH_MAX_SIZE=0 +# CONFIG_WIMAX is not set +CONFIG_RFKILL=m +CONFIG_RFKILL_LEDS=y +CONFIG_RFKILL_INPUT=y +# CONFIG_RFKILL_GPIO is not set +CONFIG_NET_9P=m +CONFIG_NET_9P_VIRTIO=m +# CONFIG_NET_9P_RDMA is not set +# CONFIG_NET_9P_DEBUG is not set +# CONFIG_CAIF is not set +CONFIG_CEPH_LIB=m +# CONFIG_CEPH_LIB_PRETTYDEBUG is not set +# CONFIG_CEPH_LIB_USE_DNS_RESOLVER is not set +# CONFIG_NFC is not set +CONFIG_PSAMPLE=m +CONFIG_NET_IFE=m +# CONFIG_LWTUNNEL is not set +CONFIG_DST_CACHE=y +CONFIG_GRO_CELLS=y +# CONFIG_NET_DEVLINK is not set +CONFIG_MAY_USE_DEVLINK=y +CONFIG_PAGE_POOL=y +CONFIG_FAILOVER=m +CONFIG_HAVE_EBPF_JIT=y + +# +# Device Drivers +# + +# +# Generic Driver Options +# +# CONFIG_UEVENT_HELPER is not set +CONFIG_DEVTMPFS=y +CONFIG_DEVTMPFS_MOUNT=y +CONFIG_STANDALONE=y 
+CONFIG_PREVENT_FIRMWARE_BUILD=y + +# +# Firmware loader +# +CONFIG_FW_LOADER=y +CONFIG_EXTRA_FIRMWARE="" +# CONFIG_FW_LOADER_USER_HELPER is not set +CONFIG_WANT_DEV_COREDUMP=y +CONFIG_ALLOW_DEV_COREDUMP=y +CONFIG_DEV_COREDUMP=y +# CONFIG_DEBUG_DRIVER is not set +CONFIG_DEBUG_DEVRES=y +# CONFIG_DEBUG_TEST_DRIVER_REMOVE is not set +# CONFIG_TEST_ASYNC_DRIVER_PROBE is not set +CONFIG_GENERIC_CPU_AUTOPROBE=y +CONFIG_GENERIC_CPU_VULNERABILITIES=y +CONFIG_REGMAP=y +CONFIG_REGMAP_I2C=y +CONFIG_REGMAP_SPI=y +CONFIG_REGMAP_IRQ=y +# CONFIG_REGMAP_SDW is not set +CONFIG_DMA_SHARED_BUFFER=y +# CONFIG_DMA_FENCE_TRACE is not set + +# +# Bus devices +# +CONFIG_CONNECTOR=y +CONFIG_PROC_EVENTS=y +# CONFIG_GNSS is not set +CONFIG_MTD=m +# CONFIG_MTD_TESTS is not set +# CONFIG_MTD_REDBOOT_PARTS is not set +# CONFIG_MTD_CMDLINE_PARTS is not set +# CONFIG_MTD_AR7_PARTS is not set + +# +# Partition parsers +# + +# +# User Modules And Translation Layers +# +# CONFIG_MTD_BLOCK is not set +# CONFIG_MTD_BLOCK_RO is not set +# CONFIG_FTL is not set +# CONFIG_NFTL is not set +# CONFIG_INFTL is not set +# CONFIG_RFD_FTL is not set +# CONFIG_SSFDC is not set +# CONFIG_SM_FTL is not set +# CONFIG_MTD_OOPS is not set +# CONFIG_MTD_SWAP is not set +# CONFIG_MTD_PARTITIONED_MASTER is not set + +# +# RAM/ROM/Flash chip drivers +# +CONFIG_MTD_CFI=m +# CONFIG_MTD_JEDECPROBE is not set +CONFIG_MTD_GEN_PROBE=m +CONFIG_MTD_CFI_ADV_OPTIONS=y +CONFIG_MTD_CFI_NOSWAP=y +# CONFIG_MTD_CFI_BE_BYTE_SWAP is not set +# CONFIG_MTD_CFI_LE_BYTE_SWAP is not set +CONFIG_MTD_CFI_GEOMETRY=y +CONFIG_MTD_MAP_BANK_WIDTH_1=y +CONFIG_MTD_MAP_BANK_WIDTH_2=y +CONFIG_MTD_MAP_BANK_WIDTH_4=y +CONFIG_MTD_MAP_BANK_WIDTH_8=y +CONFIG_MTD_MAP_BANK_WIDTH_16=y +CONFIG_MTD_MAP_BANK_WIDTH_32=y +CONFIG_MTD_CFI_I1=y +CONFIG_MTD_CFI_I2=y +CONFIG_MTD_CFI_I4=y +CONFIG_MTD_CFI_I8=y +# CONFIG_MTD_OTP is not set +CONFIG_MTD_CFI_INTELEXT=m +# CONFIG_MTD_CFI_AMDSTD is not set +# CONFIG_MTD_CFI_STAA is not set +CONFIG_MTD_CFI_UTIL=m 
+CONFIG_MTD_RAM=m +# CONFIG_MTD_ROM is not set +# CONFIG_MTD_ABSENT is not set + +# +# Mapping drivers for chip access +# +CONFIG_MTD_COMPLEX_MAPPINGS=y +# CONFIG_MTD_PHYSMAP is not set +# CONFIG_MTD_SBC_GXX is not set +# CONFIG_MTD_PCI is not set +# CONFIG_MTD_GPIO_ADDR is not set +CONFIG_MTD_INTEL_VR_NOR=m +CONFIG_MTD_PLATRAM=m +# CONFIG_MTD_LATCH_ADDR is not set + +# +# Self-contained MTD device drivers +# +# CONFIG_MTD_PMC551 is not set +# CONFIG_MTD_DATAFLASH is not set +# CONFIG_MTD_MCHP23K256 is not set +# CONFIG_MTD_SST25L is not set +# CONFIG_MTD_SLRAM is not set +# CONFIG_MTD_PHRAM is not set +# CONFIG_MTD_MTDRAM is not set +# CONFIG_MTD_BLOCK2MTD is not set + +# +# Disk-On-Chip Device Drivers +# +# CONFIG_MTD_DOCG3 is not set +# CONFIG_MTD_ONENAND is not set +# CONFIG_MTD_NAND is not set +# CONFIG_MTD_SPI_NAND is not set + +# +# LPDDR & LPDDR2 PCM memory drivers +# +# CONFIG_MTD_LPDDR is not set +# CONFIG_MTD_SPI_NOR is not set +# CONFIG_MTD_UBI is not set +# CONFIG_OF is not set +CONFIG_ARCH_MIGHT_HAVE_PC_PARPORT=y +# CONFIG_PARPORT is not set +CONFIG_PNP=y +# CONFIG_PNP_DEBUG_MESSAGES is not set + +# +# Protocols +# +CONFIG_PNPACPI=y +CONFIG_BLK_DEV=y +# CONFIG_BLK_DEV_NULL_BLK is not set +# CONFIG_BLK_DEV_FD is not set +CONFIG_CDROM=m +# CONFIG_BLK_DEV_PCIESSD_MTIP32XX is not set +CONFIG_ZRAM=m +CONFIG_ZRAM_WRITEBACK=y +# CONFIG_ZRAM_MEMORY_TRACKING is not set +# CONFIG_BLK_DEV_DAC960 is not set +# CONFIG_BLK_DEV_UMEM is not set +CONFIG_BLK_DEV_LOOP=y +CONFIG_BLK_DEV_LOOP_MIN_COUNT=8 +CONFIG_BLK_DEV_CRYPTOLOOP=y +# CONFIG_BLK_DEV_DRBD is not set +CONFIG_BLK_DEV_NBD=m +# CONFIG_BLK_DEV_SKD is not set +# CONFIG_BLK_DEV_SX8 is not set +CONFIG_BLK_DEV_RAM=m +CONFIG_BLK_DEV_RAM_COUNT=16 +CONFIG_BLK_DEV_RAM_SIZE=16384 +# CONFIG_CDROM_PKTCDVD is not set +# CONFIG_ATA_OVER_ETH is not set +CONFIG_VIRTIO_BLK=y +CONFIG_VIRTIO_BLK_SCSI=y +CONFIG_BLK_DEV_RBD=m +# CONFIG_BLK_DEV_RSXX is not set + +# +# NVME Support +# +CONFIG_NVME_CORE=y +CONFIG_BLK_DEV_NVME=y 
+CONFIG_NVME_MULTIPATH=y +# CONFIG_NVME_RDMA is not set +# CONFIG_NVME_FC is not set +# CONFIG_NVME_TARGET is not set + +# +# Misc devices +# +# CONFIG_AD525X_DPOT is not set +# CONFIG_DUMMY_IRQ is not set +# CONFIG_IBM_ASM is not set +# CONFIG_PHANTOM is not set +# CONFIG_SGI_IOC4 is not set +# CONFIG_TIFM_CORE is not set +# CONFIG_ICS932S401 is not set +# CONFIG_ENCLOSURE_SERVICES is not set +CONFIG_HP_ILO=m +# CONFIG_APDS9802ALS is not set +# CONFIG_ISL29003 is not set +# CONFIG_ISL29020 is not set +# CONFIG_SENSORS_TSL2550 is not set +# CONFIG_SENSORS_BH1770 is not set +# CONFIG_SENSORS_APDS990X is not set +# CONFIG_HMC6352 is not set +# CONFIG_DS1682 is not set +CONFIG_VMWARE_BALLOON=m +CONFIG_USB_SWITCH_FSA9480=m +# CONFIG_LATTICE_ECP3_CONFIG is not set +# CONFIG_SRAM is not set +# CONFIG_PCI_ENDPOINT_TEST is not set +CONFIG_MISC_RTSX=m +# CONFIG_C2PORT is not set + +# +# EEPROM support +# +# CONFIG_EEPROM_AT24 is not set +# CONFIG_EEPROM_AT25 is not set +# CONFIG_EEPROM_LEGACY is not set +# CONFIG_EEPROM_MAX6875 is not set +CONFIG_EEPROM_93CX6=m +# CONFIG_EEPROM_93XX46 is not set +# CONFIG_EEPROM_IDT_89HPESX is not set +# CONFIG_CB710_CORE is not set + +# +# Texas Instruments shared transport line discipline +# +# CONFIG_TI_ST is not set +# CONFIG_SENSORS_LIS3_I2C is not set +# CONFIG_ALTERA_STAPL is not set +# CONFIG_INTEL_MEI is not set +# CONFIG_INTEL_MEI_ME is not set +# CONFIG_INTEL_MEI_TXE is not set +# CONFIG_INTEL_MEI_VIRTIO is not set +CONFIG_VMWARE_VMCI=m + +# +# Intel MIC & related support +# + +# +# Intel MIC Bus Driver +# +# CONFIG_INTEL_MIC_BUS is not set + +# +# SCIF Bus Driver +# +# CONFIG_SCIF_BUS is not set + +# +# VOP Bus Driver +# +# CONFIG_VOP_BUS is not set + +# +# Intel MIC Host Driver +# + +# +# Intel MIC Card Driver +# + +# +# SCIF Driver +# + +# +# Intel MIC Coprocessor State Management (COSM) Drivers +# + +# +# VOP Driver +# +# CONFIG_GENWQE is not set +# CONFIG_ECHO is not set +CONFIG_MISC_RTSX_PCI=m +CONFIG_MISC_RTSX_USB=m 
+CONFIG_HAVE_IDE=y +# CONFIG_IDE is not set + +# +# SCSI device support +# +CONFIG_SCSI_MOD=y +CONFIG_RAID_ATTRS=y +CONFIG_SCSI=y +CONFIG_SCSI_DMA=y +CONFIG_SCSI_NETLINK=y +CONFIG_SCSI_MQ_DEFAULT=y +# CONFIG_SCSI_PROC_FS is not set + +# +# SCSI support type (disk, tape, CD-ROM) +# +CONFIG_BLK_DEV_SD=y +# CONFIG_CHR_DEV_ST is not set +# CONFIG_CHR_DEV_OSST is not set +CONFIG_BLK_DEV_SR=m +# CONFIG_BLK_DEV_SR_VENDOR is not set +CONFIG_CHR_DEV_SG=y +# CONFIG_CHR_DEV_SCH is not set +CONFIG_SCSI_CONSTANTS=y +CONFIG_SCSI_LOGGING=y +CONFIG_SCSI_SCAN_ASYNC=y + +# +# SCSI Transports +# +CONFIG_SCSI_SPI_ATTRS=y +CONFIG_SCSI_FC_ATTRS=y +CONFIG_SCSI_ISCSI_ATTRS=m +CONFIG_SCSI_SAS_ATTRS=y +CONFIG_SCSI_SAS_LIBSAS=y +CONFIG_SCSI_SAS_ATA=y +CONFIG_SCSI_SAS_HOST_SMP=y +CONFIG_SCSI_SRP_ATTRS=m +CONFIG_SCSI_LOWLEVEL=y +CONFIG_ISCSI_TCP=m +# CONFIG_ISCSI_BOOT_SYSFS is not set +# CONFIG_SCSI_CXGB3_ISCSI is not set +# CONFIG_SCSI_CXGB4_ISCSI is not set +# CONFIG_SCSI_BNX2_ISCSI is not set +# CONFIG_SCSI_BNX2X_FCOE is not set +# CONFIG_BE2ISCSI is not set +# CONFIG_BLK_DEV_3W_XXXX_RAID is not set +CONFIG_SCSI_HPSA=y +# CONFIG_SCSI_3W_9XXX is not set +# CONFIG_SCSI_3W_SAS is not set +# CONFIG_SCSI_ACARD is not set +# CONFIG_SCSI_AACRAID is not set +# CONFIG_SCSI_AIC7XXX is not set +# CONFIG_SCSI_AIC79XX is not set +# CONFIG_SCSI_AIC94XX is not set +# CONFIG_SCSI_MVSAS is not set +# CONFIG_SCSI_MVUMI is not set +# CONFIG_SCSI_DPT_I2O is not set +# CONFIG_SCSI_ADVANSYS is not set +# CONFIG_SCSI_ARCMSR is not set +# CONFIG_SCSI_ESAS2R is not set +# CONFIG_MEGARAID_NEWGEN is not set +# CONFIG_MEGARAID_LEGACY is not set +CONFIG_MEGARAID_SAS=y +CONFIG_SCSI_MPT3SAS=y +CONFIG_SCSI_MPT2SAS_MAX_SGE=128 +CONFIG_SCSI_MPT3SAS_MAX_SGE=128 +CONFIG_SCSI_MPT2SAS=y +CONFIG_SCSI_SMARTPQI=y +CONFIG_SCSI_UFSHCD=m +# CONFIG_SCSI_UFSHCD_PCI is not set +# CONFIG_SCSI_UFSHCD_PLATFORM is not set +# CONFIG_SCSI_HPTIOP is not set +CONFIG_SCSI_BUSLOGIC=y +# CONFIG_SCSI_FLASHPOINT is not set +CONFIG_VMWARE_PVSCSI=y 
+CONFIG_LIBFC=y +CONFIG_LIBFCOE=m +CONFIG_FCOE=m +CONFIG_FCOE_FNIC=m +# CONFIG_SCSI_SNIC is not set +# CONFIG_SCSI_DMX3191D is not set +# CONFIG_SCSI_GDTH is not set +CONFIG_SCSI_ISCI=y +# CONFIG_SCSI_IPS is not set +# CONFIG_SCSI_INITIO is not set +# CONFIG_SCSI_INIA100 is not set +# CONFIG_SCSI_STEX is not set +# CONFIG_SCSI_SYM53C8XX_2 is not set +# CONFIG_SCSI_IPR is not set +# CONFIG_SCSI_QLOGIC_1280 is not set +# CONFIG_SCSI_QLA_FC is not set +# CONFIG_SCSI_QLA_ISCSI is not set +# CONFIG_SCSI_LPFC is not set +# CONFIG_SCSI_DC395x is not set +# CONFIG_SCSI_AM53C974 is not set +# CONFIG_SCSI_WD719X is not set +# CONFIG_SCSI_DEBUG is not set +# CONFIG_SCSI_PMCRAID is not set +# CONFIG_SCSI_PM8001 is not set +# CONFIG_SCSI_BFA_FC is not set +CONFIG_SCSI_VIRTIO=y +# CONFIG_SCSI_CHELSIO_FCOE is not set +# CONFIG_SCSI_DH is not set +# CONFIG_SCSI_OSD_INITIATOR is not set +CONFIG_ATA=y +# CONFIG_ATA_VERBOSE_ERROR is not set +CONFIG_ATA_ACPI=y +# CONFIG_SATA_ZPODD is not set +CONFIG_SATA_PMP=y + +# +# Controllers with non-SFF native interface +# +CONFIG_SATA_AHCI=y +CONFIG_SATA_MOBILE_LPM_POLICY=3 +CONFIG_SATA_AHCI_PLATFORM=y +# CONFIG_SATA_INIC162X is not set +# CONFIG_SATA_ACARD_AHCI is not set +# CONFIG_SATA_SIL24 is not set +CONFIG_ATA_SFF=y + +# +# SFF controllers with custom DMA interface +# +# CONFIG_PDC_ADMA is not set +# CONFIG_SATA_QSTOR is not set +# CONFIG_SATA_SX4 is not set +CONFIG_ATA_BMDMA=y + +# +# SATA SFF controllers with BMDMA +# +CONFIG_ATA_PIIX=y +# CONFIG_SATA_DWC is not set +# CONFIG_SATA_MV is not set +# CONFIG_SATA_NV is not set +# CONFIG_SATA_PROMISE is not set +# CONFIG_SATA_SIL is not set +# CONFIG_SATA_SIS is not set +# CONFIG_SATA_SVW is not set +# CONFIG_SATA_ULI is not set +# CONFIG_SATA_VIA is not set +# CONFIG_SATA_VITESSE is not set + +# +# PATA SFF controllers with BMDMA +# +# CONFIG_PATA_ALI is not set +# CONFIG_PATA_AMD is not set +# CONFIG_PATA_ARTOP is not set +# CONFIG_PATA_ATIIXP is not set +# CONFIG_PATA_ATP867X is not set 
+# CONFIG_PATA_CMD64X is not set +# CONFIG_PATA_CYPRESS is not set +# CONFIG_PATA_EFAR is not set +# CONFIG_PATA_HPT366 is not set +# CONFIG_PATA_HPT37X is not set +# CONFIG_PATA_HPT3X2N is not set +# CONFIG_PATA_HPT3X3 is not set +# CONFIG_PATA_IT8213 is not set +# CONFIG_PATA_IT821X is not set +# CONFIG_PATA_JMICRON is not set +# CONFIG_PATA_MARVELL is not set +# CONFIG_PATA_NETCELL is not set +# CONFIG_PATA_NINJA32 is not set +# CONFIG_PATA_NS87415 is not set +# CONFIG_PATA_OLDPIIX is not set +# CONFIG_PATA_OPTIDMA is not set +# CONFIG_PATA_PDC2027X is not set +# CONFIG_PATA_PDC_OLD is not set +# CONFIG_PATA_RADISYS is not set +# CONFIG_PATA_RDC is not set +CONFIG_PATA_SCH=y +# CONFIG_PATA_SERVERWORKS is not set +# CONFIG_PATA_SIL680 is not set +# CONFIG_PATA_SIS is not set +# CONFIG_PATA_TOSHIBA is not set +# CONFIG_PATA_TRIFLEX is not set +# CONFIG_PATA_VIA is not set +# CONFIG_PATA_WINBOND is not set + +# +# PIO-only SFF controllers +# +# CONFIG_PATA_CMD640_PCI is not set +CONFIG_PATA_MPIIX=y +# CONFIG_PATA_NS87410 is not set +# CONFIG_PATA_OPTI is not set +# CONFIG_PATA_PLATFORM is not set +# CONFIG_PATA_RZ1000 is not set + +# +# Generic fallback / legacy drivers +# +CONFIG_PATA_ACPI=m +CONFIG_ATA_GENERIC=y +# CONFIG_PATA_LEGACY is not set +CONFIG_MD=y +CONFIG_BLK_DEV_MD=y +# CONFIG_MD_AUTODETECT is not set +# CONFIG_MD_LINEAR is not set +CONFIG_MD_RAID0=y +CONFIG_MD_RAID1=y +CONFIG_MD_RAID10=y +CONFIG_MD_RAID456=m +CONFIG_MD_MULTIPATH=m +CONFIG_MD_FAULTY=m +CONFIG_BCACHE=m +# CONFIG_BCACHE_DEBUG is not set +# CONFIG_BCACHE_CLOSURES_DEBUG is not set +CONFIG_BLK_DEV_DM_BUILTIN=y +CONFIG_BLK_DEV_DM=y +# CONFIG_DM_MQ_DEFAULT is not set +CONFIG_DM_DEBUG=y +CONFIG_DM_BUFIO=m +# CONFIG_DM_DEBUG_BLOCK_MANAGER_LOCKING is not set +CONFIG_DM_BIO_PRISON=m +CONFIG_DM_PERSISTENT_DATA=m +CONFIG_DM_UNSTRIPED=m +CONFIG_DM_CRYPT=y +CONFIG_DM_SNAPSHOT=m +CONFIG_DM_THIN_PROVISIONING=m +# CONFIG_DM_CACHE is not set +# CONFIG_DM_WRITECACHE is not set +# CONFIG_DM_ERA is not set 
+CONFIG_DM_MIRROR=m +# CONFIG_DM_LOG_USERSPACE is not set +CONFIG_DM_RAID=m +CONFIG_DM_ZERO=m +CONFIG_DM_MULTIPATH=m +# CONFIG_DM_MULTIPATH_QL is not set +# CONFIG_DM_MULTIPATH_ST is not set +CONFIG_DM_DELAY=m +# CONFIG_DM_UEVENT is not set +# CONFIG_DM_FLAKEY is not set +# CONFIG_DM_VERITY is not set +# CONFIG_DM_SWITCH is not set +# CONFIG_DM_LOG_WRITES is not set +# CONFIG_DM_INTEGRITY is not set +CONFIG_DM_ZONED=m +# CONFIG_TARGET_CORE is not set +CONFIG_FUSION=y +CONFIG_FUSION_SPI=y +# CONFIG_FUSION_FC is not set +CONFIG_FUSION_SAS=y +CONFIG_FUSION_MAX_SGE=128 +# CONFIG_FUSION_CTL is not set +# CONFIG_FUSION_LOGGING is not set + +# +# IEEE 1394 (FireWire) support +# +# CONFIG_FIREWIRE is not set +# CONFIG_FIREWIRE_NOSY is not set +# CONFIG_MACINTOSH_DRIVERS is not set +CONFIG_NETDEVICES=y +CONFIG_MII=y +CONFIG_NET_CORE=y +CONFIG_BONDING=m +CONFIG_DUMMY=m +# CONFIG_EQUALIZER is not set +# CONFIG_NET_FC is not set +CONFIG_IFB=m +CONFIG_NET_TEAM=m +CONFIG_NET_TEAM_MODE_BROADCAST=m +CONFIG_NET_TEAM_MODE_ROUNDROBIN=m +CONFIG_NET_TEAM_MODE_RANDOM=m +CONFIG_NET_TEAM_MODE_ACTIVEBACKUP=m +CONFIG_NET_TEAM_MODE_LOADBALANCE=m +CONFIG_MACVLAN=y +CONFIG_MACVTAP=y +CONFIG_IPVLAN=m +CONFIG_IPVTAP=m +CONFIG_VXLAN=m +# CONFIG_GENEVE is not set +# CONFIG_GTP is not set +CONFIG_MACSEC=m +CONFIG_NETCONSOLE=m +CONFIG_NETCONSOLE_DYNAMIC=y +CONFIG_NETPOLL=y +CONFIG_NET_POLL_CONTROLLER=y +CONFIG_TUN=y +CONFIG_TAP=y +# CONFIG_TUN_VNET_CROSS_LE is not set +CONFIG_VETH=m +CONFIG_VIRTIO_NET=m +# CONFIG_NLMON is not set +CONFIG_VSOCKMON=m +# CONFIG_ARCNET is not set + +# +# CAIF transport drivers +# + +# +# Distributed Switch Architecture drivers +# +CONFIG_B53=m +# CONFIG_B53_SPI_DRIVER is not set +# CONFIG_B53_MDIO_DRIVER is not set +# CONFIG_B53_MMAP_DRIVER is not set +# CONFIG_B53_SRAB_DRIVER is not set +# CONFIG_NET_DSA_BCM_SF2 is not set +# CONFIG_NET_DSA_LOOP is not set +# CONFIG_NET_DSA_MT7530 is not set +# CONFIG_MICROCHIP_KSZ is not set +CONFIG_NET_DSA_MV88E6XXX=m 
+CONFIG_NET_DSA_MV88E6XXX_GLOBAL2=y +# CONFIG_NET_DSA_MV88E6XXX_PTP is not set +# CONFIG_NET_DSA_QCA8K is not set +# CONFIG_NET_DSA_REALTEK_SMI is not set +# CONFIG_NET_DSA_SMSC_LAN9303_I2C is not set +# CONFIG_NET_DSA_SMSC_LAN9303_MDIO is not set +CONFIG_ETHERNET=y +CONFIG_MDIO=m +# CONFIG_NET_VENDOR_3COM is not set +# CONFIG_NET_VENDOR_ADAPTEC is not set +CONFIG_NET_VENDOR_AGERE=y +CONFIG_ET131X=m +# CONFIG_NET_VENDOR_ALACRITECH is not set +# CONFIG_NET_VENDOR_ALTEON is not set +# CONFIG_ALTERA_TSE is not set +CONFIG_NET_VENDOR_AMAZON=y +CONFIG_ENA_ETHERNET=m +# CONFIG_NET_VENDOR_AMD is not set +# CONFIG_NET_VENDOR_AQUANTIA is not set +# CONFIG_NET_VENDOR_ARC is not set +CONFIG_NET_VENDOR_ATHEROS=y +CONFIG_ATL2=m +CONFIG_ATL1=m +CONFIG_ATL1E=m +CONFIG_ATL1C=m +CONFIG_ALX=m +# CONFIG_NET_VENDOR_AURORA is not set +CONFIG_NET_VENDOR_BROADCOM=y +CONFIG_B44=m +CONFIG_B44_PCI_AUTOSELECT=y +CONFIG_B44_PCICORE_AUTOSELECT=y +CONFIG_B44_PCI=y +# CONFIG_BCMGENET is not set +CONFIG_BNX2=m +CONFIG_CNIC=m +CONFIG_TIGON3=m +# CONFIG_TIGON3_HWMON is not set +CONFIG_BNX2X=m +CONFIG_BNX2X_SRIOV=y +# CONFIG_SYSTEMPORT is not set +CONFIG_BNXT=m +CONFIG_BNXT_SRIOV=y +CONFIG_BNXT_FLOWER_OFFLOAD=y +# CONFIG_BNXT_DCB is not set +CONFIG_BNXT_HWMON=y +# CONFIG_NET_VENDOR_BROCADE is not set +CONFIG_NET_VENDOR_CADENCE=y +# CONFIG_MACB is not set +# CONFIG_NET_VENDOR_CAVIUM is not set +# CONFIG_NET_VENDOR_CHELSIO is not set +CONFIG_NET_VENDOR_CISCO=y +CONFIG_ENIC=m +CONFIG_NET_VENDOR_CORTINA=y +# CONFIG_CX_ECAT is not set +# CONFIG_DNET is not set +# CONFIG_NET_VENDOR_DEC is not set +CONFIG_NET_VENDOR_DLINK=y +CONFIG_DL2K=m +CONFIG_SUNDANCE=m +CONFIG_SUNDANCE_MMIO=y +CONFIG_NET_VENDOR_EMULEX=y +CONFIG_BE2NET=m +CONFIG_BE2NET_HWMON=y +CONFIG_BE2NET_BE2=y +CONFIG_BE2NET_BE3=y +CONFIG_BE2NET_LANCER=y +CONFIG_BE2NET_SKYHAWK=y +# CONFIG_NET_VENDOR_EZCHIP is not set +# CONFIG_NET_VENDOR_HP is not set +CONFIG_NET_VENDOR_HUAWEI=y +CONFIG_HINIC=m +CONFIG_NET_VENDOR_I825XX=y +CONFIG_NET_VENDOR_INTEL=y 
+CONFIG_E100=y +# CONFIG_E1000 is not set +# CONFIG_E1000E is not set +# CONFIG_IGB is not set +# CONFIG_IGBVF is not set +CONFIG_IXGB=m +CONFIG_IXGBE=m +# CONFIG_IXGBE_HWMON is not set +CONFIG_IXGBE_DCA=y +# CONFIG_IXGBE_DCB is not set +CONFIG_IXGBEVF=m +CONFIG_I40E=m +CONFIG_I40E_DCB=y +CONFIG_I40EVF=m +CONFIG_ICE=m +CONFIG_FM10K=m +# CONFIG_JME is not set +CONFIG_NET_VENDOR_MARVELL=y +CONFIG_MVMDIO=m +CONFIG_SKGE=m +CONFIG_SKGE_DEBUG=y +CONFIG_SKGE_GENESIS=y +CONFIG_SKY2=m +# CONFIG_SKY2_DEBUG is not set +CONFIG_NET_VENDOR_MELLANOX=y +CONFIG_MLX4_EN=m +CONFIG_MLX4_EN_DCB=y +CONFIG_MLX4_CORE=m +CONFIG_MLX4_DEBUG=y +# CONFIG_MLX4_CORE_GEN2 is not set +CONFIG_MLX5_CORE=m +CONFIG_MLX5_ACCEL=y +CONFIG_MLX5_FPGA=y +CONFIG_MLX5_CORE_EN=y +CONFIG_MLX5_EN_ARFS=y +CONFIG_MLX5_EN_RXNFC=y +CONFIG_MLX5_MPFS=y +CONFIG_MLX5_ESWITCH=y +CONFIG_MLX5_CORE_EN_DCB=y +CONFIG_MLX5_CORE_IPOIB=y +CONFIG_MLX5_EN_IPSEC=y +# CONFIG_MLXSW_CORE is not set +CONFIG_MLXFW=m +# CONFIG_NET_VENDOR_MICREL is not set +# CONFIG_NET_VENDOR_MICROCHIP is not set +CONFIG_NET_VENDOR_MICROSEMI=y +# CONFIG_MSCC_OCELOT_SWITCH is not set +# CONFIG_NET_VENDOR_MYRI is not set +# CONFIG_FEALNX is not set +# CONFIG_NET_VENDOR_NATSEMI is not set +CONFIG_NET_VENDOR_NETERION=y +# CONFIG_S2IO is not set +# CONFIG_VXGE is not set +CONFIG_NET_VENDOR_NETRONOME=y +# CONFIG_NFP is not set +CONFIG_NET_VENDOR_NI=y +# CONFIG_NET_VENDOR_NVIDIA is not set +# CONFIG_NET_VENDOR_OKI is not set +# CONFIG_ETHOC is not set +CONFIG_NET_VENDOR_PACKET_ENGINES=y +# CONFIG_HAMACHI is not set +# CONFIG_YELLOWFIN is not set +# CONFIG_NET_VENDOR_QLOGIC is not set +# CONFIG_NET_VENDOR_QUALCOMM is not set +# CONFIG_NET_VENDOR_RDC is not set +CONFIG_NET_VENDOR_REALTEK=y +CONFIG_8139CP=m +CONFIG_8139TOO=m +CONFIG_8139TOO_PIO=y +CONFIG_8139TOO_TUNE_TWISTER=y +CONFIG_8139TOO_8129=y +CONFIG_8139_OLD_RX_RESET=y +CONFIG_R8169=m +# CONFIG_NET_VENDOR_RENESAS is not set +# CONFIG_NET_VENDOR_ROCKER is not set +# CONFIG_NET_VENDOR_SAMSUNG is not set +# 
CONFIG_NET_VENDOR_SEEQ is not set +CONFIG_NET_VENDOR_SOLARFLARE=y +CONFIG_SFC=m +CONFIG_SFC_MTD=y +CONFIG_SFC_MCDI_MON=y +CONFIG_SFC_SRIOV=y +CONFIG_SFC_MCDI_LOGGING=y +CONFIG_SFC_FALCON=m +CONFIG_SFC_FALCON_MTD=y +# CONFIG_NET_VENDOR_SILAN is not set +# CONFIG_NET_VENDOR_SIS is not set +# CONFIG_NET_VENDOR_SMSC is not set +CONFIG_NET_VENDOR_SOCIONEXT=y +# CONFIG_NET_VENDOR_STMICRO is not set +# CONFIG_NET_VENDOR_SUN is not set +# CONFIG_NET_VENDOR_SYNOPSYS is not set +# CONFIG_NET_VENDOR_TEHUTI is not set +# CONFIG_NET_VENDOR_TI is not set +# CONFIG_NET_VENDOR_VIA is not set +# CONFIG_NET_VENDOR_WIZNET is not set +# CONFIG_FDDI is not set +# CONFIG_HIPPI is not set +# CONFIG_NET_SB1000 is not set +CONFIG_MDIO_DEVICE=m +CONFIG_MDIO_BUS=m +# CONFIG_MDIO_BCM_UNIMAC is not set +# CONFIG_MDIO_BITBANG is not set +# CONFIG_MDIO_MSCC_MIIM is not set +# CONFIG_MDIO_THUNDER is not set +CONFIG_PHYLINK=m +CONFIG_PHYLIB=m +CONFIG_SWPHY=y +# CONFIG_LED_TRIGGER_PHY is not set + +# +# MII PHY device drivers +# +# CONFIG_SFP is not set +# CONFIG_AMD_PHY is not set +# CONFIG_AQUANTIA_PHY is not set +# CONFIG_ASIX_PHY is not set +# CONFIG_AT803X_PHY is not set +CONFIG_BCM7XXX_PHY=m +# CONFIG_BCM87XX_PHY is not set +CONFIG_BCM_NET_PHYLIB=m +# CONFIG_BROADCOM_PHY is not set +# CONFIG_CICADA_PHY is not set +CONFIG_CORTINA_PHY=m +# CONFIG_DAVICOM_PHY is not set +# CONFIG_DP83822_PHY is not set +# CONFIG_DP83TC811_PHY is not set +# CONFIG_DP83848_PHY is not set +# CONFIG_DP83867_PHY is not set +CONFIG_FIXED_PHY=m +# CONFIG_ICPLUS_PHY is not set +# CONFIG_INTEL_XWAY_PHY is not set +# CONFIG_LSI_ET1011C_PHY is not set +CONFIG_LXT_PHY=m +CONFIG_MARVELL_PHY=m +CONFIG_MARVELL_10G_PHY=m +# CONFIG_MICREL_PHY is not set +# CONFIG_MICROCHIP_PHY is not set +# CONFIG_MICROCHIP_T1_PHY is not set +# CONFIG_MICROSEMI_PHY is not set +# CONFIG_NATIONAL_PHY is not set +# CONFIG_QSEMI_PHY is not set +CONFIG_REALTEK_PHY=m +# CONFIG_RENESAS_PHY is not set +CONFIG_ROCKCHIP_PHY=m +# CONFIG_SMSC_PHY is not set 
+# CONFIG_STE10XP is not set +# CONFIG_TERANETICS_PHY is not set +# CONFIG_VITESSE_PHY is not set +# CONFIG_XILINX_GMII2RGMII is not set +# CONFIG_MICREL_KS8995MA is not set +# CONFIG_PPP is not set +# CONFIG_SLIP is not set +CONFIG_USB_NET_DRIVERS=m +CONFIG_USB_CATC=m +CONFIG_USB_KAWETH=m +CONFIG_USB_PEGASUS=m +CONFIG_USB_RTL8150=m +CONFIG_USB_RTL8152=m +# CONFIG_USB_LAN78XX is not set +CONFIG_USB_USBNET=m +CONFIG_USB_NET_AX8817X=m +CONFIG_USB_NET_AX88179_178A=m +CONFIG_USB_NET_CDCETHER=m +CONFIG_USB_NET_CDC_EEM=m +CONFIG_USB_NET_CDC_NCM=m +CONFIG_USB_NET_HUAWEI_CDC_NCM=m +CONFIG_USB_NET_CDC_MBIM=m +CONFIG_USB_NET_DM9601=m +# CONFIG_USB_NET_SR9700 is not set +# CONFIG_USB_NET_SR9800 is not set +# CONFIG_USB_NET_SMSC75XX is not set +# CONFIG_USB_NET_SMSC95XX is not set +# CONFIG_USB_NET_GL620A is not set +# CONFIG_USB_NET_NET1080 is not set +# CONFIG_USB_NET_PLUSB is not set +# CONFIG_USB_NET_MCS7830 is not set +CONFIG_USB_NET_RNDIS_HOST=m +CONFIG_USB_NET_CDC_SUBSET_ENABLE=m +CONFIG_USB_NET_CDC_SUBSET=m +CONFIG_USB_ALI_M5632=y +CONFIG_USB_AN2720=y +CONFIG_USB_BELKIN=y +CONFIG_USB_ARMLINUX=y +# CONFIG_USB_EPSON2888 is not set +# CONFIG_USB_KC2190 is not set +# CONFIG_USB_NET_ZAURUS is not set +# CONFIG_USB_NET_CX82310_ETH is not set +# CONFIG_USB_NET_KALMIA is not set +CONFIG_USB_NET_QMI_WWAN=m +# CONFIG_USB_HSO is not set +# CONFIG_USB_NET_INT51X1 is not set +# CONFIG_USB_IPHETH is not set +CONFIG_USB_SIERRA_NET=m +# CONFIG_USB_VL600 is not set +# CONFIG_USB_NET_CH9200 is not set +CONFIG_WLAN=y +# CONFIG_WIRELESS_WDS is not set +CONFIG_WLAN_VENDOR_ADMTEK=y +# CONFIG_ADM8211 is not set +CONFIG_ATH_COMMON=m +CONFIG_WLAN_VENDOR_ATH=y +# CONFIG_ATH_DEBUG is not set +CONFIG_ATH5K=m +# CONFIG_ATH5K_DEBUG is not set +# CONFIG_ATH5K_TRACER is not set +CONFIG_ATH5K_PCI=y +CONFIG_ATH9K_HW=m +CONFIG_ATH9K_COMMON=m +CONFIG_ATH9K_BTCOEX_SUPPORT=y +CONFIG_ATH9K=m +CONFIG_ATH9K_PCI=y +# CONFIG_ATH9K_AHB is not set +# CONFIG_ATH9K_DEBUGFS is not set +# CONFIG_ATH9K_DYNACK is not 
set +# CONFIG_ATH9K_WOW is not set +CONFIG_ATH9K_RFKILL=y +# CONFIG_ATH9K_CHANNEL_CONTEXT is not set +CONFIG_ATH9K_PCOEM=y +CONFIG_ATH9K_HTC=m +# CONFIG_ATH9K_HTC_DEBUGFS is not set +# CONFIG_ATH9K_HWRNG is not set +CONFIG_CARL9170=m +CONFIG_CARL9170_LEDS=y +CONFIG_CARL9170_WPC=y +# CONFIG_CARL9170_HWRNG is not set +# CONFIG_ATH6KL is not set +CONFIG_AR5523=m +CONFIG_WIL6210=m +CONFIG_WIL6210_ISR_COR=y +CONFIG_WIL6210_TRACING=y +CONFIG_WIL6210_DEBUGFS=y +CONFIG_ATH10K=m +CONFIG_ATH10K_CE=y +CONFIG_ATH10K_PCI=m +CONFIG_ATH10K_SDIO=m +CONFIG_ATH10K_USB=m +# CONFIG_ATH10K_DEBUG is not set +# CONFIG_ATH10K_DEBUGFS is not set +# CONFIG_ATH10K_TRACING is not set +# CONFIG_WCN36XX is not set +CONFIG_WLAN_VENDOR_ATMEL=y +# CONFIG_ATMEL is not set +# CONFIG_AT76C50X_USB is not set +CONFIG_WLAN_VENDOR_BROADCOM=y +CONFIG_B43=m +CONFIG_B43_BCMA=y +CONFIG_B43_SSB=y +CONFIG_B43_BUSES_BCMA_AND_SSB=y +# CONFIG_B43_BUSES_BCMA is not set +# CONFIG_B43_BUSES_SSB is not set +CONFIG_B43_PCI_AUTOSELECT=y +CONFIG_B43_PCICORE_AUTOSELECT=y +# CONFIG_B43_SDIO is not set +CONFIG_B43_BCMA_PIO=y +CONFIG_B43_PIO=y +CONFIG_B43_PHY_G=y +CONFIG_B43_PHY_N=y +CONFIG_B43_PHY_LP=y +CONFIG_B43_PHY_HT=y +CONFIG_B43_LEDS=y +CONFIG_B43_HWRNG=y +# CONFIG_B43_DEBUG is not set +# CONFIG_B43LEGACY is not set +CONFIG_BRCMUTIL=m +CONFIG_BRCMSMAC=m +CONFIG_BRCMFMAC=m +CONFIG_BRCMFMAC_PROTO_BCDC=y +CONFIG_BRCMFMAC_PROTO_MSGBUF=y +CONFIG_BRCMFMAC_SDIO=y +CONFIG_BRCMFMAC_USB=y +CONFIG_BRCMFMAC_PCIE=y +# CONFIG_BRCM_TRACING is not set +# CONFIG_BRCMDBG is not set +CONFIG_WLAN_VENDOR_CISCO=y +# CONFIG_AIRO is not set +CONFIG_WLAN_VENDOR_INTEL=y +# CONFIG_IPW2100 is not set +# CONFIG_IPW2200 is not set +# CONFIG_IWL4965 is not set +# CONFIG_IWL3945 is not set +CONFIG_IWLWIFI=m +CONFIG_IWLWIFI_LEDS=y +CONFIG_IWLDVM=m +CONFIG_IWLMVM=m +CONFIG_IWLWIFI_OPMODE_MODULAR=y +# CONFIG_IWLWIFI_BCAST_FILTERING is not set +CONFIG_IWLWIFI_PCIE_RTPM=y + +# +# Debugging Options +# +# CONFIG_IWLWIFI_DEBUG is not set +# 
CONFIG_IWLWIFI_DEVICE_TRACING is not set +CONFIG_WLAN_VENDOR_INTERSIL=y +# CONFIG_HOSTAP is not set +# CONFIG_HERMES is not set +# CONFIG_P54_COMMON is not set +# CONFIG_PRISM54 is not set +CONFIG_WLAN_VENDOR_MARVELL=y +# CONFIG_LIBERTAS is not set +# CONFIG_LIBERTAS_THINFIRM is not set +# CONFIG_MWIFIEX is not set +# CONFIG_MWL8K is not set +CONFIG_WLAN_VENDOR_MEDIATEK=y +# CONFIG_MT7601U is not set +CONFIG_MT76_CORE=m +CONFIG_MT76x2_COMMON=m +# CONFIG_MT76x0U is not set +CONFIG_MT76x2E=m +# CONFIG_MT76x2U is not set +CONFIG_WLAN_VENDOR_RALINK=y +# CONFIG_RT2X00 is not set +CONFIG_WLAN_VENDOR_REALTEK=y +# CONFIG_RTL8180 is not set +# CONFIG_RTL8187 is not set +# CONFIG_RTL_CARDS is not set +# CONFIG_RTL8XXXU is not set +CONFIG_WLAN_VENDOR_RSI=y +# CONFIG_RSI_91X is not set +CONFIG_WLAN_VENDOR_ST=y +# CONFIG_CW1200 is not set +CONFIG_WLAN_VENDOR_TI=y +# CONFIG_WL1251 is not set +# CONFIG_WL12XX is not set +# CONFIG_WL18XX is not set +# CONFIG_WLCORE is not set +CONFIG_WLAN_VENDOR_ZYDAS=y +# CONFIG_USB_ZD1201 is not set +# CONFIG_ZD1211RW is not set +CONFIG_WLAN_VENDOR_QUANTENNA=y +CONFIG_QTNFMAC=m +CONFIG_QTNFMAC_PEARL_PCIE=m +# CONFIG_MAC80211_HWSIM is not set +CONFIG_USB_NET_RNDIS_WLAN=m + +# +# Enable WiMAX (Networking options) to see the WiMAX drivers +# +# CONFIG_WAN is not set +CONFIG_VMXNET3=m +# CONFIG_FUJITSU_ES is not set +# CONFIG_THUNDERBOLT_NET is not set +# CONFIG_NETDEVSIM is not set +CONFIG_NET_FAILOVER=m +# CONFIG_ISDN is not set +# CONFIG_NVM is not set + +# +# Input device support +# +CONFIG_INPUT=y +# CONFIG_INPUT_LEDS is not set +CONFIG_INPUT_FF_MEMLESS=y +CONFIG_INPUT_POLLDEV=m +CONFIG_INPUT_SPARSEKMAP=m +CONFIG_INPUT_MATRIXKMAP=m + +# +# Userland interfaces +# +# CONFIG_INPUT_MOUSEDEV is not set +# CONFIG_INPUT_JOYDEV is not set +CONFIG_INPUT_EVDEV=y +# CONFIG_INPUT_EVBUG is not set + +# +# Input Device Drivers +# +CONFIG_INPUT_KEYBOARD=y +# CONFIG_KEYBOARD_ADC is not set +# CONFIG_KEYBOARD_ADP5588 is not set +# CONFIG_KEYBOARD_ADP5589 is not 
set +CONFIG_KEYBOARD_ATKBD=m +# CONFIG_KEYBOARD_QT1070 is not set +# CONFIG_KEYBOARD_QT2160 is not set +CONFIG_KEYBOARD_DLINK_DIR685=m +# CONFIG_KEYBOARD_LKKBD is not set +# CONFIG_KEYBOARD_GPIO is not set +# CONFIG_KEYBOARD_GPIO_POLLED is not set +# CONFIG_KEYBOARD_TCA6416 is not set +# CONFIG_KEYBOARD_TCA8418 is not set +# CONFIG_KEYBOARD_MATRIX is not set +CONFIG_KEYBOARD_LM8323=m +# CONFIG_KEYBOARD_LM8333 is not set +# CONFIG_KEYBOARD_MAX7359 is not set +# CONFIG_KEYBOARD_MCS is not set +# CONFIG_KEYBOARD_MPR121 is not set +# CONFIG_KEYBOARD_NEWTON is not set +# CONFIG_KEYBOARD_OPENCORES is not set +# CONFIG_KEYBOARD_SAMSUNG is not set +# CONFIG_KEYBOARD_STOWAWAY is not set +# CONFIG_KEYBOARD_SUNKBD is not set +# CONFIG_KEYBOARD_TM2_TOUCHKEY is not set +# CONFIG_KEYBOARD_XTKBD is not set +CONFIG_INPUT_MOUSE=y +CONFIG_MOUSE_PS2=m +CONFIG_MOUSE_PS2_ALPS=y +# CONFIG_MOUSE_PS2_BYD is not set +CONFIG_MOUSE_PS2_LOGIPS2PP=y +CONFIG_MOUSE_PS2_SYNAPTICS=y +CONFIG_MOUSE_PS2_SYNAPTICS_SMBUS=y +CONFIG_MOUSE_PS2_CYPRESS=y +CONFIG_MOUSE_PS2_LIFEBOOK=y +CONFIG_MOUSE_PS2_TRACKPOINT=y +# CONFIG_MOUSE_PS2_ELANTECH is not set +# CONFIG_MOUSE_PS2_SENTELIC is not set +# CONFIG_MOUSE_PS2_TOUCHKIT is not set +CONFIG_MOUSE_PS2_FOCALTECH=y +# CONFIG_MOUSE_PS2_VMMOUSE is not set +CONFIG_MOUSE_PS2_SMBUS=y +# CONFIG_MOUSE_SERIAL is not set +CONFIG_MOUSE_APPLETOUCH=m +CONFIG_MOUSE_BCM5974=m +# CONFIG_MOUSE_CYAPA is not set +CONFIG_MOUSE_ELAN_I2C=m +CONFIG_MOUSE_ELAN_I2C_I2C=y +# CONFIG_MOUSE_ELAN_I2C_SMBUS is not set +# CONFIG_MOUSE_VSXXXAA is not set +# CONFIG_MOUSE_GPIO is not set +CONFIG_MOUSE_SYNAPTICS_I2C=m +CONFIG_MOUSE_SYNAPTICS_USB=m +# CONFIG_INPUT_JOYSTICK is not set +# CONFIG_INPUT_TABLET is not set +CONFIG_INPUT_TOUCHSCREEN=y +CONFIG_TOUCHSCREEN_PROPERTIES=y +CONFIG_TOUCHSCREEN_ADS7846=m +CONFIG_TOUCHSCREEN_AD7877=m +CONFIG_TOUCHSCREEN_AD7879=m +CONFIG_TOUCHSCREEN_AD7879_I2C=m +CONFIG_TOUCHSCREEN_AD7879_SPI=m +# CONFIG_TOUCHSCREEN_ADC is not set +CONFIG_TOUCHSCREEN_ATMEL_MXT=m 
+CONFIG_TOUCHSCREEN_ATMEL_MXT_T37=y +CONFIG_TOUCHSCREEN_AUO_PIXCIR=m +CONFIG_TOUCHSCREEN_BU21013=m +# CONFIG_TOUCHSCREEN_BU21029 is not set +# CONFIG_TOUCHSCREEN_CHIPONE_ICN8505 is not set +CONFIG_TOUCHSCREEN_CY8CTMG110=m +CONFIG_TOUCHSCREEN_CYTTSP_CORE=m +CONFIG_TOUCHSCREEN_CYTTSP_I2C=m +CONFIG_TOUCHSCREEN_CYTTSP_SPI=m +CONFIG_TOUCHSCREEN_CYTTSP4_CORE=m +CONFIG_TOUCHSCREEN_CYTTSP4_I2C=m +CONFIG_TOUCHSCREEN_CYTTSP4_SPI=m +CONFIG_TOUCHSCREEN_DYNAPRO=m +CONFIG_TOUCHSCREEN_HAMPSHIRE=m +CONFIG_TOUCHSCREEN_EETI=m +CONFIG_TOUCHSCREEN_EGALAX_SERIAL=m +CONFIG_TOUCHSCREEN_EXC3000=m +CONFIG_TOUCHSCREEN_FUJITSU=m +CONFIG_TOUCHSCREEN_GOODIX=m +CONFIG_TOUCHSCREEN_HIDEEP=m +CONFIG_TOUCHSCREEN_ILI210X=m +CONFIG_TOUCHSCREEN_S6SY761=m +CONFIG_TOUCHSCREEN_GUNZE=m +CONFIG_TOUCHSCREEN_EKTF2127=m +CONFIG_TOUCHSCREEN_ELAN=m +CONFIG_TOUCHSCREEN_ELO=m +CONFIG_TOUCHSCREEN_WACOM_W8001=m +CONFIG_TOUCHSCREEN_WACOM_I2C=m +CONFIG_TOUCHSCREEN_MAX11801=m +CONFIG_TOUCHSCREEN_MCS5000=m +CONFIG_TOUCHSCREEN_MMS114=m +CONFIG_TOUCHSCREEN_MELFAS_MIP4=m +CONFIG_TOUCHSCREEN_MTOUCH=m +CONFIG_TOUCHSCREEN_INEXIO=m +CONFIG_TOUCHSCREEN_MK712=m +CONFIG_TOUCHSCREEN_PENMOUNT=m +CONFIG_TOUCHSCREEN_EDT_FT5X06=m +CONFIG_TOUCHSCREEN_TOUCHRIGHT=m +CONFIG_TOUCHSCREEN_TOUCHWIN=m +CONFIG_TOUCHSCREEN_PIXCIR=m +CONFIG_TOUCHSCREEN_WDT87XX_I2C=m +CONFIG_TOUCHSCREEN_WM97XX=m +CONFIG_TOUCHSCREEN_WM9705=y +CONFIG_TOUCHSCREEN_WM9712=y +CONFIG_TOUCHSCREEN_WM9713=y +CONFIG_TOUCHSCREEN_USB_COMPOSITE=m +CONFIG_TOUCHSCREEN_USB_EGALAX=y +CONFIG_TOUCHSCREEN_USB_PANJIT=y +CONFIG_TOUCHSCREEN_USB_3M=y +CONFIG_TOUCHSCREEN_USB_ITM=y +CONFIG_TOUCHSCREEN_USB_ETURBO=y +CONFIG_TOUCHSCREEN_USB_GUNZE=y +CONFIG_TOUCHSCREEN_USB_DMC_TSC10=y +CONFIG_TOUCHSCREEN_USB_IRTOUCH=y +CONFIG_TOUCHSCREEN_USB_IDEALTEK=y +CONFIG_TOUCHSCREEN_USB_GENERAL_TOUCH=y +CONFIG_TOUCHSCREEN_USB_GOTOP=y +CONFIG_TOUCHSCREEN_USB_JASTEC=y +CONFIG_TOUCHSCREEN_USB_ELO=y +CONFIG_TOUCHSCREEN_USB_E2I=y +CONFIG_TOUCHSCREEN_USB_ZYTRONIC=y +CONFIG_TOUCHSCREEN_USB_ETT_TC45USB=y 
+CONFIG_TOUCHSCREEN_USB_NEXIO=y +CONFIG_TOUCHSCREEN_USB_EASYTOUCH=y +CONFIG_TOUCHSCREEN_TOUCHIT213=m +CONFIG_TOUCHSCREEN_TSC_SERIO=m +CONFIG_TOUCHSCREEN_TSC200X_CORE=m +CONFIG_TOUCHSCREEN_TSC2004=m +CONFIG_TOUCHSCREEN_TSC2005=m +CONFIG_TOUCHSCREEN_TSC2007=m +# CONFIG_TOUCHSCREEN_TSC2007_IIO is not set +CONFIG_TOUCHSCREEN_RM_TS=m +CONFIG_TOUCHSCREEN_SILEAD=m +CONFIG_TOUCHSCREEN_SIS_I2C=m +CONFIG_TOUCHSCREEN_ST1232=m +CONFIG_TOUCHSCREEN_STMFTS=m +CONFIG_TOUCHSCREEN_SUR40=m +CONFIG_TOUCHSCREEN_SURFACE3_SPI=m +CONFIG_TOUCHSCREEN_SX8654=m +CONFIG_TOUCHSCREEN_TPS6507X=m +CONFIG_TOUCHSCREEN_ZET6223=m +CONFIG_TOUCHSCREEN_ZFORCE=m +CONFIG_TOUCHSCREEN_ROHM_BU21023=m +CONFIG_INPUT_MISC=y +# CONFIG_INPUT_AD714X is not set +# CONFIG_INPUT_BMA150 is not set +# CONFIG_INPUT_E3X0_BUTTON is not set +# CONFIG_INPUT_MMA8450 is not set +# CONFIG_INPUT_APANEL is not set +# CONFIG_INPUT_GP2A is not set +# CONFIG_INPUT_GPIO_BEEPER is not set +# CONFIG_INPUT_GPIO_DECODER is not set +# CONFIG_INPUT_ATLAS_BTNS is not set +# CONFIG_INPUT_ATI_REMOTE2 is not set +# CONFIG_INPUT_KEYSPAN_REMOTE is not set +# CONFIG_INPUT_KXTJ9 is not set +# CONFIG_INPUT_POWERMATE is not set +# CONFIG_INPUT_YEALINK is not set +# CONFIG_INPUT_CM109 is not set +CONFIG_INPUT_UINPUT=m +# CONFIG_INPUT_PCF8574 is not set +# CONFIG_INPUT_PWM_BEEPER is not set +CONFIG_INPUT_PWM_VIBRA=m +# CONFIG_INPUT_GPIO_ROTARY_ENCODER is not set +# CONFIG_INPUT_ADXL34X is not set +# CONFIG_INPUT_IMS_PCU is not set +# CONFIG_INPUT_CMA3000 is not set +# CONFIG_INPUT_IDEAPAD_SLIDEBAR is not set +# CONFIG_INPUT_DRV260X_HAPTICS is not set +# CONFIG_INPUT_DRV2665_HAPTICS is not set +# CONFIG_INPUT_DRV2667_HAPTICS is not set +CONFIG_RMI4_CORE=y +CONFIG_RMI4_I2C=m +CONFIG_RMI4_SPI=m +CONFIG_RMI4_SMB=m +CONFIG_RMI4_F03=y +CONFIG_RMI4_F03_SERIO=y +CONFIG_RMI4_2D_SENSOR=y +CONFIG_RMI4_F11=y +CONFIG_RMI4_F12=y +CONFIG_RMI4_F30=y +# CONFIG_RMI4_F34 is not set +CONFIG_RMI4_F55=y + +# +# Hardware I/O ports +# +CONFIG_SERIO=y 
+CONFIG_ARCH_MIGHT_HAVE_PC_SERIO=y +CONFIG_SERIO_I8042=m +CONFIG_SERIO_SERPORT=m +# CONFIG_SERIO_CT82C710 is not set +# CONFIG_SERIO_PCIPS2 is not set +CONFIG_SERIO_LIBPS2=m +CONFIG_SERIO_RAW=m +# CONFIG_SERIO_ALTERA_PS2 is not set +# CONFIG_SERIO_PS2MULT is not set +# CONFIG_SERIO_ARC_PS2 is not set +CONFIG_SERIO_GPIO_PS2=m +# CONFIG_USERIO is not set +# CONFIG_GAMEPORT is not set + +# +# Character devices +# +CONFIG_TTY=y +CONFIG_VT=y +CONFIG_CONSOLE_TRANSLATIONS=y +CONFIG_VT_CONSOLE=y +CONFIG_VT_CONSOLE_SLEEP=y +CONFIG_HW_CONSOLE=y +CONFIG_VT_HW_CONSOLE_BINDING=y +CONFIG_UNIX98_PTYS=y +# CONFIG_LEGACY_PTYS is not set +# CONFIG_SERIAL_NONSTANDARD is not set +# CONFIG_NOZOMI is not set +# CONFIG_N_GSM is not set +# CONFIG_TRACE_SINK is not set +# CONFIG_CBC_LDISC is not set +# CONFIG_DEVMEM is not set +# CONFIG_DEVKMEM is not set + +# +# Serial drivers +# +CONFIG_SERIAL_EARLYCON=y +CONFIG_SERIAL_8250=y +# CONFIG_SERIAL_8250_DEPRECATED_OPTIONS is not set +CONFIG_SERIAL_8250_PNP=y +CONFIG_SERIAL_8250_FINTEK=y +CONFIG_SERIAL_8250_CONSOLE=y +CONFIG_SERIAL_8250_DMA=y +CONFIG_SERIAL_8250_PCI=y +CONFIG_SERIAL_8250_EXAR=m +CONFIG_SERIAL_8250_NR_UARTS=4 +CONFIG_SERIAL_8250_RUNTIME_UARTS=3 +CONFIG_SERIAL_8250_EXTENDED=y +CONFIG_SERIAL_8250_MANY_PORTS=y +CONFIG_SERIAL_8250_SHARE_IRQ=y +CONFIG_SERIAL_8250_DETECT_IRQ=y +CONFIG_SERIAL_8250_RSA=y +CONFIG_SERIAL_8250_DW=y +# CONFIG_SERIAL_8250_RT288X is not set +CONFIG_SERIAL_8250_LPSS=y +CONFIG_SERIAL_8250_MID=y +# CONFIG_SERIAL_8250_MOXA is not set + +# +# Non-8250 serial port support +# +# CONFIG_SERIAL_MAX3100 is not set +# CONFIG_SERIAL_MAX310X is not set +# CONFIG_SERIAL_UARTLITE is not set +CONFIG_SERIAL_CORE=y +CONFIG_SERIAL_CORE_CONSOLE=y +# CONFIG_SERIAL_JSM is not set +# CONFIG_SERIAL_SCCNXP is not set +# CONFIG_SERIAL_SC16IS7XX is not set +# CONFIG_SERIAL_ALTERA_JTAGUART is not set +# CONFIG_SERIAL_ALTERA_UART is not set +# CONFIG_SERIAL_IFX6X60 is not set +# CONFIG_SERIAL_ARC is not set +# CONFIG_SERIAL_RP2 is not 
set +# CONFIG_SERIAL_FSL_LPUART is not set +CONFIG_SERIAL_DEV_BUS=m +CONFIG_TTY_PRINTK=y +CONFIG_HVC_DRIVER=y +CONFIG_VIRTIO_CONSOLE=y +CONFIG_IPMI_HANDLER=m +CONFIG_IPMI_DMI_DECODE=y +# CONFIG_IPMI_PANIC_EVENT is not set +CONFIG_IPMI_DEVICE_INTERFACE=m +CONFIG_IPMI_SI=m +CONFIG_IPMI_SSIF=m +CONFIG_IPMI_WATCHDOG=m +CONFIG_IPMI_POWEROFF=m +CONFIG_HW_RANDOM=y +# CONFIG_HW_RANDOM_TIMERIOMEM is not set +CONFIG_HW_RANDOM_INTEL=y +CONFIG_HW_RANDOM_AMD=y +# CONFIG_HW_RANDOM_VIA is not set +CONFIG_HW_RANDOM_VIRTIO=y +CONFIG_NVRAM=m +# CONFIG_R3964 is not set +# CONFIG_APPLICOM is not set +# CONFIG_MWAVE is not set +# CONFIG_RAW_DRIVER is not set +CONFIG_HPET=y +CONFIG_HPET_MMAP=y +CONFIG_HPET_MMAP_DEFAULT=y +# CONFIG_HANGCHECK_TIMER is not set +# CONFIG_TCG_TPM is not set +# CONFIG_TELCLOCK is not set +CONFIG_DEVPORT=y +# CONFIG_XILLYBUS is not set +CONFIG_RPMB=y +# CONFIG_RPMB_INTF_DEV is not set +# CONFIG_RPMB_SIM is not set +# CONFIG_VIRTIO_RPMB is not set +# CONFIG_RPMB_MUX is not set +# CONFIG_RANDOM_TRUST_CPU is not set + +# +# I2C support +# +CONFIG_I2C=y +CONFIG_ACPI_I2C_OPREGION=y +CONFIG_I2C_BOARDINFO=y +# CONFIG_I2C_COMPAT is not set +# CONFIG_I2C_CHARDEV is not set +CONFIG_I2C_MUX=m + +# +# Multiplexer I2C Chip support +# +CONFIG_I2C_MUX_GPIO=m +CONFIG_I2C_MUX_LTC4306=m +CONFIG_I2C_MUX_PCA9541=m +CONFIG_I2C_MUX_PCA954x=m +CONFIG_I2C_MUX_REG=m +CONFIG_I2C_MUX_MLXCPLD=m +CONFIG_I2C_HELPER_AUTO=y +CONFIG_I2C_SMBUS=m +CONFIG_I2C_ALGOBIT=y + +# +# I2C Hardware Bus support +# + +# +# PC SMBus host controller drivers +# +# CONFIG_I2C_ALI1535 is not set +# CONFIG_I2C_ALI1563 is not set +# CONFIG_I2C_ALI15X3 is not set +CONFIG_I2C_AMD756=m +CONFIG_I2C_AMD756_S4882=m +CONFIG_I2C_AMD8111=m +CONFIG_I2C_I801=m +CONFIG_I2C_ISCH=m +CONFIG_I2C_ISMT=m +CONFIG_I2C_PIIX4=m +# CONFIG_I2C_NFORCE2 is not set +# CONFIG_I2C_SIS5595 is not set +# CONFIG_I2C_SIS630 is not set +# CONFIG_I2C_SIS96X is not set +# CONFIG_I2C_VIA is not set +# CONFIG_I2C_VIAPRO is not set + +# +# ACPI 
drivers +# +CONFIG_I2C_SCMI=y + +# +# I2C system bus drivers (mostly embedded / system-on-chip) +# +# CONFIG_I2C_CBUS_GPIO is not set +CONFIG_I2C_DESIGNWARE_CORE=y +CONFIG_I2C_DESIGNWARE_PLATFORM=y +CONFIG_I2C_DESIGNWARE_SLAVE=y +CONFIG_I2C_DESIGNWARE_PCI=y +CONFIG_I2C_DESIGNWARE_BAYTRAIL=y +# CONFIG_I2C_EMEV2 is not set +# CONFIG_I2C_GPIO is not set +# CONFIG_I2C_OCORES is not set +# CONFIG_I2C_PCA_PLATFORM is not set +# CONFIG_I2C_SIMTEC is not set +# CONFIG_I2C_XILINX is not set + +# +# External I2C/SMBus adapter drivers +# +# CONFIG_I2C_DIOLAN_U2C is not set +# CONFIG_I2C_PARPORT_LIGHT is not set +# CONFIG_I2C_ROBOTFUZZ_OSIF is not set +# CONFIG_I2C_TAOS_EVM is not set +# CONFIG_I2C_TINY_USB is not set + +# +# Other I2C/SMBus bus drivers +# +# CONFIG_I2C_MLXCPLD is not set +# CONFIG_I2C_STUB is not set +CONFIG_I2C_SLAVE=y +CONFIG_I2C_SLAVE_EEPROM=m +# CONFIG_I2C_DEBUG_CORE is not set +# CONFIG_I2C_DEBUG_ALGO is not set +# CONFIG_I2C_DEBUG_BUS is not set +CONFIG_SPI=y +# CONFIG_SPI_DEBUG is not set +CONFIG_SPI_MASTER=y +# CONFIG_SPI_MEM is not set + +# +# SPI Master Controller Drivers +# +CONFIG_SPI_ALTERA=m +# CONFIG_SPI_AXI_SPI_ENGINE is not set +CONFIG_SPI_BITBANG=m +CONFIG_SPI_CADENCE=m +CONFIG_SPI_DESIGNWARE=m +CONFIG_SPI_DW_PCI=m +CONFIG_SPI_DW_MID_DMA=y +CONFIG_SPI_DW_MMIO=m +# CONFIG_SPI_GPIO is not set +# CONFIG_SPI_OC_TINY is not set +CONFIG_SPI_PXA2XX=m +CONFIG_SPI_PXA2XX_PCI=m +# CONFIG_SPI_ROCKCHIP is not set +CONFIG_SPI_SC18IS602=m +CONFIG_SPI_XCOMM=m +CONFIG_SPI_XILINX=m +# CONFIG_SPI_ZYNQMP_GQSPI is not set + +# +# SPI Protocol Masters +# +# CONFIG_SPI_SPIDEV is not set +# CONFIG_SPI_LOOPBACK_TEST is not set +# CONFIG_SPI_TLE62X0 is not set +# CONFIG_SPI_SLAVE is not set +# CONFIG_SPMI is not set +# CONFIG_HSI is not set +CONFIG_PPS=y +# CONFIG_PPS_DEBUG is not set + +# +# PPS clients support +# +# CONFIG_PPS_CLIENT_KTIMER is not set +# CONFIG_PPS_CLIENT_LDISC is not set +# CONFIG_PPS_CLIENT_GPIO is not set + +# +# PPS generators support +# + +# 
+# PTP clock support +# +CONFIG_PTP_1588_CLOCK=y +# CONFIG_DP83640_PHY is not set +CONFIG_PINCTRL=y +CONFIG_PINMUX=y +CONFIG_PINCONF=y +CONFIG_GENERIC_PINCONF=y +# CONFIG_DEBUG_PINCTRL is not set +CONFIG_PINCTRL_AMD=m +# CONFIG_PINCTRL_MCP23S08 is not set +# CONFIG_PINCTRL_SX150X is not set +CONFIG_PINCTRL_BAYTRAIL=y +CONFIG_PINCTRL_CHERRYVIEW=m +CONFIG_PINCTRL_INTEL=m +CONFIG_PINCTRL_BROXTON=m +CONFIG_PINCTRL_CANNONLAKE=m +CONFIG_PINCTRL_CEDARFORK=m +CONFIG_PINCTRL_DENVERTON=m +CONFIG_PINCTRL_GEMINILAKE=m +# CONFIG_PINCTRL_ICELAKE is not set +CONFIG_PINCTRL_LEWISBURG=m +CONFIG_PINCTRL_SUNRISEPOINT=m +CONFIG_GPIOLIB=y +CONFIG_GPIOLIB_FASTPATH_LIMIT=512 +CONFIG_GPIO_ACPI=y +CONFIG_GPIOLIB_IRQCHIP=y +# CONFIG_DEBUG_GPIO is not set +# CONFIG_GPIO_SYSFS is not set + +# +# Memory mapped GPIO drivers +# +# CONFIG_GPIO_AMDPT is not set +# CONFIG_GPIO_DWAPB is not set +# CONFIG_GPIO_EXAR is not set +# CONFIG_GPIO_GENERIC_PLATFORM is not set +CONFIG_GPIO_ICH=m +CONFIG_GPIO_LYNXPOINT=m +# CONFIG_GPIO_MB86S7X is not set +# CONFIG_GPIO_MOCKUP is not set +# CONFIG_GPIO_VX855 is not set + +# +# Port-mapped I/O GPIO drivers +# +# CONFIG_GPIO_F7188X is not set +# CONFIG_GPIO_IT87 is not set +# CONFIG_GPIO_SCH is not set +# CONFIG_GPIO_SCH311X is not set +# CONFIG_GPIO_WINBOND is not set +# CONFIG_GPIO_WS16C48 is not set + +# +# I2C GPIO expanders +# +# CONFIG_GPIO_ADP5588 is not set +# CONFIG_GPIO_MAX7300 is not set +# CONFIG_GPIO_MAX732X is not set +# CONFIG_GPIO_PCA953X is not set +# CONFIG_GPIO_PCF857X is not set +# CONFIG_GPIO_TPIC2810 is not set + +# +# MFD GPIO expanders +# +# CONFIG_GPIO_BD9571MWV is not set +CONFIG_GPIO_CRYSTAL_COVE=m +CONFIG_GPIO_WHISKEY_COVE=m + +# +# PCI GPIO expanders +# +# CONFIG_GPIO_AMD8111 is not set +# CONFIG_GPIO_BT8XX is not set +# CONFIG_GPIO_ML_IOH is not set +# CONFIG_GPIO_PCI_IDIO_16 is not set +# CONFIG_GPIO_PCIE_IDIO_24 is not set +# CONFIG_GPIO_RDC321X is not set + +# +# SPI GPIO expanders +# +# CONFIG_GPIO_MAX3191X is not set +# 
CONFIG_GPIO_MAX7301 is not set +# CONFIG_GPIO_MC33880 is not set +# CONFIG_GPIO_PISOSR is not set +# CONFIG_GPIO_XRA1403 is not set + +# +# USB GPIO expanders +# +# CONFIG_W1 is not set +# CONFIG_POWER_AVS is not set +# CONFIG_POWER_RESET is not set +CONFIG_POWER_SUPPLY=y +# CONFIG_POWER_SUPPLY_DEBUG is not set +# CONFIG_PDA_POWER is not set +# CONFIG_GENERIC_ADC_BATTERY is not set +# CONFIG_TEST_POWER is not set +# CONFIG_CHARGER_ADP5061 is not set +# CONFIG_BATTERY_DS2780 is not set +# CONFIG_BATTERY_DS2781 is not set +# CONFIG_BATTERY_DS2782 is not set +# CONFIG_BATTERY_SBS is not set +# CONFIG_CHARGER_SBS is not set +CONFIG_MANAGER_SBS=m +# CONFIG_BATTERY_BQ27XXX is not set +# CONFIG_BATTERY_MAX17040 is not set +# CONFIG_BATTERY_MAX17042 is not set +# CONFIG_CHARGER_ISP1704 is not set +# CONFIG_CHARGER_MAX8903 is not set +# CONFIG_CHARGER_LP8727 is not set +# CONFIG_CHARGER_GPIO is not set +# CONFIG_CHARGER_LTC3651 is not set +# CONFIG_CHARGER_BQ2415X is not set +# CONFIG_CHARGER_BQ24190 is not set +# CONFIG_CHARGER_BQ24257 is not set +# CONFIG_CHARGER_BQ24735 is not set +# CONFIG_CHARGER_BQ25890 is not set +# CONFIG_CHARGER_SMB347 is not set +# CONFIG_BATTERY_GAUGE_LTC2941 is not set +# CONFIG_CHARGER_RT9455 is not set +CONFIG_HWMON=y +# CONFIG_HWMON_DEBUG_CHIP is not set + +# +# Native drivers +# +# CONFIG_SENSORS_ABITUGURU is not set +# CONFIG_SENSORS_ABITUGURU3 is not set +CONFIG_SENSORS_AD7314=m +# CONFIG_SENSORS_AD7414 is not set +# CONFIG_SENSORS_AD7418 is not set +# CONFIG_SENSORS_ADM1021 is not set +# CONFIG_SENSORS_ADM1025 is not set +# CONFIG_SENSORS_ADM1026 is not set +# CONFIG_SENSORS_ADM1029 is not set +# CONFIG_SENSORS_ADM1031 is not set +# CONFIG_SENSORS_ADM9240 is not set +# CONFIG_SENSORS_ADT7310 is not set +# CONFIG_SENSORS_ADT7410 is not set +# CONFIG_SENSORS_ADT7411 is not set +# CONFIG_SENSORS_ADT7462 is not set +# CONFIG_SENSORS_ADT7470 is not set +# CONFIG_SENSORS_ADT7475 is not set +# CONFIG_SENSORS_ASC7621 is not set +# 
CONFIG_SENSORS_K8TEMP is not set +# CONFIG_SENSORS_K10TEMP is not set +CONFIG_SENSORS_FAM15H_POWER=m +CONFIG_SENSORS_APPLESMC=m +# CONFIG_SENSORS_ASB100 is not set +# CONFIG_SENSORS_ASPEED is not set +# CONFIG_SENSORS_ATXP1 is not set +# CONFIG_SENSORS_DS620 is not set +# CONFIG_SENSORS_DS1621 is not set +# CONFIG_SENSORS_DELL_SMM is not set +# CONFIG_SENSORS_I5K_AMB is not set +# CONFIG_SENSORS_F71805F is not set +# CONFIG_SENSORS_F71882FG is not set +# CONFIG_SENSORS_F75375S is not set +# CONFIG_SENSORS_FSCHMD is not set +# CONFIG_SENSORS_GL518SM is not set +# CONFIG_SENSORS_GL520SM is not set +# CONFIG_SENSORS_G760A is not set +# CONFIG_SENSORS_G762 is not set +# CONFIG_SENSORS_HIH6130 is not set +# CONFIG_SENSORS_IBMAEM is not set +# CONFIG_SENSORS_IBMPEX is not set +# CONFIG_SENSORS_IIO_HWMON is not set +CONFIG_SENSORS_I5500=m +CONFIG_SENSORS_CORETEMP=y +# CONFIG_SENSORS_IT87 is not set +# CONFIG_SENSORS_JC42 is not set +# CONFIG_SENSORS_POWR1220 is not set +# CONFIG_SENSORS_LINEAGE is not set +# CONFIG_SENSORS_LTC2945 is not set +# CONFIG_SENSORS_LTC2990 is not set +# CONFIG_SENSORS_LTC4151 is not set +# CONFIG_SENSORS_LTC4215 is not set +# CONFIG_SENSORS_LTC4222 is not set +# CONFIG_SENSORS_LTC4245 is not set +# CONFIG_SENSORS_LTC4260 is not set +# CONFIG_SENSORS_LTC4261 is not set +# CONFIG_SENSORS_MAX1111 is not set +# CONFIG_SENSORS_MAX16065 is not set +# CONFIG_SENSORS_MAX1619 is not set +# CONFIG_SENSORS_MAX1668 is not set +# CONFIG_SENSORS_MAX197 is not set +# CONFIG_SENSORS_MAX31722 is not set +# CONFIG_SENSORS_MAX6621 is not set +# CONFIG_SENSORS_MAX6639 is not set +# CONFIG_SENSORS_MAX6642 is not set +# CONFIG_SENSORS_MAX6650 is not set +# CONFIG_SENSORS_MAX6697 is not set +# CONFIG_SENSORS_MAX31790 is not set +# CONFIG_SENSORS_MCP3021 is not set +# CONFIG_SENSORS_MLXREG_FAN is not set +# CONFIG_SENSORS_TC654 is not set +# CONFIG_SENSORS_ADCXX is not set +# CONFIG_SENSORS_LM63 is not set +# CONFIG_SENSORS_LM70 is not set +# CONFIG_SENSORS_LM73 is 
not set +# CONFIG_SENSORS_LM75 is not set +# CONFIG_SENSORS_LM77 is not set +# CONFIG_SENSORS_LM78 is not set +# CONFIG_SENSORS_LM80 is not set +# CONFIG_SENSORS_LM83 is not set +# CONFIG_SENSORS_LM85 is not set +# CONFIG_SENSORS_LM87 is not set +# CONFIG_SENSORS_LM90 is not set +# CONFIG_SENSORS_LM92 is not set +# CONFIG_SENSORS_LM93 is not set +# CONFIG_SENSORS_LM95234 is not set +# CONFIG_SENSORS_LM95241 is not set +# CONFIG_SENSORS_LM95245 is not set +# CONFIG_SENSORS_PC87360 is not set +# CONFIG_SENSORS_PC87427 is not set +# CONFIG_SENSORS_NTC_THERMISTOR is not set +# CONFIG_SENSORS_NCT6683 is not set +# CONFIG_SENSORS_NCT6775 is not set +# CONFIG_SENSORS_NCT7802 is not set +# CONFIG_SENSORS_NCT7904 is not set +# CONFIG_SENSORS_NPCM7XX is not set +# CONFIG_SENSORS_PCF8591 is not set +# CONFIG_PMBUS is not set +# CONFIG_SENSORS_SHT15 is not set +# CONFIG_SENSORS_SHT21 is not set +# CONFIG_SENSORS_SHT3x is not set +# CONFIG_SENSORS_SHTC1 is not set +# CONFIG_SENSORS_SIS5595 is not set +# CONFIG_SENSORS_DME1737 is not set +# CONFIG_SENSORS_EMC1403 is not set +# CONFIG_SENSORS_EMC2103 is not set +# CONFIG_SENSORS_EMC6W201 is not set +# CONFIG_SENSORS_SMSC47M1 is not set +# CONFIG_SENSORS_SMSC47M192 is not set +# CONFIG_SENSORS_SMSC47B397 is not set +# CONFIG_SENSORS_STTS751 is not set +# CONFIG_SENSORS_SMM665 is not set +# CONFIG_SENSORS_ADC128D818 is not set +# CONFIG_SENSORS_ADS1015 is not set +# CONFIG_SENSORS_ADS7828 is not set +# CONFIG_SENSORS_ADS7871 is not set +# CONFIG_SENSORS_AMC6821 is not set +# CONFIG_SENSORS_INA209 is not set +# CONFIG_SENSORS_INA2XX is not set +# CONFIG_SENSORS_INA3221 is not set +# CONFIG_SENSORS_TC74 is not set +# CONFIG_SENSORS_THMC50 is not set +# CONFIG_SENSORS_TMP102 is not set +# CONFIG_SENSORS_TMP103 is not set +# CONFIG_SENSORS_TMP108 is not set +# CONFIG_SENSORS_TMP401 is not set +# CONFIG_SENSORS_TMP421 is not set +# CONFIG_SENSORS_VIA_CPUTEMP is not set +# CONFIG_SENSORS_VIA686A is not set +# CONFIG_SENSORS_VT1211 is not 
set +# CONFIG_SENSORS_VT8231 is not set +# CONFIG_SENSORS_W83773G is not set +# CONFIG_SENSORS_W83781D is not set +# CONFIG_SENSORS_W83791D is not set +# CONFIG_SENSORS_W83792D is not set +# CONFIG_SENSORS_W83793 is not set +# CONFIG_SENSORS_W83795 is not set +# CONFIG_SENSORS_W83L785TS is not set +# CONFIG_SENSORS_W83L786NG is not set +# CONFIG_SENSORS_W83627HF is not set +# CONFIG_SENSORS_W83627EHF is not set +# CONFIG_SENSORS_XGENE is not set + +# +# ACPI drivers +# +CONFIG_SENSORS_ACPI_POWER=y +# CONFIG_SENSORS_ATK0110 is not set +CONFIG_THERMAL=y +CONFIG_THERMAL_STATISTICS=y +CONFIG_THERMAL_EMERGENCY_POWEROFF_DELAY_MS=100 +CONFIG_THERMAL_HWMON=y +CONFIG_THERMAL_WRITABLE_TRIPS=y +CONFIG_THERMAL_DEFAULT_GOV_STEP_WISE=y +# CONFIG_THERMAL_DEFAULT_GOV_FAIR_SHARE is not set +# CONFIG_THERMAL_DEFAULT_GOV_USER_SPACE is not set +# CONFIG_THERMAL_DEFAULT_GOV_POWER_ALLOCATOR is not set +CONFIG_THERMAL_GOV_FAIR_SHARE=y +CONFIG_THERMAL_GOV_STEP_WISE=y +CONFIG_THERMAL_GOV_BANG_BANG=y +CONFIG_THERMAL_GOV_USER_SPACE=y +# CONFIG_THERMAL_GOV_POWER_ALLOCATOR is not set +# CONFIG_CLOCK_THERMAL is not set +# CONFIG_DEVFREQ_THERMAL is not set +CONFIG_THERMAL_EMULATION=y +CONFIG_INTEL_POWERCLAMP=y +CONFIG_X86_PKG_TEMP_THERMAL=y +CONFIG_INTEL_SOC_DTS_IOSF_CORE=m +CONFIG_INTEL_SOC_DTS_THERMAL=m + +# +# ACPI INT340X thermal drivers +# +CONFIG_INT340X_THERMAL=m +CONFIG_ACPI_THERMAL_REL=m +CONFIG_INT3406_THERMAL=m +CONFIG_INTEL_BXT_PMIC_THERMAL=m +CONFIG_INTEL_PCH_THERMAL=m +# CONFIG_GENERIC_ADC_THERMAL is not set +# CONFIG_WATCHDOG is not set +CONFIG_SSB_POSSIBLE=y +CONFIG_SSB=m +CONFIG_SSB_SPROM=y +CONFIG_SSB_BLOCKIO=y +CONFIG_SSB_PCIHOST_POSSIBLE=y +CONFIG_SSB_PCIHOST=y +CONFIG_SSB_B43_PCI_BRIDGE=y +CONFIG_SSB_SDIOHOST_POSSIBLE=y +# CONFIG_SSB_SDIOHOST is not set +CONFIG_SSB_DRIVER_PCICORE_POSSIBLE=y +CONFIG_SSB_DRIVER_PCICORE=y +# CONFIG_SSB_DRIVER_GPIO is not set +CONFIG_BCMA_POSSIBLE=y +CONFIG_BCMA=m +CONFIG_BCMA_BLOCKIO=y +CONFIG_BCMA_HOST_PCI_POSSIBLE=y +CONFIG_BCMA_HOST_PCI=y +# 
CONFIG_BCMA_HOST_SOC is not set +CONFIG_BCMA_DRIVER_PCI=y +# CONFIG_BCMA_DRIVER_GMAC_CMN is not set +# CONFIG_BCMA_DRIVER_GPIO is not set +# CONFIG_BCMA_DEBUG is not set + +# +# Multifunction device drivers +# +CONFIG_MFD_CORE=y +# CONFIG_MFD_AS3711 is not set +# CONFIG_PMIC_ADP5520 is not set +# CONFIG_MFD_AAT2870_CORE is not set +CONFIG_MFD_BCM590XX=m +CONFIG_MFD_BD9571MWV=m +# CONFIG_MFD_AXP20X_I2C is not set +# CONFIG_MFD_CROS_EC is not set +# CONFIG_MFD_MADERA is not set +# CONFIG_PMIC_DA903X is not set +# CONFIG_MFD_DA9052_SPI is not set +# CONFIG_MFD_DA9052_I2C is not set +# CONFIG_MFD_DA9055 is not set +# CONFIG_MFD_DA9062 is not set +# CONFIG_MFD_DA9063 is not set +# CONFIG_MFD_DA9150 is not set +# CONFIG_MFD_DLN2 is not set +# CONFIG_MFD_MC13XXX_SPI is not set +# CONFIG_MFD_MC13XXX_I2C is not set +# CONFIG_HTC_PASIC3 is not set +# CONFIG_HTC_I2CPLD is not set +# CONFIG_MFD_INTEL_QUARK_I2C_GPIO is not set +CONFIG_LPC_ICH=m +CONFIG_LPC_SCH=m +CONFIG_INTEL_SOC_PMIC=y +CONFIG_INTEL_SOC_PMIC_BXTWC=m +# CONFIG_INTEL_SOC_PMIC_CHTWC is not set +CONFIG_INTEL_SOC_PMIC_CHTDC_TI=m +CONFIG_MFD_INTEL_LPSS=y +CONFIG_MFD_INTEL_LPSS_ACPI=y +CONFIG_MFD_INTEL_LPSS_PCI=y +# CONFIG_MFD_JANZ_CMODIO is not set +# CONFIG_MFD_KEMPLD is not set +# CONFIG_MFD_88PM800 is not set +# CONFIG_MFD_88PM805 is not set +# CONFIG_MFD_88PM860X is not set +# CONFIG_MFD_MAX14577 is not set +# CONFIG_MFD_MAX77693 is not set +# CONFIG_MFD_MAX77843 is not set +# CONFIG_MFD_MAX8907 is not set +# CONFIG_MFD_MAX8925 is not set +# CONFIG_MFD_MAX8997 is not set +# CONFIG_MFD_MAX8998 is not set +# CONFIG_MFD_MT6397 is not set +# CONFIG_MFD_MENF21BMC is not set +# CONFIG_EZX_PCAP is not set +# CONFIG_MFD_VIPERBOARD is not set +# CONFIG_MFD_RETU is not set +# CONFIG_MFD_PCF50633 is not set +# CONFIG_UCB1400_CORE is not set +# CONFIG_MFD_RDC321X is not set +# CONFIG_MFD_RT5033 is not set +# CONFIG_MFD_RC5T583 is not set +# CONFIG_MFD_SEC_CORE is not set +# CONFIG_MFD_SI476X_CORE is not set +# 
CONFIG_MFD_SM501 is not set +# CONFIG_MFD_SKY81452 is not set +# CONFIG_MFD_SMSC is not set +# CONFIG_ABX500_CORE is not set +# CONFIG_MFD_SYSCON is not set +# CONFIG_MFD_TI_AM335X_TSCADC is not set +# CONFIG_MFD_LP3943 is not set +# CONFIG_MFD_LP8788 is not set +# CONFIG_MFD_TI_LMU is not set +# CONFIG_MFD_PALMAS is not set +# CONFIG_TPS6105X is not set +# CONFIG_TPS65010 is not set +# CONFIG_TPS6507X is not set +# CONFIG_MFD_TPS65086 is not set +# CONFIG_MFD_TPS65090 is not set +# CONFIG_MFD_TPS68470 is not set +# CONFIG_MFD_TI_LP873X is not set +# CONFIG_MFD_TPS6586X is not set +# CONFIG_MFD_TPS65910 is not set +# CONFIG_MFD_TPS65912_I2C is not set +# CONFIG_MFD_TPS65912_SPI is not set +# CONFIG_MFD_TPS80031 is not set +# CONFIG_TWL4030_CORE is not set +# CONFIG_TWL6040_CORE is not set +# CONFIG_MFD_WL1273_CORE is not set +# CONFIG_MFD_LM3533 is not set +# CONFIG_MFD_VX855 is not set +# CONFIG_MFD_ARIZONA_I2C is not set +# CONFIG_MFD_ARIZONA_SPI is not set +# CONFIG_MFD_WM8400 is not set +# CONFIG_MFD_WM831X_I2C is not set +# CONFIG_MFD_WM831X_SPI is not set +# CONFIG_MFD_WM8350_I2C is not set +# CONFIG_MFD_WM8994 is not set +# CONFIG_RAVE_SP_CORE is not set +# CONFIG_REGULATOR is not set +# CONFIG_RC_CORE is not set +CONFIG_MEDIA_SUPPORT=m + +# +# Multimedia core support +# +CONFIG_MEDIA_CAMERA_SUPPORT=y +# CONFIG_MEDIA_ANALOG_TV_SUPPORT is not set +# CONFIG_MEDIA_DIGITAL_TV_SUPPORT is not set +# CONFIG_MEDIA_RADIO_SUPPORT is not set +# CONFIG_MEDIA_SDR_SUPPORT is not set +# CONFIG_MEDIA_CEC_SUPPORT is not set +# CONFIG_MEDIA_CONTROLLER is not set +CONFIG_VIDEO_DEV=m +CONFIG_VIDEO_V4L2=m +# CONFIG_VIDEO_ADV_DEBUG is not set +# CONFIG_VIDEO_FIXED_MINOR_RANGES is not set + +# +# Media drivers +# +CONFIG_MEDIA_USB_SUPPORT=y + +# +# Webcam devices +# +CONFIG_USB_VIDEO_CLASS=m +CONFIG_USB_VIDEO_CLASS_INPUT_EVDEV=y +# CONFIG_USB_GSPCA is not set +# CONFIG_USB_PWC is not set +# CONFIG_VIDEO_CPIA2 is not set +# CONFIG_USB_ZR364XX is not set +# CONFIG_USB_STKWEBCAM is 
not set +# CONFIG_USB_S2255 is not set +# CONFIG_VIDEO_USBTV is not set + +# +# Webcam, TV (analog/digital) USB devices +# +# CONFIG_VIDEO_EM28XX is not set +# CONFIG_MEDIA_PCI_SUPPORT is not set +# CONFIG_V4L_PLATFORM_DRIVERS is not set +# CONFIG_V4L_MEM2MEM_DRIVERS is not set +# CONFIG_V4L_TEST_DRIVERS is not set + +# +# Supported MMC/SDIO adapters +# +# CONFIG_CYPRESS_FIRMWARE is not set +CONFIG_VIDEOBUF2_CORE=m +CONFIG_VIDEOBUF2_V4L2=m +CONFIG_VIDEOBUF2_MEMOPS=m +CONFIG_VIDEOBUF2_VMALLOC=m +CONFIG_VIDEOBUF2_DMA_SG=m + +# +# Media ancillary drivers (tuners, sensors, i2c, spi, frontends) +# +CONFIG_MEDIA_SUBDRV_AUTOSELECT=y + +# +# Audio decoders, processors and mixers +# + +# +# RDS decoders +# + +# +# Video decoders +# + +# +# Video and audio decoders +# + +# +# Video encoders +# + +# +# Camera sensor devices +# + +# +# Flash devices +# + +# +# Video improvement chips +# + +# +# Audio/Video compression chips +# + +# +# SDR tuner chips +# + +# +# Miscellaneous helper chips +# + +# +# Sensors used on soc_camera driver +# + +# +# Media SPI Adapters +# + +# +# Tools to develop new frontends +# + +# +# Graphics support +# +# CONFIG_AGP is not set +CONFIG_INTEL_GTT=y +CONFIG_VGA_ARB=y +CONFIG_VGA_ARB_MAX_GPUS=10 +# CONFIG_VGA_SWITCHEROO is not set +CONFIG_DRM=y +CONFIG_DRM_MIPI_DSI=y +# CONFIG_DRM_DP_AUX_CHARDEV is not set +# CONFIG_DRM_DEBUG_MM is not set +# CONFIG_DRM_DEBUG_SELFTEST is not set +CONFIG_DRM_KMS_HELPER=y +CONFIG_DRM_KMS_FB_HELPER=y +CONFIG_DRM_FBDEV_EMULATION=y +CONFIG_DRM_FBDEV_OVERALLOC=100 +# CONFIG_DRM_LOAD_EDID_FIRMWARE is not set +# CONFIG_DRM_DP_CEC is not set +CONFIG_DRM_TTM=y +CONFIG_DRM_VM=y +CONFIG_DRM_SCHED=m + +# +# I2C encoder or helper chips +# +# CONFIG_DRM_I2C_CH7006 is not set +# CONFIG_DRM_I2C_SIL164 is not set +# CONFIG_DRM_I2C_NXP_TDA998X is not set +# CONFIG_DRM_I2C_NXP_TDA9950 is not set +CONFIG_DRM_RADEON=m +# CONFIG_DRM_RADEON_USERPTR is not set +CONFIG_DRM_AMDGPU=m +# CONFIG_DRM_AMDGPU_SI is not set +# CONFIG_DRM_AMDGPU_CIK 
is not set +# CONFIG_DRM_AMDGPU_USERPTR is not set +# CONFIG_DRM_AMDGPU_GART_DEBUGFS is not set + +# +# ACP (Audio CoProcessor) Configuration +# +# CONFIG_DRM_AMD_ACP is not set + +# +# Display Engine Configuration +# +CONFIG_DRM_AMD_DC=y +CONFIG_DRM_AMD_DC_DCN1_0=y +# CONFIG_DEBUG_KERNEL_DC is not set + +# +# AMD Library routines +# +CONFIG_CHASH=m +# CONFIG_CHASH_STATS is not set +# CONFIG_CHASH_SELFTEST is not set +CONFIG_DRM_NOUVEAU=m +CONFIG_NOUVEAU_DEBUG=5 +CONFIG_NOUVEAU_DEBUG_DEFAULT=3 +# CONFIG_NOUVEAU_DEBUG_MMU is not set +CONFIG_DRM_NOUVEAU_BACKLIGHT=y +CONFIG_DRM_I915=y +# CONFIG_DRM_I915_ALPHA_SUPPORT is not set +CONFIG_DRM_I915_CAPTURE_ERROR=y +CONFIG_DRM_I915_COMPRESS_ERROR=y +CONFIG_DRM_I915_MEMTRACK=y +CONFIG_DRM_I915_USERPTR=y +CONFIG_DRM_I915_GVT=y + +# +# drm/i915 Debugging +# +# CONFIG_DRM_I915_WERROR is not set +# CONFIG_DRM_I915_DEBUG is not set +# CONFIG_DRM_I915_SW_FENCE_DEBUG_OBJECTS is not set +# CONFIG_DRM_I915_SW_FENCE_CHECK_DAG is not set +# CONFIG_DRM_I915_DEBUG_GUC is not set +# CONFIG_DRM_I915_SELFTEST is not set +# CONFIG_DRM_I915_LOW_LEVEL_TRACEPOINTS is not set +# CONFIG_DRM_I915_DEBUG_VBLANK_EVADE is not set +# CONFIG_DRM_VGEM is not set +# CONFIG_DRM_VKMS is not set +CONFIG_DRM_VMWGFX=m +CONFIG_DRM_VMWGFX_FBCON=y +# CONFIG_DRM_GMA500 is not set +# CONFIG_DRM_UDL is not set +# CONFIG_DRM_AST is not set +# CONFIG_DRM_MGAG200 is not set +CONFIG_DRM_CIRRUS_QEMU=y +CONFIG_DRM_QXL=m +CONFIG_DRM_BOCHS=y +CONFIG_DRM_VIRTIO_GPU=y +CONFIG_DRM_PANEL=y + +# +# Display Panels +# +# CONFIG_DRM_PANEL_RASPBERRYPI_TOUCHSCREEN is not set +CONFIG_DRM_BRIDGE=y +CONFIG_DRM_PANEL_BRIDGE=y + +# +# Display Interface Bridges +# +# CONFIG_DRM_ANALOGIX_ANX78XX is not set +CONFIG_HSA_AMD=m +# CONFIG_DRM_HISI_HIBMC is not set +# CONFIG_DRM_TINYDRM is not set +# CONFIG_DRM_LEGACY is not set +CONFIG_DRM_PANEL_ORIENTATION_QUIRKS=y + +# +# Frame buffer Devices +# +CONFIG_FB=y +CONFIG_FIRMWARE_EDID=y +CONFIG_FB_CMDLINE=y +CONFIG_FB_NOTIFY=y +CONFIG_FB_DDC=m 
+CONFIG_FB_CFB_FILLRECT=y +CONFIG_FB_CFB_COPYAREA=y +CONFIG_FB_CFB_IMAGEBLIT=y +CONFIG_FB_SYS_FILLRECT=y +CONFIG_FB_SYS_COPYAREA=y +CONFIG_FB_SYS_IMAGEBLIT=y +# CONFIG_FB_FOREIGN_ENDIAN is not set +CONFIG_FB_SYS_FOPS=y +CONFIG_FB_DEFERRED_IO=y +CONFIG_FB_BACKLIGHT=y +CONFIG_FB_MODE_HELPERS=y +CONFIG_FB_TILEBLITTING=y + +# +# Frame buffer hardware drivers +# +# CONFIG_FB_CIRRUS is not set +# CONFIG_FB_PM2 is not set +# CONFIG_FB_CYBER2000 is not set +# CONFIG_FB_ARC is not set +# CONFIG_FB_ASILIANT is not set +# CONFIG_FB_IMSTT is not set +# CONFIG_FB_VGA16 is not set +# CONFIG_FB_UVESA is not set +# CONFIG_FB_VESA is not set +CONFIG_FB_EFI=y +# CONFIG_FB_N411 is not set +# CONFIG_FB_HGA is not set +# CONFIG_FB_OPENCORES is not set +# CONFIG_FB_S1D13XXX is not set +# CONFIG_FB_NVIDIA is not set +# CONFIG_FB_RIVA is not set +# CONFIG_FB_I740 is not set +# CONFIG_FB_LE80578 is not set +# CONFIG_FB_MATROX is not set +CONFIG_FB_RADEON=m +CONFIG_FB_RADEON_I2C=y +CONFIG_FB_RADEON_BACKLIGHT=y +# CONFIG_FB_RADEON_DEBUG is not set +# CONFIG_FB_ATY128 is not set +# CONFIG_FB_ATY is not set +# CONFIG_FB_S3 is not set +# CONFIG_FB_SAVAGE is not set +# CONFIG_FB_SIS is not set +# CONFIG_FB_VIA is not set +# CONFIG_FB_NEOMAGIC is not set +# CONFIG_FB_KYRO is not set +# CONFIG_FB_3DFX is not set +# CONFIG_FB_VOODOO1 is not set +# CONFIG_FB_VT8623 is not set +# CONFIG_FB_TRIDENT is not set +# CONFIG_FB_ARK is not set +# CONFIG_FB_PM3 is not set +# CONFIG_FB_CARMINE is not set +# CONFIG_FB_SMSCUFX is not set +# CONFIG_FB_UDL is not set +# CONFIG_FB_IBM_GXT4500 is not set +# CONFIG_FB_VIRTUAL is not set +# CONFIG_FB_METRONOME is not set +# CONFIG_FB_MB862XX is not set +# CONFIG_FB_BROADSHEET is not set +# CONFIG_FB_SIMPLE is not set +# CONFIG_FB_SM712 is not set +CONFIG_BACKLIGHT_LCD_SUPPORT=y +# CONFIG_LCD_CLASS_DEVICE is not set +CONFIG_BACKLIGHT_CLASS_DEVICE=y +# CONFIG_BACKLIGHT_GENERIC is not set +# CONFIG_BACKLIGHT_PWM is not set +CONFIG_BACKLIGHT_APPLE=m +# 
CONFIG_BACKLIGHT_PM8941_WLED is not set +# CONFIG_BACKLIGHT_SAHARA is not set +# CONFIG_BACKLIGHT_ADP8860 is not set +# CONFIG_BACKLIGHT_ADP8870 is not set +# CONFIG_BACKLIGHT_LM3630A is not set +# CONFIG_BACKLIGHT_LM3639 is not set +# CONFIG_BACKLIGHT_LP855X is not set +# CONFIG_BACKLIGHT_GPIO is not set +# CONFIG_BACKLIGHT_LV5207LP is not set +# CONFIG_BACKLIGHT_BD6107 is not set +# CONFIG_BACKLIGHT_ARCXCNN is not set +CONFIG_HDMI=y + +# +# Console display driver support +# +CONFIG_VGA_CONSOLE=y +CONFIG_VGACON_SOFT_SCROLLBACK=y +CONFIG_VGACON_SOFT_SCROLLBACK_SIZE=64 +# CONFIG_VGACON_SOFT_SCROLLBACK_PERSISTENT_ENABLE_BY_DEFAULT is not set +CONFIG_DUMMY_CONSOLE=y +CONFIG_DUMMY_CONSOLE_COLUMNS=80 +CONFIG_DUMMY_CONSOLE_ROWS=25 +CONFIG_FRAMEBUFFER_CONSOLE=y +CONFIG_FRAMEBUFFER_CONSOLE_DETECT_PRIMARY=y +# CONFIG_FRAMEBUFFER_CONSOLE_ROTATION is not set +# CONFIG_FRAMEBUFFER_CONSOLE_DEFERRED_TAKEOVER is not set +# CONFIG_LOGO is not set +CONFIG_SOUND=y +CONFIG_SND=y +CONFIG_SND_TIMER=m +CONFIG_SND_PCM=y +CONFIG_SND_HWDEP=m +CONFIG_SND_SEQ_DEVICE=m +CONFIG_SND_RAWMIDI=m +CONFIG_SND_COMPRESS_OFFLOAD=y +CONFIG_SND_JACK=y +CONFIG_SND_JACK_INPUT_DEV=y +# CONFIG_SND_OSSEMUL is not set +# CONFIG_SND_PCM_TIMER is not set +CONFIG_SND_HRTIMER=m +CONFIG_SND_DYNAMIC_MINORS=y +CONFIG_SND_MAX_CARDS=32 +# CONFIG_SND_SUPPORT_OLD_API is not set +CONFIG_SND_PROC_FS=y +CONFIG_SND_VERBOSE_PROCFS=y +CONFIG_SND_VERBOSE_PRINTK=y +CONFIG_SND_DEBUG=y +CONFIG_SND_DEBUG_VERBOSE=y +# CONFIG_SND_PCM_XRUN_DEBUG is not set +CONFIG_SND_VMASTER=y +CONFIG_SND_DMA_SGBUF=y +CONFIG_SND_SEQUENCER=m +# CONFIG_SND_SEQ_DUMMY is not set +CONFIG_SND_SEQ_HRTIMER_DEFAULT=y +CONFIG_SND_SEQ_MIDI_EVENT=m +CONFIG_SND_SEQ_MIDI=m +CONFIG_SND_AC97_CODEC=m +# CONFIG_SND_DRIVERS is not set +CONFIG_SND_PCI=y +# CONFIG_SND_AD1889 is not set +# CONFIG_SND_ALS300 is not set +# CONFIG_SND_ALS4000 is not set +# CONFIG_SND_ALI5451 is not set +# CONFIG_SND_ASIHPI is not set +# CONFIG_SND_ATIIXP is not set +# CONFIG_SND_ATIIXP_MODEM 
is not set +# CONFIG_SND_AU8810 is not set +# CONFIG_SND_AU8820 is not set +# CONFIG_SND_AU8830 is not set +# CONFIG_SND_AW2 is not set +# CONFIG_SND_AZT3328 is not set +# CONFIG_SND_BT87X is not set +# CONFIG_SND_CA0106 is not set +# CONFIG_SND_CMIPCI is not set +# CONFIG_SND_OXYGEN is not set +# CONFIG_SND_CS4281 is not set +# CONFIG_SND_CS46XX is not set +# CONFIG_SND_CTXFI is not set +# CONFIG_SND_DARLA20 is not set +# CONFIG_SND_GINA20 is not set +# CONFIG_SND_LAYLA20 is not set +# CONFIG_SND_DARLA24 is not set +# CONFIG_SND_GINA24 is not set +# CONFIG_SND_LAYLA24 is not set +# CONFIG_SND_MONA is not set +# CONFIG_SND_MIA is not set +# CONFIG_SND_ECHO3G is not set +# CONFIG_SND_INDIGO is not set +# CONFIG_SND_INDIGOIO is not set +# CONFIG_SND_INDIGODJ is not set +# CONFIG_SND_INDIGOIOX is not set +# CONFIG_SND_INDIGODJX is not set +# CONFIG_SND_EMU10K1 is not set +# CONFIG_SND_EMU10K1X is not set +# CONFIG_SND_ENS1370 is not set +# CONFIG_SND_ENS1371 is not set +# CONFIG_SND_ES1938 is not set +# CONFIG_SND_ES1968 is not set +# CONFIG_SND_FM801 is not set +# CONFIG_SND_HDSP is not set +# CONFIG_SND_HDSPM is not set +# CONFIG_SND_ICE1712 is not set +# CONFIG_SND_ICE1724 is not set +CONFIG_SND_INTEL8X0=m +# CONFIG_SND_INTEL8X0M is not set +# CONFIG_SND_KORG1212 is not set +# CONFIG_SND_LOLA is not set +# CONFIG_SND_LX6464ES is not set +# CONFIG_SND_MAESTRO3 is not set +# CONFIG_SND_MIXART is not set +# CONFIG_SND_NM256 is not set +# CONFIG_SND_PCXHR is not set +# CONFIG_SND_RIPTIDE is not set +# CONFIG_SND_RME32 is not set +# CONFIG_SND_RME96 is not set +# CONFIG_SND_RME9652 is not set +# CONFIG_SND_SE6X is not set +# CONFIG_SND_SONICVIBES is not set +# CONFIG_SND_TRIDENT is not set +# CONFIG_SND_VIA82XX is not set +# CONFIG_SND_VIA82XX_MODEM is not set +# CONFIG_SND_VIRTUOSO is not set +# CONFIG_SND_VX222 is not set +# CONFIG_SND_YMFPCI is not set + +# +# HD-Audio +# +CONFIG_SND_HDA=m +CONFIG_SND_HDA_INTEL=m +CONFIG_SND_HDA_HWDEP=y +CONFIG_SND_HDA_RECONFIG=y +# 
CONFIG_SND_HDA_INPUT_BEEP is not set +# CONFIG_SND_HDA_PATCH_LOADER is not set +CONFIG_SND_HDA_CODEC_REALTEK=m +CONFIG_SND_HDA_CODEC_ANALOG=m +# CONFIG_SND_HDA_CODEC_SIGMATEL is not set +# CONFIG_SND_HDA_CODEC_VIA is not set +CONFIG_SND_HDA_CODEC_HDMI=m +# CONFIG_SND_HDA_CODEC_CIRRUS is not set +# CONFIG_SND_HDA_CODEC_CONEXANT is not set +# CONFIG_SND_HDA_CODEC_CA0110 is not set +# CONFIG_SND_HDA_CODEC_CA0132 is not set +# CONFIG_SND_HDA_CODEC_CMEDIA is not set +# CONFIG_SND_HDA_CODEC_SI3054 is not set +CONFIG_SND_HDA_GENERIC=m +CONFIG_SND_HDA_POWER_SAVE_DEFAULT=10 +CONFIG_SND_HDA_CORE=m +CONFIG_SND_HDA_DSP_LOADER=y +CONFIG_SND_HDA_COMPONENT=y +CONFIG_SND_HDA_I915=y +CONFIG_SND_HDA_EXT_CORE=m +CONFIG_SND_HDA_PREALLOC_SIZE=64 +# CONFIG_SND_SPI is not set +CONFIG_SND_USB=y +CONFIG_SND_USB_AUDIO=m +# CONFIG_SND_USB_UA101 is not set +# CONFIG_SND_USB_USX2Y is not set +# CONFIG_SND_USB_CAIAQ is not set +# CONFIG_SND_USB_US122L is not set +# CONFIG_SND_USB_6FIRE is not set +# CONFIG_SND_USB_HIFACE is not set +# CONFIG_SND_BCD2000 is not set +# CONFIG_SND_USB_POD is not set +# CONFIG_SND_USB_PODHD is not set +# CONFIG_SND_USB_TONEPORT is not set +# CONFIG_SND_USB_VARIAX is not set +CONFIG_SND_SOC=y +CONFIG_SND_SOC_COMPRESS=y +CONFIG_SND_SOC_TOPOLOGY=y +CONFIG_SND_SOC_ACPI=y +# CONFIG_SND_SOC_AMD_ACP is not set +# CONFIG_SND_ATMEL_SOC is not set +# CONFIG_SND_DESIGNWARE_I2S is not set + +# +# SoC Audio for Freescale CPUs +# + +# +# Common SoC Audio options for Freescale CPUs: +# +# CONFIG_SND_SOC_FSL_ASRC is not set +# CONFIG_SND_SOC_FSL_SAI is not set +# CONFIG_SND_SOC_FSL_SSI is not set +# CONFIG_SND_SOC_FSL_SPDIF is not set +# CONFIG_SND_SOC_FSL_ESAI is not set +# CONFIG_SND_SOC_IMX_AUDMUX is not set +# CONFIG_SND_I2S_HI6210_I2S is not set +# CONFIG_SND_SOC_IMG is not set +CONFIG_SND_SOC_INTEL_SST_TOPLEVEL=y +CONFIG_SND_SST_IPC=y +CONFIG_SND_SST_IPC_ACPI=y +CONFIG_SND_SOC_INTEL_SST=m +# CONFIG_SND_SOC_INTEL_HASWELL is not set +CONFIG_SND_SST_ATOM_HIFI2_PLATFORM=y +# 
CONFIG_SND_SST_ATOM_HIFI2_PLATFORM_PCI is not set +CONFIG_SND_SST_ATOM_HIFI2_PLATFORM_ACPI=y +CONFIG_SND_SOC_INTEL_SKYLAKE=m +CONFIG_SND_SOC_ACPI_INTEL_MATCH=y +# CONFIG_SND_SOC_INTEL_CNL_FPGA is not set +# CONFIG_SND_SOC_SDW_AGGM1M2 is not set +CONFIG_SND_SOC_INTEL_MACH=y +# CONFIG_SND_SOC_INTEL_BYTCR_RT5640_MACH is not set +# CONFIG_SND_SOC_INTEL_BYTCR_RT5651_MACH is not set +# CONFIG_SND_SOC_INTEL_CHT_BSW_RT5672_MACH is not set +# CONFIG_SND_SOC_INTEL_CHT_BSW_RT5645_MACH is not set +# CONFIG_SND_SOC_INTEL_CHT_BSW_MAX98090_TI_MACH is not set +# CONFIG_SND_SOC_INTEL_CHT_BSW_NAU8824_MACH is not set +# CONFIG_SND_SOC_INTEL_BYT_CHT_DA7213_MACH is not set +# CONFIG_SND_SOC_INTEL_BYT_CHT_ES8316_MACH is not set +# CONFIG_SND_SOC_INTEL_BYT_CHT_NOCODEC_MACH is not set +# CONFIG_SND_SOC_INTEL_SKL_RT286_MACH is not set +# CONFIG_SND_SOC_INTEL_SKL_NAU88L25_SSM4567_MACH is not set +# CONFIG_SND_SOC_INTEL_SKL_NAU88L25_MAX98357A_MACH is not set +# CONFIG_SND_SOC_INTEL_BXT_DA7219_MAX98357A_MACH is not set +# CONFIG_SND_SOC_INTEL_BXT_RT298_MACH is not set +# CONFIG_SND_SOC_INTEL_KBL_RT5663_MAX98927_MACH is not set +# CONFIG_SND_SOC_INTEL_KBL_RT5663_RT5514_MAX98927_MACH is not set +# CONFIG_SND_SOC_INTEL_KBL_DA7219_MAX98357A_MACH is not set +# CONFIG_SND_SOC_INTEL_GLK_RT5682_MAX98357A_MACH is not set +# CONFIG_SND_SOC_INTEL_CNL_CS42L42_MACH is not set +# CONFIG_SND_SOC_INTEL_CNL_RT700_MACH is not set +# CONFIG_SND_SOC_INTEL_CNL_SVFPGA_MACH is not set +# CONFIG_SND_SOC_INTEL_CNL_RT274_MACH is not set +# CONFIG_SND_SOC_INTEL_ICL_RT274_MACH is not set +CONFIG_SND_SOC_INTEL_BXT_TDF8532_MACH=m +# CONFIG_SND_SOC_INTEL_BXT_ULL_MACH is not set +# CONFIG_SND_SOC_INTEL_KBLR_RT298_MACH is not set +# CONFIG_SND_SOC_INTEL_BXTP_IVI_RSE_MACH is not set +# CONFIG_SND_SOC_INTEL_BXTP_IVI_HU_MACH is not set +# CONFIG_SND_SOC_INTEL_BXTP_IVI_M3_MACH is not set +# CONFIG_SND_SOC_INTEL_BXTP_IVI_GENERIC_MACH is not set + +# +# STMicroelectronics STM32 SOC audio support +# +# CONFIG_SND_SOC_XTFPGA_I2S is 
not set +# CONFIG_ZX_TDM is not set +CONFIG_SND_SOC_I2C_AND_SPI=y + +# +# CODEC drivers +# +# CONFIG_SND_SOC_AC97_CODEC is not set +# CONFIG_SND_SOC_ADAU1701 is not set +# CONFIG_SND_SOC_ADAU1761_I2C is not set +# CONFIG_SND_SOC_ADAU1761_SPI is not set +# CONFIG_SND_SOC_ADAU7002 is not set +# CONFIG_SND_SOC_AK4104 is not set +# CONFIG_SND_SOC_AK4458 is not set +# CONFIG_SND_SOC_AK4554 is not set +# CONFIG_SND_SOC_AK4613 is not set +# CONFIG_SND_SOC_AK4642 is not set +# CONFIG_SND_SOC_AK5386 is not set +# CONFIG_SND_SOC_AK5558 is not set +# CONFIG_SND_SOC_ALC5623 is not set +# CONFIG_SND_SOC_BD28623 is not set +# CONFIG_SND_SOC_BT_SCO is not set +# CONFIG_SND_SOC_CS35L32 is not set +# CONFIG_SND_SOC_CS35L33 is not set +# CONFIG_SND_SOC_CS35L34 is not set +# CONFIG_SND_SOC_CS35L35 is not set +# CONFIG_SND_SOC_CS42L42 is not set +# CONFIG_SND_SOC_SVFPGA is not set +# CONFIG_SND_SOC_SVFPGA_SDW is not set +# CONFIG_SND_SOC_SVFPGA_I2C is not set +# CONFIG_SND_SOC_CS42L51_I2C is not set +# CONFIG_SND_SOC_CS42L52 is not set +# CONFIG_SND_SOC_CS42L56 is not set +# CONFIG_SND_SOC_CS42L73 is not set +# CONFIG_SND_SOC_CS4265 is not set +# CONFIG_SND_SOC_CS4270 is not set +# CONFIG_SND_SOC_CS4271_I2C is not set +# CONFIG_SND_SOC_CS4271_SPI is not set +# CONFIG_SND_SOC_CS42XX8_I2C is not set +# CONFIG_SND_SOC_CS43130 is not set +# CONFIG_SND_SOC_CS4349 is not set +# CONFIG_SND_SOC_CS53L30 is not set +# CONFIG_SND_SOC_ES7134 is not set +# CONFIG_SND_SOC_ES7241 is not set +# CONFIG_SND_SOC_ES8316 is not set +# CONFIG_SND_SOC_ES8328_I2C is not set +# CONFIG_SND_SOC_ES8328_SPI is not set +# CONFIG_SND_SOC_GTM601 is not set +# CONFIG_SND_SOC_INNO_RK3036 is not set +# CONFIG_SND_SOC_MAX98504 is not set +# CONFIG_SND_SOC_MAX9867 is not set +# CONFIG_SND_SOC_MAX98927 is not set +# CONFIG_SND_SOC_MAX98373 is not set +# CONFIG_SND_SOC_MAX9860 is not set +# CONFIG_SND_SOC_MSM8916_WCD_DIGITAL is not set +# CONFIG_SND_SOC_PCM1681 is not set +# CONFIG_SND_SOC_PCM1789_I2C is not set +# 
CONFIG_SND_SOC_PCM179X_I2C is not set +# CONFIG_SND_SOC_PCM179X_SPI is not set +# CONFIG_SND_SOC_PCM186X_I2C is not set +# CONFIG_SND_SOC_PCM186X_SPI is not set +# CONFIG_SND_SOC_PCM3168A_I2C is not set +# CONFIG_SND_SOC_PCM3168A_SPI is not set +# CONFIG_SND_SOC_PCM512x_I2C is not set +# CONFIG_SND_SOC_PCM512x_SPI is not set +# CONFIG_SND_SOC_RT5616 is not set +# CONFIG_SND_SOC_RT5631 is not set +# CONFIG_SND_SOC_RT700 is not set +# CONFIG_SND_SOC_RT700_SDW is not set +# CONFIG_SND_SOC_SGTL5000 is not set +# CONFIG_SND_SOC_SIMPLE_AMPLIFIER is not set +# CONFIG_SND_SOC_SIRF_AUDIO_CODEC is not set +# CONFIG_SND_SOC_SPDIF is not set +# CONFIG_SND_SOC_SSM2305 is not set +# CONFIG_SND_SOC_SSM2602_SPI is not set +# CONFIG_SND_SOC_SSM2602_I2C is not set +# CONFIG_SND_SOC_SSM4567 is not set +# CONFIG_SND_SOC_STA32X is not set +# CONFIG_SND_SOC_STA350 is not set +# CONFIG_SND_SOC_STI_SAS is not set +# CONFIG_SND_SOC_TAS2552 is not set +# CONFIG_SND_SOC_TAS5086 is not set +# CONFIG_SND_SOC_TAS571X is not set +# CONFIG_SND_SOC_TAS5720 is not set +# CONFIG_SND_SOC_TAS6424 is not set +# CONFIG_SND_SOC_TDA7419 is not set +CONFIG_SND_SOC_TDF8532=m +# CONFIG_SND_SOC_TFA9879 is not set +# CONFIG_SND_SOC_TLV320AIC23_I2C is not set +# CONFIG_SND_SOC_TLV320AIC23_SPI is not set +# CONFIG_SND_SOC_TLV320AIC31XX is not set +# CONFIG_SND_SOC_TLV320AIC32X4_I2C is not set +# CONFIG_SND_SOC_TLV320AIC32X4_SPI is not set +# CONFIG_SND_SOC_TLV320AIC3X is not set +# CONFIG_SND_SOC_TS3A227E is not set +# CONFIG_SND_SOC_TSCS42XX is not set +# CONFIG_SND_SOC_TSCS454 is not set +# CONFIG_SND_SOC_WM8510 is not set +# CONFIG_SND_SOC_WM8523 is not set +# CONFIG_SND_SOC_WM8524 is not set +# CONFIG_SND_SOC_WM8580 is not set +# CONFIG_SND_SOC_WM8711 is not set +# CONFIG_SND_SOC_WM8728 is not set +# CONFIG_SND_SOC_WM8731 is not set +# CONFIG_SND_SOC_WM8737 is not set +# CONFIG_SND_SOC_WM8741 is not set +# CONFIG_SND_SOC_WM8750 is not set +# CONFIG_SND_SOC_WM8753 is not set +# CONFIG_SND_SOC_WM8770 is not 
set +# CONFIG_SND_SOC_WM8776 is not set +# CONFIG_SND_SOC_WM8782 is not set +# CONFIG_SND_SOC_WM8804_I2C is not set +# CONFIG_SND_SOC_WM8804_SPI is not set +# CONFIG_SND_SOC_WM8903 is not set +# CONFIG_SND_SOC_WM8960 is not set +# CONFIG_SND_SOC_WM8962 is not set +# CONFIG_SND_SOC_WM8974 is not set +# CONFIG_SND_SOC_WM8978 is not set +# CONFIG_SND_SOC_WM8985 is not set +# CONFIG_SND_SOC_ZX_AUD96P22 is not set +# CONFIG_SND_SOC_MAX9759 is not set +# CONFIG_SND_SOC_MT6351 is not set +# CONFIG_SND_SOC_NAU8540 is not set +# CONFIG_SND_SOC_NAU8810 is not set +# CONFIG_SND_SOC_NAU8824 is not set +# CONFIG_SND_SOC_TPA6130A2 is not set +# CONFIG_SND_SIMPLE_CARD is not set +CONFIG_SND_X86=y +CONFIG_HDMI_LPE_AUDIO=m +CONFIG_AC97_BUS=m + +# +# HID support +# +CONFIG_HID=y +# CONFIG_HID_BATTERY_STRENGTH is not set +CONFIG_HIDRAW=y +# CONFIG_UHID is not set +CONFIG_HID_GENERIC=y + +# +# Special HID drivers +# +# CONFIG_HID_A4TECH is not set +# CONFIG_HID_ACCUTOUCH is not set +# CONFIG_HID_ACRUX is not set +CONFIG_HID_APPLE=m +CONFIG_HID_APPLEIR=m +CONFIG_HID_ASUS=m +CONFIG_HID_AUREAL=m +CONFIG_HID_BELKIN=y +CONFIG_HID_BETOP_FF=m +CONFIG_HID_CHERRY=y +CONFIG_HID_CHICONY=y +CONFIG_HID_CORSAIR=m +# CONFIG_HID_COUGAR is not set +CONFIG_HID_PRODIKEYS=m +CONFIG_HID_CMEDIA=m +CONFIG_HID_CP2112=m +CONFIG_HID_CYPRESS=m +CONFIG_HID_DRAGONRISE=m +# CONFIG_DRAGONRISE_FF is not set +CONFIG_HID_EMS_FF=m +CONFIG_HID_ELAN=m +CONFIG_HID_ELECOM=m +CONFIG_HID_ELO=m +CONFIG_HID_EZKEY=m +CONFIG_HID_GEMBIRD=m +CONFIG_HID_GFRM=m +CONFIG_HID_HOLTEK=y +# CONFIG_HOLTEK_FF is not set +CONFIG_HID_GOOGLE_HAMMER=m +CONFIG_HID_GT683R=m +CONFIG_HID_KEYTOUCH=m +CONFIG_HID_KYE=m +CONFIG_HID_UCLOGIC=m +CONFIG_HID_WALTOP=m +CONFIG_HID_GYRATION=m +CONFIG_HID_ICADE=m +CONFIG_HID_ITE=m +CONFIG_HID_JABRA=m +CONFIG_HID_TWINHAN=m +CONFIG_HID_KENSINGTON=y +CONFIG_HID_LCPOWER=m +CONFIG_HID_LED=m +CONFIG_HID_LENOVO=y +CONFIG_HID_LOGITECH=y +CONFIG_HID_LOGITECH_DJ=m +CONFIG_HID_LOGITECH_HIDPP=m +CONFIG_LOGITECH_FF=y +# 
CONFIG_LOGIRUMBLEPAD2_FF is not set +# CONFIG_LOGIG940_FF is not set +CONFIG_LOGIWHEELS_FF=y +CONFIG_HID_MAGICMOUSE=m +CONFIG_HID_MAYFLASH=m +# CONFIG_HID_REDRAGON is not set +CONFIG_HID_MICROSOFT=y +CONFIG_HID_MONTEREY=m +CONFIG_HID_MULTITOUCH=m +CONFIG_HID_NTI=m +CONFIG_HID_NTRIG=m +CONFIG_HID_ORTEK=m +CONFIG_HID_PANTHERLORD=m +# CONFIG_PANTHERLORD_FF is not set +CONFIG_HID_PENMOUNT=m +CONFIG_HID_PETALYNX=m +CONFIG_HID_PICOLCD=m +# CONFIG_HID_PICOLCD_FB is not set +# CONFIG_HID_PICOLCD_BACKLIGHT is not set +# CONFIG_HID_PICOLCD_LEDS is not set +CONFIG_HID_PLANTRONICS=m +CONFIG_HID_PRIMAX=m +CONFIG_HID_RETRODE=m +CONFIG_HID_ROCCAT=m +CONFIG_HID_SAITEK=m +CONFIG_HID_SAMSUNG=y +CONFIG_HID_SONY=m +# CONFIG_SONY_FF is not set +CONFIG_HID_SPEEDLINK=m +# CONFIG_HID_STEAM is not set +CONFIG_HID_STEELSERIES=m +CONFIG_HID_SUNPLUS=m +CONFIG_HID_RMI=m +CONFIG_HID_GREENASIA=m +# CONFIG_GREENASIA_FF is not set +CONFIG_HID_SMARTJOYPLUS=m +# CONFIG_SMARTJOYPLUS_FF is not set +CONFIG_HID_TIVO=m +CONFIG_HID_TOPSEED=m +CONFIG_HID_THINGM=m +CONFIG_HID_THRUSTMASTER=m +# CONFIG_THRUSTMASTER_FF is not set +CONFIG_HID_UDRAW_PS3=m +CONFIG_HID_WACOM=m +CONFIG_HID_WIIMOTE=m +CONFIG_HID_XINMO=m +CONFIG_HID_ZEROPLUS=m +# CONFIG_ZEROPLUS_FF is not set +CONFIG_HID_ZYDACRON=m +CONFIG_HID_SENSOR_HUB=m +CONFIG_HID_SENSOR_CUSTOM_SENSOR=m +CONFIG_HID_ALPS=m + +# +# USB HID support +# +CONFIG_USB_HID=y +CONFIG_HID_PID=y +CONFIG_USB_HIDDEV=y + +# +# I2C HID support +# +CONFIG_I2C_HID=m + +# +# Intel ISH HID support +# +CONFIG_INTEL_ISH_HID=m +CONFIG_USB_OHCI_LITTLE_ENDIAN=y +CONFIG_USB_SUPPORT=y +CONFIG_USB_COMMON=y +CONFIG_USB_ARCH_HAS_HCD=y +CONFIG_USB=y +CONFIG_USB_PCI=y +CONFIG_USB_ANNOUNCE_NEW_DEVICES=y + +# +# Miscellaneous USB options +# +CONFIG_USB_DEFAULT_PERSIST=y +# CONFIG_USB_DYNAMIC_MINORS is not set +# CONFIG_USB_OTG is not set +# CONFIG_USB_OTG_WHITELIST is not set +# CONFIG_USB_OTG_BLACKLIST_HUB is not set +# CONFIG_USB_LEDS_TRIGGER_USBPORT is not set +CONFIG_USB_MON=m +# 
CONFIG_USB_WUSB_CBAF is not set + +# +# USB Host Controller Drivers +# +# CONFIG_USB_C67X00_HCD is not set +CONFIG_USB_XHCI_HCD=y +# CONFIG_USB_XHCI_DBGCAP is not set +CONFIG_USB_XHCI_PCI=y +# CONFIG_USB_XHCI_PLATFORM is not set +CONFIG_USB_EHCI_HCD=y +CONFIG_USB_EHCI_ROOT_HUB_TT=y +CONFIG_USB_EHCI_TT_NEWSCHED=y +CONFIG_USB_EHCI_PCI=y +CONFIG_USB_EHCI_HCD_PLATFORM=m +# CONFIG_USB_OXU210HP_HCD is not set +# CONFIG_USB_ISP116X_HCD is not set +# CONFIG_USB_FOTG210_HCD is not set +CONFIG_USB_MAX3421_HCD=m +# CONFIG_USB_OHCI_HCD is not set +CONFIG_USB_UHCI_HCD=y +# CONFIG_USB_SL811_HCD is not set +# CONFIG_USB_R8A66597_HCD is not set +CONFIG_USB_HCD_BCMA=m +# CONFIG_USB_HCD_SSB is not set +# CONFIG_USB_HCD_TEST_MODE is not set + +# +# USB Device Class drivers +# +CONFIG_USB_ACM=m +# CONFIG_USB_PRINTER is not set +CONFIG_USB_WDM=m +# CONFIG_USB_TMC is not set + +# +# NOTE: USB_STORAGE depends on SCSI but BLK_DEV_SD may +# + +# +# also be needed; see USB_STORAGE Help for more info +# +CONFIG_USB_STORAGE=y +# CONFIG_USB_STORAGE_DEBUG is not set +# CONFIG_USB_STORAGE_REALTEK is not set +# CONFIG_USB_STORAGE_DATAFAB is not set +# CONFIG_USB_STORAGE_FREECOM is not set +# CONFIG_USB_STORAGE_ISD200 is not set +# CONFIG_USB_STORAGE_USBAT is not set +# CONFIG_USB_STORAGE_SDDR09 is not set +# CONFIG_USB_STORAGE_SDDR55 is not set +# CONFIG_USB_STORAGE_JUMPSHOT is not set +# CONFIG_USB_STORAGE_ALAUDA is not set +# CONFIG_USB_STORAGE_ONETOUCH is not set +# CONFIG_USB_STORAGE_KARMA is not set +# CONFIG_USB_STORAGE_CYPRESS_ATACB is not set +# CONFIG_USB_STORAGE_ENE_UB6250 is not set +# CONFIG_USB_UAS is not set + +# +# USB Imaging devices +# +# CONFIG_USB_MDC800 is not set +# CONFIG_USB_MICROTEK is not set +# CONFIG_USBIP_CORE is not set +# CONFIG_USB_MUSB_HDRC is not set +# CONFIG_USB_DWC3 is not set +# CONFIG_USB_DWC2 is not set +# CONFIG_USB_CHIPIDEA is not set +# CONFIG_USB_ISP1760 is not set + +# +# USB port drivers +# +CONFIG_USB_SERIAL=m +CONFIG_USB_SERIAL_GENERIC=y 
+CONFIG_USB_SERIAL_SIMPLE=m +CONFIG_USB_SERIAL_AIRCABLE=m +CONFIG_USB_SERIAL_ARK3116=m +CONFIG_USB_SERIAL_BELKIN=m +CONFIG_USB_SERIAL_CH341=m +CONFIG_USB_SERIAL_WHITEHEAT=m +CONFIG_USB_SERIAL_DIGI_ACCELEPORT=m +CONFIG_USB_SERIAL_CP210X=m +CONFIG_USB_SERIAL_CYPRESS_M8=m +CONFIG_USB_SERIAL_EMPEG=m +CONFIG_USB_SERIAL_FTDI_SIO=m +CONFIG_USB_SERIAL_VISOR=m +CONFIG_USB_SERIAL_IPAQ=m +CONFIG_USB_SERIAL_IR=m +CONFIG_USB_SERIAL_EDGEPORT=m +CONFIG_USB_SERIAL_EDGEPORT_TI=m +CONFIG_USB_SERIAL_F81232=m +CONFIG_USB_SERIAL_F8153X=m +CONFIG_USB_SERIAL_GARMIN=m +CONFIG_USB_SERIAL_IPW=m +CONFIG_USB_SERIAL_IUU=m +CONFIG_USB_SERIAL_KEYSPAN_PDA=m +CONFIG_USB_SERIAL_KEYSPAN=m +CONFIG_USB_SERIAL_KLSI=m +CONFIG_USB_SERIAL_KOBIL_SCT=m +CONFIG_USB_SERIAL_MCT_U232=m +CONFIG_USB_SERIAL_METRO=m +CONFIG_USB_SERIAL_MOS7720=m +CONFIG_USB_SERIAL_MOS7840=m +CONFIG_USB_SERIAL_MXUPORT=m +CONFIG_USB_SERIAL_NAVMAN=m +CONFIG_USB_SERIAL_PL2303=m +CONFIG_USB_SERIAL_OTI6858=m +CONFIG_USB_SERIAL_QCAUX=m +CONFIG_USB_SERIAL_QUALCOMM=m +CONFIG_USB_SERIAL_SPCP8X5=m +CONFIG_USB_SERIAL_SAFE=m +CONFIG_USB_SERIAL_SAFE_PADDED=y +CONFIG_USB_SERIAL_SIERRAWIRELESS=m +CONFIG_USB_SERIAL_SYMBOL=m +CONFIG_USB_SERIAL_TI=m +CONFIG_USB_SERIAL_CYBERJACK=m +CONFIG_USB_SERIAL_XIRCOM=m +CONFIG_USB_SERIAL_WWAN=m +CONFIG_USB_SERIAL_OPTION=m +CONFIG_USB_SERIAL_OMNINET=m +CONFIG_USB_SERIAL_OPTICON=m +CONFIG_USB_SERIAL_XSENS_MT=m +CONFIG_USB_SERIAL_WISHBONE=m +CONFIG_USB_SERIAL_SSU100=m +CONFIG_USB_SERIAL_QT2=m +CONFIG_USB_SERIAL_UPD78F0730=m +CONFIG_USB_SERIAL_DEBUG=m + +# +# USB Miscellaneous drivers +# +# CONFIG_USB_EMI62 is not set +# CONFIG_USB_EMI26 is not set +# CONFIG_USB_ADUTUX is not set +# CONFIG_USB_SEVSEG is not set +# CONFIG_USB_RIO500 is not set +# CONFIG_USB_LEGOTOWER is not set +# CONFIG_USB_LCD is not set +# CONFIG_USB_CYPRESS_CY7C63 is not set +# CONFIG_USB_CYTHERM is not set +# CONFIG_USB_IDMOUSE is not set +# CONFIG_USB_FTDI_ELAN is not set +CONFIG_USB_APPLEDISPLAY=m +# CONFIG_USB_SISUSBVGA is not set +# 
CONFIG_USB_LD is not set +# CONFIG_USB_TRANCEVIBRATOR is not set +# CONFIG_USB_IOWARRIOR is not set +# CONFIG_USB_TEST is not set +# CONFIG_USB_EHSET_TEST_FIXTURE is not set +# CONFIG_USB_ISIGHTFW is not set +# CONFIG_USB_YUREX is not set +CONFIG_USB_EZUSB_FX2=m +# CONFIG_USB_HUB_USB251XB is not set +# CONFIG_USB_HSIC_USB3503 is not set +# CONFIG_USB_HSIC_USB4604 is not set +# CONFIG_USB_LINK_LAYER_TEST is not set +# CONFIG_USB_CHAOSKEY is not set + +# +# USB Physical Layer drivers +# +CONFIG_USB_PHY=y +# CONFIG_NOP_USB_XCEIV is not set +# CONFIG_USB_GPIO_VBUS is not set +# CONFIG_USB_ISP1301 is not set +CONFIG_USB_GADGET=m +# CONFIG_USB_GADGET_DEBUG is not set +# CONFIG_USB_GADGET_DEBUG_FILES is not set +# CONFIG_USB_GADGET_DEBUG_FS is not set +CONFIG_USB_GADGET_VBUS_DRAW=2 +CONFIG_USB_GADGET_STORAGE_NUM_BUFFERS=2 + +# +# USB Peripheral Controller +# +# CONFIG_USB_FOTG210_UDC is not set +# CONFIG_USB_GR_UDC is not set +# CONFIG_USB_R8A66597 is not set +# CONFIG_USB_PXA27X is not set +# CONFIG_USB_MV_UDC is not set +# CONFIG_USB_MV_U3D is not set +# CONFIG_USB_M66592 is not set +# CONFIG_USB_BDC_UDC is not set +# CONFIG_USB_AMD5536UDC is not set +# CONFIG_USB_NET2272 is not set +# CONFIG_USB_NET2280 is not set +# CONFIG_USB_GOKU is not set +# CONFIG_USB_EG20T is not set +# CONFIG_USB_DUMMY_HCD is not set +CONFIG_USB_LIBCOMPOSITE=m +CONFIG_USB_U_AUDIO=m +CONFIG_USB_F_UAC2=m +CONFIG_USB_F_HID=m +# CONFIG_USB_CONFIGFS is not set +# CONFIG_USB_ZERO is not set +CONFIG_USB_AUDIO=m +# CONFIG_GADGET_UAC1 is not set +# CONFIG_USB_ETH is not set +# CONFIG_USB_G_NCM is not set +# CONFIG_USB_GADGETFS is not set +# CONFIG_USB_FUNCTIONFS is not set +# CONFIG_USB_MASS_STORAGE is not set +# CONFIG_USB_G_SERIAL is not set +# CONFIG_USB_MIDI_GADGET is not set +# CONFIG_USB_G_PRINTER is not set +# CONFIG_USB_CDC_COMPOSITE is not set +# CONFIG_USB_G_ACM_MS is not set +# CONFIG_USB_G_MULTI is not set +CONFIG_USB_G_HID=m +# CONFIG_USB_G_DBGP is not set +# CONFIG_USB_G_WEBCAM is not set 
+CONFIG_TYPEC=m +CONFIG_TYPEC_TCPM=m +CONFIG_TYPEC_TCPCI=m +# CONFIG_TYPEC_RT1711H is not set +CONFIG_TYPEC_FUSB302=m +CONFIG_TYPEC_UCSI=m +CONFIG_UCSI_ACPI=m +CONFIG_TYPEC_TPS6598X=m + +# +# USB Type-C Multiplexer/DeMultiplexer Switch support +# +CONFIG_TYPEC_MUX_PI3USB30532=m + +# +# USB Type-C Alternate Mode drivers +# +# CONFIG_TYPEC_DP_ALTMODE is not set +CONFIG_USB_ROLES_INTEL_XHCI=m +# CONFIG_USB_LED_TRIG is not set +# CONFIG_USB_ULPI_BUS is not set +CONFIG_USB_ROLE_SWITCH=m +# CONFIG_UWB is not set +CONFIG_MMC=y +CONFIG_MMC_BLOCK=y +CONFIG_MMC_BLOCK_MINORS=8 +# CONFIG_SDIO_UART is not set +# CONFIG_MMC_TEST is not set + +# +# MMC/SD/SDIO Host Controller Drivers +# +# CONFIG_MMC_DEBUG is not set +CONFIG_MMC_SDHCI=y +CONFIG_MMC_SDHCI_PCI=y +CONFIG_MMC_RICOH_MMC=y +CONFIG_MMC_SDHCI_ACPI=y +CONFIG_MMC_SDHCI_PLTFM=m +CONFIG_MMC_SDHCI_F_SDH30=m +# CONFIG_MMC_WBSD is not set +# CONFIG_MMC_TIFM_SD is not set +# CONFIG_MMC_SPI is not set +# CONFIG_MMC_CB710 is not set +# CONFIG_MMC_VIA_SDMMC is not set +# CONFIG_MMC_VUB300 is not set +# CONFIG_MMC_USHC is not set +# CONFIG_MMC_USDHI6ROL0 is not set +CONFIG_MMC_REALTEK_PCI=m +CONFIG_MMC_REALTEK_USB=m +CONFIG_MMC_CQHCI=y +# CONFIG_MMC_TOSHIBA_PCI is not set +# CONFIG_MMC_MTK is not set +CONFIG_MMC_SDHCI_XENON=m +# CONFIG_MEMSTICK is not set +CONFIG_NEW_LEDS=y +CONFIG_LEDS_CLASS=y +# CONFIG_LEDS_CLASS_FLASH is not set +# CONFIG_LEDS_BRIGHTNESS_HW_CHANGED is not set + +# +# LED drivers +# +# CONFIG_LEDS_APU is not set +# CONFIG_LEDS_LM3530 is not set +# CONFIG_LEDS_LM3642 is not set +# CONFIG_LEDS_PCA9532 is not set +# CONFIG_LEDS_GPIO is not set +# CONFIG_LEDS_LP3944 is not set +# CONFIG_LEDS_LP3952 is not set +# CONFIG_LEDS_LP5521 is not set +# CONFIG_LEDS_LP5523 is not set +# CONFIG_LEDS_LP5562 is not set +# CONFIG_LEDS_LP8501 is not set +# CONFIG_LEDS_CLEVO_MAIL is not set +# CONFIG_LEDS_PCA955X is not set +# CONFIG_LEDS_PCA963X is not set +# CONFIG_LEDS_DAC124S085 is not set +# CONFIG_LEDS_PWM is not set +# 
CONFIG_LEDS_BD2802 is not set +# CONFIG_LEDS_INTEL_SS4200 is not set +# CONFIG_LEDS_LT3593 is not set +# CONFIG_LEDS_TCA6507 is not set +# CONFIG_LEDS_TLC591XX is not set +# CONFIG_LEDS_LM355x is not set + +# +# LED driver for blink(1) USB RGB LED is under Special HID drivers (HID_THINGM) +# +# CONFIG_LEDS_BLINKM is not set +# CONFIG_LEDS_MLXCPLD is not set +CONFIG_LEDS_MLXREG=m +# CONFIG_LEDS_USER is not set +# CONFIG_LEDS_NIC78BX is not set + +# +# LED Triggers +# +CONFIG_LEDS_TRIGGERS=y +# CONFIG_LEDS_TRIGGER_TIMER is not set +# CONFIG_LEDS_TRIGGER_ONESHOT is not set +# CONFIG_LEDS_TRIGGER_DISK is not set +# CONFIG_LEDS_TRIGGER_MTD is not set +# CONFIG_LEDS_TRIGGER_HEARTBEAT is not set +# CONFIG_LEDS_TRIGGER_BACKLIGHT is not set +# CONFIG_LEDS_TRIGGER_CPU is not set +# CONFIG_LEDS_TRIGGER_ACTIVITY is not set +# CONFIG_LEDS_TRIGGER_GPIO is not set +# CONFIG_LEDS_TRIGGER_DEFAULT_ON is not set + +# +# iptables trigger is under Netfilter config (LED target) +# +# CONFIG_LEDS_TRIGGER_TRANSIENT is not set +# CONFIG_LEDS_TRIGGER_CAMERA is not set +# CONFIG_LEDS_TRIGGER_PANIC is not set +CONFIG_LEDS_TRIGGER_NETDEV=m +# CONFIG_ACCESSIBILITY is not set +CONFIG_INFINIBAND=m +CONFIG_INFINIBAND_USER_MAD=m +CONFIG_INFINIBAND_USER_ACCESS=m +# CONFIG_INFINIBAND_EXP_LEGACY_VERBS_NEW_UAPI is not set +CONFIG_INFINIBAND_USER_MEM=y +CONFIG_INFINIBAND_ON_DEMAND_PAGING=y +CONFIG_INFINIBAND_ADDR_TRANS=y +CONFIG_INFINIBAND_ADDR_TRANS_CONFIGFS=y +CONFIG_INFINIBAND_MTHCA=m +# CONFIG_INFINIBAND_MTHCA_DEBUG is not set +CONFIG_INFINIBAND_QIB=m +CONFIG_INFINIBAND_QIB_DCA=y +CONFIG_INFINIBAND_I40IW=m +CONFIG_MLX4_INFINIBAND=m +CONFIG_MLX5_INFINIBAND=m +CONFIG_INFINIBAND_NES=m +# CONFIG_INFINIBAND_NES_DEBUG is not set +CONFIG_INFINIBAND_OCRDMA=m +CONFIG_INFINIBAND_VMWARE_PVRDMA=m +CONFIG_INFINIBAND_USNIC=m +CONFIG_INFINIBAND_IPOIB=m +# CONFIG_INFINIBAND_IPOIB_CM is not set +# CONFIG_INFINIBAND_IPOIB_DEBUG is not set +CONFIG_INFINIBAND_SRP=m +CONFIG_INFINIBAND_ISER=m 
+CONFIG_INFINIBAND_OPA_VNIC=m +CONFIG_INFINIBAND_RDMAVT=m +CONFIG_RDMA_RXE=m +CONFIG_INFINIBAND_HFI1=m +# CONFIG_HFI1_DEBUG_SDMA_ORDER is not set +# CONFIG_SDMA_VERBOSITY is not set +# CONFIG_INFINIBAND_BNXT_RE is not set +CONFIG_EDAC_ATOMIC_SCRUB=y +CONFIG_EDAC_SUPPORT=y +CONFIG_RTC_LIB=y +CONFIG_RTC_MC146818_LIB=y +CONFIG_RTC_CLASS=y +# CONFIG_RTC_HCTOSYS is not set +# CONFIG_RTC_SYSTOHC is not set +# CONFIG_RTC_DEBUG is not set +# CONFIG_RTC_NVMEM is not set + +# +# RTC interfaces +# +CONFIG_RTC_INTF_SYSFS=y +CONFIG_RTC_INTF_PROC=y +CONFIG_RTC_INTF_DEV=y +# CONFIG_RTC_INTF_DEV_UIE_EMUL is not set +# CONFIG_RTC_DRV_TEST is not set + +# +# I2C RTC drivers +# +# CONFIG_RTC_DRV_ABB5ZES3 is not set +# CONFIG_RTC_DRV_ABX80X is not set +# CONFIG_RTC_DRV_DS1307 is not set +# CONFIG_RTC_DRV_DS1374 is not set +# CONFIG_RTC_DRV_DS1672 is not set +# CONFIG_RTC_DRV_MAX6900 is not set +# CONFIG_RTC_DRV_RS5C372 is not set +# CONFIG_RTC_DRV_ISL1208 is not set +# CONFIG_RTC_DRV_ISL12022 is not set +# CONFIG_RTC_DRV_X1205 is not set +# CONFIG_RTC_DRV_PCF8523 is not set +# CONFIG_RTC_DRV_PCF85063 is not set +# CONFIG_RTC_DRV_PCF85363 is not set +# CONFIG_RTC_DRV_PCF8563 is not set +# CONFIG_RTC_DRV_PCF8583 is not set +# CONFIG_RTC_DRV_M41T80 is not set +# CONFIG_RTC_DRV_BQ32K is not set +# CONFIG_RTC_DRV_S35390A is not set +# CONFIG_RTC_DRV_FM3130 is not set +# CONFIG_RTC_DRV_RX8010 is not set +# CONFIG_RTC_DRV_RX8581 is not set +# CONFIG_RTC_DRV_RX8025 is not set +# CONFIG_RTC_DRV_EM3027 is not set +# CONFIG_RTC_DRV_RV8803 is not set + +# +# SPI RTC drivers +# +# CONFIG_RTC_DRV_M41T93 is not set +# CONFIG_RTC_DRV_M41T94 is not set +# CONFIG_RTC_DRV_DS1302 is not set +# CONFIG_RTC_DRV_DS1305 is not set +# CONFIG_RTC_DRV_DS1343 is not set +# CONFIG_RTC_DRV_DS1347 is not set +# CONFIG_RTC_DRV_DS1390 is not set +# CONFIG_RTC_DRV_MAX6916 is not set +# CONFIG_RTC_DRV_R9701 is not set +# CONFIG_RTC_DRV_RX4581 is not set +# CONFIG_RTC_DRV_RX6110 is not set +# CONFIG_RTC_DRV_RS5C348 is 
not set +# CONFIG_RTC_DRV_MAX6902 is not set +# CONFIG_RTC_DRV_PCF2123 is not set +# CONFIG_RTC_DRV_MCP795 is not set +CONFIG_RTC_I2C_AND_SPI=y + +# +# SPI and I2C RTC drivers +# +# CONFIG_RTC_DRV_DS3232 is not set +# CONFIG_RTC_DRV_PCF2127 is not set +# CONFIG_RTC_DRV_RV3029C2 is not set + +# +# Platform RTC drivers +# +CONFIG_RTC_DRV_CMOS=y +# CONFIG_RTC_DRV_DS1286 is not set +# CONFIG_RTC_DRV_DS1511 is not set +# CONFIG_RTC_DRV_DS1553 is not set +# CONFIG_RTC_DRV_DS1685_FAMILY is not set +# CONFIG_RTC_DRV_DS1742 is not set +# CONFIG_RTC_DRV_DS2404 is not set +# CONFIG_RTC_DRV_STK17TA8 is not set +# CONFIG_RTC_DRV_M48T86 is not set +# CONFIG_RTC_DRV_M48T35 is not set +# CONFIG_RTC_DRV_M48T59 is not set +# CONFIG_RTC_DRV_MSM6242 is not set +# CONFIG_RTC_DRV_BQ4802 is not set +# CONFIG_RTC_DRV_RP5C01 is not set +# CONFIG_RTC_DRV_V3020 is not set + +# +# on-CPU RTC drivers +# +# CONFIG_RTC_DRV_FTRTC010 is not set + +# +# HID Sensor RTC drivers +# +CONFIG_RTC_DRV_HID_SENSOR_TIME=m +CONFIG_DMADEVICES=y +# CONFIG_DMADEVICES_DEBUG is not set + +# +# DMA Devices +# +CONFIG_DMA_ENGINE=y +CONFIG_DMA_VIRTUAL_CHANNELS=y +CONFIG_DMA_ACPI=y +CONFIG_ALTERA_MSGDMA=m +# CONFIG_INTEL_IDMA64 is not set +CONFIG_INTEL_IOATDMA=y +# CONFIG_QCOM_HIDMA_MGMT is not set +# CONFIG_QCOM_HIDMA is not set +CONFIG_DW_DMAC_CORE=y +# CONFIG_DW_DMAC is not set +CONFIG_DW_DMAC_PCI=y +CONFIG_HSU_DMA=y + +# +# DMA Clients +# +# CONFIG_ASYNC_TX_DMA is not set +# CONFIG_DMATEST is not set +CONFIG_DMA_ENGINE_RAID=y + +# +# DMABUF options +# +CONFIG_SYNC_FILE=y +# CONFIG_SW_SYNC is not set +CONFIG_HYPER_DMABUF=y +CONFIG_HYPER_DMABUF_SYSFS=y +# CONFIG_HYPER_DMABUF_EVENT_GEN is not set +CONFIG_DCA=y +# CONFIG_AUXDISPLAY is not set +CONFIG_UIO=m +# CONFIG_UIO_CIF is not set +# CONFIG_UIO_PDRV_GENIRQ is not set +# CONFIG_UIO_DMEM_GENIRQ is not set +# CONFIG_UIO_AEC is not set +# CONFIG_UIO_SERCOS3 is not set +CONFIG_UIO_PCI_GENERIC=m +# CONFIG_UIO_NETX is not set +# CONFIG_UIO_PRUSS is not set +# 
CONFIG_UIO_MF624 is not set +CONFIG_VFIO_IOMMU_TYPE1=m +CONFIG_VFIO_VIRQFD=m +CONFIG_VFIO=m +# CONFIG_VFIO_NOIOMMU is not set +CONFIG_VFIO_PCI=m +# CONFIG_VFIO_PCI_VGA is not set +CONFIG_VFIO_PCI_MMAP=y +CONFIG_VFIO_PCI_INTX=y +CONFIG_VFIO_PCI_IGD=y +# CONFIG_VFIO_MDEV is not set +CONFIG_IRQ_BYPASS_MANAGER=y +CONFIG_VIRT_DRIVERS=y +CONFIG_VBOXGUEST=y +CONFIG_VIRTIO=y +CONFIG_VIRTIO_MENU=y +CONFIG_VIRTIO_PCI=y +CONFIG_VIRTIO_PCI_LEGACY=y +CONFIG_VIRTIO_BALLOON=m +# CONFIG_VIRTIO_INPUT is not set +CONFIG_VIRTIO_MMIO=m +# CONFIG_VIRTIO_MMIO_CMDLINE_DEVICES is not set +# CONFIG_ACRN_VIRTIO_DEVICES is not set + +# +# Microsoft Hyper-V guest support +# +# CONFIG_HYPERV is not set +CONFIG_STAGING=y +CONFIG_PRISM2_USB=m +# CONFIG_COMEDI is not set +CONFIG_RTL8192U=m +CONFIG_RTLLIB=m +CONFIG_RTLLIB_CRYPTO_CCMP=m +CONFIG_RTLLIB_CRYPTO_TKIP=m +CONFIG_RTLLIB_CRYPTO_WEP=m +CONFIG_RTL8192E=m +CONFIG_RTL8723BS=m +CONFIG_R8712U=m +CONFIG_R8188EU=m +# CONFIG_88EU_AP_MODE is not set +CONFIG_R8822BE=m +CONFIG_RTLWIFI_DEBUG_ST=y +CONFIG_RTS5208=m +CONFIG_VT6655=m +CONFIG_VT6656=m + +# +# IIO staging drivers +# + +# +# Accelerometers +# +# CONFIG_ADIS16203 is not set +# CONFIG_ADIS16240 is not set + +# +# Analog to digital converters +# +# CONFIG_AD7606 is not set +# CONFIG_AD7780 is not set +# CONFIG_AD7816 is not set +# CONFIG_AD7192 is not set +# CONFIG_AD7280 is not set + +# +# Analog digital bi-direction converters +# +# CONFIG_ADT7316 is not set + +# +# Capacitance to digital converters +# +# CONFIG_AD7150 is not set +# CONFIG_AD7152 is not set +# CONFIG_AD7746 is not set + +# +# Direct Digital Synthesis +# +# CONFIG_AD9832 is not set +# CONFIG_AD9834 is not set + +# +# Network Analyzer, Impedance Converters +# +# CONFIG_AD5933 is not set + +# +# Active energy metering IC +# +# CONFIG_ADE7854 is not set + +# +# Resolver to digital converters +# +# CONFIG_AD2S90 is not set +# CONFIG_AD2S1210 is not set +# CONFIG_FB_SM750 is not set +# CONFIG_FB_XGI is not set + +# +# Speakup 
console speech +# +# CONFIG_SPEAKUP is not set +# CONFIG_STAGING_MEDIA is not set + +# +# Android +# +# CONFIG_LTE_GDM724X is not set +# CONFIG_DGNC is not set +# CONFIG_GS_FPGABOOT is not set +# CONFIG_UNISYSSPAR is not set +# CONFIG_FB_TFT is not set +# CONFIG_WILC1000_SDIO is not set +# CONFIG_WILC1000_SPI is not set +# CONFIG_MOST is not set +# CONFIG_KS7010 is not set +# CONFIG_GREYBUS is not set +CONFIG_DRM_VBOXVIDEO=m +# CONFIG_PI433 is not set +CONFIG_MTK_MMC=m +# CONFIG_MTK_AEE_KDUMP is not set +# CONFIG_MTK_MMC_CD_POLL is not set + +# +# Gasket devices +# +# CONFIG_STAGING_GASKET_FRAMEWORK is not set +# CONFIG_XIL_AXIS_FIFO is not set +# CONFIG_EROFS_FS is not set +CONFIG_IGB_AVB=m +CONFIG_X86_PLATFORM_DEVICES=y +CONFIG_ACER_WMI=m +CONFIG_ACER_WIRELESS=m +CONFIG_ACERHDF=m +CONFIG_ALIENWARE_WMI=m +CONFIG_ASUS_LAPTOP=m +CONFIG_DELL_SMBIOS=m +# CONFIG_DELL_SMBIOS_WMI is not set +# CONFIG_DELL_SMBIOS_SMM is not set +CONFIG_DELL_LAPTOP=m +CONFIG_DELL_WMI=m +CONFIG_DELL_WMI_DESCRIPTOR=m +CONFIG_DELL_WMI_AIO=m +CONFIG_DELL_WMI_LED=m +CONFIG_DELL_SMO8800=m +CONFIG_DELL_RBTN=m +CONFIG_FUJITSU_LAPTOP=m +# CONFIG_FUJITSU_TABLET is not set +# CONFIG_AMILO_RFKILL is not set +CONFIG_GPD_POCKET_FAN=m +# CONFIG_HP_ACCEL is not set +# CONFIG_HP_WIRELESS is not set +CONFIG_HP_WMI=m +CONFIG_MSI_LAPTOP=m +# CONFIG_PANASONIC_LAPTOP is not set +# CONFIG_COMPAL_LAPTOP is not set +# CONFIG_SONY_LAPTOP is not set +# CONFIG_IDEAPAD_LAPTOP is not set +CONFIG_SURFACE3_WMI=m +CONFIG_THINKPAD_ACPI=m +CONFIG_THINKPAD_ACPI_ALSA_SUPPORT=y +# CONFIG_THINKPAD_ACPI_DEBUGFACILITIES is not set +# CONFIG_THINKPAD_ACPI_DEBUG is not set +# CONFIG_THINKPAD_ACPI_UNSAFE_LEDS is not set +CONFIG_THINKPAD_ACPI_VIDEO=y +CONFIG_THINKPAD_ACPI_HOTKEY_POLL=y +# CONFIG_SENSORS_HDAPS is not set +# CONFIG_INTEL_MENLOW is not set +# CONFIG_EEEPC_LAPTOP is not set +CONFIG_ASUS_WMI=m +CONFIG_ASUS_NB_WMI=m +# CONFIG_EEEPC_WMI is not set +# CONFIG_ASUS_WIRELESS is not set +CONFIG_ACPI_WMI=m +CONFIG_WMI_BMOF=m 
+CONFIG_INTEL_WMI_THUNDERBOLT=m +CONFIG_MSI_WMI=m +CONFIG_PEAQ_WMI=m +# CONFIG_TOPSTAR_LAPTOP is not set +CONFIG_ACPI_TOSHIBA=m +# CONFIG_TOSHIBA_BT_RFKILL is not set +# CONFIG_TOSHIBA_HAPS is not set +CONFIG_TOSHIBA_WMI=m +# CONFIG_ACPI_CMPC is not set +CONFIG_INTEL_INT0002_VGPIO=m +CONFIG_INTEL_HID_EVENT=m +# CONFIG_INTEL_VBTN is not set +CONFIG_INTEL_IPS=y +# CONFIG_INTEL_PMC_CORE is not set +# CONFIG_IBM_RTL is not set +# CONFIG_SAMSUNG_LAPTOP is not set +CONFIG_MXM_WMI=m +# CONFIG_INTEL_OAKTRAIL is not set +# CONFIG_SAMSUNG_Q10 is not set +CONFIG_APPLE_GMUX=m +CONFIG_INTEL_RST=m +CONFIG_INTEL_SMARTCONNECT=m +# CONFIG_PVPANIC is not set +CONFIG_INTEL_PMC_IPC=m +CONFIG_INTEL_BXTWC_PMIC_TMU=m +# CONFIG_SURFACE_PRO3_BUTTON is not set +# CONFIG_INTEL_PUNIT_IPC is not set +# CONFIG_MLX_PLATFORM is not set +CONFIG_INTEL_TURBO_MAX_3=y +# CONFIG_TOUCHSCREEN_DMI is not set +CONFIG_INTEL_CHTDC_TI_PWRBTN=m +# CONFIG_I2C_MULTI_INSTANTIATE is not set +CONFIG_PMC_ATOM=y +# CONFIG_CHROME_PLATFORMS is not set +CONFIG_MELLANOX_PLATFORM=y +CONFIG_MLXREG_HOTPLUG=m +# CONFIG_MLXREG_IO is not set +CONFIG_CLKDEV_LOOKUP=y +CONFIG_HAVE_CLK_PREPARE=y +CONFIG_COMMON_CLK=y + +# +# Common Clock Framework +# +# CONFIG_COMMON_CLK_MAX9485 is not set +# CONFIG_COMMON_CLK_SI5351 is not set +# CONFIG_COMMON_CLK_SI544 is not set +# CONFIG_COMMON_CLK_CDCE706 is not set +# CONFIG_COMMON_CLK_CS2000_CP is not set +# CONFIG_COMMON_CLK_PWM is not set +# CONFIG_HWSPINLOCK is not set + +# +# Clock Source drivers +# +CONFIG_CLKEVT_I8253=y +CONFIG_CLKBLD_I8253=y +CONFIG_MAILBOX=y +CONFIG_PCC=y +# CONFIG_ALTERA_MBOX is not set +CONFIG_IOMMU_API=y +CONFIG_IOMMU_SUPPORT=y + +# +# Generic IOMMU Pagetable Support +# +# CONFIG_IOMMU_DEBUGFS is not set +# CONFIG_IOMMU_DEFAULT_PASSTHROUGH is not set +CONFIG_IOMMU_IOVA=y +# CONFIG_AMD_IOMMU is not set +CONFIG_DMAR_TABLE=y +CONFIG_INTEL_IOMMU=y +# CONFIG_INTEL_IOMMU_SVM is not set +CONFIG_INTEL_IOMMU_DEFAULT_ON=y +CONFIG_INTEL_IOMMU_FLOPPY_WA=y +CONFIG_IRQ_REMAP=y 
+ +# +# Remoteproc drivers +# +CONFIG_REMOTEPROC=m + +# +# Rpmsg drivers +# +# CONFIG_RPMSG_QCOM_GLINK_RPM is not set +# CONFIG_RPMSG_VIRTIO is not set + +# +# SOC (System On Chip) specific Drivers +# + +# +# Amlogic SoC drivers +# + +# +# Broadcom SoC drivers +# + +# +# NXP/Freescale QorIQ SoC drivers +# + +# +# i.MX SoC drivers +# + +# +# Qualcomm SoC drivers +# +# CONFIG_SOC_TI is not set + +# +# Xilinx SoC drivers +# +CONFIG_XILINX_VCU=m +CONFIG_PM_DEVFREQ=y + +# +# DEVFREQ Governors +# +CONFIG_DEVFREQ_GOV_SIMPLE_ONDEMAND=m +CONFIG_DEVFREQ_GOV_PERFORMANCE=y +# CONFIG_DEVFREQ_GOV_POWERSAVE is not set +# CONFIG_DEVFREQ_GOV_USERSPACE is not set +# CONFIG_DEVFREQ_GOV_PASSIVE is not set + +# +# DEVFREQ Drivers +# +# CONFIG_PM_DEVFREQ_EVENT is not set +CONFIG_EXTCON=y + +# +# Extcon Device Drivers +# +# CONFIG_EXTCON_ADC_JACK is not set +# CONFIG_EXTCON_GPIO is not set +CONFIG_EXTCON_INTEL_INT3496=m +# CONFIG_EXTCON_MAX3355 is not set +# CONFIG_EXTCON_RT8973A is not set +# CONFIG_EXTCON_SM5502 is not set +# CONFIG_EXTCON_USB_GPIO is not set +# CONFIG_MEMORY is not set +CONFIG_IIO=m +CONFIG_IIO_BUFFER=y +# CONFIG_IIO_BUFFER_CB is not set +CONFIG_IIO_BUFFER_HW_CONSUMER=m +CONFIG_IIO_KFIFO_BUF=m +CONFIG_IIO_TRIGGERED_BUFFER=m +# CONFIG_IIO_CONFIGFS is not set +CONFIG_IIO_TRIGGER=y +CONFIG_IIO_CONSUMERS_PER_TRIGGER=2 +# CONFIG_IIO_SW_DEVICE is not set +# CONFIG_IIO_SW_TRIGGER is not set + +# +# Accelerometers +# +# CONFIG_ADIS16201 is not set +# CONFIG_ADIS16209 is not set +# CONFIG_ADXL345_I2C is not set +# CONFIG_ADXL345_SPI is not set +# CONFIG_BMA180 is not set +# CONFIG_BMA220 is not set +# CONFIG_BMC150_ACCEL is not set +# CONFIG_DA280 is not set +# CONFIG_DA311 is not set +# CONFIG_DMARD09 is not set +# CONFIG_DMARD10 is not set +CONFIG_HID_SENSOR_ACCEL_3D=m +# CONFIG_IIO_CROS_EC_ACCEL_LEGACY is not set +# CONFIG_IIO_ST_ACCEL_3AXIS is not set +# CONFIG_KXSD9 is not set +# CONFIG_KXCJK1013 is not set +# CONFIG_MC3230 is not set +# CONFIG_MMA7455_I2C is not set +# 
CONFIG_MMA7455_SPI is not set +# CONFIG_MMA7660 is not set +# CONFIG_MMA8452 is not set +# CONFIG_MMA9551 is not set +# CONFIG_MMA9553 is not set +# CONFIG_MXC4005 is not set +# CONFIG_MXC6255 is not set +# CONFIG_SCA3000 is not set +# CONFIG_STK8312 is not set +# CONFIG_STK8BA50 is not set + +# +# Analog to digital converters +# +# CONFIG_AD7266 is not set +# CONFIG_AD7291 is not set +# CONFIG_AD7298 is not set +# CONFIG_AD7476 is not set +# CONFIG_AD7766 is not set +# CONFIG_AD7791 is not set +# CONFIG_AD7793 is not set +# CONFIG_AD7887 is not set +# CONFIG_AD7923 is not set +# CONFIG_AD799X is not set +# CONFIG_HI8435 is not set +# CONFIG_HX711 is not set +# CONFIG_INA2XX_ADC is not set +# CONFIG_LTC2471 is not set +# CONFIG_LTC2485 is not set +# CONFIG_LTC2497 is not set +# CONFIG_MAX1027 is not set +# CONFIG_MAX11100 is not set +# CONFIG_MAX1118 is not set +# CONFIG_MAX1363 is not set +# CONFIG_MAX9611 is not set +# CONFIG_MCP320X is not set +# CONFIG_MCP3422 is not set +# CONFIG_NAU7802 is not set +# CONFIG_TI_ADC081C is not set +# CONFIG_TI_ADC0832 is not set +# CONFIG_TI_ADC084S021 is not set +# CONFIG_TI_ADC12138 is not set +# CONFIG_TI_ADC108S102 is not set +# CONFIG_TI_ADC128S052 is not set +# CONFIG_TI_ADC161S626 is not set +# CONFIG_TI_ADS1015 is not set +# CONFIG_TI_ADS7950 is not set +# CONFIG_TI_TLC4541 is not set + +# +# Analog Front Ends +# + +# +# Amplifiers +# +# CONFIG_AD8366 is not set + +# +# Chemical Sensors +# +# CONFIG_ATLAS_PH_SENSOR is not set +# CONFIG_BME680 is not set +# CONFIG_CCS811 is not set +# CONFIG_IAQCORE is not set +# CONFIG_VZ89X is not set + +# +# Hid Sensor IIO Common +# +CONFIG_HID_SENSOR_IIO_COMMON=m +CONFIG_HID_SENSOR_IIO_TRIGGER=m + +# +# SSP Sensor Common +# +CONFIG_IIO_SSP_SENSORS_COMMONS=m +CONFIG_IIO_SSP_SENSORHUB=m +CONFIG_IIO_ST_SENSORS_I2C=m +CONFIG_IIO_ST_SENSORS_SPI=m +CONFIG_IIO_ST_SENSORS_CORE=m + +# +# Counters +# + +# +# Digital to analog converters +# +# CONFIG_AD5064 is not set +# CONFIG_AD5360 is not 
set +# CONFIG_AD5380 is not set +# CONFIG_AD5421 is not set +# CONFIG_AD5446 is not set +# CONFIG_AD5449 is not set +# CONFIG_AD5592R is not set +# CONFIG_AD5593R is not set +# CONFIG_AD5504 is not set +# CONFIG_AD5624R_SPI is not set +# CONFIG_LTC2632 is not set +# CONFIG_AD5686_SPI is not set +# CONFIG_AD5696_I2C is not set +# CONFIG_AD5755 is not set +# CONFIG_AD5758 is not set +# CONFIG_AD5761 is not set +# CONFIG_AD5764 is not set +# CONFIG_AD5791 is not set +# CONFIG_AD7303 is not set +# CONFIG_AD8801 is not set +# CONFIG_DS4424 is not set +# CONFIG_M62332 is not set +# CONFIG_MAX517 is not set +# CONFIG_MCP4725 is not set +# CONFIG_MCP4922 is not set +# CONFIG_TI_DAC082S085 is not set +# CONFIG_TI_DAC5571 is not set + +# +# IIO dummy driver +# + +# +# Frequency Synthesizers DDS/PLL +# + +# +# Clock Generator/Distribution +# +# CONFIG_AD9523 is not set + +# +# Phase-Locked Loop (PLL) frequency synthesizers +# +# CONFIG_ADF4350 is not set + +# +# Digital gyroscope sensors +# +# CONFIG_ADIS16080 is not set +# CONFIG_ADIS16130 is not set +# CONFIG_ADIS16136 is not set +# CONFIG_ADIS16260 is not set +# CONFIG_ADXRS450 is not set +# CONFIG_BMG160 is not set +CONFIG_HID_SENSOR_GYRO_3D=m +# CONFIG_MPU3050_I2C is not set +CONFIG_IIO_ST_GYRO_3AXIS=m +CONFIG_IIO_ST_GYRO_I2C_3AXIS=m +CONFIG_IIO_ST_GYRO_SPI_3AXIS=m +# CONFIG_ITG3200 is not set + +# +# Health Sensors +# + +# +# Heart Rate Monitors +# +# CONFIG_AFE4403 is not set +# CONFIG_AFE4404 is not set +# CONFIG_MAX30100 is not set +# CONFIG_MAX30102 is not set + +# +# Humidity sensors +# +# CONFIG_AM2315 is not set +# CONFIG_DHT11 is not set +# CONFIG_HDC100X is not set +# CONFIG_HID_SENSOR_HUMIDITY is not set +# CONFIG_HTS221 is not set +# CONFIG_HTU21 is not set +# CONFIG_SI7005 is not set +# CONFIG_SI7020 is not set + +# +# Inertial measurement units +# +# CONFIG_ADIS16400 is not set +# CONFIG_ADIS16480 is not set +# CONFIG_BMI160_I2C is not set +# CONFIG_BMI160_SPI is not set +# CONFIG_KMX61 is not set +# 
CONFIG_INV_MPU6050_I2C is not set +# CONFIG_INV_MPU6050_SPI is not set +# CONFIG_IIO_ST_LSM6DSX is not set + +# +# Light sensors +# +CONFIG_ACPI_ALS=m +# CONFIG_ADJD_S311 is not set +# CONFIG_AL3320A is not set +# CONFIG_APDS9300 is not set +# CONFIG_APDS9960 is not set +# CONFIG_BH1750 is not set +# CONFIG_BH1780 is not set +# CONFIG_CM32181 is not set +# CONFIG_CM3232 is not set +# CONFIG_CM3323 is not set +# CONFIG_CM36651 is not set +# CONFIG_GP2AP020A00F is not set +# CONFIG_SENSORS_ISL29018 is not set +CONFIG_SENSORS_ISL29028=m +# CONFIG_ISL29125 is not set +CONFIG_HID_SENSOR_ALS=m +CONFIG_HID_SENSOR_PROX=m +# CONFIG_JSA1212 is not set +# CONFIG_RPR0521 is not set +# CONFIG_LTR501 is not set +CONFIG_LV0104CS=m +# CONFIG_MAX44000 is not set +# CONFIG_OPT3001 is not set +# CONFIG_PA12203001 is not set +# CONFIG_SI1133 is not set +# CONFIG_SI1145 is not set +# CONFIG_STK3310 is not set +# CONFIG_ST_UVIS25 is not set +# CONFIG_TCS3414 is not set +# CONFIG_TCS3472 is not set +# CONFIG_SENSORS_TSL2563 is not set +# CONFIG_TSL2583 is not set +# CONFIG_TSL2772 is not set +# CONFIG_TSL4531 is not set +# CONFIG_US5182D is not set +# CONFIG_VCNL4000 is not set +# CONFIG_VEML6070 is not set +# CONFIG_VL6180 is not set +# CONFIG_ZOPT2201 is not set + +# +# Magnetometer sensors +# +CONFIG_AK8975=m +CONFIG_AK09911=m +# CONFIG_BMC150_MAGN_I2C is not set +# CONFIG_BMC150_MAGN_SPI is not set +# CONFIG_MAG3110 is not set +CONFIG_HID_SENSOR_MAGNETOMETER_3D=m +# CONFIG_MMC35240 is not set +# CONFIG_IIO_ST_MAGN_3AXIS is not set +# CONFIG_SENSORS_HMC5843_I2C is not set +# CONFIG_SENSORS_HMC5843_SPI is not set + +# +# Multiplexers +# + +# +# Inclinometer sensors +# +CONFIG_HID_SENSOR_INCLINOMETER_3D=m +CONFIG_HID_SENSOR_DEVICE_ROTATION=m + +# +# Triggers - standalone +# +# CONFIG_IIO_INTERRUPT_TRIGGER is not set +CONFIG_IIO_SYSFS_TRIGGER=m + +# +# Digital potentiometers +# +# CONFIG_AD5272 is not set +# CONFIG_DS1803 is not set +# CONFIG_MAX5481 is not set +# CONFIG_MAX5487 is not 
set +# CONFIG_MCP4018 is not set +# CONFIG_MCP4131 is not set +# CONFIG_MCP4531 is not set +# CONFIG_TPL0102 is not set + +# +# Digital potentiostats +# +# CONFIG_LMP91000 is not set + +# +# Pressure sensors +# +# CONFIG_ABP060MG is not set +# CONFIG_BMP280 is not set +CONFIG_HID_SENSOR_PRESS=m +# CONFIG_HP03 is not set +# CONFIG_MPL115_I2C is not set +# CONFIG_MPL115_SPI is not set +# CONFIG_MPL3115 is not set +# CONFIG_MS5611 is not set +# CONFIG_MS5637 is not set +# CONFIG_IIO_ST_PRESS is not set +# CONFIG_T5403 is not set +# CONFIG_HP206C is not set +# CONFIG_ZPA2326 is not set + +# +# Lightning sensors +# +# CONFIG_AS3935 is not set + +# +# Proximity and distance sensors +# +# CONFIG_ISL29501 is not set +# CONFIG_LIDAR_LITE_V2 is not set +# CONFIG_RFD77402 is not set +CONFIG_SRF04=m +# CONFIG_SX9500 is not set +# CONFIG_SRF08 is not set + +# +# Resolver to digital converters +# +# CONFIG_AD2S1200 is not set + +# +# Temperature sensors +# +# CONFIG_MAXIM_THERMOCOUPLE is not set +CONFIG_HID_SENSOR_TEMP=m +# CONFIG_MLX90614 is not set +# CONFIG_MLX90632 is not set +# CONFIG_TMP006 is not set +# CONFIG_TMP007 is not set +# CONFIG_TSYS01 is not set +# CONFIG_TSYS02D is not set +# CONFIG_NTB is not set +# CONFIG_VME_BUS is not set +CONFIG_PWM=y +CONFIG_PWM_SYSFS=y +# CONFIG_PWM_CRC is not set +CONFIG_PWM_LPSS=m +CONFIG_PWM_LPSS_PCI=m +CONFIG_PWM_LPSS_PLATFORM=m +# CONFIG_PWM_PCA9685 is not set + +# +# IRQ chip support +# +CONFIG_ARM_GIC_MAX_NR=1 +# CONFIG_IPACK_BUS is not set +# CONFIG_RESET_CONTROLLER is not set +# CONFIG_FMC is not set + +# +# PHY Subsystem +# +CONFIG_GENERIC_PHY=y +# CONFIG_BCM_KONA_USB2_PHY is not set +# CONFIG_PHY_PXA_28NM_HSIC is not set +# CONFIG_PHY_PXA_28NM_USB2 is not set +CONFIG_PHY_CPCAP_USB=m +CONFIG_POWERCAP=y +CONFIG_INTEL_RAPL=y +# CONFIG_IDLE_INJECT is not set +# CONFIG_MCB is not set + +# +# Performance monitor support +# +# CONFIG_RAS is not set +CONFIG_THUNDERBOLT=m + +# +# Android +# +# CONFIG_ANDROID is not set 
+CONFIG_LIBNVDIMM=m +CONFIG_BLK_DEV_PMEM=m +CONFIG_ND_BLK=m +CONFIG_ND_CLAIM=y +CONFIG_ND_BTT=m +CONFIG_BTT=y +CONFIG_ND_PFN=m +CONFIG_NVDIMM_PFN=y +CONFIG_NVDIMM_DAX=y +CONFIG_DAX_DRIVER=y +CONFIG_DAX=y +CONFIG_DEV_DAX=y +CONFIG_DEV_DAX_PMEM=m +CONFIG_NVMEM=y + +# +# HW tracing support +# +# CONFIG_STM is not set +# CONFIG_INTEL_TH is not set +CONFIG_FPGA=y +CONFIG_ALTERA_PR_IP_CORE=m +CONFIG_FPGA_MGR_ALTERA_PS_SPI=m +CONFIG_FPGA_MGR_ALTERA_CVP=m +CONFIG_FPGA_MGR_XILINX_SPI=m +# CONFIG_FPGA_MGR_MACHXO2_SPI is not set +CONFIG_FPGA_BRIDGE=m +CONFIG_XILINX_PR_DECOUPLER=m +CONFIG_FPGA_REGION=m +# CONFIG_FPGA_DFL is not set +CONFIG_PM_OPP=y +CONFIG_SDW=y +CONFIG_SDW_CNL=y +# CONFIG_SDW_MAXIM_SLAVE is not set +# CONFIG_UNISYS_VISORBUS is not set +# CONFIG_SIOX is not set +# CONFIG_SLIMBUS is not set + +# +# File systems +# +CONFIG_DCACHE_WORD_ACCESS=y +CONFIG_FS_IOMAP=y +# CONFIG_EXT2_FS is not set +# CONFIG_EXT3_FS is not set +CONFIG_EXT4_FS=y +CONFIG_EXT4_USE_FOR_EXT2=y +CONFIG_EXT4_FS_POSIX_ACL=y +CONFIG_EXT4_FS_SECURITY=y +# CONFIG_EXT4_ENCRYPTION is not set +# CONFIG_EXT4_DEBUG is not set +CONFIG_JBD2=y +# CONFIG_JBD2_DEBUG is not set +CONFIG_FS_MBCACHE=y +# CONFIG_REISERFS_FS is not set +# CONFIG_JFS_FS is not set +CONFIG_XFS_FS=y +# CONFIG_XFS_QUOTA is not set +CONFIG_XFS_POSIX_ACL=y +CONFIG_XFS_RT=y +# CONFIG_XFS_ONLINE_SCRUB is not set +# CONFIG_XFS_WARN is not set +# CONFIG_XFS_DEBUG is not set +# CONFIG_GFS2_FS is not set +# CONFIG_OCFS2_FS is not set +CONFIG_BTRFS_FS=y +CONFIG_BTRFS_FS_POSIX_ACL=y +# CONFIG_BTRFS_FS_CHECK_INTEGRITY is not set +# CONFIG_BTRFS_FS_RUN_SANITY_TESTS is not set +# CONFIG_BTRFS_DEBUG is not set +# CONFIG_BTRFS_ASSERT is not set +# CONFIG_BTRFS_FS_REF_VERIFY is not set +# CONFIG_NILFS2_FS is not set +# CONFIG_F2FS_FS is not set +CONFIG_FS_DAX=y +CONFIG_FS_DAX_PMD=y +CONFIG_FS_POSIX_ACL=y +CONFIG_EXPORTFS=y +CONFIG_EXPORTFS_BLOCK_OPS=y +CONFIG_FILE_LOCKING=y +CONFIG_MANDATORY_FILE_LOCKING=y +CONFIG_FS_ENCRYPTION=m +CONFIG_FSNOTIFY=y 
+CONFIG_DNOTIFY=y +CONFIG_INOTIFY_USER=y +CONFIG_FANOTIFY=y +# CONFIG_FANOTIFY_ACCESS_PERMISSIONS is not set +# CONFIG_QUOTA is not set +CONFIG_AUTOFS4_FS=y +CONFIG_AUTOFS_FS=y +CONFIG_FUSE_FS=m +# CONFIG_CUSE is not set +CONFIG_OVERLAY_FS=y +CONFIG_OVERLAY_FS_REDIRECT_DIR=y +CONFIG_OVERLAY_FS_REDIRECT_ALWAYS_FOLLOW=y +CONFIG_OVERLAY_FS_INDEX=y +# CONFIG_OVERLAY_FS_NFS_EXPORT is not set +# CONFIG_OVERLAY_FS_XINO_AUTO is not set +# CONFIG_OVERLAY_FS_METACOPY is not set + +# +# Caches +# +CONFIG_FSCACHE=m +# CONFIG_FSCACHE_STATS is not set +# CONFIG_FSCACHE_HISTOGRAM is not set +# CONFIG_FSCACHE_DEBUG is not set +# CONFIG_FSCACHE_OBJECT_LIST is not set +CONFIG_CACHEFILES=m +# CONFIG_CACHEFILES_DEBUG is not set +# CONFIG_CACHEFILES_HISTOGRAM is not set + +# +# CD-ROM/DVD Filesystems +# +CONFIG_ISO9660_FS=m +# CONFIG_JOLIET is not set +# CONFIG_ZISOFS is not set +# CONFIG_UDF_FS is not set + +# +# DOS/FAT/NT Filesystems +# +CONFIG_FAT_FS=y +# CONFIG_MSDOS_FS is not set +CONFIG_VFAT_FS=y +CONFIG_FAT_DEFAULT_CODEPAGE=437 +CONFIG_FAT_DEFAULT_IOCHARSET="ascii" +# CONFIG_FAT_DEFAULT_UTF8 is not set +# CONFIG_NTFS_FS is not set + +# +# Pseudo filesystems +# +CONFIG_PROC_FS=y +# CONFIG_PROC_KCORE is not set +CONFIG_PROC_SYSCTL=y +CONFIG_PROC_PAGE_MONITOR=y +CONFIG_PROC_CHILDREN=y +CONFIG_KERNFS=y +CONFIG_SYSFS=y +CONFIG_TMPFS=y +CONFIG_TMPFS_POSIX_ACL=y +CONFIG_TMPFS_XATTR=y +CONFIG_HUGETLBFS=y +CONFIG_HUGETLB_PAGE=y +CONFIG_MEMFD_CREATE=y +CONFIG_ARCH_HAS_GIGANTIC_PAGE=y +CONFIG_CONFIGFS_FS=y +CONFIG_EFIVAR_FS=y +CONFIG_MISC_FILESYSTEMS=y +# CONFIG_ORANGEFS_FS is not set +# CONFIG_ADFS_FS is not set +# CONFIG_AFFS_FS is not set +# CONFIG_ECRYPT_FS is not set +# CONFIG_HFS_FS is not set +# CONFIG_HFSPLUS_FS is not set +# CONFIG_BEFS_FS is not set +# CONFIG_BFS_FS is not set +# CONFIG_EFS_FS is not set +# CONFIG_JFFS2_FS is not set +# CONFIG_CRAMFS is not set +CONFIG_SQUASHFS=y +CONFIG_SQUASHFS_FILE_CACHE=y +# CONFIG_SQUASHFS_FILE_DIRECT is not set 
+CONFIG_SQUASHFS_DECOMP_SINGLE=y +# CONFIG_SQUASHFS_DECOMP_MULTI is not set +# CONFIG_SQUASHFS_DECOMP_MULTI_PERCPU is not set +# CONFIG_SQUASHFS_XATTR is not set +CONFIG_SQUASHFS_ZLIB=y +# CONFIG_SQUASHFS_LZ4 is not set +# CONFIG_SQUASHFS_LZO is not set +# CONFIG_SQUASHFS_XZ is not set +# CONFIG_SQUASHFS_ZSTD is not set +# CONFIG_SQUASHFS_4K_DEVBLK_SIZE is not set +# CONFIG_SQUASHFS_EMBEDDED is not set +CONFIG_SQUASHFS_FRAGMENT_CACHE_SIZE=3 +# CONFIG_VXFS_FS is not set +# CONFIG_MINIX_FS is not set +# CONFIG_OMFS_FS is not set +# CONFIG_HPFS_FS is not set +# CONFIG_QNX4FS_FS is not set +# CONFIG_QNX6FS_FS is not set +# CONFIG_ROMFS_FS is not set +CONFIG_PSTORE=y +CONFIG_PSTORE_DEFLATE_COMPRESS=m +# CONFIG_PSTORE_LZO_COMPRESS is not set +# CONFIG_PSTORE_LZ4_COMPRESS is not set +CONFIG_PSTORE_LZ4HC_COMPRESS=m +# CONFIG_PSTORE_842_COMPRESS is not set +# CONFIG_PSTORE_ZSTD_COMPRESS is not set +CONFIG_PSTORE_COMPRESS=y +CONFIG_PSTORE_DEFLATE_COMPRESS_DEFAULT=y +# CONFIG_PSTORE_LZ4HC_COMPRESS_DEFAULT is not set +CONFIG_PSTORE_COMPRESS_DEFAULT="deflate" +# CONFIG_PSTORE_CONSOLE is not set +# CONFIG_PSTORE_PMSG is not set +CONFIG_PSTORE_RAM=m +# CONFIG_SYSV_FS is not set +# CONFIG_UFS_FS is not set +CONFIG_NETWORK_FILESYSTEMS=y +CONFIG_NFS_FS=m +CONFIG_NFS_V2=m +CONFIG_NFS_V3=m +# CONFIG_NFS_V3_ACL is not set +CONFIG_NFS_V4=m +# CONFIG_NFS_SWAP is not set +CONFIG_NFS_V4_1=y +CONFIG_NFS_V4_2=y +CONFIG_PNFS_FILE_LAYOUT=m +CONFIG_PNFS_BLOCK=m +CONFIG_PNFS_FLEXFILE_LAYOUT=m +CONFIG_NFS_V4_1_IMPLEMENTATION_ID_DOMAIN="clearlinux.org" +# CONFIG_NFS_V4_1_MIGRATION is not set +CONFIG_NFS_V4_SECURITY_LABEL=y +# CONFIG_NFS_FSCACHE is not set +# CONFIG_NFS_USE_LEGACY_DNS is not set +CONFIG_NFS_USE_KERNEL_DNS=y +CONFIG_NFSD=m +CONFIG_NFSD_V3=y +# CONFIG_NFSD_V3_ACL is not set +CONFIG_NFSD_V4=y +CONFIG_NFSD_PNFS=y +CONFIG_NFSD_BLOCKLAYOUT=y +CONFIG_NFSD_SCSILAYOUT=y +# CONFIG_NFSD_FLEXFILELAYOUT is not set +# CONFIG_NFSD_V4_SECURITY_LABEL is not set +# CONFIG_NFSD_FAULT_INJECTION is not 
set +CONFIG_GRACE_PERIOD=m +CONFIG_LOCKD=m +CONFIG_LOCKD_V4=y +CONFIG_NFS_COMMON=y +CONFIG_SUNRPC=m +CONFIG_SUNRPC_GSS=m +CONFIG_SUNRPC_BACKCHANNEL=y +# CONFIG_RPCSEC_GSS_KRB5 is not set +# CONFIG_SUNRPC_DEBUG is not set +CONFIG_SUNRPC_XPRT_RDMA=m +CONFIG_CEPH_FS=m +CONFIG_CEPH_FSCACHE=y +CONFIG_CEPH_FS_POSIX_ACL=y +CONFIG_CIFS=m +# CONFIG_CIFS_STATS2 is not set +CONFIG_CIFS_ALLOW_INSECURE_LEGACY=y +CONFIG_CIFS_WEAK_PW_HASH=y +# CONFIG_CIFS_UPCALL is not set +# CONFIG_CIFS_XATTR is not set +# CONFIG_CIFS_DEBUG is not set +CONFIG_CIFS_DFS_UPCALL=y +# CONFIG_CIFS_SMB_DIRECT is not set +# CONFIG_CIFS_FSCACHE is not set +# CONFIG_CODA_FS is not set +# CONFIG_AFS_FS is not set +CONFIG_9P_FS=m +# CONFIG_9P_FSCACHE is not set +CONFIG_9P_FS_POSIX_ACL=y +# CONFIG_9P_FS_SECURITY is not set +CONFIG_NLS=y +CONFIG_NLS_DEFAULT="utf8" +CONFIG_NLS_CODEPAGE_437=y +CONFIG_NLS_CODEPAGE_737=m +CONFIG_NLS_CODEPAGE_775=m +CONFIG_NLS_CODEPAGE_850=m +CONFIG_NLS_CODEPAGE_852=m +CONFIG_NLS_CODEPAGE_855=m +CONFIG_NLS_CODEPAGE_857=m +CONFIG_NLS_CODEPAGE_860=m +CONFIG_NLS_CODEPAGE_861=m +CONFIG_NLS_CODEPAGE_862=m +CONFIG_NLS_CODEPAGE_863=m +CONFIG_NLS_CODEPAGE_864=m +CONFIG_NLS_CODEPAGE_865=m +CONFIG_NLS_CODEPAGE_866=m +CONFIG_NLS_CODEPAGE_869=m +CONFIG_NLS_CODEPAGE_936=m +CONFIG_NLS_CODEPAGE_950=m +CONFIG_NLS_CODEPAGE_932=m +CONFIG_NLS_CODEPAGE_949=m +CONFIG_NLS_CODEPAGE_874=m +CONFIG_NLS_ISO8859_8=m +CONFIG_NLS_CODEPAGE_1250=m +CONFIG_NLS_CODEPAGE_1251=m +CONFIG_NLS_ASCII=y +CONFIG_NLS_ISO8859_1=y +CONFIG_NLS_ISO8859_2=m +CONFIG_NLS_ISO8859_3=m +CONFIG_NLS_ISO8859_4=m +CONFIG_NLS_ISO8859_5=m +CONFIG_NLS_ISO8859_6=m +CONFIG_NLS_ISO8859_7=m +CONFIG_NLS_ISO8859_9=m +CONFIG_NLS_ISO8859_13=m +CONFIG_NLS_ISO8859_14=m +CONFIG_NLS_ISO8859_15=m +CONFIG_NLS_KOI8_R=m +CONFIG_NLS_KOI8_U=m +CONFIG_NLS_MAC_ROMAN=m +CONFIG_NLS_MAC_CELTIC=m +CONFIG_NLS_MAC_CENTEURO=m +CONFIG_NLS_MAC_CROATIAN=m +CONFIG_NLS_MAC_CYRILLIC=m +CONFIG_NLS_MAC_GAELIC=m +CONFIG_NLS_MAC_GREEK=m +CONFIG_NLS_MAC_ICELAND=m 
+CONFIG_NLS_MAC_INUIT=m +CONFIG_NLS_MAC_ROMANIAN=m +CONFIG_NLS_MAC_TURKISH=m +CONFIG_NLS_UTF8=y +# CONFIG_DLM is not set + +# +# Security options +# +CONFIG_KEYS=y +CONFIG_KEYS_COMPAT=y +# CONFIG_PERSISTENT_KEYRINGS is not set +# CONFIG_BIG_KEYS is not set +CONFIG_ENCRYPTED_KEYS=m +# CONFIG_KEY_DH_OPERATIONS is not set +# CONFIG_SECURITY_DMESG_RESTRICT is not set +CONFIG_SECURITY=y +# CONFIG_SECURITY_STACKING is not set +# CONFIG_SECURITY_LSM_DEBUG is not set +CONFIG_SECURITYFS=y +CONFIG_SECURITY_NETWORK=y +CONFIG_PAGE_TABLE_ISOLATION=y +# CONFIG_SECURITY_INFINIBAND is not set +# CONFIG_SECURITY_NETWORK_XFRM is not set +CONFIG_SECURITY_PATH=y +# CONFIG_INTEL_TXT is not set +CONFIG_HAVE_HARDENED_USERCOPY_ALLOCATOR=y +CONFIG_HARDENED_USERCOPY=y +# CONFIG_HARDENED_USERCOPY_FALLBACK is not set +# CONFIG_HARDENED_USERCOPY_PAGESPAN is not set +CONFIG_FORTIFY_SOURCE=y +# CONFIG_STATIC_USERMODEHELPER is not set +# CONFIG_SECURITY_SMACK is not set +# CONFIG_SECURITY_TOMOYO is not set +# CONFIG_SECURITY_APPARMOR is not set +# CONFIG_SECURITY_LOADPIN is not set +# CONFIG_SECURITY_YAMA is not set +# CONFIG_INTEGRITY is not set + +# +# Security Module Selection +# +CONFIG_DEFAULT_SECURITY_DAC=y +CONFIG_DEFAULT_SECURITY="" +CONFIG_XOR_BLOCKS=y +CONFIG_ASYNC_CORE=m +CONFIG_ASYNC_MEMCPY=m +CONFIG_ASYNC_XOR=m +CONFIG_ASYNC_PQ=m +CONFIG_ASYNC_RAID6_RECOV=m +CONFIG_CRYPTO=y + +# +# Crypto core or helper +# +# CONFIG_CRYPTO_FIPS is not set +CONFIG_CRYPTO_ALGAPI=y +CONFIG_CRYPTO_ALGAPI2=y +CONFIG_CRYPTO_AEAD=y +CONFIG_CRYPTO_AEAD2=y +CONFIG_CRYPTO_BLKCIPHER=y +CONFIG_CRYPTO_BLKCIPHER2=y +CONFIG_CRYPTO_HASH=y +CONFIG_CRYPTO_HASH2=y +CONFIG_CRYPTO_RNG=y +CONFIG_CRYPTO_RNG2=y +CONFIG_CRYPTO_RNG_DEFAULT=y +CONFIG_CRYPTO_AKCIPHER2=y +CONFIG_CRYPTO_AKCIPHER=y +CONFIG_CRYPTO_KPP2=y +CONFIG_CRYPTO_KPP=m +CONFIG_CRYPTO_ACOMP2=y +CONFIG_CRYPTO_RSA=y +CONFIG_CRYPTO_DH=m +CONFIG_CRYPTO_ECDH=m +CONFIG_CRYPTO_MANAGER=y +CONFIG_CRYPTO_MANAGER2=y +# CONFIG_CRYPTO_USER is not set +# 
CONFIG_CRYPTO_MANAGER_DISABLE_TESTS is not set +CONFIG_CRYPTO_GF128MUL=y +CONFIG_CRYPTO_NULL=y +CONFIG_CRYPTO_NULL2=y +CONFIG_CRYPTO_PCRYPT=m +CONFIG_CRYPTO_WORKQUEUE=y +CONFIG_CRYPTO_CRYPTD=y +CONFIG_CRYPTO_MCRYPTD=m +CONFIG_CRYPTO_AUTHENC=m +CONFIG_CRYPTO_TEST=m +CONFIG_CRYPTO_SIMD=y +CONFIG_CRYPTO_GLUE_HELPER_X86=y +CONFIG_CRYPTO_ENGINE=m + +# +# Authenticated Encryption with Associated Data +# +CONFIG_CRYPTO_CCM=y +CONFIG_CRYPTO_GCM=m +# CONFIG_CRYPTO_CHACHA20POLY1305 is not set +# CONFIG_CRYPTO_AEGIS128 is not set +# CONFIG_CRYPTO_AEGIS128L is not set +# CONFIG_CRYPTO_AEGIS256 is not set +# CONFIG_CRYPTO_AEGIS128_AESNI_SSE2 is not set +# CONFIG_CRYPTO_AEGIS128L_AESNI_SSE2 is not set +# CONFIG_CRYPTO_AEGIS256_AESNI_SSE2 is not set +# CONFIG_CRYPTO_MORUS640 is not set +# CONFIG_CRYPTO_MORUS640_SSE2 is not set +# CONFIG_CRYPTO_MORUS1280 is not set +# CONFIG_CRYPTO_MORUS1280_SSE2 is not set +# CONFIG_CRYPTO_MORUS1280_AVX2 is not set +CONFIG_CRYPTO_SEQIV=y +CONFIG_CRYPTO_ECHAINIV=y + +# +# Block modes +# +CONFIG_CRYPTO_CBC=y +CONFIG_CRYPTO_CFB=m +CONFIG_CRYPTO_CTR=y +CONFIG_CRYPTO_CTS=m +CONFIG_CRYPTO_ECB=y +CONFIG_CRYPTO_LRW=y +CONFIG_CRYPTO_PCBC=m +CONFIG_CRYPTO_XTS=y +CONFIG_CRYPTO_KEYWRAP=m + +# +# Hash modes +# +CONFIG_CRYPTO_CMAC=m +CONFIG_CRYPTO_HMAC=y +CONFIG_CRYPTO_XCBC=m +CONFIG_CRYPTO_VMAC=m + +# +# Digest +# +CONFIG_CRYPTO_CRC32C=y +CONFIG_CRYPTO_CRC32C_INTEL=y +CONFIG_CRYPTO_CRC32=m +# CONFIG_CRYPTO_CRC32_PCLMUL is not set +CONFIG_CRYPTO_CRCT10DIF=y +# CONFIG_CRYPTO_CRCT10DIF_PCLMUL is not set +CONFIG_CRYPTO_GHASH=m +# CONFIG_CRYPTO_POLY1305 is not set +# CONFIG_CRYPTO_POLY1305_X86_64 is not set +CONFIG_CRYPTO_MD4=m +CONFIG_CRYPTO_MD5=y +CONFIG_CRYPTO_MICHAEL_MIC=m +# CONFIG_CRYPTO_RMD128 is not set +# CONFIG_CRYPTO_RMD160 is not set +# CONFIG_CRYPTO_RMD256 is not set +# CONFIG_CRYPTO_RMD320 is not set +CONFIG_CRYPTO_SHA1=y +# CONFIG_CRYPTO_SHA1_SSSE3 is not set +CONFIG_CRYPTO_SHA256_SSSE3=y +CONFIG_CRYPTO_SHA512_SSSE3=y +CONFIG_CRYPTO_SHA1_MB=m +# 
CONFIG_CRYPTO_SHA256_MB is not set +# CONFIG_CRYPTO_SHA512_MB is not set +CONFIG_CRYPTO_SHA256=y +CONFIG_CRYPTO_SHA512=y +# CONFIG_CRYPTO_SHA3 is not set +# CONFIG_CRYPTO_SM3 is not set +CONFIG_CRYPTO_TGR192=m +CONFIG_CRYPTO_WP512=m +# CONFIG_CRYPTO_GHASH_CLMUL_NI_INTEL is not set + +# +# Ciphers +# +CONFIG_CRYPTO_AES=y +CONFIG_CRYPTO_AES_TI=y +CONFIG_CRYPTO_AES_X86_64=y +CONFIG_CRYPTO_AES_NI_INTEL=y +CONFIG_CRYPTO_ANUBIS=m +CONFIG_CRYPTO_ARC4=y +CONFIG_CRYPTO_BLOWFISH=m +CONFIG_CRYPTO_BLOWFISH_COMMON=m +CONFIG_CRYPTO_BLOWFISH_X86_64=m +CONFIG_CRYPTO_CAMELLIA=m +CONFIG_CRYPTO_CAMELLIA_X86_64=y +CONFIG_CRYPTO_CAMELLIA_AESNI_AVX_X86_64=y +CONFIG_CRYPTO_CAMELLIA_AESNI_AVX2_X86_64=y +CONFIG_CRYPTO_CAST_COMMON=m +CONFIG_CRYPTO_CAST5=m +CONFIG_CRYPTO_CAST5_AVX_X86_64=m +CONFIG_CRYPTO_CAST6=m +CONFIG_CRYPTO_CAST6_AVX_X86_64=m +CONFIG_CRYPTO_DES=m +CONFIG_CRYPTO_DES3_EDE_X86_64=m +CONFIG_CRYPTO_FCRYPT=m +CONFIG_CRYPTO_KHAZAD=m +CONFIG_CRYPTO_SALSA20=m +CONFIG_CRYPTO_CHACHA20=m +# CONFIG_CRYPTO_CHACHA20_X86_64 is not set +CONFIG_CRYPTO_SEED=m +CONFIG_CRYPTO_SERPENT=y +# CONFIG_CRYPTO_SERPENT_SSE2_X86_64 is not set +CONFIG_CRYPTO_SERPENT_AVX_X86_64=y +CONFIG_CRYPTO_SERPENT_AVX2_X86_64=y +# CONFIG_CRYPTO_SM4 is not set +# CONFIG_CRYPTO_SPECK is not set +CONFIG_CRYPTO_TEA=m +CONFIG_CRYPTO_TWOFISH=m +CONFIG_CRYPTO_TWOFISH_COMMON=m +# CONFIG_CRYPTO_TWOFISH_X86_64 is not set +# CONFIG_CRYPTO_TWOFISH_X86_64_3WAY is not set +# CONFIG_CRYPTO_TWOFISH_AVX_X86_64 is not set + +# +# Compression +# +CONFIG_CRYPTO_DEFLATE=y +CONFIG_CRYPTO_LZO=m +# CONFIG_CRYPTO_842 is not set +# CONFIG_CRYPTO_LZ4 is not set +CONFIG_CRYPTO_LZ4HC=m +# CONFIG_CRYPTO_ZSTD is not set + +# +# Random Number Generation +# +# CONFIG_CRYPTO_ANSI_CPRNG is not set +CONFIG_CRYPTO_DRBG_MENU=y +CONFIG_CRYPTO_DRBG_HMAC=y +# CONFIG_CRYPTO_DRBG_HASH is not set +# CONFIG_CRYPTO_DRBG_CTR is not set +CONFIG_CRYPTO_DRBG=y +CONFIG_CRYPTO_JITTERENTROPY=y +CONFIG_CRYPTO_USER_API=y +CONFIG_CRYPTO_USER_API_HASH=y 
+CONFIG_CRYPTO_USER_API_SKCIPHER=y +# CONFIG_CRYPTO_USER_API_RNG is not set +# CONFIG_CRYPTO_USER_API_AEAD is not set +CONFIG_CRYPTO_HASH_INFO=y +CONFIG_CRYPTO_HW=y +# CONFIG_CRYPTO_DEV_PADLOCK is not set +# CONFIG_CRYPTO_DEV_CCP is not set +CONFIG_CRYPTO_DEV_QAT=m +CONFIG_CRYPTO_DEV_QAT_DH895xCC=m +# CONFIG_CRYPTO_DEV_QAT_C3XXX is not set +# CONFIG_CRYPTO_DEV_QAT_C62X is not set +# CONFIG_CRYPTO_DEV_QAT_DH895xCCVF is not set +# CONFIG_CRYPTO_DEV_QAT_C3XXXVF is not set +# CONFIG_CRYPTO_DEV_QAT_C62XVF is not set +# CONFIG_CRYPTO_DEV_NITROX_CNN55XX is not set +CONFIG_CRYPTO_DEV_VIRTIO=m +CONFIG_ASYMMETRIC_KEY_TYPE=y +CONFIG_ASYMMETRIC_PUBLIC_KEY_SUBTYPE=y +CONFIG_X509_CERTIFICATE_PARSER=y +CONFIG_PKCS7_MESSAGE_PARSER=y +# CONFIG_PKCS7_TEST_KEY is not set +# CONFIG_SIGNED_PE_FILE_VERIFICATION is not set + +# +# Certificates for signature checking +# +CONFIG_MODULE_SIG_KEY="certs/signing_key.pem" +CONFIG_SYSTEM_TRUSTED_KEYRING=y +CONFIG_SYSTEM_TRUSTED_KEYS="" +# CONFIG_SYSTEM_EXTRA_CERTIFICATE is not set +# CONFIG_SECONDARY_TRUSTED_KEYRING is not set +# CONFIG_SYSTEM_BLACKLIST_KEYRING is not set +CONFIG_BINARY_PRINTF=y + +# +# Library routines +# +CONFIG_RAID6_PQ=y +CONFIG_BITREVERSE=y +CONFIG_RATIONAL=y +CONFIG_GENERIC_STRNCPY_FROM_USER=y +CONFIG_GENERIC_STRNLEN_USER=y +CONFIG_GENERIC_NET_UTILS=y +CONFIG_GENERIC_FIND_FIRST_BIT=y +CONFIG_GENERIC_PCI_IOMAP=y +CONFIG_GENERIC_IOMAP=y +CONFIG_ARCH_USE_CMPXCHG_LOCKREF=y +CONFIG_ARCH_HAS_FAST_MULTIPLIER=y +CONFIG_CRC_CCITT=y +CONFIG_CRC16=y +CONFIG_CRC_T10DIF=y +CONFIG_CRC_ITU_T=m +CONFIG_CRC32=y +# CONFIG_CRC32_SELFTEST is not set +CONFIG_CRC32_SLICEBY8=y +# CONFIG_CRC32_SLICEBY4 is not set +# CONFIG_CRC32_SARWATE is not set +# CONFIG_CRC32_BIT is not set +CONFIG_CRC64=m +CONFIG_CRC4=m +# CONFIG_CRC7 is not set +CONFIG_LIBCRC32C=y +CONFIG_CRC8=y +CONFIG_XXHASH=y +# CONFIG_RANDOM32_SELFTEST is not set +CONFIG_ZLIB_INFLATE=y +CONFIG_ZLIB_DEFLATE=y +CONFIG_LZO_COMPRESS=y +CONFIG_LZO_DECOMPRESS=y +CONFIG_LZ4HC_COMPRESS=m 
+CONFIG_LZ4_DECOMPRESS=y +CONFIG_ZSTD_COMPRESS=y +CONFIG_ZSTD_DECOMPRESS=y +CONFIG_XZ_DEC=y +CONFIG_XZ_DEC_X86=y +# CONFIG_XZ_DEC_POWERPC is not set +# CONFIG_XZ_DEC_IA64 is not set +# CONFIG_XZ_DEC_ARM is not set +# CONFIG_XZ_DEC_ARMTHUMB is not set +# CONFIG_XZ_DEC_SPARC is not set +CONFIG_XZ_DEC_BCJ=y +# CONFIG_XZ_DEC_TEST is not set +CONFIG_DECOMPRESS_GZIP=y +CONFIG_DECOMPRESS_XZ=y +CONFIG_DECOMPRESS_LZO=y +CONFIG_DECOMPRESS_LZ4=y +CONFIG_GENERIC_ALLOCATOR=y +CONFIG_REED_SOLOMON=m +CONFIG_REED_SOLOMON_ENC8=y +CONFIG_REED_SOLOMON_DEC8=y +CONFIG_TEXTSEARCH=y +CONFIG_TEXTSEARCH_KMP=m +CONFIG_TEXTSEARCH_BM=m +CONFIG_TEXTSEARCH_FSM=m +CONFIG_INTERVAL_TREE=y +CONFIG_RADIX_TREE_MULTIORDER=y +CONFIG_ASSOCIATIVE_ARRAY=y +CONFIG_HAS_IOMEM=y +CONFIG_HAS_IOPORT_MAP=y +CONFIG_HAS_DMA=y +CONFIG_NEED_SG_DMA_LENGTH=y +CONFIG_NEED_DMA_MAP_STATE=y +CONFIG_ARCH_DMA_ADDR_T_64BIT=y +CONFIG_DMA_DIRECT_OPS=y +CONFIG_DMA_VIRT_OPS=y +CONFIG_SWIOTLB=y +CONFIG_SGL_ALLOC=y +CONFIG_CHECK_SIGNATURE=y +CONFIG_CPU_RMAP=y +CONFIG_DQL=y +CONFIG_GLOB=y +# CONFIG_GLOB_SELFTEST is not set +CONFIG_NLATTR=y +CONFIG_CLZ_TAB=y +CONFIG_CORDIC=y +# CONFIG_DDR is not set +CONFIG_IRQ_POLL=y +CONFIG_MPILIB=y +CONFIG_OID_REGISTRY=y +CONFIG_UCS2_STRING=y +CONFIG_FONT_SUPPORT=y +CONFIG_FONTS=y +CONFIG_FONT_8x8=y +CONFIG_FONT_8x16=y +# CONFIG_FONT_6x11 is not set +CONFIG_FONT_7x14=y +# CONFIG_FONT_PEARL_8x8 is not set +# CONFIG_FONT_ACORN_8x8 is not set +# CONFIG_FONT_MINI_4x6 is not set +# CONFIG_FONT_6x10 is not set +CONFIG_FONT_10x18=y +# CONFIG_FONT_SUN8x16 is not set +# CONFIG_FONT_SUN12x22 is not set +CONFIG_SG_POOL=y +CONFIG_ARCH_HAS_SG_CHAIN=y +CONFIG_ARCH_HAS_PMEM_API=y +CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE=y +CONFIG_ARCH_HAS_UACCESS_MCSAFE=y +CONFIG_SBITMAP=y +# CONFIG_STRING_SELFTEST is not set + +# +# Kernel hacking +# + +# +# printk and dmesg options +# +CONFIG_PRINTK_TIME=y +CONFIG_CONSOLE_LOGLEVEL_DEFAULT=7 +CONFIG_CONSOLE_LOGLEVEL_QUIET=4 +CONFIG_MESSAGE_LOGLEVEL_DEFAULT=4 
+CONFIG_BOOT_PRINTK_DELAY=y +CONFIG_DYNAMIC_DEBUG=y + +# +# Compile-time checks and compiler options +# +# CONFIG_DEBUG_INFO is not set +CONFIG_ENABLE_MUST_CHECK=y +CONFIG_FRAME_WARN=1024 +# CONFIG_STRIP_ASM_SYMS is not set +# CONFIG_READABLE_ASM is not set +# CONFIG_UNUSED_SYMBOLS is not set +# CONFIG_PAGE_OWNER is not set +CONFIG_DEBUG_FS=y +# CONFIG_HEADERS_CHECK is not set +CONFIG_DEBUG_SECTION_MISMATCH=y +# CONFIG_SECTION_MISMATCH_WARN_ONLY is not set +CONFIG_STACK_VALIDATION=y +# CONFIG_DEBUG_FORCE_WEAK_PER_CPU is not set +CONFIG_MAGIC_SYSRQ=y +CONFIG_MAGIC_SYSRQ_DEFAULT_ENABLE=0x1 +# CONFIG_MAGIC_SYSRQ_SERIAL is not set +CONFIG_DEBUG_KERNEL=y + +# +# Memory Debugging +# +# CONFIG_PAGE_EXTENSION is not set +# CONFIG_DEBUG_PAGEALLOC is not set +# CONFIG_PAGE_POISONING is not set +# CONFIG_DEBUG_PAGE_REF is not set +# CONFIG_DEBUG_RODATA_TEST is not set +# CONFIG_DEBUG_OBJECTS is not set +# CONFIG_DEBUG_SLAB is not set +CONFIG_HAVE_DEBUG_KMEMLEAK=y +# CONFIG_DEBUG_KMEMLEAK is not set +# CONFIG_DEBUG_STACK_USAGE is not set +# CONFIG_DEBUG_VM is not set +CONFIG_ARCH_HAS_DEBUG_VIRTUAL=y +# CONFIG_DEBUG_VIRTUAL is not set +# CONFIG_DEBUG_MEMORY_INIT is not set +# CONFIG_DEBUG_PER_CPU_MAPS is not set +CONFIG_HAVE_DEBUG_STACKOVERFLOW=y +# CONFIG_DEBUG_STACKOVERFLOW is not set +CONFIG_HAVE_ARCH_KASAN=y +# CONFIG_KASAN is not set +CONFIG_ARCH_HAS_KCOV=y +CONFIG_CC_HAS_SANCOV_TRACE_PC=y +# CONFIG_KCOV is not set +CONFIG_DEBUG_SHIRQ=y + +# +# Debug Lockups and Hangs +# +CONFIG_LOCKUP_DETECTOR=y +CONFIG_SOFTLOCKUP_DETECTOR=y +# CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC is not set +CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC_VALUE=0 +CONFIG_HARDLOCKUP_DETECTOR_PERF=y +CONFIG_HARDLOCKUP_CHECK_TIMESTAMP=y +CONFIG_HARDLOCKUP_DETECTOR=y +# CONFIG_BOOTPARAM_HARDLOCKUP_PANIC is not set +CONFIG_BOOTPARAM_HARDLOCKUP_PANIC_VALUE=0 +# CONFIG_DETECT_HUNG_TASK is not set +# CONFIG_WQ_WATCHDOG is not set +# CONFIG_PANIC_ON_OOPS is not set +CONFIG_PANIC_ON_OOPS_VALUE=0 +CONFIG_PANIC_TIMEOUT=0 
+CONFIG_SCHED_DEBUG=y +CONFIG_SCHED_INFO=y +CONFIG_SCHEDSTATS=y +CONFIG_SCHED_STACK_END_CHECK=y +# CONFIG_DEBUG_TIMEKEEPING is not set + +# +# Lock Debugging (spinlocks, mutexes, etc...) +# +CONFIG_LOCK_DEBUGGING_SUPPORT=y +# CONFIG_PROVE_LOCKING is not set +# CONFIG_LOCK_STAT is not set +# CONFIG_DEBUG_RT_MUTEXES is not set +# CONFIG_DEBUG_SPINLOCK is not set +# CONFIG_DEBUG_MUTEXES is not set +# CONFIG_DEBUG_WW_MUTEX_SLOWPATH is not set +# CONFIG_DEBUG_RWSEMS is not set +# CONFIG_DEBUG_LOCK_ALLOC is not set +# CONFIG_DEBUG_ATOMIC_SLEEP is not set +# CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set +# CONFIG_LOCK_TORTURE_TEST is not set +# CONFIG_WW_MUTEX_SELFTEST is not set +CONFIG_STACKTRACE=y +# CONFIG_WARN_ALL_UNSEEDED_RANDOM is not set +# CONFIG_DEBUG_KOBJECT is not set +CONFIG_DEBUG_BUGVERBOSE=y +CONFIG_DEBUG_LIST=y +# CONFIG_DEBUG_PI_LIST is not set +CONFIG_DEBUG_SG=y +CONFIG_DEBUG_NOTIFIERS=y +CONFIG_DEBUG_CREDENTIALS=y + +# +# RCU Debugging +# +# CONFIG_RCU_PERF_TEST is not set +# CONFIG_RCU_TORTURE_TEST is not set +CONFIG_RCU_CPU_STALL_TIMEOUT=60 +# CONFIG_RCU_TRACE is not set +# CONFIG_RCU_EQS_DEBUG is not set +# CONFIG_DEBUG_WQ_FORCE_RR_CPU is not set +# CONFIG_DEBUG_BLOCK_EXT_DEVT is not set +# CONFIG_CPU_HOTPLUG_STATE_CONTROL is not set +# CONFIG_NOTIFIER_ERROR_INJECTION is not set +# CONFIG_FAULT_INJECTION is not set +CONFIG_LATENCYTOP=y +CONFIG_USER_STACKTRACE_SUPPORT=y +CONFIG_NOP_TRACER=y +CONFIG_HAVE_FUNCTION_TRACER=y +CONFIG_HAVE_FUNCTION_GRAPH_TRACER=y +CONFIG_HAVE_DYNAMIC_FTRACE=y +CONFIG_HAVE_DYNAMIC_FTRACE_WITH_REGS=y +CONFIG_HAVE_FTRACE_MCOUNT_RECORD=y +CONFIG_HAVE_SYSCALL_TRACEPOINTS=y +CONFIG_HAVE_FENTRY=y +CONFIG_HAVE_C_RECORDMCOUNT=y +CONFIG_TRACER_MAX_TRACE=y +CONFIG_TRACE_CLOCK=y +CONFIG_RING_BUFFER=y +CONFIG_EVENT_TRACING=y +CONFIG_CONTEXT_SWITCH_TRACER=y +CONFIG_TRACING=y +CONFIG_GENERIC_TRACER=y +CONFIG_TRACING_SUPPORT=y +CONFIG_FTRACE=y +# CONFIG_FUNCTION_TRACER is not set +# CONFIG_PREEMPTIRQ_EVENTS is not set +# 
CONFIG_IRQSOFF_TRACER is not set +CONFIG_SCHED_TRACER=y +# CONFIG_HWLAT_TRACER is not set +CONFIG_FTRACE_SYSCALLS=y +CONFIG_TRACER_SNAPSHOT=y +# CONFIG_TRACER_SNAPSHOT_PER_CPU_SWAP is not set +CONFIG_BRANCH_PROFILE_NONE=y +# CONFIG_PROFILE_ANNOTATED_BRANCHES is not set +# CONFIG_STACK_TRACER is not set +CONFIG_BLK_DEV_IO_TRACE=y +# CONFIG_UPROBE_EVENTS is not set +# CONFIG_FTRACE_STARTUP_TEST is not set +# CONFIG_MMIOTRACE is not set +# CONFIG_HIST_TRIGGERS is not set +# CONFIG_TRACEPOINT_BENCHMARK is not set +# CONFIG_RING_BUFFER_BENCHMARK is not set +# CONFIG_RING_BUFFER_STARTUP_TEST is not set +# CONFIG_PREEMPTIRQ_DELAY_TEST is not set +# CONFIG_TRACE_EVAL_MAP_FILE is not set +# CONFIG_TRACING_EVENTS_GPIO is not set +# CONFIG_PROVIDE_OHCI1394_DMA_INIT is not set +# CONFIG_DMA_API_DEBUG is not set +CONFIG_RUNTIME_TESTING_MENU=y +# CONFIG_LKDTM is not set +# CONFIG_TEST_LIST_SORT is not set +# CONFIG_TEST_SORT is not set +# CONFIG_BACKTRACE_SELF_TEST is not set +# CONFIG_RBTREE_TEST is not set +# CONFIG_INTERVAL_TREE_TEST is not set +# CONFIG_PERCPU_TEST is not set +# CONFIG_ATOMIC64_SELFTEST is not set +# CONFIG_ASYNC_RAID6_TEST is not set +# CONFIG_TEST_HEXDUMP is not set +# CONFIG_TEST_STRING_HELPERS is not set +# CONFIG_TEST_KSTRTOX is not set +# CONFIG_TEST_PRINTF is not set +# CONFIG_TEST_BITMAP is not set +# CONFIG_TEST_BITFIELD is not set +# CONFIG_TEST_UUID is not set +# CONFIG_TEST_OVERFLOW is not set +# CONFIG_TEST_RHASHTABLE is not set +# CONFIG_TEST_HASH is not set +# CONFIG_TEST_IDA is not set +# CONFIG_TEST_LKM is not set +# CONFIG_TEST_USER_COPY is not set +# CONFIG_TEST_BPF is not set +# CONFIG_FIND_BIT_BENCHMARK is not set +# CONFIG_TEST_FIRMWARE is not set +# CONFIG_TEST_SYSCTL is not set +# CONFIG_TEST_UDELAY is not set +# CONFIG_TEST_STATIC_KEYS is not set +# CONFIG_TEST_KMOD is not set +# CONFIG_TEST_MEMCAT_P is not set +# CONFIG_MEMTEST is not set +# CONFIG_BUG_ON_DATA_CORRUPTION is not set +# CONFIG_SAMPLES is not set 
+CONFIG_HAVE_ARCH_KGDB=y +# CONFIG_KGDB is not set +CONFIG_ARCH_HAS_UBSAN_SANITIZE_ALL=y +# CONFIG_UBSAN is not set +CONFIG_ARCH_HAS_DEVMEM_IS_ALLOWED=y +CONFIG_TRACE_IRQFLAGS_SUPPORT=y +CONFIG_X86_VERBOSE_BOOTUP=y +CONFIG_EARLY_PRINTK=y +# CONFIG_EARLY_PRINTK_DBGP is not set +CONFIG_EARLY_PRINTK_EFI=y +# CONFIG_EARLY_PRINTK_USB_XDBC is not set +# CONFIG_X86_PTDUMP is not set +# CONFIG_EFI_PGT_DUMP is not set +# CONFIG_DEBUG_WX is not set +CONFIG_DOUBLEFAULT=y +# CONFIG_DEBUG_TLBFLUSH is not set +CONFIG_HAVE_MMIOTRACE_SUPPORT=y +CONFIG_IO_DELAY_TYPE_0X80=0 +CONFIG_IO_DELAY_TYPE_0XED=1 +CONFIG_IO_DELAY_TYPE_UDELAY=2 +CONFIG_IO_DELAY_TYPE_NONE=3 +CONFIG_IO_DELAY_0X80=y +# CONFIG_IO_DELAY_0XED is not set +# CONFIG_IO_DELAY_UDELAY is not set +# CONFIG_IO_DELAY_NONE is not set +CONFIG_DEFAULT_IO_DELAY_TYPE=0 +CONFIG_DEBUG_BOOT_PARAMS=y +# CONFIG_CPA_DEBUG is not set +# CONFIG_OPTIMIZE_INLINING is not set +# CONFIG_DEBUG_ENTRY is not set +# CONFIG_DEBUG_NMI_SELFTEST is not set +# CONFIG_X86_DEBUG_FPU is not set +# CONFIG_PUNIT_ATOM_DEBUG is not set +CONFIG_UNWINDER_ORC=y +# CONFIG_UNWINDER_FRAME_POINTER is not set +# CONFIG_UNWINDER_GUESS is not set diff --git a/arch/x86/crypto/aesni-intel_glue.c b/arch/x86/crypto/aesni-intel_glue.c index acbe7e8336d85..e4b78f9628749 100644 --- a/arch/x86/crypto/aesni-intel_glue.c +++ b/arch/x86/crypto/aesni-intel_glue.c @@ -817,7 +817,7 @@ static int gcmaes_crypt_by_sg(bool enc, struct aead_request *req, /* Linearize assoc, if not already linear */ if (req->src->length >= assoclen && req->src->length && (!PageHighMem(sg_page(req->src)) || - req->src->offset + req->src->length < PAGE_SIZE)) { + req->src->offset + req->src->length <= PAGE_SIZE)) { scatterwalk_start(&assoc_sg_walk, req->src); assoc = scatterwalk_map(&assoc_sg_walk); } else { diff --git a/arch/x86/entry/entry_64.S b/arch/x86/entry/entry_64.S index f95dcb209fdff..cdff14eafcd04 100644 --- a/arch/x86/entry/entry_64.S +++ b/arch/x86/entry/entry_64.S @@ -1147,6 +1147,11 @@ 
apicinterrupt3 HYPERV_STIMER0_VECTOR \ hv_stimer0_callback_vector hv_stimer0_vector_handler #endif /* CONFIG_HYPERV */ +#if IS_ENABLED(CONFIG_ACRN) +apicinterrupt3 HYPERVISOR_CALLBACK_VECTOR \ + acrn_hv_callback_vector acrn_hv_vector_handler +#endif + idtentry debug do_debug has_error_code=0 paranoid=1 shift_ist=DEBUG_STACK idtentry int3 do_int3 has_error_code=0 idtentry stack_segment do_stack_segment has_error_code=1 diff --git a/arch/x86/entry/entry_64_compat.S b/arch/x86/entry/entry_64_compat.S index 7d0df78db7272..40d2834a8101e 100644 --- a/arch/x86/entry/entry_64_compat.S +++ b/arch/x86/entry/entry_64_compat.S @@ -356,7 +356,8 @@ ENTRY(entry_INT80_compat) /* Need to switch before accessing the thread stack. */ SWITCH_TO_KERNEL_CR3 scratch_reg=%rdi - movq %rsp, %rdi + /* In the Xen PV case we already run on the thread stack. */ + ALTERNATIVE "movq %rsp, %rdi", "jmp .Lint80_keep_stack", X86_FEATURE_XENPV movq PER_CPU_VAR(cpu_current_top_of_stack), %rsp pushq 6*8(%rdi) /* regs->ss */ @@ -365,8 +366,9 @@ ENTRY(entry_INT80_compat) pushq 3*8(%rdi) /* regs->cs */ pushq 2*8(%rdi) /* regs->ip */ pushq 1*8(%rdi) /* regs->orig_ax */ - pushq (%rdi) /* pt_regs->di */ +.Lint80_keep_stack: + pushq %rsi /* pt_regs->si */ xorl %esi, %esi /* nospec si */ pushq %rdx /* pt_regs->dx */ diff --git a/arch/x86/entry/vdso/Makefile b/arch/x86/entry/vdso/Makefile index 141d415a8c809..c3d7ccd25381b 100644 --- a/arch/x86/entry/vdso/Makefile +++ b/arch/x86/entry/vdso/Makefile @@ -171,7 +171,8 @@ quiet_cmd_vdso = VDSO $@ sh $(srctree)/$(src)/checkundef.sh '$(NM)' '$@' VDSO_LDFLAGS = -shared $(call ld-option, --hash-style=both) \ - $(call ld-option, --build-id) -Bsymbolic + $(call ld-option, --build-id) $(call ld-option, --eh-frame-hdr) \ + -Bsymbolic GCOV_PROFILE := n # diff --git a/arch/x86/events/core.c b/arch/x86/events/core.c index dfb2f7c0d0192..c8d08da5b308f 100644 --- a/arch/x86/events/core.c +++ b/arch/x86/events/core.c @@ -438,26 +438,6 @@ int x86_setup_perfctr(struct perf_event 
*event) if (config == -1LL) return -EINVAL; - /* - * Branch tracing: - */ - if (attr->config == PERF_COUNT_HW_BRANCH_INSTRUCTIONS && - !attr->freq && hwc->sample_period == 1) { - /* BTS is not supported by this architecture. */ - if (!x86_pmu.bts_active) - return -EOPNOTSUPP; - - /* BTS is currently only allowed for user-mode. */ - if (!attr->exclude_kernel) - return -EOPNOTSUPP; - - /* disallow bts if conflicting events are present */ - if (x86_add_exclusive(x86_lbr_exclusive_lbr)) - return -EBUSY; - - event->destroy = hw_perf_lbr_event_destroy; - } - hwc->config |= config; return 0; diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c index 035c37481f572..155fa4b53c56b 100644 --- a/arch/x86/events/intel/core.c +++ b/arch/x86/events/intel/core.c @@ -2358,16 +2358,7 @@ static int intel_pmu_handle_irq(struct pt_regs *regs) static struct event_constraint * intel_bts_constraints(struct perf_event *event) { - struct hw_perf_event *hwc = &event->hw; - unsigned int hw_event, bts_event; - - if (event->attr.freq) - return NULL; - - hw_event = hwc->config & INTEL_ARCH_EVENT_MASK; - bts_event = x86_pmu.event_map(PERF_COUNT_HW_BRANCH_INSTRUCTIONS); - - if (unlikely(hw_event == bts_event && hwc->sample_period == 1)) + if (unlikely(intel_pmu_has_bts(event))) return &bts_constraint; return NULL; @@ -2986,10 +2977,51 @@ static unsigned long intel_pmu_large_pebs_flags(struct perf_event *event) return flags; } +static int intel_pmu_bts_config(struct perf_event *event) +{ + struct perf_event_attr *attr = &event->attr; + + if (unlikely(intel_pmu_has_bts(event))) { + /* BTS is not supported by this architecture. */ + if (!x86_pmu.bts_active) + return -EOPNOTSUPP; + + /* BTS is currently only allowed for user-mode. */ + if (!attr->exclude_kernel) + return -EOPNOTSUPP; + + /* BTS is not allowed for precise events. 
*/ + if (attr->precise_ip) + return -EOPNOTSUPP; + + /* disallow bts if conflicting events are present */ + if (x86_add_exclusive(x86_lbr_exclusive_lbr)) + return -EBUSY; + + event->destroy = hw_perf_lbr_event_destroy; + } + + return 0; +} + +static int core_pmu_hw_config(struct perf_event *event) +{ + int ret = x86_pmu_hw_config(event); + + if (ret) + return ret; + + return intel_pmu_bts_config(event); +} + static int intel_pmu_hw_config(struct perf_event *event) { int ret = x86_pmu_hw_config(event); + if (ret) + return ret; + + ret = intel_pmu_bts_config(event); if (ret) return ret; @@ -3015,7 +3047,7 @@ static int intel_pmu_hw_config(struct perf_event *event) /* * BTS is set up earlier in this path, so don't account twice */ - if (!intel_pmu_has_bts(event)) { + if (!unlikely(intel_pmu_has_bts(event))) { /* disallow lbr if conflicting events are present */ if (x86_add_exclusive(x86_lbr_exclusive_lbr)) return -EBUSY; @@ -3478,7 +3510,7 @@ static __initconst const struct x86_pmu core_pmu = { .enable_all = core_pmu_enable_all, .enable = core_pmu_enable_event, .disable = x86_pmu_disable_event, - .hw_config = x86_pmu_hw_config, + .hw_config = core_pmu_hw_config, .schedule_events = x86_schedule_events, .eventsel = MSR_ARCH_PERFMON_EVENTSEL0, .perfctr = MSR_ARCH_PERFMON_PERFCTR0, diff --git a/arch/x86/events/intel/uncore_snb.c b/arch/x86/events/intel/uncore_snb.c index 8527c3e1038b7..bfa25814fe5f2 100644 --- a/arch/x86/events/intel/uncore_snb.c +++ b/arch/x86/events/intel/uncore_snb.c @@ -15,6 +15,25 @@ #define PCI_DEVICE_ID_INTEL_SKL_HQ_IMC 0x1910 #define PCI_DEVICE_ID_INTEL_SKL_SD_IMC 0x190f #define PCI_DEVICE_ID_INTEL_SKL_SQ_IMC 0x191f +#define PCI_DEVICE_ID_INTEL_KBL_Y_IMC 0x590c +#define PCI_DEVICE_ID_INTEL_KBL_U_IMC 0x5904 +#define PCI_DEVICE_ID_INTEL_KBL_UQ_IMC 0x5914 +#define PCI_DEVICE_ID_INTEL_KBL_SD_IMC 0x590f +#define PCI_DEVICE_ID_INTEL_KBL_SQ_IMC 0x591f +#define PCI_DEVICE_ID_INTEL_CFL_2U_IMC 0x3ecc +#define PCI_DEVICE_ID_INTEL_CFL_4U_IMC 0x3ed0 +#define 
PCI_DEVICE_ID_INTEL_CFL_4H_IMC 0x3e10 +#define PCI_DEVICE_ID_INTEL_CFL_6H_IMC 0x3ec4 +#define PCI_DEVICE_ID_INTEL_CFL_2S_D_IMC 0x3e0f +#define PCI_DEVICE_ID_INTEL_CFL_4S_D_IMC 0x3e1f +#define PCI_DEVICE_ID_INTEL_CFL_6S_D_IMC 0x3ec2 +#define PCI_DEVICE_ID_INTEL_CFL_8S_D_IMC 0x3e30 +#define PCI_DEVICE_ID_INTEL_CFL_4S_W_IMC 0x3e18 +#define PCI_DEVICE_ID_INTEL_CFL_6S_W_IMC 0x3ec6 +#define PCI_DEVICE_ID_INTEL_CFL_8S_W_IMC 0x3e31 +#define PCI_DEVICE_ID_INTEL_CFL_4S_S_IMC 0x3e33 +#define PCI_DEVICE_ID_INTEL_CFL_6S_S_IMC 0x3eca +#define PCI_DEVICE_ID_INTEL_CFL_8S_S_IMC 0x3e32 /* SNB event control */ #define SNB_UNC_CTL_EV_SEL_MASK 0x000000ff @@ -569,7 +588,82 @@ static const struct pci_device_id skl_uncore_pci_ids[] = { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SKL_SQ_IMC), .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0), }, - + { /* IMC */ + PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_KBL_Y_IMC), + .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0), + }, + { /* IMC */ + PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_KBL_U_IMC), + .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0), + }, + { /* IMC */ + PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_KBL_UQ_IMC), + .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0), + }, + { /* IMC */ + PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_KBL_SD_IMC), + .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0), + }, + { /* IMC */ + PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_KBL_SQ_IMC), + .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0), + }, + { /* IMC */ + PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CFL_2U_IMC), + .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0), + }, + { /* IMC */ + PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CFL_4U_IMC), + .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0), + }, + { /* IMC */ + PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CFL_4H_IMC), + .driver_data = 
UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0), + }, + { /* IMC */ + PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CFL_6H_IMC), + .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0), + }, + { /* IMC */ + PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CFL_2S_D_IMC), + .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0), + }, + { /* IMC */ + PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CFL_4S_D_IMC), + .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0), + }, + { /* IMC */ + PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CFL_6S_D_IMC), + .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0), + }, + { /* IMC */ + PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CFL_8S_D_IMC), + .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0), + }, + { /* IMC */ + PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CFL_4S_W_IMC), + .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0), + }, + { /* IMC */ + PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CFL_6S_W_IMC), + .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0), + }, + { /* IMC */ + PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CFL_8S_W_IMC), + .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0), + }, + { /* IMC */ + PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CFL_4S_S_IMC), + .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0), + }, + { /* IMC */ + PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CFL_6S_S_IMC), + .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0), + }, + { /* IMC */ + PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CFL_8S_S_IMC), + .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0), + }, { /* end: all zeroes */ }, }; @@ -618,6 +712,25 @@ static const struct imc_uncore_pci_dev desktop_imc_pci_ids[] = { IMC_DEV(SKL_HQ_IMC, &skl_uncore_pci_driver), /* 6th Gen Core H Quad Core */ IMC_DEV(SKL_SD_IMC, &skl_uncore_pci_driver), /* 6th Gen Core S Dual Core */ IMC_DEV(SKL_SQ_IMC, &skl_uncore_pci_driver), 
/* 6th Gen Core S Quad Core */ + IMC_DEV(KBL_Y_IMC, &skl_uncore_pci_driver), /* 7th Gen Core Y */ + IMC_DEV(KBL_U_IMC, &skl_uncore_pci_driver), /* 7th Gen Core U */ + IMC_DEV(KBL_UQ_IMC, &skl_uncore_pci_driver), /* 7th Gen Core U Quad Core */ + IMC_DEV(KBL_SD_IMC, &skl_uncore_pci_driver), /* 7th Gen Core S Dual Core */ + IMC_DEV(KBL_SQ_IMC, &skl_uncore_pci_driver), /* 7th Gen Core S Quad Core */ + IMC_DEV(CFL_2U_IMC, &skl_uncore_pci_driver), /* 8th Gen Core U 2 Cores */ + IMC_DEV(CFL_4U_IMC, &skl_uncore_pci_driver), /* 8th Gen Core U 4 Cores */ + IMC_DEV(CFL_4H_IMC, &skl_uncore_pci_driver), /* 8th Gen Core H 4 Cores */ + IMC_DEV(CFL_6H_IMC, &skl_uncore_pci_driver), /* 8th Gen Core H 6 Cores */ + IMC_DEV(CFL_2S_D_IMC, &skl_uncore_pci_driver), /* 8th Gen Core S 2 Cores Desktop */ + IMC_DEV(CFL_4S_D_IMC, &skl_uncore_pci_driver), /* 8th Gen Core S 4 Cores Desktop */ + IMC_DEV(CFL_6S_D_IMC, &skl_uncore_pci_driver), /* 8th Gen Core S 6 Cores Desktop */ + IMC_DEV(CFL_8S_D_IMC, &skl_uncore_pci_driver), /* 8th Gen Core S 8 Cores Desktop */ + IMC_DEV(CFL_4S_W_IMC, &skl_uncore_pci_driver), /* 8th Gen Core S 4 Cores Work Station */ + IMC_DEV(CFL_6S_W_IMC, &skl_uncore_pci_driver), /* 8th Gen Core S 6 Cores Work Station */ + IMC_DEV(CFL_8S_W_IMC, &skl_uncore_pci_driver), /* 8th Gen Core S 8 Cores Work Station */ + IMC_DEV(CFL_4S_S_IMC, &skl_uncore_pci_driver), /* 8th Gen Core S 4 Cores Server */ + IMC_DEV(CFL_6S_S_IMC, &skl_uncore_pci_driver), /* 8th Gen Core S 6 Cores Server */ + IMC_DEV(CFL_8S_S_IMC, &skl_uncore_pci_driver), /* 8th Gen Core S 8 Cores Server */ { /* end marker */ } }; diff --git a/arch/x86/events/perf_event.h b/arch/x86/events/perf_event.h index 156286335351a..c5ad9cc61f4bd 100644 --- a/arch/x86/events/perf_event.h +++ b/arch/x86/events/perf_event.h @@ -857,11 +857,16 @@ static inline int amd_pmu_init(void) static inline bool intel_pmu_has_bts(struct perf_event *event) { - if (event->attr.config == PERF_COUNT_HW_BRANCH_INSTRUCTIONS && - !event->attr.freq && 
event->hw.sample_period == 1) - return true; + struct hw_perf_event *hwc = &event->hw; + unsigned int hw_event, bts_event; + + if (event->attr.freq) + return false; + + hw_event = hwc->config & INTEL_ARCH_EVENT_MASK; + bts_event = x86_pmu.event_map(PERF_COUNT_HW_BRANCH_INSTRUCTIONS); - return false; + return hw_event == bts_event && hwc->sample_period == 1; } int intel_pmu_save_and_restart(struct perf_event *event); diff --git a/arch/x86/include/asm/acrnhyper.h b/arch/x86/include/asm/acrnhyper.h new file mode 100644 index 0000000000000..1cfdb24de6961 --- /dev/null +++ b/arch/x86/include/asm/acrnhyper.h @@ -0,0 +1,18 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_X86_ACRNHYPER_H +#define _ASM_X86_ACRNHYPER_H + +#include +#include +#include +#include + +#ifdef CONFIG_ACRN +/* ACRN Hypervisor callback */ +void acrn_hv_callback_vector(void); + +void acrn_setup_intr_irq(void (*handler)(void)); +void acrn_remove_intr_irq(void); +#endif + +#endif diff --git a/arch/x86/include/asm/hardirq.h b/arch/x86/include/asm/hardirq.h index d9069bb26c7fa..5aa52d2c1dcd9 100644 --- a/arch/x86/include/asm/hardirq.h +++ b/arch/x86/include/asm/hardirq.h @@ -37,7 +37,7 @@ typedef struct { #ifdef CONFIG_X86_MCE_AMD unsigned int irq_deferred_error_count; #endif -#if IS_ENABLED(CONFIG_HYPERV) || defined(CONFIG_XEN) +#if IS_ENABLED(CONFIG_HYPERV) || defined(CONFIG_XEN) || defined(CONFIG_ACRN) unsigned int irq_hv_callback_count; #endif #if IS_ENABLED(CONFIG_HYPERV) diff --git a/arch/x86/include/asm/hypervisor.h b/arch/x86/include/asm/hypervisor.h index 8c5aaba6633f2..50a30f6c668b6 100644 --- a/arch/x86/include/asm/hypervisor.h +++ b/arch/x86/include/asm/hypervisor.h @@ -29,6 +29,7 @@ enum x86_hypervisor_type { X86_HYPER_XEN_HVM, X86_HYPER_KVM, X86_HYPER_JAILHOUSE, + X86_HYPER_ACRN, }; #ifdef CONFIG_HYPERVISOR_GUEST diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h index 09b2e3e2cf1be..728dc661ebb69 100644 --- a/arch/x86/include/asm/kvm_host.h +++ 
b/arch/x86/include/asm/kvm_host.h @@ -177,6 +177,7 @@ enum { #define DR6_BD (1 << 13) #define DR6_BS (1 << 14) +#define DR6_BT (1 << 15) #define DR6_RTM (1 << 16) #define DR6_FIXED_1 0xfffe0ff0 #define DR6_INIT 0xffff0ff0 @@ -1045,7 +1046,8 @@ struct kvm_x86_ops { bool (*has_wbinvd_exit)(void); u64 (*read_l1_tsc_offset)(struct kvm_vcpu *vcpu); - void (*write_tsc_offset)(struct kvm_vcpu *vcpu, u64 offset); + /* Returns actual tsc_offset set in active VMCS */ + u64 (*write_l1_tsc_offset)(struct kvm_vcpu *vcpu, u64 offset); void (*get_exit_info)(struct kvm_vcpu *vcpu, u64 *info1, u64 *info2); @@ -1439,7 +1441,7 @@ asmlinkage void kvm_spurious_fault(void); "cmpb $0, kvm_rebooting \n\t" \ "jne 668b \n\t" \ __ASM_SIZE(push) " $666b \n\t" \ - "call kvm_spurious_fault \n\t" \ + "jmp kvm_spurious_fault \n\t" \ ".popsection \n\t" \ _ASM_EXTABLE(666b, 667b) diff --git a/arch/x86/include/asm/mce.h b/arch/x86/include/asm/mce.h index 3a17107594c88..eb786f90f2d31 100644 --- a/arch/x86/include/asm/mce.h +++ b/arch/x86/include/asm/mce.h @@ -216,6 +216,8 @@ static inline int umc_normaddr_to_sysaddr(u64 norm_addr, u16 nid, u8 umc, u64 *s int mce_available(struct cpuinfo_x86 *c); bool mce_is_memory_error(struct mce *m); +bool mce_is_correctable(struct mce *m); +int mce_usable_address(struct mce *m); DECLARE_PER_CPU(unsigned, mce_exception_count); DECLARE_PER_CPU(unsigned, mce_poll_count); diff --git a/arch/x86/include/asm/mmu_context.h b/arch/x86/include/asm/mmu_context.h index eeeb9289c764d..2252b63d38b50 100644 --- a/arch/x86/include/asm/mmu_context.h +++ b/arch/x86/include/asm/mmu_context.h @@ -178,6 +178,10 @@ static inline void switch_ldt(struct mm_struct *prev, struct mm_struct *next) void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk); +/* + * Init a new mm. Used on mm copies, like at fork() + * and on mm's that are brand-new, like at execve(). 
+ */ static inline int init_new_context(struct task_struct *tsk, struct mm_struct *mm) { @@ -228,8 +232,22 @@ do { \ } while (0) #endif +static inline void arch_dup_pkeys(struct mm_struct *oldmm, + struct mm_struct *mm) +{ +#ifdef CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS + if (!cpu_feature_enabled(X86_FEATURE_OSPKE)) + return; + + /* Duplicate the oldmm pkey state in mm: */ + mm->context.pkey_allocation_map = oldmm->context.pkey_allocation_map; + mm->context.execute_only_pkey = oldmm->context.execute_only_pkey; +#endif +} + static inline int arch_dup_mmap(struct mm_struct *oldmm, struct mm_struct *mm) { + arch_dup_pkeys(oldmm, mm); paravirt_arch_dup_mmap(oldmm, mm); return ldt_dup_context(oldmm, mm); } diff --git a/arch/x86/include/asm/msr-index.h b/arch/x86/include/asm/msr-index.h index 4731f0cf97c5c..1f9de7635bcbe 100644 --- a/arch/x86/include/asm/msr-index.h +++ b/arch/x86/include/asm/msr-index.h @@ -41,9 +41,10 @@ #define MSR_IA32_SPEC_CTRL 0x00000048 /* Speculation Control */ #define SPEC_CTRL_IBRS (1 << 0) /* Indirect Branch Restricted Speculation */ -#define SPEC_CTRL_STIBP (1 << 1) /* Single Thread Indirect Branch Predictors */ +#define SPEC_CTRL_STIBP_SHIFT 1 /* Single Thread Indirect Branch Predictor (STIBP) bit */ +#define SPEC_CTRL_STIBP (1 << SPEC_CTRL_STIBP_SHIFT) /* STIBP mask */ #define SPEC_CTRL_SSBD_SHIFT 2 /* Speculative Store Bypass Disable bit */ -#define SPEC_CTRL_SSBD (1 << SPEC_CTRL_SSBD_SHIFT) /* Speculative Store Bypass Disable */ +#define SPEC_CTRL_SSBD (1 << SPEC_CTRL_SSBD_SHIFT) /* Speculative Store Bypass Disable */ #define MSR_IA32_PRED_CMD 0x00000049 /* Prediction Command */ #define PRED_CMD_IBPB (1 << 0) /* Indirect Branch Prediction Barrier */ @@ -388,6 +389,7 @@ #define MSR_F15H_NB_PERF_CTR 0xc0010241 #define MSR_F15H_PTSC 0xc0010280 #define MSR_F15H_IC_CFG 0xc0011021 +#define MSR_F15H_EX_CFG 0xc001102c /* Fam 10h MSRs */ #define MSR_FAM10H_MMIO_CONF_BASE 0xc0010058 diff --git a/arch/x86/include/asm/nospec-branch.h 
b/arch/x86/include/asm/nospec-branch.h index fd2a8c1b88bc1..032b6009baab4 100644 --- a/arch/x86/include/asm/nospec-branch.h +++ b/arch/x86/include/asm/nospec-branch.h @@ -3,6 +3,8 @@ #ifndef _ASM_X86_NOSPEC_BRANCH_H_ #define _ASM_X86_NOSPEC_BRANCH_H_ +#include + #include #include #include @@ -162,29 +164,35 @@ _ASM_PTR " 999b\n\t" \ ".popsection\n\t" -#if defined(CONFIG_X86_64) && defined(RETPOLINE) +#ifdef CONFIG_RETPOLINE +#ifdef CONFIG_X86_64 /* - * Since the inline asm uses the %V modifier which is only in newer GCC, - * the 64-bit one is dependent on RETPOLINE not CONFIG_RETPOLINE. + * Inline asm uses the %V modifier which is only in newer GCC + * which is ensured when CONFIG_RETPOLINE is defined. */ # define CALL_NOSPEC \ ANNOTATE_NOSPEC_ALTERNATIVE \ - ALTERNATIVE( \ + ALTERNATIVE_2( \ ANNOTATE_RETPOLINE_SAFE \ "call *%[thunk_target]\n", \ "call __x86_indirect_thunk_%V[thunk_target]\n", \ - X86_FEATURE_RETPOLINE) + X86_FEATURE_RETPOLINE, \ + "lfence;\n" \ + ANNOTATE_RETPOLINE_SAFE \ + "call *%[thunk_target]\n", \ + X86_FEATURE_RETPOLINE_AMD) # define THUNK_TARGET(addr) [thunk_target] "r" (addr) -#elif defined(CONFIG_X86_32) && defined(CONFIG_RETPOLINE) +#else /* CONFIG_X86_32 */ /* * For i386 we use the original ret-equivalent retpoline, because * otherwise we'll run out of registers. We don't care about CET * here, anyway. 
*/ # define CALL_NOSPEC \ - ALTERNATIVE( \ + ANNOTATE_NOSPEC_ALTERNATIVE \ + ALTERNATIVE_2( \ ANNOTATE_RETPOLINE_SAFE \ "call *%[thunk_target]\n", \ " jmp 904f;\n" \ @@ -199,9 +207,14 @@ " ret;\n" \ " .align 16\n" \ "904: call 901b;\n", \ - X86_FEATURE_RETPOLINE) + X86_FEATURE_RETPOLINE, \ + "lfence;\n" \ + ANNOTATE_RETPOLINE_SAFE \ + "call *%[thunk_target]\n", \ + X86_FEATURE_RETPOLINE_AMD) # define THUNK_TARGET(addr) [thunk_target] "rm" (addr) +#endif #else /* No retpoline for C / inline asm */ # define CALL_NOSPEC "call *%[thunk_target]\n" # define THUNK_TARGET(addr) [thunk_target] "rm" (addr) @@ -210,13 +223,19 @@ /* The Spectre V2 mitigation variants */ enum spectre_v2_mitigation { SPECTRE_V2_NONE, - SPECTRE_V2_RETPOLINE_MINIMAL, - SPECTRE_V2_RETPOLINE_MINIMAL_AMD, SPECTRE_V2_RETPOLINE_GENERIC, SPECTRE_V2_RETPOLINE_AMD, SPECTRE_V2_IBRS_ENHANCED, }; +/* The indirect branch speculation control variants */ +enum spectre_v2_user_mitigation { + SPECTRE_V2_USER_NONE, + SPECTRE_V2_USER_STRICT, + SPECTRE_V2_USER_PRCTL, + SPECTRE_V2_USER_SECCOMP, +}; + /* The Speculative Store Bypass disable variants */ enum ssb_mitigation { SPEC_STORE_BYPASS_NONE, @@ -294,6 +313,10 @@ do { \ preempt_enable(); \ } while (0) +DECLARE_STATIC_KEY_FALSE(switch_to_cond_stibp); +DECLARE_STATIC_KEY_FALSE(switch_mm_cond_ibpb); +DECLARE_STATIC_KEY_FALSE(switch_mm_always_ibpb); + #endif /* __ASSEMBLY__ */ /* diff --git a/arch/x86/include/asm/page_64_types.h b/arch/x86/include/asm/page_64_types.h index 6afac386a434e..b99d497e342d0 100644 --- a/arch/x86/include/asm/page_64_types.h +++ b/arch/x86/include/asm/page_64_types.h @@ -33,12 +33,14 @@ /* * Set __PAGE_OFFSET to the most negative possible address + - * PGDIR_SIZE*16 (pgd slot 272). The gap is to allow a space for a - * hypervisor to fit. Choosing 16 slots here is arbitrary, but it's - * what Xen requires. + * PGDIR_SIZE*17 (pgd slot 273). 
+ * + * The gap is to allow a space for LDT remap for PTI (1 pgd slot) and space for + * a hypervisor (16 slots). Choosing 16 slots for a hypervisor is arbitrary, + * but it's what Xen requires. */ -#define __PAGE_OFFSET_BASE_L5 _AC(0xff10000000000000, UL) -#define __PAGE_OFFSET_BASE_L4 _AC(0xffff880000000000, UL) +#define __PAGE_OFFSET_BASE_L5 _AC(0xff11000000000000, UL) +#define __PAGE_OFFSET_BASE_L4 _AC(0xffff888000000000, UL) #ifdef CONFIG_DYNAMIC_MEMORY_LAYOUT #define __PAGE_OFFSET page_offset_base diff --git a/arch/x86/include/asm/pgtable_64_types.h b/arch/x86/include/asm/pgtable_64_types.h index 04edd2d58211a..88bca456da994 100644 --- a/arch/x86/include/asm/pgtable_64_types.h +++ b/arch/x86/include/asm/pgtable_64_types.h @@ -111,9 +111,12 @@ extern unsigned int ptrs_per_p4d; */ #define MAXMEM (1UL << MAX_PHYSMEM_BITS) -#define LDT_PGD_ENTRY_L4 -3UL -#define LDT_PGD_ENTRY_L5 -112UL -#define LDT_PGD_ENTRY (pgtable_l5_enabled() ? LDT_PGD_ENTRY_L5 : LDT_PGD_ENTRY_L4) +#define GUARD_HOLE_PGD_ENTRY -256UL +#define GUARD_HOLE_SIZE (16UL << PGDIR_SHIFT) +#define GUARD_HOLE_BASE_ADDR (GUARD_HOLE_PGD_ENTRY << PGDIR_SHIFT) +#define GUARD_HOLE_END_ADDR (GUARD_HOLE_BASE_ADDR + GUARD_HOLE_SIZE) + +#define LDT_PGD_ENTRY -240UL #define LDT_BASE_ADDR (LDT_PGD_ENTRY << PGDIR_SHIFT) #define LDT_END_ADDR (LDT_BASE_ADDR + PGDIR_SIZE) diff --git a/arch/x86/include/asm/qspinlock.h b/arch/x86/include/asm/qspinlock.h index 3e70bed8a978e..055c60a057567 100644 --- a/arch/x86/include/asm/qspinlock.h +++ b/arch/x86/include/asm/qspinlock.h @@ -6,9 +6,30 @@ #include #include #include +#include #define _Q_PENDING_LOOPS (1 << 9) +#define queued_fetch_set_pending_acquire queued_fetch_set_pending_acquire + +static __always_inline bool __queued_RMW_btsl(struct qspinlock *lock) +{ + GEN_BINARY_RMWcc(LOCK_PREFIX "btsl", lock->val.counter, + "I", _Q_PENDING_OFFSET, "%0", c); +} + +static __always_inline u32 queued_fetch_set_pending_acquire(struct qspinlock *lock) +{ + u32 val = 0; + + if 
(__queued_RMW_btsl(lock)) + val |= _Q_PENDING_VAL; + + val |= atomic_read(&lock->val) & ~_Q_PENDING_MASK; + + return val; +} + #ifdef CONFIG_PARAVIRT_SPINLOCKS extern void native_queued_spin_lock_slowpath(struct qspinlock *lock, u32 val); extern void __pv_init_lock_hash(void); diff --git a/arch/x86/include/asm/spec-ctrl.h b/arch/x86/include/asm/spec-ctrl.h index ae7c2c5cd7f0e..5393babc05989 100644 --- a/arch/x86/include/asm/spec-ctrl.h +++ b/arch/x86/include/asm/spec-ctrl.h @@ -53,12 +53,24 @@ static inline u64 ssbd_tif_to_spec_ctrl(u64 tifn) return (tifn & _TIF_SSBD) >> (TIF_SSBD - SPEC_CTRL_SSBD_SHIFT); } +static inline u64 stibp_tif_to_spec_ctrl(u64 tifn) +{ + BUILD_BUG_ON(TIF_SPEC_IB < SPEC_CTRL_STIBP_SHIFT); + return (tifn & _TIF_SPEC_IB) >> (TIF_SPEC_IB - SPEC_CTRL_STIBP_SHIFT); +} + static inline unsigned long ssbd_spec_ctrl_to_tif(u64 spec_ctrl) { BUILD_BUG_ON(TIF_SSBD < SPEC_CTRL_SSBD_SHIFT); return (spec_ctrl & SPEC_CTRL_SSBD) << (TIF_SSBD - SPEC_CTRL_SSBD_SHIFT); } +static inline unsigned long stibp_spec_ctrl_to_tif(u64 spec_ctrl) +{ + BUILD_BUG_ON(TIF_SPEC_IB < SPEC_CTRL_STIBP_SHIFT); + return (spec_ctrl & SPEC_CTRL_STIBP) << (TIF_SPEC_IB - SPEC_CTRL_STIBP_SHIFT); +} + static inline u64 ssbd_tif_to_amd_ls_cfg(u64 tifn) { return (tifn & _TIF_SSBD) ? 
x86_amd_ls_cfg_ssbd_mask : 0ULL; @@ -70,11 +82,7 @@ extern void speculative_store_bypass_ht_init(void); static inline void speculative_store_bypass_ht_init(void) { } #endif -extern void speculative_store_bypass_update(unsigned long tif); - -static inline void speculative_store_bypass_update_current(void) -{ - speculative_store_bypass_update(current_thread_info()->flags); -} +extern void speculation_ctrl_update(unsigned long tif); +extern void speculation_ctrl_update_current(void); #endif diff --git a/arch/x86/include/asm/switch_to.h b/arch/x86/include/asm/switch_to.h index 36bd243843d6d..7cf1a270d8910 100644 --- a/arch/x86/include/asm/switch_to.h +++ b/arch/x86/include/asm/switch_to.h @@ -11,9 +11,6 @@ struct task_struct *__switch_to_asm(struct task_struct *prev, __visible struct task_struct *__switch_to(struct task_struct *prev, struct task_struct *next); -struct tss_struct; -void __switch_to_xtra(struct task_struct *prev_p, struct task_struct *next_p, - struct tss_struct *tss); /* This runs runs on the previous thread's stack. 
*/ static inline void prepare_switch_to(struct task_struct *next) diff --git a/arch/x86/include/asm/thread_info.h b/arch/x86/include/asm/thread_info.h index 2ff2a30a264f4..82b73b75d67ca 100644 --- a/arch/x86/include/asm/thread_info.h +++ b/arch/x86/include/asm/thread_info.h @@ -79,10 +79,12 @@ struct thread_info { #define TIF_SIGPENDING 2 /* signal pending */ #define TIF_NEED_RESCHED 3 /* rescheduling necessary */ #define TIF_SINGLESTEP 4 /* reenable singlestep on user return*/ -#define TIF_SSBD 5 /* Reduced data speculation */ +#define TIF_SSBD 5 /* Speculative store bypass disable */ #define TIF_SYSCALL_EMU 6 /* syscall emulation active */ #define TIF_SYSCALL_AUDIT 7 /* syscall auditing active */ #define TIF_SECCOMP 8 /* secure computing */ +#define TIF_SPEC_IB 9 /* Indirect branch speculation mitigation */ +#define TIF_SPEC_FORCE_UPDATE 10 /* Force speculation MSR update in context switch */ #define TIF_USER_RETURN_NOTIFY 11 /* notify kernel of userspace return */ #define TIF_UPROBE 12 /* breakpointed or singlestepping */ #define TIF_PATCH_PENDING 13 /* pending live patching update */ @@ -110,6 +112,8 @@ struct thread_info { #define _TIF_SYSCALL_EMU (1 << TIF_SYSCALL_EMU) #define _TIF_SYSCALL_AUDIT (1 << TIF_SYSCALL_AUDIT) #define _TIF_SECCOMP (1 << TIF_SECCOMP) +#define _TIF_SPEC_IB (1 << TIF_SPEC_IB) +#define _TIF_SPEC_FORCE_UPDATE (1 << TIF_SPEC_FORCE_UPDATE) #define _TIF_USER_RETURN_NOTIFY (1 << TIF_USER_RETURN_NOTIFY) #define _TIF_UPROBE (1 << TIF_UPROBE) #define _TIF_PATCH_PENDING (1 << TIF_PATCH_PENDING) @@ -145,8 +149,18 @@ struct thread_info { _TIF_FSCHECK) /* flags to check in __switch_to() */ -#define _TIF_WORK_CTXSW \ - (_TIF_IO_BITMAP|_TIF_NOCPUID|_TIF_NOTSC|_TIF_BLOCKSTEP|_TIF_SSBD) +#define _TIF_WORK_CTXSW_BASE \ + (_TIF_IO_BITMAP|_TIF_NOCPUID|_TIF_NOTSC|_TIF_BLOCKSTEP| \ + _TIF_SSBD | _TIF_SPEC_FORCE_UPDATE) + +/* + * Avoid calls to __switch_to_xtra() on UP as STIBP is not evaluated. 
+ */ +#ifdef CONFIG_SMP +# define _TIF_WORK_CTXSW (_TIF_WORK_CTXSW_BASE | _TIF_SPEC_IB) +#else +# define _TIF_WORK_CTXSW (_TIF_WORK_CTXSW_BASE) +#endif #define _TIF_WORK_CTXSW_PREV (_TIF_WORK_CTXSW|_TIF_USER_RETURN_NOTIFY) #define _TIF_WORK_CTXSW_NEXT (_TIF_WORK_CTXSW) diff --git a/arch/x86/include/asm/tlbflush.h b/arch/x86/include/asm/tlbflush.h index 58ce5288878e8..79ec7add5f98f 100644 --- a/arch/x86/include/asm/tlbflush.h +++ b/arch/x86/include/asm/tlbflush.h @@ -185,10 +185,14 @@ struct tlb_state { #define LOADED_MM_SWITCHING ((struct mm_struct *)1) + /* Last user mm for optimizing IBPB */ + union { + struct mm_struct *last_user_mm; + unsigned long last_user_mm_ibpb; + }; + u16 loaded_mm_asid; u16 next_asid; - /* last user mm's ctx id */ - u64 last_ctx_id; /* * We can be in one of several states: @@ -469,6 +473,12 @@ static inline void __native_flush_tlb_one_user(unsigned long addr) */ static inline void __flush_tlb_all(void) { + /* + * This is to catch users with enabled preemption and the PGE feature + * and don't trigger the warning in __native_flush_tlb(). 
+ */ + VM_WARN_ON_ONCE(preemptible()); + if (boot_cpu_has(X86_FEATURE_PGE)) { __flush_tlb_global(); } else { diff --git a/arch/x86/include/asm/traps.h b/arch/x86/include/asm/traps.h index 3de69330e6c50..afbc87206886e 100644 --- a/arch/x86/include/asm/traps.h +++ b/arch/x86/include/asm/traps.h @@ -104,9 +104,9 @@ extern int panic_on_unrecovered_nmi; void math_emulate(struct math_emu_info *); #ifndef CONFIG_X86_32 -asmlinkage void smp_thermal_interrupt(void); -asmlinkage void smp_threshold_interrupt(void); -asmlinkage void smp_deferred_error_interrupt(void); +asmlinkage void smp_thermal_interrupt(struct pt_regs *regs); +asmlinkage void smp_threshold_interrupt(struct pt_regs *regs); +asmlinkage void smp_deferred_error_interrupt(struct pt_regs *regs); #endif extern void ist_enter(struct pt_regs *regs); diff --git a/arch/x86/kernel/apic/apic_flat_64.c b/arch/x86/kernel/apic/apic_flat_64.c index e84c9eb4e5b41..9ba0ac0c8c1f6 100644 --- a/arch/x86/kernel/apic/apic_flat_64.c +++ b/arch/x86/kernel/apic/apic_flat_64.c @@ -29,6 +29,15 @@ static struct apic apic_flat; struct apic *apic __ro_after_init = &apic_flat; EXPORT_SYMBOL_GPL(apic); +int xapic_phys = 0; + +static int set_xapic_phys_mode(char *arg) +{ + xapic_phys = 1; + return 0; +} +early_param("xapic_phys", set_xapic_phys_mode); + static int flat_acpi_madt_oem_check(char *oem_id, char *oem_table_id) { return 1; @@ -236,6 +245,9 @@ static void physflat_send_IPI_all(int vector) static int physflat_probe(void) { + if (xapic_phys == 1) + return 1; + if (apic == &apic_physflat || num_possible_cpus() > 8 || jailhouse_paravirt()) return 1; diff --git a/arch/x86/kernel/check.c b/arch/x86/kernel/check.c index 33399426793e0..cc8258a5378b0 100644 --- a/arch/x86/kernel/check.c +++ b/arch/x86/kernel/check.c @@ -31,6 +31,11 @@ static __init int set_corruption_check(char *arg) ssize_t ret; unsigned long val; + if (!arg) { + pr_err("memory_corruption_check config string not provided\n"); + return -EINVAL; + } + ret = kstrtoul(arg, 10, 
&val); if (ret) return ret; @@ -45,6 +50,11 @@ static __init int set_corruption_check_period(char *arg) ssize_t ret; unsigned long val; + if (!arg) { + pr_err("memory_corruption_check_period config string not provided\n"); + return -EINVAL; + } + ret = kstrtoul(arg, 10, &val); if (ret) return ret; @@ -59,6 +69,11 @@ static __init int set_corruption_check_size(char *arg) char *end; unsigned size; + if (!arg) { + pr_err("memory_corruption_check_size config string not provided\n"); + return -EINVAL; + } + size = memparse(arg, &end); if (*end == '\0') diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c index 40bdaea97fe7c..807d06a7acac1 100644 --- a/arch/x86/kernel/cpu/bugs.c +++ b/arch/x86/kernel/cpu/bugs.c @@ -14,6 +14,7 @@ #include #include #include +#include #include #include @@ -35,12 +36,10 @@ static void __init spectre_v2_select_mitigation(void); static void __init ssb_select_mitigation(void); static void __init l1tf_select_mitigation(void); -/* - * Our boot-time value of the SPEC_CTRL MSR. We read it once so that any - * writes to SPEC_CTRL contain whatever reserved bits have been set. - */ -u64 __ro_after_init x86_spec_ctrl_base; +/* The base value of the SPEC_CTRL MSR that always has to be preserved. 
*/ +u64 x86_spec_ctrl_base; EXPORT_SYMBOL_GPL(x86_spec_ctrl_base); +static DEFINE_MUTEX(spec_ctrl_mutex); /* * The vendor and possibly platform specific bits which can be modified in @@ -55,6 +54,13 @@ static u64 __ro_after_init x86_spec_ctrl_mask = SPEC_CTRL_IBRS; u64 __ro_after_init x86_amd_ls_cfg_base; u64 __ro_after_init x86_amd_ls_cfg_ssbd_mask; +/* Control conditional STIPB in switch_to() */ +DEFINE_STATIC_KEY_FALSE(switch_to_cond_stibp); +/* Control conditional IBPB in switch_mm() */ +DEFINE_STATIC_KEY_FALSE(switch_mm_cond_ibpb); +/* Control unconditional IBPB in switch_mm() */ +DEFINE_STATIC_KEY_FALSE(switch_mm_always_ibpb); + void __init check_bugs(void) { identify_boot_cpu(); @@ -125,31 +131,6 @@ void __init check_bugs(void) #endif } -/* The kernel command line selection */ -enum spectre_v2_mitigation_cmd { - SPECTRE_V2_CMD_NONE, - SPECTRE_V2_CMD_AUTO, - SPECTRE_V2_CMD_FORCE, - SPECTRE_V2_CMD_RETPOLINE, - SPECTRE_V2_CMD_RETPOLINE_GENERIC, - SPECTRE_V2_CMD_RETPOLINE_AMD, -}; - -static const char *spectre_v2_strings[] = { - [SPECTRE_V2_NONE] = "Vulnerable", - [SPECTRE_V2_RETPOLINE_MINIMAL] = "Vulnerable: Minimal generic ASM retpoline", - [SPECTRE_V2_RETPOLINE_MINIMAL_AMD] = "Vulnerable: Minimal AMD ASM retpoline", - [SPECTRE_V2_RETPOLINE_GENERIC] = "Mitigation: Full generic retpoline", - [SPECTRE_V2_RETPOLINE_AMD] = "Mitigation: Full AMD retpoline", - [SPECTRE_V2_IBRS_ENHANCED] = "Mitigation: Enhanced IBRS", -}; - -#undef pr_fmt -#define pr_fmt(fmt) "Spectre V2 : " fmt - -static enum spectre_v2_mitigation spectre_v2_enabled __ro_after_init = - SPECTRE_V2_NONE; - void x86_virt_spec_ctrl(u64 guest_spec_ctrl, u64 guest_virt_spec_ctrl, bool setguest) { @@ -171,6 +152,10 @@ x86_virt_spec_ctrl(u64 guest_spec_ctrl, u64 guest_virt_spec_ctrl, bool setguest) static_cpu_has(X86_FEATURE_AMD_SSBD)) hostval |= ssbd_tif_to_spec_ctrl(ti->flags); + /* Conditional STIBP enabled? 
*/ + if (static_branch_unlikely(&switch_to_cond_stibp)) + hostval |= stibp_tif_to_spec_ctrl(ti->flags); + if (hostval != guestval) { msrval = setguest ? guestval : hostval; wrmsrl(MSR_IA32_SPEC_CTRL, msrval); @@ -204,7 +189,7 @@ x86_virt_spec_ctrl(u64 guest_spec_ctrl, u64 guest_virt_spec_ctrl, bool setguest) tif = setguest ? ssbd_spec_ctrl_to_tif(guestval) : ssbd_spec_ctrl_to_tif(hostval); - speculative_store_bypass_update(tif); + speculation_ctrl_update(tif); } } EXPORT_SYMBOL_GPL(x86_virt_spec_ctrl); @@ -219,7 +204,16 @@ static void x86_amd_ssb_disable(void) wrmsrl(MSR_AMD64_LS_CFG, msrval); } -#ifdef RETPOLINE +#undef pr_fmt +#define pr_fmt(fmt) "Spectre V2 : " fmt + +static enum spectre_v2_mitigation spectre_v2_enabled __ro_after_init = + SPECTRE_V2_NONE; + +static enum spectre_v2_user_mitigation spectre_v2_user __ro_after_init = + SPECTRE_V2_USER_NONE; + +#ifdef CONFIG_RETPOLINE static bool spectre_v2_bad_module; bool retpoline_module_ok(bool has_retpoline) @@ -240,67 +234,217 @@ static inline const char *spectre_v2_module_string(void) static inline const char *spectre_v2_module_string(void) { return ""; } #endif -static void __init spec2_print_if_insecure(const char *reason) +static inline bool match_option(const char *arg, int arglen, const char *opt) { - if (boot_cpu_has_bug(X86_BUG_SPECTRE_V2)) - pr_info("%s selected on command line.\n", reason); + int len = strlen(opt); + + return len == arglen && !strncmp(arg, opt, len); } -static void __init spec2_print_if_secure(const char *reason) +/* The kernel command line selection for spectre v2 */ +enum spectre_v2_mitigation_cmd { + SPECTRE_V2_CMD_NONE, + SPECTRE_V2_CMD_AUTO, + SPECTRE_V2_CMD_FORCE, + SPECTRE_V2_CMD_RETPOLINE, + SPECTRE_V2_CMD_RETPOLINE_GENERIC, + SPECTRE_V2_CMD_RETPOLINE_AMD, +}; + +enum spectre_v2_user_cmd { + SPECTRE_V2_USER_CMD_NONE, + SPECTRE_V2_USER_CMD_AUTO, + SPECTRE_V2_USER_CMD_FORCE, + SPECTRE_V2_USER_CMD_PRCTL, + SPECTRE_V2_USER_CMD_PRCTL_IBPB, + SPECTRE_V2_USER_CMD_SECCOMP, + 
SPECTRE_V2_USER_CMD_SECCOMP_IBPB, +}; + +static const char * const spectre_v2_user_strings[] = { + [SPECTRE_V2_USER_NONE] = "User space: Vulnerable", + [SPECTRE_V2_USER_STRICT] = "User space: Mitigation: STIBP protection", + [SPECTRE_V2_USER_PRCTL] = "User space: Mitigation: STIBP via prctl", + [SPECTRE_V2_USER_SECCOMP] = "User space: Mitigation: STIBP via seccomp and prctl", +}; + +static const struct { + const char *option; + enum spectre_v2_user_cmd cmd; + bool secure; +} v2_user_options[] __initdata = { + { "auto", SPECTRE_V2_USER_CMD_AUTO, false }, + { "off", SPECTRE_V2_USER_CMD_NONE, false }, + { "on", SPECTRE_V2_USER_CMD_FORCE, true }, + { "prctl", SPECTRE_V2_USER_CMD_PRCTL, false }, + { "prctl,ibpb", SPECTRE_V2_USER_CMD_PRCTL_IBPB, false }, + { "seccomp", SPECTRE_V2_USER_CMD_SECCOMP, false }, + { "seccomp,ibpb", SPECTRE_V2_USER_CMD_SECCOMP_IBPB, false }, +}; + +static void __init spec_v2_user_print_cond(const char *reason, bool secure) { - if (!boot_cpu_has_bug(X86_BUG_SPECTRE_V2)) - pr_info("%s selected on command line.\n", reason); + if (boot_cpu_has_bug(X86_BUG_SPECTRE_V2) != secure) + pr_info("spectre_v2_user=%s forced on command line.\n", reason); } -static inline bool retp_compiler(void) +static enum spectre_v2_user_cmd __init +spectre_v2_parse_user_cmdline(enum spectre_v2_mitigation_cmd v2_cmd) { - return __is_defined(RETPOLINE); + char arg[20]; + int ret, i; + + switch (v2_cmd) { + case SPECTRE_V2_CMD_NONE: + return SPECTRE_V2_USER_CMD_NONE; + case SPECTRE_V2_CMD_FORCE: + return SPECTRE_V2_USER_CMD_FORCE; + default: + break; + } + + ret = cmdline_find_option(boot_command_line, "spectre_v2_user", + arg, sizeof(arg)); + if (ret < 0) + return SPECTRE_V2_USER_CMD_AUTO; + + for (i = 0; i < ARRAY_SIZE(v2_user_options); i++) { + if (match_option(arg, ret, v2_user_options[i].option)) { + spec_v2_user_print_cond(v2_user_options[i].option, + v2_user_options[i].secure); + return v2_user_options[i].cmd; + } + } + + pr_err("Unknown user space protection option 
(%s). Switching to AUTO select\n", arg); + return SPECTRE_V2_USER_CMD_AUTO; } -static inline bool match_option(const char *arg, int arglen, const char *opt) +static void __init +spectre_v2_user_select_mitigation(enum spectre_v2_mitigation_cmd v2_cmd) { - int len = strlen(opt); + enum spectre_v2_user_mitigation mode = SPECTRE_V2_USER_NONE; + bool smt_possible = IS_ENABLED(CONFIG_SMP); + enum spectre_v2_user_cmd cmd; - return len == arglen && !strncmp(arg, opt, len); + if (!boot_cpu_has(X86_FEATURE_IBPB) && !boot_cpu_has(X86_FEATURE_STIBP)) + return; + + if (cpu_smt_control == CPU_SMT_FORCE_DISABLED || + cpu_smt_control == CPU_SMT_NOT_SUPPORTED) + smt_possible = false; + + cmd = spectre_v2_parse_user_cmdline(v2_cmd); + switch (cmd) { + case SPECTRE_V2_USER_CMD_NONE: + goto set_mode; + case SPECTRE_V2_USER_CMD_FORCE: + mode = SPECTRE_V2_USER_STRICT; + break; + case SPECTRE_V2_USER_CMD_PRCTL: + case SPECTRE_V2_USER_CMD_PRCTL_IBPB: + mode = SPECTRE_V2_USER_PRCTL; + break; + case SPECTRE_V2_USER_CMD_AUTO: + case SPECTRE_V2_USER_CMD_SECCOMP: + case SPECTRE_V2_USER_CMD_SECCOMP_IBPB: + if (IS_ENABLED(CONFIG_SECCOMP)) + mode = SPECTRE_V2_USER_SECCOMP; + else + mode = SPECTRE_V2_USER_PRCTL; + break; + } + + /* Initialize Indirect Branch Prediction Barrier */ + if (boot_cpu_has(X86_FEATURE_IBPB)) { + setup_force_cpu_cap(X86_FEATURE_USE_IBPB); + + switch (cmd) { + case SPECTRE_V2_USER_CMD_FORCE: + case SPECTRE_V2_USER_CMD_PRCTL_IBPB: + case SPECTRE_V2_USER_CMD_SECCOMP_IBPB: + static_branch_enable(&switch_mm_always_ibpb); + break; + case SPECTRE_V2_USER_CMD_PRCTL: + case SPECTRE_V2_USER_CMD_AUTO: + case SPECTRE_V2_USER_CMD_SECCOMP: + static_branch_enable(&switch_mm_cond_ibpb); + break; + default: + break; + } + + pr_info("mitigation: Enabling %s Indirect Branch Prediction Barrier\n", + static_key_enabled(&switch_mm_always_ibpb) ? 
+ "always-on" : "conditional"); + } + + /* If enhanced IBRS is enabled no STIPB required */ + if (spectre_v2_enabled == SPECTRE_V2_IBRS_ENHANCED) + return; + + /* + * If SMT is not possible or STIBP is not available clear the STIPB + * mode. + */ + if (!smt_possible || !boot_cpu_has(X86_FEATURE_STIBP)) + mode = SPECTRE_V2_USER_NONE; +set_mode: + spectre_v2_user = mode; + /* Only print the STIBP mode when SMT possible */ + if (smt_possible) + pr_info("%s\n", spectre_v2_user_strings[mode]); } +static const char * const spectre_v2_strings[] = { + [SPECTRE_V2_NONE] = "Vulnerable", + [SPECTRE_V2_RETPOLINE_GENERIC] = "Mitigation: Full generic retpoline", + [SPECTRE_V2_RETPOLINE_AMD] = "Mitigation: Full AMD retpoline", + [SPECTRE_V2_IBRS_ENHANCED] = "Mitigation: Enhanced IBRS", +}; + static const struct { const char *option; enum spectre_v2_mitigation_cmd cmd; bool secure; -} mitigation_options[] = { - { "off", SPECTRE_V2_CMD_NONE, false }, - { "on", SPECTRE_V2_CMD_FORCE, true }, - { "retpoline", SPECTRE_V2_CMD_RETPOLINE, false }, - { "retpoline,amd", SPECTRE_V2_CMD_RETPOLINE_AMD, false }, - { "retpoline,generic", SPECTRE_V2_CMD_RETPOLINE_GENERIC, false }, - { "auto", SPECTRE_V2_CMD_AUTO, false }, +} mitigation_options[] __initdata = { + { "off", SPECTRE_V2_CMD_NONE, false }, + { "on", SPECTRE_V2_CMD_FORCE, true }, + { "retpoline", SPECTRE_V2_CMD_RETPOLINE, false }, + { "retpoline,amd", SPECTRE_V2_CMD_RETPOLINE_AMD, false }, + { "retpoline,generic", SPECTRE_V2_CMD_RETPOLINE_GENERIC, false }, + { "auto", SPECTRE_V2_CMD_AUTO, false }, }; +static void __init spec_v2_print_cond(const char *reason, bool secure) +{ + if (boot_cpu_has_bug(X86_BUG_SPECTRE_V2) != secure) + pr_info("%s selected on command line.\n", reason); +} + static enum spectre_v2_mitigation_cmd __init spectre_v2_parse_cmdline(void) { + enum spectre_v2_mitigation_cmd cmd = SPECTRE_V2_CMD_AUTO; char arg[20]; int ret, i; - enum spectre_v2_mitigation_cmd cmd = SPECTRE_V2_CMD_AUTO; if 
(cmdline_find_option_bool(boot_command_line, "nospectre_v2")) return SPECTRE_V2_CMD_NONE; - else { - ret = cmdline_find_option(boot_command_line, "spectre_v2", arg, sizeof(arg)); - if (ret < 0) - return SPECTRE_V2_CMD_AUTO; - for (i = 0; i < ARRAY_SIZE(mitigation_options); i++) { - if (!match_option(arg, ret, mitigation_options[i].option)) - continue; - cmd = mitigation_options[i].cmd; - break; - } + ret = cmdline_find_option(boot_command_line, "spectre_v2", arg, sizeof(arg)); + if (ret < 0) + return SPECTRE_V2_CMD_AUTO; - if (i >= ARRAY_SIZE(mitigation_options)) { - pr_err("unknown option (%s). Switching to AUTO select\n", arg); - return SPECTRE_V2_CMD_AUTO; - } + for (i = 0; i < ARRAY_SIZE(mitigation_options); i++) { + if (!match_option(arg, ret, mitigation_options[i].option)) + continue; + cmd = mitigation_options[i].cmd; + break; + } + + if (i >= ARRAY_SIZE(mitigation_options)) { + pr_err("unknown option (%s). Switching to AUTO select\n", arg); + return SPECTRE_V2_CMD_AUTO; } if ((cmd == SPECTRE_V2_CMD_RETPOLINE || @@ -317,11 +461,8 @@ static enum spectre_v2_mitigation_cmd __init spectre_v2_parse_cmdline(void) return SPECTRE_V2_CMD_AUTO; } - if (mitigation_options[i].secure) - spec2_print_if_secure(mitigation_options[i].option); - else - spec2_print_if_insecure(mitigation_options[i].option); - + spec_v2_print_cond(mitigation_options[i].option, + mitigation_options[i].secure); return cmd; } @@ -377,14 +518,12 @@ static void __init spectre_v2_select_mitigation(void) pr_err("Spectre mitigation: LFENCE not serializing, switching to generic retpoline\n"); goto retpoline_generic; } - mode = retp_compiler() ? SPECTRE_V2_RETPOLINE_AMD : - SPECTRE_V2_RETPOLINE_MINIMAL_AMD; + mode = SPECTRE_V2_RETPOLINE_AMD; setup_force_cpu_cap(X86_FEATURE_RETPOLINE_AMD); setup_force_cpu_cap(X86_FEATURE_RETPOLINE); } else { retpoline_generic: - mode = retp_compiler() ? 
SPECTRE_V2_RETPOLINE_GENERIC : - SPECTRE_V2_RETPOLINE_MINIMAL; + mode = SPECTRE_V2_RETPOLINE_GENERIC; setup_force_cpu_cap(X86_FEATURE_RETPOLINE); } @@ -403,12 +542,6 @@ static void __init spectre_v2_select_mitigation(void) setup_force_cpu_cap(X86_FEATURE_RSB_CTXSW); pr_info("Spectre v2 / SpectreRSB mitigation: Filling RSB on context switch\n"); - /* Initialize Indirect Branch Prediction Barrier if supported */ - if (boot_cpu_has(X86_FEATURE_IBPB)) { - setup_force_cpu_cap(X86_FEATURE_USE_IBPB); - pr_info("Spectre v2 mitigation: Enabling Indirect Branch Prediction Barrier\n"); - } - /* * Retpoline means the kernel is safe because it has no indirect * branches. Enhanced IBRS protects firmware too, so, enable restricted @@ -424,6 +557,66 @@ static void __init spectre_v2_select_mitigation(void) setup_force_cpu_cap(X86_FEATURE_USE_IBRS_FW); pr_info("Enabling Restricted Speculation for firmware calls\n"); } + + /* Set up IBPB and STIBP depending on the general spectre V2 command */ + spectre_v2_user_select_mitigation(cmd); + + /* Enable STIBP if appropriate */ + arch_smt_update(); +} + +static void update_stibp_msr(void * __unused) +{ + wrmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base); +} + +/* Update x86_spec_ctrl_base in case SMT state changed. */ +static void update_stibp_strict(void) +{ + u64 mask = x86_spec_ctrl_base & ~SPEC_CTRL_STIBP; + + if (sched_smt_active()) + mask |= SPEC_CTRL_STIBP; + + if (mask == x86_spec_ctrl_base) + return; + + pr_info("Update user space SMT mitigation: STIBP %s\n", + mask & SPEC_CTRL_STIBP ? "always-on" : "off"); + x86_spec_ctrl_base = mask; + on_each_cpu(update_stibp_msr, NULL, 1); +} + +/* Update the static key controlling the evaluation of TIF_SPEC_IB */ +static void update_indir_branch_cond(void) +{ + if (sched_smt_active()) + static_branch_enable(&switch_to_cond_stibp); + else + static_branch_disable(&switch_to_cond_stibp); +} + +void arch_smt_update(void) +{ + /* Enhanced IBRS implies STIBP. No update required. 
*/ + if (spectre_v2_enabled == SPECTRE_V2_IBRS_ENHANCED) + return; + + mutex_lock(&spec_ctrl_mutex); + + switch (spectre_v2_user) { + case SPECTRE_V2_USER_NONE: + break; + case SPECTRE_V2_USER_STRICT: + update_stibp_strict(); + break; + case SPECTRE_V2_USER_PRCTL: + case SPECTRE_V2_USER_SECCOMP: + update_indir_branch_cond(); + break; + } + + mutex_unlock(&spec_ctrl_mutex); } #undef pr_fmt @@ -440,7 +633,7 @@ enum ssb_mitigation_cmd { SPEC_STORE_BYPASS_CMD_SECCOMP, }; -static const char *ssb_strings[] = { +static const char * const ssb_strings[] = { [SPEC_STORE_BYPASS_NONE] = "Vulnerable", [SPEC_STORE_BYPASS_DISABLE] = "Mitigation: Speculative Store Bypass disabled", [SPEC_STORE_BYPASS_PRCTL] = "Mitigation: Speculative Store Bypass disabled via prctl", @@ -450,7 +643,7 @@ static const char *ssb_strings[] = { static const struct { const char *option; enum ssb_mitigation_cmd cmd; -} ssb_mitigation_options[] = { +} ssb_mitigation_options[] __initdata = { { "auto", SPEC_STORE_BYPASS_CMD_AUTO }, /* Platform decides */ { "on", SPEC_STORE_BYPASS_CMD_ON }, /* Disable Speculative Store Bypass */ { "off", SPEC_STORE_BYPASS_CMD_NONE }, /* Don't touch Speculative Store Bypass */ @@ -561,10 +754,25 @@ static void ssb_select_mitigation(void) #undef pr_fmt #define pr_fmt(fmt) "Speculation prctl: " fmt -static int ssb_prctl_set(struct task_struct *task, unsigned long ctrl) +static void task_update_spec_tif(struct task_struct *tsk) { - bool update; + /* Force the update of the real TIF bits */ + set_tsk_thread_flag(tsk, TIF_SPEC_FORCE_UPDATE); + /* + * Immediately update the speculation control MSRs for the current + * task, but for a non-current task delay setting the CPU + * mitigation until it is scheduled next. + * + * This can only happen for SECCOMP mitigation. For PRCTL it's + * always the current task. 
+ */ + if (tsk == current) + speculation_ctrl_update_current(); +} + +static int ssb_prctl_set(struct task_struct *task, unsigned long ctrl) +{ if (ssb_mode != SPEC_STORE_BYPASS_PRCTL && ssb_mode != SPEC_STORE_BYPASS_SECCOMP) return -ENXIO; @@ -575,28 +783,56 @@ static int ssb_prctl_set(struct task_struct *task, unsigned long ctrl) if (task_spec_ssb_force_disable(task)) return -EPERM; task_clear_spec_ssb_disable(task); - update = test_and_clear_tsk_thread_flag(task, TIF_SSBD); + task_update_spec_tif(task); break; case PR_SPEC_DISABLE: task_set_spec_ssb_disable(task); - update = !test_and_set_tsk_thread_flag(task, TIF_SSBD); + task_update_spec_tif(task); break; case PR_SPEC_FORCE_DISABLE: task_set_spec_ssb_disable(task); task_set_spec_ssb_force_disable(task); - update = !test_and_set_tsk_thread_flag(task, TIF_SSBD); + task_update_spec_tif(task); break; default: return -ERANGE; } + return 0; +} - /* - * If being set on non-current task, delay setting the CPU - * mitigation until it is next scheduled. - */ - if (task == current && update) - speculative_store_bypass_update_current(); - +static int ib_prctl_set(struct task_struct *task, unsigned long ctrl) +{ + switch (ctrl) { + case PR_SPEC_ENABLE: + if (spectre_v2_user == SPECTRE_V2_USER_NONE) + return 0; + /* + * Indirect branch speculation is always disabled in strict + * mode. + */ + if (spectre_v2_user == SPECTRE_V2_USER_STRICT) + return -EPERM; + task_clear_spec_ib_disable(task); + task_update_spec_tif(task); + break; + case PR_SPEC_DISABLE: + case PR_SPEC_FORCE_DISABLE: + /* + * Indirect branch speculation is always allowed when + * mitigation is force disabled. 
+ */ + if (spectre_v2_user == SPECTRE_V2_USER_NONE) + return -EPERM; + if (spectre_v2_user == SPECTRE_V2_USER_STRICT) + return 0; + task_set_spec_ib_disable(task); + if (ctrl == PR_SPEC_FORCE_DISABLE) + task_set_spec_ib_force_disable(task); + task_update_spec_tif(task); + break; + default: + return -ERANGE; + } return 0; } @@ -606,6 +842,8 @@ int arch_prctl_spec_ctrl_set(struct task_struct *task, unsigned long which, switch (which) { case PR_SPEC_STORE_BYPASS: return ssb_prctl_set(task, ctrl); + case PR_SPEC_INDIRECT_BRANCH: + return ib_prctl_set(task, ctrl); default: return -ENODEV; } @@ -616,6 +854,8 @@ void arch_seccomp_spec_mitigate(struct task_struct *task) { if (ssb_mode == SPEC_STORE_BYPASS_SECCOMP) ssb_prctl_set(task, PR_SPEC_FORCE_DISABLE); + if (spectre_v2_user == SPECTRE_V2_USER_SECCOMP) + ib_prctl_set(task, PR_SPEC_FORCE_DISABLE); } #endif @@ -638,11 +878,35 @@ static int ssb_prctl_get(struct task_struct *task) } } +static int ib_prctl_get(struct task_struct *task) +{ + if (!boot_cpu_has_bug(X86_BUG_SPECTRE_V2)) + return PR_SPEC_NOT_AFFECTED; + + switch (spectre_v2_user) { + case SPECTRE_V2_USER_NONE: + return PR_SPEC_ENABLE; + case SPECTRE_V2_USER_PRCTL: + case SPECTRE_V2_USER_SECCOMP: + if (task_spec_ib_force_disable(task)) + return PR_SPEC_PRCTL | PR_SPEC_FORCE_DISABLE; + if (task_spec_ib_disable(task)) + return PR_SPEC_PRCTL | PR_SPEC_DISABLE; + return PR_SPEC_PRCTL | PR_SPEC_ENABLE; + case SPECTRE_V2_USER_STRICT: + return PR_SPEC_DISABLE; + default: + return PR_SPEC_NOT_AFFECTED; + } +} + int arch_prctl_spec_ctrl_get(struct task_struct *task, unsigned long which) { switch (which) { case PR_SPEC_STORE_BYPASS: return ssb_prctl_get(task); + case PR_SPEC_INDIRECT_BRANCH: + return ib_prctl_get(task); default: return -ENODEV; } @@ -736,7 +1000,8 @@ static void __init l1tf_select_mitigation(void) #endif half_pa = (u64)l1tf_pfn_limit() << PAGE_SHIFT; - if (e820__mapped_any(half_pa, ULLONG_MAX - half_pa, E820_TYPE_RAM)) { + if (l1tf_mitigation != 
L1TF_MITIGATION_OFF && + e820__mapped_any(half_pa, ULLONG_MAX - half_pa, E820_TYPE_RAM)) { pr_warn("System has more than MAX_PA/2 memory. L1TF mitigation not effective.\n"); pr_info("You may make it effective by booting the kernel with mem=%llu parameter.\n", half_pa); @@ -780,7 +1045,7 @@ early_param("l1tf", l1tf_cmdline); #define L1TF_DEFAULT_MSG "Mitigation: PTE Inversion" #if IS_ENABLED(CONFIG_KVM_INTEL) -static const char *l1tf_vmx_states[] = { +static const char * const l1tf_vmx_states[] = { [VMENTER_L1D_FLUSH_AUTO] = "auto", [VMENTER_L1D_FLUSH_NEVER] = "vulnerable", [VMENTER_L1D_FLUSH_COND] = "conditional cache flushes", @@ -796,13 +1061,14 @@ static ssize_t l1tf_show_state(char *buf) if (l1tf_vmx_mitigation == VMENTER_L1D_FLUSH_EPT_DISABLED || (l1tf_vmx_mitigation == VMENTER_L1D_FLUSH_NEVER && - cpu_smt_control == CPU_SMT_ENABLED)) + sched_smt_active())) { return sprintf(buf, "%s; VMX: %s\n", L1TF_DEFAULT_MSG, l1tf_vmx_states[l1tf_vmx_mitigation]); + } return sprintf(buf, "%s; VMX: %s, SMT %s\n", L1TF_DEFAULT_MSG, l1tf_vmx_states[l1tf_vmx_mitigation], - cpu_smt_control == CPU_SMT_ENABLED ? "vulnerable" : "disabled"); + sched_smt_active() ? 
"vulnerable" : "disabled"); } #else static ssize_t l1tf_show_state(char *buf) @@ -811,6 +1077,36 @@ static ssize_t l1tf_show_state(char *buf) } #endif +static char *stibp_state(void) +{ + if (spectre_v2_enabled == SPECTRE_V2_IBRS_ENHANCED) + return ""; + + switch (spectre_v2_user) { + case SPECTRE_V2_USER_NONE: + return ", STIBP: disabled"; + case SPECTRE_V2_USER_STRICT: + return ", STIBP: forced"; + case SPECTRE_V2_USER_PRCTL: + case SPECTRE_V2_USER_SECCOMP: + if (static_key_enabled(&switch_to_cond_stibp)) + return ", STIBP: conditional"; + } + return ""; +} + +static char *ibpb_state(void) +{ + if (boot_cpu_has(X86_FEATURE_IBPB)) { + if (static_key_enabled(&switch_mm_always_ibpb)) + return ", IBPB: always-on"; + if (static_key_enabled(&switch_mm_cond_ibpb)) + return ", IBPB: conditional"; + return ", IBPB: disabled"; + } + return ""; +} + static ssize_t cpu_show_common(struct device *dev, struct device_attribute *attr, char *buf, unsigned int bug) { @@ -831,9 +1127,11 @@ static ssize_t cpu_show_common(struct device *dev, struct device_attribute *attr return sprintf(buf, "Mitigation: __user pointer sanitization\n"); case X86_BUG_SPECTRE_V2: - return sprintf(buf, "%s%s%s%s\n", spectre_v2_strings[spectre_v2_enabled], - boot_cpu_has(X86_FEATURE_USE_IBPB) ? ", IBPB" : "", + return sprintf(buf, "%s%s%s%s%s%s\n", spectre_v2_strings[spectre_v2_enabled], + ibpb_state(), boot_cpu_has(X86_FEATURE_USE_IBRS_FW) ? ", IBRS_FW" : "", + stibp_state(), + boot_cpu_has(X86_FEATURE_RSB_CTXSW) ? 
", RSB filling" : "", spectre_v2_module_string()); case X86_BUG_SPEC_STORE_BYPASS: diff --git a/arch/x86/kernel/cpu/hypervisor.c b/arch/x86/kernel/cpu/hypervisor.c index 479ca4728de01..5a6f072e67481 100644 --- a/arch/x86/kernel/cpu/hypervisor.c +++ b/arch/x86/kernel/cpu/hypervisor.c @@ -32,6 +32,7 @@ extern const struct hypervisor_x86 x86_hyper_xen_pv; extern const struct hypervisor_x86 x86_hyper_xen_hvm; extern const struct hypervisor_x86 x86_hyper_kvm; extern const struct hypervisor_x86 x86_hyper_jailhouse; +extern const struct hypervisor_x86 x86_hyper_acrn; static const __initconst struct hypervisor_x86 * const hypervisors[] = { @@ -49,6 +50,9 @@ static const __initconst struct hypervisor_x86 * const hypervisors[] = #ifdef CONFIG_JAILHOUSE_GUEST &x86_hyper_jailhouse, #endif +#ifdef CONFIG_ACRN + &x86_hyper_acrn, +#endif }; enum x86_hypervisor_type x86_hyper_type; diff --git a/arch/x86/kernel/cpu/intel_rdt_ctrlmondata.c b/arch/x86/kernel/cpu/intel_rdt_ctrlmondata.c index 0f53049719cd6..627e5c809b33d 100644 --- a/arch/x86/kernel/cpu/intel_rdt_ctrlmondata.c +++ b/arch/x86/kernel/cpu/intel_rdt_ctrlmondata.c @@ -23,6 +23,7 @@ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt +#include #include #include #include @@ -310,9 +311,11 @@ ssize_t rdtgroup_schemata_write(struct kernfs_open_file *of, return -EINVAL; buf[nbytes - 1] = '\0'; + cpus_read_lock(); rdtgrp = rdtgroup_kn_lock_live(of->kn); if (!rdtgrp) { rdtgroup_kn_unlock(of->kn); + cpus_read_unlock(); return -ENOENT; } rdt_last_cmd_clear(); @@ -367,6 +370,7 @@ ssize_t rdtgroup_schemata_write(struct kernfs_open_file *of, out: rdtgroup_kn_unlock(of->kn); + cpus_read_unlock(); return ret ?: nbytes; } diff --git a/arch/x86/kernel/cpu/intel_rdt_rdtgroup.c b/arch/x86/kernel/cpu/intel_rdt_rdtgroup.c index b140c68bc14ba..643670fb89434 100644 --- a/arch/x86/kernel/cpu/intel_rdt_rdtgroup.c +++ b/arch/x86/kernel/cpu/intel_rdt_rdtgroup.c @@ -2805,6 +2805,13 @@ static int rdtgroup_show_options(struct seq_file *seq, struct kernfs_root 
*kf) { if (rdt_resources_all[RDT_RESOURCE_L3DATA].alloc_enabled) seq_puts(seq, ",cdp"); + + if (rdt_resources_all[RDT_RESOURCE_L2DATA].alloc_enabled) + seq_puts(seq, ",cdpl2"); + + if (is_mba_sc(&rdt_resources_all[RDT_RESOURCE_MBA])) + seq_puts(seq, ",mba_MBps"); + return 0; } diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c index 953b3ce92dccf..cdbedeb3f3db4 100644 --- a/arch/x86/kernel/cpu/mcheck/mce.c +++ b/arch/x86/kernel/cpu/mcheck/mce.c @@ -485,7 +485,7 @@ static void mce_report_event(struct pt_regs *regs) * be somewhat complicated (e.g. segment offset would require an instruction * parser). So only support physical addresses up to page granuality for now. */ -static int mce_usable_address(struct mce *m) +int mce_usable_address(struct mce *m) { if (!(m->status & MCI_STATUS_ADDRV)) return 0; @@ -505,6 +505,7 @@ static int mce_usable_address(struct mce *m) return 1; } +EXPORT_SYMBOL_GPL(mce_usable_address); bool mce_is_memory_error(struct mce *m) { @@ -534,7 +535,7 @@ bool mce_is_memory_error(struct mce *m) } EXPORT_SYMBOL_GPL(mce_is_memory_error); -static bool mce_is_correctable(struct mce *m) +bool mce_is_correctable(struct mce *m) { if (m->cpuvendor == X86_VENDOR_AMD && m->status & MCI_STATUS_DEFERRED) return false; @@ -544,6 +545,7 @@ static bool mce_is_correctable(struct mce *m) return true; } +EXPORT_SYMBOL_GPL(mce_is_correctable); static bool cec_add_mce(struct mce *m) { diff --git a/arch/x86/kernel/cpu/mcheck/mce_amd.c b/arch/x86/kernel/cpu/mcheck/mce_amd.c index dd33c357548f1..9f915a8791cc7 100644 --- a/arch/x86/kernel/cpu/mcheck/mce_amd.c +++ b/arch/x86/kernel/cpu/mcheck/mce_amd.c @@ -23,6 +23,7 @@ #include #include +#include #include #include #include @@ -56,7 +57,7 @@ /* Threshold LVT offset is at MSR0xC0000410[15:12] */ #define SMCA_THR_LVT_OFF 0xF000 -static bool thresholding_en; +static bool thresholding_irq_en; static const char * const th_names[] = { "load_store", @@ -99,7 +100,7 @@ static u32 
smca_bank_addrs[MAX_NR_BANKS][NR_BLOCKS] __ro_after_init = [0 ... MAX_NR_BANKS - 1] = { [0 ... NR_BLOCKS - 1] = -1 } }; -const char *smca_get_name(enum smca_bank_types t) +static const char *smca_get_name(enum smca_bank_types t) { if (t >= N_SMCA_BANK_TYPES) return NULL; @@ -534,9 +535,8 @@ prepare_threshold_block(unsigned int bank, unsigned int block, u32 addr, set_offset: offset = setup_APIC_mce_threshold(offset, new); - - if ((offset == new) && (mce_threshold_vector != amd_threshold_interrupt)) - mce_threshold_vector = amd_threshold_interrupt; + if (offset == new) + thresholding_irq_en = true; done: mce_threshold_block_init(&b, offset); @@ -825,7 +825,7 @@ static void __log_error(unsigned int bank, u64 status, u64 addr, u64 misc) mce_log(&m); } -asmlinkage __visible void __irq_entry smp_deferred_error_interrupt(void) +asmlinkage __visible void __irq_entry smp_deferred_error_interrupt(struct pt_regs *regs) { entering_irq(); trace_deferred_error_apic_entry(DEFERRED_ERROR_VECTOR); @@ -1357,9 +1357,6 @@ int mce_threshold_remove_device(unsigned int cpu) { unsigned int bank; - if (!thresholding_en) - return 0; - for (bank = 0; bank < mca_cfg.banks; ++bank) { if (!(per_cpu(bank_map, cpu) & (1 << bank))) continue; @@ -1377,9 +1374,6 @@ int mce_threshold_create_device(unsigned int cpu) struct threshold_bank **bp; int err = 0; - if (!thresholding_en) - return 0; - bp = per_cpu(threshold_banks, cpu); if (bp) return 0; @@ -1408,9 +1402,6 @@ static __init int threshold_init_device(void) { unsigned lcpu = 0; - if (mce_threshold_vector == amd_threshold_interrupt) - thresholding_en = true; - /* to hit CPUs online before the notifier is up */ for_each_online_cpu(lcpu) { int err = mce_threshold_create_device(lcpu); @@ -1419,6 +1410,9 @@ static __init int threshold_init_device(void) return err; } + if (thresholding_irq_en) + mce_threshold_vector = amd_threshold_interrupt; + return 0; } /* diff --git a/arch/x86/kernel/cpu/mcheck/therm_throt.c 
b/arch/x86/kernel/cpu/mcheck/therm_throt.c index 2da67b70ba989..ee229ceee745c 100644 --- a/arch/x86/kernel/cpu/mcheck/therm_throt.c +++ b/arch/x86/kernel/cpu/mcheck/therm_throt.c @@ -25,6 +25,7 @@ #include #include +#include #include #include #include @@ -390,7 +391,7 @@ static void unexpected_thermal_interrupt(void) static void (*smp_thermal_vector)(void) = unexpected_thermal_interrupt; -asmlinkage __visible void __irq_entry smp_thermal_interrupt(struct pt_regs *r) +asmlinkage __visible void __irq_entry smp_thermal_interrupt(struct pt_regs *regs) { entering_irq(); trace_thermal_apic_entry(THERMAL_APIC_VECTOR); diff --git a/arch/x86/kernel/cpu/mcheck/threshold.c b/arch/x86/kernel/cpu/mcheck/threshold.c index 2b584b319eff3..c21e0a1efd0fb 100644 --- a/arch/x86/kernel/cpu/mcheck/threshold.c +++ b/arch/x86/kernel/cpu/mcheck/threshold.c @@ -6,6 +6,7 @@ #include #include +#include #include #include #include @@ -18,7 +19,7 @@ static void default_threshold_interrupt(void) void (*mce_threshold_vector)(void) = default_threshold_interrupt; -asmlinkage __visible void __irq_entry smp_threshold_interrupt(void) +asmlinkage __visible void __irq_entry smp_threshold_interrupt(struct pt_regs *regs) { entering_irq(); trace_threshold_apic_entry(THRESHOLD_APIC_VECTOR); diff --git a/arch/x86/kernel/cpu/mshyperv.c b/arch/x86/kernel/cpu/mshyperv.c index ad12733f60585..852e74e48890b 100644 --- a/arch/x86/kernel/cpu/mshyperv.c +++ b/arch/x86/kernel/cpu/mshyperv.c @@ -20,6 +20,7 @@ #include #include #include +#include #include #include #include @@ -285,6 +286,16 @@ static void __init ms_hyperv_init_platform(void) if (efi_enabled(EFI_BOOT)) x86_platform.get_nmi_reason = hv_get_nmi_reason; + /* + * Hyper-V VMs have a PIT emulation quirk such that zeroing the + * counter register during PIT shutdown restarts the PIT. So it + * continues to interrupt @18.2 HZ. Setting i8253_clear_counter + * to false tells pit_shutdown() not to zero the counter so that + * the PIT really is shutdown. 
Generation 2 VMs don't have a PIT, + * and setting this value has no effect. + */ + i8253_clear_counter_on_shutdown = false; + #if IS_ENABLED(CONFIG_HYPERV) /* * Setup the hook to get control post apic initialization. diff --git a/arch/x86/kernel/cpu/mtrr/if.c b/arch/x86/kernel/cpu/mtrr/if.c index 40eee6cc41248..254683b503a9f 100644 --- a/arch/x86/kernel/cpu/mtrr/if.c +++ b/arch/x86/kernel/cpu/mtrr/if.c @@ -165,6 +165,8 @@ mtrr_ioctl(struct file *file, unsigned int cmd, unsigned long __arg) struct mtrr_gentry gentry; void __user *arg = (void __user *) __arg; + memset(&gentry, 0, sizeof(gentry)); + switch (cmd) { case MTRRIOC_ADD_ENTRY: case MTRRIOC_SET_ENTRY: diff --git a/arch/x86/kernel/cpu/vmware.c b/arch/x86/kernel/cpu/vmware.c index 8e005329648b6..d805202c63cdc 100644 --- a/arch/x86/kernel/cpu/vmware.c +++ b/arch/x86/kernel/cpu/vmware.c @@ -77,7 +77,7 @@ static __init int setup_vmw_sched_clock(char *s) } early_param("no-vmw-sched-clock", setup_vmw_sched_clock); -static unsigned long long vmware_sched_clock(void) +static unsigned long long notrace vmware_sched_clock(void) { unsigned long long ns; diff --git a/arch/x86/kernel/e820.c b/arch/x86/kernel/e820.c index c88c23c658c1e..d1f25c8314475 100644 --- a/arch/x86/kernel/e820.c +++ b/arch/x86/kernel/e820.c @@ -1248,7 +1248,6 @@ void __init e820__memblock_setup(void) { int i; u64 end; - u64 addr = 0; /* * The bootstrap memblock region count maximum is 128 entries @@ -1265,21 +1264,13 @@ void __init e820__memblock_setup(void) struct e820_entry *entry = &e820_table->entries[i]; end = entry->addr + entry->size; - if (addr < entry->addr) - memblock_reserve(addr, entry->addr - addr); - addr = end; if (end != (resource_size_t)end) continue; - /* - * all !E820_TYPE_RAM ranges (including gap ranges) are put - * into memblock.reserved to make sure that struct pages in - * such regions are not left uninitialized after bootup. 
- */ if (entry->type != E820_TYPE_RAM && entry->type != E820_TYPE_RESERVED_KERN) - memblock_reserve(entry->addr, entry->size); - else - memblock_add(entry->addr, entry->size); + continue; + + memblock_add(entry->addr, entry->size); } /* Throw away partial pages: */ diff --git a/arch/x86/kernel/early_printk.c b/arch/x86/kernel/early_printk.c index 5e801c8c8ce7c..374a52fa52969 100644 --- a/arch/x86/kernel/early_printk.c +++ b/arch/x86/kernel/early_printk.c @@ -213,8 +213,9 @@ static unsigned int mem32_serial_in(unsigned long addr, int offset) * early_pci_serial_init() * * This function is invoked when the early_printk param starts with "pciserial" - * The rest of the param should be ",B:D.F,baud" where B, D & F describe the - * location of a PCI device that must be a UART device. + * The rest of the param should be "[force],B:D.F,baud", where B, D & F describe + * the location of a PCI device that must be a UART device. "force" is optional + * and overrides the use of an UART device with a wrong PCI class code. 
*/ static __init void early_pci_serial_init(char *s) { @@ -224,17 +225,23 @@ static __init void early_pci_serial_init(char *s) u32 classcode, bar0; u16 cmdreg; char *e; + int force = 0; - - /* - * First, part the param to get the BDF values - */ if (*s == ',') ++s; if (*s == 0) return; + /* Force the use of an UART device with wrong class code */ + if (!strncmp(s, "force,", 6)) { + force = 1; + s += 6; + } + + /* + * Part the param to get the BDF values + */ bus = (u8)simple_strtoul(s, &e, 16); s = e; if (*s != ':') @@ -253,7 +260,7 @@ static __init void early_pci_serial_init(char *s) s++; /* - * Second, find the device from the BDF + * Find the device from the BDF */ cmdreg = read_pci_config(bus, slot, func, PCI_COMMAND); classcode = read_pci_config(bus, slot, func, PCI_CLASS_REVISION); @@ -264,8 +271,10 @@ static __init void early_pci_serial_init(char *s) */ if (((classcode >> 16 != PCI_CLASS_COMMUNICATION_MODEM) && (classcode >> 16 != PCI_CLASS_COMMUNICATION_SERIAL)) || - (((classcode >> 8) & 0xff) != 0x02)) /* 16550 I/F at BAR0 */ - return; + (((classcode >> 8) & 0xff) != 0x02)) /* 16550 I/F at BAR0 */ { + if (!force) + return; + } /* * Determine if it is IO or memory mapped @@ -289,7 +298,7 @@ static __init void early_pci_serial_init(char *s) } /* - * Lastly, initialize the hardware + * Initialize the hardware */ if (*s) { if (strcmp(s, "nocfg") == 0) diff --git a/arch/x86/kernel/fpu/signal.c b/arch/x86/kernel/fpu/signal.c index 61a949d84dfa5..d99a8ee9e185e 100644 --- a/arch/x86/kernel/fpu/signal.c +++ b/arch/x86/kernel/fpu/signal.c @@ -344,10 +344,10 @@ static int __fpu__restore_sig(void __user *buf, void __user *buf_fx, int size) sanitize_restored_xstate(tsk, &env, xfeatures, fx_only); } + local_bh_disable(); fpu->initialized = 1; - preempt_disable(); fpu__restore(fpu); - preempt_enable(); + local_bh_enable(); return err; } else { diff --git a/arch/x86/kernel/ftrace.c b/arch/x86/kernel/ftrace.c index 01ebcb6f263e3..7ee8067cbf45c 100644 --- 
a/arch/x86/kernel/ftrace.c +++ b/arch/x86/kernel/ftrace.c @@ -994,7 +994,6 @@ void prepare_ftrace_return(unsigned long self_addr, unsigned long *parent, { unsigned long old; int faulted; - struct ftrace_graph_ent trace; unsigned long return_hooker = (unsigned long) &return_to_handler; @@ -1046,19 +1045,7 @@ void prepare_ftrace_return(unsigned long self_addr, unsigned long *parent, return; } - trace.func = self_addr; - trace.depth = current->curr_ret_stack + 1; - - /* Only trace if the calling function expects to */ - if (!ftrace_graph_entry(&trace)) { + if (function_graph_enter(old, self_addr, frame_pointer, parent)) *parent = old; - return; - } - - if (ftrace_push_return_trace(old, self_addr, &trace.depth, - frame_pointer, parent) == -EBUSY) { - *parent = old; - return; - } } #endif /* CONFIG_FUNCTION_GRAPH_TRACER */ diff --git a/arch/x86/kernel/irq.c b/arch/x86/kernel/irq.c index 59b5f2ea7c2f3..6b3eaabeb019d 100644 --- a/arch/x86/kernel/irq.c +++ b/arch/x86/kernel/irq.c @@ -134,7 +134,7 @@ int arch_show_interrupts(struct seq_file *p, int prec) seq_printf(p, "%10u ", per_cpu(mce_poll_count, j)); seq_puts(p, " Machine check polls\n"); #endif -#if IS_ENABLED(CONFIG_HYPERV) || defined(CONFIG_XEN) +#if IS_ENABLED(CONFIG_HYPERV) || defined(CONFIG_XEN) || defined(CONFIG_ACRN) if (test_bit(HYPERVISOR_CALLBACK_VECTOR, system_vectors)) { seq_printf(p, "%*s: ", prec, "HYP"); for_each_online_cpu(j) diff --git a/arch/x86/kernel/kprobes/opt.c b/arch/x86/kernel/kprobes/opt.c index eaf02f2e73005..6adf6e6c29339 100644 --- a/arch/x86/kernel/kprobes/opt.c +++ b/arch/x86/kernel/kprobes/opt.c @@ -179,7 +179,7 @@ optimized_callback(struct optimized_kprobe *op, struct pt_regs *regs) opt_pre_handler(&op->kp, regs); __this_cpu_write(current_kprobe, NULL); } - preempt_enable_no_resched(); + preempt_enable(); } NOKPROBE_SYMBOL(optimized_callback); @@ -189,7 +189,7 @@ static int copy_optimized_instructions(u8 *dest, u8 *src, u8 *real) int len = 0, ret; while (len < RELATIVEJUMP_SIZE) { - 
ret = __copy_instruction(dest + len, src + len, real, &insn); + ret = __copy_instruction(dest + len, src + len, real + len, &insn); if (!ret || !can_boost(&insn, src + len)) return -EINVAL; len += ret; diff --git a/arch/x86/kernel/kvm.c b/arch/x86/kernel/kvm.c index d9b71924c23c9..7f89d609095ac 100644 --- a/arch/x86/kernel/kvm.c +++ b/arch/x86/kernel/kvm.c @@ -457,6 +457,7 @@ static void __send_ipi_mask(const struct cpumask *mask, int vector) #else u64 ipi_bitmap = 0; #endif + long ret; if (cpumask_empty(mask)) return; @@ -482,8 +483,9 @@ static void __send_ipi_mask(const struct cpumask *mask, int vector) } else if (apic_id < min + KVM_IPI_CLUSTER_SIZE) { max = apic_id < max ? max : apic_id; } else { - kvm_hypercall4(KVM_HC_SEND_IPI, (unsigned long)ipi_bitmap, + ret = kvm_hypercall4(KVM_HC_SEND_IPI, (unsigned long)ipi_bitmap, (unsigned long)(ipi_bitmap >> BITS_PER_LONG), min, icr); + WARN_ONCE(ret < 0, "KVM: failed to send PV IPI: %ld", ret); min = max = apic_id; ipi_bitmap = 0; } @@ -491,8 +493,9 @@ static void __send_ipi_mask(const struct cpumask *mask, int vector) } if (ipi_bitmap) { - kvm_hypercall4(KVM_HC_SEND_IPI, (unsigned long)ipi_bitmap, + ret = kvm_hypercall4(KVM_HC_SEND_IPI, (unsigned long)ipi_bitmap, (unsigned long)(ipi_bitmap >> BITS_PER_LONG), min, icr); + WARN_ONCE(ret < 0, "KVM: failed to send PV IPI: %ld", ret); } local_irq_restore(flags); diff --git a/arch/x86/kernel/ldt.c b/arch/x86/kernel/ldt.c index 733e6ace0fa4e..65590eee62893 100644 --- a/arch/x86/kernel/ldt.c +++ b/arch/x86/kernel/ldt.c @@ -199,14 +199,6 @@ static void sanity_check_ldt_mapping(struct mm_struct *mm) /* * If PTI is enabled, this maps the LDT into the kernelmode and * usermode tables for the given mm. - * - * There is no corresponding unmap function. Even if the LDT is freed, we - * leave the PTEs around until the slot is reused or the mm is destroyed. - * This is harmless: the LDT is always in ordinary memory, and no one will - * access the freed slot. 
- * - * If we wanted to unmap freed LDTs, we'd also need to do a flush to make - * it useful, and the flush would slow down modify_ldt(). */ static int map_ldt_struct(struct mm_struct *mm, struct ldt_struct *ldt, int slot) @@ -214,8 +206,7 @@ map_ldt_struct(struct mm_struct *mm, struct ldt_struct *ldt, int slot) unsigned long va; bool is_vmalloc; spinlock_t *ptl; - pgd_t *pgd; - int i; + int i, nr_pages; if (!static_cpu_has(X86_FEATURE_PTI)) return 0; @@ -229,16 +220,11 @@ map_ldt_struct(struct mm_struct *mm, struct ldt_struct *ldt, int slot) /* Check if the current mappings are sane */ sanity_check_ldt_mapping(mm); - /* - * Did we already have the top level entry allocated? We can't - * use pgd_none() for this because it doens't do anything on - * 4-level page table kernels. - */ - pgd = pgd_offset(mm, LDT_BASE_ADDR); - is_vmalloc = is_vmalloc_addr(ldt->entries); - for (i = 0; i * PAGE_SIZE < ldt->nr_entries * LDT_ENTRY_SIZE; i++) { + nr_pages = DIV_ROUND_UP(ldt->nr_entries * LDT_ENTRY_SIZE, PAGE_SIZE); + + for (i = 0; i < nr_pages; i++) { unsigned long offset = i << PAGE_SHIFT; const void *src = (char *)ldt->entries + offset; unsigned long pfn; @@ -272,13 +258,39 @@ map_ldt_struct(struct mm_struct *mm, struct ldt_struct *ldt, int slot) /* Propagate LDT mapping to the user page-table */ map_ldt_struct_to_user(mm); - va = (unsigned long)ldt_slot_va(slot); - flush_tlb_mm_range(mm, va, va + LDT_SLOT_STRIDE, 0); - ldt->slot = slot; return 0; } +static void unmap_ldt_struct(struct mm_struct *mm, struct ldt_struct *ldt) +{ + unsigned long va; + int i, nr_pages; + + if (!ldt) + return; + + /* LDT map/unmap is only required for PTI */ + if (!static_cpu_has(X86_FEATURE_PTI)) + return; + + nr_pages = DIV_ROUND_UP(ldt->nr_entries * LDT_ENTRY_SIZE, PAGE_SIZE); + + for (i = 0; i < nr_pages; i++) { + unsigned long offset = i << PAGE_SHIFT; + spinlock_t *ptl; + pte_t *ptep; + + va = (unsigned long)ldt_slot_va(ldt->slot) + offset; + ptep = get_locked_pte(mm, va, &ptl); + 
pte_clear(mm, va, ptep); + pte_unmap_unlock(ptep, ptl); + } + + va = (unsigned long)ldt_slot_va(ldt->slot); + flush_tlb_mm_range(mm, va, va + nr_pages * PAGE_SIZE, 0); +} + #else /* !CONFIG_PAGE_TABLE_ISOLATION */ static int @@ -286,6 +298,10 @@ map_ldt_struct(struct mm_struct *mm, struct ldt_struct *ldt, int slot) { return 0; } + +static void unmap_ldt_struct(struct mm_struct *mm, struct ldt_struct *ldt) +{ +} #endif /* CONFIG_PAGE_TABLE_ISOLATION */ static void free_ldt_pgtables(struct mm_struct *mm) @@ -524,6 +540,7 @@ static int write_ldt(void __user *ptr, unsigned long bytecount, int oldmode) } install_ldt(mm, new_ldt); + unmap_ldt_struct(mm, old_ldt); free_ldt_struct(old_ldt); error = 0; diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c index c93fcfdf16734..7d31192296a87 100644 --- a/arch/x86/kernel/process.c +++ b/arch/x86/kernel/process.c @@ -40,6 +40,8 @@ #include #include +#include "process.h" + /* * per-CPU TSS segments. Threads are completely 'soft' on Linux, * no more per-task TSS's. The TSS size is kept cacheline-aligned @@ -252,11 +254,12 @@ void arch_setup_new_exec(void) enable_cpuid(); } -static inline void switch_to_bitmap(struct tss_struct *tss, - struct thread_struct *prev, +static inline void switch_to_bitmap(struct thread_struct *prev, struct thread_struct *next, unsigned long tifp, unsigned long tifn) { + struct tss_struct *tss = this_cpu_ptr(&cpu_tss_rw); + if (tifn & _TIF_IO_BITMAP) { /* * Copy the relevant range of the IO bitmap. @@ -395,32 +398,85 @@ static __always_inline void amd_set_ssb_virt_state(unsigned long tifn) wrmsrl(MSR_AMD64_VIRT_SPEC_CTRL, ssbd_tif_to_spec_ctrl(tifn)); } -static __always_inline void intel_set_ssb_state(unsigned long tifn) +/* + * Update the MSRs managing speculation control, during context switch. 
+ * + * tifp: Previous task's thread flags + * tifn: Next task's thread flags + */ +static __always_inline void __speculation_ctrl_update(unsigned long tifp, + unsigned long tifn) { - u64 msr = x86_spec_ctrl_base | ssbd_tif_to_spec_ctrl(tifn); + unsigned long tif_diff = tifp ^ tifn; + u64 msr = x86_spec_ctrl_base; + bool updmsr = false; + + /* + * If TIF_SSBD is different, select the proper mitigation + * method. Note that if SSBD mitigation is disabled or permanentely + * enabled this branch can't be taken because nothing can set + * TIF_SSBD. + */ + if (tif_diff & _TIF_SSBD) { + if (static_cpu_has(X86_FEATURE_VIRT_SSBD)) { + amd_set_ssb_virt_state(tifn); + } else if (static_cpu_has(X86_FEATURE_LS_CFG_SSBD)) { + amd_set_core_ssb_state(tifn); + } else if (static_cpu_has(X86_FEATURE_SPEC_CTRL_SSBD) || + static_cpu_has(X86_FEATURE_AMD_SSBD)) { + msr |= ssbd_tif_to_spec_ctrl(tifn); + updmsr = true; + } + } + + /* + * Only evaluate TIF_SPEC_IB if conditional STIBP is enabled, + * otherwise avoid the MSR write. 
+ */ + if (IS_ENABLED(CONFIG_SMP) && + static_branch_unlikely(&switch_to_cond_stibp)) { + updmsr |= !!(tif_diff & _TIF_SPEC_IB); + msr |= stibp_tif_to_spec_ctrl(tifn); + } - wrmsrl(MSR_IA32_SPEC_CTRL, msr); + if (updmsr) + wrmsrl(MSR_IA32_SPEC_CTRL, msr); } -static __always_inline void __speculative_store_bypass_update(unsigned long tifn) +static unsigned long speculation_ctrl_update_tif(struct task_struct *tsk) { - if (static_cpu_has(X86_FEATURE_VIRT_SSBD)) - amd_set_ssb_virt_state(tifn); - else if (static_cpu_has(X86_FEATURE_LS_CFG_SSBD)) - amd_set_core_ssb_state(tifn); - else - intel_set_ssb_state(tifn); + if (test_and_clear_tsk_thread_flag(tsk, TIF_SPEC_FORCE_UPDATE)) { + if (task_spec_ssb_disable(tsk)) + set_tsk_thread_flag(tsk, TIF_SSBD); + else + clear_tsk_thread_flag(tsk, TIF_SSBD); + + if (task_spec_ib_disable(tsk)) + set_tsk_thread_flag(tsk, TIF_SPEC_IB); + else + clear_tsk_thread_flag(tsk, TIF_SPEC_IB); + } + /* Return the updated threadinfo flags*/ + return task_thread_info(tsk)->flags; } -void speculative_store_bypass_update(unsigned long tif) +void speculation_ctrl_update(unsigned long tif) { + /* Forced update. 
Make sure all relevant TIF flags are different */ preempt_disable(); - __speculative_store_bypass_update(tif); + __speculation_ctrl_update(~tif, tif); preempt_enable(); } -void __switch_to_xtra(struct task_struct *prev_p, struct task_struct *next_p, - struct tss_struct *tss) +/* Called from seccomp/prctl update */ +void speculation_ctrl_update_current(void) +{ + preempt_disable(); + speculation_ctrl_update(speculation_ctrl_update_tif(current)); + preempt_enable(); +} + +void __switch_to_xtra(struct task_struct *prev_p, struct task_struct *next_p) { struct thread_struct *prev, *next; unsigned long tifp, tifn; @@ -430,7 +486,7 @@ void __switch_to_xtra(struct task_struct *prev_p, struct task_struct *next_p, tifn = READ_ONCE(task_thread_info(next_p)->flags); tifp = READ_ONCE(task_thread_info(prev_p)->flags); - switch_to_bitmap(tss, prev, next, tifp, tifn); + switch_to_bitmap(prev, next, tifp, tifn); propagate_user_return_notify(prev_p, next_p); @@ -451,8 +507,15 @@ void __switch_to_xtra(struct task_struct *prev_p, struct task_struct *next_p, if ((tifp ^ tifn) & _TIF_NOCPUID) set_cpuid_faulting(!!(tifn & _TIF_NOCPUID)); - if ((tifp ^ tifn) & _TIF_SSBD) - __speculative_store_bypass_update(tifn); + if (likely(!((tifp | tifn) & _TIF_SPEC_FORCE_UPDATE))) { + __speculation_ctrl_update(tifp, tifn); + } else { + speculation_ctrl_update_tif(prev_p); + tifn = speculation_ctrl_update_tif(next_p); + + /* Enforce MSR update to ensure consistent state */ + __speculation_ctrl_update(~tifn, tifn); + } } /* diff --git a/arch/x86/kernel/process.h b/arch/x86/kernel/process.h new file mode 100644 index 0000000000000..898e97cf6629d --- /dev/null +++ b/arch/x86/kernel/process.h @@ -0,0 +1,39 @@ +// SPDX-License-Identifier: GPL-2.0 +// +// Code shared between 32 and 64 bit + +#include + +void __switch_to_xtra(struct task_struct *prev_p, struct task_struct *next_p); + +/* + * This needs to be inline to optimize for the common case where no extra + * work needs to be done. 
+ */ +static inline void switch_to_extra(struct task_struct *prev, + struct task_struct *next) +{ + unsigned long next_tif = task_thread_info(next)->flags; + unsigned long prev_tif = task_thread_info(prev)->flags; + + if (IS_ENABLED(CONFIG_SMP)) { + /* + * Avoid __switch_to_xtra() invocation when conditional + * STIPB is disabled and the only different bit is + * TIF_SPEC_IB. For CONFIG_SMP=n TIF_SPEC_IB is not + * in the TIF_WORK_CTXSW masks. + */ + if (!static_branch_likely(&switch_to_cond_stibp)) { + prev_tif &= ~_TIF_SPEC_IB; + next_tif &= ~_TIF_SPEC_IB; + } + } + + /* + * __switch_to_xtra() handles debug registers, i/o bitmaps, + * speculation mitigations etc. + */ + if (unlikely(next_tif & _TIF_WORK_CTXSW_NEXT || + prev_tif & _TIF_WORK_CTXSW_PREV)) + __switch_to_xtra(prev, next); +} diff --git a/arch/x86/kernel/process_32.c b/arch/x86/kernel/process_32.c index 5046a3c9dec2f..d3e593eb189f0 100644 --- a/arch/x86/kernel/process_32.c +++ b/arch/x86/kernel/process_32.c @@ -59,6 +59,8 @@ #include #include +#include "process.h" + void __show_regs(struct pt_regs *regs, enum show_regs_mode mode) { unsigned long cr0 = 0L, cr2 = 0L, cr3 = 0L, cr4 = 0L; @@ -232,7 +234,6 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p) struct fpu *prev_fpu = &prev->fpu; struct fpu *next_fpu = &next->fpu; int cpu = smp_processor_id(); - struct tss_struct *tss = &per_cpu(cpu_tss_rw, cpu); /* never put a printk in __switch_to... printk() calls wake_up*() indirectly */ @@ -264,12 +265,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p) if (get_kernel_rpl() && unlikely(prev->iopl != next->iopl)) set_iopl_mask(next->iopl); - /* - * Now maybe handle debug registers and/or IO bitmaps - */ - if (unlikely(task_thread_info(prev_p)->flags & _TIF_WORK_CTXSW_PREV || - task_thread_info(next_p)->flags & _TIF_WORK_CTXSW_NEXT)) - __switch_to_xtra(prev_p, next_p, tss); + switch_to_extra(prev_p, next_p); /* * Leave lazy mode, flushing any hypercalls made here. 
diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c index ea5ea850348da..a0854f283efda 100644 --- a/arch/x86/kernel/process_64.c +++ b/arch/x86/kernel/process_64.c @@ -59,6 +59,8 @@ #include #endif +#include "process.h" + __visible DEFINE_PER_CPU(unsigned long, rsp_scratch); /* Prints also some state that isn't saved in the pt_regs */ @@ -422,7 +424,6 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p) struct fpu *prev_fpu = &prev->fpu; struct fpu *next_fpu = &next->fpu; int cpu = smp_processor_id(); - struct tss_struct *tss = &per_cpu(cpu_tss_rw, cpu); WARN_ON_ONCE(IS_ENABLED(CONFIG_DEBUG_ENTRY) && this_cpu_read(irq_count) != -1); @@ -489,12 +490,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p) /* Reload sp0. */ update_task_stack(next_p); - /* - * Now maybe reload the debug registers and handle I/O bitmaps - */ - if (unlikely(task_thread_info(next_p)->flags & _TIF_WORK_CTXSW_NEXT || - task_thread_info(prev_p)->flags & _TIF_WORK_CTXSW_PREV)) - __switch_to_xtra(prev_p, next_p, tss); + switch_to_extra(prev_p, next_p); #ifdef CONFIG_XEN_PV /* diff --git a/arch/x86/kernel/smp.c b/arch/x86/kernel/smp.c index 04adc8d60aed8..9bbeec53634c4 100644 --- a/arch/x86/kernel/smp.c +++ b/arch/x86/kernel/smp.c @@ -117,6 +117,19 @@ static atomic_t stopping_cpu = ATOMIC_INIT(-1); static bool smp_no_nmi_ipi = false; +static DEFINE_PER_CPU(struct pt_regs, cpu_regs); + +/* Store regs of this CPU for RAM dump decoding help */ +static inline void store_regs(struct pt_regs *regs) +{ + struct pt_regs *print_regs; + print_regs = &get_cpu_var(cpu_regs); + crash_setup_regs(print_regs, regs); + + /* Flush CPU cache */ + wbinvd(); +} + /* * this function sends a 'reschedule' IPI to another CPU. 
* it goes straight through and wastes no time serializing @@ -163,6 +176,7 @@ static int smp_stop_nmi_callback(unsigned int val, struct pt_regs *regs) if (raw_smp_processor_id() == atomic_read(&stopping_cpu)) return NMI_HANDLED; + store_regs(regs); cpu_emergency_vmxoff(); stop_this_cpu(NULL); @@ -173,9 +187,10 @@ static int smp_stop_nmi_callback(unsigned int val, struct pt_regs *regs) * this function calls the 'stop' function on all other CPUs in the system. */ -asmlinkage __visible void smp_reboot_interrupt(void) +__visible void smp_reboot_interrupt(struct pt_regs *regs) { ipi_entering_ack_irq(); + store_regs(regs); cpu_emergency_vmxoff(); stop_this_cpu(NULL); irq_exit(); @@ -247,6 +262,7 @@ static void native_stop_other_cpus(int wait) } finish: + store_regs(NULL); local_irq_save(flags); disable_local_APIC(); mcheck_cpu_clear(this_cpu_ptr(&cpu_info)); diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c index f02ecaf97904b..6489067b78a4e 100644 --- a/arch/x86/kernel/smpboot.c +++ b/arch/x86/kernel/smpboot.c @@ -1346,7 +1346,7 @@ void __init calculate_max_logical_packages(void) * extrapolate the boot cpu's data to all packages. */ ncpus = cpu_data(0).booted_cores * topology_max_smt_threads(); - __max_logical_packages = DIV_ROUND_UP(nr_cpu_ids, ncpus); + __max_logical_packages = DIV_ROUND_UP(total_cpus, ncpus); pr_info("Max logical packages: %u\n", __max_logical_packages); } diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c index fbb0e6df121b2..3692de84c4201 100644 --- a/arch/x86/kvm/lapic.c +++ b/arch/x86/kvm/lapic.c @@ -55,7 +55,7 @@ #define PRIo64 "o" /* #define apic_debug(fmt,arg...) printk(KERN_WARNING fmt,##arg) */ -#define apic_debug(fmt, arg...) +#define apic_debug(fmt, arg...) 
do {} while (0) /* 14 is the version for Xeon and Pentium 8.4.8*/ #define APIC_VERSION (0x14UL | ((KVM_APIC_LVT_NUM - 1) << 16)) @@ -571,6 +571,11 @@ int kvm_pv_send_ipi(struct kvm *kvm, unsigned long ipi_bitmap_low, rcu_read_lock(); map = rcu_dereference(kvm->arch.apic_map); + if (unlikely(!map)) { + count = -EOPNOTSUPP; + goto out; + } + if (min > map->max_apic_id) goto out; /* Bits above cluster_size are masked in the caller. */ diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c index 51b953ad9d4ef..1b82bc7c3ccaa 100644 --- a/arch/x86/kvm/mmu.c +++ b/arch/x86/kvm/mmu.c @@ -5013,9 +5013,9 @@ static bool need_remote_flush(u64 old, u64 new) } static u64 mmu_pte_write_fetch_gpte(struct kvm_vcpu *vcpu, gpa_t *gpa, - const u8 *new, int *bytes) + int *bytes) { - u64 gentry; + u64 gentry = 0; int r; /* @@ -5027,22 +5027,12 @@ static u64 mmu_pte_write_fetch_gpte(struct kvm_vcpu *vcpu, gpa_t *gpa, /* Handle a 32-bit guest writing two halves of a 64-bit gpte */ *gpa &= ~(gpa_t)7; *bytes = 8; - r = kvm_vcpu_read_guest(vcpu, *gpa, &gentry, 8); - if (r) - gentry = 0; - new = (const u8 *)&gentry; } - switch (*bytes) { - case 4: - gentry = *(const u32 *)new; - break; - case 8: - gentry = *(const u64 *)new; - break; - default: - gentry = 0; - break; + if (*bytes == 4 || *bytes == 8) { + r = kvm_vcpu_read_guest_atomic(vcpu, *gpa, &gentry, *bytes); + if (r) + gentry = 0; } return gentry; @@ -5146,8 +5136,6 @@ static void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa, pgprintk("%s: gpa %llx bytes %d\n", __func__, gpa, bytes); - gentry = mmu_pte_write_fetch_gpte(vcpu, &gpa, new, &bytes); - /* * No need to care whether allocation memory is successful * or not since pte prefetch is skiped if it does not have @@ -5156,6 +5144,9 @@ static void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa, mmu_topup_memory_caches(vcpu); spin_lock(&vcpu->kvm->mmu_lock); + + gentry = mmu_pte_write_fetch_gpte(vcpu, &gpa, &bytes); + ++vcpu->kvm->stat.mmu_pte_write; kvm_mmu_audit(vcpu, 
AUDIT_PRE_PTE_WRITE); diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c index 61ccfb13899ed..f1d3fe5a0c657 100644 --- a/arch/x86/kvm/svm.c +++ b/arch/x86/kvm/svm.c @@ -1444,7 +1444,7 @@ static u64 svm_read_l1_tsc_offset(struct kvm_vcpu *vcpu) return vcpu->arch.tsc_offset; } -static void svm_write_tsc_offset(struct kvm_vcpu *vcpu, u64 offset) +static u64 svm_write_l1_tsc_offset(struct kvm_vcpu *vcpu, u64 offset) { struct vcpu_svm *svm = to_svm(vcpu); u64 g_tsc_offset = 0; @@ -1462,6 +1462,7 @@ static void svm_write_tsc_offset(struct kvm_vcpu *vcpu, u64 offset) svm->vmcb->control.tsc_offset = offset + g_tsc_offset; mark_dirty(svm->vmcb, VMCB_INTERCEPTS); + return svm->vmcb->control.tsc_offset; } static void avic_init_vmcb(struct vcpu_svm *svm) @@ -1662,20 +1663,23 @@ static u64 *avic_get_physical_id_entry(struct kvm_vcpu *vcpu, static int avic_init_access_page(struct kvm_vcpu *vcpu) { struct kvm *kvm = vcpu->kvm; - int ret; + int ret = 0; + mutex_lock(&kvm->slots_lock); if (kvm->arch.apic_access_page_done) - return 0; + goto out; - ret = x86_set_memory_region(kvm, - APIC_ACCESS_PAGE_PRIVATE_MEMSLOT, - APIC_DEFAULT_PHYS_BASE, - PAGE_SIZE); + ret = __x86_set_memory_region(kvm, + APIC_ACCESS_PAGE_PRIVATE_MEMSLOT, + APIC_DEFAULT_PHYS_BASE, + PAGE_SIZE); if (ret) - return ret; + goto out; kvm->arch.apic_access_page_done = true; - return 0; +out: + mutex_unlock(&kvm->slots_lock); + return ret; } static int avic_init_backing_page(struct kvm_vcpu *vcpu) @@ -2187,21 +2191,31 @@ static struct kvm_vcpu *svm_create_vcpu(struct kvm *kvm, unsigned int id) return ERR_PTR(err); } +static void svm_clear_current_vmcb(struct vmcb *vmcb) +{ + int i; + + for_each_online_cpu(i) + cmpxchg(&per_cpu(svm_data, i)->current_vmcb, vmcb, NULL); +} + static void svm_free_vcpu(struct kvm_vcpu *vcpu) { struct vcpu_svm *svm = to_svm(vcpu); + /* + * The vmcb page can be recycled, causing a false negative in + * svm_vcpu_load(). 
So, ensure that no logical CPU has this + * vmcb page recorded as its current vmcb. + */ + svm_clear_current_vmcb(svm->vmcb); + __free_page(pfn_to_page(__sme_clr(svm->vmcb_pa) >> PAGE_SHIFT)); __free_pages(virt_to_page(svm->msrpm), MSRPM_ALLOC_ORDER); __free_page(virt_to_page(svm->nested.hsave)); __free_pages(virt_to_page(svm->nested.msrpm), MSRPM_ALLOC_ORDER); kvm_vcpu_uninit(vcpu); kmem_cache_free(kvm_vcpu_cache, svm); - /* - * The vmcb page can be recycled, causing a false negative in - * svm_vcpu_load(). So do a full IBPB now. - */ - indirect_branch_prediction_barrier(); } static void svm_vcpu_load(struct kvm_vcpu *vcpu, int cpu) @@ -7145,7 +7159,7 @@ static struct kvm_x86_ops svm_x86_ops __ro_after_init = { .has_wbinvd_exit = svm_has_wbinvd_exit, .read_l1_tsc_offset = svm_read_l1_tsc_offset, - .write_tsc_offset = svm_write_tsc_offset, + .write_l1_tsc_offset = svm_write_l1_tsc_offset, .set_tdp_cr3 = set_tdp_cr3, diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c index e665aa7167cf9..39a0e34ff6764 100644 --- a/arch/x86/kvm/vmx.c +++ b/arch/x86/kvm/vmx.c @@ -170,6 +170,7 @@ module_param_named(preemption_timer, enable_preemption_timer, bool, S_IRUGO); * refer SDM volume 3b section 21.6.13 & 22.1.3. 
*/ static unsigned int ple_gap = KVM_DEFAULT_PLE_GAP; +module_param(ple_gap, uint, 0444); static unsigned int ple_window = KVM_VMX_DEFAULT_PLE_WINDOW; module_param(ple_window, uint, 0444); @@ -961,6 +962,7 @@ struct vcpu_vmx { struct shared_msr_entry *guest_msrs; int nmsrs; int save_nmsrs; + bool guest_msrs_dirty; unsigned long host_idt_base; #ifdef CONFIG_X86_64 u64 msr_host_kernel_gs_base; @@ -1283,7 +1285,7 @@ static void vmx_set_nmi_mask(struct kvm_vcpu *vcpu, bool masked); static bool nested_vmx_is_page_fault_vmexit(struct vmcs12 *vmcs12, u16 error_code); static void vmx_update_msr_bitmap(struct kvm_vcpu *vcpu); -static void __always_inline vmx_disable_intercept_for_msr(unsigned long *msr_bitmap, +static __always_inline void vmx_disable_intercept_for_msr(unsigned long *msr_bitmap, u32 msr, int type); static DEFINE_PER_CPU(struct vmcs *, vmxarea); @@ -2873,6 +2875,20 @@ static void vmx_prepare_switch_to_guest(struct kvm_vcpu *vcpu) vmx->req_immediate_exit = false; + /* + * Note that guest MSRs to be saved/restored can also be changed + * when guest state is loaded. This happens when guest transitions + * to/from long-mode by setting MSR_EFER.LMA. 
+ */ + if (!vmx->loaded_cpu_state || vmx->guest_msrs_dirty) { + vmx->guest_msrs_dirty = false; + for (i = 0; i < vmx->save_nmsrs; ++i) + kvm_set_shared_msr(vmx->guest_msrs[i].index, + vmx->guest_msrs[i].data, + vmx->guest_msrs[i].mask); + + } + if (vmx->loaded_cpu_state) return; @@ -2933,11 +2949,6 @@ static void vmx_prepare_switch_to_guest(struct kvm_vcpu *vcpu) vmcs_writel(HOST_GS_BASE, gs_base); host_state->gs_base = gs_base; } - - for (i = 0; i < vmx->save_nmsrs; ++i) - kvm_set_shared_msr(vmx->guest_msrs[i].index, - vmx->guest_msrs[i].data, - vmx->guest_msrs[i].mask); } static void vmx_prepare_switch_to_host(struct vcpu_vmx *vmx) @@ -3294,10 +3305,13 @@ static int nested_vmx_check_exception(struct kvm_vcpu *vcpu, unsigned long *exit } } else { if (vmcs12->exception_bitmap & (1u << nr)) { - if (nr == DB_VECTOR) + if (nr == DB_VECTOR) { *exit_qual = vcpu->arch.dr6; - else + *exit_qual &= ~(DR6_FIXED_1 | DR6_BT); + *exit_qual ^= DR6_RTM; + } else { *exit_qual = 0; + } return 1; } } @@ -3414,6 +3428,7 @@ static void setup_msrs(struct vcpu_vmx *vmx) move_msr_up(vmx, index, save_nmsrs++); vmx->save_nmsrs = save_nmsrs; + vmx->guest_msrs_dirty = true; if (cpu_has_vmx_msr_bitmap()) vmx_update_msr_bitmap(&vmx->vcpu); @@ -3430,11 +3445,9 @@ static u64 vmx_read_l1_tsc_offset(struct kvm_vcpu *vcpu) return vcpu->arch.tsc_offset; } -/* - * writes 'offset' into guest's timestamp counter offset register - */ -static void vmx_write_tsc_offset(struct kvm_vcpu *vcpu, u64 offset) +static u64 vmx_write_l1_tsc_offset(struct kvm_vcpu *vcpu, u64 offset) { + u64 active_offset = offset; if (is_guest_mode(vcpu)) { /* * We're here if L1 chose not to trap WRMSR to TSC. According @@ -3442,17 +3455,16 @@ static void vmx_write_tsc_offset(struct kvm_vcpu *vcpu, u64 offset) * set for L2 remains unchanged, and still needs to be added * to the newly set TSC to get L2's TSC. 
*/ - struct vmcs12 *vmcs12; - /* recalculate vmcs02.TSC_OFFSET: */ - vmcs12 = get_vmcs12(vcpu); - vmcs_write64(TSC_OFFSET, offset + - (nested_cpu_has(vmcs12, CPU_BASED_USE_TSC_OFFSETING) ? - vmcs12->tsc_offset : 0)); + struct vmcs12 *vmcs12 = get_vmcs12(vcpu); + if (nested_cpu_has(vmcs12, CPU_BASED_USE_TSC_OFFSETING)) + active_offset += vmcs12->tsc_offset; } else { trace_kvm_write_tsc_offset(vcpu->vcpu_id, vmcs_read64(TSC_OFFSET), offset); - vmcs_write64(TSC_OFFSET, offset); } + + vmcs_write64(TSC_OFFSET, active_offset); + return active_offset; } /* @@ -5923,7 +5935,7 @@ static void free_vpid(int vpid) spin_unlock(&vmx_vpid_lock); } -static void __always_inline vmx_disable_intercept_for_msr(unsigned long *msr_bitmap, +static __always_inline void vmx_disable_intercept_for_msr(unsigned long *msr_bitmap, u32 msr, int type) { int f = sizeof(unsigned long); @@ -5961,7 +5973,7 @@ static void __always_inline vmx_disable_intercept_for_msr(unsigned long *msr_bit } } -static void __always_inline vmx_enable_intercept_for_msr(unsigned long *msr_bitmap, +static __always_inline void vmx_enable_intercept_for_msr(unsigned long *msr_bitmap, u32 msr, int type) { int f = sizeof(unsigned long); @@ -5999,7 +6011,7 @@ static void __always_inline vmx_enable_intercept_for_msr(unsigned long *msr_bitm } } -static void __always_inline vmx_set_intercept_for_msr(unsigned long *msr_bitmap, +static __always_inline void vmx_set_intercept_for_msr(unsigned long *msr_bitmap, u32 msr, int type, bool value) { if (value) @@ -7999,13 +8011,16 @@ static __init int hardware_setup(void) kvm_mce_cap_supported |= MCG_LMCE_P; - return alloc_kvm_area(); + r = alloc_kvm_area(); + if (r) + goto out; + return 0; out: for (i = 0; i < VMX_BITMAP_NR; i++) free_page((unsigned long)vmx_bitmap[i]); - return r; + return r; } static __exit void hardware_unsetup(void) @@ -8275,11 +8290,11 @@ static int enter_vmx_operation(struct kvm_vcpu *vcpu) if (r < 0) goto out_vmcs02; - vmx->nested.cached_vmcs12 = kmalloc(VMCS12_SIZE, 
GFP_KERNEL); + vmx->nested.cached_vmcs12 = kzalloc(VMCS12_SIZE, GFP_KERNEL); if (!vmx->nested.cached_vmcs12) goto out_cached_vmcs12; - vmx->nested.cached_shadow_vmcs12 = kmalloc(VMCS12_SIZE, GFP_KERNEL); + vmx->nested.cached_shadow_vmcs12 = kzalloc(VMCS12_SIZE, GFP_KERNEL); if (!vmx->nested.cached_shadow_vmcs12) goto out_cached_shadow_vmcs12; @@ -11459,6 +11474,8 @@ static void nested_get_vmcs12_pages(struct kvm_vcpu *vcpu) kunmap(vmx->nested.pi_desc_page); kvm_release_page_dirty(vmx->nested.pi_desc_page); vmx->nested.pi_desc_page = NULL; + vmx->nested.pi_desc = NULL; + vmcs_write64(POSTED_INTR_DESC_ADDR, -1ull); } page = kvm_vcpu_gpa_to_page(vcpu, vmcs12->posted_intr_desc_addr); if (is_error_page(page)) @@ -11716,7 +11733,7 @@ static int nested_vmx_check_apicv_controls(struct kvm_vcpu *vcpu, !nested_exit_intr_ack_set(vcpu) || (vmcs12->posted_intr_nv & 0xff00) || (vmcs12->posted_intr_desc_addr & 0x3f) || - (!page_address_valid(vcpu, vmcs12->posted_intr_desc_addr)))) + (vmcs12->posted_intr_desc_addr >> cpuid_maxphyaddr(vcpu)))) return -EINVAL; /* tpr shadow is needed by all apicv features. */ @@ -13967,13 +13984,17 @@ static int vmx_get_nested_state(struct kvm_vcpu *vcpu, else if (enable_shadow_vmcs && !vmx->nested.sync_shadow_vmcs) copy_shadow_to_vmcs12(vmx); - if (copy_to_user(user_kvm_nested_state->data, vmcs12, sizeof(*vmcs12))) + /* + * Copy over the full allocated size of vmcs12 rather than just the size + * of the struct. 
+ */ + if (copy_to_user(user_kvm_nested_state->data, vmcs12, VMCS12_SIZE)) return -EFAULT; if (nested_cpu_has_shadow_vmcs(vmcs12) && vmcs12->vmcs_link_pointer != -1ull) { if (copy_to_user(user_kvm_nested_state->data + VMCS12_SIZE, - get_shadow_vmcs12(vcpu), sizeof(*vmcs12))) + get_shadow_vmcs12(vcpu), VMCS12_SIZE)) return -EFAULT; } @@ -14010,13 +14031,6 @@ static int vmx_set_nested_state(struct kvm_vcpu *vcpu, if (!page_address_valid(vcpu, kvm_state->vmx.vmxon_pa)) return -EINVAL; - if (kvm_state->size < sizeof(kvm_state) + sizeof(*vmcs12)) - return -EINVAL; - - if (kvm_state->vmx.vmcs_pa == kvm_state->vmx.vmxon_pa || - !page_address_valid(vcpu, kvm_state->vmx.vmcs_pa)) - return -EINVAL; - if ((kvm_state->vmx.smm.flags & KVM_STATE_NESTED_SMM_GUEST_MODE) && (kvm_state->flags & KVM_STATE_NESTED_GUEST_MODE)) return -EINVAL; @@ -14046,6 +14060,14 @@ static int vmx_set_nested_state(struct kvm_vcpu *vcpu, if (ret) return ret; + /* Empty 'VMXON' state is permitted */ + if (kvm_state->size < sizeof(kvm_state) + sizeof(*vmcs12)) + return 0; + + if (kvm_state->vmx.vmcs_pa == kvm_state->vmx.vmxon_pa || + !page_address_valid(vcpu, kvm_state->vmx.vmcs_pa)) + return -EINVAL; + set_current_vmptr(vmx, kvm_state->vmx.vmcs_pa); if (kvm_state->vmx.smm.flags & KVM_STATE_NESTED_SMM_VMXON) { @@ -14199,7 +14221,7 @@ static struct kvm_x86_ops vmx_x86_ops __ro_after_init = { .has_wbinvd_exit = cpu_has_vmx_wbinvd_exit, .read_l1_tsc_offset = vmx_read_l1_tsc_offset, - .write_tsc_offset = vmx_write_tsc_offset, + .write_l1_tsc_offset = vmx_write_l1_tsc_offset, .set_tdp_cr3 = vmx_set_cr3, diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index ca717737347e6..5a9a3ebe8fba6 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -1582,8 +1582,7 @@ EXPORT_SYMBOL_GPL(kvm_read_l1_tsc); static void kvm_vcpu_write_tsc_offset(struct kvm_vcpu *vcpu, u64 offset) { - kvm_x86_ops->write_tsc_offset(vcpu, offset); - vcpu->arch.tsc_offset = offset; + vcpu->arch.tsc_offset = 
kvm_x86_ops->write_l1_tsc_offset(vcpu, offset); } static inline bool kvm_check_tsc_unstable(void) @@ -1711,7 +1710,8 @@ EXPORT_SYMBOL_GPL(kvm_write_tsc); static inline void adjust_tsc_offset_guest(struct kvm_vcpu *vcpu, s64 adjustment) { - kvm_vcpu_write_tsc_offset(vcpu, vcpu->arch.tsc_offset + adjustment); + u64 tsc_offset = kvm_x86_ops->read_l1_tsc_offset(vcpu); + kvm_vcpu_write_tsc_offset(vcpu, tsc_offset + adjustment); } static inline void adjust_tsc_offset_host(struct kvm_vcpu *vcpu, s64 adjustment) @@ -2343,6 +2343,7 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info) case MSR_AMD64_PATCH_LOADER: case MSR_AMD64_BU_CFG2: case MSR_AMD64_DC_CFG: + case MSR_F15H_EX_CFG: break; case MSR_IA32_UCODE_REV: @@ -2638,6 +2639,7 @@ int kvm_get_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info) case MSR_AMD64_BU_CFG2: case MSR_IA32_PERF_CTL: case MSR_AMD64_DC_CFG: + case MSR_F15H_EX_CFG: msr_info->data = 0; break; case MSR_F15H_PERF_CTL0 ... MSR_F15H_PERF_CTR5: @@ -6275,8 +6277,7 @@ int x86_emulate_instruction(struct kvm_vcpu *vcpu, toggle_interruptibility(vcpu, ctxt->interruptibility); vcpu->arch.emulate_regs_need_sync_to_vcpu = false; kvm_rip_write(vcpu, ctxt->eip); - if (r == EMULATE_DONE && - (ctxt->tf || (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP))) + if (r == EMULATE_DONE && ctxt->tf) kvm_vcpu_do_singlestep(vcpu, &r); if (!ctxt->have_exception || exception_type(ctxt->exception.vector) == EXCPT_TRAP) @@ -6788,6 +6789,7 @@ static int kvm_pv_clock_pairing(struct kvm_vcpu *vcpu, gpa_t paddr, clock_pairing.nsec = ts.tv_nsec; clock_pairing.tsc = kvm_read_l1_tsc(vcpu, cycle); clock_pairing.flags = 0; + memset(&clock_pairing.pad, 0, sizeof(clock_pairing.pad)); ret = 0; if (kvm_write_guest(vcpu->kvm, paddr, &clock_pairing, @@ -6865,10 +6867,10 @@ int kvm_emulate_hypercall(struct kvm_vcpu *vcpu) case KVM_HC_CLOCK_PAIRING: ret = kvm_pv_clock_pairing(vcpu, a0, a1); break; +#endif case KVM_HC_SEND_IPI: ret = kvm_pv_send_ipi(vcpu->kvm, a0, a1, 
a2, a3, op_64_bit); break; -#endif default: ret = -KVM_ENOSYS; break; @@ -7303,7 +7305,7 @@ void kvm_make_scan_ioapic_request(struct kvm *kvm) static void vcpu_scan_ioapic(struct kvm_vcpu *vcpu) { - if (!kvm_apic_hw_enabled(vcpu->arch.apic)) + if (!kvm_apic_present(vcpu)) return; bitmap_zero(vcpu->arch.ioapic_handled_vectors, 256); @@ -7313,7 +7315,8 @@ static void vcpu_scan_ioapic(struct kvm_vcpu *vcpu) else { if (vcpu->arch.apicv_active) kvm_x86_ops->sync_pir_to_irr(vcpu); - kvm_ioapic_scan_entry(vcpu, vcpu->arch.ioapic_handled_vectors); + if (ioapic_in_kernel(vcpu->kvm)) + kvm_ioapic_scan_entry(vcpu, vcpu->arch.ioapic_handled_vectors); } if (is_guest_mode(vcpu)) diff --git a/arch/x86/lib/kaslr.c b/arch/x86/lib/kaslr.c index 79778ab200e49..a536651164584 100644 --- a/arch/x86/lib/kaslr.c +++ b/arch/x86/lib/kaslr.c @@ -36,8 +36,8 @@ static inline u16 i8254(void) u16 status, timer; do { - outb(I8254_PORT_CONTROL, - I8254_CMD_READBACK | I8254_SELECT_COUNTER0); + outb(I8254_CMD_READBACK | I8254_SELECT_COUNTER0, + I8254_PORT_CONTROL); status = inb(I8254_PORT_COUNTER0); timer = inb(I8254_PORT_COUNTER0); timer |= inb(I8254_PORT_COUNTER0) << 8; diff --git a/arch/x86/mm/dump_pagetables.c b/arch/x86/mm/dump_pagetables.c index a12afff146d10..c05a818224bb0 100644 --- a/arch/x86/mm/dump_pagetables.c +++ b/arch/x86/mm/dump_pagetables.c @@ -53,10 +53,10 @@ struct addr_marker { enum address_markers_idx { USER_SPACE_NR = 0, KERNEL_SPACE_NR, - LOW_KERNEL_NR, -#if defined(CONFIG_MODIFY_LDT_SYSCALL) && defined(CONFIG_X86_5LEVEL) +#ifdef CONFIG_MODIFY_LDT_SYSCALL LDT_NR, #endif + LOW_KERNEL_NR, VMALLOC_START_NR, VMEMMAP_START_NR, #ifdef CONFIG_KASAN @@ -64,9 +64,6 @@ enum address_markers_idx { KASAN_SHADOW_END_NR, #endif CPU_ENTRY_AREA_NR, -#if defined(CONFIG_MODIFY_LDT_SYSCALL) && !defined(CONFIG_X86_5LEVEL) - LDT_NR, -#endif #ifdef CONFIG_X86_ESPFIX64 ESPFIX_START_NR, #endif @@ -493,11 +490,11 @@ static inline bool is_hypervisor_range(int idx) { #ifdef CONFIG_X86_64 /* - * 
ffff800000000000 - ffff87ffffffffff is reserved for - * the hypervisor. + * A hole in the beginning of kernel address space reserved + * for a hypervisor. */ - return (idx >= pgd_index(__PAGE_OFFSET) - 16) && - (idx < pgd_index(__PAGE_OFFSET)); + return (idx >= pgd_index(GUARD_HOLE_BASE_ADDR)) && + (idx < pgd_index(GUARD_HOLE_END_ADDR)); #else return false; #endif diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c index faca978ebf9d8..d883869437b52 100644 --- a/arch/x86/mm/init.c +++ b/arch/x86/mm/init.c @@ -932,7 +932,7 @@ unsigned long max_swapfile_size(void) pages = generic_max_swapfile_size(); - if (boot_cpu_has_bug(X86_BUG_L1TF)) { + if (boot_cpu_has_bug(X86_BUG_L1TF) && l1tf_mitigation != L1TF_MITIGATION_OFF) { /* Limit the swap file size to MAX_PA/2 for L1TF workaround */ unsigned long long l1tf_limit = l1tf_pfn_limit(); /* diff --git a/arch/x86/mm/init_32.c b/arch/x86/mm/init_32.c index 979e0a02cbe1a..142c7d9f89cc2 100644 --- a/arch/x86/mm/init_32.c +++ b/arch/x86/mm/init_32.c @@ -923,34 +923,19 @@ static void mark_nxdata_nx(void) void mark_rodata_ro(void) { unsigned long start = PFN_ALIGN(_text); - unsigned long size = PFN_ALIGN(_etext) - start; + unsigned long size = (unsigned long)__end_rodata - start; set_pages_ro(virt_to_page(start), size >> PAGE_SHIFT); - printk(KERN_INFO "Write protecting the kernel text: %luk\n", + pr_info("Write protecting kernel text and read-only data: %luk\n", size >> 10); kernel_set_to_readonly = 1; #ifdef CONFIG_CPA_DEBUG - printk(KERN_INFO "Testing CPA: Reverting %lx-%lx\n", - start, start+size); - set_pages_rw(virt_to_page(start), size>>PAGE_SHIFT); - - printk(KERN_INFO "Testing CPA: write protecting again\n"); - set_pages_ro(virt_to_page(start), size>>PAGE_SHIFT); -#endif - - start += size; - size = (unsigned long)__end_rodata - start; - set_pages_ro(virt_to_page(start), size >> PAGE_SHIFT); - printk(KERN_INFO "Write protecting the kernel read-only data: %luk\n", - size >> 10); - -#ifdef CONFIG_CPA_DEBUG - 
printk(KERN_INFO "Testing CPA: undo %lx-%lx\n", start, start + size); + pr_info("Testing CPA: Reverting %lx-%lx\n", start, start + size); set_pages_rw(virt_to_page(start), size >> PAGE_SHIFT); - printk(KERN_INFO "Testing CPA: write protecting again\n"); + pr_info("Testing CPA: write protecting again\n"); set_pages_ro(virt_to_page(start), size >> PAGE_SHIFT); #endif mark_nxdata_nx(); diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c index dd519f3721692..a3e9c6ee3cf2b 100644 --- a/arch/x86/mm/init_64.c +++ b/arch/x86/mm/init_64.c @@ -585,7 +585,6 @@ phys_pud_init(pud_t *pud_page, unsigned long paddr, unsigned long paddr_end, paddr_end, page_size_mask, prot); - __flush_tlb_all(); continue; } /* @@ -628,7 +627,6 @@ phys_pud_init(pud_t *pud_page, unsigned long paddr, unsigned long paddr_end, pud_populate(&init_mm, pud, pmd); spin_unlock(&init_mm.page_table_lock); } - __flush_tlb_all(); update_page_count(PG_LEVEL_1G, pages); @@ -669,7 +667,6 @@ phys_p4d_init(p4d_t *p4d_page, unsigned long paddr, unsigned long paddr_end, paddr_last = phys_pud_init(pud, paddr, paddr_end, page_size_mask); - __flush_tlb_all(); continue; } @@ -681,7 +678,6 @@ phys_p4d_init(p4d_t *p4d_page, unsigned long paddr, unsigned long paddr_end, p4d_populate(&init_mm, p4d, pud); spin_unlock(&init_mm.page_table_lock); } - __flush_tlb_all(); return paddr_last; } @@ -734,8 +730,6 @@ kernel_physical_mapping_init(unsigned long paddr_start, if (pgd_changed) sync_global_pgds(vaddr_start, vaddr_end - 1); - __flush_tlb_all(); - return paddr_last; } diff --git a/arch/x86/mm/numa_emulation.c b/arch/x86/mm/numa_emulation.c index b54d52a2d00a8..d71d72cf6c666 100644 --- a/arch/x86/mm/numa_emulation.c +++ b/arch/x86/mm/numa_emulation.c @@ -400,9 +400,17 @@ void __init numa_emulation(struct numa_meminfo *numa_meminfo, int numa_dist_cnt) n = simple_strtoul(emu_cmdline, &emu_cmdline, 0); ret = -1; for_each_node_mask(i, physnode_mask) { + /* + * The reason we pass in blk[0] is due to + * numa_remove_memblk_from() 
called by + * emu_setup_memblk() will delete entry 0 + * and then move everything else up in the pi.blk + * array. Therefore we should always be looking + * at blk[0]. + */ ret = split_nodes_size_interleave_uniform(&ei, &pi, - pi.blk[i].start, pi.blk[i].end, 0, - n, &pi.blk[i], nid); + pi.blk[0].start, pi.blk[0].end, 0, + n, &pi.blk[0], nid); if (ret < 0) break; if (ret < n) { diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c index 51a5a69ecac9f..73c1f6ae72415 100644 --- a/arch/x86/mm/pageattr.c +++ b/arch/x86/mm/pageattr.c @@ -37,11 +37,20 @@ struct cpa_data { unsigned long numpages; int flags; unsigned long pfn; - unsigned force_split : 1; + unsigned force_split : 1, + force_static_prot : 1; int curpage; struct page **pages; }; +enum cpa_warn { + CPA_CONFLICT, + CPA_PROTECT, + CPA_DETECT, +}; + +static const int cpa_warn_level = CPA_PROTECT; + /* * Serialize cpa() (for !DEBUG_PAGEALLOC which uses large identity mappings) * using cpa_lock. So that we don't allow any other cpu, with stale large tlb @@ -94,6 +103,87 @@ void arch_report_meminfo(struct seq_file *m) static inline void split_page_count(int level) { } #endif +#ifdef CONFIG_X86_CPA_STATISTICS + +static unsigned long cpa_1g_checked; +static unsigned long cpa_1g_sameprot; +static unsigned long cpa_1g_preserved; +static unsigned long cpa_2m_checked; +static unsigned long cpa_2m_sameprot; +static unsigned long cpa_2m_preserved; +static unsigned long cpa_4k_install; + +static inline void cpa_inc_1g_checked(void) +{ + cpa_1g_checked++; +} + +static inline void cpa_inc_2m_checked(void) +{ + cpa_2m_checked++; +} + +static inline void cpa_inc_4k_install(void) +{ + cpa_4k_install++; +} + +static inline void cpa_inc_lp_sameprot(int level) +{ + if (level == PG_LEVEL_1G) + cpa_1g_sameprot++; + else + cpa_2m_sameprot++; +} + +static inline void cpa_inc_lp_preserved(int level) +{ + if (level == PG_LEVEL_1G) + cpa_1g_preserved++; + else + cpa_2m_preserved++; +} + +static int cpastats_show(struct seq_file *m, 
void *p) +{ + seq_printf(m, "1G pages checked: %16lu\n", cpa_1g_checked); + seq_printf(m, "1G pages sameprot: %16lu\n", cpa_1g_sameprot); + seq_printf(m, "1G pages preserved: %16lu\n", cpa_1g_preserved); + seq_printf(m, "2M pages checked: %16lu\n", cpa_2m_checked); + seq_printf(m, "2M pages sameprot: %16lu\n", cpa_2m_sameprot); + seq_printf(m, "2M pages preserved: %16lu\n", cpa_2m_preserved); + seq_printf(m, "4K pages set-checked: %16lu\n", cpa_4k_install); + return 0; +} + +static int cpastats_open(struct inode *inode, struct file *file) +{ + return single_open(file, cpastats_show, NULL); +} + +static const struct file_operations cpastats_fops = { + .open = cpastats_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, +}; + +static int __init cpa_stats_init(void) +{ + debugfs_create_file("cpa_stats", S_IRUSR, arch_debugfs_dir, NULL, + &cpastats_fops); + return 0; +} +late_initcall(cpa_stats_init); +#else +static inline void cpa_inc_1g_checked(void) { } +static inline void cpa_inc_2m_checked(void) { } +static inline void cpa_inc_4k_install(void) { } +static inline void cpa_inc_lp_sameprot(int level) { } +static inline void cpa_inc_lp_preserved(int level) { } +#endif + + static inline int within(unsigned long addr, unsigned long start, unsigned long end) { @@ -286,84 +376,179 @@ static void cpa_flush_array(unsigned long *start, int numpages, int cache, } } -/* - * Certain areas of memory on x86 require very specific protection flags, - * for example the BIOS area or kernel text. Callers don't always get this - * right (again, ioremap() on BIOS memory is not uncommon) so this function - * checks and fixes these known static required protection bits. 
- */ -static inline pgprot_t static_protections(pgprot_t prot, unsigned long address, - unsigned long pfn) +static bool overlaps(unsigned long r1_start, unsigned long r1_end, + unsigned long r2_start, unsigned long r2_end) { - pgprot_t forbidden = __pgprot(0); + return (r1_start <= r2_end && r1_end >= r2_start) || + (r2_start <= r1_end && r2_end >= r1_start); +} - /* - * The BIOS area between 640k and 1Mb needs to be executable for - * PCI BIOS based config access (CONFIG_PCI_GOBIOS) support. - */ #ifdef CONFIG_PCI_BIOS - if (pcibios_enabled && within(pfn, BIOS_BEGIN >> PAGE_SHIFT, BIOS_END >> PAGE_SHIFT)) - pgprot_val(forbidden) |= _PAGE_NX; +/* + * The BIOS area between 640k and 1Mb needs to be executable for PCI BIOS + * based config access (CONFIG_PCI_GOBIOS) support. + */ +#define BIOS_PFN PFN_DOWN(BIOS_BEGIN) +#define BIOS_PFN_END PFN_DOWN(BIOS_END - 1) + +static pgprotval_t protect_pci_bios(unsigned long spfn, unsigned long epfn) +{ + if (pcibios_enabled && overlaps(spfn, epfn, BIOS_PFN, BIOS_PFN_END)) + return _PAGE_NX; + return 0; +} +#else +static pgprotval_t protect_pci_bios(unsigned long spfn, unsigned long epfn) +{ + return 0; +} #endif - /* - * The kernel text needs to be executable for obvious reasons - * Does not cover __inittext since that is gone later on. On - * 64bit we do not enforce !NX on the low mapping - */ - if (within(address, (unsigned long)_text, (unsigned long)_etext)) - pgprot_val(forbidden) |= _PAGE_NX; +/* + * The .rodata section needs to be read-only. Using the pfn catches all + * aliases. This also includes __ro_after_init, so do not enforce until + * kernel_set_to_readonly is true. + */ +static pgprotval_t protect_rodata(unsigned long spfn, unsigned long epfn) +{ + unsigned long epfn_ro, spfn_ro = PFN_DOWN(__pa_symbol(__start_rodata)); /* - * The .rodata section needs to be read-only. Using the pfn - * catches all aliases. This also includes __ro_after_init, - * so do not enforce until kernel_set_to_readonly is true. 
+ * Note: __end_rodata is at page aligned and not inclusive, so + * subtract 1 to get the last enforced PFN in the rodata area. */ - if (kernel_set_to_readonly && - within(pfn, __pa_symbol(__start_rodata) >> PAGE_SHIFT, - __pa_symbol(__end_rodata) >> PAGE_SHIFT)) - pgprot_val(forbidden) |= _PAGE_RW; + epfn_ro = PFN_DOWN(__pa_symbol(__end_rodata)) - 1; + + if (kernel_set_to_readonly && overlaps(spfn, epfn, spfn_ro, epfn_ro)) + return _PAGE_RW; + return 0; +} + +/* + * Protect kernel text against becoming non executable by forbidding + * _PAGE_NX. This protects only the high kernel mapping (_text -> _etext) + * out of which the kernel actually executes. Do not protect the low + * mapping. + * + * This does not cover __inittext since that is gone after boot. + */ +static pgprotval_t protect_kernel_text(unsigned long start, unsigned long end) +{ + unsigned long t_end = (unsigned long)_etext - 1; + unsigned long t_start = (unsigned long)_text; + + if (overlaps(start, end, t_start, t_end)) + return _PAGE_NX; + return 0; +} #if defined(CONFIG_X86_64) +/* + * Once the kernel maps the text as RO (kernel_set_to_readonly is set), + * kernel text mappings for the large page aligned text, rodata sections + * will be always read-only. For the kernel identity mappings covering the + * holes caused by this alignment can be anything that user asks. + * + * This will preserve the large page mappings for kernel text/data at no + * extra cost. + */ +static pgprotval_t protect_kernel_text_ro(unsigned long start, + unsigned long end) +{ + unsigned long t_end = (unsigned long)__end_rodata_hpage_align - 1; + unsigned long t_start = (unsigned long)_text; + unsigned int level; + + if (!kernel_set_to_readonly || !overlaps(start, end, t_start, t_end)) + return 0; /* - * Once the kernel maps the text as RO (kernel_set_to_readonly is set), - * kernel text mappings for the large page aligned text, rodata sections - * will be always read-only. 
For the kernel identity mappings covering - * the holes caused by this alignment can be anything that user asks. + * Don't enforce the !RW mapping for the kernel text mapping, if + * the current mapping is already using small page mapping. No + * need to work hard to preserve large page mappings in this case. * - * This will preserve the large page mappings for kernel text/data - * at no extra cost. + * This also fixes the Linux Xen paravirt guest boot failure caused + * by unexpected read-only mappings for kernel identity + * mappings. In this paravirt guest case, the kernel text mapping + * and the kernel identity mapping share the same page-table pages, + * so the protections for kernel text and identity mappings have to + * be the same. */ - if (kernel_set_to_readonly && - within(address, (unsigned long)_text, - (unsigned long)__end_rodata_hpage_align)) { - unsigned int level; - - /* - * Don't enforce the !RW mapping for the kernel text mapping, - * if the current mapping is already using small page mapping. - * No need to work hard to preserve large page mappings in this - * case. - * - * This also fixes the Linux Xen paravirt guest boot failure - * (because of unexpected read-only mappings for kernel identity - * mappings). In this paravirt guest case, the kernel text - * mapping and the kernel identity mapping share the same - * page-table pages. Thus we can't really use different - * protections for the kernel text and identity mappings. Also, - * these shared mappings are made of small page mappings. - * Thus this don't enforce !RW mapping for small page kernel - * text mapping logic will help Linux Xen parvirt guest boot - * as well. 
- */ - if (lookup_address(address, &level) && (level != PG_LEVEL_4K)) - pgprot_val(forbidden) |= _PAGE_RW; - } + if (lookup_address(start, &level) && (level != PG_LEVEL_4K)) + return _PAGE_RW; + return 0; +} +#else +static pgprotval_t protect_kernel_text_ro(unsigned long start, + unsigned long end) +{ + return 0; +} #endif - prot = __pgprot(pgprot_val(prot) & ~pgprot_val(forbidden)); +static inline bool conflicts(pgprot_t prot, pgprotval_t val) +{ + return (pgprot_val(prot) & ~val) != pgprot_val(prot); +} - return prot; +static inline void check_conflict(int warnlvl, pgprot_t prot, pgprotval_t val, + unsigned long start, unsigned long end, + unsigned long pfn, const char *txt) +{ + static const char *lvltxt[] = { + [CPA_CONFLICT] = "conflict", + [CPA_PROTECT] = "protect", + [CPA_DETECT] = "detect", + }; + + if (warnlvl > cpa_warn_level || !conflicts(prot, val)) + return; + + pr_warn("CPA %8s %10s: 0x%016lx - 0x%016lx PFN %lx req %016llx prevent %016llx\n", + lvltxt[warnlvl], txt, start, end, pfn, (unsigned long long)pgprot_val(prot), + (unsigned long long)val); +} + +/* + * Certain areas of memory on x86 require very specific protection flags, + * for example the BIOS area or kernel text. Callers don't always get this + * right (again, ioremap() on BIOS memory is not uncommon) so this function + * checks and fixes these known static required protection bits. + */ +static inline pgprot_t static_protections(pgprot_t prot, unsigned long start, + unsigned long pfn, unsigned long npg, + int warnlvl) +{ + pgprotval_t forbidden, res; + unsigned long end; + + /* + * There is no point in checking RW/NX conflicts when the requested + * mapping is setting the page !PRESENT. 
+ */ + if (!(pgprot_val(prot) & _PAGE_PRESENT)) + return prot; + + /* Operate on the virtual address */ + end = start + npg * PAGE_SIZE - 1; + + res = protect_kernel_text(start, end); + check_conflict(warnlvl, prot, res, start, end, pfn, "Text NX"); + forbidden = res; + + res = protect_kernel_text_ro(start, end); + check_conflict(warnlvl, prot, res, start, end, pfn, "Text RO"); + forbidden |= res; + + /* Check the PFN directly */ + res = protect_pci_bios(pfn, pfn + npg - 1); + check_conflict(warnlvl, prot, res, start, end, pfn, "PCIBIOS NX"); + forbidden |= res; + + res = protect_rodata(pfn, pfn + npg - 1); + check_conflict(warnlvl, prot, res, start, end, pfn, "Rodata RO"); + forbidden |= res; + + return __pgprot(pgprot_val(prot) & ~forbidden); } /* @@ -421,18 +606,18 @@ pte_t *lookup_address_in_pgd(pgd_t *pgd, unsigned long address, */ pte_t *lookup_address(unsigned long address, unsigned int *level) { - return lookup_address_in_pgd(pgd_offset_k(address), address, level); + return lookup_address_in_pgd(pgd_offset_k(address), address, level); } EXPORT_SYMBOL_GPL(lookup_address); static pte_t *_lookup_address_cpa(struct cpa_data *cpa, unsigned long address, unsigned int *level) { - if (cpa->pgd) + if (cpa->pgd) return lookup_address_in_pgd(cpa->pgd + pgd_index(address), address, level); - return lookup_address(address, level); + return lookup_address(address, level); } /* @@ -549,40 +734,35 @@ static pgprot_t pgprot_clear_protnone_bits(pgprot_t prot) return prot; } -static int -try_preserve_large_page(pte_t *kpte, unsigned long address, - struct cpa_data *cpa) +static int __should_split_large_page(pte_t *kpte, unsigned long address, + struct cpa_data *cpa) { - unsigned long nextpage_addr, numpages, pmask, psize, addr, pfn, old_pfn; + unsigned long numpages, pmask, psize, lpaddr, pfn, old_pfn; + pgprot_t old_prot, new_prot, req_prot, chk_prot; pte_t new_pte, old_pte, *tmp; - pgprot_t old_prot, new_prot, req_prot; - int i, do_split = 1; enum pg_level level; - if 
(cpa->force_split) - return 1; - - spin_lock(&pgd_lock); /* * Check for races, another CPU might have split this page * up already: */ tmp = _lookup_address_cpa(cpa, address, &level); if (tmp != kpte) - goto out_unlock; + return 1; switch (level) { case PG_LEVEL_2M: old_prot = pmd_pgprot(*(pmd_t *)kpte); old_pfn = pmd_pfn(*(pmd_t *)kpte); + cpa_inc_2m_checked(); break; case PG_LEVEL_1G: old_prot = pud_pgprot(*(pud_t *)kpte); old_pfn = pud_pfn(*(pud_t *)kpte); + cpa_inc_1g_checked(); break; default: - do_split = -EINVAL; - goto out_unlock; + return -EINVAL; } psize = page_level_size(level); @@ -592,8 +772,8 @@ try_preserve_large_page(pte_t *kpte, unsigned long address, * Calculate the number of pages, which fit into this large * page starting at address: */ - nextpage_addr = (address + psize) & pmask; - numpages = (nextpage_addr - address) >> PAGE_SHIFT; + lpaddr = (address + psize) & pmask; + numpages = (lpaddr - address) >> PAGE_SHIFT; if (numpages < cpa->numpages) cpa->numpages = numpages; @@ -620,71 +800,142 @@ try_preserve_large_page(pte_t *kpte, unsigned long address, pgprot_val(req_prot) |= _PAGE_PSE; /* - * old_pfn points to the large page base pfn. So we need - * to add the offset of the virtual address: + * old_pfn points to the large page base pfn. So we need to add the + * offset of the virtual address: */ pfn = old_pfn + ((address & (psize - 1)) >> PAGE_SHIFT); cpa->pfn = pfn; - new_prot = static_protections(req_prot, address, pfn); + /* + * Calculate the large page base address and the number of 4K pages + * in the large page + */ + lpaddr = address & pmask; + numpages = psize >> PAGE_SHIFT; /* - * We need to check the full range, whether - * static_protection() requires a different pgprot for one of - * the pages in the range we try to preserve: + * Sanity check that the existing mapping is correct versus the static + * protections. static_protections() guards against !PRESENT, so no + * extra conditional required here. 
*/ - addr = address & pmask; - pfn = old_pfn; - for (i = 0; i < (psize >> PAGE_SHIFT); i++, addr += PAGE_SIZE, pfn++) { - pgprot_t chk_prot = static_protections(req_prot, addr, pfn); + chk_prot = static_protections(old_prot, lpaddr, old_pfn, numpages, + CPA_CONFLICT); - if (pgprot_val(chk_prot) != pgprot_val(new_prot)) - goto out_unlock; + if (WARN_ON_ONCE(pgprot_val(chk_prot) != pgprot_val(old_prot))) { + /* + * Split the large page and tell the split code to + * enforce static protections. + */ + cpa->force_static_prot = 1; + return 1; } /* - * If there are no changes, return. maxpages has been updated - * above: + * Optimization: If the requested pgprot is the same as the current + * pgprot, then the large page can be preserved and no updates are + * required independent of alignment and length of the requested + * range. The above already established that the current pgprot is + * correct, which in consequence makes the requested pgprot correct + * as well if it is the same. The static protection scan below will + * not come to a different conclusion. */ - if (pgprot_val(new_prot) == pgprot_val(old_prot)) { - do_split = 0; - goto out_unlock; + if (pgprot_val(req_prot) == pgprot_val(old_prot)) { + cpa_inc_lp_sameprot(level); + return 0; } /* - * We need to change the attributes. Check, whether we can - * change the large page in one go. We request a split, when - * the address is not aligned and the number of pages is - * smaller than the number of pages in the large page. Note - * that we limited the number of possible pages already to - * the number of pages in the large page. + * If the requested range does not cover the full page, split it up */ - if (address == (address & pmask) && cpa->numpages == (psize >> PAGE_SHIFT)) { - /* - * The address is aligned and the number of pages - * covers the full page. 
- */ - new_pte = pfn_pte(old_pfn, new_prot); - __set_pmd_pte(kpte, address, new_pte); - cpa->flags |= CPA_FLUSHTLB; - do_split = 0; - } + if (address != lpaddr || cpa->numpages != numpages) + return 1; + + /* + * Check whether the requested pgprot is conflicting with a static + * protection requirement in the large page. + */ + new_prot = static_protections(req_prot, lpaddr, old_pfn, numpages, + CPA_DETECT); + + /* + * If there is a conflict, split the large page. + * + * There used to be a 4k wise evaluation trying really hard to + * preserve the large pages, but experimentation has shown, that this + * does not help at all. There might be corner cases which would + * preserve one large page occasionally, but it's really not worth the + * extra code and cycles for the common case. + */ + if (pgprot_val(req_prot) != pgprot_val(new_prot)) + return 1; + + /* All checks passed. Update the large page mapping. */ + new_pte = pfn_pte(old_pfn, new_prot); + __set_pmd_pte(kpte, address, new_pte); + cpa->flags |= CPA_FLUSHTLB; + cpa_inc_lp_preserved(level); + return 0; +} + +static int should_split_large_page(pte_t *kpte, unsigned long address, + struct cpa_data *cpa) +{ + int do_split; + + if (cpa->force_split) + return 1; -out_unlock: + spin_lock(&pgd_lock); + do_split = __should_split_large_page(kpte, address, cpa); spin_unlock(&pgd_lock); return do_split; } +static void split_set_pte(struct cpa_data *cpa, pte_t *pte, unsigned long pfn, + pgprot_t ref_prot, unsigned long address, + unsigned long size) +{ + unsigned int npg = PFN_DOWN(size); + pgprot_t prot; + + /* + * If should_split_large_page() discovered an inconsistent mapping, + * remove the invalid protection in the split mapping. + */ + if (!cpa->force_static_prot) + goto set; + + prot = static_protections(ref_prot, address, pfn, npg, CPA_PROTECT); + + if (pgprot_val(prot) == pgprot_val(ref_prot)) + goto set; + + /* + * If this is splitting a PMD, fix it up. 
PUD splits cannot be + * fixed trivially as that would require to rescan the newly + * installed PMD mappings after returning from split_large_page() + * so an eventual further split can allocate the necessary PTE + * pages. Warn for now and revisit it in case this actually + * happens. + */ + if (size == PAGE_SIZE) + ref_prot = prot; + else + pr_warn_once("CPA: Cannot fixup static protections for PUD split\n"); +set: + set_pte(pte, pfn_pte(pfn, ref_prot)); +} + static int __split_large_page(struct cpa_data *cpa, pte_t *kpte, unsigned long address, struct page *base) { + unsigned long lpaddr, lpinc, ref_pfn, pfn, pfninc = 1; pte_t *pbase = (pte_t *)page_address(base); - unsigned long ref_pfn, pfn, pfninc = 1; unsigned int i, level; - pte_t *tmp; pgprot_t ref_prot; + pte_t *tmp; spin_lock(&pgd_lock); /* @@ -707,15 +958,17 @@ __split_large_page(struct cpa_data *cpa, pte_t *kpte, unsigned long address, * PAT bit to correct position. */ ref_prot = pgprot_large_2_4k(ref_prot); - ref_pfn = pmd_pfn(*(pmd_t *)kpte); + lpaddr = address & PMD_MASK; + lpinc = PAGE_SIZE; break; case PG_LEVEL_1G: ref_prot = pud_pgprot(*(pud_t *)kpte); ref_pfn = pud_pfn(*(pud_t *)kpte); pfninc = PMD_PAGE_SIZE >> PAGE_SHIFT; - + lpaddr = address & PUD_MASK; + lpinc = PMD_SIZE; /* * Clear the PSE flags if the PRESENT flag is not set * otherwise pmd_present/pmd_huge will return true @@ -736,8 +989,8 @@ __split_large_page(struct cpa_data *cpa, pte_t *kpte, unsigned long address, * Get the target pfn from the original entry: */ pfn = ref_pfn; - for (i = 0; i < PTRS_PER_PTE; i++, pfn += pfninc) - set_pte(&pbase[i], pfn_pte(pfn, ref_prot)); + for (i = 0; i < PTRS_PER_PTE; i++, pfn += pfninc, lpaddr += lpinc) + split_set_pte(cpa, pbase + i, pfn, ref_prot, lpaddr, lpinc); if (virt_addr_valid(address)) { unsigned long pfn = PFN_DOWN(__pa(address)); @@ -1247,7 +1500,9 @@ static int __change_page_attr(struct cpa_data *cpa, int primary) pgprot_val(new_prot) &= ~pgprot_val(cpa->mask_clr); pgprot_val(new_prot) 
|= pgprot_val(cpa->mask_set); - new_prot = static_protections(new_prot, address, pfn); + cpa_inc_4k_install(); + new_prot = static_protections(new_prot, address, pfn, 1, + CPA_PROTECT); new_prot = pgprot_clear_protnone_bits(new_prot); @@ -1273,7 +1528,7 @@ static int __change_page_attr(struct cpa_data *cpa, int primary) * Check, whether we can keep the large page intact * and just change the pte: */ - do_split = try_preserve_large_page(kpte, address, cpa); + do_split = should_split_large_page(kpte, address, cpa); /* * When the range fits into the existing large page, * return. cp->numpages and cpa->tlbflush have been updated in @@ -1288,23 +1543,23 @@ static int __change_page_attr(struct cpa_data *cpa, int primary) err = split_large_page(cpa, kpte, address); if (!err) { /* - * Do a global flush tlb after splitting the large page - * and before we do the actual change page attribute in the PTE. - * - * With out this, we violate the TLB application note, that says - * "The TLBs may contain both ordinary and large-page + * Do a global flush tlb after splitting the large page + * and before we do the actual change page attribute in the PTE. + * + * With out this, we violate the TLB application note, that says + * "The TLBs may contain both ordinary and large-page * translations for a 4-KByte range of linear addresses. This * may occur if software modifies the paging structures so that * the page size used for the address range changes. If the two * translations differ with respect to page frame or attributes * (e.g., permissions), processor behavior is undefined and may * be implementation-specific." - * - * We do this global tlb flush inside the cpa_lock, so that we + * + * We do this global tlb flush inside the cpa_lock, so that we * don't allow any other cpu, with stale tlb entries change the * page attribute in parallel, that also falls into the * just split large page entry. 
- */ + */ flush_tlb_all(); goto repeat; } @@ -2086,9 +2341,13 @@ void __kernel_map_pages(struct page *page, int numpages, int enable) /* * We should perform an IPI and flush all tlbs, - * but that can deadlock->flush only current cpu: + * but that can deadlock->flush only current cpu. + * Preemption needs to be disabled around __flush_tlb_all() due to + * CR3 reload in __native_flush_tlb(). */ + preempt_disable(); __flush_tlb_all(); + preempt_enable(); arch_flush_lazy_mmu_mode(); } diff --git a/arch/x86/mm/pat.c b/arch/x86/mm/pat.c index 3d0c83ef6aab9..a3c9ea29d7cc3 100644 --- a/arch/x86/mm/pat.c +++ b/arch/x86/mm/pat.c @@ -519,8 +519,13 @@ static u64 sanitize_phys(u64 address) * for a "decoy" virtual address (bit 63 clear) passed to * set_memory_X(). __pa() on a "decoy" address results in a * physical address with bit 63 set. + * + * Decoy addresses are not present for 32-bit builds, see + * set_mce_nospec(). */ - return address & __PHYSICAL_MASK; + if (IS_ENABLED(CONFIG_X86_64)) + return address & __PHYSICAL_MASK; + return address; } /* @@ -546,7 +551,11 @@ int reserve_memtype(u64 start, u64 end, enum page_cache_mode req_type, start = sanitize_phys(start); end = sanitize_phys(end); - BUG_ON(start >= end); /* end is exclusive */ + if (start >= end) { + WARN(1, "%s failed: [mem %#010Lx-%#010Lx], req %s\n", __func__, + start, end - 1, cattr_name(req_type)); + return -EINVAL; + } if (!pat_enabled()) { /* This is identical to page table setting without PAT */ diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c index e96b99eb800cc..a6d1b0241aea6 100644 --- a/arch/x86/mm/tlb.c +++ b/arch/x86/mm/tlb.c @@ -29,6 +29,12 @@ * Implement flush IPI by CALL_FUNCTION_VECTOR, Alex Shi */ +/* + * Use bit 0 to mangle the TIF_SPEC_IB state into the mm pointer which is + * stored in cpu_tlb_state.last_user_mm_ibpb. + */ +#define LAST_USER_MM_IBPB 0x1UL + /* * We get here when we do something requiring a TLB invalidation * but could not go invalidate all of the contexts. 
We do the @@ -180,6 +186,89 @@ static void sync_current_stack_to_mm(struct mm_struct *mm) } } +static inline unsigned long mm_mangle_tif_spec_ib(struct task_struct *next) +{ + unsigned long next_tif = task_thread_info(next)->flags; + unsigned long ibpb = (next_tif >> TIF_SPEC_IB) & LAST_USER_MM_IBPB; + + return (unsigned long)next->mm | ibpb; +} + +static void cond_ibpb(struct task_struct *next) +{ + if (!next || !next->mm) + return; + + /* + * Both, the conditional and the always IBPB mode use the mm + * pointer to avoid the IBPB when switching between tasks of the + * same process. Using the mm pointer instead of mm->context.ctx_id + * opens a hypothetical hole vs. mm_struct reuse, which is more or + * less impossible to control by an attacker. Aside of that it + * would only affect the first schedule so the theoretically + * exposed data is not really interesting. + */ + if (static_branch_likely(&switch_mm_cond_ibpb)) { + unsigned long prev_mm, next_mm; + + /* + * This is a bit more complex than the always mode because + * it has to handle two cases: + * + * 1) Switch from a user space task (potential attacker) + * which has TIF_SPEC_IB set to a user space task + * (potential victim) which has TIF_SPEC_IB not set. + * + * 2) Switch from a user space task (potential attacker) + * which has TIF_SPEC_IB not set to a user space task + * (potential victim) which has TIF_SPEC_IB set. + * + * This could be done by unconditionally issuing IBPB when + * a task which has TIF_SPEC_IB set is either scheduled in + * or out. Though that results in two flushes when: + * + * - the same user space task is scheduled out and later + * scheduled in again and only a kernel thread ran in + * between. + * + * - a user space task belonging to the same process is + * scheduled in after a kernel thread ran in between + * + * - a user space task belonging to the same process is + * scheduled in immediately. + * + * Optimize this with reasonably small overhead for the + * above cases. 
Mangle the TIF_SPEC_IB bit into the mm + * pointer of the incoming task which is stored in + * cpu_tlbstate.last_user_mm_ibpb for comparison. + */ + next_mm = mm_mangle_tif_spec_ib(next); + prev_mm = this_cpu_read(cpu_tlbstate.last_user_mm_ibpb); + + /* + * Issue IBPB only if the mm's are different and one or + * both have the IBPB bit set. + */ + if (next_mm != prev_mm && + (next_mm | prev_mm) & LAST_USER_MM_IBPB) + indirect_branch_prediction_barrier(); + + this_cpu_write(cpu_tlbstate.last_user_mm_ibpb, next_mm); + } + + if (static_branch_unlikely(&switch_mm_always_ibpb)) { + /* + * Only flush when switching to a user space task with a + * different context than the user space task which ran + * last on this CPU. + */ + if (this_cpu_read(cpu_tlbstate.last_user_mm) != next->mm) { + indirect_branch_prediction_barrier(); + this_cpu_write(cpu_tlbstate.last_user_mm, next->mm); + } + } +} + void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next, struct task_struct *tsk) { @@ -254,27 +343,13 @@ void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next, } else { u16 new_asid; bool need_flush; - u64 last_ctx_id = this_cpu_read(cpu_tlbstate.last_ctx_id); /* * Avoid user/user BTB poisoning by flushing the branch * predictor when switching between processes. This stops * one process from doing Spectre-v2 attacks on another. - * - * As an optimization, flush indirect branches only when - * switching into processes that disable dumping. This - * protects high value processes like gpg, without having - * too high performance overhead. IBPB is *expensive*! - * - * This will not flush branches when switching into kernel - * threads. It will also not flush if we switch to idle - * thread and back to the same process. It will flush if we - * switch to a different non-dumpable process. 
*/ - if (tsk && tsk->mm && - tsk->mm->context.ctx_id != last_ctx_id && - get_dumpable(tsk->mm) != SUID_DUMP_USER) - indirect_branch_prediction_barrier(); + cond_ibpb(tsk); if (IS_ENABLED(CONFIG_VMAP_STACK)) { /* @@ -331,14 +406,6 @@ void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next, trace_tlb_flush_rcuidle(TLB_FLUSH_ON_TASK_SWITCH, 0); } - /* - * Record last user mm's context id, so we can avoid - * flushing branch buffer with IBPB if we switch back - * to the same user. - */ - if (next != &init_mm) - this_cpu_write(cpu_tlbstate.last_ctx_id, next->context.ctx_id); - /* Make sure we write CR3 before loaded_mm. */ barrier(); @@ -419,7 +486,7 @@ void initialize_tlbstate_and_flush(void) write_cr3(build_cr3(mm->pgd, 0)); /* Reinitialize tlbstate. */ - this_cpu_write(cpu_tlbstate.last_ctx_id, mm->context.ctx_id); + this_cpu_write(cpu_tlbstate.last_user_mm_ibpb, LAST_USER_MM_IBPB); this_cpu_write(cpu_tlbstate.loaded_mm_asid, 0); this_cpu_write(cpu_tlbstate.next_asid, 1); this_cpu_write(cpu_tlbstate.ctxs[0].ctx_id, mm->context.ctx_id); diff --git a/arch/x86/platform/efi/early_printk.c b/arch/x86/platform/efi/early_printk.c index 5fdacb322ceb4..c3e6be110b7d0 100644 --- a/arch/x86/platform/efi/early_printk.c +++ b/arch/x86/platform/efi/early_printk.c @@ -179,7 +179,7 @@ early_efi_write(struct console *con, const char *str, unsigned int num) num--; } - if (efi_x >= si->lfb_width) { + if (efi_x + font->width > si->lfb_width) { efi_x = 0; efi_y += font->height; } diff --git a/arch/x86/platform/olpc/olpc-xo1-rtc.c b/arch/x86/platform/olpc/olpc-xo1-rtc.c index a2b4efddd61a5..8e7ddd7e313a4 100644 --- a/arch/x86/platform/olpc/olpc-xo1-rtc.c +++ b/arch/x86/platform/olpc/olpc-xo1-rtc.c @@ -16,6 +16,7 @@ #include #include +#include static void rtc_wake_on(struct device *dev) { @@ -75,6 +76,8 @@ static int __init xo1_rtc_init(void) if (r) return r; + x86_platform.legacy.rtc = 0; + device_init_wakeup(&xo1_rtc_device.dev, 1); return 0; } diff --git 
a/arch/x86/um/shared/sysdep/ptrace_32.h b/arch/x86/um/shared/sysdep/ptrace_32.h index b94a108de1dc8..ae00d22bce02c 100644 --- a/arch/x86/um/shared/sysdep/ptrace_32.h +++ b/arch/x86/um/shared/sysdep/ptrace_32.h @@ -10,20 +10,10 @@ static inline void update_debugregs(int seq) {} -/* syscall emulation path in ptrace */ - -#ifndef PTRACE_SYSEMU -#define PTRACE_SYSEMU 31 -#endif - void set_using_sysemu(int value); int get_using_sysemu(void); extern int sysemu_supported; -#ifndef PTRACE_SYSEMU_SINGLESTEP -#define PTRACE_SYSEMU_SINGLESTEP 32 -#endif - #define UPT_SYSCALL_ARG1(r) UPT_BX(r) #define UPT_SYSCALL_ARG2(r) UPT_CX(r) #define UPT_SYSCALL_ARG3(r) UPT_DX(r) diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c index 2eeddd8146533..c6c7c9b7b5c19 100644 --- a/arch/x86/xen/enlighten.c +++ b/arch/x86/xen/enlighten.c @@ -7,7 +7,6 @@ #include #include -#include #include #include @@ -343,80 +342,3 @@ void xen_arch_unregister_cpu(int num) } EXPORT_SYMBOL(xen_arch_unregister_cpu); #endif - -#ifdef CONFIG_XEN_BALLOON_MEMORY_HOTPLUG -void __init arch_xen_balloon_init(struct resource *hostmem_resource) -{ - struct xen_memory_map memmap; - int rc; - unsigned int i, last_guest_ram; - phys_addr_t max_addr = PFN_PHYS(max_pfn); - struct e820_table *xen_e820_table; - const struct e820_entry *entry; - struct resource *res; - - if (!xen_initial_domain()) - return; - - xen_e820_table = kmalloc(sizeof(*xen_e820_table), GFP_KERNEL); - if (!xen_e820_table) - return; - - memmap.nr_entries = ARRAY_SIZE(xen_e820_table->entries); - set_xen_guest_handle(memmap.buffer, xen_e820_table->entries); - rc = HYPERVISOR_memory_op(XENMEM_machine_memory_map, &memmap); - if (rc) { - pr_warn("%s: Can't read host e820 (%d)\n", __func__, rc); - goto out; - } - - last_guest_ram = 0; - for (i = 0; i < memmap.nr_entries; i++) { - if (xen_e820_table->entries[i].addr >= max_addr) - break; - if (xen_e820_table->entries[i].type == E820_TYPE_RAM) - last_guest_ram = i; - } - - entry = 
&xen_e820_table->entries[last_guest_ram]; - if (max_addr >= entry->addr + entry->size) - goto out; /* No unallocated host RAM. */ - - hostmem_resource->start = max_addr; - hostmem_resource->end = entry->addr + entry->size; - - /* - * Mark non-RAM regions between the end of dom0 RAM and end of host RAM - * as unavailable. The rest of that region can be used for hotplug-based - * ballooning. - */ - for (; i < memmap.nr_entries; i++) { - entry = &xen_e820_table->entries[i]; - - if (entry->type == E820_TYPE_RAM) - continue; - - if (entry->addr >= hostmem_resource->end) - break; - - res = kzalloc(sizeof(*res), GFP_KERNEL); - if (!res) - goto out; - - res->name = "Unavailable host RAM"; - res->start = entry->addr; - res->end = (entry->addr + entry->size < hostmem_resource->end) ? - entry->addr + entry->size : hostmem_resource->end; - rc = insert_resource(hostmem_resource, res); - if (rc) { - pr_warn("%s: Can't insert [%llx - %llx) (%d)\n", - __func__, res->start, res->end, rc); - kfree(res); - goto out; - } - } - - out: - kfree(xen_e820_table); -} -#endif /* CONFIG_XEN_BALLOON_MEMORY_HOTPLUG */ diff --git a/arch/x86/xen/enlighten_pvh.c b/arch/x86/xen/enlighten_pvh.c index c85d1a88f4769..f7f77023288ab 100644 --- a/arch/x86/xen/enlighten_pvh.c +++ b/arch/x86/xen/enlighten_pvh.c @@ -75,7 +75,7 @@ static void __init init_pvh_bootparams(void) * Version 2.12 supports Xen entry point but we will use default x86/PC * environment (i.e. hardware_subarch 0). 
*/ - pvh_bootparams.hdr.version = 0x212; + pvh_bootparams.hdr.version = (2 << 8) | 12; pvh_bootparams.hdr.type_of_loader = (9 << 4) | 0; /* Xen loader */ x86_init.acpi.get_root_pointer = pvh_get_root_pointer; diff --git a/arch/x86/xen/mmu_pv.c b/arch/x86/xen/mmu_pv.c index dd461c0167ef0..c8f011e07a15e 100644 --- a/arch/x86/xen/mmu_pv.c +++ b/arch/x86/xen/mmu_pv.c @@ -640,19 +640,20 @@ static int __xen_pgd_walk(struct mm_struct *mm, pgd_t *pgd, unsigned long limit) { int i, nr, flush = 0; - unsigned hole_low, hole_high; + unsigned hole_low = 0, hole_high = 0; /* The limit is the last byte to be touched */ limit--; BUG_ON(limit >= FIXADDR_TOP); +#ifdef CONFIG_X86_64 /* * 64-bit has a great big hole in the middle of the address - * space, which contains the Xen mappings. On 32-bit these - * will end up making a zero-sized hole and so is a no-op. + * space, which contains the Xen mappings. */ - hole_low = pgd_index(USER_LIMIT); - hole_high = pgd_index(PAGE_OFFSET); + hole_low = pgd_index(GUARD_HOLE_BASE_ADDR); + hole_high = pgd_index(GUARD_HOLE_END_ADDR); +#endif nr = pgd_index(limit) + 1; for (i = 0; i < nr; i++) { @@ -1897,7 +1898,7 @@ void __init xen_setup_kernel_pagetable(pgd_t *pgd, unsigned long max_pfn) init_top_pgt[0] = __pgd(0); /* Pre-constructed entries are in pfn, so convert to mfn */ - /* L4[272] -> level3_ident_pgt */ + /* L4[273] -> level3_ident_pgt */ /* L4[511] -> level3_kernel_pgt */ convert_pfn_mfn(init_top_pgt); @@ -1917,8 +1918,8 @@ void __init xen_setup_kernel_pagetable(pgd_t *pgd, unsigned long max_pfn) addr[0] = (unsigned long)pgd; addr[1] = (unsigned long)l3; addr[2] = (unsigned long)l2; - /* Graft it onto L4[272][0]. Note that we creating an aliasing problem: - * Both L4[272][0] and L4[511][510] have entries that point to the same + /* Graft it onto L4[273][0]. Note that we creating an aliasing problem: + * Both L4[273][0] and L4[511][510] have entries that point to the same * L2 (PMD) tables. 
Meaning that if you modify it in __va space * it will be also modified in the __ka space! (But if you just * modify the PMD table to point to other PTE's or none, then you diff --git a/arch/x86/xen/platform-pci-unplug.c b/arch/x86/xen/platform-pci-unplug.c index 33a783c77d969..184b369223979 100644 --- a/arch/x86/xen/platform-pci-unplug.c +++ b/arch/x86/xen/platform-pci-unplug.c @@ -146,6 +146,10 @@ void xen_unplug_emulated_devices(void) { int r; + /* PVH guests don't have emulated devices. */ + if (xen_pvh_domain()) + return; + /* user explicitly requested no unplug */ if (xen_emul_unplug & XEN_UNPLUG_NEVER) return; diff --git a/arch/x86/xen/setup.c b/arch/x86/xen/setup.c index 1163e33121fb3..075ed47993bbf 100644 --- a/arch/x86/xen/setup.c +++ b/arch/x86/xen/setup.c @@ -808,6 +808,7 @@ char * __init xen_memory_setup(void) addr = xen_e820_table.entries[0].addr; size = xen_e820_table.entries[0].size; while (i < xen_e820_table.nr_entries) { + bool discard = false; chunk_size = size; type = xen_e820_table.entries[i].type; @@ -823,10 +824,11 @@ char * __init xen_memory_setup(void) xen_add_extra_mem(pfn_s, n_pfns); xen_max_p2m_pfn = pfn_s + n_pfns; } else - type = E820_TYPE_UNUSABLE; + discard = true; } - xen_align_and_add_e820_region(addr, chunk_size, type); + if (!discard) + xen_align_and_add_e820_region(addr, chunk_size, type); addr += chunk_size; size -= chunk_size; diff --git a/arch/x86/xen/spinlock.c b/arch/x86/xen/spinlock.c index 973f10e052119..717b4847b473f 100644 --- a/arch/x86/xen/spinlock.c +++ b/arch/x86/xen/spinlock.c @@ -9,6 +9,7 @@ #include #include #include +#include #include #include @@ -21,6 +22,7 @@ static DEFINE_PER_CPU(int, lock_kicker_irq) = -1; static DEFINE_PER_CPU(char *, irq_name); +static DEFINE_PER_CPU(atomic_t, xen_qlock_wait_nest); static bool xen_pvspin = true; static void xen_qlock_kick(int cpu) @@ -40,33 +42,24 @@ static void xen_qlock_kick(int cpu) static void xen_qlock_wait(u8 *byte, u8 val) { int irq = 
__this_cpu_read(lock_kicker_irq); + atomic_t *nest_cnt = this_cpu_ptr(&xen_qlock_wait_nest); /* If kicker interrupts not initialized yet, just spin */ - if (irq == -1) + if (irq == -1 || in_nmi()) return; - /* clear pending */ - xen_clear_irq_pending(irq); - barrier(); - - /* - * We check the byte value after clearing pending IRQ to make sure - * that we won't miss a wakeup event because of the clearing. - * - * The sync_clear_bit() call in xen_clear_irq_pending() is atomic. - * So it is effectively a memory barrier for x86. - */ - if (READ_ONCE(*byte) != val) - return; + /* Detect reentry. */ + atomic_inc(nest_cnt); - /* - * If an interrupt happens here, it will leave the wakeup irq - * pending, which will cause xen_poll_irq() to return - * immediately. - */ + /* If irq pending already and no nested call clear it. */ + if (atomic_read(nest_cnt) == 1 && xen_test_irq_pending(irq)) { + xen_clear_irq_pending(irq); + } else if (READ_ONCE(*byte) == val) { + /* Block until irq becomes pending (or a spurious wakeup) */ + xen_poll_irq(irq); + } - /* Block until irq becomes pending (or perhaps a spurious wakeup) */ - xen_poll_irq(irq); + atomic_dec(nest_cnt); } static irqreturn_t dummy_handler(int irq, void *dev_id) diff --git a/arch/x86/xen/time.c b/arch/x86/xen/time.c index c84f1e039d849..01dcccf9185ff 100644 --- a/arch/x86/xen/time.c +++ b/arch/x86/xen/time.c @@ -361,8 +361,6 @@ void xen_timer_resume(void) { int cpu; - pvclock_resume(); - if (xen_clockevent != &xen_vcpuop_clockevent) return; @@ -379,12 +377,15 @@ static const struct pv_time_ops xen_time_ops __initconst = { }; static struct pvclock_vsyscall_time_info *xen_clock __read_mostly; +static u64 xen_clock_value_saved; void xen_save_time_memory_area(void) { struct vcpu_register_time_memory_area t; int ret; + xen_clock_value_saved = xen_clocksource_read() - xen_sched_clock_offset; + if (!xen_clock) return; @@ -404,7 +405,7 @@ void xen_restore_time_memory_area(void) int ret; if (!xen_clock) - return; + goto out; 
t.addr.v = &xen_clock->pvti; @@ -421,6 +422,11 @@ void xen_restore_time_memory_area(void) if (ret != 0) pr_notice("Cannot restore secondary vcpu_time_info (err %d)", ret); + +out: + /* Need pvclock_resume() before using xen_clocksource_read(). */ + pvclock_resume(); + xen_sched_clock_offset = xen_clocksource_read() - xen_clock_value_saved; } static void xen_setup_vsyscall_time_info(void) diff --git a/arch/x86/xen/xen-pvh.S b/arch/x86/xen/xen-pvh.S index ca2d3b2bf2af7..58722a052f9c1 100644 --- a/arch/x86/xen/xen-pvh.S +++ b/arch/x86/xen/xen-pvh.S @@ -181,7 +181,7 @@ canary: .fill 48, 1, 0 early_stack: - .fill 256, 1, 0 + .fill BOOT_STACK_SIZE, 1, 0 early_stack_end: ELFNOTE(Xen, XEN_ELFNOTE_PHYS32_ENTRY, diff --git a/arch/xtensa/boot/Makefile b/arch/xtensa/boot/Makefile index dc9e0ba7122ca..294846117fc2c 100644 --- a/arch/xtensa/boot/Makefile +++ b/arch/xtensa/boot/Makefile @@ -33,7 +33,7 @@ uImage: $(obj)/uImage boot-elf boot-redboot: $(addprefix $(obj)/,$(subdir-y)) $(Q)$(MAKE) $(build)=$(obj)/$@ $(MAKECMDGOALS) -OBJCOPYFLAGS = --strip-all -R .comment -R .note.gnu.build-id -O binary +OBJCOPYFLAGS = --strip-all -R .comment -R .notes -O binary vmlinux.bin: vmlinux FORCE $(call if_changed,objcopy) diff --git a/arch/xtensa/include/asm/processor.h b/arch/xtensa/include/asm/processor.h index e4ccb88b79963..677bc76c1d707 100644 --- a/arch/xtensa/include/asm/processor.h +++ b/arch/xtensa/include/asm/processor.h @@ -23,7 +23,11 @@ # error Linux requires the Xtensa Windowed Registers Option. #endif -#define ARCH_SLAB_MINALIGN XCHAL_DATA_WIDTH +/* Xtensa ABI requires stack alignment to be at least 16 */ + +#define STACK_ALIGN (XCHAL_DATA_WIDTH > 16 ? XCHAL_DATA_WIDTH : 16) + +#define ARCH_SLAB_MINALIGN STACK_ALIGN /* * User space process size: 1 GB. 
diff --git a/arch/xtensa/kernel/asm-offsets.c b/arch/xtensa/kernel/asm-offsets.c index 67904f55f1884..120dd746a1475 100644 --- a/arch/xtensa/kernel/asm-offsets.c +++ b/arch/xtensa/kernel/asm-offsets.c @@ -94,14 +94,14 @@ int main(void) DEFINE(THREAD_SP, offsetof (struct task_struct, thread.sp)); DEFINE(THREAD_CPENABLE, offsetof (struct thread_info, cpenable)); #if XTENSA_HAVE_COPROCESSORS - DEFINE(THREAD_XTREGS_CP0, offsetof (struct thread_info, xtregs_cp)); - DEFINE(THREAD_XTREGS_CP1, offsetof (struct thread_info, xtregs_cp)); - DEFINE(THREAD_XTREGS_CP2, offsetof (struct thread_info, xtregs_cp)); - DEFINE(THREAD_XTREGS_CP3, offsetof (struct thread_info, xtregs_cp)); - DEFINE(THREAD_XTREGS_CP4, offsetof (struct thread_info, xtregs_cp)); - DEFINE(THREAD_XTREGS_CP5, offsetof (struct thread_info, xtregs_cp)); - DEFINE(THREAD_XTREGS_CP6, offsetof (struct thread_info, xtregs_cp)); - DEFINE(THREAD_XTREGS_CP7, offsetof (struct thread_info, xtregs_cp)); + DEFINE(THREAD_XTREGS_CP0, offsetof(struct thread_info, xtregs_cp.cp0)); + DEFINE(THREAD_XTREGS_CP1, offsetof(struct thread_info, xtregs_cp.cp1)); + DEFINE(THREAD_XTREGS_CP2, offsetof(struct thread_info, xtregs_cp.cp2)); + DEFINE(THREAD_XTREGS_CP3, offsetof(struct thread_info, xtregs_cp.cp3)); + DEFINE(THREAD_XTREGS_CP4, offsetof(struct thread_info, xtregs_cp.cp4)); + DEFINE(THREAD_XTREGS_CP5, offsetof(struct thread_info, xtregs_cp.cp5)); + DEFINE(THREAD_XTREGS_CP6, offsetof(struct thread_info, xtregs_cp.cp6)); + DEFINE(THREAD_XTREGS_CP7, offsetof(struct thread_info, xtregs_cp.cp7)); #endif DEFINE(THREAD_XTREGS_USER, offsetof (struct thread_info, xtregs_user)); DEFINE(XTREGS_USER_SIZE, sizeof(xtregs_user_t)); diff --git a/arch/xtensa/kernel/head.S b/arch/xtensa/kernel/head.S index 2f76118ecf623..9053a5622d2c3 100644 --- a/arch/xtensa/kernel/head.S +++ b/arch/xtensa/kernel/head.S @@ -88,9 +88,12 @@ _SetupMMU: initialize_mmu #if defined(CONFIG_MMU) && XCHAL_HAVE_PTP_MMU && XCHAL_HAVE_SPANNING_WAY rsr a2, excsave1 - movi a3, 
0x08000000 + movi a3, XCHAL_KSEG_PADDR + bltu a2, a3, 1f + sub a2, a2, a3 + movi a3, XCHAL_KSEG_SIZE bgeu a2, a3, 1f - movi a3, 0xd0000000 + movi a3, XCHAL_KSEG_CACHED_VADDR add a2, a2, a3 wsr a2, excsave1 1: diff --git a/arch/xtensa/kernel/process.c b/arch/xtensa/kernel/process.c index 483dcfb6e681d..4bb68133a72af 100644 --- a/arch/xtensa/kernel/process.c +++ b/arch/xtensa/kernel/process.c @@ -94,18 +94,21 @@ void coprocessor_release_all(struct thread_info *ti) void coprocessor_flush_all(struct thread_info *ti) { - unsigned long cpenable; + unsigned long cpenable, old_cpenable; int i; preempt_disable(); + RSR_CPENABLE(old_cpenable); cpenable = ti->cpenable; + WSR_CPENABLE(cpenable); for (i = 0; i < XCHAL_CP_MAX; i++) { if ((cpenable & 1) != 0 && coprocessor_owner[i] == ti) coprocessor_flush(ti, i); cpenable >>= 1; } + WSR_CPENABLE(old_cpenable); preempt_enable(); } diff --git a/arch/xtensa/kernel/ptrace.c b/arch/xtensa/kernel/ptrace.c index c0845cb1cbb99..d9541be0605ad 100644 --- a/arch/xtensa/kernel/ptrace.c +++ b/arch/xtensa/kernel/ptrace.c @@ -127,12 +127,37 @@ static int ptrace_setregs(struct task_struct *child, void __user *uregs) } +#if XTENSA_HAVE_COPROCESSORS +#define CP_OFFSETS(cp) \ + { \ + .elf_xtregs_offset = offsetof(elf_xtregs_t, cp), \ + .ti_offset = offsetof(struct thread_info, xtregs_cp.cp), \ + .sz = sizeof(xtregs_ ## cp ## _t), \ + } + +static const struct { + size_t elf_xtregs_offset; + size_t ti_offset; + size_t sz; +} cp_offsets[] = { + CP_OFFSETS(cp0), + CP_OFFSETS(cp1), + CP_OFFSETS(cp2), + CP_OFFSETS(cp3), + CP_OFFSETS(cp4), + CP_OFFSETS(cp5), + CP_OFFSETS(cp6), + CP_OFFSETS(cp7), +}; +#endif + static int ptrace_getxregs(struct task_struct *child, void __user *uregs) { struct pt_regs *regs = task_pt_regs(child); struct thread_info *ti = task_thread_info(child); elf_xtregs_t __user *xtregs = uregs; int ret = 0; + int i __maybe_unused; if (!access_ok(VERIFY_WRITE, uregs, sizeof(elf_xtregs_t))) return -EIO; @@ -140,8 +165,13 @@ static int 
ptrace_getxregs(struct task_struct *child, void __user *uregs) #if XTENSA_HAVE_COPROCESSORS /* Flush all coprocessor registers to memory. */ coprocessor_flush_all(ti); - ret |= __copy_to_user(&xtregs->cp0, &ti->xtregs_cp, - sizeof(xtregs_coprocessor_t)); + + for (i = 0; i < ARRAY_SIZE(cp_offsets); ++i) + ret |= __copy_to_user((char __user *)xtregs + + cp_offsets[i].elf_xtregs_offset, + (const char *)ti + + cp_offsets[i].ti_offset, + cp_offsets[i].sz); #endif ret |= __copy_to_user(&xtregs->opt, &regs->xtregs_opt, sizeof(xtregs->opt)); @@ -157,6 +187,7 @@ static int ptrace_setxregs(struct task_struct *child, void __user *uregs) { struct pt_regs *regs = task_pt_regs(child); elf_xtregs_t *xtregs = uregs; int ret = 0; + int i __maybe_unused; if (!access_ok(VERIFY_READ, uregs, sizeof(elf_xtregs_t))) return -EFAULT; @@ -166,8 +197,11 @@ static int ptrace_setxregs(struct task_struct *child, void __user *uregs) coprocessor_flush_all(ti); coprocessor_release_all(ti); - ret |= __copy_from_user(&ti->xtregs_cp, &xtregs->cp0, - sizeof(xtregs_coprocessor_t)); + for (i = 0; i < ARRAY_SIZE(cp_offsets); ++i) + ret |= __copy_from_user((char *)ti + cp_offsets[i].ti_offset, + (const char __user *)xtregs + + cp_offsets[i].elf_xtregs_offset, + cp_offsets[i].sz); #endif ret |= __copy_from_user(&regs->xtregs_opt, &xtregs->opt, sizeof(xtregs->opt)); diff --git a/arch/xtensa/kernel/vmlinux.lds.S b/arch/xtensa/kernel/vmlinux.lds.S index a1c3edb8ad56c..fa926995d2a37 100644 --- a/arch/xtensa/kernel/vmlinux.lds.S +++ b/arch/xtensa/kernel/vmlinux.lds.S @@ -131,6 +131,7 @@ SECTIONS .fixup : { *(.fixup) } EXCEPTION_TABLE(16) + NOTES /* Data section */ _sdata = .; diff --git a/block/bfq-wf2q.c b/block/bfq-wf2q.c index ae52bff43ce4f..ff7c2d470bb82 100644 --- a/block/bfq-wf2q.c +++ b/block/bfq-wf2q.c @@ -1181,10 +1181,17 @@ bool __bfq_deactivate_entity(struct bfq_entity *entity, bool ins_into_idle_tree) st = bfq_entity_service_tree(entity); is_in_service = entity == sd->in_service_entity; - if (is_in_service)
{ - bfq_calc_finish(entity, entity->service); + bfq_calc_finish(entity, entity->service); + + if (is_in_service) sd->in_service_entity = NULL; - } + else + /* + * Non in-service entity: nobody will take care of + * resetting its service counter on expiration. Do it + * now. + */ + entity->service = 0; if (entity->tree == &st->active) bfq_active_extract(st, entity); diff --git a/block/bio.c b/block/bio.c index 0093bed81c0e8..55a5386fd431e 100644 --- a/block/bio.c +++ b/block/bio.c @@ -605,6 +605,7 @@ void __bio_clone_fast(struct bio *bio, struct bio *bio_src) if (bio_flagged(bio_src, BIO_THROTTLED)) bio_set_flag(bio, BIO_THROTTLED); bio->bi_opf = bio_src->bi_opf; + bio->bi_ioprio = bio_src->bi_ioprio; bio->bi_write_hint = bio_src->bi_write_hint; bio->bi_iter = bio_src->bi_iter; bio->bi_io_vec = bio_src->bi_io_vec; @@ -1261,6 +1262,8 @@ struct bio *bio_copy_user_iov(struct request_queue *q, if (ret) goto cleanup; } else { + if (bmd->is_our_pages) + zero_fill_bio(bio); iov_iter_advance(iter, bio->bi_iter.bi_size); } diff --git a/block/blk-core.c b/block/blk-core.c index cff0a60ee2006..eb8b52241453d 100644 --- a/block/blk-core.c +++ b/block/blk-core.c @@ -793,9 +793,8 @@ void blk_cleanup_queue(struct request_queue *q) * dispatch may still be in-progress since we dispatch requests * from more than one contexts. * - * No need to quiesce queue if it isn't initialized yet since - * blk_freeze_queue() should be enough for cases of passthrough - * request. + * We rely on driver to deal with the race in case that queue + * initialization isn't done. 
*/ if (q->mq_ops && blk_queue_init_done(q)) blk_mq_quiesce_queue(q); diff --git a/block/blk-lib.c b/block/blk-lib.c index bbd44666f2b51..1f196cf0aa5de 100644 --- a/block/blk-lib.c +++ b/block/blk-lib.c @@ -58,8 +58,7 @@ int __blkdev_issue_discard(struct block_device *bdev, sector_t sector, if (!req_sects) goto fail; - if (req_sects > UINT_MAX >> 9) - req_sects = UINT_MAX >> 9; + req_sects = min(req_sects, bio_allowed_max_sectors(q)); end_sect = sector + req_sects; @@ -162,7 +161,7 @@ static int __blkdev_issue_write_same(struct block_device *bdev, sector_t sector, return -EOPNOTSUPP; /* Ensure that max_write_same_sectors doesn't overflow bi_size */ - max_write_same_sectors = UINT_MAX >> 9; + max_write_same_sectors = bio_allowed_max_sectors(q); while (nr_sects) { bio = next_bio(bio, 1, gfp_mask); diff --git a/block/blk-merge.c b/block/blk-merge.c index aaec38cc37b86..2e042190a4f1c 100644 --- a/block/blk-merge.c +++ b/block/blk-merge.c @@ -27,7 +27,8 @@ static struct bio *blk_bio_discard_split(struct request_queue *q, /* Zero-sector (unknown) and one-sector granularities are the same. */ granularity = max(q->limits.discard_granularity >> 9, 1U); - max_discard_sectors = min(q->limits.max_discard_sectors, UINT_MAX >> 9); + max_discard_sectors = min(q->limits.max_discard_sectors, + bio_allowed_max_sectors(q)); max_discard_sectors -= max_discard_sectors % granularity; if (unlikely(!max_discard_sectors)) { diff --git a/block/blk-mq-sched.c b/block/blk-mq-sched.c index 29bfe8017a2d8..da1de190a3b13 100644 --- a/block/blk-mq-sched.c +++ b/block/blk-mq-sched.c @@ -54,13 +54,14 @@ void blk_mq_sched_assign_ioc(struct request *rq, struct bio *bio) * Mark a hardware queue as needing a restart. For shared queues, maintain * a count of how many hardware queues are marked for restart. 
*/ -static void blk_mq_sched_mark_restart_hctx(struct blk_mq_hw_ctx *hctx) +void blk_mq_sched_mark_restart_hctx(struct blk_mq_hw_ctx *hctx) { if (test_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state)) return; set_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state); } +EXPORT_SYMBOL_GPL(blk_mq_sched_mark_restart_hctx); void blk_mq_sched_restart(struct blk_mq_hw_ctx *hctx) { diff --git a/block/blk-mq-sched.h b/block/blk-mq-sched.h index 4e028ee424301..fe660764b8d13 100644 --- a/block/blk-mq-sched.h +++ b/block/blk-mq-sched.h @@ -15,6 +15,7 @@ bool blk_mq_sched_try_merge(struct request_queue *q, struct bio *bio, struct request **merged_request); bool __blk_mq_sched_bio_merge(struct request_queue *q, struct bio *bio); bool blk_mq_sched_try_insert_merge(struct request_queue *q, struct request *rq); +void blk_mq_sched_mark_restart_hctx(struct blk_mq_hw_ctx *hctx); void blk_mq_sched_restart(struct blk_mq_hw_ctx *hctx); void blk_mq_sched_insert_request(struct request *rq, bool at_head, diff --git a/block/blk-mq.c b/block/blk-mq.c index e3c39ea8e17b0..23a53b67cf0d1 100644 --- a/block/blk-mq.c +++ b/block/blk-mq.c @@ -1747,7 +1747,7 @@ static blk_status_t __blk_mq_try_issue_directly(struct blk_mq_hw_ctx *hctx, if (bypass_insert) return BLK_STS_RESOURCE; - blk_mq_sched_insert_request(rq, false, run_queue, false); + blk_mq_request_bypass_insert(rq, run_queue); return BLK_STS_OK; } @@ -1763,7 +1763,7 @@ static void blk_mq_try_issue_directly(struct blk_mq_hw_ctx *hctx, ret = __blk_mq_try_issue_directly(hctx, rq, cookie, false); if (ret == BLK_STS_RESOURCE || ret == BLK_STS_DEV_RESOURCE) - blk_mq_sched_insert_request(rq, false, true, false); + blk_mq_request_bypass_insert(rq, true); else if (ret != BLK_STS_OK) blk_mq_end_request(rq, ret); @@ -1798,7 +1798,8 @@ void blk_mq_try_issue_list_directly(struct blk_mq_hw_ctx *hctx, if (ret != BLK_STS_OK) { if (ret == BLK_STS_RESOURCE || ret == BLK_STS_DEV_RESOURCE) { - list_add(&rq->queuelist, list); + blk_mq_request_bypass_insert(rq, + list_empty(list)); 
break; } blk_mq_end_request(rq, ret); diff --git a/block/blk-stat.h b/block/blk-stat.h index f4a1568e81a41..17b47a86eefb3 100644 --- a/block/blk-stat.h +++ b/block/blk-stat.h @@ -145,6 +145,11 @@ static inline void blk_stat_activate_nsecs(struct blk_stat_callback *cb, mod_timer(&cb->timer, jiffies + nsecs_to_jiffies(nsecs)); } +static inline void blk_stat_deactivate(struct blk_stat_callback *cb) +{ + del_timer_sync(&cb->timer); +} + /** * blk_stat_activate_msecs() - Gather block statistics during a time window in * milliseconds. diff --git a/block/blk-wbt.c b/block/blk-wbt.c index 8ac93fcbaa2ea..0c62bf4eca757 100644 --- a/block/blk-wbt.c +++ b/block/blk-wbt.c @@ -760,8 +760,10 @@ void wbt_disable_default(struct request_queue *q) if (!rqos) return; rwb = RQWB(rqos); - if (rwb->enable_state == WBT_STATE_ON_DEFAULT) + if (rwb->enable_state == WBT_STATE_ON_DEFAULT) { + blk_stat_deactivate(rwb->cb); rwb->wb_normal = 0; + } } EXPORT_SYMBOL_GPL(wbt_disable_default); diff --git a/block/blk.h b/block/blk.h index 9db4e389582c8..977d4b5d968d5 100644 --- a/block/blk.h +++ b/block/blk.h @@ -328,6 +328,16 @@ static inline unsigned long blk_rq_deadline(struct request *rq) return rq->__deadline & ~0x1UL; } +/* + * The max size one bio can handle is UINT_MAX because bvec_iter.bi_size + * is defined as 'unsigned int', meantime it has to be aligned with the logical + * block size which is the minimum accepted unit by hardware. 
+ */ +static inline unsigned int bio_allowed_max_sectors(struct request_queue *q) +{ + return round_down(UINT_MAX, queue_logical_block_size(q)) >> 9; +} + /* * Internal io_context interface */ diff --git a/block/bounce.c b/block/bounce.c index bc63b3a2d18ca..abb50e7e5fab1 100644 --- a/block/bounce.c +++ b/block/bounce.c @@ -31,6 +31,24 @@ static struct bio_set bounce_bio_set, bounce_bio_split; static mempool_t page_pool, isa_page_pool; +static void init_bounce_bioset(void) +{ + static bool bounce_bs_setup; + int ret; + + if (bounce_bs_setup) + return; + + ret = bioset_init(&bounce_bio_set, BIO_POOL_SIZE, 0, BIOSET_NEED_BVECS); + BUG_ON(ret); + if (bioset_integrity_create(&bounce_bio_set, BIO_POOL_SIZE)) + BUG_ON(1); + + ret = bioset_init(&bounce_bio_split, BIO_POOL_SIZE, 0, 0); + BUG_ON(ret); + bounce_bs_setup = true; +} + #if defined(CONFIG_HIGHMEM) static __init int init_emergency_pool(void) { @@ -44,14 +62,7 @@ static __init int init_emergency_pool(void) BUG_ON(ret); pr_info("pool size: %d pages\n", POOL_SIZE); - ret = bioset_init(&bounce_bio_set, BIO_POOL_SIZE, 0, BIOSET_NEED_BVECS); - BUG_ON(ret); - if (bioset_integrity_create(&bounce_bio_set, BIO_POOL_SIZE)) - BUG_ON(1); - - ret = bioset_init(&bounce_bio_split, BIO_POOL_SIZE, 0, 0); - BUG_ON(ret); - + init_bounce_bioset(); return 0; } @@ -86,6 +97,8 @@ static void *mempool_alloc_pages_isa(gfp_t gfp_mask, void *data) return mempool_alloc_pages(gfp_mask | GFP_DMA, data); } +static DEFINE_MUTEX(isa_mutex); + /* * gets called "every" time someone init's a queue with BLK_BOUNCE_ISA * as the max address, so check if the pool has already been created. 
@@ -94,14 +107,20 @@ int init_emergency_isa_pool(void) { int ret; - if (mempool_initialized(&isa_page_pool)) + mutex_lock(&isa_mutex); + + if (mempool_initialized(&isa_page_pool)) { + mutex_unlock(&isa_mutex); return 0; + } ret = mempool_init(&isa_page_pool, ISA_POOL_SIZE, mempool_alloc_pages_isa, mempool_free_pages, (void *) 0); BUG_ON(ret); pr_info("isa pool size: %d pages\n", ISA_POOL_SIZE); + init_bounce_bioset(); + mutex_unlock(&isa_mutex); return 0; } @@ -229,6 +248,7 @@ static struct bio *bounce_clone_bio(struct bio *bio_src, gfp_t gfp_mask, return NULL; bio->bi_disk = bio_src->bi_disk; bio->bi_opf = bio_src->bi_opf; + bio->bi_ioprio = bio_src->bi_ioprio; bio->bi_write_hint = bio_src->bi_write_hint; bio->bi_iter.bi_sector = bio_src->bi_iter.bi_sector; bio->bi_iter.bi_size = bio_src->bi_iter.bi_size; diff --git a/block/genhd.c b/block/genhd.c index be5bab20b2abf..4c777e1b3bd93 100644 --- a/block/genhd.c +++ b/block/genhd.c @@ -1284,6 +1284,7 @@ static void disk_release(struct device *dev) struct class block_class = { .name = "block", }; +EXPORT_SYMBOL_GPL(block_class); static char *block_devnode(struct device *dev, umode_t *mode, kuid_t *uid, kgid_t *gid) diff --git a/block/mq-deadline.c b/block/mq-deadline.c index 099a9e05854c3..d5e21ce44d2cc 100644 --- a/block/mq-deadline.c +++ b/block/mq-deadline.c @@ -373,9 +373,16 @@ static struct request *__dd_dispatch_request(struct deadline_data *dd) /* * One confusing aspect here is that we get called for a specific - * hardware queue, but we return a request that may not be for a + * hardware queue, but we may return a request that is for a * different hardware queue. This is because mq-deadline has shared * state for all hardware queues, in terms of sorting, FIFOs, etc. + * + * For a zoned block device, __dd_dispatch_request() may return NULL + * if all the queued write requests are directed at zones that are already + * locked due to on-going write requests. 
In this case, make sure to mark + * the queue as needing a restart to ensure that the queue is run again + * and the pending writes dispatched once the target zones for the ongoing + * write requests are unlocked in dd_finish_request(). */ static struct request *dd_dispatch_request(struct blk_mq_hw_ctx *hctx) { @@ -384,6 +391,9 @@ static struct request *dd_dispatch_request(struct blk_mq_hw_ctx *hctx) spin_lock(&dd->lock); rq = __dd_dispatch_request(dd); + if (!rq && blk_queue_is_zoned(hctx->queue) && + !list_empty(&dd->fifo_list[WRITE])) + blk_mq_sched_mark_restart_hctx(hctx); spin_unlock(&dd->lock); return rq; diff --git a/block/partition-generic.c b/block/partition-generic.c index d3d14e81fb12d..5f8db5c5140f4 100644 --- a/block/partition-generic.c +++ b/block/partition-generic.c @@ -249,9 +249,10 @@ struct device_type part_type = { .uevent = part_uevent, }; -static void delete_partition_rcu_cb(struct rcu_head *head) +static void delete_partition_work_fn(struct work_struct *work) { - struct hd_struct *part = container_of(head, struct hd_struct, rcu_head); + struct hd_struct *part = container_of(to_rcu_work(work), struct hd_struct, + rcu_work); part->start_sect = 0; part->nr_sects = 0; @@ -262,7 +263,8 @@ static void delete_partition_rcu_cb(struct rcu_head *head) void __delete_partition(struct percpu_ref *ref) { struct hd_struct *part = container_of(ref, struct hd_struct, ref); - call_rcu(&part->rcu_head, delete_partition_rcu_cb); + INIT_RCU_WORK(&part->rcu_work, delete_partition_work_fn); + queue_rcu_work(system_wq, &part->rcu_work); } /* diff --git a/crypto/Kconfig b/crypto/Kconfig index f3e40ac56d939..59e32623a7ce8 100644 --- a/crypto/Kconfig +++ b/crypto/Kconfig @@ -1590,20 +1590,6 @@ config CRYPTO_SM4 If unsure, say N. -config CRYPTO_SPECK - tristate "Speck cipher algorithm" - select CRYPTO_ALGAPI - help - Speck is a lightweight block cipher that is tuned for optimal - performance in software (rather than hardware). 
- - Speck may not be as secure as AES, and should only be used on systems - where AES is not fast enough. - - See also: - - If unsure, say N. - config CRYPTO_TEA tristate "TEA, XTEA and XETA cipher algorithms" select CRYPTO_ALGAPI diff --git a/crypto/Makefile b/crypto/Makefile index 6d1d40eeb9642..f6a234d088822 100644 --- a/crypto/Makefile +++ b/crypto/Makefile @@ -115,7 +115,6 @@ obj-$(CONFIG_CRYPTO_TEA) += tea.o obj-$(CONFIG_CRYPTO_KHAZAD) += khazad.o obj-$(CONFIG_CRYPTO_ANUBIS) += anubis.o obj-$(CONFIG_CRYPTO_SEED) += seed.o -obj-$(CONFIG_CRYPTO_SPECK) += speck.o obj-$(CONFIG_CRYPTO_SALSA20) += salsa20_generic.o obj-$(CONFIG_CRYPTO_CHACHA20) += chacha20_generic.o obj-$(CONFIG_CRYPTO_POLY1305) += poly1305_generic.o diff --git a/crypto/aegis.h b/crypto/aegis.h index f1c6900ddb801..405e025fc9067 100644 --- a/crypto/aegis.h +++ b/crypto/aegis.h @@ -21,7 +21,7 @@ union aegis_block { __le64 words64[AEGIS_BLOCK_SIZE / sizeof(__le64)]; - u32 words32[AEGIS_BLOCK_SIZE / sizeof(u32)]; + __le32 words32[AEGIS_BLOCK_SIZE / sizeof(__le32)]; u8 bytes[AEGIS_BLOCK_SIZE]; }; @@ -57,24 +57,22 @@ static void crypto_aegis_aesenc(union aegis_block *dst, const union aegis_block *src, const union aegis_block *key) { - u32 *d = dst->words32; const u8 *s = src->bytes; - const u32 *k = key->words32; const u32 *t0 = crypto_ft_tab[0]; const u32 *t1 = crypto_ft_tab[1]; const u32 *t2 = crypto_ft_tab[2]; const u32 *t3 = crypto_ft_tab[3]; u32 d0, d1, d2, d3; - d0 = t0[s[ 0]] ^ t1[s[ 5]] ^ t2[s[10]] ^ t3[s[15]] ^ k[0]; - d1 = t0[s[ 4]] ^ t1[s[ 9]] ^ t2[s[14]] ^ t3[s[ 3]] ^ k[1]; - d2 = t0[s[ 8]] ^ t1[s[13]] ^ t2[s[ 2]] ^ t3[s[ 7]] ^ k[2]; - d3 = t0[s[12]] ^ t1[s[ 1]] ^ t2[s[ 6]] ^ t3[s[11]] ^ k[3]; + d0 = t0[s[ 0]] ^ t1[s[ 5]] ^ t2[s[10]] ^ t3[s[15]]; + d1 = t0[s[ 4]] ^ t1[s[ 9]] ^ t2[s[14]] ^ t3[s[ 3]]; + d2 = t0[s[ 8]] ^ t1[s[13]] ^ t2[s[ 2]] ^ t3[s[ 7]]; + d3 = t0[s[12]] ^ t1[s[ 1]] ^ t2[s[ 6]] ^ t3[s[11]]; - d[0] = d0; - d[1] = d1; - d[2] = d2; - d[3] = d3; + dst->words32[0] = cpu_to_le32(d0) 
^ key->words32[0]; + dst->words32[1] = cpu_to_le32(d1) ^ key->words32[1]; + dst->words32[2] = cpu_to_le32(d2) ^ key->words32[2]; + dst->words32[3] = cpu_to_le32(d3) ^ key->words32[3]; } #endif /* _CRYPTO_AEGIS_H */ diff --git a/crypto/af_alg.c b/crypto/af_alg.c index b053179e0bc53..17eb09d222ff4 100644 --- a/crypto/af_alg.c +++ b/crypto/af_alg.c @@ -1071,7 +1071,7 @@ __poll_t af_alg_poll(struct file *file, struct socket *sock, struct af_alg_ctx *ctx = ask->private; __poll_t mask; - sock_poll_wait(file, wait); + sock_poll_wait(file, sock, wait); mask = 0; if (!ctx->more || ctx->used) diff --git a/crypto/authenc.c b/crypto/authenc.c index 4fa8d40d947b7..3ee10fc25aff3 100644 --- a/crypto/authenc.c +++ b/crypto/authenc.c @@ -58,14 +58,22 @@ int crypto_authenc_extractkeys(struct crypto_authenc_keys *keys, const u8 *key, return -EINVAL; if (rta->rta_type != CRYPTO_AUTHENC_KEYA_PARAM) return -EINVAL; - if (RTA_PAYLOAD(rta) < sizeof(*param)) + + /* + * RTA_OK() didn't align the rtattr's payload when validating that it + * fits in the buffer. Yet, the keys should start on the next 4-byte + * aligned boundary. To avoid confusion, require that the rtattr + * payload be exactly the param struct, which has a 4-byte aligned size. 
+ */ + if (RTA_PAYLOAD(rta) != sizeof(*param)) return -EINVAL; + BUILD_BUG_ON(sizeof(*param) % RTA_ALIGNTO); param = RTA_DATA(rta); keys->enckeylen = be32_to_cpu(param->enckeylen); - key += RTA_ALIGN(rta->rta_len); - keylen -= RTA_ALIGN(rta->rta_len); + key += rta->rta_len; + keylen -= rta->rta_len; if (keylen < keys->enckeylen) return -EINVAL; diff --git a/crypto/authencesn.c b/crypto/authencesn.c index 50b804747e20a..4eff4be6bd127 100644 --- a/crypto/authencesn.c +++ b/crypto/authencesn.c @@ -279,7 +279,7 @@ static void authenc_esn_verify_ahash_done(struct crypto_async_request *areq, struct aead_request *req = areq->data; err = err ?: crypto_authenc_esn_decrypt_tail(req, 0); - aead_request_complete(req, err); + authenc_esn_request_complete(req, err); } static int crypto_authenc_esn_decrypt(struct aead_request *req) diff --git a/crypto/cbc.c b/crypto/cbc.c index b761b1f9c6ca1..dd5f332fd5668 100644 --- a/crypto/cbc.c +++ b/crypto/cbc.c @@ -140,9 +140,8 @@ static int crypto_cbc_create(struct crypto_template *tmpl, struct rtattr **tb) spawn = skcipher_instance_ctx(inst); err = crypto_init_spawn(spawn, alg, skcipher_crypto_instance(inst), CRYPTO_ALG_TYPE_MASK); - crypto_mod_put(alg); if (err) - goto err_free_inst; + goto err_put_alg; err = crypto_inst_setname(skcipher_crypto_instance(inst), "cbc", alg); if (err) @@ -174,12 +173,15 @@ static int crypto_cbc_create(struct crypto_template *tmpl, struct rtattr **tb) err = skcipher_register_instance(tmpl, inst); if (err) goto err_drop_spawn; + crypto_mod_put(alg); out: return err; err_drop_spawn: crypto_drop_spawn(spawn); +err_put_alg: + crypto_mod_put(alg); err_free_inst: kfree(inst); goto out; diff --git a/crypto/cfb.c b/crypto/cfb.c index a0d68c09e1b9c..e81e456734985 100644 --- a/crypto/cfb.c +++ b/crypto/cfb.c @@ -144,7 +144,7 @@ static int crypto_cfb_decrypt_segment(struct skcipher_walk *walk, do { crypto_cfb_encrypt_one(tfm, iv, dst); - crypto_xor(dst, iv, bsize); + crypto_xor(dst, src, bsize); iv = src; src += bsize; 
@@ -286,9 +286,8 @@ static int crypto_cfb_create(struct crypto_template *tmpl, struct rtattr **tb) spawn = skcipher_instance_ctx(inst); err = crypto_init_spawn(spawn, alg, skcipher_crypto_instance(inst), CRYPTO_ALG_TYPE_MASK); - crypto_mod_put(alg); if (err) - goto err_free_inst; + goto err_put_alg; err = crypto_inst_setname(skcipher_crypto_instance(inst), "cfb", alg); if (err) @@ -317,12 +316,15 @@ static int crypto_cfb_create(struct crypto_template *tmpl, struct rtattr **tb) err = skcipher_register_instance(tmpl, inst); if (err) goto err_drop_spawn; + crypto_mod_put(alg); out: return err; err_drop_spawn: crypto_drop_spawn(spawn); +err_put_alg: + crypto_mod_put(alg); err_free_inst: kfree(inst); goto out; diff --git a/crypto/crypto_user.c b/crypto/crypto_user.c index 0e89b5457cab0..ceeb2eaf28cff 100644 --- a/crypto/crypto_user.c +++ b/crypto/crypto_user.c @@ -83,7 +83,7 @@ static int crypto_report_cipher(struct sk_buff *skb, struct crypto_alg *alg) { struct crypto_report_cipher rcipher; - strlcpy(rcipher.type, "cipher", sizeof(rcipher.type)); + strncpy(rcipher.type, "cipher", sizeof(rcipher.type)); rcipher.blocksize = alg->cra_blocksize; rcipher.min_keysize = alg->cra_cipher.cia_min_keysize; @@ -102,7 +102,7 @@ static int crypto_report_comp(struct sk_buff *skb, struct crypto_alg *alg) { struct crypto_report_comp rcomp; - strlcpy(rcomp.type, "compression", sizeof(rcomp.type)); + strncpy(rcomp.type, "compression", sizeof(rcomp.type)); if (nla_put(skb, CRYPTOCFGA_REPORT_COMPRESS, sizeof(struct crypto_report_comp), &rcomp)) goto nla_put_failure; @@ -116,7 +116,7 @@ static int crypto_report_acomp(struct sk_buff *skb, struct crypto_alg *alg) { struct crypto_report_acomp racomp; - strlcpy(racomp.type, "acomp", sizeof(racomp.type)); + strncpy(racomp.type, "acomp", sizeof(racomp.type)); if (nla_put(skb, CRYPTOCFGA_REPORT_ACOMP, sizeof(struct crypto_report_acomp), &racomp)) @@ -131,7 +131,7 @@ static int crypto_report_akcipher(struct sk_buff *skb, struct crypto_alg *alg) { 
struct crypto_report_akcipher rakcipher; - strlcpy(rakcipher.type, "akcipher", sizeof(rakcipher.type)); + strncpy(rakcipher.type, "akcipher", sizeof(rakcipher.type)); if (nla_put(skb, CRYPTOCFGA_REPORT_AKCIPHER, sizeof(struct crypto_report_akcipher), &rakcipher)) @@ -146,7 +146,7 @@ static int crypto_report_kpp(struct sk_buff *skb, struct crypto_alg *alg) { struct crypto_report_kpp rkpp; - strlcpy(rkpp.type, "kpp", sizeof(rkpp.type)); + strncpy(rkpp.type, "kpp", sizeof(rkpp.type)); if (nla_put(skb, CRYPTOCFGA_REPORT_KPP, sizeof(struct crypto_report_kpp), &rkpp)) @@ -160,10 +160,10 @@ static int crypto_report_kpp(struct sk_buff *skb, struct crypto_alg *alg) static int crypto_report_one(struct crypto_alg *alg, struct crypto_user_alg *ualg, struct sk_buff *skb) { - strlcpy(ualg->cru_name, alg->cra_name, sizeof(ualg->cru_name)); - strlcpy(ualg->cru_driver_name, alg->cra_driver_name, + strncpy(ualg->cru_name, alg->cra_name, sizeof(ualg->cru_name)); + strncpy(ualg->cru_driver_name, alg->cra_driver_name, sizeof(ualg->cru_driver_name)); - strlcpy(ualg->cru_module_name, module_name(alg->cra_module), + strncpy(ualg->cru_module_name, module_name(alg->cra_module), sizeof(ualg->cru_module_name)); ualg->cru_type = 0; @@ -176,7 +176,7 @@ static int crypto_report_one(struct crypto_alg *alg, if (alg->cra_flags & CRYPTO_ALG_LARVAL) { struct crypto_report_larval rl; - strlcpy(rl.type, "larval", sizeof(rl.type)); + strncpy(rl.type, "larval", sizeof(rl.type)); if (nla_put(skb, CRYPTOCFGA_REPORT_LARVAL, sizeof(struct crypto_report_larval), &rl)) goto nla_put_failure; diff --git a/crypto/ecc.c b/crypto/ecc.c index 8facafd678026..adcce310f6462 100644 --- a/crypto/ecc.c +++ b/crypto/ecc.c @@ -842,15 +842,23 @@ static void xycz_add_c(u64 *x1, u64 *y1, u64 *x2, u64 *y2, u64 *curve_prime, static void ecc_point_mult(struct ecc_point *result, const struct ecc_point *point, const u64 *scalar, - u64 *initial_z, u64 *curve_prime, + u64 *initial_z, const struct ecc_curve *curve, unsigned int 
ndigits) { /* R0 and R1 */ u64 rx[2][ECC_MAX_DIGITS]; u64 ry[2][ECC_MAX_DIGITS]; u64 z[ECC_MAX_DIGITS]; + u64 sk[2][ECC_MAX_DIGITS]; + u64 *curve_prime = curve->p; int i, nb; - int num_bits = vli_num_bits(scalar, ndigits); + int num_bits; + int carry; + + carry = vli_add(sk[0], scalar, curve->n, ndigits); + vli_add(sk[1], sk[0], curve->n, ndigits); + scalar = sk[!carry]; + num_bits = sizeof(u64) * ndigits * 8 + 1; vli_set(rx[1], point->x, ndigits); vli_set(ry[1], point->y, ndigits); @@ -1004,7 +1012,7 @@ int ecc_make_pub_key(unsigned int curve_id, unsigned int ndigits, goto out; } - ecc_point_mult(pk, &curve->g, priv, NULL, curve->p, ndigits); + ecc_point_mult(pk, &curve->g, priv, NULL, curve, ndigits); if (ecc_point_is_zero(pk)) { ret = -EAGAIN; goto err_free_point; @@ -1090,7 +1098,7 @@ int crypto_ecdh_shared_secret(unsigned int curve_id, unsigned int ndigits, goto err_alloc_product; } - ecc_point_mult(product, pk, priv, rand_z, curve->p, ndigits); + ecc_point_mult(product, pk, priv, rand_z, curve, ndigits); ecc_swap_digits(product->x, secret, ndigits); diff --git a/crypto/lrw.c b/crypto/lrw.c index 393a782679c78..5504d1325a56a 100644 --- a/crypto/lrw.c +++ b/crypto/lrw.c @@ -143,7 +143,12 @@ static inline int get_index128(be128 *block) return x + ffz(val); } - return x; + /* + * If we get here, then x == 128 and we are incrementing the counter + * from all ones to all zeros. This means we must return index 127, i.e. + * the one corresponding to key2*{ 1,...,1 }. 
+ */ + return 127; } static int post_crypt(struct skcipher_request *req) diff --git a/crypto/morus1280.c b/crypto/morus1280.c index d057cf5ac4a8b..3889c188f2665 100644 --- a/crypto/morus1280.c +++ b/crypto/morus1280.c @@ -385,14 +385,11 @@ static void crypto_morus1280_final(struct morus1280_state *state, struct morus1280_block *tag_xor, u64 assoclen, u64 cryptlen) { - u64 assocbits = assoclen * 8; - u64 cryptbits = cryptlen * 8; - struct morus1280_block tmp; unsigned int i; - tmp.words[0] = cpu_to_le64(assocbits); - tmp.words[1] = cpu_to_le64(cryptbits); + tmp.words[0] = assoclen * 8; + tmp.words[1] = cryptlen * 8; tmp.words[2] = 0; tmp.words[3] = 0; diff --git a/crypto/morus640.c b/crypto/morus640.c index 1ca76e54281bf..da06ec2f6a807 100644 --- a/crypto/morus640.c +++ b/crypto/morus640.c @@ -384,21 +384,13 @@ static void crypto_morus640_final(struct morus640_state *state, struct morus640_block *tag_xor, u64 assoclen, u64 cryptlen) { - u64 assocbits = assoclen * 8; - u64 cryptbits = cryptlen * 8; - - u32 assocbits_lo = (u32)assocbits; - u32 assocbits_hi = (u32)(assocbits >> 32); - u32 cryptbits_lo = (u32)cryptbits; - u32 cryptbits_hi = (u32)(cryptbits >> 32); - struct morus640_block tmp; unsigned int i; - tmp.words[0] = cpu_to_le32(assocbits_lo); - tmp.words[1] = cpu_to_le32(assocbits_hi); - tmp.words[2] = cpu_to_le32(cryptbits_lo); - tmp.words[3] = cpu_to_le32(cryptbits_hi); + tmp.words[0] = lower_32_bits(assoclen * 8); + tmp.words[1] = upper_32_bits(assoclen * 8); + tmp.words[2] = lower_32_bits(cryptlen * 8); + tmp.words[3] = upper_32_bits(cryptlen * 8); for (i = 0; i < MORUS_BLOCK_WORDS; i++) state->s[4].words[i] ^= state->s[0].words[i]; diff --git a/crypto/pcbc.c b/crypto/pcbc.c index ef802f6e96421..8aa10144407c0 100644 --- a/crypto/pcbc.c +++ b/crypto/pcbc.c @@ -244,9 +244,8 @@ static int crypto_pcbc_create(struct crypto_template *tmpl, struct rtattr **tb) spawn = skcipher_instance_ctx(inst); err = crypto_init_spawn(spawn, alg, skcipher_crypto_instance(inst), 
CRYPTO_ALG_TYPE_MASK); - crypto_mod_put(alg); if (err) - goto err_free_inst; + goto err_put_alg; err = crypto_inst_setname(skcipher_crypto_instance(inst), "pcbc", alg); if (err) @@ -275,12 +274,15 @@ static int crypto_pcbc_create(struct crypto_template *tmpl, struct rtattr **tb) err = skcipher_register_instance(tmpl, inst); if (err) goto err_drop_spawn; + crypto_mod_put(alg); out: return err; err_drop_spawn: crypto_drop_spawn(spawn); +err_put_alg: + crypto_mod_put(alg); err_free_inst: kfree(inst); goto out; diff --git a/crypto/simd.c b/crypto/simd.c index ea7240be3001b..78e8d037ae2b3 100644 --- a/crypto/simd.c +++ b/crypto/simd.c @@ -124,8 +124,9 @@ static int simd_skcipher_init(struct crypto_skcipher *tfm) ctx->cryptd_tfm = cryptd_tfm; - reqsize = sizeof(struct skcipher_request); - reqsize += crypto_skcipher_reqsize(&cryptd_tfm->base); + reqsize = crypto_skcipher_reqsize(cryptd_skcipher_child(cryptd_tfm)); + reqsize = max(reqsize, crypto_skcipher_reqsize(&cryptd_tfm->base)); + reqsize += sizeof(struct skcipher_request); crypto_skcipher_set_reqsize(tfm, reqsize); diff --git a/crypto/sm3_generic.c b/crypto/sm3_generic.c index 9a5c60f08aad8..c0cf87ae7ef6d 100644 --- a/crypto/sm3_generic.c +++ b/crypto/sm3_generic.c @@ -100,7 +100,7 @@ static void sm3_compress(u32 *w, u32 *wt, u32 *m) for (i = 0; i <= 63; i++) { - ss1 = rol32((rol32(a, 12) + e + rol32(t(i), i)), 7); + ss1 = rol32((rol32(a, 12) + e + rol32(t(i), i & 31)), 7); ss2 = ss1 ^ rol32(a, 12); diff --git a/crypto/speck.c b/crypto/speck.c deleted file mode 100644 index 58aa9f7f91f79..0000000000000 --- a/crypto/speck.c +++ /dev/null @@ -1,307 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * Speck: a lightweight block cipher - * - * Copyright (c) 2018 Google, Inc - * - * Speck has 10 variants, including 5 block sizes. For now we only implement - * the variants Speck128/128, Speck128/192, Speck128/256, Speck64/96, and - * Speck64/128. 
Speck${B}/${K} denotes the variant with a block size of B bits - * and a key size of K bits. The Speck128 variants are believed to be the most - * secure variants, and they use the same block size and key sizes as AES. The - * Speck64 variants are less secure, but on 32-bit processors are usually - * faster. The remaining variants (Speck32, Speck48, and Speck96) are even less - * secure and/or not as well suited for implementation on either 32-bit or - * 64-bit processors, so are omitted. - * - * Reference: "The Simon and Speck Families of Lightweight Block Ciphers" - * https://eprint.iacr.org/2013/404.pdf - * - * In a correspondence, the Speck designers have also clarified that the words - * should be interpreted in little-endian format, and the words should be - * ordered such that the first word of each block is 'y' rather than 'x', and - * the first key word (rather than the last) becomes the first round key. - */ - -#include -#include -#include -#include -#include -#include - -/* Speck128 */ - -static __always_inline void speck128_round(u64 *x, u64 *y, u64 k) -{ - *x = ror64(*x, 8); - *x += *y; - *x ^= k; - *y = rol64(*y, 3); - *y ^= *x; -} - -static __always_inline void speck128_unround(u64 *x, u64 *y, u64 k) -{ - *y ^= *x; - *y = ror64(*y, 3); - *x ^= k; - *x -= *y; - *x = rol64(*x, 8); -} - -void crypto_speck128_encrypt(const struct speck128_tfm_ctx *ctx, - u8 *out, const u8 *in) -{ - u64 y = get_unaligned_le64(in); - u64 x = get_unaligned_le64(in + 8); - int i; - - for (i = 0; i < ctx->nrounds; i++) - speck128_round(&x, &y, ctx->round_keys[i]); - - put_unaligned_le64(y, out); - put_unaligned_le64(x, out + 8); -} -EXPORT_SYMBOL_GPL(crypto_speck128_encrypt); - -static void speck128_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in) -{ - crypto_speck128_encrypt(crypto_tfm_ctx(tfm), out, in); -} - -void crypto_speck128_decrypt(const struct speck128_tfm_ctx *ctx, - u8 *out, const u8 *in) -{ - u64 y = get_unaligned_le64(in); - u64 x = get_unaligned_le64(in + 
8); - int i; - - for (i = ctx->nrounds - 1; i >= 0; i--) - speck128_unround(&x, &y, ctx->round_keys[i]); - - put_unaligned_le64(y, out); - put_unaligned_le64(x, out + 8); -} -EXPORT_SYMBOL_GPL(crypto_speck128_decrypt); - -static void speck128_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in) -{ - crypto_speck128_decrypt(crypto_tfm_ctx(tfm), out, in); -} - -int crypto_speck128_setkey(struct speck128_tfm_ctx *ctx, const u8 *key, - unsigned int keylen) -{ - u64 l[3]; - u64 k; - int i; - - switch (keylen) { - case SPECK128_128_KEY_SIZE: - k = get_unaligned_le64(key); - l[0] = get_unaligned_le64(key + 8); - ctx->nrounds = SPECK128_128_NROUNDS; - for (i = 0; i < ctx->nrounds; i++) { - ctx->round_keys[i] = k; - speck128_round(&l[0], &k, i); - } - break; - case SPECK128_192_KEY_SIZE: - k = get_unaligned_le64(key); - l[0] = get_unaligned_le64(key + 8); - l[1] = get_unaligned_le64(key + 16); - ctx->nrounds = SPECK128_192_NROUNDS; - for (i = 0; i < ctx->nrounds; i++) { - ctx->round_keys[i] = k; - speck128_round(&l[i % 2], &k, i); - } - break; - case SPECK128_256_KEY_SIZE: - k = get_unaligned_le64(key); - l[0] = get_unaligned_le64(key + 8); - l[1] = get_unaligned_le64(key + 16); - l[2] = get_unaligned_le64(key + 24); - ctx->nrounds = SPECK128_256_NROUNDS; - for (i = 0; i < ctx->nrounds; i++) { - ctx->round_keys[i] = k; - speck128_round(&l[i % 3], &k, i); - } - break; - default: - return -EINVAL; - } - - return 0; -} -EXPORT_SYMBOL_GPL(crypto_speck128_setkey); - -static int speck128_setkey(struct crypto_tfm *tfm, const u8 *key, - unsigned int keylen) -{ - return crypto_speck128_setkey(crypto_tfm_ctx(tfm), key, keylen); -} - -/* Speck64 */ - -static __always_inline void speck64_round(u32 *x, u32 *y, u32 k) -{ - *x = ror32(*x, 8); - *x += *y; - *x ^= k; - *y = rol32(*y, 3); - *y ^= *x; -} - -static __always_inline void speck64_unround(u32 *x, u32 *y, u32 k) -{ - *y ^= *x; - *y = ror32(*y, 3); - *x ^= k; - *x -= *y; - *x = rol32(*x, 8); -} - -void crypto_speck64_encrypt(const 
struct speck64_tfm_ctx *ctx, - u8 *out, const u8 *in) -{ - u32 y = get_unaligned_le32(in); - u32 x = get_unaligned_le32(in + 4); - int i; - - for (i = 0; i < ctx->nrounds; i++) - speck64_round(&x, &y, ctx->round_keys[i]); - - put_unaligned_le32(y, out); - put_unaligned_le32(x, out + 4); -} -EXPORT_SYMBOL_GPL(crypto_speck64_encrypt); - -static void speck64_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in) -{ - crypto_speck64_encrypt(crypto_tfm_ctx(tfm), out, in); -} - -void crypto_speck64_decrypt(const struct speck64_tfm_ctx *ctx, - u8 *out, const u8 *in) -{ - u32 y = get_unaligned_le32(in); - u32 x = get_unaligned_le32(in + 4); - int i; - - for (i = ctx->nrounds - 1; i >= 0; i--) - speck64_unround(&x, &y, ctx->round_keys[i]); - - put_unaligned_le32(y, out); - put_unaligned_le32(x, out + 4); -} -EXPORT_SYMBOL_GPL(crypto_speck64_decrypt); - -static void speck64_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in) -{ - crypto_speck64_decrypt(crypto_tfm_ctx(tfm), out, in); -} - -int crypto_speck64_setkey(struct speck64_tfm_ctx *ctx, const u8 *key, - unsigned int keylen) -{ - u32 l[3]; - u32 k; - int i; - - switch (keylen) { - case SPECK64_96_KEY_SIZE: - k = get_unaligned_le32(key); - l[0] = get_unaligned_le32(key + 4); - l[1] = get_unaligned_le32(key + 8); - ctx->nrounds = SPECK64_96_NROUNDS; - for (i = 0; i < ctx->nrounds; i++) { - ctx->round_keys[i] = k; - speck64_round(&l[i % 2], &k, i); - } - break; - case SPECK64_128_KEY_SIZE: - k = get_unaligned_le32(key); - l[0] = get_unaligned_le32(key + 4); - l[1] = get_unaligned_le32(key + 8); - l[2] = get_unaligned_le32(key + 12); - ctx->nrounds = SPECK64_128_NROUNDS; - for (i = 0; i < ctx->nrounds; i++) { - ctx->round_keys[i] = k; - speck64_round(&l[i % 3], &k, i); - } - break; - default: - return -EINVAL; - } - - return 0; -} -EXPORT_SYMBOL_GPL(crypto_speck64_setkey); - -static int speck64_setkey(struct crypto_tfm *tfm, const u8 *key, - unsigned int keylen) -{ - return crypto_speck64_setkey(crypto_tfm_ctx(tfm), key, 
keylen); -} - -/* Algorithm definitions */ - -static struct crypto_alg speck_algs[] = { - { - .cra_name = "speck128", - .cra_driver_name = "speck128-generic", - .cra_priority = 100, - .cra_flags = CRYPTO_ALG_TYPE_CIPHER, - .cra_blocksize = SPECK128_BLOCK_SIZE, - .cra_ctxsize = sizeof(struct speck128_tfm_ctx), - .cra_module = THIS_MODULE, - .cra_u = { - .cipher = { - .cia_min_keysize = SPECK128_128_KEY_SIZE, - .cia_max_keysize = SPECK128_256_KEY_SIZE, - .cia_setkey = speck128_setkey, - .cia_encrypt = speck128_encrypt, - .cia_decrypt = speck128_decrypt - } - } - }, { - .cra_name = "speck64", - .cra_driver_name = "speck64-generic", - .cra_priority = 100, - .cra_flags = CRYPTO_ALG_TYPE_CIPHER, - .cra_blocksize = SPECK64_BLOCK_SIZE, - .cra_ctxsize = sizeof(struct speck64_tfm_ctx), - .cra_module = THIS_MODULE, - .cra_u = { - .cipher = { - .cia_min_keysize = SPECK64_96_KEY_SIZE, - .cia_max_keysize = SPECK64_128_KEY_SIZE, - .cia_setkey = speck64_setkey, - .cia_encrypt = speck64_encrypt, - .cia_decrypt = speck64_decrypt - } - } - } -}; - -static int __init speck_module_init(void) -{ - return crypto_register_algs(speck_algs, ARRAY_SIZE(speck_algs)); -} - -static void __exit speck_module_exit(void) -{ - crypto_unregister_algs(speck_algs, ARRAY_SIZE(speck_algs)); -} - -module_init(speck_module_init); -module_exit(speck_module_exit); - -MODULE_DESCRIPTION("Speck block cipher (generic)"); -MODULE_LICENSE("GPL"); -MODULE_AUTHOR("Eric Biggers "); -MODULE_ALIAS_CRYPTO("speck128"); -MODULE_ALIAS_CRYPTO("speck128-generic"); -MODULE_ALIAS_CRYPTO("speck64"); -MODULE_ALIAS_CRYPTO("speck64-generic"); diff --git a/crypto/tcrypt.c b/crypto/tcrypt.c index bdde95e8d3693..d332988eb8dea 100644 --- a/crypto/tcrypt.c +++ b/crypto/tcrypt.c @@ -1103,6 +1103,9 @@ static void test_ahash_speed_common(const char *algo, unsigned int secs, break; } + if (speed[i].klen) + crypto_ahash_setkey(tfm, tvmem[0], speed[i].klen); + pr_info("test%3u " "(%5u byte blocks,%5u bytes per update,%4u updates): ", i, 
speed[i].blen, speed[i].plen, speed[i].blen / speed[i].plen); @@ -1733,6 +1736,7 @@ static int do_test(const char *alg, u32 type, u32 mask, int m, u32 num_mb) ret += tcrypt_test("xts(aes)"); ret += tcrypt_test("ctr(aes)"); ret += tcrypt_test("rfc3686(ctr(aes))"); + ret += tcrypt_test("cfb(aes)"); break; case 11: @@ -2059,6 +2063,10 @@ static int do_test(const char *alg, u32 type, u32 mask, int m, u32 num_mb) speed_template_16_24_32); test_cipher_speed("ctr(aes)", DECRYPT, sec, NULL, 0, speed_template_16_24_32); + test_cipher_speed("cfb(aes)", ENCRYPT, sec, NULL, 0, + speed_template_16_24_32); + test_cipher_speed("cfb(aes)", DECRYPT, sec, NULL, 0, + speed_template_16_24_32); break; case 201: diff --git a/crypto/testmgr.c b/crypto/testmgr.c index a1d42245082aa..54d882ffe4383 100644 --- a/crypto/testmgr.c +++ b/crypto/testmgr.c @@ -2684,6 +2684,13 @@ static const struct alg_test_desc alg_test_descs[] = { .dec = __VECS(aes_ccm_dec_tv_template) } } + }, { + .alg = "cfb(aes)", + .test = alg_test_skcipher, + .fips_allowed = 1, + .suite = { + .cipher = __VECS(aes_cfb_tv_template) + }, }, { .alg = "chacha20", .test = alg_test_skcipher, @@ -3037,18 +3044,6 @@ static const struct alg_test_desc alg_test_descs[] = { .suite = { .cipher = __VECS(sm4_tv_template) } - }, { - .alg = "ecb(speck128)", - .test = alg_test_skcipher, - .suite = { - .cipher = __VECS(speck128_tv_template) - } - }, { - .alg = "ecb(speck64)", - .test = alg_test_skcipher, - .suite = { - .cipher = __VECS(speck64_tv_template) - } }, { .alg = "ecb(tea)", .test = alg_test_skcipher, @@ -3576,18 +3571,6 @@ static const struct alg_test_desc alg_test_descs[] = { .suite = { .cipher = __VECS(serpent_xts_tv_template) } - }, { - .alg = "xts(speck128)", - .test = alg_test_skcipher, - .suite = { - .cipher = __VECS(speck128_xts_tv_template) - } - }, { - .alg = "xts(speck64)", - .test = alg_test_skcipher, - .suite = { - .cipher = __VECS(speck64_xts_tv_template) - } }, { .alg = "xts(twofish)", .test = alg_test_skcipher, diff 
--git a/crypto/testmgr.h b/crypto/testmgr.h index 173111c70746e..11e6f17fe724b 100644 --- a/crypto/testmgr.h +++ b/crypto/testmgr.h @@ -10198,744 +10198,6 @@ static const struct cipher_testvec sm4_tv_template[] = { } }; -/* - * Speck test vectors taken from the original paper: - * "The Simon and Speck Families of Lightweight Block Ciphers" - * https://eprint.iacr.org/2013/404.pdf - * - * Note that the paper does not make byte and word order clear. But it was - * confirmed with the authors that the intended orders are little endian byte - * order and (y, x) word order. Equivalently, the printed test vectors, when - * looking at only the bytes (ignoring the whitespace that divides them into - * words), are backwards: the left-most byte is actually the one with the - * highest memory address, while the right-most byte is actually the one with - * the lowest memory address. - */ - -static const struct cipher_testvec speck128_tv_template[] = { - { /* Speck128/128 */ - .key = "\x00\x01\x02\x03\x04\x05\x06\x07" - "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f", - .klen = 16, - .ptext = "\x20\x6d\x61\x64\x65\x20\x69\x74" - "\x20\x65\x71\x75\x69\x76\x61\x6c", - .ctext = "\x18\x0d\x57\x5c\xdf\xfe\x60\x78" - "\x65\x32\x78\x79\x51\x98\x5d\xa6", - .len = 16, - }, { /* Speck128/192 */ - .key = "\x00\x01\x02\x03\x04\x05\x06\x07" - "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f" - "\x10\x11\x12\x13\x14\x15\x16\x17", - .klen = 24, - .ptext = "\x65\x6e\x74\x20\x74\x6f\x20\x43" - "\x68\x69\x65\x66\x20\x48\x61\x72", - .ctext = "\x86\x18\x3c\xe0\x5d\x18\xbc\xf9" - "\x66\x55\x13\x13\x3a\xcf\xe4\x1b", - .len = 16, - }, { /* Speck128/256 */ - .key = "\x00\x01\x02\x03\x04\x05\x06\x07" - "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f" - "\x10\x11\x12\x13\x14\x15\x16\x17" - "\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f", - .klen = 32, - .ptext = "\x70\x6f\x6f\x6e\x65\x72\x2e\x20" - "\x49\x6e\x20\x74\x68\x6f\x73\x65", - .ctext = "\x43\x8f\x18\x9c\x8d\xb4\xee\x4e" - "\x3e\xf5\xc0\x05\x04\x01\x09\x41", - .len = 16, - }, -}; - -/* - * 
Speck128-XTS test vectors, taken from the AES-XTS test vectors with the - * ciphertext recomputed with Speck128 as the cipher - */ -static const struct cipher_testvec speck128_xts_tv_template[] = { - { - .key = "\x00\x00\x00\x00\x00\x00\x00\x00" - "\x00\x00\x00\x00\x00\x00\x00\x00" - "\x00\x00\x00\x00\x00\x00\x00\x00" - "\x00\x00\x00\x00\x00\x00\x00\x00", - .klen = 32, - .iv = "\x00\x00\x00\x00\x00\x00\x00\x00" - "\x00\x00\x00\x00\x00\x00\x00\x00", - .ptext = "\x00\x00\x00\x00\x00\x00\x00\x00" - "\x00\x00\x00\x00\x00\x00\x00\x00" - "\x00\x00\x00\x00\x00\x00\x00\x00" - "\x00\x00\x00\x00\x00\x00\x00\x00", - .ctext = "\xbe\xa0\xe7\x03\xd7\xfe\xab\x62" - "\x3b\x99\x4a\x64\x74\x77\xac\xed" - "\xd8\xf4\xa6\xcf\xae\xb9\x07\x42" - "\x51\xd9\xb6\x1d\xe0\x5e\xbc\x54", - .len = 32, - }, { - .key = "\x11\x11\x11\x11\x11\x11\x11\x11" - "\x11\x11\x11\x11\x11\x11\x11\x11" - "\x22\x22\x22\x22\x22\x22\x22\x22" - "\x22\x22\x22\x22\x22\x22\x22\x22", - .klen = 32, - .iv = "\x33\x33\x33\x33\x33\x00\x00\x00" - "\x00\x00\x00\x00\x00\x00\x00\x00", - .ptext = "\x44\x44\x44\x44\x44\x44\x44\x44" - "\x44\x44\x44\x44\x44\x44\x44\x44" - "\x44\x44\x44\x44\x44\x44\x44\x44" - "\x44\x44\x44\x44\x44\x44\x44\x44", - .ctext = "\xfb\x53\x81\x75\x6f\x9f\x34\xad" - "\x7e\x01\xed\x7b\xcc\xda\x4e\x4a" - "\xd4\x84\xa4\x53\xd5\x88\x73\x1b" - "\xfd\xcb\xae\x0d\xf3\x04\xee\xe6", - .len = 32, - }, { - .key = "\xff\xfe\xfd\xfc\xfb\xfa\xf9\xf8" - "\xf7\xf6\xf5\xf4\xf3\xf2\xf1\xf0" - "\x22\x22\x22\x22\x22\x22\x22\x22" - "\x22\x22\x22\x22\x22\x22\x22\x22", - .klen = 32, - .iv = "\x33\x33\x33\x33\x33\x00\x00\x00" - "\x00\x00\x00\x00\x00\x00\x00\x00", - .ptext = "\x44\x44\x44\x44\x44\x44\x44\x44" - "\x44\x44\x44\x44\x44\x44\x44\x44" - "\x44\x44\x44\x44\x44\x44\x44\x44" - "\x44\x44\x44\x44\x44\x44\x44\x44", - .ctext = "\x21\x52\x84\x15\xd1\xf7\x21\x55" - "\xd9\x75\x4a\xd3\xc5\xdb\x9f\x7d" - "\xda\x63\xb2\xf1\x82\xb0\x89\x59" - "\x86\xd4\xaa\xaa\xdd\xff\x4f\x92", - .len = 32, - }, { - .key = 
"\x27\x18\x28\x18\x28\x45\x90\x45" - "\x23\x53\x60\x28\x74\x71\x35\x26" - "\x31\x41\x59\x26\x53\x58\x97\x93" - "\x23\x84\x62\x64\x33\x83\x27\x95", - .klen = 32, - .iv = "\x00\x00\x00\x00\x00\x00\x00\x00" - "\x00\x00\x00\x00\x00\x00\x00\x00", - .ptext = "\x00\x01\x02\x03\x04\x05\x06\x07" - "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f" - "\x10\x11\x12\x13\x14\x15\x16\x17" - "\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f" - "\x20\x21\x22\x23\x24\x25\x26\x27" - "\x28\x29\x2a\x2b\x2c\x2d\x2e\x2f" - "\x30\x31\x32\x33\x34\x35\x36\x37" - "\x38\x39\x3a\x3b\x3c\x3d\x3e\x3f" - "\x40\x41\x42\x43\x44\x45\x46\x47" - "\x48\x49\x4a\x4b\x4c\x4d\x4e\x4f" - "\x50\x51\x52\x53\x54\x55\x56\x57" - "\x58\x59\x5a\x5b\x5c\x5d\x5e\x5f" - "\x60\x61\x62\x63\x64\x65\x66\x67" - "\x68\x69\x6a\x6b\x6c\x6d\x6e\x6f" - "\x70\x71\x72\x73\x74\x75\x76\x77" - "\x78\x79\x7a\x7b\x7c\x7d\x7e\x7f" - "\x80\x81\x82\x83\x84\x85\x86\x87" - "\x88\x89\x8a\x8b\x8c\x8d\x8e\x8f" - "\x90\x91\x92\x93\x94\x95\x96\x97" - "\x98\x99\x9a\x9b\x9c\x9d\x9e\x9f" - "\xa0\xa1\xa2\xa3\xa4\xa5\xa6\xa7" - "\xa8\xa9\xaa\xab\xac\xad\xae\xaf" - "\xb0\xb1\xb2\xb3\xb4\xb5\xb6\xb7" - "\xb8\xb9\xba\xbb\xbc\xbd\xbe\xbf" - "\xc0\xc1\xc2\xc3\xc4\xc5\xc6\xc7" - "\xc8\xc9\xca\xcb\xcc\xcd\xce\xcf" - "\xd0\xd1\xd2\xd3\xd4\xd5\xd6\xd7" - "\xd8\xd9\xda\xdb\xdc\xdd\xde\xdf" - "\xe0\xe1\xe2\xe3\xe4\xe5\xe6\xe7" - "\xe8\xe9\xea\xeb\xec\xed\xee\xef" - "\xf0\xf1\xf2\xf3\xf4\xf5\xf6\xf7" - "\xf8\xf9\xfa\xfb\xfc\xfd\xfe\xff" - "\x00\x01\x02\x03\x04\x05\x06\x07" - "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f" - "\x10\x11\x12\x13\x14\x15\x16\x17" - "\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f" - "\x20\x21\x22\x23\x24\x25\x26\x27" - "\x28\x29\x2a\x2b\x2c\x2d\x2e\x2f" - "\x30\x31\x32\x33\x34\x35\x36\x37" - "\x38\x39\x3a\x3b\x3c\x3d\x3e\x3f" - "\x40\x41\x42\x43\x44\x45\x46\x47" - "\x48\x49\x4a\x4b\x4c\x4d\x4e\x4f" - "\x50\x51\x52\x53\x54\x55\x56\x57" - "\x58\x59\x5a\x5b\x5c\x5d\x5e\x5f" - "\x60\x61\x62\x63\x64\x65\x66\x67" - "\x68\x69\x6a\x6b\x6c\x6d\x6e\x6f" - "\x70\x71\x72\x73\x74\x75\x76\x77" - 
"\x78\x79\x7a\x7b\x7c\x7d\x7e\x7f" - "\x80\x81\x82\x83\x84\x85\x86\x87" - "\x88\x89\x8a\x8b\x8c\x8d\x8e\x8f" - "\x90\x91\x92\x93\x94\x95\x96\x97" - "\x98\x99\x9a\x9b\x9c\x9d\x9e\x9f" - "\xa0\xa1\xa2\xa3\xa4\xa5\xa6\xa7" - "\xa8\xa9\xaa\xab\xac\xad\xae\xaf" - "\xb0\xb1\xb2\xb3\xb4\xb5\xb6\xb7" - "\xb8\xb9\xba\xbb\xbc\xbd\xbe\xbf" - "\xc0\xc1\xc2\xc3\xc4\xc5\xc6\xc7" - "\xc8\xc9\xca\xcb\xcc\xcd\xce\xcf" - "\xd0\xd1\xd2\xd3\xd4\xd5\xd6\xd7" - "\xd8\xd9\xda\xdb\xdc\xdd\xde\xdf" - "\xe0\xe1\xe2\xe3\xe4\xe5\xe6\xe7" - "\xe8\xe9\xea\xeb\xec\xed\xee\xef" - "\xf0\xf1\xf2\xf3\xf4\xf5\xf6\xf7" - "\xf8\xf9\xfa\xfb\xfc\xfd\xfe\xff", - .ctext = "\x57\xb5\xf8\x71\x6e\x6d\xdd\x82" - "\x53\xd0\xed\x2d\x30\xc1\x20\xef" - "\x70\x67\x5e\xff\x09\x70\xbb\xc1" - "\x3a\x7b\x48\x26\xd9\x0b\xf4\x48" - "\xbe\xce\xb1\xc7\xb2\x67\xc4\xa7" - "\x76\xf8\x36\x30\xb7\xb4\x9a\xd9" - "\xf5\x9d\xd0\x7b\xc1\x06\x96\x44" - "\x19\xc5\x58\x84\x63\xb9\x12\x68" - "\x68\xc7\xaa\x18\x98\xf2\x1f\x5c" - "\x39\xa6\xd8\x32\x2b\xc3\x51\xfd" - "\x74\x79\x2e\xb4\x44\xd7\x69\xc4" - "\xfc\x29\xe6\xed\x26\x1e\xa6\x9d" - "\x1c\xbe\x00\x0e\x7f\x3a\xca\xfb" - "\x6d\x13\x65\xa0\xf9\x31\x12\xe2" - "\x26\xd1\xec\x2b\x0a\x8b\x59\x99" - "\xa7\x49\xa0\x0e\x09\x33\x85\x50" - "\xc3\x23\xca\x7a\xdd\x13\x45\x5f" - "\xde\x4c\xa7\xcb\x00\x8a\x66\x6f" - "\xa2\xb6\xb1\x2e\xe1\xa0\x18\xf6" - "\xad\xf3\xbd\xeb\xc7\xef\x55\x4f" - "\x79\x91\x8d\x36\x13\x7b\xd0\x4a" - "\x6c\x39\xfb\x53\xb8\x6f\x02\x51" - "\xa5\x20\xac\x24\x1c\x73\x59\x73" - "\x58\x61\x3a\x87\x58\xb3\x20\x56" - "\x39\x06\x2b\x4d\xd3\x20\x2b\x89" - "\x3f\xa2\xf0\x96\xeb\x7f\xa4\xcd" - "\x11\xae\xbd\xcb\x3a\xb4\xd9\x91" - "\x09\x35\x71\x50\x65\xac\x92\xe3" - "\x7b\x32\xc0\x7a\xdd\xd4\xc3\x92" - "\x6f\xeb\x79\xde\x6f\xd3\x25\xc9" - "\xcd\x63\xf5\x1e\x7a\x3b\x26\x9d" - "\x77\x04\x80\xa9\xbf\x38\xb5\xbd" - "\xb8\x05\x07\xbd\xfd\xab\x7b\xf8" - "\x2a\x26\xcc\x49\x14\x6d\x55\x01" - "\x06\x94\xd8\xb2\x2d\x53\x83\x1b" - "\x8f\xd4\xdd\x57\x12\x7e\x18\xba" - 
"\x8e\xe2\x4d\x80\xef\x7e\x6b\x9d" - "\x24\xa9\x60\xa4\x97\x85\x86\x2a" - "\x01\x00\x09\xf1\xcb\x4a\x24\x1c" - "\xd8\xf6\xe6\x5b\xe7\x5d\xf2\xc4" - "\x97\x1c\x10\xc6\x4d\x66\x4f\x98" - "\x87\x30\xac\xd5\xea\x73\x49\x10" - "\x80\xea\xe5\x5f\x4d\x5f\x03\x33" - "\x66\x02\x35\x3d\x60\x06\x36\x4f" - "\x14\x1c\xd8\x07\x1f\x78\xd0\xf8" - "\x4f\x6c\x62\x7c\x15\xa5\x7c\x28" - "\x7c\xcc\xeb\x1f\xd1\x07\x90\x93" - "\x7e\xc2\xa8\x3a\x80\xc0\xf5\x30" - "\xcc\x75\xcf\x16\x26\xa9\x26\x3b" - "\xe7\x68\x2f\x15\x21\x5b\xe4\x00" - "\xbd\x48\x50\xcd\x75\x70\xc4\x62" - "\xbb\x41\xfb\x89\x4a\x88\x3b\x3b" - "\x51\x66\x02\x69\x04\x97\x36\xd4" - "\x75\xae\x0b\xa3\x42\xf8\xca\x79" - "\x8f\x93\xe9\xcc\x38\xbd\xd6\xd2" - "\xf9\x70\x4e\xc3\x6a\x8e\x25\xbd" - "\xea\x15\x5a\xa0\x85\x7e\x81\x0d" - "\x03\xe7\x05\x39\xf5\x05\x26\xee" - "\xec\xaa\x1f\x3d\xc9\x98\x76\x01" - "\x2c\xf4\xfc\xa3\x88\x77\x38\xc4" - "\x50\x65\x50\x6d\x04\x1f\xdf\x5a" - "\xaa\xf2\x01\xa9\xc1\x8d\xee\xca" - "\x47\x26\xef\x39\xb8\xb4\xf2\xd1" - "\xd6\xbb\x1b\x2a\xc1\x34\x14\xcf", - .len = 512, - }, { - .key = "\x27\x18\x28\x18\x28\x45\x90\x45" - "\x23\x53\x60\x28\x74\x71\x35\x26" - "\x62\x49\x77\x57\x24\x70\x93\x69" - "\x99\x59\x57\x49\x66\x96\x76\x27" - "\x31\x41\x59\x26\x53\x58\x97\x93" - "\x23\x84\x62\x64\x33\x83\x27\x95" - "\x02\x88\x41\x97\x16\x93\x99\x37" - "\x51\x05\x82\x09\x74\x94\x45\x92", - .klen = 64, - .iv = "\xff\x00\x00\x00\x00\x00\x00\x00" - "\x00\x00\x00\x00\x00\x00\x00\x00", - .ptext = "\x00\x01\x02\x03\x04\x05\x06\x07" - "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f" - "\x10\x11\x12\x13\x14\x15\x16\x17" - "\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f" - "\x20\x21\x22\x23\x24\x25\x26\x27" - "\x28\x29\x2a\x2b\x2c\x2d\x2e\x2f" - "\x30\x31\x32\x33\x34\x35\x36\x37" - "\x38\x39\x3a\x3b\x3c\x3d\x3e\x3f" - "\x40\x41\x42\x43\x44\x45\x46\x47" - "\x48\x49\x4a\x4b\x4c\x4d\x4e\x4f" - "\x50\x51\x52\x53\x54\x55\x56\x57" - "\x58\x59\x5a\x5b\x5c\x5d\x5e\x5f" - "\x60\x61\x62\x63\x64\x65\x66\x67" - "\x68\x69\x6a\x6b\x6c\x6d\x6e\x6f" - 
"\x70\x71\x72\x73\x74\x75\x76\x77" - "\x78\x79\x7a\x7b\x7c\x7d\x7e\x7f" - "\x80\x81\x82\x83\x84\x85\x86\x87" - "\x88\x89\x8a\x8b\x8c\x8d\x8e\x8f" - "\x90\x91\x92\x93\x94\x95\x96\x97" - "\x98\x99\x9a\x9b\x9c\x9d\x9e\x9f" - "\xa0\xa1\xa2\xa3\xa4\xa5\xa6\xa7" - "\xa8\xa9\xaa\xab\xac\xad\xae\xaf" - "\xb0\xb1\xb2\xb3\xb4\xb5\xb6\xb7" - "\xb8\xb9\xba\xbb\xbc\xbd\xbe\xbf" - "\xc0\xc1\xc2\xc3\xc4\xc5\xc6\xc7" - "\xc8\xc9\xca\xcb\xcc\xcd\xce\xcf" - "\xd0\xd1\xd2\xd3\xd4\xd5\xd6\xd7" - "\xd8\xd9\xda\xdb\xdc\xdd\xde\xdf" - "\xe0\xe1\xe2\xe3\xe4\xe5\xe6\xe7" - "\xe8\xe9\xea\xeb\xec\xed\xee\xef" - "\xf0\xf1\xf2\xf3\xf4\xf5\xf6\xf7" - "\xf8\xf9\xfa\xfb\xfc\xfd\xfe\xff" - "\x00\x01\x02\x03\x04\x05\x06\x07" - "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f" - "\x10\x11\x12\x13\x14\x15\x16\x17" - "\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f" - "\x20\x21\x22\x23\x24\x25\x26\x27" - "\x28\x29\x2a\x2b\x2c\x2d\x2e\x2f" - "\x30\x31\x32\x33\x34\x35\x36\x37" - "\x38\x39\x3a\x3b\x3c\x3d\x3e\x3f" - "\x40\x41\x42\x43\x44\x45\x46\x47" - "\x48\x49\x4a\x4b\x4c\x4d\x4e\x4f" - "\x50\x51\x52\x53\x54\x55\x56\x57" - "\x58\x59\x5a\x5b\x5c\x5d\x5e\x5f" - "\x60\x61\x62\x63\x64\x65\x66\x67" - "\x68\x69\x6a\x6b\x6c\x6d\x6e\x6f" - "\x70\x71\x72\x73\x74\x75\x76\x77" - "\x78\x79\x7a\x7b\x7c\x7d\x7e\x7f" - "\x80\x81\x82\x83\x84\x85\x86\x87" - "\x88\x89\x8a\x8b\x8c\x8d\x8e\x8f" - "\x90\x91\x92\x93\x94\x95\x96\x97" - "\x98\x99\x9a\x9b\x9c\x9d\x9e\x9f" - "\xa0\xa1\xa2\xa3\xa4\xa5\xa6\xa7" - "\xa8\xa9\xaa\xab\xac\xad\xae\xaf" - "\xb0\xb1\xb2\xb3\xb4\xb5\xb6\xb7" - "\xb8\xb9\xba\xbb\xbc\xbd\xbe\xbf" - "\xc0\xc1\xc2\xc3\xc4\xc5\xc6\xc7" - "\xc8\xc9\xca\xcb\xcc\xcd\xce\xcf" - "\xd0\xd1\xd2\xd3\xd4\xd5\xd6\xd7" - "\xd8\xd9\xda\xdb\xdc\xdd\xde\xdf" - "\xe0\xe1\xe2\xe3\xe4\xe5\xe6\xe7" - "\xe8\xe9\xea\xeb\xec\xed\xee\xef" - "\xf0\xf1\xf2\xf3\xf4\xf5\xf6\xf7" - "\xf8\xf9\xfa\xfb\xfc\xfd\xfe\xff", - .ctext = "\xc5\x85\x2a\x4b\x73\xe4\xf6\xf1" - "\x7e\xf9\xf6\xe9\xa3\x73\x36\xcb" - "\xaa\xb6\x22\xb0\x24\x6e\x3d\x73" - 
"\x92\x99\xde\xd3\x76\xed\xcd\x63" - "\x64\x3a\x22\x57\xc1\x43\x49\xd4" - "\x79\x36\x31\x19\x62\xae\x10\x7e" - "\x7d\xcf\x7a\xe2\x6b\xce\x27\xfa" - "\xdc\x3d\xd9\x83\xd3\x42\x4c\xe0" - "\x1b\xd6\x1d\x1a\x6f\xd2\x03\x00" - "\xfc\x81\x99\x8a\x14\x62\xf5\x7e" - "\x0d\xe7\x12\xe8\x17\x9d\x0b\xec" - "\xe2\xf7\xc9\xa7\x63\xd1\x79\xb6" - "\x62\x62\x37\xfe\x0a\x4c\x4a\x37" - "\x70\xc7\x5e\x96\x5f\xbc\x8e\x9e" - "\x85\x3c\x4f\x26\x64\x85\xbc\x68" - "\xb0\xe0\x86\x5e\x26\x41\xce\x11" - "\x50\xda\x97\x14\xe9\x9e\xc7\x6d" - "\x3b\xdc\x43\xde\x2b\x27\x69\x7d" - "\xfc\xb0\x28\xbd\x8f\xb1\xc6\x31" - "\x14\x4d\xf0\x74\x37\xfd\x07\x25" - "\x96\x55\xe5\xfc\x9e\x27\x2a\x74" - "\x1b\x83\x4d\x15\x83\xac\x57\xa0" - "\xac\xa5\xd0\x38\xef\x19\x56\x53" - "\x25\x4b\xfc\xce\x04\x23\xe5\x6b" - "\xf6\xc6\x6c\x32\x0b\xb3\x12\xc5" - "\xed\x22\x34\x1c\x5d\xed\x17\x06" - "\x36\xa3\xe6\x77\xb9\x97\x46\xb8" - "\xe9\x3f\x7e\xc7\xbc\x13\x5c\xdc" - "\x6e\x3f\x04\x5e\xd1\x59\xa5\x82" - "\x35\x91\x3d\x1b\xe4\x97\x9f\x92" - "\x1c\x5e\x5f\x6f\x41\xd4\x62\xa1" - "\x8d\x39\xfc\x42\xfb\x38\x80\xb9" - "\x0a\xe3\xcc\x6a\x93\xd9\x7a\xb1" - "\xe9\x69\xaf\x0a\x6b\x75\x38\xa7" - "\xa1\xbf\xf7\xda\x95\x93\x4b\x78" - "\x19\xf5\x94\xf9\xd2\x00\x33\x37" - "\xcf\xf5\x9e\x9c\xf3\xcc\xa6\xee" - "\x42\xb2\x9e\x2c\x5f\x48\x23\x26" - "\x15\x25\x17\x03\x3d\xfe\x2c\xfc" - "\xeb\xba\xda\xe0\x00\x05\xb6\xa6" - "\x07\xb3\xe8\x36\x5b\xec\x5b\xbf" - "\xd6\x5b\x00\x74\xc6\x97\xf1\x6a" - "\x49\xa1\xc3\xfa\x10\x52\xb9\x14" - "\xad\xb7\x73\xf8\x78\x12\xc8\x59" - "\x17\x80\x4c\x57\x39\xf1\x6d\x80" - "\x25\x77\x0f\x5e\x7d\xf0\xaf\x21" - "\xec\xce\xb7\xc8\x02\x8a\xed\x53" - "\x2c\x25\x68\x2e\x1f\x85\x5e\x67" - "\xd1\x07\x7a\x3a\x89\x08\xe0\x34" - "\xdc\xdb\x26\xb4\x6b\x77\xfc\x40" - "\x31\x15\x72\xa0\xf0\x73\xd9\x3b" - "\xd5\xdb\xfe\xfc\x8f\xa9\x44\xa2" - "\x09\x9f\xc6\x33\xe5\xe2\x88\xe8" - "\xf3\xf0\x1a\xf4\xce\x12\x0f\xd6" - "\xf7\x36\xe6\xa4\xf4\x7a\x10\x58" - "\xcc\x1f\x48\x49\x65\x47\x75\xe9" - "\x28\xe1\x65\x7b\xf2\xc4\xb5\x07" - 
"\xf2\xec\x76\xd8\x8f\x09\xf3\x16" - "\xa1\x51\x89\x3b\xeb\x96\x42\xac" - "\x65\xe0\x67\x63\x29\xdc\xb4\x7d" - "\xf2\x41\x51\x6a\xcb\xde\x3c\xfb" - "\x66\x8d\x13\xca\xe0\x59\x2a\x00" - "\xc9\x53\x4c\xe6\x9e\xe2\x73\xd5" - "\x67\x19\xb2\xbd\x9a\x63\xd7\x5c", - .len = 512, - .also_non_np = 1, - .np = 3, - .tap = { 512 - 20, 4, 16 }, - } -}; - -static const struct cipher_testvec speck64_tv_template[] = { - { /* Speck64/96 */ - .key = "\x00\x01\x02\x03\x08\x09\x0a\x0b" - "\x10\x11\x12\x13", - .klen = 12, - .ptext = "\x65\x61\x6e\x73\x20\x46\x61\x74", - .ctext = "\x6c\x94\x75\x41\xec\x52\x79\x9f", - .len = 8, - }, { /* Speck64/128 */ - .key = "\x00\x01\x02\x03\x08\x09\x0a\x0b" - "\x10\x11\x12\x13\x18\x19\x1a\x1b", - .klen = 16, - .ptext = "\x2d\x43\x75\x74\x74\x65\x72\x3b", - .ctext = "\x8b\x02\x4e\x45\x48\xa5\x6f\x8c", - .len = 8, - }, -}; - -/* - * Speck64-XTS test vectors, taken from the AES-XTS test vectors with the - * ciphertext recomputed with Speck64 as the cipher, and key lengths adjusted - */ -static const struct cipher_testvec speck64_xts_tv_template[] = { - { - .key = "\x00\x00\x00\x00\x00\x00\x00\x00" - "\x00\x00\x00\x00\x00\x00\x00\x00" - "\x00\x00\x00\x00\x00\x00\x00\x00", - .klen = 24, - .iv = "\x00\x00\x00\x00\x00\x00\x00\x00" - "\x00\x00\x00\x00\x00\x00\x00\x00", - .ptext = "\x00\x00\x00\x00\x00\x00\x00\x00" - "\x00\x00\x00\x00\x00\x00\x00\x00" - "\x00\x00\x00\x00\x00\x00\x00\x00" - "\x00\x00\x00\x00\x00\x00\x00\x00", - .ctext = "\x84\xaf\x54\x07\x19\xd4\x7c\xa6" - "\xe4\xfe\xdf\xc4\x1f\x34\xc3\xc2" - "\x80\xf5\x72\xe7\xcd\xf0\x99\x22" - "\x35\xa7\x2f\x06\xef\xdc\x51\xaa", - .len = 32, - }, { - .key = "\x11\x11\x11\x11\x11\x11\x11\x11" - "\x11\x11\x11\x11\x11\x11\x11\x11" - "\x22\x22\x22\x22\x22\x22\x22\x22", - .klen = 24, - .iv = "\x33\x33\x33\x33\x33\x00\x00\x00" - "\x00\x00\x00\x00\x00\x00\x00\x00", - .ptext = "\x44\x44\x44\x44\x44\x44\x44\x44" - "\x44\x44\x44\x44\x44\x44\x44\x44" - "\x44\x44\x44\x44\x44\x44\x44\x44" - 
"\x44\x44\x44\x44\x44\x44\x44\x44", - .ctext = "\x12\x56\x73\xcd\x15\x87\xa8\x59" - "\xcf\x84\xae\xd9\x1c\x66\xd6\x9f" - "\xb3\x12\x69\x7e\x36\xeb\x52\xff" - "\x62\xdd\xba\x90\xb3\xe1\xee\x99", - .len = 32, - }, { - .key = "\xff\xfe\xfd\xfc\xfb\xfa\xf9\xf8" - "\xf7\xf6\xf5\xf4\xf3\xf2\xf1\xf0" - "\x22\x22\x22\x22\x22\x22\x22\x22", - .klen = 24, - .iv = "\x33\x33\x33\x33\x33\x00\x00\x00" - "\x00\x00\x00\x00\x00\x00\x00\x00", - .ptext = "\x44\x44\x44\x44\x44\x44\x44\x44" - "\x44\x44\x44\x44\x44\x44\x44\x44" - "\x44\x44\x44\x44\x44\x44\x44\x44" - "\x44\x44\x44\x44\x44\x44\x44\x44", - .ctext = "\x15\x1b\xe4\x2c\xa2\x5a\x2d\x2c" - "\x27\x36\xc0\xbf\x5d\xea\x36\x37" - "\x2d\x1a\x88\xbc\x66\xb5\xd0\x0b" - "\xa1\xbc\x19\xb2\x0f\x3b\x75\x34", - .len = 32, - }, { - .key = "\x27\x18\x28\x18\x28\x45\x90\x45" - "\x23\x53\x60\x28\x74\x71\x35\x26" - "\x31\x41\x59\x26\x53\x58\x97\x93", - .klen = 24, - .iv = "\x00\x00\x00\x00\x00\x00\x00\x00" - "\x00\x00\x00\x00\x00\x00\x00\x00", - .ptext = "\x00\x01\x02\x03\x04\x05\x06\x07" - "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f" - "\x10\x11\x12\x13\x14\x15\x16\x17" - "\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f" - "\x20\x21\x22\x23\x24\x25\x26\x27" - "\x28\x29\x2a\x2b\x2c\x2d\x2e\x2f" - "\x30\x31\x32\x33\x34\x35\x36\x37" - "\x38\x39\x3a\x3b\x3c\x3d\x3e\x3f" - "\x40\x41\x42\x43\x44\x45\x46\x47" - "\x48\x49\x4a\x4b\x4c\x4d\x4e\x4f" - "\x50\x51\x52\x53\x54\x55\x56\x57" - "\x58\x59\x5a\x5b\x5c\x5d\x5e\x5f" - "\x60\x61\x62\x63\x64\x65\x66\x67" - "\x68\x69\x6a\x6b\x6c\x6d\x6e\x6f" - "\x70\x71\x72\x73\x74\x75\x76\x77" - "\x78\x79\x7a\x7b\x7c\x7d\x7e\x7f" - "\x80\x81\x82\x83\x84\x85\x86\x87" - "\x88\x89\x8a\x8b\x8c\x8d\x8e\x8f" - "\x90\x91\x92\x93\x94\x95\x96\x97" - "\x98\x99\x9a\x9b\x9c\x9d\x9e\x9f" - "\xa0\xa1\xa2\xa3\xa4\xa5\xa6\xa7" - "\xa8\xa9\xaa\xab\xac\xad\xae\xaf" - "\xb0\xb1\xb2\xb3\xb4\xb5\xb6\xb7" - "\xb8\xb9\xba\xbb\xbc\xbd\xbe\xbf" - "\xc0\xc1\xc2\xc3\xc4\xc5\xc6\xc7" - "\xc8\xc9\xca\xcb\xcc\xcd\xce\xcf" - "\xd0\xd1\xd2\xd3\xd4\xd5\xd6\xd7" - 
"\xd8\xd9\xda\xdb\xdc\xdd\xde\xdf" - "\xe0\xe1\xe2\xe3\xe4\xe5\xe6\xe7" - "\xe8\xe9\xea\xeb\xec\xed\xee\xef" - "\xf0\xf1\xf2\xf3\xf4\xf5\xf6\xf7" - "\xf8\xf9\xfa\xfb\xfc\xfd\xfe\xff" - "\x00\x01\x02\x03\x04\x05\x06\x07" - "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f" - "\x10\x11\x12\x13\x14\x15\x16\x17" - "\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f" - "\x20\x21\x22\x23\x24\x25\x26\x27" - "\x28\x29\x2a\x2b\x2c\x2d\x2e\x2f" - "\x30\x31\x32\x33\x34\x35\x36\x37" - "\x38\x39\x3a\x3b\x3c\x3d\x3e\x3f" - "\x40\x41\x42\x43\x44\x45\x46\x47" - "\x48\x49\x4a\x4b\x4c\x4d\x4e\x4f" - "\x50\x51\x52\x53\x54\x55\x56\x57" - "\x58\x59\x5a\x5b\x5c\x5d\x5e\x5f" - "\x60\x61\x62\x63\x64\x65\x66\x67" - "\x68\x69\x6a\x6b\x6c\x6d\x6e\x6f" - "\x70\x71\x72\x73\x74\x75\x76\x77" - "\x78\x79\x7a\x7b\x7c\x7d\x7e\x7f" - "\x80\x81\x82\x83\x84\x85\x86\x87" - "\x88\x89\x8a\x8b\x8c\x8d\x8e\x8f" - "\x90\x91\x92\x93\x94\x95\x96\x97" - "\x98\x99\x9a\x9b\x9c\x9d\x9e\x9f" - "\xa0\xa1\xa2\xa3\xa4\xa5\xa6\xa7" - "\xa8\xa9\xaa\xab\xac\xad\xae\xaf" - "\xb0\xb1\xb2\xb3\xb4\xb5\xb6\xb7" - "\xb8\xb9\xba\xbb\xbc\xbd\xbe\xbf" - "\xc0\xc1\xc2\xc3\xc4\xc5\xc6\xc7" - "\xc8\xc9\xca\xcb\xcc\xcd\xce\xcf" - "\xd0\xd1\xd2\xd3\xd4\xd5\xd6\xd7" - "\xd8\xd9\xda\xdb\xdc\xdd\xde\xdf" - "\xe0\xe1\xe2\xe3\xe4\xe5\xe6\xe7" - "\xe8\xe9\xea\xeb\xec\xed\xee\xef" - "\xf0\xf1\xf2\xf3\xf4\xf5\xf6\xf7" - "\xf8\xf9\xfa\xfb\xfc\xfd\xfe\xff", - .ctext = "\xaf\xa1\x81\xa6\x32\xbb\x15\x8e" - "\xf8\x95\x2e\xd3\xe6\xee\x7e\x09" - "\x0c\x1a\xf5\x02\x97\x8b\xe3\xb3" - "\x11\xc7\x39\x96\xd0\x95\xf4\x56" - "\xf4\xdd\x03\x38\x01\x44\x2c\xcf" - "\x88\xae\x8e\x3c\xcd\xe7\xaa\x66" - "\xfe\x3d\xc6\xfb\x01\x23\x51\x43" - "\xd5\xd2\x13\x86\x94\x34\xe9\x62" - "\xf9\x89\xe3\xd1\x7b\xbe\xf8\xef" - "\x76\x35\x04\x3f\xdb\x23\x9d\x0b" - "\x85\x42\xb9\x02\xd6\xcc\xdb\x96" - "\xa7\x6b\x27\xb6\xd4\x45\x8f\x7d" - "\xae\xd2\x04\xd5\xda\xc1\x7e\x24" - "\x8c\x73\xbe\x48\x7e\xcf\x65\x28" - "\x29\xe5\xbe\x54\x30\xcb\x46\x95" - "\x4f\x2e\x8a\x36\xc8\x27\xc5\xbe" - 
"\xd0\x1a\xaf\xab\x26\xcd\x9e\x69" - "\xa1\x09\x95\x71\x26\xe9\xc4\xdf" - "\xe6\x31\xc3\x46\xda\xaf\x0b\x41" - "\x1f\xab\xb1\x8e\xd6\xfc\x0b\xb3" - "\x82\xc0\x37\x27\xfc\x91\xa7\x05" - "\xfb\xc5\xdc\x2b\x74\x96\x48\x43" - "\x5d\x9c\x19\x0f\x60\x63\x3a\x1f" - "\x6f\xf0\x03\xbe\x4d\xfd\xc8\x4a" - "\xc6\xa4\x81\x6d\xc3\x12\x2a\x5c" - "\x07\xff\xf3\x72\x74\x48\xb5\x40" - "\x50\xb5\xdd\x90\x43\x31\x18\x15" - "\x7b\xf2\xa6\xdb\x83\xc8\x4b\x4a" - "\x29\x93\x90\x8b\xda\x07\xf0\x35" - "\x6d\x90\x88\x09\x4e\x83\xf5\x5b" - "\x94\x12\xbb\x33\x27\x1d\x3f\x23" - "\x51\xa8\x7c\x07\xa2\xae\x77\xa6" - "\x50\xfd\xcc\xc0\x4f\x80\x7a\x9f" - "\x66\xdd\xcd\x75\x24\x8b\x33\xf7" - "\x20\xdb\x83\x9b\x4f\x11\x63\x6e" - "\xcf\x37\xef\xc9\x11\x01\x5c\x45" - "\x32\x99\x7c\x3c\x9e\x42\x89\xe3" - "\x70\x6d\x15\x9f\xb1\xe6\xb6\x05" - "\xfe\x0c\xb9\x49\x2d\x90\x6d\xcc" - "\x5d\x3f\xc1\xfe\x89\x0a\x2e\x2d" - "\xa0\xa8\x89\x3b\x73\x39\xa5\x94" - "\x4c\xa4\xa6\xbb\xa7\x14\x46\x89" - "\x10\xff\xaf\xef\xca\xdd\x4f\x80" - "\xb3\xdf\x3b\xab\xd4\xe5\x5a\xc7" - "\x33\xca\x00\x8b\x8b\x3f\xea\xec" - "\x68\x8a\xc2\x6d\xfd\xd4\x67\x0f" - "\x22\x31\xe1\x0e\xfe\x5a\x04\xd5" - "\x64\xa3\xf1\x1a\x76\x28\xcc\x35" - "\x36\xa7\x0a\x74\xf7\x1c\x44\x9b" - "\xc7\x1b\x53\x17\x02\xea\xd1\xad" - "\x13\x51\x73\xc0\xa0\xb2\x05\x32" - "\xa8\xa2\x37\x2e\xe1\x7a\x3a\x19" - "\x26\xb4\x6c\x62\x5d\xb3\x1a\x1d" - "\x59\xda\xee\x1a\x22\x18\xda\x0d" - "\x88\x0f\x55\x8b\x72\x62\xfd\xc1" - "\x69\x13\xcd\x0d\x5f\xc1\x09\x52" - "\xee\xd6\xe3\x84\x4d\xee\xf6\x88" - "\xaf\x83\xdc\x76\xf4\xc0\x93\x3f" - "\x4a\x75\x2f\xb0\x0b\x3e\xc4\x54" - "\x7d\x69\x8d\x00\x62\x77\x0d\x14" - "\xbe\x7c\xa6\x7d\xc5\x24\x4f\xf3" - "\x50\xf7\x5f\xf4\xc2\xca\x41\x97" - "\x37\xbe\x75\x74\xcd\xf0\x75\x6e" - "\x25\x23\x94\xbd\xda\x8d\xb0\xd4", - .len = 512, - }, { - .key = "\x27\x18\x28\x18\x28\x45\x90\x45" - "\x23\x53\x60\x28\x74\x71\x35\x26" - "\x62\x49\x77\x57\x24\x70\x93\x69" - "\x99\x59\x57\x49\x66\x96\x76\x27", - .klen = 32, - .iv = 
"\xff\x00\x00\x00\x00\x00\x00\x00" - "\x00\x00\x00\x00\x00\x00\x00\x00", - .ptext = "\x00\x01\x02\x03\x04\x05\x06\x07" - "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f" - "\x10\x11\x12\x13\x14\x15\x16\x17" - "\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f" - "\x20\x21\x22\x23\x24\x25\x26\x27" - "\x28\x29\x2a\x2b\x2c\x2d\x2e\x2f" - "\x30\x31\x32\x33\x34\x35\x36\x37" - "\x38\x39\x3a\x3b\x3c\x3d\x3e\x3f" - "\x40\x41\x42\x43\x44\x45\x46\x47" - "\x48\x49\x4a\x4b\x4c\x4d\x4e\x4f" - "\x50\x51\x52\x53\x54\x55\x56\x57" - "\x58\x59\x5a\x5b\x5c\x5d\x5e\x5f" - "\x60\x61\x62\x63\x64\x65\x66\x67" - "\x68\x69\x6a\x6b\x6c\x6d\x6e\x6f" - "\x70\x71\x72\x73\x74\x75\x76\x77" - "\x78\x79\x7a\x7b\x7c\x7d\x7e\x7f" - "\x80\x81\x82\x83\x84\x85\x86\x87" - "\x88\x89\x8a\x8b\x8c\x8d\x8e\x8f" - "\x90\x91\x92\x93\x94\x95\x96\x97" - "\x98\x99\x9a\x9b\x9c\x9d\x9e\x9f" - "\xa0\xa1\xa2\xa3\xa4\xa5\xa6\xa7" - "\xa8\xa9\xaa\xab\xac\xad\xae\xaf" - "\xb0\xb1\xb2\xb3\xb4\xb5\xb6\xb7" - "\xb8\xb9\xba\xbb\xbc\xbd\xbe\xbf" - "\xc0\xc1\xc2\xc3\xc4\xc5\xc6\xc7" - "\xc8\xc9\xca\xcb\xcc\xcd\xce\xcf" - "\xd0\xd1\xd2\xd3\xd4\xd5\xd6\xd7" - "\xd8\xd9\xda\xdb\xdc\xdd\xde\xdf" - "\xe0\xe1\xe2\xe3\xe4\xe5\xe6\xe7" - "\xe8\xe9\xea\xeb\xec\xed\xee\xef" - "\xf0\xf1\xf2\xf3\xf4\xf5\xf6\xf7" - "\xf8\xf9\xfa\xfb\xfc\xfd\xfe\xff" - "\x00\x01\x02\x03\x04\x05\x06\x07" - "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f" - "\x10\x11\x12\x13\x14\x15\x16\x17" - "\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f" - "\x20\x21\x22\x23\x24\x25\x26\x27" - "\x28\x29\x2a\x2b\x2c\x2d\x2e\x2f" - "\x30\x31\x32\x33\x34\x35\x36\x37" - "\x38\x39\x3a\x3b\x3c\x3d\x3e\x3f" - "\x40\x41\x42\x43\x44\x45\x46\x47" - "\x48\x49\x4a\x4b\x4c\x4d\x4e\x4f" - "\x50\x51\x52\x53\x54\x55\x56\x57" - "\x58\x59\x5a\x5b\x5c\x5d\x5e\x5f" - "\x60\x61\x62\x63\x64\x65\x66\x67" - "\x68\x69\x6a\x6b\x6c\x6d\x6e\x6f" - "\x70\x71\x72\x73\x74\x75\x76\x77" - "\x78\x79\x7a\x7b\x7c\x7d\x7e\x7f" - "\x80\x81\x82\x83\x84\x85\x86\x87" - "\x88\x89\x8a\x8b\x8c\x8d\x8e\x8f" - "\x90\x91\x92\x93\x94\x95\x96\x97" - 
"\x98\x99\x9a\x9b\x9c\x9d\x9e\x9f" - "\xa0\xa1\xa2\xa3\xa4\xa5\xa6\xa7" - "\xa8\xa9\xaa\xab\xac\xad\xae\xaf" - "\xb0\xb1\xb2\xb3\xb4\xb5\xb6\xb7" - "\xb8\xb9\xba\xbb\xbc\xbd\xbe\xbf" - "\xc0\xc1\xc2\xc3\xc4\xc5\xc6\xc7" - "\xc8\xc9\xca\xcb\xcc\xcd\xce\xcf" - "\xd0\xd1\xd2\xd3\xd4\xd5\xd6\xd7" - "\xd8\xd9\xda\xdb\xdc\xdd\xde\xdf" - "\xe0\xe1\xe2\xe3\xe4\xe5\xe6\xe7" - "\xe8\xe9\xea\xeb\xec\xed\xee\xef" - "\xf0\xf1\xf2\xf3\xf4\xf5\xf6\xf7" - "\xf8\xf9\xfa\xfb\xfc\xfd\xfe\xff", - .ctext = "\x55\xed\x71\xd3\x02\x8e\x15\x3b" - "\xc6\x71\x29\x2d\x3e\x89\x9f\x59" - "\x68\x6a\xcc\x8a\x56\x97\xf3\x95" - "\x4e\x51\x08\xda\x2a\xf8\x6f\x3c" - "\x78\x16\xea\x80\xdb\x33\x75\x94" - "\xf9\x29\xc4\x2b\x76\x75\x97\xc7" - "\xf2\x98\x2c\xf9\xff\xc8\xd5\x2b" - "\x18\xf1\xaf\xcf\x7c\xc5\x0b\xee" - "\xad\x3c\x76\x7c\xe6\x27\xa2\x2a" - "\xe4\x66\xe1\xab\xa2\x39\xfc\x7c" - "\xf5\xec\x32\x74\xa3\xb8\x03\x88" - "\x52\xfc\x2e\x56\x3f\xa1\xf0\x9f" - "\x84\x5e\x46\xed\x20\x89\xb6\x44" - "\x8d\xd0\xed\x54\x47\x16\xbe\x95" - "\x8a\xb3\x6b\x72\xc4\x32\x52\x13" - "\x1b\xb0\x82\xbe\xac\xf9\x70\xa6" - "\x44\x18\xdd\x8c\x6e\xca\x6e\x45" - "\x8f\x1e\x10\x07\x57\x25\x98\x7b" - "\x17\x8c\x78\xdd\x80\xa7\xd9\xd8" - "\x63\xaf\xb9\x67\x57\xfd\xbc\xdb" - "\x44\xe9\xc5\x65\xd1\xc7\x3b\xff" - "\x20\xa0\x80\x1a\xc3\x9a\xad\x5e" - "\x5d\x3b\xd3\x07\xd9\xf5\xfd\x3d" - "\x4a\x8b\xa8\xd2\x6e\x7a\x51\x65" - "\x6c\x8e\x95\xe0\x45\xc9\x5f\x4a" - "\x09\x3c\x3d\x71\x7f\x0c\x84\x2a" - "\xc8\x48\x52\x1a\xc2\xd5\xd6\x78" - "\x92\x1e\xa0\x90\x2e\xea\xf0\xf3" - "\xdc\x0f\xb1\xaf\x0d\x9b\x06\x2e" - "\x35\x10\x30\x82\x0d\xe7\xc5\x9b" - "\xde\x44\x18\xbd\x9f\xd1\x45\xa9" - "\x7b\x7a\x4a\xad\x35\x65\x27\xca" - "\xb2\xc3\xd4\x9b\x71\x86\x70\xee" - "\xf1\x89\x3b\x85\x4b\x5b\xaa\xaf" - "\xfc\x42\xc8\x31\x59\xbe\x16\x60" - "\x4f\xf9\xfa\x12\xea\xd0\xa7\x14" - "\xf0\x7a\xf3\xd5\x8d\xbd\x81\xef" - "\x52\x7f\x29\x51\x94\x20\x67\x3c" - "\xd1\xaf\x77\x9f\x22\x5a\x4e\x63" - "\xe7\xff\x73\x25\xd1\xdd\x96\x8a" - 
"\x98\x52\x6d\xf3\xac\x3e\xf2\x18" - "\x6d\xf6\x0a\x29\xa6\x34\x3d\xed" - "\xe3\x27\x0d\x9d\x0a\x02\x44\x7e" - "\x5a\x7e\x67\x0f\x0a\x9e\xd6\xad" - "\x91\xe6\x4d\x81\x8c\x5c\x59\xaa" - "\xfb\xeb\x56\x53\xd2\x7d\x4c\x81" - "\x65\x53\x0f\x41\x11\xbd\x98\x99" - "\xf9\xc6\xfa\x51\x2e\xa3\xdd\x8d" - "\x84\x98\xf9\x34\xed\x33\x2a\x1f" - "\x82\xed\xc1\x73\x98\xd3\x02\xdc" - "\xe6\xc2\x33\x1d\xa2\xb4\xca\x76" - "\x63\x51\x34\x9d\x96\x12\xae\xce" - "\x83\xc9\x76\x5e\xa4\x1b\x53\x37" - "\x17\xd5\xc0\x80\x1d\x62\xf8\x3d" - "\x54\x27\x74\xbb\x10\x86\x57\x46" - "\x68\xe1\xed\x14\xe7\x9d\xfc\x84" - "\x47\xbc\xc2\xf8\x19\x4b\x99\xcf" - "\x7a\xe9\xc4\xb8\x8c\x82\x72\x4d" - "\x7b\x4f\x38\x55\x36\x71\x64\xc1" - "\xfc\x5c\x75\x52\x33\x02\x18\xf8" - "\x17\xe1\x2b\xc2\x43\x39\xbd\x76" - "\x9b\x63\x76\x32\x2f\x19\x72\x10" - "\x9f\x21\x0c\xf1\x66\x50\x7f\xa5" - "\x0d\x1f\x46\xe0\xba\xd3\x2f\x3c", - .len = 512, - .also_non_np = 1, - .np = 3, - .tap = { 512 - 20, 4, 16 }, - } -}; - /* Cast6 test vectors from RFC 2612 */ static const struct cipher_testvec cast6_tv_template[] = { { @@ -12081,6 +11343,82 @@ static const struct cipher_testvec aes_cbc_tv_template[] = { }, }; +static const struct cipher_testvec aes_cfb_tv_template[] = { + { /* From NIST SP800-38A */ + .key = "\x2b\x7e\x15\x16\x28\xae\xd2\xa6" + "\xab\xf7\x15\x88\x09\xcf\x4f\x3c", + .klen = 16, + .iv = "\x00\x01\x02\x03\x04\x05\x06\x07" + "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f", + .ptext = "\x6b\xc1\xbe\xe2\x2e\x40\x9f\x96" + "\xe9\x3d\x7e\x11\x73\x93\x17\x2a" + "\xae\x2d\x8a\x57\x1e\x03\xac\x9c" + "\x9e\xb7\x6f\xac\x45\xaf\x8e\x51" + "\x30\xc8\x1c\x46\xa3\x5c\xe4\x11" + "\xe5\xfb\xc1\x19\x1a\x0a\x52\xef" + "\xf6\x9f\x24\x45\xdf\x4f\x9b\x17" + "\xad\x2b\x41\x7b\xe6\x6c\x37\x10", + .ctext = "\x3b\x3f\xd9\x2e\xb7\x2d\xad\x20" + "\x33\x34\x49\xf8\xe8\x3c\xfb\x4a" + "\xc8\xa6\x45\x37\xa0\xb3\xa9\x3f" + "\xcd\xe3\xcd\xad\x9f\x1c\xe5\x8b" + "\x26\x75\x1f\x67\xa3\xcb\xb1\x40" + "\xb1\x80\x8c\xf1\x87\xa4\xf4\xdf" + 
"\xc0\x4b\x05\x35\x7c\x5d\x1c\x0e" + "\xea\xc4\xc6\x6f\x9f\xf7\xf2\xe6", + .len = 64, + }, { + .key = "\x8e\x73\xb0\xf7\xda\x0e\x64\x52" + "\xc8\x10\xf3\x2b\x80\x90\x79\xe5" + "\x62\xf8\xea\xd2\x52\x2c\x6b\x7b", + .klen = 24, + .iv = "\x00\x01\x02\x03\x04\x05\x06\x07" + "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f", + .ptext = "\x6b\xc1\xbe\xe2\x2e\x40\x9f\x96" + "\xe9\x3d\x7e\x11\x73\x93\x17\x2a" + "\xae\x2d\x8a\x57\x1e\x03\xac\x9c" + "\x9e\xb7\x6f\xac\x45\xaf\x8e\x51" + "\x30\xc8\x1c\x46\xa3\x5c\xe4\x11" + "\xe5\xfb\xc1\x19\x1a\x0a\x52\xef" + "\xf6\x9f\x24\x45\xdf\x4f\x9b\x17" + "\xad\x2b\x41\x7b\xe6\x6c\x37\x10", + .ctext = "\xcd\xc8\x0d\x6f\xdd\xf1\x8c\xab" + "\x34\xc2\x59\x09\xc9\x9a\x41\x74" + "\x67\xce\x7f\x7f\x81\x17\x36\x21" + "\x96\x1a\x2b\x70\x17\x1d\x3d\x7a" + "\x2e\x1e\x8a\x1d\xd5\x9b\x88\xb1" + "\xc8\xe6\x0f\xed\x1e\xfa\xc4\xc9" + "\xc0\x5f\x9f\x9c\xa9\x83\x4f\xa0" + "\x42\xae\x8f\xba\x58\x4b\x09\xff", + .len = 64, + }, { + .key = "\x60\x3d\xeb\x10\x15\xca\x71\xbe" + "\x2b\x73\xae\xf0\x85\x7d\x77\x81" + "\x1f\x35\x2c\x07\x3b\x61\x08\xd7" + "\x2d\x98\x10\xa3\x09\x14\xdf\xf4", + .klen = 32, + .iv = "\x00\x01\x02\x03\x04\x05\x06\x07" + "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f", + .ptext = "\x6b\xc1\xbe\xe2\x2e\x40\x9f\x96" + "\xe9\x3d\x7e\x11\x73\x93\x17\x2a" + "\xae\x2d\x8a\x57\x1e\x03\xac\x9c" + "\x9e\xb7\x6f\xac\x45\xaf\x8e\x51" + "\x30\xc8\x1c\x46\xa3\x5c\xe4\x11" + "\xe5\xfb\xc1\x19\x1a\x0a\x52\xef" + "\xf6\x9f\x24\x45\xdf\x4f\x9b\x17" + "\xad\x2b\x41\x7b\xe6\x6c\x37\x10", + .ctext = "\xdc\x7e\x84\xbf\xda\x79\x16\x4b" + "\x7e\xcd\x84\x86\x98\x5d\x38\x60" + "\x39\xff\xed\x14\x3b\x28\xb1\xc8" + "\x32\x11\x3c\x63\x31\xe5\x40\x7b" + "\xdf\x10\x13\x24\x15\xe5\x4b\x92" + "\xa1\x3e\xd0\xa8\x26\x7a\xe2\xf9" + "\x75\xa3\x85\x74\x1a\xb9\xce\xf8" + "\x20\x31\x62\x3d\x55\xb1\xe4\x71", + .len = 64, + }, +}; + static const struct aead_testvec hmac_md5_ecb_cipher_null_enc_tv_template[] = { { /* Input data from RFC 2410 Case 1 */ #ifdef __LITTLE_ENDIAN diff --git a/drivers/Kconfig 
b/drivers/Kconfig index ab4d43923c4dd..a3070e9670ea1 100644 --- a/drivers/Kconfig +++ b/drivers/Kconfig @@ -79,6 +79,8 @@ source "drivers/hwmon/Kconfig" source "drivers/thermal/Kconfig" +source "drivers/trusty/Kconfig" + source "drivers/watchdog/Kconfig" source "drivers/ssb/Kconfig" @@ -212,6 +214,7 @@ source "drivers/tee/Kconfig" source "drivers/mux/Kconfig" source "drivers/opp/Kconfig" +source "drivers/sdw/Kconfig" source "drivers/visorbus/Kconfig" @@ -219,4 +222,9 @@ source "drivers/siox/Kconfig" source "drivers/slimbus/Kconfig" +source "drivers/vbs/Kconfig" + +source "drivers/acrn/Kconfig" + +source "drivers/vhm/Kconfig" endmenu diff --git a/drivers/Makefile b/drivers/Makefile index 578f469f72fbb..6c5071ab26607 100644 --- a/drivers/Makefile +++ b/drivers/Makefile @@ -58,6 +58,8 @@ obj-y += char/ # iommu/ comes before gpu as gpu are using iommu controllers obj-$(CONFIG_IOMMU_SUPPORT) += iommu/ +obj-y += mmc/ + # gpu/ comes after char for AGP vs DRM startup and after iommu obj-y += gpu/ @@ -118,6 +120,7 @@ obj-$(CONFIG_W1) += w1/ obj-y += power/ obj-$(CONFIG_HWMON) += hwmon/ obj-$(CONFIG_THERMAL) += thermal/ +obj-$(CONFIG_TRUSTY) += trusty/ obj-$(CONFIG_WATCHDOG) += watchdog/ obj-$(CONFIG_MD) += md/ obj-$(CONFIG_BT) += bluetooth/ @@ -128,7 +131,6 @@ obj-$(CONFIG_EISA) += eisa/ obj-$(CONFIG_PM_OPP) += opp/ obj-$(CONFIG_CPU_FREQ) += cpufreq/ obj-$(CONFIG_CPU_IDLE) += cpuidle/ -obj-y += mmc/ obj-$(CONFIG_MEMSTICK) += memstick/ obj-$(CONFIG_NEW_LEDS) += leds/ obj-$(CONFIG_INFINIBAND) += infiniband/ @@ -146,6 +148,7 @@ obj-$(CONFIG_OF) += of/ obj-$(CONFIG_SSB) += ssb/ obj-$(CONFIG_BCMA) += bcma/ obj-$(CONFIG_VHOST_RING) += vhost/ +obj-$(CONFIG_VBS) += vbs/ obj-$(CONFIG_VHOST) += vhost/ obj-$(CONFIG_VLYNQ) += vlynq/ obj-$(CONFIG_STAGING) += staging/ @@ -183,6 +186,9 @@ obj-$(CONFIG_FPGA) += fpga/ obj-$(CONFIG_FSI) += fsi/ obj-$(CONFIG_TEE) += tee/ obj-$(CONFIG_MULTIPLEXER) += mux/ +obj-$(CONFIG_ACRN) += acrn/ +obj-$(CONFIG_ACRN_VHM) += vhm/ 
obj-$(CONFIG_UNISYS_VISORBUS) += visorbus/ obj-$(CONFIG_SIOX) += siox/ obj-$(CONFIG_GNSS) += gnss/ +obj-$(CONFIG_SDW) += sdw/ diff --git a/drivers/acpi/acpi_lpit.c b/drivers/acpi/acpi_lpit.c index cf4fc01611641..e43cb71b69726 100644 --- a/drivers/acpi/acpi_lpit.c +++ b/drivers/acpi/acpi_lpit.c @@ -117,11 +117,17 @@ static void lpit_update_residency(struct lpit_residency_info *info, if (!info->iomem_addr) return; + if (!(acpi_gbl_FADT.flags & ACPI_FADT_LOW_POWER_S0)) + return; + /* Silently fail, if cpuidle attribute group is not present */ sysfs_add_file_to_group(&cpu_subsys.dev_root->kobj, &dev_attr_low_power_idle_system_residency_us.attr, "cpuidle"); } else if (info->gaddr.space_id == ACPI_ADR_SPACE_FIXED_HARDWARE) { + if (!(acpi_gbl_FADT.flags & ACPI_FADT_LOW_POWER_S0)) + return; + /* Silently fail, if cpuidle attribute group is not present */ sysfs_add_file_to_group(&cpu_subsys.dev_root->kobj, &dev_attr_low_power_idle_cpu_residency_us.attr, diff --git a/drivers/acpi/acpi_lpss.c b/drivers/acpi/acpi_lpss.c index bf64cfa30febf..969bf8d515c02 100644 --- a/drivers/acpi/acpi_lpss.c +++ b/drivers/acpi/acpi_lpss.c @@ -327,9 +327,11 @@ static const struct acpi_device_id acpi_lpss_device_ids[] = { { "INT33FC", }, /* Braswell LPSS devices */ + { "80862286", LPSS_ADDR(lpss_dma_desc) }, { "80862288", LPSS_ADDR(bsw_pwm_dev_desc) }, { "8086228A", LPSS_ADDR(bsw_uart_dev_desc) }, { "8086228E", LPSS_ADDR(bsw_spi_dev_desc) }, + { "808622C0", LPSS_ADDR(lpss_dma_desc) }, { "808622C1", LPSS_ADDR(bsw_i2c_dev_desc) }, /* Broadwell LPSS devices */ diff --git a/drivers/acpi/acpi_platform.c b/drivers/acpi/acpi_platform.c index eaa60c94205a8..1f32caa87686e 100644 --- a/drivers/acpi/acpi_platform.c +++ b/drivers/acpi/acpi_platform.c @@ -30,6 +30,7 @@ static const struct acpi_device_id forbidden_id_list[] = { {"PNP0200", 0}, /* AT DMA Controller */ {"ACPI0009", 0}, /* IOxAPIC */ {"ACPI000A", 0}, /* IOAPIC */ + {"SMB0001", 0}, /* ACPI SMBUS virtual device */ {"", 0}, }; diff --git 
a/drivers/acpi/acpi_processor.c b/drivers/acpi/acpi_processor.c index 449d86d39965e..fc447410ae4d1 100644 --- a/drivers/acpi/acpi_processor.c +++ b/drivers/acpi/acpi_processor.c @@ -643,7 +643,7 @@ static acpi_status __init acpi_processor_ids_walk(acpi_handle handle, status = acpi_get_type(handle, &acpi_type); if (ACPI_FAILURE(status)) - return false; + return status; switch (acpi_type) { case ACPI_TYPE_PROCESSOR: @@ -663,11 +663,12 @@ static acpi_status __init acpi_processor_ids_walk(acpi_handle handle, } processor_validated_ids_update(uid); - return true; + return AE_OK; err: + /* Exit on error, but don't abort the namespace walk */ acpi_handle_info(handle, "Invalid processor object\n"); - return false; + return AE_OK; } diff --git a/drivers/acpi/acpica/dsopcode.c b/drivers/acpi/acpica/dsopcode.c index e9fb0bf3c8d25..78f9de260d5f1 100644 --- a/drivers/acpi/acpica/dsopcode.c +++ b/drivers/acpi/acpica/dsopcode.c @@ -417,6 +417,10 @@ acpi_ds_eval_region_operands(struct acpi_walk_state *walk_state, ACPI_FORMAT_UINT64(obj_desc->region.address), obj_desc->region.length)); + status = acpi_ut_add_address_range(obj_desc->region.space_id, + obj_desc->region.address, + obj_desc->region.length, node); + /* Now the address and length are valid for this opregion */ obj_desc->region.flags |= AOPOBJ_DATA_VALID; diff --git a/drivers/acpi/acpica/psloop.c b/drivers/acpi/acpica/psloop.c index 34fc2f7476edd..b0789c483b0f4 100644 --- a/drivers/acpi/acpica/psloop.c +++ b/drivers/acpi/acpica/psloop.c @@ -417,6 +417,7 @@ acpi_status acpi_ps_parse_loop(struct acpi_walk_state *walk_state) union acpi_parse_object *op = NULL; /* current op */ struct acpi_parse_state *parser_state; u8 *aml_op_start = NULL; + u8 opcode_length; ACPI_FUNCTION_TRACE_PTR(ps_parse_loop, walk_state); @@ -540,8 +541,19 @@ acpi_status acpi_ps_parse_loop(struct acpi_walk_state *walk_state) "Skip parsing opcode %s", acpi_ps_get_opcode_name (walk_state->opcode))); + + /* + * Determine the opcode length before skipping 
the opcode. + * An opcode can be 1 byte or 2 bytes in length. + */ + opcode_length = 1; + if ((walk_state->opcode & 0xFF00) == + AML_EXTENDED_OPCODE) { + opcode_length = 2; + } walk_state->parser_state.aml = - walk_state->aml + 1; + walk_state->aml + opcode_length; + walk_state->parser_state.aml = acpi_ps_get_next_package_end (&walk_state->parser_state); diff --git a/drivers/acpi/arm64/iort.c b/drivers/acpi/arm64/iort.c index 08f26db2da7e1..e48eebc27b81b 100644 --- a/drivers/acpi/arm64/iort.c +++ b/drivers/acpi/arm64/iort.c @@ -700,7 +700,7 @@ static void iort_set_device_domain(struct device *dev, */ static struct irq_domain *iort_get_platform_device_domain(struct device *dev) { - struct acpi_iort_node *node, *msi_parent; + struct acpi_iort_node *node, *msi_parent = NULL; struct fwnode_handle *iort_fwnode; struct acpi_iort_its_group *its; int i; @@ -951,9 +951,10 @@ static int rc_dma_get_range(struct device *dev, u64 *size) { struct acpi_iort_node *node; struct acpi_iort_root_complex *rc; + struct pci_bus *pbus = to_pci_dev(dev)->bus; node = iort_scan_node(ACPI_IORT_NODE_PCI_ROOT_COMPLEX, - iort_match_node_callback, dev); + iort_match_node_callback, &pbus->dev); if (!node || node->revision < 1) return -ENODEV; diff --git a/drivers/acpi/battery.c b/drivers/acpi/battery.c index cb97b6105f528..1cb33c95388ac 100644 --- a/drivers/acpi/battery.c +++ b/drivers/acpi/battery.c @@ -563,7 +563,7 @@ static int acpi_battery_get_state(struct acpi_battery *battery) if (!acpi_battery_present(battery)) return 0; - if (battery->update_time && + if (cache_time && time_before(jiffies, battery->update_time + msecs_to_jiffies(cache_time))) return 0; diff --git a/drivers/acpi/nfit/core.c b/drivers/acpi/nfit/core.c index b072cfc5f20ee..ea59c01ce8db0 100644 --- a/drivers/acpi/nfit/core.c +++ b/drivers/acpi/nfit/core.c @@ -391,6 +391,32 @@ static u8 nfit_dsm_revid(unsigned family, unsigned func) return id; } +static int cmd_to_func(struct nfit_mem *nfit_mem, unsigned int cmd, + struct 
nd_cmd_pkg *call_pkg) +{ + if (call_pkg) { + int i; + + if (nfit_mem->family != call_pkg->nd_family) + return -ENOTTY; + + for (i = 0; i < ARRAY_SIZE(call_pkg->nd_reserved2); i++) + if (call_pkg->nd_reserved2[i]) + return -EINVAL; + return call_pkg->nd_command; + } + + /* Linux ND commands == NVDIMM_FAMILY_INTEL function numbers */ + if (nfit_mem->family == NVDIMM_FAMILY_INTEL) + return cmd; + + /* + * Force function number validation to fail since 0 is never + * published as a valid function in dsm_mask. + */ + return 0; +} + int acpi_nfit_ctl(struct nvdimm_bus_descriptor *nd_desc, struct nvdimm *nvdimm, unsigned int cmd, void *buf, unsigned int buf_len, int *cmd_rc) { @@ -404,30 +430,23 @@ int acpi_nfit_ctl(struct nvdimm_bus_descriptor *nd_desc, struct nvdimm *nvdimm, unsigned long cmd_mask, dsm_mask; u32 offset, fw_status = 0; acpi_handle handle; - unsigned int func; const guid_t *guid; - int rc, i; + int func, rc, i; if (cmd_rc) *cmd_rc = -EINVAL; - func = cmd; - if (cmd == ND_CMD_CALL) { - call_pkg = buf; - func = call_pkg->nd_command; - - for (i = 0; i < ARRAY_SIZE(call_pkg->nd_reserved2); i++) - if (call_pkg->nd_reserved2[i]) - return -EINVAL; - } if (nvdimm) { struct acpi_device *adev = nfit_mem->adev; if (!adev) return -ENOTTY; - if (call_pkg && nfit_mem->family != call_pkg->nd_family) - return -ENOTTY; + if (cmd == ND_CMD_CALL) + call_pkg = buf; + func = cmd_to_func(nfit_mem, cmd, call_pkg); + if (func < 0) + return func; dimm_name = nvdimm_name(nvdimm); cmd_name = nvdimm_cmd_name(cmd); cmd_mask = nvdimm_cmd_mask(nvdimm); @@ -438,6 +457,7 @@ int acpi_nfit_ctl(struct nvdimm_bus_descriptor *nd_desc, struct nvdimm *nvdimm, } else { struct acpi_device *adev = to_acpi_dev(acpi_desc); + func = cmd; cmd_name = nvdimm_bus_cmd_name(cmd); cmd_mask = nd_desc->cmd_mask; dsm_mask = cmd_mask; @@ -452,7 +472,13 @@ int acpi_nfit_ctl(struct nvdimm_bus_descriptor *nd_desc, struct nvdimm *nvdimm, if (!desc || (cmd && (desc->out_num + desc->in_num == 0))) return -ENOTTY; - 
if (!test_bit(cmd, &cmd_mask) || !test_bit(func, &dsm_mask)) + /* + * Check for a valid command. For ND_CMD_CALL, we also have to + * make sure that the DSM function is supported. + */ + if (cmd == ND_CMD_CALL && !test_bit(func, &dsm_mask)) + return -ENOTTY; + else if (!test_bit(cmd, &cmd_mask)) return -ENOTTY; in_obj.type = ACPI_TYPE_PACKAGE; @@ -1303,7 +1329,7 @@ static ssize_t scrub_store(struct device *dev, if (nd_desc) { struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc); - rc = acpi_nfit_ars_rescan(acpi_desc, 0); + rc = acpi_nfit_ars_rescan(acpi_desc, ARS_REQ_LONG); } device_unlock(dev); if (rc) @@ -1764,6 +1790,13 @@ static int acpi_nfit_add_dimm(struct acpi_nfit_desc *acpi_desc, return 0; } + /* + * Function 0 is the command interrogation function, don't + * export it to potential userspace use, and enable it to be + * used as an error value in acpi_nfit_ctl(). + */ + dsm_mask &= ~1UL; + guid = to_nfit_uuid(nfit_mem->family); for_each_set_bit(i, &dsm_mask, BITS_PER_LONG) if (acpi_check_dsm(adev_dimm->handle, guid, @@ -2466,7 +2499,8 @@ static int ars_get_cap(struct acpi_nfit_desc *acpi_desc, return cmd_rc; } -static int ars_start(struct acpi_nfit_desc *acpi_desc, struct nfit_spa *nfit_spa) +static int ars_start(struct acpi_nfit_desc *acpi_desc, + struct nfit_spa *nfit_spa, enum nfit_ars_state req_type) { int rc; int cmd_rc; @@ -2477,7 +2511,7 @@ static int ars_start(struct acpi_nfit_desc *acpi_desc, struct nfit_spa *nfit_spa memset(&ars_start, 0, sizeof(ars_start)); ars_start.address = spa->address; ars_start.length = spa->length; - if (test_bit(ARS_SHORT, &nfit_spa->ars_state)) + if (req_type == ARS_REQ_SHORT) ars_start.flags = ND_ARS_RETURN_PREV_DATA; if (nfit_spa_type(spa) == NFIT_SPA_PM) ars_start.type = ND_ARS_PERSISTENT; @@ -2534,6 +2568,15 @@ static void ars_complete(struct acpi_nfit_desc *acpi_desc, struct nd_region *nd_region = nfit_spa->nd_region; struct device *dev; + lockdep_assert_held(&acpi_desc->init_mutex); + /* + * Only advance the 
ARS state for ARS runs initiated by the + * kernel, ignore ARS results from BIOS initiated runs for scrub + * completion tracking. + */ + if (acpi_desc->scrub_spa != nfit_spa) + return; + if ((ars_status->address >= spa->address && ars_status->address < spa->address + spa->length) || (ars_status->address < spa->address)) { @@ -2553,28 +2596,13 @@ static void ars_complete(struct acpi_nfit_desc *acpi_desc, } else return; - if (test_bit(ARS_DONE, &nfit_spa->ars_state)) - return; - - if (!test_and_clear_bit(ARS_REQ, &nfit_spa->ars_state)) - return; - + acpi_desc->scrub_spa = NULL; if (nd_region) { dev = nd_region_dev(nd_region); nvdimm_region_notify(nd_region, NVDIMM_REVALIDATE_POISON); } else dev = acpi_desc->dev; - - dev_dbg(dev, "ARS: range %d %s complete\n", spa->range_index, - test_bit(ARS_SHORT, &nfit_spa->ars_state) - ? "short" : "long"); - clear_bit(ARS_SHORT, &nfit_spa->ars_state); - if (test_and_clear_bit(ARS_REQ_REDO, &nfit_spa->ars_state)) { - set_bit(ARS_SHORT, &nfit_spa->ars_state); - set_bit(ARS_REQ, &nfit_spa->ars_state); - dev_dbg(dev, "ARS: processing scrub request received while in progress\n"); - } else - set_bit(ARS_DONE, &nfit_spa->ars_state); + dev_dbg(dev, "ARS: range %d complete\n", spa->range_index); } static int ars_status_process_records(struct acpi_nfit_desc *acpi_desc) @@ -2850,51 +2878,60 @@ static int acpi_nfit_query_poison(struct acpi_nfit_desc *acpi_desc) return rc; if (ars_status_process_records(acpi_desc)) - return -ENOMEM; + dev_err(acpi_desc->dev, "Failed to process ARS records\n"); - return 0; + return rc; } -static int ars_register(struct acpi_nfit_desc *acpi_desc, struct nfit_spa *nfit_spa, - int *query_rc) +static int ars_register(struct acpi_nfit_desc *acpi_desc, + struct nfit_spa *nfit_spa) { - int rc = *query_rc; + int rc; - if (no_init_ars) + if (no_init_ars || test_bit(ARS_FAILED, &nfit_spa->ars_state)) return acpi_nfit_register_region(acpi_desc, nfit_spa); - set_bit(ARS_REQ, &nfit_spa->ars_state); - set_bit(ARS_SHORT, 
&nfit_spa->ars_state); + set_bit(ARS_REQ_SHORT, &nfit_spa->ars_state); + set_bit(ARS_REQ_LONG, &nfit_spa->ars_state); - switch (rc) { + switch (acpi_nfit_query_poison(acpi_desc)) { case 0: case -EAGAIN: - rc = ars_start(acpi_desc, nfit_spa); - if (rc == -EBUSY) { - *query_rc = rc; + rc = ars_start(acpi_desc, nfit_spa, ARS_REQ_SHORT); + /* shouldn't happen, try again later */ + if (rc == -EBUSY) break; - } else if (rc == 0) { - rc = acpi_nfit_query_poison(acpi_desc); - } else { + if (rc) { set_bit(ARS_FAILED, &nfit_spa->ars_state); break; } - if (rc == -EAGAIN) - clear_bit(ARS_SHORT, &nfit_spa->ars_state); - else if (rc == 0) - ars_complete(acpi_desc, nfit_spa); + clear_bit(ARS_REQ_SHORT, &nfit_spa->ars_state); + rc = acpi_nfit_query_poison(acpi_desc); + if (rc) + break; + acpi_desc->scrub_spa = nfit_spa; + ars_complete(acpi_desc, nfit_spa); + /* + * If ars_complete() says we didn't complete the + * short scrub, we'll try again with a long + * request. + */ + acpi_desc->scrub_spa = NULL; break; case -EBUSY: + case -ENOMEM: case -ENOSPC: + /* + * BIOS was using ARS, wait for it to complete (or + * resources to become available) and then perform our + * own scrubs. 
+ */ break; default: set_bit(ARS_FAILED, &nfit_spa->ars_state); break; } - if (test_and_clear_bit(ARS_DONE, &nfit_spa->ars_state)) - set_bit(ARS_REQ, &nfit_spa->ars_state); - return acpi_nfit_register_region(acpi_desc, nfit_spa); } @@ -2916,6 +2953,8 @@ static unsigned int __acpi_nfit_scrub(struct acpi_nfit_desc *acpi_desc, struct device *dev = acpi_desc->dev; struct nfit_spa *nfit_spa; + lockdep_assert_held(&acpi_desc->init_mutex); + if (acpi_desc->cancel) return 0; @@ -2939,21 +2978,49 @@ static unsigned int __acpi_nfit_scrub(struct acpi_nfit_desc *acpi_desc, ars_complete_all(acpi_desc); list_for_each_entry(nfit_spa, &acpi_desc->spas, list) { + enum nfit_ars_state req_type; + int rc; + if (test_bit(ARS_FAILED, &nfit_spa->ars_state)) continue; - if (test_bit(ARS_REQ, &nfit_spa->ars_state)) { - int rc = ars_start(acpi_desc, nfit_spa); - - clear_bit(ARS_DONE, &nfit_spa->ars_state); - dev = nd_region_dev(nfit_spa->nd_region); - dev_dbg(dev, "ARS: range %d ARS start (%d)\n", - nfit_spa->spa->range_index, rc); - if (rc == 0 || rc == -EBUSY) - return 1; - dev_err(dev, "ARS: range %d ARS failed (%d)\n", - nfit_spa->spa->range_index, rc); - set_bit(ARS_FAILED, &nfit_spa->ars_state); + + /* prefer short ARS requests first */ + if (test_bit(ARS_REQ_SHORT, &nfit_spa->ars_state)) + req_type = ARS_REQ_SHORT; + else if (test_bit(ARS_REQ_LONG, &nfit_spa->ars_state)) + req_type = ARS_REQ_LONG; + else + continue; + rc = ars_start(acpi_desc, nfit_spa, req_type); + + dev = nd_region_dev(nfit_spa->nd_region); + dev_dbg(dev, "ARS: range %d ARS start %s (%d)\n", + nfit_spa->spa->range_index, + req_type == ARS_REQ_SHORT ? "short" : "long", + rc); + /* + * Hmm, we raced someone else starting ARS? Try again in + * a bit. 
+ */ + if (rc == -EBUSY) + return 1; + if (rc == 0) { + dev_WARN_ONCE(dev, acpi_desc->scrub_spa, + "scrub start while range %d active\n", + acpi_desc->scrub_spa->spa->range_index); + clear_bit(req_type, &nfit_spa->ars_state); + acpi_desc->scrub_spa = nfit_spa; + /* + * Consider this spa last for future scrub + * requests + */ + list_move_tail(&nfit_spa->list, &acpi_desc->spas); + return 1; } + + dev_err(dev, "ARS: range %d ARS failed (%d)\n", + nfit_spa->spa->range_index, rc); + set_bit(ARS_FAILED, &nfit_spa->ars_state); } return 0; } @@ -3009,6 +3076,7 @@ static void acpi_nfit_init_ars(struct acpi_nfit_desc *acpi_desc, struct nd_cmd_ars_cap ars_cap; int rc; + set_bit(ARS_FAILED, &nfit_spa->ars_state); memset(&ars_cap, 0, sizeof(ars_cap)); rc = ars_get_cap(acpi_desc, &ars_cap, nfit_spa); if (rc < 0) @@ -3025,16 +3093,14 @@ static void acpi_nfit_init_ars(struct acpi_nfit_desc *acpi_desc, nfit_spa->clear_err_unit = ars_cap.clear_err_unit; acpi_desc->max_ars = max(nfit_spa->max_ars, acpi_desc->max_ars); clear_bit(ARS_FAILED, &nfit_spa->ars_state); - set_bit(ARS_REQ, &nfit_spa->ars_state); } static int acpi_nfit_register_regions(struct acpi_nfit_desc *acpi_desc) { struct nfit_spa *nfit_spa; - int rc, query_rc; + int rc; list_for_each_entry(nfit_spa, &acpi_desc->spas, list) { - set_bit(ARS_FAILED, &nfit_spa->ars_state); switch (nfit_spa_type(nfit_spa->spa)) { case NFIT_SPA_VOLATILE: case NFIT_SPA_PM: @@ -3043,20 +3109,12 @@ static int acpi_nfit_register_regions(struct acpi_nfit_desc *acpi_desc) } } - /* - * Reap any results that might be pending before starting new - * short requests. 
- */ - query_rc = acpi_nfit_query_poison(acpi_desc); - if (query_rc == 0) - ars_complete_all(acpi_desc); - list_for_each_entry(nfit_spa, &acpi_desc->spas, list) switch (nfit_spa_type(nfit_spa->spa)) { case NFIT_SPA_VOLATILE: case NFIT_SPA_PM: /* register regions and kick off initial ARS run */ - rc = ars_register(acpi_desc, nfit_spa, &query_rc); + rc = ars_register(acpi_desc, nfit_spa); if (rc) return rc; break; @@ -3251,7 +3309,8 @@ static int acpi_nfit_clear_to_send(struct nvdimm_bus_descriptor *nd_desc, return 0; } -int acpi_nfit_ars_rescan(struct acpi_nfit_desc *acpi_desc, unsigned long flags) +int acpi_nfit_ars_rescan(struct acpi_nfit_desc *acpi_desc, + enum nfit_ars_state req_type) { struct device *dev = acpi_desc->dev; int scheduled = 0, busy = 0; @@ -3271,14 +3330,10 @@ int acpi_nfit_ars_rescan(struct acpi_nfit_desc *acpi_desc, unsigned long flags) if (test_bit(ARS_FAILED, &nfit_spa->ars_state)) continue; - if (test_and_set_bit(ARS_REQ, &nfit_spa->ars_state)) { + if (test_and_set_bit(req_type, &nfit_spa->ars_state)) busy++; - set_bit(ARS_REQ_REDO, &nfit_spa->ars_state); - } else { - if (test_bit(ARS_SHORT, &flags)) - set_bit(ARS_SHORT, &nfit_spa->ars_state); + else scheduled++; - } } if (scheduled) { sched_ars(acpi_desc); @@ -3464,10 +3519,11 @@ static void acpi_nfit_update_notify(struct device *dev, acpi_handle handle) static void acpi_nfit_uc_error_notify(struct device *dev, acpi_handle handle) { struct acpi_nfit_desc *acpi_desc = dev_get_drvdata(dev); - unsigned long flags = (acpi_desc->scrub_mode == HW_ERROR_SCRUB_ON) ? 
- 0 : 1 << ARS_SHORT; - acpi_nfit_ars_rescan(acpi_desc, flags); + if (acpi_desc->scrub_mode == HW_ERROR_SCRUB_ON) + acpi_nfit_ars_rescan(acpi_desc, ARS_REQ_LONG); + else + acpi_nfit_ars_rescan(acpi_desc, ARS_REQ_SHORT); } void __acpi_nfit_notify(struct device *dev, acpi_handle handle, u32 event) diff --git a/drivers/acpi/nfit/mce.c b/drivers/acpi/nfit/mce.c index e9626bf6ca296..d6c1b10f6c254 100644 --- a/drivers/acpi/nfit/mce.c +++ b/drivers/acpi/nfit/mce.c @@ -25,8 +25,12 @@ static int nfit_handle_mce(struct notifier_block *nb, unsigned long val, struct acpi_nfit_desc *acpi_desc; struct nfit_spa *nfit_spa; - /* We only care about memory errors */ - if (!mce_is_memory_error(mce)) + /* We only care about uncorrectable memory errors */ + if (!mce_is_memory_error(mce) || mce_is_correctable(mce)) + return NOTIFY_DONE; + + /* Verify the address reported in the MCE is valid. */ + if (!mce_usable_address(mce)) return NOTIFY_DONE; /* diff --git a/drivers/acpi/nfit/nfit.h b/drivers/acpi/nfit/nfit.h index d1274ea2d2516..02c10de50386c 100644 --- a/drivers/acpi/nfit/nfit.h +++ b/drivers/acpi/nfit/nfit.h @@ -118,10 +118,8 @@ enum nfit_dimm_notifiers { }; enum nfit_ars_state { - ARS_REQ, - ARS_REQ_REDO, - ARS_DONE, - ARS_SHORT, + ARS_REQ_SHORT, + ARS_REQ_LONG, ARS_FAILED, }; @@ -198,6 +196,7 @@ struct acpi_nfit_desc { struct device *dev; u8 ars_start_flags; struct nd_cmd_ars_status *ars_status; + struct nfit_spa *scrub_spa; struct delayed_work dwork; struct list_head list; struct kernfs_node *scrub_count_state; @@ -252,7 +251,8 @@ struct nfit_blk { extern struct list_head acpi_descs; extern struct mutex acpi_desc_lock; -int acpi_nfit_ars_rescan(struct acpi_nfit_desc *acpi_desc, unsigned long flags); +int acpi_nfit_ars_rescan(struct acpi_nfit_desc *acpi_desc, + enum nfit_ars_state req_type); #ifdef CONFIG_X86_MCE void nfit_mce_register(void); diff --git a/drivers/acpi/osl.c b/drivers/acpi/osl.c index 8df9abfa947b0..ed73f6fb0779b 100644 --- a/drivers/acpi/osl.c +++ 
b/drivers/acpi/osl.c @@ -617,15 +617,18 @@ void acpi_os_stall(u32 us) } /* - * Support ACPI 3.0 AML Timer operand - * Returns 64-bit free-running, monotonically increasing timer - * with 100ns granularity + * Support ACPI 3.0 AML Timer operand. Returns a 64-bit free-running, + * monotonically increasing timer with 100ns granularity. Do not use + * ktime_get() to implement this function because this function may get + * called after timekeeping has been suspended. Note: calling this function + * after timekeeping has been suspended may lead to unexpected results + * because when timekeeping is suspended the jiffies counter is not + * incremented. See also timekeeping_suspend(). */ u64 acpi_os_get_timer(void) { - u64 time_ns = ktime_to_ns(ktime_get()); - do_div(time_ns, 100); - return time_ns; + return (get_jiffies_64() - INITIAL_JIFFIES) * + (ACPI_100NSEC_PER_SEC / HZ); } acpi_status acpi_os_read_port(acpi_io_address port, u32 * value, u32 width) diff --git a/drivers/acpi/pmic/intel_pmic_xpower.c b/drivers/acpi/pmic/intel_pmic_xpower.c index 316e55174aa97..bb5391f59b8b5 100644 --- a/drivers/acpi/pmic/intel_pmic_xpower.c +++ b/drivers/acpi/pmic/intel_pmic_xpower.c @@ -27,8 +27,11 @@ #define GPI1_LDO_ON (3 << 0) #define GPI1_LDO_OFF (4 << 0) -#define AXP288_ADC_TS_PIN_GPADC 0xf2 -#define AXP288_ADC_TS_PIN_ON 0xf3 +#define AXP288_ADC_TS_CURRENT_ON_OFF_MASK GENMASK(1, 0) +#define AXP288_ADC_TS_CURRENT_OFF (0 << 0) +#define AXP288_ADC_TS_CURRENT_ON_WHEN_CHARGING (1 << 0) +#define AXP288_ADC_TS_CURRENT_ON_ONDEMAND (2 << 0) +#define AXP288_ADC_TS_CURRENT_ON (3 << 0) static struct pmic_table power_table[] = { { @@ -211,22 +214,44 @@ static int intel_xpower_pmic_update_power(struct regmap *regmap, int reg, */ static int intel_xpower_pmic_get_raw_temp(struct regmap *regmap, int reg) { + int ret, adc_ts_pin_ctrl; u8 buf[2]; - int ret; - ret = regmap_write(regmap, AXP288_ADC_TS_PIN_CTRL, - AXP288_ADC_TS_PIN_GPADC); + /* + * The current-source used for the battery temp-sensor 
(TS) is shared + * with the GPADC. For proper fuel-gauge and charger operation the TS + * current-source needs to be permanently on. But to read the GPADC we + * need to temporary switch the TS current-source to ondemand, so that + * the GPADC can use it, otherwise we will always read an all 0 value. + * + * Note that the switching from on to on-ondemand is not necessary + * when the TS current-source is off (this happens on devices which + * do not use the TS-pin). + */ + ret = regmap_read(regmap, AXP288_ADC_TS_PIN_CTRL, &adc_ts_pin_ctrl); if (ret) return ret; - /* After switching to the GPADC pin give things some time to settle */ - usleep_range(6000, 10000); + if (adc_ts_pin_ctrl & AXP288_ADC_TS_CURRENT_ON_OFF_MASK) { + ret = regmap_update_bits(regmap, AXP288_ADC_TS_PIN_CTRL, + AXP288_ADC_TS_CURRENT_ON_OFF_MASK, + AXP288_ADC_TS_CURRENT_ON_ONDEMAND); + if (ret) + return ret; + + /* Wait a bit after switching the current-source */ + usleep_range(6000, 10000); + } ret = regmap_bulk_read(regmap, AXP288_GP_ADC_H, buf, 2); if (ret == 0) ret = (buf[0] << 4) + ((buf[1] >> 4) & 0x0f); - regmap_write(regmap, AXP288_ADC_TS_PIN_CTRL, AXP288_ADC_TS_PIN_ON); + if (adc_ts_pin_ctrl & AXP288_ADC_TS_CURRENT_ON_OFF_MASK) { + regmap_update_bits(regmap, AXP288_ADC_TS_PIN_CTRL, + AXP288_ADC_TS_CURRENT_ON_OFF_MASK, + AXP288_ADC_TS_CURRENT_ON); + } return ret; } diff --git a/drivers/acpi/power.c b/drivers/acpi/power.c index 1b475bc1ae169..665e93ca0b40f 100644 --- a/drivers/acpi/power.c +++ b/drivers/acpi/power.c @@ -131,6 +131,23 @@ void acpi_power_resources_list_free(struct list_head *list) } } +static bool acpi_power_resource_is_dup(union acpi_object *package, + unsigned int start, unsigned int i) +{ + acpi_handle rhandle, dup; + unsigned int j; + + /* The caller is expected to check the package element types */ + rhandle = package->package.elements[i].reference.handle; + for (j = start; j < i; j++) { + dup = package->package.elements[j].reference.handle; + if (dup == rhandle) + 
return true; + } + + return false; +} + int acpi_extract_power_resources(union acpi_object *package, unsigned int start, struct list_head *list) { @@ -150,6 +167,11 @@ int acpi_extract_power_resources(union acpi_object *package, unsigned int start, err = -ENODEV; break; } + + /* Some ACPI tables contain duplicate power resource references */ + if (acpi_power_resource_is_dup(package, start, i)) + continue; + err = acpi_add_power_resource(rhandle); if (err) break; diff --git a/drivers/acpi/pptt.c b/drivers/acpi/pptt.c index d1e26cb599bfc..da031b1df6f5c 100644 --- a/drivers/acpi/pptt.c +++ b/drivers/acpi/pptt.c @@ -338,9 +338,6 @@ static struct acpi_pptt_cache *acpi_find_cache_node(struct acpi_table_header *ta return found; } -/* total number of attributes checked by the properties code */ -#define PPTT_CHECKED_ATTRIBUTES 4 - /** * update_cache_properties() - Update cacheinfo for the given processor * @this_leaf: Kernel cache info structure being updated @@ -357,25 +354,15 @@ static void update_cache_properties(struct cacheinfo *this_leaf, struct acpi_pptt_cache *found_cache, struct acpi_pptt_processor *cpu_node) { - int valid_flags = 0; - this_leaf->fw_token = cpu_node; - if (found_cache->flags & ACPI_PPTT_SIZE_PROPERTY_VALID) { + if (found_cache->flags & ACPI_PPTT_SIZE_PROPERTY_VALID) this_leaf->size = found_cache->size; - valid_flags++; - } - if (found_cache->flags & ACPI_PPTT_LINE_SIZE_VALID) { + if (found_cache->flags & ACPI_PPTT_LINE_SIZE_VALID) this_leaf->coherency_line_size = found_cache->line_size; - valid_flags++; - } - if (found_cache->flags & ACPI_PPTT_NUMBER_OF_SETS_VALID) { + if (found_cache->flags & ACPI_PPTT_NUMBER_OF_SETS_VALID) this_leaf->number_of_sets = found_cache->number_of_sets; - valid_flags++; - } - if (found_cache->flags & ACPI_PPTT_ASSOCIATIVITY_VALID) { + if (found_cache->flags & ACPI_PPTT_ASSOCIATIVITY_VALID) this_leaf->ways_of_associativity = found_cache->associativity; - valid_flags++; - } if (found_cache->flags & 
ACPI_PPTT_WRITE_POLICY_VALID) { switch (found_cache->attributes & ACPI_PPTT_MASK_WRITE_POLICY) { case ACPI_PPTT_CACHE_POLICY_WT: @@ -402,11 +389,17 @@ static void update_cache_properties(struct cacheinfo *this_leaf, } } /* - * If the above flags are valid, and the cache type is NOCACHE - * update the cache type as well. + * If cache type is NOCACHE, then the cache hasn't been specified + * via other mechanisms. Update the type if a cache type has been + * provided. + * + * Note, we assume such caches are unified based on conventional system + * design and known examples. Significant work is required elsewhere to + * fully support data/instruction only type caches which are only + * specified in PPTT. */ if (this_leaf->type == CACHE_TYPE_NOCACHE && - valid_flags == PPTT_CHECKED_ATTRIBUTES) + found_cache->flags & ACPI_PPTT_CACHE_TYPE_VALID) this_leaf->type = CACHE_TYPE_UNIFIED; } diff --git a/drivers/acrn/Kconfig b/drivers/acrn/Kconfig new file mode 100644 index 0000000000000..706aba14ad00b --- /dev/null +++ b/drivers/acrn/Kconfig @@ -0,0 +1,29 @@ +config ACRN_SHARED_BUFFER + bool "Intel ACRN SHARED BUFFER" + depends on ACRN_VHM + ---help--- + Ring buffer shared between ACRN Hypervisor and its SOS. + Help ACRN performance profiling. + +config ACRN_TRACE + tristate "Intel ACRN Hypervisor Trace support" + depends on ACRN_SHARED_BUFFER + ---help--- + This is the Trace driver for the Intel ACRN hypervisor. + You can say y to build it into the kernel, or m to build + it as a module. + +config ACRN_HVLOG + bool "Intel ACRN Hypervisor Logmsg support" + depends on ACRN_SHARED_BUFFER + ---help--- + This is the Trace driver for the Intel ACRN hypervisor log. + You can say y to build it into the kernel. + +config ACRN_HV_NPK_LOG + bool "Intel ACRN Hypervisor NPK Log" + depends on INTEL_TH + depends on ACRN_VHM + ---help--- + The driver is to configure/enable/disable the Intel ACRN hypervisor + NPK log. 
diff --git a/drivers/acrn/Makefile b/drivers/acrn/Makefile new file mode 100644 index 0000000000000..dad6a9e8c42c1 --- /dev/null +++ b/drivers/acrn/Makefile @@ -0,0 +1,5 @@ +subdir-ccflags-$(CONFIG_ACRN) := -Werror +obj-$(CONFIG_ACRN_SHARED_BUFFER) += sbuf.o +obj-$(CONFIG_ACRN_TRACE) += acrn_trace.o +obj-$(CONFIG_ACRN_HVLOG) += acrn_hvlog.o +obj-$(CONFIG_ACRN_HV_NPK_LOG) += hv_npk_log.o diff --git a/drivers/acrn/acrn_hvlog.c b/drivers/acrn/acrn_hvlog.c new file mode 100644 index 0000000000000..da04aeb2e829f --- /dev/null +++ b/drivers/acrn/acrn_hvlog.c @@ -0,0 +1,400 @@ +/* + * ACRN Hypervisor logmsg + * + * This file is provided under a dual BSD/GPLv2 license.  When using or + * redistributing this file, you may do so under either license. + * + * GPL LICENSE SUMMARY + * + * Copyright (c) 2017 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU + * General Public License for more details. + * + * Contact Information: Li Fei + * + * BSD LICENSE + * + * Copyright (C) 2017 Intel Corporation. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. 
+ * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ * + * Li Fei + * + */ +#define pr_fmt(fmt) "ACRN HVLog: " fmt + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "sbuf.h" + +#define LOG_ENTRY_SIZE 80 +#define DEFAULT_PCPU_NR 4 + +#define foreach_cpu(cpu, cpu_num) \ + for ((cpu) = 0; (cpu) < (cpu_num); (cpu)++) + +#define foreach_hvlog_type(idx, hvlog_type) \ + for ((idx) = 0; (idx) < (hvlog_type); (idx)++) + +enum sbuf_hvlog_index { + SBUF_CUR_HVLOG = 0, + SBUF_LAST_HVLOG, + SBUF_HVLOG_TYPES +}; + +struct acrn_hvlog { + struct miscdevice miscdev; + char name[24]; + shared_buf_t *sbuf; + atomic_t open_cnt; + int pcpu_num; +}; + +static struct acrn_hvlog *acrn_hvlog_devs[SBUF_HVLOG_TYPES]; +static uint16_t pcpu_nr = DEFAULT_PCPU_NR; +static unsigned long long hvlog_buf_size; +static unsigned long long hvlog_buf_base; + +static int __init early_hvlog(char *p) +{ + int ret; + + pr_debug("%s(%s)\n", __func__, p); + hvlog_buf_size = memparse(p, &p); + if (*p != '@') + return 0; + hvlog_buf_base = memparse(p + 1, &p); + + if (!!hvlog_buf_base && !!hvlog_buf_size) { + ret = memblock_reserve(hvlog_buf_base, hvlog_buf_size); + if (ret) { + pr_err("%s: Error reserving hvlog memblock\n", + __func__); + hvlog_buf_base = 0; + hvlog_buf_size = 0; + return ret; + } + } + + return 0; +} +early_param("hvlog", early_hvlog); + + +static inline shared_buf_t *hvlog_mark_unread(shared_buf_t *sbuf) +{ + /* sbuf must point to valid data. + * clear the lowest bit in the magic to indicate that the sbuf point + * to the last boot valid data. We will read all of valid data in the + * sbuf later from 0 offset to sbuf->tail. 
+ */ + if (sbuf != NULL) { + sbuf->magic &= ~1; + sbuf->head = 0; + } + + return sbuf; +} + +static int acrn_hvlog_open(struct inode *inode, struct file *filp) +{ + struct acrn_hvlog *acrn_hvlog; + + acrn_hvlog = container_of(filp->private_data, + struct acrn_hvlog, miscdev); + pr_debug("%s, %s\n", __func__, acrn_hvlog->miscdev.name); + + if (acrn_hvlog->pcpu_num >= pcpu_nr) { + pr_err("%s, invalid pcpu_num: %d\n", + __func__, acrn_hvlog->pcpu_num); + return -EIO; + } + + /* More than one reader at the same time could get data messed up */ + if (atomic_cmpxchg(&acrn_hvlog->open_cnt, 0, 1) != 0) + return -EBUSY; + + filp->private_data = acrn_hvlog; + + return 0; +} + +static int acrn_hvlog_release(struct inode *inode, struct file *filp) +{ + struct acrn_hvlog *acrn_hvlog; + + acrn_hvlog = filp->private_data; + + pr_debug("%s, %s\n", __func__, acrn_hvlog->miscdev.name); + + if (acrn_hvlog->pcpu_num >= pcpu_nr) { + pr_err("%s, invalid pcpu_num: %d\n", + __func__, acrn_hvlog->pcpu_num); + return -EIO; + } + + atomic_dec(&acrn_hvlog->open_cnt); + filp->private_data = NULL; + + return 0; +} + +static ssize_t acrn_hvlog_read(struct file *filp, char __user *buf, + size_t count, loff_t *offset) +{ + char data[LOG_ENTRY_SIZE]; + struct acrn_hvlog *acrn_hvlog; + int ret; + + acrn_hvlog = (struct acrn_hvlog *)filp->private_data; + + pr_debug("%s, %s\n", __func__, acrn_hvlog->miscdev.name); + + if (acrn_hvlog->pcpu_num >= pcpu_nr) { + pr_err("%s, invalid pcpu_num: %d\n", + __func__, acrn_hvlog->pcpu_num); + return -EIO; + } + + if (acrn_hvlog->sbuf != NULL) { + ret = sbuf_get(acrn_hvlog->sbuf, (uint8_t *)&data); + if (ret > 0) { + if (copy_to_user(buf, &data, ret)) + return -EFAULT; + } + + return ret; + } + + return 0; +} + +static const struct file_operations acrn_hvlog_fops = { + .owner = THIS_MODULE, + .open = acrn_hvlog_open, + .release = acrn_hvlog_release, + .read = acrn_hvlog_read, +}; + +/** + * base0 = hvlog_buf_base; + * base1 = hvlog_buf_base + (hvlog_buf_size >> 1) 
+ * if there is valid data in base0, cur_logbuf = base1, last_logbuf = base0. + * if there is valid data in base1, cur_logbuf = base0, last_logbuf = base1. + * if there is no valid data both in base0 and base1, cur_logbuf = base0, + * last_logbuf = 0. + */ +static void assign_hvlog_buf_base(uint64_t *cur_logbuf, uint64_t *last_logbuf) +{ + uint64_t base0, base1; + uint32_t ele_num, size; + uint16_t pcpu_id; + + base0 = hvlog_buf_base; + base1 = hvlog_buf_base + (hvlog_buf_size >> 1); + size = (hvlog_buf_size >> 1) / pcpu_nr; + ele_num = (size - SBUF_HEAD_SIZE) / LOG_ENTRY_SIZE; + + foreach_cpu(pcpu_id, pcpu_nr) { + if (sbuf_check_valid(ele_num, LOG_ENTRY_SIZE, + base0 + (size * pcpu_id))) { + *last_logbuf = base0; + *cur_logbuf = base1; + return; + } + } + + foreach_cpu(pcpu_id, pcpu_nr) { + if (sbuf_check_valid(ele_num, LOG_ENTRY_SIZE, + base1 + (size * pcpu_id))) { + *last_logbuf = base1; + *cur_logbuf = base0; + return; + } + } + + /* No last logbuf found */ + *last_logbuf = 0; + *cur_logbuf = base0; +} + +static int init_hvlog_dev(uint64_t base, uint32_t hvlog_type) +{ + int err = 0; + uint16_t idx, i; + shared_buf_t *sbuf; + struct acrn_hvlog *hvlog; + uint32_t ele_size, ele_num, size; + + if (!base) + return -ENODEV; + + size = (hvlog_buf_size >> 1) / pcpu_nr; + ele_size = LOG_ENTRY_SIZE; + ele_num = (size - SBUF_HEAD_SIZE) / ele_size; + + foreach_cpu(idx, pcpu_nr) { + hvlog = &acrn_hvlog_devs[hvlog_type][idx]; + + switch (hvlog_type) { + case SBUF_CUR_HVLOG: + snprintf(hvlog->name, sizeof(hvlog->name), + "acrn_hvlog_cur_%hu", idx); + sbuf = sbuf_construct(ele_num, ele_size, + base + (size * idx)); + sbuf_share_setup(idx, ACRN_HVLOG, sbuf); + break; + case SBUF_LAST_HVLOG: + snprintf(hvlog->name, sizeof(hvlog->name), + "acrn_hvlog_last_%hu", idx); + sbuf = sbuf_check_valid(ele_num, ele_size, + base + (size * idx)); + hvlog_mark_unread(sbuf); + break; + default: + return -EINVAL; + } + + hvlog->miscdev.name = hvlog->name; + hvlog->miscdev.minor = 
MISC_DYNAMIC_MINOR; + hvlog->miscdev.fops = &acrn_hvlog_fops; + hvlog->pcpu_num = idx; + hvlog->sbuf = sbuf; + + err = misc_register(&(hvlog->miscdev)); + if (err < 0) { + pr_err("Failed to register %s, errno %d\n", + hvlog->name, err); + goto err_reg; + } + } + + return 0; + +err_reg: + for (i = --idx; i >= 0; i--) + misc_deregister(&acrn_hvlog_devs[hvlog_type][i].miscdev); + + return err; +} + +static void deinit_hvlog_dev(uint32_t hvlog_type) +{ + uint16_t idx; + struct acrn_hvlog *hvlog; + + foreach_cpu(idx, pcpu_nr) { + hvlog = &acrn_hvlog_devs[hvlog_type][idx]; + switch (hvlog_type) { + case SBUF_CUR_HVLOG: + sbuf_share_setup(idx, ACRN_HVLOG, 0); + sbuf_deconstruct(hvlog->sbuf); + break; + case SBUF_LAST_HVLOG: + break; + default: + break; + } + + misc_deregister(&(hvlog->miscdev)); + } + + kfree(acrn_hvlog_devs[hvlog_type]); +} + +static int __init acrn_hvlog_init(void) +{ + int idx, ret = 0; + struct acrn_hw_info hw_info; + uint64_t cur_logbuf, last_logbuf; + + if (!hvlog_buf_base || !hvlog_buf_size) { + pr_warn("no fixed memory reserve for hvlog.\n"); + return 0; + } + + memset(&hw_info, 0, sizeof(struct acrn_hw_info)); + ret = hcall_get_hw_info(virt_to_phys(&hw_info)); + if (!ret) + pcpu_nr = hw_info.cpu_num; + + foreach_hvlog_type(idx, SBUF_HVLOG_TYPES) { + acrn_hvlog_devs[idx] = kcalloc(pcpu_nr, + sizeof(struct acrn_hvlog), GFP_KERNEL); + if (!acrn_hvlog_devs[idx]) + return -ENOMEM; + } + + assign_hvlog_buf_base(&cur_logbuf, &last_logbuf); + ret = init_hvlog_dev(cur_logbuf, SBUF_CUR_HVLOG); + if (ret) { + pr_err("Failed to init cur hvlog devs, errno %d\n", ret); + return ret; + } + + /* If error happens for last hvlog devs setup, just print out an warn */ + ret = init_hvlog_dev(last_logbuf, SBUF_LAST_HVLOG); + if (ret) + pr_warn("Failed to init last hvlog devs, errno %d\n", ret); + + pr_info("Initialized hvlog module with %u cpu\n", pcpu_nr); + return 0; +} + +static void __exit acrn_hvlog_exit(void) +{ + int i; + + foreach_hvlog_type(i, 
SBUF_HVLOG_TYPES) + deinit_hvlog_dev(i); + + pr_info("Exit hvlog module\n"); +} + +module_init(acrn_hvlog_init); +module_exit(acrn_hvlog_exit); + +MODULE_LICENSE("Dual BSD/GPL"); +MODULE_AUTHOR("Intel Corp., http://www.intel.com"); +MODULE_DESCRIPTION("Driver for the Intel ACRN Hypervisor Logmsg"); +MODULE_VERSION("0.1"); diff --git a/drivers/acrn/acrn_trace.c b/drivers/acrn/acrn_trace.c new file mode 100644 index 0000000000000..010acb9872c42 --- /dev/null +++ b/drivers/acrn/acrn_trace.c @@ -0,0 +1,286 @@ +/* +* +* ACRN Trace module +* +* This file is provided under a dual BSD/GPLv2 license.  When using or +* redistributing this file, you may do so under either license. +* +* GPL LICENSE SUMMARY +* +* Copyright (c) 2017 Intel Corporation. All rights reserved. +* +* This program is free software; you can redistribute it and/or modify +* it under the terms of version 2 of the GNU General Public License as +* published by the Free Software Foundation. +* +* This program is distributed in the hope that it will be useful, but +* WITHOUT ANY WARRANTY; without even the implied warranty of +* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU +* General Public License for more details. +* +* Contact Information: Yan, Like +* +* BSD LICENSE +* +* Copyright (c) 2017 Intel Corporation. All rights reserved. +* +* Redistribution and use in source and binary forms, with or without +* modification, are permitted provided that the following conditions +* are met: +* +*   * Redistributions of source code must retain the above copyright +*     notice, this list of conditions and the following disclaimer. +*   * Redistributions in binary form must reproduce the above copyright +*     notice, this list of conditions and the following disclaimer in +*     the documentation and/or other materials provided with the +*     distribution. 
+*   * Neither the name of Intel Corporation nor the names of its +*     contributors may be used to endorse or promote products derived +*     from this software without specific prior written permission. +* +* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +* +* Like Yan +* +*/ + +#define pr_fmt(fmt) "ACRNTrace: " fmt + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#include "sbuf.h" + + +#define TRACE_SBUF_SIZE (4 * 1024 * 1024) +#define TRACE_ELEMENT_SIZE 32 /* byte */ +#define TRACE_ELEMENT_NUM ((TRACE_SBUF_SIZE - SBUF_HEAD_SIZE) / \ + TRACE_ELEMENT_SIZE) + +#define foreach_cpu(cpu, cpu_num) \ + for ((cpu) = 0; (cpu) < (cpu_num); (cpu)++) + +#define DEFAULT_NR_CPUS 4 +/* actual physical cpu number, initialized by module init */ +static int pcpu_num = DEFAULT_NR_CPUS; + +struct acrn_trace { + struct miscdevice miscdev; + char name[24]; + shared_buf_t *sbuf; + atomic_t open_cnt; + uint16_t pcpu_id; +}; + +static struct acrn_trace *acrn_trace_devs; + +/************************************************************************ + * + * file_operations functions + * + ***********************************************************************/ +static int acrn_trace_open(struct inode *inode, 
struct file *filep) +{ + struct acrn_trace *dev; + + dev = container_of(filep->private_data, struct acrn_trace, miscdev); + if (!dev) { + pr_err("No such dev\n"); + return -ENODEV; + } + pr_debug("%s, cpu %d\n", __func__, dev->pcpu_id); + + /* More than one reader at the same time could get data messed up */ + if (atomic_read(&dev->open_cnt)) + return -EBUSY; + + atomic_inc(&dev->open_cnt); + + return 0; +} + +static int acrn_trace_release(struct inode *inode, struct file *filep) +{ + struct acrn_trace *dev; + + dev = container_of(filep->private_data, struct acrn_trace, miscdev); + if (!dev) { + pr_err("No such dev\n"); + return -ENODEV; + } + + pr_debug("%s, cpu %d\n", __func__, dev->pcpu_id); + + atomic_dec(&dev->open_cnt); + + return 0; +} + +static int acrn_trace_mmap(struct file *filep, struct vm_area_struct *vma) +{ + phys_addr_t paddr; + struct acrn_trace *dev; + + dev = container_of(filep->private_data, struct acrn_trace, miscdev); + if (!dev) { + pr_err("No such dev\n"); + return -ENODEV; + } + + pr_debug("%s, cpu %d\n", __func__, dev->pcpu_id); + + WARN_ON(!virt_addr_valid(dev->sbuf)); + paddr = virt_to_phys(dev->sbuf); + + if (remap_pfn_range(vma, vma->vm_start, + paddr >> PAGE_SHIFT, + vma->vm_end - vma->vm_start, + vma->vm_page_prot)) { + pr_err("Failed to mmap sbuf for cpu%d\n", dev->pcpu_id); + return -EAGAIN; + } + + return 0; +} + +static const struct file_operations acrn_trace_fops = { + .owner = THIS_MODULE, + .open = acrn_trace_open, + .release = acrn_trace_release, + .mmap = acrn_trace_mmap, +}; + +/* + * acrn_trace_init() + */ +static int __init acrn_trace_init(void) +{ + int ret = 0; + int i, cpu; + shared_buf_t *sbuf; + struct miscdevice *miscdev; + struct acrn_hw_info hw_info; + + if (x86_hyper_type != X86_HYPER_ACRN) { + pr_err("acrn_trace: not support acrn hypervisor!\n"); + return -EINVAL; + } + + memset(&hw_info, 0, sizeof(struct acrn_hw_info)); + ret = hcall_get_hw_info(virt_to_phys(&hw_info)); + if (!ret) + pcpu_num = hw_info.cpu_num; 
+ + acrn_trace_devs = kcalloc(pcpu_num, sizeof(struct acrn_trace), + GFP_KERNEL); + if (!acrn_trace_devs) + return -ENOMEM; + + foreach_cpu(cpu, pcpu_num) { + /* allocate shared_buf */ + sbuf = sbuf_allocate(TRACE_ELEMENT_NUM, TRACE_ELEMENT_SIZE); + if (!sbuf) { + ret = -ENOMEM; + goto out_free; + } + acrn_trace_devs[cpu].sbuf = sbuf; + } + + foreach_cpu(cpu, pcpu_num) { + sbuf = acrn_trace_devs[cpu].sbuf; + ret = sbuf_share_setup(cpu, ACRN_TRACE, sbuf); + if (ret < 0) { + pr_err("Failed to setup SBuf, cpuid %d\n", cpu); + goto out_sbuf; + } + } + + foreach_cpu(cpu, pcpu_num) { + acrn_trace_devs[cpu].pcpu_id = cpu; + + miscdev = &acrn_trace_devs[cpu].miscdev; + snprintf(acrn_trace_devs[cpu].name, + sizeof(acrn_trace_devs[cpu].name), + "acrn_trace_%d", cpu); + miscdev->name = acrn_trace_devs[cpu].name; + miscdev->minor = MISC_DYNAMIC_MINOR; + miscdev->fops = &acrn_trace_fops; + + ret = misc_register(&acrn_trace_devs[cpu].miscdev); + if (ret < 0) { + pr_err("Failed to register acrn_trace_%d, errno %d\n", + cpu, ret); + goto out_dereg; + } + } + + pr_info("Initialized acrn trace module with %u cpu\n", pcpu_num); + return ret; + +out_dereg: + for (i = --cpu; i >= 0; i--) + misc_deregister(&acrn_trace_devs[i].miscdev); + cpu = pcpu_num; + +out_sbuf: + for (i = --cpu; i >= 0; i--) + sbuf_share_setup(i, ACRN_TRACE, NULL); + cpu = pcpu_num; + +out_free: + for (i = --cpu; i >= 0; i--) + sbuf_free(acrn_trace_devs[i].sbuf); + kfree(acrn_trace_devs); + + return ret; +} + +/* + * acrn_trace_exit() + */ +static void __exit acrn_trace_exit(void) +{ + int cpu; + + pr_info("%s, cpu_num %d\n", __func__, pcpu_num); + + foreach_cpu(cpu, pcpu_num) { + /* deregister devices */ + misc_deregister(&acrn_trace_devs[cpu].miscdev); + + /* set sbuf pointer to NULL in HV */ + sbuf_share_setup(cpu, ACRN_TRACE, NULL); + + /* free sbuf, per-cpu sbuf should be set NULL */ + sbuf_free(acrn_trace_devs[cpu].sbuf); + } + + kfree(acrn_trace_devs); +} + +module_init(acrn_trace_init); 
+module_exit(acrn_trace_exit); + +MODULE_LICENSE("Dual BSD/GPL"); +MODULE_AUTHOR("Intel Corp., http://www.intel.com"); +MODULE_DESCRIPTION("Driver for the Intel ACRN Hypervisor Trace"); +MODULE_VERSION("0.1"); diff --git a/drivers/acrn/hv_npk_log.c b/drivers/acrn/hv_npk_log.c new file mode 100644 index 0000000000000..2303b9a72a3af --- /dev/null +++ b/drivers/acrn/hv_npk_log.c @@ -0,0 +1,384 @@ +/* + * ACRN Hypervisor NPK Log + * + * This file is provided under a dual BSD/GPLv2 license. When using or + * redistributing this file, you may do so under either license. + * + * GPL LICENSE SUMMARY + * + * Copyright (c) 2018 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * BSD LICENSE + * + * Copyright (C) 2018 Intel Corporation. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + */ + +/* + * The hv_npk_log driver is to enable/disable/configure the ACRN hypervisor + * NPK log. It communicates with the hypervisor via the HC_SETUP_HV_NPK_LOG + * hypercall, and exposes the interface to usperspace via sysfs. + * With this driver, the user could: + * 1. Configure the Master/Channel used for the hypervisor NPK log. + * 2. Configure the log level of the hypervisor NPK log. + * 3. Enable/Disable the hypervisor NPK log. 
+ * + * +-------------------------------------+ + * | Interfaces exposed by the driver | + * SOS | U: and used by the applications | + * | +----------------^----------------+ | + * | | | + * | K: +---------v---------+ | + * | | hv_npk_log driver | | + * | +---------^---------+ | + * +------------------|------------------+ + * | HC_SETUP_HV_NPK_LOG + * +------------------|------------------+ + * HV | +-------v-------+ | + * | | npk_log | | + * | +-------+-------+ | + * +------------------|------------------+ + * | + * +-------------+----v----+-------------+ + * NPK MMIO | | /////// | | + * | | /////// | | + * +-------------+----+----+-------------+ + * | + * +---+ The Master/Channel reserved + * for the hypervisor NPK logs + */ + +#define pr_fmt(fmt) "ACRN HV_NPK_LOG: " fmt + +#include +#include +#include +#include +#include +#include +#include "hv_npk_log.h" + +#define HV_NPK_LOG_USAGE \ + "echo \"E[nable] [#M #C] [#L]\" to enable the ACRN HV NPK Log\n" \ + "echo \"D[isable]\" to disable the ACRN HV NPK Log\n" \ + "echo \"C[onfig] [#M #C] [#L]\" to configure the ACRN HV NPK Log\n" + +static struct hv_npk_log_conf *hnl_conf; + +/* Try to get the master/channel based on the given address */ +static int addr2mc(phys_addr_t addr, int *master, int *channel) +{ + phys_addr_t offset, base; + unsigned int start, end; + int c, m; + + if (!hnl_conf || !master || !channel || !hnl_conf->nchan) + return -EINVAL; + + /* check if the addr belongs to SW_BAR or FW_BAR */ + if (addr >= hnl_conf->stmr_base && addr < hnl_conf->stmr_end) { + base = hnl_conf->stmr_base; + start = hnl_conf->sw_start; + end = hnl_conf->sw_end; + } else if (addr >= hnl_conf->ftmr_base && addr < hnl_conf->ftmr_end) { + base = hnl_conf->ftmr_base; + start = hnl_conf->fw_start; + end = hnl_conf->fw_end; + } else { + return -EINVAL; + } + + offset = addr - base; + if (offset % NPK_CHAN_SIZE) + return -EINVAL; + + c = offset / NPK_CHAN_SIZE; + m = c / hnl_conf->nchan; + c = c % hnl_conf->nchan; + if (start + m 
> end) + return -EINVAL; + + *channel = c; + *master = m + start; + return 0; +} + +/* Try to get the MMIO address based on the given master/channel */ +static int mc2addr(phys_addr_t *addr, unsigned int master, unsigned int channel) +{ + phys_addr_t base; + unsigned int start; + + if (!hnl_conf || !addr || !hnl_conf->nchan + || channel >= hnl_conf->nchan) + return -EINVAL; + + /* check if the master belongs to SW_BAR or FW_BAR */ + if (master >= hnl_conf->sw_start && master <= hnl_conf->sw_end) { + base = hnl_conf->stmr_base; + start = hnl_conf->sw_start; + } else if (master >= hnl_conf->fw_start && master <= hnl_conf->fw_end) { + base = hnl_conf->ftmr_base; + start = hnl_conf->fw_start; + } else { + return -EINVAL; + } + + *addr = base + ((master - start) * hnl_conf->nchan + channel) + * NPK_CHAN_SIZE; + return 0; +} + +static int npk_dev_match(struct device *dev, void *data) +{ + return 1; +} + +/* Check if the NPK device/driver exists, and get info from them */ +static int load_npk_conf(void) +{ + u32 reg; + int err; + void __iomem *base; + struct device *dev; + struct pci_dev *pdev; + struct device_driver *drv; + + /* check if the NPK device and driver exists */ + drv = driver_find(NPK_DRV_NAME, &pci_bus_type); + if (!drv) { + pr_err("Cannot find the %s driver\n", NPK_DRV_NAME); + return -ENODEV; + } + + dev = driver_find_device(drv, NULL, NULL, npk_dev_match); + if (!dev) { + pr_err("Cannot find the NPK device\n"); + return -ENODEV; + } + + hnl_conf = kzalloc(sizeof(struct hv_npk_log_conf), GFP_KERNEL); + if (!hnl_conf) + return -ENOMEM; + + /* get the base address of FW_BAR */ + pdev = to_pci_dev(dev); + err = pci_read_config_dword(pdev, PCI_REG_FW_LBAR, ®); + if (err) + return err; + hnl_conf->ftmr_base = reg & 0xfffc0000U; + err = pci_read_config_dword(pdev, PCI_REG_FW_UBAR, ®); + if (err) + return err; + hnl_conf->ftmr_base |= ((phys_addr_t)reg << 32); + + /* read out some configurations of NPK */ + base = devm_ioremap(dev, 
pdev->resource[TH_MMIO_CONFIG].start, + resource_size(&(pdev->resource[TH_MMIO_CONFIG]))); + if (!base) { + pr_err("Cannot map the NPK configuration address\n"); + goto error; + } + + reg = ioread32(base + REG_STH_STHCAP0); + hnl_conf->sw_start = reg & 0xffffU; + hnl_conf->sw_end = reg >> 16; + reg = ioread32(base + REG_STH_STHCAP1); + hnl_conf->nchan = reg & 0xffU; + hnl_conf->fw_end = reg >> 24; + reg = ioread32(base + REG_STH_STHCAP2); + hnl_conf->fw_start = reg & 0xffffU; + devm_iounmap(dev, base); + + hnl_conf->status = HV_NPK_LOG_UNKNOWN; + hnl_conf->master = HV_NPK_LOG_UNKNOWN; + hnl_conf->channel = HV_NPK_LOG_UNKNOWN; + hnl_conf->loglevel = HV_NPK_LOG_UNKNOWN; + hnl_conf->stmr_base = pdev->resource[TH_MMIO_SW].start; + + if (hnl_conf->sw_end < hnl_conf->sw_start + || hnl_conf->fw_end < hnl_conf->fw_start + || hnl_conf->nchan == 0) + goto error; + + hnl_conf->stmr_end = hnl_conf->stmr_base + (hnl_conf->sw_end - + hnl_conf->sw_start) * hnl_conf->nchan * NPK_CHAN_SIZE; + hnl_conf->ftmr_end = hnl_conf->ftmr_base + (hnl_conf->fw_end - + hnl_conf->fw_start) * hnl_conf->nchan * NPK_CHAN_SIZE; + + return 0; + +error: + kfree(hnl_conf); + hnl_conf = NULL; + return -EINVAL; +} + +/* User interface to set the configuration */ +static int hv_npk_log_conf_set(const char *val, const struct kernel_param *kp) +{ + char **argv; + int i, argc, ret = -EINVAL; + struct hv_npk_log_param cmd; + unsigned int args[HV_NPK_LOG_MAX_PARAM]; + + if (!hnl_conf && load_npk_conf() < 0) + return -EINVAL; + + argv = argv_split(GFP_KERNEL, val, &argc); + if (!argv) + return -ENOMEM; + if (!argc || argc > HV_NPK_LOG_MAX_PARAM) + goto out; + + for (i = 1; i < argc; i++) + if (kstrtouint(argv[i], 10, &args[i]) < 0) + goto out; + + memset(&cmd, 0, sizeof(struct hv_npk_log_param)); + cmd.loglevel = 0xffffU; + cmd.cmd = HV_NPK_LOG_CMD_INVALID; + switch (tolower(argv[0][0])) { + case 'e': /* enable */ + case 'c': /* configure */ + if (!strncasecmp(argv[0], "enable", strlen(argv[0]))) { + cmd.cmd = 
HV_NPK_LOG_CMD_ENABLE; + } else if (!strncasecmp(argv[0], "configure", strlen(argv[0])) + && argc != 1) { + cmd.cmd = HV_NPK_LOG_CMD_CONF; + } else + break; + + if (argc <= 2) { + cmd.loglevel = argc == 2 ? args[1] : 0xffffU; + if (hnl_conf->master == HV_NPK_LOG_UNKNOWN) + mc2addr(&cmd.mmio_addr, HV_NPK_LOG_DFT_MASTER, + HV_NPK_LOG_DFT_CHANNEL); + } else if (argc > 2 && !mc2addr(&cmd.mmio_addr, + args[1], args[2])) { + cmd.loglevel = argc == 4 ? args[3] : 0xffffU; + } + break; + case 'd': /* disable */ + if (!strncasecmp(argv[0], "disable", strlen(argv[0])) + && argc == 1) + cmd.cmd = HV_NPK_LOG_CMD_DISABLE; + break; + default: + pr_err("Unsupported command : %s\n", argv[0]); + break; + } + + if (cmd.cmd != HV_NPK_LOG_CMD_INVALID) { + ret = hcall_setup_hv_npk_log(virt_to_phys(&cmd)); + ret = (ret < 0 || cmd.res == HV_NPK_LOG_RES_KO) ? -EINVAL : 0; + } + +out: + argv_free(argv); + if (ret < 0) + pr_err("Unsupported configuration : %s\n", val); + return ret; +} + +/* User interface to query the configuration */ +static int hv_npk_log_conf_get(char *buffer, const struct kernel_param *kp) +{ + long ret; + struct hv_npk_log_param query; + + if (!hnl_conf && load_npk_conf() < 0) + return sprintf(buffer, "%s\n", + "Failed to init the configuration."); + + memset(&query, 0, sizeof(struct hv_npk_log_param)); + query.cmd = HV_NPK_LOG_CMD_QUERY; + ret = hcall_setup_hv_npk_log(virt_to_phys(&query)); + if (ret < 0 || query.res == HV_NPK_LOG_RES_KO) + return sprintf(buffer, "%s\n", "Failed to invoke the hcall."); + + if (!addr2mc(query.mmio_addr, &hnl_conf->master, &hnl_conf->channel)) { + hnl_conf->status = query.res == HV_NPK_LOG_RES_ENABLED ? 
+ HV_NPK_LOG_ENABLED : HV_NPK_LOG_DISABLED; + } else { + hnl_conf->status = HV_NPK_LOG_UNKNOWN; + hnl_conf->master = HV_NPK_LOG_UNKNOWN; + hnl_conf->channel = HV_NPK_LOG_UNKNOWN; + } + hnl_conf->loglevel = query.loglevel; + + return scnprintf(buffer, PAGE_SIZE, "Master(SW:%d~%d FW:%d~%d):%d " + "Channel(0~%d):%d Status:%d Log Level: %d\n%s\n", + hnl_conf->sw_start, hnl_conf->sw_end, + hnl_conf->fw_start, hnl_conf->fw_end, + hnl_conf->master, hnl_conf->nchan - 1, + hnl_conf->channel, hnl_conf->status, + hnl_conf->loglevel, HV_NPK_LOG_USAGE); +} + +/* /sys/module/hv_npk_log/parameters/hv_npk_log_conf */ +static struct kernel_param_ops hv_npk_log_conf_param_ops = { + .set = hv_npk_log_conf_set, + .get = hv_npk_log_conf_get, +}; +module_param_cb(hv_npk_log_conf, &hv_npk_log_conf_param_ops, NULL, 0644); + +static struct miscdevice hv_npk_log_misc = { + .minor = MISC_DYNAMIC_MINOR, + .name = "hv_npk_log", +}; + +static int __init hv_npk_log_init(void) +{ + return misc_register(&hv_npk_log_misc); +} + +static void __exit hv_npk_log_exit(void) +{ + kfree(hnl_conf); + + misc_deregister(&hv_npk_log_misc); +} + +module_init(hv_npk_log_init); +module_exit(hv_npk_log_exit); + +MODULE_LICENSE("Dual BSD/GPL"); +MODULE_AUTHOR("Intel Corp., http://www.intel.com"); +MODULE_DESCRIPTION("Driver for the Intel ACRN Hypervisor NPK Log"); +MODULE_VERSION("0.1"); diff --git a/drivers/acrn/hv_npk_log.h b/drivers/acrn/hv_npk_log.h new file mode 100644 index 0000000000000..68bd682865229 --- /dev/null +++ b/drivers/acrn/hv_npk_log.h @@ -0,0 +1,109 @@ +/* + * ACRN Hypervisor NPK Log + * + * This file is provided under a dual BSD/GPLv2 license. When using or + * redistributing this file, you may do so under either license. + * + * GPL LICENSE SUMMARY + * + * Copyright (c) 2018 Intel Corporation. All rights reserved. 
+ * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * BSD LICENSE + * + * Copyright (C) 2018 Intel Corporation. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + */ + +#ifndef _HV_NPK_LOG_H_ +#define _HV_NPK_LOG_H_ + +#define NPK_DRV_NAME "intel_th_pci" + +#define NPK_CHAN_SIZE 64 +#define TH_MMIO_CONFIG 0 +#define TH_MMIO_SW 2 + +#define PCI_REG_FW_LBAR 0x70 +#define PCI_REG_FW_UBAR 0x74 + +#define REG_STH_STHCAP0 0x4000 +#define REG_STH_STHCAP1 0x4004 +#define REG_STH_STHCAP2 0x407C + +#define HV_NPK_LOG_ENABLED 1 +#define HV_NPK_LOG_DISABLED 0 +#define HV_NPK_LOG_UNKNOWN (-1) +#define HV_NPK_LOG_MAX_PARAM 4 + +#define HV_NPK_LOG_DFT_MASTER 74 +#define HV_NPK_LOG_DFT_CHANNEL 0 + +enum { + HV_NPK_LOG_CMD_INVALID, + HV_NPK_LOG_CMD_CONF, + HV_NPK_LOG_CMD_ENABLE, + HV_NPK_LOG_CMD_DISABLE, + HV_NPK_LOG_CMD_QUERY, +}; + +enum { + HV_NPK_LOG_RES_INVALID, + HV_NPK_LOG_RES_OK, + HV_NPK_LOG_RES_KO, + HV_NPK_LOG_RES_ENABLED, + HV_NPK_LOG_RES_DISABLED, +}; + +struct hv_npk_log_conf { + int status; + int master; + int channel; + int loglevel; + unsigned int fw_start; + unsigned int fw_end; + unsigned int sw_start; + unsigned int sw_end; + unsigned int nchan; + phys_addr_t stmr_base; + phys_addr_t stmr_end; + phys_addr_t ftmr_base; + phys_addr_t ftmr_end; + phys_addr_t ch_addr; +}; + +#endif /* _HV_NPK_LOG_H_ */ diff --git a/drivers/acrn/sbuf.c b/drivers/acrn/sbuf.c new file mode 100644 index 0000000000000..b51ee04e12fa6 --- /dev/null +++ b/drivers/acrn/sbuf.c @@ -0,0 +1,241 @@ +/* + * shared buffer + * + * This file is provided under a dual BSD/GPLv2 license.  
When using or + * redistributing this file, you may do so under either license. + * + * GPL LICENSE SUMMARY + * + * Copyright (c) 2017 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU + * General Public License for more details. + * + * Contact Information: Li Fei + * + * BSD LICENSE + * + * Copyright (C) 2017 Intel Corporation. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + * Li Fei + * + */ + +#define pr_fmt(fmt) "SBuf: " fmt + +#include +#include +#include +#include +#include +#include "sbuf.h" + +static inline bool sbuf_is_empty(shared_buf_t *sbuf) +{ + return (sbuf->head == sbuf->tail); +} + +static inline uint32_t sbuf_next_ptr(uint32_t pos, + uint32_t span, uint32_t scope) +{ + pos += span; + pos = (pos >= scope) ? (pos - scope) : pos; + return pos; +} + +static inline uint32_t sbuf_calculate_allocate_size(uint32_t ele_num, + uint32_t ele_size) +{ + uint64_t sbuf_allocate_size; + + sbuf_allocate_size = ele_num * ele_size; + sbuf_allocate_size += SBUF_HEAD_SIZE; + if (sbuf_allocate_size > SBUF_MAX_SIZE) { + pr_err("num=0x%x, size=0x%x exceed 0x%llx!\n", + ele_num, ele_size, SBUF_MAX_SIZE); + return 0; + } + + /* align to PAGE_SIZE */ + return (sbuf_allocate_size + PAGE_SIZE - 1) & PAGE_MASK; +} + +shared_buf_t *sbuf_allocate(uint32_t ele_num, uint32_t ele_size) +{ + shared_buf_t *sbuf; + struct page *page; + uint32_t sbuf_allocate_size; + + if (!ele_num || !ele_size) { + pr_err("invalid parameter %s!\n", __func__); + return NULL; + } + + sbuf_allocate_size = sbuf_calculate_allocate_size(ele_num, ele_size); + if (!sbuf_allocate_size) + return NULL; + + page = alloc_pages(GFP_KERNEL | __GFP_ZERO, + get_order(sbuf_allocate_size)); + if (page == NULL) { + pr_err("failed to alloc pages!\n"); + return NULL; + } + + sbuf = phys_to_virt(page_to_phys(page)); + sbuf->ele_num = ele_num; + sbuf->ele_size 
= ele_size; + sbuf->size = ele_num * ele_size; + sbuf->magic = SBUF_MAGIC; + pr_info("ele_num=0x%x, ele_size=0x%x allocated!\n", + ele_num, ele_size); + return sbuf; +} +EXPORT_SYMBOL(sbuf_allocate); + +void sbuf_free(shared_buf_t *sbuf) +{ + uint32_t sbuf_allocate_size; + + if ((sbuf == NULL) || sbuf->magic != SBUF_MAGIC) { + pr_err("invalid parameter %s\n", __func__); + return; + } + + sbuf_allocate_size = sbuf_calculate_allocate_size(sbuf->ele_num, + sbuf->ele_size); + if (!sbuf_allocate_size) + return; + + sbuf->magic = 0; + __free_pages((struct page *)virt_to_page(sbuf), + get_order(sbuf_allocate_size)); +} +EXPORT_SYMBOL(sbuf_free); + +int sbuf_get(shared_buf_t *sbuf, uint8_t *data) +{ + const void *from; + + if ((sbuf == NULL) || (data == NULL)) + return -EINVAL; + + if (sbuf_is_empty(sbuf)) { + /* no data available */ + return 0; + } + + from = (void *)sbuf + SBUF_HEAD_SIZE + sbuf->head; + + memcpy(data, from, sbuf->ele_size); + + sbuf->head = sbuf_next_ptr(sbuf->head, sbuf->ele_size, sbuf->size); + + return sbuf->ele_size; +} +EXPORT_SYMBOL(sbuf_get); + +int sbuf_share_setup(uint32_t pcpu_id, uint32_t sbuf_id, shared_buf_t *sbuf) +{ + struct sbuf_setup_param ssp; + + if (x86_hyper_type != X86_HYPER_ACRN) + return -ENODEV; + + ssp.pcpu_id = pcpu_id; + ssp.sbuf_id = sbuf_id; + + if (!sbuf) { + ssp.gpa = 0; + } else { + BUG_ON(!virt_addr_valid(sbuf)); + ssp.gpa = virt_to_phys(sbuf); + } + pr_info("setup phys add = 0x%llx\n", ssp.gpa); + + return hcall_setup_sbuf(virt_to_phys(&ssp)); +} +EXPORT_SYMBOL(sbuf_share_setup); + +shared_buf_t *sbuf_check_valid(uint32_t ele_num, uint32_t ele_size, + uint64_t paddr) +{ + shared_buf_t *sbuf; + + if (!ele_num || !ele_size || !paddr) + return NULL; + + sbuf = (shared_buf_t *)phys_to_virt(paddr); + BUG_ON(!virt_addr_valid(sbuf)); + + if ((sbuf->magic == SBUF_MAGIC) && + (sbuf->ele_num == ele_num) && + (sbuf->ele_size == ele_size)) { + return sbuf; + } + + return NULL; +} +EXPORT_SYMBOL(sbuf_check_valid); + +shared_buf_t 
*sbuf_construct(uint32_t ele_num, uint32_t ele_size, + uint64_t paddr) +{ + shared_buf_t *sbuf; + + if (!ele_num || !ele_size || !paddr) + return NULL; + + sbuf = (shared_buf_t *)phys_to_virt(paddr); + BUG_ON(!virt_addr_valid(sbuf)); + + memset(sbuf, 0, SBUF_HEAD_SIZE); + sbuf->magic = SBUF_MAGIC; + sbuf->ele_num = ele_num; + sbuf->ele_size = ele_size; + sbuf->size = ele_num * ele_size; + pr_info("construct sbuf at 0x%llx.\n", paddr); + return sbuf; +} +EXPORT_SYMBOL(sbuf_construct); + +void sbuf_deconstruct(shared_buf_t *sbuf) +{ + if (sbuf == NULL) + return; + + sbuf->magic = 0; +} +EXPORT_SYMBOL(sbuf_deconstruct); diff --git a/drivers/acrn/sbuf.h b/drivers/acrn/sbuf.h new file mode 100644 index 0000000000000..d08bf9fedf70e --- /dev/null +++ b/drivers/acrn/sbuf.h @@ -0,0 +1,131 @@ +/* + * shared buffer + * + * This file is provided under a dual BSD/GPLv2 license.  When using or + * redistributing this file, you may do so under either license. + * + * GPL LICENSE SUMMARY + * + * Copyright (c) 2017 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU + * General Public License for more details. + * + * Contact Information: Li Fei + * + * BSD LICENSE + * + * Copyright (C) 2017 Intel Corporation. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. 
+ * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ * + * Li Fei + * + */ + +#ifndef SHARED_BUF_H +#define SHARED_BUF_H + +#include + + +#define SBUF_MAGIC 0x5aa57aa71aa13aa3 +#define SBUF_MAX_SIZE (1ULL << 22) +#define SBUF_HEAD_SIZE 64 + +/* sbuf flags */ +#define OVERRUN_CNT_EN (1ULL << 0) /* whether overrun counting is enabled */ +#define OVERWRITE_EN (1ULL << 1) /* whether overwrite is enabled */ + +enum sbuf_type { + ACRN_TRACE, + ACRN_HVLOG, + ACRN_SEP, + ACRN_SOCWATCH, + ACRN_SBUF_TYPE_MAX, +}; +/** + * (sbuf) head + buf (store (ele_num - 1) elements at most) + * buffer empty: tail == head + * buffer full: (tail + ele_size) % size == head + * + * Base of memory for elements + * | + * | + * --------------------------------------------------------------------------------------- + * | shared_buf_t | raw data (ele_size)| raw date (ele_size) | ... | raw data (ele_size) | + * --------------------------------------------------------------------------------------- + * | + * | + * shared_buf_t *buf + */ + +/* Make sure sizeof(shared_buf_t) == SBUF_HEAD_SIZE */ +typedef struct shared_buf { + uint64_t magic; + uint32_t ele_num; /* number of elements */ + uint32_t ele_size; /* sizeof of elements */ + uint32_t head; /* offset from base, to read */ + uint32_t tail; /* offset from base, to write */ + uint64_t flags; + uint32_t overrun_cnt; /* count of overrun */ + uint32_t size; /* ele_num * ele_size */ + uint32_t padding[6]; +} ____cacheline_aligned shared_buf_t; + +static inline void sbuf_clear_flags(shared_buf_t *sbuf, uint64_t flags) +{ + sbuf->flags &= ~flags; +} + +static inline void sbuf_set_flags(shared_buf_t *sbuf, uint64_t flags) +{ + sbuf->flags = flags; +} + +static inline void sbuf_add_flags(shared_buf_t *sbuf, uint64_t flags) +{ + sbuf->flags |= flags; +} + +shared_buf_t *sbuf_allocate(uint32_t ele_num, uint32_t ele_size); +void sbuf_free(shared_buf_t *sbuf); +int sbuf_get(shared_buf_t *sbuf, uint8_t *data); +int sbuf_share_setup(uint32_t pcpu_id, uint32_t sbuf_id, shared_buf_t *sbuf); +shared_buf_t 
*sbuf_check_valid(uint32_t ele_num, uint32_t ele_size, + uint64_t gpa); +shared_buf_t *sbuf_construct(uint32_t ele_num, uint32_t ele_size, + uint64_t gpa); +void sbuf_deconstruct(shared_buf_t *sbuf); + +#endif /* SHARED_BUF_H */ diff --git a/drivers/android/binder.c b/drivers/android/binder.c index d58763b6b0090..ce0e4d317d241 100644 --- a/drivers/android/binder.c +++ b/drivers/android/binder.c @@ -2971,7 +2971,6 @@ static void binder_transaction(struct binder_proc *proc, t->buffer = NULL; goto err_binder_alloc_buf_failed; } - t->buffer->allow_user_free = 0; t->buffer->debug_id = t->debug_id; t->buffer->transaction = t; t->buffer->target_node = target_node; @@ -3465,14 +3464,18 @@ static int binder_thread_write(struct binder_proc *proc, buffer = binder_alloc_prepare_to_free(&proc->alloc, data_ptr); - if (buffer == NULL) { - binder_user_error("%d:%d BC_FREE_BUFFER u%016llx no match\n", - proc->pid, thread->pid, (u64)data_ptr); - break; - } - if (!buffer->allow_user_free) { - binder_user_error("%d:%d BC_FREE_BUFFER u%016llx matched unreturned buffer\n", - proc->pid, thread->pid, (u64)data_ptr); + if (IS_ERR_OR_NULL(buffer)) { + if (PTR_ERR(buffer) == -EPERM) { + binder_user_error( + "%d:%d BC_FREE_BUFFER u%016llx matched unreturned or currently freeing buffer\n", + proc->pid, thread->pid, + (u64)data_ptr); + } else { + binder_user_error( + "%d:%d BC_FREE_BUFFER u%016llx no match\n", + proc->pid, thread->pid, + (u64)data_ptr); + } break; } binder_debug(BINDER_DEBUG_FREE_BUFFER, diff --git a/drivers/android/binder_alloc.c b/drivers/android/binder_alloc.c index 64fd96eada31f..030c98f35cca7 100644 --- a/drivers/android/binder_alloc.c +++ b/drivers/android/binder_alloc.c @@ -151,16 +151,12 @@ static struct binder_buffer *binder_alloc_prepare_to_free_locked( else { /* * Guard against user threads attempting to - * free the buffer twice + * free the buffer when in use by kernel or + * after it's already been freed. 
*/ - if (buffer->free_in_progress) { - binder_alloc_debug(BINDER_DEBUG_USER_ERROR, - "%d:%d FREE_BUFFER u%016llx user freed buffer twice\n", - alloc->pid, current->pid, - (u64)user_ptr); - return NULL; - } - buffer->free_in_progress = 1; + if (!buffer->allow_user_free) + return ERR_PTR(-EPERM); + buffer->allow_user_free = 0; return buffer; } } @@ -500,7 +496,7 @@ static struct binder_buffer *binder_alloc_new_buf_locked( rb_erase(best_fit, &alloc->free_buffers); buffer->free = 0; - buffer->free_in_progress = 0; + buffer->allow_user_free = 0; binder_insert_allocated_buffer_locked(alloc, buffer); binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC, "%d: binder_alloc_buf size %zd got %pK\n", diff --git a/drivers/android/binder_alloc.h b/drivers/android/binder_alloc.h index 9ef64e5638566..fb3238c74c8a8 100644 --- a/drivers/android/binder_alloc.h +++ b/drivers/android/binder_alloc.h @@ -50,8 +50,7 @@ struct binder_buffer { unsigned free:1; unsigned allow_user_free:1; unsigned async_transaction:1; - unsigned free_in_progress:1; - unsigned debug_id:28; + unsigned debug_id:29; struct binder_transaction *transaction; diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c index a9dd4ea7467df..b8c3f9e6af899 100644 --- a/drivers/ata/libata-core.c +++ b/drivers/ata/libata-core.c @@ -4553,6 +4553,7 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = { /* These specific Samsung models/firmware-revs do not handle LPM well */ { "SAMSUNG MZMPC128HBFU-000MV", "CXM14M1Q", ATA_HORKAGE_NOLPM, }, { "SAMSUNG SSD PM830 mSATA *", "CXM13D1Q", ATA_HORKAGE_NOLPM, }, + { "SAMSUNG MZ7TD256HAFV-000L9", NULL, ATA_HORKAGE_NOLPM, }, /* devices that don't properly handle queued TRIM commands */ { "Micron_M500IT_*", "MU01", ATA_HORKAGE_NO_NCQ_TRIM | @@ -4601,6 +4602,7 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = { { "SSD*INTEL*", NULL, ATA_HORKAGE_ZERO_AFTER_TRIM, }, { "Samsung*SSD*", NULL, ATA_HORKAGE_ZERO_AFTER_TRIM, }, { "SAMSUNG*SSD*", NULL, 
ATA_HORKAGE_ZERO_AFTER_TRIM, }, + { "SAMSUNG*MZ7KM*", NULL, ATA_HORKAGE_ZERO_AFTER_TRIM, }, { "ST[1248][0248]0[FH]*", NULL, ATA_HORKAGE_ZERO_AFTER_TRIM, }, /* diff --git a/drivers/auxdisplay/charlcd.c b/drivers/auxdisplay/charlcd.c index 81c22d20d9d9c..60e0b772673f3 100644 --- a/drivers/auxdisplay/charlcd.c +++ b/drivers/auxdisplay/charlcd.c @@ -538,6 +538,9 @@ static inline int handle_lcd_special_code(struct charlcd *lcd) } case 'x': /* gotoxy : LxXXX[yYYY]; */ case 'y': /* gotoxy : LyYYY[xXXX]; */ + if (priv->esc_seq.buf[priv->esc_seq.len - 1] != ';') + break; + /* If the command is valid, move to the new address */ if (parse_xy(esc, &priv->addr.x, &priv->addr.y)) charlcd_gotoxy(lcd); diff --git a/drivers/base/bus.c b/drivers/base/bus.c index 8bfd27ec73d60..585e2e1c9c8f4 100644 --- a/drivers/base/bus.c +++ b/drivers/base/bus.c @@ -31,6 +31,9 @@ static struct kset *system_kset; #define to_drv_attr(_attr) container_of(_attr, struct driver_attribute, attr) +#define DRIVER_ATTR_IGNORE_LOCKDEP(_name, _mode, _show, _store) \ + struct driver_attribute driver_attr_##_name = \ + __ATTR_IGNORE_LOCKDEP(_name, _mode, _show, _store) static int __must_check bus_rescan_devices_helper(struct device *dev, void *data); @@ -195,7 +198,7 @@ static ssize_t unbind_store(struct device_driver *drv, const char *buf, bus_put(bus); return err; } -static DRIVER_ATTR_WO(unbind); +static DRIVER_ATTR_IGNORE_LOCKDEP(unbind, S_IWUSR, NULL, unbind_store); /* * Manually attach a device to a driver. 
@@ -231,7 +234,7 @@ static ssize_t bind_store(struct device_driver *drv, const char *buf, bus_put(bus); return err; } -static DRIVER_ATTR_WO(bind); +static DRIVER_ATTR_IGNORE_LOCKDEP(bind, S_IWUSR, NULL, bind_store); static ssize_t show_drivers_autoprobe(struct bus_type *bus, char *buf) { diff --git a/drivers/base/dd.c b/drivers/base/dd.c index edfc9f0b11809..2607f859881af 100644 --- a/drivers/base/dd.c +++ b/drivers/base/dd.c @@ -931,11 +931,11 @@ static void __device_release_driver(struct device *dev, struct device *parent) while (device_links_busy(dev)) { device_unlock(dev); - if (parent) + if (parent && dev->bus->need_parent_lock) device_unlock(parent); device_links_unbind_consumers(dev); - if (parent) + if (parent && dev->bus->need_parent_lock) device_lock(parent); device_lock(dev); diff --git a/drivers/base/platform-msi.c b/drivers/base/platform-msi.c index 60d6cc618f1c4..6d54905c62639 100644 --- a/drivers/base/platform-msi.c +++ b/drivers/base/platform-msi.c @@ -366,14 +366,16 @@ void platform_msi_domain_free(struct irq_domain *domain, unsigned int virq, unsigned int nvec) { struct platform_msi_priv_data *data = domain->host_data; - struct msi_desc *desc; - for_each_msi_entry(desc, data->dev) { + struct msi_desc *desc, *tmp; + for_each_msi_entry_safe(desc, tmp, data->dev) { if (WARN_ON(!desc->irq || desc->nvec_used != 1)) return; if (!(desc->irq >= virq && desc->irq < (virq + nvec))) continue; irq_domain_free_irqs_common(domain, desc->irq, 1); + list_del(&desc->list); + free_msi_entry(desc); } } diff --git a/drivers/base/regmap/Kconfig b/drivers/base/regmap/Kconfig index 6ad5ef48b61ee..afa9b84595727 100644 --- a/drivers/base/regmap/Kconfig +++ b/drivers/base/regmap/Kconfig @@ -49,3 +49,14 @@ config REGMAP_SOUNDWIRE config REGMAP_SCCB tristate depends on I2C + +config REGMAP_HWSPINLOCK + bool + +config REGMAP_SDW + default n + tristate "Regmap support for soundwire" + depends on SDW + help + Enable this if regmap support is required for + soundwire slave 
devices. diff --git a/drivers/base/regmap/Makefile b/drivers/base/regmap/Makefile index f5b4e8851d00d..d78c51ae3da2f 100644 --- a/drivers/base/regmap/Makefile +++ b/drivers/base/regmap/Makefile @@ -16,3 +16,4 @@ obj-$(CONFIG_REGMAP_IRQ) += regmap-irq.o obj-$(CONFIG_REGMAP_W1) += regmap-w1.o obj-$(CONFIG_REGMAP_SOUNDWIRE) += regmap-sdw.o obj-$(CONFIG_REGMAP_SCCB) += regmap-sccb.o +obj-$(CONFIG_REGMAP_SDW) += regmap-sdwint.o diff --git a/drivers/base/regmap/regmap-sdwint.c b/drivers/base/regmap/regmap-sdwint.c new file mode 100644 index 0000000000000..ed8c28db03b8a --- /dev/null +++ b/drivers/base/regmap/regmap-sdwint.c @@ -0,0 +1,252 @@ +/* + * regmap-sdw.c - Register map access API - SoundWire support + * + * Copyright (C) 2015-2016 Intel Corp + * Author: Hardik T Shah + * + * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; version 2 of the License. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. 
+ * + * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + * + */ +#include +#include +#include + +#include "internal.h" + +#define SDW_SCP_ADDRPAGE1_MASK 0xFF +#define SDW_SCP_ADDRPAGE1_SHIFT 15 + +#define SDW_SCP_ADDRPAGE2_MASK 0xFF +#define SDW_SCP_ADDRPAGE2_SHIFT 22 + +#define SDW_REGADDR_SHIFT 0x0 +#define SDW_REGADDR_MASK 0xFFFF + +#define SDW_MAX_REG_ADDR 65536 + +static int regmap_sdw_read(void *context, + const void *reg, size_t reg_size, + void *val, size_t val_size) +{ + struct device *dev = context; + struct sdw_slv *sdw = to_sdw_slave(dev); + struct sdw_msg xfer; + int ret, scp_addr1, scp_addr2; + int reg_command; + int reg_addr = *(u32 *)reg; + size_t t_val_size = 0, t_size; + int offset; + u8 *t_val; + + /* All registers are 4 byte on SoundWire bus */ + if (reg_size != 4) + return -ENOTSUPP; + + xfer.slave_addr = sdw->slv_number; + xfer.ssp_tag = 0; + xfer.flag = SDW_MSG_FLAG_READ; + xfer.len = 0; + t_val = val; + + offset = 0; + reg_command = (reg_addr >> SDW_REGADDR_SHIFT) & + SDW_REGADDR_MASK; + if (val_size > SDW_MAX_REG_ADDR) + t_size = SDW_MAX_REG_ADDR - reg_command; + else + t_size = val_size; + while (t_val_size < val_size) { + + scp_addr1 = (reg_addr >> SDW_SCP_ADDRPAGE1_SHIFT) & + SDW_SCP_ADDRPAGE1_MASK; + scp_addr2 = (reg_addr >> SDW_SCP_ADDRPAGE2_SHIFT) & + SDW_SCP_ADDRPAGE2_MASK; + xfer.addr_page1 = scp_addr1; + xfer.addr_page2 = scp_addr2; + xfer.addr = reg_command; + xfer.len += t_size; + xfer.buf = &t_val[offset]; + ret = sdw_slave_transfer(sdw->mstr, &xfer, 1); + if (ret < 0) + return ret; + else if (ret != 1) + return -EIO; + + t_val_size += t_size; + offset += t_size; + if (val_size - t_val_size > 65535) + t_size = 65535; + else + t_size = val_size - t_val_size; + reg_addr += t_size; + reg_command = (reg_addr >> SDW_REGADDR_SHIFT) & + SDW_REGADDR_MASK; + } + return 0; +} + +static int regmap_sdw_gather_write(void *context, + const void *reg, size_t reg_size, + const void *val, size_t val_size) +{ + struct 
device *dev = context; + struct sdw_slv *sdw = to_sdw_slave(dev); + struct sdw_msg xfer; + int ret, scp_addr1, scp_addr2; + int reg_command; + int reg_addr = *(u32 *)reg; + size_t t_val_size = 0, t_size; + int offset; + u8 *t_val; + + /* All registers are 4 byte on SoundWire bus */ + if (reg_size != 4) + return -ENOTSUPP; + + if (!sdw) + return 0; + + xfer.slave_addr = sdw->slv_number; + xfer.ssp_tag = 0; + xfer.flag = SDW_MSG_FLAG_WRITE; + xfer.len = 0; + t_val = (u8 *)val; + + offset = 0; + reg_command = (reg_addr >> SDW_REGADDR_SHIFT) & + SDW_REGADDR_MASK; + if (val_size > SDW_MAX_REG_ADDR) + t_size = SDW_MAX_REG_ADDR - reg_command; + else + t_size = val_size; + while (t_val_size < val_size) { + + scp_addr1 = (reg_addr >> SDW_SCP_ADDRPAGE1_SHIFT) & + SDW_SCP_ADDRPAGE1_MASK; + scp_addr2 = (reg_addr >> SDW_SCP_ADDRPAGE2_SHIFT) & + SDW_SCP_ADDRPAGE2_MASK; + xfer.addr_page1 = scp_addr1; + xfer.addr_page2 = scp_addr2; + xfer.addr = reg_command; + xfer.len += t_size; + xfer.buf = &t_val[offset]; + ret = sdw_slave_transfer(sdw->mstr, &xfer, 1); + if (ret < 0) + return ret; + else if (ret != 1) + return -EIO; + + t_val_size += t_size; + offset += t_size; + if (val_size - t_val_size > 65535) + t_size = 65535; + else + t_size = val_size - t_val_size; + reg_addr += t_size; + reg_command = (reg_addr >> SDW_REGADDR_SHIFT) & + SDW_REGADDR_MASK; + } + return 0; +} + +static inline void regmap_sdw_count_check(size_t count, u32 offset) +{ + BUG_ON(count <= offset); +} + +static int regmap_sdw_write(void *context, const void *data, size_t count) +{ + /* 4-byte register address for the soundwire */ + unsigned int offset = 4; + + regmap_sdw_count_check(count, offset); + return regmap_sdw_gather_write(context, data, 4, + data + offset, count - offset); +} + +static struct regmap_bus regmap_sdw = { + .write = regmap_sdw_write, + .gather_write = regmap_sdw_gather_write, + .read = regmap_sdw_read, + .reg_format_endian_default = REGMAP_ENDIAN_LITTLE, + .val_format_endian_default = 
REGMAP_ENDIAN_LITTLE, +}; + +static int regmap_sdw_config_check(const struct regmap_config *config) +{ + /* All register are 8-bits wide as per MIPI Soundwire 1.0 Spec */ + if (config->val_bits != 8) + return -ENOTSUPP; + /* Registers are 32 bit in size, based on SCP_ADDR1 and SCP_ADDR2 + * implementation address range may vary in slave. + */ + if (config->reg_bits != 32) + return -ENOTSUPP; + /* SoundWire register address are contiguous. */ + if (config->reg_stride != 0) + return -ENOTSUPP; + if (config->pad_bits != 0) + return -ENOTSUPP; + + + return 0; +} + +/** + * regmap_init_sdwint(): Initialise register map + * + * @sdw: Device that will be interacted with + * @config: Configuration for register map + * + * The return value will be an ERR_PTR() on error or a valid pointer to + * a struct regmap. + */ +struct regmap *regmap_init_sdwint(struct sdw_slv *sdw, + const struct regmap_config *config) +{ + int ret; + + ret = regmap_sdw_config_check(config); + if (ret) + return ERR_PTR(ret); + + return regmap_init(&sdw->dev, ®map_sdw, &sdw->dev, config); +} +EXPORT_SYMBOL_GPL(regmap_init_sdwint); + + +/** + * devm_regmap_init_sdwint(): Initialise managed register map + * + * @sdw Device that will be interacted with + * @config: Configuration for register map + * + * The return value will be an ERR_PTR() on error or a valid pointer + * to a struct regmap. The regmap will be automatically freed by the + * device management code. 
+ */ +struct regmap *devm_regmap_init_sdwint(struct sdw_slv *sdw, + const struct regmap_config *config) +{ + int ret; + + ret = regmap_sdw_config_check(config); + if (ret) + return ERR_PTR(ret); + + return devm_regmap_init(&sdw->dev, ®map_sdw, &sdw->dev, config); +} +EXPORT_SYMBOL_GPL(devm_regmap_init_sdwint); + +MODULE_LICENSE("GPL v2"); diff --git a/drivers/block/ataflop.c b/drivers/block/ataflop.c index dfb2c2622e5a6..822e3060d8348 100644 --- a/drivers/block/ataflop.c +++ b/drivers/block/ataflop.c @@ -1935,6 +1935,11 @@ static int __init atari_floppy_init (void) unit[i].disk = alloc_disk(1); if (!unit[i].disk) goto Enomem; + + unit[i].disk->queue = blk_init_queue(do_fd_request, + &ataflop_lock); + if (!unit[i].disk->queue) + goto Enomem; } if (UseTrackbuffer < 0) @@ -1966,10 +1971,6 @@ static int __init atari_floppy_init (void) sprintf(unit[i].disk->disk_name, "fd%d", i); unit[i].disk->fops = &floppy_fops; unit[i].disk->private_data = &unit[i]; - unit[i].disk->queue = blk_init_queue(do_fd_request, - &ataflop_lock); - if (!unit[i].disk->queue) - goto Enomem; set_capacity(unit[i].disk, MAX_DISK_SIZE * 2); add_disk(unit[i].disk); } @@ -1984,13 +1985,17 @@ static int __init atari_floppy_init (void) return 0; Enomem: - while (i--) { - struct request_queue *q = unit[i].disk->queue; + do { + struct gendisk *disk = unit[i].disk; - put_disk(unit[i].disk); - if (q) - blk_cleanup_queue(q); - } + if (disk) { + if (disk->queue) { + blk_cleanup_queue(disk->queue); + disk->queue = NULL; + } + put_disk(unit[i].disk); + } + } while (i--); unregister_blkdev(FLOPPY_MAJOR, "fd"); return -ENOMEM; diff --git a/drivers/block/brd.c b/drivers/block/brd.c index df8103dd40ac2..c18586fccb6f2 100644 --- a/drivers/block/brd.c +++ b/drivers/block/brd.c @@ -396,15 +396,14 @@ static struct brd_device *brd_alloc(int i) disk->first_minor = i * max_part; disk->fops = &brd_fops; disk->private_data = brd; - disk->queue = brd->brd_queue; disk->flags = GENHD_FL_EXT_DEVT; sprintf(disk->disk_name, 
"ram%d", i); set_capacity(disk, rd_size * 2); - disk->queue->backing_dev_info->capabilities |= BDI_CAP_SYNCHRONOUS_IO; + brd->brd_queue->backing_dev_info->capabilities |= BDI_CAP_SYNCHRONOUS_IO; /* Tell the block layer that this is not a rotational device */ - blk_queue_flag_set(QUEUE_FLAG_NONROT, disk->queue); - blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, disk->queue); + blk_queue_flag_set(QUEUE_FLAG_NONROT, brd->brd_queue); + blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, brd->brd_queue); return brd; @@ -436,6 +435,7 @@ static struct brd_device *brd_init_one(int i, bool *new) brd = brd_alloc(i); if (brd) { + brd->brd_disk->queue = brd->brd_queue; add_disk(brd->brd_disk); list_add_tail(&brd->brd_list, &brd_devices); } @@ -503,8 +503,14 @@ static int __init brd_init(void) /* point of no return */ - list_for_each_entry(brd, &brd_devices, brd_list) + list_for_each_entry(brd, &brd_devices, brd_list) { + /* + * associate with queue just before adding disk for + * avoiding to mess up failure path + */ + brd->brd_disk->queue = brd->brd_queue; add_disk(brd->brd_disk); + } blk_register_region(MKDEV(RAMDISK_MAJOR, 0), 1UL << MINORBITS, THIS_MODULE, brd_probe, NULL, NULL); diff --git a/drivers/block/floppy.c b/drivers/block/floppy.c index f2b6f4da10341..fdabd0b74492d 100644 --- a/drivers/block/floppy.c +++ b/drivers/block/floppy.c @@ -4151,10 +4151,11 @@ static int __floppy_read_block_0(struct block_device *bdev, int drive) bio.bi_end_io = floppy_rb0_cb; bio_set_op_attrs(&bio, REQ_OP_READ, 0); + init_completion(&cbdata.complete); + submit_bio(&bio); process_fd_request(); - init_completion(&cbdata.complete); wait_for_completion(&cbdata.complete); __free_page(page); diff --git a/drivers/block/loop.c b/drivers/block/loop.c index ea9debf59b225..c9c2bcc36e264 100644 --- a/drivers/block/loop.c +++ b/drivers/block/loop.c @@ -83,7 +83,7 @@ #include static DEFINE_IDR(loop_index_idr); -static DEFINE_MUTEX(loop_index_mutex); +static DEFINE_MUTEX(loop_ctl_mutex); static int max_part; 
static int part_shift; @@ -631,18 +631,7 @@ static void loop_reread_partitions(struct loop_device *lo, { int rc; - /* - * bd_mutex has been held already in release path, so don't - * acquire it if this function is called in such case. - * - * If the reread partition isn't from release path, lo_refcnt - * must be at least one and it can only become zero when the - * current holder is released. - */ - if (!atomic_read(&lo->lo_refcnt)) - rc = __blkdev_reread_part(bdev); - else - rc = blkdev_reread_part(bdev); + rc = blkdev_reread_part(bdev); if (rc) pr_warn("%s: partition scan of loop%d (%s) failed (rc=%d)\n", __func__, lo->lo_number, lo->lo_file_name, rc); @@ -689,26 +678,30 @@ static int loop_validate_file(struct file *file, struct block_device *bdev) static int loop_change_fd(struct loop_device *lo, struct block_device *bdev, unsigned int arg) { - struct file *file, *old_file; + struct file *file = NULL, *old_file; int error; + bool partscan; + error = mutex_lock_killable(&loop_ctl_mutex); + if (error) + return error; error = -ENXIO; if (lo->lo_state != Lo_bound) - goto out; + goto out_err; /* the loop device has to be read-only */ error = -EINVAL; if (!(lo->lo_flags & LO_FLAGS_READ_ONLY)) - goto out; + goto out_err; error = -EBADF; file = fget(arg); if (!file) - goto out; + goto out_err; error = loop_validate_file(file, bdev); if (error) - goto out_putf; + goto out_err; old_file = lo->lo_backing_file; @@ -716,7 +709,7 @@ static int loop_change_fd(struct loop_device *lo, struct block_device *bdev, /* size of the new backing store needs to be the same */ if (get_loop_size(lo, file) != get_loop_size(lo, old_file)) - goto out_putf; + goto out_err; /* and ... 
switch */ blk_mq_freeze_queue(lo->lo_queue); @@ -727,15 +720,22 @@ static int loop_change_fd(struct loop_device *lo, struct block_device *bdev, lo->old_gfp_mask & ~(__GFP_IO|__GFP_FS)); loop_update_dio(lo); blk_mq_unfreeze_queue(lo->lo_queue); - + partscan = lo->lo_flags & LO_FLAGS_PARTSCAN; + mutex_unlock(&loop_ctl_mutex); + /* + * We must drop file reference outside of loop_ctl_mutex as dropping + * the file ref can take bd_mutex which creates circular locking + * dependency. + */ fput(old_file); - if (lo->lo_flags & LO_FLAGS_PARTSCAN) + if (partscan) loop_reread_partitions(lo, bdev); return 0; - out_putf: - fput(file); - out: +out_err: + mutex_unlock(&loop_ctl_mutex); + if (file) + fput(file); return error; } @@ -910,6 +910,7 @@ static int loop_set_fd(struct loop_device *lo, fmode_t mode, int lo_flags = 0; int error; loff_t size; + bool partscan; /* This is safe, since we have a reference from open(). */ __module_get(THIS_MODULE); @@ -919,13 +920,17 @@ static int loop_set_fd(struct loop_device *lo, fmode_t mode, if (!file) goto out; + error = mutex_lock_killable(&loop_ctl_mutex); + if (error) + goto out_putf; + error = -EBUSY; if (lo->lo_state != Lo_unbound) - goto out_putf; + goto out_unlock; error = loop_validate_file(file, bdev); if (error) - goto out_putf; + goto out_unlock; mapping = file->f_mapping; inode = mapping->host; @@ -937,10 +942,10 @@ static int loop_set_fd(struct loop_device *lo, fmode_t mode, error = -EFBIG; size = get_loop_size(lo, file); if ((loff_t)(sector_t)size != size) - goto out_putf; + goto out_unlock; error = loop_prepare_queue(lo); if (error) - goto out_putf; + goto out_unlock; error = 0; @@ -972,18 +977,22 @@ static int loop_set_fd(struct loop_device *lo, fmode_t mode, lo->lo_state = Lo_bound; if (part_shift) lo->lo_flags |= LO_FLAGS_PARTSCAN; - if (lo->lo_flags & LO_FLAGS_PARTSCAN) - loop_reread_partitions(lo, bdev); + partscan = lo->lo_flags & LO_FLAGS_PARTSCAN; /* Grab the block_device to prevent its destruction after we - * put 
/dev/loopXX inode. Later in loop_clr_fd() we bdput(bdev). + * put /dev/loopXX inode. Later in __loop_clr_fd() we bdput(bdev). */ bdgrab(bdev); + mutex_unlock(&loop_ctl_mutex); + if (partscan) + loop_reread_partitions(lo, bdev); return 0; - out_putf: +out_unlock: + mutex_unlock(&loop_ctl_mutex); +out_putf: fput(file); - out: +out: /* This is safe: open() is still holding a reference. */ module_put(THIS_MODULE); return error; @@ -1026,39 +1035,31 @@ loop_init_xfer(struct loop_device *lo, struct loop_func_table *xfer, return err; } -static int loop_clr_fd(struct loop_device *lo) +static int __loop_clr_fd(struct loop_device *lo, bool release) { - struct file *filp = lo->lo_backing_file; + struct file *filp = NULL; gfp_t gfp = lo->old_gfp_mask; struct block_device *bdev = lo->lo_device; + int err = 0; + bool partscan = false; + int lo_number; - if (lo->lo_state != Lo_bound) - return -ENXIO; - - /* - * If we've explicitly asked to tear down the loop device, - * and it has an elevated reference count, set it for auto-teardown when - * the last reference goes away. This stops $!~#$@ udev from - * preventing teardown because it decided that it needs to run blkid on - * the loopback device whenever they appear. xfstests is notorious for - * failing tests because blkid via udev races with a losetup - * /do something like mkfs/losetup -d causing the losetup -d - * command to fail with EBUSY. 
- */ - if (atomic_read(&lo->lo_refcnt) > 1) { - lo->lo_flags |= LO_FLAGS_AUTOCLEAR; - mutex_unlock(&lo->lo_ctl_mutex); - return 0; + mutex_lock(&loop_ctl_mutex); + if (WARN_ON_ONCE(lo->lo_state != Lo_rundown)) { + err = -ENXIO; + goto out_unlock; } - if (filp == NULL) - return -EINVAL; + filp = lo->lo_backing_file; + if (filp == NULL) { + err = -EINVAL; + goto out_unlock; + } /* freeze request queue during the transition */ blk_mq_freeze_queue(lo->lo_queue); spin_lock_irq(&lo->lo_lock); - lo->lo_state = Lo_rundown; lo->lo_backing_file = NULL; spin_unlock_irq(&lo->lo_lock); @@ -1094,21 +1095,73 @@ static int loop_clr_fd(struct loop_device *lo) module_put(THIS_MODULE); blk_mq_unfreeze_queue(lo->lo_queue); - if (lo->lo_flags & LO_FLAGS_PARTSCAN && bdev) - loop_reread_partitions(lo, bdev); + partscan = lo->lo_flags & LO_FLAGS_PARTSCAN && bdev; + lo_number = lo->lo_number; lo->lo_flags = 0; if (!part_shift) lo->lo_disk->flags |= GENHD_FL_NO_PART_SCAN; loop_unprepare_queue(lo); - mutex_unlock(&lo->lo_ctl_mutex); +out_unlock: + mutex_unlock(&loop_ctl_mutex); + if (partscan) { + /* + * bd_mutex has been held already in release path, so don't + * acquire it if this function is called in such case. + * + * If the reread partition isn't from release path, lo_refcnt + * must be at least one and it can only become zero when the + * current holder is released. + */ + if (release) + err = __blkdev_reread_part(bdev); + else + err = blkdev_reread_part(bdev); + pr_warn("%s: partition scan of loop%d failed (rc=%d)\n", + __func__, lo_number, err); + /* Device is gone, no point in returning error */ + err = 0; + } /* - * Need not hold lo_ctl_mutex to fput backing file. - * Calling fput holding lo_ctl_mutex triggers a circular + * Need not hold loop_ctl_mutex to fput backing file. + * Calling fput holding loop_ctl_mutex triggers a circular * lock dependency possibility warning as fput can take - * bd_mutex which is usually taken before lo_ctl_mutex. 
+ * bd_mutex which is usually taken before loop_ctl_mutex. */ - fput(filp); - return 0; + if (filp) + fput(filp); + return err; +} + +static int loop_clr_fd(struct loop_device *lo) +{ + int err; + + err = mutex_lock_killable(&loop_ctl_mutex); + if (err) + return err; + if (lo->lo_state != Lo_bound) { + mutex_unlock(&loop_ctl_mutex); + return -ENXIO; + } + /* + * If we've explicitly asked to tear down the loop device, + * and it has an elevated reference count, set it for auto-teardown when + * the last reference goes away. This stops $!~#$@ udev from + * preventing teardown because it decided that it needs to run blkid on + * the loopback device whenever they appear. xfstests is notorious for + * failing tests because blkid via udev races with a losetup + * /do something like mkfs/losetup -d causing the losetup -d + * command to fail with EBUSY. + */ + if (atomic_read(&lo->lo_refcnt) > 1) { + lo->lo_flags |= LO_FLAGS_AUTOCLEAR; + mutex_unlock(&loop_ctl_mutex); + return 0; + } + lo->lo_state = Lo_rundown; + mutex_unlock(&loop_ctl_mutex); + + return __loop_clr_fd(lo, false); } static int @@ -1117,47 +1170,72 @@ loop_set_status(struct loop_device *lo, const struct loop_info64 *info) int err; struct loop_func_table *xfer; kuid_t uid = current_uid(); + struct block_device *bdev; + bool partscan = false; + err = mutex_lock_killable(&loop_ctl_mutex); + if (err) + return err; if (lo->lo_encrypt_key_size && !uid_eq(lo->lo_key_owner, uid) && - !capable(CAP_SYS_ADMIN)) - return -EPERM; - if (lo->lo_state != Lo_bound) - return -ENXIO; - if ((unsigned int) info->lo_encrypt_key_size > LO_KEY_SIZE) - return -EINVAL; + !capable(CAP_SYS_ADMIN)) { + err = -EPERM; + goto out_unlock; + } + if (lo->lo_state != Lo_bound) { + err = -ENXIO; + goto out_unlock; + } + if ((unsigned int) info->lo_encrypt_key_size > LO_KEY_SIZE) { + err = -EINVAL; + goto out_unlock; + } + + if (lo->lo_offset != info->lo_offset || + lo->lo_sizelimit != info->lo_sizelimit) { + sync_blockdev(lo->lo_device); + 
kill_bdev(lo->lo_device); + } /* I/O need to be drained during transfer transition */ blk_mq_freeze_queue(lo->lo_queue); err = loop_release_xfer(lo); if (err) - goto exit; + goto out_unfreeze; if (info->lo_encrypt_type) { unsigned int type = info->lo_encrypt_type; if (type >= MAX_LO_CRYPT) { err = -EINVAL; - goto exit; + goto out_unfreeze; } xfer = xfer_funcs[type]; if (xfer == NULL) { err = -EINVAL; - goto exit; + goto out_unfreeze; } } else xfer = NULL; err = loop_init_xfer(lo, xfer, info); if (err) - goto exit; + goto out_unfreeze; if (lo->lo_offset != info->lo_offset || lo->lo_sizelimit != info->lo_sizelimit) { + /* kill_bdev should have truncated all the pages */ + if (lo->lo_device->bd_inode->i_mapping->nrpages) { + err = -EAGAIN; + pr_warn("%s: loop%d (%s) has still dirty pages (nrpages=%lu)\n", + __func__, lo->lo_number, lo->lo_file_name, + lo->lo_device->bd_inode->i_mapping->nrpages); + goto out_unfreeze; + } if (figure_loop_size(lo, info->lo_offset, info->lo_sizelimit)) { err = -EFBIG; - goto exit; + goto out_unfreeze; } } @@ -1189,15 +1267,20 @@ loop_set_status(struct loop_device *lo, const struct loop_info64 *info) /* update dio if lo_offset or transfer is changed */ __loop_update_dio(lo, lo->use_dio); - exit: +out_unfreeze: blk_mq_unfreeze_queue(lo->lo_queue); if (!err && (info->lo_flags & LO_FLAGS_PARTSCAN) && !(lo->lo_flags & LO_FLAGS_PARTSCAN)) { lo->lo_flags |= LO_FLAGS_PARTSCAN; lo->lo_disk->flags &= ~GENHD_FL_NO_PART_SCAN; - loop_reread_partitions(lo, lo->lo_device); + bdev = lo->lo_device; + partscan = true; } +out_unlock: + mutex_unlock(&loop_ctl_mutex); + if (partscan) + loop_reread_partitions(lo, bdev); return err; } @@ -1205,12 +1288,15 @@ loop_set_status(struct loop_device *lo, const struct loop_info64 *info) static int loop_get_status(struct loop_device *lo, struct loop_info64 *info) { - struct file *file; + struct path path; struct kstat stat; int ret; + ret = mutex_lock_killable(&loop_ctl_mutex); + if (ret) + return ret; if (lo->lo_state 
!= Lo_bound) { - mutex_unlock(&lo->lo_ctl_mutex); + mutex_unlock(&loop_ctl_mutex); return -ENXIO; } @@ -1229,17 +1315,17 @@ loop_get_status(struct loop_device *lo, struct loop_info64 *info) lo->lo_encrypt_key_size); } - /* Drop lo_ctl_mutex while we call into the filesystem. */ - file = get_file(lo->lo_backing_file); - mutex_unlock(&lo->lo_ctl_mutex); - ret = vfs_getattr(&file->f_path, &stat, STATX_INO, - AT_STATX_SYNC_AS_STAT); + /* Drop loop_ctl_mutex while we call into the filesystem. */ + path = lo->lo_backing_file->f_path; + path_get(&path); + mutex_unlock(&loop_ctl_mutex); + ret = vfs_getattr(&path, &stat, STATX_INO, AT_STATX_SYNC_AS_STAT); if (!ret) { info->lo_device = huge_encode_dev(stat.dev); info->lo_inode = stat.ino; info->lo_rdevice = huge_encode_dev(stat.rdev); } - fput(file); + path_put(&path); return ret; } @@ -1323,10 +1409,8 @@ loop_get_status_old(struct loop_device *lo, struct loop_info __user *arg) { struct loop_info64 info64; int err; - if (!arg) { - mutex_unlock(&lo->lo_ctl_mutex); + if (!arg) return -EINVAL; - } err = loop_get_status(lo, &info64); if (!err) err = loop_info64_to_old(&info64, &info); @@ -1341,10 +1425,8 @@ loop_get_status64(struct loop_device *lo, struct loop_info64 __user *arg) { struct loop_info64 info64; int err; - if (!arg) { - mutex_unlock(&lo->lo_ctl_mutex); + if (!arg) return -EINVAL; - } err = loop_get_status(lo, &info64); if (!err && copy_to_user(arg, &info64, sizeof(info64))) err = -EFAULT; @@ -1376,22 +1458,64 @@ static int loop_set_dio(struct loop_device *lo, unsigned long arg) static int loop_set_block_size(struct loop_device *lo, unsigned long arg) { + int err = 0; + if (lo->lo_state != Lo_bound) return -ENXIO; if (arg < 512 || arg > PAGE_SIZE || !is_power_of_2(arg)) return -EINVAL; + if (lo->lo_queue->limits.logical_block_size != arg) { + sync_blockdev(lo->lo_device); + kill_bdev(lo->lo_device); + } + blk_mq_freeze_queue(lo->lo_queue); + /* kill_bdev should have truncated all the pages */ + if 
(lo->lo_queue->limits.logical_block_size != arg && + lo->lo_device->bd_inode->i_mapping->nrpages) { + err = -EAGAIN; + pr_warn("%s: loop%d (%s) has still dirty pages (nrpages=%lu)\n", + __func__, lo->lo_number, lo->lo_file_name, + lo->lo_device->bd_inode->i_mapping->nrpages); + goto out_unfreeze; + } + blk_queue_logical_block_size(lo->lo_queue, arg); blk_queue_physical_block_size(lo->lo_queue, arg); blk_queue_io_min(lo->lo_queue, arg); loop_update_dio(lo); - +out_unfreeze: blk_mq_unfreeze_queue(lo->lo_queue); - return 0; + return err; +} + +static int lo_simple_ioctl(struct loop_device *lo, unsigned int cmd, + unsigned long arg) +{ + int err; + + err = mutex_lock_killable(&loop_ctl_mutex); + if (err) + return err; + switch (cmd) { + case LOOP_SET_CAPACITY: + err = loop_set_capacity(lo); + break; + case LOOP_SET_DIRECT_IO: + err = loop_set_dio(lo, arg); + break; + case LOOP_SET_BLOCK_SIZE: + err = loop_set_block_size(lo, arg); + break; + default: + err = lo->ioctl ? lo->ioctl(lo, cmd, arg) : -EINVAL; + } + mutex_unlock(&loop_ctl_mutex); + return err; } static int lo_ioctl(struct block_device *bdev, fmode_t mode, @@ -1400,64 +1524,42 @@ static int lo_ioctl(struct block_device *bdev, fmode_t mode, struct loop_device *lo = bdev->bd_disk->private_data; int err; - err = mutex_lock_killable_nested(&lo->lo_ctl_mutex, 1); - if (err) - goto out_unlocked; - switch (cmd) { case LOOP_SET_FD: - err = loop_set_fd(lo, mode, bdev, arg); - break; + return loop_set_fd(lo, mode, bdev, arg); case LOOP_CHANGE_FD: - err = loop_change_fd(lo, bdev, arg); - break; + return loop_change_fd(lo, bdev, arg); case LOOP_CLR_FD: - /* loop_clr_fd would have unlocked lo_ctl_mutex on success */ - err = loop_clr_fd(lo); - if (!err) - goto out_unlocked; - break; + return loop_clr_fd(lo); case LOOP_SET_STATUS: err = -EPERM; - if ((mode & FMODE_WRITE) || capable(CAP_SYS_ADMIN)) + if ((mode & FMODE_WRITE) || capable(CAP_SYS_ADMIN)) { err = loop_set_status_old(lo, (struct loop_info __user *)arg); + } break; 
case LOOP_GET_STATUS: - err = loop_get_status_old(lo, (struct loop_info __user *) arg); - /* loop_get_status() unlocks lo_ctl_mutex */ - goto out_unlocked; + return loop_get_status_old(lo, (struct loop_info __user *) arg); case LOOP_SET_STATUS64: err = -EPERM; - if ((mode & FMODE_WRITE) || capable(CAP_SYS_ADMIN)) + if ((mode & FMODE_WRITE) || capable(CAP_SYS_ADMIN)) { err = loop_set_status64(lo, (struct loop_info64 __user *) arg); + } break; case LOOP_GET_STATUS64: - err = loop_get_status64(lo, (struct loop_info64 __user *) arg); - /* loop_get_status() unlocks lo_ctl_mutex */ - goto out_unlocked; + return loop_get_status64(lo, (struct loop_info64 __user *) arg); case LOOP_SET_CAPACITY: - err = -EPERM; - if ((mode & FMODE_WRITE) || capable(CAP_SYS_ADMIN)) - err = loop_set_capacity(lo); - break; case LOOP_SET_DIRECT_IO: - err = -EPERM; - if ((mode & FMODE_WRITE) || capable(CAP_SYS_ADMIN)) - err = loop_set_dio(lo, arg); - break; case LOOP_SET_BLOCK_SIZE: - err = -EPERM; - if ((mode & FMODE_WRITE) || capable(CAP_SYS_ADMIN)) - err = loop_set_block_size(lo, arg); - break; + if (!(mode & FMODE_WRITE) && !capable(CAP_SYS_ADMIN)) + return -EPERM; + /* Fall through */ default: - err = lo->ioctl ? 
lo->ioctl(lo, cmd, arg) : -EINVAL; + err = lo_simple_ioctl(lo, cmd, arg); + break; } - mutex_unlock(&lo->lo_ctl_mutex); -out_unlocked: return err; } @@ -1571,10 +1673,8 @@ loop_get_status_compat(struct loop_device *lo, struct loop_info64 info64; int err; - if (!arg) { - mutex_unlock(&lo->lo_ctl_mutex); + if (!arg) return -EINVAL; - } err = loop_get_status(lo, &info64); if (!err) err = loop_info64_to_compat(&info64, arg); @@ -1589,20 +1689,12 @@ static int lo_compat_ioctl(struct block_device *bdev, fmode_t mode, switch(cmd) { case LOOP_SET_STATUS: - err = mutex_lock_killable(&lo->lo_ctl_mutex); - if (!err) { - err = loop_set_status_compat(lo, - (const struct compat_loop_info __user *)arg); - mutex_unlock(&lo->lo_ctl_mutex); - } + err = loop_set_status_compat(lo, + (const struct compat_loop_info __user *)arg); break; case LOOP_GET_STATUS: - err = mutex_lock_killable(&lo->lo_ctl_mutex); - if (!err) { - err = loop_get_status_compat(lo, - (struct compat_loop_info __user *)arg); - /* loop_get_status() unlocks lo_ctl_mutex */ - } + err = loop_get_status_compat(lo, + (struct compat_loop_info __user *)arg); break; case LOOP_SET_CAPACITY: case LOOP_CLR_FD: @@ -1626,9 +1718,11 @@ static int lo_compat_ioctl(struct block_device *bdev, fmode_t mode, static int lo_open(struct block_device *bdev, fmode_t mode) { struct loop_device *lo; - int err = 0; + int err; - mutex_lock(&loop_index_mutex); + err = mutex_lock_killable(&loop_ctl_mutex); + if (err) + return err; lo = bdev->bd_disk->private_data; if (!lo) { err = -ENXIO; @@ -1637,26 +1731,30 @@ static int lo_open(struct block_device *bdev, fmode_t mode) atomic_inc(&lo->lo_refcnt); out: - mutex_unlock(&loop_index_mutex); + mutex_unlock(&loop_ctl_mutex); return err; } -static void __lo_release(struct loop_device *lo) +static void lo_release(struct gendisk *disk, fmode_t mode) { - int err; + struct loop_device *lo; + mutex_lock(&loop_ctl_mutex); + lo = disk->private_data; if (atomic_dec_return(&lo->lo_refcnt)) - return; + goto 
out_unlock; - mutex_lock(&lo->lo_ctl_mutex); if (lo->lo_flags & LO_FLAGS_AUTOCLEAR) { + if (lo->lo_state != Lo_bound) + goto out_unlock; + lo->lo_state = Lo_rundown; + mutex_unlock(&loop_ctl_mutex); /* * In autoclear mode, stop the loop thread * and remove configuration after last close. */ - err = loop_clr_fd(lo); - if (!err) - return; + __loop_clr_fd(lo, true); + return; } else if (lo->lo_state == Lo_bound) { /* * Otherwise keep thread (if running) and config, @@ -1666,14 +1764,8 @@ static void __lo_release(struct loop_device *lo) blk_mq_unfreeze_queue(lo->lo_queue); } - mutex_unlock(&lo->lo_ctl_mutex); -} - -static void lo_release(struct gendisk *disk, fmode_t mode) -{ - mutex_lock(&loop_index_mutex); - __lo_release(disk->private_data); - mutex_unlock(&loop_index_mutex); +out_unlock: + mutex_unlock(&loop_ctl_mutex); } static const struct block_device_operations lo_fops = { @@ -1712,10 +1804,10 @@ static int unregister_transfer_cb(int id, void *ptr, void *data) struct loop_device *lo = ptr; struct loop_func_table *xfer = data; - mutex_lock(&lo->lo_ctl_mutex); + mutex_lock(&loop_ctl_mutex); if (lo->lo_encryption == xfer) loop_release_xfer(lo); - mutex_unlock(&lo->lo_ctl_mutex); + mutex_unlock(&loop_ctl_mutex); return 0; } @@ -1896,7 +1988,6 @@ static int loop_add(struct loop_device **l, int i) if (!part_shift) disk->flags |= GENHD_FL_NO_PART_SCAN; disk->flags |= GENHD_FL_EXT_DEVT; - mutex_init(&lo->lo_ctl_mutex); atomic_set(&lo->lo_refcnt, 0); lo->lo_number = i; spin_lock_init(&lo->lo_lock); @@ -1975,7 +2066,7 @@ static struct kobject *loop_probe(dev_t dev, int *part, void *data) struct kobject *kobj; int err; - mutex_lock(&loop_index_mutex); + mutex_lock(&loop_ctl_mutex); err = loop_lookup(&lo, MINOR(dev) >> part_shift); if (err < 0) err = loop_add(&lo, MINOR(dev) >> part_shift); @@ -1983,7 +2074,7 @@ static struct kobject *loop_probe(dev_t dev, int *part, void *data) kobj = NULL; else kobj = get_disk_and_module(lo->lo_disk); - mutex_unlock(&loop_index_mutex); + 
mutex_unlock(&loop_ctl_mutex); *part = 0; return kobj; @@ -1993,9 +2084,13 @@ static long loop_control_ioctl(struct file *file, unsigned int cmd, unsigned long parm) { struct loop_device *lo; - int ret = -ENOSYS; + int ret; + + ret = mutex_lock_killable(&loop_ctl_mutex); + if (ret) + return ret; - mutex_lock(&loop_index_mutex); + ret = -ENOSYS; switch (cmd) { case LOOP_CTL_ADD: ret = loop_lookup(&lo, parm); @@ -2009,21 +2104,15 @@ static long loop_control_ioctl(struct file *file, unsigned int cmd, ret = loop_lookup(&lo, parm); if (ret < 0) break; - ret = mutex_lock_killable(&lo->lo_ctl_mutex); - if (ret) - break; if (lo->lo_state != Lo_unbound) { ret = -EBUSY; - mutex_unlock(&lo->lo_ctl_mutex); break; } if (atomic_read(&lo->lo_refcnt) > 0) { ret = -EBUSY; - mutex_unlock(&lo->lo_ctl_mutex); break; } lo->lo_disk->private_data = NULL; - mutex_unlock(&lo->lo_ctl_mutex); idr_remove(&loop_index_idr, lo->lo_number); loop_remove(lo); break; @@ -2033,7 +2122,7 @@ static long loop_control_ioctl(struct file *file, unsigned int cmd, break; ret = loop_add(&lo, -1); } - mutex_unlock(&loop_index_mutex); + mutex_unlock(&loop_ctl_mutex); return ret; } @@ -2117,10 +2206,10 @@ static int __init loop_init(void) THIS_MODULE, loop_probe, NULL, NULL); /* pre-create number of devices given by config or max_loop */ - mutex_lock(&loop_index_mutex); + mutex_lock(&loop_ctl_mutex); for (i = 0; i < nr; i++) loop_add(&lo, i); - mutex_unlock(&loop_index_mutex); + mutex_unlock(&loop_ctl_mutex); printk(KERN_INFO "loop: module loaded\n"); return 0; diff --git a/drivers/block/loop.h b/drivers/block/loop.h index 4d42c7af7de75..af75a5ee40944 100644 --- a/drivers/block/loop.h +++ b/drivers/block/loop.h @@ -54,7 +54,6 @@ struct loop_device { spinlock_t lo_lock; int lo_state; - struct mutex lo_ctl_mutex; struct kthread_worker worker; struct task_struct *worker_task; bool use_dio; diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c index 14a51254c3db7..c13a6d1796a77 100644 --- a/drivers/block/nbd.c +++ 
b/drivers/block/nbd.c @@ -288,9 +288,10 @@ static void nbd_size_update(struct nbd_device *nbd) blk_queue_physical_block_size(nbd->disk->queue, config->blksize); set_capacity(nbd->disk, config->bytesize >> 9); if (bdev) { - if (bdev->bd_disk) + if (bdev->bd_disk) { bd_set_size(bdev, config->bytesize); - else + set_blocksize(bdev, config->blksize); + } else bdev->bd_invalidated = 1; bdput(bdev); } diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c index 73ed5f3a862df..585378bc988cd 100644 --- a/drivers/block/rbd.c +++ b/drivers/block/rbd.c @@ -5982,7 +5982,6 @@ static ssize_t do_rbd_remove(struct bus_type *bus, struct list_head *tmp; int dev_id; char opt_buf[6]; - bool already = false; bool force = false; int ret; @@ -6015,13 +6014,13 @@ static ssize_t do_rbd_remove(struct bus_type *bus, spin_lock_irq(&rbd_dev->lock); if (rbd_dev->open_count && !force) ret = -EBUSY; - else - already = test_and_set_bit(RBD_DEV_FLAG_REMOVING, - &rbd_dev->flags); + else if (test_and_set_bit(RBD_DEV_FLAG_REMOVING, + &rbd_dev->flags)) + ret = -EINPROGRESS; spin_unlock_irq(&rbd_dev->lock); } spin_unlock(&rbd_dev_list_lock); - if (ret < 0 || already) + if (ret) return ret; if (force) { diff --git a/drivers/block/swim.c b/drivers/block/swim.c index 0e31884a95196..cbe909c51847d 100644 --- a/drivers/block/swim.c +++ b/drivers/block/swim.c @@ -887,8 +887,17 @@ static int swim_floppy_init(struct swim_priv *swd) exit_put_disks: unregister_blkdev(FLOPPY_MAJOR, "fd"); - while (drive--) - put_disk(swd->unit[drive].disk); + do { + struct gendisk *disk = swd->unit[drive].disk; + + if (disk) { + if (disk->queue) { + blk_cleanup_queue(disk->queue); + disk->queue = NULL; + } + put_disk(disk); + } + } while (drive--); return err; } diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c index 23752dc99b008..d9135ce9cc155 100644 --- a/drivers/block/virtio_blk.c +++ b/drivers/block/virtio_blk.c @@ -18,6 +18,7 @@ #define PART_BITS 4 #define VQ_NAME_LEN 16 +#define MAX_DISCARD_SEGMENTS 256u 
static int major; static DEFINE_IDA(vd_index_ida); @@ -172,10 +173,48 @@ static int virtblk_add_req(struct virtqueue *vq, struct virtblk_req *vbr, return virtqueue_add_sgs(vq, sgs, num_out, num_in, vbr, GFP_ATOMIC); } +static int virtblk_setup_discard_write_zeroes(struct request *req, bool unmap) +{ + unsigned short segments = blk_rq_nr_discard_segments(req); + unsigned short n = 0; + struct virtio_blk_discard_write_zeroes *range; + struct bio *bio; + u32 flags = 0; + + if (unmap) + flags |= VIRTIO_BLK_WRITE_ZEROES_FLAG_UNMAP; + + range = kmalloc_array(segments, sizeof(*range), GFP_ATOMIC); + if (!range) + return -ENOMEM; + + __rq_for_each_bio(bio, req) { + u64 sector = bio->bi_iter.bi_sector; + u32 num_sectors = bio->bi_iter.bi_size >> SECTOR_SHIFT; + + range[n].flags = cpu_to_le32(flags); + range[n].num_sectors = cpu_to_le32(num_sectors); + range[n].sector = cpu_to_le64(sector); + n++; + } + + req->special_vec.bv_page = virt_to_page(range); + req->special_vec.bv_offset = offset_in_page(range); + req->special_vec.bv_len = sizeof(*range) * segments; + req->rq_flags |= RQF_SPECIAL_PAYLOAD; + + return 0; +} + static inline void virtblk_request_done(struct request *req) { struct virtblk_req *vbr = blk_mq_rq_to_pdu(req); + if (req->rq_flags & RQF_SPECIAL_PAYLOAD) { + kfree(page_address(req->special_vec.bv_page) + + req->special_vec.bv_offset); + } + switch (req_op(req)) { case REQ_OP_SCSI_IN: case REQ_OP_SCSI_OUT: @@ -225,6 +264,7 @@ static blk_status_t virtio_queue_rq(struct blk_mq_hw_ctx *hctx, int qid = hctx->queue_num; int err; bool notify = false; + bool unmap = false; u32 type; BUG_ON(req->nr_phys_segments + 2 > vblk->sg_elems); @@ -237,6 +277,13 @@ static blk_status_t virtio_queue_rq(struct blk_mq_hw_ctx *hctx, case REQ_OP_FLUSH: type = VIRTIO_BLK_T_FLUSH; break; + case REQ_OP_DISCARD: + type = VIRTIO_BLK_T_DISCARD; + break; + case REQ_OP_WRITE_ZEROES: + type = VIRTIO_BLK_T_WRITE_ZEROES; + unmap = !(req->cmd_flags & REQ_NOUNMAP); + break; case REQ_OP_SCSI_IN: 
case REQ_OP_SCSI_OUT: type = VIRTIO_BLK_T_SCSI_CMD; @@ -256,6 +303,12 @@ static blk_status_t virtio_queue_rq(struct blk_mq_hw_ctx *hctx, blk_mq_start_request(req); + if (type == VIRTIO_BLK_T_DISCARD || type == VIRTIO_BLK_T_WRITE_ZEROES) { + err = virtblk_setup_discard_write_zeroes(req, unmap); + if (err) + return BLK_STS_RESOURCE; + } + num = blk_rq_map_sg(hctx->queue, req, vbr->sg); if (num) { if (rq_data_dir(req) == WRITE) @@ -777,6 +830,32 @@ static int virtblk_probe(struct virtio_device *vdev) if (!err && opt_io_size) blk_queue_io_opt(q, blk_size * opt_io_size); + if (virtio_has_feature(vdev, VIRTIO_BLK_F_DISCARD)) { + q->limits.discard_granularity = blk_size; + + virtio_cread(vdev, struct virtio_blk_config, + discard_sector_alignment, &v); + q->limits.discard_alignment = v ? v << SECTOR_SHIFT : 0; + + virtio_cread(vdev, struct virtio_blk_config, + max_discard_sectors, &v); + blk_queue_max_discard_sectors(q, v ? v : UINT_MAX); + + virtio_cread(vdev, struct virtio_blk_config, max_discard_seg, + &v); + blk_queue_max_discard_segments(q, + min_not_zero(v, + MAX_DISCARD_SEGMENTS)); + + blk_queue_flag_set(QUEUE_FLAG_DISCARD, q); + } + + if (virtio_has_feature(vdev, VIRTIO_BLK_F_WRITE_ZEROES)) { + virtio_cread(vdev, struct virtio_blk_config, + max_write_zeroes_sectors, &v); + blk_queue_max_write_zeroes_sectors(q, v ? 
v : UINT_MAX); + } + virtblk_update_capacity(vblk, false); virtio_device_ready(vdev); @@ -885,14 +964,14 @@ static unsigned int features_legacy[] = { VIRTIO_BLK_F_SCSI, #endif VIRTIO_BLK_F_FLUSH, VIRTIO_BLK_F_TOPOLOGY, VIRTIO_BLK_F_CONFIG_WCE, - VIRTIO_BLK_F_MQ, + VIRTIO_BLK_F_MQ, VIRTIO_BLK_F_DISCARD, VIRTIO_BLK_F_WRITE_ZEROES, } ; static unsigned int features[] = { VIRTIO_BLK_F_SEG_MAX, VIRTIO_BLK_F_SIZE_MAX, VIRTIO_BLK_F_GEOMETRY, VIRTIO_BLK_F_RO, VIRTIO_BLK_F_BLK_SIZE, VIRTIO_BLK_F_FLUSH, VIRTIO_BLK_F_TOPOLOGY, VIRTIO_BLK_F_CONFIG_WCE, - VIRTIO_BLK_F_MQ, + VIRTIO_BLK_F_MQ, VIRTIO_BLK_F_DISCARD, VIRTIO_BLK_F_WRITE_ZEROES, }; static struct virtio_driver virtio_blk = { diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c index 429d20131c7e2..3e905da33bcbe 100644 --- a/drivers/block/xen-blkfront.c +++ b/drivers/block/xen-blkfront.c @@ -1919,6 +1919,7 @@ static int negotiate_mq(struct blkfront_info *info) GFP_KERNEL); if (!info->rinfo) { xenbus_dev_fatal(info->xbdev, -ENOMEM, "allocating ring_info structure"); + info->nr_rings = 0; return -ENOMEM; } @@ -2493,6 +2494,9 @@ static int blkfront_remove(struct xenbus_device *xbdev) dev_dbg(&xbdev->dev, "%s removed", xbdev->nodename); + if (!info) + return 0; + blkif_free(info, 0); mutex_lock(&info->mutex); diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c index a1d6b5597c17b..e19bf0a750cfc 100644 --- a/drivers/block/zram/zram_drv.c +++ b/drivers/block/zram/zram_drv.c @@ -382,8 +382,10 @@ static ssize_t backing_dev_store(struct device *dev, bdev = bdgrab(I_BDEV(inode)); err = blkdev_get(bdev, FMODE_READ | FMODE_WRITE | FMODE_EXCL, zram); - if (err < 0) + if (err < 0) { + bdev = NULL; goto out; + } nr_pages = i_size_read(inode) >> PAGE_SHIFT; bitmap_sz = BITS_TO_LONGS(nr_pages) * sizeof(long); @@ -1636,6 +1638,11 @@ static const struct attribute_group zram_disk_attr_group = { .attrs = zram_disk_attrs, }; +static const struct attribute_group *zram_disk_attr_groups[] = { + 
&zram_disk_attr_group, + NULL, +}; + /* * Allocate and initialize new zram device. the function returns * '>= 0' device_id upon success, and negative value otherwise. @@ -1716,24 +1723,15 @@ static int zram_add(void) zram->disk->queue->backing_dev_info->capabilities |= (BDI_CAP_STABLE_WRITES | BDI_CAP_SYNCHRONOUS_IO); + disk_to_dev(zram->disk)->groups = zram_disk_attr_groups; add_disk(zram->disk); - ret = sysfs_create_group(&disk_to_dev(zram->disk)->kobj, - &zram_disk_attr_group); - if (ret < 0) { - pr_err("Error creating sysfs group for device %d\n", - device_id); - goto out_free_disk; - } strlcpy(zram->compressor, default_compressor, sizeof(zram->compressor)); zram_debugfs_register(zram); pr_info("Added device: %s\n", zram->disk->disk_name); return device_id; -out_free_disk: - del_gendisk(zram->disk); - put_disk(zram->disk); out_free_queue: blk_cleanup_queue(queue); out_free_idr: @@ -1762,16 +1760,6 @@ static int zram_remove(struct zram *zram) mutex_unlock(&bdev->bd_mutex); zram_debugfs_unregister(zram); - /* - * Remove sysfs first, so no one will perform a disksize - * store while we destroy the devices. This also helps during - * hot_remove -- zram_reset_device() is the last holder of - * ->init_lock, no later/concurrent disksize_store() or any - * other sysfs handlers are possible. 
- */ - sysfs_remove_group(&disk_to_dev(zram->disk)->kobj, - &zram_disk_attr_group); - /* Make sure all the pending I/O are finished */ fsync_bdev(bdev); zram_reset_device(zram); diff --git a/drivers/bluetooth/btbcm.c b/drivers/bluetooth/btbcm.c index 99cde1f9467d4..e3e4d929e74f5 100644 --- a/drivers/bluetooth/btbcm.c +++ b/drivers/bluetooth/btbcm.c @@ -324,6 +324,7 @@ static const struct bcm_subver_table bcm_uart_subver_table[] = { { 0x4103, "BCM4330B1" }, /* 002.001.003 */ { 0x410e, "BCM43341B0" }, /* 002.001.014 */ { 0x4406, "BCM4324B3" }, /* 002.004.006 */ + { 0x6109, "BCM4335C0" }, /* 003.001.009 */ { 0x610c, "BCM4354" }, /* 003.001.012 */ { 0x2122, "BCM4343A0" }, /* 001.001.034 */ { 0x2209, "BCM43430A1" }, /* 001.002.009 */ diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c index cd2e5cf14ea5b..77b67a5f21eef 100644 --- a/drivers/bluetooth/btusb.c +++ b/drivers/bluetooth/btusb.c @@ -343,6 +343,7 @@ static const struct usb_device_id blacklist_table[] = { /* Intel Bluetooth devices */ { USB_DEVICE(0x8087, 0x0025), .driver_info = BTUSB_INTEL_NEW }, { USB_DEVICE(0x8087, 0x0026), .driver_info = BTUSB_INTEL_NEW }, + { USB_DEVICE(0x8087, 0x0029), .driver_info = BTUSB_INTEL_NEW }, { USB_DEVICE(0x8087, 0x07da), .driver_info = BTUSB_CSR }, { USB_DEVICE(0x8087, 0x07dc), .driver_info = BTUSB_INTEL }, { USB_DEVICE(0x8087, 0x0a2a), .driver_info = BTUSB_INTEL }, @@ -2054,6 +2055,35 @@ static int btusb_send_frame_intel(struct hci_dev *hdev, struct sk_buff *skb) return -EILSEQ; } +static bool btusb_setup_intel_new_get_fw_name(struct intel_version *ver, + struct intel_boot_params *params, + char *fw_name, size_t len, + const char *suffix) +{ + switch (ver->hw_variant) { + case 0x0b: /* SfP */ + case 0x0c: /* WsP */ + snprintf(fw_name, len, "intel/ibt-%u-%u.%s", + le16_to_cpu(ver->hw_variant), + le16_to_cpu(params->dev_revid), + suffix); + break; + case 0x11: /* JfP */ + case 0x12: /* ThP */ + case 0x13: /* HrP */ + case 0x14: /* CcP */ + snprintf(fw_name, len, 
"intel/ibt-%u-%u-%u.%s", + le16_to_cpu(ver->hw_variant), + le16_to_cpu(ver->hw_revision), + le16_to_cpu(ver->fw_revision), + suffix); + break; + default: + return false; + } + return true; +} + static int btusb_setup_intel_new(struct hci_dev *hdev) { struct btusb_data *data = hci_get_drvdata(hdev); @@ -2105,7 +2135,7 @@ static int btusb_setup_intel_new(struct hci_dev *hdev) case 0x11: /* JfP */ case 0x12: /* ThP */ case 0x13: /* HrP */ - case 0x14: /* QnJ, IcP */ + case 0x14: /* CcP */ break; default: bt_dev_err(hdev, "Unsupported Intel hardware variant (%u)", @@ -2189,23 +2219,9 @@ static int btusb_setup_intel_new(struct hci_dev *hdev) * ibt---.sfi. * */ - switch (ver.hw_variant) { - case 0x0b: /* SfP */ - case 0x0c: /* WsP */ - snprintf(fwname, sizeof(fwname), "intel/ibt-%u-%u.sfi", - le16_to_cpu(ver.hw_variant), - le16_to_cpu(params.dev_revid)); - break; - case 0x11: /* JfP */ - case 0x12: /* ThP */ - case 0x13: /* HrP */ - case 0x14: /* QnJ, IcP */ - snprintf(fwname, sizeof(fwname), "intel/ibt-%u-%u-%u.sfi", - le16_to_cpu(ver.hw_variant), - le16_to_cpu(ver.hw_revision), - le16_to_cpu(ver.fw_revision)); - break; - default: + err = btusb_setup_intel_new_get_fw_name(&ver, ¶ms, fwname, + sizeof(fwname), "sfi"); + if (!err) { bt_dev_err(hdev, "Unsupported Intel firmware naming"); return -EINVAL; } @@ -2221,23 +2237,9 @@ static int btusb_setup_intel_new(struct hci_dev *hdev) /* Save the DDC file name for later use to apply once the firmware * downloading is done. 
*/ - switch (ver.hw_variant) { - case 0x0b: /* SfP */ - case 0x0c: /* WsP */ - snprintf(fwname, sizeof(fwname), "intel/ibt-%u-%u.ddc", - le16_to_cpu(ver.hw_variant), - le16_to_cpu(params.dev_revid)); - break; - case 0x11: /* JfP */ - case 0x12: /* ThP */ - case 0x13: /* HrP */ - case 0x14: /* QnJ, IcP */ - snprintf(fwname, sizeof(fwname), "intel/ibt-%u-%u-%u.ddc", - le16_to_cpu(ver.hw_variant), - le16_to_cpu(ver.hw_revision), - le16_to_cpu(ver.fw_revision)); - break; - default: + err = btusb_setup_intel_new_get_fw_name(&ver, ¶ms, fwname, + sizeof(fwname), "ddc"); + if (!err) { bt_dev_err(hdev, "Unsupported Intel firmware naming"); return -EINVAL; } diff --git a/drivers/bluetooth/hci_qca.c b/drivers/bluetooth/hci_qca.c index 2fee65886d50f..f0d593c3fa728 100644 --- a/drivers/bluetooth/hci_qca.c +++ b/drivers/bluetooth/hci_qca.c @@ -167,7 +167,7 @@ struct qca_serdev { }; static int qca_power_setup(struct hci_uart *hu, bool on); -static void qca_power_shutdown(struct hci_dev *hdev); +static void qca_power_shutdown(struct hci_uart *hu); static void __serial_clock_on(struct tty_struct *tty) { @@ -609,7 +609,7 @@ static int qca_close(struct hci_uart *hu) if (hu->serdev) { qcadev = serdev_device_get_drvdata(hu->serdev); if (qcadev->btsoc_type == QCA_WCN3990) - qca_power_shutdown(hu->hdev); + qca_power_shutdown(hu); else gpiod_set_value_cansleep(qcadev->bt_en, 0); @@ -1232,12 +1232,15 @@ static const struct qca_vreg_data qca_soc_data = { .num_vregs = 4, }; -static void qca_power_shutdown(struct hci_dev *hdev) +static void qca_power_shutdown(struct hci_uart *hu) { - struct hci_uart *hu = hci_get_drvdata(hdev); + struct serdev_device *serdev = hu->serdev; + unsigned char cmd = QCA_WCN3990_POWEROFF_PULSE; host_set_baudrate(hu, 2400); - qca_send_power_pulse(hdev, QCA_WCN3990_POWEROFF_PULSE); + hci_uart_set_flow_control(hu, true); + serdev_device_write_buf(serdev, &cmd, sizeof(cmd)); + hci_uart_set_flow_control(hu, false); qca_power_setup(hu, false); } @@ -1413,7 +1416,7 @@ 
static void qca_serdev_remove(struct serdev_device *serdev) struct qca_serdev *qcadev = serdev_device_get_drvdata(serdev); if (qcadev->btsoc_type == QCA_WCN3990) - qca_power_shutdown(qcadev->serdev_hu.hdev); + qca_power_shutdown(&qcadev->serdev_hu); else clk_disable_unprepare(qcadev->susclk); diff --git a/drivers/cdrom/cdrom.c b/drivers/cdrom/cdrom.c index a5d5a96479bfe..10802d1fc554c 100644 --- a/drivers/cdrom/cdrom.c +++ b/drivers/cdrom/cdrom.c @@ -2445,7 +2445,7 @@ static int cdrom_ioctl_select_disc(struct cdrom_device_info *cdi, return -ENOSYS; if (arg != CDSL_CURRENT && arg != CDSL_NONE) { - if ((int)arg >= cdi->capacity) + if (arg >= cdi->capacity) return -EINVAL; } diff --git a/drivers/char/Kconfig b/drivers/char/Kconfig index 40728491f37b6..26a2da8dde630 100644 --- a/drivers/char/Kconfig +++ b/drivers/char/Kconfig @@ -552,6 +552,8 @@ config ADI and SSM (Silicon Secured Memory). Intended consumers of this driver include crash and makedumpfile. +source "drivers/char/rpmb/Kconfig" + endmenu config RANDOM_TRUST_CPU diff --git a/drivers/char/Makefile b/drivers/char/Makefile index b8d42b4e979bb..2f5697e20a5b7 100644 --- a/drivers/char/Makefile +++ b/drivers/char/Makefile @@ -58,3 +58,5 @@ js-rtc-y = rtc.o obj-$(CONFIG_XILLYBUS) += xillybus/ obj-$(CONFIG_POWERNV_OP_PANEL) += powernv-op-panel.o obj-$(CONFIG_ADI) += adi.o +obj-$(CONFIG_RPMB) += rpmb/ +obj-$(CONFIG_ACRN_VHM) += vhm/ diff --git a/drivers/char/ipmi/ipmi_msghandler.c b/drivers/char/ipmi/ipmi_msghandler.c index 7fc9612070a1f..d5f7a12e350e5 100644 --- a/drivers/char/ipmi/ipmi_msghandler.c +++ b/drivers/char/ipmi/ipmi_msghandler.c @@ -29,6 +29,7 @@ #include #include #include +#include #define PFX "IPMI message handler: " @@ -61,7 +62,8 @@ static void ipmi_debug_msg(const char *title, unsigned char *data, { } #endif -static int initialized; +static bool initialized; +static bool drvregistered; enum ipmi_panic_event_op { IPMI_SEND_PANIC_EVENT_NONE, @@ -611,7 +613,7 @@ static DEFINE_MUTEX(ipmidriver_mutex); 
static LIST_HEAD(ipmi_interfaces); static DEFINE_MUTEX(ipmi_interfaces_mutex); -DEFINE_STATIC_SRCU(ipmi_interfaces_srcu); +struct srcu_struct ipmi_interfaces_srcu; /* * List of watchers that want to know when smi's are added and deleted. @@ -719,7 +721,15 @@ struct watcher_entry { int ipmi_smi_watcher_register(struct ipmi_smi_watcher *watcher) { struct ipmi_smi *intf; - int index; + int index, rv; + + /* + * Make sure the driver is actually initialized, this handles + * problems with initialization order. + */ + rv = ipmi_init_msghandler(); + if (rv) + return rv; mutex_lock(&smi_watchers_mutex); @@ -883,7 +893,7 @@ static int deliver_response(struct ipmi_smi *intf, struct ipmi_recv_msg *msg) if (user) { user->handler->ipmi_recv_hndl(msg, user->handler_data); - release_ipmi_user(msg->user, index); + release_ipmi_user(user, index); } else { /* User went away, give up. */ ipmi_free_recv_msg(msg); @@ -1075,7 +1085,7 @@ int ipmi_create_user(unsigned int if_num, { unsigned long flags; struct ipmi_user *new_user; - int rv = 0, index; + int rv, index; struct ipmi_smi *intf; /* @@ -1093,18 +1103,9 @@ int ipmi_create_user(unsigned int if_num, * Make sure the driver is actually initialized, this handles * problems with initialization order. */ - if (!initialized) { - rv = ipmi_init_msghandler(); - if (rv) - return rv; - - /* - * The init code doesn't return an error if it was turned - * off, but it won't initialize. Check that. 
- */ - if (!initialized) - return -ENODEV; - } + rv = ipmi_init_msghandler(); + if (rv) + return rv; new_user = kmalloc(sizeof(*new_user), GFP_KERNEL); if (!new_user) @@ -1182,6 +1183,7 @@ EXPORT_SYMBOL(ipmi_get_smi_info); static void free_user(struct kref *ref) { struct ipmi_user *user = container_of(ref, struct ipmi_user, refcount); + cleanup_srcu_struct(&user->release_barrier); kfree(user); } @@ -1258,7 +1260,6 @@ int ipmi_destroy_user(struct ipmi_user *user) { _ipmi_destroy_user(user); - cleanup_srcu_struct(&user->release_barrier); kref_put(&user->refcount, free_user); return 0; @@ -1297,10 +1298,12 @@ int ipmi_set_my_address(struct ipmi_user *user, if (!user) return -ENODEV; - if (channel >= IPMI_MAX_CHANNELS) + if (channel >= IPMI_MAX_CHANNELS) { rv = -EINVAL; - else + } else { + channel = array_index_nospec(channel, IPMI_MAX_CHANNELS); user->intf->addrinfo[channel].address = address; + } release_ipmi_user(user, index); return rv; @@ -1317,10 +1320,12 @@ int ipmi_get_my_address(struct ipmi_user *user, if (!user) return -ENODEV; - if (channel >= IPMI_MAX_CHANNELS) + if (channel >= IPMI_MAX_CHANNELS) { rv = -EINVAL; - else + } else { + channel = array_index_nospec(channel, IPMI_MAX_CHANNELS); *address = user->intf->addrinfo[channel].address; + } release_ipmi_user(user, index); return rv; @@ -1337,10 +1342,12 @@ int ipmi_set_my_LUN(struct ipmi_user *user, if (!user) return -ENODEV; - if (channel >= IPMI_MAX_CHANNELS) + if (channel >= IPMI_MAX_CHANNELS) { rv = -EINVAL; - else + } else { + channel = array_index_nospec(channel, IPMI_MAX_CHANNELS); user->intf->addrinfo[channel].lun = LUN & 0x3; + } release_ipmi_user(user, index); return 0; @@ -1357,10 +1364,12 @@ int ipmi_get_my_LUN(struct ipmi_user *user, if (!user) return -ENODEV; - if (channel >= IPMI_MAX_CHANNELS) + if (channel >= IPMI_MAX_CHANNELS) { rv = -EINVAL; - else + } else { + channel = array_index_nospec(channel, IPMI_MAX_CHANNELS); *address = user->intf->addrinfo[channel].lun; + } 
release_ipmi_user(user, index); return rv; @@ -2184,6 +2193,7 @@ static int check_addr(struct ipmi_smi *intf, { if (addr->channel >= IPMI_MAX_CHANNELS) return -EINVAL; + addr->channel = array_index_nospec(addr->channel, IPMI_MAX_CHANNELS); *lun = intf->addrinfo[addr->channel].lun; *saddr = intf->addrinfo[addr->channel].address; return 0; @@ -3294,17 +3304,9 @@ int ipmi_register_smi(const struct ipmi_smi_handlers *handlers, * Make sure the driver is actually initialized, this handles * problems with initialization order. */ - if (!initialized) { - rv = ipmi_init_msghandler(); - if (rv) - return rv; - /* - * The init code doesn't return an error if it was turned - * off, but it won't initialize. Check that. - */ - if (!initialized) - return -ENODEV; - } + rv = ipmi_init_msghandler(); + if (rv) + return rv; intf = kzalloc(sizeof(*intf), GFP_KERNEL); if (!intf) @@ -5020,6 +5022,22 @@ static int panic_event(struct notifier_block *this, return NOTIFY_DONE; } +/* Must be called with ipmi_interfaces_mutex held. 
*/ +static int ipmi_register_driver(void) +{ + int rv; + + if (drvregistered) + return 0; + + rv = driver_register(&ipmidriver.driver); + if (rv) + pr_err("Could not register IPMI driver\n"); + else + drvregistered = true; + return rv; +} + static struct notifier_block panic_block = { .notifier_call = panic_event, .next = NULL, @@ -5030,66 +5048,74 @@ static int ipmi_init_msghandler(void) { int rv; + mutex_lock(&ipmi_interfaces_mutex); + rv = ipmi_register_driver(); + if (rv) + goto out; if (initialized) - return 0; - - rv = driver_register(&ipmidriver.driver); - if (rv) { - pr_err(PFX "Could not register IPMI driver\n"); - return rv; - } + goto out; - pr_info("ipmi message handler version " IPMI_DRIVER_VERSION "\n"); + init_srcu_struct(&ipmi_interfaces_srcu); timer_setup(&ipmi_timer, ipmi_timeout, 0); mod_timer(&ipmi_timer, jiffies + IPMI_TIMEOUT_JIFFIES); atomic_notifier_chain_register(&panic_notifier_list, &panic_block); - initialized = 1; + initialized = true; - return 0; +out: + mutex_unlock(&ipmi_interfaces_mutex); + return rv; } static int __init ipmi_init_msghandler_mod(void) { - ipmi_init_msghandler(); - return 0; + int rv; + + pr_info("version " IPMI_DRIVER_VERSION "\n"); + + mutex_lock(&ipmi_interfaces_mutex); + rv = ipmi_register_driver(); + mutex_unlock(&ipmi_interfaces_mutex); + + return rv; } static void __exit cleanup_ipmi(void) { int count; - if (!initialized) - return; - - atomic_notifier_chain_unregister(&panic_notifier_list, &panic_block); - - /* - * This can't be called if any interfaces exist, so no worry - * about shutting down the interfaces. - */ - - /* - * Tell the timer to stop, then wait for it to stop. This - * avoids problems with race conditions removing the timer - * here. 
- */ - atomic_inc(&stop_operation); - del_timer_sync(&ipmi_timer); + if (initialized) { + atomic_notifier_chain_unregister(&panic_notifier_list, + &panic_block); - driver_unregister(&ipmidriver.driver); - - initialized = 0; + /* + * This can't be called if any interfaces exist, so no worry + * about shutting down the interfaces. + */ - /* Check for buffer leaks. */ - count = atomic_read(&smi_msg_inuse_count); - if (count != 0) - pr_warn(PFX "SMI message count %d at exit\n", count); - count = atomic_read(&recv_msg_inuse_count); - if (count != 0) - pr_warn(PFX "recv message count %d at exit\n", count); + /* + * Tell the timer to stop, then wait for it to stop. This + * avoids problems with race conditions removing the timer + * here. + */ + atomic_inc(&stop_operation); + del_timer_sync(&ipmi_timer); + + initialized = false; + + /* Check for buffer leaks. */ + count = atomic_read(&smi_msg_inuse_count); + if (count != 0) + pr_warn(PFX "SMI message count %d at exit\n", count); + count = atomic_read(&recv_msg_inuse_count); + if (count != 0) + pr_warn(PFX "recv message count %d at exit\n", count); + cleanup_srcu_struct(&ipmi_interfaces_srcu); + } + if (drvregistered) + driver_unregister(&ipmidriver.driver); } module_exit(cleanup_ipmi); diff --git a/drivers/char/ipmi/ipmi_ssif.c b/drivers/char/ipmi/ipmi_ssif.c index 29e67a80fb208..76c2010ba6726 100644 --- a/drivers/char/ipmi/ipmi_ssif.c +++ b/drivers/char/ipmi/ipmi_ssif.c @@ -606,8 +606,9 @@ static void msg_done_handler(struct ssif_info *ssif_info, int result, flags = ipmi_ssif_lock_cond(ssif_info, &oflags); ssif_info->waiting_alert = true; ssif_info->rtc_us_timer = SSIF_MSG_USEC; - mod_timer(&ssif_info->retry_timer, - jiffies + SSIF_MSG_JIFFIES); + if (!ssif_info->stopping) + mod_timer(&ssif_info->retry_timer, + jiffies + SSIF_MSG_JIFFIES); ipmi_ssif_unlock_cond(ssif_info, flags); return; } @@ -629,8 +630,9 @@ static void msg_done_handler(struct ssif_info *ssif_info, int result, /* Remove the multi-part read marker. 
*/ len -= 2; + data += 2; for (i = 0; i < len; i++) - ssif_info->data[i] = data[i+2]; + ssif_info->data[i] = data[i]; ssif_info->multi_len = len; ssif_info->multi_pos = 1; @@ -658,8 +660,19 @@ static void msg_done_handler(struct ssif_info *ssif_info, int result, } blocknum = data[0]; + len--; + data++; + + if (blocknum != 0xff && len != 31) { + /* All blocks but the last must have 31 data bytes. */ + result = -EIO; + if (ssif_info->ssif_debug & SSIF_DEBUG_MSG) + pr_info("Received middle message <31\n"); - if (ssif_info->multi_len + len - 1 > IPMI_MAX_MSG_LENGTH) { + goto continue_op; + } + + if (ssif_info->multi_len + len > IPMI_MAX_MSG_LENGTH) { /* Received message too big, abort the operation. */ result = -E2BIG; if (ssif_info->ssif_debug & SSIF_DEBUG_MSG) @@ -668,16 +681,14 @@ static void msg_done_handler(struct ssif_info *ssif_info, int result, goto continue_op; } - /* Remove the blocknum from the data. */ - len--; for (i = 0; i < len; i++) - ssif_info->data[i + ssif_info->multi_len] = data[i + 1]; + ssif_info->data[i + ssif_info->multi_len] = data[i]; ssif_info->multi_len += len; if (blocknum == 0xff) { /* End of read */ len = ssif_info->multi_len; data = ssif_info->data; - } else if (blocknum + 1 != ssif_info->multi_pos) { + } else if (blocknum != ssif_info->multi_pos) { /* * Out of sequence block, just abort. 
Block * numbers start at zero for the second block, @@ -705,6 +716,7 @@ static void msg_done_handler(struct ssif_info *ssif_info, int result, } } + continue_op: if (result < 0) { ssif_inc_stat(ssif_info, receive_errors); } else { @@ -712,8 +724,6 @@ static void msg_done_handler(struct ssif_info *ssif_info, int result, ssif_inc_stat(ssif_info, received_message_parts); } - - continue_op: if (ssif_info->ssif_debug & SSIF_DEBUG_STATE) pr_info(PFX "DONE 1: state = %d, result=%d.\n", ssif_info->ssif_state, result); @@ -939,8 +949,9 @@ static void msg_written_handler(struct ssif_info *ssif_info, int result, ssif_info->waiting_alert = true; ssif_info->retries_left = SSIF_RECV_RETRIES; ssif_info->rtc_us_timer = SSIF_MSG_PART_USEC; - mod_timer(&ssif_info->retry_timer, - jiffies + SSIF_MSG_PART_JIFFIES); + if (!ssif_info->stopping) + mod_timer(&ssif_info->retry_timer, + jiffies + SSIF_MSG_PART_JIFFIES); ipmi_ssif_unlock_cond(ssif_info, flags); } } diff --git a/drivers/char/mwave/mwavedd.c b/drivers/char/mwave/mwavedd.c index b5e3103c11755..e43c876a92232 100644 --- a/drivers/char/mwave/mwavedd.c +++ b/drivers/char/mwave/mwavedd.c @@ -59,6 +59,7 @@ #include #include #include +#include #include "smapi.h" #include "mwavedd.h" #include "3780i.h" @@ -289,6 +290,8 @@ static long mwave_ioctl(struct file *file, unsigned int iocmd, ipcnum); return -EINVAL; } + ipcnum = array_index_nospec(ipcnum, + ARRAY_SIZE(pDrvData->IPCs)); PRINTK_3(TRACE_MWAVE, "mwavedd::mwave_ioctl IOCTL_MW_REGISTER_IPC" " ipcnum %x entry usIntCount %x\n", @@ -317,6 +320,8 @@ static long mwave_ioctl(struct file *file, unsigned int iocmd, " Invalid ipcnum %x\n", ipcnum); return -EINVAL; } + ipcnum = array_index_nospec(ipcnum, + ARRAY_SIZE(pDrvData->IPCs)); PRINTK_3(TRACE_MWAVE, "mwavedd::mwave_ioctl IOCTL_MW_GET_IPC" " ipcnum %x, usIntCount %x\n", @@ -383,6 +388,8 @@ static long mwave_ioctl(struct file *file, unsigned int iocmd, ipcnum); return -EINVAL; } + ipcnum = array_index_nospec(ipcnum, + 
ARRAY_SIZE(pDrvData->IPCs)); mutex_lock(&mwave_mutex); if (pDrvData->IPCs[ipcnum].bIsEnabled == true) { pDrvData->IPCs[ipcnum].bIsEnabled = false; diff --git a/drivers/char/rpmb/Kconfig b/drivers/char/rpmb/Kconfig new file mode 100644 index 0000000000000..568ba9adb1179 --- /dev/null +++ b/drivers/char/rpmb/Kconfig @@ -0,0 +1,44 @@ +# SPDX-License-Identifier: GPL-2.0 +config RPMB + tristate "RPMB partition interface" + help + Unified RPMB partition interface for eMMC and UFS. + Provides interface for in kernel security controllers to + access RPMB partition. + + If unsure, select N. + +config RPMB_INTF_DEV + bool "RPMB character device interface /dev/rpmbN" + depends on RPMB + help + Say yes here if you want to access RPMB from user space + via character device interface /dev/rpmb%d + +config RPMB_SIM + tristate "RPMB partition device simulator" + default n + select RPMB + select CRYPTO_SHA256 + select CRYPTO_HMAC + help + RPMB partition simulation device is a virtual device that + provides simulation of the RPMB protocol and use kernel memory + as storage. + + Be aware it doesn't promise any real security. This driver is + suitable only for testing of the RPMB subsystem or RPMB applications + prior to RPMB key provisioning. + Most people should say N here. + +config VIRTIO_RPMB + tristate "Virtio RPMB character device interface /dev/vrpmb" + default n + depends on VIRTIO + select RPMB + help + Say yes here if you want to access virtio RPMB from user space + via character device interface /dev/vrpmb. + This device interface is only for guest/frontend virtio driver. 
+ +source "drivers/char/rpmb/mux/Kconfig" diff --git a/drivers/char/rpmb/Makefile b/drivers/char/rpmb/Makefile new file mode 100644 index 0000000000000..20cf088d24769 --- /dev/null +++ b/drivers/char/rpmb/Makefile @@ -0,0 +1,10 @@ +# SPDX-License-Identifier: GPL-2.0 +obj-$(CONFIG_RPMB) += rpmb.o +rpmb-objs += core.o +rpmb-$(CONFIG_RPMB_INTF_DEV) += cdev.o +obj-$(CONFIG_RPMB_SIM) += rpmb_sim.o +obj-$(CONFIG_VIRTIO_RPMB) += virtio_rpmb.o + +ccflags-y += -D__CHECK_ENDIAN__ + +obj-$(CONFIG_RPMB_MUX) += mux/ diff --git a/drivers/char/rpmb/cdev.c b/drivers/char/rpmb/cdev.c new file mode 100644 index 0000000000000..028c7ecd2ac74 --- /dev/null +++ b/drivers/char/rpmb/cdev.c @@ -0,0 +1,308 @@ +// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 +/* + * Copyright(c) 2015 - 2018 Intel Corporation. All rights reserved. + */ +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include +#include +#include +#include +#include + +#include + +#include "rpmb-cdev.h" + +static dev_t rpmb_devt; +#define RPMB_MAX_DEVS MINORMASK + +#define RPMB_DEV_OPEN 0 /** single open bit (position) */ +/* from MMC_IOC_MAX_CMDS */ +#define RPMB_MAX_FRAMES 255 + +/** + * rpmb_open - the open function + * + * @inode: pointer to inode structure + * @fp: pointer to file structure + * + * Return: 0 on success, <0 on error + */ +static int rpmb_open(struct inode *inode, struct file *fp) +{ + struct rpmb_dev *rdev; + + rdev = container_of(inode->i_cdev, struct rpmb_dev, cdev); + if (!rdev) + return -ENODEV; + + /* the rpmb is single open! */ + if (test_and_set_bit(RPMB_DEV_OPEN, &rdev->status)) + return -EBUSY; + + mutex_lock(&rdev->lock); + + fp->private_data = rdev; + + mutex_unlock(&rdev->lock); + + return nonseekable_open(inode, fp); +} + +/** + * rpmb_release - the cdev release function + * + * @inode: pointer to inode structure + * @fp: pointer to file structure + * + * Return: 0 always. 
+ */ +static int rpmb_release(struct inode *inode, struct file *fp) +{ + struct rpmb_dev *rdev = fp->private_data; + + clear_bit(RPMB_DEV_OPEN, &rdev->status); + + return 0; +} + +static size_t rpmb_ioc_frames_len(struct rpmb_dev *rdev, size_t nframes) +{ + if (rdev->ops->type == RPMB_TYPE_NVME) + return rpmb_ioc_frames_len_nvme(nframes); + else + return rpmb_ioc_frames_len_jdec(nframes); +} + +/** + * rpmb_cmd_copy_from_user - copy rpmb command from the user space + * + * @rdev: rpmb device + * @cmd: internal cmd structure + * @ucmd: user space cmd structure + * + * Return: 0 on success, <0 on error + */ +static int rpmb_cmd_copy_from_user(struct rpmb_dev *rdev, + struct rpmb_cmd *cmd, + struct rpmb_ioc_cmd __user *ucmd) +{ + void *frames; + u64 frames_ptr; + + if (get_user(cmd->flags, &ucmd->flags)) + return -EFAULT; + + if (get_user(cmd->nframes, &ucmd->nframes)) + return -EFAULT; + + if (cmd->nframes > RPMB_MAX_FRAMES) + return -EOVERFLOW; + + /* some archs have issues with 64bit get_user */ + if (copy_from_user(&frames_ptr, &ucmd->frames_ptr, sizeof(frames_ptr))) + return -EFAULT; + + frames = memdup_user(u64_to_user_ptr(frames_ptr), + rpmb_ioc_frames_len(rdev, cmd->nframes)); + if (IS_ERR(frames)) + return PTR_ERR(frames); + + cmd->frames = frames; + return 0; +} + +/** + * rpmb_cmd_copy_to_user - copy rpmb command to the user space + * + * @rdev: rpmb device + * @ucmd: user space cmd structure + * @cmd: internal cmd structure + * + * Return: 0 on success, <0 on error + */ +static int rpmb_cmd_copy_to_user(struct rpmb_dev *rdev, + struct rpmb_ioc_cmd __user *ucmd, + struct rpmb_cmd *cmd) +{ + u64 frames_ptr; + + if (copy_from_user(&frames_ptr, &ucmd->frames_ptr, sizeof(frames_ptr))) + return -EFAULT; + + /* some archs have issues with 64bit get_user */ + if (copy_to_user(u64_to_user_ptr(frames_ptr), cmd->frames, + rpmb_ioc_frames_len(rdev, cmd->nframes))) + return -EFAULT; + + return 0; +} + +/** + * rpmb_ioctl_seq_cmd - issue an rpmb command sequence + * + * 
@rdev: rpmb device + * @ptr: rpmb cmd sequence + * + * RPMB_IOC_SEQ_CMD handler + * + * Return: 0 on success, <0 on error + */ +static long rpmb_ioctl_seq_cmd(struct rpmb_dev *rdev, + struct rpmb_ioc_seq_cmd __user *ptr) +{ + __u64 ncmds; + struct rpmb_cmd *cmds; + struct rpmb_ioc_cmd __user *ucmds; + + int i; + int ret; + + /* The caller must have CAP_SYS_RAWIO, like mmc ioctl */ + if (!capable(CAP_SYS_RAWIO)) + return -EPERM; + + /* some archs have issues with 64bit get_user */ + if (copy_from_user(&ncmds, &ptr->num_of_cmds, sizeof(ncmds))) + return -EFAULT; + + if (ncmds > 3) { + dev_err(&rdev->dev, "supporting up to 3 packets (%llu)\n", + ncmds); + return -EINVAL; + } + + cmds = kcalloc(ncmds, sizeof(*cmds), GFP_KERNEL); + if (!cmds) + return -ENOMEM; + + ucmds = (struct rpmb_ioc_cmd __user *)ptr->cmds; + for (i = 0; i < ncmds; i++) { + ret = rpmb_cmd_copy_from_user(rdev, &cmds[i], &ucmds[i]); + if (ret) + goto out; + } + + ret = rpmb_cmd_seq(rdev, cmds, ncmds); + if (ret) + goto out; + + for (i = 0; i < ncmds; i++) { + ret = rpmb_cmd_copy_to_user(rdev, &ucmds[i], &cmds[i]); + if (ret) + goto out; + } +out: + for (i = 0; i < ncmds; i++) + kfree(cmds[i].frames); + kfree(cmds); + return ret; +} + +static long rpmb_ioctl_ver_cmd(struct rpmb_dev *rdev, + struct rpmb_ioc_ver_cmd __user *ptr) +{ + struct rpmb_ioc_ver_cmd ver = { + .api_version = RPMB_API_VERSION, + }; + + return copy_to_user(ptr, &ver, sizeof(ver)) ? -EFAULT : 0; +} + +static long rpmb_ioctl_cap_cmd(struct rpmb_dev *rdev, + struct rpmb_ioc_cap_cmd __user *ptr) +{ + struct rpmb_ioc_cap_cmd cap; + + cap.device_type = rdev->ops->type; + cap.target = rdev->target; + cap.block_size = rdev->ops->block_size; + cap.wr_cnt_max = rdev->ops->wr_cnt_max; + cap.rd_cnt_max = rdev->ops->rd_cnt_max; + cap.auth_method = rdev->ops->auth_method; + cap.capacity = rpmb_get_capacity(rdev); + cap.reserved = 0; + + return copy_to_user(ptr, &cap, sizeof(cap)) ? 
-EFAULT : 0; +} + +/** + * rpmb_ioctl - rpmb ioctl dispatcher + * + * @fp: a file pointer + * @cmd: ioctl command RPMB_IOC_SEQ_CMD RPMB_IOC_VER_CMD RPMB_IOC_CAP_CMD + * @arg: ioctl data: rpmb_ioc_ver_cmd rpmb_ioc_cap_cmd pmb_ioc_seq_cmd + * + * Return: 0 on success; < 0 on error + */ +static long rpmb_ioctl(struct file *fp, unsigned int cmd, unsigned long arg) +{ + struct rpmb_dev *rdev = fp->private_data; + void __user *ptr = (void __user *)arg; + + switch (cmd) { + case RPMB_IOC_VER_CMD: + return rpmb_ioctl_ver_cmd(rdev, ptr); + case RPMB_IOC_CAP_CMD: + return rpmb_ioctl_cap_cmd(rdev, ptr); + case RPMB_IOC_SEQ_CMD: + return rpmb_ioctl_seq_cmd(rdev, ptr); + default: + dev_err(&rdev->dev, "unsupported ioctl 0x%x.\n", cmd); + return -ENOIOCTLCMD; + } +} + +#ifdef CONFIG_COMPAT +static long rpmb_compat_ioctl(struct file *fp, unsigned int cmd, + unsigned long arg) +{ + return rpmb_ioctl(fp, cmd, (unsigned long)compat_ptr(arg)); +} +#endif /* CONFIG_COMPAT */ + +static const struct file_operations rpmb_fops = { + .open = rpmb_open, + .release = rpmb_release, + .unlocked_ioctl = rpmb_ioctl, +#ifdef CONFIG_COMPAT + .compat_ioctl = rpmb_compat_ioctl, +#endif + .owner = THIS_MODULE, + .llseek = noop_llseek, +}; + +void rpmb_cdev_prepare(struct rpmb_dev *rdev) +{ + rdev->dev.devt = MKDEV(MAJOR(rpmb_devt), rdev->id); + rdev->cdev.owner = THIS_MODULE; + cdev_init(&rdev->cdev, &rpmb_fops); +} + +void rpmb_cdev_add(struct rpmb_dev *rdev) +{ + cdev_add(&rdev->cdev, rdev->dev.devt, 1); +} + +void rpmb_cdev_del(struct rpmb_dev *rdev) +{ + if (rdev->dev.devt) + cdev_del(&rdev->cdev); +} + +int __init rpmb_cdev_init(void) +{ + int ret; + + ret = alloc_chrdev_region(&rpmb_devt, 0, RPMB_MAX_DEVS, "rpmb"); + if (ret < 0) + pr_err("unable to allocate char dev region\n"); + + return ret; +} + +void __exit rpmb_cdev_exit(void) +{ + unregister_chrdev_region(rpmb_devt, RPMB_MAX_DEVS); +} diff --git a/drivers/char/rpmb/core.c b/drivers/char/rpmb/core.c new file mode 100644 index 
0000000000000..e02c12b8046ca --- /dev/null +++ b/drivers/char/rpmb/core.c @@ -0,0 +1,457 @@ +// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 +/* + * Copyright(c) 2015 - 2018 Intel Corporation. All rights reserved. + */ +#include +#include +#include +#include +#include +#include +#include + +#include +#include "rpmb-cdev.h" + +static DEFINE_IDA(rpmb_ida); + +/** + * rpmb_dev_get - increase rpmb device ref counter + * + * @rdev: rpmb device + */ +struct rpmb_dev *rpmb_dev_get(struct rpmb_dev *rdev) +{ + return get_device(&rdev->dev) ? rdev : NULL; +} +EXPORT_SYMBOL_GPL(rpmb_dev_get); + +/** + * rpmb_dev_put - decrease rpmb device ref counter + * + * @rdev: rpmb device + */ +void rpmb_dev_put(struct rpmb_dev *rdev) +{ + put_device(&rdev->dev); +} +EXPORT_SYMBOL_GPL(rpmb_dev_put); + +/** + * rpmb_cmd_fixup - fixup rpmb command + * + * @rdev: rpmb device + * @cmds: rpmb command list + * @ncmds: number of commands + * + */ +static void rpmb_cmd_fixup(struct rpmb_dev *rdev, + struct rpmb_cmd *cmds, u32 ncmds) +{ + int i; + + if (RPMB_TYPE_HW(rdev->ops->type) != RPMB_TYPE_EMMC) + return; + + /* Fixup RPMB_READ_DATA specific to eMMC + * The block count of the RPMB read operation is not indicated + * in the original RPMB Data Read Request packet. + * This is different then implementation for other protocol + * standards. 
+ */ + for (i = 0; i < ncmds; i++) { + struct rpmb_frame_jdec *frame = cmds[i].frames; + + if (frame->req_resp == cpu_to_be16(RPMB_READ_DATA)) { + dev_dbg(&rdev->dev, "Fixing up READ_DATA frame to block_count=0\n"); + frame->block_count = 0; + } + } +} + +/** + * rpmb_cmd_seq - send RPMB command sequence + * + * @rdev: rpmb device + * @cmds: rpmb command list + * @ncmds: number of commands + * + * Return: 0 on success + * -EINVAL on wrong parameters + * -EOPNOTSUPP if device doesn't support the requested operation + * < 0 if the operation fails + */ +int rpmb_cmd_seq(struct rpmb_dev *rdev, struct rpmb_cmd *cmds, u32 ncmds) +{ + int err; + + if (!rdev || !cmds || !ncmds) + return -EINVAL; + + mutex_lock(&rdev->lock); + err = -EOPNOTSUPP; + if (rdev->ops && rdev->ops->cmd_seq) { + rpmb_cmd_fixup(rdev, cmds, ncmds); + err = rdev->ops->cmd_seq(rdev->dev.parent, rdev->target, + cmds, ncmds); + } + mutex_unlock(&rdev->lock); + + return err; +} +EXPORT_SYMBOL_GPL(rpmb_cmd_seq); + +int rpmb_get_capacity(struct rpmb_dev *rdev) +{ + int err; + + if (!rdev) + return -EINVAL; + + mutex_lock(&rdev->lock); + err = -EOPNOTSUPP; + if (rdev->ops && rdev->ops->get_capacity) + err = rdev->ops->get_capacity(rdev->dev.parent, rdev->target); + mutex_unlock(&rdev->lock); + + return err; +} +EXPORT_SYMBOL_GPL(rpmb_get_capacity); + +static void rpmb_dev_release(struct device *dev) +{ + struct rpmb_dev *rdev = to_rpmb_dev(dev); + + ida_simple_remove(&rpmb_ida, rdev->id); + kfree(rdev); +} + +struct class rpmb_class = { + .name = "rpmb", + .owner = THIS_MODULE, + .dev_release = rpmb_dev_release, +}; +EXPORT_SYMBOL(rpmb_class); + +/** + * rpmb_dev_find_device - return first matching rpmb device + * + * @data: data for the match function + * @match: the matching function + * + * Return: matching rpmb device or NULL on failure + */ +static +struct rpmb_dev *rpmb_dev_find_device(const void *data, + int (*match)(struct device *dev, + const void *data)) +{ + struct device *dev; + + dev = 
class_find_device(&rpmb_class, NULL, data, match); + + return dev ? to_rpmb_dev(dev) : NULL; +} + +static int match_by_type(struct device *dev, const void *data) +{ + struct rpmb_dev *rdev = to_rpmb_dev(dev); + const u32 *type = data; + + return (*type == RPMB_TYPE_ANY || rdev->ops->type == *type); +} + +/** + * rpmb_dev_get_by_type - return first registered rpmb device + * with matching type. + * If run with RPMB_TYPE_ANY the first an probably only + * device is returned + * + * @type: rpbm underlying device type + * + * Return: matching rpmb device or NULL/ERR_PTR on failure + */ +struct rpmb_dev *rpmb_dev_get_by_type(u32 type) +{ + if (type > RPMB_TYPE_MAX) + return ERR_PTR(-EINVAL); + + return rpmb_dev_find_device(&type, match_by_type); +} +EXPORT_SYMBOL_GPL(rpmb_dev_get_by_type); + +struct device_with_target { + const struct device *dev; + u8 target; +}; + +static int match_by_parent(struct device *dev, const void *data) +{ + const struct device_with_target *d = data; + struct rpmb_dev *rdev = to_rpmb_dev(dev); + + return (d->dev && dev->parent == d->dev && rdev->target == d->target); +} + +/** + * rpmb_dev_find_by_device - retrieve rpmb device from the parent device + * + * @parent: parent device of the rpmb device + * @target: RPMB target/region within the physical device + * + * Return: NULL if there is no rpmb device associated with the parent device + */ +struct rpmb_dev *rpmb_dev_find_by_device(struct device *parent, u8 target) +{ + struct device_with_target t; + + if (!parent) + return NULL; + + t.dev = parent; + t.target = target; + + return rpmb_dev_find_device(&t, match_by_parent); +} +EXPORT_SYMBOL_GPL(rpmb_dev_find_by_device); + +static ssize_t type_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct rpmb_dev *rdev = to_rpmb_dev(dev); + const char *sim; + ssize_t ret; + + sim = (rdev->ops->type & RPMB_TYPE_SIM) ? 
":SIM" : ""; + switch (RPMB_TYPE_HW(rdev->ops->type)) { + case RPMB_TYPE_EMMC: + ret = sprintf(buf, "EMMC%s\n", sim); + break; + case RPMB_TYPE_UFS: + ret = sprintf(buf, "UFS%s\n", sim); + break; + case RPMB_TYPE_NVME: + ret = sprintf(buf, "NVMe%s\n", sim); + break; + default: + ret = sprintf(buf, "UNKNOWN\n"); + break; + } + + return ret; +} +static DEVICE_ATTR_RO(type); + +static ssize_t id_read(struct file *file, struct kobject *kobj, + struct bin_attribute *attr, char *buf, + loff_t off, size_t count) +{ + struct device *dev = kobj_to_dev(kobj); + struct rpmb_dev *rdev = to_rpmb_dev(dev); + size_t sz = min_t(size_t, rdev->ops->dev_id_len, PAGE_SIZE); + + if (!rdev->ops->dev_id) + return 0; + + return memory_read_from_buffer(buf, count, &off, rdev->ops->dev_id, sz); +} +static BIN_ATTR_RO(id, 0); + +static ssize_t wr_cnt_max_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct rpmb_dev *rdev = to_rpmb_dev(dev); + + return sprintf(buf, "%u\n", rdev->ops->wr_cnt_max); +} +static DEVICE_ATTR_RO(wr_cnt_max); + +static ssize_t rd_cnt_max_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct rpmb_dev *rdev = to_rpmb_dev(dev); + + return sprintf(buf, "%u\n", rdev->ops->rd_cnt_max); +} +static DEVICE_ATTR_RO(rd_cnt_max); + +static struct attribute *rpmb_attrs[] = { + &dev_attr_type.attr, + &dev_attr_wr_cnt_max.attr, + &dev_attr_rd_cnt_max.attr, + NULL, +}; + +static struct bin_attribute *rpmb_bin_attributes[] = { + &bin_attr_id, + NULL, +}; + +static struct attribute_group rpmb_attr_group = { + .attrs = rpmb_attrs, + .bin_attrs = rpmb_bin_attributes, +}; + +static const struct attribute_group *rpmb_attr_groups[] = { + &rpmb_attr_group, + NULL +}; + +/** + * rpmb_dev_unregister - unregister RPMB partition from the RPMB subsystem + * + * @rdev: the rpmb device to unregister + */ +int rpmb_dev_unregister(struct rpmb_dev *rdev) +{ + if (!rdev) + return -EINVAL; + + mutex_lock(&rdev->lock); + rpmb_cdev_del(rdev); + 
device_del(&rdev->dev); + mutex_unlock(&rdev->lock); + + rpmb_dev_put(rdev); + + return 0; +} +EXPORT_SYMBOL_GPL(rpmb_dev_unregister); + +/** + * rpmb_dev_unregister_by_device - unregister RPMB partition + * from the RPMB subsystem + * + * @dev: the parent device of the rpmb device + * @target: RPMB target/region within the physical device + */ +int rpmb_dev_unregister_by_device(struct device *dev, u8 target) +{ + struct rpmb_dev *rdev; + + if (!dev) + return -EINVAL; + + rdev = rpmb_dev_find_by_device(dev, target); + if (!rdev) { + dev_warn(dev, "no disk found %s\n", dev_name(dev->parent)); + return -ENODEV; + } + + rpmb_dev_put(rdev); + + return rpmb_dev_unregister(rdev); +} +EXPORT_SYMBOL_GPL(rpmb_dev_unregister_by_device); + +/** + * rpmb_dev_get_drvdata - driver data getter + * + * @rdev: rpmb device + * + * Return: driver private data + */ +void *rpmb_dev_get_drvdata(const struct rpmb_dev *rdev) +{ + return dev_get_drvdata(&rdev->dev); +} +EXPORT_SYMBOL_GPL(rpmb_dev_get_drvdata); + +/** + * rpmb_dev_set_drvdata - driver data setter + * + * @rdev: rpmb device + * @data: data to store + */ +void rpmb_dev_set_drvdata(struct rpmb_dev *rdev, void *data) +{ + dev_set_drvdata(&rdev->dev, data); +} +EXPORT_SYMBOL_GPL(rpmb_dev_set_drvdata); + +/** + * rpmb_dev_register - register RPMB partition with the RPMB subsystem + * + * @dev: storage device of the rpmb device + * @target: RPMB target/region within the physical device + * @ops: device specific operations + */ +struct rpmb_dev *rpmb_dev_register(struct device *dev, u8 target, + const struct rpmb_ops *ops) +{ + struct rpmb_dev *rdev; + int id; + int ret; + + if (!dev || !ops) + return ERR_PTR(-EINVAL); + + if (!ops->cmd_seq) + return ERR_PTR(-EINVAL); + + if (!ops->get_capacity) + return ERR_PTR(-EINVAL); + + if (ops->type == RPMB_TYPE_ANY || ops->type > RPMB_TYPE_MAX) + return ERR_PTR(-EINVAL); + + rdev = kzalloc(sizeof(*rdev), GFP_KERNEL); + if (!rdev) + return ERR_PTR(-ENOMEM); + + id = ida_simple_get(&rpmb_ida, 
0, 0, GFP_KERNEL); + if (id < 0) { + ret = id; + goto exit; + } + + mutex_init(&rdev->lock); + rdev->ops = ops; + rdev->id = id; + rdev->target = target; + + dev_set_name(&rdev->dev, "rpmb%d", id); + rdev->dev.class = &rpmb_class; + rdev->dev.parent = dev; + rdev->dev.groups = rpmb_attr_groups; + + rpmb_cdev_prepare(rdev); + + ret = device_register(&rdev->dev); + if (ret) + goto exit; + + rpmb_cdev_add(rdev); + + dev_dbg(&rdev->dev, "registered device\n"); + + return rdev; + +exit: + if (id >= 0) + ida_simple_remove(&rpmb_ida, id); + kfree(rdev); + return ERR_PTR(ret); +} +EXPORT_SYMBOL_GPL(rpmb_dev_register); + +static int __init rpmb_init(void) +{ + ida_init(&rpmb_ida); + class_register(&rpmb_class); + return rpmb_cdev_init(); +} + +static void __exit rpmb_exit(void) +{ + rpmb_cdev_exit(); + class_unregister(&rpmb_class); + ida_destroy(&rpmb_ida); +} + +subsys_initcall(rpmb_init); +module_exit(rpmb_exit); + +MODULE_AUTHOR("Intel Corporation"); +MODULE_DESCRIPTION("RPMB class"); +MODULE_LICENSE("Dual BSD/GPL"); diff --git a/drivers/char/rpmb/mux/Kconfig b/drivers/char/rpmb/mux/Kconfig new file mode 100644 index 0000000000000..8087f9b797f84 --- /dev/null +++ b/drivers/char/rpmb/mux/Kconfig @@ -0,0 +1,14 @@ +config RPMB_MUX + tristate "RPMB Mux kernel module interface /dev/rpmbmux" + default n + select RPMB + select CRYPTO_SHA256 + select CRYPTO_HMAC + help + Say yes here if you want to access RPMB from user space + via character device interface /dev/rpmbmux, which is acted + as a multiplexor for RPMB by calling RPMB native driver directly. + + It owns RPMB authentication key internally for RPMB + virtualization usage.The users who don't own RPMB key + in such a RPMB virtualization use case could enable it. 
diff --git a/drivers/char/rpmb/mux/Makefile b/drivers/char/rpmb/mux/Makefile new file mode 100644 index 0000000000000..94999dd468db9 --- /dev/null +++ b/drivers/char/rpmb/mux/Makefile @@ -0,0 +1,8 @@ +obj-$(CONFIG_RPMB_MUX) += rpmb_mux.o +rpmb_mux-objs := mux.o +rpmb_mux-objs += mux_hkdf.o +rpmb_mux-objs += key.o +rpmb_mux-objs += key_abl.o +rpmb_mux-objs += key_sbl.o + +ccflags-y += -D__CHECK_ENDIAN__ diff --git a/drivers/char/rpmb/mux/key.c b/drivers/char/rpmb/mux/key.c new file mode 100644 index 0000000000000..e9df04765ad77 --- /dev/null +++ b/drivers/char/rpmb/mux/key.c @@ -0,0 +1,58 @@ +// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 +/* + * RPMB Key management: key retrieval + * + * Copyright (c) 2018 Intel Corporation. All rights reserved. + */ + +#define pr_fmt(fmt) KBUILD_MODNAME ":%s: " fmt, __func__ + +#include +#include +#include + +#include "key.h" +#include "key_sbl.h" +#include "key_abl.h" + +static ulong sbl_params_addr; +static ulong abl_params_addr; + +static int __init get_sbl_params_addr(char *str) +{ + if (kstrtoul(str, 16, &sbl_params_addr)) { + pr_err("Failed to parse ImageBootParamsAddr\n"); + return -EINVAL; + } + + return 0; +} +__setup("ImageBootParamsAddr=", get_sbl_params_addr); + +static int __init get_abl_params_addr(char *str) +{ + if (kstrtoul(str, 16, &abl_params_addr)) { + pr_err("Failed to parse dev_sec_info.param\n"); + return -EINVAL; + } + + return 0; +} +__setup("dev_sec_info.param_addr=", get_abl_params_addr); + +int rpmb_key_get(const u8 *dev_id, size_t dev_id_len, + size_t max_partition_num, u8 rpmb_key[][RPMB_KEY_LENGTH]) +{ + int ret = -1; + + if (sbl_params_addr) + ret = rpmb_key_sbl_get(sbl_params_addr, max_partition_num, + rpmb_key); + else if (abl_params_addr) + ret = rpmb_key_abl_get(abl_params_addr, dev_id, dev_id_len, + max_partition_num, rpmb_key); + else + pr_err("Failed to get boot_params from the command line!\n"); + + return ret; +} diff --git a/drivers/char/rpmb/mux/key.h b/drivers/char/rpmb/mux/key.h 
new file mode 100644 index 0000000000000..8c17fbcfc8207 --- /dev/null +++ b/drivers/char/rpmb/mux/key.h @@ -0,0 +1,24 @@ +/* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 */ +/* + * RPMB Key management: retrieve and distribute + * + * Copyright (c) 2018 Intel Corporation. All rights reserved. + */ + +#ifndef __RPMB_KEY_H__ +#define __RPMB_KEY_H__ + +/* + * Storage may support multiple rpmb partitions, but the specification + * does not specify the max number of rpmb partitions. + * Here we use 6 for now. In future, this may need to be expanded + * dynamically. + */ +#define RPMB_MAX_PARTITION_NUMBER 6U + +#define RPMB_KEY_LENGTH 64U + +int rpmb_key_get(const u8 *dev_id, size_t dev_id_len, + size_t max_partition_num, u8 rpmb_key[][RPMB_KEY_LENGTH]); + +#endif /* !__RPMB_KEY_H__ */ diff --git a/drivers/char/rpmb/mux/key_abl.c b/drivers/char/rpmb/mux/key_abl.c new file mode 100644 index 0000000000000..bcb7355622180 --- /dev/null +++ b/drivers/char/rpmb/mux/key_abl.c @@ -0,0 +1,143 @@ +// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 +/* + * Parse legacy seed from ABL(Automotive Bootloader). Derive a rpmb key + * with the legacy seed. + * + * Copyright (c) 2018 Intel Corporation. All rights reserved. + */ + +#define pr_fmt(fmt) KBUILD_MODNAME ":%s: " fmt, __func__ + +#include +#include +#include +#include +#include +#include + +#include "key.h" +#include "key_abl.h" +#include "mux_hkdf.h" + +#define ABL_SEED_LEN 32U +#define ABL_SEED_LIST_MAX 4U +#define EMMC_SERIAL_LEN 15U + +struct abl_seed_info { + u8 svn; + u8 reserved[3]; + u8 seed[ABL_SEED_LEN]; +}; + +struct dev_sec_info { + u32 size_of_this_struct; + u32 version; + u32 num_seeds; + struct abl_seed_info seed_list[ABL_SEED_LIST_MAX]; +}; + +/* + * The output serial is concatenation of mmc product name with a string + * representation of PSN. 
+ */ +static int rpmb_key_abl_build_serial(const u8 *cid, u8 *serial) +{ + u32 psn; + + if (!cid || !serial) + return -EFAULT; + + psn = (cid[9] << 24) | (cid[8] << 16) | (cid[15] << 8) | cid[14]; + + serial[0] = cid[0]; + serial[1] = cid[7]; + serial[2] = cid[6]; + serial[3] = cid[5]; + serial[4] = cid[4]; + serial[5] = cid[11]; + + snprintf(&serial[6], 9, "%08x", psn); + + return 0; +} + +int rpmb_key_abl_get(ulong params_addr, const u8 *dev_id, size_t dev_id_len, + size_t max_partition_num, u8 rpmb_key[][RPMB_KEY_LENGTH]) +{ + u32 i, legacy_seed_index = 0; + struct dev_sec_info *sec_info; + struct abl_seed_info *seed_list; + u8 serial[EMMC_SERIAL_LEN] = {0}; + int ret; + + if (!params_addr || !dev_id || !dev_id_len || !max_partition_num) { + pr_err("Invalid input params!\n"); + return -EFAULT; + } + + ret = rpmb_key_abl_build_serial(dev_id, serial); + if (ret) { + pr_err("Failed to build serial from cid\n"); + return -EFAULT; + } + + sec_info = memremap(params_addr, sizeof(*sec_info), MEMREMAP_WB); + if (!sec_info) { + pr_err("Remap params_addr failed!\n"); + return -EFAULT; + } + seed_list = &sec_info->seed_list[0]; + + /* + * The seed_list must contain at least 2 seeds: 1 is legacy + * seed and others are SVN based seed. + */ + if (sec_info->num_seeds < 2U || + sec_info->num_seeds > ABL_SEED_LIST_MAX) { + pr_err("Invalid seed number!\n"); + memunmap(sec_info); + return -EFAULT; + } + + /* + * The seed_list from ABL contains several seeds which based on SVN + * and one legacy seed which is not based on SVN. The legacy seed's + * svn value is minimum in the seed list. And CSE ensures at least two + * seeds will be generated which will contain the legacy seed. + * Here find the legacy seed index first. 
+ */ + for (i = 1; i < sec_info->num_seeds; i++) { + if (seed_list[i].svn < seed_list[legacy_seed_index].svn) + legacy_seed_index = i; + } + + /* + * The eMMC Field Firmware Update would impact below fields of + * CID(Card Identification): + * CID[6]:PRV (Product Revision) + * CID[0]:CRC (CRC7 checksum) + * Mapping relation between CID and eMMC serial: + * serial[0] = CID[0] + * serial[2] = CID[6] + * So mask off serial[0]/serial[2] fields when using eMMC serial + * to derive rpmb key. + */ + serial[0] ^= serial[0]; + serial[2] ^= serial[2]; + + /* + * Derive RPMB key from legacy seed with storage serial number. + * Currently, only support eMMC storage device, UFS storage device is + * not supported. + */ + ret = mux_hkdf_sha256(&rpmb_key[0][0], SHA256_HASH_SIZE, + (const u8 *)&seed_list[legacy_seed_index].seed[0], + ABL_SEED_LEN, + NULL, 0, + (const u8 *)serial, sizeof(serial)); + + memset(&seed_list[legacy_seed_index], 0, sizeof(struct abl_seed_info)); + memunmap(sec_info); + + return ret; +} diff --git a/drivers/char/rpmb/mux/key_abl.h b/drivers/char/rpmb/mux/key_abl.h new file mode 100644 index 0000000000000..0d2a09abde608 --- /dev/null +++ b/drivers/char/rpmb/mux/key_abl.h @@ -0,0 +1,9 @@ +/* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 */ + +#ifndef __RPMB_KEY_ABL__ +#define __RPMB_KEY_ABL__ + +int rpmb_key_abl_get(ulong params_addr, const u8 *dev_id, size_t dev_id_len, + size_t max_partition_num, u8 rpmb_key[][RPMB_KEY_LENGTH]); + +#endif /* !__RPMB_KEY_ABL__ */ diff --git a/drivers/char/rpmb/mux/key_sbl.c b/drivers/char/rpmb/mux/key_sbl.c new file mode 100644 index 0000000000000..8a238ac7f5ae5 --- /dev/null +++ b/drivers/char/rpmb/mux/key_sbl.c @@ -0,0 +1,166 @@ +// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 +/* + * Parse RPMB key from SBL(SlimBootloader). + * + * Copyright (c) 2018 Intel Corporation. All rights reserved. 
+ */ + +#define pr_fmt(fmt) KBUILD_MODNAME ":%s: " fmt, __func__ + +#include +#include +#include +#include +#include + +#include "key.h" +#include "key_sbl.h" + +#define SEED_ENTRY_TYPE_SVNSEED 0x1U +#define SEED_ENTRY_TYPE_RPMBSEED 0x2U + +#define SEED_ENTRY_USAGE_BASE_ON_SERIAL 0x1U +#define SEED_ENTRY_USAGE_NOT_BASE_ON_SERIAL 0x2U + +struct image_boot_params { + u32 size_of_this_struct; + u32 version; + u64 p_seed_list; + u64 p_platform_info; + u64 reserved; +}; + +struct seed_entry { + /* SVN based seed or RPMB seed or attestation key_box */ + u8 type; + /* For SVN seed: useed or dseed + * For RPMB seed: serial number based or not + */ + u8 usage; + /* index for the same type and usage seed */ + u8 index; + u8 reserved; + /* reserved for future use */ + u16 flags; + /* Total size of this seed entry */ + u16 seed_entry_size; + /* SVN seed: struct seed_info + * RPMB seed: u8 rpmb_seed[key_len] + */ + u8 seed[0]; +}; + +struct seed_list_hob { + u8 revision; + u8 rsvd0[3]; + u32 buffer_size; + u8 total_seed_count; + u8 rsvd1[3]; + struct seed_entry entry[0]; +}; + +static int rpmb_key_sbl_parse_seed_list(struct seed_list_hob *seed_hob, + size_t max_partition_num, + u8 rpmb_seed[][RPMB_KEY_LENGTH]) +{ + u8 i; + u8 index = 0U; + struct seed_entry *entry; + + if (!seed_hob || !max_partition_num) { + pr_warn("Invalid input parameters!\n"); + goto fail; + } + + if (seed_hob->total_seed_count == 0U) { + pr_warn("Total seed count is 0.\n"); + goto fail; + } + + entry = seed_hob->entry; + + for (i = 0U; i < seed_hob->total_seed_count; i++) { + if ((u8 *)entry >= (u8 *)seed_hob + seed_hob->buffer_size) { + pr_warn("Exceed memory boundray!\n"); + goto fail; + } + + /* retrieve rpmb seed */ + if (entry->type == SEED_ENTRY_TYPE_RPMBSEED) { + if (entry->index != 0) { + pr_warn("RPMB usage mismatch!\n"); + goto fail; + } + + /* The seed_entry with same type/usage are always + * arranged by index in order of 0~3. 
+ */ + if (entry->index != index) { + pr_warn("Index mismatch.\n"); + goto fail; + } + + if (entry->index > max_partition_num) { + pr_warn("Index exceed max number!\n"); + goto fail; + } + + memcpy(&rpmb_seed[index], entry->seed, RPMB_KEY_LENGTH); + index++; + + /* erase original seed in seed entry */ + memset(entry->seed, 0U, RPMB_KEY_LENGTH); + } + + entry = (struct seed_entry *)((u8 *)entry + + entry->seed_entry_size); + } + + return 0; + +fail: + return -EFAULT; +} + +int rpmb_key_sbl_get(ulong params_addr, size_t max_partition_num, + u8 rpmb_key[][RPMB_KEY_LENGTH]) +{ + struct image_boot_params *boot_params = NULL; + struct seed_list_hob *seed_list = NULL; + u32 remap_buffer_size = 0; + + if (!params_addr || !max_partition_num) { + pr_err("Invalid input params!\n"); + goto fail; + } + + boot_params = memremap(params_addr, sizeof(*boot_params), MEMREMAP_WB); + if (!boot_params) { + pr_err("Remap params_addr failed!\n"); + goto fail; + } + + seed_list = memremap(boot_params->p_seed_list, + sizeof(*seed_list), MEMREMAP_WB); + if (!seed_list) { + pr_err("Remap seed_list failed!\n"); + goto fail; + } + + remap_buffer_size = seed_list->buffer_size; + memunmap(seed_list); + + /* Remap with actual buffer size */ + seed_list = memremap(boot_params->p_seed_list, + remap_buffer_size, MEMREMAP_WB); + + return rpmb_key_sbl_parse_seed_list(seed_list, max_partition_num, + rpmb_key); + +fail: + if (seed_list) + memunmap(seed_list); + if (boot_params) + memunmap(boot_params); + return -EFAULT; +} diff --git a/drivers/char/rpmb/mux/key_sbl.h b/drivers/char/rpmb/mux/key_sbl.h new file mode 100644 index 0000000000000..0483c176012cc --- /dev/null +++ b/drivers/char/rpmb/mux/key_sbl.h @@ -0,0 +1,9 @@ +/* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 */ + +#ifndef __RPMB_KEY_SBL__ +#define __RPMB_KEY_SBL__ + +int rpmb_key_sbl_get(ulong params_addr, size_t max_partition_num, + u8 rpmb_key[][RPMB_KEY_LENGTH]); + +#endif /* __RPMB_KEY_SBL__ */ diff --git 
a/drivers/char/rpmb/mux/mux.c b/drivers/char/rpmb/mux/mux.c new file mode 100644 index 0000000000000..c7caa04069069 --- /dev/null +++ b/drivers/char/rpmb/mux/mux.c @@ -0,0 +1,754 @@ +// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 +/* + * RPMB Mux Kernel Module Driver + * + * Copyright (c) 2018 Intel Corporation. All rights reserved. + */ + +#define pr_fmt(fmt) KBUILD_MODNAME ":%s: " fmt, __func__ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "key.h" + +/** + * struct rpmb_mux_dev - device which can support RPMB partition + * @lock : the device lock + * @rdev : point to the rpmb device + * @cdev : character dev + * @rpmb_interface : rpmb class interface + * @write_counter : write counter of RPMB + * @wc_inited : write counter is initialized + * @rpmb_key : RPMB authentication key + * @hash_desc : hmac(sha256) shash descriptor + */ +struct rpmb_mux_dev { + struct mutex lock; /* device serialization lock */ + struct rpmb_dev *rdev; + struct cdev cdev; + struct class_interface rpmb_interface; + + u32 write_counter; + u32 wc_inited; + u8 rpmb_key[32]; + struct shash_desc *hash_desc; +}; + +static dev_t rpmb_mux_devt; +static struct rpmb_mux_dev *__mux_dev; +static struct class *rpmb_mux_class; +/* from MMC_IOC_MAX_CMDS */ +#define RPMB_MAX_FRAMES 255 + +static int rpmb_mux_open(struct inode *inode, struct file *fp) +{ + struct rpmb_mux_dev *mux_dev; + + mux_dev = container_of(inode->i_cdev, struct rpmb_mux_dev, cdev); + if (!mux_dev) + return -ENODEV; + + mutex_lock(&mux_dev->lock); + + fp->private_data = mux_dev; + + mutex_unlock(&mux_dev->lock); + + return nonseekable_open(inode, fp); +} + +static int rpmb_mux_release(struct inode *inode, struct file *fp) +{ + return 0; +} + +static int rpmb_mux_hmac_256_alloc(struct rpmb_mux_dev *mux_dev) +{ + struct shash_desc *desc; + struct crypto_shash *tfm; + + tfm = crypto_alloc_shash("hmac(sha256)", 0, 0); + if (IS_ERR(tfm)) + return 
PTR_ERR(tfm); + + desc = kzalloc(sizeof(*desc) + crypto_shash_descsize(tfm), GFP_KERNEL); + if (!desc) { + crypto_free_shash(tfm); + return -ENOMEM; + } + + desc->tfm = tfm; + mux_dev->hash_desc = desc; + + return 0; +} + +static void rpmb_mux_hmac_256_free(struct rpmb_mux_dev *mux_dev) +{ + struct shash_desc *desc = mux_dev->hash_desc; + + crypto_free_shash(desc->tfm); + kfree(desc); + + mux_dev->hash_desc = NULL; +} + +static int rpmb_mux_calc_hmac(struct rpmb_mux_dev *mux_dev, + struct rpmb_frame_jdec *frames, + unsigned int blks, u8 *mac) +{ + struct shash_desc *desc = mux_dev->hash_desc; + int ret; + unsigned int i; + + ret = crypto_shash_init(desc); + if (ret) + return ret; + + for (i = 0; i < blks; i++) { + ret = crypto_shash_update(desc, frames[i].data, + rpmb_jdec_hmac_data_len); + if (ret) + return ret; + } + + ret = crypto_shash_final(desc, mac); + + return ret; +} + +static int rpmb_program_key(struct rpmb_mux_dev *mux_dev) +{ + struct rpmb_frame_jdec *frame_write, *frame_rel, *frame_out; + struct rpmb_cmd *cmds; + int ret; + + frame_write = kzalloc(sizeof(*frame_write), GFP_KERNEL); + frame_rel = kzalloc(sizeof(*frame_rel), GFP_KERNEL); + frame_out = kzalloc(sizeof(*frame_out), GFP_KERNEL); + cmds = kcalloc(3, sizeof(*cmds), GFP_KERNEL); + if (!frame_write || !frame_rel || !frame_out || !cmds) { + ret = -ENOMEM; + goto out; + } + + /* fill rel write frame */ + memcpy(frame_rel->key_mac, mux_dev->rpmb_key, + sizeof(mux_dev->rpmb_key)); + frame_rel->req_resp = cpu_to_be16(RPMB_PROGRAM_KEY); + + /* fill write frame */ + frame_write->req_resp = cpu_to_be16(RPMB_RESULT_READ); + + /* fill io cmd */ + cmds[0].flags = RPMB_F_WRITE | RPMB_F_REL_WRITE; + cmds[0].nframes = 1; + cmds[0].frames = frame_rel; + cmds[1].flags = RPMB_F_WRITE; + cmds[1].nframes = 1; + cmds[1].frames = frame_write; + cmds[2].flags = 0; + cmds[2].nframes = 1; + cmds[2].frames = frame_out; + + ret = rpmb_cmd_seq(mux_dev->rdev, cmds, 3); + if (ret) + goto out; + + if 
(be16_to_cpu(frame_out->result) != RPMB_ERR_OK) { + ret = -EPERM; + dev_err(&mux_dev->rdev->dev, "rpmb program key failed(0x%X).\n", + be16_to_cpu(frame_out->result)); + } + +out: + kfree(frame_write); + kfree(frame_rel); + kfree(frame_out); + kfree(cmds); + + return ret; +} + +static int rpmb_get_counter(struct rpmb_mux_dev *mux_dev) +{ + struct rpmb_frame_jdec *in_frame, *out_frame; + struct rpmb_cmd *cmds; + int ret; + u8 mac[32]; + + in_frame = kzalloc(sizeof(*in_frame), GFP_KERNEL); + out_frame = kzalloc(sizeof(*out_frame), GFP_KERNEL); + cmds = kcalloc(2, sizeof(*cmds), GFP_KERNEL); + if (!in_frame || !out_frame || !cmds) { + ret = -ENOMEM; + goto out; + } + + in_frame->req_resp = cpu_to_be16(RPMB_GET_WRITE_COUNTER); + cmds[0].flags = RPMB_F_WRITE; + cmds[0].nframes = 1; + cmds[0].frames = in_frame; + cmds[1].flags = 0; + cmds[1].nframes = 1; + cmds[1].frames = out_frame; + + ret = rpmb_cmd_seq(mux_dev->rdev, cmds, 2); + if (ret) + goto out; + + ret = rpmb_mux_calc_hmac(mux_dev, out_frame, 1, mac); + if (ret) { + dev_err(&mux_dev->rdev->dev, "MAC calculation failed for read counter\n"); + goto out; + } + + if (memcmp(mac, out_frame->key_mac, sizeof(mac))) { + ret = -EPERM; + dev_err(&mux_dev->rdev->dev, "MAC check failed for read counter\n"); + goto out; + } + + if (be16_to_cpu(out_frame->result) == RPMB_ERR_NO_KEY) { + dev_dbg(&mux_dev->rdev->dev, "Start to program key...\n"); + ret = rpmb_program_key(mux_dev); + if (ret) + goto out; + } else if (be16_to_cpu(out_frame->result) != RPMB_ERR_OK) { + ret = -EPERM; + dev_err(&mux_dev->rdev->dev, "get rpmb counter failed(0x%X).\n", + be16_to_cpu(out_frame->result)); + goto out; + } + + mux_dev->write_counter = be32_to_cpu(out_frame->write_counter); + +out: + kfree(in_frame); + kfree(out_frame); + kfree(cmds); + + return ret; +} + +static size_t rpmb_ioc_frames_len(struct rpmb_dev *rdev, size_t nframes) +{ + return rpmb_ioc_frames_len_jdec(nframes); +} + +/** + * rpmb_mux_copy_from_user - copy rpmb command from the 
user space + * + * @rdev: rpmb device + * @cmd: internal cmd structure + * @ucmd: user space cmd structure + * + * Return: 0 on success, <0 on error + */ +static int rpmb_mux_copy_from_user(struct rpmb_dev *rdev, + struct rpmb_cmd *cmd, + struct rpmb_ioc_cmd __user *ucmd) +{ + void *frames; + u64 frames_ptr; + + if (get_user(cmd->flags, &ucmd->flags)) + return -EFAULT; + + if (get_user(cmd->nframes, &ucmd->nframes)) + return -EFAULT; + + if (cmd->nframes > RPMB_MAX_FRAMES) + return -EOVERFLOW; + + /* some archs have issues with 64bit get_user */ + if (copy_from_user(&frames_ptr, &ucmd->frames_ptr, sizeof(frames_ptr))) + return -EFAULT; + + frames = memdup_user(u64_to_user_ptr(frames_ptr), + rpmb_ioc_frames_len(rdev, cmd->nframes)); + if (IS_ERR(frames)) + return PTR_ERR(frames); + + cmd->frames = frames; + return 0; +} + +/** + * rpmb_mux_copy_to_user - copy rpmb command to the user space + * + * @rdev: rpmb device + * @ucmd: user space cmd structure + * @cmd: internal cmd structure + * + * Return: 0 on success, <0 on error + */ +static int rpmb_mux_copy_to_user(struct rpmb_dev *rdev, + struct rpmb_ioc_cmd __user *ucmd, + struct rpmb_cmd *cmd) +{ + u64 frames_ptr; + + if (copy_from_user(&frames_ptr, &ucmd->frames_ptr, sizeof(frames_ptr))) + return -EFAULT; + + /* some archs have issues with 64bit get_user */ + if (copy_to_user(u64_to_user_ptr(frames_ptr), cmd->frames, + rpmb_ioc_frames_len(rdev, cmd->nframes))) + return -EFAULT; + + return 0; +} + +static int rpmb_replace_write_frame(struct rpmb_mux_dev *mux_dev, + struct rpmb_cmd *cmds, u32 ncmd) +{ + u32 i; + u32 frame_cnt; + __be32 write_counter; + struct rpmb_frame_jdec *in_frames = cmds[0].frames; + + if (in_frames->req_resp != cpu_to_be16(RPMB_WRITE_DATA)) { + dev_err(&mux_dev->rdev->dev, "rpmb ioctl frame is unsupported(0x%X).\n", + in_frames->req_resp); + return -EINVAL; + } + + frame_cnt = cmds[0].nframes; + write_counter = cpu_to_be32(mux_dev->write_counter); + for (i = 0; i < frame_cnt; i++) + 
in_frames[i].write_counter = write_counter; + + if (rpmb_mux_calc_hmac(mux_dev, in_frames, frame_cnt, + in_frames[frame_cnt - 1].key_mac)) { + dev_err(&mux_dev->rdev->dev, "MAC calculation failed for rpmb write\n"); + return -ERANGE; + } + + return 0; +} + +static int rpmb_check_mac(struct rpmb_mux_dev *mux_dev, struct rpmb_cmd *cmds) +{ + u32 frame_cnt; + u8 mac[32]; + struct rpmb_frame_jdec *in_frames = cmds[0].frames; + + frame_cnt = cmds[0].nframes; + + if (rpmb_mux_calc_hmac(mux_dev, in_frames, frame_cnt, mac)) { + dev_err(&mux_dev->rdev->dev, "MAC calculation failed for rpmb write\n"); + return -ERANGE; + } + + if (memcmp(mac, in_frames[frame_cnt - 1].key_mac, sizeof(mac))) { + dev_err(&mux_dev->rdev->dev, "MAC check failed for write data\n"); + return -EPERM; + } + + return 0; +} + +static int rpmb_check_result(struct rpmb_mux_dev *mux_dev, + struct rpmb_cmd *cmds, u32 ncmd) +{ + struct rpmb_frame_jdec *out_frames = cmds[ncmd - 1].frames; + int ret; + + ret = rpmb_check_mac(mux_dev, cmds); + if (ret) { + dev_err(&mux_dev->rdev->dev, "rpmb check mac fail!\n"); + return ret; + } + + /* write retry */ + if (out_frames->result == cpu_to_be16(RPMB_ERR_COUNTER)) { + dev_err(&mux_dev->rdev->dev, "rpmb counter error, write retry!\n"); + memset(out_frames, 0, sizeof(*out_frames)); + + ret = rpmb_get_counter(mux_dev); + if (ret) { + dev_err(&mux_dev->rdev->dev, "rpmb_get_counter failed!\n"); + return ret; + } + + /* Since phy_counter has changed, + * so we have to generate mac again + */ + ret = rpmb_replace_write_frame(mux_dev, cmds, ncmd); + if (ret) { + dev_err(&mux_dev->rdev->dev, "rpmb replace write frame failed\n"); + return ret; + } + + ret = rpmb_cmd_seq(mux_dev->rdev, cmds, ncmd); + if (ret) { + dev_err(&mux_dev->rdev->dev, "rpmb write retry failed\n"); + return ret; + } + + ret = rpmb_check_mac(mux_dev, cmds); + if (ret) { + dev_err(&mux_dev->rdev->dev, "write retry rpmb check mac fail!\n"); + return ret; + } + } + + if (out_frames->result == 
cpu_to_be16(RPMB_ERR_OK)) { + dev_dbg(&mux_dev->rdev->dev, "write_counter =%d\n", + mux_dev->write_counter); + mux_dev->write_counter++; + } else { + dev_err(&mux_dev->rdev->dev, "ERR result is 0x%X.\n", + be16_to_cpu(out_frames->result)); + } + + return 0; +} + +/** + * rpmb_ioctl_seq_cmd() - issue an rpmb command sequence + * @mux_dev: rpmb mux_device + * @ptr: rpmb cmd sequence + * + * RPMB_IOC_SEQ_CMD handler + * + * Return: 0 on success, <0 on error + */ +static long rpmb_ioctl_seq_cmd(struct rpmb_mux_dev *mux_dev, + struct rpmb_ioc_seq_cmd __user *ptr) +{ + struct rpmb_dev *rdev = mux_dev->rdev; + __u64 ncmds; + struct rpmb_cmd *cmds; + struct rpmb_ioc_cmd __user *ucmds; + unsigned int i; + int ret; + + /* The caller must have CAP_SYS_RAWIO, like mmc ioctl */ + if (!capable(CAP_SYS_RAWIO)) + return -EPERM; + + /* some archs have issues with 64bit get_user */ + if (copy_from_user(&ncmds, &ptr->num_of_cmds, sizeof(ncmds))) + return -EFAULT; + + if (ncmds > 3) { + dev_err(&rdev->dev, "supporting up to 3 packets (%llu)\n", + ncmds); + return -EINVAL; + } + + cmds = kcalloc(ncmds, sizeof(*cmds), GFP_KERNEL); + if (!cmds) + return -ENOMEM; + + ucmds = (struct rpmb_ioc_cmd __user *)ptr->cmds; + for (i = 0; i < ncmds; i++) { + ret = rpmb_mux_copy_from_user(rdev, &cmds[i], &ucmds[i]); + if (ret) + goto out; + } + + if (cmds->flags & RPMB_F_REL_WRITE) { + ret = rpmb_replace_write_frame(mux_dev, cmds, ncmds); + if (ret) + goto out; + } + + ret = rpmb_cmd_seq(rdev, cmds, ncmds); + if (ret) + goto out; + + if (cmds->flags & RPMB_F_REL_WRITE) { + ret = rpmb_check_result(mux_dev, cmds, ncmds); + if (ret) + goto out; + } + + for (i = 0; i < ncmds; i++) { + ret = rpmb_mux_copy_to_user(rdev, &ucmds[i], &cmds[i]); + if (ret) + goto out; + } + +out: + for (i = 0; i < ncmds; i++) + kfree(cmds[i].frames); + kfree(cmds); + + return ret; +} + +static long rpmb_mux_ioctl(struct file *fp, unsigned int cmd, unsigned long arg) +{ + long ret; + struct rpmb_mux_dev *mux_dev = 
fp->private_data; + void __user *ptr = (void __user *)arg; + + mutex_lock(&mux_dev->lock); + + if (!mux_dev->rdev) { + pr_err("rpmb dev is NULL!\n"); + ret = -EINVAL; + goto out; + } + + if (!mux_dev->wc_inited) { + ret = rpmb_get_counter(mux_dev); + if (ret) { + dev_err(&mux_dev->rdev->dev, + "init counter failed = %ld\n", ret); + goto out; + } + + mux_dev->wc_inited = true; + } + + switch (cmd) { + case RPMB_IOC_SEQ_CMD: + ret = rpmb_ioctl_seq_cmd(mux_dev, ptr); + break; + default: + dev_err(&mux_dev->rdev->dev, "unsupport:0x%X!!!\n", cmd); + ret = -ENOIOCTLCMD; + } + +out: + mutex_unlock(&mux_dev->lock); + + return ret; +} + +static int rpmb_mux_start(struct rpmb_mux_dev *mux_dev, struct rpmb_dev *rdev) +{ + if (mux_dev->rdev == rdev) + return 0; + + if (mux_dev->rdev) { + dev_err(&rdev->dev, "rpmb device already registered\n"); + return -EEXIST; + } + + mux_dev->rdev = rpmb_dev_get(rdev); + dev_dbg(&rdev->dev, "rpmb partition created\n"); + return 0; +} + +static int rpmb_mux_stop(struct rpmb_mux_dev *mux_dev, struct rpmb_dev *rdev) +{ + if (!mux_dev->rdev) { + dev_err(&rdev->dev, "Already stopped\n"); + return -EPROTO; + } + + if (rdev && mux_dev->rdev != rdev) { + dev_err(&rdev->dev, "Wrong RPMB on stop\n"); + return -EINVAL; + } + + rpmb_dev_put(mux_dev->rdev); + mux_dev->rdev = NULL; + + dev_dbg(&rdev->dev, "rpmb partition removed\n"); + return 0; +} + +static int rpmb_add_device(struct device *dev, struct class_interface *intf) +{ + struct rpmb_mux_dev *mux_dev; + struct rpmb_dev *rdev = to_rpmb_dev(dev); + u8 rpmb_key[RPMB_MAX_PARTITION_NUMBER][RPMB_KEY_LENGTH]; + int ret; + + mux_dev = container_of(intf, struct rpmb_mux_dev, rpmb_interface); + + if (!rdev->ops) + return -EINVAL; + + if (rdev->ops->type != RPMB_TYPE_EMMC) { + dev_err(&rdev->dev, "support RPMB_TYPE_EMMC only.\n"); + return -ENOENT; + } + + mutex_lock(&mux_dev->lock); + + ret = rpmb_mux_start(mux_dev, rdev); + if (ret) { + dev_err(&rdev->dev, "fail in rpmb_mux_start.\n"); + 
mutex_unlock(&mux_dev->lock); + return ret; + } + + mutex_unlock(&mux_dev->lock); + + memset(rpmb_key, 0, sizeof(rpmb_key)); + ret = rpmb_key_get(mux_dev->rdev->ops->dev_id, + mux_dev->rdev->ops->dev_id_len, + RPMB_MAX_PARTITION_NUMBER, + rpmb_key); + if (ret) { + dev_err(&rdev->dev, "rpmb_key_get failed.\n"); + goto err_rpmb_key_get; + } + memcpy(mux_dev->rpmb_key, &rpmb_key[0], sizeof(mux_dev->rpmb_key)); + memset(rpmb_key, 0, sizeof(rpmb_key)); + + ret = crypto_shash_setkey(mux_dev->hash_desc->tfm, + mux_dev->rpmb_key, 32); + if (ret) { + dev_err(&rdev->dev, "set key failed = %d\n", ret); + goto err_crypto_shash_setkey; + } + + return 0; + +err_crypto_shash_setkey: + memset(mux_dev->rpmb_key, 0, sizeof(mux_dev->rpmb_key)); +err_rpmb_key_get: + rpmb_mux_hmac_256_free(mux_dev); + device_destroy(rpmb_mux_class, rpmb_mux_devt); + class_destroy(rpmb_mux_class); + cdev_del(&mux_dev->cdev); + kfree(mux_dev); + unregister_chrdev_region(rpmb_mux_devt, 0); + + return ret; +} + +static void rpmb_remove_device(struct device *dev, struct class_interface *intf) +{ + struct rpmb_mux_dev *mux_dev; + struct rpmb_dev *rdev = to_rpmb_dev(dev); + + mux_dev = container_of(intf, struct rpmb_mux_dev, rpmb_interface); + + mutex_lock(&mux_dev->lock); + if (rpmb_mux_stop(mux_dev, rdev)) + dev_err(&rdev->dev, "fail in rpmb_mux_stop.\n"); + mutex_unlock(&mux_dev->lock); +} + +#ifdef CONFIG_COMPAT +static long rpmb_mux_compat_ioctl(struct file *fp, unsigned int cmd, + unsigned long arg) +{ + return rpmb_mux_ioctl(fp, cmd, (unsigned long)compat_ptr(arg)); +} +#endif /* CONFIG_COMPAT */ + +static const struct file_operations rpmb_mux_fops = { + .open = rpmb_mux_open, + .release = rpmb_mux_release, + .unlocked_ioctl = rpmb_mux_ioctl, +#ifdef CONFIG_COMPAT + .compat_ioctl = rpmb_mux_compat_ioctl, +#endif + .llseek = noop_llseek, + .owner = THIS_MODULE, +}; + +static int __init rpmb_mux_init(void) +{ + int ret; + struct device *class_dev; + struct rpmb_mux_dev *mux_dev; + + ret = 
alloc_chrdev_region(&rpmb_mux_devt, 0, MINORMASK, "rpmbmux"); + if (ret < 0) { + pr_err("unable to allocate char dev region\n"); + return ret; + } + + mux_dev = kzalloc(sizeof(*mux_dev), GFP_KERNEL); + if (!mux_dev) { + ret = -ENOMEM; + goto err_kzalloc; + } + __mux_dev = mux_dev; + + cdev_init(&mux_dev->cdev, &rpmb_mux_fops); + mux_dev->cdev.owner = THIS_MODULE; + ret = cdev_add(&mux_dev->cdev, rpmb_mux_devt, 1); + if (ret) { + pr_err("unable to cdev_add.\n"); + goto err_cdev_add; + } + + rpmb_mux_class = class_create(THIS_MODULE, "rpmbmux"); + if (IS_ERR(rpmb_mux_class)) { + ret = PTR_ERR(rpmb_mux_class); + goto err_class_create; + } + + class_dev = device_create(rpmb_mux_class, NULL, + rpmb_mux_devt, mux_dev, "rpmbmux"); + if (IS_ERR(class_dev)) { + pr_err("failed to device_create!!!\n"); + ret = PTR_ERR(class_dev); + goto err_device_create; + } + + ret = rpmb_mux_hmac_256_alloc(mux_dev); + if (ret) { + pr_err("failed to set rpmb_mux_hmac_256_alloc.\n"); + goto err_rpmb_mux_hmac_256_alloc; + } + + mux_dev->rpmb_interface.add_dev = rpmb_add_device; + mux_dev->rpmb_interface.remove_dev = rpmb_remove_device; + mux_dev->rpmb_interface.class = &rpmb_class; + + ret = class_interface_register(&mux_dev->rpmb_interface); + if (ret) { + pr_err("Can't register interface\n"); + goto err_class_interface_register; + } + + return 0; + +err_class_interface_register: +err_rpmb_mux_hmac_256_alloc: + device_destroy(rpmb_mux_class, rpmb_mux_devt); +err_device_create: + class_destroy(rpmb_mux_class); +err_class_create: + cdev_del(&mux_dev->cdev); +err_cdev_add: + kfree(mux_dev); +err_kzalloc: + unregister_chrdev_region(rpmb_mux_devt, 0); + return ret; +} + +static void __exit rpmb_mux_exit(void) +{ + struct rpmb_mux_dev *mux_dev = __mux_dev; + + class_interface_unregister(&mux_dev->rpmb_interface); + device_destroy(rpmb_mux_class, rpmb_mux_devt); + class_destroy(rpmb_mux_class); + cdev_del(&mux_dev->cdev); + unregister_chrdev_region(rpmb_mux_devt, 0); + + 
rpmb_mux_hmac_256_free(mux_dev); + memset(mux_dev->rpmb_key, 0, sizeof(mux_dev->rpmb_key)); + kfree(mux_dev); +} + +module_init(rpmb_mux_init); +module_exit(rpmb_mux_exit); + +MODULE_AUTHOR("Intel Corporation"); +MODULE_DESCRIPTION("RPMB Mux kernel module"); +MODULE_LICENSE("Dual BSD/GPL"); diff --git a/drivers/char/rpmb/mux/mux_hkdf.c b/drivers/char/rpmb/mux/mux_hkdf.c new file mode 100644 index 0000000000000..d4234924148eb --- /dev/null +++ b/drivers/char/rpmb/mux/mux_hkdf.c @@ -0,0 +1,166 @@ +// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 +/* + * RPMB Mux HKDF + * + * Copyright (c) 2018 Intel Corporation. All rights reserved. + */ + +#include +#include +#include "mux_hkdf.h" + +static int mux_sha256_extract(u8 *out_key, size_t out_len, + struct shash_desc *desc, + const u8 *secret, size_t secret_len, + const u8 *salt, size_t salt_len) +{ + int ret; + u8 salt0[SHA256_HASH_SIZE]; + + if (!salt || !salt_len) { + memset(salt0, 0, sizeof(salt0)); + salt = salt0; + salt_len = sizeof(salt0); + } + + ret = crypto_shash_setkey(desc->tfm, salt, salt_len); + if (ret) { + pr_err("set key failed = %d\n", ret); + goto out; + } + + ret = crypto_shash_init(desc); + if (ret) + goto out; + + ret = crypto_shash_update(desc, secret, secret_len); + if (ret) + goto out; + + ret = crypto_shash_final(desc, out_key); + if (ret) + goto out; + +out: + return ret; +} + +static int mux_sha256_expand(u8 *out_key, size_t out_len, + struct shash_desc *desc, + const u8 *prk, size_t prk_len, + const u8 *info, size_t info_len) +{ + const size_t digest_len = SHA256_HASH_SIZE; + u8 previous[SHA256_HASH_SIZE]; + size_t n, done = 0; + unsigned int i; + int ret = 0; + + n = (out_len + digest_len - 1) / digest_len; + + /* check for possible integer overflow */ + if (out_len + digest_len < out_len) + return 0; + + if (n > 255) + return 0; + + for (i = 0; i < n; i++) { + u8 ctr = i + 1; + size_t todo; + + ret = crypto_shash_setkey(desc->tfm, prk, prk_len); + if (ret) + goto out; + + ret = 
crypto_shash_init(desc); + if (ret) + goto out; + + if (i != 0 && crypto_shash_update(desc, previous, digest_len)) + goto out; + + if (crypto_shash_update(desc, info, info_len) || + crypto_shash_update(desc, &ctr, 1) || + crypto_shash_final(desc, previous)) { + ret = -EPERM; + goto out; + } + + todo = digest_len; + /* Check if the length of left buffer is smaller than + * 32 to make sure no buffer overflow in below memcpy + */ + if (done + todo > out_len) + todo = out_len - done; + + memcpy(out_key + done, previous, todo); + done += todo; + } + +out: + memset(previous, 0, sizeof(previous)); + + return ret; +} + +static struct shash_desc *mux_hkdf_init_hmac_sha256_desc(void) +{ + struct shash_desc *desc; + struct crypto_shash *tfm; + + tfm = crypto_alloc_shash("hmac(sha256)", 0, 0); + if (IS_ERR(tfm)) + return ERR_PTR(-EFAULT); + + desc = kzalloc(sizeof(*desc) + crypto_shash_descsize(tfm), GFP_KERNEL); + if (!desc) { + crypto_free_shash(tfm); + return ERR_PTR(-ENOMEM); + } + desc->tfm = tfm; + + return desc; +} + +int mux_hkdf_sha256(u8 *out_key, size_t out_len, + const u8 *secret, size_t secret_len, + const u8 *salt, size_t salt_len, + const u8 *info, size_t info_len) +{ + u8 prk[SHA256_HASH_SIZE]; + size_t prk_len = SHA256_HASH_SIZE; + int ret; + struct shash_desc *desc; + + if (!out_key || !out_len) + return -EINVAL; + + if (!secret || !secret_len) + return -EINVAL; + + if (!info && info_len) + return -EINVAL; + + desc = mux_hkdf_init_hmac_sha256_desc(); + if (IS_ERR(desc)) + return PTR_ERR(desc); + + memset(prk, 0, sizeof(prk)); + + ret = mux_sha256_extract(prk, prk_len, desc, + secret, secret_len, + salt, salt_len); + if (ret) + goto err_free_shash; + + ret = mux_sha256_expand(out_key, out_len, desc, + prk, prk_len, + info, info_len); + +err_free_shash: + crypto_free_shash(desc->tfm); + kfree(desc); + + return ret; +} diff --git a/drivers/char/rpmb/mux/mux_hkdf.h b/drivers/char/rpmb/mux/mux_hkdf.h new file mode 100644 index 0000000000000..0055884054c25 --- 
/dev/null +++ b/drivers/char/rpmb/mux/mux_hkdf.h @@ -0,0 +1,14 @@ +/* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 */ +/* + * Copyright (C) 2018 Intel Corp. All rights reserved + */ +#ifndef _MUX_HKDF_H +#define _MUX_HKDF_H + +#define SHA256_HASH_SIZE 32 + +int mux_hkdf_sha256(u8 *out_key, size_t out_len, + const u8 *secret, size_t secret_len, + const u8 *salt, size_t salt_len, + const u8 *info, size_t info_len); +#endif /* !_MUX_HKDF_H */ diff --git a/drivers/char/rpmb/rpmb-cdev.h b/drivers/char/rpmb/rpmb-cdev.h new file mode 100644 index 0000000000000..e59ff0c05e9d6 --- /dev/null +++ b/drivers/char/rpmb/rpmb-cdev.h @@ -0,0 +1,17 @@ +/* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 */ +/* + * Copyright (C) 2015-2018 Intel Corp. All rights reserved + */ +#ifdef CONFIG_RPMB_INTF_DEV +int __init rpmb_cdev_init(void); +void __exit rpmb_cdev_exit(void); +void rpmb_cdev_prepare(struct rpmb_dev *rdev); +void rpmb_cdev_add(struct rpmb_dev *rdev); +void rpmb_cdev_del(struct rpmb_dev *rdev); +#else +static inline int __init rpmb_cdev_init(void) { return 0; } +static inline void __exit rpmb_cdev_exit(void) {} +static inline void rpmb_cdev_prepare(struct rpmb_dev *rdev) {} +static inline void rpmb_cdev_add(struct rpmb_dev *rdev) {} +static inline void rpmb_cdev_del(struct rpmb_dev *rdev) {} +#endif /* CONFIG_RPMB_INTF_DEV */ diff --git a/drivers/char/rpmb/rpmb_sim.c b/drivers/char/rpmb/rpmb_sim.c new file mode 100644 index 0000000000000..728e255113777 --- /dev/null +++ b/drivers/char/rpmb/rpmb_sim.c @@ -0,0 +1,715 @@ +// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 +/* + * Copyright(c) 2015 - 2018 Intel Corporation. All rights reserved. 
+ */ +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include +#include +#include +#include +#include +#include + +#include + +static const char id[] = "RPMB:SIM"; +#define CAPACITY_UNIT SZ_128K +#define CAPACITY_MIN SZ_128K +#define CAPACITY_MAX SZ_16M +#define BLK_UNIT SZ_256 + +static unsigned int max_wr_blks = 2; +module_param(max_wr_blks, uint, 0644); +MODULE_PARM_DESC(max_wr_blks, "max blocks that can be written in a single command (default: 2)"); + +static unsigned int daunits = 1; +module_param(daunits, uint, 0644); +MODULE_PARM_DESC(daunits, "number of data area units of 128K (default: 1)"); + +struct blk { + u8 data[BLK_UNIT]; +}; + +/** + * struct rpmb_sim_dev + * + * @dev: back pointer device + * @rdev: rpmb device + * @auth_key: Authentication key register which is used to authenticate + * accesses when MAC is calculated; + * @auth_key_set: true if authentication key was set + * @write_counter: Counter value for the total amount of successful + * authenticated data write requests made by the host. + * The initial value of this register after production is 00000000h. + * The value will be incremented by one along with each successful + * programming access. The value cannot be reset. 
After the counter + * has reached the maximum value of FFFFFFFFh, + * it will not be incremented anymore (overflow prevention) + * @hash_desc: hmac(sha256) shash descriptor + * + * @res_frames: frame that holds the result of the last write operation + * @out_frames: next read operation result frames + * @out_frames_cnt: number of the output frames + * + * @capacity: size of the partition in bytes multiple of 128K + * @blkcnt: block count + * @da: data area in blocks + */ +struct rpmb_sim_dev { + struct device *dev; + struct rpmb_dev *rdev; + u8 auth_key[32]; + bool auth_key_set; + u32 write_counter; + struct shash_desc *hash_desc; + + struct rpmb_frame_jdec res_frames[1]; + struct rpmb_frame_jdec *out_frames; + unsigned int out_frames_cnt; + + size_t capacity; + size_t blkcnt; + struct blk *da; +}; + +static __be16 op_result(struct rpmb_sim_dev *rsdev, u16 result) +{ + if (!rsdev->auth_key_set) + return cpu_to_be16(RPMB_ERR_NO_KEY); + + if (rsdev->write_counter == 0xFFFFFFFF) + result |= RPMB_ERR_COUNTER_EXPIRED; + + return cpu_to_be16(result); +} + +static __be16 req_to_resp(u16 req) +{ + return cpu_to_be16(RPMB_REQ2RESP(req)); +} + +static int rpmb_sim_calc_hmac(struct rpmb_sim_dev *rsdev, + struct rpmb_frame_jdec *frames, + unsigned int blks, u8 *mac) +{ + struct shash_desc *desc = rsdev->hash_desc; + int i; + int ret; + + ret = crypto_shash_init(desc); + if (ret) + goto out; + + for (i = 0; i < blks; i++) { + ret = crypto_shash_update(desc, frames[i].data, + rpmb_jdec_hmac_data_len); + if (ret) + goto out; + } + ret = crypto_shash_final(desc, mac); +out: + if (ret) + dev_err(rsdev->dev, "digest error = %d", ret); + + return ret; +} + +static int rpmb_op_not_programmed(struct rpmb_sim_dev *rsdev, u16 req) +{ + struct rpmb_frame_jdec *res_frame = rsdev->res_frames; + + res_frame->req_resp = req_to_resp(req); + res_frame->result = op_result(rsdev, RPMB_ERR_NO_KEY); + + rsdev->out_frames = res_frame; + rsdev->out_frames_cnt = 1; + + dev_err(rsdev->dev, "not 
programmed\n"); + + return 0; +} + +static int rpmb_op_program_key(struct rpmb_sim_dev *rsdev, + struct rpmb_frame_jdec *in_frame, u32 cnt) +{ + struct rpmb_frame_jdec *res_frame = rsdev->res_frames; + struct crypto_shash *tfm = rsdev->hash_desc->tfm; + u16 req; + int ret; + u16 err = RPMB_ERR_OK; + + req = be16_to_cpu(in_frame[0].req_resp); + + if (req != RPMB_PROGRAM_KEY) + return -EINVAL; + + if (cnt != 1) { + dev_err(rsdev->dev, "wrong number of frames %d != 1\n", cnt); + return -EINVAL; + } + + if (rsdev->auth_key_set) { + dev_err(rsdev->dev, "key already set\n"); + err = RPMB_ERR_WRITE; + goto out; + } + + ret = crypto_shash_setkey(tfm, in_frame[0].key_mac, 32); + if (ret) { + dev_err(rsdev->dev, "set key failed = %d\n", ret); + err = RPMB_ERR_GENERAL; + goto out; + } + + dev_dbg(rsdev->dev, "digest size %u\n", crypto_shash_digestsize(tfm)); + + memcpy(rsdev->auth_key, in_frame[0].key_mac, 32); + rsdev->auth_key_set = true; +out: + + memset(res_frame, 0, sizeof(*res_frame)); + res_frame->req_resp = req_to_resp(req); + res_frame->result = op_result(rsdev, err); + + return 0; +} + +static int rpmb_op_get_wr_counter(struct rpmb_sim_dev *rsdev, + struct rpmb_frame_jdec *in_frame, u32 cnt) +{ + struct rpmb_frame_jdec *frame; + int ret = 0; + u16 req; + u16 err; + + req = be16_to_cpu(in_frame[0].req_resp); + if (req != RPMB_GET_WRITE_COUNTER) + return -EINVAL; + + if (cnt != 1) { + dev_err(rsdev->dev, "wrong number of frames %d != 1\n", cnt); + return -EINVAL; + } + + frame = kcalloc(1, sizeof(*frame), GFP_KERNEL); + if (!frame) { + err = RPMB_ERR_READ; + ret = -ENOMEM; + rsdev->out_frames = rsdev->res_frames; + rsdev->out_frames_cnt = cnt; + goto out; + } + + rsdev->out_frames = frame; + rsdev->out_frames_cnt = cnt; + + frame->req_resp = req_to_resp(req); + frame->write_counter = cpu_to_be32(rsdev->write_counter); + memcpy(frame->nonce, in_frame[0].nonce, 16); + + err = RPMB_ERR_OK; + if (rpmb_sim_calc_hmac(rsdev, frame, cnt, frame->key_mac)) + err = 
RPMB_ERR_READ; + +out: + rsdev->out_frames[0].req_resp = req_to_resp(req); + rsdev->out_frames[0].result = op_result(rsdev, err); + + return ret; +} + +static int rpmb_op_write_data(struct rpmb_sim_dev *rsdev, + struct rpmb_frame_jdec *in_frame, u32 cnt) +{ + struct rpmb_frame_jdec *res_frame = rsdev->res_frames; + u8 mac[32]; + u16 req, err, addr, blks; + unsigned int i; + int ret = 0; + + req = be16_to_cpu(in_frame[0].req_resp); + if (req != RPMB_WRITE_DATA) + return -EINVAL; + + if (rsdev->write_counter == 0xFFFFFFFF) { + err = RPMB_ERR_WRITE; + goto out; + } + + blks = be16_to_cpu(in_frame[0].block_count); + if (blks == 0 || blks > cnt) { + dev_err(rsdev->dev, "wrong number of blocks: blks=%u cnt=%u\n", + blks, cnt); + ret = -EINVAL; + err = RPMB_ERR_GENERAL; + goto out; + } + + if (blks > max_wr_blks) { + err = RPMB_ERR_WRITE; + goto out; + } + + addr = be16_to_cpu(in_frame[0].addr); + if (addr >= rsdev->blkcnt) { + err = RPMB_ERR_ADDRESS; + goto out; + } + + if (rpmb_sim_calc_hmac(rsdev, in_frame, blks, mac)) { + err = RPMB_ERR_AUTH; + goto out; + } + + /* mac is in the last frame */ + if (memcmp(mac, in_frame[blks - 1].key_mac, sizeof(mac)) != 0) { + err = RPMB_ERR_AUTH; + goto out; + } + + if (be32_to_cpu(in_frame[0].write_counter) != rsdev->write_counter) { + err = RPMB_ERR_COUNTER; + goto out; + } + + if (addr + blks > rsdev->blkcnt) { + err = RPMB_ERR_WRITE; + goto out; + } + + dev_dbg(rsdev->dev, "Writing = %u blocks at addr = 0x%X\n", blks, addr); + err = RPMB_ERR_OK; + for (i = 0; i < blks; i++) + memcpy(rsdev->da[addr + i].data, in_frame[i].data, BLK_UNIT); + + rsdev->write_counter++; + + memset(res_frame, 0, sizeof(*res_frame)); + res_frame->req_resp = req_to_resp(req); + res_frame->write_counter = cpu_to_be32(rsdev->write_counter); + res_frame->addr = cpu_to_be16(addr); + if (rpmb_sim_calc_hmac(rsdev, res_frame, 1, res_frame->key_mac)) + err = RPMB_ERR_READ; + +out: + if (err != RPMB_ERR_OK) { + memset(res_frame, 0, sizeof(*res_frame)); + 
res_frame->req_resp = req_to_resp(req); + } + res_frame->result = op_result(rsdev, err); + + return ret; +} + +static int rpmb_do_read_data(struct rpmb_sim_dev *rsdev, + struct rpmb_frame_jdec *in_frame, u32 cnt) +{ + struct rpmb_frame_jdec *res_frame = rsdev->res_frames; + struct rpmb_frame_jdec *out_frames = NULL; + u8 mac[32]; + u16 req, err, addr, blks; + unsigned int i; + int ret; + + req = be16_to_cpu(in_frame->req_resp); + if (req != RPMB_READ_DATA) + return -EINVAL; + + /* eMMC intentionally set 0 here */ + blks = be16_to_cpu(in_frame->block_count); + blks = blks ?: cnt; + if (blks > cnt) { + dev_err(rsdev->dev, "wrong number of frames cnt %u\n", blks); + ret = -EINVAL; + err = RPMB_ERR_GENERAL; + goto out; + } + + out_frames = kcalloc(blks, sizeof(*out_frames), GFP_KERNEL); + if (!out_frames) { + ret = -ENOMEM; + err = RPMB_ERR_READ; + goto out; + } + + ret = 0; + addr = be16_to_cpu(in_frame[0].addr); + if (addr >= rsdev->blkcnt) { + err = RPMB_ERR_ADDRESS; + goto out; + } + + if (addr + blks > rsdev->blkcnt) { + err = RPMB_ERR_READ; + goto out; + } + + dev_dbg(rsdev->dev, "reading = %u blocks at addr = 0x%X\n", blks, addr); + for (i = 0; i < blks; i++) { + memcpy(out_frames[i].data, rsdev->da[addr + i].data, BLK_UNIT); + memcpy(out_frames[i].nonce, in_frame[0].nonce, 16); + out_frames[i].req_resp = req_to_resp(req); + out_frames[i].addr = in_frame[0].addr; + out_frames[i].block_count = cpu_to_be16(blks); + } + + if (rpmb_sim_calc_hmac(rsdev, out_frames, blks, mac)) { + err = RPMB_ERR_AUTH; + goto out; + } + + memcpy(out_frames[blks - 1].key_mac, mac, sizeof(mac)); + + err = RPMB_ERR_OK; + for (i = 0; i < blks; i++) + out_frames[i].result = op_result(rsdev, err); + + rsdev->out_frames = out_frames; + rsdev->out_frames_cnt = cnt; + + return 0; + +out: + memset(res_frame, 0, sizeof(*res_frame)); + res_frame->req_resp = req_to_resp(req); + res_frame->result = op_result(rsdev, err); + kfree(out_frames); + rsdev->out_frames = res_frame; + rsdev->out_frames_cnt 
= 1; + + return ret; +} + +static int rpmb_op_read_data(struct rpmb_sim_dev *rsdev, + struct rpmb_frame_jdec *in_frame, u32 cnt) +{ + struct rpmb_frame_jdec *res_frame = rsdev->res_frames; + u16 req; + + req = be16_to_cpu(in_frame->req_resp); + if (req != RPMB_READ_DATA) + return -EINVAL; + + memcpy(res_frame, in_frame, sizeof(*res_frame)); + + rsdev->out_frames = res_frame; + rsdev->out_frames_cnt = 1; + + return 0; +} + +static int rpmb_op_result_read(struct rpmb_sim_dev *rsdev, + struct rpmb_frame_jdec *frames, u32 cnt) +{ + u16 req = be16_to_cpu(frames[0].req_resp); + u16 blks = be16_to_cpu(frames[0].block_count); + + if (req != RPMB_RESULT_READ) + return -EINVAL; + + if (blks != 0) { + dev_err(rsdev->dev, "wrong number of frames %u != 0\n", blks); + return -EINVAL; + } + + rsdev->out_frames = rsdev->res_frames; + rsdev->out_frames_cnt = 1; + return 0; +} + +static int rpmb_sim_write(struct rpmb_sim_dev *rsdev, + struct rpmb_frame_jdec *frames, u32 cnt) +{ + u16 req; + int ret; + + if (!frames) + return -EINVAL; + + if (cnt == 0) + cnt = 1; + + req = be16_to_cpu(frames[0].req_resp); + if (!rsdev->auth_key_set && req != RPMB_PROGRAM_KEY) + return rpmb_op_not_programmed(rsdev, req); + + switch (req) { + case RPMB_PROGRAM_KEY: + dev_dbg(rsdev->dev, "rpmb: program key\n"); + ret = rpmb_op_program_key(rsdev, frames, cnt); + break; + case RPMB_WRITE_DATA: + dev_dbg(rsdev->dev, "rpmb: write data\n"); + ret = rpmb_op_write_data(rsdev, frames, cnt); + break; + case RPMB_GET_WRITE_COUNTER: + dev_dbg(rsdev->dev, "rpmb: get write counter\n"); + ret = rpmb_op_get_wr_counter(rsdev, frames, cnt); + break; + case RPMB_READ_DATA: + dev_dbg(rsdev->dev, "rpmb: read data\n"); + ret = rpmb_op_read_data(rsdev, frames, cnt); + break; + case RPMB_RESULT_READ: + dev_dbg(rsdev->dev, "rpmb: result read\n"); + ret = rpmb_op_result_read(rsdev, frames, cnt); + break; + default: + dev_err(rsdev->dev, "unsupported command %u\n", req); + ret = -EINVAL; + break; + } + + dev_dbg(rsdev->dev, 
"rpmb: ret=%d\n", ret); + + return ret; +} + +static int rpmb_sim_read(struct rpmb_sim_dev *rsdev, + struct rpmb_frame_jdec *frames, u32 cnt) +{ + int i; + + if (!frames) + return -EINVAL; + + if (cnt == 0) + cnt = 1; + + if (!rsdev->out_frames || rsdev->out_frames_cnt == 0) { + dev_err(rsdev->dev, "out_frames are not set\n"); + return -EINVAL; + } + + if (rsdev->out_frames->req_resp == cpu_to_be16(RPMB_READ_DATA)) + rpmb_do_read_data(rsdev, rsdev->out_frames, cnt); + + for (i = 0; i < min_t(u32, rsdev->out_frames_cnt, cnt); i++) + memcpy(&frames[i], &rsdev->out_frames[i], sizeof(frames[i])); + + if (rsdev->out_frames != rsdev->res_frames) + kfree(rsdev->out_frames); + + rsdev->out_frames = NULL; + rsdev->out_frames_cnt = 0; + dev_dbg(rsdev->dev, "rpmb: cnt=%d\n", cnt); + + return 0; +} + +static int rpmb_sim_cmd_seq(struct device *dev, u8 target, + struct rpmb_cmd *cmds, u32 ncmds) +{ + struct rpmb_sim_dev *rsdev; + int i; + int ret; + struct rpmb_cmd *cmd; + + if (!dev) + return -EINVAL; + + rsdev = dev_get_drvdata(dev); + + if (!rsdev) + return -EINVAL; + + for (ret = 0, i = 0; i < ncmds && !ret; i++) { + cmd = &cmds[i]; + if (cmd->flags & RPMB_F_WRITE) + ret = rpmb_sim_write(rsdev, cmd->frames, cmd->nframes); + else + ret = rpmb_sim_read(rsdev, cmd->frames, cmd->nframes); + } + return ret; +} + +static int rpmb_sim_get_capacity(struct device *dev, u8 target) +{ + return daunits; +} + +static struct rpmb_ops rpmb_sim_ops = { + .cmd_seq = rpmb_sim_cmd_seq, + .get_capacity = rpmb_sim_get_capacity, + .type = RPMB_TYPE_EMMC | RPMB_TYPE_SIM, +}; + +static int rpmb_sim_hmac_256_alloc(struct rpmb_sim_dev *rsdev) +{ + struct shash_desc *desc; + struct crypto_shash *tfm; + + tfm = crypto_alloc_shash("hmac(sha256)", 0, 0); + if (IS_ERR(tfm)) + return PTR_ERR(tfm); + + desc = kzalloc(sizeof(*desc) + crypto_shash_descsize(tfm), GFP_KERNEL); + if (!desc) { + crypto_free_shash(tfm); + return -ENOMEM; + } + + desc->tfm = tfm; + rsdev->hash_desc = desc; + + dev_dbg(rsdev->dev, 
"hamac(sha256) registered\n"); + return 0; +} + +static void rpmb_sim_hmac_256_free(struct rpmb_sim_dev *rsdev) +{ + struct shash_desc *desc = rsdev->hash_desc; + + if (desc->tfm) + crypto_free_shash(desc->tfm); + kfree(desc); + + rsdev->hash_desc = NULL; +} + +static int rpmb_sim_probe(struct device *dev) +{ + struct rpmb_sim_dev *rsdev; + int ret; + + rsdev = kzalloc(sizeof(*rsdev), GFP_KERNEL); + if (!rsdev) + return -ENOMEM; + + rsdev->dev = dev; + + ret = rpmb_sim_hmac_256_alloc(rsdev); + if (ret) + goto err; + + rsdev->capacity = CAPACITY_UNIT * daunits; + rsdev->blkcnt = rsdev->capacity / BLK_UNIT; + rsdev->da = kzalloc(rsdev->capacity, GFP_KERNEL); + if (!rsdev->da) { + ret = -ENOMEM; + goto err; + } + + rpmb_sim_ops.dev_id_len = strlen(id); + rpmb_sim_ops.dev_id = id; + rpmb_sim_ops.wr_cnt_max = max_wr_blks; + rpmb_sim_ops.rd_cnt_max = max_wr_blks; + rpmb_sim_ops.block_size = 1; + + rsdev->rdev = rpmb_dev_register(rsdev->dev, 0, &rpmb_sim_ops); + if (IS_ERR(rsdev->rdev)) { + ret = PTR_ERR(rsdev->rdev); + goto err; + } + + dev_info(dev, "registered RPMB capacity = %zu of %zu blocks\n", + rsdev->capacity, rsdev->blkcnt); + + dev_set_drvdata(dev, rsdev); + + return 0; +err: + rpmb_sim_hmac_256_free(rsdev); + if (rsdev) + kfree(rsdev->da); + kfree(rsdev); + return ret; +} + +static int rpmb_sim_remove(struct device *dev) +{ + struct rpmb_sim_dev *rsdev; + + rsdev = dev_get_drvdata(dev); + + rpmb_dev_unregister(rsdev->rdev); + + dev_set_drvdata(dev, NULL); + + rpmb_sim_hmac_256_free(rsdev); + + kfree(rsdev->da); + kfree(rsdev); + return 0; +} + +static void rpmb_sim_shutdown(struct device *dev) +{ + rpmb_sim_remove(dev); +} + +static int rpmb_sim_match(struct device *dev, struct device_driver *drv) +{ + return 1; +} + +static struct bus_type rpmb_sim_bus = { + .name = "rpmb_sim", + .match = rpmb_sim_match, +}; + +static struct device_driver rpmb_sim_drv = { + .name = "rpmb_sim", + .probe = rpmb_sim_probe, + .remove = rpmb_sim_remove, + .shutdown = 
rpmb_sim_shutdown, +}; + +static void rpmb_sim_dev_release(struct device *dev) +{ +} + +static struct device rpmb_sim_dev; + +static int __init rpmb_sim_init(void) +{ + int ret; + struct device *dev = &rpmb_sim_dev; + struct device_driver *drv = &rpmb_sim_drv; + + ret = bus_register(&rpmb_sim_bus); + if (ret) + return ret; + + dev->bus = &rpmb_sim_bus; + dev->release = rpmb_sim_dev_release; + dev_set_name(dev, "%s", "rpmb_sim"); + ret = device_register(dev); + if (ret) { + pr_err("device register failed %d\n", ret); + goto err_device; + } + + drv->bus = &rpmb_sim_bus; + ret = driver_register(drv); + if (ret) { + pr_err("driver register failed %d\n", ret); + goto err_driver; + } + + return 0; + +err_driver: + device_unregister(dev); +err_device: + bus_unregister(&rpmb_sim_bus); + return ret; +} + +static void __exit rpmb_sim_exit(void) +{ + struct device *dev = &rpmb_sim_dev; + struct device_driver *drv = &rpmb_sim_drv; + + device_unregister(dev); + driver_unregister(drv); + bus_unregister(&rpmb_sim_bus); +} + +module_init(rpmb_sim_init); +module_exit(rpmb_sim_exit); + +MODULE_AUTHOR("Tomas Winkler +#include +#include +#include +#include +#include +#include +#include +#include +#include + +static const char id[] = "RPMB:VIRTIO"; +#ifndef VIRTIO_ID_RPMB +#define VIRTIO_ID_RPMB 0xFFFF +#endif + +#define RPMB_SEQ_CMD_MAX 3 /* support up to 3 cmds */ + +struct virtio_rpmb_info { + struct virtqueue *vq; + struct mutex lock; /* info lock */ + wait_queue_head_t have_data; + struct rpmb_dev *rdev; +}; + +struct virtio_rpmb_ioc { + unsigned int ioc_cmd; + int result; + u8 target; + u8 reserved[3]; +}; + +static void virtio_rpmb_recv_done(struct virtqueue *vq) +{ + struct virtio_rpmb_info *vi; + struct virtio_device *vdev = vq->vdev; + + vi = vq->vdev->priv; + if (!vi) { + dev_err(&vdev->dev, "Error: no found vi data.\n"); + return; + } + + wake_up(&vi->have_data); +} + +static int rpmb_virtio_cmd_seq(struct device *dev, u8 target, + struct rpmb_cmd *cmds, u32 ncmds) +{ + 
struct virtio_device *vdev = dev_to_virtio(dev); + struct virtio_rpmb_info *vi = vdev->priv; + unsigned int i; + struct virtio_rpmb_ioc *vio_cmd; + struct rpmb_ioc_seq_cmd *seq_cmd; + size_t seq_cmd_sz; + struct scatterlist vio_ioc, vio_seq, frame[3]; + struct scatterlist *sgs[5]; + unsigned int num_out = 0, num_in = 0; + size_t sz; + int ret; + unsigned int len; + + if (ncmds > RPMB_SEQ_CMD_MAX) + return -EINVAL; + + mutex_lock(&vi->lock); + + vio_cmd = kzalloc(sizeof(*vio_cmd), GFP_KERNEL); + seq_cmd_sz = sizeof(*seq_cmd) + sizeof(struct rpmb_ioc_cmd) * ncmds; + seq_cmd = kzalloc(seq_cmd_sz, GFP_KERNEL); + if (!vio_cmd || !seq_cmd) { + ret = -ENOMEM; + goto out; + } + + vio_cmd->ioc_cmd = RPMB_IOC_SEQ_CMD; + vio_cmd->result = 0; + vio_cmd->target = target; + sg_init_one(&vio_ioc, vio_cmd, sizeof(*vio_cmd)); + sgs[num_out + num_in++] = &vio_ioc; + + seq_cmd->num_of_cmds = ncmds; + for (i = 0; i < ncmds; i++) { + seq_cmd->cmds[i].flags = cmds[i].flags; + seq_cmd->cmds[i].nframes = cmds[i].nframes; + seq_cmd->cmds[i].frames_ptr = i; + } + sg_init_one(&vio_seq, seq_cmd, seq_cmd_sz); + sgs[num_out + num_in++] = &vio_seq; + + for (i = 0; i < ncmds; i++) { + sz = sizeof(struct rpmb_frame_jdec) * (cmds[i].nframes ?: 1); + sg_init_one(&frame[i], cmds[i].frames, sz); + sgs[num_out + num_in++] = &frame[i]; + } + + virtqueue_add_sgs(vi->vq, sgs, num_out, num_in, vi, GFP_KERNEL); + virtqueue_kick(vi->vq); + + wait_event(vi->have_data, virtqueue_get_buf(vi->vq, &len)); + + ret = 0; + + if (vio_cmd->result != 0) { + dev_err(dev, "Error: command error = %d.\n", vio_cmd->result); + ret = -EIO; + } + +out: + kfree(vio_cmd); + kfree(seq_cmd); + mutex_unlock(&vi->lock); + return ret; +} + +static int rpmb_virtio_cmd_cap(struct device *dev, u8 target) +{ + struct virtio_device *vdev = dev_to_virtio(dev); + struct virtio_rpmb_info *vi = vdev->priv; + struct virtio_rpmb_ioc *vio_cmd; + struct rpmb_ioc_cap_cmd *cap_cmd; + struct scatterlist vio_ioc, cap_ioc; + struct scatterlist 
*sgs[2]; + unsigned int num_out = 0, num_in = 0; + unsigned int len; + int ret; + + mutex_lock(&vi->lock); + + vio_cmd = kzalloc(sizeof(*vio_cmd), GFP_KERNEL); + cap_cmd = kzalloc(sizeof(*cap_cmd), GFP_KERNEL); + if (!vio_cmd || !cap_cmd) { + ret = -ENOMEM; + goto out; + } + + vio_cmd->ioc_cmd = RPMB_IOC_CAP_CMD; + vio_cmd->result = 0; + vio_cmd->target = target; + sg_init_one(&vio_ioc, vio_cmd, sizeof(*vio_cmd)); + sgs[num_out + num_in++] = &vio_ioc; + + sg_init_one(&cap_ioc, cap_cmd, sizeof(*cap_cmd)); + sgs[num_out + num_in++] = &cap_ioc; + + virtqueue_add_sgs(vi->vq, sgs, num_out, num_in, vi, GFP_KERNEL); + virtqueue_kick(vi->vq); + + wait_event(vi->have_data, virtqueue_get_buf(vi->vq, &len)); + + ret = 0; + + if (vio_cmd->result != 0) { + dev_err(dev, "Error: command error = %d.\n", vio_cmd->result); + ret = -EIO; + } + +out: + kfree(vio_cmd); + kfree(cap_cmd); + + mutex_unlock(&vi->lock); + return ret; +} + +static int rpmb_virtio_get_capacity(struct device *dev, u8 target) +{ + return 0; +} + +static struct rpmb_ops rpmb_virtio_ops = { + .cmd_seq = rpmb_virtio_cmd_seq, + .get_capacity = rpmb_virtio_get_capacity, + .type = RPMB_TYPE_EMMC, +}; + +static int rpmb_virtio_dev_init(struct virtio_rpmb_info *vi) +{ + int ret = 0; + struct device *dev = &vi->vq->vdev->dev; + + rpmb_virtio_ops.dev_id_len = strlen(id); + rpmb_virtio_ops.dev_id = id; + rpmb_virtio_ops.wr_cnt_max = 1; + rpmb_virtio_ops.rd_cnt_max = 1; + rpmb_virtio_ops.block_size = 1; + + vi->rdev = rpmb_dev_register(dev, 0, &rpmb_virtio_ops); + if (IS_ERR(vi->rdev)) { + ret = PTR_ERR(vi->rdev); + goto err; + } + + dev_set_drvdata(dev, vi); +err: + return ret; +} + +static int virtio_rpmb_init(struct virtio_device *vdev) +{ + int ret; + struct virtio_rpmb_info *vi; + + vi = kzalloc(sizeof(*vi), GFP_KERNEL); + if (!vi) + return -ENOMEM; + + init_waitqueue_head(&vi->have_data); + mutex_init(&vi->lock); + vdev->priv = vi; + + /* We expect a single virtqueue. 
*/ + vi->vq = virtio_find_single_vq(vdev, virtio_rpmb_recv_done, "request"); + if (IS_ERR(vi->vq)) { + dev_err(&vdev->dev, "get single vq failed!\n"); + ret = PTR_ERR(vi->vq); + goto err; + } + + /* create vrpmb device. */ + ret = rpmb_virtio_dev_init(vi); + if (ret) { + dev_err(&vdev->dev, "create vrpmb device failed.\n"); + goto err; + } + + dev_info(&vdev->dev, "init done!\n"); + + return 0; + +err: + kfree(vi); + return ret; +} + +static void virtio_rpmb_remove(struct virtio_device *vdev) +{ + struct virtio_rpmb_info *vi; + + vi = vdev->priv; + if (!vi) + return; + + if (wq_has_sleeper(&vi->have_data)) + wake_up(&vi->have_data); + + rpmb_dev_unregister(vi->rdev); + + if (vdev->config->reset) + vdev->config->reset(vdev); + + if (vdev->config->del_vqs) + vdev->config->del_vqs(vdev); + + kfree(vi); +} + +static int virtio_rpmb_probe(struct virtio_device *vdev) +{ + return virtio_rpmb_init(vdev); +} + +#ifdef CONFIG_PM_SLEEP +static int virtio_rpmb_freeze(struct virtio_device *vdev) +{ + virtio_rpmb_remove(vdev); + return 0; +} + +static int virtio_rpmb_restore(struct virtio_device *vdev) +{ + return virtio_rpmb_init(vdev); +} +#endif + +static struct virtio_device_id id_table[] = { + { VIRTIO_ID_RPMB, VIRTIO_DEV_ANY_ID }, + { 0 }, +}; + +static struct virtio_driver virtio_rpmb_driver = { + .driver.name = KBUILD_MODNAME, + .driver.owner = THIS_MODULE, + .id_table = id_table, + .probe = virtio_rpmb_probe, + .remove = virtio_rpmb_remove, +#ifdef CONFIG_PM_SLEEP + .freeze = virtio_rpmb_freeze, + .restore = virtio_rpmb_restore, +#endif +}; + +module_virtio_driver(virtio_rpmb_driver); +MODULE_DEVICE_TABLE(virtio, id_table); + +MODULE_DESCRIPTION("Virtio rpmb frontend driver"); +MODULE_AUTHOR("Intel Corporation"); +MODULE_LICENSE("Dual BSD/GPL"); diff --git a/drivers/char/tpm/Kconfig b/drivers/char/tpm/Kconfig index 18c81cbe4704c..536e55d3919fd 100644 --- a/drivers/char/tpm/Kconfig +++ b/drivers/char/tpm/Kconfig @@ -5,7 +5,7 @@ menuconfig TCG_TPM tristate "TPM Hardware 
Support" depends on HAS_IOMEM - select SECURITYFS + imply SECURITYFS select CRYPTO select CRYPTO_HASH_INFO ---help--- diff --git a/drivers/char/tpm/Makefile b/drivers/char/tpm/Makefile index 4e9c33ca1f8fd..a01c4cab902a6 100644 --- a/drivers/char/tpm/Makefile +++ b/drivers/char/tpm/Makefile @@ -3,9 +3,19 @@ # Makefile for the kernel tpm device drivers. # obj-$(CONFIG_TCG_TPM) += tpm.o -tpm-y := tpm-interface.o tpm-dev.o tpm-sysfs.o tpm-chip.o tpm2-cmd.o \ - tpm-dev-common.o tpmrm-dev.o eventlog/common.o eventlog/tpm1.o \ - eventlog/tpm2.o tpm2-space.o +tpm-y := tpm-chip.o +tpm-y += tpm-dev-common.o +tpm-y += tpm-dev.o +tpm-y += tpm-interface.o +tpm-y += tpm1-cmd.o +tpm-y += tpm2-cmd.o +tpm-y += tpmrm-dev.o +tpm-y += tpm2-space.o +tpm-y += tpm-sysfs.o +tpm-y += eventlog/common.o +tpm-y += eventlog/tpm1.o +tpm-y += eventlog/tpm2.o + tpm-$(CONFIG_ACPI) += tpm_ppi.o eventlog/acpi.o tpm-$(CONFIG_EFI) += eventlog/efi.o tpm-$(CONFIG_OF) += eventlog/of.o diff --git a/drivers/char/tpm/st33zp24/st33zp24.c b/drivers/char/tpm/st33zp24/st33zp24.c index abd675bec88c8..64dc560859f2c 100644 --- a/drivers/char/tpm/st33zp24/st33zp24.c +++ b/drivers/char/tpm/st33zp24/st33zp24.c @@ -649,7 +649,7 @@ int st33zp24_pm_resume(struct device *dev) } else { ret = tpm_pm_resume(dev); if (!ret) - tpm_do_selftest(chip); + tpm1_do_selftest(chip); } return ret; } /* st33zp24_pm_resume() */ diff --git a/drivers/char/tpm/tpm-chip.c b/drivers/char/tpm/tpm-chip.c index 46caadca916a0..32db84683c401 100644 --- a/drivers/char/tpm/tpm-chip.c +++ b/drivers/char/tpm/tpm-chip.c @@ -451,14 +451,9 @@ int tpm_chip_register(struct tpm_chip *chip) { int rc; - if (chip->ops->flags & TPM_OPS_AUTO_STARTUP) { - if (chip->flags & TPM_CHIP_FLAG_TPM2) - rc = tpm2_auto_startup(chip); - else - rc = tpm1_auto_startup(chip); - if (rc) - return rc; - } + rc = tpm_auto_startup(chip); + if (rc) + return rc; tpm_sysfs_add_device(chip); diff --git a/drivers/char/tpm/tpm-dev-common.c b/drivers/char/tpm/tpm-dev-common.c index 
e4a04b2d3c32d..99b5133a9d058 100644 --- a/drivers/char/tpm/tpm-dev-common.c +++ b/drivers/char/tpm/tpm-dev-common.c @@ -17,11 +17,36 @@ * License. * */ +#include #include #include +#include #include "tpm.h" #include "tpm-dev.h" +static struct workqueue_struct *tpm_dev_wq; +static DEFINE_MUTEX(tpm_dev_wq_lock); + +static void tpm_async_work(struct work_struct *work) +{ + struct file_priv *priv = + container_of(work, struct file_priv, async_work); + ssize_t ret; + + mutex_lock(&priv->buffer_mutex); + priv->command_enqueued = false; + ret = tpm_transmit(priv->chip, priv->space, priv->data_buffer, + sizeof(priv->data_buffer), 0); + + tpm_put_ops(priv->chip); + if (ret > 0) { + priv->data_pending = ret; + mod_timer(&priv->user_read_timer, jiffies + (120 * HZ)); + } + mutex_unlock(&priv->buffer_mutex); + wake_up_interruptible(&priv->async_wait); +} + static void user_reader_timeout(struct timer_list *t) { struct file_priv *priv = from_timer(priv, t, user_read_timer); @@ -29,27 +54,32 @@ static void user_reader_timeout(struct timer_list *t) pr_warn("TPM user space timeout is deprecated (pid=%d)\n", task_tgid_nr(current)); - schedule_work(&priv->work); + schedule_work(&priv->timeout_work); } -static void timeout_work(struct work_struct *work) +static void tpm_timeout_work(struct work_struct *work) { - struct file_priv *priv = container_of(work, struct file_priv, work); + struct file_priv *priv = container_of(work, struct file_priv, + timeout_work); mutex_lock(&priv->buffer_mutex); priv->data_pending = 0; memset(priv->data_buffer, 0, sizeof(priv->data_buffer)); mutex_unlock(&priv->buffer_mutex); + wake_up_interruptible(&priv->async_wait); } void tpm_common_open(struct file *file, struct tpm_chip *chip, - struct file_priv *priv) + struct file_priv *priv, struct tpm_space *space) { priv->chip = chip; + priv->space = space; + mutex_init(&priv->buffer_mutex); timer_setup(&priv->user_read_timer, user_reader_timeout, 0); - INIT_WORK(&priv->work, timeout_work); - + 
INIT_WORK(&priv->timeout_work, tpm_timeout_work); + INIT_WORK(&priv->async_work, tpm_async_work); + init_waitqueue_head(&priv->async_wait); file->private_data = priv; } @@ -61,15 +91,17 @@ ssize_t tpm_common_read(struct file *file, char __user *buf, int rc; del_singleshot_timer_sync(&priv->user_read_timer); - flush_work(&priv->work); + flush_work(&priv->timeout_work); mutex_lock(&priv->buffer_mutex); if (priv->data_pending) { ret_size = min_t(ssize_t, size, priv->data_pending); - rc = copy_to_user(buf, priv->data_buffer, ret_size); - memset(priv->data_buffer, 0, priv->data_pending); - if (rc) - ret_size = -EFAULT; + if (ret_size > 0) { + rc = copy_to_user(buf, priv->data_buffer, ret_size); + memset(priv->data_buffer, 0, priv->data_pending); + if (rc) + ret_size = -EFAULT; + } priv->data_pending = 0; } @@ -79,13 +111,12 @@ ssize_t tpm_common_read(struct file *file, char __user *buf, } ssize_t tpm_common_write(struct file *file, const char __user *buf, - size_t size, loff_t *off, struct tpm_space *space) + size_t size, loff_t *off) { struct file_priv *priv = file->private_data; - size_t in_size = size; - ssize_t out_size; + int ret = 0; - if (in_size > TPM_BUFSIZE) + if (size > TPM_BUFSIZE) return -E2BIG; mutex_lock(&priv->buffer_mutex); @@ -94,21 +125,20 @@ ssize_t tpm_common_write(struct file *file, const char __user *buf, * tpm_read or a user_read_timer timeout. This also prevents split * buffered writes from blocking here. 
*/ - if (priv->data_pending != 0) { - mutex_unlock(&priv->buffer_mutex); - return -EBUSY; + if (priv->data_pending != 0 || priv->command_enqueued) { + ret = -EBUSY; + goto out; } - if (copy_from_user - (priv->data_buffer, (void __user *) buf, in_size)) { - mutex_unlock(&priv->buffer_mutex); - return -EFAULT; + if (copy_from_user(priv->data_buffer, buf, size)) { + ret = -EFAULT; + goto out; } - if (in_size < 6 || - in_size < be32_to_cpu(*((__be32 *) (priv->data_buffer + 2)))) { - mutex_unlock(&priv->buffer_mutex); - return -EINVAL; + if (size < 6 || + size < be32_to_cpu(*((__be32 *)(priv->data_buffer + 2)))) { + ret = -EINVAL; + goto out; } /* atomic tpm command send and result receive. We only hold the ops @@ -116,25 +146,50 @@ ssize_t tpm_common_write(struct file *file, const char __user *buf, * the char dev is held open. */ if (tpm_try_get_ops(priv->chip)) { - mutex_unlock(&priv->buffer_mutex); - return -EPIPE; + ret = -EPIPE; + goto out; } - out_size = tpm_transmit(priv->chip, space, priv->data_buffer, - sizeof(priv->data_buffer), 0); - tpm_put_ops(priv->chip); - if (out_size < 0) { + /* + * If in nonblocking mode schedule an async job to send + * the command return the size. + * In case of error the err code will be returned in + * the subsequent read call. 
+ */ + if (file->f_flags & O_NONBLOCK) { + priv->command_enqueued = true; + queue_work(tpm_dev_wq, &priv->async_work); mutex_unlock(&priv->buffer_mutex); - return out_size; + return size; } - priv->data_pending = out_size; + ret = tpm_transmit(priv->chip, priv->space, priv->data_buffer, + sizeof(priv->data_buffer), 0); + tpm_put_ops(priv->chip); + + if (ret > 0) { + priv->data_pending = ret; + mod_timer(&priv->user_read_timer, jiffies + (120 * HZ)); + ret = size; + } +out: mutex_unlock(&priv->buffer_mutex); + return ret; +} + +__poll_t tpm_common_poll(struct file *file, poll_table *wait) +{ + struct file_priv *priv = file->private_data; + __poll_t mask = 0; + + poll_wait(file, &priv->async_wait, wait); - /* Set a timeout by which the reader must come claim the result */ - mod_timer(&priv->user_read_timer, jiffies + (120 * HZ)); + if (priv->data_pending) + mask = EPOLLIN | EPOLLRDNORM; + else + mask = EPOLLOUT | EPOLLWRNORM; - return in_size; + return mask; } /* @@ -142,8 +197,24 @@ ssize_t tpm_common_write(struct file *file, const char __user *buf, */ void tpm_common_release(struct file *file, struct file_priv *priv) { + flush_work(&priv->async_work); del_singleshot_timer_sync(&priv->user_read_timer); - flush_work(&priv->work); + flush_work(&priv->timeout_work); file->private_data = NULL; priv->data_pending = 0; } + +int __init tpm_dev_common_init(void) +{ + tpm_dev_wq = alloc_workqueue("tpm_dev_wq", WQ_MEM_RECLAIM, 0); + + return !tpm_dev_wq ? 
-ENOMEM : 0; +} + +void __exit tpm_dev_common_exit(void) +{ + if (tpm_dev_wq) { + destroy_workqueue(tpm_dev_wq); + tpm_dev_wq = NULL; + } +} diff --git a/drivers/char/tpm/tpm-dev.c b/drivers/char/tpm/tpm-dev.c index ebd74ab5abef8..32f9738f1cb24 100644 --- a/drivers/char/tpm/tpm-dev.c +++ b/drivers/char/tpm/tpm-dev.c @@ -39,7 +39,7 @@ static int tpm_open(struct inode *inode, struct file *file) if (priv == NULL) goto out; - tpm_common_open(file, chip, priv); + tpm_common_open(file, chip, priv, NULL); return 0; @@ -48,12 +48,6 @@ static int tpm_open(struct inode *inode, struct file *file) return -ENOMEM; } -static ssize_t tpm_write(struct file *file, const char __user *buf, - size_t size, loff_t *off) -{ - return tpm_common_write(file, buf, size, off, NULL); -} - /* * Called on file close */ @@ -73,6 +67,7 @@ const struct file_operations tpm_fops = { .llseek = no_llseek, .open = tpm_open, .read = tpm_common_read, - .write = tpm_write, + .write = tpm_common_write, + .poll = tpm_common_poll, .release = tpm_release, }; diff --git a/drivers/char/tpm/tpm-dev.h b/drivers/char/tpm/tpm-dev.h index b24cfb4d3ee1e..a126b575cb8c8 100644 --- a/drivers/char/tpm/tpm-dev.h +++ b/drivers/char/tpm/tpm-dev.h @@ -2,27 +2,33 @@ #ifndef _TPM_DEV_H #define _TPM_DEV_H +#include #include "tpm.h" struct file_priv { struct tpm_chip *chip; + struct tpm_space *space; - /* Data passed to and from the tpm via the read/write calls */ - size_t data_pending; + /* Holds the amount of data passed or an error code from async op */ + ssize_t data_pending; struct mutex buffer_mutex; struct timer_list user_read_timer; /* user needs to claim result */ - struct work_struct work; + struct work_struct timeout_work; + struct work_struct async_work; + wait_queue_head_t async_wait; + bool command_enqueued; u8 data_buffer[TPM_BUFSIZE]; }; void tpm_common_open(struct file *file, struct tpm_chip *chip, - struct file_priv *priv); + struct file_priv *priv, struct tpm_space *space); ssize_t tpm_common_read(struct file 
*file, char __user *buf, size_t size, loff_t *off); ssize_t tpm_common_write(struct file *file, const char __user *buf, - size_t size, loff_t *off, struct tpm_space *space); -void tpm_common_release(struct file *file, struct file_priv *priv); + size_t size, loff_t *off); +__poll_t tpm_common_poll(struct file *file, poll_table *wait); +void tpm_common_release(struct file *file, struct file_priv *priv); #endif diff --git a/drivers/char/tpm/tpm-interface.c b/drivers/char/tpm/tpm-interface.c index 1a803b0cf9808..d9439f9abe78d 100644 --- a/drivers/char/tpm/tpm-interface.c +++ b/drivers/char/tpm/tpm-interface.c @@ -33,297 +33,32 @@ #include "tpm.h" -#define TPM_MAX_ORDINAL 243 -#define TSC_MAX_ORDINAL 12 -#define TPM_PROTECTED_COMMAND 0x00 -#define TPM_CONNECTION_COMMAND 0x40 - /* * Bug workaround - some TPM's don't flush the most * recently changed pcr on suspend, so force the flush * with an extend to the selected _unused_ non-volatile pcr. */ -static int tpm_suspend_pcr; +static u32 tpm_suspend_pcr; module_param_named(suspend_pcr, tpm_suspend_pcr, uint, 0644); MODULE_PARM_DESC(suspend_pcr, "PCR to use for dummy writes to facilitate flush on suspend."); -/* - * Array with one entry per ordinal defining the maximum amount - * of time the chip could take to return the result. The ordinal - * designation of short, medium or long is defined in a table in - * TCG Specification TPM Main Part 2 TPM Structures Section 17. The - * values of the SHORT, MEDIUM, and LONG durations are retrieved - * from the chip during initialization with a call to tpm_get_timeouts. 
- */ -static const u8 tpm_ordinal_duration[TPM_MAX_ORDINAL] = { - TPM_UNDEFINED, /* 0 */ - TPM_UNDEFINED, - TPM_UNDEFINED, - TPM_UNDEFINED, - TPM_UNDEFINED, - TPM_UNDEFINED, /* 5 */ - TPM_UNDEFINED, - TPM_UNDEFINED, - TPM_UNDEFINED, - TPM_UNDEFINED, - TPM_SHORT, /* 10 */ - TPM_SHORT, - TPM_MEDIUM, - TPM_LONG, - TPM_LONG, - TPM_MEDIUM, /* 15 */ - TPM_SHORT, - TPM_SHORT, - TPM_MEDIUM, - TPM_LONG, - TPM_SHORT, /* 20 */ - TPM_SHORT, - TPM_MEDIUM, - TPM_MEDIUM, - TPM_MEDIUM, - TPM_SHORT, /* 25 */ - TPM_SHORT, - TPM_MEDIUM, - TPM_SHORT, - TPM_SHORT, - TPM_MEDIUM, /* 30 */ - TPM_LONG, - TPM_MEDIUM, - TPM_SHORT, - TPM_SHORT, - TPM_SHORT, /* 35 */ - TPM_MEDIUM, - TPM_MEDIUM, - TPM_UNDEFINED, - TPM_UNDEFINED, - TPM_MEDIUM, /* 40 */ - TPM_LONG, - TPM_MEDIUM, - TPM_SHORT, - TPM_SHORT, - TPM_SHORT, /* 45 */ - TPM_SHORT, - TPM_SHORT, - TPM_SHORT, - TPM_LONG, - TPM_MEDIUM, /* 50 */ - TPM_MEDIUM, - TPM_UNDEFINED, - TPM_UNDEFINED, - TPM_UNDEFINED, - TPM_UNDEFINED, /* 55 */ - TPM_UNDEFINED, - TPM_UNDEFINED, - TPM_UNDEFINED, - TPM_UNDEFINED, - TPM_MEDIUM, /* 60 */ - TPM_MEDIUM, - TPM_MEDIUM, - TPM_SHORT, - TPM_SHORT, - TPM_MEDIUM, /* 65 */ - TPM_UNDEFINED, - TPM_UNDEFINED, - TPM_UNDEFINED, - TPM_UNDEFINED, - TPM_SHORT, /* 70 */ - TPM_SHORT, - TPM_UNDEFINED, - TPM_UNDEFINED, - TPM_UNDEFINED, - TPM_UNDEFINED, /* 75 */ - TPM_UNDEFINED, - TPM_UNDEFINED, - TPM_UNDEFINED, - TPM_UNDEFINED, - TPM_LONG, /* 80 */ - TPM_UNDEFINED, - TPM_MEDIUM, - TPM_LONG, - TPM_SHORT, - TPM_UNDEFINED, /* 85 */ - TPM_UNDEFINED, - TPM_UNDEFINED, - TPM_UNDEFINED, - TPM_UNDEFINED, - TPM_SHORT, /* 90 */ - TPM_SHORT, - TPM_SHORT, - TPM_SHORT, - TPM_SHORT, - TPM_UNDEFINED, /* 95 */ - TPM_UNDEFINED, - TPM_UNDEFINED, - TPM_UNDEFINED, - TPM_UNDEFINED, - TPM_MEDIUM, /* 100 */ - TPM_SHORT, - TPM_SHORT, - TPM_UNDEFINED, - TPM_UNDEFINED, - TPM_UNDEFINED, /* 105 */ - TPM_UNDEFINED, - TPM_UNDEFINED, - TPM_UNDEFINED, - TPM_UNDEFINED, - TPM_SHORT, /* 110 */ - TPM_SHORT, - TPM_SHORT, - TPM_SHORT, - TPM_SHORT, - TPM_SHORT, /* 115 
*/ - TPM_SHORT, - TPM_SHORT, - TPM_UNDEFINED, - TPM_UNDEFINED, - TPM_LONG, /* 120 */ - TPM_LONG, - TPM_MEDIUM, - TPM_UNDEFINED, - TPM_SHORT, - TPM_SHORT, /* 125 */ - TPM_SHORT, - TPM_LONG, - TPM_SHORT, - TPM_SHORT, - TPM_SHORT, /* 130 */ - TPM_MEDIUM, - TPM_UNDEFINED, - TPM_SHORT, - TPM_MEDIUM, - TPM_UNDEFINED, /* 135 */ - TPM_UNDEFINED, - TPM_UNDEFINED, - TPM_UNDEFINED, - TPM_UNDEFINED, - TPM_SHORT, /* 140 */ - TPM_SHORT, - TPM_UNDEFINED, - TPM_UNDEFINED, - TPM_UNDEFINED, - TPM_UNDEFINED, /* 145 */ - TPM_UNDEFINED, - TPM_UNDEFINED, - TPM_UNDEFINED, - TPM_UNDEFINED, - TPM_SHORT, /* 150 */ - TPM_MEDIUM, - TPM_MEDIUM, - TPM_SHORT, - TPM_SHORT, - TPM_UNDEFINED, /* 155 */ - TPM_UNDEFINED, - TPM_UNDEFINED, - TPM_UNDEFINED, - TPM_UNDEFINED, - TPM_SHORT, /* 160 */ - TPM_SHORT, - TPM_SHORT, - TPM_SHORT, - TPM_UNDEFINED, - TPM_UNDEFINED, /* 165 */ - TPM_UNDEFINED, - TPM_UNDEFINED, - TPM_UNDEFINED, - TPM_UNDEFINED, - TPM_LONG, /* 170 */ - TPM_UNDEFINED, - TPM_UNDEFINED, - TPM_UNDEFINED, - TPM_UNDEFINED, - TPM_UNDEFINED, /* 175 */ - TPM_UNDEFINED, - TPM_UNDEFINED, - TPM_UNDEFINED, - TPM_UNDEFINED, - TPM_MEDIUM, /* 180 */ - TPM_SHORT, - TPM_MEDIUM, - TPM_MEDIUM, - TPM_MEDIUM, - TPM_MEDIUM, /* 185 */ - TPM_SHORT, - TPM_UNDEFINED, - TPM_UNDEFINED, - TPM_UNDEFINED, - TPM_UNDEFINED, /* 190 */ - TPM_UNDEFINED, - TPM_UNDEFINED, - TPM_UNDEFINED, - TPM_UNDEFINED, - TPM_UNDEFINED, /* 195 */ - TPM_UNDEFINED, - TPM_UNDEFINED, - TPM_UNDEFINED, - TPM_UNDEFINED, - TPM_SHORT, /* 200 */ - TPM_UNDEFINED, - TPM_UNDEFINED, - TPM_UNDEFINED, - TPM_SHORT, - TPM_SHORT, /* 205 */ - TPM_SHORT, - TPM_SHORT, - TPM_SHORT, - TPM_SHORT, - TPM_MEDIUM, /* 210 */ - TPM_UNDEFINED, - TPM_MEDIUM, - TPM_MEDIUM, - TPM_MEDIUM, - TPM_UNDEFINED, /* 215 */ - TPM_MEDIUM, - TPM_UNDEFINED, - TPM_UNDEFINED, - TPM_SHORT, - TPM_SHORT, /* 220 */ - TPM_SHORT, - TPM_SHORT, - TPM_SHORT, - TPM_SHORT, - TPM_UNDEFINED, /* 225 */ - TPM_UNDEFINED, - TPM_UNDEFINED, - TPM_UNDEFINED, - TPM_UNDEFINED, - TPM_SHORT, /* 230 */ - TPM_LONG, 
- TPM_MEDIUM, - TPM_UNDEFINED, - TPM_UNDEFINED, - TPM_UNDEFINED, /* 235 */ - TPM_UNDEFINED, - TPM_UNDEFINED, - TPM_UNDEFINED, - TPM_UNDEFINED, - TPM_SHORT, /* 240 */ - TPM_UNDEFINED, - TPM_MEDIUM, -}; - -/* - * Returns max number of jiffies to wait +/** + * tpm_calc_ordinal_duration() - calculate the maximum command duration + * @chip: TPM chip to use. + * @ordinal: TPM command ordinal. + * + * The function returns the maximum amount of time the chip could take + * to return the result for a particular ordinal in jiffies. + * + * Return: A maximal duration time for an ordinal in jiffies. */ -unsigned long tpm_calc_ordinal_duration(struct tpm_chip *chip, - u32 ordinal) +unsigned long tpm_calc_ordinal_duration(struct tpm_chip *chip, u32 ordinal) { - int duration_idx = TPM_UNDEFINED; - int duration = 0; - - /* - * We only have a duration table for protected commands, where the upper - * 16 bits are 0. For the few other ordinals the fallback will be used. - */ - if (ordinal < TPM_MAX_ORDINAL) - duration_idx = tpm_ordinal_duration[ordinal]; - - if (duration_idx != TPM_UNDEFINED) - duration = chip->duration[duration_idx]; - if (duration <= 0) - return 2 * 60 * HZ; + if (chip->flags & TPM_CHIP_FLAG_TPM2) + return tpm2_calc_ordinal_duration(chip, ordinal); else - return duration; + return tpm1_calc_ordinal_duration(chip, ordinal); } EXPORT_SYMBOL_GPL(tpm_calc_ordinal_duration); @@ -477,13 +212,15 @@ static ssize_t tpm_try_transmit(struct tpm_chip *chip, if (need_locality) { rc = tpm_request_locality(chip, flags); - if (rc < 0) - goto out_no_locality; + if (rc < 0) { + need_locality = false; + goto out_locality; + } } rc = tpm_cmd_ready(chip, flags); if (rc) - goto out; + goto out_locality; rc = tpm2_prepare_space(chip, space, ordinal, buf); if (rc) @@ -500,10 +237,7 @@ static ssize_t tpm_try_transmit(struct tpm_chip *chip, if (chip->flags & TPM_CHIP_FLAG_IRQ) goto out_recv; - if (chip->flags & TPM_CHIP_FLAG_TPM2) - stop = jiffies + tpm2_calc_ordinal_duration(chip, 
ordinal); - else - stop = jiffies + tpm_calc_ordinal_duration(chip, ordinal); + stop = jiffies + tpm_calc_ordinal_duration(chip, ordinal); do { u8 status = chip->ops->status(chip); if ((status & chip->ops->req_complete_mask) == @@ -547,14 +281,13 @@ static ssize_t tpm_try_transmit(struct tpm_chip *chip, dev_err(&chip->dev, "tpm2_commit_space: error %d\n", rc); out: - rc = tpm_go_idle(chip, flags); - if (rc) - goto out; + /* may fail but do not override previous error value in rc */ + tpm_go_idle(chip, flags); +out_locality: if (need_locality) tpm_relinquish_locality(chip, flags); -out_no_locality: if (chip->ops->clk_enable != NULL) chip->ops->clk_enable(chip, false); @@ -663,7 +396,8 @@ ssize_t tpm_transmit_cmd(struct tpm_chip *chip, struct tpm_space *space, return len; err = be32_to_cpu(header->return_code); - if (err != 0 && desc) + if (err != 0 && err != TPM_ERR_DISABLED && err != TPM_ERR_DEACTIVATED + && desc) dev_err(&chip->dev, "A TPM error (%d) occurred %s\n", err, desc); if (err) @@ -676,277 +410,18 @@ ssize_t tpm_transmit_cmd(struct tpm_chip *chip, struct tpm_space *space, } EXPORT_SYMBOL_GPL(tpm_transmit_cmd); -#define TPM_ORD_STARTUP 153 -#define TPM_ST_CLEAR 1 - -/** - * tpm_startup - turn on the TPM - * @chip: TPM chip to use - * - * Normally the firmware should start the TPM. This function is provided as a - * workaround if this does not happen. A legal case for this could be for - * example when a TPM emulator is used. 
- * - * Return: same as tpm_transmit_cmd() - */ -int tpm_startup(struct tpm_chip *chip) -{ - struct tpm_buf buf; - int rc; - - dev_info(&chip->dev, "starting up the TPM manually\n"); - - if (chip->flags & TPM_CHIP_FLAG_TPM2) { - rc = tpm_buf_init(&buf, TPM2_ST_NO_SESSIONS, TPM2_CC_STARTUP); - if (rc < 0) - return rc; - - tpm_buf_append_u16(&buf, TPM2_SU_CLEAR); - } else { - rc = tpm_buf_init(&buf, TPM_TAG_RQU_COMMAND, TPM_ORD_STARTUP); - if (rc < 0) - return rc; - - tpm_buf_append_u16(&buf, TPM_ST_CLEAR); - } - - rc = tpm_transmit_cmd(chip, NULL, buf.data, PAGE_SIZE, 0, 0, - "attempting to start the TPM"); - - tpm_buf_destroy(&buf); - return rc; -} - -#define TPM_DIGEST_SIZE 20 -#define TPM_RET_CODE_IDX 6 -#define TPM_INTERNAL_RESULT_SIZE 200 -#define TPM_ORD_GET_CAP 101 -#define TPM_ORD_GET_RANDOM 70 - -static const struct tpm_input_header tpm_getcap_header = { - .tag = cpu_to_be16(TPM_TAG_RQU_COMMAND), - .length = cpu_to_be32(22), - .ordinal = cpu_to_be32(TPM_ORD_GET_CAP) -}; - -ssize_t tpm_getcap(struct tpm_chip *chip, u32 subcap_id, cap_t *cap, - const char *desc, size_t min_cap_length) -{ - struct tpm_buf buf; - int rc; - - rc = tpm_buf_init(&buf, TPM_TAG_RQU_COMMAND, TPM_ORD_GET_CAP); - if (rc) - return rc; - - if (subcap_id == TPM_CAP_VERSION_1_1 || - subcap_id == TPM_CAP_VERSION_1_2) { - tpm_buf_append_u32(&buf, subcap_id); - tpm_buf_append_u32(&buf, 0); - } else { - if (subcap_id == TPM_CAP_FLAG_PERM || - subcap_id == TPM_CAP_FLAG_VOL) - tpm_buf_append_u32(&buf, TPM_CAP_FLAG); - else - tpm_buf_append_u32(&buf, TPM_CAP_PROP); - - tpm_buf_append_u32(&buf, 4); - tpm_buf_append_u32(&buf, subcap_id); - } - rc = tpm_transmit_cmd(chip, NULL, buf.data, PAGE_SIZE, - min_cap_length, 0, desc); - if (!rc) - *cap = *(cap_t *)&buf.data[TPM_HEADER_SIZE + 4]; - - tpm_buf_destroy(&buf); - return rc; -} -EXPORT_SYMBOL_GPL(tpm_getcap); - int tpm_get_timeouts(struct tpm_chip *chip) { - cap_t cap; - unsigned long timeout_old[4], timeout_chip[4], timeout_eff[4]; - ssize_t rc; - 
if (chip->flags & TPM_CHIP_FLAG_HAVE_TIMEOUTS) return 0; - if (chip->flags & TPM_CHIP_FLAG_TPM2) { - /* Fixed timeouts for TPM2 */ - chip->timeout_a = msecs_to_jiffies(TPM2_TIMEOUT_A); - chip->timeout_b = msecs_to_jiffies(TPM2_TIMEOUT_B); - chip->timeout_c = msecs_to_jiffies(TPM2_TIMEOUT_C); - chip->timeout_d = msecs_to_jiffies(TPM2_TIMEOUT_D); - chip->duration[TPM_SHORT] = - msecs_to_jiffies(TPM2_DURATION_SHORT); - chip->duration[TPM_MEDIUM] = - msecs_to_jiffies(TPM2_DURATION_MEDIUM); - chip->duration[TPM_LONG] = - msecs_to_jiffies(TPM2_DURATION_LONG); - chip->duration[TPM_LONG_LONG] = - msecs_to_jiffies(TPM2_DURATION_LONG_LONG); - - chip->flags |= TPM_CHIP_FLAG_HAVE_TIMEOUTS; - return 0; - } - - rc = tpm_getcap(chip, TPM_CAP_PROP_TIS_TIMEOUT, &cap, NULL, - sizeof(cap.timeout)); - if (rc == TPM_ERR_INVALID_POSTINIT) { - if (tpm_startup(chip)) - return rc; - - rc = tpm_getcap(chip, TPM_CAP_PROP_TIS_TIMEOUT, &cap, - "attempting to determine the timeouts", - sizeof(cap.timeout)); - } - - if (rc) { - dev_err(&chip->dev, - "A TPM error (%zd) occurred attempting to determine the timeouts\n", - rc); - return rc; - } - - timeout_old[0] = jiffies_to_usecs(chip->timeout_a); - timeout_old[1] = jiffies_to_usecs(chip->timeout_b); - timeout_old[2] = jiffies_to_usecs(chip->timeout_c); - timeout_old[3] = jiffies_to_usecs(chip->timeout_d); - timeout_chip[0] = be32_to_cpu(cap.timeout.a); - timeout_chip[1] = be32_to_cpu(cap.timeout.b); - timeout_chip[2] = be32_to_cpu(cap.timeout.c); - timeout_chip[3] = be32_to_cpu(cap.timeout.d); - memcpy(timeout_eff, timeout_chip, sizeof(timeout_eff)); - - /* - * Provide ability for vendor overrides of timeout values in case - * of misreporting. 
- */ - if (chip->ops->update_timeouts != NULL) - chip->timeout_adjusted = - chip->ops->update_timeouts(chip, timeout_eff); - - if (!chip->timeout_adjusted) { - /* Restore default if chip reported 0 */ - int i; - - for (i = 0; i < ARRAY_SIZE(timeout_eff); i++) { - if (timeout_eff[i]) - continue; - - timeout_eff[i] = timeout_old[i]; - chip->timeout_adjusted = true; - } - - if (timeout_eff[0] != 0 && timeout_eff[0] < 1000) { - /* timeouts in msec rather usec */ - for (i = 0; i != ARRAY_SIZE(timeout_eff); i++) - timeout_eff[i] *= 1000; - chip->timeout_adjusted = true; - } - } - - /* Report adjusted timeouts */ - if (chip->timeout_adjusted) { - dev_info(&chip->dev, - HW_ERR "Adjusting reported timeouts: A %lu->%luus B %lu->%luus C %lu->%luus D %lu->%luus\n", - timeout_chip[0], timeout_eff[0], - timeout_chip[1], timeout_eff[1], - timeout_chip[2], timeout_eff[2], - timeout_chip[3], timeout_eff[3]); - } - - chip->timeout_a = usecs_to_jiffies(timeout_eff[0]); - chip->timeout_b = usecs_to_jiffies(timeout_eff[1]); - chip->timeout_c = usecs_to_jiffies(timeout_eff[2]); - chip->timeout_d = usecs_to_jiffies(timeout_eff[3]); - - rc = tpm_getcap(chip, TPM_CAP_PROP_TIS_DURATION, &cap, - "attempting to determine the durations", - sizeof(cap.duration)); - if (rc) - return rc; - - chip->duration[TPM_SHORT] = - usecs_to_jiffies(be32_to_cpu(cap.duration.tpm_short)); - chip->duration[TPM_MEDIUM] = - usecs_to_jiffies(be32_to_cpu(cap.duration.tpm_medium)); - chip->duration[TPM_LONG] = - usecs_to_jiffies(be32_to_cpu(cap.duration.tpm_long)); - chip->duration[TPM_LONG_LONG] = 0; /* not used under 1.2 */ - - /* The Broadcom BCM0102 chipset in a Dell Latitude D820 gets the above - * value wrong and apparently reports msecs rather than usecs. So we - * fix up the resulting too-small TPM_SHORT value to make things work. - * We also scale the TPM_MEDIUM and -_LONG values by 1000. 
- */ - if (chip->duration[TPM_SHORT] < (HZ / 100)) { - chip->duration[TPM_SHORT] = HZ; - chip->duration[TPM_MEDIUM] *= 1000; - chip->duration[TPM_LONG] *= 1000; - chip->duration_adjusted = true; - dev_info(&chip->dev, "Adjusting TPM timeout parameters."); - } - - chip->flags |= TPM_CHIP_FLAG_HAVE_TIMEOUTS; - return 0; + if (chip->flags & TPM_CHIP_FLAG_TPM2) + return tpm2_get_timeouts(chip); + else + return tpm1_get_timeouts(chip); } EXPORT_SYMBOL_GPL(tpm_get_timeouts); -#define TPM_ORD_CONTINUE_SELFTEST 83 -#define CONTINUE_SELFTEST_RESULT_SIZE 10 - -static const struct tpm_input_header continue_selftest_header = { - .tag = cpu_to_be16(TPM_TAG_RQU_COMMAND), - .length = cpu_to_be32(10), - .ordinal = cpu_to_be32(TPM_ORD_CONTINUE_SELFTEST), -}; - -/** - * tpm_continue_selftest -- run TPM's selftest - * @chip: TPM chip to use - * - * Returns 0 on success, < 0 in case of fatal error or a value > 0 representing - * a TPM error code. - */ -static int tpm_continue_selftest(struct tpm_chip *chip) -{ - int rc; - struct tpm_cmd_t cmd; - - cmd.header.in = continue_selftest_header; - rc = tpm_transmit_cmd(chip, NULL, &cmd, CONTINUE_SELFTEST_RESULT_SIZE, - 0, 0, "continue selftest"); - return rc; -} - -#define TPM_ORDINAL_PCRREAD 21 -#define READ_PCR_RESULT_SIZE 30 -#define READ_PCR_RESULT_BODY_SIZE 20 -static const struct tpm_input_header pcrread_header = { - .tag = cpu_to_be16(TPM_TAG_RQU_COMMAND), - .length = cpu_to_be32(14), - .ordinal = cpu_to_be32(TPM_ORDINAL_PCRREAD) -}; - -int tpm_pcr_read_dev(struct tpm_chip *chip, int pcr_idx, u8 *res_buf) -{ - int rc; - struct tpm_cmd_t cmd; - - cmd.header.in = pcrread_header; - cmd.params.pcrread_in.pcr_idx = cpu_to_be32(pcr_idx); - rc = tpm_transmit_cmd(chip, NULL, &cmd, READ_PCR_RESULT_SIZE, - READ_PCR_RESULT_BODY_SIZE, 0, - "attempting to read a pcr value"); - - if (rc == 0) - memcpy(res_buf, cmd.params.pcrread_out.pcr_result, - TPM_DIGEST_SIZE); - return rc; -} - /** * tpm_is_tpm2 - do we a have a TPM2 chip? 
* @chip: a &struct tpm_chip instance, %NULL for the default chip @@ -980,50 +455,24 @@ EXPORT_SYMBOL_GPL(tpm_is_tpm2); * * Return: same as with tpm_transmit_cmd() */ -int tpm_pcr_read(struct tpm_chip *chip, int pcr_idx, u8 *res_buf) +int tpm_pcr_read(struct tpm_chip *chip, u32 pcr_idx, u8 *res_buf) { int rc; chip = tpm_find_get_ops(chip); if (!chip) return -ENODEV; + if (chip->flags & TPM_CHIP_FLAG_TPM2) rc = tpm2_pcr_read(chip, pcr_idx, res_buf); else - rc = tpm_pcr_read_dev(chip, pcr_idx, res_buf); + rc = tpm1_pcr_read(chip, pcr_idx, res_buf); + tpm_put_ops(chip); return rc; } EXPORT_SYMBOL_GPL(tpm_pcr_read); -#define TPM_ORD_PCR_EXTEND 20 -#define EXTEND_PCR_RESULT_SIZE 34 -#define EXTEND_PCR_RESULT_BODY_SIZE 20 -static const struct tpm_input_header pcrextend_header = { - .tag = cpu_to_be16(TPM_TAG_RQU_COMMAND), - .length = cpu_to_be32(34), - .ordinal = cpu_to_be32(TPM_ORD_PCR_EXTEND) -}; - -static int tpm1_pcr_extend(struct tpm_chip *chip, int pcr_idx, const u8 *hash, - char *log_msg) -{ - struct tpm_buf buf; - int rc; - - rc = tpm_buf_init(&buf, TPM_TAG_RQU_COMMAND, TPM_ORD_PCR_EXTEND); - if (rc) - return rc; - - tpm_buf_append_u32(&buf, pcr_idx); - tpm_buf_append(&buf, hash, TPM_DIGEST_SIZE); - - rc = tpm_transmit_cmd(chip, NULL, buf.data, EXTEND_PCR_RESULT_SIZE, - EXTEND_PCR_RESULT_BODY_SIZE, 0, log_msg); - tpm_buf_destroy(&buf); - return rc; -} - /** * tpm_pcr_extend - extend a PCR value in SHA1 bank. 
* @chip: a &struct tpm_chip instance, %NULL for the default chip @@ -1036,7 +485,7 @@ static int tpm1_pcr_extend(struct tpm_chip *chip, int pcr_idx, const u8 *hash, * * Return: same as with tpm_transmit_cmd() */ -int tpm_pcr_extend(struct tpm_chip *chip, int pcr_idx, const u8 *hash) +int tpm_pcr_extend(struct tpm_chip *chip, u32 pcr_idx, const u8 *hash) { int rc; struct tpm2_digest digest_list[ARRAY_SIZE(chip->active_banks)]; @@ -1069,97 +518,6 @@ int tpm_pcr_extend(struct tpm_chip *chip, int pcr_idx, const u8 *hash) } EXPORT_SYMBOL_GPL(tpm_pcr_extend); -/** - * tpm_do_selftest - have the TPM continue its selftest and wait until it - * can receive further commands - * @chip: TPM chip to use - * - * Returns 0 on success, < 0 in case of fatal error or a value > 0 representing - * a TPM error code. - */ -int tpm_do_selftest(struct tpm_chip *chip) -{ - int rc; - unsigned int loops; - unsigned int delay_msec = 100; - unsigned long duration; - u8 dummy[TPM_DIGEST_SIZE]; - - duration = tpm_calc_ordinal_duration(chip, TPM_ORD_CONTINUE_SELFTEST); - - loops = jiffies_to_msecs(duration) / delay_msec; - - rc = tpm_continue_selftest(chip); - if (rc == TPM_ERR_INVALID_POSTINIT) { - chip->flags |= TPM_CHIP_FLAG_ALWAYS_POWERED; - dev_info(&chip->dev, "TPM not ready (%d)\n", rc); - } - /* This may fail if there was no TPM driver during a suspend/resume - * cycle; some may return 10 (BAD_ORDINAL), others 28 (FAILEDSELFTEST) - */ - if (rc) - return rc; - - do { - /* Attempt to read a PCR value */ - rc = tpm_pcr_read_dev(chip, 0, dummy); - - /* Some buggy TPMs will not respond to tpm_tis_ready() for - * around 300ms while the self test is ongoing, keep trying - * until the self test duration expires. 
*/ - if (rc == -ETIME) { - dev_info( - &chip->dev, HW_ERR - "TPM command timed out during continue self test"); - tpm_msleep(delay_msec); - continue; - } - - if (rc == TPM_ERR_DISABLED || rc == TPM_ERR_DEACTIVATED) { - dev_info(&chip->dev, - "TPM is disabled/deactivated (0x%X)\n", rc); - /* TPM is disabled and/or deactivated; driver can - * proceed and TPM does handle commands for - * suspend/resume correctly - */ - return 0; - } - if (rc != TPM_WARN_DOING_SELFTEST) - return rc; - tpm_msleep(delay_msec); - } while (--loops > 0); - - return rc; -} -EXPORT_SYMBOL_GPL(tpm_do_selftest); - -/** - * tpm1_auto_startup - Perform the standard automatic TPM initialization - * sequence - * @chip: TPM chip to use - * - * Returns 0 on success, < 0 in case of fatal error. - */ -int tpm1_auto_startup(struct tpm_chip *chip) -{ - int rc; - - rc = tpm_get_timeouts(chip); - if (rc) - goto out; - rc = tpm_do_selftest(chip); - if (rc) { - dev_err(&chip->dev, "TPM self test failed\n"); - goto out; - } - - return rc; -out: - if (rc > 0) - rc = -ENODEV; - return rc; -} - /** * tpm_send - send a TPM command * @chip: a &struct tpm_chip instance, %NULL for the default chip @@ -1183,14 +541,20 @@ int tpm_send(struct tpm_chip *chip, void *cmd, size_t buflen) } EXPORT_SYMBOL_GPL(tpm_send); -#define TPM_ORD_SAVESTATE 152 -#define SAVESTATE_RESULT_SIZE 10 +int tpm_auto_startup(struct tpm_chip *chip) +{ + int rc; + + if (!(chip->ops->flags & TPM_OPS_AUTO_STARTUP)) + return 0; -static const struct tpm_input_header savestate_header = { - .tag = cpu_to_be16(TPM_TAG_RQU_COMMAND), - .length = cpu_to_be32(10), - .ordinal = cpu_to_be32(TPM_ORD_SAVESTATE) -}; + if (chip->flags & TPM_CHIP_FLAG_TPM2) + rc = tpm2_auto_startup(chip); + else + rc = tpm1_auto_startup(chip); + + return rc; +} /* * We are about to suspend. 
Save the TPM state @@ -1199,54 +563,18 @@ static const struct tpm_input_header savestate_header = { int tpm_pm_suspend(struct device *dev) { struct tpm_chip *chip = dev_get_drvdata(dev); - struct tpm_cmd_t cmd; - int rc, try; - - u8 dummy_hash[TPM_DIGEST_SIZE] = { 0 }; + int rc = 0; - if (chip == NULL) + if (!chip) return -ENODEV; if (chip->flags & TPM_CHIP_FLAG_ALWAYS_POWERED) return 0; - if (chip->flags & TPM_CHIP_FLAG_TPM2) { + if (chip->flags & TPM_CHIP_FLAG_TPM2) tpm2_shutdown(chip, TPM2_SU_STATE); - return 0; - } - - /* for buggy tpm, flush pcrs with extend to selected dummy */ - if (tpm_suspend_pcr) - rc = tpm1_pcr_extend(chip, tpm_suspend_pcr, dummy_hash, - "extending dummy pcr before suspend"); - - /* now do the actual savestate */ - for (try = 0; try < TPM_RETRY; try++) { - cmd.header.in = savestate_header; - rc = tpm_transmit_cmd(chip, NULL, &cmd, SAVESTATE_RESULT_SIZE, - 0, 0, NULL); - - /* - * If the TPM indicates that it is too busy to respond to - * this command then retry before giving up. It can take - * several seconds for this TPM to be ready. - * - * This can happen if the TPM has already been sent the - * SaveState command before the driver has loaded. TCG 1.2 - * specification states that any communication after SaveState - * may cause the TPM to invalidate previously saved state. 
- */ - if (rc != TPM_WARN_RETRY) - break; - tpm_msleep(TPM_TIMEOUT_RETRY); - } - - if (rc) - dev_err(&chip->dev, - "Error (%d) sending savestate before suspend\n", rc); - else if (try > 0) - dev_warn(&chip->dev, "TPM savestate took %dms\n", - try * TPM_TIMEOUT_RETRY); + else + rc = tpm1_pm_suspend(chip, tpm_suspend_pcr); return rc; } @@ -1267,74 +595,32 @@ int tpm_pm_resume(struct device *dev) } EXPORT_SYMBOL_GPL(tpm_pm_resume); -#define TPM_GETRANDOM_RESULT_SIZE 18 -static const struct tpm_input_header tpm_getrandom_header = { - .tag = cpu_to_be16(TPM_TAG_RQU_COMMAND), - .length = cpu_to_be32(14), - .ordinal = cpu_to_be32(TPM_ORD_GET_RANDOM) -}; - /** * tpm_get_random() - get random bytes from the TPM's RNG * @chip: a &struct tpm_chip instance, %NULL for the default chip * @out: destination buffer for the random bytes * @max: the max number of bytes to write to @out * - * Return: same as with tpm_transmit_cmd() + * Return: number of random bytes read or a negative error value. */ int tpm_get_random(struct tpm_chip *chip, u8 *out, size_t max) { - struct tpm_cmd_t tpm_cmd; - u32 recd, num_bytes = min_t(u32, max, TPM_MAX_RNG_DATA), rlength; - int err, total = 0, retries = 5; - u8 *dest = out; + int rc; - if (!out || !num_bytes || max > TPM_MAX_RNG_DATA) + if (!out || max > TPM_MAX_RNG_DATA) return -EINVAL; chip = tpm_find_get_ops(chip); if (!chip) return -ENODEV; - if (chip->flags & TPM_CHIP_FLAG_TPM2) { - err = tpm2_get_random(chip, out, max); - tpm_put_ops(chip); - return err; - } - - do { - tpm_cmd.header.in = tpm_getrandom_header; - tpm_cmd.params.getrandom_in.num_bytes = cpu_to_be32(num_bytes); - - err = tpm_transmit_cmd(chip, NULL, &tpm_cmd, - TPM_GETRANDOM_RESULT_SIZE + num_bytes, - offsetof(struct tpm_getrandom_out, - rng_data), - 0, "attempting get random"); - if (err) - break; - - recd = be32_to_cpu(tpm_cmd.params.getrandom_out.rng_data_len); - if (recd > num_bytes) { - total = -EFAULT; - break; - } - - rlength = be32_to_cpu(tpm_cmd.header.out.length); - if 
(rlength < offsetof(struct tpm_getrandom_out, rng_data) + - recd) { - total = -EFAULT; - break; - } - memcpy(dest, tpm_cmd.params.getrandom_out.rng_data, recd); - - dest += recd; - total += recd; - num_bytes -= recd; - } while (retries-- && total < max); + if (chip->flags & TPM_CHIP_FLAG_TPM2) + rc = tpm2_get_random(chip, out, max); + else + rc = tpm1_get_random(chip, out, max); tpm_put_ops(chip); - return total ? total : -EIO; + return rc; } EXPORT_SYMBOL_GPL(tpm_get_random); @@ -1407,19 +693,32 @@ static int __init tpm_init(void) tpmrm_class = class_create(THIS_MODULE, "tpmrm"); if (IS_ERR(tpmrm_class)) { pr_err("couldn't create tpmrm class\n"); - class_destroy(tpm_class); - return PTR_ERR(tpmrm_class); + rc = PTR_ERR(tpmrm_class); + goto out_destroy_tpm_class; } rc = alloc_chrdev_region(&tpm_devt, 0, 2*TPM_NUM_DEVICES, "tpm"); if (rc < 0) { pr_err("tpm: failed to allocate char dev region\n"); - class_destroy(tpmrm_class); - class_destroy(tpm_class); - return rc; + goto out_destroy_tpmrm_class; + } + + rc = tpm_dev_common_init(); + if (rc) { + pr_err("tpm: failed to allocate char dev region\n"); + goto out_unreg_chrdev; } return 0; + +out_unreg_chrdev: + unregister_chrdev_region(tpm_devt, 2 * TPM_NUM_DEVICES); +out_destroy_tpmrm_class: + class_destroy(tpmrm_class); +out_destroy_tpm_class: + class_destroy(tpm_class); + + return rc; } static void __exit tpm_exit(void) @@ -1428,6 +727,7 @@ static void __exit tpm_exit(void) class_destroy(tpm_class); class_destroy(tpmrm_class); unregister_chrdev_region(tpm_devt, 2*TPM_NUM_DEVICES); + tpm_dev_common_exit(); } subsys_initcall(tpm_init); diff --git a/drivers/char/tpm/tpm-sysfs.c b/drivers/char/tpm/tpm-sysfs.c index 83a77a4455380..b88e08ec2c595 100644 --- a/drivers/char/tpm/tpm-sysfs.c +++ b/drivers/char/tpm/tpm-sysfs.c @@ -102,19 +102,19 @@ static ssize_t pcrs_show(struct device *dev, struct device_attribute *attr, cap_t cap; u8 digest[TPM_DIGEST_SIZE]; ssize_t rc; - int i, j, num_pcrs; + u32 i, j, num_pcrs; char *str = 
buf; struct tpm_chip *chip = to_tpm_chip(dev); - rc = tpm_getcap(chip, TPM_CAP_PROP_PCR, &cap, - "attempting to determine the number of PCRS", - sizeof(cap.num_pcrs)); + rc = tpm1_getcap(chip, TPM_CAP_PROP_PCR, &cap, + "attempting to determine the number of PCRS", + sizeof(cap.num_pcrs)); if (rc) return 0; num_pcrs = be32_to_cpu(cap.num_pcrs); for (i = 0; i < num_pcrs; i++) { - rc = tpm_pcr_read_dev(chip, i, digest); + rc = tpm1_pcr_read(chip, i, digest); if (rc) break; str += sprintf(str, "PCR-%02d: ", i); @@ -132,9 +132,9 @@ static ssize_t enabled_show(struct device *dev, struct device_attribute *attr, cap_t cap; ssize_t rc; - rc = tpm_getcap(to_tpm_chip(dev), TPM_CAP_FLAG_PERM, &cap, - "attempting to determine the permanent enabled state", - sizeof(cap.perm_flags)); + rc = tpm1_getcap(to_tpm_chip(dev), TPM_CAP_FLAG_PERM, &cap, + "attempting to determine the permanent enabled state", + sizeof(cap.perm_flags)); if (rc) return 0; @@ -149,9 +149,9 @@ static ssize_t active_show(struct device *dev, struct device_attribute *attr, cap_t cap; ssize_t rc; - rc = tpm_getcap(to_tpm_chip(dev), TPM_CAP_FLAG_PERM, &cap, - "attempting to determine the permanent active state", - sizeof(cap.perm_flags)); + rc = tpm1_getcap(to_tpm_chip(dev), TPM_CAP_FLAG_PERM, &cap, + "attempting to determine the permanent active state", + sizeof(cap.perm_flags)); if (rc) return 0; @@ -166,9 +166,9 @@ static ssize_t owned_show(struct device *dev, struct device_attribute *attr, cap_t cap; ssize_t rc; - rc = tpm_getcap(to_tpm_chip(dev), TPM_CAP_PROP_OWNER, &cap, - "attempting to determine the owner state", - sizeof(cap.owned)); + rc = tpm1_getcap(to_tpm_chip(dev), TPM_CAP_PROP_OWNER, &cap, + "attempting to determine the owner state", + sizeof(cap.owned)); if (rc) return 0; @@ -183,9 +183,9 @@ static ssize_t temp_deactivated_show(struct device *dev, cap_t cap; ssize_t rc; - rc = tpm_getcap(to_tpm_chip(dev), TPM_CAP_FLAG_VOL, &cap, - "attempting to determine the temporary state", - 
sizeof(cap.stclear_flags)); + rc = tpm1_getcap(to_tpm_chip(dev), TPM_CAP_FLAG_VOL, &cap, + "attempting to determine the temporary state", + sizeof(cap.stclear_flags)); if (rc) return 0; @@ -202,18 +202,18 @@ static ssize_t caps_show(struct device *dev, struct device_attribute *attr, ssize_t rc; char *str = buf; - rc = tpm_getcap(chip, TPM_CAP_PROP_MANUFACTURER, &cap, - "attempting to determine the manufacturer", - sizeof(cap.manufacturer_id)); + rc = tpm1_getcap(chip, TPM_CAP_PROP_MANUFACTURER, &cap, + "attempting to determine the manufacturer", + sizeof(cap.manufacturer_id)); if (rc) return 0; str += sprintf(str, "Manufacturer: 0x%x\n", be32_to_cpu(cap.manufacturer_id)); /* Try to get a TPM version 1.2 TPM_CAP_VERSION_INFO */ - rc = tpm_getcap(chip, TPM_CAP_VERSION_1_2, &cap, - "attempting to determine the 1.2 version", - sizeof(cap.tpm_version_1_2)); + rc = tpm1_getcap(chip, TPM_CAP_VERSION_1_2, &cap, + "attempting to determine the 1.2 version", + sizeof(cap.tpm_version_1_2)); if (!rc) { str += sprintf(str, "TCG version: %d.%d\nFirmware version: %d.%d\n", @@ -223,9 +223,9 @@ static ssize_t caps_show(struct device *dev, struct device_attribute *attr, cap.tpm_version_1_2.revMinor); } else { /* Otherwise just use TPM_STRUCT_VER */ - rc = tpm_getcap(chip, TPM_CAP_VERSION_1_1, &cap, - "attempting to determine the 1.1 version", - sizeof(cap.tpm_version)); + rc = tpm1_getcap(chip, TPM_CAP_VERSION_1_1, &cap, + "attempting to determine the 1.1 version", + sizeof(cap.tpm_version)); if (rc) return 0; str += sprintf(str, diff --git a/drivers/char/tpm/tpm.h b/drivers/char/tpm/tpm.h index f3501d05264f5..f27d1f38a93d7 100644 --- a/drivers/char/tpm/tpm.h +++ b/drivers/char/tpm/tpm.h @@ -134,22 +134,31 @@ enum tpm2_algorithms { }; enum tpm2_command_codes { - TPM2_CC_FIRST = 0x011F, - TPM2_CC_CREATE_PRIMARY = 0x0131, - TPM2_CC_SELF_TEST = 0x0143, - TPM2_CC_STARTUP = 0x0144, - TPM2_CC_SHUTDOWN = 0x0145, - TPM2_CC_CREATE = 0x0153, - TPM2_CC_LOAD = 0x0157, - TPM2_CC_UNSEAL = 0x015E, 
- TPM2_CC_CONTEXT_LOAD = 0x0161, - TPM2_CC_CONTEXT_SAVE = 0x0162, - TPM2_CC_FLUSH_CONTEXT = 0x0165, - TPM2_CC_GET_CAPABILITY = 0x017A, - TPM2_CC_GET_RANDOM = 0x017B, - TPM2_CC_PCR_READ = 0x017E, - TPM2_CC_PCR_EXTEND = 0x0182, - TPM2_CC_LAST = 0x018F, + TPM2_CC_FIRST = 0x011F, + TPM2_CC_HIERARCHY_CONTROL = 0x0121, + TPM2_CC_HIERARCHY_CHANGE_AUTH = 0x0129, + TPM2_CC_CREATE_PRIMARY = 0x0131, + TPM2_CC_SEQUENCE_COMPLETE = 0x013E, + TPM2_CC_SELF_TEST = 0x0143, + TPM2_CC_STARTUP = 0x0144, + TPM2_CC_SHUTDOWN = 0x0145, + TPM2_CC_NV_READ = 0x014E, + TPM2_CC_CREATE = 0x0153, + TPM2_CC_LOAD = 0x0157, + TPM2_CC_SEQUENCE_UPDATE = 0x015C, + TPM2_CC_UNSEAL = 0x015E, + TPM2_CC_CONTEXT_LOAD = 0x0161, + TPM2_CC_CONTEXT_SAVE = 0x0162, + TPM2_CC_FLUSH_CONTEXT = 0x0165, + TPM2_CC_VERIFY_SIGNATURE = 0x0177, + TPM2_CC_GET_CAPABILITY = 0x017A, + TPM2_CC_GET_RANDOM = 0x017B, + TPM2_CC_PCR_READ = 0x017E, + TPM2_CC_PCR_EXTEND = 0x0182, + TPM2_CC_EVENT_SEQUENCE_COMPLETE = 0x0185, + TPM2_CC_HASH_SEQUENCE_START = 0x0186, + TPM2_CC_CREATE_LOADED = 0x0191, + TPM2_CC_LAST = 0x0193, /* Spec 1.36 */ }; enum tpm2_permanent_handles { @@ -368,18 +377,6 @@ enum tpm_sub_capabilities { TPM_CAP_PROP_TIS_DURATION = 0x120, }; -typedef union { - struct tpm_input_header in; - struct tpm_output_header out; -} tpm_cmd_header; - -struct tpm_pcrread_out { - u8 pcr_result[TPM_DIGEST_SIZE]; -} __packed; - -struct tpm_pcrread_in { - __be32 pcr_idx; -} __packed; /* 128 bytes is an arbitrary cap. This could be as large as TPM_BUFSIZE - 18 * bytes, but 128 is still a relatively large number of random bytes and @@ -387,28 +384,6 @@ struct tpm_pcrread_in { * compiler warnings about stack frame size. 
*/ #define TPM_MAX_RNG_DATA 128 -struct tpm_getrandom_out { - __be32 rng_data_len; - u8 rng_data[TPM_MAX_RNG_DATA]; -} __packed; - -struct tpm_getrandom_in { - __be32 num_bytes; -} __packed; - -typedef union { - struct tpm_pcrread_in pcrread_in; - struct tpm_pcrread_out pcrread_out; - struct tpm_getrandom_in getrandom_in; - struct tpm_getrandom_out getrandom_out; -} tpm_cmd_params; - -struct tpm_cmd_t { - tpm_cmd_header header; - tpm_cmd_params params; -} __packed; - - /* A string buffer type for constructing TPM commands. This is based on the * ideas of string buffer code in security/keys/trusted.h but is heap based * in order to keep the stack usage minimal. @@ -531,12 +506,20 @@ ssize_t tpm_transmit_cmd(struct tpm_chip *chip, struct tpm_space *space, void *buf, size_t bufsiz, size_t min_rsp_body_length, unsigned int flags, const char *desc); -int tpm_startup(struct tpm_chip *chip); -ssize_t tpm_getcap(struct tpm_chip *chip, u32 subcap_id, cap_t *cap, - const char *desc, size_t min_cap_length); int tpm_get_timeouts(struct tpm_chip *); +int tpm_auto_startup(struct tpm_chip *chip); + +int tpm1_pm_suspend(struct tpm_chip *chip, u32 tpm_suspend_pcr); int tpm1_auto_startup(struct tpm_chip *chip); -int tpm_do_selftest(struct tpm_chip *chip); +int tpm1_do_selftest(struct tpm_chip *chip); +int tpm1_get_timeouts(struct tpm_chip *chip); +unsigned long tpm1_calc_ordinal_duration(struct tpm_chip *chip, u32 ordinal); +int tpm1_pcr_extend(struct tpm_chip *chip, u32 pcr_idx, const u8 *hash, + const char *log_msg); +int tpm1_pcr_read(struct tpm_chip *chip, u32 pcr_idx, u8 *res_buf); +ssize_t tpm1_getcap(struct tpm_chip *chip, u32 subcap_id, cap_t *cap, + const char *desc, size_t min_cap_length); +int tpm1_get_random(struct tpm_chip *chip, u8 *out, size_t max); unsigned long tpm_calc_ordinal_duration(struct tpm_chip *chip, u32 ordinal); int tpm_pm_suspend(struct device *dev); int tpm_pm_resume(struct device *dev); @@ -560,7 +543,6 @@ void tpm_chip_unregister(struct tpm_chip 
*chip); void tpm_sysfs_add_device(struct tpm_chip *chip); -int tpm_pcr_read_dev(struct tpm_chip *chip, int pcr_idx, u8 *res_buf); #ifdef CONFIG_ACPI extern void tpm_add_ppi(struct tpm_chip *chip); @@ -575,8 +557,9 @@ static inline u32 tpm2_rc_value(u32 rc) return (rc & BIT(7)) ? rc & 0xff : rc; } -int tpm2_pcr_read(struct tpm_chip *chip, int pcr_idx, u8 *res_buf); -int tpm2_pcr_extend(struct tpm_chip *chip, int pcr_idx, u32 count, +int tpm2_get_timeouts(struct tpm_chip *chip); +int tpm2_pcr_read(struct tpm_chip *chip, u32 pcr_idx, u8 *res_buf); +int tpm2_pcr_extend(struct tpm_chip *chip, u32 pcr_idx, u32 count, struct tpm2_digest *digests); int tpm2_get_random(struct tpm_chip *chip, u8 *dest, size_t max); void tpm2_flush_context_cmd(struct tpm_chip *chip, u32 handle, @@ -604,4 +587,6 @@ int tpm2_commit_space(struct tpm_chip *chip, struct tpm_space *space, int tpm_bios_log_setup(struct tpm_chip *chip); void tpm_bios_log_teardown(struct tpm_chip *chip); +int tpm_dev_common_init(void); +void tpm_dev_common_exit(void); #endif diff --git a/drivers/char/tpm/tpm1-cmd.c b/drivers/char/tpm/tpm1-cmd.c new file mode 100644 index 0000000000000..6f306338953b3 --- /dev/null +++ b/drivers/char/tpm/tpm1-cmd.c @@ -0,0 +1,781 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2004 IBM Corporation + * Copyright (C) 2014 Intel Corporation + * + * Authors: + * Leendert van Doorn + * Dave Safford + * Reiner Sailer + * Kylene Hall + * + * Device driver for TCG/TCPA TPM (trusted platform module). + * Specifications at www.trustedcomputinggroup.org + */ + +#include +#include +#include +#include +#include +#include + +#include "tpm.h" + +#define TPM_MAX_ORDINAL 243 + +/* + * Array with one entry per ordinal defining the maximum amount + * of time the chip could take to return the result. The ordinal + * designation of short, medium or long is defined in a table in + * TCG Specification TPM Main Part 2 TPM Structures Section 17. 
The + * values of the SHORT, MEDIUM, and LONG durations are retrieved + * from the chip during initialization with a call to tpm_get_timeouts. + */ +static const u8 tpm1_ordinal_duration[TPM_MAX_ORDINAL] = { + TPM_UNDEFINED, /* 0 */ + TPM_UNDEFINED, + TPM_UNDEFINED, + TPM_UNDEFINED, + TPM_UNDEFINED, + TPM_UNDEFINED, /* 5 */ + TPM_UNDEFINED, + TPM_UNDEFINED, + TPM_UNDEFINED, + TPM_UNDEFINED, + TPM_SHORT, /* 10 */ + TPM_SHORT, + TPM_MEDIUM, + TPM_LONG, + TPM_LONG, + TPM_MEDIUM, /* 15 */ + TPM_SHORT, + TPM_SHORT, + TPM_MEDIUM, + TPM_LONG, + TPM_SHORT, /* 20 */ + TPM_SHORT, + TPM_MEDIUM, + TPM_MEDIUM, + TPM_MEDIUM, + TPM_SHORT, /* 25 */ + TPM_SHORT, + TPM_MEDIUM, + TPM_SHORT, + TPM_SHORT, + TPM_MEDIUM, /* 30 */ + TPM_LONG, + TPM_MEDIUM, + TPM_SHORT, + TPM_SHORT, + TPM_SHORT, /* 35 */ + TPM_MEDIUM, + TPM_MEDIUM, + TPM_UNDEFINED, + TPM_UNDEFINED, + TPM_MEDIUM, /* 40 */ + TPM_LONG, + TPM_MEDIUM, + TPM_SHORT, + TPM_SHORT, + TPM_SHORT, /* 45 */ + TPM_SHORT, + TPM_SHORT, + TPM_SHORT, + TPM_LONG, + TPM_MEDIUM, /* 50 */ + TPM_MEDIUM, + TPM_UNDEFINED, + TPM_UNDEFINED, + TPM_UNDEFINED, + TPM_UNDEFINED, /* 55 */ + TPM_UNDEFINED, + TPM_UNDEFINED, + TPM_UNDEFINED, + TPM_UNDEFINED, + TPM_MEDIUM, /* 60 */ + TPM_MEDIUM, + TPM_MEDIUM, + TPM_SHORT, + TPM_SHORT, + TPM_MEDIUM, /* 65 */ + TPM_UNDEFINED, + TPM_UNDEFINED, + TPM_UNDEFINED, + TPM_UNDEFINED, + TPM_SHORT, /* 70 */ + TPM_SHORT, + TPM_UNDEFINED, + TPM_UNDEFINED, + TPM_UNDEFINED, + TPM_UNDEFINED, /* 75 */ + TPM_UNDEFINED, + TPM_UNDEFINED, + TPM_UNDEFINED, + TPM_UNDEFINED, + TPM_LONG, /* 80 */ + TPM_UNDEFINED, + TPM_MEDIUM, + TPM_LONG, + TPM_SHORT, + TPM_UNDEFINED, /* 85 */ + TPM_UNDEFINED, + TPM_UNDEFINED, + TPM_UNDEFINED, + TPM_UNDEFINED, + TPM_SHORT, /* 90 */ + TPM_SHORT, + TPM_SHORT, + TPM_SHORT, + TPM_SHORT, + TPM_UNDEFINED, /* 95 */ + TPM_UNDEFINED, + TPM_UNDEFINED, + TPM_UNDEFINED, + TPM_UNDEFINED, + TPM_MEDIUM, /* 100 */ + TPM_SHORT, + TPM_SHORT, + TPM_UNDEFINED, + TPM_UNDEFINED, + TPM_UNDEFINED, /* 105 */ + TPM_UNDEFINED, + 
TPM_UNDEFINED, + TPM_UNDEFINED, + TPM_UNDEFINED, + TPM_SHORT, /* 110 */ + TPM_SHORT, + TPM_SHORT, + TPM_SHORT, + TPM_SHORT, + TPM_SHORT, /* 115 */ + TPM_SHORT, + TPM_SHORT, + TPM_UNDEFINED, + TPM_UNDEFINED, + TPM_LONG, /* 120 */ + TPM_LONG, + TPM_MEDIUM, + TPM_UNDEFINED, + TPM_SHORT, + TPM_SHORT, /* 125 */ + TPM_SHORT, + TPM_LONG, + TPM_SHORT, + TPM_SHORT, + TPM_SHORT, /* 130 */ + TPM_MEDIUM, + TPM_UNDEFINED, + TPM_SHORT, + TPM_MEDIUM, + TPM_UNDEFINED, /* 135 */ + TPM_UNDEFINED, + TPM_UNDEFINED, + TPM_UNDEFINED, + TPM_UNDEFINED, + TPM_SHORT, /* 140 */ + TPM_SHORT, + TPM_UNDEFINED, + TPM_UNDEFINED, + TPM_UNDEFINED, + TPM_UNDEFINED, /* 145 */ + TPM_UNDEFINED, + TPM_UNDEFINED, + TPM_UNDEFINED, + TPM_UNDEFINED, + TPM_SHORT, /* 150 */ + TPM_MEDIUM, + TPM_MEDIUM, + TPM_SHORT, + TPM_SHORT, + TPM_UNDEFINED, /* 155 */ + TPM_UNDEFINED, + TPM_UNDEFINED, + TPM_UNDEFINED, + TPM_UNDEFINED, + TPM_SHORT, /* 160 */ + TPM_SHORT, + TPM_SHORT, + TPM_SHORT, + TPM_UNDEFINED, + TPM_UNDEFINED, /* 165 */ + TPM_UNDEFINED, + TPM_UNDEFINED, + TPM_UNDEFINED, + TPM_UNDEFINED, + TPM_LONG, /* 170 */ + TPM_UNDEFINED, + TPM_UNDEFINED, + TPM_UNDEFINED, + TPM_UNDEFINED, + TPM_UNDEFINED, /* 175 */ + TPM_UNDEFINED, + TPM_UNDEFINED, + TPM_UNDEFINED, + TPM_UNDEFINED, + TPM_MEDIUM, /* 180 */ + TPM_SHORT, + TPM_MEDIUM, + TPM_MEDIUM, + TPM_MEDIUM, + TPM_MEDIUM, /* 185 */ + TPM_SHORT, + TPM_UNDEFINED, + TPM_UNDEFINED, + TPM_UNDEFINED, + TPM_UNDEFINED, /* 190 */ + TPM_UNDEFINED, + TPM_UNDEFINED, + TPM_UNDEFINED, + TPM_UNDEFINED, + TPM_UNDEFINED, /* 195 */ + TPM_UNDEFINED, + TPM_UNDEFINED, + TPM_UNDEFINED, + TPM_UNDEFINED, + TPM_SHORT, /* 200 */ + TPM_UNDEFINED, + TPM_UNDEFINED, + TPM_UNDEFINED, + TPM_SHORT, + TPM_SHORT, /* 205 */ + TPM_SHORT, + TPM_SHORT, + TPM_SHORT, + TPM_SHORT, + TPM_MEDIUM, /* 210 */ + TPM_UNDEFINED, + TPM_MEDIUM, + TPM_MEDIUM, + TPM_MEDIUM, + TPM_UNDEFINED, /* 215 */ + TPM_MEDIUM, + TPM_UNDEFINED, + TPM_UNDEFINED, + TPM_SHORT, + TPM_SHORT, /* 220 */ + TPM_SHORT, + TPM_SHORT, + TPM_SHORT, 
+ TPM_SHORT, + TPM_UNDEFINED, /* 225 */ + TPM_UNDEFINED, + TPM_UNDEFINED, + TPM_UNDEFINED, + TPM_UNDEFINED, + TPM_SHORT, /* 230 */ + TPM_LONG, + TPM_MEDIUM, + TPM_UNDEFINED, + TPM_UNDEFINED, + TPM_UNDEFINED, /* 235 */ + TPM_UNDEFINED, + TPM_UNDEFINED, + TPM_UNDEFINED, + TPM_UNDEFINED, + TPM_SHORT, /* 240 */ + TPM_UNDEFINED, + TPM_MEDIUM, +}; + +/** + * tpm1_calc_ordinal_duration() - calculate the maximum command duration + * @chip: TPM chip to use. + * @ordinal: TPM command ordinal. + * + * The function returns the maximum amount of time the chip could take + * to return the result for a particular ordinal in jiffies. + * + * Return: A maximal duration time for an ordinal in jiffies. + */ +unsigned long tpm1_calc_ordinal_duration(struct tpm_chip *chip, u32 ordinal) +{ + int duration_idx = TPM_UNDEFINED; + int duration = 0; + + /* + * We only have a duration table for protected commands, where the upper + * 16 bits are 0. For the few other ordinals the fallback will be used. + */ + if (ordinal < TPM_MAX_ORDINAL) + duration_idx = tpm1_ordinal_duration[ordinal]; + + if (duration_idx != TPM_UNDEFINED) + duration = chip->duration[duration_idx]; + if (duration <= 0) + return 2 * 60 * HZ; + else + return duration; +} + +#define TPM_ORD_STARTUP 153 +#define TPM_ST_CLEAR 1 + +/** + * tpm1_startup() - turn on the TPM + * @chip: TPM chip to use + * + * Normally the firmware should start the TPM. This function is provided as a + * workaround if this does not happen. A legal case for this could be for + * example when a TPM emulator is used. 
+ * + * Return: same as tpm_transmit_cmd() + */ +static int tpm1_startup(struct tpm_chip *chip) +{ + struct tpm_buf buf; + int rc; + + dev_info(&chip->dev, "starting up the TPM manually\n"); + + rc = tpm_buf_init(&buf, TPM_TAG_RQU_COMMAND, TPM_ORD_STARTUP); + if (rc < 0) + return rc; + + tpm_buf_append_u16(&buf, TPM_ST_CLEAR); + + rc = tpm_transmit_cmd(chip, NULL, buf.data, PAGE_SIZE, 0, 0, + "attempting to start the TPM"); + + tpm_buf_destroy(&buf); + + return rc; +} + +int tpm1_get_timeouts(struct tpm_chip *chip) +{ + cap_t cap; + unsigned long timeout_old[4], timeout_chip[4], timeout_eff[4]; + ssize_t rc; + + rc = tpm1_getcap(chip, TPM_CAP_PROP_TIS_TIMEOUT, &cap, NULL, + sizeof(cap.timeout)); + if (rc == TPM_ERR_INVALID_POSTINIT) { + if (tpm1_startup(chip)) + return rc; + + rc = tpm1_getcap(chip, TPM_CAP_PROP_TIS_TIMEOUT, &cap, + "attempting to determine the timeouts", + sizeof(cap.timeout)); + } + + if (rc) { + dev_err(&chip->dev, "A TPM error (%zd) occurred attempting to determine the timeouts\n", + rc); + return rc; + } + + timeout_old[0] = jiffies_to_usecs(chip->timeout_a); + timeout_old[1] = jiffies_to_usecs(chip->timeout_b); + timeout_old[2] = jiffies_to_usecs(chip->timeout_c); + timeout_old[3] = jiffies_to_usecs(chip->timeout_d); + timeout_chip[0] = be32_to_cpu(cap.timeout.a); + timeout_chip[1] = be32_to_cpu(cap.timeout.b); + timeout_chip[2] = be32_to_cpu(cap.timeout.c); + timeout_chip[3] = be32_to_cpu(cap.timeout.d); + memcpy(timeout_eff, timeout_chip, sizeof(timeout_eff)); + + /* + * Provide ability for vendor overrides of timeout values in case + * of misreporting. 
 */ + if (chip->ops->update_timeouts) + chip->timeout_adjusted = + chip->ops->update_timeouts(chip, timeout_eff); + + if (!chip->timeout_adjusted) { + /* Restore default if chip reported 0 */ + unsigned int i; + + for (i = 0; i < ARRAY_SIZE(timeout_eff); i++) { + if (timeout_eff[i]) + continue; + + timeout_eff[i] = timeout_old[i]; + chip->timeout_adjusted = true; + } + + if (timeout_eff[0] != 0 && timeout_eff[0] < 1000) { + /* timeouts in msec rather than usec */ + for (i = 0; i != ARRAY_SIZE(timeout_eff); i++) + timeout_eff[i] *= 1000; + chip->timeout_adjusted = true; + } + } + + /* Report adjusted timeouts */ + if (chip->timeout_adjusted) { + dev_info(&chip->dev, HW_ERR "Adjusting reported timeouts: A %lu->%luus B %lu->%luus C %lu->%luus D %lu->%luus\n", + timeout_chip[0], timeout_eff[0], + timeout_chip[1], timeout_eff[1], + timeout_chip[2], timeout_eff[2], + timeout_chip[3], timeout_eff[3]); + } + + chip->timeout_a = usecs_to_jiffies(timeout_eff[0]); + chip->timeout_b = usecs_to_jiffies(timeout_eff[1]); + chip->timeout_c = usecs_to_jiffies(timeout_eff[2]); + chip->timeout_d = usecs_to_jiffies(timeout_eff[3]); + + rc = tpm1_getcap(chip, TPM_CAP_PROP_TIS_DURATION, &cap, + "attempting to determine the durations", + sizeof(cap.duration)); + if (rc) + return rc; + + chip->duration[TPM_SHORT] = + usecs_to_jiffies(be32_to_cpu(cap.duration.tpm_short)); + chip->duration[TPM_MEDIUM] = + usecs_to_jiffies(be32_to_cpu(cap.duration.tpm_medium)); + chip->duration[TPM_LONG] = + usecs_to_jiffies(be32_to_cpu(cap.duration.tpm_long)); + chip->duration[TPM_LONG_LONG] = 0; /* not used under 1.2 */ + + /* The Broadcom BCM0102 chipset in a Dell Latitude D820 gets the above + * value wrong and apparently reports msecs rather than usecs. So we + * fix up the resulting too-small TPM_SHORT value to make things work. + * We also scale the TPM_MEDIUM and -_LONG values by 1000. 
+ */ + if (chip->duration[TPM_SHORT] < (HZ / 100)) { + chip->duration[TPM_SHORT] = HZ; + chip->duration[TPM_MEDIUM] *= 1000; + chip->duration[TPM_LONG] *= 1000; + chip->duration_adjusted = true; + dev_info(&chip->dev, "Adjusting TPM timeout parameters."); + } + + chip->flags |= TPM_CHIP_FLAG_HAVE_TIMEOUTS; + return 0; +} + +#define TPM_ORD_PCR_EXTEND 20 +int tpm1_pcr_extend(struct tpm_chip *chip, u32 pcr_idx, const u8 *hash, + const char *log_msg) +{ + struct tpm_buf buf; + int rc; + + rc = tpm_buf_init(&buf, TPM_TAG_RQU_COMMAND, TPM_ORD_PCR_EXTEND); + if (rc) + return rc; + + tpm_buf_append_u32(&buf, pcr_idx); + tpm_buf_append(&buf, hash, TPM_DIGEST_SIZE); + + rc = tpm_transmit_cmd(chip, NULL, buf.data, PAGE_SIZE, + TPM_DIGEST_SIZE, 0, log_msg); + + tpm_buf_destroy(&buf); + return rc; +} + +#define TPM_ORD_GET_CAP 101 +ssize_t tpm1_getcap(struct tpm_chip *chip, u32 subcap_id, cap_t *cap, + const char *desc, size_t min_cap_length) +{ + struct tpm_buf buf; + int rc; + + rc = tpm_buf_init(&buf, TPM_TAG_RQU_COMMAND, TPM_ORD_GET_CAP); + if (rc) + return rc; + + if (subcap_id == TPM_CAP_VERSION_1_1 || + subcap_id == TPM_CAP_VERSION_1_2) { + tpm_buf_append_u32(&buf, subcap_id); + tpm_buf_append_u32(&buf, 0); + } else { + if (subcap_id == TPM_CAP_FLAG_PERM || + subcap_id == TPM_CAP_FLAG_VOL) + tpm_buf_append_u32(&buf, TPM_CAP_FLAG); + else + tpm_buf_append_u32(&buf, TPM_CAP_PROP); + + tpm_buf_append_u32(&buf, 4); + tpm_buf_append_u32(&buf, subcap_id); + } + rc = tpm_transmit_cmd(chip, NULL, buf.data, PAGE_SIZE, + min_cap_length, 0, desc); + if (!rc) + *cap = *(cap_t *)&buf.data[TPM_HEADER_SIZE + 4]; + + tpm_buf_destroy(&buf); + return rc; +} +EXPORT_SYMBOL_GPL(tpm1_getcap); + +#define TPM_ORD_GET_RANDOM 70 +struct tpm1_get_random_out { + __be32 rng_data_len; + u8 rng_data[TPM_MAX_RNG_DATA]; +} __packed; + +/** + * tpm1_get_random() - get random bytes from the TPM's RNG + * @chip: a &struct tpm_chip instance + * @dest: destination buffer for the random bytes + * @max: the 
maximum number of bytes to write to @dest + * + * Return: + * * number of bytes read + * * -errno or a TPM return code otherwise + */ +int tpm1_get_random(struct tpm_chip *chip, u8 *dest, size_t max) +{ + struct tpm1_get_random_out *out; + u32 num_bytes = min_t(u32, max, TPM_MAX_RNG_DATA); + struct tpm_buf buf; + u32 total = 0; + int retries = 5; + u32 recd; + int rc; + + rc = tpm_buf_init(&buf, TPM_TAG_RQU_COMMAND, TPM_ORD_GET_RANDOM); + if (rc) + return rc; + + do { + tpm_buf_append_u32(&buf, num_bytes); + + rc = tpm_transmit_cmd(chip, NULL, buf.data, PAGE_SIZE, + sizeof(out->rng_data_len), 0, + "attempting get random"); + if (rc) + goto out; + + out = (struct tpm1_get_random_out *)&buf.data[TPM_HEADER_SIZE]; + + recd = be32_to_cpu(out->rng_data_len); + if (recd > num_bytes) { + rc = -EFAULT; + goto out; + } + + if (tpm_buf_length(&buf) < TPM_HEADER_SIZE + + sizeof(out->rng_data_len) + recd) { + rc = -EFAULT; + goto out; + } + memcpy(dest, out->rng_data, recd); + + dest += recd; + total += recd; + num_bytes -= recd; + + tpm_buf_reset(&buf, TPM_TAG_RQU_COMMAND, TPM_ORD_GET_RANDOM); + } while (retries-- && total < max); + + rc = total ? 
(int)total : -EIO; +out: + tpm_buf_destroy(&buf); + return rc; +} + +#define TPM_ORD_PCRREAD 21 +int tpm1_pcr_read(struct tpm_chip *chip, u32 pcr_idx, u8 *res_buf) +{ + struct tpm_buf buf; + int rc; + + rc = tpm_buf_init(&buf, TPM_TAG_RQU_COMMAND, TPM_ORD_PCRREAD); + if (rc) + return rc; + + tpm_buf_append_u32(&buf, pcr_idx); + + rc = tpm_transmit_cmd(chip, NULL, buf.data, PAGE_SIZE, + TPM_DIGEST_SIZE, 0, + "attempting to read a pcr value"); + if (rc) + goto out; + + if (tpm_buf_length(&buf) < TPM_DIGEST_SIZE) { + rc = -EFAULT; + goto out; + } + + memcpy(res_buf, &buf.data[TPM_HEADER_SIZE], TPM_DIGEST_SIZE); + +out: + tpm_buf_destroy(&buf); + return rc; +} + +#define TPM_ORD_CONTINUE_SELFTEST 83 +/** + * tpm1_continue_selftest() - run TPM's selftest + * @chip: TPM chip to use + * + * Returns 0 on success, < 0 in case of fatal error or a value > 0 representing + * a TPM error code. + */ +static int tpm1_continue_selftest(struct tpm_chip *chip) +{ + struct tpm_buf buf; + int rc; + + rc = tpm_buf_init(&buf, TPM_TAG_RQU_COMMAND, TPM_ORD_CONTINUE_SELFTEST); + if (rc) + return rc; + + rc = tpm_transmit_cmd(chip, NULL, buf.data, PAGE_SIZE, + 0, 0, "continue selftest"); + + tpm_buf_destroy(&buf); + + return rc; +} + +/** + * tpm1_do_selftest - have the TPM continue its selftest and wait until it + * can receive further commands + * @chip: TPM chip to use + * + * Returns 0 on success, < 0 in case of fatal error or a value > 0 representing + * a TPM error code. 
+ */ +int tpm1_do_selftest(struct tpm_chip *chip) +{ + int rc; + unsigned int loops; + unsigned int delay_msec = 100; + unsigned long duration; + u8 dummy[TPM_DIGEST_SIZE]; + + duration = tpm1_calc_ordinal_duration(chip, TPM_ORD_CONTINUE_SELFTEST); + + loops = jiffies_to_msecs(duration) / delay_msec; + + rc = tpm1_continue_selftest(chip); + if (rc == TPM_ERR_INVALID_POSTINIT) { + chip->flags |= TPM_CHIP_FLAG_ALWAYS_POWERED; + dev_info(&chip->dev, "TPM not ready (%d)\n", rc); + } + /* This may fail if there was no TPM driver during a suspend/resume + * cycle; some may return 10 (BAD_ORDINAL), others 28 (FAILEDSELFTEST) + */ + if (rc) + return rc; + + do { + /* Attempt to read a PCR value */ + rc = tpm1_pcr_read(chip, 0, dummy); + + /* Some buggy TPMs will not respond to tpm_tis_ready() for + * around 300ms while the self test is ongoing, keep trying + * until the self test duration expires. + */ + if (rc == -ETIME) { + dev_info(&chip->dev, HW_ERR "TPM command timed out during continue self test"); + tpm_msleep(delay_msec); + continue; + } + + if (rc == TPM_ERR_DISABLED || rc == TPM_ERR_DEACTIVATED) { + dev_info(&chip->dev, "TPM is disabled/deactivated (0x%X)\n", + rc); + /* TPM is disabled and/or deactivated; driver can + * proceed and TPM does handle commands for + * suspend/resume correctly + */ + return 0; + } + if (rc != TPM_WARN_DOING_SELFTEST) + return rc; + tpm_msleep(delay_msec); + } while (--loops > 0); + + return rc; +} +EXPORT_SYMBOL_GPL(tpm1_do_selftest); + +/** + * tpm1_auto_startup - Perform the standard automatic TPM initialization + * sequence + * @chip: TPM chip to use + * + * Returns 0 on success, < 0 in case of fatal error. 
+ */ +int tpm1_auto_startup(struct tpm_chip *chip) +{ + int rc; + + rc = tpm1_get_timeouts(chip); + if (rc) + goto out; + rc = tpm1_do_selftest(chip); + if (rc) { + dev_err(&chip->dev, "TPM self test failed\n"); + goto out; + } + + return rc; +out: + if (rc > 0) + rc = -ENODEV; + return rc; +} + +#define TPM_ORD_SAVESTATE 152 + +/** + * tpm1_pm_suspend() - pm suspend handler + * @chip: TPM chip to use. + * @tpm_suspend_pcr: flush pcr for buggy TPM chips. + * + * The functions saves the TPM state to be restored on resume. + * + * Return: + * * 0 on success, + * * < 0 on error. + */ +int tpm1_pm_suspend(struct tpm_chip *chip, u32 tpm_suspend_pcr) +{ + u8 dummy_hash[TPM_DIGEST_SIZE] = { 0 }; + struct tpm_buf buf; + unsigned int try; + int rc; + + + /* for buggy tpm, flush pcrs with extend to selected dummy */ + if (tpm_suspend_pcr) + rc = tpm1_pcr_extend(chip, tpm_suspend_pcr, dummy_hash, + "extending dummy pcr before suspend"); + + rc = tpm_buf_init(&buf, TPM_TAG_RQU_COMMAND, TPM_ORD_SAVESTATE); + if (rc) + return rc; + /* now do the actual savestate */ + for (try = 0; try < TPM_RETRY; try++) { + rc = tpm_transmit_cmd(chip, NULL, buf.data, PAGE_SIZE, + 0, 0, NULL); + + /* + * If the TPM indicates that it is too busy to respond to + * this command then retry before giving up. It can take + * several seconds for this TPM to be ready. + * + * This can happen if the TPM has already been sent the + * SaveState command before the driver has loaded. TCG 1.2 + * specification states that any communication after SaveState + * may cause the TPM to invalidate previously saved state. 
+ */ + if (rc != TPM_WARN_RETRY) + break; + tpm_msleep(TPM_TIMEOUT_RETRY); + + tpm_buf_reset(&buf, TPM_TAG_RQU_COMMAND, TPM_ORD_SAVESTATE); + } + + if (rc) + dev_err(&chip->dev, "Error (%d) sending savestate before suspend\n", + rc); + else if (try > 0) + dev_warn(&chip->dev, "TPM savestate took %dms\n", + try * TPM_TIMEOUT_RETRY); + + tpm_buf_destroy(&buf); + + return rc; +} + diff --git a/drivers/char/tpm/tpm2-cmd.c b/drivers/char/tpm/tpm2-cmd.c index c31b490bd41d9..ba94dd5f11a41 100644 --- a/drivers/char/tpm/tpm2-cmd.c +++ b/drivers/char/tpm/tpm2-cmd.c @@ -40,129 +40,121 @@ static struct tpm2_hash tpm2_hash_map[] = { {HASH_ALGO_SM3_256, TPM2_ALG_SM3_256}, }; -/* - * Array with one entry per ordinal defining the maximum amount - * of time the chip could take to return the result. The values - * of the SHORT, MEDIUM, and LONG durations are taken from the - * PC Client Profile (PTP) specification. - * LONG_LONG is for commands that generates keys which empirically - * takes longer time on some systems. +int tpm2_get_timeouts(struct tpm_chip *chip) +{ + /* Fixed timeouts for TPM2 */ + chip->timeout_a = msecs_to_jiffies(TPM2_TIMEOUT_A); + chip->timeout_b = msecs_to_jiffies(TPM2_TIMEOUT_B); + chip->timeout_c = msecs_to_jiffies(TPM2_TIMEOUT_C); + chip->timeout_d = msecs_to_jiffies(TPM2_TIMEOUT_D); + + /* PTP spec timeouts */ + chip->duration[TPM_SHORT] = msecs_to_jiffies(TPM2_DURATION_SHORT); + chip->duration[TPM_MEDIUM] = msecs_to_jiffies(TPM2_DURATION_MEDIUM); + chip->duration[TPM_LONG] = msecs_to_jiffies(TPM2_DURATION_LONG); + + /* Key creation commands long timeouts */ + chip->duration[TPM_LONG_LONG] = + msecs_to_jiffies(TPM2_DURATION_LONG_LONG); + + chip->flags |= TPM_CHIP_FLAG_HAVE_TIMEOUTS; + + return 0; +} + +/** + * tpm2_ordinal_duration_index() - returns an index to the chip duration table + * @ordinal: TPM command ordinal. 
+ * + * The function returns an index to the chip duration table + * (enum tpm_duration), that describes the maximum amount of + * time the chip could take to return the result for a particular ordinal. + * + * The values of the MEDIUM, and LONG durations are taken + * from the PC Client Profile (PTP) specification (750, 2000 msec) + * + * LONG_LONG is for commands that generates keys which empirically takes + * a longer time on some systems. + * + * Return: + * * TPM_MEDIUM + * * TPM_LONG + * * TPM_LONG_LONG + * * TPM_UNDEFINED */ -static const u8 tpm2_ordinal_duration[TPM2_CC_LAST - TPM2_CC_FIRST + 1] = { - TPM_UNDEFINED, /* 11F */ - TPM_UNDEFINED, /* 120 */ - TPM_LONG, /* 121 */ - TPM_UNDEFINED, /* 122 */ - TPM_UNDEFINED, /* 123 */ - TPM_UNDEFINED, /* 124 */ - TPM_UNDEFINED, /* 125 */ - TPM_UNDEFINED, /* 126 */ - TPM_UNDEFINED, /* 127 */ - TPM_UNDEFINED, /* 128 */ - TPM_LONG, /* 129 */ - TPM_UNDEFINED, /* 12a */ - TPM_UNDEFINED, /* 12b */ - TPM_UNDEFINED, /* 12c */ - TPM_UNDEFINED, /* 12d */ - TPM_UNDEFINED, /* 12e */ - TPM_UNDEFINED, /* 12f */ - TPM_UNDEFINED, /* 130 */ - TPM_LONG_LONG, /* 131 */ - TPM_UNDEFINED, /* 132 */ - TPM_UNDEFINED, /* 133 */ - TPM_UNDEFINED, /* 134 */ - TPM_UNDEFINED, /* 135 */ - TPM_UNDEFINED, /* 136 */ - TPM_UNDEFINED, /* 137 */ - TPM_UNDEFINED, /* 138 */ - TPM_UNDEFINED, /* 139 */ - TPM_UNDEFINED, /* 13a */ - TPM_UNDEFINED, /* 13b */ - TPM_UNDEFINED, /* 13c */ - TPM_UNDEFINED, /* 13d */ - TPM_MEDIUM, /* 13e */ - TPM_UNDEFINED, /* 13f */ - TPM_UNDEFINED, /* 140 */ - TPM_UNDEFINED, /* 141 */ - TPM_UNDEFINED, /* 142 */ - TPM_LONG, /* 143 */ - TPM_MEDIUM, /* 144 */ - TPM_UNDEFINED, /* 145 */ - TPM_UNDEFINED, /* 146 */ - TPM_UNDEFINED, /* 147 */ - TPM_UNDEFINED, /* 148 */ - TPM_UNDEFINED, /* 149 */ - TPM_UNDEFINED, /* 14a */ - TPM_UNDEFINED, /* 14b */ - TPM_UNDEFINED, /* 14c */ - TPM_UNDEFINED, /* 14d */ - TPM_LONG, /* 14e */ - TPM_UNDEFINED, /* 14f */ - TPM_UNDEFINED, /* 150 */ - TPM_UNDEFINED, /* 151 */ - TPM_UNDEFINED, /* 152 */ - 
TPM_LONG_LONG, /* 153 */ - TPM_UNDEFINED, /* 154 */ - TPM_UNDEFINED, /* 155 */ - TPM_UNDEFINED, /* 156 */ - TPM_UNDEFINED, /* 157 */ - TPM_UNDEFINED, /* 158 */ - TPM_UNDEFINED, /* 159 */ - TPM_UNDEFINED, /* 15a */ - TPM_UNDEFINED, /* 15b */ - TPM_MEDIUM, /* 15c */ - TPM_UNDEFINED, /* 15d */ - TPM_UNDEFINED, /* 15e */ - TPM_UNDEFINED, /* 15f */ - TPM_UNDEFINED, /* 160 */ - TPM_UNDEFINED, /* 161 */ - TPM_UNDEFINED, /* 162 */ - TPM_UNDEFINED, /* 163 */ - TPM_UNDEFINED, /* 164 */ - TPM_UNDEFINED, /* 165 */ - TPM_UNDEFINED, /* 166 */ - TPM_UNDEFINED, /* 167 */ - TPM_UNDEFINED, /* 168 */ - TPM_UNDEFINED, /* 169 */ - TPM_UNDEFINED, /* 16a */ - TPM_UNDEFINED, /* 16b */ - TPM_UNDEFINED, /* 16c */ - TPM_UNDEFINED, /* 16d */ - TPM_UNDEFINED, /* 16e */ - TPM_UNDEFINED, /* 16f */ - TPM_UNDEFINED, /* 170 */ - TPM_UNDEFINED, /* 171 */ - TPM_UNDEFINED, /* 172 */ - TPM_UNDEFINED, /* 173 */ - TPM_UNDEFINED, /* 174 */ - TPM_UNDEFINED, /* 175 */ - TPM_UNDEFINED, /* 176 */ - TPM_LONG, /* 177 */ - TPM_UNDEFINED, /* 178 */ - TPM_UNDEFINED, /* 179 */ - TPM_MEDIUM, /* 17a */ - TPM_LONG, /* 17b */ - TPM_UNDEFINED, /* 17c */ - TPM_UNDEFINED, /* 17d */ - TPM_UNDEFINED, /* 17e */ - TPM_UNDEFINED, /* 17f */ - TPM_UNDEFINED, /* 180 */ - TPM_UNDEFINED, /* 181 */ - TPM_MEDIUM, /* 182 */ - TPM_UNDEFINED, /* 183 */ - TPM_UNDEFINED, /* 184 */ - TPM_MEDIUM, /* 185 */ - TPM_MEDIUM, /* 186 */ - TPM_UNDEFINED, /* 187 */ - TPM_UNDEFINED, /* 188 */ - TPM_UNDEFINED, /* 189 */ - TPM_UNDEFINED, /* 18a */ - TPM_UNDEFINED, /* 18b */ - TPM_UNDEFINED, /* 18c */ - TPM_UNDEFINED, /* 18d */ - TPM_UNDEFINED, /* 18e */ - TPM_UNDEFINED /* 18f */ -}; +static u8 tpm2_ordinal_duration_index(u32 ordinal) +{ + switch (ordinal) { + /* Startup */ + case TPM2_CC_STARTUP: /* 144 */ + return TPM_MEDIUM; + + case TPM2_CC_SELF_TEST: /* 143 */ + return TPM_LONG; + + case TPM2_CC_GET_RANDOM: /* 17B */ + return TPM_LONG; + + case TPM2_CC_SEQUENCE_UPDATE: /* 15C */ + return TPM_MEDIUM; + case TPM2_CC_SEQUENCE_COMPLETE: /* 13E */ + 
return TPM_MEDIUM; + case TPM2_CC_EVENT_SEQUENCE_COMPLETE: /* 185 */ + return TPM_MEDIUM; + case TPM2_CC_HASH_SEQUENCE_START: /* 186 */ + return TPM_MEDIUM; + + case TPM2_CC_VERIFY_SIGNATURE: /* 177 */ + return TPM_LONG; + + case TPM2_CC_PCR_EXTEND: /* 182 */ + return TPM_MEDIUM; + + case TPM2_CC_HIERARCHY_CONTROL: /* 121 */ + return TPM_LONG; + case TPM2_CC_HIERARCHY_CHANGE_AUTH: /* 129 */ + return TPM_LONG; + + case TPM2_CC_GET_CAPABILITY: /* 17A */ + return TPM_MEDIUM; + + case TPM2_CC_NV_READ: /* 14E */ + return TPM_LONG; + + case TPM2_CC_CREATE_PRIMARY: /* 131 */ + return TPM_LONG_LONG; + case TPM2_CC_CREATE: /* 153 */ + return TPM_LONG_LONG; + case TPM2_CC_CREATE_LOADED: /* 191 */ + return TPM_LONG_LONG; + + default: + return TPM_UNDEFINED; + } +} + +/** + * tpm2_calc_ordinal_duration() - calculate the maximum command duration + * @chip: TPM chip to use. + * @ordinal: TPM command ordinal. + * + * The function returns the maximum amount of time the chip could take + * to return the result for a particular ordinal in jiffies. + * + * Return: A maximal duration time for an ordinal in jiffies. + */ +unsigned long tpm2_calc_ordinal_duration(struct tpm_chip *chip, u32 ordinal) +{ + unsigned int index; + + index = tpm2_ordinal_duration_index(ordinal); + + if (index != TPM_UNDEFINED) + return chip->duration[index]; + else + return msecs_to_jiffies(TPM2_DURATION_DEFAULT); +} + struct tpm2_pcr_read_out { __be32 update_cnt; @@ -183,7 +175,7 @@ struct tpm2_pcr_read_out { * * Return: Same as with tpm_transmit_cmd. */ -int tpm2_pcr_read(struct tpm_chip *chip, int pcr_idx, u8 *res_buf) +int tpm2_pcr_read(struct tpm_chip *chip, u32 pcr_idx, u8 *res_buf) { int rc; struct tpm_buf buf; @@ -233,7 +225,7 @@ struct tpm2_null_auth_area { * * Return: Same as with tpm_transmit_cmd. 
*/ -int tpm2_pcr_extend(struct tpm_chip *chip, int pcr_idx, u32 count, +int tpm2_pcr_extend(struct tpm_chip *chip, u32 pcr_idx, u32 count, struct tpm2_digest *digests) { struct tpm_buf buf; @@ -280,7 +272,6 @@ int tpm2_pcr_extend(struct tpm_chip *chip, int pcr_idx, u32 count, return rc; } - struct tpm2_get_random_out { __be16 size; u8 buffer[TPM_MAX_RNG_DATA]; @@ -329,7 +320,9 @@ int tpm2_get_random(struct tpm_chip *chip, u8 *dest, size_t max) &buf.data[TPM_HEADER_SIZE]; recd = min_t(u32, be16_to_cpu(out->size), num_bytes); if (tpm_buf_length(&buf) < - offsetof(struct tpm2_get_random_out, buffer) + recd) { + TPM_HEADER_SIZE + + offsetof(struct tpm2_get_random_out, buffer) + + recd) { err = -EFAULT; goto out; } @@ -350,10 +343,9 @@ int tpm2_get_random(struct tpm_chip *chip, u8 *dest, size_t max) /** * tpm2_flush_context_cmd() - execute a TPM2_FlushContext command * @chip: TPM chip to use - * @payload: the key data in clear and encrypted form - * @options: authentication values and other options + * @handle: context handle + * @flags: tpm transmit flags - bitmap * - * Return: same as with tpm_transmit_cmd */ void tpm2_flush_context_cmd(struct tpm_chip *chip, u32 handle, unsigned int flags) @@ -746,32 +738,6 @@ void tpm2_shutdown(struct tpm_chip *chip, u16 shutdown_type) tpm_buf_destroy(&buf); } -/* - * tpm2_calc_ordinal_duration() - maximum duration for a command - * - * @chip: TPM chip to use. - * @ordinal: command code number. 
- * - * Return: maximum duration for a command - */ -unsigned long tpm2_calc_ordinal_duration(struct tpm_chip *chip, u32 ordinal) -{ - int index = TPM_UNDEFINED; - int duration = 0; - - if (ordinal >= TPM2_CC_FIRST && ordinal <= TPM2_CC_LAST) - index = tpm2_ordinal_duration[ordinal - TPM2_CC_FIRST]; - - if (index != TPM_UNDEFINED) - duration = chip->duration[index]; - - if (duration <= 0) - duration = msecs_to_jiffies(TPM2_DURATION_DEFAULT); - - return duration; -} -EXPORT_SYMBOL_GPL(tpm2_calc_ordinal_duration); - /** * tpm2_do_selftest() - ensure that all self tests have passed * @@ -980,6 +946,36 @@ static int tpm2_get_cc_attrs_tbl(struct tpm_chip *chip) return rc; } +/** + * tpm2_startup - turn on the TPM + * @chip: TPM chip to use + * + * Normally the firmware should start the TPM. This function is provided as a + * workaround if this does not happen. A legal case for this could be for + * example when a TPM emulator is used. + * + * Return: same as tpm_transmit_cmd() + */ + +static int tpm2_startup(struct tpm_chip *chip) +{ + struct tpm_buf buf; + int rc; + + dev_info(&chip->dev, "starting up the TPM manually\n"); + + rc = tpm_buf_init(&buf, TPM2_ST_NO_SESSIONS, TPM2_CC_STARTUP); + if (rc < 0) + return rc; + + tpm_buf_append_u16(&buf, TPM2_SU_CLEAR); + rc = tpm_transmit_cmd(chip, NULL, buf.data, PAGE_SIZE, 0, 0, + "attempting to start the TPM"); + tpm_buf_destroy(&buf); + + return rc; +} + /** * tpm2_auto_startup - Perform the standard automatic TPM initialization * sequence @@ -991,7 +987,7 @@ int tpm2_auto_startup(struct tpm_chip *chip) { int rc; - rc = tpm_get_timeouts(chip); + rc = tpm2_get_timeouts(chip); if (rc) goto out; @@ -1000,7 +996,7 @@ int tpm2_auto_startup(struct tpm_chip *chip) goto out; if (rc == TPM2_RC_INITIALIZE) { - rc = tpm_startup(chip); + rc = tpm2_startup(chip); if (rc) goto out; diff --git a/drivers/char/tpm/tpm2-space.c b/drivers/char/tpm/tpm2-space.c index d2e101b32482f..dcdfde3c253e6 100644 --- a/drivers/char/tpm/tpm2-space.c +++ 
b/drivers/char/tpm/tpm2-space.c @@ -373,7 +373,7 @@ static int tpm2_map_response_header(struct tpm_chip *chip, u32 cc, u8 *rsp, dev_err(&chip->dev, "%s: unknown handle 0x%08X\n", __func__, phandle); break; - }; + } return 0; out_no_slots: diff --git a/drivers/char/tpm/tpm_i2c_nuvoton.c b/drivers/char/tpm/tpm_i2c_nuvoton.c index caa86b19c76dd..217f7f1cbde80 100644 --- a/drivers/char/tpm/tpm_i2c_nuvoton.c +++ b/drivers/char/tpm/tpm_i2c_nuvoton.c @@ -369,6 +369,7 @@ static int i2c_nuvoton_send(struct tpm_chip *chip, u8 *buf, size_t len) struct device *dev = chip->dev.parent; struct i2c_client *client = to_i2c_client(dev); u32 ordinal; + unsigned long duration; size_t count = 0; int burst_count, bytes2write, retries, rc = -EIO; @@ -455,12 +456,12 @@ static int i2c_nuvoton_send(struct tpm_chip *chip, u8 *buf, size_t len) return rc; } ordinal = be32_to_cpu(*((__be32 *) (buf + 6))); - rc = i2c_nuvoton_wait_for_data_avail(chip, - tpm_calc_ordinal_duration(chip, - ordinal), - &priv->read_queue); + duration = tpm_calc_ordinal_duration(chip, ordinal); + + rc = i2c_nuvoton_wait_for_data_avail(chip, duration, &priv->read_queue); if (rc) { - dev_err(dev, "%s() timeout command duration\n", __func__); + dev_err(dev, "%s() timeout command duration %ld\n", + __func__, duration); i2c_nuvoton_ready(chip); return rc; } diff --git a/drivers/char/tpm/tpm_tis_core.c b/drivers/char/tpm/tpm_tis_core.c index d2345d9fd7b51..bf7e49cfa6435 100644 --- a/drivers/char/tpm/tpm_tis_core.c +++ b/drivers/char/tpm/tpm_tis_core.c @@ -473,11 +473,7 @@ static int tpm_tis_send_main(struct tpm_chip *chip, const u8 *buf, size_t len) if (chip->flags & TPM_CHIP_FLAG_IRQ) { ordinal = be32_to_cpu(*((__be32 *) (buf + 6))); - if (chip->flags & TPM_CHIP_FLAG_TPM2) - dur = tpm2_calc_ordinal_duration(chip, ordinal); - else - dur = tpm_calc_ordinal_duration(chip, ordinal); - + dur = tpm_calc_ordinal_duration(chip, ordinal); if (wait_for_tpm_stat (chip, TPM_STS_DATA_AVAIL | TPM_STS_VALID, dur, &priv->read_queue, false) 
< 0) { @@ -668,7 +664,7 @@ static int tpm_tis_gen_interrupt(struct tpm_chip *chip) if (chip->flags & TPM_CHIP_FLAG_TPM2) return tpm2_get_tpm_pt(chip, 0x100, &cap2, desc); else - return tpm_getcap(chip, TPM_CAP_PROP_TIS_TIMEOUT, &cap, desc, + return tpm1_getcap(chip, TPM_CAP_PROP_TIS_TIMEOUT, &cap, desc, 0); } @@ -1060,7 +1056,7 @@ int tpm_tis_resume(struct device *dev) * an error code but for unknown reason it isn't handled. */ if (!(chip->flags & TPM_CHIP_FLAG_TPM2)) - tpm_do_selftest(chip); + tpm1_do_selftest(chip); return 0; } diff --git a/drivers/char/tpm/tpmrm-dev.c b/drivers/char/tpm/tpmrm-dev.c index 1a0e97a5da5a4..0c751a79bbedd 100644 --- a/drivers/char/tpm/tpmrm-dev.c +++ b/drivers/char/tpm/tpmrm-dev.c @@ -28,7 +28,7 @@ static int tpmrm_open(struct inode *inode, struct file *file) return -ENOMEM; } - tpm_common_open(file, chip, &priv->priv); + tpm_common_open(file, chip, &priv->priv, &priv->space); return 0; } @@ -45,21 +45,12 @@ static int tpmrm_release(struct inode *inode, struct file *file) return 0; } -static ssize_t tpmrm_write(struct file *file, const char __user *buf, - size_t size, loff_t *off) -{ - struct file_priv *fpriv = file->private_data; - struct tpmrm_priv *priv = container_of(fpriv, struct tpmrm_priv, priv); - - return tpm_common_write(file, buf, size, off, &priv->space); -} - const struct file_operations tpmrm_fops = { .owner = THIS_MODULE, .llseek = no_llseek, .open = tpmrm_open, .read = tpm_common_read, - .write = tpmrm_write, + .write = tpm_common_write, + .poll = tpm_common_poll, .release = tpmrm_release, }; - diff --git a/drivers/char/tpm/xen-tpmfront.c b/drivers/char/tpm/xen-tpmfront.c index 911475d368002..b150f87f38f51 100644 --- a/drivers/char/tpm/xen-tpmfront.c +++ b/drivers/char/tpm/xen-tpmfront.c @@ -264,7 +264,7 @@ static int setup_ring(struct xenbus_device *dev, struct tpm_private *priv) return -ENOMEM; } - rv = xenbus_grant_ring(dev, &priv->shr, 1, &gref); + rv = xenbus_grant_ring(dev, priv->shr, 1, &gref); if (rv < 0) 
return rv; diff --git a/drivers/char/vhm/Makefile b/drivers/char/vhm/Makefile new file mode 100644 index 0000000000000..5ee68c5f72786 --- /dev/null +++ b/drivers/char/vhm/Makefile @@ -0,0 +1,2 @@ +subdir-ccflags-$(CONFIG_ACRN_VHM) := -Werror +obj-y += vhm_dev.o diff --git a/drivers/char/vhm/vhm_dev.c b/drivers/char/vhm/vhm_dev.c new file mode 100644 index 0000000000000..082dc40956ec9 --- /dev/null +++ b/drivers/char/vhm/vhm_dev.c @@ -0,0 +1,849 @@ +/* + * virtio and hypervisor service module (VHM): main framework + * + * This file is provided under a dual BSD/GPLv2 license. When using or + * redistributing this file, you may do so under either license. + * + * GPL LICENSE SUMMARY + * + * Copyright (c) 2017 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * BSD LICENSE + * + * Copyright (C) 2017 Intel Corporation. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. 
+ * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ * + * Liang Ding + * Jason Zeng + * Xiao Zheng + * Jason Chen CJ + * Jack Ren + * Mingqiang Chi + * + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +#define DEVICE_NAME "acrn_vhm" +#define CLASS_NAME "vhm" + +#define VHM_API_VERSION_MAJOR 1 +#define VHM_API_VERSION_MINOR 0 + +static int major; +static struct class *vhm_class; +static struct device *vhm_device; +static struct tasklet_struct vhm_io_req_tasklet; + +struct table_iomems { + /* list node for this table_iomems */ + struct list_head list; + /* device's physical BDF */ + unsigned short phys_bdf; + /* virtual base address of MSI-X table in memory space after ioremap */ + unsigned long mmap_addr; +}; +static LIST_HEAD(table_iomems_list); +static DEFINE_MUTEX(table_iomems_lock); + +static int vhm_dev_open(struct inode *inodep, struct file *filep) +{ + struct vhm_vm *vm; + int i; + + vm = kzalloc(sizeof(struct vhm_vm), GFP_KERNEL); + pr_info("vhm_dev_open: opening device node\n"); + + if (!vm) + return -ENOMEM; + vm->vmid = ACRN_INVALID_VMID; + vm->dev = vhm_device; + + for (i = 0; i < HUGEPAGE_HLIST_ARRAY_SIZE; i++) + INIT_HLIST_HEAD(&vm->hugepage_hlist[i]); + mutex_init(&vm->hugepage_lock); + + INIT_LIST_HEAD(&vm->ioreq_client_list); + spin_lock_init(&vm->ioreq_client_lock); + + atomic_set(&vm->refcnt, 1); + write_lock_bh(&vhm_vm_list_lock); + vm_list_add(&vm->list); + write_unlock_bh(&vhm_vm_list_lock); + filep->private_data = vm; + return 0; +} + +static ssize_t vhm_dev_read(struct file *filep, char *buffer, size_t len, + loff_t *offset) +{ + /* Does Nothing */ + pr_info("vhm_dev_read: reading device node\n"); + return 0; +} + +static ssize_t vhm_dev_write(struct file *filep, const char *buffer, + size_t len, loff_t *offset) +{ + /* Does Nothing */ + 
pr_info("vhm_dev_read: writing device node\n"); + return 0; +} + +static long vhm_dev_ioctl(struct file *filep, + unsigned int ioctl_num, unsigned long ioctl_param) +{ + long ret = 0; + struct vhm_vm *vm; + struct ic_ptdev_irq ic_pt_irq; + struct hc_ptdev_irq hc_pt_irq; + + pr_debug("[%s] ioctl_num=0x%x\n", __func__, ioctl_num); + + if (ioctl_num == IC_GET_API_VERSION) { + struct api_version api_version; + + api_version.major_version = VHM_API_VERSION_MAJOR; + api_version.minor_version = VHM_API_VERSION_MINOR; + + if (copy_to_user((void *)ioctl_param, &api_version, + sizeof(struct api_version))) + return -EFAULT; + + return 0; + } else if (ioctl_num == IC_PM_SET_SSTATE_DATA) { + struct acpi_sstate_data host_sstate_data; + + if (copy_from_user(&host_sstate_data, + (void *)ioctl_param, sizeof(host_sstate_data))) + return -EFAULT; + + ret = hcall_set_sstate_data(virt_to_phys(&host_sstate_data)); + if (ret < 0) { + pr_err("vhm: failed to set host Sstate data!"); + return -EFAULT; + } + return 0; + } + + memset(&hc_pt_irq, 0, sizeof(hc_pt_irq)); + memset(&ic_pt_irq, 0, sizeof(ic_pt_irq)); + vm = (struct vhm_vm *)filep->private_data; + if (vm == NULL) { + pr_err("vhm: invalid VM !\n"); + return -EFAULT; + } + if ((vm->vmid == ACRN_INVALID_VMID) && (ioctl_num != IC_CREATE_VM)) { + pr_err("vhm: invalid VM ID !\n"); + return -EFAULT; + } + + switch (ioctl_num) { + case IC_CREATE_VM: { + struct acrn_create_vm created_vm; + + if (copy_from_user(&created_vm, (void *)ioctl_param, + sizeof(struct acrn_create_vm))) + return -EFAULT; + + ret = hcall_create_vm(virt_to_phys(&created_vm)); + if ((ret < 0) || + (created_vm.vmid == ACRN_INVALID_VMID)) { + pr_err("vhm: failed to create VM from Hypervisor !\n"); + return -EFAULT; + } + + if (copy_to_user((void *)ioctl_param, &created_vm, + sizeof(struct acrn_create_vm))) { + ret = -EFAULT; + goto create_vm_fail; + } + vm->vmid = created_vm.vmid; + + if (created_vm.req_buf) { + ret = acrn_ioreq_init(vm, created_vm.req_buf); + if (ret < 0) 
+ goto create_vm_fail; + } + + acrn_ioeventfd_init(vm->vmid); + acrn_irqfd_init(vm->vmid); + + pr_info("vhm: VM %d created\n", created_vm.vmid); + break; + +create_vm_fail: + hcall_destroy_vm(created_vm.vmid); + vm->vmid = ACRN_INVALID_VMID; + break; + + } + + case IC_START_VM: { + ret = hcall_start_vm(vm->vmid); + if (ret < 0) { + pr_err("vhm: failed to start VM %ld!\n", vm->vmid); + return -EFAULT; + } + break; + } + + case IC_PAUSE_VM: { + ret = hcall_pause_vm(vm->vmid); + if (ret < 0) { + pr_err("vhm: failed to pause VM %ld!\n", vm->vmid); + return -EFAULT; + } + break; + } + + case IC_RESET_VM: { + ret = hcall_reset_vm(vm->vmid); + if (ret < 0) { + pr_err("vhm: failed to restart VM %ld!\n", vm->vmid); + return -EFAULT; + } + break; + } + + case IC_DESTROY_VM: { + acrn_ioeventfd_deinit(vm->vmid); + acrn_irqfd_deinit(vm->vmid); + acrn_ioreq_free(vm); + ret = hcall_destroy_vm(vm->vmid); + if (ret < 0) { + pr_err("failed to destroy VM %ld\n", vm->vmid); + return -EFAULT; + } + vm->vmid = ACRN_INVALID_VMID; + break; + } + + case IC_CREATE_VCPU: { + struct acrn_create_vcpu cv; + + if (copy_from_user(&cv, (void *)ioctl_param, + sizeof(struct acrn_create_vcpu))) + return -EFAULT; + + ret = acrn_hypercall2(HC_CREATE_VCPU, vm->vmid, + virt_to_phys(&cv)); + if (ret < 0) { + pr_err("vhm: failed to create vcpu %d!\n", cv.vcpu_id); + return -EFAULT; + } + atomic_inc(&vm->vcpu_num); + + return ret; + } + + case IC_SET_VCPU_REGS: { + struct acrn_set_vcpu_regs asvr; + + if (copy_from_user(&asvr, (void *)ioctl_param, sizeof(asvr))) + return -EFAULT; + + ret = acrn_hypercall2(HC_SET_VCPU_REGS, vm->vmid, + virt_to_phys(&asvr)); + if (ret < 0) { + pr_err("vhm: failed to set bsp state of vm %ld!\n", + vm->vmid); + return -EFAULT; + } + + return ret; + } + + case IC_SET_MEMSEG: { + struct vm_memmap memmap; + + if (copy_from_user(&memmap, (void *)ioctl_param, + sizeof(struct vm_memmap))) + return -EFAULT; + + ret = map_guest_memseg(vm, &memmap); + break; + } + + case IC_UNSET_MEMSEG: 
{ + struct vm_memmap memmap; + + if (copy_from_user(&memmap, (void *)ioctl_param, + sizeof(struct vm_memmap))) + return -EFAULT; + + ret = unmap_guest_memseg(vm, &memmap); + break; + } + + case IC_SET_IOREQ_BUFFER: { + /* init ioreq buffer */ + ret = acrn_ioreq_init(vm, (unsigned long)ioctl_param); + if (ret < 0 && ret != -EEXIST) + return ret; + ret = 0; + break; + } + + case IC_CREATE_IOREQ_CLIENT: { + int client_id; + + client_id = acrn_ioreq_create_fallback_client(vm->vmid, "acrndm"); + if (client_id < 0) + return -EFAULT; + return client_id; + } + + case IC_DESTROY_IOREQ_CLIENT: { + int client = ioctl_param; + + acrn_ioreq_destroy_client(client); + break; + } + + case IC_ATTACH_IOREQ_CLIENT: { + int client = ioctl_param; + + return acrn_ioreq_attach_client(client); + } + + case IC_NOTIFY_REQUEST_FINISH: { + struct ioreq_notify notify; + + if (copy_from_user(¬ify, (void *)ioctl_param, + sizeof(notify))) + return -EFAULT; + + ret = acrn_ioreq_complete_request(notify.client_id, + notify.vcpu, NULL); + if (ret < 0) + return -EFAULT; + break; + } + + case IC_CLEAR_VM_IOREQ: { + /* + * TODO: Query VM status with additional hypercall. + * VM should be in paused status. + * + * In SMP SOS, we need flush the current pending ioreq dispatch + * tasklet and finish it before clearing all ioreq of this VM. + * With tasklet_kill, there still be a very rare race which + * might lost one ioreq tasklet for other VMs. So arm one after + * the clearing. It's harmless. 
+ */ + tasklet_schedule(&vhm_io_req_tasklet); + tasklet_kill(&vhm_io_req_tasklet); + tasklet_schedule(&vhm_io_req_tasklet); + acrn_ioreq_clear_request(vm); + break; + } + + case IC_SET_IRQLINE: { + ret = hcall_set_irqline(vm->vmid, ioctl_param); + if (ret < 0) { + pr_err("vhm: failed to set irqline!\n"); + return -EFAULT; + } + break; + } + + case IC_INJECT_MSI: { + struct acrn_msi_entry msi; + + if (copy_from_user(&msi, (void *)ioctl_param, sizeof(msi))) + return -EFAULT; + + ret = hcall_inject_msi(vm->vmid, virt_to_phys(&msi)); + if (ret < 0) { + pr_err("vhm: failed to inject!\n"); + return -EFAULT; + } + break; + } + + case IC_ASSIGN_PTDEV: { + uint16_t bdf; + + if (copy_from_user(&bdf, + (void *)ioctl_param, sizeof(uint16_t))) + return -EFAULT; + + ret = hcall_assign_ptdev(vm->vmid, virt_to_phys(&bdf)); + if (ret < 0) { + pr_err("vhm: failed to assign ptdev!\n"); + return -EFAULT; + } + break; + } + case IC_DEASSIGN_PTDEV: { + uint16_t bdf; + + if (copy_from_user(&bdf, + (void *)ioctl_param, sizeof(uint16_t))) + return -EFAULT; + + ret = hcall_deassign_ptdev(vm->vmid, virt_to_phys(&bdf)); + if (ret < 0) { + pr_err("vhm: failed to deassign ptdev!\n"); + return -EFAULT; + } + break; + } + + case IC_SET_PTDEV_INTR_INFO: { + struct table_iomems *new; + + if (copy_from_user(&ic_pt_irq, + (void *)ioctl_param, sizeof(ic_pt_irq))) + return -EFAULT; + + memcpy(&hc_pt_irq, &ic_pt_irq, sizeof(hc_pt_irq)); + + ret = hcall_set_ptdev_intr_info(vm->vmid, + virt_to_phys(&hc_pt_irq)); + if (ret < 0) { + pr_err("vhm: failed to set intr info for ptdev!\n"); + return -EFAULT; + } + + if ((ic_pt_irq.type == IRQ_MSIX) && + ic_pt_irq.msix.table_paddr) { + new = kmalloc(sizeof(struct table_iomems), GFP_KERNEL); + if (new == NULL) + return -EFAULT; + new->phys_bdf = ic_pt_irq.phys_bdf; + new->mmap_addr = (unsigned long) + ioremap_nocache(ic_pt_irq.msix.table_paddr, + ic_pt_irq.msix.table_size); + + mutex_lock(&table_iomems_lock); + list_add(&new->list, &table_iomems_list); + 
mutex_unlock(&table_iomems_lock); + } + + break; + } + case IC_RESET_PTDEV_INTR_INFO: { + struct table_iomems *ptr; + int dev_found = 0; + + if (copy_from_user(&ic_pt_irq, + (void *)ioctl_param, sizeof(ic_pt_irq))) + return -EFAULT; + + memcpy(&hc_pt_irq, &ic_pt_irq, sizeof(hc_pt_irq)); + + ret = hcall_reset_ptdev_intr_info(vm->vmid, + virt_to_phys(&hc_pt_irq)); + if (ret < 0) { + pr_err("vhm: failed to reset intr info for ptdev!\n"); + return -EFAULT; + } + + if (ic_pt_irq.type == IRQ_MSIX) { + mutex_lock(&table_iomems_lock); + list_for_each_entry(ptr, &table_iomems_list, list) { + if (ptr->phys_bdf == ic_pt_irq.phys_bdf) { + dev_found = 1; + break; + } + } + if (dev_found) { + iounmap((void __iomem *)ptr->mmap_addr); + list_del(&ptr->list); + } + mutex_unlock(&table_iomems_lock); + } + + break; + } + + case IC_VM_PCI_MSIX_REMAP: { + struct acrn_vm_pci_msix_remap msix_remap; + + if (copy_from_user(&msix_remap, + (void *)ioctl_param, sizeof(msix_remap))) + return -EFAULT; + + if (msix_remap.msix) { + void __iomem *msix_entry; + struct table_iomems *ptr; + int dev_found = 0; + + mutex_lock(&table_iomems_lock); + list_for_each_entry(ptr, &table_iomems_list, list) { + if (ptr->phys_bdf == msix_remap.phys_bdf) { + dev_found = 1; + break; + } + } + mutex_unlock(&table_iomems_lock); + + if (!dev_found || !ptr->mmap_addr) + return -EFAULT; + + msix_entry = (void __iomem *) (ptr->mmap_addr + + msix_remap.msix_entry_index * + PCI_MSIX_ENTRY_SIZE); + + /* mask the entry when setup */ + writel(PCI_MSIX_ENTRY_CTRL_MASKBIT, + msix_entry + PCI_MSIX_ENTRY_VECTOR_CTRL); + + /* setup the msi entry */ + writel((uint32_t)msix_remap.msi_addr, + msix_entry + PCI_MSIX_ENTRY_LOWER_ADDR); + writel((uint32_t)(msix_remap.msi_addr >> 32), + msix_entry + PCI_MSIX_ENTRY_UPPER_ADDR); + writel(msix_remap.msi_data, + msix_entry + PCI_MSIX_ENTRY_DATA); + + /* unmask the entry */ + writel(msix_remap.vector_ctl & + PCI_MSIX_ENTRY_CTRL_MASKBIT, + msix_entry + PCI_MSIX_ENTRY_VECTOR_CTRL); + } + break; 
+ } + + case IC_PM_GET_CPU_STATE: { + uint64_t cmd; + + if (copy_from_user(&cmd, + (void *)ioctl_param, sizeof(cmd))) + return -EFAULT; + + switch (cmd & PMCMD_TYPE_MASK) { + case PMCMD_GET_PX_CNT: + case PMCMD_GET_CX_CNT: { + uint64_t pm_info; + + ret = hcall_get_cpu_state(cmd, virt_to_phys(&pm_info)); + if (ret < 0) + return -EFAULT; + + if (copy_to_user((void *)ioctl_param, + &pm_info, sizeof(pm_info))) + ret = -EFAULT; + + break; + } + case PMCMD_GET_PX_DATA: { + struct cpu_px_data px_data; + + ret = hcall_get_cpu_state(cmd, virt_to_phys(&px_data)); + if (ret < 0) + return -EFAULT; + + if (copy_to_user((void *)ioctl_param, + &px_data, sizeof(px_data))) + ret = -EFAULT; + break; + } + case PMCMD_GET_CX_DATA: { + struct cpu_cx_data cx_data; + + ret = hcall_get_cpu_state(cmd, virt_to_phys(&cx_data)); + if (ret < 0) + return -EFAULT; + + if (copy_to_user((void *)ioctl_param, + &cx_data, sizeof(cx_data))) + ret = -EFAULT; + break; + } + default: + ret = -EFAULT; + break; + } + break; + } + + case IC_VM_INTR_MONITOR: { + struct page *page; + + ret = get_user_pages_fast(ioctl_param, 1, 1, &page); + if (unlikely(ret != 1) || (page == NULL)) { + pr_err("vhm-dev: failed to pin intr hdr buffer!\n"); + return -ENOMEM; + } + + ret = hcall_vm_intr_monitor(vm->vmid, page_to_phys(page)); + if (ret < 0) { + pr_err("vhm-dev: monitor intr data err=%ld\n", ret); + return -EFAULT; + } + break; + } + + case IC_EVENT_IOEVENTFD: { + struct acrn_ioeventfd args; + + if (copy_from_user(&args, (void *)ioctl_param, sizeof(args))) + return -EFAULT; + ret = acrn_ioeventfd(vm->vmid, &args); + break; + } + + case IC_EVENT_IRQFD: { + struct acrn_irqfd args; + + if (copy_from_user(&args, (void *)ioctl_param, sizeof(args))) + return -EFAULT; + ret = acrn_irqfd(vm->vmid, &args); + break; + } + + default: + pr_warn("Unknown IOCTL 0x%x\n", ioctl_num); + ret = 0; + break; + } + + return ret; +} + +static void io_req_tasklet(unsigned long data) +{ + struct vhm_vm *vm; + + read_lock(&vhm_vm_list_lock); 
+ list_for_each_entry(vm, &vhm_vm_list, list) { + if (!vm || !vm->req_buf) + break; + + acrn_ioreq_distribute_request(vm); + } + read_unlock(&vhm_vm_list_lock); +} + +static void vhm_intr_handler(void) +{ + tasklet_schedule(&vhm_io_req_tasklet); +} + +static int vhm_dev_release(struct inode *inodep, struct file *filep) +{ + struct vhm_vm *vm = filep->private_data; + + if (vm == NULL) { + pr_err("vhm: invalid VM !\n"); + return -EFAULT; + } + acrn_ioreq_free(vm); + write_lock_bh(&vhm_vm_list_lock); + list_del_init(&vm->list); + write_unlock_bh(&vhm_vm_list_lock); + put_vm(vm); + filep->private_data = NULL; + return 0; +} + +static const struct file_operations fops = { + .open = vhm_dev_open, + .read = vhm_dev_read, + .write = vhm_dev_write, + .release = vhm_dev_release, + .unlocked_ioctl = vhm_dev_ioctl, + .poll = vhm_dev_poll, +}; + +static ssize_t +store_offline_cpu(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ +#ifdef CONFIG_X86 + u64 cpu, lapicid; + + if (kstrtoull(buf, 0, &cpu) < 0) + return -EINVAL; + + if (cpu_possible(cpu)) { + lapicid = cpu_data(cpu).apicid; + pr_info("vhm: try to offline cpu %lld with lapicid %lld\n", + cpu, lapicid); + if (hcall_sos_offline_cpu(lapicid) < 0) { + pr_err("vhm: failed to offline cpu from Hypervisor!\n"); + return -EINVAL; + } + } +#endif + return count; +} + +static DEVICE_ATTR(offline_cpu, S_IWUSR, NULL, store_offline_cpu); + +static struct attribute *vhm_attrs[] = { + &dev_attr_offline_cpu.attr, + NULL +}; + +static struct attribute_group vhm_attr_group = { + .attrs = vhm_attrs, +}; + +#define SUPPORT_HV_API_VERSION_MAJOR 1 +#define SUPPORT_HV_API_VERSION_MINOR 0 +static int __init vhm_init(void) +{ + unsigned long flag; + struct hc_api_version api_version = {0, 0}; + + if (x86_hyper_type != X86_HYPER_ACRN) + return -ENODEV; + + pr_info("vhm: initializing\n"); + + if (hcall_get_api_version(virt_to_phys(&api_version)) < 0) { + pr_err("vhm: failed to get api version from Hypervisor 
!\n"); + return -EINVAL; + } + + if (api_version.major_version == SUPPORT_HV_API_VERSION_MAJOR && + api_version.minor_version == SUPPORT_HV_API_VERSION_MINOR) { + pr_info("vhm: hv api version %d.%d\n", + api_version.major_version, api_version.minor_version); + } else { + pr_err("vhm: not support hv api version %d.%d!\n", + api_version.major_version, api_version.minor_version); + return -EINVAL; + } + + /* Try to dynamically allocate a major number for the device */ + major = register_chrdev(0, DEVICE_NAME, &fops); + if (major < 0) { + pr_warn("vhm: failed to register a major number\n"); + return major; + } + pr_info("vhm: registered correctly with major number %d\n", major); + + /* Register the device class */ + vhm_class = class_create(THIS_MODULE, CLASS_NAME); + if (IS_ERR(vhm_class)) { + unregister_chrdev(major, DEVICE_NAME); + pr_warn("vhm: failed to register device class\n"); + return PTR_ERR(vhm_class); + } + pr_info("vhm: device class registered correctly\n"); + + /* Register the device driver */ + vhm_device = device_create(vhm_class, NULL, MKDEV(major, 0), + NULL, DEVICE_NAME); + if (IS_ERR(vhm_device)) { + class_destroy(vhm_class); + unregister_chrdev(major, DEVICE_NAME); + pr_warn("vhm: failed to create the device\n"); + return PTR_ERR(vhm_device); + } + pr_info("register IPI handler\n"); + tasklet_init(&vhm_io_req_tasklet, io_req_tasklet, 0); + + if (hcall_set_callback_vector(HYPERVISOR_CALLBACK_VECTOR)) { + if (x86_platform_ipi_callback) { + pr_warn("vhm: ipi callback was occupied\n"); + return -EINVAL; + } + local_irq_save(flag); + x86_platform_ipi_callback = vhm_intr_handler; + local_irq_restore(flag); + } + else { + acrn_setup_intr_irq(vhm_intr_handler); + } + + if (sysfs_create_group(&vhm_device->kobj, &vhm_attr_group)) { + pr_warn("vhm: sysfs create failed\n"); + return -EINVAL; + } + + acrn_ioreq_driver_init(); + pr_info("vhm: Virtio & Hypervisor service module initialized\n"); + return 0; +} +static void __exit vhm_exit(void) +{ + 
tasklet_kill(&vhm_io_req_tasklet); + acrn_remove_intr_irq(); + device_destroy(vhm_class, MKDEV(major, 0)); + class_unregister(vhm_class); + class_destroy(vhm_class); + unregister_chrdev(major, DEVICE_NAME); + sysfs_remove_group(&vhm_device->kobj, &vhm_attr_group); + pr_info("vhm: exit\n"); +} + +module_init(vhm_init); +module_exit(vhm_exit); + +MODULE_AUTHOR("Intel"); +MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("This is a char device driver, acts as a route " + "responsible for transferring IO requsts from other modules " + "either in user-space or in kernel to and from hypervisor"); +MODULE_VERSION("0.1"); diff --git a/drivers/clk/at91/clk-pll.c b/drivers/clk/at91/clk-pll.c index 72b6091eb7b94..dc7fbc796cb65 100644 --- a/drivers/clk/at91/clk-pll.c +++ b/drivers/clk/at91/clk-pll.c @@ -133,6 +133,9 @@ static unsigned long clk_pll_recalc_rate(struct clk_hw *hw, { struct clk_pll *pll = to_clk_pll(hw); + if (!pll->div || !pll->mul) + return 0; + return (parent_rate / pll->div) * (pll->mul + 1); } diff --git a/drivers/clk/clk-fixed-factor.c b/drivers/clk/clk-fixed-factor.c index 20724abd38bd1..7df6b5b1e7ee0 100644 --- a/drivers/clk/clk-fixed-factor.c +++ b/drivers/clk/clk-fixed-factor.c @@ -210,6 +210,7 @@ static int of_fixed_factor_clk_remove(struct platform_device *pdev) { struct clk *clk = platform_get_drvdata(pdev); + of_clk_del_provider(pdev->dev.of_node); clk_unregister_fixed_factor(clk); return 0; diff --git a/drivers/clk/clk-fixed-rate.c b/drivers/clk/clk-fixed-rate.c index b5c46b3f8764d..6d6475c32ee51 100644 --- a/drivers/clk/clk-fixed-rate.c +++ b/drivers/clk/clk-fixed-rate.c @@ -200,6 +200,7 @@ static int of_fixed_clk_remove(struct platform_device *pdev) { struct clk *clk = platform_get_drvdata(pdev); + of_clk_del_provider(pdev->dev.of_node); clk_unregister_fixed_rate(clk); return 0; diff --git a/drivers/clk/clk-s2mps11.c b/drivers/clk/clk-s2mps11.c index d44e0eea31ec6..0934d3724495a 100644 --- a/drivers/clk/clk-s2mps11.c +++ b/drivers/clk/clk-s2mps11.c @@ 
-245,6 +245,36 @@ static const struct platform_device_id s2mps11_clk_id[] = { }; MODULE_DEVICE_TABLE(platform, s2mps11_clk_id); +#ifdef CONFIG_OF +/* + * Device is instantiated through parent MFD device and device matching is done + * through platform_device_id. + * + * However if device's DT node contains proper clock compatible and driver is + * built as a module, then the *module* matching will be done trough DT aliases. + * This requires of_device_id table. In the same time this will not change the + * actual *device* matching so do not add .of_match_table. + */ +static const struct of_device_id s2mps11_dt_match[] = { + { + .compatible = "samsung,s2mps11-clk", + .data = (void *)S2MPS11X, + }, { + .compatible = "samsung,s2mps13-clk", + .data = (void *)S2MPS13X, + }, { + .compatible = "samsung,s2mps14-clk", + .data = (void *)S2MPS14X, + }, { + .compatible = "samsung,s5m8767-clk", + .data = (void *)S5M8767X, + }, { + /* Sentinel */ + }, +}; +MODULE_DEVICE_TABLE(of, s2mps11_dt_match); +#endif + static struct platform_driver s2mps11_clk_driver = { .driver = { .name = "s2mps11-clk", diff --git a/drivers/clk/hisilicon/reset.c b/drivers/clk/hisilicon/reset.c index 2a5015c736ce6..43e82fa644226 100644 --- a/drivers/clk/hisilicon/reset.c +++ b/drivers/clk/hisilicon/reset.c @@ -109,9 +109,8 @@ struct hisi_reset_controller *hisi_reset_init(struct platform_device *pdev) return NULL; res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - rstc->membase = devm_ioremap(&pdev->dev, - res->start, resource_size(res)); - if (!rstc->membase) + rstc->membase = devm_ioremap_resource(&pdev->dev, res); + if (IS_ERR(rstc->membase)) return NULL; spin_lock_init(&rstc->lock); diff --git a/drivers/clk/imx/clk-busy.c b/drivers/clk/imx/clk-busy.c index 99036527eb0d8..e695622c5aa56 100644 --- a/drivers/clk/imx/clk-busy.c +++ b/drivers/clk/imx/clk-busy.c @@ -154,7 +154,7 @@ static const struct clk_ops clk_busy_mux_ops = { struct clk *imx_clk_busy_mux(const char *name, void __iomem *reg, u8 
shift, u8 width, void __iomem *busy_reg, u8 busy_shift, - const char **parent_names, int num_parents) + const char * const *parent_names, int num_parents) { struct clk_busy_mux *busy; struct clk *clk; diff --git a/drivers/clk/imx/clk-fixup-mux.c b/drivers/clk/imx/clk-fixup-mux.c index c9b327e0a8dd9..44817c1b0b88c 100644 --- a/drivers/clk/imx/clk-fixup-mux.c +++ b/drivers/clk/imx/clk-fixup-mux.c @@ -70,7 +70,7 @@ static const struct clk_ops clk_fixup_mux_ops = { }; struct clk *imx_clk_fixup_mux(const char *name, void __iomem *reg, - u8 shift, u8 width, const char **parents, + u8 shift, u8 width, const char * const *parents, int num_parents, void (*fixup)(u32 *val)) { struct clk_fixup_mux *fixup_mux; diff --git a/drivers/clk/imx/clk-imx6q.c b/drivers/clk/imx/clk-imx6q.c index 8c7c2fcb8d949..c509324f63385 100644 --- a/drivers/clk/imx/clk-imx6q.c +++ b/drivers/clk/imx/clk-imx6q.c @@ -508,8 +508,12 @@ static void __init imx6q_clocks_init(struct device_node *ccm_node) * lvds1_gate and lvds2_gate are pseudo-gates. Both can be * independently configured as clock inputs or outputs. We treat * the "output_enable" bit as a gate, even though it's really just - * enabling clock output. + * enabling clock output. Initially the gate bits are cleared, as + * otherwise the exclusive configuration gets locked in the setup done + * by software running before the clock driver, with no way to change + * it. 
*/ + writel(readl(base + 0x160) & ~0x3c00, base + 0x160); clk[IMX6QDL_CLK_LVDS1_GATE] = imx_clk_gate_exclusive("lvds1_gate", "lvds1_sel", base + 0x160, 10, BIT(12)); clk[IMX6QDL_CLK_LVDS2_GATE] = imx_clk_gate_exclusive("lvds2_gate", "lvds2_sel", base + 0x160, 11, BIT(13)); diff --git a/drivers/clk/imx/clk.h b/drivers/clk/imx/clk.h index 8076ec040f375..e65c1115d9788 100644 --- a/drivers/clk/imx/clk.h +++ b/drivers/clk/imx/clk.h @@ -63,14 +63,14 @@ struct clk *imx_clk_busy_divider(const char *name, const char *parent_name, struct clk *imx_clk_busy_mux(const char *name, void __iomem *reg, u8 shift, u8 width, void __iomem *busy_reg, u8 busy_shift, - const char **parent_names, int num_parents); + const char * const *parent_names, int num_parents); struct clk *imx_clk_fixup_divider(const char *name, const char *parent, void __iomem *reg, u8 shift, u8 width, void (*fixup)(u32 *val)); struct clk *imx_clk_fixup_mux(const char *name, void __iomem *reg, - u8 shift, u8 width, const char **parents, + u8 shift, u8 width, const char * const *parents, int num_parents, void (*fixup)(u32 *val)); static inline struct clk *imx_clk_fixed(const char *name, int rate) @@ -79,7 +79,8 @@ static inline struct clk *imx_clk_fixed(const char *name, int rate) } static inline struct clk *imx_clk_mux_ldb(const char *name, void __iomem *reg, - u8 shift, u8 width, const char **parents, int num_parents) + u8 shift, u8 width, const char * const *parents, + int num_parents) { return clk_register_mux(NULL, name, parents, num_parents, CLK_SET_RATE_NO_REPARENT | CLK_SET_RATE_PARENT, reg, @@ -192,7 +193,8 @@ static inline struct clk *imx_clk_gate4(const char *name, const char *parent, } static inline struct clk *imx_clk_mux(const char *name, void __iomem *reg, - u8 shift, u8 width, const char **parents, int num_parents) + u8 shift, u8 width, const char * const *parents, + int num_parents) { return clk_register_mux(NULL, name, parents, num_parents, CLK_SET_RATE_NO_REPARENT, reg, shift, @@ -200,7 +202,8 @@ 
static inline struct clk *imx_clk_mux(const char *name, void __iomem *reg, } static inline struct clk *imx_clk_mux2(const char *name, void __iomem *reg, - u8 shift, u8 width, const char **parents, int num_parents) + u8 shift, u8 width, const char * const *parents, + int num_parents) { return clk_register_mux(NULL, name, parents, num_parents, CLK_SET_RATE_NO_REPARENT | CLK_OPS_PARENT_ENABLE, @@ -208,8 +211,9 @@ static inline struct clk *imx_clk_mux2(const char *name, void __iomem *reg, } static inline struct clk *imx_clk_mux_flags(const char *name, - void __iomem *reg, u8 shift, u8 width, const char **parents, - int num_parents, unsigned long flags) + void __iomem *reg, u8 shift, u8 width, + const char * const *parents, int num_parents, + unsigned long flags) { return clk_register_mux(NULL, name, parents, num_parents, flags | CLK_SET_RATE_NO_REPARENT, reg, shift, width, 0, diff --git a/drivers/clk/meson/axg.c b/drivers/clk/meson/axg.c index 00ce62ad6416c..02229d051d778 100644 --- a/drivers/clk/meson/axg.c +++ b/drivers/clk/meson/axg.c @@ -96,7 +96,6 @@ static struct clk_regmap axg_sys_pll = { .ops = &meson_clk_pll_ro_ops, .parent_names = (const char *[]){ "xtal" }, .num_parents = 1, - .flags = CLK_GET_RATE_NOCACHE, }, }; @@ -319,6 +318,7 @@ static struct clk_regmap axg_fclk_div2 = { .ops = &clk_regmap_gate_ops, .parent_names = (const char *[]){ "fclk_div2_div" }, .num_parents = 1, + .flags = CLK_IS_CRITICAL, }, }; @@ -343,6 +343,18 @@ static struct clk_regmap axg_fclk_div3 = { .ops = &clk_regmap_gate_ops, .parent_names = (const char *[]){ "fclk_div3_div" }, .num_parents = 1, + /* + * FIXME: + * This clock, as fdiv2, is used by the SCPI FW and is required + * by the platform to operate correctly. 
+ * Until the following condition are met, we need this clock to + * be marked as critical: + * a) The SCPI generic driver claims and enable all the clocks + * it needs + * b) CCF has a clock hand-off mechanism to make the sure the + * clock stays on until the proper driver comes along + */ + .flags = CLK_IS_CRITICAL, }, }; @@ -700,12 +712,14 @@ static struct clk_regmap axg_pcie_mux = { .offset = HHI_PCIE_PLL_CNTL6, .mask = 0x1, .shift = 2, + /* skip the parent mpll3, reserved for debug */ + .table = (u32[]){ 1 }, }, .hw.init = &(struct clk_init_data){ .name = "pcie_mux", .ops = &clk_regmap_mux_ops, - .parent_names = (const char *[]){ "mpll3", "pcie_pll" }, - .num_parents = 2, + .parent_names = (const char *[]){ "pcie_pll" }, + .num_parents = 1, .flags = CLK_SET_RATE_PARENT, }, }; diff --git a/drivers/clk/meson/gxbb.c b/drivers/clk/meson/gxbb.c index 86d3ae58e84c2..6628ffa31383a 100644 --- a/drivers/clk/meson/gxbb.c +++ b/drivers/clk/meson/gxbb.c @@ -213,7 +213,6 @@ static struct clk_regmap gxbb_fixed_pll = { .ops = &meson_clk_pll_ro_ops, .parent_names = (const char *[]){ "xtal" }, .num_parents = 1, - .flags = CLK_GET_RATE_NOCACHE, }, }; @@ -276,6 +275,10 @@ static struct clk_regmap gxbb_hdmi_pll = { .ops = &meson_clk_pll_ro_ops, .parent_names = (const char *[]){ "hdmi_pll_pre_mult" }, .num_parents = 1, + /* + * Display directly handle hdmi pll registers ATM, we need + * NOCACHE to keep our view of the clock as accurate as possible + */ .flags = CLK_GET_RATE_NOCACHE, }, }; @@ -334,6 +337,10 @@ static struct clk_regmap gxl_hdmi_pll = { .ops = &meson_clk_pll_ro_ops, .parent_names = (const char *[]){ "xtal" }, .num_parents = 1, + /* + * Display directly handle hdmi pll registers ATM, we need + * NOCACHE to keep our view of the clock as accurate as possible + */ .flags = CLK_GET_RATE_NOCACHE, }, }; @@ -371,7 +378,6 @@ static struct clk_regmap gxbb_sys_pll = { .ops = &meson_clk_pll_ro_ops, .parent_names = (const char *[]){ "xtal" }, .num_parents = 1, - .flags = 
CLK_GET_RATE_NOCACHE, }, }; @@ -418,7 +424,6 @@ static struct clk_regmap gxbb_gp0_pll = { .ops = &meson_clk_pll_ops, .parent_names = (const char *[]){ "xtal" }, .num_parents = 1, - .flags = CLK_GET_RATE_NOCACHE, }, }; @@ -472,7 +477,6 @@ static struct clk_regmap gxl_gp0_pll = { .ops = &meson_clk_pll_ops, .parent_names = (const char *[]){ "xtal" }, .num_parents = 1, - .flags = CLK_GET_RATE_NOCACHE, }, }; @@ -522,6 +526,18 @@ static struct clk_regmap gxbb_fclk_div3 = { .ops = &clk_regmap_gate_ops, .parent_names = (const char *[]){ "fclk_div3_div" }, .num_parents = 1, + /* + * FIXME: + * This clock, as fdiv2, is used by the SCPI FW and is required + * by the platform to operate correctly. + * Until the following condition are met, we need this clock to + * be marked as critical: + * a) The SCPI generic driver claims and enable all the clocks + * it needs + * b) CCF has a clock hand-off mechanism to make the sure the + * clock stays on until the proper driver comes along + */ + .flags = CLK_IS_CRITICAL, }, }; diff --git a/drivers/clk/meson/meson8b.c b/drivers/clk/meson/meson8b.c index 7447d96a265f7..50060e895e7ad 100644 --- a/drivers/clk/meson/meson8b.c +++ b/drivers/clk/meson/meson8b.c @@ -132,7 +132,6 @@ static struct clk_regmap meson8b_fixed_pll = { .ops = &meson_clk_pll_ro_ops, .parent_names = (const char *[]){ "xtal" }, .num_parents = 1, - .flags = CLK_GET_RATE_NOCACHE, }, }; @@ -169,7 +168,6 @@ static struct clk_regmap meson8b_vid_pll = { .ops = &meson_clk_pll_ro_ops, .parent_names = (const char *[]){ "xtal" }, .num_parents = 1, - .flags = CLK_GET_RATE_NOCACHE, }, }; @@ -207,7 +205,6 @@ static struct clk_regmap meson8b_sys_pll = { .ops = &meson_clk_pll_ro_ops, .parent_names = (const char *[]){ "xtal" }, .num_parents = 1, - .flags = CLK_GET_RATE_NOCACHE, }, }; @@ -571,13 +568,14 @@ static struct clk_fixed_factor meson8b_cpu_div3 = { }; static const struct clk_div_table cpu_scale_table[] = { - { .val = 2, .div = 4 }, - { .val = 3, .div = 6 }, - { .val = 4, .div = 8 
}, - { .val = 5, .div = 10 }, - { .val = 6, .div = 12 }, - { .val = 7, .div = 14 }, - { .val = 8, .div = 16 }, + { .val = 1, .div = 4 }, + { .val = 2, .div = 6 }, + { .val = 3, .div = 8 }, + { .val = 4, .div = 10 }, + { .val = 5, .div = 12 }, + { .val = 6, .div = 14 }, + { .val = 7, .div = 16 }, + { .val = 8, .div = 18 }, { /* sentinel */ }, }; diff --git a/drivers/clk/mmp/clk.c b/drivers/clk/mmp/clk.c index ad8d483a35cd5..ca7d37e2c7be6 100644 --- a/drivers/clk/mmp/clk.c +++ b/drivers/clk/mmp/clk.c @@ -183,7 +183,7 @@ void mmp_clk_add(struct mmp_clk_unit *unit, unsigned int id, pr_err("CLK %d has invalid pointer %p\n", id, clk); return; } - if (id > unit->nr_clks) { + if (id >= unit->nr_clks) { pr_err("CLK %d is invalid\n", id); return; } diff --git a/drivers/clk/mvebu/cp110-system-controller.c b/drivers/clk/mvebu/cp110-system-controller.c index 75bf7b8f282fc..0153c76d4a20a 100644 --- a/drivers/clk/mvebu/cp110-system-controller.c +++ b/drivers/clk/mvebu/cp110-system-controller.c @@ -202,11 +202,11 @@ static struct clk_hw *cp110_of_clk_get(struct of_phandle_args *clkspec, unsigned int idx = clkspec->args[1]; if (type == CP110_CLK_TYPE_CORE) { - if (idx > CP110_MAX_CORE_CLOCKS) + if (idx >= CP110_MAX_CORE_CLOCKS) return ERR_PTR(-EINVAL); return clk_data->hws[idx]; } else if (type == CP110_CLK_TYPE_GATABLE) { - if (idx > CP110_MAX_GATABLE_CLOCKS) + if (idx >= CP110_MAX_GATABLE_CLOCKS) return ERR_PTR(-EINVAL); return clk_data->hws[CP110_MAX_CORE_CLOCKS + idx]; } diff --git a/drivers/clk/renesas/r9a06g032-clocks.c b/drivers/clk/renesas/r9a06g032-clocks.c index a0b6ecdc63dd3..6d2b568915597 100644 --- a/drivers/clk/renesas/r9a06g032-clocks.c +++ b/drivers/clk/renesas/r9a06g032-clocks.c @@ -539,7 +539,8 @@ r9a06g032_div_round_rate(struct clk_hw *hw, * several uarts attached to this divider, and changing this impacts * everyone. 
*/ - if (clk->index == R9A06G032_DIV_UART) { + if (clk->index == R9A06G032_DIV_UART || + clk->index == R9A06G032_DIV_P2_PG) { pr_devel("%s div uart hack!\n", __func__); return clk_get_rate(hw->clk); } diff --git a/drivers/clk/rockchip/clk-ddr.c b/drivers/clk/rockchip/clk-ddr.c index e8075359366b0..ebce5260068b7 100644 --- a/drivers/clk/rockchip/clk-ddr.c +++ b/drivers/clk/rockchip/clk-ddr.c @@ -80,16 +80,12 @@ static long rockchip_ddrclk_sip_round_rate(struct clk_hw *hw, static u8 rockchip_ddrclk_get_parent(struct clk_hw *hw) { struct rockchip_ddrclk *ddrclk = to_rockchip_ddrclk_hw(hw); - int num_parents = clk_hw_get_num_parents(hw); u32 val; val = clk_readl(ddrclk->reg_base + ddrclk->mux_offset) >> ddrclk->mux_shift; val &= GENMASK(ddrclk->mux_width - 1, 0); - if (val >= num_parents) - return -EINVAL; - return val; } diff --git a/drivers/clk/rockchip/clk-rk3188.c b/drivers/clk/rockchip/clk-rk3188.c index 67e73fd71f095..69fb3afc970fb 100644 --- a/drivers/clk/rockchip/clk-rk3188.c +++ b/drivers/clk/rockchip/clk-rk3188.c @@ -382,7 +382,7 @@ static struct rockchip_clk_branch common_clk_branches[] __initdata = { COMPOSITE_NOMUX(0, "spdif_pre", "i2s_src", 0, RK2928_CLKSEL_CON(5), 0, 7, DFLAGS, RK2928_CLKGATE_CON(0), 13, GFLAGS), - COMPOSITE_FRACMUX(0, "spdif_frac", "spdif_pll", CLK_SET_RATE_PARENT, + COMPOSITE_FRACMUX(0, "spdif_frac", "spdif_pre", CLK_SET_RATE_PARENT, RK2928_CLKSEL_CON(9), 0, RK2928_CLKGATE_CON(0), 14, GFLAGS, &common_spdif_fracmux), diff --git a/drivers/clk/rockchip/clk-rk3328.c b/drivers/clk/rockchip/clk-rk3328.c index 252366a5231f7..2c54266077907 100644 --- a/drivers/clk/rockchip/clk-rk3328.c +++ b/drivers/clk/rockchip/clk-rk3328.c @@ -813,22 +813,22 @@ static struct rockchip_clk_branch rk3328_clk_branches[] __initdata = { MMC(SCLK_SDMMC_DRV, "sdmmc_drv", "clk_sdmmc", RK3328_SDMMC_CON0, 1), MMC(SCLK_SDMMC_SAMPLE, "sdmmc_sample", "clk_sdmmc", - RK3328_SDMMC_CON1, 1), + RK3328_SDMMC_CON1, 0), MMC(SCLK_SDIO_DRV, "sdio_drv", "clk_sdio", RK3328_SDIO_CON0, 
1), MMC(SCLK_SDIO_SAMPLE, "sdio_sample", "clk_sdio", - RK3328_SDIO_CON1, 1), + RK3328_SDIO_CON1, 0), MMC(SCLK_EMMC_DRV, "emmc_drv", "clk_emmc", RK3328_EMMC_CON0, 1), MMC(SCLK_EMMC_SAMPLE, "emmc_sample", "clk_emmc", - RK3328_EMMC_CON1, 1), + RK3328_EMMC_CON1, 0), MMC(SCLK_SDMMC_EXT_DRV, "sdmmc_ext_drv", "clk_sdmmc_ext", RK3328_SDMMC_EXT_CON0, 1), MMC(SCLK_SDMMC_EXT_SAMPLE, "sdmmc_ext_sample", "clk_sdmmc_ext", - RK3328_SDMMC_EXT_CON1, 1), + RK3328_SDMMC_EXT_CON1, 0), }; static const char *const rk3328_critical_clocks[] __initconst = { diff --git a/drivers/clk/samsung/clk-exynos5420.c b/drivers/clk/samsung/clk-exynos5420.c index 95e1bf69449b7..d4f77c4eb277a 100644 --- a/drivers/clk/samsung/clk-exynos5420.c +++ b/drivers/clk/samsung/clk-exynos5420.c @@ -281,6 +281,7 @@ static const struct samsung_clk_reg_dump exynos5420_set_clksrc[] = { { .offset = GATE_BUS_TOP, .value = 0xffffffff, }, { .offset = GATE_BUS_DISP1, .value = 0xffffffff, }, { .offset = GATE_IP_PERIC, .value = 0xffffffff, }, + { .offset = GATE_IP_PERIS, .value = 0xffffffff, }, }; static int exynos5420_clk_suspend(void) diff --git a/drivers/clk/socfpga/clk-pll-s10.c b/drivers/clk/socfpga/clk-pll-s10.c index 2d5d8b43727e9..c4d0b6f6abf2e 100644 --- a/drivers/clk/socfpga/clk-pll-s10.c +++ b/drivers/clk/socfpga/clk-pll-s10.c @@ -43,7 +43,7 @@ static unsigned long clk_pll_recalc_rate(struct clk_hw *hwclk, /* Read mdiv and fdiv from the fdbck register */ reg = readl(socfpgaclk->hw.reg + 0x4); mdiv = (reg & SOCFPGA_PLL_MDIV_MASK) >> SOCFPGA_PLL_MDIV_SHIFT; - vco_freq = (unsigned long long)parent_rate * (mdiv + 6); + vco_freq = (unsigned long long)vco_freq * (mdiv + 6); return (unsigned long)vco_freq; } diff --git a/drivers/clk/socfpga/clk-s10.c b/drivers/clk/socfpga/clk-s10.c index 5b238fc314ac6..8281dfbf38c2f 100644 --- a/drivers/clk/socfpga/clk-s10.c +++ b/drivers/clk/socfpga/clk-s10.c @@ -12,17 +12,17 @@ #include "stratix10-clk.h" -static const char * const pll_mux[] = { "osc1", "cb_intosc_hs_div2_clk", - 
"f2s_free_clk",}; +static const char * const pll_mux[] = { "osc1", "cb-intosc-hs-div2-clk", + "f2s-free-clk",}; static const char * const cntr_mux[] = { "main_pll", "periph_pll", - "osc1", "cb_intosc_hs_div2_clk", - "f2s_free_clk"}; -static const char * const boot_mux[] = { "osc1", "cb_intosc_hs_div2_clk",}; + "osc1", "cb-intosc-hs-div2-clk", + "f2s-free-clk"}; +static const char * const boot_mux[] = { "osc1", "cb-intosc-hs-div2-clk",}; static const char * const noc_free_mux[] = {"main_noc_base_clk", "peri_noc_base_clk", - "osc1", "cb_intosc_hs_div2_clk", - "f2s_free_clk"}; + "osc1", "cb-intosc-hs-div2-clk", + "f2s-free-clk"}; static const char * const emaca_free_mux[] = {"peri_emaca_clk", "boot_clk"}; static const char * const emacb_free_mux[] = {"peri_emacb_clk", "boot_clk"}; @@ -33,14 +33,14 @@ static const char * const s2f_usr1_free_mux[] = {"peri_s2f_usr1_clk", "boot_clk" static const char * const psi_ref_free_mux[] = {"peri_psi_ref_clk", "boot_clk"}; static const char * const mpu_mux[] = { "mpu_free_clk", "boot_clk",}; -static const char * const s2f_usr0_mux[] = {"f2s_free_clk", "boot_clk"}; +static const char * const s2f_usr0_mux[] = {"f2s-free-clk", "boot_clk"}; static const char * const emac_mux[] = {"emaca_free_clk", "emacb_free_clk"}; static const char * const noc_mux[] = {"noc_free_clk", "boot_clk"}; static const char * const mpu_free_mux[] = {"main_mpu_base_clk", "peri_mpu_base_clk", - "osc1", "cb_intosc_hs_div2_clk", - "f2s_free_clk"}; + "osc1", "cb-intosc-hs-div2-clk", + "f2s-free-clk"}; /* clocks in AO (always on) controller */ static const struct stratix10_pll_clock s10_pll_clks[] = { diff --git a/drivers/clk/sunxi-ng/ccu-sun50i-h6.c b/drivers/clk/sunxi-ng/ccu-sun50i-h6.c index bdbfe78fe1333..0f7a0ffd3f706 100644 --- a/drivers/clk/sunxi-ng/ccu-sun50i-h6.c +++ b/drivers/clk/sunxi-ng/ccu-sun50i-h6.c @@ -224,7 +224,7 @@ static SUNXI_CCU_MP_WITH_MUX(psi_ahb1_ahb2_clk, "psi-ahb1-ahb2", psi_ahb1_ahb2_parents, 0x510, 0, 5, /* M */ - 16, 2, /* P */ + 8, 2, 
/* P */ 24, 2, /* mux */ 0); @@ -233,19 +233,19 @@ static const char * const ahb3_apb1_apb2_parents[] = { "osc24M", "osc32k", "pll-periph0" }; static SUNXI_CCU_MP_WITH_MUX(ahb3_clk, "ahb3", ahb3_apb1_apb2_parents, 0x51c, 0, 5, /* M */ - 16, 2, /* P */ + 8, 2, /* P */ 24, 2, /* mux */ 0); static SUNXI_CCU_MP_WITH_MUX(apb1_clk, "apb1", ahb3_apb1_apb2_parents, 0x520, 0, 5, /* M */ - 16, 2, /* P */ + 8, 2, /* P */ 24, 2, /* mux */ 0); static SUNXI_CCU_MP_WITH_MUX(apb2_clk, "apb2", ahb3_apb1_apb2_parents, 0x524, 0, 5, /* M */ - 16, 2, /* P */ + 8, 2, /* P */ 24, 2, /* mux */ 0); diff --git a/drivers/clk/sunxi-ng/ccu_nm.c b/drivers/clk/sunxi-ng/ccu_nm.c index 4e2073307f340..9e3944f868fff 100644 --- a/drivers/clk/sunxi-ng/ccu_nm.c +++ b/drivers/clk/sunxi-ng/ccu_nm.c @@ -19,6 +19,17 @@ struct _ccu_nm { unsigned long m, min_m, max_m; }; +static unsigned long ccu_nm_calc_rate(unsigned long parent, + unsigned long n, unsigned long m) +{ + u64 rate = parent; + + rate *= n; + do_div(rate, m); + + return rate; +} + static void ccu_nm_find_best(unsigned long parent, unsigned long rate, struct _ccu_nm *nm) { @@ -28,7 +39,8 @@ static void ccu_nm_find_best(unsigned long parent, unsigned long rate, for (_n = nm->min_n; _n <= nm->max_n; _n++) { for (_m = nm->min_m; _m <= nm->max_m; _m++) { - unsigned long tmp_rate = parent * _n / _m; + unsigned long tmp_rate = ccu_nm_calc_rate(parent, + _n, _m); if (tmp_rate > rate) continue; @@ -100,7 +112,7 @@ static unsigned long ccu_nm_recalc_rate(struct clk_hw *hw, if (ccu_sdm_helper_is_enabled(&nm->common, &nm->sdm)) rate = ccu_sdm_helper_read_rate(&nm->common, &nm->sdm, m, n); else - rate = parent_rate * n / m; + rate = ccu_nm_calc_rate(parent_rate, n, m); if (nm->common.features & CCU_FEATURE_FIXED_POSTDIV) rate /= nm->fixed_post_div; @@ -142,7 +154,7 @@ static long ccu_nm_round_rate(struct clk_hw *hw, unsigned long rate, _nm.max_m = nm->m.max ?: 1 << nm->m.width; ccu_nm_find_best(*parent_rate, rate, &_nm); - rate = *parent_rate * _nm.n / 
_nm.m; + rate = ccu_nm_calc_rate(*parent_rate, _nm.n, _nm.m); if (nm->common.features & CCU_FEATURE_FIXED_POSTDIV) rate /= nm->fixed_post_div; diff --git a/drivers/clk/ti/clk.c b/drivers/clk/ti/clk.c index 7d22e1af22477..27e0979b31586 100644 --- a/drivers/clk/ti/clk.c +++ b/drivers/clk/ti/clk.c @@ -129,7 +129,7 @@ int ti_clk_setup_ll_ops(struct ti_clk_ll_ops *ops) void __init ti_dt_clocks_register(struct ti_dt_clk oclks[]) { struct ti_dt_clk *c; - struct device_node *node; + struct device_node *node, *parent; struct clk *clk; struct of_phandle_args clkspec; char buf[64]; @@ -164,8 +164,12 @@ void __init ti_dt_clocks_register(struct ti_dt_clk oclks[]) continue; node = of_find_node_by_name(NULL, buf); - if (num_args) - node = of_find_node_by_name(node, "clk"); + if (num_args) { + parent = node; + node = of_get_child_by_name(parent, "clk"); + of_node_put(parent); + } + clkspec.np = node; clkspec.args_count = num_args; for (i = 0; i < num_args; i++) { @@ -173,11 +177,12 @@ void __init ti_dt_clocks_register(struct ti_dt_clk oclks[]) if (ret) { pr_warn("Bad tag in %s at %d: %s\n", c->node_name, i, tags[i]); + of_node_put(node); return; } } clk = of_clk_get_from_provider(&clkspec); - + of_node_put(node); if (!IS_ERR(clk)) { c->lk.clk = clk; clkdev_add(&c->lk); diff --git a/drivers/clocksource/Kconfig b/drivers/clocksource/Kconfig index a11f4ba98b05c..316d48d7be729 100644 --- a/drivers/clocksource/Kconfig +++ b/drivers/clocksource/Kconfig @@ -290,6 +290,7 @@ config CLKSRC_MPS2 config ARC_TIMERS bool "Support for 32-bit TIMERn counters in ARC Cores" if COMPILE_TEST + depends on GENERIC_SCHED_CLOCK select TIMER_OF help These are legacy 32-bit TIMER0 and TIMER1 counters found on all ARC cores diff --git a/drivers/clocksource/arc_timer.c b/drivers/clocksource/arc_timer.c index 20da9b1d7f7d0..b28970ca4a7a9 100644 --- a/drivers/clocksource/arc_timer.c +++ b/drivers/clocksource/arc_timer.c @@ -23,6 +23,7 @@ #include #include #include +#include #include #include @@ -88,6 +89,11 @@ 
static u64 arc_read_gfrc(struct clocksource *cs) return (((u64)h) << 32) | l; } +static notrace u64 arc_gfrc_clock_read(void) +{ + return arc_read_gfrc(NULL); +} + static struct clocksource arc_counter_gfrc = { .name = "ARConnect GFRC", .rating = 400, @@ -111,6 +117,8 @@ static int __init arc_cs_setup_gfrc(struct device_node *node) if (ret) return ret; + sched_clock_register(arc_gfrc_clock_read, 64, arc_timer_freq); + return clocksource_register_hz(&arc_counter_gfrc, arc_timer_freq); } TIMER_OF_DECLARE(arc_gfrc, "snps,archs-timer-gfrc", arc_cs_setup_gfrc); @@ -139,6 +147,11 @@ static u64 arc_read_rtc(struct clocksource *cs) return (((u64)h) << 32) | l; } +static notrace u64 arc_rtc_clock_read(void) +{ + return arc_read_rtc(NULL); +} + static struct clocksource arc_counter_rtc = { .name = "ARCv2 RTC", .rating = 350, @@ -170,6 +183,8 @@ static int __init arc_cs_setup_rtc(struct device_node *node) write_aux_reg(AUX_RTC_CTRL, 1); + sched_clock_register(arc_rtc_clock_read, 64, arc_timer_freq); + return clocksource_register_hz(&arc_counter_rtc, arc_timer_freq); } TIMER_OF_DECLARE(arc_rtc, "snps,archs-timer-rtc", arc_cs_setup_rtc); @@ -185,6 +200,11 @@ static u64 arc_read_timer1(struct clocksource *cs) return (u64) read_aux_reg(ARC_REG_TIMER1_CNT); } +static notrace u64 arc_timer1_clock_read(void) +{ + return arc_read_timer1(NULL); +} + static struct clocksource arc_counter_timer1 = { .name = "ARC Timer1", .rating = 300, @@ -209,6 +229,8 @@ static int __init arc_cs_setup_timer1(struct device_node *node) write_aux_reg(ARC_REG_TIMER1_CNT, 0); write_aux_reg(ARC_REG_TIMER1_CTRL, TIMER_CTRL_NH); + sched_clock_register(arc_timer1_clock_read, 32, arc_timer_freq); + return clocksource_register_hz(&arc_counter_timer1, arc_timer_freq); } diff --git a/drivers/clocksource/i8253.c b/drivers/clocksource/i8253.c index 9c38895542f4a..d4350bb10b83a 100644 --- a/drivers/clocksource/i8253.c +++ b/drivers/clocksource/i8253.c @@ -20,6 +20,13 @@ DEFINE_RAW_SPINLOCK(i8253_lock); 
EXPORT_SYMBOL(i8253_lock); +/* + * Handle PIT quirk in pit_shutdown() where zeroing the counter register + * restarts the PIT, negating the shutdown. On platforms with the quirk, + * platform specific code can set this to false. + */ +bool i8253_clear_counter_on_shutdown __ro_after_init = true; + #ifdef CONFIG_CLKSRC_I8253 /* * Since the PIT overflows every tick, its not very useful @@ -109,8 +116,11 @@ static int pit_shutdown(struct clock_event_device *evt) raw_spin_lock(&i8253_lock); outb_p(0x30, PIT_MODE); - outb_p(0, PIT_CH0); - outb_p(0, PIT_CH0); + + if (i8253_clear_counter_on_shutdown) { + outb_p(0, PIT_CH0); + outb_p(0, PIT_CH0); + } raw_spin_unlock(&i8253_lock); return 0; diff --git a/drivers/clocksource/timer-integrator-ap.c b/drivers/clocksource/timer-integrator-ap.c index 62d24690ba020..9701107806a73 100644 --- a/drivers/clocksource/timer-integrator-ap.c +++ b/drivers/clocksource/timer-integrator-ap.c @@ -181,8 +181,7 @@ static int __init integrator_ap_timer_init_of(struct device_node *node) int irq; struct clk *clk; unsigned long rate; - struct device_node *pri_node; - struct device_node *sec_node; + struct device_node *alias_node; base = of_io_request_and_map(node, 0, "integrator-timer"); if (IS_ERR(base)) @@ -204,7 +203,18 @@ static int __init integrator_ap_timer_init_of(struct device_node *node) return err; } - pri_node = of_find_node_by_path(path); + alias_node = of_find_node_by_path(path); + + /* + * The pointer is used as an identifier not as a pointer, we + * can drop the refcount on the of__node immediately after + * getting it. 
+ */ + of_node_put(alias_node); + + if (node == alias_node) + /* The primary timer lacks IRQ, use as clocksource */ + return integrator_clocksource_init(rate, base); err = of_property_read_string(of_aliases, "arm,timer-secondary", &path); @@ -213,14 +223,11 @@ static int __init integrator_ap_timer_init_of(struct device_node *node) return err; } + alias_node = of_find_node_by_path(path); - sec_node = of_find_node_by_path(path); - - if (node == pri_node) - /* The primary timer lacks IRQ, use as clocksource */ - return integrator_clocksource_init(rate, base); + of_node_put(alias_node); - if (node == sec_node) { + if (node == alias_node) { /* The secondary timer will drive the clock event */ irq = irq_of_parse_and_map(node, 0); return integrator_clockevent_init(rate, base, irq); diff --git a/drivers/cpufreq/cpufreq-dt.c b/drivers/cpufreq/cpufreq-dt.c index 0a9ebf00be468..e58bfcb1169eb 100644 --- a/drivers/cpufreq/cpufreq-dt.c +++ b/drivers/cpufreq/cpufreq-dt.c @@ -32,6 +32,7 @@ struct private_data { struct device *cpu_dev; struct thermal_cooling_device *cdev; const char *reg_name; + bool have_static_opps; }; static struct freq_attr *cpufreq_dt_attr[] = { @@ -204,6 +205,15 @@ static int cpufreq_init(struct cpufreq_policy *policy) } } + priv = kzalloc(sizeof(*priv), GFP_KERNEL); + if (!priv) { + ret = -ENOMEM; + goto out_put_regulator; + } + + priv->reg_name = name; + priv->opp_table = opp_table; + /* * Initialize OPP tables for all policy->cpus. They will be shared by * all CPUs which have marked their CPUs shared with OPP bindings. 
@@ -214,7 +224,8 @@ static int cpufreq_init(struct cpufreq_policy *policy) * * OPPs might be populated at runtime, don't check for error here */ - dev_pm_opp_of_cpumask_add_table(policy->cpus); + if (!dev_pm_opp_of_cpumask_add_table(policy->cpus)) + priv->have_static_opps = true; /* * But we need OPP table to function so if it is not there let's @@ -240,19 +251,10 @@ static int cpufreq_init(struct cpufreq_policy *policy) __func__, ret); } - priv = kzalloc(sizeof(*priv), GFP_KERNEL); - if (!priv) { - ret = -ENOMEM; - goto out_free_opp; - } - - priv->reg_name = name; - priv->opp_table = opp_table; - ret = dev_pm_opp_init_cpufreq_table(cpu_dev, &freq_table); if (ret) { dev_err(cpu_dev, "failed to init cpufreq table: %d\n", ret); - goto out_free_priv; + goto out_free_opp; } priv->cpu_dev = cpu_dev; @@ -282,10 +284,11 @@ static int cpufreq_init(struct cpufreq_policy *policy) out_free_cpufreq_table: dev_pm_opp_free_cpufreq_table(cpu_dev, &freq_table); -out_free_priv: - kfree(priv); out_free_opp: - dev_pm_opp_of_cpumask_remove_table(policy->cpus); + if (priv->have_static_opps) + dev_pm_opp_of_cpumask_remove_table(policy->cpus); + kfree(priv); +out_put_regulator: if (name) dev_pm_opp_put_regulators(opp_table); out_put_clk: @@ -300,7 +303,8 @@ static int cpufreq_exit(struct cpufreq_policy *policy) cpufreq_cooling_unregister(priv->cdev); dev_pm_opp_free_cpufreq_table(priv->cpu_dev, &policy->freq_table); - dev_pm_opp_of_cpumask_remove_table(policy->related_cpus); + if (priv->have_static_opps) + dev_pm_opp_of_cpumask_remove_table(policy->related_cpus); if (priv->reg_name) dev_pm_opp_put_regulators(priv->opp_table); diff --git a/drivers/cpufreq/cpufreq_conservative.c b/drivers/cpufreq/cpufreq_conservative.c index f20f20a77d4d3..4268f87e99fcf 100644 --- a/drivers/cpufreq/cpufreq_conservative.c +++ b/drivers/cpufreq/cpufreq_conservative.c @@ -80,8 +80,10 @@ static unsigned int cs_dbs_update(struct cpufreq_policy *policy) * changed in the meantime, so fall back to current 
frequency in that * case. */ - if (requested_freq > policy->max || requested_freq < policy->min) + if (requested_freq > policy->max || requested_freq < policy->min) { requested_freq = policy->cur; + dbs_info->requested_freq = requested_freq; + } freq_step = get_freq_step(cs_tuners, policy); @@ -92,7 +94,7 @@ static unsigned int cs_dbs_update(struct cpufreq_policy *policy) if (policy_dbs->idle_periods < UINT_MAX) { unsigned int freq_steps = policy_dbs->idle_periods * freq_step; - if (requested_freq > freq_steps) + if (requested_freq > policy->min + freq_steps) requested_freq -= freq_steps; else requested_freq = policy->min; diff --git a/drivers/cpufreq/imx6q-cpufreq.c b/drivers/cpufreq/imx6q-cpufreq.c index b2ff423ad7f82..f4880a4f865bc 100644 --- a/drivers/cpufreq/imx6q-cpufreq.c +++ b/drivers/cpufreq/imx6q-cpufreq.c @@ -159,8 +159,13 @@ static int imx6q_set_target(struct cpufreq_policy *policy, unsigned int index) /* Ensure the arm clock divider is what we expect */ ret = clk_set_rate(clks[ARM].clk, new_freq * 1000); if (ret) { + int ret1; + dev_err(cpu_dev, "failed to set clock rate: %d\n", ret); - regulator_set_voltage_tol(arm_reg, volt_old, 0); + ret1 = regulator_set_voltage_tol(arm_reg, volt_old, 0); + if (ret1) + dev_warn(cpu_dev, + "failed to restore vddarm voltage: %d\n", ret1); return ret; } diff --git a/drivers/cpufreq/scmi-cpufreq.c b/drivers/cpufreq/scmi-cpufreq.c index 50b1551ba8942..3f06934394869 100644 --- a/drivers/cpufreq/scmi-cpufreq.c +++ b/drivers/cpufreq/scmi-cpufreq.c @@ -52,9 +52,9 @@ scmi_cpufreq_set_target(struct cpufreq_policy *policy, unsigned int index) int ret; struct scmi_data *priv = policy->driver_data; struct scmi_perf_ops *perf_ops = handle->perf_ops; - u64 freq = policy->freq_table[index].frequency * 1000; + u64 freq = policy->freq_table[index].frequency; - ret = perf_ops->freq_set(handle, priv->domain_id, freq, false); + ret = perf_ops->freq_set(handle, priv->domain_id, freq * 1000, false); if (!ret) 
arch_set_freq_scale(policy->related_cpus, freq, policy->cpuinfo.max_freq); diff --git a/drivers/cpufreq/ti-cpufreq.c b/drivers/cpufreq/ti-cpufreq.c index 3f0e2a14895a0..22b53bf268179 100644 --- a/drivers/cpufreq/ti-cpufreq.c +++ b/drivers/cpufreq/ti-cpufreq.c @@ -201,19 +201,28 @@ static const struct of_device_id ti_cpufreq_of_match[] = { {}, }; +static const struct of_device_id *ti_cpufreq_match_node(void) +{ + struct device_node *np; + const struct of_device_id *match; + + np = of_find_node_by_path("/"); + match = of_match_node(ti_cpufreq_of_match, np); + of_node_put(np); + + return match; +} + static int ti_cpufreq_probe(struct platform_device *pdev) { u32 version[VERSION_COUNT]; - struct device_node *np; const struct of_device_id *match; struct opp_table *ti_opp_table; struct ti_cpufreq_data *opp_data; const char * const reg_names[] = {"vdd", "vbb"}; int ret; - np = of_find_node_by_path("/"); - match = of_match_node(ti_cpufreq_of_match, np); - of_node_put(np); + match = dev_get_platdata(&pdev->dev); if (!match) return -ENODEV; @@ -290,7 +299,14 @@ static int ti_cpufreq_probe(struct platform_device *pdev) static int ti_cpufreq_init(void) { - platform_device_register_simple("ti-cpufreq", -1, NULL, 0); + const struct of_device_id *match; + + /* Check to ensure we are on a compatible platform */ + match = ti_cpufreq_match_node(); + if (match) + platform_device_register_data(NULL, "ti-cpufreq", -1, match, + sizeof(*match)); + return 0; } module_init(ti_cpufreq_init); diff --git a/drivers/cpuidle/cpuidle-arm.c b/drivers/cpuidle/cpuidle-arm.c index 073557f433eb1..df564d7832161 100644 --- a/drivers/cpuidle/cpuidle-arm.c +++ b/drivers/cpuidle/cpuidle-arm.c @@ -103,13 +103,6 @@ static int __init arm_idle_init_cpu(int cpu) goto out_kfree_drv; } - ret = cpuidle_register_driver(drv); - if (ret) { - if (ret != -EBUSY) - pr_err("Failed to register cpuidle driver\n"); - goto out_kfree_drv; - } - /* * Call arch CPU operations in order to initialize * idle states suspend 
back-end specific data @@ -117,15 +110,20 @@ static int __init arm_idle_init_cpu(int cpu) ret = arm_cpuidle_init(cpu); /* - * Skip the cpuidle device initialization if the reported + * Allow the initialization to continue for other CPUs, if the reported * failure is a HW misconfiguration/breakage (-ENXIO). */ - if (ret == -ENXIO) - return 0; - if (ret) { pr_err("CPU %d failed to init idle CPU ops\n", cpu); - goto out_unregister_drv; + ret = ret == -ENXIO ? 0 : ret; + goto out_kfree_drv; + } + + ret = cpuidle_register_driver(drv); + if (ret) { + if (ret != -EBUSY) + pr_err("Failed to register cpuidle driver\n"); + goto out_kfree_drv; } dev = kzalloc(sizeof(*dev), GFP_KERNEL); diff --git a/drivers/cpuidle/cpuidle-pseries.c b/drivers/cpuidle/cpuidle-pseries.c index 9e56bc411061f..74c247972bb36 100644 --- a/drivers/cpuidle/cpuidle-pseries.c +++ b/drivers/cpuidle/cpuidle-pseries.c @@ -247,7 +247,13 @@ static int pseries_idle_probe(void) return -ENODEV; if (firmware_has_feature(FW_FEATURE_SPLPAR)) { - if (lppaca_shared_proc(get_lppaca())) { + /* + * Use local_paca instead of get_lppaca() since + * preemption is not disabled, and it is not required in + * fact, since lppaca_ptr does not need to be the value + * associated to the current CPU, it can be from any CPU. 
+ */ + if (lppaca_shared_proc(local_paca->lppaca_ptr)) { cpuidle_state_table = shared_states; max_idle_state = ARRAY_SIZE(shared_states); } else { diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig index a8c4ce07fc9d6..a825b64444594 100644 --- a/drivers/crypto/Kconfig +++ b/drivers/crypto/Kconfig @@ -681,6 +681,7 @@ config CRYPTO_DEV_BCM_SPU depends on ARCH_BCM_IPROC depends on MAILBOX default m + select CRYPTO_AUTHENC select CRYPTO_DES select CRYPTO_MD5 select CRYPTO_SHA1 diff --git a/drivers/crypto/bcm/cipher.c b/drivers/crypto/bcm/cipher.c index 2d1f1db9f8074..cd464637b0cb6 100644 --- a/drivers/crypto/bcm/cipher.c +++ b/drivers/crypto/bcm/cipher.c @@ -2845,44 +2845,28 @@ static int aead_authenc_setkey(struct crypto_aead *cipher, struct spu_hw *spu = &iproc_priv.spu; struct iproc_ctx_s *ctx = crypto_aead_ctx(cipher); struct crypto_tfm *tfm = crypto_aead_tfm(cipher); - struct rtattr *rta = (void *)key; - struct crypto_authenc_key_param *param; - const u8 *origkey = key; - const unsigned int origkeylen = keylen; - - int ret = 0; + struct crypto_authenc_keys keys; + int ret; flow_log("%s() aead:%p key:%p keylen:%u\n", __func__, cipher, key, keylen); flow_dump(" key: ", key, keylen); - if (!RTA_OK(rta, keylen)) - goto badkey; - if (rta->rta_type != CRYPTO_AUTHENC_KEYA_PARAM) - goto badkey; - if (RTA_PAYLOAD(rta) < sizeof(*param)) + ret = crypto_authenc_extractkeys(&keys, key, keylen); + if (ret) goto badkey; - param = RTA_DATA(rta); - ctx->enckeylen = be32_to_cpu(param->enckeylen); - - key += RTA_ALIGN(rta->rta_len); - keylen -= RTA_ALIGN(rta->rta_len); - - if (keylen < ctx->enckeylen) - goto badkey; - if (ctx->enckeylen > MAX_KEY_SIZE) + if (keys.enckeylen > MAX_KEY_SIZE || + keys.authkeylen > MAX_KEY_SIZE) goto badkey; - ctx->authkeylen = keylen - ctx->enckeylen; - - if (ctx->authkeylen > MAX_KEY_SIZE) - goto badkey; + ctx->enckeylen = keys.enckeylen; + ctx->authkeylen = keys.authkeylen; - memcpy(ctx->enckey, key + ctx->authkeylen, ctx->enckeylen); + 
memcpy(ctx->enckey, keys.enckey, keys.enckeylen); /* May end up padding auth key. So make sure it's zeroed. */ memset(ctx->authkey, 0, sizeof(ctx->authkey)); - memcpy(ctx->authkey, key, ctx->authkeylen); + memcpy(ctx->authkey, keys.authkey, keys.authkeylen); switch (ctx->alg->cipher_info.alg) { case CIPHER_ALG_DES: @@ -2890,7 +2874,7 @@ static int aead_authenc_setkey(struct crypto_aead *cipher, u32 tmp[DES_EXPKEY_WORDS]; u32 flags = CRYPTO_TFM_RES_WEAK_KEY; - if (des_ekey(tmp, key) == 0) { + if (des_ekey(tmp, keys.enckey) == 0) { if (crypto_aead_get_flags(cipher) & CRYPTO_TFM_REQ_WEAK_KEY) { crypto_aead_set_flags(cipher, flags); @@ -2905,7 +2889,7 @@ static int aead_authenc_setkey(struct crypto_aead *cipher, break; case CIPHER_ALG_3DES: if (ctx->enckeylen == (DES_KEY_SIZE * 3)) { - const u32 *K = (const u32 *)key; + const u32 *K = (const u32 *)keys.enckey; u32 flags = CRYPTO_TFM_RES_BAD_KEY_SCHED; if (!((K[0] ^ K[2]) | (K[1] ^ K[3])) || @@ -2956,9 +2940,7 @@ static int aead_authenc_setkey(struct crypto_aead *cipher, ctx->fallback_cipher->base.crt_flags &= ~CRYPTO_TFM_REQ_MASK; ctx->fallback_cipher->base.crt_flags |= tfm->crt_flags & CRYPTO_TFM_REQ_MASK; - ret = - crypto_aead_setkey(ctx->fallback_cipher, origkey, - origkeylen); + ret = crypto_aead_setkey(ctx->fallback_cipher, key, keylen); if (ret) { flow_log(" fallback setkey() returned:%d\n", ret); tfm->crt_flags &= ~CRYPTO_TFM_RES_MASK; diff --git a/drivers/crypto/caam/caamhash.c b/drivers/crypto/caam/caamhash.c index 43975ab5f09c1..f84ca2ff61dea 100644 --- a/drivers/crypto/caam/caamhash.c +++ b/drivers/crypto/caam/caamhash.c @@ -1131,13 +1131,16 @@ static int ahash_final_no_ctx(struct ahash_request *req) desc = edesc->hw_desc; - state->buf_dma = dma_map_single(jrdev, buf, buflen, DMA_TO_DEVICE); - if (dma_mapping_error(jrdev, state->buf_dma)) { - dev_err(jrdev, "unable to map src\n"); - goto unmap; - } + if (buflen) { + state->buf_dma = dma_map_single(jrdev, buf, buflen, + DMA_TO_DEVICE); + if 
(dma_mapping_error(jrdev, state->buf_dma)) { + dev_err(jrdev, "unable to map src\n"); + goto unmap; + } - append_seq_in_ptr(desc, state->buf_dma, buflen, 0); + append_seq_in_ptr(desc, state->buf_dma, buflen, 0); + } edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result, digestsize); diff --git a/drivers/crypto/caam/regs.h b/drivers/crypto/caam/regs.h index 4fb91ba39c36b..ce3f9ad7120f0 100644 --- a/drivers/crypto/caam/regs.h +++ b/drivers/crypto/caam/regs.h @@ -70,22 +70,22 @@ extern bool caam_little_end; extern bool caam_imx; -#define caam_to_cpu(len) \ -static inline u##len caam##len ## _to_cpu(u##len val) \ -{ \ - if (caam_little_end) \ - return le##len ## _to_cpu(val); \ - else \ - return be##len ## _to_cpu(val); \ +#define caam_to_cpu(len) \ +static inline u##len caam##len ## _to_cpu(u##len val) \ +{ \ + if (caam_little_end) \ + return le##len ## _to_cpu((__force __le##len)val); \ + else \ + return be##len ## _to_cpu((__force __be##len)val); \ } -#define cpu_to_caam(len) \ -static inline u##len cpu_to_caam##len(u##len val) \ -{ \ - if (caam_little_end) \ - return cpu_to_le##len(val); \ - else \ - return cpu_to_be##len(val); \ +#define cpu_to_caam(len) \ +static inline u##len cpu_to_caam##len(u##len val) \ +{ \ + if (caam_little_end) \ + return (__force u##len)cpu_to_le##len(val); \ + else \ + return (__force u##len)cpu_to_be##len(val); \ } caam_to_cpu(16) diff --git a/drivers/crypto/cavium/nitrox/nitrox_algs.c b/drivers/crypto/cavium/nitrox/nitrox_algs.c index 2ae6124e5da67..5d54ebc20cb30 100644 --- a/drivers/crypto/cavium/nitrox/nitrox_algs.c +++ b/drivers/crypto/cavium/nitrox/nitrox_algs.c @@ -73,7 +73,7 @@ static int flexi_aes_keylen(int keylen) static int nitrox_skcipher_init(struct crypto_skcipher *tfm) { struct nitrox_crypto_ctx *nctx = crypto_skcipher_ctx(tfm); - void *fctx; + struct crypto_ctx_hdr *chdr; /* get the first device */ nctx->ndev = nitrox_get_first_device(); @@ -81,12 +81,14 @@ static int nitrox_skcipher_init(struct 
crypto_skcipher *tfm) return -ENODEV; /* allocate nitrox crypto context */ - fctx = crypto_alloc_context(nctx->ndev); - if (!fctx) { + chdr = crypto_alloc_context(nctx->ndev); + if (!chdr) { nitrox_put_device(nctx->ndev); return -ENOMEM; } - nctx->u.ctx_handle = (uintptr_t)fctx; + nctx->chdr = chdr; + nctx->u.ctx_handle = (uintptr_t)((u8 *)chdr->vaddr + + sizeof(struct ctx_hdr)); crypto_skcipher_set_reqsize(tfm, crypto_skcipher_reqsize(tfm) + sizeof(struct nitrox_kcrypt_request)); return 0; @@ -102,7 +104,7 @@ static void nitrox_skcipher_exit(struct crypto_skcipher *tfm) memset(&fctx->crypto, 0, sizeof(struct crypto_keys)); memset(&fctx->auth, 0, sizeof(struct auth_keys)); - crypto_free_context((void *)fctx); + crypto_free_context((void *)nctx->chdr); } nitrox_put_device(nctx->ndev); diff --git a/drivers/crypto/cavium/nitrox/nitrox_lib.c b/drivers/crypto/cavium/nitrox/nitrox_lib.c index 4d31df07777f6..28baf1a19d0a3 100644 --- a/drivers/crypto/cavium/nitrox/nitrox_lib.c +++ b/drivers/crypto/cavium/nitrox/nitrox_lib.c @@ -146,12 +146,19 @@ static void destroy_crypto_dma_pool(struct nitrox_device *ndev) void *crypto_alloc_context(struct nitrox_device *ndev) { struct ctx_hdr *ctx; + struct crypto_ctx_hdr *chdr; void *vaddr; dma_addr_t dma; + chdr = kmalloc(sizeof(*chdr), GFP_KERNEL); + if (!chdr) + return NULL; + vaddr = dma_pool_alloc(ndev->ctx_pool, (GFP_KERNEL | __GFP_ZERO), &dma); - if (!vaddr) + if (!vaddr) { + kfree(chdr); return NULL; + } /* fill meta data */ ctx = vaddr; @@ -159,7 +166,11 @@ void *crypto_alloc_context(struct nitrox_device *ndev) ctx->dma = dma; ctx->ctx_dma = dma + sizeof(struct ctx_hdr); - return ((u8 *)vaddr + sizeof(struct ctx_hdr)); + chdr->pool = ndev->ctx_pool; + chdr->dma = dma; + chdr->vaddr = vaddr; + + return chdr; } /** @@ -168,13 +179,14 @@ void *crypto_alloc_context(struct nitrox_device *ndev) */ void crypto_free_context(void *ctx) { - struct ctx_hdr *ctxp; + struct crypto_ctx_hdr *ctxp; if (!ctx) return; - ctxp = (struct ctx_hdr 
*)((u8 *)ctx - sizeof(struct ctx_hdr)); - dma_pool_free(ctxp->pool, ctxp, ctxp->dma); + ctxp = ctx; + dma_pool_free(ctxp->pool, ctxp->vaddr, ctxp->dma); + kfree(ctxp); } /** diff --git a/drivers/crypto/cavium/nitrox/nitrox_req.h b/drivers/crypto/cavium/nitrox/nitrox_req.h index d091b6f5f5dd6..19f0a20e3bb3b 100644 --- a/drivers/crypto/cavium/nitrox/nitrox_req.h +++ b/drivers/crypto/cavium/nitrox/nitrox_req.h @@ -181,12 +181,19 @@ struct flexi_crypto_context { struct auth_keys auth; }; +struct crypto_ctx_hdr { + struct dma_pool *pool; + dma_addr_t dma; + void *vaddr; +}; + struct nitrox_crypto_ctx { struct nitrox_device *ndev; union { u64 ctx_handle; struct flexi_crypto_context *fctx; } u; + struct crypto_ctx_hdr *chdr; }; struct nitrox_kcrypt_request { diff --git a/drivers/crypto/ccree/cc_aead.c b/drivers/crypto/ccree/cc_aead.c index 01b82b82f8b87..5852d29ae2dac 100644 --- a/drivers/crypto/ccree/cc_aead.c +++ b/drivers/crypto/ccree/cc_aead.c @@ -540,13 +540,12 @@ static int cc_aead_setkey(struct crypto_aead *tfm, const u8 *key, unsigned int keylen) { struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm); - struct rtattr *rta = (struct rtattr *)key; struct cc_crypto_req cc_req = {}; - struct crypto_authenc_key_param *param; struct cc_hw_desc desc[MAX_AEAD_SETKEY_SEQ]; - int rc = -EINVAL; unsigned int seq_len = 0; struct device *dev = drvdata_to_dev(ctx->drvdata); + const u8 *enckey, *authkey; + int rc; dev_dbg(dev, "Setting key in context @%p for %s. key=%p keylen=%u\n", ctx, crypto_tfm_alg_name(crypto_aead_tfm(tfm)), key, keylen); @@ -554,35 +553,33 @@ static int cc_aead_setkey(struct crypto_aead *tfm, const u8 *key, /* STAT_PHASE_0: Init and sanity checks */ if (ctx->auth_mode != DRV_HASH_NULL) { /* authenc() alg. 
*/ - if (!RTA_OK(rta, keylen)) - goto badkey; - if (rta->rta_type != CRYPTO_AUTHENC_KEYA_PARAM) - goto badkey; - if (RTA_PAYLOAD(rta) < sizeof(*param)) - goto badkey; - param = RTA_DATA(rta); - ctx->enc_keylen = be32_to_cpu(param->enckeylen); - key += RTA_ALIGN(rta->rta_len); - keylen -= RTA_ALIGN(rta->rta_len); - if (keylen < ctx->enc_keylen) + struct crypto_authenc_keys keys; + + rc = crypto_authenc_extractkeys(&keys, key, keylen); + if (rc) goto badkey; - ctx->auth_keylen = keylen - ctx->enc_keylen; + enckey = keys.enckey; + authkey = keys.authkey; + ctx->enc_keylen = keys.enckeylen; + ctx->auth_keylen = keys.authkeylen; if (ctx->cipher_mode == DRV_CIPHER_CTR) { /* the nonce is stored in bytes at end of key */ + rc = -EINVAL; if (ctx->enc_keylen < (AES_MIN_KEY_SIZE + CTR_RFC3686_NONCE_SIZE)) goto badkey; /* Copy nonce from last 4 bytes in CTR key to * first 4 bytes in CTR IV */ - memcpy(ctx->ctr_nonce, key + ctx->auth_keylen + - ctx->enc_keylen - CTR_RFC3686_NONCE_SIZE, - CTR_RFC3686_NONCE_SIZE); + memcpy(ctx->ctr_nonce, enckey + ctx->enc_keylen - + CTR_RFC3686_NONCE_SIZE, CTR_RFC3686_NONCE_SIZE); /* Set CTR key size */ ctx->enc_keylen -= CTR_RFC3686_NONCE_SIZE; } } else { /* non-authenc - has just one key */ + enckey = key; + authkey = NULL; ctx->enc_keylen = keylen; ctx->auth_keylen = 0; } @@ -594,13 +591,14 @@ static int cc_aead_setkey(struct crypto_aead *tfm, const u8 *key, /* STAT_PHASE_1: Copy key to ctx */ /* Get key material */ - memcpy(ctx->enckey, key + ctx->auth_keylen, ctx->enc_keylen); + memcpy(ctx->enckey, enckey, ctx->enc_keylen); if (ctx->enc_keylen == 24) memset(ctx->enckey + 24, 0, CC_AES_KEY_SIZE_MAX - 24); if (ctx->auth_mode == DRV_HASH_XCBC_MAC) { - memcpy(ctx->auth_state.xcbc.xcbc_keys, key, ctx->auth_keylen); + memcpy(ctx->auth_state.xcbc.xcbc_keys, authkey, + ctx->auth_keylen); } else if (ctx->auth_mode != DRV_HASH_NULL) { /* HMAC */ - rc = cc_get_plain_hmac_key(tfm, key, ctx->auth_keylen); + rc = cc_get_plain_hmac_key(tfm, authkey, 
ctx->auth_keylen); if (rc) goto badkey; } diff --git a/drivers/crypto/chelsio/chcr_ipsec.c b/drivers/crypto/chelsio/chcr_ipsec.c index 461b97e2f1fdc..1ff8738631a38 100644 --- a/drivers/crypto/chelsio/chcr_ipsec.c +++ b/drivers/crypto/chelsio/chcr_ipsec.c @@ -303,7 +303,10 @@ static bool chcr_ipsec_offload_ok(struct sk_buff *skb, struct xfrm_state *x) static inline int is_eth_imm(const struct sk_buff *skb, unsigned int kctx_len) { - int hdrlen = sizeof(struct chcr_ipsec_req) + kctx_len; + int hdrlen; + + hdrlen = sizeof(struct fw_ulptx_wr) + + sizeof(struct chcr_ipsec_req) + kctx_len; hdrlen += sizeof(struct cpl_tx_pkt); if (skb->len <= MAX_IMM_TX_PKT_LEN - hdrlen) diff --git a/drivers/crypto/hisilicon/sec/sec_algs.c b/drivers/crypto/hisilicon/sec/sec_algs.c index f7d6d690116ee..cdc4f9a171d98 100644 --- a/drivers/crypto/hisilicon/sec/sec_algs.c +++ b/drivers/crypto/hisilicon/sec/sec_algs.c @@ -732,6 +732,7 @@ static int sec_alg_skcipher_crypto(struct skcipher_request *skreq, int *splits_in_nents; int *splits_out_nents = NULL; struct sec_request_el *el, *temp; + bool split = skreq->src != skreq->dst; mutex_init(&sec_req->lock); sec_req->req_base = &skreq->base; @@ -750,7 +751,7 @@ static int sec_alg_skcipher_crypto(struct skcipher_request *skreq, if (ret) goto err_free_split_sizes; - if (skreq->src != skreq->dst) { + if (split) { sec_req->len_out = sg_nents(skreq->dst); ret = sec_map_and_split_sg(skreq->dst, split_sizes, steps, &splits_out, &splits_out_nents, @@ -785,8 +786,9 @@ static int sec_alg_skcipher_crypto(struct skcipher_request *skreq, split_sizes[i], skreq->src != skreq->dst, splits_in[i], splits_in_nents[i], - splits_out[i], - splits_out_nents[i], info); + split ? splits_out[i] : NULL, + split ? splits_out_nents[i] : 0, + info); if (IS_ERR(el)) { ret = PTR_ERR(el); goto err_free_elements; @@ -806,13 +808,6 @@ static int sec_alg_skcipher_crypto(struct skcipher_request *skreq, * more refined but this is unlikely to happen so no need. 
*/ - /* Cleanup - all elements in pointer arrays have been coppied */ - kfree(splits_in_nents); - kfree(splits_in); - kfree(splits_out_nents); - kfree(splits_out); - kfree(split_sizes); - /* Grab a big lock for a long time to avoid concurrency issues */ mutex_lock(&queue->queuelock); @@ -827,13 +822,13 @@ static int sec_alg_skcipher_crypto(struct skcipher_request *skreq, (!queue->havesoftqueue || kfifo_avail(&queue->softqueue) > steps)) || !list_empty(&ctx->backlog)) { + ret = -EBUSY; if ((skreq->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) { list_add_tail(&sec_req->backlog_head, &ctx->backlog); mutex_unlock(&queue->queuelock); - return -EBUSY; + goto out; } - ret = -EBUSY; mutex_unlock(&queue->queuelock); goto err_free_elements; } @@ -842,7 +837,15 @@ static int sec_alg_skcipher_crypto(struct skcipher_request *skreq, if (ret) goto err_free_elements; - return -EINPROGRESS; + ret = -EINPROGRESS; +out: + /* Cleanup - all elements in pointer arrays have been copied */ + kfree(splits_in_nents); + kfree(splits_in); + kfree(splits_out_nents); + kfree(splits_out); + kfree(split_sizes); + return ret; err_free_elements: list_for_each_entry_safe(el, temp, &sec_req->elements, head) { @@ -854,7 +857,7 @@ static int sec_alg_skcipher_crypto(struct skcipher_request *skreq, crypto_skcipher_ivsize(atfm), DMA_BIDIRECTIONAL); err_unmap_out_sg: - if (skreq->src != skreq->dst) + if (split) sec_unmap_sg_on_err(skreq->dst, steps, splits_out, splits_out_nents, sec_req->len_out, info->dev); diff --git a/drivers/crypto/talitos.c b/drivers/crypto/talitos.c index 6988012deca4c..f4f3e9a5851e9 100644 --- a/drivers/crypto/talitos.c +++ b/drivers/crypto/talitos.c @@ -1361,23 +1361,18 @@ static struct talitos_edesc *talitos_edesc_alloc(struct device *dev, struct talitos_private *priv = dev_get_drvdata(dev); bool is_sec1 = has_ftr_sec1(priv); int max_len = is_sec1 ? 
TALITOS1_MAX_DATA_LEN : TALITOS2_MAX_DATA_LEN; - void *err; if (cryptlen + authsize > max_len) { dev_err(dev, "length exceeds h/w max limit\n"); return ERR_PTR(-EINVAL); } - if (ivsize) - iv_dma = dma_map_single(dev, iv, ivsize, DMA_TO_DEVICE); - if (!dst || dst == src) { src_len = assoclen + cryptlen + authsize; src_nents = sg_nents_for_len(src, src_len); if (src_nents < 0) { dev_err(dev, "Invalid number of src SG.\n"); - err = ERR_PTR(-EINVAL); - goto error_sg; + return ERR_PTR(-EINVAL); } src_nents = (src_nents == 1) ? 0 : src_nents; dst_nents = dst ? src_nents : 0; @@ -1387,16 +1382,14 @@ static struct talitos_edesc *talitos_edesc_alloc(struct device *dev, src_nents = sg_nents_for_len(src, src_len); if (src_nents < 0) { dev_err(dev, "Invalid number of src SG.\n"); - err = ERR_PTR(-EINVAL); - goto error_sg; + return ERR_PTR(-EINVAL); } src_nents = (src_nents == 1) ? 0 : src_nents; dst_len = assoclen + cryptlen + (encrypt ? authsize : 0); dst_nents = sg_nents_for_len(dst, dst_len); if (dst_nents < 0) { dev_err(dev, "Invalid number of dst SG.\n"); - err = ERR_PTR(-EINVAL); - goto error_sg; + return ERR_PTR(-EINVAL); } dst_nents = (dst_nents == 1) ? 
0 : dst_nents; } @@ -1423,11 +1416,14 @@ static struct talitos_edesc *talitos_edesc_alloc(struct device *dev, /* if its a ahash, add space for a second desc next to the first one */ if (is_sec1 && !dst) alloc_len += sizeof(struct talitos_desc); + alloc_len += ivsize; edesc = kmalloc(alloc_len, GFP_DMA | flags); - if (!edesc) { - err = ERR_PTR(-ENOMEM); - goto error_sg; + if (!edesc) + return ERR_PTR(-ENOMEM); + if (ivsize) { + iv = memcpy(((u8 *)edesc) + alloc_len - ivsize, iv, ivsize); + iv_dma = dma_map_single(dev, iv, ivsize, DMA_TO_DEVICE); } memset(&edesc->desc, 0, sizeof(edesc->desc)); @@ -1445,10 +1441,6 @@ static struct talitos_edesc *talitos_edesc_alloc(struct device *dev, DMA_BIDIRECTIONAL); } return edesc; -error_sg: - if (iv_dma) - dma_unmap_single(dev, iv_dma, ivsize, DMA_TO_DEVICE); - return err; } static struct talitos_edesc *aead_edesc_alloc(struct aead_request *areq, u8 *iv, diff --git a/drivers/dax/pmem.c b/drivers/dax/pmem.c index 99e2aace8078c..2c1f459c0c63a 100644 --- a/drivers/dax/pmem.c +++ b/drivers/dax/pmem.c @@ -48,9 +48,8 @@ static void dax_pmem_percpu_exit(void *data) percpu_ref_exit(ref); } -static void dax_pmem_percpu_kill(void *data) +static void dax_pmem_percpu_kill(struct percpu_ref *ref) { - struct percpu_ref *ref = data; struct dax_pmem *dax_pmem = to_dax_pmem(ref); dev_dbg(dax_pmem->dev, "trace\n"); @@ -112,17 +111,10 @@ static int dax_pmem_probe(struct device *dev) } dax_pmem->pgmap.ref = &dax_pmem->ref; + dax_pmem->pgmap.kill = dax_pmem_percpu_kill; addr = devm_memremap_pages(dev, &dax_pmem->pgmap); - if (IS_ERR(addr)) { - devm_remove_action(dev, dax_pmem_percpu_exit, &dax_pmem->ref); - percpu_ref_exit(&dax_pmem->ref); + if (IS_ERR(addr)) return PTR_ERR(addr); - } - - rc = devm_add_action_or_reset(dev, dax_pmem_percpu_kill, - &dax_pmem->ref); - if (rc) - return rc; /* adjust the dax_region resource to the start of data */ memcpy(&res, &dax_pmem->pgmap.res, sizeof(res)); diff --git a/drivers/dma-buf/Kconfig 
b/drivers/dma-buf/Kconfig index ed3b785bae37e..09ccac1768e35 100644 --- a/drivers/dma-buf/Kconfig +++ b/drivers/dma-buf/Kconfig @@ -30,4 +30,6 @@ config SW_SYNC WARNING: improper use of this can result in deadlocking kernel drivers from userspace. Intended for test and debug only. +source "drivers/dma-buf/hyper_dmabuf/Kconfig" + endmenu diff --git a/drivers/dma-buf/Makefile b/drivers/dma-buf/Makefile index c33bf88631479..3f15a841502e4 100644 --- a/drivers/dma-buf/Makefile +++ b/drivers/dma-buf/Makefile @@ -1,3 +1,4 @@ obj-y := dma-buf.o dma-fence.o dma-fence-array.o reservation.o seqno-fence.o obj-$(CONFIG_SYNC_FILE) += sync_file.o obj-$(CONFIG_SW_SYNC) += sw_sync.o sync_debug.o +obj-$(CONFIG_HYPER_DMABUF) += hyper_dmabuf/ diff --git a/drivers/dma-buf/hyper_dmabuf/Kconfig b/drivers/dma-buf/hyper_dmabuf/Kconfig new file mode 100644 index 0000000000000..17ea5b8323dad --- /dev/null +++ b/drivers/dma-buf/hyper_dmabuf/Kconfig @@ -0,0 +1,75 @@ +menuconfig HYPER_DMABUF + bool "configure HyperDMABUF driver" + default y + depends on X86_64 + +if HYPER_DMABUF + +choice + prompt "Hypervisor" + depends on HYPER_DMABUF + default HYPER_DMABUF_XEN + +config HYPER_DMABUF_XEN + bool "XEN" + depends on XEN + help + Configuring hyper_dmabuf driver for XEN hypervisor + +config HYPER_DMABUF_ACRN + bool "ACRN" + depends on ACRN_VIRTIO_DEVICES + select VIRTIO + help + Configuring hyper_dmabuf driver for ACRN hypervisor +endchoice + +choice + prompt "Virtio driver type" + depends on HYPER_DMABUF_ACRN + default HYPER_DMABUF_VIRTIO_BE + +config HYPER_DMABUF_VIRTIO_BE + depends on VBS && DRM_I915_GVT + bool "virtio backend (SOS)" + help + Configuring hyper_dmabuf driver as virtio backend + running from service OS + +config HYPER_DMABUF_VIRTIO_FE + depends on ACRN_VIRTIO_DEVICES + bool "virtio frontend (UOS)" + help + Configuring hyper_dmabuf driver as virtio frontend + running from guest OS +endchoice + +config HYPER_DMABUF_SYSFS + bool "Enable sysfs information about hyper DMA buffers" + 
default y + depends on HYPER_DMABUF + help + Expose information about imported and exported buffers using + hyper_dmabuf driver + +config HYPER_DMABUF_EVENT_GEN + bool "Enable event-generation and polling operation" + default n + depends on HYPER_DMABUF + help + With this config enabled, hyper_dmabuf driver on the importer side + generates events and queue those up in the event list whenever a new + shared DMA-BUF is available. Events in the list can be retrieved by + read operation. + +config HYPER_DMABUF_XEN_AUTO_RX_CH_ADD + bool "Enable automatic rx-ch add with 10 secs interval" + default y + depends on HYPER_DMABUF && HYPER_DMABUF_XEN + help + If enabled, driver reads a node in xenstore every 10 seconds + to check whether there is any tx comm ch configured by another + domain then initialize matched rx comm ch automatically for any + existing tx comm chs. + +endif diff --git a/drivers/dma-buf/hyper_dmabuf/Makefile b/drivers/dma-buf/hyper_dmabuf/Makefile new file mode 100644 index 0000000000000..4ad8dc70234b6 --- /dev/null +++ b/drivers/dma-buf/hyper_dmabuf/Makefile @@ -0,0 +1,30 @@ +obj-y := hyper_dmabuf_drv.o \ + hyper_dmabuf_ioctl.o \ + hyper_dmabuf_list.o \ + hyper_dmabuf_sgl_proc.o \ + hyper_dmabuf_ops.o \ + hyper_dmabuf_msg.o \ + hyper_dmabuf_id.o \ + hyper_dmabuf_remote_sync.o \ + hyper_dmabuf_query.o \ + +ifeq ($(CONFIG_HYPER_DMABUF_EVENT_GEN), y) + obj-y += hyper_dmabuf_event.o +endif + +ifeq ($(CONFIG_HYPER_DMABUF_XEN), y) + obj-y += xen/hyper_dmabuf_xen_comm.o \ + xen/hyper_dmabuf_xen_comm_list.o \ + xen/hyper_dmabuf_xen_shm.o \ + xen/hyper_dmabuf_xen_drv.o +else ifeq ($(CONFIG_HYPER_DMABUF_ACRN), y) + ifeq ($(CONFIG_HYPER_DMABUF_VIRTIO_BE), y) + obj-y += virtio/hyper_dmabuf_virtio_be_drv.o \ + virtio/hyper_dmabuf_virtio_fe_list.o + else + obj-y += virtio/hyper_dmabuf_virtio_fe_drv.o + endif + obj-y += virtio/hyper_dmabuf_virtio_common.o \ + virtio/hyper_dmabuf_virtio_shm.o \ + virtio/hyper_dmabuf_virtio_comm_ring.o +endif diff --git 
a/drivers/dma-buf/hyper_dmabuf/hyper_dmabuf_drv.c b/drivers/dma-buf/hyper_dmabuf/hyper_dmabuf_drv.c new file mode 100644 index 0000000000000..f1afce29d6aff --- /dev/null +++ b/drivers/dma-buf/hyper_dmabuf/hyper_dmabuf_drv.c @@ -0,0 +1,411 @@ +/* + * Copyright © 2017 Intel Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. 
+ * + * Authors: + * Dongwon Kim + * Mateusz Polrola + * + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "hyper_dmabuf_drv.h" +#include "hyper_dmabuf_ioctl.h" +#include "hyper_dmabuf_list.h" +#include "hyper_dmabuf_id.h" +#include "hyper_dmabuf_event.h" + +#ifdef CONFIG_HYPER_DMABUF_XEN +#include "xen/hyper_dmabuf_xen_drv.h" +#elif defined (CONFIG_HYPER_DMABUF_ACRN) +#include "virtio/hyper_dmabuf_virtio_common.h" +#endif + +MODULE_LICENSE("GPL and additional rights"); +MODULE_AUTHOR("Intel Corporation"); + +struct hyper_dmabuf_private *hy_drv_priv; + +static void force_free(struct exported_sgt_info *exported, + void *attr) +{ + struct ioctl_hyper_dmabuf_unexport unexport_attr; + struct file *filp = (struct file *)attr; + + if (!filp || !exported) + return; + + if (exported->filp == filp) { + dev_dbg(hy_drv_priv->dev, + "Forcefully releasing buffer {id:%d key:%d %d %d}\n", + exported->hid.id, exported->hid.rng_key[0], + exported->hid.rng_key[1], exported->hid.rng_key[2]); + + unexport_attr.hid = exported->hid; + unexport_attr.delay_ms = 0; + + hyper_dmabuf_unexport_ioctl(filp, &unexport_attr); + } +} + +static int hyper_dmabuf_open(struct inode *inode, struct file *filp) +{ + int ret = 0; + + /* Do not allow exclusive open */ + if (filp->f_flags & O_EXCL) + return -EBUSY; + + return ret; +} + +static int hyper_dmabuf_release(struct inode *inode, struct file *filp) +{ + hyper_dmabuf_foreach_exported(force_free, filp); + + return 0; +} + +#ifdef CONFIG_HYPER_DMABUF_EVENT_GEN + +static unsigned int hyper_dmabuf_event_poll(struct file *filp, + struct poll_table_struct *wait) +{ + poll_wait(filp, &hy_drv_priv->event_wait, wait); + + if (!list_empty(&hy_drv_priv->event_list)) + return POLLIN | POLLRDNORM; + + return 0; +} + +static ssize_t hyper_dmabuf_event_read(struct file *filp, char __user *buffer, + size_t count, loff_t *offset) +{ + int ret; + + /* only root can read events */ + if 
(!capable(CAP_DAC_OVERRIDE)) { + dev_err(hy_drv_priv->dev, + "Only root can read events\n"); + return -EPERM; + } + + /* make sure user buffer can be written */ + if (!access_ok(VERIFY_WRITE, buffer, count)) { + dev_err(hy_drv_priv->dev, + "User buffer can't be written.\n"); + return -EINVAL; + } + + ret = mutex_lock_interruptible(&hy_drv_priv->event_read_lock); + if (ret) + return ret; + + while (1) { + struct hyper_dmabuf_event *e = NULL; + + spin_lock_irq(&hy_drv_priv->event_lock); + if (!list_empty(&hy_drv_priv->event_list)) { + e = list_first_entry(&hy_drv_priv->event_list, + struct hyper_dmabuf_event, link); + list_del(&e->link); + } + spin_unlock_irq(&hy_drv_priv->event_lock); + + if (!e) { + if (ret) + break; + + if (filp->f_flags & O_NONBLOCK) { + ret = -EAGAIN; + break; + } + + mutex_unlock(&hy_drv_priv->event_read_lock); + ret = wait_event_interruptible(hy_drv_priv->event_wait, + !list_empty(&hy_drv_priv->event_list)); + + if (ret == 0) + ret = mutex_lock_interruptible( + &hy_drv_priv->event_read_lock); + + if (ret) + return ret; + } else { + unsigned int length = (sizeof(e->event_data.hdr) + + e->event_data.hdr.size); + + if (length > count - ret) { +put_back_event: + spin_lock_irq(&hy_drv_priv->event_lock); + list_add(&e->link, &hy_drv_priv->event_list); + spin_unlock_irq(&hy_drv_priv->event_lock); + break; + } + + if (copy_to_user(buffer + ret, &e->event_data.hdr, + sizeof(e->event_data.hdr))) { + if (ret == 0) + ret = -EFAULT; + + goto put_back_event; + } + + ret += sizeof(e->event_data.hdr); + + if (copy_to_user(buffer + ret, e->event_data.data, + e->event_data.hdr.size)) { + /* error while copying void *data */ + + struct hyper_dmabuf_event_hdr dummy_hdr = {0}; + + ret -= sizeof(e->event_data.hdr); + + /* nullifying hdr of the event in user buffer */ + if (copy_to_user(buffer + ret, &dummy_hdr, + sizeof(dummy_hdr))) { + dev_err(hy_drv_priv->dev, + "failed to nullify invalid hdr already in userspace\n"); + } + + ret = -EFAULT; + + goto 
put_back_event; + } + + ret += e->event_data.hdr.size; + hy_drv_priv->pending--; + kfree(e); + } + } + + mutex_unlock(&hy_drv_priv->event_read_lock); + + return ret; +} + +#endif + +static const struct file_operations hyper_dmabuf_driver_fops = { + .owner = THIS_MODULE, + .open = hyper_dmabuf_open, + .release = hyper_dmabuf_release, + +/* poll and read interfaces are needed only for event-polling */ +#ifdef CONFIG_HYPER_DMABUF_EVENT_GEN + .read = hyper_dmabuf_event_read, + .poll = hyper_dmabuf_event_poll, +#endif + + .unlocked_ioctl = hyper_dmabuf_ioctl, +}; + +static struct miscdevice hyper_dmabuf_miscdev = { + .minor = MISC_DYNAMIC_MINOR, + .name = "hyper_dmabuf", + .fops = &hyper_dmabuf_driver_fops, +}; + +static int register_device(void) +{ + int ret = 0; + + ret = misc_register(&hyper_dmabuf_miscdev); + + if (ret) { + printk(KERN_ERR "hyper_dmabuf: driver can't be registered\n"); + return ret; + } + + hy_drv_priv->dev = hyper_dmabuf_miscdev.this_device; + + /* TODO: Check if there is a different way to initialize dma mask */ + dma_coerce_mask_and_coherent(hy_drv_priv->dev, DMA_BIT_MASK(64)); + + return ret; +} + +static void unregister_device(void) +{ + dev_info(hy_drv_priv->dev, + "hyper_dmabuf: unregister_device() is called\n"); + + misc_deregister(&hyper_dmabuf_miscdev); +} + +static int __init hyper_dmabuf_drv_init(void) +{ + int ret = 0; + + printk(KERN_NOTICE "hyper_dmabuf_starting: Initialization started\n"); + + hy_drv_priv = kcalloc(1, sizeof(struct hyper_dmabuf_private), + GFP_KERNEL); + + if (!hy_drv_priv) + return -ENOMEM; + + ret = register_device(); + if (ret < 0) + return ret; + +/* currently only supports XEN hypervisor */ +#ifdef CONFIG_HYPER_DMABUF_XEN + hy_drv_priv->bknd_ops = &xen_bknd_ops; +#elif defined (CONFIG_HYPER_DMABUF_ACRN) + hy_drv_priv->bknd_ops = &virtio_bknd_ops; +#else + hy_drv_priv->bknd_ops = NULL; + printk(KERN_ERR "No backend configured for hyper_dmabuf in kernel config\n"); +#endif + + if (hy_drv_priv->bknd_ops == NULL) { 
+ printk(KERN_ERR "Hyper_dmabuf: no backend found\n"); + return -1; + } + + mutex_init(&hy_drv_priv->lock); + + mutex_lock(&hy_drv_priv->lock); + + hy_drv_priv->initialized = false; + + dev_info(hy_drv_priv->dev, + "initializing database for imported/exported dmabufs\n"); + + hy_drv_priv->work_queue = create_workqueue("hyper_dmabuf_wqueue"); + + ret = hyper_dmabuf_table_init(); + if (ret < 0) { + dev_err(hy_drv_priv->dev, + "fail to init table for exported/imported entries\n"); + mutex_unlock(&hy_drv_priv->lock); + kfree(hy_drv_priv); + return ret; + } + +#ifdef CONFIG_HYPER_DMABUF_SYSFS + ret = hyper_dmabuf_register_sysfs(hy_drv_priv->dev); + if (ret < 0) { + dev_err(hy_drv_priv->dev, + "failed to initialize sysfs\n"); + mutex_unlock(&hy_drv_priv->lock); + kfree(hy_drv_priv); + return ret; + } +#endif + +#ifdef CONFIG_HYPER_DMABUF_EVENT_GEN + mutex_init(&hy_drv_priv->event_read_lock); + spin_lock_init(&hy_drv_priv->event_lock); + + /* Initialize event queue */ + INIT_LIST_HEAD(&hy_drv_priv->event_list); + init_waitqueue_head(&hy_drv_priv->event_wait); + + /* resetting number of pending events */ + hy_drv_priv->pending = 0; +#endif + + if (hy_drv_priv->bknd_ops->init) { + ret = hy_drv_priv->bknd_ops->init(); + + if (ret < 0) { + dev_dbg(hy_drv_priv->dev, + "failed to initialize backend.\n"); + return ret; + } + } + + hy_drv_priv->domid = hy_drv_priv->bknd_ops->get_vm_id(); + + hy_drv_priv->initialized = true; + if (hy_drv_priv->bknd_ops->init_comm_env) { + ret = hy_drv_priv->bknd_ops->init_comm_env(); + if (ret < 0) { + hy_drv_priv->initialized = false; + dev_dbg(hy_drv_priv->dev, + "failed to initialize comm-env.\n"); + } + } + + mutex_unlock(&hy_drv_priv->lock); + + dev_info(hy_drv_priv->dev, + "Finishing up initialization of hyper_dmabuf drv\n"); + + /* interrupt for comm should be registered here: */ + return ret; +} + +static void hyper_dmabuf_drv_exit(void) +{ +#ifdef CONFIG_HYPER_DMABUF_SYSFS + hyper_dmabuf_unregister_sysfs(hy_drv_priv->dev); +#endif + + 
mutex_lock(&hy_drv_priv->lock); + + /* hash tables for export/import entries and ring_infos */ + hyper_dmabuf_table_destroy(); + + if (hy_drv_priv->bknd_ops->destroy_comm) { + hy_drv_priv->bknd_ops->destroy_comm(); + } + + if (hy_drv_priv->bknd_ops->cleanup) { + hy_drv_priv->bknd_ops->cleanup(); + }; + + /* destroy workqueue */ + if (hy_drv_priv->work_queue) + destroy_workqueue(hy_drv_priv->work_queue); + + /* destroy id_queue */ + if (hy_drv_priv->id_queue) + hyper_dmabuf_free_hid_list(); + +#ifdef CONFIG_HYPER_DMABUF_EVENT_GEN + /* clean up event queue */ + hyper_dmabuf_events_release(); +#endif + + mutex_unlock(&hy_drv_priv->lock); + + dev_info(hy_drv_priv->dev, + "hyper_dmabuf driver: Exiting\n"); + + kfree(hy_drv_priv); + + unregister_device(); +} + +module_init(hyper_dmabuf_drv_init); +module_exit(hyper_dmabuf_drv_exit); diff --git a/drivers/dma-buf/hyper_dmabuf/hyper_dmabuf_drv.h b/drivers/dma-buf/hyper_dmabuf/hyper_dmabuf_drv.h new file mode 100644 index 0000000000000..ad4839b9c0f2f --- /dev/null +++ b/drivers/dma-buf/hyper_dmabuf/hyper_dmabuf_drv.h @@ -0,0 +1,118 @@ +/* + * Copyright © 2017 Intel Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. + * + */ + +#ifndef __LINUX_PUBLIC_HYPER_DMABUF_DRV_H__ +#define __LINUX_PUBLIC_HYPER_DMABUF_DRV_H__ + +#include +#include + +struct hyper_dmabuf_req; + +struct hyper_dmabuf_event { + struct hyper_dmabuf_event_data event_data; + struct list_head link; +}; + +struct hyper_dmabuf_private { + struct device *dev; + + /* VM(domain) id of current VM instance */ + int domid; + + /* workqueue dedicated to hyper_dmabuf driver */ + struct workqueue_struct *work_queue; + + /* list of reusable hyper_dmabuf_ids */ + struct list_reusable_id *id_queue; + + /* backend ops - hypervisor specific */ + struct hyper_dmabuf_bknd_ops *bknd_ops; + + /* device global lock */ + /* TODO: might need a lock per resource (e.g. EXPORT LIST) */ + struct mutex lock; + + /* flag that shows whether backend is initialized */ + bool initialized; + + wait_queue_head_t event_wait; + struct list_head event_list; + + spinlock_t event_lock; + struct mutex event_read_lock; + + /* # of pending events */ + int pending; +}; + +struct list_reusable_id { + hyper_dmabuf_id_t hid; + struct list_head list; +}; + +struct hyper_dmabuf_bknd_ops { + /* backend initialization routine (optional) */ + int (*init)(void); + + /* backend cleanup routine (optional) */ + void (*cleanup)(void); + + /* retreiving id of current virtual machine */ + int (*get_vm_id)(void); + + /* get pages shared via hypervisor-specific method */ + long (*share_pages)(struct page **, int, int, void **); + + /* make shared pages unshared via hypervisor specific method */ + int (*unshare_pages)(void **, int); + + /* map remotely shared pages on importer's side via + * hypervisor-specific method + */ + struct page ** (*map_shared_pages)(unsigned long, int, int, void **); + + /* 
unmap and free shared pages on importer's side via + * hypervisor-specific method + */ + int (*unmap_shared_pages)(void **, int); + + /* initialize communication environment */ + int (*init_comm_env)(void); + + void (*destroy_comm)(void); + + /* upstream ch setup (receiving and responding) */ + int (*init_rx_ch)(int); + + /* downstream ch setup (transmitting and parsing responses) */ + int (*init_tx_ch)(int); + + int (*send_req)(int, struct hyper_dmabuf_req *, int); +}; + +/* exporting global drv private info */ +extern struct hyper_dmabuf_private *hy_drv_priv; + +#endif /* __LINUX_PUBLIC_HYPER_DMABUF_DRV_H__ */ diff --git a/drivers/dma-buf/hyper_dmabuf/hyper_dmabuf_event.c b/drivers/dma-buf/hyper_dmabuf/hyper_dmabuf_event.c new file mode 100644 index 0000000000000..392ea99e07842 --- /dev/null +++ b/drivers/dma-buf/hyper_dmabuf/hyper_dmabuf_event.c @@ -0,0 +1,122 @@ +/* + * Copyright © 2017 Intel Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. + * + * Authors: + * Dongwon Kim + * Mateusz Polrola + * + */ + +#include +#include +#include +#include +#include "hyper_dmabuf_drv.h" +#include "hyper_dmabuf_struct.h" +#include "hyper_dmabuf_list.h" +#include "hyper_dmabuf_event.h" + +static void send_event(struct hyper_dmabuf_event *e) +{ + struct hyper_dmabuf_event *oldest; + unsigned long irqflags; + + spin_lock_irqsave(&hy_drv_priv->event_lock, irqflags); + + /* check current number of event then if it hits the max num allowed + * then remove the oldest event in the list + */ + if (hy_drv_priv->pending > MAX_DEPTH_EVENT_QUEUE - 1) { + oldest = list_first_entry(&hy_drv_priv->event_list, + struct hyper_dmabuf_event, link); + list_del(&oldest->link); + hy_drv_priv->pending--; + kfree(oldest); + } + + list_add_tail(&e->link, + &hy_drv_priv->event_list); + + hy_drv_priv->pending++; + + wake_up_interruptible(&hy_drv_priv->event_wait); + + spin_unlock_irqrestore(&hy_drv_priv->event_lock, irqflags); +} + +void hyper_dmabuf_events_release(void) +{ + struct hyper_dmabuf_event *e, *et; + unsigned long irqflags; + + spin_lock_irqsave(&hy_drv_priv->event_lock, irqflags); + + list_for_each_entry_safe(e, et, &hy_drv_priv->event_list, + link) { + list_del(&e->link); + kfree(e); + hy_drv_priv->pending--; + } + + if (hy_drv_priv->pending) { + dev_err(hy_drv_priv->dev, + "possible leak on event_list\n"); + } + + spin_unlock_irqrestore(&hy_drv_priv->event_lock, irqflags); +} + +int hyper_dmabuf_import_event(hyper_dmabuf_id_t hid) +{ + struct hyper_dmabuf_event *e; + struct imported_sgt_info *imported; + + imported = hyper_dmabuf_find_imported(hid); + + if (!imported) { + dev_err(hy_drv_priv->dev, + "can't find imported_sgt_info in the list\n"); + return 
-EINVAL; + } + + e = kzalloc(sizeof(*e), GFP_KERNEL); + + if (!e) + return -ENOMEM; + + e->event_data.hdr.event_type = HYPER_DMABUF_NEW_IMPORT; + e->event_data.hdr.hid = hid; + e->event_data.data = (void *)imported->priv; + e->event_data.hdr.size = imported->sz_priv; + + send_event(e); + + dev_dbg(hy_drv_priv->dev, + "event number = %d :", hy_drv_priv->pending); + + dev_dbg(hy_drv_priv->dev, + "generating events for {%d, %d, %d, %d}\n", + imported->hid.id, imported->hid.rng_key[0], + imported->hid.rng_key[1], imported->hid.rng_key[2]); + + return 0; +} diff --git a/drivers/dma-buf/hyper_dmabuf/hyper_dmabuf_event.h b/drivers/dma-buf/hyper_dmabuf/hyper_dmabuf_event.h new file mode 100644 index 0000000000000..50db04faf2225 --- /dev/null +++ b/drivers/dma-buf/hyper_dmabuf/hyper_dmabuf_event.h @@ -0,0 +1,38 @@ +/* + * Copyright © 2017 Intel Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. 
+ * + */ + +#ifndef __HYPER_DMABUF_EVENT_H__ +#define __HYPER_DMABUF_EVENT_H__ + +#define MAX_DEPTH_EVENT_QUEUE 32 + +enum hyper_dmabuf_event_type { + HYPER_DMABUF_NEW_IMPORT = 0x10000, +}; + +void hyper_dmabuf_events_release(void); + +int hyper_dmabuf_import_event(hyper_dmabuf_id_t hid); + +#endif /* __HYPER_DMABUF_EVENT_H__ */ diff --git a/drivers/dma-buf/hyper_dmabuf/hyper_dmabuf_id.c b/drivers/dma-buf/hyper_dmabuf/hyper_dmabuf_id.c new file mode 100644 index 0000000000000..e67b84a7e64c3 --- /dev/null +++ b/drivers/dma-buf/hyper_dmabuf/hyper_dmabuf_id.c @@ -0,0 +1,133 @@ +/* + * Copyright © 2017 Intel Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. 
+ * + * Authors: + * Dongwon Kim + * Mateusz Polrola + * + */ + +#include +#include +#include +#include "hyper_dmabuf_drv.h" +#include "hyper_dmabuf_id.h" + +void hyper_dmabuf_store_hid(hyper_dmabuf_id_t hid) +{ + struct list_reusable_id *reusable_head = hy_drv_priv->id_queue; + struct list_reusable_id *new_reusable; + + new_reusable = kmalloc(sizeof(*new_reusable), GFP_KERNEL); + + if (!new_reusable) + return; + + new_reusable->hid = hid; + + list_add(&new_reusable->list, &reusable_head->list); +} + +static hyper_dmabuf_id_t get_reusable_hid(void) +{ + struct list_reusable_id *reusable_head = hy_drv_priv->id_queue; + hyper_dmabuf_id_t hid = {-1, {0, 0, 0} }; + + /* check there is reusable id */ + if (!list_empty(&reusable_head->list)) { + reusable_head = list_first_entry(&reusable_head->list, + struct list_reusable_id, + list); + + list_del(&reusable_head->list); + hid = reusable_head->hid; + kfree(reusable_head); + } + + return hid; +} + +void hyper_dmabuf_free_hid_list(void) +{ + struct list_reusable_id *reusable_head = hy_drv_priv->id_queue; + struct list_reusable_id *temp_head; + + if (reusable_head) { + /* freeing mem space all reusable ids in the stack */ + while (!list_empty(&reusable_head->list)) { + temp_head = list_first_entry(&reusable_head->list, + struct list_reusable_id, + list); + list_del(&temp_head->list); + kfree(temp_head); + } + + /* freeing head */ + kfree(reusable_head); + } +} + +hyper_dmabuf_id_t hyper_dmabuf_get_hid(void) +{ + static int count; + hyper_dmabuf_id_t hid; + struct list_reusable_id *reusable_head; + + /* first call to hyper_dmabuf_get_id */ + if (count == 0) { + reusable_head = kmalloc(sizeof(*reusable_head), GFP_KERNEL); + + if (!reusable_head) + return (hyper_dmabuf_id_t){-1, {0, 0, 0} }; + + /* list head has an invalid count */ + reusable_head->hid.id = -1; + INIT_LIST_HEAD(&reusable_head->list); + hy_drv_priv->id_queue = reusable_head; + } + + hid = get_reusable_hid(); + + /*creating a new H-ID only if nothing in the 
reusable id queue + * and count is less than maximum allowed + */ + if (hid.id == -1 && count < HYPER_DMABUF_ID_MAX) + hid.id = HYPER_DMABUF_ID_CREATE(hy_drv_priv->domid, count++); + + /* random data embedded in the id for security */ + get_random_bytes(&hid.rng_key[0], 12); + + return hid; +} + +bool hyper_dmabuf_hid_keycomp(hyper_dmabuf_id_t hid1, hyper_dmabuf_id_t hid2) +{ + int i; + + /* compare keys */ + for (i = 0; i < 3; i++) { + if (hid1.rng_key[i] != hid2.rng_key[i]) + return false; + } + + return true; +} diff --git a/drivers/dma-buf/hyper_dmabuf/hyper_dmabuf_id.h b/drivers/dma-buf/hyper_dmabuf/hyper_dmabuf_id.h new file mode 100644 index 0000000000000..ed690f3a478cf --- /dev/null +++ b/drivers/dma-buf/hyper_dmabuf/hyper_dmabuf_id.h @@ -0,0 +1,51 @@ +/* + * Copyright © 2017 Intel Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. 
+ * + */ + +#ifndef __HYPER_DMABUF_ID_H__ +#define __HYPER_DMABUF_ID_H__ + +#define HYPER_DMABUF_ID_CREATE(domid, cnt) \ + ((((domid) & 0xFF) << 24) | ((cnt) & 0xFFFFFF)) + +#define HYPER_DMABUF_DOM_ID(hid) \ + (((hid.id) >> 24) & 0xFF) + +/* currently maximum number of buffers shared + * at any given moment is limited to 1000 + */ +#define HYPER_DMABUF_ID_MAX 1000 + +/* adding freed hid to the reusable list */ +void hyper_dmabuf_store_hid(hyper_dmabuf_id_t hid); + +/* freeing the reusable list */ +void hyper_dmabuf_free_hid_list(void); + +/* getting a hid available to use. */ +hyper_dmabuf_id_t hyper_dmabuf_get_hid(void); + +/* comparing two different hid */ +bool hyper_dmabuf_hid_keycomp(hyper_dmabuf_id_t hid1, hyper_dmabuf_id_t hid2); + +#endif /* __HYPER_DMABUF_ID_H__ */ diff --git a/drivers/dma-buf/hyper_dmabuf/hyper_dmabuf_ioctl.c b/drivers/dma-buf/hyper_dmabuf/hyper_dmabuf_ioctl.c new file mode 100644 index 0000000000000..8fc3a56ffee69 --- /dev/null +++ b/drivers/dma-buf/hyper_dmabuf/hyper_dmabuf_ioctl.c @@ -0,0 +1,810 @@ +/* + * Copyright © 2017 Intel Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. + * + * Authors: + * Dongwon Kim + * Mateusz Polrola + * + */ + +#include +#include +#include +#include +#include +#include "hyper_dmabuf_drv.h" +#include "hyper_dmabuf_id.h" +#include "hyper_dmabuf_struct.h" +#include "hyper_dmabuf_ioctl.h" +#include "hyper_dmabuf_list.h" +#include "hyper_dmabuf_msg.h" +#include "hyper_dmabuf_sgl_proc.h" +#include "hyper_dmabuf_ops.h" +#include "hyper_dmabuf_query.h" + +static int hyper_dmabuf_tx_ch_setup_ioctl(struct file *filp, void *data) +{ + struct ioctl_hyper_dmabuf_tx_ch_setup *tx_ch_attr; + struct hyper_dmabuf_bknd_ops *bknd_ops = hy_drv_priv->bknd_ops; + int ret = 0; + + if (!data) { + dev_err(hy_drv_priv->dev, "user data is NULL\n"); + return -EINVAL; + } + tx_ch_attr = (struct ioctl_hyper_dmabuf_tx_ch_setup *)data; + + if (bknd_ops->init_tx_ch) { + ret = bknd_ops->init_tx_ch(tx_ch_attr->remote_domain); + } + + return ret; +} + +static int hyper_dmabuf_rx_ch_setup_ioctl(struct file *filp, void *data) +{ + struct ioctl_hyper_dmabuf_rx_ch_setup *rx_ch_attr; + struct hyper_dmabuf_bknd_ops *bknd_ops = hy_drv_priv->bknd_ops; + int ret = 0; + + if (!data) { + dev_err(hy_drv_priv->dev, "user data is NULL\n"); + return -EINVAL; + } + + rx_ch_attr = (struct ioctl_hyper_dmabuf_rx_ch_setup *)data; + + if (bknd_ops->init_rx_ch) + ret = bknd_ops->init_rx_ch(rx_ch_attr->source_domain); + + return ret; +} + +static int send_export_msg(struct exported_sgt_info *exported, + struct pages_info *pg_info) +{ + struct hyper_dmabuf_bknd_ops *bknd_ops = hy_drv_priv->bknd_ops; + struct hyper_dmabuf_req *req; + int op[MAX_NUMBER_OF_OPERANDS] = {0}; + int ret, i; + long tmp; + + /* now create request for importer via ring */ + op[0] = exported->hid.id; + + for (i = 0; i < 3; 
i++) + op[i+1] = exported->hid.rng_key[i]; + + if (pg_info) { + op[4] = pg_info->nents; + op[5] = pg_info->frst_ofst; + op[6] = pg_info->last_len; + tmp = bknd_ops->share_pages(pg_info->pgs, exported->rdomid, + pg_info->nents, &exported->refs_info); + if (tmp < 0) { + dev_err(hy_drv_priv->dev, "pages sharing failed\n"); + return tmp; + } + op[7] = tmp & 0xffffffff; + op[8] = (tmp >> 32) & 0xffffffff; + } + + op[9] = exported->sz_priv; + + /* driver/application specific private info */ + memcpy(&op[10], exported->priv, op[9]); + + req = kcalloc(1, sizeof(*req), GFP_KERNEL); + + if (!req) + return -ENOMEM; + + /* composing a message to the importer */ + hyper_dmabuf_create_req(req, HYPER_DMABUF_EXPORT, &op[0]); + + ret = bknd_ops->send_req(exported->rdomid, req, true); + + kfree(req); + + return ret; +} + +/* Fast path exporting routine in case same buffer is already exported. + * In this function, we skip normal exporting process and just update + * private data on both VMs (importer and exporter) + * + * return '1' if reexport is needed, return '0' if succeeds, return + * Kernel error code if something goes wrong + */ +static int fastpath_export(hyper_dmabuf_id_t hid, int sz_priv, char *priv) +{ + int reexport = 1; + int ret = 0; + struct exported_sgt_info *exported; + + exported = hyper_dmabuf_find_exported(hid); + + if (!exported) + return reexport; + + if (exported->valid == false) + return reexport; + + /* + * Check if unexport is already scheduled for that buffer, + * if so try to cancel it. If that will fail, buffer needs + * to be reexport once again. + */ + if (exported->unexport_sched) { + if (!cancel_delayed_work_sync(&exported->unexport)) + return reexport; + + exported->unexport_sched = false; + } + + /* if there's any change in size of private data. 
+ * we reallocate space for private data with new size + */ + if (sz_priv != exported->sz_priv) { + kfree(exported->priv); + + /* truncating size */ + if (sz_priv > MAX_SIZE_PRIV_DATA) + exported->sz_priv = MAX_SIZE_PRIV_DATA; + else + exported->sz_priv = sz_priv; + + exported->priv = kcalloc(1, exported->sz_priv, + GFP_KERNEL); + + if (!exported->priv) { + hyper_dmabuf_remove_exported(exported->hid); + hyper_dmabuf_cleanup_sgt_info(exported, true); + kfree(exported); + return -ENOMEM; + } + } + + /* update private data in sgt_info with new ones */ + ret = copy_from_user(exported->priv, priv, exported->sz_priv); + if (ret) { + dev_err(hy_drv_priv->dev, + "Failed to load a new private data\n"); + ret = -EINVAL; + } else { + /* send an export msg for updating priv in importer */ + ret = send_export_msg(exported, NULL); + + if (ret < 0) { + dev_err(hy_drv_priv->dev, + "Failed to send a new private data\n"); + ret = -EBUSY; + } + } + + return ret; +} + +static int hyper_dmabuf_export_remote_ioctl(struct file *filp, void *data) +{ + struct ioctl_hyper_dmabuf_export_remote *export_remote_attr = + (struct ioctl_hyper_dmabuf_export_remote *)data; + struct dma_buf *dma_buf; + struct dma_buf_attachment *attachment; + struct sg_table *sgt; + struct pages_info *pg_info; + struct exported_sgt_info *exported; + hyper_dmabuf_id_t hid; + int ret = 0; + + if (hy_drv_priv->domid == export_remote_attr->remote_domain) { + dev_err(hy_drv_priv->dev, + "exporting to the same VM is not permitted\n"); + return -EINVAL; + } + + dma_buf = dma_buf_get(export_remote_attr->dmabuf_fd); + + if (IS_ERR(dma_buf)) { + dev_err(hy_drv_priv->dev, "Cannot get dma buf\n"); + return PTR_ERR(dma_buf); + } + + /* we check if this specific attachment was already exported + * to the same domain and if yes and it's valid sgt_info, + * it returns hyper_dmabuf_id of pre-exported sgt_info + */ + hid = hyper_dmabuf_find_hid_exported(dma_buf, + export_remote_attr->remote_domain); + + if (hid.id != -1) { + ret = 
fastpath_export(hid, export_remote_attr->sz_priv, + export_remote_attr->priv); + + /* return if fastpath_export succeeds or + * gets some fatal error + */ + if (ret <= 0) { + dma_buf_put(dma_buf); + export_remote_attr->hid = hid; + return ret; + } + } + + attachment = dma_buf_attach(dma_buf, hy_drv_priv->dev); + if (IS_ERR(attachment)) { + dev_err(hy_drv_priv->dev, "cannot get attachment\n"); + ret = PTR_ERR(attachment); + goto fail_attach; + } + + sgt = dma_buf_map_attachment(attachment, DMA_BIDIRECTIONAL); + + if (IS_ERR(sgt)) { + dev_err(hy_drv_priv->dev, "cannot map attachment\n"); + ret = PTR_ERR(sgt); + goto fail_map_attachment; + } + + exported = kcalloc(1, sizeof(*exported), GFP_KERNEL); + + if (!exported) { + ret = -ENOMEM; + goto fail_sgt_info_creation; + } + + /* possible truncation */ + if (export_remote_attr->sz_priv > MAX_SIZE_PRIV_DATA) + exported->sz_priv = MAX_SIZE_PRIV_DATA; + else + exported->sz_priv = export_remote_attr->sz_priv; + + /* creating buffer for private data of buffer */ + if (exported->sz_priv != 0) { + exported->priv = kcalloc(1, exported->sz_priv, GFP_KERNEL); + + if (!exported->priv) { + ret = -ENOMEM; + goto fail_priv_creation; + } + } else { + dev_err(hy_drv_priv->dev, "size is 0\n"); + } + + exported->hid = hyper_dmabuf_get_hid(); + + /* no more exported dmabuf allowed */ + if (exported->hid.id == -1) { + dev_err(hy_drv_priv->dev, + "exceeds allowed number of dmabuf to be exported\n"); + ret = -ENOMEM; + goto fail_sgt_info_creation; + } + + exported->rdomid = export_remote_attr->remote_domain; + exported->dma_buf = dma_buf; + exported->valid = true; + + exported->active_sgts = kmalloc(sizeof(struct sgt_list), GFP_KERNEL); + if (!exported->active_sgts) { + ret = -ENOMEM; + goto fail_map_active_sgts; + } + + exported->active_attached = kmalloc(sizeof(struct attachment_list), + GFP_KERNEL); + if (!exported->active_attached) { + ret = -ENOMEM; + goto fail_map_active_attached; + } + + exported->va_kmapped = kmalloc(sizeof(struct 
kmap_vaddr_list), + GFP_KERNEL); + if (!exported->va_kmapped) { + ret = -ENOMEM; + goto fail_map_va_kmapped; + } + + exported->va_vmapped = kmalloc(sizeof(struct vmap_vaddr_list), + GFP_KERNEL); + if (!exported->va_vmapped) { + ret = -ENOMEM; + goto fail_map_va_vmapped; + } + + exported->active_sgts->sgt = sgt; + exported->active_attached->attach = attachment; + exported->va_kmapped->vaddr = NULL; + exported->va_vmapped->vaddr = NULL; + + /* initialize list of sgt, attachment and vaddr for dmabuf sync + * via shadow dma-buf + */ + INIT_LIST_HEAD(&exported->active_sgts->list); + INIT_LIST_HEAD(&exported->active_attached->list); + INIT_LIST_HEAD(&exported->va_kmapped->list); + INIT_LIST_HEAD(&exported->va_vmapped->list); + + /* copy private data to sgt_info */ + ret = copy_from_user(exported->priv, export_remote_attr->priv, + exported->sz_priv); + + if (ret) { + dev_err(hy_drv_priv->dev, + "failed to load private data\n"); + ret = -EINVAL; + goto fail_export; + } + + pg_info = hyper_dmabuf_ext_pgs(sgt); + if (!pg_info) { + dev_err(hy_drv_priv->dev, + "failed to construct pg_info\n"); + ret = -ENOMEM; + goto fail_export; + } + + exported->nents = pg_info->nents; + + /* now register it to export list */ + hyper_dmabuf_register_exported(exported); + + export_remote_attr->hid = exported->hid; + + ret = send_export_msg(exported, pg_info); + + if (ret < 0) { + dev_err(hy_drv_priv->dev, + "failed to send out the export request\n"); + goto fail_send_request; + } + + /* free pg_info */ + kfree(pg_info->pgs); + kfree(pg_info); + + exported->filp = filp; + + return ret; + +/* Clean-up if error occurs */ + +fail_send_request: + hyper_dmabuf_remove_exported(exported->hid); + + /* free pg_info */ + kfree(pg_info->pgs); + kfree(pg_info); + +fail_export: + kfree(exported->va_vmapped); + +fail_map_va_vmapped: + kfree(exported->va_kmapped); + +fail_map_va_kmapped: + kfree(exported->active_attached); + +fail_map_active_attached: + kfree(exported->active_sgts); + kfree(exported->priv); 
+ +fail_priv_creation: + kfree(exported); + +fail_map_active_sgts: +fail_sgt_info_creation: + dma_buf_unmap_attachment(attachment, sgt, + DMA_BIDIRECTIONAL); + +fail_map_attachment: + dma_buf_detach(dma_buf, attachment); + +fail_attach: + dma_buf_put(dma_buf); + + return ret; +} + +static int hyper_dmabuf_export_fd_ioctl(struct file *filp, void *data) +{ + struct ioctl_hyper_dmabuf_export_fd *export_fd_attr = + (struct ioctl_hyper_dmabuf_export_fd *)data; + struct hyper_dmabuf_bknd_ops *bknd_ops = hy_drv_priv->bknd_ops; + struct imported_sgt_info *imported; + struct hyper_dmabuf_req *req; + struct page **data_pgs; + int op[4]; + int i; + int ret = 0; + + dev_dbg(hy_drv_priv->dev, "%s entry\n", __func__); + + /* look for dmabuf for the id */ + imported = hyper_dmabuf_find_imported(export_fd_attr->hid); + + /* can't find sgt from the table */ + if (!imported) { + dev_err(hy_drv_priv->dev, "can't find the entry\n"); + return -ENOENT; + } + + mutex_lock(&hy_drv_priv->lock); + + if (imported->dma_buf) { + if (imported->valid == false) { + mutex_unlock(&hy_drv_priv->lock); + dev_err(hy_drv_priv->dev, + "Buffer is released {id:%d key:%d %d %d}, cannot import\n", + imported->hid.id, imported->hid.rng_key[0], + imported->hid.rng_key[1], imported->hid.rng_key[2]); + return -EINVAL; + } + get_dma_buf(imported->dma_buf); + export_fd_attr->fd = dma_buf_fd(imported->dma_buf, + export_fd_attr->flags); + mutex_unlock(&hy_drv_priv->lock); + dev_dbg(hy_drv_priv->dev, "%s exit\n", __func__); + return 0; + } + + imported->importers++; + + /* send notification for export_fd to exporter */ + op[0] = imported->hid.id; + + for (i = 0; i < 3; i++) + op[i+1] = imported->hid.rng_key[i]; + + dev_dbg(hy_drv_priv->dev, "Export FD of buffer {id:%d key:%d %d %d}\n", + imported->hid.id, imported->hid.rng_key[0], + imported->hid.rng_key[1], imported->hid.rng_key[2]); + + req = kcalloc(1, sizeof(*req), GFP_KERNEL); + + if (!req) { + mutex_unlock(&hy_drv_priv->lock); + return -ENOMEM; + } + + 
hyper_dmabuf_create_req(req, HYPER_DMABUF_EXPORT_FD, &op[0]); + + ret = bknd_ops->send_req(HYPER_DMABUF_DOM_ID(imported->hid), req, true); + + if (ret < 0) { + /* in case of timeout other end eventually will receive request, + * so we need to undo it + */ + hyper_dmabuf_create_req(req, HYPER_DMABUF_EXPORT_FD_FAILED, + &op[0]); + bknd_ops->send_req(HYPER_DMABUF_DOM_ID(imported->hid), req, false); + kfree(req); + dev_err(hy_drv_priv->dev, + "Failed to create sgt or notify exporter\n"); + imported->importers--; + mutex_unlock(&hy_drv_priv->lock); + return ret; + } + + kfree(req); + + if (ret == HYPER_DMABUF_REQ_ERROR) { + dev_err(hy_drv_priv->dev, + "Buffer invalid {id:%d key:%d %d %d}, cannot import\n", + imported->hid.id, imported->hid.rng_key[0], + imported->hid.rng_key[1], imported->hid.rng_key[2]); + + imported->importers--; + mutex_unlock(&hy_drv_priv->lock); + return -EINVAL; + } + + ret = 0; + + dev_dbg(hy_drv_priv->dev, + "Found buffer gref 0x%lx off %d\n", + imported->ref_handle, imported->frst_ofst); + + dev_dbg(hy_drv_priv->dev, + "last len %d nents %d domain %d\n", + imported->last_len, imported->nents, + HYPER_DMABUF_DOM_ID(imported->hid)); + + if (!imported->sgt) { + dev_dbg(hy_drv_priv->dev, + "buffer {id:%d key:%d %d %d} pages not mapped yet\n", + imported->hid.id, imported->hid.rng_key[0], + imported->hid.rng_key[1], imported->hid.rng_key[2]); + + data_pgs = bknd_ops->map_shared_pages(imported->ref_handle, + HYPER_DMABUF_DOM_ID(imported->hid), + imported->nents, + &imported->refs_info); + + if (!data_pgs) { + dev_err(hy_drv_priv->dev, + "can't map pages hid {id:%d key:%d %d %d}\n", + imported->hid.id, imported->hid.rng_key[0], + imported->hid.rng_key[1], + imported->hid.rng_key[2]); + + imported->importers--; + + req = kcalloc(1, sizeof(*req), GFP_KERNEL); + + if (!req) { + mutex_unlock(&hy_drv_priv->lock); + return -ENOMEM; + } + + hyper_dmabuf_create_req(req, + HYPER_DMABUF_EXPORT_FD_FAILED, + &op[0]); + 
bknd_ops->send_req(HYPER_DMABUF_DOM_ID(imported->hid), req, + false); + kfree(req); + mutex_unlock(&hy_drv_priv->lock); + return -EINVAL; + } + + imported->sgt = hyper_dmabuf_create_sgt(data_pgs, + imported->frst_ofst, + imported->last_len, + imported->nents); + + } + + export_fd_attr->fd = hyper_dmabuf_export_fd(imported, + export_fd_attr->flags); + + if (export_fd_attr->fd < 0) { + /* fail to get fd */ + ret = export_fd_attr->fd; + } + + mutex_unlock(&hy_drv_priv->lock); + + dev_dbg(hy_drv_priv->dev, "%s exit\n", __func__); + return ret; +} + +/* unexport dmabuf from the database and send int req to the source domain + * to unmap it. + */ +static void delayed_unexport(struct work_struct *work) +{ + struct hyper_dmabuf_req *req; + struct hyper_dmabuf_bknd_ops *bknd_ops = hy_drv_priv->bknd_ops; + struct exported_sgt_info *exported; + int op[4]; + int i, ret; + + if (!work) + return; + + exported = container_of(work, struct exported_sgt_info, unexport.work); + + dev_dbg(hy_drv_priv->dev, + "Marking buffer {id:%d key:%d %d %d} as invalid\n", + exported->hid.id, exported->hid.rng_key[0], + exported->hid.rng_key[1], exported->hid.rng_key[2]); + + /* no longer valid */ + exported->valid = false; + + req = kcalloc(1, sizeof(*req), GFP_KERNEL); + + if (!req) + return; + + op[0] = exported->hid.id; + + for (i = 0; i < 3; i++) + op[i+1] = exported->hid.rng_key[i]; + + hyper_dmabuf_create_req(req, HYPER_DMABUF_NOTIFY_UNEXPORT, &op[0]); + + /* Now send unexport request to remote domain, marking + * that buffer should not be used anymore + */ + ret = bknd_ops->send_req(exported->rdomid, req, true); + if (ret < 0) { + dev_err(hy_drv_priv->dev, + "unexport message for buffer {id:%d key:%d %d %d} failed\n", + exported->hid.id, exported->hid.rng_key[0], + exported->hid.rng_key[1], exported->hid.rng_key[2]); + } + + kfree(req); + exported->unexport_sched = false; + + /* Immediately clean-up if it has never been exported by importer + * (so no SGT is constructed on importer). 
+ * clean it up later in remote sync when final release ops + * is called (importer does this only when there's no + * consumer of locally exported FDs) + */ + if (exported->active == 0) { + dev_dbg(hy_drv_priv->dev, + "claning up buffer {id:%d key:%d %d %d} completly\n", + exported->hid.id, exported->hid.rng_key[0], + exported->hid.rng_key[1], exported->hid.rng_key[2]); + + hyper_dmabuf_cleanup_sgt_info(exported, false); + hyper_dmabuf_remove_exported(exported->hid); + + /* register hyper_dmabuf_id to the list for reuse */ + hyper_dmabuf_store_hid(exported->hid); + + if (exported->sz_priv > 0 && !exported->priv) + kfree(exported->priv); + + kfree(exported); + } +} + +/* Schedule unexport of dmabuf. + */ +int hyper_dmabuf_unexport_ioctl(struct file *filp, void *data) +{ + struct ioctl_hyper_dmabuf_unexport *unexport_attr = + (struct ioctl_hyper_dmabuf_unexport *)data; + struct exported_sgt_info *exported; + + dev_dbg(hy_drv_priv->dev, "%s entry\n", __func__); + + /* find dmabuf in export list */ + exported = hyper_dmabuf_find_exported(unexport_attr->hid); + + dev_dbg(hy_drv_priv->dev, + "scheduling unexport of buffer {id:%d key:%d %d %d}\n", + unexport_attr->hid.id, unexport_attr->hid.rng_key[0], + unexport_attr->hid.rng_key[1], unexport_attr->hid.rng_key[2]); + + /* failed to find corresponding entry in export list */ + if (exported == NULL) { + unexport_attr->status = -ENOENT; + return -ENOENT; + } + + if (exported->unexport_sched) + return 0; + + exported->unexport_sched = true; + INIT_DELAYED_WORK(&exported->unexport, delayed_unexport); + schedule_delayed_work(&exported->unexport, + msecs_to_jiffies(unexport_attr->delay_ms)); + + dev_dbg(hy_drv_priv->dev, "%s exit\n", __func__); + return 0; +} + +static int hyper_dmabuf_query_ioctl(struct file *filp, void *data) +{ + struct ioctl_hyper_dmabuf_query *query_attr = + (struct ioctl_hyper_dmabuf_query *)data; + struct exported_sgt_info *exported = NULL; + struct imported_sgt_info *imported = NULL; + int ret = 0; 
+ + if (HYPER_DMABUF_DOM_ID(query_attr->hid) == hy_drv_priv->domid) { + /* query for exported dmabuf */ + exported = hyper_dmabuf_find_exported(query_attr->hid); + if (exported) { + ret = hyper_dmabuf_query_exported(exported, + query_attr->item, + &query_attr->info); + } else { + dev_err(hy_drv_priv->dev, + "hid {id:%d key:%d %d %d} not in exp list\n", + query_attr->hid.id, + query_attr->hid.rng_key[0], + query_attr->hid.rng_key[1], + query_attr->hid.rng_key[2]); + return -ENOENT; + } + } else { + /* query for imported dmabuf */ + imported = hyper_dmabuf_find_imported(query_attr->hid); + if (imported) { + ret = hyper_dmabuf_query_imported(imported, + query_attr->item, + &query_attr->info); + } else { + dev_err(hy_drv_priv->dev, + "hid {id:%d key:%d %d %d} not in imp list\n", + query_attr->hid.id, + query_attr->hid.rng_key[0], + query_attr->hid.rng_key[1], + query_attr->hid.rng_key[2]); + return -ENOENT; + } + } + + return ret; +} + +const struct hyper_dmabuf_ioctl_desc hyper_dmabuf_ioctls[] = { + HYPER_DMABUF_IOCTL_DEF(IOCTL_HYPER_DMABUF_TX_CH_SETUP, + hyper_dmabuf_tx_ch_setup_ioctl, 0), + HYPER_DMABUF_IOCTL_DEF(IOCTL_HYPER_DMABUF_RX_CH_SETUP, + hyper_dmabuf_rx_ch_setup_ioctl, 0), + HYPER_DMABUF_IOCTL_DEF(IOCTL_HYPER_DMABUF_EXPORT_REMOTE, + hyper_dmabuf_export_remote_ioctl, 0), + HYPER_DMABUF_IOCTL_DEF(IOCTL_HYPER_DMABUF_EXPORT_FD, + hyper_dmabuf_export_fd_ioctl, 0), + HYPER_DMABUF_IOCTL_DEF(IOCTL_HYPER_DMABUF_UNEXPORT, + hyper_dmabuf_unexport_ioctl, 0), + HYPER_DMABUF_IOCTL_DEF(IOCTL_HYPER_DMABUF_QUERY, + hyper_dmabuf_query_ioctl, 0), +}; + +long hyper_dmabuf_ioctl(struct file *filp, + unsigned int cmd, unsigned long param) +{ + const struct hyper_dmabuf_ioctl_desc *ioctl = NULL; + unsigned int nr = _IOC_NR(cmd); + int ret; + hyper_dmabuf_ioctl_t func; + char *kdata; + + if (nr >= ARRAY_SIZE(hyper_dmabuf_ioctls)) { + dev_err(hy_drv_priv->dev, "invalid ioctl\n"); + return -EINVAL; + } + + ioctl = &hyper_dmabuf_ioctls[nr]; + + func = ioctl->func; + + if 
(unlikely(!func)) { + dev_err(hy_drv_priv->dev, "no function\n"); + return -EINVAL; + } + + kdata = kmalloc(_IOC_SIZE(cmd), GFP_KERNEL); + if (!kdata) + return -ENOMEM; + + if (copy_from_user(kdata, (void __user *)param, + _IOC_SIZE(cmd)) != 0) { + dev_err(hy_drv_priv->dev, + "failed to copy from user arguments\n"); + ret = -EFAULT; + goto ioctl_error; + } + + ret = func(filp, kdata); + + if (copy_to_user((void __user *)param, kdata, + _IOC_SIZE(cmd)) != 0) { + dev_err(hy_drv_priv->dev, + "failed to copy to user arguments\n"); + ret = -EFAULT; + goto ioctl_error; + } + +ioctl_error: + kfree(kdata); + + return ret; +} diff --git a/drivers/dma-buf/hyper_dmabuf/hyper_dmabuf_ioctl.h b/drivers/dma-buf/hyper_dmabuf/hyper_dmabuf_ioctl.h new file mode 100644 index 0000000000000..5991a87b194f4 --- /dev/null +++ b/drivers/dma-buf/hyper_dmabuf/hyper_dmabuf_ioctl.h @@ -0,0 +1,50 @@ +/* + * Copyright © 2017 Intel Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. + * + */ + +#ifndef __HYPER_DMABUF_IOCTL_H__ +#define __HYPER_DMABUF_IOCTL_H__ + +typedef int (*hyper_dmabuf_ioctl_t)(struct file *filp, void *data); + +struct hyper_dmabuf_ioctl_desc { + unsigned int cmd; + int flags; + hyper_dmabuf_ioctl_t func; + const char *name; +}; + +#define HYPER_DMABUF_IOCTL_DEF(ioctl, _func, _flags) \ + [_IOC_NR(ioctl)] = { \ + .cmd = ioctl, \ + .func = _func, \ + .flags = _flags, \ + .name = #ioctl \ + } + +long hyper_dmabuf_ioctl(struct file *filp, + unsigned int cmd, unsigned long param); + +int hyper_dmabuf_unexport_ioctl(struct file *filp, void *data); + +#endif //__HYPER_DMABUF_IOCTL_H__ diff --git a/drivers/dma-buf/hyper_dmabuf/hyper_dmabuf_list.c b/drivers/dma-buf/hyper_dmabuf/hyper_dmabuf_list.c new file mode 100644 index 0000000000000..7b6ce1f067a26 --- /dev/null +++ b/drivers/dma-buf/hyper_dmabuf/hyper_dmabuf_list.c @@ -0,0 +1,307 @@ +/* + * Copyright © 2017 Intel Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. + * + * Authors: + * Dongwon Kim + * Mateusz Polrola + * + */ + +#include +#include +#include +#include +#include +#include "hyper_dmabuf_drv.h" +#include "hyper_dmabuf_list.h" +#include "hyper_dmabuf_id.h" + +DECLARE_HASHTABLE(hyper_dmabuf_hash_imported, MAX_ENTRY_IMPORTED); +DECLARE_HASHTABLE(hyper_dmabuf_hash_exported, MAX_ENTRY_EXPORTED); + +#ifdef CONFIG_HYPER_DMABUF_SYSFS +static ssize_t hyper_dmabuf_imported_show(struct device *drv, + struct device_attribute *attr, + char *buf) +{ + struct list_entry_imported *info_entry; + int bkt; + ssize_t count = 0; + size_t total = 0; + + hash_for_each(hyper_dmabuf_hash_imported, bkt, info_entry, node) { + hyper_dmabuf_id_t hid = info_entry->imported->hid; + int nents = info_entry->imported->nents; + bool valid = info_entry->imported->valid; + int num_importers = info_entry->imported->importers; + + total += nents; + count += scnprintf(buf + count, PAGE_SIZE - count, + "hid:{%d %d %d %d}, nent:%d, v:%c, numi:%d\n", + hid.id, hid.rng_key[0], hid.rng_key[1], + hid.rng_key[2], nents, (valid ? 
't' : 'f'), + num_importers); + } + count += scnprintf(buf + count, PAGE_SIZE - count, + "total nents: %lu\n", total); + + return count; +} + +static ssize_t hyper_dmabuf_exported_show(struct device *drv, + struct device_attribute *attr, + char *buf) +{ + struct list_entry_exported *info_entry; + int bkt; + ssize_t count = 0; + size_t total = 0; + + hash_for_each(hyper_dmabuf_hash_exported, bkt, info_entry, node) { + hyper_dmabuf_id_t hid = info_entry->exported->hid; + int nents = info_entry->exported->nents; + bool valid = info_entry->exported->valid; + int importer_exported = info_entry->exported->active; + + total += nents; + count += scnprintf(buf + count, PAGE_SIZE - count, + "hid:{%d %d %d %d}, nent:%d, v:%c, ie:%d\n", + hid.id, hid.rng_key[0], hid.rng_key[1], + hid.rng_key[2], nents, (valid ? 't' : 'f'), + importer_exported); + } + count += scnprintf(buf + count, PAGE_SIZE - count, + "total nents: %lu\n", total); + + return count; +} + +static DEVICE_ATTR(imported, 0400, hyper_dmabuf_imported_show, NULL); +static DEVICE_ATTR(exported, 0400, hyper_dmabuf_exported_show, NULL); + +int hyper_dmabuf_register_sysfs(struct device *dev) +{ + int err; + + err = device_create_file(dev, &dev_attr_imported); + if (err < 0) + goto err1; + err = device_create_file(dev, &dev_attr_exported); + if (err < 0) + goto err2; + + return 0; +err2: + device_remove_file(dev, &dev_attr_imported); +err1: + return -1; +} + +int hyper_dmabuf_unregister_sysfs(struct device *dev) +{ + device_remove_file(dev, &dev_attr_imported); + device_remove_file(dev, &dev_attr_exported); + return 0; +} + +#endif + +int hyper_dmabuf_table_init(void) +{ + hash_init(hyper_dmabuf_hash_imported); + hash_init(hyper_dmabuf_hash_exported); + return 0; +} + +int hyper_dmabuf_table_destroy(void) +{ + /* TODO: cleanup hyper_dmabuf_hash_imported + * and hyper_dmabuf_hash_exported + */ + return 0; +} + +int hyper_dmabuf_register_exported(struct exported_sgt_info *exported) +{ + struct list_entry_exported 
*info_entry; + + info_entry = kmalloc(sizeof(*info_entry), GFP_KERNEL); + + if (!info_entry) + return -ENOMEM; + + info_entry->exported = exported; + + hash_add(hyper_dmabuf_hash_exported, &info_entry->node, + info_entry->exported->hid.id); + + return 0; +} + +int hyper_dmabuf_register_imported(struct imported_sgt_info *imported) +{ + struct list_entry_imported *info_entry; + + info_entry = kmalloc(sizeof(*info_entry), GFP_KERNEL); + + if (!info_entry) + return -ENOMEM; + + info_entry->imported = imported; + + hash_add(hyper_dmabuf_hash_imported, &info_entry->node, + info_entry->imported->hid.id); + + return 0; +} + +struct exported_sgt_info *hyper_dmabuf_find_exported(hyper_dmabuf_id_t hid) +{ + struct list_entry_exported *info_entry; + int bkt; + + hash_for_each(hyper_dmabuf_hash_exported, bkt, info_entry, node) + /* checking hid.id first */ + if (info_entry->exported->hid.id == hid.id) { + /* then key is compared */ + if (hyper_dmabuf_hid_keycomp(info_entry->exported->hid, + hid)) + return info_entry->exported; + + /* if key is unmatched, given HID is invalid, + * so returning NULL + */ + break; + } + + return NULL; +} + +/* search for pre-exported sgt and return id of it if it exist */ +hyper_dmabuf_id_t hyper_dmabuf_find_hid_exported(struct dma_buf *dmabuf, + int domid) +{ + struct list_entry_exported *info_entry; + hyper_dmabuf_id_t hid = {-1, {0, 0, 0} }; + int bkt; + + hash_for_each(hyper_dmabuf_hash_exported, bkt, info_entry, node) + if (info_entry->exported->dma_buf == dmabuf && + info_entry->exported->rdomid == domid) + return info_entry->exported->hid; + + return hid; +} + +struct imported_sgt_info *hyper_dmabuf_find_imported(hyper_dmabuf_id_t hid) +{ + struct list_entry_imported *info_entry; + int bkt; + + hash_for_each(hyper_dmabuf_hash_imported, bkt, info_entry, node) + /* checking hid.id first */ + if (info_entry->imported->hid.id == hid.id) { + /* then key is compared */ + if (hyper_dmabuf_hid_keycomp(info_entry->imported->hid, + hid)) + return 
info_entry->imported; + /* if key is unmatched, given HID is invalid, + * so returning NULL + */ + break; + } + + return NULL; +} + +int hyper_dmabuf_remove_exported(hyper_dmabuf_id_t hid) +{ + struct list_entry_exported *info_entry; + int bkt; + + hash_for_each(hyper_dmabuf_hash_exported, bkt, info_entry, node) + /* checking hid.id first */ + if (info_entry->exported->hid.id == hid.id) { + /* then key is compared */ + if (hyper_dmabuf_hid_keycomp(info_entry->exported->hid, + hid)) { + hash_del(&info_entry->node); + kfree(info_entry); + return 0; + } + + break; + } + + return -ENOENT; +} + +int hyper_dmabuf_remove_imported(hyper_dmabuf_id_t hid) +{ + struct list_entry_imported *info_entry; + int bkt; + + hash_for_each(hyper_dmabuf_hash_imported, bkt, info_entry, node) + /* checking hid.id first */ + if (info_entry->imported->hid.id == hid.id) { + /* then key is compared */ + if (hyper_dmabuf_hid_keycomp(info_entry->imported->hid, + hid)) { + hash_del(&info_entry->node); + kfree(info_entry); + return 0; + } + + break; + } + + return -ENOENT; +} + +void hyper_dmabuf_foreach_exported( + void (*func)(struct exported_sgt_info *, void *attr), + void *attr) +{ + struct list_entry_exported *info_entry; + struct hlist_node *tmp; + int bkt; + + hash_for_each_safe(hyper_dmabuf_hash_exported, bkt, tmp, + info_entry, node) { + func(info_entry->exported, attr); + } +} + +void hyper_dmabuf_remove_imported_vmid(int vmid) +{ + struct list_entry_imported *info_entry; + struct hlist_node *tmp; + int bkt; + + hash_for_each_safe(hyper_dmabuf_hash_imported, bkt, tmp, + info_entry, node) { + if (HYPER_DMABUF_DOM_ID(info_entry->imported->hid) == vmid) { + hash_del(&info_entry->node); + kfree(info_entry); + } + } +} diff --git a/drivers/dma-buf/hyper_dmabuf/hyper_dmabuf_list.h b/drivers/dma-buf/hyper_dmabuf/hyper_dmabuf_list.h new file mode 100644 index 0000000000000..81250e5c5eab5 --- /dev/null +++ b/drivers/dma-buf/hyper_dmabuf/hyper_dmabuf_list.h @@ -0,0 +1,72 @@ +/* + * Copyright © 
2017 Intel Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. 
+ * + */ + +#ifndef __HYPER_DMABUF_LIST_H__ +#define __HYPER_DMABUF_LIST_H__ + +#include "hyper_dmabuf_struct.h" + +/* number of bits to be used for exported dmabufs hash table */ +#define MAX_ENTRY_EXPORTED 7 +/* number of bits to be used for imported dmabufs hash table */ +#define MAX_ENTRY_IMPORTED 7 + +struct list_entry_exported { + struct exported_sgt_info *exported; + struct hlist_node node; +}; + +struct list_entry_imported { + struct imported_sgt_info *imported; + struct hlist_node node; +}; + +int hyper_dmabuf_table_init(void); + +int hyper_dmabuf_table_destroy(void); + +int hyper_dmabuf_register_exported(struct exported_sgt_info *info); + +/* search for pre-exported sgt and return id of it if it exist */ +hyper_dmabuf_id_t hyper_dmabuf_find_hid_exported(struct dma_buf *dmabuf, + int domid); + +int hyper_dmabuf_register_imported(struct imported_sgt_info *info); + +struct exported_sgt_info *hyper_dmabuf_find_exported(hyper_dmabuf_id_t hid); + +struct imported_sgt_info *hyper_dmabuf_find_imported(hyper_dmabuf_id_t hid); + +int hyper_dmabuf_remove_exported(hyper_dmabuf_id_t hid); + +int hyper_dmabuf_remove_imported(hyper_dmabuf_id_t hid); +void hyper_dmabuf_remove_imported_vmid(int vmid); + +void hyper_dmabuf_foreach_exported(void (*func)(struct exported_sgt_info *, + void *attr), void *attr); + +int hyper_dmabuf_register_sysfs(struct device *dev); +int hyper_dmabuf_unregister_sysfs(struct device *dev); + +#endif /* __HYPER_DMABUF_LIST_H__ */ diff --git a/drivers/dma-buf/hyper_dmabuf/hyper_dmabuf_msg.c b/drivers/dma-buf/hyper_dmabuf/hyper_dmabuf_msg.c new file mode 100644 index 0000000000000..48db3fd8bb0e9 --- /dev/null +++ b/drivers/dma-buf/hyper_dmabuf/hyper_dmabuf_msg.c @@ -0,0 +1,415 @@ +/* + * Copyright © 2017 Intel Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without 
limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. + * + * Authors: + * Dongwon Kim + * Mateusz Polrola + * + */ + +#include +#include +#include +#include +#include "hyper_dmabuf_drv.h" +#include "hyper_dmabuf_msg.h" +#include "hyper_dmabuf_remote_sync.h" +#include "hyper_dmabuf_event.h" +#include "hyper_dmabuf_list.h" + +struct cmd_process { + struct work_struct work; + struct hyper_dmabuf_req *rq; + int domid; +}; + +void hyper_dmabuf_create_req(struct hyper_dmabuf_req *req, + enum hyper_dmabuf_command cmd, int *op) +{ + int i; + + req->stat = HYPER_DMABUF_REQ_NOT_RESPONDED; + req->cmd = cmd; + + switch (cmd) { + /* as exporter, commands to importer */ + case HYPER_DMABUF_EXPORT: + /* exporting pages for dmabuf */ + /* command : HYPER_DMABUF_EXPORT, + * op0~op3 : hyper_dmabuf_id + * op4 : number of pages to be shared + * op5 : offset of data in the first page + * op6 : length of data in the last page + * op7 : 32 LSB of top-level reference number for shared pages + * op8 : 32 MSB of top-level reference number for shared pages + * op9 : size of private data (from op9) + * op10 ~ : Driver-specific private data + * (e.g. 
graphic buffer's meta info) + */ + + memcpy(&req->op[0], &op[0], 10 * sizeof(int) + op[9]); + break; + + case HYPER_DMABUF_NOTIFY_UNEXPORT: + /* destroy sg_list for hyper_dmabuf_id on remote side */ + /* command : DMABUF_DESTROY, + * op0~op3 : hyper_dmabuf_id_t hid + */ + + for (i = 0; i < 4; i++) + req->op[i] = op[i]; + break; + + case HYPER_DMABUF_EXPORT_FD: + case HYPER_DMABUF_EXPORT_FD_FAILED: + /* dmabuf fd is being created on imported side or importing + * failed + * + * command : HYPER_DMABUF_EXPORT_FD or + * HYPER_DMABUF_EXPORT_FD_FAILED, + * op0~op3 : hyper_dmabuf_id + */ + + for (i = 0; i < 4; i++) + req->op[i] = op[i]; + break; + + case HYPER_DMABUF_OPS_TO_REMOTE: + /* notifying dmabuf map/unmap to importer (probably not needed) + * for dmabuf synchronization + */ + break; + + case HYPER_DMABUF_OPS_TO_SOURCE: + /* notifying dmabuf map/unmap to exporter, map will make + * the driver to do shadow mapping or unmapping for + * synchronization with original exporter (e.g. i915) + * + * command : DMABUF_OPS_TO_SOURCE. + * op0~3 : hyper_dmabuf_id + * op4 : map(=1)/unmap(=2)/attach(=3)/detach(=4) + */ + for (i = 0; i < 5; i++) + req->op[i] = op[i]; + break; + + default: + /* no command found */ + return; + } +} + +static void cmd_process_work(struct work_struct *work) +{ + struct imported_sgt_info *imported; + struct cmd_process *proc = container_of(work, + struct cmd_process, work); + struct hyper_dmabuf_req *req; + hyper_dmabuf_id_t hid; + int i; + + req = proc->rq; + + switch (req->cmd) { + case HYPER_DMABUF_EXPORT: + /* exporting pages for dmabuf */ + /* command : HYPER_DMABUF_EXPORT, + * op0~op3 : hyper_dmabuf_id + * op4 : number of pages to be shared + * op5 : offset of data in the first page + * op6 : length of data in the last page + * op7 : 32 LSB of top-level reference number for shared pages + * op8 : 32 MSB of top-level reference number for shared pages + * op9 : size of private data (from op9) + * op10 ~ : Driver-specific private data + * (e.g. 
graphic buffer's meta info) + */ + + /* if nents == 0, it means it is a message only for + * priv synchronization. for existing imported_sgt_info + * so not creating a new one + */ + if (req->op[4] == 0) { + hyper_dmabuf_id_t exist = {req->op[0], + {req->op[1], req->op[2], + req->op[3] } }; + + imported = hyper_dmabuf_find_imported(exist); + + if (!imported) { + dev_err(hy_drv_priv->dev, + "Can't find imported sgt_info\n"); + break; + } + + /* if size of new private data is different, + * we reallocate it. + */ + if (imported->sz_priv != req->op[9]) { + kfree(imported->priv); + imported->sz_priv = req->op[9]; + imported->priv = kcalloc(1, req->op[9], + GFP_KERNEL); + if (!imported->priv) { + /* set it invalid */ + imported->valid = 0; + break; + } + } + + /* updating priv data */ + memcpy(imported->priv, &req->op[10], req->op[9]); + +#ifdef CONFIG_HYPER_DMABUF_EVENT_GEN + /* generating import event */ + hyper_dmabuf_import_event(imported->hid); +#endif + + break; + } + + imported = kcalloc(1, sizeof(*imported), GFP_KERNEL); + + if (!imported) + break; + + imported->sz_priv = req->op[9]; + imported->priv = kcalloc(1, req->op[9], GFP_KERNEL); + + if (!imported->priv) { + kfree(imported); + break; + } + + imported->hid.id = req->op[0]; + + for (i = 0; i < 3; i++) + imported->hid.rng_key[i] = req->op[i+1]; + + imported->nents = req->op[4]; + imported->frst_ofst = req->op[5]; + imported->last_len = req->op[6]; + imported->ref_handle = (u64)req->op[8] << 32 | req->op[7]; + + dev_dbg(hy_drv_priv->dev, "DMABUF was exported\n"); + dev_dbg(hy_drv_priv->dev, "\thid{id:%d key:%d %d %d}\n", + req->op[0], req->op[1], req->op[2], + req->op[3]); + dev_dbg(hy_drv_priv->dev, "\tnents %d\n", req->op[4]); + dev_dbg(hy_drv_priv->dev, "\tfirst offset %d\n", req->op[5]); + dev_dbg(hy_drv_priv->dev, "\tlast len %d\n", req->op[6]); + dev_dbg(hy_drv_priv->dev, "\tgrefid 0x%llx\n", + (u64)req->op[8] << 32 | req->op[7]); + + memcpy(imported->priv, &req->op[10], req->op[9]); + + 
imported->valid = true; + hyper_dmabuf_register_imported(imported); + +#ifdef CONFIG_HYPER_DMABUF_EVENT_GEN + /* generating import event */ + hyper_dmabuf_import_event(imported->hid); +#endif + + break; + + case HYPER_DMABUF_OPS_TO_SOURCE: + /* notifying dmabuf map/unmap to exporter, map will + * make the driver to do shadow mapping + * or unmapping for synchronization with original + * exporter (e.g. i915) + * + * command : DMABUF_OPS_TO_SOURCE. + * op0~3 : hyper_dmabuf_id + * op1 : enum hyper_dmabuf_ops {....} + */ + dev_dbg(hy_drv_priv->dev, + "%s: HYPER_DMABUF_OPS_TO_SOURCE\n", __func__); + + hid.id = req->op[0]; + hid.rng_key[0] = req->op[1]; + hid.rng_key[1] = req->op[2]; + hid.rng_key[2] = req->op[3]; + hyper_dmabuf_remote_sync(hid, req->op[4]); + + break; + + + case HYPER_DMABUF_OPS_TO_REMOTE: + /* notifying dmabuf map/unmap to importer + * (probably not needed) for dmabuf synchronization + */ + break; + + default: + /* shouldn't get here */ + break; + } + + kfree(req); + kfree(proc); +} + +int hyper_dmabuf_msg_parse(int domid, struct hyper_dmabuf_req *req) +{ + struct cmd_process *proc; + struct hyper_dmabuf_req *temp_req; + struct imported_sgt_info *imported; + struct exported_sgt_info *exported; + hyper_dmabuf_id_t hid; + + if (!req) { + dev_err(hy_drv_priv->dev, "request is NULL\n"); + return -EINVAL; + } + + hid.id = req->op[0]; + hid.rng_key[0] = req->op[1]; + hid.rng_key[1] = req->op[2]; + hid.rng_key[2] = req->op[3]; + + if ((req->cmd < HYPER_DMABUF_EXPORT) || + (req->cmd > HYPER_DMABUF_OPS_TO_SOURCE)) { + dev_err(hy_drv_priv->dev, "invalid command\n"); + return -EINVAL; + } + + req->stat = HYPER_DMABUF_REQ_PROCESSED; + + /* HYPER_DMABUF_DESTROY requires immediate + * follow up so can't be processed in workqueue + */ + if (req->cmd == HYPER_DMABUF_NOTIFY_UNEXPORT) { + /* destroy sg_list for hyper_dmabuf_id on remote side */ + /* command : HYPER_DMABUF_NOTIFY_UNEXPORT, + * op0~3 : hyper_dmabuf_id + */ + dev_dbg(hy_drv_priv->dev, + "processing 
HYPER_DMABUF_NOTIFY_UNEXPORT\n"); + + imported = hyper_dmabuf_find_imported(hid); + + if (imported) { + /* if anything is still using dma_buf */ + if (imported->importers) { + /* Buffer is still in use, just mark that + * it should not be allowed to export its fd + * anymore. + */ + imported->valid = false; + hyper_dmabuf_remove_imported(hid); + } else { + /* No one is using buffer, remove it from + * imported list + */ + hyper_dmabuf_remove_imported(hid); + kfree(imported->priv); + kfree(imported); + } + } else { + req->stat = HYPER_DMABUF_REQ_ERROR; + } + + return req->cmd; + } + + /* synchronous dma_buf_fd export */ + if (req->cmd == HYPER_DMABUF_EXPORT_FD) { + /* find a corresponding SGT for the id */ + dev_dbg(hy_drv_priv->dev, + "HYPER_DMABUF_EXPORT_FD for {id:%d key:%d %d %d}\n", + hid.id, hid.rng_key[0], hid.rng_key[1], hid.rng_key[2]); + + exported = hyper_dmabuf_find_exported(hid); + + if (!exported) { + dev_err(hy_drv_priv->dev, + "buffer {id:%d key:%d %d %d} not found\n", + hid.id, hid.rng_key[0], hid.rng_key[1], + hid.rng_key[2]); + + req->stat = HYPER_DMABUF_REQ_ERROR; + } else if (!exported->valid) { + dev_dbg(hy_drv_priv->dev, + "Buffer no longer valid {id:%d key:%d %d %d}\n", + hid.id, hid.rng_key[0], hid.rng_key[1], + hid.rng_key[2]); + + req->stat = HYPER_DMABUF_REQ_ERROR; + } else { + dev_dbg(hy_drv_priv->dev, + "Buffer still valid {id:%d key:%d %d %d}\n", + hid.id, hid.rng_key[0], hid.rng_key[1], + hid.rng_key[2]); + + exported->active++; + req->stat = HYPER_DMABUF_REQ_PROCESSED; + } + return req->cmd; + } + + if (req->cmd == HYPER_DMABUF_EXPORT_FD_FAILED) { + dev_dbg(hy_drv_priv->dev, + "HYPER_DMABUF_EXPORT_FD_FAILED for {id:%d key:%d %d %d}\n", + hid.id, hid.rng_key[0], hid.rng_key[1], hid.rng_key[2]); + + exported = hyper_dmabuf_find_exported(hid); + + if (!exported) { + dev_err(hy_drv_priv->dev, + "buffer {id:%d key:%d %d %d} not found\n", + hid.id, hid.rng_key[0], hid.rng_key[1], + hid.rng_key[2]); + + req->stat = HYPER_DMABUF_REQ_ERROR; + 
} else { + exported->active--; + req->stat = HYPER_DMABUF_REQ_PROCESSED; + } + return req->cmd; + } + + dev_dbg(hy_drv_priv->dev, + "%s: putting request to workqueue\n", __func__); + temp_req = kmalloc(sizeof(*temp_req), GFP_ATOMIC); + + if (!temp_req) + return -ENOMEM; + + memcpy(temp_req, req, sizeof(*temp_req)); + + proc = kcalloc(1, sizeof(struct cmd_process), GFP_ATOMIC); + + if (!proc) { + kfree(temp_req); + return -ENOMEM; + } + + proc->rq = temp_req; + proc->domid = domid; + + INIT_WORK(&(proc->work), cmd_process_work); + + queue_work(hy_drv_priv->work_queue, &(proc->work)); + + return req->cmd; +} diff --git a/drivers/dma-buf/hyper_dmabuf/hyper_dmabuf_msg.h b/drivers/dma-buf/hyper_dmabuf/hyper_dmabuf_msg.h new file mode 100644 index 0000000000000..9c8a76bf261e9 --- /dev/null +++ b/drivers/dma-buf/hyper_dmabuf/hyper_dmabuf_msg.h @@ -0,0 +1,87 @@ +/* + * Copyright © 2017 Intel Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. + * + */ + +#ifndef __HYPER_DMABUF_MSG_H__ +#define __HYPER_DMABUF_MSG_H__ + +#define MAX_NUMBER_OF_OPERANDS 64 + +struct hyper_dmabuf_req { + unsigned int req_id; + unsigned int stat; + unsigned int cmd; + unsigned int op[MAX_NUMBER_OF_OPERANDS]; +}; + +struct hyper_dmabuf_resp { + unsigned int resp_id; + unsigned int stat; + unsigned int cmd; + unsigned int op[MAX_NUMBER_OF_OPERANDS]; +}; + +enum hyper_dmabuf_command { + HYPER_DMABUF_EXPORT = 0x10, + HYPER_DMABUF_EXPORT_FD, + HYPER_DMABUF_EXPORT_FD_FAILED, + HYPER_DMABUF_NOTIFY_UNEXPORT, + HYPER_DMABUF_OPS_TO_REMOTE, + HYPER_DMABUF_OPS_TO_SOURCE, +}; + +enum hyper_dmabuf_ops { + HYPER_DMABUF_OPS_ATTACH = 0x1000, + HYPER_DMABUF_OPS_DETACH, + HYPER_DMABUF_OPS_MAP, + HYPER_DMABUF_OPS_UNMAP, + HYPER_DMABUF_OPS_RELEASE, + HYPER_DMABUF_OPS_BEGIN_CPU_ACCESS, + HYPER_DMABUF_OPS_END_CPU_ACCESS, + HYPER_DMABUF_OPS_KMAP_ATOMIC, + HYPER_DMABUF_OPS_KUNMAP_ATOMIC, + HYPER_DMABUF_OPS_KMAP, + HYPER_DMABUF_OPS_KUNMAP, + HYPER_DMABUF_OPS_MMAP, + HYPER_DMABUF_OPS_VMAP, + HYPER_DMABUF_OPS_VUNMAP, +}; + +enum hyper_dmabuf_req_feedback { + HYPER_DMABUF_REQ_PROCESSED = 0x100, + HYPER_DMABUF_REQ_NEEDS_FOLLOW_UP, + HYPER_DMABUF_REQ_ERROR, + HYPER_DMABUF_REQ_NOT_RESPONDED +}; + +/* create a request packet with given command and operands */ +void hyper_dmabuf_create_req(struct hyper_dmabuf_req *req, + enum hyper_dmabuf_command command, + int *operands); + +/* parse incoming request packet (or response) and take + * appropriate actions for those + */ +int hyper_dmabuf_msg_parse(int domid, struct hyper_dmabuf_req *req); + +#endif // __HYPER_DMABUF_MSG_H__ diff --git a/drivers/dma-buf/hyper_dmabuf/hyper_dmabuf_ops.c b/drivers/dma-buf/hyper_dmabuf/hyper_dmabuf_ops.c new 
file mode 100644 index 0000000000000..3bd13c584ffc3 --- /dev/null +++ b/drivers/dma-buf/hyper_dmabuf/hyper_dmabuf_ops.c @@ -0,0 +1,409 @@ +/* + * Copyright © 2017 Intel Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. 
+ * + * Authors: + * Dongwon Kim + * Mateusz Polrola + * + */ + +#include +#include +#include +#include +#include +#include "hyper_dmabuf_drv.h" +#include "hyper_dmabuf_struct.h" +#include "hyper_dmabuf_ops.h" +#include "hyper_dmabuf_sgl_proc.h" +#include "hyper_dmabuf_id.h" +#include "hyper_dmabuf_msg.h" +#include "hyper_dmabuf_list.h" + +#define WAIT_AFTER_SYNC_REQ 0 +#define REFS_PER_PAGE (PAGE_SIZE/sizeof(grant_ref_t)) + +static int dmabuf_refcount(struct dma_buf *dma_buf) +{ + if ((dma_buf != NULL) && (dma_buf->file != NULL)) + return file_count(dma_buf->file); + + return -EINVAL; +} + +static int sync_request(hyper_dmabuf_id_t hid, int dmabuf_ops) +{ + struct hyper_dmabuf_req *req; + struct hyper_dmabuf_bknd_ops *bknd_ops = hy_drv_priv->bknd_ops; + int op[5]; + int i; + int ret; + + op[0] = hid.id; + + for (i = 0; i < 3; i++) + op[i+1] = hid.rng_key[i]; + + op[4] = dmabuf_ops; + + req = kcalloc(1, sizeof(*req), GFP_KERNEL); + + if (!req) + return -ENOMEM; + + hyper_dmabuf_create_req(req, HYPER_DMABUF_OPS_TO_SOURCE, &op[0]); + + /* send request and wait for a response */ + ret = bknd_ops->send_req(HYPER_DMABUF_DOM_ID(hid), req, + WAIT_AFTER_SYNC_REQ); + + if (ret < 0) { + dev_dbg(hy_drv_priv->dev, + "dmabuf sync request failed:%d\n", req->op[4]); + } + + kfree(req); + + return ret; +} + +static int hyper_dmabuf_ops_attach(struct dma_buf *dmabuf, +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 17, 0) + struct device *dev, +#endif + struct dma_buf_attachment *attach) +{ + struct imported_sgt_info *imported; + int ret; + + if (!attach->dmabuf->priv) + return -EINVAL; + + imported = (struct imported_sgt_info *)attach->dmabuf->priv; + + ret = sync_request(imported->hid, HYPER_DMABUF_OPS_ATTACH); + + return ret; +} + +static void hyper_dmabuf_ops_detach(struct dma_buf *dmabuf, + struct dma_buf_attachment *attach) +{ + struct imported_sgt_info *imported; + + if (!attach->dmabuf->priv) + return; + + imported = (struct imported_sgt_info *)attach->dmabuf->priv; + + 
sync_request(imported->hid, HYPER_DMABUF_OPS_DETACH); +} + +static struct sg_table *hyper_dmabuf_ops_map( + struct dma_buf_attachment *attachment, + enum dma_data_direction dir) +{ + struct sg_table *st; + struct imported_sgt_info *imported; + struct pages_info *pg_info; + int ret; + + if (!attachment->dmabuf->priv) + return NULL; + + imported = (struct imported_sgt_info *)attachment->dmabuf->priv; + + /* extract pages from sgt */ + pg_info = hyper_dmabuf_ext_pgs(imported->sgt); + + if (!pg_info) + return NULL; + + /* create a new sg_table with extracted pages */ + st = hyper_dmabuf_create_sgt(pg_info->pgs, pg_info->frst_ofst, + pg_info->last_len, pg_info->nents); + if (!st) + goto err_free_sg; + + if (!dma_map_sg(attachment->dev, st->sgl, st->nents, dir)) + goto err_free_sg; + + ret = sync_request(imported->hid, HYPER_DMABUF_OPS_MAP); + + kfree(pg_info->pgs); + kfree(pg_info); + + return st; + +err_free_sg: + if (st) { + sg_free_table(st); + kfree(st); + } + + kfree(pg_info->pgs); + kfree(pg_info); + + return NULL; +} + +static void hyper_dmabuf_ops_unmap(struct dma_buf_attachment *attachment, + struct sg_table *sg, + enum dma_data_direction dir) +{ + struct imported_sgt_info *imported; + + if (!attachment->dmabuf->priv) + return; + + imported = (struct imported_sgt_info *)attachment->dmabuf->priv; + + dma_unmap_sg(attachment->dev, sg->sgl, sg->nents, dir); + + sg_free_table(sg); + kfree(sg); + + sync_request(imported->hid, HYPER_DMABUF_OPS_UNMAP); +} + +static void hyper_dmabuf_ops_release(struct dma_buf *dma_buf) +{ + struct imported_sgt_info *imported; + struct hyper_dmabuf_bknd_ops *bknd_ops = hy_drv_priv->bknd_ops; + int finish; + + if (!dma_buf->priv) + return; + + imported = (struct imported_sgt_info *)dma_buf->priv; + + if (!dmabuf_refcount(imported->dma_buf)) + imported->dma_buf = NULL; + + imported->importers--; + + if (imported->importers == 0) { + bknd_ops->unmap_shared_pages(&imported->refs_info, + imported->nents); + + if (imported->sgt) { + 
sg_free_table(imported->sgt); + kfree(imported->sgt); + imported->sgt = NULL; + } + } + + finish = imported && !imported->valid && + !imported->importers; + + sync_request(imported->hid, HYPER_DMABUF_OPS_RELEASE); + + /* + * Check if buffer is still valid and if not remove it + * from imported list. That has to be done after sending + * sync request + */ + if (finish) { + hyper_dmabuf_remove_imported(imported->hid); + kfree(imported->priv); + kfree(imported); + } +} + +static int hyper_dmabuf_ops_begin_cpu_access(struct dma_buf *dmabuf, + enum dma_data_direction dir) +{ + struct imported_sgt_info *imported; + int ret; + + if (!dmabuf->priv) + return -EINVAL; + + imported = (struct imported_sgt_info *)dmabuf->priv; + + ret = sync_request(imported->hid, HYPER_DMABUF_OPS_BEGIN_CPU_ACCESS); + + return ret; +} + +static int hyper_dmabuf_ops_end_cpu_access(struct dma_buf *dmabuf, + enum dma_data_direction dir) +{ + struct imported_sgt_info *imported; + + if (!dmabuf->priv) + return -EINVAL; + + imported = (struct imported_sgt_info *)dmabuf->priv; + + return sync_request(imported->hid, HYPER_DMABUF_OPS_END_CPU_ACCESS); +} + +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 17, 0) +static void *hyper_dmabuf_ops_kmap_atomic(struct dma_buf *dmabuf, + unsigned long pgnum) +{ + struct imported_sgt_info *imported; + + if (!dmabuf->priv) + return NULL; + + imported = (struct imported_sgt_info *)dmabuf->priv; + + sync_request(imported->hid, HYPER_DMABUF_OPS_KMAP_ATOMIC); + + /* TODO: NULL for now. 
Need to return the addr of mapped region */ + return NULL; +} + +static void hyper_dmabuf_ops_kunmap_atomic(struct dma_buf *dmabuf, + unsigned long pgnum, void *vaddr) +{ + struct imported_sgt_info *imported; + + if (!dmabuf->priv) + return; + + imported = (struct imported_sgt_info *)dmabuf->priv; + + sync_request(imported->hid, HYPER_DMABUF_OPS_KUNMAP_ATOMIC); +} +#endif + +static void *hyper_dmabuf_ops_kmap(struct dma_buf *dmabuf, unsigned long pgnum) +{ + struct imported_sgt_info *imported; + + if (!dmabuf->priv) + return NULL; + + imported = (struct imported_sgt_info *)dmabuf->priv; + + sync_request(imported->hid, HYPER_DMABUF_OPS_KMAP); + + /* for now NULL.. need to return the address of mapped region */ + return NULL; +} + +static void hyper_dmabuf_ops_kunmap(struct dma_buf *dmabuf, unsigned long pgnum, + void *vaddr) +{ + struct imported_sgt_info *imported; + + if (!dmabuf->priv) + return; + + imported = (struct imported_sgt_info *)dmabuf->priv; + + sync_request(imported->hid, HYPER_DMABUF_OPS_KUNMAP); +} + +static int hyper_dmabuf_ops_mmap(struct dma_buf *dmabuf, + struct vm_area_struct *vma) +{ + struct imported_sgt_info *imported; + int ret; + + if (!dmabuf->priv) + return -EINVAL; + + imported = (struct imported_sgt_info *)dmabuf->priv; + + ret = sync_request(imported->hid, HYPER_DMABUF_OPS_MMAP); + + return ret; +} + +static void *hyper_dmabuf_ops_vmap(struct dma_buf *dmabuf) +{ + struct imported_sgt_info *imported; + + if (!dmabuf->priv) + return NULL; + + imported = (struct imported_sgt_info *)dmabuf->priv; + + sync_request(imported->hid, HYPER_DMABUF_OPS_VMAP); + + return NULL; +} + +static void hyper_dmabuf_ops_vunmap(struct dma_buf *dmabuf, void *vaddr) +{ + struct imported_sgt_info *imported; + + if (!dmabuf->priv) + return; + + imported = (struct imported_sgt_info *)dmabuf->priv; + + sync_request(imported->hid, HYPER_DMABUF_OPS_VUNMAP); +} + +static const struct dma_buf_ops hyper_dmabuf_ops = { + .attach = hyper_dmabuf_ops_attach, + .detach = 
hyper_dmabuf_ops_detach, + .map_dma_buf = hyper_dmabuf_ops_map, + .unmap_dma_buf = hyper_dmabuf_ops_unmap, + .release = hyper_dmabuf_ops_release, + .begin_cpu_access = hyper_dmabuf_ops_begin_cpu_access, + .end_cpu_access = hyper_dmabuf_ops_end_cpu_access, +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 17, 0) + .map_atomic = hyper_dmabuf_ops_kmap_atomic, + .unmap_atomic = hyper_dmabuf_ops_kunmap_atomic, +#endif + .map = hyper_dmabuf_ops_kmap, + .unmap = hyper_dmabuf_ops_kunmap, + .mmap = hyper_dmabuf_ops_mmap, + .vmap = hyper_dmabuf_ops_vmap, + .vunmap = hyper_dmabuf_ops_vunmap, +}; + +/* exporting dmabuf as fd */ +int hyper_dmabuf_export_fd(struct imported_sgt_info *imported, int flags) +{ + int fd = -1; + + /* call hyper_dmabuf_export_dmabuf and create + * and bind a handle for it then release + */ + hyper_dmabuf_export_dma_buf(imported); + + if (imported->dma_buf) + fd = dma_buf_fd(imported->dma_buf, flags); + + return fd; +} + +void hyper_dmabuf_export_dma_buf(struct imported_sgt_info *imported) +{ + DEFINE_DMA_BUF_EXPORT_INFO(exp_info); + + exp_info.ops = &hyper_dmabuf_ops; + + /* multiple of PAGE_SIZE, not considering offset */ + exp_info.size = imported->sgt->nents * PAGE_SIZE; + exp_info.flags = /* not sure about flag */ 0; + exp_info.priv = imported; + + imported->dma_buf = dma_buf_export(&exp_info); +} diff --git a/drivers/dma-buf/hyper_dmabuf/hyper_dmabuf_ops.h b/drivers/dma-buf/hyper_dmabuf/hyper_dmabuf_ops.h new file mode 100644 index 0000000000000..c5505a41f0fe6 --- /dev/null +++ b/drivers/dma-buf/hyper_dmabuf/hyper_dmabuf_ops.h @@ -0,0 +1,32 @@ +/* + * Copyright © 2017 Intel Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit 
persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. + * + */ + +#ifndef __HYPER_DMABUF_OPS_H__ +#define __HYPER_DMABUF_OPS_H__ + +int hyper_dmabuf_export_fd(struct imported_sgt_info *imported, int flags); + +void hyper_dmabuf_export_dma_buf(struct imported_sgt_info *imported); + +#endif /* __HYPER_DMABUF_IMP_H__ */ diff --git a/drivers/dma-buf/hyper_dmabuf/hyper_dmabuf_query.c b/drivers/dma-buf/hyper_dmabuf/hyper_dmabuf_query.c new file mode 100644 index 0000000000000..1f2f56b1162d2 --- /dev/null +++ b/drivers/dma-buf/hyper_dmabuf/hyper_dmabuf_query.c @@ -0,0 +1,172 @@ +/* + * Copyright © 2017 Intel Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. + * + * Authors: + * Dongwon Kim + * Mateusz Polrola + * + */ + +#include +#include +#include "hyper_dmabuf_drv.h" +#include "hyper_dmabuf_struct.h" +#include "hyper_dmabuf_id.h" + +#define HYPER_DMABUF_SIZE(nents, first_offset, last_len) \ + ((nents)*PAGE_SIZE - (first_offset) - PAGE_SIZE + (last_len)) + +int hyper_dmabuf_query_exported(struct exported_sgt_info *exported, + int query, unsigned long *info) +{ + switch (query) { + case HYPER_DMABUF_QUERY_TYPE: + *info = EXPORTED; + break; + + /* exporting domain of this specific dmabuf*/ + case HYPER_DMABUF_QUERY_EXPORTER: + *info = HYPER_DMABUF_DOM_ID(exported->hid); + break; + + /* importing domain of this specific dmabuf */ + case HYPER_DMABUF_QUERY_IMPORTER: + *info = exported->rdomid; + break; + + /* size of dmabuf in byte */ + case HYPER_DMABUF_QUERY_SIZE: + *info = exported->dma_buf->size; + break; + + /* whether the buffer is used by importer */ + case HYPER_DMABUF_QUERY_BUSY: + *info = (exported->active > 0); + break; + + /* whether the buffer is unexported */ + case HYPER_DMABUF_QUERY_UNEXPORTED: + *info = !exported->valid; + break; + + /* whether the buffer is scheduled to be unexported */ + case HYPER_DMABUF_QUERY_DELAYED_UNEXPORTED: + *info = !exported->unexport_sched; + break; + + /* size of private info attached to buffer */ + case HYPER_DMABUF_QUERY_PRIV_INFO_SIZE: + *info = exported->sz_priv; + break; + + /* copy private info attached to buffer */ + case HYPER_DMABUF_QUERY_PRIV_INFO: + if (exported->sz_priv > 0) { + 
int n; + + n = copy_to_user((void __user *) *info, + exported->priv, + exported->sz_priv); + if (n != 0) + return -EINVAL; + } + break; + + default: + return -EINVAL; + } + + return 0; +} + + +int hyper_dmabuf_query_imported(struct imported_sgt_info *imported, + int query, unsigned long *info) +{ + switch (query) { + case HYPER_DMABUF_QUERY_TYPE: + *info = IMPORTED; + break; + + /* exporting domain of this specific dmabuf*/ + case HYPER_DMABUF_QUERY_EXPORTER: + *info = HYPER_DMABUF_DOM_ID(imported->hid); + break; + + /* importing domain of this specific dmabuf */ + case HYPER_DMABUF_QUERY_IMPORTER: + *info = hy_drv_priv->domid; + break; + + /* size of dmabuf in byte */ + case HYPER_DMABUF_QUERY_SIZE: + if (imported->dma_buf) { + /* if local dma_buf is created (if it's + * ever mapped), retrieve it directly + * from struct dma_buf * + */ + *info = imported->dma_buf->size; + } else { + /* calcuate it from given nents, frst_ofst + * and last_len + */ + *info = HYPER_DMABUF_SIZE(imported->nents, + imported->frst_ofst, + imported->last_len); + } + break; + + /* whether the buffer is used or not */ + case HYPER_DMABUF_QUERY_BUSY: + /* checks if it's used by importer */ + *info = (imported->importers > 0); + break; + + /* whether the buffer is unexported */ + case HYPER_DMABUF_QUERY_UNEXPORTED: + *info = !imported->valid; + break; + + /* size of private info attached to buffer */ + case HYPER_DMABUF_QUERY_PRIV_INFO_SIZE: + *info = imported->sz_priv; + break; + + /* copy private info attached to buffer */ + case HYPER_DMABUF_QUERY_PRIV_INFO: + if (imported->sz_priv > 0) { + int n; + + n = copy_to_user((void __user *)*info, + imported->priv, + imported->sz_priv); + if (n != 0) + return -EINVAL; + } + break; + + default: + return -EINVAL; + } + + return 0; +} diff --git a/drivers/dma-buf/hyper_dmabuf/hyper_dmabuf_query.h b/drivers/dma-buf/hyper_dmabuf/hyper_dmabuf_query.h new file mode 100644 index 0000000000000..65ae738f8f534 --- /dev/null +++ 
b/drivers/dma-buf/hyper_dmabuf/hyper_dmabuf_query.h @@ -0,0 +1,10 @@ +#ifndef __HYPER_DMABUF_QUERY_H__ +#define __HYPER_DMABUF_QUERY_H__ + +int hyper_dmabuf_query_imported(struct imported_sgt_info *imported, + int query, unsigned long *info); + +int hyper_dmabuf_query_exported(struct exported_sgt_info *exported, + int query, unsigned long *info); + +#endif // __HYPER_DMABUF_QUERY_H__ diff --git a/drivers/dma-buf/hyper_dmabuf/hyper_dmabuf_remote_sync.c b/drivers/dma-buf/hyper_dmabuf/hyper_dmabuf_remote_sync.c new file mode 100644 index 0000000000000..3cd3d6c98c337 --- /dev/null +++ b/drivers/dma-buf/hyper_dmabuf/hyper_dmabuf_remote_sync.c @@ -0,0 +1,327 @@ +/* + * Copyright © 2017 Intel Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. 
+ * + * Authors: + * Dongwon Kim + * Mateusz Polrola + * + */ + +#include +#include +#include +#include +#include +#include "hyper_dmabuf_drv.h" +#include "hyper_dmabuf_struct.h" +#include "hyper_dmabuf_list.h" +#include "hyper_dmabuf_msg.h" +#include "hyper_dmabuf_id.h" +#include "hyper_dmabuf_sgl_proc.h" + +/* Whenever importer does dma operations from remote domain, + * a notification is sent to the exporter so that exporter + * issues equivalent dma operation on the original dma buf + * for indirect synchronization via shadow operations. + * + * All ptrs and references (e.g struct sg_table*, + * struct dma_buf_attachment) created via these operations on + * exporter's side are kept in stack (implemented as circular + * linked-lists) separately so that those can be re-referenced + * later when unmapping operations are invoked to free those. + * + * The very first element on the bottom of each stack holds + * is what is created when initial exporting is issued so it + * should not be modified or released by this fuction. 
+ */ +int hyper_dmabuf_remote_sync(hyper_dmabuf_id_t hid, int ops) +{ + struct exported_sgt_info *exported; + struct sgt_list *sgtl; + struct attachment_list *attachl; + struct kmap_vaddr_list *va_kmapl; + struct vmap_vaddr_list *va_vmapl; + int ret; + + /* find a coresponding SGT for the id */ + exported = hyper_dmabuf_find_exported(hid); + + if (!exported) { + dev_err(hy_drv_priv->dev, + "dmabuf remote sync::can't find exported list\n"); + return -ENOENT; + } + + switch (ops) { + case HYPER_DMABUF_OPS_ATTACH: + attachl = kcalloc(1, sizeof(*attachl), GFP_KERNEL); + + if (!attachl) + return -ENOMEM; + + attachl->attach = dma_buf_attach(exported->dma_buf, + hy_drv_priv->dev); + + if (!attachl->attach) { + kfree(attachl); + dev_err(hy_drv_priv->dev, + "remote sync::HYPER_DMABUF_OPS_ATTACH\n"); + return -ENOMEM; + } + + list_add(&attachl->list, &exported->active_attached->list); + break; + + case HYPER_DMABUF_OPS_DETACH: + if (list_empty(&exported->active_attached->list)) { + dev_err(hy_drv_priv->dev, + "remote sync::HYPER_DMABUF_OPS_DETACH\n"); + dev_err(hy_drv_priv->dev, + "no more dmabuf attachment left to be detached\n"); + return -EFAULT; + } + + attachl = list_first_entry(&exported->active_attached->list, + struct attachment_list, list); + + dma_buf_detach(exported->dma_buf, attachl->attach); + list_del(&attachl->list); + kfree(attachl); + break; + + case HYPER_DMABUF_OPS_MAP: + if (list_empty(&exported->active_attached->list)) { + dev_err(hy_drv_priv->dev, + "remote sync::HYPER_DMABUF_OPS_MAP\n"); + dev_err(hy_drv_priv->dev, + "no more dmabuf attachment left to be mapped\n"); + return -EFAULT; + } + + attachl = list_first_entry(&exported->active_attached->list, + struct attachment_list, list); + + sgtl = kcalloc(1, sizeof(*sgtl), GFP_KERNEL); + + if (!sgtl) + return -ENOMEM; + + sgtl->sgt = dma_buf_map_attachment(attachl->attach, + DMA_BIDIRECTIONAL); + if (!sgtl->sgt) { + kfree(sgtl); + dev_err(hy_drv_priv->dev, + "remote sync::HYPER_DMABUF_OPS_MAP\n"); + 
return -ENOMEM; + } + list_add(&sgtl->list, &exported->active_sgts->list); + break; + + case HYPER_DMABUF_OPS_UNMAP: + if (list_empty(&exported->active_sgts->list) || + list_empty(&exported->active_attached->list)) { + dev_err(hy_drv_priv->dev, + "remote sync::HYPER_DMABUF_OPS_UNMAP\n"); + dev_err(hy_drv_priv->dev, + "no SGT or attach left to be unmapped\n"); + return -EFAULT; + } + + attachl = list_first_entry(&exported->active_attached->list, + struct attachment_list, list); + sgtl = list_first_entry(&exported->active_sgts->list, + struct sgt_list, list); + + dma_buf_unmap_attachment(attachl->attach, sgtl->sgt, + DMA_BIDIRECTIONAL); + list_del(&sgtl->list); + kfree(sgtl); + break; + + case HYPER_DMABUF_OPS_RELEASE: + dev_dbg(hy_drv_priv->dev, + "id:%d key:%d %d %d} released, ref left: %d\n", + exported->hid.id, exported->hid.rng_key[0], + exported->hid.rng_key[1], exported->hid.rng_key[2], + exported->active - 1); + + exported->active--; + + /* If there are still importers just break, if no then + * continue with final cleanup + */ + if (exported->active) + break; + + /* Importer just released buffer fd, check if there is + * any other importer still using it. + * If not and buffer was unexported, clean up shared + * data and remove that buffer. 
+ */ + dev_dbg(hy_drv_priv->dev, + "Buffer {id:%d key:%d %d %d} final released\n", + exported->hid.id, exported->hid.rng_key[0], + exported->hid.rng_key[1], exported->hid.rng_key[2]); + + if (!exported->valid && !exported->active && + !exported->unexport_sched) { + hyper_dmabuf_cleanup_sgt_info(exported, false); + hyper_dmabuf_remove_exported(hid); + kfree(exported); + /* store hyper_dmabuf_id in the list for reuse */ + hyper_dmabuf_store_hid(hid); + } + + break; + + case HYPER_DMABUF_OPS_BEGIN_CPU_ACCESS: + ret = dma_buf_begin_cpu_access(exported->dma_buf, + DMA_BIDIRECTIONAL); + if (ret) { + dev_err(hy_drv_priv->dev, + "HYPER_DMABUF_OPS_BEGIN_CPU_ACCESS\n"); + return ret; + } + break; + + case HYPER_DMABUF_OPS_END_CPU_ACCESS: + ret = dma_buf_end_cpu_access(exported->dma_buf, + DMA_BIDIRECTIONAL); + if (ret) { + dev_err(hy_drv_priv->dev, + "HYPER_DMABUF_OPS_END_CPU_ACCESS\n"); + return ret; + } + break; + + case HYPER_DMABUF_OPS_KMAP_ATOMIC: + case HYPER_DMABUF_OPS_KMAP: + va_kmapl = kcalloc(1, sizeof(*va_kmapl), GFP_KERNEL); + if (!va_kmapl) + return -ENOMEM; + + /* dummy kmapping of 1 page */ +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 17, 0) + if (ops == HYPER_DMABUF_OPS_KMAP_ATOMIC) + va_kmapl->vaddr = dma_buf_kmap_atomic( + exported->dma_buf, 1); + else +#endif + va_kmapl->vaddr = dma_buf_kmap( + exported->dma_buf, 1); + + if (!va_kmapl->vaddr) { + kfree(va_kmapl); + dev_err(hy_drv_priv->dev, + "HYPER_DMABUF_OPS_KMAP(_ATOMIC)\n"); + return -ENOMEM; + } + list_add(&va_kmapl->list, &exported->va_kmapped->list); + break; + + case HYPER_DMABUF_OPS_KUNMAP_ATOMIC: + case HYPER_DMABUF_OPS_KUNMAP: + if (list_empty(&exported->va_kmapped->list)) { + dev_err(hy_drv_priv->dev, + "HYPER_DMABUF_OPS_KUNMAP(_ATOMIC)\n"); + dev_err(hy_drv_priv->dev, + "no more dmabuf VA to be freed\n"); + return -EFAULT; + } + + va_kmapl = list_first_entry(&exported->va_kmapped->list, + struct kmap_vaddr_list, list); + if (!va_kmapl->vaddr) { + dev_err(hy_drv_priv->dev, + 
"HYPER_DMABUF_OPS_KUNMAP(_ATOMIC)\n"); + return PTR_ERR(va_kmapl->vaddr); + } + +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 17, 0) + /* unmapping 1 page */ + if (ops == HYPER_DMABUF_OPS_KUNMAP_ATOMIC) + dma_buf_kunmap_atomic(exported->dma_buf, + 1, va_kmapl->vaddr); + else +#endif + dma_buf_kunmap(exported->dma_buf, + 1, va_kmapl->vaddr); + + list_del(&va_kmapl->list); + kfree(va_kmapl); + break; + + case HYPER_DMABUF_OPS_MMAP: + /* currently not supported: looking for a way to create + * a dummy vma + */ + dev_warn(hy_drv_priv->dev, + "remote sync::sychronized mmap is not supported\n"); + break; + + case HYPER_DMABUF_OPS_VMAP: + va_vmapl = kcalloc(1, sizeof(*va_vmapl), GFP_KERNEL); + + if (!va_vmapl) + return -ENOMEM; + + /* dummy vmapping */ + va_vmapl->vaddr = dma_buf_vmap(exported->dma_buf); + + if (!va_vmapl->vaddr) { + kfree(va_vmapl); + dev_err(hy_drv_priv->dev, + "remote sync::HYPER_DMABUF_OPS_VMAP\n"); + return -ENOMEM; + } + list_add(&va_vmapl->list, &exported->va_vmapped->list); + break; + + case HYPER_DMABUF_OPS_VUNMAP: + if (list_empty(&exported->va_vmapped->list)) { + dev_err(hy_drv_priv->dev, + "remote sync::HYPER_DMABUF_OPS_VUNMAP\n"); + dev_err(hy_drv_priv->dev, + "no more dmabuf VA to be freed\n"); + return -EFAULT; + } + va_vmapl = list_first_entry(&exported->va_vmapped->list, + struct vmap_vaddr_list, list); + if (!va_vmapl || va_vmapl->vaddr == NULL) { + dev_err(hy_drv_priv->dev, + "remote sync::HYPER_DMABUF_OPS_VUNMAP\n"); + return -EFAULT; + } + + dma_buf_vunmap(exported->dma_buf, va_vmapl->vaddr); + + list_del(&va_vmapl->list); + kfree(va_vmapl); + break; + + default: + /* program should not get here */ + break; + } + + return 0; +} diff --git a/drivers/dma-buf/hyper_dmabuf/hyper_dmabuf_remote_sync.h b/drivers/dma-buf/hyper_dmabuf/hyper_dmabuf_remote_sync.h new file mode 100644 index 0000000000000..366389287f4e4 --- /dev/null +++ b/drivers/dma-buf/hyper_dmabuf/hyper_dmabuf_remote_sync.h @@ -0,0 +1,30 @@ +/* + * Copyright © 2017 Intel 
Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. 
+ * + */ + +#ifndef __HYPER_DMABUF_REMOTE_SYNC_H__ +#define __HYPER_DMABUF_REMOTE_SYNC_H__ + +int hyper_dmabuf_remote_sync(hyper_dmabuf_id_t hid, int ops); + +#endif // __HYPER_DMABUF_REMOTE_SYNC_H__ diff --git a/drivers/dma-buf/hyper_dmabuf/hyper_dmabuf_sgl_proc.c b/drivers/dma-buf/hyper_dmabuf/hyper_dmabuf_sgl_proc.c new file mode 100644 index 0000000000000..c1887d1ad7097 --- /dev/null +++ b/drivers/dma-buf/hyper_dmabuf/hyper_dmabuf_sgl_proc.c @@ -0,0 +1,261 @@ +/* + * Copyright © 2017 Intel Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. 
+ * + * Authors: + * Dongwon Kim + * Mateusz Polrola + * + */ + +#include +#include +#include +#include +#include "hyper_dmabuf_drv.h" +#include "hyper_dmabuf_struct.h" +#include "hyper_dmabuf_sgl_proc.h" + +#define REFS_PER_PAGE (PAGE_SIZE/sizeof(grant_ref_t)) + +/* return total number of pages referenced by a sgt + * for pre-calculation of # of pages behind a given sgt + */ +static int get_num_pgs(struct sg_table *sgt) +{ + struct scatterlist *sgl; + int length, i; + /* at least one page */ + int num_pages = 1; + + sgl = sgt->sgl; + + length = sgl->length - PAGE_SIZE + sgl->offset; + + /* round-up */ + num_pages += ((length + PAGE_SIZE - 1)/PAGE_SIZE); + + for (i = 1; i < sgt->nents; i++) { + sgl = sg_next(sgl); + + /* round-up */ + num_pages += ((sgl->length + PAGE_SIZE - 1) / + PAGE_SIZE); /* round-up */ + } + + return num_pages; +} + +/* extract pages directly from struct sg_table */ +struct pages_info *hyper_dmabuf_ext_pgs(struct sg_table *sgt) +{ + struct pages_info *pg_info; + int i, j, k; + int length; + struct scatterlist *sgl; + + pg_info = kmalloc(sizeof(*pg_info), GFP_KERNEL); + if (!pg_info) + return NULL; + + pg_info->pgs = kmalloc_array(get_num_pgs(sgt), + sizeof(struct page *), + GFP_KERNEL); + + if (!pg_info->pgs) { + kfree(pg_info); + return NULL; + } + + sgl = sgt->sgl; + + pg_info->nents = 1; + pg_info->frst_ofst = sgl->offset; + pg_info->pgs[0] = sg_page(sgl); + length = sgl->length - PAGE_SIZE + sgl->offset; + i = 1; + + while (length > 0) { + pg_info->pgs[i] = nth_page(sg_page(sgl), i); + length -= PAGE_SIZE; + pg_info->nents++; + i++; + } + + for (j = 1; j < sgt->nents; j++) { + sgl = sg_next(sgl); + pg_info->pgs[i++] = sg_page(sgl); + length = sgl->length - PAGE_SIZE; + pg_info->nents++; + k = 1; + + while (length > 0) { + pg_info->pgs[i++] = nth_page(sg_page(sgl), k++); + length -= PAGE_SIZE; + pg_info->nents++; + } + } + + /* + * lenght at that point will be 0 or negative, + * so to calculate last page size just add it to PAGE_SIZE + */ 
+ pg_info->last_len = PAGE_SIZE + length; + + return pg_info; +} + +/* create sg_table with given pages and other parameters */ +struct sg_table *hyper_dmabuf_create_sgt(struct page **pgs, + int frst_ofst, int last_len, + int nents) +{ + struct sg_table *sgt; + struct scatterlist *sgl; + int i, ret; + + sgt = kmalloc(sizeof(struct sg_table), GFP_KERNEL); + if (!sgt) + return NULL; + + ret = sg_alloc_table(sgt, nents, GFP_KERNEL); + if (ret) { + if (sgt) { + sg_free_table(sgt); + kfree(sgt); + } + + return NULL; + } + + sgl = sgt->sgl; + + sg_set_page(sgl, pgs[0], PAGE_SIZE-frst_ofst, frst_ofst); + + for (i = 1; i < nents-1; i++) { + sgl = sg_next(sgl); + sg_set_page(sgl, pgs[i], PAGE_SIZE, 0); + } + + if (nents > 1) /* more than one page */ { + sgl = sg_next(sgl); + sg_set_page(sgl, pgs[i], last_len, 0); + } + + return sgt; +} + +int hyper_dmabuf_cleanup_sgt_info(struct exported_sgt_info *exported, + int force) +{ + struct sgt_list *sgtl; + struct attachment_list *attachl; + struct kmap_vaddr_list *va_kmapl; + struct vmap_vaddr_list *va_vmapl; + struct hyper_dmabuf_bknd_ops *bknd_ops = hy_drv_priv->bknd_ops; + + if (!exported) { + dev_err(hy_drv_priv->dev, "invalid hyper_dmabuf_id\n"); + return -EINVAL; + } + + /* if force != 1, sgt_info can be released only if + * there's no activity on exported dma-buf on importer + * side. 
+ */ + if (!force && + exported->active) { + dev_warn(hy_drv_priv->dev, + "dma-buf is used by importer\n"); + + return -EPERM; + } + + /* force == 1 is not recommended */ + while (!list_empty(&exported->va_kmapped->list)) { + va_kmapl = list_first_entry(&exported->va_kmapped->list, + struct kmap_vaddr_list, list); + + dma_buf_kunmap(exported->dma_buf, 1, va_kmapl->vaddr); + list_del(&va_kmapl->list); + kfree(va_kmapl); + } + + while (!list_empty(&exported->va_vmapped->list)) { + va_vmapl = list_first_entry(&exported->va_vmapped->list, + struct vmap_vaddr_list, list); + + dma_buf_vunmap(exported->dma_buf, va_vmapl->vaddr); + list_del(&va_vmapl->list); + kfree(va_vmapl); + } + + while (!list_empty(&exported->active_sgts->list)) { + attachl = list_first_entry(&exported->active_attached->list, + struct attachment_list, list); + + sgtl = list_first_entry(&exported->active_sgts->list, + struct sgt_list, list); + + dma_buf_unmap_attachment(attachl->attach, sgtl->sgt, + DMA_BIDIRECTIONAL); + list_del(&sgtl->list); + kfree(sgtl); + } + + while (!list_empty(&exported->active_sgts->list)) { + attachl = list_first_entry(&exported->active_attached->list, + struct attachment_list, list); + + dma_buf_detach(exported->dma_buf, attachl->attach); + list_del(&attachl->list); + kfree(attachl); + } + + /* Start cleanup of buffer in reverse order to exporting */ + bknd_ops->unshare_pages(&exported->refs_info, exported->nents); + + /* unmap dma-buf */ + dma_buf_unmap_attachment(exported->active_attached->attach, + exported->active_sgts->sgt, + DMA_BIDIRECTIONAL); + + /* detatch dma-buf */ + dma_buf_detach(exported->dma_buf, exported->active_attached->attach); + + /* close connection to dma-buf completely */ + dma_buf_put(exported->dma_buf); + exported->dma_buf = NULL; + + kfree(exported->active_sgts); + kfree(exported->active_attached); + kfree(exported->va_kmapped); + kfree(exported->va_vmapped); + kfree(exported->priv); + + exported->active_sgts = NULL; + exported->active_attached = 
NULL; + exported->va_kmapped = NULL; + exported->va_vmapped = NULL; + exported->priv = NULL; + + return 0; +} diff --git a/drivers/dma-buf/hyper_dmabuf/hyper_dmabuf_sgl_proc.h b/drivers/dma-buf/hyper_dmabuf/hyper_dmabuf_sgl_proc.h new file mode 100644 index 0000000000000..869d98204e039 --- /dev/null +++ b/drivers/dma-buf/hyper_dmabuf/hyper_dmabuf_sgl_proc.h @@ -0,0 +1,41 @@ +/* + * Copyright © 2017 Intel Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. 
+ * + */ + +#ifndef __HYPER_DMABUF_IMP_H__ +#define __HYPER_DMABUF_IMP_H__ + +/* extract pages directly from struct sg_table */ +struct pages_info *hyper_dmabuf_ext_pgs(struct sg_table *sgt); + +/* create sg_table with given pages and other parameters */ +struct sg_table *hyper_dmabuf_create_sgt(struct page **pgs, + int frst_ofst, int last_len, + int nents); + +int hyper_dmabuf_cleanup_sgt_info(struct exported_sgt_info *exported, + int force); + +void hyper_dmabuf_free_sgt(struct sg_table *sgt); + +#endif /* __HYPER_DMABUF_IMP_H__ */ diff --git a/drivers/dma-buf/hyper_dmabuf/hyper_dmabuf_struct.h b/drivers/dma-buf/hyper_dmabuf/hyper_dmabuf_struct.h new file mode 100644 index 0000000000000..f7b7de0e1432e --- /dev/null +++ b/drivers/dma-buf/hyper_dmabuf/hyper_dmabuf_struct.h @@ -0,0 +1,141 @@ +/* + * Copyright © 2017 Intel Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. 
+ * + */ + +#ifndef __HYPER_DMABUF_STRUCT_H__ +#define __HYPER_DMABUF_STRUCT_H__ + +/* stack of mapped sgts */ +struct sgt_list { + struct sg_table *sgt; + struct list_head list; +}; + +/* stack of attachments */ +struct attachment_list { + struct dma_buf_attachment *attach; + struct list_head list; +}; + +/* stack of vaddr mapped via kmap */ +struct kmap_vaddr_list { + void *vaddr; + struct list_head list; +}; + +/* stack of vaddr mapped via vmap */ +struct vmap_vaddr_list { + void *vaddr; + struct list_head list; +}; + +/* Exporter builds pages_info before sharing pages */ +struct pages_info { + int frst_ofst; + int last_len; + int nents; + struct page **pgs; +}; + + +/* Exporter stores references to sgt in a hash table + * Exporter keeps these references for synchronization + * and tracking purposes + */ +struct exported_sgt_info { + hyper_dmabuf_id_t hid; + + /* VM ID of importer */ + int rdomid; + + struct dma_buf *dma_buf; + int nents; + + /* list for tracking activities on dma_buf */ + struct sgt_list *active_sgts; + struct attachment_list *active_attached; + struct kmap_vaddr_list *va_kmapped; + struct vmap_vaddr_list *va_vmapped; + + /* set to 0 when unexported. Importer doesn't + * do a new mapping of buffer if valid == false + */ + bool valid; + + /* active == true if the buffer is actively used + * (mapped) by importer + */ + int active; + + /* hypervisor specific reference data for shared pages */ + void *refs_info; + + struct delayed_work unexport; + bool unexport_sched; + + /* list for file pointers associated with all user space + * application that have exported this same buffer to + * another VM. This needs to be tracked to know whether + * the buffer can be completely freed. 
+ */ + struct file *filp; + + /* size of private */ + size_t sz_priv; + + /* private data associated with the exported buffer */ + char *priv; +}; + +/* imported_sgt_info contains information about imported DMA_BUF + * this info is kept in IMPORT list and asynchorously retrieved and + * used to map DMA_BUF on importer VM's side upon export fd ioctl + * request from user-space + */ + +struct imported_sgt_info { + hyper_dmabuf_id_t hid; /* unique id for shared dmabuf imported */ + + /* hypervisor-specific handle to pages */ + unsigned long ref_handle; + + /* offset and size info of DMA_BUF */ + int frst_ofst; + int last_len; + int nents; + + struct dma_buf *dma_buf; + struct sg_table *sgt; + + void *refs_info; + bool valid; + int importers; + + /* size of private */ + size_t sz_priv; + + /* private data associated with the exported buffer */ + char *priv; +}; + +#endif /* __HYPER_DMABUF_STRUCT_H__ */ diff --git a/drivers/dma-buf/hyper_dmabuf/virtio/hyper_dmabuf_virtio_be_drv.c b/drivers/dma-buf/hyper_dmabuf/virtio/hyper_dmabuf_virtio_be_drv.c new file mode 100644 index 0000000000000..5e6a25138cae3 --- /dev/null +++ b/drivers/dma-buf/hyper_dmabuf/virtio/hyper_dmabuf_virtio_be_drv.c @@ -0,0 +1,547 @@ +/* + * Copyright © 2017 Intel Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. + * + * Authors: + * Mateusz Polrola + * + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "../hyper_dmabuf_msg.h" +#include "../hyper_dmabuf_drv.h" +#include "../hyper_dmabuf_list.h" +#include "hyper_dmabuf_virtio_common.h" +#include "hyper_dmabuf_virtio_fe_list.h" +#include "hyper_dmabuf_virtio_shm.h" +#include "hyper_dmabuf_virtio_comm_ring.h" + +/* + * Identifies which queue is used for TX and RX + * Note: it is opposite regarding to frontent definition + */ +enum virio_queue_type { + HDMA_VIRTIO_RX_QUEUE = 0, + HDMA_VIRTIO_TX_QUEUE, + HDMA_VIRTIO_QUEUE_MAX +}; + +/* Data required for sending TX messages using virtqueues*/ +struct virtio_be_tx_data { + struct iovec tx_iov; + uint16_t tx_idx; +}; + +struct virtio_be_priv { + struct virtio_dev_info dev; + struct virtio_vq_info vqs[HDMA_VIRTIO_QUEUE_MAX]; + bool busy; + struct hyper_dmabuf_req *pending_tx_req; + struct virtio_comm_ring tx_ring; + struct mutex lock; +}; + + +/* + * Received response to TX request, + * or empty buffer to be used for TX requests in future + */ +static void virtio_be_handle_tx_kick(struct virtio_vq_info *vq, + struct virtio_fe_info *fe_info) +{ + struct virtio_be_priv *priv = fe_info->priv; + /* Fill last used buffer with received buffer details */ + struct virtio_be_tx_data *tx_data = + (struct virtio_be_tx_data *) + virtio_comm_ring_pop(&priv->tx_ring); + + virtio_vq_getchain(vq, &tx_data->tx_idx, 
&tx_data->tx_iov, 1, NULL); + + /* Copy response if request was synchronous */ + if (priv->busy) { + memcpy(priv->pending_tx_req, + tx_data->tx_iov.iov_base, + tx_data->tx_iov.iov_len); + priv->busy = false; + } +} + +/* + * Received request from frontend + */ +static void virtio_be_handle_rx_kick(struct virtio_vq_info *vq, + struct virtio_fe_info *fe_info) +{ + struct iovec iov; + uint16_t idx; + struct hyper_dmabuf_req *req = NULL; + int len; + int ret; + + /* Make sure we will process all pending requests */ + while (virtio_vq_has_descs(vq)) { + virtio_vq_getchain(vq, &idx, &iov, 1, NULL); + + if (iov.iov_len != sizeof(struct hyper_dmabuf_req)) { + /* HACK: if int size buffer was provided, + * treat that as request to get frontend vmid */ + if (iov.iov_len == sizeof(int)) { + *((int *)iov.iov_base) = fe_info->vmid; + len = iov.iov_len; + } else { + len = 0; + dev_warn(hy_drv_priv->dev, + "received request with wrong size"); + dev_warn(hy_drv_priv->dev, + "%zu != %zu\n", + iov.iov_len, + sizeof(struct hyper_dmabuf_req)); + } + + virtio_vq_relchain(vq, idx, len); + continue; + } + + req = (struct hyper_dmabuf_req *)iov.iov_base; + + ret = hyper_dmabuf_msg_parse(1, req); + + len = iov.iov_len; + + virtio_vq_relchain(vq, idx, len); + } + virtio_vq_endchains(vq, 1); +} + +/* + * Check in what virtqueue we received buffer and process it accordingly. 
+ */ +static void virtio_be_handle_vq_kick( + int vq_idx, struct virtio_fe_info *fe_info) +{ + struct virtio_vq_info *vq; + + vq = &fe_info->priv->vqs[vq_idx]; + + if (vq_idx == HDMA_VIRTIO_RX_QUEUE) + virtio_be_handle_rx_kick(vq, fe_info); + else + virtio_be_handle_tx_kick(vq, fe_info); +} + +/* + * Received new buffer in virtqueue + */ +static int virtio_be_handle_kick(int client_id, unsigned long *ioreqs_map, + void *client_priv) +{ + int val = -1; + struct vhm_request *req; + struct virtio_fe_info *fe_info; + int vcpu; + + fe_info = (struct virtio_fe_info *)client_priv; + if (fe_info == NULL) { + dev_warn(hy_drv_priv->dev, "Client %d not found\n", client_id); + return -EINVAL; + } + + while (1) { + vcpu = find_first_bit(ioreqs_map, fe_info->max_vcpu); + if (vcpu == fe_info->max_vcpu) + break; + req = &fe_info->req_buf[vcpu]; + if (atomic_read(&req->processed) == REQ_STATE_PROCESSING && + req->client == fe_info->client_id) { + if (req->reqs.pio_request.direction == REQUEST_READ) + req->reqs.pio_request.value = 0; + else + val = req->reqs.pio_request.value; + + acrn_ioreq_complete_request( + fe_info->client_id, vcpu, req); + } + } + + if (val >= 0) + virtio_be_handle_vq_kick(val, fe_info); + + return 0; +} + +/* + * New frontend is connecting to backend. + * Creates virtqueues for it and registers internally. 
+ */ +static int virtio_be_register_vhm_client(struct virtio_dev_info *d) +{ + unsigned int vmid; + struct vm_info info; + struct virtio_fe_info *fe_info; + int ret; + + fe_info = kcalloc(1, sizeof(*fe_info), GFP_KERNEL); + if (fe_info == NULL) + return -ENOMEM; + + fe_info->priv = + container_of(d, struct virtio_be_priv, dev); + vmid = d->_ctx.vmid; + fe_info->vmid = vmid; + + dev_dbg(hy_drv_priv->dev, + "Virtio frontend from vm %d connected\n", vmid); + + fe_info->client_id = + acrn_ioreq_create_client(vmid, + virtio_be_handle_kick, + fe_info, + "hyper dmabuf kick"); + if (fe_info->client_id < 0) { + dev_err(hy_drv_priv->dev, + "Failed to create client of ACRN ioreq\n"); + goto err; + } + + ret = acrn_ioreq_add_iorange(fe_info->client_id, + d->io_range_type ? REQ_MMIO : REQ_PORTIO, + d->io_range_start, + d->io_range_start + d->io_range_len - 1); + + if (ret < 0) { + dev_err(hy_drv_priv->dev, + "Failed to add iorange to acrn ioreq\n"); + goto err; + } + + ret = vhm_get_vm_info(vmid, &info); + if (ret < 0) { + acrn_ioreq_del_iorange(fe_info->client_id, + d->io_range_type ? REQ_MMIO : REQ_PORTIO, + d->io_range_start, + d->io_range_start + d->io_range_len - 1); + + dev_err(hy_drv_priv->dev, "Failed in vhm_get_vm_info\n"); + goto err; + } + + fe_info->max_vcpu = info.max_vcpu; + + fe_info->req_buf = acrn_ioreq_get_reqbuf(fe_info->client_id); + if (fe_info->req_buf == NULL) { + acrn_ioreq_del_iorange(fe_info->client_id, + d->io_range_type ? REQ_MMIO : REQ_PORTIO, + d->io_range_start, + d->io_range_start + d->io_range_len - 1); + + dev_err(hy_drv_priv->dev, "Failed in acrn_ioreq_get_reqbuf\n"); + goto err; + } + + acrn_ioreq_attach_client(fe_info->client_id); + + virtio_fe_add(fe_info); + + return 0; + +err: + acrn_ioreq_destroy_client(fe_info->client_id); + kfree(fe_info); + + return -EINVAL; +} + +/* + * DM is opening our VBS interface to create new frontend instance. 
+ */ +static int vbs_k_open(struct inode *inode, struct file *f) +{ + struct virtio_be_priv *priv; + struct virtio_dev_info *dev; + struct virtio_vq_info *vqs; + int i; + + priv = kcalloc(1, sizeof(*priv), GFP_KERNEL); + if (priv == NULL) + return -ENOMEM; + + vqs = &priv->vqs[0]; + + dev = &priv->dev; + + for (i = 0; i < HDMA_VIRTIO_QUEUE_MAX; i++) { + vqs[i].dev = dev; + vqs[i].vq_notify = NULL; + } + dev->vqs = vqs; + + virtio_dev_init(dev, vqs, HDMA_VIRTIO_QUEUE_MAX); + + priv->pending_tx_req = + kcalloc(1, sizeof(struct hyper_dmabuf_req), GFP_KERNEL); + + virtio_comm_ring_init(&priv->tx_ring, + sizeof(struct virtio_be_tx_data), + REQ_RING_SIZE); + + mutex_init(&priv->lock); + + f->private_data = priv; + + return 0; +} + +static void cleanup_fe(struct virtio_fe_info *fe_info, void *attr) +{ + struct virtio_be_priv *priv = attr; + if (fe_info->priv == priv) { + acrn_ioreq_del_iorange(fe_info->client_id, + priv->dev.io_range_type ? REQ_MMIO : REQ_PORTIO, + priv->dev.io_range_start, + priv->dev.io_range_start + priv->dev.io_range_len - 1); + + acrn_ioreq_destroy_client(fe_info->client_id); + virtio_fe_remove(fe_info->client_id); + hyper_dmabuf_remove_imported_vmid(fe_info->vmid); + kfree(fe_info); + } +} + +static int vbs_k_release(struct inode *inode, struct file *f) +{ + struct virtio_be_priv *priv = + (struct virtio_be_priv *) f->private_data; + + kfree(priv->pending_tx_req); + virtio_comm_ring_free(&priv->tx_ring); + + /* + * Find and cleanup virtio frontend that + * has been using released vbs k file + */ + virtio_fe_foreach(cleanup_fe, priv); + + virtio_dev_reset(&priv->dev); + + kfree(priv); + return 0; +} + +static int vbs_k_reset(struct virtio_be_priv *priv) +{ + virtio_comm_ring_free(&priv->tx_ring); + + virtio_fe_foreach(cleanup_fe, priv); + + virtio_dev_reset(&priv->dev); + + virtio_comm_ring_init(&priv->tx_ring, + sizeof(struct virtio_be_tx_data), + REQ_RING_SIZE); + + return 0; +} + +static long vbs_k_ioctl(struct file *f, unsigned int ioctl, + 
unsigned long arg) +{ + struct virtio_be_priv *priv = + (struct virtio_be_priv *) f->private_data; + void __user *argp = (void __user *)arg; + int r = 0; + + if (priv == NULL) { + dev_err(hy_drv_priv->dev, + "No backend private data\n"); + + return -EINVAL; + } + + switch(ioctl) { + case VBS_SET_VQ: + /* Overridden to call additionally + * virtio_be_register_vhm_client */ + r = virtio_vqs_ioctl(&priv->dev, ioctl, argp); + if (r == -ENOIOCTLCMD) + return -EFAULT; + + if (virtio_be_register_vhm_client(&priv->dev) < 0) + return -EFAULT; + break; + case VBS_RESET_DEV: + vbs_k_reset(priv); + break; + default: + r = virtio_dev_ioctl(&priv->dev, ioctl, argp); + if (r == -ENOIOCTLCMD) + r = virtio_vqs_ioctl(&priv->dev, ioctl, argp); + break; + } + + return r; +} + +static const struct file_operations vbs_hyper_dmabuf_fops = { + .owner = THIS_MODULE, + .open = vbs_k_open, + .release = vbs_k_release, + .unlocked_ioctl = vbs_k_ioctl, + .llseek = noop_llseek, +}; + +static struct miscdevice vbs_hyper_dmabuf_misc = { + .minor = MISC_DYNAMIC_MINOR, + .name = "vbs_hyper_dmabuf", + .fops = &vbs_hyper_dmabuf_fops, +}; + +static int virtio_be_register(void) +{ + return misc_register(&vbs_hyper_dmabuf_misc); +} + +static void virtio_be_unregister(void) +{ + misc_deregister(&vbs_hyper_dmabuf_misc); +} + +/* + * ACRN SOS will always has vmid 0 + * TODO: check if that always will be true + */ +static int virtio_be_get_vmid(void) +{ + return 0; +} + +static int virtio_be_send_req(int vmid, struct hyper_dmabuf_req *req, + int wait) +{ + int timeout = 1000; + struct virtio_fe_info *fe_info; + struct virtio_be_priv *priv; + struct virtio_be_tx_data *tx_data; + struct virtio_vq_info *vq; + int len; + + fe_info = virtio_fe_find_by_vmid(vmid); + + if (fe_info == NULL) { + dev_err(hy_drv_priv->dev, + "No frontend registered for vmid %d\n", vmid); + return -ENOENT; + } + + priv = fe_info->priv; + + mutex_lock(&priv->lock); + + /* Check if we have any free buffers for sending new request */ + 
while (virtio_comm_ring_full(&priv->tx_ring) && + timeout--) { + usleep_range(100, 120); + } + + if (timeout <= 0) { + mutex_unlock(&priv->lock); + dev_warn(hy_drv_priv->dev, "Requests ring full\n"); + return -EBUSY; + } + + /* Get free buffer for sending request from ring */ + tx_data = (struct virtio_be_tx_data *) + virtio_comm_ring_push(&priv->tx_ring); + + vq = &priv->vqs[HDMA_VIRTIO_TX_QUEUE]; + + if (tx_data->tx_iov.iov_len != sizeof(struct hyper_dmabuf_req)) { + dev_warn(hy_drv_priv->dev, + "received request with wrong size\n"); + virtio_vq_relchain(vq, tx_data->tx_idx, 0); + mutex_unlock(&priv->lock); + return -EINVAL; + } + + req->req_id = hyper_dmabuf_virtio_get_next_req_id(); + + /* Copy request data to virtqueue buffer */ + memcpy(tx_data->tx_iov.iov_base, req, sizeof(*req)); + len = tx_data->tx_iov.iov_len; + + /* update req_pending with current request */ + if (wait) { + priv->busy = true; + memcpy(priv->pending_tx_req, req, sizeof(*req)); + } + + virtio_vq_relchain(vq, tx_data->tx_idx, len); + + virtio_vq_endchains(vq, 1); + + if (wait) { + while (timeout--) { + if (priv->pending_tx_req->stat != + HYPER_DMABUF_REQ_NOT_RESPONDED) + break; + usleep_range(100, 120); + } + + if (timeout < 0) { + mutex_unlock(&priv->lock); + dev_err(hy_drv_priv->dev, "request timed-out\n"); + return -EBUSY; + } + } + + mutex_unlock(&priv->lock); + return 0; +}; + +struct hyper_dmabuf_bknd_ops virtio_bknd_ops = { + .init = virtio_be_register, + .cleanup = virtio_be_unregister, + .get_vm_id = virtio_be_get_vmid, + .share_pages = virtio_share_pages, + .unshare_pages = virtio_unshare_pages, + .map_shared_pages = virtio_map_shared_pages, + .unmap_shared_pages = virtio_unmap_shared_pages, + .init_comm_env = NULL, + .destroy_comm = NULL, + .init_rx_ch = NULL, + .init_tx_ch = NULL, + .send_req = virtio_be_send_req, +}; + + +MODULE_DESCRIPTION("Hyper dmabuf virtio driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/dma-buf/hyper_dmabuf/virtio/hyper_dmabuf_virtio_comm_ring.c 
b/drivers/dma-buf/hyper_dmabuf/virtio/hyper_dmabuf_virtio_comm_ring.c new file mode 100644 index 0000000000000..d73bcbcc8e879 --- /dev/null +++ b/drivers/dma-buf/hyper_dmabuf/virtio/hyper_dmabuf_virtio_comm_ring.c @@ -0,0 +1,89 @@ +/* + * Copyright © 2017 Intel Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. 
+ * + * Authors: + * Mateusz Polrola + * + */ + +#include +#include +#include "hyper_dmabuf_virtio_comm_ring.h" + +int virtio_comm_ring_init(struct virtio_comm_ring *ring, + int entry_size, + int num_entries) +{ + ring->data = kcalloc(num_entries, entry_size, GFP_KERNEL); + + if (!ring->data) + return -ENOMEM; + + ring->head = 0; + ring->tail = 0; + ring->used = 0; + ring->num_entries = num_entries; + ring->entry_size = entry_size; + + return 0; +} + +void virtio_comm_ring_free(struct virtio_comm_ring *ring) +{ + kfree(ring->data); + ring->data = NULL; +} + +bool virtio_comm_ring_full(struct virtio_comm_ring *ring) +{ + if (ring->used == ring->num_entries) + return true; + + return false; +} + +void *virtio_comm_ring_push(struct virtio_comm_ring *ring) +{ + int old_head; + + if (virtio_comm_ring_full(ring)) + return NULL; + + old_head = ring->head; + + ring->head++; + ring->head %= ring->num_entries; + ring->used++; + + return ring->data + (ring->entry_size * old_head); +} + +void *virtio_comm_ring_pop(struct virtio_comm_ring *ring) +{ + int old_tail = ring->tail; + + ring->tail++; + ring->tail %= ring->num_entries; + ring->used--; + + return ring->data + (ring->entry_size * old_tail); +} diff --git a/drivers/dma-buf/hyper_dmabuf/virtio/hyper_dmabuf_virtio_comm_ring.h b/drivers/dma-buf/hyper_dmabuf/virtio/hyper_dmabuf_virtio_comm_ring.h new file mode 100644 index 0000000000000..a95a63af2ba0e --- /dev/null +++ b/drivers/dma-buf/hyper_dmabuf/virtio/hyper_dmabuf_virtio_comm_ring.h @@ -0,0 +1,68 @@ +/* + * Copyright © 2017 Intel Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to 
the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. + * + */ + +#ifndef __HYPER_DMABUF_VIRTIO_COMM_RING_H__ +#define __HYPER_DMABUF_VIRTIO_COMM_RING_H__ + +/* Generic ring buffer */ +struct virtio_comm_ring { + /* Buffer allocated for keeping ring entries */ + void *data; + + /* Index pointing to next free element in ring */ + int head; + + /* Index pointing to last released element in ring */ + int tail; + + /* Total number of elements that ring can contain */ + int num_entries; + + /* Size of single ring element in bytes */ + int entry_size; + + /* Number of currently used elements */ + int used; +}; + +/* Initializes given ring for keeping given a + * number of entries of specific size */ +int virtio_comm_ring_init(struct virtio_comm_ring *ring, + int entry_size, + int num_entries); + +/* Frees buffer used for storing ring entries */ +void virtio_comm_ring_free(struct virtio_comm_ring *ring); + +/* Checks if ring is full */ +bool virtio_comm_ring_full(struct virtio_comm_ring *ring); + +/* Gets next free element from ring and marks it as used + * or NULL if ring is full */ +void *virtio_comm_ring_push(struct virtio_comm_ring *ring); + +/* Pops oldest element from ring and marks it as free */ +void *virtio_comm_ring_pop(struct virtio_comm_ring *ring); + +#endif /* __HYPER_DMABUF_VIRTIO_COMM_RING_H__*/ diff --git 
a/drivers/dma-buf/hyper_dmabuf/virtio/hyper_dmabuf_virtio_common.c b/drivers/dma-buf/hyper_dmabuf/virtio/hyper_dmabuf_virtio_common.c new file mode 100644 index 0000000000000..05be74358a74e --- /dev/null +++ b/drivers/dma-buf/hyper_dmabuf/virtio/hyper_dmabuf_virtio_common.c @@ -0,0 +1,35 @@ +/* + * Copyright © 2017 Intel Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. 
+ * + * Authors: + * Mateusz Polrola + * + */ + +#include "hyper_dmabuf_virtio_common.h" + +int hyper_dmabuf_virtio_get_next_req_id(void) +{ + static int req_id; + + return req_id++; +} diff --git a/drivers/dma-buf/hyper_dmabuf/virtio/hyper_dmabuf_virtio_common.h b/drivers/dma-buf/hyper_dmabuf/virtio/hyper_dmabuf_virtio_common.h new file mode 100644 index 0000000000000..24a652ef54c0d --- /dev/null +++ b/drivers/dma-buf/hyper_dmabuf/virtio/hyper_dmabuf_virtio_common.h @@ -0,0 +1,55 @@ +/* + * Copyright © 2017 Intel Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. 
+ * + */ + +#ifndef __HYPER_DMABUF_VIRTIO_COMMON_H__ +#define __HYPER_DMABUF_VIRTIO_COMMON_H__ + +/* + * ACRN uses physicall addresses for memory sharing, + * so size of one page ref will be 64-bits + */ + +#define REFS_PER_PAGE (PAGE_SIZE/sizeof(u64)) + +/* Defines size of requests circular buffer */ +#define REQ_RING_SIZE 128 + +extern struct hyper_dmabuf_bknd_ops virtio_bknd_ops; +struct virtio_be_priv; +struct vhm_request; + +/* Entry describing each connected frontend */ +struct virtio_fe_info { + struct virtio_be_priv *priv; + int client_id; + int vmid; + int max_vcpu; + struct vhm_request *req_buf; +}; + +extern struct hyper_dmabuf_private hyper_dmabuf_private; + +int hyper_dmabuf_virtio_get_next_req_id(void); + +#endif /* __HYPER_DMABUF_VIRTIO_COMMON_H__*/ diff --git a/drivers/dma-buf/hyper_dmabuf/virtio/hyper_dmabuf_virtio_fe_drv.c b/drivers/dma-buf/hyper_dmabuf/virtio/hyper_dmabuf_virtio_fe_drv.c new file mode 100644 index 0000000000000..e0c811135699b --- /dev/null +++ b/drivers/dma-buf/hyper_dmabuf/virtio/hyper_dmabuf_virtio_fe_drv.c @@ -0,0 +1,439 @@ +/* + * Copyright © 2017 Intel Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. + * + * Authors: + * Mateusz Polrola + * + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include "../hyper_dmabuf_msg.h" +#include "../hyper_dmabuf_drv.h" +#include "hyper_dmabuf_virtio_common.h" +#include "hyper_dmabuf_virtio_shm.h" +#include "hyper_dmabuf_virtio_comm_ring.h" + +/* + * Identifies which queue is used for TX and RX + * Note: it is opposite regarding to backend definition + */ +enum virio_queue_type { + HDMA_VIRTIO_TX_QUEUE = 0, + HDMA_VIRTIO_RX_QUEUE, + HDMA_VIRTIO_QUEUE_MAX +}; + +struct virtio_hdma_fe_priv { + struct virtqueue *vqs[HDMA_VIRTIO_QUEUE_MAX]; + struct virtio_comm_ring tx_ring; + struct virtio_comm_ring rx_ring; + int vmid; + /* + * Lock to protect operations on virtqueue + * which are not safe to run concurrently + */ + spinlock_t lock; +}; + +/* Assuming there will be one FE instance per VM */ +static struct virtio_hdma_fe_priv *hyper_dmabuf_virtio_fe; + +/* + * Received response for request. + * No need for copying request with updated result, + * as backend is processing original request data directly. 
+ */ +static void virtio_hdma_fe_tx_done(struct virtqueue *vq) +{ + struct virtio_hdma_fe_priv *priv = + (struct virtio_hdma_fe_priv *) vq->vdev->priv; + int len; + unsigned long flags; + + if (priv == NULL) { + dev_dbg(hy_drv_priv->dev, + "No frontend private data\n"); + return; + } + + spin_lock_irqsave(&priv->lock, flags); + /* Make sure that all pending responses are processed */ + while (virtqueue_get_buf(vq, &len)) { + if (len == sizeof(struct hyper_dmabuf_req)) { + /* Mark that response was received + * and buffer can be reused */ + virtio_comm_ring_pop(&priv->tx_ring); + } + } + spin_unlock_irqrestore(&priv->lock, flags); +} + +/* + * Sends given data buffer via given virtqueue. + */ +static void virtio_hdma_fe_queue_buffer(struct virtio_hdma_fe_priv *priv, + unsigned int queue_nr, + void *buf, size_t size) +{ + struct scatterlist sg; + + if (queue_nr >= HDMA_VIRTIO_QUEUE_MAX) { + dev_dbg(hy_drv_priv->dev, + "queue_nr exceeding max queue number\n"); + return; + } + + sg_init_one(&sg, buf, size); + + virtqueue_add_inbuf(priv->vqs[queue_nr], &sg, 1, buf, GFP_KERNEL); + + virtqueue_kick(priv->vqs[queue_nr]); +} + +/* + * Handle requests coming from other VMs + */ +static void virtio_hdma_fe_handle_rx(struct virtqueue *vq) +{ + struct virtio_hdma_fe_priv *priv = + (struct virtio_hdma_fe_priv *) vq->vdev->priv; + struct hyper_dmabuf_req *rx_req; + int size, ret; + + if (priv == NULL) { + dev_dbg(hy_drv_priv->dev, + "No frontend private data\n"); + return; + } + + /* Make sure all pending requests will be processed */ + while (virtqueue_get_buf(vq, &size)) { + + /* Get next request from ring */ + rx_req = (struct hyper_dmabuf_req *) + virtio_comm_ring_pop(&priv->rx_ring); + + if (size != sizeof(struct hyper_dmabuf_req)) { + dev_dbg(hy_drv_priv->dev, + "Received malformed request\n"); + } else { + ret = hyper_dmabuf_msg_parse(1, rx_req); + } + + /* Send updated request back to virtqueue as a response.*/ + virtio_hdma_fe_queue_buffer(priv, HDMA_VIRTIO_RX_QUEUE, + 
rx_req, sizeof(*rx_req)); + } +} + +static int virtio_hdma_fe_probe_common(struct virtio_device *vdev) +{ + struct virtio_hdma_fe_priv *priv; + vq_callback_t *callbacks[] = {virtio_hdma_fe_tx_done, + virtio_hdma_fe_handle_rx}; + static const char *names[] = {"txqueue", "rxqueue"}; + int ret; + + priv = kzalloc(sizeof(struct virtio_hdma_fe_priv), GFP_KERNEL); + if (priv == NULL) + return -ENOMEM; + + virtio_comm_ring_init(&priv->tx_ring, + sizeof(struct hyper_dmabuf_req), + REQ_RING_SIZE); + virtio_comm_ring_init(&priv->rx_ring, + sizeof(struct hyper_dmabuf_req), + REQ_RING_SIZE); + + /* Set vmid to -1 to mark that it is not initialized yet */ + priv->vmid = -1; + + spin_lock_init(&priv->lock); + + vdev->priv = priv; + + ret = virtio_find_vqs(vdev, HDMA_VIRTIO_QUEUE_MAX, + priv->vqs, callbacks, names, NULL); + if (ret) + goto err; + + hyper_dmabuf_virtio_fe = priv; + + return 0; +err: + virtio_comm_ring_free(&priv->tx_ring); + virtio_comm_ring_free(&priv->rx_ring); + kfree(priv); + return ret; +} + +static void virtio_hdma_fe_remove_common(struct virtio_device *vdev) +{ + struct virtio_hdma_fe_priv *priv = + (struct virtio_hdma_fe_priv *) vdev->priv; + + if (priv == NULL) { + dev_err(hy_drv_priv->dev, + "No frontend private data\n"); + + return; + } + + vdev->config->reset(vdev); + vdev->config->del_vqs(vdev); + virtio_comm_ring_free(&priv->tx_ring); + virtio_comm_ring_free(&priv->rx_ring); + kfree(priv); + hyper_dmabuf_virtio_fe = NULL; +} + +static int virtio_hdma_fe_probe(struct virtio_device *vdev) +{ + return virtio_hdma_fe_probe_common(vdev); +} + +static void virtio_hdma_fe_remove(struct virtio_device *vdev) +{ + virtio_hdma_fe_remove_common(vdev); +} + +struct virtio_hdma_restore_work +{ + struct work_struct work; + struct virtio_device *dev; +}; + +/* + * Queues empty requests buffers to backend, + * which will be used by it to send requests back to frontend. 
+ */ +static void virtio_hdma_query_vmid(struct virtio_device *vdev) +{ + struct virtio_hdma_fe_priv *priv = + (struct virtio_hdma_fe_priv *) vdev->priv; + struct hyper_dmabuf_req *rx_req; + int timeout = 1000; + + if (priv == NULL) { + dev_dbg(hy_drv_priv->dev, + "No frontend private data\n"); + + return; + } + + /* Send request to query vmid, in ACRN guest instances don't + * know their ids, but host does. Here a small hack is used, + * and buffer of int size is sent to backend, in that case + * backend will fill it with vmid of instance that sent that request + */ + virtio_hdma_fe_queue_buffer(priv, HDMA_VIRTIO_TX_QUEUE, + &priv->vmid, sizeof(priv->vmid)); + + while (timeout--) { + if (priv->vmid > 0) + break; + usleep_range(100, 120); + } + + if (timeout < 0) + dev_err(hy_drv_priv->dev, + "Cannot query vmid\n"); + + while (!virtio_comm_ring_full(&priv->rx_ring)) { + rx_req = virtio_comm_ring_push(&priv->rx_ring); + + virtio_hdma_fe_queue_buffer(priv, HDMA_VIRTIO_RX_QUEUE, + rx_req, sizeof(*rx_req)); + } +} + +/* + * Queues empty requests buffers to backend, + * which will be used by it to send requests back to frontend. 
+ */ +static void virtio_hdma_fe_scan(struct virtio_device *vdev) +{ + virtio_hdma_query_vmid(vdev); +} + +static void virtio_hdma_restore_bh(struct work_struct *w) +{ + struct virtio_hdma_restore_work *work = + (struct virtio_hdma_restore_work *) w; + + while (!(VIRTIO_CONFIG_S_DRIVER_OK & + work->dev->config->get_status(work->dev))) { + usleep_range(100, 120); + } + + virtio_hdma_query_vmid(work->dev); + kfree(w); +} + +#ifdef CONFIG_PM_SLEEP +static int virtio_hdma_fe_freeze(struct virtio_device *vdev) +{ + virtio_hdma_fe_remove_common(vdev); + return 0; +} + +static int virtio_hdma_fe_restore(struct virtio_device *vdev) +{ + struct virtio_hdma_restore_work *work; + int ret; + + ret = virtio_hdma_fe_probe_common(vdev); + if (!ret) { + work = kmalloc(sizeof(*work), GFP_KERNEL); + INIT_WORK(&work->work, virtio_hdma_restore_bh); + work->dev = vdev; + schedule_work(&work->work); + } + + return ret; +} +#endif + + +static struct virtio_device_id id_table[] = { + { VIRTIO_ID_HYPERDMABUF, VIRTIO_DEV_ANY_ID }, + { 0 }, +}; + +static struct virtio_driver virtio_hdma_fe_driver = { + .driver.name = KBUILD_MODNAME, + .driver.owner = THIS_MODULE, + .id_table = id_table, + .probe = virtio_hdma_fe_probe, + .remove = virtio_hdma_fe_remove, + .scan = virtio_hdma_fe_scan, +#ifdef CONFIG_PM_SLEEP + .freeze = virtio_hdma_fe_freeze, + .restore = virtio_hdma_fe_restore, +#endif +}; + +int virtio_hdma_fe_register(void) +{ + return register_virtio_driver(&virtio_hdma_fe_driver); +} + +void virtio_hdma_fe_unregister(void) +{ + unregister_virtio_driver(&virtio_hdma_fe_driver); +} + +static int virtio_hdma_fe_get_vmid(void) +{ + struct virtio_hdma_fe_priv *priv = hyper_dmabuf_virtio_fe; + + if (hyper_dmabuf_virtio_fe == NULL) { + dev_err(hy_drv_priv->dev, + "Backend not connected\n"); + return -1; + } + + return priv->vmid; +} + +static int virtio_hdma_fe_send_req(int vmid, struct hyper_dmabuf_req *req, + int wait) +{ + struct virtio_hdma_fe_priv *priv = hyper_dmabuf_virtio_fe; + struct 
hyper_dmabuf_req *tx_req; + int timeout = 1000; + unsigned long flags; + + if (priv == NULL) { + dev_err(hy_drv_priv->dev, + "Backend not connected\n"); + return -ENOENT; + } + + /* Check if there are any free buffers in ring */ + while (timeout--) { + if (!virtio_comm_ring_full(&priv->tx_ring)) + break; + usleep_range(100, 120); + } + + if (timeout < 0) { + dev_err(hy_drv_priv->dev, + "Timedout while waiting for free request buffers\n"); + return -EBUSY; + } + + spin_lock_irqsave(&priv->lock, flags); + /* Get free buffer for sending request from ring */ + tx_req = (struct hyper_dmabuf_req *) + virtio_comm_ring_push(&priv->tx_ring); + req->req_id = hyper_dmabuf_virtio_get_next_req_id(); + + /* copy request to buffer that will be used in virtqueue */ + memcpy(tx_req, req, sizeof(*req)); + + virtio_hdma_fe_queue_buffer(hyper_dmabuf_virtio_fe, + HDMA_VIRTIO_TX_QUEUE, + tx_req, sizeof(*tx_req)); + spin_unlock_irqrestore(&priv->lock, flags); + + if (wait) { + while (timeout--) { + if (tx_req->stat != + HYPER_DMABUF_REQ_NOT_RESPONDED) + break; + usleep_range(100, 120); + } + + if (timeout < 0) + return -EBUSY; + } + + return 0; +} + +struct hyper_dmabuf_bknd_ops virtio_bknd_ops = { + .init = virtio_hdma_fe_register, + .cleanup = virtio_hdma_fe_unregister, + .get_vm_id = virtio_hdma_fe_get_vmid, + .share_pages = virtio_share_pages, + .unshare_pages = virtio_unshare_pages, + .map_shared_pages = virtio_map_shared_pages, + .unmap_shared_pages = virtio_unmap_shared_pages, + .send_req = virtio_hdma_fe_send_req, + .init_comm_env = NULL, + .destroy_comm = NULL, + .init_rx_ch = NULL, + .init_tx_ch = NULL, +}; + + +MODULE_DEVICE_TABLE(virtio, id_table); +MODULE_DESCRIPTION("Hyper dmabuf virtio driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/dma-buf/hyper_dmabuf/virtio/hyper_dmabuf_virtio_fe_list.c b/drivers/dma-buf/hyper_dmabuf/virtio/hyper_dmabuf_virtio_fe_list.c new file mode 100644 index 0000000000000..84b6ed5e96c17 --- /dev/null +++ 
b/drivers/dma-buf/hyper_dmabuf/virtio/hyper_dmabuf_virtio_fe_list.c @@ -0,0 +1,113 @@ +/* + * Copyright © 2017 Intel Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. 
+ * + * Authors: + * Mateusz Polrola + * + */ + +#include +#include +#include +#include +#include +#include +#include "../hyper_dmabuf_drv.h" +#include "hyper_dmabuf_virtio_common.h" +#include "hyper_dmabuf_virtio_fe_list.h" + +DECLARE_HASHTABLE(virtio_fe_hash, MAX_ENTRY_FE); + +void virtio_fe_table_init(void) +{ + hash_init(virtio_fe_hash); +} + +int virtio_fe_add(struct virtio_fe_info *fe_info) +{ + struct virtio_fe_info_entry *info_entry; + + info_entry = kmalloc(sizeof(*info_entry), GFP_KERNEL); + + if (!info_entry) + return -ENOMEM; + + info_entry->info = fe_info; + + hash_add(virtio_fe_hash, &info_entry->node, + info_entry->info->client_id); + + return 0; +} + +struct virtio_fe_info *virtio_fe_find(int client_id) +{ + struct virtio_fe_info_entry *info_entry; + int bkt; + + hash_for_each(virtio_fe_hash, bkt, info_entry, node) + if (info_entry->info->client_id == client_id) + return info_entry->info; + + return NULL; +} + +struct virtio_fe_info *virtio_fe_find_by_vmid(int vmid) +{ + struct virtio_fe_info_entry *info_entry; + int bkt; + + hash_for_each(virtio_fe_hash, bkt, info_entry, node) + if (info_entry->info->vmid == vmid) + return info_entry->info; + + return NULL; +} + +int virtio_fe_remove(int client_id) +{ + struct virtio_fe_info_entry *info_entry; + int bkt; + + hash_for_each(virtio_fe_hash, bkt, info_entry, node) + if (info_entry->info->client_id == client_id) { + hash_del(&info_entry->node); + kfree(info_entry); + return 0; + } + + return -ENOENT; +} + +void virtio_fe_foreach( + void (*func)(struct virtio_fe_info *, void *attr), + void *attr) +{ + struct virtio_fe_info_entry *info_entry; + struct hlist_node *tmp; + int bkt; + + hash_for_each_safe(virtio_fe_hash, bkt, tmp, + info_entry, node) { + func(info_entry->info, attr); + } +} diff --git a/drivers/dma-buf/hyper_dmabuf/virtio/hyper_dmabuf_virtio_fe_list.h b/drivers/dma-buf/hyper_dmabuf/virtio/hyper_dmabuf_virtio_fe_list.h new file mode 100644 index 0000000000000..c353c1e5baa14 --- /dev/null +++ 
b/drivers/dma-buf/hyper_dmabuf/virtio/hyper_dmabuf_virtio_fe_list.h @@ -0,0 +1,51 @@ +/* + * Copyright © 2017 Intel Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. 
+ * + */ + +#ifndef __HYPER_DMABUF_VIRTIO_FE_LIST_H__ +#define __HYPER_DMABUF_VIRTIO_FE_LIST_H__ + +/* number of bits to be used for exported dmabufs hash table */ +#define MAX_ENTRY_FE 7 + +struct virtio_fe_info; + +struct virtio_fe_info_entry { + struct virtio_fe_info *info; + struct hlist_node node; +}; + +void virtio_fe_table_init(void); + +int virtio_fe_add(struct virtio_fe_info *fe_info); + +int virtio_fe_remove(int client_id); + +struct virtio_fe_info *virtio_fe_find(int client_id); + +struct virtio_fe_info *virtio_fe_find_by_vmid(int vmid); + +void virtio_fe_foreach(void (*func)(struct virtio_fe_info *, + void *attr), void *attr); + +#endif /* __HYPER_DMABUF_VIRTIO_FE_LIST_H__*/ diff --git a/drivers/dma-buf/hyper_dmabuf/virtio/hyper_dmabuf_virtio_shm.c b/drivers/dma-buf/hyper_dmabuf/virtio/hyper_dmabuf_virtio_shm.c new file mode 100644 index 0000000000000..b18f7cae0115c --- /dev/null +++ b/drivers/dma-buf/hyper_dmabuf/virtio/hyper_dmabuf_virtio_shm.c @@ -0,0 +1,343 @@ +/* + * Copyright © 2017 Intel Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. + * + * Authors: + * Mateusz Polrola + * + */ + +#include +#include +#include +#include +#include +#ifdef CONFIG_HYPER_DMABUF_VIRTIO_BE +#include +#endif +#include "../hyper_dmabuf_drv.h" +#include "hyper_dmabuf_virtio_shm.h" +#include "hyper_dmabuf_virtio_common.h" + +#ifdef CONFIG_HYPER_DMABUF_VIRTIO_BE +struct virtio_shared_pages_info { + u64 *lvl3_table; + u64 **lvl2_table; + u64 lvl3_gref; + struct page **data_pages; + int n_lvl2_refs; + int nents_last; + int vmid; +}; +#else +struct virtio_shared_pages_info { + u64 *lvl3_table; + u64 *lvl2_table; + u64 lvl3_gref; +}; +#endif + +#ifdef CONFIG_HYPER_DMABUF_VIRTIO_BE +static long virtio_be_share_pages(struct page **pages, + int vmid, + int nents, + void **refs_info) +{ + dev_err(hy_drv_priv->dev, + "Pages sharing not available with ACRN backend in SOS\n"); + + return -EINVAL; +} + +static int virtio_be_unshare_pages(void **refs_info, + int nents) +{ + dev_err(hy_drv_priv->dev, + "Pages sharing not available with ACRN backend in SOS\n"); + + return -EINVAL; +} + +static struct page **virtio_be_map_shared_pages(unsigned long lvl3_gref, + int vmid, int nents, + void **refs_info) +{ + u64 *lvl3_table = NULL; + u64 **lvl2_table = NULL; + struct page **data_pages = NULL; + struct virtio_shared_pages_info *sh_pages_info = NULL; + void *pageaddr; + + int nents_last = (nents - 1) % REFS_PER_PAGE + 1; + int n_lvl2_refs = (nents / REFS_PER_PAGE) + ((nents_last > 0) ? 
1 : 0) - + (nents_last == REFS_PER_PAGE); + int i, j, k; + + sh_pages_info = kmalloc(sizeof(*sh_pages_info), GFP_KERNEL); + if (sh_pages_info == NULL) + goto map_failed; + + *refs_info = (void *) sh_pages_info; + + data_pages = kcalloc(nents, sizeof(struct page *), GFP_KERNEL); + if (data_pages == NULL) + goto map_failed; + + lvl2_table = kcalloc(n_lvl2_refs, sizeof(u64 *), GFP_KERNEL); + if (lvl2_table == NULL) + goto map_failed; + + lvl3_table = (u64 *)map_guest_phys(vmid, lvl3_gref, PAGE_SIZE); + if (lvl3_table == NULL) + goto map_failed; + + for (i = 0; i < n_lvl2_refs; i++) { + lvl2_table[i] = (u64 *)map_guest_phys(vmid, + lvl3_table[i], + PAGE_SIZE); + if (lvl2_table[i] == NULL) + goto map_failed; + } + + k = 0; + for (i = 0; i < n_lvl2_refs - 1; i++) { + for (j = 0; j < REFS_PER_PAGE; j++) { + pageaddr = map_guest_phys(vmid, + lvl2_table[i][j], + PAGE_SIZE); + if (pageaddr == NULL) + goto map_failed; + + data_pages[k] = virt_to_page(pageaddr); + k++; + } + } + + for (j = 0; j < nents_last; j++) { + pageaddr = map_guest_phys(vmid, + lvl2_table[i][j], + PAGE_SIZE); + if (pageaddr == NULL) + goto map_failed; + + data_pages[k] = virt_to_page(pageaddr); + k++; + } + + sh_pages_info->lvl2_table = lvl2_table; + sh_pages_info->lvl3_table = lvl3_table; + sh_pages_info->lvl3_gref = lvl3_gref; + sh_pages_info->n_lvl2_refs = n_lvl2_refs; + sh_pages_info->nents_last = nents_last; + sh_pages_info->data_pages = data_pages; + sh_pages_info->vmid = vmid; + + return data_pages; + +map_failed: + dev_err(hy_drv_priv->dev, + "Cannot map guest memory\n"); + + kfree(lvl2_table); + kfree(data_pages); + kfree(sh_pages_info); + + return NULL; +} + +/* + * TODO: In theory pages don't need to be unmaped, + * as ACRN is just translating memory addresses, + * but not sure if that will work the same way in future + */ +static int virtio_be_unmap_shared_pages(void **refs_info, int nents) +{ + struct virtio_shared_pages_info *sh_pages_info; + int vmid; + int i, j; + + sh_pages_info = 
(struct virtio_shared_pages_info *)(*refs_info);
+
+	if (sh_pages_info->data_pages == NULL) {
+		dev_warn(hy_drv_priv->dev,
+			 "Imported pages already cleaned up");
+		dev_warn(hy_drv_priv->dev,
+			 "or buffer was not imported yet\n");
+		return 0;
+	}
+	vmid = sh_pages_info->vmid;
+
+	for (i = 0; i < sh_pages_info->n_lvl2_refs - 1; i++) {
+		for (j = 0; j < REFS_PER_PAGE; j++)
+			unmap_guest_phys(vmid,
+					 sh_pages_info->lvl2_table[i][j]);
+	}
+
+	for (j = 0; j < sh_pages_info->nents_last; j++)
+		unmap_guest_phys(vmid, sh_pages_info->lvl2_table[i][j]);
+
+	for (i = 0; i < sh_pages_info->n_lvl2_refs; i++)
+		unmap_guest_phys(vmid, sh_pages_info->lvl3_table[i]);
+
+	unmap_guest_phys(vmid, sh_pages_info->lvl3_gref);
+
+	kfree(sh_pages_info->lvl2_table);
+	kfree(sh_pages_info->data_pages);
+	sh_pages_info->data_pages = NULL;
+	kfree(sh_pages_info);
+	sh_pages_info = NULL;
+
+	return 0;
+}
+#else
+static long virtio_fe_share_pages(struct page **pages,
+				  int domid, int nents,
+				  void **refs_info)
+{
+	struct virtio_shared_pages_info *sh_pages_info;
+	u64 lvl3_gref;
+	u64 *lvl2_table;
+	u64 *lvl3_table;
+	int i;
+
+	/*
+	 * Calculate number of pages needed for 2nd level addressing:
+	 */
+	int n_lvl2_grefs = (nents/REFS_PER_PAGE +
+			   ((nents % REFS_PER_PAGE) ?
1 : 0)); + + lvl3_table = (u64 *)__get_free_pages(GFP_KERNEL, 1); + lvl2_table = (u64 *)__get_free_pages(GFP_KERNEL, n_lvl2_grefs); + + sh_pages_info = kmalloc(sizeof(*sh_pages_info), GFP_KERNEL); + + if (sh_pages_info == NULL) + return -ENOMEM; + + *refs_info = (void *)sh_pages_info; + + /* Share physical address of pages */ + for (i = 0; i < nents; i++) + lvl2_table[i] = page_to_phys(pages[i]); + + for (i = 0; i < n_lvl2_grefs; i++) + lvl3_table[i] = + virt_to_phys((void *)lvl2_table + i * PAGE_SIZE); + + lvl3_gref = virt_to_phys(lvl3_table); + + sh_pages_info->lvl3_table = lvl3_table; + sh_pages_info->lvl2_table = lvl2_table; + sh_pages_info->lvl3_gref = lvl3_gref; + + return lvl3_gref; +} + +static int virtio_fe_unshare_pages(void **refs_info, + int nents) +{ + struct virtio_shared_pages_info *sh_pages_info; + int n_lvl2_grefs = (nents/REFS_PER_PAGE + + ((nents % REFS_PER_PAGE) ? 1 : 0)); + + sh_pages_info = (struct virtio_shared_pages_info *)(*refs_info); + + if (sh_pages_info == NULL) { + dev_err(hy_drv_priv->dev, + "No pages info\n"); + return -EINVAL; + } + + free_pages((unsigned long)sh_pages_info->lvl2_table, n_lvl2_grefs); + free_pages((unsigned long)sh_pages_info->lvl3_table, 1); + + kfree(sh_pages_info); + + return 0; +} + +static struct page **virtio_fe_map_shared_pages(unsigned long lvl3_gref, + int vmid, int nents, + void **refs_info) +{ + dev_dbg(hy_drv_priv->dev, + "Virtio frontend not supporting currently page mapping\n"); + return NULL; +} + +static int virtio_fe_unmap_shared_pages(void **refs_info, int nents) +{ + dev_dbg(hy_drv_priv->dev, + "Virtio frontend not supporting currently page mapping\n"); + return -EINVAL; +} + +#endif + +long virtio_share_pages(struct page **pages, + int domid, int nents, + void **refs_info) +{ + long ret; +#ifdef CONFIG_HYPER_DMABUF_VIRTIO_BE + ret = virtio_be_share_pages(pages, domid, nents, refs_info); +#else + ret = virtio_fe_share_pages(pages, domid, nents, refs_info); +#endif + return ret; +} + +int 
virtio_unshare_pages(void **refs_info, int nents) +{ + int ret; +#ifdef CONFIG_HYPER_DMABUF_VIRTIO_BE + ret = virtio_be_unshare_pages(refs_info, nents); +#else + ret = virtio_fe_unshare_pages(refs_info, nents); +#endif + return ret; +} + +struct page **virtio_map_shared_pages(unsigned long lvl3_gref, + int vmid, int nents, + void **refs_info) +{ + struct page **ret; +#ifdef CONFIG_HYPER_DMABUF_VIRTIO_BE + ret = virtio_be_map_shared_pages(lvl3_gref, vmid, + nents, refs_info); +#else + ret = virtio_fe_map_shared_pages(lvl3_gref, vmid, + nents, refs_info); +#endif + return ret; +} + +int virtio_unmap_shared_pages(void **refs_info, int nents) +{ + int ret; +#ifdef CONFIG_HYPER_DMABUF_VIRTIO_BE + ret = virtio_be_unmap_shared_pages(refs_info, nents); +#else + ret = virtio_fe_unmap_shared_pages(refs_info, nents); +#endif + return ret; +} diff --git a/drivers/dma-buf/hyper_dmabuf/virtio/hyper_dmabuf_virtio_shm.h b/drivers/dma-buf/hyper_dmabuf/virtio/hyper_dmabuf_virtio_shm.h new file mode 100644 index 0000000000000..55f3e13ef2dfa --- /dev/null +++ b/drivers/dma-buf/hyper_dmabuf/virtio/hyper_dmabuf_virtio_shm.h @@ -0,0 +1,40 @@ +/* + * Copyright © 2017 Intel Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. + * + */ + +#ifndef __HYPER_DMABUF_VIRTIO_SHM_H__ +#define __HYPER_DMABUF_VIRTIO_SHM_H__ + +long virtio_share_pages(struct page **pages, + int domid, int nents, + void **refs_info); + +int virtio_unshare_pages(void **refs_info, int nents); + +struct page **virtio_map_shared_pages(unsigned long lvl3_gref, + int vmid, int nents, + void **refs_info); + +int virtio_unmap_shared_pages(void **refs_info, int nents); + +#endif /* __HYPER_DMABUF_VIRTIO_SHM_H__*/ diff --git a/drivers/dma-buf/hyper_dmabuf/xen/hyper_dmabuf_xen_comm.c b/drivers/dma-buf/hyper_dmabuf/xen/hyper_dmabuf_xen_comm.c new file mode 100644 index 0000000000000..3dd49db66e316 --- /dev/null +++ b/drivers/dma-buf/hyper_dmabuf/xen/hyper_dmabuf_xen_comm.c @@ -0,0 +1,951 @@ +/* + * Copyright © 2017 Intel Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. + * + * Authors: + * Dongwon Kim + * Mateusz Polrola + * + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include "hyper_dmabuf_xen_comm.h" +#include "hyper_dmabuf_xen_comm_list.h" +#include "../hyper_dmabuf_drv.h" + +static int export_req_id; + +struct hyper_dmabuf_req req_pending = {0}; + +static void xen_get_domid_delayed(struct work_struct *unused); +static void xen_init_comm_env_delayed(struct work_struct *unused); + +static DECLARE_DELAYED_WORK(get_vm_id_work, xen_get_domid_delayed); +static DECLARE_DELAYED_WORK(xen_init_comm_env_work, xen_init_comm_env_delayed); + +/* Creates entry in xen store that will keep details of all + * exporter rings created by this domain + */ +static int xen_comm_setup_data_dir(void) +{ + char buf[255]; + + sprintf(buf, "/local/domain/%d/data/hyper_dmabuf", + hy_drv_priv->domid); + + return xenbus_mkdir(XBT_NIL, buf, ""); +} + +/* Removes entry from xenstore with exporter ring details. + * Other domains that has connected to any of exporter rings + * created by this domain, will be notified about removal of + * this entry and will treat that as signal to cleanup importer + * rings created for this domain + */ +static int xen_comm_destroy_data_dir(void) +{ + char buf[255]; + + sprintf(buf, "/local/domain/%d/data/hyper_dmabuf", + hy_drv_priv->domid); + + return xenbus_rm(XBT_NIL, buf, ""); +} + +/* Adds xenstore entries with details of exporter ring created + * for given remote domain. 
It requires special daemon running + * in dom0 to make sure that given remote domain will have right + * permissions to access that data. + */ +static int xen_comm_expose_ring_details(int domid, int rdomid, + int gref, int port) +{ + char buf[255]; + int ret; + + sprintf(buf, "/local/domain/%d/data/hyper_dmabuf/%d", + domid, rdomid); + + ret = xenbus_printf(XBT_NIL, buf, "grefid", "%d", gref); + + if (ret) { + dev_err(hy_drv_priv->dev, + "Failed to write xenbus entry %s: %d\n", + buf, ret); + + return ret; + } + + ret = xenbus_printf(XBT_NIL, buf, "port", "%d", port); + + if (ret) { + dev_err(hy_drv_priv->dev, + "Failed to write xenbus entry %s: %d\n", + buf, ret); + + return ret; + } + + return 0; +} + +/* + * Queries details of ring exposed by remote domain. + */ +static int xen_comm_get_ring_details(int domid, int rdomid, + int *grefid, int *port) +{ + char buf[255]; + int ret; + + sprintf(buf, "/local/domain/%d/data/hyper_dmabuf/%d", + rdomid, domid); + + ret = xenbus_scanf(XBT_NIL, buf, "grefid", "%d", grefid); + + if (ret <= 0) { + dev_err(hy_drv_priv->dev, + "Failed to read xenbus entry %s: %d\n", + buf, ret); + + return 1; + } + + ret = xenbus_scanf(XBT_NIL, buf, "port", "%d", port); + + if (ret <= 0) { + dev_err(hy_drv_priv->dev, + "Failed to read xenbus entry %s: %d\n", + buf, ret); + + return 1; + } + + return 0; +} + +static void xen_get_domid_delayed(struct work_struct *unused) +{ + struct xenbus_transaction xbt; + int domid, ret; + + /* scheduling another if driver is still running + * and xenstore has not been initialized + */ + if (likely(xenstored_ready == 0)) { + dev_dbg(hy_drv_priv->dev, + "Xenstore is not ready yet. 
Will retry in 500ms\n");
+		schedule_delayed_work(&get_vm_id_work, msecs_to_jiffies(500));
+	} else {
+		xenbus_transaction_start(&xbt);
+
+		ret = xenbus_scanf(xbt, "domid", "", "%d", &domid);
+
+		if (ret <= 0)
+			domid = -1;
+
+		xenbus_transaction_end(xbt, 0);
+
+		/* try again since -1 is an invalid id for domain
+		 * (but only if driver is still running)
+		 */
+		if (unlikely(domid == -1)) {
+			dev_dbg(hy_drv_priv->dev,
+				"domid==-1 is invalid. Will retry it in 500ms\n");
+			schedule_delayed_work(&get_vm_id_work,
+					      msecs_to_jiffies(500));
+		} else {
+			dev_info(hy_drv_priv->dev,
+				 "Successfully retrieved domid from Xenstore:%d\n",
+				 domid);
+			hy_drv_priv->domid = domid;
+		}
+	}
+}
+
+int xen_be_get_domid(void)
+{
+	struct xenbus_transaction xbt;
+	int domid;
+
+	if (unlikely(xenstored_ready == 0)) {
+		xen_get_domid_delayed(NULL);
+		return -1;
+	}
+
+	xenbus_transaction_start(&xbt);
+
+	if (!xenbus_scanf(xbt, "domid", "", "%d", &domid))
+		domid = -1;
+
+	xenbus_transaction_end(xbt, 0);
+
+	return domid;
+}
+
+static int xen_comm_next_req_id(void)
+{
+	export_req_id++;
+	return export_req_id;
+}
+
+/* For now cache latest rings as global variables TODO: keep them in list*/
+static irqreturn_t front_ring_isr(int irq, void *info);
+static irqreturn_t back_ring_isr(int irq, void *info);
+
+/* Callback function that will be called on any change of xenbus path
+ * being watched. Used for detecting creation/destruction of remote
+ * domain exporter ring.
+ *
+ * When remote domain's exporter ring will be detected, importer ring
+ * on this domain will be created.
+ *
+ * When remote domain's exporter ring destruction will be detected it
+ * will clean up this domain importer ring.
+ *
+ * Destruction can be caused by unloading module by remote domain or
+ * its crash/force shutdown.
+ */ +static void remote_dom_exporter_watch_cb(struct xenbus_watch *watch, + const char *path, const char *token) +{ + int rdom, ret; + uint32_t grefid, port; + struct xen_comm_rx_ring_info *ring_info; + + /* Check which domain has changed its exporter rings */ + ret = sscanf(watch->node, "/local/domain/%d/", &rdom); + if (ret <= 0) + return; + + /* Check if we have importer ring for given remote domain already + * created + */ + ring_info = xen_comm_find_rx_ring(rdom); + + /* Try to query remote domain exporter ring details - if + * that will fail and we have importer ring that means remote + * domains has cleanup its exporter ring, so our importer ring + * is no longer useful. + * + * If querying details will succeed and we don't have importer ring, + * it means that remote domain has setup it for us and we should + * connect to it. + */ + + ret = xen_comm_get_ring_details(xen_be_get_domid(), + rdom, &grefid, &port); + + if (ring_info && ret != 0) { + dev_info(hy_drv_priv->dev, + "Remote exporter closed, cleaninup importer\n"); + xen_be_cleanup_rx_rbuf(rdom); + } else if (!ring_info && ret == 0) { + dev_info(hy_drv_priv->dev, + "Registering importer\n"); + xen_be_init_rx_rbuf(rdom); + } +} + +/* exporter needs to generated info for page sharing */ +int xen_be_init_tx_rbuf(int domid) +{ + struct xen_comm_tx_ring_info *ring_info; + struct xen_comm_sring *sring; + struct evtchn_alloc_unbound alloc_unbound; + struct evtchn_close close; + + void *shared_ring; + int ret; + + /* check if there's any existing tx channel in the table */ + ring_info = xen_comm_find_tx_ring(domid); + + if (ring_info) { + dev_info(hy_drv_priv->dev, + "tx ring ch to domid = %d already exist\ngref = %d, port = %d\n", + ring_info->rdomain, ring_info->gref_ring, ring_info->port); + return 0; + } + + ring_info = kmalloc(sizeof(*ring_info), GFP_KERNEL); + + if (!ring_info) + return -ENOMEM; + + /* from exporter to importer */ + shared_ring = (void *)__get_free_pages(GFP_KERNEL, 1); + if 
(shared_ring == 0) { + kfree(ring_info); + return -ENOMEM; + } + + sring = (struct xen_comm_sring *) shared_ring; + + SHARED_RING_INIT(sring); + + FRONT_RING_INIT(&(ring_info->ring_front), sring, PAGE_SIZE); + + ring_info->gref_ring = gnttab_grant_foreign_access(domid, + virt_to_mfn(shared_ring), + 0); + if (ring_info->gref_ring < 0) { + /* fail to get gref */ + kfree(ring_info); + return -EFAULT; + } + + alloc_unbound.dom = DOMID_SELF; + alloc_unbound.remote_dom = domid; + ret = HYPERVISOR_event_channel_op(EVTCHNOP_alloc_unbound, + &alloc_unbound); + if (ret) { + dev_err(hy_drv_priv->dev, + "Cannot allocate event channel\n"); + kfree(ring_info); + return -EIO; + } + + /* setting up interrupt */ + ret = bind_evtchn_to_irqhandler(alloc_unbound.port, + front_ring_isr, 0, + NULL, (void *) ring_info); + + if (ret < 0) { + dev_err(hy_drv_priv->dev, + "Failed to setup event channel\n"); + close.port = alloc_unbound.port; + HYPERVISOR_event_channel_op(EVTCHNOP_close, &close); + gnttab_end_foreign_access(ring_info->gref_ring, 0, + virt_to_mfn(shared_ring)); + kfree(ring_info); + return -EIO; + } + + ring_info->rdomain = domid; + ring_info->irq = ret; + ring_info->port = alloc_unbound.port; + + mutex_init(&ring_info->lock); + + dev_dbg(hy_drv_priv->dev, + "%s: allocated eventchannel gref %d port: %d irq: %d\n", + __func__, + ring_info->gref_ring, + ring_info->port, + ring_info->irq); + + ret = xen_comm_add_tx_ring(ring_info); + + if (ret < 0) { + kfree(ring_info); + return -ENOMEM; + } + + ret = xen_comm_expose_ring_details(xen_be_get_domid(), + domid, + ring_info->gref_ring, + ring_info->port); + + /* Register watch for remote domain exporter ring. + * When remote domain will setup its exporter ring, + * we will automatically connect our importer ring to it. 
+ */ + ring_info->watch.callback = remote_dom_exporter_watch_cb; + ring_info->watch.node = kmalloc(255, GFP_KERNEL); + + if (!ring_info->watch.node) { + kfree(ring_info); + return -ENOMEM; + } + + sprintf((char *)ring_info->watch.node, + "/local/domain/%d/data/hyper_dmabuf/%d/port", + domid, xen_be_get_domid()); + + register_xenbus_watch(&ring_info->watch); + + return ret; +} + +/* cleans up exporter ring created for given remote domain */ +void xen_be_cleanup_tx_rbuf(int domid) +{ + struct xen_comm_tx_ring_info *ring_info; + struct xen_comm_rx_ring_info *rx_ring_info; + + /* check if we at all have exporter ring for given rdomain */ + ring_info = xen_comm_find_tx_ring(domid); + + if (!ring_info) + return; + + xen_comm_remove_tx_ring(domid); + + unregister_xenbus_watch(&ring_info->watch); + kfree(ring_info->watch.node); + + /* No need to close communication channel, will be done by + * this function + */ + unbind_from_irqhandler(ring_info->irq, (void *) ring_info); + + /* No need to free sring page, will be freed by this function + * when other side will end its access + */ + gnttab_end_foreign_access(ring_info->gref_ring, 0, + (unsigned long) ring_info->ring_front.sring); + + kfree(ring_info); + + rx_ring_info = xen_comm_find_rx_ring(domid); + if (!rx_ring_info) + return; + + BACK_RING_INIT(&(rx_ring_info->ring_back), + rx_ring_info->ring_back.sring, + PAGE_SIZE); +} + +/* importer needs to know about shared page and port numbers for + * ring buffer and event channel + */ +int xen_be_init_rx_rbuf(int domid) +{ + struct xen_comm_rx_ring_info *ring_info; + struct xen_comm_sring *sring; + + struct page *shared_ring; + + struct gnttab_map_grant_ref *map_ops; + + int ret; + int rx_gref, rx_port; + + /* check if there's existing rx ring channel */ + ring_info = xen_comm_find_rx_ring(domid); + + if (ring_info) { + dev_info(hy_drv_priv->dev, + "rx ring ch from domid = %d already exist\n", + ring_info->sdomain); + + return 0; + } + + ret = 
xen_comm_get_ring_details(xen_be_get_domid(), domid, + &rx_gref, &rx_port); + + if (ret) { + dev_err(hy_drv_priv->dev, + "Domain %d has not created exporter ring for current domain\n", + domid); + + return ret; + } + + ring_info = kmalloc(sizeof(*ring_info), GFP_KERNEL); + + if (!ring_info) + return -ENOMEM; + + ring_info->sdomain = domid; + ring_info->evtchn = rx_port; + + map_ops = kmalloc(sizeof(*map_ops), GFP_KERNEL); + + if (!map_ops) { + ret = -ENOMEM; + goto fail_no_map_ops; + } + + if (gnttab_alloc_pages(1, &shared_ring)) { + ret = -ENOMEM; + goto fail_others; + } + + gnttab_set_map_op(&map_ops[0], + (unsigned long)pfn_to_kaddr( + page_to_pfn(shared_ring)), + GNTMAP_host_map, rx_gref, domid); + + gnttab_set_unmap_op(&ring_info->unmap_op, + (unsigned long)pfn_to_kaddr( + page_to_pfn(shared_ring)), + GNTMAP_host_map, -1); + + ret = gnttab_map_refs(map_ops, NULL, &shared_ring, 1); + if (ret < 0) { + dev_err(hy_drv_priv->dev, "Cannot map ring\n"); + ret = -EFAULT; + goto fail_others; + } + + if (map_ops[0].status) { + dev_err(hy_drv_priv->dev, "Ring mapping failed\n"); + ret = -EFAULT; + goto fail_others; + } else { + ring_info->unmap_op.handle = map_ops[0].handle; + } + + kfree(map_ops); + + sring = (struct xen_comm_sring *)pfn_to_kaddr(page_to_pfn(shared_ring)); + + BACK_RING_INIT(&ring_info->ring_back, sring, PAGE_SIZE); + + ret = bind_interdomain_evtchn_to_irq(domid, rx_port); + + if (ret < 0) { + ret = -EIO; + goto fail_others; + } + + ring_info->irq = ret; + + dev_dbg(hy_drv_priv->dev, + "%s: bound to eventchannel port: %d irq: %d\n", __func__, + rx_port, + ring_info->irq); + + ret = xen_comm_add_rx_ring(ring_info); + + if (ret < 0) { + ret = -ENOMEM; + goto fail_others; + } + + /* Setup communcation channel in opposite direction */ + if (!xen_comm_find_tx_ring(domid)) + ret = xen_be_init_tx_rbuf(domid); + + ret = request_irq(ring_info->irq, + back_ring_isr, 0, + NULL, (void *)ring_info); + + return ret; + +fail_others: + kfree(map_ops); + 
+fail_no_map_ops:
+	kfree(ring_info);
+
+	return ret;
+}
+
+/* cleans up importer ring created for given source domain */
+void xen_be_cleanup_rx_rbuf(int domid)
+{
+	struct xen_comm_rx_ring_info *ring_info;
+	struct xen_comm_tx_ring_info *tx_ring_info;
+	struct page *shared_ring;
+
+	/* check if we have importer ring created for given sdomain */
+	ring_info = xen_comm_find_rx_ring(domid);
+
+	if (!ring_info)
+		return;
+
+	xen_comm_remove_rx_ring(domid);
+
+	/* no need to close event channel, will be done by that function */
+	unbind_from_irqhandler(ring_info->irq, (void *)ring_info);
+
+	/* unmapping shared ring page */
+	shared_ring = virt_to_page(ring_info->ring_back.sring);
+	gnttab_unmap_refs(&ring_info->unmap_op, NULL, &shared_ring, 1);
+	gnttab_free_pages(1, &shared_ring);
+
+	kfree(ring_info);
+
+	tx_ring_info = xen_comm_find_tx_ring(domid);
+	if (!tx_ring_info)
+		return;
+
+	SHARED_RING_INIT(tx_ring_info->ring_front.sring);
+	FRONT_RING_INIT(&(tx_ring_info->ring_front),
+			tx_ring_info->ring_front.sring,
+			PAGE_SIZE);
+}
+
+#ifdef CONFIG_HYPER_DMABUF_XEN_AUTO_RX_CH_ADD
+
+static void xen_rx_ch_add_delayed(struct work_struct *unused);
+
+static DECLARE_DELAYED_WORK(xen_rx_ch_auto_add_work, xen_rx_ch_add_delayed);
+
+#define DOMID_SCAN_START	1	/* domid = 1 */
+#define DOMID_SCAN_END		10	/* domid = 10 */
+
+static void xen_rx_ch_add_delayed(struct work_struct *unused)
+{
+	int ret;
+	char buf[128];
+	int i, dummy;
+
+	dev_dbg(hy_drv_priv->dev,
+		"Scanning new tx channel comming from another domain\n");
+
+	/* check other domains and schedule another work if driver
+	 * is still running and backend is valid
+	 */
+	if (hy_drv_priv &&
+	    hy_drv_priv->initialized) {
+		for (i = DOMID_SCAN_START; i < DOMID_SCAN_END + 1; i++) {
+			if (i == hy_drv_priv->domid)
+				continue;
+
+			sprintf(buf, "/local/domain/%d/data/hyper_dmabuf/%d",
+				i, hy_drv_priv->domid);
+
+			ret = xenbus_scanf(XBT_NIL, buf, "port", "%d", &dummy);
+
+			if (ret > 0) {
+				if (xen_comm_find_rx_ring(i) != NULL)
+
continue; + + ret = xen_be_init_rx_rbuf(i); + + if (!ret) + dev_info(hy_drv_priv->dev, + "Done rx ch init for VM %d\n", + i); + } + } + + /* check every 10 seconds */ + schedule_delayed_work(&xen_rx_ch_auto_add_work, + msecs_to_jiffies(10000)); + } +} + +#endif /* CONFIG_HYPER_DMABUF_XEN_AUTO_RX_CH_ADD */ + +void xen_init_comm_env_delayed(struct work_struct *unused) +{ + int ret; + + /* scheduling another work if driver is still running + * and xenstore hasn't been initialized or dom_id hasn't + * been correctly retrieved. + */ + if (likely(xenstored_ready == 0 || + hy_drv_priv->domid == -1)) { + dev_dbg(hy_drv_priv->dev, + "Xenstore not ready Will re-try in 500ms\n"); + schedule_delayed_work(&xen_init_comm_env_work, + msecs_to_jiffies(500)); + } else { + ret = xen_comm_setup_data_dir(); + if (ret < 0) { + dev_err(hy_drv_priv->dev, + "Failed to create data dir in Xenstore\n"); + } else { + dev_info(hy_drv_priv->dev, + "Successfully finished comm env init\n"); + hy_drv_priv->initialized = true; + +#ifdef CONFIG_HYPER_DMABUF_XEN_AUTO_RX_CH_ADD + xen_rx_ch_add_delayed(NULL); +#endif /* CONFIG_HYPER_DMABUF_XEN_AUTO_RX_CH_ADD */ + } + } +} + +int xen_be_init_comm_env(void) +{ + int ret; + + xen_comm_ring_table_init(); + + if (unlikely(xenstored_ready == 0 || + hy_drv_priv->domid == -1)) { + xen_init_comm_env_delayed(NULL); + return -1; + } + + ret = xen_comm_setup_data_dir(); + if (ret < 0) { + dev_err(hy_drv_priv->dev, + "Failed to create data dir in Xenstore\n"); + } else { + dev_info(hy_drv_priv->dev, + "Successfully finished comm env initialization\n"); + + hy_drv_priv->initialized = true; + } + + return ret; +} + +/* cleans up all tx/rx rings */ +static void xen_be_cleanup_all_rbufs(void) +{ + xen_comm_foreach_tx_ring(xen_be_cleanup_tx_rbuf); + xen_comm_foreach_rx_ring(xen_be_cleanup_rx_rbuf); +} + +void xen_be_destroy_comm(void) +{ + xen_be_cleanup_all_rbufs(); + xen_comm_destroy_data_dir(); +} + +int xen_be_send_req(int domid, struct hyper_dmabuf_req *req, + int 
wait) +{ + struct xen_comm_front_ring *ring; + struct hyper_dmabuf_req *new_req; + struct xen_comm_tx_ring_info *ring_info; + int notify; + + struct timeval tv_start, tv_end; + struct timeval tv_diff; + + int timeout = 1000; + + /* find a ring info for the channel */ + ring_info = xen_comm_find_tx_ring(domid); + if (!ring_info) { + dev_err(hy_drv_priv->dev, + "Can't find ring info for the channel\n"); + return -ENOENT; + } + + + ring = &ring_info->ring_front; + + do_gettimeofday(&tv_start); + + while (RING_FULL(ring)) { + dev_dbg(hy_drv_priv->dev, "RING_FULL\n"); + + if (timeout == 0) { + dev_err(hy_drv_priv->dev, + "Timeout while waiting for an entry in the ring\n"); + return -EIO; + } + usleep_range(100, 120); + timeout--; + } + + timeout = 1000; + + mutex_lock(&ring_info->lock); + + new_req = RING_GET_REQUEST(ring, ring->req_prod_pvt); + if (!new_req) { + mutex_unlock(&ring_info->lock); + dev_err(hy_drv_priv->dev, + "NULL REQUEST\n"); + return -EIO; + } + + req->req_id = xen_comm_next_req_id(); + + /* update req_pending with current request */ + memcpy(&req_pending, req, sizeof(req_pending)); + + /* pass current request to the ring */ + memcpy(new_req, req, sizeof(*new_req)); + + ring->req_prod_pvt++; + + RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(ring, notify); + if (notify) + notify_remote_via_irq(ring_info->irq); + + if (wait) { + while (timeout--) { + if (req_pending.stat != + HYPER_DMABUF_REQ_NOT_RESPONDED) + break; + usleep_range(100, 120); + } + + if (timeout < 0) { + mutex_unlock(&ring_info->lock); + dev_err(hy_drv_priv->dev, + "request timed-out\n"); + return -EBUSY; + } + + mutex_unlock(&ring_info->lock); + do_gettimeofday(&tv_end); + + /* checking time duration for round-trip of a request + * for debugging + */ + if (tv_end.tv_usec >= tv_start.tv_usec) { + tv_diff.tv_sec = tv_end.tv_sec-tv_start.tv_sec; + tv_diff.tv_usec = tv_end.tv_usec-tv_start.tv_usec; + } else { + tv_diff.tv_sec = tv_end.tv_sec-tv_start.tv_sec-1; + tv_diff.tv_usec = 
tv_end.tv_usec+1000000- + tv_start.tv_usec; + } + + if (tv_diff.tv_sec != 0 && tv_diff.tv_usec > 16000) + dev_dbg(hy_drv_priv->dev, + "send_req:time diff: %ld sec, %ld usec\n", + tv_diff.tv_sec, tv_diff.tv_usec); + } + + mutex_unlock(&ring_info->lock); + + return 0; +} + +/* ISR for handling request */ +static irqreturn_t back_ring_isr(int irq, void *info) +{ + RING_IDX rc, rp; + struct hyper_dmabuf_req req; + struct hyper_dmabuf_resp resp; + + int notify, more_to_do; + int ret; + + struct xen_comm_rx_ring_info *ring_info; + struct xen_comm_back_ring *ring; + + ring_info = (struct xen_comm_rx_ring_info *)info; + ring = &ring_info->ring_back; + + dev_dbg(hy_drv_priv->dev, "%s\n", __func__); + + do { + rc = ring->req_cons; + rp = ring->sring->req_prod; + more_to_do = 0; + while (rc != rp) { + if (RING_REQUEST_CONS_OVERFLOW(ring, rc)) + break; + + memcpy(&req, RING_GET_REQUEST(ring, rc), sizeof(req)); + ring->req_cons = ++rc; + + ret = hyper_dmabuf_msg_parse(ring_info->sdomain, &req); + + if (ret > 0) { + /* preparing a response for the request and + * send it to the requester + */ + memcpy(&resp, &req, sizeof(resp)); + memcpy(RING_GET_RESPONSE(ring, + ring->rsp_prod_pvt), + &resp, sizeof(resp)); + ring->rsp_prod_pvt++; + + dev_dbg(hy_drv_priv->dev, + "responding to exporter for req:%d\n", + resp.resp_id); + + RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(ring, + notify); + + if (notify) + notify_remote_via_irq(ring_info->irq); + } + + RING_FINAL_CHECK_FOR_REQUESTS(ring, more_to_do); + } + } while (more_to_do); + + return IRQ_HANDLED; +} + +/* ISR for handling responses */ +static irqreturn_t front_ring_isr(int irq, void *info) +{ + /* front ring only care about response from back */ + struct hyper_dmabuf_resp *resp; + RING_IDX i, rp; + int more_to_do, ret; + + struct xen_comm_tx_ring_info *ring_info; + struct xen_comm_front_ring *ring; + + ring_info = (struct xen_comm_tx_ring_info *)info; + ring = &ring_info->ring_front; + + dev_dbg(hy_drv_priv->dev, "%s\n", __func__); + + do 
{ + more_to_do = 0; + rp = ring->sring->rsp_prod; + for (i = ring->rsp_cons; i != rp; i++) { + resp = RING_GET_RESPONSE(ring, i); + + /* update pending request's status with what is + * in the response + */ + + dev_dbg(hy_drv_priv->dev, + "getting response from importer\n"); + + if (req_pending.req_id == resp->resp_id) + req_pending.stat = resp->stat; + + if (resp->stat == HYPER_DMABUF_REQ_NEEDS_FOLLOW_UP) { + /* parsing response */ + ret = hyper_dmabuf_msg_parse(ring_info->rdomain, + (struct hyper_dmabuf_req *)resp); + + if (ret < 0) { + dev_err(hy_drv_priv->dev, + "err while parsing resp\n"); + } + } else if (resp->stat == HYPER_DMABUF_REQ_PROCESSED) { + /* for debugging dma_buf remote synch */ + dev_dbg(hy_drv_priv->dev, + "original request = 0x%x\n", resp->cmd); + dev_dbg(hy_drv_priv->dev, + "got HYPER_DMABUF_REQ_PROCESSED\n"); + } else if (resp->stat == HYPER_DMABUF_REQ_ERROR) { + /* for debugging dma_buf remote synch */ + dev_dbg(hy_drv_priv->dev, + "original request = 0x%x\n", resp->cmd); + dev_dbg(hy_drv_priv->dev, + "got HYPER_DMABUF_REQ_ERROR\n"); + } + } + + ring->rsp_cons = i; + + if (i != ring->req_prod_pvt) + RING_FINAL_CHECK_FOR_RESPONSES(ring, more_to_do); + else + ring->sring->rsp_event = i+1; + + } while (more_to_do); + + return IRQ_HANDLED; +} diff --git a/drivers/dma-buf/hyper_dmabuf/xen/hyper_dmabuf_xen_comm.h b/drivers/dma-buf/hyper_dmabuf/xen/hyper_dmabuf_xen_comm.h new file mode 100644 index 0000000000000..70a2b704badd9 --- /dev/null +++ b/drivers/dma-buf/hyper_dmabuf/xen/hyper_dmabuf_xen_comm.h @@ -0,0 +1,78 @@ +/* + * Copyright © 2017 Intel Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * 
Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. + * + */ + +#ifndef __HYPER_DMABUF_XEN_COMM_H__ +#define __HYPER_DMABUF_XEN_COMM_H__ + +#include "xen/interface/io/ring.h" +#include "xen/xenbus.h" +#include "../hyper_dmabuf_msg.h" + +extern int xenstored_ready; + +DEFINE_RING_TYPES(xen_comm, struct hyper_dmabuf_req, struct hyper_dmabuf_resp); + +struct xen_comm_tx_ring_info { + struct xen_comm_front_ring ring_front; + int rdomain; + int gref_ring; + int irq; + int port; + struct mutex lock; + struct xenbus_watch watch; +}; + +struct xen_comm_rx_ring_info { + int sdomain; + int irq; + int evtchn; + struct xen_comm_back_ring ring_back; + struct gnttab_unmap_grant_ref unmap_op; +}; + +int xen_be_get_domid(void); + +int xen_be_init_comm_env(void); + +/* exporter needs to generated info for page sharing */ +int xen_be_init_tx_rbuf(int domid); + +/* importer needs to know about shared page and port numbers + * for ring buffer and event channel + */ +int xen_be_init_rx_rbuf(int domid); + +/* cleans up exporter ring created for given domain */ +void xen_be_cleanup_tx_rbuf(int domid); + +/* cleans up importer ring created for given domain */ +void xen_be_cleanup_rx_rbuf(int domid); + +void xen_be_destroy_comm(void); + +/* send request to the remote domain */ +int xen_be_send_req(int domid, struct 
hyper_dmabuf_req *req, + int wait); + +#endif /* __HYPER_DMABUF_XEN_COMM_H__ */ diff --git a/drivers/dma-buf/hyper_dmabuf/xen/hyper_dmabuf_xen_comm_list.c b/drivers/dma-buf/hyper_dmabuf/xen/hyper_dmabuf_xen_comm_list.c new file mode 100644 index 0000000000000..15023dbc8ced8 --- /dev/null +++ b/drivers/dma-buf/hyper_dmabuf/xen/hyper_dmabuf_xen_comm_list.c @@ -0,0 +1,158 @@ +/* + * Copyright © 2017 Intel Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. 
+ * + * Authors: + * Dongwon Kim + * Mateusz Polrola + * + */ + +#include +#include +#include +#include +#include +#include +#include "../hyper_dmabuf_drv.h" +#include "hyper_dmabuf_xen_comm.h" +#include "hyper_dmabuf_xen_comm_list.h" + +DECLARE_HASHTABLE(xen_comm_tx_ring_hash, MAX_ENTRY_TX_RING); +DECLARE_HASHTABLE(xen_comm_rx_ring_hash, MAX_ENTRY_RX_RING); + +void xen_comm_ring_table_init(void) +{ + hash_init(xen_comm_rx_ring_hash); + hash_init(xen_comm_tx_ring_hash); +} + +int xen_comm_add_tx_ring(struct xen_comm_tx_ring_info *ring_info) +{ + struct xen_comm_tx_ring_info_entry *info_entry; + + info_entry = kmalloc(sizeof(*info_entry), GFP_KERNEL); + + if (!info_entry) + return -ENOMEM; + + info_entry->info = ring_info; + + hash_add(xen_comm_tx_ring_hash, &info_entry->node, + info_entry->info->rdomain); + + return 0; +} + +int xen_comm_add_rx_ring(struct xen_comm_rx_ring_info *ring_info) +{ + struct xen_comm_rx_ring_info_entry *info_entry; + + info_entry = kmalloc(sizeof(*info_entry), GFP_KERNEL); + + if (!info_entry) + return -ENOMEM; + + info_entry->info = ring_info; + + hash_add(xen_comm_rx_ring_hash, &info_entry->node, + info_entry->info->sdomain); + + return 0; +} + +struct xen_comm_tx_ring_info *xen_comm_find_tx_ring(int domid) +{ + struct xen_comm_tx_ring_info_entry *info_entry; + int bkt; + + hash_for_each(xen_comm_tx_ring_hash, bkt, info_entry, node) + if (info_entry->info->rdomain == domid) + return info_entry->info; + + return NULL; +} + +struct xen_comm_rx_ring_info *xen_comm_find_rx_ring(int domid) +{ + struct xen_comm_rx_ring_info_entry *info_entry; + int bkt; + + hash_for_each(xen_comm_rx_ring_hash, bkt, info_entry, node) + if (info_entry->info->sdomain == domid) + return info_entry->info; + + return NULL; +} + +int xen_comm_remove_tx_ring(int domid) +{ + struct xen_comm_tx_ring_info_entry *info_entry; + int bkt; + + hash_for_each(xen_comm_tx_ring_hash, bkt, info_entry, node) + if (info_entry->info->rdomain == domid) { + 
hash_del(&info_entry->node); + kfree(info_entry); + return 0; + } + + return -ENOENT; +} + +int xen_comm_remove_rx_ring(int domid) +{ + struct xen_comm_rx_ring_info_entry *info_entry; + int bkt; + + hash_for_each(xen_comm_rx_ring_hash, bkt, info_entry, node) + if (info_entry->info->sdomain == domid) { + hash_del(&info_entry->node); + kfree(info_entry); + return 0; + } + + return -ENOENT; +} + +void xen_comm_foreach_tx_ring(void (*func)(int domid)) +{ + struct xen_comm_tx_ring_info_entry *info_entry; + struct hlist_node *tmp; + int bkt; + + hash_for_each_safe(xen_comm_tx_ring_hash, bkt, tmp, + info_entry, node) { + func(info_entry->info->rdomain); + } +} + +void xen_comm_foreach_rx_ring(void (*func)(int domid)) +{ + struct xen_comm_rx_ring_info_entry *info_entry; + struct hlist_node *tmp; + int bkt; + + hash_for_each_safe(xen_comm_rx_ring_hash, bkt, tmp, + info_entry, node) { + func(info_entry->info->sdomain); + } +} diff --git a/drivers/dma-buf/hyper_dmabuf/xen/hyper_dmabuf_xen_comm_list.h b/drivers/dma-buf/hyper_dmabuf/xen/hyper_dmabuf_xen_comm_list.h new file mode 100644 index 0000000000000..8502fe7df578e --- /dev/null +++ b/drivers/dma-buf/hyper_dmabuf/xen/hyper_dmabuf_xen_comm_list.h @@ -0,0 +1,67 @@ +/* + * Copyright © 2017 Intel Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. + * + */ + +#ifndef __HYPER_DMABUF_XEN_COMM_LIST_H__ +#define __HYPER_DMABUF_XEN_COMM_LIST_H__ + +/* number of bits to be used for exported dmabufs hash table */ +#define MAX_ENTRY_TX_RING 7 +/* number of bits to be used for imported dmabufs hash table */ +#define MAX_ENTRY_RX_RING 7 + +struct xen_comm_tx_ring_info_entry { + struct xen_comm_tx_ring_info *info; + struct hlist_node node; +}; + +struct xen_comm_rx_ring_info_entry { + struct xen_comm_rx_ring_info *info; + struct hlist_node node; +}; + +void xen_comm_ring_table_init(void); + +int xen_comm_add_tx_ring(struct xen_comm_tx_ring_info *ring_info); + +int xen_comm_add_rx_ring(struct xen_comm_rx_ring_info *ring_info); + +int xen_comm_remove_tx_ring(int domid); + +int xen_comm_remove_rx_ring(int domid); + +struct xen_comm_tx_ring_info *xen_comm_find_tx_ring(int domid); + +struct xen_comm_rx_ring_info *xen_comm_find_rx_ring(int domid); + +/* iterates over all exporter rings and calls provided + * function for each of them + */ +void xen_comm_foreach_tx_ring(void (*func)(int domid)); + +/* iterates over all importer rings and calls provided + * function for each of them + */ +void xen_comm_foreach_rx_ring(void (*func)(int domid)); + +#endif // __HYPER_DMABUF_XEN_COMM_LIST_H__ diff --git a/drivers/dma-buf/hyper_dmabuf/xen/hyper_dmabuf_xen_drv.c b/drivers/dma-buf/hyper_dmabuf/xen/hyper_dmabuf_xen_drv.c new file mode 100644 index 0000000000000..14ed3bc51e6aa --- /dev/null +++ b/drivers/dma-buf/hyper_dmabuf/xen/hyper_dmabuf_xen_drv.c 
@@ -0,0 +1,46 @@ +/* + * Copyright © 2017 Intel Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. 
+ * + * Authors: + * Dongwon Kim + * Mateusz Polrola + * + */ + +#include "../hyper_dmabuf_drv.h" +#include "hyper_dmabuf_xen_comm.h" +#include "hyper_dmabuf_xen_shm.h" + +struct hyper_dmabuf_bknd_ops xen_bknd_ops = { + .init = NULL, /* not needed for xen */ + .cleanup = NULL, /* not needed for xen */ + .get_vm_id = xen_be_get_domid, + .share_pages = xen_be_share_pages, + .unshare_pages = xen_be_unshare_pages, + .map_shared_pages = (void *)xen_be_map_shared_pages, + .unmap_shared_pages = xen_be_unmap_shared_pages, + .init_comm_env = xen_be_init_comm_env, + .destroy_comm = xen_be_destroy_comm, + .init_rx_ch = xen_be_init_rx_rbuf, + .init_tx_ch = xen_be_init_tx_rbuf, + .send_req = xen_be_send_req, +}; diff --git a/drivers/dma-buf/hyper_dmabuf/xen/hyper_dmabuf_xen_drv.h b/drivers/dma-buf/hyper_dmabuf/xen/hyper_dmabuf_xen_drv.h new file mode 100644 index 0000000000000..a4902b747a87c --- /dev/null +++ b/drivers/dma-buf/hyper_dmabuf/xen/hyper_dmabuf_xen_drv.h @@ -0,0 +1,53 @@ +/* + * Copyright © 2017 Intel Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. + * + */ + +#ifndef __HYPER_DMABUF_XEN_DRV_H__ +#define __HYPER_DMABUF_XEN_DRV_H__ +#include + +extern struct hyper_dmabuf_bknd_ops xen_bknd_ops; + +/* Main purpose of this structure is to keep + * all references created or acquired for sharing + * pages with another domain for freeing those later + * when unsharing. + */ +struct xen_shared_pages_info { + /* top level refid */ + grant_ref_t lvl3_gref; + + /* page of top level addressing, it contains refids of 2nd lvl pages */ + grant_ref_t *lvl3_table; + + /* table of 2nd level pages, that contains refids to data pages */ + grant_ref_t *lvl2_table; + + /* unmap ops for mapped pages */ + struct gnttab_unmap_grant_ref *unmap_ops; + + /* data pages to be unmapped */ + struct page **data_pages; +}; + +#endif // __HYPER_DMABUF_XEN_COMM_H__ diff --git a/drivers/dma-buf/hyper_dmabuf/xen/hyper_dmabuf_xen_shm.c b/drivers/dma-buf/hyper_dmabuf/xen/hyper_dmabuf_xen_shm.c new file mode 100644 index 0000000000000..5889485125e00 --- /dev/null +++ b/drivers/dma-buf/hyper_dmabuf/xen/hyper_dmabuf_xen_shm.c @@ -0,0 +1,525 @@ +/* + * Copyright © 2017 Intel Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial 
portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. + * + * Authors: + * Dongwon Kim + * Mateusz Polrola + * + */ + +#include +#include +#include +#include "hyper_dmabuf_xen_drv.h" +#include "../hyper_dmabuf_drv.h" + +#define REFS_PER_PAGE (PAGE_SIZE/sizeof(grant_ref_t)) + +/* + * Creates 2 level page directory structure for referencing shared pages. + * Top level page is a single page that contains up to 1024 refids that + * point to 2nd level pages. + * + * Each 2nd level page contains up to 1024 refids that point to shared + * data pages. + * + * There will always be one top level page and number of 2nd level pages + * depends on number of shared data pages. + * + * 3rd level page 2nd level pages Data pages + * +-------------------------+ ┌>+--------------------+ ┌>+------------+ + * |2nd level page 0 refid |---┘ |Data page 0 refid |-┘ |Data page 0 | + * |2nd level page 1 refid |---┐ |Data page 1 refid |-┐ +------------+ + * | ... | | | .... | | + * |2nd level page 1023 refid|-┐ | |Data page 1023 refid| └>+------------+ + * +-------------------------+ | | +--------------------+ |Data page 1 | + * | | +------------+ + * | └>+--------------------+ + * | |Data page 1024 refid| + * | |Data page 1025 refid| + * | | ... | + * | |Data page 2047 refid| + * | +--------------------+ + * | + * | ..... + * └-->+-----------------------+ + * |Data page 1047552 refid| + * |Data page 1047553 refid| + * | ... 
| + * |Data page 1048575 refid| + * +-----------------------+ + * + * Using such 2 level structure it is possible to reference up to 4GB of + * shared data using single refid pointing to top level page. + * + * Returns refid of top level page. + */ +long xen_be_share_pages(struct page **pages, int domid, int nents, + void **refs_info) +{ + grant_ref_t lvl3_gref; + grant_ref_t *lvl2_table; + grant_ref_t *lvl3_table; + + /* + * Calculate number of pages needed for 2nd level addresing: + */ + int n_lvl2_grefs = (nents/REFS_PER_PAGE + + ((nents % REFS_PER_PAGE) ? 1 : 0)); + + struct xen_shared_pages_info *sh_pages_info; + int i; + + lvl3_table = (grant_ref_t *)__get_free_pages(GFP_KERNEL, 1); + lvl2_table = (grant_ref_t *)__get_free_pages(GFP_KERNEL, n_lvl2_grefs); + + sh_pages_info = kmalloc(sizeof(*sh_pages_info), GFP_KERNEL); + + if (!sh_pages_info) + return -ENOMEM; + + *refs_info = (void *)sh_pages_info; + + /* share data pages in readonly mode for security */ + for (i = 0; i < nents; i++) { + lvl2_table[i] = gnttab_grant_foreign_access(domid, + pfn_to_mfn(page_to_pfn(pages[i])), + true /* read only */); + if (lvl2_table[i] == -ENOSPC) { + dev_err(hy_drv_priv->dev, + "No more space left in grant table\n"); + + /* Unshare all already shared pages for lvl2 */ + while (i--) { + gnttab_end_foreign_access_ref(lvl2_table[i], 0); + gnttab_free_grant_reference(lvl2_table[i]); + } + goto err_cleanup; + } + } + + /* Share 2nd level addressing pages in readonly mode*/ + for (i = 0; i < n_lvl2_grefs; i++) { + lvl3_table[i] = gnttab_grant_foreign_access(domid, + virt_to_mfn( + (unsigned long)lvl2_table+i*PAGE_SIZE), + true); + + if (lvl3_table[i] == -ENOSPC) { + dev_err(hy_drv_priv->dev, + "No more space left in grant table\n"); + + /* Unshare all already shared pages for lvl3 */ + while (i--) { + gnttab_end_foreign_access_ref(lvl3_table[i], 1); + gnttab_free_grant_reference(lvl3_table[i]); + } + + /* Unshare all pages for lvl2 */ + while (nents--) { + 
gnttab_end_foreign_access_ref( + lvl2_table[nents], 0); + gnttab_free_grant_reference(lvl2_table[nents]); + } + + goto err_cleanup; + } + } + + /* Share lvl3_table in readonly mode*/ + lvl3_gref = gnttab_grant_foreign_access(domid, + virt_to_mfn((unsigned long)lvl3_table), + true); + + if (lvl3_gref == -ENOSPC) { + dev_err(hy_drv_priv->dev, + "No more space left in grant table\n"); + + /* Unshare all pages for lvl3 */ + while (i--) { + gnttab_end_foreign_access_ref(lvl3_table[i], 1); + gnttab_free_grant_reference(lvl3_table[i]); + } + + /* Unshare all pages for lvl2 */ + while (nents--) { + gnttab_end_foreign_access_ref(lvl2_table[nents], 0); + gnttab_free_grant_reference(lvl2_table[nents]); + } + + goto err_cleanup; + } + + /* Store lvl3_table page to be freed later */ + sh_pages_info->lvl3_table = lvl3_table; + + /* Store lvl2_table pages to be freed later */ + sh_pages_info->lvl2_table = lvl2_table; + + + /* Store exported pages refid to be unshared later */ + sh_pages_info->lvl3_gref = lvl3_gref; + + dev_dbg(hy_drv_priv->dev, "%s exit\n", __func__); + return lvl3_gref; + +err_cleanup: + free_pages((unsigned long)lvl2_table, n_lvl2_grefs); + free_pages((unsigned long)lvl3_table, 1); + + return -ENOSPC; +} + +int xen_be_unshare_pages(void **refs_info, int nents) +{ + struct xen_shared_pages_info *sh_pages_info; + int n_lvl2_grefs = (nents/REFS_PER_PAGE + + ((nents % REFS_PER_PAGE) ? 
1 : 0)); + int i; + + dev_dbg(hy_drv_priv->dev, "%s entry\n", __func__); + sh_pages_info = (struct xen_shared_pages_info *)(*refs_info); + + if (sh_pages_info->lvl3_table == NULL || + sh_pages_info->lvl2_table == NULL || + sh_pages_info->lvl3_gref == -1) { + dev_warn(hy_drv_priv->dev, + "gref table for hyper_dmabuf already cleaned up\n"); + return 0; + } + + /* End foreign access for data pages, but do not free them */ + for (i = 0; i < nents; i++) { + if (gnttab_query_foreign_access(sh_pages_info->lvl2_table[i])) + dev_warn(hy_drv_priv->dev, "refid not shared !!\n"); + + gnttab_end_foreign_access_ref(sh_pages_info->lvl2_table[i], 0); + gnttab_free_grant_reference(sh_pages_info->lvl2_table[i]); + } + + /* End foreign access for 2nd level addressing pages */ + for (i = 0; i < n_lvl2_grefs; i++) { + if (gnttab_query_foreign_access(sh_pages_info->lvl3_table[i])) + dev_warn(hy_drv_priv->dev, "refid not shared !!\n"); + + if (!gnttab_end_foreign_access_ref( + sh_pages_info->lvl3_table[i], 1)) + dev_warn(hy_drv_priv->dev, "refid still in use!!!\n"); + + gnttab_free_grant_reference(sh_pages_info->lvl3_table[i]); + } + + /* End foreign access for top level addressing page */ + if (gnttab_query_foreign_access(sh_pages_info->lvl3_gref)) + dev_warn(hy_drv_priv->dev, "gref not shared !!\n"); + + gnttab_end_foreign_access_ref(sh_pages_info->lvl3_gref, 1); + gnttab_free_grant_reference(sh_pages_info->lvl3_gref); + + /* freeing all pages used for 2 level addressing */ + free_pages((unsigned long)sh_pages_info->lvl2_table, n_lvl2_grefs); + free_pages((unsigned long)sh_pages_info->lvl3_table, 1); + + sh_pages_info->lvl3_gref = -1; + sh_pages_info->lvl2_table = NULL; + sh_pages_info->lvl3_table = NULL; + kfree(sh_pages_info); + sh_pages_info = NULL; + + dev_dbg(hy_drv_priv->dev, "%s exit\n", __func__); + return 0; +} + +/* Maps provided top level ref id and then return array of pages + * containing data refs. 
+ */ +struct page **xen_be_map_shared_pages(unsigned long lvl3_gref, int domid, + int nents, void **refs_info) +{ + struct page *lvl3_table_page; + struct page **lvl2_table_pages; + struct page **data_pages; + struct xen_shared_pages_info *sh_pages_info; + + grant_ref_t *lvl3_table; + grant_ref_t *lvl2_table; + + struct gnttab_map_grant_ref lvl3_map_ops; + struct gnttab_unmap_grant_ref lvl3_unmap_ops; + + struct gnttab_map_grant_ref *lvl2_map_ops; + struct gnttab_unmap_grant_ref *lvl2_unmap_ops; + + struct gnttab_map_grant_ref *data_map_ops; + struct gnttab_unmap_grant_ref *data_unmap_ops; + + /* # of grefs in the last page of lvl2 table */ + int nents_last = (nents - 1) % REFS_PER_PAGE + 1; + int n_lvl2_grefs = (nents / REFS_PER_PAGE) + + ((nents_last > 0) ? 1 : 0) - + (nents_last == REFS_PER_PAGE); + int i, j, k; + + dev_dbg(hy_drv_priv->dev, "%s entry\n", __func__); + + sh_pages_info = kmalloc(sizeof(*sh_pages_info), GFP_KERNEL); + *refs_info = (void *) sh_pages_info; + + lvl2_table_pages = kcalloc(n_lvl2_grefs, sizeof(struct page *), + GFP_KERNEL); + + data_pages = kcalloc(nents, sizeof(struct page *), GFP_KERNEL); + + lvl2_map_ops = kcalloc(n_lvl2_grefs, sizeof(*lvl2_map_ops), + GFP_KERNEL); + + lvl2_unmap_ops = kcalloc(n_lvl2_grefs, sizeof(*lvl2_unmap_ops), + GFP_KERNEL); + + data_map_ops = kcalloc(nents, sizeof(*data_map_ops), GFP_KERNEL); + data_unmap_ops = kcalloc(nents, sizeof(*data_unmap_ops), GFP_KERNEL); + + /* Map top level addressing page */ + if (gnttab_alloc_pages(1, &lvl3_table_page)) { + dev_err(hy_drv_priv->dev, "Cannot allocate pages\n"); + return NULL; + } + + lvl3_table = (grant_ref_t *)pfn_to_kaddr(page_to_pfn(lvl3_table_page)); + + gnttab_set_map_op(&lvl3_map_ops, (unsigned long)lvl3_table, + GNTMAP_host_map | GNTMAP_readonly, + (grant_ref_t)lvl3_gref, domid); + + gnttab_set_unmap_op(&lvl3_unmap_ops, (unsigned long)lvl3_table, + GNTMAP_host_map | GNTMAP_readonly, -1); + + if (gnttab_map_refs(&lvl3_map_ops, NULL, &lvl3_table_page, 1)) { + 
dev_err(hy_drv_priv->dev, + "HYPERVISOR map grant ref failed"); + return NULL; + } + + if (lvl3_map_ops.status) { + dev_err(hy_drv_priv->dev, + "HYPERVISOR map grant ref failed status = %d", + lvl3_map_ops.status); + + goto error_cleanup_lvl3; + } else { + lvl3_unmap_ops.handle = lvl3_map_ops.handle; + } + + /* Map all second level pages */ + if (gnttab_alloc_pages(n_lvl2_grefs, lvl2_table_pages)) { + dev_err(hy_drv_priv->dev, "Cannot allocate pages\n"); + goto error_cleanup_lvl3; + } + + for (i = 0; i < n_lvl2_grefs; i++) { + lvl2_table = (grant_ref_t *)pfn_to_kaddr( + page_to_pfn(lvl2_table_pages[i])); + gnttab_set_map_op(&lvl2_map_ops[i], + (unsigned long)lvl2_table, GNTMAP_host_map | + GNTMAP_readonly, + lvl3_table[i], domid); + gnttab_set_unmap_op(&lvl2_unmap_ops[i], + (unsigned long)lvl2_table, GNTMAP_host_map | + GNTMAP_readonly, -1); + } + + /* Unmap top level page, as it won't be needed any longer */ + if (gnttab_unmap_refs(&lvl3_unmap_ops, NULL, + &lvl3_table_page, 1)) { + dev_err(hy_drv_priv->dev, + "xen: cannot unmap top level page\n"); + return NULL; + } + + /* Mark that page was unmapped */ + lvl3_unmap_ops.handle = -1; + + if (gnttab_map_refs(lvl2_map_ops, NULL, + lvl2_table_pages, n_lvl2_grefs)) { + dev_err(hy_drv_priv->dev, + "HYPERVISOR map grant ref failed"); + return NULL; + } + + /* Checks if pages were mapped correctly */ + for (i = 0; i < n_lvl2_grefs; i++) { + if (lvl2_map_ops[i].status) { + dev_err(hy_drv_priv->dev, + "HYPERVISOR map grant ref failed status = %d", + lvl2_map_ops[i].status); + goto error_cleanup_lvl2; + } else { + lvl2_unmap_ops[i].handle = lvl2_map_ops[i].handle; + } + } + + if (gnttab_alloc_pages(nents, data_pages)) { + dev_err(hy_drv_priv->dev, + "Cannot allocate pages\n"); + goto error_cleanup_lvl2; + } + + k = 0; + + for (i = 0; i < n_lvl2_grefs - 1; i++) { + lvl2_table = pfn_to_kaddr(page_to_pfn(lvl2_table_pages[i])); + for (j = 0; j < REFS_PER_PAGE; j++) { + gnttab_set_map_op(&data_map_ops[k], + (unsigned 
long)pfn_to_kaddr( + page_to_pfn(data_pages[k])), + GNTMAP_host_map | GNTMAP_readonly, + lvl2_table[j], domid); + + gnttab_set_unmap_op(&data_unmap_ops[k], + (unsigned long)pfn_to_kaddr( + page_to_pfn(data_pages[k])), + GNTMAP_host_map | GNTMAP_readonly, -1); + k++; + } + } + + /* for grefs in the last lvl2 table page */ + lvl2_table = pfn_to_kaddr(page_to_pfn( + lvl2_table_pages[n_lvl2_grefs - 1])); + + for (j = 0; j < nents_last; j++) { + gnttab_set_map_op(&data_map_ops[k], + (unsigned long)pfn_to_kaddr(page_to_pfn(data_pages[k])), + GNTMAP_host_map | GNTMAP_readonly, + lvl2_table[j], domid); + + gnttab_set_unmap_op(&data_unmap_ops[k], + (unsigned long)pfn_to_kaddr(page_to_pfn(data_pages[k])), + GNTMAP_host_map | GNTMAP_readonly, -1); + k++; + } + + if (gnttab_map_refs(data_map_ops, NULL, + data_pages, nents)) { + dev_err(hy_drv_priv->dev, + "HYPERVISOR map grant ref failed\n"); + return NULL; + } + + /* unmapping lvl2 table pages */ + if (gnttab_unmap_refs(lvl2_unmap_ops, + NULL, lvl2_table_pages, + n_lvl2_grefs)) { + dev_err(hy_drv_priv->dev, + "Cannot unmap 2nd level refs\n"); + return NULL; + } + + /* Mark that pages were unmapped */ + for (i = 0; i < n_lvl2_grefs; i++) + lvl2_unmap_ops[i].handle = -1; + + for (i = 0; i < nents; i++) { + if (data_map_ops[i].status) { + dev_err(hy_drv_priv->dev, + "HYPERVISOR map grant ref failed status = %d\n", + data_map_ops[i].status); + goto error_cleanup_data; + } else { + data_unmap_ops[i].handle = data_map_ops[i].handle; + } + } + + /* store these references for unmapping in the future */ + sh_pages_info->unmap_ops = data_unmap_ops; + sh_pages_info->data_pages = data_pages; + + gnttab_free_pages(1, &lvl3_table_page); + gnttab_free_pages(n_lvl2_grefs, lvl2_table_pages); + kfree(lvl2_table_pages); + kfree(lvl2_map_ops); + kfree(lvl2_unmap_ops); + kfree(data_map_ops); + + dev_dbg(hy_drv_priv->dev, "%s exit\n", __func__); + return data_pages; + +error_cleanup_data: + gnttab_unmap_refs(data_unmap_ops, NULL, data_pages, + 
nents); + + gnttab_free_pages(nents, data_pages); + +error_cleanup_lvl2: + if (lvl2_unmap_ops[0].handle != -1) + gnttab_unmap_refs(lvl2_unmap_ops, NULL, + lvl2_table_pages, n_lvl2_grefs); + gnttab_free_pages(n_lvl2_grefs, lvl2_table_pages); + +error_cleanup_lvl3: + if (lvl3_unmap_ops.handle != -1) + gnttab_unmap_refs(&lvl3_unmap_ops, NULL, + &lvl3_table_page, 1); + gnttab_free_pages(1, &lvl3_table_page); + + kfree(lvl2_table_pages); + kfree(lvl2_map_ops); + kfree(lvl2_unmap_ops); + kfree(data_map_ops); + + + return NULL; +} + +int xen_be_unmap_shared_pages(void **refs_info, int nents) +{ + struct xen_shared_pages_info *sh_pages_info; + + dev_dbg(hy_drv_priv->dev, "%s entry\n", __func__); + + sh_pages_info = (struct xen_shared_pages_info *)(*refs_info); + + if (sh_pages_info->unmap_ops == NULL || + sh_pages_info->data_pages == NULL) { + dev_warn(hy_drv_priv->dev, + "pages already cleaned up or buffer not imported yet\n"); + return 0; + } + + if (gnttab_unmap_refs(sh_pages_info->unmap_ops, NULL, + sh_pages_info->data_pages, nents)) { + dev_err(hy_drv_priv->dev, "Cannot unmap data pages\n"); + return -EFAULT; + } + + gnttab_free_pages(nents, sh_pages_info->data_pages); + + kfree(sh_pages_info->data_pages); + kfree(sh_pages_info->unmap_ops); + sh_pages_info->unmap_ops = NULL; + sh_pages_info->data_pages = NULL; + kfree(sh_pages_info); + sh_pages_info = NULL; + + dev_dbg(hy_drv_priv->dev, "%s exit\n", __func__); + return 0; +} diff --git a/drivers/dma-buf/hyper_dmabuf/xen/hyper_dmabuf_xen_shm.h b/drivers/dma-buf/hyper_dmabuf/xen/hyper_dmabuf_xen_shm.h new file mode 100644 index 0000000000000..f23deb394a009 --- /dev/null +++ b/drivers/dma-buf/hyper_dmabuf/xen/hyper_dmabuf_xen_shm.h @@ -0,0 +1,46 @@ +/* + * Copyright © 2017 Intel Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without 
limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. + * + */ + +#ifndef __HYPER_DMABUF_XEN_SHM_H__ +#define __HYPER_DMABUF_XEN_SHM_H__ + +/* This collects all reference numbers for 2nd level shared pages and + * create a table with those in 1st level shared pages then return reference + * numbers for this top level table. + */ +long xen_be_share_pages(struct page **pages, int domid, int nents, + void **refs_info); + +int xen_be_unshare_pages(void **refs_info, int nents); + +/* Maps provided top level ref id and then return array of pages containing + * data refs. 
+ */ +struct page **xen_be_map_shared_pages(unsigned long lvl3_gref, int domid, + int nents, + void **refs_info); + +int xen_be_unmap_shared_pages(void **refs_info, int nents); + +#endif /* __HYPER_DMABUF_XEN_SHM_H__ */ diff --git a/drivers/dma/at_hdmac.c b/drivers/dma/at_hdmac.c index 75f38d19fcbed..dbc51154f1229 100644 --- a/drivers/dma/at_hdmac.c +++ b/drivers/dma/at_hdmac.c @@ -1641,6 +1641,12 @@ static void atc_free_chan_resources(struct dma_chan *chan) atchan->descs_allocated = 0; atchan->status = 0; + /* + * Free atslave allocated in at_dma_xlate() + */ + kfree(chan->private); + chan->private = NULL; + dev_vdbg(chan2dev(chan), "free_chan_resources: done\n"); } @@ -1675,7 +1681,7 @@ static struct dma_chan *at_dma_xlate(struct of_phandle_args *dma_spec, dma_cap_zero(mask); dma_cap_set(DMA_SLAVE, mask); - atslave = devm_kzalloc(&dmac_pdev->dev, sizeof(*atslave), GFP_KERNEL); + atslave = kzalloc(sizeof(*atslave), GFP_KERNEL); if (!atslave) return NULL; @@ -2000,6 +2006,8 @@ static int at_dma_remove(struct platform_device *pdev) struct resource *io; at_dma_off(atdma); + if (pdev->dev.of_node) + of_dma_controller_free(pdev->dev.of_node); dma_async_device_unregister(&atdma->dma_common); dma_pool_destroy(atdma->memset_pool); diff --git a/drivers/dma/dma-jz4780.c b/drivers/dma/dma-jz4780.c index 85820a2d69d48..987899610b461 100644 --- a/drivers/dma/dma-jz4780.c +++ b/drivers/dma/dma-jz4780.c @@ -761,6 +761,11 @@ static int jz4780_dma_probe(struct platform_device *pdev) struct resource *res; int i, ret; + if (!dev->of_node) { + dev_err(dev, "This driver must be probed from devicetree\n"); + return -EINVAL; + } + jzdma = devm_kzalloc(dev, sizeof(*jzdma), GFP_KERNEL); if (!jzdma) return -ENOMEM; diff --git a/drivers/dma/dw/core.c b/drivers/dma/dw/core.c index f43e6dafe446d..0f389e008ce64 100644 --- a/drivers/dma/dw/core.c +++ b/drivers/dma/dw/core.c @@ -1064,12 +1064,12 @@ static void dwc_issue_pending(struct dma_chan *chan) /* * Program FIFO size of channels. 
* - * By default full FIFO (1024 bytes) is assigned to channel 0. Here we + * By default full FIFO (512 bytes) is assigned to channel 0. Here we * slice FIFO on equal parts between channels. */ static void idma32_fifo_partition(struct dw_dma *dw) { - u64 value = IDMA32C_FP_PSIZE_CH0(128) | IDMA32C_FP_PSIZE_CH1(128) | + u64 value = IDMA32C_FP_PSIZE_CH0(64) | IDMA32C_FP_PSIZE_CH1(64) | IDMA32C_FP_UPDATE; u64 fifo_partition = 0; @@ -1082,7 +1082,7 @@ static void idma32_fifo_partition(struct dw_dma *dw) /* Fill FIFO_PARTITION high bits (Channels 2..3, 6..7) */ fifo_partition |= value << 32; - /* Program FIFO Partition registers - 128 bytes for each channel */ + /* Program FIFO Partition registers - 64 bytes per channel */ idma32_writeq(dw, FIFO_PARTITION1, fifo_partition); idma32_writeq(dw, FIFO_PARTITION0, fifo_partition); } diff --git a/drivers/dma/imx-sdma.c b/drivers/dma/imx-sdma.c index b4ec2d20e6616..cb1b44d78a1f2 100644 --- a/drivers/dma/imx-sdma.c +++ b/drivers/dma/imx-sdma.c @@ -24,7 +24,6 @@ #include #include #include -#include #include #include #include @@ -33,6 +32,7 @@ #include #include #include +#include #include #include @@ -376,7 +376,7 @@ struct sdma_channel { u32 shp_addr, per_addr; enum dma_status status; struct imx_dma_data data; - struct dma_pool *bd_pool; + struct work_struct terminate_worker; }; #define IMX_DMA_SG_LOOP BIT(0) @@ -1027,31 +1027,49 @@ static int sdma_disable_channel(struct dma_chan *chan) return 0; } - -static int sdma_disable_channel_with_delay(struct dma_chan *chan) +static void sdma_channel_terminate_work(struct work_struct *work) { - struct sdma_channel *sdmac = to_sdma_chan(chan); + struct sdma_channel *sdmac = container_of(work, struct sdma_channel, + terminate_worker); unsigned long flags; LIST_HEAD(head); - sdma_disable_channel(chan); - spin_lock_irqsave(&sdmac->vc.lock, flags); - vchan_get_all_descriptors(&sdmac->vc, &head); - sdmac->desc = NULL; - spin_unlock_irqrestore(&sdmac->vc.lock, flags); - 
vchan_dma_desc_free_list(&sdmac->vc, &head); - /* * According to NXP R&D team a delay of one BD SDMA cost time * (maximum is 1ms) should be added after disable of the channel * bit, to ensure SDMA core has really been stopped after SDMA * clients call .device_terminate_all. */ - mdelay(1); + usleep_range(1000, 2000); + + spin_lock_irqsave(&sdmac->vc.lock, flags); + vchan_get_all_descriptors(&sdmac->vc, &head); + sdmac->desc = NULL; + spin_unlock_irqrestore(&sdmac->vc.lock, flags); + vchan_dma_desc_free_list(&sdmac->vc, &head); +} + +static int sdma_disable_channel_async(struct dma_chan *chan) +{ + struct sdma_channel *sdmac = to_sdma_chan(chan); + + sdma_disable_channel(chan); + + if (sdmac->desc) + schedule_work(&sdmac->terminate_worker); return 0; } +static void sdma_channel_synchronize(struct dma_chan *chan) +{ + struct sdma_channel *sdmac = to_sdma_chan(chan); + + vchan_synchronize(&sdmac->vc); + + flush_work(&sdmac->terminate_worker); +} + static void sdma_set_watermarklevel_for_p2p(struct sdma_channel *sdmac) { struct sdma_engine *sdma = sdmac->sdma; @@ -1192,10 +1210,11 @@ static int sdma_request_channel0(struct sdma_engine *sdma) static int sdma_alloc_bd(struct sdma_desc *desc) { + u32 bd_size = desc->num_bd * sizeof(struct sdma_buffer_descriptor); int ret = 0; - desc->bd = dma_pool_alloc(desc->sdmac->bd_pool, GFP_NOWAIT, - &desc->bd_phys); + desc->bd = dma_zalloc_coherent(NULL, bd_size, &desc->bd_phys, + GFP_NOWAIT); if (!desc->bd) { ret = -ENOMEM; goto out; @@ -1206,7 +1225,9 @@ static int sdma_alloc_bd(struct sdma_desc *desc) static void sdma_free_bd(struct sdma_desc *desc) { - dma_pool_free(desc->sdmac->bd_pool, desc->bd, desc->bd_phys); + u32 bd_size = desc->num_bd * sizeof(struct sdma_buffer_descriptor); + + dma_free_coherent(NULL, bd_size, desc->bd, desc->bd_phys); } static void sdma_desc_free(struct virt_dma_desc *vd) @@ -1272,10 +1293,6 @@ static int sdma_alloc_chan_resources(struct dma_chan *chan) if (ret) goto disable_clk_ahb; - sdmac->bd_pool = 
dma_pool_create("bd_pool", chan->device->dev, - sizeof(struct sdma_buffer_descriptor), - 32, 0); - return 0; disable_clk_ahb: @@ -1290,7 +1307,9 @@ static void sdma_free_chan_resources(struct dma_chan *chan) struct sdma_channel *sdmac = to_sdma_chan(chan); struct sdma_engine *sdma = sdmac->sdma; - sdma_disable_channel_with_delay(chan); + sdma_disable_channel_async(chan); + + sdma_channel_synchronize(chan); if (sdmac->event_id0) sdma_event_disable(sdmac, sdmac->event_id0); @@ -1304,9 +1323,6 @@ static void sdma_free_chan_resources(struct dma_chan *chan) clk_disable(sdma->clk_ipg); clk_disable(sdma->clk_ahb); - - dma_pool_destroy(sdmac->bd_pool); - sdmac->bd_pool = NULL; } static struct sdma_desc *sdma_transfer_init(struct sdma_channel *sdmac, @@ -1999,6 +2015,8 @@ static int sdma_probe(struct platform_device *pdev) sdmac->channel = i; sdmac->vc.desc_free = sdma_desc_free; + INIT_WORK(&sdmac->terminate_worker, + sdma_channel_terminate_work); /* * Add the channel to the DMAC list. Do not add channel 0 though * because we need it internally in the SDMA driver. 
This also means @@ -2050,7 +2068,8 @@ static int sdma_probe(struct platform_device *pdev) sdma->dma_device.device_prep_slave_sg = sdma_prep_slave_sg; sdma->dma_device.device_prep_dma_cyclic = sdma_prep_dma_cyclic; sdma->dma_device.device_config = sdma_config; - sdma->dma_device.device_terminate_all = sdma_disable_channel_with_delay; + sdma->dma_device.device_terminate_all = sdma_disable_channel_async; + sdma->dma_device.device_synchronize = sdma_channel_synchronize; sdma->dma_device.src_addr_widths = SDMA_DMA_BUSWIDTHS; sdma->dma_device.dst_addr_widths = SDMA_DMA_BUSWIDTHS; sdma->dma_device.directions = SDMA_DMA_DIRECTIONS; diff --git a/drivers/dma/ioat/init.c b/drivers/dma/ioat/init.c index 4fa4c06c9edb9..21a5708985bc2 100644 --- a/drivers/dma/ioat/init.c +++ b/drivers/dma/ioat/init.c @@ -1205,8 +1205,15 @@ static void ioat_shutdown(struct pci_dev *pdev) spin_lock_bh(&ioat_chan->prep_lock); set_bit(IOAT_CHAN_DOWN, &ioat_chan->state); - del_timer_sync(&ioat_chan->timer); spin_unlock_bh(&ioat_chan->prep_lock); + /* + * Synchronization rule for del_timer_sync(): + * - The caller must not hold locks which would prevent + * completion of the timer's handler. + * So prep_lock cannot be held before calling it. 
+ */ + del_timer_sync(&ioat_chan->timer); + /* this should quiesce then reset */ ioat_reset_hw(ioat_chan); } diff --git a/drivers/dma/ppc4xx/adma.c b/drivers/dma/ppc4xx/adma.c index 4cf0d4d0cecfb..25610286979f6 100644 --- a/drivers/dma/ppc4xx/adma.c +++ b/drivers/dma/ppc4xx/adma.c @@ -4360,7 +4360,7 @@ static ssize_t enable_store(struct device_driver *dev, const char *buf, } static DRIVER_ATTR_RW(enable); -static ssize_t poly_store(struct device_driver *dev, char *buf) +static ssize_t poly_show(struct device_driver *dev, char *buf) { ssize_t size = 0; u32 reg; diff --git a/drivers/dma/ti/cppi41.c b/drivers/dma/ti/cppi41.c index 1497da3677109..e507ec36c0d3d 100644 --- a/drivers/dma/ti/cppi41.c +++ b/drivers/dma/ti/cppi41.c @@ -723,8 +723,22 @@ static int cppi41_stop_chan(struct dma_chan *chan) desc_phys = lower_32_bits(c->desc_phys); desc_num = (desc_phys - cdd->descs_phys) / sizeof(struct cppi41_desc); - if (!cdd->chan_busy[desc_num]) + if (!cdd->chan_busy[desc_num]) { + struct cppi41_channel *cc, *_ct; + + /* + * channels might still be in the pending list if + * cppi41_dma_issue_pending() is called after + * cppi41_runtime_suspend() is called + */ + list_for_each_entry_safe(cc, _ct, &cdd->pending, node) { + if (cc != c) + continue; + list_del(&cc->node); + break; + } return 0; + } ret = cppi41_tear_down_chan(c); if (ret) diff --git a/drivers/edac/amd64_edac.c b/drivers/edac/amd64_edac.c index 18aeabb1d5ee4..e2addb2bca296 100644 --- a/drivers/edac/amd64_edac.c +++ b/drivers/edac/amd64_edac.c @@ -2200,6 +2200,15 @@ static struct amd64_family_type family_types[] = { .dbam_to_cs = f17_base_addr_to_cs_size, } }, + [F17_M10H_CPUS] = { + .ctl_name = "F17h_M10h", + .f0_id = PCI_DEVICE_ID_AMD_17H_M10H_DF_F0, + .f6_id = PCI_DEVICE_ID_AMD_17H_M10H_DF_F6, + .ops = { + .early_channel_count = f17_early_channel_count, + .dbam_to_cs = f17_base_addr_to_cs_size, + } + }, }; /* @@ -3188,6 +3197,11 @@ static struct amd64_family_type *per_family_init(struct amd64_pvt *pvt) break;
case 0x17: + if (pvt->model >= 0x10 && pvt->model <= 0x2f) { + fam_type = &family_types[F17_M10H_CPUS]; + pvt->ops = &family_types[F17_M10H_CPUS].ops; + break; + } fam_type = &family_types[F17_CPUS]; pvt->ops = &family_types[F17_CPUS].ops; break; diff --git a/drivers/edac/amd64_edac.h b/drivers/edac/amd64_edac.h index 1d4b74e9a037f..4242f8e39c18f 100644 --- a/drivers/edac/amd64_edac.h +++ b/drivers/edac/amd64_edac.h @@ -115,6 +115,8 @@ #define PCI_DEVICE_ID_AMD_16H_M30H_NB_F2 0x1582 #define PCI_DEVICE_ID_AMD_17H_DF_F0 0x1460 #define PCI_DEVICE_ID_AMD_17H_DF_F6 0x1466 +#define PCI_DEVICE_ID_AMD_17H_M10H_DF_F0 0x15e8 +#define PCI_DEVICE_ID_AMD_17H_M10H_DF_F6 0x15ee /* * Function 1 - Address Map @@ -281,6 +283,7 @@ enum amd_families { F16_CPUS, F16_M30H_CPUS, F17_CPUS, + F17_M10H_CPUS, NUM_FAMILIES, }; diff --git a/drivers/edac/i7core_edac.c b/drivers/edac/i7core_edac.c index 8e120bf606243..f1d19504a0281 100644 --- a/drivers/edac/i7core_edac.c +++ b/drivers/edac/i7core_edac.c @@ -1711,6 +1711,7 @@ static void i7core_mce_output_error(struct mem_ctl_info *mci, u32 errnum = find_first_bit(&error, 32); if (uncorrected_error) { + core_err_cnt = 1; if (ripv) tp_event = HW_EVENT_ERR_FATAL; else diff --git a/drivers/edac/sb_edac.c b/drivers/edac/sb_edac.c index 07726fb00321e..72cea3cb86224 100644 --- a/drivers/edac/sb_edac.c +++ b/drivers/edac/sb_edac.c @@ -2888,6 +2888,7 @@ static void sbridge_mce_output_error(struct mem_ctl_info *mci, recoverable = GET_BITFIELD(m->status, 56, 56); if (uncorrected_error) { + core_err_cnt = 1; if (ripv) { type = "FATAL"; tp_event = HW_EVENT_ERR_FATAL; diff --git a/drivers/edac/skx_edac.c b/drivers/edac/skx_edac.c index fae095162c017..4ba92f1dd0f74 100644 --- a/drivers/edac/skx_edac.c +++ b/drivers/edac/skx_edac.c @@ -668,7 +668,7 @@ static bool skx_sad_decode(struct decoded_addr *res) break; case 2: lchan = (addr >> shift) % 2; - lchan = (lchan << 1) | ~lchan; + lchan = (lchan << 1) | !lchan; break; case 3: lchan = ((addr >> shift) % 2) << 1; 
@@ -959,6 +959,7 @@ static void skx_mce_output_error(struct mem_ctl_info *mci, recoverable = GET_BITFIELD(m->status, 56, 56); if (uncorrected_error) { + core_err_cnt = 1; if (ripv) { type = "FATAL"; tp_event = HW_EVENT_ERR_FATAL; diff --git a/drivers/firmware/efi/arm-init.c b/drivers/firmware/efi/arm-init.c index 388a929baf95d..1a6a77df8a5e8 100644 --- a/drivers/firmware/efi/arm-init.c +++ b/drivers/firmware/efi/arm-init.c @@ -265,6 +265,10 @@ void __init efi_init(void) (params.mmap & ~PAGE_MASK))); init_screen_info(); + + /* ARM does not permit early mappings to persist across paging_init() */ + if (IS_ENABLED(CONFIG_ARM)) + efi_memmap_unmap(); } static int __init register_gop_device(void) diff --git a/drivers/firmware/efi/arm-runtime.c b/drivers/firmware/efi/arm-runtime.c index 922cfb813109a..a00934d263c51 100644 --- a/drivers/firmware/efi/arm-runtime.c +++ b/drivers/firmware/efi/arm-runtime.c @@ -110,7 +110,7 @@ static int __init arm_enable_runtime_services(void) { u64 mapsize; - if (!efi_enabled(EFI_BOOT) || !efi_enabled(EFI_MEMMAP)) { + if (!efi_enabled(EFI_BOOT)) { pr_info("EFI services will not be available.\n"); return 0; } diff --git a/drivers/firmware/efi/libstub/Makefile b/drivers/firmware/efi/libstub/Makefile index 14c40a7750d1d..d9845099635e0 100644 --- a/drivers/firmware/efi/libstub/Makefile +++ b/drivers/firmware/efi/libstub/Makefile @@ -9,14 +9,18 @@ cflags-$(CONFIG_X86_32) := -march=i386 cflags-$(CONFIG_X86_64) := -mcmodel=small cflags-$(CONFIG_X86) += -m$(BITS) -D__KERNEL__ -O2 \ -fPIC -fno-strict-aliasing -mno-red-zone \ - -mno-mmx -mno-sse -fshort-wchar + -mno-mmx -mno-sse -fshort-wchar \ + -Wno-pointer-sign \ + $(call cc-disable-warning, address-of-packed-member) \ + $(call cc-disable-warning, gnu) # arm64 uses the full KBUILD_CFLAGS so it's necessary to explicitly # disable the stackleak plugin cflags-$(CONFIG_ARM64) := $(subst -pg,,$(KBUILD_CFLAGS)) -fpie \ $(DISABLE_STACKLEAK_PLUGIN) cflags-$(CONFIG_ARM) := $(subst -pg,,$(KBUILD_CFLAGS)) \ - 
-fno-builtin -fpic -mno-single-pic-base + -fno-builtin -fpic \ + $(call cc-option,-mno-single-pic-base) cflags-$(CONFIG_EFI_ARMSTUB) += -I$(srctree)/scripts/dtc/libfdt diff --git a/drivers/firmware/efi/libstub/fdt.c b/drivers/firmware/efi/libstub/fdt.c index 8830fa601e45d..0c0d2312f4a8a 100644 --- a/drivers/firmware/efi/libstub/fdt.c +++ b/drivers/firmware/efi/libstub/fdt.c @@ -158,6 +158,10 @@ static efi_status_t update_fdt(efi_system_table_t *sys_table, void *orig_fdt, return efi_status; } } + + /* shrink the FDT back to its minimum size */ + fdt_pack(fdt); + return EFI_SUCCESS; fdt_set_fail: diff --git a/drivers/firmware/efi/memmap.c b/drivers/firmware/efi/memmap.c index 5fc70520e04c4..1907db2b38d81 100644 --- a/drivers/firmware/efi/memmap.c +++ b/drivers/firmware/efi/memmap.c @@ -118,6 +118,9 @@ int __init efi_memmap_init_early(struct efi_memory_map_data *data) void __init efi_memmap_unmap(void) { + if (!efi_enabled(EFI_MEMMAP)) + return; + if (!efi.memmap.late) { unsigned long size; diff --git a/drivers/firmware/google/coreboot_table.c b/drivers/firmware/google/coreboot_table.c index 19db5709ae288..898bb9abc41f1 100644 --- a/drivers/firmware/google/coreboot_table.c +++ b/drivers/firmware/google/coreboot_table.c @@ -110,7 +110,8 @@ int coreboot_table_init(struct device *dev, void __iomem *ptr) if (strncmp(header.signature, "LBIO", sizeof(header.signature))) { pr_warn("coreboot_table: coreboot table missing or corrupt!\n"); - return -ENODEV; + ret = -ENODEV; + goto out; } ptr_entry = (void *)ptr_header + header.header_bytes; @@ -137,7 +138,8 @@ int coreboot_table_init(struct device *dev, void __iomem *ptr) ptr_entry += entry.size; } - +out: + iounmap(ptr); return ret; } EXPORT_SYMBOL(coreboot_table_init); @@ -146,7 +148,6 @@ int coreboot_table_exit(void) { if (ptr_header) { bus_unregister(&coreboot_bus_type); - iounmap(ptr_header); ptr_header = NULL; } diff --git a/drivers/fpga/altera-cvp.c b/drivers/fpga/altera-cvp.c index 7fa793672a7a9..68e4b2b98c8ff 100644 
--- a/drivers/fpga/altera-cvp.c +++ b/drivers/fpga/altera-cvp.c @@ -468,14 +468,6 @@ static int altera_cvp_probe(struct pci_dev *pdev, goto err_unmap; } - ret = driver_create_file(&altera_cvp_driver.driver, - &driver_attr_chkcfg); - if (ret) { - dev_err(&pdev->dev, "Can't create sysfs chkcfg file\n"); - fpga_mgr_unregister(mgr); - goto err_unmap; - } - return 0; err_unmap: @@ -493,7 +485,6 @@ static void altera_cvp_remove(struct pci_dev *pdev) struct altera_cvp_conf *conf = mgr->priv; u16 cmd; - driver_remove_file(&altera_cvp_driver.driver, &driver_attr_chkcfg); fpga_mgr_unregister(mgr); pci_iounmap(pdev, conf->map); pci_release_region(pdev, CVP_BAR); @@ -502,7 +493,30 @@ static void altera_cvp_remove(struct pci_dev *pdev) pci_write_config_word(pdev, PCI_COMMAND, cmd); } -module_pci_driver(altera_cvp_driver); +static int __init altera_cvp_init(void) +{ + int ret; + + ret = pci_register_driver(&altera_cvp_driver); + if (ret) + return ret; + + ret = driver_create_file(&altera_cvp_driver.driver, + &driver_attr_chkcfg); + if (ret) + pr_warn("Can't create sysfs chkcfg file\n"); + + return 0; +} + +static void __exit altera_cvp_exit(void) +{ + driver_remove_file(&altera_cvp_driver.driver, &driver_attr_chkcfg); + pci_unregister_driver(&altera_cvp_driver); +} + +module_init(altera_cvp_init); +module_exit(altera_cvp_exit); MODULE_LICENSE("GPL v2"); MODULE_AUTHOR("Anatolij Gustschin "); diff --git a/drivers/fsi/Kconfig b/drivers/fsi/Kconfig index af3a20dd5aa4a..99c99a5d57fe2 100644 --- a/drivers/fsi/Kconfig +++ b/drivers/fsi/Kconfig @@ -46,6 +46,7 @@ config FSI_MASTER_AST_CF tristate "FSI master based on Aspeed ColdFire coprocessor" depends on GPIOLIB depends on GPIO_ASPEED + select GENERIC_ALLOCATOR ---help--- This option enables a FSI master using the AST2400 and AST2500 GPIO lines driven by the internal ColdFire coprocessor. 
This requires diff --git a/drivers/gnss/serial.c b/drivers/gnss/serial.c index b01ba4438501a..31e891f00175c 100644 --- a/drivers/gnss/serial.c +++ b/drivers/gnss/serial.c @@ -13,6 +13,7 @@ #include #include #include +#include #include #include @@ -63,7 +64,7 @@ static int gnss_serial_write_raw(struct gnss_device *gdev, int ret; /* write is only buffered synchronously */ - ret = serdev_device_write(serdev, buf, count, 0); + ret = serdev_device_write(serdev, buf, count, MAX_SCHEDULE_TIMEOUT); if (ret < 0) return ret; diff --git a/drivers/gnss/sirf.c b/drivers/gnss/sirf.c index 79cb98950013b..2c22836d3ffd5 100644 --- a/drivers/gnss/sirf.c +++ b/drivers/gnss/sirf.c @@ -16,6 +16,7 @@ #include #include #include +#include #include #include #include @@ -83,7 +84,7 @@ static int sirf_write_raw(struct gnss_device *gdev, const unsigned char *buf, int ret; /* write is only buffered synchronously */ - ret = serdev_device_write(serdev, buf, count, 0); + ret = serdev_device_write(serdev, buf, count, MAX_SCHEDULE_TIMEOUT); if (ret < 0) return ret; @@ -167,7 +168,7 @@ static int sirf_set_active(struct sirf_data *data, bool active) else timeout = SIRF_HIBERNATE_TIMEOUT; - while (retries-- > 0) { + do { sirf_pulse_on_off(data); ret = sirf_wait_for_power_state(data, active, timeout); if (ret < 0) { @@ -178,9 +179,9 @@ static int sirf_set_active(struct sirf_data *data, bool active) } break; - } + } while (retries--); - if (retries == 0) + if (retries < 0) return -ETIMEDOUT; return 0; diff --git a/drivers/gpio/Kconfig b/drivers/gpio/Kconfig index 4f52c3a8ec99b..6039b7bdbf8ea 100644 --- a/drivers/gpio/Kconfig +++ b/drivers/gpio/Kconfig @@ -1322,6 +1322,16 @@ config GPIO_SODAVILLE help Say Y here to support Intel Sodaville GPIO. +config GPIO_VIRTIO + tristate "GPIO virtio-based front-end driver" + depends on X86 && PCI && VIRTIO_PCI + help + This module implements GPIO emulation over virtio transport. + + Say Y or M here to build GPIO virtio front-end driver. 
+ + If the driver is built as a module, it will be called gpio-virtio. + endmenu menu "SPI GPIO expanders" diff --git a/drivers/gpio/Makefile b/drivers/gpio/Makefile index c256aff66a656..5e6e98a2ce975 100644 --- a/drivers/gpio/Makefile +++ b/drivers/gpio/Makefile @@ -159,3 +159,4 @@ obj-$(CONFIG_GPIO_ZEVIO) += gpio-zevio.o obj-$(CONFIG_GPIO_ZYNQ) += gpio-zynq.o obj-$(CONFIG_GPIO_ZX) += gpio-zx.o obj-$(CONFIG_GPIO_LOONGSON1) += gpio-loongson1.o +obj-$(CONFIG_GPIO_VIRTIO) += gpio-virtio.o diff --git a/drivers/gpio/gpio-brcmstb.c b/drivers/gpio/gpio-brcmstb.c index 16c7f9f494164..af936dcca6596 100644 --- a/drivers/gpio/gpio-brcmstb.c +++ b/drivers/gpio/gpio-brcmstb.c @@ -664,6 +664,18 @@ static int brcmstb_gpio_probe(struct platform_device *pdev) struct brcmstb_gpio_bank *bank; struct gpio_chip *gc; + /* + * If bank_width is 0, then there is an empty bank in the + * register block. Special handling for this case. + */ + if (bank_width == 0) { + dev_dbg(dev, "Width 0 found: Empty bank @ %d\n", + num_banks); + num_banks++; + gpio_base += MAX_GPIO_PER_BANK; + continue; + } + bank = devm_kzalloc(dev, sizeof(*bank), GFP_KERNEL); if (!bank) { err = -ENOMEM; @@ -740,9 +752,6 @@ static int brcmstb_gpio_probe(struct platform_device *pdev) goto fail; } - dev_info(dev, "Registered %d banks (GPIO(s): %d-%d)\n", - num_banks, priv->gpio_base, gpio_base - 1); - if (priv->parent_wake_irq && need_wakeup_event) pm_wakeup_event(dev, 0); diff --git a/drivers/gpio/gpio-max7301.c b/drivers/gpio/gpio-max7301.c index 05813fbf3daf2..647dfbbc4e1cf 100644 --- a/drivers/gpio/gpio-max7301.c +++ b/drivers/gpio/gpio-max7301.c @@ -25,7 +25,7 @@ static int max7301_spi_write(struct device *dev, unsigned int reg, struct spi_device *spi = to_spi_device(dev); u16 word = ((reg & 0x7F) << 8) | (val & 0xFF); - return spi_write(spi, (const u8 *)&word, sizeof(word)); + return spi_write_then_read(spi, &word, sizeof(word), NULL, 0); } /* A read from the MAX7301 means two transfers; here, one message each */ @@ 
-37,14 +37,8 @@ static int max7301_spi_read(struct device *dev, unsigned int reg) struct spi_device *spi = to_spi_device(dev); word = 0x8000 | (reg << 8); - ret = spi_write(spi, (const u8 *)&word, sizeof(word)); - if (ret) - return ret; - /* - * This relies on the fact, that a transfer with NULL tx_buf shifts out - * zero bytes (=NOOP for MAX7301) - */ - ret = spi_read(spi, (u8 *)&word, sizeof(word)); + ret = spi_write_then_read(spi, &word, sizeof(word), &word, + sizeof(word)); if (ret) return ret; return word & 0xff; diff --git a/drivers/gpio/gpio-mockup.c b/drivers/gpio/gpio-mockup.c index d66b7a768ecd2..945bd13e5e791 100644 --- a/drivers/gpio/gpio-mockup.c +++ b/drivers/gpio/gpio-mockup.c @@ -32,8 +32,8 @@ #define gpio_mockup_err(...) pr_err(GPIO_MOCKUP_NAME ": " __VA_ARGS__) enum { - GPIO_MOCKUP_DIR_OUT = 0, - GPIO_MOCKUP_DIR_IN = 1, + GPIO_MOCKUP_DIR_IN = 0, + GPIO_MOCKUP_DIR_OUT = 1, }; /* @@ -135,7 +135,7 @@ static int gpio_mockup_get_direction(struct gpio_chip *gc, unsigned int offset) { struct gpio_mockup_chip *chip = gpiochip_get_data(gc); - return chip->lines[offset].dir; + return !chip->lines[offset].dir; } static int gpio_mockup_to_irq(struct gpio_chip *gc, unsigned int offset) diff --git a/drivers/gpio/gpio-mvebu.c b/drivers/gpio/gpio-mvebu.c index 6e02148c208b2..adc768f908f1a 100644 --- a/drivers/gpio/gpio-mvebu.c +++ b/drivers/gpio/gpio-mvebu.c @@ -773,9 +773,6 @@ static int mvebu_pwm_probe(struct platform_device *pdev, "marvell,armada-370-gpio")) return 0; - if (IS_ERR(mvchip->clk)) - return PTR_ERR(mvchip->clk); - /* * There are only two sets of PWM configuration registers for * all the GPIO lines on those SoCs which this driver reserves @@ -786,6 +783,9 @@ static int mvebu_pwm_probe(struct platform_device *pdev, if (!res) return 0; + if (IS_ERR(mvchip->clk)) + return PTR_ERR(mvchip->clk); + /* * Use set A for lines of GPIO chip with id 0, B for GPIO chip * with id 1. Don't allow further GPIO chips to be used for PWM. 
diff --git a/drivers/gpio/gpio-mxs.c b/drivers/gpio/gpio-mxs.c index df30490da820d..ea874fd033a5e 100644 --- a/drivers/gpio/gpio-mxs.c +++ b/drivers/gpio/gpio-mxs.c @@ -18,8 +18,6 @@ #include #include #include -/* FIXME: for gpio_get_value(), replace this by direct register read */ -#include #include #define MXS_SET 0x4 @@ -86,7 +84,7 @@ static int mxs_gpio_set_irq_type(struct irq_data *d, unsigned int type) port->both_edges &= ~pin_mask; switch (type) { case IRQ_TYPE_EDGE_BOTH: - val = gpio_get_value(port->gc.base + d->hwirq); + val = port->gc.get(&port->gc, d->hwirq); if (val) edge = GPIO_INT_FALL_EDGE; else diff --git a/drivers/gpio/gpio-pl061.c b/drivers/gpio/gpio-pl061.c index 2afd9de84a0d0..dc42571e6fdc8 100644 --- a/drivers/gpio/gpio-pl061.c +++ b/drivers/gpio/gpio-pl061.c @@ -54,6 +54,7 @@ struct pl061 { void __iomem *base; struct gpio_chip gc; + struct irq_chip irq_chip; int parent_irq; #ifdef CONFIG_PM @@ -281,15 +282,6 @@ static int pl061_irq_set_wake(struct irq_data *d, unsigned int state) return irq_set_irq_wake(pl061->parent_irq, state); } -static struct irq_chip pl061_irqchip = { - .name = "pl061", - .irq_ack = pl061_irq_ack, - .irq_mask = pl061_irq_mask, - .irq_unmask = pl061_irq_unmask, - .irq_set_type = pl061_irq_type, - .irq_set_wake = pl061_irq_set_wake, -}; - static int pl061_probe(struct amba_device *adev, const struct amba_id *id) { struct device *dev = &adev->dev; @@ -328,6 +320,13 @@ static int pl061_probe(struct amba_device *adev, const struct amba_id *id) /* * irq_chip support */ + pl061->irq_chip.name = dev_name(dev); + pl061->irq_chip.irq_ack = pl061_irq_ack; + pl061->irq_chip.irq_mask = pl061_irq_mask; + pl061->irq_chip.irq_unmask = pl061_irq_unmask; + pl061->irq_chip.irq_set_type = pl061_irq_type; + pl061->irq_chip.irq_set_wake = pl061_irq_set_wake; + writeb(0, pl061->base + GPIOIE); /* disable irqs */ irq = adev->irq[0]; if (irq < 0) { @@ -336,14 +335,14 @@ static int pl061_probe(struct amba_device *adev, const struct amba_id *id) } 
pl061->parent_irq = irq; - ret = gpiochip_irqchip_add(&pl061->gc, &pl061_irqchip, + ret = gpiochip_irqchip_add(&pl061->gc, &pl061->irq_chip, 0, handle_bad_irq, IRQ_TYPE_NONE); if (ret) { dev_info(&adev->dev, "could not add irqchip\n"); return ret; } - gpiochip_set_chained_irqchip(&pl061->gc, &pl061_irqchip, + gpiochip_set_chained_irqchip(&pl061->gc, &pl061->irq_chip, irq, pl061_irq_handler); amba_set_drvdata(adev, pl061); diff --git a/drivers/gpio/gpio-pxa.c b/drivers/gpio/gpio-pxa.c index c18712dabf93d..9f3f166f17608 100644 --- a/drivers/gpio/gpio-pxa.c +++ b/drivers/gpio/gpio-pxa.c @@ -268,8 +268,8 @@ static int pxa_gpio_direction_input(struct gpio_chip *chip, unsigned offset) if (pxa_gpio_has_pinctrl()) { ret = pinctrl_gpio_direction_input(chip->base + offset); - if (!ret) - return 0; + if (ret) + return ret; } spin_lock_irqsave(&gpio_lock, flags); diff --git a/drivers/gpio/gpio-virtio.c b/drivers/gpio/gpio-virtio.c new file mode 100644 index 0000000000000..4cb45737e6398 --- /dev/null +++ b/drivers/gpio/gpio-virtio.c @@ -0,0 +1,401 @@ +// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 +/* + * Virtio GPIO Front End Driver + * + * Copyright (c) 2019 Intel Corporation. All rights reserved. 
+ */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#ifndef VIRTIO_ID_GPIO +#define VIRTIO_ID_GPIO 0xFFF7 +#endif + +#define GPIO_VIRTIO_F_CHIP 0 +#define GPIO_VIRTIO_TIMEOUT 500 + +enum gpio_virtio_request_command { + GPIO_REQ_SET_VALUE = 0, + GPIO_REQ_GET_VALUE = 1, + GPIO_REQ_INPUT_DIRECTION = 2, + GPIO_REQ_OUTPUT_DIRECTION = 3, + GPIO_REQ_GET_DIRECTION = 4, + GPIO_REQ_SET_CONFIG = 5, + + GPIO_REQ_MAX +}; + +struct gpio_virtio_request { + uint8_t cmd; + uint8_t offset; + uint64_t data; +} __packed; + +struct gpio_virtio_response { + int8_t err; + uint8_t data; +} __packed; + +struct gpio_virtio_info { + struct gpio_virtio_request req; + struct gpio_virtio_response rsp; +} __packed; + +struct gpio_virtio_config { + uint16_t base; /* base number */ + uint16_t ngpio; /* number of gpios */ +} __packed; + +struct gpio_virtio_data { + char name[32]; +} __packed; + +struct gpio_virtio { + struct device *dev; + struct virtio_device *vdev; + struct virtqueue *gpio_vq; + struct gpio_chip chip; + struct gpio_virtio_data *data; + const char **names; + struct mutex gpio_lock; +}; + +static unsigned int features[] = {GPIO_VIRTIO_F_CHIP}; + +static int gpio_virtio_update(struct gpio_virtio *vgpio, + unsigned int cmd, unsigned int offset, unsigned long data) +{ + struct gpio_virtio_info *info; + struct scatterlist out, in, *sgs[2]; + unsigned long timeout; + int rc = 0; + int len; + + info = kzalloc(sizeof(*info), GFP_KERNEL); + if (!info) { + rc = -ENOMEM; + goto out; + } + + info->req.cmd = cmd; + info->req.offset = offset; + if (cmd == GPIO_REQ_SET_VALUE || cmd == GPIO_REQ_OUTPUT_DIRECTION + || cmd == GPIO_REQ_SET_CONFIG) + info->req.data = data; + + sg_init_one(&out, &info->req, sizeof(info->req)); + sg_init_one(&in, &info->rsp, sizeof(info->rsp)); + sgs[0] = &out; + sgs[1] = ∈ + mutex_lock(&vgpio->gpio_lock); + rc = virtqueue_add_sgs(vgpio->gpio_vq, sgs, 1, 1, info, GFP_KERNEL); + if (rc) { + 
mutex_unlock(&vgpio->gpio_lock); + goto out; + } + virtqueue_kick(vgpio->gpio_vq); + + /* polling for the virtqueue */ + rc = -1; + timeout = jiffies + msecs_to_jiffies(GPIO_VIRTIO_TIMEOUT); + do { + if (virtqueue_get_buf(vgpio->gpio_vq, &len)) { + if (len == sizeof(info->rsp) && !info->rsp.err) + rc = 0; + else + rc = -EINVAL; + break; + } + + /* polling interval is 5 - 10 ms */ + usleep_range(5000, 10000); + } while (time_before(jiffies, timeout)); + if (rc == 0) { + if (info->req.cmd == GPIO_REQ_GET_DIRECTION || + info->req.cmd == GPIO_REQ_GET_VALUE) + rc = info->rsp.data; + } else if (rc == -EINVAL) { + dev_err(&vgpio->vdev->dev, "gpio response error %d, len %d\n", + info->rsp.err, len); + } else { + /* Try to get buf */ + if (virtqueue_get_buf(vgpio->gpio_vq, &len)) { + if (len == sizeof(info->rsp) && !info->rsp.err) + rc = 0; + else + dev_err(&vgpio->vdev->dev, + "gpio response error %d, len %d\n", + info->rsp.err, len); + } else { + dev_err(&vgpio->vdev->dev, + "gpio response timeout %d ms\n", + GPIO_VIRTIO_TIMEOUT); + } + } + mutex_unlock(&vgpio->gpio_lock); +out: + kfree(info); + return rc; +} + +static int gpio_virtio_get_direction(struct gpio_chip *chip, + unsigned int offset) +{ + return gpio_virtio_update(gpiochip_get_data(chip), + GPIO_REQ_GET_DIRECTION, offset, 0); +} + +static int gpio_virtio_direction_input(struct gpio_chip *chip, + unsigned int offset) +{ + return gpio_virtio_update(gpiochip_get_data(chip), + GPIO_REQ_INPUT_DIRECTION, offset, 0); +} + +static int gpio_virtio_direction_output(struct gpio_chip *chip, + unsigned int offset, int value) +{ + return gpio_virtio_update(gpiochip_get_data(chip), + GPIO_REQ_OUTPUT_DIRECTION, offset, value); +} + +static int gpio_virtio_get(struct gpio_chip *chip, + unsigned int offset) +{ + return gpio_virtio_update(gpiochip_get_data(chip), + GPIO_REQ_GET_VALUE, offset, 0); +} + +static void gpio_virtio_set(struct gpio_chip *chip, unsigned int offset, + int value) +{ + 
gpio_virtio_update(gpiochip_get_data(chip), + GPIO_REQ_SET_VALUE, offset, value); +} + +static int gpio_virtio_set_config(struct gpio_chip *chip, unsigned int offset, + unsigned long config) +{ + return gpio_virtio_update(gpiochip_get_data(chip), + GPIO_REQ_SET_CONFIG, offset, config); +} + +static const struct gpio_chip gpio_virtio_chip = { + .owner = THIS_MODULE, + .label = "gpio_virtio", + .get_direction = gpio_virtio_get_direction, + .direction_input = gpio_virtio_direction_input, + .direction_output = gpio_virtio_direction_output, + .get = gpio_virtio_get, + .set = gpio_virtio_set, + .set_config = gpio_virtio_set_config, +}; + +static int gpio_virtio_register_chip(struct gpio_virtio *vgpio, + struct device *pdev) +{ + struct scatterlist sg; + unsigned long timeout; + int err, i, len; + u16 base, n = 0; + + if (virtio_has_feature(vgpio->vdev, GPIO_VIRTIO_F_CHIP)) { + virtio_cread(vgpio->vdev, struct gpio_virtio_config, + base, &base); + virtio_cread(vgpio->vdev, struct gpio_virtio_config, + ngpio, &n); + } else { + dev_err(&vgpio->vdev->dev, "failed to get virtio feature\n"); + return -ENODEV; + } + + if (n == 0) { + dev_err(&vgpio->vdev->dev, "number of gpio is invalid\n"); + return -EINVAL; + } + + vgpio->chip = gpio_virtio_chip; + vgpio->chip.base = base; + vgpio->chip.ngpio = n; + if (pdev && ACPI_COMPANION(pdev->parent)) + vgpio->chip.parent = pdev->parent; + + /* initialize gpio names */ + vgpio->names = kcalloc(n, sizeof(*vgpio->names), GFP_KERNEL); + vgpio->data = kcalloc(n, sizeof(*vgpio->data), GFP_KERNEL); + if (!vgpio->data || !vgpio->names) { + dev_err(&vgpio->vdev->dev, "failed to alloc names and data\n"); + err = -ENOMEM; + goto out; + } + sg_init_one(&sg, vgpio->data, n * sizeof(*vgpio->data)); + err = virtqueue_add_inbuf(vgpio->gpio_vq, &sg, 1, vgpio->data, + GFP_KERNEL); + if (err) + goto out; + + virtqueue_kick(vgpio->gpio_vq); + + /* polling for the virtqueue */ + err = -1; + timeout = jiffies + msecs_to_jiffies(GPIO_VIRTIO_TIMEOUT); + do { 
+ if (virtqueue_get_buf(vgpio->gpio_vq, &len)) { + if (n == len) + err = 0; + break; + } + + /* polling interval is 5 - 10 ms */ + usleep_range(5000, 10000); + } while (time_before(jiffies, timeout)); + + if (err) { + dev_err(&vgpio->vdev->dev, "gpio name is invalid,n=%u,len=%d\n", + n, len); + err = -EINVAL; + goto out; + } + + /* + * The buffer holds n gpio names; the backend will copy len + * gpio names into it. If the buffer is NULL, or n and len do + * not match, the initialization fails. + */ + for (i = 0; i < n; i++) + vgpio->names[i] = vgpio->data[i].name; + + vgpio->chip.names = vgpio->names; + err = gpiochip_add_data(&vgpio->chip, vgpio); + if (err) + goto out; + + return 0; +out: + kfree(vgpio->names); + kfree(vgpio->data); + return err; +} + +static int init_vqs(struct gpio_virtio *vgpio) +{ + struct virtqueue *vqs[1]; + vq_callback_t *callbacks[1] = {NULL}; + const char * const names[1] = {"gpio"}; + int err; + + err = virtio_find_vqs(vgpio->vdev, 1, vqs, callbacks, names, NULL); + if (err) + return err; + + vgpio->gpio_vq = vqs[0]; + return 0; +} + +static int gpio_virtio_probe(struct virtio_device *vdev) +{ + struct gpio_virtio *vgpio; + struct device *pdev; + int err; + + vgpio = kzalloc(sizeof(*vgpio), GFP_KERNEL); + if (!vgpio) + return -ENOMEM; + + pdev = &vdev->dev; + vdev->priv = vgpio; + vgpio->vdev = vdev; + mutex_init(&vgpio->gpio_lock); + err = init_vqs(vgpio); + if (err) + goto out; + + err = gpio_virtio_register_chip(vgpio, pdev); + if (err) + goto out; + + return 0; +out: + dev_err(&vgpio->vdev->dev, "failed to initialize gpio virtio\n"); + kfree(vgpio); + return err; +} + +static void gpio_virtio_remove(struct virtio_device *vdev) +{ + struct gpio_virtio *gpio = vdev->priv; + + /* Disable virtqueues. 
*/ + if (vdev->config->reset) + vdev->config->reset(vdev); + if (vdev->config->del_vqs) + vdev->config->del_vqs(vdev); + + gpiochip_remove(&gpio->chip); + + kfree(gpio->data); + kfree(gpio->names); + kfree(gpio); +} + +static struct virtio_device_id id_table[] = { + { VIRTIO_ID_GPIO, VIRTIO_DEV_ANY_ID }, + { 0 }, +}; + +#ifdef CONFIG_PM_SLEEP +static int gpio_virtio_freeze(struct virtio_device *vdev) +{ + /* Disable virtqueues. */ + if (vdev->config->reset) + vdev->config->reset(vdev); + if (vdev->config->del_vqs) + vdev->config->del_vqs(vdev); + return 0; +} + +static int gpio_virtio_restore(struct virtio_device *vdev) +{ + return init_vqs(vdev->priv); +} +#endif + +static struct virtio_driver gpio_virtio_driver = { + .feature_table = features, + .feature_table_size = ARRAY_SIZE(features), + .driver.name = KBUILD_MODNAME, + .driver.owner = THIS_MODULE, + .id_table = id_table, + .probe = gpio_virtio_probe, + .remove = gpio_virtio_remove, +#ifdef CONFIG_PM_SLEEP + .freeze = gpio_virtio_freeze, + .restore = gpio_virtio_restore, +#endif +}; + +static int __init gpio_virtio_init(void) +{ + return register_virtio_driver(&gpio_virtio_driver); +} + +static void __exit gpio_virtio_exit(void) +{ + unregister_virtio_driver(&gpio_virtio_driver); +} + +subsys_initcall(gpio_virtio_init); +module_exit(gpio_virtio_exit); + +MODULE_DEVICE_TABLE(virtio, id_table); +MODULE_DESCRIPTION("GPIO virtio frontend driver"); +MODULE_AUTHOR("Intel Corporation"); +MODULE_LICENSE("Dual BSD/GPL"); diff --git a/drivers/gpio/gpiolib-acpi.c b/drivers/gpio/gpiolib-acpi.c index 8b9d7e42c600b..c5e009f610210 100644 --- a/drivers/gpio/gpiolib-acpi.c +++ b/drivers/gpio/gpiolib-acpi.c @@ -23,11 +23,28 @@ #include "gpiolib.h" +/** + * struct acpi_gpio_event - ACPI GPIO event handler data + * + * @node: list-entry of the events list of the struct acpi_gpio_chip + * @handle: handle of ACPI method to execute when the IRQ triggers + * @handler: irq_handler to pass to request_irq when requesting the IRQ + * 
@pin: GPIO pin number on the gpio_chip + * @irq: Linux IRQ number for the event, for request_ / free_irq + * @irqflags: flags to pass to request_irq when requesting the IRQ + * @irq_is_wake: If the ACPI flags indicate the IRQ is a wakeup source + * @irq_requested: True if request_irq has been done + * @desc: gpio_desc for the GPIO pin for this event + */ struct acpi_gpio_event { struct list_head node; acpi_handle handle; + irq_handler_t handler; unsigned int pin; unsigned int irq; + unsigned long irqflags; + bool irq_is_wake; + bool irq_requested; struct gpio_desc *desc; }; @@ -53,10 +70,10 @@ struct acpi_gpio_chip { /* * For gpiochips which call acpi_gpiochip_request_interrupts() before late_init - * (so builtin drivers) we register the ACPI GpioInt event handlers from a + * (so builtin drivers) we register the ACPI GpioInt IRQ handlers from a * late_initcall_sync handler, so that other builtin drivers can register their * OpRegions before the event handlers can run. This list contains gpiochips - * for which the acpi_gpiochip_request_interrupts() has been deferred. + * for which the acpi_gpiochip_request_irqs() call has been deferred. 
*/ static DEFINE_MUTEX(acpi_gpio_deferred_req_irqs_lock); static LIST_HEAD(acpi_gpio_deferred_req_irqs_list); @@ -137,8 +154,42 @@ bool acpi_gpio_get_irq_resource(struct acpi_resource *ares, } EXPORT_SYMBOL_GPL(acpi_gpio_get_irq_resource); -static acpi_status acpi_gpiochip_request_interrupt(struct acpi_resource *ares, - void *context) +static void acpi_gpiochip_request_irq(struct acpi_gpio_chip *acpi_gpio, + struct acpi_gpio_event *event) +{ + int ret, value; + + ret = request_threaded_irq(event->irq, NULL, event->handler, + event->irqflags, "ACPI:Event", event); + if (ret) { + dev_err(acpi_gpio->chip->parent, + "Failed to setup interrupt handler for %d\n", + event->irq); + return; + } + + if (event->irq_is_wake) + enable_irq_wake(event->irq); + + event->irq_requested = true; + + /* Make sure we trigger the initial state of edge-triggered IRQs */ + value = gpiod_get_raw_value_cansleep(event->desc); + if (((event->irqflags & IRQF_TRIGGER_RISING) && value == 1) || + ((event->irqflags & IRQF_TRIGGER_FALLING) && value == 0)) + event->handler(event->irq, event); +} + +static void acpi_gpiochip_request_irqs(struct acpi_gpio_chip *acpi_gpio) +{ + struct acpi_gpio_event *event; + + list_for_each_entry(event, &acpi_gpio->events, node) + acpi_gpiochip_request_irq(acpi_gpio, event); +} + +static acpi_status acpi_gpiochip_alloc_event(struct acpi_resource *ares, + void *context) { struct acpi_gpio_chip *acpi_gpio = context; struct gpio_chip *chip = acpi_gpio->chip; @@ -147,8 +198,7 @@ static acpi_status acpi_gpiochip_request_interrupt(struct acpi_resource *ares, struct acpi_gpio_event *event; irq_handler_t handler = NULL; struct gpio_desc *desc; - unsigned long irqflags; - int ret, pin, irq, value; + int ret, pin, irq; if (!acpi_gpio_get_irq_resource(ares, &agpio)) return AE_OK; @@ -179,8 +229,6 @@ static acpi_status acpi_gpiochip_request_interrupt(struct acpi_resource *ares, gpiod_direction_input(desc); - value = gpiod_get_value_cansleep(desc); - ret = 
gpiochip_lock_as_irq(chip, pin); if (ret) { dev_err(chip->parent, "Failed to lock GPIO as interrupt\n"); @@ -193,64 +241,42 @@ static acpi_status acpi_gpiochip_request_interrupt(struct acpi_resource *ares, goto fail_unlock_irq; } - irqflags = IRQF_ONESHOT; + event = kzalloc(sizeof(*event), GFP_KERNEL); + if (!event) + goto fail_unlock_irq; + + event->irqflags = IRQF_ONESHOT; if (agpio->triggering == ACPI_LEVEL_SENSITIVE) { if (agpio->polarity == ACPI_ACTIVE_HIGH) - irqflags |= IRQF_TRIGGER_HIGH; + event->irqflags |= IRQF_TRIGGER_HIGH; else - irqflags |= IRQF_TRIGGER_LOW; + event->irqflags |= IRQF_TRIGGER_LOW; } else { switch (agpio->polarity) { case ACPI_ACTIVE_HIGH: - irqflags |= IRQF_TRIGGER_RISING; + event->irqflags |= IRQF_TRIGGER_RISING; break; case ACPI_ACTIVE_LOW: - irqflags |= IRQF_TRIGGER_FALLING; + event->irqflags |= IRQF_TRIGGER_FALLING; break; default: - irqflags |= IRQF_TRIGGER_RISING | - IRQF_TRIGGER_FALLING; + event->irqflags |= IRQF_TRIGGER_RISING | + IRQF_TRIGGER_FALLING; break; } } - event = kzalloc(sizeof(*event), GFP_KERNEL); - if (!event) - goto fail_unlock_irq; - event->handle = evt_handle; + event->handler = handler; event->irq = irq; + event->irq_is_wake = agpio->wake_capable == ACPI_WAKE_CAPABLE; event->pin = pin; event->desc = desc; - ret = request_threaded_irq(event->irq, NULL, handler, irqflags, - "ACPI:Event", event); - if (ret) { - dev_err(chip->parent, - "Failed to setup interrupt handler for %d\n", - event->irq); - goto fail_free_event; - } - - if (agpio->wake_capable == ACPI_WAKE_CAPABLE) - enable_irq_wake(irq); - list_add_tail(&event->node, &acpi_gpio->events); - /* - * Make sure we trigger the initial state of the IRQ when using RISING - * or FALLING. Note we run the handlers on late_init, the AML code - * may refer to OperationRegions from other (builtin) drivers which - * may be probed after us. 
- */ - if (((irqflags & IRQF_TRIGGER_RISING) && value == 1) || - ((irqflags & IRQF_TRIGGER_FALLING) && value == 0)) - handler(event->irq, event); - return AE_OK; -fail_free_event: - kfree(event); fail_unlock_irq: gpiochip_unlock_as_irq(chip, pin); fail_free_desc: @@ -287,6 +313,9 @@ void acpi_gpiochip_request_interrupts(struct gpio_chip *chip) if (ACPI_FAILURE(status)) return; + acpi_walk_resources(handle, "_AEI", + acpi_gpiochip_alloc_event, acpi_gpio); + mutex_lock(&acpi_gpio_deferred_req_irqs_lock); defer = !acpi_gpio_deferred_req_irqs_done; if (defer) @@ -297,8 +326,7 @@ void acpi_gpiochip_request_interrupts(struct gpio_chip *chip) if (defer) return; - acpi_walk_resources(handle, "_AEI", - acpi_gpiochip_request_interrupt, acpi_gpio); + acpi_gpiochip_request_irqs(acpi_gpio); } EXPORT_SYMBOL_GPL(acpi_gpiochip_request_interrupts); @@ -335,10 +363,13 @@ void acpi_gpiochip_free_interrupts(struct gpio_chip *chip) list_for_each_entry_safe_reverse(event, ep, &acpi_gpio->events, node) { struct gpio_desc *desc; - if (irqd_is_wakeup_set(irq_get_irq_data(event->irq))) - disable_irq_wake(event->irq); + if (event->irq_requested) { + if (event->irq_is_wake) + disable_irq_wake(event->irq); + + free_irq(event->irq, event); + } - free_irq(event->irq, event); desc = event->desc; if (WARN_ON(IS_ERR(desc))) continue; @@ -1204,23 +1235,16 @@ bool acpi_can_fallback_to_crs(struct acpi_device *adev, const char *con_id) return con_id == NULL; } -/* Run deferred acpi_gpiochip_request_interrupts() */ -static int acpi_gpio_handle_deferred_request_interrupts(void) +/* Run deferred acpi_gpiochip_request_irqs() */ +static int acpi_gpio_handle_deferred_request_irqs(void) { struct acpi_gpio_chip *acpi_gpio, *tmp; mutex_lock(&acpi_gpio_deferred_req_irqs_lock); list_for_each_entry_safe(acpi_gpio, tmp, &acpi_gpio_deferred_req_irqs_list, - deferred_req_irqs_list_entry) { - acpi_handle handle; - - handle = ACPI_HANDLE(acpi_gpio->chip->parent); - acpi_walk_resources(handle, "_AEI", - 
acpi_gpiochip_request_interrupt, acpi_gpio); - - list_del_init(&acpi_gpio->deferred_req_irqs_list_entry); - } + deferred_req_irqs_list_entry) + acpi_gpiochip_request_irqs(acpi_gpio); acpi_gpio_deferred_req_irqs_done = true; mutex_unlock(&acpi_gpio_deferred_req_irqs_lock); @@ -1228,4 +1252,4 @@ static int acpi_gpio_handle_deferred_request_interrupts(void) return 0; } /* We must use _sync so that this runs after the first deferred_probe run */ -late_initcall_sync(acpi_gpio_handle_deferred_request_interrupts); +late_initcall_sync(acpi_gpio_handle_deferred_request_irqs); diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c index 25187403e3ace..a8e01d99919c5 100644 --- a/drivers/gpio/gpiolib.c +++ b/drivers/gpio/gpiolib.c @@ -1285,7 +1285,7 @@ int gpiochip_add_data_with_key(struct gpio_chip *chip, void *data, gdev->descs = kcalloc(chip->ngpio, sizeof(gdev->descs[0]), GFP_KERNEL); if (!gdev->descs) { status = -ENOMEM; - goto err_free_gdev; + goto err_free_ida; } if (chip->ngpio == 0) { @@ -1413,8 +1413,9 @@ int gpiochip_add_data_with_key(struct gpio_chip *chip, void *data, kfree_const(gdev->label); err_free_descs: kfree(gdev->descs); -err_free_gdev: +err_free_ida: ida_simple_remove(&gpio_ida, gdev->id); +err_free_gdev: /* failures here can mean systems won't boot... */ pr_err("%s: GPIOs %d..%d (%s) failed to register, %d\n", __func__, gdev->base, gdev->base + gdev->ngpio - 1, diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig index cb88528e7b10c..e44e567bd7892 100644 --- a/drivers/gpu/drm/Kconfig +++ b/drivers/gpu/drm/Kconfig @@ -110,6 +110,26 @@ config DRM_FBDEV_OVERALLOC is 100. Typical values for double buffering will be 200, triple buffering 300. 
+config DRM_FBDEV_LEAK_PHYS_SMEM + bool "Shamelessly allow leaking of fbdev physical address (DANGEROUS)" + depends on DRM_FBDEV_EMULATION && EXPERT + default n + help + In order to keep user-space compatibility, we want in certain + use-cases to keep leaking the fbdev physical address to the + user-space program handling the fbdev buffer. + This affects, not only, Amlogic, Allwinner or Rockchip devices + with ARM Mali GPUs using an userspace Blob. + This option is not supported by upstream developers and should be + removed as soon as possible and be considered as a broken and + legacy behaviour from a modern fbdev device driver. + + Please send any bug reports when using this to your proprietary + software vendor that requires this. + + If in doubt, say "N" or spread the word to your closed source + library vendor. + config DRM_LOAD_EDID_FIRMWARE bool "Allow to specify an EDID data set instead of probing for it" depends on DRM diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c index 353993218f213..f008804f0b975 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c @@ -358,7 +358,9 @@ static int amdgpu_atif_get_sbios_requests(struct amdgpu_atif *atif, * * Checks the acpi event and if it matches an atif event, * handles it. - * Returns NOTIFY code + * + * Returns: + * NOTIFY_BAD or NOTIFY_DONE, depending on the event. */ static int amdgpu_atif_handler(struct amdgpu_device *adev, struct acpi_bus_event *event) @@ -372,11 +374,16 @@ static int amdgpu_atif_handler(struct amdgpu_device *adev, if (strcmp(event->device_class, ACPI_VIDEO_CLASS) != 0) return NOTIFY_DONE; + /* Is this actually our event? 
*/ if (!atif || !atif->notification_cfg.enabled || - event->type != atif->notification_cfg.command_code) - /* Not our event */ - return NOTIFY_DONE; + event->type != atif->notification_cfg.command_code) { + /* These events will generate keypresses otherwise */ + if (event->type == ACPI_VIDEO_NOTIFY_PROBE) + return NOTIFY_BAD; + else + return NOTIFY_DONE; + } if (atif->functions.sbios_requests) { struct atif_sbios_requests req; @@ -385,7 +392,7 @@ static int amdgpu_atif_handler(struct amdgpu_device *adev, count = amdgpu_atif_get_sbios_requests(atif, &req); if (count <= 0) - return NOTIFY_DONE; + return NOTIFY_BAD; DRM_DEBUG_DRIVER("ATIF: %d pending SBIOS requests\n", count); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c index 0c791e35acf02..79bd8bd97fae0 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c @@ -496,8 +496,11 @@ void amdgpu_amdkfd_set_compute_idle(struct kgd_dev *kgd, bool idle) { struct amdgpu_device *adev = (struct amdgpu_device *)kgd; - amdgpu_dpm_switch_power_profile(adev, - PP_SMC_POWER_PROFILE_COMPUTE, !idle); + if (adev->powerplay.pp_funcs && + adev->powerplay.pp_funcs->switch_power_profile) + amdgpu_dpm_switch_power_profile(adev, + PP_SMC_POWER_PROFILE_COMPUTE, + !idle); } bool amdgpu_amdkfd_is_kfd_vmid(struct amdgpu_device *adev, u32 vmid) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c index a028661d9e201..92b11de195813 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c @@ -576,6 +576,7 @@ static const struct amdgpu_px_quirk amdgpu_px_quirk_list[] = { { 0x1002, 0x6900, 0x1028, 0x0812, AMDGPU_PX_QUIRK_FORCE_ATPX }, { 0x1002, 0x6900, 0x1028, 0x0813, AMDGPU_PX_QUIRK_FORCE_ATPX }, { 0x1002, 0x6900, 0x1025, 0x125A, AMDGPU_PX_QUIRK_FORCE_ATPX }, + { 0x1002, 0x6900, 0x17AA, 0x3806, AMDGPU_PX_QUIRK_FORCE_ATPX }, { 
0, 0, 0, 0, 0 }, }; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c index d472a2c8399fe..b80243d3972e4 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c @@ -67,7 +67,8 @@ int amdgpu_bo_list_create(struct amdgpu_device *adev, struct drm_file *filp, unsigned i; int r; - if (num_entries > SIZE_MAX / sizeof(struct amdgpu_bo_list_entry)) + if (num_entries > (SIZE_MAX - sizeof(struct amdgpu_bo_list)) + / sizeof(struct amdgpu_bo_list_entry)) return -EINVAL; size = sizeof(struct amdgpu_bo_list); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c index 8816c697b2053..387f1cf1dc207 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c @@ -330,7 +330,9 @@ static int amdgpu_cgs_get_firmware_info(struct cgs_device *cgs_device, case CHIP_TOPAZ: if (((adev->pdev->device == 0x6900) && (adev->pdev->revision == 0x81)) || ((adev->pdev->device == 0x6900) && (adev->pdev->revision == 0x83)) || - ((adev->pdev->device == 0x6907) && (adev->pdev->revision == 0x87))) { + ((adev->pdev->device == 0x6907) && (adev->pdev->revision == 0x87)) || + ((adev->pdev->device == 0x6900) && (adev->pdev->revision == 0xD1)) || + ((adev->pdev->device == 0x6900) && (adev->pdev->revision == 0xD3))) { info->is_kicker = true; strcpy(fw_name, "amdgpu/topaz_k_smc.bin"); } else @@ -351,7 +353,6 @@ static int amdgpu_cgs_get_firmware_info(struct cgs_device *cgs_device, if (type == CGS_UCODE_ID_SMU) { if (((adev->pdev->device == 0x67ef) && ((adev->pdev->revision == 0xe0) || - (adev->pdev->revision == 0xe2) || (adev->pdev->revision == 0xe5))) || ((adev->pdev->device == 0x67ff) && ((adev->pdev->revision == 0xcf) || @@ -359,8 +360,13 @@ static int amdgpu_cgs_get_firmware_info(struct cgs_device *cgs_device, (adev->pdev->revision == 0xff)))) { info->is_kicker = true; strcpy(fw_name, "amdgpu/polaris11_k_smc.bin"); - } else + } 
else if ((adev->pdev->device == 0x67ef) && + (adev->pdev->revision == 0xe2)) { + info->is_kicker = true; + strcpy(fw_name, "amdgpu/polaris11_k2_smc.bin"); + } else { strcpy(fw_name, "amdgpu/polaris11_smc.bin"); + } } else if (type == CGS_UCODE_ID_SMU_SK) { strcpy(fw_name, "amdgpu/polaris11_smc_sk.bin"); } @@ -375,17 +381,35 @@ static int amdgpu_cgs_get_firmware_info(struct cgs_device *cgs_device, (adev->pdev->revision == 0xe7) || (adev->pdev->revision == 0xef))) || ((adev->pdev->device == 0x6fdf) && - (adev->pdev->revision == 0xef))) { + ((adev->pdev->revision == 0xef) || + (adev->pdev->revision == 0xff)))) { info->is_kicker = true; strcpy(fw_name, "amdgpu/polaris10_k_smc.bin"); - } else + } else if ((adev->pdev->device == 0x67df) && + ((adev->pdev->revision == 0xe1) || + (adev->pdev->revision == 0xf7))) { + info->is_kicker = true; + strcpy(fw_name, "amdgpu/polaris10_k2_smc.bin"); + } else { strcpy(fw_name, "amdgpu/polaris10_smc.bin"); + } } else if (type == CGS_UCODE_ID_SMU_SK) { strcpy(fw_name, "amdgpu/polaris10_smc_sk.bin"); } break; case CHIP_POLARIS12: - strcpy(fw_name, "amdgpu/polaris12_smc.bin"); + if (((adev->pdev->device == 0x6987) && + ((adev->pdev->revision == 0xc0) || + (adev->pdev->revision == 0xc3))) || + ((adev->pdev->device == 0x6981) && + ((adev->pdev->revision == 0x00) || + (adev->pdev->revision == 0x01) || + (adev->pdev->revision == 0x10)))) { + info->is_kicker = true; + strcpy(fw_name, "amdgpu/polaris12_k_smc.bin"); + } else { + strcpy(fw_name, "amdgpu/polaris12_smc.bin"); + } break; case CHIP_VEGAM: strcpy(fw_name, "amdgpu/vegam_smc.bin"); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c index b31d121a876bf..81001d8793221 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c @@ -122,14 +122,14 @@ static int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, union drm_amdgpu_cs goto free_chunk; } + mutex_lock(&p->ctx->lock); + /* skip guilty context job */ if 
(atomic_read(&p->ctx->guilty) == 1) { ret = -ECANCELED; goto free_chunk; } - mutex_lock(&p->ctx->lock); - /* get chunks */ chunk_array_user = u64_to_user_ptr(cs->in.chunks); if (copy_from_user(chunk_array, chunk_array_user, diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c index 6748cd7fc129b..686a26de50f91 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c @@ -626,6 +626,13 @@ int amdgpu_display_modeset_create_props(struct amdgpu_device *adev) "dither", amdgpu_dither_enum_list, sz); + if (amdgpu_device_has_dc_support(adev)) { + adev->mode_info.max_bpc_property = + drm_property_create_range(adev->ddev, 0, "max bpc", 8, 16); + if (!adev->mode_info.max_bpc_property) + return -ENOMEM; + } + return 0; } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c index 0f41d8647376a..b40e9c76af0c3 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c @@ -753,6 +753,7 @@ static const struct pci_device_id pciidlist[] = { /* VEGAM */ {0x1002, 0x694C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGAM}, {0x1002, 0x694E, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGAM}, + {0x1002, 0x694F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGAM}, /* Vega 10 */ {0x1002, 0x6860, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10}, {0x1002, 0x6861, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10}, @@ -761,7 +762,13 @@ static const struct pci_device_id pciidlist[] = { {0x1002, 0x6864, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10}, {0x1002, 0x6867, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10}, {0x1002, 0x6868, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10}, + {0x1002, 0x6869, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10}, + {0x1002, 0x686a, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10}, + {0x1002, 0x686b, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10}, {0x1002, 0x686c, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10}, + {0x1002, 0x686d, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 
CHIP_VEGA10}, + {0x1002, 0x686e, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10}, + {0x1002, 0x686f, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10}, {0x1002, 0x687f, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10}, /* Vega 12 */ {0x1002, 0x69A0, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA12}, diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c index 3a072a7a39f0f..df9b173c3d0b0 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c @@ -574,7 +574,7 @@ void amdgpu_vmid_mgr_init(struct amdgpu_device *adev) /* skip over VMID 0, since it is the system VM */ for (j = 1; j < id_mgr->num_ids; ++j) { amdgpu_vmid_reset(adev, i, j); - amdgpu_sync_create(&id_mgr->ids[i].active); + amdgpu_sync_create(&id_mgr->ids[j].active); list_add_tail(&id_mgr->ids[j].list, &id_mgr->ids_lru); } } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c index 391e2f7c03aac..f823d4baf044d 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c @@ -66,6 +66,7 @@ int amdgpu_job_alloc(struct amdgpu_device *adev, unsigned num_ibs, amdgpu_sync_create(&(*job)->sync); amdgpu_sync_create(&(*job)->sched_sync); (*job)->vram_lost_counter = atomic_read(&adev->vram_lost_counter); + (*job)->vm_pd_addr = AMDGPU_BO_INVALID_OFFSET; return 0; } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c index bd98cc5fb97bc..fd825d30edf13 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c @@ -292,9 +292,6 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file if (!info->return_size || !info->return_pointer) return -EINVAL; - /* Ensure IB tests are run on ring */ - flush_delayed_work(&adev->late_init_work); - switch (info->query) { case AMDGPU_INFO_ACCEL_WORKING: ui32 = adev->accel_working; @@ -861,6 +858,9 @@ int amdgpu_driver_open_kms(struct drm_device 
*dev, struct drm_file *file_priv) struct amdgpu_fpriv *fpriv; int r, pasid; + /* Ensure IB tests are run on ring */ + flush_delayed_work(&adev->late_init_work); + file_priv->driver_priv = NULL; r = pm_runtime_get_sync(dev->dev); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h index b9e9e8b02fb75..d1b4d9b6aae0d 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h @@ -339,6 +339,8 @@ struct amdgpu_mode_info { struct drm_property *audio_property; /* FMT dithering */ struct drm_property *dither_property; + /* maximum number of bits per channel for monitor color */ + struct drm_property *max_bpc_property; /* hardcoded DFP edid from BIOS */ struct edid *bios_hardcoded_edid; int bios_hardcoded_edid_size; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c index f55f72a37ca83..c29d519fa381a 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c @@ -277,6 +277,7 @@ amdgpu_ucode_get_load_type(struct amdgpu_device *adev, int load_type) case CHIP_PITCAIRN: case CHIP_VERDE: case CHIP_OLAND: + case CHIP_HAINAN: return AMDGPU_FW_LOAD_DIRECT; #endif #ifdef CONFIG_DRM_AMDGPU_CIK diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c index b17771dd5ce73..6a84526e20e09 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c @@ -714,7 +714,8 @@ int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job, bool need_ } gds_switch_needed &= !!ring->funcs->emit_gds_switch; - vm_flush_needed &= !!ring->funcs->emit_vm_flush; + vm_flush_needed &= !!ring->funcs->emit_vm_flush && + job->vm_pd_addr != AMDGPU_BO_INVALID_OFFSET; pasid_mapping_needed &= adev->gmc.gmc_funcs->emit_pasid_mapping && ring->funcs->emit_wreg; diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c index 
ef00d14f86453..325e2213cac53 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c @@ -2243,12 +2243,13 @@ static void gfx_v9_0_rlc_start(struct amdgpu_device *adev) #endif WREG32_FIELD15(GC, 0, RLC_CNTL, RLC_ENABLE_F32, 1); + udelay(50); /* carrizo do enable cp interrupt after cp inited */ - if (!(adev->flags & AMD_IS_APU)) + if (!(adev->flags & AMD_IS_APU)) { gfx_v9_0_enable_gui_idle_interrupt(adev, true); - - udelay(50); + udelay(50); + } #ifdef AMDGPU_RLC_DEBUG_RETRY /* RLC_GPM_GENERAL_6 : RLC Ucode version */ diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c index ad151fefa41f1..db406a35808f6 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c @@ -45,6 +45,7 @@ MODULE_FIRMWARE("amdgpu/tahiti_mc.bin"); MODULE_FIRMWARE("amdgpu/pitcairn_mc.bin"); MODULE_FIRMWARE("amdgpu/verde_mc.bin"); MODULE_FIRMWARE("amdgpu/oland_mc.bin"); +MODULE_FIRMWARE("amdgpu/hainan_mc.bin"); MODULE_FIRMWARE("amdgpu/si58_mc.bin"); #define MC_SEQ_MISC0__MT__MASK 0xf0000000 diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c index 9333109b210de..1a744f964b301 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c @@ -55,6 +55,9 @@ MODULE_FIRMWARE("amdgpu/tonga_mc.bin"); MODULE_FIRMWARE("amdgpu/polaris11_mc.bin"); MODULE_FIRMWARE("amdgpu/polaris10_mc.bin"); MODULE_FIRMWARE("amdgpu/polaris12_mc.bin"); +MODULE_FIRMWARE("amdgpu/polaris11_k_mc.bin"); +MODULE_FIRMWARE("amdgpu/polaris10_k_mc.bin"); +MODULE_FIRMWARE("amdgpu/polaris12_k_mc.bin"); static const u32 golden_settings_tonga_a11[] = { @@ -223,13 +226,39 @@ static int gmc_v8_0_init_microcode(struct amdgpu_device *adev) chip_name = "tonga"; break; case CHIP_POLARIS11: - chip_name = "polaris11"; + if (((adev->pdev->device == 0x67ef) && + ((adev->pdev->revision == 0xe0) || + (adev->pdev->revision == 0xe5))) || + ((adev->pdev->device == 0x67ff) 
&& + ((adev->pdev->revision == 0xcf) || + (adev->pdev->revision == 0xef) || + (adev->pdev->revision == 0xff)))) + chip_name = "polaris11_k"; + else if ((adev->pdev->device == 0x67ef) && + (adev->pdev->revision == 0xe2)) + chip_name = "polaris11_k"; + else + chip_name = "polaris11"; break; case CHIP_POLARIS10: - chip_name = "polaris10"; + if ((adev->pdev->device == 0x67df) && + ((adev->pdev->revision == 0xe1) || + (adev->pdev->revision == 0xf7))) + chip_name = "polaris10_k"; + else + chip_name = "polaris10"; break; case CHIP_POLARIS12: - chip_name = "polaris12"; + if (((adev->pdev->device == 0x6987) && + ((adev->pdev->revision == 0xc0) || + (adev->pdev->revision == 0xc3))) || + ((adev->pdev->device == 0x6981) && + ((adev->pdev->revision == 0x00) || + (adev->pdev->revision == 0x01) || + (adev->pdev->revision == 0x10)))) + chip_name = "polaris12_k"; + else + chip_name = "polaris12"; break; case CHIP_FIJI: case CHIP_CARRIZO: @@ -336,7 +365,7 @@ static int gmc_v8_0_polaris_mc_load_microcode(struct amdgpu_device *adev) const struct mc_firmware_header_v1_0 *hdr; const __le32 *fw_data = NULL; const __le32 *io_mc_regs = NULL; - u32 data, vbios_version; + u32 data; int i, ucode_size, regs_size; /* Skip MC ucode loading on SR-IOV capable boards. 
@@ -347,13 +376,6 @@ static int gmc_v8_0_polaris_mc_load_microcode(struct amdgpu_device *adev) if (amdgpu_sriov_bios(adev)) return 0; - WREG32(mmMC_SEQ_IO_DEBUG_INDEX, 0x9F); - data = RREG32(mmMC_SEQ_IO_DEBUG_DATA); - vbios_version = data & 0xf; - - if (vbios_version == 0) - return 0; - if (!adev->gmc.fw) return -EINVAL; diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c b/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c index 8a926d1df939a..2b4199adcd946 100644 --- a/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c +++ b/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c @@ -116,16 +116,16 @@ static int uvd_v4_2_sw_init(void *handle) if (r) return r; - r = amdgpu_uvd_resume(adev); - if (r) - return r; - ring = &adev->uvd.inst->ring; sprintf(ring->name, "uvd"); r = amdgpu_ring_init(adev, ring, 512, &adev->uvd.inst->irq, 0); if (r) return r; + r = amdgpu_uvd_resume(adev); + if (r) + return r; + r = amdgpu_uvd_entity_init(adev); return r; diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c index 50248059412e7..88c006c5ee2cd 100644 --- a/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c +++ b/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c @@ -113,16 +113,16 @@ static int uvd_v5_0_sw_init(void *handle) if (r) return r; - r = amdgpu_uvd_resume(adev); - if (r) - return r; - ring = &adev->uvd.inst->ring; sprintf(ring->name, "uvd"); r = amdgpu_ring_init(adev, ring, 512, &adev->uvd.inst->irq, 0); if (r) return r; + r = amdgpu_uvd_resume(adev); + if (r) + return r; + r = amdgpu_uvd_entity_init(adev); return r; diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c index 6ae82cc2e55e0..d4070839ac809 100644 --- a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c +++ b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c @@ -420,16 +420,16 @@ static int uvd_v6_0_sw_init(void *handle) DRM_INFO("UVD ENC is disabled\n"); } - r = amdgpu_uvd_resume(adev); - if (r) - return r; - ring = &adev->uvd.inst->ring; sprintf(ring->name, "uvd"); r = amdgpu_ring_init(adev, ring, 512, 
&adev->uvd.inst->irq, 0); if (r) return r; + r = amdgpu_uvd_resume(adev); + if (r) + return r; + if (uvd_v6_0_enc_support(adev)) { for (i = 0; i < adev->uvd.num_enc_rings; ++i) { ring = &adev->uvd.inst->ring_enc[i]; diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c index 9b7f8469bc5c0..057151b17b456 100644 --- a/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c +++ b/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c @@ -444,10 +444,6 @@ static int uvd_v7_0_sw_init(void *handle) DRM_INFO("PSP loading UVD firmware\n"); } - r = amdgpu_uvd_resume(adev); - if (r) - return r; - for (j = 0; j < adev->uvd.num_uvd_inst; j++) { if (adev->uvd.harvest_config & (1 << j)) continue; @@ -479,6 +475,10 @@ static int uvd_v7_0_sw_init(void *handle) } } + r = amdgpu_uvd_resume(adev); + if (r) + return r; + r = amdgpu_uvd_entity_init(adev); if (r) return r; diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c index 072371ef59759..4f8f3bb218320 100644 --- a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c +++ b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c @@ -43,6 +43,7 @@ static void vcn_v1_0_set_enc_ring_funcs(struct amdgpu_device *adev); static void vcn_v1_0_set_jpeg_ring_funcs(struct amdgpu_device *adev); static void vcn_v1_0_set_irq_funcs(struct amdgpu_device *adev); static void vcn_v1_0_jpeg_ring_set_patch_ring(struct amdgpu_ring *ring, uint32_t ptr); +static int vcn_v1_0_set_powergating_state(void *handle, enum amd_powergating_state state); /** * vcn_v1_0_early_init - set function pointers @@ -216,7 +217,7 @@ static int vcn_v1_0_hw_fini(void *handle) struct amdgpu_ring *ring = &adev->vcn.ring_dec; if (RREG32_SOC15(VCN, 0, mmUVD_STATUS)) - vcn_v1_0_stop(adev); + vcn_v1_0_set_powergating_state(adev, AMD_PG_STATE_GATE); ring->ready = false; diff --git a/drivers/gpu/drm/amd/amdgpu/vega10_ih.c b/drivers/gpu/drm/amd/amdgpu/vega10_ih.c index 5ae5ed2e62d63..21bc12e023111 100644 --- a/drivers/gpu/drm/amd/amdgpu/vega10_ih.c +++ 
b/drivers/gpu/drm/amd/amdgpu/vega10_ih.c @@ -129,7 +129,7 @@ static int vega10_ih_irq_init(struct amdgpu_device *adev) else wptr_off = adev->wb.gpu_addr + (adev->irq.ih.wptr_offs * 4); WREG32_SOC15(OSSSYS, 0, mmIH_RB_WPTR_ADDR_LO, lower_32_bits(wptr_off)); - WREG32_SOC15(OSSSYS, 0, mmIH_RB_WPTR_ADDR_HI, upper_32_bits(wptr_off) & 0xFF); + WREG32_SOC15(OSSSYS, 0, mmIH_RB_WPTR_ADDR_HI, upper_32_bits(wptr_off) & 0xFFFF); /* set rptr, wptr to 0 */ WREG32_SOC15(OSSSYS, 0, mmIH_RB_RPTR, 0); diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device.c b/drivers/gpu/drm/amd/amdkfd/kfd_device.c index 29ac74f40dceb..5aba50f63ac6f 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_device.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_device.c @@ -326,7 +326,13 @@ static const struct kfd_deviceid supported_devices[] = { { 0x6864, &vega10_device_info }, /* Vega10 */ { 0x6867, &vega10_device_info }, /* Vega10 */ { 0x6868, &vega10_device_info }, /* Vega10 */ + { 0x6869, &vega10_device_info }, /* Vega10 */ + { 0x686A, &vega10_device_info }, /* Vega10 */ + { 0x686B, &vega10_device_info }, /* Vega10 */ { 0x686C, &vega10_vf_device_info }, /* Vega10 vf*/ + { 0x686D, &vega10_device_info }, /* Vega10 */ + { 0x686E, &vega10_device_info }, /* Vega10 */ + { 0x686F, &vega10_device_info }, /* Vega10 */ { 0x687F, &vega10_device_info }, /* Vega10 */ }; @@ -655,6 +661,7 @@ void kgd2kfd_interrupt(struct kfd_dev *kfd, const void *ih_ring_entry) { uint32_t patched_ihre[KFD_MAX_RING_ENTRY_SIZE]; bool is_patched = false; + unsigned long flags; if (!kfd->init_complete) return; @@ -664,7 +671,7 @@ void kgd2kfd_interrupt(struct kfd_dev *kfd, const void *ih_ring_entry) return; } - spin_lock(&kfd->interrupt_lock); + spin_lock_irqsave(&kfd->interrupt_lock, flags); if (kfd->interrupts_active && interrupt_is_wanted(kfd, ih_ring_entry, @@ -673,7 +680,7 @@ void kgd2kfd_interrupt(struct kfd_dev *kfd, const void *ih_ring_entry) is_patched ? 
patched_ihre : ih_ring_entry)) queue_work(kfd->ih_wq, &kfd->interrupt_work); - spin_unlock(&kfd->interrupt_lock); + spin_unlock_irqrestore(&kfd->interrupt_lock, flags); } int kgd2kfd_quiesce_mm(struct mm_struct *mm) diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c index 6903fe6c894ba..a851bb07443f0 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c @@ -565,22 +565,36 @@ static void s3_handle_mst(struct drm_device *dev, bool suspend) { struct amdgpu_dm_connector *aconnector; struct drm_connector *connector; + struct drm_dp_mst_topology_mgr *mgr; + int ret; + bool need_hotplug = false; drm_modeset_lock(&dev->mode_config.connection_mutex, NULL); - list_for_each_entry(connector, &dev->mode_config.connector_list, head) { - aconnector = to_amdgpu_dm_connector(connector); - if (aconnector->dc_link->type == dc_connection_mst_branch && - !aconnector->mst_port) { + list_for_each_entry(connector, &dev->mode_config.connector_list, + head) { + aconnector = to_amdgpu_dm_connector(connector); + if (aconnector->dc_link->type != dc_connection_mst_branch || + aconnector->mst_port) + continue; + + mgr = &aconnector->mst_mgr; - if (suspend) - drm_dp_mst_topology_mgr_suspend(&aconnector->mst_mgr); - else - drm_dp_mst_topology_mgr_resume(&aconnector->mst_mgr); - } + if (suspend) { + drm_dp_mst_topology_mgr_suspend(mgr); + } else { + ret = drm_dp_mst_topology_mgr_resume(mgr); + if (ret < 0) { + drm_dp_mst_topology_mgr_set_mst(mgr, false); + need_hotplug = true; + } + } } drm_modeset_unlock(&dev->mode_config.connection_mutex); + + if (need_hotplug) + drm_kms_helper_hotplug_event(dev); } static int dm_hw_init(void *handle) @@ -736,7 +750,6 @@ static int dm_resume(void *handle) struct drm_plane_state *new_plane_state; struct dm_plane_state *dm_new_plane_state; enum dc_connection_type new_connection_type = dc_connection_none; - int ret; int i; /* power on 
hardware */ @@ -809,13 +822,13 @@ static int dm_resume(void *handle) } } - ret = drm_atomic_helper_resume(ddev, dm->cached_state); + drm_atomic_helper_resume(ddev, dm->cached_state); dm->cached_state = NULL; amdgpu_dm_irq_resume_late(adev); - return ret; + return 0; } static const struct amd_ip_funcs amdgpu_dm_funcs = { @@ -2213,8 +2226,15 @@ static void update_stream_scaling_settings(const struct drm_display_mode *mode, static enum dc_color_depth convert_color_depth_from_display_info(const struct drm_connector *connector) { + struct dm_connector_state *dm_conn_state = + to_dm_connector_state(connector->state); uint32_t bpc = connector->display_info.bpc; + /* TODO: Remove this when there's support for max_bpc in drm */ + if (dm_conn_state && bpc > dm_conn_state->max_bpc) + /* Round down to nearest even number. */ + bpc = dm_conn_state->max_bpc - (dm_conn_state->max_bpc & 1); + switch (bpc) { case 0: /* Temporary Work around, DRM don't parse color depth for @@ -2796,6 +2816,9 @@ int amdgpu_dm_connector_atomic_set_property(struct drm_connector *connector, } else if (property == adev->mode_info.underscan_property) { dm_new_state->underscan_enable = val; ret = 0; + } else if (property == adev->mode_info.max_bpc_property) { + dm_new_state->max_bpc = val; + ret = 0; } return ret; @@ -2838,6 +2861,9 @@ int amdgpu_dm_connector_atomic_get_property(struct drm_connector *connector, } else if (property == adev->mode_info.underscan_property) { *val = dm_state->underscan_enable; ret = 0; + } else if (property == adev->mode_info.max_bpc_property) { + *val = dm_state->max_bpc; + ret = 0; } return ret; } @@ -2881,6 +2907,7 @@ void amdgpu_dm_connector_funcs_reset(struct drm_connector *connector) state->underscan_enable = false; state->underscan_hborder = 0; state->underscan_vborder = 0; + state->max_bpc = 8; __drm_atomic_helper_connector_reset(connector, &state->base); } @@ -2898,6 +2925,7 @@ amdgpu_dm_connector_atomic_duplicate_state(struct drm_connector *connector) if (new_state) 
{ __drm_atomic_helper_connector_duplicate_state(connector, &new_state->base); + new_state->max_bpc = state->max_bpc; return &new_state->base; } @@ -3167,7 +3195,7 @@ void dm_drm_plane_destroy_state(struct drm_plane *plane, static const struct drm_plane_funcs dm_plane_funcs = { .update_plane = drm_atomic_helper_update_plane, .disable_plane = drm_atomic_helper_disable_plane, - .destroy = drm_plane_cleanup, + .destroy = drm_primary_helper_destroy, .reset = dm_drm_plane_reset, .atomic_duplicate_state = dm_drm_plane_duplicate_state, .atomic_destroy_state = dm_drm_plane_destroy_state, @@ -3658,6 +3686,9 @@ void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm, drm_object_attach_property(&aconnector->base.base, adev->mode_info.underscan_vborder_property, 0); + drm_object_attach_property(&aconnector->base.base, + adev->mode_info.max_bpc_property, + 0); } diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h index a29dc35954c9a..74aedcffc4bb7 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h @@ -173,8 +173,6 @@ struct amdgpu_dm_connector { struct mutex hpd_lock; bool fake_enable; - - bool mst_connected; }; #define to_amdgpu_dm_connector(x) container_of(x, struct amdgpu_dm_connector, base) @@ -215,6 +213,7 @@ struct dm_connector_state { enum amdgpu_rmx_type scaling; uint8_t underscan_vborder; uint8_t underscan_hborder; + uint8_t max_bpc; bool underscan_enable; struct mod_freesync_user_enable user_enable; bool freesync_capable; diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.c index 9bfb040352e98..6a6d977ddd7ac 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.c @@ -60,6 +60,11 @@ int amdgpu_dm_crtc_set_crc_source(struct drm_crtc *crtc, const char *src_name, return -EINVAL; } + if 
(!stream_state) { + DRM_ERROR("No stream state for CRTC%d\n", crtc->index); + return -EINVAL; + } + /* When enabling CRC, we should also disable dithering. */ if (source == AMDGPU_DM_PIPE_CRC_SOURCE_AUTO) { if (dc_stream_configure_crc(stream_state->ctx->dc, diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c index 9a300732ba374..59445c83f0238 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c @@ -317,12 +317,7 @@ dm_dp_create_fake_mst_encoder(struct amdgpu_dm_connector *connector) struct amdgpu_device *adev = dev->dev_private; struct amdgpu_encoder *amdgpu_encoder; struct drm_encoder *encoder; - const struct drm_connector_helper_funcs *connector_funcs = - connector->base.helper_private; - struct drm_encoder *enc_master = - connector_funcs->best_encoder(&connector->base); - DRM_DEBUG_KMS("enc master is %p\n", enc_master); amdgpu_encoder = kzalloc(sizeof(*amdgpu_encoder), GFP_KERNEL); if (!amdgpu_encoder) return NULL; @@ -352,25 +347,6 @@ dm_dp_add_mst_connector(struct drm_dp_mst_topology_mgr *mgr, struct amdgpu_device *adev = dev->dev_private; struct amdgpu_dm_connector *aconnector; struct drm_connector *connector; - struct drm_connector_list_iter conn_iter; - - drm_connector_list_iter_begin(dev, &conn_iter); - drm_for_each_connector_iter(connector, &conn_iter) { - aconnector = to_amdgpu_dm_connector(connector); - if (aconnector->mst_port == master - && !aconnector->port) { - DRM_INFO("DM_MST: reusing connector: %p [id: %d] [master: %p]\n", - aconnector, connector->base.id, aconnector->mst_port); - - aconnector->port = port; - drm_connector_set_path_property(connector, pathprop); - - drm_connector_list_iter_end(&conn_iter); - aconnector->mst_connected = true; - return &aconnector->base; - } - } - drm_connector_list_iter_end(&conn_iter); aconnector = kzalloc(sizeof(*aconnector), GFP_KERNEL); if 
(!aconnector) @@ -398,6 +374,8 @@ dm_dp_add_mst_connector(struct drm_dp_mst_topology_mgr *mgr, master->connector_id); aconnector->mst_encoder = dm_dp_create_fake_mst_encoder(master); + drm_connector_attach_encoder(&aconnector->base, + &aconnector->mst_encoder->base); /* * TODO: understand why this one is needed @@ -419,8 +397,6 @@ dm_dp_add_mst_connector(struct drm_dp_mst_topology_mgr *mgr, */ amdgpu_dm_connector_funcs_reset(connector); - aconnector->mst_connected = true; - DRM_INFO("DM_MST: added connector: %p [id: %d] [master: %p]\n", aconnector, connector->base.id, aconnector->mst_port); @@ -432,6 +408,9 @@ dm_dp_add_mst_connector(struct drm_dp_mst_topology_mgr *mgr, static void dm_dp_destroy_mst_connector(struct drm_dp_mst_topology_mgr *mgr, struct drm_connector *connector) { + struct amdgpu_dm_connector *master = container_of(mgr, struct amdgpu_dm_connector, mst_mgr); + struct drm_device *dev = master->base.dev; + struct amdgpu_device *adev = dev->dev_private; struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector); DRM_INFO("DM_MST: Disabling connector: %p [id: %d] [master: %p]\n", @@ -445,7 +424,10 @@ static void dm_dp_destroy_mst_connector(struct drm_dp_mst_topology_mgr *mgr, aconnector->dc_sink = NULL; } - aconnector->mst_connected = false; + drm_connector_unregister(connector); + if (adev->mode_info.rfbdev) + drm_fb_helper_remove_one_connector(&adev->mode_info.rfbdev->helper, connector); + drm_connector_put(connector); } static void dm_dp_mst_hotplug(struct drm_dp_mst_topology_mgr *mgr) @@ -456,18 +438,10 @@ static void dm_dp_mst_hotplug(struct drm_dp_mst_topology_mgr *mgr) drm_kms_helper_hotplug_event(dev); } -static void dm_dp_mst_link_status_reset(struct drm_connector *connector) -{ - mutex_lock(&connector->dev->mode_config.mutex); - drm_connector_set_link_status_property(connector, DRM_MODE_LINK_STATUS_BAD); - mutex_unlock(&connector->dev->mode_config.mutex); -} - static void dm_dp_mst_register_connector(struct drm_connector 
*connector) { struct drm_device *dev = connector->dev; struct amdgpu_device *adev = dev->dev_private; - struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector); if (adev->mode_info.rfbdev) drm_fb_helper_add_one_connector(&adev->mode_info.rfbdev->helper, connector); @@ -475,9 +449,6 @@ static void dm_dp_mst_register_connector(struct drm_connector *connector) DRM_ERROR("adev->mode_info.rfbdev is NULL\n"); drm_connector_register(connector); - - if (aconnector->mst_connected) - dm_dp_mst_link_status_reset(connector); } static const struct drm_dp_mst_topology_cbs dm_mst_cbs = { diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c index 6ae050dc32209..9045e6fa0780b 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc.c @@ -1120,9 +1120,6 @@ static enum surface_update_type get_plane_info_update_type(const struct dc_surfa */ update_flags->bits.bpp_change = 1; - if (u->gamma && dce_use_lut(u->plane_info->format)) - update_flags->bits.gamma_change = 1; - if (memcmp(&u->plane_info->tiling_info, &u->surface->tiling_info, sizeof(union dc_tiling_info)) != 0) { update_flags->bits.swizzle_change = 1; @@ -1139,7 +1136,6 @@ static enum surface_update_type get_plane_info_update_type(const struct dc_surfa if (update_flags->bits.rotation_change || update_flags->bits.stereo_format_change || update_flags->bits.pixel_format_change - || update_flags->bits.gamma_change || update_flags->bits.bpp_change || update_flags->bits.bandwidth_change || update_flags->bits.output_tf_change) @@ -1229,13 +1225,26 @@ static enum surface_update_type det_surface_update(const struct dc *dc, if (u->coeff_reduction_factor) update_flags->bits.coeff_reduction_change = 1; + if (u->gamma) { + enum surface_pixel_format format = SURFACE_PIXEL_FORMAT_GRPH_BEGIN; + + if (u->plane_info) + format = u->plane_info->format; + else if (u->surface) + format = u->surface->format; + + if (dce_use_lut(format)) + 
update_flags->bits.gamma_change = 1; + } + if (update_flags->bits.in_transfer_func_change) { type = UPDATE_TYPE_MED; elevate_update_type(&overall_type, type); } if (update_flags->bits.input_csc_change - || update_flags->bits.coeff_reduction_change) { + || update_flags->bits.coeff_reduction_change + || update_flags->bits.gamma_change) { type = UPDATE_TYPE_FULL; elevate_update_type(&overall_type, type); } diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link.c b/drivers/gpu/drm/amd/display/dc/core/dc_link.c index fced3c1c2ef5f..7c89785fd7315 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_link.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_link.c @@ -2457,11 +2457,11 @@ void core_link_disable_stream(struct pipe_ctx *pipe_ctx, int option) { struct dc *core_dc = pipe_ctx->stream->ctx->dc; + core_dc->hwss.blank_stream(pipe_ctx); + if (pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST) deallocate_mst_payload(pipe_ctx); - core_dc->hwss.blank_stream(pipe_ctx); - core_dc->hwss.disable_stream(pipe_ctx, option); disable_link(pipe_ctx->stream->sink->link, pipe_ctx->stream->signal); diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_clocks.c b/drivers/gpu/drm/amd/display/dc/dce/dce_clocks.c index fb1f373d08a12..e798241fae37a 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dce_clocks.c +++ b/drivers/gpu/drm/amd/display/dc/dce/dce_clocks.c @@ -466,6 +466,9 @@ static void dce12_update_clocks(struct dccg *dccg, { struct dm_pp_clock_for_voltage_req clock_voltage_req = {0}; + /* TODO: Investigate why this is needed to fix display corruption. 
*/ + new_clocks->dispclk_khz = new_clocks->dispclk_khz * 115 / 100; + if (should_set_clock(safe_to_lower, new_clocks->dispclk_khz, dccg->clks.dispclk_khz)) { clock_voltage_req.clk_type = DM_PP_CLOCK_TYPE_DISPLAY_CLK; clock_voltage_req.clocks_in_khz = new_clocks->dispclk_khz; diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c index b2f308766a9e8..0941f3c689bca 100644 --- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c +++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c @@ -2530,6 +2530,8 @@ static void pplib_apply_display_requirements( dc, context->bw.dce.sclk_khz); + pp_display_cfg->min_dcfclock_khz = pp_display_cfg->min_engine_clock_khz; + pp_display_cfg->min_engine_clock_deep_sleep_khz = context->bw.dce.sclk_deep_sleep_khz; diff --git a/drivers/gpu/drm/amd/display/modules/color/color_gamma.c b/drivers/gpu/drm/amd/display/modules/color/color_gamma.c index bf29733958c37..962900932beed 100644 --- a/drivers/gpu/drm/amd/display/modules/color/color_gamma.c +++ b/drivers/gpu/drm/amd/display/modules/color/color_gamma.c @@ -1069,10 +1069,14 @@ static void build_evenly_distributed_points( struct dividers dividers) { struct gamma_pixel *p = points; - struct gamma_pixel *p_last = p + numberof_points - 1; + struct gamma_pixel *p_last; uint32_t i = 0; + // This function should not get called with 0 as a parameter + ASSERT(numberof_points > 0); + p_last = p + numberof_points - 1; + do { struct fixed31_32 value = dc_fixpt_from_fraction(i, numberof_points - 1); @@ -1083,7 +1087,7 @@ static void build_evenly_distributed_points( ++p; ++i; - } while (i != numberof_points); + } while (i < numberof_points); p->r = dc_fixpt_div(p_last->r, dividers.divider1); p->g = dc_fixpt_div(p_last->g, dividers.divider1); diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c index 8994aa5c8cf80..64596029b6963 100644 ---
a/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c @@ -365,6 +365,9 @@ int hwmgr_handle_task(struct pp_hwmgr *hwmgr, enum amd_pp_task task_id, switch (task_id) { case AMD_PP_TASK_DISPLAY_CONFIG_CHANGE: + ret = phm_pre_display_configuration_changed(hwmgr); + if (ret) + return ret; ret = phm_set_cpu_power_state(hwmgr); if (ret) return ret; diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/pp_psm.c b/drivers/gpu/drm/amd/powerplay/hwmgr/pp_psm.c index 91ffb7bc4ee72..56437866d1206 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/pp_psm.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/pp_psm.c @@ -265,8 +265,6 @@ int psm_adjust_power_state_dynamic(struct pp_hwmgr *hwmgr, bool skip, if (skip) return 0; - phm_pre_display_configuration_changed(hwmgr); - phm_display_configuration_changed(hwmgr); if (hwmgr->ps) diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu8_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu8_hwmgr.c index 0adfc5392cd37..c9a15baf2c10f 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu8_hwmgr.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu8_hwmgr.c @@ -1222,14 +1222,17 @@ static int smu8_dpm_force_dpm_level(struct pp_hwmgr *hwmgr, static int smu8_dpm_powerdown_uvd(struct pp_hwmgr *hwmgr) { - if (PP_CAP(PHM_PlatformCaps_UVDPowerGating)) + if (PP_CAP(PHM_PlatformCaps_UVDPowerGating)) { + smu8_nbdpm_pstate_enable_disable(hwmgr, true, true); return smum_send_msg_to_smc(hwmgr, PPSMC_MSG_UVDPowerOFF); + } return 0; } static int smu8_dpm_powerup_uvd(struct pp_hwmgr *hwmgr) { if (PP_CAP(PHM_PlatformCaps_UVDPowerGating)) { + smu8_nbdpm_pstate_enable_disable(hwmgr, false, true); return smum_send_msg_to_smc_with_parameter( hwmgr, PPSMC_MSG_UVDPowerON, diff --git a/drivers/gpu/drm/amd/powerplay/inc/smu7_ppsmc.h b/drivers/gpu/drm/amd/powerplay/inc/smu7_ppsmc.h index 62f36ba2435be..c1a99dfe4913f 100644 --- a/drivers/gpu/drm/amd/powerplay/inc/smu7_ppsmc.h +++ b/drivers/gpu/drm/amd/powerplay/inc/smu7_ppsmc.h @@ -386,6 +386,8 @@ 
typedef uint16_t PPSMC_Result; #define PPSMC_MSG_AgmResetPsm ((uint16_t) 0x403) #define PPSMC_MSG_ReadVftCell ((uint16_t) 0x404) +#define PPSMC_MSG_ApplyAvfsCksOffVoltage ((uint16_t) 0x415) + #define PPSMC_MSG_GFX_CU_PG_ENABLE ((uint16_t) 0x280) #define PPSMC_MSG_GFX_CU_PG_DISABLE ((uint16_t) 0x281) #define PPSMC_MSG_GetCurrPkgPwr ((uint16_t) 0x282) diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c index fbe3ef4ee45c6..924788772b07f 100644 --- a/drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c +++ b/drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c @@ -2268,11 +2268,13 @@ static uint32_t ci_get_offsetof(uint32_t type, uint32_t member) case DRAM_LOG_BUFF_SIZE: return offsetof(SMU7_SoftRegisters, DRAM_LOG_BUFF_SIZE); } + break; case SMU_Discrete_DpmTable: switch (member) { case LowSclkInterruptThreshold: return offsetof(SMU7_Discrete_DpmTable, LowSclkInterruptT); } + break; } pr_debug("can't get the offset of type %x member %x\n", type, member); return 0; diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c index 18048f8e2f130..40df5c2706cce 100644 --- a/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c +++ b/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c @@ -2330,6 +2330,7 @@ static uint32_t fiji_get_offsetof(uint32_t type, uint32_t member) case DRAM_LOG_BUFF_SIZE: return offsetof(SMU73_SoftRegisters, DRAM_LOG_BUFF_SIZE); } + break; case SMU_Discrete_DpmTable: switch (member) { case UvdBootLevel: @@ -2339,6 +2340,7 @@ static uint32_t fiji_get_offsetof(uint32_t type, uint32_t member) case LowSclkInterruptThreshold: return offsetof(SMU73_Discrete_DpmTable, LowSclkInterruptThreshold); } + break; } pr_warn("can't get the offset of type %x member %x\n", type, member); return 0; diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c index 9299b93aa09af..302ca7745723e 100644 
--- a/drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c +++ b/drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c @@ -2236,11 +2236,13 @@ static uint32_t iceland_get_offsetof(uint32_t type, uint32_t member) case DRAM_LOG_BUFF_SIZE: return offsetof(SMU71_SoftRegisters, DRAM_LOG_BUFF_SIZE); } + break; case SMU_Discrete_DpmTable: switch (member) { case LowSclkInterruptThreshold: return offsetof(SMU71_Discrete_DpmTable, LowSclkInterruptThreshold); } + break; } pr_warn("can't get the offset of type %x member %x\n", type, member); return 0; diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c index 1276f168ff68d..5b67f575cd34d 100644 --- a/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c +++ b/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c @@ -1984,6 +1984,12 @@ int polaris10_thermal_avfs_enable(struct pp_hwmgr *hwmgr) smum_send_msg_to_smc(hwmgr, PPSMC_MSG_EnableAvfs); + /* Apply avfs cks-off voltages to avoid the overshoot + * when switching to the highest sclk frequency + */ + if (data->apply_avfs_cks_off_voltage) + smum_send_msg_to_smc(hwmgr, PPSMC_MSG_ApplyAvfsCksOffVoltage); + return 0; } diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/smumgr.c index 99d5e4f98f49c..a6edd5df33b0f 100644 --- a/drivers/gpu/drm/amd/powerplay/smumgr/smumgr.c +++ b/drivers/gpu/drm/amd/powerplay/smumgr/smumgr.c @@ -37,10 +37,13 @@ MODULE_FIRMWARE("amdgpu/fiji_smc.bin"); MODULE_FIRMWARE("amdgpu/polaris10_smc.bin"); MODULE_FIRMWARE("amdgpu/polaris10_smc_sk.bin"); MODULE_FIRMWARE("amdgpu/polaris10_k_smc.bin"); +MODULE_FIRMWARE("amdgpu/polaris10_k2_smc.bin"); MODULE_FIRMWARE("amdgpu/polaris11_smc.bin"); MODULE_FIRMWARE("amdgpu/polaris11_smc_sk.bin"); MODULE_FIRMWARE("amdgpu/polaris11_k_smc.bin"); +MODULE_FIRMWARE("amdgpu/polaris11_k2_smc.bin"); MODULE_FIRMWARE("amdgpu/polaris12_smc.bin"); +MODULE_FIRMWARE("amdgpu/polaris12_k_smc.bin"); 
MODULE_FIRMWARE("amdgpu/vegam_smc.bin"); MODULE_FIRMWARE("amdgpu/vega10_smc.bin"); MODULE_FIRMWARE("amdgpu/vega10_acg_smc.bin"); diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c index 7dabc6c456e12..697c8d92bd531 100644 --- a/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c +++ b/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c @@ -2618,6 +2618,7 @@ static uint32_t tonga_get_offsetof(uint32_t type, uint32_t member) case DRAM_LOG_BUFF_SIZE: return offsetof(SMU72_SoftRegisters, DRAM_LOG_BUFF_SIZE); } + break; case SMU_Discrete_DpmTable: switch (member) { case UvdBootLevel: @@ -2627,6 +2628,7 @@ static uint32_t tonga_get_offsetof(uint32_t type, uint32_t member) case LowSclkInterruptThreshold: return offsetof(SMU72_Discrete_DpmTable, LowSclkInterruptThreshold); } + break; } pr_warn("can't get the offset of type %x member %x\n", type, member); return 0; diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c index 57420d7caa4e9..59113fdd1c1c1 100644 --- a/drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c +++ b/drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c @@ -2184,6 +2184,7 @@ static uint32_t vegam_get_offsetof(uint32_t type, uint32_t member) case DRAM_LOG_BUFF_SIZE: return offsetof(SMU75_SoftRegisters, DRAM_LOG_BUFF_SIZE); } + break; case SMU_Discrete_DpmTable: switch (member) { case UvdBootLevel: @@ -2193,6 +2194,7 @@ static uint32_t vegam_get_offsetof(uint32_t type, uint32_t member) case LowSclkInterruptThreshold: return offsetof(SMU75_Discrete_DpmTable, LowSclkInterruptThreshold); } + break; } pr_warn("can't get the offset of type %x member %x\n", type, member); return 0; diff --git a/drivers/gpu/drm/ast/ast_drv.c b/drivers/gpu/drm/ast/ast_drv.c index 69dab82a37714..bf589c53b908d 100644 --- a/drivers/gpu/drm/ast/ast_drv.c +++ b/drivers/gpu/drm/ast/ast_drv.c @@ -60,8 +60,29 @@ static const struct pci_device_id pciidlist[] 
= { MODULE_DEVICE_TABLE(pci, pciidlist); +static void ast_kick_out_firmware_fb(struct pci_dev *pdev) +{ + struct apertures_struct *ap; + bool primary = false; + + ap = alloc_apertures(1); + if (!ap) + return; + + ap->ranges[0].base = pci_resource_start(pdev, 0); + ap->ranges[0].size = pci_resource_len(pdev, 0); + +#ifdef CONFIG_X86 + primary = pdev->resource[PCI_ROM_RESOURCE].flags & IORESOURCE_ROM_SHADOW; +#endif + drm_fb_helper_remove_conflicting_framebuffers(ap, "astdrmfb", primary); + kfree(ap); +} + static int ast_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) { + ast_kick_out_firmware_fb(pdev); + return drm_get_pci_dev(pdev, ent, &driver); } diff --git a/drivers/gpu/drm/ast/ast_fb.c b/drivers/gpu/drm/ast/ast_fb.c index 0cd827e11fa20..de26df0c6044d 100644 --- a/drivers/gpu/drm/ast/ast_fb.c +++ b/drivers/gpu/drm/ast/ast_fb.c @@ -263,6 +263,7 @@ static void ast_fbdev_destroy(struct drm_device *dev, { struct ast_framebuffer *afb = &afbdev->afb; + drm_crtc_force_disable_all(dev); drm_fb_helper_unregister_fbi(&afbdev->helper); if (afb->obj) { diff --git a/drivers/gpu/drm/ast/ast_main.c b/drivers/gpu/drm/ast/ast_main.c index dac355812adcb..373700c05a00f 100644 --- a/drivers/gpu/drm/ast/ast_main.c +++ b/drivers/gpu/drm/ast/ast_main.c @@ -583,7 +583,8 @@ void ast_driver_unload(struct drm_device *dev) drm_mode_config_cleanup(dev); ast_mm_fini(ast); - pci_iounmap(dev->pdev, ast->ioregs); + if (ast->ioregs != ast->regs + AST_IO_MM_OFFSET) + pci_iounmap(dev->pdev, ast->ioregs); pci_iounmap(dev->pdev, ast->regs); kfree(ast); } diff --git a/drivers/gpu/drm/ast/ast_mode.c b/drivers/gpu/drm/ast/ast_mode.c index 5e77d456d9bb9..8bb355d5d43d8 100644 --- a/drivers/gpu/drm/ast/ast_mode.c +++ b/drivers/gpu/drm/ast/ast_mode.c @@ -568,6 +568,7 @@ static int ast_crtc_do_set_base(struct drm_crtc *crtc, } ast_bo_unreserve(bo); + ast_set_offset_reg(crtc); ast_set_start_address_crt1(crtc, (u32)gpu_addr); return 0; @@ -972,9 +973,21 @@ static int get_clock(void *i2c_priv) 
{ struct ast_i2c_chan *i2c = i2c_priv; struct ast_private *ast = i2c->dev->dev_private; - uint32_t val; + uint32_t val, val2, count, pass; + + count = 0; + pass = 0; + val = (ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xb7, 0x10) >> 4) & 0x01; + do { + val2 = (ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xb7, 0x10) >> 4) & 0x01; + if (val == val2) { + pass++; + } else { + pass = 0; + val = (ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xb7, 0x10) >> 4) & 0x01; + } + } while ((pass < 5) && (count++ < 0x10000)); - val = ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xb7, 0x10) >> 4; return val & 1 ? 1 : 0; } @@ -982,9 +995,21 @@ static int get_data(void *i2c_priv) { struct ast_i2c_chan *i2c = i2c_priv; struct ast_private *ast = i2c->dev->dev_private; - uint32_t val; + uint32_t val, val2, count, pass; + + count = 0; + pass = 0; + val = (ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xb7, 0x20) >> 5) & 0x01; + do { + val2 = (ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xb7, 0x20) >> 5) & 0x01; + if (val == val2) { + pass++; + } else { + pass = 0; + val = (ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xb7, 0x20) >> 5) & 0x01; + } + } while ((pass < 5) && (count++ < 0x10000)); - val = ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xb7, 0x20) >> 5; return val & 1 ? 1 : 0; } @@ -997,7 +1022,7 @@ static void set_clock(void *i2c_priv, int clock) for (i = 0; i < 0x10000; i++) { ujcrb7 = ((clock & 0x01) ? 0 : 1); - ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xb7, 0xfe, ujcrb7); + ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xb7, 0xf4, ujcrb7); jtemp = ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xb7, 0x01); if (ujcrb7 == jtemp) break; @@ -1013,7 +1038,7 @@ static void set_data(void *i2c_priv, int data) for (i = 0; i < 0x10000; i++) { ujcrb7 = ((data & 0x01) ? 
0 : 1) << 2; - ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xb7, 0xfb, ujcrb7); + ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xb7, 0xf1, ujcrb7); jtemp = ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xb7, 0x04); if (ujcrb7 == jtemp) break; @@ -1254,7 +1279,7 @@ static int ast_cursor_move(struct drm_crtc *crtc, ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xc7, ((y >> 8) & 0x07)); /* dummy write to fire HWC */ - ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xCB, 0xFF, 0x00); + ast_show_cursor(crtc); return 0; } diff --git a/drivers/gpu/drm/drm_atomic.c b/drivers/gpu/drm/drm_atomic.c index 281cf9cbb44c4..d3c3c65bf5b7a 100644 --- a/drivers/gpu/drm/drm_atomic.c +++ b/drivers/gpu/drm/drm_atomic.c @@ -574,6 +574,14 @@ int drm_atomic_crtc_set_property(struct drm_crtc *crtc, &replaced); state->color_mgmt_changed |= replaced; return ret; + } else if (property == config->ctm_post_offset_property) { + ret = drm_atomic_replace_property_blob_from_id(dev, + &state->ctm_post_offset, + val, + sizeof(struct drm_color_ctm_post_offset), -1, + &replaced); + state->color_mgmt_changed |= replaced; + return ret; } else if (property == config->gamma_lut_property) { ret = drm_atomic_replace_property_blob_from_id(dev, &state->gamma_lut, @@ -636,6 +644,8 @@ drm_atomic_crtc_get_property(struct drm_crtc *crtc, *val = (state->degamma_lut) ? state->degamma_lut->base.id : 0; else if (property == config->ctm_property) *val = (state->ctm) ? state->ctm->base.id : 0; + else if (property == config->ctm_post_offset_property) + *val = (state->ctm_post_offset) ? state->ctm_post_offset->base.id : 0; else if (property == config->gamma_lut_property) *val = (state->gamma_lut) ? 
state->gamma_lut->base.id : 0; else if (property == config->prop_out_fence_ptr) @@ -909,6 +919,8 @@ static int drm_atomic_plane_set_property(struct drm_plane *plane, state->rotation = val; } else if (property == plane->zpos_property) { state->zpos = val; + } else if (property == plane->decryption_property) { + state->decryption_reqd = val; } else if (property == plane->color_encoding_property) { state->color_encoding = val; } else if (property == plane->color_range_property) { @@ -977,6 +989,8 @@ drm_atomic_plane_get_property(struct drm_plane *plane, *val = state->rotation; } else if (property == plane->zpos_property) { *val = state->zpos; + } else if (property == plane->decryption_property) { + *val = state->decryption_reqd; } else if (property == plane->color_encoding_property) { *val = state->color_encoding; } else if (property == plane->color_range_property) { @@ -1421,6 +1435,8 @@ static int drm_atomic_connector_set_property(struct drm_connector *connector, return set_out_fence_for_connector(state->state, connector, fence_ptr); + } else if (property == connector->cp_srm_property) { + state->cp_srm_blob_id = val; } else if (connector->funcs->atomic_set_property) { return connector->funcs->atomic_set_property(connector, state, property, val); @@ -1511,11 +1527,11 @@ drm_atomic_connector_get_property(struct drm_connector *connector, *val = state->scaling_mode; } else if (property == connector->content_protection_property) { *val = state->content_protection; - } else if (property == config->writeback_fb_id_property) { - /* Writeback framebuffer is one-shot, write and forget */ - *val = 0; - } else if (property == config->writeback_out_fence_ptr_property) { - *val = 0; + } else if (property == connector->cp_srm_property) { + *val = state->cp_srm_blob_id; + } else if (property == connector->cp_downstream_property) { + *val = connector->cp_downstream_blob_ptr ? 
+ connector->cp_downstream_blob_ptr->base.id : 0; } else if (connector->funcs->atomic_get_property) { return connector->funcs->atomic_get_property(connector, state, property, val); diff --git a/drivers/gpu/drm/drm_atomic_helper.c b/drivers/gpu/drm/drm_atomic_helper.c index 1bb4c318bdd4d..d94734e24c806 100644 --- a/drivers/gpu/drm/drm_atomic_helper.c +++ b/drivers/gpu/drm/drm_atomic_helper.c @@ -1074,7 +1074,7 @@ drm_atomic_helper_update_legacy_modeset_state(struct drm_device *dev, crtc->enabled = new_crtc_state->enable; new_plane_state = - drm_atomic_get_new_plane_state(old_state, primary); + primary ? drm_atomic_get_new_plane_state(old_state, primary) : NULL; if (new_plane_state && new_plane_state->crtc == crtc) { crtc->x = new_plane_state->src_x >> 16; @@ -1425,6 +1425,9 @@ void drm_atomic_helper_wait_for_flip_done(struct drm_device *dev, DRM_ERROR("[CRTC:%d:%s] flip_done timed out\n", crtc->base.id, crtc->name); } + + if (old_state->fake_commit) + complete_all(&old_state->fake_commit->flip_done); } EXPORT_SYMBOL(drm_atomic_helper_wait_for_flip_done); @@ -2916,6 +2919,9 @@ int __drm_atomic_helper_set_config(struct drm_mode_set *set, int hdisplay, vdisplay; int ret; + if (!crtc->primary) + return -EINVAL; + crtc_state = drm_atomic_get_crtc_state(state, crtc); if (IS_ERR(crtc_state)) return PTR_ERR(crtc_state); @@ -3469,6 +3475,8 @@ void __drm_atomic_helper_crtc_duplicate_state(struct drm_crtc *crtc, drm_property_blob_get(state->degamma_lut); if (state->ctm) drm_property_blob_get(state->ctm); + if (state->ctm_post_offset) + drm_property_blob_get(state->ctm_post_offset); if (state->gamma_lut) drm_property_blob_get(state->gamma_lut); state->mode_changed = false; @@ -3538,6 +3546,7 @@ void __drm_atomic_helper_crtc_destroy_state(struct drm_crtc_state *state) drm_property_blob_put(state->mode_blob); drm_property_blob_put(state->degamma_lut); drm_property_blob_put(state->ctm); + drm_property_blob_put(state->ctm_post_offset); drm_property_blob_put(state->gamma_lut); } 
EXPORT_SYMBOL(__drm_atomic_helper_crtc_destroy_state); @@ -3934,6 +3943,7 @@ int drm_atomic_helper_legacy_gamma_set(struct drm_crtc *crtc, /* Reset DEGAMMA_LUT and CTM properties. */ replaced = drm_property_replace_blob(&crtc_state->degamma_lut, NULL); replaced |= drm_property_replace_blob(&crtc_state->ctm, NULL); + replaced |= drm_property_replace_blob(&crtc_state->ctm_post_offset, NULL); replaced |= drm_property_replace_blob(&crtc_state->gamma_lut, blob); crtc_state->color_mgmt_changed |= replaced; diff --git a/drivers/gpu/drm/drm_auth.c b/drivers/gpu/drm/drm_auth.c index d9c0f75739054..1669c42c40ed3 100644 --- a/drivers/gpu/drm/drm_auth.c +++ b/drivers/gpu/drm/drm_auth.c @@ -142,6 +142,7 @@ static int drm_new_set_master(struct drm_device *dev, struct drm_file *fpriv) lockdep_assert_held_once(&dev->master_mutex); + WARN_ON(fpriv->is_master); old_master = fpriv->master; fpriv->master = drm_master_create(dev); if (!fpriv->master) { @@ -170,6 +171,7 @@ static int drm_new_set_master(struct drm_device *dev, struct drm_file *fpriv) /* drop references and restore old master on failure */ drm_master_put(&fpriv->master); fpriv->master = old_master; + fpriv->is_master = 0; return ret; } diff --git a/drivers/gpu/drm/drm_blend.c b/drivers/gpu/drm/drm_blend.c index a16a74d7e15e8..183a7f6488c73 100644 --- a/drivers/gpu/drm/drm_blend.c +++ b/drivers/gpu/drm/drm_blend.c @@ -112,6 +112,38 @@ * exposed and assumed to be black). */ +/** + * drm_plane_create_decryption_property - create a decryption property + * @plane: drm plane + * + * This function creates a generic decryption property and enables support + * for it in the DRM core. It is attached to @plane. + * + * The decryption property will enable(1) or disable(0) the framebuffer + * decryption on this plane. + * + * Returns: + * 0 on success, negative error code on failure.
+ */ +int drm_plane_create_decryption_property(struct drm_plane *plane) +{ + struct drm_property *prop; + + prop = drm_property_create_bool(plane->dev, 0, + "DECRYPTION"); + if (!prop) + return -ENOMEM; + + drm_object_attach_property(&plane->base, prop, false); + plane->decryption_property = prop; + + if (plane->state) + plane->state->decryption_reqd = false; + + return 0; +} +EXPORT_SYMBOL(drm_plane_create_decryption_property); + /** * drm_plane_create_alpha_property - create a new alpha property * @plane: drm plane diff --git a/drivers/gpu/drm/drm_color_mgmt.c b/drivers/gpu/drm/drm_color_mgmt.c index b97e2de2c0298..8fc8197059bda 100644 --- a/drivers/gpu/drm/drm_color_mgmt.c +++ b/drivers/gpu/drm/drm_color_mgmt.c @@ -64,6 +64,16 @@ * boot-up state too. Drivers can access the blob for the color conversion * matrix through &drm_crtc_state.ctm. * + * “CTM_POST_OFFSET”: + * Blob property to set post offset vector used to convert colors after + * applying ctm. The data is interpreted as a struct + * &drm_color_ctm_post_offset. + * + * Setting this to NULL (blob property value set to 0) means a pass-thru + * vector should be used. This is generally the driver boot-up state too. + * Drivers can access the blob for post offset vector through + * &drm_crtc_state.ctm_post_offset. + * * “GAMMA_LUT”: * Blob property to set the gamma lookup table (LUT) mapping pixel data * after the transformation matrix to data sent to the connector. 
The @@ -162,9 +172,12 @@ void drm_crtc_enable_color_mgmt(struct drm_crtc *crtc, degamma_lut_size); } - if (has_ctm) + if (has_ctm){ drm_object_attach_property(&crtc->base, config->ctm_property, 0); + drm_object_attach_property(&crtc->base, + config->ctm_post_offset_property, 0); + } if (gamma_lut_size) { drm_object_attach_property(&crtc->base, diff --git a/drivers/gpu/drm/drm_connector.c b/drivers/gpu/drm/drm_connector.c index 6011d769d50bb..60858b76374c7 100644 --- a/drivers/gpu/drm/drm_connector.c +++ b/drivers/gpu/drm/drm_connector.c @@ -241,6 +241,7 @@ int drm_connector_init(struct drm_device *dev, INIT_LIST_HEAD(&connector->modes); mutex_init(&connector->mutex); connector->edid_blob_ptr = NULL; + connector->cp_downstream_blob_ptr = NULL; connector->status = connector_status_unknown; connector->display_info.panel_orientation = DRM_MODE_PANEL_ORIENTATION_UNKNOWN; @@ -1346,6 +1347,77 @@ int drm_connector_attach_content_protection_property( } EXPORT_SYMBOL(drm_connector_attach_content_protection_property); +/** + * drm_connector_attach_cp_downstream_property - attach cp downstream + * property + * + * @connector: connector to attach cp downstream property on. + * + * This is used to add support for content protection downstream info on + * select connectors. when Intel platform is configured as repeater, + * this downstream info is used by userspace, to complete the repeater + * authentication of HDCP specification with upstream HDCP transmitter. + * + * The cp downstream will be set to &drm_connector_state.cp_downstream + * + * Returns: + * Zero on success, negative errno on failure. 
+ */ +int drm_connector_attach_cp_downstream_property( + struct drm_connector *connector) +{ + struct drm_device *dev = connector->dev; + struct drm_property *prop; + + prop = drm_property_create(dev, DRM_MODE_PROP_BLOB | + DRM_MODE_PROP_IMMUTABLE, + "CP_Downstream_Info", 0); + if (!prop) + return -ENOMEM; + + drm_object_attach_property(&connector->base, prop, 0); + + connector->cp_downstream_property = prop; + + return 0; +} +EXPORT_SYMBOL(drm_connector_attach_cp_downstream_property); + +/** + * drm_connector_attach_cp_srm_property - attach cp srm + * property + * + * @connector: connector to attach cp srm property on. + * + * This is used to add support for sending the SRM table from userspace to + * kernel on selected connectors. Protected content provider will provide + * the system renewability Message(SRM) to userspace before requesting for + * HDCP on a port. Hence if a Port supports content protection (mostly HDCP) + * then this property will be attached to receive the SRM for revocation check + * of the ksvs. + * + * The srm blob id will be set to &drm_connector_state.cp_srm_blob_id + * + * Returns: + * Zero on success, negative errno on failure. 
+ */ +int drm_connector_attach_cp_srm_property(struct drm_connector *connector) +{ + struct drm_device *dev = connector->dev; + struct drm_property *prop; + + prop = drm_property_create(dev, DRM_MODE_PROP_BLOB, "CP_SRM", 0); + if (!prop) + return -ENOMEM; + + drm_object_attach_property(&connector->base, prop, 0); + connector->cp_srm_property = prop; + + return 0; +} +EXPORT_SYMBOL(drm_connector_attach_cp_srm_property); + + /** * drm_mode_create_aspect_ratio_property - create aspect ratio property * @dev: DRM device @@ -1578,6 +1650,38 @@ void drm_connector_set_link_status_property(struct drm_connector *connector, } EXPORT_SYMBOL(drm_connector_set_link_status_property); +/** + * drm_mode_connector_update_cp_downstream_property - update the cp_downstream + * property of a connector + * @connector: drm connector + * @cp_downstream_info: new value of the cp_downstream property + * + * This function creates a new blob modeset object and assigns its id to the + * connector's cp_downstream property. + * + * Returns: + * Zero on success, negative errno on failure. 
+ */ +int drm_mode_connector_update_cp_downstream_property( + struct drm_connector *connector, + const struct cp_downstream_info *info) +{ + struct drm_device *dev = connector->dev; + int ret; + + if (!info) + return -EINVAL; + + ret = drm_property_replace_global_blob(dev, + &connector->cp_downstream_blob_ptr, + sizeof(struct cp_downstream_info), + info, + &connector->base, + connector->cp_downstream_property); + return ret; +} +EXPORT_SYMBOL(drm_mode_connector_update_cp_downstream_property); + /** * drm_connector_init_panel_orientation_property - * initialize the connecters panel_orientation property diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c index 9cbe8f5c9acaf..bd4684395600e 100644 --- a/drivers/gpu/drm/drm_crtc.c +++ b/drivers/gpu/drm/drm_crtc.c @@ -410,6 +410,9 @@ int drm_mode_getcrtc(struct drm_device *dev, plane = crtc->primary; + if (!crtc->primary) + return -EINVAL; + crtc_resp->gamma_size = crtc->gamma_size; drm_modeset_lock(&plane->mutex, NULL); @@ -461,6 +464,9 @@ static int __drm_mode_set_config_internal(struct drm_mode_set *set, struct drm_crtc *tmp; int ret; + if (!crtc->primary) + return -EINVAL; + WARN_ON(drm_drv_uses_atomic_modeset(crtc->dev)); /* @@ -470,6 +476,8 @@ static int __drm_mode_set_config_internal(struct drm_mode_set *set, */ drm_for_each_crtc(tmp, crtc->dev) { struct drm_plane *plane = tmp->primary; + if (!tmp->primary) + continue; plane->old_fb = plane->fb; } @@ -486,6 +494,8 @@ static int __drm_mode_set_config_internal(struct drm_mode_set *set, drm_for_each_crtc(tmp, crtc->dev) { struct drm_plane *plane = tmp->primary; + if (!tmp->primary) + continue; if (plane->fb) drm_framebuffer_get(plane->fb); @@ -606,6 +616,9 @@ int drm_mode_setcrtc(struct drm_device *dev, void *data, if (ret) goto out; + if (!crtc->primary) + return -EINVAL; + if (crtc_req->mode_valid) { /* If we have a mode we need a framebuffer. 
*/ /* If we pass -1, set the mode with the currently bound fb */ diff --git a/drivers/gpu/drm/drm_crtc_helper.c b/drivers/gpu/drm/drm_crtc_helper.c index 5a84c3bc915df..aaa80d640203c 100644 --- a/drivers/gpu/drm/drm_crtc_helper.c +++ b/drivers/gpu/drm/drm_crtc_helper.c @@ -185,6 +185,8 @@ static void __drm_helper_disable_unused_functions(struct drm_device *dev) (*crtc_funcs->disable)(crtc); else (*crtc_funcs->dpms)(crtc, DRM_MODE_DPMS_OFF); + if (!crtc->primary) + continue; crtc->primary->fb = NULL; } } @@ -539,6 +541,9 @@ int drm_crtc_helper_set_config(struct drm_mode_set *set, crtc_funcs = set->crtc->helper_private; + if (!set->crtc->primary) + return -EINVAL; + if (!set->mode) set->fb = NULL; @@ -950,6 +955,8 @@ void drm_helper_resume_force_mode(struct drm_device *dev) if (!crtc->enabled) continue; + if (!crtc->primary) + continue; ret = drm_crtc_helper_set_mode(crtc, &crtc->mode, crtc->x, crtc->y, crtc->primary->fb); @@ -1072,6 +1079,9 @@ int drm_helper_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y, struct drm_plane_state *plane_state; struct drm_plane *plane = crtc->primary; + if (!plane) + return -EINVAL; + if (plane->funcs->atomic_duplicate_state) plane_state = plane->funcs->atomic_duplicate_state(plane); else { diff --git a/drivers/gpu/drm/drm_dp_mst_topology.c b/drivers/gpu/drm/drm_dp_mst_topology.c index 7780567aa6692..d708472d93c42 100644 --- a/drivers/gpu/drm/drm_dp_mst_topology.c +++ b/drivers/gpu/drm/drm_dp_mst_topology.c @@ -1274,6 +1274,9 @@ static struct drm_dp_mst_branch *drm_dp_get_mst_branch_device(struct drm_dp_mst_ mutex_lock(&mgr->lock); mstb = mgr->mst_primary; + if (!mstb) + goto out; + for (i = 0; i < lct - 1; i++) { int shift = (i % 2) ? 
0 : 4; int port_num = (rad[i / 2] >> shift) & 0xf; diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c index ff0bfc65a8c1d..b506e3622b08f 100644 --- a/drivers/gpu/drm/drm_edid.c +++ b/drivers/gpu/drm/drm_edid.c @@ -122,6 +122,9 @@ static const struct edid_quirk { /* SDC panel of Lenovo B50-80 reports 8 bpc, but is a 6 bpc panel */ { "SDC", 0x3652, EDID_QUIRK_FORCE_6BPC }, + /* BOE model 0x0771 reports 8 bpc, but is a 6 bpc panel */ + { "BOE", 0x0771, EDID_QUIRK_FORCE_6BPC }, + /* Belinea 10 15 55 */ { "MAX", 1516, EDID_QUIRK_PREFER_LARGE_60 }, { "MAX", 0x77e, EDID_QUIRK_PREFER_LARGE_60 }, diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c index 9628dd6178269..d4f342a4db204 100644 --- a/drivers/gpu/drm/drm_fb_helper.c +++ b/drivers/gpu/drm/drm_fb_helper.c @@ -56,6 +56,25 @@ MODULE_PARM_DESC(drm_fbdev_overalloc, "Overallocation of the fbdev buffer (%) [default=" __MODULE_STRING(CONFIG_DRM_FBDEV_OVERALLOC) "]"); +/* + * In order to keep user-space compatibility, we want in certain use-cases + * to keep leaking the fbdev physical address to the user-space program + * handling the fbdev buffer. + * This is a bad habit essentially kept into closed source opengl driver + * that should really be moved into open-source upstream projects instead + * of using legacy physical addresses in user space to communicate with + * other out-of-tree kernel modules. + * + * This module_param *should* be removed as soon as possible and be + * considered as a broken and legacy behaviour from a modern fbdev device. 
+ */ +#if IS_ENABLED(CONFIG_DRM_FBDEV_LEAK_PHYS_SMEM) +static bool drm_leak_fbdev_smem = false; +module_param_unsafe(drm_leak_fbdev_smem, bool, 0600); +MODULE_PARM_DESC(fbdev_emulation, + "Allow unsafe leaking fbdev physical smem address [default=false]"); +#endif + static LIST_HEAD(kernel_fb_helper_list); static DEFINE_MUTEX(kernel_fb_helper_lock); @@ -200,6 +219,9 @@ int drm_fb_helper_single_add_all_connectors(struct drm_fb_helper *fb_helper) mutex_lock(&fb_helper->lock); drm_connector_list_iter_begin(dev, &conn_iter); drm_for_each_connector_iter(connector, &conn_iter) { + if (connector->connector_type == DRM_MODE_CONNECTOR_WRITEBACK) + continue; + ret = __drm_fb_helper_add_one_connector(fb_helper, connector); if (ret) goto fail; @@ -549,6 +571,8 @@ static bool drm_fb_helper_is_bound(struct drm_fb_helper *fb_helper) drm_for_each_crtc(crtc, dev) { drm_modeset_lock(&crtc->mutex, NULL); + if (!crtc->primary) + continue; if (crtc->primary->fb) crtcs_bound++; if (crtc->primary->fb == fb_helper->fb) @@ -1440,6 +1464,7 @@ static int setcmap_atomic(struct fb_cmap *cmap, struct fb_info *info) replaced = drm_property_replace_blob(&crtc_state->degamma_lut, NULL); replaced |= drm_property_replace_blob(&crtc_state->ctm, NULL); + replaced |= drm_property_replace_blob(&crtc_state->ctm_post_offset, NULL); replaced |= drm_property_replace_blob(&crtc_state->gamma_lut, gamma_lut); crtc_state->color_mgmt_changed |= replaced; @@ -1599,6 +1624,64 @@ static bool drm_fb_pixel_format_equal(const struct fb_var_screeninfo *var_1, var_1->transp.msb_right == var_2->transp.msb_right; } +static void drm_fb_helper_fill_pixel_fmt(struct fb_var_screeninfo *var, + u8 depth) +{ + switch (depth) { + case 8: + var->red.offset = 0; + var->green.offset = 0; + var->blue.offset = 0; + var->red.length = 8; /* 8bit DAC */ + var->green.length = 8; + var->blue.length = 8; + var->transp.offset = 0; + var->transp.length = 0; + break; + case 15: + var->red.offset = 10; + var->green.offset = 5; + 
var->blue.offset = 0; + var->red.length = 5; + var->green.length = 5; + var->blue.length = 5; + var->transp.offset = 15; + var->transp.length = 1; + break; + case 16: + var->red.offset = 11; + var->green.offset = 5; + var->blue.offset = 0; + var->red.length = 5; + var->green.length = 6; + var->blue.length = 5; + var->transp.offset = 0; + break; + case 24: + var->red.offset = 16; + var->green.offset = 8; + var->blue.offset = 0; + var->red.length = 8; + var->green.length = 8; + var->blue.length = 8; + var->transp.offset = 0; + var->transp.length = 0; + break; + case 32: + var->red.offset = 16; + var->green.offset = 8; + var->blue.offset = 0; + var->red.length = 8; + var->green.length = 8; + var->blue.length = 8; + var->transp.offset = 24; + var->transp.length = 8; + break; + default: + break; + } +} + /** * drm_fb_helper_check_var - implementation for &fb_ops.fb_check_var * @var: screeninfo to check @@ -1610,9 +1693,14 @@ int drm_fb_helper_check_var(struct fb_var_screeninfo *var, struct drm_fb_helper *fb_helper = info->par; struct drm_framebuffer *fb = fb_helper->fb; - if (var->pixclock != 0 || in_dbg_master()) + if (in_dbg_master()) return -EINVAL; + if (var->pixclock != 0) { + DRM_DEBUG("fbdev emulation doesn't support changing the pixel clock, value of pixclock is ignored\n"); + var->pixclock = 0; + } + /* * Changes struct fb_var_screeninfo are currently not pushed back * to KMS, hence fail if different settings are requested. @@ -1628,6 +1716,20 @@ int drm_fb_helper_check_var(struct fb_var_screeninfo *var, return -EINVAL; } + /* + * Workaround for SDL 1.2, which is known to be setting all pixel format + * fields values to zero in some cases. We treat this situation as a + * kind of "use some reasonable autodetected values". 
+ */ + if (!var->red.offset && !var->green.offset && + !var->blue.offset && !var->transp.offset && + !var->red.length && !var->green.length && + !var->blue.length && !var->transp.length && + !var->red.msb_right && !var->green.msb_right && + !var->blue.msb_right && !var->transp.msb_right) { + drm_fb_helper_fill_pixel_fmt(var, fb->format->depth); + } + /* * drm fbdev emulation doesn't support changing the pixel format at all, * so reject all pixel format changing requests. @@ -1939,59 +2041,7 @@ void drm_fb_helper_fill_var(struct fb_info *info, struct drm_fb_helper *fb_helpe info->var.yoffset = 0; info->var.activate = FB_ACTIVATE_NOW; - switch (fb->format->depth) { - case 8: - info->var.red.offset = 0; - info->var.green.offset = 0; - info->var.blue.offset = 0; - info->var.red.length = 8; /* 8bit DAC */ - info->var.green.length = 8; - info->var.blue.length = 8; - info->var.transp.offset = 0; - info->var.transp.length = 0; - break; - case 15: - info->var.red.offset = 10; - info->var.green.offset = 5; - info->var.blue.offset = 0; - info->var.red.length = 5; - info->var.green.length = 5; - info->var.blue.length = 5; - info->var.transp.offset = 15; - info->var.transp.length = 1; - break; - case 16: - info->var.red.offset = 11; - info->var.green.offset = 5; - info->var.blue.offset = 0; - info->var.red.length = 5; - info->var.green.length = 6; - info->var.blue.length = 5; - info->var.transp.offset = 0; - break; - case 24: - info->var.red.offset = 16; - info->var.green.offset = 8; - info->var.blue.offset = 0; - info->var.red.length = 8; - info->var.green.length = 8; - info->var.blue.length = 8; - info->var.transp.offset = 0; - info->var.transp.length = 0; - break; - case 32: - info->var.red.offset = 16; - info->var.green.offset = 8; - info->var.blue.offset = 0; - info->var.red.length = 8; - info->var.green.length = 8; - info->var.blue.length = 8; - info->var.transp.offset = 24; - info->var.transp.length = 8; - break; - default: - break; - } + 
drm_fb_helper_fill_pixel_fmt(&info->var, fb->format->depth); info->var.xres = fb_width; info->var.yres = fb_height; @@ -3038,6 +3088,12 @@ int drm_fb_helper_generic_probe(struct drm_fb_helper *fb_helper, fbi->screen_size = fb->height * fb->pitches[0]; fbi->fix.smem_len = fbi->screen_size; fbi->screen_buffer = buffer->vaddr; + /* Shamelessly leak the physical address to user-space */ +#if IS_ENABLED(CONFIG_DRM_FBDEV_LEAK_PHYS_SMEM) + if (drm_leak_fbdev_smem && fbi->fix.smem_start == 0) + fbi->fix.smem_start = + page_to_phys(virt_to_page(fbi->screen_buffer)); +#endif strcpy(fbi->fix.id, "DRM emulated"); drm_fb_helper_fill_fix(fbi, fb->pitches[0], fb->format->depth); diff --git a/drivers/gpu/drm/drm_file.c b/drivers/gpu/drm/drm_file.c index ffa8dc35515ff..d47119e045e8f 100644 --- a/drivers/gpu/drm/drm_file.c +++ b/drivers/gpu/drm/drm_file.c @@ -46,6 +46,10 @@ /* from BKL pushdown */ DEFINE_MUTEX(drm_global_mutex); +#if IS_ENABLED(CONFIG_DRM_I915_MEMTRACK) +EXPORT_SYMBOL(drm_global_mutex); +#endif + /** * DOC: file operations * diff --git a/drivers/gpu/drm/drm_framebuffer.c b/drivers/gpu/drm/drm_framebuffer.c index 781af1d42d766..d2d2c30bea974 100644 --- a/drivers/gpu/drm/drm_framebuffer.c +++ b/drivers/gpu/drm/drm_framebuffer.c @@ -891,7 +891,7 @@ static void legacy_remove_fb(struct drm_framebuffer *fb) drm_modeset_lock_all(dev); /* remove from any CRTC */ drm_for_each_crtc(crtc, dev) { - if (crtc->primary->fb == fb) { + if (crtc->primary && crtc->primary->fb == fb) { /* should turn off the crtc */ if (drm_crtc_force_disable(crtc)) DRM_ERROR("failed to reset crtc %p when fb was deleted\n", crtc); diff --git a/drivers/gpu/drm/drm_internal.h b/drivers/gpu/drm/drm_internal.h index 40179c5fc6b87..6b2d7fe6efa90 100644 --- a/drivers/gpu/drm/drm_internal.h +++ b/drivers/gpu/drm/drm_internal.h @@ -48,6 +48,11 @@ void drm_prime_remove_buf_handle_locked(struct drm_prime_file_private *prime_fpr struct dma_buf *dma_buf); /* drm_drv.c */ + +#if IS_ENABLED(CONFIG_DRM_I915_MEMTRACK) 
+#define DRM_MAGIC_HASH_ORDER 4 /**< Size of key hash table. Must be power of 2. */ +#endif + struct drm_minor *drm_minor_acquire(unsigned int minor_id); void drm_minor_release(struct drm_minor *minor); @@ -99,6 +104,8 @@ struct device *drm_sysfs_minor_alloc(struct drm_minor *minor); int drm_sysfs_connector_add(struct drm_connector *connector); void drm_sysfs_connector_remove(struct drm_connector *connector); +void drm_sysfs_lease_event(struct drm_device *dev); + /* drm_gem.c */ int drm_gem_init(struct drm_device *dev); void drm_gem_destroy(struct drm_device *dev); diff --git a/drivers/gpu/drm/drm_ioctl.c b/drivers/gpu/drm/drm_ioctl.c index ea10e9a26aadd..ecd93312f99f7 100644 --- a/drivers/gpu/drm/drm_ioctl.c +++ b/drivers/gpu/drm/drm_ioctl.c @@ -37,6 +37,7 @@ #include #include +#include /** * DOC: getunique and setversion story @@ -557,7 +558,7 @@ static const struct drm_ioctl_desc drm_ioctls[] = { DRM_IOCTL_DEF(DRM_IOCTL_GET_CLIENT, drm_getclient, DRM_UNLOCKED), DRM_IOCTL_DEF(DRM_IOCTL_GET_STATS, drm_getstats, DRM_UNLOCKED), DRM_IOCTL_DEF(DRM_IOCTL_GET_CAP, drm_getcap, DRM_UNLOCKED|DRM_RENDER_ALLOW), - DRM_IOCTL_DEF(DRM_IOCTL_SET_CLIENT_CAP, drm_setclientcap, DRM_UNLOCKED), + DRM_IOCTL_DEF(DRM_IOCTL_SET_CLIENT_CAP, drm_setclientcap, DRM_UNLOCKED|DRM_RENDER_ALLOW), DRM_IOCTL_DEF(DRM_IOCTL_SET_VERSION, drm_setversion, DRM_UNLOCKED | DRM_MASTER), DRM_IOCTL_DEF(DRM_IOCTL_SET_UNIQUE, drm_invalid_op, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), @@ -628,10 +629,10 @@ static const struct drm_ioctl_desc drm_ioctls[] = { DRM_IOCTL_DEF(DRM_IOCTL_PRIME_HANDLE_TO_FD, drm_prime_handle_to_fd_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW), DRM_IOCTL_DEF(DRM_IOCTL_PRIME_FD_TO_HANDLE, drm_prime_fd_to_handle_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW), - DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETPLANERESOURCES, drm_mode_getplane_res, DRM_UNLOCKED), + DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETPLANERESOURCES, drm_mode_getplane_res, DRM_UNLOCKED|DRM_RENDER_ALLOW), DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETCRTC, 
drm_mode_getcrtc, DRM_UNLOCKED), DRM_IOCTL_DEF(DRM_IOCTL_MODE_SETCRTC, drm_mode_setcrtc, DRM_MASTER|DRM_UNLOCKED), - DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETPLANE, drm_mode_getplane, DRM_UNLOCKED), + DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETPLANE, drm_mode_getplane, DRM_UNLOCKED|DRM_RENDER_ALLOW), DRM_IOCTL_DEF(DRM_IOCTL_MODE_SETPLANE, drm_mode_setplane, DRM_MASTER|DRM_UNLOCKED), DRM_IOCTL_DEF(DRM_IOCTL_MODE_CURSOR, drm_mode_cursor_ioctl, DRM_MASTER|DRM_UNLOCKED), DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETGAMMA, drm_mode_gamma_get_ioctl, DRM_UNLOCKED), @@ -640,7 +641,7 @@ static const struct drm_ioctl_desc drm_ioctls[] = { DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETCONNECTOR, drm_mode_getconnector, DRM_UNLOCKED), DRM_IOCTL_DEF(DRM_IOCTL_MODE_ATTACHMODE, drm_noop, DRM_MASTER|DRM_UNLOCKED), DRM_IOCTL_DEF(DRM_IOCTL_MODE_DETACHMODE, drm_noop, DRM_MASTER|DRM_UNLOCKED), - DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETPROPERTY, drm_mode_getproperty_ioctl, DRM_UNLOCKED), + DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETPROPERTY, drm_mode_getproperty_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW), DRM_IOCTL_DEF(DRM_IOCTL_MODE_SETPROPERTY, drm_connector_property_set_ioctl, DRM_MASTER|DRM_UNLOCKED), DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETPROPBLOB, drm_mode_getblob_ioctl, DRM_UNLOCKED), DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETFB, drm_mode_getfb, DRM_UNLOCKED), @@ -652,7 +653,7 @@ static const struct drm_ioctl_desc drm_ioctls[] = { DRM_IOCTL_DEF(DRM_IOCTL_MODE_CREATE_DUMB, drm_mode_create_dumb_ioctl, DRM_UNLOCKED), DRM_IOCTL_DEF(DRM_IOCTL_MODE_MAP_DUMB, drm_mode_mmap_dumb_ioctl, DRM_UNLOCKED), DRM_IOCTL_DEF(DRM_IOCTL_MODE_DESTROY_DUMB, drm_mode_destroy_dumb_ioctl, DRM_UNLOCKED), - DRM_IOCTL_DEF(DRM_IOCTL_MODE_OBJ_GETPROPERTIES, drm_mode_obj_get_properties_ioctl, DRM_UNLOCKED), + DRM_IOCTL_DEF(DRM_IOCTL_MODE_OBJ_GETPROPERTIES, drm_mode_obj_get_properties_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW), DRM_IOCTL_DEF(DRM_IOCTL_MODE_OBJ_SETPROPERTY, drm_mode_obj_set_property_ioctl, DRM_MASTER|DRM_UNLOCKED), DRM_IOCTL_DEF(DRM_IOCTL_MODE_CURSOR2, drm_mode_cursor2_ioctl, 
DRM_MASTER|DRM_UNLOCKED), DRM_IOCTL_DEF(DRM_IOCTL_MODE_ATOMIC, drm_mode_atomic_ioctl, DRM_MASTER|DRM_UNLOCKED), @@ -794,13 +795,17 @@ long drm_ioctl(struct file *filp, if (is_driver_ioctl) { /* driver ioctl */ - if (nr - DRM_COMMAND_BASE >= dev->driver->num_ioctls) + unsigned int index = nr - DRM_COMMAND_BASE; + + if (index >= dev->driver->num_ioctls) goto err_i1; - ioctl = &dev->driver->ioctls[nr - DRM_COMMAND_BASE]; + index = array_index_nospec(index, dev->driver->num_ioctls); + ioctl = &dev->driver->ioctls[index]; } else { /* core ioctl */ if (nr >= DRM_CORE_IOCTL_COUNT) goto err_i1; + nr = array_index_nospec(nr, DRM_CORE_IOCTL_COUNT); ioctl = &drm_ioctls[nr]; } @@ -882,6 +887,7 @@ bool drm_ioctl_flags(unsigned int nr, unsigned int *flags) if (nr >= DRM_CORE_IOCTL_COUNT) return false; + nr = array_index_nospec(nr, DRM_CORE_IOCTL_COUNT); *flags = drm_ioctls[nr].flags; return true; diff --git a/drivers/gpu/drm/drm_lease.c b/drivers/gpu/drm/drm_lease.c index b82da96ded5c8..fe6bfaf8b53f5 100644 --- a/drivers/gpu/drm/drm_lease.c +++ b/drivers/gpu/drm/drm_lease.c @@ -296,7 +296,7 @@ void drm_lease_destroy(struct drm_master *master) if (master->lessor) { /* Tell the master to check the lessee list */ - drm_sysfs_hotplug_event(dev); + drm_sysfs_lease_event(dev); drm_master_put(&master->lessor); } diff --git a/drivers/gpu/drm/drm_mode_config.c b/drivers/gpu/drm/drm_mode_config.c index 21e353bd3948e..1d6ea4bb4838e 100644 --- a/drivers/gpu/drm/drm_mode_config.c +++ b/drivers/gpu/drm/drm_mode_config.c @@ -332,6 +332,13 @@ static int drm_mode_create_standard_properties(struct drm_device *dev) return -ENOMEM; dev->mode_config.ctm_property = prop; + prop = drm_property_create(dev, + DRM_MODE_PROP_BLOB, + "CTM_POST_OFFSET", 0); + if (!prop) + return -ENOMEM; + dev->mode_config.ctm_post_offset_property = prop; + prop = drm_property_create(dev, DRM_MODE_PROP_BLOB, "GAMMA_LUT", 0); diff --git a/drivers/gpu/drm/drm_panel_orientation_quirks.c 
b/drivers/gpu/drm/drm_panel_orientation_quirks.c index fe9c6c731e878..ee4a5e1221f1f 100644 --- a/drivers/gpu/drm/drm_panel_orientation_quirks.c +++ b/drivers/gpu/drm/drm_panel_orientation_quirks.c @@ -30,6 +30,12 @@ struct drm_dmi_panel_orientation_data { int orientation; }; +static const struct drm_dmi_panel_orientation_data acer_s1003 = { + .width = 800, + .height = 1280, + .orientation = DRM_MODE_PANEL_ORIENTATION_RIGHT_UP, +}; + static const struct drm_dmi_panel_orientation_data asus_t100ha = { .width = 800, .height = 1280, @@ -67,7 +73,13 @@ static const struct drm_dmi_panel_orientation_data lcd800x1280_rightside_up = { }; static const struct dmi_system_id orientation_data[] = { - { /* Asus T100HA */ + { /* Acer One 10 (S1003) */ + .matches = { + DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Acer"), + DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "One S1003"), + }, + .driver_data = (void *)&acer_s1003, + }, { /* Asus T100HA */ .matches = { DMI_EXACT_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."), DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "T100HAN"), diff --git a/drivers/gpu/drm/drm_sysfs.c b/drivers/gpu/drm/drm_sysfs.c index b3c1daad1169b..ecb7b33002bb2 100644 --- a/drivers/gpu/drm/drm_sysfs.c +++ b/drivers/gpu/drm/drm_sysfs.c @@ -301,6 +301,16 @@ void drm_sysfs_connector_remove(struct drm_connector *connector) connector->kdev = NULL; } +void drm_sysfs_lease_event(struct drm_device *dev) +{ + char *event_string = "LEASE=1"; + char *envp[] = { event_string, NULL }; + + DRM_DEBUG("generating lease event\n"); + + kobject_uevent_env(&dev->primary->kdev->kobj, KOBJ_CHANGE, envp); +} + /** * drm_sysfs_hotplug_event - generate a DRM uevent * @dev: DRM device diff --git a/drivers/gpu/drm/etnaviv/etnaviv_sched.c b/drivers/gpu/drm/etnaviv/etnaviv_sched.c index 69e9b431bf1f0..e5a9fae31ab7b 100644 --- a/drivers/gpu/drm/etnaviv/etnaviv_sched.c +++ b/drivers/gpu/drm/etnaviv/etnaviv_sched.c @@ -93,7 +93,7 @@ static void etnaviv_sched_timedout_job(struct drm_sched_job *sched_job) * If the GPU managed to 
complete this jobs fence, the timout is * spurious. Bail out. */ - if (fence_completed(gpu, submit->out_fence->seqno)) + if (dma_fence_is_signaled(submit->out_fence)) return; /* diff --git a/drivers/gpu/drm/exynos/exynos5433_drm_decon.c b/drivers/gpu/drm/exynos/exynos5433_drm_decon.c index 94529aa823392..aef487dd87315 100644 --- a/drivers/gpu/drm/exynos/exynos5433_drm_decon.c +++ b/drivers/gpu/drm/exynos/exynos5433_drm_decon.c @@ -164,13 +164,6 @@ static u32 decon_get_frame_count(struct decon_context *ctx, bool end) return frm; } -static u32 decon_get_vblank_counter(struct exynos_drm_crtc *crtc) -{ - struct decon_context *ctx = crtc->ctx; - - return decon_get_frame_count(ctx, false); -} - static void decon_setup_trigger(struct decon_context *ctx) { if (!ctx->crtc->i80_mode && !(ctx->out_type & I80_HW_TRG)) @@ -536,7 +529,6 @@ static const struct exynos_drm_crtc_ops decon_crtc_ops = { .disable = decon_disable, .enable_vblank = decon_enable_vblank, .disable_vblank = decon_disable_vblank, - .get_vblank_counter = decon_get_vblank_counter, .atomic_begin = decon_atomic_begin, .update_plane = decon_update_plane, .disable_plane = decon_disable_plane, @@ -554,7 +546,6 @@ static int decon_bind(struct device *dev, struct device *master, void *data) int ret; ctx->drm_dev = drm_dev; - drm_dev->max_vblank_count = 0xffffffff; for (win = ctx->first_win; win < WINDOWS_NR; win++) { ctx->configs[win].pixel_formats = decon_formats; diff --git a/drivers/gpu/drm/exynos/exynos_drm_crtc.c b/drivers/gpu/drm/exynos/exynos_drm_crtc.c index eea90251808fa..2696289ecc78f 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_crtc.c +++ b/drivers/gpu/drm/exynos/exynos_drm_crtc.c @@ -162,16 +162,6 @@ static void exynos_drm_crtc_disable_vblank(struct drm_crtc *crtc) exynos_crtc->ops->disable_vblank(exynos_crtc); } -static u32 exynos_drm_crtc_get_vblank_counter(struct drm_crtc *crtc) -{ - struct exynos_drm_crtc *exynos_crtc = to_exynos_crtc(crtc); - - if (exynos_crtc->ops->get_vblank_counter) - return 
exynos_crtc->ops->get_vblank_counter(exynos_crtc); - - return 0; -} - static const struct drm_crtc_funcs exynos_crtc_funcs = { .set_config = drm_atomic_helper_set_config, .page_flip = drm_atomic_helper_page_flip, @@ -181,7 +171,6 @@ static const struct drm_crtc_funcs exynos_crtc_funcs = { .atomic_destroy_state = drm_atomic_helper_crtc_destroy_state, .enable_vblank = exynos_drm_crtc_enable_vblank, .disable_vblank = exynos_drm_crtc_disable_vblank, - .get_vblank_counter = exynos_drm_crtc_get_vblank_counter, }; struct exynos_drm_crtc *exynos_drm_crtc_create(struct drm_device *drm_dev, diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.h b/drivers/gpu/drm/exynos/exynos_drm_drv.h index c737c4bd2c19b..630f1edc5de22 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_drv.h +++ b/drivers/gpu/drm/exynos/exynos_drm_drv.h @@ -133,7 +133,6 @@ struct exynos_drm_crtc_ops { void (*disable)(struct exynos_drm_crtc *crtc); int (*enable_vblank)(struct exynos_drm_crtc *crtc); void (*disable_vblank)(struct exynos_drm_crtc *crtc); - u32 (*get_vblank_counter)(struct exynos_drm_crtc *crtc); enum drm_mode_status (*mode_valid)(struct exynos_drm_crtc *crtc, const struct drm_display_mode *mode); bool (*mode_fixup)(struct exynos_drm_crtc *crtc, diff --git a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_fbdev.c b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_fbdev.c index b92595c477ef6..8bd29075ae4eb 100644 --- a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_fbdev.c +++ b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_fbdev.c @@ -122,6 +122,7 @@ static int hibmc_drm_fb_create(struct drm_fb_helper *helper, hi_fbdev->fb = hibmc_framebuffer_init(priv->dev, &mode_cmd, gobj); if (IS_ERR(hi_fbdev->fb)) { ret = PTR_ERR(hi_fbdev->fb); + hi_fbdev->fb = NULL; DRM_ERROR("failed to initialize framebuffer: %d\n", ret); goto out_release_fbi; } diff --git a/drivers/gpu/drm/i915/Kconfig b/drivers/gpu/drm/i915/Kconfig index 33a458b7f1fcd..3d25ace132e7e 100644 --- a/drivers/gpu/drm/i915/Kconfig +++ b/drivers/gpu/drm/i915/Kconfig @@ 
-84,6 +84,16 @@ config DRM_I915_COMPRESS_ERROR If in doubt, say "Y". +config DRM_I915_MEMTRACK + bool "Enable shmem usage status track" + depends on DRM_I915_CAPTURE_ERROR + default y + help + This option enables shmem usage status track of system summary and + each process. + + If in doubt, say "N". + config DRM_I915_USERPTR bool "Always enable userptr support" depends on DRM_I915 @@ -127,6 +137,15 @@ config DRM_I915_GVT_KVMGT help Choose this option if you want to enable KVMGT support for Intel GVT-g. +config DRM_I915_GVT_ACRN_GVT + tristate "Enable ACRN support for Intel GVT-g" + depends on DRM_I915_GVT + depends on ACRN + depends on ACRN_VHM + default n + help + Choose this option if you want to enable ACRN_GVT support for + Intel GVT-g under ACRN hypervisor environment. menu "drm/i915 Debugging" depends on DRM_I915 diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile index 5794f102f9b8f..c70a494bb2136 100644 --- a/drivers/gpu/drm/i915/Makefile +++ b/drivers/gpu/drm/i915/Makefile @@ -183,11 +183,14 @@ i915-y += i915_perf.o \ i915_oa_icl.o ifeq ($(CONFIG_DRM_I915_GVT),y) -i915-y += intel_gvt.o +i915-y += intel_gvt.o i915_gem_gvtbuffer.o include $(src)/gvt/Makefile endif # LPE Audio for VLV and CHT i915-y += intel_lpe_audio.o +# initial modeset +i915-y += intel_initial_modeset.o + obj-$(CONFIG_DRM_I915) += i915.o diff --git a/drivers/gpu/drm/i915/gvt/Makefile b/drivers/gpu/drm/i915/gvt/Makefile index b016dc753db96..0acb4dabc00ce 100644 --- a/drivers/gpu/drm/i915/gvt/Makefile +++ b/drivers/gpu/drm/i915/gvt/Makefile @@ -5,6 +5,7 @@ GVT_SOURCE := gvt.o aperture_gm.o handlers.o vgpu.o trace_points.o firmware.o \ execlist.o scheduler.o sched_policy.o mmio_context.o cmd_parser.o debugfs.o \ fb_decoder.o dmabuf.o page_track.o -ccflags-y += -I$(src) -I$(src)/$(GVT_DIR) +ccflags-y += -I$(src) -I$(src)/$(GVT_DIR) -Wall i915-y += $(addprefix $(GVT_DIR)/, $(GVT_SOURCE)) obj-$(CONFIG_DRM_I915_GVT_KVMGT) += $(GVT_DIR)/kvmgt.o 
+obj-$(CONFIG_DRM_I915_GVT_ACRN_GVT) += $(GVT_DIR)/acrngt.o diff --git a/drivers/gpu/drm/i915/gvt/acrngt.c b/drivers/gpu/drm/i915/gvt/acrngt.c new file mode 100644 index 0000000000000..569e3a2502ebc --- /dev/null +++ b/drivers/gpu/drm/i915/gvt/acrngt.c @@ -0,0 +1,992 @@ +/* + * Interfaces coupled to ACRN + * + * Copyright(c) 2018 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of Version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. + * + */ + +/* + * NOTE: + * This file contains hypervisor specific interactions to + * implement the concept of mediated pass-through framework. + * What this file provides is actually a general abstraction + * of in-kernel device model, which is not gvt specific. 
+ * + */ +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include + +#include +#include +#include +#include "acrngt.h" + +MODULE_AUTHOR("Intel Corporation"); +MODULE_DESCRIPTION("ACRNGT mediated passthrough driver"); +MODULE_LICENSE("GPL"); +MODULE_VERSION("0.1"); + +#define ASSERT(x) \ +do { if (x) break; \ + printk(KERN_EMERG "### ASSERTION FAILED %s: %s: %d: %s\n", \ + __FILE__, __func__, __LINE__, #x); dump_stack(); BUG(); \ +} while (0) + + +struct kobject *acrn_gvt_ctrl_kobj; +static struct kset *acrn_gvt_kset; +static DEFINE_MUTEX(acrn_gvt_sysfs_lock); + +struct gvt_acrngt acrngt_priv; +const struct intel_gvt_ops *intel_gvt_ops; + +static void disable_domu_plane(int pipe, int plane) +{ + struct drm_i915_private *dev_priv = acrngt_priv.gvt->dev_priv; + + I915_WRITE(PLANE_CTL(pipe, plane), 0); + + I915_WRITE(PLANE_SURF(pipe, plane), 0); + POSTING_READ(PLANE_SURF(pipe, plane)); +} + +void acrngt_instance_destroy(struct intel_vgpu *vgpu) +{ + int pipe, plane; + struct acrngt_hvm_dev *info = NULL; + struct intel_gvt *gvt = acrngt_priv.gvt; + + if (vgpu) { + info = (struct acrngt_hvm_dev *)vgpu->handle; + + for_each_pipe(gvt->dev_priv, pipe) { + for_each_universal_plane(gvt->dev_priv, pipe, plane) { + if (gvt->pipe_info[pipe].plane_owner[plane] == + vgpu->id) { + disable_domu_plane(pipe, plane); + } + } + } + + intel_gvt_ops->vgpu_deactivate(vgpu); + intel_gvt_ops->vgpu_destroy(vgpu); + } + + if (info) { + gvt_dbg_core("destroy vgpu instance, vm id: %d, client %d", + info->vm_id, info->client); + + if (info->client != 0) + acrn_ioreq_destroy_client(info->client); + + if (info->vm) + put_vm(info->vm); + + kfree(info); + } +} + +static bool acrngt_write_cfg_space(struct intel_vgpu *vgpu, + unsigned int port, unsigned int bytes, unsigned long val) +{ + if (intel_gvt_ops->emulate_cfg_write(vgpu, port, &val, bytes)) { + gvt_err("failed to write config space port 0x%x\n", port); + return false; + 
} + return true; +} + +static bool acrngt_read_cfg_space(struct intel_vgpu *vgpu, + unsigned int port, unsigned int bytes, unsigned long *val) +{ + unsigned long data; + + if (intel_gvt_ops->emulate_cfg_read(vgpu, port, &data, bytes)) { + gvt_err("failed to read config space port 0x%x\n", port); + return false; + } + memcpy(val, &data, bytes); + return true; +} + +static int acrngt_hvm_pio_emulation(struct intel_vgpu *vgpu, + struct vhm_request *req) +{ + if (req->reqs.pci_request.direction == REQUEST_READ) { + /* PIO READ */ + gvt_dbg_core("handle pio read emulation at port 0x%x\n", + req->reqs.pci_request.reg); + if (!acrngt_read_cfg_space(vgpu, + req->reqs.pci_request.reg, + req->reqs.pci_request.size, + (unsigned long *)&req->reqs.pci_request.value)) { + gvt_err("failed to read pio at addr 0x%x\n", + req->reqs.pci_request.reg); + return -EINVAL; + } + } else if (req->reqs.pci_request.direction == REQUEST_WRITE) { + /* PIO WRITE */ + gvt_dbg_core("handle pio write emulation at address 0x%x, " + "value 0x%x\n", + req->reqs.pci_request.reg, req->reqs.pci_request.value); + if (!acrngt_write_cfg_space(vgpu, + req->reqs.pci_request.reg, + req->reqs.pci_request.size, + (unsigned long)req->reqs.pci_request.value)) { + gvt_err("failed to write pio at addr 0x%x\n", + req->reqs.pci_request.reg); + return -EINVAL; + } + } + return 0; +} + +static int acrngt_hvm_write_handler(struct intel_vgpu *vgpu, uint64_t pa, + void *p_data, unsigned int bytes) +{ + + /* Check whether pa is ppgtt */ + if (intel_gvt_ops->write_protect_handler(vgpu, pa, p_data, bytes) == 0) + return 0; + + /* pa is mmio reg or gtt */ + return intel_gvt_ops->emulate_mmio_write(vgpu, pa, p_data, bytes); +} + +static int acrngt_hvm_mmio_emulation(struct intel_vgpu *vgpu, + struct vhm_request *req) +{ + if (req->reqs.mmio_request.direction == REQUEST_READ) { + /* MMIO READ */ + gvt_dbg_core("handle mmio read emulation at address 0x%llx\n", + req->reqs.mmio_request.address); + if 
(intel_gvt_ops->emulate_mmio_read(vgpu, + req->reqs.mmio_request.address, + &req->reqs.mmio_request.value, + req->reqs.mmio_request.size)) { + gvt_err("failed to read mmio at addr 0x%llx\n", + req->reqs.mmio_request.address); + return -EINVAL; + } + } else if (req->reqs.mmio_request.direction == REQUEST_WRITE) { + /* MMIO Write */ + if (acrngt_hvm_write_handler(vgpu, + req->reqs.mmio_request.address, + &req->reqs.mmio_request.value, + req->reqs.mmio_request.size)) { + gvt_err("failed to write mmio at addr 0x%llx\n", + req->reqs.mmio_request.address); + return -EINVAL; + } + gvt_dbg_core("handle mmio write emulation at address 0x%llx, " + "value 0x%llx\n", + req->reqs.mmio_request.address, req->reqs.mmio_request.value); + } + + return 0; +} + +static void handle_request_error(struct intel_vgpu *vgpu) +{ + mutex_lock(&vgpu->gvt->lock); + if (vgpu->failsafe == false) { + vgpu->failsafe= true; + gvt_err("Now vgpu %d will enter failsafe mode.\n", vgpu->id); + } + mutex_unlock(&vgpu->gvt->lock); +} + +static int acrngt_emulate_ioreq(int client_id, + unsigned long *ioreqs_map, + void *client_priv) +{ + struct vhm_request *req; + struct acrngt_hvm_dev *info = NULL; + int vcpu; + int ret = 0; + struct intel_vgpu *vgpu = NULL; + + info = (struct acrngt_hvm_dev *)client_priv; + if (!info || !info->vgpu) { + gvt_err("Unknown vgpu for client %d\n", + client_id); + return -EINVAL; + } + vgpu = info->vgpu; + + while (1) { + vcpu = find_first_bit(ioreqs_map, info->nr_vcpu); + if (vcpu == info->nr_vcpu) + break; + + req = &info->req_buf[vcpu]; + if ((atomic_read(&req->processed) == REQ_STATE_PROCESSING) && + (req->client == info->client)) { + gvt_dbg_core("handle ioreq type %d\n", + req->type); + switch (req->type) { + case REQ_PCICFG: + ret = acrngt_hvm_pio_emulation(vgpu, req); + break; + case REQ_MMIO: + case REQ_WP: + ret = acrngt_hvm_mmio_emulation(vgpu, req); + break; + default: + gvt_err("Unknown ioreq type %x\n", + req->type); + ret = -EINVAL; + break; + } + /* error 
handling */ + if (ret) + handle_request_error(vgpu); + + /* complete request */ + if (acrn_ioreq_complete_request(info->client, + vcpu, req)) + gvt_err("failed complete request\n"); + } + } + + return ret; +} + +struct intel_vgpu *acrngt_instance_create(domid_t vm_id, + struct intel_vgpu_type *vgpu_type) +{ + struct acrngt_hvm_dev *info; + struct intel_vgpu *vgpu; + int ret = 0; + struct vm_info vm_info; + + gvt_dbg_core("acrngt_instance_create enter\n"); + if (!intel_gvt_ops || !acrngt_priv.gvt) + return NULL; + + vgpu = intel_gvt_ops->vgpu_create(acrngt_priv.gvt, vgpu_type); + if (IS_ERR(vgpu)) { + gvt_err("failed to create vgpu\n"); + return NULL; + } + + info = kzalloc(sizeof(struct acrngt_hvm_dev), GFP_KERNEL); + if (info == NULL) { + gvt_err("failed to alloc acrngt_hvm_dev\n"); + goto err; + } + + info->vm_id = vm_id; + info->vgpu = vgpu; + vgpu->handle = (unsigned long)info; + + if ((info->vm = find_get_vm(vm_id)) == NULL) { + gvt_err("failed to get vm %d\n", vm_id); + acrngt_instance_destroy(vgpu); + return NULL; + } + if (info->vm->req_buf == NULL) { + gvt_err("failed to get req buf for vm %d\n", vm_id); + goto err; + } + gvt_dbg_core("get vm req_buf from vm_id %d\n", vm_id); + + /* create client: no handler -> handle request by itself */ + info->client = acrn_ioreq_create_client(vm_id, acrngt_emulate_ioreq, + info, "ioreq gvt-g"); + if (info->client < 0) { + gvt_err("failed to create ioreq client for vm id %d\n", vm_id); + goto err; + } + + /* get vm info */ + ret = vhm_get_vm_info(vm_id, &vm_info); + if (ret < 0) { + gvt_err("failed to get vm info for vm id %d\n", vm_id); + goto err; + } + + info->nr_vcpu = vm_info.max_vcpu; + + /* get req buf */ + info->req_buf = acrn_ioreq_get_reqbuf(info->client); + if (info->req_buf == NULL) { + gvt_err("failed to get req_buf for client %d\n", info->client); + goto err; + } + + /* trap config space access */ + acrn_ioreq_intercept_bdf(info->client, 0, 2, 0); + acrn_ioreq_attach_client(info->client); + + 
gvt_dbg_core("create vgpu instance success, vm_id %d, client %d," + " nr_vcpu %d\n", info->vm_id,info->client, info->nr_vcpu); + + intel_gvt_ops->vgpu_activate(vgpu); + + return vgpu; + +err: + acrngt_instance_destroy(vgpu); + return NULL; +} + +static ssize_t kobj_attr_show(struct kobject *kobj, + struct attribute *attr, char *buf) +{ + struct kobj_attribute *kattr; + ssize_t ret = -EIO; + + kattr = container_of(attr, struct kobj_attribute, attr); + if (kattr->show) + ret = kattr->show(kobj, kattr, buf); + return ret; +} + +static ssize_t kobj_attr_store(struct kobject *kobj, + struct attribute *attr, const char *buf, size_t count) +{ + struct kobj_attribute *kattr; + ssize_t ret = -EIO; + + kattr = container_of(attr, struct kobj_attribute, attr); + if (kattr->store) + ret = kattr->store(kobj, kattr, buf, count); + return ret; +} + +const struct sysfs_ops acrngt_kobj_sysfs_ops = { + .show = kobj_attr_show, + .store = kobj_attr_store, +}; + +static ssize_t acrngt_sysfs_vgpu_id(struct kobject *kobj, + struct kobj_attribute *attr, char *buf) +{ + int i; + + for (i = 0; i < GVT_MAX_VGPU_INSTANCE; i++) { + if (acrngt_priv.vgpus[i] && + (kobj == &((struct acrngt_hvm_dev *) + (acrngt_priv.vgpus[i]->handle))->kobj)) { + return sprintf(buf, "%d\n", acrngt_priv.vgpus[i]->id); + } + } + return 0; +} + +static struct kobj_attribute acrngt_vm_attr = +__ATTR(vgpu_id, 0440, acrngt_sysfs_vgpu_id, NULL); + + +static struct attribute *acrngt_vm_attrs[] = { + &acrngt_vm_attr.attr, + NULL, /* need to NULL terminate the list of attributes */ +}; + +static struct kobj_type acrngt_instance_ktype = { + .sysfs_ops = &acrngt_kobj_sysfs_ops, + .default_attrs = acrngt_vm_attrs, +}; + +static int acrngt_sysfs_add_instance(struct acrngt_hvm_params *vp) +{ + int ret = 0; + struct intel_vgpu *vgpu; + struct acrngt_hvm_dev *info; + + struct intel_vgpu_type type = acrngt_priv.gvt->types[0]; + type.low_gm_size = vp->aperture_sz * VMEM_1MB; + type.high_gm_size = (vp->gm_sz - vp->aperture_sz) * 
VMEM_1MB; + type.fence = vp->fence_sz; + mutex_lock(&acrn_gvt_sysfs_lock); + vgpu = acrngt_instance_create(vp->vm_id, &type); + mutex_unlock(&acrn_gvt_sysfs_lock); + if (vgpu == NULL) { + gvt_err("acrngt_sysfs_add_instance failed.\n"); + ret = -EINVAL; + } else { + info = (struct acrngt_hvm_dev *) vgpu->handle; + info->vm_id = vp->vm_id; + acrngt_priv.vgpus[vgpu->id - 1] = vgpu; + gvt_dbg_core("add acrngt instance for vm-%d with vgpu-%d.\n", + vp->vm_id, vgpu->id); + + kobject_init(&info->kobj, &acrngt_instance_ktype); + info->kobj.kset = acrn_gvt_kset; + /* add kobject, NULL parent indicates using kset as parent */ + ret = kobject_add(&info->kobj, NULL, "vm%u", info->vm_id); + if (ret) { + gvt_err("%s: kobject add error: %d\n", __func__, ret); + kobject_put(&info->kobj); + } + } + + return ret; +} + +static struct intel_vgpu *vgpu_from_id(int vm_id) +{ + int i; + struct acrngt_hvm_dev *hvm_dev = NULL; + + /* vm_id is negative in del_instance call */ + if (vm_id < 0) + vm_id = -vm_id; + for (i = 0; i < GVT_MAX_VGPU_INSTANCE; i++) + if (acrngt_priv.vgpus[i]) { + hvm_dev = (struct acrngt_hvm_dev *) + acrngt_priv.vgpus[i]->handle; + if (hvm_dev && (vm_id == hvm_dev->vm_id)) + return acrngt_priv.vgpus[i]; + } + return NULL; +} + +static int acrngt_sysfs_del_instance(struct acrngt_hvm_params *vp) +{ + int ret = 0; + struct intel_vgpu *vgpu = vgpu_from_id(vp->vm_id); + struct acrngt_hvm_dev *info = NULL; + + if (vgpu) { + info = (struct acrngt_hvm_dev *) vgpu->handle; + gvt_dbg_core("remove vm-%d sysfs node.\n", vp->vm_id); + kobject_put(&info->kobj); + + mutex_lock(&acrn_gvt_sysfs_lock); + acrngt_priv.vgpus[vgpu->id - 1] = NULL; + acrngt_instance_destroy(vgpu); + mutex_unlock(&acrn_gvt_sysfs_lock); + } + + return ret; +} + +static ssize_t acrngt_sysfs_instance_manage(struct kobject *kobj, + struct kobj_attribute *attr, const char *buf, size_t count) +{ + struct acrngt_hvm_params vp; + int param_cnt; + char param_str[64]; + int rc; + int high_gm_sz; + int low_gm_sz; + + 
/* We expect the param_str should be vmid,a,b,c (where the guest + * wants a MB aperture and b MB gm, and c fence registers) or -vmid + * (where we want to release the gvt instance). + */ + (void)sscanf(buf, "%63s", param_str); + param_cnt = sscanf(param_str, "%d,%d,%d,%d", &vp.vm_id, + &low_gm_sz, &high_gm_sz, &vp.fence_sz); + gvt_dbg_core("create vm-%d sysfs node, low gm size %d," + " high gm size %d, fence size %d\n", + vp.vm_id, low_gm_sz, high_gm_sz, vp.fence_sz); + vp.aperture_sz = low_gm_sz; + vp.gm_sz = high_gm_sz + low_gm_sz; + if (param_cnt == 1) { + if (vp.vm_id >= 0) + return -EINVAL; + } else if (param_cnt == 4) { + if (!(vp.vm_id > 0 && vp.aperture_sz > 0 && + vp.aperture_sz <= vp.gm_sz && vp.fence_sz > 0)) + return -EINVAL; + } else { + gvt_err("%s: parameter counter incorrect\n", __func__); + return -EINVAL; + } + + rc = (vp.vm_id > 0) ? acrngt_sysfs_add_instance(&vp) : + acrngt_sysfs_del_instance(&vp); + + return rc < 0 ? rc : count; +} + +static ssize_t show_plane_owner(struct kobject *kobj, + struct kobj_attribute *attr, char *buf) +{ + return sprintf(buf, "Planes:\nPipe A: %d %d %d %d\n" + "Pipe B: %d %d %d %d\nPipe C: %d %d %d\n", + acrngt_priv.gvt->pipe_info[PIPE_A].plane_owner[PLANE_PRIMARY], + acrngt_priv.gvt->pipe_info[PIPE_A].plane_owner[PLANE_SPRITE0], + acrngt_priv.gvt->pipe_info[PIPE_A].plane_owner[PLANE_SPRITE1], + acrngt_priv.gvt->pipe_info[PIPE_A].plane_owner[PLANE_SPRITE2], + acrngt_priv.gvt->pipe_info[PIPE_B].plane_owner[PLANE_PRIMARY], + acrngt_priv.gvt->pipe_info[PIPE_B].plane_owner[PLANE_SPRITE0], + acrngt_priv.gvt->pipe_info[PIPE_B].plane_owner[PLANE_SPRITE1], + acrngt_priv.gvt->pipe_info[PIPE_B].plane_owner[PLANE_SPRITE2], + acrngt_priv.gvt->pipe_info[PIPE_C].plane_owner[PLANE_PRIMARY], + acrngt_priv.gvt->pipe_info[PIPE_C].plane_owner[PLANE_SPRITE0], + acrngt_priv.gvt->pipe_info[PIPE_C].plane_owner[PLANE_SPRITE1]); +} + +static struct kobj_attribute acrngt_instance_attr = +__ATTR(create_gvt_instance, 0220, NULL, 
acrngt_sysfs_instance_manage); + +static struct kobj_attribute plane_owner_attr = +__ATTR(plane_owner_show, 0440, show_plane_owner, NULL); + +static struct attribute *acrngt_ctrl_attrs[] = { + &acrngt_instance_attr.attr, + &plane_owner_attr.attr, + NULL, /* need to NULL terminate the list of attributes */ +}; + +static struct kobj_type acrngt_ctrl_ktype = { + .sysfs_ops = &acrngt_kobj_sysfs_ops, + .default_attrs = acrngt_ctrl_attrs, +}; + +int acrngt_sysfs_init(struct intel_gvt *gvt) +{ + int ret; + + acrn_gvt_kset = kset_create_and_add("gvt", NULL, kernel_kobj); + if (!acrn_gvt_kset) { + ret = -ENOMEM; + goto kset_fail; + } + + acrn_gvt_ctrl_kobj = kzalloc(sizeof(struct kobject), GFP_KERNEL); + if (!acrn_gvt_ctrl_kobj) { + ret = -ENOMEM; + goto ctrl_fail; + } + + acrn_gvt_ctrl_kobj->kset = acrn_gvt_kset; + ret = kobject_init_and_add(acrn_gvt_ctrl_kobj, &acrngt_ctrl_ktype, + NULL, "control"); + if (ret) { + ret = -EINVAL; + goto kobj_fail; + } + + return 0; + +kobj_fail: + kobject_put(acrn_gvt_ctrl_kobj); +ctrl_fail: + kset_unregister(acrn_gvt_kset); +kset_fail: + return ret; +} + +void acrngt_sysfs_del(void) +{ + kobject_put(acrn_gvt_ctrl_kobj); + kset_unregister(acrn_gvt_kset); +} + +static int acrngt_host_init(struct device *dev, void *gvt, const void *ops) +{ + int ret = -EFAULT; + + if (!gvt || !ops) + return -EINVAL; + + acrngt_priv.gvt = (struct intel_gvt *)gvt; + intel_gvt_ops = (const struct intel_gvt_ops *)ops; + + ret = acrngt_sysfs_init(acrngt_priv.gvt); + if (ret) { + gvt_err("failed call acrngt_sysfs_init, error: %d\n", ret); + acrngt_priv.gvt = NULL; + intel_gvt_ops = NULL; + } + + return ret; +} + +static void acrngt_host_exit(struct device *dev, void *gvt) +{ + acrngt_sysfs_del(); + acrngt_priv.gvt = NULL; + intel_gvt_ops = NULL; +} + +static int acrngt_attach_vgpu(void *vgpu, unsigned long *handle) +{ + return 0; +} + +static void acrngt_detach_vgpu(unsigned long handle) +{ + return; +} + +static int acrngt_inject_msi(unsigned long handle, u32 
addr_lo, u16 data) +{ + int ret; + struct acrngt_hvm_dev *info = (struct acrngt_hvm_dev *)handle; + gvt_dbg_core("inject msi irq, addr 0x%x, data 0x%hx\n", addr_lo, data); + + ret = vhm_inject_msi(info->vm_id, addr_lo, data); + if (ret) + gvt_err("failed to inject msi for vm %d\n", info->vm_id); + return ret; +} + +static unsigned long acrngt_virt_to_mfn(void *addr) +{ + uint64_t gpa; + uint64_t hpa; + gvt_dbg_core("virt 0x%lx to mfn\n", (unsigned long)addr); + + gpa = virt_to_phys(addr); + hpa = vhm_vm_gpa2hpa(0, gpa); + + return (unsigned long) (hpa >> PAGE_SHIFT); +} + +static int acrngt_page_track_add(unsigned long handle, u64 gfn) +{ + int ret; + unsigned long hpa; + struct acrngt_hvm_dev *info = (struct acrngt_hvm_dev *)handle; + gvt_dbg_core("set wp page for gfn 0x%llx\n", gfn); + + hpa = vhm_vm_gpa2hpa(info->vm_id, gfn << PAGE_SHIFT); + ret = acrn_ioreq_add_iorange(info->client, REQ_WP, gfn << PAGE_SHIFT, + ((gfn + 1) << PAGE_SHIFT) - 1); + if (ret) { + gvt_err("failed acrn_ioreq_add_iorange for gfn 0x%llx\n", gfn); + return ret; + } + ret = write_protect_page(info->vm_id, gfn << PAGE_SHIFT, true); + if (ret) + gvt_err("failed set write protect for gfn 0x%llx\n", gfn); + return ret; +} + +static int acrngt_page_track_remove(unsigned long handle, u64 gfn) +{ + int ret; + unsigned long hpa; + struct acrngt_hvm_dev *info = (struct acrngt_hvm_dev *)handle; + gvt_dbg_core("unset wp page for gfx 0x%llx\n", gfn); + + hpa = vhm_vm_gpa2hpa(info->vm_id, gfn << PAGE_SHIFT); + ret = write_protect_page(info->vm_id, gfn << PAGE_SHIFT, false); + if (ret) { + gvt_err("failed update_memmap_attr unset for gfn 0x%llx\n", gfn); + return ret; + } + ret = acrn_ioreq_del_iorange(info->client, REQ_WP, gfn << PAGE_SHIFT, + ((gfn + 1) << PAGE_SHIFT) - 1); + if (ret) + gvt_err("failed acrn_ioreq_del_iorange for gfn 0x%llx\n", gfn); + return ret; +} + +static int acrngt_read_gpa(unsigned long handle, unsigned long gpa, + void *buf, unsigned long len) +{ + void *va = NULL; + struct 
acrngt_hvm_dev *info = (struct acrngt_hvm_dev *)handle; + gvt_dbg_core("read gpa 0x%lx with len 0x%lx\n", gpa, len); + + va = map_guest_phys(info->vm_id, gpa, len); + if (!va) { + gvt_err("GVT: can not read gpa = 0x%lx!!!\n", gpa); + return -EFAULT; + } + + switch (len) + { + case 1: + *((uint8_t *) buf) = *((uint8_t *) va); + break; + case 2: + *((uint16_t *) buf) = *((uint16_t *) va); + break; + case 4: + *((uint32_t *) buf) = *((uint32_t *) va); + break; + case 8: + *((uint64_t *) buf) = *((uint64_t *) va); + break; + default: + memcpy(buf, va, len); + } + return 0; +} + +static int acrngt_write_gpa(unsigned long handle, unsigned long gpa, + void *buf, unsigned long len) +{ + void *va = NULL; + struct acrngt_hvm_dev *info = (struct acrngt_hvm_dev *)handle; + gvt_dbg_core("write gpa 0x%lx with len 0x%lx\n", gpa, len); + + va = map_guest_phys(info->vm_id, gpa, len); + if (!va) { + gvt_err("GVT: can not write gpa = 0x%lx!!!\n", gpa); + return -EFAULT; + } + + switch (len) + { + case 1: + *((uint8_t *) va) = *((uint8_t *) buf); + break; + case 2: + *((uint16_t *) va) = *((uint16_t *) buf); + break; + case 4: + *((uint32_t *) va) = *((uint32_t *) buf); + break; + case 8: + *((uint64_t *) va) = *((uint64_t *) buf); + break; + default: + memcpy(va, buf, len); + } + return 0; +} + +static bool is_identical_mmap(void) +{ + /* todo: need add hypercall to get such info from hypervisor */ + return true; +} + +static unsigned long acrngt_gfn_to_pfn(unsigned long handle, unsigned long gfn) +{ + unsigned long hpa; + struct acrngt_hvm_dev *info = (struct acrngt_hvm_dev *)handle; + + gvt_dbg_core("convert gfn 0x%lx to pfn\n", gfn); + if (is_identical_mmap()) { + void *va = NULL; + + va = map_guest_phys(info->vm_id, gfn << PAGE_SHIFT, + 1 << PAGE_SHIFT); + if (!va) { + gvt_err("GVT: can not map gfn = 0x%lx!!!\n", gfn); + hpa = vhm_vm_gpa2hpa(info->vm_id, gfn << PAGE_SHIFT); + } else { + hpa = virt_to_phys(va); + } + } else { + hpa = vhm_vm_gpa2hpa(info->vm_id, gfn << PAGE_SHIFT); 
+ } + + return hpa >> PAGE_SHIFT; +} + +static int acrngt_map_gfn_to_mfn(unsigned long handle, unsigned long gfn, + unsigned long mfn, unsigned int nr, bool map) +{ + int ret; + struct acrngt_hvm_dev *info = (struct acrngt_hvm_dev *)handle; + gvt_dbg_core("map/unmap gfn 0x%lx to mfn 0x%lx with %u pages, map %d\n", + gfn, mfn, nr, map); + + if (map) + ret = add_memory_region(info->vm_id, gfn << PAGE_SHIFT, + mfn << PAGE_SHIFT, nr << PAGE_SHIFT, + MEM_TYPE_UC, MEM_ACCESS_RWX); + else + ret = del_memory_region(info->vm_id, gfn << PAGE_SHIFT, + nr << PAGE_SHIFT); + if (ret) + gvt_err("failed map/unmap gfn 0x%lx to mfn 0x%lx with %u pages," + " map %d\n", gfn, mfn, nr, map); + return ret; +} + +static int acrngt_set_trap_area(unsigned long handle, u64 start, + u64 end, bool map) +{ + int ret; + struct acrngt_hvm_dev *info = (struct acrngt_hvm_dev *)handle; + gvt_dbg_core("set trap area, start 0x%llx, end 0x%llx, map %d\n", + start, end, map); + + if (map) + ret = acrn_ioreq_add_iorange(info->client, REQ_MMIO, + start, end); + else + ret = acrn_ioreq_del_iorange(info->client, REQ_MMIO, + start, end); + if (ret) + gvt_err("failed set trap, start 0x%llx, end 0x%llx, map %d\n", + start, end, map); + return ret; +} + +static int acrngt_set_pvmmio(unsigned long handle, u64 start, u64 end, bool map) +{ + int rc, i; + unsigned long mfn, shared_mfn; + unsigned long pfn = start >> PAGE_SHIFT; + u32 mmio_size_fn = acrngt_priv.gvt->device_info.mmio_size >> PAGE_SHIFT; + struct acrngt_hvm_dev *info = (struct acrngt_hvm_dev *)handle; + + if (map) { + mfn = acrngt_virt_to_mfn(info->vgpu->mmio.vreg); + rc = acrngt_map_gfn_to_mfn(handle, pfn, mfn, mmio_size_fn, map); + if (rc) { + gvt_err("acrn-gvt: map pfn %lx to mfn %lx fail with ret %d\n", + pfn, mfn, rc); + return rc; + } + + /* map the shared page to guest */ + shared_mfn = acrngt_virt_to_mfn(info->vgpu->mmio.shared_page); + rc = acrngt_map_gfn_to_mfn(handle, pfn + mmio_size_fn, shared_mfn, 1, map); + if (rc) { + gvt_err("acrn-gvt: 
map shared page fail with ret %d\n", rc); + return rc; + } + + /* mmio access is trapped like memory write protection */ + rc = acrn_ioreq_add_iorange(info->client, REQ_WP, pfn << PAGE_SHIFT, + ((pfn + mmio_size_fn) << PAGE_SHIFT) - 1); + if (rc) { + gvt_err("failed acrn_ioreq_add_iorange for pfn 0x%lx\n", pfn); + return rc; + } + + for (i = 0; i < mmio_size_fn; i++) { + rc = write_protect_page(info->vm_id, + (pfn + i) << PAGE_SHIFT, true); + if (rc) { + gvt_err("failed set wp for pfn 0x%lx\n", pfn + i); + return rc; + } + } + + /* scratch reg access is trapped like mmio access, 1 page */ + rc = acrngt_map_gfn_to_mfn(handle, pfn + (VGT_PVINFO_PAGE >> PAGE_SHIFT), + mfn + (VGT_PVINFO_PAGE >> PAGE_SHIFT), 1, 0); + if (rc) { + gvt_err("acrn-gvt: map pfn %lx to mfn %lx fail with ret %d\n", + pfn, mfn, rc); + return rc; + } + } else { + mfn = acrngt_virt_to_mfn(info->vgpu->mmio.vreg); + + /* scratch reg access is trapped like mmio access, 1 page */ + rc = acrngt_map_gfn_to_mfn(handle, + pfn + (VGT_PVINFO_PAGE >> PAGE_SHIFT), + mfn + (VGT_PVINFO_PAGE >> PAGE_SHIFT), 1, 1); + if (rc) { + gvt_err("acrn-gvt: map pfn %lx to mfn %lx fail with ret %d\n", + pfn + (VGT_PVINFO_PAGE >> PAGE_SHIFT), + mfn + (VGT_PVINFO_PAGE >> PAGE_SHIFT), + rc); + return rc; + } + + rc = acrngt_map_gfn_to_mfn(handle, pfn, mfn, mmio_size_fn, map); + if (rc) { + gvt_err("acrn-gvt: map pfn %lx to mfn %lx fail with ret %d\n", + pfn, mfn, rc); + return rc; + } + rc = acrn_ioreq_del_iorange(info->client, REQ_WP, pfn << PAGE_SHIFT, + ((pfn + mmio_size_fn) << PAGE_SHIFT) - 1); + if (rc) { + gvt_err("failed acrn_ioreq_add_iorange for pfn 0x%lx\n", pfn); + return rc; + } + + /* unmap the shared page to guest */ + shared_mfn = acrngt_virt_to_mfn(info->vgpu->mmio.shared_page); + rc = acrngt_map_gfn_to_mfn(handle, pfn + mmio_size_fn, shared_mfn, 1, map); + if (rc) { + gvt_err("acrn-gvt: map shared page fail with ret %d\n", rc); + return rc; + } + } + return rc; +} + +static int acrngt_dom0_ready(void) +{ + 
char *env[] = {"GVT_DOM0_READY=1", NULL}; + if(!acrn_gvt_ctrl_kobj) + return 0; + gvt_dbg_core("acrngt: Dom 0 ready to accept Dom U guests\n"); + return kobject_uevent_env(acrn_gvt_ctrl_kobj, KOBJ_ADD, env); +} + +static int acrngt_dma_map_guest_page(unsigned long handle, unsigned long gfn, + unsigned long size, dma_addr_t *dma_addr) +{ + unsigned long pfn; + + pfn = acrngt_gfn_to_pfn(handle, gfn); + *dma_addr = pfn << PAGE_SHIFT; + + return 0; +} + +static void acrngt_dma_unmap_guest_page(unsigned long handle, + dma_addr_t dma_addr) +{ +} + +struct intel_gvt_mpt acrn_gvt_mpt = { + //.detect_host = acrngt_detect_host, + .host_init = acrngt_host_init, + .host_exit = acrngt_host_exit, + .attach_vgpu = acrngt_attach_vgpu, + .detach_vgpu = acrngt_detach_vgpu, + .inject_msi = acrngt_inject_msi, + .from_virt_to_mfn = acrngt_virt_to_mfn, + .enable_page_track = acrngt_page_track_add, + .disable_page_track = acrngt_page_track_remove, + .read_gpa = acrngt_read_gpa, + .write_gpa = acrngt_write_gpa, + .gfn_to_mfn = acrngt_gfn_to_pfn, + .map_gfn_to_mfn = acrngt_map_gfn_to_mfn, + .dma_map_guest_page = acrngt_dma_map_guest_page, + .dma_unmap_guest_page = acrngt_dma_unmap_guest_page, + .set_trap_area = acrngt_set_trap_area, + .set_pvmmio = acrngt_set_pvmmio, + .dom0_ready = acrngt_dom0_ready, +}; +EXPORT_SYMBOL_GPL(acrn_gvt_mpt); + +static int __init acrngt_init(void) +{ + /* todo: to support this we need to implement check_gfx_iommu_enabled func */ + gvt_dbg_core("acrngt loaded\n"); + return 0; +} + +static void __exit acrngt_exit(void) +{ + gvt_dbg_core("acrngt: unloaded\n"); +} + +module_init(acrngt_init); +module_exit(acrngt_exit); diff --git a/drivers/gpu/drm/i915/gvt/acrngt.h b/drivers/gpu/drm/i915/gvt/acrngt.h new file mode 100644 index 0000000000000..ce31620b93099 --- /dev/null +++ b/drivers/gpu/drm/i915/gvt/acrngt.h @@ -0,0 +1,80 @@ +/* + * Copyright(c) 2011-2016 Intel Corporation. All rights reserved. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + */ + +#ifndef INTEL_GVT_ACRNGT_H +#define INTEL_GVT_ACRNGT_H + +extern struct intel_gvt *gvt_instance; +extern const struct intel_gvt_ops *acrn_intel_gvt_ops; + +#define MAX_HVM_VCPUS_SUPPORTED 127 + +#define VMEM_1MB (1ULL << 20) /* the size of the first 1MB */ + +typedef uint16_t domid_t; + +/* + * acrngt_hvm_dev is a wrapper of a vGPU instance which is represented by the + * intel_vgpu structure. Under acrn hypervisor, the acrngt_instance stands for a + * HVM device, which owns the related resources. 
+ */ +struct acrngt_hvm_dev { + domid_t vm_id; + struct kobject kobj; + struct intel_vgpu *vgpu; + + int nr_vcpu; + + int client; + struct vhm_request *req_buf; + struct vhm_vm *vm; +}; + +struct acrngt_hvm_params { + int vm_id; + int aperture_sz; /* in MB */ + int gm_sz; /* in MB */ + int fence_sz; +}; + +/* + * struct gvt_acrngt should be a single instance to share global + * information for ACRNGT module. + */ +#define GVT_MAX_VGPU_INSTANCE 15 +struct gvt_acrngt { + struct intel_gvt *gvt; + struct intel_vgpu *vgpus[GVT_MAX_VGPU_INSTANCE]; +}; + +static ssize_t acrngt_sysfs_instance_manage(struct kobject *kobj, + struct kobj_attribute *attr, const char *buf, size_t count); +static ssize_t acrngt_sysfs_vgpu_id(struct kobject *kobj, + struct kobj_attribute *attr, char *buf); + +struct intel_vgpu *acrngt_instance_create(domid_t vm_id, + struct intel_vgpu_type *type); +void acrngt_instance_destroy(struct intel_vgpu *vgpu); + +#endif diff --git a/drivers/gpu/drm/i915/gvt/cfg_space.c b/drivers/gpu/drm/i915/gvt/cfg_space.c index c62346fdc05d5..71a815d81f2de 100644 --- a/drivers/gpu/drm/i915/gvt/cfg_space.c +++ b/drivers/gpu/drm/i915/gvt/cfg_space.c @@ -56,6 +56,10 @@ static const u8 pci_cfg_space_rw_bmp[PCI_INTERRUPT_LINE + 4] = { /** * vgpu_pci_cfg_mem_write - write virtual cfg space memory + * @vgpu: a vGPU + * @off: offset into the PCI configuration space + * @src: data buffer write to vGPU's emulated configure space + * @bytes: size of data to write in bytes * * Use this function to write virtual cfg space memory. 
* For standard cfg space, only RW bits can be changed, @@ -92,6 +96,11 @@ static void vgpu_pci_cfg_mem_write(struct intel_vgpu *vgpu, unsigned int off, /** * intel_vgpu_emulate_cfg_read - emulate vGPU configuration space read * + * @vgpu: a vGPU + * @offset: offset into the PCI configuration space + * @p_data: data buffer read from vGPU's emulated configure space + * @bytes: size of data to read in bytes + * * Returns: * Zero on success, negative error code if failed. */ @@ -138,6 +147,52 @@ static int map_aperture(struct intel_vgpu *vgpu, bool map) return 0; } +int map_gttmmio(struct intel_vgpu *vgpu, bool map) +{ + struct intel_vgpu_gm *gm = &vgpu->gm; + unsigned long mfn; + struct scatterlist *sg; + struct sg_table *st = gm->st; + u64 start, end; + int ret = 0; + + if (!st) { + DRM_INFO("no scatter list, fallback to disable ggtt pv\n"); + return -EINVAL; + } + + if (vgpu->gtt.ggtt_pv_mapped == map) { + /* If it is already set as the target state, skip it */ + return ret; + } + + start = *(u64 *)(vgpu_cfg_space(vgpu) + PCI_BASE_ADDRESS_0); + start &= ~GENMASK(3, 0); + start += vgpu->cfg_space.bar[INTEL_GVT_PCI_BAR_GTTMMIO].size >> 1; + + end = start + + (vgpu->cfg_space.bar[INTEL_GVT_PCI_BAR_GTTMMIO].size >> 1); + + gvt_dbg_mmio("%s start=%llx end=%llx map=%d\n", + __func__, start, end, map); + + start >>= PAGE_SHIFT; + for (sg = st->sgl; sg; sg = __sg_next(sg)) { + mfn = page_to_pfn(sg_page(sg)); + gvt_dbg_mmio("page=%p mfn=%lx size=%x start=%llx\n", + sg_page(sg), mfn, sg->length, start); + ret = intel_gvt_hypervisor_map_gfn_to_mfn(vgpu, start, + mfn, sg->length >> PAGE_SHIFT, map); + if (ret) + return ret; + start += sg->length >> PAGE_SHIFT; + } + + vgpu->gtt.ggtt_pv_mapped = map; + + return ret; +} + static int trap_gttmmio(struct intel_vgpu *vgpu, bool trap) { u64 start, end; @@ -278,6 +333,10 @@ static int emulate_pci_bar_write(struct intel_vgpu *vgpu, unsigned int offset, /** * intel_vgpu_emulate_cfg_read - emulate vGPU configuration space write + * 
@vgpu: a vGPU + * @offset: offset into the PCI configuration space + * @p_data: data buffer write to vGPU's emulated configure space + * @bytes: size of data to write in bytes * * Returns: * Zero on success, negative error code if failed. @@ -295,9 +354,22 @@ int intel_vgpu_emulate_cfg_write(struct intel_vgpu *vgpu, unsigned int offset, /* First check if it's PCI_COMMAND */ if (IS_ALIGNED(offset, 2) && offset == PCI_COMMAND) { - if (WARN_ON(bytes > 2)) + if (WARN_ON(bytes != 2 && bytes != 4)) return -EINVAL; - return emulate_pci_command_write(vgpu, offset, p_data, bytes); + + ret = -EINVAL; + if (bytes == 2) + ret = emulate_pci_command_write(vgpu, offset, + p_data, bytes); + if (bytes == 4) { + ret = emulate_pci_command_write(vgpu, offset, + p_data, 2); + if (ret) + return ret; + vgpu_pci_cfg_mem_write(vgpu, offset + 2, + (u8 *)p_data + 2, 2); + } + return ret; } switch (rounddown(offset, 4)) { @@ -322,6 +394,15 @@ int intel_vgpu_emulate_cfg_write(struct intel_vgpu *vgpu, unsigned int offset, case INTEL_GVT_PCI_OPREGION: if (WARN_ON(!IS_ALIGNED(offset, 4))) return -EINVAL; + + /* + * To support virtual display, we need to override the real VBT in the + * OpRegion. So here we don't report OpRegion to guest. 
+ */ + if (IS_BROXTON(vgpu->gvt->dev_priv) || + IS_KABYLAKE(vgpu->gvt->dev_priv)) + return 0; + ret = intel_vgpu_opregion_base_write_handler(vgpu, *(u32 *)p_data); if (ret) @@ -399,6 +480,8 @@ void intel_vgpu_reset_cfg_space(struct intel_vgpu *vgpu) INTEL_GVT_PCI_CLASS_VGA_OTHER; if (cmd & PCI_COMMAND_MEMORY) { + if (VGPU_PVMMIO(vgpu)) + set_pvmmio(vgpu, false); trap_gttmmio(vgpu, false); map_aperture(vgpu, false); } diff --git a/drivers/gpu/drm/i915/gvt/cmd_parser.c b/drivers/gpu/drm/i915/gvt/cmd_parser.c index a614db310ea27..4fa37daf13d6a 100644 --- a/drivers/gpu/drm/i915/gvt/cmd_parser.c +++ b/drivers/gpu/drm/i915/gvt/cmd_parser.c @@ -808,7 +808,7 @@ static bool is_shadowed_mmio(unsigned int offset) return ret; } -static inline bool is_force_nonpriv_mmio(unsigned int offset) +bool is_force_nonpriv_mmio(unsigned int offset) { return (offset >= 0x24d0 && offset < 0x2500); } @@ -918,6 +918,15 @@ static int cmd_reg_handler(struct parser_exec_state *s, } } + /* Re-direct the non-context MMIO access to VGT_SCRATCH_REG, it + * has no functional impact to HW. 
+ */ + if (!strcmp(cmd, "lri") || !strcmp(cmd, "lrr-dst") + || !strcmp(cmd, "lrm") || !strcmp(cmd, "pipe_ctrl")) { + if (intel_gvt_mmio_is_non_context(gvt, offset)) + patch_value(s, cmd_ptr(s, index), VGT_SCRATCH_REG); + } + /* TODO: Update the global mask if this MMIO is a masked-MMIO */ intel_gvt_mmio_set_cmd_accessed(gvt, offset); return 0; @@ -957,6 +966,32 @@ static int cmd_handler_lri(struct parser_exec_state *s) ret |= cmd_reg_handler(s, cmd_reg(s, i), i, "lri"); if (ret) break; + + if (s->vgpu->entire_nonctxmmio_checked + && intel_gvt_mmio_is_non_context(gvt, + cmd_reg(s, i))) { + int offset = cmd_reg(s, i); + int value = cmd_val(s, i + 1); + + if (intel_gvt_mmio_has_mode_mask(gvt, offset)) { + u32 mask = value >> 16; + + vgpu_vreg(s->vgpu, offset) = + (vgpu_vreg(s->vgpu, offset) & ~mask) + | (value & mask); + } else { + vgpu_vreg(s->vgpu, offset) = value; + } + + if (gvt_host_reg(gvt, offset) != + vgpu_vreg(s->vgpu, offset)) { + + gvt_err("vgpu%d unexpected non-context MMIO " + "access by cmd 0x%x:0x%x,0x%x\n", + s->vgpu->id, offset, value, + gvt_host_reg(gvt, offset)); + } + } } return ret; } @@ -1840,6 +1875,8 @@ static int cmd_handler_mi_batch_buffer_start(struct parser_exec_state *s) return ret; } +static int mi_noop_index; + static struct cmd_info cmd_info[] = { {"MI_NOOP", OP_MI_NOOP, F_LEN_CONST, R_ALL, D_ALL, 0, 1, NULL}, @@ -2525,7 +2562,12 @@ static int cmd_parser_exec(struct parser_exec_state *s) cmd = cmd_val(s, 0); - info = get_cmd_info(s->vgpu->gvt, cmd, s->ring_id); + /* fastpath for MI_NOOP */ + if (cmd == MI_NOOP) + info = &cmd_info[mi_noop_index]; + else + info = get_cmd_info(s->vgpu->gvt, cmd, s->ring_id); + if (info == NULL) { gvt_vgpu_err("unknown cmd 0x%x, opcode=0x%x, addr_type=%s, ring %d, workload=%p\n", cmd, get_opcode(cmd, s->ring_id), @@ -2710,6 +2752,33 @@ static int scan_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx) return ret; } +#define GEN8_PDPES 4 +int gvt_emit_pdps(struct intel_vgpu_workload *workload) +{ + const int 
num_cmds = GEN8_PDPES * 2; + struct i915_request *req = workload->req; + struct intel_engine_cs *engine = req->engine; + u32 *cs; + u32 *pdps = (u32 *)(workload->shadow_mm->ppgtt_mm.shadow_pdps); + int i; + + cs = intel_ring_begin(req, num_cmds * 2 + 2); + if (IS_ERR(cs)) + return PTR_ERR(cs); + + *cs++ = MI_LOAD_REGISTER_IMM(num_cmds); + for (i = 0; i < GEN8_PDPES; i++) { + *cs++ = i915_mmio_reg_offset(GEN8_RING_PDP_LDW(engine, i)); + *cs++ = pdps[i * 2]; + *cs++ = i915_mmio_reg_offset(GEN8_RING_PDP_UDW(engine, i)); + *cs++ = pdps[i * 2 + 1]; + } + *cs++ = MI_NOOP; + intel_ring_advance(req, cs); + + return 0; +} + static int shadow_workload_ring_buffer(struct intel_vgpu_workload *workload) { struct intel_vgpu *vgpu = workload->vgpu; @@ -2928,6 +2997,8 @@ static int init_cmd_table(struct intel_gvt *gvt) kfree(e); return -EEXIST; } + if (cmd_info[i].opcode == OP_MI_NOOP) + mi_noop_index = i; INIT_HLIST_NODE(&e->hlist); add_cmd_entry(gvt, e); diff --git a/drivers/gpu/drm/i915/gvt/cmd_parser.h b/drivers/gpu/drm/i915/gvt/cmd_parser.h index 2867036430027..1356803a05862 100644 --- a/drivers/gpu/drm/i915/gvt/cmd_parser.h +++ b/drivers/gpu/drm/i915/gvt/cmd_parser.h @@ -46,4 +46,5 @@ int intel_gvt_scan_and_shadow_ringbuffer(struct intel_vgpu_workload *workload); int intel_gvt_scan_and_shadow_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx); +int gvt_emit_pdps(struct intel_vgpu_workload *workload); #endif diff --git a/drivers/gpu/drm/i915/gvt/debugfs.c b/drivers/gpu/drm/i915/gvt/debugfs.c index 2ec89bcb59f13..c41e1e2457892 100644 --- a/drivers/gpu/drm/i915/gvt/debugfs.c +++ b/drivers/gpu/drm/i915/gvt/debugfs.c @@ -196,9 +196,9 @@ DEFINE_SIMPLE_ATTRIBUTE(vgpu_scan_nonprivbb_fops, int intel_gvt_debugfs_add_vgpu(struct intel_vgpu *vgpu) { struct dentry *ent; - char name[10] = ""; + char name[10]; - sprintf(name, "vgpu%d", vgpu->id); + snprintf(name, sizeof(name), "vgpu%d", vgpu->id); vgpu->debugfs = debugfs_create_dir(name, vgpu->gvt->debugfs_root); if (!vgpu->debugfs) return 
-ENOMEM; diff --git a/drivers/gpu/drm/i915/gvt/display.c b/drivers/gpu/drm/i915/gvt/display.c index 3019dbc39aef2..afe9eafdbd1d2 100644 --- a/drivers/gpu/drm/i915/gvt/display.c +++ b/drivers/gpu/drm/i915/gvt/display.c @@ -191,6 +191,12 @@ static void emulate_monitor_status_change(struct intel_vgpu *vgpu) BXT_DE_PORT_HP_DDIC; } + vgpu_vreg_t(vgpu, SKL_FUSE_STATUS) |= + SKL_FUSE_DOWNLOAD_STATUS | + SKL_FUSE_PG_DIST_STATUS(SKL_PG0) | + SKL_FUSE_PG_DIST_STATUS(SKL_PG1) | + SKL_FUSE_PG_DIST_STATUS(SKL_PG2); + return; } @@ -228,7 +234,7 @@ static void emulate_monitor_status_change(struct intel_vgpu *vgpu) vgpu_vreg_t(vgpu, PORT_CLK_SEL(PORT_B)) |= PORT_CLK_SEL_LCPLL_810; } - vgpu_vreg_t(vgpu, DDI_BUF_CTL(PORT_B)) |= DDI_BUF_CTL_ENABLE; + vgpu_vreg_t(vgpu, DDI_BUF_CTL(PORT_B)) &= ~DDI_BUF_CTL_ENABLE; vgpu_vreg_t(vgpu, DDI_BUF_CTL(PORT_B)) &= ~DDI_BUF_IS_IDLE; vgpu_vreg_t(vgpu, SDEISR) |= SDE_PORTB_HOTPLUG_CPT; } @@ -248,7 +254,7 @@ static void emulate_monitor_status_change(struct intel_vgpu *vgpu) vgpu_vreg_t(vgpu, PORT_CLK_SEL(PORT_C)) |= PORT_CLK_SEL_LCPLL_810; } - vgpu_vreg_t(vgpu, DDI_BUF_CTL(PORT_C)) |= DDI_BUF_CTL_ENABLE; + vgpu_vreg_t(vgpu, DDI_BUF_CTL(PORT_C)) &= ~DDI_BUF_CTL_ENABLE; vgpu_vreg_t(vgpu, DDI_BUF_CTL(PORT_C)) &= ~DDI_BUF_IS_IDLE; vgpu_vreg_t(vgpu, SFUSE_STRAP) |= SFUSE_STRAP_DDIC_DETECTED; } @@ -268,7 +274,7 @@ static void emulate_monitor_status_change(struct intel_vgpu *vgpu) vgpu_vreg_t(vgpu, PORT_CLK_SEL(PORT_D)) |= PORT_CLK_SEL_LCPLL_810; } - vgpu_vreg_t(vgpu, DDI_BUF_CTL(PORT_D)) |= DDI_BUF_CTL_ENABLE; + vgpu_vreg_t(vgpu, DDI_BUF_CTL(PORT_D)) &= ~DDI_BUF_CTL_ENABLE; vgpu_vreg_t(vgpu, DDI_BUF_CTL(PORT_D)) &= ~DDI_BUF_IS_IDLE; vgpu_vreg_t(vgpu, SFUSE_STRAP) |= SFUSE_STRAP_DDID_DETECTED; } @@ -314,15 +320,20 @@ static void clean_virtual_dp_monitor(struct intel_vgpu *vgpu, int port_num) port->dpcd = NULL; } -static int setup_virtual_dp_monitor(struct intel_vgpu *vgpu, int port_num, - int type, unsigned int resolution) +static int 
setup_virtual_monitor(struct intel_vgpu *vgpu, int port_num, + int type, unsigned int resolution, void *edid, bool is_dp) { struct intel_vgpu_port *port = intel_vgpu_port(vgpu, port_num); + int valid_extensions = 1; + struct edid *tmp_edid = NULL; if (WARN_ON(resolution >= GVT_EDID_NUM)) return -EINVAL; - port->edid = kzalloc(sizeof(*(port->edid)), GFP_KERNEL); + if (edid) + valid_extensions += ((struct edid *)edid)->extensions; + port->edid = kzalloc(sizeof(*(port->edid)) + + valid_extensions * EDID_SIZE, GFP_KERNEL); if (!port->edid) return -ENOMEM; @@ -332,13 +343,30 @@ static int setup_virtual_dp_monitor(struct intel_vgpu *vgpu, int port_num, return -ENOMEM; } - memcpy(port->edid->edid_block, virtual_dp_monitor_edid[resolution], - EDID_SIZE); + if (edid) + memcpy(port->edid->edid_block, edid, EDID_SIZE * valid_extensions); + else + memcpy(port->edid->edid_block, virtual_dp_monitor_edid[resolution], + EDID_SIZE); + + /* Sometimes the physical display will report the EDID with no + * digital bit set, which will cause the guest fail to enumerate + * the virtual HDMI monitor. So here we will set the digital + * bit and re-calculate the checksum. 
+ */ + tmp_edid = ((struct edid *)port->edid->edid_block); + if (!(tmp_edid->input & DRM_EDID_INPUT_DIGITAL)) { + tmp_edid->input += DRM_EDID_INPUT_DIGITAL; + tmp_edid->checksum -= DRM_EDID_INPUT_DIGITAL; + } + port->edid->data_valid = true; - memcpy(port->dpcd->data, dpcd_fix_data, DPCD_HEADER_SIZE); - port->dpcd->data_valid = true; - port->dpcd->data[DPCD_SINK_COUNT] = 0x1; + if (is_dp) { + memcpy(port->dpcd->data, dpcd_fix_data, DPCD_HEADER_SIZE); + port->dpcd->data_valid = true; + port->dpcd->data[DPCD_SINK_COUNT] = 0x1; + } port->type = type; emulate_monitor_status_change(vgpu); @@ -442,6 +470,117 @@ void intel_gvt_emulate_vblank(struct intel_gvt *gvt) mutex_unlock(&gvt->lock); } +static void intel_gvt_vblank_work(struct work_struct *w) +{ + struct intel_gvt_pipe_info *pipe_info = container_of(w, + struct intel_gvt_pipe_info, vblank_work); + struct intel_gvt *gvt = pipe_info->gvt; + struct intel_vgpu *vgpu; + int id; + + mutex_lock(&gvt->lock); + for_each_active_vgpu(gvt, vgpu, id) + emulate_vblank_on_pipe(vgpu, pipe_info->pipe_num); + mutex_unlock(&gvt->lock); +} + +#define BITS_PER_DOMAIN 4 +#define MAX_SCALERS_PER_DOMAIN 2 + +#define DOMAIN_SCALER_OWNER(owner, pipe, scaler) \ + ((((owner) >> (pipe) * BITS_PER_DOMAIN * MAX_SCALERS_PER_DOMAIN) >> \ + BITS_PER_DOMAIN * (scaler)) & 0xf) + +int bxt_check_planes(struct intel_vgpu *vgpu, int pipe) +{ + int plane = 0; + bool ret = false; + + for (plane = 0; + plane < ((INTEL_INFO(vgpu->gvt->dev_priv)->num_sprites[pipe]) + 1); + plane++) { + if (vgpu->gvt->pipe_info[pipe].plane_owner[plane] == vgpu->id) { + ret = true; + break; + } + } + return ret; +} + +void intel_gvt_init_pipe_info(struct intel_gvt *gvt) +{ + enum pipe pipe; + unsigned int scaler; + unsigned int domain_scaler_owner = i915_modparams.domain_scaler_owner; + struct drm_i915_private *dev_priv = gvt->dev_priv; + + for (pipe = PIPE_A; pipe <= PIPE_C; pipe++) { + gvt->pipe_info[pipe].pipe_num = pipe; + gvt->pipe_info[pipe].gvt = gvt; + 
INIT_WORK(&gvt->pipe_info[pipe].vblank_work, + intel_gvt_vblank_work); + /* Each nibble represents domain id + * ids can be from 0-F. 0 for Dom0, 1,2,3...0xF for DomUs + * scaler_owner[i] holds the id of the domain that owns it, + * eg:0,1,2 etc + */ + for_each_universal_scaler(dev_priv, pipe, scaler) + gvt->pipe_info[pipe].scaler_owner[scaler] = + DOMAIN_SCALER_OWNER(domain_scaler_owner, pipe, scaler); + } +} + +bool gvt_emulate_hdmi = true; + +int setup_virtual_monitors(struct intel_vgpu *vgpu) +{ + struct intel_connector *connector = NULL; + struct drm_connector_list_iter conn_iter; + struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv; + int pipe = 0; + int ret = 0; + int type = gvt_emulate_hdmi ? GVT_HDMI_A : GVT_DP_A; + int port = PORT_B; + + /* BXT have to use port A for HDMI to support 3 HDMI monitors */ + if (IS_BROXTON(dev_priv)) + port = PORT_A; + + drm_connector_list_iter_begin(&vgpu->gvt->dev_priv->drm, &conn_iter); + for_each_intel_connector_iter(connector, &conn_iter) { + if (connector->encoder->get_hw_state(connector->encoder, &pipe) + && connector->detect_edid) { + /* if no planes are allocated for this pipe, skip it */ + if (i915_modparams.avail_planes_per_pipe && + !bxt_check_planes(vgpu, pipe)) + continue; + /* Get (Dom0) port associated with current pipe. 
*/ + port = connector->encoder->port; + ret = setup_virtual_monitor(vgpu, port, + type, 0, connector->detect_edid, + !gvt_emulate_hdmi); + if (ret) + return ret; + type++; + port++; + } + } + drm_connector_list_iter_end(&conn_iter); + return 0; +} + +void clean_virtual_monitors(struct intel_vgpu *vgpu) +{ + int port = 0; + + for (port = PORT_A; port < I915_MAX_PORTS; port++) { + struct intel_vgpu_port *p = intel_vgpu_port(vgpu, port); + + if (p->edid) + clean_virtual_dp_monitor(vgpu, port); + } +} + /** * intel_vgpu_clean_display - clean vGPU virtual display emulation * @vgpu: a vGPU @@ -453,7 +592,9 @@ void intel_vgpu_clean_display(struct intel_vgpu *vgpu) { struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv; - if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) + if (IS_BROXTON(dev_priv) || IS_KABYLAKE(dev_priv)) + clean_virtual_monitors(vgpu); + else if (IS_SKYLAKE(dev_priv)) clean_virtual_dp_monitor(vgpu, PORT_D); else clean_virtual_dp_monitor(vgpu, PORT_B); @@ -475,12 +616,14 @@ int intel_vgpu_init_display(struct intel_vgpu *vgpu, u64 resolution) intel_vgpu_init_i2c_edid(vgpu); - if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) - return setup_virtual_dp_monitor(vgpu, PORT_D, GVT_DP_D, - resolution); + if (IS_BROXTON(dev_priv) || IS_KABYLAKE(dev_priv)) + return setup_virtual_monitors(vgpu); + else if (IS_SKYLAKE(dev_priv)) + return setup_virtual_monitor(vgpu, PORT_D, GVT_DP_D, + resolution, NULL, true); else - return setup_virtual_dp_monitor(vgpu, PORT_B, GVT_DP_B, - resolution); + return setup_virtual_monitor(vgpu, PORT_B, GVT_DP_B, + resolution, NULL, true); } /** diff --git a/drivers/gpu/drm/i915/gvt/display.h b/drivers/gpu/drm/i915/gvt/display.h index ea7c1c525b8c3..e6d3912bc730c 100644 --- a/drivers/gpu/drm/i915/gvt/display.h +++ b/drivers/gpu/drm/i915/gvt/display.h @@ -140,6 +140,7 @@ enum intel_vgpu_port_type { GVT_DP_B, GVT_DP_C, GVT_DP_D, + GVT_HDMI_A, GVT_HDMI_B, GVT_HDMI_C, GVT_HDMI_D, diff --git a/drivers/gpu/drm/i915/gvt/edid.c 
b/drivers/gpu/drm/i915/gvt/edid.c index 4b98539025c5b..c4bf4800f72f4 100644 --- a/drivers/gpu/drm/i915/gvt/edid.c +++ b/drivers/gpu/drm/i915/gvt/edid.c @@ -55,10 +55,6 @@ static unsigned char edid_get_byte(struct intel_vgpu *vgpu) gvt_vgpu_err("Driver tries to read EDID without proper sequence!\n"); return 0; } - if (edid->current_edid_read >= EDID_SIZE) { - gvt_vgpu_err("edid_get_byte() exceeds the size of EDID!\n"); - return 0; - } if (!edid->edid_available) { gvt_vgpu_err("Reading EDID but EDID is not available!\n"); @@ -87,7 +83,7 @@ static inline int bxt_get_port_from_gmbus0(u32 gmbus0) else if (port_select == 2) port = PORT_C; else if (port_select == 3) - port = PORT_D; + port = PORT_A; return port; } @@ -452,6 +448,8 @@ void intel_gvt_i2c_handle_aux_ch_write(struct intel_vgpu *vgpu, u32 value = *(u32 *)p_data; int aux_data_for_write = 0; int reg = get_aux_ch_reg(offset); + uint8_t rxbuf[20] = {0}; + size_t rxsize; if (reg != AUX_CH_CTL) { vgpu_vreg(vgpu, offset) = value; @@ -459,6 +457,12 @@ void intel_gvt_i2c_handle_aux_ch_write(struct intel_vgpu *vgpu, } msg_length = AUX_CTL_MSG_LENGTH(value); + if (WARN_ON(msg_length <= 0 || msg_length > 20)) + return; + + for (rxsize = 0; rxsize < msg_length; rxsize += 4) + intel_dp_unpack_aux(vgpu_vreg(vgpu, offset + 4 + rxsize), + rxbuf + rxsize, msg_length - rxsize); // check the msg in DATA register. msg = vgpu_vreg(vgpu, offset + 4); addr = (msg >> 8) & 0xffff; @@ -498,12 +502,13 @@ void intel_gvt_i2c_handle_aux_ch_write(struct intel_vgpu *vgpu, } } } else if ((op & 0x1) == GVT_AUX_I2C_WRITE) { - /* TODO - * We only support EDID reading from I2C_over_AUX. And - * we do not expect the index mode to be used. Right now - * the WRITE operation is ignored. It is good enough to - * support the gfx driver to do EDID access. + /* We only support EDID reading from I2C_over_AUX. 
+ * But if EDID has extension blocks, we use this write + * operation to set block starting address */ + if (addr == EDID_ADDR) { + i2c_edid->current_edid_read = rxbuf[4]; + } } else { if (WARN_ON((op & 0x1) != GVT_AUX_I2C_READ)) return; diff --git a/drivers/gpu/drm/i915/gvt/edid.h b/drivers/gpu/drm/i915/gvt/edid.h index f6dfc8b795ec2..11a75d69062d9 100644 --- a/drivers/gpu/drm/i915/gvt/edid.h +++ b/drivers/gpu/drm/i915/gvt/edid.h @@ -48,7 +48,7 @@ struct intel_vgpu_edid_data { bool data_valid; - unsigned char edid_block[EDID_SIZE]; + unsigned char edid_block[0]; }; enum gmbus_cycle_type { diff --git a/drivers/gpu/drm/i915/gvt/fb_decoder.c b/drivers/gpu/drm/i915/gvt/fb_decoder.c index 481896fb712ab..0d616c500db4e 100644 --- a/drivers/gpu/drm/i915/gvt/fb_decoder.c +++ b/drivers/gpu/drm/i915/gvt/fb_decoder.c @@ -37,6 +37,7 @@ #include "i915_drv.h" #include "gvt.h" #include "i915_pvinfo.h" +#include "fb_decoder.h" #define PRIMARY_FORMAT_NUM 16 struct pixel_format { @@ -235,7 +236,7 @@ int intel_vgpu_decode_primary_plane(struct intel_vgpu *vgpu, plane->bpp = skl_pixel_formats[fmt].bpp; plane->drm_format = skl_pixel_formats[fmt].drm_format; } else { - plane->tiled = !!(val & DISPPLANE_TILED); + plane->tiled = val & DISPPLANE_TILED; fmt = bdw_format_to_drm(val & DISPPLANE_PIXFORMAT_MASK); plane->bpp = bdw_pixel_formats[fmt].bpp; plane->drm_format = bdw_pixel_formats[fmt].drm_format; @@ -266,11 +267,12 @@ int intel_vgpu_decode_primary_plane(struct intel_vgpu *vgpu, (_PRI_PLANE_STRIDE_MASK >> 6) : _PRI_PLANE_STRIDE_MASK, plane->bpp); - plane->width = (vgpu_vreg_t(vgpu, PIPESRC(pipe)) & _PIPE_H_SRCSZ_MASK) >> - _PIPE_H_SRCSZ_SHIFT; + plane->width = vgpu_vreg_t(vgpu, PLANE_SIZE(pipe, PLANE_PRIMARY))& + _PLANE_SIZE_WIDTH_MASK; + plane->width += 1; - plane->height = (vgpu_vreg_t(vgpu, PIPESRC(pipe)) & - _PIPE_V_SRCSZ_MASK) >> _PIPE_V_SRCSZ_SHIFT; + plane->height = (vgpu_vreg_t(vgpu, PLANE_SIZE(pipe, PLANE_PRIMARY)) & + _PLANE_SIZE_HEIGHT_MASK) >> _PLANE_SIZE_HEIGHT_SHIFT; 
plane->height += 1; /* raw height is one minus the real value */ val = vgpu_vreg_t(vgpu, DSPTILEOFF(pipe)); diff --git a/drivers/gpu/drm/i915/gvt/fb_decoder.h b/drivers/gpu/drm/i915/gvt/fb_decoder.h index 60c155085029c..6c51fe00d421e 100644 --- a/drivers/gpu/drm/i915/gvt/fb_decoder.h +++ b/drivers/gpu/drm/i915/gvt/fb_decoder.h @@ -50,6 +50,10 @@ #define _PRI_PLANE_Y_OFF_SHIFT 16 #define _PRI_PLANE_Y_OFF_MASK (0xfff << _PRI_PLANE_Y_OFF_SHIFT) +#define _PLANE_SIZE_HEIGHT_SHIFT 16 +#define _PLANE_SIZE_HEIGHT_MASK (0xfff << _PLANE_SIZE_HEIGHT_SHIFT) +#define _PLANE_SIZE_WIDTH_MASK 0x1fff + #define _CURSOR_MODE 0x3f #define _CURSOR_ALPHA_FORCE_SHIFT 8 #define _CURSOR_ALPHA_FORCE_MASK (0x3 << _CURSOR_ALPHA_FORCE_SHIFT) @@ -165,5 +169,4 @@ int intel_vgpu_decode_cursor_plane(struct intel_vgpu *vgpu, struct intel_vgpu_cursor_plane_format *plane); int intel_vgpu_decode_sprite_plane(struct intel_vgpu *vgpu, struct intel_vgpu_sprite_plane_format *plane); - #endif diff --git a/drivers/gpu/drm/i915/gvt/firmware.c b/drivers/gpu/drm/i915/gvt/firmware.c index 4ac18b4472476..f0d30237c9884 100644 --- a/drivers/gpu/drm/i915/gvt/firmware.c +++ b/drivers/gpu/drm/i915/gvt/firmware.c @@ -199,6 +199,7 @@ static int verify_firmware(struct intel_gvt *gvt, #define GVT_FIRMWARE_PATH "i915/gvt" +bool disable_gvt_fw_loading=true; /** * intel_gvt_load_firmware - load GVT firmware * @gvt: intel gvt device @@ -216,27 +217,27 @@ int intel_gvt_load_firmware(struct intel_gvt *gvt) void *mem; int ret; - path = kmalloc(PATH_MAX, GFP_KERNEL); - if (!path) - return -ENOMEM; - mem = kmalloc(info->cfg_space_size, GFP_KERNEL); - if (!mem) { - kfree(path); + if (!mem) return -ENOMEM; - } firmware->cfg_space = mem; mem = kmalloc(info->mmio_size, GFP_KERNEL); if (!mem) { - kfree(path); kfree(firmware->cfg_space); return -ENOMEM; } firmware->mmio = mem; + if (disable_gvt_fw_loading) + goto expose_firmware; + + path = kmalloc(PATH_MAX, GFP_KERNEL); + if (!path) + return -ENOMEM; + sprintf(path, 
"%s/vid_0x%04x_did_0x%04x_rid_0x%02x.golden_hw_state", GVT_FIRMWARE_PATH, pdev->vendor, pdev->device, pdev->revision); diff --git a/drivers/gpu/drm/i915/gvt/gtt.c b/drivers/gpu/drm/i915/gvt/gtt.c index 00aad8164dec2..97e762fb27ea0 100644 --- a/drivers/gpu/drm/i915/gvt/gtt.c +++ b/drivers/gpu/drm/i915/gvt/gtt.c @@ -44,8 +44,8 @@ #define gvt_vdbg_mm(fmt, args...) #endif -static bool enable_out_of_sync = false; -static int preallocated_oos_pages = 8192; +static bool enable_out_of_sync = true; +static int preallocated_oos_pages = 2048; /* * validate a gm address and related range size, @@ -303,6 +303,18 @@ static inline int gtt_get_entry64(void *pt, return -EINVAL; if (hypervisor_access) { + if (vgpu->ge_cache_enable && vgpu->cached_guest_entry) { + if (index == 0) { + ret = intel_gvt_hypervisor_read_gpa(vgpu, gpa, + vgpu->cached_guest_entry, + I915_GTT_PAGE_SIZE); + if (WARN_ON(ret)) + return ret; + } + e->val64 = *(vgpu->cached_guest_entry + index); + return 0; + } + ret = intel_gvt_hypervisor_read_gpa(vgpu, gpa + (index << info->gtt_entry_size_shift), &e->val64, 8); @@ -1277,8 +1289,10 @@ static int ppgtt_populate_spt(struct intel_vgpu_ppgtt_spt *spt) trace_spt_change(spt->vgpu->id, "born", spt, spt->guest_page.gfn, spt->shadow_page.type); + vgpu->ge_cache_enable = true; for_each_present_guest_entry(spt, &ge, i) { if (gtt_type_is_pt(get_next_pt_type(ge.type))) { + vgpu->ge_cache_enable = false; s = ppgtt_populate_spt_by_guest_entry(vgpu, &ge); if (IS_ERR(s)) { ret = PTR_ERR(s); @@ -1300,6 +1314,7 @@ static int ppgtt_populate_spt(struct intel_vgpu_ppgtt_spt *spt) goto fail; } } + vgpu->ge_cache_enable = false; return 0; fail: gvt_vgpu_err("fail: shadow page %p guest entry 0x%llx type %d\n", @@ -1554,6 +1569,109 @@ int intel_vgpu_sync_oos_pages(struct intel_vgpu *vgpu) return 0; } +static void free_ggtt_virtual_page_table(struct intel_vgpu_mm *mm) +{ + struct intel_vgpu_gm *gm = &mm->vgpu->gm; + struct sg_table *st = gm->st; + struct scatterlist *sg; + + for (sg = 
st->sgl; sg; sg = __sg_next(sg)) { + if (sg_page(sg)) + __free_pages(sg_page(sg), get_order(sg->length)); + } + + sg_free_table(st); + kfree(st); + vunmap(mm->ggtt_mm.virtual_ggtt); + gm->st = NULL; +} + +/* + * Alloc virtual page table for guest ggtt. If ggtt pv enabled, the + * physical pages behind virtual page table is also mapped to guest, + * guest can update its pte entries directly to avoid trap. + */ +static void *alloc_ggtt_virtual_page_table(struct intel_vgpu_mm *mm) +{ + struct intel_vgpu *vgpu = mm->vgpu; + unsigned int page_count; + struct intel_vgpu_gm *gm = &vgpu->gm; + struct page **pages = NULL; + struct page *p; + unsigned int i; + void *vaddr = NULL; + int order; + struct sg_table *st; + struct scatterlist *sg; + struct sgt_iter sgt_iter; + int npages; + + page_count = ALIGN(gvt_ggtt_sz(vgpu->gvt), 1 << PMD_SHIFT) + >> PAGE_SHIFT; + npages = page_count; + /* + * page_table_entry_size is bigger than the size alloc_pages can + * allocate, We have to split it according to the PMD size (2M). + * Head page is kept in scatter list so that we can free them later. 
+ */ + order = get_order(1 << PMD_SHIFT); + + st = kmalloc(sizeof(*st), GFP_KERNEL); + if (!st) + return ERR_PTR(-ENOMEM); + + if (sg_alloc_table(st, page_count, GFP_KERNEL)) { + kfree(st); + return ERR_PTR(-ENOMEM); + } + + sg = st->sgl; + st->nents = 0; + gm->st = st; + do { + p = alloc_pages(GFP_KERNEL, order); + if (!p) + goto fail; + gvt_dbg_mm("page=%p size=%ld\n", p, PAGE_SIZE << order); + sg_set_page(sg, p, PAGE_SIZE << order, 0); + st->nents++; + npages -= 1 << order; + if (npages <= 0) { + sg_mark_end(sg); + break; + } + sg = __sg_next(sg); + } while (1); + + + /* keep all the pages for vmap */ + pages = kmalloc_array(page_count, sizeof(struct page *), GFP_KERNEL); + if (!pages) + goto fail; + + i = 0; + for_each_sgt_page(p, sgt_iter, st) + pages[i++] = p; + + WARN_ON(i != page_count); + + vaddr = vmap(pages, page_count, VM_MAP, PAGE_KERNEL); + if (!vaddr) { + gvt_vgpu_err("fail to vmap pages"); + goto fail; + } + kfree(pages); + return vaddr; + +fail: + sg_set_page(sg, NULL, 0, 0); + sg_mark_end(sg); + free_ggtt_virtual_page_table(mm); + kfree(pages); + gm->st = NULL; + return NULL; +} + /* * The heart of PPGTT shadow page table. */ @@ -1688,6 +1806,8 @@ static int ppgtt_handle_guest_write_page_table_bytes( index = (pa & (PAGE_SIZE - 1)) >> info->gtt_entry_size_shift; + /* Set guest ppgtt entry. Optional for KVMGT, but MUST for XENGT. 
*/ + intel_gvt_hypervisor_write_gpa(vgpu, pa, p_data, bytes); ppgtt_get_guest_entry(spt, &we, index); /* @@ -1740,6 +1860,32 @@ static int ppgtt_handle_guest_write_page_table_bytes( return 0; } +static void invalidate_mm_pv(struct intel_vgpu_mm *mm) +{ + struct intel_vgpu *vgpu = mm->vgpu; + struct intel_gvt *gvt = vgpu->gvt; + struct intel_gvt_gtt *gtt = &gvt->gtt; + struct intel_gvt_gtt_pte_ops *ops = gtt->pte_ops; + struct intel_gvt_gtt_entry se; + + if (WARN_ON(mm->ppgtt_mm.root_entry_type != + GTT_TYPE_PPGTT_ROOT_L4_ENTRY)) + return; + + i915_ppgtt_close(&mm->ppgtt_mm.ppgtt->vm); + i915_ppgtt_put(mm->ppgtt_mm.ppgtt); + + ppgtt_get_shadow_root_entry(mm, &se, 0); + if (!ops->test_present(&se)) + return; + trace_spt_guest_change(vgpu->id, "destroy root pointer", + NULL, se.type, se.val64, 0); + se.val64 = 0; + ppgtt_set_shadow_root_entry(mm, &se, 0); + + mm->ppgtt_mm.shadowed = false; +} + static void invalidate_ppgtt_mm(struct intel_vgpu_mm *mm) { struct intel_vgpu *vgpu = mm->vgpu; @@ -1752,6 +1898,11 @@ static void invalidate_ppgtt_mm(struct intel_vgpu_mm *mm) if (!mm->ppgtt_mm.shadowed) return; + if (VGPU_PVMMIO(mm->vgpu) & PVMMIO_PPGTT_UPDATE) { + invalidate_mm_pv(mm); + return; + } + for (index = 0; index < ARRAY_SIZE(mm->ppgtt_mm.shadow_pdps); index++) { ppgtt_get_shadow_root_entry(mm, &se, index); @@ -1769,6 +1920,33 @@ static void invalidate_ppgtt_mm(struct intel_vgpu_mm *mm) mm->ppgtt_mm.shadowed = false; } +static int shadow_mm_pv(struct intel_vgpu_mm *mm) +{ + struct intel_vgpu *vgpu = mm->vgpu; + struct intel_gvt *gvt = vgpu->gvt; + struct intel_gvt_gtt_entry se; + + if (WARN_ON(mm->ppgtt_mm.root_entry_type != + GTT_TYPE_PPGTT_ROOT_L4_ENTRY)) + return -EINVAL; + + mm->ppgtt_mm.ppgtt = i915_ppgtt_create(gvt->dev_priv, NULL); + if (IS_ERR(mm->ppgtt_mm.ppgtt)) { + gvt_vgpu_err("fail to create ppgtt: %ld\n", + PTR_ERR(mm->ppgtt_mm.ppgtt)); + return PTR_ERR(mm->ppgtt_mm.ppgtt); + } + + se.type = GTT_TYPE_PPGTT_ROOT_L4_ENTRY; + se.val64 = 
px_dma(&mm->ppgtt_mm.ppgtt->pml4); + ppgtt_set_shadow_root_entry(mm, &se, 0); + + trace_spt_guest_change(vgpu->id, "populate root pointer", + NULL, se.type, se.val64, 0); + mm->ppgtt_mm.shadowed = true; + + return 0; +} static int shadow_ppgtt_mm(struct intel_vgpu_mm *mm) { @@ -1783,6 +1961,9 @@ static int shadow_ppgtt_mm(struct intel_vgpu_mm *mm) if (mm->ppgtt_mm.shadowed) return 0; + if (VGPU_PVMMIO(mm->vgpu) & PVMMIO_PPGTT_UPDATE) + return shadow_mm_pv(mm); + mm->ppgtt_mm.shadowed = true; for (index = 0; index < ARRAY_SIZE(mm->ppgtt_mm.guest_pdps); index++) { @@ -1885,7 +2066,6 @@ struct intel_vgpu_mm *intel_vgpu_create_ppgtt_mm(struct intel_vgpu *vgpu, static struct intel_vgpu_mm *intel_vgpu_create_ggtt_mm(struct intel_vgpu *vgpu) { struct intel_vgpu_mm *mm; - unsigned long nr_entries; mm = vgpu_alloc_mm(vgpu); if (!mm) @@ -1893,10 +2073,17 @@ static struct intel_vgpu_mm *intel_vgpu_create_ggtt_mm(struct intel_vgpu *vgpu) mm->type = INTEL_GVT_MM_GGTT; - nr_entries = gvt_ggtt_gm_sz(vgpu->gvt) >> I915_GTT_PAGE_SHIFT; - mm->ggtt_mm.virtual_ggtt = - vzalloc(array_size(nr_entries, + mm->ggtt_mm.virtual_ggtt = alloc_ggtt_virtual_page_table(mm); + if (!mm->ggtt_mm.virtual_ggtt) { + unsigned long nr_entries; + + DRM_INFO("fail to alloc contiguous pages, fallback\n"); + nr_entries = gvt_ggtt_gm_sz(vgpu->gvt) >> I915_GTT_PAGE_SHIFT; + mm->ggtt_mm.virtual_ggtt = + vzalloc(array_size(nr_entries, vgpu->gvt->device_info.gtt_entry_size)); + } + if (!mm->ggtt_mm.virtual_ggtt) { vgpu_free_mm(mm); return ERR_PTR(-ENOMEM); @@ -1925,7 +2112,17 @@ void _intel_vgpu_mm_release(struct kref *mm_ref) list_del(&mm->ppgtt_mm.lru_list); invalidate_ppgtt_mm(mm); } else { - vfree(mm->ggtt_mm.virtual_ggtt); + if (mm->ggtt_mm.virtual_ggtt) { + struct intel_vgpu *vgpu = mm->vgpu; + struct intel_vgpu_gm *gm = &vgpu->gm; + + if (gm->st) { + map_gttmmio(mm->vgpu, false); + free_ggtt_virtual_page_table(mm); + } else + vfree(mm->ggtt_mm.virtual_ggtt); + mm->ggtt_mm.virtual_ggtt = NULL; + } 
mm->ggtt_mm.last_partial_off = -1UL; } @@ -2426,6 +2623,13 @@ int intel_vgpu_init_gtt(struct intel_vgpu *vgpu) intel_vgpu_reset_ggtt(vgpu, false); + vgpu->cached_guest_entry = kzalloc(I915_GTT_PAGE_SIZE, GFP_KERNEL); + if (!vgpu->cached_guest_entry) { + gvt_vgpu_err("fail to allocate cached_guest_entry page\n"); + return -ENOMEM; + } + vgpu->ge_cache_enable = false; + return create_scratch_page_tree(vgpu); } @@ -2468,6 +2672,7 @@ void intel_vgpu_clean_gtt(struct intel_vgpu *vgpu) { intel_vgpu_destroy_all_ppgtt_mm(vgpu); intel_vgpu_destroy_ggtt_mm(vgpu); + kfree(vgpu->cached_guest_entry); release_scratch_page_tree(vgpu); } @@ -2498,7 +2703,7 @@ static int setup_spt_oos(struct intel_gvt *gvt) INIT_LIST_HEAD(>t->oos_page_use_list_head); for (i = 0; i < preallocated_oos_pages; i++) { - oos_page = kzalloc(sizeof(*oos_page), GFP_KERNEL); + oos_page = kmalloc(sizeof(*oos_page), GFP_KERNEL); if (!oos_page) { ret = -ENOMEM; goto fail; @@ -2763,3 +2968,431 @@ void intel_vgpu_reset_gtt(struct intel_vgpu *vgpu) intel_vgpu_destroy_all_ppgtt_mm(vgpu); intel_vgpu_reset_ggtt(vgpu, true); } + +int intel_vgpu_g2v_pv_ppgtt_alloc_4lvl(struct intel_vgpu *vgpu, + int page_table_level) +{ + struct pv_ppgtt_update *pv_ppgtt = &vgpu->mmio.shared_page->pv_ppgtt; + struct intel_vgpu_mm *mm; + u64 pdps[4] = {pv_ppgtt->pdp, 0, 0, 0}; + int ret = 0; + + if (WARN_ON(page_table_level != 4)) + return -EINVAL; + + gvt_dbg_mm("alloc_4lvl pdp=%llx start=%llx length=%llx\n", + pv_ppgtt->pdp, pv_ppgtt->start, + pv_ppgtt->length); + + mm = intel_vgpu_find_ppgtt_mm(vgpu, pdps); + if (!mm) { + gvt_vgpu_err("failed to find mm for pdp 0x%llx\n", pdps[0]); + ret = -EINVAL; + } else { + ret = mm->ppgtt_mm.ppgtt->vm.allocate_va_range( + &mm->ppgtt_mm.ppgtt->vm, + pv_ppgtt->start, pv_ppgtt->length); + if (ret) + gvt_vgpu_err("failed to alloc for pdp %llx\n", pdps[0]); + } + + return ret; +} + +int intel_vgpu_g2v_pv_ppgtt_clear_4lvl(struct intel_vgpu *vgpu, + int page_table_level) +{ + struct pv_ppgtt_update 
*pv_ppgtt = &vgpu->mmio.shared_page->pv_ppgtt; + struct intel_vgpu_mm *mm; + u64 pdps[4] = {pv_ppgtt->pdp, 0, 0, 0}; + int ret = 0; + + if (WARN_ON(page_table_level != 4)) + return -EINVAL; + + gvt_dbg_mm("clear_4lvl pdp=%llx start=%llx length=%llx\n", + pv_ppgtt->pdp, pv_ppgtt->start, + pv_ppgtt->length); + + mm = intel_vgpu_find_ppgtt_mm(vgpu, pdps); + if (!mm) { + gvt_vgpu_err("failed to find mm for pdp 0x%llx\n", pdps[0]); + ret = -EINVAL; + } else { + mm->ppgtt_mm.ppgtt->vm.clear_range( + &mm->ppgtt_mm.ppgtt->vm, + pv_ppgtt->start, pv_ppgtt->length); + } + + return ret; +} + +#define GEN8_PML4E_SIZE (1UL << GEN8_PML4E_SHIFT) +#define GEN8_PML4E_SIZE_MASK (~(GEN8_PML4E_SIZE - 1)) +#define GEN8_PDPE_SIZE (1UL << GEN8_PDPE_SHIFT) +#define GEN8_PDPE_SIZE_MASK (~(GEN8_PDPE_SIZE - 1)) +#define GEN8_PDE_SIZE (1UL << GEN8_PDE_SHIFT) +#define GEN8_PDE_SIZE_MASK (~(GEN8_PDE_SIZE - 1)) + +#define pml4_addr_end(addr, end) \ +({ unsigned long __boundary = \ + ((addr) + GEN8_PML4E_SIZE) & GEN8_PML4E_SIZE_MASK; \ + (__boundary < (end)) ? __boundary : (end); \ +}) + +#define pdp_addr_end(addr, end) \ +({ unsigned long __boundary = \ + ((addr) + GEN8_PDPE_SIZE) & GEN8_PDPE_SIZE_MASK; \ + (__boundary < (end)) ? __boundary : (end); \ +}) + +#define pd_addr_end(addr, end) \ +({ unsigned long __boundary = \ + ((addr) + GEN8_PDE_SIZE) & GEN8_PDE_SIZE_MASK; \ + (__boundary < (end)) ? 
__boundary : (end); \ +}) + +struct ppgtt_walk { + unsigned long *mfns; + int mfn_index; + unsigned long *pt; +}; + +static int walk_pt_range(struct intel_vgpu *vgpu, u64 pt, + u64 start, u64 end, struct ppgtt_walk *walk) +{ + const struct intel_gvt_device_info *info = &vgpu->gvt->device_info; + struct intel_gvt_gtt_gma_ops *gma_ops = vgpu->gvt->gtt.gma_ops; + unsigned long start_index, end_index; + int ret; + int i; + unsigned long mfn, gfn; + + start_index = gma_ops->gma_to_pte_index(start); + end_index = ((end - start) >> PAGE_SHIFT) + start_index; + + gvt_dbg_mm("%s: %llx start=%llx end=%llx start_index=%lx end_index=%lx mfn_index=%x\n", + __func__, pt, start, end, + start_index, end_index, walk->mfn_index); + ret = intel_gvt_hypervisor_read_gpa(vgpu, + (pt & PAGE_MASK) + (start_index << info->gtt_entry_size_shift), + walk->pt + start_index, + (end_index - start_index) << info->gtt_entry_size_shift); + if (ret) { + gvt_vgpu_err("fail to read gpa %llx\n", pt); + return ret; + } + + for (i = start_index; i < end_index; i++) { + gfn = walk->pt[i] >> PAGE_SHIFT; + mfn = intel_gvt_hypervisor_gfn_to_mfn(vgpu, gfn); + if (mfn == INTEL_GVT_INVALID_ADDR) { + gvt_vgpu_err("fail to translate gfn: 0x%lx\n", gfn); + return -ENXIO; + } + walk->mfns[walk->mfn_index++] = mfn << PAGE_SHIFT; + } + + return 0; +} + + +static int walk_pd_range(struct intel_vgpu *vgpu, u64 pd, + u64 start, u64 end, struct ppgtt_walk *walk) +{ + const struct intel_gvt_device_info *info = &vgpu->gvt->device_info; + struct intel_gvt_gtt_gma_ops *gma_ops = vgpu->gvt->gtt.gma_ops; + unsigned long index; + u64 pt, next; + int ret = 0; + + do { + index = gma_ops->gma_to_pde_index(start); + + ret = intel_gvt_hypervisor_read_gpa(vgpu, + (pd & PAGE_MASK) + (index << + info->gtt_entry_size_shift), &pt, 8); + if (ret) + return ret; + next = pd_addr_end(start, end); + gvt_dbg_mm("%s: %llx start=%llx end=%llx next=%llx\n", + __func__, pd, start, end, next); + walk_pt_range(vgpu, pt, start, next, walk); + + start 
= next; + } while (start != end); + + return ret; +} + + +static int walk_pdp_range(struct intel_vgpu *vgpu, u64 pdp, + u64 start, u64 end, struct ppgtt_walk *walk) +{ + const struct intel_gvt_device_info *info = &vgpu->gvt->device_info; + struct intel_gvt_gtt_gma_ops *gma_ops = vgpu->gvt->gtt.gma_ops; + unsigned long index; + u64 pd, next; + int ret = 0; + + do { + index = gma_ops->gma_to_l4_pdp_index(start); + + ret = intel_gvt_hypervisor_read_gpa(vgpu, + (pdp & PAGE_MASK) + (index << + info->gtt_entry_size_shift), &pd, 8); + if (ret) + return ret; + next = pdp_addr_end(start, end); + gvt_dbg_mm("%s: %llx start=%llx end=%llx next=%llx\n", + __func__, pdp, start, end, next); + + walk_pd_range(vgpu, pd, start, next, walk); + start = next; + } while (start != end); + + return ret; +} + + +static int walk_pml4_range(struct intel_vgpu *vgpu, u64 pml4, + u64 start, u64 end, struct ppgtt_walk *walk) +{ + const struct intel_gvt_device_info *info = &vgpu->gvt->device_info; + struct intel_gvt_gtt_gma_ops *gma_ops = vgpu->gvt->gtt.gma_ops; + unsigned long index; + u64 pdp, next; + int ret = 0; + + do { + index = gma_ops->gma_to_pml4_index(start); + ret = intel_gvt_hypervisor_read_gpa(vgpu, + (pml4 & PAGE_MASK) + (index << + info->gtt_entry_size_shift), &pdp, 8); + if (ret) + return ret; + next = pml4_addr_end(start, end); + gvt_dbg_mm("%s: %llx start=%llx end=%llx next=%llx\n", + __func__, pml4, start, end, next); + + walk_pdp_range(vgpu, pdp, start, next, walk); + start = next; + } while (start != end); + + return ret; +} + +int intel_vgpu_g2v_pv_ppgtt_insert_4lvl(struct intel_vgpu *vgpu, + int page_table_level) +{ + struct pv_ppgtt_update *pv_ppgtt = &vgpu->mmio.shared_page->pv_ppgtt; + struct intel_vgpu_mm *mm; + u64 pdps[4] = {pv_ppgtt->pdp, 0, 0, 0}; + int ret = 0; + u64 start = pv_ppgtt->start; + u64 length = pv_ppgtt->length; + struct sg_table st; + struct scatterlist *sg = NULL; + int num_pages = length >> PAGE_SHIFT; + struct i915_vma vma; + struct ppgtt_walk walk; 
+ int i; + + if (WARN_ON(page_table_level != 4)) + return -EINVAL; + + gvt_dbg_mm("insert_4lvl pml4=%llx start=%llx length=%llx cache=%x\n", + pv_ppgtt->pdp, start, length, pv_ppgtt->cache_level); + + mm = intel_vgpu_find_ppgtt_mm(vgpu, pdps); + if (!mm) { + gvt_vgpu_err("fail to find mm for pml4 0x%llx\n", pdps[0]); + return -EINVAL; + } + + walk.mfn_index = 0; + walk.mfns = NULL; + walk.pt = NULL; + + walk.mfns = kmalloc_array(num_pages, + sizeof(unsigned long), GFP_KERNEL); + if (!walk.mfns) { + ret = -ENOMEM; + goto fail; + } + + walk.pt = (unsigned long *)__get_free_pages(GFP_KERNEL, 0); + if (!walk.pt) { + ret = -ENOMEM; + goto fail; + } + + if (sg_alloc_table(&st, num_pages, GFP_KERNEL)) { + ret = -ENOMEM; + goto fail; + } + + ret = walk_pml4_range(vgpu, pdps[0], start, start + length, &walk); + if (ret) + goto fail_free_sg; + + WARN_ON(num_pages != walk.mfn_index); + + for_each_sg(st.sgl, sg, num_pages, i) { + sg->offset = 0; + sg->length = PAGE_SIZE; + sg_dma_address(sg) = walk.mfns[i]; + sg_dma_len(sg) = PAGE_SIZE; + } + + /* fake vma for insert call*/ + memset(&vma, 0, sizeof(vma)); + vma.node.start = start; + vma.pages = &st; + mm->ppgtt_mm.ppgtt->vm.insert_entries( + &mm->ppgtt_mm.ppgtt->vm, &vma, + pv_ppgtt->cache_level, 0); + +fail_free_sg: + sg_free_table(&st); +fail: + kfree(walk.mfns); + free_page((unsigned long)walk.pt); + + return ret; +} + +static void validate_ggtt_range(struct intel_vgpu *vgpu, + u64 *start, u64 *length) +{ + u64 end; + + if (WARN_ON(*start > vgpu->gvt->dev_priv->ggtt.vm.total || + *length > vgpu->gvt->dev_priv->ggtt.vm.total)) { + *length = 0; + return; + } + + end = *start + *length - 1; + + if (*start >= vgpu_aperture_gmadr_base(vgpu) && + end <= vgpu_aperture_gmadr_end(vgpu)) + return; + + if (*start >= vgpu_hidden_gmadr_base(vgpu) && + end <= vgpu_hidden_gmadr_end(vgpu)) + return; + + /* handle the cases with invalid ranges */ + WARN_ON(1); + + /* start is in aperture range, end is after aperture range */ + if (*start >=
vgpu_aperture_gmadr_base(vgpu) && + *start <= vgpu_aperture_gmadr_end(vgpu)) { + *length = vgpu_aperture_gmadr_end(vgpu) - *start + 1; + return; + } + + /* start is before aperture range, end is in aperture range */ + if (end >= vgpu_aperture_gmadr_base(vgpu) && + end <= vgpu_aperture_gmadr_end(vgpu)) { + *start = vgpu_aperture_gmadr_base(vgpu); + return; + } + + /* start is in hidden range, end is after hidden range */ + if (*start >= vgpu_hidden_gmadr_base(vgpu) && + *start <= vgpu_hidden_gmadr_end(vgpu)) { + *length = vgpu_hidden_gmadr_end(vgpu) - *start + 1; + return; + } + + /* start is before hidden range, end is in hidden range */ + if (end >= vgpu_hidden_gmadr_base(vgpu) && + end <= vgpu_hidden_gmadr_end(vgpu)) { + *start = vgpu_hidden_gmadr_base(vgpu); + return; + } + + /* both start and end are not in valid range*/ + *length = 0; + + return; +} + +int intel_vgpu_g2v_pv_ggtt_insert(struct intel_vgpu *vgpu) +{ + struct intel_vgpu_gtt *gtt = &vgpu->gtt; + struct gvt_shared_page *shared_page = vgpu->mmio.shared_page; + struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv; + struct i915_ggtt *ggtt = &dev_priv->ggtt; + u64 start = shared_page->pv_ggtt.start; + u64 num_entries = shared_page->pv_ggtt.length; + u32 cache_level = shared_page->pv_ggtt.cache_level; + u64 length = num_entries << PAGE_SHIFT; + u64 *vaddr = gtt->ggtt_mm->ggtt_mm.virtual_ggtt; + u64 gtt_entry_index; + u64 gtt_entry; + unsigned long mfn; + struct i915_vma vma; + struct sg_table st; + struct scatterlist *sg = NULL; + int ret = 0; + int i; + + gvt_dbg_mm("ggtt_insert: start=%llx length=%llx cache=%x\n", + start, length, cache_level); + validate_ggtt_range(vgpu, &start, &length); + if (length == 0) + return 0; + + num_entries = length >> PAGE_SHIFT; + + if (sg_alloc_table(&st, num_entries, GFP_KERNEL)) + return -ENOMEM; + + for_each_sg(st.sgl, sg, num_entries, i) { + gtt_entry_index = (start >> PAGE_SHIFT) + i; + gtt_entry = vaddr[gtt_entry_index]; + mfn =
intel_gvt_hypervisor_gfn_to_mfn(vgpu, + gtt_entry >> PAGE_SHIFT); + if (mfn == INTEL_GVT_INVALID_ADDR) { + gvt_vgpu_err("fail to translate gfn: 0x%llx\n", + gtt_entry >> PAGE_SHIFT); + ret = -ENXIO; + goto fail; + } + sg->offset = 0; + sg->length = PAGE_SIZE; + sg_dma_address(sg) = mfn << PAGE_SHIFT; + sg_dma_len(sg) = PAGE_SIZE; + } + + /* fake vma for insert call*/ + memset(&vma, 0, sizeof(vma)); + vma.node.start = start; + vma.pages = &st; + ggtt->vm.insert_entries(&ggtt->vm, &vma, cache_level, 0); + +fail: + sg_free_table(&st); + return ret; +} + +int intel_vgpu_g2v_pv_ggtt_clear(struct intel_vgpu *vgpu) +{ + struct gvt_shared_page *shared_page = vgpu->mmio.shared_page; + u64 start = shared_page->pv_ggtt.start; + u64 length = shared_page->pv_ggtt.length; + struct i915_ggtt *ggtt = &vgpu->gvt->dev_priv->ggtt; + + gvt_dbg_mm("ggtt_clear: start=%llx length=%llx\n", + start, length); + validate_ggtt_range(vgpu, &start, &length); + if (length == 0) + return 0; + + ggtt->vm.clear_range(&ggtt->vm, start, length); + + return 0; +} diff --git a/drivers/gpu/drm/i915/gvt/gtt.h b/drivers/gpu/drm/i915/gvt/gtt.h index 7a9b36176efb7..b3cfcae4032a3 100644 --- a/drivers/gpu/drm/i915/gvt/gtt.h +++ b/drivers/gpu/drm/i915/gvt/gtt.h @@ -35,7 +35,6 @@ #define _GVT_GTT_H_ #define I915_GTT_PAGE_SHIFT 12 -#define I915_GTT_PAGE_MASK (~(I915_GTT_PAGE_SIZE - 1)) struct intel_vgpu_mm; @@ -131,7 +130,7 @@ enum intel_gvt_mm_type { INTEL_GVT_MM_PPGTT, }; -#define GVT_RING_CTX_NR_PDPS GEN8_3LVL_PDPES +#define GVT_RING_CTX_NR_PDPS GEN8_3LVL_PDPES struct intel_vgpu_mm { enum intel_gvt_mm_type type; @@ -154,6 +153,7 @@ struct intel_vgpu_mm { struct list_head list; struct list_head lru_list; + struct i915_hw_ppgtt *ppgtt; } ppgtt_mm; struct { void *virtual_ggtt; @@ -198,6 +198,9 @@ struct intel_vgpu_gtt { struct list_head oos_page_list_head; struct list_head post_shadow_list_head; struct intel_vgpu_scratch_pt scratch_pt[GTT_TYPE_MAX]; + + /* indicate whether the PV mapped is enabled for ggtt */ + 
bool ggtt_pv_mapped; }; extern int intel_vgpu_init_gtt(struct intel_vgpu *vgpu); @@ -272,4 +275,17 @@ int intel_vgpu_emulate_ggtt_mmio_read(struct intel_vgpu *vgpu, int intel_vgpu_emulate_ggtt_mmio_write(struct intel_vgpu *vgpu, unsigned int off, void *p_data, unsigned int bytes); +int intel_vgpu_g2v_pv_ppgtt_alloc_4lvl(struct intel_vgpu *vgpu, + int page_table_level); + +int intel_vgpu_g2v_pv_ppgtt_clear_4lvl(struct intel_vgpu *vgpu, + int page_table_level); + +int intel_vgpu_g2v_pv_ppgtt_insert_4lvl(struct intel_vgpu *vgpu, + int page_table_level); + +int intel_vgpu_g2v_pv_ggtt_insert(struct intel_vgpu *vgpu); + +int intel_vgpu_g2v_pv_ggtt_clear(struct intel_vgpu *vgpu); + #endif /* _GVT_GTT_H_ */ diff --git a/drivers/gpu/drm/i915/gvt/gvt.c b/drivers/gpu/drm/i915/gvt/gvt.c index 46c8b720e3363..72b7dca9f4eb0 100644 --- a/drivers/gpu/drm/i915/gvt/gvt.c +++ b/drivers/gpu/drm/i915/gvt/gvt.c @@ -44,6 +44,7 @@ struct intel_gvt_host intel_gvt_host; static const char * const supported_hypervisors[] = { [INTEL_GVT_HYPERVISOR_XEN] = "XEN", [INTEL_GVT_HYPERVISOR_KVM] = "KVM", + [INTEL_GVT_HYPERVISOR_ACRN] = "ACRN", }; static struct intel_vgpu_type *intel_gvt_find_vgpu_type(struct intel_gvt *gvt, @@ -221,6 +222,11 @@ int intel_gvt_init_host(void) symbol_get(kvmgt_mpt), "kvmgt"); intel_gvt_host.hypervisor_type = INTEL_GVT_HYPERVISOR_KVM; #endif + /* not in Xen. Try ACRN */ + intel_gvt_host.mpt = try_then_request_module( + symbol_get(acrn_gvt_mpt), "acrn_gvt"); + intel_gvt_host.hypervisor_type = INTEL_GVT_HYPERVISOR_ACRN; + printk("acrngt %s\n", intel_gvt_host.mpt?"found":"not found"); } /* Fail to load MPT modules - bail out */ @@ -242,6 +248,8 @@ static void init_device_info(struct intel_gvt *gvt) info->max_support_vgpus = 8; info->cfg_space_size = PCI_CFG_SPACE_EXP_SIZE; info->mmio_size = 2 * 1024 * 1024; + /* order of mmio size. 
assert(2^order == mmio_size) */ + info->mmio_size_order = 9; info->mmio_bar = 0; info->gtt_start_offset = 8 * 1024 * 1024; info->gtt_entry_size = 8; @@ -301,6 +309,102 @@ static int init_service_thread(struct intel_gvt *gvt) return 0; } +void intel_gvt_init_pipe_info(struct intel_gvt *gvt); + +/* + * When enabling multi-plane in DomU, an issue is that the PLANE_BUF_CFG + * register cannot be updated dynamically, since Dom0 has no idea of the + * plane information of DomU's planes, so here we statically allocate the + * ddb entries for all the possible enabled planes. + */ +void intel_gvt_allocate_ddb(struct intel_gvt *gvt, + struct skl_ddb_allocation *ddb, unsigned int active_crtcs) +{ + struct drm_i915_private *dev_priv = gvt->dev_priv; + unsigned int pipe_size, ddb_size, plane_size, plane_cnt; + u16 start, end; + enum pipe pipe; + enum plane_id plane; + int i = 0; + int num_active = hweight32(active_crtcs); + + if (!num_active) + return; + + ddb_size = INTEL_INFO(dev_priv)->ddb_size; + ddb_size -= 4; /* 4 blocks for bypass path allocation */ + pipe_size = ddb_size / num_active; + + memset(ddb, 0, sizeof(*ddb)); + for_each_pipe_masked(dev_priv, pipe, active_crtcs) { + start = pipe_size * (i++); + end = start + pipe_size; + ddb->plane[pipe][PLANE_CURSOR].start = end - 8; + ddb->plane[pipe][PLANE_CURSOR].end = end; + + plane_cnt = (INTEL_INFO(dev_priv)->num_sprites[pipe] + 1); + plane_size = (pipe_size - 8) / plane_cnt; + + for_each_universal_plane(dev_priv, pipe, plane) { + ddb->plane[pipe][plane].start = start + + (plane * (pipe_size - 8) / plane_cnt); + ddb->plane[pipe][plane].end = + ddb->plane[pipe][plane].start + plane_size; + } + } +} + +static int intel_gvt_init_vreg_pool(struct intel_gvt *gvt) +{ + int i = 0; + const struct intel_gvt_device_info *info = &gvt->device_info; + + for (i = 0; i < GVT_MAX_VGPU; i++) { + gvt->intel_gvt_vreg_pool[i] = (void *)__get_free_pages( + GFP_KERNEL, info->mmio_size_order); + if (!gvt->intel_gvt_vreg_pool[i]) + return 
-ENOMEM; + } + + return 0; +} + +static void intel_gvt_clean_vreg_pool(struct intel_gvt *gvt) +{ + int i = 0; + const struct intel_gvt_device_info *info = &gvt->device_info; + + for (i = 0; i < GVT_MAX_VGPU && gvt->intel_gvt_vreg_pool[i]; i++) + free_pages((unsigned long) gvt->intel_gvt_vreg_pool[i], + info->mmio_size_order); +} + +void *intel_gvt_allocate_vreg(struct intel_vgpu *vgpu) +{ + int id = vgpu->id - 1; + struct intel_gvt *gvt = vgpu->gvt; + + if (id < 0 || id >= GVT_MAX_VGPU || + gvt->intel_gvt_vreg_pool[id] == NULL || + gvt->intel_gvt_vreg_allocated[id]) + return NULL; + + gvt->intel_gvt_vreg_allocated[id] = true; + return gvt->intel_gvt_vreg_pool[id]; +} + +void intel_gvt_free_vreg(struct intel_vgpu *vgpu) +{ + int id = vgpu->id - 1; + struct intel_gvt *gvt = vgpu->gvt; + + if (id < 0 || id >= GVT_MAX_VGPU || + gvt->intel_gvt_vreg_pool[id] == NULL || + !gvt->intel_gvt_vreg_allocated[id]) + return; + gvt->intel_gvt_vreg_allocated[id] = false; +} + /** * intel_gvt_clean_device - clean a GVT device * @gvt: intel gvt device @@ -316,6 +420,7 @@ void intel_gvt_clean_device(struct drm_i915_private *dev_priv) if (WARN_ON(!gvt)) return; + intel_gvt_clean_vreg_pool(gvt); intel_gvt_destroy_idle_vgpu(gvt->idle_vgpu); intel_gvt_hypervisor_host_exit(&dev_priv->drm.pdev->dev, gvt); intel_gvt_cleanup_vgpu_type_groups(gvt); @@ -336,6 +441,12 @@ void intel_gvt_clean_device(struct drm_i915_private *dev_priv) dev_priv->gvt = NULL; } +#define BITS_PER_DOMAIN 4 +#define MAX_PLANES_PER_DOMAIN 4 +#define DOMAIN_PLANE_OWNER(owner, pipe, plane) \ + ((((owner) >> (pipe) * BITS_PER_DOMAIN * MAX_PLANES_PER_DOMAIN) >> \ + BITS_PER_DOMAIN * (plane)) & 0xf) + /** * intel_gvt_init_device - initialize a GVT device * @dev_priv: drm i915 private data @@ -421,6 +532,8 @@ int intel_gvt_init_device(struct drm_i915_private *dev_priv) goto out_clean_types; } + intel_gvt_init_pipe_info(gvt); + ret = intel_gvt_hypervisor_host_init(&dev_priv->drm.pdev->dev, gvt, &intel_gvt_ops); if (ret) { @@ 
-436,14 +549,42 @@ int intel_gvt_init_device(struct drm_i915_private *dev_priv) } gvt->idle_vgpu = vgpu; + ret = intel_gvt_init_vreg_pool(gvt); + if (ret) { + gvt_err("failed to init vreg pool\n"); + goto out_clean_vreg; + } + ret = intel_gvt_debugfs_init(gvt); if (ret) gvt_err("debugfs registeration failed, go on.\n"); - gvt_dbg_core("gvt device initialization is done\n"); dev_priv->gvt = gvt; + + if (i915_modparams.avail_planes_per_pipe) { + unsigned long long domain_plane_owners; + int plane; + enum pipe pipe; + + /* + * Each nibble represents domain id + * ids can be from 0-F. 0 for Dom0, 1,2,3...0xF for DomUs + * plane_owner[i] holds the id of the domain that owns it,eg:0,1,2 etc + */ + domain_plane_owners = i915_modparams.domain_plane_owners; + for_each_pipe(dev_priv, pipe) { + for_each_universal_plane(dev_priv, pipe, plane) { + gvt->pipe_info[pipe].plane_owner[plane] = + DOMAIN_PLANE_OWNER(domain_plane_owners, pipe, plane); + } + } + } + + gvt_dbg_core("gvt device initialization is done\n"); return 0; +out_clean_vreg: + intel_gvt_clean_vreg_pool(gvt); out_clean_types: intel_gvt_clean_vgpu_types(gvt); out_clean_thread: @@ -468,6 +609,14 @@ int intel_gvt_init_device(struct drm_i915_private *dev_priv) return ret; } +int gvt_dom0_ready(struct drm_i915_private *dev_priv) +{ + if (!intel_gvt_active(dev_priv)) + return 0; + + return intel_gvt_hypervisor_dom0_ready(); +} + #if IS_ENABLED(CONFIG_DRM_I915_GVT_KVMGT) MODULE_SOFTDEP("pre: kvmgt"); #endif diff --git a/drivers/gpu/drm/i915/gvt/gvt.h b/drivers/gpu/drm/i915/gvt/gvt.h index 31f6cdbe5c424..cf689150dbf71 100644 --- a/drivers/gpu/drm/i915/gvt/gvt.h +++ b/drivers/gpu/drm/i915/gvt/gvt.h @@ -55,6 +55,7 @@ enum { INTEL_GVT_HYPERVISOR_XEN = 0, INTEL_GVT_HYPERVISOR_KVM, + INTEL_GVT_HYPERVISOR_ACRN, }; struct intel_gvt_host { @@ -70,6 +71,7 @@ struct intel_gvt_device_info { u32 max_support_vgpus; u32 cfg_space_size; u32 mmio_size; + u32 mmio_size_order; u32 mmio_bar; unsigned long msi_cap_offset; u32 gtt_start_offset; 
@@ -83,6 +85,7 @@ struct intel_gvt_device_info { struct intel_vgpu_gm { u64 aperture_sz; u64 hidden_sz; + struct sg_table *st; struct drm_mm_node low_gm_node; struct drm_mm_node high_gm_node; }; @@ -99,6 +102,7 @@ struct intel_vgpu_fence { struct intel_vgpu_mmio { void *vreg; void *sreg; + struct gvt_shared_page *shared_page; }; #define INTEL_GVT_MAX_BAR_NUM 4 @@ -182,7 +186,7 @@ struct intel_vgpu { * scheduler structure. So below 2 vgpu data are protected * by sched_lock, not vgpu_lock. */ - void *sched_data; + void *sched_data[I915_NUM_ENGINES]; struct vgpu_sched_ctl sched_ctl; struct intel_vgpu_fence fence; @@ -232,6 +236,10 @@ struct intel_vgpu { struct completion vblank_done; u32 scan_nonprivbb; + + unsigned long long *cached_guest_entry; + bool ge_cache_enable; + bool entire_nonctxmmio_checked; }; /* validating GM healthy status*/ @@ -259,7 +267,7 @@ struct gvt_mmio_block { #define INTEL_GVT_MMIO_HASH_BITS 11 struct intel_gvt_mmio { - u8 *mmio_attribute; + u16 *mmio_attribute; /* Register contains RO bits */ #define F_RO (1 << 0) /* Register contains graphics address */ @@ -276,14 +284,22 @@ struct intel_gvt_mmio { #define F_UNALIGN (1 << 6) /* This reg is saved/restored in context */ #define F_IN_CTX (1 << 7) +/* This reg is not in the context */ +#define F_NON_CONTEXT (1 << 8) struct gvt_mmio_block *mmio_block; unsigned int num_mmio_block; + void *mmio_host_cache; + bool host_cache_initialized; DECLARE_HASHTABLE(mmio_info_table, INTEL_GVT_MMIO_HASH_BITS); unsigned long num_tracked_mmio; }; +/* Macro for easily access host engine mmio cached register */ +#define gvt_host_reg(gvt, reg) \ + (*(u32 *)(gvt->mmio.mmio_host_cache + reg)) \ + struct intel_gvt_firmware { void *cfg_space; void *mmio; @@ -291,6 +307,7 @@ struct intel_gvt_firmware { }; #define NR_MAX_INTEL_VGPU_TYPES 20 + struct intel_vgpu_type { char name[16]; unsigned int avail_instance; @@ -301,6 +318,15 @@ struct intel_vgpu_type { enum intel_vgpu_edid resolution; }; +struct intel_gvt_pipe_info { + 
enum pipe pipe_num; + int owner; + struct intel_gvt *gvt; + struct work_struct vblank_work; + int plane_owner[I915_MAX_PLANES]; + int scaler_owner[SKL_NUM_SCALERS]; +}; + struct intel_gvt { /* GVT scope lock, protect GVT itself, and all resource currently * not yet protected by special locks(vgpu and scheduler lock). @@ -334,12 +360,20 @@ struct intel_gvt { */ unsigned long service_request; + struct intel_gvt_pipe_info pipe_info[I915_MAX_PIPES]; + + struct skl_ddb_allocation ddb; + struct { struct engine_mmio *mmio; int ctx_mmio_count[I915_NUM_ENGINES]; } engine_mmio_list; struct dentry *debugfs_root; + struct work_struct active_hp_work; + + void *intel_gvt_vreg_pool[GVT_MAX_VGPU]; + bool intel_gvt_vreg_allocated[GVT_MAX_VGPU]; }; static inline struct intel_gvt *to_gvt(struct drm_i915_private *i915) @@ -458,6 +492,11 @@ void intel_vgpu_write_fence(struct intel_vgpu *vgpu, idr_for_each_entry((&(gvt)->vgpu_idr), (vgpu), (id)) \ for_each_if(vgpu->active) +#define for_each_universal_scaler(__dev_priv, __pipe, __s) \ + for ((__s) = 0; \ + (__s) < INTEL_INFO(__dev_priv)->num_scalers[(__pipe)] + 1; \ + (__s)++) + static inline void intel_vgpu_write_pci_bar(struct intel_vgpu *vgpu, u32 offset, u32 val, bool low) { @@ -530,6 +569,8 @@ void intel_vgpu_init_cfg_space(struct intel_vgpu *vgpu, bool primary); void intel_vgpu_reset_cfg_space(struct intel_vgpu *vgpu); +int set_pvmmio(struct intel_vgpu *vgpu, bool map); + int intel_vgpu_emulate_cfg_read(struct intel_vgpu *vgpu, unsigned int offset, void *p_data, unsigned int bytes); @@ -543,6 +584,8 @@ static inline u64 intel_vgpu_get_bar_gpa(struct intel_vgpu *vgpu, int bar) PCI_BASE_ADDRESS_MEM_MASK; } +int map_gttmmio(struct intel_vgpu *vgpu, bool map); + void intel_vgpu_clean_opregion(struct intel_vgpu *vgpu); int intel_vgpu_init_opregion(struct intel_vgpu *vgpu); int intel_vgpu_opregion_base_write_handler(struct intel_vgpu *vgpu, u32 gpa); @@ -579,6 +622,9 @@ struct intel_gvt_ops { unsigned int); }; +int gvt_dom0_ready(struct 
drm_i915_private *dev_priv); +void intel_gvt_allocate_ddb(struct intel_gvt *gvt, + struct skl_ddb_allocation *ddb, unsigned int active_crtcs); enum { GVT_FAILSAFE_UNSUPPORTED_GUEST, @@ -686,11 +732,40 @@ static inline void intel_gvt_mmio_set_in_ctx( gvt->mmio.mmio_attribute[offset >> 2] |= F_IN_CTX; } +/** + * intel_gvt_mmio_is_non_context - check a MMIO is non-context + * @gvt: a GVT device + * @offset: register offset + * + */ +static inline bool intel_gvt_mmio_is_non_context( + struct intel_gvt *gvt, unsigned int offset) +{ + return gvt->mmio.mmio_attribute[offset >> 2] & F_NON_CONTEXT; +} + +/** + * intel_gvt_mmio_set_non_context - mark a MMIO is non-context + + * @gvt: a GVT device + * @offset: register offset + * + */ +static inline void intel_gvt_mmio_set_non_context( + struct intel_gvt *gvt, unsigned int offset) +{ + gvt->mmio.mmio_attribute[offset >> 2] |= F_NON_CONTEXT; +} + int intel_gvt_debugfs_add_vgpu(struct intel_vgpu *vgpu); void intel_gvt_debugfs_remove_vgpu(struct intel_vgpu *vgpu); int intel_gvt_debugfs_init(struct intel_gvt *gvt); void intel_gvt_debugfs_clean(struct intel_gvt *gvt); +void *intel_gvt_allocate_vreg(struct intel_vgpu *vgpu); +void intel_gvt_free_vreg(struct intel_vgpu *vgpu); + +bool is_force_nonpriv_mmio(unsigned int offset); #include "trace.h" #include "mpt.h" diff --git a/drivers/gpu/drm/i915/gvt/handlers.c b/drivers/gpu/drm/i915/gvt/handlers.c index 94c1089ecf59e..02c12fbeafd34 100644 --- a/drivers/gpu/drm/i915/gvt/handlers.c +++ b/drivers/gpu/drm/i915/gvt/handlers.c @@ -47,6 +47,8 @@ #define PCH_PP_OFF_DELAYS _MMIO(0xc720c) #define PCH_PP_DIVISOR _MMIO(0xc7210) +#define MCHBAR_MEM_BASE _MMIO(MCHBAR_MIRROR_BASE_SNB + 0x4000) + unsigned long intel_gvt_get_device_type(struct intel_gvt *gvt) { if (IS_BROADWELL(gvt->dev_priv)) @@ -92,7 +94,7 @@ static struct intel_gvt_mmio_info *find_mmio_info(struct intel_gvt *gvt, } static int new_mmio_info(struct intel_gvt *gvt, - u32 offset, u8 flags, u32 size, + u32 offset, u16 flags, u32 
size, u32 addr_mask, u32 ro_mask, u32 device, gvt_mmio_func read, gvt_mmio_func write) { @@ -413,27 +415,9 @@ static int lcpll_ctl_mmio_write(struct intel_vgpu *vgpu, unsigned int offset, return 0; } -static int dpy_reg_mmio_read(struct intel_vgpu *vgpu, unsigned int offset, +static int mmio_write_empty(struct intel_vgpu *vgpu, unsigned int offset, void *p_data, unsigned int bytes) { - switch (offset) { - case 0xe651c: - case 0xe661c: - case 0xe671c: - case 0xe681c: - vgpu_vreg(vgpu, offset) = 1 << 17; - break; - case 0xe6c04: - vgpu_vreg(vgpu, offset) = 0x3; - break; - case 0xe6e1c: - vgpu_vreg(vgpu, offset) = 0x2f << 16; - break; - default: - return -EINVAL; - } - - read_vreg(vgpu, offset, p_data, bytes); return 0; } @@ -441,18 +425,21 @@ static int pipeconf_mmio_write(struct intel_vgpu *vgpu, unsigned int offset, void *p_data, unsigned int bytes) { u32 data; + unsigned int pipe = SKL_PLANE_REG_TO_PIPE(offset); + struct intel_crtc *crtc = intel_get_crtc_for_pipe( + vgpu->gvt->dev_priv, pipe); write_vreg(vgpu, offset, p_data, bytes); data = vgpu_vreg(vgpu, offset); - if (data & PIPECONF_ENABLE) + if (data & PIPECONF_ENABLE) { vgpu_vreg(vgpu, offset) |= I965_PIPECONF_ACTIVE; - else + if (crtc) + drm_crtc_vblank_get(&crtc->base); + } else { vgpu_vreg(vgpu, offset) &= ~I965_PIPECONF_ACTIVE; - /* vgpu_lock already hold by emulate mmio r/w */ - mutex_unlock(&vgpu->vgpu_lock); - intel_gvt_check_vblank_emulation(vgpu->gvt); - mutex_lock(&vgpu->vgpu_lock); + } + return 0; } @@ -530,6 +517,14 @@ static int force_nonpriv_write(struct intel_vgpu *vgpu, return 0; } +static int pipe_dsl_mmio_read(struct intel_vgpu *vgpu, + unsigned int offset, void *p_data, unsigned int bytes) +{ + struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv; + vgpu_vreg(vgpu, offset) = I915_READ(_MMIO(offset)); + return intel_vgpu_default_mmio_read(vgpu, offset, p_data, bytes); +} + static int ddi_buf_ctl_mmio_write(struct intel_vgpu *vgpu, unsigned int offset, void *p_data, unsigned int bytes) { 
@@ -785,6 +780,66 @@ static int spr_surf_mmio_write(struct intel_vgpu *vgpu, unsigned int offset, return 0; } +static int skl_plane_mmio_write(struct intel_vgpu *vgpu, unsigned int offset, + void *p_data, unsigned int bytes); +static int skl_ps_mmio_write(struct intel_vgpu *vgpu, unsigned int offset, + void *p_data, unsigned int bytes); + +static void pvmmio_update_plane_register(struct intel_vgpu *vgpu, + unsigned int pipe, unsigned int plane) +{ + struct pv_plane_update *pv_plane = &vgpu->mmio.shared_page->pv_plane; + + /* null function for PLANE_COLOR_CTL, PLANE_AUX_DIST, PLANE_AUX_OFFSET, + * and SKL_PS_PWR_GATE register trap + */ + + if (pv_plane->flags & PLANE_KEY_BIT) { + skl_plane_mmio_write(vgpu, + i915_mmio_reg_offset(PLANE_KEYVAL(pipe, plane)), + &pv_plane->plane_key_val, 4); + skl_plane_mmio_write(vgpu, + i915_mmio_reg_offset(PLANE_KEYMAX(pipe, plane)), + &pv_plane->plane_key_max, 4); + skl_plane_mmio_write(vgpu, + i915_mmio_reg_offset(PLANE_KEYMSK(pipe, plane)), + &pv_plane->plane_key_msk, 4); + } + skl_plane_mmio_write(vgpu, + i915_mmio_reg_offset(PLANE_OFFSET(pipe, plane)), + &pv_plane->plane_offset, 4); + skl_plane_mmio_write(vgpu, + i915_mmio_reg_offset(PLANE_STRIDE(pipe, plane)), + &pv_plane->plane_stride, 4); + skl_plane_mmio_write(vgpu, + i915_mmio_reg_offset(PLANE_SIZE(pipe, plane)), + &pv_plane->plane_size, 4); + skl_plane_mmio_write(vgpu, + i915_mmio_reg_offset(PLANE_AUX_DIST(pipe, plane)), + &pv_plane->plane_aux_dist, 4); + skl_plane_mmio_write(vgpu, + i915_mmio_reg_offset(PLANE_AUX_OFFSET(pipe, plane)), + &pv_plane->plane_aux_offset, 4); + + if (pv_plane->flags & PLANE_SCALER_BIT) { + skl_ps_mmio_write(vgpu, + i915_mmio_reg_offset(SKL_PS_CTRL(pipe, plane)), + &pv_plane->ps_ctrl, 4); + skl_ps_mmio_write(vgpu, + i915_mmio_reg_offset(SKL_PS_WIN_POS(pipe, plane)), + &pv_plane->ps_win_ps, 4); + skl_ps_mmio_write(vgpu, + i915_mmio_reg_offset(SKL_PS_WIN_SZ(pipe, plane)), + &pv_plane->ps_win_sz, 4); + } + skl_plane_mmio_write(vgpu, + 
i915_mmio_reg_offset(PLANE_POS(pipe, plane)), + &pv_plane->plane_pos, 4); + skl_plane_mmio_write(vgpu, + i915_mmio_reg_offset(PLANE_CTL(pipe, plane)), + &pv_plane->plane_ctl, 4); +} + static int trigger_aux_channel_interrupt(struct intel_vgpu *vgpu, unsigned int reg) { @@ -1152,6 +1207,7 @@ static int pvinfo_mmio_read(struct intel_vgpu *vgpu, unsigned int offset, void *p_data, unsigned int bytes) { bool invalid_read = false; + int ret = 0; read_vreg(vgpu, offset, p_data, bytes); @@ -1166,8 +1222,27 @@ static int pvinfo_mmio_read(struct intel_vgpu *vgpu, unsigned int offset, _vgtif_reg(avail_rs.fence_num) + 4) invalid_read = true; break; + case _vgtif_reg(pv_mmio): + /* a remap happens from guest mmio read operation, the target reg offset + * is in the first DWORD of shared_page. + */ + { + u32 reg = vgpu->mmio.shared_page->reg_addr; + struct intel_gvt_mmio_info *mmio; + + mmio = find_mmio_info(vgpu->gvt, rounddown(reg, 4)); + if (mmio) + ret = mmio->read(vgpu, reg, p_data, bytes); + else + ret = intel_vgpu_default_mmio_read(vgpu, reg, p_data, + bytes); + break; + } + case 0x78010: /* vgt_caps */ case 0x7881c: + case _vgtif_reg(scaler_owned): + case _vgtif_reg(enable_pvmmio): break; default: invalid_read = true; @@ -1177,7 +1252,7 @@ static int pvinfo_mmio_read(struct intel_vgpu *vgpu, unsigned int offset, gvt_vgpu_err("invalid pvinfo read: [%x:%x] = %x\n", offset, bytes, *(u32 *)p_data); vgpu->pv_notified = true; - return 0; + return ret; } static int handle_g2v_notification(struct intel_vgpu *vgpu, int notification) @@ -1198,6 +1273,18 @@ static int handle_g2v_notification(struct intel_vgpu *vgpu, int notification) case VGT_G2V_PPGTT_L3_PAGE_TABLE_DESTROY: case VGT_G2V_PPGTT_L4_PAGE_TABLE_DESTROY: return intel_vgpu_put_ppgtt_mm(vgpu, pdps); + case VGT_G2V_PPGTT_L4_ALLOC: + return intel_vgpu_g2v_pv_ppgtt_alloc_4lvl(vgpu, 4); + case VGT_G2V_PPGTT_L4_INSERT: + return intel_vgpu_g2v_pv_ppgtt_insert_4lvl(vgpu, 4); + case VGT_G2V_PPGTT_L4_CLEAR: + return 
intel_vgpu_g2v_pv_ppgtt_clear_4lvl(vgpu, 4); + case VGT_G2V_GGTT_INSERT: + return intel_vgpu_g2v_pv_ggtt_insert(vgpu); + break; + case VGT_G2V_GGTT_CLEAR: + return intel_vgpu_g2v_pv_ggtt_clear(vgpu); + break; case VGT_G2V_EXECLIST_CONTEXT_CREATE: case VGT_G2V_EXECLIST_CONTEXT_DESTROY: case 1: /* Remove this in guest driver. */ @@ -1225,6 +1312,26 @@ static int send_display_ready_uevent(struct intel_vgpu *vgpu, int ready) return kobject_uevent_env(kobj, KOBJ_ADD, env); } +#define INTEL_GVT_PCI_BAR_GTTMMIO 0 +int set_pvmmio(struct intel_vgpu *vgpu, bool map) +{ + u64 start, end; + u64 val; + int ret; + + val = vgpu_cfg_space(vgpu)[PCI_BASE_ADDRESS_0]; + if (val & PCI_BASE_ADDRESS_MEM_TYPE_64) + start = *(u64 *)(vgpu_cfg_space(vgpu) + PCI_BASE_ADDRESS_0); + else + start = *(u32 *)(vgpu_cfg_space(vgpu) + PCI_BASE_ADDRESS_0); + + start &= ~GENMASK(3, 0); + end = start + vgpu->cfg_space.bar[INTEL_GVT_PCI_BAR_GTTMMIO].size - 1; + + ret = intel_gvt_hypervisor_set_pvmmio(vgpu, start, end, map); + return ret; +} + static int pvinfo_mmio_write(struct intel_vgpu *vgpu, unsigned int offset, void *p_data, unsigned int bytes) { @@ -1241,6 +1348,27 @@ static int pvinfo_mmio_write(struct intel_vgpu *vgpu, unsigned int offset, case _vgtif_reg(g2v_notify): ret = handle_g2v_notification(vgpu, data); break; + case _vgtif_reg(enable_pvmmio): + if (i915_modparams.enable_pvmmio) { + vgpu_vreg(vgpu, offset) = data & + i915_modparams.enable_pvmmio; + if (set_pvmmio(vgpu, !!vgpu_vreg(vgpu, offset))) { + vgpu_vreg(vgpu, offset) = 0; + break; + } + if (vgpu_vreg(vgpu, offset) & PVMMIO_GGTT_UPDATE) { + ret = map_gttmmio(vgpu, true); + if (ret) { + DRM_INFO("ggtt pv mode is off\n"); + vgpu_vreg(vgpu, offset) &= + ~PVMMIO_GGTT_UPDATE; + } + } + + } else { + vgpu_vreg(vgpu, offset) = 0; + } + break; /* add xhot and yhot to handled list to avoid error log */ case _vgtif_reg(cursor_x_hot): case _vgtif_reg(cursor_y_hot): @@ -1266,22 +1394,6 @@ static int pvinfo_mmio_write(struct intel_vgpu *vgpu, 
unsigned int offset, return 0; } -static int pf_write(struct intel_vgpu *vgpu, - unsigned int offset, void *p_data, unsigned int bytes) -{ - u32 val = *(u32 *)p_data; - - if ((offset == _PS_1A_CTRL || offset == _PS_2A_CTRL || - offset == _PS_1B_CTRL || offset == _PS_2B_CTRL || - offset == _PS_1C_CTRL) && (val & PS_PLANE_SEL_MASK) != 0) { - WARN_ONCE(true, "VM(%d): guest is trying to scaling a plane\n", - vgpu->id); - return 0; - } - - return intel_vgpu_default_mmio_write(vgpu, offset, p_data, bytes); -} - static int power_well_ctl_mmio_write(struct intel_vgpu *vgpu, unsigned int offset, void *p_data, unsigned int bytes) { @@ -1650,6 +1762,7 @@ static int elsp_mmio_write(struct intel_vgpu *vgpu, unsigned int offset, int ring_id = intel_gvt_render_mmio_to_ring_id(vgpu->gvt, offset); struct intel_vgpu_execlist *execlist; u32 data = *(u32 *)p_data; + u32 *elsp_data = vgpu->mmio.shared_page->elsp_data; int ret = 0; if (WARN_ON(ring_id < 0 || ring_id >= I915_NUM_ENGINES)) @@ -1657,16 +1770,23 @@ static int elsp_mmio_write(struct intel_vgpu *vgpu, unsigned int offset, execlist = &vgpu->submission.execlist[ring_id]; - execlist->elsp_dwords.data[3 - execlist->elsp_dwords.index] = data; - if (execlist->elsp_dwords.index == 3) { + if (VGPU_PVMMIO(vgpu) & PVMMIO_ELSP_SUBMIT) { + execlist->elsp_dwords.data[3] = elsp_data[0]; + execlist->elsp_dwords.data[2] = elsp_data[1]; + execlist->elsp_dwords.data[1] = elsp_data[2]; + execlist->elsp_dwords.data[0] = data; ret = intel_vgpu_submit_execlist(vgpu, ring_id); - if(ret) - gvt_vgpu_err("fail submit workload on ring %d\n", - ring_id); + } else { + execlist->elsp_dwords.data[3 - execlist->elsp_dwords.index] = data; + if (execlist->elsp_dwords.index == 3) + ret = intel_vgpu_submit_execlist(vgpu, ring_id); + ++execlist->elsp_dwords.index; + execlist->elsp_dwords.index &= 0x3; } - ++execlist->elsp_dwords.index; - execlist->elsp_dwords.index &= 0x3; + if (ret) + gvt_vgpu_err("fail submit workload on ring %d\n", ring_id); + return ret; } 
@@ -1909,9 +2029,9 @@ static int init_generic_mmio_info(struct intel_gvt *gvt) MMIO_D(_MMIO(0xc4040), D_ALL); MMIO_D(DERRMR, D_ALL); - MMIO_D(PIPEDSL(PIPE_A), D_ALL); - MMIO_D(PIPEDSL(PIPE_B), D_ALL); - MMIO_D(PIPEDSL(PIPE_C), D_ALL); + MMIO_DH(PIPEDSL(PIPE_A), D_ALL, pipe_dsl_mmio_read, NULL); + MMIO_DH(PIPEDSL(PIPE_B), D_ALL, pipe_dsl_mmio_read, NULL); + MMIO_DH(PIPEDSL(PIPE_C), D_ALL, pipe_dsl_mmio_read, NULL); MMIO_D(PIPEDSL(_PIPE_EDP), D_ALL); MMIO_DH(PIPECONF(PIPE_A), D_ALL, NULL, pipeconf_mmio_write); @@ -1959,71 +2079,71 @@ static int init_generic_mmio_info(struct intel_gvt *gvt) MMIO_D(_MMIO(0x70098), D_ALL); MMIO_D(_MMIO(0x7009c), D_ALL); - MMIO_D(DSPCNTR(PIPE_A), D_ALL); - MMIO_D(DSPADDR(PIPE_A), D_ALL); - MMIO_D(DSPSTRIDE(PIPE_A), D_ALL); - MMIO_D(DSPPOS(PIPE_A), D_ALL); - MMIO_D(DSPSIZE(PIPE_A), D_ALL); - MMIO_DH(DSPSURF(PIPE_A), D_ALL, NULL, pri_surf_mmio_write); - MMIO_D(DSPOFFSET(PIPE_A), D_ALL); - MMIO_D(DSPSURFLIVE(PIPE_A), D_ALL); - - MMIO_D(DSPCNTR(PIPE_B), D_ALL); - MMIO_D(DSPADDR(PIPE_B), D_ALL); - MMIO_D(DSPSTRIDE(PIPE_B), D_ALL); - MMIO_D(DSPPOS(PIPE_B), D_ALL); - MMIO_D(DSPSIZE(PIPE_B), D_ALL); - MMIO_DH(DSPSURF(PIPE_B), D_ALL, NULL, pri_surf_mmio_write); - MMIO_D(DSPOFFSET(PIPE_B), D_ALL); - MMIO_D(DSPSURFLIVE(PIPE_B), D_ALL); - - MMIO_D(DSPCNTR(PIPE_C), D_ALL); - MMIO_D(DSPADDR(PIPE_C), D_ALL); - MMIO_D(DSPSTRIDE(PIPE_C), D_ALL); - MMIO_D(DSPPOS(PIPE_C), D_ALL); - MMIO_D(DSPSIZE(PIPE_C), D_ALL); - MMIO_DH(DSPSURF(PIPE_C), D_ALL, NULL, pri_surf_mmio_write); - MMIO_D(DSPOFFSET(PIPE_C), D_ALL); - MMIO_D(DSPSURFLIVE(PIPE_C), D_ALL); - - MMIO_D(SPRCTL(PIPE_A), D_ALL); - MMIO_D(SPRLINOFF(PIPE_A), D_ALL); - MMIO_D(SPRSTRIDE(PIPE_A), D_ALL); - MMIO_D(SPRPOS(PIPE_A), D_ALL); - MMIO_D(SPRSIZE(PIPE_A), D_ALL); - MMIO_D(SPRKEYVAL(PIPE_A), D_ALL); - MMIO_D(SPRKEYMSK(PIPE_A), D_ALL); - MMIO_DH(SPRSURF(PIPE_A), D_ALL, NULL, spr_surf_mmio_write); - MMIO_D(SPRKEYMAX(PIPE_A), D_ALL); - MMIO_D(SPROFFSET(PIPE_A), D_ALL); - MMIO_D(SPRSCALE(PIPE_A), D_ALL); - 
MMIO_D(SPRSURFLIVE(PIPE_A), D_ALL); - - MMIO_D(SPRCTL(PIPE_B), D_ALL); - MMIO_D(SPRLINOFF(PIPE_B), D_ALL); - MMIO_D(SPRSTRIDE(PIPE_B), D_ALL); - MMIO_D(SPRPOS(PIPE_B), D_ALL); - MMIO_D(SPRSIZE(PIPE_B), D_ALL); - MMIO_D(SPRKEYVAL(PIPE_B), D_ALL); - MMIO_D(SPRKEYMSK(PIPE_B), D_ALL); - MMIO_DH(SPRSURF(PIPE_B), D_ALL, NULL, spr_surf_mmio_write); - MMIO_D(SPRKEYMAX(PIPE_B), D_ALL); - MMIO_D(SPROFFSET(PIPE_B), D_ALL); - MMIO_D(SPRSCALE(PIPE_B), D_ALL); - MMIO_D(SPRSURFLIVE(PIPE_B), D_ALL); - - MMIO_D(SPRCTL(PIPE_C), D_ALL); - MMIO_D(SPRLINOFF(PIPE_C), D_ALL); - MMIO_D(SPRSTRIDE(PIPE_C), D_ALL); - MMIO_D(SPRPOS(PIPE_C), D_ALL); - MMIO_D(SPRSIZE(PIPE_C), D_ALL); - MMIO_D(SPRKEYVAL(PIPE_C), D_ALL); - MMIO_D(SPRKEYMSK(PIPE_C), D_ALL); - MMIO_DH(SPRSURF(PIPE_C), D_ALL, NULL, spr_surf_mmio_write); - MMIO_D(SPRKEYMAX(PIPE_C), D_ALL); - MMIO_D(SPROFFSET(PIPE_C), D_ALL); - MMIO_D(SPRSCALE(PIPE_C), D_ALL); - MMIO_D(SPRSURFLIVE(PIPE_C), D_ALL); + MMIO_D(DSPCNTR(PIPE_A), D_BDW); + MMIO_D(DSPADDR(PIPE_A), D_BDW); + MMIO_D(DSPSTRIDE(PIPE_A), D_BDW); + MMIO_D(DSPPOS(PIPE_A), D_BDW); + MMIO_D(DSPSIZE(PIPE_A), D_BDW); + MMIO_DH(DSPSURF(PIPE_A), D_BDW, NULL, pri_surf_mmio_write); + MMIO_D(DSPOFFSET(PIPE_A), D_BDW); + MMIO_D(DSPSURFLIVE(PIPE_A), D_BDW); + + MMIO_D(DSPCNTR(PIPE_B), D_BDW); + MMIO_D(DSPADDR(PIPE_B), D_BDW); + MMIO_D(DSPSTRIDE(PIPE_B), D_BDW); + MMIO_D(DSPPOS(PIPE_B), D_BDW); + MMIO_D(DSPSIZE(PIPE_B), D_BDW); + MMIO_DH(DSPSURF(PIPE_B), D_BDW, NULL, pri_surf_mmio_write); + MMIO_D(DSPOFFSET(PIPE_B), D_BDW); + MMIO_D(DSPSURFLIVE(PIPE_B), D_BDW); + + MMIO_D(DSPCNTR(PIPE_C), D_BDW); + MMIO_D(DSPADDR(PIPE_C), D_BDW); + MMIO_D(DSPSTRIDE(PIPE_C), D_BDW); + MMIO_D(DSPPOS(PIPE_C), D_BDW); + MMIO_D(DSPSIZE(PIPE_C), D_BDW); + MMIO_DH(DSPSURF(PIPE_C), D_BDW, NULL, pri_surf_mmio_write); + MMIO_D(DSPOFFSET(PIPE_C), D_BDW); + MMIO_D(DSPSURFLIVE(PIPE_C), D_BDW); + + MMIO_D(SPRCTL(PIPE_A), D_BDW); + MMIO_D(SPRLINOFF(PIPE_A), D_BDW); + MMIO_D(SPRSTRIDE(PIPE_A), D_BDW); + MMIO_D(SPRPOS(PIPE_A), 
D_BDW); + MMIO_D(SPRSIZE(PIPE_A), D_BDW); + MMIO_D(SPRKEYVAL(PIPE_A), D_BDW); + MMIO_D(SPRKEYMSK(PIPE_A), D_BDW); + MMIO_DH(SPRSURF(PIPE_A), D_BDW, NULL, spr_surf_mmio_write); + MMIO_D(SPRKEYMAX(PIPE_A), D_BDW); + MMIO_D(SPROFFSET(PIPE_A), D_BDW); + MMIO_D(SPRSCALE(PIPE_A), D_BDW); + MMIO_D(SPRSURFLIVE(PIPE_A), D_BDW); + + MMIO_D(SPRCTL(PIPE_B), D_BDW); + MMIO_D(SPRLINOFF(PIPE_B), D_BDW); + MMIO_D(SPRSTRIDE(PIPE_B), D_BDW); + MMIO_D(SPRPOS(PIPE_B), D_BDW); + MMIO_D(SPRSIZE(PIPE_B), D_BDW); + MMIO_D(SPRKEYVAL(PIPE_B), D_BDW); + MMIO_D(SPRKEYMSK(PIPE_B), D_BDW); + MMIO_DH(SPRSURF(PIPE_B), D_BDW, NULL, spr_surf_mmio_write); + MMIO_D(SPRKEYMAX(PIPE_B), D_BDW); + MMIO_D(SPROFFSET(PIPE_B), D_BDW); + MMIO_D(SPRSCALE(PIPE_B), D_BDW); + MMIO_D(SPRSURFLIVE(PIPE_B), D_BDW); + + MMIO_D(SPRCTL(PIPE_C), D_BDW); + MMIO_D(SPRLINOFF(PIPE_C), D_BDW); + MMIO_D(SPRSTRIDE(PIPE_C), D_BDW); + MMIO_D(SPRPOS(PIPE_C), D_BDW); + MMIO_D(SPRSIZE(PIPE_C), D_BDW); + MMIO_D(SPRKEYVAL(PIPE_C), D_BDW); + MMIO_D(SPRKEYMSK(PIPE_C), D_BDW); + MMIO_DH(SPRSURF(PIPE_C), D_BDW, NULL, spr_surf_mmio_write); + MMIO_D(SPRKEYMAX(PIPE_C), D_BDW); + MMIO_D(SPROFFSET(PIPE_C), D_BDW); + MMIO_D(SPRSCALE(PIPE_C), D_BDW); + MMIO_D(SPRSURFLIVE(PIPE_C), D_BDW); MMIO_D(HTOTAL(TRANSCODER_A), D_ALL); MMIO_D(HBLANK(TRANSCODER_A), D_ALL); @@ -2229,12 +2349,12 @@ static int init_generic_mmio_info(struct intel_gvt *gvt) MMIO_D(PCH_PP_ON_DELAYS, D_ALL); MMIO_D(PCH_PP_OFF_DELAYS, D_ALL); - MMIO_DH(_MMIO(0xe651c), D_ALL, dpy_reg_mmio_read, NULL); - MMIO_DH(_MMIO(0xe661c), D_ALL, dpy_reg_mmio_read, NULL); - MMIO_DH(_MMIO(0xe671c), D_ALL, dpy_reg_mmio_read, NULL); - MMIO_DH(_MMIO(0xe681c), D_ALL, dpy_reg_mmio_read, NULL); - MMIO_DH(_MMIO(0xe6c04), D_ALL, dpy_reg_mmio_read, NULL); - MMIO_DH(_MMIO(0xe6e1c), D_ALL, dpy_reg_mmio_read, NULL); + MMIO_DH(_MMIO(0xe651c), D_ALL, NULL, mmio_write_empty); + MMIO_DH(_MMIO(0xe661c), D_ALL, NULL, mmio_write_empty); + MMIO_DH(_MMIO(0xe671c), D_ALL, NULL, mmio_write_empty); + 
MMIO_DH(_MMIO(0xe681c), D_ALL, NULL, mmio_write_empty); + MMIO_DH(_MMIO(0xe6c04), D_ALL, NULL, mmio_write_empty); + MMIO_DH(_MMIO(0xe6e1c), D_ALL, NULL, mmio_write_empty); MMIO_RO(PCH_PORT_HOTPLUG, D_ALL, 0, PORTA_HOTPLUG_STATUS_MASK @@ -2804,6 +2924,126 @@ static int init_broadwell_mmio_info(struct intel_gvt *gvt) return 0; } +static int skl_plane_surf_write(struct intel_vgpu *vgpu, unsigned int offset, + void *p_data, unsigned int bytes) +{ + struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv; + unsigned int pipe = SKL_PLANE_REG_TO_PIPE(offset); + unsigned int plane = SKL_PLANE_REG_TO_PLANE(offset); + i915_reg_t reg_1ac = _MMIO(_REG_701AC(pipe, plane)); + int flip_event = SKL_FLIP_EVENT(pipe, plane); + + /* plane disable is not pv and it is indicated by value 0 */ + if (*(u32 *)p_data != 0 && VGPU_PVMMIO(vgpu) & PVMMIO_PLANE_UPDATE) + pvmmio_update_plane_register(vgpu, pipe, plane); + + write_vreg(vgpu, offset, p_data, bytes); + vgpu_vreg_t(vgpu, reg_1ac) = vgpu_vreg(vgpu, offset); + + if ((vgpu_vreg_t(vgpu, PIPECONF(pipe)) & I965_PIPECONF_ACTIVE) && + (vgpu->gvt->pipe_info[pipe].plane_owner[plane] == vgpu->id)) { + I915_WRITE(_MMIO(offset), vgpu_vreg(vgpu, offset)); + } + + set_bit(flip_event, vgpu->irq.flip_done_event[pipe]); + return 0; +} + +static int skl_plane_mmio_write(struct intel_vgpu *vgpu, unsigned int offset, + void *p_data, unsigned int bytes) +{ + struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv; + unsigned int pipe = SKL_PLANE_REG_TO_PIPE(offset); + unsigned int plane = SKL_PLANE_REG_TO_PLANE(offset); + + write_vreg(vgpu, offset, p_data, bytes); + if ((vgpu_vreg_t(vgpu, PIPECONF(pipe)) & I965_PIPECONF_ACTIVE) && + (vgpu->gvt->pipe_info[pipe].plane_owner[plane] == vgpu->id)) { + I915_WRITE(_MMIO(offset), vgpu_vreg(vgpu, offset)); + } + return 0; +} + +static int pv_plane_wm_mmio_write(struct intel_vgpu *vgpu, unsigned int offset, + void *p_data, unsigned int bytes) +{ + unsigned int pipe = SKL_PLANE_REG_TO_PIPE(offset); + unsigned int 
plane = SKL_PLANE_REG_TO_PLANE(offset); + struct pv_plane_wm_update *pv_plane_wm = + &vgpu->mmio.shared_page->pv_plane_wm; + int level; + + if (VGPU_PVMMIO(vgpu) & PVMMIO_PLANE_WM_UPDATE) { + for (level = 0; level <= pv_plane_wm->max_wm_level; level++) + skl_plane_mmio_write(vgpu, + i915_mmio_reg_offset( + PLANE_WM(pipe, plane, level)), + &pv_plane_wm->plane_wm_level[level], 4); + skl_plane_mmio_write(vgpu, + i915_mmio_reg_offset(PLANE_WM_TRANS(pipe, plane)), + &pv_plane_wm->plane_trans_wm_level, 4); + /* null function for PLANE_BUF_CFG and PLANE_NV12_BUF_CFG */ + } + return 0; +} + +#define MMIO_PIPES_SDH(prefix, plane, s, d, r, w) do { \ + int pipe; \ + for_each_pipe(dev_priv, pipe) \ + MMIO_F(prefix(pipe, plane), s, 0, 0, 0, d, r, w); \ +} while (0) + +#define MMIO_PLANES_SDH(prefix, s, d, r, w) do { \ + int pipe, plane; \ + for_each_pipe(dev_priv, pipe) \ + for_each_universal_plane(dev_priv, pipe, plane) \ + MMIO_F(prefix(pipe, plane), s, 0, 0, 0, d, r, w); \ +} while (0) + +#define MMIO_PLANES_DH(prefix, d, r, w) \ + MMIO_PLANES_SDH(prefix, 4, d, r, w) + +#define PLANE_WM_BASE(pipe, plane) _MMIO(_PLANE_WM_BASE(pipe, plane)) + +static int skl_ps_mmio_write(struct intel_vgpu *vgpu, unsigned int offset, + void *p_data, unsigned int bytes) +{ + struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv; + unsigned int pipe = SKL_PS_REG_TO_PIPE(offset); + unsigned int scaler = SKL_PS_REG_TO_SCALER(offset) - 1; + + if (pipe >= I915_MAX_PIPES || scaler >= SKL_NUM_SCALERS || + vgpu->gvt->pipe_info[pipe].scaler_owner[scaler] != vgpu->id) { + gvt_vgpu_err("Unsupport pipe %d, scaler %d scaling\n", + pipe, scaler); + return 0; + } + + if (!(vgpu_vreg_t(vgpu, PIPECONF(pipe)) & I965_PIPECONF_ACTIVE)) + return 0; + + if ((offset == _PS_1A_CTRL || offset == _PS_2A_CTRL || + offset == _PS_1B_CTRL || offset == _PS_2B_CTRL || + offset == _PS_1C_CTRL) && ((*(u32 *)p_data) & PS_SCALER_EN)) { + unsigned int plane; + + if (SKL_PS_REG_VALUE_TO_PLANE(*(u32 *)p_data) == 0) { + 
gvt_vgpu_err("Unsupport crtc scaling for UOS\n"); + return 0; + } + plane = SKL_PS_REG_VALUE_TO_PLANE(*(u32 *)p_data) - 1; + if (plane >= I915_MAX_PLANES || + vgpu->gvt->pipe_info[pipe].plane_owner[plane] != vgpu->id) { + gvt_vgpu_err("Unsupport plane %d scaling\n", plane); + return 0; + } + } + + write_vreg(vgpu, offset, p_data, bytes); + I915_WRITE(_MMIO(offset), vgpu_vreg(vgpu, offset)); + return 0; +} + static int init_skl_mmio_info(struct intel_gvt *gvt) { struct drm_i915_private *dev_priv = gvt->dev_priv; @@ -2854,129 +3094,65 @@ static int init_skl_mmio_info(struct intel_gvt *gvt) MMIO_D(_MMIO(0x6c05c), D_SKL_PLUS); MMIO_DH(_MMIO(0x6c060), D_SKL_PLUS, dpll_status_read, NULL); - MMIO_DH(SKL_PS_WIN_POS(PIPE_A, 0), D_SKL_PLUS, NULL, pf_write); - MMIO_DH(SKL_PS_WIN_POS(PIPE_A, 1), D_SKL_PLUS, NULL, pf_write); - MMIO_DH(SKL_PS_WIN_POS(PIPE_B, 0), D_SKL_PLUS, NULL, pf_write); - MMIO_DH(SKL_PS_WIN_POS(PIPE_B, 1), D_SKL_PLUS, NULL, pf_write); - MMIO_DH(SKL_PS_WIN_POS(PIPE_C, 0), D_SKL_PLUS, NULL, pf_write); - MMIO_DH(SKL_PS_WIN_POS(PIPE_C, 1), D_SKL_PLUS, NULL, pf_write); - - MMIO_DH(SKL_PS_WIN_SZ(PIPE_A, 0), D_SKL_PLUS, NULL, pf_write); - MMIO_DH(SKL_PS_WIN_SZ(PIPE_A, 1), D_SKL_PLUS, NULL, pf_write); - MMIO_DH(SKL_PS_WIN_SZ(PIPE_B, 0), D_SKL_PLUS, NULL, pf_write); - MMIO_DH(SKL_PS_WIN_SZ(PIPE_B, 1), D_SKL_PLUS, NULL, pf_write); - MMIO_DH(SKL_PS_WIN_SZ(PIPE_C, 0), D_SKL_PLUS, NULL, pf_write); - MMIO_DH(SKL_PS_WIN_SZ(PIPE_C, 1), D_SKL_PLUS, NULL, pf_write); - - MMIO_DH(SKL_PS_CTRL(PIPE_A, 0), D_SKL_PLUS, NULL, pf_write); - MMIO_DH(SKL_PS_CTRL(PIPE_A, 1), D_SKL_PLUS, NULL, pf_write); - MMIO_DH(SKL_PS_CTRL(PIPE_B, 0), D_SKL_PLUS, NULL, pf_write); - MMIO_DH(SKL_PS_CTRL(PIPE_B, 1), D_SKL_PLUS, NULL, pf_write); - MMIO_DH(SKL_PS_CTRL(PIPE_C, 0), D_SKL_PLUS, NULL, pf_write); - MMIO_DH(SKL_PS_CTRL(PIPE_C, 1), D_SKL_PLUS, NULL, pf_write); - - MMIO_DH(PLANE_BUF_CFG(PIPE_A, 0), D_SKL_PLUS, NULL, NULL); - MMIO_DH(PLANE_BUF_CFG(PIPE_A, 1), D_SKL_PLUS, NULL, NULL); - 
MMIO_DH(PLANE_BUF_CFG(PIPE_A, 2), D_SKL_PLUS, NULL, NULL); - MMIO_DH(PLANE_BUF_CFG(PIPE_A, 3), D_SKL_PLUS, NULL, NULL); - - MMIO_DH(PLANE_BUF_CFG(PIPE_B, 0), D_SKL_PLUS, NULL, NULL); - MMIO_DH(PLANE_BUF_CFG(PIPE_B, 1), D_SKL_PLUS, NULL, NULL); - MMIO_DH(PLANE_BUF_CFG(PIPE_B, 2), D_SKL_PLUS, NULL, NULL); - MMIO_DH(PLANE_BUF_CFG(PIPE_B, 3), D_SKL_PLUS, NULL, NULL); - - MMIO_DH(PLANE_BUF_CFG(PIPE_C, 0), D_SKL_PLUS, NULL, NULL); - MMIO_DH(PLANE_BUF_CFG(PIPE_C, 1), D_SKL_PLUS, NULL, NULL); - MMIO_DH(PLANE_BUF_CFG(PIPE_C, 2), D_SKL_PLUS, NULL, NULL); - MMIO_DH(PLANE_BUF_CFG(PIPE_C, 3), D_SKL_PLUS, NULL, NULL); + MMIO_DH(SKL_PS_WIN_POS(PIPE_A, 0), D_SKL_PLUS, NULL, skl_ps_mmio_write); + MMIO_DH(SKL_PS_WIN_POS(PIPE_A, 1), D_SKL_PLUS, NULL, skl_ps_mmio_write); + MMIO_DH(SKL_PS_WIN_POS(PIPE_B, 0), D_SKL_PLUS, NULL, skl_ps_mmio_write); + MMIO_DH(SKL_PS_WIN_POS(PIPE_B, 1), D_SKL_PLUS, NULL, skl_ps_mmio_write); + MMIO_DH(SKL_PS_WIN_POS(PIPE_C, 0), D_SKL_PLUS, NULL, skl_ps_mmio_write); + MMIO_DH(SKL_PS_WIN_POS(PIPE_C, 1), D_SKL_PLUS, NULL, skl_ps_mmio_write); + + MMIO_DH(SKL_PS_WIN_SZ(PIPE_A, 0), D_SKL_PLUS, NULL, skl_ps_mmio_write); + MMIO_DH(SKL_PS_WIN_SZ(PIPE_A, 1), D_SKL_PLUS, NULL, skl_ps_mmio_write); + MMIO_DH(SKL_PS_WIN_SZ(PIPE_B, 0), D_SKL_PLUS, NULL, skl_ps_mmio_write); + MMIO_DH(SKL_PS_WIN_SZ(PIPE_B, 1), D_SKL_PLUS, NULL, skl_ps_mmio_write); + MMIO_DH(SKL_PS_WIN_SZ(PIPE_C, 0), D_SKL_PLUS, NULL, skl_ps_mmio_write); + MMIO_DH(SKL_PS_WIN_SZ(PIPE_C, 1), D_SKL_PLUS, NULL, skl_ps_mmio_write); + + MMIO_DH(SKL_PS_CTRL(PIPE_A, 0), D_SKL_PLUS, NULL, skl_ps_mmio_write); + MMIO_DH(SKL_PS_CTRL(PIPE_A, 1), D_SKL_PLUS, NULL, skl_ps_mmio_write); + MMIO_DH(SKL_PS_CTRL(PIPE_B, 0), D_SKL_PLUS, NULL, skl_ps_mmio_write); + MMIO_DH(SKL_PS_CTRL(PIPE_B, 1), D_SKL_PLUS, NULL, skl_ps_mmio_write); + MMIO_DH(SKL_PS_CTRL(PIPE_C, 0), D_SKL_PLUS, NULL, skl_ps_mmio_write); + MMIO_DH(SKL_PS_CTRL(PIPE_C, 1), D_SKL_PLUS, NULL, skl_ps_mmio_write); + + MMIO_PLANES_DH(PLANE_CTL, D_SKL_PLUS, NULL, 
skl_plane_mmio_write); + MMIO_PLANES_DH(PLANE_STRIDE, D_SKL_PLUS, NULL, skl_plane_mmio_write); + MMIO_PLANES_DH(PLANE_POS, D_SKL_PLUS, NULL, skl_plane_mmio_write); + MMIO_PLANES_DH(PLANE_SIZE, D_SKL_PLUS, NULL, skl_plane_mmio_write); + MMIO_PLANES_DH(PLANE_KEYVAL, D_SKL_PLUS, NULL, skl_plane_mmio_write); + MMIO_PLANES_DH(PLANE_KEYMSK, D_SKL_PLUS, NULL, skl_plane_mmio_write); + + MMIO_PLANES_DH(PLANE_SURF, D_SKL_PLUS, NULL, skl_plane_surf_write); + + MMIO_PLANES_DH(PLANE_KEYMAX, D_SKL_PLUS, NULL, skl_plane_mmio_write); + MMIO_PLANES_DH(PLANE_OFFSET, D_SKL_PLUS, NULL, skl_plane_mmio_write); + MMIO_PLANES_DH(PLANE_AUX_DIST, D_SKL_PLUS, NULL, skl_plane_mmio_write); + MMIO_PLANES_DH(PLANE_AUX_OFFSET, D_SKL_PLUS, NULL, skl_plane_mmio_write); + + if (i915_modparams.avail_planes_per_pipe) { + MMIO_PLANES_SDH(PLANE_WM_BASE, 4 * 8, D_SKL_PLUS, NULL, NULL); + MMIO_PLANES_DH(PLANE_WM_TRANS, D_SKL_PLUS, NULL, NULL); + } else { + MMIO_PLANES_SDH(PLANE_WM_BASE, 4 * 8, D_SKL_PLUS, NULL, skl_plane_mmio_write); + MMIO_PLANES_DH(PLANE_WM_TRANS, D_SKL_PLUS, NULL, skl_plane_mmio_write); + } + + MMIO_PLANES_DH(PLANE_NV12_BUF_CFG, D_SKL_PLUS, NULL, + pv_plane_wm_mmio_write); + MMIO_PLANES_DH(PLANE_BUF_CFG, D_SKL_PLUS, NULL, NULL); MMIO_DH(CUR_BUF_CFG(PIPE_A), D_SKL_PLUS, NULL, NULL); MMIO_DH(CUR_BUF_CFG(PIPE_B), D_SKL_PLUS, NULL, NULL); MMIO_DH(CUR_BUF_CFG(PIPE_C), D_SKL_PLUS, NULL, NULL); - MMIO_F(PLANE_WM(PIPE_A, 0, 0), 4 * 8, 0, 0, 0, D_SKL_PLUS, NULL, NULL); - MMIO_F(PLANE_WM(PIPE_A, 1, 0), 4 * 8, 0, 0, 0, D_SKL_PLUS, NULL, NULL); - MMIO_F(PLANE_WM(PIPE_A, 2, 0), 4 * 8, 0, 0, 0, D_SKL_PLUS, NULL, NULL); - - MMIO_F(PLANE_WM(PIPE_B, 0, 0), 4 * 8, 0, 0, 0, D_SKL_PLUS, NULL, NULL); - MMIO_F(PLANE_WM(PIPE_B, 1, 0), 4 * 8, 0, 0, 0, D_SKL_PLUS, NULL, NULL); - MMIO_F(PLANE_WM(PIPE_B, 2, 0), 4 * 8, 0, 0, 0, D_SKL_PLUS, NULL, NULL); - - MMIO_F(PLANE_WM(PIPE_C, 0, 0), 4 * 8, 0, 0, 0, D_SKL_PLUS, NULL, NULL); - MMIO_F(PLANE_WM(PIPE_C, 1, 0), 4 * 8, 0, 0, 0, D_SKL_PLUS, NULL, NULL); - 
MMIO_F(PLANE_WM(PIPE_C, 2, 0), 4 * 8, 0, 0, 0, D_SKL_PLUS, NULL, NULL); - MMIO_F(CUR_WM(PIPE_A, 0), 4 * 8, 0, 0, 0, D_SKL_PLUS, NULL, NULL); MMIO_F(CUR_WM(PIPE_B, 0), 4 * 8, 0, 0, 0, D_SKL_PLUS, NULL, NULL); MMIO_F(CUR_WM(PIPE_C, 0), 4 * 8, 0, 0, 0, D_SKL_PLUS, NULL, NULL); - MMIO_DH(PLANE_WM_TRANS(PIPE_A, 0), D_SKL_PLUS, NULL, NULL); - MMIO_DH(PLANE_WM_TRANS(PIPE_A, 1), D_SKL_PLUS, NULL, NULL); - MMIO_DH(PLANE_WM_TRANS(PIPE_A, 2), D_SKL_PLUS, NULL, NULL); - - MMIO_DH(PLANE_WM_TRANS(PIPE_B, 0), D_SKL_PLUS, NULL, NULL); - MMIO_DH(PLANE_WM_TRANS(PIPE_B, 1), D_SKL_PLUS, NULL, NULL); - MMIO_DH(PLANE_WM_TRANS(PIPE_B, 2), D_SKL_PLUS, NULL, NULL); - - MMIO_DH(PLANE_WM_TRANS(PIPE_C, 0), D_SKL_PLUS, NULL, NULL); - MMIO_DH(PLANE_WM_TRANS(PIPE_C, 1), D_SKL_PLUS, NULL, NULL); - MMIO_DH(PLANE_WM_TRANS(PIPE_C, 2), D_SKL_PLUS, NULL, NULL); - MMIO_DH(CUR_WM_TRANS(PIPE_A), D_SKL_PLUS, NULL, NULL); MMIO_DH(CUR_WM_TRANS(PIPE_B), D_SKL_PLUS, NULL, NULL); MMIO_DH(CUR_WM_TRANS(PIPE_C), D_SKL_PLUS, NULL, NULL); - MMIO_DH(PLANE_NV12_BUF_CFG(PIPE_A, 0), D_SKL_PLUS, NULL, NULL); - MMIO_DH(PLANE_NV12_BUF_CFG(PIPE_A, 1), D_SKL_PLUS, NULL, NULL); - MMIO_DH(PLANE_NV12_BUF_CFG(PIPE_A, 2), D_SKL_PLUS, NULL, NULL); - MMIO_DH(PLANE_NV12_BUF_CFG(PIPE_A, 3), D_SKL_PLUS, NULL, NULL); - - MMIO_DH(PLANE_NV12_BUF_CFG(PIPE_B, 0), D_SKL_PLUS, NULL, NULL); - MMIO_DH(PLANE_NV12_BUF_CFG(PIPE_B, 1), D_SKL_PLUS, NULL, NULL); - MMIO_DH(PLANE_NV12_BUF_CFG(PIPE_B, 2), D_SKL_PLUS, NULL, NULL); - MMIO_DH(PLANE_NV12_BUF_CFG(PIPE_B, 3), D_SKL_PLUS, NULL, NULL); - - MMIO_DH(PLANE_NV12_BUF_CFG(PIPE_C, 0), D_SKL_PLUS, NULL, NULL); - MMIO_DH(PLANE_NV12_BUF_CFG(PIPE_C, 1), D_SKL_PLUS, NULL, NULL); - MMIO_DH(PLANE_NV12_BUF_CFG(PIPE_C, 2), D_SKL_PLUS, NULL, NULL); - MMIO_DH(PLANE_NV12_BUF_CFG(PIPE_C, 3), D_SKL_PLUS, NULL, NULL); - - MMIO_DH(_MMIO(_REG_701C0(PIPE_A, 1)), D_SKL_PLUS, NULL, NULL); - MMIO_DH(_MMIO(_REG_701C0(PIPE_A, 2)), D_SKL_PLUS, NULL, NULL); - MMIO_DH(_MMIO(_REG_701C0(PIPE_A, 3)), D_SKL_PLUS, NULL, NULL); - 
MMIO_DH(_MMIO(_REG_701C0(PIPE_A, 4)), D_SKL_PLUS, NULL, NULL); - - MMIO_DH(_MMIO(_REG_701C0(PIPE_B, 1)), D_SKL_PLUS, NULL, NULL); - MMIO_DH(_MMIO(_REG_701C0(PIPE_B, 2)), D_SKL_PLUS, NULL, NULL); - MMIO_DH(_MMIO(_REG_701C0(PIPE_B, 3)), D_SKL_PLUS, NULL, NULL); - MMIO_DH(_MMIO(_REG_701C0(PIPE_B, 4)), D_SKL_PLUS, NULL, NULL); - - MMIO_DH(_MMIO(_REG_701C0(PIPE_C, 1)), D_SKL_PLUS, NULL, NULL); - MMIO_DH(_MMIO(_REG_701C0(PIPE_C, 2)), D_SKL_PLUS, NULL, NULL); - MMIO_DH(_MMIO(_REG_701C0(PIPE_C, 3)), D_SKL_PLUS, NULL, NULL); - MMIO_DH(_MMIO(_REG_701C0(PIPE_C, 4)), D_SKL_PLUS, NULL, NULL); - - MMIO_DH(_MMIO(_REG_701C4(PIPE_A, 1)), D_SKL_PLUS, NULL, NULL); - MMIO_DH(_MMIO(_REG_701C4(PIPE_A, 2)), D_SKL_PLUS, NULL, NULL); - MMIO_DH(_MMIO(_REG_701C4(PIPE_A, 3)), D_SKL_PLUS, NULL, NULL); - MMIO_DH(_MMIO(_REG_701C4(PIPE_A, 4)), D_SKL_PLUS, NULL, NULL); - - MMIO_DH(_MMIO(_REG_701C4(PIPE_B, 1)), D_SKL_PLUS, NULL, NULL); - MMIO_DH(_MMIO(_REG_701C4(PIPE_B, 2)), D_SKL_PLUS, NULL, NULL); - MMIO_DH(_MMIO(_REG_701C4(PIPE_B, 3)), D_SKL_PLUS, NULL, NULL); - MMIO_DH(_MMIO(_REG_701C4(PIPE_B, 4)), D_SKL_PLUS, NULL, NULL); - - MMIO_DH(_MMIO(_REG_701C4(PIPE_C, 1)), D_SKL_PLUS, NULL, NULL); - MMIO_DH(_MMIO(_REG_701C4(PIPE_C, 2)), D_SKL_PLUS, NULL, NULL); - MMIO_DH(_MMIO(_REG_701C4(PIPE_C, 3)), D_SKL_PLUS, NULL, NULL); - MMIO_DH(_MMIO(_REG_701C4(PIPE_C, 4)), D_SKL_PLUS, NULL, NULL); - - MMIO_D(_MMIO(0x70380), D_SKL_PLUS); - MMIO_D(_MMIO(0x71380), D_SKL_PLUS); - MMIO_D(_MMIO(0x72380), D_SKL_PLUS); - MMIO_D(_MMIO(0x7239c), D_SKL_PLUS); - MMIO_D(_MMIO(0x7039c), D_SKL_PLUS); - MMIO_D(_MMIO(0x8f074), D_SKL_PLUS); MMIO_D(_MMIO(0x8f004), D_SKL_PLUS); MMIO_D(_MMIO(0x8f034), D_SKL_PLUS); @@ -3031,16 +3207,6 @@ static int init_skl_mmio_info(struct intel_gvt *gvt) MMIO_D(_MMIO(0x71034), D_SKL_PLUS); MMIO_D(_MMIO(0x72034), D_SKL_PLUS); - MMIO_D(_MMIO(_PLANE_KEYVAL_1(PIPE_A)), D_SKL_PLUS); - MMIO_D(_MMIO(_PLANE_KEYVAL_1(PIPE_B)), D_SKL_PLUS); - MMIO_D(_MMIO(_PLANE_KEYVAL_1(PIPE_C)), D_SKL_PLUS); - 
MMIO_D(_MMIO(_PLANE_KEYMAX_1(PIPE_A)), D_SKL_PLUS); - MMIO_D(_MMIO(_PLANE_KEYMAX_1(PIPE_B)), D_SKL_PLUS); - MMIO_D(_MMIO(_PLANE_KEYMAX_1(PIPE_C)), D_SKL_PLUS); - MMIO_D(_MMIO(_PLANE_KEYMSK_1(PIPE_A)), D_SKL_PLUS); - MMIO_D(_MMIO(_PLANE_KEYMSK_1(PIPE_B)), D_SKL_PLUS); - MMIO_D(_MMIO(_PLANE_KEYMSK_1(PIPE_C)), D_SKL_PLUS); - MMIO_D(_MMIO(0x44500), D_SKL_PLUS); MMIO_DFH(GEN9_CSFE_CHICKEN1_RCS, D_SKL_PLUS, F_CMD_ACCESS, NULL, NULL); MMIO_DFH(GEN8_HDC_CHICKEN1, D_SKL_PLUS, F_MODE_MASK | F_CMD_ACCESS, @@ -3051,6 +3217,8 @@ static int init_skl_mmio_info(struct intel_gvt *gvt) MMIO_D(_MMIO(0x4ab8), D_KBL); MMIO_D(_MMIO(0x2248), D_KBL | D_SKL); + MMIO_D(HUC_STATUS2, D_SKL_PLUS); + return 0; } @@ -3269,12 +3437,15 @@ void intel_gvt_clean_mmio_info(struct intel_gvt *gvt) vfree(gvt->mmio.mmio_attribute); gvt->mmio.mmio_attribute = NULL; + + vfree(gvt->mmio.mmio_host_cache); + gvt->mmio.mmio_host_cache = NULL; } /* Special MMIO blocks. */ static struct gvt_mmio_block mmio_blocks[] = { {D_SKL_PLUS, _MMIO(CSR_MMIO_START_RANGE), 0x3000, NULL, NULL}, - {D_ALL, _MMIO(MCHBAR_MIRROR_BASE_SNB), 0x40000, NULL, NULL}, + {D_ALL, MCHBAR_MEM_BASE, 0x4000, NULL, NULL}, {D_ALL, _MMIO(VGT_PVINFO_PAGE), VGT_PVINFO_SIZE, pvinfo_mmio_read, pvinfo_mmio_write}, {D_ALL, LGC_PALETTE(PIPE_A, 0), 1024, NULL, NULL}, @@ -3303,6 +3474,12 @@ int intel_gvt_setup_mmio_info(struct intel_gvt *gvt) if (!gvt->mmio.mmio_attribute) return -ENOMEM; + gvt->mmio.mmio_host_cache = vzalloc(info->mmio_size); + if (!gvt->mmio.mmio_host_cache) { + vfree(gvt->mmio.mmio_attribute); + return -ENOMEM; + } + ret = init_generic_mmio_info(gvt); if (ret) goto err; diff --git a/drivers/gpu/drm/i915/gvt/hypercall.h b/drivers/gpu/drm/i915/gvt/hypercall.h index 5af11cf1b4823..4c550627e78e8 100644 --- a/drivers/gpu/drm/i915/gvt/hypercall.h +++ b/drivers/gpu/drm/i915/gvt/hypercall.h @@ -60,13 +60,17 @@ struct intel_gvt_mpt { unsigned long mfn, unsigned int nr, bool map); int (*set_trap_area)(unsigned long handle, u64 start, u64 end, 
bool map); + int (*set_pvmmio)(unsigned long handle, u64 start, u64 end, + bool map); int (*set_opregion)(void *vgpu); int (*get_vfio_device)(void *vgpu); void (*put_vfio_device)(void *vgpu); bool (*is_valid_gfn)(unsigned long handle, unsigned long gfn); + int (*dom0_ready)(void); }; extern struct intel_gvt_mpt xengt_mpt; extern struct intel_gvt_mpt kvmgt_mpt; +extern struct intel_gvt_mpt acrn_gvt_mpt; #endif /* _GVT_HYPERCALL_H_ */ diff --git a/drivers/gpu/drm/i915/gvt/interrupt.c b/drivers/gpu/drm/i915/gvt/interrupt.c index 5daa23ae566b0..06ce906b66731 100644 --- a/drivers/gpu/drm/i915/gvt/interrupt.c +++ b/drivers/gpu/drm/i915/gvt/interrupt.c @@ -69,6 +69,7 @@ static const char * const irq_name[INTEL_GVT_EVENT_MAX] = { [VCS_PAGE_DIRECTORY_FAULT] = "Video page directory faults", [VCS_AS_CONTEXT_SWITCH] = "Video AS Context Switch Interrupt", [VCS2_MI_USER_INTERRUPT] = "VCS2 Video CS MI USER INTERRUPT", + [VCS2_CMD_STREAMER_ERR] = "VCS2 Video CS error interrupt", [VCS2_MI_FLUSH_DW] = "VCS2 Video MI FLUSH DW notify", [VCS2_AS_CONTEXT_SWITCH] = "VCS2 Context Switch Interrupt", @@ -524,21 +525,26 @@ static void gen8_init_irq( /* GEN8 interrupt GT0 events */ SET_BIT_INFO(irq, 0, RCS_MI_USER_INTERRUPT, INTEL_GVT_IRQ_INFO_GT0); + SET_BIT_INFO(irq, 3, RCS_CMD_STREAMER_ERR, INTEL_GVT_IRQ_INFO_GT0); SET_BIT_INFO(irq, 4, RCS_PIPE_CONTROL, INTEL_GVT_IRQ_INFO_GT0); SET_BIT_INFO(irq, 8, RCS_AS_CONTEXT_SWITCH, INTEL_GVT_IRQ_INFO_GT0); SET_BIT_INFO(irq, 16, BCS_MI_USER_INTERRUPT, INTEL_GVT_IRQ_INFO_GT0); + SET_BIT_INFO(irq, 19, BCS_CMD_STREAMER_ERR, INTEL_GVT_IRQ_INFO_GT0); SET_BIT_INFO(irq, 20, BCS_MI_FLUSH_DW, INTEL_GVT_IRQ_INFO_GT0); SET_BIT_INFO(irq, 24, BCS_AS_CONTEXT_SWITCH, INTEL_GVT_IRQ_INFO_GT0); /* GEN8 interrupt GT1 events */ SET_BIT_INFO(irq, 0, VCS_MI_USER_INTERRUPT, INTEL_GVT_IRQ_INFO_GT1); + SET_BIT_INFO(irq, 3, VCS_CMD_STREAMER_ERR, INTEL_GVT_IRQ_INFO_GT1); SET_BIT_INFO(irq, 4, VCS_MI_FLUSH_DW, INTEL_GVT_IRQ_INFO_GT1); SET_BIT_INFO(irq, 8, VCS_AS_CONTEXT_SWITCH, 
INTEL_GVT_IRQ_INFO_GT1); if (HAS_BSD2(gvt->dev_priv)) { SET_BIT_INFO(irq, 16, VCS2_MI_USER_INTERRUPT, INTEL_GVT_IRQ_INFO_GT1); + SET_BIT_INFO(irq, 19, VCS2_CMD_STREAMER_ERR, + INTEL_GVT_IRQ_INFO_GT1); SET_BIT_INFO(irq, 20, VCS2_MI_FLUSH_DW, INTEL_GVT_IRQ_INFO_GT1); SET_BIT_INFO(irq, 24, VCS2_AS_CONTEXT_SWITCH, @@ -547,6 +553,7 @@ static void gen8_init_irq( /* GEN8 interrupt GT3 events */ SET_BIT_INFO(irq, 0, VECS_MI_USER_INTERRUPT, INTEL_GVT_IRQ_INFO_GT3); + SET_BIT_INFO(irq, 3, VECS_CMD_STREAMER_ERR, INTEL_GVT_IRQ_INFO_GT3); SET_BIT_INFO(irq, 4, VECS_MI_FLUSH_DW, INTEL_GVT_IRQ_INFO_GT3); SET_BIT_INFO(irq, 8, VECS_AS_CONTEXT_SWITCH, INTEL_GVT_IRQ_INFO_GT3); @@ -595,6 +602,10 @@ static void gen8_init_irq( SET_BIT_INFO(irq, 4, SPRITE_A_FLIP_DONE, INTEL_GVT_IRQ_INFO_DE_PIPE_A); SET_BIT_INFO(irq, 4, SPRITE_B_FLIP_DONE, INTEL_GVT_IRQ_INFO_DE_PIPE_B); SET_BIT_INFO(irq, 4, SPRITE_C_FLIP_DONE, INTEL_GVT_IRQ_INFO_DE_PIPE_C); + + SET_BIT_INFO(irq, 5, PLANE_3_A_FLIP_DONE, INTEL_GVT_IRQ_INFO_DE_PIPE_A); + SET_BIT_INFO(irq, 5, PLANE_3_B_FLIP_DONE, INTEL_GVT_IRQ_INFO_DE_PIPE_B); + SET_BIT_INFO(irq, 5, PLANE_3_C_FLIP_DONE, INTEL_GVT_IRQ_INFO_DE_PIPE_C); } /* GEN8 interrupt PCU events */ diff --git a/drivers/gpu/drm/i915/gvt/interrupt.h b/drivers/gpu/drm/i915/gvt/interrupt.h index 5313fb1b33e1a..6ec761a84557a 100644 --- a/drivers/gpu/drm/i915/gvt/interrupt.h +++ b/drivers/gpu/drm/i915/gvt/interrupt.h @@ -53,6 +53,7 @@ enum intel_gvt_event_type { VCS_AS_CONTEXT_SWITCH, VCS2_MI_USER_INTERRUPT, + VCS2_CMD_STREAMER_ERR, VCS2_MI_FLUSH_DW, VCS2_AS_CONTEXT_SWITCH, @@ -64,6 +65,7 @@ enum intel_gvt_event_type { BCS_AS_CONTEXT_SWITCH, VECS_MI_USER_INTERRUPT, + VECS_CMD_STREAMER_ERR, VECS_MI_FLUSH_DW, VECS_AS_CONTEXT_SWITCH, @@ -92,6 +94,9 @@ enum intel_gvt_event_type { SPRITE_A_FLIP_DONE, SPRITE_B_FLIP_DONE, SPRITE_C_FLIP_DONE, + PLANE_3_A_FLIP_DONE, + PLANE_3_B_FLIP_DONE, + PLANE_3_C_FLIP_DONE, PCU_THERMAL, PCU_PCODE2DRIVER_MAILBOX, diff --git a/drivers/gpu/drm/i915/gvt/kvmgt.c 
b/drivers/gpu/drm/i915/gvt/kvmgt.c index 9ad89e38f6c07..12e4203c06dbd 100644 --- a/drivers/gpu/drm/i915/gvt/kvmgt.c +++ b/drivers/gpu/drm/i915/gvt/kvmgt.c @@ -996,7 +996,7 @@ static int intel_vgpu_mmap(struct mdev_device *mdev, struct vm_area_struct *vma) { unsigned int index; u64 virtaddr; - unsigned long req_size, pgoff = 0; + unsigned long req_size, pgoff, req_start; pgprot_t pg_prot; struct intel_vgpu *vgpu = mdev_get_drvdata(mdev); @@ -1014,7 +1014,17 @@ static int intel_vgpu_mmap(struct mdev_device *mdev, struct vm_area_struct *vma) pg_prot = vma->vm_page_prot; virtaddr = vma->vm_start; req_size = vma->vm_end - vma->vm_start; - pgoff = vgpu_aperture_pa_base(vgpu) >> PAGE_SHIFT; + pgoff = vma->vm_pgoff & + ((1U << (VFIO_PCI_OFFSET_SHIFT - PAGE_SHIFT)) - 1); + req_start = pgoff << PAGE_SHIFT; + + if (!intel_vgpu_in_aperture(vgpu, req_start)) + return -EINVAL; + if (req_start + req_size > + vgpu_aperture_offset(vgpu) + vgpu_aperture_sz(vgpu)) + return -EINVAL; + + pgoff = (gvt_aperture_pa_base(vgpu->gvt) >> PAGE_SHIFT) + pgoff; return remap_pfn_range(vma, virtaddr, pgoff, req_size, pg_prot); } diff --git a/drivers/gpu/drm/i915/gvt/mmio.c b/drivers/gpu/drm/i915/gvt/mmio.c index 9bb9a85c992ca..e6e38393595c4 100644 --- a/drivers/gpu/drm/i915/gvt/mmio.c +++ b/drivers/gpu/drm/i915/gvt/mmio.c @@ -39,7 +39,7 @@ /** * intel_vgpu_gpa_to_mmio_offset - translate a GPA to MMIO offset * @vgpu: a vGPU - * + * @gpa: guest physical address * Returns: * Zero on success, negative error code if failed */ @@ -213,6 +213,14 @@ int intel_vgpu_emulate_mmio_write(struct intel_vgpu *vgpu, uint64_t pa, if (ret < 0) goto err; + if (vgpu->entire_nonctxmmio_checked + && intel_gvt_mmio_is_non_context(vgpu->gvt, offset) + && vgpu_vreg(vgpu, offset) != gvt_host_reg(gvt, offset)) { + gvt_err("vgpu%d unexpected non-context MMIO change at 0x%x:0x%x,0x%x\n", + vgpu->id, offset, vgpu_vreg(vgpu, offset), + gvt_host_reg(gvt, offset)); + } + intel_gvt_mmio_set_accessed(gvt, offset); ret = 0; goto out; 
@@ -228,13 +236,14 @@ int intel_vgpu_emulate_mmio_write(struct intel_vgpu *vgpu, uint64_t pa, /** * intel_vgpu_reset_mmio - reset virtual MMIO space * @vgpu: a vGPU - * + * @dmlr: vGPU Device Model Level Reset or GT Reset */ void intel_vgpu_reset_mmio(struct intel_vgpu *vgpu, bool dmlr) { struct intel_gvt *gvt = vgpu->gvt; const struct intel_gvt_device_info *info = &gvt->device_info; void *mmio = gvt->firmware.mmio; + struct drm_i915_private *dev_priv = gvt->dev_priv; if (dmlr) { memcpy(vgpu->mmio.vreg, mmio, info->mmio_size); @@ -282,6 +291,23 @@ void intel_vgpu_reset_mmio(struct intel_vgpu *vgpu, bool dmlr) memcpy(vgpu->mmio.sreg, mmio, GVT_GEN8_MMIO_RESET_OFFSET); } + /* below vreg init value are got from handler.c, + * which won't change during vgpu life cycle + */ + vgpu_vreg(vgpu, 0xe651c) = 1 << 17; + vgpu_vreg(vgpu, 0xe661c) = 1 << 17; + vgpu_vreg(vgpu, 0xe671c) = 1 << 17; + vgpu_vreg(vgpu, 0xe681c) = 1 << 17; + vgpu_vreg(vgpu, 0xe6c04) = 3; + vgpu_vreg(vgpu, 0xe6e1c) = 0x2f << 16; + + if (HAS_HUC_UCODE(dev_priv)) { + mmio_hw_access_pre(dev_priv); + vgpu_vreg_t(vgpu, HUC_STATUS2) = I915_READ(HUC_STATUS2); + mmio_hw_access_post(dev_priv); + } + /* Non-context MMIOs need entire check again if mmio/vgpu reset */ + vgpu->entire_nonctxmmio_checked = false; } /** @@ -295,11 +321,20 @@ int intel_vgpu_init_mmio(struct intel_vgpu *vgpu) { const struct intel_gvt_device_info *info = &vgpu->gvt->device_info; - vgpu->mmio.vreg = vzalloc(array_size(info->mmio_size, 2)); + BUILD_BUG_ON(sizeof(struct gvt_shared_page) != PAGE_SIZE); + + vgpu->mmio.sreg = vzalloc(info->mmio_size); + vgpu->mmio.vreg = intel_gvt_allocate_vreg(vgpu); if (!vgpu->mmio.vreg) return -ENOMEM; - vgpu->mmio.sreg = vgpu->mmio.vreg + info->mmio_size; + vgpu->mmio.shared_page = (struct gvt_shared_page *) __get_free_pages( + GFP_KERNEL, 0); + if (!vgpu->mmio.shared_page) { + intel_gvt_free_vreg(vgpu); + vgpu->mmio.vreg = NULL; + return -ENOMEM; + } intel_vgpu_reset_mmio(vgpu, true); @@ -313,6 +348,8 @@ 
int intel_vgpu_init_mmio(struct intel_vgpu *vgpu) */ void intel_vgpu_clean_mmio(struct intel_vgpu *vgpu) { - vfree(vgpu->mmio.vreg); - vgpu->mmio.vreg = vgpu->mmio.sreg = NULL; + vfree(vgpu->mmio.sreg); + intel_gvt_free_vreg(vgpu); + free_pages((unsigned long) vgpu->mmio.shared_page, 0); + vgpu->mmio.vreg = vgpu->mmio.sreg = vgpu->mmio.shared_page = NULL; } diff --git a/drivers/gpu/drm/i915/gvt/mmio_context.c b/drivers/gpu/drm/i915/gvt/mmio_context.c index e872f4847fbe0..4e99cd0e4fbe3 100644 --- a/drivers/gpu/drm/i915/gvt/mmio_context.c +++ b/drivers/gpu/drm/i915/gvt/mmio_context.c @@ -144,7 +144,7 @@ static struct engine_mmio gen9_engine_mmio_list[] __cacheline_aligned = { {RCS, GAMT_CHKN_BIT_REG, 0x0, false}, /* 0x4ab8 */ {RCS, GEN9_GAMT_ECO_REG_RW_IA, 0x0, false}, /* 0x4ab0 */ - {RCS, GEN9_CSFE_CHICKEN1_RCS, 0x0, false}, /* 0x20d4 */ + {RCS, GEN9_CSFE_CHICKEN1_RCS, 0xffff, false}, /* 0x20d4 */ {RCS, GEN8_GARBCNTL, 0x0, false}, /* 0xb004 */ {RCS, GEN7_FF_THREAD_MODE, 0x0, false}, /* 0x20a0 */ @@ -171,6 +171,8 @@ static void load_render_mocs(struct drm_i915_private *dev_priv) int ring_id, i; for (ring_id = 0; ring_id < ARRAY_SIZE(regs); ring_id++) { + if (!HAS_ENGINE(dev_priv, ring_id)) + continue; offset.reg = regs[ring_id]; for (i = 0; i < GEN9_MOCS_SIZE; i++) { gen9_render_mocs.control_table[ring_id][i] = @@ -567,6 +569,86 @@ void intel_gvt_switch_mmio(struct intel_vgpu *pre, intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL); } +#define MMIO_COMPARE(vgpu, reg, mask) ({ \ + int ret; \ + u32 value = vgpu_vreg(vgpu, reg); \ + u32 host_value = gvt_host_reg(vgpu->gvt, reg); \ + \ + if (mask) { \ + value &= mask; \ + host_value &= mask; \ + } \ + if (host_value == value) { \ + ret = 0; \ + } else { \ + gvt_err("vgpu%d unconformance mmio 0x%x:0x%x,0x%x\n", \ + vgpu->id, reg, \ + vgpu_vreg(vgpu, reg), \ + gvt_host_reg(vgpu->gvt, reg)); \ + ret = -EINVAL; \ + } \ + ret; \ + }) + +static int noncontext_mmio_compare(struct intel_vgpu *vgpu, int ring_id) +{ + struct 
drm_i915_private *dev_priv = vgpu->gvt->dev_priv; + struct engine_mmio *mmio, *mmio_list; + struct intel_engine_cs *engine = dev_priv->engine[ring_id]; + + mmio_list = vgpu->gvt->engine_mmio_list.mmio; + + for (mmio = mmio_list; i915_mmio_reg_valid(mmio->reg); mmio++) { + if (mmio->ring_id != ring_id || mmio->in_context + || is_force_nonpriv_mmio(mmio->reg.reg) + || mmio->reg.reg == RING_MODE_GEN7(engine).reg) + continue; + + if (MMIO_COMPARE(vgpu, mmio->reg.reg, mmio->mask)) + return -EINVAL; + } + + return 0; +} + +static void get_host_mmio_snapshot(struct intel_gvt *gvt) +{ + struct drm_i915_private *dev_priv = gvt->dev_priv; + struct engine_mmio *mmio, *mmio_list; + + mmio_list = gvt->engine_mmio_list.mmio; + + if (!gvt->mmio.host_cache_initialized) { + /* Snapshot all the non-context MMIOs */ + for (mmio = mmio_list; i915_mmio_reg_valid(mmio->reg); mmio++) { + if (mmio->in_context) + continue; + + gvt_host_reg(gvt, mmio->reg.reg) = + I915_READ_FW(mmio->reg); + if (mmio->mask) + gvt_host_reg(gvt, mmio->reg.reg) &= mmio->mask; + } + gvt->mmio.host_cache_initialized = true; + } +} + +int intel_gvt_vgpu_conformance_check(struct intel_vgpu *vgpu, int ring_id) +{ + int ret; + + get_host_mmio_snapshot(vgpu->gvt); + + ret = noncontext_mmio_compare(vgpu, ring_id); + if (ret) + goto err; + + return 0; +err: + return ret; +} + + /** * intel_gvt_init_engine_mmio_context - Initiate the engine mmio list * @gvt: GVT device @@ -588,6 +670,8 @@ void intel_gvt_init_engine_mmio_context(struct intel_gvt *gvt) if (mmio->in_context) { gvt->engine_mmio_list.ctx_mmio_count[mmio->ring_id]++; intel_gvt_mmio_set_in_ctx(gvt, mmio->reg.reg); + } else { + intel_gvt_mmio_set_non_context(gvt, mmio->reg.reg); } } } diff --git a/drivers/gpu/drm/i915/gvt/mmio_context.h b/drivers/gpu/drm/i915/gvt/mmio_context.h index 5c3b9ff9f96aa..b5059de42af94 100644 --- a/drivers/gpu/drm/i915/gvt/mmio_context.h +++ b/drivers/gpu/drm/i915/gvt/mmio_context.h @@ -54,4 +54,5 @@ bool is_inhibit_context(struct 
intel_context *ce); int intel_vgpu_restore_inhibit_context(struct intel_vgpu *vgpu, struct i915_request *req); +int intel_gvt_vgpu_conformance_check(struct intel_vgpu *vgpu, int ring_id); #endif diff --git a/drivers/gpu/drm/i915/gvt/mpt.h b/drivers/gpu/drm/i915/gvt/mpt.h index 67f19992b226f..14fa2ea047fa4 100644 --- a/drivers/gpu/drm/i915/gvt/mpt.h +++ b/drivers/gpu/drm/i915/gvt/mpt.h @@ -45,6 +45,9 @@ /** * intel_gvt_hypervisor_host_init - init GVT-g host side + * @dev: i915 device + * @gvt: GVT device + * @ops: intel_gvt_ops interface * * Returns: * Zero on success, negative error code if failed @@ -61,6 +64,8 @@ static inline int intel_gvt_hypervisor_host_init(struct device *dev, /** * intel_gvt_hypervisor_host_exit - exit GVT-g host side + * @dev: i915 device + * @gvt: GVT device */ static inline void intel_gvt_hypervisor_host_exit(struct device *dev, void *gvt) @@ -75,6 +80,7 @@ static inline void intel_gvt_hypervisor_host_exit(struct device *dev, /** * intel_gvt_hypervisor_attach_vgpu - call hypervisor to initialize vGPU * related stuffs inside hypervisor. + * @vgpu: a vGPU * * Returns: * Zero on success, negative error code if failed. @@ -91,6 +97,7 @@ static inline int intel_gvt_hypervisor_attach_vgpu(struct intel_vgpu *vgpu) /** * intel_gvt_hypervisor_detach_vgpu - call hypervisor to release vGPU * related stuffs inside hypervisor. + * @vgpu: a vGPU * * Returns: * Zero on success, negative error code if failed. @@ -111,6 +118,7 @@ static inline void intel_gvt_hypervisor_detach_vgpu(struct intel_vgpu *vgpu) /** * intel_gvt_hypervisor_inject_msi - inject a MSI interrupt into vGPU + * @vgpu: a vGPU * * Returns: * Zero on success, negative error code if failed. 
@@ -142,7 +150,7 @@ static inline int intel_gvt_hypervisor_inject_msi(struct intel_vgpu *vgpu) } /** - * intel_gvt_hypervisor_set_wp_page - translate a host VA into MFN + * intel_gvt_hypervisor_virt_to_mfn - translate a host VA into MFN * @p: host kernel virtual address * * Returns: @@ -216,7 +224,7 @@ static inline int intel_gvt_hypervisor_write_gpa(struct intel_vgpu *vgpu, /** * intel_gvt_hypervisor_gfn_to_mfn - translate a GFN to MFN * @vgpu: a vGPU - * @gpfn: guest pfn + * @gfn: guest pfn * * Returns: * MFN on success, INTEL_GVT_INVALID_ADDR if failed. @@ -300,6 +308,27 @@ static inline int intel_gvt_hypervisor_set_trap_area( return intel_gvt_host.mpt->set_trap_area(vgpu->handle, start, end, map); } +/** + * intel_gvt_hypervisor_set_pvmmio - Set the pvmmio area + * @vgpu: a vGPU + * @start: the beginning of the guest physical address region + * @end: the end of the guest physical address region + * @map: map or unmap + * + * Returns: + * Zero on success, negative error code if failed. 
+ */ +static inline int intel_gvt_hypervisor_set_pvmmio( + struct intel_vgpu *vgpu, u64 start, u64 end, bool map) +{ + /* a MPT implementation could have MMIO trapped elsewhere */ + if (!intel_gvt_host.mpt->set_pvmmio) + return -ENOENT; + + return intel_gvt_host.mpt->set_pvmmio(vgpu->handle, start, end, map); +} + + /** * intel_gvt_hypervisor_set_opregion - Set opregion for guest * @vgpu: a vGPU @@ -362,4 +391,21 @@ static inline bool intel_gvt_hypervisor_is_valid_gfn( return intel_gvt_host.mpt->is_valid_gfn(vgpu->handle, gfn); } +/** + * intel_gvt_hypervisor_dom0_ready - Signal Dom 0 is ready for Dom U + * + * It's to raise a uevent to notify Dom 0 is ready to start a Dom U, so that + * Dom U can be started as early as possible + * + * Returns: + * Zero on success, negative error code if failed + */ +static inline int intel_gvt_hypervisor_dom0_ready(void) +{ + if (!intel_gvt_host.mpt->dom0_ready) + return 0; + + return intel_gvt_host.mpt->dom0_ready(); +} + #endif /* _GVT_MPT_H_ */ diff --git a/drivers/gpu/drm/i915/gvt/reg.h b/drivers/gpu/drm/i915/gvt/reg.h index d4f7ce6dc1d73..b55fc82027e06 100644 --- a/drivers/gpu/drm/i915/gvt/reg.h +++ b/drivers/gpu/drm/i915/gvt/reg.h @@ -57,8 +57,15 @@ #define VGT_SPRSTRIDE(pipe) _PIPE(pipe, _SPRA_STRIDE, _PLANE_STRIDE_2_B) -#define _REG_701C0(pipe, plane) (0x701c0 + pipe * 0x1000 + (plane - 1) * 0x100) -#define _REG_701C4(pipe, plane) (0x701c4 + pipe * 0x1000 + (plane - 1) * 0x100) +#define _REG_701AC(pipe, plane) (0x701ac + pipe * 0x1000 + plane * 0x100) + +#define SKL_PS_REG_TO_PIPE(reg) (((reg) >> 11) & 0x3) +#define SKL_PS_REG_TO_SCALER(reg) (((reg) >> 8) & 0x3) +#define SKL_PS_REG_VALUE_TO_PLANE(val) (((val) >> 25) & 0x7) + +#define SKL_PLANE_REG_TO_PIPE(reg) (((reg) >> 12) & 0x3) +#define SKL_PLANE_REG_TO_PLANE(reg) ((((reg) & 0xFFF) - 0x180) >> 8) +#define SKL_FLIP_EVENT(pipe, plane) (PRIMARY_A_FLIP_DONE + (plane)*3 + pipe) #define GFX_MODE_BIT_SET_IN_MASK(val, bit) \ ((((bit) & 0xffff0000) == 0) && !!((val) & (((bit) 
<< 16)))) diff --git a/drivers/gpu/drm/i915/gvt/sched_policy.c b/drivers/gpu/drm/i915/gvt/sched_policy.c index c32e7d5e86291..f5127e07570b5 100644 --- a/drivers/gpu/drm/i915/gvt/sched_policy.c +++ b/drivers/gpu/drm/i915/gvt/sched_policy.c @@ -34,15 +34,11 @@ #include "i915_drv.h" #include "gvt.h" -static bool vgpu_has_pending_workload(struct intel_vgpu *vgpu) +static bool vgpu_has_pending_workload(struct intel_vgpu *vgpu, + enum intel_engine_id ring_id) { - enum intel_engine_id i; - struct intel_engine_cs *engine; - - for_each_engine(engine, vgpu->gvt->dev_priv, i) { - if (!list_empty(workload_q_head(vgpu, i))) - return true; - } + if (!list_empty(workload_q_head(vgpu, ring_id))) + return true; return false; } @@ -68,11 +64,12 @@ struct gvt_sched_data { struct intel_gvt *gvt; struct hrtimer timer; unsigned long period; - struct list_head lru_runq_head; + struct list_head lru_runq_head[I915_NUM_ENGINES]; ktime_t expire_time; }; -static void vgpu_update_timeslice(struct intel_vgpu *vgpu, ktime_t cur_time) +static void vgpu_update_timeslice(struct intel_vgpu *vgpu, ktime_t cur_time, + enum intel_engine_id ring_id) { ktime_t delta_ts; struct vgpu_sched_data *vgpu_data; @@ -80,7 +77,7 @@ static void vgpu_update_timeslice(struct intel_vgpu *vgpu, ktime_t cur_time) if (!vgpu || vgpu == vgpu->gvt->idle_vgpu) return; - vgpu_data = vgpu->sched_data; + vgpu_data = vgpu->sched_data[ring_id]; delta_ts = ktime_sub(cur_time, vgpu_data->sched_in_time); vgpu_data->sched_time = ktime_add(vgpu_data->sched_time, delta_ts); vgpu_data->left_ts = ktime_sub(vgpu_data->left_ts, delta_ts); @@ -90,12 +87,13 @@ static void vgpu_update_timeslice(struct intel_vgpu *vgpu, ktime_t cur_time) #define GVT_TS_BALANCE_PERIOD_MS 100 #define GVT_TS_BALANCE_STAGE_NUM 10 -static void gvt_balance_timeslice(struct gvt_sched_data *sched_data) +static void gvt_balance_timeslice(struct gvt_sched_data *sched_data, + enum intel_engine_id ring_id) { struct vgpu_sched_data *vgpu_data; struct list_head *pos; - 
static uint64_t stage_check; - int stage = stage_check++ % GVT_TS_BALANCE_STAGE_NUM; + static uint64_t stage_check[I915_NUM_ENGINES]; + int stage = stage_check[ring_id]++ % GVT_TS_BALANCE_STAGE_NUM; /* The timeslice accumulation reset at stage 0, which is * allocated again without adding previous debt. @@ -104,12 +102,12 @@ static void gvt_balance_timeslice(struct gvt_sched_data *sched_data) int total_weight = 0; ktime_t fair_timeslice; - list_for_each(pos, &sched_data->lru_runq_head) { + list_for_each(pos, &sched_data->lru_runq_head[ring_id]) { vgpu_data = container_of(pos, struct vgpu_sched_data, lru_list); total_weight += vgpu_data->sched_ctl.weight; } - list_for_each(pos, &sched_data->lru_runq_head) { + list_for_each(pos, &sched_data->lru_runq_head[ring_id]) { vgpu_data = container_of(pos, struct vgpu_sched_data, lru_list); fair_timeslice = ktime_divns(ms_to_ktime(GVT_TS_BALANCE_PERIOD_MS), total_weight) * vgpu_data->sched_ctl.weight; @@ -118,7 +116,7 @@ static void gvt_balance_timeslice(struct gvt_sched_data *sched_data) vgpu_data->left_ts = vgpu_data->allocated_ts; } } else { - list_for_each(pos, &sched_data->lru_runq_head) { + list_for_each(pos, &sched_data->lru_runq_head[ring_id]) { vgpu_data = container_of(pos, struct vgpu_sched_data, lru_list); /* timeslice for next 100ms should add the left/debt @@ -129,62 +127,63 @@ static void gvt_balance_timeslice(struct gvt_sched_data *sched_data) } } -static void try_to_schedule_next_vgpu(struct intel_gvt *gvt) +static void try_to_schedule_next_vgpu(struct intel_gvt *gvt, + enum intel_engine_id ring_id) { struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler; - enum intel_engine_id i; - struct intel_engine_cs *engine; struct vgpu_sched_data *vgpu_data; ktime_t cur_time; /* no need to schedule if next_vgpu is the same with current_vgpu, * let scheduler chose next_vgpu again by setting it to NULL. 
*/ - if (scheduler->next_vgpu == scheduler->current_vgpu) { - scheduler->next_vgpu = NULL; + if (scheduler->next_vgpu[ring_id] == + scheduler->current_vgpu[ring_id]) { + scheduler->next_vgpu[ring_id] = NULL; return; } + /* no target to schedule */ + if (!scheduler->next_vgpu[ring_id]) + return; /* * after the flag is set, workload dispatch thread will * stop dispatching workload for current vgpu */ - scheduler->need_reschedule = true; + scheduler->need_reschedule[ring_id] = true; /* still have uncompleted workload? */ - for_each_engine(engine, gvt->dev_priv, i) { - if (scheduler->current_workload[i]) - return; - } + if (scheduler->current_workload[ring_id]) + return; cur_time = ktime_get(); - vgpu_update_timeslice(scheduler->current_vgpu, cur_time); - vgpu_data = scheduler->next_vgpu->sched_data; + vgpu_update_timeslice(scheduler->current_vgpu[ring_id], cur_time, ring_id); + vgpu_data = scheduler->next_vgpu[ring_id]->sched_data[ring_id]; vgpu_data->sched_in_time = cur_time; /* switch current vgpu */ - scheduler->current_vgpu = scheduler->next_vgpu; - scheduler->next_vgpu = NULL; + scheduler->current_vgpu[ring_id] = scheduler->next_vgpu[ring_id]; + scheduler->next_vgpu[ring_id] = NULL; - scheduler->need_reschedule = false; + scheduler->need_reschedule[ring_id] = false; /* wake up workload dispatch thread */ - for_each_engine(engine, gvt->dev_priv, i) - wake_up(&scheduler->waitq[i]); + wake_up(&scheduler->waitq[ring_id]); } -static struct intel_vgpu *find_busy_vgpu(struct gvt_sched_data *sched_data) +static struct intel_vgpu *find_busy_vgpu(struct gvt_sched_data *sched_data, + enum intel_engine_id ring_id) { struct vgpu_sched_data *vgpu_data; struct intel_vgpu *vgpu = NULL; - struct list_head *head = &sched_data->lru_runq_head; + struct list_head *head = &sched_data->lru_runq_head[ring_id]; struct list_head *pos; /* search a vgpu with pending workload */ list_for_each(pos, head) { vgpu_data = container_of(pos, struct vgpu_sched_data, lru_list); - if 
(!vgpu_has_pending_workload(vgpu_data->vgpu)) + if (!vgpu_has_pending_workload(vgpu_data->vgpu, ring_id)) continue; if (vgpu_data->pri_sched) { @@ -208,7 +207,8 @@ static struct intel_vgpu *find_busy_vgpu(struct gvt_sched_data *sched_data) /* in nanosecond */ #define GVT_DEFAULT_TIME_SLICE 1000000 -static void tbs_sched_func(struct gvt_sched_data *sched_data) +static void tbs_sched_func(struct gvt_sched_data *sched_data, + enum intel_engine_id ring_id) { struct intel_gvt *gvt = sched_data->gvt; struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler; @@ -216,31 +216,34 @@ static void tbs_sched_func(struct gvt_sched_data *sched_data) struct intel_vgpu *vgpu = NULL; /* no active vgpu or has already had a target */ - if (list_empty(&sched_data->lru_runq_head) || scheduler->next_vgpu) + if (list_empty(&sched_data->lru_runq_head[ring_id]) + || scheduler->next_vgpu[ring_id]) goto out; - vgpu = find_busy_vgpu(sched_data); + vgpu = find_busy_vgpu(sched_data, ring_id); if (vgpu) { - scheduler->next_vgpu = vgpu; - vgpu_data = vgpu->sched_data; + scheduler->next_vgpu[ring_id] = vgpu; + vgpu_data = vgpu->sched_data[ring_id]; if (!vgpu_data->pri_sched) { /* Move the last used vGPU to the tail of lru_list */ list_del_init(&vgpu_data->lru_list); list_add_tail(&vgpu_data->lru_list, - &sched_data->lru_runq_head); + &sched_data->lru_runq_head[ring_id]); } } else { - scheduler->next_vgpu = gvt->idle_vgpu; + scheduler->next_vgpu[ring_id] = gvt->idle_vgpu; } out: - if (scheduler->next_vgpu) - try_to_schedule_next_vgpu(gvt); + if (scheduler->next_vgpu[ring_id]) + try_to_schedule_next_vgpu(gvt, ring_id); } void intel_gvt_schedule(struct intel_gvt *gvt) { struct gvt_sched_data *sched_data = gvt->scheduler.sched_data; ktime_t cur_time; + enum intel_engine_id i; + struct intel_engine_cs *engine; mutex_lock(&gvt->sched_lock); cur_time = ktime_get(); @@ -248,15 +251,19 @@ void intel_gvt_schedule(struct intel_gvt *gvt) if (test_and_clear_bit(INTEL_GVT_REQUEST_SCHED, (void 
*)&gvt->service_request)) { if (cur_time >= sched_data->expire_time) { - gvt_balance_timeslice(sched_data); + for_each_engine(engine, gvt->dev_priv, i) + gvt_balance_timeslice(sched_data, i); sched_data->expire_time = ktime_add_ms( cur_time, GVT_TS_BALANCE_PERIOD_MS); } } clear_bit(INTEL_GVT_REQUEST_EVENT_SCHED, (void *)&gvt->service_request); - vgpu_update_timeslice(gvt->scheduler.current_vgpu, cur_time); - tbs_sched_func(sched_data); + for_each_engine(engine, gvt->dev_priv, i) { + vgpu_update_timeslice(gvt->scheduler.current_vgpu[i], + cur_time, i); + tbs_sched_func(sched_data, i); + } mutex_unlock(&gvt->sched_lock); } @@ -276,6 +283,9 @@ static enum hrtimer_restart tbs_timer_fn(struct hrtimer *timer_data) static int tbs_sched_init(struct intel_gvt *gvt) { + enum intel_engine_id i; + struct intel_engine_cs *engine; + struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler; @@ -285,7 +295,9 @@ static int tbs_sched_init(struct intel_gvt *gvt) if (!data) return -ENOMEM; - INIT_LIST_HEAD(&data->lru_runq_head); + for_each_engine(engine, gvt->dev_priv, i) + INIT_LIST_HEAD(&data->lru_runq_head[i]); + hrtimer_init(&data->timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS); data->timer.function = tbs_timer_fn; data->period = GVT_DEFAULT_TIME_SLICE; @@ -311,18 +323,29 @@ static void tbs_sched_clean(struct intel_gvt *gvt) static int tbs_sched_init_vgpu(struct intel_vgpu *vgpu) { struct vgpu_sched_data *data; + enum intel_engine_id i; + struct intel_engine_cs *engine; - data = kzalloc(sizeof(*data), GFP_KERNEL); - if (!data) - return -ENOMEM; + for_each_engine(engine, vgpu->gvt->dev_priv, i) { + data = kzalloc(sizeof(*data), GFP_KERNEL); + if (!data) + goto err; - data->sched_ctl.weight = vgpu->sched_ctl.weight; - data->vgpu = vgpu; - INIT_LIST_HEAD(&data->lru_list); + data->sched_ctl.weight = vgpu->sched_ctl.weight; + data->vgpu = vgpu; + INIT_LIST_HEAD(&data->lru_list); - vgpu->sched_data = data; + vgpu->sched_data[i] = data; + } return 0; + +err: + for (; i >= 0; i--) { + 
kfree(vgpu->sched_data[i]); + vgpu->sched_data[i] = NULL; + } + return -ENOMEM; } static void tbs_sched_clean_vgpu(struct intel_vgpu *vgpu) @@ -330,8 +353,13 @@ static void tbs_sched_clean_vgpu(struct intel_vgpu *vgpu) struct intel_gvt *gvt = vgpu->gvt; struct gvt_sched_data *sched_data = gvt->scheduler.sched_data; - kfree(vgpu->sched_data); - vgpu->sched_data = NULL; + enum intel_engine_id i; + struct intel_engine_cs *engine; + + for_each_engine(engine, vgpu->gvt->dev_priv, i) { + kfree(vgpu->sched_data[i]); + vgpu->sched_data[i] = NULL; + } /* this vgpu id has been removed */ if (idr_is_empty(&gvt->vgpu_idr)) @@ -341,31 +369,42 @@ static void tbs_sched_clean_vgpu(struct intel_vgpu *vgpu) static void tbs_sched_start_schedule(struct intel_vgpu *vgpu) { struct gvt_sched_data *sched_data = vgpu->gvt->scheduler.sched_data; - struct vgpu_sched_data *vgpu_data = vgpu->sched_data; ktime_t now; + struct vgpu_sched_data *vgpu_data; + enum intel_engine_id i; + struct intel_engine_cs *engine; - if (!list_empty(&vgpu_data->lru_list)) - return; + for_each_engine(engine, vgpu->gvt->dev_priv, i) { + vgpu_data = vgpu->sched_data[i]; + if (!list_empty(&vgpu_data->lru_list)) + continue; - now = ktime_get(); - vgpu_data->pri_time = ktime_add(now, + now = ktime_get(); + vgpu_data->pri_time = ktime_add(now, ktime_set(GVT_SCHED_VGPU_PRI_TIME, 0)); - vgpu_data->pri_sched = true; + vgpu_data->pri_sched = true; - list_add(&vgpu_data->lru_list, &sched_data->lru_runq_head); + list_add(&vgpu_data->lru_list, &sched_data->lru_runq_head[i]); + vgpu_data->active = true; + } if (!hrtimer_active(&sched_data->timer)) hrtimer_start(&sched_data->timer, ktime_add_ns(ktime_get(), sched_data->period), HRTIMER_MODE_ABS); - vgpu_data->active = true; } static void tbs_sched_stop_schedule(struct intel_vgpu *vgpu) { - struct vgpu_sched_data *vgpu_data = vgpu->sched_data; + struct vgpu_sched_data *vgpu_data; + enum intel_engine_id i; + struct intel_engine_cs *engine; + + for_each_engine(engine, 
vgpu->gvt->dev_priv, i) { + vgpu_data = vgpu->sched_data[i]; - list_del_init(&vgpu_data->lru_list); - vgpu_data->active = false; + list_del_init(&vgpu_data->lru_list); + vgpu_data->active = false; + } } static struct intel_gvt_sched_policy_ops tbs_schedule_ops = { @@ -423,10 +462,16 @@ void intel_vgpu_clean_sched_policy(struct intel_vgpu *vgpu) void intel_vgpu_start_schedule(struct intel_vgpu *vgpu) { - struct vgpu_sched_data *vgpu_data = vgpu->sched_data; + struct vgpu_sched_data *vgpu_data; + struct intel_engine_cs *engine; + enum intel_engine_id i; mutex_lock(&vgpu->gvt->sched_lock); - if (!vgpu_data->active) { + for_each_engine(engine, vgpu->gvt->dev_priv, i) { + vgpu_data = vgpu->sched_data[i]; + if (vgpu_data->active) + continue; + gvt_dbg_core("vgpu%d: start schedule\n", vgpu->id); vgpu->gvt->scheduler.sched_ops->start_schedule(vgpu); } @@ -444,36 +489,27 @@ void intel_vgpu_stop_schedule(struct intel_vgpu *vgpu) { struct intel_gvt_workload_scheduler *scheduler = &vgpu->gvt->scheduler; - int ring_id; - struct vgpu_sched_data *vgpu_data = vgpu->sched_data; - struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv; - - if (!vgpu_data->active) - return; + struct vgpu_sched_data *vgpu_data; + enum intel_engine_id i; + struct intel_engine_cs *engine; gvt_dbg_core("vgpu%d: stop schedule\n", vgpu->id); mutex_lock(&vgpu->gvt->sched_lock); scheduler->sched_ops->stop_schedule(vgpu); - if (scheduler->next_vgpu == vgpu) - scheduler->next_vgpu = NULL; + for_each_engine(engine, vgpu->gvt->dev_priv, i) { + vgpu_data = vgpu->sched_data[i]; - if (scheduler->current_vgpu == vgpu) { - /* stop workload dispatching */ - scheduler->need_reschedule = true; - scheduler->current_vgpu = NULL; - } + if (scheduler->next_vgpu[i] == vgpu) + scheduler->next_vgpu[i] = NULL; - intel_runtime_pm_get(dev_priv); - spin_lock_bh(&scheduler->mmio_context_lock); - for (ring_id = 0; ring_id < I915_NUM_ENGINES; ring_id++) { - if (scheduler->engine_owner[ring_id] == vgpu) { - 
intel_gvt_switch_mmio(vgpu, NULL, ring_id); - scheduler->engine_owner[ring_id] = NULL; + if (scheduler->current_vgpu[i] == vgpu) { + /* stop workload dispatching */ + scheduler->need_reschedule[i] = true; + scheduler->current_vgpu[i] = NULL; } } - spin_unlock_bh(&scheduler->mmio_context_lock); - intel_runtime_pm_put(dev_priv); + mutex_unlock(&vgpu->gvt->sched_lock); } diff --git a/drivers/gpu/drm/i915/gvt/scheduler.c b/drivers/gpu/drm/i915/gvt/scheduler.c index 43aa058e29fca..87944a44cb51e 100644 --- a/drivers/gpu/drm/i915/gvt/scheduler.c +++ b/drivers/gpu/drm/i915/gvt/scheduler.c @@ -41,6 +41,8 @@ #define RING_CTX_OFF(x) \ offsetof(struct execlist_ring_context, x) +bool gvt_shadow_wa_ctx = false; + static void set_context_pdp_root_pointer( struct execlist_ring_context *ring_context, u32 pdp[8]) @@ -119,6 +121,7 @@ static void sr_oa_regs(struct intel_vgpu_workload *workload, } } +static bool enable_lazy_shadow_ctx = true; static int populate_shadow_context(struct intel_vgpu_workload *workload) { struct intel_vgpu *vgpu = workload->vgpu; @@ -130,6 +133,10 @@ static int populate_shadow_context(struct intel_vgpu_workload *workload) struct page *page; void *dst; unsigned long context_gpa, context_page_num; + struct drm_i915_private *dev_priv = gvt->dev_priv; + struct i915_ggtt *ggtt = &gvt->dev_priv->ggtt; + dma_addr_t addr; + gen8_pte_t __iomem *pte; int i; gvt_dbg_sched("ring id %d workload lrca %x", ring_id, @@ -143,6 +150,18 @@ static int populate_shadow_context(struct intel_vgpu_workload *workload) context_page_num = 19; i = 2; +#ifdef CONFIG_INTEL_IOMMU + /* + * In case IOMMU for graphics is turned on, we don't want to + * turn on lazy shadow context feature because it will touch + * GGTT entries which require a BKL and since this is a + * performance enhancement feature, we will end up negating + * the performance. 
+ */ + if(intel_iommu_gfx_mapped) { + enable_lazy_shadow_ctx = false; + } +#endif while (i < context_page_num) { context_gpa = intel_vgpu_gma_to_gpa(vgpu->gtt.ggtt_mm, @@ -153,14 +172,41 @@ static int populate_shadow_context(struct intel_vgpu_workload *workload) return -EFAULT; } - page = i915_gem_object_get_page(ctx_obj, LRC_HEADER_PAGES + i); - dst = kmap(page); - intel_gvt_hypervisor_read_gpa(vgpu, context_gpa, dst, + if (!enable_lazy_shadow_ctx) { + page = i915_gem_object_get_page(ctx_obj, + LRC_PPHWSP_PN + i); + dst = kmap(page); + intel_gvt_hypervisor_read_gpa(vgpu, context_gpa, dst, I915_GTT_PAGE_SIZE); - kunmap(page); + kunmap(page); + } else { + unsigned long mfn; + struct i915_gem_context *shadow_ctx = + workload->vgpu->submission.shadow_ctx; + + addr = i915_ggtt_offset( + shadow_ctx->__engine[ring_id].state) + + (LRC_PPHWSP_PN + i) * PAGE_SIZE; + pte = (gen8_pte_t __iomem *)ggtt->gsm + + (addr >> PAGE_SHIFT); + + mfn = intel_gvt_hypervisor_gfn_to_mfn(vgpu, + context_gpa >> 12); + if (mfn == INTEL_GVT_INVALID_ADDR) { + gvt_vgpu_err("fail to translate gfn during context shadow\n"); + return -ENXIO; + } + + mfn <<= 12; + mfn |= _PAGE_PRESENT | _PAGE_RW | PPAT_CACHED; + writeq(mfn, pte); + } i++; } + I915_WRITE(GFX_FLSH_CNTL_GEN6, GFX_FLSH_CNTL_EN); + POSTING_READ(GFX_FLSH_CNTL_GEN6); + page = i915_gem_object_get_page(ctx_obj, LRC_STATE_PN); shadow_ring_context = kmap(page); @@ -203,6 +249,7 @@ static inline bool is_gvt_request(struct i915_request *req) return i915_gem_context_force_single_submission(req->gem_context); } +/* static void save_ring_hw_state(struct intel_vgpu *vgpu, int ring_id) { struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv; @@ -216,6 +263,24 @@ static void save_ring_hw_state(struct intel_vgpu *vgpu, int ring_id) reg = RING_ACTHD_UDW(ring_base); vgpu_vreg(vgpu, i915_mmio_reg_offset(reg)) = I915_READ_FW(reg); } +*/ + +static void active_hp_work(struct work_struct *work) +{ + struct intel_gvt *gvt = + container_of(work, struct 
intel_gvt, active_hp_work); + struct drm_i915_private *dev_priv = gvt->dev_priv; + u8 freq = dev_priv->gt_pm.rps.rp0_freq; + + if (IS_BROXTON(dev_priv)) + freq = intel_freq_opcode(dev_priv, 600); + + if (READ_ONCE(dev_priv->gt_pm.rps.cur_freq) < freq) { + mutex_lock(&dev_priv->pcu_lock); + intel_set_rps(dev_priv, freq); + mutex_unlock(&dev_priv->pcu_lock); + } +} static int shadow_context_status_change(struct notifier_block *nb, unsigned long action, void *data) @@ -226,21 +291,9 @@ static int shadow_context_status_change(struct notifier_block *nb, struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler; enum intel_engine_id ring_id = req->engine->id; struct intel_vgpu_workload *workload; - unsigned long flags; - - if (!is_gvt_request(req)) { - spin_lock_irqsave(&scheduler->mmio_context_lock, flags); - if (action == INTEL_CONTEXT_SCHEDULE_IN && - scheduler->engine_owner[ring_id]) { - /* Switch ring from vGPU to host. */ - intel_gvt_switch_mmio(scheduler->engine_owner[ring_id], - NULL, ring_id); - scheduler->engine_owner[ring_id] = NULL; - } - spin_unlock_irqrestore(&scheduler->mmio_context_lock, flags); + if (!is_gvt_request(req)) return NOTIFY_OK; - } workload = scheduler->current_workload[ring_id]; if (unlikely(!workload)) @@ -248,25 +301,14 @@ static int shadow_context_status_change(struct notifier_block *nb, switch (action) { case INTEL_CONTEXT_SCHEDULE_IN: - spin_lock_irqsave(&scheduler->mmio_context_lock, flags); - if (workload->vgpu != scheduler->engine_owner[ring_id]) { - /* Switch ring from host to vGPU or vGPU to vGPU. 
*/ - intel_gvt_switch_mmio(scheduler->engine_owner[ring_id], - workload->vgpu, ring_id); - scheduler->engine_owner[ring_id] = workload->vgpu; - } else - gvt_dbg_sched("skip ring %d mmio switch for vgpu%d\n", - ring_id, workload->vgpu->id); - spin_unlock_irqrestore(&scheduler->mmio_context_lock, flags); + schedule_work(&gvt->active_hp_work); atomic_set(&workload->shadow_ctx_active, 1); break; case INTEL_CONTEXT_SCHEDULE_OUT: - save_ring_hw_state(workload->vgpu, ring_id); atomic_set(&workload->shadow_ctx_active, 0); break; case INTEL_CONTEXT_SCHEDULE_PREEMPTED: - save_ring_hw_state(workload->vgpu, ring_id); - break; + return NOTIFY_OK; default: WARN_ON(1); return NOTIFY_OK; @@ -381,7 +423,8 @@ int intel_gvt_scan_and_shadow_workload(struct intel_vgpu_workload *workload) goto err_unpin; if ((workload->ring_id == RCS) && - (workload->wa_ctx.indirect_ctx.size != 0)) { + (workload->wa_ctx.indirect_ctx.size != 0) + && gvt_shadow_wa_ctx) { ret = intel_gvt_scan_and_shadow_wa_ctx(&workload->wa_ctx); if (ret) goto err_shadow; @@ -410,6 +453,47 @@ int intel_gvt_scan_and_shadow_workload(struct intel_vgpu_workload *workload) return ret; } +static void gen8_shadow_pid_cid(struct intel_vgpu_workload *workload) +{ + int ring_id = workload->ring_id; + struct drm_i915_private *dev_priv = workload->vgpu->gvt->dev_priv; + struct intel_engine_cs *engine = dev_priv->engine[ring_id]; + u32 *cs; + + /* Copy the PID and CID from the guest's HWS page to the host's one */ + cs = intel_ring_begin(workload->req, 16); + *cs++ = MI_LOAD_REGISTER_MEM_GEN8 | MI_SRM_LRM_GLOBAL_GTT; + *cs++ = i915_mmio_reg_offset(NOPID); + *cs++ = (workload->ctx_desc.lrca << I915_GTT_PAGE_SHIFT) + + I915_GEM_HWS_PID_ADDR; + *cs++ = 0; + *cs++ = MI_STORE_REGISTER_MEM_GEN8 | MI_SRM_LRM_GLOBAL_GTT; + *cs++ = i915_mmio_reg_offset(NOPID); + *cs++ = engine->status_page.ggtt_offset + I915_GEM_HWS_PID_ADDR + + (workload->vgpu->id << MI_STORE_DWORD_INDEX_SHIFT); + *cs++ = 0; + *cs++ = MI_LOAD_REGISTER_MEM_GEN8 | 
MI_SRM_LRM_GLOBAL_GTT; + *cs++ = i915_mmio_reg_offset(NOPID); + *cs++ = (workload->ctx_desc.lrca << I915_GTT_PAGE_SHIFT) + + I915_GEM_HWS_CID_ADDR; + *cs++ = 0; + *cs++ = MI_STORE_REGISTER_MEM_GEN8 | MI_SRM_LRM_GLOBAL_GTT; + *cs++ = i915_mmio_reg_offset(NOPID); + *cs++ = engine->status_page.ggtt_offset + I915_GEM_HWS_CID_ADDR + + (workload->vgpu->id << MI_STORE_DWORD_INDEX_SHIFT); + *cs++ = 0; + intel_ring_advance(workload->req, cs); +} + +static int sanitize_priority(int priority) +{ + if (priority > I915_CONTEXT_MAX_USER_PRIORITY) + return I915_CONTEXT_MAX_USER_PRIORITY; + else if (priority < I915_CONTEXT_MIN_USER_PRIORITY) + return I915_CONTEXT_MIN_USER_PRIORITY; + return priority; +} + static void release_shadow_batch_buffer(struct intel_vgpu_workload *workload); static int prepare_shadow_batch_buffer(struct intel_vgpu_workload *workload) @@ -594,22 +678,34 @@ static int prepare_workload(struct intel_vgpu_workload *workload) goto err_unpin_mm; } + /* we consider this as a workaround to avoid the situation that + * PDPs are not updated, and right now we only limit it to BXT platform + * since it's not reported on the other platforms + */ + if (IS_BROXTON(vgpu->gvt->dev_priv)) { + gvt_emit_pdps(workload); + } + ret = copy_workload_to_ring_buffer(workload); if (ret) { gvt_vgpu_err("fail to generate request\n"); goto err_unpin_mm; } + gen8_shadow_pid_cid(workload); + ret = prepare_shadow_batch_buffer(workload); if (ret) { gvt_vgpu_err("fail to prepare_shadow_batch_buffer\n"); goto err_unpin_mm; } - ret = prepare_shadow_wa_ctx(&workload->wa_ctx); - if (ret) { - gvt_vgpu_err("fail to prepare_shadow_wa_ctx\n"); - goto err_shadow_batch; + if (gvt_shadow_wa_ctx) { + ret = prepare_shadow_wa_ctx(&workload->wa_ctx); + if (ret) { + gvt_vgpu_err("fail to prepare_shadow_wa_ctx\n"); + goto err_shadow_batch; + } } if (workload->prepare) { @@ -632,6 +728,7 @@ static int dispatch_workload(struct intel_vgpu_workload *workload) { struct intel_vgpu *vgpu = workload->vgpu; struct 
drm_i915_private *dev_priv = vgpu->gvt->dev_priv; + struct intel_vgpu_submission *s = &vgpu->submission; int ring_id = workload->ring_id; int ret; @@ -642,11 +739,17 @@ static int dispatch_workload(struct intel_vgpu_workload *workload) mutex_lock(&dev_priv->drm.struct_mutex); ret = intel_gvt_scan_and_shadow_workload(workload); + + if (i915_modparams.enable_conformance_check + && intel_gvt_vgpu_conformance_check(vgpu, ring_id)) + gvt_err("vgpu%d unconformance guest detected\n", vgpu->id); + if (ret) goto out; ret = prepare_workload(workload); + workload->guilty_count = atomic_read(&workload->req->gem_context->guilty_count); out: if (ret) workload->status = ret; @@ -654,6 +757,8 @@ static int dispatch_workload(struct intel_vgpu_workload *workload) if (!IS_ERR_OR_NULL(workload->req)) { gvt_dbg_sched("ring id %d submit workload to i915 %p\n", ring_id, workload->req); + s->shadow_ctx->sched.priority = i915_modparams.gvt_workload_priority = + sanitize_priority(i915_modparams.gvt_workload_priority); i915_request_add(workload->req); workload->dispatched = true; } @@ -675,17 +780,18 @@ static struct intel_vgpu_workload *pick_next_workload( * no current vgpu / will be scheduled out / no workload * bail out */ - if (!scheduler->current_vgpu) { + if (!scheduler->current_vgpu[ring_id]) { gvt_dbg_sched("ring id %d stop - no current vgpu\n", ring_id); goto out; } - if (scheduler->need_reschedule) { + if (scheduler->need_reschedule[ring_id]) { gvt_dbg_sched("ring id %d stop - will reschedule\n", ring_id); goto out; } - if (list_empty(workload_q_head(scheduler->current_vgpu, ring_id))) + if (list_empty(workload_q_head(scheduler->current_vgpu[ring_id], + ring_id))) goto out; /* @@ -706,7 +812,8 @@ static struct intel_vgpu_workload *pick_next_workload( * schedule out a vgpu. 
*/ scheduler->current_workload[ring_id] = container_of( - workload_q_head(scheduler->current_vgpu, ring_id)->next, + workload_q_head(scheduler->current_vgpu[ring_id], + ring_id)->next, struct intel_vgpu_workload, list); workload = scheduler->current_workload[ring_id]; @@ -734,29 +841,31 @@ static void update_guest_context(struct intel_vgpu_workload *workload) gvt_dbg_sched("ring id %d workload lrca %x\n", rq->engine->id, workload->ctx_desc.lrca); - context_page_num = rq->engine->context_size; - context_page_num = context_page_num >> PAGE_SHIFT; + if (!enable_lazy_shadow_ctx) { + context_page_num = rq->engine->context_size; + context_page_num = context_page_num >> PAGE_SHIFT; - if (IS_BROADWELL(gvt->dev_priv) && rq->engine->id == RCS) - context_page_num = 19; + if (IS_BROADWELL(gvt->dev_priv) && rq->engine->id == RCS) + context_page_num = 19; - i = 2; + i = 2; - while (i < context_page_num) { - context_gpa = intel_vgpu_gma_to_gpa(vgpu->gtt.ggtt_mm, - (u32)((workload->ctx_desc.lrca + i) << - I915_GTT_PAGE_SHIFT)); - if (context_gpa == INTEL_GVT_INVALID_ADDR) { - gvt_vgpu_err("invalid guest context descriptor\n"); - return; - } + while (i < context_page_num) { + context_gpa = intel_vgpu_gma_to_gpa(vgpu->gtt.ggtt_mm, + (u32)((workload->ctx_desc.lrca + i) << + I915_GTT_PAGE_SHIFT)); + if (context_gpa == INTEL_GVT_INVALID_ADDR) { + gvt_vgpu_err("invalid guest context descriptor\n"); + return; + } - page = i915_gem_object_get_page(ctx_obj, LRC_HEADER_PAGES + i); - src = kmap(page); - intel_gvt_hypervisor_write_gpa(vgpu, context_gpa, src, - I915_GTT_PAGE_SIZE); - kunmap(page); - i++; + page = i915_gem_object_get_page(ctx_obj, LRC_HEADER_PAGES + i); + src = kmap(page); + intel_gvt_hypervisor_write_gpa(vgpu, context_gpa, src, + I915_GTT_PAGE_SIZE); + kunmap(page); + i++; + } } intel_gvt_hypervisor_write_gpa(vgpu, workload->ring_context_gpa + @@ -861,9 +970,13 @@ static void complete_current_workload(struct intel_gvt *gvt, int ring_id) list_del_init(&workload->list); + if 
(workload->status == -EIO) + intel_vgpu_reset_submission(vgpu, 1 << ring_id); + + if (!workload->status) { release_shadow_batch_buffer(workload); - release_shadow_wa_ctx(&workload->wa_ctx); + if(gvt_shadow_wa_ctx) + release_shadow_wa_ctx(&workload->wa_ctx); } if (workload->status || (vgpu->resetting_eng & ENGINE_MASK(ring_id))) { @@ -888,13 +1001,29 @@ static void complete_current_workload(struct intel_gvt *gvt, int ring_id) atomic_dec(&s->running_workload_num); wake_up(&scheduler->workload_complete_wq); - if (gvt->scheduler.need_reschedule) + if (gvt->scheduler.need_reschedule[ring_id]) intel_gvt_request_service(gvt, INTEL_GVT_REQUEST_EVENT_SCHED); mutex_unlock(&gvt->sched_lock); mutex_unlock(&vgpu->vgpu_lock); } +static void inject_error_cs_irq(struct intel_vgpu *vgpu, int ring_id) +{ + enum intel_gvt_event_type events[] = { + [RCS] = RCS_CMD_STREAMER_ERR, + [BCS] = BCS_CMD_STREAMER_ERR, + [VCS] = VCS_CMD_STREAMER_ERR, + [VCS2] = VCS2_CMD_STREAMER_ERR, + [VECS] = VECS_CMD_STREAMER_ERR, + }; + + if (unlikely(events[ring_id] == 0)) + return; + + intel_vgpu_trigger_virtual_event(vgpu, events[ring_id]); +} + struct workload_thread_param { struct intel_gvt *gvt; int ring_id; @@ -909,6 +1038,7 @@ static int workload_thread(void *priv) struct intel_vgpu_workload *workload = NULL; struct intel_vgpu *vgpu = NULL; int ret; + long lret; bool need_force_wake = IS_SKYLAKE(gvt->dev_priv) || IS_KABYLAKE(gvt->dev_priv) || IS_BROXTON(gvt->dev_priv); @@ -955,7 +1085,24 @@ static int workload_thread(void *priv) gvt_dbg_sched("ring id %d wait workload %p\n", workload->ring_id, workload); - i915_request_wait(workload->req, 0, MAX_SCHEDULE_TIMEOUT); + lret = i915_request_wait(workload->req, 0, + MAX_SCHEDULE_TIMEOUT); + + gvt_dbg_sched("i915_wait_request %p returns %ld\n", + workload, lret); + if (lret >= 0 && workload->status == -EINPROGRESS) + workload->status = 0; + + /* + * increased guilty_count means that this request triggered + * a GPU reset, so we need to notify the guest 
about the + * hang. + */ + if (workload->guilty_count < + atomic_read(&workload->req->gem_context->guilty_count)) { + workload->status = -EIO; + inject_error_cs_irq(workload->vgpu, ring_id); + } complete: gvt_dbg_sched("will complete workload %p, status: %d\n", @@ -1041,6 +1188,8 @@ int intel_gvt_init_workload_scheduler(struct intel_gvt *gvt) atomic_notifier_chain_register(&engine->context_status_notifier, &gvt->shadow_ctx_notifier_block[i]); } + INIT_WORK(&gvt->active_hp_work, active_hp_work); + return 0; err: intel_gvt_clean_workload_scheduler(gvt); @@ -1108,6 +1257,10 @@ int intel_vgpu_setup_submission(struct intel_vgpu *vgpu) if (IS_ERR(s->shadow_ctx)) return PTR_ERR(s->shadow_ctx); + if (!s->shadow_ctx->name) { + s->shadow_ctx->name = kasprintf(GFP_KERNEL, "Shadow Context %d", vgpu->id); + } + bitmap_zero(s->shadow_ctx_desc_updated, I915_NUM_ENGINES); s->workloads = kmem_cache_create_usercopy("gvt-g_vgpu_workload", diff --git a/drivers/gpu/drm/i915/gvt/scheduler.h b/drivers/gpu/drm/i915/gvt/scheduler.h index ca5529d0e48ef..3cec02d2ac1a5 100644 --- a/drivers/gpu/drm/i915/gvt/scheduler.h +++ b/drivers/gpu/drm/i915/gvt/scheduler.h @@ -37,10 +37,10 @@ #define _GVT_SCHEDULER_H_ struct intel_gvt_workload_scheduler { - struct intel_vgpu *current_vgpu; - struct intel_vgpu *next_vgpu; + struct intel_vgpu *current_vgpu[I915_NUM_ENGINES]; + struct intel_vgpu *next_vgpu[I915_NUM_ENGINES]; struct intel_vgpu_workload *current_workload[I915_NUM_ENGINES]; - bool need_reschedule; + bool need_reschedule[I915_NUM_ENGINES]; spinlock_t mmio_context_lock; /* can be null when owner is host */ @@ -84,6 +84,7 @@ struct intel_vgpu_workload { /* if this workload has been dispatched to i915? 
*/ bool dispatched; int status; + unsigned int guilty_count; struct intel_vgpu_mm *shadow_mm; diff --git a/drivers/gpu/drm/i915/gvt/vgpu.c b/drivers/gpu/drm/i915/gvt/vgpu.c index c628be05fbfe9..5411b402f7285 100644 --- a/drivers/gpu/drm/i915/gvt/vgpu.c +++ b/drivers/gpu/drm/i915/gvt/vgpu.c @@ -37,6 +37,11 @@ void populate_pvinfo_page(struct intel_vgpu *vgpu) { + enum pipe pipe; + int scaler; + struct intel_gvt *gvt = vgpu->gvt; + struct drm_i915_private *dev_priv = gvt->dev_priv; + /* setup the ballooning information */ vgpu_vreg64_t(vgpu, vgtif_reg(magic)) = VGT_MAGIC; vgpu_vreg_t(vgpu, vgtif_reg(version_major)) = 1; @@ -62,6 +67,16 @@ void populate_pvinfo_page(struct intel_vgpu *vgpu) vgpu_vreg_t(vgpu, vgtif_reg(cursor_x_hot)) = UINT_MAX; vgpu_vreg_t(vgpu, vgtif_reg(cursor_y_hot)) = UINT_MAX; + vgpu_vreg_t(vgpu, vgtif_reg(scaler_owned)) = 0; + for_each_pipe(dev_priv, pipe) + for_each_universal_scaler(dev_priv, pipe, scaler) + if (gvt->pipe_info[pipe].scaler_owner[scaler] == + vgpu->id) + vgpu_vreg_t(vgpu, vgtif_reg(scaler_owned)) |= + 1 << (pipe * SKL_NUM_SCALERS + scaler); + + vgpu_vreg_t(vgpu, vgtif_reg(enable_pvmmio)) = 0; + gvt_dbg_core("Populate PVINFO PAGE for vGPU %d\n", vgpu->id); gvt_dbg_core("aperture base [GMADR] 0x%llx size 0x%llx\n", vgpu_aperture_gmadr_base(vgpu), vgpu_aperture_sz(vgpu)); @@ -102,6 +117,8 @@ static struct { * * Initialize vGPU type list based on available resource. * + * Returns: + * Zero on success, negative error code if failed. 
*/ int intel_gvt_init_vgpu_types(struct intel_gvt *gvt) { @@ -285,6 +302,7 @@ void intel_gvt_destroy_vgpu(struct intel_vgpu *vgpu) intel_vgpu_clean_gtt(vgpu); intel_gvt_hypervisor_detach_vgpu(vgpu); intel_vgpu_free_resource(vgpu); + intel_vgpu_reset_cfg_space(vgpu); intel_vgpu_clean_mmio(vgpu); intel_vgpu_dmabuf_cleanup(vgpu); mutex_unlock(&vgpu->vgpu_lock); @@ -525,6 +543,9 @@ void intel_gvt_reset_vgpu_locked(struct intel_vgpu *vgpu, bool dmlr, struct intel_gvt *gvt = vgpu->gvt; struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler; unsigned int resetting_eng = dmlr ? ALL_ENGINES : engine_mask; + enum intel_engine_id i; + struct intel_engine_cs *engine; + bool enable_pvmmio = vgpu_vreg_t(vgpu, vgtif_reg(enable_pvmmio)); gvt_dbg_core("------------------------------------------\n"); gvt_dbg_core("resseting vgpu%d, dmlr %d, engine_mask %08x\n", @@ -537,7 +558,10 @@ void intel_gvt_reset_vgpu_locked(struct intel_vgpu *vgpu, bool dmlr, * The current_vgpu will set to NULL after stopping the * scheduler when the reset is triggered by current vgpu. 
*/ - if (scheduler->current_vgpu == NULL) { + for_each_engine_masked(engine, gvt->dev_priv, resetting_eng, i) { + if (scheduler->current_vgpu[i] != NULL) + continue; + mutex_unlock(&vgpu->vgpu_lock); intel_gvt_wait_vgpu_idle(vgpu); mutex_lock(&vgpu->vgpu_lock); @@ -556,6 +580,7 @@ void intel_gvt_reset_vgpu_locked(struct intel_vgpu *vgpu, bool dmlr, intel_vgpu_reset_mmio(vgpu, dmlr); populate_pvinfo_page(vgpu); + vgpu_vreg_t(vgpu, vgtif_reg(enable_pvmmio)) = enable_pvmmio; intel_vgpu_reset_display(vgpu); if (dmlr) { diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c index f9ce35da4123e..89b2863845d5b 100644 --- a/drivers/gpu/drm/i915/i915_debugfs.c +++ b/drivers/gpu/drm/i915/i915_debugfs.c @@ -1939,6 +1939,49 @@ static void describe_ctx_ring(struct seq_file *m, struct intel_ring *ring) ring->space, ring->head, ring->tail, ring->emit); } +bool is_shadow_context(struct i915_gem_context *ctx) +{ + if (ctx->name && !strncmp(ctx->name, "Shadow Context", 14)) + return true; + + return false; +} + +int get_vgt_id(struct i915_gem_context *ctx) +{ + int vgt_id; + + vgt_id = 0; + + if (is_shadow_context(ctx)) + sscanf(ctx->name, "Shadow Context %d", &vgt_id); + + return vgt_id; +} + +int get_pid_shadowed(struct i915_gem_context *ctx, + struct intel_engine_cs *engine) +{ + int pid, vgt_id; + + sscanf(ctx->name, "Shadow Context %d", &vgt_id); + pid = intel_read_status_page(engine, I915_GEM_HWS_PID_INDEX + vgt_id); + return pid; +} + +static void describe_ctx_ring_shadowed(struct seq_file *m, + struct i915_gem_context *ctx, struct intel_ring *ring, + struct intel_engine_cs *engine) +{ + int pid, cid, vgt_id; + + sscanf(ctx->name, "Shadow Context %d", &vgt_id); + pid = intel_read_status_page(engine, I915_GEM_HWS_PID_INDEX + vgt_id); + cid = intel_read_status_page(engine, I915_GEM_HWS_CID_INDEX + vgt_id); + seq_printf(m, " (Current DomU Process PID: %d, CID: %d)", + pid, cid); +} + static int i915_context_status(struct seq_file *m, void *unused) 
{ struct drm_i915_private *dev_priv = node_to_i915(m->private); @@ -1953,6 +1996,7 @@ static int i915_context_status(struct seq_file *m, void *unused) return ret; list_for_each_entry(ctx, &dev_priv->contexts.list, link) { + bool is_shadow_context = false; seq_printf(m, "HW context %u ", ctx->hw_id); if (ctx->pid) { struct task_struct *task; @@ -1963,6 +2007,9 @@ static int i915_context_status(struct seq_file *m, void *unused) task->comm, task->pid); put_task_struct(task); } + } else if (ctx->name && !strncmp(ctx->name, "Shadow Context", 14)) { + seq_puts(m, "DomU Shadow Context "); + is_shadow_context = true; } else if (IS_ERR(ctx->file_priv)) { seq_puts(m, "(deleted) "); } else { @@ -1975,12 +2022,19 @@ static int i915_context_status(struct seq_file *m, void *unused) for_each_engine(engine, dev_priv, id) { struct intel_context *ce = to_intel_context(ctx, engine); + u64 lrc_desc = ce->lrc_desc; + seq_printf(m, "ctx id 0x%x ", (uint32_t)((lrc_desc >> 12) & + 0xFFFFF)); seq_printf(m, "%s: ", engine->name); if (ce->state) describe_obj(m, ce->state->obj); - if (ce->ring) + if (ce->ring) { describe_ctx_ring(m, ce->ring); + if(is_shadow_context) + describe_ctx_ring_shadowed(m, ctx, + ce->ring, engine); + } seq_putc(m, '\n'); } @@ -2954,15 +3008,23 @@ static void intel_crtc_info(struct seq_file *m, struct intel_crtc *intel_crtc) struct drm_device *dev = &dev_priv->drm; struct drm_crtc *crtc = &intel_crtc->base; struct intel_encoder *intel_encoder; - struct drm_plane_state *plane_state = crtc->primary->state; - struct drm_framebuffer *fb = plane_state->fb; + struct drm_plane_state *plane_state; + struct drm_framebuffer *fb; - if (fb) - seq_printf(m, "\tfb: %d, pos: %dx%d, size: %dx%d\n", + if (!crtc->primary) { + seq_puts(m, "\tno primary plane\n"); + } else { + plane_state = crtc->primary->state; + fb = plane_state->fb; + + if (fb) + seq_printf(m, "\tfb: %d, pos: %dx%d, size: %dx%d\n", fb->base.id, plane_state->src_x >> 16, plane_state->src_y >> 16, fb->width, 
fb->height); - else - seq_puts(m, "\tprimary plane disabled\n"); + else + seq_puts(m, "\tprimary plane disabled\n"); + } + for_each_encoder_on_crtc(dev, crtc, intel_encoder) intel_encoder_info(m, intel_crtc, intel_encoder); } @@ -3207,13 +3269,18 @@ static int i915_display_info(struct seq_file *m, void *unused) intel_crtc_info(m, crtc); - seq_printf(m, "\tcursor visible? %s, position (%d, %d), size %dx%d, addr 0x%08x\n", - yesno(cursor->base.state->visible), - cursor->base.state->crtc_x, - cursor->base.state->crtc_y, - cursor->base.state->crtc_w, - cursor->base.state->crtc_h, - cursor->cursor.base); + if (cursor) { + seq_printf(m, "\tcursor visible? %s, position (%d, %d), size %dx%d, addr 0x%08x\n", + yesno(cursor->base.state->visible), + cursor->base.state->crtc_x, + cursor->base.state->crtc_y, + cursor->base.state->crtc_w, + cursor->base.state->crtc_h, + cursor->cursor.base); + } else { + seq_puts(m, "\tNo cursor plane available on this platform\n"); + } + intel_scaler_info(m, crtc); intel_plane_info(m, crtc); } @@ -4000,6 +4067,9 @@ i915_wedged_set(void *data, u64 val) struct intel_engine_cs *engine; unsigned int tmp; + if (intel_vgpu_active(i915)) + return -EINVAL; + /* * There is no safeguard against this debugfs entry colliding * with the hangcheck calling same i915_handle_error() in diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c index f8cfd16be534c..c7f4f94fd49bd 100644 --- a/drivers/gpu/drm/i915/i915_drv.c +++ b/drivers/gpu/drm/i915/i915_drv.c @@ -51,6 +51,7 @@ #include "i915_pmu.h" #include "i915_query.h" #include "i915_vgpu.h" +#include "intel_uc.h" #include "intel_drv.h" #include "intel_uc.h" @@ -689,9 +690,11 @@ static int i915_load_modeset_init(struct drm_device *dev) if (INTEL_INFO(dev_priv)->num_pipes == 0) return 0; - ret = intel_fbdev_init(dev); - if (ret) - goto cleanup_gem; + if (!i915_modparams.enable_initial_modeset) { + ret = intel_fbdev_init(dev); + if (ret) + goto cleanup_gem; + } /* Only enable hotplug 
handling once the fbdev is fully set up. */ intel_hpd_init(dev_priv); @@ -895,6 +898,7 @@ static int i915_driver_init_early(struct drm_i915_private *dev_priv, sizeof(device_info->platform_mask) * BITS_PER_BYTE); BUG_ON(device_info->gen > sizeof(device_info->gen_mask) * BITS_PER_BYTE); spin_lock_init(&dev_priv->irq_lock); + spin_lock_init(&dev_priv->shared_page_lock); spin_lock_init(&dev_priv->gpu_error.lock); mutex_init(&dev_priv->backlight_lock); spin_lock_init(&dev_priv->uncore.lock); @@ -991,6 +995,9 @@ static void i915_mmio_cleanup(struct drm_i915_private *dev_priv) intel_teardown_mchbar(dev_priv); pci_iounmap(pdev, dev_priv->regs); + if (intel_vgpu_active(dev_priv) && dev_priv->shared_page) + pci_iounmap(pdev, dev_priv->shared_page); + } /** @@ -1024,6 +1031,21 @@ static int i915_driver_init_mmio(struct drm_i915_private *dev_priv) intel_uc_init_mmio(dev_priv); + if (intel_vgpu_active(dev_priv) && i915_modparams.enable_pvmmio) { + u32 bar = 0; + u32 mmio_size = 2 * 1024 * 1024; + + /* Map a share page from the end of 2M mmio region in bar0. */ + dev_priv->shared_page = (struct gvt_shared_page *) + pci_iomap_range(dev_priv->drm.pdev, bar, + mmio_size, PAGE_SIZE); + if (dev_priv->shared_page == NULL) { + ret = -EIO; + DRM_ERROR("ivi: failed to map share page.\n"); + goto err_uncore; + } + } + ret = intel_engines_init_mmio(dev_priv); if (ret) goto err_uncore; @@ -1033,6 +1055,8 @@ static int i915_driver_init_mmio(struct drm_i915_private *dev_priv) return 0; err_uncore: + if (intel_vgpu_active(dev_priv) && dev_priv->shared_page) + pci_iounmap(dev_priv->drm.pdev, dev_priv->shared_page); intel_uncore_fini(dev_priv); err_bridge: pci_dev_put(dev_priv->bridge_dev); @@ -1264,7 +1288,10 @@ static void i915_driver_register(struct drm_i915_private *dev_priv) * irqs are fully enabled. We do it last so that the async config * cannot run before the connectors are registered. 
*/ - intel_fbdev_initial_config_async(dev); + if (i915_modparams.enable_initial_modeset) + intel_initial_mode_config_init(dev); + else + intel_fbdev_initial_config_async(dev); /* * We need to coordinate the hotplugs with the asynchronous fbdev @@ -1318,6 +1345,24 @@ static void i915_welcome_messages(struct drm_i915_private *dev_priv) DRM_INFO("DRM_I915_DEBUG_GEM enabled\n"); } +static inline int get_max_avail_pipes(struct drm_i915_private *dev_priv) +{ + enum pipe pipe; + int index = 0; + + if (!intel_vgpu_active(dev_priv) || + !i915_modparams.avail_planes_per_pipe) + return INTEL_INFO(dev_priv)->num_pipes; + + for_each_pipe(dev_priv, pipe) { + if (AVAIL_PLANE_PER_PIPE(dev_priv, i915_modparams.avail_planes_per_pipe, + pipe)) + index++; + } + + return index; +} + /** * i915_driver_load - setup chip and create an initial config * @pdev: PCI device @@ -1335,6 +1380,7 @@ int i915_driver_load(struct pci_dev *pdev, const struct pci_device_id *ent) (struct intel_device_info *)ent->driver_data; struct drm_i915_private *dev_priv; int ret; + int num_crtcs = 0; /* Enable nuclear pageflip on ILK+ */ if (!i915_modparams.nuclear_pageflip && match_info->gen < 5) @@ -1386,9 +1432,9 @@ int i915_driver_load(struct pci_dev *pdev, const struct pci_device_id *ent) * of the i915_driver_init_/i915_driver_register functions according * to the role/effect of the given init step. 
*/ - if (INTEL_INFO(dev_priv)->num_pipes) { - ret = drm_vblank_init(&dev_priv->drm, - INTEL_INFO(dev_priv)->num_pipes); + num_crtcs = get_max_avail_pipes(dev_priv); + if (num_crtcs) { + ret = drm_vblank_init(&dev_priv->drm, num_crtcs); if (ret) goto out_cleanup_hw; } @@ -1434,6 +1480,11 @@ void i915_driver_unload(struct drm_device *dev) i915_driver_unregister(dev_priv); + if (!i915_modparams.enable_initial_modeset) + intel_fbdev_fini(dev_priv); + else + intel_initial_mode_config_fini(dev); + if (i915_gem_suspend(dev_priv)) DRM_ERROR("failed to idle hardware; continuing to unload!\n"); @@ -1503,7 +1554,8 @@ static int i915_driver_open(struct drm_device *dev, struct drm_file *file) */ static void i915_driver_lastclose(struct drm_device *dev) { - intel_fbdev_restore_mode(dev); + if (!i915_modparams.enable_initial_modeset) + intel_fbdev_restore_mode(dev); vga_switcheroo_process_delayed_switch(); } @@ -1516,6 +1568,10 @@ static void i915_driver_postclose(struct drm_device *dev, struct drm_file *file) i915_gem_release(dev, file); mutex_unlock(&dev->struct_mutex); +#if IS_ENABLED(CONFIG_DRM_I915_MEMTRACK) + kfree(file_priv->process_name); +#endif + kfree(file_priv); } @@ -2860,6 +2916,7 @@ static const struct drm_ioctl_desc i915_ioctls[] = { DRM_IOCTL_DEF_DRV(I915_PERF_ADD_CONFIG, i915_perf_add_config_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW), DRM_IOCTL_DEF_DRV(I915_PERF_REMOVE_CONFIG, i915_perf_remove_config_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW), DRM_IOCTL_DEF_DRV(I915_QUERY, i915_query_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW), + DRM_IOCTL_DEF_DRV(I915_GEM_GVTBUFFER, i915_gem_gvtbuffer_ioctl, DRM_RENDER_ALLOW), }; static struct drm_driver driver = { @@ -2874,6 +2931,9 @@ static struct drm_driver driver = { .lastclose = i915_driver_lastclose, .postclose = i915_driver_postclose, +#if IS_ENABLED(CONFIG_DRM_I915_MEMTRACK) + .gem_open_object = i915_gem_open_object, +#endif .gem_close_object = i915_gem_close_object, .gem_free_object_unlocked = i915_gem_free_object, .gem_vm_ops = 
&i915_gem_vm_ops, diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 4aca5344863d6..8a4fb73727fd4 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -55,6 +55,7 @@ #include "i915_params.h" #include "i915_reg.h" +#include "i915_pvinfo.h" #include "i915_utils.h" #include "intel_bios.h" @@ -78,6 +79,9 @@ #include "i915_scheduler.h" #include "i915_timeline.h" #include "i915_vma.h" +#if IS_ENABLED(CONFIG_DRM_I915_MEMTRACK) +#include "i915_gpu_error.h" +#endif #include "intel_gvt.h" @@ -333,6 +337,11 @@ struct drm_i915_file_private { struct drm_i915_private *dev_priv; struct drm_file *file; +#if IS_ENABLED(CONFIG_DRM_I915_MEMTRACK) + char *process_name; + struct pid *tgid; +#endif + struct { spinlock_t lock; struct list_head request_list; @@ -351,6 +360,10 @@ struct drm_i915_file_private { unsigned int bsd_engine; +#if IS_ENABLED(CONFIG_DRM_I915_MEMTRACK) + struct bin_attribute *obj_attr; +#endif + /* * Every context ban increments per client ban score. Also * hangs in short succession increments ban score. 
If ban threshold @@ -996,6 +1009,10 @@ struct i915_gem_mm { spinlock_t object_stat_lock; u64 object_memory; u32 object_count; + +#if IS_ENABLED(CONFIG_DRM_I915_MEMTRACK) + size_t phys_mem_total; +#endif }; #define I915_IDLE_ENGINES_TIMEOUT (200) /* in ms */ @@ -1306,6 +1323,7 @@ struct i915_workarounds { struct i915_virtual_gpu { bool active; u32 caps; + u32 scaler_owned; }; /* used in computing the new watermarks state */ @@ -1589,6 +1607,8 @@ struct drm_i915_private { resource_size_t stolen_usable_size; /* Total size minus reserved ranges */ void __iomem *regs; + struct gvt_shared_page *shared_page; + spinlock_t shared_page_lock; struct intel_uncore uncore; @@ -1666,6 +1686,10 @@ struct drm_i915_private { bool preserve_bios_swizzle; +#if IS_ENABLED(CONFIG_DRM_I915_MEMTRACK) + struct kobject memtrack_kobj; +#endif + /* overlay */ struct intel_overlay *overlay; @@ -1750,6 +1774,8 @@ struct drm_i915_private { struct intel_pipe_crc pipe_crc[I915_MAX_PIPES]; #endif + struct work_struct initial_modeset_work; + /* dpll and cdclk state is protected by connection_mutex */ int num_shared_dpll; struct intel_shared_dpll shared_dplls[I915_NUM_PLLS]; @@ -1837,6 +1863,10 @@ struct drm_i915_private { * This is limited in execlists to 21 bits. */ struct ida hw_ida; + + /* In case of virtualization, 3-bits of vgt-id will be added to hw_id */ +#define SIZE_CONTEXT_HW_ID_GVT (18) +#define MAX_CONTEXT_HW_ID_GVT (1<sgl, true); \ ((__dmap) = (__iter).dma + (__iter).curr); \ - (((__iter).curr += PAGE_SIZE) >= (__iter).max) ? \ + (((__iter).curr += I915_GTT_PAGE_SIZE) >= (__iter).max) ? 
\ (__iter) = __sgt_iter(__sg_next((__iter).sgp), true), 0 : 0) /** @@ -2664,6 +2696,11 @@ intel_info(const struct drm_i915_private *dev_priv) #define GT_FREQUENCY_MULTIPLIER 50 #define GEN9_FREQ_SCALER 3 +#define BITS_PER_PIPE 8 +#define AVAIL_PLANE_PER_PIPE(dev_priv, mask, pipe) \ + (((mask) >> (pipe) * BITS_PER_PIPE) & \ + ((1 << ((INTEL_INFO(dev_priv)->num_sprites[pipe]) + 1)) - 1)) + #include "i915_trace.h" static inline bool intel_vtd_active(void) @@ -2727,6 +2764,11 @@ extern unsigned long i915_chipset_val(struct drm_i915_private *dev_priv); extern unsigned long i915_mch_val(struct drm_i915_private *dev_priv); extern unsigned long i915_gfx_val(struct drm_i915_private *dev_priv); extern void i915_update_gfx_val(struct drm_i915_private *dev_priv); + +/* initial modesetting support */ +extern void intel_initial_mode_config_init(struct drm_device *dev); +extern void intel_initial_mode_config_fini(struct drm_device *dev); + int vlv_force_gfx_clock(struct drm_i915_private *dev_priv, bool on); int intel_engines_init_mmio(struct drm_i915_private *dev_priv); @@ -2780,7 +2822,7 @@ static inline bool intel_gvt_active(struct drm_i915_private *dev_priv) return dev_priv->gvt; } -static inline bool intel_vgpu_active(struct drm_i915_private *dev_priv) +static inline bool intel_vgpu_active(const struct drm_i915_private *dev_priv) { return dev_priv->vgpu.active; } @@ -2814,18 +2856,18 @@ ilk_disable_display_irq(struct drm_i915_private *dev_priv, uint32_t bits) ilk_update_display_irq(dev_priv, bits, 0); } void bdw_update_pipe_irq(struct drm_i915_private *dev_priv, - enum pipe pipe, + unsigned int crtc_index, uint32_t interrupt_mask, uint32_t enabled_irq_mask); static inline void bdw_enable_pipe_irq(struct drm_i915_private *dev_priv, - enum pipe pipe, uint32_t bits) + unsigned int crtc_index, uint32_t bits) { - bdw_update_pipe_irq(dev_priv, pipe, bits, bits); + bdw_update_pipe_irq(dev_priv, crtc_index, bits, bits); } static inline void bdw_disable_pipe_irq(struct 
drm_i915_private *dev_priv, - enum pipe pipe, uint32_t bits) + unsigned int crtc_index, uint32_t bits) { - bdw_update_pipe_irq(dev_priv, pipe, bits, 0); + bdw_update_pipe_irq(dev_priv, crtc_index, bits, 0); } void ibx_display_interrupt_update(struct drm_i915_private *dev_priv, uint32_t interrupt_mask, @@ -2898,6 +2940,11 @@ i915_gem_object_create(struct drm_i915_private *dev_priv, u64 size); struct drm_i915_gem_object * i915_gem_object_create_from_data(struct drm_i915_private *dev_priv, const void *data, size_t size); + +#if IS_ENABLED(CONFIG_DRM_I915_MEMTRACK) +int i915_gem_open_object(struct drm_gem_object *gem, struct drm_file *file); +#endif + void i915_gem_close_object(struct drm_gem_object *gem, struct drm_file *file); void i915_gem_free_object(struct drm_gem_object *obj); @@ -3149,6 +3196,7 @@ void i915_gem_reset_engine(struct intel_engine_cs *engine, void i915_gem_init_mmio(struct drm_i915_private *i915); int __must_check i915_gem_init(struct drm_i915_private *dev_priv); int __must_check i915_gem_init_hw(struct drm_i915_private *dev_priv); +int __must_check i915_gem_init_hw_late(struct drm_i915_private *dev_priv); void i915_gem_init_swizzling(struct drm_i915_private *dev_priv); void i915_gem_fini(struct drm_i915_private *dev_priv); void i915_gem_cleanup_engines(struct drm_i915_private *dev_priv); @@ -3164,8 +3212,10 @@ int i915_gem_object_wait(struct drm_i915_gem_object *obj, struct intel_rps_client *rps); int i915_gem_object_wait_priority(struct drm_i915_gem_object *obj, unsigned int flags, - const struct i915_sched_attr *attr); + const struct i915_sched_attr *attr, + unsigned int timeout); #define I915_PRIORITY_DISPLAY I915_PRIORITY_MAX +#define I915_PREEMPTION_TIMEOUT_DISPLAY (100 * 1000 * 1000) /* 100 ms / 10Hz */ int __must_check i915_gem_object_set_to_wc_domain(struct drm_i915_gem_object *obj, bool write); @@ -3242,6 +3292,16 @@ int i915_perf_remove_config_ioctl(struct drm_device *dev, void *data, void i915_oa_init_reg_state(struct intel_engine_cs 
*engine, struct i915_gem_context *ctx, uint32_t *reg_state); +#ifdef CONFIG_DRM_I915_GVT +int i915_gem_gvtbuffer_ioctl(struct drm_device *dev, void *data, + struct drm_file *file); +#else +static inline int i915_gem_gvtbuffer_ioctl(struct drm_device *dev, void *data, + struct drm_file *file) +{ + return -EINVAL; +} +#endif /* i915_gem_evict.c */ int __must_check i915_gem_evict_something(struct i915_address_space *vm, @@ -3319,6 +3379,19 @@ u32 i915_gem_fence_size(struct drm_i915_private *dev_priv, u32 size, u32 i915_gem_fence_alignment(struct drm_i915_private *dev_priv, u32 size, unsigned int tiling, unsigned int stride); +#if IS_ENABLED(CONFIG_DRM_I915_MEMTRACK) +int i915_get_pid_cmdline(struct task_struct *task, char *buffer); +int i915_gem_obj_insert_pid(struct drm_i915_gem_object *obj); +void i915_gem_obj_remove_all_pids(struct drm_i915_gem_object *obj); +int i915_obj_insert_virt_addr(struct drm_i915_gem_object *obj, + unsigned long addr, bool is_map_gtt, + bool is_mutex_locked); +int i915_get_drm_clients_info(struct drm_i915_error_state_buf *m, + struct drm_device *dev); +int i915_gem_get_obj_info(struct drm_i915_error_state_buf *m, + struct drm_device *dev, struct pid *tgid); +#endif + /* i915_debugfs.c */ #ifdef CONFIG_DEBUG_FS int i915_debugfs_register(struct drm_i915_private *dev_priv); @@ -3358,6 +3431,13 @@ extern int i915_restore_state(struct drm_i915_private *dev_priv); void i915_setup_sysfs(struct drm_i915_private *dev_priv); void i915_teardown_sysfs(struct drm_i915_private *dev_priv); +#if IS_ENABLED(CONFIG_DRM_I915_MEMTRACK) +int i915_gem_create_sysfs_file_entry(struct drm_device *dev, + struct drm_file *file); +void i915_gem_remove_sysfs_file_entry(struct drm_device *dev, + struct drm_file *file); +#endif + /* intel_lpe_audio.c */ int intel_lpe_audio_init(struct drm_i915_private *dev_priv); void intel_lpe_audio_teardown(struct drm_i915_private *dev_priv); @@ -3578,7 +3658,11 @@ static inline u64 intel_rc6_residency_us(struct drm_i915_private 
*dev_priv, static inline uint##x##_t __raw_i915_read##x(const struct drm_i915_private *dev_priv, \ i915_reg_t reg) \ { \ - return read##s(dev_priv->regs + i915_mmio_reg_offset(reg)); \ + if (!intel_vgpu_active(dev_priv) || !i915_modparams.enable_pvmmio || \ + likely(!in_mmio_read_trap_list((reg).reg))) \ + return read##s(dev_priv->regs + i915_mmio_reg_offset(reg)); \ + dev_priv->shared_page->reg_addr = i915_mmio_reg_offset(reg); \ + return read##s(dev_priv->regs + i915_mmio_reg_offset(vgtif_reg(pv_mmio))); \ } #define __raw_write(x, s) \ diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index fcc73a6ab503e..abf74c3d4b1a9 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -47,8 +47,852 @@ #include #include +#if IS_ENABLED(CONFIG_DRM_I915_MEMTRACK) +#include +#include +#include +#include "../drm_internal.h" +#endif + static void i915_gem_flush_free_objects(struct drm_i915_private *i915); +#if IS_ENABLED(CONFIG_DRM_I915_MEMTRACK) +struct per_file_obj_mem_info { + int num_obj; + int num_obj_shared; + int num_obj_private; + int num_obj_gtt_bound; + int num_obj_purged; + int num_obj_purgeable; + int num_obj_allocated; + int num_obj_fault_mappable; + int num_obj_stolen; + size_t gtt_space_allocated_shared; + size_t gtt_space_allocated_priv; + size_t phys_space_allocated_shared; + size_t phys_space_allocated_priv; + size_t phys_space_purgeable; + size_t phys_space_shared_proportion; + size_t fault_mappable_size; + size_t stolen_space_allocated; + char *process_name; +}; + +struct name_entry { + struct list_head head; + struct drm_hash_item hash_item; +}; + +struct pid_stat_entry { + struct list_head head; + struct list_head namefree; + struct drm_open_hash namelist; + struct per_file_obj_mem_info stats; + struct pid *tgid; + int pid_num; +}; + +struct drm_i915_obj_virt_addr { + struct list_head head; + unsigned long user_virt_addr; +}; + +struct drm_i915_obj_pid_info { + struct list_head head; + pid_t tgid; + 
int open_handle_count; + struct list_head virt_addr_head; +}; + +struct get_obj_stats_buf { + struct pid_stat_entry *entry; + struct drm_i915_error_state_buf *m; +}; + +#define err_printf(e, ...) i915_error_printf(e, __VA_ARGS__) +#define err_puts(e, s) i915_error_puts(e, s) + +static const char *get_tiling_flag(struct drm_i915_gem_object *obj) +{ + switch (i915_gem_object_get_tiling(obj)) { + default: + case I915_TILING_NONE: return " "; + case I915_TILING_X: return "X"; + case I915_TILING_Y: return "Y"; + } +} + +/* + * If this mmput call is the last one, it will tear down the mmaps of the + * process and calls drm_gem_vm_close(), which leads deadlock on i915 mutex. + * Instead, asynchronously schedule mmput function here, to avoid recursive + * calls to acquire i915_mutex. + */ +static void async_mmput_func(void *data, async_cookie_t cookie) +{ + struct mm_struct *mm = data; + mmput(mm); +} + +static void async_mmput(struct mm_struct *mm) +{ + async_schedule(async_mmput_func, mm); +} + +int i915_get_pid_cmdline(struct task_struct *task, char *buffer) +{ + int res = 0; + unsigned int len; + struct mm_struct *mm = get_task_mm(task); + + if (!mm) + goto out; + if (!mm->arg_end) + goto out_mm; + + len = mm->arg_end - mm->arg_start; + + if (len > PAGE_SIZE) + len = PAGE_SIZE; + + res = access_process_vm(task, mm->arg_start, buffer, len, 0); + if (res < 0) { + async_mmput(mm); + return res; + } + + if (res > 0 && buffer[res-1] != '\0' && len < PAGE_SIZE) + buffer[res-1] = '\0'; +out_mm: + async_mmput(mm); +out: + return 0; +} + +static int i915_obj_get_shmem_pages_alloced(struct drm_i915_gem_object *obj) +{ + if (obj->base.filp) { + struct inode *inode = file_inode(obj->base.filp); + + if (!inode) + return 0; + return inode->i_mapping->nrpages; + } + return 0; +} + +int i915_gem_obj_insert_pid(struct drm_i915_gem_object *obj) +{ + int found = 0; + struct drm_i915_obj_pid_info *entry; + pid_t current_tgid = task_tgid_nr(current); + + 
mutex_lock(&obj->base.dev->struct_mutex); + + list_for_each_entry(entry, &obj->pid_info, head) { + if (entry->tgid == current_tgid) { + entry->open_handle_count++; + found = 1; + break; + } + } + if (found == 0) { + entry = kzalloc(sizeof(*entry), GFP_KERNEL); + if (entry == NULL) { + DRM_ERROR("alloc failed\n"); + mutex_unlock(&obj->base.dev->struct_mutex); + return -ENOMEM; + } + entry->tgid = current_tgid; + entry->open_handle_count = 1; + INIT_LIST_HEAD(&entry->virt_addr_head); + list_add_tail(&entry->head, &obj->pid_info); + } + + mutex_unlock(&obj->base.dev->struct_mutex); + return 0; +} + +void i915_gem_obj_remove_all_pids(struct drm_i915_gem_object *obj) +{ + struct drm_i915_obj_pid_info *pid_entry, *pid_next; + struct drm_i915_obj_virt_addr *virt_entry, *virt_next; + + list_for_each_entry_safe(pid_entry, pid_next, &obj->pid_info, head) { + list_for_each_entry_safe(virt_entry, + virt_next, + &pid_entry->virt_addr_head, + head) { + list_del(&virt_entry->head); + kfree(virt_entry); + } + list_del(&pid_entry->head); + kfree(pid_entry); + } +} + + int +i915_obj_insert_virt_addr(struct drm_i915_gem_object *obj, + unsigned long addr, + bool is_map_gtt, + bool is_mutex_locked) +{ + struct drm_i915_obj_pid_info *pid_entry; + pid_t current_tgid = task_tgid_nr(current); + int ret = 0, found = 0; + + if (is_map_gtt) + addr |= 1; + + if (!is_mutex_locked) { + ret = i915_mutex_lock_interruptible(obj->base.dev); + if (ret) + return ret; + } + + list_for_each_entry(pid_entry, &obj->pid_info, head) { + if (pid_entry->tgid == current_tgid) { + struct drm_i915_obj_virt_addr *virt_entry, *new_entry; + + list_for_each_entry(virt_entry, + &pid_entry->virt_addr_head, + head) { + if (virt_entry->user_virt_addr == addr) { + found = 1; + break; + } + } + if (found) + break; + new_entry = kzalloc(sizeof(*new_entry), GFP_KERNEL); + if (new_entry == NULL) { + DRM_ERROR("alloc failed\n"); + ret = -ENOMEM; + goto out; + } + new_entry->user_virt_addr = addr; + 
list_add_tail(&new_entry->head, + &pid_entry->virt_addr_head); + break; + } + } + +out: + if (!is_mutex_locked) + mutex_unlock(&obj->base.dev->struct_mutex); + + return ret; +} + +static int i915_obj_virt_addr_is_invalid(struct drm_gem_object *obj, + struct pid *tgid, unsigned long addr) +{ + struct task_struct *task; + struct mm_struct *mm; + struct vm_area_struct *vma; + int locked, ret = 0; + + task = get_pid_task(tgid, PIDTYPE_PID); + if (task == NULL) { + DRM_DEBUG("null task for tgid=%d\n", pid_nr(tgid)); + return -EINVAL; + } + + mm = get_task_mm(task); + if (mm == NULL) { + DRM_DEBUG("null mm for tgid=%d\n", pid_nr(tgid)); + ret = -EINVAL; + goto out_task; + } + + locked = down_read_trylock(&mm->mmap_sem); + if (!locked) + goto out_mm; + + vma = find_vma(mm, addr); + if (vma) { + if (addr & 1) { /* mmap_gtt case */ + if (vma->vm_pgoff*PAGE_SIZE == (unsigned long) + drm_vma_node_offset_addr(&obj->vma_node)) + ret = 0; + else + ret = -EINVAL; + } else { /* mmap case */ + if (vma->vm_file == obj->filp) + ret = 0; + else + ret = -EINVAL; + } + } else + ret = -EINVAL; + + up_read(&mm->mmap_sem); + +out_mm: + async_mmput(mm); +out_task: + put_task_struct(task); + return ret; +} + +static void i915_obj_pidarray_validate(struct drm_gem_object *gem_obj) +{ + struct drm_i915_gem_object *obj = to_intel_bo(gem_obj); + struct drm_device *dev = gem_obj->dev; + struct drm_i915_obj_virt_addr *virt_entry, *virt_next; + struct drm_i915_obj_pid_info *pid_entry, *pid_next; + struct drm_file *file; + struct drm_i915_file_private *file_priv; + struct pid *tgid; + int pid_num, present; + + /* + * Run a sanity check on pid_array. All entries in pid_array should + * be subset of the the drm filelist pid entries. + */ + list_for_each_entry_safe(pid_entry, pid_next, &obj->pid_info, head) { + if (pid_next == NULL) { + DRM_ERROR( + "Invalid pid info. 
obj:%p, size:%zdK, tiling:%s, userptr=%s, stolen:%s, name:%d, handle_count=%d\n", + &obj->base, obj->base.size/1024, + get_tiling_flag(obj), + (obj->userptr.mm != 0) ? "Y" : "N", + obj->stolen ? "Y" : "N", obj->base.name, + obj->base.handle_count); + break; + } + + present = 0; + list_for_each_entry(file, &dev->filelist, lhead) { + file_priv = file->driver_priv; + tgid = file_priv->tgid; + pid_num = pid_nr(tgid); + + if (pid_num == pid_entry->tgid) { + present = 1; + break; + } + } + if (present == 0) { + DRM_DEBUG("stale_tgid=%d\n", pid_entry->tgid); + list_for_each_entry_safe(virt_entry, virt_next, + &pid_entry->virt_addr_head, + head) { + list_del(&virt_entry->head); + kfree(virt_entry); + } + list_del(&pid_entry->head); + kfree(pid_entry); + } else { + /* Validate the virtual address list */ + struct task_struct *task = + get_pid_task(tgid, PIDTYPE_PID); + if (task == NULL) + continue; + + list_for_each_entry_safe(virt_entry, virt_next, + &pid_entry->virt_addr_head, + head) { + if (i915_obj_virt_addr_is_invalid(gem_obj, tgid, + virt_entry->user_virt_addr)) { + DRM_DEBUG("stale_addr=%ld\n", + virt_entry->user_virt_addr); + list_del(&virt_entry->head); + kfree(virt_entry); + } + } + put_task_struct(task); + } + } +} + +static int i915_obj_find_insert_in_hash(struct drm_i915_gem_object *obj, + struct pid_stat_entry *pid_entry, + bool *found) +{ + struct drm_hash_item *hash_item; + int ret; + + ret = drm_ht_find_item(&pid_entry->namelist, + (unsigned long)&obj->base, &hash_item); + /* Not found, insert in hash */ + if (ret) { + struct name_entry *entry = + kzalloc(sizeof(*entry), GFP_NOWAIT); + if (entry == NULL) { + DRM_ERROR("alloc failed\n"); + return -ENOMEM; + } + entry->hash_item.key = (unsigned long)&obj->base; + drm_ht_insert_item(&pid_entry->namelist, + &entry->hash_item); + list_add_tail(&entry->head, &pid_entry->namefree); + *found = false; + } else + *found = true; + + return 0; +} + +static int i915_obj_shared_count(struct drm_i915_gem_object *obj, + 
struct pid_stat_entry *pid_entry, + bool *discard) +{ + struct drm_i915_obj_pid_info *pid_info_entry; + int ret, obj_shared_count = 0; + + /* + * The object can be shared among different processes by either flink + * or dma-buf mechanism, leading to shared count more than 1. For the + * objects not shared , return the shared count as 1. + * In case of shared dma-buf objects, there's a possibility that these + * may be external to i915. Detect this condition through + * 'import_attach' field. + */ + if (!obj->base.name && !obj->base.dma_buf) + return 1; + else if(obj->base.import_attach) { + /* not our GEM obj */ + *discard = true; + return 0; + } + + ret = i915_obj_find_insert_in_hash(obj, pid_entry, discard); + if (ret) + return ret; + + list_for_each_entry(pid_info_entry, &obj->pid_info, head) + obj_shared_count++; + + if (WARN_ON(obj_shared_count == 0)) + return -EINVAL; + + return obj_shared_count; +} + + static int +i915_describe_obj(struct get_obj_stats_buf *obj_stat_buf, + struct drm_i915_gem_object *obj) +{ + struct pid_stat_entry *pid_entry = obj_stat_buf->entry; + struct per_file_obj_mem_info *stats = &pid_entry->stats; + int obj_shared_count = 0; + + bool discard = false; + + obj_shared_count = i915_obj_shared_count(obj, pid_entry, &discard); + if (obj_shared_count < 0) + return obj_shared_count; + + if (!discard && !obj->stolen && + (obj->mm.madv != __I915_MADV_PURGED) && + (i915_obj_get_shmem_pages_alloced(obj) != 0)) { + if (obj_shared_count > 1) + stats->phys_space_shared_proportion += + obj->base.size/obj_shared_count; + else + stats->phys_space_allocated_priv += + obj->base.size; + } + + return 0; +} + + static int +i915_drm_gem_obj_info(int id, void *ptr, void *data) +{ + struct drm_i915_gem_object *obj = ptr; + struct get_obj_stats_buf *obj_stat_buf = data; + + if (obj->pid_info.next == NULL) { + DRM_ERROR( + "Invalid pid info. 
obj:%p, size:%zdK, tiling:%s, userptr=%s, stolen:%s, name:%d, handle_count=%d\n", + &obj->base, obj->base.size/1024, + get_tiling_flag(obj), + (obj->userptr.mm != 0) ? "Y" : "N", + obj->stolen ? "Y" : "N", obj->base.name, + obj->base.handle_count); + return 0; + } + + return i915_describe_obj(obj_stat_buf, obj); +} + +bool i915_gem_obj_bound_any(struct drm_i915_gem_object *o) +{ + struct i915_vma *vma; + + list_for_each_entry(vma, &o->vma_list, obj_link) + if (drm_mm_node_allocated(&vma->node)) + return true; + + return false; +} + + static int +i915_drm_gem_object_per_file_summary(int id, void *ptr, void *data) +{ + struct pid_stat_entry *pid_entry = data; + struct drm_i915_gem_object *obj = ptr; + struct per_file_obj_mem_info *stats = &pid_entry->stats; + int obj_shared_count = 0; + bool discard = false; + + if (obj->pid_info.next == NULL) { + DRM_ERROR( + "Invalid pid info. obj:%p, size:%zdK, tiling:%s, userptr=%s, stolen:%s, name:%d, handle_count=%d\n", + &obj->base, obj->base.size/1024, + get_tiling_flag(obj), + (obj->userptr.mm != 0) ? "Y" : "N", + obj->stolen ? 
"Y" : "N", obj->base.name, + obj->base.handle_count); + return 0; + } + + i915_obj_pidarray_validate(&obj->base); + + stats->num_obj++; + + obj_shared_count = i915_obj_shared_count(obj, pid_entry, &discard); + if (obj_shared_count < 0) + return obj_shared_count; + + if (discard) + return 0; + + if (obj_shared_count > 1) + stats->num_obj_shared++; + else + stats->num_obj_private++; + + if (i915_gem_obj_bound_any(obj)) { + stats->num_obj_gtt_bound++; + if (obj_shared_count > 1) + stats->gtt_space_allocated_shared += obj->base.size; + else + stats->gtt_space_allocated_priv += obj->base.size; + } + + if (obj->stolen) { + stats->num_obj_stolen++; + stats->stolen_space_allocated += obj->base.size; + } else if (obj->mm.madv == __I915_MADV_PURGED) { + stats->num_obj_purged++; + } else if (obj->mm.madv == I915_MADV_DONTNEED) { + stats->num_obj_purgeable++; + stats->num_obj_allocated++; + if (i915_obj_get_shmem_pages_alloced(obj) != 0) { + stats->phys_space_purgeable += obj->base.size; + if (obj_shared_count > 1) { + stats->phys_space_allocated_shared += + obj->base.size; + stats->phys_space_shared_proportion += + obj->base.size/obj_shared_count; + } else + stats->phys_space_allocated_priv += + obj->base.size; + } else + WARN_ON(1); + } else if (i915_obj_get_shmem_pages_alloced(obj) != 0) { + stats->num_obj_allocated++; + if (obj_shared_count > 1) { + stats->phys_space_allocated_shared += + obj->base.size; + stats->phys_space_shared_proportion += + obj->base.size/obj_shared_count; + } + else + stats->phys_space_allocated_priv += obj->base.size; + } + return 0; +} + + static int +__i915_get_drm_clients_info(struct drm_i915_error_state_buf *m, + struct drm_device *dev) +{ + struct drm_file *file; + struct drm_i915_private *dev_priv = dev->dev_private; + + struct name_entry *entry, *next; + struct pid_stat_entry *pid_entry, *temp_entry; + struct pid_stat_entry *new_pid_entry, *new_temp_entry; + struct list_head per_pid_stats, sorted_pid_stats; + int ret = 0; + size_t 
total_shared_prop_space = 0, total_priv_space = 0; + + INIT_LIST_HEAD(&per_pid_stats); + INIT_LIST_HEAD(&sorted_pid_stats); + + err_puts(m, + "\n\n pid Total Shared Priv Purgeable Alloced SharedPHYsize SharedPHYprop PrivPHYsize PurgeablePHYsize process\n"); + + list_for_each_entry(file, &dev->filelist, lhead) { + struct pid *tgid; + struct drm_i915_file_private *file_priv = file->driver_priv; + int pid_num, found = 0; + + tgid = file_priv->tgid; + pid_num = pid_nr(tgid); + + list_for_each_entry(pid_entry, &per_pid_stats, head) { + if (pid_entry->pid_num == pid_num) { + found = 1; + break; + } + } + + if (!found) { + struct pid_stat_entry *new_entry = + kzalloc(sizeof(*new_entry), GFP_KERNEL); + if (new_entry == NULL) { + DRM_ERROR("alloc failed\n"); + ret = -ENOMEM; + break; + } + new_entry->tgid = tgid; + new_entry->pid_num = pid_num; + ret = drm_ht_create(&new_entry->namelist, + DRM_MAGIC_HASH_ORDER); + if (ret) { + kfree(new_entry); + break; + } + + list_add_tail(&new_entry->head, &per_pid_stats); + INIT_LIST_HEAD(&new_entry->namefree); + new_entry->stats.process_name = file_priv->process_name; + pid_entry = new_entry; + } + + spin_lock(&file->table_lock); + ret = idr_for_each(&file->object_idr, + &i915_drm_gem_object_per_file_summary, pid_entry); + spin_unlock(&file->table_lock); + if (ret) + break; + } + + list_for_each_entry_safe(pid_entry, temp_entry, &per_pid_stats, head) { + if (list_empty(&sorted_pid_stats)) { + list_del(&pid_entry->head); + list_add_tail(&pid_entry->head, &sorted_pid_stats); + continue; + } + + list_for_each_entry_safe(new_pid_entry, new_temp_entry, + &sorted_pid_stats, head) { + int prev_space = + pid_entry->stats.phys_space_shared_proportion + + pid_entry->stats.phys_space_allocated_priv; + int new_space = + new_pid_entry-> + stats.phys_space_shared_proportion + + new_pid_entry->stats.phys_space_allocated_priv; + if (prev_space > new_space) { + list_del(&pid_entry->head); + list_add_tail(&pid_entry->head, + &new_pid_entry->head); + 
break; + } + if (list_is_last(&new_pid_entry->head, + &sorted_pid_stats)) { + list_del(&pid_entry->head); + list_add_tail(&pid_entry->head, + &sorted_pid_stats); + } + } + } + + list_for_each_entry_safe(pid_entry, temp_entry, + &sorted_pid_stats, head) { + struct task_struct *task = get_pid_task(pid_entry->tgid, + PIDTYPE_PID); + err_printf(m, + "%5d %6d %6d %6d %9d %8d %14zdK %14zdK %14zdK %14zdK %s", + pid_entry->pid_num, + pid_entry->stats.num_obj, + pid_entry->stats.num_obj_shared, + pid_entry->stats.num_obj_private, + pid_entry->stats.num_obj_purgeable, + pid_entry->stats.num_obj_allocated, + pid_entry->stats.phys_space_allocated_shared/1024, + pid_entry->stats.phys_space_shared_proportion/1024, + pid_entry->stats.phys_space_allocated_priv/1024, + pid_entry->stats.phys_space_purgeable/1024, + pid_entry->stats.process_name); + + if (task == NULL) + err_puts(m, "*\n"); + else + err_puts(m, "\n"); + + total_shared_prop_space += + pid_entry->stats.phys_space_shared_proportion/1024; + total_priv_space += + pid_entry->stats.phys_space_allocated_priv/1024; + list_del(&pid_entry->head); + + list_for_each_entry_safe(entry, next, + &pid_entry->namefree, head) { + list_del(&entry->head); + drm_ht_remove_item(&pid_entry->namelist, + &entry->hash_item); + kfree(entry); + } + drm_ht_remove(&pid_entry->namelist); + kfree(pid_entry); + if (task) + put_task_struct(task); + } + + err_puts(m, + "\t\t\t\t\t\t\t\t--------------\t-------------\t--------\n"); + err_printf(m, + "\t\t\t\t\t\t\t\t%13zdK\t%12zdK\tTotal\n", + total_shared_prop_space, total_priv_space); + + err_printf(m, "\nTotal used GFX Shmem Physical space %8zdK\n", + dev_priv->mm.phys_mem_total/1024); + + if (ret) + return ret; + if (m->bytes == 0 && m->err) + return m->err; + + return 0; +} + +#define NUM_SPACES 100 +#define INITIAL_SPACES_STR(x) #x +#define SPACES_STR(x) INITIAL_SPACES_STR(x) + + static int +__i915_gem_get_obj_info(struct drm_i915_error_state_buf *m, + struct drm_device *dev, struct pid *tgid) +{ + 
struct drm_file *file; + struct drm_i915_file_private *file_priv_reqd = NULL; + int bytes_copy, ret = 0; + struct pid_stat_entry pid_entry; + struct name_entry *entry, *next; + + pid_entry.stats.phys_space_shared_proportion = 0; + pid_entry.stats.phys_space_allocated_priv = 0; + pid_entry.tgid = tgid; + pid_entry.pid_num = pid_nr(tgid); + ret = drm_ht_create(&pid_entry.namelist, DRM_MAGIC_HASH_ORDER); + if (ret) + return ret; + + INIT_LIST_HEAD(&pid_entry.namefree); + + /* + * Fill up initial few bytes with spaces, to insert summary data later + * on + */ + err_printf(m, "%"SPACES_STR(NUM_SPACES)"s\n", " "); + + list_for_each_entry(file, &dev->filelist, lhead) { + struct drm_i915_file_private *file_priv = file->driver_priv; + struct get_obj_stats_buf obj_stat_buf; + + obj_stat_buf.entry = &pid_entry; + obj_stat_buf.m = m; + + if (file_priv->tgid != tgid) + continue; + + file_priv_reqd = file_priv; + spin_lock(&file->table_lock); + ret = idr_for_each(&file->object_idr, + &i915_drm_gem_obj_info, &obj_stat_buf); + spin_unlock(&file->table_lock); + if (ret) + break; + } + + if (file_priv_reqd) { + int space_remaining; + + /* Reset the bytes counter to buffer beginning */ + bytes_copy = m->bytes; + m->bytes = 0; + + err_printf(m, "\n PID GfxMem Process\n"); + err_printf(m, "%5d %8zdK ", pid_nr(file_priv_reqd->tgid), + (pid_entry.stats.phys_space_shared_proportion + + pid_entry.stats.phys_space_allocated_priv)/1024); + + space_remaining = NUM_SPACES - m->bytes - 1; + if (strlen(file_priv_reqd->process_name) > space_remaining) + file_priv_reqd->process_name[space_remaining] = '\0'; + + err_printf(m, "%s\n", file_priv_reqd->process_name); + + /* Reinstate the previous saved value of bytes counter */ + m->bytes = bytes_copy; + } else + WARN(1, "drm file corresponding to tgid:%d not found\n", + pid_nr(tgid)); + + list_for_each_entry_safe(entry, next, + &pid_entry.namefree, head) { + list_del(&entry->head); + drm_ht_remove_item(&pid_entry.namelist, + &entry->hash_item); + 
kfree(entry); + } + drm_ht_remove(&pid_entry.namelist); + + if (ret) + return ret; + if (m->bytes == 0 && m->err) + return m->err; + return 0; +} + +int i915_get_drm_clients_info(struct drm_i915_error_state_buf *m, + struct drm_device *dev) +{ + int ret = 0; + + /* + * Protect the access to global drm resources such as filelist. Protect + * against their removal under our noses, while in use. + */ + mutex_lock(&drm_global_mutex); + ret = i915_mutex_lock_interruptible(dev); + if (ret) { + mutex_unlock(&drm_global_mutex); + return ret; + } + + ret = __i915_get_drm_clients_info(m, dev); + + mutex_unlock(&dev->struct_mutex); + mutex_unlock(&drm_global_mutex); + + return ret; +} + +int i915_gem_get_obj_info(struct drm_i915_error_state_buf *m, + struct drm_device *dev, struct pid *tgid) +{ + int ret = 0; + + ret = i915_mutex_lock_interruptible(dev); + if (ret) + return ret; + + ret = __i915_gem_get_obj_info(m, dev, tgid); + + mutex_unlock(&dev->struct_mutex); + + return ret; +} +#endif + static bool cpu_write_needs_clflush(struct drm_i915_gem_object *obj) { if (obj->cache_dirty) @@ -573,7 +1417,8 @@ i915_gem_object_wait_reservation(struct reservation_object *resv, } static void __fence_set_priority(struct dma_fence *fence, - const struct i915_sched_attr *attr) + const struct i915_sched_attr *attr, + unsigned int timeout) { struct i915_request *rq; struct intel_engine_cs *engine; @@ -587,13 +1432,14 @@ static void __fence_set_priority(struct dma_fence *fence, local_bh_disable(); rcu_read_lock(); /* RCU serialisation for set-wedged protection */ if (engine->schedule) - engine->schedule(rq, attr); + engine->schedule(rq, attr, timeout); rcu_read_unlock(); local_bh_enable(); /* kick the tasklets if queues were reprioritised */ } static void fence_set_priority(struct dma_fence *fence, - const struct i915_sched_attr *attr) + const struct i915_sched_attr *attr, + unsigned int timeout) { /* Recurse once into a fence-array */ if (dma_fence_is_array(fence)) { @@ -601,16 +1447,17 @@ 
static void fence_set_priority(struct dma_fence *fence, int i; for (i = 0; i < array->num_fences; i++) - __fence_set_priority(array->fences[i], attr); + __fence_set_priority(array->fences[i], attr, timeout); } else { - __fence_set_priority(fence, attr); + __fence_set_priority(fence, attr, timeout); } } int i915_gem_object_wait_priority(struct drm_i915_gem_object *obj, unsigned int flags, - const struct i915_sched_attr *attr) + const struct i915_sched_attr *attr, + unsigned int timeout) { struct dma_fence *excl; @@ -625,7 +1472,7 @@ i915_gem_object_wait_priority(struct drm_i915_gem_object *obj, return ret; for (i = 0; i < count; i++) { - fence_set_priority(shared[i], attr); + fence_set_priority(shared[i], attr, timeout); dma_fence_put(shared[i]); } @@ -635,7 +1482,7 @@ i915_gem_object_wait_priority(struct drm_i915_gem_object *obj, } if (excl) { - fence_set_priority(excl, attr); + fence_set_priority(excl, attr, timeout); dma_fence_put(excl); } return 0; @@ -1122,11 +1969,7 @@ i915_gem_shmem_pread(struct drm_i915_gem_object *obj, offset = offset_in_page(args->offset); for (idx = args->offset >> PAGE_SHIFT; remain; idx++) { struct page *page = i915_gem_object_get_page(obj, idx); - int length; - - length = remain; - if (offset + length > PAGE_SIZE) - length = PAGE_SIZE - offset; + unsigned int length = min_t(u64, remain, PAGE_SIZE - offset); ret = shmem_pread(page, offset, length, user_data, page_to_phys(page) & obj_do_bit17_swizzling, @@ -1570,11 +2413,7 @@ i915_gem_shmem_pwrite(struct drm_i915_gem_object *obj, offset = offset_in_page(args->offset); for (idx = args->offset >> PAGE_SHIFT; remain; idx++) { struct page *page = i915_gem_object_get_page(obj, idx); - int length; - - length = remain; - if (offset + length > PAGE_SIZE) - length = PAGE_SIZE - offset; + unsigned int length = min_t(u64, remain, PAGE_SIZE - offset); ret = shmem_pwrite(page, offset, length, user_data, page_to_phys(page) & obj_do_bit17_swizzling, @@ -1856,6 +2695,9 @@ i915_gem_mmap_ioctl(struct 
drm_device *dev, void *data, struct drm_i915_gem_mmap *args = data; struct drm_i915_gem_object *obj; unsigned long addr; +#if IS_ENABLED(CONFIG_DRM_I915_MEMTRACK) + int ret; +#endif if (args->flags & ~(I915_MMAP_WC)) return -EINVAL; @@ -1901,6 +2743,12 @@ i915_gem_mmap_ioctl(struct drm_device *dev, void *data, if (IS_ERR((void *)addr)) return addr; +#if IS_ENABLED(CONFIG_DRM_I915_MEMTRACK) + ret = i915_obj_insert_virt_addr(obj, addr, false, false); + if (ret) + return ret; +#endif + args->addr_ptr = (uint64_t) addr; return 0; @@ -2103,6 +2951,9 @@ vm_fault_t i915_gem_fault(struct vm_fault *vmf) (ggtt->gmadr.start + vma->node.start) >> PAGE_SHIFT, min_t(u64, vma->size, area->vm_end - area->vm_start), &ggtt->iomap); +#if IS_ENABLED(CONFIG_DRM_I915_MEMTRACK) + ret = i915_obj_insert_virt_addr(obj, (unsigned long)area->vm_start, true, true); +#endif if (ret) goto err_fence; @@ -2360,6 +3211,19 @@ i915_gem_object_truncate(struct drm_i915_gem_object *obj) shmem_truncate_range(file_inode(obj->base.filp), 0, (loff_t)-1); obj->mm.madv = __I915_MADV_PURGED; obj->mm.pages = ERR_PTR(-EFAULT); + +#if IS_ENABLED(CONFIG_DRM_I915_MEMTRACK) + /* + * Mark the object as not having backing pages, as physical space + * returned back to kernel + */ + if (obj->has_backing_pages == 1) { + struct drm_i915_private *dev_priv = obj->base.dev->dev_private; + + dev_priv->mm.phys_mem_total -= obj->base.size; + obj->has_backing_pages = 0; + } +#endif } /* Try to discard unwanted pages */ @@ -2655,6 +3519,14 @@ static int i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj) if (i915_gem_object_needs_bit17_swizzle(obj)) i915_gem_object_do_bit_17_swizzle(obj, st); +#if IS_ENABLED(CONFIG_DRM_I915_MEMTRACK) + if (obj->has_backing_pages == 0) { + struct drm_i915_private *dev_priv = obj->base.dev->dev_private; + + dev_priv->mm.phys_mem_total += obj->base.size; + obj->has_backing_pages = 1; + } +#endif __i915_gem_object_set_pages(obj, st, sg_page_sizes); return 0; @@ -3822,7 +4694,17 @@ int 
i915_gem_wait_for_idle(struct drm_i915_private *i915, return err; i915_retire_requests(i915); - GEM_BUG_ON(i915->gt.active_requests); + + /* + * temporarily disable the assert before i915 upstream fix + * we are pretty sure i915 is working fine and GEM_BUG_ON + * is empty if CONFIG_DRM_I915_DEBUG_GEM=n (default) + */ + + /* + * GEM_BUG_ON(i915->gt.active_requests); + */ + } else { struct intel_engine_cs *engine; enum intel_engine_id id; @@ -4699,6 +5581,15 @@ void i915_gem_object_init(struct drm_i915_gem_object *obj, INIT_RADIX_TREE(&obj->mm.get_page.radix, GFP_KERNEL | __GFP_NOWARN); mutex_init(&obj->mm.get_page.lock); +#if IS_ENABLED(CONFIG_DRM_I915_MEMTRACK) + /* + * Mark the object as not having backing pages, as no allocation + * for it yet + */ + obj->has_backing_pages = 0; + INIT_LIST_HEAD(&obj->pid_info); +#endif + i915_gem_info_add_obj(to_i915(obj->base.dev), obj->base.size); } @@ -4832,6 +5723,17 @@ static bool discard_backing_storage(struct drm_i915_gem_object *obj) return atomic_long_read(&obj->base.filp->f_count) == 1; } +#if IS_ENABLED(CONFIG_DRM_I915_MEMTRACK) +int +i915_gem_open_object(struct drm_gem_object *gem_obj, + struct drm_file *file_priv) +{ + struct drm_i915_gem_object *obj = to_intel_bo(gem_obj); + + return i915_gem_obj_insert_pid(obj); +} +#endif + static void __i915_gem_free_objects(struct drm_i915_private *i915, struct llist_node *freed) { @@ -4885,6 +5787,16 @@ static void __i915_gem_free_objects(struct drm_i915_private *i915, if (obj->base.import_attach) drm_prime_gem_destroy(&obj->base, NULL); +#if IS_ENABLED(CONFIG_DRM_I915_MEMTRACK) + if (!obj->stolen && (obj->has_backing_pages == 1)) { + struct drm_i915_private *dev_priv = obj->base.dev->dev_private; + + dev_priv->mm.phys_mem_total -= obj->base.size; + obj->has_backing_pages = 0; + } + i915_gem_obj_remove_all_pids(obj); +#endif + reservation_object_fini(&obj->__builtin_resv); drm_gem_object_release(&obj->base); i915_gem_info_remove_obj(i915, obj->base.size); @@ -5027,7 +5939,8 @@ 
void i915_gem_sanitize(struct drm_i915_private *i915) * of the reset, so this could be applied to even earlier gen. */ err = -ENODEV; - if (INTEL_GEN(i915) >= 5 && intel_has_gpu_reset(i915)) + if (INTEL_GEN(i915) >= 5 && intel_has_gpu_reset(i915) && + !intel_vgpu_active(i915)) err = WARN_ON(intel_gpu_reset(i915, ALL_ENGINES)); if (!err) intel_engines_sanitize(i915); @@ -5254,6 +6167,28 @@ static int __i915_gem_restart_engines(void *data) return 0; } +int i915_gem_init_hw_late(struct drm_i915_private *dev_priv) +{ + int ret; + + /* + * Place for things that can be delayed until the first context + * is open. For example, fw loading in android. + */ + + /* fetch firmware */ + intel_uc_init_misc(dev_priv); + + /* Load fw. We can't enable contexts until all firmware is loaded */ + ret = intel_uc_init_hw(dev_priv); + if (ret) { + DRM_ERROR("Late init: enabling uc failed (%d)\n", ret); + return ret; + } + + return 0; +} + int i915_gem_init_hw(struct drm_i915_private *dev_priv) { int ret; @@ -5312,11 +6247,17 @@ int i915_gem_init_hw(struct drm_i915_private *dev_priv) goto out; } - /* We can't enable contexts until all firmware is loaded */ - ret = intel_uc_init_hw(dev_priv); - if (ret) { - DRM_ERROR("Enabling uc failed (%d)\n", ret); - goto out; + /* + * Don't call i915_gem_init_hw_late() the very first time (during + * driver load); it will get called during first open instead. + * It should only be called on subsequent (re-initialization) passes. + */ + if (dev_priv->contexts_ready) { + ret = i915_gem_init_hw_late(dev_priv); + if (ret) + goto out; + } else { + DRM_DEBUG_DRIVER("Deferring late initialization\n"); } intel_mocs_init_l3cc_table(dev_priv); @@ -5461,7 +6402,8 @@ int i915_gem_init(struct drm_i915_private *dev_priv) int ret; /* We need to fallback to 4K pages if host doesn't support huge gtt. 
*/ - if (intel_vgpu_active(dev_priv) && !intel_vgpu_has_huge_gtt(dev_priv)) + if ((intel_vgpu_active(dev_priv) && !intel_vgpu_has_huge_gtt(dev_priv)) + || PVMMIO_LEVEL(dev_priv, PVMMIO_PPGTT_UPDATE)) mkwrite_device_info(dev_priv)->page_sizes = I915_GTT_PAGE_SIZE_4K; @@ -5479,9 +6421,13 @@ int i915_gem_init(struct drm_i915_private *dev_priv) if (ret) return ret; - ret = intel_uc_init_misc(dev_priv); - if (ret) - return ret; + /* + * ANDROID: fetch fw during drm_open instead + * due to filesystem is not up yet during driver init + * ret = intel_uc_init_misc(dev_priv); + * if (ret) + * return ret; + */ ret = intel_wopcm_init(&dev_priv->wopcm); if (ret) @@ -5837,6 +6783,11 @@ void i915_gem_release(struct drm_device *dev, struct drm_file *file) struct drm_i915_file_private *file_priv = file->driver_priv; struct i915_request *request; +#if IS_ENABLED(CONFIG_DRM_I915_MEMTRACK) + i915_gem_remove_sysfs_file_entry(dev, file); + put_pid(file_priv->tgid); +#endif + /* Clean up our request list when the client is going away, so that * later retire_requests won't dereference our soon-to-be-gone * file_priv. 
@@ -5862,15 +6813,57 @@ int i915_gem_open(struct drm_i915_private *i915, struct drm_file *file) file_priv->dev_priv = i915; file_priv->file = file; +#if IS_ENABLED(CONFIG_DRM_I915_MEMTRACK) + rcu_read_lock(); + file_priv->tgid = get_pid(find_vpid(task_tgid_nr(current))); + rcu_read_unlock(); + + file_priv->process_name = kzalloc(PAGE_SIZE, GFP_ATOMIC); + if (!file_priv->process_name) { + ret = -ENOMEM; + goto out_free_file; + } + + ret = i915_get_pid_cmdline(current, file_priv->process_name); + if (ret) + goto out_free_name; +#endif + spin_lock_init(&file_priv->mm.lock); INIT_LIST_HEAD(&file_priv->mm.request_list); file_priv->bsd_engine = -1; file_priv->hang_timestamp = jiffies; +#if IS_ENABLED(CONFIG_DRM_I915_MEMTRACK) + intel_runtime_pm_get(i915); +#endif + ret = i915_gem_context_open(i915, file); +#if IS_ENABLED(CONFIG_DRM_I915_MEMTRACK) + if (ret) { + intel_runtime_pm_put(i915); + goto out_free_name; + } + intel_runtime_pm_put(i915); + + ret = i915_gem_create_sysfs_file_entry(&i915->drm, file); + if (ret) { + i915_gem_context_close(file); + goto out_free_name; + } + + return 0; + +out_free_name: + kfree(file_priv->process_name); +out_free_file: + put_pid(file_priv->tgid); + kfree(file_priv); +#else if (ret) kfree(file_priv); +#endif return ret; } diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c index b10770cfccd24..08c614659e1d8 100644 --- a/drivers/gpu/drm/i915/i915_gem_context.c +++ b/drivers/gpu/drm/i915/i915_gem_context.c @@ -91,6 +91,7 @@ #include "i915_drv.h" #include "i915_trace.h" #include "intel_workarounds.h" +#include "i915_vgpu.h" #define ALL_L3_SLICES(dev) (1 << NUM_L3_SLICES(dev)) - 1 @@ -136,7 +137,12 @@ static void i915_gem_context_free(struct i915_gem_context *ctx) list_del(&ctx->link); - ida_simple_remove(&ctx->i915->contexts.hw_ida, ctx->hw_id); + if (intel_vgpu_active(ctx->i915)) + ida_simple_remove(&ctx->i915->contexts.hw_ida, ctx->hw_id & + ~(0x7 << SIZE_CONTEXT_HW_ID_GVT)); + else + 
ida_simple_remove(&ctx->i915->contexts.hw_ida, ctx->hw_id); + kfree_rcu(ctx, rcu); } @@ -217,6 +223,8 @@ static int assign_hw_id(struct drm_i915_private *dev_priv, unsigned *out) */ if (USES_GUC_SUBMISSION(dev_priv)) max = MAX_GUC_CONTEXT_HW_ID; + else if (intel_vgpu_active(dev_priv) || intel_gvt_active(dev_priv)) + max = MAX_CONTEXT_HW_ID_GVT; else max = MAX_CONTEXT_HW_ID; } @@ -236,6 +244,12 @@ static int assign_hw_id(struct drm_i915_private *dev_priv, unsigned *out) return ret; } + if (intel_vgpu_active(dev_priv)) { + /* add vgpu_id to context hw_id */ + ret = ret | (I915_READ(vgtif_reg(vgt_id)) + << SIZE_CONTEXT_HW_ID_GVT); + } + *out = ret; return 0; } @@ -499,7 +513,12 @@ int i915_gem_contexts_init(struct drm_i915_private *dev_priv) * For easy recognisablity, we want the kernel context to be 0 and then * all user contexts will have non-zero hw_id. */ - GEM_BUG_ON(ctx->hw_id); + if (intel_vgpu_active(dev_priv)){ + /* remove vgpu_id from context hw_id */ + GEM_BUG_ON(ctx->hw_id & ~(0x7 << SIZE_CONTEXT_HW_ID_GVT)); + } else { + GEM_BUG_ON(ctx->hw_id); + } dev_priv->kernel_context = ctx; /* highest priority; preempting task */ @@ -548,23 +567,55 @@ static int context_idr_cleanup(int id, void *p, void *data) return 0; } +int i915_gem_context_first_open(struct drm_i915_private *dev_priv) +{ + int ret; + + lockdep_assert_held(&dev_priv->drm.struct_mutex); + + DRM_DEBUG_DRIVER("Late initialization starting\n"); + + intel_runtime_pm_get(dev_priv); + intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL); + + ret = i915_gem_init_hw_late(dev_priv); + if (ret == 0) + dev_priv->contexts_ready = true; + else + DRM_ERROR("Late initialization failed: %d\n", ret); + + intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL); + intel_runtime_pm_put(dev_priv); + + return ret; +} + int i915_gem_context_open(struct drm_i915_private *i915, struct drm_file *file) { struct drm_i915_file_private *file_priv = file->driver_priv; struct i915_gem_context *ctx; + int ret = 0; 
idr_init(&file_priv->context_idr); mutex_lock(&i915->drm.struct_mutex); - ctx = i915_gem_create_context(i915, file_priv); - mutex_unlock(&i915->drm.struct_mutex); - if (IS_ERR(ctx)) { - idr_destroy(&file_priv->context_idr); - return PTR_ERR(ctx); + + if (!(i915->contexts_ready)) + ret = i915_gem_context_first_open(i915); + + if (ret == 0) { + ctx = i915_gem_create_context(i915, file_priv); + if (IS_ERR(ctx)) + ret = PTR_ERR(ctx); + + GEM_BUG_ON(i915_gem_context_is_kernel(ctx)); } - GEM_BUG_ON(i915_gem_context_is_kernel(ctx)); + mutex_unlock(&i915->drm.struct_mutex); + + if (ret) + idr_destroy(&file_priv->context_idr); return 0; } @@ -818,6 +869,15 @@ int i915_gem_context_getparam_ioctl(struct drm_device *dev, void *data, case I915_CONTEXT_PARAM_PRIORITY: args->value = ctx->sched.priority; break; + case I915_CONTEXT_PARAM_PREEMPT_TIMEOUT: + if (!(to_i915(dev)->caps.scheduler & I915_SCHEDULER_CAP_PREEMPTION)) + ret = -ENODEV; + else if (args->size) + ret = -EINVAL; + else + args->value = ctx->preempt_timeout; + break; + default: ret = -EINVAL; break; @@ -893,6 +953,19 @@ int i915_gem_context_setparam_ioctl(struct drm_device *dev, void *data, } break; + case I915_CONTEXT_PARAM_PREEMPT_TIMEOUT: + if (args->size) + ret = -EINVAL; + else if (args->value > U32_MAX) + ret = -EINVAL; + else if (!(to_i915(dev)->caps.scheduler & I915_SCHEDULER_CAP_PREEMPTION)) + ret = -ENODEV; + else if (args->value && !capable(CAP_SYS_ADMIN)) + ret = -EPERM; + else + ctx->preempt_timeout = args->value; + break; + default: ret = -EINVAL; break; diff --git a/drivers/gpu/drm/i915/i915_gem_context.h b/drivers/gpu/drm/i915/i915_gem_context.h index b116e4942c10d..ded814456de96 100644 --- a/drivers/gpu/drm/i915/i915_gem_context.h +++ b/drivers/gpu/drm/i915/i915_gem_context.h @@ -147,6 +147,19 @@ struct i915_gem_context { struct i915_sched_attr sched; + /** + * @preempt_timeout: QoS guarantee for the high priority context + * + * Some clients need a guarantee that they will start executing + * 
within a certain window, even at the expense of others. This entails + * that if a preemption request is not honoured by the active context + * within the timeout, we will reset the GPU to evict the hog and + * run the high priority context instead. + * + * Timeout is stored in nanoseconds. + */ + u32 preempt_timeout; + /** ggtt_offset_bias: placement restriction for context objects */ u32 ggtt_offset_bias; diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c index 3f0c612d42e78..679bbae529453 100644 --- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c +++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c @@ -458,7 +458,7 @@ eb_validate_vma(struct i915_execbuffer *eb, * any non-page-aligned or non-canonical addresses. */ if (unlikely(entry->flags & EXEC_OBJECT_PINNED && - entry->offset != gen8_canonical_addr(entry->offset & PAGE_MASK))) + entry->offset != gen8_canonical_addr(entry->offset & I915_GTT_PAGE_MASK))) return -EINVAL; /* pad_to_size was once a reserved field, so sanitize it */ diff --git a/drivers/gpu/drm/i915/i915_gem_fence_reg.c b/drivers/gpu/drm/i915/i915_gem_fence_reg.c index d548ac05ccd7a..317e376cc2da6 100644 --- a/drivers/gpu/drm/i915/i915_gem_fence_reg.c +++ b/drivers/gpu/drm/i915/i915_gem_fence_reg.c @@ -63,6 +63,7 @@ static void i965_write_fence_reg(struct drm_i915_fence_reg *fence, i915_reg_t fence_reg_lo, fence_reg_hi; int fence_pitch_shift; u64 val; + struct drm_i915_private *dev_priv = fence->i915; if (INTEL_GEN(fence->i915) >= 6) { fence_reg_lo = FENCE_REG_GEN6_LO(fence->id); @@ -92,9 +93,17 @@ static void i965_write_fence_reg(struct drm_i915_fence_reg *fence, val |= I965_FENCE_REG_VALID; } - if (!pipelined) { - struct drm_i915_private *dev_priv = fence->i915; - + if (intel_vgpu_active(dev_priv)) { + /* Use the 64-bit RW to write fence reg on VGPU mode. + * The GVT-g can trap the written val of VGPU to program the + * fence reg. 
And the fence write in gvt-g follows the + * sequence of off/read/double-write/read. This assures that + * the fence reg is configured as expected. + * At the same time the 64-bit op can help to reduce the num + * of VGPU trap for the fence reg. + */ + I915_WRITE64_FW(fence_reg_lo, val); + } else if (!pipelined) { /* To w/a incoherency with non-atomic 64-bit register updates, * we split the 64-bit update into two 32-bit writes. In order * for a partial fence not to be evaluated between writes, we diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c index f00c7fbef79ef..627921fa8ee98 100644 --- a/drivers/gpu/drm/i915/i915_gem_gtt.c +++ b/drivers/gpu/drm/i915/i915_gem_gtt.c @@ -998,6 +998,8 @@ static void gen8_ppgtt_clear_4lvl(struct i915_address_space *vm, struct i915_pml4 *pml4 = &ppgtt->pml4; struct i915_page_directory_pointer *pdp; unsigned int pml4e; + u64 orig_start = start; + u64 orig_length = length; GEM_BUG_ON(!use_4lvl(vm)); @@ -1011,6 +1013,17 @@ static void gen8_ppgtt_clear_4lvl(struct i915_address_space *vm, free_pdp(vm, pdp); } + + if (PVMMIO_LEVEL(vm->i915, PVMMIO_PPGTT_UPDATE)) { + struct drm_i915_private *dev_priv = vm->i915; + struct pv_ppgtt_update *pv_ppgtt = + &dev_priv->shared_page->pv_ppgtt; + + writeq(px_dma(pml4), &pv_ppgtt->pdp); + writeq(orig_start, &pv_ppgtt->start); + writeq(orig_length, &pv_ppgtt->length); + I915_WRITE(vgtif_reg(g2v_notify), VGT_G2V_PPGTT_L4_CLEAR); + } } static inline struct sgt_dma { @@ -1058,7 +1071,7 @@ gen8_ppgtt_insert_pte_entries(struct i915_hw_ppgtt *ppgtt, do { vaddr[idx->pte] = pte_encode | iter->dma; - iter->dma += PAGE_SIZE; + iter->dma += I915_GTT_PAGE_SIZE; if (iter->dma >= iter->max) { iter->sg = __sg_next(iter->sg); if (!iter->sg) { @@ -1250,6 +1263,18 @@ static void gen8_ppgtt_insert_4lvl(struct i915_address_space *vm, flags)) GEM_BUG_ON(idx.pml4e >= GEN8_PML4ES_PER_PML4); + if (PVMMIO_LEVEL(vm->i915, PVMMIO_PPGTT_UPDATE)) { + struct drm_i915_private *dev_priv = 
vm->i915; + struct pv_ppgtt_update *pv_ppgtt = + &dev_priv->shared_page->pv_ppgtt; + + writeq(px_dma(&ppgtt->pml4), &pv_ppgtt->pdp); + writeq(vma->node.start, &pv_ppgtt->start); + writeq(vma->node.size, &pv_ppgtt->length); + writel(cache_level, &pv_ppgtt->cache_level); + I915_WRITE(vgtif_reg(g2v_notify), VGT_G2V_PPGTT_L4_INSERT); + } + vma->page_sizes.gtt = I915_GTT_PAGE_SIZE; } } @@ -1498,6 +1523,8 @@ static int gen8_ppgtt_alloc_4lvl(struct i915_address_space *vm, u64 from = start; u32 pml4e; int ret; + u64 orig_start = start; + u64 orig_length = length; gen8_for_each_pml4e(pdp, pml4, start, length, pml4e) { if (pml4->pdps[pml4e] == vm->scratch_pdp) { @@ -1514,6 +1541,17 @@ static int gen8_ppgtt_alloc_4lvl(struct i915_address_space *vm, goto unwind_pdp; } + if (PVMMIO_LEVEL(vm->i915, PVMMIO_PPGTT_UPDATE)) { + struct drm_i915_private *dev_priv = vm->i915; + struct pv_ppgtt_update *pv_ppgtt = + &dev_priv->shared_page->pv_ppgtt; + + writeq(px_dma(pml4), &pv_ppgtt->pdp); + writeq(orig_start, &pv_ppgtt->start); + writeq(orig_length, &pv_ppgtt->length); + I915_WRITE(vgtif_reg(g2v_notify), VGT_G2V_PPGTT_L4_ALLOC); + } + return 0; unwind_pdp: @@ -1768,9 +1806,9 @@ static void gen6_dump_ppgtt(struct i915_hw_ppgtt *base, struct seq_file *m) if (i == 4) continue; - seq_printf(m, "\t\t(%03d, %04d) %08lx: ", + seq_printf(m, "\t\t(%03d, %04d) %08llx: ", pde, pte, - (pde * GEN6_PTES + pte) * PAGE_SIZE); + (pde * GEN6_PTES + pte) * I915_GTT_PAGE_SIZE); for (i = 0; i < 4; i++) { if (vaddr[pte + i] != scratch_pte) seq_printf(m, " %08x", vaddr[pte + i]); @@ -1910,7 +1948,7 @@ static void gen6_ppgtt_insert_entries(struct i915_address_space *vm, do { vaddr[act_pte] = pte_encode | GEN6_PTE_ADDR_ENCODE(iter.dma); - iter.dma += PAGE_SIZE; + iter.dma += I915_GTT_PAGE_SIZE; if (iter.dma == iter.max) { iter.sg = __sg_next(iter.sg); if (!iter.sg) @@ -2048,7 +2086,7 @@ static int pd_vma_bind(struct i915_vma *vma, { struct i915_ggtt *ggtt = i915_vm_to_ggtt(vma->vm); struct gen6_hw_ppgtt *ppgtt 
= vma->private; - u32 ggtt_offset = i915_ggtt_offset(vma) / PAGE_SIZE; + u32 ggtt_offset = i915_ggtt_offset(vma) / I915_GTT_PAGE_SIZE; struct i915_page_table *pt; unsigned int pde; @@ -2128,6 +2166,7 @@ static struct i915_vma *pd_vma_create(struct gen6_hw_ppgtt *ppgtt, int size) int gen6_ppgtt_pin(struct i915_hw_ppgtt *base) { struct gen6_hw_ppgtt *ppgtt = to_gen6_ppgtt(base); + int err; /* * Workaround the limited maximum vma->pin_count and the aliasing_ppgtt @@ -2143,9 +2182,17 @@ int gen6_ppgtt_pin(struct i915_hw_ppgtt *base) * allocator works in address space sizes, so it's multiplied by page * size. We allocate at the top of the GTT to avoid fragmentation. */ - return i915_vma_pin(ppgtt->vma, - 0, GEN6_PD_ALIGN, - PIN_GLOBAL | PIN_HIGH); + err = i915_vma_pin(ppgtt->vma, + 0, GEN6_PD_ALIGN, + PIN_GLOBAL | PIN_HIGH); + if (err) + goto unpin; + + return 0; + +unpin: + ppgtt->pin_count = 0; + return err; } void gen6_ppgtt_unpin(struct i915_hw_ppgtt *base) @@ -2174,7 +2221,7 @@ static struct i915_hw_ppgtt *gen6_ppgtt_create(struct drm_i915_private *i915) ppgtt->base.vm.i915 = i915; ppgtt->base.vm.dma = &i915->drm.pdev->dev; - ppgtt->base.vm.total = I915_PDES * GEN6_PTES * PAGE_SIZE; + ppgtt->base.vm.total = I915_PDES * GEN6_PTES * I915_GTT_PAGE_SIZE; i915_address_space_init(&ppgtt->base.vm, i915); @@ -2465,6 +2512,17 @@ static void gen8_set_pte(void __iomem *addr, gen8_pte_t pte) writeq(pte, addr); } +static void vgpu_ggtt_insert(struct drm_i915_private *dev_priv, + u64 start, int num_entries, enum i915_cache_level level) +{ + struct gvt_shared_page *shared_page = dev_priv->shared_page; + + writeq(start, &shared_page->pv_ggtt.start); + writeq(num_entries, &shared_page->pv_ggtt.length); + writel(level, &shared_page->pv_ggtt.cache_level); + I915_WRITE(vgtif_reg(g2v_notify), VGT_G2V_GGTT_INSERT); +} + static void gen8_ggtt_insert_page(struct i915_address_space *vm, dma_addr_t addr, u64 offset, @@ -2477,6 +2535,11 @@ static void gen8_ggtt_insert_page(struct 
i915_address_space *vm, gen8_set_pte(pte, gen8_pte_encode(addr, level, 0)); + if (PVMMIO_LEVEL(vm->i915, PVMMIO_GGTT_UPDATE)) { + vgpu_ggtt_insert(vm->i915, offset, 1, level); + return; + } + ggtt->invalidate(vm->i915); } @@ -2501,6 +2564,20 @@ static void gen8_ggtt_insert_entries(struct i915_address_space *vm, for_each_sgt_dma(addr, sgt_iter, vma->pages) gen8_set_pte(gtt_entries++, pte_encode | addr); + if (PVMMIO_LEVEL(vm->i915, PVMMIO_GGTT_UPDATE)) { + int num_entries = gtt_entries - + ((gen8_pte_t __iomem *)ggtt->gsm + + (vma->node.start >> PAGE_SHIFT)); + /* + * Sometimes number of entries does not match vma node size. + * Pass number of pte entries instead. + */ + vgpu_ggtt_insert(vm->i915, vma->node.start, + num_entries, level); + return; + } + + /* * We want to flush the TLBs only after we're certain all the PTE * updates have finished. @@ -2574,6 +2651,16 @@ static void gen8_ggtt_clear_range(struct i915_address_space *vm, for (i = 0; i < num_entries; i++) gen8_set_pte(>t_base[i], scratch_pte); + + if (PVMMIO_LEVEL(vm->i915, PVMMIO_GGTT_UPDATE)) { + struct drm_i915_private *dev_priv = vm->i915; + struct gvt_shared_page *shared_page = dev_priv->shared_page; + + writeq(start, &shared_page->pv_ggtt.start); + writeq(length, &shared_page->pv_ggtt.length); + I915_WRITE(vgtif_reg(g2v_notify), VGT_G2V_GGTT_CLEAR); + } + } static void bxt_vtd_ggtt_wa(struct i915_address_space *vm) @@ -2949,16 +3036,19 @@ int i915_gem_init_ggtt(struct drm_i915_private *dev_priv) if (ret) return ret; - /* Clear any non-preallocated blocks */ - drm_mm_for_each_hole(entry, &ggtt->vm.mm, hole_start, hole_end) { - DRM_DEBUG_KMS("clearing unused GTT space: [%lx, %lx]\n", + if (!intel_vgpu_active(dev_priv)) { + /* Clear any non-preallocated blocks */ + drm_mm_for_each_hole(entry, &ggtt->vm.mm, hole_start, hole_end) { + DRM_DEBUG_KMS("clearing unused GTT space: [%lx, %lx]\n", hole_start, hole_end); - ggtt->vm.clear_range(&ggtt->vm, hole_start, + ggtt->vm.clear_range(&ggtt->vm, hole_start, 
hole_end - hole_start); - } + } - /* And finally clear the reserved guard page */ - ggtt->vm.clear_range(&ggtt->vm, ggtt->vm.total - PAGE_SIZE, PAGE_SIZE); + /* And finally clear the reserved guard page */ + ggtt->vm.clear_range(&ggtt->vm, ggtt->vm.total - PAGE_SIZE, PAGE_SIZE); + + } if (USES_PPGTT(dev_priv) && !USES_FULL_PPGTT(dev_priv)) { ret = i915_gem_init_aliasing_ppgtt(dev_priv); @@ -3031,7 +3121,7 @@ static unsigned int gen8_get_total_gtt_size(u16 bdw_gmch_ctl) bdw_gmch_ctl = 1 << bdw_gmch_ctl; #ifdef CONFIG_X86_32 - /* Limit 32b platforms to a 2GB GGTT: 4 << 20 / pte size * PAGE_SIZE */ + /* Limit 32b platforms to a 2GB GGTT: 4 << 20 / pte size * I915_GTT_PAGE_SIZE */ if (bdw_gmch_ctl > 4) bdw_gmch_ctl = 4; #endif @@ -3729,9 +3819,9 @@ rotate_pages(const dma_addr_t *in, unsigned int offset, * the entries so the sg list can be happily traversed. * The only thing we need are DMA addresses. */ - sg_set_page(sg, NULL, PAGE_SIZE, 0); + sg_set_page(sg, NULL, I915_GTT_PAGE_SIZE, 0); sg_dma_address(sg) = in[offset + src_idx]; - sg_dma_len(sg) = PAGE_SIZE; + sg_dma_len(sg) = I915_GTT_PAGE_SIZE; sg = sg_next(sg); src_idx -= stride; } @@ -3744,7 +3834,7 @@ static noinline struct sg_table * intel_rotate_pages(struct intel_rotation_info *rot_info, struct drm_i915_gem_object *obj) { - const unsigned long n_pages = obj->base.size / PAGE_SIZE; + const unsigned long n_pages = obj->base.size / I915_GTT_PAGE_SIZE; unsigned int size = intel_rotation_info_size(rot_info); struct sgt_iter sgt_iter; dma_addr_t dma_addr; diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.h b/drivers/gpu/drm/i915/i915_gem_gtt.h index 2a116a91420bc..680e0dc5db4bb 100644 --- a/drivers/gpu/drm/i915/i915_gem_gtt.h +++ b/drivers/gpu/drm/i915/i915_gem_gtt.h @@ -42,13 +42,15 @@ #include "i915_selftest.h" #include "i915_timeline.h" -#define I915_GTT_PAGE_SIZE_4K BIT(12) -#define I915_GTT_PAGE_SIZE_64K BIT(16) -#define I915_GTT_PAGE_SIZE_2M BIT(21) +#define I915_GTT_PAGE_SIZE_4K BIT_ULL(12) +#define 
I915_GTT_PAGE_SIZE_64K BIT_ULL(16) +#define I915_GTT_PAGE_SIZE_2M BIT_ULL(21) #define I915_GTT_PAGE_SIZE I915_GTT_PAGE_SIZE_4K #define I915_GTT_MAX_PAGE_SIZE I915_GTT_PAGE_SIZE_2M +#define I915_GTT_PAGE_MASK -I915_GTT_PAGE_SIZE + #define I915_GTT_MIN_ALIGNMENT I915_GTT_PAGE_SIZE #define I915_FENCE_REG_NONE -1 @@ -662,20 +664,20 @@ int i915_gem_gtt_insert(struct i915_address_space *vm, u64 start, u64 end, unsigned int flags); /* Flags used by pin/bind&friends. */ -#define PIN_NONBLOCK BIT(0) -#define PIN_MAPPABLE BIT(1) -#define PIN_ZONE_4G BIT(2) -#define PIN_NONFAULT BIT(3) -#define PIN_NOEVICT BIT(4) - -#define PIN_MBZ BIT(5) /* I915_VMA_PIN_OVERFLOW */ -#define PIN_GLOBAL BIT(6) /* I915_VMA_GLOBAL_BIND */ -#define PIN_USER BIT(7) /* I915_VMA_LOCAL_BIND */ -#define PIN_UPDATE BIT(8) - -#define PIN_HIGH BIT(9) -#define PIN_OFFSET_BIAS BIT(10) -#define PIN_OFFSET_FIXED BIT(11) +#define PIN_NONBLOCK BIT_ULL(0) +#define PIN_MAPPABLE BIT_ULL(1) +#define PIN_ZONE_4G BIT_ULL(2) +#define PIN_NONFAULT BIT_ULL(3) +#define PIN_NOEVICT BIT_ULL(4) + +#define PIN_MBZ BIT_ULL(5) /* I915_VMA_PIN_OVERFLOW */ +#define PIN_GLOBAL BIT_ULL(6) /* I915_VMA_GLOBAL_BIND */ +#define PIN_USER BIT_ULL(7) /* I915_VMA_LOCAL_BIND */ +#define PIN_UPDATE BIT_ULL(8) + +#define PIN_HIGH BIT_ULL(9) +#define PIN_OFFSET_BIAS BIT_ULL(10) +#define PIN_OFFSET_FIXED BIT_ULL(11) #define PIN_OFFSET_MASK (-I915_GTT_PAGE_SIZE) #endif diff --git a/drivers/gpu/drm/i915/i915_gem_gvtbuffer.c b/drivers/gpu/drm/i915/i915_gem_gvtbuffer.c new file mode 100644 index 0000000000000..c34599674867f --- /dev/null +++ b/drivers/gpu/drm/i915/i915_gem_gvtbuffer.c @@ -0,0 +1,307 @@ +/* + * Copyright © 2012 - 2015 Intel Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, 
distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. + * + */ + +#include "i915_drv.h" +#include "i915_trace.h" +#include "intel_drv.h" +#include + +#include "gvt/gvt.h" +#include "gvt/fb_decoder.h" + +static int +i915_gem_gvtbuffer_get_pages(struct drm_i915_gem_object *obj) +{ + BUG(); + return -EINVAL; +} + +static void i915_gem_gvtbuffer_put_pages(struct drm_i915_gem_object *obj, + struct sg_table *pages) +{ + /* like stolen memory, this should only be called during free + * after clearing pin count. 
+ */ + sg_free_table(pages); + kfree(pages); +} + +static void +i915_gem_gvtbuffer_release(struct drm_i915_gem_object *obj) +{ + i915_gem_object_unpin_pages(obj); +} + +static const struct drm_i915_gem_object_ops i915_gem_gvtbuffer_ops = { + .get_pages = i915_gem_gvtbuffer_get_pages, + .put_pages = i915_gem_gvtbuffer_put_pages, + .release = i915_gem_gvtbuffer_release, +}; + +#define GEN8_DECODE_PTE(pte) \ + ((dma_addr_t)(((((u64)pte) >> 12) & 0x7ffffffULL) << 12)) + +#define GEN7_DECODE_PTE(pte) \ + ((dma_addr_t)(((((u64)pte) & 0x7f0) << 28) | (u64)(pte & 0xfffff000))) + +#define PLANE_CTL_TILED_SHIFT 10 + +static struct sg_table * +i915_create_sg_pages_for_gvtbuffer(struct drm_device *dev, + u32 start, u32 num_pages) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + struct sg_table *st; + struct scatterlist *sg; + int i; + + st = kmalloc(sizeof(*st), GFP_KERNEL); + if (st == NULL) + return NULL; + + if (sg_alloc_table(st, num_pages, GFP_KERNEL)) { + kfree(st); + return NULL; + } + + if (INTEL_INFO(dev_priv)->gen >= 8) { + gen8_pte_t __iomem *gtt_entries = + (gen8_pte_t __iomem *)dev_priv->ggtt.gsm + + (start >> PAGE_SHIFT); + for_each_sg(st->sgl, sg, num_pages, i) { + sg->offset = 0; + sg->length = PAGE_SIZE; + sg_dma_address(sg) = + GEN8_DECODE_PTE(readq(>t_entries[i])); + sg_dma_len(sg) = PAGE_SIZE; + } + } else { + gen6_pte_t __iomem *gtt_entries = + (gen6_pte_t __iomem *)dev_priv->ggtt.gsm + + (start >> PAGE_SHIFT); + for_each_sg(st->sgl, sg, num_pages, i) { + sg->offset = 0; + sg->length = PAGE_SIZE; + sg_dma_address(sg) = + GEN7_DECODE_PTE(readq(>t_entries[i])); + sg_dma_len(sg) = PAGE_SIZE; + } + } + + return st; +} + +struct drm_i915_gem_object * +i915_gem_object_create_gvtbuffer(struct drm_device *dev, + u32 start, u32 num_pages) +{ + struct drm_i915_gem_object *obj; + struct drm_i915_private *i915 = to_i915(dev); + + obj = i915_gem_object_alloc(to_i915(dev)); + if (obj == NULL) + return NULL; + + drm_gem_private_object_init(dev, &obj->base, 
num_pages << PAGE_SHIFT); + i915_gem_object_init(obj, &i915_gem_gvtbuffer_ops); + + obj->mm.pages = i915_create_sg_pages_for_gvtbuffer(dev, start, num_pages); + if (obj->mm.pages == NULL) { + i915_gem_object_free(obj); + return NULL; + } + + if (i915_gem_object_pin_pages(obj)) + printk(KERN_ERR "%s:%d> Pin pages failed!\n", __func__, __LINE__); + obj->cache_level = I915_CACHE_L3_LLC; + + DRM_DEBUG_DRIVER("GVT_GEM: backing store base = 0x%x pages = 0x%x\n", + start, num_pages); + + spin_lock(&i915->mm.obj_lock); + list_add(&obj->mm.link, &i915->mm.unbound_list); + spin_unlock(&i915->mm.obj_lock); + + return obj; +} + +static int gvt_decode_information(struct drm_device *dev, + struct drm_i915_gem_gvtbuffer *args) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + struct intel_gvt *gvt = dev_priv->gvt; + struct intel_vgpu_primary_plane_format p; + struct intel_vgpu_cursor_plane_format c; + struct intel_vgpu *vgpu = NULL; + int ret; + int i; + + if (!intel_gvt_active(dev_priv)) + return -EINVAL; + + mutex_lock(&gvt->lock); + for_each_active_vgpu(gvt, vgpu, i) + if (vgpu->id == args->id) + break; + + if (!vgpu) { + gvt_err("Invalid vgpu ID (%d)\n", args->id); + mutex_unlock(&gvt->lock); + return -ENODEV; + } + mutex_unlock(&gvt->lock); + + if ((args->plane_id) == I915_GVT_PLANE_PRIMARY) { + ret = intel_vgpu_decode_primary_plane(vgpu, &p); + if (ret) + return ret; + + args->enabled = p.enabled; + args->x_offset = p.x_offset; + args->y_offset = p.y_offset; + args->start = p.base; + args->width = p.width; + args->height = p.height; + args->stride = p.stride; + args->bpp = p.bpp; + args->hw_format = p.hw_format; + args->drm_format = p.drm_format; + args->tiled = p.tiled >> PLANE_CTL_TILED_SHIFT; + } else if ((args->plane_id) == I915_GVT_PLANE_CURSOR) { + ret = intel_vgpu_decode_cursor_plane(vgpu, &c); + if (ret) + return ret; + + args->enabled = c.enabled; + args->x_offset = c.x_hot; + args->y_offset = c.y_hot; + args->x_pos = c.x_pos; + args->y_pos = c.y_pos; + 
args->start = c.base; + args->width = c.width; + args->height = c.height; + args->stride = c.width * (c.bpp / 8); + args->bpp = c.bpp; + args->tiled = 0; + } else { + DRM_DEBUG_DRIVER("GVT_GEM: Invalid plaine_id: %d\n", + args->plane_id); + return -EINVAL; + } + + args->size = ALIGN(args->stride * args->height, PAGE_SIZE) >> PAGE_SHIFT; + + if (args->start & (PAGE_SIZE - 1)) { + DRM_DEBUG_DRIVER("GVT_GEM: Not aligned fb start address: " + "0x%x\n", args->start); + return -EINVAL; + } + + if (((args->start >> PAGE_SHIFT) + args->size) > + ggtt_total_entries(&dev_priv->ggtt)) { + DRM_DEBUG_DRIVER("GVT: Invalid GTT offset or size\n"); + return -EINVAL; + } + return 0; +} + +/** + * Creates a new mm object that wraps some user memory. + */ +int +i915_gem_gvtbuffer_ioctl(struct drm_device *dev, void *data, + struct drm_file *file) +{ + struct drm_i915_private *dev_priv = to_i915(dev); + struct drm_i915_gem_gvtbuffer *args = data; + struct drm_i915_gem_object *obj; + u32 handle; + int ret = 0; + + if (INTEL_INFO(dev_priv)->gen < 7) + return -EPERM; + + if (args->flags & I915_GVTBUFFER_CHECK_CAPABILITY) + return 0; +#if 0 + if (!gvt_check_host()) + return -EPERM; +#endif + /* if args->start != 0 do not decode, but use it as ggtt offset*/ + if (args->start == 0) { + ret = gvt_decode_information(dev, args); + if (ret) + return ret; + } + + if (ret) + return ret; + + if (args->flags & I915_GVTBUFFER_QUERY_ONLY) + return 0; + + obj = i915_gem_object_create_gvtbuffer(dev, args->start, args->size); + if (!obj) { + DRM_DEBUG_DRIVER("GVT_GEM: Failed to create gem object" + " for VM FB!\n"); + return -EINVAL; + } + + if (IS_SKYLAKE(dev_priv) || IS_BROXTON(dev_priv) || + IS_KABYLAKE(dev_priv)) { + unsigned int tiling_mode = I915_TILING_NONE; + unsigned int stride = 0; + + switch (args->tiled << 10) { + case PLANE_CTL_TILED_LINEAR: + /* Default valid value */ + break; + case PLANE_CTL_TILED_X: + tiling_mode = I915_TILING_X; + stride = args->stride; + break; + case PLANE_CTL_TILED_Y: 
+ tiling_mode = I915_TILING_Y; + stride = args->stride; + break; + default: + DRM_ERROR("gvt: tiling mode %d not supported\n", args->tiled); + } + obj->tiling_and_stride = tiling_mode | stride; + } else { + obj->tiling_and_stride = (args->tiled ? I915_TILING_X : I915_TILING_NONE) | + (args->tiled ? args->stride : 0); + } + + ret = drm_gem_handle_create(file, &obj->base, &handle); + + /* drop reference from allocate - handle holds it now */ + i915_gem_object_put(obj); + + if (ret) + return ret; + + args->handle = handle; + return 0; +} diff --git a/drivers/gpu/drm/i915/i915_gem_object.h b/drivers/gpu/drm/i915/i915_gem_object.h index 83e5e01fa9eaa..338709b6640e9 100644 --- a/drivers/gpu/drm/i915/i915_gem_object.h +++ b/drivers/gpu/drm/i915/i915_gem_object.h @@ -147,6 +147,10 @@ struct drm_i915_gem_object { #define I915_BO_CACHE_COHERENT_FOR_WRITE BIT(1) unsigned int cache_dirty:1; +#if IS_ENABLED(CONFIG_DRM_I915_MEMTRACK) + unsigned int has_backing_pages:1; +#endif + /** * @read_domains: Read memory domains. 
* @@ -278,6 +282,10 @@ struct drm_i915_gem_object { void *gvt_info; }; +#if IS_ENABLED(CONFIG_DRM_I915_MEMTRACK) + struct list_head pid_info; +#endif + /** for phys allocated objects */ struct drm_dma_handle *phys_handle; diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c index a262a64f56256..a803449498f8a 100644 --- a/drivers/gpu/drm/i915/i915_gpu_error.c +++ b/drivers/gpu/drm/i915/i915_gpu_error.c @@ -91,6 +91,13 @@ static bool __i915_error_ok(struct drm_i915_error_state_buf *e) return true; } +#if IS_ENABLED(CONFIG_DRM_I915_MEMTRACK) +bool i915_error_ok(struct drm_i915_error_state_buf *e) +{ + return __i915_error_ok(e); +} +#endif + static bool __i915_error_seek(struct drm_i915_error_state_buf *e, unsigned len) { @@ -162,7 +169,7 @@ static void i915_error_vprintf(struct drm_i915_error_state_buf *e, __i915_error_advance(e, len); } -static void i915_error_puts(struct drm_i915_error_state_buf *e, +void i915_error_puts(struct drm_i915_error_state_buf *e, const char *str) { unsigned len; @@ -871,6 +878,22 @@ int i915_error_state_buf_init(struct drm_i915_error_state_buf *ebuf, return 0; } +#if IS_ENABLED(CONFIG_DRM_I915_MEMTRACK) +int i915_obj_state_buf_init(struct drm_i915_error_state_buf *ebuf, + size_t count) +{ + memset(ebuf, 0, sizeof(*ebuf)); + + ebuf->buf = kmalloc(count, GFP_KERNEL); + + if (ebuf->buf == NULL) + return -ENOMEM; + + ebuf->size = count; + return 0; +} +#endif + static void i915_error_object_free(struct drm_i915_error_object *obj) { int page; diff --git a/drivers/gpu/drm/i915/i915_gpu_error.h b/drivers/gpu/drm/i915/i915_gpu_error.h index 8710fb18ed746..821bed7bd375a 100644 --- a/drivers/gpu/drm/i915/i915_gpu_error.h +++ b/drivers/gpu/drm/i915/i915_gpu_error.h @@ -307,6 +307,13 @@ struct drm_i915_error_state_buf { }; #if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR) +#if IS_ENABLED(CONFIG_DRM_I915_MEMTRACK) +void i915_error_puts(struct drm_i915_error_state_buf *e, + const char *str); +bool i915_error_ok(struct 
drm_i915_error_state_buf *e); +int i915_obj_state_buf_init(struct drm_i915_error_state_buf *eb, + size_t count); +#endif __printf(2, 3) void i915_error_printf(struct drm_i915_error_state_buf *e, const char *f, ...); diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c index 29877969310da..5e0e9f189418a 100644 --- a/drivers/gpu/drm/i915/i915_irq.c +++ b/drivers/gpu/drm/i915/i915_irq.c @@ -37,6 +37,10 @@ #include "i915_trace.h" #include "intel_drv.h" +#if IS_ENABLED(CONFIG_DRM_I915_GVT) +#include "gvt.h" +#endif + /** * DOC: interrupt handling * @@ -221,6 +225,17 @@ static void gen2_assert_iir_is_zero(struct drm_i915_private *dev_priv, static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir); static void gen9_guc_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir); + +#if IS_ENABLED(CONFIG_DRM_I915_GVT) +static inline void gvt_notify_vblank(struct drm_i915_private *dev_priv, + enum pipe pipe) +{ + if (dev_priv->gvt) + queue_work(system_highpri_wq, + &dev_priv->gvt->pipe_info[pipe].vblank_work); +} +#endif + /* For display hotplug interrupt */ static inline void i915_hotplug_interrupt_update_locked(struct drm_i915_private *dev_priv, @@ -611,11 +626,12 @@ static void bdw_update_port_irq(struct drm_i915_private *dev_priv, * @enabled_irq_mask: mask of interrupt bits to enable */ void bdw_update_pipe_irq(struct drm_i915_private *dev_priv, - enum pipe pipe, + unsigned int crtc_index, uint32_t interrupt_mask, uint32_t enabled_irq_mask) { uint32_t new_val; + enum pipe pipe; lockdep_assert_held(&dev_priv->irq_lock); @@ -624,6 +640,9 @@ void bdw_update_pipe_irq(struct drm_i915_private *dev_priv, if (WARN_ON(!intel_irqs_enabled(dev_priv))) return; + if(get_pipe_from_crtc_index(&dev_priv->drm, crtc_index, &pipe)) + return; + new_val = dev_priv->de_irq_mask[pipe]; new_val &= ~interrupt_mask; new_val |= (~enabled_irq_mask & interrupt_mask); @@ -869,9 +888,14 @@ static u32 i915_get_vblank_counter(struct drm_device *dev, 
unsigned int pipe) return (((high1 << 8) | low) + (pixel >= vbl_start)) & 0xffffff; } -static u32 g4x_get_vblank_counter(struct drm_device *dev, unsigned int pipe) +static u32 g4x_get_vblank_counter(struct drm_device *dev, + unsigned int crtc_index) { struct drm_i915_private *dev_priv = to_i915(dev); + enum pipe pipe; + + if(get_pipe_from_crtc_index(dev, crtc_index, &pipe)) + return 0; return I915_READ(PIPE_FRMCOUNT_G4X(pipe)); } @@ -987,18 +1011,21 @@ static int __intel_get_crtc_scanline(struct intel_crtc *crtc) return (position + crtc->scanline_offset) % vtotal; } -static bool i915_get_crtc_scanoutpos(struct drm_device *dev, unsigned int pipe, - bool in_vblank_irq, int *vpos, int *hpos, - ktime_t *stime, ktime_t *etime, - const struct drm_display_mode *mode) +static bool i915_get_crtc_scanoutpos(struct drm_device *dev, unsigned int crtc_index, + bool in_vblank_irq, int *vpos, int *hpos, + ktime_t *stime, ktime_t *etime, + const struct drm_display_mode *mode) { struct drm_i915_private *dev_priv = to_i915(dev); - struct intel_crtc *intel_crtc = intel_get_crtc_for_pipe(dev_priv, - pipe); + struct intel_crtc *intel_crtc; + enum pipe pipe; int position; int vbl_start, vbl_end, hsync_start, htotal, vtotal; unsigned long irqflags; + intel_crtc = get_intel_crtc_from_index(dev, crtc_index); + pipe = intel_crtc->pipe; + if (WARN_ON(!mode->crtc_clock)) { DRM_DEBUG_DRIVER("trying to get scanoutpos for disabled " "pipe %c\n", pipe_name(pipe)); @@ -1503,6 +1530,12 @@ gen8_cs_irq_handler(struct intel_engine_cs *engine, u32 iir) tasklet |= USES_GUC_SUBMISSION(engine->i915); } + if ((iir & (GT_RENDER_CS_MASTER_ERROR_INTERRUPT)) && + intel_vgpu_active(engine->i915)) { + queue_work(system_highpri_wq, &engine->reset_work); + return; + } + if (tasklet) tasklet_hi_schedule(&engine->execlists.tasklet); } @@ -2727,6 +2760,7 @@ gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl) irqreturn_t ret = IRQ_NONE; u32 iir; enum pipe pipe; + struct intel_crtc *crtc; if 
(master_ctl & GEN8_DE_MISC_IRQ) { iir = I915_READ(GEN8_DE_MISC_IIR); @@ -2837,8 +2871,13 @@ gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl) ret = IRQ_HANDLED; I915_WRITE(GEN8_DE_PIPE_IIR(pipe), iir); - if (iir & GEN8_PIPE_VBLANK) - drm_handle_vblank(&dev_priv->drm, pipe); + crtc = intel_get_crtc_for_pipe(dev_priv, pipe); + if (iir & GEN8_PIPE_VBLANK) { + drm_handle_vblank(&dev_priv->drm, drm_crtc_index(&crtc->base)); +#if IS_ENABLED(CONFIG_DRM_I915_GVT) + gvt_notify_vblank(dev_priv, pipe); +#endif + } if (iir & GEN8_PIPE_CDCLK_CRC_DONE) hsw_pipe_crc_irq_handler(dev_priv, pipe); @@ -3446,7 +3485,9 @@ static void gen8_disable_vblank(struct drm_device *dev, unsigned int pipe) unsigned long irqflags; spin_lock_irqsave(&dev_priv->irq_lock, irqflags); - bdw_disable_pipe_irq(dev_priv, pipe, GEN8_PIPE_VBLANK); + /*since guest will see all the pipes, we don't want it disable vblank*/ + if (!dev_priv->gvt) + bdw_disable_pipe_irq(dev_priv, pipe, GEN8_PIPE_VBLANK); spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); } @@ -4138,6 +4179,19 @@ static void gen8_gt_irq_postinstall(struct drm_i915_private *dev_priv) if (HAS_L3_DPF(dev_priv)) gt_interrupts[0] |= GT_RENDER_L3_PARITY_ERROR_INTERRUPT; + if (intel_vgpu_active(dev_priv)) { + gt_interrupts[0] |= GT_RENDER_CS_MASTER_ERROR_INTERRUPT << + GEN8_RCS_IRQ_SHIFT | + GT_RENDER_CS_MASTER_ERROR_INTERRUPT << + GEN8_BCS_IRQ_SHIFT; + gt_interrupts[1] |= GT_RENDER_CS_MASTER_ERROR_INTERRUPT << + GEN8_VCS1_IRQ_SHIFT | + GT_RENDER_CS_MASTER_ERROR_INTERRUPT << + GEN8_VCS2_IRQ_SHIFT; + gt_interrupts[3] |= GT_RENDER_CS_MASTER_ERROR_INTERRUPT << + GEN8_VECS_IRQ_SHIFT; + } + dev_priv->pm_ier = 0x0; dev_priv->pm_imr = ~dev_priv->pm_ier; GEN8_IRQ_INIT_NDX(GT, 0, ~gt_interrupts[0], gt_interrupts[0]); diff --git a/drivers/gpu/drm/i915/i915_params.c b/drivers/gpu/drm/i915/i915_params.c index 295e981e4a398..c588edb4a41dc 100644 --- a/drivers/gpu/drm/i915/i915_params.c +++ b/drivers/gpu/drm/i915/i915_params.c @@ -174,6 +174,93 
@@ i915_param_named(enable_dpcd_backlight, bool, 0600, i915_param_named(enable_gvt, bool, 0400, "Enable support for Intel GVT-g graphics virtualization host support(default:false)"); +i915_param_named(gvt_workload_priority, int, 0600, + "Set GVT-g workload priority, (range: (-1023, 1023), default: 0, " + "more positive value means higher priority)."); + +i915_param_named(enable_initial_modeset, bool, 0400, + "Do initial modeset (default : false)"); + +i915_param_named(domain_scaler_owner, int, 0400, + "scaler owners for each domain and for each pipe ids can be from 0-F"); + +/* pipeA Scaler = BITS 0-7 pipeB scaler = 8-15, pipeC = 16-19 + * + * +----------+------------+-------------+------------+ + * |unused | Pipe C | Pipe B | Pipe A | + * +----------+------------+-------------+------------+ + * 31 20 19 16 15 8 7 0 + * + * Each nibble represents domain id. 0 for Dom0, 1,2,3...0xF for DomUs + * eg: domains_scaler_owners = 0x00030210 // 0x000|3|02|10 + * scaler domain + * scaler_owner1A -0 + * scaler_owner2A -1 + * scaler_owner1B -2 + * scaler_owner2B -0 + * scaler_owner1C -3 + * scaler_owner2C -0 + * + */ + + +i915_param_named(enable_pvmmio, uint, 0400, + "Enable pv mmio feature and set pvmmio level, default 1." 
+ "This parameter could only set from host, guest value is set through vgt_if"); + +/* pipeA = BITS 0-3, pipeB = BITS 8-11, pipeC = BITS 16-18 + * +----------+-------+---------+--------+--------+--------+--------+ + * |unused |unused | Pipe C | unused | Pipe B | unused | Pipe A | + * +----------+-------+---------+--------+--------+--------+--------+ + * 31 23 18 15 11 7 3 0 + * + * + * BITS 0,1,2,3 - needs to be set planes assigned for pipes A and B + * and BITs 0,1,2 - for pipe C + * eg: avail_planes_per_pipe = 0x3 - pipe A=2(planes 1 and 2) , pipeB=0 and pipeC=0 planes + * eg: avail_planes_per_pipe = 0x5 - pipe A=2(planes 1 and 3) , pipeB=0 and pipeC=0 planes + * avail_planes_per_pipe = 0x030701 - pipe A =1(plane 1, pipeB=3(planes 1,2 and 3), pipeC=2( planes 1 and 2) + * + */ +i915_param_named_unsafe(avail_planes_per_pipe, uint, 0400, + "plane mask for each pipe: \ + set BITS 0-3:pipeA 8-11:pipeB 16-18:pipeC to specify the planes that \ + are available eg: 0x030701 : planes 1:pipeA 1,2,3:pipeB \ + 1,2:pipeC (0x0 - default value)"); + +/* pipeA = BITS 0-15 pipeB = 16-31, pipeC = 32-47 + * + * +----------+------------+-------------+------------+ + * |unused | Pipe C | Pipe B | Pipe A | + * +----------+------------+-------------+------------+ + * 63 47 31 15 0 + * + * Each nibble represents domain id. 
0 for Dom0, 1,2,3...0xF for DomUs + * eg: domain_plane_owners = 0x022111000010 // 0x0221|1100|0010 + * plane domain + * plane_owner1A -0 + * plane_owner2A -1 + * plane_owner3A -0 + * plane_owner4A -0 + * plane_owner1B -0 + * plane_owner2B -0 + * plane_owner3B -1 + * plane_owner4B -1 + * plane_owner1C -1 + * plane_owner2C -2 + * plane_owner3C -2 + * + * + */ +i915_param_named_unsafe(domain_plane_owners, ullong, 0400, + "plane owners for each domain and for each pipe \ + ids can be from 0-F, eg: domain_plane_owners = 0x022111000010 \ + planes owner: 3C:2 2C:2 1C:1 4B:1 3B:1 2B:1 1B:0 4A:0 3A:0 2A:1 1A:0 \ + (0x0 - default value)"); + +i915_param_named(enable_conformance_check, bool, 0400, + "To toggle the GVT guest conformance feature(default:true)"); + static __always_inline void _print_param(struct drm_printer *p, const char *name, const char *type, @@ -185,6 +272,8 @@ static __always_inline void _print_param(struct drm_printer *p, drm_printf(p, "i915.%s=%d\n", name, *(const int *)x); else if (!__builtin_strcmp(type, "unsigned int")) drm_printf(p, "i915.%s=%u\n", name, *(const unsigned int *)x); + else if (!__builtin_strcmp(type, "unsigned long long")) + drm_printf(p, "i915.%s=%llu\n", name, *(const unsigned long long *)x); else if (!__builtin_strcmp(type, "char *")) drm_printf(p, "i915.%s=%s\n", name, *(const char **)x); else diff --git a/drivers/gpu/drm/i915/i915_params.h b/drivers/gpu/drm/i915/i915_params.h index 6c4d4a21474b5..9df42e8d99c0c 100644 --- a/drivers/gpu/drm/i915/i915_params.h +++ b/drivers/gpu/drm/i915/i915_params.h @@ -55,6 +55,8 @@ struct drm_printer; param(int, edp_vswing, 0) \ param(int, reset, 2) \ param(unsigned int, inject_load_failure, 0) \ + param(unsigned int, avail_planes_per_pipe, 0) \ + param(unsigned long long, domain_plane_owners, 0) \ /* leave bools at the end to not create holes */ \ param(bool, alpha_support, IS_ENABLED(CONFIG_DRM_I915_ALPHA_SUPPORT)) \ param(bool, enable_hangcheck, true) \ @@ -68,7 +70,15 @@ struct drm_printer; 
param(bool, nuclear_pageflip, false) \ param(bool, enable_dp_mst, true) \ param(bool, enable_dpcd_backlight, false) \ - param(bool, enable_gvt, false) + param(int, domain_scaler_owner, 0x11100) \ + param(unsigned int, enable_pvmmio, \ + PVMMIO_ELSP_SUBMIT | PVMMIO_PLANE_UPDATE \ + | PVMMIO_PLANE_WM_UPDATE | PVMMIO_PPGTT_UPDATE \ + | PVMMIO_GGTT_UPDATE ) \ + param(int, gvt_workload_priority, 0) \ + param(bool, enable_initial_modeset, false) \ + param(bool, enable_gvt, false) \ + param(bool, enable_conformance_check, true) #define MEMBER(T, member, ...) T member; struct i915_params { diff --git a/drivers/gpu/drm/i915/i915_pci.c b/drivers/gpu/drm/i915/i915_pci.c index 1df3ce134cd00..fcf5e3f41b014 100644 --- a/drivers/gpu/drm/i915/i915_pci.c +++ b/drivers/gpu/drm/i915/i915_pci.c @@ -737,6 +737,7 @@ static struct pci_driver i915_pci_driver = { .probe = i915_pci_probe, .remove = i915_pci_remove, .driver.pm = &i915_pm_ops, + .driver.probe_type = PROBE_PREFER_ASYNCHRONOUS }; static int __init i915_init(void) diff --git a/drivers/gpu/drm/i915/i915_pvinfo.h b/drivers/gpu/drm/i915/i915_pvinfo.h index eeaa3d506d95d..fd26872f15b6e 100644 --- a/drivers/gpu/drm/i915/i915_pvinfo.h +++ b/drivers/gpu/drm/i915/i915_pvinfo.h @@ -28,6 +28,12 @@ #define VGT_PVINFO_PAGE 0x78000 #define VGT_PVINFO_SIZE 0x1000 +/* Scratch reg used for redirecting command access to registers, any + * command access to PVINFO page would be discarded, so it has no HW + * impact. + */ +#define VGT_SCRATCH_REG VGT_PVINFO_PAGE + /* * The following structure pages are defined in GEN MMIO space * for virtualization. 
(One page for now) @@ -46,9 +52,83 @@ enum vgt_g2v_type { VGT_G2V_PPGTT_L4_PAGE_TABLE_DESTROY, VGT_G2V_EXECLIST_CONTEXT_CREATE, VGT_G2V_EXECLIST_CONTEXT_DESTROY, + VGT_G2V_PPGTT_L4_ALLOC, + VGT_G2V_PPGTT_L4_CLEAR, + VGT_G2V_PPGTT_L4_INSERT, + VGT_G2V_GGTT_INSERT, + VGT_G2V_GGTT_CLEAR, VGT_G2V_MAX, }; +#define PLANE_COLOR_CTL_BIT (1 << 0) +#define PLANE_KEY_BIT (1 << 1) +#define PLANE_SCALER_BIT (1 << 2) + +struct pv_plane_update { + u32 flags; + u32 plane_color_ctl; + u32 plane_key_val; + u32 plane_key_max; + u32 plane_key_msk; + u32 plane_offset; + u32 plane_stride; + u32 plane_size; + u32 plane_aux_dist; + u32 plane_aux_offset; + u32 ps_ctrl; + u32 ps_pwr_gate; + u32 ps_win_ps; + u32 ps_win_sz; + u32 plane_pos; + u32 plane_ctl; +}; + +struct pv_plane_wm_update { + u32 max_wm_level; + u32 plane_wm_level[8]; + u32 plane_trans_wm_level; + u32 plane_buf_cfg; +}; + +struct pv_ppgtt_update { + u64 pdp; + u64 start; + u64 length; + u32 cache_level; +}; + +struct pv_ggtt_update { + u64 start; + u64 length; + u32 cache_level; +}; + +/* shared page(4KB) between gvt and VM, located at the first page next + * to MMIO region(2MB size normally). 
+ */ +struct gvt_shared_page { + u32 elsp_data[4]; + u32 reg_addr; + struct pv_plane_update pv_plane; + struct pv_plane_wm_update pv_plane_wm; + struct pv_ppgtt_update pv_ppgtt; + struct pv_ggtt_update pv_ggtt; + u32 rsvd2[0x400 - 46]; +}; + +#define VGPU_PVMMIO(vgpu) vgpu_vreg_t(vgpu, vgtif_reg(enable_pvmmio)) + +/* + * define different levels of PVMMIO optimization + */ +enum pvmmio_levels { + PVMMIO_ELSP_SUBMIT = 0x1, + PVMMIO_PLANE_UPDATE = 0x2, + PVMMIO_PLANE_WM_UPDATE = 0x4, + PVMMIO_PPGTT_UPDATE = 0x10, + PVMMIO_GGTT_UPDATE = 0x20, +}; + /* * VGT capabilities type */ @@ -56,6 +136,9 @@ enum vgt_g2v_type { #define VGT_CAPS_HWSP_EMULATION BIT(3) #define VGT_CAPS_HUGE_GTT BIT(4) +#define PVMMIO_LEVEL(dev_priv, level) \ + (intel_vgpu_active(dev_priv) && (i915_modparams.enable_pvmmio & level)) + struct vgt_if { u64 magic; /* VGT_MAGIC */ u16 version_major; @@ -106,8 +189,11 @@ struct vgt_if { u32 execlist_context_descriptor_lo; u32 execlist_context_descriptor_hi; + u32 enable_pvmmio; + u32 pv_mmio; + u32 scaler_owned; - u32 rsv7[0x200 - 24]; /* pad to one page */ + u32 rsv7[0x200 - 27]; /* pad to one page */ } __packed; #define vgtif_reg(x) \ diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index 9e63cd47b60f3..0297014630a52 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h @@ -2097,8 +2097,12 @@ enum i915_power_well_id { /* ICL PHY DFLEX registers */ #define PORT_TX_DFLEXDPMLE1 _MMIO(0x1638C0) -#define DFLEXDPMLE1_DPMLETC_MASK(n) (0xf << (4 * (n))) -#define DFLEXDPMLE1_DPMLETC(n, x) ((x) << (4 * (n))) +#define DFLEXDPMLE1_DPMLETC_MASK(tc_port) (0xf << (4 * (tc_port))) +#define DFLEXDPMLE1_DPMLETC_ML0(tc_port) (1 << (4 * (tc_port))) +#define DFLEXDPMLE1_DPMLETC_ML1_0(tc_port) (3 << (4 * (tc_port))) +#define DFLEXDPMLE1_DPMLETC_ML3(tc_port) (8 << (4 * (tc_port))) +#define DFLEXDPMLE1_DPMLETC_ML3_2(tc_port) (12 << (4 * (tc_port))) +#define DFLEXDPMLE1_DPMLETC_ML3_0(tc_port) (15 << (4 * (tc_port))) /* 
BXT PHY Ref registers */ #define _PORT_REF_DW3_A 0x16218C @@ -6540,6 +6544,7 @@ enum { #define PLANE_CTL_ALPHA_DISABLE (0 << 4) #define PLANE_CTL_ALPHA_SW_PREMULTIPLY (2 << 4) #define PLANE_CTL_ALPHA_HW_PREMULTIPLY (3 << 4) +#define PLANE_SURF_DECRYPTION_ENABLED (1 << 2) #define PLANE_CTL_ROTATE_MASK 0x3 #define PLANE_CTL_ROTATE_0 0x0 #define PLANE_CTL_ROTATE_90 0x1 @@ -10652,4 +10657,31 @@ enum skl_power_gate { _ICL_DSC1_RC_BUF_THRESH_1_UDW_PB, \ _ICL_DSC1_RC_BUF_THRESH_1_UDW_PC) +/* GVT has special read process from some MMIO register, + * which so that should be trapped to GVT to make a + * complete emulation. Such MMIO is not too much, now using + * a static list to cover them. + */ +static inline bool in_mmio_read_trap_list(u32 reg) +{ + if (unlikely(reg >= PCH_GMBUS0.reg && reg <= PCH_GMBUS5.reg)) + return true; + + if (unlikely(reg == RING_TIMESTAMP(RENDER_RING_BASE).reg || + reg == RING_TIMESTAMP(BLT_RING_BASE).reg || + reg == RING_TIMESTAMP(GEN6_BSD_RING_BASE).reg || + reg == RING_TIMESTAMP(VEBOX_RING_BASE).reg || + reg == RING_TIMESTAMP(GEN8_BSD2_RING_BASE).reg || + reg == RING_TIMESTAMP_UDW(RENDER_RING_BASE).reg || + reg == RING_TIMESTAMP_UDW(BLT_RING_BASE).reg || + reg == RING_TIMESTAMP_UDW(GEN6_BSD_RING_BASE).reg || + reg == RING_TIMESTAMP_UDW(VEBOX_RING_BASE).reg)) + return true; + + if (unlikely(reg == SBI_DATA.reg || reg == 0x6c060 || reg == 0x206c)) + return true; + + return false; +} + #endif /* _I915_REG_H_ */ diff --git a/drivers/gpu/drm/i915/i915_request.c b/drivers/gpu/drm/i915/i915_request.c index 5c2c93cbab12f..811595d32eb68 100644 --- a/drivers/gpu/drm/i915/i915_request.c +++ b/drivers/gpu/drm/i915/i915_request.c @@ -1054,6 +1054,7 @@ void i915_request_add(struct i915_request *request) lockdep_assert_held(&request->i915->drm.struct_mutex); trace_i915_request_add(request); + trace_i915_request_add_domain(request); /* * Make sure that no request gazumped us - if it was allocated after @@ -1126,8 +1127,11 @@ void i915_request_add(struct 
i915_request *request) */ local_bh_disable(); rcu_read_lock(); /* RCU serialisation for set-wedged protection */ - if (engine->schedule) - engine->schedule(request, &request->gem_context->sched); + if (engine->schedule) { + engine->schedule(request, + &request->gem_context->sched, + request->gem_context->preempt_timeout); + } rcu_read_unlock(); i915_sw_fence_commit(&request->submit); local_bh_enable(); /* Kick the execlists tasklet if just scheduled */ diff --git a/drivers/gpu/drm/i915/i915_sysfs.c b/drivers/gpu/drm/i915/i915_sysfs.c index e5e6f6bb2b05a..4ff6442027432 100644 --- a/drivers/gpu/drm/i915/i915_sysfs.c +++ b/drivers/gpu/drm/i915/i915_sysfs.c @@ -32,6 +32,10 @@ #include "intel_drv.h" #include "i915_drv.h" +#if IS_ENABLED(CONFIG_DRM_I915_MEMTRACK) +#include "../drm_internal.h" +#endif + static inline struct drm_i915_private *kdev_minor_to_i915(struct device *kdev) { struct drm_minor *minor = dev_get_drvdata(kdev); @@ -571,6 +575,284 @@ static void i915_teardown_error_capture(struct device *kdev) { sysfs_remove_bin_file(&kdev->kobj, &error_state_attr); } + +#if IS_ENABLED(CONFIG_DRM_I915_MEMTRACK) +#define dev_to_drm_minor(d) dev_get_drvdata((d)) + +static ssize_t i915_gem_clients_state_read(struct file *filp, + struct kobject *memtrack_kobj, + struct bin_attribute *attr, + char *buf, loff_t off, size_t count) +{ + struct kobject *kobj = memtrack_kobj->parent; + struct device *kdev = container_of(kobj, struct device, kobj); + struct drm_minor *minor = dev_to_drm_minor(kdev); + struct drm_device *dev = minor->dev; + struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_error_state_buf error_str; + ssize_t ret_count = 0; + int ret; + + ret = i915_error_state_buf_init(&error_str, dev_priv, count, off); + if (ret) + return ret; + + ret = i915_get_drm_clients_info(&error_str, dev); + if (ret) + goto out; + + ret_count = count < error_str.bytes ? 
count : error_str.bytes; + + memcpy(buf, error_str.buf, ret_count); +out: + i915_error_state_buf_release(&error_str); + + return ret ?: ret_count; +} + +#define GEM_OBJ_STAT_BUF_SIZE (4*1024) /* 4KB */ +#define GEM_OBJ_STAT_BUF_SIZE_MAX (1024*1024) /* 1MB */ + +struct i915_gem_file_attr_priv { + char tgid_str[16]; + struct pid *tgid; + struct drm_i915_error_state_buf buf; +}; + +static ssize_t i915_gem_read_objects(struct file *filp, + struct kobject *memtrack_kobj, + struct bin_attribute *attr, + char *buf, loff_t off, size_t count) +{ + struct kobject *kobj = memtrack_kobj->parent; + struct device *kdev = container_of(kobj, struct device, kobj); + struct drm_minor *minor = dev_to_drm_minor(kdev); + struct drm_device *dev = minor->dev; + struct i915_gem_file_attr_priv *attr_priv; + struct pid *tgid; + ssize_t ret_count = 0; + long bytes_available; + int ret = 0, buf_size = GEM_OBJ_STAT_BUF_SIZE; + unsigned long timeout = msecs_to_jiffies(500) + 1; + + /* + * There may arise a scenario where syfs file entry is being removed, + * and may race against sysfs read. Sysfs file remove function would + * have taken the drm_global_mutex and would wait for read to finish, + * which is again waiting to acquire drm_global_mutex, leading to + * deadlock. To avoid this, use mutex_trylock here with a timeout. 
+ */ + while (!mutex_trylock(&drm_global_mutex) && --timeout) + schedule_timeout_killable(1); + if (timeout == 0) { + DRM_DEBUG_DRIVER("Unable to acquire drm global mutex.\n"); + return -EBUSY; + } + + if (!attr || !attr->private) { + ret = -EINVAL; + DRM_ERROR("attr | attr->private pointer is NULL\n"); + goto out; + } + attr_priv = attr->private; + tgid = attr_priv->tgid; + + if (off && !attr_priv->buf.buf) { + ret = -EINVAL; + DRM_ERROR( + "Buf not allocated during read with non-zero offset\n"); + goto out; + } + + if (off == 0) { +retry: + if (!attr_priv->buf.buf) { + ret = i915_obj_state_buf_init(&attr_priv->buf, + buf_size); + if (ret) { + DRM_ERROR( + "obj state buf init failed. buf_size=%d\n", + buf_size); + goto out; + } + } else { + /* Reset the buf parameters before filling data */ + attr_priv->buf.pos = 0; + attr_priv->buf.bytes = 0; + } + + /* Read the gfx device stats */ + ret = i915_gem_get_obj_info(&attr_priv->buf, dev, tgid); + if (ret) + goto out; + + ret = i915_error_ok(&attr_priv->buf); + if (ret) { + ret = 0; + goto copy_data; + } + if (buf_size >= GEM_OBJ_STAT_BUF_SIZE_MAX) { + DRM_DEBUG_DRIVER("obj stat buf size limit reached\n"); + ret = -ENOMEM; + goto out; + } else { + /* Try to reallocate buf of larger size */ + i915_error_state_buf_release(&attr_priv->buf); + buf_size *= 2; + + ret = i915_obj_state_buf_init(&attr_priv->buf, + buf_size); + if (ret) { + DRM_ERROR( + "obj stat buf init failed. buf_size=%d\n", + buf_size); + goto out; + } + goto retry; + } + } +copy_data: + + bytes_available = (long)attr_priv->buf.bytes - (long)off; + + if (bytes_available > 0) { + ret_count = count < bytes_available ? 
count : bytes_available; + memcpy(buf, attr_priv->buf.buf + off, ret_count); + } else + ret_count = 0; + +out: + mutex_unlock(&drm_global_mutex); + + return ret ?: ret_count; +} + +int i915_gem_create_sysfs_file_entry(struct drm_device *dev, + struct drm_file *file) +{ + struct drm_i915_file_private *file_priv = file->driver_priv; + struct drm_i915_private *dev_priv = dev->dev_private; + struct i915_gem_file_attr_priv *attr_priv; + struct bin_attribute *obj_attr; + struct drm_file *file_local; + int ret; + + /* + * Check for multiple drm files having same tgid. If found, copy the + * bin attribute into the new file priv. Otherwise allocate a new + * copy of bin attribute, and create its corresponding sysfs file. + */ + mutex_lock(&dev->struct_mutex); + list_for_each_entry(file_local, &dev->filelist, lhead) { + struct drm_i915_file_private *file_priv_local = + file_local->driver_priv; + + if (file_priv->tgid == file_priv_local->tgid) { + file_priv->obj_attr = file_priv_local->obj_attr; + mutex_unlock(&dev->struct_mutex); + return 0; + } + } + mutex_unlock(&dev->struct_mutex); + + obj_attr = kzalloc(sizeof(*obj_attr), GFP_KERNEL); + if (!obj_attr) { + DRM_ERROR("Alloc failed. Out of memory\n"); + ret = -ENOMEM; + goto out; + } + + attr_priv = kzalloc(sizeof(*attr_priv), GFP_KERNEL); + if (!attr_priv) { + DRM_ERROR("Alloc failed. Out of memory\n"); + ret = -ENOMEM; + goto out_obj_attr; + } + + snprintf(attr_priv->tgid_str, 16, "%d", task_tgid_nr(current)); + obj_attr->attr.name = attr_priv->tgid_str; + obj_attr->attr.mode = S_IRUSR | S_IWUSR | S_IRGRP | S_IROTH; + obj_attr->size = 0; + obj_attr->read = i915_gem_read_objects; + + attr_priv->tgid = file_priv->tgid; + obj_attr->private = attr_priv; + + ret = sysfs_create_bin_file(&dev_priv->memtrack_kobj, + obj_attr); + if (ret) { + DRM_ERROR( + "sysfs tgid file setup failed. 
tgid=%d, process:%s, ret:%d\n", + pid_nr(file_priv->tgid), file_priv->process_name, ret); + + goto out_attr_priv; + } + + file_priv->obj_attr = obj_attr; + return 0; + +out_attr_priv: + kfree(attr_priv); +out_obj_attr: + kfree(obj_attr); +out: + return ret; +} + +void i915_gem_remove_sysfs_file_entry(struct drm_device *dev, + struct drm_file *file) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_file_private *file_priv = file->driver_priv; + struct drm_file *file_local; + int open_count = 1; + + /* + * The current drm file instance is already removed from filelist at + * this point. + * Check if this particular drm file being removed is the last one for + * that particular tgid, and no other instances for this tgid exist in + * the filelist. If so, remove the corresponding sysfs file entry also. + */ + list_for_each_entry(file_local, &dev->filelist, lhead) { + struct drm_i915_file_private *file_priv_local = + file_local->driver_priv; + + if (pid_nr(file_priv->tgid) == pid_nr(file_priv_local->tgid)) + open_count++; + } + + if (open_count == 1) { + struct i915_gem_file_attr_priv *attr_priv; + + if (WARN_ON(file_priv->obj_attr == NULL)) + return; + attr_priv = file_priv->obj_attr->private; + + sysfs_remove_bin_file(&dev_priv->memtrack_kobj, + file_priv->obj_attr); + + i915_error_state_buf_release(&attr_priv->buf); + kfree(file_priv->obj_attr->private); + kfree(file_priv->obj_attr); + } +} + +static struct bin_attribute i915_gem_client_state_attr = { + .attr.name = "i915_gem_meminfo", + .attr.mode = S_IRUSR | S_IWUSR | S_IRGRP | S_IROTH, + .size = 0, + .read = i915_gem_clients_state_read, +}; + +static struct attribute *memtrack_kobj_attrs[] = {NULL}; + +static struct kobj_type memtrack_kobj_type = { + .release = NULL, + .sysfs_ops = NULL, + .default_attrs = memtrack_kobj_attrs, +}; +#endif #else static void i915_setup_error_capture(struct device *kdev) {} static void i915_teardown_error_capture(struct device *kdev) {} @@ -623,6 +905,28 @@ 
void i915_setup_sysfs(struct drm_i915_private *dev_priv) DRM_ERROR("RPS sysfs setup failed\n"); i915_setup_error_capture(kdev); + +#if IS_ENABLED(CONFIG_DRM_I915_MEMTRACK) + /* + * Create the gfx_memtrack directory for memtrack sysfs files + */ + ret = kobject_init_and_add( + &dev_priv->memtrack_kobj, &memtrack_kobj_type, + &kdev->kobj, "gfx_memtrack"); + if (unlikely(ret != 0)) { + DRM_ERROR( + "i915 sysfs setup memtrack directory failed\n" + ); + kobject_put(&dev_priv->memtrack_kobj); + } else { + ret = sysfs_create_bin_file(&dev_priv->memtrack_kobj, + &i915_gem_client_state_attr); + if (ret) + DRM_ERROR( + "i915_gem_client_state sysfs setup failed\n" + ); + } +#endif } void i915_teardown_sysfs(struct drm_i915_private *dev_priv) @@ -641,4 +945,11 @@ void i915_teardown_sysfs(struct drm_i915_private *dev_priv) sysfs_unmerge_group(&kdev->kobj, &rc6_attr_group); sysfs_unmerge_group(&kdev->kobj, &rc6p_attr_group); #endif + +#if IS_ENABLED(CONFIG_DRM_I915_MEMTRACK) + sysfs_remove_bin_file(&dev_priv->memtrack_kobj, + &i915_gem_client_state_attr); + kobject_del(&dev_priv->memtrack_kobj); + kobject_put(&dev_priv->memtrack_kobj); +#endif } diff --git a/drivers/gpu/drm/i915/i915_trace.h b/drivers/gpu/drm/i915/i915_trace.h index b50c6b829715e..6f25961ad9ad2 100644 --- a/drivers/gpu/drm/i915/i915_trace.h +++ b/drivers/gpu/drm/i915/i915_trace.h @@ -281,7 +281,7 @@ TRACE_EVENT(i915_pipe_update_start, TP_fast_assign( __entry->pipe = crtc->pipe; __entry->frame = crtc->base.dev->driver->get_vblank_counter(crtc->base.dev, - crtc->pipe); + drm_crtc_index(&crtc->base)); __entry->scanline = intel_get_crtc_scanline(crtc); __entry->min = crtc->debug.min_vbl; __entry->max = crtc->debug.max_vbl; @@ -679,6 +679,53 @@ DEFINE_EVENT(i915_request, i915_request_add, TP_ARGS(rq) ); +TRACE_EVENT(i915_multi_domains, + TP_PROTO(struct i915_request *req), + TP_ARGS(req), + + TP_STRUCT__entry( + __field(u32, dev) + __field(u32, ctx) + __field(u32, ring) + __field(u32, seqno) + __field(u32, global) + 
__field(int, prio_req) + __field(int, prio_ctx) + __field(bool, shadow_ctx) + __field(u32, hw_id) + __field(int, vgt_id) + __field(u32, pid) + ), + + TP_fast_assign( + __entry->dev = req->i915->drm.primary->index; + __entry->ring = req->engine->id; + __entry->ctx = req->fence.context; + __entry->seqno = req->fence.seqno; + __entry->global = req->global_seqno; + __entry->prio_req = req->sched.attr.priority; + __entry->prio_ctx = req->sched.attr.priority; + __entry->shadow_ctx = is_shadow_context(req->gem_context); + __entry->hw_id = req->gem_context->hw_id; + __entry->vgt_id = get_vgt_id(req->gem_context); + __entry->pid = is_shadow_context(req->gem_context) ? + get_pid_shadowed(req->gem_context, req->engine) : + pid_nr(req->gem_context->pid); + ), + + TP_printk("dev=%u, ring=%u, ctx=%u, seqno=%u, global=%u, " + "priority=%d (%d), is_shadow_ctx=%u, hw_id=%u, " + "vgt_id=%u, pid=%u", __entry->dev, __entry->ring, + __entry->ctx, __entry->seqno, __entry->global, + __entry->prio_req, __entry->prio_ctx, __entry->shadow_ctx, + __entry->hw_id, __entry->vgt_id, __entry->pid) +); + +DEFINE_EVENT(i915_multi_domains, i915_request_add_domain, + TP_PROTO(struct i915_request *req), + TP_ARGS(req) +); + #if defined(CONFIG_DRM_I915_LOW_LEVEL_TRACEPOINTS) DEFINE_EVENT(i915_request, i915_request_submit, TP_PROTO(struct i915_request *rq), diff --git a/drivers/gpu/drm/i915/i915_vgpu.c b/drivers/gpu/drm/i915/i915_vgpu.c index 869cf4a3b6de7..d7a328f52978e 100644 --- a/drivers/gpu/drm/i915/i915_vgpu.c +++ b/drivers/gpu/drm/i915/i915_vgpu.c @@ -76,6 +76,17 @@ void i915_check_vgpu(struct drm_i915_private *dev_priv) } dev_priv->vgpu.caps = __raw_i915_read32(dev_priv, vgtif_reg(vgt_caps)); + dev_priv->vgpu.scaler_owned = + __raw_i915_read32(dev_priv, vgtif_reg(scaler_owned)); + + /* If guest wants to enable pvmmio, it needs to enable it explicitly + * through vgt_if interface, and then read back the enable state from + * gvt layer. 
+ */ + __raw_i915_write32(dev_priv, vgtif_reg(enable_pvmmio), + i915_modparams.enable_pvmmio); + i915_modparams.enable_pvmmio = __raw_i915_read16(dev_priv, + vgtif_reg(enable_pvmmio)); dev_priv->vgpu.active = true; DRM_INFO("Virtual GPU for Intel GVT-g detected.\n"); diff --git a/drivers/gpu/drm/i915/intel_atomic.c b/drivers/gpu/drm/i915/intel_atomic.c index b04952bacf77c..ec4a73e797092 100644 --- a/drivers/gpu/drm/i915/intel_atomic.c +++ b/drivers/gpu/drm/i915/intel_atomic.c @@ -316,7 +316,8 @@ int intel_atomic_setup_scalers(struct drm_i915_private *dev_priv, if (*scaler_id < 0) { /* find a free scaler */ for (j = 0; j < intel_crtc->num_scalers; j++) { - if (!scaler_state->scalers[j].in_use) { + if (!scaler_state->scalers[j].in_use && + scaler_state->scalers[j].owned == 1) { scaler_state->scalers[j].in_use = 1; *scaler_id = j; DRM_DEBUG_KMS("Attached scaler id %u.%u to %s:%d\n", @@ -350,10 +351,13 @@ int intel_atomic_setup_scalers(struct drm_i915_private *dev_priv, * scaler 0 operates in high quality (HQ) mode. 
* In this case use scaler 0 to take advantage of HQ mode */ - *scaler_id = 0; - scaler_state->scalers[0].in_use = 1; - scaler_state->scalers[0].mode = PS_SCALER_MODE_HQ; - scaler_state->scalers[1].in_use = 0; + if (scaler_state->scalers[0].owned == 1) { + *scaler_id = 0; + scaler_state->scalers[0].in_use = 1; + scaler_state->scalers[0].mode = + PS_SCALER_MODE_HQ; + scaler_state->scalers[1].in_use = 0; + } } else { scaler_state->scalers[*scaler_id].mode = PS_SCALER_MODE_DYN; } diff --git a/drivers/gpu/drm/i915/intel_atomic_plane.c b/drivers/gpu/drm/i915/intel_atomic_plane.c index dcba645cabb87..b950fabb4bbf7 100644 --- a/drivers/gpu/drm/i915/intel_atomic_plane.c +++ b/drivers/gpu/drm/i915/intel_atomic_plane.c @@ -182,7 +182,8 @@ int intel_plane_atomic_check_with_state(const struct intel_crtc_state *old_crtc_ else crtc_state->active_planes &= ~BIT(intel_plane->id); - if (state->visible && state->fb->format->format == DRM_FORMAT_NV12) + if (state->visible && state->fb && + state->fb->format->format == DRM_FORMAT_NV12) crtc_state->nv12_planes |= BIT(intel_plane->id); else crtc_state->nv12_planes &= ~BIT(intel_plane->id); diff --git a/drivers/gpu/drm/i915/intel_audio.c b/drivers/gpu/drm/i915/intel_audio.c index 769f3f5866611..ee3ca2de983b9 100644 --- a/drivers/gpu/drm/i915/intel_audio.c +++ b/drivers/gpu/drm/i915/intel_audio.c @@ -144,6 +144,9 @@ static const struct { /* HDMI N/CTS table */ #define TMDS_297M 297000 #define TMDS_296M 296703 +#define TMDS_594M 594000 +#define TMDS_593M 593407 + static const struct { int sample_rate; int clock; @@ -164,6 +167,20 @@ static const struct { { 176400, TMDS_297M, 18816, 247500 }, { 192000, TMDS_296M, 23296, 281250 }, { 192000, TMDS_297M, 20480, 247500 }, + { 44100, TMDS_593M, 8918, 937500 }, + { 44100, TMDS_594M, 9408, 990000 }, + { 48000, TMDS_593M, 5824, 562500 }, + { 48000, TMDS_594M, 6144, 594000 }, + { 32000, TMDS_593M, 5824, 843750 }, + { 32000, TMDS_594M, 3072, 445500 }, + { 88200, TMDS_593M, 17836, 937500 }, + { 88200, 
TMDS_594M, 18816, 990000 }, + { 96000, TMDS_593M, 11648, 562500 }, + { 96000, TMDS_594M, 12288, 594000 }, + { 176400, TMDS_593M, 35672, 937500 }, + { 176400, TMDS_594M, 37632, 990000 }, + { 192000, TMDS_593M, 23296, 562500 }, + { 192000, TMDS_594M, 24576, 594000 }, }; /* get AUD_CONFIG_PIXEL_CLOCK_HDMI_* value for mode */ diff --git a/drivers/gpu/drm/i915/intel_bios.c b/drivers/gpu/drm/i915/intel_bios.c index 1faa494e2bc91..1f99373dcd77a 100644 --- a/drivers/gpu/drm/i915/intel_bios.c +++ b/drivers/gpu/drm/i915/intel_bios.c @@ -1726,6 +1726,14 @@ void intel_bios_init(struct drm_i915_private *dev_priv) return; } + if (HAS_PCH_NOP(dev_priv) && !intel_vgpu_active(dev_priv)) { + DRM_DEBUG_KMS("Skipping VBT init due to disabled display.\n"); + return; + } + else if (HAS_PCH_NOP(dev_priv)) { + dev_priv->pch_type = PCH_NONE; + } + init_vbt_defaults(dev_priv); /* If the OpRegion does not have VBT, look in PCI ROM. */ diff --git a/drivers/gpu/drm/i915/intel_color.c b/drivers/gpu/drm/i915/intel_color.c index c6a7beabd58d1..8d41c8f7042a2 100644 --- a/drivers/gpu/drm/i915/intel_color.c +++ b/drivers/gpu/drm/i915/intel_color.c @@ -230,13 +230,34 @@ static void ilk_load_csc_matrix(struct drm_crtc_state *crtc_state) if (INTEL_GEN(dev_priv) > 6) { uint16_t postoff = 0; + uint16_t postoff_red = 0; + uint16_t postoff_green = 0; + uint16_t postoff_blue = 0; - if (limited_color_range) + if (limited_color_range){ postoff = (16 * (1 << 12) / 255) & 0x1fff; + postoff_red = postoff; + postoff_green = postoff; + postoff_blue = postoff; + } + + if (crtc_state->ctm_post_offset) { + struct drm_color_ctm_post_offset *ctm_post_offset = + (struct drm_color_ctm_post_offset *)crtc_state->ctm_post_offset->data; + + /* Convert to U0.12 format. 
*/ + postoff_red = ctm_post_offset->red >> 4; + postoff_green = ctm_post_offset->green >> 4; + postoff_blue = ctm_post_offset->blue >> 4; + + postoff_red = clamp_val(postoff_red, postoff, 0xfff); + postoff_green = clamp_val(postoff_green, postoff, 0xfff); + postoff_blue = clamp_val(postoff_blue, postoff, 0xfff); + } - I915_WRITE(PIPE_CSC_POSTOFF_HI(pipe), postoff); - I915_WRITE(PIPE_CSC_POSTOFF_ME(pipe), postoff); - I915_WRITE(PIPE_CSC_POSTOFF_LO(pipe), postoff); + I915_WRITE(PIPE_CSC_POSTOFF_HI(pipe), postoff_red); + I915_WRITE(PIPE_CSC_POSTOFF_ME(pipe), postoff_green); + I915_WRITE(PIPE_CSC_POSTOFF_LO(pipe), postoff_blue); I915_WRITE(PIPE_CSC_MODE(pipe), 0); } else { diff --git a/drivers/gpu/drm/i915/intel_csr.c b/drivers/gpu/drm/i915/intel_csr.c index cf9b600cca79f..addb223604fa3 100644 --- a/drivers/gpu/drm/i915/intel_csr.c +++ b/drivers/gpu/drm/i915/intel_csr.c @@ -453,7 +453,13 @@ void intel_csr_ucode_init(struct drm_i915_private *dev_priv) INIT_WORK(&dev_priv->csr.work, csr_load_work_fn); - if (!HAS_CSR(dev_priv)) + /* + * In a GVTg enabled environment, loading the CSR firmware for DomU doesn't + * make much sense since we don't allow it to control display power + * management settings. Furthermore, we can save some time for DomU bootup + * by skipping CSR loading. 
+ */ + if (!HAS_CSR(dev_priv) || intel_vgpu_active(dev_priv)) return; if (i915_modparams.dmc_firmware_path) diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c index c9af34861d9e3..8b34f086a6632 100644 --- a/drivers/gpu/drm/i915/intel_ddi.c +++ b/drivers/gpu/drm/i915/intel_ddi.c @@ -1882,7 +1882,7 @@ bool intel_ddi_connector_get_hw_state(struct intel_connector *intel_connector) goto out; } - if (port == PORT_A) + if (port == PORT_A && !intel_vgpu_active(dev_priv)) cpu_transcoder = TRANSCODER_EDP; else cpu_transcoder = (enum transcoder) pipe; @@ -3009,11 +3009,6 @@ static void intel_enable_ddi(struct intel_encoder *encoder, intel_enable_ddi_hdmi(encoder, crtc_state, conn_state); else intel_enable_ddi_dp(encoder, crtc_state, conn_state); - - /* Enable hdcp if it's desired */ - if (conn_state->content_protection == - DRM_MODE_CONTENT_PROTECTION_DESIRED) - intel_hdcp_enable(to_intel_connector(conn_state->connector)); } static void intel_disable_ddi_dp(struct intel_encoder *encoder, @@ -3053,8 +3048,6 @@ static void intel_disable_ddi(struct intel_encoder *encoder, const struct intel_crtc_state *old_crtc_state, const struct drm_connector_state *old_conn_state) { - intel_hdcp_disable(to_intel_connector(old_conn_state->connector)); - if (intel_crtc_has_type(old_crtc_state, INTEL_OUTPUT_HDMI)) intel_disable_ddi_hdmi(encoder, old_crtc_state, old_conn_state); else @@ -3278,7 +3271,7 @@ static bool intel_ddi_compute_config(struct intel_encoder *encoder, enum port port = encoder->port; int ret; - if (port == PORT_A) + if (port == PORT_A && !intel_vgpu_active(dev_priv)) pipe_config->cpu_transcoder = TRANSCODER_EDP; if (intel_crtc_has_type(pipe_config, INTEL_OUTPUT_HDMI)) @@ -3548,11 +3541,18 @@ void intel_ddi_init(struct drm_i915_private *dev_priv, enum port port) bool init_hdmi, init_dp, init_lspcon = false; - init_hdmi = (dev_priv->vbt.ddi_port_info[port].supports_dvi || + /* + * For port A check whether vgpu is active and we have a monitor + * 
attached to port A. + * */ + init_hdmi = (intel_vgpu_active(dev_priv) && port == PORT_A && + (I915_READ(GEN8_DE_PORT_ISR) & BXT_DE_PORT_HP_DDIA)) || + (dev_priv->vbt.ddi_port_info[port].supports_dvi || dev_priv->vbt.ddi_port_info[port].supports_hdmi); init_dp = dev_priv->vbt.ddi_port_info[port].supports_dp; - if (intel_bios_is_lspcon_present(dev_priv, port)) { + if (!intel_vgpu_active(dev_priv) && + intel_bios_is_lspcon_present(dev_priv, port)) { /* * Lspcon device needs to be driven with DP connector * with special detection sequence. So make sure DP @@ -3648,7 +3648,8 @@ void intel_ddi_init(struct drm_i915_private *dev_priv, enum port port) /* In theory we don't need the encoder->type check, but leave it just in * case we have some really bad VBTs... */ - if (intel_encoder->type != INTEL_OUTPUT_EDP && init_hdmi) { + if ((intel_vgpu_active(dev_priv) && IS_BROXTON(dev_priv)) || + (intel_encoder->type != INTEL_OUTPUT_EDP && init_hdmi)) { if (!intel_ddi_init_hdmi_connector(intel_dig_port)) goto err; } diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index d2951096bca0d..223cda4b380cc 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -49,6 +49,10 @@ #include #include +#if IS_ENABLED(CONFIG_DRM_I915_GVT) +#include "gvt.h" +#endif + /* Primary plane formats for gen <= 3 */ static const uint32_t i8xx_primary_formats[] = { DRM_FORMAT_C8, @@ -2754,20 +2758,33 @@ intel_set_plane_visible(struct intel_crtc_state *crtc_state, plane_state->base.visible = visible; - /* FIXME pre-g4x don't work like this */ - if (visible) { + if (visible) crtc_state->base.plane_mask |= drm_plane_mask(&plane->base); - crtc_state->active_planes |= BIT(plane->id); - } else { + else crtc_state->base.plane_mask &= ~drm_plane_mask(&plane->base); - crtc_state->active_planes &= ~BIT(plane->id); - } DRM_DEBUG_KMS("%s active planes 0x%x\n", crtc_state->base.crtc->name, crtc_state->active_planes); } +static void 
fixup_active_planes(struct intel_crtc_state *crtc_state) +{ + struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev); + struct drm_plane *plane; + + /* + * Active_planes aliases if multiple "primary" or cursor planes + * have been used on the same (or wrong) pipe. plane_mask uses + * unique ids, hence we can use that to reconstruct active_planes. + */ + crtc_state->active_planes = 0; + + drm_for_each_plane_mask(plane, &dev_priv->drm, + crtc_state->base.plane_mask) + crtc_state->active_planes |= BIT(to_intel_plane(plane)->id); +} + static void intel_plane_disable_noatomic(struct intel_crtc *crtc, struct intel_plane *plane) { @@ -2777,6 +2794,7 @@ static void intel_plane_disable_noatomic(struct intel_crtc *crtc, to_intel_plane_state(plane->base.state); intel_set_plane_visible(crtc_state, plane_state, false); + fixup_active_planes(crtc_state); if (plane->id == PLANE_PRIMARY) intel_pre_disable_primary_noatomic(&crtc->base); @@ -2795,7 +2813,6 @@ intel_find_initial_plane_obj(struct intel_crtc *intel_crtc, struct drm_i915_gem_object *obj; struct drm_plane *primary = intel_crtc->base.primary; struct drm_plane_state *plane_state = primary->state; - struct drm_crtc_state *crtc_state = intel_crtc->base.state; struct intel_plane *intel_plane = to_intel_plane(primary); struct intel_plane_state *intel_state = to_intel_plane_state(plane_state); @@ -2885,10 +2902,6 @@ intel_find_initial_plane_obj(struct intel_crtc *intel_crtc, plane_state->fb = fb; plane_state->crtc = &intel_crtc->base; - intel_set_plane_visible(to_intel_crtc_state(crtc_state), - to_intel_plane_state(plane_state), - true); - atomic_or(to_intel_plane(primary)->frontbuffer_bit, &obj->frontbuffer_bits); } @@ -4943,6 +4956,10 @@ static int skl_update_scaler_plane(struct intel_crtc_state *crtc_state, } /* Check src format */ + if (!fb) { + DRM_ERROR("skl_update_scaler_plane(): fb is invalid\n"); + return 0; + } switch (fb->format->format) { case DRM_FORMAT_RGB565: case DRM_FORMAT_XBGR8888: @@ -5237,8 
+5254,8 @@ static void intel_post_plane_update(struct intel_crtc_state *old_crtc_state) intel_atomic_get_new_crtc_state(to_intel_atomic_state(old_state), crtc); struct drm_plane *primary = crtc->base.primary; - struct drm_plane_state *old_primary_state = - drm_atomic_get_old_plane_state(old_state, primary); + struct drm_plane_state *old_primary_state = primary ? + drm_atomic_get_old_plane_state(old_state, primary) : NULL; intel_frontbuffer_flip(to_i915(crtc->base.dev), pipe_config->fb_bits); @@ -5276,8 +5293,8 @@ static void intel_pre_plane_update(struct intel_crtc_state *old_crtc_state, struct drm_i915_private *dev_priv = to_i915(dev); struct drm_atomic_state *old_state = old_crtc_state->base.state; struct drm_plane *primary = crtc->base.primary; - struct drm_plane_state *old_primary_state = - drm_atomic_get_old_plane_state(old_state, primary); + struct drm_plane_state *old_primary_state = primary ? + drm_atomic_get_old_plane_state(old_state, primary) : NULL; bool modeset = needs_modeset(&pipe_config->base); struct intel_atomic_state *old_intel_state = to_intel_atomic_state(old_state); @@ -5358,15 +5375,37 @@ static void intel_pre_plane_update(struct intel_crtc_state *old_crtc_state, intel_update_watermarks(crtc); } +static void disable_primary_plane(struct drm_i915_private *dev_priv, int pipe) +{ + u32 val; + + val = I915_READ(PLANE_CTL(pipe, PLANE_PRIMARY)); + if (val & PLANE_CTL_ENABLE) { + I915_WRITE(PLANE_CTL(pipe, PLANE_PRIMARY), 0); + I915_WRITE(PLANE_SURF(pipe, PLANE_PRIMARY), 0); + POSTING_READ(PLANE_SURF(pipe, PLANE_PRIMARY)); + } +} + static void intel_crtc_disable_planes(struct drm_crtc *crtc, unsigned plane_mask) { struct drm_device *dev = crtc->dev; + struct drm_i915_private *dev_priv = to_i915(dev); struct intel_crtc *intel_crtc = to_intel_crtc(crtc); struct drm_plane *p; int pipe = intel_crtc->pipe; intel_crtc_dpms_overlay_disable(intel_crtc); + /* + * On BIOS based systems, if Dom0 doesn't own Plane 0 (Primary Plane), + * then during modeset, it 
wouldn't be able to disable this plane and + * this can lead to unexpected behavior after the modeset. Therefore, + * disable the primary plane if it was enabled by the BIOS/GOP. + */ + if (dev_priv->gvt && i915_modparams.avail_planes_per_pipe) + disable_primary_plane(dev_priv, pipe); + drm_for_each_plane_mask(p, dev, plane_mask) to_intel_plane(p)->disable_plane(to_intel_plane(p), intel_crtc); @@ -8728,7 +8767,8 @@ static void skylake_get_pfit_config(struct intel_crtc *crtc, /* find scaler attached to this pipe */ for (i = 0; i < crtc->num_scalers; i++) { ps_ctrl = I915_READ(SKL_PS_CTRL(crtc->pipe, i)); - if (ps_ctrl & PS_SCALER_EN && !(ps_ctrl & PS_PLANE_SEL_MASK)) { + if (ps_ctrl & PS_SCALER_EN && !(ps_ctrl & PS_PLANE_SEL_MASK) && + scaler_state->scalers[i].owned) { id = i; pipe_config->pch_pfit.enabled = true; pipe_config->pch_pfit.pos = I915_READ(SKL_PS_WIN_POS(crtc->pipe, i)); @@ -11629,7 +11669,8 @@ static void verify_wm_state(struct drm_crtc *crtc, const enum pipe pipe = intel_crtc->pipe; int plane, level, max_level = ilk_wm_max_level(dev_priv); - if (INTEL_GEN(dev_priv) < 9 || !new_state->active) + if (INTEL_GEN(dev_priv) < 9 || !new_state->active || + i915_modparams.avail_planes_per_pipe) return; skl_pipe_wm_get_hw_state(crtc, &hw_wm); @@ -11874,7 +11915,16 @@ verify_crtc_state(struct drm_crtc *crtc, intel_pipe_config_sanity_check(dev_priv, pipe_config); sw_config = to_intel_crtc_state(new_crtc_state); - if (!intel_pipe_config_compare(dev_priv, sw_config, + + /* + * Only check for pipe config if we are not in a GVT guest environment, + * because such a check in a GVT guest environment doesn't make any sense + * as we don't allow the guest to do a mode set, so there can very well + * be a difference between what it has programmed vs. what the host + * truly configured the HW pipe to be in. 
+ */ + if (!intel_vgpu_active(dev_priv) && + !intel_pipe_config_compare(dev_priv, sw_config, pipe_config, false)) { I915_STATE_WARN(1, "pipe state doesn't match!\n"); intel_dump_pipe_config(intel_crtc, pipe_config, @@ -11935,11 +11985,13 @@ verify_single_dpll_state(struct drm_i915_private *dev_priv, if (new_state->active) I915_STATE_WARN(!(pll->active_mask & crtc_mask), "pll active mismatch (expected pipe %c in active mask 0x%02x)\n", - pipe_name(drm_crtc_index(crtc)), pll->active_mask); + pipe_name(to_intel_crtc(crtc)->pipe), + pll->active_mask); else I915_STATE_WARN(pll->active_mask & crtc_mask, "pll active mismatch (didn't expect pipe %c in active mask 0x%02x)\n", - pipe_name(drm_crtc_index(crtc)), pll->active_mask); + pipe_name(to_intel_crtc(crtc)->pipe), + pll->active_mask); I915_STATE_WARN(!(pll->state.crtc_mask & crtc_mask), "pll enabled crtcs mismatch (expected 0x%x in 0x%02x)\n", @@ -11970,10 +12022,10 @@ verify_shared_dpll_state(struct drm_device *dev, struct drm_crtc *crtc, I915_STATE_WARN(pll->active_mask & crtc_mask, "pll active mismatch (didn't expect pipe %c in active mask)\n", - pipe_name(drm_crtc_index(crtc))); + pipe_name(to_intel_crtc(crtc)->pipe)); I915_STATE_WARN(pll->state.crtc_mask & crtc_mask, "pll enabled crtcs mismatch (found %x in enabled mask)\n", - pipe_name(drm_crtc_index(crtc))); + pipe_name(to_intel_crtc(crtc)->pipe)); } } @@ -12388,7 +12440,8 @@ u32 intel_crtc_get_vblank_counter(struct intel_crtc *crtc) if (!dev->max_vblank_count) return (u32)drm_crtc_accurate_vblank_count(&crtc->base); - return dev->driver->get_vblank_counter(dev, crtc->pipe); + return dev->driver->get_vblank_counter(dev, + drm_crtc_index(&crtc->base)); } static void intel_update_crtc(struct drm_crtc *crtc, @@ -12402,8 +12455,9 @@ static void intel_update_crtc(struct drm_crtc *crtc, struct intel_crtc_state *pipe_config = to_intel_crtc_state(new_crtc_state); bool modeset = needs_modeset(new_crtc_state); struct intel_plane_state *new_plane_state = + crtc->primary ? 
intel_atomic_get_new_plane_state(to_intel_atomic_state(state), - to_intel_plane(crtc->primary)); + to_intel_plane(crtc->primary)) : NULL; if (modeset) { update_scanline_offset(intel_crtc); @@ -12580,6 +12634,8 @@ static void intel_atomic_commit_tail(struct drm_atomic_state *state) struct intel_atomic_state *intel_state = to_intel_atomic_state(state); struct drm_i915_private *dev_priv = to_i915(dev); struct drm_crtc_state *old_crtc_state, *new_crtc_state; + struct drm_connector_state *old_conn_state, *new_conn_state; + struct drm_connector *connector; struct drm_crtc *crtc; struct intel_crtc_state *intel_cstate; u64 put_domains[I915_MAX_PIPES] = {}; @@ -12630,17 +12686,12 @@ static void intel_atomic_commit_tail(struct drm_atomic_state *state) intel_check_cpu_fifo_underruns(dev_priv); intel_check_pch_fifo_underruns(dev_priv); - if (!new_crtc_state->active) { - /* - * Make sure we don't call initial_watermarks - * for ILK-style watermark updates. - * - * No clue what this is supposed to achieve. - */ - if (INTEL_GEN(dev_priv) >= 9) - dev_priv->display.initial_watermarks(intel_state, - to_intel_crtc_state(new_crtc_state)); - } + /* FIXME unify this for all platforms */ + if (!new_crtc_state->active && + !HAS_GMCH_DISPLAY(dev_priv) && + dev_priv->display.initial_watermarks) + dev_priv->display.initial_watermarks(intel_state, + to_intel_crtc_state(new_crtc_state)); } } @@ -12677,9 +12728,17 @@ static void intel_atomic_commit_tail(struct drm_atomic_state *state) } } + for_each_oldnew_connector_in_state(state, connector, old_conn_state, + new_conn_state, i) + intel_hdcp_atomic_pre_commit(connector, old_conn_state, + new_conn_state); + /* Now enable the clocks, plane, pipe, and connectors that we set up. 
*/ dev_priv->display.update_crtcs(state); + for_each_new_connector_in_state(state, connector, new_conn_state, i) + intel_hdcp_atomic_commit(connector, new_conn_state); + /* FIXME: We should call drm_atomic_helper_commit_hw_done() here * already, but still need the state for the delayed optimization. To * fix this: @@ -13002,7 +13061,8 @@ static void fb_obj_bump_render_priority(struct drm_i915_gem_object *obj) .priority = I915_PRIORITY_DISPLAY, }; - i915_gem_object_wait_priority(obj, 0, &attr); + i915_gem_object_wait_priority(obj, 0, + &attr, I915_PREEMPTION_TIMEOUT_DISPLAY); } /** @@ -13081,6 +13141,20 @@ intel_prepare_plane_fb(struct drm_plane *plane, ret = intel_plane_pin_fb(to_intel_plane_state(new_state)); + /* + * Reschedule our dependencies, and ensure we run within a timeout. + * + * Note that if the timeout is exceeded, then whoever was running that + * prevented us from acquiring the GPU is declared rogue and reset. An + * unresponsive process will then be banned in order to preserve + * interactivity. Since this can be seen as a bit heavy-handed, we + * select a timeout for when the dropped frames start to become a + * noticeable nuisance for the user (100 ms, i.e. preemption was + * blocked for more than a few frames). Note, this is only a timeout + * for a delay in preempting the current request in order to run our + * dependency chain, our dependency chain may itself take a long time + * to run to completion before we can present the framebuffer. 
+ */ fb_obj_bump_render_priority(obj); mutex_unlock(&dev_priv->drm.struct_mutex); @@ -13720,6 +13794,9 @@ intel_primary_plane_create(struct drm_i915_private *dev_priv, enum pipe pipe) else modifiers = skl_format_modifiers_noccs; + if (intel_gvt_active(dev_priv) || intel_vgpu_active(dev_priv)) + modifiers = i9xx_format_modifiers; + primary->update_plane = skl_update_plane; primary->disable_plane = skl_disable_plane; primary->get_hw_state = skl_plane_get_hw_state; @@ -13792,11 +13869,15 @@ intel_primary_plane_create(struct drm_i915_private *dev_priv, enum pipe pipe) supported_rotations = DRM_MODE_ROTATE_0; } - if (INTEL_GEN(dev_priv) >= 4) + if (INTEL_GEN(dev_priv) >= 4) { drm_plane_create_rotation_property(&primary->base, DRM_MODE_ROTATE_0, supported_rotations); + if (drm_plane_create_decryption_property(&primary->base)) + DRM_ERROR("Failed to create decryption property\n"); + } + if (INTEL_GEN(dev_priv) >= 9) drm_plane_create_color_properties(&primary->base, BIT(DRM_COLOR_YCBCR_BT601) | @@ -13817,6 +13898,113 @@ intel_primary_plane_create(struct drm_i915_private *dev_priv, enum pipe pipe) return ERR_PTR(ret); } +static struct intel_plane * +intel_skl_plane_create(struct drm_i915_private *dev_priv, enum pipe pipe, + int plane, bool is_primary) +{ + struct intel_plane *intel_plane = NULL; + struct intel_plane_state *state = NULL; + unsigned long possible_crtcs; + const uint32_t *plane_formats; + unsigned int supported_rotations, plane_type; + unsigned int num_formats; + const uint64_t *modifiers; + int ret; + + intel_plane = kzalloc(sizeof(*intel_plane), GFP_KERNEL); + if (!intel_plane) { + ret = -ENOMEM; + goto fail; + } + + state = intel_create_plane_state(&intel_plane->base); + if (!state) { + ret = -ENOMEM; + goto fail; + } + + intel_plane->base.state = &state->base; + intel_plane->can_scale = false; + state->scaler_id = -1; + intel_plane->pipe = pipe; + + /* + * On gen2/3 only plane A can do FBC, but the panel fitter and LVDS + * port is hooked to pipe B. 
Hence we want plane A feeding pipe B. + */ + if (is_primary) { + intel_plane->i9xx_plane = (enum i9xx_plane_id) pipe; + intel_plane->check_plane = intel_check_primary_plane; + plane_type = DRM_PLANE_TYPE_PRIMARY; + } else { + intel_plane->i9xx_plane = (enum i9xx_plane_id) plane; + intel_plane->check_plane = intel_check_sprite_plane; + plane_type = DRM_PLANE_TYPE_OVERLAY; + } + + if (plane == PLANE_PRIMARY) { + intel_plane->frontbuffer_bit = INTEL_FRONTBUFFER(pipe, plane); + intel_plane->update_plane = skl_update_plane; + intel_plane->disable_plane = skl_disable_plane; + intel_plane->get_hw_state = skl_plane_get_hw_state; + } else { + intel_plane->frontbuffer_bit = INTEL_FRONTBUFFER(pipe, plane); + intel_plane->update_plane = skl_update_plane; + intel_plane->disable_plane = skl_disable_plane; + intel_plane->get_hw_state = skl_plane_get_hw_state; + } + + intel_plane->id = plane; + plane_formats = skl_primary_formats; + + if (pipe < PIPE_C) + modifiers = skl_format_modifiers_ccs; + else + modifiers = skl_format_modifiers_noccs; + + if (intel_gvt_active(dev_priv) || intel_vgpu_active(dev_priv)) + modifiers = i9xx_format_modifiers; + + num_formats = ARRAY_SIZE(skl_primary_formats); + + /* + * Drop final format (NV12) for pipes or hardware steppings + * that don't support it. 
+ */ + if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_C0) || pipe >= PIPE_C + || plane >= 2) + num_formats--; + + + possible_crtcs = (1 << dev_priv->drm.mode_config.num_crtc); + ret = drm_universal_plane_init(&dev_priv->drm, &intel_plane->base, + possible_crtcs, &skl_plane_funcs, + plane_formats, num_formats, + modifiers, + plane_type, + "plane %d%c", plane+1, pipe_name(pipe)); + + if (ret) + goto fail; + + supported_rotations = DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_90 | + DRM_MODE_ROTATE_180 | DRM_MODE_ROTATE_270; + if (INTEL_GEN(dev_priv) >= 4) + drm_plane_create_rotation_property(&intel_plane->base, + DRM_MODE_ROTATE_0, + supported_rotations); + + drm_plane_helper_add(&intel_plane->base, &intel_plane_helper_funcs); + + return intel_plane; + +fail: + kfree(state); + kfree(intel_plane); + + return ERR_PTR(ret); +} + static struct intel_plane * intel_cursor_plane_create(struct drm_i915_private *dev_priv, enum pipe pipe) @@ -13911,11 +14099,94 @@ static void intel_crtc_init_scalers(struct intel_crtc *crtc, scaler->in_use = 0; scaler->mode = PS_SCALER_MODE_DYN; + scaler->owned = 1; +#if IS_ENABLED(CONFIG_DRM_I915_GVT) + if (intel_gvt_active(dev_priv) && + dev_priv->gvt->pipe_info[crtc->pipe].scaler_owner[i] != 0) + scaler->owned = 0; +#endif + if (intel_vgpu_active(dev_priv) && + !(1 << (crtc->pipe * SKL_NUM_SCALERS + i) & + dev_priv->vgpu.scaler_owned)) + scaler->owned = 0; } scaler_state->scaler_id = -1; } +static int intel_crtc_init_restrict_planes(struct drm_i915_private *dev_priv, + enum pipe pipe, int planes_mask) +{ + struct intel_crtc *intel_crtc; + struct intel_crtc_state *crtc_state; + struct intel_plane *primary = NULL, *intel_plane = NULL; + bool is_primary = true; + int plane, ret, crtc_plane; + + intel_crtc = kzalloc(sizeof(*intel_crtc), GFP_KERNEL); + if (!intel_crtc) + return -ENOMEM; + + crtc_state = kzalloc(sizeof(*crtc_state), GFP_KERNEL); + if (!crtc_state) { + ret = -ENOMEM; + goto fail; + } + intel_crtc->config = crtc_state; + intel_crtc->base.state = 
&crtc_state->base; + crtc_state->base.crtc = &intel_crtc->base; + + for_each_universal_plane(dev_priv, pipe, plane) { + if (planes_mask & BIT(plane)) { + intel_plane = intel_skl_plane_create(dev_priv, + pipe, plane, is_primary); + if (IS_ERR(intel_plane)) { + DRM_DEBUG_KMS(" plane %d failed for pipe %d\n", plane, pipe); + ret = PTR_ERR(intel_plane); + goto fail; + } + if (is_primary) { + primary = intel_plane; + is_primary = false; + } + DRM_DEBUG_KMS(" plane %d created for pipe %d\n", plane, pipe); + intel_crtc->plane_ids_mask |= BIT(intel_plane->id); + } + } + + ret = drm_crtc_init_with_planes(&dev_priv->drm, + &intel_crtc->base, + primary ? &primary->base : NULL, NULL, + &intel_crtc_funcs, + "pipe %c", pipe_name(pipe)); + if (ret) + goto fail; + + intel_crtc->pipe = pipe; + crtc_plane = primary ? primary->i9xx_plane : 0; + + dev_priv->plane_to_crtc_mapping[crtc_plane] = intel_crtc; + dev_priv->pipe_to_crtc_mapping[intel_crtc->pipe] = intel_crtc; + + drm_crtc_helper_add(&intel_crtc->base, &intel_helper_funcs); + + intel_color_init(&intel_crtc->base); + + WARN_ON(drm_crtc_index(&intel_crtc->base) != intel_crtc->pipe); + + return 0; + +fail: + /* + * drm_mode_config_cleanup() will free up any + * crtcs/planes already initialized. 
+ */ + kfree(crtc_state); + kfree(intel_crtc); + + return ret; +} + static int intel_crtc_init(struct drm_i915_private *dev_priv, enum pipe pipe) { struct intel_crtc *intel_crtc; @@ -14034,6 +14305,27 @@ int intel_get_pipe_from_crtc_id_ioctl(struct drm_device *dev, void *data, return 0; } +int get_pipe_from_crtc_index(struct drm_device *dev, unsigned int index, enum pipe *pipe) +{ + struct drm_crtc *c = drm_crtc_from_index(dev, index); + + if (WARN_ON(!c)) + return -ENOENT; + + *pipe = (to_intel_crtc(c)->pipe); + return 0; +} + +struct intel_crtc *get_intel_crtc_from_index(struct drm_device *dev, + unsigned int index) +{ + struct drm_crtc *c = drm_crtc_from_index(dev, index); + + WARN_ON(!c); + return to_intel_crtc(c); +} + + static int intel_encoder_clones(struct intel_encoder *encoder) { struct drm_device *dev = encoder->base.dev; @@ -14315,6 +14607,15 @@ static void intel_setup_outputs(struct drm_i915_private *dev_priv) intel_encoder_clones(encoder); } +#if IS_ENABLED(CONFIG_DRM_I915_GVT) + /* + * Encoders have been initialized. If we are in VGT mode, + * let's inform the HV that it can start Dom U as Dom 0 + * is ready to accept new Dom Us. 
+ */ + gvt_dom0_ready(dev_priv); +#endif + intel_init_pch_refclk(dev_priv); drm_helper_move_panel_connectors_to_head(&dev_priv->drm); @@ -14573,7 +14874,7 @@ static int intel_framebuffer_init(struct intel_framebuffer *intel_fb, fb->height < SKL_MIN_YUV_420_SRC_H || (fb->width % 4) != 0 || (fb->height % 4) != 0)) { DRM_DEBUG_KMS("src dimensions not correct for NV12\n"); - return -EINVAL; + goto err; } for (i = 0; i < fb->format->num_planes; i++) { @@ -15133,12 +15434,42 @@ static void intel_update_fdi_pll_freq(struct drm_i915_private *dev_priv) DRM_DEBUG_DRIVER("FDI PLL freq=%d\n", dev_priv->fdi_pll_freq); } + +static int intel_sanitize_plane_restriction(struct drm_i915_private *dev_priv) +{ + unsigned int mask; + + /*plane restriction feature is only for APL and KBL for now*/ + if (!(IS_BROXTON(dev_priv) || IS_KABYLAKE(dev_priv))) { + i915_modparams.avail_planes_per_pipe = 0; + DRM_INFO("Turning off Plane Restrictions feature\n"); + } + + mask = i915_modparams.avail_planes_per_pipe; + + /* make sure SOS has a (dummy) plane per pipe. */ + if ((IS_BROXTON(dev_priv) || IS_KABYLAKE(dev_priv)) && + intel_gvt_active(dev_priv)) { + enum pipe pipe; + + for_each_pipe(dev_priv, pipe) { + if (!AVAIL_PLANE_PER_PIPE(dev_priv, mask, pipe)) + mask |= (1 << pipe * BITS_PER_PIPE); + } + DRM_INFO("Fix internal plane mask: 0x%06x --> 0x%06x", + i915_modparams.avail_planes_per_pipe, mask); + } + return mask; +} + int intel_modeset_init(struct drm_device *dev) { struct drm_i915_private *dev_priv = to_i915(dev); struct i915_ggtt *ggtt = &dev_priv->ggtt; enum pipe pipe; struct intel_crtc *crtc; + unsigned int planes_mask[I915_MAX_PIPES]; + unsigned int avail_plane_per_pipe_mask = 0; dev_priv->modeset_wq = alloc_ordered_workqueue("i915_modeset", 0); @@ -15212,10 +15543,29 @@ int intel_modeset_init(struct drm_device *dev) INTEL_INFO(dev_priv)->num_pipes, INTEL_INFO(dev_priv)->num_pipes > 1 ? 
"s" : ""); + avail_plane_per_pipe_mask = intel_sanitize_plane_restriction(dev_priv); + DRM_DEBUG_KMS("avail_planes_per_pipe = 0x%x \n", i915_modparams.avail_planes_per_pipe); + DRM_DEBUG_KMS("domain_plane_owners = 0x%llx \n", i915_modparams.domain_plane_owners); + for_each_pipe(dev_priv, pipe) { - int ret; + planes_mask[pipe] = AVAIL_PLANE_PER_PIPE(dev_priv, + avail_plane_per_pipe_mask, pipe); + DRM_DEBUG_KMS("for pipe %d plane_mask = %d \n", pipe, planes_mask[pipe]); + } - ret = intel_crtc_init(dev_priv, pipe); + for_each_pipe(dev_priv, pipe) { + int ret = 0; + + if (!i915_modparams.avail_planes_per_pipe) { + ret = intel_crtc_init(dev_priv, pipe); + } else { + if (!intel_vgpu_active(dev_priv) || (intel_vgpu_active(dev_priv) + && planes_mask[pipe])) { + ret = intel_crtc_init_restrict_planes(dev_priv, + pipe, + planes_mask[pipe]); + } + } if (ret) { drm_mode_config_cleanup(dev); return ret; @@ -15242,7 +15592,7 @@ int intel_modeset_init(struct drm_device *dev) for_each_intel_crtc(dev, crtc) { struct intel_initial_plane_config plane_config = {}; - if (!crtc->active) + if (!crtc->active || !crtc->base.primary) continue; /* @@ -15365,17 +15715,6 @@ void i830_disable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe) POSTING_READ(DPLL(pipe)); } -static bool intel_plane_mapping_ok(struct intel_crtc *crtc, - struct intel_plane *plane) -{ - enum pipe pipe; - - if (!plane->get_hw_state(plane, &pipe)) - return true; - - return pipe == crtc->pipe; -} - static void intel_sanitize_plane_mapping(struct drm_i915_private *dev_priv) { @@ -15387,13 +15726,20 @@ intel_sanitize_plane_mapping(struct drm_i915_private *dev_priv) for_each_intel_crtc(&dev_priv->drm, crtc) { struct intel_plane *plane = to_intel_plane(crtc->base.primary); + struct intel_crtc *plane_crtc; + enum pipe pipe; - if (intel_plane_mapping_ok(crtc, plane)) + if (!plane->get_hw_state(plane, &pipe)) + continue; + + if (pipe == crtc->pipe) continue; DRM_DEBUG_KMS("%s attached to the wrong pipe, disabling plane\n", 
plane->base.name); - intel_plane_disable_noatomic(crtc, plane); + + plane_crtc = intel_get_crtc_for_pipe(dev_priv, pipe); + intel_plane_disable_noatomic(plane_crtc, plane); } } @@ -15441,13 +15787,9 @@ static void intel_sanitize_crtc(struct intel_crtc *crtc, I915_READ(reg) & ~PIPECONF_FRAME_START_DELAY_MASK); } - /* restore vblank interrupts to correct state */ - drm_crtc_vblank_reset(&crtc->base); if (crtc->active) { struct intel_plane *plane; - drm_crtc_vblank_on(&crtc->base); - /* Disable everything but the primary plane */ for_each_intel_plane_on_crtc(dev, crtc, plane) { const struct intel_plane_state *plane_state = @@ -15565,23 +15907,32 @@ void i915_redisable_vga(struct drm_i915_private *dev_priv) } /* FIXME read out full plane state for all planes */ -static void readout_plane_state(struct intel_crtc *crtc) +static void readout_plane_state(struct drm_i915_private *dev_priv) { - struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); - struct intel_crtc_state *crtc_state = - to_intel_crtc_state(crtc->base.state); struct intel_plane *plane; + struct intel_crtc *crtc; - for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane) { + for_each_intel_plane(&dev_priv->drm, plane) { struct intel_plane_state *plane_state = to_intel_plane_state(plane->base.state); - enum pipe pipe; + struct intel_crtc_state *crtc_state; + enum pipe pipe = PIPE_A; bool visible; visible = plane->get_hw_state(plane, &pipe); + crtc = intel_get_crtc_for_pipe(dev_priv, pipe); + crtc_state = to_intel_crtc_state(crtc->base.state); + intel_set_plane_visible(crtc_state, plane_state, visible); } + + for_each_intel_crtc(&dev_priv->drm, crtc) { + struct intel_crtc_state *crtc_state = + to_intel_crtc_state(crtc->base.state); + + fixup_active_planes(crtc_state); + } } static void intel_modeset_readout_hw_state(struct drm_device *dev) @@ -15611,15 +15962,16 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev) crtc->active = crtc_state->base.active; if (crtc_state->base.active) - 
dev_priv->active_crtcs |= 1 << crtc->pipe; - - readout_plane_state(crtc); + dev_priv->active_crtcs |= + 1 << drm_crtc_index(&crtc->base); DRM_DEBUG_KMS("[CRTC:%d:%s] hw state readout: %s\n", crtc->base.base.id, crtc->base.name, enableddisabled(crtc_state->base.active)); } + readout_plane_state(dev_priv); + for (i = 0; i < dev_priv->num_shared_dpll; i++) { struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i]; @@ -15632,7 +15984,8 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev) if (crtc_state->base.active && crtc_state->shared_dpll == pll) - pll->state.crtc_mask |= 1 << crtc->pipe; + pll->state.crtc_mask |= + 1 << drm_crtc_index(&crtc->base); } pll->active_mask = pll->state.crtc_mask; @@ -15647,10 +16000,15 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev) struct intel_crtc_state *crtc_state; crtc = intel_get_crtc_for_pipe(dev_priv, pipe); - crtc_state = to_intel_crtc_state(crtc->base.state); + if (!crtc) { + encoder->base.crtc = NULL; + } else { + crtc_state = to_intel_crtc_state(crtc->base.state); + + encoder->base.crtc = &crtc->base; + encoder->get_config(encoder, crtc_state); - encoder->base.crtc = &crtc->base; - encoder->get_config(encoder, crtc_state); + } } else { encoder->base.crtc = NULL; } @@ -15789,7 +16147,6 @@ intel_modeset_setup_hw_state(struct drm_device *dev, struct drm_modeset_acquire_ctx *ctx) { struct drm_i915_private *dev_priv = to_i915(dev); - enum pipe pipe; struct intel_crtc *crtc; struct intel_encoder *encoder; int i; @@ -15800,15 +16157,23 @@ intel_modeset_setup_hw_state(struct drm_device *dev, /* HW state is read out, now we need to sanitize this mess. */ get_encoder_power_domains(dev_priv); - intel_sanitize_plane_mapping(dev_priv); + /* + * intel_sanitize_plane_mapping() may need to do vblank + * waits, so we need vblank interrupts restored beforehand. 
+ */ + for_each_intel_crtc(&dev_priv->drm, crtc) { + drm_crtc_vblank_reset(&crtc->base); - for_each_intel_encoder(dev, encoder) { - intel_sanitize_encoder(encoder); + if (crtc->active) + drm_crtc_vblank_on(&crtc->base); } - for_each_pipe(dev_priv, pipe) { - crtc = intel_get_crtc_for_pipe(dev_priv, pipe); + intel_sanitize_plane_mapping(dev_priv); + for_each_intel_encoder(dev, encoder) + intel_sanitize_encoder(encoder); + + for_each_intel_crtc(&dev_priv->drm, crtc) { intel_sanitize_crtc(crtc, ctx); intel_dump_pipe_config(crtc, crtc->config, "[setup_hw_state]"); @@ -15925,6 +16290,7 @@ static void intel_hpd_poll_fini(struct drm_device *dev) if (connector->hdcp_shim) { cancel_delayed_work_sync(&connector->hdcp_check_work); cancel_work_sync(&connector->hdcp_prop_work); + cancel_work_sync(&connector->hdcp_enable_work); } } drm_connector_list_iter_end(&conn_iter); diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c index 1193202766a2c..d583d074e83fb 100644 --- a/drivers/gpu/drm/i915/intel_dp.c +++ b/drivers/gpu/drm/i915/intel_dp.c @@ -401,6 +401,22 @@ static bool intel_dp_link_params_valid(struct intel_dp *intel_dp, int link_rate, return true; } +static bool intel_dp_can_link_train_fallback_for_edp(struct intel_dp *intel_dp, + int link_rate, + uint8_t lane_count) +{ + const struct drm_display_mode *fixed_mode = + intel_dp->attached_connector->panel.fixed_mode; + int mode_rate, max_rate; + + mode_rate = intel_dp_link_required(fixed_mode->clock, 18); + max_rate = intel_dp_max_data_rate(link_rate, lane_count); + if (mode_rate > max_rate) + return false; + + return true; +} + int intel_dp_get_link_train_fallback_values(struct intel_dp *intel_dp, int link_rate, uint8_t lane_count) { @@ -410,9 +426,23 @@ int intel_dp_get_link_train_fallback_values(struct intel_dp *intel_dp, intel_dp->num_common_rates, link_rate); if (index > 0) { + if (intel_dp_is_edp(intel_dp) && + !intel_dp_can_link_train_fallback_for_edp(intel_dp, + intel_dp->common_rates[index - 
1], + lane_count)) { + DRM_DEBUG_KMS("Retrying Link training for eDP with same parameters\n"); + return 0; + } intel_dp->max_link_rate = intel_dp->common_rates[index - 1]; intel_dp->max_link_lane_count = lane_count; } else if (lane_count > 1) { + if (intel_dp_is_edp(intel_dp) && + !intel_dp_can_link_train_fallback_for_edp(intel_dp, + intel_dp_max_common_rate(intel_dp), + lane_count >> 1)) { + DRM_DEBUG_KMS("Retrying Link training for eDP with same parameters\n"); + return 0; + } intel_dp->max_link_rate = intel_dp_max_common_rate(intel_dp); intel_dp->max_link_lane_count = lane_count >> 1; } else { @@ -479,7 +509,7 @@ uint32_t intel_dp_pack_aux(const uint8_t *src, int src_bytes) return v; } -static void intel_dp_unpack_aux(uint32_t src, uint8_t *dst, int dst_bytes) +void intel_dp_unpack_aux(uint32_t src, uint8_t *dst, int dst_bytes) { int i; if (dst_bytes > 4) @@ -2045,7 +2075,12 @@ static void wait_panel_status(struct intel_dp *intel_dp, I915_READ(pp_stat_reg), I915_READ(pp_ctrl_reg)); - if (intel_wait_for_register(dev_priv, + /* + * Only wait for panel status if we are not in a GVT guest environment, + * because such a wait in a GVT guest environment doesn't make any sense + * as we are exposing virtual DP monitors to the guest. + */ + if (!intel_vgpu_active(dev_priv) && intel_wait_for_register(dev_priv, pp_stat_reg, mask, value, 5000)) DRM_ERROR("Panel status timeout: status %08x control %08x\n", @@ -4709,19 +4744,13 @@ intel_dp_long_pulse(struct intel_connector *connector, */ status = connector_status_disconnected; goto out; - } else { - /* - * If display is now connected check links status, - * there has been known issues of link loss triggering - * long pulse. - * - * Some sinks (eg. ASUS PB287Q) seem to perform some - * weird HPD ping pong during modesets. So we can apparently - * end up with HPD going low during a modeset, and then - * going back up soon after. And once that happens we must - * retrain the link to get a picture. 
That's in case no - * userspace component reacted to intermittent HPD dip. - */ + } + + /* + * Some external monitors do not signal loss of link synchronization + * with an IRQ_HPD, so force a link status check. + */ + if (!intel_dp_is_edp(intel_dp)) { struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base; intel_dp_retrain_link(encoder, ctx); diff --git a/drivers/gpu/drm/i915/intel_dp_link_training.c b/drivers/gpu/drm/i915/intel_dp_link_training.c index 4da6e33c7fa1c..329309a085cb7 100644 --- a/drivers/gpu/drm/i915/intel_dp_link_training.c +++ b/drivers/gpu/drm/i915/intel_dp_link_training.c @@ -352,22 +352,14 @@ intel_dp_start_link_train(struct intel_dp *intel_dp) return; failure_handling: - /* Dont fallback and prune modes if its eDP */ - if (!intel_dp_is_edp(intel_dp)) { - DRM_DEBUG_KMS("[CONNECTOR:%d:%s] Link Training failed at link rate = %d, lane count = %d", - intel_connector->base.base.id, - intel_connector->base.name, - intel_dp->link_rate, intel_dp->lane_count); - if (!intel_dp_get_link_train_fallback_values(intel_dp, - intel_dp->link_rate, - intel_dp->lane_count)) - /* Schedule a Hotplug Uevent to userspace to start modeset */ - schedule_work(&intel_connector->modeset_retry_work); - } else { - DRM_ERROR("[CONNECTOR:%d:%s] Link Training failed at link rate = %d, lane count = %d", - intel_connector->base.base.id, - intel_connector->base.name, - intel_dp->link_rate, intel_dp->lane_count); - } + DRM_DEBUG_KMS("[CONNECTOR:%d:%s] Link Training failed at link rate = %d, lane count = %d", + intel_connector->base.base.id, + intel_connector->base.name, + intel_dp->link_rate, intel_dp->lane_count); + if (!intel_dp_get_link_train_fallback_values(intel_dp, + intel_dp->link_rate, + intel_dp->lane_count)) + /* Schedule a Hotplug Uevent to userspace to start modeset */ + schedule_work(&intel_connector->modeset_retry_work); return; } diff --git a/drivers/gpu/drm/i915/intel_dp_mst.c b/drivers/gpu/drm/i915/intel_dp_mst.c index 4ecd653756033..1fec0c71b4d95 100644 
--- a/drivers/gpu/drm/i915/intel_dp_mst.c +++ b/drivers/gpu/drm/i915/intel_dp_mst.c @@ -38,11 +38,11 @@ static bool intel_dp_mst_compute_config(struct intel_encoder *encoder, struct intel_dp_mst_encoder *intel_mst = enc_to_mst(&encoder->base); struct intel_digital_port *intel_dig_port = intel_mst->primary; struct intel_dp *intel_dp = &intel_dig_port->dp; - struct intel_connector *connector = - to_intel_connector(conn_state->connector); + struct drm_connector *connector = conn_state->connector; + void *port = to_intel_connector(connector)->port; struct drm_atomic_state *state = pipe_config->base.state; int bpp; - int lane_count, slots; + int lane_count, slots = 0; const struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode; int mst_pbn; bool reduce_m_n = drm_dp_has_quirk(&intel_dp->desc, @@ -70,17 +70,23 @@ static bool intel_dp_mst_compute_config(struct intel_encoder *encoder, pipe_config->port_clock = intel_dp_max_link_rate(intel_dp); - if (drm_dp_mst_port_has_audio(&intel_dp->mst_mgr, connector->port)) + if (drm_dp_mst_port_has_audio(&intel_dp->mst_mgr, port)) pipe_config->has_audio = true; mst_pbn = drm_dp_calc_pbn_mode(adjusted_mode->crtc_clock, bpp); pipe_config->pbn = mst_pbn; - slots = drm_dp_atomic_find_vcpi_slots(state, &intel_dp->mst_mgr, - connector->port, mst_pbn); - if (slots < 0) { - DRM_DEBUG_KMS("failed finding vcpi slots:%d\n", slots); - return false; + /* Zombie connectors can't have VCPI slots */ + if (READ_ONCE(connector->registered)) { + slots = drm_dp_atomic_find_vcpi_slots(state, + &intel_dp->mst_mgr, + port, + mst_pbn); + if (slots < 0) { + DRM_DEBUG_KMS("failed finding vcpi slots:%d\n", + slots); + return false; + } } intel_link_compute_m_n(bpp, lane_count, @@ -311,9 +317,8 @@ static int intel_dp_mst_get_ddc_modes(struct drm_connector *connector) struct edid *edid; int ret; - if (!intel_dp) { + if (!READ_ONCE(connector->registered)) return intel_connector_update_modes(connector, NULL); - } edid = 
drm_dp_mst_get_edid(connector, &intel_dp->mst_mgr, intel_connector->port); ret = intel_connector_update_modes(connector, edid); @@ -328,9 +333,10 @@ intel_dp_mst_detect(struct drm_connector *connector, bool force) struct intel_connector *intel_connector = to_intel_connector(connector); struct intel_dp *intel_dp = intel_connector->mst_port; - if (!intel_dp) + if (!READ_ONCE(connector->registered)) return connector_status_disconnected; - return drm_dp_mst_detect_port(connector, &intel_dp->mst_mgr, intel_connector->port); + return drm_dp_mst_detect_port(connector, &intel_dp->mst_mgr, + intel_connector->port); } static void @@ -370,7 +376,7 @@ intel_dp_mst_mode_valid(struct drm_connector *connector, int bpp = 24; /* MST uses fixed bpp */ int max_rate, mode_rate, max_lanes, max_link_clock; - if (!intel_dp) + if (!READ_ONCE(connector->registered)) return MODE_ERROR; if (mode->flags & DRM_MODE_FLAG_DBLSCAN) @@ -402,7 +408,7 @@ static struct drm_encoder *intel_mst_atomic_best_encoder(struct drm_connector *c struct intel_dp *intel_dp = intel_connector->mst_port; struct intel_crtc *crtc = to_intel_crtc(state->crtc); - if (!intel_dp) + if (!READ_ONCE(connector->registered)) return NULL; return &intel_dp->mst_encoders[crtc->pipe]->base.base; } @@ -452,6 +458,10 @@ static struct drm_connector *intel_dp_add_mst_connector(struct drm_dp_mst_topolo if (!intel_connector) return NULL; + intel_connector->get_hw_state = intel_dp_mst_get_hw_state; + intel_connector->mst_port = intel_dp; + intel_connector->port = port; + connector = &intel_connector->base; ret = drm_connector_init(dev, connector, &intel_dp_mst_connector_funcs, DRM_MODE_CONNECTOR_DisplayPort); @@ -462,10 +472,6 @@ static struct drm_connector *intel_dp_add_mst_connector(struct drm_dp_mst_topolo drm_connector_helper_add(connector, &intel_dp_mst_connector_helper_funcs); - intel_connector->get_hw_state = intel_dp_mst_get_hw_state; - intel_connector->mst_port = intel_dp; - intel_connector->port = port; - 
for_each_pipe(dev_priv, pipe) { struct drm_encoder *enc = &intel_dp->mst_encoders[pipe]->base.base; @@ -503,7 +509,6 @@ static void intel_dp_register_mst_connector(struct drm_connector *connector) static void intel_dp_destroy_mst_connector(struct drm_dp_mst_topology_mgr *mgr, struct drm_connector *connector) { - struct intel_connector *intel_connector = to_intel_connector(connector); struct drm_i915_private *dev_priv = to_i915(connector->dev); DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n", connector->base.id, connector->name); @@ -512,10 +517,6 @@ static void intel_dp_destroy_mst_connector(struct drm_dp_mst_topology_mgr *mgr, if (dev_priv->fbdev) drm_fb_helper_remove_one_connector(&dev_priv->fbdev->helper, connector); - /* prevent race with the check in ->detect */ - drm_modeset_lock(&connector->dev->mode_config.connection_mutex, NULL); - intel_connector->mst_port = NULL; - drm_modeset_unlock(&connector->dev->mode_config.connection_mutex); drm_connector_put(connector); } diff --git a/drivers/gpu/drm/i915/intel_dpll_mgr.c b/drivers/gpu/drm/i915/intel_dpll_mgr.c index b51ad2917dbef..47b1dfe061529 100644 --- a/drivers/gpu/drm/i915/intel_dpll_mgr.c +++ b/drivers/gpu/drm/i915/intel_dpll_mgr.c @@ -303,7 +303,7 @@ intel_reference_shared_dpll(struct intel_shared_dpll *pll, DRM_DEBUG_DRIVER("using %s for pipe %c\n", pll->info->name, pipe_name(crtc->pipe)); - shared_dpll[id].crtc_mask |= 1 << crtc->pipe; + shared_dpll[id].crtc_mask |= 1 << (drm_crtc_index(&crtc->base)); } /** @@ -3285,7 +3285,8 @@ void intel_release_shared_dpll(struct intel_shared_dpll *dpll, struct intel_shared_dpll_state *shared_dpll_state; shared_dpll_state = intel_atomic_get_shared_dpll_state(state); - shared_dpll_state[dpll->info->id].crtc_mask &= ~(1 << crtc->pipe); + shared_dpll_state[dpll->info->id].crtc_mask &= + ~(1 << drm_crtc_index(&crtc->base)); } /** diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h index 8fc61e96754ff..b8bae3ab7c353 100644 --- 
a/drivers/gpu/drm/i915/intel_drv.h +++ b/drivers/gpu/drm/i915/intel_drv.h @@ -418,6 +418,15 @@ struct intel_connector { uint64_t hdcp_value; /* protected by hdcp_mutex */ struct delayed_work hdcp_check_work; struct work_struct hdcp_prop_work; + struct work_struct hdcp_enable_work; + + /* list of Revocated KSVs and their count from SRM blob Parsing */ + unsigned int revocated_ksv_cnt; + u8 *revocated_ksv_list; + u32 srm_blob_id; + + /* Downstream info like, depth, device_count, bksv and ksv_list etc */ + struct cp_downstream_info *downstream_info; }; struct intel_digital_connector_state { @@ -564,6 +573,7 @@ struct intel_initial_plane_config { struct intel_scaler { int in_use; uint32_t mode; + int owned; }; struct intel_crtc_scaler_state { @@ -1384,6 +1394,11 @@ static inline bool intel_irqs_enabled(struct drm_i915_private *dev_priv) return dev_priv->runtime_pm.irqs_enabled; } +bool is_shadow_context(struct i915_gem_context *ctx); +int get_vgt_id(struct i915_gem_context *ctx); +int get_pid_shadowed(struct i915_gem_context *ctx, + struct intel_engine_cs *engine); + int intel_get_crtc_scanline(struct intel_crtc *crtc); void gen8_irq_power_well_post_enable(struct drm_i915_private *dev_priv, u8 pipe_mask); @@ -1514,6 +1529,9 @@ enum tc_port intel_port_to_tc(struct drm_i915_private *dev_priv, enum pipe intel_get_pipe_from_connector(struct intel_connector *connector); int intel_get_pipe_from_crtc_id_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv); +int get_pipe_from_crtc_index(struct drm_device *dev, unsigned int index, enum pipe *pipe); +struct intel_crtc *get_intel_crtc_from_index(struct drm_device *dev, + unsigned int index); enum transcoder intel_pipe_to_cpu_transcoder(struct drm_i915_private *dev_priv, enum pipe pipe); static inline bool @@ -1533,7 +1551,12 @@ intel_crtc_has_dp_encoder(const struct intel_crtc_state *crtc_state) static inline void intel_wait_for_vblank(struct drm_i915_private *dev_priv, enum pipe pipe) { - 
drm_wait_one_vblank(&dev_priv->drm, pipe); + struct intel_crtc *crtc; + + crtc = intel_get_crtc_for_pipe(dev_priv, pipe); + if (crtc) + drm_wait_one_vblank(&dev_priv->drm, + drm_crtc_index(&crtc->base)); } static inline void intel_wait_for_vblank_if_active(struct drm_i915_private *dev_priv, int pipe) @@ -1708,6 +1731,7 @@ int intel_dp_rate_select(struct intel_dp *intel_dp, int rate); void intel_dp_hot_plug(struct intel_encoder *intel_encoder); void intel_power_sequencer_reset(struct drm_i915_private *dev_priv); uint32_t intel_dp_pack_aux(const uint8_t *src, int src_bytes); +void intel_dp_unpack_aux(uint32_t src, uint8_t *dst, int dst_bytes); void intel_plane_destroy(struct drm_plane *plane); void intel_edp_drrs_enable(struct intel_dp *intel_dp, const struct intel_crtc_state *crtc_state); @@ -1916,10 +1940,13 @@ static inline void intel_backlight_device_unregister(struct intel_connector *con void intel_hdcp_atomic_check(struct drm_connector *connector, struct drm_connector_state *old_state, struct drm_connector_state *new_state); +void intel_hdcp_atomic_pre_commit(struct drm_connector *connector, + struct drm_connector_state *old_state, + struct drm_connector_state *new_state); +void intel_hdcp_atomic_commit(struct drm_connector *connector, + struct drm_connector_state *new_state); int intel_hdcp_init(struct intel_connector *connector, const struct intel_hdcp_shim *hdcp_shim); -int intel_hdcp_enable(struct intel_connector *connector); -int intel_hdcp_disable(struct intel_connector *connector); int intel_hdcp_check_link(struct intel_connector *connector); bool is_hdcp_supported(struct drm_i915_private *dev_priv, enum port port); @@ -2099,6 +2126,9 @@ int intel_sprite_set_colorkey_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv); void intel_pipe_update_start(const struct intel_crtc_state *new_crtc_state); void intel_pipe_update_end(struct intel_crtc_state *new_crtc_state); +int intel_check_sprite_plane(struct intel_plane *plane, + struct 
intel_crtc_state *crtc_state, + struct intel_plane_state *state); void skl_update_plane(struct intel_plane *plane, const struct intel_crtc_state *crtc_state, const struct intel_plane_state *plane_state); diff --git a/drivers/gpu/drm/i915/intel_engine_cs.c b/drivers/gpu/drm/i915/intel_engine_cs.c index 2d1952849d69f..7f3bcbd2451b1 100644 --- a/drivers/gpu/drm/i915/intel_engine_cs.c +++ b/drivers/gpu/drm/i915/intel_engine_cs.c @@ -468,6 +468,9 @@ static void intel_engine_init_execlist(struct intel_engine_cs *engine) execlists->queue_priority = INT_MIN; execlists->queue = RB_ROOT_CACHED; + + hrtimer_init(&execlists->preempt_timer, + CLOCK_MONOTONIC, HRTIMER_MODE_REL); } /** @@ -1109,6 +1112,7 @@ void intel_engines_park(struct drm_i915_private *i915) for_each_engine(engine, i915, id) { /* Flush the residual irq tasklets first. */ + hrtimer_cancel(&engine->execlists.preempt_timer); intel_engine_disarm_breadcrumbs(engine); tasklet_kill(&engine->execlists.tasklet); diff --git a/drivers/gpu/drm/i915/intel_fbdev.c b/drivers/gpu/drm/i915/intel_fbdev.c index fb2f9fce34cd2..f028a6fb33fea 100644 --- a/drivers/gpu/drm/i915/intel_fbdev.c +++ b/drivers/gpu/drm/i915/intel_fbdev.c @@ -548,10 +548,17 @@ static bool intel_fbdev_init_bios(struct drm_device *dev, /* Find the largest fb */ for_each_crtc(dev, crtc) { - struct drm_i915_gem_object *obj = - intel_fb_obj(crtc->primary->state->fb); + struct drm_i915_gem_object *obj; intel_crtc = to_intel_crtc(crtc); + if (!crtc->primary) { + DRM_DEBUG_KMS("pipe %c has no primary plane\n", + pipe_name(intel_crtc->pipe)); + continue; + } + + obj = intel_fb_obj(crtc->primary->state->fb); + if (!crtc->state->active || !obj) { DRM_DEBUG_KMS("pipe %c not active or no fb, skipping\n", pipe_name(intel_crtc->pipe)); diff --git a/drivers/gpu/drm/i915/intel_fifo_underrun.c b/drivers/gpu/drm/i915/intel_fifo_underrun.c index 77c123cc88179..83ed6f56ed561 100644 --- a/drivers/gpu/drm/i915/intel_fifo_underrun.c +++ b/drivers/gpu/drm/i915/intel_fifo_underrun.c 
@@ -181,11 +181,18 @@ static void broadwell_set_fifo_underrun_reporting(struct drm_device *dev, enum pipe pipe, bool enable) { struct drm_i915_private *dev_priv = to_i915(dev); + struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe); + if (!crtc) { + DRM_DEBUG("No crtc for pipe=%d\n", pipe); + return; + } if (enable) - bdw_enable_pipe_irq(dev_priv, pipe, GEN8_PIPE_FIFO_UNDERRUN); + bdw_enable_pipe_irq(dev_priv, drm_crtc_index(&crtc->base), + GEN8_PIPE_FIFO_UNDERRUN); else - bdw_disable_pipe_irq(dev_priv, pipe, GEN8_PIPE_FIFO_UNDERRUN); + bdw_disable_pipe_irq(dev_priv, drm_crtc_index(&crtc->base), + GEN8_PIPE_FIFO_UNDERRUN); } static void ibx_set_fifo_underrun_reporting(struct drm_device *dev, diff --git a/drivers/gpu/drm/i915/intel_guc_submission.c b/drivers/gpu/drm/i915/intel_guc_submission.c index 4aa5e6463e7b7..84e79f8bf344b 100644 --- a/drivers/gpu/drm/i915/intel_guc_submission.c +++ b/drivers/gpu/drm/i915/intel_guc_submission.c @@ -748,6 +748,7 @@ static bool __guc_dequeue(struct intel_engine_cs *engine) kmem_cache_free(engine->i915->priorities, p); } done: + execlists_clear_active(execlists, EXECLISTS_ACTIVE_PREEMPT_TIMEOUT); execlists->queue_priority = rb ? 
to_priolist(rb)->priority : INT_MIN; if (submit) port_assign(port, last); diff --git a/drivers/gpu/drm/i915/intel_hangcheck.c b/drivers/gpu/drm/i915/intel_hangcheck.c index 2fc7a0dd0df9b..1f7da1cfd4b2c 100644 --- a/drivers/gpu/drm/i915/intel_hangcheck.c +++ b/drivers/gpu/drm/i915/intel_hangcheck.c @@ -418,6 +418,9 @@ static void i915_hangcheck_elapsed(struct work_struct *work) if (!i915_modparams.enable_hangcheck) return; + if (intel_vgpu_active(dev_priv)) + return; + if (!READ_ONCE(dev_priv->gt.awake)) return; diff --git a/drivers/gpu/drm/i915/intel_hdcp.c b/drivers/gpu/drm/i915/intel_hdcp.c index 0cc6a861bcf83..d9869b64a3f13 100644 --- a/drivers/gpu/drm/i915/intel_hdcp.c +++ b/drivers/gpu/drm/i915/intel_hdcp.c @@ -10,6 +10,7 @@ #include #include #include +#include #include "intel_drv.h" #include "i915_reg.h" @@ -179,22 +180,124 @@ bool intel_hdcp_is_ksv_valid(u8 *ksv) return true; } +struct intel_digital_port *conn_to_dig_port(struct intel_connector *connector) +{ + return enc_to_dig_port(&intel_attached_encoder(&connector->base)->base); +} + +static inline void intel_hdcp_print_ksv(u8 *ksv) +{ + DRM_DEBUG_KMS("\t%#04x, %#04x, %#04x, %#04x, %#04x\n", *ksv, + *(ksv + 1), *(ksv + 2), *(ksv + 3), *(ksv + 4)); +} + +/* Check if any of the KSV is revocated by DCP LLC through SRM table */ +static inline bool intel_hdcp_ksvs_revocated(struct intel_connector *connector, + u8 *ksvs, u32 ksv_count) +{ + u32 rev_ksv_cnt = connector->revocated_ksv_cnt; + u8 *rev_ksv_list = connector->revocated_ksv_list; + u32 cnt, i, j; + + /* If the Revocated ksv list is empty */ + if (!rev_ksv_cnt || !rev_ksv_list) + return false; + + for (cnt = 0; cnt < ksv_count; cnt++) { + rev_ksv_list = connector->revocated_ksv_list; + for (i = 0; i < rev_ksv_cnt; i++) { + for (j = 0; j < DRM_HDCP_KSV_LEN; j++) + if (*(ksvs + j) != *(rev_ksv_list + j)) { + break; + } else if (j == (DRM_HDCP_KSV_LEN - 1)) { + DRM_DEBUG_KMS("Revocated KSV is "); + intel_hdcp_print_ksv(ksvs); + return true; + } + /* Move 
the offset to next KSV in the revocated list */ + rev_ksv_list += DRM_HDCP_KSV_LEN; + } + + /* Iterate to next ksv_offset */ + ksvs += DRM_HDCP_KSV_LEN; + } + return false; +} + +/* Implements Part 2 of the HDCP authorization procedure */ static -int intel_hdcp_validate_v_prime(struct intel_digital_port *intel_dig_port, - const struct intel_hdcp_shim *shim, - u8 *ksv_fifo, u8 num_downstream, u8 *bstatus) +int intel_hdcp_auth_downstream(struct intel_connector *connector) { - struct drm_i915_private *dev_priv; + struct intel_digital_port *intel_dig_port = + conn_to_dig_port(connector); + const struct intel_hdcp_shim *shim = connector->hdcp_shim; + struct drm_i915_private *dev_priv = to_i915(connector->base.dev); u32 vprime, sha_text, sha_leftovers, rep_ctl; - int ret, i, j, sha_idx; + u8 bstatus[2], num_downstream, *ksv_fifo; + int ret = 0, i, j, sha_idx; + + if(intel_dig_port == NULL) { + ret = -EINVAL; + goto out; + } + + ret = intel_hdcp_poll_ksv_fifo(intel_dig_port, shim); + if (ret) { + DRM_ERROR("KSV list failed to become ready (%d)\n", ret); + goto out; + } + + ret = shim->read_bstatus(intel_dig_port, bstatus); + if (ret) + goto out; - dev_priv = intel_dig_port->base.base.dev->dev_private; + if (DRM_HDCP_MAX_DEVICE_EXCEEDED(bstatus[0]) || + DRM_HDCP_MAX_CASCADE_EXCEEDED(bstatus[1])) { + DRM_ERROR("Max Topology Limit Exceeded\n"); + ret = -EPERM; + goto out; + } + + /* + * When repeater reports 0 device count, HDCP1.4 spec allows disabling + * the HDCP encryption. That implies that repeater can't have its own + * display. As there is no consumption of encrypted content in the + * repeater with 0 downstream devices, we are failing the + * authentication. 
+ */ + num_downstream = DRM_HDCP_NUM_DOWNSTREAM(bstatus[0]); + if (num_downstream == 0) { + ret = -EINVAL; + goto out; + } + + connector->downstream_info->device_count = num_downstream; + connector->downstream_info->depth = DRM_HDCP_DEPTH(bstatus[1]); + + ksv_fifo = kzalloc(num_downstream * DRM_HDCP_KSV_LEN, GFP_KERNEL); + if (!ksv_fifo) { + ret = -ENOMEM; + goto out; + } + + ret = shim->read_ksv_fifo(intel_dig_port, num_downstream, ksv_fifo); + if (ret) + goto kfree_out; + + if (intel_hdcp_ksvs_revocated(connector, ksv_fifo, num_downstream)) { + DRM_ERROR("Revocated Ksv(s) in ksv_fifo\n"); + ret = -EPERM; + goto kfree_out; + } + + memcpy(connector->downstream_info->ksv_list, ksv_fifo, + num_downstream * DRM_HDCP_KSV_LEN); /* Process V' values from the receiver */ for (i = 0; i < DRM_HDCP_V_PRIME_NUM_PARTS; i++) { ret = shim->read_v_prime_part(intel_dig_port, i, &vprime); if (ret) - return ret; + goto kfree_out; I915_WRITE(HDCP_SHA_V_PRIME(i), vprime); } @@ -224,7 +327,7 @@ int intel_hdcp_validate_v_prime(struct intel_digital_port *intel_dig_port, ret = intel_write_sha_text(dev_priv, sha_text); if (ret < 0) - return ret; + goto kfree_out; /* Programming guide writes this every 64 bytes */ sha_idx += sizeof(sha_text); @@ -247,7 +350,7 @@ int intel_hdcp_validate_v_prime(struct intel_digital_port *intel_dig_port, ret = intel_write_sha_text(dev_priv, sha_text); if (ret < 0) - return ret; + goto kfree_out; sha_leftovers = 0; sha_text = 0; sha_idx += sizeof(sha_text); @@ -265,21 +368,21 @@ int intel_hdcp_validate_v_prime(struct intel_digital_port *intel_dig_port, ret = intel_write_sha_text(dev_priv, bstatus[0] << 8 | bstatus[1]); if (ret < 0) - return ret; + goto kfree_out; sha_idx += sizeof(sha_text); /* Write 32 bits of M0 */ I915_WRITE(HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_0); ret = intel_write_sha_text(dev_priv, 0); if (ret < 0) - return ret; + goto kfree_out; sha_idx += sizeof(sha_text); /* Write 16 bits of M0 */ I915_WRITE(HDCP_REP_CTL, rep_ctl | 
HDCP_SHA1_TEXT_16); ret = intel_write_sha_text(dev_priv, 0); if (ret < 0) - return ret; + goto kfree_out; sha_idx += sizeof(sha_text); } else if (sha_leftovers == 1) { @@ -290,21 +393,21 @@ int intel_hdcp_validate_v_prime(struct intel_digital_port *intel_dig_port, sha_text = (sha_text & 0xffffff00) >> 8; ret = intel_write_sha_text(dev_priv, sha_text); if (ret < 0) - return ret; + goto kfree_out; sha_idx += sizeof(sha_text); /* Write 32 bits of M0 */ I915_WRITE(HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_0); ret = intel_write_sha_text(dev_priv, 0); if (ret < 0) - return ret; + goto kfree_out; sha_idx += sizeof(sha_text); /* Write 24 bits of M0 */ I915_WRITE(HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_8); ret = intel_write_sha_text(dev_priv, 0); if (ret < 0) - return ret; + goto kfree_out; sha_idx += sizeof(sha_text); } else if (sha_leftovers == 2) { @@ -313,7 +416,7 @@ int intel_hdcp_validate_v_prime(struct intel_digital_port *intel_dig_port, sha_text |= bstatus[0] << 24 | bstatus[1] << 16; ret = intel_write_sha_text(dev_priv, sha_text); if (ret < 0) - return ret; + goto kfree_out; sha_idx += sizeof(sha_text); /* Write 64 bits of M0 */ @@ -321,7 +424,7 @@ int intel_hdcp_validate_v_prime(struct intel_digital_port *intel_dig_port, for (i = 0; i < 2; i++) { ret = intel_write_sha_text(dev_priv, 0); if (ret < 0) - return ret; + goto kfree_out; sha_idx += sizeof(sha_text); } } else if (sha_leftovers == 3) { @@ -330,33 +433,34 @@ int intel_hdcp_validate_v_prime(struct intel_digital_port *intel_dig_port, sha_text |= bstatus[0] << 24; ret = intel_write_sha_text(dev_priv, sha_text); if (ret < 0) - return ret; + goto kfree_out; sha_idx += sizeof(sha_text); /* Write 8 bits of text, 24 bits of M0 */ I915_WRITE(HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_8); ret = intel_write_sha_text(dev_priv, bstatus[1]); if (ret < 0) - return ret; + goto kfree_out; sha_idx += sizeof(sha_text); /* Write 32 bits of M0 */ I915_WRITE(HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_0); ret = intel_write_sha_text(dev_priv, 
0); if (ret < 0) - return ret; + goto kfree_out; sha_idx += sizeof(sha_text); /* Write 8 bits of M0 */ I915_WRITE(HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_24); ret = intel_write_sha_text(dev_priv, 0); if (ret < 0) - return ret; + goto kfree_out; sha_idx += sizeof(sha_text); } else { DRM_DEBUG_KMS("Invalid number of leftovers %d\n", sha_leftovers); - return -EINVAL; + ret = -EINVAL; + goto kfree_out; } I915_WRITE(HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_32); @@ -364,7 +468,7 @@ int intel_hdcp_validate_v_prime(struct intel_digital_port *intel_dig_port, while ((sha_idx % 64) < (64 - sizeof(sha_text))) { ret = intel_write_sha_text(dev_priv, 0); if (ret < 0) - return ret; + goto kfree_out; sha_idx += sizeof(sha_text); } @@ -376,7 +480,7 @@ int intel_hdcp_validate_v_prime(struct intel_digital_port *intel_dig_port, sha_text = (num_downstream * 5 + 10) * 8; ret = intel_write_sha_text(dev_priv, sha_text); if (ret < 0) - return ret; + goto kfree_out; /* Tell the HW we're done with the hash and wait for it to ACK */ I915_WRITE(HDCP_REP_CTL, rep_ctl | HDCP_SHA1_COMPLETE_HASH); @@ -384,89 +488,27 @@ int intel_hdcp_validate_v_prime(struct intel_digital_port *intel_dig_port, HDCP_SHA1_COMPLETE, HDCP_SHA1_COMPLETE, 1)) { DRM_DEBUG_KMS("Timed out waiting for SHA1 complete\n"); - return -ETIMEDOUT; + ret = -ETIMEDOUT; + goto kfree_out; } if (!(I915_READ(HDCP_REP_CTL) & HDCP_SHA1_V_MATCH)) { DRM_DEBUG_KMS("SHA-1 mismatch, HDCP failed\n"); - return -ENXIO; - } - - return 0; -} - -/* Implements Part 2 of the HDCP authorization procedure */ -static -int intel_hdcp_auth_downstream(struct intel_digital_port *intel_dig_port, - const struct intel_hdcp_shim *shim) -{ - u8 bstatus[2], num_downstream, *ksv_fifo; - int ret, i, tries = 3; - - ret = intel_hdcp_poll_ksv_fifo(intel_dig_port, shim); - if (ret) { - DRM_ERROR("KSV list failed to become ready (%d)\n", ret); - return ret; - } - - ret = shim->read_bstatus(intel_dig_port, bstatus); - if (ret) - return ret; - - if 
(DRM_HDCP_MAX_DEVICE_EXCEEDED(bstatus[0]) || - DRM_HDCP_MAX_CASCADE_EXCEEDED(bstatus[1])) { - DRM_ERROR("Max Topology Limit Exceeded\n"); - return -EPERM; - } - - /* - * When repeater reports 0 device count, HDCP1.4 spec allows disabling - * the HDCP encryption. That implies that repeater can't have its own - * display. As there is no consumption of encrypted content in the - * repeater with 0 downstream devices, we are failing the - * authentication. - */ - num_downstream = DRM_HDCP_NUM_DOWNSTREAM(bstatus[0]); - if (num_downstream == 0) - return -EINVAL; - - ksv_fifo = kcalloc(DRM_HDCP_KSV_LEN, num_downstream, GFP_KERNEL); - if (!ksv_fifo) - return -ENOMEM; - - ret = shim->read_ksv_fifo(intel_dig_port, num_downstream, ksv_fifo); - if (ret) - goto err; - - /* - * When V prime mismatches, DP Spec mandates re-read of - * V prime atleast twice. - */ - for (i = 0; i < tries; i++) { - ret = intel_hdcp_validate_v_prime(intel_dig_port, shim, - ksv_fifo, num_downstream, - bstatus); - if (!ret) - break; - } - - if (i == tries) { - DRM_ERROR("V Prime validation failed.(%d)\n", ret); - goto err; + ret = -ENXIO; + goto kfree_out; } - DRM_DEBUG_KMS("HDCP is enabled (%d downstream devices)\n", - num_downstream); - ret = 0; -err: +kfree_out: kfree(ksv_fifo); +out: return ret; } /* Implements Part 1 of the HDCP authorization procedure */ -static int intel_hdcp_auth(struct intel_digital_port *intel_dig_port, - const struct intel_hdcp_shim *shim) +static int intel_hdcp_auth(struct intel_connector *connector) { - struct drm_i915_private *dev_priv; + struct intel_digital_port *intel_dig_port = conn_to_dig_port(connector); + const struct intel_hdcp_shim *shim = connector->hdcp_shim; + struct drm_i915_private *dev_priv = to_i915(connector->base.dev); enum port port; unsigned long r0_prime_gen_start; int ret, i, tries = 2; @@ -484,7 +526,8 @@ static int intel_hdcp_auth(struct intel_digital_port *intel_dig_port, } ri; bool repeater_present, hdcp_capable; - dev_priv = 
intel_dig_port->base.base.dev->dev_private; + if(intel_dig_port == NULL) + return EINVAL; port = intel_dig_port->base.port; @@ -540,15 +583,25 @@ static int intel_hdcp_auth(struct intel_digital_port *intel_dig_port, return -ENODEV; } + if (intel_hdcp_ksvs_revocated(connector, bksv.shim, 1)) { + DRM_ERROR("BKSV is revocated\n"); + return -EPERM; + } + + memcpy(connector->downstream_info->bksv, bksv.shim, + DRM_MODE_HDCP_KSV_LEN); + I915_WRITE(PORT_HDCP_BKSVLO(port), bksv.reg[0]); I915_WRITE(PORT_HDCP_BKSVHI(port), bksv.reg[1]); ret = shim->repeater_present(intel_dig_port, &repeater_present); if (ret) return ret; - if (repeater_present) + if (repeater_present) { I915_WRITE(HDCP_REP_CTL, intel_hdcp_get_repeater_ctl(intel_dig_port)); + connector->downstream_info->is_repeater = true; + } ret = shim->toggle_signalling(intel_dig_port, true); if (ret) @@ -612,18 +665,12 @@ static int intel_hdcp_auth(struct intel_digital_port *intel_dig_port, */ if (repeater_present) - return intel_hdcp_auth_downstream(intel_dig_port, shim); + return intel_hdcp_auth_downstream(connector); DRM_DEBUG_KMS("HDCP is enabled (no repeater present)\n"); return 0; } -static -struct intel_digital_port *conn_to_dig_port(struct intel_connector *connector) -{ - return enc_to_dig_port(&intel_attached_encoder(&connector->base)->base); -} - static int _intel_hdcp_disable(struct intel_connector *connector) { struct drm_i915_private *dev_priv = connector->base.dev->dev_private; @@ -647,6 +694,9 @@ static int _intel_hdcp_disable(struct intel_connector *connector) return ret; } + memset(connector->downstream_info, 0, + sizeof(struct cp_downstream_info)); + DRM_DEBUG_KMS("HDCP is disabled\n"); return 0; } @@ -677,10 +727,15 @@ static int _intel_hdcp_enable(struct intel_connector *connector) /* Incase of authentication failures, HDCP spec expects reauth. 
*/ for (i = 0; i < tries; i++) { - ret = intel_hdcp_auth(conn_to_dig_port(connector), - connector->hdcp_shim); - if (!ret) + ret = intel_hdcp_auth(connector); + if (!ret) { + connector->hdcp_value = + DRM_MODE_CONTENT_PROTECTION_ENABLED; + schedule_work(&connector->hdcp_prop_work); + schedule_delayed_work(&connector->hdcp_check_work, + DRM_HDCP_CHECK_PERIOD_MS); return 0; + } DRM_DEBUG_KMS("HDCP Auth failure (%d)\n", ret); @@ -688,10 +743,33 @@ static int _intel_hdcp_enable(struct intel_connector *connector) _intel_hdcp_disable(connector); } + memset(connector->downstream_info, 0, + sizeof(struct cp_downstream_info)); + DRM_ERROR("HDCP authentication failed (%d tries/%d)\n", tries, ret); return ret; } +static void intel_hdcp_enable_work(struct work_struct *work) +{ + struct intel_connector *connector = container_of(work, + struct intel_connector, + hdcp_enable_work); + int ret; + + mutex_lock(&connector->hdcp_mutex); + ret = _intel_hdcp_enable(connector); + if (!ret) { + ret = drm_mode_connector_update_cp_downstream_property( + &connector->base, + connector->downstream_info); + if (ret) + DRM_ERROR("Downstream_property update failed.%d\n", + ret); + } + mutex_unlock(&connector->hdcp_mutex); +} + static void intel_hdcp_check_work(struct work_struct *work) { struct intel_connector *connector = container_of(to_delayed_work(work), @@ -744,33 +822,37 @@ int intel_hdcp_init(struct intel_connector *connector, if (ret) return ret; + ret = drm_connector_attach_cp_srm_property(&connector->base); + if (ret) + return ret; + + ret = drm_connector_attach_cp_downstream_property(&connector->base); + if (ret) + return ret; + + connector->downstream_info = kzalloc(sizeof(struct cp_downstream_info), + GFP_KERNEL); + if (!connector->downstream_info) + return -ENOMEM; + connector->hdcp_shim = hdcp_shim; mutex_init(&connector->hdcp_mutex); INIT_DELAYED_WORK(&connector->hdcp_check_work, intel_hdcp_check_work); INIT_WORK(&connector->hdcp_prop_work, intel_hdcp_prop_work); + 
INIT_WORK(&connector->hdcp_enable_work, intel_hdcp_enable_work); return 0; } int intel_hdcp_enable(struct intel_connector *connector) { - int ret; - if (!connector->hdcp_shim) return -ENOENT; mutex_lock(&connector->hdcp_mutex); - - ret = _intel_hdcp_enable(connector); - if (ret) - goto out; - - connector->hdcp_value = DRM_MODE_CONTENT_PROTECTION_ENABLED; - schedule_work(&connector->hdcp_prop_work); - schedule_delayed_work(&connector->hdcp_check_work, - DRM_HDCP_CHECK_PERIOD_MS); -out: + schedule_work(&connector->hdcp_enable_work); mutex_unlock(&connector->hdcp_mutex); - return ret; + + return 0; } int intel_hdcp_disable(struct intel_connector *connector) @@ -798,7 +880,6 @@ void intel_hdcp_atomic_check(struct drm_connector *connector, { uint64_t old_cp = old_state->content_protection; uint64_t new_cp = new_state->content_protection; - struct drm_crtc_state *crtc_state; if (!new_state->crtc) { /* @@ -819,10 +900,176 @@ void intel_hdcp_atomic_check(struct drm_connector *connector, (old_cp == DRM_MODE_CONTENT_PROTECTION_DESIRED && new_cp == DRM_MODE_CONTENT_PROTECTION_ENABLED)) return; +} + +void intel_hdcp_atomic_pre_commit(struct drm_connector *connector, + struct drm_connector_state *old_state, + struct drm_connector_state *new_state) +{ + uint64_t old_cp = old_state->content_protection; + uint64_t new_cp = new_state->content_protection; + + /* + * Disable HDCP if the connector is becoming disabled, or if requested + * via the property. 
+ */ + if ((!new_state->crtc && + old_cp != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) || + (new_state->crtc && + old_cp != DRM_MODE_CONTENT_PROTECTION_UNDESIRED && + new_cp == DRM_MODE_CONTENT_PROTECTION_UNDESIRED)) + intel_hdcp_disable(to_intel_connector(connector)); +} + +static u32 intel_hdcp_get_revocated_ksv_count(u8 *buf, u32 vrls_length) +{ + u32 parsed_bytes = 0, ksv_count = 0, vrl_ksv_cnt, vrl_sz; + + do { + vrl_ksv_cnt = *buf; + ksv_count += vrl_ksv_cnt; + + vrl_sz = (vrl_ksv_cnt * DRM_HDCP_KSV_LEN) + 1; + buf += vrl_sz; + parsed_bytes += vrl_sz; + } while (parsed_bytes < vrls_length); + + return ksv_count; +} + +static u32 intel_hdcp_get_revocated_ksvs(u8 *ksv_list, const u8 *buf, + u32 vrls_length) +{ + u32 parsed_bytes = 0, ksv_count = 0; + u32 vrl_ksv_cnt, vrl_ksv_sz, vrl_idx = 0; + + do { + vrl_ksv_cnt = *buf; + vrl_ksv_sz = vrl_ksv_cnt * DRM_HDCP_KSV_LEN; + + buf++; + + DRM_INFO("vrl: %d, Revoked KSVs: %d\n", vrl_idx++, + vrl_ksv_cnt); + memcpy(ksv_list, buf, vrl_ksv_sz); + + ksv_count += vrl_ksv_cnt; + ksv_list += vrl_ksv_sz; + buf += vrl_ksv_sz; + + parsed_bytes += (vrl_ksv_sz + 1); + } while (parsed_bytes < vrls_length); + + return ksv_count; +} + +static int intel_hdcp_parse_srm(struct drm_connector *connector, + struct drm_property_blob *blob) +{ + struct intel_connector *intel_connector = to_intel_connector(connector); + struct cp_srm_header *header; + u32 vrl_length, ksv_count; + u8 *buf; + + if (blob->length < (sizeof(struct cp_srm_header) + + DRM_HDCP_1_4_VRL_LENGTH_SIZE + + DRM_HDCP_1_4_DCP_SIG_SIZE)) { + DRM_ERROR("Invalid blob length\n"); + return -EINVAL; + } + + header = (struct cp_srm_header *)blob->data; + + DRM_INFO("SRM ID: 0x%x, SRM Ver: 0x%x, SRM Gen No: 0x%x\n", + header->spec_indicator.srm_id, + __swab16(header->srm_version), + header->srm_gen_no); + + WARN_ON(header->spec_indicator.reserved_hi || + header->spec_indicator.reserved_lo); + + if (header->spec_indicator.srm_id != DRM_HDCP_1_4_SRM_ID) { + DRM_ERROR("Invalid srm_id\n"); 
+ return -EINVAL; + } + + buf = blob->data + sizeof(*header); + + vrl_length = (*buf << 16 | *(buf + 1) << 8 | *(buf + 2)); + + if (blob->length < (sizeof(struct cp_srm_header) + vrl_length) || + vrl_length < (DRM_HDCP_1_4_VRL_LENGTH_SIZE + + DRM_HDCP_1_4_DCP_SIG_SIZE)) { + DRM_ERROR("Invalid blob length or vrl length\n"); + return -EINVAL; + } + + /* Length of the all vrls combined */ + vrl_length -= (DRM_HDCP_1_4_VRL_LENGTH_SIZE + + DRM_HDCP_1_4_DCP_SIG_SIZE); + + if (!vrl_length) { + DRM_DEBUG("No vrl found\n"); + return -EINVAL; + } + + buf += DRM_HDCP_1_4_VRL_LENGTH_SIZE; + + + ksv_count = intel_hdcp_get_revocated_ksv_count(buf, vrl_length); + if (!ksv_count) { + DRM_INFO("Revocated KSV count is 0\n"); + return 0; + } + + kfree(intel_connector->revocated_ksv_list); + intel_connector->revocated_ksv_list = kzalloc(ksv_count * + DRM_HDCP_KSV_LEN, GFP_KERNEL); + if (!intel_connector->revocated_ksv_list) { + DRM_ERROR("Out of Memory\n"); + return -ENOMEM; + } + + if (intel_hdcp_get_revocated_ksvs(intel_connector->revocated_ksv_list, + buf, vrl_length) != ksv_count) { + intel_connector->revocated_ksv_cnt = 0; + kfree(intel_connector->revocated_ksv_list); + return -EINVAL; + } + + intel_connector->revocated_ksv_cnt = ksv_count; + return 0; +} + +static void intel_hdcp_update_srm(struct drm_connector *connector, + u32 srm_blob_id) +{ + struct intel_connector *intel_connector = to_intel_connector(connector); + struct drm_property_blob *blob; + + blob = drm_property_lookup_blob(connector->dev, srm_blob_id); + if (!blob || !blob->data) + return; + + if (!intel_hdcp_parse_srm(connector, blob)) + intel_connector->srm_blob_id = srm_blob_id; + + drm_property_blob_put(blob); +} + +void intel_hdcp_atomic_commit(struct drm_connector *connector, + struct drm_connector_state *new_state) +{ + struct intel_connector *intel_connector = to_intel_connector(connector); + uint64_t new_cp = new_state->content_protection; + + if (new_state->cp_srm_blob_id && + new_state->cp_srm_blob_id != 
intel_connector->srm_blob_id) + intel_hdcp_update_srm(connector, new_state->cp_srm_blob_id); - crtc_state = drm_atomic_get_new_crtc_state(new_state->state, - new_state->crtc); - crtc_state->mode_changed = true; + /* Enable hdcp if it's desired */ + if (new_state->crtc && new_cp == DRM_MODE_CONTENT_PROTECTION_DESIRED) + intel_hdcp_enable(to_intel_connector(connector)); } /* Implements Part 3 of the HDCP authorization procedure */ diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c index 192972a7d287e..a3b6d078ca246 100644 --- a/drivers/gpu/drm/i915/intel_hdmi.c +++ b/drivers/gpu/drm/i915/intel_hdmi.c @@ -2182,6 +2182,14 @@ static u8 bxt_port_to_ddc_pin(struct drm_i915_private *dev_priv, enum port port) u8 ddc_pin; switch (port) { + case PORT_A: + if ((IS_GEN9_LP(dev_priv)) && (intel_vgpu_active(dev_priv))) + ddc_pin = GMBUS_PIN_3_BXT; + else { + MISSING_CASE(port); + ddc_pin = GMBUS_PIN_DPB; + } + break; case PORT_B: ddc_pin = GMBUS_PIN_1_BXT; break; @@ -2365,7 +2373,8 @@ void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port, intel_hdmi->ddc_bus = intel_hdmi_ddc_pin(dev_priv, port); - if (WARN_ON(port == PORT_A)) + if (!intel_vgpu_active(dev_priv) && + WARN_ON(port == PORT_A)) return; intel_encoder->hpd_pin = intel_hpd_pin_default(dev_priv, port); diff --git a/drivers/gpu/drm/i915/intel_hotplug.c b/drivers/gpu/drm/i915/intel_hotplug.c index 648a13c6043c0..9a80181302372 100644 --- a/drivers/gpu/drm/i915/intel_hotplug.c +++ b/drivers/gpu/drm/i915/intel_hotplug.c @@ -228,7 +228,9 @@ static void intel_hpd_irq_storm_reenable_work(struct work_struct *work) drm_for_each_connector_iter(connector, &conn_iter) { struct intel_connector *intel_connector = to_intel_connector(connector); - if (intel_connector->encoder->hpd_pin == pin) { + /* Don't check MST ports, they don't have pins */ + if (!intel_connector->mst_port && + intel_connector->encoder->hpd_pin == pin) { if (connector->polled != intel_connector->polled) 
DRM_DEBUG_DRIVER("Reenabling HPD on connector %s\n", connector->name); @@ -395,37 +397,54 @@ void intel_hpd_irq_handler(struct drm_i915_private *dev_priv, struct intel_encoder *encoder; bool storm_detected = false; bool queue_dig = false, queue_hp = false; + u32 long_hpd_pulse_mask = 0; + u32 short_hpd_pulse_mask = 0; + enum hpd_pin pin; if (!pin_mask) return; spin_lock(&dev_priv->irq_lock); + + /* + * Determine whether ->hpd_pulse() exists for each pin, and + * whether we have a short or a long pulse. This is needed + * as each pin may have up to two encoders (HDMI and DP) and + * only the one of them (DP) will have ->hpd_pulse(). + */ for_each_intel_encoder(&dev_priv->drm, encoder) { - enum hpd_pin pin = encoder->hpd_pin; bool has_hpd_pulse = intel_encoder_has_hpd_pulse(encoder); + enum port port = encoder->port; + bool long_hpd; + pin = encoder->hpd_pin; if (!(BIT(pin) & pin_mask)) continue; - if (has_hpd_pulse) { - bool long_hpd = long_mask & BIT(pin); - enum port port = encoder->port; + if (!has_hpd_pulse) + continue; - DRM_DEBUG_DRIVER("digital hpd port %c - %s\n", port_name(port), - long_hpd ? "long" : "short"); - /* - * For long HPD pulses we want to have the digital queue happen, - * but we still want HPD storm detection to function. - */ - queue_dig = true; - if (long_hpd) { - dev_priv->hotplug.long_port_mask |= (1 << port); - } else { - /* for short HPD just trigger the digital queue */ - dev_priv->hotplug.short_port_mask |= (1 << port); - continue; - } + long_hpd = long_mask & BIT(pin); + + DRM_DEBUG_DRIVER("digital hpd port %c - %s\n", port_name(port), + long_hpd ? 
"long" : "short"); + queue_dig = true; + + if (long_hpd) { + long_hpd_pulse_mask |= BIT(pin); + dev_priv->hotplug.long_port_mask |= BIT(port); + } else { + short_hpd_pulse_mask |= BIT(pin); + dev_priv->hotplug.short_port_mask |= BIT(port); } + } + + /* Now process each pin just once */ + for_each_hpd_pin(pin) { + bool long_hpd; + + if (!(BIT(pin) & pin_mask)) + continue; if (dev_priv->hotplug.stats[pin].state == HPD_DISABLED) { /* @@ -442,11 +461,22 @@ void intel_hpd_irq_handler(struct drm_i915_private *dev_priv, if (dev_priv->hotplug.stats[pin].state != HPD_ENABLED) continue; - if (!has_hpd_pulse) { + /* + * Delegate to ->hpd_pulse() if one of the encoders for this + * pin has it, otherwise let the hotplug_work deal with this + * pin directly. + */ + if (((short_hpd_pulse_mask | long_hpd_pulse_mask) & BIT(pin))) { + long_hpd = long_hpd_pulse_mask & BIT(pin); + } else { dev_priv->hotplug.event_bits |= BIT(pin); + long_hpd = true; queue_hp = true; } + if (!long_hpd) + continue; + if (intel_hpd_irq_storm_detect(dev_priv, pin)) { dev_priv->hotplug.event_bits &= ~BIT(pin); storm_detected = true; diff --git a/drivers/gpu/drm/i915/intel_initial_modeset.c b/drivers/gpu/drm/i915/intel_initial_modeset.c new file mode 100644 index 0000000000000..4a3a6e7b0f9b0 --- /dev/null +++ b/drivers/gpu/drm/i915/intel_initial_modeset.c @@ -0,0 +1,489 @@ +/* + * + * Copyright (c) 2016 Intel Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or 
substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +/** + * DOC: Boot-time mode setting. + * + * There exists a use case where the kernel graphics needs to be initialized + * with a valid display configuration with full display pipeline programming + * in place before user space is initialized and without a fbdev & fb console. + * + * The primary motivation is to allow early user space applications to + * display a frame (or frames) as soon as possible after user space starts. + * Eliminating the time it takes userspace to program the display configuration + * benefits this use case. + * + * By doing all the display programming in the kernel, it can be done in + * parallel with other kernel startup tasks without adding significant + * elapshed time before user space starts. + */ + +#include "intel_drv.h" +#include "i915_drv.h" + +static inline struct drm_encoder *get_encoder(struct drm_connector *connector) +{ + struct intel_encoder *encoder; + + encoder = intel_attached_encoder(connector); + + return &encoder->base; +} + +/* + * This makes use of the video= kernel command line to determine what + * connectors to configure. See Documentation/fb/modedb.txt for details + * on the format. 
There are 3 specific cases that are used: + * + * 1) video= + * - assume monitor is connected, use EDID preferred mode + * 2) video= + * - use regardless of monitor connected, use EDID preferred mode + * 3) video=cmdline_mode; + + fb_get_options(connector->name, &option); + if (option) { + switch (connector->force) { + + case DRM_FORCE_OFF: + return false; + case DRM_FORCE_ON: + case DRM_FORCE_ON_DIGITAL: + return true; + case DRM_FORCE_UNSPECIFIED: + break; + } + + connector->status = connector->funcs->detect(connector, true); + if (connector->status != connector_status_connected) { + connector->force = cl_mode->force; + connector->status = connector_status_connected; + } + return true; + } + + return false; +} + +static bool attach_crtc(struct drm_device *dev, struct drm_encoder *encoder, + uint32_t *used_crtcs) +{ + struct drm_crtc *possible_crtc; + + if (encoder->crtc != NULL && + !(*used_crtcs & drm_crtc_mask(encoder->crtc))) { + *used_crtcs |= drm_crtc_mask(encoder->crtc); + return true; + } + + drm_for_each_crtc(possible_crtc, dev) { + if (!(encoder->possible_crtcs & drm_crtc_mask(possible_crtc)) + || (*used_crtcs & drm_crtc_mask(possible_crtc))) + continue; + *used_crtcs |= drm_crtc_mask(possible_crtc); + encoder->crtc = possible_crtc; + return true; + } + + return false; +} + +static struct drm_display_mode *get_modeline(struct drm_i915_private *dev_priv, + struct drm_connector *connector, + int width, int height) +{ + struct drm_display_mode *mode; + struct drm_cmdline_mode *cl_mode = &connector->cmdline_mode; + + /* + * fill_modes() takes a bit of time but is necessary. + * It is reading the EDID (or loading the EDID firmware blob + * and building the connector mode list. The time can be + * minimized by using a small EDID blob built into the kernel. + */ + + connector->funcs->fill_modes(connector, width, height); + + /* + * Search the mode list. If a mode was specified using the + * video= command line, use that. 
Otherwise look for the + * preferred mode. + * + * x[M][R][-][@][i][m][eDd] + */ + list_for_each_entry(mode, &connector->modes, head) { + if (cl_mode && cl_mode->specified && + cl_mode->refresh_specified) { + if (mode->hdisplay == cl_mode->xres && + mode->vdisplay == cl_mode->yres && + mode->vrefresh == cl_mode->refresh) + return mode; + } else if (cl_mode && cl_mode->specified) { + if (mode->hdisplay == cl_mode->xres && + mode->vdisplay == cl_mode->yres) + return mode; + } else { + if (mode->type & DRM_MODE_TYPE_PREFERRED) + return mode; + } + } + + DRM_ERROR("Failed to find a valid mode.\n"); + return NULL; +} + +static int update_crtc_state(struct drm_atomic_state *state, + struct drm_display_mode *mode, + struct drm_crtc *crtc) +{ + struct drm_crtc_state *crtc_state; + int ret; + + crtc_state = drm_atomic_get_crtc_state(state, crtc); + if (IS_ERR(crtc_state)) + return PTR_ERR(crtc_state); + + ret = drm_atomic_set_mode_for_crtc(crtc_state, mode); + if (ret) { + crtc_state->active = false; + return ret; + } + + crtc_state->active = true; + + if (!IS_GEN9(to_i915(state->dev))) + return 0; + + WARN_ON(ret); + + return 0; +} + +static int update_connector_state(struct drm_atomic_state *state, + struct drm_connector *connector, + struct drm_crtc *crtc) +{ + struct drm_connector_state *conn_state; + int ret; + + conn_state = drm_atomic_get_connector_state(state, connector); + if (IS_ERR(conn_state)) { + DRM_DEBUG_KMS("failed to get connector %s state\n", + connector->name); + return PTR_ERR(conn_state); + } + + ret = drm_atomic_set_crtc_for_connector(conn_state, crtc); + if (ret) { + DRM_DEBUG_KMS("failed to set crtc for connector\n"); + return ret; + } + + return 0; +} + +static int update_primary_plane_state(struct drm_atomic_state *state, + struct drm_crtc *crtc, + struct drm_display_mode *mode, + struct drm_framebuffer *fb) +{ + int hdisplay, vdisplay; + struct drm_plane_state *primary_state; + int ret; + + primary_state = drm_atomic_get_plane_state(state, 
crtc->primary); + ret = drm_atomic_set_crtc_for_plane(primary_state, crtc); + if (ret) + return ret; + drm_mode_get_hv_timing(mode, &hdisplay, &vdisplay); + drm_atomic_set_fb_for_plane(primary_state, fb); + primary_state->crtc_x = 0; + primary_state->crtc_y = 0; + primary_state->crtc_w = hdisplay; + primary_state->crtc_h = vdisplay; + primary_state->src_x = 0 << 16; + primary_state->src_y = 0 << 16; + primary_state->src_w = hdisplay << 16; + primary_state->src_h = vdisplay << 16; + primary_state->rotation = DRM_MODE_ROTATE_0; + + return 0; +} + +static int update_atomic_state(struct drm_device *dev, + struct drm_atomic_state *state, + struct drm_connector *connector, + struct drm_display_mode *mode) +{ + struct drm_framebuffer *fb = NULL; + struct drm_crtc *crtc; + int ret; + + if (get_encoder(connector)) + crtc = get_encoder(connector)->crtc; + else + return -EINVAL; + + ret = update_crtc_state(state, mode, crtc); + if (ret) + return ret; + + /* attach connector to atomic state */ + ret = update_connector_state(state, connector, crtc); + if (ret) + return ret; + + /* Set up primary plane if a framebuffer is allocated */ + if (fb) { + ret = update_primary_plane_state(state, crtc, mode, fb); + if (ret) + return ret; + } + + return 0; +} + + +static int disable_planes(struct drm_device *dev, + struct drm_atomic_state *state) +{ + struct drm_plane *plane; + int ret; + + drm_for_each_plane(plane, dev) { + struct drm_plane_state *plane_state; + + plane->old_fb = plane->fb; + + plane_state = drm_atomic_get_plane_state(state, plane); + if (IS_ERR(plane_state)) { + return PTR_ERR(plane_state); + } + + ret = drm_atomic_set_crtc_for_plane(plane_state, NULL); + if (ret != 0) + return ret; + + drm_atomic_set_fb_for_plane(plane_state, NULL); + } + + return 0; +} + + +/* + * The modeset_config is scheduled to run via an async + * schedule call from the main driver load. 
+ */ +static void modeset_config_fn(struct work_struct *work) +{ + struct drm_i915_private *dev_priv = + container_of(work, typeof(*dev_priv), initial_modeset_work); + struct drm_device *dev = &dev_priv->drm; + struct drm_connector *connector; + struct drm_connector_list_iter conn_iter; + struct drm_atomic_state *state; + struct drm_modeset_acquire_ctx ctx; + struct drm_plane *plane; + int ret; + bool found = false; + uint32_t used_crtcs = 0; + struct drm_display_mode *connector_mode[20]; + struct drm_encoder *encoder; + struct drm_display_mode *mode; + + memset(connector_mode, 0, sizeof(connector_mode)); + mutex_lock(&dev->mode_config.mutex); + drm_connector_list_iter_begin(dev, &conn_iter); + drm_for_each_connector_iter(connector, &conn_iter) { + if (use_connector(connector)) { + encoder = get_encoder(connector); + if (!encoder) + continue; + if (!attach_crtc(dev, encoder, &used_crtcs)) + continue; + mode = get_modeline(dev_priv, connector, + dev->mode_config.max_width, + dev->mode_config.max_height); + if (mode) { + found = true; + WARN_ON(connector->index >= 20); + connector_mode[connector->index] = mode; + } + } + } + drm_connector_list_iter_end(&conn_iter); + if (!found) { + used_crtcs = 0; + /* Try to detect attached connectors */ + drm_connector_list_iter_begin(dev, &conn_iter); + drm_for_each_connector_iter(connector, &conn_iter) { + drm_modeset_lock(&dev->mode_config.connection_mutex, NULL); + if (connector->funcs && connector->funcs->detect) + connector->status = connector->funcs->detect(connector, + true); + else if (connector->helper_private && connector->helper_private->detect_ctx) + connector->status = connector->helper_private->detect_ctx(connector, + NULL, true); + drm_modeset_unlock(&dev->mode_config.connection_mutex); + + if (connector->status == connector_status_connected) { + encoder = get_encoder(connector); + if (!encoder) + continue; + if (!attach_crtc(dev, encoder, &used_crtcs)) + continue; + mode = get_modeline(dev_priv, connector, + 
dev->mode_config.max_width, + dev->mode_config.max_height); + if (mode) { + found = true; + WARN_ON(connector->index >= 20); + connector_mode[connector->index] = mode; + } + } + } + drm_connector_list_iter_end(&conn_iter); + } + mutex_unlock(&dev->mode_config.mutex); + + if (!found) + return; + + state = drm_atomic_state_alloc(dev); + if (!state) + return; + + mutex_lock(&dev->mode_config.mutex); + + drm_modeset_acquire_init(&ctx, 0); + state->acquire_ctx = &ctx; +retry: + ret = drm_modeset_lock_all_ctx(dev, &ctx); + if (ret == -EDEADLK) { + drm_modeset_backoff(&ctx); + goto retry; + } else if (ret) { + goto out; + } + + ret = disable_planes(dev, state); + if (ret) + goto fail; + + /* + * For each connector that we want to set up, update the atomic + * state to include the connector and crtc mode. + */ + drm_connector_list_iter_begin(dev, &conn_iter); + drm_for_each_connector_iter(connector, &conn_iter) { + if (connector_mode[connector->index]) { + ret = update_atomic_state(dev, state, connector, + connector_mode[connector->index]); + if (ret) + goto fail; + } + } + drm_connector_list_iter_end(&conn_iter); + + ret = drm_atomic_commit(state); + if (ret) + goto fail; + goto out; + +fail: + if (ret == -EDEADLK) { + DRM_DEBUG_KMS("modeset commit deadlock, retry...\n"); + drm_modeset_backoff(&ctx); + drm_atomic_state_clear(state); + goto retry; + } + +out: + if (!ret) { + drm_for_each_plane(plane, dev) { + if (plane->old_fb) + drm_framebuffer_unreference(plane->old_fb); + } + } + drm_modeset_drop_locks(&ctx); + drm_modeset_acquire_fini(&ctx); + drm_atomic_state_put(state); + + mutex_unlock(&dev->mode_config.mutex); +} + +void intel_initial_mode_config_init(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = to_i915(dev); + + INIT_WORK(&dev_priv->initial_modeset_work, modeset_config_fn); + schedule_work(&dev_priv->initial_modeset_work); +} + +static void initial_mode_destroy(struct drm_device *dev) +{ + struct drm_atomic_state *state; + struct 
drm_modeset_acquire_ctx ctx; + int ret; + + state = drm_atomic_state_alloc(dev); + if (!state) + return; + + drm_modeset_acquire_init(&ctx, 0); + state->acquire_ctx = &ctx; + drm_modeset_lock_all_ctx(dev, &ctx); + +retry: + ret = disable_planes(dev, state); + if (ret == -EDEADLK) { + drm_modeset_backoff(&ctx); + drm_atomic_state_clear(state); + goto retry; + } + + ret = drm_atomic_commit(state); + if (ret == -EDEADLK) { + drm_modeset_backoff(&ctx); + drm_atomic_state_clear(state); + goto retry; + } + + drm_modeset_drop_locks(&ctx); + drm_modeset_acquire_fini(&ctx); +} + +void intel_initial_mode_config_fini(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = to_i915(dev); + + flush_work(&dev_priv->initial_modeset_work); + initial_mode_destroy(dev); +} diff --git a/drivers/gpu/drm/i915/intel_lpe_audio.c b/drivers/gpu/drm/i915/intel_lpe_audio.c index cdf19553ffacd..5d5336fbe7b05 100644 --- a/drivers/gpu/drm/i915/intel_lpe_audio.c +++ b/drivers/gpu/drm/i915/intel_lpe_audio.c @@ -297,8 +297,10 @@ void intel_lpe_audio_teardown(struct drm_i915_private *dev_priv) lpe_audio_platdev_destroy(dev_priv); irq_free_desc(dev_priv->lpe_audio.irq); -} + dev_priv->lpe_audio.irq = -1; + dev_priv->lpe_audio.platdev = NULL; +} /** * intel_lpe_audio_notify() - notify lpe audio event diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c index 174479232e943..2a6b9fbbf2c55 100644 --- a/drivers/gpu/drm/i915/intel_lrc.c +++ b/drivers/gpu/drm/i915/intel_lrc.c @@ -186,7 +186,8 @@ static inline bool need_preempt(const struct intel_engine_cs *engine, const struct i915_request *last, int prio) { - return (intel_engine_has_preemption(engine) && + return (!intel_vgpu_active(engine->i915) && + intel_engine_has_preemption(engine) && __execlists_need_preempt(prio, rq_prio(last)) && !i915_request_completed(last)); } @@ -424,7 +425,8 @@ static u64 execlists_update_context(struct i915_request *rq) reg_state[CTX_RING_TAIL+1] = intel_ring_set_tail(rq->ring, rq->tail); 
- /* True 32b PPGTT with dynamic page allocation: update PDP + /* + * True 32b PPGTT with dynamic page allocation: update PDP * registers and point the unallocated PDPs to scratch page. * PML4 is allocated during ppgtt init, so this is not needed * in 48-bit mode. @@ -432,6 +434,22 @@ static u64 execlists_update_context(struct i915_request *rq) if (ppgtt && !i915_vm_is_48bit(&ppgtt->vm)) execlists_update_context_pdps(ppgtt, reg_state); + /* + * Make sure the context image is complete before we submit it to HW. + * + * Ostensibly, writes (including the WCB) should be flushed prior to + * an uncached write such as our mmio register access, the empirical + * evidence (esp. on Braswell) suggests that the WC write into memory + * may not be visible to the HW prior to the completion of the UC + * register write and that we may begin execution from the context + * before its image is complete leading to invalid PD chasing. + * + * Furthermore, Braswell, at least, wants a full mb to be sure that + * the writes are coherent in memory (visible to the GPU) prior to + * execution, and not just visible to other CPUs (as is the result of + * wmb). 
+ */ + mb(); return ce->lrc_desc; } @@ -451,6 +469,8 @@ static void execlists_submit_ports(struct intel_engine_cs *engine) struct intel_engine_execlists *execlists = &engine->execlists; struct execlist_port *port = execlists->port; unsigned int n; + u32 descs[4]; + int i = 0; /* * We can skip acquiring intel_runtime_pm_get() here as it was taken @@ -493,10 +513,27 @@ static void execlists_submit_ports(struct intel_engine_cs *engine) GEM_BUG_ON(!n); desc = 0; } + if (intel_vgpu_active(engine->i915) && + PVMMIO_LEVEL(engine->i915, PVMMIO_ELSP_SUBMIT)) { + BUG_ON(i >= 4); + descs[i] = upper_32_bits(desc); + descs[i + 1] = lower_32_bits(desc); + i += 2; + continue; + } write_desc(execlists, desc, n); } - + if (intel_vgpu_active(engine->i915) && + PVMMIO_LEVEL(engine->i915, PVMMIO_ELSP_SUBMIT)) { + u32 __iomem *elsp_data = engine->i915->shared_page->elsp_data; + spin_lock(&engine->i915->shared_page_lock); + writel(descs[0], elsp_data); + writel(descs[1], elsp_data + 1); + writel(descs[2], elsp_data + 2); + writel(descs[3], execlists->submit_reg); + spin_unlock(&engine->i915->shared_page_lock); + } /* we need to manually load the submit queue */ if (execlists->ctrl_reg) writel(EL_CTRL_LOAD, execlists->ctrl_reg); @@ -552,10 +589,24 @@ static void inject_preempt_context(struct intel_engine_cs *engine) * the state of the GPU is known (idle). 
*/ GEM_TRACE("%s\n", engine->name); - for (n = execlists_num_ports(execlists); --n; ) - write_desc(execlists, 0, n); - write_desc(execlists, ce->lrc_desc, n); + if (intel_vgpu_active(engine->i915) && + PVMMIO_LEVEL(engine->i915, PVMMIO_ELSP_SUBMIT)) { + u32 __iomem *elsp_data = engine->i915->shared_page->elsp_data; + + spin_lock(&engine->i915->shared_page_lock); + writel(0, elsp_data); + writel(0, elsp_data + 1); + writel(upper_32_bits(ce->lrc_desc), elsp_data + 2); + writel(lower_32_bits(ce->lrc_desc), execlists->submit_reg); + spin_unlock(&engine->i915->shared_page_lock); + + } else { + for (n = execlists_num_ports(execlists); --n; ) + write_desc(execlists, 0, n); + + write_desc(execlists, ce->lrc_desc, n); + } /* we need to manually load the submit queue */ if (execlists->ctrl_reg) @@ -565,6 +616,84 @@ static void inject_preempt_context(struct intel_engine_cs *engine) execlists_set_active(execlists, EXECLISTS_ACTIVE_PREEMPT); } +static int try_preempt_reset(struct intel_engine_execlists *execlists) +{ + struct tasklet_struct * const t = &execlists->tasklet; + int err = -EBUSY; + + if (tasklet_trylock(t)) { + struct intel_engine_cs *engine = + container_of(execlists, typeof(*engine), execlists); + const unsigned int bit = I915_RESET_ENGINE + engine->id; + unsigned long *lock = &engine->i915->gpu_error.flags; + + t->func(t->data); + if (!execlists_is_active(execlists, + EXECLISTS_ACTIVE_PREEMPT_TIMEOUT)) { + /* Nothing to do; the tasklet was just delayed. 
*/ + err = 0; + } else if (!test_and_set_bit(bit, lock)) { + tasklet_disable_nosync(t); + err = i915_reset_engine(engine, "preemption time out"); + tasklet_enable(t); + + clear_bit(bit, lock); + wake_up_bit(lock, bit); + } + + tasklet_unlock(t); + } + + return err; +} + +static enum hrtimer_restart preempt_timeout(struct hrtimer *hrtimer) +{ + struct intel_engine_execlists *execlists = + container_of(hrtimer, typeof(*execlists), preempt_timer); + + GEM_TRACE("%s active=%x\n", + container_of(execlists, + struct intel_engine_cs, + execlists)->name, + execlists->active); + + if (!execlists_is_active(execlists, EXECLISTS_ACTIVE_PREEMPT_TIMEOUT)) + return HRTIMER_NORESTART; + + if (GEM_SHOW_DEBUG()) { + struct intel_engine_cs *engine = + container_of(execlists, typeof(*engine), execlists); + struct drm_printer p = drm_debug_printer(__func__); + + intel_engine_dump(engine, &p, "%s\n", engine->name); + } + + if (try_preempt_reset(execlists)) + queue_work(system_highpri_wq, &execlists->preempt_reset); + + return HRTIMER_NORESTART; +} + +static void preempt_reset(struct work_struct *work) +{ + struct intel_engine_execlists *execlists = + container_of(work, typeof(*execlists), preempt_reset); + struct intel_engine_cs *engine = + container_of(execlists, struct intel_engine_cs, execlists); + + GEM_TRACE("%s\n", engine->name); + + tasklet_disable(&execlists->tasklet); + + execlists->tasklet.func(execlists->tasklet.data); + if (execlists_is_active(execlists, EXECLISTS_ACTIVE_PREEMPT_TIMEOUT)) + i915_handle_error(engine->i915, BIT(engine->id), 0, + "preemption time out on %s", engine->name); + + tasklet_enable(&execlists->tasklet); +} + static void complete_preempt_context(struct intel_engine_execlists *execlists) { GEM_BUG_ON(!execlists_is_active(execlists, EXECLISTS_ACTIVE_PREEMPT)); @@ -658,7 +787,7 @@ static void execlists_dequeue(struct intel_engine_cs *engine) * priorities of the ports haven't been switch. 
*/ if (port_count(&port[1])) - return; + goto clear_preempt_timeout; /* * WaIdleLiteRestore:bdw,skl @@ -765,6 +894,9 @@ static void execlists_dequeue(struct intel_engine_cs *engine) GEM_BUG_ON(rb_first_cached(&execlists->queue) && !port_isset(execlists->port)); +clear_preempt_timeout: + execlists_clear_active(execlists, EXECLISTS_ACTIVE_PREEMPT_TIMEOUT); + /* Re-evaluate the executing context setup after each preemptive kick */ if (last) execlists_user_begin(execlists, execlists->port); @@ -1085,8 +1217,30 @@ static void queue_request(struct intel_engine_cs *engine, &lookup_priolist(engine, prio)->requests); } -static void __update_queue(struct intel_engine_cs *engine, int prio) +static void __update_queue(struct intel_engine_cs *engine, + int prio, unsigned int timeout) { + struct intel_engine_execlists * const execlists = &engine->execlists; + + GEM_TRACE("%s prio=%d (previous=%d)\n", + engine->name, prio, execlists->queue_priority); + + if (unlikely(execlists_is_active(execlists, + EXECLISTS_ACTIVE_PREEMPT_TIMEOUT))) + hrtimer_cancel(&execlists->preempt_timer); + + /* Set a timer to force preemption vs hostile userspace */ + if (timeout && + __execlists_need_preempt(prio, execlists->queue_priority)) { + GEM_TRACE("%s preempt timeout=%uns\n", engine->name, timeout); + + execlists_set_active(execlists, + EXECLISTS_ACTIVE_PREEMPT_TIMEOUT); + hrtimer_start(&execlists->preempt_timer, + ns_to_ktime(timeout), + HRTIMER_MODE_REL); + } + engine->execlists.queue_priority = prio; } @@ -1103,28 +1257,29 @@ static void __submit_queue_imm(struct intel_engine_cs *engine) tasklet_hi_schedule(&execlists->tasklet); } -static void submit_queue(struct intel_engine_cs *engine, int prio) +static void submit_queue(struct intel_engine_cs *engine, + int prio, unsigned int timeout) { if (prio > engine->execlists.queue_priority) { - __update_queue(engine, prio); + __update_queue(engine, prio, timeout); __submit_queue_imm(engine); } } -static void execlists_submit_request(struct 
i915_request *request) +static void execlists_submit_request(struct i915_request *rq) { - struct intel_engine_cs *engine = request->engine; + struct intel_engine_cs *engine = rq->engine; unsigned long flags; /* Will be called from irq-context when using foreign fences. */ spin_lock_irqsave(&engine->timeline.lock, flags); - queue_request(engine, &request->sched, rq_prio(request)); + queue_request(engine, &rq->sched, rq_prio(rq)); GEM_BUG_ON(RB_EMPTY_ROOT(&engine->execlists.queue.rb_root)); - GEM_BUG_ON(list_empty(&request->sched.link)); + GEM_BUG_ON(list_empty(&rq->sched.link)); - submit_queue(engine, rq_prio(request)); + submit_queue(engine, rq_prio(rq), rq->gem_context->preempt_timeout); spin_unlock_irqrestore(&engine->timeline.lock, flags); } @@ -1150,7 +1305,8 @@ sched_lock_engine(struct i915_sched_node *node, struct intel_engine_cs *locked) } static void execlists_schedule(struct i915_request *request, - const struct i915_sched_attr *attr) + const struct i915_sched_attr *attr, + unsigned int timeout) { struct i915_priolist *uninitialized_var(pl); struct intel_engine_cs *engine, *last; @@ -1254,7 +1410,7 @@ static void execlists_schedule(struct i915_request *request, if (prio > engine->execlists.queue_priority && i915_sw_fence_done(&sched_to_request(node)->submit)) { /* defer submission until after all of our updates */ - __update_queue(engine, prio); + __update_queue(engine, prio, timeout); tasklet_hi_schedule(&engine->execlists.tasklet); } } @@ -2369,6 +2525,16 @@ logical_ring_default_irqs(struct intel_engine_cs *engine) engine->irq_keep_mask = GT_CONTEXT_SWITCH_INTERRUPT << shift; } +static void i915_error_reset(struct work_struct *work) { + struct intel_engine_cs *engine = + container_of(work, struct intel_engine_cs, + reset_work); + i915_handle_error(engine->i915, 1 << engine->id, + I915_ERROR_CAPTURE, + "Received error interrupt from engine %d", + engine->id); +} + static void logical_ring_setup(struct intel_engine_cs *engine) { @@ -2380,8 +2546,13 @@ 
logical_ring_setup(struct intel_engine_cs *engine) tasklet_init(&engine->execlists.tasklet, execlists_submission_tasklet, (unsigned long)engine); + INIT_WORK(&engine->execlists.preempt_reset, preempt_reset); + engine->execlists.preempt_timer.function = preempt_timeout; + logical_ring_default_vfuncs(engine); logical_ring_default_irqs(engine); + + INIT_WORK(&engine->reset_work, i915_error_reset); } static bool csb_force_mmio(struct drm_i915_private *i915) @@ -2712,6 +2883,14 @@ populate_lr_context(struct i915_gem_context *ctx, _MASKED_BIT_ENABLE(CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT | CTX_CTRL_ENGINE_CTX_SAVE_INHIBIT); + /* write the context's pid and hw_id/cid to the per-context HWS page */ + if(intel_vgpu_active(engine->i915) && pid_nr(ctx->pid)) { + *(u32*)(vaddr + LRC_PPHWSP_PN * PAGE_SIZE + I915_GEM_HWS_PID_ADDR) + = pid_nr(ctx->pid) & 0x3fffff; + *(u32*)(vaddr + LRC_PPHWSP_PN * PAGE_SIZE + I915_GEM_HWS_CID_ADDR) + = ctx->hw_id & 0x3fffff; + } + err_unpin_ctx: i915_gem_object_unpin_map(ctx_obj); return ret; diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c index 43ae9de12ba3e..da62058b52941 100644 --- a/drivers/gpu/drm/i915/intel_pm.c +++ b/drivers/gpu/drm/i915/intel_pm.c @@ -33,6 +33,10 @@ #include #include +#if IS_ENABLED(CONFIG_DRM_I915_GVT) +#include "gvt.h" +#endif + /** * DOC: RC6 * @@ -803,11 +807,19 @@ static int intel_wm_num_levels(struct drm_i915_private *dev_priv) static bool intel_wm_plane_visible(const struct intel_crtc_state *crtc_state, const struct intel_plane_state *plane_state) { - struct intel_plane *plane = to_intel_plane(plane_state->base.plane); + struct intel_plane *plane = plane_state ? 
to_intel_plane(plane_state->base.plane) : NULL; /* FIXME check the 'enable' instead */ if (!crtc_state->base.active) return false; + if (!plane_state && i915_modparams.avail_planes_per_pipe) { + return true; + } + + if(!plane_state) { + DRM_ERROR("intel_wm_plane_visible(): plane_state==NULL and return 0\n"); + return false; + } /* * Treat cursor with fb as always visible since cursor updates @@ -2492,6 +2504,9 @@ static uint32_t ilk_compute_pri_wm(const struct intel_crtc_state *cstate, uint32_t method1, method2; int cpp; + if (mem_value == 0) + return U32_MAX; + if (!intel_wm_plane_visible(cstate, pstate)) return 0; @@ -2521,6 +2536,9 @@ static uint32_t ilk_compute_spr_wm(const struct intel_crtc_state *cstate, uint32_t method1, method2; int cpp; + if (mem_value == 0) + return U32_MAX; + if (!intel_wm_plane_visible(cstate, pstate)) return 0; @@ -2544,6 +2562,9 @@ static uint32_t ilk_compute_cur_wm(const struct intel_crtc_state *cstate, { int cpp; + if (mem_value == 0) + return U32_MAX; + if (!intel_wm_plane_visible(cstate, pstate)) return 0; @@ -2942,8 +2963,8 @@ static void intel_print_wm_latency(struct drm_i915_private *dev_priv, unsigned int latency = wm[level]; if (latency == 0) { - DRM_ERROR("%s WM%d latency not provided\n", - name, level); + DRM_DEBUG_KMS("%s WM%d latency not provided\n", + name, level); continue; } @@ -2998,6 +3019,34 @@ static void snb_wm_latency_quirk(struct drm_i915_private *dev_priv) intel_print_wm_latency(dev_priv, "Cursor", dev_priv->wm.cur_latency); } +static void snb_wm_lp3_irq_quirk(struct drm_i915_private *dev_priv) +{ + /* + * On some SNB machines (Thinkpad X220 Tablet at least) + * LP3 usage can cause vblank interrupts to be lost. + * The DEIIR bit will go high but it looks like the CPU + * never gets interrupted. + * + * It's not clear whether other interrupt source could + * be affected or if this is somehow limited to vblank + * interrupts only. To play it safe we disable LP3 + * watermarks entirely. 
+ */ + if (dev_priv->wm.pri_latency[3] == 0 && + dev_priv->wm.spr_latency[3] == 0 && + dev_priv->wm.cur_latency[3] == 0) + return; + + dev_priv->wm.pri_latency[3] = 0; + dev_priv->wm.spr_latency[3] = 0; + dev_priv->wm.cur_latency[3] = 0; + + DRM_DEBUG_KMS("LP3 watermarks disabled due to potential for lost interrupts\n"); + intel_print_wm_latency(dev_priv, "Primary", dev_priv->wm.pri_latency); + intel_print_wm_latency(dev_priv, "Sprite", dev_priv->wm.spr_latency); + intel_print_wm_latency(dev_priv, "Cursor", dev_priv->wm.cur_latency); +} + static void ilk_setup_wm_latency(struct drm_i915_private *dev_priv) { intel_read_wm_latency(dev_priv, dev_priv->wm.pri_latency); @@ -3014,8 +3063,10 @@ static void ilk_setup_wm_latency(struct drm_i915_private *dev_priv) intel_print_wm_latency(dev_priv, "Sprite", dev_priv->wm.spr_latency); intel_print_wm_latency(dev_priv, "Cursor", dev_priv->wm.cur_latency); - if (IS_GEN6(dev_priv)) + if (IS_GEN6(dev_priv)) { snb_wm_latency_quirk(dev_priv); + snb_wm_lp3_irq_quirk(dev_priv); + } } static void skl_setup_wm_latency(struct drm_i915_private *dev_priv) @@ -3706,7 +3757,6 @@ bool intel_can_enable_sagv(struct drm_atomic_state *state) struct intel_crtc *crtc; struct intel_plane *plane; struct intel_crtc_state *cstate; - enum pipe pipe; int level, latency; int sagv_block_time_us; @@ -3732,8 +3782,10 @@ bool intel_can_enable_sagv(struct drm_atomic_state *state) return false; /* Since we're now guaranteed to only have one active CRTC... 
*/ - pipe = ffs(intel_state->active_crtcs) - 1; - crtc = intel_get_crtc_for_pipe(dev_priv, pipe); + crtc = get_intel_crtc_from_index(dev, + ffs(intel_state->active_crtcs) - 1); + if (!crtc) + return false; cstate = to_intel_crtc_state(crtc->base.state); if (crtc->base.state->adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE) @@ -3755,6 +3807,7 @@ bool intel_can_enable_sagv(struct drm_atomic_state *state) latency = dev_priv->wm.skl_latency[level]; if (skl_needs_memory_bw_wa(intel_state) && + plane->base.state->fb && plane->base.state->fb->modifier == I915_FORMAT_MOD_X_TILED) latency += 15; @@ -3968,11 +4021,19 @@ static uint_fixed_16_16_t skl_plane_downscale_amount(const struct intel_crtc_state *cstate, const struct intel_plane_state *pstate) { - struct intel_plane *plane = to_intel_plane(pstate->base.plane); + struct intel_plane *plane = pstate ? to_intel_plane(pstate->base.plane) : NULL; uint32_t src_w, src_h, dst_w, dst_h; uint_fixed_16_16_t fp_w_ratio, fp_h_ratio; uint_fixed_16_16_t downscale_h, downscale_w; + if (!pstate && i915_modparams.avail_planes_per_pipe) { + return mul_fixed16(u32_to_fixed16(1), u32_to_fixed16(1)); + } + + if (WARN_ON_ONCE(!pstate)) { + return u32_to_fixed16(0); + } + if (WARN_ON(!intel_wm_plane_visible(cstate, pstate))) return u32_to_fixed16(0); @@ -4483,9 +4544,9 @@ skl_compute_plane_wm_params(const struct drm_i915_private *dev_priv, const struct intel_plane_state *intel_pstate, struct skl_wm_params *wp, int plane_id) { - struct intel_plane *plane = to_intel_plane(intel_pstate->base.plane); - const struct drm_plane_state *pstate = &intel_pstate->base; - const struct drm_framebuffer *fb = pstate->fb; + struct intel_plane *plane = intel_pstate ? to_intel_plane(intel_pstate->base.plane) : NULL; + const struct drm_plane_state *pstate = intel_pstate ? &intel_pstate->base : NULL; + const struct drm_framebuffer *fb = pstate ? 
pstate->fb : NULL; uint32_t interm_pbpl; struct intel_atomic_state *state = to_intel_atomic_state(cstate->base.state); @@ -4494,12 +4555,34 @@ skl_compute_plane_wm_params(const struct drm_i915_private *dev_priv, if (!intel_wm_plane_visible(cstate, intel_pstate)) return 0; + if (plane_id == 1 && !fb) { + DRM_DEBUG_KMS("Invalid fb for plane\n"); + return -EINVAL; + } + /* only NV12 format has two planes */ if (plane_id == 1 && fb->format->format != DRM_FORMAT_NV12) { DRM_DEBUG_KMS("Non NV12 format have single plane\n"); return -EINVAL; } + if (!intel_pstate && i915_modparams.avail_planes_per_pipe) { + wp->y_tiled = false; + wp->x_tiled = true; + wp->cpp = 4; + wp->y_min_scanlines = 8; + wp->rc_surface = fb ? fb->modifier == I915_FORMAT_MOD_Y_TILED_CCS || + fb->modifier == I915_FORMAT_MOD_Yf_TILED_CCS : 0; + wp->is_planar = fb ? fb->format->format == DRM_FORMAT_NV12 : 0; + wp->width = cstate->pipe_src_w; + wp->dbuf_block_size = 512; + goto calculate_wm; + } + + if (!fb || !intel_pstate) { + DRM_ERROR("invalid fb:%p intel_pstate:%p\n", fb, intel_pstate); + return -EINVAL; + } wp->y_tiled = fb->modifier == I915_FORMAT_MOD_Y_TILED || fb->modifier == I915_FORMAT_MOD_Yf_TILED || fb->modifier == I915_FORMAT_MOD_Y_TILED_CCS || @@ -4524,8 +4607,6 @@ skl_compute_plane_wm_params(const struct drm_i915_private *dev_priv, wp->width /= 2; wp->cpp = fb->format->cpp[plane_id]; - wp->plane_pixel_rate = skl_adjusted_plane_pixel_rate(cstate, - intel_pstate); if (INTEL_GEN(dev_priv) >= 11 && fb->modifier == I915_FORMAT_MOD_Yf_TILED && wp->cpp == 8) @@ -4533,7 +4614,7 @@ skl_compute_plane_wm_params(const struct drm_i915_private *dev_priv, else wp->dbuf_block_size = 512; - if (drm_rotation_90_or_270(pstate->rotation)) { + if (pstate && drm_rotation_90_or_270(pstate->rotation)) { switch (wp->cpp) { case 1: @@ -4556,6 +4637,9 @@ skl_compute_plane_wm_params(const struct drm_i915_private *dev_priv, if (apply_memory_bw_wa) wp->y_min_scanlines *= 2; +calculate_wm: + wp->plane_pixel_rate = 
skl_adjusted_plane_pixel_rate(cstate, + intel_pstate); wp->plane_bytes_per_line = wp->width * wp->cpp; if (wp->y_tiled) { interm_pbpl = DIV_ROUND_UP(wp->plane_bytes_per_line * @@ -4594,7 +4678,8 @@ static int skl_compute_plane_wm(const struct drm_i915_private *dev_priv, const struct skl_wm_level *result_prev, struct skl_wm_level *result /* out */) { - const struct drm_plane_state *pstate = &intel_pstate->base; + const struct drm_plane_state *pstate = intel_pstate ? &intel_pstate->base : NULL; + const struct drm_framebuffer *fb = pstate ? pstate->fb : NULL; uint32_t latency = dev_priv->wm.skl_latency[level]; uint_fixed_16_16_t method1, method2; uint_fixed_16_16_t selected_result; @@ -4647,7 +4732,7 @@ static int skl_compute_plane_wm(const struct drm_i915_private *dev_priv, wp->plane_blocks_per_line); /* Display WA #1125: skl,bxt,kbl,glk */ - if (level == 0 && wp->rc_surface) + if (fb && level == 0 && wp->rc_surface) res_blocks += fixed16_to_u32_round_up(wp->y_tile_minimum); /* Display WA #1126: skl,bxt,kbl,glk */ @@ -4705,12 +4790,16 @@ static int skl_compute_plane_wm(const struct drm_i915_private *dev_priv, if (level) { return 0; } else { - struct drm_plane *plane = pstate->plane; + struct drm_plane *plane = pstate ? 
pstate->plane : NULL; DRM_DEBUG_KMS("Requested display configuration exceeds system watermark limitations\n"); - DRM_DEBUG_KMS("[PLANE:%d:%s] blocks required = %u/%u, lines required = %u/31\n", + + if (plane) { + DRM_DEBUG_KMS("[PLANE:%d:%s] blocks required = %u/%u, lines required = %u/31\n", plane->base.id, plane->name, res_blocks, ddb_allocation, res_lines); + } + return -EINVAL; } } @@ -4741,18 +4830,16 @@ skl_compute_wm_levels(const struct drm_i915_private *dev_priv, const struct intel_plane_state *intel_pstate, const struct skl_wm_params *wm_params, struct skl_plane_wm *wm, - int plane_id) + int plane_id, + enum plane_id intel_plane_id) { struct intel_crtc *intel_crtc = to_intel_crtc(cstate->base.crtc); - struct drm_plane *plane = intel_pstate->base.plane; - struct intel_plane *intel_plane = to_intel_plane(plane); uint16_t ddb_blocks; enum pipe pipe = intel_crtc->pipe; int level, max_level = ilk_wm_max_level(dev_priv); - enum plane_id intel_plane_id = intel_plane->id; int ret; - if (WARN_ON(!intel_pstate->base.fb)) + if (WARN_ON(intel_pstate && !intel_pstate->base.fb)) return -EINVAL; ddb_blocks = plane_id ? 
@@ -4782,7 +4869,8 @@ skl_compute_wm_levels(const struct drm_i915_private *dev_priv, return ret; } - if (intel_pstate->base.fb->format->format == DRM_FORMAT_NV12) + if (intel_pstate && + intel_pstate->base.fb->format->format == DRM_FORMAT_NV12) wm->is_planar = true; return 0; @@ -4866,16 +4954,109 @@ static void skl_compute_transition_wm(struct intel_crtc_state *cstate, trans_wm->plane_en = false; } -static int skl_build_pipe_wm(struct intel_crtc_state *cstate, +static int skl_build_plane_wm(struct intel_crtc_state *cstate, + struct skl_ddb_allocation *ddb, + struct skl_pipe_wm *pipe_wm, + int pipe, + enum plane_id plane_id, + struct intel_plane_state *intel_pstate) +{ + struct drm_device *dev = cstate->base.crtc->dev; + const struct drm_i915_private *dev_priv = to_i915(dev); + struct skl_plane_wm *wm; + struct skl_wm_params wm_params; + uint16_t ddb_blocks; + int ret; + + wm = &pipe_wm->planes[plane_id]; + ddb_blocks = skl_ddb_entry_size(&ddb->plane[pipe][plane_id]); + + ret = skl_compute_plane_wm_params(dev_priv, cstate, + intel_pstate, &wm_params, 0); + if (ret) + return ret; + + ret = skl_compute_wm_levels(dev_priv, ddb, cstate, + intel_pstate, &wm_params, wm, 0, plane_id); + if (ret) + return ret; + + skl_compute_transition_wm(cstate, &wm_params, &wm->wm[0], + ddb_blocks, &wm->trans_wm); + + /* uv plane watermarks must also be validated for NV12/Planar */ + if (wm_params.is_planar) { + memset(&wm_params, 0, sizeof(struct skl_wm_params)); + wm->is_planar = true; + + ret = skl_compute_plane_wm_params(dev_priv, cstate, + intel_pstate, + &wm_params, 1); + if (ret) + return ret; + + ret = skl_compute_wm_levels(dev_priv, ddb, cstate, + intel_pstate, &wm_params, + wm, 1, plane_id); + if (ret) + return ret; + } + + return 0; +} + +static int skl_build_pipe_all_plane_wm(struct intel_crtc_state *cstate, struct skl_ddb_allocation *ddb, struct skl_pipe_wm *pipe_wm) { struct drm_device *dev = cstate->base.crtc->dev; - struct drm_crtc_state *crtc_state = &cstate->base; 
const struct drm_i915_private *dev_priv = to_i915(dev); + struct intel_crtc *crtc = to_intel_crtc(cstate->base.crtc); + struct drm_crtc_state *crtc_state = &cstate->base; + struct drm_plane *plane; + const struct drm_plane_state *pstate; + struct intel_plane_state *intel_pstate; + int pipe = crtc->pipe; + int plane_id; + int ret; + + memset(pipe_wm->planes, 0, sizeof(pipe_wm->planes)); + + /* + * Since Dom0 may not own all planes on this pipe, there will + * not be a valid intel_plane for the planes it doesn't own. + * Therefore, we have to pass NULL to skl_compute_wm_level() + * which will then know that this plane is not owned by Dom0 + * and hence will use width and height from the crtc and will + * also assume cpp = 4 and tiling = x_tiled. + */ + for_each_universal_plane(dev_priv, pipe, plane_id) { + intel_pstate = NULL; + + drm_atomic_crtc_state_for_each_plane_state(plane, pstate, crtc_state) { + if (plane_id == to_intel_plane(plane)->id) { + intel_pstate = to_intel_plane_state(pstate); + break; + } + } + + ret = skl_build_plane_wm(cstate, ddb, pipe_wm, + pipe, plane_id, (struct intel_plane_state *) intel_pstate); + if (ret) + return ret; + } + pipe_wm->linetime = skl_compute_linetime_wm(cstate); + + return 0; +} + +static int skl_build_pipe_wm(struct intel_crtc_state *cstate, + struct skl_ddb_allocation *ddb, + struct skl_pipe_wm *pipe_wm) +{ + struct drm_crtc_state *crtc_state = &cstate->base; struct drm_plane *plane; const struct drm_plane_state *pstate; - struct skl_plane_wm *wm; int ret; /* @@ -4888,43 +5069,12 @@ static int skl_build_pipe_wm(struct intel_crtc_state *cstate, const struct intel_plane_state *intel_pstate = to_intel_plane_state(pstate); enum plane_id plane_id = to_intel_plane(plane)->id; - struct skl_wm_params wm_params; enum pipe pipe = to_intel_crtc(cstate->base.crtc)->pipe; - uint16_t ddb_blocks; - - wm = &pipe_wm->planes[plane_id]; - ddb_blocks = skl_ddb_entry_size(&ddb->plane[pipe][plane_id]); - - ret = 
skl_compute_plane_wm_params(dev_priv, cstate, - intel_pstate, &wm_params, 0); - if (ret) - return ret; - ret = skl_compute_wm_levels(dev_priv, ddb, cstate, - intel_pstate, &wm_params, wm, 0); + ret = skl_build_plane_wm(cstate, ddb, pipe_wm, + pipe, plane_id, (struct intel_plane_state *) intel_pstate); if (ret) return ret; - - skl_compute_transition_wm(cstate, &wm_params, &wm->wm[0], - ddb_blocks, &wm->trans_wm); - - /* uv plane watermarks must also be validated for NV12/Planar */ - if (wm_params.is_planar) { - memset(&wm_params, 0, sizeof(struct skl_wm_params)); - wm->is_planar = true; - - ret = skl_compute_plane_wm_params(dev_priv, cstate, - intel_pstate, - &wm_params, 1); - if (ret) - return ret; - - ret = skl_compute_wm_levels(dev_priv, ddb, cstate, - intel_pstate, &wm_params, - wm, 1); - if (ret) - return ret; - } } pipe_wm->linetime = skl_compute_linetime_wm(cstate); @@ -4957,6 +5107,70 @@ static void skl_write_wm_level(struct drm_i915_private *dev_priv, I915_WRITE(reg, val); } +static void skl_pv_write_wm_level(u32 *plane_wm_level, + const struct skl_wm_level *level) +{ + uint32_t val = 0; + + if (level->plane_en) { + val |= PLANE_WM_EN; + val |= level->plane_res_b; + val |= level->plane_res_l << PLANE_WM_LINES_SHIFT; + } + + *plane_wm_level = val; +} + +static void skl_pv_ddb_entry_write(u32 *plane_cfg, + const struct skl_ddb_entry *entry) +{ + if (entry->end) + *plane_cfg = (entry->end - 1) << 16 | entry->start; + else + *plane_cfg = 0; +} + +static void skl_pv_write_plane_wm(struct intel_crtc *intel_crtc, + const struct skl_plane_wm *wm, + const struct skl_ddb_allocation *ddb, + enum plane_id plane_id) +{ + int i, level; + struct pv_plane_wm_update tmp_plane_wm; + struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev); + int max_level = ilk_wm_max_level(dev_priv); + u32 __iomem *pv_plane_wm = (u32 *)&(dev_priv->shared_page->pv_plane_wm); + enum pipe pipe = intel_crtc->pipe; + + memset(&tmp_plane_wm, 0, sizeof(struct pv_plane_wm_update)); + 
tmp_plane_wm.max_wm_level = max_level; + for (level = 0; level <= max_level; level++) { + skl_pv_write_wm_level(&tmp_plane_wm.plane_wm_level[level], + &wm->wm[level]); + } + skl_pv_write_wm_level(&tmp_plane_wm.plane_trans_wm_level, + &wm->trans_wm); + + if (wm->is_planar) { + skl_pv_ddb_entry_write(&tmp_plane_wm.plane_buf_cfg, + &ddb->uv_plane[pipe][plane_id]); + } else { + skl_pv_ddb_entry_write(&tmp_plane_wm.plane_buf_cfg, + &ddb->plane[pipe][plane_id]); + } + + spin_lock(&dev_priv->shared_page_lock); + for (i = 0; i < sizeof(struct pv_plane_wm_update) / 4; i++) + writel(*((u32 *)(&tmp_plane_wm) + i), pv_plane_wm + i); + if (wm->is_planar) + skl_ddb_entry_write(dev_priv, + PLANE_NV12_BUF_CFG(pipe, plane_id), + &ddb->plane[pipe][plane_id]); + else + I915_WRITE(PLANE_NV12_BUF_CFG(pipe, plane_id), 0x0); + spin_unlock(&dev_priv->shared_page_lock); +} + static void skl_write_plane_wm(struct intel_crtc *intel_crtc, const struct skl_plane_wm *wm, const struct skl_ddb_allocation *ddb, @@ -4968,6 +5182,21 @@ static void skl_write_plane_wm(struct intel_crtc *intel_crtc, int level, max_level = ilk_wm_max_level(dev_priv); enum pipe pipe = intel_crtc->pipe; + if (INTEL_GEN(dev_priv) < 11) { + /* + * when plane restriction feature is enabled, + * sos trap handlers for plane wm related registers are null + */ + /* TODO: uncomment when plane restriction feature is enabled */ +#if 0 + if (i915_modparams.avail_planes_per_pipe) + return; +#endif + if (PVMMIO_LEVEL(dev_priv, PVMMIO_PLANE_WM_UPDATE)) + return skl_pv_write_plane_wm(intel_crtc, wm, + ddb, plane_id); + } + for (level = 0; level <= max_level; level++) { skl_write_wm_level(dev_priv, PLANE_WM(pipe, plane_id, level), &wm->wm[level]); @@ -5059,7 +5288,10 @@ static int skl_update_pipe_wm(struct drm_crtc_state *cstate, struct intel_crtc_state *intel_cstate = to_intel_crtc_state(cstate); int ret; - ret = skl_build_pipe_wm(intel_cstate, ddb, pipe_wm); + if (i915_modparams.avail_planes_per_pipe) + ret = 
skl_build_pipe_all_plane_wm(intel_cstate, ddb, pipe_wm); + else + ret = skl_build_pipe_wm(intel_cstate, ddb, pipe_wm); if (ret) return ret; @@ -5128,6 +5360,23 @@ skl_compute_ddb(struct drm_atomic_state *state) memcpy(ddb, &dev_priv->wm.skl_hw.ddb, sizeof(*ddb)); +#if IS_ENABLED(CONFIG_DRM_I915_GVT) + /* + * In GVT environemnt, allocate ddb for all planes in active crtc. + * When there is active pipe change, intel_state active_crtcs is + * not zero and updated before dev_priv, so use intel_state + * active_crtc when it is not zero. + */ + if (dev_priv->gvt) { + unsigned int active_crtcs; + + active_crtcs = intel_state->active_crtcs ? + intel_state->active_crtcs : dev_priv->active_crtcs; + intel_gvt_allocate_ddb(dev_priv->gvt, ddb, active_crtcs); + return 0; + } +#endif + for_each_new_intel_crtc_in_state(intel_state, crtc, cstate, i) { ret = skl_allocate_pipe_ddb(cstate, ddb); if (ret) @@ -5286,10 +5535,14 @@ skl_compute_wm(struct drm_atomic_state *state) struct drm_crtc_state *cstate; struct intel_atomic_state *intel_state = to_intel_atomic_state(state); struct skl_ddb_values *results = &intel_state->wm_results; + struct drm_i915_private *dev_priv = to_i915(intel_state->base.dev); struct skl_pipe_wm *pipe_wm; bool changed = false; int ret, i; + if (intel_vgpu_active(dev_priv) && i915_modparams.avail_planes_per_pipe) + return 0; + /* Clear all dirty flags */ results->dirty_pipes = 0; @@ -5348,12 +5601,29 @@ static void skl_atomic_update_crtc_wm(struct intel_atomic_state *state, enum pipe pipe = crtc->pipe; enum plane_id plane_id; + if (intel_vgpu_active(dev_priv) && i915_modparams.avail_planes_per_pipe) + return; + if (!(state->wm_results.dirty_pipes & drm_crtc_mask(&crtc->base))) return; I915_WRITE(PIPE_WM_LINETIME(pipe), pipe_wm->linetime); + if (i915_modparams.avail_planes_per_pipe) { + for_each_universal_plane(dev_priv, pipe, plane_id) { + skl_write_plane_wm(crtc, &pipe_wm->planes[plane_id], + ddb, plane_id); + } + + return; + } + for_each_plane_id_on_crtc(crtc, 
plane_id) { +#if IS_ENABLED(CONFIG_DRM_I915_GVT) + if (dev_priv->gvt && + dev_priv->gvt->pipe_info[pipe].plane_owner[plane_id]) + return; +#endif if (plane_id != PLANE_CURSOR) skl_write_plane_wm(crtc, &pipe_wm->planes[plane_id], ddb, plane_id); diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c index 6a8f27d0a7429..3b8218dd9bb14 100644 --- a/drivers/gpu/drm/i915/intel_ringbuffer.c +++ b/drivers/gpu/drm/i915/intel_ringbuffer.c @@ -91,6 +91,7 @@ static int gen4_render_ring_flush(struct i915_request *rq, u32 mode) { u32 cmd, *cs; + int i; /* * read/write caches: @@ -127,12 +128,45 @@ gen4_render_ring_flush(struct i915_request *rq, u32 mode) cmd |= MI_INVALIDATE_ISP; } - cs = intel_ring_begin(rq, 2); + i = 2; + if (mode & EMIT_INVALIDATE) + i += 20; + + cs = intel_ring_begin(rq, i); if (IS_ERR(cs)) return PTR_ERR(cs); *cs++ = cmd; - *cs++ = MI_NOOP; + + /* + * A random delay to let the CS invalidate take effect? Without this + * delay, the GPU relocation path fails as the CS does not see + * the updated contents. Just as important, if we apply the flushes + * to the EMIT_FLUSH branch (i.e. immediately after the relocation + * write and before the invalidate on the next batch), the relocations + * still fail. This implies that is a delay following invalidation + * that is required to reset the caches as opposed to a delay to + * ensure the memory is written. 
+ */ + if (mode & EMIT_INVALIDATE) { + *cs++ = GFX_OP_PIPE_CONTROL(4) | PIPE_CONTROL_QW_WRITE; + *cs++ = i915_ggtt_offset(rq->engine->scratch) | + PIPE_CONTROL_GLOBAL_GTT; + *cs++ = 0; + *cs++ = 0; + + for (i = 0; i < 12; i++) + *cs++ = MI_FLUSH; + + *cs++ = GFX_OP_PIPE_CONTROL(4) | PIPE_CONTROL_QW_WRITE; + *cs++ = i915_ggtt_offset(rq->engine->scratch) | + PIPE_CONTROL_GLOBAL_GTT; + *cs++ = 0; + *cs++ = 0; + } + + *cs++ = cmd; + intel_ring_advance(rq, cs); return 0; diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h index f5ffa6d31e82c..5ef96c608be8a 100644 --- a/drivers/gpu/drm/i915/intel_ringbuffer.h +++ b/drivers/gpu/drm/i915/intel_ringbuffer.h @@ -276,8 +276,9 @@ struct intel_engine_execlists { */ unsigned int active; #define EXECLISTS_ACTIVE_USER 0 -#define EXECLISTS_ACTIVE_PREEMPT 1 -#define EXECLISTS_ACTIVE_HWACK 2 +#define EXECLISTS_ACTIVE_HWACK 1 +#define EXECLISTS_ACTIVE_PREEMPT 2 +#define EXECLISTS_ACTIVE_PREEMPT_TIMEOUT 3 /** * @port_mask: number of execlist ports - 1 @@ -325,6 +326,9 @@ struct intel_engine_execlists { */ u32 preempt_complete_status; + struct hrtimer preempt_timer; + struct work_struct preempt_reset; + /** * @csb_write_reset: reset value for CSB write pointer * @@ -486,14 +490,16 @@ struct intel_engine_cs { */ void (*submit_request)(struct i915_request *rq); - /* Call when the priority on a request has changed and it and its + /* + * Call when the priority on a request has changed and it and its * dependencies may need rescheduling. Note the request itself may * not be ready to run! * * Called under the struct_mutex. */ void (*schedule)(struct i915_request *request, - const struct i915_sched_attr *attr); + const struct i915_sched_attr *attr, + unsigned int timeout); /* * Cancel all requests on the hardware, or queued for execution. 
@@ -567,6 +573,7 @@ struct intel_engine_cs { } semaphore; struct intel_engine_execlists execlists; + struct work_struct reset_work; /* Contexts are pinned whilst they are active on the GPU. The last * context executed remains active whilst the GPU is idle - the @@ -789,6 +796,11 @@ intel_write_status_page(struct intel_engine_cs *engine, int reg, u32 value) #define I915_GEM_HWS_SCRATCH_INDEX 0x40 #define I915_GEM_HWS_SCRATCH_ADDR (I915_GEM_HWS_SCRATCH_INDEX << MI_STORE_DWORD_INDEX_SHIFT) +#define I915_GEM_HWS_PID_INDEX 0x50 +#define I915_GEM_HWS_PID_ADDR (I915_GEM_HWS_PID_INDEX << MI_STORE_DWORD_INDEX_SHIFT) +#define I915_GEM_HWS_CID_INDEX 0x58 +#define I915_GEM_HWS_CID_ADDR (I915_GEM_HWS_CID_INDEX << MI_STORE_DWORD_INDEX_SHIFT) + #define I915_HWS_CSB_BUF0_INDEX 0x10 #define I915_HWS_CSB_WRITE_INDEX 0x1f #define CNL_HWS_CSB_WRITE_INDEX 0x2f diff --git a/drivers/gpu/drm/i915/intel_runtime_pm.c b/drivers/gpu/drm/i915/intel_runtime_pm.c index 6b5aa3b074ecc..c7cea892525dd 100644 --- a/drivers/gpu/drm/i915/intel_runtime_pm.c +++ b/drivers/gpu/drm/i915/intel_runtime_pm.c @@ -387,7 +387,8 @@ static void hsw_power_well_enable(struct drm_i915_private *dev_priv, struct i915_power_well *power_well) { enum i915_power_well_id id = power_well->id; - bool wait_fuses = power_well->hsw.has_fuses; + bool wait_fuses = power_well->hsw.has_fuses && + !intel_vgpu_active(dev_priv); enum skl_power_gate uninitialized_var(pg); u32 val; diff --git a/drivers/gpu/drm/i915/intel_sprite.c b/drivers/gpu/drm/i915/intel_sprite.c index f7026e887fa9b..2c0cc897c41ef 100644 --- a/drivers/gpu/drm/i915/intel_sprite.c +++ b/drivers/gpu/drm/i915/intel_sprite.c @@ -41,6 +41,10 @@ #include #include "i915_drv.h" +#if IS_ENABLED(CONFIG_DRM_I915_GVT) +#include "gvt.h" +#endif + int intel_usecs_to_scanlines(const struct drm_display_mode *adjusted_mode, int usecs) { @@ -228,6 +232,68 @@ void intel_pipe_update_end(struct intel_crtc_state *new_crtc_state) #endif } +static void pv_update_plane_reg(struct intel_plane 
*plane, + u32 stride, uint32_t src_w, uint32_t src_h, + uint32_t crtc_w, uint32_t crtc_h, u32 aux_stride, + const struct intel_crtc_state *crtc_state, + const struct intel_plane_state *plane_state) +{ + int i; + struct pv_plane_update tmp_plane; + uint32_t x = plane_state->main.x; + uint32_t y = plane_state->main.y; + struct drm_i915_private *dev_priv = to_i915(plane->base.dev); + u32 __iomem *pv_plane = (u32 *)&(dev_priv->shared_page->pv_plane); + + memset(&tmp_plane, 0, sizeof(struct pv_plane_update)); + if (IS_GEMINILAKE(dev_priv) || IS_CANNONLAKE(dev_priv)) { + tmp_plane.flags |= PLANE_COLOR_CTL_BIT; + tmp_plane.plane_color_ctl = PLANE_COLOR_PIPE_GAMMA_ENABLE | + PLANE_COLOR_PIPE_CSC_ENABLE | + PLANE_COLOR_PLANE_GAMMA_DISABLE; + } + + if (plane_state->ckey.flags) { + tmp_plane.flags |= PLANE_KEY_BIT; + tmp_plane.plane_key_val = plane_state->ckey.min_value; + tmp_plane.plane_key_max = plane_state->ckey.max_value; + tmp_plane.plane_key_msk = plane_state->ckey.channel_mask; + } + + tmp_plane.plane_offset = (y << 16) | x; + tmp_plane.plane_stride = stride; + tmp_plane.plane_size = (src_h << 16) | src_w; + tmp_plane.plane_aux_dist = + (plane_state->aux.offset - plane_state->main.offset) | + aux_stride; + tmp_plane.plane_aux_offset = + (plane_state->aux.y << 16) | plane_state->aux.x; + + /* program plane scaler */ + if (plane_state->scaler_id >= 0) { + tmp_plane.flags |= PLANE_SCALER_BIT; + tmp_plane.ps_ctrl = PS_SCALER_EN | PS_PLANE_SEL(plane->id) | + crtc_state->scaler_state.scalers[plane_state->scaler_id].mode; + tmp_plane.ps_pwr_gate = 0; + tmp_plane.ps_win_ps = + (plane_state->base.dst.x1 << 16) | plane_state->base.dst.y1; + tmp_plane.ps_win_sz = ((crtc_w + 1) << 16) | (crtc_h + 1); + tmp_plane.plane_pos = 0; + } else { + tmp_plane.plane_pos = + (plane_state->base.dst.y1 << 16) | plane_state->base.dst.x1; + } + + tmp_plane.plane_ctl = plane_state->ctl; + + spin_lock(&dev_priv->shared_page_lock); + for (i = 0; i < sizeof(struct pv_plane_update) / 4; i++) + 
writel(*((u32 *)(&tmp_plane) + i), pv_plane + i); + I915_WRITE_FW(PLANE_SURF(plane->pipe, plane->id), + intel_plane_ggtt_offset(plane_state) + plane_state->main.offset); + spin_unlock(&dev_priv->shared_page_lock); +} + void skl_update_plane(struct intel_plane *plane, const struct intel_crtc_state *crtc_state, @@ -251,14 +317,26 @@ skl_update_plane(struct intel_plane *plane, uint32_t y = plane_state->main.y; uint32_t src_w = drm_rect_width(&plane_state->base.src) >> 16; uint32_t src_h = drm_rect_height(&plane_state->base.src) >> 16; + uint32_t val; unsigned long irqflags; +#if IS_ENABLED(CONFIG_DRM_I915_GVT) + if (dev_priv->gvt && + dev_priv->gvt->pipe_info[pipe].plane_owner[plane_id]) + return; +#endif /* Sizes are 0 based */ src_w--; src_h--; crtc_w--; crtc_h--; + if (PVMMIO_LEVEL(dev_priv, PVMMIO_PLANE_UPDATE)) { + pv_update_plane_reg(plane, stride, src_w, src_h, + crtc_w, crtc_h, aux_stride, crtc_state, plane_state); + return; + } + spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv)) @@ -321,8 +399,16 @@ skl_update_plane(struct intel_plane *plane, } I915_WRITE_FW(PLANE_CTL(pipe, plane_id), plane_ctl); - I915_WRITE_FW(PLANE_SURF(pipe, plane_id), - intel_plane_ggtt_offset(plane_state) + surf_addr); + + val = intel_plane_ggtt_offset(plane_state) + surf_addr; + + if (plane_state->base.decryption_reqd) + val |= PLANE_SURF_DECRYPTION_ENABLED; + else + val &= ~PLANE_SURF_DECRYPTION_ENABLED; + + I915_WRITE_FW(PLANE_SURF(pipe, plane_id), val); + POSTING_READ_FW(PLANE_SURF(pipe, plane_id)); spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); @@ -336,6 +422,12 @@ skl_disable_plane(struct intel_plane *plane, struct intel_crtc *crtc) enum pipe pipe = plane->pipe; unsigned long irqflags; +#if IS_ENABLED(CONFIG_DRM_I915_GVT) + if (dev_priv->gvt && + dev_priv->gvt->pipe_info[pipe].plane_owner[plane_id]) + return; +#endif + spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); I915_WRITE_FW(PLANE_CTL(pipe, 
plane_id), 0); @@ -956,7 +1048,7 @@ g4x_plane_get_hw_state(struct intel_plane *plane, return ret; } -static int +int intel_check_sprite_plane(struct intel_plane *plane, struct intel_crtc_state *crtc_state, struct intel_plane_state *state) @@ -1546,6 +1638,9 @@ intel_sprite_plane_create(struct drm_i915_private *dev_priv, else modifiers = skl_plane_format_modifiers_noccs; + if (intel_gvt_active(dev_priv) || intel_vgpu_active(dev_priv)) + modifiers = i9xx_plane_format_modifiers; + plane_funcs = &skl_plane_funcs; } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) { intel_plane->can_scale = false; diff --git a/drivers/gpu/drm/i915/intel_wopcm.c b/drivers/gpu/drm/i915/intel_wopcm.c index 74bf76f3fddc4..10d1ea1897750 100644 --- a/drivers/gpu/drm/i915/intel_wopcm.c +++ b/drivers/gpu/drm/i915/intel_wopcm.c @@ -63,6 +63,9 @@ #define GEN9_GUC_FW_RESERVED (128 * 1024) #define GEN9_GUC_WOPCM_OFFSET (GUC_WOPCM_RESERVED + GEN9_GUC_FW_RESERVED) +#define GEN9_GUC_9_29_SIZE ((142 * 1024) + 768) +#define GEN9_HUC_1_07_SIZE ((150 * 1024) + 576) + /** * intel_wopcm_init_early() - Early initialization of the WOPCM. * @wopcm: pointer to intel_wopcm. @@ -155,8 +158,8 @@ static inline int check_hw_restriction(struct drm_i915_private *i915, int intel_wopcm_init(struct intel_wopcm *wopcm) { struct drm_i915_private *i915 = wopcm_to_i915(wopcm); - u32 guc_fw_size = intel_uc_fw_get_upload_size(&i915->guc.fw); - u32 huc_fw_size = intel_uc_fw_get_upload_size(&i915->huc.fw); + u32 guc_fw_size = GEN9_GUC_9_29_SIZE; + u32 huc_fw_size = GEN9_HUC_1_07_SIZE; u32 ctx_rsvd = context_reserved_size(i915); u32 guc_wopcm_base; u32 guc_wopcm_size; @@ -207,6 +210,13 @@ int intel_wopcm_init(struct intel_wopcm *wopcm) wopcm->guc.base = guc_wopcm_base; wopcm->guc.size = guc_wopcm_size; + /* + * In deferred fw loading, we defer the intel_guc_init which will + * initialize the guc.ggtt_pin_bias. 
As it relies on wopcm size, + * set the ggtt_pin_bias after wopcm initialization + */ + i915->guc.ggtt_pin_bias = i915->wopcm.size - i915->wopcm.guc.base; + return 0; } diff --git a/drivers/gpu/drm/i915/selftests/huge_pages.c b/drivers/gpu/drm/i915/selftests/huge_pages.c index 7efb326badcd6..704572c2e6a23 100644 --- a/drivers/gpu/drm/i915/selftests/huge_pages.c +++ b/drivers/gpu/drm/i915/selftests/huge_pages.c @@ -549,7 +549,7 @@ static int igt_mock_ppgtt_misaligned_dma(void *arg) err = igt_check_page_sizes(vma); if (vma->page_sizes.gtt != I915_GTT_PAGE_SIZE_4K) { - pr_err("page_sizes.gtt=%u, expected %lu\n", + pr_err("page_sizes.gtt=%u, expected %llu\n", vma->page_sizes.gtt, I915_GTT_PAGE_SIZE_4K); err = -EINVAL; } diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c b/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c index 8e2e269db97e8..127d815136717 100644 --- a/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c +++ b/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c @@ -1337,7 +1337,7 @@ static int igt_gtt_reserve(void *arg) GEM_BUG_ON(!drm_mm_node_allocated(&vma->node)); if (vma->node.start != total || vma->node.size != 2*I915_GTT_PAGE_SIZE) { - pr_err("i915_gem_gtt_reserve (pass 1) placement failed, found (%llx + %llx), expected (%llx + %lx)\n", + pr_err("i915_gem_gtt_reserve (pass 1) placement failed, found (%llx + %llx), expected (%llx + %llx)\n", vma->node.start, vma->node.size, total, 2*I915_GTT_PAGE_SIZE); err = -EINVAL; @@ -1386,7 +1386,7 @@ static int igt_gtt_reserve(void *arg) GEM_BUG_ON(!drm_mm_node_allocated(&vma->node)); if (vma->node.start != total || vma->node.size != 2*I915_GTT_PAGE_SIZE) { - pr_err("i915_gem_gtt_reserve (pass 2) placement failed, found (%llx + %llx), expected (%llx + %lx)\n", + pr_err("i915_gem_gtt_reserve (pass 2) placement failed, found (%llx + %llx), expected (%llx + %llx)\n", vma->node.start, vma->node.size, total, 2*I915_GTT_PAGE_SIZE); err = -EINVAL; @@ -1430,7 +1430,7 @@ static int igt_gtt_reserve(void *arg) 
GEM_BUG_ON(!drm_mm_node_allocated(&vma->node)); if (vma->node.start != offset || vma->node.size != 2*I915_GTT_PAGE_SIZE) { - pr_err("i915_gem_gtt_reserve (pass 3) placement failed, found (%llx + %llx), expected (%llx + %lx)\n", + pr_err("i915_gem_gtt_reserve (pass 3) placement failed, found (%llx + %llx), expected (%llx + %llx)\n", vma->node.start, vma->node.size, offset, 2*I915_GTT_PAGE_SIZE); err = -EINVAL; diff --git a/drivers/gpu/drm/i915/selftests/intel_lrc.c b/drivers/gpu/drm/i915/selftests/intel_lrc.c index 582566faef090..12a33a8410ca3 100644 --- a/drivers/gpu/drm/i915/selftests/intel_lrc.c +++ b/drivers/gpu/drm/i915/selftests/intel_lrc.c @@ -413,7 +413,7 @@ static int live_late_preempt(void *arg) } attr.priority = I915_PRIORITY_MAX; - engine->schedule(rq, &attr); + engine->schedule(rq, &attr, 0); if (!wait_for_spinner(&spin_hi, rq)) { pr_err("High priority context failed to preempt the low priority context\n"); @@ -565,6 +565,371 @@ static int live_preempt_hang(void *arg) return err; } +static void mark_preemption_hang(struct intel_engine_execlists *execlists) +{ + execlists_set_active(execlists, EXECLISTS_ACTIVE_PREEMPT); + execlists_set_active(execlists, EXECLISTS_ACTIVE_PREEMPT_TIMEOUT); +} + +static int live_preempt_timeout(void *arg) +{ + struct drm_i915_private *i915 = arg; + struct intel_engine_cs *engine; + struct i915_gem_context *ctx; + enum intel_engine_id id; + struct spinner spin; + int err = -ENOMEM; + + if (!HAS_LOGICAL_RING_PREEMPTION(i915)) + return 0; + + mutex_lock(&i915->drm.struct_mutex); + + if (spinner_init(&spin, i915)) + goto err_unlock; + + ctx = kernel_context(i915); + if (!ctx) + goto err_spin; + + for_each_engine(engine, i915, id) { + struct i915_request *rq; + + rq = spinner_create_request(&spin, ctx, engine, MI_NOOP); + if (IS_ERR(rq)) { + err = PTR_ERR(rq); + goto err_ctx; + } + + i915_request_add(rq); + if (!wait_for_spinner(&spin, rq)) { + i915_gem_set_wedged(i915); + err = -EIO; + goto err_ctx; + } + + GEM_TRACE("%s 
triggering reset\n", engine->name); + mark_preemption_hang(&engine->execlists); + preempt_reset(&engine->execlists.preempt_reset); + + if (igt_flush_test(i915, I915_WAIT_LOCKED)) { + err = -EIO; + goto err_ctx; + } + } + + err = 0; +err_ctx: + kernel_context_close(ctx); +err_spin: + spinner_fini(&spin); +err_unlock: + igt_flush_test(i915, I915_WAIT_LOCKED); + mutex_unlock(&i915->drm.struct_mutex); + return err; +} + +static void __softirq_begin(void) +{ + local_bh_disable(); +} + +static void __softirq_end(void) +{ + local_bh_enable(); +} + +static void __hardirq_begin(void) +{ + local_irq_disable(); +} + +static void __hardirq_end(void) +{ + local_irq_enable(); +} + +static int live_preempt_reset(void *arg) +{ + struct drm_i915_private *i915 = arg; + struct intel_engine_cs *engine; + struct i915_gem_context *ctx; + enum intel_engine_id id; + struct spinner spin; + int err = -ENOMEM; + + if (!HAS_LOGICAL_RING_PREEMPTION(i915)) + return 0; + + mutex_lock(&i915->drm.struct_mutex); + + if (spinner_init(&spin, i915)) + goto err_unlock; + + ctx = kernel_context(i915); + if (!ctx) + goto err_spin; + + for_each_engine(engine, i915, id) { + static const struct { + const char *name; + void (*critical_section_begin)(void); + void (*critical_section_end)(void); + } phases[] = { + { "softirq", __softirq_begin, __softirq_end }, + { "hardirq", __hardirq_begin, __hardirq_end }, + { } + }; + struct tasklet_struct *t = &engine->execlists.tasklet; + const typeof(*phases) *p; + + for (p = phases; p->name; p++) { + struct i915_request *rq; + + rq = spinner_create_request(&spin, ctx, engine, + MI_NOOP); + if (IS_ERR(rq)) { + err = PTR_ERR(rq); + goto err_ctx; + } + + i915_request_add(rq); + if (!wait_for_spinner(&spin, rq)) { + i915_gem_set_wedged(i915); + err = -EIO; + goto err_ctx; + } + + /* Flush to give try_preempt_reset a chance */ + tasklet_schedule(t); + tasklet_kill(t); + GEM_BUG_ON(i915_request_completed(rq)); + + GEM_TRACE("%s triggering %s reset\n", + engine->name, 
p->name); + p->critical_section_begin(); + + mark_preemption_hang(&engine->execlists); + err = try_preempt_reset(&engine->execlists); + + p->critical_section_end(); + if (err) { + pr_err("Preempt softirq reset failed on %s, tasklet state %lx\n", + engine->name, t->state); + spinner_end(&spin); + i915_gem_set_wedged(i915); + goto err_ctx; + } + + if (igt_flush_test(i915, I915_WAIT_LOCKED)) { + err = -EIO; + goto err_ctx; + } + } + } + + err = 0; +err_ctx: + kernel_context_close(ctx); +err_spin: + spinner_fini(&spin); +err_unlock: + igt_flush_test(i915, I915_WAIT_LOCKED); + mutex_unlock(&i915->drm.struct_mutex); + return err; +} + +static int live_late_preempt_timeout(void *arg) +{ + struct drm_i915_private *i915 = arg; + struct i915_gem_context *ctx_hi, *ctx_lo; + struct spinner spin_hi, spin_lo; + struct intel_engine_cs *engine; + enum intel_engine_id id; + int err = -ENOMEM; + + if (!HAS_LOGICAL_RING_PREEMPTION(i915)) + return 0; + + mutex_lock(&i915->drm.struct_mutex); + + if (spinner_init(&spin_hi, i915)) + goto err_unlock; + + if (spinner_init(&spin_lo, i915)) + goto err_spin_hi; + + ctx_hi = kernel_context(i915); + if (!ctx_hi) + goto err_spin_lo; + + ctx_lo = kernel_context(i915); + if (!ctx_lo) + goto err_ctx_hi; + + for_each_engine(engine, i915, id) { + struct i915_request *rq; + + rq = spinner_create_request(&spin_lo, ctx_lo, engine, MI_NOOP); + if (IS_ERR(rq)) { + err = PTR_ERR(rq); + goto err_ctx_lo; + } + + i915_request_add(rq); + if (!wait_for_spinner(&spin_lo, rq)) { + pr_err("First context failed to start\n"); + goto err_wedged; + } + + rq = spinner_create_request(&spin_hi, ctx_hi, engine, MI_NOOP); + if (IS_ERR(rq)) { + spinner_end(&spin_lo); + err = PTR_ERR(rq); + goto err_ctx_lo; + } + + i915_request_add(rq); + if (wait_for_spinner(&spin_hi, rq)) { + pr_err("Second context overtook first?\n"); + goto err_wedged; + } + + GEM_TRACE("%s rescheduling (no timeout)\n", engine->name); + engine->schedule(rq, &(struct i915_sched_attr){ + .priority = 1, + 
}, 0); + + if (wait_for_spinner(&spin_hi, rq)) { + pr_err("High priority context overtook first without an arbitration point?\n"); + goto err_wedged; + } + + GEM_TRACE("%s rescheduling (with timeout)\n", engine->name); + engine->schedule(rq, &(struct i915_sched_attr){ + .priority = 2, + }, 10 * 1000 /* 10us */); + + if (!wait_for_spinner(&spin_hi, rq)) { + pr_err("High priority context failed to force itself in front of the low priority context\n"); + GEM_TRACE_DUMP(); + goto err_wedged; + } + + spinner_end(&spin_hi); + spinner_end(&spin_lo); + if (igt_flush_test(i915, I915_WAIT_LOCKED)) { + err = -EIO; + goto err_ctx_lo; + } + } + + err = 0; +err_ctx_lo: + kernel_context_close(ctx_lo); +err_ctx_hi: + kernel_context_close(ctx_hi); +err_spin_lo: + spinner_fini(&spin_lo); +err_spin_hi: + spinner_fini(&spin_hi); +err_unlock: + igt_flush_test(i915, I915_WAIT_LOCKED); + mutex_unlock(&i915->drm.struct_mutex); + return err; + +err_wedged: + spinner_end(&spin_hi); + spinner_end(&spin_lo); + i915_gem_set_wedged(i915); + err = -EIO; + goto err_ctx_lo; +} + +static int live_context_preempt_timeout(void *arg) +{ + struct drm_i915_private *i915 = arg; + struct i915_gem_context *ctx_hi, *ctx_lo; + struct spinner spin_hi, spin_lo; + struct intel_engine_cs *engine; + enum intel_engine_id id; + int err = -ENOMEM; + + if (!HAS_LOGICAL_RING_PREEMPTION(i915)) + return 0; + + mutex_lock(&i915->drm.struct_mutex); + + if (spinner_init(&spin_hi, i915)) + goto err_unlock; + + if (spinner_init(&spin_lo, i915)) + goto err_spin_hi; + + ctx_hi = kernel_context(i915); + if (!ctx_hi) + goto err_spin_lo; + ctx_hi->sched.priority = I915_CONTEXT_MAX_USER_PRIORITY; + ctx_hi->preempt_timeout = 50 * 1000; /* 50us */ + + ctx_lo = kernel_context(i915); + if (!ctx_lo) + goto err_ctx_hi; + ctx_lo->sched.priority = I915_CONTEXT_MIN_USER_PRIORITY; + + for_each_engine(engine, i915, id) { + struct i915_request *rq; + + rq = spinner_create_request(&spin_lo, ctx_lo, engine, MI_NOOP); + if (IS_ERR(rq)) { + err = 
PTR_ERR(rq); + goto err_ctx_lo; + } + + i915_request_add(rq); + if (!wait_for_spinner(&spin_lo, rq)) { + i915_gem_set_wedged(i915); + err = -EIO; + goto err_ctx_lo; + } + + rq = spinner_create_request(&spin_hi, ctx_hi, engine, MI_NOOP); + if (IS_ERR(rq)) { + spinner_end(&spin_lo); + err = PTR_ERR(rq); + goto err_ctx_lo; + } + + i915_request_add(rq); + if (!wait_for_spinner(&spin_hi, rq)) { + i915_gem_set_wedged(i915); + err = -EIO; + goto err_ctx_lo; + } + + spinner_end(&spin_hi); + spinner_end(&spin_lo); + if (igt_flush_test(i915, I915_WAIT_LOCKED)) { + err = -EIO; + goto err_ctx_lo; + } + } + + err = 0; +err_ctx_lo: + kernel_context_close(ctx_lo); +err_ctx_hi: + kernel_context_close(ctx_hi); +err_spin_lo: + spinner_fini(&spin_lo); +err_spin_hi: + spinner_fini(&spin_hi); +err_unlock: + igt_flush_test(i915, I915_WAIT_LOCKED); + mutex_unlock(&i915->drm.struct_mutex); + return err; +} + int intel_execlists_live_selftests(struct drm_i915_private *i915) { static const struct i915_subtest tests[] = { @@ -572,6 +937,10 @@ int intel_execlists_live_selftests(struct drm_i915_private *i915) SUBTEST(live_preempt), SUBTEST(live_late_preempt), SUBTEST(live_preempt_hang), + SUBTEST(live_preempt_timeout), + SUBTEST(live_preempt_reset), + SUBTEST(live_late_preempt_timeout), + SUBTEST(live_context_preempt_timeout), }; if (!HAS_EXECLISTS(i915)) diff --git a/drivers/gpu/drm/mediatek/mtk_hdmi.c b/drivers/gpu/drm/mediatek/mtk_hdmi.c index 2d45d1dd9554a..643f5edd68fe3 100644 --- a/drivers/gpu/drm/mediatek/mtk_hdmi.c +++ b/drivers/gpu/drm/mediatek/mtk_hdmi.c @@ -1446,8 +1446,7 @@ static int mtk_hdmi_dt_parse_pdata(struct mtk_hdmi *hdmi, } /* The CEC module handles HDMI hotplug detection */ - cec_np = of_find_compatible_node(np->parent, NULL, - "mediatek,mt8173-cec"); + cec_np = of_get_compatible_child(np->parent, "mediatek,mt8173-cec"); if (!cec_np) { dev_err(dev, "Failed to find CEC node\n"); return -EINVAL; @@ -1457,8 +1456,10 @@ static int mtk_hdmi_dt_parse_pdata(struct mtk_hdmi 
*hdmi, if (!cec_pdev) { dev_err(hdmi->dev, "Waiting for CEC device %pOF\n", cec_np); + of_node_put(cec_np); return -EPROBE_DEFER; } + of_node_put(cec_np); hdmi->cec_dev = &cec_pdev->dev; /* diff --git a/drivers/gpu/drm/meson/meson_crtc.c b/drivers/gpu/drm/meson/meson_crtc.c index 05520202c9677..709475d5cc30e 100644 --- a/drivers/gpu/drm/meson/meson_crtc.c +++ b/drivers/gpu/drm/meson/meson_crtc.c @@ -101,6 +101,8 @@ static void meson_crtc_atomic_enable(struct drm_crtc *crtc, writel_bits_relaxed(VPP_POSTBLEND_ENABLE, VPP_POSTBLEND_ENABLE, priv->io_base + _REG(VPP_MISC)); + drm_crtc_vblank_on(crtc); + priv->viu.osd1_enabled = true; } @@ -110,6 +112,8 @@ static void meson_crtc_atomic_disable(struct drm_crtc *crtc, struct meson_crtc *meson_crtc = to_meson_crtc(crtc); struct meson_drm *priv = meson_crtc->priv; + drm_crtc_vblank_off(crtc); + priv->viu.osd1_enabled = false; priv->viu.osd1_commit = false; diff --git a/drivers/gpu/drm/meson/meson_drv.c b/drivers/gpu/drm/meson/meson_drv.c index d3443125e6616..bf5f294f172fa 100644 --- a/drivers/gpu/drm/meson/meson_drv.c +++ b/drivers/gpu/drm/meson/meson_drv.c @@ -82,6 +82,10 @@ static const struct drm_mode_config_funcs meson_mode_config_funcs = { .fb_create = drm_gem_fb_create, }; +static const struct drm_mode_config_helper_funcs meson_mode_config_helpers = { + .atomic_commit_tail = drm_atomic_helper_commit_tail_rpm, +}; + static irqreturn_t meson_irq(int irq, void *arg) { struct drm_device *dev = arg; @@ -246,6 +250,7 @@ static int meson_drv_bind_master(struct device *dev, bool has_components) drm->mode_config.max_width = 3840; drm->mode_config.max_height = 2160; drm->mode_config.funcs = &meson_mode_config_funcs; + drm->mode_config.helper_private = &meson_mode_config_helpers; /* Hardware Initialization */ diff --git a/drivers/gpu/drm/meson/meson_dw_hdmi.c b/drivers/gpu/drm/meson/meson_dw_hdmi.c index df7247cd93f98..2cb2ad26d7167 100644 --- a/drivers/gpu/drm/meson/meson_dw_hdmi.c +++ b/drivers/gpu/drm/meson/meson_dw_hdmi.c @@ 
-706,6 +706,7 @@ static const struct regmap_config meson_dw_hdmi_regmap_config = { .reg_read = meson_dw_hdmi_reg_read, .reg_write = meson_dw_hdmi_reg_write, .max_register = 0x10000, + .fast_io = true, }; static bool meson_hdmi_connector_is_available(struct device *dev) diff --git a/drivers/gpu/drm/meson/meson_venc.c b/drivers/gpu/drm/meson/meson_venc.c index 514245e69b384..7a3a6ed9f27bb 100644 --- a/drivers/gpu/drm/meson/meson_venc.c +++ b/drivers/gpu/drm/meson/meson_venc.c @@ -71,6 +71,7 @@ */ /* HHI Registers */ +#define HHI_GCLK_MPEG2 0x148 /* 0x52 offset in data sheet */ #define HHI_VDAC_CNTL0 0x2F4 /* 0xbd offset in data sheet */ #define HHI_VDAC_CNTL1 0x2F8 /* 0xbe offset in data sheet */ #define HHI_HDMI_PHY_CNTL0 0x3a0 /* 0xe8 offset in data sheet */ @@ -714,6 +715,7 @@ struct meson_hdmi_venc_vic_mode { { 5, &meson_hdmi_encp_mode_1080i60 }, { 20, &meson_hdmi_encp_mode_1080i50 }, { 32, &meson_hdmi_encp_mode_1080p24 }, + { 33, &meson_hdmi_encp_mode_1080p50 }, { 34, &meson_hdmi_encp_mode_1080p30 }, { 31, &meson_hdmi_encp_mode_1080p50 }, { 16, &meson_hdmi_encp_mode_1080p60 }, @@ -1529,10 +1531,12 @@ unsigned int meson_venci_get_field(struct meson_drm *priv) void meson_venc_enable_vsync(struct meson_drm *priv) { writel_relaxed(2, priv->io_base + _REG(VENC_INTCTRL)); + regmap_update_bits(priv->hhi, HHI_GCLK_MPEG2, BIT(25), BIT(25)); } void meson_venc_disable_vsync(struct meson_drm *priv) { + regmap_update_bits(priv->hhi, HHI_GCLK_MPEG2, BIT(25), 0); writel_relaxed(0, priv->io_base + _REG(VENC_INTCTRL)); } diff --git a/drivers/gpu/drm/meson/meson_viu.c b/drivers/gpu/drm/meson/meson_viu.c index 6bcfa527c1801..26a0857878bfd 100644 --- a/drivers/gpu/drm/meson/meson_viu.c +++ b/drivers/gpu/drm/meson/meson_viu.c @@ -184,18 +184,18 @@ void meson_viu_set_osd_lut(struct meson_drm *priv, enum viu_lut_sel_e lut_sel, if (lut_sel == VIU_LUT_OSD_OETF) { writel(0, priv->io_base + _REG(addr_port)); - for (i = 0; i < 20; i++) + for (i = 0; i < (OSD_OETF_LUT_SIZE / 2); i++) 
writel(r_map[i * 2] | (r_map[i * 2 + 1] << 16), priv->io_base + _REG(data_port)); writel(r_map[OSD_OETF_LUT_SIZE - 1] | (g_map[0] << 16), priv->io_base + _REG(data_port)); - for (i = 0; i < 20; i++) + for (i = 0; i < (OSD_OETF_LUT_SIZE / 2); i++) writel(g_map[i * 2 + 1] | (g_map[i * 2 + 2] << 16), priv->io_base + _REG(data_port)); - for (i = 0; i < 20; i++) + for (i = 0; i < (OSD_OETF_LUT_SIZE / 2); i++) writel(b_map[i * 2] | (b_map[i * 2 + 1] << 16), priv->io_base + _REG(data_port)); @@ -211,18 +211,18 @@ void meson_viu_set_osd_lut(struct meson_drm *priv, enum viu_lut_sel_e lut_sel, } else if (lut_sel == VIU_LUT_OSD_EOTF) { writel(0, priv->io_base + _REG(addr_port)); - for (i = 0; i < 20; i++) + for (i = 0; i < (OSD_EOTF_LUT_SIZE / 2); i++) writel(r_map[i * 2] | (r_map[i * 2 + 1] << 16), priv->io_base + _REG(data_port)); writel(r_map[OSD_EOTF_LUT_SIZE - 1] | (g_map[0] << 16), priv->io_base + _REG(data_port)); - for (i = 0; i < 20; i++) + for (i = 0; i < (OSD_EOTF_LUT_SIZE / 2); i++) writel(g_map[i * 2 + 1] | (g_map[i * 2 + 2] << 16), priv->io_base + _REG(data_port)); - for (i = 0; i < 20; i++) + for (i = 0; i < (OSD_EOTF_LUT_SIZE / 2); i++) writel(b_map[i * 2] | (b_map[i * 2 + 1] << 16), priv->io_base + _REG(data_port)); diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.c b/drivers/gpu/drm/msm/adreno/adreno_gpu.c index da1363a0c54d6..93d70f4a2154e 100644 --- a/drivers/gpu/drm/msm/adreno/adreno_gpu.c +++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.c @@ -633,8 +633,7 @@ static int adreno_get_legacy_pwrlevels(struct device *dev) struct device_node *child, *node; int ret; - node = of_find_compatible_node(dev->of_node, NULL, - "qcom,gpu-pwrlevels"); + node = of_get_compatible_child(dev->of_node, "qcom,gpu-pwrlevels"); if (!node) { dev_err(dev, "Could not find the GPU powerlevels\n"); return -ENXIO; @@ -655,6 +654,8 @@ static int adreno_get_legacy_pwrlevels(struct device *dev) dev_pm_opp_add(dev, val, 0); } + of_node_put(node); + return 0; } diff --git 
a/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c index 80cbf75bc2ff2..4752f08f0884c 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c @@ -1535,8 +1535,7 @@ static int dpu_crtc_atomic_check(struct drm_crtc *crtc, cnt++; dst = drm_plane_state_dest(pstate); - if (!drm_rect_intersect(&clip, &dst) || - !drm_rect_equals(&clip, &dst)) { + if (!drm_rect_intersect(&clip, &dst)) { DPU_ERROR("invalid vertical/horizontal destination\n"); DPU_ERROR("display: " DRM_RECT_FMT " plane: " DRM_RECT_FMT "\n", DRM_RECT_ARG(&crtc_rect), @@ -2123,7 +2122,6 @@ struct drm_crtc *dpu_crtc_init(struct drm_device *dev, struct drm_plane *plane) NULL); drm_crtc_helper_add(crtc, &dpu_crtc_helper_funcs); - plane->crtc = crtc; /* save user friendly CRTC name for later */ snprintf(dpu_crtc->name, DPU_CRTC_NAME_SIZE, "crtc%u", crtc->base.id); diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_dbg.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_dbg.c index ae2aee7ed9e19..e741d26185df6 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_dbg.c +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_dbg.c @@ -1962,7 +1962,7 @@ static void _dpu_dbg_dump_dpu_dbg_bus(struct dpu_dbg_dpu_debug_bus *bus) u32 *dump_addr = NULL; u32 status = 0; struct dpu_debug_bus_entry *head; - phys_addr_t phys = 0; + dma_addr_t dma = 0; int list_size; int i; u32 offset; @@ -2000,7 +2000,7 @@ static void _dpu_dbg_dump_dpu_dbg_bus(struct dpu_dbg_dpu_debug_bus *bus) if (in_mem) { if (!(*dump_mem)) *dump_mem = dma_alloc_coherent(dpu_dbg_base.dev, - list_size, &phys, GFP_KERNEL); + list_size, &dma, GFP_KERNEL); if (*dump_mem) { dump_addr = *dump_mem; @@ -2101,7 +2101,7 @@ static void _dpu_dbg_dump_vbif_dbg_bus(struct dpu_dbg_vbif_debug_bus *bus) u32 value, d0, d1; unsigned long reg, reg1, reg2; struct vbif_debug_bus_entry *head; - phys_addr_t phys = 0; + dma_addr_t dma = 0; int i, list_size = 0; void __iomem *mem_base = NULL; struct vbif_debug_bus_entry *dbg_bus; @@ -2151,7 
+2151,7 @@ static void _dpu_dbg_dump_vbif_dbg_bus(struct dpu_dbg_vbif_debug_bus *bus) if (in_mem) { if (!(*dump_mem)) *dump_mem = dma_alloc_coherent(dpu_dbg_base.dev, - list_size, &phys, GFP_KERNEL); + list_size, &dma, GFP_KERNEL); if (*dump_mem) { dump_addr = *dump_mem; diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c index 1b4de3486ef9e..ec3fd67378c18 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c @@ -503,8 +503,6 @@ static void dpu_encoder_destroy(struct drm_encoder *drm_enc) drm_encoder_cleanup(drm_enc); mutex_destroy(&dpu_enc->enc_lock); - - kfree(dpu_enc); } void dpu_encoder_helper_split_config( diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c index 7dd6bd2d6d378..74cc204b07e80 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c @@ -450,7 +450,7 @@ static void _dpu_kms_initialize_dsi(struct drm_device *dev, int i, rc; /*TODO: Support two independent DSI connectors */ - encoder = dpu_encoder_init(dev, DRM_MODE_CONNECTOR_DSI); + encoder = dpu_encoder_init(dev, DRM_MODE_ENCODER_DSI); if (IS_ERR_OR_NULL(encoder)) { DPU_ERROR("encoder init failed for dsi display\n"); return; diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c index b640e39ebaca2..4ac2b0c669b74 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c @@ -1254,7 +1254,7 @@ static int dpu_plane_sspp_atomic_update(struct drm_plane *plane, const struct dpu_format *fmt; struct drm_crtc *crtc; struct drm_framebuffer *fb; - struct drm_rect src, dst; + int ret, min_scale; if (!plane) { DPU_ERROR("invalid plane\n"); @@ -1293,21 +1293,29 @@ static int dpu_plane_sspp_atomic_update(struct drm_plane *plane, pdpu->is_rt_pipe = (dpu_crtc_get_client_type(crtc) != NRT_CLIENT); _dpu_plane_set_qos_ctrl(plane, false, 
DPU_PLANE_QOS_PANIC_CTRL); - src.x1 = state->src_x >> 16; - src.y1 = state->src_y >> 16; - src.x2 = src.x1 + (state->src_w >> 16); - src.y2 = src.y1 + (state->src_h >> 16); + min_scale = FRAC_16_16(1, pdpu->pipe_sblk->maxdwnscale); + ret = drm_atomic_helper_check_plane_state(state, crtc->state, min_scale, + pdpu->pipe_sblk->maxupscale << 16, + true, false); + if (ret) { + DPU_ERROR_PLANE(pdpu, "Check plane state failed (%d)\n", ret); + return ret; + } - dst = drm_plane_state_dest(state); + DPU_DEBUG_PLANE(pdpu, "FB[%u] " DRM_RECT_FP_FMT "->crtc%u " DRM_RECT_FMT + ", %4.4s ubwc %d\n", fb->base.id, DRM_RECT_FP_ARG(&state->src), + crtc->base.id, DRM_RECT_ARG(&state->dst), + (char *)&fmt->base.pixel_format, DPU_FORMAT_IS_UBWC(fmt)); - DPU_DEBUG_PLANE(pdpu, "FB[%u] " DRM_RECT_FMT "->crtc%u " DRM_RECT_FMT - ", %4.4s ubwc %d\n", fb->base.id, DRM_RECT_ARG(&src), - crtc->base.id, DRM_RECT_ARG(&dst), - (char *)&fmt->base.pixel_format, - DPU_FORMAT_IS_UBWC(fmt)); + pdpu->pipe_cfg.src_rect = state->src; + + /* state->src is 16.16, src_rect is not */ + pdpu->pipe_cfg.src_rect.x1 >>= 16; + pdpu->pipe_cfg.src_rect.x2 >>= 16; + pdpu->pipe_cfg.src_rect.y1 >>= 16; + pdpu->pipe_cfg.src_rect.y2 >>= 16; - pdpu->pipe_cfg.src_rect = src; - pdpu->pipe_cfg.dst_rect = dst; + pdpu->pipe_cfg.dst_rect = state->dst; _dpu_plane_setup_scaler(pdpu, pstate, fmt, false); diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_plane.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_plane.c index 7d306c5acd096..273cbbe27c2e5 100644 --- a/drivers/gpu/drm/msm/disp/mdp5/mdp5_plane.c +++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_plane.c @@ -259,7 +259,6 @@ static void mdp5_plane_cleanup_fb(struct drm_plane *plane, msm_framebuffer_cleanup(fb, kms->aspace); } -#define FRAC_16_16(mult, div) (((mult) << 16) / (div)) static int mdp5_plane_atomic_check_with_state(struct drm_crtc_state *crtc_state, struct drm_plane_state *state) { diff --git a/drivers/gpu/drm/msm/dsi/pll/dsi_pll_10nm.c b/drivers/gpu/drm/msm/dsi/pll/dsi_pll_10nm.c index 
4c03f0b7343ed..41bec570c5184 100644 --- a/drivers/gpu/drm/msm/dsi/pll/dsi_pll_10nm.c +++ b/drivers/gpu/drm/msm/dsi/pll/dsi_pll_10nm.c @@ -39,6 +39,8 @@ #define DSI_PIXEL_PLL_CLK 1 #define NUM_PROVIDED_CLKS 2 +#define VCO_REF_CLK_RATE 19200000 + struct dsi_pll_regs { u32 pll_prop_gain_rate; u32 pll_lockdet_rate; @@ -316,7 +318,7 @@ static int dsi_pll_10nm_vco_set_rate(struct clk_hw *hw, unsigned long rate, parent_rate); pll_10nm->vco_current_rate = rate; - pll_10nm->vco_ref_clk_rate = parent_rate; + pll_10nm->vco_ref_clk_rate = VCO_REF_CLK_RATE; dsi_pll_setup_config(pll_10nm); diff --git a/drivers/gpu/drm/msm/hdmi/hdmi.c b/drivers/gpu/drm/msm/hdmi/hdmi.c index c79659ca57065..33e083f71a170 100644 --- a/drivers/gpu/drm/msm/hdmi/hdmi.c +++ b/drivers/gpu/drm/msm/hdmi/hdmi.c @@ -332,6 +332,12 @@ int msm_hdmi_modeset_init(struct hdmi *hdmi, goto fail; } + ret = msm_hdmi_hpd_enable(hdmi->connector); + if (ret < 0) { + DRM_DEV_ERROR(&hdmi->pdev->dev, "failed to enable HPD: %d\n", ret); + goto fail; + } + encoder->bridge = hdmi->bridge; priv->bridges[priv->num_bridges++] = hdmi->bridge; diff --git a/drivers/gpu/drm/msm/hdmi/hdmi.h b/drivers/gpu/drm/msm/hdmi/hdmi.h index accc9a61611d3..5c5df6ab2a573 100644 --- a/drivers/gpu/drm/msm/hdmi/hdmi.h +++ b/drivers/gpu/drm/msm/hdmi/hdmi.h @@ -245,6 +245,7 @@ void msm_hdmi_bridge_destroy(struct drm_bridge *bridge); void msm_hdmi_connector_irq(struct drm_connector *connector); struct drm_connector *msm_hdmi_connector_init(struct hdmi *hdmi); +int msm_hdmi_hpd_enable(struct drm_connector *connector); /* * i2c adapter for ddc: diff --git a/drivers/gpu/drm/msm/hdmi/hdmi_connector.c b/drivers/gpu/drm/msm/hdmi/hdmi_connector.c index e9c9a0af508e8..30e908dfded7e 100644 --- a/drivers/gpu/drm/msm/hdmi/hdmi_connector.c +++ b/drivers/gpu/drm/msm/hdmi/hdmi_connector.c @@ -167,8 +167,9 @@ static void enable_hpd_clocks(struct hdmi *hdmi, bool enable) } } -static int hpd_enable(struct hdmi_connector *hdmi_connector) +int msm_hdmi_hpd_enable(struct 
drm_connector *connector) { + struct hdmi_connector *hdmi_connector = to_hdmi_connector(connector); struct hdmi *hdmi = hdmi_connector->hdmi; const struct hdmi_platform_config *config = hdmi->config; struct device *dev = &hdmi->pdev->dev; @@ -450,7 +451,6 @@ struct drm_connector *msm_hdmi_connector_init(struct hdmi *hdmi) { struct drm_connector *connector = NULL; struct hdmi_connector *hdmi_connector; - int ret; hdmi_connector = kzalloc(sizeof(*hdmi_connector), GFP_KERNEL); if (!hdmi_connector) @@ -471,12 +471,6 @@ struct drm_connector *msm_hdmi_connector_init(struct hdmi *hdmi) connector->interlace_allowed = 0; connector->doublescan_allowed = 0; - ret = hpd_enable(hdmi_connector); - if (ret) { - dev_err(&hdmi->pdev->dev, "failed to enable HPD: %d\n", ret); - return ERR_PTR(ret); - } - drm_connector_attach_encoder(connector, hdmi->encoder); return connector; diff --git a/drivers/gpu/drm/msm/msm_atomic.c b/drivers/gpu/drm/msm/msm_atomic.c index c1f1779c980f6..2b7bb6e166d3f 100644 --- a/drivers/gpu/drm/msm/msm_atomic.c +++ b/drivers/gpu/drm/msm/msm_atomic.c @@ -32,7 +32,12 @@ static void msm_atomic_wait_for_commit_done(struct drm_device *dev, if (!new_crtc_state->active) continue; + if (drm_crtc_vblank_get(crtc)) + continue; + kms->funcs->wait_for_crtc_commit_done(kms, crtc); + + drm_crtc_vblank_put(crtc); } } diff --git a/drivers/gpu/drm/msm/msm_debugfs.c b/drivers/gpu/drm/msm/msm_debugfs.c index f0da0d3c8a80f..d756436c1fcd3 100644 --- a/drivers/gpu/drm/msm/msm_debugfs.c +++ b/drivers/gpu/drm/msm/msm_debugfs.c @@ -84,7 +84,7 @@ static int msm_gpu_open(struct inode *inode, struct file *file) ret = mutex_lock_interruptible(&dev->struct_mutex); if (ret) - return ret; + goto free_priv; pm_runtime_get_sync(&gpu->pdev->dev); show_priv->state = gpu->funcs->gpu_state_get(gpu); @@ -94,13 +94,20 @@ static int msm_gpu_open(struct inode *inode, struct file *file) if (IS_ERR(show_priv->state)) { ret = PTR_ERR(show_priv->state); - kfree(show_priv); - return ret; + goto free_priv; 
} show_priv->dev = dev; - return single_open(file, msm_gpu_show, show_priv); + ret = single_open(file, msm_gpu_show, show_priv); + if (ret) + goto free_priv; + + return 0; + +free_priv: + kfree(show_priv); + return ret; } static const struct file_operations msm_gpu_fops = { diff --git a/drivers/gpu/drm/msm/msm_drv.h b/drivers/gpu/drm/msm/msm_drv.h index 8e510d5c758a5..9d11f321f5a92 100644 --- a/drivers/gpu/drm/msm/msm_drv.h +++ b/drivers/gpu/drm/msm/msm_drv.h @@ -62,6 +62,8 @@ struct msm_gem_vma; #define MAX_BRIDGES 8 #define MAX_CONNECTORS 8 +#define FRAC_16_16(mult, div) (((mult) << 16) / (div)) + struct msm_file_private { rwlock_t queuelock; struct list_head submitqueues; diff --git a/drivers/gpu/drm/msm/msm_gem_submit.c b/drivers/gpu/drm/msm/msm_gem_submit.c index 7bd83e0afa971..c3ae7507d1c76 100644 --- a/drivers/gpu/drm/msm/msm_gem_submit.c +++ b/drivers/gpu/drm/msm/msm_gem_submit.c @@ -410,7 +410,6 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data, struct msm_file_private *ctx = file->driver_priv; struct msm_gem_submit *submit; struct msm_gpu *gpu = priv->gpu; - struct dma_fence *in_fence = NULL; struct sync_file *sync_file = NULL; struct msm_gpu_submitqueue *queue; struct msm_ringbuffer *ring; @@ -443,6 +442,8 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data, ring = gpu->rb[queue->prio]; if (args->flags & MSM_SUBMIT_FENCE_FD_IN) { + struct dma_fence *in_fence; + in_fence = sync_file_get_fence(args->fence_fd); if (!in_fence) @@ -452,11 +453,13 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data, * Wait if the fence is from a foreign context, or if the fence * array contains any fence from a foreign context. 
*/ - if (!dma_fence_match_context(in_fence, ring->fctx->context)) { + ret = 0; + if (!dma_fence_match_context(in_fence, ring->fctx->context)) ret = dma_fence_wait(in_fence, true); - if (ret) - return ret; - } + + dma_fence_put(in_fence); + if (ret) + return ret; } ret = mutex_lock_interruptible(&dev->struct_mutex); @@ -582,8 +585,6 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data, } out: - if (in_fence) - dma_fence_put(in_fence); submit_cleanup(submit); if (ret) msm_gem_submit_free(submit); diff --git a/drivers/gpu/drm/msm/msm_gpu.c b/drivers/gpu/drm/msm/msm_gpu.c index 5e808cfec345f..52a2146dc1f25 100644 --- a/drivers/gpu/drm/msm/msm_gpu.c +++ b/drivers/gpu/drm/msm/msm_gpu.c @@ -367,8 +367,8 @@ static void msm_gpu_crashstate_capture(struct msm_gpu *gpu, msm_gpu_devcoredump_read, msm_gpu_devcoredump_free); } #else -static void msm_gpu_crashstate_capture(struct msm_gpu *gpu, char *comm, - char *cmd) +static void msm_gpu_crashstate_capture(struct msm_gpu *gpu, + struct msm_gem_submit *submit, char *comm, char *cmd) { } #endif @@ -425,10 +425,9 @@ static void recover_worker(struct work_struct *work) if (submit) { struct task_struct *task; - rcu_read_lock(); - task = pid_task(submit->pid, PIDTYPE_PID); + task = get_pid_task(submit->pid, PIDTYPE_PID); if (task) { - comm = kstrdup(task->comm, GFP_ATOMIC); + comm = kstrdup(task->comm, GFP_KERNEL); /* * So slightly annoying, in other paths like @@ -441,10 +440,10 @@ static void recover_worker(struct work_struct *work) * about the submit going away. 
*/ mutex_unlock(&dev->struct_mutex); - cmd = kstrdup_quotable_cmdline(task, GFP_ATOMIC); + cmd = kstrdup_quotable_cmdline(task, GFP_KERNEL); + put_task_struct(task); mutex_lock(&dev->struct_mutex); } - rcu_read_unlock(); if (comm && cmd) { dev_err(dev->dev, "%s: offending task: %s (%s)\n", diff --git a/drivers/gpu/drm/msm/msm_iommu.c b/drivers/gpu/drm/msm/msm_iommu.c index b23d33622f374..2a90aa4caec08 100644 --- a/drivers/gpu/drm/msm/msm_iommu.c +++ b/drivers/gpu/drm/msm/msm_iommu.c @@ -66,7 +66,7 @@ static int msm_iommu_map(struct msm_mmu *mmu, uint64_t iova, // pm_runtime_get_sync(mmu->dev); ret = iommu_map_sg(iommu->domain, iova, sgt->sgl, sgt->nents, prot); // pm_runtime_put_sync(mmu->dev); - WARN_ON(ret < 0); + WARN_ON(!ret); return (ret == len) ? 0 : -EINVAL; } diff --git a/drivers/gpu/drm/msm/msm_rd.c b/drivers/gpu/drm/msm/msm_rd.c index 3aa8a8576abea..f7a0edea4705b 100644 --- a/drivers/gpu/drm/msm/msm_rd.c +++ b/drivers/gpu/drm/msm/msm_rd.c @@ -316,10 +316,11 @@ static void snapshot_buf(struct msm_rd_state *rd, uint64_t iova, uint32_t size) { struct msm_gem_object *obj = submit->bos[idx].obj; + unsigned offset = 0; const char *buf; if (iova) { - buf += iova - submit->bos[idx].iova; + offset = iova - submit->bos[idx].iova; } else { iova = submit->bos[idx].iova; size = obj->base.size; @@ -340,6 +341,8 @@ static void snapshot_buf(struct msm_rd_state *rd, if (IS_ERR(buf)) return; + buf += offset; + rd_write_section(rd, RD_BUFFER_CONTENTS, buf, size); msm_gem_put_vaddr(&obj->base); diff --git a/drivers/gpu/drm/nouveau/dispnv50/disp.c b/drivers/gpu/drm/nouveau/dispnv50/disp.c index 041e7daf8a337..f889d41a281fa 100644 --- a/drivers/gpu/drm/nouveau/dispnv50/disp.c +++ b/drivers/gpu/drm/nouveau/dispnv50/disp.c @@ -197,6 +197,22 @@ nv50_dmac_create(struct nvif_device *device, struct nvif_object *disp, /****************************************************************************** * EVO channel helpers 
*****************************************************************************/ +static void +evo_flush(struct nv50_dmac *dmac) +{ + /* Push buffer fetches are not coherent with BAR1, we need to ensure + * writes have been flushed right through to VRAM before writing PUT. + */ + if (dmac->push.type & NVIF_MEM_VRAM) { + struct nvif_device *device = dmac->base.device; + nvif_wr32(&device->object, 0x070000, 0x00000001); + nvif_msec(device, 2000, + if (!(nvif_rd32(&device->object, 0x070000) & 0x00000002)) + break; + ); + } +} + u32 * evo_wait(struct nv50_dmac *evoc, int nr) { @@ -207,6 +223,7 @@ evo_wait(struct nv50_dmac *evoc, int nr) mutex_lock(&dmac->lock); if (put + nr >= (PAGE_SIZE / 4) - 8) { dmac->ptr[put] = 0x20000000; + evo_flush(dmac); nvif_wr32(&dmac->base.user, 0x0000, 0x00000000); if (nvif_msec(device, 2000, @@ -229,17 +246,7 @@ evo_kick(u32 *push, struct nv50_dmac *evoc) { struct nv50_dmac *dmac = evoc; - /* Push buffer fetches are not coherent with BAR1, we need to ensure - * writes have been flushed right through to VRAM before writing PUT. 
- */ - if (dmac->push.type & NVIF_MEM_VRAM) { - struct nvif_device *device = dmac->base.device; - nvif_wr32(&device->object, 0x070000, 0x00000001); - nvif_msec(device, 2000, - if (!(nvif_rd32(&device->object, 0x070000) & 0x00000002)) - break; - ); - } + evo_flush(dmac); nvif_wr32(&dmac->base.user, 0x0000, (push - dmac->ptr) << 2); mutex_unlock(&dmac->lock); @@ -843,22 +850,16 @@ nv50_mstc_atomic_best_encoder(struct drm_connector *connector, { struct nv50_head *head = nv50_head(connector_state->crtc); struct nv50_mstc *mstc = nv50_mstc(connector); - if (mstc->port) { - struct nv50_mstm *mstm = mstc->mstm; - return &mstm->msto[head->base.index]->encoder; - } - return NULL; + + return &mstc->mstm->msto[head->base.index]->encoder; } static struct drm_encoder * nv50_mstc_best_encoder(struct drm_connector *connector) { struct nv50_mstc *mstc = nv50_mstc(connector); - if (mstc->port) { - struct nv50_mstm *mstm = mstc->mstm; - return &mstm->msto[0]->encoder; - } - return NULL; + + return &mstc->mstm->msto[0]->encoder; } static enum drm_mode_status @@ -1223,8 +1224,16 @@ nv50_mstm_fini(struct nv50_mstm *mstm) static void nv50_mstm_init(struct nv50_mstm *mstm) { - if (mstm && mstm->mgr.mst_state) - drm_dp_mst_topology_mgr_resume(&mstm->mgr); + int ret; + + if (!mstm || !mstm->mgr.mst_state) + return; + + ret = drm_dp_mst_topology_mgr_resume(&mstm->mgr); + if (ret == -1) { + drm_dp_mst_topology_mgr_set_mst(&mstm->mgr, false); + drm_kms_helper_hotplug_event(mstm->mgr.dev); + } } static void @@ -1232,6 +1241,7 @@ nv50_mstm_del(struct nv50_mstm **pmstm) { struct nv50_mstm *mstm = *pmstm; if (mstm) { + drm_dp_mst_topology_mgr_destroy(&mstm->mgr); kfree(*pmstm); *pmstm = NULL; } diff --git a/drivers/gpu/drm/nouveau/nouveau_backlight.c b/drivers/gpu/drm/nouveau/nouveau_backlight.c index 408b955e5c39a..6dd72bc32897a 100644 --- a/drivers/gpu/drm/nouveau/nouveau_backlight.c +++ b/drivers/gpu/drm/nouveau/nouveau_backlight.c @@ -116,7 +116,7 @@ nv40_backlight_init(struct drm_connector 
*connector) &nv40_bl_ops, &props); if (IS_ERR(bd)) { - if (bl_connector.id > 0) + if (bl_connector.id >= 0) ida_simple_remove(&bl_ida, bl_connector.id); return PTR_ERR(bd); } @@ -249,7 +249,7 @@ nv50_backlight_init(struct drm_connector *connector) nv_encoder, ops, &props); if (IS_ERR(bd)) { - if (bl_connector.id > 0) + if (bl_connector.id >= 0) ida_simple_remove(&bl_ida, bl_connector.id); return PTR_ERR(bd); } diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r352.c b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r352.c index d02e183717dc4..5c14d6ac855d2 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r352.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r352.c @@ -801,6 +801,7 @@ acr_r352_load(struct nvkm_acr *_acr, struct nvkm_falcon *falcon, bl = acr->hsbl_unload_blob; } else { nvkm_error(_acr->subdev, "invalid secure boot blob!\n"); + kfree(bl_desc); return -EINVAL; } diff --git a/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c b/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c index f92fe205550bc..e884183c018ac 100644 --- a/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c +++ b/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c @@ -285,6 +285,17 @@ static int dmm_txn_commit(struct dmm_txn *txn, bool wait) } txn->last_pat->next_pa = 0; + /* ensure that the written descriptors are visible to DMM */ + wmb(); + + /* + * NOTE: the wmb() above should be enough, but there seems to be a bug + * in OMAP's memory barrier implementation, which in some rare cases may + * cause the writes not to be observable after wmb(). 
+ */ + + /* read back to ensure the data is in RAM */ + readl(&txn->last_pat->next_pa); /* write to PAT_DESCR to clear out any pending transaction */ dmm_write(dmm, 0x0, reg[PAT_DESCR][engine->id]); diff --git a/drivers/gpu/drm/rcar-du/rcar_du_kms.c b/drivers/gpu/drm/rcar-du/rcar_du_kms.c index f0bc7cc0e913f..fb46df56f0c4e 100644 --- a/drivers/gpu/drm/rcar-du/rcar_du_kms.c +++ b/drivers/gpu/drm/rcar-du/rcar_du_kms.c @@ -516,12 +516,22 @@ int rcar_du_modeset_init(struct rcar_du_device *rcdu) dev->mode_config.min_width = 0; dev->mode_config.min_height = 0; - dev->mode_config.max_width = 4095; - dev->mode_config.max_height = 2047; dev->mode_config.normalize_zpos = true; dev->mode_config.funcs = &rcar_du_mode_config_funcs; dev->mode_config.helper_private = &rcar_du_mode_config_helper; + if (rcdu->info->gen < 3) { + dev->mode_config.max_width = 4095; + dev->mode_config.max_height = 2047; + } else { + /* + * The Gen3 DU uses the VSP1 for memory access, and is limited + * to frame sizes of 8190x8190. 
+ */ + dev->mode_config.max_width = 8190; + dev->mode_config.max_height = 8190; + } + rcdu->num_crtcs = hweight8(rcdu->info->channels_mask); ret = rcar_du_properties_init(rcdu); diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_psr.c b/drivers/gpu/drm/rockchip/rockchip_drm_psr.c index 79d00d861a31f..01ff3c8588750 100644 --- a/drivers/gpu/drm/rockchip/rockchip_drm_psr.c +++ b/drivers/gpu/drm/rockchip/rockchip_drm_psr.c @@ -189,12 +189,14 @@ EXPORT_SYMBOL(rockchip_drm_psr_flush_all); int rockchip_drm_psr_register(struct drm_encoder *encoder, int (*psr_set)(struct drm_encoder *, bool enable)) { - struct rockchip_drm_private *drm_drv = encoder->dev->dev_private; + struct rockchip_drm_private *drm_drv; struct psr_drv *psr; if (!encoder || !psr_set) return -EINVAL; + drm_drv = encoder->dev->dev_private; + psr = kzalloc(sizeof(struct psr_drv), GFP_KERNEL); if (!psr) return -ENOMEM; diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c index 046a6dda690a2..40904e84f883a 100644 --- a/drivers/gpu/drm/ttm/ttm_bo_util.c +++ b/drivers/gpu/drm/ttm/ttm_bo_util.c @@ -492,8 +492,10 @@ static int ttm_buffer_object_transfer(struct ttm_buffer_object *bo, if (!fbo) return -ENOMEM; - ttm_bo_get(bo); fbo->base = *bo; + fbo->base.mem.placement |= TTM_PL_FLAG_NO_EVICT; + + ttm_bo_get(bo); fbo->bo = bo; /** diff --git a/drivers/gpu/drm/udl/udl_main.c b/drivers/gpu/drm/udl/udl_main.c index f455f095a1468..1b014d92855b9 100644 --- a/drivers/gpu/drm/udl/udl_main.c +++ b/drivers/gpu/drm/udl/udl_main.c @@ -350,15 +350,10 @@ int udl_driver_load(struct drm_device *dev, unsigned long flags) if (ret) goto err; - ret = drm_vblank_init(dev, 1); - if (ret) - goto err_fb; - drm_kms_helper_poll_init(dev); return 0; -err_fb: - udl_fbdev_cleanup(dev); + err: if (udl->urbs.count) udl_free_urb_list(dev); diff --git a/drivers/gpu/drm/v3d/v3d_debugfs.c b/drivers/gpu/drm/v3d/v3d_debugfs.c index 4db62c5457482..26470c77eb6e5 100644 --- a/drivers/gpu/drm/v3d/v3d_debugfs.c +++ 
b/drivers/gpu/drm/v3d/v3d_debugfs.c @@ -71,10 +71,13 @@ static int v3d_v3d_debugfs_regs(struct seq_file *m, void *unused) V3D_READ(v3d_hub_reg_defs[i].reg)); } - for (i = 0; i < ARRAY_SIZE(v3d_gca_reg_defs); i++) { - seq_printf(m, "%s (0x%04x): 0x%08x\n", - v3d_gca_reg_defs[i].name, v3d_gca_reg_defs[i].reg, - V3D_GCA_READ(v3d_gca_reg_defs[i].reg)); + if (v3d->ver < 41) { + for (i = 0; i < ARRAY_SIZE(v3d_gca_reg_defs); i++) { + seq_printf(m, "%s (0x%04x): 0x%08x\n", + v3d_gca_reg_defs[i].name, + v3d_gca_reg_defs[i].reg, + V3D_GCA_READ(v3d_gca_reg_defs[i].reg)); + } } for (core = 0; core < v3d->cores; core++) { diff --git a/drivers/gpu/drm/vc4/vc4_kms.c b/drivers/gpu/drm/vc4/vc4_kms.c index ca5aa7fba7694..f4d8a730e821b 100644 --- a/drivers/gpu/drm/vc4/vc4_kms.c +++ b/drivers/gpu/drm/vc4/vc4_kms.c @@ -216,6 +216,12 @@ static int vc4_atomic_commit(struct drm_device *dev, return 0; } + /* We know for sure we don't want an async update here. Set + * state->legacy_cursor_update to false to prevent + * drm_atomic_helper_setup_commit() from auto-completing + * commit->flip_done. 
+ */ + state->legacy_cursor_update = false; ret = drm_atomic_helper_setup_commit(state, nonblock); if (ret) return ret; diff --git a/drivers/gpu/drm/vc4/vc4_plane.c b/drivers/gpu/drm/vc4/vc4_plane.c index a3275fa66b7b9..629f40424bbaa 100644 --- a/drivers/gpu/drm/vc4/vc4_plane.c +++ b/drivers/gpu/drm/vc4/vc4_plane.c @@ -322,6 +322,7 @@ static int vc4_plane_setup_clipping_and_scaling(struct drm_plane_state *state) if (vc4_state->is_unity) vc4_state->x_scaling[0] = VC4_SCALING_PPF; } else { + vc4_state->is_yuv = false; vc4_state->x_scaling[1] = VC4_SCALING_NONE; vc4_state->y_scaling[1] = VC4_SCALING_NONE; } diff --git a/drivers/gpu/vga/vga_switcheroo.c b/drivers/gpu/vga/vga_switcheroo.c index cf2a18571d484..a132c37d73349 100644 --- a/drivers/gpu/vga/vga_switcheroo.c +++ b/drivers/gpu/vga/vga_switcheroo.c @@ -380,6 +380,9 @@ int vga_switcheroo_register_audio_client(struct pci_dev *pdev, mutex_unlock(&vgasr_mutex); return -EINVAL; } + /* notify if GPU has been already bound */ + if (ops->gpu_bound) + ops->gpu_bound(pdev, id); } mutex_unlock(&vgasr_mutex); diff --git a/drivers/hid/hid-alps.c b/drivers/hid/hid-alps.c index aec253b441568..3cd7229b6e546 100644 --- a/drivers/hid/hid-alps.c +++ b/drivers/hid/hid-alps.c @@ -660,6 +660,20 @@ static int T4_init(struct hid_device *hdev, struct alps_dev *pri_data) return ret; } +static int alps_sp_open(struct input_dev *dev) +{ + struct hid_device *hid = input_get_drvdata(dev); + + return hid_hw_open(hid); +} + +static void alps_sp_close(struct input_dev *dev) +{ + struct hid_device *hid = input_get_drvdata(dev); + + hid_hw_close(hid); +} + static int alps_input_configured(struct hid_device *hdev, struct hid_input *hi) { struct alps_dev *data = hid_get_drvdata(hdev); @@ -733,6 +747,10 @@ static int alps_input_configured(struct hid_device *hdev, struct hid_input *hi) input2->id.version = input->id.version; input2->dev.parent = input->dev.parent; + input_set_drvdata(input2, hdev); + input2->open = alps_sp_open; + input2->close = 
alps_sp_close; + __set_bit(EV_KEY, input2->evbit); data->sp_btn_cnt = (data->sp_btn_info & 0x0F); for (i = 0; i < data->sp_btn_cnt; i++) diff --git a/drivers/hid/hid-hyperv.c b/drivers/hid/hid-hyperv.c index b372854cf38d3..704049e62d58a 100644 --- a/drivers/hid/hid-hyperv.c +++ b/drivers/hid/hid-hyperv.c @@ -309,7 +309,7 @@ static void mousevsc_on_receive(struct hv_device *device, hid_input_report(input_dev->hid_device, HID_INPUT_REPORT, input_dev->input_buf, len, 1); - pm_wakeup_event(&input_dev->device->device, 0); + pm_wakeup_hard_event(&input_dev->device->device); break; default: diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h index bc49909aba8e6..b7870e7e41d42 100644 --- a/drivers/hid/hid-ids.h +++ b/drivers/hid/hid-ids.h @@ -17,6 +17,9 @@ #ifndef HID_IDS_H_FILE #define HID_IDS_H_FILE +#define USB_VENDOR_ID_258A 0x258a +#define USB_DEVICE_ID_258A_6A88 0x6a88 + #define USB_VENDOR_ID_3M 0x0596 #define USB_DEVICE_ID_3M1968 0x0500 #define USB_DEVICE_ID_3M2256 0x0502 @@ -271,6 +274,9 @@ #define USB_VENDOR_ID_CIDC 0x1677 +#define I2C_VENDOR_ID_CIRQUE 0x0488 +#define I2C_PRODUCT_ID_CIRQUE_121F 0x121F + #define USB_VENDOR_ID_CJTOUCH 0x24b8 #define USB_DEVICE_ID_CJTOUCH_MULTI_TOUCH_0020 0x0020 #define USB_DEVICE_ID_CJTOUCH_MULTI_TOUCH_0040 0x0040 @@ -799,6 +805,7 @@ #define USB_DEVICE_ID_MS_TOUCH_COVER_2 0x07a7 #define USB_DEVICE_ID_MS_TYPE_COVER_2 0x07a9 #define USB_DEVICE_ID_MS_POWER_COVER 0x07da +#define USB_DEVICE_ID_MS_PIXART_MOUSE 0x00cb #define USB_VENDOR_ID_MOJO 0x8282 #define USB_DEVICE_ID_RETRO_ADAPTER 0x3201 @@ -921,12 +928,19 @@ #define USB_DEVICE_ID_QUANTA_OPTICAL_TOUCH_3003 0x3003 #define USB_DEVICE_ID_QUANTA_OPTICAL_TOUCH_3008 0x3008 +#define I2C_VENDOR_ID_RAYDIUM 0x2386 +#define I2C_PRODUCT_ID_RAYDIUM_4B33 0x4b33 + #define USB_VENDOR_ID_RAZER 0x1532 #define USB_DEVICE_ID_RAZER_BLADE_14 0x011D #define USB_VENDOR_ID_REALTEK 0x0bda #define USB_DEVICE_ID_REALTEK_READER 0x0152 +#define USB_VENDOR_ID_RETROUSB 0xf000 +#define 
USB_DEVICE_ID_RETROUSB_SNES_RETROPAD 0x0003 +#define USB_DEVICE_ID_RETROUSB_SNES_RETROPORT 0x00f1 + #define USB_VENDOR_ID_ROCCAT 0x1e7d #define USB_DEVICE_ID_ROCCAT_ARVO 0x30d4 #define USB_DEVICE_ID_ROCCAT_ISKU 0x319c @@ -1034,6 +1048,7 @@ #define USB_VENDOR_ID_SYMBOL 0x05e0 #define USB_DEVICE_ID_SYMBOL_SCANNER_1 0x0800 #define USB_DEVICE_ID_SYMBOL_SCANNER_2 0x1300 +#define USB_DEVICE_ID_SYMBOL_SCANNER_3 0x1200 #define USB_VENDOR_ID_SYNAPTICS 0x06cb #define USB_DEVICE_ID_SYNAPTICS_TP 0x0001 @@ -1195,6 +1210,8 @@ #define USB_DEVICE_ID_PRIMAX_MOUSE_4D22 0x4d22 #define USB_DEVICE_ID_PRIMAX_KEYBOARD 0x4e05 #define USB_DEVICE_ID_PRIMAX_REZEL 0x4e72 +#define USB_DEVICE_ID_PRIMAX_PIXART_MOUSE_4D0F 0x4d0f +#define USB_DEVICE_ID_PRIMAX_PIXART_MOUSE_4E22 0x4e22 #define USB_VENDOR_ID_RISO_KAGAKU 0x1294 /* Riso Kagaku Corp. */ diff --git a/drivers/hid/hid-input.c b/drivers/hid/hid-input.c index a481eaf39e887..a3916e58dbf57 100644 --- a/drivers/hid/hid-input.c +++ b/drivers/hid/hid-input.c @@ -325,6 +325,9 @@ static const struct hid_device_id hid_battery_quirks[] = { { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_BM084), HID_BATTERY_QUIRK_IGNORE }, + { HID_USB_DEVICE(USB_VENDOR_ID_SYMBOL, + USB_DEVICE_ID_SYMBOL_SCANNER_3), + HID_BATTERY_QUIRK_IGNORE }, {} }; diff --git a/drivers/hid/hid-ite.c b/drivers/hid/hid-ite.c index 1882a4ab0f29f..98b059d79bc89 100644 --- a/drivers/hid/hid-ite.c +++ b/drivers/hid/hid-ite.c @@ -42,6 +42,7 @@ static int ite_event(struct hid_device *hdev, struct hid_field *field, static const struct hid_device_id ite_devices[] = { { HID_USB_DEVICE(USB_VENDOR_ID_ITE, USB_DEVICE_ID_ITE8595) }, + { HID_USB_DEVICE(USB_VENDOR_ID_258A, USB_DEVICE_ID_258A_6A88) }, { } }; MODULE_DEVICE_TABLE(hid, ite_devices); diff --git a/drivers/hid/hid-multitouch.c b/drivers/hid/hid-multitouch.c index da954f3f4da7f..2faf5421fdd0c 100644 --- a/drivers/hid/hid-multitouch.c +++ b/drivers/hid/hid-multitouch.c @@ -1822,6 +1822,12 @@ static const struct hid_device_id 
mt_devices[] = { MT_USB_DEVICE(USB_VENDOR_ID_CHUNGHWAT, USB_DEVICE_ID_CHUNGHWAT_MULTITOUCH) }, + /* Cirque devices */ + { .driver_data = MT_CLS_WIN_8_DUAL, + HID_DEVICE(BUS_I2C, HID_GROUP_MULTITOUCH_WIN_8, + I2C_VENDOR_ID_CIRQUE, + I2C_PRODUCT_ID_CIRQUE_121F) }, + /* CJTouch panels */ { .driver_data = MT_CLS_NSMU, MT_USB_DEVICE(USB_VENDOR_ID_CJTOUCH, diff --git a/drivers/hid/hid-quirks.c b/drivers/hid/hid-quirks.c index 249d49b6b16c7..77316f022c5ae 100644 --- a/drivers/hid/hid-quirks.c +++ b/drivers/hid/hid-quirks.c @@ -106,7 +106,7 @@ static const struct hid_device_id hid_quirks[] = { { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_MOUSE_C05A), HID_QUIRK_ALWAYS_POLL }, { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_MOUSE_C06A), HID_QUIRK_ALWAYS_POLL }, { HID_USB_DEVICE(USB_VENDOR_ID_MCS, USB_DEVICE_ID_MCS_GAMEPADBLOCK), HID_QUIRK_MULTI_INPUT }, - { HID_USB_DEVICE(USB_VENDOR_ID_MGE, USB_DEVICE_ID_MGE_UPS), HID_QUIRK_NOGET }, + { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_PIXART_MOUSE), HID_QUIRK_ALWAYS_POLL }, { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_POWER_COVER), HID_QUIRK_NO_INIT_REPORTS }, { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_SURFACE_PRO_2), HID_QUIRK_NO_INIT_REPORTS }, { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TOUCH_COVER_2), HID_QUIRK_NO_INIT_REPORTS }, @@ -129,11 +129,15 @@ static const struct hid_device_id hid_quirks[] = { { HID_USB_DEVICE(USB_VENDOR_ID_PIXART, USB_DEVICE_ID_PIXART_OPTICAL_TOUCH_SCREEN), HID_QUIRK_NO_INIT_REPORTS }, { HID_USB_DEVICE(USB_VENDOR_ID_PIXART, USB_DEVICE_ID_PIXART_USB_OPTICAL_MOUSE), HID_QUIRK_ALWAYS_POLL }, { HID_USB_DEVICE(USB_VENDOR_ID_PRIMAX, USB_DEVICE_ID_PRIMAX_MOUSE_4D22), HID_QUIRK_ALWAYS_POLL }, + { HID_USB_DEVICE(USB_VENDOR_ID_PRIMAX, USB_DEVICE_ID_PRIMAX_PIXART_MOUSE_4D0F), HID_QUIRK_ALWAYS_POLL }, + { HID_USB_DEVICE(USB_VENDOR_ID_PRIMAX, USB_DEVICE_ID_PRIMAX_PIXART_MOUSE_4E22), HID_QUIRK_ALWAYS_POLL }, { 
HID_USB_DEVICE(USB_VENDOR_ID_PRODIGE, USB_DEVICE_ID_PRODIGE_CORDLESS), HID_QUIRK_NOGET }, { HID_USB_DEVICE(USB_VENDOR_ID_QUANTA, USB_DEVICE_ID_QUANTA_OPTICAL_TOUCH_3001), HID_QUIRK_NOGET }, { HID_USB_DEVICE(USB_VENDOR_ID_QUANTA, USB_DEVICE_ID_QUANTA_OPTICAL_TOUCH_3003), HID_QUIRK_NOGET }, { HID_USB_DEVICE(USB_VENDOR_ID_QUANTA, USB_DEVICE_ID_QUANTA_OPTICAL_TOUCH_3008), HID_QUIRK_NOGET }, { HID_USB_DEVICE(USB_VENDOR_ID_REALTEK, USB_DEVICE_ID_REALTEK_READER), HID_QUIRK_NO_INIT_REPORTS }, + { HID_USB_DEVICE(USB_VENDOR_ID_RETROUSB, USB_DEVICE_ID_RETROUSB_SNES_RETROPAD), HID_QUIRK_INCREMENT_USAGE_ON_DUPLICATE }, + { HID_USB_DEVICE(USB_VENDOR_ID_RETROUSB, USB_DEVICE_ID_RETROUSB_SNES_RETROPORT), HID_QUIRK_INCREMENT_USAGE_ON_DUPLICATE }, { HID_USB_DEVICE(USB_VENDOR_ID_SAITEK, USB_DEVICE_ID_SAITEK_RUMBLEPAD), HID_QUIRK_BADPAD }, { HID_USB_DEVICE(USB_VENDOR_ID_SEMICO, USB_DEVICE_ID_SEMICO_USB_KEYKOARD2), HID_QUIRK_NO_INIT_REPORTS }, { HID_USB_DEVICE(USB_VENDOR_ID_SEMICO, USB_DEVICE_ID_SEMICO_USB_KEYKOARD), HID_QUIRK_NO_INIT_REPORTS }, diff --git a/drivers/hid/hid-sensor-custom.c b/drivers/hid/hid-sensor-custom.c index e8a114157f87b..bb012bc032e02 100644 --- a/drivers/hid/hid-sensor-custom.c +++ b/drivers/hid/hid-sensor-custom.c @@ -358,7 +358,7 @@ static ssize_t show_value(struct device *dev, struct device_attribute *attr, sensor_inst->hsdev, sensor_inst->hsdev->usage, usage, report_id, - SENSOR_HUB_SYNC); + SENSOR_HUB_SYNC, false); } else if (!strncmp(name, "units", strlen("units"))) value = sensor_inst->fields[field_index].attribute.units; else if (!strncmp(name, "unit-expo", strlen("unit-expo"))) diff --git a/drivers/hid/hid-sensor-hub.c b/drivers/hid/hid-sensor-hub.c index 2b63487057c25..4256fdc5cd6d5 100644 --- a/drivers/hid/hid-sensor-hub.c +++ b/drivers/hid/hid-sensor-hub.c @@ -299,7 +299,8 @@ EXPORT_SYMBOL_GPL(sensor_hub_get_feature); int sensor_hub_input_attr_get_raw_value(struct hid_sensor_hub_device *hsdev, u32 usage_id, u32 attr_usage_id, u32 report_id, - enum 
sensor_hub_read_flags flag) + enum sensor_hub_read_flags flag, + bool is_signed) { struct sensor_hub_data *data = hid_get_drvdata(hsdev->hdev); unsigned long flags; @@ -331,10 +332,16 @@ int sensor_hub_input_attr_get_raw_value(struct hid_sensor_hub_device *hsdev, &hsdev->pending.ready, HZ*5); switch (hsdev->pending.raw_size) { case 1: - ret_val = *(u8 *)hsdev->pending.raw_data; + if (is_signed) + ret_val = *(s8 *)hsdev->pending.raw_data; + else + ret_val = *(u8 *)hsdev->pending.raw_data; break; case 2: - ret_val = *(u16 *)hsdev->pending.raw_data; + if (is_signed) + ret_val = *(s16 *)hsdev->pending.raw_data; + else + ret_val = *(u16 *)hsdev->pending.raw_data; break; case 4: ret_val = *(u32 *)hsdev->pending.raw_data; diff --git a/drivers/hid/hid-steam.c b/drivers/hid/hid-steam.c index 0422ec2b13d20..dc4128bfe2ca9 100644 --- a/drivers/hid/hid-steam.c +++ b/drivers/hid/hid-steam.c @@ -23,8 +23,9 @@ * In order to avoid breaking them this driver creates a layered hidraw device, * so it can detect when the client is running and then: * - it will not send any command to the controller. - * - this input device will be disabled, to avoid double input of the same + * - this input device will be removed, to avoid double input of the same * user action. + * When the client is closed, this input device will be created again. 
* * For additional functions, such as changing the right-pad margin or switching * the led, you can use the user-space tool at: @@ -113,7 +114,7 @@ struct steam_device { spinlock_t lock; struct hid_device *hdev, *client_hdev; struct mutex mutex; - bool client_opened, input_opened; + bool client_opened; struct input_dev __rcu *input; unsigned long quirks; struct work_struct work_connect; @@ -279,18 +280,6 @@ static void steam_set_lizard_mode(struct steam_device *steam, bool enable) } } -static void steam_update_lizard_mode(struct steam_device *steam) -{ - mutex_lock(&steam->mutex); - if (!steam->client_opened) { - if (steam->input_opened) - steam_set_lizard_mode(steam, false); - else - steam_set_lizard_mode(steam, lizard_mode); - } - mutex_unlock(&steam->mutex); -} - static int steam_input_open(struct input_dev *dev) { struct steam_device *steam = input_get_drvdata(dev); @@ -301,7 +290,6 @@ static int steam_input_open(struct input_dev *dev) return ret; mutex_lock(&steam->mutex); - steam->input_opened = true; if (!steam->client_opened && lizard_mode) steam_set_lizard_mode(steam, false); mutex_unlock(&steam->mutex); @@ -313,7 +301,6 @@ static void steam_input_close(struct input_dev *dev) struct steam_device *steam = input_get_drvdata(dev); mutex_lock(&steam->mutex); - steam->input_opened = false; if (!steam->client_opened && lizard_mode) steam_set_lizard_mode(steam, true); mutex_unlock(&steam->mutex); @@ -400,7 +387,7 @@ static int steam_battery_register(struct steam_device *steam) return 0; } -static int steam_register(struct steam_device *steam) +static int steam_input_register(struct steam_device *steam) { struct hid_device *hdev = steam->hdev; struct input_dev *input; @@ -414,17 +401,6 @@ static int steam_register(struct steam_device *steam) return 0; } - /* - * Unlikely, but getting the serial could fail, and it is not so - * important, so make up a serial number and go on. 
- */ - if (steam_get_serial(steam) < 0) - strlcpy(steam->serial_no, "XXXXXXXXXX", - sizeof(steam->serial_no)); - - hid_info(hdev, "Steam Controller '%s' connected", - steam->serial_no); - input = input_allocate_device(); if (!input) return -ENOMEM; @@ -492,11 +468,6 @@ static int steam_register(struct steam_device *steam) goto input_register_fail; rcu_assign_pointer(steam->input, input); - - /* ignore battery errors, we can live without it */ - if (steam->quirks & STEAM_QUIRK_WIRELESS) - steam_battery_register(steam); - return 0; input_register_fail: @@ -504,27 +475,88 @@ static int steam_register(struct steam_device *steam) return ret; } -static void steam_unregister(struct steam_device *steam) +static void steam_input_unregister(struct steam_device *steam) { struct input_dev *input; + rcu_read_lock(); + input = rcu_dereference(steam->input); + rcu_read_unlock(); + if (!input) + return; + RCU_INIT_POINTER(steam->input, NULL); + synchronize_rcu(); + input_unregister_device(input); +} + +static void steam_battery_unregister(struct steam_device *steam) +{ struct power_supply *battery; rcu_read_lock(); - input = rcu_dereference(steam->input); battery = rcu_dereference(steam->battery); rcu_read_unlock(); - if (battery) { - RCU_INIT_POINTER(steam->battery, NULL); - synchronize_rcu(); - power_supply_unregister(battery); + if (!battery) + return; + RCU_INIT_POINTER(steam->battery, NULL); + synchronize_rcu(); + power_supply_unregister(battery); +} + +static int steam_register(struct steam_device *steam) +{ + int ret; + + /* + * This function can be called several times in a row with the + * wireless adaptor, without steam_unregister() between them, because + * another client send a get_connection_status command, for example. + * The battery and serial number are set just once per device. + */ + if (!steam->serial_no[0]) { + /* + * Unlikely, but getting the serial could fail, and it is not so + * important, so make up a serial number and go on. 
+ */ + if (steam_get_serial(steam) < 0) + strlcpy(steam->serial_no, "XXXXXXXXXX", + sizeof(steam->serial_no)); + + hid_info(steam->hdev, "Steam Controller '%s' connected", + steam->serial_no); + + /* ignore battery errors, we can live without it */ + if (steam->quirks & STEAM_QUIRK_WIRELESS) + steam_battery_register(steam); + + mutex_lock(&steam_devices_lock); + list_add(&steam->list, &steam_devices); + mutex_unlock(&steam_devices_lock); } - if (input) { - RCU_INIT_POINTER(steam->input, NULL); - synchronize_rcu(); + + mutex_lock(&steam->mutex); + if (!steam->client_opened) { + steam_set_lizard_mode(steam, lizard_mode); + ret = steam_input_register(steam); + } else { + ret = 0; + } + mutex_unlock(&steam->mutex); + + return ret; +} + +static void steam_unregister(struct steam_device *steam) +{ + steam_battery_unregister(steam); + steam_input_unregister(steam); + if (steam->serial_no[0]) { hid_info(steam->hdev, "Steam Controller '%s' disconnected", steam->serial_no); - input_unregister_device(input); + mutex_lock(&steam_devices_lock); + list_del(&steam->list); + mutex_unlock(&steam_devices_lock); + steam->serial_no[0] = 0; } } @@ -600,6 +632,9 @@ static int steam_client_ll_open(struct hid_device *hdev) mutex_lock(&steam->mutex); steam->client_opened = true; mutex_unlock(&steam->mutex); + + steam_input_unregister(steam); + return ret; } @@ -609,13 +644,13 @@ static void steam_client_ll_close(struct hid_device *hdev) mutex_lock(&steam->mutex); steam->client_opened = false; - if (steam->input_opened) - steam_set_lizard_mode(steam, false); - else - steam_set_lizard_mode(steam, lizard_mode); mutex_unlock(&steam->mutex); hid_hw_close(steam->hdev); + if (steam->connected) { + steam_set_lizard_mode(steam, lizard_mode); + steam_input_register(steam); + } } static int steam_client_ll_raw_request(struct hid_device *hdev, @@ -744,11 +779,6 @@ static int steam_probe(struct hid_device *hdev, } } - mutex_lock(&steam_devices_lock); - steam_update_lizard_mode(steam); - 
list_add(&steam->list, &steam_devices); - mutex_unlock(&steam_devices_lock); - return 0; hid_hw_open_fail: @@ -774,10 +804,6 @@ static void steam_remove(struct hid_device *hdev) return; } - mutex_lock(&steam_devices_lock); - list_del(&steam->list); - mutex_unlock(&steam_devices_lock); - hid_destroy_device(steam->client_hdev); steam->client_opened = false; cancel_work_sync(&steam->work_connect); @@ -792,12 +818,14 @@ static void steam_remove(struct hid_device *hdev) static void steam_do_connect_event(struct steam_device *steam, bool connected) { unsigned long flags; + bool changed; spin_lock_irqsave(&steam->lock, flags); + changed = steam->connected != connected; steam->connected = connected; spin_unlock_irqrestore(&steam->lock, flags); - if (schedule_work(&steam->work_connect) == 0) + if (changed && schedule_work(&steam->work_connect) == 0) dbg_hid("%s: connected=%d event already queued\n", __func__, connected); } @@ -1019,13 +1047,8 @@ static int steam_raw_event(struct hid_device *hdev, return 0; rcu_read_lock(); input = rcu_dereference(steam->input); - if (likely(input)) { + if (likely(input)) steam_do_input_event(steam, input, data); - } else { - dbg_hid("%s: input data without connect event\n", - __func__); - steam_do_connect_event(steam, true); - } rcu_read_unlock(); break; case STEAM_EV_CONNECT: @@ -1074,7 +1097,10 @@ static int steam_param_set_lizard_mode(const char *val, mutex_lock(&steam_devices_lock); list_for_each_entry(steam, &steam_devices, list) { - steam_update_lizard_mode(steam); + mutex_lock(&steam->mutex); + if (!steam->client_opened) + steam_set_lizard_mode(steam, lizard_mode); + mutex_unlock(&steam->mutex); } mutex_unlock(&steam_devices_lock); return 0; diff --git a/drivers/hid/i2c-hid/i2c-hid.c b/drivers/hid/i2c-hid/i2c-hid.c index 4e3592e7a3f72..88daa388e1f61 100644 --- a/drivers/hid/i2c-hid/i2c-hid.c +++ b/drivers/hid/i2c-hid/i2c-hid.c @@ -48,6 +48,7 @@ #define I2C_HID_QUIRK_SET_PWR_WAKEUP_DEV BIT(0) #define I2C_HID_QUIRK_NO_IRQ_AFTER_RESET 
BIT(1) #define I2C_HID_QUIRK_NO_RUNTIME_PM BIT(2) +#define I2C_HID_QUIRK_DELAY_AFTER_SLEEP BIT(3) /* flags */ #define I2C_HID_STARTED 0 @@ -157,6 +158,8 @@ struct i2c_hid { bool irq_wake_enabled; struct mutex reset_lock; + + unsigned long sleep_delay; }; static const struct i2c_hid_quirks { @@ -171,6 +174,8 @@ static const struct i2c_hid_quirks { { I2C_VENDOR_ID_HANTICK, I2C_PRODUCT_ID_HANTICK_5288, I2C_HID_QUIRK_NO_IRQ_AFTER_RESET | I2C_HID_QUIRK_NO_RUNTIME_PM }, + { I2C_VENDOR_ID_RAYDIUM, I2C_PRODUCT_ID_RAYDIUM_4B33, + I2C_HID_QUIRK_DELAY_AFTER_SLEEP }, { 0, 0 } }; @@ -386,6 +391,7 @@ static int i2c_hid_set_power(struct i2c_client *client, int power_state) { struct i2c_hid *ihid = i2c_get_clientdata(client); int ret; + unsigned long now, delay; i2c_hid_dbg(ihid, "%s\n", __func__); @@ -403,9 +409,22 @@ static int i2c_hid_set_power(struct i2c_client *client, int power_state) goto set_pwr_exit; } + if (ihid->quirks & I2C_HID_QUIRK_DELAY_AFTER_SLEEP && + power_state == I2C_HID_PWR_ON) { + now = jiffies; + if (time_after(ihid->sleep_delay, now)) { + delay = jiffies_to_usecs(ihid->sleep_delay - now); + usleep_range(delay, delay + 1); + } + } + ret = __i2c_hid_command(client, &hid_set_power_cmd, power_state, 0, NULL, 0, NULL, 0); + if (ihid->quirks & I2C_HID_QUIRK_DELAY_AFTER_SLEEP && + power_state == I2C_HID_PWR_SLEEP) + ihid->sleep_delay = jiffies + msecs_to_jiffies(20); + if (ret) dev_err(&client->dev, "failed to change power setting.\n"); diff --git a/drivers/hid/uhid.c b/drivers/hid/uhid.c index 3c55073136064..840634e0f1e3c 100644 --- a/drivers/hid/uhid.c +++ b/drivers/hid/uhid.c @@ -12,6 +12,7 @@ #include #include +#include #include #include #include @@ -496,12 +497,13 @@ static int uhid_dev_create2(struct uhid_device *uhid, goto err_free; } - len = min(sizeof(hid->name), sizeof(ev->u.create2.name)); - strlcpy(hid->name, ev->u.create2.name, len); - len = min(sizeof(hid->phys), sizeof(ev->u.create2.phys)); - strlcpy(hid->phys, ev->u.create2.phys, len); - len = 
min(sizeof(hid->uniq), sizeof(ev->u.create2.uniq)); - strlcpy(hid->uniq, ev->u.create2.uniq, len); + /* @hid is zero-initialized, strncpy() is correct, strlcpy() not */ + len = min(sizeof(hid->name), sizeof(ev->u.create2.name)) - 1; + strncpy(hid->name, ev->u.create2.name, len); + len = min(sizeof(hid->phys), sizeof(ev->u.create2.phys)) - 1; + strncpy(hid->phys, ev->u.create2.phys, len); + len = min(sizeof(hid->uniq), sizeof(ev->u.create2.uniq)) - 1; + strncpy(hid->uniq, ev->u.create2.uniq, len); hid->ll_driver = &uhid_hid_driver; hid->bus = ev->u.create2.bus; @@ -722,6 +724,17 @@ static ssize_t uhid_char_write(struct file *file, const char __user *buffer, switch (uhid->input_buf.type) { case UHID_CREATE: + /* + * 'struct uhid_create_req' contains a __user pointer which is + * copied from, so it's unsafe to allow this with elevated + * privileges (e.g. from a setuid binary) or via kernel_write(). + */ + if (file->f_cred != current_cred() || uaccess_kernel()) { + pr_err_once("UHID_CREATE from different security context by process %d (%s), this is not allowed.\n", + task_tgid_vnr(current), current->comm); + ret = -EACCES; + goto unlock; + } ret = uhid_dev_create(uhid, &uhid->input_buf); break; case UHID_CREATE2: diff --git a/drivers/hid/usbhid/hiddev.c b/drivers/hid/usbhid/hiddev.c index 23872d08308cd..a746017fac170 100644 --- a/drivers/hid/usbhid/hiddev.c +++ b/drivers/hid/usbhid/hiddev.c @@ -512,14 +512,24 @@ static noinline int hiddev_ioctl_usage(struct hiddev *hiddev, unsigned int cmd, if (cmd == HIDIOCGCOLLECTIONINDEX) { if (uref->usage_index >= field->maxusage) goto inval; + uref->usage_index = + array_index_nospec(uref->usage_index, + field->maxusage); } else if (uref->usage_index >= field->report_count) goto inval; } - if ((cmd == HIDIOCGUSAGES || cmd == HIDIOCSUSAGES) && - (uref_multi->num_values > HID_MAX_MULTI_USAGES || - uref->usage_index + uref_multi->num_values > field->report_count)) - goto inval; + if (cmd == HIDIOCGUSAGES || cmd == HIDIOCSUSAGES) { + 
if (uref_multi->num_values > HID_MAX_MULTI_USAGES || + uref->usage_index + uref_multi->num_values > + field->report_count) + goto inval; + + uref->usage_index = + array_index_nospec(uref->usage_index, + field->report_count - + uref_multi->num_values); + } switch (cmd) { case HIDIOCGUSAGE: diff --git a/drivers/hid/wacom_wac.c b/drivers/hid/wacom_wac.c index e0a06be5ef5c0..5dd3a8245f0fd 100644 --- a/drivers/hid/wacom_wac.c +++ b/drivers/hid/wacom_wac.c @@ -3335,6 +3335,7 @@ static void wacom_setup_intuos(struct wacom_wac *wacom_wac) void wacom_setup_device_quirks(struct wacom *wacom) { + struct wacom_wac *wacom_wac = &wacom->wacom_wac; struct wacom_features *features = &wacom->wacom_wac.features; /* The pen and pad share the same interface on most devices */ @@ -3464,6 +3465,24 @@ void wacom_setup_device_quirks(struct wacom *wacom) if (features->type == REMOTE) features->device_type |= WACOM_DEVICETYPE_WL_MONITOR; + + /* HID descriptor for DTK-2451 / DTH-2452 claims to report lots + * of things it shouldn't. Lets fix up the damage... 
+ */ + if (wacom->hdev->product == 0x382 || wacom->hdev->product == 0x37d) { + features->quirks &= ~WACOM_QUIRK_TOOLSERIAL; + __clear_bit(BTN_TOOL_BRUSH, wacom_wac->pen_input->keybit); + __clear_bit(BTN_TOOL_PENCIL, wacom_wac->pen_input->keybit); + __clear_bit(BTN_TOOL_AIRBRUSH, wacom_wac->pen_input->keybit); + __clear_bit(ABS_Z, wacom_wac->pen_input->absbit); + __clear_bit(ABS_DISTANCE, wacom_wac->pen_input->absbit); + __clear_bit(ABS_TILT_X, wacom_wac->pen_input->absbit); + __clear_bit(ABS_TILT_Y, wacom_wac->pen_input->absbit); + __clear_bit(ABS_WHEEL, wacom_wac->pen_input->absbit); + __clear_bit(ABS_MISC, wacom_wac->pen_input->absbit); + __clear_bit(MSC_SERIAL, wacom_wac->pen_input->mscbit); + __clear_bit(EV_MSC, wacom_wac->pen_input->evbit); + } } int wacom_setup_pen_input_capabilities(struct input_dev *input_dev, diff --git a/drivers/hv/Kconfig b/drivers/hv/Kconfig index 97954f575c3f6..1c1a2514d6f31 100644 --- a/drivers/hv/Kconfig +++ b/drivers/hv/Kconfig @@ -4,7 +4,7 @@ menu "Microsoft Hyper-V guest support" config HYPERV tristate "Microsoft Hyper-V client drivers" - depends on X86 && ACPI && PCI && X86_LOCAL_APIC && HYPERVISOR_GUEST + depends on X86 && ACPI && X86_LOCAL_APIC && HYPERVISOR_GUEST select PARAVIRT help Select this option to run Linux as a Hyper-V client operating diff --git a/drivers/hv/channel.c b/drivers/hv/channel.c index 741857d80da11..2f164bd746874 100644 --- a/drivers/hv/channel.c +++ b/drivers/hv/channel.c @@ -482,6 +482,14 @@ int vmbus_establish_gpadl(struct vmbus_channel *channel, void *kbuffer, } wait_for_completion(&msginfo->waitevent); + if (msginfo->response.gpadl_created.creation_status != 0) { + pr_err("Failed to establish GPADL: err = 0x%x\n", + msginfo->response.gpadl_created.creation_status); + + ret = -EDQUOT; + goto cleanup; + } + if (channel->rescind) { ret = -ENODEV; goto cleanup; diff --git a/drivers/hv/channel_mgmt.c b/drivers/hv/channel_mgmt.c index 0f0e091c117c6..16eb9b3f1cb1b 100644 --- a/drivers/hv/channel_mgmt.c +++ 
b/drivers/hv/channel_mgmt.c @@ -447,61 +447,16 @@ void vmbus_free_channels(void) } } -/* - * vmbus_process_offer - Process the offer by creating a channel/device - * associated with this offer - */ -static void vmbus_process_offer(struct vmbus_channel *newchannel) +/* Note: the function can run concurrently for primary/sub channels. */ +static void vmbus_add_channel_work(struct work_struct *work) { - struct vmbus_channel *channel; - bool fnew = true; + struct vmbus_channel *newchannel = + container_of(work, struct vmbus_channel, add_channel_work); + struct vmbus_channel *primary_channel = newchannel->primary_channel; unsigned long flags; u16 dev_type; int ret; - /* Make sure this is a new offer */ - mutex_lock(&vmbus_connection.channel_mutex); - - /* - * Now that we have acquired the channel_mutex, - * we can release the potentially racing rescind thread. - */ - atomic_dec(&vmbus_connection.offer_in_progress); - - list_for_each_entry(channel, &vmbus_connection.chn_list, listentry) { - if (!uuid_le_cmp(channel->offermsg.offer.if_type, - newchannel->offermsg.offer.if_type) && - !uuid_le_cmp(channel->offermsg.offer.if_instance, - newchannel->offermsg.offer.if_instance)) { - fnew = false; - break; - } - } - - if (fnew) - list_add_tail(&newchannel->listentry, - &vmbus_connection.chn_list); - - mutex_unlock(&vmbus_connection.channel_mutex); - - if (!fnew) { - /* - * Check to see if this is a sub-channel. - */ - if (newchannel->offermsg.offer.sub_channel_index != 0) { - /* - * Process the sub-channel. 
- */ - newchannel->primary_channel = channel; - spin_lock_irqsave(&channel->lock, flags); - list_add_tail(&newchannel->sc_list, &channel->sc_list); - channel->num_sc++; - spin_unlock_irqrestore(&channel->lock, flags); - } else { - goto err_free_chan; - } - } - dev_type = hv_get_dev_type(newchannel); init_vp_index(newchannel, dev_type); @@ -519,27 +474,26 @@ static void vmbus_process_offer(struct vmbus_channel *newchannel) /* * This state is used to indicate a successful open * so that when we do close the channel normally, we - * can cleanup properly + * can cleanup properly. */ newchannel->state = CHANNEL_OPEN_STATE; - if (!fnew) { - struct hv_device *dev - = newchannel->primary_channel->device_obj; + if (primary_channel != NULL) { + /* newchannel is a sub-channel. */ + struct hv_device *dev = primary_channel->device_obj; if (vmbus_add_channel_kobj(dev, newchannel)) - goto err_free_chan; + goto err_deq_chan; + + if (primary_channel->sc_creation_callback != NULL) + primary_channel->sc_creation_callback(newchannel); - if (channel->sc_creation_callback != NULL) - channel->sc_creation_callback(newchannel); newchannel->probe_done = true; return; } /* - * Start the process of binding this offer to the driver - * We need to set the DeviceObject field before calling - * vmbus_child_dev_add() + * Start the process of binding the primary channel to the driver */ newchannel->device_obj = vmbus_device_create( &newchannel->offermsg.offer.if_type, @@ -568,13 +522,28 @@ static void vmbus_process_offer(struct vmbus_channel *newchannel) err_deq_chan: mutex_lock(&vmbus_connection.channel_mutex); - list_del(&newchannel->listentry); + + /* + * We need to set the flag, otherwise + * vmbus_onoffer_rescind() can be blocked. 
+ */ + newchannel->probe_done = true; + + if (primary_channel == NULL) { + list_del(&newchannel->listentry); + } else { + spin_lock_irqsave(&primary_channel->lock, flags); + list_del(&newchannel->sc_list); + spin_unlock_irqrestore(&primary_channel->lock, flags); + } + mutex_unlock(&vmbus_connection.channel_mutex); if (newchannel->target_cpu != get_cpu()) { put_cpu(); smp_call_function_single(newchannel->target_cpu, - percpu_channel_deq, newchannel, true); + percpu_channel_deq, + newchannel, true); } else { percpu_channel_deq(newchannel); put_cpu(); @@ -582,14 +551,104 @@ static void vmbus_process_offer(struct vmbus_channel *newchannel) vmbus_release_relid(newchannel->offermsg.child_relid); -err_free_chan: free_channel(newchannel); } +/* + * vmbus_process_offer - Process the offer by creating a channel/device + * associated with this offer + */ +static void vmbus_process_offer(struct vmbus_channel *newchannel) +{ + struct vmbus_channel *channel; + struct workqueue_struct *wq; + unsigned long flags; + bool fnew = true; + + mutex_lock(&vmbus_connection.channel_mutex); + + /* + * Now that we have acquired the channel_mutex, + * we can release the potentially racing rescind thread. + */ + atomic_dec(&vmbus_connection.offer_in_progress); + + list_for_each_entry(channel, &vmbus_connection.chn_list, listentry) { + if (!uuid_le_cmp(channel->offermsg.offer.if_type, + newchannel->offermsg.offer.if_type) && + !uuid_le_cmp(channel->offermsg.offer.if_instance, + newchannel->offermsg.offer.if_instance)) { + fnew = false; + break; + } + } + + if (fnew) + list_add_tail(&newchannel->listentry, + &vmbus_connection.chn_list); + else { + /* + * Check to see if this is a valid sub-channel. + */ + if (newchannel->offermsg.offer.sub_channel_index == 0) { + mutex_unlock(&vmbus_connection.channel_mutex); + /* + * Don't call free_channel(), because newchannel->kobj + * is not initialized yet. + */ + kfree(newchannel); + WARN_ON_ONCE(1); + return; + } + /* + * Process the sub-channel. 
+ */ + newchannel->primary_channel = channel; + spin_lock_irqsave(&channel->lock, flags); + list_add_tail(&newchannel->sc_list, &channel->sc_list); + spin_unlock_irqrestore(&channel->lock, flags); + } + + mutex_unlock(&vmbus_connection.channel_mutex); + + /* + * vmbus_process_offer() mustn't call channel->sc_creation_callback() + * directly for sub-channels, because sc_creation_callback() -> + * vmbus_open() may never get the host's response to the + * OPEN_CHANNEL message (the host may rescind a channel at any time, + * e.g. in the case of hot removing a NIC), and vmbus_onoffer_rescind() + * may not wake up the vmbus_open() as it's blocked due to a non-zero + * vmbus_connection.offer_in_progress, and finally we have a deadlock. + * + * The above is also true for primary channels, if the related device + * drivers use sync probing mode by default. + * + * And, usually the handling of primary channels and sub-channels can + * depend on each other, so we should offload them to different + * workqueues to avoid possible deadlock, e.g. in sync-probing mode, + * NIC1's netvsc_subchan_work() can race with NIC2's netvsc_probe() -> + * rtnl_lock(), and causes deadlock: the former gets the rtnl_lock + * and waits for all the sub-channels to appear, but the latter + * can't get the rtnl_lock and this blocks the handling of + * sub-channels. + */ + INIT_WORK(&newchannel->add_channel_work, vmbus_add_channel_work); + wq = fnew ? vmbus_connection.handle_primary_chan_wq : + vmbus_connection.handle_sub_chan_wq; + queue_work(wq, &newchannel->add_channel_work); +} + /* * We use this state to statically distribute the channel interrupt load. */ static int next_numa_node_id; +/* + * init_vp_index() accesses global variables like next_numa_node_id, and + * it can run concurrently for primary channels and sub-channels: see + * vmbus_process_offer(), so we need the lock to protect the global + * variables. 
+ */ +static DEFINE_SPINLOCK(bind_channel_to_cpu_lock); /* * Starting with Win8, we can statically distribute the incoming @@ -606,16 +665,18 @@ static void init_vp_index(struct vmbus_channel *channel, u16 dev_type) bool perf_chn = vmbus_devs[dev_type].perf_device; struct vmbus_channel *primary = channel->primary_channel; int next_node; - struct cpumask available_mask; + cpumask_var_t available_mask; struct cpumask *alloced_mask; if ((vmbus_proto_version == VERSION_WS2008) || - (vmbus_proto_version == VERSION_WIN7) || (!perf_chn)) { + (vmbus_proto_version == VERSION_WIN7) || (!perf_chn) || + !alloc_cpumask_var(&available_mask, GFP_KERNEL)) { /* * Prior to win8, all channel interrupts are * delivered on cpu 0. * Also if the channel is not a performance critical * channel, bind it to cpu 0. + * In case alloc_cpumask_var() fails, bind it to cpu 0. */ channel->numa_node = 0; channel->target_cpu = 0; @@ -623,6 +684,8 @@ static void init_vp_index(struct vmbus_channel *channel, u16 dev_type) return; } + spin_lock(&bind_channel_to_cpu_lock); + /* * Based on the channel affinity policy, we will assign the NUMA * nodes. 
@@ -653,7 +716,7 @@ static void init_vp_index(struct vmbus_channel *channel, u16 dev_type) cpumask_clear(alloced_mask); } - cpumask_xor(&available_mask, alloced_mask, + cpumask_xor(available_mask, alloced_mask, cpumask_of_node(primary->numa_node)); cur_cpu = -1; @@ -671,10 +734,10 @@ static void init_vp_index(struct vmbus_channel *channel, u16 dev_type) } while (true) { - cur_cpu = cpumask_next(cur_cpu, &available_mask); + cur_cpu = cpumask_next(cur_cpu, available_mask); if (cur_cpu >= nr_cpu_ids) { cur_cpu = -1; - cpumask_copy(&available_mask, + cpumask_copy(available_mask, cpumask_of_node(primary->numa_node)); continue; } @@ -704,6 +767,10 @@ static void init_vp_index(struct vmbus_channel *channel, u16 dev_type) channel->target_cpu = cur_cpu; channel->target_vp = hv_cpu_number_to_vp_number(cur_cpu); + + spin_unlock(&bind_channel_to_cpu_lock); + + free_cpumask_var(available_mask); } static void vmbus_wait_for_unload(void) diff --git a/drivers/hv/connection.c b/drivers/hv/connection.c index f4d08c8ac7f8f..4fe117b761ce0 100644 --- a/drivers/hv/connection.c +++ b/drivers/hv/connection.c @@ -190,6 +190,20 @@ int vmbus_connect(void) goto cleanup; } + vmbus_connection.handle_primary_chan_wq = + create_workqueue("hv_pri_chan"); + if (!vmbus_connection.handle_primary_chan_wq) { + ret = -ENOMEM; + goto cleanup; + } + + vmbus_connection.handle_sub_chan_wq = + create_workqueue("hv_sub_chan"); + if (!vmbus_connection.handle_sub_chan_wq) { + ret = -ENOMEM; + goto cleanup; + } + INIT_LIST_HEAD(&vmbus_connection.chn_msg_list); spin_lock_init(&vmbus_connection.channelmsg_lock); @@ -280,10 +294,14 @@ void vmbus_disconnect(void) */ vmbus_initiate_unload(false); - if (vmbus_connection.work_queue) { - drain_workqueue(vmbus_connection.work_queue); + if (vmbus_connection.handle_sub_chan_wq) + destroy_workqueue(vmbus_connection.handle_sub_chan_wq); + + if (vmbus_connection.handle_primary_chan_wq) + destroy_workqueue(vmbus_connection.handle_primary_chan_wq); + + if 
(vmbus_connection.work_queue) destroy_workqueue(vmbus_connection.work_queue); - } if (vmbus_connection.int_page) { free_pages((unsigned long)vmbus_connection.int_page, 0); diff --git a/drivers/hv/hv_balloon.c b/drivers/hv/hv_balloon.c index b1b7880827931..d2a735ac9ba1d 100644 --- a/drivers/hv/hv_balloon.c +++ b/drivers/hv/hv_balloon.c @@ -888,12 +888,14 @@ static unsigned long handle_pg_range(unsigned long pg_start, pfn_cnt -= pgs_ol; /* * Check if the corresponding memory block is already - * online by checking its last previously backed page. - * In case it is we need to bring rest (which was not - * backed previously) online too. + * online. It is possible to observe struct pages still + * being uninitialized here so check section instead. + * In case the section is online we need to bring the + * rest of pfns (which were not backed previously) + * online too. */ if (start_pfn > has->start_pfn && - !PageReserved(pfn_to_page(start_pfn - 1))) + online_section_nr(pfn_to_section_nr(start_pfn))) hv_bring_pgs_online(has, start_pfn, pgs_ol); } diff --git a/drivers/hv/hyperv_vmbus.h b/drivers/hv/hyperv_vmbus.h index 72eaba3d50fc2..87d3d7da78f87 100644 --- a/drivers/hv/hyperv_vmbus.h +++ b/drivers/hv/hyperv_vmbus.h @@ -335,7 +335,14 @@ struct vmbus_connection { struct list_head chn_list; struct mutex channel_mutex; + /* + * An offer message is handled first on the work_queue, and then + * is further handled on handle_primary_chan_wq or + * handle_sub_chan_wq. + */ struct workqueue_struct *work_queue; + struct workqueue_struct *handle_primary_chan_wq; + struct workqueue_struct *handle_sub_chan_wq; }; diff --git a/drivers/hv/ring_buffer.c b/drivers/hv/ring_buffer.c index 3e90eb91db45a..6cb45f256107e 100644 --- a/drivers/hv/ring_buffer.c +++ b/drivers/hv/ring_buffer.c @@ -164,26 +164,25 @@ hv_get_ringbuffer_availbytes(const struct hv_ring_buffer_info *rbi, } /* Get various debug metrics for the specified ring buffer. 
*/ -void hv_ringbuffer_get_debuginfo(const struct hv_ring_buffer_info *ring_info, - struct hv_ring_buffer_debug_info *debug_info) +int hv_ringbuffer_get_debuginfo(const struct hv_ring_buffer_info *ring_info, + struct hv_ring_buffer_debug_info *debug_info) { u32 bytes_avail_towrite; u32 bytes_avail_toread; - if (ring_info->ring_buffer) { - hv_get_ringbuffer_availbytes(ring_info, - &bytes_avail_toread, - &bytes_avail_towrite); - - debug_info->bytes_avail_toread = bytes_avail_toread; - debug_info->bytes_avail_towrite = bytes_avail_towrite; - debug_info->current_read_index = - ring_info->ring_buffer->read_index; - debug_info->current_write_index = - ring_info->ring_buffer->write_index; - debug_info->current_interrupt_mask = - ring_info->ring_buffer->interrupt_mask; - } + if (!ring_info->ring_buffer) + return -EINVAL; + + hv_get_ringbuffer_availbytes(ring_info, + &bytes_avail_toread, + &bytes_avail_towrite); + debug_info->bytes_avail_toread = bytes_avail_toread; + debug_info->bytes_avail_towrite = bytes_avail_towrite; + debug_info->current_read_index = ring_info->ring_buffer->read_index; + debug_info->current_write_index = ring_info->ring_buffer->write_index; + debug_info->current_interrupt_mask + = ring_info->ring_buffer->interrupt_mask; + return 0; } EXPORT_SYMBOL_GPL(hv_ringbuffer_get_debuginfo); diff --git a/drivers/hv/vmbus_drv.c b/drivers/hv/vmbus_drv.c index c71cc857b649d..9aa18f387a346 100644 --- a/drivers/hv/vmbus_drv.c +++ b/drivers/hv/vmbus_drv.c @@ -313,10 +313,16 @@ static ssize_t out_intr_mask_show(struct device *dev, { struct hv_device *hv_dev = device_to_hv_device(dev); struct hv_ring_buffer_debug_info outbound; + int ret; if (!hv_dev->channel) return -ENODEV; - hv_ringbuffer_get_debuginfo(&hv_dev->channel->outbound, &outbound); + + ret = hv_ringbuffer_get_debuginfo(&hv_dev->channel->outbound, + &outbound); + if (ret < 0) + return ret; + return sprintf(buf, "%d\n", outbound.current_interrupt_mask); } static DEVICE_ATTR_RO(out_intr_mask); @@ -326,10 
+332,15 @@ static ssize_t out_read_index_show(struct device *dev, { struct hv_device *hv_dev = device_to_hv_device(dev); struct hv_ring_buffer_debug_info outbound; + int ret; if (!hv_dev->channel) return -ENODEV; - hv_ringbuffer_get_debuginfo(&hv_dev->channel->outbound, &outbound); + + ret = hv_ringbuffer_get_debuginfo(&hv_dev->channel->outbound, + &outbound); + if (ret < 0) + return ret; return sprintf(buf, "%d\n", outbound.current_read_index); } static DEVICE_ATTR_RO(out_read_index); @@ -340,10 +351,15 @@ static ssize_t out_write_index_show(struct device *dev, { struct hv_device *hv_dev = device_to_hv_device(dev); struct hv_ring_buffer_debug_info outbound; + int ret; if (!hv_dev->channel) return -ENODEV; - hv_ringbuffer_get_debuginfo(&hv_dev->channel->outbound, &outbound); + + ret = hv_ringbuffer_get_debuginfo(&hv_dev->channel->outbound, + &outbound); + if (ret < 0) + return ret; return sprintf(buf, "%d\n", outbound.current_write_index); } static DEVICE_ATTR_RO(out_write_index); @@ -354,10 +370,15 @@ static ssize_t out_read_bytes_avail_show(struct device *dev, { struct hv_device *hv_dev = device_to_hv_device(dev); struct hv_ring_buffer_debug_info outbound; + int ret; if (!hv_dev->channel) return -ENODEV; - hv_ringbuffer_get_debuginfo(&hv_dev->channel->outbound, &outbound); + + ret = hv_ringbuffer_get_debuginfo(&hv_dev->channel->outbound, + &outbound); + if (ret < 0) + return ret; return sprintf(buf, "%d\n", outbound.bytes_avail_toread); } static DEVICE_ATTR_RO(out_read_bytes_avail); @@ -368,10 +389,15 @@ static ssize_t out_write_bytes_avail_show(struct device *dev, { struct hv_device *hv_dev = device_to_hv_device(dev); struct hv_ring_buffer_debug_info outbound; + int ret; if (!hv_dev->channel) return -ENODEV; - hv_ringbuffer_get_debuginfo(&hv_dev->channel->outbound, &outbound); + + ret = hv_ringbuffer_get_debuginfo(&hv_dev->channel->outbound, + &outbound); + if (ret < 0) + return ret; return sprintf(buf, "%d\n", outbound.bytes_avail_towrite); } static 
DEVICE_ATTR_RO(out_write_bytes_avail); @@ -381,10 +407,15 @@ static ssize_t in_intr_mask_show(struct device *dev, { struct hv_device *hv_dev = device_to_hv_device(dev); struct hv_ring_buffer_debug_info inbound; + int ret; if (!hv_dev->channel) return -ENODEV; - hv_ringbuffer_get_debuginfo(&hv_dev->channel->inbound, &inbound); + + ret = hv_ringbuffer_get_debuginfo(&hv_dev->channel->inbound, &inbound); + if (ret < 0) + return ret; + return sprintf(buf, "%d\n", inbound.current_interrupt_mask); } static DEVICE_ATTR_RO(in_intr_mask); @@ -394,10 +425,15 @@ static ssize_t in_read_index_show(struct device *dev, { struct hv_device *hv_dev = device_to_hv_device(dev); struct hv_ring_buffer_debug_info inbound; + int ret; if (!hv_dev->channel) return -ENODEV; - hv_ringbuffer_get_debuginfo(&hv_dev->channel->inbound, &inbound); + + ret = hv_ringbuffer_get_debuginfo(&hv_dev->channel->inbound, &inbound); + if (ret < 0) + return ret; + return sprintf(buf, "%d\n", inbound.current_read_index); } static DEVICE_ATTR_RO(in_read_index); @@ -407,10 +443,15 @@ static ssize_t in_write_index_show(struct device *dev, { struct hv_device *hv_dev = device_to_hv_device(dev); struct hv_ring_buffer_debug_info inbound; + int ret; if (!hv_dev->channel) return -ENODEV; - hv_ringbuffer_get_debuginfo(&hv_dev->channel->inbound, &inbound); + + ret = hv_ringbuffer_get_debuginfo(&hv_dev->channel->inbound, &inbound); + if (ret < 0) + return ret; + return sprintf(buf, "%d\n", inbound.current_write_index); } static DEVICE_ATTR_RO(in_write_index); @@ -421,10 +462,15 @@ static ssize_t in_read_bytes_avail_show(struct device *dev, { struct hv_device *hv_dev = device_to_hv_device(dev); struct hv_ring_buffer_debug_info inbound; + int ret; if (!hv_dev->channel) return -ENODEV; - hv_ringbuffer_get_debuginfo(&hv_dev->channel->inbound, &inbound); + + ret = hv_ringbuffer_get_debuginfo(&hv_dev->channel->inbound, &inbound); + if (ret < 0) + return ret; + return sprintf(buf, "%d\n", inbound.bytes_avail_toread); } static 
DEVICE_ATTR_RO(in_read_bytes_avail); @@ -435,10 +481,15 @@ static ssize_t in_write_bytes_avail_show(struct device *dev, { struct hv_device *hv_dev = device_to_hv_device(dev); struct hv_ring_buffer_debug_info inbound; + int ret; if (!hv_dev->channel) return -ENODEV; - hv_ringbuffer_get_debuginfo(&hv_dev->channel->inbound, &inbound); + + ret = hv_ringbuffer_get_debuginfo(&hv_dev->channel->inbound, &inbound); + if (ret < 0) + return ret; + return sprintf(buf, "%d\n", inbound.bytes_avail_towrite); } static DEVICE_ATTR_RO(in_write_bytes_avail); diff --git a/drivers/hwmon/hwmon.c b/drivers/hwmon/hwmon.c index 33d51281272bb..fcdbac4a56e39 100644 --- a/drivers/hwmon/hwmon.c +++ b/drivers/hwmon/hwmon.c @@ -635,8 +635,10 @@ __hwmon_device_register(struct device *dev, const char *name, void *drvdata, if (info[i]->config[j] & HWMON_T_INPUT) { err = hwmon_thermal_add_sensor(dev, hwdev, j); - if (err) - goto free_device; + if (err) { + device_unregister(hdev); + goto ida_remove; + } } } } @@ -644,8 +646,6 @@ __hwmon_device_register(struct device *dev, const char *name, void *drvdata, return hdev; -free_device: - device_unregister(hdev); free_hwmon: kfree(hwdev); ida_remove: diff --git a/drivers/hwmon/ibmpowernv.c b/drivers/hwmon/ibmpowernv.c index 83472808c8163..64d05edff1304 100644 --- a/drivers/hwmon/ibmpowernv.c +++ b/drivers/hwmon/ibmpowernv.c @@ -181,7 +181,7 @@ static ssize_t show_label(struct device *dev, struct device_attribute *devattr, return sprintf(buf, "%s\n", sdata->label); } -static int __init get_logical_cpu(int hwcpu) +static int get_logical_cpu(int hwcpu) { int cpu; @@ -192,9 +192,8 @@ static int __init get_logical_cpu(int hwcpu) return -ENOENT; } -static void __init make_sensor_label(struct device_node *np, - struct sensor_data *sdata, - const char *label) +static void make_sensor_label(struct device_node *np, + struct sensor_data *sdata, const char *label) { u32 id; size_t n; diff --git a/drivers/hwmon/ina2xx.c b/drivers/hwmon/ina2xx.c index 
71d3445ba869c..07ee19573b3f0 100644 --- a/drivers/hwmon/ina2xx.c +++ b/drivers/hwmon/ina2xx.c @@ -274,7 +274,7 @@ static int ina2xx_get_value(struct ina2xx_data *data, u8 reg, break; case INA2XX_CURRENT: /* signed register, result in mA */ - val = regval * data->current_lsb_uA; + val = (s16)regval * data->current_lsb_uA; val = DIV_ROUND_CLOSEST(val, 1000); break; case INA2XX_CALIBRATION: @@ -491,7 +491,7 @@ static int ina2xx_probe(struct i2c_client *client, } data->groups[group++] = &ina2xx_group; - if (id->driver_data == ina226) + if (chip == ina226) data->groups[group++] = &ina226_group; hwmon_dev = devm_hwmon_device_register_with_groups(dev, client->name, @@ -500,7 +500,7 @@ static int ina2xx_probe(struct i2c_client *client, return PTR_ERR(hwmon_dev); dev_info(dev, "power monitor %s (Rshunt = %li uOhm)\n", - id->name, data->rshunt); + client->name, data->rshunt); return 0; } diff --git a/drivers/hwmon/mlxreg-fan.c b/drivers/hwmon/mlxreg-fan.c index de46577c7d5a1..d8fa4bea4bc84 100644 --- a/drivers/hwmon/mlxreg-fan.c +++ b/drivers/hwmon/mlxreg-fan.c @@ -51,7 +51,7 @@ */ #define MLXREG_FAN_GET_RPM(rval, d, s) (DIV_ROUND_CLOSEST(15000000 * 100, \ ((rval) + (s)) * (d))) -#define MLXREG_FAN_GET_FAULT(val, mask) (!!((val) ^ (mask))) +#define MLXREG_FAN_GET_FAULT(val, mask) (!((val) ^ (mask))) #define MLXREG_FAN_PWM_DUTY2STATE(duty) (DIV_ROUND_CLOSEST((duty) * \ MLXREG_FAN_MAX_STATE, \ MLXREG_FAN_MAX_DUTY)) diff --git a/drivers/hwmon/pmbus/pmbus.c b/drivers/hwmon/pmbus/pmbus.c index 7718e58dbda54..7688dab32f6e6 100644 --- a/drivers/hwmon/pmbus/pmbus.c +++ b/drivers/hwmon/pmbus/pmbus.c @@ -118,6 +118,8 @@ static int pmbus_identify(struct i2c_client *client, } else { info->pages = 1; } + + pmbus_clear_faults(client); } if (pmbus_check_byte_register(client, 0, PMBUS_VOUT_MODE)) { diff --git a/drivers/hwmon/pmbus/pmbus_core.c b/drivers/hwmon/pmbus/pmbus_core.c index 82c3754e21e33..2e2b5851139c2 100644 --- a/drivers/hwmon/pmbus/pmbus_core.c +++ 
b/drivers/hwmon/pmbus/pmbus_core.c @@ -2015,7 +2015,10 @@ static int pmbus_init_common(struct i2c_client *client, struct pmbus_data *data, if (ret >= 0 && (ret & PB_CAPABILITY_ERROR_CHECK)) client->flags |= I2C_CLIENT_PEC; - pmbus_clear_faults(client); + if (data->info->pages) + pmbus_clear_faults(client); + else + pmbus_clear_fault_page(client, -1); if (info->identify) { ret = (*info->identify)(client, info); diff --git a/drivers/hwmon/pwm-fan.c b/drivers/hwmon/pwm-fan.c index 7838af58f92d5..9d611dd268e1e 100644 --- a/drivers/hwmon/pwm-fan.c +++ b/drivers/hwmon/pwm-fan.c @@ -290,9 +290,19 @@ static int pwm_fan_remove(struct platform_device *pdev) static int pwm_fan_suspend(struct device *dev) { struct pwm_fan_ctx *ctx = dev_get_drvdata(dev); + struct pwm_args args; + int ret; + + pwm_get_args(ctx->pwm, &args); + + if (ctx->pwm_value) { + ret = pwm_config(ctx->pwm, 0, args.period); + if (ret < 0) + return ret; - if (ctx->pwm_value) pwm_disable(ctx->pwm); + } + return 0; } diff --git a/drivers/hwmon/raspberrypi-hwmon.c b/drivers/hwmon/raspberrypi-hwmon.c index be5ba46908953..0d0457245e7d0 100644 --- a/drivers/hwmon/raspberrypi-hwmon.c +++ b/drivers/hwmon/raspberrypi-hwmon.c @@ -115,7 +115,6 @@ static int rpi_hwmon_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; struct rpi_hwmon_data *data; - int ret; data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL); if (!data) @@ -124,11 +123,6 @@ static int rpi_hwmon_probe(struct platform_device *pdev) /* Parent driver assure that firmware is correct */ data->fw = dev_get_drvdata(dev->parent); - /* Init throttled */ - ret = rpi_firmware_property(data->fw, RPI_FIRMWARE_GET_THROTTLED, - &data->last_throttled, - sizeof(data->last_throttled)); - data->hwmon_dev = devm_hwmon_device_register_with_info(dev, "rpi_volt", data, &rpi_chip_info, diff --git a/drivers/hwmon/w83795.c b/drivers/hwmon/w83795.c index 49276bbdac3dd..1bb80f992aa86 100644 --- a/drivers/hwmon/w83795.c +++ b/drivers/hwmon/w83795.c @@ -1691,7 
+1691,7 @@ store_sf_setup(struct device *dev, struct device_attribute *attr, * somewhere else in the code */ #define SENSOR_ATTR_TEMP(index) { \ - SENSOR_ATTR_2(temp##index##_type, S_IRUGO | (index < 4 ? S_IWUSR : 0), \ + SENSOR_ATTR_2(temp##index##_type, S_IRUGO | (index < 5 ? S_IWUSR : 0), \ show_temp_mode, store_temp_mode, NOT_USED, index - 1), \ SENSOR_ATTR_2(temp##index##_input, S_IRUGO, show_temp, \ NULL, TEMP_READ, index - 1), \ diff --git a/drivers/hwtracing/coresight/coresight-etb10.c b/drivers/hwtracing/coresight/coresight-etb10.c index 306119eaf16a6..0dad8626bcfbf 100644 --- a/drivers/hwtracing/coresight/coresight-etb10.c +++ b/drivers/hwtracing/coresight/coresight-etb10.c @@ -147,6 +147,10 @@ static int etb_enable(struct coresight_device *csdev, u32 mode) if (val == CS_MODE_PERF) return -EBUSY; + /* Don't let perf disturb sysFS sessions */ + if (val == CS_MODE_SYSFS && mode == CS_MODE_PERF) + return -EBUSY; + /* Nothing to do, the tracer is already enabled. */ if (val == CS_MODE_SYSFS) goto out; diff --git a/drivers/hwtracing/intel_th/msu.c b/drivers/hwtracing/intel_th/msu.c index d293e55553bd6..ba7aaf421f36c 100644 --- a/drivers/hwtracing/intel_th/msu.c +++ b/drivers/hwtracing/intel_th/msu.c @@ -1423,7 +1423,8 @@ nr_pages_store(struct device *dev, struct device_attribute *attr, if (!end) break; - len -= end - p; + /* consume the number and the following comma, hence +1 */ + len -= end - p + 1; p = end + 1; } while (len); diff --git a/drivers/hwtracing/stm/Kconfig b/drivers/hwtracing/stm/Kconfig index 723e2d90083d6..752dd66742bfd 100644 --- a/drivers/hwtracing/stm/Kconfig +++ b/drivers/hwtracing/stm/Kconfig @@ -11,6 +11,35 @@ config STM if STM +config STM_PROTO_BASIC + tristate "Basic STM framing protocol driver" + default CONFIG_STM + help + This is a simple framing protocol for sending data over STM + devices. This was the protocol that the STM framework used + exclusively until the MIPI SyS-T support was added. 
Use this + driver for compatibility with your existing STM setup. + + The receiving side only needs to be able to decode the MIPI + STP protocol in order to extract the data. + + If you want to be able to use the basic protocol or want the + backwards compatibility for your existing setup, say Y. + +config STM_PROTO_SYS_T + tristate "MIPI SyS-T STM framing protocol driver" + default CONFIG_STM + help + This is an implementation of MIPI SyS-T protocol to be used + over the STP transport. In addition to the data payload, it + also carries additional metadata for time correlation, better + means of trace source identification, etc. + + The receiving side must be able to decode this protocol in + addition to the MIPI STP, in order to extract the data. + + If you don't know what this is, say N. + config STM_DUMMY tristate "Dummy STM driver" help diff --git a/drivers/hwtracing/stm/Makefile b/drivers/hwtracing/stm/Makefile index effc19e5190f4..1692fcd292779 100644 --- a/drivers/hwtracing/stm/Makefile +++ b/drivers/hwtracing/stm/Makefile @@ -3,6 +3,12 @@ obj-$(CONFIG_STM) += stm_core.o stm_core-y := core.o policy.o +obj-$(CONFIG_STM_PROTO_BASIC) += stm_p_basic.o +obj-$(CONFIG_STM_PROTO_SYS_T) += stm_p_sys-t.o + +stm_p_basic-y := p_basic.o +stm_p_sys-t-y := p_sys-t.o + obj-$(CONFIG_STM_DUMMY) += dummy_stm.o obj-$(CONFIG_STM_SOURCE_CONSOLE) += stm_console.o diff --git a/drivers/hwtracing/stm/core.c b/drivers/hwtracing/stm/core.c index 10bcb5d73f90e..93ce3aa740a92 100644 --- a/drivers/hwtracing/stm/core.c +++ b/drivers/hwtracing/stm/core.c @@ -293,15 +293,15 @@ static int stm_output_assign(struct stm_device *stm, unsigned int width, if (width > stm->data->sw_nchannels) return -EINVAL; - if (policy_node) { - stp_policy_node_get_ranges(policy_node, - &midx, &mend, &cidx, &cend); - } else { - midx = stm->data->sw_start; - cidx = 0; - mend = stm->data->sw_end; - cend = stm->data->sw_nchannels - 1; - } + /* We no longer accept policy_node==NULL here */ + if 
(WARN_ON_ONCE(!policy_node)) + return -EINVAL; + + /* + * Also, the caller holds reference to policy_node, so it won't + * disappear on us. + */ + stp_policy_node_get_ranges(policy_node, &midx, &mend, &cidx, &cend); spin_lock(&stm->mc_lock); spin_lock(&output->lock); @@ -316,11 +316,26 @@ static int stm_output_assign(struct stm_device *stm, unsigned int width, output->master = midx; output->channel = cidx; output->nr_chans = width; + if (stm->pdrv->output_open) { + void *priv = stp_policy_node_priv(policy_node); + + if (WARN_ON_ONCE(!priv)) + goto unlock; + + /* configfs subsys mutex is held by the caller */ + ret = stm->pdrv->output_open(priv, output); + if (ret) + goto unlock; + } + stm_output_claim(stm, output); dev_dbg(&stm->dev, "assigned %u:%u (+%u)\n", midx, cidx, width); ret = 0; unlock: + if (ret) + output->nr_chans = 0; + spin_unlock(&output->lock); spin_unlock(&stm->mc_lock); @@ -333,6 +348,8 @@ static void stm_output_free(struct stm_device *stm, struct stm_output *output) spin_lock(&output->lock); if (output->nr_chans) stm_output_disclaim(stm, output); + if (stm->pdrv && stm->pdrv->output_close) + stm->pdrv->output_close(output); spin_unlock(&output->lock); spin_unlock(&stm->mc_lock); } @@ -349,6 +366,127 @@ static int major_match(struct device *dev, const void *data) return MAJOR(dev->devt) == major; } +/* + * Framing protocol management + * Modules can implement STM protocol drivers and (un-)register them + * with the STM class framework. + */ +static struct list_head stm_pdrv_head; +static struct mutex stm_pdrv_mutex; + +struct stm_pdrv_entry { + struct list_head entry; + const struct stm_protocol_driver *pdrv; + const struct config_item_type *node_type; +}; + +static const struct stm_pdrv_entry * +__stm_lookup_protocol(const char *name) +{ + struct stm_pdrv_entry *pe; + + /* + * If no name is given (NULL or ""), fall back to "p_basic". 
+ */ + if (!name || !*name) + name = "p_basic"; + + list_for_each_entry(pe, &stm_pdrv_head, entry) { + if (!strcmp(name, pe->pdrv->name)) + return pe; + } + + return NULL; +} + +int stm_register_protocol(const struct stm_protocol_driver *pdrv) +{ + struct stm_pdrv_entry *pe = NULL; + int ret = -ENOMEM; + + mutex_lock(&stm_pdrv_mutex); + + if (__stm_lookup_protocol(pdrv->name)) { + ret = -EEXIST; + goto unlock; + } + + pe = kzalloc(sizeof(*pe), GFP_KERNEL); + if (!pe) + goto unlock; + + if (pdrv->policy_attr) { + pe->node_type = get_policy_node_type(pdrv->policy_attr); + if (!pe->node_type) + goto unlock; + } + + list_add_tail(&pe->entry, &stm_pdrv_head); + pe->pdrv = pdrv; + + ret = 0; +unlock: + mutex_unlock(&stm_pdrv_mutex); + + if (ret) + kfree(pe); + + return ret; +} +EXPORT_SYMBOL_GPL(stm_register_protocol); + +void stm_unregister_protocol(const struct stm_protocol_driver *pdrv) +{ + struct stm_pdrv_entry *pe, *iter; + + mutex_lock(&stm_pdrv_mutex); + + list_for_each_entry_safe(pe, iter, &stm_pdrv_head, entry) { + if (pe->pdrv == pdrv) { + list_del(&pe->entry); + + if (pe->node_type) { + kfree(pe->node_type->ct_attrs); + kfree(pe->node_type); + } + kfree(pe); + break; + } + } + + mutex_unlock(&stm_pdrv_mutex); +} +EXPORT_SYMBOL_GPL(stm_unregister_protocol); + +static bool stm_get_protocol(const struct stm_protocol_driver *pdrv) +{ + return try_module_get(pdrv->owner); +} + +void stm_put_protocol(const struct stm_protocol_driver *pdrv) +{ + module_put(pdrv->owner); +} + +int stm_lookup_protocol(const char *name, + const struct stm_protocol_driver **pdrv, + const struct config_item_type **node_type) +{ + const struct stm_pdrv_entry *pe; + + mutex_lock(&stm_pdrv_mutex); + + pe = __stm_lookup_protocol(name); + if (pe && pe->pdrv && stm_get_protocol(pe->pdrv)) { + *pdrv = pe->pdrv; + *node_type = pe->node_type; + } + + mutex_unlock(&stm_pdrv_mutex); + + return pe ? 
0 : -ENOENT; +} + static int stm_char_open(struct inode *inode, struct file *file) { struct stm_file *stmf; @@ -405,42 +543,81 @@ static int stm_char_release(struct inode *inode, struct file *file) return 0; } -static int stm_file_assign(struct stm_file *stmf, char *id, unsigned int width) +static int +stm_assign_first_policy(struct stm_device *stm, struct stm_output *output, + char **ids, unsigned int width) { - struct stm_device *stm = stmf->stm; - int ret; + struct stp_policy_node *pn; + int err, n; - stmf->policy_node = stp_policy_node_lookup(stm, id); + /* + * On success, stp_policy_node_lookup() will return holding the + * configfs subsystem mutex, which is then released in + * stp_policy_node_put(). This allows the pdrv->output_open() in + * stm_output_assign() to serialize against the attribute accessors. + */ + for (n = 0, pn = NULL; ids[n] && !pn; n++) + pn = stp_policy_node_lookup(stm, ids[n]); - ret = stm_output_assign(stm, width, stmf->policy_node, &stmf->output); + if (!pn) + return -EINVAL; - if (stmf->policy_node) - stp_policy_node_put(stmf->policy_node); + err = stm_output_assign(stm, width, pn, output); - return ret; + stp_policy_node_put(pn); + + return err; } -static ssize_t notrace stm_write(struct stm_data *data, unsigned int master, - unsigned int channel, const char *buf, size_t count) +/** + * stm_data_write() - send the given payload as data packets + * @data: stm driver's data + * @m: STP master + * @c: STP channel + * @ts_first: timestamp the first packet + * @buf: data payload buffer + * @count: data payload size + */ +ssize_t notrace stm_data_write(struct stm_data *data, unsigned int m, + unsigned int c, bool ts_first, const void *buf, + size_t count) { - unsigned int flags = STP_PACKET_TIMESTAMPED; - const unsigned char *p = buf, nil = 0; - size_t pos; + unsigned int flags = ts_first ? 
STP_PACKET_TIMESTAMPED : 0; ssize_t sz; + size_t pos; - for (pos = 0, p = buf; count > pos; pos += sz, p += sz) { + for (pos = 0, sz = 0; pos < count; pos += sz) { sz = min_t(unsigned int, count - pos, 8); - sz = data->packet(data, master, channel, STP_PACKET_DATA, flags, - sz, p); - flags = 0; - - if (sz < 0) + sz = data->packet(data, m, c, STP_PACKET_DATA, flags, sz, + &((u8 *)buf)[pos]); + if (sz <= 0) break; + + if (ts_first) { + flags = 0; + ts_first = false; + } } - data->packet(data, master, channel, STP_PACKET_FLAG, 0, 0, &nil); + return sz < 0 ? sz : pos; +} +EXPORT_SYMBOL_GPL(stm_data_write); + +static ssize_t notrace +stm_write(struct stm_device *stm, struct stm_output *output, + unsigned int chan, const char *buf, size_t count) +{ + int err; + + /* stm->pdrv is serialized against policy_mutex */ + if (!stm->pdrv) + return -ENODEV; + + err = stm->pdrv->write(stm->data, output, chan, buf, count); + if (err < 0) + return err; - return pos; + return err; } static ssize_t stm_char_write(struct file *file, const char __user *buf, @@ -455,16 +632,21 @@ static ssize_t stm_char_write(struct file *file, const char __user *buf, count = PAGE_SIZE - 1; /* - * if no m/c have been assigned to this writer up to this - * point, use "default" policy entry + * If no m/c have been assigned to this writer up to this + * point, try to use the task name and "default" policy entries. 
*/ if (!stmf->output.nr_chans) { - err = stm_file_assign(stmf, "default", 1); + char comm[sizeof(current->comm)]; + char *ids[] = { comm, "default", NULL }; + + get_task_comm(comm, current); + + err = stm_assign_first_policy(stmf->stm, &stmf->output, ids, 1); /* * EBUSY means that somebody else just assigned this * output, which is just fine for write() */ - if (err && err != -EBUSY) + if (err) return err; } @@ -480,8 +662,7 @@ static ssize_t stm_char_write(struct file *file, const char __user *buf, pm_runtime_get_sync(&stm->dev); - count = stm_write(stm->data, stmf->output.master, stmf->output.channel, - kbuf, count); + count = stm_write(stm, &stmf->output, 0, kbuf, count); pm_runtime_mark_last_busy(&stm->dev); pm_runtime_put_autosuspend(&stm->dev); @@ -550,6 +731,7 @@ static int stm_char_policy_set_ioctl(struct stm_file *stmf, void __user *arg) { struct stm_device *stm = stmf->stm; struct stp_policy_id *id; + char *ids[] = { NULL, NULL }; int ret = -EINVAL; u32 size; @@ -582,7 +764,9 @@ static int stm_char_policy_set_ioctl(struct stm_file *stmf, void __user *arg) id->width > PAGE_SIZE / stm->data->sw_mmiosz) goto err_free; - ret = stm_file_assign(stmf, id->id, id->width); + ids[0] = id->id; + ret = stm_assign_first_policy(stmf->stm, &stmf->output, ids, + id->width); if (ret) goto err_free; @@ -818,8 +1002,8 @@ EXPORT_SYMBOL_GPL(stm_unregister_device); static int stm_source_link_add(struct stm_source_device *src, struct stm_device *stm) { - char *id; - int err; + char *ids[] = { NULL, "default", NULL }; + int err = -ENOMEM; mutex_lock(&stm->link_mutex); spin_lock(&stm->link_lock); @@ -833,19 +1017,13 @@ static int stm_source_link_add(struct stm_source_device *src, spin_unlock(&stm->link_lock); mutex_unlock(&stm->link_mutex); - id = kstrdup(src->data->name, GFP_KERNEL); - if (id) { - src->policy_node = - stp_policy_node_lookup(stm, id); - - kfree(id); - } - - err = stm_output_assign(stm, src->data->nr_chans, - src->policy_node, &src->output); + ids[0] = 
kstrdup(src->data->name, GFP_KERNEL); + if (!ids[0]) + goto fail_detach; - if (src->policy_node) - stp_policy_node_put(src->policy_node); + err = stm_assign_first_policy(stm, &src->output, ids, + src->data->nr_chans); + kfree(ids[0]); if (err) goto fail_detach; @@ -1134,9 +1312,7 @@ int notrace stm_source_write(struct stm_source_data *data, stm = srcu_dereference(src->link, &stm_source_srcu); if (stm) - count = stm_write(stm->data, src->output.master, - src->output.channel + chan, - buf, count); + count = stm_write(stm, &src->output, chan, buf, count); else count = -ENODEV; @@ -1163,7 +1339,15 @@ static int __init stm_core_init(void) goto err_src; init_srcu_struct(&stm_source_srcu); + INIT_LIST_HEAD(&stm_pdrv_head); + mutex_init(&stm_pdrv_mutex); + /* + * So as to not confuse existing users with a requirement + * to load yet another module, do it here. + */ + if (IS_ENABLED(CONFIG_STM_PROTO_BASIC)) + (void)request_module_nowait("stm_p_basic"); stm_core_up++; return 0; diff --git a/drivers/hwtracing/stm/heartbeat.c b/drivers/hwtracing/stm/heartbeat.c index 7db42395e1316..3e7df1c0477f7 100644 --- a/drivers/hwtracing/stm/heartbeat.c +++ b/drivers/hwtracing/stm/heartbeat.c @@ -76,7 +76,7 @@ static int stm_heartbeat_init(void) goto fail_unregister; stm_heartbeat[i].data.nr_chans = 1; - stm_heartbeat[i].data.link = stm_heartbeat_link; + stm_heartbeat[i].data.link = stm_heartbeat_link; stm_heartbeat[i].data.unlink = stm_heartbeat_unlink; hrtimer_init(&stm_heartbeat[i].hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS); diff --git a/drivers/hwtracing/stm/p_basic.c b/drivers/hwtracing/stm/p_basic.c new file mode 100644 index 0000000000000..8980a6a5fd6c4 --- /dev/null +++ b/drivers/hwtracing/stm/p_basic.c @@ -0,0 +1,48 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Basic framing protocol for STM devices. + * Copyright (c) 2018, Intel Corporation. 
+ */ + +#include +#include +#include +#include "stm.h" + +static ssize_t basic_write(struct stm_data *data, struct stm_output *output, + unsigned int chan, const char *buf, size_t count) +{ + unsigned int c = output->channel + chan; + unsigned int m = output->master; + const unsigned char nil = 0; + ssize_t sz; + + sz = stm_data_write(data, m, c, true, buf, count); + if (sz > 0) + data->packet(data, m, c, STP_PACKET_FLAG, 0, 0, &nil); + + return sz; +} + +static const struct stm_protocol_driver basic_pdrv = { + .owner = THIS_MODULE, + .name = "p_basic", + .write = basic_write, +}; + +static int basic_stm_init(void) +{ + return stm_register_protocol(&basic_pdrv); +} + +static void basic_stm_exit(void) +{ + stm_unregister_protocol(&basic_pdrv); +} + +module_init(basic_stm_init); +module_exit(basic_stm_exit); + +MODULE_LICENSE("GPL v2"); +MODULE_DESCRIPTION("Basic STM framing protocol driver"); +MODULE_AUTHOR("Alexander Shishkin "); diff --git a/drivers/hwtracing/stm/p_sys-t.c b/drivers/hwtracing/stm/p_sys-t.c new file mode 100644 index 0000000000000..b178a5495b679 --- /dev/null +++ b/drivers/hwtracing/stm/p_sys-t.c @@ -0,0 +1,382 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * MIPI SyS-T framing protocol for STM devices. + * Copyright (c) 2018, Intel Corporation. 
+ */ + +#include +#include +#include +#include +#include +#include +#include "stm.h" + +enum sys_t_message_type { + MIPI_SYST_TYPE_BUILD = 0, + MIPI_SYST_TYPE_SHORT32, + MIPI_SYST_TYPE_STRING, + MIPI_SYST_TYPE_CATALOG, + MIPI_SYST_TYPE_RAW = 6, + MIPI_SYST_TYPE_SHORT64, + MIPI_SYST_TYPE_CLOCK, +}; + +enum sys_t_message_severity { + MIPI_SYST_SEVERITY_MAX = 0, + MIPI_SYST_SEVERITY_FATAL, + MIPI_SYST_SEVERITY_ERROR, + MIPI_SYST_SEVERITY_WARNING, + MIPI_SYST_SEVERITY_INFO, + MIPI_SYST_SEVERITY_USER1, + MIPI_SYST_SEVERITY_USER2, + MIPI_SYST_SEVERITY_DEBUG, +}; + +enum sys_t_message_build_subtype { + MIPI_SYST_BUILD_ID_COMPACT32 = 0, + MIPI_SYST_BUILD_ID_COMPACT64, + MIPI_SYST_BUILD_ID_LONG, +}; + +enum sys_t_message_clock_subtype { + MIPI_SYST_CLOCK_TRANSPORT_SYNC = 1, +}; + +enum sys_t_message_string_subtype { + MIPI_SYST_STRING_GENERIC = 1, + MIPI_SYST_STRING_FUNCTIONENTER, + MIPI_SYST_STRING_FUNCTIONEXIT, + MIPI_SYST_STRING_INVALIDPARAM = 5, + MIPI_SYST_STRING_ASSERT = 7, + MIPI_SYST_STRING_PRINTF_32 = 11, + MIPI_SYST_STRING_PRINTF_64 = 12, +}; + +#define MIPI_SYST_TYPE(t) ((u32)(MIPI_SYST_TYPE_ ## t)) +#define MIPI_SYST_SEVERITY(s) ((u32)(MIPI_SYST_SEVERITY_ ## s) << 4) +#define MIPI_SYST_OPT_LOC BIT(8) +#define MIPI_SYST_OPT_LEN BIT(9) +#define MIPI_SYST_OPT_CHK BIT(10) +#define MIPI_SYST_OPT_TS BIT(11) +#define MIPI_SYST_UNIT(u) ((u32)(u) << 12) +#define MIPI_SYST_ORIGIN(o) ((u32)(o) << 16) +#define MIPI_SYST_OPT_GUID BIT(23) +#define MIPI_SYST_SUBTYPE(s) ((u32)(MIPI_SYST_ ## s) << 24) +#define MIPI_SYST_UNITLARGE(u) (MIPI_SYST_UNIT(u & 0xf) | \ + MIPI_SYST_ORIGIN(u >> 4)) +#define MIPI_SYST_TYPES(t, s) (MIPI_SYST_TYPE(t) | \ + MIPI_SYST_SUBTYPE(t ## _ ## s)) + +#define DATA_HEADER (MIPI_SYST_TYPES(STRING, GENERIC) | \ + MIPI_SYST_SEVERITY(INFO) | \ + MIPI_SYST_OPT_GUID) + +#define CLOCK_SYNC_HEADER (MIPI_SYST_TYPES(CLOCK, TRANSPORT_SYNC) | \ + MIPI_SYST_SEVERITY(MAX)) + +struct sys_t_policy_node { + uuid_t uuid; + bool do_len; + unsigned long ts_interval; + 
unsigned long clocksync_interval; +}; + +struct sys_t_output { + struct sys_t_policy_node node; + unsigned long ts_jiffies; + unsigned long clocksync_jiffies; +}; + +static void sys_t_policy_node_init(void *priv) +{ + struct sys_t_policy_node *pn = priv; + + generate_random_uuid(pn->uuid.b); +} + +static int sys_t_output_open(void *priv, struct stm_output *output) +{ + struct sys_t_policy_node *pn = priv; + struct sys_t_output *opriv; + + opriv = kzalloc(sizeof(*opriv), GFP_ATOMIC); + if (!opriv) + return -ENOMEM; + + memcpy(&opriv->node, pn, sizeof(opriv->node)); + output->pdrv_private = opriv; + + return 0; +} + +static void sys_t_output_close(struct stm_output *output) +{ + kfree(output->pdrv_private); +} + +static ssize_t sys_t_policy_uuid_show(struct config_item *item, + char *page) +{ + struct sys_t_policy_node *pn = to_pdrv_policy_node(item); + + return sprintf(page, "%pU\n", &pn->uuid); +} + +static ssize_t +sys_t_policy_uuid_store(struct config_item *item, const char *page, + size_t count) +{ + struct mutex *mutexp = &item->ci_group->cg_subsys->su_mutex; + struct sys_t_policy_node *pn = to_pdrv_policy_node(item); + int ret; + + mutex_lock(mutexp); + ret = uuid_parse(page, &pn->uuid); + mutex_unlock(mutexp); + + return ret < 0 ? ret : count; +} + +CONFIGFS_ATTR(sys_t_policy_, uuid); + +static ssize_t sys_t_policy_do_len_show(struct config_item *item, + char *page) +{ + struct sys_t_policy_node *pn = to_pdrv_policy_node(item); + + return sprintf(page, "%d\n", pn->do_len); +} + +static ssize_t +sys_t_policy_do_len_store(struct config_item *item, const char *page, + size_t count) +{ + struct mutex *mutexp = &item->ci_group->cg_subsys->su_mutex; + struct sys_t_policy_node *pn = to_pdrv_policy_node(item); + int ret; + + mutex_lock(mutexp); + ret = kstrtobool(page, &pn->do_len); + mutex_unlock(mutexp); + + return ret ? 
ret : count;
+}
+
+CONFIGFS_ATTR(sys_t_policy_, do_len);
+
+static ssize_t sys_t_policy_ts_interval_show(struct config_item *item,
+					     char *page)
+{
+	struct sys_t_policy_node *pn = to_pdrv_policy_node(item);
+
+	return sprintf(page, "%u\n", jiffies_to_msecs(pn->ts_interval));
+}
+
+static ssize_t
+sys_t_policy_ts_interval_store(struct config_item *item, const char *page,
+			       size_t count)
+{
+	struct mutex *mutexp = &item->ci_group->cg_subsys->su_mutex;
+	struct sys_t_policy_node *pn = to_pdrv_policy_node(item);
+	unsigned int ms;
+	int ret;
+
+	mutex_lock(mutexp);
+	ret = kstrtouint(page, 10, &ms);
+	mutex_unlock(mutexp);
+
+	if (!ret) {
+		pn->ts_interval = msecs_to_jiffies(ms);
+		return count;
+	}
+
+	return ret;
+}
+
+CONFIGFS_ATTR(sys_t_policy_, ts_interval);
+
+static ssize_t sys_t_policy_clocksync_interval_show(struct config_item *item,
+						    char *page)
+{
+	struct sys_t_policy_node *pn = to_pdrv_policy_node(item);
+
+	return sprintf(page, "%u\n", jiffies_to_msecs(pn->clocksync_interval));
+}
+
+static ssize_t
+sys_t_policy_clocksync_interval_store(struct config_item *item,
+				      const char *page, size_t count)
+{
+	struct mutex *mutexp = &item->ci_group->cg_subsys->su_mutex;
+	struct sys_t_policy_node *pn = to_pdrv_policy_node(item);
+	unsigned int ms;
+	int ret;
+
+	mutex_lock(mutexp);
+	ret = kstrtouint(page, 10, &ms);
+	mutex_unlock(mutexp);
+
+	if (!ret) {
+		pn->clocksync_interval = msecs_to_jiffies(ms);
+		return count;
+	}
+
+	return ret;
+}
+
+CONFIGFS_ATTR(sys_t_policy_, clocksync_interval);
+
+static struct configfs_attribute *sys_t_policy_attrs[] = {
+	&sys_t_policy_attr_uuid,
+	&sys_t_policy_attr_do_len,
+	&sys_t_policy_attr_ts_interval,
+	&sys_t_policy_attr_clocksync_interval,
+	NULL,
+};
+
+/*
+ * Return true if a timestamp should be included in this message's
+ * metadata: ts_interval is nonzero and at least that long has passed
+ * since the last timestamped packet from this output.
+ * Note: arguments to time_after() are (now, deadline); the reversed
+ * order would invert the check and the timestamp would never be sent.
+ */
+static inline bool sys_t_need_ts(struct sys_t_output *op)
+{
+	if (op->node.ts_interval &&
+	    time_after(jiffies, op->ts_jiffies + op->node.ts_interval)) {
+		op->ts_jiffies = jiffies;
+
+		return true;
+	}
+
+	return false;
+}
+
+static bool sys_t_need_clock_sync(struct
sys_t_output *op)
+{
+	/*
+	 * Send a CLOCKSYNC packet if clocksync_interval is nonzero and
+	 * has elapsed since the previous one; time_after() takes
+	 * (now, deadline) -- reversed arguments would mean the packet
+	 * is never sent.
+	 */
+	if (op->node.clocksync_interval &&
+	    time_after(jiffies,
+		       op->clocksync_jiffies + op->node.clocksync_interval)) {
+		op->clocksync_jiffies = jiffies;
+
+		return true;
+	}
+
+	return false;
+}
+
+static ssize_t
+sys_t_clock_sync(struct stm_data *data, unsigned int m, unsigned int c)
+{
+	u32 header = CLOCK_SYNC_HEADER;
+	const unsigned char nil = 0;
+	u64 payload[2]; /* Clock value and frequency */
+	ssize_t sz;
+
+	sz = data->packet(data, m, c, STP_PACKET_DATA, STP_PACKET_TIMESTAMPED,
+			  4, (u8 *)&header);
+	if (sz <= 0)
+		return sz;
+
+	payload[0] = ktime_get_real_ns();
+	payload[1] = NSEC_PER_SEC;
+	sz = stm_data_write(data, m, c, false, &payload, sizeof(payload));
+	if (sz <= 0)
+		return sz;
+
+	data->packet(data, m, c, STP_PACKET_FLAG, 0, 0, &nil);
+
+	return sizeof(header) + sizeof(payload);
+}
+
+static ssize_t sys_t_write(struct stm_data *data, struct stm_output *output,
+			   unsigned int chan, const char *buf, size_t count)
+{
+	struct sys_t_output *op = output->pdrv_private;
+	unsigned int c = output->channel + chan;
+	unsigned int m = output->master;
+	const unsigned char nil = 0;
+	u32 header = DATA_HEADER;
+	ssize_t sz;
+
+	/* We require an existing policy node to proceed */
+	if (!op)
+		return -EINVAL;
+
+	if (sys_t_need_clock_sync(op)) {
+		sz = sys_t_clock_sync(data, m, c);
+		if (sz <= 0)
+			return sz;
+	}
+
+	if (op->node.do_len)
+		header |= MIPI_SYST_OPT_LEN;
+	if (sys_t_need_ts(op))
+		header |= MIPI_SYST_OPT_TS;
+
+	/*
+	 * STP framing rules for SyS-T frames:
+	 *   * the first packet of the SyS-T frame is timestamped;
+	 *   * the last packet is a FLAG.
+ */ + /* Message layout: HEADER / GUID / [LENGTH /][TIMESTAMP /] DATA */ + /* HEADER */ + sz = data->packet(data, m, c, STP_PACKET_DATA, STP_PACKET_TIMESTAMPED, + 4, (u8 *)&header); + if (sz <= 0) + return sz; + + /* GUID */ + sz = stm_data_write(data, m, c, false, op->node.uuid.b, UUID_SIZE); + if (sz <= 0) + return sz; + + /* [LENGTH] */ + if (op->node.do_len) { + u16 length = count; + + sz = data->packet(data, m, c, STP_PACKET_DATA, 0, 2, + (u8 *)&length); + if (sz <= 0) + return sz; + } + + /* [TIMESTAMP] */ + if (header & MIPI_SYST_OPT_TS) { + u64 ts = ktime_get_real_ns(); + + sz = stm_data_write(data, m, c, false, &ts, sizeof(ts)); + if (sz <= 0) + return sz; + } + + /* DATA */ + sz = stm_data_write(data, m, c, false, buf, count); + if (sz > 0) + data->packet(data, m, c, STP_PACKET_FLAG, 0, 0, &nil); + + return sz; +} + +static const struct stm_protocol_driver sys_t_pdrv = { + .owner = THIS_MODULE, + .name = "p_sys-t", + .priv_sz = sizeof(struct sys_t_policy_node), + .write = sys_t_write, + .policy_attr = sys_t_policy_attrs, + .policy_node_init = sys_t_policy_node_init, + .output_open = sys_t_output_open, + .output_close = sys_t_output_close, +}; + +static int sys_t_stm_init(void) +{ + return stm_register_protocol(&sys_t_pdrv); +} + +static void sys_t_stm_exit(void) +{ + stm_unregister_protocol(&sys_t_pdrv); +} + +module_init(sys_t_stm_init); +module_exit(sys_t_stm_exit); + +MODULE_LICENSE("GPL v2"); +MODULE_DESCRIPTION("MIPI SyS-T STM framing protocol driver"); +MODULE_AUTHOR("Alexander Shishkin "); diff --git a/drivers/hwtracing/stm/policy.c b/drivers/hwtracing/stm/policy.c index 3fd07e275b34e..0910ec8071874 100644 --- a/drivers/hwtracing/stm/policy.c +++ b/drivers/hwtracing/stm/policy.c @@ -33,8 +33,18 @@ struct stp_policy_node { unsigned int last_master; unsigned int first_channel; unsigned int last_channel; + /* this is the one that's exposed to the attributes */ + unsigned char priv[0]; }; +void *stp_policy_node_priv(struct stp_policy_node *pn) +{ + if 
(!pn) + return NULL; + + return pn->priv; +} + static struct configfs_subsystem stp_policy_subsys; void stp_policy_node_get_ranges(struct stp_policy_node *policy_node, @@ -68,6 +78,14 @@ to_stp_policy_node(struct config_item *item) NULL; } +void *to_pdrv_policy_node(struct config_item *item) +{ + struct stp_policy_node *node = to_stp_policy_node(item); + + return stp_policy_node_priv(node); +} +EXPORT_SYMBOL_GPL(to_pdrv_policy_node); + static ssize_t stp_policy_node_masters_show(struct config_item *item, char *page) { @@ -163,7 +181,9 @@ stp_policy_node_channels_store(struct config_item *item, const char *page, static void stp_policy_node_release(struct config_item *item) { - kfree(to_stp_policy_node(item)); + struct stp_policy_node *node = to_stp_policy_node(item); + + kfree(node); } static struct configfs_item_operations stp_policy_node_item_ops = { @@ -182,10 +202,34 @@ static struct configfs_attribute *stp_policy_node_attrs[] = { static const struct config_item_type stp_policy_type; static const struct config_item_type stp_policy_node_type; +const struct config_item_type * +get_policy_node_type(struct configfs_attribute **attrs) +{ + struct config_item_type *type; + struct configfs_attribute **merged; + + type = kmemdup(&stp_policy_node_type, sizeof(stp_policy_node_type), + GFP_KERNEL); + if (!type) + return NULL; + + merged = memcat_p(stp_policy_node_attrs, attrs); + if (!merged) { + kfree(type); + return NULL; + } + + type->ct_attrs = merged; + + return type; +} + static struct config_group * stp_policy_node_make(struct config_group *group, const char *name) { + const struct config_item_type *type = &stp_policy_node_type; struct stp_policy_node *policy_node, *parent_node; + const struct stm_protocol_driver *pdrv; struct stp_policy *policy; if (group->cg_item.ci_type == &stp_policy_type) { @@ -199,12 +243,20 @@ stp_policy_node_make(struct config_group *group, const char *name) if (!policy->stm) return ERR_PTR(-ENODEV); - policy_node = kzalloc(sizeof(struct 
stp_policy_node), GFP_KERNEL); + pdrv = policy->stm->pdrv; + policy_node = + kzalloc(offsetof(struct stp_policy_node, priv[pdrv->priv_sz]), + GFP_KERNEL); if (!policy_node) return ERR_PTR(-ENOMEM); - config_group_init_type_name(&policy_node->group, name, - &stp_policy_node_type); + if (pdrv->policy_node_init) + pdrv->policy_node_init((void *)policy_node->priv); + + if (policy->stm->pdrv_node_type) + type = policy->stm->pdrv_node_type; + + config_group_init_type_name(&policy_node->group, name, type); policy_node->policy = policy; @@ -254,8 +306,25 @@ static ssize_t stp_policy_device_show(struct config_item *item, CONFIGFS_ATTR_RO(stp_policy_, device); +static ssize_t stp_policy_protocol_show(struct config_item *item, + char *page) +{ + struct stp_policy *policy = to_stp_policy(item); + ssize_t count; + + count = sprintf(page, "%s\n", + (policy && policy->stm) ? + policy->stm->pdrv->name : + ""); + + return count; +} + +CONFIGFS_ATTR_RO(stp_policy_, protocol); + static struct configfs_attribute *stp_policy_attrs[] = { &stp_policy_attr_device, + &stp_policy_attr_protocol, NULL, }; @@ -276,6 +345,7 @@ void stp_policy_unbind(struct stp_policy *policy) stm->policy = NULL; policy->stm = NULL; + stm_put_protocol(stm->pdrv); stm_put_device(stm); } @@ -311,11 +381,14 @@ static const struct config_item_type stp_policy_type = { }; static struct config_group * -stp_policies_make(struct config_group *group, const char *name) +stp_policy_make(struct config_group *group, const char *name) { + const struct config_item_type *pdrv_node_type; + const struct stm_protocol_driver *pdrv; + char *devname, *proto, *p; struct config_group *ret; struct stm_device *stm; - char *devname, *p; + int err; devname = kasprintf(GFP_KERNEL, "%s", name); if (!devname) @@ -326,6 +399,7 @@ stp_policies_make(struct config_group *group, const char *name) * is the name of an existing stm device; may * contain dots; * is an arbitrary string; may not contain dots + * :. 
*/ p = strrchr(devname, '.'); if (!p) { @@ -335,11 +409,28 @@ stp_policies_make(struct config_group *group, const char *name) *p = '\0'; + /* + * look for ":": + * + no protocol suffix: fall back to whatever is available; + * + unknown protocol: fail the whole thing + */ + proto = strrchr(devname, ':'); + if (proto) + *proto++ = '\0'; + stm = stm_find_device(devname); + if (!stm) { + kfree(devname); + return ERR_PTR(-ENODEV); + } + + err = stm_lookup_protocol(proto, &pdrv, &pdrv_node_type); kfree(devname); - if (!stm) + if (err) { + stm_put_device(stm); return ERR_PTR(-ENODEV); + } mutex_lock(&stm->policy_mutex); if (stm->policy) { @@ -349,31 +440,37 @@ stp_policies_make(struct config_group *group, const char *name) stm->policy = kzalloc(sizeof(*stm->policy), GFP_KERNEL); if (!stm->policy) { - ret = ERR_PTR(-ENOMEM); - goto unlock_policy; + mutex_unlock(&stm->policy_mutex); + stm_put_protocol(pdrv); + stm_put_device(stm); + return ERR_PTR(-ENOMEM); } config_group_init_type_name(&stm->policy->group, name, &stp_policy_type); - stm->policy->stm = stm; + stm->pdrv = pdrv; + stm->pdrv_node_type = pdrv_node_type; + stm->policy->stm = stm; ret = &stm->policy->group; unlock_policy: mutex_unlock(&stm->policy_mutex); - if (IS_ERR(ret)) + if (IS_ERR(ret)) { + stm_put_protocol(stm->pdrv); stm_put_device(stm); + } return ret; } -static struct configfs_group_operations stp_policies_group_ops = { - .make_group = stp_policies_make, +static struct configfs_group_operations stp_policy_root_group_ops = { + .make_group = stp_policy_make, }; -static const struct config_item_type stp_policies_type = { - .ct_group_ops = &stp_policies_group_ops, +static const struct config_item_type stp_policy_root_type = { + .ct_group_ops = &stp_policy_root_group_ops, .ct_owner = THIS_MODULE, }; @@ -381,7 +478,7 @@ static struct configfs_subsystem stp_policy_subsys = { .su_group = { .cg_item = { .ci_namebuf = "stp-policy", - .ci_type = &stp_policies_type, + .ci_type = &stp_policy_root_type, }, }, }; @@ 
-392,7 +489,7 @@ static struct configfs_subsystem stp_policy_subsys = { static struct stp_policy_node * __stp_policy_node_lookup(struct stp_policy *policy, char *s) { - struct stp_policy_node *policy_node, *ret; + struct stp_policy_node *policy_node, *ret = NULL; struct list_head *head = &policy->group.cg_children; struct config_item *item; char *start, *end = s; @@ -400,10 +497,6 @@ __stp_policy_node_lookup(struct stp_policy *policy, char *s) if (list_empty(head)) return NULL; - /* return the first entry if everything else fails */ - item = list_entry(head->next, struct config_item, ci_entry); - ret = to_stp_policy_node(item); - next: for (;;) { start = strsep(&end, "/"); @@ -449,25 +542,25 @@ stp_policy_node_lookup(struct stm_device *stm, char *s) if (policy_node) config_item_get(&policy_node->group.cg_item); - mutex_unlock(&stp_policy_subsys.su_mutex); + else + mutex_unlock(&stp_policy_subsys.su_mutex); return policy_node; } void stp_policy_node_put(struct stp_policy_node *policy_node) { + lockdep_assert_held(&stp_policy_subsys.su_mutex); + + mutex_unlock(&stp_policy_subsys.su_mutex); config_item_put(&policy_node->group.cg_item); } int __init stp_configfs_init(void) { - int err; - config_group_init(&stp_policy_subsys.su_group); mutex_init(&stp_policy_subsys.su_mutex); - err = configfs_register_subsystem(&stp_policy_subsys); - - return err; + return configfs_register_subsystem(&stp_policy_subsys); } void __exit stp_configfs_exit(void) diff --git a/drivers/hwtracing/stm/stm.h b/drivers/hwtracing/stm/stm.h index 923571adc6f40..3569439d53bb9 100644 --- a/drivers/hwtracing/stm/stm.h +++ b/drivers/hwtracing/stm/stm.h @@ -10,20 +10,17 @@ #ifndef _STM_STM_H_ #define _STM_STM_H_ +#include + struct stp_policy; struct stp_policy_node; +struct stm_protocol_driver; -struct stp_policy_node * -stp_policy_node_lookup(struct stm_device *stm, char *s); -void stp_policy_node_put(struct stp_policy_node *policy_node); -void stp_policy_unbind(struct stp_policy *policy); - -void 
stp_policy_node_get_ranges(struct stp_policy_node *policy_node, - unsigned int *mstart, unsigned int *mend, - unsigned int *cstart, unsigned int *cend); int stp_configfs_init(void); void stp_configfs_exit(void); +void *stp_policy_node_priv(struct stp_policy_node *pn); + struct stp_master { unsigned int nr_free; unsigned long chan_map[0]; @@ -40,6 +37,9 @@ struct stm_device { struct mutex link_mutex; spinlock_t link_lock; struct list_head link_list; + /* framing protocol in use */ + const struct stm_protocol_driver *pdrv; + const struct config_item_type *pdrv_node_type; /* master allocation */ spinlock_t mc_lock; struct stp_master *masters[0]; @@ -48,16 +48,28 @@ struct stm_device { #define to_stm_device(_d) \ container_of((_d), struct stm_device, dev) +struct stp_policy_node * +stp_policy_node_lookup(struct stm_device *stm, char *s); +void stp_policy_node_put(struct stp_policy_node *policy_node); +void stp_policy_unbind(struct stp_policy *policy); + +void stp_policy_node_get_ranges(struct stp_policy_node *policy_node, + unsigned int *mstart, unsigned int *mend, + unsigned int *cstart, unsigned int *cend); + +const struct config_item_type * +get_policy_node_type(struct configfs_attribute **attrs); + struct stm_output { spinlock_t lock; unsigned int master; unsigned int channel; unsigned int nr_chans; + void *pdrv_private; }; struct stm_file { struct stm_device *stm; - struct stp_policy_node *policy_node; struct stm_output output; }; @@ -71,11 +83,35 @@ struct stm_source_device { struct stm_device __rcu *link; struct list_head link_entry; /* one output per stm_source device */ - struct stp_policy_node *policy_node; struct stm_output output; }; #define to_stm_source_device(_d) \ container_of((_d), struct stm_source_device, dev) +void *to_pdrv_policy_node(struct config_item *item); + +struct stm_protocol_driver { + struct module *owner; + const char *name; + ssize_t (*write)(struct stm_data *data, + struct stm_output *output, unsigned int chan, + const char *buf, 
size_t count); + void (*policy_node_init)(void *arg); + int (*output_open)(void *priv, struct stm_output *output); + void (*output_close)(struct stm_output *output); + ssize_t priv_sz; + struct configfs_attribute **policy_attr; +}; + +int stm_register_protocol(const struct stm_protocol_driver *pdrv); +void stm_unregister_protocol(const struct stm_protocol_driver *pdrv); +int stm_lookup_protocol(const char *name, + const struct stm_protocol_driver **pdrv, + const struct config_item_type **type); +void stm_put_protocol(const struct stm_protocol_driver *pdrv); +ssize_t stm_data_write(struct stm_data *data, unsigned int m, + unsigned int c, bool ts_first, const void *buf, + size_t count); + #endif /* _STM_STM_H_ */ diff --git a/drivers/i2c/busses/Kconfig b/drivers/i2c/busses/Kconfig index 451d4ae50e665..ac4b09642f631 100644 --- a/drivers/i2c/busses/Kconfig +++ b/drivers/i2c/busses/Kconfig @@ -751,7 +751,7 @@ config I2C_OCORES config I2C_OMAP tristate "OMAP I2C adapter" - depends on ARCH_OMAP + depends on ARCH_OMAP || ARCH_K3 default y if MACH_OMAP_H3 || MACH_OMAP_OSK help If you say yes to this option, support will be included for the diff --git a/drivers/i2c/busses/i2c-aspeed.c b/drivers/i2c/busses/i2c-aspeed.c index a4f956c6d567d..a19fbff168617 100644 --- a/drivers/i2c/busses/i2c-aspeed.c +++ b/drivers/i2c/busses/i2c-aspeed.c @@ -555,7 +555,7 @@ static irqreturn_t aspeed_i2c_bus_irq(int irq, void *dev_id) spin_lock(&bus->lock); #if IS_ENABLED(CONFIG_I2C_SLAVE) - if (aspeed_i2c_slave_irq(bus)) { + if (IS_ENABLED(CONFIG_I2C_SLAVE) && aspeed_i2c_slave_irq(bus)) { dev_dbg(bus->dev, "irq handled by slave.\n"); ret = true; goto out; @@ -564,7 +564,9 @@ static irqreturn_t aspeed_i2c_bus_irq(int irq, void *dev_id) ret = aspeed_i2c_master_irq(bus); +#if IS_ENABLED(CONFIG_I2C_SLAVE) out: +#endif spin_unlock(&bus->lock); return ret ? 
IRQ_HANDLED : IRQ_NONE; } diff --git a/drivers/i2c/busses/i2c-axxia.c b/drivers/i2c/busses/i2c-axxia.c index 8e60048a33f8f..51d34959709ba 100644 --- a/drivers/i2c/busses/i2c-axxia.c +++ b/drivers/i2c/busses/i2c-axxia.c @@ -74,8 +74,7 @@ MST_STATUS_ND) #define MST_STATUS_ERR (MST_STATUS_NAK | \ MST_STATUS_AL | \ - MST_STATUS_IP | \ - MST_STATUS_TSS) + MST_STATUS_IP) #define MST_TX_BYTES_XFRD 0x50 #define MST_RX_BYTES_XFRD 0x54 #define SCL_HIGH_PERIOD 0x80 @@ -241,7 +240,7 @@ static int axxia_i2c_empty_rx_fifo(struct axxia_i2c_dev *idev) */ if (c <= 0 || c > I2C_SMBUS_BLOCK_MAX) { idev->msg_err = -EPROTO; - i2c_int_disable(idev, ~0); + i2c_int_disable(idev, ~MST_STATUS_TSS); complete(&idev->msg_complete); break; } @@ -299,14 +298,19 @@ static irqreturn_t axxia_i2c_isr(int irq, void *_dev) if (status & MST_STATUS_SCC) { /* Stop completed */ - i2c_int_disable(idev, ~0); + i2c_int_disable(idev, ~MST_STATUS_TSS); complete(&idev->msg_complete); } else if (status & MST_STATUS_SNS) { /* Transfer done */ - i2c_int_disable(idev, ~0); + i2c_int_disable(idev, ~MST_STATUS_TSS); if (i2c_m_rd(idev->msg) && idev->msg_xfrd < idev->msg->len) axxia_i2c_empty_rx_fifo(idev); complete(&idev->msg_complete); + } else if (status & MST_STATUS_TSS) { + /* Transfer timeout */ + idev->msg_err = -ETIMEDOUT; + i2c_int_disable(idev, ~MST_STATUS_TSS); + complete(&idev->msg_complete); } else if (unlikely(status & MST_STATUS_ERR)) { /* Transfer error */ i2c_int_disable(idev, ~0); @@ -339,10 +343,10 @@ static int axxia_i2c_xfer_msg(struct axxia_i2c_dev *idev, struct i2c_msg *msg) u32 rx_xfer, tx_xfer; u32 addr_1, addr_2; unsigned long time_left; + unsigned int wt_value; idev->msg = msg; idev->msg_xfrd = 0; - idev->msg_err = 0; reinit_completion(&idev->msg_complete); if (i2c_m_ten(msg)) { @@ -383,9 +387,18 @@ static int axxia_i2c_xfer_msg(struct axxia_i2c_dev *idev, struct i2c_msg *msg) else if (axxia_i2c_fill_tx_fifo(idev) != 0) int_mask |= MST_STATUS_TFL; + wt_value = WT_VALUE(readl(idev->base + 
WAIT_TIMER_CONTROL)); + /* Disable wait timer temporarly */ + writel(wt_value, idev->base + WAIT_TIMER_CONTROL); + /* Check if timeout error happened */ + if (idev->msg_err) + goto out; + /* Start manual mode */ writel(CMD_MANUAL, idev->base + MST_COMMAND); + writel(WT_EN | wt_value, idev->base + WAIT_TIMER_CONTROL); + i2c_int_enable(idev, int_mask); time_left = wait_for_completion_timeout(&idev->msg_complete, @@ -396,13 +409,15 @@ static int axxia_i2c_xfer_msg(struct axxia_i2c_dev *idev, struct i2c_msg *msg) if (readl(idev->base + MST_COMMAND) & CMD_BUSY) dev_warn(idev->dev, "busy after xfer\n"); - if (time_left == 0) + if (time_left == 0) { idev->msg_err = -ETIMEDOUT; - - if (idev->msg_err == -ETIMEDOUT) i2c_recover_bus(&idev->adapter); + axxia_i2c_init(idev); + } - if (unlikely(idev->msg_err) && idev->msg_err != -ENXIO) +out: + if (unlikely(idev->msg_err) && idev->msg_err != -ENXIO && + idev->msg_err != -ETIMEDOUT) axxia_i2c_init(idev); return idev->msg_err; @@ -410,7 +425,7 @@ static int axxia_i2c_xfer_msg(struct axxia_i2c_dev *idev, struct i2c_msg *msg) static int axxia_i2c_stop(struct axxia_i2c_dev *idev) { - u32 int_mask = MST_STATUS_ERR | MST_STATUS_SCC; + u32 int_mask = MST_STATUS_ERR | MST_STATUS_SCC | MST_STATUS_TSS; unsigned long time_left; reinit_completion(&idev->msg_complete); @@ -437,6 +452,9 @@ axxia_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[], int num) int i; int ret = 0; + idev->msg_err = 0; + i2c_int_enable(idev, MST_STATUS_TSS); + for (i = 0; ret == 0 && i < num; ++i) ret = axxia_i2c_xfer_msg(idev, &msgs[i]); diff --git a/drivers/i2c/busses/i2c-qcom-geni.c b/drivers/i2c/busses/i2c-qcom-geni.c index 9f2eb02481d34..d7329177b0ea9 100644 --- a/drivers/i2c/busses/i2c-qcom-geni.c +++ b/drivers/i2c/busses/i2c-qcom-geni.c @@ -590,18 +590,19 @@ static int geni_i2c_probe(struct platform_device *pdev) dev_dbg(&pdev->dev, "i2c fifo/se-dma mode. 
fifo depth:%d\n", tx_depth); - ret = i2c_add_adapter(&gi2c->adap); - if (ret) { - dev_err(&pdev->dev, "Error adding i2c adapter %d\n", ret); - return ret; - } - gi2c->suspended = 1; pm_runtime_set_suspended(gi2c->se.dev); pm_runtime_set_autosuspend_delay(gi2c->se.dev, I2C_AUTO_SUSPEND_DELAY); pm_runtime_use_autosuspend(gi2c->se.dev); pm_runtime_enable(gi2c->se.dev); + ret = i2c_add_adapter(&gi2c->adap); + if (ret) { + dev_err(&pdev->dev, "Error adding i2c adapter %d\n", ret); + pm_runtime_disable(gi2c->se.dev); + return ret; + } + return 0; } @@ -609,8 +610,8 @@ static int geni_i2c_remove(struct platform_device *pdev) { struct geni_i2c_dev *gi2c = platform_get_drvdata(pdev); - pm_runtime_disable(gi2c->se.dev); i2c_del_adapter(&gi2c->adap); + pm_runtime_disable(gi2c->se.dev); return 0; } diff --git a/drivers/i2c/busses/i2c-rcar.c b/drivers/i2c/busses/i2c-rcar.c index 4aa7dde876f3f..254e6219e5389 100644 --- a/drivers/i2c/busses/i2c-rcar.c +++ b/drivers/i2c/busses/i2c-rcar.c @@ -779,6 +779,11 @@ static int rcar_i2c_master_xfer(struct i2c_adapter *adap, pm_runtime_get_sync(dev); + /* Check bus state before init otherwise bus busy info will be lost */ + ret = rcar_i2c_bus_barrier(priv); + if (ret < 0) + goto out; + /* Gen3 needs a reset before allowing RXDMA once */ if (priv->devtype == I2C_RCAR_GEN3) { priv->flags |= ID_P_NO_RXDMA; @@ -791,10 +796,6 @@ static int rcar_i2c_master_xfer(struct i2c_adapter *adap, rcar_i2c_init(priv); - ret = rcar_i2c_bus_barrier(priv); - if (ret < 0) - goto out; - for (i = 0; i < num; i++) rcar_i2c_request_dma(priv, msgs + i); diff --git a/drivers/i2c/busses/i2c-scmi.c b/drivers/i2c/busses/i2c-scmi.c index 7e9a2bbf5ddcb..ff3f4553648f3 100644 --- a/drivers/i2c/busses/i2c-scmi.c +++ b/drivers/i2c/busses/i2c-scmi.c @@ -367,6 +367,7 @@ static int acpi_smbus_cmi_add(struct acpi_device *device) { struct acpi_smbus_cmi *smbus_cmi; const struct acpi_device_id *id; + int ret; smbus_cmi = kzalloc(sizeof(struct acpi_smbus_cmi), GFP_KERNEL); if 
(!smbus_cmi) @@ -388,8 +389,10 @@ static int acpi_smbus_cmi_add(struct acpi_device *device) acpi_walk_namespace(ACPI_TYPE_METHOD, smbus_cmi->handle, 1, acpi_smbus_cmi_query_methods, NULL, smbus_cmi, NULL); - if (smbus_cmi->cap_info == 0) + if (smbus_cmi->cap_info == 0) { + ret = -ENODEV; goto err; + } snprintf(smbus_cmi->adapter.name, sizeof(smbus_cmi->adapter.name), "SMBus CMI adapter %s", @@ -400,7 +403,8 @@ static int acpi_smbus_cmi_add(struct acpi_device *device) smbus_cmi->adapter.class = I2C_CLASS_HWMON | I2C_CLASS_SPD; smbus_cmi->adapter.dev.parent = &device->dev; - if (i2c_add_adapter(&smbus_cmi->adapter)) { + ret = i2c_add_adapter(&smbus_cmi->adapter); + if (ret) { dev_err(&device->dev, "Couldn't register adapter!\n"); goto err; } @@ -410,7 +414,7 @@ static int acpi_smbus_cmi_add(struct acpi_device *device) err: kfree(smbus_cmi); device->driver_data = NULL; - return -EIO; + return ret; } static int acpi_smbus_cmi_remove(struct acpi_device *device) diff --git a/drivers/i2c/busses/i2c-uniphier-f.c b/drivers/i2c/busses/i2c-uniphier-f.c index a403e8579b652..bc26ec822e268 100644 --- a/drivers/i2c/busses/i2c-uniphier-f.c +++ b/drivers/i2c/busses/i2c-uniphier-f.c @@ -470,9 +470,26 @@ static void uniphier_fi2c_hw_init(struct uniphier_fi2c_priv *priv) uniphier_fi2c_reset(priv); + /* + * Standard-mode: tLOW + tHIGH = 10 us + * Fast-mode: tLOW + tHIGH = 2.5 us + */ writel(cyc, priv->membase + UNIPHIER_FI2C_CYC); - writel(cyc / 2, priv->membase + UNIPHIER_FI2C_LCTL); + /* + * Standard-mode: tLOW = 4.7 us, tHIGH = 4.0 us, tBUF = 4.7 us + * Fast-mode: tLOW = 1.3 us, tHIGH = 0.6 us, tBUF = 1.3 us + * "tLow/tHIGH = 5/4" meets both. 
+ */ + writel(cyc * 5 / 9, priv->membase + UNIPHIER_FI2C_LCTL); + /* + * Standard-mode: tHD;STA = 4.0 us, tSU;STA = 4.7 us, tSU;STO = 4.0 us + * Fast-mode: tHD;STA = 0.6 us, tSU;STA = 0.6 us, tSU;STO = 0.6 us + */ writel(cyc / 2, priv->membase + UNIPHIER_FI2C_SSUT); + /* + * Standard-mode: tSU;DAT = 250 ns + * Fast-mode: tSU;DAT = 100 ns + */ writel(cyc / 16, priv->membase + UNIPHIER_FI2C_DSUT); uniphier_fi2c_prepare_operation(priv); diff --git a/drivers/i2c/busses/i2c-uniphier.c b/drivers/i2c/busses/i2c-uniphier.c index 454f914ae66db..c488e558aef70 100644 --- a/drivers/i2c/busses/i2c-uniphier.c +++ b/drivers/i2c/busses/i2c-uniphier.c @@ -320,7 +320,13 @@ static void uniphier_i2c_hw_init(struct uniphier_i2c_priv *priv) uniphier_i2c_reset(priv, true); - writel((cyc / 2 << 16) | cyc, priv->membase + UNIPHIER_I2C_CLK); + /* + * Bit30-16: clock cycles of tLOW. + * Standard-mode: tLOW = 4.7 us, tHIGH = 4.0 us + * Fast-mode: tLOW = 1.3 us, tHIGH = 0.6 us + * "tLow/tHIGH = 5/4" meets both. + */ + writel((cyc * 5 / 9 << 16) | cyc, priv->membase + UNIPHIER_I2C_CLK); uniphier_i2c_reset(priv, false); } diff --git a/drivers/i2c/i2c-dev.c b/drivers/i2c/i2c-dev.c index 1aca742fde4ae..ccd76c71af098 100644 --- a/drivers/i2c/i2c-dev.c +++ b/drivers/i2c/i2c-dev.c @@ -470,9 +470,15 @@ static long i2cdev_ioctl(struct file *file, unsigned int cmd, unsigned long arg) data_arg.data); } case I2C_RETRIES: + if (arg > INT_MAX) + return -EINVAL; + client->adapter->retries = arg; break; case I2C_TIMEOUT: + if (arg > INT_MAX) + return -EINVAL; + /* For historical reasons, user-space sets the timeout * value in units of 10 ms. 
*/ diff --git a/drivers/ide/ide-proc.c b/drivers/ide/ide-proc.c index 45c9974303328..0e51803de0e7a 100644 --- a/drivers/ide/ide-proc.c +++ b/drivers/ide/ide-proc.c @@ -544,7 +544,7 @@ void ide_proc_port_register_devices(ide_hwif_t *hwif) drive->proc = proc_mkdir(drive->name, parent); if (drive->proc) { ide_add_proc_entries(drive->proc, generic_drive_entries, drive); - proc_create_data("setting", S_IFREG|S_IRUSR|S_IWUSR, + proc_create_data("settings", S_IFREG|S_IRUSR|S_IWUSR, drive->proc, &ide_settings_proc_fops, drive); } diff --git a/drivers/ide/pmac.c b/drivers/ide/pmac.c index c5b902b86b444..203ed4adc04ae 100644 --- a/drivers/ide/pmac.c +++ b/drivers/ide/pmac.c @@ -920,6 +920,7 @@ static u8 pmac_ide_cable_detect(ide_hwif_t *hwif) struct device_node *root = of_find_node_by_path("/"); const char *model = of_get_property(root, "model", NULL); + of_node_put(root); /* Get cable type from device-tree. */ if (cable && !strncmp(cable, "80-", 3)) { /* Some drives fail to detect 80c cable in PowerBook */ diff --git a/drivers/iio/accel/hid-sensor-accel-3d.c b/drivers/iio/accel/hid-sensor-accel-3d.c index 41d97faf50138..38ff374a3ca45 100644 --- a/drivers/iio/accel/hid-sensor-accel-3d.c +++ b/drivers/iio/accel/hid-sensor-accel-3d.c @@ -149,6 +149,7 @@ static int accel_3d_read_raw(struct iio_dev *indio_dev, int report_id = -1; u32 address; int ret_type; + s32 min; struct hid_sensor_hub_device *hsdev = accel_state->common_attributes.hsdev; @@ -158,12 +159,14 @@ static int accel_3d_read_raw(struct iio_dev *indio_dev, case IIO_CHAN_INFO_RAW: hid_sensor_power_state(&accel_state->common_attributes, true); report_id = accel_state->accel[chan->scan_index].report_id; + min = accel_state->accel[chan->scan_index].logical_minimum; address = accel_3d_addresses[chan->scan_index]; if (report_id >= 0) *val = sensor_hub_input_attr_get_raw_value( accel_state->common_attributes.hsdev, hsdev->usage, address, report_id, - SENSOR_HUB_SYNC); + SENSOR_HUB_SYNC, + min < 0); else { *val = 0; 
hid_sensor_power_state(&accel_state->common_attributes, diff --git a/drivers/iio/adc/at91_adc.c b/drivers/iio/adc/at91_adc.c index 44b516863c9d4..75d2f73582a3d 100644 --- a/drivers/iio/adc/at91_adc.c +++ b/drivers/iio/adc/at91_adc.c @@ -248,12 +248,14 @@ static irqreturn_t at91_adc_trigger_handler(int irq, void *p) struct iio_poll_func *pf = p; struct iio_dev *idev = pf->indio_dev; struct at91_adc_state *st = iio_priv(idev); + struct iio_chan_spec const *chan; int i, j = 0; for (i = 0; i < idev->masklength; i++) { if (!test_bit(i, idev->active_scan_mask)) continue; - st->buffer[j] = at91_adc_readl(st, AT91_ADC_CHAN(st, i)); + chan = idev->channels + i; + st->buffer[j] = at91_adc_readl(st, AT91_ADC_CHAN(st, chan->channel)); j++; } @@ -279,6 +281,8 @@ static void handle_adc_eoc_trigger(int irq, struct iio_dev *idev) iio_trigger_poll(idev->trig); } else { st->last_value = at91_adc_readl(st, AT91_ADC_CHAN(st, st->chnb)); + /* Needed to ACK the DRDY interruption */ + at91_adc_readl(st, AT91_ADC_LCDR); st->done = true; wake_up_interruptible(&st->wq_data_avail); } diff --git a/drivers/iio/adc/fsl-imx25-gcq.c b/drivers/iio/adc/fsl-imx25-gcq.c index ea264fa9e567a..929c617db3645 100644 --- a/drivers/iio/adc/fsl-imx25-gcq.c +++ b/drivers/iio/adc/fsl-imx25-gcq.c @@ -209,12 +209,14 @@ static int mx25_gcq_setup_cfgs(struct platform_device *pdev, ret = of_property_read_u32(child, "reg", ®); if (ret) { dev_err(dev, "Failed to get reg property\n"); + of_node_put(child); return ret; } if (reg >= MX25_NUM_CFGS) { dev_err(dev, "reg value is greater than the number of available configuration registers\n"); + of_node_put(child); return -EINVAL; } @@ -228,6 +230,7 @@ static int mx25_gcq_setup_cfgs(struct platform_device *pdev, if (IS_ERR(priv->vref[refp])) { dev_err(dev, "Error, trying to use external voltage reference without a vref-%s regulator.", mx25_gcq_refp_names[refp]); + of_node_put(child); return PTR_ERR(priv->vref[refp]); } priv->channel_vref_mv[reg] = @@ -240,6 +243,7 @@ 
static int mx25_gcq_setup_cfgs(struct platform_device *pdev, break; default: dev_err(dev, "Invalid positive reference %d\n", refp); + of_node_put(child); return -EINVAL; } @@ -254,10 +258,12 @@ static int mx25_gcq_setup_cfgs(struct platform_device *pdev, if ((refp & MX25_ADCQ_CFG_REFP_MASK) != refp) { dev_err(dev, "Invalid fsl,adc-refp property value\n"); + of_node_put(child); return -EINVAL; } if ((refn & MX25_ADCQ_CFG_REFN_MASK) != refn) { dev_err(dev, "Invalid fsl,adc-refn property value\n"); + of_node_put(child); return -EINVAL; } diff --git a/drivers/iio/dac/ad5064.c b/drivers/iio/dac/ad5064.c index bf4fc40ec84d9..2f98cb2a3b964 100644 --- a/drivers/iio/dac/ad5064.c +++ b/drivers/iio/dac/ad5064.c @@ -808,6 +808,40 @@ static int ad5064_set_config(struct ad5064_state *st, unsigned int val) return ad5064_write(st, cmd, 0, val, 0); } +static int ad5064_request_vref(struct ad5064_state *st, struct device *dev) +{ + unsigned int i; + int ret; + + for (i = 0; i < ad5064_num_vref(st); ++i) + st->vref_reg[i].supply = ad5064_vref_name(st, i); + + if (!st->chip_info->internal_vref) + return devm_regulator_bulk_get(dev, ad5064_num_vref(st), + st->vref_reg); + + /* + * This assumes that when the regulator has an internal VREF + * there is only one external VREF connection, which is + * currently the case for all supported devices. 
+ */ + st->vref_reg[0].consumer = devm_regulator_get_optional(dev, "vref"); + if (!IS_ERR(st->vref_reg[0].consumer)) + return 0; + + ret = PTR_ERR(st->vref_reg[0].consumer); + if (ret != -ENODEV) + return ret; + + /* If no external regulator was supplied use the internal VREF */ + st->use_internal_vref = true; + ret = ad5064_set_config(st, AD5064_CONFIG_INT_VREF_ENABLE); + if (ret) + dev_err(dev, "Failed to enable internal vref: %d\n", ret); + + return ret; +} + static int ad5064_probe(struct device *dev, enum ad5064_type type, const char *name, ad5064_write_func write) { @@ -828,22 +862,11 @@ static int ad5064_probe(struct device *dev, enum ad5064_type type, st->dev = dev; st->write = write; - for (i = 0; i < ad5064_num_vref(st); ++i) - st->vref_reg[i].supply = ad5064_vref_name(st, i); + ret = ad5064_request_vref(st, dev); + if (ret) + return ret; - ret = devm_regulator_bulk_get(dev, ad5064_num_vref(st), - st->vref_reg); - if (ret) { - if (!st->chip_info->internal_vref) - return ret; - st->use_internal_vref = true; - ret = ad5064_set_config(st, AD5064_CONFIG_INT_VREF_ENABLE); - if (ret) { - dev_err(dev, "Failed to enable internal vref: %d\n", - ret); - return ret; - } - } else { + if (!st->use_internal_vref) { ret = regulator_bulk_enable(ad5064_num_vref(st), st->vref_reg); if (ret) return ret; diff --git a/drivers/iio/dac/ad5686.c b/drivers/iio/dac/ad5686.c index 2ddbfc3fdbae7..cba62ad26cd86 100644 --- a/drivers/iio/dac/ad5686.c +++ b/drivers/iio/dac/ad5686.c @@ -124,7 +124,8 @@ static int ad5686_read_raw(struct iio_dev *indio_dev, mutex_unlock(&indio_dev->mlock); if (ret < 0) return ret; - *val = ret; + *val = (ret >> chan->scan_type.shift) & + GENMASK(chan->scan_type.realbits - 1, 0); return IIO_VAL_INT; case IIO_CHAN_INFO_SCALE: *val = st->vref_mv; diff --git a/drivers/iio/gyro/hid-sensor-gyro-3d.c b/drivers/iio/gyro/hid-sensor-gyro-3d.c index 36941e69f9595..88e857c4baf45 100644 --- a/drivers/iio/gyro/hid-sensor-gyro-3d.c +++ 
b/drivers/iio/gyro/hid-sensor-gyro-3d.c @@ -111,6 +111,7 @@ static int gyro_3d_read_raw(struct iio_dev *indio_dev, int report_id = -1; u32 address; int ret_type; + s32 min; *val = 0; *val2 = 0; @@ -118,13 +119,15 @@ static int gyro_3d_read_raw(struct iio_dev *indio_dev, case IIO_CHAN_INFO_RAW: hid_sensor_power_state(&gyro_state->common_attributes, true); report_id = gyro_state->gyro[chan->scan_index].report_id; + min = gyro_state->gyro[chan->scan_index].logical_minimum; address = gyro_3d_addresses[chan->scan_index]; if (report_id >= 0) *val = sensor_hub_input_attr_get_raw_value( gyro_state->common_attributes.hsdev, HID_USAGE_SENSOR_GYRO_3D, address, report_id, - SENSOR_HUB_SYNC); + SENSOR_HUB_SYNC, + min < 0); else { *val = 0; hid_sensor_power_state(&gyro_state->common_attributes, diff --git a/drivers/iio/humidity/hid-sensor-humidity.c b/drivers/iio/humidity/hid-sensor-humidity.c index beab6d6fd6e18..4bc95f31c730e 100644 --- a/drivers/iio/humidity/hid-sensor-humidity.c +++ b/drivers/iio/humidity/hid-sensor-humidity.c @@ -75,7 +75,8 @@ static int humidity_read_raw(struct iio_dev *indio_dev, HID_USAGE_SENSOR_HUMIDITY, HID_USAGE_SENSOR_ATMOSPHERIC_HUMIDITY, humid_st->humidity_attr.report_id, - SENSOR_HUB_SYNC); + SENSOR_HUB_SYNC, + humid_st->humidity_attr.logical_minimum < 0); hid_sensor_power_state(&humid_st->common_attributes, false); return IIO_VAL_INT; diff --git a/drivers/iio/light/hid-sensor-als.c b/drivers/iio/light/hid-sensor-als.c index 406caaee9a3c5..94f33250ba5a6 100644 --- a/drivers/iio/light/hid-sensor-als.c +++ b/drivers/iio/light/hid-sensor-als.c @@ -93,6 +93,7 @@ static int als_read_raw(struct iio_dev *indio_dev, int report_id = -1; u32 address; int ret_type; + s32 min; *val = 0; *val2 = 0; @@ -102,8 +103,8 @@ static int als_read_raw(struct iio_dev *indio_dev, case CHANNEL_SCAN_INDEX_INTENSITY: case CHANNEL_SCAN_INDEX_ILLUM: report_id = als_state->als_illum.report_id; - address = - HID_USAGE_SENSOR_LIGHT_ILLUM; + min = 
als_state->als_illum.logical_minimum; + address = HID_USAGE_SENSOR_LIGHT_ILLUM; break; default: report_id = -1; @@ -116,7 +117,8 @@ static int als_read_raw(struct iio_dev *indio_dev, als_state->common_attributes.hsdev, HID_USAGE_SENSOR_ALS, address, report_id, - SENSOR_HUB_SYNC); + SENSOR_HUB_SYNC, + min < 0); hid_sensor_power_state(&als_state->common_attributes, false); } else { diff --git a/drivers/iio/light/hid-sensor-prox.c b/drivers/iio/light/hid-sensor-prox.c index 45107f7537b5d..cf5a0c242609d 100644 --- a/drivers/iio/light/hid-sensor-prox.c +++ b/drivers/iio/light/hid-sensor-prox.c @@ -73,6 +73,7 @@ static int prox_read_raw(struct iio_dev *indio_dev, int report_id = -1; u32 address; int ret_type; + s32 min; *val = 0; *val2 = 0; @@ -81,8 +82,8 @@ static int prox_read_raw(struct iio_dev *indio_dev, switch (chan->scan_index) { case CHANNEL_SCAN_INDEX_PRESENCE: report_id = prox_state->prox_attr.report_id; - address = - HID_USAGE_SENSOR_HUMAN_PRESENCE; + min = prox_state->prox_attr.logical_minimum; + address = HID_USAGE_SENSOR_HUMAN_PRESENCE; break; default: report_id = -1; @@ -95,7 +96,8 @@ static int prox_read_raw(struct iio_dev *indio_dev, prox_state->common_attributes.hsdev, HID_USAGE_SENSOR_PROX, address, report_id, - SENSOR_HUB_SYNC); + SENSOR_HUB_SYNC, + min < 0); hid_sensor_power_state(&prox_state->common_attributes, false); } else { diff --git a/drivers/iio/magnetometer/hid-sensor-magn-3d.c b/drivers/iio/magnetometer/hid-sensor-magn-3d.c index d55c4885211ad..f3c0d41e5a8c2 100644 --- a/drivers/iio/magnetometer/hid-sensor-magn-3d.c +++ b/drivers/iio/magnetometer/hid-sensor-magn-3d.c @@ -163,21 +163,23 @@ static int magn_3d_read_raw(struct iio_dev *indio_dev, int report_id = -1; u32 address; int ret_type; + s32 min; *val = 0; *val2 = 0; switch (mask) { case IIO_CHAN_INFO_RAW: hid_sensor_power_state(&magn_state->magn_flux_attributes, true); - report_id = - magn_state->magn[chan->address].report_id; + report_id = magn_state->magn[chan->address].report_id; + 
min = magn_state->magn[chan->address].logical_minimum; address = magn_3d_addresses[chan->address]; if (report_id >= 0) *val = sensor_hub_input_attr_get_raw_value( magn_state->magn_flux_attributes.hsdev, HID_USAGE_SENSOR_COMPASS_3D, address, report_id, - SENSOR_HUB_SYNC); + SENSOR_HUB_SYNC, + min < 0); else { *val = 0; hid_sensor_power_state( diff --git a/drivers/iio/magnetometer/st_magn_buffer.c b/drivers/iio/magnetometer/st_magn_buffer.c index 0a9e8fadfa9de..37ab305664649 100644 --- a/drivers/iio/magnetometer/st_magn_buffer.c +++ b/drivers/iio/magnetometer/st_magn_buffer.c @@ -30,11 +30,6 @@ int st_magn_trig_set_state(struct iio_trigger *trig, bool state) return st_sensors_set_dataready_irq(indio_dev, state); } -static int st_magn_buffer_preenable(struct iio_dev *indio_dev) -{ - return st_sensors_set_enable(indio_dev, true); -} - static int st_magn_buffer_postenable(struct iio_dev *indio_dev) { int err; @@ -50,7 +45,7 @@ static int st_magn_buffer_postenable(struct iio_dev *indio_dev) if (err < 0) goto st_magn_buffer_postenable_error; - return err; + return st_sensors_set_enable(indio_dev, true); st_magn_buffer_postenable_error: kfree(mdata->buffer_data); @@ -63,11 +58,11 @@ static int st_magn_buffer_predisable(struct iio_dev *indio_dev) int err; struct st_sensor_data *mdata = iio_priv(indio_dev); - err = iio_triggered_buffer_predisable(indio_dev); + err = st_sensors_set_enable(indio_dev, false); if (err < 0) goto st_magn_buffer_predisable_error; - err = st_sensors_set_enable(indio_dev, false); + err = iio_triggered_buffer_predisable(indio_dev); st_magn_buffer_predisable_error: kfree(mdata->buffer_data); @@ -75,7 +70,6 @@ static int st_magn_buffer_predisable(struct iio_dev *indio_dev) } static const struct iio_buffer_setup_ops st_magn_buffer_setup_ops = { - .preenable = &st_magn_buffer_preenable, .postenable = &st_magn_buffer_postenable, .predisable = &st_magn_buffer_predisable, }; diff --git a/drivers/iio/orientation/hid-sensor-incl-3d.c 
b/drivers/iio/orientation/hid-sensor-incl-3d.c index 1e5451d1ff884..bdc5e4554ee48 100644 --- a/drivers/iio/orientation/hid-sensor-incl-3d.c +++ b/drivers/iio/orientation/hid-sensor-incl-3d.c @@ -111,21 +111,23 @@ static int incl_3d_read_raw(struct iio_dev *indio_dev, int report_id = -1; u32 address; int ret_type; + s32 min; *val = 0; *val2 = 0; switch (mask) { case IIO_CHAN_INFO_RAW: hid_sensor_power_state(&incl_state->common_attributes, true); - report_id = - incl_state->incl[chan->scan_index].report_id; + report_id = incl_state->incl[chan->scan_index].report_id; + min = incl_state->incl[chan->scan_index].logical_minimum; address = incl_3d_addresses[chan->scan_index]; if (report_id >= 0) *val = sensor_hub_input_attr_get_raw_value( incl_state->common_attributes.hsdev, HID_USAGE_SENSOR_INCLINOMETER_3D, address, report_id, - SENSOR_HUB_SYNC); + SENSOR_HUB_SYNC, + min < 0); else { hid_sensor_power_state(&incl_state->common_attributes, false); diff --git a/drivers/iio/pressure/hid-sensor-press.c b/drivers/iio/pressure/hid-sensor-press.c index 4c437918f1d28..d7b1c00ceb4da 100644 --- a/drivers/iio/pressure/hid-sensor-press.c +++ b/drivers/iio/pressure/hid-sensor-press.c @@ -77,6 +77,7 @@ static int press_read_raw(struct iio_dev *indio_dev, int report_id = -1; u32 address; int ret_type; + s32 min; *val = 0; *val2 = 0; @@ -85,8 +86,8 @@ static int press_read_raw(struct iio_dev *indio_dev, switch (chan->scan_index) { case CHANNEL_SCAN_INDEX_PRESSURE: report_id = press_state->press_attr.report_id; - address = - HID_USAGE_SENSOR_ATMOSPHERIC_PRESSURE; + min = press_state->press_attr.logical_minimum; + address = HID_USAGE_SENSOR_ATMOSPHERIC_PRESSURE; break; default: report_id = -1; @@ -99,7 +100,8 @@ static int press_read_raw(struct iio_dev *indio_dev, press_state->common_attributes.hsdev, HID_USAGE_SENSOR_PRESSURE, address, report_id, - SENSOR_HUB_SYNC); + SENSOR_HUB_SYNC, + min < 0); hid_sensor_power_state(&press_state->common_attributes, false); } else { diff --git 
a/drivers/iio/temperature/hid-sensor-temperature.c b/drivers/iio/temperature/hid-sensor-temperature.c index beaf6fd3e337c..b592fc4f007e4 100644 --- a/drivers/iio/temperature/hid-sensor-temperature.c +++ b/drivers/iio/temperature/hid-sensor-temperature.c @@ -76,7 +76,8 @@ static int temperature_read_raw(struct iio_dev *indio_dev, HID_USAGE_SENSOR_TEMPERATURE, HID_USAGE_SENSOR_DATA_ENVIRONMENTAL_TEMPERATURE, temp_st->temperature_attr.report_id, - SENSOR_HUB_SYNC); + SENSOR_HUB_SYNC, + temp_st->temperature_attr.logical_minimum < 0); hid_sensor_power_state( &temp_st->common_attributes, false); diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c index 6e39c27dca8ec..4c533275d1f20 100644 --- a/drivers/infiniband/core/cm.c +++ b/drivers/infiniband/core/cm.c @@ -3292,8 +3292,11 @@ static int cm_lap_handler(struct cm_work *work) if (ret) goto unlock; - cm_init_av_by_path(param->alternate_path, NULL, &cm_id_priv->alt_av, - cm_id_priv); + ret = cm_init_av_by_path(param->alternate_path, NULL, + &cm_id_priv->alt_av, cm_id_priv); + if (ret) + goto unlock; + cm_id_priv->id.lap_state = IB_CM_LAP_RCVD; cm_id_priv->tid = lap_msg->hdr.tid; ret = atomic_inc_and_test(&cm_id_priv->work_count); diff --git a/drivers/infiniband/core/nldev.c b/drivers/infiniband/core/nldev.c index 0385ab4383208..f6fa9b115fdad 100644 --- a/drivers/infiniband/core/nldev.c +++ b/drivers/infiniband/core/nldev.c @@ -579,10 +579,6 @@ static int fill_res_pd_entry(struct sk_buff *msg, struct netlink_callback *cb, if (nla_put_u64_64bit(msg, RDMA_NLDEV_ATTR_RES_USECNT, atomic_read(&pd->usecnt), RDMA_NLDEV_ATTR_PAD)) goto err; - if ((pd->flags & IB_PD_UNSAFE_GLOBAL_RKEY) && - nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_UNSAFE_GLOBAL_RKEY, - pd->unsafe_global_rkey)) - goto err; if (fill_res_name_pid(msg, res)) goto err; diff --git a/drivers/infiniband/core/roce_gid_mgmt.c b/drivers/infiniband/core/roce_gid_mgmt.c index ee366199b169c..558de0b9895cb 100644 --- a/drivers/infiniband/core/roce_gid_mgmt.c +++ 
b/drivers/infiniband/core/roce_gid_mgmt.c @@ -267,6 +267,9 @@ is_upper_ndev_bond_master_filter(struct ib_device *ib_dev, u8 port, struct net_device *cookie_ndev = cookie; bool match = false; + if (!rdma_ndev) + return false; + rcu_read_lock(); if (netif_is_bond_master(cookie_ndev) && rdma_is_upper_dev_rcu(rdma_ndev, cookie_ndev)) @@ -767,8 +770,10 @@ static int netdevice_event(struct notifier_block *this, unsigned long event, case NETDEV_CHANGEADDR: cmds[0] = netdev_del_cmd; - cmds[1] = add_default_gid_cmd; - cmds[2] = add_cmd; + if (ndev->reg_state == NETREG_REGISTERED) { + cmds[1] = add_default_gid_cmd; + cmds[2] = add_cmd; + } break; case NETDEV_CHANGEUPPER: diff --git a/drivers/infiniband/core/sysfs.c b/drivers/infiniband/core/sysfs.c index 7fd14ead7b378..ace40bb98624c 100644 --- a/drivers/infiniband/core/sysfs.c +++ b/drivers/infiniband/core/sysfs.c @@ -512,7 +512,7 @@ static ssize_t show_pma_counter(struct ib_port *p, struct port_attribute *attr, ret = get_perf_mad(p->ibdev, p->port_num, tab_attr->attr_id, &data, 40 + offset / 8, sizeof(data)); if (ret < 0) - return sprintf(buf, "N/A (no PMA)\n"); + return ret; switch (width) { case 4: @@ -1057,10 +1057,12 @@ static int add_port(struct ib_device *device, int port_num, goto err_put; } - p->pma_table = get_counter_table(device, port_num); - ret = sysfs_create_group(&p->kobj, p->pma_table); - if (ret) - goto err_put_gid_attrs; + if (device->process_mad) { + p->pma_table = get_counter_table(device, port_num); + ret = sysfs_create_group(&p->kobj, p->pma_table); + if (ret) + goto err_put_gid_attrs; + } p->gid_group.name = "gids"; p->gid_group.attrs = alloc_group_attrs(show_port_gid, attr.gid_tbl_len); @@ -1173,7 +1175,8 @@ static int add_port(struct ib_device *device, int port_num, p->gid_group.attrs = NULL; err_remove_pma: - sysfs_remove_group(&p->kobj, p->pma_table); + if (p->pma_table) + sysfs_remove_group(&p->kobj, p->pma_table); err_put_gid_attrs: kobject_put(&p->gid_attr_group->kobj); @@ -1285,7 +1288,9 @@ 
static void free_port_list_attributes(struct ib_device *device) kfree(port->hw_stats); free_hsag(&port->kobj, port->hw_stats_ag); } - sysfs_remove_group(p, port->pma_table); + + if (port->pma_table) + sysfs_remove_group(p, port->pma_table); sysfs_remove_group(p, &port->pkey_group); sysfs_remove_group(p, &port->gid_group); sysfs_remove_group(&port->gid_attr_group->kobj, diff --git a/drivers/infiniband/hw/bnxt_re/main.c b/drivers/infiniband/hw/bnxt_re/main.c index 85cd1a3593d61..22bd9784fa2ea 100644 --- a/drivers/infiniband/hw/bnxt_re/main.c +++ b/drivers/infiniband/hw/bnxt_re/main.c @@ -1252,6 +1252,7 @@ static int bnxt_re_ib_reg(struct bnxt_re_dev *rdev) /* Registered a new RoCE device instance to netdev */ rc = bnxt_re_register_netdev(rdev); if (rc) { + rtnl_unlock(); pr_err("Failed to register with netedev: %#x\n", rc); return -EINVAL; } @@ -1461,6 +1462,7 @@ static void bnxt_re_task(struct work_struct *work) "Failed to register with IB: %#x", rc); bnxt_re_remove_one(rdev); bnxt_re_dev_unreg(rdev); + goto exit; } break; case NETDEV_UP: @@ -1484,6 +1486,7 @@ static void bnxt_re_task(struct work_struct *work) } smp_mb__before_atomic(); atomic_dec(&rdev->sched_count); +exit: kfree(re_work); } diff --git a/drivers/infiniband/hw/bnxt_re/qplib_fp.c b/drivers/infiniband/hw/bnxt_re/qplib_fp.c index 6ad0d46ab879a..249efa0a6aba7 100644 --- a/drivers/infiniband/hw/bnxt_re/qplib_fp.c +++ b/drivers/infiniband/hw/bnxt_re/qplib_fp.c @@ -360,7 +360,8 @@ void bnxt_qplib_disable_nq(struct bnxt_qplib_nq *nq) } /* Make sure the HW is stopped! 
*/ - bnxt_qplib_nq_stop_irq(nq, true); + if (nq->requested) + bnxt_qplib_nq_stop_irq(nq, true); if (nq->bar_reg_iomem) iounmap(nq->bar_reg_iomem); diff --git a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c index 2852d350ada16..6637df77d2365 100644 --- a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c +++ b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c @@ -309,8 +309,17 @@ static int bnxt_qplib_process_qp_event(struct bnxt_qplib_rcfw *rcfw, rcfw->aeq_handler(rcfw, qp_event, qp); break; default: - /* Command Response */ - spin_lock_irqsave(&cmdq->lock, flags); + /* + * Command Response + * cmdq->lock needs to be acquired to synchronize + * the command send and completion reaping. This function + * is always called with creq->lock held. Using + * the nested variant of spin_lock. + * + */ + + spin_lock_irqsave_nested(&cmdq->lock, flags, + SINGLE_DEPTH_NESTING); cookie = le16_to_cpu(qp_event->cookie); mcookie = qp_event->cookie; blocked = cookie & RCFW_CMD_IS_BLOCKING; diff --git a/drivers/infiniband/hw/hfi1/chip.c b/drivers/infiniband/hw/hfi1/chip.c index e1668bcc2d13d..902d12d6d88b0 100644 --- a/drivers/infiniband/hw/hfi1/chip.c +++ b/drivers/infiniband/hw/hfi1/chip.c @@ -12485,7 +12485,8 @@ static int init_cntrs(struct hfi1_devdata *dd) } /* allocate space for the counter values */ - dd->cntrs = kcalloc(dd->ndevcntrs, sizeof(u64), GFP_KERNEL); + dd->cntrs = kcalloc(dd->ndevcntrs + num_driver_cntrs, sizeof(u64), + GFP_KERNEL); if (!dd->cntrs) goto bail; diff --git a/drivers/infiniband/hw/hfi1/hfi.h b/drivers/infiniband/hw/hfi1/hfi.h index d9470317983f6..cfd2523863566 100644 --- a/drivers/infiniband/hw/hfi1/hfi.h +++ b/drivers/infiniband/hw/hfi1/hfi.h @@ -154,6 +154,8 @@ struct hfi1_ib_stats { extern struct hfi1_ib_stats hfi1_stats; extern const struct pci_error_handlers hfi1_pci_err_handler; +extern int num_driver_cntrs; + /* * First-cut criterion for "device is active" is * two thousand dwords combined Tx, Rx traffic per diff --git 
a/drivers/infiniband/hw/hfi1/user_sdma.c b/drivers/infiniband/hw/hfi1/user_sdma.c index 5c88706121c1c..51831bfbf90f5 100644 --- a/drivers/infiniband/hw/hfi1/user_sdma.c +++ b/drivers/infiniband/hw/hfi1/user_sdma.c @@ -187,7 +187,6 @@ int hfi1_user_sdma_alloc_queues(struct hfi1_ctxtdata *uctxt, pq->ctxt = uctxt->ctxt; pq->subctxt = fd->subctxt; pq->n_max_reqs = hfi1_sdma_comp_ring_size; - pq->state = SDMA_PKT_Q_INACTIVE; atomic_set(&pq->n_reqs, 0); init_waitqueue_head(&pq->wait); atomic_set(&pq->n_locked, 0); @@ -276,7 +275,7 @@ int hfi1_user_sdma_free_queues(struct hfi1_filedata *fd, /* Wait until all requests have been freed. */ wait_event_interruptible( pq->wait, - (READ_ONCE(pq->state) == SDMA_PKT_Q_INACTIVE)); + !atomic_read(&pq->n_reqs)); kfree(pq->reqs); kfree(pq->req_in_use); kmem_cache_destroy(pq->txreq_cache); @@ -312,6 +311,13 @@ static u8 dlid_to_selector(u16 dlid) return mapping[hash]; } +/** + * hfi1_user_sdma_process_request() - Process and start a user sdma request + * @fd: valid file descriptor + * @iovec: array of io vectors to process + * @dim: overall iovec array size + * @count: number of io vector array entries processed + */ int hfi1_user_sdma_process_request(struct hfi1_filedata *fd, struct iovec *iovec, unsigned long dim, unsigned long *count) @@ -328,7 +334,6 @@ int hfi1_user_sdma_process_request(struct hfi1_filedata *fd, u8 opcode, sc, vl; u16 pkey; u32 slid; - int req_queued = 0; u16 dlid; u32 selector; @@ -392,7 +397,6 @@ int hfi1_user_sdma_process_request(struct hfi1_filedata *fd, req->data_len = 0; req->pq = pq; req->cq = cq; - req->status = -1; req->ahg_idx = -1; req->iov_idx = 0; req->sent = 0; @@ -400,12 +404,14 @@ int hfi1_user_sdma_process_request(struct hfi1_filedata *fd, req->seqcomp = 0; req->seqsubmitted = 0; req->tids = NULL; - req->done = 0; req->has_error = 0; INIT_LIST_HEAD(&req->txps); memcpy(&req->info, &info, sizeof(info)); + /* The request is initialized, count it */ + atomic_inc(&pq->n_reqs); + if 
(req_opcode(info.ctrl) == EXPECTED) { /* expected must have a TID info and at least one data vector */ if (req->data_iovs < 2) { @@ -500,7 +506,6 @@ int hfi1_user_sdma_process_request(struct hfi1_filedata *fd, ret = pin_vector_pages(req, &req->iovs[i]); if (ret) { req->data_iovs = i; - req->status = ret; goto free_req; } req->data_len += req->iovs[i].iov.iov_len; @@ -561,23 +566,11 @@ int hfi1_user_sdma_process_request(struct hfi1_filedata *fd, req->ahg_idx = sdma_ahg_alloc(req->sde); set_comp_state(pq, cq, info.comp_idx, QUEUED, 0); - atomic_inc(&pq->n_reqs); - req_queued = 1; + pq->state = SDMA_PKT_Q_ACTIVE; /* Send the first N packets in the request to buy us some time */ ret = user_sdma_send_pkts(req, pcount); - if (unlikely(ret < 0 && ret != -EBUSY)) { - req->status = ret; + if (unlikely(ret < 0 && ret != -EBUSY)) goto free_req; - } - - /* - * It is possible that the SDMA engine would have processed all the - * submitted packets by the time we get here. Therefore, only set - * packet queue state to ACTIVE if there are still uncompleted - * requests. - */ - if (atomic_read(&pq->n_reqs)) - xchg(&pq->state, SDMA_PKT_Q_ACTIVE); /* * This is a somewhat blocking send implementation. @@ -588,14 +581,8 @@ int hfi1_user_sdma_process_request(struct hfi1_filedata *fd, while (req->seqsubmitted != req->info.npkts) { ret = user_sdma_send_pkts(req, pcount); if (ret < 0) { - if (ret != -EBUSY) { - req->status = ret; - WRITE_ONCE(req->has_error, 1); - if (READ_ONCE(req->seqcomp) == - req->seqsubmitted - 1) - goto free_req; - return ret; - } + if (ret != -EBUSY) + goto free_req; wait_event_interruptible_timeout( pq->busy.wait_dma, (pq->state == SDMA_PKT_Q_ACTIVE), @@ -606,10 +593,19 @@ int hfi1_user_sdma_process_request(struct hfi1_filedata *fd, *count += idx; return 0; free_req: - user_sdma_free_request(req, true); - if (req_queued) + /* + * If the submitted seqsubmitted == npkts, the completion routine + * controls the final state. 
If seqsubmitted < npkts, wait for any + outstanding packets to finish before cleaning up. + */ + if (req->seqsubmitted < req->info.npkts) { + if (req->seqsubmitted) + wait_event(pq->busy.wait_dma, + (req->seqcomp == req->seqsubmitted - 1)); + user_sdma_free_request(req, true); pq_update(pq); - set_comp_state(pq, cq, info.comp_idx, ERROR, req->status); + set_comp_state(pq, cq, info.comp_idx, ERROR, ret); + } return ret; } @@ -917,7 +913,6 @@ static int user_sdma_send_pkts(struct user_sdma_request *req, unsigned maxpkts) ret = sdma_send_txlist(req->sde, &pq->busy, &req->txps, &count); req->seqsubmitted += count; if (req->seqsubmitted == req->info.npkts) { - WRITE_ONCE(req->done, 1); /* * The txreq has already been submitted to the HW queue * so we can free the AHG entry now. Corruption will not @@ -1365,11 +1360,15 @@ static int set_txreq_header_ahg(struct user_sdma_request *req, return idx; } -/* - * SDMA tx request completion callback. Called when the SDMA progress - * state machine gets notification that the SDMA descriptors for this - * tx request have been processed by the DMA engine. Called in - * interrupt context. +/** + * user_sdma_txreq_cb() - SDMA tx request completion callback. + * @txreq: valid sdma tx request + * @status: success/failure of request + * + * Called when the SDMA progress state machine gets notification that + * the SDMA descriptors for this tx request have been processed by the + * DMA engine. Called in interrupt context. + * Only do work on completed sequences. 
*/ static void user_sdma_txreq_cb(struct sdma_txreq *txreq, int status) { @@ -1378,7 +1377,7 @@ static void user_sdma_txreq_cb(struct sdma_txreq *txreq, int status) struct user_sdma_request *req; struct hfi1_user_sdma_pkt_q *pq; struct hfi1_user_sdma_comp_q *cq; - u16 idx; + enum hfi1_sdma_comp_state state = COMPLETE; if (!tx->req) return; @@ -1391,39 +1390,25 @@ static void user_sdma_txreq_cb(struct sdma_txreq *txreq, int status) SDMA_DBG(req, "SDMA completion with error %d", status); WRITE_ONCE(req->has_error, 1); + state = ERROR; } req->seqcomp = tx->seqnum; kmem_cache_free(pq->txreq_cache, tx); - tx = NULL; - - idx = req->info.comp_idx; - if (req->status == -1 && status == SDMA_TXREQ_S_OK) { - if (req->seqcomp == req->info.npkts - 1) { - req->status = 0; - user_sdma_free_request(req, false); - pq_update(pq); - set_comp_state(pq, cq, idx, COMPLETE, 0); - } - } else { - if (status != SDMA_TXREQ_S_OK) - req->status = status; - if (req->seqcomp == (READ_ONCE(req->seqsubmitted) - 1) && - (READ_ONCE(req->done) || - READ_ONCE(req->has_error))) { - user_sdma_free_request(req, false); - pq_update(pq); - set_comp_state(pq, cq, idx, ERROR, req->status); - } - } + + /* sequence isn't complete? 
We are done */ + if (req->seqcomp != req->info.npkts - 1) + return; + + user_sdma_free_request(req, false); + set_comp_state(pq, cq, req->info.comp_idx, state, status); + pq_update(pq); } static inline void pq_update(struct hfi1_user_sdma_pkt_q *pq) { - if (atomic_dec_and_test(&pq->n_reqs)) { - xchg(&pq->state, SDMA_PKT_Q_INACTIVE); + if (atomic_dec_and_test(&pq->n_reqs)) wake_up(&pq->wait); - } } static void user_sdma_free_request(struct user_sdma_request *req, bool unpin) @@ -1448,6 +1433,8 @@ static void user_sdma_free_request(struct user_sdma_request *req, bool unpin) if (!node) continue; + req->iovs[i].node = NULL; + if (unpin) hfi1_mmu_rb_remove(req->pq->handler, &node->rb); diff --git a/drivers/infiniband/hw/hfi1/user_sdma.h b/drivers/infiniband/hw/hfi1/user_sdma.h index d2bc77f75253f..91c343f91776a 100644 --- a/drivers/infiniband/hw/hfi1/user_sdma.h +++ b/drivers/infiniband/hw/hfi1/user_sdma.h @@ -105,9 +105,10 @@ static inline int ahg_header_set(u32 *arr, int idx, size_t array_size, #define TXREQ_FLAGS_REQ_ACK BIT(0) /* Set the ACK bit in the header */ #define TXREQ_FLAGS_REQ_DISABLE_SH BIT(1) /* Disable header suppression */ -#define SDMA_PKT_Q_INACTIVE BIT(0) -#define SDMA_PKT_Q_ACTIVE BIT(1) -#define SDMA_PKT_Q_DEFERRED BIT(2) +enum pkt_q_sdma_state { + SDMA_PKT_Q_ACTIVE, + SDMA_PKT_Q_DEFERRED, +}; /* * Maximum retry attempts to submit a TX request @@ -133,7 +134,7 @@ struct hfi1_user_sdma_pkt_q { struct user_sdma_request *reqs; unsigned long *req_in_use; struct iowait busy; - unsigned state; + enum pkt_q_sdma_state state; wait_queue_head_t wait; unsigned long unpinned; struct mmu_rb_handler *handler; @@ -205,8 +206,6 @@ struct user_sdma_request { /* Writeable fields shared with interrupt */ u64 seqcomp ____cacheline_aligned_in_smp; u64 seqsubmitted; - /* status of the last txreq completed */ - int status; /* Send side fields */ struct list_head txps ____cacheline_aligned_in_smp; @@ -228,7 +227,6 @@ struct user_sdma_request { u16 tididx; /* progress 
index moving along the iovs array */ u8 iov_idx; - u8 done; u8 has_error; struct user_sdma_iovec iovs[MAX_VECTORS_PER_REQ]; diff --git a/drivers/infiniband/hw/hfi1/verbs.c b/drivers/infiniband/hw/hfi1/verbs.c index a7c586a5589d6..48692adbe811e 100644 --- a/drivers/infiniband/hw/hfi1/verbs.c +++ b/drivers/infiniband/hw/hfi1/verbs.c @@ -1141,6 +1141,8 @@ int hfi1_verbs_send_pio(struct rvt_qp *qp, struct hfi1_pkt_state *ps, if (slen > len) slen = len; + if (slen > ss->sge.sge_length) + slen = ss->sge.sge_length; rvt_update_sge(ss, slen, false); seg_pio_copy_mid(pbuf, addr, slen); len -= slen; @@ -1701,7 +1703,7 @@ static const char * const driver_cntr_names[] = { static DEFINE_MUTEX(cntr_names_lock); /* protects the *_cntr_names bufers */ static const char **dev_cntr_names; static const char **port_cntr_names; -static int num_driver_cntrs = ARRAY_SIZE(driver_cntr_names); +int num_driver_cntrs = ARRAY_SIZE(driver_cntr_names); static int num_dev_cntrs; static int num_port_cntrs; static int cntr_names_initialized; diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c index 0218c0f8c2a7d..a442b29e76119 100644 --- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c +++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c @@ -1661,10 +1661,9 @@ static int hns_roce_v2_set_mac(struct hns_roce_dev *hr_dev, u8 phy_port, return hns_roce_cmq_send(hr_dev, &desc, 1); } -static int hns_roce_v2_write_mtpt(void *mb_buf, struct hns_roce_mr *mr, - unsigned long mtpt_idx) +static int set_mtpt_pbl(struct hns_roce_v2_mpt_entry *mpt_entry, + struct hns_roce_mr *mr) { - struct hns_roce_v2_mpt_entry *mpt_entry; struct scatterlist *sg; u64 page_addr; u64 *pages; @@ -1672,6 +1671,53 @@ static int hns_roce_v2_write_mtpt(void *mb_buf, struct hns_roce_mr *mr, int len; int entry; + mpt_entry->pbl_size = cpu_to_le32(mr->pbl_size); + mpt_entry->pbl_ba_l = cpu_to_le32(lower_32_bits(mr->pbl_ba >> 3)); + roce_set_field(mpt_entry->byte_48_mode_ba, + 
V2_MPT_BYTE_48_PBL_BA_H_M, V2_MPT_BYTE_48_PBL_BA_H_S, + upper_32_bits(mr->pbl_ba >> 3)); + + pages = (u64 *)__get_free_page(GFP_KERNEL); + if (!pages) + return -ENOMEM; + + i = 0; + for_each_sg(mr->umem->sg_head.sgl, sg, mr->umem->nmap, entry) { + len = sg_dma_len(sg) >> PAGE_SHIFT; + for (j = 0; j < len; ++j) { + page_addr = sg_dma_address(sg) + + (j << mr->umem->page_shift); + pages[i] = page_addr >> 6; + /* Record the first 2 entry directly to MTPT table */ + if (i >= HNS_ROCE_V2_MAX_INNER_MTPT_NUM - 1) + goto found; + i++; + } + } +found: + mpt_entry->pa0_l = cpu_to_le32(lower_32_bits(pages[0])); + roce_set_field(mpt_entry->byte_56_pa0_h, V2_MPT_BYTE_56_PA0_H_M, + V2_MPT_BYTE_56_PA0_H_S, upper_32_bits(pages[0])); + + mpt_entry->pa1_l = cpu_to_le32(lower_32_bits(pages[1])); + roce_set_field(mpt_entry->byte_64_buf_pa1, V2_MPT_BYTE_64_PA1_H_M, + V2_MPT_BYTE_64_PA1_H_S, upper_32_bits(pages[1])); + roce_set_field(mpt_entry->byte_64_buf_pa1, + V2_MPT_BYTE_64_PBL_BUF_PG_SZ_M, + V2_MPT_BYTE_64_PBL_BUF_PG_SZ_S, + mr->pbl_buf_pg_sz + PG_SHIFT_OFFSET); + + free_page((unsigned long)pages); + + return 0; +} + +static int hns_roce_v2_write_mtpt(void *mb_buf, struct hns_roce_mr *mr, + unsigned long mtpt_idx) +{ + struct hns_roce_v2_mpt_entry *mpt_entry; + int ret; + mpt_entry = mb_buf; memset(mpt_entry, 0, sizeof(*mpt_entry)); @@ -1686,7 +1732,6 @@ static int hns_roce_v2_write_mtpt(void *mb_buf, struct hns_roce_mr *mr, mr->pbl_ba_pg_sz + PG_SHIFT_OFFSET); roce_set_field(mpt_entry->byte_4_pd_hop_st, V2_MPT_BYTE_4_PD_M, V2_MPT_BYTE_4_PD_S, mr->pd); - mpt_entry->byte_4_pd_hop_st = cpu_to_le32(mpt_entry->byte_4_pd_hop_st); roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_RA_EN_S, 0); roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_R_INV_EN_S, 1); @@ -1700,13 +1745,11 @@ static int hns_roce_v2_write_mtpt(void *mb_buf, struct hns_roce_mr *mr, (mr->access & IB_ACCESS_REMOTE_WRITE ? 
1 : 0)); roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_LW_EN_S, (mr->access & IB_ACCESS_LOCAL_WRITE ? 1 : 0)); - mpt_entry->byte_8_mw_cnt_en = cpu_to_le32(mpt_entry->byte_8_mw_cnt_en); roce_set_bit(mpt_entry->byte_12_mw_pa, V2_MPT_BYTE_12_PA_S, mr->type == MR_TYPE_MR ? 0 : 1); roce_set_bit(mpt_entry->byte_12_mw_pa, V2_MPT_BYTE_12_INNER_PA_VLD_S, 1); - mpt_entry->byte_12_mw_pa = cpu_to_le32(mpt_entry->byte_12_mw_pa); mpt_entry->len_l = cpu_to_le32(lower_32_bits(mr->size)); mpt_entry->len_h = cpu_to_le32(upper_32_bits(mr->size)); @@ -1717,53 +1760,9 @@ static int hns_roce_v2_write_mtpt(void *mb_buf, struct hns_roce_mr *mr, if (mr->type == MR_TYPE_DMA) return 0; - mpt_entry->pbl_size = cpu_to_le32(mr->pbl_size); - - mpt_entry->pbl_ba_l = cpu_to_le32(lower_32_bits(mr->pbl_ba >> 3)); - roce_set_field(mpt_entry->byte_48_mode_ba, V2_MPT_BYTE_48_PBL_BA_H_M, - V2_MPT_BYTE_48_PBL_BA_H_S, - upper_32_bits(mr->pbl_ba >> 3)); - mpt_entry->byte_48_mode_ba = cpu_to_le32(mpt_entry->byte_48_mode_ba); - - pages = (u64 *)__get_free_page(GFP_KERNEL); - if (!pages) - return -ENOMEM; - - i = 0; - for_each_sg(mr->umem->sg_head.sgl, sg, mr->umem->nmap, entry) { - len = sg_dma_len(sg) >> PAGE_SHIFT; - for (j = 0; j < len; ++j) { - page_addr = sg_dma_address(sg) + - (j << mr->umem->page_shift); - pages[i] = page_addr >> 6; - - /* Record the first 2 entry directly to MTPT table */ - if (i >= HNS_ROCE_V2_MAX_INNER_MTPT_NUM - 1) - goto found; - i++; - } - } + ret = set_mtpt_pbl(mpt_entry, mr); -found: - mpt_entry->pa0_l = cpu_to_le32(lower_32_bits(pages[0])); - roce_set_field(mpt_entry->byte_56_pa0_h, V2_MPT_BYTE_56_PA0_H_M, - V2_MPT_BYTE_56_PA0_H_S, - upper_32_bits(pages[0])); - mpt_entry->byte_56_pa0_h = cpu_to_le32(mpt_entry->byte_56_pa0_h); - - mpt_entry->pa1_l = cpu_to_le32(lower_32_bits(pages[1])); - roce_set_field(mpt_entry->byte_64_buf_pa1, V2_MPT_BYTE_64_PA1_H_M, - V2_MPT_BYTE_64_PA1_H_S, upper_32_bits(pages[1])); - - free_page((unsigned long)pages); - - 
roce_set_field(mpt_entry->byte_64_buf_pa1, - V2_MPT_BYTE_64_PBL_BUF_PG_SZ_M, - V2_MPT_BYTE_64_PBL_BUF_PG_SZ_S, - mr->pbl_buf_pg_sz + PG_SHIFT_OFFSET); - mpt_entry->byte_64_buf_pa1 = cpu_to_le32(mpt_entry->byte_64_buf_pa1); - - return 0; + return ret; } static int hns_roce_v2_rereg_write_mtpt(struct hns_roce_dev *hr_dev, @@ -1772,6 +1771,7 @@ static int hns_roce_v2_rereg_write_mtpt(struct hns_roce_dev *hr_dev, u64 size, void *mb_buf) { struct hns_roce_v2_mpt_entry *mpt_entry = mb_buf; + int ret = 0; if (flags & IB_MR_REREG_PD) { roce_set_field(mpt_entry->byte_4_pd_hop_st, V2_MPT_BYTE_4_PD_M, @@ -1784,14 +1784,14 @@ static int hns_roce_v2_rereg_write_mtpt(struct hns_roce_dev *hr_dev, V2_MPT_BYTE_8_BIND_EN_S, (mr_access_flags & IB_ACCESS_MW_BIND ? 1 : 0)); roce_set_bit(mpt_entry->byte_8_mw_cnt_en, - V2_MPT_BYTE_8_ATOMIC_EN_S, - (mr_access_flags & IB_ACCESS_REMOTE_ATOMIC ? 1 : 0)); + V2_MPT_BYTE_8_ATOMIC_EN_S, + mr_access_flags & IB_ACCESS_REMOTE_ATOMIC ? 1 : 0); roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_RR_EN_S, - (mr_access_flags & IB_ACCESS_REMOTE_READ ? 1 : 0)); + mr_access_flags & IB_ACCESS_REMOTE_READ ? 1 : 0); roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_RW_EN_S, - (mr_access_flags & IB_ACCESS_REMOTE_WRITE ? 1 : 0)); + mr_access_flags & IB_ACCESS_REMOTE_WRITE ? 1 : 0); roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_LW_EN_S, - (mr_access_flags & IB_ACCESS_LOCAL_WRITE ? 1 : 0)); + mr_access_flags & IB_ACCESS_LOCAL_WRITE ? 
1 : 0); } if (flags & IB_MR_REREG_TRANS) { @@ -1800,21 +1800,13 @@ static int hns_roce_v2_rereg_write_mtpt(struct hns_roce_dev *hr_dev, mpt_entry->len_l = cpu_to_le32(lower_32_bits(size)); mpt_entry->len_h = cpu_to_le32(upper_32_bits(size)); - mpt_entry->pbl_size = cpu_to_le32(mr->pbl_size); - mpt_entry->pbl_ba_l = - cpu_to_le32(lower_32_bits(mr->pbl_ba >> 3)); - roce_set_field(mpt_entry->byte_48_mode_ba, - V2_MPT_BYTE_48_PBL_BA_H_M, - V2_MPT_BYTE_48_PBL_BA_H_S, - upper_32_bits(mr->pbl_ba >> 3)); - mpt_entry->byte_48_mode_ba = - cpu_to_le32(mpt_entry->byte_48_mode_ba); - mr->iova = iova; mr->size = size; + + ret = set_mtpt_pbl(mpt_entry, mr); } - return 0; + return ret; } static void *get_cqe_v2(struct hns_roce_cq *hr_cq, int n) diff --git a/drivers/infiniband/hw/mlx5/devx.c b/drivers/infiniband/hw/mlx5/devx.c index f2f11e652dcd2..02f36ab72ad42 100644 --- a/drivers/infiniband/hw/mlx5/devx.c +++ b/drivers/infiniband/hw/mlx5/devx.c @@ -857,7 +857,9 @@ static int devx_umem_get(struct mlx5_ib_dev *dev, struct ib_ucontext *ucontext, err = uverbs_get_flags32(&access, attrs, MLX5_IB_ATTR_DEVX_UMEM_REG_ACCESS, - IB_ACCESS_SUPPORTED); + IB_ACCESS_LOCAL_WRITE | + IB_ACCESS_REMOTE_WRITE | + IB_ACCESS_REMOTE_READ); if (err) return err; diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c index c414f3809e5c2..50be240df331b 100644 --- a/drivers/infiniband/hw/mlx5/main.c +++ b/drivers/infiniband/hw/mlx5/main.c @@ -1094,31 +1094,26 @@ enum mlx5_ib_width { MLX5_IB_WIDTH_12X = 1 << 4 }; -static int translate_active_width(struct ib_device *ibdev, u8 active_width, +static void translate_active_width(struct ib_device *ibdev, u8 active_width, u8 *ib_width) { struct mlx5_ib_dev *dev = to_mdev(ibdev); - int err = 0; - if (active_width & MLX5_IB_WIDTH_1X) { + if (active_width & MLX5_IB_WIDTH_1X) *ib_width = IB_WIDTH_1X; - } else if (active_width & MLX5_IB_WIDTH_2X) { - mlx5_ib_dbg(dev, "active_width %d is not supported by IB spec\n", - (int)active_width); - 
err = -EINVAL; - } else if (active_width & MLX5_IB_WIDTH_4X) { + else if (active_width & MLX5_IB_WIDTH_4X) *ib_width = IB_WIDTH_4X; - } else if (active_width & MLX5_IB_WIDTH_8X) { + else if (active_width & MLX5_IB_WIDTH_8X) *ib_width = IB_WIDTH_8X; - } else if (active_width & MLX5_IB_WIDTH_12X) { + else if (active_width & MLX5_IB_WIDTH_12X) *ib_width = IB_WIDTH_12X; - } else { - mlx5_ib_dbg(dev, "Invalid active_width %d\n", + else { + mlx5_ib_dbg(dev, "Invalid active_width %d, setting width to default value: 4x\n", (int)active_width); - err = -EINVAL; + *ib_width = IB_WIDTH_4X; } - return err; + return; } static int mlx5_mtu_to_ib_mtu(int mtu) @@ -1225,10 +1220,8 @@ static int mlx5_query_hca_port(struct ib_device *ibdev, u8 port, if (err) goto out; - err = translate_active_width(ibdev, ib_link_width_oper, - &props->active_width); - if (err) - goto out; + translate_active_width(ibdev, ib_link_width_oper, &props->active_width); + err = mlx5_query_port_ib_proto_oper(mdev, &props->active_speed, port); if (err) goto out; diff --git a/drivers/infiniband/hw/mlx5/mr.c b/drivers/infiniband/hw/mlx5/mr.c index e223148376458..7df4a4fe4af47 100644 --- a/drivers/infiniband/hw/mlx5/mr.c +++ b/drivers/infiniband/hw/mlx5/mr.c @@ -691,7 +691,6 @@ int mlx5_mr_cache_init(struct mlx5_ib_dev *dev) init_completion(&ent->compl); INIT_WORK(&ent->work, cache_work_func); INIT_DELAYED_WORK(&ent->dwork, delayed_cache_work_func); - queue_work(cache->wq, &ent->work); if (i > MR_CACHE_LAST_STD_ENTRY) { mlx5_odp_init_mr_cache_entry(ent); @@ -711,6 +710,7 @@ int mlx5_mr_cache_init(struct mlx5_ib_dev *dev) ent->limit = dev->mdev->profile->mr_cache[i].limit; else ent->limit = 0; + queue_work(cache->wq, &ent->work); } err = mlx5_mr_cache_debugfs_init(dev); diff --git a/drivers/infiniband/hw/mlx5/odp.c b/drivers/infiniband/hw/mlx5/odp.c index d216e0d2921da..9e1cac8cb2609 100644 --- a/drivers/infiniband/hw/mlx5/odp.c +++ b/drivers/infiniband/hw/mlx5/odp.c @@ -724,6 +724,7 @@ static int 
pagefault_single_data_segment(struct mlx5_ib_dev *dev, head = frame; bcnt -= frame->bcnt; + offset = 0; } break; diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c index 6cba2a02d11ba..183fe5c8ceb77 100644 --- a/drivers/infiniband/hw/mlx5/qp.c +++ b/drivers/infiniband/hw/mlx5/qp.c @@ -3243,7 +3243,9 @@ static bool modify_dci_qp_is_ok(enum ib_qp_state cur_state, enum ib_qp_state new int req = IB_QP_STATE; int opt = 0; - if (cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT) { + if (new_state == IB_QPS_RESET) { + return is_valid_mask(attr_mask, req, opt); + } else if (cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT) { req |= IB_QP_PKEY_INDEX | IB_QP_PORT; return is_valid_mask(attr_mask, req, opt); } else if (cur_state == IB_QPS_INIT && new_state == IB_QPS_INIT) { @@ -4411,17 +4413,18 @@ static int _mlx5_ib_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr, goto out; } - if (wr->opcode == IB_WR_LOCAL_INV || - wr->opcode == IB_WR_REG_MR) { + if (wr->opcode == IB_WR_REG_MR) { fence = dev->umr_fence; next_fence = MLX5_FENCE_MODE_INITIATOR_SMALL; - } else if (wr->send_flags & IB_SEND_FENCE) { - if (qp->next_fence) - fence = MLX5_FENCE_MODE_SMALL_AND_FENCE; - else - fence = MLX5_FENCE_MODE_FENCE; - } else { - fence = qp->next_fence; + } else { + if (wr->send_flags & IB_SEND_FENCE) { + if (qp->next_fence) + fence = MLX5_FENCE_MODE_SMALL_AND_FENCE; + else + fence = MLX5_FENCE_MODE_FENCE; + } else { + fence = qp->next_fence; + } } switch (ibqp->qp_type) { diff --git a/drivers/infiniband/hw/usnic/usnic_ib_verbs.c b/drivers/infiniband/hw/usnic/usnic_ib_verbs.c index 9973ac893635c..3db232429630e 100644 --- a/drivers/infiniband/hw/usnic/usnic_ib_verbs.c +++ b/drivers/infiniband/hw/usnic/usnic_ib_verbs.c @@ -334,13 +334,16 @@ int usnic_ib_query_port(struct ib_device *ibdev, u8 port, usnic_dbg("\n"); - mutex_lock(&us_ibdev->usdev_lock); if (ib_get_eth_speed(ibdev, port, &props->active_speed, - &props->active_width)) { - 
mutex_unlock(&us_ibdev->usdev_lock); + &props->active_width)) return -EINVAL; - } + /* + * usdev_lock is acquired after (and not before) ib_get_eth_speed call + * because acquiring rtnl_lock in ib_get_eth_speed, while holding + * usdev_lock could lead to a deadlock. + */ + mutex_lock(&us_ibdev->usdev_lock); /* props being zeroed by the caller, avoid zeroing it here */ props->lid = 0; diff --git a/drivers/infiniband/hw/vmw_pvrdma/pvrdma.h b/drivers/infiniband/hw/vmw_pvrdma/pvrdma.h index 42b8685c997eb..3c633ab580528 100644 --- a/drivers/infiniband/hw/vmw_pvrdma/pvrdma.h +++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma.h @@ -427,7 +427,40 @@ static inline enum ib_qp_state pvrdma_qp_state_to_ib(enum pvrdma_qp_state state) static inline enum pvrdma_wr_opcode ib_wr_opcode_to_pvrdma(enum ib_wr_opcode op) { - return (enum pvrdma_wr_opcode)op; + switch (op) { + case IB_WR_RDMA_WRITE: + return PVRDMA_WR_RDMA_WRITE; + case IB_WR_RDMA_WRITE_WITH_IMM: + return PVRDMA_WR_RDMA_WRITE_WITH_IMM; + case IB_WR_SEND: + return PVRDMA_WR_SEND; + case IB_WR_SEND_WITH_IMM: + return PVRDMA_WR_SEND_WITH_IMM; + case IB_WR_RDMA_READ: + return PVRDMA_WR_RDMA_READ; + case IB_WR_ATOMIC_CMP_AND_SWP: + return PVRDMA_WR_ATOMIC_CMP_AND_SWP; + case IB_WR_ATOMIC_FETCH_AND_ADD: + return PVRDMA_WR_ATOMIC_FETCH_AND_ADD; + case IB_WR_LSO: + return PVRDMA_WR_LSO; + case IB_WR_SEND_WITH_INV: + return PVRDMA_WR_SEND_WITH_INV; + case IB_WR_RDMA_READ_WITH_INV: + return PVRDMA_WR_RDMA_READ_WITH_INV; + case IB_WR_LOCAL_INV: + return PVRDMA_WR_LOCAL_INV; + case IB_WR_REG_MR: + return PVRDMA_WR_FAST_REG_MR; + case IB_WR_MASKED_ATOMIC_CMP_AND_SWP: + return PVRDMA_WR_MASKED_ATOMIC_CMP_AND_SWP; + case IB_WR_MASKED_ATOMIC_FETCH_AND_ADD: + return PVRDMA_WR_MASKED_ATOMIC_FETCH_AND_ADD; + case IB_WR_REG_SIG_MR: + return PVRDMA_WR_REG_SIG_MR; + default: + return PVRDMA_WR_ERROR; + } } static inline enum ib_wc_status pvrdma_wc_status_to_ib( diff --git a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_qp.c 
b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_qp.c index 60083c0363a57..9aeb330932794 100644 --- a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_qp.c +++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_qp.c @@ -721,6 +721,12 @@ int pvrdma_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr, wr->opcode == IB_WR_RDMA_WRITE_WITH_IMM) wqe_hdr->ex.imm_data = wr->ex.imm_data; + if (unlikely(wqe_hdr->opcode == PVRDMA_WR_ERROR)) { + *bad_wr = wr; + ret = -EINVAL; + goto out; + } + switch (qp->ibqp.qp_type) { case IB_QPT_GSI: case IB_QPT_UD: diff --git a/drivers/infiniband/sw/rdmavt/ah.c b/drivers/infiniband/sw/rdmavt/ah.c index 89ec0f64abfc3..084bb4baebb50 100644 --- a/drivers/infiniband/sw/rdmavt/ah.c +++ b/drivers/infiniband/sw/rdmavt/ah.c @@ -91,13 +91,15 @@ EXPORT_SYMBOL(rvt_check_ah); * rvt_create_ah - create an address handle * @pd: the protection domain * @ah_attr: the attributes of the AH + * @udata: pointer to user's input output buffer information. * * This may be called from interrupt context. 
* * Return: newly allocated ah */ struct ib_ah *rvt_create_ah(struct ib_pd *pd, - struct rdma_ah_attr *ah_attr) + struct rdma_ah_attr *ah_attr, + struct ib_udata *udata) { struct rvt_ah *ah; struct rvt_dev_info *dev = ib_to_rvt(pd->device); diff --git a/drivers/infiniband/sw/rdmavt/ah.h b/drivers/infiniband/sw/rdmavt/ah.h index 16105af991890..25271b48a6830 100644 --- a/drivers/infiniband/sw/rdmavt/ah.h +++ b/drivers/infiniband/sw/rdmavt/ah.h @@ -51,7 +51,8 @@ #include struct ib_ah *rvt_create_ah(struct ib_pd *pd, - struct rdma_ah_attr *ah_attr); + struct rdma_ah_attr *ah_attr, + struct ib_udata *udata); int rvt_destroy_ah(struct ib_ah *ibah); int rvt_modify_ah(struct ib_ah *ibah, struct rdma_ah_attr *ah_attr); int rvt_query_ah(struct ib_ah *ibah, struct rdma_ah_attr *ah_attr); diff --git a/drivers/infiniband/sw/rxe/rxe_req.c b/drivers/infiniband/sw/rxe/rxe_req.c index 8be27238a86e4..fa98a52796470 100644 --- a/drivers/infiniband/sw/rxe/rxe_req.c +++ b/drivers/infiniband/sw/rxe/rxe_req.c @@ -640,6 +640,7 @@ int rxe_requester(void *arg) rmr->access = wqe->wr.wr.reg.access; rmr->lkey = wqe->wr.wr.reg.key; rmr->rkey = wqe->wr.wr.reg.key; + rmr->iova = wqe->wr.wr.reg.mr->iova; wqe->state = wqe_state_done; wqe->status = IB_WC_SUCCESS; } else { diff --git a/drivers/infiniband/sw/rxe/rxe_resp.c b/drivers/infiniband/sw/rxe/rxe_resp.c index aa5833318372b..4111b798fd3c9 100644 --- a/drivers/infiniband/sw/rxe/rxe_resp.c +++ b/drivers/infiniband/sw/rxe/rxe_resp.c @@ -682,6 +682,7 @@ static enum resp_states read_reply(struct rxe_qp *qp, rxe_advance_resp_resource(qp); res->type = RXE_READ_MASK; + res->replay = 0; res->read.va = qp->resp.va; res->read.va_org = qp->resp.va; @@ -752,7 +753,8 @@ static enum resp_states read_reply(struct rxe_qp *qp, state = RESPST_DONE; } else { qp->resp.res = NULL; - qp->resp.opcode = -1; + if (!res->replay) + qp->resp.opcode = -1; if (psn_compare(res->cur_psn, qp->resp.psn) >= 0) qp->resp.psn = res->cur_psn; state = RESPST_CLEANUP; @@ -814,6 +816,7 
@@ static enum resp_states execute(struct rxe_qp *qp, struct rxe_pkt_info *pkt) /* next expected psn, read handles this separately */ qp->resp.psn = (pkt->psn + 1) & BTH_PSN_MASK; + qp->resp.ack_psn = qp->resp.psn; qp->resp.opcode = pkt->opcode; qp->resp.status = IB_WC_SUCCESS; @@ -841,11 +844,16 @@ static enum resp_states do_complete(struct rxe_qp *qp, memset(&cqe, 0, sizeof(cqe)); - wc->wr_id = wqe->wr_id; - wc->status = qp->resp.status; - wc->qp = &qp->ibqp; + if (qp->rcq->is_user) { + uwc->status = qp->resp.status; + uwc->qp_num = qp->ibqp.qp_num; + uwc->wr_id = wqe->wr_id; + } else { + wc->status = qp->resp.status; + wc->qp = &qp->ibqp; + wc->wr_id = wqe->wr_id; + } - /* fields after status are not required for errors */ if (wc->status == IB_WC_SUCCESS) { wc->opcode = (pkt->mask & RXE_IMMDT_MASK && pkt->mask & RXE_WRITE_MASK) ? @@ -1065,7 +1073,7 @@ static enum resp_states duplicate_request(struct rxe_qp *qp, struct rxe_pkt_info *pkt) { enum resp_states rc; - u32 prev_psn = (qp->resp.psn - 1) & BTH_PSN_MASK; + u32 prev_psn = (qp->resp.ack_psn - 1) & BTH_PSN_MASK; if (pkt->mask & RXE_SEND_MASK || pkt->mask & RXE_WRITE_MASK) { @@ -1108,6 +1116,7 @@ static enum resp_states duplicate_request(struct rxe_qp *qp, res->state = (pkt->psn == res->first_psn) ? rdatm_res_state_new : rdatm_res_state_replay; + res->replay = 1; /* Reset the resource, except length. 
*/ res->read.va_org = iova; diff --git a/drivers/infiniband/sw/rxe/rxe_verbs.h b/drivers/infiniband/sw/rxe/rxe_verbs.h index af1470d293912..332a16dad2a7e 100644 --- a/drivers/infiniband/sw/rxe/rxe_verbs.h +++ b/drivers/infiniband/sw/rxe/rxe_verbs.h @@ -171,6 +171,7 @@ enum rdatm_res_state { struct resp_res { int type; + int replay; u32 first_psn; u32 last_psn; u32 cur_psn; @@ -195,6 +196,7 @@ struct rxe_resp_info { enum rxe_qp_state state; u32 msn; u32 psn; + u32 ack_psn; int opcode; int drop_msg; int goto_error; diff --git a/drivers/infiniband/ulp/ipoib/ipoib_cm.c b/drivers/infiniband/ulp/ipoib/ipoib_cm.c index 3d5424f335cb0..0428e01e8f691 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib_cm.c +++ b/drivers/infiniband/ulp/ipoib/ipoib_cm.c @@ -1438,11 +1438,15 @@ static void ipoib_cm_skb_reap(struct work_struct *work) spin_unlock_irqrestore(&priv->lock, flags); netif_tx_unlock_bh(dev); - if (skb->protocol == htons(ETH_P_IP)) + if (skb->protocol == htons(ETH_P_IP)) { + memset(IPCB(skb), 0, sizeof(*IPCB(skb))); icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED, htonl(mtu)); + } #if IS_ENABLED(CONFIG_IPV6) - else if (skb->protocol == htons(ETH_P_IPV6)) + else if (skb->protocol == htons(ETH_P_IPV6)) { + memset(IP6CB(skb), 0, sizeof(*IP6CB(skb))); icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu); + } #endif dev_kfree_skb_any(skb); diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c index e3d28f9ad9c0b..30f840f874b3c 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib_main.c +++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c @@ -1880,6 +1880,8 @@ static int ipoib_parent_init(struct net_device *ndev) sizeof(union ib_gid)); SET_NETDEV_DEV(priv->dev, priv->ca->dev.parent); + priv->dev->dev_port = priv->port - 1; + /* Let's set this one too for backwards compatibility. 
*/ priv->dev->dev_id = priv->port - 1; return 0; diff --git a/drivers/infiniband/ulp/iser/iser_verbs.c b/drivers/infiniband/ulp/iser/iser_verbs.c index b686a4aaffe86..bee8c0b1d6a51 100644 --- a/drivers/infiniband/ulp/iser/iser_verbs.c +++ b/drivers/infiniband/ulp/iser/iser_verbs.c @@ -1123,7 +1123,9 @@ u8 iser_check_task_pi_status(struct iscsi_iser_task *iser_task, IB_MR_CHECK_SIG_STATUS, &mr_status); if (ret) { pr_err("ib_check_mr_status failed, ret %d\n", ret); - goto err; + /* Not a lot we can do, return ambiguous guard error */ + *sector = 0; + return 0x1; } if (mr_status.fail_status & IB_MR_CHECK_SIG_STATUS) { @@ -1151,9 +1153,6 @@ u8 iser_check_task_pi_status(struct iscsi_iser_task *iser_task, } return 0; -err: - /* Not alot we can do here, return ambiguous guard error */ - return 0x1; } void iser_err_comp(struct ib_wc *wc, const char *type) diff --git a/drivers/infiniband/ulp/srpt/ib_srpt.c b/drivers/infiniband/ulp/srpt/ib_srpt.c index f37cbad022a24..f4bce5aa0ff84 100644 --- a/drivers/infiniband/ulp/srpt/ib_srpt.c +++ b/drivers/infiniband/ulp/srpt/ib_srpt.c @@ -2009,6 +2009,14 @@ static void srpt_free_ch(struct kref *kref) kfree_rcu(ch, rcu); } +/* + * Shut down the SCSI target session, tell the connection manager to + * disconnect the associated RDMA channel, transition the QP to the error + * state and remove the channel from the channel list. This function is + * typically called from inside srpt_zerolength_write_done(). Concurrent + * srpt_zerolength_write() calls from inside srpt_close_ch() are possible + * as long as the channel is on sport->nexus_list. 
+ */ static void srpt_release_channel_work(struct work_struct *w) { struct srpt_rdma_ch *ch; @@ -2036,6 +2044,11 @@ static void srpt_release_channel_work(struct work_struct *w) else ib_destroy_cm_id(ch->ib_cm.cm_id); + sport = ch->sport; + mutex_lock(&sport->mutex); + list_del_rcu(&ch->list); + mutex_unlock(&sport->mutex); + srpt_destroy_ch_ib(ch); srpt_free_ioctx_ring((struct srpt_ioctx **)ch->ioctx_ring, @@ -2046,11 +2059,6 @@ static void srpt_release_channel_work(struct work_struct *w) sdev, ch->rq_size, srp_max_req_size, DMA_FROM_DEVICE); - sport = ch->sport; - mutex_lock(&sport->mutex); - list_del_rcu(&ch->list); - mutex_unlock(&sport->mutex); - wake_up(&sport->ch_releaseQ); kref_put(&ch->kref, srpt_free_ch); diff --git a/drivers/input/joystick/xpad.c b/drivers/input/joystick/xpad.c index d4b9db487b16f..aa4e431cbcd35 100644 --- a/drivers/input/joystick/xpad.c +++ b/drivers/input/joystick/xpad.c @@ -252,6 +252,8 @@ static const struct xpad_device { { 0x0f30, 0x0202, "Joytech Advanced Controller", 0, XTYPE_XBOX }, { 0x0f30, 0x8888, "BigBen XBMiniPad Controller", 0, XTYPE_XBOX }, { 0x102c, 0xff0c, "Joytech Wireless Advanced Controller", 0, XTYPE_XBOX }, + { 0x1038, 0x1430, "SteelSeries Stratus Duo", 0, XTYPE_XBOX360 }, + { 0x1038, 0x1431, "SteelSeries Stratus Duo", 0, XTYPE_XBOX360 }, { 0x11c9, 0x55f0, "Nacon GC-100XF", 0, XTYPE_XBOX360 }, { 0x12ab, 0x0004, "Honey Bee Xbox360 dancepad", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX360 }, { 0x12ab, 0x0301, "PDP AFTERGLOW AX.1", 0, XTYPE_XBOX360 }, @@ -428,6 +430,7 @@ static const struct usb_device_id xpad_table[] = { XPAD_XBOXONE_VENDOR(0x0e6f), /* 0x0e6f X-Box One controllers */ XPAD_XBOX360_VENDOR(0x0f0d), /* Hori Controllers */ XPAD_XBOXONE_VENDOR(0x0f0d), /* Hori Controllers */ + XPAD_XBOX360_VENDOR(0x1038), /* SteelSeries Controllers */ XPAD_XBOX360_VENDOR(0x11c9), /* Nacon GC100XF */ XPAD_XBOX360_VENDOR(0x12ab), /* X-Box 360 dance pads */ XPAD_XBOX360_VENDOR(0x1430), /* RedOctane X-Box 360 controllers */ @@ -480,18 
+483,18 @@ static const u8 xboxone_hori_init[] = { }; /* - * This packet is required for some of the PDP pads to start + * This packet is required for most (all?) of the PDP pads to start * sending input reports. These pads include: (0x0e6f:0x02ab), - * (0x0e6f:0x02a4). + * (0x0e6f:0x02a4), (0x0e6f:0x02a6). */ static const u8 xboxone_pdp_init1[] = { 0x0a, 0x20, 0x00, 0x03, 0x00, 0x01, 0x14 }; /* - * This packet is required for some of the PDP pads to start + * This packet is required for most (all?) of the PDP pads to start * sending input reports. These pads include: (0x0e6f:0x02ab), - * (0x0e6f:0x02a4). + * (0x0e6f:0x02a4), (0x0e6f:0x02a6). */ static const u8 xboxone_pdp_init2[] = { 0x06, 0x20, 0x00, 0x02, 0x01, 0x00 @@ -527,12 +530,8 @@ static const struct xboxone_init_packet xboxone_init_packets[] = { XBOXONE_INIT_PKT(0x0e6f, 0x0165, xboxone_hori_init), XBOXONE_INIT_PKT(0x0f0d, 0x0067, xboxone_hori_init), XBOXONE_INIT_PKT(0x0000, 0x0000, xboxone_fw2015_init), - XBOXONE_INIT_PKT(0x0e6f, 0x02ab, xboxone_pdp_init1), - XBOXONE_INIT_PKT(0x0e6f, 0x02ab, xboxone_pdp_init2), - XBOXONE_INIT_PKT(0x0e6f, 0x02a4, xboxone_pdp_init1), - XBOXONE_INIT_PKT(0x0e6f, 0x02a4, xboxone_pdp_init2), - XBOXONE_INIT_PKT(0x0e6f, 0x02a6, xboxone_pdp_init1), - XBOXONE_INIT_PKT(0x0e6f, 0x02a6, xboxone_pdp_init2), + XBOXONE_INIT_PKT(0x0e6f, 0x0000, xboxone_pdp_init1), + XBOXONE_INIT_PKT(0x0e6f, 0x0000, xboxone_pdp_init2), XBOXONE_INIT_PKT(0x24c6, 0x541a, xboxone_rumblebegin_init), XBOXONE_INIT_PKT(0x24c6, 0x542a, xboxone_rumblebegin_init), XBOXONE_INIT_PKT(0x24c6, 0x543a, xboxone_rumblebegin_init), diff --git a/drivers/input/keyboard/cros_ec_keyb.c b/drivers/input/keyboard/cros_ec_keyb.c index 81be6f781f0b6..d560011815983 100644 --- a/drivers/input/keyboard/cros_ec_keyb.c +++ b/drivers/input/keyboard/cros_ec_keyb.c @@ -493,7 +493,8 @@ static int cros_ec_keyb_register_bs(struct cros_ec_keyb *ckdev) for (i = 0; i < ARRAY_SIZE(cros_ec_keyb_bs); i++) { const struct cros_ec_bs_map *map = 
&cros_ec_keyb_bs[i]; - if (buttons & BIT(map->bit)) + if ((map->ev_type == EV_KEY && (buttons & BIT(map->bit))) || + (map->ev_type == EV_SW && (switches & BIT(map->bit)))) input_set_capability(idev, map->ev_type, map->code); } diff --git a/drivers/input/keyboard/matrix_keypad.c b/drivers/input/keyboard/matrix_keypad.c index f51ae09596ef2..403452ef00e6f 100644 --- a/drivers/input/keyboard/matrix_keypad.c +++ b/drivers/input/keyboard/matrix_keypad.c @@ -407,7 +407,7 @@ matrix_keypad_parse_dt(struct device *dev) struct matrix_keypad_platform_data *pdata; struct device_node *np = dev->of_node; unsigned int *gpios; - int i, nrow, ncol; + int ret, i, nrow, ncol; if (!np) { dev_err(dev, "device lacks DT data\n"); @@ -452,12 +452,19 @@ matrix_keypad_parse_dt(struct device *dev) return ERR_PTR(-ENOMEM); } - for (i = 0; i < pdata->num_row_gpios; i++) - gpios[i] = of_get_named_gpio(np, "row-gpios", i); + for (i = 0; i < nrow; i++) { + ret = of_get_named_gpio(np, "row-gpios", i); + if (ret < 0) + return ERR_PTR(ret); + gpios[i] = ret; + } - for (i = 0; i < pdata->num_col_gpios; i++) - gpios[pdata->num_row_gpios + i] = - of_get_named_gpio(np, "col-gpios", i); + for (i = 0; i < ncol; i++) { + ret = of_get_named_gpio(np, "col-gpios", i); + if (ret < 0) + return ERR_PTR(ret); + gpios[nrow + i] = ret; + } pdata->row_gpios = gpios; pdata->col_gpios = &gpios[pdata->num_row_gpios]; @@ -484,10 +491,8 @@ static int matrix_keypad_probe(struct platform_device *pdev) pdata = dev_get_platdata(&pdev->dev); if (!pdata) { pdata = matrix_keypad_parse_dt(&pdev->dev); - if (IS_ERR(pdata)) { - dev_err(&pdev->dev, "no platform data defined\n"); + if (IS_ERR(pdata)) return PTR_ERR(pdata); - } } else if (!pdata->keymap_data) { dev_err(&pdev->dev, "no keymap data defined\n"); return -EINVAL; diff --git a/drivers/input/keyboard/omap4-keypad.c b/drivers/input/keyboard/omap4-keypad.c index 46406345742b9..840e53732753f 100644 --- a/drivers/input/keyboard/omap4-keypad.c +++ 
b/drivers/input/keyboard/omap4-keypad.c @@ -60,8 +60,18 @@ /* OMAP4 values */ #define OMAP4_VAL_IRQDISABLE 0x0 -#define OMAP4_VAL_DEBOUNCINGTIME 0x7 -#define OMAP4_VAL_PVT 0x7 + +/* + * Errata i689: If a key is released for a time shorter than debounce time, + * the keyboard will idle and never detect the key release. The workaround + * is to use at least a 12ms debounce time. See omap5432 TRM chapter + * "26.4.6.2 Keyboard Controller Timer" for more information. + */ +#define OMAP4_KEYPAD_PTV_DIV_128 0x6 +#define OMAP4_KEYPAD_DEBOUNCINGTIME_MS(dbms, ptv) \ + ((((dbms) * 1000) / ((1 << ((ptv) + 1)) * (1000000 / 32768))) - 1) +#define OMAP4_VAL_DEBOUNCINGTIME_16MS \ + OMAP4_KEYPAD_DEBOUNCINGTIME_MS(16, OMAP4_KEYPAD_PTV_DIV_128) enum { KBD_REVISION_OMAP4 = 0, @@ -116,12 +126,8 @@ static irqreturn_t omap4_keypad_irq_handler(int irq, void *dev_id) { struct omap4_keypad *keypad_data = dev_id; - if (kbd_read_irqreg(keypad_data, OMAP4_KBD_IRQSTATUS)) { - /* Disable interrupts */ - kbd_write_irqreg(keypad_data, OMAP4_KBD_IRQENABLE, - OMAP4_VAL_IRQDISABLE); + if (kbd_read_irqreg(keypad_data, OMAP4_KBD_IRQSTATUS)) return IRQ_WAKE_THREAD; - } return IRQ_NONE; } @@ -163,11 +169,6 @@ static irqreturn_t omap4_keypad_irq_thread_fn(int irq, void *dev_id) kbd_write_irqreg(keypad_data, OMAP4_KBD_IRQSTATUS, kbd_read_irqreg(keypad_data, OMAP4_KBD_IRQSTATUS)); - /* enable interrupts */ - kbd_write_irqreg(keypad_data, OMAP4_KBD_IRQENABLE, - OMAP4_DEF_IRQENABLE_EVENTEN | - OMAP4_DEF_IRQENABLE_LONGKEY); - return IRQ_HANDLED; } @@ -181,9 +182,9 @@ static int omap4_keypad_open(struct input_dev *input) kbd_writel(keypad_data, OMAP4_KBD_CTRL, OMAP4_DEF_CTRL_NOSOFTMODE | - (OMAP4_VAL_PVT << OMAP4_DEF_CTRL_PTV_SHIFT)); + (OMAP4_KEYPAD_PTV_DIV_128 << OMAP4_DEF_CTRL_PTV_SHIFT)); kbd_writel(keypad_data, OMAP4_KBD_DEBOUNCINGTIME, - OMAP4_VAL_DEBOUNCINGTIME); + OMAP4_VAL_DEBOUNCINGTIME_16MS); /* clear pending interrupts */ kbd_write_irqreg(keypad_data, OMAP4_KBD_IRQSTATUS, 
kbd_read_irqreg(keypad_data, OMAP4_KBD_IRQSTATUS)); @@ -204,9 +205,10 @@ static void omap4_keypad_close(struct input_dev *input) disable_irq(keypad_data->irq); - /* Disable interrupts */ + /* Disable interrupts and wake-up events */ kbd_write_irqreg(keypad_data, OMAP4_KBD_IRQENABLE, OMAP4_VAL_IRQDISABLE); + kbd_writel(keypad_data, OMAP4_KBD_WAKEUPENABLE, 0); /* clear pending interrupts */ kbd_write_irqreg(keypad_data, OMAP4_KBD_IRQSTATUS, @@ -355,7 +357,7 @@ static int omap4_keypad_probe(struct platform_device *pdev) } error = request_threaded_irq(keypad_data->irq, omap4_keypad_irq_handler, - omap4_keypad_irq_thread_fn, 0, + omap4_keypad_irq_thread_fn, IRQF_ONESHOT, "omap4-keypad", keypad_data); if (error) { dev_err(&pdev->dev, "failed to register interrupt\n"); diff --git a/drivers/input/misc/uinput.c b/drivers/input/misc/uinput.c index 8ec483e8688be..26ec603fe2208 100644 --- a/drivers/input/misc/uinput.c +++ b/drivers/input/misc/uinput.c @@ -39,6 +39,7 @@ #include #include #include +#include #include #include "../input-compat.h" @@ -405,7 +406,7 @@ static int uinput_open(struct inode *inode, struct file *file) static int uinput_validate_absinfo(struct input_dev *dev, unsigned int code, const struct input_absinfo *abs) { - int min, max; + int min, max, range; min = abs->minimum; max = abs->maximum; @@ -417,7 +418,7 @@ static int uinput_validate_absinfo(struct input_dev *dev, unsigned int code, return -EINVAL; } - if (abs->flat > max - min) { + if (!check_sub_overflow(max, min, &range) && abs->flat > range) { printk(KERN_DEBUG "%s: abs_flat #%02x out of range: %d (min:%d/max:%d)\n", UINPUT_NAME, code, abs->flat, min, max); diff --git a/drivers/input/mouse/elan_i2c_core.c b/drivers/input/mouse/elan_i2c_core.c index b0f9d19b3410a..f322a1768fbb5 100644 --- a/drivers/input/mouse/elan_i2c_core.c +++ b/drivers/input/mouse/elan_i2c_core.c @@ -1336,6 +1336,7 @@ MODULE_DEVICE_TABLE(i2c, elan_id); static const struct acpi_device_id elan_acpi_id[] = { { "ELAN0000", 0 }, { 
"ELAN0100", 0 }, + { "ELAN0501", 0 }, { "ELAN0600", 0 }, { "ELAN0602", 0 }, { "ELAN0605", 0 }, @@ -1348,6 +1349,9 @@ static const struct acpi_device_id elan_acpi_id[] = { { "ELAN0618", 0 }, { "ELAN061C", 0 }, { "ELAN061D", 0 }, + { "ELAN061E", 0 }, + { "ELAN0620", 0 }, + { "ELAN0621", 0 }, { "ELAN0622", 0 }, { "ELAN1000", 0 }, { } diff --git a/drivers/input/mouse/elantech.c b/drivers/input/mouse/elantech.c index 2d95e8d93cc76..9fe075c137dc4 100644 --- a/drivers/input/mouse/elantech.c +++ b/drivers/input/mouse/elantech.c @@ -1767,6 +1767,18 @@ static int elantech_smbus = IS_ENABLED(CONFIG_MOUSE_ELAN_I2C_SMBUS) ? module_param_named(elantech_smbus, elantech_smbus, int, 0644); MODULE_PARM_DESC(elantech_smbus, "Use a secondary bus for the Elantech device."); +static const char * const i2c_blacklist_pnp_ids[] = { + /* + * These are known to not be working properly as bits are missing + * in elan_i2c. + */ + "LEN2131", /* ThinkPad P52 w/ NFC */ + "LEN2132", /* ThinkPad P52 */ + "LEN2133", /* ThinkPad P72 w/ NFC */ + "LEN2134", /* ThinkPad P72 */ + NULL +}; + static int elantech_create_smbus(struct psmouse *psmouse, struct elantech_device_info *info, bool leave_breadcrumbs) @@ -1802,10 +1814,12 @@ static int elantech_setup_smbus(struct psmouse *psmouse, if (elantech_smbus == ELANTECH_SMBUS_NOT_SET) { /* - * New ICs are enabled by default. + * New ICs are enabled by default, unless mentioned in + * i2c_blacklist_pnp_ids. * Old ICs are up to the user to decide. 
*/ - if (!ETP_NEW_IC_SMBUS_HOST_NOTIFY(info->fw_version)) + if (!ETP_NEW_IC_SMBUS_HOST_NOTIFY(info->fw_version) || + psmouse_matches_pnp_id(psmouse, i2c_blacklist_pnp_ids)) return -ENXIO; } diff --git a/drivers/input/mouse/synaptics.c b/drivers/input/mouse/synaptics.c index 55d33500d55ec..b6da0c1267e36 100644 --- a/drivers/input/mouse/synaptics.c +++ b/drivers/input/mouse/synaptics.c @@ -99,9 +99,7 @@ static int synaptics_mode_cmd(struct psmouse *psmouse, u8 mode) int synaptics_detect(struct psmouse *psmouse, bool set_properties) { struct ps2dev *ps2dev = &psmouse->ps2dev; - u8 param[4]; - - param[0] = 0; + u8 param[4] = { 0 }; ps2_command(ps2dev, param, PSMOUSE_CMD_SETRES); ps2_command(ps2dev, param, PSMOUSE_CMD_SETRES); @@ -172,6 +170,8 @@ static const char * const smbus_pnp_ids[] = { "LEN0048", /* X1 Carbon 3 */ "LEN0046", /* X250 */ "LEN004a", /* W541 */ + "LEN005b", /* P50 */ + "LEN005e", /* T560 */ "LEN0071", /* T480 */ "LEN0072", /* X1 Carbon Gen 5 (2017) - Elan/ALPS trackpoint */ "LEN0073", /* X1 Carbon G5 (Elantech) */ @@ -179,6 +179,8 @@ static const char * const smbus_pnp_ids[] = { "LEN0096", /* X280 */ "LEN0097", /* X280 -> ALPS trackpoint */ "LEN200f", /* T450s */ + "SYN3052", /* HP EliteBook 840 G4 */ + "SYN3221", /* HP 15-ay000 */ NULL }; diff --git a/drivers/input/serio/hyperv-keyboard.c b/drivers/input/serio/hyperv-keyboard.c index 47a0e81a2989c..a8b9be3e28db7 100644 --- a/drivers/input/serio/hyperv-keyboard.c +++ b/drivers/input/serio/hyperv-keyboard.c @@ -177,7 +177,7 @@ static void hv_kbd_on_receive(struct hv_device *hv_dev, * state because the Enter-UP can trigger a wakeup at once. 
*/ if (!(info & IS_BREAK)) - pm_wakeup_event(&hv_dev->device, 0); + pm_wakeup_hard_event(&hv_dev->device); break; diff --git a/drivers/input/touchscreen/atmel_mxt_ts.c b/drivers/input/touchscreen/atmel_mxt_ts.c index 3232af5dcf894..a7ace07e179e2 100644 --- a/drivers/input/touchscreen/atmel_mxt_ts.c +++ b/drivers/input/touchscreen/atmel_mxt_ts.c @@ -1586,10 +1586,10 @@ static int mxt_update_cfg(struct mxt_data *data, const struct firmware *fw) /* T7 config may have changed */ mxt_init_t7_power_cfg(data); -release_raw: - kfree(cfg.raw); release_mem: kfree(cfg.mem); +release_raw: + kfree(cfg.raw); return ret; } diff --git a/drivers/input/touchscreen/wm97xx-core.c b/drivers/input/touchscreen/wm97xx-core.c index 2566b4d8b3428..73856c2a8ac0f 100644 --- a/drivers/input/touchscreen/wm97xx-core.c +++ b/drivers/input/touchscreen/wm97xx-core.c @@ -929,7 +929,8 @@ static int __init wm97xx_init(void) static void __exit wm97xx_exit(void) { - driver_unregister(&wm97xx_driver); + if (IS_BUILTIN(CONFIG_AC97_BUS)) + driver_unregister(&wm97xx_driver); platform_driver_unregister(&wm97xx_mfd_driver); } diff --git a/drivers/iommu/amd_iommu_init.c b/drivers/iommu/amd_iommu_init.c index 84b3e4445d46d..e062ab9687c75 100644 --- a/drivers/iommu/amd_iommu_init.c +++ b/drivers/iommu/amd_iommu_init.c @@ -797,7 +797,8 @@ static int iommu_init_ga_log(struct amd_iommu *iommu) entry = iommu_virt_to_phys(iommu->ga_log) | GA_LOG_SIZE_512; memcpy_toio(iommu->mmio_base + MMIO_GA_LOG_BASE_OFFSET, &entry, sizeof(entry)); - entry = (iommu_virt_to_phys(iommu->ga_log) & 0xFFFFFFFFFFFFFULL) & ~7ULL; + entry = (iommu_virt_to_phys(iommu->ga_log_tail) & + (BIT_ULL(52)-1)) & ~7ULL; memcpy_toio(iommu->mmio_base + MMIO_GA_LOG_TAIL_OFFSET, &entry, sizeof(entry)); writel(0x00, iommu->mmio_base + MMIO_GA_HEAD_OFFSET); diff --git a/drivers/iommu/arm-smmu-v3.c b/drivers/iommu/arm-smmu-v3.c index 5059d09f32020..3e02aace38b10 100644 --- a/drivers/iommu/arm-smmu-v3.c +++ b/drivers/iommu/arm-smmu-v3.c @@ -837,7 +837,13 @@ 
static int arm_smmu_cmdq_build_cmd(u64 *cmd, struct arm_smmu_cmdq_ent *ent) cmd[0] |= FIELD_PREP(CMDQ_SYNC_0_CS, CMDQ_SYNC_0_CS_SEV); cmd[0] |= FIELD_PREP(CMDQ_SYNC_0_MSH, ARM_SMMU_SH_ISH); cmd[0] |= FIELD_PREP(CMDQ_SYNC_0_MSIATTR, ARM_SMMU_MEMATTR_OIWB); - cmd[0] |= FIELD_PREP(CMDQ_SYNC_0_MSIDATA, ent->sync.msidata); + /* + * Commands are written little-endian, but we want the SMMU to + * receive MSIData, and thus write it back to memory, in CPU + * byte order, so big-endian needs an extra byteswap here. + */ + cmd[0] |= FIELD_PREP(CMDQ_SYNC_0_MSIDATA, + cpu_to_le32(ent->sync.msidata)); cmd[1] |= ent->sync.msiaddr & CMDQ_SYNC_1_MSIADDR_MASK; break; default: diff --git a/drivers/iommu/arm-smmu.c b/drivers/iommu/arm-smmu.c index fd1b80ef9490d..e7cbf4fcf61d4 100644 --- a/drivers/iommu/arm-smmu.c +++ b/drivers/iommu/arm-smmu.c @@ -469,6 +469,9 @@ static void arm_smmu_tlb_inv_range_nosync(unsigned long iova, size_t size, bool stage1 = cfg->cbar != CBAR_TYPE_S2_TRANS; void __iomem *reg = ARM_SMMU_CB(smmu_domain->smmu, cfg->cbndx); + if (smmu_domain->smmu->features & ARM_SMMU_FEAT_COHERENT_WALK) + wmb(); + if (stage1) { reg += leaf ? ARM_SMMU_CB_S1_TLBIVAL : ARM_SMMU_CB_S1_TLBIVA; @@ -510,6 +513,9 @@ static void arm_smmu_tlb_inv_vmid_nosync(unsigned long iova, size_t size, struct arm_smmu_domain *smmu_domain = cookie; void __iomem *base = ARM_SMMU_GR0(smmu_domain->smmu); + if (smmu_domain->smmu->features & ARM_SMMU_FEAT_COHERENT_WALK) + wmb(); + writel_relaxed(smmu_domain->cfg.vmid, base + ARM_SMMU_GR0_TLBIVMID); } diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c index bedc801b06a0b..4339177629e36 100644 --- a/drivers/iommu/intel-iommu.c +++ b/drivers/iommu/intel-iommu.c @@ -2069,7 +2069,7 @@ static int domain_context_mapping_one(struct dmar_domain *domain, * than default. Unnecessary for PT mode. 
*/ if (translation != CONTEXT_TT_PASS_THROUGH) { - for (agaw = domain->agaw; agaw != iommu->agaw; agaw--) { + for (agaw = domain->agaw; agaw > iommu->agaw; agaw--) { ret = -ENOMEM; pgd = phys_to_virt(dma_pte_addr(pgd)); if (!dma_pte_present(pgd)) @@ -2083,7 +2083,7 @@ static int domain_context_mapping_one(struct dmar_domain *domain, translation = CONTEXT_TT_MULTI_LEVEL; context_set_address_root(context, virt_to_phys(pgd)); - context_set_address_width(context, iommu->agaw); + context_set_address_width(context, agaw); } else { /* * In pass through mode, AW must be programmed to @@ -3100,7 +3100,7 @@ static int copy_context_table(struct intel_iommu *iommu, } if (old_ce) - iounmap(old_ce); + memunmap(old_ce); ret = 0; if (devfn < 0x80) diff --git a/drivers/iommu/intel-svm.c b/drivers/iommu/intel-svm.c index 4a03e50909520..188f4eaed6e59 100644 --- a/drivers/iommu/intel-svm.c +++ b/drivers/iommu/intel-svm.c @@ -596,7 +596,7 @@ static irqreturn_t prq_event_thread(int irq, void *d) pr_err("%s: Page request without PASID: %08llx %08llx\n", iommu->name, ((unsigned long long *)req)[0], ((unsigned long long *)req)[1]); - goto bad_req; + goto no_pasid; } if (!svm || svm->pasid != req->pasid) { diff --git a/drivers/iommu/ipmmu-vmsa.c b/drivers/iommu/ipmmu-vmsa.c index 22b94f8a9a04f..d8598e44e3816 100644 --- a/drivers/iommu/ipmmu-vmsa.c +++ b/drivers/iommu/ipmmu-vmsa.c @@ -501,6 +501,9 @@ static int ipmmu_domain_init_context(struct ipmmu_vmsa_domain *domain) static void ipmmu_domain_destroy_context(struct ipmmu_vmsa_domain *domain) { + if (!domain->mmu) + return; + /* * Disable the context. Flush the TLB as required when modifying the * context registers. 
diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c index c2df341ff6faf..cf3abb8d284fa 100644 --- a/drivers/irqchip/irq-gic-v3-its.c +++ b/drivers/irqchip/irq-gic-v3-its.c @@ -2267,13 +2267,14 @@ static void its_free_device(struct its_device *its_dev) kfree(its_dev); } -static int its_alloc_device_irq(struct its_device *dev, irq_hw_number_t *hwirq) +static int its_alloc_device_irq(struct its_device *dev, int nvecs, irq_hw_number_t *hwirq) { int idx; - idx = find_first_zero_bit(dev->event_map.lpi_map, - dev->event_map.nr_lpis); - if (idx == dev->event_map.nr_lpis) + idx = bitmap_find_free_region(dev->event_map.lpi_map, + dev->event_map.nr_lpis, + get_count_order(nvecs)); + if (idx < 0) return -ENOSPC; *hwirq = dev->event_map.lpi_base + idx; @@ -2369,21 +2370,21 @@ static int its_irq_domain_alloc(struct irq_domain *domain, unsigned int virq, int err; int i; - for (i = 0; i < nr_irqs; i++) { - err = its_alloc_device_irq(its_dev, &hwirq); - if (err) - return err; + err = its_alloc_device_irq(its_dev, nr_irqs, &hwirq); + if (err) + return err; - err = its_irq_gic_domain_alloc(domain, virq + i, hwirq); + for (i = 0; i < nr_irqs; i++) { + err = its_irq_gic_domain_alloc(domain, virq + i, hwirq + i); if (err) return err; irq_domain_set_hwirq_and_chip(domain, virq + i, - hwirq, &its_irq_chip, its_dev); + hwirq + i, &its_irq_chip, its_dev); irqd_set_single_target(irq_desc_get_irq_data(irq_to_desc(virq + i))); pr_debug("ID:%d pID:%d vID:%d\n", - (int)(hwirq - its_dev->event_map.lpi_base), - (int) hwirq, virq + i); + (int)(hwirq + i - its_dev->event_map.lpi_base), + (int)(hwirq + i), virq + i); } return 0; diff --git a/drivers/irqchip/qcom-pdc.c b/drivers/irqchip/qcom-pdc.c index b1b47a40a2786..faa7d61b9d6c4 100644 --- a/drivers/irqchip/qcom-pdc.c +++ b/drivers/irqchip/qcom-pdc.c @@ -124,6 +124,7 @@ static int qcom_pdc_gic_set_type(struct irq_data *d, unsigned int type) break; case IRQ_TYPE_EDGE_BOTH: pdc_type = PDC_EDGE_DUAL; + type = 
IRQ_TYPE_EDGE_RISING; break; case IRQ_TYPE_LEVEL_HIGH: pdc_type = PDC_LEVEL_HIGH; diff --git a/drivers/isdn/capi/kcapi.c b/drivers/isdn/capi/kcapi.c index 0ff517d3c98f9..a4ceb61c5b603 100644 --- a/drivers/isdn/capi/kcapi.c +++ b/drivers/isdn/capi/kcapi.c @@ -852,7 +852,7 @@ u16 capi20_get_manufacturer(u32 contr, u8 *buf) u16 ret; if (contr == 0) { - strlcpy(buf, capi_manufakturer, CAPI_MANUFACTURER_LEN); + strncpy(buf, capi_manufakturer, CAPI_MANUFACTURER_LEN); return CAPI_NOERROR; } @@ -860,7 +860,7 @@ u16 capi20_get_manufacturer(u32 contr, u8 *buf) ctr = get_capi_ctr_by_nr(contr); if (ctr && ctr->state == CAPI_CTR_RUNNING) { - strlcpy(buf, ctr->manu, CAPI_MANUFACTURER_LEN); + strncpy(buf, ctr->manu, CAPI_MANUFACTURER_LEN); ret = CAPI_NOERROR; } else ret = CAPI_REGNOTINSTALLED; diff --git a/drivers/leds/leds-pwm.c b/drivers/leds/leds-pwm.c index df80c89ebe7fa..5d3faae51d59e 100644 --- a/drivers/leds/leds-pwm.c +++ b/drivers/leds/leds-pwm.c @@ -100,8 +100,9 @@ static int led_pwm_add(struct device *dev, struct led_pwm_priv *priv, led_data->pwm = devm_pwm_get(dev, led->name); if (IS_ERR(led_data->pwm)) { ret = PTR_ERR(led_data->pwm); - dev_err(dev, "unable to request PWM for %s: %d\n", - led->name, ret); + if (ret != -EPROBE_DEFER) + dev_err(dev, "unable to request PWM for %s: %d\n", + led->name, ret); return ret; } diff --git a/drivers/lightnvm/pblk-core.c b/drivers/lightnvm/pblk-core.c index 00984b486fea7..2940cdc87af17 100644 --- a/drivers/lightnvm/pblk-core.c +++ b/drivers/lightnvm/pblk-core.c @@ -1539,13 +1539,14 @@ struct pblk_line *pblk_line_replace_data(struct pblk *pblk) struct pblk_line *cur, *new = NULL; unsigned int left_seblks; - cur = l_mg->data_line; new = l_mg->data_next; if (!new) goto out; - l_mg->data_line = new; spin_lock(&l_mg->free_lock); + cur = l_mg->data_line; + l_mg->data_line = new; + pblk_line_setup_metadata(new, l_mg, &pblk->lm); spin_unlock(&l_mg->free_lock); diff --git a/drivers/lightnvm/pblk-recovery.c 
b/drivers/lightnvm/pblk-recovery.c index e232e47e13532..df75d9caec45e 100644 --- a/drivers/lightnvm/pblk-recovery.c +++ b/drivers/lightnvm/pblk-recovery.c @@ -956,12 +956,14 @@ struct pblk_line *pblk_recov_l2p(struct pblk *pblk) } } - spin_lock(&l_mg->free_lock); if (!open_lines) { + spin_lock(&l_mg->free_lock); WARN_ON_ONCE(!test_and_clear_bit(meta_line, &l_mg->meta_bitmap)); + spin_unlock(&l_mg->free_lock); pblk_line_replace_data(pblk); } else { + spin_lock(&l_mg->free_lock); /* Allocate next line for preparation */ l_mg->data_next = pblk_line_get(pblk); if (l_mg->data_next) { @@ -969,8 +971,8 @@ struct pblk_line *pblk_recov_l2p(struct pblk *pblk) l_mg->data_next->type = PBLK_LINETYPE_DATA; is_next = 1; } + spin_unlock(&l_mg->free_lock); } - spin_unlock(&l_mg->free_lock); if (is_next) pblk_line_erase(pblk, l_mg->data_next); diff --git a/drivers/lightnvm/pblk-sysfs.c b/drivers/lightnvm/pblk-sysfs.c index 9fc3dfa168b4b..8d2ed510c04b3 100644 --- a/drivers/lightnvm/pblk-sysfs.c +++ b/drivers/lightnvm/pblk-sysfs.c @@ -262,8 +262,14 @@ static ssize_t pblk_sysfs_lines(struct pblk *pblk, char *page) sec_in_line = l_mg->data_line->sec_in_line; meta_weight = bitmap_weight(&l_mg->meta_bitmap, PBLK_DATA_LINES); - map_weight = bitmap_weight(l_mg->data_line->map_bitmap, + + spin_lock(&l_mg->data_line->lock); + if (l_mg->data_line->map_bitmap) + map_weight = bitmap_weight(l_mg->data_line->map_bitmap, lm->sec_per_line); + else + map_weight = 0; + spin_unlock(&l_mg->data_line->lock); } spin_unlock(&l_mg->free_lock); diff --git a/drivers/lightnvm/pblk-write.c b/drivers/lightnvm/pblk-write.c index ee774a86cf1e6..879227d584e7d 100644 --- a/drivers/lightnvm/pblk-write.c +++ b/drivers/lightnvm/pblk-write.c @@ -417,12 +417,11 @@ int pblk_submit_meta_io(struct pblk *pblk, struct pblk_line *meta_line) rqd->ppa_list[i] = addr_to_gen_ppa(pblk, paddr, id); } + spin_lock(&l_mg->close_lock); emeta->mem += rq_len; - if (emeta->mem >= lm->emeta_len[0]) { - spin_lock(&l_mg->close_lock); + if 
(emeta->mem >= lm->emeta_len[0]) list_del(&meta_line->list); - spin_unlock(&l_mg->close_lock); - } + spin_unlock(&l_mg->close_lock); pblk_down_page(pblk, rqd->ppa_list, rqd->nr_ppas); @@ -491,14 +490,15 @@ static struct pblk_line *pblk_should_submit_meta_io(struct pblk *pblk, struct pblk_line *meta_line; spin_lock(&l_mg->close_lock); -retry: if (list_empty(&l_mg->emeta_list)) { spin_unlock(&l_mg->close_lock); return NULL; } meta_line = list_first_entry(&l_mg->emeta_list, struct pblk_line, list); - if (meta_line->emeta->mem >= lm->emeta_len[0]) - goto retry; + if (meta_line->emeta->mem >= lm->emeta_len[0]) { + spin_unlock(&l_mg->close_lock); + return NULL; + } spin_unlock(&l_mg->close_lock); if (!pblk_valid_meta_ppa(pblk, meta_line, data_rqd)) diff --git a/drivers/mailbox/pcc.c b/drivers/mailbox/pcc.c index 311e91b1a14f3..256f18b67e8a6 100644 --- a/drivers/mailbox/pcc.c +++ b/drivers/mailbox/pcc.c @@ -461,8 +461,11 @@ static int __init acpi_pcc_probe(void) count = acpi_table_parse_entries_array(ACPI_SIG_PCCT, sizeof(struct acpi_table_pcct), proc, ACPI_PCCT_TYPE_RESERVED, MAX_PCC_SUBSPACES); - if (count == 0 || count > MAX_PCC_SUBSPACES) { - pr_warn("Invalid PCCT: %d PCC subspaces\n", count); + if (count <= 0 || count > MAX_PCC_SUBSPACES) { + if (count < 0) + pr_warn("Error parsing PCC subspaces from PCCT\n"); + else + pr_warn("Invalid PCCT: %d PCC subspaces\n", count); return -EINVAL; } diff --git a/drivers/md/bcache/btree.c b/drivers/md/bcache/btree.c index e7d4817681f22..3f4211b5cd334 100644 --- a/drivers/md/bcache/btree.c +++ b/drivers/md/bcache/btree.c @@ -2434,7 +2434,7 @@ static int refill_keybuf_fn(struct btree_op *op, struct btree *b, struct keybuf *buf = refill->buf; int ret = MAP_CONTINUE; - if (bkey_cmp(k, refill->end) >= 0) { + if (bkey_cmp(k, refill->end) > 0) { ret = MAP_DONE; goto out; } diff --git a/drivers/md/bcache/request.c b/drivers/md/bcache/request.c index 51be355a3309f..22944aa7d8e5f 100644 --- a/drivers/md/bcache/request.c +++ 
b/drivers/md/bcache/request.c @@ -850,7 +850,7 @@ static void cached_dev_read_done_bh(struct closure *cl) bch_mark_cache_accounting(s->iop.c, s->d, !s->cache_missed, s->iop.bypass); - trace_bcache_read(s->orig_bio, !s->cache_miss, s->iop.bypass); + trace_bcache_read(s->orig_bio, !s->cache_missed, s->iop.bypass); if (s->iop.status) continue_at_nobarrier(cl, cached_dev_read_error, bcache_wq); @@ -1218,6 +1218,9 @@ static int cached_dev_ioctl(struct bcache_device *d, fmode_t mode, { struct cached_dev *dc = container_of(d, struct cached_dev, disk); + if (dc->io_disable) + return -EIO; + return __blkdev_driver_ioctl(dc->bdev, mode, cmd, arg); } diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c index 30ba9aeb5ee83..03bb5cee2b835 100644 --- a/drivers/md/bcache/super.c +++ b/drivers/md/bcache/super.c @@ -643,10 +643,6 @@ static int ioctl_dev(struct block_device *b, fmode_t mode, unsigned int cmd, unsigned long arg) { struct bcache_device *d = b->bd_disk->private_data; - struct cached_dev *dc = container_of(d, struct cached_dev, disk); - - if (dc->io_disable) - return -EIO; return d->ioctl(d, mode, cmd, arg); } @@ -1152,11 +1148,12 @@ int bch_cached_dev_attach(struct cached_dev *dc, struct cache_set *c, } if (BDEV_STATE(&dc->sb) == BDEV_STATE_DIRTY) { - bch_sectors_dirty_init(&dc->disk); atomic_set(&dc->has_dirty, 1); bch_writeback_queue(dc); } + bch_sectors_dirty_init(&dc->disk); + bch_cached_dev_run(dc); bcache_device_link(&dc->disk, c, "bdev"); atomic_inc(&c->attached_dev_nr); diff --git a/drivers/md/bcache/sysfs.c b/drivers/md/bcache/sysfs.c index 150cf4f4cf749..26f035a0c5b9f 100644 --- a/drivers/md/bcache/sysfs.c +++ b/drivers/md/bcache/sysfs.c @@ -285,6 +285,7 @@ STORE(__cached_dev) 1, WRITEBACK_RATE_UPDATE_SECS_MAX); d_strtoul(writeback_rate_i_term_inverse); d_strtoul_nonzero(writeback_rate_p_term_inverse); + d_strtoul_nonzero(writeback_rate_minimum); sysfs_strtoul_clamp(io_error_limit, dc->error_limit, 0, INT_MAX); @@ -412,6 +413,7 @@ static struct 
attribute *bch_cached_dev_files[] = { &sysfs_writeback_rate_update_seconds, &sysfs_writeback_rate_i_term_inverse, &sysfs_writeback_rate_p_term_inverse, + &sysfs_writeback_rate_minimum, &sysfs_writeback_rate_debug, &sysfs_errors, &sysfs_io_error_limit, diff --git a/drivers/md/dm-cache-metadata.c b/drivers/md/dm-cache-metadata.c index 5936de71883fb..6fc93834da446 100644 --- a/drivers/md/dm-cache-metadata.c +++ b/drivers/md/dm-cache-metadata.c @@ -930,6 +930,10 @@ static int blocks_are_clean_separate_dirty(struct dm_cache_metadata *cmd, bool dirty_flag; *result = true; + if (from_cblock(cmd->cache_blocks) == 0) + /* Nothing to do */ + return 0; + r = dm_bitset_cursor_begin(&cmd->dirty_info, cmd->dirty_root, from_cblock(cmd->cache_blocks), &cmd->dirty_cursor); if (r) { diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c index 0481223b1deb8..5921ecc670c18 100644 --- a/drivers/md/dm-crypt.c +++ b/drivers/md/dm-crypt.c @@ -49,7 +49,7 @@ struct convert_context { struct bio *bio_out; struct bvec_iter iter_in; struct bvec_iter iter_out; - sector_t cc_sector; + u64 cc_sector; atomic_t cc_pending; union { struct skcipher_request *req; @@ -81,7 +81,7 @@ struct dm_crypt_request { struct convert_context *ctx; struct scatterlist sg_in[4]; struct scatterlist sg_out[4]; - sector_t iv_sector; + u64 iv_sector; }; struct crypt_config; @@ -160,7 +160,7 @@ struct crypt_config { struct iv_lmk_private lmk; struct iv_tcw_private tcw; } iv_gen_private; - sector_t iv_offset; + u64 iv_offset; unsigned int iv_size; unsigned short int sector_size; unsigned char sector_shift; @@ -2405,9 +2405,21 @@ static int crypt_ctr_cipher_new(struct dm_target *ti, char *cipher_in, char *key * capi:cipher_api_spec-iv:ivopts */ tmp = &cipher_in[strlen("capi:")]; - cipher_api = strsep(&tmp, "-"); - *ivmode = strsep(&tmp, ":"); - *ivopts = tmp; + + /* Separate IV options if present, it can contain another '-' in hash name */ + *ivopts = strrchr(tmp, ':'); + if (*ivopts) { + **ivopts = '\0'; + (*ivopts)++; 
+ } + /* Parse IV mode */ + *ivmode = strrchr(tmp, '-'); + if (*ivmode) { + **ivmode = '\0'; + (*ivmode)++; + } + /* The rest is crypto API spec */ + cipher_api = tmp; if (*ivmode && !strcmp(*ivmode, "lmk")) cc->tfms_count = 64; @@ -2477,11 +2489,8 @@ static int crypt_ctr_cipher_old(struct dm_target *ti, char *cipher_in, char *key goto bad_mem; chainmode = strsep(&tmp, "-"); - *ivopts = strsep(&tmp, "-"); - *ivmode = strsep(&*ivopts, ":"); - - if (tmp) - DMWARN("Ignoring unexpected additional cipher options"); + *ivmode = strsep(&tmp, ":"); + *ivopts = tmp; /* * For compatibility with the original dm-crypt mapping format, if @@ -2780,7 +2789,7 @@ static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv) } ret = -EINVAL; - if (sscanf(argv[4], "%llu%c", &tmpll, &dummy) != 1) { + if (sscanf(argv[4], "%llu%c", &tmpll, &dummy) != 1 || tmpll != (sector_t)tmpll) { ti->error = "Invalid device sector"; goto bad; } diff --git a/drivers/md/dm-delay.c b/drivers/md/dm-delay.c index 2fb7bb4304ad7..fddffe251bf6b 100644 --- a/drivers/md/dm-delay.c +++ b/drivers/md/dm-delay.c @@ -141,7 +141,7 @@ static int delay_class_ctr(struct dm_target *ti, struct delay_class *c, char **a unsigned long long tmpll; char dummy; - if (sscanf(argv[1], "%llu%c", &tmpll, &dummy) != 1) { + if (sscanf(argv[1], "%llu%c", &tmpll, &dummy) != 1 || tmpll != (sector_t)tmpll) { ti->error = "Invalid device sector"; return -EINVAL; } diff --git a/drivers/md/dm-flakey.c b/drivers/md/dm-flakey.c index 32aabe27b37ce..b86d2439ffc76 100644 --- a/drivers/md/dm-flakey.c +++ b/drivers/md/dm-flakey.c @@ -213,7 +213,7 @@ static int flakey_ctr(struct dm_target *ti, unsigned int argc, char **argv) devname = dm_shift_arg(&as); r = -EINVAL; - if (sscanf(dm_shift_arg(&as), "%llu%c", &tmpll, &dummy) != 1) { + if (sscanf(dm_shift_arg(&as), "%llu%c", &tmpll, &dummy) != 1 || tmpll != (sector_t)tmpll) { ti->error = "Invalid device sector"; goto bad; } diff --git a/drivers/md/dm-ioctl.c b/drivers/md/dm-ioctl.c index 
b810ea77e6b16..f666778ad2372 100644 --- a/drivers/md/dm-ioctl.c +++ b/drivers/md/dm-ioctl.c @@ -1720,8 +1720,7 @@ static void free_params(struct dm_ioctl *param, size_t param_size, int param_fla } static int copy_params(struct dm_ioctl __user *user, struct dm_ioctl *param_kernel, - int ioctl_flags, - struct dm_ioctl **param, int *param_flags) + int ioctl_flags, struct dm_ioctl **param, int *param_flags) { struct dm_ioctl *dmi; int secure_data; @@ -1762,18 +1761,13 @@ static int copy_params(struct dm_ioctl __user *user, struct dm_ioctl *param_kern *param_flags |= DM_PARAMS_MALLOC; - if (copy_from_user(dmi, user, param_kernel->data_size)) - goto bad; + /* Copy from param_kernel (which was already copied from user) */ + memcpy(dmi, param_kernel, minimum_data_size); -data_copied: - /* - * Abort if something changed the ioctl data while it was being copied. - */ - if (dmi->data_size != param_kernel->data_size) { - DMERR("rejecting ioctl: data size modified while processing parameters"); + if (copy_from_user(&dmi->data, (char __user *)user + minimum_data_size, + param_kernel->data_size - minimum_data_size)) goto bad; - } - +data_copied: /* Wipe the user buffer so we do not return it to userspace */ if (secure_data && clear_user(user, param_kernel->data_size)) goto bad; diff --git a/drivers/md/dm-kcopyd.c b/drivers/md/dm-kcopyd.c index 2fc4213e02b5f..671c24332802e 100644 --- a/drivers/md/dm-kcopyd.c +++ b/drivers/md/dm-kcopyd.c @@ -56,15 +56,17 @@ struct dm_kcopyd_client { atomic_t nr_jobs; /* - * We maintain three lists of jobs: + * We maintain four lists of jobs: * * i) jobs waiting for pages * ii) jobs that have pages, and are waiting for the io to be issued. - * iii) jobs that have completed. + * iii) jobs that don't need to do any IO and just run a callback + * iv) jobs that have completed. * - * All three of these are protected by job_lock. + * All four of these are protected by job_lock. 
*/ spinlock_t job_lock; + struct list_head callback_jobs; struct list_head complete_jobs; struct list_head io_jobs; struct list_head pages_jobs; @@ -625,6 +627,7 @@ static void do_work(struct work_struct *work) struct dm_kcopyd_client *kc = container_of(work, struct dm_kcopyd_client, kcopyd_work); struct blk_plug plug; + unsigned long flags; /* * The order that these are called is *very* important. @@ -633,6 +636,10 @@ static void do_work(struct work_struct *work) * list. io jobs call wake when they complete and it all * starts again. */ + spin_lock_irqsave(&kc->job_lock, flags); + list_splice_tail_init(&kc->callback_jobs, &kc->complete_jobs); + spin_unlock_irqrestore(&kc->job_lock, flags); + blk_start_plug(&plug); process_jobs(&kc->complete_jobs, kc, run_complete_job); process_jobs(&kc->pages_jobs, kc, run_pages_job); @@ -650,7 +657,7 @@ static void dispatch_job(struct kcopyd_job *job) struct dm_kcopyd_client *kc = job->kc; atomic_inc(&kc->nr_jobs); if (unlikely(!job->source.count)) - push(&kc->complete_jobs, job); + push(&kc->callback_jobs, job); else if (job->pages == &zero_page_list) push(&kc->io_jobs, job); else @@ -858,7 +865,7 @@ void dm_kcopyd_do_callback(void *j, int read_err, unsigned long write_err) job->read_err = read_err; job->write_err = write_err; - push(&kc->complete_jobs, job); + push(&kc->callback_jobs, job); wake(kc); } EXPORT_SYMBOL(dm_kcopyd_do_callback); @@ -888,6 +895,7 @@ struct dm_kcopyd_client *dm_kcopyd_client_create(struct dm_kcopyd_throttle *thro return ERR_PTR(-ENOMEM); spin_lock_init(&kc->job_lock); + INIT_LIST_HEAD(&kc->callback_jobs); INIT_LIST_HEAD(&kc->complete_jobs); INIT_LIST_HEAD(&kc->io_jobs); INIT_LIST_HEAD(&kc->pages_jobs); @@ -939,6 +947,7 @@ void dm_kcopyd_client_destroy(struct dm_kcopyd_client *kc) /* Wait for completion of all jobs submitted by this client. 
*/ wait_event(kc->destroyq, !atomic_read(&kc->nr_jobs)); + BUG_ON(!list_empty(&kc->callback_jobs)); BUG_ON(!list_empty(&kc->complete_jobs)); BUG_ON(!list_empty(&kc->io_jobs)); BUG_ON(!list_empty(&kc->pages_jobs)); diff --git a/drivers/md/dm-linear.c b/drivers/md/dm-linear.c index 2f7c44a006c41..caa08c4b84cd4 100644 --- a/drivers/md/dm-linear.c +++ b/drivers/md/dm-linear.c @@ -45,7 +45,7 @@ static int linear_ctr(struct dm_target *ti, unsigned int argc, char **argv) } ret = -EINVAL; - if (sscanf(argv[1], "%llu%c", &tmp, &dummy) != 1) { + if (sscanf(argv[1], "%llu%c", &tmp, &dummy) != 1 || tmp != (sector_t)tmp) { ti->error = "Invalid device sector"; goto bad; } diff --git a/drivers/md/dm-raid1.c b/drivers/md/dm-raid1.c index 79eab1071ec22..5a51151f680d6 100644 --- a/drivers/md/dm-raid1.c +++ b/drivers/md/dm-raid1.c @@ -943,7 +943,8 @@ static int get_mirror(struct mirror_set *ms, struct dm_target *ti, char dummy; int ret; - if (sscanf(argv[1], "%llu%c", &offset, &dummy) != 1) { + if (sscanf(argv[1], "%llu%c", &offset, &dummy) != 1 || + offset != (sector_t)offset) { ti->error = "Invalid offset"; return -EINVAL; } diff --git a/drivers/md/dm-snap.c b/drivers/md/dm-snap.c index ae4b33d109246..36805b12661e1 100644 --- a/drivers/md/dm-snap.c +++ b/drivers/md/dm-snap.c @@ -19,6 +19,7 @@ #include #include #include +#include #include "dm.h" @@ -105,6 +106,9 @@ struct dm_snapshot { /* The on disk metadata handler */ struct dm_exception_store *store; + /* Maximum number of in-flight COW jobs. */ + struct semaphore cow_count; + struct dm_kcopyd_client *kcopyd_client; /* Wait for events based on state_bits */ @@ -145,6 +149,19 @@ struct dm_snapshot { #define RUNNING_MERGE 0 #define SHUTDOWN_MERGE 1 +/* + * Maximum number of chunks being copied on write. + * + * The value was decided experimentally as a trade-off between memory + * consumption, stalling the kernel's workqueues and maintaining a high enough + * throughput. 
+ */ +#define DEFAULT_COW_THRESHOLD 2048 + +static int cow_threshold = DEFAULT_COW_THRESHOLD; +module_param_named(snapshot_cow_threshold, cow_threshold, int, 0644); +MODULE_PARM_DESC(snapshot_cow_threshold, "Maximum number of chunks being copied on write"); + DECLARE_DM_KCOPYD_THROTTLE_WITH_MODULE_PARM(snapshot_copy_throttle, "A percentage of time allocated for copy on write"); @@ -1190,6 +1207,8 @@ static int snapshot_ctr(struct dm_target *ti, unsigned int argc, char **argv) goto bad_hash_tables; } + sema_init(&s->cow_count, (cow_threshold > 0) ? cow_threshold : INT_MAX); + s->kcopyd_client = dm_kcopyd_client_create(&dm_kcopyd_throttle); if (IS_ERR(s->kcopyd_client)) { r = PTR_ERR(s->kcopyd_client); @@ -1575,6 +1594,7 @@ static void copy_callback(int read_err, unsigned long write_err, void *context) rb_link_node(&pe->out_of_order_node, parent, p); rb_insert_color(&pe->out_of_order_node, &s->out_of_order_tree); } + up(&s->cow_count); } /* @@ -1598,6 +1618,7 @@ static void start_copy(struct dm_snap_pending_exception *pe) dest.count = src.count; /* Hand over to kcopyd */ + down(&s->cow_count); dm_kcopyd_copy(s->kcopyd_client, &src, 1, &dest, 0, copy_callback, pe); } @@ -1617,6 +1638,7 @@ static void start_full_bio(struct dm_snap_pending_exception *pe, pe->full_bio = bio; pe->full_bio_end_io = bio->bi_end_io; + down(&s->cow_count); callback_data = dm_kcopyd_prepare_callback(s->kcopyd_client, copy_callback, pe); diff --git a/drivers/md/dm-thin-metadata.c b/drivers/md/dm-thin-metadata.c index 20b0776e39ef3..ed3caceaed07c 100644 --- a/drivers/md/dm-thin-metadata.c +++ b/drivers/md/dm-thin-metadata.c @@ -1678,7 +1678,7 @@ int dm_thin_remove_range(struct dm_thin_device *td, return r; } -int dm_pool_block_is_used(struct dm_pool_metadata *pmd, dm_block_t b, bool *result) +int dm_pool_block_is_shared(struct dm_pool_metadata *pmd, dm_block_t b, bool *result) { int r; uint32_t ref_count; @@ -1686,7 +1686,7 @@ int dm_pool_block_is_used(struct dm_pool_metadata *pmd, dm_block_t b, 
bool *resu down_read(&pmd->root_lock); r = dm_sm_get_count(pmd->data_sm, b, &ref_count); if (!r) - *result = (ref_count != 0); + *result = (ref_count > 1); up_read(&pmd->root_lock); return r; diff --git a/drivers/md/dm-thin-metadata.h b/drivers/md/dm-thin-metadata.h index 35e954ea20a9b..f6be0d733c202 100644 --- a/drivers/md/dm-thin-metadata.h +++ b/drivers/md/dm-thin-metadata.h @@ -195,7 +195,7 @@ int dm_pool_get_metadata_dev_size(struct dm_pool_metadata *pmd, int dm_pool_get_data_dev_size(struct dm_pool_metadata *pmd, dm_block_t *result); -int dm_pool_block_is_used(struct dm_pool_metadata *pmd, dm_block_t b, bool *result); +int dm_pool_block_is_shared(struct dm_pool_metadata *pmd, dm_block_t b, bool *result); int dm_pool_inc_data_range(struct dm_pool_metadata *pmd, dm_block_t b, dm_block_t e); int dm_pool_dec_data_range(struct dm_pool_metadata *pmd, dm_block_t b, dm_block_t e); diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c index aaf1ad481ee88..c30a7850b2da2 100644 --- a/drivers/md/dm-thin.c +++ b/drivers/md/dm-thin.c @@ -195,7 +195,7 @@ static void throttle_unlock(struct throttle *t) struct dm_thin_new_mapping; /* - * The pool runs in 4 modes. Ordered in degraded order for comparisons. + * The pool runs in various modes. Ordered in degraded order for comparisons. 
*/ enum pool_mode { PM_WRITE, /* metadata may be changed */ @@ -282,9 +282,38 @@ struct pool { mempool_t mapping_pool; }; -static enum pool_mode get_pool_mode(struct pool *pool); static void metadata_operation_failed(struct pool *pool, const char *op, int r); +static enum pool_mode get_pool_mode(struct pool *pool) +{ + return pool->pf.mode; +} + +static void notify_of_pool_mode_change(struct pool *pool) +{ + const char *descs[] = { + "write", + "out-of-data-space", + "read-only", + "read-only", + "fail" + }; + const char *extra_desc = NULL; + enum pool_mode mode = get_pool_mode(pool); + + if (mode == PM_OUT_OF_DATA_SPACE) { + if (!pool->pf.error_if_no_space) + extra_desc = " (queue IO)"; + else + extra_desc = " (error IO)"; + } + + dm_table_event(pool->ti->table); + DMINFO("%s: switching pool to %s%s mode", + dm_device_name(pool->pool_md), + descs[(int)mode], extra_desc ? : ""); +} + /* * Target context for a pool. */ @@ -1019,7 +1048,7 @@ static void passdown_double_checking_shared_status(struct dm_thin_new_mapping *m * passdown we have to check that these blocks are now unused. 
*/ int r = 0; - bool used = true; + bool shared = true; struct thin_c *tc = m->tc; struct pool *pool = tc->pool; dm_block_t b = m->data_block, e, end = m->data_block + m->virt_end - m->virt_begin; @@ -1029,11 +1058,11 @@ static void passdown_double_checking_shared_status(struct dm_thin_new_mapping *m while (b != end) { /* find start of unmapped run */ for (; b < end; b++) { - r = dm_pool_block_is_used(pool->pmd, b, &used); + r = dm_pool_block_is_shared(pool->pmd, b, &shared); if (r) goto out; - if (!used) + if (!shared) break; } @@ -1042,11 +1071,11 @@ static void passdown_double_checking_shared_status(struct dm_thin_new_mapping *m /* find end of run */ for (e = b + 1; e != end; e++) { - r = dm_pool_block_is_used(pool->pmd, e, &used); + r = dm_pool_block_is_shared(pool->pmd, e, &shared); if (r) goto out; - if (used) + if (shared) break; } @@ -2351,8 +2380,6 @@ static void do_waker(struct work_struct *ws) queue_delayed_work(pool->wq, &pool->waker, COMMIT_PERIOD); } -static void notify_of_pool_mode_change_to_oods(struct pool *pool); - /* * We're holding onto IO to allow userland time to react. 
After the * timeout either the pool will have been resized (and thus back in @@ -2365,7 +2392,7 @@ static void do_no_space_timeout(struct work_struct *ws) if (get_pool_mode(pool) == PM_OUT_OF_DATA_SPACE && !pool->pf.error_if_no_space) { pool->pf.error_if_no_space = true; - notify_of_pool_mode_change_to_oods(pool); + notify_of_pool_mode_change(pool); error_retry_list_with_code(pool, BLK_STS_NOSPC); } } @@ -2433,26 +2460,6 @@ static void noflush_work(struct thin_c *tc, void (*fn)(struct work_struct *)) /*----------------------------------------------------------------*/ -static enum pool_mode get_pool_mode(struct pool *pool) -{ - return pool->pf.mode; -} - -static void notify_of_pool_mode_change(struct pool *pool, const char *new_mode) -{ - dm_table_event(pool->ti->table); - DMINFO("%s: switching pool to %s mode", - dm_device_name(pool->pool_md), new_mode); -} - -static void notify_of_pool_mode_change_to_oods(struct pool *pool) -{ - if (!pool->pf.error_if_no_space) - notify_of_pool_mode_change(pool, "out-of-data-space (queue IO)"); - else - notify_of_pool_mode_change(pool, "out-of-data-space (error IO)"); -} - static bool passdown_enabled(struct pool_c *pt) { return pt->adjusted_pf.discard_passdown; @@ -2501,8 +2508,6 @@ static void set_pool_mode(struct pool *pool, enum pool_mode new_mode) switch (new_mode) { case PM_FAIL: - if (old_mode != new_mode) - notify_of_pool_mode_change(pool, "failure"); dm_pool_metadata_read_only(pool->pmd); pool->process_bio = process_bio_fail; pool->process_discard = process_bio_fail; @@ -2516,8 +2521,6 @@ static void set_pool_mode(struct pool *pool, enum pool_mode new_mode) case PM_OUT_OF_METADATA_SPACE: case PM_READ_ONLY: - if (!is_read_only_pool_mode(old_mode)) - notify_of_pool_mode_change(pool, "read-only"); dm_pool_metadata_read_only(pool->pmd); pool->process_bio = process_bio_read_only; pool->process_discard = process_bio_success; @@ -2538,8 +2541,6 @@ static void set_pool_mode(struct pool *pool, enum pool_mode new_mode) * alarming 
rate. Adjust your low water mark if you're * frequently seeing this mode. */ - if (old_mode != new_mode) - notify_of_pool_mode_change_to_oods(pool); pool->out_of_data_space = true; pool->process_bio = process_bio_read_only; pool->process_discard = process_discard_bio; @@ -2552,8 +2553,6 @@ static void set_pool_mode(struct pool *pool, enum pool_mode new_mode) break; case PM_WRITE: - if (old_mode != new_mode) - notify_of_pool_mode_change(pool, "write"); if (old_mode == PM_OUT_OF_DATA_SPACE) cancel_delayed_work_sync(&pool->no_space_timeout); pool->out_of_data_space = false; @@ -2573,6 +2572,9 @@ static void set_pool_mode(struct pool *pool, enum pool_mode new_mode) * doesn't cause an unexpected mode transition on resume. */ pt->adjusted_pf.mode = new_mode; + + if (old_mode != new_mode) + notify_of_pool_mode_change(pool); } static void abort_transaction(struct pool *pool) diff --git a/drivers/md/dm-unstripe.c b/drivers/md/dm-unstripe.c index 954b7ab4e684d..e673dacf64181 100644 --- a/drivers/md/dm-unstripe.c +++ b/drivers/md/dm-unstripe.c @@ -78,7 +78,7 @@ static int unstripe_ctr(struct dm_target *ti, unsigned int argc, char **argv) goto err; } - if (sscanf(argv[4], "%llu%c", &start, &dummy) != 1) { + if (sscanf(argv[4], "%llu%c", &start, &dummy) != 1 || start != (sector_t)start) { ti->error = "Invalid striped device offset"; goto err; } diff --git a/drivers/md/dm-zoned-metadata.c b/drivers/md/dm-zoned-metadata.c index 969954915566f..fa68336560c34 100644 --- a/drivers/md/dm-zoned-metadata.c +++ b/drivers/md/dm-zoned-metadata.c @@ -99,7 +99,7 @@ struct dmz_mblock { struct rb_node node; struct list_head link; sector_t no; - atomic_t ref; + unsigned int ref; unsigned long state; struct page *page; void *data; @@ -296,7 +296,7 @@ static struct dmz_mblock *dmz_alloc_mblock(struct dmz_metadata *zmd, RB_CLEAR_NODE(&mblk->node); INIT_LIST_HEAD(&mblk->link); - atomic_set(&mblk->ref, 0); + mblk->ref = 0; mblk->state = 0; mblk->no = mblk_no; mblk->data = page_address(mblk->page); 
@@ -339,10 +339,11 @@ static void dmz_insert_mblock(struct dmz_metadata *zmd, struct dmz_mblock *mblk) } /* - * Lookup a metadata block in the rbtree. + * Lookup a metadata block in the rbtree. If the block is found, increment + * its reference count. */ -static struct dmz_mblock *dmz_lookup_mblock(struct dmz_metadata *zmd, - sector_t mblk_no) +static struct dmz_mblock *dmz_get_mblock_fast(struct dmz_metadata *zmd, + sector_t mblk_no) { struct rb_root *root = &zmd->mblk_rbtree; struct rb_node *node = root->rb_node; @@ -350,8 +351,17 @@ static struct dmz_mblock *dmz_lookup_mblock(struct dmz_metadata *zmd, while (node) { mblk = container_of(node, struct dmz_mblock, node); - if (mblk->no == mblk_no) + if (mblk->no == mblk_no) { + /* + * If this is the first reference to the block, + * remove it from the LRU list. + */ + mblk->ref++; + if (mblk->ref == 1 && + !test_bit(DMZ_META_DIRTY, &mblk->state)) + list_del_init(&mblk->link); return mblk; + } node = (mblk->no < mblk_no) ? node->rb_left : node->rb_right; } @@ -382,32 +392,47 @@ static void dmz_mblock_bio_end_io(struct bio *bio) } /* - * Read a metadata block from disk. + * Read an uncached metadata block from disk and add it to the cache. 
*/ -static struct dmz_mblock *dmz_fetch_mblock(struct dmz_metadata *zmd, - sector_t mblk_no) +static struct dmz_mblock *dmz_get_mblock_slow(struct dmz_metadata *zmd, + sector_t mblk_no) { - struct dmz_mblock *mblk; + struct dmz_mblock *mblk, *m; sector_t block = zmd->sb[zmd->mblk_primary].block + mblk_no; struct bio *bio; - /* Get block and insert it */ + /* Get a new block and a BIO to read it */ mblk = dmz_alloc_mblock(zmd, mblk_no); if (!mblk) return NULL; - spin_lock(&zmd->mblk_lock); - atomic_inc(&mblk->ref); - set_bit(DMZ_META_READING, &mblk->state); - dmz_insert_mblock(zmd, mblk); - spin_unlock(&zmd->mblk_lock); - bio = bio_alloc(GFP_NOIO, 1); if (!bio) { dmz_free_mblock(zmd, mblk); return NULL; } + spin_lock(&zmd->mblk_lock); + + /* + * Make sure that another context did not start reading + * the block already. + */ + m = dmz_get_mblock_fast(zmd, mblk_no); + if (m) { + spin_unlock(&zmd->mblk_lock); + dmz_free_mblock(zmd, mblk); + bio_put(bio); + return m; + } + + mblk->ref++; + set_bit(DMZ_META_READING, &mblk->state); + dmz_insert_mblock(zmd, mblk); + + spin_unlock(&zmd->mblk_lock); + + /* Submit read BIO */ bio->bi_iter.bi_sector = dmz_blk2sect(block); bio_set_dev(bio, zmd->dev->bdev); bio->bi_private = mblk; @@ -484,7 +509,8 @@ static void dmz_release_mblock(struct dmz_metadata *zmd, spin_lock(&zmd->mblk_lock); - if (atomic_dec_and_test(&mblk->ref)) { + mblk->ref--; + if (mblk->ref == 0) { if (test_bit(DMZ_META_ERROR, &mblk->state)) { rb_erase(&mblk->node, &zmd->mblk_rbtree); dmz_free_mblock(zmd, mblk); @@ -508,18 +534,12 @@ static struct dmz_mblock *dmz_get_mblock(struct dmz_metadata *zmd, /* Check rbtree */ spin_lock(&zmd->mblk_lock); - mblk = dmz_lookup_mblock(zmd, mblk_no); - if (mblk) { - /* Cache hit: remove block from LRU list */ - if (atomic_inc_return(&mblk->ref) == 1 && - !test_bit(DMZ_META_DIRTY, &mblk->state)) - list_del_init(&mblk->link); - } + mblk = dmz_get_mblock_fast(zmd, mblk_no); spin_unlock(&zmd->mblk_lock); if (!mblk) { /* Cache miss: 
read the block from disk */ - mblk = dmz_fetch_mblock(zmd, mblk_no); + mblk = dmz_get_mblock_slow(zmd, mblk_no); if (!mblk) return ERR_PTR(-ENOMEM); } @@ -753,7 +773,7 @@ int dmz_flush_metadata(struct dmz_metadata *zmd) spin_lock(&zmd->mblk_lock); clear_bit(DMZ_META_DIRTY, &mblk->state); - if (atomic_read(&mblk->ref) == 0) + if (mblk->ref == 0) list_add_tail(&mblk->link, &zmd->mblk_lru_list); spin_unlock(&zmd->mblk_lock); } @@ -2308,7 +2328,7 @@ static void dmz_cleanup_metadata(struct dmz_metadata *zmd) mblk = list_first_entry(&zmd->mblk_dirty_list, struct dmz_mblock, link); dmz_dev_warn(zmd->dev, "mblock %llu still in dirty list (ref %u)", - (u64)mblk->no, atomic_read(&mblk->ref)); + (u64)mblk->no, mblk->ref); list_del_init(&mblk->link); rb_erase(&mblk->node, &zmd->mblk_rbtree); dmz_free_mblock(zmd, mblk); @@ -2326,8 +2346,8 @@ static void dmz_cleanup_metadata(struct dmz_metadata *zmd) root = &zmd->mblk_rbtree; rbtree_postorder_for_each_entry_safe(mblk, next, root, node) { dmz_dev_warn(zmd->dev, "mblock %llu ref %u still in rbtree", - (u64)mblk->no, atomic_read(&mblk->ref)); - atomic_set(&mblk->ref, 0); + (u64)mblk->no, mblk->ref); + mblk->ref = 0; dmz_free_mblock(zmd, mblk); } diff --git a/drivers/md/dm-zoned-target.c b/drivers/md/dm-zoned-target.c index a44183ff4be0a..85fb2baa8a7fa 100644 --- a/drivers/md/dm-zoned-target.c +++ b/drivers/md/dm-zoned-target.c @@ -20,7 +20,6 @@ struct dmz_bioctx { struct dm_zone *zone; struct bio *bio; atomic_t ref; - blk_status_t status; }; /* @@ -78,65 +77,66 @@ static inline void dmz_bio_endio(struct bio *bio, blk_status_t status) { struct dmz_bioctx *bioctx = dm_per_bio_data(bio, sizeof(struct dmz_bioctx)); - if (bioctx->status == BLK_STS_OK && status != BLK_STS_OK) - bioctx->status = status; - bio_endio(bio); + if (status != BLK_STS_OK && bio->bi_status == BLK_STS_OK) + bio->bi_status = status; + + if (atomic_dec_and_test(&bioctx->ref)) { + struct dm_zone *zone = bioctx->zone; + + if (zone) { + if (bio->bi_status != BLK_STS_OK 
&& + bio_op(bio) == REQ_OP_WRITE && + dmz_is_seq(zone)) + set_bit(DMZ_SEQ_WRITE_ERR, &zone->flags); + dmz_deactivate_zone(zone); + } + bio_endio(bio); + } } /* - * Partial clone read BIO completion callback. This terminates the + * Completion callback for an internally cloned target BIO. This terminates the * target BIO when there are no more references to its context. */ -static void dmz_read_bio_end_io(struct bio *bio) +static void dmz_clone_endio(struct bio *clone) { - struct dmz_bioctx *bioctx = bio->bi_private; - blk_status_t status = bio->bi_status; + struct dmz_bioctx *bioctx = clone->bi_private; + blk_status_t status = clone->bi_status; - bio_put(bio); + bio_put(clone); dmz_bio_endio(bioctx->bio, status); } /* - * Issue a BIO to a zone. The BIO may only partially process the + * Issue a clone of a target BIO. The clone may only partially process the * original target BIO. */ -static int dmz_submit_read_bio(struct dmz_target *dmz, struct dm_zone *zone, - struct bio *bio, sector_t chunk_block, - unsigned int nr_blocks) +static int dmz_submit_bio(struct dmz_target *dmz, struct dm_zone *zone, + struct bio *bio, sector_t chunk_block, + unsigned int nr_blocks) { struct dmz_bioctx *bioctx = dm_per_bio_data(bio, sizeof(struct dmz_bioctx)); - sector_t sector; struct bio *clone; - /* BIO remap sector */ - sector = dmz_start_sect(dmz->metadata, zone) + dmz_blk2sect(chunk_block); - - /* If the read is not partial, there is no need to clone the BIO */ - if (nr_blocks == dmz_bio_blocks(bio)) { - /* Setup and submit the BIO */ - bio->bi_iter.bi_sector = sector; - atomic_inc(&bioctx->ref); - generic_make_request(bio); - return 0; - } - - /* Partial BIO: we need to clone the BIO */ clone = bio_clone_fast(bio, GFP_NOIO, &dmz->bio_set); if (!clone) return -ENOMEM; - /* Setup the clone */ - clone->bi_iter.bi_sector = sector; + bio_set_dev(clone, dmz->dev->bdev); + clone->bi_iter.bi_sector = + dmz_start_sect(dmz->metadata, zone) + dmz_blk2sect(chunk_block); 
clone->bi_iter.bi_size = dmz_blk2sect(nr_blocks) << SECTOR_SHIFT; - clone->bi_end_io = dmz_read_bio_end_io; + clone->bi_end_io = dmz_clone_endio; clone->bi_private = bioctx; bio_advance(bio, clone->bi_iter.bi_size); - /* Submit the clone */ atomic_inc(&bioctx->ref); generic_make_request(clone); + if (bio_op(bio) == REQ_OP_WRITE && dmz_is_seq(zone)) + zone->wp_block += nr_blocks; + return 0; } @@ -214,7 +214,7 @@ static int dmz_handle_read(struct dmz_target *dmz, struct dm_zone *zone, if (nr_blocks) { /* Valid blocks found: read them */ nr_blocks = min_t(unsigned int, nr_blocks, end_block - chunk_block); - ret = dmz_submit_read_bio(dmz, rzone, bio, chunk_block, nr_blocks); + ret = dmz_submit_bio(dmz, rzone, bio, chunk_block, nr_blocks); if (ret) return ret; chunk_block += nr_blocks; @@ -228,25 +228,6 @@ static int dmz_handle_read(struct dmz_target *dmz, struct dm_zone *zone, return 0; } -/* - * Issue a write BIO to a zone. - */ -static void dmz_submit_write_bio(struct dmz_target *dmz, struct dm_zone *zone, - struct bio *bio, sector_t chunk_block, - unsigned int nr_blocks) -{ - struct dmz_bioctx *bioctx = dm_per_bio_data(bio, sizeof(struct dmz_bioctx)); - - /* Setup and submit the BIO */ - bio_set_dev(bio, dmz->dev->bdev); - bio->bi_iter.bi_sector = dmz_start_sect(dmz->metadata, zone) + dmz_blk2sect(chunk_block); - atomic_inc(&bioctx->ref); - generic_make_request(bio); - - if (dmz_is_seq(zone)) - zone->wp_block += nr_blocks; -} - /* * Write blocks directly in a data zone, at the write pointer. 
* If a buffer zone is assigned, invalidate the blocks written @@ -265,7 +246,9 @@ static int dmz_handle_direct_write(struct dmz_target *dmz, return -EROFS; /* Submit write */ - dmz_submit_write_bio(dmz, zone, bio, chunk_block, nr_blocks); + ret = dmz_submit_bio(dmz, zone, bio, chunk_block, nr_blocks); + if (ret) + return ret; /* * Validate the blocks in the data zone and invalidate @@ -301,7 +284,9 @@ static int dmz_handle_buffered_write(struct dmz_target *dmz, return -EROFS; /* Submit write */ - dmz_submit_write_bio(dmz, bzone, bio, chunk_block, nr_blocks); + ret = dmz_submit_bio(dmz, bzone, bio, chunk_block, nr_blocks); + if (ret) + return ret; /* * Validate the blocks in the buffer zone @@ -600,7 +585,6 @@ static int dmz_map(struct dm_target *ti, struct bio *bio) bioctx->zone = NULL; bioctx->bio = bio; atomic_set(&bioctx->ref, 1); - bioctx->status = BLK_STS_OK; /* Set the BIO pending in the flush list */ if (!nr_sectors && bio_op(bio) == REQ_OP_WRITE) { @@ -623,35 +607,6 @@ static int dmz_map(struct dm_target *ti, struct bio *bio) return DM_MAPIO_SUBMITTED; } -/* - * Completed target BIO processing. - */ -static int dmz_end_io(struct dm_target *ti, struct bio *bio, blk_status_t *error) -{ - struct dmz_bioctx *bioctx = dm_per_bio_data(bio, sizeof(struct dmz_bioctx)); - - if (bioctx->status == BLK_STS_OK && *error) - bioctx->status = *error; - - if (!atomic_dec_and_test(&bioctx->ref)) - return DM_ENDIO_INCOMPLETE; - - /* Done */ - bio->bi_status = bioctx->status; - - if (bioctx->zone) { - struct dm_zone *zone = bioctx->zone; - - if (*error && bio_op(bio) == REQ_OP_WRITE) { - if (dmz_is_seq(zone)) - set_bit(DMZ_SEQ_WRITE_ERR, &zone->flags); - } - dmz_deactivate_zone(zone); - } - - return DM_ENDIO_DONE; -} - /* * Get zoned device information. 
*/ @@ -947,7 +902,6 @@ static struct target_type dmz_type = { .ctr = dmz_ctr, .dtr = dmz_dtr, .map = dmz_map, - .end_io = dmz_end_io, .io_hints = dmz_io_hints, .prepare_ioctl = dmz_prepare_ioctl, .postsuspend = dmz_suspend, diff --git a/drivers/md/dm.c b/drivers/md/dm.c index 45abb54037fc6..07d2949a87464 100644 --- a/drivers/md/dm.c +++ b/drivers/md/dm.c @@ -1592,6 +1592,8 @@ static blk_qc_t __split_and_process_bio(struct mapped_device *md, return ret; } + blk_queue_split(md->queue, &bio); + init_clone_info(&ci, md, map, bio); if (bio->bi_opf & REQ_PREFLUSH) { diff --git a/drivers/md/md.c b/drivers/md/md.c index 63ceabb4e020f..8668793262d09 100644 --- a/drivers/md/md.c +++ b/drivers/md/md.c @@ -452,10 +452,11 @@ static void md_end_flush(struct bio *fbio) rdev_dec_pending(rdev, mddev); if (atomic_dec_and_test(&fi->flush_pending)) { - if (bio->bi_iter.bi_size == 0) + if (bio->bi_iter.bi_size == 0) { /* an empty barrier - all done */ bio_endio(bio); - else { + mempool_free(fi, mddev->flush_pool); + } else { INIT_WORK(&fi->flush_work, submit_flushes); queue_work(md_wq, &fi->flush_work); } @@ -509,10 +510,11 @@ void md_flush_request(struct mddev *mddev, struct bio *bio) rcu_read_unlock(); if (atomic_dec_and_test(&fi->flush_pending)) { - if (bio->bi_iter.bi_size == 0) + if (bio->bi_iter.bi_size == 0) { /* an empty barrier - all done */ bio_endio(bio); - else { + mempool_free(fi, mddev->flush_pool); + } else { INIT_WORK(&fi->flush_work, submit_flushes); queue_work(md_wq, &fi->flush_work); } @@ -5904,14 +5906,6 @@ static void __md_stop(struct mddev *mddev) mddev->to_remove = &md_redundancy_group; module_put(pers->owner); clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery); -} - -void md_stop(struct mddev *mddev) -{ - /* stop the array and free an attached data structures. 
- * This is called from dm-raid - */ - __md_stop(mddev); if (mddev->flush_bio_pool) { mempool_destroy(mddev->flush_bio_pool); mddev->flush_bio_pool = NULL; @@ -5920,6 +5914,14 @@ void md_stop(struct mddev *mddev) mempool_destroy(mddev->flush_pool); mddev->flush_pool = NULL; } +} + +void md_stop(struct mddev *mddev) +{ + /* stop the array and free an attached data structures. + * This is called from dm-raid + */ + __md_stop(mddev); bioset_exit(&mddev->bio_set); bioset_exit(&mddev->sync_set); } diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c index 4e990246225ea..1d54109071cc8 100644 --- a/drivers/md/raid1.c +++ b/drivers/md/raid1.c @@ -1734,6 +1734,7 @@ static int raid1_add_disk(struct mddev *mddev, struct md_rdev *rdev) */ if (rdev->saved_raid_disk >= 0 && rdev->saved_raid_disk >= first && + rdev->saved_raid_disk < conf->raid_disks && conf->mirrors[rdev->saved_raid_disk].rdev == NULL) first = last = rdev->saved_raid_disk; diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c index d6f7978b4449e..811427e53126b 100644 --- a/drivers/md/raid10.c +++ b/drivers/md/raid10.c @@ -1808,6 +1808,7 @@ static int raid10_add_disk(struct mddev *mddev, struct md_rdev *rdev) first = last = rdev->raid_disk; if (rdev->saved_raid_disk >= first && + rdev->saved_raid_disk < conf->geo.raid_disks && conf->mirrors[rdev->saved_raid_disk].rdev == NULL) mirror = rdev->saved_raid_disk; else diff --git a/drivers/media/cec/cec-adap.c b/drivers/media/cec/cec-adap.c index 030b2602faf0c..a7ea27d2aa8ef 100644 --- a/drivers/media/cec/cec-adap.c +++ b/drivers/media/cec/cec-adap.c @@ -341,7 +341,7 @@ static void cec_data_completed(struct cec_data *data) * * This function is called with adap->lock held. 
*/ -static void cec_data_cancel(struct cec_data *data) +static void cec_data_cancel(struct cec_data *data, u8 tx_status) { /* * It's either the current transmit, or it is a pending @@ -356,13 +356,11 @@ static void cec_data_cancel(struct cec_data *data) } if (data->msg.tx_status & CEC_TX_STATUS_OK) { - /* Mark the canceled RX as a timeout */ data->msg.rx_ts = ktime_get_ns(); - data->msg.rx_status = CEC_RX_STATUS_TIMEOUT; + data->msg.rx_status = CEC_RX_STATUS_ABORTED; } else { - /* Mark the canceled TX as an error */ data->msg.tx_ts = ktime_get_ns(); - data->msg.tx_status |= CEC_TX_STATUS_ERROR | + data->msg.tx_status |= tx_status | CEC_TX_STATUS_MAX_RETRIES; data->msg.tx_error_cnt++; data->attempts = 0; @@ -390,15 +388,15 @@ static void cec_flush(struct cec_adapter *adap) while (!list_empty(&adap->transmit_queue)) { data = list_first_entry(&adap->transmit_queue, struct cec_data, list); - cec_data_cancel(data); + cec_data_cancel(data, CEC_TX_STATUS_ABORTED); } if (adap->transmitting) - cec_data_cancel(adap->transmitting); + cec_data_cancel(adap->transmitting, CEC_TX_STATUS_ABORTED); /* Cancel the pending timeout work. */ list_for_each_entry_safe(data, n, &adap->wait_queue, list) { if (cancel_delayed_work(&data->work)) - cec_data_cancel(data); + cec_data_cancel(data, CEC_TX_STATUS_OK); /* * If cancel_delayed_work returned false, then * the cec_wait_timeout function is running, @@ -444,7 +442,7 @@ int cec_thread_func(void *_adap) (adap->needs_hpd && (!adap->is_configured && !adap->is_configuring)) || kthread_should_stop() || - (!adap->transmitting && + (!adap->transmit_in_progress && !list_empty(&adap->transmit_queue)), msecs_to_jiffies(CEC_XFER_TIMEOUT_MS)); timeout = err == 0; @@ -452,7 +450,7 @@ int cec_thread_func(void *_adap) /* Otherwise we just wait for something to happen. 
*/ wait_event_interruptible(adap->kthread_waitq, kthread_should_stop() || - (!adap->transmitting && + (!adap->transmit_in_progress && !list_empty(&adap->transmit_queue))); } @@ -474,12 +472,14 @@ int cec_thread_func(void *_adap) * so much traffic on the bus that the adapter was * unable to transmit for CEC_XFER_TIMEOUT_MS (2.1s). */ - dprintk(1, "%s: message %*ph timed out\n", __func__, + pr_warn("cec-%s: message %*ph timed out\n", adap->name, adap->transmitting->msg.len, adap->transmitting->msg.msg); + adap->transmit_in_progress = false; adap->tx_timeouts++; /* Just give up on this. */ - cec_data_cancel(adap->transmitting); + cec_data_cancel(adap->transmitting, + CEC_TX_STATUS_TIMEOUT); goto unlock; } @@ -487,7 +487,7 @@ int cec_thread_func(void *_adap) * If we are still transmitting, or there is nothing new to * transmit, then just continue waiting. */ - if (adap->transmitting || list_empty(&adap->transmit_queue)) + if (adap->transmit_in_progress || list_empty(&adap->transmit_queue)) goto unlock; /* Get a new message to transmit */ @@ -514,9 +514,11 @@ int cec_thread_func(void *_adap) if (data->attempts) { /* should be >= 3 data bit periods for a retry */ signal_free_time = CEC_SIGNAL_FREE_TIME_RETRY; - } else if (data->new_initiator) { + } else if (adap->last_initiator != + cec_msg_initiator(&data->msg)) { /* should be >= 5 data bit periods for new initiator */ signal_free_time = CEC_SIGNAL_FREE_TIME_NEW_INITIATOR; + adap->last_initiator = cec_msg_initiator(&data->msg); } else { /* * should be >= 7 data bit periods for sending another @@ -530,7 +532,9 @@ int cec_thread_func(void *_adap) /* Tell the adapter to transmit, cancel on error */ if (adap->ops->adap_transmit(adap, data->attempts, signal_free_time, &data->msg)) - cec_data_cancel(data); + cec_data_cancel(data, CEC_TX_STATUS_ABORTED); + else + adap->transmit_in_progress = true; unlock: mutex_unlock(&adap->lock); @@ -561,14 +565,17 @@ void cec_transmit_done_ts(struct cec_adapter *adap, u8 status, data = 
adap->transmitting; if (!data) { /* - * This can happen if a transmit was issued and the cable is + * This might happen if a transmit was issued and the cable is * unplugged while the transmit is ongoing. Ignore this * transmit in that case. */ - dprintk(1, "%s was called without an ongoing transmit!\n", - __func__); - goto unlock; + if (!adap->transmit_in_progress) + dprintk(1, "%s was called without an ongoing transmit!\n", + __func__); + adap->transmit_in_progress = false; + goto wake_thread; } + adap->transmit_in_progress = false; msg = &data->msg; @@ -634,7 +641,6 @@ void cec_transmit_done_ts(struct cec_adapter *adap, u8 status, * for transmitting or to retry the current message. */ wake_up_interruptible(&adap->kthread_waitq); -unlock: mutex_unlock(&adap->lock); } EXPORT_SYMBOL_GPL(cec_transmit_done_ts); @@ -701,9 +707,6 @@ int cec_transmit_msg_fh(struct cec_adapter *adap, struct cec_msg *msg, struct cec_fh *fh, bool block) { struct cec_data *data; - u8 last_initiator = 0xff; - unsigned int timeout; - int res = 0; msg->rx_ts = 0; msg->tx_ts = 0; @@ -813,23 +816,6 @@ int cec_transmit_msg_fh(struct cec_adapter *adap, struct cec_msg *msg, data->adap = adap; data->blocking = block; - /* - * Determine if this message follows a message from the same - * initiator. Needed to determine the free signal time later on. 
- */ - if (msg->len > 1) { - if (!(list_empty(&adap->transmit_queue))) { - const struct cec_data *last; - - last = list_last_entry(&adap->transmit_queue, - const struct cec_data, list); - last_initiator = cec_msg_initiator(&last->msg); - } else if (adap->transmitting) { - last_initiator = - cec_msg_initiator(&adap->transmitting->msg); - } - } - data->new_initiator = last_initiator != cec_msg_initiator(msg); init_completion(&data->c); INIT_DELAYED_WORK(&data->work, cec_wait_timeout); @@ -845,48 +831,23 @@ int cec_transmit_msg_fh(struct cec_adapter *adap, struct cec_msg *msg, if (!block) return 0; - /* - * If we don't get a completion before this time something is really - * wrong and we time out. - */ - timeout = CEC_XFER_TIMEOUT_MS; - /* Add the requested timeout if we have to wait for a reply as well */ - if (msg->timeout) - timeout += msg->timeout; - /* * Release the lock and wait, retake the lock afterwards. */ mutex_unlock(&adap->lock); - res = wait_for_completion_killable_timeout(&data->c, - msecs_to_jiffies(timeout)); + wait_for_completion_killable(&data->c); + if (!data->completed) + cancel_delayed_work_sync(&data->work); mutex_lock(&adap->lock); - if (data->completed) { - /* The transmit completed (possibly with an error) */ - *msg = data->msg; - kfree(data); - return 0; - } - /* - * The wait for completion timed out or was interrupted, so mark this - * as non-blocking and disconnect from the filehandle since it is - * still 'in flight'. When it finally completes it will just drop the - * result silently. 
- */ - data->blocking = false; - if (data->fh) - list_del(&data->xfer_list); - data->fh = NULL; + /* Cancel the transmit if it was interrupted */ + if (!data->completed) + cec_data_cancel(data, CEC_TX_STATUS_ABORTED); - if (res == 0) { /* timed out */ - /* Check if the reply or the transmit failed */ - if (msg->timeout && (msg->tx_status & CEC_TX_STATUS_OK)) - msg->rx_status = CEC_RX_STATUS_TIMEOUT; - else - msg->tx_status = CEC_TX_STATUS_MAX_RETRIES; - } - return res > 0 ? 0 : res; + /* The transmit completed (possibly with an error) */ + *msg = data->msg; + kfree(data); + return 0; } /* Helper function to be used by drivers and this framework. */ @@ -1044,6 +1005,8 @@ void cec_received_msg_ts(struct cec_adapter *adap, mutex_lock(&adap->lock); dprintk(2, "%s: %*ph\n", __func__, msg->len, msg->msg); + adap->last_initiator = 0xff; + /* Check if this message was for us (directed or broadcast). */ if (!cec_msg_is_broadcast(msg)) valid_la = cec_has_log_addr(adap, msg_dest); @@ -1209,6 +1172,8 @@ static int cec_config_log_addr(struct cec_adapter *adap, { struct cec_log_addrs *las = &adap->log_addrs; struct cec_msg msg = { }; + const unsigned int max_retries = 2; + unsigned int i; int err; if (cec_has_log_addr(adap, log_addr)) @@ -1217,19 +1182,44 @@ static int cec_config_log_addr(struct cec_adapter *adap, /* Send poll message */ msg.len = 1; msg.msg[0] = (log_addr << 4) | log_addr; - err = cec_transmit_msg_fh(adap, &msg, NULL, true); - /* - * While trying to poll the physical address was reset - * and the adapter was unconfigured, so bail out. - */ - if (!adap->is_configuring) - return -EINTR; + for (i = 0; i < max_retries; i++) { + err = cec_transmit_msg_fh(adap, &msg, NULL, true); - if (err) - return err; + /* + * While trying to poll the physical address was reset + * and the adapter was unconfigured, so bail out. 
+	 */ +	if (!adap->is_configuring) +		return -EINTR; + +	if (err) +		return err; -	if (msg.tx_status & CEC_TX_STATUS_OK) +		/* +		 * The message was aborted due to a disconnect or +		 * unconfigure, just bail out. +		 */ +		if (msg.tx_status & CEC_TX_STATUS_ABORTED) +			return -EINTR; +		if (msg.tx_status & CEC_TX_STATUS_OK) +			return 0; +		if (msg.tx_status & CEC_TX_STATUS_NACK) +			break; +		/* +		 * Retry up to max_retries times if the message was neither +		 * OKed nor NACKed. This can happen due to e.g. a Lost +		 * Arbitration condition. +		 */ +	} + +	/* +	 * If we are unable to get an OK or a NACK after max_retries attempts +	 * (and note that each attempt already consists of four polls), then +	 * we assume that something is really weird and that it is not a +	 * good idea to try and claim this logical address. +	 */ +	if (i == max_retries) 		return 0;  	/* @@ -1498,14 +1488,20 @@ void __cec_s_phys_addr(struct cec_adapter *adap, u16 phys_addr, bool block) 		if (adap->monitor_all_cnt) 			WARN_ON(call_op(adap, adap_monitor_all_enable, false)); 		mutex_lock(&adap->devnode.lock); -		if (adap->needs_hpd || list_empty(&adap->devnode.fhs)) +		if (adap->needs_hpd || list_empty(&adap->devnode.fhs)) { 			WARN_ON(adap->ops->adap_enable(adap, false)); +			adap->transmit_in_progress = false; +			wake_up_interruptible(&adap->kthread_waitq); +		} 		mutex_unlock(&adap->devnode.lock); 		if (phys_addr == CEC_PHYS_ADDR_INVALID) 			return; 	}  	mutex_lock(&adap->devnode.lock); +	adap->last_initiator = 0xff; +	adap->transmit_in_progress = false; + 	if ((adap->needs_hpd || list_empty(&adap->devnode.fhs)) && 	    adap->ops->adap_enable(adap, true)) { 		mutex_unlock(&adap->devnode.lock); diff --git a/drivers/media/cec/cec-api.c b/drivers/media/cec/cec-api.c index b6536bbad530c..4961573850d54 100644 --- a/drivers/media/cec/cec-api.c +++ b/drivers/media/cec/cec-api.c @@ -101,6 +101,23 @@ static long cec_adap_g_phys_addr(struct cec_adapter *adap, 	return 0; }  +static int cec_validate_phys_addr(u16 phys_addr) +{ +	int i; + +	if (phys_addr == 
CEC_PHYS_ADDR_INVALID) + return 0; + for (i = 0; i < 16; i += 4) + if (phys_addr & (0xf << i)) + break; + if (i == 16) + return 0; + for (i += 4; i < 16; i += 4) + if ((phys_addr & (0xf << i)) == 0) + return -EINVAL; + return 0; +} + static long cec_adap_s_phys_addr(struct cec_adapter *adap, struct cec_fh *fh, bool block, __u16 __user *parg) { @@ -112,7 +129,7 @@ static long cec_adap_s_phys_addr(struct cec_adapter *adap, struct cec_fh *fh, if (copy_from_user(&phys_addr, parg, sizeof(phys_addr))) return -EFAULT; - err = cec_phys_addr_validate(phys_addr, NULL, NULL); + err = cec_validate_phys_addr(phys_addr); if (err) return err; mutex_lock(&adap->lock); diff --git a/drivers/media/cec/cec-edid.c b/drivers/media/cec/cec-edid.c index ec72ac1c0b915..f587e8eaefd81 100644 --- a/drivers/media/cec/cec-edid.c +++ b/drivers/media/cec/cec-edid.c @@ -10,66 +10,6 @@ #include #include -/* - * This EDID is expected to be a CEA-861 compliant, which means that there are - * at least two blocks and one or more of the extensions blocks are CEA-861 - * blocks. - * - * The returned location is guaranteed to be < size - 1. - */ -static unsigned int cec_get_edid_spa_location(const u8 *edid, unsigned int size) -{ - unsigned int blocks = size / 128; - unsigned int block; - u8 d; - - /* Sanity check: at least 2 blocks and a multiple of the block size */ - if (blocks < 2 || size % 128) - return 0; - - /* - * If there are fewer extension blocks than the size, then update - * 'blocks'. It is allowed to have more extension blocks than the size, - * since some hardware can only read e.g. 256 bytes of the EDID, even - * though more blocks are present. The first CEA-861 extension block - * should normally be in block 1 anyway. 
- */ - if (edid[0x7e] + 1 < blocks) - blocks = edid[0x7e] + 1; - - for (block = 1; block < blocks; block++) { - unsigned int offset = block * 128; - - /* Skip any non-CEA-861 extension blocks */ - if (edid[offset] != 0x02 || edid[offset + 1] != 0x03) - continue; - - /* search Vendor Specific Data Block (tag 3) */ - d = edid[offset + 2] & 0x7f; - /* Check if there are Data Blocks */ - if (d <= 4) - continue; - if (d > 4) { - unsigned int i = offset + 4; - unsigned int end = offset + d; - - /* Note: 'end' is always < 'size' */ - do { - u8 tag = edid[i] >> 5; - u8 len = edid[i] & 0x1f; - - if (tag == 3 && len >= 5 && i + len <= end && - edid[i + 1] == 0x03 && - edid[i + 2] == 0x0c && - edid[i + 3] == 0x00) - return i + 4; - i += len + 1; - } while (i < end); - } - } - return 0; -} - u16 cec_get_edid_phys_addr(const u8 *edid, unsigned int size, unsigned int *offset) { diff --git a/drivers/media/cec/cec-pin.c b/drivers/media/cec/cec-pin.c index 6e311424f0dc5..0496d93b2b8fa 100644 --- a/drivers/media/cec/cec-pin.c +++ b/drivers/media/cec/cec-pin.c @@ -601,8 +601,9 @@ static void cec_pin_tx_states(struct cec_pin *pin, ktime_t ts) break; /* Was the message ACKed? */ ack = cec_msg_is_broadcast(&pin->tx_msg) ? 
v : !v; - if (!ack && !pin->tx_ignore_nack_until_eom && - pin->tx_bit / 10 < pin->tx_msg.len && !pin->tx_post_eom) { + if (!ack && (!pin->tx_ignore_nack_until_eom || + pin->tx_bit / 10 == pin->tx_msg.len - 1) && + !pin->tx_post_eom) { /* * Note: the CEC spec is ambiguous regarding * what action to take when a NACK appears diff --git a/drivers/media/common/v4l2-tpg/v4l2-tpg-colors.c b/drivers/media/common/v4l2-tpg/v4l2-tpg-colors.c index 3a3dc23c560c8..a4341205c197d 100644 --- a/drivers/media/common/v4l2-tpg/v4l2-tpg-colors.c +++ b/drivers/media/common/v4l2-tpg/v4l2-tpg-colors.c @@ -602,14 +602,14 @@ const struct tpg_rbg_color16 tpg_csc_colors[V4L2_COLORSPACE_DCI_P3 + 1][V4L2_XFE [V4L2_COLORSPACE_SMPTE170M][V4L2_XFER_FUNC_SRGB][5] = { 3138, 657, 810 }, [V4L2_COLORSPACE_SMPTE170M][V4L2_XFER_FUNC_SRGB][6] = { 731, 680, 3048 }, [V4L2_COLORSPACE_SMPTE170M][V4L2_XFER_FUNC_SRGB][7] = { 800, 799, 800 }, - [V4L2_COLORSPACE_SMPTE170M][V4L2_XFER_FUNC_ADOBERGB][0] = { 3033, 3033, 3033 }, - [V4L2_COLORSPACE_SMPTE170M][V4L2_XFER_FUNC_ADOBERGB][1] = { 3046, 3054, 886 }, - [V4L2_COLORSPACE_SMPTE170M][V4L2_XFER_FUNC_ADOBERGB][2] = { 0, 3058, 3031 }, - [V4L2_COLORSPACE_SMPTE170M][V4L2_XFER_FUNC_ADOBERGB][3] = { 360, 3079, 877 }, - [V4L2_COLORSPACE_SMPTE170M][V4L2_XFER_FUNC_ADOBERGB][4] = { 3103, 587, 3027 }, - [V4L2_COLORSPACE_SMPTE170M][V4L2_XFER_FUNC_ADOBERGB][5] = { 3116, 723, 861 }, - [V4L2_COLORSPACE_SMPTE170M][V4L2_XFER_FUNC_ADOBERGB][6] = { 789, 744, 3025 }, - [V4L2_COLORSPACE_SMPTE170M][V4L2_XFER_FUNC_ADOBERGB][7] = { 851, 851, 851 }, + [V4L2_COLORSPACE_SMPTE170M][V4L2_XFER_FUNC_OPRGB][0] = { 3033, 3033, 3033 }, + [V4L2_COLORSPACE_SMPTE170M][V4L2_XFER_FUNC_OPRGB][1] = { 3046, 3054, 886 }, + [V4L2_COLORSPACE_SMPTE170M][V4L2_XFER_FUNC_OPRGB][2] = { 0, 3058, 3031 }, + [V4L2_COLORSPACE_SMPTE170M][V4L2_XFER_FUNC_OPRGB][3] = { 360, 3079, 877 }, + [V4L2_COLORSPACE_SMPTE170M][V4L2_XFER_FUNC_OPRGB][4] = { 3103, 587, 3027 }, + [V4L2_COLORSPACE_SMPTE170M][V4L2_XFER_FUNC_OPRGB][5] = { 
3116, 723, 861 }, + [V4L2_COLORSPACE_SMPTE170M][V4L2_XFER_FUNC_OPRGB][6] = { 789, 744, 3025 }, + [V4L2_COLORSPACE_SMPTE170M][V4L2_XFER_FUNC_OPRGB][7] = { 851, 851, 851 }, [V4L2_COLORSPACE_SMPTE170M][V4L2_XFER_FUNC_SMPTE240M][0] = { 2926, 2926, 2926 }, [V4L2_COLORSPACE_SMPTE170M][V4L2_XFER_FUNC_SMPTE240M][1] = { 2941, 2950, 546 }, [V4L2_COLORSPACE_SMPTE170M][V4L2_XFER_FUNC_SMPTE240M][2] = { 0, 2954, 2924 }, @@ -658,14 +658,14 @@ const struct tpg_rbg_color16 tpg_csc_colors[V4L2_COLORSPACE_DCI_P3 + 1][V4L2_XFE [V4L2_COLORSPACE_SMPTE240M][V4L2_XFER_FUNC_SRGB][5] = { 3138, 657, 810 }, [V4L2_COLORSPACE_SMPTE240M][V4L2_XFER_FUNC_SRGB][6] = { 731, 680, 3048 }, [V4L2_COLORSPACE_SMPTE240M][V4L2_XFER_FUNC_SRGB][7] = { 800, 799, 800 }, - [V4L2_COLORSPACE_SMPTE240M][V4L2_XFER_FUNC_ADOBERGB][0] = { 3033, 3033, 3033 }, - [V4L2_COLORSPACE_SMPTE240M][V4L2_XFER_FUNC_ADOBERGB][1] = { 3046, 3054, 886 }, - [V4L2_COLORSPACE_SMPTE240M][V4L2_XFER_FUNC_ADOBERGB][2] = { 0, 3058, 3031 }, - [V4L2_COLORSPACE_SMPTE240M][V4L2_XFER_FUNC_ADOBERGB][3] = { 360, 3079, 877 }, - [V4L2_COLORSPACE_SMPTE240M][V4L2_XFER_FUNC_ADOBERGB][4] = { 3103, 587, 3027 }, - [V4L2_COLORSPACE_SMPTE240M][V4L2_XFER_FUNC_ADOBERGB][5] = { 3116, 723, 861 }, - [V4L2_COLORSPACE_SMPTE240M][V4L2_XFER_FUNC_ADOBERGB][6] = { 789, 744, 3025 }, - [V4L2_COLORSPACE_SMPTE240M][V4L2_XFER_FUNC_ADOBERGB][7] = { 851, 851, 851 }, + [V4L2_COLORSPACE_SMPTE240M][V4L2_XFER_FUNC_OPRGB][0] = { 3033, 3033, 3033 }, + [V4L2_COLORSPACE_SMPTE240M][V4L2_XFER_FUNC_OPRGB][1] = { 3046, 3054, 886 }, + [V4L2_COLORSPACE_SMPTE240M][V4L2_XFER_FUNC_OPRGB][2] = { 0, 3058, 3031 }, + [V4L2_COLORSPACE_SMPTE240M][V4L2_XFER_FUNC_OPRGB][3] = { 360, 3079, 877 }, + [V4L2_COLORSPACE_SMPTE240M][V4L2_XFER_FUNC_OPRGB][4] = { 3103, 587, 3027 }, + [V4L2_COLORSPACE_SMPTE240M][V4L2_XFER_FUNC_OPRGB][5] = { 3116, 723, 861 }, + [V4L2_COLORSPACE_SMPTE240M][V4L2_XFER_FUNC_OPRGB][6] = { 789, 744, 3025 }, + [V4L2_COLORSPACE_SMPTE240M][V4L2_XFER_FUNC_OPRGB][7] = { 851, 851, 851 }, 
[V4L2_COLORSPACE_SMPTE240M][V4L2_XFER_FUNC_SMPTE240M][0] = { 2926, 2926, 2926 }, [V4L2_COLORSPACE_SMPTE240M][V4L2_XFER_FUNC_SMPTE240M][1] = { 2941, 2950, 546 }, [V4L2_COLORSPACE_SMPTE240M][V4L2_XFER_FUNC_SMPTE240M][2] = { 0, 2954, 2924 }, @@ -714,14 +714,14 @@ const struct tpg_rbg_color16 tpg_csc_colors[V4L2_COLORSPACE_DCI_P3 + 1][V4L2_XFE [V4L2_COLORSPACE_REC709][V4L2_XFER_FUNC_SRGB][5] = { 3056, 800, 800 }, [V4L2_COLORSPACE_REC709][V4L2_XFER_FUNC_SRGB][6] = { 800, 800, 3056 }, [V4L2_COLORSPACE_REC709][V4L2_XFER_FUNC_SRGB][7] = { 800, 800, 800 }, - [V4L2_COLORSPACE_REC709][V4L2_XFER_FUNC_ADOBERGB][0] = { 3033, 3033, 3033 }, - [V4L2_COLORSPACE_REC709][V4L2_XFER_FUNC_ADOBERGB][1] = { 3033, 3033, 851 }, - [V4L2_COLORSPACE_REC709][V4L2_XFER_FUNC_ADOBERGB][2] = { 851, 3033, 3033 }, - [V4L2_COLORSPACE_REC709][V4L2_XFER_FUNC_ADOBERGB][3] = { 851, 3033, 851 }, - [V4L2_COLORSPACE_REC709][V4L2_XFER_FUNC_ADOBERGB][4] = { 3033, 851, 3033 }, - [V4L2_COLORSPACE_REC709][V4L2_XFER_FUNC_ADOBERGB][5] = { 3033, 851, 851 }, - [V4L2_COLORSPACE_REC709][V4L2_XFER_FUNC_ADOBERGB][6] = { 851, 851, 3033 }, - [V4L2_COLORSPACE_REC709][V4L2_XFER_FUNC_ADOBERGB][7] = { 851, 851, 851 }, + [V4L2_COLORSPACE_REC709][V4L2_XFER_FUNC_OPRGB][0] = { 3033, 3033, 3033 }, + [V4L2_COLORSPACE_REC709][V4L2_XFER_FUNC_OPRGB][1] = { 3033, 3033, 851 }, + [V4L2_COLORSPACE_REC709][V4L2_XFER_FUNC_OPRGB][2] = { 851, 3033, 3033 }, + [V4L2_COLORSPACE_REC709][V4L2_XFER_FUNC_OPRGB][3] = { 851, 3033, 851 }, + [V4L2_COLORSPACE_REC709][V4L2_XFER_FUNC_OPRGB][4] = { 3033, 851, 3033 }, + [V4L2_COLORSPACE_REC709][V4L2_XFER_FUNC_OPRGB][5] = { 3033, 851, 851 }, + [V4L2_COLORSPACE_REC709][V4L2_XFER_FUNC_OPRGB][6] = { 851, 851, 3033 }, + [V4L2_COLORSPACE_REC709][V4L2_XFER_FUNC_OPRGB][7] = { 851, 851, 851 }, [V4L2_COLORSPACE_REC709][V4L2_XFER_FUNC_SMPTE240M][0] = { 2926, 2926, 2926 }, [V4L2_COLORSPACE_REC709][V4L2_XFER_FUNC_SMPTE240M][1] = { 2926, 2926, 507 }, [V4L2_COLORSPACE_REC709][V4L2_XFER_FUNC_SMPTE240M][2] = { 507, 2926, 2926 
}, @@ -770,14 +770,14 @@ const struct tpg_rbg_color16 tpg_csc_colors[V4L2_COLORSPACE_DCI_P3 + 1][V4L2_XFE [V4L2_COLORSPACE_470_SYSTEM_M][V4L2_XFER_FUNC_SRGB][5] = { 2599, 901, 909 }, [V4L2_COLORSPACE_470_SYSTEM_M][V4L2_XFER_FUNC_SRGB][6] = { 991, 0, 2966 }, [V4L2_COLORSPACE_470_SYSTEM_M][V4L2_XFER_FUNC_SRGB][7] = { 800, 799, 800 }, - [V4L2_COLORSPACE_470_SYSTEM_M][V4L2_XFER_FUNC_ADOBERGB][0] = { 3033, 3033, 3033 }, - [V4L2_COLORSPACE_470_SYSTEM_M][V4L2_XFER_FUNC_ADOBERGB][1] = { 2989, 3120, 1180 }, - [V4L2_COLORSPACE_470_SYSTEM_M][V4L2_XFER_FUNC_ADOBERGB][2] = { 1913, 3011, 3009 }, - [V4L2_COLORSPACE_470_SYSTEM_M][V4L2_XFER_FUNC_ADOBERGB][3] = { 1836, 3099, 1105 }, - [V4L2_COLORSPACE_470_SYSTEM_M][V4L2_XFER_FUNC_ADOBERGB][4] = { 2627, 413, 2966 }, - [V4L2_COLORSPACE_470_SYSTEM_M][V4L2_XFER_FUNC_ADOBERGB][5] = { 2576, 943, 951 }, - [V4L2_COLORSPACE_470_SYSTEM_M][V4L2_XFER_FUNC_ADOBERGB][6] = { 1026, 0, 2942 }, - [V4L2_COLORSPACE_470_SYSTEM_M][V4L2_XFER_FUNC_ADOBERGB][7] = { 851, 851, 851 }, + [V4L2_COLORSPACE_470_SYSTEM_M][V4L2_XFER_FUNC_OPRGB][0] = { 3033, 3033, 3033 }, + [V4L2_COLORSPACE_470_SYSTEM_M][V4L2_XFER_FUNC_OPRGB][1] = { 2989, 3120, 1180 }, + [V4L2_COLORSPACE_470_SYSTEM_M][V4L2_XFER_FUNC_OPRGB][2] = { 1913, 3011, 3009 }, + [V4L2_COLORSPACE_470_SYSTEM_M][V4L2_XFER_FUNC_OPRGB][3] = { 1836, 3099, 1105 }, + [V4L2_COLORSPACE_470_SYSTEM_M][V4L2_XFER_FUNC_OPRGB][4] = { 2627, 413, 2966 }, + [V4L2_COLORSPACE_470_SYSTEM_M][V4L2_XFER_FUNC_OPRGB][5] = { 2576, 943, 951 }, + [V4L2_COLORSPACE_470_SYSTEM_M][V4L2_XFER_FUNC_OPRGB][6] = { 1026, 0, 2942 }, + [V4L2_COLORSPACE_470_SYSTEM_M][V4L2_XFER_FUNC_OPRGB][7] = { 851, 851, 851 }, [V4L2_COLORSPACE_470_SYSTEM_M][V4L2_XFER_FUNC_SMPTE240M][0] = { 2926, 2926, 2926 }, [V4L2_COLORSPACE_470_SYSTEM_M][V4L2_XFER_FUNC_SMPTE240M][1] = { 2879, 3022, 874 }, [V4L2_COLORSPACE_470_SYSTEM_M][V4L2_XFER_FUNC_SMPTE240M][2] = { 1688, 2903, 2901 }, @@ -826,14 +826,14 @@ const struct tpg_rbg_color16 tpg_csc_colors[V4L2_COLORSPACE_DCI_P3 + 
1][V4L2_XFE [V4L2_COLORSPACE_470_SYSTEM_BG][V4L2_XFER_FUNC_SRGB][5] = { 3001, 800, 799 }, [V4L2_COLORSPACE_470_SYSTEM_BG][V4L2_XFER_FUNC_SRGB][6] = { 800, 800, 3071 }, [V4L2_COLORSPACE_470_SYSTEM_BG][V4L2_XFER_FUNC_SRGB][7] = { 800, 800, 799 }, - [V4L2_COLORSPACE_470_SYSTEM_BG][V4L2_XFER_FUNC_ADOBERGB][0] = { 3033, 3033, 3033 }, - [V4L2_COLORSPACE_470_SYSTEM_BG][V4L2_XFER_FUNC_ADOBERGB][1] = { 3033, 3033, 776 }, - [V4L2_COLORSPACE_470_SYSTEM_BG][V4L2_XFER_FUNC_ADOBERGB][2] = { 1068, 3033, 3033 }, - [V4L2_COLORSPACE_470_SYSTEM_BG][V4L2_XFER_FUNC_ADOBERGB][3] = { 1068, 3033, 776 }, - [V4L2_COLORSPACE_470_SYSTEM_BG][V4L2_XFER_FUNC_ADOBERGB][4] = { 2977, 851, 3048 }, - [V4L2_COLORSPACE_470_SYSTEM_BG][V4L2_XFER_FUNC_ADOBERGB][5] = { 2977, 851, 851 }, - [V4L2_COLORSPACE_470_SYSTEM_BG][V4L2_XFER_FUNC_ADOBERGB][6] = { 851, 851, 3048 }, - [V4L2_COLORSPACE_470_SYSTEM_BG][V4L2_XFER_FUNC_ADOBERGB][7] = { 851, 851, 851 }, + [V4L2_COLORSPACE_470_SYSTEM_BG][V4L2_XFER_FUNC_OPRGB][0] = { 3033, 3033, 3033 }, + [V4L2_COLORSPACE_470_SYSTEM_BG][V4L2_XFER_FUNC_OPRGB][1] = { 3033, 3033, 776 }, + [V4L2_COLORSPACE_470_SYSTEM_BG][V4L2_XFER_FUNC_OPRGB][2] = { 1068, 3033, 3033 }, + [V4L2_COLORSPACE_470_SYSTEM_BG][V4L2_XFER_FUNC_OPRGB][3] = { 1068, 3033, 776 }, + [V4L2_COLORSPACE_470_SYSTEM_BG][V4L2_XFER_FUNC_OPRGB][4] = { 2977, 851, 3048 }, + [V4L2_COLORSPACE_470_SYSTEM_BG][V4L2_XFER_FUNC_OPRGB][5] = { 2977, 851, 851 }, + [V4L2_COLORSPACE_470_SYSTEM_BG][V4L2_XFER_FUNC_OPRGB][6] = { 851, 851, 3048 }, + [V4L2_COLORSPACE_470_SYSTEM_BG][V4L2_XFER_FUNC_OPRGB][7] = { 851, 851, 851 }, [V4L2_COLORSPACE_470_SYSTEM_BG][V4L2_XFER_FUNC_SMPTE240M][0] = { 2926, 2926, 2926 }, [V4L2_COLORSPACE_470_SYSTEM_BG][V4L2_XFER_FUNC_SMPTE240M][1] = { 2926, 2926, 423 }, [V4L2_COLORSPACE_470_SYSTEM_BG][V4L2_XFER_FUNC_SMPTE240M][2] = { 749, 2926, 2926 }, @@ -882,14 +882,14 @@ const struct tpg_rbg_color16 tpg_csc_colors[V4L2_COLORSPACE_DCI_P3 + 1][V4L2_XFE [V4L2_COLORSPACE_SRGB][V4L2_XFER_FUNC_SRGB][5] = { 3056, 800, 800 
}, [V4L2_COLORSPACE_SRGB][V4L2_XFER_FUNC_SRGB][6] = { 800, 800, 3056 }, [V4L2_COLORSPACE_SRGB][V4L2_XFER_FUNC_SRGB][7] = { 800, 800, 800 }, - [V4L2_COLORSPACE_SRGB][V4L2_XFER_FUNC_ADOBERGB][0] = { 3033, 3033, 3033 }, - [V4L2_COLORSPACE_SRGB][V4L2_XFER_FUNC_ADOBERGB][1] = { 3033, 3033, 851 }, - [V4L2_COLORSPACE_SRGB][V4L2_XFER_FUNC_ADOBERGB][2] = { 851, 3033, 3033 }, - [V4L2_COLORSPACE_SRGB][V4L2_XFER_FUNC_ADOBERGB][3] = { 851, 3033, 851 }, - [V4L2_COLORSPACE_SRGB][V4L2_XFER_FUNC_ADOBERGB][4] = { 3033, 851, 3033 }, - [V4L2_COLORSPACE_SRGB][V4L2_XFER_FUNC_ADOBERGB][5] = { 3033, 851, 851 }, - [V4L2_COLORSPACE_SRGB][V4L2_XFER_FUNC_ADOBERGB][6] = { 851, 851, 3033 }, - [V4L2_COLORSPACE_SRGB][V4L2_XFER_FUNC_ADOBERGB][7] = { 851, 851, 851 }, + [V4L2_COLORSPACE_SRGB][V4L2_XFER_FUNC_OPRGB][0] = { 3033, 3033, 3033 }, + [V4L2_COLORSPACE_SRGB][V4L2_XFER_FUNC_OPRGB][1] = { 3033, 3033, 851 }, + [V4L2_COLORSPACE_SRGB][V4L2_XFER_FUNC_OPRGB][2] = { 851, 3033, 3033 }, + [V4L2_COLORSPACE_SRGB][V4L2_XFER_FUNC_OPRGB][3] = { 851, 3033, 851 }, + [V4L2_COLORSPACE_SRGB][V4L2_XFER_FUNC_OPRGB][4] = { 3033, 851, 3033 }, + [V4L2_COLORSPACE_SRGB][V4L2_XFER_FUNC_OPRGB][5] = { 3033, 851, 851 }, + [V4L2_COLORSPACE_SRGB][V4L2_XFER_FUNC_OPRGB][6] = { 851, 851, 3033 }, + [V4L2_COLORSPACE_SRGB][V4L2_XFER_FUNC_OPRGB][7] = { 851, 851, 851 }, [V4L2_COLORSPACE_SRGB][V4L2_XFER_FUNC_SMPTE240M][0] = { 2926, 2926, 2926 }, [V4L2_COLORSPACE_SRGB][V4L2_XFER_FUNC_SMPTE240M][1] = { 2926, 2926, 507 }, [V4L2_COLORSPACE_SRGB][V4L2_XFER_FUNC_SMPTE240M][2] = { 507, 2926, 2926 }, @@ -922,62 +922,62 @@ const struct tpg_rbg_color16 tpg_csc_colors[V4L2_COLORSPACE_DCI_P3 + 1][V4L2_XFE [V4L2_COLORSPACE_SRGB][V4L2_XFER_FUNC_SMPTE2084][5] = { 1812, 886, 886 }, [V4L2_COLORSPACE_SRGB][V4L2_XFER_FUNC_SMPTE2084][6] = { 886, 886, 1812 }, [V4L2_COLORSPACE_SRGB][V4L2_XFER_FUNC_SMPTE2084][7] = { 886, 886, 886 }, - [V4L2_COLORSPACE_ADOBERGB][V4L2_XFER_FUNC_709][0] = { 2939, 2939, 2939 }, - 
[V4L2_COLORSPACE_ADOBERGB][V4L2_XFER_FUNC_709][1] = { 2939, 2939, 781 }, - [V4L2_COLORSPACE_ADOBERGB][V4L2_XFER_FUNC_709][2] = { 1622, 2939, 2939 }, - [V4L2_COLORSPACE_ADOBERGB][V4L2_XFER_FUNC_709][3] = { 1622, 2939, 781 }, - [V4L2_COLORSPACE_ADOBERGB][V4L2_XFER_FUNC_709][4] = { 2502, 547, 2881 }, - [V4L2_COLORSPACE_ADOBERGB][V4L2_XFER_FUNC_709][5] = { 2502, 547, 547 }, - [V4L2_COLORSPACE_ADOBERGB][V4L2_XFER_FUNC_709][6] = { 547, 547, 2881 }, - [V4L2_COLORSPACE_ADOBERGB][V4L2_XFER_FUNC_709][7] = { 547, 547, 547 }, - [V4L2_COLORSPACE_ADOBERGB][V4L2_XFER_FUNC_SRGB][0] = { 3056, 3056, 3056 }, - [V4L2_COLORSPACE_ADOBERGB][V4L2_XFER_FUNC_SRGB][1] = { 3056, 3056, 1031 }, - [V4L2_COLORSPACE_ADOBERGB][V4L2_XFER_FUNC_SRGB][2] = { 1838, 3056, 3056 }, - [V4L2_COLORSPACE_ADOBERGB][V4L2_XFER_FUNC_SRGB][3] = { 1838, 3056, 1031 }, - [V4L2_COLORSPACE_ADOBERGB][V4L2_XFER_FUNC_SRGB][4] = { 2657, 800, 3002 }, - [V4L2_COLORSPACE_ADOBERGB][V4L2_XFER_FUNC_SRGB][5] = { 2657, 800, 800 }, - [V4L2_COLORSPACE_ADOBERGB][V4L2_XFER_FUNC_SRGB][6] = { 800, 800, 3002 }, - [V4L2_COLORSPACE_ADOBERGB][V4L2_XFER_FUNC_SRGB][7] = { 800, 800, 800 }, - [V4L2_COLORSPACE_ADOBERGB][V4L2_XFER_FUNC_ADOBERGB][0] = { 3033, 3033, 3033 }, - [V4L2_COLORSPACE_ADOBERGB][V4L2_XFER_FUNC_ADOBERGB][1] = { 3033, 3033, 1063 }, - [V4L2_COLORSPACE_ADOBERGB][V4L2_XFER_FUNC_ADOBERGB][2] = { 1828, 3033, 3033 }, - [V4L2_COLORSPACE_ADOBERGB][V4L2_XFER_FUNC_ADOBERGB][3] = { 1828, 3033, 1063 }, - [V4L2_COLORSPACE_ADOBERGB][V4L2_XFER_FUNC_ADOBERGB][4] = { 2633, 851, 2979 }, - [V4L2_COLORSPACE_ADOBERGB][V4L2_XFER_FUNC_ADOBERGB][5] = { 2633, 851, 851 }, - [V4L2_COLORSPACE_ADOBERGB][V4L2_XFER_FUNC_ADOBERGB][6] = { 851, 851, 2979 }, - [V4L2_COLORSPACE_ADOBERGB][V4L2_XFER_FUNC_ADOBERGB][7] = { 851, 851, 851 }, - [V4L2_COLORSPACE_ADOBERGB][V4L2_XFER_FUNC_SMPTE240M][0] = { 2926, 2926, 2926 }, - [V4L2_COLORSPACE_ADOBERGB][V4L2_XFER_FUNC_SMPTE240M][1] = { 2926, 2926, 744 }, - [V4L2_COLORSPACE_ADOBERGB][V4L2_XFER_FUNC_SMPTE240M][2] = { 1594, 
2926, 2926 }, - [V4L2_COLORSPACE_ADOBERGB][V4L2_XFER_FUNC_SMPTE240M][3] = { 1594, 2926, 744 }, - [V4L2_COLORSPACE_ADOBERGB][V4L2_XFER_FUNC_SMPTE240M][4] = { 2484, 507, 2867 }, - [V4L2_COLORSPACE_ADOBERGB][V4L2_XFER_FUNC_SMPTE240M][5] = { 2484, 507, 507 }, - [V4L2_COLORSPACE_ADOBERGB][V4L2_XFER_FUNC_SMPTE240M][6] = { 507, 507, 2867 }, - [V4L2_COLORSPACE_ADOBERGB][V4L2_XFER_FUNC_SMPTE240M][7] = { 507, 507, 507 }, - [V4L2_COLORSPACE_ADOBERGB][V4L2_XFER_FUNC_NONE][0] = { 2125, 2125, 2125 }, - [V4L2_COLORSPACE_ADOBERGB][V4L2_XFER_FUNC_NONE][1] = { 2125, 2125, 212 }, - [V4L2_COLORSPACE_ADOBERGB][V4L2_XFER_FUNC_NONE][2] = { 698, 2125, 2125 }, - [V4L2_COLORSPACE_ADOBERGB][V4L2_XFER_FUNC_NONE][3] = { 698, 2125, 212 }, - [V4L2_COLORSPACE_ADOBERGB][V4L2_XFER_FUNC_NONE][4] = { 1557, 130, 2043 }, - [V4L2_COLORSPACE_ADOBERGB][V4L2_XFER_FUNC_NONE][5] = { 1557, 130, 130 }, - [V4L2_COLORSPACE_ADOBERGB][V4L2_XFER_FUNC_NONE][6] = { 130, 130, 2043 }, - [V4L2_COLORSPACE_ADOBERGB][V4L2_XFER_FUNC_NONE][7] = { 130, 130, 130 }, - [V4L2_COLORSPACE_ADOBERGB][V4L2_XFER_FUNC_DCI_P3][0] = { 3175, 3175, 3175 }, - [V4L2_COLORSPACE_ADOBERGB][V4L2_XFER_FUNC_DCI_P3][1] = { 3175, 3175, 1308 }, - [V4L2_COLORSPACE_ADOBERGB][V4L2_XFER_FUNC_DCI_P3][2] = { 2069, 3175, 3175 }, - [V4L2_COLORSPACE_ADOBERGB][V4L2_XFER_FUNC_DCI_P3][3] = { 2069, 3175, 1308 }, - [V4L2_COLORSPACE_ADOBERGB][V4L2_XFER_FUNC_DCI_P3][4] = { 2816, 1084, 3127 }, - [V4L2_COLORSPACE_ADOBERGB][V4L2_XFER_FUNC_DCI_P3][5] = { 2816, 1084, 1084 }, - [V4L2_COLORSPACE_ADOBERGB][V4L2_XFER_FUNC_DCI_P3][6] = { 1084, 1084, 3127 }, - [V4L2_COLORSPACE_ADOBERGB][V4L2_XFER_FUNC_DCI_P3][7] = { 1084, 1084, 1084 }, - [V4L2_COLORSPACE_ADOBERGB][V4L2_XFER_FUNC_SMPTE2084][0] = { 1812, 1812, 1812 }, - [V4L2_COLORSPACE_ADOBERGB][V4L2_XFER_FUNC_SMPTE2084][1] = { 1812, 1812, 1022 }, - [V4L2_COLORSPACE_ADOBERGB][V4L2_XFER_FUNC_SMPTE2084][2] = { 1402, 1812, 1812 }, - [V4L2_COLORSPACE_ADOBERGB][V4L2_XFER_FUNC_SMPTE2084][3] = { 1402, 1812, 1022 }, - 
[V4L2_COLORSPACE_ADOBERGB][V4L2_XFER_FUNC_SMPTE2084][4] = { 1692, 886, 1797 }, - [V4L2_COLORSPACE_ADOBERGB][V4L2_XFER_FUNC_SMPTE2084][5] = { 1692, 886, 886 }, - [V4L2_COLORSPACE_ADOBERGB][V4L2_XFER_FUNC_SMPTE2084][6] = { 886, 886, 1797 }, - [V4L2_COLORSPACE_ADOBERGB][V4L2_XFER_FUNC_SMPTE2084][7] = { 886, 886, 886 }, + [V4L2_COLORSPACE_OPRGB][V4L2_XFER_FUNC_709][0] = { 2939, 2939, 2939 }, + [V4L2_COLORSPACE_OPRGB][V4L2_XFER_FUNC_709][1] = { 2939, 2939, 781 }, + [V4L2_COLORSPACE_OPRGB][V4L2_XFER_FUNC_709][2] = { 1622, 2939, 2939 }, + [V4L2_COLORSPACE_OPRGB][V4L2_XFER_FUNC_709][3] = { 1622, 2939, 781 }, + [V4L2_COLORSPACE_OPRGB][V4L2_XFER_FUNC_709][4] = { 2502, 547, 2881 }, + [V4L2_COLORSPACE_OPRGB][V4L2_XFER_FUNC_709][5] = { 2502, 547, 547 }, + [V4L2_COLORSPACE_OPRGB][V4L2_XFER_FUNC_709][6] = { 547, 547, 2881 }, + [V4L2_COLORSPACE_OPRGB][V4L2_XFER_FUNC_709][7] = { 547, 547, 547 }, + [V4L2_COLORSPACE_OPRGB][V4L2_XFER_FUNC_SRGB][0] = { 3056, 3056, 3056 }, + [V4L2_COLORSPACE_OPRGB][V4L2_XFER_FUNC_SRGB][1] = { 3056, 3056, 1031 }, + [V4L2_COLORSPACE_OPRGB][V4L2_XFER_FUNC_SRGB][2] = { 1838, 3056, 3056 }, + [V4L2_COLORSPACE_OPRGB][V4L2_XFER_FUNC_SRGB][3] = { 1838, 3056, 1031 }, + [V4L2_COLORSPACE_OPRGB][V4L2_XFER_FUNC_SRGB][4] = { 2657, 800, 3002 }, + [V4L2_COLORSPACE_OPRGB][V4L2_XFER_FUNC_SRGB][5] = { 2657, 800, 800 }, + [V4L2_COLORSPACE_OPRGB][V4L2_XFER_FUNC_SRGB][6] = { 800, 800, 3002 }, + [V4L2_COLORSPACE_OPRGB][V4L2_XFER_FUNC_SRGB][7] = { 800, 800, 800 }, + [V4L2_COLORSPACE_OPRGB][V4L2_XFER_FUNC_OPRGB][0] = { 3033, 3033, 3033 }, + [V4L2_COLORSPACE_OPRGB][V4L2_XFER_FUNC_OPRGB][1] = { 3033, 3033, 1063 }, + [V4L2_COLORSPACE_OPRGB][V4L2_XFER_FUNC_OPRGB][2] = { 1828, 3033, 3033 }, + [V4L2_COLORSPACE_OPRGB][V4L2_XFER_FUNC_OPRGB][3] = { 1828, 3033, 1063 }, + [V4L2_COLORSPACE_OPRGB][V4L2_XFER_FUNC_OPRGB][4] = { 2633, 851, 2979 }, + [V4L2_COLORSPACE_OPRGB][V4L2_XFER_FUNC_OPRGB][5] = { 2633, 851, 851 }, + [V4L2_COLORSPACE_OPRGB][V4L2_XFER_FUNC_OPRGB][6] = { 851, 851, 2979 }, + 
[V4L2_COLORSPACE_OPRGB][V4L2_XFER_FUNC_OPRGB][7] = { 851, 851, 851 }, + [V4L2_COLORSPACE_OPRGB][V4L2_XFER_FUNC_SMPTE240M][0] = { 2926, 2926, 2926 }, + [V4L2_COLORSPACE_OPRGB][V4L2_XFER_FUNC_SMPTE240M][1] = { 2926, 2926, 744 }, + [V4L2_COLORSPACE_OPRGB][V4L2_XFER_FUNC_SMPTE240M][2] = { 1594, 2926, 2926 }, + [V4L2_COLORSPACE_OPRGB][V4L2_XFER_FUNC_SMPTE240M][3] = { 1594, 2926, 744 }, + [V4L2_COLORSPACE_OPRGB][V4L2_XFER_FUNC_SMPTE240M][4] = { 2484, 507, 2867 }, + [V4L2_COLORSPACE_OPRGB][V4L2_XFER_FUNC_SMPTE240M][5] = { 2484, 507, 507 }, + [V4L2_COLORSPACE_OPRGB][V4L2_XFER_FUNC_SMPTE240M][6] = { 507, 507, 2867 }, + [V4L2_COLORSPACE_OPRGB][V4L2_XFER_FUNC_SMPTE240M][7] = { 507, 507, 507 }, + [V4L2_COLORSPACE_OPRGB][V4L2_XFER_FUNC_NONE][0] = { 2125, 2125, 2125 }, + [V4L2_COLORSPACE_OPRGB][V4L2_XFER_FUNC_NONE][1] = { 2125, 2125, 212 }, + [V4L2_COLORSPACE_OPRGB][V4L2_XFER_FUNC_NONE][2] = { 698, 2125, 2125 }, + [V4L2_COLORSPACE_OPRGB][V4L2_XFER_FUNC_NONE][3] = { 698, 2125, 212 }, + [V4L2_COLORSPACE_OPRGB][V4L2_XFER_FUNC_NONE][4] = { 1557, 130, 2043 }, + [V4L2_COLORSPACE_OPRGB][V4L2_XFER_FUNC_NONE][5] = { 1557, 130, 130 }, + [V4L2_COLORSPACE_OPRGB][V4L2_XFER_FUNC_NONE][6] = { 130, 130, 2043 }, + [V4L2_COLORSPACE_OPRGB][V4L2_XFER_FUNC_NONE][7] = { 130, 130, 130 }, + [V4L2_COLORSPACE_OPRGB][V4L2_XFER_FUNC_DCI_P3][0] = { 3175, 3175, 3175 }, + [V4L2_COLORSPACE_OPRGB][V4L2_XFER_FUNC_DCI_P3][1] = { 3175, 3175, 1308 }, + [V4L2_COLORSPACE_OPRGB][V4L2_XFER_FUNC_DCI_P3][2] = { 2069, 3175, 3175 }, + [V4L2_COLORSPACE_OPRGB][V4L2_XFER_FUNC_DCI_P3][3] = { 2069, 3175, 1308 }, + [V4L2_COLORSPACE_OPRGB][V4L2_XFER_FUNC_DCI_P3][4] = { 2816, 1084, 3127 }, + [V4L2_COLORSPACE_OPRGB][V4L2_XFER_FUNC_DCI_P3][5] = { 2816, 1084, 1084 }, + [V4L2_COLORSPACE_OPRGB][V4L2_XFER_FUNC_DCI_P3][6] = { 1084, 1084, 3127 }, + [V4L2_COLORSPACE_OPRGB][V4L2_XFER_FUNC_DCI_P3][7] = { 1084, 1084, 1084 }, + [V4L2_COLORSPACE_OPRGB][V4L2_XFER_FUNC_SMPTE2084][0] = { 1812, 1812, 1812 }, + 
[V4L2_COLORSPACE_OPRGB][V4L2_XFER_FUNC_SMPTE2084][1] = { 1812, 1812, 1022 }, + [V4L2_COLORSPACE_OPRGB][V4L2_XFER_FUNC_SMPTE2084][2] = { 1402, 1812, 1812 }, + [V4L2_COLORSPACE_OPRGB][V4L2_XFER_FUNC_SMPTE2084][3] = { 1402, 1812, 1022 }, + [V4L2_COLORSPACE_OPRGB][V4L2_XFER_FUNC_SMPTE2084][4] = { 1692, 886, 1797 }, + [V4L2_COLORSPACE_OPRGB][V4L2_XFER_FUNC_SMPTE2084][5] = { 1692, 886, 886 }, + [V4L2_COLORSPACE_OPRGB][V4L2_XFER_FUNC_SMPTE2084][6] = { 886, 886, 1797 }, + [V4L2_COLORSPACE_OPRGB][V4L2_XFER_FUNC_SMPTE2084][7] = { 886, 886, 886 }, [V4L2_COLORSPACE_BT2020][V4L2_XFER_FUNC_709][0] = { 2939, 2939, 2939 }, [V4L2_COLORSPACE_BT2020][V4L2_XFER_FUNC_709][1] = { 2877, 2923, 1058 }, [V4L2_COLORSPACE_BT2020][V4L2_XFER_FUNC_709][2] = { 1837, 2840, 2916 }, @@ -994,14 +994,14 @@ const struct tpg_rbg_color16 tpg_csc_colors[V4L2_COLORSPACE_DCI_P3 + 1][V4L2_XFE [V4L2_COLORSPACE_BT2020][V4L2_XFER_FUNC_SRGB][5] = { 2517, 1159, 900 }, [V4L2_COLORSPACE_BT2020][V4L2_XFER_FUNC_SRGB][6] = { 1042, 870, 2917 }, [V4L2_COLORSPACE_BT2020][V4L2_XFER_FUNC_SRGB][7] = { 800, 800, 800 }, - [V4L2_COLORSPACE_BT2020][V4L2_XFER_FUNC_ADOBERGB][0] = { 3033, 3033, 3033 }, - [V4L2_COLORSPACE_BT2020][V4L2_XFER_FUNC_ADOBERGB][1] = { 2976, 3018, 1315 }, - [V4L2_COLORSPACE_BT2020][V4L2_XFER_FUNC_ADOBERGB][2] = { 2024, 2942, 3011 }, - [V4L2_COLORSPACE_BT2020][V4L2_XFER_FUNC_ADOBERGB][3] = { 1930, 2926, 1256 }, - [V4L2_COLORSPACE_BT2020][V4L2_XFER_FUNC_ADOBERGB][4] = { 2563, 1227, 2916 }, - [V4L2_COLORSPACE_BT2020][V4L2_XFER_FUNC_ADOBERGB][5] = { 2494, 1183, 943 }, - [V4L2_COLORSPACE_BT2020][V4L2_XFER_FUNC_ADOBERGB][6] = { 1073, 916, 2894 }, - [V4L2_COLORSPACE_BT2020][V4L2_XFER_FUNC_ADOBERGB][7] = { 851, 851, 851 }, + [V4L2_COLORSPACE_BT2020][V4L2_XFER_FUNC_OPRGB][0] = { 3033, 3033, 3033 }, + [V4L2_COLORSPACE_BT2020][V4L2_XFER_FUNC_OPRGB][1] = { 2976, 3018, 1315 }, + [V4L2_COLORSPACE_BT2020][V4L2_XFER_FUNC_OPRGB][2] = { 2024, 2942, 3011 }, + [V4L2_COLORSPACE_BT2020][V4L2_XFER_FUNC_OPRGB][3] = { 1930, 2926, 
1256 }, + [V4L2_COLORSPACE_BT2020][V4L2_XFER_FUNC_OPRGB][4] = { 2563, 1227, 2916 }, + [V4L2_COLORSPACE_BT2020][V4L2_XFER_FUNC_OPRGB][5] = { 2494, 1183, 943 }, + [V4L2_COLORSPACE_BT2020][V4L2_XFER_FUNC_OPRGB][6] = { 1073, 916, 2894 }, + [V4L2_COLORSPACE_BT2020][V4L2_XFER_FUNC_OPRGB][7] = { 851, 851, 851 }, [V4L2_COLORSPACE_BT2020][V4L2_XFER_FUNC_SMPTE240M][0] = { 2926, 2926, 2926 }, [V4L2_COLORSPACE_BT2020][V4L2_XFER_FUNC_SMPTE240M][1] = { 2864, 2910, 1024 }, [V4L2_COLORSPACE_BT2020][V4L2_XFER_FUNC_SMPTE240M][2] = { 1811, 2826, 2903 }, @@ -1050,14 +1050,14 @@ const struct tpg_rbg_color16 tpg_csc_colors[V4L2_COLORSPACE_DCI_P3 + 1][V4L2_XFE [V4L2_COLORSPACE_DCI_P3][V4L2_XFER_FUNC_SRGB][5] = { 2880, 998, 902 }, [V4L2_COLORSPACE_DCI_P3][V4L2_XFER_FUNC_SRGB][6] = { 816, 823, 2940 }, [V4L2_COLORSPACE_DCI_P3][V4L2_XFER_FUNC_SRGB][7] = { 800, 800, 799 }, - [V4L2_COLORSPACE_DCI_P3][V4L2_XFER_FUNC_ADOBERGB][0] = { 3033, 3033, 3033 }, - [V4L2_COLORSPACE_DCI_P3][V4L2_XFER_FUNC_ADOBERGB][1] = { 3029, 3028, 1255 }, - [V4L2_COLORSPACE_DCI_P3][V4L2_XFER_FUNC_ADOBERGB][2] = { 1406, 2988, 3011 }, - [V4L2_COLORSPACE_DCI_P3][V4L2_XFER_FUNC_ADOBERGB][3] = { 1398, 2983, 1190 }, - [V4L2_COLORSPACE_DCI_P3][V4L2_XFER_FUNC_ADOBERGB][4] = { 2860, 1050, 2939 }, - [V4L2_COLORSPACE_DCI_P3][V4L2_XFER_FUNC_ADOBERGB][5] = { 2857, 1033, 945 }, - [V4L2_COLORSPACE_DCI_P3][V4L2_XFER_FUNC_ADOBERGB][6] = { 866, 873, 2916 }, - [V4L2_COLORSPACE_DCI_P3][V4L2_XFER_FUNC_ADOBERGB][7] = { 851, 851, 851 }, + [V4L2_COLORSPACE_DCI_P3][V4L2_XFER_FUNC_OPRGB][0] = { 3033, 3033, 3033 }, + [V4L2_COLORSPACE_DCI_P3][V4L2_XFER_FUNC_OPRGB][1] = { 3029, 3028, 1255 }, + [V4L2_COLORSPACE_DCI_P3][V4L2_XFER_FUNC_OPRGB][2] = { 1406, 2988, 3011 }, + [V4L2_COLORSPACE_DCI_P3][V4L2_XFER_FUNC_OPRGB][3] = { 1398, 2983, 1190 }, + [V4L2_COLORSPACE_DCI_P3][V4L2_XFER_FUNC_OPRGB][4] = { 2860, 1050, 2939 }, + [V4L2_COLORSPACE_DCI_P3][V4L2_XFER_FUNC_OPRGB][5] = { 2857, 1033, 945 }, + [V4L2_COLORSPACE_DCI_P3][V4L2_XFER_FUNC_OPRGB][6] = { 866, 
873, 2916 }, + [V4L2_COLORSPACE_DCI_P3][V4L2_XFER_FUNC_OPRGB][7] = { 851, 851, 851 }, [V4L2_COLORSPACE_DCI_P3][V4L2_XFER_FUNC_SMPTE240M][0] = { 2926, 2926, 2926 }, [V4L2_COLORSPACE_DCI_P3][V4L2_XFER_FUNC_SMPTE240M][1] = { 2923, 2921, 957 }, [V4L2_COLORSPACE_DCI_P3][V4L2_XFER_FUNC_SMPTE240M][2] = { 1125, 2877, 2902 }, @@ -1128,7 +1128,7 @@ static const double rec709_to_240m[3][3] = { { 0.0016327, 0.0044133, 0.9939540 }, }; -static const double rec709_to_adobergb[3][3] = { +static const double rec709_to_oprgb[3][3] = { { 0.7151627, 0.2848373, -0.0000000 }, { 0.0000000, 1.0000000, 0.0000000 }, { -0.0000000, 0.0411705, 0.9588295 }, @@ -1195,7 +1195,7 @@ static double transfer_rec709_to_rgb(double v) return (v < 0.081) ? v / 4.5 : pow((v + 0.099) / 1.099, 1.0 / 0.45); } -static double transfer_rgb_to_adobergb(double v) +static double transfer_rgb_to_oprgb(double v) { return pow(v, 1.0 / 2.19921875); } @@ -1251,8 +1251,8 @@ static void csc(enum v4l2_colorspace colorspace, enum v4l2_xfer_func xfer_func, case V4L2_COLORSPACE_470_SYSTEM_M: mult_matrix(r, g, b, rec709_to_ntsc1953); break; - case V4L2_COLORSPACE_ADOBERGB: - mult_matrix(r, g, b, rec709_to_adobergb); + case V4L2_COLORSPACE_OPRGB: + mult_matrix(r, g, b, rec709_to_oprgb); break; case V4L2_COLORSPACE_BT2020: mult_matrix(r, g, b, rec709_to_bt2020); @@ -1284,10 +1284,10 @@ static void csc(enum v4l2_colorspace colorspace, enum v4l2_xfer_func xfer_func, *g = transfer_rgb_to_srgb(*g); *b = transfer_rgb_to_srgb(*b); break; - case V4L2_XFER_FUNC_ADOBERGB: - *r = transfer_rgb_to_adobergb(*r); - *g = transfer_rgb_to_adobergb(*g); - *b = transfer_rgb_to_adobergb(*b); + case V4L2_XFER_FUNC_OPRGB: + *r = transfer_rgb_to_oprgb(*r); + *g = transfer_rgb_to_oprgb(*g); + *b = transfer_rgb_to_oprgb(*b); break; case V4L2_XFER_FUNC_DCI_P3: *r = transfer_rgb_to_dcip3(*r); @@ -1321,7 +1321,7 @@ int main(int argc, char **argv) V4L2_COLORSPACE_470_SYSTEM_BG, 0, V4L2_COLORSPACE_SRGB, - V4L2_COLORSPACE_ADOBERGB, + V4L2_COLORSPACE_OPRGB, 
V4L2_COLORSPACE_BT2020, 0, V4L2_COLORSPACE_DCI_P3, @@ -1336,7 +1336,7 @@ int main(int argc, char **argv) "V4L2_COLORSPACE_470_SYSTEM_BG", "", "V4L2_COLORSPACE_SRGB", - "V4L2_COLORSPACE_ADOBERGB", + "V4L2_COLORSPACE_OPRGB", "V4L2_COLORSPACE_BT2020", "", "V4L2_COLORSPACE_DCI_P3", @@ -1345,7 +1345,7 @@ int main(int argc, char **argv) "", "V4L2_XFER_FUNC_709", "V4L2_XFER_FUNC_SRGB", - "V4L2_XFER_FUNC_ADOBERGB", + "V4L2_XFER_FUNC_OPRGB", "V4L2_XFER_FUNC_SMPTE240M", "V4L2_XFER_FUNC_NONE", "V4L2_XFER_FUNC_DCI_P3", diff --git a/drivers/media/common/v4l2-tpg/v4l2-tpg-core.c b/drivers/media/common/v4l2-tpg/v4l2-tpg-core.c index abd4c788dffde..2036b94269afe 100644 --- a/drivers/media/common/v4l2-tpg/v4l2-tpg-core.c +++ b/drivers/media/common/v4l2-tpg/v4l2-tpg-core.c @@ -1738,7 +1738,7 @@ typedef struct { u16 __; u8 _; } __packed x24; unsigned s; \ \ for (s = 0; s < len; s++) { \ - u8 chr = font8x16[text[s] * 16 + line]; \ + u8 chr = font8x16[(u8)text[s] * 16 + line]; \ \ if (hdiv == 2 && tpg->hflip) { \ pos[3] = (chr & (0x01 << 6) ? fg : bg); \ @@ -1770,7 +1770,7 @@ typedef struct { u16 __; u8 _; } __packed x24; pos[7] = (chr & (0x01 << 0) ? fg : bg); \ } \ \ - pos += (tpg->hflip ? -8 : 8) / hdiv; \ + pos += (tpg->hflip ? 
-8 : 8) / (int)hdiv; \ } \ } \ } while (0) diff --git a/drivers/media/common/videobuf2/videobuf2-core.c b/drivers/media/common/videobuf2/videobuf2-core.c index 5653e8eebe2b1..6889c25c62cbd 100644 --- a/drivers/media/common/videobuf2/videobuf2-core.c +++ b/drivers/media/common/videobuf2/videobuf2-core.c @@ -800,6 +800,9 @@ int vb2_core_create_bufs(struct vb2_queue *q, enum vb2_memory memory, memset(q->alloc_devs, 0, sizeof(q->alloc_devs)); q->memory = memory; q->waiting_for_buffers = !q->is_output; + } else if (q->memory != memory) { + dprintk(1, "memory model mismatch\n"); + return -EINVAL; } num_buffers = min(*count, VB2_MAX_FRAME - q->num_buffers); @@ -1755,10 +1758,8 @@ int vb2_core_streamon(struct vb2_queue *q, unsigned int type) if (ret) return ret; ret = vb2_start_streaming(q); - if (ret) { - __vb2_queue_cancel(q); + if (ret) return ret; - } } q->streaming = 1; @@ -1932,9 +1933,13 @@ int vb2_mmap(struct vb2_queue *q, struct vm_area_struct *vma) return -EINVAL; } } + + mutex_lock(&q->mmap_lock); + if (vb2_fileio_is_active(q)) { dprintk(1, "mmap: file io in progress\n"); - return -EBUSY; + ret = -EBUSY; + goto unlock; } /* @@ -1942,7 +1947,7 @@ int vb2_mmap(struct vb2_queue *q, struct vm_area_struct *vma) */ ret = __find_plane_by_offset(q, off, &buffer, &plane); if (ret) - return ret; + goto unlock; vb = q->bufs[buffer]; @@ -1955,11 +1960,13 @@ int vb2_mmap(struct vb2_queue *q, struct vm_area_struct *vma) if (length < (vma->vm_end - vma->vm_start)) { dprintk(1, "MMAP invalid, as it would overflow buffer length\n"); - return -EINVAL; + ret = -EINVAL; + goto unlock; } - mutex_lock(&q->mmap_lock); ret = call_memop(vb, mmap, vb->planes[plane].mem_priv, vma); + +unlock: mutex_unlock(&q->mmap_lock); if (ret) return ret; diff --git a/drivers/media/common/videobuf2/videobuf2-v4l2.c b/drivers/media/common/videobuf2/videobuf2-v4l2.c index 886a2d8d5c6c4..5b678ea1dc9e6 100644 --- a/drivers/media/common/videobuf2/videobuf2-v4l2.c +++ 
b/drivers/media/common/videobuf2/videobuf2-v4l2.c @@ -178,6 +178,11 @@ static int vb2_queue_or_prepare_buf(struct vb2_queue *q, struct v4l2_buffer *b, return -EINVAL; } + if (!q->allow_requests && b->request) { + dprintk(1, "%s: unsupported request ID\n", opname); + return -EINVAL; + } + return __verify_planes_array(q->bufs[b->index], b); } @@ -203,8 +208,8 @@ static void __fill_v4l2_buffer(struct vb2_buffer *vb, void *pb) b->timestamp = ns_to_timeval(vb->timestamp); b->timecode = vbuf->timecode; b->sequence = vbuf->sequence; - b->reserved2 = 0; - b->reserved = 0; + b->request = vbuf->request; + b->reserved = vbuf->reserved; if (q->is_multiplanar) { /* @@ -320,6 +325,7 @@ static int __fill_vb2_buffer(struct vb2_buffer *vb, } vb->timestamp = 0; vbuf->sequence = 0; + vbuf->request = b->request; if (V4L2_TYPE_IS_MULTIPLANAR(b->type)) { if (b->memory == VB2_MEMORY_USERPTR) { diff --git a/drivers/media/dvb-frontends/dvb-pll.c b/drivers/media/dvb-frontends/dvb-pll.c index 6d4b2eec67b4f..29836c1a40e98 100644 --- a/drivers/media/dvb-frontends/dvb-pll.c +++ b/drivers/media/dvb-frontends/dvb-pll.c @@ -80,8 +80,8 @@ struct dvb_pll_desc { static const struct dvb_pll_desc dvb_pll_thomson_dtt7579 = { .name = "Thomson dtt7579", - .min = 177000000, - .max = 858000000, + .min = 177 * MHz, + .max = 858 * MHz, .iffreq= 36166667, .sleepdata = (u8[]){ 2, 0xb4, 0x03 }, .count = 4, @@ -102,8 +102,8 @@ static void thomson_dtt759x_bw(struct dvb_frontend *fe, u8 *buf) static const struct dvb_pll_desc dvb_pll_thomson_dtt759x = { .name = "Thomson dtt759x", - .min = 177000000, - .max = 896000000, + .min = 177 * MHz, + .max = 896 * MHz, .set = thomson_dtt759x_bw, .iffreq= 36166667, .sleepdata = (u8[]){ 2, 0x84, 0x03 }, @@ -126,8 +126,8 @@ static void thomson_dtt7520x_bw(struct dvb_frontend *fe, u8 *buf) static const struct dvb_pll_desc dvb_pll_thomson_dtt7520x = { .name = "Thomson dtt7520x", - .min = 185000000, - .max = 900000000, + .min = 185 * MHz, + .max = 900 * MHz, .set = 
thomson_dtt7520x_bw, .iffreq = 36166667, .count = 7, @@ -144,8 +144,8 @@ static const struct dvb_pll_desc dvb_pll_thomson_dtt7520x = { static const struct dvb_pll_desc dvb_pll_lg_z201 = { .name = "LG z201", - .min = 174000000, - .max = 862000000, + .min = 174 * MHz, + .max = 862 * MHz, .iffreq= 36166667, .sleepdata = (u8[]){ 2, 0xbc, 0x03 }, .count = 5, @@ -160,8 +160,8 @@ static const struct dvb_pll_desc dvb_pll_lg_z201 = { static const struct dvb_pll_desc dvb_pll_unknown_1 = { .name = "unknown 1", /* used by dntv live dvb-t */ - .min = 174000000, - .max = 862000000, + .min = 174 * MHz, + .max = 862 * MHz, .iffreq= 36166667, .count = 9, .entries = { @@ -182,8 +182,8 @@ static const struct dvb_pll_desc dvb_pll_unknown_1 = { */ static const struct dvb_pll_desc dvb_pll_tua6010xs = { .name = "Infineon TUA6010XS", - .min = 44250000, - .max = 858000000, + .min = 44250 * kHz, + .max = 858 * MHz, .iffreq= 36125000, .count = 3, .entries = { @@ -196,8 +196,8 @@ static const struct dvb_pll_desc dvb_pll_tua6010xs = { /* Panasonic env57h1xd5 (some Philips PLL ?) 
*/ static const struct dvb_pll_desc dvb_pll_env57h1xd5 = { .name = "Panasonic ENV57H1XD5", - .min = 44250000, - .max = 858000000, + .min = 44250 * kHz, + .max = 858 * MHz, .iffreq= 36125000, .count = 4, .entries = { @@ -220,8 +220,8 @@ static void tda665x_bw(struct dvb_frontend *fe, u8 *buf) static const struct dvb_pll_desc dvb_pll_tda665x = { .name = "Philips TDA6650/TDA6651", - .min = 44250000, - .max = 858000000, + .min = 44250 * kHz, + .max = 858 * MHz, .set = tda665x_bw, .iffreq= 36166667, .initdata = (u8[]){ 4, 0x0b, 0xf5, 0x85, 0xab }, @@ -254,8 +254,8 @@ static void tua6034_bw(struct dvb_frontend *fe, u8 *buf) static const struct dvb_pll_desc dvb_pll_tua6034 = { .name = "Infineon TUA6034", - .min = 44250000, - .max = 858000000, + .min = 44250 * kHz, + .max = 858 * MHz, .iffreq= 36166667, .count = 3, .set = tua6034_bw, @@ -278,8 +278,8 @@ static void tded4_bw(struct dvb_frontend *fe, u8 *buf) static const struct dvb_pll_desc dvb_pll_tded4 = { .name = "ALPS TDED4", - .min = 47000000, - .max = 863000000, + .min = 47 * MHz, + .max = 863 * MHz, .iffreq= 36166667, .set = tded4_bw, .count = 4, @@ -296,8 +296,8 @@ static const struct dvb_pll_desc dvb_pll_tded4 = { */ static const struct dvb_pll_desc dvb_pll_tdhu2 = { .name = "ALPS TDHU2", - .min = 54000000, - .max = 864000000, + .min = 54 * MHz, + .max = 864 * MHz, .iffreq= 44000000, .count = 4, .entries = { @@ -313,8 +313,8 @@ static const struct dvb_pll_desc dvb_pll_tdhu2 = { */ static const struct dvb_pll_desc dvb_pll_samsung_tbmv = { .name = "Samsung TBMV30111IN / TBMV30712IN1", - .min = 54000000, - .max = 860000000, + .min = 54 * MHz, + .max = 860 * MHz, .iffreq= 44000000, .count = 6, .entries = { @@ -332,8 +332,8 @@ static const struct dvb_pll_desc dvb_pll_samsung_tbmv = { */ static const struct dvb_pll_desc dvb_pll_philips_sd1878_tda8261 = { .name = "Philips SD1878", - .min = 950000, - .max = 2150000, + .min = 950 * MHz, + .max = 2150 * MHz, .iffreq= 249, /* zero-IF, offset 249 is to round up */ .count = 4, 
.entries = { @@ -398,8 +398,8 @@ static void opera1_bw(struct dvb_frontend *fe, u8 *buf) static const struct dvb_pll_desc dvb_pll_opera1 = { .name = "Opera Tuner", - .min = 900000, - .max = 2250000, + .min = 900 * MHz, + .max = 2250 * MHz, .initdata = (u8[]){ 4, 0x08, 0xe5, 0xe1, 0x00 }, .initdata2 = (u8[]){ 4, 0x08, 0xe5, 0xe5, 0x00 }, .iffreq= 0, @@ -445,8 +445,8 @@ static void samsung_dtos403ih102a_set(struct dvb_frontend *fe, u8 *buf) /* unknown pll used in Samsung DTOS403IH102A DVB-C tuner */ static const struct dvb_pll_desc dvb_pll_samsung_dtos403ih102a = { .name = "Samsung DTOS403IH102A", - .min = 44250000, - .max = 858000000, + .min = 44250 * kHz, + .max = 858 * MHz, .iffreq = 36125000, .count = 8, .set = samsung_dtos403ih102a_set, @@ -465,8 +465,8 @@ static const struct dvb_pll_desc dvb_pll_samsung_dtos403ih102a = { /* Samsung TDTC9251DH0 DVB-T NIM, as used on AirStar 2 */ static const struct dvb_pll_desc dvb_pll_samsung_tdtc9251dh0 = { .name = "Samsung TDTC9251DH0", - .min = 48000000, - .max = 863000000, + .min = 48 * MHz, + .max = 863 * MHz, .iffreq = 36166667, .count = 3, .entries = { @@ -479,8 +479,8 @@ static const struct dvb_pll_desc dvb_pll_samsung_tdtc9251dh0 = { /* Samsung TBDU18132 DVB-S NIM with TSA5059 PLL, used in SkyStar2 DVB-S 2.3 */ static const struct dvb_pll_desc dvb_pll_samsung_tbdu18132 = { .name = "Samsung TBDU18132", - .min = 950000, - .max = 2150000, /* guesses */ + .min = 950 * MHz, + .max = 2150 * MHz, /* guesses */ .iffreq = 0, .count = 2, .entries = { @@ -500,8 +500,8 @@ static const struct dvb_pll_desc dvb_pll_samsung_tbdu18132 = { /* Samsung TBMU24112 DVB-S NIM with SL1935 zero-IF tuner */ static const struct dvb_pll_desc dvb_pll_samsung_tbmu24112 = { .name = "Samsung TBMU24112", - .min = 950000, - .max = 2150000, /* guesses */ + .min = 950 * MHz, + .max = 2150 * MHz, /* guesses */ .iffreq = 0, .count = 2, .entries = { @@ -521,8 +521,8 @@ static const struct dvb_pll_desc dvb_pll_samsung_tbmu24112 = { * 822 - 862 1 * 0 0 1 0 0 0 
0x88 */ static const struct dvb_pll_desc dvb_pll_alps_tdee4 = { .name = "ALPS TDEE4", - .min = 47000000, - .max = 862000000, + .min = 47 * MHz, + .max = 862 * MHz, .iffreq = 36125000, .count = 4, .entries = { @@ -537,8 +537,8 @@ static const struct dvb_pll_desc dvb_pll_alps_tdee4 = { /* CP cur. 50uA, AGC takeover: 103dBuV, PORT3 on */ static const struct dvb_pll_desc dvb_pll_tua6034_friio = { .name = "Infineon TUA6034 ISDB-T (Friio)", - .min = 90000000, - .max = 770000000, + .min = 90 * MHz, + .max = 770 * MHz, .iffreq = 57000000, .initdata = (u8[]){ 4, 0x9a, 0x50, 0xb2, 0x08 }, .sleepdata = (u8[]){ 4, 0x9a, 0x70, 0xb3, 0x0b }, @@ -553,8 +553,8 @@ static const struct dvb_pll_desc dvb_pll_tua6034_friio = { /* Philips TDA6651 ISDB-T, used in Earthsoft PT1 */ static const struct dvb_pll_desc dvb_pll_tda665x_earth_pt1 = { .name = "Philips TDA6651 ISDB-T (EarthSoft PT1)", - .min = 90000000, - .max = 770000000, + .min = 90 * MHz, + .max = 770 * MHz, .iffreq = 57000000, .initdata = (u8[]){ 5, 0x0e, 0x7f, 0xc1, 0x80, 0x80 }, .count = 10, @@ -610,9 +610,6 @@ static int dvb_pll_configure(struct dvb_frontend *fe, u8 *buf, u32 div; int i; - if (frequency && (frequency < desc->min || frequency > desc->max)) - return -EINVAL; - for (i = 0; i < desc->count; i++) { if (frequency > desc->entries[i].limit) continue; @@ -799,7 +796,6 @@ struct dvb_frontend *dvb_pll_attach(struct dvb_frontend *fe, int pll_addr, struct dvb_pll_priv *priv = NULL; int ret; const struct dvb_pll_desc *desc; - struct dtv_frontend_properties *c = &fe->dtv_property_cache; b1 = kmalloc(1, GFP_KERNEL); if (!b1) @@ -845,18 +841,12 @@ struct dvb_frontend *dvb_pll_attach(struct dvb_frontend *fe, int pll_addr, strncpy(fe->ops.tuner_ops.info.name, desc->name, sizeof(fe->ops.tuner_ops.info.name)); - switch (c->delivery_system) { - case SYS_DVBS: - case SYS_DVBS2: - case SYS_TURBO: - case SYS_ISDBS: - fe->ops.tuner_ops.info.frequency_min_hz = desc->min * kHz; - fe->ops.tuner_ops.info.frequency_max_hz = desc->max * 
kHz; - break; - default: - fe->ops.tuner_ops.info.frequency_min_hz = desc->min; - fe->ops.tuner_ops.info.frequency_max_hz = desc->max; - } + + fe->ops.tuner_ops.info.frequency_min_hz = desc->min; + fe->ops.tuner_ops.info.frequency_max_hz = desc->max; + + dprintk("%s tuner, frequency range: %u...%u\n", + desc->name, desc->min, desc->max); if (!desc->initdata) fe->ops.tuner_ops.init = NULL; diff --git a/drivers/media/firewire/firedtv-avc.c b/drivers/media/firewire/firedtv-avc.c index 1c933b2cf7603..3ef5df1648d77 100644 --- a/drivers/media/firewire/firedtv-avc.c +++ b/drivers/media/firewire/firedtv-avc.c @@ -968,7 +968,8 @@ static int get_ca_object_length(struct avc_response_frame *r) return r->operand[7]; } -int avc_ca_app_info(struct firedtv *fdtv, char *app_info, unsigned int *len) +int avc_ca_app_info(struct firedtv *fdtv, unsigned char *app_info, + unsigned int *len) { struct avc_command_frame *c = (void *)fdtv->avc_data; struct avc_response_frame *r = (void *)fdtv->avc_data; @@ -1009,7 +1010,8 @@ int avc_ca_app_info(struct firedtv *fdtv, char *app_info, unsigned int *len) return ret; } -int avc_ca_info(struct firedtv *fdtv, char *app_info, unsigned int *len) +int avc_ca_info(struct firedtv *fdtv, unsigned char *app_info, + unsigned int *len) { struct avc_command_frame *c = (void *)fdtv->avc_data; struct avc_response_frame *r = (void *)fdtv->avc_data; diff --git a/drivers/media/firewire/firedtv.h b/drivers/media/firewire/firedtv.h index 876cdec8329be..009905a199472 100644 --- a/drivers/media/firewire/firedtv.h +++ b/drivers/media/firewire/firedtv.h @@ -124,8 +124,10 @@ int avc_lnb_control(struct firedtv *fdtv, char voltage, char burst, struct dvb_diseqc_master_cmd *diseqcmd); void avc_remote_ctrl_work(struct work_struct *work); int avc_register_remote_control(struct firedtv *fdtv); -int avc_ca_app_info(struct firedtv *fdtv, char *app_info, unsigned int *len); -int avc_ca_info(struct firedtv *fdtv, char *app_info, unsigned int *len); +int avc_ca_app_info(struct 
firedtv *fdtv, unsigned char *app_info, + unsigned int *len); +int avc_ca_info(struct firedtv *fdtv, unsigned char *app_info, + unsigned int *len); int avc_ca_reset(struct firedtv *fdtv); int avc_ca_pmt(struct firedtv *fdtv, char *app_info, int length); int avc_ca_get_time_date(struct firedtv *fdtv, int *interval); diff --git a/drivers/media/i2c/Kconfig b/drivers/media/i2c/Kconfig index 82af97430e5b3..81079ce07ad6e 100644 --- a/drivers/media/i2c/Kconfig +++ b/drivers/media/i2c/Kconfig @@ -945,6 +945,8 @@ config VIDEO_S5K5BAF source "drivers/media/i2c/smiapp/Kconfig" source "drivers/media/i2c/et8ek8/Kconfig" +source "drivers/media/i2c/crlmodule/Kconfig" +source "drivers/media/i2c/crlmodule-lite/Kconfig" config VIDEO_S5C73M3 tristate "Samsung S5C73M3 sensor support" @@ -1065,6 +1067,24 @@ config VIDEO_I2C To compile this driver as a module, choose M here: the module will be called video-i2c +config VIDEO_TI964 + tristate "TI964 driver support" + depends on I2C && MEDIA_CONTROLLER && VIDEO_V4L2 && VIDEO_V4L2_SUBDEV_API + ---help--- + This is a driver for TI964 camera. + +config VIDEO_MAX9286 + tristate "MAX96705/MAX9286 Serializer/Deserializer" + depends on I2C && MEDIA_CONTROLLER && VIDEO_V4L2 && VIDEO_V4L2_SUBDEV_API && REGMAP_I2C + ---help--- + This is a MAXIM 96705 Serializer and MAXIM 9286 CSI-2 Deserializer driver. + +config VIDEO_TI960 + tristate "TI960 driver support" + depends on I2C && MEDIA_CONTROLLER && VIDEO_V4L2 && VIDEO_V4L2_SUBDEV_API + ---help--- + This is a driver for TI960 Deserializer. 
+ endmenu menu "Sensors used on soc_camera driver" @@ -1075,4 +1095,8 @@ endif endmenu +if VIDEO_INTEL_ICI + source "drivers/media/i2c/ici/Kconfig" +endif + endif diff --git a/drivers/media/i2c/Makefile b/drivers/media/i2c/Makefile index a94eb03d10d4e..e05004cc5544d 100644 --- a/drivers/media/i2c/Makefile +++ b/drivers/media/i2c/Makefile @@ -110,3 +110,10 @@ obj-$(CONFIG_VIDEO_IMX258) += imx258.o obj-$(CONFIG_VIDEO_IMX274) += imx274.o obj-$(CONFIG_SDR_MAX2175) += max2175.o + +obj-$(CONFIG_VIDEO_CRLMODULE) += crlmodule/ +obj-$(CONFIG_VIDEO_TI964) += ti964.o +obj-$(CONFIG_VIDEO_MAX9286) += max9286.o +obj-$(CONFIG_VIDEO_TI960) += ti960.o +obj-$(CONFIG_VIDEO_CRLMODULE_LITE) += crlmodule-lite/ +obj-$(CONFIG_VIDEO_INTEL_ICI) += ici/ diff --git a/drivers/media/i2c/adv7511.c b/drivers/media/i2c/adv7511.c index 55c2ea0720d9e..f3899cc84e27f 100644 --- a/drivers/media/i2c/adv7511.c +++ b/drivers/media/i2c/adv7511.c @@ -1355,10 +1355,10 @@ static int adv7511_set_fmt(struct v4l2_subdev *sd, state->xfer_func = format->format.xfer_func; switch (format->format.colorspace) { - case V4L2_COLORSPACE_ADOBERGB: + case V4L2_COLORSPACE_OPRGB: c = HDMI_COLORIMETRY_EXTENDED; - ec = y ? HDMI_EXTENDED_COLORIMETRY_ADOBE_YCC_601 : - HDMI_EXTENDED_COLORIMETRY_ADOBE_RGB; + ec = y ? HDMI_EXTENDED_COLORIMETRY_OPYCC_601 : + HDMI_EXTENDED_COLORIMETRY_OPRGB; break; case V4L2_COLORSPACE_SMPTE170M: c = y ? 
HDMI_COLORIMETRY_ITU_601 : HDMI_COLORIMETRY_NONE; diff --git a/drivers/media/i2c/adv7604.c b/drivers/media/i2c/adv7604.c index 668be2bca57aa..c78698199ac58 100644 --- a/drivers/media/i2c/adv7604.c +++ b/drivers/media/i2c/adv7604.c @@ -2284,8 +2284,10 @@ static int adv76xx_set_edid(struct v4l2_subdev *sd, struct v4l2_edid *edid) state->aspect_ratio.numerator = 16; state->aspect_ratio.denominator = 9; - if (!state->edid.present) + if (!state->edid.present) { state->edid.blocks = 0; + cec_phys_addr_invalidate(state->cec_adap); + } v4l2_dbg(2, debug, sd, "%s: clear EDID pad %d, edid.present = 0x%x\n", __func__, edid->pad, state->edid.present); @@ -2474,7 +2476,7 @@ static int adv76xx_log_status(struct v4l2_subdev *sd) "YCbCr Bt.601 (16-235)", "YCbCr Bt.709 (16-235)", "xvYCC Bt.601", "xvYCC Bt.709", "YCbCr Bt.601 (0-255)", "YCbCr Bt.709 (0-255)", - "sYCC", "Adobe YCC 601", "AdobeRGB", "invalid", "invalid", + "sYCC", "opYCC 601", "opRGB", "invalid", "invalid", "invalid", "invalid", "invalid" }; static const char * const rgb_quantization_range_txt[] = { diff --git a/drivers/media/i2c/adv7842.c b/drivers/media/i2c/adv7842.c index 4f8fbdd00e35e..71fe56565f753 100644 --- a/drivers/media/i2c/adv7842.c +++ b/drivers/media/i2c/adv7842.c @@ -786,8 +786,10 @@ static int edid_write_hdmi_segment(struct v4l2_subdev *sd, u8 port) /* Disable I2C access to internal EDID ram from HDMI DDC ports */ rep_write_and_or(sd, 0x77, 0xf3, 0x00); - if (!state->hdmi_edid.present) + if (!state->hdmi_edid.present) { + cec_phys_addr_invalidate(state->cec_adap); return 0; + } pa = cec_get_edid_phys_addr(edid, 256, &spa_loc); err = cec_phys_addr_validate(pa, &pa, NULL); diff --git a/drivers/media/i2c/crlmodule-lite/Kconfig b/drivers/media/i2c/crlmodule-lite/Kconfig new file mode 100644 index 0000000000000..5f6b506ae749e --- /dev/null +++ b/drivers/media/i2c/crlmodule-lite/Kconfig @@ -0,0 +1,7 @@ +config VIDEO_CRLMODULE_LITE + tristate "CRL Module sensor support for ICI driver" + depends on I2C + 
depends on VIDEO_INTEL_ICI + depends on !VIDEO_CRLMODULE + ---help--- + This is a generic driver for CRL based camera modules. diff --git a/drivers/media/i2c/crlmodule-lite/Makefile b/drivers/media/i2c/crlmodule-lite/Makefile new file mode 100644 index 0000000000000..de5f5e4d3ccba --- /dev/null +++ b/drivers/media/i2c/crlmodule-lite/Makefile @@ -0,0 +1,9 @@ +# SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0) + +crlmodule-lite-objs += crlmodule-core.o crlmodule-data.o \ + crlmodule-regs.o crlmodule-nvm.o \ + crl_adv7481_hdmi_configuration.o \ + crlmodule-msrlist.o +obj-$(CONFIG_VIDEO_CRLMODULE_LITE) += crlmodule-lite.o + +ccflags-y += -Idrivers/media/i2c diff --git a/drivers/media/i2c/crlmodule-lite/crl_adv7481_configuration.h b/drivers/media/i2c/crlmodule-lite/crl_adv7481_configuration.h new file mode 100644 index 0000000000000..4ebe85478a24b --- /dev/null +++ b/drivers/media/i2c/crlmodule-lite/crl_adv7481_configuration.h @@ -0,0 +1,703 @@ +/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0) */ +/* + * Copyright (C) 2018 Intel Corporation + */ + +#ifndef __CRLMODULE_ADV7481_CONFIGURATION_H_ +#define __CRLMODULE_ADV7481_CONFIGURATION_H_ + +#include "crlmodule-sensor-ds.h" + +static struct crl_register_write_rep adv7481_powerup_regset[] = { + {0xFF, CRL_REG_LEN_08BIT, 0xFF, 0xE0}, /* SW reset */ + {0x00, CRL_REG_LEN_DELAY, 0x05, 0x00}, /* Delay 5ms */ + {0x01, CRL_REG_LEN_08BIT, 0x76, 0xE0}, /* ADI recommended setting */ + {0xF2, CRL_REG_LEN_08BIT, 0x01, 0xE0}, /* I2C Rd Auto-Increment=1 */ + {0xF3, CRL_REG_LEN_08BIT, 0x4C, 0xE0}, /* DPLL Map Address */ + {0xF4, CRL_REG_LEN_08BIT, 0x44, 0xE0}, /* CP Map Address */ + {0xF5, CRL_REG_LEN_08BIT, 0x68, 0xE0}, /* HDMI RX Map Address */ + {0xF6, CRL_REG_LEN_08BIT, 0x6C, 0xE0}, /* EDID Map Address */ + {0xF7, CRL_REG_LEN_08BIT, 0x64, 0xE0}, /* HDMI RX Repeater Map Addr */ + {0xF8, CRL_REG_LEN_08BIT, 0x62, 0xE0}, /* HDMI RX Infoframe Map Addr */ + {0xF9, CRL_REG_LEN_08BIT, 0xF0, 0xE0}, /* CBUS Map Address Set */ + {0xFA, 
CRL_REG_LEN_08BIT, 0x82, 0xE0}, /* CEC Map Address Set */ + {0xFB, CRL_REG_LEN_08BIT, 0xF2, 0xE0}, /* SDP Main Map Address */ + {0xFC, CRL_REG_LEN_08BIT, 0x90, 0xE0}, /* CSI-TXB Map Address */ + {0xFD, CRL_REG_LEN_08BIT, 0x94, 0xE0}, /* CSI-TXA Map Address */ + {0x00, CRL_REG_LEN_08BIT, 0x50, 0xE0}, /* Disable Chip Powerdown & + HDMI Rx Block */ + {0x40, CRL_REG_LEN_08BIT, 0x83, 0x64}, /* Enable HDCP 1.1 */ + {0x00, CRL_REG_LEN_08BIT, 0x08, 0x68}, /* ADI recommended setting */ + {0x3D, CRL_REG_LEN_08BIT, 0x10, 0x68}, /* ADI recommended setting */ + {0x3E, CRL_REG_LEN_08BIT, 0x69, 0x68}, /* ADI recommended setting */ + {0x3F, CRL_REG_LEN_08BIT, 0x46, 0x68}, /* ADI recommended setting */ + {0x4E, CRL_REG_LEN_08BIT, 0xFE, 0x68}, /* ADI recommended setting */ + {0x4F, CRL_REG_LEN_08BIT, 0x08, 0x68}, /* ADI recommended setting */ + {0x57, CRL_REG_LEN_08BIT, 0xA3, 0x68}, /* ADI recommended setting */ + {0x58, CRL_REG_LEN_08BIT, 0x04, 0x68}, /* ADI recommended setting */ + {0x85, CRL_REG_LEN_08BIT, 0x10, 0x68}, /* ADI recommended setting */ + {0x83, CRL_REG_LEN_08BIT, 0x00, 0x68}, /* Enable All Terminations */ + {0xBE, CRL_REG_LEN_08BIT, 0x00, 0x68}, /* ADI recommended setting */ + {0x6C, CRL_REG_LEN_08BIT, 0x01, 0x68}, /* HPA Manual Enable */ + {0xF8, CRL_REG_LEN_08BIT, 0x01, 0x68}, /* HPA Asserted */ + {0x0F, CRL_REG_LEN_08BIT, 0x00, 0x68}, /* Audio Mute Speed = + Fastest Smallest Step Size */ + {0x70, CRL_REG_LEN_08BIT, 0xA0, 0x64}, /* Write primary edid size */ + {0x74, CRL_REG_LEN_08BIT, 0x01, 0x64}, /* Enable manual edid */ + {0x7A, CRL_REG_LEN_08BIT, 0x00, 0x64}, /* Write edid sram select */ + {0xF6, CRL_REG_LEN_08BIT, 0x6C, 0xE0}, /* Write edid map bus address */ + + {0x00*4, CRL_REG_LEN_32BIT, 0x00FFFFFF, 0x6C}, /* EDID programming */ + {0x01*4, CRL_REG_LEN_32BIT, 0xFFFFFF00, 0x6C}, /* EDID programming */ + {0x02*4, CRL_REG_LEN_32BIT, 0x4DD90100, 0x6C}, /* EDID programming */ + {0x03*4, CRL_REG_LEN_32BIT, 0x00000000, 0x6C}, /* EDID programming */ + {0x04*4, 
CRL_REG_LEN_32BIT, 0x00110103, 0x6C}, /* EDID programming */ + {0x05*4, CRL_REG_LEN_32BIT, 0x80000078, 0x6C}, /* EDID programming */ + {0x06*4, CRL_REG_LEN_32BIT, 0x0A0DC9A0, 0x6C}, /* EDID programming */ + {0x07*4, CRL_REG_LEN_32BIT, 0x57479827, 0x6C}, /* EDID programming */ + {0x08*4, CRL_REG_LEN_32BIT, 0x12484C00, 0x6C}, /* EDID programming */ + {0x09*4, CRL_REG_LEN_32BIT, 0x00000101, 0x6C}, /* EDID programming */ + {0x0A*4, CRL_REG_LEN_32BIT, 0x01010101, 0x6C}, /* EDID programming */ + {0x0B*4, CRL_REG_LEN_32BIT, 0x01010101, 0x6C}, /* EDID programming */ + {0x0C*4, CRL_REG_LEN_32BIT, 0x01010101, 0x6C}, /* EDID programming */ + {0x0D*4, CRL_REG_LEN_32BIT, 0x0101011D, 0x6C}, /* EDID programming */ + {0x0E*4, CRL_REG_LEN_32BIT, 0x80D0721C, 0x6C}, /* EDID programming */ + {0x0F*4, CRL_REG_LEN_32BIT, 0x1620102C, 0x6C}, /* EDID programming */ + {0x10*4, CRL_REG_LEN_32BIT, 0x2580C48E, 0x6C}, /* EDID programming */ + {0x11*4, CRL_REG_LEN_32BIT, 0x2100009E, 0x6C}, /* EDID programming */ + {0x12*4, CRL_REG_LEN_32BIT, 0x011D8018, 0x6C}, /* EDID programming */ + {0x13*4, CRL_REG_LEN_32BIT, 0x711C1620, 0x6C}, /* EDID programming */ + {0x14*4, CRL_REG_LEN_32BIT, 0x582C2500, 0x6C}, /* EDID programming */ + {0x15*4, CRL_REG_LEN_32BIT, 0xC48E2100, 0x6C}, /* EDID programming */ + {0x16*4, CRL_REG_LEN_32BIT, 0x009E0000, 0x6C}, /* EDID programming */ + {0x17*4, CRL_REG_LEN_32BIT, 0x00FC0048, 0x6C}, /* EDID programming */ + {0x18*4, CRL_REG_LEN_32BIT, 0x444D4920, 0x6C}, /* EDID programming */ + {0x19*4, CRL_REG_LEN_32BIT, 0x4C4C430A, 0x6C}, /* EDID programming */ + {0x1A*4, CRL_REG_LEN_32BIT, 0x20202020, 0x6C}, /* EDID programming */ + {0x1B*4, CRL_REG_LEN_32BIT, 0x000000FD, 0x6C}, /* EDID programming */ + {0x1C*4, CRL_REG_LEN_32BIT, 0x003B3D0F, 0x6C}, /* EDID programming */ + {0x1D*4, CRL_REG_LEN_32BIT, 0x2D08000A, 0x6C}, /* EDID programming */ + {0x1E*4, CRL_REG_LEN_32BIT, 0x20202020, 0x6C}, /* EDID programming */ + {0x1F*4, CRL_REG_LEN_32BIT, 0x202001C1, 0x6C}, /* EDID 
programming */ + {0x20*4, CRL_REG_LEN_32BIT, 0x02031E77, 0x6C}, /* EDID programming */ + {0x21*4, CRL_REG_LEN_32BIT, 0x4F941305, 0x6C}, /* EDID programming */ + {0x22*4, CRL_REG_LEN_32BIT, 0x03040201, 0x6C}, /* EDID programming */ + {0x23*4, CRL_REG_LEN_32BIT, 0x16150706, 0x6C}, /* EDID programming */ + {0x24*4, CRL_REG_LEN_32BIT, 0x1110121F, 0x6C}, /* EDID programming */ + {0x25*4, CRL_REG_LEN_32BIT, 0x23090701, 0x6C}, /* EDID programming */ + {0x26*4, CRL_REG_LEN_32BIT, 0x65030C00, 0x6C}, /* EDID programming */ + {0x27*4, CRL_REG_LEN_32BIT, 0x10008C0A, 0x6C}, /* EDID programming */ + {0x28*4, CRL_REG_LEN_32BIT, 0xD0902040, 0x6C}, /* EDID programming */ + {0x29*4, CRL_REG_LEN_32BIT, 0x31200C40, 0x6C}, /* EDID programming */ + {0x2A*4, CRL_REG_LEN_32BIT, 0x5500138E, 0x6C}, /* EDID programming */ + {0x2B*4, CRL_REG_LEN_32BIT, 0x21000018, 0x6C}, /* EDID programming */ + {0x2C*4, CRL_REG_LEN_32BIT, 0x011D00BC, 0x6C}, /* EDID programming */ + {0x2D*4, CRL_REG_LEN_32BIT, 0x52D01E20, 0x6C}, /* EDID programming */ + {0x2E*4, CRL_REG_LEN_32BIT, 0xB8285540, 0x6C}, /* EDID programming */ + {0x2F*4, CRL_REG_LEN_32BIT, 0xC48E2100, 0x6C}, /* EDID programming */ + {0x30*4, CRL_REG_LEN_32BIT, 0x001E8C0A, 0x6C}, /* EDID programming */ + {0x31*4, CRL_REG_LEN_32BIT, 0xD08A20E0, 0x6C}, /* EDID programming */ + {0x32*4, CRL_REG_LEN_32BIT, 0x2D10103E, 0x6C}, /* EDID programming */ + {0x33*4, CRL_REG_LEN_32BIT, 0x9600C48E, 0x6C}, /* EDID programming */ + {0x34*4, CRL_REG_LEN_32BIT, 0x21000018, 0x6C}, /* EDID programming */ + {0x35*4, CRL_REG_LEN_32BIT, 0x011D0072, 0x6C}, /* EDID programming */ + {0x36*4, CRL_REG_LEN_32BIT, 0x51D01E20, 0x6C}, /* EDID programming */ + {0x37*4, CRL_REG_LEN_32BIT, 0x6E285500, 0x6C}, /* EDID programming */ + {0x38*4, CRL_REG_LEN_32BIT, 0xC48E2100, 0x6C}, /* EDID programming */ + {0x39*4, CRL_REG_LEN_32BIT, 0x001E8C0A, 0x6C}, /* EDID programming */ + {0x3A*4, CRL_REG_LEN_32BIT, 0xD08A20E0, 0x6C}, /* EDID programming */ + {0x3B*4, CRL_REG_LEN_32BIT, 
0x2D10103E, 0x6C}, /* EDID programming */ + {0x3C*4, CRL_REG_LEN_32BIT, 0x9600138E, 0x6C}, /* EDID programming */ + {0x3D*4, CRL_REG_LEN_32BIT, 0x21000018, 0x6C}, /* EDID programming */ + {0x3E*4, CRL_REG_LEN_32BIT, 0x00000000, 0x6C}, /* EDID programming */ + {0x3F*4, CRL_REG_LEN_32BIT, 0x000000CB, 0x6C}, /* EDID programming */ + + {0x00, CRL_REG_LEN_08BIT, 0x84, 0x94}, /* Enable 4-lane MIPI */ + {0x1E, CRL_REG_LEN_08BIT, 0x80, 0x94}, /* No MIPI frame start */ + {0x26, CRL_REG_LEN_08BIT, 0x55, 0x94}, /* Disable sleep mode */ + {0x27, CRL_REG_LEN_08BIT, 0x55, 0x94}, /* Disable escape mode */ + {0x7E, CRL_REG_LEN_08BIT, 0xA0, 0x94}, /* ADI recommended setting */ + {0xF0, CRL_REG_LEN_08BIT, 0x00, 0x90}, /* ADI recommended setting */ + {0xDA, CRL_REG_LEN_08BIT, 0x01, 0x90}, /* ADI recommended setting */ + {0xD6, CRL_REG_LEN_08BIT, 0x07, 0x94}, /* ADI recommended setting */ + {0x34, CRL_REG_LEN_08BIT, 0x55, 0x94}, /* ADI recommended setting */ + {0xC4, CRL_REG_LEN_08BIT, 0x0A, 0x94}, /* ADI recommended setting */ + {0xCA, CRL_REG_LEN_08BIT, 0x02, 0x94}, /* ADI recommended setting */ + {0x71, CRL_REG_LEN_08BIT, 0x33, 0x94}, /* ADI recommended setting */ + {0x72, CRL_REG_LEN_08BIT, 0x11, 0x94}, /* ADI recommended setting */ + {0xF0, CRL_REG_LEN_08BIT, 0x00, 0x94}, /* Power up DPHY */ + {0x31, CRL_REG_LEN_08BIT, 0x82, 0x94}, /* ADI recommended setting */ + {0xDA, CRL_REG_LEN_08BIT, 0x01, 0x94}, /* ADI recommended setting */ + {0x00, CRL_REG_LEN_08BIT, 0xA4, 0x94}, /* ADI recommended setting */ +}; + +static struct crl_register_write_rep adv7481_mode_1080p[] = { + {0x04, CRL_REG_LEN_08BIT, 0x00, 0xE0}, /* YCrCb output */ + {0x05, CRL_REG_LEN_08BIT, 0x5E, 0xE0}, /* Select Resolution 1080P */ + {0x12, CRL_REG_LEN_08BIT, 0xF2, 0xE0}, /* CSC Depends on ip Packets - SDR422 set */ + {0x17, CRL_REG_LEN_08BIT, 0x80, 0xE0}, /* ADI recommended setting */ + {0x03, CRL_REG_LEN_08BIT, 0x86, 0xE0}, /* CP-Insert_AV_Code */ + {0x7C, CRL_REG_LEN_08BIT, 0x00, 0x44}, /* ADI recommended 
setting */ + + {0x8B, CRL_REG_LEN_08BIT, 0x43, 0x44}, /* 1080P shift left 44 pixel */ + {0x8C, CRL_REG_LEN_08BIT, 0xD4, 0x44}, /* 1080P shift left 44 pixel */ + {0x8B, CRL_REG_LEN_08BIT, 0x4F, 0x44}, /* 1080P shift left 44 pixel */ + {0x8D, CRL_REG_LEN_08BIT, 0xD4, 0x44}, /* 1080P shift left 44 pixel */ + + {0x0C, CRL_REG_LEN_08BIT, 0xE0, 0xE0}, /* Enable LLC_DLL & Double LLC Timing */ + {0x0E, CRL_REG_LEN_08BIT, 0xFF, 0xE0}, /* LLC/PIX/AUD/SPI PINS TRISTATED */ + {0x10, CRL_REG_LEN_08BIT, 0xA0, 0xE0}, /* Enable 4-lane CSI Tx & Pixel Port */ + {0x1C, CRL_REG_LEN_08BIT, 0x3A, 0xE0}, /* ADI recommended setting */ +}; + +static struct crl_register_write_rep adv7481_mode_720p[] = { + {0x04, CRL_REG_LEN_08BIT, 0x00, 0xE0}, /* YCrCb output */ + {0x05, CRL_REG_LEN_08BIT, 0x53, 0xE0}, /* Select Resolution 720P */ + {0x12, CRL_REG_LEN_08BIT, 0xF2, 0xE0}, /* CSC Depends on ip Packets - SDR422 set */ + {0x17, CRL_REG_LEN_08BIT, 0x80, 0xE0}, /* ADI recommended setting */ + {0x03, CRL_REG_LEN_08BIT, 0x86, 0xE0}, /* CP-Insert_AV_Code */ + {0x7C, CRL_REG_LEN_08BIT, 0x00, 0x44}, /* ADI recommended setting */ + + {0x8B, CRL_REG_LEN_08BIT, 0x43, 0x44}, /* 720P shift left 40 pixel */ + {0x8C, CRL_REG_LEN_08BIT, 0xD8, 0x44}, /* 720P shift left 40 pixel */ + {0x8B, CRL_REG_LEN_08BIT, 0x4F, 0x44}, /* 720P shift left 40 pixel */ + {0x8D, CRL_REG_LEN_08BIT, 0xD8, 0x44}, /* 720P shift left 40 pixel */ + + {0x0C, CRL_REG_LEN_08BIT, 0xE0, 0xE0}, /* Enable LLC_DLL & Double LLC Timing */ + {0x0E, CRL_REG_LEN_08BIT, 0xFF, 0xE0}, /* LLC/PIX/AUD/SPI PINS TRISTATED */ + {0x10, CRL_REG_LEN_08BIT, 0xA0, 0xE0}, /* Enable 4-lane CSI Tx & Pixel Port */ + {0x1C, CRL_REG_LEN_08BIT, 0x3A, 0xE0}, /* ADI recommended setting */ +}; + +static struct crl_register_write_rep adv7481_mode_VGA[] = { + {0x04, CRL_REG_LEN_08BIT, 0x00, 0xE0}, /* YCrCb output */ + {0x05, CRL_REG_LEN_08BIT, 0x88, 0xE0}, /* Select Resolution VGA */ + {0x12, CRL_REG_LEN_08BIT, 0xF2, 0xE0}, /* CSC Depends on ip Packets - SDR422 set */ + 
{0x17, CRL_REG_LEN_08BIT, 0x80, 0xE0}, /* ADI recommended setting */ + {0x03, CRL_REG_LEN_08BIT, 0x86, 0xE0}, /* CP-Insert_AV_Code */ + {0x7C, CRL_REG_LEN_08BIT, 0x00, 0x44}, /* ADI recommended setting */ + + {0x0C, CRL_REG_LEN_08BIT, 0xE0, 0xE0}, /* Enable LLC_DLL & Double LLC Timing */ + {0x0E, CRL_REG_LEN_08BIT, 0xFF, 0xE0}, /* LLC/PIX/AUD/SPI PINS TRISTATED */ + {0x10, CRL_REG_LEN_08BIT, 0xA0, 0xE0}, /* Enable 4-lane CSI Tx & Pixel Port */ + {0x1C, CRL_REG_LEN_08BIT, 0x3A, 0xE0}, /* ADI recommended setting */ +}; + +static struct crl_register_write_rep adv7481_mode_1080i[] = { + {0x04, CRL_REG_LEN_08BIT, 0x00, 0xE0}, /* YCrCb output */ + {0x05, CRL_REG_LEN_08BIT, 0x54, 0xE0}, /* Select Resolution 1080i*/ + {0x12, CRL_REG_LEN_08BIT, 0xF2, 0xE0}, /* CSC Depends on ip Packets - SDR422 set */ + {0x17, CRL_REG_LEN_08BIT, 0x80, 0xE0}, /* ADI recommended setting */ + {0x03, CRL_REG_LEN_08BIT, 0x86, 0xE0}, /* CP-Insert_AV_Code */ + {0x7C, CRL_REG_LEN_08BIT, 0x00, 0x44}, /* ADI recommended setting */ + + {0x8B, CRL_REG_LEN_08BIT, 0x43, 0x44}, /* 1080i shift left 44 pixel */ + {0x8C, CRL_REG_LEN_08BIT, 0xD4, 0x44}, /* 1080i shift left 44 pixel */ + {0x8B, CRL_REG_LEN_08BIT, 0x4F, 0x44}, /* 1080i shift left 44 pixel */ + {0x8D, CRL_REG_LEN_08BIT, 0xD4, 0x44}, /* 1080i shift left 44 pixel */ + + {0x0C, CRL_REG_LEN_08BIT, 0xE0, 0xE0}, /* Enable LLC_DLL & Double LLC Timing */ + {0x0E, CRL_REG_LEN_08BIT, 0xFF, 0xE0}, /* LLC/PIX/AUD/SPI PINS TRISTATED */ + {0x10, CRL_REG_LEN_08BIT, 0xA0, 0xE0}, /* Enable 4-lane CSI Tx & Pixel Port */ + {0x1C, CRL_REG_LEN_08BIT, 0x3A, 0xE0}, /* ADI recommended setting */ +}; + +static struct crl_register_write_rep adv7481_mode_480i[] = { + {0x04, CRL_REG_LEN_08BIT, 0x00, 0xE0}, /* YCrCb output */ + {0x05, CRL_REG_LEN_08BIT, 0x40, 0xE0}, /* Select Resolution 480i */ + {0x12, CRL_REG_LEN_08BIT, 0xF2, 0xE0}, /* CSC Depends on ip Packets - SDR422 set */ + {0x17, CRL_REG_LEN_08BIT, 0x80, 0xE0}, /* ADI recommended setting */ + {0x03, 
CRL_REG_LEN_08BIT, 0x86, 0xE0}, /* CP-Insert_AV_Code */ + {0x7C, CRL_REG_LEN_08BIT, 0x00, 0x44}, /* ADI recommended setting */ + + {0x0C, CRL_REG_LEN_08BIT, 0xE0, 0xE0}, /* Enable LLC_DLL & Double LLC Timing */ + {0x0E, CRL_REG_LEN_08BIT, 0xFF, 0xE0}, /* LLC/PIX/AUD/SPI PINS TRISTATED */ + {0x10, CRL_REG_LEN_08BIT, 0xA0, 0xE0}, /* Enable 4-lane CSI Tx & Pixel Port */ + {0x1C, CRL_REG_LEN_08BIT, 0x3A, 0xE0}, /* ADI recommended setting */ +}; + +static struct crl_register_write_rep adv7481_mode_576p[] = { + {0x04, CRL_REG_LEN_08BIT, 0x00, 0xE0}, /* YCrCb output */ + {0x05, CRL_REG_LEN_08BIT, 0x4B, 0xE0}, /* Select Resolution 576p*/ + {0x12, CRL_REG_LEN_08BIT, 0xF2, 0xE0}, /* CSC Depends on ip Packets - SDR422 set */ + {0x17, CRL_REG_LEN_08BIT, 0x80, 0xE0}, /* ADI recommended setting */ + {0x03, CRL_REG_LEN_08BIT, 0x86, 0xE0}, /* CP-Insert_AV_Code */ + {0x7C, CRL_REG_LEN_08BIT, 0x00, 0x44}, /* ADI recommended setting */ + + {0x0C, CRL_REG_LEN_08BIT, 0xE0, 0xE0}, /* Enable LLC_DLL & Double LLC Timing */ + {0x0E, CRL_REG_LEN_08BIT, 0xFF, 0xE0}, /* LLC/PIX/AUD/SPI PINS TRISTATED */ + {0x10, CRL_REG_LEN_08BIT, 0xA0, 0xE0}, /* Enable 4-lane CSI Tx & Pixel Port */ + {0x1C, CRL_REG_LEN_08BIT, 0x3A, 0xE0}, /* ADI recommended setting */ +}; + +static struct crl_register_write_rep adv7481_mode_576i[] = { + {0x04, CRL_REG_LEN_08BIT, 0x00, 0xE0}, /* YCrCb output */ + {0x05, CRL_REG_LEN_08BIT, 0x41, 0xE0}, /* Select Resolution 576i*/ + {0x12, CRL_REG_LEN_08BIT, 0xF2, 0xE0}, /* CSC Depends on ip Packets - SDR422 set */ + {0x17, CRL_REG_LEN_08BIT, 0x80, 0xE0}, /* ADI recommended setting */ + {0x03, CRL_REG_LEN_08BIT, 0x86, 0xE0}, /* CP-Insert_AV_Code */ + {0x7C, CRL_REG_LEN_08BIT, 0x00, 0x44}, /* ADI recommended setting */ + + {0x0C, CRL_REG_LEN_08BIT, 0xE0, 0xE0}, /* Enable LLC_DLL & Double LLC Timing */ + {0x0E, CRL_REG_LEN_08BIT, 0xFF, 0xE0}, /* LLC/PIX/AUD/SPI PINS TRISTATED */ + {0x10, CRL_REG_LEN_08BIT, 0xA0, 0xE0}, /* Enable 4-lane CSI Tx & Pixel Port */ + {0x1C, 
CRL_REG_LEN_08BIT, 0x3A, 0xE0}, /* ADI recommended setting */ +}; + +static struct crl_register_write_rep adv7481_streamon_regs[] = { + {0x00, CRL_REG_LEN_DELAY, 0x02, 0x00}, + {0x00, CRL_REG_LEN_08BIT, 0x24, 0x94}, /* Power-up CSI-TX */ + {0x00, CRL_REG_LEN_DELAY, 0x01, 0x00}, + {0xC1, CRL_REG_LEN_08BIT, 0x2B, 0x94}, /* ADI recommended setting */ + {0x00, CRL_REG_LEN_DELAY, 0x01, 0x00}, + {0x31, CRL_REG_LEN_08BIT, 0x80, 0x94}, /* ADI recommended setting */ +}; + +static struct crl_register_write_rep adv7481_streamoff_regs[] = { + {0x31, CRL_REG_LEN_08BIT, 0x82, 0x94}, /* ADI Recommended Write */ + {0x1E, CRL_REG_LEN_08BIT, 0x00, 0x94}, /* Reset the clock Lane */ + {0x00, CRL_REG_LEN_08BIT, 0xA4, 0x94}, + {0xDA, CRL_REG_LEN_08BIT, 0x00, 0x94}, /* i2c_mipi_pll_en - 1'b0 Disable MIPI PLL */ + {0xC1, CRL_REG_LEN_08BIT, 0x3B, 0x94}, +}; + +static struct crl_sensor_detect_config adv7481_sensor_detect_regset[] = { + { + .reg = { 0x0019, CRL_REG_LEN_08BIT, 0x000000ff }, + .width = 5, + }, + { + .reg = { 0x0016, CRL_REG_LEN_16BIT, 0x0000ffff }, + .width = 7, + }, +}; + +static struct crl_pll_configuration adv7481_pll_configurations[] = { + { + .input_clk = 24000000, + .op_sys_clk = 400000000, + .bitsperpixel = 16, + .pixel_rate_csi = 800000000, + .pixel_rate_pa = 800000000, + .comp_items = 0, + .ctrl_data = 0, + .pll_regs_items = 0, + .pll_regs = NULL, + }, + +}; + +static struct crl_subdev_rect_rep adv7481_1080p_rects[] = { + { + .subdev_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .in_rect.left = 0, + .in_rect.top = 0, + .in_rect.width = 1920, + .in_rect.height = 1080, + .out_rect.left = 0, + .out_rect.top = 0, + .out_rect.width = 1920, + .out_rect.height = 1080, + }, + { + .subdev_type = CRL_SUBDEV_TYPE_BINNER, + .in_rect.left = 0, + .in_rect.top = 0, + .in_rect.width = 1920, + .in_rect.height = 1080, + .out_rect.left = 0, + .out_rect.top = 0, + .out_rect.width = 1920, + .out_rect.height = 1080, + }, +}; + +static struct crl_subdev_rect_rep adv7481_720p_rects[] = { + { + 
.subdev_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .in_rect.left = 0, + .in_rect.top = 0, + .in_rect.width = 1920, + .in_rect.height = 1080, + .out_rect.left = 0, + .out_rect.top = 0, + .out_rect.width = 1920, + .out_rect.height = 1080, + }, + { + .subdev_type = CRL_SUBDEV_TYPE_BINNER, + .in_rect.left = 0, + .in_rect.top = 0, + .in_rect.width = 1920, + .in_rect.height = 1080, + .out_rect.left = 0, + .out_rect.top = 0, + .out_rect.width = 1280, + .out_rect.height = 720, + }, +}; + +static struct crl_subdev_rect_rep adv7481_VGA_rects[] = { + { + .subdev_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .in_rect.left = 0, + .in_rect.top = 0, + .in_rect.width = 1920, + .in_rect.height = 1080, + .out_rect.left = 0, + .out_rect.top = 0, + .out_rect.width = 1920, + .out_rect.height = 1080, + }, + { + .subdev_type = CRL_SUBDEV_TYPE_BINNER, + .in_rect.left = 0, + .in_rect.top = 0, + .in_rect.width = 1920, + .in_rect.height = 1080, + .out_rect.left = 0, + .out_rect.top = 0, + .out_rect.width = 640, + .out_rect.height = 480, + }, +}; + +static struct crl_subdev_rect_rep adv7481_1080i_rects[] = { + { + .subdev_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .in_rect.left = 0, + .in_rect.top = 0, + .in_rect.width = 1920, + .in_rect.height = 1080, + .out_rect.left = 0, + .out_rect.top = 0, + .out_rect.width = 1920, + .out_rect.height = 1080, + }, + { + .subdev_type = CRL_SUBDEV_TYPE_BINNER, + .in_rect.left = 0, + .in_rect.top = 0, + .in_rect.width = 1920, + .in_rect.height = 1080, + .out_rect.left = 0, + .out_rect.top = 0, + .out_rect.width = 1920, + .out_rect.height = 540, + }, +}; + +static struct crl_subdev_rect_rep adv7481_480i_rects[] = { + { + .subdev_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .in_rect.left = 0, + .in_rect.top = 0, + .in_rect.width = 1920, + .in_rect.height = 1080, + .out_rect.left = 0, + .out_rect.top = 0, + .out_rect.width = 1920, + .out_rect.height = 1080, + }, + { + .subdev_type = CRL_SUBDEV_TYPE_BINNER, + .in_rect.left = 0, + .in_rect.top = 0, + .in_rect.width = 1920, + 
.in_rect.height = 1080, + .out_rect.left = 0, + .out_rect.top = 0, + .out_rect.width = 720, + .out_rect.height = 240, + }, +}; + +static struct crl_subdev_rect_rep adv7481_576p_rects[] = { + { + .subdev_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .in_rect.left = 0, + .in_rect.top = 0, + .in_rect.width = 1920, + .in_rect.height = 1080, + .out_rect.left = 0, + .out_rect.top = 0, + .out_rect.width = 1920, + .out_rect.height = 1080, + }, + { + .subdev_type = CRL_SUBDEV_TYPE_BINNER, + .in_rect.left = 0, + .in_rect.top = 0, + .in_rect.width = 1920, + .in_rect.height = 1080, + .out_rect.left = 0, + .out_rect.top = 0, + .out_rect.width = 720, + .out_rect.height = 576, + }, +}; + +static struct crl_subdev_rect_rep adv7481_576i_rects[] = { + { + .subdev_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .in_rect.left = 0, + .in_rect.top = 0, + .in_rect.width = 1920, + .in_rect.height = 1080, + .out_rect.left = 0, + .out_rect.top = 0, + .out_rect.width = 1920, + .out_rect.height = 1080, + }, + { + .subdev_type = CRL_SUBDEV_TYPE_BINNER, + .in_rect.left = 0, + .in_rect.top = 0, + .in_rect.width = 1920, + .in_rect.height = 1080, + .out_rect.left = 0, + .out_rect.top = 0, + .out_rect.width = 720, + .out_rect.height = 288, + }, +}; +static struct crl_mode_rep adv7481_modes[] = { + { + .sd_rects_items = ARRAY_SIZE(adv7481_1080p_rects), + .sd_rects = adv7481_1080p_rects, + .binn_hor = 1, + .binn_vert = 1, + .scale_m = 1, + .width = 1920, + .height = 1080, + .comp_items = 0, + .ctrl_data = 0, + .mode_regs_items = ARRAY_SIZE(adv7481_mode_1080p), + .mode_regs = adv7481_mode_1080p, + }, + { + .sd_rects_items = ARRAY_SIZE(adv7481_720p_rects), + .sd_rects = adv7481_720p_rects, + .binn_hor = 1, + .binn_vert = 1, + .scale_m = 1, + .width = 1280, + .height = 720, + .comp_items = 0, + .ctrl_data = 0, + .mode_regs_items = ARRAY_SIZE(adv7481_mode_720p), + .mode_regs = adv7481_mode_720p, + }, + { + .sd_rects_items = ARRAY_SIZE(adv7481_VGA_rects), + .sd_rects = adv7481_VGA_rects, + .binn_hor = 3, + .binn_vert = 2, 
+ .scale_m = 1, + .width = 640, + .height = 480, + .comp_items = 0, + .ctrl_data = 0, + .mode_regs_items = ARRAY_SIZE(adv7481_mode_VGA), + .mode_regs = adv7481_mode_VGA, + }, + { + .sd_rects_items = ARRAY_SIZE(adv7481_1080i_rects), + .sd_rects = adv7481_1080i_rects, + .binn_hor = 1, + .binn_vert = 2, + .scale_m = 1, + .width = 1920, + .height = 540, + .comp_items = 0, + .ctrl_data = 0, + .mode_regs_items = ARRAY_SIZE(adv7481_mode_1080i), + .mode_regs = adv7481_mode_1080i, + }, + { + .sd_rects_items = ARRAY_SIZE(adv7481_480i_rects), + .sd_rects = adv7481_480i_rects, + .binn_hor = 2, + .binn_vert = 4, + .scale_m = 1, + .width = 720, + .height = 240, + .comp_items = 0, + .ctrl_data = 0, + .mode_regs_items = ARRAY_SIZE(adv7481_mode_480i), + .mode_regs = adv7481_mode_480i, + }, + { + .sd_rects_items = ARRAY_SIZE(adv7481_576p_rects), + .sd_rects = adv7481_576p_rects, + .binn_hor = 2, + .binn_vert = 1, + .scale_m = 1, + .width = 720, + .height = 576, + .comp_items = 0, + .ctrl_data = 0, + .mode_regs_items = ARRAY_SIZE(adv7481_mode_576p), + .mode_regs = adv7481_mode_576p, + }, + { + .sd_rects_items = ARRAY_SIZE(adv7481_576i_rects), + .sd_rects = adv7481_576i_rects, + .binn_hor = 2, + .binn_vert = 3, + .scale_m = 1, + .width = 720, + .height = 288, + .comp_items = 0, + .ctrl_data = 0, + .mode_regs_items = 0, + .mode_regs = adv7481_mode_576i, + }, +}; + +static struct crl_sensor_subdev_config adv7481_sensor_subdevs[] = { + { + .subdev_type = CRL_SUBDEV_TYPE_BINNER, + .name = "adv7481 binner", + }, + { + .subdev_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .name = "adv7481 pixel array", + }, +}; + +static struct crl_sensor_limits adv7481_sensor_limits = { + .x_addr_min = 0, + .y_addr_min = 0, + .x_addr_max = 1920, + .y_addr_max = 1080, + .min_frame_length_lines = 160, + .max_frame_length_lines = 65535, + .min_line_length_pixels = 6024, + .max_line_length_pixels = 32752, + .scaler_m_min = 1, + .scaler_m_max = 1, + .scaler_n_min = 1, + .scaler_n_max = 1, + .min_even_inc = 1, + 
.max_even_inc = 1, + .min_odd_inc = 1, + .max_odd_inc = 1, +}; + +static struct crl_csi_data_fmt adv7481_crl_csi_data_fmt[] = { + { + .code = ICI_FORMAT_UYVY, + .pixel_order = CRL_PIXEL_ORDER_GRBG, + .bits_per_pixel = 16, + .regs_items = ARRAY_SIZE(adv7481_mode_1080p), + .regs = adv7481_mode_1080p, /* default yuv422 format */ + }, +}; + +static struct crl_ctrl_data adv7481_ctrls[] = { + { + .sd_type = CRL_SUBDEV_TYPE_BINNER, + .op_type = CRL_CTRL_SET_OP, + .context = SENSOR_IDLE, + .ctrl_id = ICI_EXT_SD_PARAM_ID_LINK_FREQ, + .name = "CTRL_ID_LINK_FREQ", + .type = CRL_CTRL_TYPE_MENU_INT, + .data.int_menu.def = 0, + .data.int_menu.max = ARRAY_SIZE(adv7481_pll_configurations) - 1, + .flags = 0, + .impact = CRL_IMPACTS_NO_IMPACT, + .regs_items = 0, + .regs = 0, + .dep_items = 0, + .dep_ctrls = 0, + }, + { + .sd_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .op_type = CRL_CTRL_GET_OP, + .context = SENSOR_POWERED_ON, + .ctrl_id = ICI_EXT_SD_PARAM_ID_PIXEL_RATE, + .name = "CTRL_ID_PIXEL_RATE_PA", + .type = CRL_CTRL_TYPE_INTEGER, + .data.std_data.min = 0, + .data.std_data.max = 0, + .data.std_data.step = 1, + .data.std_data.def = 0, + .flags = 0, + .impact = CRL_IMPACTS_NO_IMPACT, + .regs_items = 0, + .regs = 0, + .dep_items = 0, + .dep_ctrls = 0, + }, + { + .sd_type = CRL_SUBDEV_TYPE_BINNER, + .op_type = CRL_CTRL_GET_OP, + .context = SENSOR_POWERED_ON, + .ctrl_id = ICI_EXT_SD_PARAM_ID_PIXEL_RATE, + .name = "CTRL_ID_PIXEL_RATE_CSI", + .type = CRL_CTRL_TYPE_INTEGER, + .data.std_data.min = 0, + .data.std_data.max = 0, + .data.std_data.step = 1, + .data.std_data.def = 0, + .flags = 0, + .impact = CRL_IMPACTS_NO_IMPACT, + .regs_items = 0, + .regs = 0, + .dep_items = 0, + .dep_ctrls = 0, + }, +}; + +/* Power items, they are enabled in the order they are listed here */ +static struct crl_power_seq_entity adv7481_power_items[] = { + { + .type = CRL_POWER_ETY_CLK_FRAMEWORK, + .val = 24000000, + }, + { + .type = CRL_POWER_ETY_GPIO_FROM_PDATA, + .val = 1, + }, +}; + +static struct 
crl_sensor_configuration adv7481_crl_configuration = { + + .power_items = ARRAY_SIZE(adv7481_power_items), + .power_entities = adv7481_power_items, + + .powerup_regs_items = ARRAY_SIZE(adv7481_powerup_regset), + .powerup_regs = adv7481_powerup_regset, + + .poweroff_regs_items = ARRAY_SIZE(adv7481_streamoff_regs), + .poweroff_regs = adv7481_streamoff_regs, + + .id_reg_items = ARRAY_SIZE(adv7481_sensor_detect_regset), + .id_regs = adv7481_sensor_detect_regset, + + .subdev_items = ARRAY_SIZE(adv7481_sensor_subdevs), + .subdevs = adv7481_sensor_subdevs, + + .sensor_limits = &adv7481_sensor_limits, + + .pll_config_items = ARRAY_SIZE(adv7481_pll_configurations), + .pll_configs = adv7481_pll_configurations, + + .modes_items = ARRAY_SIZE(adv7481_modes), + .modes = adv7481_modes, + + .streamon_regs_items = ARRAY_SIZE(adv7481_streamon_regs), + .streamon_regs = adv7481_streamon_regs, + + .streamoff_regs_items = ARRAY_SIZE(adv7481_streamoff_regs), + .streamoff_regs = adv7481_streamoff_regs, + + .ctrl_items = ARRAY_SIZE(adv7481_ctrls), + .ctrl_bank = adv7481_ctrls, + + .csi_fmts_items = ARRAY_SIZE(adv7481_crl_csi_data_fmt), + .csi_fmts = adv7481_crl_csi_data_fmt, +}; + +#endif /* __CRLMODULE_ADV7481_CONFIGURATION_H_ */ diff --git a/drivers/media/i2c/crlmodule-lite/crl_adv7481_cvbs_configuration.h b/drivers/media/i2c/crlmodule-lite/crl_adv7481_cvbs_configuration.h new file mode 100644 index 0000000000000..b077c5cb09c4a --- /dev/null +++ b/drivers/media/i2c/crlmodule-lite/crl_adv7481_cvbs_configuration.h @@ -0,0 +1,282 @@ +/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0) */ +/* + * Copyright (C) 2018 Intel Corporation + */ + +#ifndef __CRLMODULE_ADV7481_CVBS_CONFIGURATION_H_ +#define __CRLMODULE_ADV7481_CVBS_CONFIGURATION_H_ + +#include "crlmodule-sensor-ds.h" + +static struct crl_register_write_rep adv7481_cvbs_powerup_regset[] = { + {0x0E, CRL_REG_LEN_08BIT, 0xFF, 0xE0}, /* LLC/PIX/AUD/ + SPI PINS TRISTATED */ + {0x0F, CRL_REG_LEN_08BIT, 0x00, 0xF2}, /* Exit Power Down 
Mode */ + {0x52, CRL_REG_LEN_08BIT, 0xC0, 0xF2}, /* ADI Required Write */ + {0x00, CRL_REG_LEN_08BIT, 0x0E, 0xF2}, /* INSEL = CVBS in on Ain 1 */ + {0x0E, CRL_REG_LEN_08BIT, 0x80, 0xF2}, /* ADI Required Write */ + {0x9C, CRL_REG_LEN_08BIT, 0x00, 0xF2}, /* ADI Required Write */ + {0x9C, CRL_REG_LEN_08BIT, 0xFF, 0xF2}, /* ADI Required Write */ + {0x0E, CRL_REG_LEN_08BIT, 0x00, 0xF2}, /* ADI Required Write */ + {0x5A, CRL_REG_LEN_08BIT, 0x90, 0xF2}, /* ADI Required Write */ + {0x60, CRL_REG_LEN_08BIT, 0xA0, 0xF2}, /* ADI Required Write */ + {0x00, CRL_REG_LEN_DELAY, 0x19, 0x00}, /* Delay 25*/ + {0x60, CRL_REG_LEN_08BIT, 0xB0, 0xF2}, /* ADI Required Write */ + {0x5F, CRL_REG_LEN_08BIT, 0xA8, 0xF2}, + {0x0E, CRL_REG_LEN_08BIT, 0x80, 0xF2}, /* ADI Required Write */ + {0xB6, CRL_REG_LEN_08BIT, 0x08, 0xF2}, /* ADI Required Write */ + {0xC0, CRL_REG_LEN_08BIT, 0xA0, 0xF2}, /* ADI Required Write */ + {0xD9, CRL_REG_LEN_08BIT, 0x44, 0xF2}, + {0x0E, CRL_REG_LEN_08BIT, 0x40, 0xF2}, + {0xE0, CRL_REG_LEN_08BIT, 0x01, 0xF2}, /* Fast Lock enable*/ + {0x0E, CRL_REG_LEN_08BIT, 0x00, 0xF2}, /* ADI Required Write */ + {0x80, CRL_REG_LEN_08BIT, 0x51, 0xF2}, /* ADI Required Write */ + {0x81, CRL_REG_LEN_08BIT, 0x51, 0xF2}, /* ADI Required Write */ + {0x82, CRL_REG_LEN_08BIT, 0x68, 0xF2}, /* ADI Required Write */ + {0x03, CRL_REG_LEN_08BIT, 0x42, 0xF2}, /* Tri-S Output Drivers, + PwrDwn 656 pads */ + {0x04, CRL_REG_LEN_08BIT, 0x07, 0xF2}, /* Power-up INTRQ pad, + & Enable SFL */ + {0x13, CRL_REG_LEN_08BIT, 0x00, 0xF2}, /* ADI Required Write */ + {0x17, CRL_REG_LEN_08BIT, 0x41, 0xF2}, /* Select SH1 */ + {0x31, CRL_REG_LEN_08BIT, 0x12, 0xF2}, /* ADI Required Write */ + {0x10, CRL_REG_LEN_08BIT | CRL_REG_READ_AND_UPDATE, 0x70, 0xE0, 0x70 }, + /* Enable 1-Lane MIPI Tx, + enable pixel output and route + SD through Pixel port */ + {0x00, CRL_REG_LEN_08BIT, 0x81, 0x90}, /* Enable 1-lane MIPI */ + {0x00, CRL_REG_LEN_08BIT, 0xA1, 0x90}, /* Set Auto DPHY Timing */ + {0xF0, CRL_REG_LEN_08BIT, 0x00, 
0x94}, /* ADI Required Write */ + {0xD2, CRL_REG_LEN_08BIT, 0x40, 0x90}, /* ADI Required Write */ + {0xC4, CRL_REG_LEN_08BIT, 0x0A, 0x90}, /* ADI Required Write */ + {0x71, CRL_REG_LEN_08BIT, 0x33, 0x90}, /* ADI Required Write */ + {0x72, CRL_REG_LEN_08BIT, 0x11, 0x90}, /* ADI Required Write */ + {0xF0, CRL_REG_LEN_08BIT, 0x00, 0x90}, /* i2c_dphy_pwdn - 1'b0 */ + {0x31, CRL_REG_LEN_08BIT, 0x82, 0x90}, /* ADI Required Write */ + {0x1E, CRL_REG_LEN_08BIT, 0xC0, 0x90}, /* ADI Required Write */ +}; + + +static struct crl_register_write_rep adv7481_cvbs_streamon_regs[] = { + {0xC1, CRL_REG_LEN_08BIT, 0x2B, 0x90}, //ADI Required Write + {0x00, CRL_REG_LEN_DELAY, 0x01, 0x00}, + {0xDA, CRL_REG_LEN_08BIT, 0x01, 0x90}, //i2c_mipi_pll_en - 1'b1 + {0x00, CRL_REG_LEN_DELAY, 0x02, 0x00}, + {0x00, CRL_REG_LEN_08BIT, 0x21, 0x90}, //Power-up CSI-TX 21 + {0x00, CRL_REG_LEN_DELAY, 0x01, 0x00}, + {0x31, CRL_REG_LEN_08BIT, 0x80, 0x90}, //ADI Required Write +}; + +static struct crl_register_write_rep adv7481_cvbs_streamoff_regs[] = { + {0x31, CRL_REG_LEN_08BIT, 0x82, 0x90}, /* ADI Recommended Write */ + {0x1E, CRL_REG_LEN_08BIT, 0x00, 0x90}, /* Reset the clock Lane */ + {0x00, CRL_REG_LEN_08BIT, 0x81, 0x90}, + {0xDA, CRL_REG_LEN_08BIT, 0x00, 0x90}, /* i2c_mipi_pll_en - 1'b0 Disable MIPI PLL */ + {0xC1, CRL_REG_LEN_08BIT, 0x3B, 0x90}, +}; + + +static struct crl_pll_configuration adv7481_cvbs_pll_configurations[] = { + { + .input_clk = 286363636, + .op_sys_clk = 216000000, + .bitsperpixel = 16, + .pixel_rate_csi = 130000000, + .pixel_rate_pa = 130000000, + .comp_items = 0, + .ctrl_data = 0, + .pll_regs_items = 0, + .pll_regs = NULL, + }, + { + .input_clk = 24000000, + .op_sys_clk = 130000000, + .bitsperpixel = 16, + .pixel_rate_csi = 130000000, + .pixel_rate_pa = 130000000, + .comp_items = 0, + .ctrl_data = 0, + .pll_regs_items = 0, + .pll_regs = NULL, + }, +}; + +static struct crl_subdev_rect_rep adv7481_cvbs_ntsc_rects[] = { + { + .subdev_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + 
.in_rect.left = 0, + .in_rect.top = 0, + .in_rect.width = 720, + .in_rect.height = 240, + .out_rect.left = 0, + .out_rect.top = 0, + .out_rect.width = 720, + .out_rect.height = 240, + }, + { + .subdev_type = CRL_SUBDEV_TYPE_BINNER, + .in_rect.left = 0, + .in_rect.top = 0, + .in_rect.width = 720, + .in_rect.height = 240, + .out_rect.left = 0, + .out_rect.top = 0, + .out_rect.width = 720, + .out_rect.height = 240, + }, +}; + +static struct crl_mode_rep adv7481_cvbs_modes[] = { + { + .sd_rects_items = ARRAY_SIZE(adv7481_cvbs_ntsc_rects), + .sd_rects = adv7481_cvbs_ntsc_rects, + .binn_hor = 1, + .binn_vert = 1, + .scale_m = 1, + .width = 720, + .height = 240, + .comp_items = 0, + .ctrl_data = 0, + .mode_regs_items = 0, + .mode_regs = 0, + }, +}; + +static struct crl_sensor_subdev_config adv7481_cvbs_sensor_subdevs[] = { + { + .subdev_type = CRL_SUBDEV_TYPE_BINNER, + .name = "adv7481 cvbs binner", + }, + { + .subdev_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .name = "adv7481 cvbs pixel array", + }, +}; + +static struct crl_sensor_limits adv7481_cvbs_sensor_limits = { + .x_addr_min = 0, + .y_addr_min = 0, + .x_addr_max = 720, + .y_addr_max = 240, + .min_frame_length_lines = 160, + .max_frame_length_lines = 65535, + .min_line_length_pixels = 6024, + .max_line_length_pixels = 32752, + .scaler_m_min = 1, + .scaler_m_max = 1, + .scaler_n_min = 1, + .scaler_n_max = 1, + .min_even_inc = 1, + .max_even_inc = 1, + .min_odd_inc = 1, + .max_odd_inc = 1, +}; + +static struct crl_csi_data_fmt adv7481_cvbs_crl_csi_data_fmt[] = { + { + .code = ICI_FORMAT_UYVY, + .pixel_order = CRL_PIXEL_ORDER_GRBG, + .bits_per_pixel = 16, + .regs_items = 0, + .regs = NULL, + }, +}; + +static struct crl_ctrl_data adv7481_cvbs_ctrls[] = { + { + .sd_type = CRL_SUBDEV_TYPE_BINNER, + .op_type = CRL_CTRL_SET_OP, + .context = SENSOR_IDLE, + .ctrl_id = ICI_EXT_SD_PARAM_ID_LINK_FREQ, + .name = "CTRL_ID_LINK_FREQ", + .type = CRL_CTRL_TYPE_MENU_INT, + .data.int_menu.def = 0, + .data.int_menu.max = 
ARRAY_SIZE(adv7481_cvbs_pll_configurations) - 1, + .flags = 0, + .impact = CRL_IMPACTS_NO_IMPACT, + .regs_items = 0, + .regs = 0, + .dep_items = 0, + .dep_ctrls = 0, + }, + { + .sd_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .op_type = CRL_CTRL_GET_OP, + .context = SENSOR_POWERED_ON, + .ctrl_id = ICI_EXT_SD_PARAM_ID_PIXEL_RATE, + .name = "CTRL_ID_PIXEL_RATE_PA", + .type = CRL_CTRL_TYPE_INTEGER, + .data.std_data.min = 0, + .data.std_data.max = 0, + .data.std_data.step = 1, + .data.std_data.def = 0, + .flags = 0, + .impact = CRL_IMPACTS_NO_IMPACT, + .regs_items = 0, + .regs = 0, + .dep_items = 0, + .dep_ctrls = 0, + }, + { + .sd_type = CRL_SUBDEV_TYPE_BINNER, + .op_type = CRL_CTRL_GET_OP, + .context = SENSOR_POWERED_ON, + .ctrl_id = ICI_EXT_SD_PARAM_ID_PIXEL_RATE, + .name = "CTRL_ID_PIXEL_RATE_CSI", + .type = CRL_CTRL_TYPE_INTEGER, + .data.std_data.min = 0, + .data.std_data.max = 0, + .data.std_data.step = 1, + .data.std_data.def = 0, + .flags = 0, + .impact = CRL_IMPACTS_NO_IMPACT, + .regs_items = 0, + .regs = 0, + .dep_items = 0, + .dep_ctrls = 0, + }, +}; + +static struct crl_sensor_configuration adv7481_cvbs_crl_configuration = { + .sensor_init = NULL, + .sensor_cleanup = NULL, + + .onetime_init_regs_items = 0, //one time initialization is done by HDMI part + .onetime_init_regs = NULL, + + .powerup_regs_items = ARRAY_SIZE(adv7481_cvbs_powerup_regset), + .powerup_regs = adv7481_cvbs_powerup_regset, + + .poweroff_regs_items = ARRAY_SIZE(adv7481_cvbs_streamoff_regs), + .poweroff_regs = adv7481_cvbs_streamoff_regs, + + .id_reg_items = 0, + .id_regs = NULL, + + .subdev_items = ARRAY_SIZE(adv7481_cvbs_sensor_subdevs), + .subdevs = adv7481_cvbs_sensor_subdevs, + + .sensor_limits = &adv7481_cvbs_sensor_limits, + + .pll_config_items = ARRAY_SIZE(adv7481_cvbs_pll_configurations), + .pll_configs = adv7481_cvbs_pll_configurations, + + .modes_items = ARRAY_SIZE(adv7481_cvbs_modes), + .modes = adv7481_cvbs_modes, + + .streamon_regs_items = ARRAY_SIZE(adv7481_cvbs_streamon_regs), + 
.streamon_regs = adv7481_cvbs_streamon_regs, + + .streamoff_regs_items = ARRAY_SIZE(adv7481_cvbs_streamoff_regs), + .streamoff_regs = adv7481_cvbs_streamoff_regs, + + .ctrl_items = ARRAY_SIZE(adv7481_cvbs_ctrls), + .ctrl_bank = adv7481_cvbs_ctrls, + + .csi_fmts_items = ARRAY_SIZE(adv7481_cvbs_crl_csi_data_fmt), + .csi_fmts = adv7481_cvbs_crl_csi_data_fmt, + + .addr_len = CRL_ADDR_7BIT, +}; + +#endif /* __CRLMODULE_ADV7481_CVBS_CONFIGURATION_H_ */ diff --git a/drivers/media/i2c/crlmodule-lite/crl_adv7481_eval_configuration.h b/drivers/media/i2c/crlmodule-lite/crl_adv7481_eval_configuration.h new file mode 100644 index 0000000000000..575d2db42edc3 --- /dev/null +++ b/drivers/media/i2c/crlmodule-lite/crl_adv7481_eval_configuration.h @@ -0,0 +1,531 @@ +/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0) */ +/* + * Copyright (C) 2018 Intel Corporation + */ + +#ifndef __CRLMODULE_ADV7481_EVAL_CONFIGURATION_H_ +#define __CRLMODULE_ADV7481_EVAL_CONFIGURATION_H_ + +#include "crlmodule-sensor-ds.h" + + +struct crl_ctrl_data_pair ctrl_data_lanes[] = { + { + .ctrl_id = ICI_EXT_SD_PARAM_ID_MIPI_LANES, + .data = 4, + }, + { + .ctrl_id = ICI_EXT_SD_PARAM_ID_MIPI_LANES, + .data = 2, + }, +}; +static struct crl_pll_configuration adv7481_eval_pll_configurations[] = { + { + .input_clk = 24000000, + .op_sys_clk = 400000000, + .bitsperpixel = 16, + .pixel_rate_csi = 800000000, + .pixel_rate_pa = 800000000, + .comp_items = 0, + .ctrl_data = 0, + .pll_regs_items = 0, + .pll_regs = NULL, + }, + { + .input_clk = 24000000, + .op_sys_clk = 400000000, + .bitsperpixel = 24, + .pixel_rate_csi = 800000000, + .pixel_rate_pa = 800000000, + .comp_items = 0, + .ctrl_data = 0, + .pll_regs_items = 0, + .pll_regs = NULL, + }, +}; + +static struct crl_subdev_rect_rep adv7481_eval_1080p_rects[] = { + { + .subdev_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .in_rect.left = 0, + .in_rect.top = 0, + .in_rect.width = 1920, + .in_rect.height = 1080, + .out_rect.left = 0, + .out_rect.top = 0, + .out_rect.width = 
1920, + .out_rect.height = 1080, + }, + { + .subdev_type = CRL_SUBDEV_TYPE_BINNER, + .in_rect.left = 0, + .in_rect.top = 0, + .in_rect.width = 1920, + .in_rect.height = 1080, + .out_rect.left = 0, + .out_rect.top = 0, + .out_rect.width = 1920, + .out_rect.height = 1080, + }, +}; + +static struct crl_subdev_rect_rep adv7481_eval_720p_rects[] = { + { + .subdev_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .in_rect.left = 0, + .in_rect.top = 0, + .in_rect.width = 1920, + .in_rect.height = 1080, + .out_rect.left = 0, + .out_rect.top = 0, + .out_rect.width = 1920, + .out_rect.height = 1080, + }, + { + .subdev_type = CRL_SUBDEV_TYPE_BINNER, + .in_rect.left = 0, + .in_rect.top = 0, + .in_rect.width = 1920, + .in_rect.height = 1080, + .out_rect.left = 0, + .out_rect.top = 0, + .out_rect.width = 1280, + .out_rect.height = 720, + }, +}; + +static struct crl_subdev_rect_rep adv7481_eval_VGA_rects[] = { + { + .subdev_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .in_rect.left = 0, + .in_rect.top = 0, + .in_rect.width = 1920, + .in_rect.height = 1080, + .out_rect.left = 0, + .out_rect.top = 0, + .out_rect.width = 1920, + .out_rect.height = 1080, + }, + { + .subdev_type = CRL_SUBDEV_TYPE_BINNER, + .in_rect.left = 0, + .in_rect.top = 0, + .in_rect.width = 1920, + .in_rect.height = 1080, + .out_rect.left = 0, + .out_rect.top = 0, + .out_rect.width = 640, + .out_rect.height = 480, + }, +}; + +static struct crl_subdev_rect_rep adv7481_eval_1080i_rects[] = { + { + .subdev_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .in_rect.left = 0, + .in_rect.top = 0, + .in_rect.width = 1920, + .in_rect.height = 1080, + .out_rect.left = 0, + .out_rect.top = 0, + .out_rect.width = 1920, + .out_rect.height = 1080, + }, + { + .subdev_type = CRL_SUBDEV_TYPE_BINNER, + .in_rect.left = 0, + .in_rect.top = 0, + .in_rect.width = 1920, + .in_rect.height = 1080, + .out_rect.left = 0, + .out_rect.top = 0, + .out_rect.width = 1920, + .out_rect.height = 540, + }, +}; + +static struct crl_subdev_rect_rep adv7481_eval_480i_rects[] = { 
+ { + .subdev_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .in_rect.left = 0, + .in_rect.top = 0, + .in_rect.width = 1920, + .in_rect.height = 1080, + .out_rect.left = 0, + .out_rect.top = 0, + .out_rect.width = 1920, + .out_rect.height = 1080, + }, + { + .subdev_type = CRL_SUBDEV_TYPE_BINNER, + .in_rect.left = 0, + .in_rect.top = 0, + .in_rect.width = 1920, + .in_rect.height = 1080, + .out_rect.left = 0, + .out_rect.top = 0, + .out_rect.width = 720, + .out_rect.height = 240, + }, +}; + +static struct crl_subdev_rect_rep adv7481_eval_576p_rects[] = { + { + .subdev_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .in_rect.left = 0, + .in_rect.top = 0, + .in_rect.width = 1920, + .in_rect.height = 1080, + .out_rect.left = 0, + .out_rect.top = 0, + .out_rect.width = 1920, + .out_rect.height = 1080, + }, + { + .subdev_type = CRL_SUBDEV_TYPE_BINNER, + .in_rect.left = 0, + .in_rect.top = 0, + .in_rect.width = 1920, + .in_rect.height = 1080, + .out_rect.left = 0, + .out_rect.top = 0, + .out_rect.width = 720, + .out_rect.height = 576, + }, +}; + +static struct crl_subdev_rect_rep adv7481_eval_576i_rects[] = { + { + .subdev_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .in_rect.left = 0, + .in_rect.top = 0, + .in_rect.width = 1920, + .in_rect.height = 1080, + .out_rect.left = 0, + .out_rect.top = 0, + .out_rect.width = 1920, + .out_rect.height = 1080, + }, + { + .subdev_type = CRL_SUBDEV_TYPE_BINNER, + .in_rect.left = 0, + .in_rect.top = 0, + .in_rect.width = 1920, + .in_rect.height = 1080, + .out_rect.left = 0, + .out_rect.top = 0, + .out_rect.width = 720, + .out_rect.height = 288, + }, +}; +static struct crl_mode_rep adv7481_eval_modes[] = { + { + .sd_rects_items = ARRAY_SIZE(adv7481_eval_1080p_rects), + .sd_rects = adv7481_eval_1080p_rects, + .binn_hor = 1, + .binn_vert = 1, + .scale_m = 1, + .width = 1920, + .height = 1080, + .comp_items = 1, + .ctrl_data = &ctrl_data_lanes[0], + .mode_regs_items = 0, + .mode_regs = NULL, + }, + { + .sd_rects_items = ARRAY_SIZE(adv7481_eval_720p_rects), + 
.sd_rects = adv7481_eval_720p_rects, + .binn_hor = 1, + .binn_vert = 1, + .scale_m = 1, + .width = 1280, + .height = 720, + .comp_items = 1, + .ctrl_data = &ctrl_data_lanes[0], + .mode_regs_items = 0, + .mode_regs = NULL, + }, + { + .sd_rects_items = ARRAY_SIZE(adv7481_eval_VGA_rects), + .sd_rects = adv7481_eval_VGA_rects, + .binn_hor = 3, + .binn_vert = 2, + .scale_m = 1, + .width = 640, + .height = 480, + .comp_items = 1, + .ctrl_data = &ctrl_data_lanes[1], + .mode_regs_items = 0, + .mode_regs = NULL, + }, + { + .sd_rects_items = ARRAY_SIZE(adv7481_eval_1080i_rects), + .sd_rects = adv7481_eval_1080i_rects, + .binn_hor = 1, + .binn_vert = 2, + .scale_m = 1, + .width = 1920, + .height = 540, + .comp_items = 1, + .ctrl_data = &ctrl_data_lanes[1], + .mode_regs_items = 0, + .mode_regs = NULL, + }, + { + .sd_rects_items = ARRAY_SIZE(adv7481_eval_480i_rects), + .sd_rects = adv7481_eval_480i_rects, + .binn_hor = 2, + .binn_vert = 4, + .scale_m = 1, + .width = 720, + .height = 240, + .comp_items = 1, + .ctrl_data = &ctrl_data_lanes[1], + .mode_regs_items = 0, + .mode_regs = NULL, + }, + { + .sd_rects_items = ARRAY_SIZE(adv7481_eval_576p_rects), + .sd_rects = adv7481_eval_576p_rects, + .binn_hor = 2, + .binn_vert = 1, + .scale_m = 1, + .width = 720, + .height = 576, + .comp_items = 1, + .ctrl_data = &ctrl_data_lanes[1], + .mode_regs_items = 0, + .mode_regs = NULL, + }, + { + .sd_rects_items = ARRAY_SIZE(adv7481_eval_576i_rects), + .sd_rects = adv7481_eval_576i_rects, + .binn_hor = 2, + .binn_vert = 3, + .scale_m = 1, + .width = 720, + .height = 288, + .comp_items = 1, + .ctrl_data = &ctrl_data_lanes[1], + .mode_regs_items = 0, + .mode_regs = NULL, + }, +}; + +static struct crl_sensor_subdev_config adv7481_eval_sensor_subdevs[] = { + { + .subdev_type = CRL_SUBDEV_TYPE_BINNER, + .name = "adv7481 binner", + }, + { + .subdev_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .name = "adv7481 pixel array", + }, +}; + +static struct crl_sensor_subdev_config adv7481b_eval_sensor_subdevs[] = { 
+ { + .subdev_type = CRL_SUBDEV_TYPE_BINNER, + .name = "adv7481b binner", + }, + { + .subdev_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .name = "adv7481b pixel array", + }, +}; + +static struct crl_sensor_limits adv7481_eval_sensor_limits = { + .x_addr_min = 0, + .y_addr_min = 0, + .x_addr_max = 1920, + .y_addr_max = 1080, + .min_frame_length_lines = 160, + .max_frame_length_lines = 65535, + .min_line_length_pixels = 6024, + .max_line_length_pixels = 32752, + .scaler_m_min = 1, + .scaler_m_max = 1, + .scaler_n_min = 1, + .scaler_n_max = 1, + .min_even_inc = 1, + .max_even_inc = 1, + .min_odd_inc = 1, + .max_odd_inc = 1, +}; + +static struct crl_csi_data_fmt adv7481_eval_crl_csi_data_fmt[] = { + { + .code = ICI_FORMAT_YUYV, + .pixel_order = CRL_PIXEL_ORDER_GRBG, + .bits_per_pixel = 16, + .regs_items = 0, + .regs = NULL, + }, + { + .code = ICI_FORMAT_UYVY, + .pixel_order = CRL_PIXEL_ORDER_GRBG, + .bits_per_pixel = 16, + .regs_items = 0, + .regs = NULL, + }, + { + .code = ICI_FORMAT_RGB565, + .pixel_order = CRL_PIXEL_ORDER_GRBG, + .bits_per_pixel = 16, + .regs_items = 0, + .regs = NULL, + }, + { + .code = ICI_FORMAT_RGB888, + .pixel_order = CRL_PIXEL_ORDER_GRBG, + .bits_per_pixel = 24, + .regs_items = 0, + .regs = NULL, + }, +}; + +static struct crl_ctrl_data adv7481_eval_ctrls[] = { + { + .sd_type = CRL_SUBDEV_TYPE_BINNER, + .op_type = CRL_CTRL_SET_OP, + .context = SENSOR_IDLE, + .ctrl_id = ICI_EXT_SD_PARAM_ID_LINK_FREQ, + .name = "CTRL_ID_LINK_FREQ", + .type = CRL_CTRL_TYPE_MENU_INT, + .data.int_menu.def = 0, + .data.int_menu.max = ARRAY_SIZE(adv7481_eval_pll_configurations) - 1, + .flags = 0, + .impact = CRL_IMPACTS_NO_IMPACT, + .regs_items = 0, + .regs = 0, + .dep_items = 0, + .dep_ctrls = 0, + }, + { + .sd_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .op_type = CRL_CTRL_GET_OP, + .context = SENSOR_POWERED_ON, + .ctrl_id = ICI_EXT_SD_PARAM_ID_PIXEL_RATE, + .name = "CTRL_ID_PIXEL_RATE_PA", + .type = CRL_CTRL_TYPE_INTEGER, + .data.std_data.min = 0, + .data.std_data.max = 0, + 
.data.std_data.step = 1, + .data.std_data.def = 0, + .flags = 0, + .impact = CRL_IMPACTS_NO_IMPACT, + .regs_items = 0, + .regs = 0, + .dep_items = 0, + .dep_ctrls = 0, + }, + { + .sd_type = CRL_SUBDEV_TYPE_BINNER, + .op_type = CRL_CTRL_GET_OP, + .context = SENSOR_POWERED_ON, + .ctrl_id = ICI_EXT_SD_PARAM_ID_PIXEL_RATE, + .name = "CTRL_ID_PIXEL_RATE_CSI", + .type = CRL_CTRL_TYPE_INTEGER, + .data.std_data.min = 0, + .data.std_data.max = 0, + .data.std_data.step = 1, + .data.std_data.def = 0, + .flags = 0, + .impact = CRL_IMPACTS_NO_IMPACT, + .regs_items = 0, + .regs = 0, + .dep_items = 0, + .dep_ctrls = 0, + }, + { + .sd_type = CRL_SUBDEV_TYPE_BINNER, + .op_type = CRL_CTRL_GET_OP, + .context = SENSOR_POWERED_ON, + .ctrl_id = ICI_EXT_SD_PARAM_ID_MIPI_LANES, + .name = "CTRL_ID_MIPI_LANES", + .type = CRL_CTRL_TYPE_CUSTOM, + .data.std_data.min = 2, + .data.std_data.max = 4, + .data.std_data.step = 2, + .data.std_data.def = 4, + .flags = 0, + .impact = CRL_IMPACTS_NO_IMPACT, + .regs_items = 0, + .regs = 0, + .dep_items = 0, + .dep_ctrls = 0, + .param.type = ICI_EXT_SD_PARAM_TYPE_INT32, + }, +}; + +static struct crl_sensor_configuration adv7481_eval_crl_configuration = { + + .powerup_regs_items = 0, + .powerup_regs = NULL, + + .poweroff_regs_items = 0, + .poweroff_regs = NULL, + + .id_reg_items = 0, + .id_regs = NULL, + + .subdev_items = 0, + .subdevs = adv7481_eval_sensor_subdevs, + + .sensor_limits = &adv7481_eval_sensor_limits, + + .pll_config_items = ARRAY_SIZE(adv7481_eval_pll_configurations), + .pll_configs = adv7481_eval_pll_configurations, + + .modes_items = ARRAY_SIZE(adv7481_eval_modes), + .modes = adv7481_eval_modes, + + .streamon_regs_items = 0, + .streamon_regs = NULL, + + .streamoff_regs_items = 0, + .streamoff_regs = NULL, + + .ctrl_items = ARRAY_SIZE(adv7481_eval_ctrls), + .ctrl_bank = adv7481_eval_ctrls, + + .csi_fmts_items = ARRAY_SIZE(adv7481_eval_crl_csi_data_fmt), + .csi_fmts = adv7481_eval_crl_csi_data_fmt, +}; + +static struct 
crl_sensor_configuration adv7481b_eval_crl_configuration = { + + .powerup_regs_items = 0, + .powerup_regs = NULL, + + .poweroff_regs_items = 0, + .poweroff_regs = NULL, + + .id_reg_items = 0, + .id_regs = NULL, + + .subdev_items = 0, + .subdevs = adv7481b_eval_sensor_subdevs, + + .sensor_limits = &adv7481_eval_sensor_limits, + + .pll_config_items = ARRAY_SIZE(adv7481_eval_pll_configurations), + .pll_configs = adv7481_eval_pll_configurations, + + .modes_items = ARRAY_SIZE(adv7481_eval_modes), + .modes = adv7481_eval_modes, + + .streamon_regs_items = 0, + .streamon_regs = NULL, + + .streamoff_regs_items = 0, + .streamoff_regs = NULL, + + .ctrl_items = ARRAY_SIZE(adv7481_eval_ctrls), + .ctrl_bank = adv7481_eval_ctrls, + + .csi_fmts_items = ARRAY_SIZE(adv7481_eval_crl_csi_data_fmt), + .csi_fmts = adv7481_eval_crl_csi_data_fmt, +}; + +#endif /* __CRLMODULE_ADV7481_EVAL_CONFIGURATION_H_ */ diff --git a/drivers/media/i2c/crlmodule-lite/crl_adv7481_hdmi_configuration.c b/drivers/media/i2c/crlmodule-lite/crl_adv7481_hdmi_configuration.c new file mode 100644 index 0000000000000..80f668a462ea0 --- /dev/null +++ b/drivers/media/i2c/crlmodule-lite/crl_adv7481_hdmi_configuration.c @@ -0,0 +1,624 @@ +// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0) +/* + * Copyright (C) 2018 Intel Corporation + */ + +#include +#include +#include +#include +#include +#include +#include +#include "crlmodule.h" +#include "crlmodule-regs.h" + +#define CREATE_ATTRIBUTE(attr) \ + if (device_create_file(&client->dev, &attr) != 0) { \ + dev_err(&client->dev, "ADV7481 couldn't register %s sysfs entry.\n", \ + #attr); \ + } \ + +#define REMOVE_ATTRIBUTE(attr) \ + device_remove_file(&client->dev, &attr); + +/* Size of the mondello KSV buffer in bytes */ +#define ADV7481_KSV_BUFFER_SIZE 0x80 +/* Size of a single KSV */ +#define ADV7481_KSV_SIZE 0x05 +/* Max number of devices (MAX_MONDELO_KSV_SIZE / HDCP_KSV_SIZE */ +#define ADV7481_MAX_DEVICES 0x19 + +#define ADV7481_AKSV_UPDATE_A_ST 0x08 +#define 
ADV7481_CABLE_DET_A_ST 0x40 +#define ADV7481_V_LOCKED_A_ST 0x02 +#define ADV7481_DE_REGEN_A_ST 0x01 + +#define ADV7481_GPIO 456 + +/* + * Prevents executing another hot plug reset until current one will finish + */ +static unsigned int in_hot_plug_reset = 0; + +/* + * When hot plug reset is executed, HPA bit is deasserted for 2 seconds. + * This timer is used to assert HPA bit again after that time without blocking. + */ +static struct timer_list hot_plug_reset_timer; + +static struct workqueue_struct *irq_workqueue = NULL; +static int hdmi_res_width; +static int hdmi_res_height; +static int hdmi_res_interlaced; + +static DEFINE_MUTEX(hot_plug_reset_lock); + +typedef struct { + struct work_struct work; + struct i2c_client *client; +} irq_task_t; + +/* ADV7481 HDCP B-status register */ +struct adv7481_bstatus { + union { + __u8 bstatus[2]; + struct { + __u8 device_count:7; + __u8 max_devs_exceeded:1; + __u8 depth:3; + __u8 max_cascade_exceeded:1; + __u8 hdmi_mode:1; + __u8 hdmi_reserved_2:1; + __u8 rsvd:2; + }; + }; +}; + +struct adv7481_dev_info { + struct adv7481_bstatus bstatus; + __u8 ksv[ADV7481_KSV_BUFFER_SIZE]; +}; + +struct adv7481_bcaps { + union { + __u8 bcaps; + struct { + __u8 fast_reauth:1; + __u8 features:1; + __u8 reserved:2; + __u8 fast:1; + __u8 ksv_fifo_ready:1; + __u8 repeater:1; + __u8 hdmi_reserved:1; + }; + }; +}; + +static int adv_i2c_write(struct i2c_client *client, u16 i2c_addr, u16 reg, u8 val) +{ + struct ici_ext_subdev *subdev = + i2c_get_clientdata(client); + struct crl_sensor *sensor = to_crlmodule_sensor(subdev); + + return crlmodule_write_reg(sensor, i2c_addr, reg, 1, 0xFF, val); +} + +static int adv_i2c_read(struct i2c_client *client, u16 i2c_addr, u16 reg, u32 *val) +{ + struct ici_ext_subdev *subdev = + i2c_get_clientdata(client); + struct crl_sensor *sensor = to_crlmodule_sensor(subdev); + struct crl_register_read_rep read_reg; + + read_reg.address = reg; + read_reg.len = CRL_REG_LEN_08BIT; + read_reg.dev_i2c_addr = i2c_addr; + 
return crlmodule_read_reg(sensor, read_reg, val); +} + +/* + * Writes the HDCP BKSV list & status when the system acts + * as an HDCP 1.4 repeater + */ +static long adv_write_bksv(struct i2c_client *client, + struct adv7481_dev_info *dev_info) +{ + unsigned int k = 0; + int ret = 0; + u32 reg; + struct ici_ext_subdev *subdev = + i2c_get_clientdata(client); + struct crl_sensor *sensor = to_crlmodule_sensor(subdev); + + dev_dbg(&client->dev, "%s: Writing ADV7481 BKSV list.\n", __func__); + + /* Clear BCAPS KSV list ready */ + ret = adv_i2c_write(client, 0x64, 0x78, 0x01); + if (ret) { + dev_err(&client->dev, "%s: Error clearing BCAPS KSV list ready!\n", __func__); + return ret; + } + + /* KSV_LIST_READY_PORT_A KSV list not ready */ + ret = adv_i2c_write(client, 0x64, 0x69, 0x00); + if (ret) { + dev_err(&client->dev, "%s: Error clearing KSV_LIST_READY_PORT_A register!\n", __func__); + return ret; + } + + /* Write the BSKV list, one device at a time */ + /* Writing the entire list in one call exceeds frame size */ + for (k = 0; k < ADV7481_MAX_DEVICES; ++k) { + unsigned int j = k * ADV7481_KSV_SIZE; + struct crl_register_write_rep adv_ksv_cmd[] = { + {0x80 + j, CRL_REG_LEN_08BIT, dev_info->ksv[j + 0], 0x64}, + {0x81 + j, CRL_REG_LEN_08BIT, dev_info->ksv[j + 1], 0x64}, + {0x82 + j, CRL_REG_LEN_08BIT, dev_info->ksv[j + 2], 0x64}, + {0x83 + j, CRL_REG_LEN_08BIT, dev_info->ksv[j + 3], 0x64}, + {0x84 + j, CRL_REG_LEN_08BIT, dev_info->ksv[j + 4], 0x64}, + }; + ret = crlmodule_write_regs(sensor, adv_ksv_cmd, ARRAY_SIZE(adv_ksv_cmd)); + + if (ret) { + dev_err(&client->dev, "%s: Error while writing BKSV list!\n", __func__); + return ret; + } + } + + /* Finally update the bstatus registers */ + ret = adv_i2c_read(client, 0x64, 0x42, ®); + + if (ret) { + dev_err(&client->dev, "%s: Error reading bstatus register!\n", __func__); + return ret; + } + + /* ADV recommendation: only update bits [0:11] */ + /* Take the lower nibble (bits [11:8]) of the input bstatus */ + /* Take the 
upper nibble (bits [15:12]) of the current register */ + dev_info->bstatus.bstatus[1] = + (dev_info->bstatus.bstatus[1] & 0x0F) | (reg & 0xF0); + { + struct crl_register_write_rep adv_cmd[] = { + {0x41, CRL_REG_LEN_08BIT, dev_info->bstatus.bstatus[0], 0x64}, + {0x42, CRL_REG_LEN_08BIT, dev_info->bstatus.bstatus[1], 0x64}, + {0x69, CRL_REG_LEN_08BIT, 0x01, 0x64}, /* KSV_LIST_READY_PORT_A */ + }; + + ret = crlmodule_write_regs(sensor, adv_cmd, ARRAY_SIZE(adv_cmd)); + } + + return ret; +} + +static ssize_t adv_bcaps_show(struct device *dev, struct device_attribute *attr, char *buf) +{ + u32 val; + int ret; + struct i2c_client* client = container_of(dev, struct i2c_client, dev); + + ret = adv_i2c_read(client, 0x64, 0x40, &val); + + if (ret != 0) { + return -EIO; + } + + val = val & 0xFF; + *buf = val; + return 1; +} + +/* Declares bcaps attribute that will be exposed to user space via sysfs */ +static DEVICE_ATTR(bcaps, S_IRUGO, adv_bcaps_show, NULL); + +/* + * Writes provided BKSV value from user space to chip. + * BKSV should be formatted as adv7481_dev_info struct, + * it does basic validation and checks if provided buffer size matches size of adv7481_dev_info struct. + * In case of error return EIO. + */ +static ssize_t adv_bksv_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) +{ + int ret; + struct adv7481_dev_info dev_info; + struct i2c_client *client = container_of(dev, struct i2c_client, dev); + + dev_dbg(&client->dev, "%s\n", __func__); + if (count != sizeof (struct adv7481_dev_info)) { + return -EIO; + } + + dev_info = *((struct adv7481_dev_info*) buf); + + ret = adv_write_bksv(client, &dev_info); + + if (ret != 0) { + return -EIO; + } + + return count; +} + +/* Declares bksv attribute that will be exposed to user space via sysfs */ +static DEVICE_ATTR(bksv, S_IWUSR | S_IWGRP, NULL, adv_bksv_store); + +/* + * Enables HPA_MAN_VALUE_PORT_A to enable hot plug detection. 
+ */ +static void adv_hpa_assert(struct work_struct *work) +{ + irq_task_t *task = (irq_task_t*) work; + struct i2c_client *client = task->client; + + adv_i2c_write(client, 0x68, 0xF8, 0x01); + in_hot_plug_reset = 0; + kfree(work); +} + +/* + * Handles hpa timer interrupt, defers enabling of HPA to adv_hpa_assert + */ +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 19, 0) +static void adv_hpa_reset_callback(unsigned long data) +{ + irq_task_t *task = NULL; + + task = (irq_task_t*) kmalloc(sizeof(irq_task_t), GFP_ATOMIC); + if (task) { + INIT_WORK( (struct work_struct*) task, adv_hpa_assert); + task->client = (struct i2c_client*) data; + queue_work(irq_workqueue, (struct work_struct*)task); + } +} +#else +static void adv_hpa_reset_callback(struct timer_list *t) +{ + irq_task_t *task = NULL; + + task = (irq_task_t*) kmalloc(sizeof(irq_task_t), GFP_ATOMIC); + if (task) { + INIT_WORK( (struct work_struct*) task, adv_hpa_assert); + queue_work(irq_workqueue, (struct work_struct*)task); + } +} +#endif + +/* + * Reauthenticates HDCP by disabling hot plug detection for 2 seconds. + * It can be triggered by user space by writing any value to "reauthenticate" attribute. + * After that time connected source will automatically ask for HDCP authentication once again. + * To prevent sleep, timer is used to delay enabling of hot plug by 2 seconds. + * In case that previous reauthentication is not completed, returns EBUSY. + * In case of error returns EIO. 
+ */ +static ssize_t adv_reauthenticate_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) +{ + int ret; + struct i2c_client *client = container_of(dev, struct i2c_client, dev); + + dev_dbg(&client->dev, "%s\n", __func__); + + mutex_lock(&hot_plug_reset_lock); + + if (in_hot_plug_reset) { + mutex_unlock(&hot_plug_reset_lock); + return -EBUSY; + } + + /* Clear BCAPS KSV list ready */ + ret = adv_i2c_write(client, 0x64, 0x78, 0x01); + if (ret != 0) { + dev_err(&client->dev, "%s: Error clearing BCAPS KSV list ready!\n", __func__); + mutex_unlock(&hot_plug_reset_lock); + return -EIO; + } + + /* KSV_LIST_READY_PORT_A KSV list not ready */ + ret = adv_i2c_write(client, 0x64, 0x69, 0x00); + if (ret != 0) { + dev_err(&client->dev, "%s: Error clearing KSV_LIST_READY_PORT_A register!\n", __func__); + mutex_unlock(&hot_plug_reset_lock); + return -EIO; + } + + ret = adv_i2c_write(client, 0x68, 0xF8, 0x00); + + if (ret != 0) { + mutex_unlock(&hot_plug_reset_lock); + return -EIO; + } + + in_hot_plug_reset = 1; + mod_timer(&hot_plug_reset_timer, jiffies + msecs_to_jiffies(2000)); + + mutex_unlock(&hot_plug_reset_lock); + return count; +} + +/* Declares reauthenticate attribute that will be exposed to user space via sysfs */ +static DEVICE_ATTR(reauthenticate, S_IWUSR | S_IWGRP, NULL, adv_reauthenticate_store); + +/* Dummy show to prevent WARN when registering aksv attribute */ +static ssize_t adv_aksv_show(struct device *dev, struct device_attribute *attr, char *buf) +{ + (void) dev; + (void) attr; + (void) buf; + + return -EIO; +} + +/* Declares aksv attribute that will be exposed to user space via sysfs, to notify about AKSV events */ +static DEVICE_ATTR(aksv, S_IRUGO, adv_aksv_show, NULL); + + +static ssize_t adv_hdmi_cable_connected_show(struct device *dev, struct device_attribute *attr, char *buf) +{ + char interlaced = 'p'; + if (hdmi_res_interlaced) { + interlaced = 'i'; + } + + return snprintf(buf, 20, "%dx%d%c", hdmi_res_width, 
hdmi_res_height, interlaced); +} +static DEVICE_ATTR(hdmi_cable_connected, S_IRUGO, adv_hdmi_cable_connected_show, NULL); + +static ssize_t adv_bstatus_show(struct device *dev, struct device_attribute *attr, char *buf) +{ + u32 b0, b1; + int ret; + struct i2c_client* client = container_of(dev, struct i2c_client, dev); + dev_dbg(&client->dev, "Getting bstatus\n"); + ret = adv_i2c_read(client, 0x64, 0x41, &b0); + if (ret != 0) { + dev_err(&client->dev, "Error getting bstatus(0)\n"); + return -EIO; + } + dev_dbg(&client->dev, "btatus(0): 0x%x\n", b0 & 0xff); + ret = adv_i2c_read(client, 0x64, 0x42, &b1); + if (ret != 0) { + dev_err(&client->dev, "Error getting bstatus(1)\n"); + return -EIO; + } + dev_dbg(&client->dev, "bstatus(1): 0x%x\n", b1 & 0xff); + *buf = b0 & 0xff; + buf++; + *buf = b1 & 0xff; + return 2; +} +static DEVICE_ATTR(bstatus, S_IRUGO, adv_bstatus_show, NULL); + +// irq GPIO ping unavailable on ACRN UOS +#if (!IS_ENABLED(CONFIG_VIDEO_INTEL_UOS)) +static void adv_isr_bh(struct work_struct *work) +{ + irq_task_t *task = (irq_task_t*) work; + struct i2c_client *client = task->client; + + u32 interrupt_st; + u32 raw_value; + u32 temp[3]; + int ret = 0; + + struct crl_register_read_rep reg; + reg.address = 0x90; + reg.len = CRL_REG_LEN_08BIT; + reg.mask = 0xFF; + reg.dev_i2c_addr = 0xE0; + + dev_dbg(&client->dev, "%s\n", __func__); + + /* AKSV_UPDATE_A_ST: check interrupt status */ + ret = adv_i2c_read(client, 0xE0, 0x90, &interrupt_st); + + if (interrupt_st & 0x08 /*ADV7481_AKSV_UPDATE_A_ST*/) { + dev_dbg(&client->dev, "%s: ADV7481 ISR: AKSV_UPDATE_A_ST: 0x%x\n", + __func__, interrupt_st); + + /* Notify user space about AKSV event */ + sysfs_notify(&client->dev.kobj, NULL, "aksv"); + + /* Clear interrupt bit */ + ret = adv_i2c_write(client, 0xE0, 0x91, 0x08); + } + + /* Check interrupt status for: CABLE_DET_A_ST, V_LOCKED_A_ST and DE_REGEN_LCK_A_ST */ + ret = adv_i2c_read(client, 0xE0, 0x72, &interrupt_st); + + /* If any of CABLE_DET_A_ST, V_LOCKED_A_ST 
and DE_REGEN_LCK_A_ST interrupts was set, + * get updated values of CABLE_DET_RAW, V_LOCKED_RAW and DE_REGEN_LCK_RAW + */ + if (interrupt_st) { + ret = adv_i2c_read(client, 0xE0, 0x71, &raw_value); + } + + /* Check CABLE_DET_A_ST interrupt */ + if ((interrupt_st & ADV7481_CABLE_DET_A_ST)) { + /* Clear interrupt bit */ + ret = adv_i2c_write(client, 0xE0, 0x73, 0x40); + + /* HDMI cable is connected */ + if (raw_value & ADV7481_CABLE_DET_A_ST) { + dev_dbg(&client->dev, "%s: ADV7481 ISR: HDMI cable connected\n", __func__); + ret = adv_i2c_write(client, 0xE0, 0x10, 0xA1); + } + else { + dev_dbg(&client->dev, "%s: ADV7481 ISR: HDMI cable disconnected\n", __func__); + } + } + + /* Check V_LOCKED_A_ST interrupt */ + if((interrupt_st & ADV7481_V_LOCKED_A_ST)) { + /* Clear interrupt bit */ + ret = adv_i2c_write(client, 0xE0, 0x73, 0x02); + /* Vertical sync filter has been locked, resolution height can be read */ + if (raw_value & ADV7481_V_LOCKED_A_ST) { + dev_dbg(&client->dev, "%s: ADV7481 ISR: Vertical Sync Filter Locked\n", __func__); + reg.dev_i2c_addr = 0x68; //HDMI_RX_MAP; + reg.address = 0x09; + adv_i2c_read(client, 0x68, 0x09, &temp[0]); + adv_i2c_read(client, 0x68, 0x0A, &temp[1]); + adv_i2c_read(client, 0x68, 0x0B, &temp[2]); + + temp[0] = temp[0] & 0x1F; + hdmi_res_height = (temp[0]<<8) + temp[1]; + if (temp[2] & 0x20) { + hdmi_res_height = hdmi_res_height << 1; + hdmi_res_interlaced = 1; + } + else { + hdmi_res_interlaced = 0; + } + + /* If resolution width was already read, notify user space about new resolution */ + if (hdmi_res_width) { + sysfs_notify(&client->dev.kobj, NULL, "hdmi_cable_connected"); + } + } + else { + dev_dbg(&client->dev, "%s: ADV7481 ISR: Vertical Sync Filter Lost\n", __func__); + hdmi_res_height = 0; + /* Notify user space about losing resolution */ + if (!hdmi_res_width) { + sysfs_notify(&client->dev.kobj, NULL, "hdmi_cable_connected"); + } + } + } + + /* Check DE_REGEN_A_ST interrupt */ + if((interrupt_st & ADV7481_DE_REGEN_A_ST)) { + /* 
Clear interrupt bit */ + ret = adv_i2c_write(client, 0xE0, 0x73, 0x01); + + /* DE regeneration has been locked, resolution height can be read */ + if (raw_value & ADV7481_DE_REGEN_A_ST) { + dev_dbg(&client->dev, "%s: ADV7481 ISR: DE Regeneration Locked\n", __func__); + reg.dev_i2c_addr = 0x68; //HDMI_RX_MAP; + reg.address = 0x07; + adv_i2c_read(client, 0x68, 0x07, &temp[0]); + adv_i2c_read(client, 0x68, 0x08, &temp[1]); + + temp[0] = temp[0] & 0x1F; + hdmi_res_width = (temp[0]<<8) + temp[1]; + + /* If resolution height was already read back, notify user space about new resolution */ + if (hdmi_res_height) { + sysfs_notify(&client->dev.kobj, NULL, "hdmi_cable_connected"); + } + } + else { + dev_dbg(&client->dev, "%s: ADV7481 ISR: DE Regeneration Lost\n", __func__); + hdmi_res_width = 0; + /* Notfiy user space about losing resolution */ + if (!hdmi_res_height) { + sysfs_notify(&client->dev.kobj, NULL, "hdmi_cable_connected"); + } + } + } +} + +static irq_handler_t adv7481_irq_handler(unsigned int irq, void *dev_id, + struct pt_regs *regs) +{ + irq_task_t *task = NULL; + struct i2c_client *client = (struct i2c_client*)dev_id; + + dev_dbg(&client->dev, "%s: Interrupt in ADV7481\n", __func__); + + task = (irq_task_t*) kmalloc(sizeof(irq_task_t), GFP_ATOMIC); + if (task) { + INIT_WORK( (struct work_struct*) task, adv_isr_bh); + task->client = client; + queue_work(irq_workqueue, (struct work_struct*)task); + } + + return (irq_handler_t)IRQ_HANDLED; +} + +static int unregister_gpio_irq(void) +{ + gpio_free(ADV7481_GPIO); + return 0; +} + +static int register_gpio_irq(struct i2c_client *client) +{ + int res = 0; + unsigned int irq; + + + if (!gpio_is_valid(ADV7481_GPIO)) { + dev_err(&client->dev, "%s: ADV7481 GPIO pin %d is invalid!\n", + __func__, ADV7481_GPIO); + return -ENODEV; + } else { + dev_dbg(&client->dev, "%s: GPIO %d is valid.\n", __func__, ADV7481_GPIO); + } + + res = gpio_request(ADV7481_GPIO, "ADV7481 Interrupt"); + if (res) { + dev_err(&client->dev, "%s: 
ADV7481 GPIO pin request failed!\n", __func__); + return -ENODEV; + } + + gpio_direction_input(ADV7481_GPIO); + irq = gpio_to_irq(ADV7481_GPIO); + res = request_irq(irq, + (irq_handler_t)adv7481_irq_handler, + IRQF_TRIGGER_RISING, + "adv7481_irq_handler", + client); + + dev_dbg(&client->dev, "%s: GPIO register GPIO IRQ result: %d\n", __func__, res); + + return res; +} +#endif + +int adv7481_sensor_init(struct i2c_client *client) +{ + dev_dbg(&client->dev, "%s ADV7481_sensor_init\n", __func__); + irq_workqueue = create_workqueue("adv7481_irq_workqueue"); +// irq GPIO ping unavailable on ACRN UOS +#if (!IS_ENABLED(CONFIG_VIDEO_INTEL_UOS)) + register_gpio_irq(client); +#endif + +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 19, 0) + setup_timer(&hot_plug_reset_timer, adv_hpa_reset_callback, (unsigned long) client); +#else + timer_setup(&hot_plug_reset_timer, adv_hpa_reset_callback, 0); +#endif + CREATE_ATTRIBUTE(dev_attr_hdmi_cable_connected); + CREATE_ATTRIBUTE(dev_attr_bcaps); + CREATE_ATTRIBUTE(dev_attr_aksv); + CREATE_ATTRIBUTE(dev_attr_bksv); + CREATE_ATTRIBUTE(dev_attr_reauthenticate); + CREATE_ATTRIBUTE(dev_attr_bstatus); + + return 0; +} + +int adv7481_sensor_cleanup(struct i2c_client *client) +{ + dev_dbg(&client->dev, "%s: ADV7481_sensor_cleanup\n", __func__); + if (irq_workqueue != NULL) { + free_irq(gpio_to_irq(ADV7481_GPIO), client); +// irq GPIO ping unavailable on ACRN UOS +#if (!IS_ENABLED(CONFIG_VIDEO_INTEL_UOS)) + unregister_gpio_irq(); +#endif + del_timer(&hot_plug_reset_timer); + flush_workqueue(irq_workqueue); + destroy_workqueue(irq_workqueue); + irq_workqueue = NULL; + } + REMOVE_ATTRIBUTE(dev_attr_bstatus); + REMOVE_ATTRIBUTE(dev_attr_reauthenticate); + REMOVE_ATTRIBUTE(dev_attr_bksv); + REMOVE_ATTRIBUTE(dev_attr_aksv); + REMOVE_ATTRIBUTE(dev_attr_bcaps); + REMOVE_ATTRIBUTE(dev_attr_hdmi_cable_connected); + return 0; +} diff --git a/drivers/media/i2c/crlmodule-lite/crl_adv7481_hdmi_configuration.h 
b/drivers/media/i2c/crlmodule-lite/crl_adv7481_hdmi_configuration.h new file mode 100644 index 0000000000000..489a3eb978785 --- /dev/null +++ b/drivers/media/i2c/crlmodule-lite/crl_adv7481_hdmi_configuration.h @@ -0,0 +1,942 @@ +/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0) */ +/* + * Copyright (C) 2018 Intel Corporation + */ + +#ifndef __CRLMODULE_ADV7481_HDMI_CONFIGURATION_H_ +#define __CRLMODULE_ADV7481_HDMI_CONFIGURATION_H_ + +#include "crlmodule-sensor-ds.h" + +static struct crl_register_write_rep adv7481_hdmi_onetime_init_regset[] = { + {0xFF, CRL_REG_LEN_08BIT, 0xFF, 0xE0}, + {0x00, CRL_REG_LEN_DELAY, 0x05, 0x00}, + {0x01, CRL_REG_LEN_08BIT, 0x76, 0xE0}, /* ADI Required Write */ + {0x05, CRL_REG_LEN_08BIT, 0x96, 0xE0}, /* Setting Vid_Std to + 1600x1200(UXGA)@60 */ + {0xF2, CRL_REG_LEN_08BIT, 0x01, 0xE0}, /* Enable I2C Read + Auto-Increment */ + {0xF3, CRL_REG_LEN_08BIT, 0x4C, 0xE0}, /* DPLL Map Address + Set to 0x4C */ + {0xF4, CRL_REG_LEN_08BIT, 0x44, 0xE0}, /* CP Map Address + Set to 0x44 */ + {0xF5, CRL_REG_LEN_08BIT, 0x68, 0xE0}, /* HDMI RX Map Address + Set to 0x68 */ + {0xF6, CRL_REG_LEN_08BIT, 0x6C, 0xE0}, /* EDID Map Address + Set to 0x6C */ + {0xF7, CRL_REG_LEN_08BIT, 0x64, 0xE0}, /* HDMI RX Repeater Map Address + Set to 0x64 */ + {0xF8, CRL_REG_LEN_08BIT, 0x62, 0xE0}, /* HDMI RX Infoframe Map Address + Set to 0x62 */ + {0xF9, CRL_REG_LEN_08BIT, 0xF0, 0xE0}, /* CBUS Map Address + Set to 0xF0 */ + {0xFA, CRL_REG_LEN_08BIT, 0x82, 0xE0}, /* CEC Map Address + Set to 0x82 */ + {0xFB, CRL_REG_LEN_08BIT, 0xF2, 0xE0}, /* SDP Main Map Address + Set to 0xF2 */ + {0xFC, CRL_REG_LEN_08BIT, 0x90, 0xE0}, /* CSI-TXB Map Address + Set to 0x90 */ + {0xFD, CRL_REG_LEN_08BIT, 0x94, 0xE0}, /* CSI-TXA Map Address + Set to 0x94 */ + {0x00, CRL_REG_LEN_08BIT, 0x40, 0xE0}, /* Disable chip powerdown & + Enable HDMI Rx block */ + + {0x40, CRL_REG_LEN_08BIT, 0xC3, 0x64}, /* Enable HDCP 1.1 Repeater */ + {0x69, CRL_REG_LEN_08BIT, 0x00, 0x64}, /* KSV List not ready port 
A */ + {0x77, CRL_REG_LEN_08BIT, 0x08, 0x64}, /* Clear KSV List */ + {0x78, CRL_REG_LEN_08BIT, 0x01, 0x64}, /* KSV_LIST_READY_CLR_A: + Clears the BCAPS ready bit */ + {0x68, CRL_REG_LEN_08BIT, 0x00, 0x64}, /* Disable dual ksv list + for port A */ + {0x41, CRL_REG_LEN_08BIT, 0x00, 0x64}, /* Reset b-status (1) */ + {0x42, CRL_REG_LEN_08BIT, 0x00, 0x64}, /* Reset b-status (2) */ + {0x91, CRL_REG_LEN_08BIT, 0x08, 0xE0}, /* AKSV Update Clear */ + + {0x00, CRL_REG_LEN_08BIT, 0x08, 0x68}, /* Foreground Channel = A */ + {0x98, CRL_REG_LEN_08BIT, 0xFF, 0x68}, /* ADI Required Write */ + {0x99, CRL_REG_LEN_08BIT, 0xA3, 0x68}, /* ADI Required Write */ + {0x9A, CRL_REG_LEN_08BIT, 0x00, 0x68}, /* ADI Required Write */ + {0x9B, CRL_REG_LEN_08BIT, 0x0A, 0x68}, /* ADI Required Write */ + {0x9D, CRL_REG_LEN_08BIT, 0x40, 0x68}, /* ADI Required Write */ + {0xCB, CRL_REG_LEN_08BIT, 0x09, 0x68}, /* ADI Required Write */ + {0x3D, CRL_REG_LEN_08BIT, 0x10, 0x68}, /* ADI Required Write */ + {0x3E, CRL_REG_LEN_08BIT, 0x7B, 0x68}, /* ADI Required Write */ + {0x3F, CRL_REG_LEN_08BIT, 0x5E, 0x68}, /* ADI Required Write */ + {0x4E, CRL_REG_LEN_08BIT, 0xFE, 0x68}, /* ADI Required Write */ + {0x4F, CRL_REG_LEN_08BIT, 0x18, 0x68}, /* ADI Required Write */ + {0x57, CRL_REG_LEN_08BIT, 0xA3, 0x68}, /* ADI Required Write */ + {0x58, CRL_REG_LEN_08BIT, 0x04, 0x68}, /* ADI Required Write */ + {0x85, CRL_REG_LEN_08BIT, 0x10, 0x68}, /* ADI Required Write */ + {0x83, CRL_REG_LEN_08BIT, 0x00, 0x68}, /* Enable All Terminatio ns */ + {0xA3, CRL_REG_LEN_08BIT, 0x01, 0x68}, /* ADI Required Write */ + {0xBE, CRL_REG_LEN_08BIT, 0x00, 0x68}, /* ADI Required Write */ + {0x6C, CRL_REG_LEN_08BIT, 0x01, 0x68}, /* HPA Manual Enable */ + {0xF8, CRL_REG_LEN_08BIT, 0x01, 0x68}, /* HPA Asserted */ + {0x0F, CRL_REG_LEN_08BIT, 0x00, 0x68}, /* Audio Mute Speed + Set to Fastest (Smallest Step Size) */ + {0x0E, CRL_REG_LEN_08BIT, 0xFF, 0xE0}, /* LLC/PIX/AUD/SPI PINS + TRISTATED */ + + {0x74, CRL_REG_LEN_08BIT, 0x43, 0xE0}, /* 
Enable interrupts */ + {0x75, CRL_REG_LEN_08BIT, 0x43, 0xE0}, + + {0x70, CRL_REG_LEN_08BIT, 0xA0, 0x64}, /* Write primary edid size */ + {0x74, CRL_REG_LEN_08BIT, 0x01, 0x64}, /* Enable manual edid */ + {0x7A, CRL_REG_LEN_08BIT, 0x00, 0x64}, /* Write edid sram select */ + {0xF6, CRL_REG_LEN_08BIT, 0x6C, 0xE0}, /* Write edid map bus address */ + + {0x00*4, CRL_REG_LEN_32BIT, 0x00FFFFFF, 0x6C}, /* EDID programming */ + {0x01*4, CRL_REG_LEN_32BIT, 0xFFFFFF00, 0x6C}, /* EDID programming */ + {0x02*4, CRL_REG_LEN_32BIT, 0x4DD90100, 0x6C}, /* EDID programming */ + {0x03*4, CRL_REG_LEN_32BIT, 0x00000000, 0x6C}, /* EDID programming */ + {0x04*4, CRL_REG_LEN_32BIT, 0x00110103, 0x6C}, /* EDID programming */ + {0x05*4, CRL_REG_LEN_32BIT, 0x80000078, 0x6C}, /* EDID programming */ + {0x06*4, CRL_REG_LEN_32BIT, 0x0A0DC9A0, 0x6C}, /* EDID programming */ + {0x07*4, CRL_REG_LEN_32BIT, 0x57479827, 0x6C}, /* EDID programming */ + {0x08*4, CRL_REG_LEN_32BIT, 0x12484C00, 0x6C}, /* EDID programming */ + {0x09*4, CRL_REG_LEN_32BIT, 0x00000101, 0x6C}, /* EDID programming */ + {0x0A*4, CRL_REG_LEN_32BIT, 0x01010101, 0x6C}, /* EDID programming */ + {0x0B*4, CRL_REG_LEN_32BIT, 0x01010101, 0x6C}, /* EDID programming */ + {0x0C*4, CRL_REG_LEN_32BIT, 0x01010101, 0x6C}, /* EDID programming */ + {0x0D*4, CRL_REG_LEN_32BIT, 0x0101011D, 0x6C}, /* EDID programming */ + {0x0E*4, CRL_REG_LEN_32BIT, 0x80D0721C, 0x6C}, /* EDID programming */ + {0x0F*4, CRL_REG_LEN_32BIT, 0x1620102C, 0x6C}, /* EDID programming */ + {0x10*4, CRL_REG_LEN_32BIT, 0x2580C48E, 0x6C}, /* EDID programming */ + {0x11*4, CRL_REG_LEN_32BIT, 0x2100009E, 0x6C}, /* EDID programming */ + {0x12*4, CRL_REG_LEN_32BIT, 0x011D8018, 0x6C}, /* EDID programming */ + {0x13*4, CRL_REG_LEN_32BIT, 0x711C1620, 0x6C}, /* EDID programming */ + {0x14*4, CRL_REG_LEN_32BIT, 0x582C2500, 0x6C}, /* EDID programming */ + {0x15*4, CRL_REG_LEN_32BIT, 0xC48E2100, 0x6C}, /* EDID programming */ + {0x16*4, CRL_REG_LEN_32BIT, 0x009E0000, 0x6C}, /* EDID programming 
*/ + {0x17*4, CRL_REG_LEN_32BIT, 0x00FC0048, 0x6C}, /* EDID programming */ + {0x18*4, CRL_REG_LEN_32BIT, 0x444D4920, 0x6C}, /* EDID programming */ + {0x19*4, CRL_REG_LEN_32BIT, 0x4C4C430A, 0x6C}, /* EDID programming */ + {0x1A*4, CRL_REG_LEN_32BIT, 0x20202020, 0x6C}, /* EDID programming */ + {0x1B*4, CRL_REG_LEN_32BIT, 0x000000FD, 0x6C}, /* EDID programming */ + {0x1C*4, CRL_REG_LEN_32BIT, 0x003B3D0F, 0x6C}, /* EDID programming */ + {0x1D*4, CRL_REG_LEN_32BIT, 0x2D08000A, 0x6C}, /* EDID programming */ + {0x1E*4, CRL_REG_LEN_32BIT, 0x20202020, 0x6C}, /* EDID programming */ + {0x1F*4, CRL_REG_LEN_32BIT, 0x202001C1, 0x6C}, /* EDID programming */ + {0x20*4, CRL_REG_LEN_32BIT, 0x02031E77, 0x6C}, /* EDID programming */ + {0x21*4, CRL_REG_LEN_32BIT, 0x4F941305, 0x6C}, /* EDID programming */ + {0x22*4, CRL_REG_LEN_32BIT, 0x03040201, 0x6C}, /* EDID programming */ + {0x23*4, CRL_REG_LEN_32BIT, 0x16150706, 0x6C}, /* EDID programming */ + {0x24*4, CRL_REG_LEN_32BIT, 0x1110121F, 0x6C}, /* EDID programming */ + {0x25*4, CRL_REG_LEN_32BIT, 0x23090701, 0x6C}, /* EDID programming */ + {0x26*4, CRL_REG_LEN_32BIT, 0x65030C00, 0x6C}, /* EDID programming */ + {0x27*4, CRL_REG_LEN_32BIT, 0x10008C0A, 0x6C}, /* EDID programming */ + {0x28*4, CRL_REG_LEN_32BIT, 0xD0902040, 0x6C}, /* EDID programming */ + {0x29*4, CRL_REG_LEN_32BIT, 0x31200C40, 0x6C}, /* EDID programming */ + {0x2A*4, CRL_REG_LEN_32BIT, 0x5500138E, 0x6C}, /* EDID programming */ + {0x2B*4, CRL_REG_LEN_32BIT, 0x21000018, 0x6C}, /* EDID programming */ + {0x2C*4, CRL_REG_LEN_32BIT, 0x011D00BC, 0x6C}, /* EDID programming */ + {0x2D*4, CRL_REG_LEN_32BIT, 0x52D01E20, 0x6C}, /* EDID programming */ + {0x2E*4, CRL_REG_LEN_32BIT, 0xB8285540, 0x6C}, /* EDID programming */ + {0x2F*4, CRL_REG_LEN_32BIT, 0xC48E2100, 0x6C}, /* EDID programming */ + {0x30*4, CRL_REG_LEN_32BIT, 0x001E8C0A, 0x6C}, /* EDID programming */ + {0x31*4, CRL_REG_LEN_32BIT, 0xD08A20E0, 0x6C}, /* EDID programming */ + {0x32*4, CRL_REG_LEN_32BIT, 0x2D10103E, 0x6C}, /* 
EDID programming */ + {0x33*4, CRL_REG_LEN_32BIT, 0x9600C48E, 0x6C}, /* EDID programming */ + {0x34*4, CRL_REG_LEN_32BIT, 0x21000018, 0x6C}, /* EDID programming */ + {0x35*4, CRL_REG_LEN_32BIT, 0x011D0072, 0x6C}, /* EDID programming */ + {0x36*4, CRL_REG_LEN_32BIT, 0x51D01E20, 0x6C}, /* EDID programming */ + {0x37*4, CRL_REG_LEN_32BIT, 0x6E285500, 0x6C}, /* EDID programming */ + {0x38*4, CRL_REG_LEN_32BIT, 0xC48E2100, 0x6C}, /* EDID programming */ + {0x39*4, CRL_REG_LEN_32BIT, 0x001E8C0A, 0x6C}, /* EDID programming */ + {0x3A*4, CRL_REG_LEN_32BIT, 0xD08A20E0, 0x6C}, /* EDID programming */ + {0x3B*4, CRL_REG_LEN_32BIT, 0x2D10103E, 0x6C}, /* EDID programming */ + {0x3C*4, CRL_REG_LEN_32BIT, 0x9600138E, 0x6C}, /* EDID programming */ + {0x3D*4, CRL_REG_LEN_32BIT, 0x21000018, 0x6C}, /* EDID programming */ + {0x3E*4, CRL_REG_LEN_32BIT, 0x00000000, 0x6C}, /* EDID programming */ + {0x3F*4, CRL_REG_LEN_32BIT, 0x000000CB, 0x6C}, /* EDID programming */ +}; + +static struct crl_register_write_rep adv7481_hdmi_mode_rgb565[] = { + {0x04, CRL_REG_LEN_08BIT, 0x02, 0xE0}, //RGB Out of CP + {0x12, CRL_REG_LEN_08BIT, 0xF0, 0xE0}, //CSC Depends on ip Packets - SDR 444 + {0x17, CRL_REG_LEN_08BIT, 0xB8, 0xE0}, //Configure for RGB565 & Luma & Chroma Values Can Reach 254d + {0x03, CRL_REG_LEN_08BIT, 0x86, 0xE0}, //CP-Insert_AV_Code + {0x7C, CRL_REG_LEN_08BIT, 0x00, 0x44}, //ADI Required Write + {0x0C, CRL_REG_LEN_08BIT, 0xE0, 0xE0}, //Enable LLC_DLL & Double LLC Timing + {0x0E, CRL_REG_LEN_08BIT, 0xDD, 0xE0}, //LLC/PIX/SPI PINS TRISTATED AUD Outputs Enabled + {0x10, CRL_REG_LEN_08BIT, 0xC0, 0xE0}, //Enable 4-lane CSI Tx & Pixel Port + {0x7E, CRL_REG_LEN_08BIT, 0x98, 0x94}, //ADI Required Write +}; + +static struct crl_register_write_rep adv7481_hdmi_mode_rgb888[] = { + {0x04, CRL_REG_LEN_08BIT, 0x02, 0xE0}, /* RGB Out of CP */ + {0x12, CRL_REG_LEN_08BIT, 0xF0, 0xE0}, /* CSC Depends on ip Packets - + SDR 444 */ + {0x17, CRL_REG_LEN_08BIT, 0x80, 0xE0}, /* Luma & Chroma Values Can + Reach 
254d */ + {0x03, CRL_REG_LEN_08BIT, 0x86, 0xE0}, /* CP-Insert_AV_Code */ + {0x7C, CRL_REG_LEN_08BIT, 0x00, 0x44}, /* ADI Required Write */ + {0x0C, CRL_REG_LEN_08BIT, 0xE0, 0xE0}, /* Enable LLC_DLL & + Double LLC Timing */ + {0x0E, CRL_REG_LEN_08BIT, 0xDD, 0xE0}, /* LLC/PIX/SPI PINS TRISTATED + AUD Outputs Enabled */ + {0xDB, CRL_REG_LEN_08BIT, 0x10, 0x94}, /* ADI Required Write */ + {0x7E, CRL_REG_LEN_08BIT, 0x1B, 0x94}, /* ADI Required Write */ +}; + +static struct crl_register_write_rep adv7481_hdmi_mode_uyvy[] = { + {0x04, CRL_REG_LEN_08BIT, 0x00, 0xE0}, //YCrCb output + {0x12, CRL_REG_LEN_08BIT, 0xF2, 0xE0}, //CSC Depends on ip Packets - SDR422 set + {0x17, CRL_REG_LEN_08BIT, 0x80, 0xE0}, //Luma & Chroma Values Can Reach 254d + {0x03, CRL_REG_LEN_08BIT, 0x86, 0xE0}, //CP-Insert_AV_Code + {0x7C, CRL_REG_LEN_08BIT, 0x00, 0x44}, //ADI Required Write + {0x0C, CRL_REG_LEN_08BIT, 0xE0, 0xE0}, //Enable LLC_DLL & Double LLC Timing + {0x0E, CRL_REG_LEN_08BIT, 0xDD, 0xE0}, //LLC/PIX/SPI PINS TRISTATED AUD Outputs Enabled + {0x10, CRL_REG_LEN_08BIT, 0xC0, 0xE0}, //Enable 4-lane CSI Tx & Pixel Port + {0x00, CRL_REG_LEN_08BIT, 0x84, 0x94}, //Enable 4-lane MIPI + {0x00, CRL_REG_LEN_08BIT, 0xA4, 0x94}, //Set Auto DPHY Timing + {0xDB, CRL_REG_LEN_08BIT, 0x10, 0x94}, //ADI Required Write + {0x7E, CRL_REG_LEN_08BIT, 0x00, 0x94}, //ADI Required Write +}; + +static struct crl_register_write_rep adv7481_hdmi_mode_yuyv[] = { + {0x1C, CRL_REG_LEN_08BIT, 0x3A, 0xE0}, /* Enable Interrupt*/ + {0x04, CRL_REG_LEN_08BIT, 0x40, 0xE0}, /* YCrCb output good=0xE0*/ + /* CSC Depends on ip Packets - SDR422 set */ + {0x12, CRL_REG_LEN_08BIT, 0xF2, 0xE0}, + /* Luma & Chroma Values Can Reach 254d */ + {0x17, CRL_REG_LEN_08BIT, 0x80, 0xE0}, + {0x7C, CRL_REG_LEN_08BIT, 0x00, 0x44}, /* ADI Required Write */ + {0x3E, CRL_REG_LEN_08BIT, 0x08, 0x44}, /* Invert order of Cb and Cr*/ + /* Enable LLC_DLL & Double LLC Timing */ + {0x0C, CRL_REG_LEN_08BIT, 0xE0, 0xE0}, + /* LLC/PIX/SPI PINS TRISTATED AUD 
Outputs Enabled */ + {0x0E, CRL_REG_LEN_08BIT, 0xDD, 0xE0}, + {0x10, CRL_REG_LEN_08BIT | CRL_REG_READ_AND_UPDATE, 0xA0, 0xE0}, + /* Enable 4-lane CSI TXB & Pixel Port */ + {0x00, CRL_REG_LEN_08BIT, 0x84, 0x94}, /* Enable 4-lane MIPI */ + {0x00, CRL_REG_LEN_08BIT, 0xA4, 0x94}, /* Set Auto DPHY Timing */ + {0xDB, CRL_REG_LEN_08BIT, 0x10, 0x94}, /* ADI Required Write */ + {0x7E, CRL_REG_LEN_08BIT, 0x00, 0x94}, /* ADI Required Write */ +}; + +static struct crl_register_write_rep adv7481_hdmi_mode_1080p[] = { + {0x00, CRL_REG_LEN_08BIT, 0x84, 0x94}, /* Enable 4-lane MIPI */ + {0x00, CRL_REG_LEN_08BIT, 0xA4, 0x94}, /* Set Auto DPHY Timing */ + {0x10, CRL_REG_LEN_08BIT | CRL_REG_READ_AND_UPDATE, 0xA0, 0xE0, 0xA0}, + {0xD6, CRL_REG_LEN_08BIT, 0x07, 0x94}, + {0xC4, CRL_REG_LEN_08BIT, 0x0A, 0x94}, + {0x71, CRL_REG_LEN_08BIT, 0x33, 0x94}, + {0x72, CRL_REG_LEN_08BIT, 0x11, 0x94}, + {0xF0, CRL_REG_LEN_08BIT, 0x00, 0x94}, + {0x31, CRL_REG_LEN_08BIT, 0x82, 0x94}, + {0x1E, CRL_REG_LEN_08BIT, 0x80, 0x94}, + {0xDA, CRL_REG_LEN_08BIT, 0x01, 0x94}, + {0x00, CRL_REG_LEN_08BIT, 0x24, 0x94}, + {0xC1, CRL_REG_LEN_08BIT, 0x2B, 0x94}, + {0x31, CRL_REG_LEN_08BIT, 0x80, 0x94}, + {0xC9, CRL_REG_LEN_08BIT, 0x2D, 0x44}, + {0x05, CRL_REG_LEN_08BIT, 0x5E, 0xE0}, + {0x8B, CRL_REG_LEN_08BIT, 0x43, 0x44}, /* shift 44 pixel to right */ + {0x8C, CRL_REG_LEN_08BIT, 0xD4, 0x44}, + {0x8B, CRL_REG_LEN_08BIT, 0x4F, 0x44}, + {0x8D, CRL_REG_LEN_08BIT, 0xD4, 0x44}, + {0x00, CRL_REG_LEN_DELAY, 0x05, 0x00}, + {0x03, CRL_REG_LEN_08BIT, 0x00, 0xE0}, + {0x04, CRL_REG_LEN_08BIT | CRL_REG_READ_AND_UPDATE, 0x00, 0xE0, 0xFD}, + {0x37, CRL_REG_LEN_08BIT, 0x00, 0x44}, +}; + +static struct crl_register_write_rep adv7481_hdmi_mode_1080i[] = { + {0x00, CRL_REG_LEN_08BIT, 0x84, 0x94}, /* Enable 4-lane MIPI */ + {0x00, CRL_REG_LEN_08BIT, 0xA4, 0x94}, /* Set Auto DPHY Timing */ + {0x10, CRL_REG_LEN_08BIT | CRL_REG_READ_AND_UPDATE, 0xA0, 0xE0, 0xA0}, + {0xD6, CRL_REG_LEN_08BIT, 0x07, 0x94}, + {0xC4, CRL_REG_LEN_08BIT, 0x0A, 
0x94}, + {0x71, CRL_REG_LEN_08BIT, 0x33, 0x94}, + {0x72, CRL_REG_LEN_08BIT, 0x11, 0x94}, + {0xF0, CRL_REG_LEN_08BIT, 0x00, 0x94}, + {0x31, CRL_REG_LEN_08BIT, 0x82, 0x94}, + {0x1E, CRL_REG_LEN_08BIT, 0x80, 0x94}, + {0xDA, CRL_REG_LEN_08BIT, 0x01, 0x94}, + {0x00, CRL_REG_LEN_08BIT, 0x24, 0x94}, + {0xC1, CRL_REG_LEN_08BIT, 0x2B, 0x94}, + {0x31, CRL_REG_LEN_08BIT, 0x80, 0x94}, + {0xC9, CRL_REG_LEN_08BIT, 0x2D, 0x44}, + {0x05, CRL_REG_LEN_08BIT, 0x54, 0xE0}, + {0x8B, CRL_REG_LEN_08BIT, 0x43, 0x44}, /* shift 44 pixel to right */ + {0x8C, CRL_REG_LEN_08BIT, 0xD4, 0x44}, + {0x8B, CRL_REG_LEN_08BIT, 0x4F, 0x44}, + {0x8D, CRL_REG_LEN_08BIT, 0xD4, 0x44}, + {0x00, CRL_REG_LEN_DELAY, 0x05, 0x00}, + {0x03, CRL_REG_LEN_08BIT, 0x00, 0xE0}, + {0x04, CRL_REG_LEN_08BIT | CRL_REG_READ_AND_UPDATE, 0x00, 0xE0, 0xFD}, + {0x37, CRL_REG_LEN_08BIT, 0x00, 0x44}, +}; + +static struct crl_register_write_rep adv7481_hdmi_mode_480p[] = { + {0x00, CRL_REG_LEN_08BIT, 0x84, 0x94}, /* Enable 4-lane MIPI */ + {0x00, CRL_REG_LEN_08BIT, 0xA4, 0x94}, /* Set Auto DPHY Timing */ + {0x10, CRL_REG_LEN_08BIT | CRL_REG_READ_AND_UPDATE, 0xA0, 0xE0, 0xA0}, + {0xD6, CRL_REG_LEN_08BIT, 0x07, 0x94}, + {0xC4, CRL_REG_LEN_08BIT, 0x0A, 0x94}, + {0x71, CRL_REG_LEN_08BIT, 0x33, 0x94}, + {0x72, CRL_REG_LEN_08BIT, 0x11, 0x94}, + {0xF0, CRL_REG_LEN_08BIT, 0x00, 0x94}, + {0x31, CRL_REG_LEN_08BIT, 0x82, 0x94}, + {0x1E, CRL_REG_LEN_08BIT, 0x80, 0x94}, + {0xDA, CRL_REG_LEN_08BIT, 0x01, 0x94}, + {0x00, CRL_REG_LEN_08BIT, 0x24, 0x94}, + {0xC1, CRL_REG_LEN_08BIT, 0x2B, 0x94}, + {0x31, CRL_REG_LEN_08BIT, 0x80, 0x94}, + {0xC9, CRL_REG_LEN_08BIT, 0x2D, 0x44}, + {0x05, CRL_REG_LEN_08BIT, 0x4A, 0xE0}, + {0x00, CRL_REG_LEN_DELAY, 0x05, 0x00}, + {0x03, CRL_REG_LEN_08BIT, 0x00, 0xE0}, + {0x04, CRL_REG_LEN_08BIT | CRL_REG_READ_AND_UPDATE, 0x00, 0xE0, 0xFD}, + {0x37, CRL_REG_LEN_08BIT, 0x00, 0x44}, +}; + +static struct crl_register_write_rep adv7481_hdmi_mode_720p[] = { + {0x00, CRL_REG_LEN_08BIT, 0x84, 0x94}, /* Enable 4-lane MIPI */ + 
{0x00, CRL_REG_LEN_08BIT, 0xA4, 0x94}, /* Set Auto DPHY Timing */ + {0x10, CRL_REG_LEN_08BIT | CRL_REG_READ_AND_UPDATE, 0xA0, 0xE0, 0xA0}, + {0xD6, CRL_REG_LEN_08BIT, 0x07, 0x94}, + {0xC4, CRL_REG_LEN_08BIT, 0x0A, 0x94}, + {0x71, CRL_REG_LEN_08BIT, 0x33, 0x94}, + {0x72, CRL_REG_LEN_08BIT, 0x11, 0x94}, + {0xF0, CRL_REG_LEN_08BIT, 0x00, 0x94}, + {0x31, CRL_REG_LEN_08BIT, 0x82, 0x94}, + {0x1E, CRL_REG_LEN_08BIT, 0x80, 0x94}, + {0xDA, CRL_REG_LEN_08BIT, 0x01, 0x94}, + {0x00, CRL_REG_LEN_08BIT, 0x24, 0x94}, + {0xC1, CRL_REG_LEN_08BIT, 0x2B, 0x94}, + {0x31, CRL_REG_LEN_08BIT, 0x80, 0x94}, + {0xC9, CRL_REG_LEN_08BIT, 0x2D, 0x44}, + {0x05, CRL_REG_LEN_08BIT, 0x53, 0xE0}, + {0x8B, CRL_REG_LEN_08BIT, 0x43, 0x44}, /* shift 40 pixel to right */ + {0x8C, CRL_REG_LEN_08BIT, 0xD8, 0x44}, + {0x8B, CRL_REG_LEN_08BIT, 0x4F, 0x44}, + {0x8D, CRL_REG_LEN_08BIT, 0xD8, 0x44}, + {0x00, CRL_REG_LEN_DELAY, 0x05, 0x00}, + {0x03, CRL_REG_LEN_08BIT, 0x00, 0xE0}, + {0x04, CRL_REG_LEN_08BIT | CRL_REG_READ_AND_UPDATE, 0x00, 0xE0, 0xFD}, + {0x37, CRL_REG_LEN_08BIT, 0x00, 0x44}, +}; + +static struct crl_register_write_rep adv7481_hdmi_mode_576p[] = { + {0x00, CRL_REG_LEN_08BIT, 0x84, 0x94}, /* Enable 4-lane MIPI */ + {0x00, CRL_REG_LEN_08BIT, 0xA4, 0x94}, /* Set Auto DPHY Timing */ + {0x10, CRL_REG_LEN_08BIT | CRL_REG_READ_AND_UPDATE, 0xA0, 0xE0, 0xA0}, + {0xD6, CRL_REG_LEN_08BIT, 0x07, 0x94}, + {0xC4, CRL_REG_LEN_08BIT, 0x0A, 0x94}, + {0x71, CRL_REG_LEN_08BIT, 0x33, 0x94}, + {0x72, CRL_REG_LEN_08BIT, 0x11, 0x94}, + {0xF0, CRL_REG_LEN_08BIT, 0x00, 0x94}, + {0x31, CRL_REG_LEN_08BIT, 0x82, 0x94}, + {0x1E, CRL_REG_LEN_08BIT, 0x80, 0x94}, + {0xDA, CRL_REG_LEN_08BIT, 0x01, 0x94}, + {0x00, CRL_REG_LEN_08BIT, 0x24, 0x94}, + {0xC1, CRL_REG_LEN_08BIT, 0x2B, 0x94}, + {0x31, CRL_REG_LEN_08BIT, 0x80, 0x94}, + {0xC9, CRL_REG_LEN_08BIT, 0x2D, 0x44}, + {0x05, CRL_REG_LEN_08BIT, 0x4B, 0xE0}, + {0x00, CRL_REG_LEN_DELAY, 0x05, 0x00}, + {0x03, CRL_REG_LEN_08BIT, 0x00, 0xE0}, + {0x04, CRL_REG_LEN_08BIT | 
CRL_REG_READ_AND_UPDATE, 0x00, 0xE0, 0xFD}, + {0x37, CRL_REG_LEN_08BIT, 0x00, 0x44}, +}; + +static struct crl_register_write_rep adv7481_hdmi_mode_576i[] = { + {0x00, CRL_REG_LEN_08BIT, 0x81, 0x94}, /* Enable 1-lane MIPI */ + {0x00, CRL_REG_LEN_08BIT, 0xA1, 0x94}, /* Set Auto DPHY Timing */ + {0x10, CRL_REG_LEN_08BIT | CRL_REG_READ_AND_UPDATE, 0xA0, 0xE0, 0xA0}, + {0xD6, CRL_REG_LEN_08BIT, 0x07, 0x94}, + {0xC4, CRL_REG_LEN_08BIT, 0x0A, 0x94}, + {0x71, CRL_REG_LEN_08BIT, 0x33, 0x94}, + {0x72, CRL_REG_LEN_08BIT, 0x11, 0x94}, + {0xF0, CRL_REG_LEN_08BIT, 0x00, 0x94}, + {0x31, CRL_REG_LEN_08BIT, 0x82, 0x94}, + {0x1E, CRL_REG_LEN_08BIT, 0x80, 0x94}, + {0xDA, CRL_REG_LEN_08BIT, 0x01, 0x94}, + {0x00, CRL_REG_LEN_08BIT, 0x21, 0x94}, + {0xC1, CRL_REG_LEN_08BIT, 0x2B, 0x94}, + {0x31, CRL_REG_LEN_08BIT, 0x80, 0x94}, + {0xC9, CRL_REG_LEN_08BIT, 0x2D, 0x44}, + {0x05, CRL_REG_LEN_08BIT, 0x41, 0xE0}, + {0x00, CRL_REG_LEN_DELAY, 0x05, 0x00}, + {0x03, CRL_REG_LEN_08BIT, 0x00, 0xE0}, + {0x04, CRL_REG_LEN_08BIT | CRL_REG_READ_AND_UPDATE, 0x00, 0xE0, 0xFD}, + {0x37, CRL_REG_LEN_08BIT, 0x00, 0x44}, +}; + +static struct crl_register_write_rep adv7481_hdmi_mode_480i[] = { + {0x00, CRL_REG_LEN_08BIT, 0x81, 0x94}, /* Enable 1-lane MIPI */ + {0x00, CRL_REG_LEN_08BIT, 0xA1, 0x94}, /* Set Auto DPHY Timing */ + {0x10, CRL_REG_LEN_08BIT | CRL_REG_READ_AND_UPDATE, 0xA0, 0xE0, 0xA0}, + {0xD6, CRL_REG_LEN_08BIT, 0x07, 0x94}, + {0xC4, CRL_REG_LEN_08BIT, 0x0A, 0x94}, + {0x71, CRL_REG_LEN_08BIT, 0x33, 0x94}, + {0x72, CRL_REG_LEN_08BIT, 0x11, 0x94}, + {0xF0, CRL_REG_LEN_08BIT, 0x00, 0x94}, + {0x31, CRL_REG_LEN_08BIT, 0x82, 0x94}, + {0x1E, CRL_REG_LEN_08BIT, 0x80, 0x94}, + {0xDA, CRL_REG_LEN_08BIT, 0x01, 0x94}, + {0x00, CRL_REG_LEN_08BIT, 0x21, 0x94}, + {0xC1, CRL_REG_LEN_08BIT, 0x2B, 0x94}, + {0x31, CRL_REG_LEN_08BIT, 0x80, 0x94}, + {0xC9, CRL_REG_LEN_08BIT, 0x2D, 0x44}, + {0x05, CRL_REG_LEN_08BIT, 0x40, 0xE0}, + {0x00, CRL_REG_LEN_DELAY, 0x05, 0x00}, + {0x03, CRL_REG_LEN_08BIT, 0x00, 0xE0}, + {0x04, 
CRL_REG_LEN_08BIT | CRL_REG_READ_AND_UPDATE, 0x00, 0xE0, 0xFD}, + {0x37, CRL_REG_LEN_08BIT, 0x00, 0x44}, +}; + +static struct crl_register_write_rep adv7481_hdmi_mode_vga[] = { + {0x00, CRL_REG_LEN_08BIT, 0x84, 0x94}, /* Enable 4-lane MIPI */ + {0x00, CRL_REG_LEN_08BIT, 0xA4, 0x94}, /* Set Auto DPHY Timing */ + {0x10, CRL_REG_LEN_08BIT | CRL_REG_READ_AND_UPDATE, 0xA0, 0xE0, 0xA0}, + {0xD6, CRL_REG_LEN_08BIT, 0x07, 0x94}, + {0xC4, CRL_REG_LEN_08BIT, 0x0A, 0x94}, + {0x71, CRL_REG_LEN_08BIT, 0x33, 0x94}, + {0x72, CRL_REG_LEN_08BIT, 0x11, 0x94}, + {0xF0, CRL_REG_LEN_08BIT, 0x00, 0x94}, + {0x31, CRL_REG_LEN_08BIT, 0x82, 0x94}, + {0x1E, CRL_REG_LEN_08BIT, 0x80, 0x94}, + {0xDA, CRL_REG_LEN_08BIT, 0x01, 0x94}, + {0x00, CRL_REG_LEN_08BIT, 0x24, 0x94}, + {0xC1, CRL_REG_LEN_08BIT, 0x2B, 0x94}, + {0x31, CRL_REG_LEN_08BIT, 0x80, 0x94}, + {0xC9, CRL_REG_LEN_08BIT, 0x2D, 0x44}, + {0x05, CRL_REG_LEN_08BIT, 0x88, 0xE0}, + {0x00, CRL_REG_LEN_DELAY, 0x05, 0x00}, + {0x03, CRL_REG_LEN_08BIT, 0x00, 0xE0}, + {0x04, CRL_REG_LEN_08BIT | CRL_REG_READ_AND_UPDATE, 0x00, 0xE0, 0xFD}, + {0x37, CRL_REG_LEN_08BIT, 0x00, 0x44}, +}; + +static struct crl_register_write_rep adv7481_hdmi_powerup_regset[] = { + {0x00, CRL_REG_LEN_08BIT, 0x84, 0x94}, /* Enable 4-lane MIPI */ + {0x00, CRL_REG_LEN_08BIT, 0xA4, 0x94}, /* Set Auto DPHY Timing */ + {0xDB, CRL_REG_LEN_08BIT, 0x10, 0x94}, /* ADI Required Write */ + {0xD6, CRL_REG_LEN_08BIT, 0x07, 0x94}, /* ADI Required Write */ + {0xC4, CRL_REG_LEN_08BIT, 0x0A, 0x94}, /* ADI Required Write */ + {0x71, CRL_REG_LEN_08BIT, 0x33, 0x94}, /* ADI Required Write */ + {0x72, CRL_REG_LEN_08BIT, 0x11, 0x94}, /* ADI Required Write */ + {0xF0, CRL_REG_LEN_08BIT, 0x00, 0x94}, /* i2c_dphy_pwdn - 1'b0 */ + {0x31, CRL_REG_LEN_08BIT, 0x82, 0x94}, /* ADI Required Write */ + {0x1E, CRL_REG_LEN_08BIT, 0xC0, 0x94}, + /* ADI Required Write, transmit only Frame Start/End packets */ + {0xDA, CRL_REG_LEN_08BIT, 0x01, 0x94}, /* i2c_mipi_pll_en - 1'b1 */ +}; + +static struct 
crl_register_write_rep adv7481_hdmi_streamon_regs[] = { + {0x00, CRL_REG_LEN_DELAY, 0x02, 0x00}, + {0x00, CRL_REG_LEN_08BIT, 0x24, 0x94}, /* Power-up CSI-TX */ + {0x00, CRL_REG_LEN_DELAY, 0x01, 0x00}, + {0xC1, CRL_REG_LEN_08BIT, 0x2B, 0x94}, /* ADI recommended setting */ + {0x00, CRL_REG_LEN_DELAY, 0x01, 0x00}, + {0x31, CRL_REG_LEN_08BIT, 0x80, 0x94}, /* ADI recommended setting */ +}; + +static struct crl_register_write_rep adv7481_hdmi_streamoff_regs[] = { + {0x31, CRL_REG_LEN_08BIT, 0x82, 0x94}, /* ADI Recommended Write */ + {0x1E, CRL_REG_LEN_08BIT, 0x00, 0x94}, /* Reset the clock Lane */ + {0x00, CRL_REG_LEN_08BIT, 0xA4, 0x94}, + {0xDA, CRL_REG_LEN_08BIT, 0x00, 0x94}, + /* i2c_mipi_pll_en -1'b0 Disable MIPI PLL */ + {0xC1, CRL_REG_LEN_08BIT, 0x3B, 0x94}, +}; + +static struct crl_pll_configuration adv7481_hdmi_pll_configurations[] = { + { + .input_clk = 24000000, + .op_sys_clk = 297000000, + .bitsperpixel = 16, + .pixel_rate_csi = 594000000, + .pixel_rate_pa = 594000000, + .comp_items = 0, + .ctrl_data = 0, + .pll_regs_items = 0, + .pll_regs = NULL, + }, + { + .input_clk = 24000000, + .op_sys_clk = 445500000, + .bitsperpixel = 24, + .pixel_rate_csi = 891000000, + .pixel_rate_pa = 891000000, + .comp_items = 0, + .ctrl_data = 0, + .pll_regs_items = 0, + .pll_regs = NULL, + }, +}; + +static struct crl_subdev_rect_rep adv7481_hdmi_1080p_rects[] = { + { + .subdev_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .in_rect.left = 0, + .in_rect.top = 0, + .in_rect.width = 1920, + .in_rect.height = 1080, + .out_rect.left = 0, + .out_rect.top = 0, + .out_rect.width = 1920, + .out_rect.height = 1080, + }, + { + .subdev_type = CRL_SUBDEV_TYPE_BINNER, + .in_rect.left = 0, + .in_rect.top = 0, + .in_rect.width = 1920, + .in_rect.height = 1080, + .out_rect.left = 0, + .out_rect.top = 0, + .out_rect.width = 1920, + .out_rect.height = 1080, + }, +}; + +static struct crl_subdev_rect_rep adv7481_hdmi_720p_rects[] = { + { + .subdev_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .in_rect.left = 0, + 
.in_rect.top = 0, + .in_rect.width = 1920, + .in_rect.height = 1080, + .out_rect.left = 0, + .out_rect.top = 0, + .out_rect.width = 1920, + .out_rect.height = 1080, + }, + { + .subdev_type = CRL_SUBDEV_TYPE_BINNER, + .in_rect.left = 0, + .in_rect.top = 0, + .in_rect.width = 1920, + .in_rect.height = 1080, + .out_rect.left = 0, + .out_rect.top = 0, + .out_rect.width = 1280, + .out_rect.height = 720, + }, +}; + +static struct crl_subdev_rect_rep adv7481_hdmi_VGA_rects[] = { + { + .subdev_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .in_rect.left = 0, + .in_rect.top = 0, + .in_rect.width = 1920, + .in_rect.height = 1080, + .out_rect.left = 0, + .out_rect.top = 0, + .out_rect.width = 1920, + .out_rect.height = 1080, + }, + { + .subdev_type = CRL_SUBDEV_TYPE_BINNER, + .in_rect.left = 0, + .in_rect.top = 0, + .in_rect.width = 1920, + .in_rect.height = 1080, + .out_rect.left = 0, + .out_rect.top = 0, + .out_rect.width = 640, + .out_rect.height = 480, + }, +}; + +static struct crl_subdev_rect_rep adv7481_hdmi_1080i_rects[] = { + { + .subdev_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .in_rect.left = 0, + .in_rect.top = 0, + .in_rect.width = 1920, + .in_rect.height = 1080, + .out_rect.left = 0, + .out_rect.top = 0, + .out_rect.width = 1920, + .out_rect.height = 1080, + }, + { + .subdev_type = CRL_SUBDEV_TYPE_BINNER, + .in_rect.left = 0, + .in_rect.top = 0, + .in_rect.width = 1920, + .in_rect.height = 1080, + .out_rect.left = 0, + .out_rect.top = 0, + .out_rect.width = 1920, + .out_rect.height = 540, + }, +}; + +static struct crl_subdev_rect_rep adv7481_hdmi_480p_rects[] = { + { + .subdev_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .in_rect.left = 0, + .in_rect.top = 0, + .in_rect.width = 1920, + .in_rect.height = 1080, + .out_rect.left = 0, + .out_rect.top = 0, + .out_rect.width = 1920, + .out_rect.height = 1080, + }, + { + .subdev_type = CRL_SUBDEV_TYPE_BINNER, + .in_rect.left = 0, + .in_rect.top = 0, + .in_rect.width = 1920, + .in_rect.height = 1080, + .out_rect.left = 0, + .out_rect.top = 
0, + .out_rect.width = 720, + .out_rect.height = 480, + }, +}; + +static struct crl_subdev_rect_rep adv7481_hdmi_480i_rects[] = { + { + .subdev_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .in_rect.left = 0, + .in_rect.top = 0, + .in_rect.width = 1920, + .in_rect.height = 1080, + .out_rect.left = 0, + .out_rect.top = 0, + .out_rect.width = 1920, + .out_rect.height = 1080, + }, + { + .subdev_type = CRL_SUBDEV_TYPE_BINNER, + .in_rect.left = 0, + .in_rect.top = 0, + .in_rect.width = 1920, + .in_rect.height = 1080, + .out_rect.left = 0, + .out_rect.top = 0, + .out_rect.width = 720, + .out_rect.height = 240, + }, +}; + +static struct crl_subdev_rect_rep adv7481_hdmi_576p_rects[] = { + { + .subdev_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .in_rect.left = 0, + .in_rect.top = 0, + .in_rect.width = 1920, + .in_rect.height = 1080, + .out_rect.left = 0, + .out_rect.top = 0, + .out_rect.width = 1920, + .out_rect.height = 1080, + }, + { + .subdev_type = CRL_SUBDEV_TYPE_BINNER, + .in_rect.left = 0, + .in_rect.top = 0, + .in_rect.width = 1920, + .in_rect.height = 1080, + .out_rect.left = 0, + .out_rect.top = 0, + .out_rect.width = 720, + .out_rect.height = 576, + }, +}; + +static struct crl_subdev_rect_rep adv7481_hdmi_576i_rects[] = { + { + .subdev_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .in_rect.left = 0, + .in_rect.top = 0, + .in_rect.width = 1920, + .in_rect.height = 1080, + .out_rect.left = 0, + .out_rect.top = 0, + .out_rect.width = 1920, + .out_rect.height = 1080, + }, + { + .subdev_type = CRL_SUBDEV_TYPE_BINNER, + .in_rect.left = 0, + .in_rect.top = 0, + .in_rect.width = 1920, + .in_rect.height = 1080, + .out_rect.left = 0, + .out_rect.top = 0, + .out_rect.width = 720, + .out_rect.height = 288, + }, +}; +static struct crl_mode_rep adv7481_hdmi_modes[] = { + { + .sd_rects_items = ARRAY_SIZE(adv7481_hdmi_1080p_rects), + .sd_rects = adv7481_hdmi_1080p_rects, + .binn_hor = 1, + .binn_vert = 1, + .scale_m = 1, + .width = 1920, + .height = 1080, + .mode_regs_items = 
ARRAY_SIZE(adv7481_hdmi_mode_1080p), + .mode_regs = adv7481_hdmi_mode_1080p, + .comp_items = 0, + .ctrl_data = 0, + }, + { + .sd_rects_items = ARRAY_SIZE(adv7481_hdmi_720p_rects), + .sd_rects = adv7481_hdmi_720p_rects, + .binn_hor = 1, + .binn_vert = 1, + .scale_m = 1, + .width = 1280, + .height = 720, + .mode_regs_items = ARRAY_SIZE(adv7481_hdmi_mode_720p), + .mode_regs = adv7481_hdmi_mode_720p, + .comp_items = 0, + .ctrl_data = 0, + }, + { + .sd_rects_items = ARRAY_SIZE(adv7481_hdmi_VGA_rects), + .sd_rects = adv7481_hdmi_VGA_rects, + .binn_hor = 3, + .binn_vert = 2, + .scale_m = 1, + .width = 640, + .height = 480, + .mode_regs_items = ARRAY_SIZE(adv7481_hdmi_mode_vga), + .mode_regs = adv7481_hdmi_mode_vga, + .comp_items = 0, + .ctrl_data = 0, + }, + { + .sd_rects_items = ARRAY_SIZE(adv7481_hdmi_1080i_rects), + .sd_rects = adv7481_hdmi_1080i_rects, + .binn_hor = 1, + .binn_vert = 2, + .scale_m = 1, + .width = 1920, + .height = 540, + .mode_regs_items = ARRAY_SIZE(adv7481_hdmi_mode_1080i), + .mode_regs = adv7481_hdmi_mode_1080i, + .comp_items = 0, + .ctrl_data = 0, + }, + { + .sd_rects_items = ARRAY_SIZE(adv7481_hdmi_480p_rects), + .sd_rects = adv7481_hdmi_480p_rects, + .binn_hor = 2, + .binn_vert = 2, + .scale_m = 1, + .width = 720, + .height = 480, + .mode_regs_items = ARRAY_SIZE(adv7481_hdmi_mode_480p), + .mode_regs = adv7481_hdmi_mode_480p, + .comp_items = 0, + .ctrl_data = 0, + }, + + { + .sd_rects_items = ARRAY_SIZE(adv7481_hdmi_480i_rects), + .sd_rects = adv7481_hdmi_480i_rects, + .binn_hor = 2, + .binn_vert = 4, + .scale_m = 1, + .width = 720, + .height = 240, + .mode_regs_items = ARRAY_SIZE(adv7481_hdmi_mode_480i), + .mode_regs = adv7481_hdmi_mode_480i, + .comp_items = 0, + .ctrl_data = 0, + }, + + { + .sd_rects_items = ARRAY_SIZE(adv7481_hdmi_576p_rects), + .sd_rects = adv7481_hdmi_576p_rects, + .binn_hor = 2, + .binn_vert = 1, + .scale_m = 1, + .width = 720, + .height = 576, + .mode_regs_items = ARRAY_SIZE(adv7481_hdmi_mode_576p), + .mode_regs = 
adv7481_hdmi_mode_576p, + .comp_items = 0, + .ctrl_data = 0, + }, + { + .sd_rects_items = ARRAY_SIZE(adv7481_hdmi_576i_rects), + .sd_rects = adv7481_hdmi_576i_rects, + .binn_hor = 2, + .binn_vert = 3, + .scale_m = 1, + .width = 720, + .height = 288, + .mode_regs_items = ARRAY_SIZE(adv7481_hdmi_mode_576i), + .mode_regs = adv7481_hdmi_mode_576i, + .comp_items = 0, + .ctrl_data = 0, + }, +}; + +static struct crl_sensor_subdev_config adv7481_hdmi_sensor_subdevs[] = { + { + .subdev_type = CRL_SUBDEV_TYPE_BINNER, + .name = "adv7481 hdmi binner", + }, + { + .subdev_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .name = "adv7481 hdmi pixel array", + }, +}; + +static struct crl_sensor_limits adv7481_hdmi_sensor_limits = { + .x_addr_min = 0, + .y_addr_min = 0, + .x_addr_max = 1920, + .y_addr_max = 1080, + .min_frame_length_lines = 160, + .max_frame_length_lines = 65535, + .min_line_length_pixels = 6024, + .max_line_length_pixels = 32752, + .scaler_m_min = 1, + .scaler_m_max = 1, + .scaler_n_min = 1, + .scaler_n_max = 1, + .min_even_inc = 1, + .max_even_inc = 1, + .min_odd_inc = 1, + .max_odd_inc = 1, +}; + +static struct crl_csi_data_fmt adv7481_hdmi_crl_csi_data_fmt[] = { + { + .code = ICI_FORMAT_RGB565, + .pixel_order = CRL_PIXEL_ORDER_GRBG, + .bits_per_pixel = 16, + .regs_items = ARRAY_SIZE(adv7481_hdmi_mode_rgb565), + .regs = adv7481_hdmi_mode_rgb565, + }, + { + .code = ICI_FORMAT_UYVY, + .pixel_order = CRL_PIXEL_ORDER_GRBG, + .bits_per_pixel = 16, + .regs_items = ARRAY_SIZE(adv7481_hdmi_mode_uyvy), + .regs = adv7481_hdmi_mode_uyvy, + }, + { + .code = ICI_FORMAT_YUYV, + .pixel_order = CRL_PIXEL_ORDER_GRBG, + .bits_per_pixel = 16, + .regs_items = ARRAY_SIZE(adv7481_hdmi_mode_yuyv), + .regs = adv7481_hdmi_mode_yuyv, + }, + { + .code = ICI_FORMAT_RGB888, + .pixel_order = CRL_PIXEL_ORDER_GRBG, + .bits_per_pixel = 24, + .regs_items = ARRAY_SIZE(adv7481_hdmi_mode_rgb888), + .regs = adv7481_hdmi_mode_rgb888, + }, +}; + +static struct crl_ctrl_data adv7481_hdmi_ctrls[] = { + { + .sd_type 
= CRL_SUBDEV_TYPE_BINNER, + .op_type = CRL_CTRL_SET_OP, + .context = SENSOR_IDLE, + .ctrl_id = ICI_EXT_SD_PARAM_ID_LINK_FREQ, + .name = "CTRL_ID_LINK_FREQ", + .type = CRL_CTRL_TYPE_MENU_INT, + .data.int_menu.def = 0, + .data.int_menu.max = ARRAY_SIZE(adv7481_hdmi_pll_configurations) - 1, + .flags = 0, + .impact = CRL_IMPACTS_NO_IMPACT, + .regs_items = 0, + .regs = 0, + .dep_items = 0, + .dep_ctrls = 0, + }, + { + .sd_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .op_type = CRL_CTRL_GET_OP, + .context = SENSOR_POWERED_ON, + .ctrl_id = ICI_EXT_SD_PARAM_ID_PIXEL_RATE, + .name = "CTRL_ID_PIXEL_RATE_PA", + .type = CRL_CTRL_TYPE_INTEGER, + .data.std_data.min = 0, + .data.std_data.max = 0, + .data.std_data.step = 1, + .data.std_data.def = 0, + .flags = 0, + .impact = CRL_IMPACTS_NO_IMPACT, + .regs_items = 0, + .regs = 0, + .dep_items = 0, + .dep_ctrls = 0, + }, + { + .sd_type = CRL_SUBDEV_TYPE_BINNER, + .op_type = CRL_CTRL_GET_OP, + .context = SENSOR_POWERED_ON, + .ctrl_id = ICI_EXT_SD_PARAM_ID_PIXEL_RATE, + .name = "CTRL_ID_PIXEL_RATE_CSI", + .type = CRL_CTRL_TYPE_INTEGER, + .data.std_data.min = 0, + .data.std_data.max = 0, + .data.std_data.step = 1, + .data.std_data.def = 0, + .flags = 0, + .impact = CRL_IMPACTS_NO_IMPACT, + .regs_items = 0, + .regs = 0, + .dep_items = 0, + .dep_ctrls = 0, + }, +}; + +int adv7481_sensor_init(struct i2c_client*); +int adv7481_sensor_cleanup(struct i2c_client*); + +static struct crl_sensor_configuration adv7481_hdmi_crl_configuration = { + + .sensor_init = adv7481_sensor_init, + .sensor_cleanup = adv7481_sensor_cleanup, + + .onetime_init_regs_items = ARRAY_SIZE(adv7481_hdmi_onetime_init_regset), + .onetime_init_regs = adv7481_hdmi_onetime_init_regset, + + .powerup_regs_items = ARRAY_SIZE(adv7481_hdmi_powerup_regset), + .powerup_regs = adv7481_hdmi_powerup_regset, + + .poweroff_regs_items = ARRAY_SIZE(adv7481_hdmi_streamoff_regs), + .poweroff_regs = adv7481_hdmi_streamoff_regs, + + .id_reg_items = 0, + .id_regs = NULL, + + .subdev_items = 
ARRAY_SIZE(adv7481_hdmi_sensor_subdevs), + .subdevs = adv7481_hdmi_sensor_subdevs, + + .sensor_limits = &adv7481_hdmi_sensor_limits, + + .pll_config_items = ARRAY_SIZE(adv7481_hdmi_pll_configurations), + .pll_configs = adv7481_hdmi_pll_configurations, + + .modes_items = ARRAY_SIZE(adv7481_hdmi_modes), + .modes = adv7481_hdmi_modes, + + .streamon_regs_items = ARRAY_SIZE(adv7481_hdmi_streamon_regs), + .streamon_regs = adv7481_hdmi_streamon_regs, + + .streamoff_regs_items = ARRAY_SIZE(adv7481_hdmi_streamoff_regs), + .streamoff_regs = adv7481_hdmi_streamoff_regs, + + .ctrl_items = ARRAY_SIZE(adv7481_hdmi_ctrls), + .ctrl_bank = adv7481_hdmi_ctrls, + + .csi_fmts_items = ARRAY_SIZE(adv7481_hdmi_crl_csi_data_fmt), + .csi_fmts = adv7481_hdmi_crl_csi_data_fmt, + + .addr_len = CRL_ADDR_7BIT, +}; + +#endif /* __CRLMODULE_ADV7481_HDMI_CONFIGURATION_H_ */ diff --git a/drivers/media/i2c/crlmodule-lite/crl_ar0231at_configuration.h b/drivers/media/i2c/crlmodule-lite/crl_ar0231at_configuration.h new file mode 100644 index 0000000000000..7ea3560f33c75 --- /dev/null +++ b/drivers/media/i2c/crlmodule-lite/crl_ar0231at_configuration.h @@ -0,0 +1,2411 @@ +/* + * Copyright (c) 2018 Intel Corporation. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License version + * 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + */ + +#ifndef __CRLMODULE_AR0231AT_CONFIGURATION_H_ +#define __CRLMODULE_AR0231AT_CONFIGURATION_H_ + +#include "crlmodule-sensor-ds.h" + +struct crl_pll_configuration ar0231at_pll_configurations[] = { + { + .input_clk = 27000000, + .op_sys_clk = 87750000, + .bitsperpixel = 12, + .pixel_rate_csi = 176000000, + .pixel_rate_pa = 176000000, /* pixel_rate = op_sys_clk*2 *csi_lanes/bitsperpixel */ + .csi_lanes = 4, + .comp_items = 0, + .ctrl_data = 0, + .pll_regs_items = 0, + .pll_regs = 0, + }, + { + .input_clk = 27000000, + .op_sys_clk = 87750000, + .bitsperpixel = 10, + .pixel_rate_csi = 211200000, + .pixel_rate_pa = 211200000, /* pixel_rate = op_sys_clk*2 *csi_lanes/bitsperpixel */ + .csi_lanes = 4, + .comp_items = 0, + .ctrl_data = 0, + .pll_regs_items = 0, + .pll_regs = 0, + }, +}; + +struct crl_sensor_subdev_config ar0231at_sensor_subdevs[] = { + { + .subdev_type = CRL_SUBDEV_TYPE_BINNER, + .name = "ar0231at binner", + }, + { + .subdev_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .name = "ar0231at pixel array", + }, +}; + +struct crl_subdev_rect_rep ar0231at_1920_1088_rects[] = { + { + .subdev_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .in_rect.left = 0, + .in_rect.top = 0, + .in_rect.width = 1920, + .in_rect.height = 1088, + .out_rect.left = 0, + .out_rect.top = 0, + .out_rect.width = 1920, + .out_rect.height = 1088, + }, + { + .subdev_type = CRL_SUBDEV_TYPE_BINNER, + .in_rect.left = 0, + .in_rect.top = 0, + .in_rect.width = 1920, + .in_rect.height = 1088, + .out_rect.left = 0, + .out_rect.top = 0, + .out_rect.width = 1920, + .out_rect.height = 1088, + } +}; + +/* + * Exposure mode: + * 0: Linear mode + * 1: 2-HDR mode + * 2: 3-HDR mode + * 3: 4-HDR mode + */ +struct crl_ctrl_data_pair ar0231at_ctrl_data_modes[] = { + { + .ctrl_id = ICI_EXT_SD_PARAM_ID_EXPOSURE, + .data = 0, + }, + { + .ctrl_id = ICI_EXT_SD_PARAM_ID_EXPOSURE, + .data = 1, + }, + { + .ctrl_id = ICI_EXT_SD_PARAM_ID_EXPOSURE, + .data = 2, + }, + { + .ctrl_id = ICI_EXT_SD_PARAM_ID_EXPOSURE, + .data = 3, + 
}, + { + .ctrl_id = ICI_EXT_SD_PARAM_ID_EXPOSURE, + .data = 4, + }, +}; + +static struct crl_register_write_rep ar0231at_1920_1088_10bit_linear_mode[] = { + { 0x301A, CRL_REG_LEN_16BIT, 0x1058, 0x10 }, + { 0x0000, CRL_REG_LEN_DELAY, 200, 0x10 }, + { 0x3092, CRL_REG_LEN_16BIT, 0x0C24, 0x10 }, + { 0x337A, CRL_REG_LEN_16BIT, 0x0C80, 0x10 }, + { 0x3520, CRL_REG_LEN_16BIT, 0x1288, 0x10 }, + { 0x3522, CRL_REG_LEN_16BIT, 0x880C, 0x10 }, + { 0x3524, CRL_REG_LEN_16BIT, 0x0C12, 0x10 }, + { 0x352C, CRL_REG_LEN_16BIT, 0x1212, 0x10 }, + { 0x354A, CRL_REG_LEN_16BIT, 0x007F, 0x10 }, + { 0x350C, CRL_REG_LEN_16BIT, 0x055C, 0x10 }, + { 0x3506, CRL_REG_LEN_16BIT, 0x3333, 0x10 }, + { 0x3508, CRL_REG_LEN_16BIT, 0x3333, 0x10 }, + { 0x3100, CRL_REG_LEN_16BIT, 0x4000, 0x10 }, + { 0x3280, CRL_REG_LEN_16BIT, 0x0FA0, 0x10 }, + { 0x3282, CRL_REG_LEN_16BIT, 0x0FA0, 0x10 }, + { 0x3284, CRL_REG_LEN_16BIT, 0x0FA0, 0x10 }, + { 0x3286, CRL_REG_LEN_16BIT, 0x0FA0, 0x10 }, + { 0x3288, CRL_REG_LEN_16BIT, 0x0FA0, 0x10 }, + { 0x328A, CRL_REG_LEN_16BIT, 0x0FA0, 0x10 }, + { 0x328C, CRL_REG_LEN_16BIT, 0x0FA0, 0x10 }, + { 0x328E, CRL_REG_LEN_16BIT, 0x0FA0, 0x10 }, + { 0x3290, CRL_REG_LEN_16BIT, 0x0FA0, 0x10 }, + { 0x3292, CRL_REG_LEN_16BIT, 0x0FA0, 0x10 }, + { 0x3294, CRL_REG_LEN_16BIT, 0x0FA0, 0x10 }, + { 0x3296, CRL_REG_LEN_16BIT, 0x0FA0, 0x10 }, + { 0x3298, CRL_REG_LEN_16BIT, 0x0FA0, 0x10 }, + { 0x329A, CRL_REG_LEN_16BIT, 0x0FA0, 0x10 }, + { 0x329C, CRL_REG_LEN_16BIT, 0x0FA0, 0x10 }, + { 0x329E, CRL_REG_LEN_16BIT, 0x0FA0, 0x10 }, + { 0x301A, CRL_REG_LEN_16BIT, 0x10D8, 0x10 }, + { 0x0000, CRL_REG_LEN_DELAY, 200, 0x10 }, + { 0x2512, CRL_REG_LEN_16BIT, 0x8000, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0905, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x3350, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x2004, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1460, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1578, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0901, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x7B24, 0x10 }, + { 0x2510, 
CRL_REG_LEN_16BIT, 0xFF24, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0xFF24, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0xEA24, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1022, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x2410, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x155A, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0901, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1400, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x24FF, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x24FF, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x24EA, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x2324, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x647A, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x2404, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x052C, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x400A, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0xFF0A, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0xFF0A, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1008, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x3851, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1440, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0004, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0801, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0408, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1180, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x2652, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1518, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0906, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1348, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1002, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1016, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1181, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1189, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1056, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1210, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0901, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0D09, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1413, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x8809, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x2B15, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x8809, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0311, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 
0xD909, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1214, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x4109, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0312, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1409, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0110, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0xD612, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1012, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1212, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1011, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0xDD11, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0xD910, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x5609, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1511, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0xDB09, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1511, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x9B09, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0F11, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0xBB12, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1A12, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1014, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x6012, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x5010, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x7610, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0xE609, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0812, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x4012, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x6009, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x290B, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0904, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1440, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0923, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x15C8, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x13C8, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x092C, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1588, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1388, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0C09, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0C14, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x4109, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1112, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x6212, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x6011, 0x10 }, + { 
0x2510, CRL_REG_LEN_16BIT, 0xBF11, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0xBB10, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x6611, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0xFB09, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x3511, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0xBB12, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x6312, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x6014, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0015, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0011, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0xB812, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0xA012, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0010, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x2610, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0013, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0011, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0008, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x3053, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x4215, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x4013, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x4010, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0210, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1611, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x8111, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x8910, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x5612, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1009, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x010D, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0815, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0xC015, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0xD013, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x5009, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1313, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0xD009, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0215, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0xC015, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0xC813, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0xC009, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0515, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x8813, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x8009, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0213, 0x10 }, + { 0x2510, 
CRL_REG_LEN_16BIT, 0x8809, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0411, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0xC909, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0814, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0109, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0B11, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0xD908, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1400, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x091A, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1440, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0903, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1214, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0901, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x10D6, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1210, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1212, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1210, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x11DD, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x11D9, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1056, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0917, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x11DB, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0913, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x11FB, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0905, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x11BB, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x121A, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1210, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1460, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1250, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1076, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x10E6, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0901, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x15A8, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0901, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x13A8, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1240, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1260, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0925, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x13AD, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0902, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0907, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 
0x1588, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0901, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x138D, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0B09, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0914, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x4009, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0B13, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x8809, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1C0C, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0920, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1262, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1260, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x11BF, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x11BB, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1066, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x090A, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x11FB, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x093B, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x11BB, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1263, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1260, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1400, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1508, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x11B8, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x12A0, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1200, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1026, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1000, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1300, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1100, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x437A, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0609, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0B05, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0708, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x4137, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x502C, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x2CFE, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x15FE, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0C2C, 0x10 }, + { 0x32E6, CRL_REG_LEN_16BIT, 0x00E0, 0x10 }, + { 0x1008, CRL_REG_LEN_16BIT, 0x036F, 0x10 }, + { 0x100C, CRL_REG_LEN_16BIT, 0x058F, 0x10 }, + { 0x100E, CRL_REG_LEN_16BIT, 0x07AF, 0x10 }, + { 
0x1010, CRL_REG_LEN_16BIT, 0x014F, 0x10 }, + { 0x3230, CRL_REG_LEN_16BIT, 0x0312, 0x10 }, + { 0x3232, CRL_REG_LEN_16BIT, 0x0532, 0x10 }, + { 0x3234, CRL_REG_LEN_16BIT, 0x0752, 0x10 }, + { 0x3236, CRL_REG_LEN_16BIT, 0x00F2, 0x10 }, + { 0x3566, CRL_REG_LEN_16BIT, 0x3328, 0x10 }, + { 0x32D0, CRL_REG_LEN_16BIT, 0x3A02, 0x10 }, + { 0x32D2, CRL_REG_LEN_16BIT, 0x3508, 0x10 }, + { 0x32D4, CRL_REG_LEN_16BIT, 0x3702, 0x10 }, + { 0x32D6, CRL_REG_LEN_16BIT, 0x3C04, 0x10 }, + { 0x32DC, CRL_REG_LEN_16BIT, 0x370A, 0x10 }, + { 0x30B0, CRL_REG_LEN_16BIT, 0x0800, 0x10 }, + { 0x302A, CRL_REG_LEN_16BIT, 0x0008, 0x10 }, + { 0x302C, CRL_REG_LEN_16BIT, 0x0001, 0x10 }, + { 0x302E, CRL_REG_LEN_16BIT, 0x0003, 0x10 }, + { 0x3030, CRL_REG_LEN_16BIT, 0x004E, 0x10 }, + { 0x3036, CRL_REG_LEN_16BIT, 0x0008, 0x10 }, + { 0x3038, CRL_REG_LEN_16BIT, 0x0001, 0x10 }, + { 0x30B0, CRL_REG_LEN_16BIT, 0x0800, 0x10 }, + { 0x30A2, CRL_REG_LEN_16BIT, 0x0001, 0x10 }, + { 0x30A6, CRL_REG_LEN_16BIT, 0x0001, 0x10 }, + { 0x3040, CRL_REG_LEN_16BIT, 0x0000, 0x10 }, + { 0x3082, CRL_REG_LEN_16BIT, 0x0008, 0x10 }, + { 0x30BA, CRL_REG_LEN_16BIT, 0x11F2, 0x10 }, + { 0x3044, CRL_REG_LEN_16BIT, 0x0400, 0x10 }, + { 0x3064, CRL_REG_LEN_16BIT, 0x1802, 0x10 }, + { 0x33E0, CRL_REG_LEN_16BIT, 0x0C80, 0x10 }, + { 0x33E0, CRL_REG_LEN_16BIT, 0x0C80, 0x10 }, + { 0x3180, CRL_REG_LEN_16BIT, 0x0080, 0x10 }, + { 0x33E4, CRL_REG_LEN_16BIT, 0x0080, 0x10 }, + { 0x33E0, CRL_REG_LEN_16BIT, 0x0C80, 0x10 }, + { 0x33E0, CRL_REG_LEN_16BIT, 0x0C80, 0x10 }, + { 0x3004, CRL_REG_LEN_16BIT, 0x0004, 0x10 }, + { 0x3008, CRL_REG_LEN_16BIT, 0x0783, 0x10 }, + { 0x3002, CRL_REG_LEN_16BIT, 0x003C, 0x10 }, + { 0x3006, CRL_REG_LEN_16BIT, 0x047B, 0x10 }, + { 0x3032, CRL_REG_LEN_16BIT, 0x0000, 0x10 }, + { 0x3400, CRL_REG_LEN_16BIT, 0x0010, 0x10 }, + { 0x3402, CRL_REG_LEN_16BIT, 0x0F10, 0x10 }, + { 0x3404, CRL_REG_LEN_16BIT, 0x0970, 0x10 }, + { 0x3082, CRL_REG_LEN_16BIT, 0x0000, 0x10 }, + { 0x30BA, CRL_REG_LEN_16BIT, 0x11F1, 0x10 }, + { 0x0000, 
CRL_REG_LEN_DELAY, 200, 0x10 }, + { 0x30BA, CRL_REG_LEN_16BIT, 0x11F0, 0x10 }, + { 0x300C, CRL_REG_LEN_16BIT, 0x0872, 0x10 }, + { 0x300A, CRL_REG_LEN_16BIT, 0x054A, 0x10 }, + { 0x3042, CRL_REG_LEN_16BIT, 0x0000, 0x10 }, + { 0x3238, CRL_REG_LEN_16BIT, 0x0222, 0x10 }, + { 0x3012, CRL_REG_LEN_16BIT, 0x0163, 0x10 }, + { 0x3014, CRL_REG_LEN_16BIT, 0x014F, 0x10 }, + { 0x30B0, CRL_REG_LEN_16BIT, 0x0800, 0x10 }, + { 0x32EA, CRL_REG_LEN_16BIT, 0x3C08, 0x10 }, + { 0x32EC, CRL_REG_LEN_16BIT, 0x72A1, 0x10 }, + { 0x31D0, CRL_REG_LEN_16BIT, 0x0001, 0x10 }, + { 0x31AE, CRL_REG_LEN_16BIT, 0x0001, 0x10 }, + { 0x31AC, CRL_REG_LEN_16BIT, 0x0C0A, 0x10 }, + /* try sync mode */ + { 0x340A, CRL_REG_LEN_16BIT, 0x0077, 0x10 }, + { 0x340C, CRL_REG_LEN_16BIT, 0x0080, 0x10 }, + { 0x30CE, CRL_REG_LEN_16BIT, 0x0120, 0x10 }, + { 0x301A, CRL_REG_LEN_16BIT, 0x19DC, 0x10 }, + { 0x3370, CRL_REG_LEN_16BIT, 0x0231, 0x10 }, +}; + +static struct crl_register_write_rep ar0231at_1920_1088_linear_mode[] = { +#if 0 + { 0x300C, CRL_REG_LEN_16BIT, 0x0872, 0x10 }, + { 0x300A, CRL_REG_LEN_16BIT, 0x05C8, 0x10 }, + { 0x3366, CRL_REG_LEN_16BIT, 0xAAAA, 0x10 }, + { 0x305E, CRL_REG_LEN_16BIT, 0x0080, 0x10 }, + { 0x3082, CRL_REG_LEN_16BIT, 0x0000, 0x10 }, + { 0x3238, CRL_REG_LEN_16BIT, 0x0222, 0x10 }, + { 0x3012, CRL_REG_LEN_16BIT, 0x0163, 0x10 }, + { 0x3212, CRL_REG_LEN_16BIT, 0x0002, 0x10 }, + { 0x3216, CRL_REG_LEN_16BIT, 0x0002, 0x10 }, + { 0x321A, CRL_REG_LEN_16BIT, 0x0000, 0x10 }, + { 0x3070, CRL_REG_LEN_16BIT, 0x0000, 0x10 }, +#endif + { 0x301A, CRL_REG_LEN_16BIT, 0x1058, 0x10 }, + { 0x0000, CRL_REG_LEN_DELAY, 200, 0x10 }, + { 0x3092, CRL_REG_LEN_16BIT, 0x0C24, 0x10 }, + { 0x337A, CRL_REG_LEN_16BIT, 0x0C80, 0x10 }, + { 0x3520, CRL_REG_LEN_16BIT, 0x1288, 0x10 }, + { 0x3522, CRL_REG_LEN_16BIT, 0x880C, 0x10 }, + { 0x3524, CRL_REG_LEN_16BIT, 0x0C12, 0x10 }, + { 0x352C, CRL_REG_LEN_16BIT, 0x1212, 0x10 }, + { 0x354A, CRL_REG_LEN_16BIT, 0x007F, 0x10 }, + { 0x350C, CRL_REG_LEN_16BIT, 0x055C, 0x10 }, + { 0x3506, 
CRL_REG_LEN_16BIT, 0x3333, 0x10 }, + { 0x3508, CRL_REG_LEN_16BIT, 0x3333, 0x10 }, + { 0x3100, CRL_REG_LEN_16BIT, 0x4000, 0x10 }, + { 0x3280, CRL_REG_LEN_16BIT, 0x0FA0, 0x10 }, + { 0x3282, CRL_REG_LEN_16BIT, 0x0FA0, 0x10 }, + { 0x3284, CRL_REG_LEN_16BIT, 0x0FA0, 0x10 }, + { 0x3286, CRL_REG_LEN_16BIT, 0x0FA0, 0x10 }, + { 0x3288, CRL_REG_LEN_16BIT, 0x0FA0, 0x10 }, + { 0x328A, CRL_REG_LEN_16BIT, 0x0FA0, 0x10 }, + { 0x328C, CRL_REG_LEN_16BIT, 0x0FA0, 0x10 }, + { 0x328E, CRL_REG_LEN_16BIT, 0x0FA0, 0x10 }, + { 0x3290, CRL_REG_LEN_16BIT, 0x0FA0, 0x10 }, + { 0x3292, CRL_REG_LEN_16BIT, 0x0FA0, 0x10 }, + { 0x3294, CRL_REG_LEN_16BIT, 0x0FA0, 0x10 }, + { 0x3296, CRL_REG_LEN_16BIT, 0x0FA0, 0x10 }, + { 0x3298, CRL_REG_LEN_16BIT, 0x0FA0, 0x10 }, + { 0x329A, CRL_REG_LEN_16BIT, 0x0FA0, 0x10 }, + { 0x329C, CRL_REG_LEN_16BIT, 0x0FA0, 0x10 }, + { 0x329E, CRL_REG_LEN_16BIT, 0x0FA0, 0x10 }, + { 0x301A, CRL_REG_LEN_16BIT, 0x10D8, 0x10 }, + { 0x0000, CRL_REG_LEN_DELAY, 200, 0x10 }, + { 0x2512, CRL_REG_LEN_16BIT, 0x8000, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0905, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x3350, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x2004, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1460, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1578, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0901, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x7B24, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0xFF24, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0xFF24, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0xEA24, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1022, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x2410, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x155A, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0901, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1400, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x24FF, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x24FF, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x24EA, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x2324, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x647A, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x2404, 
0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x052C, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x400A, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0xFF0A, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0xFF0A, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1008, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x3851, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1440, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0004, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0801, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0408, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1180, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x2652, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1518, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0906, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1348, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1002, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1016, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1181, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1189, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1056, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1210, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0901, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0D09, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1413, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x8809, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x2B15, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x8809, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0311, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0xD909, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1214, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x4109, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0312, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1409, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0110, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0xD612, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1012, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1212, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1011, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0xDD11, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0xD910, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x5609, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1511, 0x10 }, + { 0x2510, 
CRL_REG_LEN_16BIT, 0xDB09, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1511, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x9B09, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0F11, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0xBB12, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1A12, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1014, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x6012, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x5010, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x7610, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0xE609, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0812, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x4012, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x6009, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x290B, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0904, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1440, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0923, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x15C8, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x13C8, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x092C, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1588, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1388, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0C09, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0C14, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x4109, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1112, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x6212, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x6011, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0xBF11, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0xBB10, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x6611, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0xFB09, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x3511, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0xBB12, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x6312, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x6014, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0015, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0011, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0xB812, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0xA012, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0010, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 
0x2610, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0013, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0011, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0008, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x3053, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x4215, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x4013, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x4010, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0210, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1611, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x8111, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x8910, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x5612, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1009, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x010D, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0815, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0xC015, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0xD013, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x5009, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1313, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0xD009, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0215, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0xC015, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0xC813, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0xC009, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0515, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x8813, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x8009, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0213, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x8809, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0411, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0xC909, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0814, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0109, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0B11, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0xD908, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1400, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x091A, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1440, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0903, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1214, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0901, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x10D6, 0x10 }, + { 
0x2510, CRL_REG_LEN_16BIT, 0x1210, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1212, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1210, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x11DD, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x11D9, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1056, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0917, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x11DB, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0913, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x11FB, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0905, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x11BB, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x121A, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1210, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1460, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1250, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1076, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x10E6, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0901, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x15A8, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0901, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x13A8, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1240, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1260, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0925, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x13AD, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0902, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0907, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1588, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0901, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x138D, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0B09, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0914, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x4009, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0B13, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x8809, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1C0C, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0920, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1262, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1260, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x11BF, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x11BB, 0x10 }, + { 0x2510, 
CRL_REG_LEN_16BIT, 0x1066, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x090A, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x11FB, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x093B, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x11BB, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1263, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1260, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1400, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1508, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x11B8, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x12A0, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1200, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1026, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1000, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1300, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1100, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x437A, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0609, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0B05, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0708, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x4137, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x502C, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x2CFE, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x15FE, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0C2C, 0x10 }, + { 0x32E6, CRL_REG_LEN_16BIT, 0x00E0, 0x10 }, + { 0x1008, CRL_REG_LEN_16BIT, 0x036F, 0x10 }, + { 0x100C, CRL_REG_LEN_16BIT, 0x058F, 0x10 }, + { 0x100E, CRL_REG_LEN_16BIT, 0x07AF, 0x10 }, + { 0x1010, CRL_REG_LEN_16BIT, 0x014F, 0x10 }, + { 0x3230, CRL_REG_LEN_16BIT, 0x0312, 0x10 }, + { 0x3232, CRL_REG_LEN_16BIT, 0x0532, 0x10 }, + { 0x3234, CRL_REG_LEN_16BIT, 0x0752, 0x10 }, + { 0x3236, CRL_REG_LEN_16BIT, 0x00F2, 0x10 }, + { 0x3566, CRL_REG_LEN_16BIT, 0x3328, 0x10 }, + { 0x32D0, CRL_REG_LEN_16BIT, 0x3A02, 0x10 }, + { 0x32D2, CRL_REG_LEN_16BIT, 0x3508, 0x10 }, + { 0x32D4, CRL_REG_LEN_16BIT, 0x3702, 0x10 }, + { 0x32D6, CRL_REG_LEN_16BIT, 0x3C04, 0x10 }, + { 0x32DC, CRL_REG_LEN_16BIT, 0x370A, 0x10 }, + { 0x30B0, CRL_REG_LEN_16BIT, 0x0800, 0x10 }, + { 0x302A, CRL_REG_LEN_16BIT, 0x0008, 0x10 }, + { 0x302C, CRL_REG_LEN_16BIT, 
0x0001, 0x10 }, + { 0x302E, CRL_REG_LEN_16BIT, 0x0003, 0x10 }, + { 0x3030, CRL_REG_LEN_16BIT, 0x004E, 0x10 }, + { 0x3036, CRL_REG_LEN_16BIT, 0x0008, 0x10 }, + { 0x3038, CRL_REG_LEN_16BIT, 0x0001, 0x10 }, + { 0x30B0, CRL_REG_LEN_16BIT, 0x0800, 0x10 }, + { 0x30A2, CRL_REG_LEN_16BIT, 0x0001, 0x10 }, + { 0x30A6, CRL_REG_LEN_16BIT, 0x0001, 0x10 }, + { 0x3040, CRL_REG_LEN_16BIT, 0x0000, 0x10 }, + { 0x3082, CRL_REG_LEN_16BIT, 0x0008, 0x10 }, + { 0x30BA, CRL_REG_LEN_16BIT, 0x11F2, 0x10 }, + { 0x3044, CRL_REG_LEN_16BIT, 0x0400, 0x10 }, + { 0x3064, CRL_REG_LEN_16BIT, 0x1802, 0x10 }, + { 0x33E0, CRL_REG_LEN_16BIT, 0x0C80, 0x10 }, + { 0x33E0, CRL_REG_LEN_16BIT, 0x0C80, 0x10 }, + { 0x3180, CRL_REG_LEN_16BIT, 0x0080, 0x10 }, + { 0x33E4, CRL_REG_LEN_16BIT, 0x0080, 0x10 }, + { 0x33E0, CRL_REG_LEN_16BIT, 0x0C80, 0x10 }, + { 0x33E0, CRL_REG_LEN_16BIT, 0x0C80, 0x10 }, + { 0x3004, CRL_REG_LEN_16BIT, 0x0004, 0x10 }, + { 0x3008, CRL_REG_LEN_16BIT, 0x0783, 0x10 }, + { 0x3002, CRL_REG_LEN_16BIT, 0x003C, 0x10 }, + { 0x3006, CRL_REG_LEN_16BIT, 0x047B, 0x10 }, + { 0x3032, CRL_REG_LEN_16BIT, 0x0000, 0x10 }, + { 0x3400, CRL_REG_LEN_16BIT, 0x0010, 0x10 }, + { 0x3402, CRL_REG_LEN_16BIT, 0x0F10, 0x10 }, + { 0x3404, CRL_REG_LEN_16BIT, 0x0970, 0x10 }, + { 0x3082, CRL_REG_LEN_16BIT, 0x0000, 0x10 }, + { 0x30BA, CRL_REG_LEN_16BIT, 0x11F1, 0x10 }, + { 0x0000, CRL_REG_LEN_DELAY, 200, 0x10 }, + { 0x30BA, CRL_REG_LEN_16BIT, 0x11F0, 0x10 }, + { 0x300C, CRL_REG_LEN_16BIT, 0x0872, 0x10 }, + { 0x300A, CRL_REG_LEN_16BIT, 0x054A, 0x10 }, + { 0x3042, CRL_REG_LEN_16BIT, 0x0000, 0x10 }, + { 0x3238, CRL_REG_LEN_16BIT, 0x0222, 0x10 }, + { 0x3012, CRL_REG_LEN_16BIT, 0x0163, 0x10 }, + { 0x3014, CRL_REG_LEN_16BIT, 0x014F, 0x10 }, + { 0x30B0, CRL_REG_LEN_16BIT, 0x0800, 0x10 }, + { 0x32EA, CRL_REG_LEN_16BIT, 0x3C08, 0x10 }, + { 0x32EC, CRL_REG_LEN_16BIT, 0x72A1, 0x10 }, + { 0x31D0, CRL_REG_LEN_16BIT, 0x0000, 0x10 }, + { 0x31AE, CRL_REG_LEN_16BIT, 0x0001, 0x10 }, + { 0x31AC, CRL_REG_LEN_16BIT, 0x0C0C, 0x10 }, + /* try 
sync mode */ + { 0x340A, CRL_REG_LEN_16BIT, 0x0077, 0x10 }, + { 0x340C, CRL_REG_LEN_16BIT, 0x0080, 0x10 }, + { 0x30CE, CRL_REG_LEN_16BIT, 0x0120, 0x10 }, + { 0x301A, CRL_REG_LEN_16BIT, 0x19DC, 0x10 }, + { 0x3370, CRL_REG_LEN_16BIT, 0x0231, 0x10 }, +}; + +static struct crl_register_write_rep ar0231at_1920_1088_2hdr_mode[] = { + { 0x301A, CRL_REG_LEN_16BIT, 0x10D8, 0x10 }, + { 0x0000, CRL_REG_LEN_DELAY, 100, 0x10 }, + { 0x3092, CRL_REG_LEN_16BIT, 0x0C24, 0x10 }, + { 0x337A, CRL_REG_LEN_16BIT, 0x0C80, 0x10 }, + { 0x3520, CRL_REG_LEN_16BIT, 0x1288, 0x10 }, + { 0x3522, CRL_REG_LEN_16BIT, 0x880C, 0x10 }, + { 0x3524, CRL_REG_LEN_16BIT, 0x0C12, 0x10 }, + { 0x352C, CRL_REG_LEN_16BIT, 0x1212, 0x10 }, + { 0x354A, CRL_REG_LEN_16BIT, 0x007F, 0x10 }, + { 0x350C, CRL_REG_LEN_16BIT, 0x055C, 0x10 }, + { 0x3506, CRL_REG_LEN_16BIT, 0x3333, 0x10 }, + { 0x3508, CRL_REG_LEN_16BIT, 0x3333, 0x10 }, + { 0x3100, CRL_REG_LEN_16BIT, 0x4000, 0x10 }, + { 0x3280, CRL_REG_LEN_16BIT, 0x0FA0, 0x10 }, + { 0x3282, CRL_REG_LEN_16BIT, 0x0FA0, 0x10 }, + { 0x3284, CRL_REG_LEN_16BIT, 0x0FA0, 0x10 }, + { 0x3286, CRL_REG_LEN_16BIT, 0x0FA0, 0x10 }, + { 0x3288, CRL_REG_LEN_16BIT, 0x0FA0, 0x10 }, + { 0x328A, CRL_REG_LEN_16BIT, 0x0FA0, 0x10 }, + { 0x328C, CRL_REG_LEN_16BIT, 0x0FA0, 0x10 }, + { 0x328E, CRL_REG_LEN_16BIT, 0x0FA0, 0x10 }, + { 0x3290, CRL_REG_LEN_16BIT, 0x0FA0, 0x10 }, + { 0x3292, CRL_REG_LEN_16BIT, 0x0FA0, 0x10 }, + { 0x3294, CRL_REG_LEN_16BIT, 0x0FA0, 0x10 }, + { 0x3296, CRL_REG_LEN_16BIT, 0x0FA0, 0x10 }, + { 0x3298, CRL_REG_LEN_16BIT, 0x0FA0, 0x10 }, + { 0x329A, CRL_REG_LEN_16BIT, 0x0FA0, 0x10 }, + { 0x329C, CRL_REG_LEN_16BIT, 0x0FA0, 0x10 }, + { 0x329E, CRL_REG_LEN_16BIT, 0x0FA0, 0x10 }, + { 0x301A, CRL_REG_LEN_16BIT, 0x10D8, 0x10 }, + { 0x0000, CRL_REG_LEN_DELAY, 200, 0x10 }, + { 0x2512, CRL_REG_LEN_16BIT, 0x8000, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0905, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x3350, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x2004, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 
0x1460, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1578, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0901, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x7B24, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0xFF24, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0xFF24, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0xEA24, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1022, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x2410, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x155A, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0901, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1400, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x24FF, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x24FF, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x24EA, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x2324, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x647A, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x2404, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x052C, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x400A, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0xFF0A, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0xFF0A, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1008, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x3851, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1440, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0004, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0801, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0408, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1180, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x2652, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1518, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0906, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1348, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1002, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1016, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1181, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1189, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1056, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1210, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0901, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0D09, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1413, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x8809, 0x10 }, + { 
0x2510, CRL_REG_LEN_16BIT, 0x2B15, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x8809, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0311, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0xD909, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1214, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x4109, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0312, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1409, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0110, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0xD612, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1012, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1212, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1011, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0xDD11, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0xD910, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x5609, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1511, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0xDB09, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1511, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x9B09, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0F11, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0xBB12, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1A12, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1014, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x6012, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x5010, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x7610, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0xE609, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0812, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x4012, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x6009, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x290B, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0904, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1440, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0923, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x15C8, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x13C8, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x092C, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1588, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1388, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0C09, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0C14, 0x10 }, + { 0x2510, 
CRL_REG_LEN_16BIT, 0x4109, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1112, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x6212, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x6011, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0xBF11, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0xBB10, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x6611, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0xFB09, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x3511, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0xBB12, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x6312, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x6014, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0015, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0011, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0xB812, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0xA012, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0010, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x2610, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0013, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0011, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0008, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x3053, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x4215, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x4013, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x4010, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0210, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1611, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x8111, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x8910, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x5612, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1009, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x010D, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0815, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0xC015, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0xD013, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x5009, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1313, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0xD009, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0215, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0xC015, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0xC813, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0xC009, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 
0x0515, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x8813, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x8009, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0213, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x8809, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0411, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0xC909, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0814, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0109, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0B11, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0xD908, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1400, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x091A, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1440, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0903, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1214, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0901, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x10D6, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1210, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1212, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1210, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x11DD, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x11D9, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1056, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0917, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x11DB, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0913, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x11FB, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0905, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x11BB, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x121A, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1210, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1460, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1250, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1076, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x10E6, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0901, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x15A8, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0901, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x13A8, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1240, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1260, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0925, 0x10 }, + { 
0x2510, CRL_REG_LEN_16BIT, 0x13AD, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0902, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0907, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1588, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0901, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x138D, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0B09, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0914, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x4009, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0B13, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x8809, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1C0C, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0920, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1262, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1260, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x11BF, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x11BB, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1066, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x090A, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x11FB, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x093B, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x11BB, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1263, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1260, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1400, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1508, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x11B8, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x12A0, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1200, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1026, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1000, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1300, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1100, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x437A, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0609, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0B05, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0708, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x4137, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x502C, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x2CFE, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x15FE, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0C2C, 0x10 }, + { 0x32E6, 
CRL_REG_LEN_16BIT, 0x00E0, 0x10 }, + { 0x1008, CRL_REG_LEN_16BIT, 0x036F, 0x10 }, + { 0x100C, CRL_REG_LEN_16BIT, 0x058F, 0x10 }, + { 0x100E, CRL_REG_LEN_16BIT, 0x07AF, 0x10 }, + { 0x1010, CRL_REG_LEN_16BIT, 0x014F, 0x10 }, + { 0x3230, CRL_REG_LEN_16BIT, 0x0312, 0x10 }, + { 0x3232, CRL_REG_LEN_16BIT, 0x0532, 0x10 }, + { 0x3234, CRL_REG_LEN_16BIT, 0x0752, 0x10 }, + { 0x3236, CRL_REG_LEN_16BIT, 0x00F2, 0x10 }, + { 0x3566, CRL_REG_LEN_16BIT, 0x3328, 0x10 }, + { 0x32D0, CRL_REG_LEN_16BIT, 0x3A02, 0x10 }, + { 0x32D2, CRL_REG_LEN_16BIT, 0x3508, 0x10 }, + { 0x32D4, CRL_REG_LEN_16BIT, 0x3702, 0x10 }, + { 0x32D6, CRL_REG_LEN_16BIT, 0x3C04, 0x10 }, + { 0x32DC, CRL_REG_LEN_16BIT, 0x370A, 0x10 }, + { 0x30B0, CRL_REG_LEN_16BIT, 0x0800, 0x10 }, + { 0x302A, CRL_REG_LEN_16BIT, 0x0008, 0x10 }, + { 0x302C, CRL_REG_LEN_16BIT, 0x0001, 0x10 }, + { 0x302E, CRL_REG_LEN_16BIT, 0x0003, 0x10 }, + { 0x3030, CRL_REG_LEN_16BIT, 0x004E, 0x10 }, + { 0x3036, CRL_REG_LEN_16BIT, 0x0008, 0x10 }, + { 0x3038, CRL_REG_LEN_16BIT, 0x0001, 0x10 }, + { 0x30B0, CRL_REG_LEN_16BIT, 0x0800, 0x10 }, + { 0x30A2, CRL_REG_LEN_16BIT, 0x0001, 0x10 }, + { 0x30A6, CRL_REG_LEN_16BIT, 0x0001, 0x10 }, + { 0x3040, CRL_REG_LEN_16BIT, 0x0000, 0x10 }, + { 0x3040, CRL_REG_LEN_16BIT, 0x0000, 0x10 }, + { 0x3082, CRL_REG_LEN_16BIT, 0x0004, 0x10 }, + { 0x30BA, CRL_REG_LEN_16BIT, 0x11F1, 0x10 }, + { 0x3044, CRL_REG_LEN_16BIT, 0x0400, 0x10 }, + { 0x3044, CRL_REG_LEN_16BIT, 0x0400, 0x10 }, + { 0x3044, CRL_REG_LEN_16BIT, 0x0400, 0x10 }, + { 0x3044, CRL_REG_LEN_16BIT, 0x0400, 0x10 }, + { 0x3064, CRL_REG_LEN_16BIT, 0x1802, 0x10 }, + { 0x3064, CRL_REG_LEN_16BIT, 0x1802, 0x10 }, + { 0x3064, CRL_REG_LEN_16BIT, 0x1802, 0x10 }, + { 0x3064, CRL_REG_LEN_16BIT, 0x1802, 0x10 }, + { 0x33C0, CRL_REG_LEN_16BIT, 0x2000, 0x10 }, + { 0x33C2, CRL_REG_LEN_16BIT, 0x3440, 0x10 }, + { 0x33C4, CRL_REG_LEN_16BIT, 0x4890, 0x10 }, + { 0x33C6, CRL_REG_LEN_16BIT, 0x5CE0, 0x10 }, + { 0x33C8, CRL_REG_LEN_16BIT, 0x7140, 0x10 }, + { 0x33CA, CRL_REG_LEN_16BIT, 
0x8590, 0x10 }, + { 0x33CC, CRL_REG_LEN_16BIT, 0x99E0, 0x10 }, + { 0x33CE, CRL_REG_LEN_16BIT, 0xAE40, 0x10 }, + { 0x33D0, CRL_REG_LEN_16BIT, 0xC290, 0x10 }, + { 0x33D2, CRL_REG_LEN_16BIT, 0xD6F0, 0x10 }, + { 0x33D4, CRL_REG_LEN_16BIT, 0xEB40, 0x10 }, + { 0x33D6, CRL_REG_LEN_16BIT, 0x0000, 0x10 }, + { 0x33DA, CRL_REG_LEN_16BIT, 0x0000, 0x10 }, + { 0x33E0, CRL_REG_LEN_16BIT, 0x0C80, 0x10 }, + { 0x33E0, CRL_REG_LEN_16BIT, 0x0C80, 0x10 }, + { 0x3180, CRL_REG_LEN_16BIT, 0x0080, 0x10 }, + { 0x33E4, CRL_REG_LEN_16BIT, 0x0080, 0x10 }, + { 0x33E0, CRL_REG_LEN_16BIT, 0x0C80, 0x10 }, + { 0x33E0, CRL_REG_LEN_16BIT, 0x0C80, 0x10 }, + { 0x3004, CRL_REG_LEN_16BIT, 0x0004, 0x10 }, + { 0x3008, CRL_REG_LEN_16BIT, 0x0783, 0x10 }, + { 0x3002, CRL_REG_LEN_16BIT, 0x003C, 0x10 }, + { 0x3006, CRL_REG_LEN_16BIT, 0x047B, 0x10 }, + { 0x3032, CRL_REG_LEN_16BIT, 0x0000, 0x10 }, + { 0x3400, CRL_REG_LEN_16BIT, 0x0010, 0x10 }, + { 0x3402, CRL_REG_LEN_16BIT, 0x0788, 0x10 }, + { 0x3402, CRL_REG_LEN_16BIT, 0x0F10, 0x10 }, + { 0x3404, CRL_REG_LEN_16BIT, 0x04B8, 0x10 }, + { 0x3404, CRL_REG_LEN_16BIT, 0x0970, 0x10 }, + { 0x3082, CRL_REG_LEN_16BIT, 0x0004, 0x10 }, + { 0x30BA, CRL_REG_LEN_16BIT, 0x11F1, 0x10 }, + { 0x300C, CRL_REG_LEN_16BIT, 0x0872, 0x10 }, + { 0x300A, CRL_REG_LEN_16BIT, 0x054A, 0x10 }, + { 0x3042, CRL_REG_LEN_16BIT, 0x0000, 0x10 }, + { 0x3238, CRL_REG_LEN_16BIT, 0x0222, 0x10 }, + { 0x3238, CRL_REG_LEN_16BIT, 0x0222, 0x10 }, + { 0x3238, CRL_REG_LEN_16BIT, 0x0222, 0x10 }, + { 0x3238, CRL_REG_LEN_16BIT, 0x0222, 0x10 }, + { 0x3012, CRL_REG_LEN_16BIT, 0x0163, 0x10 }, + { 0x3014, CRL_REG_LEN_16BIT, 0x0882, 0x10 }, + { 0x321E, CRL_REG_LEN_16BIT, 0x0882, 0x10 }, + { 0x3222, CRL_REG_LEN_16BIT, 0x0882, 0x10 }, + { 0x30B0, CRL_REG_LEN_16BIT, 0x0800, 0x10 }, + { 0x32EA, CRL_REG_LEN_16BIT, 0x3C0E, 0x10 }, + { 0x32EA, CRL_REG_LEN_16BIT, 0x3C0E, 0x10 }, + { 0x32EA, CRL_REG_LEN_16BIT, 0x3C0E, 0x10 }, + { 0x32EC, CRL_REG_LEN_16BIT, 0x72A1, 0x10 }, + { 0x32EC, CRL_REG_LEN_16BIT, 0x72A1, 0x10 }, + { 
0x32EC, CRL_REG_LEN_16BIT, 0x72A1, 0x10 }, + { 0x32EC, CRL_REG_LEN_16BIT, 0x72A1, 0x10 }, + { 0x32EC, CRL_REG_LEN_16BIT, 0x72A1, 0x10 }, + { 0x32EC, CRL_REG_LEN_16BIT, 0x72A1, 0x10 }, + { 0x31D0, CRL_REG_LEN_16BIT, 0x0001, 0x10 }, + { 0x31AE, CRL_REG_LEN_16BIT, 0x0201, 0x10 }, + { 0x31AE, CRL_REG_LEN_16BIT, 0x0001, 0x10 }, + { 0x31AC, CRL_REG_LEN_16BIT, 0x140C, 0x10 }, + { 0x340A, CRL_REG_LEN_16BIT, 0x0077, 0x10 }, + { 0x340C, CRL_REG_LEN_16BIT, 0x0080, 0x10 }, + { 0x30CE, CRL_REG_LEN_16BIT, 0x0120, 0x10 }, + { 0x301A, CRL_REG_LEN_16BIT, 0x19DC, 0x10 }, + { 0x3370, CRL_REG_LEN_16BIT, 0x0231, 0x10 }, +}; + +static struct crl_register_write_rep ar0231at_1920_1088_3hdr_mode[] = { + { 0x301A, CRL_REG_LEN_16BIT, 0x10D8, 0x10 }, + { 0x0000, CRL_REG_LEN_DELAY, 100, 0x10 }, + { 0x3092, CRL_REG_LEN_16BIT, 0x0C24, 0x10 }, + { 0x337A, CRL_REG_LEN_16BIT, 0x0C80, 0x10 }, + { 0x3520, CRL_REG_LEN_16BIT, 0x1288, 0x10 }, + { 0x3522, CRL_REG_LEN_16BIT, 0x880C, 0x10 }, + { 0x3524, CRL_REG_LEN_16BIT, 0x0C12, 0x10 }, + { 0x352C, CRL_REG_LEN_16BIT, 0x1212, 0x10 }, + { 0x354A, CRL_REG_LEN_16BIT, 0x007F, 0x10 }, + { 0x350C, CRL_REG_LEN_16BIT, 0x055C, 0x10 }, + { 0x3506, CRL_REG_LEN_16BIT, 0x3333, 0x10 }, + { 0x3508, CRL_REG_LEN_16BIT, 0x3333, 0x10 }, + { 0x3100, CRL_REG_LEN_16BIT, 0x4000, 0x10 }, + { 0x3280, CRL_REG_LEN_16BIT, 0x0FA0, 0x10 }, + { 0x3282, CRL_REG_LEN_16BIT, 0x0FA0, 0x10 }, + { 0x3284, CRL_REG_LEN_16BIT, 0x0FA0, 0x10 }, + { 0x3286, CRL_REG_LEN_16BIT, 0x0FA0, 0x10 }, + { 0x3288, CRL_REG_LEN_16BIT, 0x0FA0, 0x10 }, + { 0x328A, CRL_REG_LEN_16BIT, 0x0FA0, 0x10 }, + { 0x328C, CRL_REG_LEN_16BIT, 0x0FA0, 0x10 }, + { 0x328E, CRL_REG_LEN_16BIT, 0x0FA0, 0x10 }, + { 0x3290, CRL_REG_LEN_16BIT, 0x0FA0, 0x10 }, + { 0x3292, CRL_REG_LEN_16BIT, 0x0FA0, 0x10 }, + { 0x3294, CRL_REG_LEN_16BIT, 0x0FA0, 0x10 }, + { 0x3296, CRL_REG_LEN_16BIT, 0x0FA0, 0x10 }, + { 0x3298, CRL_REG_LEN_16BIT, 0x0FA0, 0x10 }, + { 0x329A, CRL_REG_LEN_16BIT, 0x0FA0, 0x10 }, + { 0x329C, CRL_REG_LEN_16BIT, 0x0FA0, 0x10 }, 
+ { 0x329E, CRL_REG_LEN_16BIT, 0x0FA0, 0x10 }, + { 0x301A, CRL_REG_LEN_16BIT, 0x10D8, 0x10 }, + { 0x0000, CRL_REG_LEN_DELAY, 200, 0x10 }, + { 0x2512, CRL_REG_LEN_16BIT, 0x8000, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0905, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x3350, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x2004, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1460, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1578, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0901, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x7B24, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0xFF24, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0xFF24, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0xEA24, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1022, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x2410, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x155A, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0901, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1400, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x24FF, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x24FF, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x24EA, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x2324, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x647A, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x2404, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x052C, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x400A, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0xFF0A, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0xFF0A, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1008, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x3851, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1440, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0004, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0801, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0408, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1180, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x2652, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1518, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0906, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1348, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1002, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1016, 0x10 }, + { 0x2510, 
CRL_REG_LEN_16BIT, 0x1181, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1189, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1056, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1210, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0901, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0D09, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1413, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x8809, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x2B15, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x8809, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0311, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0xD909, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1214, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x4109, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0312, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1409, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0110, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0xD612, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1012, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1212, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1011, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0xDD11, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0xD910, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x5609, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1511, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0xDB09, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1511, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x9B09, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0F11, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0xBB12, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1A12, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1014, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x6012, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x5010, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x7610, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0xE609, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0812, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x4012, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x6009, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x290B, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0904, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1440, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 
0x0923, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x15C8, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x13C8, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x092C, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1588, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1388, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0C09, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0C14, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x4109, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1112, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x6212, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x6011, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0xBF11, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0xBB10, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x6611, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0xFB09, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x3511, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0xBB12, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x6312, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x6014, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0015, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0011, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0xB812, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0xA012, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0010, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x2610, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0013, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0011, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0008, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x3053, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x4215, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x4013, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x4010, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0210, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1611, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x8111, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x8910, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x5612, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1009, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x010D, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0815, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0xC015, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0xD013, 0x10 }, + { 
0x2510, CRL_REG_LEN_16BIT, 0x5009, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1313, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0xD009, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0215, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0xC015, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0xC813, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0xC009, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0515, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x8813, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x8009, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0213, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x8809, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0411, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0xC909, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0814, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0109, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0B11, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0xD908, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1400, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x091A, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1440, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0903, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1214, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0901, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x10D6, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1210, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1212, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1210, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x11DD, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x11D9, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1056, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0917, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x11DB, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0913, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x11FB, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0905, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x11BB, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x121A, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1210, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1460, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1250, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1076, 0x10 }, + { 0x2510, 
CRL_REG_LEN_16BIT, 0x10E6, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0901, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x15A8, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0901, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x13A8, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1240, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1260, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0925, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x13AD, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0902, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0907, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1588, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0901, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x138D, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0B09, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0914, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x4009, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0B13, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x8809, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1C0C, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0920, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1262, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1260, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x11BF, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x11BB, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1066, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x090A, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x11FB, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x093B, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x11BB, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1263, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1260, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1400, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1508, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x11B8, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x12A0, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1200, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1026, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1000, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1300, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1100, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x437A, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 
0x0609, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0B05, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0708, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x4137, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x502C, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x2CFE, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x15FE, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0C2C, 0x10 }, + { 0x32E6, CRL_REG_LEN_16BIT, 0x00E0, 0x10 }, + { 0x1008, CRL_REG_LEN_16BIT, 0x036F, 0x10 }, + { 0x100C, CRL_REG_LEN_16BIT, 0x058F, 0x10 }, + { 0x100E, CRL_REG_LEN_16BIT, 0x07AF, 0x10 }, + { 0x1010, CRL_REG_LEN_16BIT, 0x014F, 0x10 }, + { 0x3230, CRL_REG_LEN_16BIT, 0x0312, 0x10 }, + { 0x3232, CRL_REG_LEN_16BIT, 0x0532, 0x10 }, + { 0x3234, CRL_REG_LEN_16BIT, 0x0752, 0x10 }, + { 0x3236, CRL_REG_LEN_16BIT, 0x00F2, 0x10 }, + { 0x3566, CRL_REG_LEN_16BIT, 0x3328, 0x10 }, + { 0x32D0, CRL_REG_LEN_16BIT, 0x3A02, 0x10 }, + { 0x32D2, CRL_REG_LEN_16BIT, 0x3508, 0x10 }, + { 0x32D4, CRL_REG_LEN_16BIT, 0x3702, 0x10 }, + { 0x32D6, CRL_REG_LEN_16BIT, 0x3C04, 0x10 }, + { 0x32DC, CRL_REG_LEN_16BIT, 0x370A, 0x10 }, + { 0x30B0, CRL_REG_LEN_16BIT, 0x0800, 0x10 }, + { 0x302A, CRL_REG_LEN_16BIT, 0x0008, 0x10 }, + { 0x302C, CRL_REG_LEN_16BIT, 0x0001, 0x10 }, + { 0x302E, CRL_REG_LEN_16BIT, 0x0003, 0x10 }, + { 0x3030, CRL_REG_LEN_16BIT, 0x004E, 0x10 }, + { 0x3036, CRL_REG_LEN_16BIT, 0x0008, 0x10 }, + { 0x3038, CRL_REG_LEN_16BIT, 0x0001, 0x10 }, + { 0x30B0, CRL_REG_LEN_16BIT, 0x0800, 0x10 }, + { 0x30A2, CRL_REG_LEN_16BIT, 0x0001, 0x10 }, + { 0x30A6, CRL_REG_LEN_16BIT, 0x0001, 0x10 }, + { 0x3040, CRL_REG_LEN_16BIT, 0x0000, 0x10 }, + { 0x3040, CRL_REG_LEN_16BIT, 0x0000, 0x10 }, + { 0x3082, CRL_REG_LEN_16BIT, 0x0008, 0x10 }, + { 0x3082, CRL_REG_LEN_16BIT, 0x0008, 0x10 }, + { 0x3082, CRL_REG_LEN_16BIT, 0x0008, 0x10 }, + { 0x3082, CRL_REG_LEN_16BIT, 0x0008, 0x10 }, + { 0x30BA, CRL_REG_LEN_16BIT, 0x11F2, 0x10 }, + { 0x30BA, CRL_REG_LEN_16BIT, 0x11F2, 0x10 }, + { 0x30BA, CRL_REG_LEN_16BIT, 0x11F2, 0x10 }, + { 0x3044, CRL_REG_LEN_16BIT, 0x0400, 0x10 }, + { 
0x3044, CRL_REG_LEN_16BIT, 0x0400, 0x10 }, + { 0x3044, CRL_REG_LEN_16BIT, 0x0400, 0x10 }, + { 0x3044, CRL_REG_LEN_16BIT, 0x0400, 0x10 }, + { 0x3064, CRL_REG_LEN_16BIT, 0x1802, 0x10 }, + { 0x3064, CRL_REG_LEN_16BIT, 0x1802, 0x10 }, + { 0x3064, CRL_REG_LEN_16BIT, 0x1802, 0x10 }, + { 0x3064, CRL_REG_LEN_16BIT, 0x1802, 0x10 }, + { 0x33C0, CRL_REG_LEN_16BIT, 0x2000, 0x10 }, + { 0x33C2, CRL_REG_LEN_16BIT, 0x3440, 0x10 }, + { 0x33C4, CRL_REG_LEN_16BIT, 0x4890, 0x10 }, + { 0x33C6, CRL_REG_LEN_16BIT, 0x5CE0, 0x10 }, + { 0x33C8, CRL_REG_LEN_16BIT, 0x7140, 0x10 }, + { 0x33CA, CRL_REG_LEN_16BIT, 0x8590, 0x10 }, + { 0x33CC, CRL_REG_LEN_16BIT, 0x99E0, 0x10 }, + { 0x33CE, CRL_REG_LEN_16BIT, 0xAE40, 0x10 }, + { 0x33D0, CRL_REG_LEN_16BIT, 0xC290, 0x10 }, + { 0x33D2, CRL_REG_LEN_16BIT, 0xD6F0, 0x10 }, + { 0x33D4, CRL_REG_LEN_16BIT, 0xEB40, 0x10 }, + { 0x33D6, CRL_REG_LEN_16BIT, 0x0000, 0x10 }, + { 0x33DA, CRL_REG_LEN_16BIT, 0x0000, 0x10 }, + { 0x33E0, CRL_REG_LEN_16BIT, 0x0C80, 0x10 }, + { 0x33E0, CRL_REG_LEN_16BIT, 0x0C80, 0x10 }, + { 0x3180, CRL_REG_LEN_16BIT, 0x0080, 0x10 }, + { 0x33E4, CRL_REG_LEN_16BIT, 0x0080, 0x10 }, + { 0x33E0, CRL_REG_LEN_16BIT, 0x0C80, 0x10 }, + { 0x33E0, CRL_REG_LEN_16BIT, 0x0C80, 0x10 }, + { 0x3004, CRL_REG_LEN_16BIT, 0x0004, 0x10 }, + { 0x3008, CRL_REG_LEN_16BIT, 0x0783, 0x10 }, + { 0x3002, CRL_REG_LEN_16BIT, 0x003C, 0x10 }, + { 0x3006, CRL_REG_LEN_16BIT, 0x047B, 0x10 }, + { 0x3032, CRL_REG_LEN_16BIT, 0x0000, 0x10 }, + { 0x3400, CRL_REG_LEN_16BIT, 0x0010, 0x10 }, + { 0x3402, CRL_REG_LEN_16BIT, 0x0788, 0x10 }, + { 0x3402, CRL_REG_LEN_16BIT, 0x0F10, 0x10 }, + { 0x3404, CRL_REG_LEN_16BIT, 0x04B8, 0x10 }, + { 0x3404, CRL_REG_LEN_16BIT, 0x0970, 0x10 }, + { 0x3082, CRL_REG_LEN_16BIT, 0x0008, 0x10 }, + { 0x30BA, CRL_REG_LEN_16BIT, 0x11F2, 0x10 }, + { 0x300C, CRL_REG_LEN_16BIT, 0x0872, 0x10 }, + { 0x300A, CRL_REG_LEN_16BIT, 0x054A, 0x10 }, + { 0x3042, CRL_REG_LEN_16BIT, 0x0000, 0x10 }, + { 0x3238, CRL_REG_LEN_16BIT, 0x0222, 0x10 }, + { 0x3238, 
CRL_REG_LEN_16BIT, 0x0222, 0x10 }, + { 0x3238, CRL_REG_LEN_16BIT, 0x0222, 0x10 }, + { 0x3238, CRL_REG_LEN_16BIT, 0x0222, 0x10 }, + { 0x3012, CRL_REG_LEN_16BIT, 0x0163, 0x10 }, + { 0x3014, CRL_REG_LEN_16BIT, 0x0882, 0x10 }, + { 0x321E, CRL_REG_LEN_16BIT, 0x0882, 0x10 }, + { 0x3222, CRL_REG_LEN_16BIT, 0x0882, 0x10 }, + { 0x30B0, CRL_REG_LEN_16BIT, 0x0800, 0x10 }, + { 0x32EA, CRL_REG_LEN_16BIT, 0x3C0E, 0x10 }, + { 0x32EA, CRL_REG_LEN_16BIT, 0x3C0E, 0x10 }, + { 0x32EA, CRL_REG_LEN_16BIT, 0x3C0E, 0x10 }, + { 0x32EC, CRL_REG_LEN_16BIT, 0x72A1, 0x10 }, + { 0x32EC, CRL_REG_LEN_16BIT, 0x72A1, 0x10 }, + { 0x32EC, CRL_REG_LEN_16BIT, 0x72A1, 0x10 }, + { 0x32EC, CRL_REG_LEN_16BIT, 0x72A1, 0x10 }, + { 0x32EC, CRL_REG_LEN_16BIT, 0x72A1, 0x10 }, + { 0x32EC, CRL_REG_LEN_16BIT, 0x72A1, 0x10 }, + { 0x31D0, CRL_REG_LEN_16BIT, 0x0001, 0x10 }, + { 0x31AE, CRL_REG_LEN_16BIT, 0x0201, 0x10 }, + { 0x31AE, CRL_REG_LEN_16BIT, 0x0001, 0x10 }, + { 0x31AC, CRL_REG_LEN_16BIT, 0x140C, 0x10 }, + { 0x340A, CRL_REG_LEN_16BIT, 0x0077, 0x10 }, + { 0x340C, CRL_REG_LEN_16BIT, 0x0080, 0x10 }, + { 0x30CE, CRL_REG_LEN_16BIT, 0x0120, 0x10 }, + { 0x301A, CRL_REG_LEN_16BIT, 0x19DC, 0x10 }, + { 0x3370, CRL_REG_LEN_16BIT, 0x0231, 0x10 }, +}; + +static struct crl_register_write_rep ar0231at_1920_1088_4hdr_mode[] = { + { 0x301A, CRL_REG_LEN_16BIT, 0x10D8, 0x10 }, + { 0x0000, CRL_REG_LEN_DELAY, 100, 0x10 }, + { 0x3092, CRL_REG_LEN_16BIT, 0x0C24, 0x10 }, + { 0x337A, CRL_REG_LEN_16BIT, 0x0C80, 0x10 }, + { 0x3520, CRL_REG_LEN_16BIT, 0x1288, 0x10 }, + { 0x3522, CRL_REG_LEN_16BIT, 0x880C, 0x10 }, + { 0x3524, CRL_REG_LEN_16BIT, 0x0C12, 0x10 }, + { 0x352C, CRL_REG_LEN_16BIT, 0x1212, 0x10 }, + { 0x354A, CRL_REG_LEN_16BIT, 0x007F, 0x10 }, + { 0x350C, CRL_REG_LEN_16BIT, 0x055C, 0x10 }, + { 0x3506, CRL_REG_LEN_16BIT, 0x3333, 0x10 }, + { 0x3508, CRL_REG_LEN_16BIT, 0x3333, 0x10 }, + { 0x3100, CRL_REG_LEN_16BIT, 0x4000, 0x10 }, + { 0x3280, CRL_REG_LEN_16BIT, 0x0FA0, 0x10 }, + { 0x3282, CRL_REG_LEN_16BIT, 0x0FA0, 0x10 }, + { 
0x3284, CRL_REG_LEN_16BIT, 0x0FA0, 0x10 }, + { 0x3286, CRL_REG_LEN_16BIT, 0x0FA0, 0x10 }, + { 0x3288, CRL_REG_LEN_16BIT, 0x0FA0, 0x10 }, + { 0x328A, CRL_REG_LEN_16BIT, 0x0FA0, 0x10 }, + { 0x328C, CRL_REG_LEN_16BIT, 0x0FA0, 0x10 }, + { 0x328E, CRL_REG_LEN_16BIT, 0x0FA0, 0x10 }, + { 0x3290, CRL_REG_LEN_16BIT, 0x0FA0, 0x10 }, + { 0x3292, CRL_REG_LEN_16BIT, 0x0FA0, 0x10 }, + { 0x3294, CRL_REG_LEN_16BIT, 0x0FA0, 0x10 }, + { 0x3296, CRL_REG_LEN_16BIT, 0x0FA0, 0x10 }, + { 0x3298, CRL_REG_LEN_16BIT, 0x0FA0, 0x10 }, + { 0x329A, CRL_REG_LEN_16BIT, 0x0FA0, 0x10 }, + { 0x329C, CRL_REG_LEN_16BIT, 0x0FA0, 0x10 }, + { 0x329E, CRL_REG_LEN_16BIT, 0x0FA0, 0x10 }, + { 0x301A, CRL_REG_LEN_16BIT, 0x10D8, 0x10 }, + { 0x0000, CRL_REG_LEN_DELAY, 200, 0x10 }, + { 0x2512, CRL_REG_LEN_16BIT, 0x8000, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0905, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x3350, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x2004, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1460, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1578, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0901, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x7B24, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0xFF24, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0xFF24, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0xEA24, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1022, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x2410, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x155A, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0901, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1400, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x24FF, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x24FF, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x24EA, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x2324, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x647A, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x2404, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x052C, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x400A, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0xFF0A, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0xFF0A, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 
0x1008, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x3851, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1440, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0004, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0801, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0408, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1180, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x2652, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1518, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0906, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1348, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1002, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1016, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1181, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1189, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1056, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1210, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0901, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0D09, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1413, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x8809, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x2B15, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x8809, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0311, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0xD909, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1214, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x4109, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0312, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1409, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0110, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0xD612, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1012, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1212, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1011, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0xDD11, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0xD910, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x5609, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1511, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0xDB09, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1511, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x9B09, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0F11, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0xBB12, 0x10 }, + { 
0x2510, CRL_REG_LEN_16BIT, 0x1A12, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1014, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x6012, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x5010, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x7610, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0xE609, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0812, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x4012, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x6009, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x290B, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0904, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1440, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0923, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x15C8, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x13C8, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x092C, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1588, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1388, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0C09, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0C14, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x4109, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1112, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x6212, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x6011, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0xBF11, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0xBB10, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x6611, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0xFB09, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x3511, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0xBB12, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x6312, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x6014, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0015, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0011, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0xB812, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0xA012, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0010, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x2610, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0013, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0011, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0008, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x3053, 0x10 }, + { 0x2510, 
CRL_REG_LEN_16BIT, 0x4215, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x4013, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x4010, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0210, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1611, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x8111, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x8910, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x5612, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1009, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x010D, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0815, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0xC015, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0xD013, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x5009, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1313, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0xD009, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0215, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0xC015, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0xC813, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0xC009, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0515, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x8813, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x8009, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0213, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x8809, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0411, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0xC909, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0814, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0109, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0B11, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0xD908, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1400, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x091A, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1440, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0903, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1214, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0901, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x10D6, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1210, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1212, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1210, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x11DD, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 
0x11D9, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1056, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0917, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x11DB, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0913, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x11FB, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0905, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x11BB, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x121A, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1210, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1460, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1250, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1076, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x10E6, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0901, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x15A8, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0901, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x13A8, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1240, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1260, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0925, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x13AD, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0902, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0907, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1588, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0901, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x138D, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0B09, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0914, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x4009, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0B13, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x8809, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1C0C, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0920, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1262, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1260, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x11BF, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x11BB, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1066, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x090A, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x11FB, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x093B, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x11BB, 0x10 }, + { 
0x2510, CRL_REG_LEN_16BIT, 0x1263, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1260, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1400, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1508, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x11B8, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x12A0, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1200, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1026, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1000, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1300, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1100, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x437A, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0609, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0B05, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0708, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x4137, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x502C, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x2CFE, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x15FE, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0C2C, 0x10 }, + { 0x32E6, CRL_REG_LEN_16BIT, 0x00E0, 0x10 }, + { 0x1008, CRL_REG_LEN_16BIT, 0x036F, 0x10 }, + { 0x100C, CRL_REG_LEN_16BIT, 0x058F, 0x10 }, + { 0x100E, CRL_REG_LEN_16BIT, 0x07AF, 0x10 }, + { 0x1010, CRL_REG_LEN_16BIT, 0x014F, 0x10 }, + { 0x3230, CRL_REG_LEN_16BIT, 0x0312, 0x10 }, + { 0x3232, CRL_REG_LEN_16BIT, 0x0532, 0x10 }, + { 0x3234, CRL_REG_LEN_16BIT, 0x0752, 0x10 }, + { 0x3236, CRL_REG_LEN_16BIT, 0x00F2, 0x10 }, + { 0x3566, CRL_REG_LEN_16BIT, 0x3328, 0x10 }, + { 0x32D0, CRL_REG_LEN_16BIT, 0x3A02, 0x10 }, + { 0x32D2, CRL_REG_LEN_16BIT, 0x3508, 0x10 }, + { 0x32D4, CRL_REG_LEN_16BIT, 0x3702, 0x10 }, + { 0x32D6, CRL_REG_LEN_16BIT, 0x3C04, 0x10 }, + { 0x32DC, CRL_REG_LEN_16BIT, 0x370A, 0x10 }, + { 0x30B0, CRL_REG_LEN_16BIT, 0x0800, 0x10 }, + { 0x302A, CRL_REG_LEN_16BIT, 0x0008, 0x10 }, + { 0x302C, CRL_REG_LEN_16BIT, 0x0001, 0x10 }, + { 0x302E, CRL_REG_LEN_16BIT, 0x0003, 0x10 }, + { 0x3030, CRL_REG_LEN_16BIT, 0x004E, 0x10 }, + { 0x3036, CRL_REG_LEN_16BIT, 0x0008, 0x10 }, + { 0x3038, CRL_REG_LEN_16BIT, 0x0001, 0x10 }, + { 0x30B0, 
CRL_REG_LEN_16BIT, 0x0800, 0x10 }, + { 0x30A2, CRL_REG_LEN_16BIT, 0x0001, 0x10 }, + { 0x30A6, CRL_REG_LEN_16BIT, 0x0001, 0x10 }, + { 0x3040, CRL_REG_LEN_16BIT, 0x0000, 0x10 }, + { 0x3040, CRL_REG_LEN_16BIT, 0x0000, 0x10 }, + { 0x3082, CRL_REG_LEN_16BIT, 0x0008, 0x10 }, + { 0x3082, CRL_REG_LEN_16BIT, 0x0008, 0x10 }, + { 0x3082, CRL_REG_LEN_16BIT, 0x0008, 0x10 }, + { 0x3082, CRL_REG_LEN_16BIT, 0x0008, 0x10 }, + { 0x30BA, CRL_REG_LEN_16BIT, 0x11F2, 0x10 }, + { 0x30BA, CRL_REG_LEN_16BIT, 0x11F2, 0x10 }, + { 0x30BA, CRL_REG_LEN_16BIT, 0x11F2, 0x10 }, + { 0x3044, CRL_REG_LEN_16BIT, 0x0400, 0x10 }, + { 0x3044, CRL_REG_LEN_16BIT, 0x0400, 0x10 }, + { 0x3044, CRL_REG_LEN_16BIT, 0x0400, 0x10 }, + { 0x3044, CRL_REG_LEN_16BIT, 0x0400, 0x10 }, + { 0x3064, CRL_REG_LEN_16BIT, 0x1802, 0x10 }, + { 0x3064, CRL_REG_LEN_16BIT, 0x1802, 0x10 }, + { 0x3064, CRL_REG_LEN_16BIT, 0x1802, 0x10 }, + { 0x3064, CRL_REG_LEN_16BIT, 0x1802, 0x10 }, + { 0x33C0, CRL_REG_LEN_16BIT, 0x2000, 0x10 }, + { 0x33C2, CRL_REG_LEN_16BIT, 0x3440, 0x10 }, + { 0x33C4, CRL_REG_LEN_16BIT, 0x4890, 0x10 }, + { 0x33C6, CRL_REG_LEN_16BIT, 0x5CE0, 0x10 }, + { 0x33C8, CRL_REG_LEN_16BIT, 0x7140, 0x10 }, + { 0x33CA, CRL_REG_LEN_16BIT, 0x8590, 0x10 }, + { 0x33CC, CRL_REG_LEN_16BIT, 0x99E0, 0x10 }, + { 0x33CE, CRL_REG_LEN_16BIT, 0xAE40, 0x10 }, + { 0x33D0, CRL_REG_LEN_16BIT, 0xC290, 0x10 }, + { 0x33D2, CRL_REG_LEN_16BIT, 0xD6F0, 0x10 }, + { 0x33D4, CRL_REG_LEN_16BIT, 0xEB40, 0x10 }, + { 0x33D6, CRL_REG_LEN_16BIT, 0x0000, 0x10 }, + { 0x33DA, CRL_REG_LEN_16BIT, 0x0000, 0x10 }, + { 0x33E0, CRL_REG_LEN_16BIT, 0x0C80, 0x10 }, + { 0x33E0, CRL_REG_LEN_16BIT, 0x0C80, 0x10 }, + { 0x3180, CRL_REG_LEN_16BIT, 0x0080, 0x10 }, + { 0x33E4, CRL_REG_LEN_16BIT, 0x0080, 0x10 }, + { 0x33E0, CRL_REG_LEN_16BIT, 0x0C80, 0x10 }, + { 0x33E0, CRL_REG_LEN_16BIT, 0x0C80, 0x10 }, + { 0x3004, CRL_REG_LEN_16BIT, 0x0004, 0x10 }, + { 0x3008, CRL_REG_LEN_16BIT, 0x0783, 0x10 }, + { 0x3002, CRL_REG_LEN_16BIT, 0x003C, 0x10 }, + { 0x3006, CRL_REG_LEN_16BIT, 
0x047B, 0x10 }, + { 0x3032, CRL_REG_LEN_16BIT, 0x0000, 0x10 }, + { 0x3400, CRL_REG_LEN_16BIT, 0x0010, 0x10 }, + { 0x3402, CRL_REG_LEN_16BIT, 0x0788, 0x10 }, + { 0x3402, CRL_REG_LEN_16BIT, 0x0F10, 0x10 }, + { 0x3404, CRL_REG_LEN_16BIT, 0x04B8, 0x10 }, + { 0x3404, CRL_REG_LEN_16BIT, 0x0970, 0x10 }, + { 0x3082, CRL_REG_LEN_16BIT, 0x000C, 0x10 }, + { 0x30BA, CRL_REG_LEN_16BIT, 0x11F3, 0x10 }, + { 0x300C, CRL_REG_LEN_16BIT, 0x09B8, 0x10 }, + { 0x300A, CRL_REG_LEN_16BIT, 0x0498, 0x10 }, + { 0x3042, CRL_REG_LEN_16BIT, 0x0000, 0x10 }, + { 0x3238, CRL_REG_LEN_16BIT, 0x0222, 0x10 }, + { 0x3238, CRL_REG_LEN_16BIT, 0x0222, 0x10 }, + { 0x3238, CRL_REG_LEN_16BIT, 0x0222, 0x10 }, + { 0x3238, CRL_REG_LEN_16BIT, 0x0222, 0x10 }, + { 0x3012, CRL_REG_LEN_16BIT, 0x0131, 0x10 }, + { 0x3014, CRL_REG_LEN_16BIT, 0x098E, 0x10 }, + { 0x321E, CRL_REG_LEN_16BIT, 0x098E, 0x10 }, + { 0x3222, CRL_REG_LEN_16BIT, 0x098E, 0x10 }, + { 0x3226, CRL_REG_LEN_16BIT, 0x098E, 0x10 }, + { 0x30B0, CRL_REG_LEN_16BIT, 0x0800, 0x10 }, + { 0x32EA, CRL_REG_LEN_16BIT, 0x3C0E, 0x10 }, + { 0x32EA, CRL_REG_LEN_16BIT, 0x3C0E, 0x10 }, + { 0x32EA, CRL_REG_LEN_16BIT, 0x3C0E, 0x10 }, + { 0x32EC, CRL_REG_LEN_16BIT, 0x72A1, 0x10 }, + { 0x32EC, CRL_REG_LEN_16BIT, 0x72A1, 0x10 }, + { 0x32EC, CRL_REG_LEN_16BIT, 0x72A1, 0x10 }, + { 0x32EC, CRL_REG_LEN_16BIT, 0x72A1, 0x10 }, + { 0x32EC, CRL_REG_LEN_16BIT, 0x72A1, 0x10 }, + { 0x32EC, CRL_REG_LEN_16BIT, 0x72A1, 0x10 }, + { 0x31D0, CRL_REG_LEN_16BIT, 0x0001, 0x10 }, + { 0x31AE, CRL_REG_LEN_16BIT, 0x0201, 0x10 }, + { 0x31AE, CRL_REG_LEN_16BIT, 0x0001, 0x10 }, + { 0x31AC, CRL_REG_LEN_16BIT, 0x140C, 0x10 }, + { 0x340A, CRL_REG_LEN_16BIT, 0x0077, 0x10 }, + { 0x340C, CRL_REG_LEN_16BIT, 0x0080, 0x10 }, + { 0x30CE, CRL_REG_LEN_16BIT, 0x0120, 0x10 }, + { 0x301A, CRL_REG_LEN_16BIT, 0x19DC, 0x10 }, + { 0x3370, CRL_REG_LEN_16BIT, 0x0231, 0x10 }, +}; + +struct crl_mode_rep ar0231at_modes[] = { + { + .sd_rects_items = ARRAY_SIZE(ar0231at_1920_1088_rects), + .sd_rects = ar0231at_1920_1088_rects, 
+ .binn_hor = 1, + .binn_vert = 1, + .scale_m = 1, + .width = 1920, + .height = 1088, + .min_llp = 2162, + .min_fll = 1354, + .comp_items = 1, + .ctrl_data = &ar0231at_ctrl_data_modes[0], + .mode_regs_items = ARRAY_SIZE(ar0231at_1920_1088_linear_mode), + .mode_regs = ar0231at_1920_1088_linear_mode, + }, + { + .sd_rects_items = ARRAY_SIZE(ar0231at_1920_1088_rects), + .sd_rects = ar0231at_1920_1088_rects, + .binn_hor = 1, + .binn_vert = 1, + .scale_m = 1, + .width = 1920, + .height = 1088, + .min_llp = 1978, + .min_fll = 1480, + .comp_items = 1, + .ctrl_data = &ar0231at_ctrl_data_modes[1], + .mode_regs_items = ARRAY_SIZE(ar0231at_1920_1088_2hdr_mode), + .mode_regs = ar0231at_1920_1088_2hdr_mode, + }, + { + .sd_rects_items = ARRAY_SIZE(ar0231at_1920_1088_rects), + .sd_rects = ar0231at_1920_1088_rects, + .binn_hor = 1, + .binn_vert = 1, + .scale_m = 1, + .width = 1920, + .height = 1088, + .min_llp = 1978, + .min_fll = 1480, + .comp_items = 1, + .ctrl_data = &ar0231at_ctrl_data_modes[2], + .mode_regs_items = ARRAY_SIZE(ar0231at_1920_1088_3hdr_mode), + .mode_regs = ar0231at_1920_1088_3hdr_mode, + }, + { + .sd_rects_items = ARRAY_SIZE(ar0231at_1920_1088_rects), + .sd_rects = ar0231at_1920_1088_rects, + .binn_hor = 1, + .binn_vert = 1, + .scale_m = 1, + .width = 1920, + .height = 1088, + .min_llp = 2246, + .min_fll = 1304, + .comp_items = 1, + .ctrl_data = &ar0231at_ctrl_data_modes[3], + .mode_regs_items = ARRAY_SIZE(ar0231at_1920_1088_4hdr_mode), + .mode_regs = ar0231at_1920_1088_4hdr_mode, + }, + { + .sd_rects_items = ARRAY_SIZE(ar0231at_1920_1088_rects), + .sd_rects = ar0231at_1920_1088_rects, + .binn_hor = 1, + .binn_vert = 1, + .scale_m = 1, + .width = 1920, + .height = 1088, + .min_llp = 2162, + .min_fll = 1354, + .comp_items = 1, + .ctrl_data = &ar0231at_ctrl_data_modes[4], + .mode_regs_items = ARRAY_SIZE(ar0231at_1920_1088_10bit_linear_mode), + .mode_regs = ar0231at_1920_1088_10bit_linear_mode, + }, +}; + +struct crl_csi_data_fmt ar0231at_crl_csi_data_fmt[] = { + { 
+ .code = ICI_FORMAT_SGRBG12, + .pixel_order = CRL_PIXEL_ORDER_GRBG, + .bits_per_pixel = 12, + .regs_items = 0, + .regs = 0, + }, + { + .code = ICI_FORMAT_SGRBG12, + .pixel_order = CRL_PIXEL_ORDER_RGGB, + .bits_per_pixel = 12, + .regs_items = 0, + .regs = 0, + }, + { + .code = ICI_FORMAT_SGRBG12, + .pixel_order = CRL_PIXEL_ORDER_BGGR, + .bits_per_pixel = 12, + .regs_items = 0, + .regs = 0, + }, + { + .code = ICI_FORMAT_SGRBG12, + .pixel_order = CRL_PIXEL_ORDER_GBRG, + .bits_per_pixel = 12, + .regs_items = 0, + .regs = 0, + }, + { + .code = ICI_FORMAT_SGRBG10, + .pixel_order = CRL_PIXEL_ORDER_GRBG, + .bits_per_pixel = 10, + .regs_items = 0, + .regs = 0, + }, + { + .code = ICI_FORMAT_SGRBG10, + .pixel_order = CRL_PIXEL_ORDER_RGGB, + .bits_per_pixel = 10, + .regs_items = 0, + .regs = 0, + }, + { + .code = ICI_FORMAT_SGRBG10, + .pixel_order = CRL_PIXEL_ORDER_BGGR, + .bits_per_pixel = 10, + .regs_items = 0, + .regs = 0, + }, + { + .code = ICI_FORMAT_SGRBG10, + .pixel_order = CRL_PIXEL_ORDER_GBRG, + .bits_per_pixel = 10, + .regs_items = 0, + .regs = 0, + }, +}; + +static struct crl_arithmetic_ops ar0231at_ls2_ops[] = { + { + .op = CRL_BITWISE_LSHIFT, + .operand.entity_val = 2, + } +}; + +/* Line length pixel */ +static struct crl_dynamic_register_access ar0231at_llp_regs[] = { + { + .address = 0x300C, + .len = CRL_REG_LEN_16BIT, + .ops_items = 0, + .ops = 0, + .mask = 0xffff, + }, +}; + +/* Frame length lines */ +static struct crl_dynamic_register_access ar0231at_fll_regs[] = { + { + .address = 0x300A, + .len = CRL_REG_LEN_16BIT, + .ops_items = 0, + .ops = 0, + .mask = 0xffff, + }, +}; + +/* Analog gain register, also used in linear(non-HDR) mode */ +static struct crl_dynamic_register_access ar0231at_ana_gain_regs[] = { + { + .address = 0x3366, /* analog gain */ + .len = CRL_REG_LEN_16BIT, + .ops_items = 0, + .ops = 0, + .mask = 0xffff, + }, +}; + +/* Digital gain register */ +static struct crl_dynamic_register_access ar0231at_gl_regs[] = { + { + .address = 0x305E, /* 
global digital gain */ + .len = CRL_REG_LEN_16BIT, + .ops_items = 0, + .ops = 0, + .mask = 0x07ff, + }, +}; + +/* + * Exposure mode: + * 0: Linear mode + * 1: 2-HDR mode + * 2: 3-HDR mode + * 3: 4-HDR mode + */ +static struct crl_dynamic_register_access ar0231at_exposure_mode_regs[] = { + { + .address = 0x3082, + .len = CRL_REG_LEN_16BIT | CRL_REG_READ_AND_UPDATE, + .ops_items = ARRAY_SIZE(ar0231at_ls2_ops), + .ops = ar0231at_ls2_ops, + .mask = 0x000C, + }, +}; + +/* + * Exposure Ratio in HDR mode + * 0x8000: + * Select exposure ratio mode or + * configure exposure time for each x-HDR individually. + * 0x0222: + * Selected exposure ratio mode and each ratio is 4x. + * The ratio also can be 2x, 8x, 16x + */ +static struct crl_dynamic_register_access ar0231at_hdr_exposure_ratio_regs[] = { + { + .address = 0x3238, + .len = CRL_REG_LEN_16BIT | CRL_REG_READ_AND_UPDATE, + .ops_items = 0, + .ops = 0, + .mask = 0x8777, + }, +}; + +/* t1 exposure register, also used in linear(non-HDR) mode */ +static struct crl_dynamic_register_access ar0231at_t1expotime_regs[] = { + { + .address = 0x3012, /* coarse integration time T1 */ + .len = CRL_REG_LEN_16BIT, + .ops_items = 0, + .ops = 0, + .mask = 0xffff, + }, +}; + +/* t2 exposure register, only used in HDR mode */ +static struct crl_dynamic_register_access ar0231at_t2expotime_regs[] = { + { + .address = 0x3212, /* coarse integration time T2 */ + .len = CRL_REG_LEN_16BIT, + .ops_items = 0, + .ops = 0, + .mask = 0xffff, + }, +}; + +/* t3 exposure register, only used in HDR mode */ +static struct crl_dynamic_register_access ar0231at_t3expotime_regs[] = { + { + .address = 0x3216, /* coarse integration time T3 */ + .len = CRL_REG_LEN_16BIT, + .ops_items = 0, + .ops = 0, + .mask = 0xffff, + }, +}; + +/* t4 exposure register, only used in HDR mode */ +static struct crl_dynamic_register_access ar0231at_t4expotime_regs[] = { + { + .address = 0x321A, /* coarse integration time T4 */ + .len = CRL_REG_LEN_16BIT, + .ops_items = 0, + .ops = 0, 
+ .mask = 0xffff, + }, +}; + +static struct crl_dynamic_register_access ar0231at_test_pattern_regs[] = { + { + .address = 0x3070, + .len = CRL_REG_LEN_16BIT, + .ops_items = 0, + .ops = 0, + .mask = 0xffff, + }, +}; + +static const char * const ar0231at_test_patterns[] = { + "Disabled", + "Solid Color", + "100% Vertical Color Bar", +}; + +struct crl_ctrl_data ar0231at_ctrls[] = { + { + .sd_type = CRL_SUBDEV_TYPE_BINNER, + .op_type = CRL_CTRL_SET_OP, + .context = SENSOR_IDLE, + .ctrl_id = ICI_EXT_SD_PARAM_ID_LINK_FREQ, + .name = "CTRL_ID_LINK_FREQ", + .type = CRL_CTRL_TYPE_MENU_INT, + .data.int_menu.def = 0, + .data.int_menu.max = 0, + .data.int_menu.menu = 0, + .flags = 0, + .impact = CRL_IMPACTS_NO_IMPACT, + .regs_items = 0, + .regs = 0, + .dep_items = 0, + .dep_ctrls = 0, + }, + { + .sd_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .op_type = CRL_CTRL_GET_OP, + .context = SENSOR_POWERED_ON, + .ctrl_id = ICI_EXT_SD_PARAM_ID_PIXEL_RATE, + .name = "CTRL_ID_PIXEL_RATE_PA", + .type = CRL_CTRL_TYPE_INTEGER, + .data.std_data.min = 0, + .data.std_data.max = INT_MAX, + .data.std_data.step = 1, + .data.std_data.def = 0, + .flags = 0, + .impact = CRL_IMPACTS_NO_IMPACT, + .regs_items = 0, + .regs = 0, + .dep_items = 0, + .dep_ctrls = 0, + }, + { + .sd_type = CRL_SUBDEV_TYPE_BINNER, + .op_type = CRL_CTRL_GET_OP, + .context = SENSOR_POWERED_ON, + .ctrl_id = ICI_EXT_SD_PARAM_ID_PIXEL_RATE, + .name = "CTRL_ID_PIXEL_RATE_CSI", + .type = CRL_CTRL_TYPE_INTEGER, + .data.std_data.min = 0, + .data.std_data.max = INT_MAX, + .data.std_data.step = 1, + .data.std_data.def = 0, + .flags = 0, + .impact = CRL_IMPACTS_NO_IMPACT, + .regs_items = 0, + .regs = 0, + .dep_items = 0, + .dep_ctrls = 0, + }, + { + .sd_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .op_type = CRL_CTRL_SET_OP, + .context = SENSOR_POWERED_ON, + .ctrl_id = ICI_EXT_SD_PARAM_ID_LINE_LENGTH_PIXELS, + .name = "Line Length Pixels", + .type = CRL_CTRL_TYPE_CUSTOM, + .data.std_data.min = 1920, + .data.std_data.max = 65535, + .data.std_data.step 
= 1, + .data.std_data.def = 1978, + .flags = 8, + .impact = CRL_IMPACTS_NO_IMPACT, + .regs_items = ARRAY_SIZE(ar0231at_llp_regs), + .regs = ar0231at_llp_regs, + .dep_items = 0, + .dep_ctrls = 0, + .type = CRL_CTRL_TYPE_INTEGER, + }, + { + .sd_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .op_type = CRL_CTRL_SET_OP, + .context = SENSOR_POWERED_ON, + .ctrl_id = ICI_EXT_SD_PARAM_ID_FRAME_LENGTH_LINES, + .name = "Frame Length Lines", + .type = CRL_CTRL_TYPE_CUSTOM, + .data.std_data.min = 1088, + .data.std_data.max = 65535, + .data.std_data.step = 1, + .data.std_data.def = 1480, + .flags = 8, + .impact = CRL_IMPACTS_NO_IMPACT, + .regs_items = ARRAY_SIZE(ar0231at_fll_regs), + .regs = ar0231at_fll_regs, + .dep_items = 0, + .dep_ctrls = 0, + .type = CRL_CTRL_TYPE_INTEGER, + }, + { + .sd_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .op_type = CRL_CTRL_SET_OP, + .context = SENSOR_POWERED_ON, + .ctrl_id = ICI_EXT_SD_PARAM_ID_ANALOGUE_GAIN, + .name = "CTRL_ID_ANALOGUE_GAIN", + .type = CRL_CTRL_TYPE_INTEGER, + .data.std_data.min = 0x0000, + .data.std_data.max = 0xFFFF, + .data.std_data.step = 1, + .data.std_data.def = 0xAAAA, + .flags = 0, + .impact = CRL_IMPACTS_NO_IMPACT, + .regs_items = ARRAY_SIZE(ar0231at_ana_gain_regs), + .regs = ar0231at_ana_gain_regs, + .dep_items = 0, + .dep_ctrls = 0, + }, + { + .sd_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .op_type = CRL_CTRL_SET_OP, + .context = SENSOR_POWERED_ON, + .ctrl_id = ICI_EXT_SD_PARAM_ID_GAIN, + .name = "Digital Gain", + .type = CRL_CTRL_TYPE_INTEGER, + .data.std_data.min = 0x0080, + .data.std_data.max = 0x07FF, + .data.std_data.step = 1, + .data.std_data.def = 0x0080, + .flags = 0, + .impact = CRL_IMPACTS_NO_IMPACT, + .regs_items = ARRAY_SIZE(ar0231at_gl_regs), + .regs = ar0231at_gl_regs, + .dep_items = 0, + .dep_ctrls = 0, + }, + { + .sd_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .op_type = CRL_CTRL_SET_OP, + .context = SENSOR_POWERED_ON, + .ctrl_id = ICI_EXT_SD_PARAM_ID_EXPOSURE, + .name = "CRL_CTRL_EXPOSURE_MODE", + .type = 
CRL_CTRL_TYPE_CUSTOM, + .data.std_data.min = 0, + .data.std_data.max = ARRAY_SIZE(ar0231at_ctrl_data_modes)-1, + .data.std_data.step = 1, + .data.std_data.def = 0x0, + .flags = 8, + .impact = CRL_IMPACTS_MODE_SELECTION, + .regs_items = ARRAY_SIZE(ar0231at_exposure_mode_regs), + .regs = ar0231at_exposure_mode_regs, + .dep_items = 0, + .dep_ctrls = 0, + .type = CRL_CTRL_TYPE_INTEGER, + }, + { + .sd_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .op_type = CRL_CTRL_SET_OP, + .context = SENSOR_POWERED_ON, + .ctrl_id = ICI_EXT_SD_PARAM_ID_HDR_RATIO, + .name = "CRL_CTRL_EXPOSURE_HDR_RATIO", + .type = CRL_CTRL_TYPE_CUSTOM, + .data.std_data.min = 0, + .data.std_data.max = 65535, + .data.std_data.step = 1, + .data.std_data.def = 0x0222, + .flags = 8, + .impact = CRL_IMPACTS_NO_IMPACT, + .regs_items = ARRAY_SIZE(ar0231at_hdr_exposure_ratio_regs), + .regs = ar0231at_hdr_exposure_ratio_regs, + .dep_items = 0, + .dep_ctrls = 0, + .type = CRL_CTRL_TYPE_INTEGER, + }, + { + .sd_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .op_type = CRL_CTRL_SET_OP, + .context = SENSOR_POWERED_ON, + .ctrl_id = ICI_EXT_SD_PARAM_ID_EXPOSURE, + .name = "T1_COARSE_EXPOSURE_TIME", + .type = CRL_CTRL_TYPE_INTEGER, + .data.std_data.min = 0x0002, + .data.std_data.max = 0x04FF, + .data.std_data.step = 1, + .data.std_data.def = 0x0163, + .flags = 0, + .impact = CRL_IMPACTS_NO_IMPACT, + .regs_items = ARRAY_SIZE(ar0231at_t1expotime_regs), + .regs = ar0231at_t1expotime_regs, + .dep_items = 0, + .dep_ctrls = 0, + }, + { + .sd_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .op_type = CRL_CTRL_SET_OP, + .context = SENSOR_POWERED_ON, + .ctrl_id = ICI_EXT_SD_PARAM_ID_EXPOSURE_SHS1, + .name = "T2_COARSE_EXPOSURE_TIME", + .type = CRL_CTRL_TYPE_CUSTOM, + .data.std_data.min = 0x0002, + .data.std_data.max = 0x0300, + .data.std_data.step = 1, + .data.std_data.def = 0x0002, + .flags = CRL_CTRL_FLAG_UPDATE, + .impact = CRL_IMPACTS_NO_IMPACT, + .regs_items = ARRAY_SIZE(ar0231at_t2expotime_regs), + .regs = ar0231at_t2expotime_regs, + .dep_items = 0, 
+ .dep_ctrls = 0, + .type = CRL_CTRL_TYPE_INTEGER, + }, + { + .sd_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .op_type = CRL_CTRL_SET_OP, + .context = SENSOR_POWERED_ON, + .ctrl_id = ICI_EXT_SD_PARAM_ID_EXPOSURE_SHS2, + .name = "T3_COARSE_EXPOSURE_TIME", + .type = CRL_CTRL_TYPE_CUSTOM, + .data.std_data.min = 0x0002, + .data.std_data.max = 0x0180, + .data.std_data.step = 1, + .data.std_data.def = 0x0002, + .flags = 8, + .impact = CRL_IMPACTS_NO_IMPACT, + .regs_items = ARRAY_SIZE(ar0231at_t3expotime_regs), + .regs = ar0231at_t3expotime_regs, + .dep_items = 0, + .dep_ctrls = 0, + .type = CRL_CTRL_TYPE_INTEGER, + }, + { + .sd_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .op_type = CRL_CTRL_SET_OP, + .context = SENSOR_POWERED_ON, + .ctrl_id = ICI_EXT_SD_PARAM_ID_EXPOSURE_SHS3, + .name = "T4_COARSE_EXPOSURE_TIME", + .type = CRL_CTRL_TYPE_CUSTOM, + .data.std_data.min = 0x0, + .data.std_data.max = 0x0500, + .data.std_data.step = 1, + .data.std_data.def = 0x0, + .flags = 8, + .impact = CRL_IMPACTS_NO_IMPACT, + .regs_items = ARRAY_SIZE(ar0231at_t4expotime_regs), + .regs = ar0231at_t4expotime_regs, + .dep_items = 0, + .dep_ctrls = 0, + .type = CRL_CTRL_TYPE_INTEGER, + }, + { + .sd_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .op_type = CRL_CTRL_SET_OP, + .context = SENSOR_POWERED_ON, + .ctrl_id = ICI_EXT_SD_PARAM_ID_TEST_PATTERN, + .name = "CTRL_ID_TEST_PATTERN", + .type = CRL_CTRL_TYPE_MENU_ITEMS, + .data.menu_items.menu = ar0231at_test_patterns, + .data.menu_items.size = ARRAY_SIZE(ar0231at_test_patterns)-1, + .flags = 8, + .impact = CRL_IMPACTS_NO_IMPACT, + .regs_items = ARRAY_SIZE(ar0231at_test_pattern_regs), + .regs = ar0231at_test_pattern_regs, + .dep_items = 0, + .dep_ctrls = 0, + }, +}; + +struct crl_sensor_detect_config ar0231at_sensor_detect_regset[] = { + { + .reg = { 0x3000, CRL_REG_LEN_16BIT, 0xFFFF }, + .width = 15, + }, +}; + +static struct crl_sensor_limits ar0231at_maxim_sensor_limits = { + .x_addr_min = 0, + .y_addr_min = 0, + .x_addr_max = 1920, + .y_addr_max = 1088, + 
.min_frame_length_lines = 240, + .max_frame_length_lines = 65535, + .min_line_length_pixels = 320, + .max_line_length_pixels = 32752, +}; + +struct crl_sensor_configuration ar0231at_crl_configuration = { + .powerup_regs_items = 0, + .powerup_regs = 0, + + .poweroff_regs_items = 0, + .poweroff_regs = 0, + + .power_items = 0, + .power_entities = 0, + + .pll_config_items = ARRAY_SIZE(ar0231at_pll_configurations), + .pll_configs = ar0231at_pll_configurations, + + .id_reg_items = ARRAY_SIZE(ar0231at_sensor_detect_regset), + .id_regs = ar0231at_sensor_detect_regset, + + .subdev_items = ARRAY_SIZE(ar0231at_sensor_subdevs), + .subdevs = ar0231at_sensor_subdevs, + + .modes_items = ARRAY_SIZE(ar0231at_modes), + .modes = ar0231at_modes, + + .csi_fmts_items = ARRAY_SIZE(ar0231at_crl_csi_data_fmt), + .csi_fmts = ar0231at_crl_csi_data_fmt, + + .ctrl_items = ARRAY_SIZE(ar0231at_ctrls), + .ctrl_bank = ar0231at_ctrls, + + .streamon_regs_items = 0, + .streamon_regs = 0, + .streamoff_regs_items = 0, + .streamoff_regs = 0, + + .sensor_limits = &ar0231at_maxim_sensor_limits, + +}; + +#endif /* __CRLMODULE_AR0231AT_CONFIGURATION_H_ */ diff --git a/drivers/media/i2c/crlmodule-lite/crl_magna_configuration_ti964.h b/drivers/media/i2c/crlmodule-lite/crl_magna_configuration_ti964.h new file mode 100644 index 0000000000000..c8d0d7b3550de --- /dev/null +++ b/drivers/media/i2c/crlmodule-lite/crl_magna_configuration_ti964.h @@ -0,0 +1,297 @@ +/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0) */ +/* + * Copyright (C) 2018 Intel Corporation + */ + +#ifndef __CRLMODULE_MAGNA_TI964_CONFIGURATION_H_ +#define __CRLMODULE_MAGNA_TI964_CONFIGURATION_H_ + +#include "crlmodule-sensor-ds.h" + +#define TI964_I2C_PHY_ADDR 0x3d + +static struct crl_pll_configuration magna_ti964_pll_configurations[] = { + { + .input_clk = 24000000, + .op_sys_clk = 400000000, + .bitsperpixel = 16, + .pixel_rate_csi = 529000000, + .pixel_rate_pa = 529000000, /* pixel_rate = MIPICLK*2 *4/12 */ + .csi_lanes = 4, + .comp_items = 
0, + .ctrl_data = 0, + .pll_regs_items = 0, + .pll_regs = NULL, + }, + { + .input_clk = 24000000, + .op_sys_clk = 400000000, + .bitsperpixel = 10, + .pixel_rate_csi = 529000000, + .pixel_rate_pa = 529000000, /* pixel_rate = MIPICLK*2 *4/12 */ + .csi_lanes = 4, + .comp_items = 0, + .ctrl_data = 0, + .pll_regs_items = 0, + .pll_regs = NULL, + }, + { + .input_clk = 24000000, + .op_sys_clk = 400000000, + .bitsperpixel = 20, + .pixel_rate_csi = 529000000, + .pixel_rate_pa = 529000000, /* pixel_rate = MIPICLK*2 *4/12 */ + .csi_lanes = 4, + .comp_items = 0, + .ctrl_data = 0, + .pll_regs_items = 0, + .pll_regs = NULL, + } +}; + +static struct crl_subdev_rect_rep magna_ti964_1280_720_rects[] = { + { + .subdev_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .in_rect.left = 0, + .in_rect.top = 0, + .in_rect.width = 1280, + .in_rect.height = 720, + .out_rect.left = 0, + .out_rect.top = 0, + .out_rect.width = 1280, + .out_rect.height = 720, + }, + { + .subdev_type = CRL_SUBDEV_TYPE_BINNER, + .in_rect.left = 0, + .in_rect.top = 0, + .in_rect.width = 1280, + .in_rect.height = 720, + .out_rect.left = 0, + .out_rect.top = 0, + .out_rect.width = 1280, + .out_rect.height = 720, + }, +}; + +static struct crl_register_write_rep magna_ti964_powerup_regs[] = { + {0x4c, CRL_REG_LEN_08BIT, 0x1, TI964_I2C_PHY_ADDR}, /* Select RX port 0 */ +}; + +static struct crl_register_write_rep magna_ti964_poweroff_regs[] = { + {0x1, CRL_REG_LEN_08BIT, 0x20, TI964_I2C_PHY_ADDR}, +}; + +static struct crl_mode_rep magna_ti964_modes[] = { + { + .sd_rects_items = ARRAY_SIZE(magna_ti964_1280_720_rects), + .sd_rects = magna_ti964_1280_720_rects, + .binn_hor = 1, + .binn_vert = 1, + .scale_m = 1, + .width = 1280, + .height = 720, + .min_llp = 2250, + .min_fll = 1320, + }, +}; + +static struct crl_sensor_subdev_config magna_ti964_subdevs[] = { + { + .subdev_type = CRL_SUBDEV_TYPE_BINNER, + .name = "ti964", + }, + { + .subdev_type = CRL_SUBDEV_TYPE_BINNER, + .name = "magna binner", + }, + { + .subdev_type = 
CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .name = "magna pixel array", + } +}; + +static struct crl_sensor_limits magna_ti964_limits = { + .x_addr_min = 0, + .y_addr_min = 0, + .x_addr_max = 1280, + .y_addr_max = 720, + .min_frame_length_lines = 240, + .max_frame_length_lines = 65535, + .min_line_length_pixels = 320, + .max_line_length_pixels = 32752, +}; + +static struct crl_csi_data_fmt magna_ti964_crl_csi_data_fmt[] = { + { + .code = ICI_FORMAT_YUYV, + .pixel_order = CRL_PIXEL_ORDER_IGNORE, + .bits_per_pixel = 16, + }, + { + .code = ICI_FORMAT_UYVY, + .pixel_order = CRL_PIXEL_ORDER_IGNORE, + .bits_per_pixel = 16, + }, +}; + +static struct crl_ctrl_data magna_ti964_ctrls[] = { + { + .sd_type = CRL_SUBDEV_TYPE_BINNER, + .op_type = CRL_CTRL_SET_OP, + .context = SENSOR_IDLE, + .ctrl_id = ICI_EXT_SD_PARAM_ID_LINK_FREQ, + .name = "CTRL_ID_LINK_FREQ", + .type = CRL_CTRL_TYPE_MENU_INT, + .data.int_menu.def = 0, + .data.int_menu.max = 0, + .data.int_menu.menu = 0, + .flags = 0, + .impact = CRL_IMPACTS_NO_IMPACT, + .regs_items = 0, + .regs = 0, + .dep_items = 0, + .dep_ctrls = 0, + }, + { + .sd_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .op_type = CRL_CTRL_GET_OP, + .context = SENSOR_POWERED_ON, + .ctrl_id = ICI_EXT_SD_PARAM_ID_PIXEL_RATE, + .name = "CTRL_ID_PIXEL_RATE_PA", + .type = CRL_CTRL_TYPE_INTEGER, + .data.std_data.min = 0, + .data.std_data.max = INT_MAX, + .data.std_data.step = 1, + .data.std_data.def = 0, + .flags = 0, + .impact = CRL_IMPACTS_NO_IMPACT, + .regs_items = 0, + .regs = 0, + .dep_items = 0, + .dep_ctrls = 0, + }, + { + .sd_type = CRL_SUBDEV_TYPE_BINNER, + .op_type = CRL_CTRL_GET_OP, + .context = SENSOR_POWERED_ON, + .ctrl_id = ICI_EXT_SD_PARAM_ID_PIXEL_RATE, + .name = "CTRL_ID_PIXEL_RATE_CSI", + .type = CRL_CTRL_TYPE_INTEGER, + .data.std_data.min = 0, + .data.std_data.max = INT_MAX, + .data.std_data.step = 1, + .data.std_data.def = 0, + .flags = 0, + .impact = CRL_IMPACTS_NO_IMPACT, + .regs_items = 0, + .regs = 0, + .dep_items = 0, + .dep_ctrls = 0, + }, +}; + 
+struct crl_register_write_rep magna_ti964_streamon_regs[] = { + {0x1f, CRL_REG_LEN_08BIT, 0x2, TI964_I2C_PHY_ADDR}, + {0x33, CRL_REG_LEN_08BIT, 0x1, TI964_I2C_PHY_ADDR}, + {0x6d, CRL_REG_LEN_08BIT, 0x7f, TI964_I2C_PHY_ADDR}, + {0x7c, CRL_REG_LEN_08BIT, 0x80, TI964_I2C_PHY_ADDR}, + {0x20, CRL_REG_LEN_08BIT, 0xe0, TI964_I2C_PHY_ADDR}, +}; + +struct crl_register_write_rep magna_ti964_streamoff_regs[] = { + {0x6d, CRL_REG_LEN_08BIT, 0x7f, TI964_I2C_PHY_ADDR}, + {0x7c, CRL_REG_LEN_08BIT, 0x81, TI964_I2C_PHY_ADDR}, + {0x20, CRL_REG_LEN_08BIT, 0xf0, TI964_I2C_PHY_ADDR}, +}; + +struct crl_register_write_rep magna_ti964_onetime_init_regs[] = { + {0x8, CRL_REG_LEN_08BIT, 0x1c, TI964_I2C_PHY_ADDR}, + {0xa, CRL_REG_LEN_08BIT, 0x79, TI964_I2C_PHY_ADDR}, + {0xb, CRL_REG_LEN_08BIT, 0x79, TI964_I2C_PHY_ADDR}, + {0xd, CRL_REG_LEN_08BIT, 0xb9, TI964_I2C_PHY_ADDR}, + {0x10, CRL_REG_LEN_08BIT, 0x91, TI964_I2C_PHY_ADDR}, + {0x11, CRL_REG_LEN_08BIT, 0x85, TI964_I2C_PHY_ADDR}, + {0x12, CRL_REG_LEN_08BIT, 0x89, TI964_I2C_PHY_ADDR}, + {0x13, CRL_REG_LEN_08BIT, 0xc1, TI964_I2C_PHY_ADDR}, + {0x17, CRL_REG_LEN_08BIT, 0xe1, TI964_I2C_PHY_ADDR}, + {0x18, CRL_REG_LEN_08BIT, 0x0, TI964_I2C_PHY_ADDR}, /* Disable frame sync. */ + {0x19, CRL_REG_LEN_08BIT, 0x0, TI964_I2C_PHY_ADDR}, /* Frame sync high time. */ + {0x1a, CRL_REG_LEN_08BIT, 0x2, TI964_I2C_PHY_ADDR}, + {0x1b, CRL_REG_LEN_08BIT, 0xa, TI964_I2C_PHY_ADDR}, /* Frame sync low time. */ + {0x1c, CRL_REG_LEN_08BIT, 0xd3, TI964_I2C_PHY_ADDR}, + {0x21, CRL_REG_LEN_08BIT, 0x43, TI964_I2C_PHY_ADDR}, /* Enable best effort mode. 
*/ + {0xb0, CRL_REG_LEN_08BIT, 0x10, TI964_I2C_PHY_ADDR}, + {0xb1, CRL_REG_LEN_08BIT, 0x14, TI964_I2C_PHY_ADDR}, + {0xb2, CRL_REG_LEN_08BIT, 0x1f, TI964_I2C_PHY_ADDR}, + {0xb3, CRL_REG_LEN_08BIT, 0x8, TI964_I2C_PHY_ADDR}, + {0x32, CRL_REG_LEN_08BIT, 0x1, TI964_I2C_PHY_ADDR}, /* Select CSI port 0 */ + {0x4c, CRL_REG_LEN_08BIT, 0x1, TI964_I2C_PHY_ADDR}, /* Select RX port 0 */ + {0x58, CRL_REG_LEN_08BIT, 0x58, TI964_I2C_PHY_ADDR}, + {0x5c, CRL_REG_LEN_08BIT, 0x18, TI964_I2C_PHY_ADDR}, /* TI913 alias addr 0xc */ + {0x6d, CRL_REG_LEN_08BIT, 0x7f, TI964_I2C_PHY_ADDR}, + {0x70, CRL_REG_LEN_08BIT, 0x1e, TI964_I2C_PHY_ADDR}, /* YUV422_8 */ + {0x7c, CRL_REG_LEN_08BIT, 0x81, TI964_I2C_PHY_ADDR}, /* Use RAW10 8bit mode */ + {0xd2, CRL_REG_LEN_08BIT, 0x84, TI964_I2C_PHY_ADDR}, + {0x4c, CRL_REG_LEN_08BIT, 0x12, TI964_I2C_PHY_ADDR}, /* Select RX port 1 */ + {0x58, CRL_REG_LEN_08BIT, 0x58, TI964_I2C_PHY_ADDR}, + {0x5c, CRL_REG_LEN_08BIT, 0x1a, TI964_I2C_PHY_ADDR}, /* TI913 alias addr 0xd */ + {0x6d, CRL_REG_LEN_08BIT, 0x7f, TI964_I2C_PHY_ADDR}, + {0x70, CRL_REG_LEN_08BIT, 0x5e, TI964_I2C_PHY_ADDR}, /* YUV422_8 */ + {0x7c, CRL_REG_LEN_08BIT, 0x81, TI964_I2C_PHY_ADDR}, /* Use RAW10 8bit mode */ + {0xd2, CRL_REG_LEN_08BIT, 0x84, TI964_I2C_PHY_ADDR}, + {0x4c, CRL_REG_LEN_08BIT, 0x24, TI964_I2C_PHY_ADDR}, /* Select RX port 2*/ + {0x58, CRL_REG_LEN_08BIT, 0x58, TI964_I2C_PHY_ADDR}, + {0x5c, CRL_REG_LEN_08BIT, 0x1c, TI964_I2C_PHY_ADDR}, /* TI913 alias addr 0xe */ + {0x6d, CRL_REG_LEN_08BIT, 0x7f, TI964_I2C_PHY_ADDR}, + {0x70, CRL_REG_LEN_08BIT, 0x9e, TI964_I2C_PHY_ADDR}, /* YUV422_8 */ + {0x7c, CRL_REG_LEN_08BIT, 0x81, TI964_I2C_PHY_ADDR}, /* Use RAW10 8bit mode */ + {0xd2, CRL_REG_LEN_08BIT, 0x84, TI964_I2C_PHY_ADDR}, + {0x4c, CRL_REG_LEN_08BIT, 0x38, TI964_I2C_PHY_ADDR}, /* Select RX port3 */ + {0x58, CRL_REG_LEN_08BIT, 0x58, TI964_I2C_PHY_ADDR}, + {0x5c, CRL_REG_LEN_08BIT, 0x1e, TI964_I2C_PHY_ADDR}, /* TI913 alias addr 0xf */ + {0x6d, CRL_REG_LEN_08BIT, 0x7f, TI964_I2C_PHY_ADDR}, + 
{0x70, CRL_REG_LEN_08BIT, 0xde, TI964_I2C_PHY_ADDR}, /* YUV422_8 */ + {0x7c, CRL_REG_LEN_08BIT, 0x81, TI964_I2C_PHY_ADDR}, /* Use RAW10 8bit mode */ + {0xd2, CRL_REG_LEN_08BIT, 0x84, TI964_I2C_PHY_ADDR}, + {0x6e, CRL_REG_LEN_08BIT, 0x89, TI964_I2C_PHY_ADDR}, +}; + +struct crl_sensor_configuration magna_ti964_crl_configuration = { + + .powerup_regs_items = ARRAY_SIZE(magna_ti964_powerup_regs), + .powerup_regs = magna_ti964_powerup_regs, + + .poweroff_regs_items = ARRAY_SIZE(magna_ti964_poweroff_regs), + .poweroff_regs = magna_ti964_poweroff_regs, + + .onetime_init_regs_items = ARRAY_SIZE(magna_ti964_onetime_init_regs), + .onetime_init_regs = magna_ti964_onetime_init_regs, + + .subdev_items = ARRAY_SIZE(magna_ti964_subdevs), + .subdevs = magna_ti964_subdevs, + + .pll_config_items = ARRAY_SIZE(magna_ti964_pll_configurations), + .pll_configs = magna_ti964_pll_configurations, + + .sensor_limits = &magna_ti964_limits, + + .modes_items = ARRAY_SIZE(magna_ti964_modes), + .modes = magna_ti964_modes, + + .streamon_regs_items = ARRAY_SIZE(magna_ti964_streamon_regs), + .streamon_regs = magna_ti964_streamon_regs, + + .streamoff_regs_items = ARRAY_SIZE(magna_ti964_streamoff_regs), + .streamoff_regs = magna_ti964_streamoff_regs, + + .ctrl_items = ARRAY_SIZE(magna_ti964_ctrls), + .ctrl_bank = magna_ti964_ctrls, + + .csi_fmts_items = ARRAY_SIZE(magna_ti964_crl_csi_data_fmt), + .csi_fmts = magna_ti964_crl_csi_data_fmt, + + .addr_len = CRL_ADDR_8BIT, +}; + +#endif /* __CRLMODULE_MAGNA_TI964_CONFIGURATION_H_ */ diff --git a/drivers/media/i2c/crlmodule-lite/crl_ov10635_configuration.h b/drivers/media/i2c/crlmodule-lite/crl_ov10635_configuration.h new file mode 100644 index 0000000000000..012fdee465603 --- /dev/null +++ b/drivers/media/i2c/crlmodule-lite/crl_ov10635_configuration.h @@ -0,0 +1,6360 @@ +/* + * Copyright (c) 2016--2017 Intel Corporation. 
+ * + * Author: Yunliang Ding + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License version + * 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + +#ifndef __CRLMODULE_OV10635_CONFIGURATION_H_ +#define __CRLMODULE_OV10635_CONFIGURATION_H_ + +#include "crlmodule-sensor-ds.h" + +#define OV10635_REG_RESET 0x0103 + +static struct crl_register_write_rep ov10635_1280_800_YUV_HDR[] = { + {0x301b, CRL_REG_LEN_08BIT, 0xff}, + {0x301c, CRL_REG_LEN_08BIT, 0xff}, + {0x301a, CRL_REG_LEN_08BIT, 0xff}, + {0x3011, CRL_REG_LEN_08BIT, 0x42}, + {0x6900, CRL_REG_LEN_08BIT, 0x0c}, + {0x6901, CRL_REG_LEN_08BIT, 0x11}, + {0x3503, CRL_REG_LEN_08BIT, 0x10}, + {0x3025, CRL_REG_LEN_08BIT, 0x03}, + {0x3003, CRL_REG_LEN_08BIT, 0x20}, + {0x3004, CRL_REG_LEN_08BIT, 0x21}, + {0x3005, CRL_REG_LEN_08BIT, 0x20}, + {0x3006, CRL_REG_LEN_08BIT, 0x91}, + {0x3600, CRL_REG_LEN_08BIT, 0x74}, + {0x3601, CRL_REG_LEN_08BIT, 0x2b}, + {0x3612, CRL_REG_LEN_08BIT, 0x00}, + {0x3611, CRL_REG_LEN_08BIT, 0x67}, + {0x3633, CRL_REG_LEN_08BIT, 0xca}, + {0x3602, CRL_REG_LEN_08BIT, 0x2f}, + {0x3603, CRL_REG_LEN_08BIT, 0x00}, + {0x3630, CRL_REG_LEN_08BIT, 0x28}, + {0x3631, CRL_REG_LEN_08BIT, 0x16}, + {0x3714, CRL_REG_LEN_08BIT, 0x10}, + {0x371d, CRL_REG_LEN_08BIT, 0x01}, + {0x3007, CRL_REG_LEN_08BIT, 0x01}, + {0x3024, CRL_REG_LEN_08BIT, 0x01}, + {0x3020, CRL_REG_LEN_08BIT, 0x0b}, + {0x3702, CRL_REG_LEN_08BIT, 0x20}, + {0x3703, CRL_REG_LEN_08BIT, 0x48}, + {0x3704, CRL_REG_LEN_08BIT, 0x32}, + {0x3709, CRL_REG_LEN_08BIT, 0xa8}, + {0x3709, CRL_REG_LEN_08BIT, 0xa8}, + {0x370c, CRL_REG_LEN_08BIT, 0xc7}, + {0x370d, CRL_REG_LEN_08BIT, 0x80}, + {0x3712, CRL_REG_LEN_08BIT, 0x00}, + {0x3713, 
CRL_REG_LEN_08BIT, 0x20}, + {0x3715, CRL_REG_LEN_08BIT, 0x04}, + {0x381d, CRL_REG_LEN_08BIT, 0x40}, + {0x381c, CRL_REG_LEN_08BIT, 0x00}, + {0x3822, CRL_REG_LEN_08BIT, 0x50}, + {0x3824, CRL_REG_LEN_08BIT, 0x50}, + {0x3815, CRL_REG_LEN_08BIT, 0x8c}, + {0x3804, CRL_REG_LEN_08BIT, 0x05}, + {0x3805, CRL_REG_LEN_08BIT, 0x1f}, + {0x3800, CRL_REG_LEN_08BIT, 0x00}, + {0x3801, CRL_REG_LEN_08BIT, 0x00}, + {0x3806, CRL_REG_LEN_08BIT, 0x03}, + {0x3807, CRL_REG_LEN_08BIT, 0x29}, + {0x3802, CRL_REG_LEN_08BIT, 0x00}, + {0x3803, CRL_REG_LEN_08BIT, 0x04}, + {0x3808, CRL_REG_LEN_08BIT, 0x05}, + {0x3809, CRL_REG_LEN_08BIT, 0x00}, + {0x380a, CRL_REG_LEN_08BIT, 0x03}, + {0x380b, CRL_REG_LEN_08BIT, 0x20}, + {0x380c, CRL_REG_LEN_08BIT, 0x07}, + {0x380d, CRL_REG_LEN_08BIT, 0x71}, + {0x6e42, CRL_REG_LEN_08BIT, 0x03}, + {0x6e43, CRL_REG_LEN_08BIT, 0x48}, + {0x380e, CRL_REG_LEN_08BIT, 0x03}, + {0x380f, CRL_REG_LEN_08BIT, 0x48}, + {0x3813, CRL_REG_LEN_08BIT, 0x02}, + {0x3811, CRL_REG_LEN_08BIT, 0x10}, + {0x381f, CRL_REG_LEN_08BIT, 0x0c}, + {0x3828, CRL_REG_LEN_08BIT, 0x03}, + {0x3829, CRL_REG_LEN_08BIT, 0x10}, + {0x382a, CRL_REG_LEN_08BIT, 0x10}, + {0x382b, CRL_REG_LEN_08BIT, 0x10}, + {0x3621, CRL_REG_LEN_08BIT, 0x64}, + {0x5005, CRL_REG_LEN_08BIT, 0x08}, + {0x56d5, CRL_REG_LEN_08BIT, 0x00}, + {0x56d6, CRL_REG_LEN_08BIT, 0x80}, + {0x56d7, CRL_REG_LEN_08BIT, 0x00}, + {0x56d8, CRL_REG_LEN_08BIT, 0x00}, + {0x56d9, CRL_REG_LEN_08BIT, 0x00}, + {0x56da, CRL_REG_LEN_08BIT, 0x80}, + {0x56db, CRL_REG_LEN_08BIT, 0x00}, + {0x56dc, CRL_REG_LEN_08BIT, 0x00}, + {0x56e8, CRL_REG_LEN_08BIT, 0x00}, + {0x56e9, CRL_REG_LEN_08BIT, 0x7f}, + {0x56ea, CRL_REG_LEN_08BIT, 0x00}, + {0x56eb, CRL_REG_LEN_08BIT, 0x7f}, + {0x5100, CRL_REG_LEN_08BIT, 0x00}, + {0x5101, CRL_REG_LEN_08BIT, 0x80}, + {0x5102, CRL_REG_LEN_08BIT, 0x00}, + {0x5103, CRL_REG_LEN_08BIT, 0x80}, + {0x5104, CRL_REG_LEN_08BIT, 0x00}, + {0x5105, CRL_REG_LEN_08BIT, 0x80}, + {0x5106, CRL_REG_LEN_08BIT, 0x00}, + {0x5107, CRL_REG_LEN_08BIT, 0x80}, + {0x5108, 
CRL_REG_LEN_08BIT, 0x00}, + {0x5109, CRL_REG_LEN_08BIT, 0x00}, + {0x510a, CRL_REG_LEN_08BIT, 0x00}, + {0x510b, CRL_REG_LEN_08BIT, 0x00}, + {0x510c, CRL_REG_LEN_08BIT, 0x00}, + {0x510d, CRL_REG_LEN_08BIT, 0x00}, + {0x510e, CRL_REG_LEN_08BIT, 0x00}, + {0x510f, CRL_REG_LEN_08BIT, 0x00}, + {0x5110, CRL_REG_LEN_08BIT, 0x00}, + {0x5111, CRL_REG_LEN_08BIT, 0x80}, + {0x5112, CRL_REG_LEN_08BIT, 0x00}, + {0x5113, CRL_REG_LEN_08BIT, 0x80}, + {0x5114, CRL_REG_LEN_08BIT, 0x00}, + {0x5115, CRL_REG_LEN_08BIT, 0x80}, + {0x5116, CRL_REG_LEN_08BIT, 0x00}, + {0x5117, CRL_REG_LEN_08BIT, 0x80}, + {0x5118, CRL_REG_LEN_08BIT, 0x00}, + {0x5119, CRL_REG_LEN_08BIT, 0x00}, + {0x511a, CRL_REG_LEN_08BIT, 0x00}, + {0x511b, CRL_REG_LEN_08BIT, 0x00}, + {0x511c, CRL_REG_LEN_08BIT, 0x00}, + {0x511d, CRL_REG_LEN_08BIT, 0x00}, + {0x511e, CRL_REG_LEN_08BIT, 0x00}, + {0x511f, CRL_REG_LEN_08BIT, 0x00}, + {0x56d0, CRL_REG_LEN_08BIT, 0x00}, + {0x5006, CRL_REG_LEN_08BIT, 0x24}, + {0x5608, CRL_REG_LEN_08BIT, 0x0d}, + {0x52d7, CRL_REG_LEN_08BIT, 0x06}, + {0x528d, CRL_REG_LEN_08BIT, 0x08}, + {0x5293, CRL_REG_LEN_08BIT, 0x12}, + {0x52d3, CRL_REG_LEN_08BIT, 0x12}, + {0x5288, CRL_REG_LEN_08BIT, 0x06}, + {0x5289, CRL_REG_LEN_08BIT, 0x20}, + {0x52c8, CRL_REG_LEN_08BIT, 0x06}, + {0x52c9, CRL_REG_LEN_08BIT, 0x20}, + {0x52cd, CRL_REG_LEN_08BIT, 0x04}, + {0x5381, CRL_REG_LEN_08BIT, 0x00}, + {0x5382, CRL_REG_LEN_08BIT, 0xff}, + {0x5589, CRL_REG_LEN_08BIT, 0x76}, + {0x558a, CRL_REG_LEN_08BIT, 0x47}, + {0x558b, CRL_REG_LEN_08BIT, 0xef}, + {0x558c, CRL_REG_LEN_08BIT, 0xc9}, + {0x558d, CRL_REG_LEN_08BIT, 0x49}, + {0x558e, CRL_REG_LEN_08BIT, 0x30}, + {0x558f, CRL_REG_LEN_08BIT, 0x67}, + {0x5590, CRL_REG_LEN_08BIT, 0x3f}, + {0x5591, CRL_REG_LEN_08BIT, 0xf0}, + {0x5592, CRL_REG_LEN_08BIT, 0x10}, + {0x55a2, CRL_REG_LEN_08BIT, 0x6d}, + {0x55a3, CRL_REG_LEN_08BIT, 0x55}, + {0x55a4, CRL_REG_LEN_08BIT, 0xc3}, + {0x55a5, CRL_REG_LEN_08BIT, 0xb5}, + {0x55a6, CRL_REG_LEN_08BIT, 0x43}, + {0x55a7, CRL_REG_LEN_08BIT, 0x38}, + {0x55a8, 
CRL_REG_LEN_08BIT, 0x5f}, + {0x55a9, CRL_REG_LEN_08BIT, 0x4b}, + {0x55aa, CRL_REG_LEN_08BIT, 0xf0}, + {0x55ab, CRL_REG_LEN_08BIT, 0x10}, + {0x5581, CRL_REG_LEN_08BIT, 0x52}, + {0x5300, CRL_REG_LEN_08BIT, 0x01}, + {0x5301, CRL_REG_LEN_08BIT, 0x00}, + {0x5302, CRL_REG_LEN_08BIT, 0x00}, + {0x5303, CRL_REG_LEN_08BIT, 0x0e}, + {0x5304, CRL_REG_LEN_08BIT, 0x00}, + {0x5305, CRL_REG_LEN_08BIT, 0x0e}, + {0x5306, CRL_REG_LEN_08BIT, 0x00}, + {0x5307, CRL_REG_LEN_08BIT, 0x36}, + {0x5308, CRL_REG_LEN_08BIT, 0x00}, + {0x5309, CRL_REG_LEN_08BIT, 0xd9}, + {0x530a, CRL_REG_LEN_08BIT, 0x00}, + {0x530b, CRL_REG_LEN_08BIT, 0x0f}, + {0x530c, CRL_REG_LEN_08BIT, 0x00}, + {0x530d, CRL_REG_LEN_08BIT, 0x2c}, + {0x530e, CRL_REG_LEN_08BIT, 0x00}, + {0x530f, CRL_REG_LEN_08BIT, 0x59}, + {0x5310, CRL_REG_LEN_08BIT, 0x00}, + {0x5311, CRL_REG_LEN_08BIT, 0x7b}, + {0x5312, CRL_REG_LEN_08BIT, 0x00}, + {0x5313, CRL_REG_LEN_08BIT, 0x22}, + {0x5314, CRL_REG_LEN_08BIT, 0x00}, + {0x5315, CRL_REG_LEN_08BIT, 0xd5}, + {0x5316, CRL_REG_LEN_08BIT, 0x00}, + {0x5317, CRL_REG_LEN_08BIT, 0x13}, + {0x5318, CRL_REG_LEN_08BIT, 0x00}, + {0x5319, CRL_REG_LEN_08BIT, 0x18}, + {0x531a, CRL_REG_LEN_08BIT, 0x00}, + {0x531b, CRL_REG_LEN_08BIT, 0x26}, + {0x531c, CRL_REG_LEN_08BIT, 0x00}, + {0x531d, CRL_REG_LEN_08BIT, 0xdc}, + {0x531e, CRL_REG_LEN_08BIT, 0x00}, + {0x531f, CRL_REG_LEN_08BIT, 0x02}, + {0x5320, CRL_REG_LEN_08BIT, 0x00}, + {0x5321, CRL_REG_LEN_08BIT, 0x24}, + {0x5322, CRL_REG_LEN_08BIT, 0x00}, + {0x5323, CRL_REG_LEN_08BIT, 0x56}, + {0x5324, CRL_REG_LEN_08BIT, 0x00}, + {0x5325, CRL_REG_LEN_08BIT, 0x85}, + {0x5326, CRL_REG_LEN_08BIT, 0x00}, + {0x5327, CRL_REG_LEN_08BIT, 0x20}, + {0x5609, CRL_REG_LEN_08BIT, 0x01}, + {0x560a, CRL_REG_LEN_08BIT, 0x40}, + {0x560b, CRL_REG_LEN_08BIT, 0x01}, + {0x560c, CRL_REG_LEN_08BIT, 0x40}, + {0x560d, CRL_REG_LEN_08BIT, 0x00}, + {0x560e, CRL_REG_LEN_08BIT, 0xfa}, + {0x560f, CRL_REG_LEN_08BIT, 0x00}, + {0x5610, CRL_REG_LEN_08BIT, 0xfa}, + {0x5611, CRL_REG_LEN_08BIT, 0x02}, + {0x5612, 
CRL_REG_LEN_08BIT, 0x80}, + {0x5613, CRL_REG_LEN_08BIT, 0x02}, + {0x5614, CRL_REG_LEN_08BIT, 0x80}, + {0x5615, CRL_REG_LEN_08BIT, 0x01}, + {0x5616, CRL_REG_LEN_08BIT, 0x2c}, + {0x5617, CRL_REG_LEN_08BIT, 0x01}, + {0x5618, CRL_REG_LEN_08BIT, 0x2c}, + {0x563b, CRL_REG_LEN_08BIT, 0x01}, + {0x563c, CRL_REG_LEN_08BIT, 0x01}, + {0x563d, CRL_REG_LEN_08BIT, 0x01}, + {0x563e, CRL_REG_LEN_08BIT, 0x01}, + {0x563f, CRL_REG_LEN_08BIT, 0x03}, + {0x5640, CRL_REG_LEN_08BIT, 0x03}, + {0x5641, CRL_REG_LEN_08BIT, 0x03}, + {0x5642, CRL_REG_LEN_08BIT, 0x05}, + {0x5643, CRL_REG_LEN_08BIT, 0x09}, + {0x5644, CRL_REG_LEN_08BIT, 0x05}, + {0x5645, CRL_REG_LEN_08BIT, 0x05}, + {0x5646, CRL_REG_LEN_08BIT, 0x05}, + {0x5647, CRL_REG_LEN_08BIT, 0x05}, + {0x5651, CRL_REG_LEN_08BIT, 0x00}, + {0x5652, CRL_REG_LEN_08BIT, 0x80}, + {0x521a, CRL_REG_LEN_08BIT, 0x01}, + {0x521b, CRL_REG_LEN_08BIT, 0x03}, + {0x521c, CRL_REG_LEN_08BIT, 0x06}, + {0x521d, CRL_REG_LEN_08BIT, 0x0a}, + {0x521e, CRL_REG_LEN_08BIT, 0x0e}, + {0x521f, CRL_REG_LEN_08BIT, 0x12}, + {0x5220, CRL_REG_LEN_08BIT, 0x16}, + {0x5223, CRL_REG_LEN_08BIT, 0x02}, + {0x5225, CRL_REG_LEN_08BIT, 0x04}, + {0x5227, CRL_REG_LEN_08BIT, 0x08}, + {0x5229, CRL_REG_LEN_08BIT, 0x0c}, + {0x522b, CRL_REG_LEN_08BIT, 0x12}, + {0x522d, CRL_REG_LEN_08BIT, 0x18}, + {0x522f, CRL_REG_LEN_08BIT, 0x1e}, + {0x5241, CRL_REG_LEN_08BIT, 0x04}, + {0x5242, CRL_REG_LEN_08BIT, 0x01}, + {0x5243, CRL_REG_LEN_08BIT, 0x03}, + {0x5244, CRL_REG_LEN_08BIT, 0x06}, + {0x5245, CRL_REG_LEN_08BIT, 0x0a}, + {0x5246, CRL_REG_LEN_08BIT, 0x0e}, + {0x5247, CRL_REG_LEN_08BIT, 0x12}, + {0x5248, CRL_REG_LEN_08BIT, 0x16}, + {0x524a, CRL_REG_LEN_08BIT, 0x03}, + {0x524c, CRL_REG_LEN_08BIT, 0x04}, + {0x524e, CRL_REG_LEN_08BIT, 0x08}, + {0x5250, CRL_REG_LEN_08BIT, 0x0c}, + {0x5252, CRL_REG_LEN_08BIT, 0x12}, + {0x5254, CRL_REG_LEN_08BIT, 0x18}, + {0x5256, CRL_REG_LEN_08BIT, 0x1e}, + {0x4606, CRL_REG_LEN_08BIT, 0x07}, + {0x4607, CRL_REG_LEN_08BIT, 0x71}, + {0x460a, CRL_REG_LEN_08BIT, 0x02}, + {0x460b, 
CRL_REG_LEN_08BIT, 0x70}, + {0x460c, CRL_REG_LEN_08BIT, 0x00}, + {0x4620, CRL_REG_LEN_08BIT, 0x0e}, + {0x4700, CRL_REG_LEN_08BIT, 0x04}, + {0x4701, CRL_REG_LEN_08BIT, 0x00}, + {0x4702, CRL_REG_LEN_08BIT, 0x01}, + {0x4004, CRL_REG_LEN_08BIT, 0x04}, + {0x4005, CRL_REG_LEN_08BIT, 0x18}, + {0x4001, CRL_REG_LEN_08BIT, 0x06}, + {0x4050, CRL_REG_LEN_08BIT, 0x22}, + {0x4051, CRL_REG_LEN_08BIT, 0x24}, + {0x4052, CRL_REG_LEN_08BIT, 0x02}, + {0x4057, CRL_REG_LEN_08BIT, 0x9c}, + {0x405a, CRL_REG_LEN_08BIT, 0x00}, + {0x3832, CRL_REG_LEN_08BIT, 0x00}, + {0x3833, CRL_REG_LEN_08BIT, 0x02}, + {0x3834, CRL_REG_LEN_08BIT, 0x03}, + {0x3835, CRL_REG_LEN_08BIT, 0x48}, + {0x302e, CRL_REG_LEN_08BIT, 0x00}, + {0x4202, CRL_REG_LEN_08BIT, 0x02}, + {0x3023, CRL_REG_LEN_08BIT, 0x10}, + {0x0100, CRL_REG_LEN_08BIT, 0x01}, + {0x0100, CRL_REG_LEN_08BIT, 0x01}, + {0x6f10, CRL_REG_LEN_08BIT, 0x07}, + {0x6f11, CRL_REG_LEN_08BIT, 0x82}, + {0x6f12, CRL_REG_LEN_08BIT, 0x04}, + {0x6f13, CRL_REG_LEN_08BIT, 0x00}, + {0x6f14, CRL_REG_LEN_08BIT, 0x1f}, + {0x6f15, CRL_REG_LEN_08BIT, 0xdd}, + {0x6f16, CRL_REG_LEN_08BIT, 0x04}, + {0x6f17, CRL_REG_LEN_08BIT, 0x04}, + {0x6f18, CRL_REG_LEN_08BIT, 0x36}, + {0x6f19, CRL_REG_LEN_08BIT, 0x66}, + {0x6f1a, CRL_REG_LEN_08BIT, 0x04}, + {0x6f1b, CRL_REG_LEN_08BIT, 0x08}, + {0x6f1c, CRL_REG_LEN_08BIT, 0x0c}, + {0x6f1d, CRL_REG_LEN_08BIT, 0xe7}, + {0x6f1e, CRL_REG_LEN_08BIT, 0x04}, + {0x6f1f, CRL_REG_LEN_08BIT, 0x0c}, + {0xd000, CRL_REG_LEN_08BIT, 0x19}, + {0xd001, CRL_REG_LEN_08BIT, 0xa0}, + {0xd002, CRL_REG_LEN_08BIT, 0x00}, + {0xd003, CRL_REG_LEN_08BIT, 0x01}, + {0xd004, CRL_REG_LEN_08BIT, 0xa9}, + {0xd005, CRL_REG_LEN_08BIT, 0xad}, + {0xd006, CRL_REG_LEN_08BIT, 0x10}, + {0xd007, CRL_REG_LEN_08BIT, 0x40}, + {0xd008, CRL_REG_LEN_08BIT, 0x44}, + {0xd009, CRL_REG_LEN_08BIT, 0x00}, + {0xd00a, CRL_REG_LEN_08BIT, 0x68}, + {0xd00b, CRL_REG_LEN_08BIT, 0x00}, + {0xd00c, CRL_REG_LEN_08BIT, 0x15}, + {0xd00d, CRL_REG_LEN_08BIT, 0x00}, + {0xd00e, CRL_REG_LEN_08BIT, 0x00}, + {0xd00f, 
CRL_REG_LEN_08BIT, 0x00}, + {0xd010, CRL_REG_LEN_08BIT, 0x19}, + {0xd011, CRL_REG_LEN_08BIT, 0xa0}, + {0xd012, CRL_REG_LEN_08BIT, 0x00}, + {0xd013, CRL_REG_LEN_08BIT, 0x01}, + {0xd014, CRL_REG_LEN_08BIT, 0xa9}, + {0xd015, CRL_REG_LEN_08BIT, 0xad}, + {0xd016, CRL_REG_LEN_08BIT, 0x13}, + {0xd017, CRL_REG_LEN_08BIT, 0xd0}, + {0xd018, CRL_REG_LEN_08BIT, 0x44}, + {0xd019, CRL_REG_LEN_08BIT, 0x00}, + {0xd01a, CRL_REG_LEN_08BIT, 0x68}, + {0xd01b, CRL_REG_LEN_08BIT, 0x00}, + {0xd01c, CRL_REG_LEN_08BIT, 0x15}, + {0xd01d, CRL_REG_LEN_08BIT, 0x00}, + {0xd01e, CRL_REG_LEN_08BIT, 0x00}, + {0xd01f, CRL_REG_LEN_08BIT, 0x00}, + {0xd020, CRL_REG_LEN_08BIT, 0x19}, + {0xd021, CRL_REG_LEN_08BIT, 0xa0}, + {0xd022, CRL_REG_LEN_08BIT, 0x00}, + {0xd023, CRL_REG_LEN_08BIT, 0x01}, + {0xd024, CRL_REG_LEN_08BIT, 0xa9}, + {0xd025, CRL_REG_LEN_08BIT, 0xad}, + {0xd026, CRL_REG_LEN_08BIT, 0x14}, + {0xd027, CRL_REG_LEN_08BIT, 0xb8}, + {0xd028, CRL_REG_LEN_08BIT, 0x44}, + {0xd029, CRL_REG_LEN_08BIT, 0x00}, + {0xd02a, CRL_REG_LEN_08BIT, 0x68}, + {0xd02b, CRL_REG_LEN_08BIT, 0x00}, + {0xd02c, CRL_REG_LEN_08BIT, 0x15}, + {0xd02d, CRL_REG_LEN_08BIT, 0x00}, + {0xd02e, CRL_REG_LEN_08BIT, 0x00}, + {0xd02f, CRL_REG_LEN_08BIT, 0x00}, + {0xd030, CRL_REG_LEN_08BIT, 0x19}, + {0xd031, CRL_REG_LEN_08BIT, 0xa0}, + {0xd032, CRL_REG_LEN_08BIT, 0x00}, + {0xd033, CRL_REG_LEN_08BIT, 0x01}, + {0xd034, CRL_REG_LEN_08BIT, 0xa9}, + {0xd035, CRL_REG_LEN_08BIT, 0xad}, + {0xd036, CRL_REG_LEN_08BIT, 0x14}, + {0xd037, CRL_REG_LEN_08BIT, 0xdc}, + {0xd038, CRL_REG_LEN_08BIT, 0x44}, + {0xd039, CRL_REG_LEN_08BIT, 0x00}, + {0xd03a, CRL_REG_LEN_08BIT, 0x68}, + {0xd03b, CRL_REG_LEN_08BIT, 0x00}, + {0xd03c, CRL_REG_LEN_08BIT, 0x15}, + {0xd03d, CRL_REG_LEN_08BIT, 0x00}, + {0xd03e, CRL_REG_LEN_08BIT, 0x00}, + {0xd03f, CRL_REG_LEN_08BIT, 0x00}, + {0xd040, CRL_REG_LEN_08BIT, 0x9c}, + {0xd041, CRL_REG_LEN_08BIT, 0x21}, + {0xd042, CRL_REG_LEN_08BIT, 0xff}, + {0xd043, CRL_REG_LEN_08BIT, 0xe4}, + {0xd044, CRL_REG_LEN_08BIT, 0xd4}, + {0xd045, 
CRL_REG_LEN_08BIT, 0x01}, + {0xd046, CRL_REG_LEN_08BIT, 0x48}, + {0xd047, CRL_REG_LEN_08BIT, 0x00}, + {0xd048, CRL_REG_LEN_08BIT, 0xd4}, + {0xd049, CRL_REG_LEN_08BIT, 0x01}, + {0xd04a, CRL_REG_LEN_08BIT, 0x50}, + {0xd04b, CRL_REG_LEN_08BIT, 0x04}, + {0xd04c, CRL_REG_LEN_08BIT, 0xd4}, + {0xd04d, CRL_REG_LEN_08BIT, 0x01}, + {0xd04e, CRL_REG_LEN_08BIT, 0x60}, + {0xd04f, CRL_REG_LEN_08BIT, 0x08}, + {0xd050, CRL_REG_LEN_08BIT, 0xd4}, + {0xd051, CRL_REG_LEN_08BIT, 0x01}, + {0xd052, CRL_REG_LEN_08BIT, 0x70}, + {0xd053, CRL_REG_LEN_08BIT, 0x0c}, + {0xd054, CRL_REG_LEN_08BIT, 0xd4}, + {0xd055, CRL_REG_LEN_08BIT, 0x01}, + {0xd056, CRL_REG_LEN_08BIT, 0x80}, + {0xd057, CRL_REG_LEN_08BIT, 0x10}, + {0xd058, CRL_REG_LEN_08BIT, 0x19}, + {0xd059, CRL_REG_LEN_08BIT, 0xc0}, + {0xd05a, CRL_REG_LEN_08BIT, 0x00}, + {0xd05b, CRL_REG_LEN_08BIT, 0x01}, + {0xd05c, CRL_REG_LEN_08BIT, 0xa9}, + {0xd05d, CRL_REG_LEN_08BIT, 0xce}, + {0xd05e, CRL_REG_LEN_08BIT, 0x02}, + {0xd05f, CRL_REG_LEN_08BIT, 0xa4}, + {0xd060, CRL_REG_LEN_08BIT, 0x9c}, + {0xd061, CRL_REG_LEN_08BIT, 0xa0}, + {0xd062, CRL_REG_LEN_08BIT, 0x00}, + {0xd063, CRL_REG_LEN_08BIT, 0x00}, + {0xd064, CRL_REG_LEN_08BIT, 0x84}, + {0xd065, CRL_REG_LEN_08BIT, 0x6e}, + {0xd066, CRL_REG_LEN_08BIT, 0x00}, + {0xd067, CRL_REG_LEN_08BIT, 0x00}, + {0xd068, CRL_REG_LEN_08BIT, 0xd8}, + {0xd069, CRL_REG_LEN_08BIT, 0x03}, + {0xd06a, CRL_REG_LEN_08BIT, 0x28}, + {0xd06b, CRL_REG_LEN_08BIT, 0x76}, + {0xd06c, CRL_REG_LEN_08BIT, 0x1a}, + {0xd06d, CRL_REG_LEN_08BIT, 0x00}, + {0xd06e, CRL_REG_LEN_08BIT, 0x00}, + {0xd06f, CRL_REG_LEN_08BIT, 0x01}, + {0xd070, CRL_REG_LEN_08BIT, 0xaa}, + {0xd071, CRL_REG_LEN_08BIT, 0x10}, + {0xd072, CRL_REG_LEN_08BIT, 0x03}, + {0xd073, CRL_REG_LEN_08BIT, 0xf0}, + {0xd074, CRL_REG_LEN_08BIT, 0x18}, + {0xd075, CRL_REG_LEN_08BIT, 0x60}, + {0xd076, CRL_REG_LEN_08BIT, 0x00}, + {0xd077, CRL_REG_LEN_08BIT, 0x01}, + {0xd078, CRL_REG_LEN_08BIT, 0xa8}, + {0xd079, CRL_REG_LEN_08BIT, 0x63}, + {0xd07a, CRL_REG_LEN_08BIT, 0x07}, + {0xd07b, 
CRL_REG_LEN_08BIT, 0x80}, + {0xd07c, CRL_REG_LEN_08BIT, 0xe0}, + {0xd07d, CRL_REG_LEN_08BIT, 0xa0}, + {0xd07e, CRL_REG_LEN_08BIT, 0x00}, + {0xd07f, CRL_REG_LEN_08BIT, 0x04}, + {0xd080, CRL_REG_LEN_08BIT, 0x18}, + {0xd081, CRL_REG_LEN_08BIT, 0xc0}, + {0xd082, CRL_REG_LEN_08BIT, 0x00}, + {0xd083, CRL_REG_LEN_08BIT, 0x00}, + {0xd084, CRL_REG_LEN_08BIT, 0xa8}, + {0xd085, CRL_REG_LEN_08BIT, 0xc6}, + {0xd086, CRL_REG_LEN_08BIT, 0x00}, + {0xd087, CRL_REG_LEN_08BIT, 0x00}, + {0xd088, CRL_REG_LEN_08BIT, 0x8c}, + {0xd089, CRL_REG_LEN_08BIT, 0x63}, + {0xd08a, CRL_REG_LEN_08BIT, 0x00}, + {0xd08b, CRL_REG_LEN_08BIT, 0x00}, + {0xd08c, CRL_REG_LEN_08BIT, 0xd4}, + {0xd08d, CRL_REG_LEN_08BIT, 0x01}, + {0xd08e, CRL_REG_LEN_08BIT, 0x28}, + {0xd08f, CRL_REG_LEN_08BIT, 0x14}, + {0xd090, CRL_REG_LEN_08BIT, 0xd4}, + {0xd091, CRL_REG_LEN_08BIT, 0x01}, + {0xd092, CRL_REG_LEN_08BIT, 0x30}, + {0xd093, CRL_REG_LEN_08BIT, 0x18}, + {0xd094, CRL_REG_LEN_08BIT, 0x07}, + {0xd095, CRL_REG_LEN_08BIT, 0xff}, + {0xd096, CRL_REG_LEN_08BIT, 0xf8}, + {0xd097, CRL_REG_LEN_08BIT, 0xfd}, + {0xd098, CRL_REG_LEN_08BIT, 0x9c}, + {0xd099, CRL_REG_LEN_08BIT, 0x80}, + {0xd09a, CRL_REG_LEN_08BIT, 0x00}, + {0xd09b, CRL_REG_LEN_08BIT, 0x03}, + {0xd09c, CRL_REG_LEN_08BIT, 0xa5}, + {0xd09d, CRL_REG_LEN_08BIT, 0x6b}, + {0xd09e, CRL_REG_LEN_08BIT, 0x00}, + {0xd09f, CRL_REG_LEN_08BIT, 0xff}, + {0xd0a0, CRL_REG_LEN_08BIT, 0x18}, + {0xd0a1, CRL_REG_LEN_08BIT, 0xc0}, + {0xd0a2, CRL_REG_LEN_08BIT, 0x00}, + {0xd0a3, CRL_REG_LEN_08BIT, 0x01}, + {0xd0a4, CRL_REG_LEN_08BIT, 0xa8}, + {0xd0a5, CRL_REG_LEN_08BIT, 0xc6}, + {0xd0a6, CRL_REG_LEN_08BIT, 0x01}, + {0xd0a7, CRL_REG_LEN_08BIT, 0x02}, + {0xd0a8, CRL_REG_LEN_08BIT, 0xe1}, + {0xd0a9, CRL_REG_LEN_08BIT, 0x6b}, + {0xd0aa, CRL_REG_LEN_08BIT, 0x58}, + {0xd0ab, CRL_REG_LEN_08BIT, 0x00}, + {0xd0ac, CRL_REG_LEN_08BIT, 0x84}, + {0xd0ad, CRL_REG_LEN_08BIT, 0x8e}, + {0xd0ae, CRL_REG_LEN_08BIT, 0x00}, + {0xd0af, CRL_REG_LEN_08BIT, 0x00}, + {0xd0b0, CRL_REG_LEN_08BIT, 0xe1}, + {0xd0b1, 
CRL_REG_LEN_08BIT, 0x6b}, + {0xd0b2, CRL_REG_LEN_08BIT, 0x30}, + {0xd0b3, CRL_REG_LEN_08BIT, 0x00}, + {0xd0b4, CRL_REG_LEN_08BIT, 0x98}, + {0xd0b5, CRL_REG_LEN_08BIT, 0xb0}, + {0xd0b6, CRL_REG_LEN_08BIT, 0x00}, + {0xd0b7, CRL_REG_LEN_08BIT, 0x00}, + {0xd0b8, CRL_REG_LEN_08BIT, 0x8c}, + {0xd0b9, CRL_REG_LEN_08BIT, 0x64}, + {0xd0ba, CRL_REG_LEN_08BIT, 0x00}, + {0xd0bb, CRL_REG_LEN_08BIT, 0x6e}, + {0xd0bc, CRL_REG_LEN_08BIT, 0xe5}, + {0xd0bd, CRL_REG_LEN_08BIT, 0xa5}, + {0xd0be, CRL_REG_LEN_08BIT, 0x18}, + {0xd0bf, CRL_REG_LEN_08BIT, 0x00}, + {0xd0c0, CRL_REG_LEN_08BIT, 0x10}, + {0xd0c1, CRL_REG_LEN_08BIT, 0x00}, + {0xd0c2, CRL_REG_LEN_08BIT, 0x00}, + {0xd0c3, CRL_REG_LEN_08BIT, 0x06}, + {0xd0c4, CRL_REG_LEN_08BIT, 0x95}, + {0xd0c5, CRL_REG_LEN_08BIT, 0x8b}, + {0xd0c6, CRL_REG_LEN_08BIT, 0x00}, + {0xd0c7, CRL_REG_LEN_08BIT, 0x00}, + {0xd0c8, CRL_REG_LEN_08BIT, 0x94}, + {0xd0c9, CRL_REG_LEN_08BIT, 0xa4}, + {0xd0ca, CRL_REG_LEN_08BIT, 0x00}, + {0xd0cb, CRL_REG_LEN_08BIT, 0x70}, + {0xd0cc, CRL_REG_LEN_08BIT, 0xe5}, + {0xd0cd, CRL_REG_LEN_08BIT, 0x65}, + {0xd0ce, CRL_REG_LEN_08BIT, 0x60}, + {0xd0cf, CRL_REG_LEN_08BIT, 0x00}, + {0xd0d0, CRL_REG_LEN_08BIT, 0x0c}, + {0xd0d1, CRL_REG_LEN_08BIT, 0x00}, + {0xd0d2, CRL_REG_LEN_08BIT, 0x00}, + {0xd0d3, CRL_REG_LEN_08BIT, 0x62}, + {0xd0d4, CRL_REG_LEN_08BIT, 0x15}, + {0xd0d5, CRL_REG_LEN_08BIT, 0x00}, + {0xd0d6, CRL_REG_LEN_08BIT, 0x00}, + {0xd0d7, CRL_REG_LEN_08BIT, 0x00}, + {0xd0d8, CRL_REG_LEN_08BIT, 0x18}, + {0xd0d9, CRL_REG_LEN_08BIT, 0x60}, + {0xd0da, CRL_REG_LEN_08BIT, 0x80}, + {0xd0db, CRL_REG_LEN_08BIT, 0x06}, + {0xd0dc, CRL_REG_LEN_08BIT, 0xa8}, + {0xd0dd, CRL_REG_LEN_08BIT, 0x83}, + {0xd0de, CRL_REG_LEN_08BIT, 0x38}, + {0xd0df, CRL_REG_LEN_08BIT, 0x29}, + {0xd0e0, CRL_REG_LEN_08BIT, 0xa8}, + {0xd0e1, CRL_REG_LEN_08BIT, 0xe3}, + {0xd0e2, CRL_REG_LEN_08BIT, 0x40}, + {0xd0e3, CRL_REG_LEN_08BIT, 0x08}, + {0xd0e4, CRL_REG_LEN_08BIT, 0x8c}, + {0xd0e5, CRL_REG_LEN_08BIT, 0x84}, + {0xd0e6, CRL_REG_LEN_08BIT, 0x00}, + {0xd0e7, 
CRL_REG_LEN_08BIT, 0x00}, + {0xd0e8, CRL_REG_LEN_08BIT, 0xa8}, + {0xd0e9, CRL_REG_LEN_08BIT, 0xa3}, + {0xd0ea, CRL_REG_LEN_08BIT, 0x40}, + {0xd0eb, CRL_REG_LEN_08BIT, 0x09}, + {0xd0ec, CRL_REG_LEN_08BIT, 0xa8}, + {0xd0ed, CRL_REG_LEN_08BIT, 0xc3}, + {0xd0ee, CRL_REG_LEN_08BIT, 0x38}, + {0xd0ef, CRL_REG_LEN_08BIT, 0x2a}, + {0xd0f0, CRL_REG_LEN_08BIT, 0xd8}, + {0xd0f1, CRL_REG_LEN_08BIT, 0x07}, + {0xd0f2, CRL_REG_LEN_08BIT, 0x20}, + {0xd0f3, CRL_REG_LEN_08BIT, 0x00}, + {0xd0f4, CRL_REG_LEN_08BIT, 0x8c}, + {0xd0f5, CRL_REG_LEN_08BIT, 0x66}, + {0xd0f6, CRL_REG_LEN_08BIT, 0x00}, + {0xd0f7, CRL_REG_LEN_08BIT, 0x00}, + {0xd0f8, CRL_REG_LEN_08BIT, 0xd8}, + {0xd0f9, CRL_REG_LEN_08BIT, 0x05}, + {0xd0fa, CRL_REG_LEN_08BIT, 0x18}, + {0xd0fb, CRL_REG_LEN_08BIT, 0x00}, + {0xd0fc, CRL_REG_LEN_08BIT, 0x18}, + {0xd0fd, CRL_REG_LEN_08BIT, 0x60}, + {0xd0fe, CRL_REG_LEN_08BIT, 0x00}, + {0xd0ff, CRL_REG_LEN_08BIT, 0x01}, + {0xd100, CRL_REG_LEN_08BIT, 0x98}, + {0xd101, CRL_REG_LEN_08BIT, 0x90}, + {0xd102, CRL_REG_LEN_08BIT, 0x00}, + {0xd103, CRL_REG_LEN_08BIT, 0x00}, + {0xd104, CRL_REG_LEN_08BIT, 0x84}, + {0xd105, CRL_REG_LEN_08BIT, 0xae}, + {0xd106, CRL_REG_LEN_08BIT, 0x00}, + {0xd107, CRL_REG_LEN_08BIT, 0x00}, + {0xd108, CRL_REG_LEN_08BIT, 0xa8}, + {0xd109, CRL_REG_LEN_08BIT, 0x63}, + {0xd10a, CRL_REG_LEN_08BIT, 0x06}, + {0xd10b, CRL_REG_LEN_08BIT, 0x4c}, + {0xd10c, CRL_REG_LEN_08BIT, 0x9c}, + {0xd10d, CRL_REG_LEN_08BIT, 0xc0}, + {0xd10e, CRL_REG_LEN_08BIT, 0x00}, + {0xd10f, CRL_REG_LEN_08BIT, 0x00}, + {0xd110, CRL_REG_LEN_08BIT, 0xd8}, + {0xd111, CRL_REG_LEN_08BIT, 0x03}, + {0xd112, CRL_REG_LEN_08BIT, 0x30}, + {0xd113, CRL_REG_LEN_08BIT, 0x00}, + {0xd114, CRL_REG_LEN_08BIT, 0x8c}, + {0xd115, CRL_REG_LEN_08BIT, 0x65}, + {0xd116, CRL_REG_LEN_08BIT, 0x00}, + {0xd117, CRL_REG_LEN_08BIT, 0x6e}, + {0xd118, CRL_REG_LEN_08BIT, 0xe5}, + {0xd119, CRL_REG_LEN_08BIT, 0x84}, + {0xd11a, CRL_REG_LEN_08BIT, 0x18}, + {0xd11b, CRL_REG_LEN_08BIT, 0x00}, + {0xd11c, CRL_REG_LEN_08BIT, 0x10}, + {0xd11d, 
CRL_REG_LEN_08BIT, 0x00}, + {0xd11e, CRL_REG_LEN_08BIT, 0x00}, + {0xd11f, CRL_REG_LEN_08BIT, 0x07}, + {0xd120, CRL_REG_LEN_08BIT, 0x18}, + {0xd121, CRL_REG_LEN_08BIT, 0x80}, + {0xd122, CRL_REG_LEN_08BIT, 0x80}, + {0xd123, CRL_REG_LEN_08BIT, 0x06}, + {0xd124, CRL_REG_LEN_08BIT, 0x94}, + {0xd125, CRL_REG_LEN_08BIT, 0x65}, + {0xd126, CRL_REG_LEN_08BIT, 0x00}, + {0xd127, CRL_REG_LEN_08BIT, 0x70}, + {0xd128, CRL_REG_LEN_08BIT, 0xe5}, + {0xd129, CRL_REG_LEN_08BIT, 0x43}, + {0xd12a, CRL_REG_LEN_08BIT, 0x60}, + {0xd12b, CRL_REG_LEN_08BIT, 0x00}, + {0xd12c, CRL_REG_LEN_08BIT, 0x0c}, + {0xd12d, CRL_REG_LEN_08BIT, 0x00}, + {0xd12e, CRL_REG_LEN_08BIT, 0x00}, + {0xd12f, CRL_REG_LEN_08BIT, 0x3e}, + {0xd130, CRL_REG_LEN_08BIT, 0xa8}, + {0xd131, CRL_REG_LEN_08BIT, 0x64}, + {0xd132, CRL_REG_LEN_08BIT, 0x38}, + {0xd133, CRL_REG_LEN_08BIT, 0x24}, + {0xd134, CRL_REG_LEN_08BIT, 0x18}, + {0xd135, CRL_REG_LEN_08BIT, 0x80}, + {0xd136, CRL_REG_LEN_08BIT, 0x80}, + {0xd137, CRL_REG_LEN_08BIT, 0x06}, + {0xd138, CRL_REG_LEN_08BIT, 0xa8}, + {0xd139, CRL_REG_LEN_08BIT, 0x64}, + {0xd13a, CRL_REG_LEN_08BIT, 0x38}, + {0xd13b, CRL_REG_LEN_08BIT, 0x24}, + {0xd13c, CRL_REG_LEN_08BIT, 0x8c}, + {0xd13d, CRL_REG_LEN_08BIT, 0x63}, + {0xd13e, CRL_REG_LEN_08BIT, 0x00}, + {0xd13f, CRL_REG_LEN_08BIT, 0x00}, + {0xd140, CRL_REG_LEN_08BIT, 0xa4}, + {0xd141, CRL_REG_LEN_08BIT, 0x63}, + {0xd142, CRL_REG_LEN_08BIT, 0x00}, + {0xd143, CRL_REG_LEN_08BIT, 0x40}, + {0xd144, CRL_REG_LEN_08BIT, 0xbc}, + {0xd145, CRL_REG_LEN_08BIT, 0x23}, + {0xd146, CRL_REG_LEN_08BIT, 0x00}, + {0xd147, CRL_REG_LEN_08BIT, 0x00}, + {0xd148, CRL_REG_LEN_08BIT, 0x0c}, + {0xd149, CRL_REG_LEN_08BIT, 0x00}, + {0xd14a, CRL_REG_LEN_08BIT, 0x00}, + {0xd14b, CRL_REG_LEN_08BIT, 0x2a}, + {0xd14c, CRL_REG_LEN_08BIT, 0xa8}, + {0xd14d, CRL_REG_LEN_08BIT, 0x64}, + {0xd14e, CRL_REG_LEN_08BIT, 0x6e}, + {0xd14f, CRL_REG_LEN_08BIT, 0x44}, + {0xd150, CRL_REG_LEN_08BIT, 0x19}, + {0xd151, CRL_REG_LEN_08BIT, 0x00}, + {0xd152, CRL_REG_LEN_08BIT, 0x80}, + {0xd153, 
CRL_REG_LEN_08BIT, 0x06}, + {0xd154, CRL_REG_LEN_08BIT, 0xa8}, + {0xd155, CRL_REG_LEN_08BIT, 0xe8}, + {0xd156, CRL_REG_LEN_08BIT, 0x3d}, + {0xd157, CRL_REG_LEN_08BIT, 0x05}, + {0xd158, CRL_REG_LEN_08BIT, 0x8c}, + {0xd159, CRL_REG_LEN_08BIT, 0x67}, + {0xd15a, CRL_REG_LEN_08BIT, 0x00}, + {0xd15b, CRL_REG_LEN_08BIT, 0x00}, + {0xd15c, CRL_REG_LEN_08BIT, 0xb8}, + {0xd15d, CRL_REG_LEN_08BIT, 0x63}, + {0xd15e, CRL_REG_LEN_08BIT, 0x00}, + {0xd15f, CRL_REG_LEN_08BIT, 0x18}, + {0xd160, CRL_REG_LEN_08BIT, 0xb8}, + {0xd161, CRL_REG_LEN_08BIT, 0x63}, + {0xd162, CRL_REG_LEN_08BIT, 0x00}, + {0xd163, CRL_REG_LEN_08BIT, 0x98}, + {0xd164, CRL_REG_LEN_08BIT, 0xbc}, + {0xd165, CRL_REG_LEN_08BIT, 0x03}, + {0xd166, CRL_REG_LEN_08BIT, 0x00}, + {0xd167, CRL_REG_LEN_08BIT, 0x00}, + {0xd168, CRL_REG_LEN_08BIT, 0x10}, + {0xd169, CRL_REG_LEN_08BIT, 0x00}, + {0xd16a, CRL_REG_LEN_08BIT, 0x00}, + {0xd16b, CRL_REG_LEN_08BIT, 0x10}, + {0xd16c, CRL_REG_LEN_08BIT, 0xa9}, + {0xd16d, CRL_REG_LEN_08BIT, 0x48}, + {0xd16e, CRL_REG_LEN_08BIT, 0x67}, + {0xd16f, CRL_REG_LEN_08BIT, 0x02}, + {0xd170, CRL_REG_LEN_08BIT, 0xb8}, + {0xd171, CRL_REG_LEN_08BIT, 0xa3}, + {0xd172, CRL_REG_LEN_08BIT, 0x00}, + {0xd173, CRL_REG_LEN_08BIT, 0x19}, + {0xd174, CRL_REG_LEN_08BIT, 0x8c}, + {0xd175, CRL_REG_LEN_08BIT, 0x8a}, + {0xd176, CRL_REG_LEN_08BIT, 0x00}, + {0xd177, CRL_REG_LEN_08BIT, 0x00}, + {0xd178, CRL_REG_LEN_08BIT, 0xa9}, + {0xd179, CRL_REG_LEN_08BIT, 0x68}, + {0xd17a, CRL_REG_LEN_08BIT, 0x67}, + {0xd17b, CRL_REG_LEN_08BIT, 0x03}, + {0xd17c, CRL_REG_LEN_08BIT, 0xb8}, + {0xd17d, CRL_REG_LEN_08BIT, 0xc4}, + {0xd17e, CRL_REG_LEN_08BIT, 0x00}, + {0xd17f, CRL_REG_LEN_08BIT, 0x08}, + {0xd180, CRL_REG_LEN_08BIT, 0x8c}, + {0xd181, CRL_REG_LEN_08BIT, 0x6b}, + {0xd182, CRL_REG_LEN_08BIT, 0x00}, + {0xd183, CRL_REG_LEN_08BIT, 0x00}, + {0xd184, CRL_REG_LEN_08BIT, 0xb8}, + {0xd185, CRL_REG_LEN_08BIT, 0x85}, + {0xd186, CRL_REG_LEN_08BIT, 0x00}, + {0xd187, CRL_REG_LEN_08BIT, 0x98}, + {0xd188, CRL_REG_LEN_08BIT, 0xe0}, + {0xd189, 
CRL_REG_LEN_08BIT, 0x63}, + {0xd18a, CRL_REG_LEN_08BIT, 0x30}, + {0xd18b, CRL_REG_LEN_08BIT, 0x04}, + {0xd18c, CRL_REG_LEN_08BIT, 0xe0}, + {0xd18d, CRL_REG_LEN_08BIT, 0x64}, + {0xd18e, CRL_REG_LEN_08BIT, 0x18}, + {0xd18f, CRL_REG_LEN_08BIT, 0x00}, + {0xd190, CRL_REG_LEN_08BIT, 0xa4}, + {0xd191, CRL_REG_LEN_08BIT, 0x83}, + {0xd192, CRL_REG_LEN_08BIT, 0xff}, + {0xd193, CRL_REG_LEN_08BIT, 0xff}, + {0xd194, CRL_REG_LEN_08BIT, 0xb8}, + {0xd195, CRL_REG_LEN_08BIT, 0x64}, + {0xd196, CRL_REG_LEN_08BIT, 0x00}, + {0xd197, CRL_REG_LEN_08BIT, 0x48}, + {0xd198, CRL_REG_LEN_08BIT, 0xd8}, + {0xd199, CRL_REG_LEN_08BIT, 0x0a}, + {0xd19a, CRL_REG_LEN_08BIT, 0x18}, + {0xd19b, CRL_REG_LEN_08BIT, 0x00}, + {0xd19c, CRL_REG_LEN_08BIT, 0xd8}, + {0xd19d, CRL_REG_LEN_08BIT, 0x0b}, + {0xd19e, CRL_REG_LEN_08BIT, 0x20}, + {0xd19f, CRL_REG_LEN_08BIT, 0x00}, + {0xd1a0, CRL_REG_LEN_08BIT, 0x9c}, + {0xd1a1, CRL_REG_LEN_08BIT, 0x60}, + {0xd1a2, CRL_REG_LEN_08BIT, 0x00}, + {0xd1a3, CRL_REG_LEN_08BIT, 0x00}, + {0xd1a4, CRL_REG_LEN_08BIT, 0xd8}, + {0xd1a5, CRL_REG_LEN_08BIT, 0x07}, + {0xd1a6, CRL_REG_LEN_08BIT, 0x18}, + {0xd1a7, CRL_REG_LEN_08BIT, 0x00}, + {0xd1a8, CRL_REG_LEN_08BIT, 0xa8}, + {0xd1a9, CRL_REG_LEN_08BIT, 0x68}, + {0xd1aa, CRL_REG_LEN_08BIT, 0x38}, + {0xd1ab, CRL_REG_LEN_08BIT, 0x22}, + {0xd1ac, CRL_REG_LEN_08BIT, 0x9c}, + {0xd1ad, CRL_REG_LEN_08BIT, 0x80}, + {0xd1ae, CRL_REG_LEN_08BIT, 0x00}, + {0xd1af, CRL_REG_LEN_08BIT, 0x70}, + {0xd1b0, CRL_REG_LEN_08BIT, 0xa8}, + {0xd1b1, CRL_REG_LEN_08BIT, 0xe8}, + {0xd1b2, CRL_REG_LEN_08BIT, 0x38}, + {0xd1b3, CRL_REG_LEN_08BIT, 0x43}, + {0xd1b4, CRL_REG_LEN_08BIT, 0xd8}, + {0xd1b5, CRL_REG_LEN_08BIT, 0x03}, + {0xd1b6, CRL_REG_LEN_08BIT, 0x20}, + {0xd1b7, CRL_REG_LEN_08BIT, 0x00}, + {0xd1b8, CRL_REG_LEN_08BIT, 0x9c}, + {0xd1b9, CRL_REG_LEN_08BIT, 0xa0}, + {0xd1ba, CRL_REG_LEN_08BIT, 0x00}, + {0xd1bb, CRL_REG_LEN_08BIT, 0x00}, + {0xd1bc, CRL_REG_LEN_08BIT, 0xa8}, + {0xd1bd, CRL_REG_LEN_08BIT, 0xc8}, + {0xd1be, CRL_REG_LEN_08BIT, 0x38}, + {0xd1bf, 
CRL_REG_LEN_08BIT, 0x42}, + {0xd1c0, CRL_REG_LEN_08BIT, 0x8c}, + {0xd1c1, CRL_REG_LEN_08BIT, 0x66}, + {0xd1c2, CRL_REG_LEN_08BIT, 0x00}, + {0xd1c3, CRL_REG_LEN_08BIT, 0x00}, + {0xd1c4, CRL_REG_LEN_08BIT, 0x9c}, + {0xd1c5, CRL_REG_LEN_08BIT, 0xa5}, + {0xd1c6, CRL_REG_LEN_08BIT, 0x00}, + {0xd1c7, CRL_REG_LEN_08BIT, 0x01}, + {0xd1c8, CRL_REG_LEN_08BIT, 0xb8}, + {0xd1c9, CRL_REG_LEN_08BIT, 0x83}, + {0xd1ca, CRL_REG_LEN_08BIT, 0x00}, + {0xd1cb, CRL_REG_LEN_08BIT, 0x08}, + {0xd1cc, CRL_REG_LEN_08BIT, 0xa4}, + {0xd1cd, CRL_REG_LEN_08BIT, 0xa5}, + {0xd1ce, CRL_REG_LEN_08BIT, 0x00}, + {0xd1cf, CRL_REG_LEN_08BIT, 0xff}, + {0xd1d0, CRL_REG_LEN_08BIT, 0x8c}, + {0xd1d1, CRL_REG_LEN_08BIT, 0x67}, + {0xd1d2, CRL_REG_LEN_08BIT, 0x00}, + {0xd1d3, CRL_REG_LEN_08BIT, 0x00}, + {0xd1d4, CRL_REG_LEN_08BIT, 0xe0}, + {0xd1d5, CRL_REG_LEN_08BIT, 0x63}, + {0xd1d6, CRL_REG_LEN_08BIT, 0x20}, + {0xd1d7, CRL_REG_LEN_08BIT, 0x00}, + {0xd1d8, CRL_REG_LEN_08BIT, 0xa4}, + {0xd1d9, CRL_REG_LEN_08BIT, 0x63}, + {0xd1da, CRL_REG_LEN_08BIT, 0xff}, + {0xd1db, CRL_REG_LEN_08BIT, 0xff}, + {0xd1dc, CRL_REG_LEN_08BIT, 0xbc}, + {0xd1dd, CRL_REG_LEN_08BIT, 0x43}, + {0xd1de, CRL_REG_LEN_08BIT, 0x00}, + {0xd1df, CRL_REG_LEN_08BIT, 0x07}, + {0xd1e0, CRL_REG_LEN_08BIT, 0x0c}, + {0xd1e1, CRL_REG_LEN_08BIT, 0x00}, + {0xd1e2, CRL_REG_LEN_08BIT, 0x00}, + {0xd1e3, CRL_REG_LEN_08BIT, 0x5b}, + {0xd1e4, CRL_REG_LEN_08BIT, 0xbc}, + {0xd1e5, CRL_REG_LEN_08BIT, 0x05}, + {0xd1e6, CRL_REG_LEN_08BIT, 0x00}, + {0xd1e7, CRL_REG_LEN_08BIT, 0x02}, + {0xd1e8, CRL_REG_LEN_08BIT, 0x03}, + {0xd1e9, CRL_REG_LEN_08BIT, 0xff}, + {0xd1ea, CRL_REG_LEN_08BIT, 0xff}, + {0xd1eb, CRL_REG_LEN_08BIT, 0xf6}, + {0xd1ec, CRL_REG_LEN_08BIT, 0x9c}, + {0xd1ed, CRL_REG_LEN_08BIT, 0xa0}, + {0xd1ee, CRL_REG_LEN_08BIT, 0x00}, + {0xd1ef, CRL_REG_LEN_08BIT, 0x00}, + {0xd1f0, CRL_REG_LEN_08BIT, 0xa8}, + {0xd1f1, CRL_REG_LEN_08BIT, 0xa4}, + {0xd1f2, CRL_REG_LEN_08BIT, 0x55}, + {0xd1f3, CRL_REG_LEN_08BIT, 0x86}, + {0xd1f4, CRL_REG_LEN_08BIT, 0x8c}, + {0xd1f5, 
CRL_REG_LEN_08BIT, 0x63}, + {0xd1f6, CRL_REG_LEN_08BIT, 0x00}, + {0xd1f7, CRL_REG_LEN_08BIT, 0x00}, + {0xd1f8, CRL_REG_LEN_08BIT, 0xa8}, + {0xd1f9, CRL_REG_LEN_08BIT, 0xc4}, + {0xd1fa, CRL_REG_LEN_08BIT, 0x6e}, + {0xd1fb, CRL_REG_LEN_08BIT, 0x45}, + {0xd1fc, CRL_REG_LEN_08BIT, 0xa8}, + {0xd1fd, CRL_REG_LEN_08BIT, 0xe4}, + {0xd1fe, CRL_REG_LEN_08BIT, 0x55}, + {0xd1ff, CRL_REG_LEN_08BIT, 0x87}, + {0xd200, CRL_REG_LEN_08BIT, 0xd8}, + {0xd201, CRL_REG_LEN_08BIT, 0x05}, + {0xd202, CRL_REG_LEN_08BIT, 0x18}, + {0xd203, CRL_REG_LEN_08BIT, 0x00}, + {0xd204, CRL_REG_LEN_08BIT, 0x8c}, + {0xd205, CRL_REG_LEN_08BIT, 0x66}, + {0xd206, CRL_REG_LEN_08BIT, 0x00}, + {0xd207, CRL_REG_LEN_08BIT, 0x00}, + {0xd208, CRL_REG_LEN_08BIT, 0xa8}, + {0xd209, CRL_REG_LEN_08BIT, 0xa4}, + {0xd20a, CRL_REG_LEN_08BIT, 0x6e}, + {0xd20b, CRL_REG_LEN_08BIT, 0x46}, + {0xd20c, CRL_REG_LEN_08BIT, 0xd8}, + {0xd20d, CRL_REG_LEN_08BIT, 0x07}, + {0xd20e, CRL_REG_LEN_08BIT, 0x18}, + {0xd20f, CRL_REG_LEN_08BIT, 0x00}, + {0xd210, CRL_REG_LEN_08BIT, 0xa8}, + {0xd211, CRL_REG_LEN_08BIT, 0x84}, + {0xd212, CRL_REG_LEN_08BIT, 0x55}, + {0xd213, CRL_REG_LEN_08BIT, 0x88}, + {0xd214, CRL_REG_LEN_08BIT, 0x8c}, + {0xd215, CRL_REG_LEN_08BIT, 0x65}, + {0xd216, CRL_REG_LEN_08BIT, 0x00}, + {0xd217, CRL_REG_LEN_08BIT, 0x00}, + {0xd218, CRL_REG_LEN_08BIT, 0xd8}, + {0xd219, CRL_REG_LEN_08BIT, 0x04}, + {0xd21a, CRL_REG_LEN_08BIT, 0x18}, + {0xd21b, CRL_REG_LEN_08BIT, 0x00}, + {0xd21c, CRL_REG_LEN_08BIT, 0x03}, + {0xd21d, CRL_REG_LEN_08BIT, 0xff}, + {0xd21e, CRL_REG_LEN_08BIT, 0xff}, + {0xd21f, CRL_REG_LEN_08BIT, 0xce}, + {0xd220, CRL_REG_LEN_08BIT, 0x19}, + {0xd221, CRL_REG_LEN_08BIT, 0x00}, + {0xd222, CRL_REG_LEN_08BIT, 0x80}, + {0xd223, CRL_REG_LEN_08BIT, 0x06}, + {0xd224, CRL_REG_LEN_08BIT, 0x8c}, + {0xd225, CRL_REG_LEN_08BIT, 0x63}, + {0xd226, CRL_REG_LEN_08BIT, 0x00}, + {0xd227, CRL_REG_LEN_08BIT, 0x00}, + {0xd228, CRL_REG_LEN_08BIT, 0xa4}, + {0xd229, CRL_REG_LEN_08BIT, 0x63}, + {0xd22a, CRL_REG_LEN_08BIT, 0x00}, + {0xd22b, 
CRL_REG_LEN_08BIT, 0x40}, + {0xd22c, CRL_REG_LEN_08BIT, 0xbc}, + {0xd22d, CRL_REG_LEN_08BIT, 0x23}, + {0xd22e, CRL_REG_LEN_08BIT, 0x00}, + {0xd22f, CRL_REG_LEN_08BIT, 0x00}, + {0xd230, CRL_REG_LEN_08BIT, 0x13}, + {0xd231, CRL_REG_LEN_08BIT, 0xff}, + {0xd232, CRL_REG_LEN_08BIT, 0xff}, + {0xd233, CRL_REG_LEN_08BIT, 0xc8}, + {0xd234, CRL_REG_LEN_08BIT, 0x9d}, + {0xd235, CRL_REG_LEN_08BIT, 0x00}, + {0xd236, CRL_REG_LEN_08BIT, 0x00}, + {0xd237, CRL_REG_LEN_08BIT, 0x40}, + {0xd238, CRL_REG_LEN_08BIT, 0xa8}, + {0xd239, CRL_REG_LEN_08BIT, 0x64}, + {0xd23a, CRL_REG_LEN_08BIT, 0x55}, + {0xd23b, CRL_REG_LEN_08BIT, 0x86}, + {0xd23c, CRL_REG_LEN_08BIT, 0xa8}, + {0xd23d, CRL_REG_LEN_08BIT, 0xa4}, + {0xd23e, CRL_REG_LEN_08BIT, 0x55}, + {0xd23f, CRL_REG_LEN_08BIT, 0x87}, + {0xd240, CRL_REG_LEN_08BIT, 0xd8}, + {0xd241, CRL_REG_LEN_08BIT, 0x03}, + {0xd242, CRL_REG_LEN_08BIT, 0x40}, + {0xd243, CRL_REG_LEN_08BIT, 0x00}, + {0xd244, CRL_REG_LEN_08BIT, 0xa8}, + {0xd245, CRL_REG_LEN_08BIT, 0x64}, + {0xd246, CRL_REG_LEN_08BIT, 0x55}, + {0xd247, CRL_REG_LEN_08BIT, 0x88}, + {0xd248, CRL_REG_LEN_08BIT, 0xd8}, + {0xd249, CRL_REG_LEN_08BIT, 0x05}, + {0xd24a, CRL_REG_LEN_08BIT, 0x40}, + {0xd24b, CRL_REG_LEN_08BIT, 0x00}, + {0xd24c, CRL_REG_LEN_08BIT, 0xd8}, + {0xd24d, CRL_REG_LEN_08BIT, 0x03}, + {0xd24e, CRL_REG_LEN_08BIT, 0x40}, + {0xd24f, CRL_REG_LEN_08BIT, 0x00}, + {0xd250, CRL_REG_LEN_08BIT, 0x03}, + {0xd251, CRL_REG_LEN_08BIT, 0xff}, + {0xd252, CRL_REG_LEN_08BIT, 0xff}, + {0xd253, CRL_REG_LEN_08BIT, 0xc1}, + {0xd254, CRL_REG_LEN_08BIT, 0x19}, + {0xd255, CRL_REG_LEN_08BIT, 0x00}, + {0xd256, CRL_REG_LEN_08BIT, 0x80}, + {0xd257, CRL_REG_LEN_08BIT, 0x06}, + {0xd258, CRL_REG_LEN_08BIT, 0x94}, + {0xd259, CRL_REG_LEN_08BIT, 0x84}, + {0xd25a, CRL_REG_LEN_08BIT, 0x00}, + {0xd25b, CRL_REG_LEN_08BIT, 0x72}, + {0xd25c, CRL_REG_LEN_08BIT, 0xe5}, + {0xd25d, CRL_REG_LEN_08BIT, 0xa4}, + {0xd25e, CRL_REG_LEN_08BIT, 0x60}, + {0xd25f, CRL_REG_LEN_08BIT, 0x00}, + {0xd260, CRL_REG_LEN_08BIT, 0x0c}, + {0xd261, 
CRL_REG_LEN_08BIT, 0x00}, + {0xd262, CRL_REG_LEN_08BIT, 0x00}, + {0xd263, CRL_REG_LEN_08BIT, 0x3f}, + {0xd264, CRL_REG_LEN_08BIT, 0x9d}, + {0xd265, CRL_REG_LEN_08BIT, 0x60}, + {0xd266, CRL_REG_LEN_08BIT, 0x01}, + {0xd267, CRL_REG_LEN_08BIT, 0x00}, + {0xd268, CRL_REG_LEN_08BIT, 0x85}, + {0xd269, CRL_REG_LEN_08BIT, 0x4e}, + {0xd26a, CRL_REG_LEN_08BIT, 0x00}, + {0xd26b, CRL_REG_LEN_08BIT, 0x00}, + {0xd26c, CRL_REG_LEN_08BIT, 0x98}, + {0xd26d, CRL_REG_LEN_08BIT, 0x70}, + {0xd26e, CRL_REG_LEN_08BIT, 0x00}, + {0xd26f, CRL_REG_LEN_08BIT, 0x00}, + {0xd270, CRL_REG_LEN_08BIT, 0x8c}, + {0xd271, CRL_REG_LEN_08BIT, 0x8a}, + {0xd272, CRL_REG_LEN_08BIT, 0x00}, + {0xd273, CRL_REG_LEN_08BIT, 0x6f}, + {0xd274, CRL_REG_LEN_08BIT, 0xe5}, + {0xd275, CRL_REG_LEN_08BIT, 0x63}, + {0xd276, CRL_REG_LEN_08BIT, 0x20}, + {0xd277, CRL_REG_LEN_08BIT, 0x00}, + {0xd278, CRL_REG_LEN_08BIT, 0x10}, + {0xd279, CRL_REG_LEN_08BIT, 0x00}, + {0xd27a, CRL_REG_LEN_08BIT, 0x00}, + {0xd27b, CRL_REG_LEN_08BIT, 0x07}, + {0xd27c, CRL_REG_LEN_08BIT, 0x15}, + {0xd27d, CRL_REG_LEN_08BIT, 0x00}, + {0xd27e, CRL_REG_LEN_08BIT, 0x00}, + {0xd27f, CRL_REG_LEN_08BIT, 0x00}, + {0xd280, CRL_REG_LEN_08BIT, 0x8c}, + {0xd281, CRL_REG_LEN_08BIT, 0xaa}, + {0xd282, CRL_REG_LEN_08BIT, 0x00}, + {0xd283, CRL_REG_LEN_08BIT, 0x6e}, + {0xd284, CRL_REG_LEN_08BIT, 0xe0}, + {0xd285, CRL_REG_LEN_08BIT, 0x63}, + {0xd286, CRL_REG_LEN_08BIT, 0x28}, + {0xd287, CRL_REG_LEN_08BIT, 0x02}, + {0xd288, CRL_REG_LEN_08BIT, 0xe0}, + {0xd289, CRL_REG_LEN_08BIT, 0x84}, + {0xd28a, CRL_REG_LEN_08BIT, 0x28}, + {0xd28b, CRL_REG_LEN_08BIT, 0x02}, + {0xd28c, CRL_REG_LEN_08BIT, 0x07}, + {0xd28d, CRL_REG_LEN_08BIT, 0xff}, + {0xd28e, CRL_REG_LEN_08BIT, 0xf8}, + {0xd28f, CRL_REG_LEN_08BIT, 0x66}, + {0xd290, CRL_REG_LEN_08BIT, 0xe0}, + {0xd291, CRL_REG_LEN_08BIT, 0x63}, + {0xd292, CRL_REG_LEN_08BIT, 0x5b}, + {0xd293, CRL_REG_LEN_08BIT, 0x06}, + {0xd294, CRL_REG_LEN_08BIT, 0x8c}, + {0xd295, CRL_REG_LEN_08BIT, 0x6a}, + {0xd296, CRL_REG_LEN_08BIT, 0x00}, + {0xd297, 
CRL_REG_LEN_08BIT, 0x77}, + {0xd298, CRL_REG_LEN_08BIT, 0xe0}, + {0xd299, CRL_REG_LEN_08BIT, 0x63}, + {0xd29a, CRL_REG_LEN_08BIT, 0x5b}, + {0xd29b, CRL_REG_LEN_08BIT, 0x06}, + {0xd29c, CRL_REG_LEN_08BIT, 0xbd}, + {0xd29d, CRL_REG_LEN_08BIT, 0x63}, + {0xd29e, CRL_REG_LEN_08BIT, 0x00}, + {0xd29f, CRL_REG_LEN_08BIT, 0x00}, + {0xd2a0, CRL_REG_LEN_08BIT, 0x0c}, + {0xd2a1, CRL_REG_LEN_08BIT, 0x00}, + {0xd2a2, CRL_REG_LEN_08BIT, 0x00}, + {0xd2a3, CRL_REG_LEN_08BIT, 0x3c}, + {0xd2a4, CRL_REG_LEN_08BIT, 0x15}, + {0xd2a5, CRL_REG_LEN_08BIT, 0x00}, + {0xd2a6, CRL_REG_LEN_08BIT, 0x00}, + {0xd2a7, CRL_REG_LEN_08BIT, 0x00}, + {0xd2a8, CRL_REG_LEN_08BIT, 0x8c}, + {0xd2a9, CRL_REG_LEN_08BIT, 0x8a}, + {0xd2aa, CRL_REG_LEN_08BIT, 0x00}, + {0xd2ab, CRL_REG_LEN_08BIT, 0x78}, + {0xd2ac, CRL_REG_LEN_08BIT, 0xb8}, + {0xd2ad, CRL_REG_LEN_08BIT, 0x63}, + {0xd2ae, CRL_REG_LEN_08BIT, 0x00}, + {0xd2af, CRL_REG_LEN_08BIT, 0x88}, + {0xd2b0, CRL_REG_LEN_08BIT, 0xe1}, + {0xd2b1, CRL_REG_LEN_08BIT, 0x64}, + {0xd2b2, CRL_REG_LEN_08BIT, 0x5b}, + {0xd2b3, CRL_REG_LEN_08BIT, 0x06}, + {0xd2b4, CRL_REG_LEN_08BIT, 0xbd}, + {0xd2b5, CRL_REG_LEN_08BIT, 0x6b}, + {0xd2b6, CRL_REG_LEN_08BIT, 0x00}, + {0xd2b7, CRL_REG_LEN_08BIT, 0x00}, + {0xd2b8, CRL_REG_LEN_08BIT, 0x0c}, + {0xd2b9, CRL_REG_LEN_08BIT, 0x00}, + {0xd2ba, CRL_REG_LEN_08BIT, 0x00}, + {0xd2bb, CRL_REG_LEN_08BIT, 0x34}, + {0xd2bc, CRL_REG_LEN_08BIT, 0xd4}, + {0xd2bd, CRL_REG_LEN_08BIT, 0x01}, + {0xd2be, CRL_REG_LEN_08BIT, 0x18}, + {0xd2bf, CRL_REG_LEN_08BIT, 0x14}, + {0xd2c0, CRL_REG_LEN_08BIT, 0xb9}, + {0xd2c1, CRL_REG_LEN_08BIT, 0x6b}, + {0xd2c2, CRL_REG_LEN_08BIT, 0x00}, + {0xd2c3, CRL_REG_LEN_08BIT, 0x88}, + {0xd2c4, CRL_REG_LEN_08BIT, 0x85}, + {0xd2c5, CRL_REG_LEN_08BIT, 0x01}, + {0xd2c6, CRL_REG_LEN_08BIT, 0x00}, + {0xd2c7, CRL_REG_LEN_08BIT, 0x14}, + {0xd2c8, CRL_REG_LEN_08BIT, 0xbd}, + {0xd2c9, CRL_REG_LEN_08BIT, 0x68}, + {0xd2ca, CRL_REG_LEN_08BIT, 0x00}, + {0xd2cb, CRL_REG_LEN_08BIT, 0x00}, + {0xd2cc, CRL_REG_LEN_08BIT, 0x0c}, + {0xd2cd, 
CRL_REG_LEN_08BIT, 0x00}, + {0xd2ce, CRL_REG_LEN_08BIT, 0x00}, + {0xd2cf, CRL_REG_LEN_08BIT, 0x2c}, + {0xd2d0, CRL_REG_LEN_08BIT, 0xd4}, + {0xd2d1, CRL_REG_LEN_08BIT, 0x01}, + {0xd2d2, CRL_REG_LEN_08BIT, 0x58}, + {0xd2d3, CRL_REG_LEN_08BIT, 0x18}, + {0xd2d4, CRL_REG_LEN_08BIT, 0x84}, + {0xd2d5, CRL_REG_LEN_08BIT, 0x81}, + {0xd2d6, CRL_REG_LEN_08BIT, 0x00}, + {0xd2d7, CRL_REG_LEN_08BIT, 0x14}, + {0xd2d8, CRL_REG_LEN_08BIT, 0xbd}, + {0xd2d9, CRL_REG_LEN_08BIT, 0xa4}, + {0xd2da, CRL_REG_LEN_08BIT, 0x01}, + {0xd2db, CRL_REG_LEN_08BIT, 0x00}, + {0xd2dc, CRL_REG_LEN_08BIT, 0x10}, + {0xd2dd, CRL_REG_LEN_08BIT, 0x00}, + {0xd2de, CRL_REG_LEN_08BIT, 0x00}, + {0xd2df, CRL_REG_LEN_08BIT, 0x05}, + {0xd2e0, CRL_REG_LEN_08BIT, 0x84}, + {0xd2e1, CRL_REG_LEN_08BIT, 0xc1}, + {0xd2e2, CRL_REG_LEN_08BIT, 0x00}, + {0xd2e3, CRL_REG_LEN_08BIT, 0x18}, + {0xd2e4, CRL_REG_LEN_08BIT, 0x9c}, + {0xd2e5, CRL_REG_LEN_08BIT, 0xa0}, + {0xd2e6, CRL_REG_LEN_08BIT, 0x01}, + {0xd2e7, CRL_REG_LEN_08BIT, 0x00}, + {0xd2e8, CRL_REG_LEN_08BIT, 0xd4}, + {0xd2e9, CRL_REG_LEN_08BIT, 0x01}, + {0xd2ea, CRL_REG_LEN_08BIT, 0x28}, + {0xd2eb, CRL_REG_LEN_08BIT, 0x14}, + {0xd2ec, CRL_REG_LEN_08BIT, 0x84}, + {0xd2ed, CRL_REG_LEN_08BIT, 0xc1}, + {0xd2ee, CRL_REG_LEN_08BIT, 0x00}, + {0xd2ef, CRL_REG_LEN_08BIT, 0x18}, + {0xd2f0, CRL_REG_LEN_08BIT, 0xbd}, + {0xd2f1, CRL_REG_LEN_08BIT, 0x66}, + {0xd2f2, CRL_REG_LEN_08BIT, 0x00}, + {0xd2f3, CRL_REG_LEN_08BIT, 0x00}, + {0xd2f4, CRL_REG_LEN_08BIT, 0x0c}, + {0xd2f5, CRL_REG_LEN_08BIT, 0x00}, + {0xd2f6, CRL_REG_LEN_08BIT, 0x00}, + {0xd2f7, CRL_REG_LEN_08BIT, 0x20}, + {0xd2f8, CRL_REG_LEN_08BIT, 0x9d}, + {0xd2f9, CRL_REG_LEN_08BIT, 0x00}, + {0xd2fa, CRL_REG_LEN_08BIT, 0x00}, + {0xd2fb, CRL_REG_LEN_08BIT, 0x00}, + {0xd2fc, CRL_REG_LEN_08BIT, 0x84}, + {0xd2fd, CRL_REG_LEN_08BIT, 0x61}, + {0xd2fe, CRL_REG_LEN_08BIT, 0x00}, + {0xd2ff, CRL_REG_LEN_08BIT, 0x18}, + {0xd300, CRL_REG_LEN_08BIT, 0xbd}, + {0xd301, CRL_REG_LEN_08BIT, 0xa3}, + {0xd302, CRL_REG_LEN_08BIT, 0x01}, + {0xd303, 
CRL_REG_LEN_08BIT, 0x00}, + {0xd304, CRL_REG_LEN_08BIT, 0x10}, + {0xd305, CRL_REG_LEN_08BIT, 0x00}, + {0xd306, CRL_REG_LEN_08BIT, 0x00}, + {0xd307, CRL_REG_LEN_08BIT, 0x03}, + {0xd308, CRL_REG_LEN_08BIT, 0x9c}, + {0xd309, CRL_REG_LEN_08BIT, 0x80}, + {0xd30a, CRL_REG_LEN_08BIT, 0x01}, + {0xd30b, CRL_REG_LEN_08BIT, 0x00}, + {0xd30c, CRL_REG_LEN_08BIT, 0xd4}, + {0xd30d, CRL_REG_LEN_08BIT, 0x01}, + {0xd30e, CRL_REG_LEN_08BIT, 0x20}, + {0xd30f, CRL_REG_LEN_08BIT, 0x18}, + {0xd310, CRL_REG_LEN_08BIT, 0x18}, + {0xd311, CRL_REG_LEN_08BIT, 0x60}, + {0xd312, CRL_REG_LEN_08BIT, 0x80}, + {0xd313, CRL_REG_LEN_08BIT, 0x06}, + {0xd314, CRL_REG_LEN_08BIT, 0x85}, + {0xd315, CRL_REG_LEN_08BIT, 0x01}, + {0xd316, CRL_REG_LEN_08BIT, 0x00}, + {0xd317, CRL_REG_LEN_08BIT, 0x14}, + {0xd318, CRL_REG_LEN_08BIT, 0xa8}, + {0xd319, CRL_REG_LEN_08BIT, 0x83}, + {0xd31a, CRL_REG_LEN_08BIT, 0x38}, + {0xd31b, CRL_REG_LEN_08BIT, 0x29}, + {0xd31c, CRL_REG_LEN_08BIT, 0xa8}, + {0xd31d, CRL_REG_LEN_08BIT, 0xc3}, + {0xd31e, CRL_REG_LEN_08BIT, 0x40}, + {0xd31f, CRL_REG_LEN_08BIT, 0x08}, + {0xd320, CRL_REG_LEN_08BIT, 0x8c}, + {0xd321, CRL_REG_LEN_08BIT, 0x84}, + {0xd322, CRL_REG_LEN_08BIT, 0x00}, + {0xd323, CRL_REG_LEN_08BIT, 0x00}, + {0xd324, CRL_REG_LEN_08BIT, 0xa8}, + {0xd325, CRL_REG_LEN_08BIT, 0xa3}, + {0xd326, CRL_REG_LEN_08BIT, 0x38}, + {0xd327, CRL_REG_LEN_08BIT, 0x2a}, + {0xd328, CRL_REG_LEN_08BIT, 0xa8}, + {0xd329, CRL_REG_LEN_08BIT, 0xe3}, + {0xd32a, CRL_REG_LEN_08BIT, 0x40}, + {0xd32b, CRL_REG_LEN_08BIT, 0x09}, + {0xd32c, CRL_REG_LEN_08BIT, 0xe0}, + {0xd32d, CRL_REG_LEN_08BIT, 0x64}, + {0xd32e, CRL_REG_LEN_08BIT, 0x40}, + {0xd32f, CRL_REG_LEN_08BIT, 0x00}, + {0xd330, CRL_REG_LEN_08BIT, 0xd8}, + {0xd331, CRL_REG_LEN_08BIT, 0x06}, + {0xd332, CRL_REG_LEN_08BIT, 0x18}, + {0xd333, CRL_REG_LEN_08BIT, 0x00}, + {0xd334, CRL_REG_LEN_08BIT, 0x8c}, + {0xd335, CRL_REG_LEN_08BIT, 0x65}, + {0xd336, CRL_REG_LEN_08BIT, 0x00}, + {0xd337, CRL_REG_LEN_08BIT, 0x00}, + {0xd338, CRL_REG_LEN_08BIT, 0x84}, + {0xd339, 
CRL_REG_LEN_08BIT, 0x81}, + {0xd33a, CRL_REG_LEN_08BIT, 0x00}, + {0xd33b, CRL_REG_LEN_08BIT, 0x18}, + {0xd33c, CRL_REG_LEN_08BIT, 0xe3}, + {0xd33d, CRL_REG_LEN_08BIT, 0xe3}, + {0xd33e, CRL_REG_LEN_08BIT, 0x20}, + {0xd33f, CRL_REG_LEN_08BIT, 0x00}, + {0xd340, CRL_REG_LEN_08BIT, 0xd8}, + {0xd341, CRL_REG_LEN_08BIT, 0x07}, + {0xd342, CRL_REG_LEN_08BIT, 0xf8}, + {0xd343, CRL_REG_LEN_08BIT, 0x00}, + {0xd344, CRL_REG_LEN_08BIT, 0x03}, + {0xd345, CRL_REG_LEN_08BIT, 0xff}, + {0xd346, CRL_REG_LEN_08BIT, 0xff}, + {0xd347, CRL_REG_LEN_08BIT, 0x6f}, + {0xd348, CRL_REG_LEN_08BIT, 0x18}, + {0xd349, CRL_REG_LEN_08BIT, 0x60}, + {0xd34a, CRL_REG_LEN_08BIT, 0x00}, + {0xd34b, CRL_REG_LEN_08BIT, 0x01}, + {0xd34c, CRL_REG_LEN_08BIT, 0x0f}, + {0xd34d, CRL_REG_LEN_08BIT, 0xff}, + {0xd34e, CRL_REG_LEN_08BIT, 0xff}, + {0xd34f, CRL_REG_LEN_08BIT, 0x9d}, + {0xd350, CRL_REG_LEN_08BIT, 0x18}, + {0xd351, CRL_REG_LEN_08BIT, 0x60}, + {0xd352, CRL_REG_LEN_08BIT, 0x80}, + {0xd353, CRL_REG_LEN_08BIT, 0x06}, + {0xd354, CRL_REG_LEN_08BIT, 0x00}, + {0xd355, CRL_REG_LEN_08BIT, 0x00}, + {0xd356, CRL_REG_LEN_08BIT, 0x00}, + {0xd357, CRL_REG_LEN_08BIT, 0x11}, + {0xd358, CRL_REG_LEN_08BIT, 0xa8}, + {0xd359, CRL_REG_LEN_08BIT, 0x83}, + {0xd35a, CRL_REG_LEN_08BIT, 0x6e}, + {0xd35b, CRL_REG_LEN_08BIT, 0x43}, + {0xd35c, CRL_REG_LEN_08BIT, 0xe0}, + {0xd35d, CRL_REG_LEN_08BIT, 0x6c}, + {0xd35e, CRL_REG_LEN_08BIT, 0x28}, + {0xd35f, CRL_REG_LEN_08BIT, 0x02}, + {0xd360, CRL_REG_LEN_08BIT, 0xe0}, + {0xd361, CRL_REG_LEN_08BIT, 0x84}, + {0xd362, CRL_REG_LEN_08BIT, 0x28}, + {0xd363, CRL_REG_LEN_08BIT, 0x02}, + {0xd364, CRL_REG_LEN_08BIT, 0x07}, + {0xd365, CRL_REG_LEN_08BIT, 0xff}, + {0xd366, CRL_REG_LEN_08BIT, 0xf8}, + {0xd367, CRL_REG_LEN_08BIT, 0x30}, + {0xd368, CRL_REG_LEN_08BIT, 0xb8}, + {0xd369, CRL_REG_LEN_08BIT, 0x63}, + {0xd36a, CRL_REG_LEN_08BIT, 0x00}, + {0xd36b, CRL_REG_LEN_08BIT, 0x08}, + {0xd36c, CRL_REG_LEN_08BIT, 0x03}, + {0xd36d, CRL_REG_LEN_08BIT, 0xff}, + {0xd36e, CRL_REG_LEN_08BIT, 0xff}, + {0xd36f, 
CRL_REG_LEN_08BIT, 0xc0}, + {0xd370, CRL_REG_LEN_08BIT, 0x85}, + {0xd371, CRL_REG_LEN_08BIT, 0x4e}, + {0xd372, CRL_REG_LEN_08BIT, 0x00}, + {0xd373, CRL_REG_LEN_08BIT, 0x00}, + {0xd374, CRL_REG_LEN_08BIT, 0x03}, + {0xd375, CRL_REG_LEN_08BIT, 0xff}, + {0xd376, CRL_REG_LEN_08BIT, 0xff}, + {0xd377, CRL_REG_LEN_08BIT, 0xe7}, + {0xd378, CRL_REG_LEN_08BIT, 0xd4}, + {0xd379, CRL_REG_LEN_08BIT, 0x01}, + {0xd37a, CRL_REG_LEN_08BIT, 0x40}, + {0xd37b, CRL_REG_LEN_08BIT, 0x18}, + {0xd37c, CRL_REG_LEN_08BIT, 0x9c}, + {0xd37d, CRL_REG_LEN_08BIT, 0x60}, + {0xd37e, CRL_REG_LEN_08BIT, 0x00}, + {0xd37f, CRL_REG_LEN_08BIT, 0x00}, + {0xd380, CRL_REG_LEN_08BIT, 0x03}, + {0xd381, CRL_REG_LEN_08BIT, 0xff}, + {0xd382, CRL_REG_LEN_08BIT, 0xff}, + {0xd383, CRL_REG_LEN_08BIT, 0xdb}, + {0xd384, CRL_REG_LEN_08BIT, 0xd4}, + {0xd385, CRL_REG_LEN_08BIT, 0x01}, + {0xd386, CRL_REG_LEN_08BIT, 0x18}, + {0xd387, CRL_REG_LEN_08BIT, 0x14}, + {0xd388, CRL_REG_LEN_08BIT, 0x03}, + {0xd389, CRL_REG_LEN_08BIT, 0xff}, + {0xd38a, CRL_REG_LEN_08BIT, 0xff}, + {0xd38b, CRL_REG_LEN_08BIT, 0xce}, + {0xd38c, CRL_REG_LEN_08BIT, 0x9d}, + {0xd38d, CRL_REG_LEN_08BIT, 0x6b}, + {0xd38e, CRL_REG_LEN_08BIT, 0x00}, + {0xd38f, CRL_REG_LEN_08BIT, 0xff}, + {0xd390, CRL_REG_LEN_08BIT, 0x03}, + {0xd391, CRL_REG_LEN_08BIT, 0xff}, + {0xd392, CRL_REG_LEN_08BIT, 0xff}, + {0xd393, CRL_REG_LEN_08BIT, 0xc6}, + {0xd394, CRL_REG_LEN_08BIT, 0x9c}, + {0xd395, CRL_REG_LEN_08BIT, 0x63}, + {0xd396, CRL_REG_LEN_08BIT, 0x00}, + {0xd397, CRL_REG_LEN_08BIT, 0xff}, + {0xd398, CRL_REG_LEN_08BIT, 0xa8}, + {0xd399, CRL_REG_LEN_08BIT, 0xe3}, + {0xd39a, CRL_REG_LEN_08BIT, 0x38}, + {0xd39b, CRL_REG_LEN_08BIT, 0x0f}, + {0xd39c, CRL_REG_LEN_08BIT, 0x8c}, + {0xd39d, CRL_REG_LEN_08BIT, 0x84}, + {0xd39e, CRL_REG_LEN_08BIT, 0x00}, + {0xd39f, CRL_REG_LEN_08BIT, 0x00}, + {0xd3a0, CRL_REG_LEN_08BIT, 0xa8}, + {0xd3a1, CRL_REG_LEN_08BIT, 0xa3}, + {0xd3a2, CRL_REG_LEN_08BIT, 0x38}, + {0xd3a3, CRL_REG_LEN_08BIT, 0x0e}, + {0xd3a4, CRL_REG_LEN_08BIT, 0xa8}, + {0xd3a5, 
CRL_REG_LEN_08BIT, 0xc3}, + {0xd3a6, CRL_REG_LEN_08BIT, 0x6e}, + {0xd3a7, CRL_REG_LEN_08BIT, 0x42}, + {0xd3a8, CRL_REG_LEN_08BIT, 0xd8}, + {0xd3a9, CRL_REG_LEN_08BIT, 0x07}, + {0xd3aa, CRL_REG_LEN_08BIT, 0x20}, + {0xd3ab, CRL_REG_LEN_08BIT, 0x00}, + {0xd3ac, CRL_REG_LEN_08BIT, 0x8c}, + {0xd3ad, CRL_REG_LEN_08BIT, 0x66}, + {0xd3ae, CRL_REG_LEN_08BIT, 0x00}, + {0xd3af, CRL_REG_LEN_08BIT, 0x00}, + {0xd3b0, CRL_REG_LEN_08BIT, 0xd8}, + {0xd3b1, CRL_REG_LEN_08BIT, 0x05}, + {0xd3b2, CRL_REG_LEN_08BIT, 0x18}, + {0xd3b3, CRL_REG_LEN_08BIT, 0x00}, + {0xd3b4, CRL_REG_LEN_08BIT, 0x85}, + {0xd3b5, CRL_REG_LEN_08BIT, 0x21}, + {0xd3b6, CRL_REG_LEN_08BIT, 0x00}, + {0xd3b7, CRL_REG_LEN_08BIT, 0x00}, + {0xd3b8, CRL_REG_LEN_08BIT, 0x85}, + {0xd3b9, CRL_REG_LEN_08BIT, 0x41}, + {0xd3ba, CRL_REG_LEN_08BIT, 0x00}, + {0xd3bb, CRL_REG_LEN_08BIT, 0x04}, + {0xd3bc, CRL_REG_LEN_08BIT, 0x85}, + {0xd3bd, CRL_REG_LEN_08BIT, 0x81}, + {0xd3be, CRL_REG_LEN_08BIT, 0x00}, + {0xd3bf, CRL_REG_LEN_08BIT, 0x08}, + {0xd3c0, CRL_REG_LEN_08BIT, 0x85}, + {0xd3c1, CRL_REG_LEN_08BIT, 0xc1}, + {0xd3c2, CRL_REG_LEN_08BIT, 0x00}, + {0xd3c3, CRL_REG_LEN_08BIT, 0x0c}, + {0xd3c4, CRL_REG_LEN_08BIT, 0x86}, + {0xd3c5, CRL_REG_LEN_08BIT, 0x01}, + {0xd3c6, CRL_REG_LEN_08BIT, 0x00}, + {0xd3c7, CRL_REG_LEN_08BIT, 0x10}, + {0xd3c8, CRL_REG_LEN_08BIT, 0x44}, + {0xd3c9, CRL_REG_LEN_08BIT, 0x00}, + {0xd3ca, CRL_REG_LEN_08BIT, 0x48}, + {0xd3cb, CRL_REG_LEN_08BIT, 0x00}, + {0xd3cc, CRL_REG_LEN_08BIT, 0x9c}, + {0xd3cd, CRL_REG_LEN_08BIT, 0x21}, + {0xd3ce, CRL_REG_LEN_08BIT, 0x00}, + {0xd3cf, CRL_REG_LEN_08BIT, 0x1c}, + {0xd3d0, CRL_REG_LEN_08BIT, 0x9c}, + {0xd3d1, CRL_REG_LEN_08BIT, 0x21}, + {0xd3d2, CRL_REG_LEN_08BIT, 0xff}, + {0xd3d3, CRL_REG_LEN_08BIT, 0xfc}, + {0xd3d4, CRL_REG_LEN_08BIT, 0xd4}, + {0xd3d5, CRL_REG_LEN_08BIT, 0x01}, + {0xd3d6, CRL_REG_LEN_08BIT, 0x48}, + {0xd3d7, CRL_REG_LEN_08BIT, 0x00}, + {0xd3d8, CRL_REG_LEN_08BIT, 0x18}, + {0xd3d9, CRL_REG_LEN_08BIT, 0x60}, + {0xd3da, CRL_REG_LEN_08BIT, 0x00}, + {0xd3db, 
CRL_REG_LEN_08BIT, 0x01}, + {0xd3dc, CRL_REG_LEN_08BIT, 0xa8}, + {0xd3dd, CRL_REG_LEN_08BIT, 0x63}, + {0xd3de, CRL_REG_LEN_08BIT, 0x07}, + {0xd3df, CRL_REG_LEN_08BIT, 0x80}, + {0xd3e0, CRL_REG_LEN_08BIT, 0x8c}, + {0xd3e1, CRL_REG_LEN_08BIT, 0x63}, + {0xd3e2, CRL_REG_LEN_08BIT, 0x00}, + {0xd3e3, CRL_REG_LEN_08BIT, 0x68}, + {0xd3e4, CRL_REG_LEN_08BIT, 0xbc}, + {0xd3e5, CRL_REG_LEN_08BIT, 0x03}, + {0xd3e6, CRL_REG_LEN_08BIT, 0x00}, + {0xd3e7, CRL_REG_LEN_08BIT, 0x00}, + {0xd3e8, CRL_REG_LEN_08BIT, 0x10}, + {0xd3e9, CRL_REG_LEN_08BIT, 0x00}, + {0xd3ea, CRL_REG_LEN_08BIT, 0x00}, + {0xd3eb, CRL_REG_LEN_08BIT, 0x0c}, + {0xd3ec, CRL_REG_LEN_08BIT, 0x15}, + {0xd3ed, CRL_REG_LEN_08BIT, 0x00}, + {0xd3ee, CRL_REG_LEN_08BIT, 0x00}, + {0xd3ef, CRL_REG_LEN_08BIT, 0x00}, + {0xd3f0, CRL_REG_LEN_08BIT, 0x07}, + {0xd3f1, CRL_REG_LEN_08BIT, 0xff}, + {0xd3f2, CRL_REG_LEN_08BIT, 0xd9}, + {0xd3f3, CRL_REG_LEN_08BIT, 0x98}, + {0xd3f4, CRL_REG_LEN_08BIT, 0x15}, + {0xd3f5, CRL_REG_LEN_08BIT, 0x00}, + {0xd3f6, CRL_REG_LEN_08BIT, 0x00}, + {0xd3f7, CRL_REG_LEN_08BIT, 0x00}, + {0xd3f8, CRL_REG_LEN_08BIT, 0x18}, + {0xd3f9, CRL_REG_LEN_08BIT, 0x60}, + {0xd3fa, CRL_REG_LEN_08BIT, 0x80}, + {0xd3fb, CRL_REG_LEN_08BIT, 0x06}, + {0xd3fc, CRL_REG_LEN_08BIT, 0xa8}, + {0xd3fd, CRL_REG_LEN_08BIT, 0x63}, + {0xd3fe, CRL_REG_LEN_08BIT, 0xc4}, + {0xd3ff, CRL_REG_LEN_08BIT, 0xb8}, + {0xd400, CRL_REG_LEN_08BIT, 0x8c}, + {0xd401, CRL_REG_LEN_08BIT, 0x63}, + {0xd402, CRL_REG_LEN_08BIT, 0x00}, + {0xd403, CRL_REG_LEN_08BIT, 0x00}, + {0xd404, CRL_REG_LEN_08BIT, 0xbc}, + {0xd405, CRL_REG_LEN_08BIT, 0x23}, + {0xd406, CRL_REG_LEN_08BIT, 0x00}, + {0xd407, CRL_REG_LEN_08BIT, 0x01}, + {0xd408, CRL_REG_LEN_08BIT, 0x10}, + {0xd409, CRL_REG_LEN_08BIT, 0x00}, + {0xd40a, CRL_REG_LEN_08BIT, 0x00}, + {0xd40b, CRL_REG_LEN_08BIT, 0x25}, + {0xd40c, CRL_REG_LEN_08BIT, 0x9d}, + {0xd40d, CRL_REG_LEN_08BIT, 0x00}, + {0xd40e, CRL_REG_LEN_08BIT, 0x00}, + {0xd40f, CRL_REG_LEN_08BIT, 0x00}, + {0xd410, CRL_REG_LEN_08BIT, 0x00}, + {0xd411, 
CRL_REG_LEN_08BIT, 0x00}, + {0xd412, CRL_REG_LEN_08BIT, 0x00}, + {0xd413, CRL_REG_LEN_08BIT, 0x0b}, + {0xd414, CRL_REG_LEN_08BIT, 0xb8}, + {0xd415, CRL_REG_LEN_08BIT, 0xe8}, + {0xd416, CRL_REG_LEN_08BIT, 0x00}, + {0xd417, CRL_REG_LEN_08BIT, 0x02}, + {0xd418, CRL_REG_LEN_08BIT, 0x07}, + {0xd419, CRL_REG_LEN_08BIT, 0xff}, + {0xd41a, CRL_REG_LEN_08BIT, 0xd6}, + {0xd41b, CRL_REG_LEN_08BIT, 0x24}, + {0xd41c, CRL_REG_LEN_08BIT, 0x15}, + {0xd41d, CRL_REG_LEN_08BIT, 0x00}, + {0xd41e, CRL_REG_LEN_08BIT, 0x00}, + {0xd41f, CRL_REG_LEN_08BIT, 0x00}, + {0xd420, CRL_REG_LEN_08BIT, 0x18}, + {0xd421, CRL_REG_LEN_08BIT, 0x60}, + {0xd422, CRL_REG_LEN_08BIT, 0x80}, + {0xd423, CRL_REG_LEN_08BIT, 0x06}, + {0xd424, CRL_REG_LEN_08BIT, 0xa8}, + {0xd425, CRL_REG_LEN_08BIT, 0x63}, + {0xd426, CRL_REG_LEN_08BIT, 0xc4}, + {0xd427, CRL_REG_LEN_08BIT, 0xb8}, + {0xd428, CRL_REG_LEN_08BIT, 0x8c}, + {0xd429, CRL_REG_LEN_08BIT, 0x63}, + {0xd42a, CRL_REG_LEN_08BIT, 0x00}, + {0xd42b, CRL_REG_LEN_08BIT, 0x00}, + {0xd42c, CRL_REG_LEN_08BIT, 0xbc}, + {0xd42d, CRL_REG_LEN_08BIT, 0x23}, + {0xd42e, CRL_REG_LEN_08BIT, 0x00}, + {0xd42f, CRL_REG_LEN_08BIT, 0x01}, + {0xd430, CRL_REG_LEN_08BIT, 0x10}, + {0xd431, CRL_REG_LEN_08BIT, 0x00}, + {0xd432, CRL_REG_LEN_08BIT, 0x00}, + {0xd433, CRL_REG_LEN_08BIT, 0x1b}, + {0xd434, CRL_REG_LEN_08BIT, 0x9d}, + {0xd435, CRL_REG_LEN_08BIT, 0x00}, + {0xd436, CRL_REG_LEN_08BIT, 0x00}, + {0xd437, CRL_REG_LEN_08BIT, 0x00}, + {0xd438, CRL_REG_LEN_08BIT, 0xb8}, + {0xd439, CRL_REG_LEN_08BIT, 0xe8}, + {0xd43a, CRL_REG_LEN_08BIT, 0x00}, + {0xd43b, CRL_REG_LEN_08BIT, 0x02}, + {0xd43c, CRL_REG_LEN_08BIT, 0x9c}, + {0xd43d, CRL_REG_LEN_08BIT, 0xc0}, + {0xd43e, CRL_REG_LEN_08BIT, 0x00}, + {0xd43f, CRL_REG_LEN_08BIT, 0x00}, + {0xd440, CRL_REG_LEN_08BIT, 0x18}, + {0xd441, CRL_REG_LEN_08BIT, 0xa0}, + {0xd442, CRL_REG_LEN_08BIT, 0x80}, + {0xd443, CRL_REG_LEN_08BIT, 0x06}, + {0xd444, CRL_REG_LEN_08BIT, 0xe0}, + {0xd445, CRL_REG_LEN_08BIT, 0x67}, + {0xd446, CRL_REG_LEN_08BIT, 0x30}, + {0xd447, 
CRL_REG_LEN_08BIT, 0x00}, + {0xd448, CRL_REG_LEN_08BIT, 0xa8}, + {0xd449, CRL_REG_LEN_08BIT, 0xa5}, + {0xd44a, CRL_REG_LEN_08BIT, 0xce}, + {0xd44b, CRL_REG_LEN_08BIT, 0xb0}, + {0xd44c, CRL_REG_LEN_08BIT, 0x19}, + {0xd44d, CRL_REG_LEN_08BIT, 0x60}, + {0xd44e, CRL_REG_LEN_08BIT, 0x00}, + {0xd44f, CRL_REG_LEN_08BIT, 0x01}, + {0xd450, CRL_REG_LEN_08BIT, 0xa9}, + {0xd451, CRL_REG_LEN_08BIT, 0x6b}, + {0xd452, CRL_REG_LEN_08BIT, 0x06}, + {0xd453, CRL_REG_LEN_08BIT, 0x14}, + {0xd454, CRL_REG_LEN_08BIT, 0xe0}, + {0xd455, CRL_REG_LEN_08BIT, 0x83}, + {0xd456, CRL_REG_LEN_08BIT, 0x28}, + {0xd457, CRL_REG_LEN_08BIT, 0x00}, + {0xd458, CRL_REG_LEN_08BIT, 0x9c}, + {0xd459, CRL_REG_LEN_08BIT, 0xc6}, + {0xd45a, CRL_REG_LEN_08BIT, 0x00}, + {0xd45b, CRL_REG_LEN_08BIT, 0x01}, + {0xd45c, CRL_REG_LEN_08BIT, 0xe0}, + {0xd45d, CRL_REG_LEN_08BIT, 0x63}, + {0xd45e, CRL_REG_LEN_08BIT, 0x18}, + {0xd45f, CRL_REG_LEN_08BIT, 0x00}, + {0xd460, CRL_REG_LEN_08BIT, 0x8c}, + {0xd461, CRL_REG_LEN_08BIT, 0x84}, + {0xd462, CRL_REG_LEN_08BIT, 0x00}, + {0xd463, CRL_REG_LEN_08BIT, 0x00}, + {0xd464, CRL_REG_LEN_08BIT, 0xe0}, + {0xd465, CRL_REG_LEN_08BIT, 0xa3}, + {0xd466, CRL_REG_LEN_08BIT, 0x58}, + {0xd467, CRL_REG_LEN_08BIT, 0x00}, + {0xd468, CRL_REG_LEN_08BIT, 0xa4}, + {0xd469, CRL_REG_LEN_08BIT, 0xc6}, + {0xd46a, CRL_REG_LEN_08BIT, 0x00}, + {0xd46b, CRL_REG_LEN_08BIT, 0xff}, + {0xd46c, CRL_REG_LEN_08BIT, 0xb8}, + {0xd46d, CRL_REG_LEN_08BIT, 0x64}, + {0xd46e, CRL_REG_LEN_08BIT, 0x00}, + {0xd46f, CRL_REG_LEN_08BIT, 0x18}, + {0xd470, CRL_REG_LEN_08BIT, 0xbc}, + {0xd471, CRL_REG_LEN_08BIT, 0x46}, + {0xd472, CRL_REG_LEN_08BIT, 0x00}, + {0xd473, CRL_REG_LEN_08BIT, 0x03}, + {0xd474, CRL_REG_LEN_08BIT, 0x94}, + {0xd475, CRL_REG_LEN_08BIT, 0x85}, + {0xd476, CRL_REG_LEN_08BIT, 0x00}, + {0xd477, CRL_REG_LEN_08BIT, 0x00}, + {0xd478, CRL_REG_LEN_08BIT, 0xb8}, + {0xd479, CRL_REG_LEN_08BIT, 0x63}, + {0xd47a, CRL_REG_LEN_08BIT, 0x00}, + {0xd47b, CRL_REG_LEN_08BIT, 0x98}, + {0xd47c, CRL_REG_LEN_08BIT, 0xe0}, + {0xd47d, 
CRL_REG_LEN_08BIT, 0x64}, + {0xd47e, CRL_REG_LEN_08BIT, 0x18}, + {0xd47f, CRL_REG_LEN_08BIT, 0x00}, + {0xd480, CRL_REG_LEN_08BIT, 0x0f}, + {0xd481, CRL_REG_LEN_08BIT, 0xff}, + {0xd482, CRL_REG_LEN_08BIT, 0xff}, + {0xd483, CRL_REG_LEN_08BIT, 0xf0}, + {0xd484, CRL_REG_LEN_08BIT, 0xdc}, + {0xd485, CRL_REG_LEN_08BIT, 0x05}, + {0xd486, CRL_REG_LEN_08BIT, 0x18}, + {0xd487, CRL_REG_LEN_08BIT, 0x00}, + {0xd488, CRL_REG_LEN_08BIT, 0x9c}, + {0xd489, CRL_REG_LEN_08BIT, 0x68}, + {0xd48a, CRL_REG_LEN_08BIT, 0x00}, + {0xd48b, CRL_REG_LEN_08BIT, 0x01}, + {0xd48c, CRL_REG_LEN_08BIT, 0xa5}, + {0xd48d, CRL_REG_LEN_08BIT, 0x03}, + {0xd48e, CRL_REG_LEN_08BIT, 0x00}, + {0xd48f, CRL_REG_LEN_08BIT, 0xff}, + {0xd490, CRL_REG_LEN_08BIT, 0xbc}, + {0xd491, CRL_REG_LEN_08BIT, 0x48}, + {0xd492, CRL_REG_LEN_08BIT, 0x00}, + {0xd493, CRL_REG_LEN_08BIT, 0x01}, + {0xd494, CRL_REG_LEN_08BIT, 0x0f}, + {0xd495, CRL_REG_LEN_08BIT, 0xff}, + {0xd496, CRL_REG_LEN_08BIT, 0xff}, + {0xd497, CRL_REG_LEN_08BIT, 0xea}, + {0xd498, CRL_REG_LEN_08BIT, 0xb8}, + {0xd499, CRL_REG_LEN_08BIT, 0xe8}, + {0xd49a, CRL_REG_LEN_08BIT, 0x00}, + {0xd49b, CRL_REG_LEN_08BIT, 0x02}, + {0xd49c, CRL_REG_LEN_08BIT, 0x18}, + {0xd49d, CRL_REG_LEN_08BIT, 0x60}, + {0xd49e, CRL_REG_LEN_08BIT, 0x00}, + {0xd49f, CRL_REG_LEN_08BIT, 0x01}, + {0xd4a0, CRL_REG_LEN_08BIT, 0xa8}, + {0xd4a1, CRL_REG_LEN_08BIT, 0x63}, + {0xd4a2, CRL_REG_LEN_08BIT, 0x06}, + {0xd4a3, CRL_REG_LEN_08BIT, 0x14}, + {0xd4a4, CRL_REG_LEN_08BIT, 0x07}, + {0xd4a5, CRL_REG_LEN_08BIT, 0xff}, + {0xd4a6, CRL_REG_LEN_08BIT, 0xe4}, + {0xd4a7, CRL_REG_LEN_08BIT, 0x05}, + {0xd4a8, CRL_REG_LEN_08BIT, 0x9c}, + {0xd4a9, CRL_REG_LEN_08BIT, 0x83}, + {0xd4aa, CRL_REG_LEN_08BIT, 0x00}, + {0xd4ab, CRL_REG_LEN_08BIT, 0x10}, + {0xd4ac, CRL_REG_LEN_08BIT, 0x85}, + {0xd4ad, CRL_REG_LEN_08BIT, 0x21}, + {0xd4ae, CRL_REG_LEN_08BIT, 0x00}, + {0xd4af, CRL_REG_LEN_08BIT, 0x00}, + {0xd4b0, CRL_REG_LEN_08BIT, 0x44}, + {0xd4b1, CRL_REG_LEN_08BIT, 0x00}, + {0xd4b2, CRL_REG_LEN_08BIT, 0x48}, + {0xd4b3, 
CRL_REG_LEN_08BIT, 0x00}, + {0xd4b4, CRL_REG_LEN_08BIT, 0x9c}, + {0xd4b5, CRL_REG_LEN_08BIT, 0x21}, + {0xd4b6, CRL_REG_LEN_08BIT, 0x00}, + {0xd4b7, CRL_REG_LEN_08BIT, 0x04}, + {0xd4b8, CRL_REG_LEN_08BIT, 0x18}, + {0xd4b9, CRL_REG_LEN_08BIT, 0x60}, + {0xd4ba, CRL_REG_LEN_08BIT, 0x00}, + {0xd4bb, CRL_REG_LEN_08BIT, 0x01}, + {0xd4bc, CRL_REG_LEN_08BIT, 0x9c}, + {0xd4bd, CRL_REG_LEN_08BIT, 0x80}, + {0xd4be, CRL_REG_LEN_08BIT, 0xff}, + {0xd4bf, CRL_REG_LEN_08BIT, 0xff}, + {0xd4c0, CRL_REG_LEN_08BIT, 0xa8}, + {0xd4c1, CRL_REG_LEN_08BIT, 0x63}, + {0xd4c2, CRL_REG_LEN_08BIT, 0x09}, + {0xd4c3, CRL_REG_LEN_08BIT, 0xef}, + {0xd4c4, CRL_REG_LEN_08BIT, 0xd8}, + {0xd4c5, CRL_REG_LEN_08BIT, 0x03}, + {0xd4c6, CRL_REG_LEN_08BIT, 0x20}, + {0xd4c7, CRL_REG_LEN_08BIT, 0x00}, + {0xd4c8, CRL_REG_LEN_08BIT, 0x18}, + {0xd4c9, CRL_REG_LEN_08BIT, 0x60}, + {0xd4ca, CRL_REG_LEN_08BIT, 0x80}, + {0xd4cb, CRL_REG_LEN_08BIT, 0x06}, + {0xd4cc, CRL_REG_LEN_08BIT, 0xa8}, + {0xd4cd, CRL_REG_LEN_08BIT, 0x63}, + {0xd4ce, CRL_REG_LEN_08BIT, 0xc9}, + {0xd4cf, CRL_REG_LEN_08BIT, 0xef}, + {0xd4d0, CRL_REG_LEN_08BIT, 0xd8}, + {0xd4d1, CRL_REG_LEN_08BIT, 0x03}, + {0xd4d2, CRL_REG_LEN_08BIT, 0x20}, + {0xd4d3, CRL_REG_LEN_08BIT, 0x00}, + {0xd4d4, CRL_REG_LEN_08BIT, 0x44}, + {0xd4d5, CRL_REG_LEN_08BIT, 0x00}, + {0xd4d6, CRL_REG_LEN_08BIT, 0x48}, + {0xd4d7, CRL_REG_LEN_08BIT, 0x00}, + {0xd4d8, CRL_REG_LEN_08BIT, 0x15}, + {0xd4d9, CRL_REG_LEN_08BIT, 0x00}, + {0xd4da, CRL_REG_LEN_08BIT, 0x00}, + {0xd4db, CRL_REG_LEN_08BIT, 0x00}, + {0xd4dc, CRL_REG_LEN_08BIT, 0x18}, + {0xd4dd, CRL_REG_LEN_08BIT, 0x80}, + {0xd4de, CRL_REG_LEN_08BIT, 0x00}, + {0xd4df, CRL_REG_LEN_08BIT, 0x01}, + {0xd4e0, CRL_REG_LEN_08BIT, 0xa8}, + {0xd4e1, CRL_REG_LEN_08BIT, 0x84}, + {0xd4e2, CRL_REG_LEN_08BIT, 0x0a}, + {0xd4e3, CRL_REG_LEN_08BIT, 0x12}, + {0xd4e4, CRL_REG_LEN_08BIT, 0x8c}, + {0xd4e5, CRL_REG_LEN_08BIT, 0x64}, + {0xd4e6, CRL_REG_LEN_08BIT, 0x00}, + {0xd4e7, CRL_REG_LEN_08BIT, 0x00}, + {0xd4e8, CRL_REG_LEN_08BIT, 0xbc}, + {0xd4e9, 
CRL_REG_LEN_08BIT, 0x03}, + {0xd4ea, CRL_REG_LEN_08BIT, 0x00}, + {0xd4eb, CRL_REG_LEN_08BIT, 0x00}, + {0xd4ec, CRL_REG_LEN_08BIT, 0x13}, + {0xd4ed, CRL_REG_LEN_08BIT, 0xff}, + {0xd4ee, CRL_REG_LEN_08BIT, 0xff}, + {0xd4ef, CRL_REG_LEN_08BIT, 0xfe}, + {0xd4f0, CRL_REG_LEN_08BIT, 0x15}, + {0xd4f1, CRL_REG_LEN_08BIT, 0x00}, + {0xd4f2, CRL_REG_LEN_08BIT, 0x00}, + {0xd4f3, CRL_REG_LEN_08BIT, 0x00}, + {0xd4f4, CRL_REG_LEN_08BIT, 0x44}, + {0xd4f5, CRL_REG_LEN_08BIT, 0x00}, + {0xd4f6, CRL_REG_LEN_08BIT, 0x48}, + {0xd4f7, CRL_REG_LEN_08BIT, 0x00}, + {0xd4f8, CRL_REG_LEN_08BIT, 0x15}, + {0xd4f9, CRL_REG_LEN_08BIT, 0x00}, + {0xd4fa, CRL_REG_LEN_08BIT, 0x00}, + {0xd4fb, CRL_REG_LEN_08BIT, 0x00}, + {0xd4fc, CRL_REG_LEN_08BIT, 0x00}, + {0xd4fd, CRL_REG_LEN_08BIT, 0x00}, + {0xd4fe, CRL_REG_LEN_08BIT, 0x00}, + {0xd4ff, CRL_REG_LEN_08BIT, 0x00}, + {0xd500, CRL_REG_LEN_08BIT, 0x00}, + {0xd501, CRL_REG_LEN_08BIT, 0x00}, + {0xd502, CRL_REG_LEN_08BIT, 0x00}, + {0xd503, CRL_REG_LEN_08BIT, 0x00}, + {0x6f0e, CRL_REG_LEN_08BIT, 0x33}, + {0x6f0f, CRL_REG_LEN_08BIT, 0x33}, + {0x460e, CRL_REG_LEN_08BIT, 0x08}, + {0x460f, CRL_REG_LEN_08BIT, 0x01}, + {0x4610, CRL_REG_LEN_08BIT, 0x00}, + {0x4611, CRL_REG_LEN_08BIT, 0x01}, + {0x4612, CRL_REG_LEN_08BIT, 0x00}, + {0x4613, CRL_REG_LEN_08BIT, 0x01}, + {0x4605, CRL_REG_LEN_08BIT, 0x08}, + {0x4608, CRL_REG_LEN_08BIT, 0x00}, + {0x4609, CRL_REG_LEN_08BIT, 0x08}, + {0x6804, CRL_REG_LEN_08BIT, 0x00}, + {0x6805, CRL_REG_LEN_08BIT, 0x06}, + {0x6806, CRL_REG_LEN_08BIT, 0x00}, + {0x5120, CRL_REG_LEN_08BIT, 0x00}, + {0x3510, CRL_REG_LEN_08BIT, 0x00}, + {0x3504, CRL_REG_LEN_08BIT, 0x00}, + {0x6800, CRL_REG_LEN_08BIT, 0x00}, + {0x6f0d, CRL_REG_LEN_08BIT, 0x0f}, + {0x5000, CRL_REG_LEN_08BIT, 0xff}, + {0x5001, CRL_REG_LEN_08BIT, 0xbf}, + {0x5002, CRL_REG_LEN_08BIT, 0x7e}, + {0x5003, CRL_REG_LEN_08BIT, 0x0c}, + {0x503d, CRL_REG_LEN_08BIT, 0x00}, + {0xc450, CRL_REG_LEN_08BIT, 0x01}, + {0xc452, CRL_REG_LEN_08BIT, 0x04}, + {0xc453, CRL_REG_LEN_08BIT, 0x00}, + {0xc454, 
CRL_REG_LEN_08BIT, 0x01}, + {0xc455, CRL_REG_LEN_08BIT, 0x00}, + {0xc456, CRL_REG_LEN_08BIT, 0x00}, + {0xc457, CRL_REG_LEN_08BIT, 0x00}, + {0xc458, CRL_REG_LEN_08BIT, 0x00}, + {0xc459, CRL_REG_LEN_08BIT, 0x00}, + {0xc45b, CRL_REG_LEN_08BIT, 0x00}, + {0xc45c, CRL_REG_LEN_08BIT, 0x00}, + {0xc45d, CRL_REG_LEN_08BIT, 0x00}, + {0xc45e, CRL_REG_LEN_08BIT, 0x02}, + {0xc45f, CRL_REG_LEN_08BIT, 0x01}, + {0xc460, CRL_REG_LEN_08BIT, 0x01}, + {0xc461, CRL_REG_LEN_08BIT, 0x01}, + {0xc462, CRL_REG_LEN_08BIT, 0x01}, + {0xc464, CRL_REG_LEN_08BIT, 0x88}, + {0xc465, CRL_REG_LEN_08BIT, 0x00}, + {0xc466, CRL_REG_LEN_08BIT, 0x8a}, + {0xc467, CRL_REG_LEN_08BIT, 0x00}, + {0xc468, CRL_REG_LEN_08BIT, 0x86}, + {0xc469, CRL_REG_LEN_08BIT, 0x00}, + {0xc46a, CRL_REG_LEN_08BIT, 0x40}, + {0xc46b, CRL_REG_LEN_08BIT, 0x50}, + {0xc46c, CRL_REG_LEN_08BIT, 0x30}, + {0xc46d, CRL_REG_LEN_08BIT, 0x28}, + {0xc46e, CRL_REG_LEN_08BIT, 0x60}, + {0xc46f, CRL_REG_LEN_08BIT, 0x40}, + {0xc47c, CRL_REG_LEN_08BIT, 0x01}, + {0xc47d, CRL_REG_LEN_08BIT, 0x38}, + {0xc47e, CRL_REG_LEN_08BIT, 0x00}, + {0xc47f, CRL_REG_LEN_08BIT, 0x00}, + {0xc480, CRL_REG_LEN_08BIT, 0x00}, + {0xc481, CRL_REG_LEN_08BIT, 0xff}, + {0xc482, CRL_REG_LEN_08BIT, 0x00}, + {0xc483, CRL_REG_LEN_08BIT, 0x40}, + {0xc484, CRL_REG_LEN_08BIT, 0x00}, + {0xc485, CRL_REG_LEN_08BIT, 0x18}, + {0xc486, CRL_REG_LEN_08BIT, 0x00}, + {0xc487, CRL_REG_LEN_08BIT, 0x18}, + {0xc488, CRL_REG_LEN_08BIT, 0x34}, + {0xc489, CRL_REG_LEN_08BIT, 0x00}, + {0xc48a, CRL_REG_LEN_08BIT, 0x34}, + {0xc48b, CRL_REG_LEN_08BIT, 0x00}, + {0xc48c, CRL_REG_LEN_08BIT, 0x00}, + {0xc48d, CRL_REG_LEN_08BIT, 0x04}, + {0xc48e, CRL_REG_LEN_08BIT, 0x00}, + {0xc48f, CRL_REG_LEN_08BIT, 0x04}, + {0xc490, CRL_REG_LEN_08BIT, 0x07}, + {0xc492, CRL_REG_LEN_08BIT, 0x20}, + {0xc493, CRL_REG_LEN_08BIT, 0x08}, + {0xc498, CRL_REG_LEN_08BIT, 0x02}, + {0xc499, CRL_REG_LEN_08BIT, 0x00}, + {0xc49a, CRL_REG_LEN_08BIT, 0x02}, + {0xc49b, CRL_REG_LEN_08BIT, 0x00}, + {0xc49c, CRL_REG_LEN_08BIT, 0x02}, + {0xc49d, 
CRL_REG_LEN_08BIT, 0x00}, + {0xc49e, CRL_REG_LEN_08BIT, 0x02}, + {0xc49f, CRL_REG_LEN_08BIT, 0x60}, + {0xc4a0, CRL_REG_LEN_08BIT, 0x03}, + {0xc4a1, CRL_REG_LEN_08BIT, 0x00}, + {0xc4a2, CRL_REG_LEN_08BIT, 0x04}, + {0xc4a3, CRL_REG_LEN_08BIT, 0x00}, + {0xc4a4, CRL_REG_LEN_08BIT, 0x00}, + {0xc4a5, CRL_REG_LEN_08BIT, 0x10}, + {0xc4a6, CRL_REG_LEN_08BIT, 0x00}, + {0xc4a7, CRL_REG_LEN_08BIT, 0x40}, + {0xc4a8, CRL_REG_LEN_08BIT, 0x00}, + {0xc4a9, CRL_REG_LEN_08BIT, 0x80}, + {0xc4aa, CRL_REG_LEN_08BIT, 0x0d}, + {0xc4ab, CRL_REG_LEN_08BIT, 0x00}, + {0xc4ac, CRL_REG_LEN_08BIT, 0x03}, + {0xc4ad, CRL_REG_LEN_08BIT, 0xf0}, + {0xc4b4, CRL_REG_LEN_08BIT, 0x01}, + {0xc4b5, CRL_REG_LEN_08BIT, 0x01}, + {0xc4b6, CRL_REG_LEN_08BIT, 0x00}, + {0xc4b7, CRL_REG_LEN_08BIT, 0x01}, + {0xc4b8, CRL_REG_LEN_08BIT, 0x00}, + {0xc4b9, CRL_REG_LEN_08BIT, 0x01}, + {0xc4ba, CRL_REG_LEN_08BIT, 0x01}, + {0xc4bb, CRL_REG_LEN_08BIT, 0x00}, + {0xc4bc, CRL_REG_LEN_08BIT, 0x01}, + {0xc4bd, CRL_REG_LEN_08BIT, 0x60}, + {0xc4be, CRL_REG_LEN_08BIT, 0x02}, + {0xc4bf, CRL_REG_LEN_08BIT, 0x33}, + {0xc4c8, CRL_REG_LEN_08BIT, 0x03}, + {0xc4c9, CRL_REG_LEN_08BIT, 0xd0}, + {0xc4ca, CRL_REG_LEN_08BIT, 0x0e}, + {0xc4cb, CRL_REG_LEN_08BIT, 0x00}, + {0xc4cc, CRL_REG_LEN_08BIT, 0x10}, + {0xc4cd, CRL_REG_LEN_08BIT, 0x18}, + {0xc4ce, CRL_REG_LEN_08BIT, 0x10}, + {0xc4cf, CRL_REG_LEN_08BIT, 0x18}, + {0xc4d0, CRL_REG_LEN_08BIT, 0x04}, + {0xc4d1, CRL_REG_LEN_08BIT, 0x80}, + {0xc4e0, CRL_REG_LEN_08BIT, 0x04}, + {0xc4e1, CRL_REG_LEN_08BIT, 0x02}, + {0xc4e2, CRL_REG_LEN_08BIT, 0x01}, + {0xc4e4, CRL_REG_LEN_08BIT, 0x10}, + {0xc4e5, CRL_REG_LEN_08BIT, 0x20}, + {0xc4e6, CRL_REG_LEN_08BIT, 0x30}, + {0xc4e7, CRL_REG_LEN_08BIT, 0x40}, + {0xc4e8, CRL_REG_LEN_08BIT, 0x50}, + {0xc4e9, CRL_REG_LEN_08BIT, 0x60}, + {0xc4ea, CRL_REG_LEN_08BIT, 0x70}, + {0xc4eb, CRL_REG_LEN_08BIT, 0x80}, + {0xc4ec, CRL_REG_LEN_08BIT, 0x90}, + {0xc4ed, CRL_REG_LEN_08BIT, 0xa0}, + {0xc4ee, CRL_REG_LEN_08BIT, 0xb0}, + {0xc4ef, CRL_REG_LEN_08BIT, 0xc0}, + {0xc4f0, 
CRL_REG_LEN_08BIT, 0xd0}, + {0xc4f1, CRL_REG_LEN_08BIT, 0xe0}, + {0xc4f2, CRL_REG_LEN_08BIT, 0xf0}, + {0xc4f3, CRL_REG_LEN_08BIT, 0x80}, + {0xc4f4, CRL_REG_LEN_08BIT, 0x00}, + {0xc4f5, CRL_REG_LEN_08BIT, 0x20}, + {0xc4f6, CRL_REG_LEN_08BIT, 0x02}, + {0xc4f7, CRL_REG_LEN_08BIT, 0x00}, + {0xc4f8, CRL_REG_LEN_08BIT, 0x04}, + {0xc4f9, CRL_REG_LEN_08BIT, 0x0b}, + {0xc4fa, CRL_REG_LEN_08BIT, 0x00}, + {0xc4fb, CRL_REG_LEN_08BIT, 0x00}, + {0xc4fc, CRL_REG_LEN_08BIT, 0x01}, + {0xc4fd, CRL_REG_LEN_08BIT, 0x00}, + {0xc4fe, CRL_REG_LEN_08BIT, 0x04}, + {0xc4ff, CRL_REG_LEN_08BIT, 0x02}, + {0xc500, CRL_REG_LEN_08BIT, 0x48}, + {0xc501, CRL_REG_LEN_08BIT, 0x74}, + {0xc502, CRL_REG_LEN_08BIT, 0x58}, + {0xc503, CRL_REG_LEN_08BIT, 0x80}, + {0xc504, CRL_REG_LEN_08BIT, 0x05}, + {0xc505, CRL_REG_LEN_08BIT, 0x80}, + {0xc506, CRL_REG_LEN_08BIT, 0x03}, + {0xc507, CRL_REG_LEN_08BIT, 0x80}, + {0xc508, CRL_REG_LEN_08BIT, 0x01}, + {0xc509, CRL_REG_LEN_08BIT, 0xc0}, + {0xc50a, CRL_REG_LEN_08BIT, 0x01}, + {0xc50b, CRL_REG_LEN_08BIT, 0xa0}, + {0xc50c, CRL_REG_LEN_08BIT, 0x01}, + {0xc50d, CRL_REG_LEN_08BIT, 0x2c}, + {0xc50e, CRL_REG_LEN_08BIT, 0x01}, + {0xc50f, CRL_REG_LEN_08BIT, 0x0a}, + {0xc510, CRL_REG_LEN_08BIT, 0x00}, + {0xc511, CRL_REG_LEN_08BIT, 0x01}, + {0xc512, CRL_REG_LEN_08BIT, 0x01}, + {0xc513, CRL_REG_LEN_08BIT, 0x80}, + {0xc514, CRL_REG_LEN_08BIT, 0x04}, + {0xc515, CRL_REG_LEN_08BIT, 0x00}, + {0xc518, CRL_REG_LEN_08BIT, 0x03}, + {0xc519, CRL_REG_LEN_08BIT, 0x48}, + {0xc51a, CRL_REG_LEN_08BIT, 0x07}, + {0xc51b, CRL_REG_LEN_08BIT, 0x70}, + {0xc2e0, CRL_REG_LEN_08BIT, 0x00}, + {0xc2e1, CRL_REG_LEN_08BIT, 0x51}, + {0xc2e2, CRL_REG_LEN_08BIT, 0x00}, + {0xc2e3, CRL_REG_LEN_08BIT, 0xd6}, + {0xc2e4, CRL_REG_LEN_08BIT, 0x01}, + {0xc2e5, CRL_REG_LEN_08BIT, 0x5e}, + {0xc2e9, CRL_REG_LEN_08BIT, 0x01}, + {0xc2ea, CRL_REG_LEN_08BIT, 0x7a}, + {0xc2eb, CRL_REG_LEN_08BIT, 0x90}, + {0xc2ed, CRL_REG_LEN_08BIT, 0x00}, + {0xc2ee, CRL_REG_LEN_08BIT, 0x7a}, + {0xc2ef, CRL_REG_LEN_08BIT, 0x64}, + {0xc308, 
CRL_REG_LEN_08BIT, 0x00}, + {0xc309, CRL_REG_LEN_08BIT, 0x00}, + {0xc30a, CRL_REG_LEN_08BIT, 0x00}, + {0xc30c, CRL_REG_LEN_08BIT, 0x00}, + {0xc30d, CRL_REG_LEN_08BIT, 0x01}, + {0xc30e, CRL_REG_LEN_08BIT, 0x00}, + {0xc30f, CRL_REG_LEN_08BIT, 0x00}, + {0xc310, CRL_REG_LEN_08BIT, 0x01}, + {0xc311, CRL_REG_LEN_08BIT, 0x60}, + {0xc312, CRL_REG_LEN_08BIT, 0xff}, + {0xc313, CRL_REG_LEN_08BIT, 0x08}, + {0xc314, CRL_REG_LEN_08BIT, 0x01}, + {0xc315, CRL_REG_LEN_08BIT, 0x7f}, + {0xc316, CRL_REG_LEN_08BIT, 0xff}, + {0xc317, CRL_REG_LEN_08BIT, 0x0b}, + {0xc318, CRL_REG_LEN_08BIT, 0x00}, + {0xc319, CRL_REG_LEN_08BIT, 0x0c}, + {0xc31a, CRL_REG_LEN_08BIT, 0x00}, + {0xc31b, CRL_REG_LEN_08BIT, 0xe0}, + {0xc31c, CRL_REG_LEN_08BIT, 0x00}, + {0xc31d, CRL_REG_LEN_08BIT, 0x14}, + {0xc31e, CRL_REG_LEN_08BIT, 0x00}, + {0xc31f, CRL_REG_LEN_08BIT, 0xc5}, + {0xc320, CRL_REG_LEN_08BIT, 0xff}, + {0xc321, CRL_REG_LEN_08BIT, 0x4b}, + {0xc322, CRL_REG_LEN_08BIT, 0xff}, + {0xc323, CRL_REG_LEN_08BIT, 0xf0}, + {0xc324, CRL_REG_LEN_08BIT, 0xff}, + {0xc325, CRL_REG_LEN_08BIT, 0xe8}, + {0xc326, CRL_REG_LEN_08BIT, 0x00}, + {0xc327, CRL_REG_LEN_08BIT, 0x46}, + {0xc328, CRL_REG_LEN_08BIT, 0xff}, + {0xc329, CRL_REG_LEN_08BIT, 0xd2}, + {0xc32a, CRL_REG_LEN_08BIT, 0xff}, + {0xc32b, CRL_REG_LEN_08BIT, 0xe4}, + {0xc32c, CRL_REG_LEN_08BIT, 0xff}, + {0xc32d, CRL_REG_LEN_08BIT, 0xbb}, + {0xc32e, CRL_REG_LEN_08BIT, 0x00}, + {0xc32f, CRL_REG_LEN_08BIT, 0x61}, + {0xc330, CRL_REG_LEN_08BIT, 0xff}, + {0xc331, CRL_REG_LEN_08BIT, 0xf9}, + {0xc332, CRL_REG_LEN_08BIT, 0x00}, + {0xc333, CRL_REG_LEN_08BIT, 0xd9}, + {0xc334, CRL_REG_LEN_08BIT, 0x00}, + {0xc335, CRL_REG_LEN_08BIT, 0x2e}, + {0xc336, CRL_REG_LEN_08BIT, 0x00}, + {0xc337, CRL_REG_LEN_08BIT, 0xb1}, + {0xc338, CRL_REG_LEN_08BIT, 0xff}, + {0xc339, CRL_REG_LEN_08BIT, 0x64}, + {0xc33a, CRL_REG_LEN_08BIT, 0xff}, + {0xc33b, CRL_REG_LEN_08BIT, 0xeb}, + {0xc33c, CRL_REG_LEN_08BIT, 0xff}, + {0xc33d, CRL_REG_LEN_08BIT, 0xe8}, + {0xc33e, CRL_REG_LEN_08BIT, 0x00}, + {0xc33f, 
CRL_REG_LEN_08BIT, 0x48}, + {0xc340, CRL_REG_LEN_08BIT, 0xff}, + {0xc341, CRL_REG_LEN_08BIT, 0xd0}, + {0xc342, CRL_REG_LEN_08BIT, 0xff}, + {0xc343, CRL_REG_LEN_08BIT, 0xed}, + {0xc344, CRL_REG_LEN_08BIT, 0xff}, + {0xc345, CRL_REG_LEN_08BIT, 0xad}, + {0xc346, CRL_REG_LEN_08BIT, 0x00}, + {0xc347, CRL_REG_LEN_08BIT, 0x66}, + {0xc348, CRL_REG_LEN_08BIT, 0x01}, + {0xc349, CRL_REG_LEN_08BIT, 0x00}, + {0x6700, CRL_REG_LEN_08BIT, 0x04}, + {0x6701, CRL_REG_LEN_08BIT, 0x7b}, + {0x6702, CRL_REG_LEN_08BIT, 0xfd}, + {0x6703, CRL_REG_LEN_08BIT, 0xf9}, + {0x6704, CRL_REG_LEN_08BIT, 0x3d}, + {0x6705, CRL_REG_LEN_08BIT, 0x71}, + {0x6706, CRL_REG_LEN_08BIT, 0x78}, + {0x6708, CRL_REG_LEN_08BIT, 0x05}, + {0x6f06, CRL_REG_LEN_08BIT, 0x6f}, + {0x6f07, CRL_REG_LEN_08BIT, 0x00}, + {0x6f0a, CRL_REG_LEN_08BIT, 0x6f}, + {0x6f0b, CRL_REG_LEN_08BIT, 0x00}, + {0x6f00, CRL_REG_LEN_08BIT, 0x03}, + {0xc34c, CRL_REG_LEN_08BIT, 0x01}, + {0xc34d, CRL_REG_LEN_08BIT, 0x00}, + {0xc34e, CRL_REG_LEN_08BIT, 0x46}, + {0xc34f, CRL_REG_LEN_08BIT, 0x55}, + {0xc350, CRL_REG_LEN_08BIT, 0x00}, + {0xc351, CRL_REG_LEN_08BIT, 0x40}, + {0xc352, CRL_REG_LEN_08BIT, 0x00}, + {0xc353, CRL_REG_LEN_08BIT, 0xff}, + {0xc354, CRL_REG_LEN_08BIT, 0x04}, + {0xc355, CRL_REG_LEN_08BIT, 0x08}, + {0xc356, CRL_REG_LEN_08BIT, 0x01}, + {0xc357, CRL_REG_LEN_08BIT, 0xef}, + {0xc358, CRL_REG_LEN_08BIT, 0x30}, + {0xc359, CRL_REG_LEN_08BIT, 0x01}, + {0xc35a, CRL_REG_LEN_08BIT, 0x64}, + {0xc35b, CRL_REG_LEN_08BIT, 0x46}, + {0xc35c, CRL_REG_LEN_08BIT, 0x00}, + {0x3042, CRL_REG_LEN_08BIT, 0xf0}, + {0x3042, CRL_REG_LEN_08BIT, 0xf0}, + {0x3042, CRL_REG_LEN_08BIT, 0xf0}, + {0x3042, CRL_REG_LEN_08BIT, 0xf0}, + {0x3042, CRL_REG_LEN_08BIT, 0xf0}, + {0x3042, CRL_REG_LEN_08BIT, 0xf0}, + {0x3042, CRL_REG_LEN_08BIT, 0xf0}, + {0x3042, CRL_REG_LEN_08BIT, 0xf0}, + {0x3042, CRL_REG_LEN_08BIT, 0xf0}, + {0x3042, CRL_REG_LEN_08BIT, 0xf0}, + {0x3042, CRL_REG_LEN_08BIT, 0xf0}, + {0x3042, CRL_REG_LEN_08BIT, 0xf0}, + {0x3042, CRL_REG_LEN_08BIT, 0xf0}, + {0x3042, 
CRL_REG_LEN_08BIT, 0xf0}, + {0x3042, CRL_REG_LEN_08BIT, 0xf0}, + {0x3042, CRL_REG_LEN_08BIT, 0xf0}, + {0x3042, CRL_REG_LEN_08BIT, 0xf0}, + {0x3042, CRL_REG_LEN_08BIT, 0xf0}, + {0x3042, CRL_REG_LEN_08BIT, 0xf0}, + {0x3042, CRL_REG_LEN_08BIT, 0xf0}, + {0x3042, CRL_REG_LEN_08BIT, 0xf0}, + {0x3042, CRL_REG_LEN_08BIT, 0xf0}, + {0x3042, CRL_REG_LEN_08BIT, 0xf0}, + {0x3042, CRL_REG_LEN_08BIT, 0xf0}, + {0x3042, CRL_REG_LEN_08BIT, 0xf0}, + {0x3042, CRL_REG_LEN_08BIT, 0xf0}, + {0x302e, CRL_REG_LEN_08BIT, 0x00}, + {0x301b, CRL_REG_LEN_08BIT, 0xf0}, + {0x301c, CRL_REG_LEN_08BIT, 0xf0}, + {0x301a, CRL_REG_LEN_08BIT, 0xf0}, + {0xceb0, CRL_REG_LEN_08BIT, 0x00}, + {0xceb1, CRL_REG_LEN_08BIT, 0x00}, + {0xceb2, CRL_REG_LEN_08BIT, 0x00}, + {0xceb3, CRL_REG_LEN_08BIT, 0x00}, + {0xceb4, CRL_REG_LEN_08BIT, 0x00}, + {0xceb5, CRL_REG_LEN_08BIT, 0x00}, + {0xceb6, CRL_REG_LEN_08BIT, 0x00}, + {0xceb7, CRL_REG_LEN_08BIT, 0x00}, + {0xc4bc, CRL_REG_LEN_08BIT, 0x01}, + {0xc4bd, CRL_REG_LEN_08BIT, 0x60}, +}; + +static struct crl_register_write_rep ov10635_1280_720_YUV_HDR_BT656[] = { + {0x0103, CRL_REG_LEN_08BIT, 0x01}, + {0x300c, CRL_REG_LEN_08BIT, 0x61}, + {0x300c, CRL_REG_LEN_08BIT, 0x61}, + {0x300c, CRL_REG_LEN_08BIT, 0x61}, + {0x300c, CRL_REG_LEN_08BIT, 0x61}, + {0x300c, CRL_REG_LEN_08BIT, 0x61}, + {0x300c, CRL_REG_LEN_08BIT, 0x61}, + {0x300c, CRL_REG_LEN_08BIT, 0x61}, + {0x300c, CRL_REG_LEN_08BIT, 0x61}, + {0x300c, CRL_REG_LEN_08BIT, 0x61}, + {0x300c, CRL_REG_LEN_08BIT, 0x61}, + {0x300c, CRL_REG_LEN_08BIT, 0x61}, + {0x300c, CRL_REG_LEN_08BIT, 0x61}, + {0x300c, CRL_REG_LEN_08BIT, 0x61}, + {0x300c, CRL_REG_LEN_08BIT, 0x61}, + {0x300c, CRL_REG_LEN_08BIT, 0x61}, + {0x300c, CRL_REG_LEN_08BIT, 0x61}, + {0x300c, CRL_REG_LEN_08BIT, 0x61}, + {0x300c, CRL_REG_LEN_08BIT, 0x61}, + {0x300c, CRL_REG_LEN_08BIT, 0x61}, + {0x300c, CRL_REG_LEN_08BIT, 0x61}, + {0x300c, CRL_REG_LEN_08BIT, 0x61}, + {0x300c, CRL_REG_LEN_08BIT, 0x61}, + {0x300c, CRL_REG_LEN_08BIT, 0x61}, + {0x301b, CRL_REG_LEN_08BIT, 0xff}, + 
{0x301c, CRL_REG_LEN_08BIT, 0xff}, + {0x301a, CRL_REG_LEN_08BIT, 0xff}, + {0x3011, CRL_REG_LEN_08BIT, 0x42}, + {0x6900, CRL_REG_LEN_08BIT, 0x0c}, + {0x6901, CRL_REG_LEN_08BIT, 0x11}, + {0x3503, CRL_REG_LEN_08BIT, 0x10}, + {0x3025, CRL_REG_LEN_08BIT, 0x03}, + {0x3003, CRL_REG_LEN_08BIT, 0x14}, + {0x3004, CRL_REG_LEN_08BIT, 0x11}, + {0x3005, CRL_REG_LEN_08BIT, 0x20}, + {0x3006, CRL_REG_LEN_08BIT, 0x91}, + {0x3600, CRL_REG_LEN_08BIT, 0x74}, + {0x3601, CRL_REG_LEN_08BIT, 0x2b}, + {0x3612, CRL_REG_LEN_08BIT, 0x00}, + {0x3611, CRL_REG_LEN_08BIT, 0x67}, + {0x3633, CRL_REG_LEN_08BIT, 0xca}, + {0x3602, CRL_REG_LEN_08BIT, 0x2f}, + {0x3603, CRL_REG_LEN_08BIT, 0x00}, + {0x3630, CRL_REG_LEN_08BIT, 0x28}, + {0x3631, CRL_REG_LEN_08BIT, 0x16}, + {0x3714, CRL_REG_LEN_08BIT, 0x10}, + {0x371d, CRL_REG_LEN_08BIT, 0x01}, + {0x4300, CRL_REG_LEN_08BIT, 0x3a}, + {0x3007, CRL_REG_LEN_08BIT, 0x01}, + {0x3024, CRL_REG_LEN_08BIT, 0x01}, + {0x3020, CRL_REG_LEN_08BIT, 0x0b}, + {0x3702, CRL_REG_LEN_08BIT, 0x1a}, + {0x3703, CRL_REG_LEN_08BIT, 0x40}, + {0x3704, CRL_REG_LEN_08BIT, 0x2a}, + {0x3709, CRL_REG_LEN_08BIT, 0xa8}, + {0x3709, CRL_REG_LEN_08BIT, 0xa8}, + {0x370c, CRL_REG_LEN_08BIT, 0xc7}, + {0x370d, CRL_REG_LEN_08BIT, 0x80}, + {0x3712, CRL_REG_LEN_08BIT, 0x00}, + {0x3713, CRL_REG_LEN_08BIT, 0x20}, + {0x3715, CRL_REG_LEN_08BIT, 0x04}, + {0x381d, CRL_REG_LEN_08BIT, 0x40}, + {0x381c, CRL_REG_LEN_08BIT, 0x00}, + {0x3822, CRL_REG_LEN_08BIT, 0x50}, + {0x3824, CRL_REG_LEN_08BIT, 0x50}, + {0x3815, CRL_REG_LEN_08BIT, 0x8c}, + {0x3804, CRL_REG_LEN_08BIT, 0x05}, + {0x3805, CRL_REG_LEN_08BIT, 0x1f}, + {0x3800, CRL_REG_LEN_08BIT, 0x00}, + {0x3801, CRL_REG_LEN_08BIT, 0x00}, + {0x3806, CRL_REG_LEN_08BIT, 0x02}, + {0x3807, CRL_REG_LEN_08BIT, 0xfd}, + {0x3802, CRL_REG_LEN_08BIT, 0x00}, + {0x3803, CRL_REG_LEN_08BIT, 0x2c}, + {0x3808, CRL_REG_LEN_08BIT, 0x05}, + {0x3809, CRL_REG_LEN_08BIT, 0x00}, + {0x380a, CRL_REG_LEN_08BIT, 0x02}, + {0x380b, CRL_REG_LEN_08BIT, 0xd0}, + {0x380c, CRL_REG_LEN_08BIT, 0x06}, + 
{0x380d, CRL_REG_LEN_08BIT, 0xf6}, + {0x6e42, CRL_REG_LEN_08BIT, 0x02}, + {0x6e43, CRL_REG_LEN_08BIT, 0xec}, + {0x380e, CRL_REG_LEN_08BIT, 0x02}, + {0x380f, CRL_REG_LEN_08BIT, 0xec}, + {0x3813, CRL_REG_LEN_08BIT, 0x02}, + {0x3811, CRL_REG_LEN_08BIT, 0x10}, + {0x381f, CRL_REG_LEN_08BIT, 0x0c}, + {0x3828, CRL_REG_LEN_08BIT, 0x03}, + {0x3829, CRL_REG_LEN_08BIT, 0x10}, + {0x382a, CRL_REG_LEN_08BIT, 0x10}, + {0x382b, CRL_REG_LEN_08BIT, 0x10}, + {0x3621, CRL_REG_LEN_08BIT, 0x64}, + {0x5005, CRL_REG_LEN_08BIT, 0x08}, + {0x56d5, CRL_REG_LEN_08BIT, 0x00}, + {0x56d6, CRL_REG_LEN_08BIT, 0x80}, + {0x56d7, CRL_REG_LEN_08BIT, 0x00}, + {0x56d8, CRL_REG_LEN_08BIT, 0x00}, + {0x56d9, CRL_REG_LEN_08BIT, 0x00}, + {0x56da, CRL_REG_LEN_08BIT, 0x80}, + {0x56db, CRL_REG_LEN_08BIT, 0x00}, + {0x56dc, CRL_REG_LEN_08BIT, 0x00}, + {0x56e8, CRL_REG_LEN_08BIT, 0x00}, + {0x56e9, CRL_REG_LEN_08BIT, 0x7f}, + {0x56ea, CRL_REG_LEN_08BIT, 0x00}, + {0x56eb, CRL_REG_LEN_08BIT, 0x7f}, + {0x5100, CRL_REG_LEN_08BIT, 0x00}, + {0x5101, CRL_REG_LEN_08BIT, 0x80}, + {0x5102, CRL_REG_LEN_08BIT, 0x00}, + {0x5103, CRL_REG_LEN_08BIT, 0x80}, + {0x5104, CRL_REG_LEN_08BIT, 0x00}, + {0x5105, CRL_REG_LEN_08BIT, 0x80}, + {0x5106, CRL_REG_LEN_08BIT, 0x00}, + {0x5107, CRL_REG_LEN_08BIT, 0x80}, + {0x5108, CRL_REG_LEN_08BIT, 0x00}, + {0x5109, CRL_REG_LEN_08BIT, 0x00}, + {0x510a, CRL_REG_LEN_08BIT, 0x00}, + {0x510b, CRL_REG_LEN_08BIT, 0x00}, + {0x510c, CRL_REG_LEN_08BIT, 0x00}, + {0x510d, CRL_REG_LEN_08BIT, 0x00}, + {0x510e, CRL_REG_LEN_08BIT, 0x00}, + {0x510f, CRL_REG_LEN_08BIT, 0x00}, + {0x5110, CRL_REG_LEN_08BIT, 0x00}, + {0x5111, CRL_REG_LEN_08BIT, 0x80}, + {0x5112, CRL_REG_LEN_08BIT, 0x00}, + {0x5113, CRL_REG_LEN_08BIT, 0x80}, + {0x5114, CRL_REG_LEN_08BIT, 0x00}, + {0x5115, CRL_REG_LEN_08BIT, 0x80}, + {0x5116, CRL_REG_LEN_08BIT, 0x00}, + {0x5117, CRL_REG_LEN_08BIT, 0x80}, + {0x5118, CRL_REG_LEN_08BIT, 0x00}, + {0x5119, CRL_REG_LEN_08BIT, 0x00}, + {0x511a, CRL_REG_LEN_08BIT, 0x00}, + {0x511b, CRL_REG_LEN_08BIT, 0x00}, + 
{0x511c, CRL_REG_LEN_08BIT, 0x00}, + {0x511d, CRL_REG_LEN_08BIT, 0x00}, + {0x511e, CRL_REG_LEN_08BIT, 0x00}, + {0x511f, CRL_REG_LEN_08BIT, 0x00}, + {0x56d0, CRL_REG_LEN_08BIT, 0x00}, + {0x5006, CRL_REG_LEN_08BIT, 0x24}, + {0x5608, CRL_REG_LEN_08BIT, 0x0e}, + {0x52d7, CRL_REG_LEN_08BIT, 0x06}, + {0x528d, CRL_REG_LEN_08BIT, 0x08}, + {0x5293, CRL_REG_LEN_08BIT, 0x12}, + {0x52d3, CRL_REG_LEN_08BIT, 0x12}, + {0x5288, CRL_REG_LEN_08BIT, 0x06}, + {0x5289, CRL_REG_LEN_08BIT, 0x20}, + {0x52c8, CRL_REG_LEN_08BIT, 0x06}, + {0x52c9, CRL_REG_LEN_08BIT, 0x20}, + {0x52cd, CRL_REG_LEN_08BIT, 0x04}, + {0x5381, CRL_REG_LEN_08BIT, 0x00}, + {0x5382, CRL_REG_LEN_08BIT, 0xff}, + {0x5589, CRL_REG_LEN_08BIT, 0x76}, + {0x558a, CRL_REG_LEN_08BIT, 0x47}, + {0x558b, CRL_REG_LEN_08BIT, 0xef}, + {0x558c, CRL_REG_LEN_08BIT, 0xc9}, + {0x558d, CRL_REG_LEN_08BIT, 0x49}, + {0x558e, CRL_REG_LEN_08BIT, 0x30}, + {0x558f, CRL_REG_LEN_08BIT, 0x67}, + {0x5590, CRL_REG_LEN_08BIT, 0x3f}, + {0x5591, CRL_REG_LEN_08BIT, 0xf0}, + {0x5592, CRL_REG_LEN_08BIT, 0x10}, + {0x55a2, CRL_REG_LEN_08BIT, 0x6d}, + {0x55a3, CRL_REG_LEN_08BIT, 0x55}, + {0x55a4, CRL_REG_LEN_08BIT, 0xc3}, + {0x55a5, CRL_REG_LEN_08BIT, 0xb5}, + {0x55a6, CRL_REG_LEN_08BIT, 0x43}, + {0x55a7, CRL_REG_LEN_08BIT, 0x38}, + {0x55a8, CRL_REG_LEN_08BIT, 0x5f}, + {0x55a9, CRL_REG_LEN_08BIT, 0x4b}, + {0x55aa, CRL_REG_LEN_08BIT, 0xf0}, + {0x55ab, CRL_REG_LEN_08BIT, 0x10}, + {0x5581, CRL_REG_LEN_08BIT, 0x52}, + {0x5300, CRL_REG_LEN_08BIT, 0x01}, + {0x5301, CRL_REG_LEN_08BIT, 0x00}, + {0x5302, CRL_REG_LEN_08BIT, 0x00}, + {0x5303, CRL_REG_LEN_08BIT, 0x0e}, + {0x5304, CRL_REG_LEN_08BIT, 0x00}, + {0x5305, CRL_REG_LEN_08BIT, 0x0e}, + {0x5306, CRL_REG_LEN_08BIT, 0x00}, + {0x5307, CRL_REG_LEN_08BIT, 0x36}, + {0x5308, CRL_REG_LEN_08BIT, 0x00}, + {0x5309, CRL_REG_LEN_08BIT, 0xd9}, + {0x530a, CRL_REG_LEN_08BIT, 0x00}, + {0x530b, CRL_REG_LEN_08BIT, 0x0f}, + {0x530c, CRL_REG_LEN_08BIT, 0x00}, + {0x530d, CRL_REG_LEN_08BIT, 0x2c}, + {0x530e, CRL_REG_LEN_08BIT, 0x00}, + 
{0x530f, CRL_REG_LEN_08BIT, 0x59}, + {0x5310, CRL_REG_LEN_08BIT, 0x00}, + {0x5311, CRL_REG_LEN_08BIT, 0x7b}, + {0x5312, CRL_REG_LEN_08BIT, 0x00}, + {0x5313, CRL_REG_LEN_08BIT, 0x22}, + {0x5314, CRL_REG_LEN_08BIT, 0x00}, + {0x5315, CRL_REG_LEN_08BIT, 0xd5}, + {0x5316, CRL_REG_LEN_08BIT, 0x00}, + {0x5317, CRL_REG_LEN_08BIT, 0x13}, + {0x5318, CRL_REG_LEN_08BIT, 0x00}, + {0x5319, CRL_REG_LEN_08BIT, 0x18}, + {0x531a, CRL_REG_LEN_08BIT, 0x00}, + {0x531b, CRL_REG_LEN_08BIT, 0x26}, + {0x531c, CRL_REG_LEN_08BIT, 0x00}, + {0x531d, CRL_REG_LEN_08BIT, 0xdc}, + {0x531e, CRL_REG_LEN_08BIT, 0x00}, + {0x531f, CRL_REG_LEN_08BIT, 0x02}, + {0x5320, CRL_REG_LEN_08BIT, 0x00}, + {0x5321, CRL_REG_LEN_08BIT, 0x24}, + {0x5322, CRL_REG_LEN_08BIT, 0x00}, + {0x5323, CRL_REG_LEN_08BIT, 0x56}, + {0x5324, CRL_REG_LEN_08BIT, 0x00}, + {0x5325, CRL_REG_LEN_08BIT, 0x85}, + {0x5326, CRL_REG_LEN_08BIT, 0x00}, + {0x5327, CRL_REG_LEN_08BIT, 0x20}, + {0x5609, CRL_REG_LEN_08BIT, 0x01}, + {0x560a, CRL_REG_LEN_08BIT, 0x40}, + {0x560b, CRL_REG_LEN_08BIT, 0x01}, + {0x560c, CRL_REG_LEN_08BIT, 0x40}, + {0x560d, CRL_REG_LEN_08BIT, 0x00}, + {0x560e, CRL_REG_LEN_08BIT, 0xfa}, + {0x560f, CRL_REG_LEN_08BIT, 0x00}, + {0x5610, CRL_REG_LEN_08BIT, 0xfa}, + {0x5611, CRL_REG_LEN_08BIT, 0x02}, + {0x5612, CRL_REG_LEN_08BIT, 0x80}, + {0x5613, CRL_REG_LEN_08BIT, 0x02}, + {0x5614, CRL_REG_LEN_08BIT, 0x80}, + {0x5615, CRL_REG_LEN_08BIT, 0x01}, + {0x5616, CRL_REG_LEN_08BIT, 0x2c}, + {0x5617, CRL_REG_LEN_08BIT, 0x01}, + {0x5618, CRL_REG_LEN_08BIT, 0x2c}, + {0x563b, CRL_REG_LEN_08BIT, 0x01}, + {0x563c, CRL_REG_LEN_08BIT, 0x01}, + {0x563d, CRL_REG_LEN_08BIT, 0x01}, + {0x563e, CRL_REG_LEN_08BIT, 0x01}, + {0x563f, CRL_REG_LEN_08BIT, 0x03}, + {0x5640, CRL_REG_LEN_08BIT, 0x03}, + {0x5641, CRL_REG_LEN_08BIT, 0x03}, + {0x5642, CRL_REG_LEN_08BIT, 0x05}, + {0x5643, CRL_REG_LEN_08BIT, 0x09}, + {0x5644, CRL_REG_LEN_08BIT, 0x05}, + {0x5645, CRL_REG_LEN_08BIT, 0x05}, + {0x5646, CRL_REG_LEN_08BIT, 0x05}, + {0x5647, CRL_REG_LEN_08BIT, 0x05}, + 
{0x5651, CRL_REG_LEN_08BIT, 0x00}, + {0x5652, CRL_REG_LEN_08BIT, 0x80}, + {0x521a, CRL_REG_LEN_08BIT, 0x01}, + {0x521b, CRL_REG_LEN_08BIT, 0x03}, + {0x521c, CRL_REG_LEN_08BIT, 0x06}, + {0x521d, CRL_REG_LEN_08BIT, 0x0a}, + {0x521e, CRL_REG_LEN_08BIT, 0x0e}, + {0x521f, CRL_REG_LEN_08BIT, 0x12}, + {0x5220, CRL_REG_LEN_08BIT, 0x16}, + {0x5223, CRL_REG_LEN_08BIT, 0x02}, + {0x5225, CRL_REG_LEN_08BIT, 0x04}, + {0x5227, CRL_REG_LEN_08BIT, 0x08}, + {0x5229, CRL_REG_LEN_08BIT, 0x0c}, + {0x522b, CRL_REG_LEN_08BIT, 0x12}, + {0x522d, CRL_REG_LEN_08BIT, 0x18}, + {0x522f, CRL_REG_LEN_08BIT, 0x1e}, + {0x5241, CRL_REG_LEN_08BIT, 0x04}, + {0x5242, CRL_REG_LEN_08BIT, 0x01}, + {0x5243, CRL_REG_LEN_08BIT, 0x03}, + {0x5244, CRL_REG_LEN_08BIT, 0x06}, + {0x5245, CRL_REG_LEN_08BIT, 0x0a}, + {0x5246, CRL_REG_LEN_08BIT, 0x0e}, + {0x5247, CRL_REG_LEN_08BIT, 0x12}, + {0x5248, CRL_REG_LEN_08BIT, 0x16}, + {0x524a, CRL_REG_LEN_08BIT, 0x03}, + {0x524c, CRL_REG_LEN_08BIT, 0x04}, + {0x524e, CRL_REG_LEN_08BIT, 0x08}, + {0x5250, CRL_REG_LEN_08BIT, 0x0c}, + {0x5252, CRL_REG_LEN_08BIT, 0x12}, + {0x5254, CRL_REG_LEN_08BIT, 0x18}, + {0x5256, CRL_REG_LEN_08BIT, 0x1e}, + {0x4606, CRL_REG_LEN_08BIT, 0x07}, + {0x4607, CRL_REG_LEN_08BIT, 0x71}, + {0x460a, CRL_REG_LEN_08BIT, 0x03}, + {0x460b, CRL_REG_LEN_08BIT, 0xe7}, + {0x460c, CRL_REG_LEN_08BIT, 0x40}, + {0x4620, CRL_REG_LEN_08BIT, 0x0e}, + {0x4700, CRL_REG_LEN_08BIT, 0x06}, + {0x4701, CRL_REG_LEN_08BIT, 0x00}, + {0x4702, CRL_REG_LEN_08BIT, 0x01}, + {0x4004, CRL_REG_LEN_08BIT, 0x04}, + {0x4005, CRL_REG_LEN_08BIT, 0x18}, + {0x4001, CRL_REG_LEN_08BIT, 0x06}, + {0x4050, CRL_REG_LEN_08BIT, 0x22}, + {0x4051, CRL_REG_LEN_08BIT, 0x24}, + {0x4052, CRL_REG_LEN_08BIT, 0x02}, + {0x4057, CRL_REG_LEN_08BIT, 0x9c}, + {0x405a, CRL_REG_LEN_08BIT, 0x00}, + {0x4302, CRL_REG_LEN_08BIT, 0x03}, + {0x4303, CRL_REG_LEN_08BIT, 0xff}, + {0x4304, CRL_REG_LEN_08BIT, 0x00}, + {0x4305, CRL_REG_LEN_08BIT, 0x10}, + {0x4306, CRL_REG_LEN_08BIT, 0x03}, + {0x4307, CRL_REG_LEN_08BIT, 0xff}, + 
{0x4308, CRL_REG_LEN_08BIT, 0x00}, + {0x4309, CRL_REG_LEN_08BIT, 0x10}, + {0x4202, CRL_REG_LEN_08BIT, 0x02}, + {0x3023, CRL_REG_LEN_08BIT, 0x10}, + {0x0100, CRL_REG_LEN_08BIT, 0x01}, + {0x0100, CRL_REG_LEN_08BIT, 0x01}, + {0x6f10, CRL_REG_LEN_08BIT, 0x07}, + {0x6f11, CRL_REG_LEN_08BIT, 0x82}, + {0x6f12, CRL_REG_LEN_08BIT, 0x04}, + {0x6f13, CRL_REG_LEN_08BIT, 0x00}, + {0x6f14, CRL_REG_LEN_08BIT, 0x1f}, + {0x6f15, CRL_REG_LEN_08BIT, 0xdd}, + {0x6f16, CRL_REG_LEN_08BIT, 0x04}, + {0x6f17, CRL_REG_LEN_08BIT, 0x04}, + {0x6f18, CRL_REG_LEN_08BIT, 0x36}, + {0x6f19, CRL_REG_LEN_08BIT, 0x66}, + {0x6f1a, CRL_REG_LEN_08BIT, 0x04}, + {0x6f1b, CRL_REG_LEN_08BIT, 0x08}, + {0x6f1c, CRL_REG_LEN_08BIT, 0x0c}, + {0x6f1d, CRL_REG_LEN_08BIT, 0xe7}, + {0x6f1e, CRL_REG_LEN_08BIT, 0x04}, + {0x6f1f, CRL_REG_LEN_08BIT, 0x0c}, + {0xd000, CRL_REG_LEN_08BIT, 0x19}, + {0xd001, CRL_REG_LEN_08BIT, 0xa0}, + {0xd002, CRL_REG_LEN_08BIT, 0x00}, + {0xd003, CRL_REG_LEN_08BIT, 0x01}, + {0xd004, CRL_REG_LEN_08BIT, 0xa9}, + {0xd005, CRL_REG_LEN_08BIT, 0xad}, + {0xd006, CRL_REG_LEN_08BIT, 0x10}, + {0xd007, CRL_REG_LEN_08BIT, 0x40}, + {0xd008, CRL_REG_LEN_08BIT, 0x44}, + {0xd009, CRL_REG_LEN_08BIT, 0x00}, + {0xd00a, CRL_REG_LEN_08BIT, 0x68}, + {0xd00b, CRL_REG_LEN_08BIT, 0x00}, + {0xd00c, CRL_REG_LEN_08BIT, 0x15}, + {0xd00d, CRL_REG_LEN_08BIT, 0x00}, + {0xd00e, CRL_REG_LEN_08BIT, 0x00}, + {0xd00f, CRL_REG_LEN_08BIT, 0x00}, + {0xd010, CRL_REG_LEN_08BIT, 0x19}, + {0xd011, CRL_REG_LEN_08BIT, 0xa0}, + {0xd012, CRL_REG_LEN_08BIT, 0x00}, + {0xd013, CRL_REG_LEN_08BIT, 0x01}, + {0xd014, CRL_REG_LEN_08BIT, 0xa9}, + {0xd015, CRL_REG_LEN_08BIT, 0xad}, + {0xd016, CRL_REG_LEN_08BIT, 0x14}, + {0xd017, CRL_REG_LEN_08BIT, 0x40}, + {0xd018, CRL_REG_LEN_08BIT, 0x44}, + {0xd019, CRL_REG_LEN_08BIT, 0x00}, + {0xd01a, CRL_REG_LEN_08BIT, 0x68}, + {0xd01b, CRL_REG_LEN_08BIT, 0x00}, + {0xd01c, CRL_REG_LEN_08BIT, 0x15}, + {0xd01d, CRL_REG_LEN_08BIT, 0x00}, + {0xd01e, CRL_REG_LEN_08BIT, 0x00}, + {0xd01f, CRL_REG_LEN_08BIT, 0x00}, + 
{0xd020, CRL_REG_LEN_08BIT, 0x19}, + {0xd021, CRL_REG_LEN_08BIT, 0xa0}, + {0xd022, CRL_REG_LEN_08BIT, 0x00}, + {0xd023, CRL_REG_LEN_08BIT, 0x01}, + {0xd024, CRL_REG_LEN_08BIT, 0xa9}, + {0xd025, CRL_REG_LEN_08BIT, 0xad}, + {0xd026, CRL_REG_LEN_08BIT, 0x15}, + {0xd027, CRL_REG_LEN_08BIT, 0x28}, + {0xd028, CRL_REG_LEN_08BIT, 0x44}, + {0xd029, CRL_REG_LEN_08BIT, 0x00}, + {0xd02a, CRL_REG_LEN_08BIT, 0x68}, + {0xd02b, CRL_REG_LEN_08BIT, 0x00}, + {0xd02c, CRL_REG_LEN_08BIT, 0x15}, + {0xd02d, CRL_REG_LEN_08BIT, 0x00}, + {0xd02e, CRL_REG_LEN_08BIT, 0x00}, + {0xd02f, CRL_REG_LEN_08BIT, 0x00}, + {0xd030, CRL_REG_LEN_08BIT, 0x19}, + {0xd031, CRL_REG_LEN_08BIT, 0xa0}, + {0xd032, CRL_REG_LEN_08BIT, 0x00}, + {0xd033, CRL_REG_LEN_08BIT, 0x01}, + {0xd034, CRL_REG_LEN_08BIT, 0xa9}, + {0xd035, CRL_REG_LEN_08BIT, 0xad}, + {0xd036, CRL_REG_LEN_08BIT, 0x15}, + {0xd037, CRL_REG_LEN_08BIT, 0x4c}, + {0xd038, CRL_REG_LEN_08BIT, 0x44}, + {0xd039, CRL_REG_LEN_08BIT, 0x00}, + {0xd03a, CRL_REG_LEN_08BIT, 0x68}, + {0xd03b, CRL_REG_LEN_08BIT, 0x00}, + {0xd03c, CRL_REG_LEN_08BIT, 0x15}, + {0xd03d, CRL_REG_LEN_08BIT, 0x00}, + {0xd03e, CRL_REG_LEN_08BIT, 0x00}, + {0xd03f, CRL_REG_LEN_08BIT, 0x00}, + {0xd040, CRL_REG_LEN_08BIT, 0x9c}, + {0xd041, CRL_REG_LEN_08BIT, 0x21}, + {0xd042, CRL_REG_LEN_08BIT, 0xff}, + {0xd043, CRL_REG_LEN_08BIT, 0xe4}, + {0xd044, CRL_REG_LEN_08BIT, 0xd4}, + {0xd045, CRL_REG_LEN_08BIT, 0x01}, + {0xd046, CRL_REG_LEN_08BIT, 0x48}, + {0xd047, CRL_REG_LEN_08BIT, 0x00}, + {0xd048, CRL_REG_LEN_08BIT, 0xd4}, + {0xd049, CRL_REG_LEN_08BIT, 0x01}, + {0xd04a, CRL_REG_LEN_08BIT, 0x50}, + {0xd04b, CRL_REG_LEN_08BIT, 0x04}, + {0xd04c, CRL_REG_LEN_08BIT, 0xd4}, + {0xd04d, CRL_REG_LEN_08BIT, 0x01}, + {0xd04e, CRL_REG_LEN_08BIT, 0x60}, + {0xd04f, CRL_REG_LEN_08BIT, 0x08}, + {0xd050, CRL_REG_LEN_08BIT, 0xd4}, + {0xd051, CRL_REG_LEN_08BIT, 0x01}, + {0xd052, CRL_REG_LEN_08BIT, 0x70}, + {0xd053, CRL_REG_LEN_08BIT, 0x0c}, + {0xd054, CRL_REG_LEN_08BIT, 0xd4}, + {0xd055, CRL_REG_LEN_08BIT, 0x01}, + 
{0xd056, CRL_REG_LEN_08BIT, 0x80}, + {0xd057, CRL_REG_LEN_08BIT, 0x10}, + {0xd058, CRL_REG_LEN_08BIT, 0x19}, + {0xd059, CRL_REG_LEN_08BIT, 0xc0}, + {0xd05a, CRL_REG_LEN_08BIT, 0x00}, + {0xd05b, CRL_REG_LEN_08BIT, 0x01}, + {0xd05c, CRL_REG_LEN_08BIT, 0xa9}, + {0xd05d, CRL_REG_LEN_08BIT, 0xce}, + {0xd05e, CRL_REG_LEN_08BIT, 0x02}, + {0xd05f, CRL_REG_LEN_08BIT, 0xa4}, + {0xd060, CRL_REG_LEN_08BIT, 0x9c}, + {0xd061, CRL_REG_LEN_08BIT, 0xa0}, + {0xd062, CRL_REG_LEN_08BIT, 0x00}, + {0xd063, CRL_REG_LEN_08BIT, 0x00}, + {0xd064, CRL_REG_LEN_08BIT, 0x84}, + {0xd065, CRL_REG_LEN_08BIT, 0x6e}, + {0xd066, CRL_REG_LEN_08BIT, 0x00}, + {0xd067, CRL_REG_LEN_08BIT, 0x00}, + {0xd068, CRL_REG_LEN_08BIT, 0xd8}, + {0xd069, CRL_REG_LEN_08BIT, 0x03}, + {0xd06a, CRL_REG_LEN_08BIT, 0x28}, + {0xd06b, CRL_REG_LEN_08BIT, 0x76}, + {0xd06c, CRL_REG_LEN_08BIT, 0x1a}, + {0xd06d, CRL_REG_LEN_08BIT, 0x00}, + {0xd06e, CRL_REG_LEN_08BIT, 0x00}, + {0xd06f, CRL_REG_LEN_08BIT, 0x01}, + {0xd070, CRL_REG_LEN_08BIT, 0xaa}, + {0xd071, CRL_REG_LEN_08BIT, 0x10}, + {0xd072, CRL_REG_LEN_08BIT, 0x03}, + {0xd073, CRL_REG_LEN_08BIT, 0xf0}, + {0xd074, CRL_REG_LEN_08BIT, 0x18}, + {0xd075, CRL_REG_LEN_08BIT, 0x60}, + {0xd076, CRL_REG_LEN_08BIT, 0x00}, + {0xd077, CRL_REG_LEN_08BIT, 0x01}, + {0xd078, CRL_REG_LEN_08BIT, 0xa8}, + {0xd079, CRL_REG_LEN_08BIT, 0x63}, + {0xd07a, CRL_REG_LEN_08BIT, 0x07}, + {0xd07b, CRL_REG_LEN_08BIT, 0x80}, + {0xd07c, CRL_REG_LEN_08BIT, 0xe0}, + {0xd07d, CRL_REG_LEN_08BIT, 0xa0}, + {0xd07e, CRL_REG_LEN_08BIT, 0x00}, + {0xd07f, CRL_REG_LEN_08BIT, 0x04}, + {0xd080, CRL_REG_LEN_08BIT, 0x18}, + {0xd081, CRL_REG_LEN_08BIT, 0xc0}, + {0xd082, CRL_REG_LEN_08BIT, 0x00}, + {0xd083, CRL_REG_LEN_08BIT, 0x00}, + {0xd084, CRL_REG_LEN_08BIT, 0xa8}, + {0xd085, CRL_REG_LEN_08BIT, 0xc6}, + {0xd086, CRL_REG_LEN_08BIT, 0x00}, + {0xd087, CRL_REG_LEN_08BIT, 0x00}, + {0xd088, CRL_REG_LEN_08BIT, 0x8c}, + {0xd089, CRL_REG_LEN_08BIT, 0x63}, + {0xd08a, CRL_REG_LEN_08BIT, 0x00}, + {0xd08b, CRL_REG_LEN_08BIT, 0x00}, + 
{0xd08c, CRL_REG_LEN_08BIT, 0xd4}, + {0xd08d, CRL_REG_LEN_08BIT, 0x01}, + {0xd08e, CRL_REG_LEN_08BIT, 0x28}, + {0xd08f, CRL_REG_LEN_08BIT, 0x14}, + {0xd090, CRL_REG_LEN_08BIT, 0xd4}, + {0xd091, CRL_REG_LEN_08BIT, 0x01}, + {0xd092, CRL_REG_LEN_08BIT, 0x30}, + {0xd093, CRL_REG_LEN_08BIT, 0x18}, + {0xd094, CRL_REG_LEN_08BIT, 0x07}, + {0xd095, CRL_REG_LEN_08BIT, 0xff}, + {0xd096, CRL_REG_LEN_08BIT, 0xf8}, + {0xd097, CRL_REG_LEN_08BIT, 0xfd}, + {0xd098, CRL_REG_LEN_08BIT, 0x9c}, + {0xd099, CRL_REG_LEN_08BIT, 0x80}, + {0xd09a, CRL_REG_LEN_08BIT, 0x00}, + {0xd09b, CRL_REG_LEN_08BIT, 0x03}, + {0xd09c, CRL_REG_LEN_08BIT, 0xa5}, + {0xd09d, CRL_REG_LEN_08BIT, 0x6b}, + {0xd09e, CRL_REG_LEN_08BIT, 0x00}, + {0xd09f, CRL_REG_LEN_08BIT, 0xff}, + {0xd0a0, CRL_REG_LEN_08BIT, 0x18}, + {0xd0a1, CRL_REG_LEN_08BIT, 0xc0}, + {0xd0a2, CRL_REG_LEN_08BIT, 0x00}, + {0xd0a3, CRL_REG_LEN_08BIT, 0x01}, + {0xd0a4, CRL_REG_LEN_08BIT, 0xa8}, + {0xd0a5, CRL_REG_LEN_08BIT, 0xc6}, + {0xd0a6, CRL_REG_LEN_08BIT, 0x01}, + {0xd0a7, CRL_REG_LEN_08BIT, 0x02}, + {0xd0a8, CRL_REG_LEN_08BIT, 0xe1}, + {0xd0a9, CRL_REG_LEN_08BIT, 0x6b}, + {0xd0aa, CRL_REG_LEN_08BIT, 0x58}, + {0xd0ab, CRL_REG_LEN_08BIT, 0x00}, + {0xd0ac, CRL_REG_LEN_08BIT, 0x84}, + {0xd0ad, CRL_REG_LEN_08BIT, 0x8e}, + {0xd0ae, CRL_REG_LEN_08BIT, 0x00}, + {0xd0af, CRL_REG_LEN_08BIT, 0x00}, + {0xd0b0, CRL_REG_LEN_08BIT, 0xe1}, + {0xd0b1, CRL_REG_LEN_08BIT, 0x6b}, + {0xd0b2, CRL_REG_LEN_08BIT, 0x30}, + {0xd0b3, CRL_REG_LEN_08BIT, 0x00}, + {0xd0b4, CRL_REG_LEN_08BIT, 0x98}, + {0xd0b5, CRL_REG_LEN_08BIT, 0xb0}, + {0xd0b6, CRL_REG_LEN_08BIT, 0x00}, + {0xd0b7, CRL_REG_LEN_08BIT, 0x00}, + {0xd0b8, CRL_REG_LEN_08BIT, 0x8c}, + {0xd0b9, CRL_REG_LEN_08BIT, 0x64}, + {0xd0ba, CRL_REG_LEN_08BIT, 0x00}, + {0xd0bb, CRL_REG_LEN_08BIT, 0x6e}, + {0xd0bc, CRL_REG_LEN_08BIT, 0xe5}, + {0xd0bd, CRL_REG_LEN_08BIT, 0xa5}, + {0xd0be, CRL_REG_LEN_08BIT, 0x18}, + {0xd0bf, CRL_REG_LEN_08BIT, 0x00}, + {0xd0c0, CRL_REG_LEN_08BIT, 0x10}, + {0xd0c1, CRL_REG_LEN_08BIT, 0x00}, + 
{0xd0c2, CRL_REG_LEN_08BIT, 0x00}, + {0xd0c3, CRL_REG_LEN_08BIT, 0x06}, + {0xd0c4, CRL_REG_LEN_08BIT, 0x95}, + {0xd0c5, CRL_REG_LEN_08BIT, 0x8b}, + {0xd0c6, CRL_REG_LEN_08BIT, 0x00}, + {0xd0c7, CRL_REG_LEN_08BIT, 0x00}, + {0xd0c8, CRL_REG_LEN_08BIT, 0x94}, + {0xd0c9, CRL_REG_LEN_08BIT, 0xa4}, + {0xd0ca, CRL_REG_LEN_08BIT, 0x00}, + {0xd0cb, CRL_REG_LEN_08BIT, 0x70}, + {0xd0cc, CRL_REG_LEN_08BIT, 0xe5}, + {0xd0cd, CRL_REG_LEN_08BIT, 0x65}, + {0xd0ce, CRL_REG_LEN_08BIT, 0x60}, + {0xd0cf, CRL_REG_LEN_08BIT, 0x00}, + {0xd0d0, CRL_REG_LEN_08BIT, 0x0c}, + {0xd0d1, CRL_REG_LEN_08BIT, 0x00}, + {0xd0d2, CRL_REG_LEN_08BIT, 0x00}, + {0xd0d3, CRL_REG_LEN_08BIT, 0x62}, + {0xd0d4, CRL_REG_LEN_08BIT, 0x15}, + {0xd0d5, CRL_REG_LEN_08BIT, 0x00}, + {0xd0d6, CRL_REG_LEN_08BIT, 0x00}, + {0xd0d7, CRL_REG_LEN_08BIT, 0x00}, + {0xd0d8, CRL_REG_LEN_08BIT, 0x18}, + {0xd0d9, CRL_REG_LEN_08BIT, 0x60}, + {0xd0da, CRL_REG_LEN_08BIT, 0x80}, + {0xd0db, CRL_REG_LEN_08BIT, 0x06}, + {0xd0dc, CRL_REG_LEN_08BIT, 0xa8}, + {0xd0dd, CRL_REG_LEN_08BIT, 0x83}, + {0xd0de, CRL_REG_LEN_08BIT, 0x38}, + {0xd0df, CRL_REG_LEN_08BIT, 0x29}, + {0xd0e0, CRL_REG_LEN_08BIT, 0xa8}, + {0xd0e1, CRL_REG_LEN_08BIT, 0xe3}, + {0xd0e2, CRL_REG_LEN_08BIT, 0x40}, + {0xd0e3, CRL_REG_LEN_08BIT, 0x08}, + {0xd0e4, CRL_REG_LEN_08BIT, 0x8c}, + {0xd0e5, CRL_REG_LEN_08BIT, 0x84}, + {0xd0e6, CRL_REG_LEN_08BIT, 0x00}, + {0xd0e7, CRL_REG_LEN_08BIT, 0x00}, + {0xd0e8, CRL_REG_LEN_08BIT, 0xa8}, + {0xd0e9, CRL_REG_LEN_08BIT, 0xa3}, + {0xd0ea, CRL_REG_LEN_08BIT, 0x40}, + {0xd0eb, CRL_REG_LEN_08BIT, 0x09}, + {0xd0ec, CRL_REG_LEN_08BIT, 0xa8}, + {0xd0ed, CRL_REG_LEN_08BIT, 0xc3}, + {0xd0ee, CRL_REG_LEN_08BIT, 0x38}, + {0xd0ef, CRL_REG_LEN_08BIT, 0x2a}, + {0xd0f0, CRL_REG_LEN_08BIT, 0xd8}, + {0xd0f1, CRL_REG_LEN_08BIT, 0x07}, + {0xd0f2, CRL_REG_LEN_08BIT, 0x20}, + {0xd0f3, CRL_REG_LEN_08BIT, 0x00}, + {0xd0f4, CRL_REG_LEN_08BIT, 0x8c}, + {0xd0f5, CRL_REG_LEN_08BIT, 0x66}, + {0xd0f6, CRL_REG_LEN_08BIT, 0x00}, + {0xd0f7, CRL_REG_LEN_08BIT, 0x00}, + 
{0xd0f8, CRL_REG_LEN_08BIT, 0xd8}, + {0xd0f9, CRL_REG_LEN_08BIT, 0x05}, + {0xd0fa, CRL_REG_LEN_08BIT, 0x18}, + {0xd0fb, CRL_REG_LEN_08BIT, 0x00}, + {0xd0fc, CRL_REG_LEN_08BIT, 0x18}, + {0xd0fd, CRL_REG_LEN_08BIT, 0x60}, + {0xd0fe, CRL_REG_LEN_08BIT, 0x00}, + {0xd0ff, CRL_REG_LEN_08BIT, 0x01}, + {0xd100, CRL_REG_LEN_08BIT, 0x98}, + {0xd101, CRL_REG_LEN_08BIT, 0x90}, + {0xd102, CRL_REG_LEN_08BIT, 0x00}, + {0xd103, CRL_REG_LEN_08BIT, 0x00}, + {0xd104, CRL_REG_LEN_08BIT, 0x84}, + {0xd105, CRL_REG_LEN_08BIT, 0xae}, + {0xd106, CRL_REG_LEN_08BIT, 0x00}, + {0xd107, CRL_REG_LEN_08BIT, 0x00}, + {0xd108, CRL_REG_LEN_08BIT, 0xa8}, + {0xd109, CRL_REG_LEN_08BIT, 0x63}, + {0xd10a, CRL_REG_LEN_08BIT, 0x06}, + {0xd10b, CRL_REG_LEN_08BIT, 0x4c}, + {0xd10c, CRL_REG_LEN_08BIT, 0x9c}, + {0xd10d, CRL_REG_LEN_08BIT, 0xc0}, + {0xd10e, CRL_REG_LEN_08BIT, 0x00}, + {0xd10f, CRL_REG_LEN_08BIT, 0x00}, + {0xd110, CRL_REG_LEN_08BIT, 0xd8}, + {0xd111, CRL_REG_LEN_08BIT, 0x03}, + {0xd112, CRL_REG_LEN_08BIT, 0x30}, + {0xd113, CRL_REG_LEN_08BIT, 0x00}, + {0xd114, CRL_REG_LEN_08BIT, 0x8c}, + {0xd115, CRL_REG_LEN_08BIT, 0x65}, + {0xd116, CRL_REG_LEN_08BIT, 0x00}, + {0xd117, CRL_REG_LEN_08BIT, 0x6e}, + {0xd118, CRL_REG_LEN_08BIT, 0xe5}, + {0xd119, CRL_REG_LEN_08BIT, 0x84}, + {0xd11a, CRL_REG_LEN_08BIT, 0x18}, + {0xd11b, CRL_REG_LEN_08BIT, 0x00}, + {0xd11c, CRL_REG_LEN_08BIT, 0x10}, + {0xd11d, CRL_REG_LEN_08BIT, 0x00}, + {0xd11e, CRL_REG_LEN_08BIT, 0x00}, + {0xd11f, CRL_REG_LEN_08BIT, 0x07}, + {0xd120, CRL_REG_LEN_08BIT, 0x18}, + {0xd121, CRL_REG_LEN_08BIT, 0x80}, + {0xd122, CRL_REG_LEN_08BIT, 0x80}, + {0xd123, CRL_REG_LEN_08BIT, 0x06}, + {0xd124, CRL_REG_LEN_08BIT, 0x94}, + {0xd125, CRL_REG_LEN_08BIT, 0x65}, + {0xd126, CRL_REG_LEN_08BIT, 0x00}, + {0xd127, CRL_REG_LEN_08BIT, 0x70}, + {0xd128, CRL_REG_LEN_08BIT, 0xe5}, + {0xd129, CRL_REG_LEN_08BIT, 0x43}, + {0xd12a, CRL_REG_LEN_08BIT, 0x60}, + {0xd12b, CRL_REG_LEN_08BIT, 0x00}, + {0xd12c, CRL_REG_LEN_08BIT, 0x0c}, + {0xd12d, CRL_REG_LEN_08BIT, 0x00}, + 
{0xd12e, CRL_REG_LEN_08BIT, 0x00}, + {0xd12f, CRL_REG_LEN_08BIT, 0x3e}, + {0xd130, CRL_REG_LEN_08BIT, 0xa8}, + {0xd131, CRL_REG_LEN_08BIT, 0x64}, + {0xd132, CRL_REG_LEN_08BIT, 0x38}, + {0xd133, CRL_REG_LEN_08BIT, 0x24}, + {0xd134, CRL_REG_LEN_08BIT, 0x18}, + {0xd135, CRL_REG_LEN_08BIT, 0x80}, + {0xd136, CRL_REG_LEN_08BIT, 0x80}, + {0xd137, CRL_REG_LEN_08BIT, 0x06}, + {0xd138, CRL_REG_LEN_08BIT, 0xa8}, + {0xd139, CRL_REG_LEN_08BIT, 0x64}, + {0xd13a, CRL_REG_LEN_08BIT, 0x38}, + {0xd13b, CRL_REG_LEN_08BIT, 0x24}, + {0xd13c, CRL_REG_LEN_08BIT, 0x8c}, + {0xd13d, CRL_REG_LEN_08BIT, 0x63}, + {0xd13e, CRL_REG_LEN_08BIT, 0x00}, + {0xd13f, CRL_REG_LEN_08BIT, 0x00}, + {0xd140, CRL_REG_LEN_08BIT, 0xa4}, + {0xd141, CRL_REG_LEN_08BIT, 0x63}, + {0xd142, CRL_REG_LEN_08BIT, 0x00}, + {0xd143, CRL_REG_LEN_08BIT, 0x40}, + {0xd144, CRL_REG_LEN_08BIT, 0xbc}, + {0xd145, CRL_REG_LEN_08BIT, 0x23}, + {0xd146, CRL_REG_LEN_08BIT, 0x00}, + {0xd147, CRL_REG_LEN_08BIT, 0x00}, + {0xd148, CRL_REG_LEN_08BIT, 0x0c}, + {0xd149, CRL_REG_LEN_08BIT, 0x00}, + {0xd14a, CRL_REG_LEN_08BIT, 0x00}, + {0xd14b, CRL_REG_LEN_08BIT, 0x2a}, + {0xd14c, CRL_REG_LEN_08BIT, 0xa8}, + {0xd14d, CRL_REG_LEN_08BIT, 0x64}, + {0xd14e, CRL_REG_LEN_08BIT, 0x6e}, + {0xd14f, CRL_REG_LEN_08BIT, 0x44}, + {0xd150, CRL_REG_LEN_08BIT, 0x19}, + {0xd151, CRL_REG_LEN_08BIT, 0x00}, + {0xd152, CRL_REG_LEN_08BIT, 0x80}, + {0xd153, CRL_REG_LEN_08BIT, 0x06}, + {0xd154, CRL_REG_LEN_08BIT, 0xa8}, + {0xd155, CRL_REG_LEN_08BIT, 0xe8}, + {0xd156, CRL_REG_LEN_08BIT, 0x3d}, + {0xd157, CRL_REG_LEN_08BIT, 0x05}, + {0xd158, CRL_REG_LEN_08BIT, 0x8c}, + {0xd159, CRL_REG_LEN_08BIT, 0x67}, + {0xd15a, CRL_REG_LEN_08BIT, 0x00}, + {0xd15b, CRL_REG_LEN_08BIT, 0x00}, + {0xd15c, CRL_REG_LEN_08BIT, 0xb8}, + {0xd15d, CRL_REG_LEN_08BIT, 0x63}, + {0xd15e, CRL_REG_LEN_08BIT, 0x00}, + {0xd15f, CRL_REG_LEN_08BIT, 0x18}, + {0xd160, CRL_REG_LEN_08BIT, 0xb8}, + {0xd161, CRL_REG_LEN_08BIT, 0x63}, + {0xd162, CRL_REG_LEN_08BIT, 0x00}, + {0xd163, CRL_REG_LEN_08BIT, 0x98}, + 
{0xd164, CRL_REG_LEN_08BIT, 0xbc}, + {0xd165, CRL_REG_LEN_08BIT, 0x03}, + {0xd166, CRL_REG_LEN_08BIT, 0x00}, + {0xd167, CRL_REG_LEN_08BIT, 0x00}, + {0xd168, CRL_REG_LEN_08BIT, 0x10}, + {0xd169, CRL_REG_LEN_08BIT, 0x00}, + {0xd16a, CRL_REG_LEN_08BIT, 0x00}, + {0xd16b, CRL_REG_LEN_08BIT, 0x10}, + {0xd16c, CRL_REG_LEN_08BIT, 0xa9}, + {0xd16d, CRL_REG_LEN_08BIT, 0x48}, + {0xd16e, CRL_REG_LEN_08BIT, 0x67}, + {0xd16f, CRL_REG_LEN_08BIT, 0x02}, + {0xd170, CRL_REG_LEN_08BIT, 0xb8}, + {0xd171, CRL_REG_LEN_08BIT, 0xa3}, + {0xd172, CRL_REG_LEN_08BIT, 0x00}, + {0xd173, CRL_REG_LEN_08BIT, 0x19}, + {0xd174, CRL_REG_LEN_08BIT, 0x8c}, + {0xd175, CRL_REG_LEN_08BIT, 0x8a}, + {0xd176, CRL_REG_LEN_08BIT, 0x00}, + {0xd177, CRL_REG_LEN_08BIT, 0x00}, + {0xd178, CRL_REG_LEN_08BIT, 0xa9}, + {0xd179, CRL_REG_LEN_08BIT, 0x68}, + {0xd17a, CRL_REG_LEN_08BIT, 0x67}, + {0xd17b, CRL_REG_LEN_08BIT, 0x03}, + {0xd17c, CRL_REG_LEN_08BIT, 0xb8}, + {0xd17d, CRL_REG_LEN_08BIT, 0xc4}, + {0xd17e, CRL_REG_LEN_08BIT, 0x00}, + {0xd17f, CRL_REG_LEN_08BIT, 0x08}, + {0xd180, CRL_REG_LEN_08BIT, 0x8c}, + {0xd181, CRL_REG_LEN_08BIT, 0x6b}, + {0xd182, CRL_REG_LEN_08BIT, 0x00}, + {0xd183, CRL_REG_LEN_08BIT, 0x00}, + {0xd184, CRL_REG_LEN_08BIT, 0xb8}, + {0xd185, CRL_REG_LEN_08BIT, 0x85}, + {0xd186, CRL_REG_LEN_08BIT, 0x00}, + {0xd187, CRL_REG_LEN_08BIT, 0x98}, + {0xd188, CRL_REG_LEN_08BIT, 0xe0}, + {0xd189, CRL_REG_LEN_08BIT, 0x63}, + {0xd18a, CRL_REG_LEN_08BIT, 0x30}, + {0xd18b, CRL_REG_LEN_08BIT, 0x04}, + {0xd18c, CRL_REG_LEN_08BIT, 0xe0}, + {0xd18d, CRL_REG_LEN_08BIT, 0x64}, + {0xd18e, CRL_REG_LEN_08BIT, 0x18}, + {0xd18f, CRL_REG_LEN_08BIT, 0x00}, + {0xd190, CRL_REG_LEN_08BIT, 0xa4}, + {0xd191, CRL_REG_LEN_08BIT, 0x83}, + {0xd192, CRL_REG_LEN_08BIT, 0xff}, + {0xd193, CRL_REG_LEN_08BIT, 0xff}, + {0xd194, CRL_REG_LEN_08BIT, 0xb8}, + {0xd195, CRL_REG_LEN_08BIT, 0x64}, + {0xd196, CRL_REG_LEN_08BIT, 0x00}, + {0xd197, CRL_REG_LEN_08BIT, 0x48}, + {0xd198, CRL_REG_LEN_08BIT, 0xd8}, + {0xd199, CRL_REG_LEN_08BIT, 0x0a}, + 
{0xd19a, CRL_REG_LEN_08BIT, 0x18}, + {0xd19b, CRL_REG_LEN_08BIT, 0x00}, + {0xd19c, CRL_REG_LEN_08BIT, 0xd8}, + {0xd19d, CRL_REG_LEN_08BIT, 0x0b}, + {0xd19e, CRL_REG_LEN_08BIT, 0x20}, + {0xd19f, CRL_REG_LEN_08BIT, 0x00}, + {0xd1a0, CRL_REG_LEN_08BIT, 0x9c}, + {0xd1a1, CRL_REG_LEN_08BIT, 0x60}, + {0xd1a2, CRL_REG_LEN_08BIT, 0x00}, + {0xd1a3, CRL_REG_LEN_08BIT, 0x00}, + {0xd1a4, CRL_REG_LEN_08BIT, 0xd8}, + {0xd1a5, CRL_REG_LEN_08BIT, 0x07}, + {0xd1a6, CRL_REG_LEN_08BIT, 0x18}, + {0xd1a7, CRL_REG_LEN_08BIT, 0x00}, + {0xd1a8, CRL_REG_LEN_08BIT, 0xa8}, + {0xd1a9, CRL_REG_LEN_08BIT, 0x68}, + {0xd1aa, CRL_REG_LEN_08BIT, 0x38}, + {0xd1ab, CRL_REG_LEN_08BIT, 0x22}, + {0xd1ac, CRL_REG_LEN_08BIT, 0x9c}, + {0xd1ad, CRL_REG_LEN_08BIT, 0x80}, + {0xd1ae, CRL_REG_LEN_08BIT, 0x00}, + {0xd1af, CRL_REG_LEN_08BIT, 0x70}, + {0xd1b0, CRL_REG_LEN_08BIT, 0xa8}, + {0xd1b1, CRL_REG_LEN_08BIT, 0xe8}, + {0xd1b2, CRL_REG_LEN_08BIT, 0x38}, + {0xd1b3, CRL_REG_LEN_08BIT, 0x43}, + {0xd1b4, CRL_REG_LEN_08BIT, 0xd8}, + {0xd1b5, CRL_REG_LEN_08BIT, 0x03}, + {0xd1b6, CRL_REG_LEN_08BIT, 0x20}, + {0xd1b7, CRL_REG_LEN_08BIT, 0x00}, + {0xd1b8, CRL_REG_LEN_08BIT, 0x9c}, + {0xd1b9, CRL_REG_LEN_08BIT, 0xa0}, + {0xd1ba, CRL_REG_LEN_08BIT, 0x00}, + {0xd1bb, CRL_REG_LEN_08BIT, 0x00}, + {0xd1bc, CRL_REG_LEN_08BIT, 0xa8}, + {0xd1bd, CRL_REG_LEN_08BIT, 0xc8}, + {0xd1be, CRL_REG_LEN_08BIT, 0x38}, + {0xd1bf, CRL_REG_LEN_08BIT, 0x42}, + {0xd1c0, CRL_REG_LEN_08BIT, 0x8c}, + {0xd1c1, CRL_REG_LEN_08BIT, 0x66}, + {0xd1c2, CRL_REG_LEN_08BIT, 0x00}, + {0xd1c3, CRL_REG_LEN_08BIT, 0x00}, + {0xd1c4, CRL_REG_LEN_08BIT, 0x9c}, + {0xd1c5, CRL_REG_LEN_08BIT, 0xa5}, + {0xd1c6, CRL_REG_LEN_08BIT, 0x00}, + {0xd1c7, CRL_REG_LEN_08BIT, 0x01}, + {0xd1c8, CRL_REG_LEN_08BIT, 0xb8}, + {0xd1c9, CRL_REG_LEN_08BIT, 0x83}, + {0xd1ca, CRL_REG_LEN_08BIT, 0x00}, + {0xd1cb, CRL_REG_LEN_08BIT, 0x08}, + {0xd1cc, CRL_REG_LEN_08BIT, 0xa4}, + {0xd1cd, CRL_REG_LEN_08BIT, 0xa5}, + {0xd1ce, CRL_REG_LEN_08BIT, 0x00}, + {0xd1cf, CRL_REG_LEN_08BIT, 0xff}, + 
{0xd1d0, CRL_REG_LEN_08BIT, 0x8c}, + {0xd1d1, CRL_REG_LEN_08BIT, 0x67}, + {0xd1d2, CRL_REG_LEN_08BIT, 0x00}, + {0xd1d3, CRL_REG_LEN_08BIT, 0x00}, + {0xd1d4, CRL_REG_LEN_08BIT, 0xe0}, + {0xd1d5, CRL_REG_LEN_08BIT, 0x63}, + {0xd1d6, CRL_REG_LEN_08BIT, 0x20}, + {0xd1d7, CRL_REG_LEN_08BIT, 0x00}, + {0xd1d8, CRL_REG_LEN_08BIT, 0xa4}, + {0xd1d9, CRL_REG_LEN_08BIT, 0x63}, + {0xd1da, CRL_REG_LEN_08BIT, 0xff}, + {0xd1db, CRL_REG_LEN_08BIT, 0xff}, + {0xd1dc, CRL_REG_LEN_08BIT, 0xbc}, + {0xd1dd, CRL_REG_LEN_08BIT, 0x43}, + {0xd1de, CRL_REG_LEN_08BIT, 0x00}, + {0xd1df, CRL_REG_LEN_08BIT, 0x07}, + {0xd1e0, CRL_REG_LEN_08BIT, 0x0c}, + {0xd1e1, CRL_REG_LEN_08BIT, 0x00}, + {0xd1e2, CRL_REG_LEN_08BIT, 0x00}, + {0xd1e3, CRL_REG_LEN_08BIT, 0x5b}, + {0xd1e4, CRL_REG_LEN_08BIT, 0xbc}, + {0xd1e5, CRL_REG_LEN_08BIT, 0x05}, + {0xd1e6, CRL_REG_LEN_08BIT, 0x00}, + {0xd1e7, CRL_REG_LEN_08BIT, 0x02}, + {0xd1e8, CRL_REG_LEN_08BIT, 0x03}, + {0xd1e9, CRL_REG_LEN_08BIT, 0xff}, + {0xd1ea, CRL_REG_LEN_08BIT, 0xff}, + {0xd1eb, CRL_REG_LEN_08BIT, 0xf6}, + {0xd1ec, CRL_REG_LEN_08BIT, 0x9c}, + {0xd1ed, CRL_REG_LEN_08BIT, 0xa0}, + {0xd1ee, CRL_REG_LEN_08BIT, 0x00}, + {0xd1ef, CRL_REG_LEN_08BIT, 0x00}, + {0xd1f0, CRL_REG_LEN_08BIT, 0xa8}, + {0xd1f1, CRL_REG_LEN_08BIT, 0xa4}, + {0xd1f2, CRL_REG_LEN_08BIT, 0x55}, + {0xd1f3, CRL_REG_LEN_08BIT, 0x86}, + {0xd1f4, CRL_REG_LEN_08BIT, 0x8c}, + {0xd1f5, CRL_REG_LEN_08BIT, 0x63}, + {0xd1f6, CRL_REG_LEN_08BIT, 0x00}, + {0xd1f7, CRL_REG_LEN_08BIT, 0x00}, + {0xd1f8, CRL_REG_LEN_08BIT, 0xa8}, + {0xd1f9, CRL_REG_LEN_08BIT, 0xc4}, + {0xd1fa, CRL_REG_LEN_08BIT, 0x6e}, + {0xd1fb, CRL_REG_LEN_08BIT, 0x45}, + {0xd1fc, CRL_REG_LEN_08BIT, 0xa8}, + {0xd1fd, CRL_REG_LEN_08BIT, 0xe4}, + {0xd1fe, CRL_REG_LEN_08BIT, 0x55}, + {0xd1ff, CRL_REG_LEN_08BIT, 0x87}, + {0xd200, CRL_REG_LEN_08BIT, 0xd8}, + {0xd201, CRL_REG_LEN_08BIT, 0x05}, + {0xd202, CRL_REG_LEN_08BIT, 0x18}, + {0xd203, CRL_REG_LEN_08BIT, 0x00}, + {0xd204, CRL_REG_LEN_08BIT, 0x8c}, + {0xd205, CRL_REG_LEN_08BIT, 0x66}, + 
{0xd206, CRL_REG_LEN_08BIT, 0x00}, + {0xd207, CRL_REG_LEN_08BIT, 0x00}, + {0xd208, CRL_REG_LEN_08BIT, 0xa8}, + {0xd209, CRL_REG_LEN_08BIT, 0xa4}, + {0xd20a, CRL_REG_LEN_08BIT, 0x6e}, + {0xd20b, CRL_REG_LEN_08BIT, 0x46}, + {0xd20c, CRL_REG_LEN_08BIT, 0xd8}, + {0xd20d, CRL_REG_LEN_08BIT, 0x07}, + {0xd20e, CRL_REG_LEN_08BIT, 0x18}, + {0xd20f, CRL_REG_LEN_08BIT, 0x00}, + {0xd210, CRL_REG_LEN_08BIT, 0xa8}, + {0xd211, CRL_REG_LEN_08BIT, 0x84}, + {0xd212, CRL_REG_LEN_08BIT, 0x55}, + {0xd213, CRL_REG_LEN_08BIT, 0x88}, + {0xd214, CRL_REG_LEN_08BIT, 0x8c}, + {0xd215, CRL_REG_LEN_08BIT, 0x65}, + {0xd216, CRL_REG_LEN_08BIT, 0x00}, + {0xd217, CRL_REG_LEN_08BIT, 0x00}, + {0xd218, CRL_REG_LEN_08BIT, 0xd8}, + {0xd219, CRL_REG_LEN_08BIT, 0x04}, + {0xd21a, CRL_REG_LEN_08BIT, 0x18}, + {0xd21b, CRL_REG_LEN_08BIT, 0x00}, + {0xd21c, CRL_REG_LEN_08BIT, 0x03}, + {0xd21d, CRL_REG_LEN_08BIT, 0xff}, + {0xd21e, CRL_REG_LEN_08BIT, 0xff}, + {0xd21f, CRL_REG_LEN_08BIT, 0xce}, + {0xd220, CRL_REG_LEN_08BIT, 0x19}, + {0xd221, CRL_REG_LEN_08BIT, 0x00}, + {0xd222, CRL_REG_LEN_08BIT, 0x80}, + {0xd223, CRL_REG_LEN_08BIT, 0x06}, + {0xd224, CRL_REG_LEN_08BIT, 0x8c}, + {0xd225, CRL_REG_LEN_08BIT, 0x63}, + {0xd226, CRL_REG_LEN_08BIT, 0x00}, + {0xd227, CRL_REG_LEN_08BIT, 0x00}, + {0xd228, CRL_REG_LEN_08BIT, 0xa4}, + {0xd229, CRL_REG_LEN_08BIT, 0x63}, + {0xd22a, CRL_REG_LEN_08BIT, 0x00}, + {0xd22b, CRL_REG_LEN_08BIT, 0x40}, + {0xd22c, CRL_REG_LEN_08BIT, 0xbc}, + {0xd22d, CRL_REG_LEN_08BIT, 0x23}, + {0xd22e, CRL_REG_LEN_08BIT, 0x00}, + {0xd22f, CRL_REG_LEN_08BIT, 0x00}, + {0xd230, CRL_REG_LEN_08BIT, 0x13}, + {0xd231, CRL_REG_LEN_08BIT, 0xff}, + {0xd232, CRL_REG_LEN_08BIT, 0xff}, + {0xd233, CRL_REG_LEN_08BIT, 0xc8}, + {0xd234, CRL_REG_LEN_08BIT, 0x9d}, + {0xd235, CRL_REG_LEN_08BIT, 0x00}, + {0xd236, CRL_REG_LEN_08BIT, 0x00}, + {0xd237, CRL_REG_LEN_08BIT, 0x40}, + {0xd238, CRL_REG_LEN_08BIT, 0xa8}, + {0xd239, CRL_REG_LEN_08BIT, 0x64}, + {0xd23a, CRL_REG_LEN_08BIT, 0x55}, + {0xd23b, CRL_REG_LEN_08BIT, 0x86}, + 
{0xd23c, CRL_REG_LEN_08BIT, 0xa8}, + {0xd23d, CRL_REG_LEN_08BIT, 0xa4}, + {0xd23e, CRL_REG_LEN_08BIT, 0x55}, + {0xd23f, CRL_REG_LEN_08BIT, 0x87}, + {0xd240, CRL_REG_LEN_08BIT, 0xd8}, + {0xd241, CRL_REG_LEN_08BIT, 0x03}, + {0xd242, CRL_REG_LEN_08BIT, 0x40}, + {0xd243, CRL_REG_LEN_08BIT, 0x00}, + {0xd244, CRL_REG_LEN_08BIT, 0xa8}, + {0xd245, CRL_REG_LEN_08BIT, 0x64}, + {0xd246, CRL_REG_LEN_08BIT, 0x55}, + {0xd247, CRL_REG_LEN_08BIT, 0x88}, + {0xd248, CRL_REG_LEN_08BIT, 0xd8}, + {0xd249, CRL_REG_LEN_08BIT, 0x05}, + {0xd24a, CRL_REG_LEN_08BIT, 0x40}, + {0xd24b, CRL_REG_LEN_08BIT, 0x00}, + {0xd24c, CRL_REG_LEN_08BIT, 0xd8}, + {0xd24d, CRL_REG_LEN_08BIT, 0x03}, + {0xd24e, CRL_REG_LEN_08BIT, 0x40}, + {0xd24f, CRL_REG_LEN_08BIT, 0x00}, + {0xd250, CRL_REG_LEN_08BIT, 0x03}, + {0xd251, CRL_REG_LEN_08BIT, 0xff}, + {0xd252, CRL_REG_LEN_08BIT, 0xff}, + {0xd253, CRL_REG_LEN_08BIT, 0xc1}, + {0xd254, CRL_REG_LEN_08BIT, 0x19}, + {0xd255, CRL_REG_LEN_08BIT, 0x00}, + {0xd256, CRL_REG_LEN_08BIT, 0x80}, + {0xd257, CRL_REG_LEN_08BIT, 0x06}, + {0xd258, CRL_REG_LEN_08BIT, 0x94}, + {0xd259, CRL_REG_LEN_08BIT, 0x84}, + {0xd25a, CRL_REG_LEN_08BIT, 0x00}, + {0xd25b, CRL_REG_LEN_08BIT, 0x72}, + {0xd25c, CRL_REG_LEN_08BIT, 0xe5}, + {0xd25d, CRL_REG_LEN_08BIT, 0xa4}, + {0xd25e, CRL_REG_LEN_08BIT, 0x60}, + {0xd25f, CRL_REG_LEN_08BIT, 0x00}, + {0xd260, CRL_REG_LEN_08BIT, 0x0c}, + {0xd261, CRL_REG_LEN_08BIT, 0x00}, + {0xd262, CRL_REG_LEN_08BIT, 0x00}, + {0xd263, CRL_REG_LEN_08BIT, 0x4d}, + {0xd264, CRL_REG_LEN_08BIT, 0x9d}, + {0xd265, CRL_REG_LEN_08BIT, 0x60}, + {0xd266, CRL_REG_LEN_08BIT, 0x01}, + {0xd267, CRL_REG_LEN_08BIT, 0x00}, + {0xd268, CRL_REG_LEN_08BIT, 0x85}, + {0xd269, CRL_REG_LEN_08BIT, 0x4e}, + {0xd26a, CRL_REG_LEN_08BIT, 0x00}, + {0xd26b, CRL_REG_LEN_08BIT, 0x00}, + {0xd26c, CRL_REG_LEN_08BIT, 0x98}, + {0xd26d, CRL_REG_LEN_08BIT, 0x70}, + {0xd26e, CRL_REG_LEN_08BIT, 0x00}, + {0xd26f, CRL_REG_LEN_08BIT, 0x00}, + {0xd270, CRL_REG_LEN_08BIT, 0x8c}, + {0xd271, CRL_REG_LEN_08BIT, 0x8a}, + 
{0xd272, CRL_REG_LEN_08BIT, 0x00}, + {0xd273, CRL_REG_LEN_08BIT, 0x6f}, + {0xd274, CRL_REG_LEN_08BIT, 0xe5}, + {0xd275, CRL_REG_LEN_08BIT, 0x63}, + {0xd276, CRL_REG_LEN_08BIT, 0x20}, + {0xd277, CRL_REG_LEN_08BIT, 0x00}, + {0xd278, CRL_REG_LEN_08BIT, 0x10}, + {0xd279, CRL_REG_LEN_08BIT, 0x00}, + {0xd27a, CRL_REG_LEN_08BIT, 0x00}, + {0xd27b, CRL_REG_LEN_08BIT, 0x07}, + {0xd27c, CRL_REG_LEN_08BIT, 0x15}, + {0xd27d, CRL_REG_LEN_08BIT, 0x00}, + {0xd27e, CRL_REG_LEN_08BIT, 0x00}, + {0xd27f, CRL_REG_LEN_08BIT, 0x00}, + {0xd280, CRL_REG_LEN_08BIT, 0x8c}, + {0xd281, CRL_REG_LEN_08BIT, 0xaa}, + {0xd282, CRL_REG_LEN_08BIT, 0x00}, + {0xd283, CRL_REG_LEN_08BIT, 0x6e}, + {0xd284, CRL_REG_LEN_08BIT, 0xe0}, + {0xd285, CRL_REG_LEN_08BIT, 0x63}, + {0xd286, CRL_REG_LEN_08BIT, 0x28}, + {0xd287, CRL_REG_LEN_08BIT, 0x02}, + {0xd288, CRL_REG_LEN_08BIT, 0xe0}, + {0xd289, CRL_REG_LEN_08BIT, 0x84}, + {0xd28a, CRL_REG_LEN_08BIT, 0x28}, + {0xd28b, CRL_REG_LEN_08BIT, 0x02}, + {0xd28c, CRL_REG_LEN_08BIT, 0x07}, + {0xd28d, CRL_REG_LEN_08BIT, 0xff}, + {0xd28e, CRL_REG_LEN_08BIT, 0xf8}, + {0xd28f, CRL_REG_LEN_08BIT, 0x66}, + {0xd290, CRL_REG_LEN_08BIT, 0xe0}, + {0xd291, CRL_REG_LEN_08BIT, 0x63}, + {0xd292, CRL_REG_LEN_08BIT, 0x5b}, + {0xd293, CRL_REG_LEN_08BIT, 0x06}, + {0xd294, CRL_REG_LEN_08BIT, 0x8c}, + {0xd295, CRL_REG_LEN_08BIT, 0x6a}, + {0xd296, CRL_REG_LEN_08BIT, 0x00}, + {0xd297, CRL_REG_LEN_08BIT, 0x77}, + {0xd298, CRL_REG_LEN_08BIT, 0xe0}, + {0xd299, CRL_REG_LEN_08BIT, 0x63}, + {0xd29a, CRL_REG_LEN_08BIT, 0x5b}, + {0xd29b, CRL_REG_LEN_08BIT, 0x06}, + {0xd29c, CRL_REG_LEN_08BIT, 0xbd}, + {0xd29d, CRL_REG_LEN_08BIT, 0x63}, + {0xd29e, CRL_REG_LEN_08BIT, 0x00}, + {0xd29f, CRL_REG_LEN_08BIT, 0x00}, + {0xd2a0, CRL_REG_LEN_08BIT, 0x0c}, + {0xd2a1, CRL_REG_LEN_08BIT, 0x00}, + {0xd2a2, CRL_REG_LEN_08BIT, 0x00}, + {0xd2a3, CRL_REG_LEN_08BIT, 0x5a}, + {0xd2a4, CRL_REG_LEN_08BIT, 0x15}, + {0xd2a5, CRL_REG_LEN_08BIT, 0x00}, + {0xd2a6, CRL_REG_LEN_08BIT, 0x00}, + {0xd2a7, CRL_REG_LEN_08BIT, 0x00}, + 
{0xd2a8, CRL_REG_LEN_08BIT, 0x8c}, + {0xd2a9, CRL_REG_LEN_08BIT, 0x8a}, + {0xd2aa, CRL_REG_LEN_08BIT, 0x00}, + {0xd2ab, CRL_REG_LEN_08BIT, 0x78}, + {0xd2ac, CRL_REG_LEN_08BIT, 0xb8}, + {0xd2ad, CRL_REG_LEN_08BIT, 0x63}, + {0xd2ae, CRL_REG_LEN_08BIT, 0x00}, + {0xd2af, CRL_REG_LEN_08BIT, 0x88}, + {0xd2b0, CRL_REG_LEN_08BIT, 0xe1}, + {0xd2b1, CRL_REG_LEN_08BIT, 0x64}, + {0xd2b2, CRL_REG_LEN_08BIT, 0x5b}, + {0xd2b3, CRL_REG_LEN_08BIT, 0x06}, + {0xd2b4, CRL_REG_LEN_08BIT, 0xbd}, + {0xd2b5, CRL_REG_LEN_08BIT, 0x6b}, + {0xd2b6, CRL_REG_LEN_08BIT, 0x00}, + {0xd2b7, CRL_REG_LEN_08BIT, 0x00}, + {0xd2b8, CRL_REG_LEN_08BIT, 0x0c}, + {0xd2b9, CRL_REG_LEN_08BIT, 0x00}, + {0xd2ba, CRL_REG_LEN_08BIT, 0x00}, + {0xd2bb, CRL_REG_LEN_08BIT, 0x59}, + {0xd2bc, CRL_REG_LEN_08BIT, 0xd4}, + {0xd2bd, CRL_REG_LEN_08BIT, 0x01}, + {0xd2be, CRL_REG_LEN_08BIT, 0x18}, + {0xd2bf, CRL_REG_LEN_08BIT, 0x14}, + {0xd2c0, CRL_REG_LEN_08BIT, 0xb9}, + {0xd2c1, CRL_REG_LEN_08BIT, 0x6b}, + {0xd2c2, CRL_REG_LEN_08BIT, 0x00}, + {0xd2c3, CRL_REG_LEN_08BIT, 0x88}, + {0xd2c4, CRL_REG_LEN_08BIT, 0x85}, + {0xd2c5, CRL_REG_LEN_08BIT, 0x01}, + {0xd2c6, CRL_REG_LEN_08BIT, 0x00}, + {0xd2c7, CRL_REG_LEN_08BIT, 0x14}, + {0xd2c8, CRL_REG_LEN_08BIT, 0xbd}, + {0xd2c9, CRL_REG_LEN_08BIT, 0x68}, + {0xd2ca, CRL_REG_LEN_08BIT, 0x00}, + {0xd2cb, CRL_REG_LEN_08BIT, 0x00}, + {0xd2cc, CRL_REG_LEN_08BIT, 0x0c}, + {0xd2cd, CRL_REG_LEN_08BIT, 0x00}, + {0xd2ce, CRL_REG_LEN_08BIT, 0x00}, + {0xd2cf, CRL_REG_LEN_08BIT, 0x51}, + {0xd2d0, CRL_REG_LEN_08BIT, 0xd4}, + {0xd2d1, CRL_REG_LEN_08BIT, 0x01}, + {0xd2d2, CRL_REG_LEN_08BIT, 0x58}, + {0xd2d3, CRL_REG_LEN_08BIT, 0x18}, + {0xd2d4, CRL_REG_LEN_08BIT, 0x84}, + {0xd2d5, CRL_REG_LEN_08BIT, 0x81}, + {0xd2d6, CRL_REG_LEN_08BIT, 0x00}, + {0xd2d7, CRL_REG_LEN_08BIT, 0x14}, + {0xd2d8, CRL_REG_LEN_08BIT, 0xbd}, + {0xd2d9, CRL_REG_LEN_08BIT, 0xa4}, + {0xd2da, CRL_REG_LEN_08BIT, 0x01}, + {0xd2db, CRL_REG_LEN_08BIT, 0x00}, + {0xd2dc, CRL_REG_LEN_08BIT, 0x10}, + {0xd2dd, CRL_REG_LEN_08BIT, 0x00}, + 
{0xd2de, CRL_REG_LEN_08BIT, 0x00}, + {0xd2df, CRL_REG_LEN_08BIT, 0x05}, + {0xd2e0, CRL_REG_LEN_08BIT, 0x84}, + {0xd2e1, CRL_REG_LEN_08BIT, 0xc1}, + {0xd2e2, CRL_REG_LEN_08BIT, 0x00}, + {0xd2e3, CRL_REG_LEN_08BIT, 0x18}, + {0xd2e4, CRL_REG_LEN_08BIT, 0x9c}, + {0xd2e5, CRL_REG_LEN_08BIT, 0xa0}, + {0xd2e6, CRL_REG_LEN_08BIT, 0x01}, + {0xd2e7, CRL_REG_LEN_08BIT, 0x00}, + {0xd2e8, CRL_REG_LEN_08BIT, 0xd4}, + {0xd2e9, CRL_REG_LEN_08BIT, 0x01}, + {0xd2ea, CRL_REG_LEN_08BIT, 0x28}, + {0xd2eb, CRL_REG_LEN_08BIT, 0x14}, + {0xd2ec, CRL_REG_LEN_08BIT, 0x84}, + {0xd2ed, CRL_REG_LEN_08BIT, 0xc1}, + {0xd2ee, CRL_REG_LEN_08BIT, 0x00}, + {0xd2ef, CRL_REG_LEN_08BIT, 0x18}, + {0xd2f0, CRL_REG_LEN_08BIT, 0xbd}, + {0xd2f1, CRL_REG_LEN_08BIT, 0x66}, + {0xd2f2, CRL_REG_LEN_08BIT, 0x00}, + {0xd2f3, CRL_REG_LEN_08BIT, 0x00}, + {0xd2f4, CRL_REG_LEN_08BIT, 0x0c}, + {0xd2f5, CRL_REG_LEN_08BIT, 0x00}, + {0xd2f6, CRL_REG_LEN_08BIT, 0x00}, + {0xd2f7, CRL_REG_LEN_08BIT, 0x43}, + {0xd2f8, CRL_REG_LEN_08BIT, 0x9d}, + {0xd2f9, CRL_REG_LEN_08BIT, 0x00}, + {0xd2fa, CRL_REG_LEN_08BIT, 0x00}, + {0xd2fb, CRL_REG_LEN_08BIT, 0x00}, + {0xd2fc, CRL_REG_LEN_08BIT, 0x84}, + {0xd2fd, CRL_REG_LEN_08BIT, 0x61}, + {0xd2fe, CRL_REG_LEN_08BIT, 0x00}, + {0xd2ff, CRL_REG_LEN_08BIT, 0x18}, + {0xd300, CRL_REG_LEN_08BIT, 0xbd}, + {0xd301, CRL_REG_LEN_08BIT, 0xa3}, + {0xd302, CRL_REG_LEN_08BIT, 0x01}, + {0xd303, CRL_REG_LEN_08BIT, 0x00}, + {0xd304, CRL_REG_LEN_08BIT, 0x10}, + {0xd305, CRL_REG_LEN_08BIT, 0x00}, + {0xd306, CRL_REG_LEN_08BIT, 0x00}, + {0xd307, CRL_REG_LEN_08BIT, 0x03}, + {0xd308, CRL_REG_LEN_08BIT, 0x9c}, + {0xd309, CRL_REG_LEN_08BIT, 0x80}, + {0xd30a, CRL_REG_LEN_08BIT, 0x01}, + {0xd30b, CRL_REG_LEN_08BIT, 0x00}, + {0xd30c, CRL_REG_LEN_08BIT, 0xd4}, + {0xd30d, CRL_REG_LEN_08BIT, 0x01}, + {0xd30e, CRL_REG_LEN_08BIT, 0x20}, + {0xd30f, CRL_REG_LEN_08BIT, 0x18}, + {0xd310, CRL_REG_LEN_08BIT, 0x18}, + {0xd311, CRL_REG_LEN_08BIT, 0x60}, + {0xd312, CRL_REG_LEN_08BIT, 0x80}, + {0xd313, CRL_REG_LEN_08BIT, 0x06}, + 
{0xd314, CRL_REG_LEN_08BIT, 0x85}, + {0xd315, CRL_REG_LEN_08BIT, 0x01}, + {0xd316, CRL_REG_LEN_08BIT, 0x00}, + {0xd317, CRL_REG_LEN_08BIT, 0x14}, + {0xd318, CRL_REG_LEN_08BIT, 0xa8}, + {0xd319, CRL_REG_LEN_08BIT, 0x83}, + {0xd31a, CRL_REG_LEN_08BIT, 0x38}, + {0xd31b, CRL_REG_LEN_08BIT, 0x29}, + {0xd31c, CRL_REG_LEN_08BIT, 0xa8}, + {0xd31d, CRL_REG_LEN_08BIT, 0xc3}, + {0xd31e, CRL_REG_LEN_08BIT, 0x40}, + {0xd31f, CRL_REG_LEN_08BIT, 0x08}, + {0xd320, CRL_REG_LEN_08BIT, 0x8c}, + {0xd321, CRL_REG_LEN_08BIT, 0x84}, + {0xd322, CRL_REG_LEN_08BIT, 0x00}, + {0xd323, CRL_REG_LEN_08BIT, 0x00}, + {0xd324, CRL_REG_LEN_08BIT, 0xa8}, + {0xd325, CRL_REG_LEN_08BIT, 0xa3}, + {0xd326, CRL_REG_LEN_08BIT, 0x38}, + {0xd327, CRL_REG_LEN_08BIT, 0x2a}, + {0xd328, CRL_REG_LEN_08BIT, 0xa8}, + {0xd329, CRL_REG_LEN_08BIT, 0xe3}, + {0xd32a, CRL_REG_LEN_08BIT, 0x40}, + {0xd32b, CRL_REG_LEN_08BIT, 0x09}, + {0xd32c, CRL_REG_LEN_08BIT, 0xe0}, + {0xd32d, CRL_REG_LEN_08BIT, 0x64}, + {0xd32e, CRL_REG_LEN_08BIT, 0x40}, + {0xd32f, CRL_REG_LEN_08BIT, 0x00}, + {0xd330, CRL_REG_LEN_08BIT, 0xd8}, + {0xd331, CRL_REG_LEN_08BIT, 0x06}, + {0xd332, CRL_REG_LEN_08BIT, 0x18}, + {0xd333, CRL_REG_LEN_08BIT, 0x00}, + {0xd334, CRL_REG_LEN_08BIT, 0x8c}, + {0xd335, CRL_REG_LEN_08BIT, 0x65}, + {0xd336, CRL_REG_LEN_08BIT, 0x00}, + {0xd337, CRL_REG_LEN_08BIT, 0x00}, + {0xd338, CRL_REG_LEN_08BIT, 0x84}, + {0xd339, CRL_REG_LEN_08BIT, 0x81}, + {0xd33a, CRL_REG_LEN_08BIT, 0x00}, + {0xd33b, CRL_REG_LEN_08BIT, 0x18}, + {0xd33c, CRL_REG_LEN_08BIT, 0xe3}, + {0xd33d, CRL_REG_LEN_08BIT, 0xe3}, + {0xd33e, CRL_REG_LEN_08BIT, 0x20}, + {0xd33f, CRL_REG_LEN_08BIT, 0x00}, + {0xd340, CRL_REG_LEN_08BIT, 0xd8}, + {0xd341, CRL_REG_LEN_08BIT, 0x07}, + {0xd342, CRL_REG_LEN_08BIT, 0xf8}, + {0xd343, CRL_REG_LEN_08BIT, 0x00}, + {0xd344, CRL_REG_LEN_08BIT, 0x03}, + {0xd345, CRL_REG_LEN_08BIT, 0xff}, + {0xd346, CRL_REG_LEN_08BIT, 0xff}, + {0xd347, CRL_REG_LEN_08BIT, 0x6f}, + {0xd348, CRL_REG_LEN_08BIT, 0x18}, + {0xd349, CRL_REG_LEN_08BIT, 0x60}, + 
{0xd34a, CRL_REG_LEN_08BIT, 0x00}, + {0xd34b, CRL_REG_LEN_08BIT, 0x01}, + {0xd34c, CRL_REG_LEN_08BIT, 0x0f}, + {0xd34d, CRL_REG_LEN_08BIT, 0xff}, + {0xd34e, CRL_REG_LEN_08BIT, 0xff}, + {0xd34f, CRL_REG_LEN_08BIT, 0x9d}, + {0xd350, CRL_REG_LEN_08BIT, 0x18}, + {0xd351, CRL_REG_LEN_08BIT, 0x60}, + {0xd352, CRL_REG_LEN_08BIT, 0x80}, + {0xd353, CRL_REG_LEN_08BIT, 0x06}, + {0xd354, CRL_REG_LEN_08BIT, 0xa8}, + {0xd355, CRL_REG_LEN_08BIT, 0x83}, + {0xd356, CRL_REG_LEN_08BIT, 0x6e}, + {0xd357, CRL_REG_LEN_08BIT, 0x43}, + {0xd358, CRL_REG_LEN_08BIT, 0xa8}, + {0xd359, CRL_REG_LEN_08BIT, 0xa3}, + {0xd35a, CRL_REG_LEN_08BIT, 0x38}, + {0xd35b, CRL_REG_LEN_08BIT, 0x0f}, + {0xd35c, CRL_REG_LEN_08BIT, 0x8c}, + {0xd35d, CRL_REG_LEN_08BIT, 0x84}, + {0xd35e, CRL_REG_LEN_08BIT, 0x00}, + {0xd35f, CRL_REG_LEN_08BIT, 0x00}, + {0xd360, CRL_REG_LEN_08BIT, 0xa8}, + {0xd361, CRL_REG_LEN_08BIT, 0xc3}, + {0xd362, CRL_REG_LEN_08BIT, 0x38}, + {0xd363, CRL_REG_LEN_08BIT, 0x0e}, + {0xd364, CRL_REG_LEN_08BIT, 0xa8}, + {0xd365, CRL_REG_LEN_08BIT, 0xe3}, + {0xd366, CRL_REG_LEN_08BIT, 0x6e}, + {0xd367, CRL_REG_LEN_08BIT, 0x42}, + {0xd368, CRL_REG_LEN_08BIT, 0xd8}, + {0xd369, CRL_REG_LEN_08BIT, 0x05}, + {0xd36a, CRL_REG_LEN_08BIT, 0x20}, + {0xd36b, CRL_REG_LEN_08BIT, 0x00}, + {0xd36c, CRL_REG_LEN_08BIT, 0x8c}, + {0xd36d, CRL_REG_LEN_08BIT, 0x67}, + {0xd36e, CRL_REG_LEN_08BIT, 0x00}, + {0xd36f, CRL_REG_LEN_08BIT, 0x00}, + {0xd370, CRL_REG_LEN_08BIT, 0xd8}, + {0xd371, CRL_REG_LEN_08BIT, 0x06}, + {0xd372, CRL_REG_LEN_08BIT, 0x18}, + {0xd373, CRL_REG_LEN_08BIT, 0x00}, + {0xd374, CRL_REG_LEN_08BIT, 0x18}, + {0xd375, CRL_REG_LEN_08BIT, 0x60}, + {0xd376, CRL_REG_LEN_08BIT, 0x80}, + {0xd377, CRL_REG_LEN_08BIT, 0x01}, + {0xd378, CRL_REG_LEN_08BIT, 0xa8}, + {0xd379, CRL_REG_LEN_08BIT, 0x63}, + {0xd37a, CRL_REG_LEN_08BIT, 0x00}, + {0xd37b, CRL_REG_LEN_08BIT, 0xc8}, + {0xd37c, CRL_REG_LEN_08BIT, 0x8c}, + {0xd37d, CRL_REG_LEN_08BIT, 0x63}, + {0xd37e, CRL_REG_LEN_08BIT, 0x00}, + {0xd37f, CRL_REG_LEN_08BIT, 0x00}, + 
{0xd380, CRL_REG_LEN_08BIT, 0xbc}, + {0xd381, CRL_REG_LEN_08BIT, 0x23}, + {0xd382, CRL_REG_LEN_08BIT, 0x00}, + {0xd383, CRL_REG_LEN_08BIT, 0x01}, + {0xd384, CRL_REG_LEN_08BIT, 0x10}, + {0xd385, CRL_REG_LEN_08BIT, 0x00}, + {0xd386, CRL_REG_LEN_08BIT, 0x00}, + {0xd387, CRL_REG_LEN_08BIT, 0x28}, + {0xd388, CRL_REG_LEN_08BIT, 0x9c}, + {0xd389, CRL_REG_LEN_08BIT, 0xa0}, + {0xd38a, CRL_REG_LEN_08BIT, 0x00}, + {0xd38b, CRL_REG_LEN_08BIT, 0x00}, + {0xd38c, CRL_REG_LEN_08BIT, 0x00}, + {0xd38d, CRL_REG_LEN_08BIT, 0x00}, + {0xd38e, CRL_REG_LEN_08BIT, 0x00}, + {0xd38f, CRL_REG_LEN_08BIT, 0x08}, + {0xd390, CRL_REG_LEN_08BIT, 0x15}, + {0xd391, CRL_REG_LEN_08BIT, 0x00}, + {0xd392, CRL_REG_LEN_08BIT, 0x00}, + {0xd393, CRL_REG_LEN_08BIT, 0x00}, + {0xd394, CRL_REG_LEN_08BIT, 0xe0}, + {0xd395, CRL_REG_LEN_08BIT, 0x6c}, + {0xd396, CRL_REG_LEN_08BIT, 0x28}, + {0xd397, CRL_REG_LEN_08BIT, 0x02}, + {0xd398, CRL_REG_LEN_08BIT, 0xe0}, + {0xd399, CRL_REG_LEN_08BIT, 0x84}, + {0xd39a, CRL_REG_LEN_08BIT, 0x28}, + {0xd39b, CRL_REG_LEN_08BIT, 0x02}, + {0xd39c, CRL_REG_LEN_08BIT, 0x07}, + {0xd39d, CRL_REG_LEN_08BIT, 0xff}, + {0xd39e, CRL_REG_LEN_08BIT, 0xf8}, + {0xd39f, CRL_REG_LEN_08BIT, 0x22}, + {0xd3a0, CRL_REG_LEN_08BIT, 0xb8}, + {0xd3a1, CRL_REG_LEN_08BIT, 0x63}, + {0xd3a2, CRL_REG_LEN_08BIT, 0x00}, + {0xd3a3, CRL_REG_LEN_08BIT, 0x08}, + {0xd3a4, CRL_REG_LEN_08BIT, 0x03}, + {0xd3a5, CRL_REG_LEN_08BIT, 0xff}, + {0xd3a6, CRL_REG_LEN_08BIT, 0xff}, + {0xd3a7, CRL_REG_LEN_08BIT, 0xb2}, + {0xd3a8, CRL_REG_LEN_08BIT, 0x85}, + {0xd3a9, CRL_REG_LEN_08BIT, 0x4e}, + {0xd3aa, CRL_REG_LEN_08BIT, 0x00}, + {0xd3ab, CRL_REG_LEN_08BIT, 0x00}, + {0xd3ac, CRL_REG_LEN_08BIT, 0x18}, + {0xd3ad, CRL_REG_LEN_08BIT, 0xe0}, + {0xd3ae, CRL_REG_LEN_08BIT, 0x00}, + {0xd3af, CRL_REG_LEN_08BIT, 0x01}, + {0xd3b0, CRL_REG_LEN_08BIT, 0xa8}, + {0xd3b1, CRL_REG_LEN_08BIT, 0xe7}, + {0xd3b2, CRL_REG_LEN_08BIT, 0x06}, + {0xd3b3, CRL_REG_LEN_08BIT, 0x55}, + {0xd3b4, CRL_REG_LEN_08BIT, 0x8c}, + {0xd3b5, CRL_REG_LEN_08BIT, 0x87}, + 
{0xd3b6, CRL_REG_LEN_08BIT, 0x00}, + {0xd3b7, CRL_REG_LEN_08BIT, 0x00}, + {0xd3b8, CRL_REG_LEN_08BIT, 0xb8}, + {0xd3b9, CRL_REG_LEN_08BIT, 0x64}, + {0xd3ba, CRL_REG_LEN_08BIT, 0x00}, + {0xd3bb, CRL_REG_LEN_08BIT, 0x02}, + {0xd3bc, CRL_REG_LEN_08BIT, 0x19}, + {0xd3bd, CRL_REG_LEN_08BIT, 0x00}, + {0xd3be, CRL_REG_LEN_08BIT, 0x80}, + {0xd3bf, CRL_REG_LEN_08BIT, 0x06}, + {0xd3c0, CRL_REG_LEN_08BIT, 0xe0}, + {0xd3c1, CRL_REG_LEN_08BIT, 0x63}, + {0xd3c2, CRL_REG_LEN_08BIT, 0x20}, + {0xd3c3, CRL_REG_LEN_08BIT, 0x00}, + {0xd3c4, CRL_REG_LEN_08BIT, 0xa9}, + {0xd3c5, CRL_REG_LEN_08BIT, 0x08}, + {0xd3c6, CRL_REG_LEN_08BIT, 0x56}, + {0xd3c7, CRL_REG_LEN_08BIT, 0x01}, + {0xd3c8, CRL_REG_LEN_08BIT, 0xb8}, + {0xd3c9, CRL_REG_LEN_08BIT, 0x63}, + {0xd3ca, CRL_REG_LEN_08BIT, 0x00}, + {0xd3cb, CRL_REG_LEN_08BIT, 0x04}, + {0xd3cc, CRL_REG_LEN_08BIT, 0x18}, + {0xd3cd, CRL_REG_LEN_08BIT, 0x80}, + {0xd3ce, CRL_REG_LEN_08BIT, 0x80}, + {0xd3cf, CRL_REG_LEN_08BIT, 0x01}, + {0xd3d0, CRL_REG_LEN_08BIT, 0xe0}, + {0xd3d1, CRL_REG_LEN_08BIT, 0xc5}, + {0xd3d2, CRL_REG_LEN_08BIT, 0x40}, + {0xd3d3, CRL_REG_LEN_08BIT, 0x00}, + {0xd3d4, CRL_REG_LEN_08BIT, 0xe0}, + {0xd3d5, CRL_REG_LEN_08BIT, 0x63}, + {0xd3d6, CRL_REG_LEN_08BIT, 0x28}, + {0xd3d7, CRL_REG_LEN_08BIT, 0x00}, + {0xd3d8, CRL_REG_LEN_08BIT, 0xa8}, + {0xd3d9, CRL_REG_LEN_08BIT, 0x84}, + {0xd3da, CRL_REG_LEN_08BIT, 0x1d}, + {0xd3db, CRL_REG_LEN_08BIT, 0x00}, + {0xd3dc, CRL_REG_LEN_08BIT, 0x9c}, + {0xd3dd, CRL_REG_LEN_08BIT, 0xa5}, + {0xd3de, CRL_REG_LEN_08BIT, 0x00}, + {0xd3df, CRL_REG_LEN_08BIT, 0x01}, + {0xd3e0, CRL_REG_LEN_08BIT, 0xe0}, + {0xd3e1, CRL_REG_LEN_08BIT, 0x63}, + {0xd3e2, CRL_REG_LEN_08BIT, 0x20}, + {0xd3e3, CRL_REG_LEN_08BIT, 0x00}, + {0xd3e4, CRL_REG_LEN_08BIT, 0xbd}, + {0xd3e5, CRL_REG_LEN_08BIT, 0x45}, + {0xd3e6, CRL_REG_LEN_08BIT, 0x00}, + {0xd3e7, CRL_REG_LEN_08BIT, 0x48}, + {0xd3e8, CRL_REG_LEN_08BIT, 0x8c}, + {0xd3e9, CRL_REG_LEN_08BIT, 0x63}, + {0xd3ea, CRL_REG_LEN_08BIT, 0x00}, + {0xd3eb, CRL_REG_LEN_08BIT, 0x00}, + 
{0xd3ec, CRL_REG_LEN_08BIT, 0xd8}, + {0xd3ed, CRL_REG_LEN_08BIT, 0x06}, + {0xd3ee, CRL_REG_LEN_08BIT, 0x18}, + {0xd3ef, CRL_REG_LEN_08BIT, 0x00}, + {0xd3f0, CRL_REG_LEN_08BIT, 0x0f}, + {0xd3f1, CRL_REG_LEN_08BIT, 0xff}, + {0xd3f2, CRL_REG_LEN_08BIT, 0xff}, + {0xd3f3, CRL_REG_LEN_08BIT, 0xf1}, + {0xd3f4, CRL_REG_LEN_08BIT, 0x15}, + {0xd3f5, CRL_REG_LEN_08BIT, 0x00}, + {0xd3f6, CRL_REG_LEN_08BIT, 0x00}, + {0xd3f7, CRL_REG_LEN_08BIT, 0x00}, + {0xd3f8, CRL_REG_LEN_08BIT, 0x00}, + {0xd3f9, CRL_REG_LEN_08BIT, 0x00}, + {0xd3fa, CRL_REG_LEN_08BIT, 0x00}, + {0xd3fb, CRL_REG_LEN_08BIT, 0x0b}, + {0xd3fc, CRL_REG_LEN_08BIT, 0x15}, + {0xd3fd, CRL_REG_LEN_08BIT, 0x00}, + {0xd3fe, CRL_REG_LEN_08BIT, 0x00}, + {0xd3ff, CRL_REG_LEN_08BIT, 0x00}, + {0xd400, CRL_REG_LEN_08BIT, 0x03}, + {0xd401, CRL_REG_LEN_08BIT, 0xff}, + {0xd402, CRL_REG_LEN_08BIT, 0xff}, + {0xd403, CRL_REG_LEN_08BIT, 0xc4}, + {0xd404, CRL_REG_LEN_08BIT, 0xd4}, + {0xd405, CRL_REG_LEN_08BIT, 0x01}, + {0xd406, CRL_REG_LEN_08BIT, 0x40}, + {0xd407, CRL_REG_LEN_08BIT, 0x18}, + {0xd408, CRL_REG_LEN_08BIT, 0x03}, + {0xd409, CRL_REG_LEN_08BIT, 0xff}, + {0xd40a, CRL_REG_LEN_08BIT, 0xff}, + {0xd40b, CRL_REG_LEN_08BIT, 0xa8}, + {0xd40c, CRL_REG_LEN_08BIT, 0x9c}, + {0xd40d, CRL_REG_LEN_08BIT, 0x63}, + {0xd40e, CRL_REG_LEN_08BIT, 0x00}, + {0xd40f, CRL_REG_LEN_08BIT, 0xff}, + {0xd410, CRL_REG_LEN_08BIT, 0x9c}, + {0xd411, CRL_REG_LEN_08BIT, 0x60}, + {0xd412, CRL_REG_LEN_08BIT, 0x00}, + {0xd413, CRL_REG_LEN_08BIT, 0x00}, + {0xd414, CRL_REG_LEN_08BIT, 0x03}, + {0xd415, CRL_REG_LEN_08BIT, 0xff}, + {0xd416, CRL_REG_LEN_08BIT, 0xff}, + {0xd417, CRL_REG_LEN_08BIT, 0xb6}, + {0xd418, CRL_REG_LEN_08BIT, 0xd4}, + {0xd419, CRL_REG_LEN_08BIT, 0x01}, + {0xd41a, CRL_REG_LEN_08BIT, 0x18}, + {0xd41b, CRL_REG_LEN_08BIT, 0x14}, + {0xd41c, CRL_REG_LEN_08BIT, 0x03}, + {0xd41d, CRL_REG_LEN_08BIT, 0xff}, + {0xd41e, CRL_REG_LEN_08BIT, 0xff}, + {0xd41f, CRL_REG_LEN_08BIT, 0xa9}, + {0xd420, CRL_REG_LEN_08BIT, 0x9d}, + {0xd421, CRL_REG_LEN_08BIT, 0x6b}, + 
{0xd422, CRL_REG_LEN_08BIT, 0x00}, + {0xd423, CRL_REG_LEN_08BIT, 0xff}, + {0xd424, CRL_REG_LEN_08BIT, 0x85}, + {0xd425, CRL_REG_LEN_08BIT, 0x21}, + {0xd426, CRL_REG_LEN_08BIT, 0x00}, + {0xd427, CRL_REG_LEN_08BIT, 0x00}, + {0xd428, CRL_REG_LEN_08BIT, 0x85}, + {0xd429, CRL_REG_LEN_08BIT, 0x41}, + {0xd42a, CRL_REG_LEN_08BIT, 0x00}, + {0xd42b, CRL_REG_LEN_08BIT, 0x04}, + {0xd42c, CRL_REG_LEN_08BIT, 0x85}, + {0xd42d, CRL_REG_LEN_08BIT, 0x81}, + {0xd42e, CRL_REG_LEN_08BIT, 0x00}, + {0xd42f, CRL_REG_LEN_08BIT, 0x08}, + {0xd430, CRL_REG_LEN_08BIT, 0x85}, + {0xd431, CRL_REG_LEN_08BIT, 0xc1}, + {0xd432, CRL_REG_LEN_08BIT, 0x00}, + {0xd433, CRL_REG_LEN_08BIT, 0x0c}, + {0xd434, CRL_REG_LEN_08BIT, 0x86}, + {0xd435, CRL_REG_LEN_08BIT, 0x01}, + {0xd436, CRL_REG_LEN_08BIT, 0x00}, + {0xd437, CRL_REG_LEN_08BIT, 0x10}, + {0xd438, CRL_REG_LEN_08BIT, 0x44}, + {0xd439, CRL_REG_LEN_08BIT, 0x00}, + {0xd43a, CRL_REG_LEN_08BIT, 0x48}, + {0xd43b, CRL_REG_LEN_08BIT, 0x00}, + {0xd43c, CRL_REG_LEN_08BIT, 0x9c}, + {0xd43d, CRL_REG_LEN_08BIT, 0x21}, + {0xd43e, CRL_REG_LEN_08BIT, 0x00}, + {0xd43f, CRL_REG_LEN_08BIT, 0x1c}, + {0xd440, CRL_REG_LEN_08BIT, 0x9c}, + {0xd441, CRL_REG_LEN_08BIT, 0x21}, + {0xd442, CRL_REG_LEN_08BIT, 0xff}, + {0xd443, CRL_REG_LEN_08BIT, 0xfc}, + {0xd444, CRL_REG_LEN_08BIT, 0xd4}, + {0xd445, CRL_REG_LEN_08BIT, 0x01}, + {0xd446, CRL_REG_LEN_08BIT, 0x48}, + {0xd447, CRL_REG_LEN_08BIT, 0x00}, + {0xd448, CRL_REG_LEN_08BIT, 0x18}, + {0xd449, CRL_REG_LEN_08BIT, 0x60}, + {0xd44a, CRL_REG_LEN_08BIT, 0x00}, + {0xd44b, CRL_REG_LEN_08BIT, 0x01}, + {0xd44c, CRL_REG_LEN_08BIT, 0xa8}, + {0xd44d, CRL_REG_LEN_08BIT, 0x63}, + {0xd44e, CRL_REG_LEN_08BIT, 0x07}, + {0xd44f, CRL_REG_LEN_08BIT, 0x80}, + {0xd450, CRL_REG_LEN_08BIT, 0x8c}, + {0xd451, CRL_REG_LEN_08BIT, 0x63}, + {0xd452, CRL_REG_LEN_08BIT, 0x00}, + {0xd453, CRL_REG_LEN_08BIT, 0x68}, + {0xd454, CRL_REG_LEN_08BIT, 0xbc}, + {0xd455, CRL_REG_LEN_08BIT, 0x03}, + {0xd456, CRL_REG_LEN_08BIT, 0x00}, + {0xd457, CRL_REG_LEN_08BIT, 0x00}, + 
{0xd458, CRL_REG_LEN_08BIT, 0x10}, + {0xd459, CRL_REG_LEN_08BIT, 0x00}, + {0xd45a, CRL_REG_LEN_08BIT, 0x00}, + {0xd45b, CRL_REG_LEN_08BIT, 0x0c}, + {0xd45c, CRL_REG_LEN_08BIT, 0x15}, + {0xd45d, CRL_REG_LEN_08BIT, 0x00}, + {0xd45e, CRL_REG_LEN_08BIT, 0x00}, + {0xd45f, CRL_REG_LEN_08BIT, 0x00}, + {0xd460, CRL_REG_LEN_08BIT, 0x07}, + {0xd461, CRL_REG_LEN_08BIT, 0xff}, + {0xd462, CRL_REG_LEN_08BIT, 0xd9}, + {0xd463, CRL_REG_LEN_08BIT, 0x7c}, + {0xd464, CRL_REG_LEN_08BIT, 0x15}, + {0xd465, CRL_REG_LEN_08BIT, 0x00}, + {0xd466, CRL_REG_LEN_08BIT, 0x00}, + {0xd467, CRL_REG_LEN_08BIT, 0x00}, + {0xd468, CRL_REG_LEN_08BIT, 0x18}, + {0xd469, CRL_REG_LEN_08BIT, 0x60}, + {0xd46a, CRL_REG_LEN_08BIT, 0x80}, + {0xd46b, CRL_REG_LEN_08BIT, 0x06}, + {0xd46c, CRL_REG_LEN_08BIT, 0xa8}, + {0xd46d, CRL_REG_LEN_08BIT, 0x63}, + {0xd46e, CRL_REG_LEN_08BIT, 0xc4}, + {0xd46f, CRL_REG_LEN_08BIT, 0xb8}, + {0xd470, CRL_REG_LEN_08BIT, 0x8c}, + {0xd471, CRL_REG_LEN_08BIT, 0x63}, + {0xd472, CRL_REG_LEN_08BIT, 0x00}, + {0xd473, CRL_REG_LEN_08BIT, 0x00}, + {0xd474, CRL_REG_LEN_08BIT, 0xbc}, + {0xd475, CRL_REG_LEN_08BIT, 0x23}, + {0xd476, CRL_REG_LEN_08BIT, 0x00}, + {0xd477, CRL_REG_LEN_08BIT, 0x01}, + {0xd478, CRL_REG_LEN_08BIT, 0x10}, + {0xd479, CRL_REG_LEN_08BIT, 0x00}, + {0xd47a, CRL_REG_LEN_08BIT, 0x00}, + {0xd47b, CRL_REG_LEN_08BIT, 0x25}, + {0xd47c, CRL_REG_LEN_08BIT, 0x9d}, + {0xd47d, CRL_REG_LEN_08BIT, 0x00}, + {0xd47e, CRL_REG_LEN_08BIT, 0x00}, + {0xd47f, CRL_REG_LEN_08BIT, 0x00}, + {0xd480, CRL_REG_LEN_08BIT, 0x00}, + {0xd481, CRL_REG_LEN_08BIT, 0x00}, + {0xd482, CRL_REG_LEN_08BIT, 0x00}, + {0xd483, CRL_REG_LEN_08BIT, 0x0b}, + {0xd484, CRL_REG_LEN_08BIT, 0xb8}, + {0xd485, CRL_REG_LEN_08BIT, 0xe8}, + {0xd486, CRL_REG_LEN_08BIT, 0x00}, + {0xd487, CRL_REG_LEN_08BIT, 0x02}, + {0xd488, CRL_REG_LEN_08BIT, 0x07}, + {0xd489, CRL_REG_LEN_08BIT, 0xff}, + {0xd48a, CRL_REG_LEN_08BIT, 0xd6}, + {0xd48b, CRL_REG_LEN_08BIT, 0x08}, + {0xd48c, CRL_REG_LEN_08BIT, 0x15}, + {0xd48d, CRL_REG_LEN_08BIT, 0x00}, + 
{0xd48e, CRL_REG_LEN_08BIT, 0x00}, + {0xd48f, CRL_REG_LEN_08BIT, 0x00}, + {0xd490, CRL_REG_LEN_08BIT, 0x18}, + {0xd491, CRL_REG_LEN_08BIT, 0x60}, + {0xd492, CRL_REG_LEN_08BIT, 0x80}, + {0xd493, CRL_REG_LEN_08BIT, 0x06}, + {0xd494, CRL_REG_LEN_08BIT, 0xa8}, + {0xd495, CRL_REG_LEN_08BIT, 0x63}, + {0xd496, CRL_REG_LEN_08BIT, 0xc4}, + {0xd497, CRL_REG_LEN_08BIT, 0xb8}, + {0xd498, CRL_REG_LEN_08BIT, 0x8c}, + {0xd499, CRL_REG_LEN_08BIT, 0x63}, + {0xd49a, CRL_REG_LEN_08BIT, 0x00}, + {0xd49b, CRL_REG_LEN_08BIT, 0x00}, + {0xd49c, CRL_REG_LEN_08BIT, 0xbc}, + {0xd49d, CRL_REG_LEN_08BIT, 0x23}, + {0xd49e, CRL_REG_LEN_08BIT, 0x00}, + {0xd49f, CRL_REG_LEN_08BIT, 0x01}, + {0xd4a0, CRL_REG_LEN_08BIT, 0x10}, + {0xd4a1, CRL_REG_LEN_08BIT, 0x00}, + {0xd4a2, CRL_REG_LEN_08BIT, 0x00}, + {0xd4a3, CRL_REG_LEN_08BIT, 0x1b}, + {0xd4a4, CRL_REG_LEN_08BIT, 0x9d}, + {0xd4a5, CRL_REG_LEN_08BIT, 0x00}, + {0xd4a6, CRL_REG_LEN_08BIT, 0x00}, + {0xd4a7, CRL_REG_LEN_08BIT, 0x00}, + {0xd4a8, CRL_REG_LEN_08BIT, 0xb8}, + {0xd4a9, CRL_REG_LEN_08BIT, 0xe8}, + {0xd4aa, CRL_REG_LEN_08BIT, 0x00}, + {0xd4ab, CRL_REG_LEN_08BIT, 0x02}, + {0xd4ac, CRL_REG_LEN_08BIT, 0x9c}, + {0xd4ad, CRL_REG_LEN_08BIT, 0xc0}, + {0xd4ae, CRL_REG_LEN_08BIT, 0x00}, + {0xd4af, CRL_REG_LEN_08BIT, 0x00}, + {0xd4b0, CRL_REG_LEN_08BIT, 0x18}, + {0xd4b1, CRL_REG_LEN_08BIT, 0xa0}, + {0xd4b2, CRL_REG_LEN_08BIT, 0x80}, + {0xd4b3, CRL_REG_LEN_08BIT, 0x06}, + {0xd4b4, CRL_REG_LEN_08BIT, 0xe0}, + {0xd4b5, CRL_REG_LEN_08BIT, 0x67}, + {0xd4b6, CRL_REG_LEN_08BIT, 0x30}, + {0xd4b7, CRL_REG_LEN_08BIT, 0x00}, + {0xd4b8, CRL_REG_LEN_08BIT, 0xa8}, + {0xd4b9, CRL_REG_LEN_08BIT, 0xa5}, + {0xd4ba, CRL_REG_LEN_08BIT, 0xce}, + {0xd4bb, CRL_REG_LEN_08BIT, 0xb0}, + {0xd4bc, CRL_REG_LEN_08BIT, 0x19}, + {0xd4bd, CRL_REG_LEN_08BIT, 0x60}, + {0xd4be, CRL_REG_LEN_08BIT, 0x00}, + {0xd4bf, CRL_REG_LEN_08BIT, 0x01}, + {0xd4c0, CRL_REG_LEN_08BIT, 0xa9}, + {0xd4c1, CRL_REG_LEN_08BIT, 0x6b}, + {0xd4c2, CRL_REG_LEN_08BIT, 0x06}, + {0xd4c3, CRL_REG_LEN_08BIT, 0x14}, + 
{0xd4c4, CRL_REG_LEN_08BIT, 0xe0}, + {0xd4c5, CRL_REG_LEN_08BIT, 0x83}, + {0xd4c6, CRL_REG_LEN_08BIT, 0x28}, + {0xd4c7, CRL_REG_LEN_08BIT, 0x00}, + {0xd4c8, CRL_REG_LEN_08BIT, 0x9c}, + {0xd4c9, CRL_REG_LEN_08BIT, 0xc6}, + {0xd4ca, CRL_REG_LEN_08BIT, 0x00}, + {0xd4cb, CRL_REG_LEN_08BIT, 0x01}, + {0xd4cc, CRL_REG_LEN_08BIT, 0xe0}, + {0xd4cd, CRL_REG_LEN_08BIT, 0x63}, + {0xd4ce, CRL_REG_LEN_08BIT, 0x18}, + {0xd4cf, CRL_REG_LEN_08BIT, 0x00}, + {0xd4d0, CRL_REG_LEN_08BIT, 0x8c}, + {0xd4d1, CRL_REG_LEN_08BIT, 0x84}, + {0xd4d2, CRL_REG_LEN_08BIT, 0x00}, + {0xd4d3, CRL_REG_LEN_08BIT, 0x00}, + {0xd4d4, CRL_REG_LEN_08BIT, 0xe0}, + {0xd4d5, CRL_REG_LEN_08BIT, 0xa3}, + {0xd4d6, CRL_REG_LEN_08BIT, 0x58}, + {0xd4d7, CRL_REG_LEN_08BIT, 0x00}, + {0xd4d8, CRL_REG_LEN_08BIT, 0xa4}, + {0xd4d9, CRL_REG_LEN_08BIT, 0xc6}, + {0xd4da, CRL_REG_LEN_08BIT, 0x00}, + {0xd4db, CRL_REG_LEN_08BIT, 0xff}, + {0xd4dc, CRL_REG_LEN_08BIT, 0xb8}, + {0xd4dd, CRL_REG_LEN_08BIT, 0x64}, + {0xd4de, CRL_REG_LEN_08BIT, 0x00}, + {0xd4df, CRL_REG_LEN_08BIT, 0x18}, + {0xd4e0, CRL_REG_LEN_08BIT, 0xbc}, + {0xd4e1, CRL_REG_LEN_08BIT, 0x46}, + {0xd4e2, CRL_REG_LEN_08BIT, 0x00}, + {0xd4e3, CRL_REG_LEN_08BIT, 0x03}, + {0xd4e4, CRL_REG_LEN_08BIT, 0x94}, + {0xd4e5, CRL_REG_LEN_08BIT, 0x85}, + {0xd4e6, CRL_REG_LEN_08BIT, 0x00}, + {0xd4e7, CRL_REG_LEN_08BIT, 0x00}, + {0xd4e8, CRL_REG_LEN_08BIT, 0xb8}, + {0xd4e9, CRL_REG_LEN_08BIT, 0x63}, + {0xd4ea, CRL_REG_LEN_08BIT, 0x00}, + {0xd4eb, CRL_REG_LEN_08BIT, 0x98}, + {0xd4ec, CRL_REG_LEN_08BIT, 0xe0}, + {0xd4ed, CRL_REG_LEN_08BIT, 0x64}, + {0xd4ee, CRL_REG_LEN_08BIT, 0x18}, + {0xd4ef, CRL_REG_LEN_08BIT, 0x00}, + {0xd4f0, CRL_REG_LEN_08BIT, 0x0f}, + {0xd4f1, CRL_REG_LEN_08BIT, 0xff}, + {0xd4f2, CRL_REG_LEN_08BIT, 0xff}, + {0xd4f3, CRL_REG_LEN_08BIT, 0xf0}, + {0xd4f4, CRL_REG_LEN_08BIT, 0xdc}, + {0xd4f5, CRL_REG_LEN_08BIT, 0x05}, + {0xd4f6, CRL_REG_LEN_08BIT, 0x18}, + {0xd4f7, CRL_REG_LEN_08BIT, 0x00}, + {0xd4f8, CRL_REG_LEN_08BIT, 0x9c}, + {0xd4f9, CRL_REG_LEN_08BIT, 0x68}, + 
{0xd4fa, CRL_REG_LEN_08BIT, 0x00}, + {0xd4fb, CRL_REG_LEN_08BIT, 0x01}, + {0xd4fc, CRL_REG_LEN_08BIT, 0xa5}, + {0xd4fd, CRL_REG_LEN_08BIT, 0x03}, + {0xd4fe, CRL_REG_LEN_08BIT, 0x00}, + {0xd4ff, CRL_REG_LEN_08BIT, 0xff}, + {0xd500, CRL_REG_LEN_08BIT, 0xbc}, + {0xd501, CRL_REG_LEN_08BIT, 0x48}, + {0xd502, CRL_REG_LEN_08BIT, 0x00}, + {0xd503, CRL_REG_LEN_08BIT, 0x01}, + {0xd504, CRL_REG_LEN_08BIT, 0x0f}, + {0xd505, CRL_REG_LEN_08BIT, 0xff}, + {0xd506, CRL_REG_LEN_08BIT, 0xff}, + {0xd507, CRL_REG_LEN_08BIT, 0xea}, + {0xd508, CRL_REG_LEN_08BIT, 0xb8}, + {0xd509, CRL_REG_LEN_08BIT, 0xe8}, + {0xd50a, CRL_REG_LEN_08BIT, 0x00}, + {0xd50b, CRL_REG_LEN_08BIT, 0x02}, + {0xd50c, CRL_REG_LEN_08BIT, 0x18}, + {0xd50d, CRL_REG_LEN_08BIT, 0x60}, + {0xd50e, CRL_REG_LEN_08BIT, 0x00}, + {0xd50f, CRL_REG_LEN_08BIT, 0x01}, + {0xd510, CRL_REG_LEN_08BIT, 0xa8}, + {0xd511, CRL_REG_LEN_08BIT, 0x63}, + {0xd512, CRL_REG_LEN_08BIT, 0x06}, + {0xd513, CRL_REG_LEN_08BIT, 0x14}, + {0xd514, CRL_REG_LEN_08BIT, 0x07}, + {0xd515, CRL_REG_LEN_08BIT, 0xff}, + {0xd516, CRL_REG_LEN_08BIT, 0xe3}, + {0xd517, CRL_REG_LEN_08BIT, 0xe9}, + {0xd518, CRL_REG_LEN_08BIT, 0x9c}, + {0xd519, CRL_REG_LEN_08BIT, 0x83}, + {0xd51a, CRL_REG_LEN_08BIT, 0x00}, + {0xd51b, CRL_REG_LEN_08BIT, 0x10}, + {0xd51c, CRL_REG_LEN_08BIT, 0x85}, + {0xd51d, CRL_REG_LEN_08BIT, 0x21}, + {0xd51e, CRL_REG_LEN_08BIT, 0x00}, + {0xd51f, CRL_REG_LEN_08BIT, 0x00}, + {0xd520, CRL_REG_LEN_08BIT, 0x44}, + {0xd521, CRL_REG_LEN_08BIT, 0x00}, + {0xd522, CRL_REG_LEN_08BIT, 0x48}, + {0xd523, CRL_REG_LEN_08BIT, 0x00}, + {0xd524, CRL_REG_LEN_08BIT, 0x9c}, + {0xd525, CRL_REG_LEN_08BIT, 0x21}, + {0xd526, CRL_REG_LEN_08BIT, 0x00}, + {0xd527, CRL_REG_LEN_08BIT, 0x04}, + {0xd528, CRL_REG_LEN_08BIT, 0x18}, + {0xd529, CRL_REG_LEN_08BIT, 0x60}, + {0xd52a, CRL_REG_LEN_08BIT, 0x00}, + {0xd52b, CRL_REG_LEN_08BIT, 0x01}, + {0xd52c, CRL_REG_LEN_08BIT, 0x9c}, + {0xd52d, CRL_REG_LEN_08BIT, 0x80}, + {0xd52e, CRL_REG_LEN_08BIT, 0xff}, + {0xd52f, CRL_REG_LEN_08BIT, 0xff}, + 
{0xd530, CRL_REG_LEN_08BIT, 0xa8}, + {0xd531, CRL_REG_LEN_08BIT, 0x63}, + {0xd532, CRL_REG_LEN_08BIT, 0x09}, + {0xd533, CRL_REG_LEN_08BIT, 0xef}, + {0xd534, CRL_REG_LEN_08BIT, 0xd8}, + {0xd535, CRL_REG_LEN_08BIT, 0x03}, + {0xd536, CRL_REG_LEN_08BIT, 0x20}, + {0xd537, CRL_REG_LEN_08BIT, 0x00}, + {0xd538, CRL_REG_LEN_08BIT, 0x18}, + {0xd539, CRL_REG_LEN_08BIT, 0x60}, + {0xd53a, CRL_REG_LEN_08BIT, 0x80}, + {0xd53b, CRL_REG_LEN_08BIT, 0x06}, + {0xd53c, CRL_REG_LEN_08BIT, 0xa8}, + {0xd53d, CRL_REG_LEN_08BIT, 0x63}, + {0xd53e, CRL_REG_LEN_08BIT, 0xc9}, + {0xd53f, CRL_REG_LEN_08BIT, 0xef}, + {0xd540, CRL_REG_LEN_08BIT, 0xd8}, + {0xd541, CRL_REG_LEN_08BIT, 0x03}, + {0xd542, CRL_REG_LEN_08BIT, 0x20}, + {0xd543, CRL_REG_LEN_08BIT, 0x00}, + {0xd544, CRL_REG_LEN_08BIT, 0x44}, + {0xd545, CRL_REG_LEN_08BIT, 0x00}, + {0xd546, CRL_REG_LEN_08BIT, 0x48}, + {0xd547, CRL_REG_LEN_08BIT, 0x00}, + {0xd548, CRL_REG_LEN_08BIT, 0x15}, + {0xd549, CRL_REG_LEN_08BIT, 0x00}, + {0xd54a, CRL_REG_LEN_08BIT, 0x00}, + {0xd54b, CRL_REG_LEN_08BIT, 0x00}, + {0xd54c, CRL_REG_LEN_08BIT, 0x18}, + {0xd54d, CRL_REG_LEN_08BIT, 0x80}, + {0xd54e, CRL_REG_LEN_08BIT, 0x00}, + {0xd54f, CRL_REG_LEN_08BIT, 0x01}, + {0xd550, CRL_REG_LEN_08BIT, 0xa8}, + {0xd551, CRL_REG_LEN_08BIT, 0x84}, + {0xd552, CRL_REG_LEN_08BIT, 0x0a}, + {0xd553, CRL_REG_LEN_08BIT, 0x12}, + {0xd554, CRL_REG_LEN_08BIT, 0x8c}, + {0xd555, CRL_REG_LEN_08BIT, 0x64}, + {0xd556, CRL_REG_LEN_08BIT, 0x00}, + {0xd557, CRL_REG_LEN_08BIT, 0x00}, + {0xd558, CRL_REG_LEN_08BIT, 0xbc}, + {0xd559, CRL_REG_LEN_08BIT, 0x03}, + {0xd55a, CRL_REG_LEN_08BIT, 0x00}, + {0xd55b, CRL_REG_LEN_08BIT, 0x00}, + {0xd55c, CRL_REG_LEN_08BIT, 0x13}, + {0xd55d, CRL_REG_LEN_08BIT, 0xff}, + {0xd55e, CRL_REG_LEN_08BIT, 0xff}, + {0xd55f, CRL_REG_LEN_08BIT, 0xfe}, + {0xd560, CRL_REG_LEN_08BIT, 0x15}, + {0xd561, CRL_REG_LEN_08BIT, 0x00}, + {0xd562, CRL_REG_LEN_08BIT, 0x00}, + {0xd563, CRL_REG_LEN_08BIT, 0x00}, + {0xd564, CRL_REG_LEN_08BIT, 0x44}, + {0xd565, CRL_REG_LEN_08BIT, 0x00}, + 
{0xd566, CRL_REG_LEN_08BIT, 0x48}, + {0xd567, CRL_REG_LEN_08BIT, 0x00}, + {0xd568, CRL_REG_LEN_08BIT, 0x15}, + {0xd569, CRL_REG_LEN_08BIT, 0x00}, + {0xd56a, CRL_REG_LEN_08BIT, 0x00}, + {0xd56b, CRL_REG_LEN_08BIT, 0x00}, + {0xd56c, CRL_REG_LEN_08BIT, 0x00}, + {0xd56d, CRL_REG_LEN_08BIT, 0x00}, + {0xd56e, CRL_REG_LEN_08BIT, 0x00}, + {0xd56f, CRL_REG_LEN_08BIT, 0x00}, + {0xd570, CRL_REG_LEN_08BIT, 0x00}, + {0xd571, CRL_REG_LEN_08BIT, 0x00}, + {0xd572, CRL_REG_LEN_08BIT, 0x00}, + {0xd573, CRL_REG_LEN_08BIT, 0x00}, + {0x6f0e, CRL_REG_LEN_08BIT, 0x33}, + {0x6f0f, CRL_REG_LEN_08BIT, 0x33}, + {0x460e, CRL_REG_LEN_08BIT, 0x08}, + {0x460f, CRL_REG_LEN_08BIT, 0x01}, + {0x4610, CRL_REG_LEN_08BIT, 0x00}, + {0x4611, CRL_REG_LEN_08BIT, 0x01}, + {0x4612, CRL_REG_LEN_08BIT, 0x00}, + {0x4613, CRL_REG_LEN_08BIT, 0x01}, + {0x4605, CRL_REG_LEN_08BIT, 0x0b}, + {0x4608, CRL_REG_LEN_08BIT, 0x00}, + {0x4609, CRL_REG_LEN_08BIT, 0x08}, + {0x4602, CRL_REG_LEN_08BIT, 0x02}, + {0x4603, CRL_REG_LEN_08BIT, 0xd8}, + {0x6804, CRL_REG_LEN_08BIT, 0x00}, + {0x6805, CRL_REG_LEN_08BIT, 0x06}, + {0x6806, CRL_REG_LEN_08BIT, 0x00}, + {0x5120, CRL_REG_LEN_08BIT, 0x00}, + {0x3510, CRL_REG_LEN_08BIT, 0x00}, + {0x3504, CRL_REG_LEN_08BIT, 0x00}, + {0x6800, CRL_REG_LEN_08BIT, 0x00}, + {0x6f0d, CRL_REG_LEN_08BIT, 0x0f}, + {0x5000, CRL_REG_LEN_08BIT, 0xff}, + {0x5001, CRL_REG_LEN_08BIT, 0xbf}, + {0x5002, CRL_REG_LEN_08BIT, 0x7e}, + {0x5003, CRL_REG_LEN_08BIT, 0x0c}, + {0x503d, CRL_REG_LEN_08BIT, 0x00}, + {0xc450, CRL_REG_LEN_08BIT, 0x01}, + {0xc452, CRL_REG_LEN_08BIT, 0x04}, + {0xc453, CRL_REG_LEN_08BIT, 0x00}, + {0xc454, CRL_REG_LEN_08BIT, 0x00}, + {0xc455, CRL_REG_LEN_08BIT, 0x00}, + {0xc456, CRL_REG_LEN_08BIT, 0x00}, + {0xc457, CRL_REG_LEN_08BIT, 0x00}, + {0xc458, CRL_REG_LEN_08BIT, 0x00}, + {0xc459, CRL_REG_LEN_08BIT, 0x00}, + {0xc45b, CRL_REG_LEN_08BIT, 0x00}, + {0xc45c, CRL_REG_LEN_08BIT, 0x00}, + {0xc45d, CRL_REG_LEN_08BIT, 0x00}, + {0xc45e, CRL_REG_LEN_08BIT, 0x02}, + {0xc45f, CRL_REG_LEN_08BIT, 0x01}, + 
{0xc460, CRL_REG_LEN_08BIT, 0x01}, + {0xc461, CRL_REG_LEN_08BIT, 0x01}, + {0xc462, CRL_REG_LEN_08BIT, 0x01}, + {0xc464, CRL_REG_LEN_08BIT, 0x88}, + {0xc465, CRL_REG_LEN_08BIT, 0x00}, + {0xc466, CRL_REG_LEN_08BIT, 0x8a}, + {0xc467, CRL_REG_LEN_08BIT, 0x00}, + {0xc468, CRL_REG_LEN_08BIT, 0x86}, + {0xc469, CRL_REG_LEN_08BIT, 0x00}, + {0xc46a, CRL_REG_LEN_08BIT, 0x40}, + {0xc46b, CRL_REG_LEN_08BIT, 0x50}, + {0xc46c, CRL_REG_LEN_08BIT, 0x30}, + {0xc46d, CRL_REG_LEN_08BIT, 0x28}, + {0xc46e, CRL_REG_LEN_08BIT, 0x60}, + {0xc46f, CRL_REG_LEN_08BIT, 0x40}, + {0xc47c, CRL_REG_LEN_08BIT, 0x01}, + {0xc47d, CRL_REG_LEN_08BIT, 0x38}, + {0xc47e, CRL_REG_LEN_08BIT, 0x00}, + {0xc47f, CRL_REG_LEN_08BIT, 0x00}, + {0xc480, CRL_REG_LEN_08BIT, 0x00}, + {0xc481, CRL_REG_LEN_08BIT, 0xff}, + {0xc482, CRL_REG_LEN_08BIT, 0x00}, + {0xc483, CRL_REG_LEN_08BIT, 0x40}, + {0xc484, CRL_REG_LEN_08BIT, 0x00}, + {0xc485, CRL_REG_LEN_08BIT, 0x18}, + {0xc486, CRL_REG_LEN_08BIT, 0x00}, + {0xc487, CRL_REG_LEN_08BIT, 0x18}, + {0xc488, CRL_REG_LEN_08BIT, 0x2e}, + {0xc489, CRL_REG_LEN_08BIT, 0x40}, + {0xc48a, CRL_REG_LEN_08BIT, 0x2e}, + {0xc48b, CRL_REG_LEN_08BIT, 0x40}, + {0xc48c, CRL_REG_LEN_08BIT, 0x00}, + {0xc48d, CRL_REG_LEN_08BIT, 0x04}, + {0xc48e, CRL_REG_LEN_08BIT, 0x00}, + {0xc48f, CRL_REG_LEN_08BIT, 0x04}, + {0xc490, CRL_REG_LEN_08BIT, 0x07}, + {0xc492, CRL_REG_LEN_08BIT, 0x20}, + {0xc493, CRL_REG_LEN_08BIT, 0x08}, + {0xc498, CRL_REG_LEN_08BIT, 0x02}, + {0xc499, CRL_REG_LEN_08BIT, 0x00}, + {0xc49a, CRL_REG_LEN_08BIT, 0x02}, + {0xc49b, CRL_REG_LEN_08BIT, 0x00}, + {0xc49c, CRL_REG_LEN_08BIT, 0x02}, + {0xc49d, CRL_REG_LEN_08BIT, 0x00}, + {0xc49e, CRL_REG_LEN_08BIT, 0x02}, + {0xc49f, CRL_REG_LEN_08BIT, 0x60}, + {0xc4a0, CRL_REG_LEN_08BIT, 0x03}, + {0xc4a1, CRL_REG_LEN_08BIT, 0x00}, + {0xc4a2, CRL_REG_LEN_08BIT, 0x04}, + {0xc4a3, CRL_REG_LEN_08BIT, 0x00}, + {0xc4a4, CRL_REG_LEN_08BIT, 0x00}, + {0xc4a5, CRL_REG_LEN_08BIT, 0x10}, + {0xc4a6, CRL_REG_LEN_08BIT, 0x00}, + {0xc4a7, CRL_REG_LEN_08BIT, 0x40}, + 
{0xc4a8, CRL_REG_LEN_08BIT, 0x00}, + {0xc4a9, CRL_REG_LEN_08BIT, 0x80}, + {0xc4aa, CRL_REG_LEN_08BIT, 0x0d}, + {0xc4ab, CRL_REG_LEN_08BIT, 0x00}, + {0xc4ac, CRL_REG_LEN_08BIT, 0x03}, + {0xc4ad, CRL_REG_LEN_08BIT, 0xf0}, + {0xc4b4, CRL_REG_LEN_08BIT, 0x01}, + {0xc4b5, CRL_REG_LEN_08BIT, 0x01}, + {0xc4b6, CRL_REG_LEN_08BIT, 0x00}, + {0xc4b7, CRL_REG_LEN_08BIT, 0x01}, + {0xc4b8, CRL_REG_LEN_08BIT, 0x00}, + {0xc4b9, CRL_REG_LEN_08BIT, 0x01}, + {0xc4ba, CRL_REG_LEN_08BIT, 0x01}, + {0xc4bb, CRL_REG_LEN_08BIT, 0x00}, + {0xc4bc, CRL_REG_LEN_08BIT, 0x01}, + {0xc4bd, CRL_REG_LEN_08BIT, 0x60}, + {0xc4be, CRL_REG_LEN_08BIT, 0x02}, + {0xc4bf, CRL_REG_LEN_08BIT, 0x33}, + {0xc4c8, CRL_REG_LEN_08BIT, 0x03}, + {0xc4c9, CRL_REG_LEN_08BIT, 0xd0}, + {0xc4ca, CRL_REG_LEN_08BIT, 0x0e}, + {0xc4cb, CRL_REG_LEN_08BIT, 0x00}, + {0xc4cc, CRL_REG_LEN_08BIT, 0x0e}, + {0xc4cd, CRL_REG_LEN_08BIT, 0x51}, + {0xc4ce, CRL_REG_LEN_08BIT, 0x0e}, + {0xc4cf, CRL_REG_LEN_08BIT, 0x51}, + {0xc4d0, CRL_REG_LEN_08BIT, 0x04}, + {0xc4d1, CRL_REG_LEN_08BIT, 0x80}, + {0xc4e0, CRL_REG_LEN_08BIT, 0x04}, + {0xc4e1, CRL_REG_LEN_08BIT, 0x02}, + {0xc4e2, CRL_REG_LEN_08BIT, 0x01}, + {0xc4e4, CRL_REG_LEN_08BIT, 0x10}, + {0xc4e5, CRL_REG_LEN_08BIT, 0x20}, + {0xc4e6, CRL_REG_LEN_08BIT, 0x30}, + {0xc4e7, CRL_REG_LEN_08BIT, 0x40}, + {0xc4e8, CRL_REG_LEN_08BIT, 0x50}, + {0xc4e9, CRL_REG_LEN_08BIT, 0x60}, + {0xc4ea, CRL_REG_LEN_08BIT, 0x70}, + {0xc4eb, CRL_REG_LEN_08BIT, 0x80}, + {0xc4ec, CRL_REG_LEN_08BIT, 0x90}, + {0xc4ed, CRL_REG_LEN_08BIT, 0xa0}, + {0xc4ee, CRL_REG_LEN_08BIT, 0xb0}, + {0xc4ef, CRL_REG_LEN_08BIT, 0xc0}, + {0xc4f0, CRL_REG_LEN_08BIT, 0xd0}, + {0xc4f1, CRL_REG_LEN_08BIT, 0xe0}, + {0xc4f2, CRL_REG_LEN_08BIT, 0xf0}, + {0xc4f3, CRL_REG_LEN_08BIT, 0x80}, + {0xc4f4, CRL_REG_LEN_08BIT, 0x00}, + {0xc4f5, CRL_REG_LEN_08BIT, 0x20}, + {0xc4f6, CRL_REG_LEN_08BIT, 0x02}, + {0xc4f7, CRL_REG_LEN_08BIT, 0x00}, + {0xc4f8, CRL_REG_LEN_08BIT, 0x04}, + {0xc4f9, CRL_REG_LEN_08BIT, 0x0b}, + {0xc4fa, CRL_REG_LEN_08BIT, 0x00}, + 
{0xc4fb, CRL_REG_LEN_08BIT, 0x00}, + {0xc4fc, CRL_REG_LEN_08BIT, 0x01}, + {0xc4fd, CRL_REG_LEN_08BIT, 0x00}, + {0xc4fe, CRL_REG_LEN_08BIT, 0x04}, + {0xc4ff, CRL_REG_LEN_08BIT, 0x02}, + {0xc500, CRL_REG_LEN_08BIT, 0x48}, + {0xc501, CRL_REG_LEN_08BIT, 0x74}, + {0xc502, CRL_REG_LEN_08BIT, 0x58}, + {0xc503, CRL_REG_LEN_08BIT, 0x80}, + {0xc504, CRL_REG_LEN_08BIT, 0x05}, + {0xc505, CRL_REG_LEN_08BIT, 0x80}, + {0xc506, CRL_REG_LEN_08BIT, 0x03}, + {0xc507, CRL_REG_LEN_08BIT, 0x80}, + {0xc508, CRL_REG_LEN_08BIT, 0x01}, + {0xc509, CRL_REG_LEN_08BIT, 0xc0}, + {0xc50a, CRL_REG_LEN_08BIT, 0x01}, + {0xc50b, CRL_REG_LEN_08BIT, 0xa0}, + {0xc50c, CRL_REG_LEN_08BIT, 0x01}, + {0xc50d, CRL_REG_LEN_08BIT, 0x2c}, + {0xc50e, CRL_REG_LEN_08BIT, 0x01}, + {0xc50f, CRL_REG_LEN_08BIT, 0x0a}, + {0xc510, CRL_REG_LEN_08BIT, 0x00}, + {0xc511, CRL_REG_LEN_08BIT, 0x00}, + {0xc512, CRL_REG_LEN_08BIT, 0xe5}, + {0xc513, CRL_REG_LEN_08BIT, 0x14}, + {0xc514, CRL_REG_LEN_08BIT, 0x04}, + {0xc515, CRL_REG_LEN_08BIT, 0x00}, + {0xc518, CRL_REG_LEN_08BIT, 0x03}, + {0xc519, CRL_REG_LEN_08BIT, 0x48}, + {0xc51a, CRL_REG_LEN_08BIT, 0x07}, + {0xc51b, CRL_REG_LEN_08BIT, 0x70}, + {0xc2e0, CRL_REG_LEN_08BIT, 0x00}, + {0xc2e1, CRL_REG_LEN_08BIT, 0x51}, + {0xc2e2, CRL_REG_LEN_08BIT, 0x00}, + {0xc2e3, CRL_REG_LEN_08BIT, 0xd6}, + {0xc2e4, CRL_REG_LEN_08BIT, 0x01}, + {0xc2e5, CRL_REG_LEN_08BIT, 0x5e}, + {0xc2e9, CRL_REG_LEN_08BIT, 0x01}, + {0xc2ea, CRL_REG_LEN_08BIT, 0x7a}, + {0xc2eb, CRL_REG_LEN_08BIT, 0x90}, + {0xc2ed, CRL_REG_LEN_08BIT, 0x00}, + {0xc2ee, CRL_REG_LEN_08BIT, 0x7a}, + {0xc2ef, CRL_REG_LEN_08BIT, 0x64}, + {0xc308, CRL_REG_LEN_08BIT, 0x00}, + {0xc309, CRL_REG_LEN_08BIT, 0x00}, + {0xc30a, CRL_REG_LEN_08BIT, 0x00}, + {0xc30c, CRL_REG_LEN_08BIT, 0x00}, + {0xc30d, CRL_REG_LEN_08BIT, 0x01}, + {0xc30e, CRL_REG_LEN_08BIT, 0x00}, + {0xc30f, CRL_REG_LEN_08BIT, 0x00}, + {0xc310, CRL_REG_LEN_08BIT, 0x01}, + {0xc311, CRL_REG_LEN_08BIT, 0x60}, + {0xc312, CRL_REG_LEN_08BIT, 0xff}, + {0xc313, CRL_REG_LEN_08BIT, 0x08}, + 
{0xc314, CRL_REG_LEN_08BIT, 0x01}, + {0xc315, CRL_REG_LEN_08BIT, 0x7f}, + {0xc316, CRL_REG_LEN_08BIT, 0xff}, + {0xc317, CRL_REG_LEN_08BIT, 0x0b}, + {0xc318, CRL_REG_LEN_08BIT, 0x00}, + {0xc319, CRL_REG_LEN_08BIT, 0x0c}, + {0xc31a, CRL_REG_LEN_08BIT, 0x00}, + {0xc31b, CRL_REG_LEN_08BIT, 0xe0}, + {0xc31c, CRL_REG_LEN_08BIT, 0x00}, + {0xc31d, CRL_REG_LEN_08BIT, 0x14}, + {0xc31e, CRL_REG_LEN_08BIT, 0x00}, + {0xc31f, CRL_REG_LEN_08BIT, 0xc5}, + {0xc320, CRL_REG_LEN_08BIT, 0xff}, + {0xc321, CRL_REG_LEN_08BIT, 0x4b}, + {0xc322, CRL_REG_LEN_08BIT, 0xff}, + {0xc323, CRL_REG_LEN_08BIT, 0xf0}, + {0xc324, CRL_REG_LEN_08BIT, 0xff}, + {0xc325, CRL_REG_LEN_08BIT, 0xe8}, + {0xc326, CRL_REG_LEN_08BIT, 0x00}, + {0xc327, CRL_REG_LEN_08BIT, 0x46}, + {0xc328, CRL_REG_LEN_08BIT, 0xff}, + {0xc329, CRL_REG_LEN_08BIT, 0xd2}, + {0xc32a, CRL_REG_LEN_08BIT, 0xff}, + {0xc32b, CRL_REG_LEN_08BIT, 0xe4}, + {0xc32c, CRL_REG_LEN_08BIT, 0xff}, + {0xc32d, CRL_REG_LEN_08BIT, 0xbb}, + {0xc32e, CRL_REG_LEN_08BIT, 0x00}, + {0xc32f, CRL_REG_LEN_08BIT, 0x61}, + {0xc330, CRL_REG_LEN_08BIT, 0xff}, + {0xc331, CRL_REG_LEN_08BIT, 0xf9}, + {0xc332, CRL_REG_LEN_08BIT, 0x00}, + {0xc333, CRL_REG_LEN_08BIT, 0xd9}, + {0xc334, CRL_REG_LEN_08BIT, 0x00}, + {0xc335, CRL_REG_LEN_08BIT, 0x2e}, + {0xc336, CRL_REG_LEN_08BIT, 0x00}, + {0xc337, CRL_REG_LEN_08BIT, 0xb1}, + {0xc338, CRL_REG_LEN_08BIT, 0xff}, + {0xc339, CRL_REG_LEN_08BIT, 0x64}, + {0xc33a, CRL_REG_LEN_08BIT, 0xff}, + {0xc33b, CRL_REG_LEN_08BIT, 0xeb}, + {0xc33c, CRL_REG_LEN_08BIT, 0xff}, + {0xc33d, CRL_REG_LEN_08BIT, 0xe8}, + {0xc33e, CRL_REG_LEN_08BIT, 0x00}, + {0xc33f, CRL_REG_LEN_08BIT, 0x48}, + {0xc340, CRL_REG_LEN_08BIT, 0xff}, + {0xc341, CRL_REG_LEN_08BIT, 0xd0}, + {0xc342, CRL_REG_LEN_08BIT, 0xff}, + {0xc343, CRL_REG_LEN_08BIT, 0xed}, + {0xc344, CRL_REG_LEN_08BIT, 0xff}, + {0xc345, CRL_REG_LEN_08BIT, 0xad}, + {0xc346, CRL_REG_LEN_08BIT, 0x00}, + {0xc347, CRL_REG_LEN_08BIT, 0x66}, + {0xc348, CRL_REG_LEN_08BIT, 0x01}, + {0xc349, CRL_REG_LEN_08BIT, 0x00}, + 
{0x6700, CRL_REG_LEN_08BIT, 0x04}, + {0x6701, CRL_REG_LEN_08BIT, 0x7b}, + {0x6702, CRL_REG_LEN_08BIT, 0xfd}, + {0x6703, CRL_REG_LEN_08BIT, 0xf9}, + {0x6704, CRL_REG_LEN_08BIT, 0x3d}, + {0x6705, CRL_REG_LEN_08BIT, 0x71}, + {0x6706, CRL_REG_LEN_08BIT, 0x78}, + {0x6708, CRL_REG_LEN_08BIT, 0x05}, + {0x6f06, CRL_REG_LEN_08BIT, 0x6f}, + {0x6f07, CRL_REG_LEN_08BIT, 0x00}, + {0x6f0a, CRL_REG_LEN_08BIT, 0x6f}, + {0x6f0b, CRL_REG_LEN_08BIT, 0x00}, + {0x6f00, CRL_REG_LEN_08BIT, 0x03}, + {0xc34c, CRL_REG_LEN_08BIT, 0x01}, + {0xc34d, CRL_REG_LEN_08BIT, 0x00}, + {0xc34e, CRL_REG_LEN_08BIT, 0x46}, + {0xc34f, CRL_REG_LEN_08BIT, 0x55}, + {0xc350, CRL_REG_LEN_08BIT, 0x00}, + {0xc351, CRL_REG_LEN_08BIT, 0x40}, + {0xc352, CRL_REG_LEN_08BIT, 0x00}, + {0xc353, CRL_REG_LEN_08BIT, 0xff}, + {0xc354, CRL_REG_LEN_08BIT, 0x04}, + {0xc355, CRL_REG_LEN_08BIT, 0x08}, + {0xc356, CRL_REG_LEN_08BIT, 0x01}, + {0xc357, CRL_REG_LEN_08BIT, 0xef}, + {0xc358, CRL_REG_LEN_08BIT, 0x30}, + {0xc359, CRL_REG_LEN_08BIT, 0x01}, + {0xc35a, CRL_REG_LEN_08BIT, 0x64}, + {0xc35b, CRL_REG_LEN_08BIT, 0x46}, + {0xc35c, CRL_REG_LEN_08BIT, 0x00}, + {0x3042, CRL_REG_LEN_08BIT, 0xf0}, + {0x3042, CRL_REG_LEN_08BIT, 0xf0}, + {0x3042, CRL_REG_LEN_08BIT, 0xf0}, + {0x3042, CRL_REG_LEN_08BIT, 0xf0}, + {0x3042, CRL_REG_LEN_08BIT, 0xf0}, + {0x3042, CRL_REG_LEN_08BIT, 0xf0}, + {0x3042, CRL_REG_LEN_08BIT, 0xf0}, + {0x3042, CRL_REG_LEN_08BIT, 0xf0}, + {0x3042, CRL_REG_LEN_08BIT, 0xf0}, + {0x3042, CRL_REG_LEN_08BIT, 0xf0}, + {0x3042, CRL_REG_LEN_08BIT, 0xf0}, + {0x3042, CRL_REG_LEN_08BIT, 0xf0}, + {0x3042, CRL_REG_LEN_08BIT, 0xf0}, + {0x3042, CRL_REG_LEN_08BIT, 0xf0}, + {0x3042, CRL_REG_LEN_08BIT, 0xf0}, + {0x3042, CRL_REG_LEN_08BIT, 0xf0}, + {0x3042, CRL_REG_LEN_08BIT, 0xf0}, + {0x3042, CRL_REG_LEN_08BIT, 0xf0}, + {0x3042, CRL_REG_LEN_08BIT, 0xf0}, + {0x3042, CRL_REG_LEN_08BIT, 0xf0}, + {0x3042, CRL_REG_LEN_08BIT, 0xf0}, + {0x3042, CRL_REG_LEN_08BIT, 0xf0}, + {0x3042, CRL_REG_LEN_08BIT, 0xf0}, + {0x3042, CRL_REG_LEN_08BIT, 0xf0}, + 
{0x3042, CRL_REG_LEN_08BIT, 0xf0}, + {0x3042, CRL_REG_LEN_08BIT, 0xf0}, + {0x301b, CRL_REG_LEN_08BIT, 0xf0}, + {0x301c, CRL_REG_LEN_08BIT, 0xf0}, + {0x301a, CRL_REG_LEN_08BIT, 0xf0}, + {0xceb0, CRL_REG_LEN_08BIT, 0x00}, + {0xceb1, CRL_REG_LEN_08BIT, 0x00}, + {0xceb2, CRL_REG_LEN_08BIT, 0x00}, + {0xceb3, CRL_REG_LEN_08BIT, 0x00}, + {0xceb4, CRL_REG_LEN_08BIT, 0x00}, + {0xceb5, CRL_REG_LEN_08BIT, 0x00}, + {0xceb6, CRL_REG_LEN_08BIT, 0x00}, + {0xceb7, CRL_REG_LEN_08BIT, 0x00}, + {0xc4bc, CRL_REG_LEN_08BIT, 0x01}, + {0xc4bd, CRL_REG_LEN_08BIT, 0x60}, + {0xc4a0, CRL_REG_LEN_08BIT, 0x03}, + {0xc4a2, CRL_REG_LEN_08BIT, 0x04}, + {0x3011, CRL_REG_LEN_08BIT, 0x42}, + {0x5608, CRL_REG_LEN_08BIT, 0x0d}, +}; + +static struct crl_register_write_rep ov10635_640_480_YUV_HDR[] = { + {0x301b, CRL_REG_LEN_08BIT, 0xff}, + {0x301c, CRL_REG_LEN_08BIT, 0xff}, + {0x301a, CRL_REG_LEN_08BIT, 0xff}, + {0x3011, CRL_REG_LEN_08BIT, 0x42}, + {0x6900, CRL_REG_LEN_08BIT, 0x0c}, + {0x6901, CRL_REG_LEN_08BIT, 0x11}, + {0x3503, CRL_REG_LEN_08BIT, 0x10}, + {0x3025, CRL_REG_LEN_08BIT, 0x03}, + {0x3003, CRL_REG_LEN_08BIT, 0x14}, + {0x3004, CRL_REG_LEN_08BIT, 0x23}, + {0x3005, CRL_REG_LEN_08BIT, 0x20}, + {0x3006, CRL_REG_LEN_08BIT, 0x91}, + {0x3600, CRL_REG_LEN_08BIT, 0x74}, + {0x3601, CRL_REG_LEN_08BIT, 0x2b}, + {0x3612, CRL_REG_LEN_08BIT, 0x00}, + {0x3611, CRL_REG_LEN_08BIT, 0x67}, + {0x3633, CRL_REG_LEN_08BIT, 0xca}, + {0x3602, CRL_REG_LEN_08BIT, 0x2f}, + {0x3603, CRL_REG_LEN_08BIT, 0x00}, + {0x3630, CRL_REG_LEN_08BIT, 0x28}, + {0x3631, CRL_REG_LEN_08BIT, 0x16}, + {0x3714, CRL_REG_LEN_08BIT, 0x10}, + {0x371d, CRL_REG_LEN_08BIT, 0x01}, + {0x3007, CRL_REG_LEN_08BIT, 0x01}, + {0x3024, CRL_REG_LEN_08BIT, 0x01}, + {0x3020, CRL_REG_LEN_08BIT, 0x0b}, + {0x3702, CRL_REG_LEN_08BIT, 0x0a}, + {0x3703, CRL_REG_LEN_08BIT, 0x17}, + {0x3704, CRL_REG_LEN_08BIT, 0x0f}, + {0x3709, CRL_REG_LEN_08BIT, 0xa8}, + {0x3709, CRL_REG_LEN_08BIT, 0xa8}, + {0x370c, CRL_REG_LEN_08BIT, 0xc7}, + {0x370d, CRL_REG_LEN_08BIT, 0x80}, + 
{0x3712, CRL_REG_LEN_08BIT, 0x00}, + {0x3713, CRL_REG_LEN_08BIT, 0x20}, + {0x3715, CRL_REG_LEN_08BIT, 0x04}, + {0x381d, CRL_REG_LEN_08BIT, 0x40}, + {0x381c, CRL_REG_LEN_08BIT, 0x00}, + {0x3822, CRL_REG_LEN_08BIT, 0x50}, + {0x3824, CRL_REG_LEN_08BIT, 0x50}, + {0x3815, CRL_REG_LEN_08BIT, 0x8c}, + {0x3804, CRL_REG_LEN_08BIT, 0x05}, + {0x3805, CRL_REG_LEN_08BIT, 0x1f}, + {0x3800, CRL_REG_LEN_08BIT, 0x00}, + {0x3801, CRL_REG_LEN_08BIT, 0x00}, + {0x3806, CRL_REG_LEN_08BIT, 0x02}, + {0x3807, CRL_REG_LEN_08BIT, 0x89}, + {0x3802, CRL_REG_LEN_08BIT, 0x00}, + {0x3803, CRL_REG_LEN_08BIT, 0xa4}, + {0x3808, CRL_REG_LEN_08BIT, 0x02}, + {0x3809, CRL_REG_LEN_08BIT, 0x80}, + {0x380a, CRL_REG_LEN_08BIT, 0x01}, + {0x380b, CRL_REG_LEN_08BIT, 0xe0}, + {0x380c, CRL_REG_LEN_08BIT, 0x03}, + {0x380d, CRL_REG_LEN_08BIT, 0xc0}, + {0x6e42, CRL_REG_LEN_08BIT, 0x02}, + {0x6e43, CRL_REG_LEN_08BIT, 0x08}, + {0x380e, CRL_REG_LEN_08BIT, 0x02}, + {0x380f, CRL_REG_LEN_08BIT, 0x08}, + {0x3813, CRL_REG_LEN_08BIT, 0x02}, + {0x3811, CRL_REG_LEN_08BIT, 0x08}, + {0x381f, CRL_REG_LEN_08BIT, 0x0c}, + {0x3828, CRL_REG_LEN_08BIT, 0x03}, + {0x3829, CRL_REG_LEN_08BIT, 0x10}, + {0x382a, CRL_REG_LEN_08BIT, 0x10}, + {0x382b, CRL_REG_LEN_08BIT, 0x10}, + {0x3621, CRL_REG_LEN_08BIT, 0x74}, + {0x5005, CRL_REG_LEN_08BIT, 0x08}, + {0x56d5, CRL_REG_LEN_08BIT, 0x00}, + {0x56d6, CRL_REG_LEN_08BIT, 0x80}, + {0x56d7, CRL_REG_LEN_08BIT, 0x00}, + {0x56d8, CRL_REG_LEN_08BIT, 0x00}, + {0x56d9, CRL_REG_LEN_08BIT, 0x00}, + {0x56da, CRL_REG_LEN_08BIT, 0x80}, + {0x56db, CRL_REG_LEN_08BIT, 0x00}, + {0x56dc, CRL_REG_LEN_08BIT, 0x00}, + {0x56e8, CRL_REG_LEN_08BIT, 0x00}, + {0x56e9, CRL_REG_LEN_08BIT, 0x7f}, + {0x56ea, CRL_REG_LEN_08BIT, 0x00}, + {0x56eb, CRL_REG_LEN_08BIT, 0x7f}, + {0x5100, CRL_REG_LEN_08BIT, 0x00}, + {0x5101, CRL_REG_LEN_08BIT, 0x80}, + {0x5102, CRL_REG_LEN_08BIT, 0x00}, + {0x5103, CRL_REG_LEN_08BIT, 0x80}, + {0x5104, CRL_REG_LEN_08BIT, 0x00}, + {0x5105, CRL_REG_LEN_08BIT, 0x80}, + {0x5106, CRL_REG_LEN_08BIT, 0x00}, + 
{0x5107, CRL_REG_LEN_08BIT, 0x80}, + {0x5108, CRL_REG_LEN_08BIT, 0x00}, + {0x5109, CRL_REG_LEN_08BIT, 0x00}, + {0x510a, CRL_REG_LEN_08BIT, 0x00}, + {0x510b, CRL_REG_LEN_08BIT, 0x00}, + {0x510c, CRL_REG_LEN_08BIT, 0x00}, + {0x510d, CRL_REG_LEN_08BIT, 0x00}, + {0x510e, CRL_REG_LEN_08BIT, 0x00}, + {0x510f, CRL_REG_LEN_08BIT, 0x00}, + {0x5110, CRL_REG_LEN_08BIT, 0x00}, + {0x5111, CRL_REG_LEN_08BIT, 0x80}, + {0x5112, CRL_REG_LEN_08BIT, 0x00}, + {0x5113, CRL_REG_LEN_08BIT, 0x80}, + {0x5114, CRL_REG_LEN_08BIT, 0x00}, + {0x5115, CRL_REG_LEN_08BIT, 0x80}, + {0x5116, CRL_REG_LEN_08BIT, 0x00}, + {0x5117, CRL_REG_LEN_08BIT, 0x80}, + {0x5118, CRL_REG_LEN_08BIT, 0x00}, + {0x5119, CRL_REG_LEN_08BIT, 0x00}, + {0x511a, CRL_REG_LEN_08BIT, 0x00}, + {0x511b, CRL_REG_LEN_08BIT, 0x00}, + {0x511c, CRL_REG_LEN_08BIT, 0x00}, + {0x511d, CRL_REG_LEN_08BIT, 0x00}, + {0x511e, CRL_REG_LEN_08BIT, 0x00}, + {0x511f, CRL_REG_LEN_08BIT, 0x00}, + {0x56d0, CRL_REG_LEN_08BIT, 0x00}, + {0x5006, CRL_REG_LEN_08BIT, 0x24}, + {0x5608, CRL_REG_LEN_08BIT, 0x19}, + {0x52d7, CRL_REG_LEN_08BIT, 0x06}, + {0x528d, CRL_REG_LEN_08BIT, 0x08}, + {0x5293, CRL_REG_LEN_08BIT, 0x12}, + {0x52d3, CRL_REG_LEN_08BIT, 0x12}, + {0x5288, CRL_REG_LEN_08BIT, 0x06}, + {0x5289, CRL_REG_LEN_08BIT, 0x20}, + {0x52c8, CRL_REG_LEN_08BIT, 0x06}, + {0x52c9, CRL_REG_LEN_08BIT, 0x20}, + {0x52cd, CRL_REG_LEN_08BIT, 0x04}, + {0x5381, CRL_REG_LEN_08BIT, 0x00}, + {0x5382, CRL_REG_LEN_08BIT, 0xff}, + {0x5589, CRL_REG_LEN_08BIT, 0x76}, + {0x558a, CRL_REG_LEN_08BIT, 0x47}, + {0x558b, CRL_REG_LEN_08BIT, 0xef}, + {0x558c, CRL_REG_LEN_08BIT, 0xc9}, + {0x558d, CRL_REG_LEN_08BIT, 0x49}, + {0x558e, CRL_REG_LEN_08BIT, 0x30}, + {0x558f, CRL_REG_LEN_08BIT, 0x67}, + {0x5590, CRL_REG_LEN_08BIT, 0x3f}, + {0x5591, CRL_REG_LEN_08BIT, 0xf0}, + {0x5592, CRL_REG_LEN_08BIT, 0x10}, + {0x55a2, CRL_REG_LEN_08BIT, 0x6d}, + {0x55a3, CRL_REG_LEN_08BIT, 0x55}, + {0x55a4, CRL_REG_LEN_08BIT, 0xc3}, + {0x55a5, CRL_REG_LEN_08BIT, 0xb5}, + {0x55a6, CRL_REG_LEN_08BIT, 0x43}, + 
{0x55a7, CRL_REG_LEN_08BIT, 0x38}, + {0x55a8, CRL_REG_LEN_08BIT, 0x5f}, + {0x55a9, CRL_REG_LEN_08BIT, 0x4b}, + {0x55aa, CRL_REG_LEN_08BIT, 0xf0}, + {0x55ab, CRL_REG_LEN_08BIT, 0x10}, + {0x5581, CRL_REG_LEN_08BIT, 0x52}, + {0x5300, CRL_REG_LEN_08BIT, 0x01}, + {0x5301, CRL_REG_LEN_08BIT, 0x00}, + {0x5302, CRL_REG_LEN_08BIT, 0x00}, + {0x5303, CRL_REG_LEN_08BIT, 0x0e}, + {0x5304, CRL_REG_LEN_08BIT, 0x00}, + {0x5305, CRL_REG_LEN_08BIT, 0x0e}, + {0x5306, CRL_REG_LEN_08BIT, 0x00}, + {0x5307, CRL_REG_LEN_08BIT, 0x36}, + {0x5308, CRL_REG_LEN_08BIT, 0x00}, + {0x5309, CRL_REG_LEN_08BIT, 0xd9}, + {0x530a, CRL_REG_LEN_08BIT, 0x00}, + {0x530b, CRL_REG_LEN_08BIT, 0x0f}, + {0x530c, CRL_REG_LEN_08BIT, 0x00}, + {0x530d, CRL_REG_LEN_08BIT, 0x2c}, + {0x530e, CRL_REG_LEN_08BIT, 0x00}, + {0x530f, CRL_REG_LEN_08BIT, 0x59}, + {0x5310, CRL_REG_LEN_08BIT, 0x00}, + {0x5311, CRL_REG_LEN_08BIT, 0x7b}, + {0x5312, CRL_REG_LEN_08BIT, 0x00}, + {0x5313, CRL_REG_LEN_08BIT, 0x22}, + {0x5314, CRL_REG_LEN_08BIT, 0x00}, + {0x5315, CRL_REG_LEN_08BIT, 0xd5}, + {0x5316, CRL_REG_LEN_08BIT, 0x00}, + {0x5317, CRL_REG_LEN_08BIT, 0x13}, + {0x5318, CRL_REG_LEN_08BIT, 0x00}, + {0x5319, CRL_REG_LEN_08BIT, 0x18}, + {0x531a, CRL_REG_LEN_08BIT, 0x00}, + {0x531b, CRL_REG_LEN_08BIT, 0x26}, + {0x531c, CRL_REG_LEN_08BIT, 0x00}, + {0x531d, CRL_REG_LEN_08BIT, 0xdc}, + {0x531e, CRL_REG_LEN_08BIT, 0x00}, + {0x531f, CRL_REG_LEN_08BIT, 0x02}, + {0x5320, CRL_REG_LEN_08BIT, 0x00}, + {0x5321, CRL_REG_LEN_08BIT, 0x24}, + {0x5322, CRL_REG_LEN_08BIT, 0x00}, + {0x5323, CRL_REG_LEN_08BIT, 0x56}, + {0x5324, CRL_REG_LEN_08BIT, 0x00}, + {0x5325, CRL_REG_LEN_08BIT, 0x85}, + {0x5326, CRL_REG_LEN_08BIT, 0x00}, + {0x5327, CRL_REG_LEN_08BIT, 0x20}, + {0x5609, CRL_REG_LEN_08BIT, 0x01}, + {0x560a, CRL_REG_LEN_08BIT, 0x40}, + {0x560b, CRL_REG_LEN_08BIT, 0x01}, + {0x560c, CRL_REG_LEN_08BIT, 0x40}, + {0x560d, CRL_REG_LEN_08BIT, 0x00}, + {0x560e, CRL_REG_LEN_08BIT, 0xfa}, + {0x560f, CRL_REG_LEN_08BIT, 0x00}, + {0x5610, CRL_REG_LEN_08BIT, 0xfa}, + 
{0x5611, CRL_REG_LEN_08BIT, 0x02}, + {0x5612, CRL_REG_LEN_08BIT, 0x80}, + {0x5613, CRL_REG_LEN_08BIT, 0x02}, + {0x5614, CRL_REG_LEN_08BIT, 0x80}, + {0x5615, CRL_REG_LEN_08BIT, 0x01}, + {0x5616, CRL_REG_LEN_08BIT, 0x2c}, + {0x5617, CRL_REG_LEN_08BIT, 0x01}, + {0x5618, CRL_REG_LEN_08BIT, 0x2c}, + {0x563b, CRL_REG_LEN_08BIT, 0x01}, + {0x563c, CRL_REG_LEN_08BIT, 0x01}, + {0x563d, CRL_REG_LEN_08BIT, 0x01}, + {0x563e, CRL_REG_LEN_08BIT, 0x01}, + {0x563f, CRL_REG_LEN_08BIT, 0x03}, + {0x5640, CRL_REG_LEN_08BIT, 0x03}, + {0x5641, CRL_REG_LEN_08BIT, 0x03}, + {0x5642, CRL_REG_LEN_08BIT, 0x05}, + {0x5643, CRL_REG_LEN_08BIT, 0x09}, + {0x5644, CRL_REG_LEN_08BIT, 0x05}, + {0x5645, CRL_REG_LEN_08BIT, 0x05}, + {0x5646, CRL_REG_LEN_08BIT, 0x05}, + {0x5647, CRL_REG_LEN_08BIT, 0x05}, + {0x5651, CRL_REG_LEN_08BIT, 0x00}, + {0x5652, CRL_REG_LEN_08BIT, 0x80}, + {0x521a, CRL_REG_LEN_08BIT, 0x01}, + {0x521b, CRL_REG_LEN_08BIT, 0x03}, + {0x521c, CRL_REG_LEN_08BIT, 0x06}, + {0x521d, CRL_REG_LEN_08BIT, 0x0a}, + {0x521e, CRL_REG_LEN_08BIT, 0x0e}, + {0x521f, CRL_REG_LEN_08BIT, 0x12}, + {0x5220, CRL_REG_LEN_08BIT, 0x16}, + {0x5223, CRL_REG_LEN_08BIT, 0x02}, + {0x5225, CRL_REG_LEN_08BIT, 0x04}, + {0x5227, CRL_REG_LEN_08BIT, 0x08}, + {0x5229, CRL_REG_LEN_08BIT, 0x0c}, + {0x522b, CRL_REG_LEN_08BIT, 0x12}, + {0x522d, CRL_REG_LEN_08BIT, 0x18}, + {0x522f, CRL_REG_LEN_08BIT, 0x1e}, + {0x5241, CRL_REG_LEN_08BIT, 0x04}, + {0x5242, CRL_REG_LEN_08BIT, 0x01}, + {0x5243, CRL_REG_LEN_08BIT, 0x03}, + {0x5244, CRL_REG_LEN_08BIT, 0x06}, + {0x5245, CRL_REG_LEN_08BIT, 0x0a}, + {0x5246, CRL_REG_LEN_08BIT, 0x0e}, + {0x5247, CRL_REG_LEN_08BIT, 0x12}, + {0x5248, CRL_REG_LEN_08BIT, 0x16}, + {0x524a, CRL_REG_LEN_08BIT, 0x03}, + {0x524c, CRL_REG_LEN_08BIT, 0x04}, + {0x524e, CRL_REG_LEN_08BIT, 0x08}, + {0x5250, CRL_REG_LEN_08BIT, 0x0c}, + {0x5252, CRL_REG_LEN_08BIT, 0x12}, + {0x5254, CRL_REG_LEN_08BIT, 0x18}, + {0x5256, CRL_REG_LEN_08BIT, 0x1e}, + {0x4606, CRL_REG_LEN_08BIT, 0x07}, + {0x4607, CRL_REG_LEN_08BIT, 0x71}, + 
{0x460a, CRL_REG_LEN_08BIT, 0x02}, + {0x460b, CRL_REG_LEN_08BIT, 0x70}, + {0x460c, CRL_REG_LEN_08BIT, 0x00}, + {0x4620, CRL_REG_LEN_08BIT, 0x0e}, + {0x4700, CRL_REG_LEN_08BIT, 0x04}, + {0x4701, CRL_REG_LEN_08BIT, 0x00}, + {0x4702, CRL_REG_LEN_08BIT, 0x01}, + {0x4004, CRL_REG_LEN_08BIT, 0x04}, + {0x4005, CRL_REG_LEN_08BIT, 0x18}, + {0x4001, CRL_REG_LEN_08BIT, 0x06}, + {0x4050, CRL_REG_LEN_08BIT, 0x22}, + {0x4051, CRL_REG_LEN_08BIT, 0x24}, + {0x4052, CRL_REG_LEN_08BIT, 0x02}, + {0x4057, CRL_REG_LEN_08BIT, 0x9c}, + {0x405a, CRL_REG_LEN_08BIT, 0x00}, + /*FSIN enable*/ + {0x3832, CRL_REG_LEN_08BIT, 0x00}, + {0x3833, CRL_REG_LEN_08BIT, 0x02}, + {0x3834, CRL_REG_LEN_08BIT, 0x02}, + {0x3835, CRL_REG_LEN_08BIT, 0x08}, + {0x302e, CRL_REG_LEN_08BIT, 0x00}, + /*FSIN end*/ + {0x4202, CRL_REG_LEN_08BIT, 0x02}, + {0x3023, CRL_REG_LEN_08BIT, 0x10}, + {0x3003, CRL_REG_LEN_08BIT, 0x20}, + {0x3004, CRL_REG_LEN_08BIT, 0x21}, + {0x3005, CRL_REG_LEN_08BIT, 0x14}, + {0x3006, CRL_REG_LEN_08BIT, 0x11}, + {0x3024, CRL_REG_LEN_08BIT, 0x01}, + {0x0100, CRL_REG_LEN_08BIT, 0x01}, + {0x0100, CRL_REG_LEN_08BIT, 0x01}, + {0x6f10, CRL_REG_LEN_08BIT, 0x07}, + {0x6f11, CRL_REG_LEN_08BIT, 0x82}, + {0x6f12, CRL_REG_LEN_08BIT, 0x04}, + {0x6f13, CRL_REG_LEN_08BIT, 0x00}, + {0x6f14, CRL_REG_LEN_08BIT, 0x1f}, + {0x6f15, CRL_REG_LEN_08BIT, 0xdd}, + {0x6f16, CRL_REG_LEN_08BIT, 0x04}, + {0x6f17, CRL_REG_LEN_08BIT, 0x04}, + {0x6f18, CRL_REG_LEN_08BIT, 0x36}, + {0x6f19, CRL_REG_LEN_08BIT, 0x66}, + {0x6f1a, CRL_REG_LEN_08BIT, 0x04}, + {0x6f1b, CRL_REG_LEN_08BIT, 0x08}, + {0x6f1c, CRL_REG_LEN_08BIT, 0x0c}, + {0x6f1d, CRL_REG_LEN_08BIT, 0xe7}, + {0x6f1e, CRL_REG_LEN_08BIT, 0x04}, + {0x6f1f, CRL_REG_LEN_08BIT, 0x0c}, + {0xd000, CRL_REG_LEN_08BIT, 0x19}, + {0xd001, CRL_REG_LEN_08BIT, 0xa0}, + {0xd002, CRL_REG_LEN_08BIT, 0x00}, + {0xd003, CRL_REG_LEN_08BIT, 0x01}, + {0xd004, CRL_REG_LEN_08BIT, 0xa9}, + {0xd005, CRL_REG_LEN_08BIT, 0xad}, + {0xd006, CRL_REG_LEN_08BIT, 0x10}, + {0xd007, CRL_REG_LEN_08BIT, 0x40}, + 
{0xd008, CRL_REG_LEN_08BIT, 0x44}, + {0xd009, CRL_REG_LEN_08BIT, 0x00}, + {0xd00a, CRL_REG_LEN_08BIT, 0x68}, + {0xd00b, CRL_REG_LEN_08BIT, 0x00}, + {0xd00c, CRL_REG_LEN_08BIT, 0x15}, + {0xd00d, CRL_REG_LEN_08BIT, 0x00}, + {0xd00e, CRL_REG_LEN_08BIT, 0x00}, + {0xd00f, CRL_REG_LEN_08BIT, 0x00}, + {0xd010, CRL_REG_LEN_08BIT, 0x19}, + {0xd011, CRL_REG_LEN_08BIT, 0xa0}, + {0xd012, CRL_REG_LEN_08BIT, 0x00}, + {0xd013, CRL_REG_LEN_08BIT, 0x01}, + {0xd014, CRL_REG_LEN_08BIT, 0xa9}, + {0xd015, CRL_REG_LEN_08BIT, 0xad}, + {0xd016, CRL_REG_LEN_08BIT, 0x13}, + {0xd017, CRL_REG_LEN_08BIT, 0xd0}, + {0xd018, CRL_REG_LEN_08BIT, 0x44}, + {0xd019, CRL_REG_LEN_08BIT, 0x00}, + {0xd01a, CRL_REG_LEN_08BIT, 0x68}, + {0xd01b, CRL_REG_LEN_08BIT, 0x00}, + {0xd01c, CRL_REG_LEN_08BIT, 0x15}, + {0xd01d, CRL_REG_LEN_08BIT, 0x00}, + {0xd01e, CRL_REG_LEN_08BIT, 0x00}, + {0xd01f, CRL_REG_LEN_08BIT, 0x00}, + {0xd020, CRL_REG_LEN_08BIT, 0x19}, + {0xd021, CRL_REG_LEN_08BIT, 0xa0}, + {0xd022, CRL_REG_LEN_08BIT, 0x00}, + {0xd023, CRL_REG_LEN_08BIT, 0x01}, + {0xd024, CRL_REG_LEN_08BIT, 0xa9}, + {0xd025, CRL_REG_LEN_08BIT, 0xad}, + {0xd026, CRL_REG_LEN_08BIT, 0x14}, + {0xd027, CRL_REG_LEN_08BIT, 0xb8}, + {0xd028, CRL_REG_LEN_08BIT, 0x44}, + {0xd029, CRL_REG_LEN_08BIT, 0x00}, + {0xd02a, CRL_REG_LEN_08BIT, 0x68}, + {0xd02b, CRL_REG_LEN_08BIT, 0x00}, + {0xd02c, CRL_REG_LEN_08BIT, 0x15}, + {0xd02d, CRL_REG_LEN_08BIT, 0x00}, + {0xd02e, CRL_REG_LEN_08BIT, 0x00}, + {0xd02f, CRL_REG_LEN_08BIT, 0x00}, + {0xd030, CRL_REG_LEN_08BIT, 0x19}, + {0xd031, CRL_REG_LEN_08BIT, 0xa0}, + {0xd032, CRL_REG_LEN_08BIT, 0x00}, + {0xd033, CRL_REG_LEN_08BIT, 0x01}, + {0xd034, CRL_REG_LEN_08BIT, 0xa9}, + {0xd035, CRL_REG_LEN_08BIT, 0xad}, + {0xd036, CRL_REG_LEN_08BIT, 0x14}, + {0xd037, CRL_REG_LEN_08BIT, 0xdc}, + {0xd038, CRL_REG_LEN_08BIT, 0x44}, + {0xd039, CRL_REG_LEN_08BIT, 0x00}, + {0xd03a, CRL_REG_LEN_08BIT, 0x68}, + {0xd03b, CRL_REG_LEN_08BIT, 0x00}, + {0xd03c, CRL_REG_LEN_08BIT, 0x15}, + {0xd03d, CRL_REG_LEN_08BIT, 0x00}, + 
{0xd03e, CRL_REG_LEN_08BIT, 0x00}, + {0xd03f, CRL_REG_LEN_08BIT, 0x00}, + {0xd040, CRL_REG_LEN_08BIT, 0x9c}, + {0xd041, CRL_REG_LEN_08BIT, 0x21}, + {0xd042, CRL_REG_LEN_08BIT, 0xff}, + {0xd043, CRL_REG_LEN_08BIT, 0xe4}, + {0xd044, CRL_REG_LEN_08BIT, 0xd4}, + {0xd045, CRL_REG_LEN_08BIT, 0x01}, + {0xd046, CRL_REG_LEN_08BIT, 0x48}, + {0xd047, CRL_REG_LEN_08BIT, 0x00}, + {0xd048, CRL_REG_LEN_08BIT, 0xd4}, + {0xd049, CRL_REG_LEN_08BIT, 0x01}, + {0xd04a, CRL_REG_LEN_08BIT, 0x50}, + {0xd04b, CRL_REG_LEN_08BIT, 0x04}, + {0xd04c, CRL_REG_LEN_08BIT, 0xd4}, + {0xd04d, CRL_REG_LEN_08BIT, 0x01}, + {0xd04e, CRL_REG_LEN_08BIT, 0x60}, + {0xd04f, CRL_REG_LEN_08BIT, 0x08}, + {0xd050, CRL_REG_LEN_08BIT, 0xd4}, + {0xd051, CRL_REG_LEN_08BIT, 0x01}, + {0xd052, CRL_REG_LEN_08BIT, 0x70}, + {0xd053, CRL_REG_LEN_08BIT, 0x0c}, + {0xd054, CRL_REG_LEN_08BIT, 0xd4}, + {0xd055, CRL_REG_LEN_08BIT, 0x01}, + {0xd056, CRL_REG_LEN_08BIT, 0x80}, + {0xd057, CRL_REG_LEN_08BIT, 0x10}, + {0xd058, CRL_REG_LEN_08BIT, 0x19}, + {0xd059, CRL_REG_LEN_08BIT, 0xc0}, + {0xd05a, CRL_REG_LEN_08BIT, 0x00}, + {0xd05b, CRL_REG_LEN_08BIT, 0x01}, + {0xd05c, CRL_REG_LEN_08BIT, 0xa9}, + {0xd05d, CRL_REG_LEN_08BIT, 0xce}, + {0xd05e, CRL_REG_LEN_08BIT, 0x02}, + {0xd05f, CRL_REG_LEN_08BIT, 0xa4}, + {0xd060, CRL_REG_LEN_08BIT, 0x9c}, + {0xd061, CRL_REG_LEN_08BIT, 0xa0}, + {0xd062, CRL_REG_LEN_08BIT, 0x00}, + {0xd063, CRL_REG_LEN_08BIT, 0x00}, + {0xd064, CRL_REG_LEN_08BIT, 0x84}, + {0xd065, CRL_REG_LEN_08BIT, 0x6e}, + {0xd066, CRL_REG_LEN_08BIT, 0x00}, + {0xd067, CRL_REG_LEN_08BIT, 0x00}, + {0xd068, CRL_REG_LEN_08BIT, 0xd8}, + {0xd069, CRL_REG_LEN_08BIT, 0x03}, + {0xd06a, CRL_REG_LEN_08BIT, 0x28}, + {0xd06b, CRL_REG_LEN_08BIT, 0x76}, + {0xd06c, CRL_REG_LEN_08BIT, 0x1a}, + {0xd06d, CRL_REG_LEN_08BIT, 0x00}, + {0xd06e, CRL_REG_LEN_08BIT, 0x00}, + {0xd06f, CRL_REG_LEN_08BIT, 0x01}, + {0xd070, CRL_REG_LEN_08BIT, 0xaa}, + {0xd071, CRL_REG_LEN_08BIT, 0x10}, + {0xd072, CRL_REG_LEN_08BIT, 0x03}, + {0xd073, CRL_REG_LEN_08BIT, 0xf0}, + 
{0xd074, CRL_REG_LEN_08BIT, 0x18}, + {0xd075, CRL_REG_LEN_08BIT, 0x60}, + {0xd076, CRL_REG_LEN_08BIT, 0x00}, + {0xd077, CRL_REG_LEN_08BIT, 0x01}, + {0xd078, CRL_REG_LEN_08BIT, 0xa8}, + {0xd079, CRL_REG_LEN_08BIT, 0x63}, + {0xd07a, CRL_REG_LEN_08BIT, 0x07}, + {0xd07b, CRL_REG_LEN_08BIT, 0x80}, + {0xd07c, CRL_REG_LEN_08BIT, 0xe0}, + {0xd07d, CRL_REG_LEN_08BIT, 0xa0}, + {0xd07e, CRL_REG_LEN_08BIT, 0x00}, + {0xd07f, CRL_REG_LEN_08BIT, 0x04}, + {0xd080, CRL_REG_LEN_08BIT, 0x18}, + {0xd081, CRL_REG_LEN_08BIT, 0xc0}, + {0xd082, CRL_REG_LEN_08BIT, 0x00}, + {0xd083, CRL_REG_LEN_08BIT, 0x00}, + {0xd084, CRL_REG_LEN_08BIT, 0xa8}, + {0xd085, CRL_REG_LEN_08BIT, 0xc6}, + {0xd086, CRL_REG_LEN_08BIT, 0x00}, + {0xd087, CRL_REG_LEN_08BIT, 0x00}, + {0xd088, CRL_REG_LEN_08BIT, 0x8c}, + {0xd089, CRL_REG_LEN_08BIT, 0x63}, + {0xd08a, CRL_REG_LEN_08BIT, 0x00}, + {0xd08b, CRL_REG_LEN_08BIT, 0x00}, + {0xd08c, CRL_REG_LEN_08BIT, 0xd4}, + {0xd08d, CRL_REG_LEN_08BIT, 0x01}, + {0xd08e, CRL_REG_LEN_08BIT, 0x28}, + {0xd08f, CRL_REG_LEN_08BIT, 0x14}, + {0xd090, CRL_REG_LEN_08BIT, 0xd4}, + {0xd091, CRL_REG_LEN_08BIT, 0x01}, + {0xd092, CRL_REG_LEN_08BIT, 0x30}, + {0xd093, CRL_REG_LEN_08BIT, 0x18}, + {0xd094, CRL_REG_LEN_08BIT, 0x07}, + {0xd095, CRL_REG_LEN_08BIT, 0xff}, + {0xd096, CRL_REG_LEN_08BIT, 0xf8}, + {0xd097, CRL_REG_LEN_08BIT, 0xfd}, + {0xd098, CRL_REG_LEN_08BIT, 0x9c}, + {0xd099, CRL_REG_LEN_08BIT, 0x80}, + {0xd09a, CRL_REG_LEN_08BIT, 0x00}, + {0xd09b, CRL_REG_LEN_08BIT, 0x03}, + {0xd09c, CRL_REG_LEN_08BIT, 0xa5}, + {0xd09d, CRL_REG_LEN_08BIT, 0x6b}, + {0xd09e, CRL_REG_LEN_08BIT, 0x00}, + {0xd09f, CRL_REG_LEN_08BIT, 0xff}, + {0xd0a0, CRL_REG_LEN_08BIT, 0x18}, + {0xd0a1, CRL_REG_LEN_08BIT, 0xc0}, + {0xd0a2, CRL_REG_LEN_08BIT, 0x00}, + {0xd0a3, CRL_REG_LEN_08BIT, 0x01}, + {0xd0a4, CRL_REG_LEN_08BIT, 0xa8}, + {0xd0a5, CRL_REG_LEN_08BIT, 0xc6}, + {0xd0a6, CRL_REG_LEN_08BIT, 0x01}, + {0xd0a7, CRL_REG_LEN_08BIT, 0x02}, + {0xd0a8, CRL_REG_LEN_08BIT, 0xe1}, + {0xd0a9, CRL_REG_LEN_08BIT, 0x6b}, + 
{0xd0aa, CRL_REG_LEN_08BIT, 0x58}, + {0xd0ab, CRL_REG_LEN_08BIT, 0x00}, + {0xd0ac, CRL_REG_LEN_08BIT, 0x84}, + {0xd0ad, CRL_REG_LEN_08BIT, 0x8e}, + {0xd0ae, CRL_REG_LEN_08BIT, 0x00}, + {0xd0af, CRL_REG_LEN_08BIT, 0x00}, + {0xd0b0, CRL_REG_LEN_08BIT, 0xe1}, + {0xd0b1, CRL_REG_LEN_08BIT, 0x6b}, + {0xd0b2, CRL_REG_LEN_08BIT, 0x30}, + {0xd0b3, CRL_REG_LEN_08BIT, 0x00}, + {0xd0b4, CRL_REG_LEN_08BIT, 0x98}, + {0xd0b5, CRL_REG_LEN_08BIT, 0xb0}, + {0xd0b6, CRL_REG_LEN_08BIT, 0x00}, + {0xd0b7, CRL_REG_LEN_08BIT, 0x00}, + {0xd0b8, CRL_REG_LEN_08BIT, 0x8c}, + {0xd0b9, CRL_REG_LEN_08BIT, 0x64}, + {0xd0ba, CRL_REG_LEN_08BIT, 0x00}, + {0xd0bb, CRL_REG_LEN_08BIT, 0x6e}, + {0xd0bc, CRL_REG_LEN_08BIT, 0xe5}, + {0xd0bd, CRL_REG_LEN_08BIT, 0xa5}, + {0xd0be, CRL_REG_LEN_08BIT, 0x18}, + {0xd0bf, CRL_REG_LEN_08BIT, 0x00}, + {0xd0c0, CRL_REG_LEN_08BIT, 0x10}, + {0xd0c1, CRL_REG_LEN_08BIT, 0x00}, + {0xd0c2, CRL_REG_LEN_08BIT, 0x00}, + {0xd0c3, CRL_REG_LEN_08BIT, 0x06}, + {0xd0c4, CRL_REG_LEN_08BIT, 0x95}, + {0xd0c5, CRL_REG_LEN_08BIT, 0x8b}, + {0xd0c6, CRL_REG_LEN_08BIT, 0x00}, + {0xd0c7, CRL_REG_LEN_08BIT, 0x00}, + {0xd0c8, CRL_REG_LEN_08BIT, 0x94}, + {0xd0c9, CRL_REG_LEN_08BIT, 0xa4}, + {0xd0ca, CRL_REG_LEN_08BIT, 0x00}, + {0xd0cb, CRL_REG_LEN_08BIT, 0x70}, + {0xd0cc, CRL_REG_LEN_08BIT, 0xe5}, + {0xd0cd, CRL_REG_LEN_08BIT, 0x65}, + {0xd0ce, CRL_REG_LEN_08BIT, 0x60}, + {0xd0cf, CRL_REG_LEN_08BIT, 0x00}, + {0xd0d0, CRL_REG_LEN_08BIT, 0x0c}, + {0xd0d1, CRL_REG_LEN_08BIT, 0x00}, + {0xd0d2, CRL_REG_LEN_08BIT, 0x00}, + {0xd0d3, CRL_REG_LEN_08BIT, 0x62}, + {0xd0d4, CRL_REG_LEN_08BIT, 0x15}, + {0xd0d5, CRL_REG_LEN_08BIT, 0x00}, + {0xd0d6, CRL_REG_LEN_08BIT, 0x00}, + {0xd0d7, CRL_REG_LEN_08BIT, 0x00}, + {0xd0d8, CRL_REG_LEN_08BIT, 0x18}, + {0xd0d9, CRL_REG_LEN_08BIT, 0x60}, + {0xd0da, CRL_REG_LEN_08BIT, 0x80}, + {0xd0db, CRL_REG_LEN_08BIT, 0x06}, + {0xd0dc, CRL_REG_LEN_08BIT, 0xa8}, + {0xd0dd, CRL_REG_LEN_08BIT, 0x83}, + {0xd0de, CRL_REG_LEN_08BIT, 0x38}, + {0xd0df, CRL_REG_LEN_08BIT, 0x29}, + 
{0xd0e0, CRL_REG_LEN_08BIT, 0xa8}, + {0xd0e1, CRL_REG_LEN_08BIT, 0xe3}, + {0xd0e2, CRL_REG_LEN_08BIT, 0x40}, + {0xd0e3, CRL_REG_LEN_08BIT, 0x08}, + {0xd0e4, CRL_REG_LEN_08BIT, 0x8c}, + {0xd0e5, CRL_REG_LEN_08BIT, 0x84}, + {0xd0e6, CRL_REG_LEN_08BIT, 0x00}, + {0xd0e7, CRL_REG_LEN_08BIT, 0x00}, + {0xd0e8, CRL_REG_LEN_08BIT, 0xa8}, + {0xd0e9, CRL_REG_LEN_08BIT, 0xa3}, + {0xd0ea, CRL_REG_LEN_08BIT, 0x40}, + {0xd0eb, CRL_REG_LEN_08BIT, 0x09}, + {0xd0ec, CRL_REG_LEN_08BIT, 0xa8}, + {0xd0ed, CRL_REG_LEN_08BIT, 0xc3}, + {0xd0ee, CRL_REG_LEN_08BIT, 0x38}, + {0xd0ef, CRL_REG_LEN_08BIT, 0x2a}, + {0xd0f0, CRL_REG_LEN_08BIT, 0xd8}, + {0xd0f1, CRL_REG_LEN_08BIT, 0x07}, + {0xd0f2, CRL_REG_LEN_08BIT, 0x20}, + {0xd0f3, CRL_REG_LEN_08BIT, 0x00}, + {0xd0f4, CRL_REG_LEN_08BIT, 0x8c}, + {0xd0f5, CRL_REG_LEN_08BIT, 0x66}, + {0xd0f6, CRL_REG_LEN_08BIT, 0x00}, + {0xd0f7, CRL_REG_LEN_08BIT, 0x00}, + {0xd0f8, CRL_REG_LEN_08BIT, 0xd8}, + {0xd0f9, CRL_REG_LEN_08BIT, 0x05}, + {0xd0fa, CRL_REG_LEN_08BIT, 0x18}, + {0xd0fb, CRL_REG_LEN_08BIT, 0x00}, + {0xd0fc, CRL_REG_LEN_08BIT, 0x18}, + {0xd0fd, CRL_REG_LEN_08BIT, 0x60}, + {0xd0fe, CRL_REG_LEN_08BIT, 0x00}, + {0xd0ff, CRL_REG_LEN_08BIT, 0x01}, + {0xd100, CRL_REG_LEN_08BIT, 0x98}, + {0xd101, CRL_REG_LEN_08BIT, 0x90}, + {0xd102, CRL_REG_LEN_08BIT, 0x00}, + {0xd103, CRL_REG_LEN_08BIT, 0x00}, + {0xd104, CRL_REG_LEN_08BIT, 0x84}, + {0xd105, CRL_REG_LEN_08BIT, 0xae}, + {0xd106, CRL_REG_LEN_08BIT, 0x00}, + {0xd107, CRL_REG_LEN_08BIT, 0x00}, + {0xd108, CRL_REG_LEN_08BIT, 0xa8}, + {0xd109, CRL_REG_LEN_08BIT, 0x63}, + {0xd10a, CRL_REG_LEN_08BIT, 0x06}, + {0xd10b, CRL_REG_LEN_08BIT, 0x4c}, + {0xd10c, CRL_REG_LEN_08BIT, 0x9c}, + {0xd10d, CRL_REG_LEN_08BIT, 0xc0}, + {0xd10e, CRL_REG_LEN_08BIT, 0x00}, + {0xd10f, CRL_REG_LEN_08BIT, 0x00}, + {0xd110, CRL_REG_LEN_08BIT, 0xd8}, + {0xd111, CRL_REG_LEN_08BIT, 0x03}, + {0xd112, CRL_REG_LEN_08BIT, 0x30}, + {0xd113, CRL_REG_LEN_08BIT, 0x00}, + {0xd114, CRL_REG_LEN_08BIT, 0x8c}, + {0xd115, CRL_REG_LEN_08BIT, 0x65}, + 
{0xd116, CRL_REG_LEN_08BIT, 0x00}, + {0xd117, CRL_REG_LEN_08BIT, 0x6e}, + {0xd118, CRL_REG_LEN_08BIT, 0xe5}, + {0xd119, CRL_REG_LEN_08BIT, 0x84}, + {0xd11a, CRL_REG_LEN_08BIT, 0x18}, + {0xd11b, CRL_REG_LEN_08BIT, 0x00}, + {0xd11c, CRL_REG_LEN_08BIT, 0x10}, + {0xd11d, CRL_REG_LEN_08BIT, 0x00}, + {0xd11e, CRL_REG_LEN_08BIT, 0x00}, + {0xd11f, CRL_REG_LEN_08BIT, 0x07}, + {0xd120, CRL_REG_LEN_08BIT, 0x18}, + {0xd121, CRL_REG_LEN_08BIT, 0x80}, + {0xd122, CRL_REG_LEN_08BIT, 0x80}, + {0xd123, CRL_REG_LEN_08BIT, 0x06}, + {0xd124, CRL_REG_LEN_08BIT, 0x94}, + {0xd125, CRL_REG_LEN_08BIT, 0x65}, + {0xd126, CRL_REG_LEN_08BIT, 0x00}, + {0xd127, CRL_REG_LEN_08BIT, 0x70}, + {0xd128, CRL_REG_LEN_08BIT, 0xe5}, + {0xd129, CRL_REG_LEN_08BIT, 0x43}, + {0xd12a, CRL_REG_LEN_08BIT, 0x60}, + {0xd12b, CRL_REG_LEN_08BIT, 0x00}, + {0xd12c, CRL_REG_LEN_08BIT, 0x0c}, + {0xd12d, CRL_REG_LEN_08BIT, 0x00}, + {0xd12e, CRL_REG_LEN_08BIT, 0x00}, + {0xd12f, CRL_REG_LEN_08BIT, 0x3e}, + {0xd130, CRL_REG_LEN_08BIT, 0xa8}, + {0xd131, CRL_REG_LEN_08BIT, 0x64}, + {0xd132, CRL_REG_LEN_08BIT, 0x38}, + {0xd133, CRL_REG_LEN_08BIT, 0x24}, + {0xd134, CRL_REG_LEN_08BIT, 0x18}, + {0xd135, CRL_REG_LEN_08BIT, 0x80}, + {0xd136, CRL_REG_LEN_08BIT, 0x80}, + {0xd137, CRL_REG_LEN_08BIT, 0x06}, + {0xd138, CRL_REG_LEN_08BIT, 0xa8}, + {0xd139, CRL_REG_LEN_08BIT, 0x64}, + {0xd13a, CRL_REG_LEN_08BIT, 0x38}, + {0xd13b, CRL_REG_LEN_08BIT, 0x24}, + {0xd13c, CRL_REG_LEN_08BIT, 0x8c}, + {0xd13d, CRL_REG_LEN_08BIT, 0x63}, + {0xd13e, CRL_REG_LEN_08BIT, 0x00}, + {0xd13f, CRL_REG_LEN_08BIT, 0x00}, + {0xd140, CRL_REG_LEN_08BIT, 0xa4}, + {0xd141, CRL_REG_LEN_08BIT, 0x63}, + {0xd142, CRL_REG_LEN_08BIT, 0x00}, + {0xd143, CRL_REG_LEN_08BIT, 0x40}, + {0xd144, CRL_REG_LEN_08BIT, 0xbc}, + {0xd145, CRL_REG_LEN_08BIT, 0x23}, + {0xd146, CRL_REG_LEN_08BIT, 0x00}, + {0xd147, CRL_REG_LEN_08BIT, 0x00}, + {0xd148, CRL_REG_LEN_08BIT, 0x0c}, + {0xd149, CRL_REG_LEN_08BIT, 0x00}, + {0xd14a, CRL_REG_LEN_08BIT, 0x00}, + {0xd14b, CRL_REG_LEN_08BIT, 0x2a}, + 
{0xd14c, CRL_REG_LEN_08BIT, 0xa8}, + {0xd14d, CRL_REG_LEN_08BIT, 0x64}, + {0xd14e, CRL_REG_LEN_08BIT, 0x6e}, + {0xd14f, CRL_REG_LEN_08BIT, 0x44}, + {0xd150, CRL_REG_LEN_08BIT, 0x19}, + {0xd151, CRL_REG_LEN_08BIT, 0x00}, + {0xd152, CRL_REG_LEN_08BIT, 0x80}, + {0xd153, CRL_REG_LEN_08BIT, 0x06}, + {0xd154, CRL_REG_LEN_08BIT, 0xa8}, + {0xd155, CRL_REG_LEN_08BIT, 0xe8}, + {0xd156, CRL_REG_LEN_08BIT, 0x3d}, + {0xd157, CRL_REG_LEN_08BIT, 0x05}, + {0xd158, CRL_REG_LEN_08BIT, 0x8c}, + {0xd159, CRL_REG_LEN_08BIT, 0x67}, + {0xd15a, CRL_REG_LEN_08BIT, 0x00}, + {0xd15b, CRL_REG_LEN_08BIT, 0x00}, + {0xd15c, CRL_REG_LEN_08BIT, 0xb8}, + {0xd15d, CRL_REG_LEN_08BIT, 0x63}, + {0xd15e, CRL_REG_LEN_08BIT, 0x00}, + {0xd15f, CRL_REG_LEN_08BIT, 0x18}, + {0xd160, CRL_REG_LEN_08BIT, 0xb8}, + {0xd161, CRL_REG_LEN_08BIT, 0x63}, + {0xd162, CRL_REG_LEN_08BIT, 0x00}, + {0xd163, CRL_REG_LEN_08BIT, 0x98}, + {0xd164, CRL_REG_LEN_08BIT, 0xbc}, + {0xd165, CRL_REG_LEN_08BIT, 0x03}, + {0xd166, CRL_REG_LEN_08BIT, 0x00}, + {0xd167, CRL_REG_LEN_08BIT, 0x00}, + {0xd168, CRL_REG_LEN_08BIT, 0x10}, + {0xd169, CRL_REG_LEN_08BIT, 0x00}, + {0xd16a, CRL_REG_LEN_08BIT, 0x00}, + {0xd16b, CRL_REG_LEN_08BIT, 0x10}, + {0xd16c, CRL_REG_LEN_08BIT, 0xa9}, + {0xd16d, CRL_REG_LEN_08BIT, 0x48}, + {0xd16e, CRL_REG_LEN_08BIT, 0x67}, + {0xd16f, CRL_REG_LEN_08BIT, 0x02}, + {0xd170, CRL_REG_LEN_08BIT, 0xb8}, + {0xd171, CRL_REG_LEN_08BIT, 0xa3}, + {0xd172, CRL_REG_LEN_08BIT, 0x00}, + {0xd173, CRL_REG_LEN_08BIT, 0x19}, + {0xd174, CRL_REG_LEN_08BIT, 0x8c}, + {0xd175, CRL_REG_LEN_08BIT, 0x8a}, + {0xd176, CRL_REG_LEN_08BIT, 0x00}, + {0xd177, CRL_REG_LEN_08BIT, 0x00}, + {0xd178, CRL_REG_LEN_08BIT, 0xa9}, + {0xd179, CRL_REG_LEN_08BIT, 0x68}, + {0xd17a, CRL_REG_LEN_08BIT, 0x67}, + {0xd17b, CRL_REG_LEN_08BIT, 0x03}, + {0xd17c, CRL_REG_LEN_08BIT, 0xb8}, + {0xd17d, CRL_REG_LEN_08BIT, 0xc4}, + {0xd17e, CRL_REG_LEN_08BIT, 0x00}, + {0xd17f, CRL_REG_LEN_08BIT, 0x08}, + {0xd180, CRL_REG_LEN_08BIT, 0x8c}, + {0xd181, CRL_REG_LEN_08BIT, 0x6b}, + 
{0xd182, CRL_REG_LEN_08BIT, 0x00}, + {0xd183, CRL_REG_LEN_08BIT, 0x00}, + {0xd184, CRL_REG_LEN_08BIT, 0xb8}, + {0xd185, CRL_REG_LEN_08BIT, 0x85}, + {0xd186, CRL_REG_LEN_08BIT, 0x00}, + {0xd187, CRL_REG_LEN_08BIT, 0x98}, + {0xd188, CRL_REG_LEN_08BIT, 0xe0}, + {0xd189, CRL_REG_LEN_08BIT, 0x63}, + {0xd18a, CRL_REG_LEN_08BIT, 0x30}, + {0xd18b, CRL_REG_LEN_08BIT, 0x04}, + {0xd18c, CRL_REG_LEN_08BIT, 0xe0}, + {0xd18d, CRL_REG_LEN_08BIT, 0x64}, + {0xd18e, CRL_REG_LEN_08BIT, 0x18}, + {0xd18f, CRL_REG_LEN_08BIT, 0x00}, + {0xd190, CRL_REG_LEN_08BIT, 0xa4}, + {0xd191, CRL_REG_LEN_08BIT, 0x83}, + {0xd192, CRL_REG_LEN_08BIT, 0xff}, + {0xd193, CRL_REG_LEN_08BIT, 0xff}, + {0xd194, CRL_REG_LEN_08BIT, 0xb8}, + {0xd195, CRL_REG_LEN_08BIT, 0x64}, + {0xd196, CRL_REG_LEN_08BIT, 0x00}, + {0xd197, CRL_REG_LEN_08BIT, 0x48}, + {0xd198, CRL_REG_LEN_08BIT, 0xd8}, + {0xd199, CRL_REG_LEN_08BIT, 0x0a}, + {0xd19a, CRL_REG_LEN_08BIT, 0x18}, + {0xd19b, CRL_REG_LEN_08BIT, 0x00}, + {0xd19c, CRL_REG_LEN_08BIT, 0xd8}, + {0xd19d, CRL_REG_LEN_08BIT, 0x0b}, + {0xd19e, CRL_REG_LEN_08BIT, 0x20}, + {0xd19f, CRL_REG_LEN_08BIT, 0x00}, + {0xd1a0, CRL_REG_LEN_08BIT, 0x9c}, + {0xd1a1, CRL_REG_LEN_08BIT, 0x60}, + {0xd1a2, CRL_REG_LEN_08BIT, 0x00}, + {0xd1a3, CRL_REG_LEN_08BIT, 0x00}, + {0xd1a4, CRL_REG_LEN_08BIT, 0xd8}, + {0xd1a5, CRL_REG_LEN_08BIT, 0x07}, + {0xd1a6, CRL_REG_LEN_08BIT, 0x18}, + {0xd1a7, CRL_REG_LEN_08BIT, 0x00}, + {0xd1a8, CRL_REG_LEN_08BIT, 0xa8}, + {0xd1a9, CRL_REG_LEN_08BIT, 0x68}, + {0xd1aa, CRL_REG_LEN_08BIT, 0x38}, + {0xd1ab, CRL_REG_LEN_08BIT, 0x22}, + {0xd1ac, CRL_REG_LEN_08BIT, 0x9c}, + {0xd1ad, CRL_REG_LEN_08BIT, 0x80}, + {0xd1ae, CRL_REG_LEN_08BIT, 0x00}, + {0xd1af, CRL_REG_LEN_08BIT, 0x70}, + {0xd1b0, CRL_REG_LEN_08BIT, 0xa8}, + {0xd1b1, CRL_REG_LEN_08BIT, 0xe8}, + {0xd1b2, CRL_REG_LEN_08BIT, 0x38}, + {0xd1b3, CRL_REG_LEN_08BIT, 0x43}, + {0xd1b4, CRL_REG_LEN_08BIT, 0xd8}, + {0xd1b5, CRL_REG_LEN_08BIT, 0x03}, + {0xd1b6, CRL_REG_LEN_08BIT, 0x20}, + {0xd1b7, CRL_REG_LEN_08BIT, 0x00}, + 
{0xd1b8, CRL_REG_LEN_08BIT, 0x9c}, + {0xd1b9, CRL_REG_LEN_08BIT, 0xa0}, + {0xd1ba, CRL_REG_LEN_08BIT, 0x00}, + {0xd1bb, CRL_REG_LEN_08BIT, 0x00}, + {0xd1bc, CRL_REG_LEN_08BIT, 0xa8}, + {0xd1bd, CRL_REG_LEN_08BIT, 0xc8}, + {0xd1be, CRL_REG_LEN_08BIT, 0x38}, + {0xd1bf, CRL_REG_LEN_08BIT, 0x42}, + {0xd1c0, CRL_REG_LEN_08BIT, 0x8c}, + {0xd1c1, CRL_REG_LEN_08BIT, 0x66}, + {0xd1c2, CRL_REG_LEN_08BIT, 0x00}, + {0xd1c3, CRL_REG_LEN_08BIT, 0x00}, + {0xd1c4, CRL_REG_LEN_08BIT, 0x9c}, + {0xd1c5, CRL_REG_LEN_08BIT, 0xa5}, + {0xd1c6, CRL_REG_LEN_08BIT, 0x00}, + {0xd1c7, CRL_REG_LEN_08BIT, 0x01}, + {0xd1c8, CRL_REG_LEN_08BIT, 0xb8}, + {0xd1c9, CRL_REG_LEN_08BIT, 0x83}, + {0xd1ca, CRL_REG_LEN_08BIT, 0x00}, + {0xd1cb, CRL_REG_LEN_08BIT, 0x08}, + {0xd1cc, CRL_REG_LEN_08BIT, 0xa4}, + {0xd1cd, CRL_REG_LEN_08BIT, 0xa5}, + {0xd1ce, CRL_REG_LEN_08BIT, 0x00}, + {0xd1cf, CRL_REG_LEN_08BIT, 0xff}, + {0xd1d0, CRL_REG_LEN_08BIT, 0x8c}, + {0xd1d1, CRL_REG_LEN_08BIT, 0x67}, + {0xd1d2, CRL_REG_LEN_08BIT, 0x00}, + {0xd1d3, CRL_REG_LEN_08BIT, 0x00}, + {0xd1d4, CRL_REG_LEN_08BIT, 0xe0}, + {0xd1d5, CRL_REG_LEN_08BIT, 0x63}, + {0xd1d6, CRL_REG_LEN_08BIT, 0x20}, + {0xd1d7, CRL_REG_LEN_08BIT, 0x00}, + {0xd1d8, CRL_REG_LEN_08BIT, 0xa4}, + {0xd1d9, CRL_REG_LEN_08BIT, 0x63}, + {0xd1da, CRL_REG_LEN_08BIT, 0xff}, + {0xd1db, CRL_REG_LEN_08BIT, 0xff}, + {0xd1dc, CRL_REG_LEN_08BIT, 0xbc}, + {0xd1dd, CRL_REG_LEN_08BIT, 0x43}, + {0xd1de, CRL_REG_LEN_08BIT, 0x00}, + {0xd1df, CRL_REG_LEN_08BIT, 0x07}, + {0xd1e0, CRL_REG_LEN_08BIT, 0x0c}, + {0xd1e1, CRL_REG_LEN_08BIT, 0x00}, + {0xd1e2, CRL_REG_LEN_08BIT, 0x00}, + {0xd1e3, CRL_REG_LEN_08BIT, 0x5b}, + {0xd1e4, CRL_REG_LEN_08BIT, 0xbc}, + {0xd1e5, CRL_REG_LEN_08BIT, 0x05}, + {0xd1e6, CRL_REG_LEN_08BIT, 0x00}, + {0xd1e7, CRL_REG_LEN_08BIT, 0x02}, + {0xd1e8, CRL_REG_LEN_08BIT, 0x03}, + {0xd1e9, CRL_REG_LEN_08BIT, 0xff}, + {0xd1ea, CRL_REG_LEN_08BIT, 0xff}, + {0xd1eb, CRL_REG_LEN_08BIT, 0xf6}, + {0xd1ec, CRL_REG_LEN_08BIT, 0x9c}, + {0xd1ed, CRL_REG_LEN_08BIT, 0xa0}, + 
{0xd1ee, CRL_REG_LEN_08BIT, 0x00}, + {0xd1ef, CRL_REG_LEN_08BIT, 0x00}, + {0xd1f0, CRL_REG_LEN_08BIT, 0xa8}, + {0xd1f1, CRL_REG_LEN_08BIT, 0xa4}, + {0xd1f2, CRL_REG_LEN_08BIT, 0x55}, + {0xd1f3, CRL_REG_LEN_08BIT, 0x86}, + {0xd1f4, CRL_REG_LEN_08BIT, 0x8c}, + {0xd1f5, CRL_REG_LEN_08BIT, 0x63}, + {0xd1f6, CRL_REG_LEN_08BIT, 0x00}, + {0xd1f7, CRL_REG_LEN_08BIT, 0x00}, + {0xd1f8, CRL_REG_LEN_08BIT, 0xa8}, + {0xd1f9, CRL_REG_LEN_08BIT, 0xc4}, + {0xd1fa, CRL_REG_LEN_08BIT, 0x6e}, + {0xd1fb, CRL_REG_LEN_08BIT, 0x45}, + {0xd1fc, CRL_REG_LEN_08BIT, 0xa8}, + {0xd1fd, CRL_REG_LEN_08BIT, 0xe4}, + {0xd1fe, CRL_REG_LEN_08BIT, 0x55}, + {0xd1ff, CRL_REG_LEN_08BIT, 0x87}, + {0xd200, CRL_REG_LEN_08BIT, 0xd8}, + {0xd201, CRL_REG_LEN_08BIT, 0x05}, + {0xd202, CRL_REG_LEN_08BIT, 0x18}, + {0xd203, CRL_REG_LEN_08BIT, 0x00}, + {0xd204, CRL_REG_LEN_08BIT, 0x8c}, + {0xd205, CRL_REG_LEN_08BIT, 0x66}, + {0xd206, CRL_REG_LEN_08BIT, 0x00}, + {0xd207, CRL_REG_LEN_08BIT, 0x00}, + {0xd208, CRL_REG_LEN_08BIT, 0xa8}, + {0xd209, CRL_REG_LEN_08BIT, 0xa4}, + {0xd20a, CRL_REG_LEN_08BIT, 0x6e}, + {0xd20b, CRL_REG_LEN_08BIT, 0x46}, + {0xd20c, CRL_REG_LEN_08BIT, 0xd8}, + {0xd20d, CRL_REG_LEN_08BIT, 0x07}, + {0xd20e, CRL_REG_LEN_08BIT, 0x18}, + {0xd20f, CRL_REG_LEN_08BIT, 0x00}, + {0xd210, CRL_REG_LEN_08BIT, 0xa8}, + {0xd211, CRL_REG_LEN_08BIT, 0x84}, + {0xd212, CRL_REG_LEN_08BIT, 0x55}, + {0xd213, CRL_REG_LEN_08BIT, 0x88}, + {0xd214, CRL_REG_LEN_08BIT, 0x8c}, + {0xd215, CRL_REG_LEN_08BIT, 0x65}, + {0xd216, CRL_REG_LEN_08BIT, 0x00}, + {0xd217, CRL_REG_LEN_08BIT, 0x00}, + {0xd218, CRL_REG_LEN_08BIT, 0xd8}, + {0xd219, CRL_REG_LEN_08BIT, 0x04}, + {0xd21a, CRL_REG_LEN_08BIT, 0x18}, + {0xd21b, CRL_REG_LEN_08BIT, 0x00}, + {0xd21c, CRL_REG_LEN_08BIT, 0x03}, + {0xd21d, CRL_REG_LEN_08BIT, 0xff}, + {0xd21e, CRL_REG_LEN_08BIT, 0xff}, + {0xd21f, CRL_REG_LEN_08BIT, 0xce}, + {0xd220, CRL_REG_LEN_08BIT, 0x19}, + {0xd221, CRL_REG_LEN_08BIT, 0x00}, + {0xd222, CRL_REG_LEN_08BIT, 0x80}, + {0xd223, CRL_REG_LEN_08BIT, 0x06}, + 
{0xd224, CRL_REG_LEN_08BIT, 0x8c}, + {0xd225, CRL_REG_LEN_08BIT, 0x63}, + {0xd226, CRL_REG_LEN_08BIT, 0x00}, + {0xd227, CRL_REG_LEN_08BIT, 0x00}, + {0xd228, CRL_REG_LEN_08BIT, 0xa4}, + {0xd229, CRL_REG_LEN_08BIT, 0x63}, + {0xd22a, CRL_REG_LEN_08BIT, 0x00}, + {0xd22b, CRL_REG_LEN_08BIT, 0x40}, + {0xd22c, CRL_REG_LEN_08BIT, 0xbc}, + {0xd22d, CRL_REG_LEN_08BIT, 0x23}, + {0xd22e, CRL_REG_LEN_08BIT, 0x00}, + {0xd22f, CRL_REG_LEN_08BIT, 0x00}, + {0xd230, CRL_REG_LEN_08BIT, 0x13}, + {0xd231, CRL_REG_LEN_08BIT, 0xff}, + {0xd232, CRL_REG_LEN_08BIT, 0xff}, + {0xd233, CRL_REG_LEN_08BIT, 0xc8}, + {0xd234, CRL_REG_LEN_08BIT, 0x9d}, + {0xd235, CRL_REG_LEN_08BIT, 0x00}, + {0xd236, CRL_REG_LEN_08BIT, 0x00}, + {0xd237, CRL_REG_LEN_08BIT, 0x40}, + {0xd238, CRL_REG_LEN_08BIT, 0xa8}, + {0xd239, CRL_REG_LEN_08BIT, 0x64}, + {0xd23a, CRL_REG_LEN_08BIT, 0x55}, + {0xd23b, CRL_REG_LEN_08BIT, 0x86}, + {0xd23c, CRL_REG_LEN_08BIT, 0xa8}, + {0xd23d, CRL_REG_LEN_08BIT, 0xa4}, + {0xd23e, CRL_REG_LEN_08BIT, 0x55}, + {0xd23f, CRL_REG_LEN_08BIT, 0x87}, + {0xd240, CRL_REG_LEN_08BIT, 0xd8}, + {0xd241, CRL_REG_LEN_08BIT, 0x03}, + {0xd242, CRL_REG_LEN_08BIT, 0x40}, + {0xd243, CRL_REG_LEN_08BIT, 0x00}, + {0xd244, CRL_REG_LEN_08BIT, 0xa8}, + {0xd245, CRL_REG_LEN_08BIT, 0x64}, + {0xd246, CRL_REG_LEN_08BIT, 0x55}, + {0xd247, CRL_REG_LEN_08BIT, 0x88}, + {0xd248, CRL_REG_LEN_08BIT, 0xd8}, + {0xd249, CRL_REG_LEN_08BIT, 0x05}, + {0xd24a, CRL_REG_LEN_08BIT, 0x40}, + {0xd24b, CRL_REG_LEN_08BIT, 0x00}, + {0xd24c, CRL_REG_LEN_08BIT, 0xd8}, + {0xd24d, CRL_REG_LEN_08BIT, 0x03}, + {0xd24e, CRL_REG_LEN_08BIT, 0x40}, + {0xd24f, CRL_REG_LEN_08BIT, 0x00}, + {0xd250, CRL_REG_LEN_08BIT, 0x03}, + {0xd251, CRL_REG_LEN_08BIT, 0xff}, + {0xd252, CRL_REG_LEN_08BIT, 0xff}, + {0xd253, CRL_REG_LEN_08BIT, 0xc1}, + {0xd254, CRL_REG_LEN_08BIT, 0x19}, + {0xd255, CRL_REG_LEN_08BIT, 0x00}, + {0xd256, CRL_REG_LEN_08BIT, 0x80}, + {0xd257, CRL_REG_LEN_08BIT, 0x06}, + {0xd258, CRL_REG_LEN_08BIT, 0x94}, + {0xd259, CRL_REG_LEN_08BIT, 0x84}, + 
{0xd25a, CRL_REG_LEN_08BIT, 0x00}, + {0xd25b, CRL_REG_LEN_08BIT, 0x72}, + {0xd25c, CRL_REG_LEN_08BIT, 0xe5}, + {0xd25d, CRL_REG_LEN_08BIT, 0xa4}, + {0xd25e, CRL_REG_LEN_08BIT, 0x60}, + {0xd25f, CRL_REG_LEN_08BIT, 0x00}, + {0xd260, CRL_REG_LEN_08BIT, 0x0c}, + {0xd261, CRL_REG_LEN_08BIT, 0x00}, + {0xd262, CRL_REG_LEN_08BIT, 0x00}, + {0xd263, CRL_REG_LEN_08BIT, 0x3f}, + {0xd264, CRL_REG_LEN_08BIT, 0x9d}, + {0xd265, CRL_REG_LEN_08BIT, 0x60}, + {0xd266, CRL_REG_LEN_08BIT, 0x01}, + {0xd267, CRL_REG_LEN_08BIT, 0x00}, + {0xd268, CRL_REG_LEN_08BIT, 0x85}, + {0xd269, CRL_REG_LEN_08BIT, 0x4e}, + {0xd26a, CRL_REG_LEN_08BIT, 0x00}, + {0xd26b, CRL_REG_LEN_08BIT, 0x00}, + {0xd26c, CRL_REG_LEN_08BIT, 0x98}, + {0xd26d, CRL_REG_LEN_08BIT, 0x70}, + {0xd26e, CRL_REG_LEN_08BIT, 0x00}, + {0xd26f, CRL_REG_LEN_08BIT, 0x00}, + {0xd270, CRL_REG_LEN_08BIT, 0x8c}, + {0xd271, CRL_REG_LEN_08BIT, 0x8a}, + {0xd272, CRL_REG_LEN_08BIT, 0x00}, + {0xd273, CRL_REG_LEN_08BIT, 0x6f}, + {0xd274, CRL_REG_LEN_08BIT, 0xe5}, + {0xd275, CRL_REG_LEN_08BIT, 0x63}, + {0xd276, CRL_REG_LEN_08BIT, 0x20}, + {0xd277, CRL_REG_LEN_08BIT, 0x00}, + {0xd278, CRL_REG_LEN_08BIT, 0x10}, + {0xd279, CRL_REG_LEN_08BIT, 0x00}, + {0xd27a, CRL_REG_LEN_08BIT, 0x00}, + {0xd27b, CRL_REG_LEN_08BIT, 0x07}, + {0xd27c, CRL_REG_LEN_08BIT, 0x15}, + {0xd27d, CRL_REG_LEN_08BIT, 0x00}, + {0xd27e, CRL_REG_LEN_08BIT, 0x00}, + {0xd27f, CRL_REG_LEN_08BIT, 0x00}, + {0xd280, CRL_REG_LEN_08BIT, 0x8c}, + {0xd281, CRL_REG_LEN_08BIT, 0xaa}, + {0xd282, CRL_REG_LEN_08BIT, 0x00}, + {0xd283, CRL_REG_LEN_08BIT, 0x6e}, + {0xd284, CRL_REG_LEN_08BIT, 0xe0}, + {0xd285, CRL_REG_LEN_08BIT, 0x63}, + {0xd286, CRL_REG_LEN_08BIT, 0x28}, + {0xd287, CRL_REG_LEN_08BIT, 0x02}, + {0xd288, CRL_REG_LEN_08BIT, 0xe0}, + {0xd289, CRL_REG_LEN_08BIT, 0x84}, + {0xd28a, CRL_REG_LEN_08BIT, 0x28}, + {0xd28b, CRL_REG_LEN_08BIT, 0x02}, + {0xd28c, CRL_REG_LEN_08BIT, 0x07}, + {0xd28d, CRL_REG_LEN_08BIT, 0xff}, + {0xd28e, CRL_REG_LEN_08BIT, 0xf8}, + {0xd28f, CRL_REG_LEN_08BIT, 0x66}, + 
{0xd290, CRL_REG_LEN_08BIT, 0xe0}, + {0xd291, CRL_REG_LEN_08BIT, 0x63}, + {0xd292, CRL_REG_LEN_08BIT, 0x5b}, + {0xd293, CRL_REG_LEN_08BIT, 0x06}, + {0xd294, CRL_REG_LEN_08BIT, 0x8c}, + {0xd295, CRL_REG_LEN_08BIT, 0x6a}, + {0xd296, CRL_REG_LEN_08BIT, 0x00}, + {0xd297, CRL_REG_LEN_08BIT, 0x77}, + {0xd298, CRL_REG_LEN_08BIT, 0xe0}, + {0xd299, CRL_REG_LEN_08BIT, 0x63}, + {0xd29a, CRL_REG_LEN_08BIT, 0x5b}, + {0xd29b, CRL_REG_LEN_08BIT, 0x06}, + {0xd29c, CRL_REG_LEN_08BIT, 0xbd}, + {0xd29d, CRL_REG_LEN_08BIT, 0x63}, + {0xd29e, CRL_REG_LEN_08BIT, 0x00}, + {0xd29f, CRL_REG_LEN_08BIT, 0x00}, + {0xd2a0, CRL_REG_LEN_08BIT, 0x0c}, + {0xd2a1, CRL_REG_LEN_08BIT, 0x00}, + {0xd2a2, CRL_REG_LEN_08BIT, 0x00}, + {0xd2a3, CRL_REG_LEN_08BIT, 0x3c}, + {0xd2a4, CRL_REG_LEN_08BIT, 0x15}, + {0xd2a5, CRL_REG_LEN_08BIT, 0x00}, + {0xd2a6, CRL_REG_LEN_08BIT, 0x00}, + {0xd2a7, CRL_REG_LEN_08BIT, 0x00}, + {0xd2a8, CRL_REG_LEN_08BIT, 0x8c}, + {0xd2a9, CRL_REG_LEN_08BIT, 0x8a}, + {0xd2aa, CRL_REG_LEN_08BIT, 0x00}, + {0xd2ab, CRL_REG_LEN_08BIT, 0x78}, + {0xd2ac, CRL_REG_LEN_08BIT, 0xb8}, + {0xd2ad, CRL_REG_LEN_08BIT, 0x63}, + {0xd2ae, CRL_REG_LEN_08BIT, 0x00}, + {0xd2af, CRL_REG_LEN_08BIT, 0x88}, + {0xd2b0, CRL_REG_LEN_08BIT, 0xe1}, + {0xd2b1, CRL_REG_LEN_08BIT, 0x64}, + {0xd2b2, CRL_REG_LEN_08BIT, 0x5b}, + {0xd2b3, CRL_REG_LEN_08BIT, 0x06}, + {0xd2b4, CRL_REG_LEN_08BIT, 0xbd}, + {0xd2b5, CRL_REG_LEN_08BIT, 0x6b}, + {0xd2b6, CRL_REG_LEN_08BIT, 0x00}, + {0xd2b7, CRL_REG_LEN_08BIT, 0x00}, + {0xd2b8, CRL_REG_LEN_08BIT, 0x0c}, + {0xd2b9, CRL_REG_LEN_08BIT, 0x00}, + {0xd2ba, CRL_REG_LEN_08BIT, 0x00}, + {0xd2bb, CRL_REG_LEN_08BIT, 0x34}, + {0xd2bc, CRL_REG_LEN_08BIT, 0xd4}, + {0xd2bd, CRL_REG_LEN_08BIT, 0x01}, + {0xd2be, CRL_REG_LEN_08BIT, 0x18}, + {0xd2bf, CRL_REG_LEN_08BIT, 0x14}, + {0xd2c0, CRL_REG_LEN_08BIT, 0xb9}, + {0xd2c1, CRL_REG_LEN_08BIT, 0x6b}, + {0xd2c2, CRL_REG_LEN_08BIT, 0x00}, + {0xd2c3, CRL_REG_LEN_08BIT, 0x88}, + {0xd2c4, CRL_REG_LEN_08BIT, 0x85}, + {0xd2c5, CRL_REG_LEN_08BIT, 0x01}, + 
{0xd2c6, CRL_REG_LEN_08BIT, 0x00}, + {0xd2c7, CRL_REG_LEN_08BIT, 0x14}, + {0xd2c8, CRL_REG_LEN_08BIT, 0xbd}, + {0xd2c9, CRL_REG_LEN_08BIT, 0x68}, + {0xd2ca, CRL_REG_LEN_08BIT, 0x00}, + {0xd2cb, CRL_REG_LEN_08BIT, 0x00}, + {0xd2cc, CRL_REG_LEN_08BIT, 0x0c}, + {0xd2cd, CRL_REG_LEN_08BIT, 0x00}, + {0xd2ce, CRL_REG_LEN_08BIT, 0x00}, + {0xd2cf, CRL_REG_LEN_08BIT, 0x2c}, + {0xd2d0, CRL_REG_LEN_08BIT, 0xd4}, + {0xd2d1, CRL_REG_LEN_08BIT, 0x01}, + {0xd2d2, CRL_REG_LEN_08BIT, 0x58}, + {0xd2d3, CRL_REG_LEN_08BIT, 0x18}, + {0xd2d4, CRL_REG_LEN_08BIT, 0x84}, + {0xd2d5, CRL_REG_LEN_08BIT, 0x81}, + {0xd2d6, CRL_REG_LEN_08BIT, 0x00}, + {0xd2d7, CRL_REG_LEN_08BIT, 0x14}, + {0xd2d8, CRL_REG_LEN_08BIT, 0xbd}, + {0xd2d9, CRL_REG_LEN_08BIT, 0xa4}, + {0xd2da, CRL_REG_LEN_08BIT, 0x01}, + {0xd2db, CRL_REG_LEN_08BIT, 0x00}, + {0xd2dc, CRL_REG_LEN_08BIT, 0x10}, + {0xd2dd, CRL_REG_LEN_08BIT, 0x00}, + {0xd2de, CRL_REG_LEN_08BIT, 0x00}, + {0xd2df, CRL_REG_LEN_08BIT, 0x05}, + {0xd2e0, CRL_REG_LEN_08BIT, 0x84}, + {0xd2e1, CRL_REG_LEN_08BIT, 0xc1}, + {0xd2e2, CRL_REG_LEN_08BIT, 0x00}, + {0xd2e3, CRL_REG_LEN_08BIT, 0x18}, + {0xd2e4, CRL_REG_LEN_08BIT, 0x9c}, + {0xd2e5, CRL_REG_LEN_08BIT, 0xa0}, + {0xd2e6, CRL_REG_LEN_08BIT, 0x01}, + {0xd2e7, CRL_REG_LEN_08BIT, 0x00}, + {0xd2e8, CRL_REG_LEN_08BIT, 0xd4}, + {0xd2e9, CRL_REG_LEN_08BIT, 0x01}, + {0xd2ea, CRL_REG_LEN_08BIT, 0x28}, + {0xd2eb, CRL_REG_LEN_08BIT, 0x14}, + {0xd2ec, CRL_REG_LEN_08BIT, 0x84}, + {0xd2ed, CRL_REG_LEN_08BIT, 0xc1}, + {0xd2ee, CRL_REG_LEN_08BIT, 0x00}, + {0xd2ef, CRL_REG_LEN_08BIT, 0x18}, + {0xd2f0, CRL_REG_LEN_08BIT, 0xbd}, + {0xd2f1, CRL_REG_LEN_08BIT, 0x66}, + {0xd2f2, CRL_REG_LEN_08BIT, 0x00}, + {0xd2f3, CRL_REG_LEN_08BIT, 0x00}, + {0xd2f4, CRL_REG_LEN_08BIT, 0x0c}, + {0xd2f5, CRL_REG_LEN_08BIT, 0x00}, + {0xd2f6, CRL_REG_LEN_08BIT, 0x00}, + {0xd2f7, CRL_REG_LEN_08BIT, 0x20}, + {0xd2f8, CRL_REG_LEN_08BIT, 0x9d}, + {0xd2f9, CRL_REG_LEN_08BIT, 0x00}, + {0xd2fa, CRL_REG_LEN_08BIT, 0x00}, + {0xd2fb, CRL_REG_LEN_08BIT, 0x00}, + 
{0xd2fc, CRL_REG_LEN_08BIT, 0x84}, + {0xd2fd, CRL_REG_LEN_08BIT, 0x61}, + {0xd2fe, CRL_REG_LEN_08BIT, 0x00}, + {0xd2ff, CRL_REG_LEN_08BIT, 0x18}, + {0xd300, CRL_REG_LEN_08BIT, 0xbd}, + {0xd301, CRL_REG_LEN_08BIT, 0xa3}, + {0xd302, CRL_REG_LEN_08BIT, 0x01}, + {0xd303, CRL_REG_LEN_08BIT, 0x00}, + {0xd304, CRL_REG_LEN_08BIT, 0x10}, + {0xd305, CRL_REG_LEN_08BIT, 0x00}, + {0xd306, CRL_REG_LEN_08BIT, 0x00}, + {0xd307, CRL_REG_LEN_08BIT, 0x03}, + {0xd308, CRL_REG_LEN_08BIT, 0x9c}, + {0xd309, CRL_REG_LEN_08BIT, 0x80}, + {0xd30a, CRL_REG_LEN_08BIT, 0x01}, + {0xd30b, CRL_REG_LEN_08BIT, 0x00}, + {0xd30c, CRL_REG_LEN_08BIT, 0xd4}, + {0xd30d, CRL_REG_LEN_08BIT, 0x01}, + {0xd30e, CRL_REG_LEN_08BIT, 0x20}, + {0xd30f, CRL_REG_LEN_08BIT, 0x18}, + {0xd310, CRL_REG_LEN_08BIT, 0x18}, + {0xd311, CRL_REG_LEN_08BIT, 0x60}, + {0xd312, CRL_REG_LEN_08BIT, 0x80}, + {0xd313, CRL_REG_LEN_08BIT, 0x06}, + {0xd314, CRL_REG_LEN_08BIT, 0x85}, + {0xd315, CRL_REG_LEN_08BIT, 0x01}, + {0xd316, CRL_REG_LEN_08BIT, 0x00}, + {0xd317, CRL_REG_LEN_08BIT, 0x14}, + {0xd318, CRL_REG_LEN_08BIT, 0xa8}, + {0xd319, CRL_REG_LEN_08BIT, 0x83}, + {0xd31a, CRL_REG_LEN_08BIT, 0x38}, + {0xd31b, CRL_REG_LEN_08BIT, 0x29}, + {0xd31c, CRL_REG_LEN_08BIT, 0xa8}, + {0xd31d, CRL_REG_LEN_08BIT, 0xc3}, + {0xd31e, CRL_REG_LEN_08BIT, 0x40}, + {0xd31f, CRL_REG_LEN_08BIT, 0x08}, + {0xd320, CRL_REG_LEN_08BIT, 0x8c}, + {0xd321, CRL_REG_LEN_08BIT, 0x84}, + {0xd322, CRL_REG_LEN_08BIT, 0x00}, + {0xd323, CRL_REG_LEN_08BIT, 0x00}, + {0xd324, CRL_REG_LEN_08BIT, 0xa8}, + {0xd325, CRL_REG_LEN_08BIT, 0xa3}, + {0xd326, CRL_REG_LEN_08BIT, 0x38}, + {0xd327, CRL_REG_LEN_08BIT, 0x2a}, + {0xd328, CRL_REG_LEN_08BIT, 0xa8}, + {0xd329, CRL_REG_LEN_08BIT, 0xe3}, + {0xd32a, CRL_REG_LEN_08BIT, 0x40}, + {0xd32b, CRL_REG_LEN_08BIT, 0x09}, + {0xd32c, CRL_REG_LEN_08BIT, 0xe0}, + {0xd32d, CRL_REG_LEN_08BIT, 0x64}, + {0xd32e, CRL_REG_LEN_08BIT, 0x40}, + {0xd32f, CRL_REG_LEN_08BIT, 0x00}, + {0xd330, CRL_REG_LEN_08BIT, 0xd8}, + {0xd331, CRL_REG_LEN_08BIT, 0x06}, + 
{0xd332, CRL_REG_LEN_08BIT, 0x18}, + {0xd333, CRL_REG_LEN_08BIT, 0x00}, + {0xd334, CRL_REG_LEN_08BIT, 0x8c}, + {0xd335, CRL_REG_LEN_08BIT, 0x65}, + {0xd336, CRL_REG_LEN_08BIT, 0x00}, + {0xd337, CRL_REG_LEN_08BIT, 0x00}, + {0xd338, CRL_REG_LEN_08BIT, 0x84}, + {0xd339, CRL_REG_LEN_08BIT, 0x81}, + {0xd33a, CRL_REG_LEN_08BIT, 0x00}, + {0xd33b, CRL_REG_LEN_08BIT, 0x18}, + {0xd33c, CRL_REG_LEN_08BIT, 0xe3}, + {0xd33d, CRL_REG_LEN_08BIT, 0xe3}, + {0xd33e, CRL_REG_LEN_08BIT, 0x20}, + {0xd33f, CRL_REG_LEN_08BIT, 0x00}, + {0xd340, CRL_REG_LEN_08BIT, 0xd8}, + {0xd341, CRL_REG_LEN_08BIT, 0x07}, + {0xd342, CRL_REG_LEN_08BIT, 0xf8}, + {0xd343, CRL_REG_LEN_08BIT, 0x00}, + {0xd344, CRL_REG_LEN_08BIT, 0x03}, + {0xd345, CRL_REG_LEN_08BIT, 0xff}, + {0xd346, CRL_REG_LEN_08BIT, 0xff}, + {0xd347, CRL_REG_LEN_08BIT, 0x6f}, + {0xd348, CRL_REG_LEN_08BIT, 0x18}, + {0xd349, CRL_REG_LEN_08BIT, 0x60}, + {0xd34a, CRL_REG_LEN_08BIT, 0x00}, + {0xd34b, CRL_REG_LEN_08BIT, 0x01}, + {0xd34c, CRL_REG_LEN_08BIT, 0x0f}, + {0xd34d, CRL_REG_LEN_08BIT, 0xff}, + {0xd34e, CRL_REG_LEN_08BIT, 0xff}, + {0xd34f, CRL_REG_LEN_08BIT, 0x9d}, + {0xd350, CRL_REG_LEN_08BIT, 0x18}, + {0xd351, CRL_REG_LEN_08BIT, 0x60}, + {0xd352, CRL_REG_LEN_08BIT, 0x80}, + {0xd353, CRL_REG_LEN_08BIT, 0x06}, + {0xd354, CRL_REG_LEN_08BIT, 0x00}, + {0xd355, CRL_REG_LEN_08BIT, 0x00}, + {0xd356, CRL_REG_LEN_08BIT, 0x00}, + {0xd357, CRL_REG_LEN_08BIT, 0x11}, + {0xd358, CRL_REG_LEN_08BIT, 0xa8}, + {0xd359, CRL_REG_LEN_08BIT, 0x83}, + {0xd35a, CRL_REG_LEN_08BIT, 0x6e}, + {0xd35b, CRL_REG_LEN_08BIT, 0x43}, + {0xd35c, CRL_REG_LEN_08BIT, 0xe0}, + {0xd35d, CRL_REG_LEN_08BIT, 0x6c}, + {0xd35e, CRL_REG_LEN_08BIT, 0x28}, + {0xd35f, CRL_REG_LEN_08BIT, 0x02}, + {0xd360, CRL_REG_LEN_08BIT, 0xe0}, + {0xd361, CRL_REG_LEN_08BIT, 0x84}, + {0xd362, CRL_REG_LEN_08BIT, 0x28}, + {0xd363, CRL_REG_LEN_08BIT, 0x02}, + {0xd364, CRL_REG_LEN_08BIT, 0x07}, + {0xd365, CRL_REG_LEN_08BIT, 0xff}, + {0xd366, CRL_REG_LEN_08BIT, 0xf8}, + {0xd367, CRL_REG_LEN_08BIT, 0x30}, + 
{0xd368, CRL_REG_LEN_08BIT, 0xb8}, + {0xd369, CRL_REG_LEN_08BIT, 0x63}, + {0xd36a, CRL_REG_LEN_08BIT, 0x00}, + {0xd36b, CRL_REG_LEN_08BIT, 0x08}, + {0xd36c, CRL_REG_LEN_08BIT, 0x03}, + {0xd36d, CRL_REG_LEN_08BIT, 0xff}, + {0xd36e, CRL_REG_LEN_08BIT, 0xff}, + {0xd36f, CRL_REG_LEN_08BIT, 0xc0}, + {0xd370, CRL_REG_LEN_08BIT, 0x85}, + {0xd371, CRL_REG_LEN_08BIT, 0x4e}, + {0xd372, CRL_REG_LEN_08BIT, 0x00}, + {0xd373, CRL_REG_LEN_08BIT, 0x00}, + {0xd374, CRL_REG_LEN_08BIT, 0x03}, + {0xd375, CRL_REG_LEN_08BIT, 0xff}, + {0xd376, CRL_REG_LEN_08BIT, 0xff}, + {0xd377, CRL_REG_LEN_08BIT, 0xe7}, + {0xd378, CRL_REG_LEN_08BIT, 0xd4}, + {0xd379, CRL_REG_LEN_08BIT, 0x01}, + {0xd37a, CRL_REG_LEN_08BIT, 0x40}, + {0xd37b, CRL_REG_LEN_08BIT, 0x18}, + {0xd37c, CRL_REG_LEN_08BIT, 0x9c}, + {0xd37d, CRL_REG_LEN_08BIT, 0x60}, + {0xd37e, CRL_REG_LEN_08BIT, 0x00}, + {0xd37f, CRL_REG_LEN_08BIT, 0x00}, + {0xd380, CRL_REG_LEN_08BIT, 0x03}, + {0xd381, CRL_REG_LEN_08BIT, 0xff}, + {0xd382, CRL_REG_LEN_08BIT, 0xff}, + {0xd383, CRL_REG_LEN_08BIT, 0xdb}, + {0xd384, CRL_REG_LEN_08BIT, 0xd4}, + {0xd385, CRL_REG_LEN_08BIT, 0x01}, + {0xd386, CRL_REG_LEN_08BIT, 0x18}, + {0xd387, CRL_REG_LEN_08BIT, 0x14}, + {0xd388, CRL_REG_LEN_08BIT, 0x03}, + {0xd389, CRL_REG_LEN_08BIT, 0xff}, + {0xd38a, CRL_REG_LEN_08BIT, 0xff}, + {0xd38b, CRL_REG_LEN_08BIT, 0xce}, + {0xd38c, CRL_REG_LEN_08BIT, 0x9d}, + {0xd38d, CRL_REG_LEN_08BIT, 0x6b}, + {0xd38e, CRL_REG_LEN_08BIT, 0x00}, + {0xd38f, CRL_REG_LEN_08BIT, 0xff}, + {0xd390, CRL_REG_LEN_08BIT, 0x03}, + {0xd391, CRL_REG_LEN_08BIT, 0xff}, + {0xd392, CRL_REG_LEN_08BIT, 0xff}, + {0xd393, CRL_REG_LEN_08BIT, 0xc6}, + {0xd394, CRL_REG_LEN_08BIT, 0x9c}, + {0xd395, CRL_REG_LEN_08BIT, 0x63}, + {0xd396, CRL_REG_LEN_08BIT, 0x00}, + {0xd397, CRL_REG_LEN_08BIT, 0xff}, + {0xd398, CRL_REG_LEN_08BIT, 0xa8}, + {0xd399, CRL_REG_LEN_08BIT, 0xe3}, + {0xd39a, CRL_REG_LEN_08BIT, 0x38}, + {0xd39b, CRL_REG_LEN_08BIT, 0x0f}, + {0xd39c, CRL_REG_LEN_08BIT, 0x8c}, + {0xd39d, CRL_REG_LEN_08BIT, 0x84}, + 
{0xd39e, CRL_REG_LEN_08BIT, 0x00}, + {0xd39f, CRL_REG_LEN_08BIT, 0x00}, + {0xd3a0, CRL_REG_LEN_08BIT, 0xa8}, + {0xd3a1, CRL_REG_LEN_08BIT, 0xa3}, + {0xd3a2, CRL_REG_LEN_08BIT, 0x38}, + {0xd3a3, CRL_REG_LEN_08BIT, 0x0e}, + {0xd3a4, CRL_REG_LEN_08BIT, 0xa8}, + {0xd3a5, CRL_REG_LEN_08BIT, 0xc3}, + {0xd3a6, CRL_REG_LEN_08BIT, 0x6e}, + {0xd3a7, CRL_REG_LEN_08BIT, 0x42}, + {0xd3a8, CRL_REG_LEN_08BIT, 0xd8}, + {0xd3a9, CRL_REG_LEN_08BIT, 0x07}, + {0xd3aa, CRL_REG_LEN_08BIT, 0x20}, + {0xd3ab, CRL_REG_LEN_08BIT, 0x00}, + {0xd3ac, CRL_REG_LEN_08BIT, 0x8c}, + {0xd3ad, CRL_REG_LEN_08BIT, 0x66}, + {0xd3ae, CRL_REG_LEN_08BIT, 0x00}, + {0xd3af, CRL_REG_LEN_08BIT, 0x00}, + {0xd3b0, CRL_REG_LEN_08BIT, 0xd8}, + {0xd3b1, CRL_REG_LEN_08BIT, 0x05}, + {0xd3b2, CRL_REG_LEN_08BIT, 0x18}, + {0xd3b3, CRL_REG_LEN_08BIT, 0x00}, + {0xd3b4, CRL_REG_LEN_08BIT, 0x85}, + {0xd3b5, CRL_REG_LEN_08BIT, 0x21}, + {0xd3b6, CRL_REG_LEN_08BIT, 0x00}, + {0xd3b7, CRL_REG_LEN_08BIT, 0x00}, + {0xd3b8, CRL_REG_LEN_08BIT, 0x85}, + {0xd3b9, CRL_REG_LEN_08BIT, 0x41}, + {0xd3ba, CRL_REG_LEN_08BIT, 0x00}, + {0xd3bb, CRL_REG_LEN_08BIT, 0x04}, + {0xd3bc, CRL_REG_LEN_08BIT, 0x85}, + {0xd3bd, CRL_REG_LEN_08BIT, 0x81}, + {0xd3be, CRL_REG_LEN_08BIT, 0x00}, + {0xd3bf, CRL_REG_LEN_08BIT, 0x08}, + {0xd3c0, CRL_REG_LEN_08BIT, 0x85}, + {0xd3c1, CRL_REG_LEN_08BIT, 0xc1}, + {0xd3c2, CRL_REG_LEN_08BIT, 0x00}, + {0xd3c3, CRL_REG_LEN_08BIT, 0x0c}, + {0xd3c4, CRL_REG_LEN_08BIT, 0x86}, + {0xd3c5, CRL_REG_LEN_08BIT, 0x01}, + {0xd3c6, CRL_REG_LEN_08BIT, 0x00}, + {0xd3c7, CRL_REG_LEN_08BIT, 0x10}, + {0xd3c8, CRL_REG_LEN_08BIT, 0x44}, + {0xd3c9, CRL_REG_LEN_08BIT, 0x00}, + {0xd3ca, CRL_REG_LEN_08BIT, 0x48}, + {0xd3cb, CRL_REG_LEN_08BIT, 0x00}, + {0xd3cc, CRL_REG_LEN_08BIT, 0x9c}, + {0xd3cd, CRL_REG_LEN_08BIT, 0x21}, + {0xd3ce, CRL_REG_LEN_08BIT, 0x00}, + {0xd3cf, CRL_REG_LEN_08BIT, 0x1c}, + {0xd3d0, CRL_REG_LEN_08BIT, 0x9c}, + {0xd3d1, CRL_REG_LEN_08BIT, 0x21}, + {0xd3d2, CRL_REG_LEN_08BIT, 0xff}, + {0xd3d3, CRL_REG_LEN_08BIT, 0xfc}, + 
{0xd3d4, CRL_REG_LEN_08BIT, 0xd4}, + {0xd3d5, CRL_REG_LEN_08BIT, 0x01}, + {0xd3d6, CRL_REG_LEN_08BIT, 0x48}, + {0xd3d7, CRL_REG_LEN_08BIT, 0x00}, + {0xd3d8, CRL_REG_LEN_08BIT, 0x18}, + {0xd3d9, CRL_REG_LEN_08BIT, 0x60}, + {0xd3da, CRL_REG_LEN_08BIT, 0x00}, + {0xd3db, CRL_REG_LEN_08BIT, 0x01}, + {0xd3dc, CRL_REG_LEN_08BIT, 0xa8}, + {0xd3dd, CRL_REG_LEN_08BIT, 0x63}, + {0xd3de, CRL_REG_LEN_08BIT, 0x07}, + {0xd3df, CRL_REG_LEN_08BIT, 0x80}, + {0xd3e0, CRL_REG_LEN_08BIT, 0x8c}, + {0xd3e1, CRL_REG_LEN_08BIT, 0x63}, + {0xd3e2, CRL_REG_LEN_08BIT, 0x00}, + {0xd3e3, CRL_REG_LEN_08BIT, 0x68}, + {0xd3e4, CRL_REG_LEN_08BIT, 0xbc}, + {0xd3e5, CRL_REG_LEN_08BIT, 0x03}, + {0xd3e6, CRL_REG_LEN_08BIT, 0x00}, + {0xd3e7, CRL_REG_LEN_08BIT, 0x00}, + {0xd3e8, CRL_REG_LEN_08BIT, 0x10}, + {0xd3e9, CRL_REG_LEN_08BIT, 0x00}, + {0xd3ea, CRL_REG_LEN_08BIT, 0x00}, + {0xd3eb, CRL_REG_LEN_08BIT, 0x0c}, + {0xd3ec, CRL_REG_LEN_08BIT, 0x15}, + {0xd3ed, CRL_REG_LEN_08BIT, 0x00}, + {0xd3ee, CRL_REG_LEN_08BIT, 0x00}, + {0xd3ef, CRL_REG_LEN_08BIT, 0x00}, + {0xd3f0, CRL_REG_LEN_08BIT, 0x07}, + {0xd3f1, CRL_REG_LEN_08BIT, 0xff}, + {0xd3f2, CRL_REG_LEN_08BIT, 0xd9}, + {0xd3f3, CRL_REG_LEN_08BIT, 0x98}, + {0xd3f4, CRL_REG_LEN_08BIT, 0x15}, + {0xd3f5, CRL_REG_LEN_08BIT, 0x00}, + {0xd3f6, CRL_REG_LEN_08BIT, 0x00}, + {0xd3f7, CRL_REG_LEN_08BIT, 0x00}, + {0xd3f8, CRL_REG_LEN_08BIT, 0x18}, + {0xd3f9, CRL_REG_LEN_08BIT, 0x60}, + {0xd3fa, CRL_REG_LEN_08BIT, 0x80}, + {0xd3fb, CRL_REG_LEN_08BIT, 0x06}, + {0xd3fc, CRL_REG_LEN_08BIT, 0xa8}, + {0xd3fd, CRL_REG_LEN_08BIT, 0x63}, + {0xd3fe, CRL_REG_LEN_08BIT, 0xc4}, + {0xd3ff, CRL_REG_LEN_08BIT, 0xb8}, + {0xd400, CRL_REG_LEN_08BIT, 0x8c}, + {0xd401, CRL_REG_LEN_08BIT, 0x63}, + {0xd402, CRL_REG_LEN_08BIT, 0x00}, + {0xd403, CRL_REG_LEN_08BIT, 0x00}, + {0xd404, CRL_REG_LEN_08BIT, 0xbc}, + {0xd405, CRL_REG_LEN_08BIT, 0x23}, + {0xd406, CRL_REG_LEN_08BIT, 0x00}, + {0xd407, CRL_REG_LEN_08BIT, 0x01}, + {0xd408, CRL_REG_LEN_08BIT, 0x10}, + {0xd409, CRL_REG_LEN_08BIT, 0x00}, + 
{0xd40a, CRL_REG_LEN_08BIT, 0x00}, + {0xd40b, CRL_REG_LEN_08BIT, 0x25}, + {0xd40c, CRL_REG_LEN_08BIT, 0x9d}, + {0xd40d, CRL_REG_LEN_08BIT, 0x00}, + {0xd40e, CRL_REG_LEN_08BIT, 0x00}, + {0xd40f, CRL_REG_LEN_08BIT, 0x00}, + {0xd410, CRL_REG_LEN_08BIT, 0x00}, + {0xd411, CRL_REG_LEN_08BIT, 0x00}, + {0xd412, CRL_REG_LEN_08BIT, 0x00}, + {0xd413, CRL_REG_LEN_08BIT, 0x0b}, + {0xd414, CRL_REG_LEN_08BIT, 0xb8}, + {0xd415, CRL_REG_LEN_08BIT, 0xe8}, + {0xd416, CRL_REG_LEN_08BIT, 0x00}, + {0xd417, CRL_REG_LEN_08BIT, 0x02}, + {0xd418, CRL_REG_LEN_08BIT, 0x07}, + {0xd419, CRL_REG_LEN_08BIT, 0xff}, + {0xd41a, CRL_REG_LEN_08BIT, 0xd6}, + {0xd41b, CRL_REG_LEN_08BIT, 0x24}, + {0xd41c, CRL_REG_LEN_08BIT, 0x15}, + {0xd41d, CRL_REG_LEN_08BIT, 0x00}, + {0xd41e, CRL_REG_LEN_08BIT, 0x00}, + {0xd41f, CRL_REG_LEN_08BIT, 0x00}, + {0xd420, CRL_REG_LEN_08BIT, 0x18}, + {0xd421, CRL_REG_LEN_08BIT, 0x60}, + {0xd422, CRL_REG_LEN_08BIT, 0x80}, + {0xd423, CRL_REG_LEN_08BIT, 0x06}, + {0xd424, CRL_REG_LEN_08BIT, 0xa8}, + {0xd425, CRL_REG_LEN_08BIT, 0x63}, + {0xd426, CRL_REG_LEN_08BIT, 0xc4}, + {0xd427, CRL_REG_LEN_08BIT, 0xb8}, + {0xd428, CRL_REG_LEN_08BIT, 0x8c}, + {0xd429, CRL_REG_LEN_08BIT, 0x63}, + {0xd42a, CRL_REG_LEN_08BIT, 0x00}, + {0xd42b, CRL_REG_LEN_08BIT, 0x00}, + {0xd42c, CRL_REG_LEN_08BIT, 0xbc}, + {0xd42d, CRL_REG_LEN_08BIT, 0x23}, + {0xd42e, CRL_REG_LEN_08BIT, 0x00}, + {0xd42f, CRL_REG_LEN_08BIT, 0x01}, + {0xd430, CRL_REG_LEN_08BIT, 0x10}, + {0xd431, CRL_REG_LEN_08BIT, 0x00}, + {0xd432, CRL_REG_LEN_08BIT, 0x00}, + {0xd433, CRL_REG_LEN_08BIT, 0x1b}, + {0xd434, CRL_REG_LEN_08BIT, 0x9d}, + {0xd435, CRL_REG_LEN_08BIT, 0x00}, + {0xd436, CRL_REG_LEN_08BIT, 0x00}, + {0xd437, CRL_REG_LEN_08BIT, 0x00}, + {0xd438, CRL_REG_LEN_08BIT, 0xb8}, + {0xd439, CRL_REG_LEN_08BIT, 0xe8}, + {0xd43a, CRL_REG_LEN_08BIT, 0x00}, + {0xd43b, CRL_REG_LEN_08BIT, 0x02}, + {0xd43c, CRL_REG_LEN_08BIT, 0x9c}, + {0xd43d, CRL_REG_LEN_08BIT, 0xc0}, + {0xd43e, CRL_REG_LEN_08BIT, 0x00}, + {0xd43f, CRL_REG_LEN_08BIT, 0x00}, + 
{0xd440, CRL_REG_LEN_08BIT, 0x18}, + {0xd441, CRL_REG_LEN_08BIT, 0xa0}, + {0xd442, CRL_REG_LEN_08BIT, 0x80}, + {0xd443, CRL_REG_LEN_08BIT, 0x06}, + {0xd444, CRL_REG_LEN_08BIT, 0xe0}, + {0xd445, CRL_REG_LEN_08BIT, 0x67}, + {0xd446, CRL_REG_LEN_08BIT, 0x30}, + {0xd447, CRL_REG_LEN_08BIT, 0x00}, + {0xd448, CRL_REG_LEN_08BIT, 0xa8}, + {0xd449, CRL_REG_LEN_08BIT, 0xa5}, + {0xd44a, CRL_REG_LEN_08BIT, 0xce}, + {0xd44b, CRL_REG_LEN_08BIT, 0xb0}, + {0xd44c, CRL_REG_LEN_08BIT, 0x19}, + {0xd44d, CRL_REG_LEN_08BIT, 0x60}, + {0xd44e, CRL_REG_LEN_08BIT, 0x00}, + {0xd44f, CRL_REG_LEN_08BIT, 0x01}, + {0xd450, CRL_REG_LEN_08BIT, 0xa9}, + {0xd451, CRL_REG_LEN_08BIT, 0x6b}, + {0xd452, CRL_REG_LEN_08BIT, 0x06}, + {0xd453, CRL_REG_LEN_08BIT, 0x14}, + {0xd454, CRL_REG_LEN_08BIT, 0xe0}, + {0xd455, CRL_REG_LEN_08BIT, 0x83}, + {0xd456, CRL_REG_LEN_08BIT, 0x28}, + {0xd457, CRL_REG_LEN_08BIT, 0x00}, + {0xd458, CRL_REG_LEN_08BIT, 0x9c}, + {0xd459, CRL_REG_LEN_08BIT, 0xc6}, + {0xd45a, CRL_REG_LEN_08BIT, 0x00}, + {0xd45b, CRL_REG_LEN_08BIT, 0x01}, + {0xd45c, CRL_REG_LEN_08BIT, 0xe0}, + {0xd45d, CRL_REG_LEN_08BIT, 0x63}, + {0xd45e, CRL_REG_LEN_08BIT, 0x18}, + {0xd45f, CRL_REG_LEN_08BIT, 0x00}, + {0xd460, CRL_REG_LEN_08BIT, 0x8c}, + {0xd461, CRL_REG_LEN_08BIT, 0x84}, + {0xd462, CRL_REG_LEN_08BIT, 0x00}, + {0xd463, CRL_REG_LEN_08BIT, 0x00}, + {0xd464, CRL_REG_LEN_08BIT, 0xe0}, + {0xd465, CRL_REG_LEN_08BIT, 0xa3}, + {0xd466, CRL_REG_LEN_08BIT, 0x58}, + {0xd467, CRL_REG_LEN_08BIT, 0x00}, + {0xd468, CRL_REG_LEN_08BIT, 0xa4}, + {0xd469, CRL_REG_LEN_08BIT, 0xc6}, + {0xd46a, CRL_REG_LEN_08BIT, 0x00}, + {0xd46b, CRL_REG_LEN_08BIT, 0xff}, + {0xd46c, CRL_REG_LEN_08BIT, 0xb8}, + {0xd46d, CRL_REG_LEN_08BIT, 0x64}, + {0xd46e, CRL_REG_LEN_08BIT, 0x00}, + {0xd46f, CRL_REG_LEN_08BIT, 0x18}, + {0xd470, CRL_REG_LEN_08BIT, 0xbc}, + {0xd471, CRL_REG_LEN_08BIT, 0x46}, + {0xd472, CRL_REG_LEN_08BIT, 0x00}, + {0xd473, CRL_REG_LEN_08BIT, 0x03}, + {0xd474, CRL_REG_LEN_08BIT, 0x94}, + {0xd475, CRL_REG_LEN_08BIT, 0x85}, + 
{0xd476, CRL_REG_LEN_08BIT, 0x00}, + {0xd477, CRL_REG_LEN_08BIT, 0x00}, + {0xd478, CRL_REG_LEN_08BIT, 0xb8}, + {0xd479, CRL_REG_LEN_08BIT, 0x63}, + {0xd47a, CRL_REG_LEN_08BIT, 0x00}, + {0xd47b, CRL_REG_LEN_08BIT, 0x98}, + {0xd47c, CRL_REG_LEN_08BIT, 0xe0}, + {0xd47d, CRL_REG_LEN_08BIT, 0x64}, + {0xd47e, CRL_REG_LEN_08BIT, 0x18}, + {0xd47f, CRL_REG_LEN_08BIT, 0x00}, + {0xd480, CRL_REG_LEN_08BIT, 0x0f}, + {0xd481, CRL_REG_LEN_08BIT, 0xff}, + {0xd482, CRL_REG_LEN_08BIT, 0xff}, + {0xd483, CRL_REG_LEN_08BIT, 0xf0}, + {0xd484, CRL_REG_LEN_08BIT, 0xdc}, + {0xd485, CRL_REG_LEN_08BIT, 0x05}, + {0xd486, CRL_REG_LEN_08BIT, 0x18}, + {0xd487, CRL_REG_LEN_08BIT, 0x00}, + {0xd488, CRL_REG_LEN_08BIT, 0x9c}, + {0xd489, CRL_REG_LEN_08BIT, 0x68}, + {0xd48a, CRL_REG_LEN_08BIT, 0x00}, + {0xd48b, CRL_REG_LEN_08BIT, 0x01}, + {0xd48c, CRL_REG_LEN_08BIT, 0xa5}, + {0xd48d, CRL_REG_LEN_08BIT, 0x03}, + {0xd48e, CRL_REG_LEN_08BIT, 0x00}, + {0xd48f, CRL_REG_LEN_08BIT, 0xff}, + {0xd490, CRL_REG_LEN_08BIT, 0xbc}, + {0xd491, CRL_REG_LEN_08BIT, 0x48}, + {0xd492, CRL_REG_LEN_08BIT, 0x00}, + {0xd493, CRL_REG_LEN_08BIT, 0x01}, + {0xd494, CRL_REG_LEN_08BIT, 0x0f}, + {0xd495, CRL_REG_LEN_08BIT, 0xff}, + {0xd496, CRL_REG_LEN_08BIT, 0xff}, + {0xd497, CRL_REG_LEN_08BIT, 0xea}, + {0xd498, CRL_REG_LEN_08BIT, 0xb8}, + {0xd499, CRL_REG_LEN_08BIT, 0xe8}, + {0xd49a, CRL_REG_LEN_08BIT, 0x00}, + {0xd49b, CRL_REG_LEN_08BIT, 0x02}, + {0xd49c, CRL_REG_LEN_08BIT, 0x18}, + {0xd49d, CRL_REG_LEN_08BIT, 0x60}, + {0xd49e, CRL_REG_LEN_08BIT, 0x00}, + {0xd49f, CRL_REG_LEN_08BIT, 0x01}, + {0xd4a0, CRL_REG_LEN_08BIT, 0xa8}, + {0xd4a1, CRL_REG_LEN_08BIT, 0x63}, + {0xd4a2, CRL_REG_LEN_08BIT, 0x06}, + {0xd4a3, CRL_REG_LEN_08BIT, 0x14}, + {0xd4a4, CRL_REG_LEN_08BIT, 0x07}, + {0xd4a5, CRL_REG_LEN_08BIT, 0xff}, + {0xd4a6, CRL_REG_LEN_08BIT, 0xe4}, + {0xd4a7, CRL_REG_LEN_08BIT, 0x05}, + {0xd4a8, CRL_REG_LEN_08BIT, 0x9c}, + {0xd4a9, CRL_REG_LEN_08BIT, 0x83}, + {0xd4aa, CRL_REG_LEN_08BIT, 0x00}, + {0xd4ab, CRL_REG_LEN_08BIT, 0x10}, + 
{0xd4ac, CRL_REG_LEN_08BIT, 0x85}, + {0xd4ad, CRL_REG_LEN_08BIT, 0x21}, + {0xd4ae, CRL_REG_LEN_08BIT, 0x00}, + {0xd4af, CRL_REG_LEN_08BIT, 0x00}, + {0xd4b0, CRL_REG_LEN_08BIT, 0x44}, + {0xd4b1, CRL_REG_LEN_08BIT, 0x00}, + {0xd4b2, CRL_REG_LEN_08BIT, 0x48}, + {0xd4b3, CRL_REG_LEN_08BIT, 0x00}, + {0xd4b4, CRL_REG_LEN_08BIT, 0x9c}, + {0xd4b5, CRL_REG_LEN_08BIT, 0x21}, + {0xd4b6, CRL_REG_LEN_08BIT, 0x00}, + {0xd4b7, CRL_REG_LEN_08BIT, 0x04}, + {0xd4b8, CRL_REG_LEN_08BIT, 0x18}, + {0xd4b9, CRL_REG_LEN_08BIT, 0x60}, + {0xd4ba, CRL_REG_LEN_08BIT, 0x00}, + {0xd4bb, CRL_REG_LEN_08BIT, 0x01}, + {0xd4bc, CRL_REG_LEN_08BIT, 0x9c}, + {0xd4bd, CRL_REG_LEN_08BIT, 0x80}, + {0xd4be, CRL_REG_LEN_08BIT, 0xff}, + {0xd4bf, CRL_REG_LEN_08BIT, 0xff}, + {0xd4c0, CRL_REG_LEN_08BIT, 0xa8}, + {0xd4c1, CRL_REG_LEN_08BIT, 0x63}, + {0xd4c2, CRL_REG_LEN_08BIT, 0x09}, + {0xd4c3, CRL_REG_LEN_08BIT, 0xef}, + {0xd4c4, CRL_REG_LEN_08BIT, 0xd8}, + {0xd4c5, CRL_REG_LEN_08BIT, 0x03}, + {0xd4c6, CRL_REG_LEN_08BIT, 0x20}, + {0xd4c7, CRL_REG_LEN_08BIT, 0x00}, + {0xd4c8, CRL_REG_LEN_08BIT, 0x18}, + {0xd4c9, CRL_REG_LEN_08BIT, 0x60}, + {0xd4ca, CRL_REG_LEN_08BIT, 0x80}, + {0xd4cb, CRL_REG_LEN_08BIT, 0x06}, + {0xd4cc, CRL_REG_LEN_08BIT, 0xa8}, + {0xd4cd, CRL_REG_LEN_08BIT, 0x63}, + {0xd4ce, CRL_REG_LEN_08BIT, 0xc9}, + {0xd4cf, CRL_REG_LEN_08BIT, 0xef}, + {0xd4d0, CRL_REG_LEN_08BIT, 0xd8}, + {0xd4d1, CRL_REG_LEN_08BIT, 0x03}, + {0xd4d2, CRL_REG_LEN_08BIT, 0x20}, + {0xd4d3, CRL_REG_LEN_08BIT, 0x00}, + {0xd4d4, CRL_REG_LEN_08BIT, 0x44}, + {0xd4d5, CRL_REG_LEN_08BIT, 0x00}, + {0xd4d6, CRL_REG_LEN_08BIT, 0x48}, + {0xd4d7, CRL_REG_LEN_08BIT, 0x00}, + {0xd4d8, CRL_REG_LEN_08BIT, 0x15}, + {0xd4d9, CRL_REG_LEN_08BIT, 0x00}, + {0xd4da, CRL_REG_LEN_08BIT, 0x00}, + {0xd4db, CRL_REG_LEN_08BIT, 0x00}, + {0xd4dc, CRL_REG_LEN_08BIT, 0x18}, + {0xd4dd, CRL_REG_LEN_08BIT, 0x80}, + {0xd4de, CRL_REG_LEN_08BIT, 0x00}, + {0xd4df, CRL_REG_LEN_08BIT, 0x01}, + {0xd4e0, CRL_REG_LEN_08BIT, 0xa8}, + {0xd4e1, CRL_REG_LEN_08BIT, 0x84}, + 
{0xd4e2, CRL_REG_LEN_08BIT, 0x0a}, + {0xd4e3, CRL_REG_LEN_08BIT, 0x12}, + {0xd4e4, CRL_REG_LEN_08BIT, 0x8c}, + {0xd4e5, CRL_REG_LEN_08BIT, 0x64}, + {0xd4e6, CRL_REG_LEN_08BIT, 0x00}, + {0xd4e7, CRL_REG_LEN_08BIT, 0x00}, + {0xd4e8, CRL_REG_LEN_08BIT, 0xbc}, + {0xd4e9, CRL_REG_LEN_08BIT, 0x03}, + {0xd4ea, CRL_REG_LEN_08BIT, 0x00}, + {0xd4eb, CRL_REG_LEN_08BIT, 0x00}, + {0xd4ec, CRL_REG_LEN_08BIT, 0x13}, + {0xd4ed, CRL_REG_LEN_08BIT, 0xff}, + {0xd4ee, CRL_REG_LEN_08BIT, 0xff}, + {0xd4ef, CRL_REG_LEN_08BIT, 0xfe}, + {0xd4f0, CRL_REG_LEN_08BIT, 0x15}, + {0xd4f1, CRL_REG_LEN_08BIT, 0x00}, + {0xd4f2, CRL_REG_LEN_08BIT, 0x00}, + {0xd4f3, CRL_REG_LEN_08BIT, 0x00}, + {0xd4f4, CRL_REG_LEN_08BIT, 0x44}, + {0xd4f5, CRL_REG_LEN_08BIT, 0x00}, + {0xd4f6, CRL_REG_LEN_08BIT, 0x48}, + {0xd4f7, CRL_REG_LEN_08BIT, 0x00}, + {0xd4f8, CRL_REG_LEN_08BIT, 0x15}, + {0xd4f9, CRL_REG_LEN_08BIT, 0x00}, + {0xd4fa, CRL_REG_LEN_08BIT, 0x00}, + {0xd4fb, CRL_REG_LEN_08BIT, 0x00}, + {0xd4fc, CRL_REG_LEN_08BIT, 0x00}, + {0xd4fd, CRL_REG_LEN_08BIT, 0x00}, + {0xd4fe, CRL_REG_LEN_08BIT, 0x00}, + {0xd4ff, CRL_REG_LEN_08BIT, 0x00}, + {0xd500, CRL_REG_LEN_08BIT, 0x00}, + {0xd501, CRL_REG_LEN_08BIT, 0x00}, + {0xd502, CRL_REG_LEN_08BIT, 0x00}, + {0xd503, CRL_REG_LEN_08BIT, 0x00}, + {0x6f0e, CRL_REG_LEN_08BIT, 0x33}, + {0x6f0f, CRL_REG_LEN_08BIT, 0x33}, + {0x460e, CRL_REG_LEN_08BIT, 0x08}, + {0x460f, CRL_REG_LEN_08BIT, 0x01}, + {0x4610, CRL_REG_LEN_08BIT, 0x00}, + {0x4611, CRL_REG_LEN_08BIT, 0x01}, + {0x4612, CRL_REG_LEN_08BIT, 0x00}, + {0x4613, CRL_REG_LEN_08BIT, 0x01}, + {0x4605, CRL_REG_LEN_08BIT, 0x08},/*YUV 8bit*/ + {0x4608, CRL_REG_LEN_08BIT, 0x00}, + {0x4609, CRL_REG_LEN_08BIT, 0x08}, + {0x6804, CRL_REG_LEN_08BIT, 0x00}, + {0x6805, CRL_REG_LEN_08BIT, 0x06}, + {0x6806, CRL_REG_LEN_08BIT, 0x00}, + {0x5120, CRL_REG_LEN_08BIT, 0x00}, + {0x3510, CRL_REG_LEN_08BIT, 0x00}, + {0x3504, CRL_REG_LEN_08BIT, 0x00}, + {0x6800, CRL_REG_LEN_08BIT, 0x00}, + {0x6f0d, CRL_REG_LEN_08BIT, 0x0f}, + {0x5000, 
CRL_REG_LEN_08BIT, 0xff}, + {0x5001, CRL_REG_LEN_08BIT, 0xbf}, + {0x5002, CRL_REG_LEN_08BIT, 0x7e}, + {0x5003, CRL_REG_LEN_08BIT, 0x0c}, + {0x503d, CRL_REG_LEN_08BIT, 0x00}, + {0xc450, CRL_REG_LEN_08BIT, 0x01}, + {0xc452, CRL_REG_LEN_08BIT, 0x04}, + {0xc453, CRL_REG_LEN_08BIT, 0x00}, + {0xc454, CRL_REG_LEN_08BIT, 0x01}, + {0xc455, CRL_REG_LEN_08BIT, 0x00}, + {0xc456, CRL_REG_LEN_08BIT, 0x00}, + {0xc457, CRL_REG_LEN_08BIT, 0x00}, + {0xc458, CRL_REG_LEN_08BIT, 0x00}, + {0xc459, CRL_REG_LEN_08BIT, 0x00}, + {0xc45b, CRL_REG_LEN_08BIT, 0x00}, + {0xc45c, CRL_REG_LEN_08BIT, 0x00}, + {0xc45d, CRL_REG_LEN_08BIT, 0x00}, + {0xc45e, CRL_REG_LEN_08BIT, 0x02}, + {0xc45f, CRL_REG_LEN_08BIT, 0x01}, + {0xc460, CRL_REG_LEN_08BIT, 0x01}, + {0xc461, CRL_REG_LEN_08BIT, 0x01}, + {0xc462, CRL_REG_LEN_08BIT, 0x01}, + {0xc464, CRL_REG_LEN_08BIT, 0x88}, + {0xc465, CRL_REG_LEN_08BIT, 0x00}, + {0xc466, CRL_REG_LEN_08BIT, 0x8a}, + {0xc467, CRL_REG_LEN_08BIT, 0x00}, + {0xc468, CRL_REG_LEN_08BIT, 0x86}, + {0xc469, CRL_REG_LEN_08BIT, 0x00}, + {0xc46a, CRL_REG_LEN_08BIT, 0x40}, + {0xc46b, CRL_REG_LEN_08BIT, 0x50}, + {0xc46c, CRL_REG_LEN_08BIT, 0x30}, + {0xc46d, CRL_REG_LEN_08BIT, 0x28}, + {0xc46e, CRL_REG_LEN_08BIT, 0x60}, + {0xc46f, CRL_REG_LEN_08BIT, 0x40}, + {0xc47c, CRL_REG_LEN_08BIT, 0x01}, + {0xc47d, CRL_REG_LEN_08BIT, 0x38}, + {0xc47e, CRL_REG_LEN_08BIT, 0x00}, + {0xc47f, CRL_REG_LEN_08BIT, 0x00}, + {0xc480, CRL_REG_LEN_08BIT, 0x00}, + {0xc481, CRL_REG_LEN_08BIT, 0xff}, + {0xc482, CRL_REG_LEN_08BIT, 0x00}, + {0xc483, CRL_REG_LEN_08BIT, 0x40}, + {0xc484, CRL_REG_LEN_08BIT, 0x00}, + {0xc485, CRL_REG_LEN_08BIT, 0x18}, + {0xc486, CRL_REG_LEN_08BIT, 0x00}, + {0xc487, CRL_REG_LEN_08BIT, 0x18}, + {0xc488, CRL_REG_LEN_08BIT, 0x20}, + {0xc489, CRL_REG_LEN_08BIT, 0x00}, + {0xc48a, CRL_REG_LEN_08BIT, 0x20}, + {0xc48b, CRL_REG_LEN_08BIT, 0x00}, + {0xc48c, CRL_REG_LEN_08BIT, 0x00}, + {0xc48d, CRL_REG_LEN_08BIT, 0x04}, + {0xc48e, CRL_REG_LEN_08BIT, 0x00}, + {0xc48f, CRL_REG_LEN_08BIT, 0x04}, + {0xc490, 
CRL_REG_LEN_08BIT, 0x07}, + {0xc492, CRL_REG_LEN_08BIT, 0x20}, + {0xc493, CRL_REG_LEN_08BIT, 0x08}, + {0xc498, CRL_REG_LEN_08BIT, 0x02}, + {0xc499, CRL_REG_LEN_08BIT, 0x00}, + {0xc49a, CRL_REG_LEN_08BIT, 0x02}, + {0xc49b, CRL_REG_LEN_08BIT, 0x00}, + {0xc49c, CRL_REG_LEN_08BIT, 0x02}, + {0xc49d, CRL_REG_LEN_08BIT, 0x00}, + {0xc49e, CRL_REG_LEN_08BIT, 0x02}, + {0xc49f, CRL_REG_LEN_08BIT, 0x60}, + {0xc4a0, CRL_REG_LEN_08BIT, 0x03}, + {0xc4a1, CRL_REG_LEN_08BIT, 0x00}, + {0xc4a2, CRL_REG_LEN_08BIT, 0x04}, + {0xc4a3, CRL_REG_LEN_08BIT, 0x00}, + {0xc4a4, CRL_REG_LEN_08BIT, 0x00}, + {0xc4a5, CRL_REG_LEN_08BIT, 0x10}, + {0xc4a6, CRL_REG_LEN_08BIT, 0x00}, + {0xc4a7, CRL_REG_LEN_08BIT, 0x40}, + {0xc4a8, CRL_REG_LEN_08BIT, 0x00}, + {0xc4a9, CRL_REG_LEN_08BIT, 0x80}, + {0xc4aa, CRL_REG_LEN_08BIT, 0x0d}, + {0xc4ab, CRL_REG_LEN_08BIT, 0x00}, + {0xc4ac, CRL_REG_LEN_08BIT, 0x03}, + {0xc4ad, CRL_REG_LEN_08BIT, 0xf0}, + {0xc4b4, CRL_REG_LEN_08BIT, 0x01}, + {0xc4b5, CRL_REG_LEN_08BIT, 0x01}, + {0xc4b6, CRL_REG_LEN_08BIT, 0x00}, + {0xc4b7, CRL_REG_LEN_08BIT, 0x01}, + {0xc4b8, CRL_REG_LEN_08BIT, 0x00}, + {0xc4b9, CRL_REG_LEN_08BIT, 0x01}, + {0xc4ba, CRL_REG_LEN_08BIT, 0x01}, + {0xc4bb, CRL_REG_LEN_08BIT, 0x00}, + {0xc4bc, CRL_REG_LEN_08BIT, 0x01}, + {0xc4bd, CRL_REG_LEN_08BIT, 0x60}, + {0xc4be, CRL_REG_LEN_08BIT, 0x02}, + {0xc4bf, CRL_REG_LEN_08BIT, 0x33}, + {0xc4c8, CRL_REG_LEN_08BIT, 0x03}, + {0xc4c9, CRL_REG_LEN_08BIT, 0xd0}, + {0xc4ca, CRL_REG_LEN_08BIT, 0x0e}, + {0xc4cb, CRL_REG_LEN_08BIT, 0x00}, + {0xc4cc, CRL_REG_LEN_08BIT, 0x04}, + {0xc4cd, CRL_REG_LEN_08BIT, 0xd8}, + {0xc4ce, CRL_REG_LEN_08BIT, 0x04}, + {0xc4cf, CRL_REG_LEN_08BIT, 0xd8}, + {0xc4d0, CRL_REG_LEN_08BIT, 0x04}, + {0xc4d1, CRL_REG_LEN_08BIT, 0x80}, + {0xc4e0, CRL_REG_LEN_08BIT, 0x04}, + {0xc4e1, CRL_REG_LEN_08BIT, 0x02}, + {0xc4e2, CRL_REG_LEN_08BIT, 0x01}, + {0xc4e4, CRL_REG_LEN_08BIT, 0x10}, + {0xc4e5, CRL_REG_LEN_08BIT, 0x20}, + {0xc4e6, CRL_REG_LEN_08BIT, 0x30}, + {0xc4e7, CRL_REG_LEN_08BIT, 0x40}, + {0xc4e8, 
CRL_REG_LEN_08BIT, 0x50}, + {0xc4e9, CRL_REG_LEN_08BIT, 0x60}, + {0xc4ea, CRL_REG_LEN_08BIT, 0x70}, + {0xc4eb, CRL_REG_LEN_08BIT, 0x80}, + {0xc4ec, CRL_REG_LEN_08BIT, 0x90}, + {0xc4ed, CRL_REG_LEN_08BIT, 0xa0}, + {0xc4ee, CRL_REG_LEN_08BIT, 0xb0}, + {0xc4ef, CRL_REG_LEN_08BIT, 0xc0}, + {0xc4f0, CRL_REG_LEN_08BIT, 0xd0}, + {0xc4f1, CRL_REG_LEN_08BIT, 0xe0}, + {0xc4f2, CRL_REG_LEN_08BIT, 0xf0}, + {0xc4f3, CRL_REG_LEN_08BIT, 0x80}, + {0xc4f4, CRL_REG_LEN_08BIT, 0x00}, + {0xc4f5, CRL_REG_LEN_08BIT, 0x20}, + {0xc4f6, CRL_REG_LEN_08BIT, 0x02}, + {0xc4f7, CRL_REG_LEN_08BIT, 0x00}, + {0xc4f8, CRL_REG_LEN_08BIT, 0x04}, + {0xc4f9, CRL_REG_LEN_08BIT, 0x0b}, + {0xc4fa, CRL_REG_LEN_08BIT, 0x00}, + {0xc4fb, CRL_REG_LEN_08BIT, 0x00}, + {0xc4fc, CRL_REG_LEN_08BIT, 0x01}, + {0xc4fd, CRL_REG_LEN_08BIT, 0x00}, + {0xc4fe, CRL_REG_LEN_08BIT, 0x04}, + {0xc4ff, CRL_REG_LEN_08BIT, 0x02}, + {0xc500, CRL_REG_LEN_08BIT, 0x48}, + {0xc501, CRL_REG_LEN_08BIT, 0x74}, + {0xc502, CRL_REG_LEN_08BIT, 0x58}, + {0xc503, CRL_REG_LEN_08BIT, 0x80}, + {0xc504, CRL_REG_LEN_08BIT, 0x05}, + {0xc505, CRL_REG_LEN_08BIT, 0x80}, + {0xc506, CRL_REG_LEN_08BIT, 0x03}, + {0xc507, CRL_REG_LEN_08BIT, 0x80}, + {0xc508, CRL_REG_LEN_08BIT, 0x01}, + {0xc509, CRL_REG_LEN_08BIT, 0xc0}, + {0xc50a, CRL_REG_LEN_08BIT, 0x01}, + {0xc50b, CRL_REG_LEN_08BIT, 0xa0}, + {0xc50c, CRL_REG_LEN_08BIT, 0x01}, + {0xc50d, CRL_REG_LEN_08BIT, 0x2c}, + {0xc50e, CRL_REG_LEN_08BIT, 0x01}, + {0xc50f, CRL_REG_LEN_08BIT, 0x0a}, + {0xc510, CRL_REG_LEN_08BIT, 0x00}, + {0xc511, CRL_REG_LEN_08BIT, 0x00}, + {0xc512, CRL_REG_LEN_08BIT, 0x4d}, + {0xc513, CRL_REG_LEN_08BIT, 0x84}, + {0xc514, CRL_REG_LEN_08BIT, 0x04}, + {0xc515, CRL_REG_LEN_08BIT, 0x00}, + {0xc518, CRL_REG_LEN_08BIT, 0x03}, + {0xc519, CRL_REG_LEN_08BIT, 0x48}, + {0xc51a, CRL_REG_LEN_08BIT, 0x07}, + {0xc51b, CRL_REG_LEN_08BIT, 0x70}, + {0xc2e0, CRL_REG_LEN_08BIT, 0x00}, + {0xc2e1, CRL_REG_LEN_08BIT, 0x51}, + {0xc2e2, CRL_REG_LEN_08BIT, 0x00}, + {0xc2e3, CRL_REG_LEN_08BIT, 0xd6}, + {0xc2e4, 
CRL_REG_LEN_08BIT, 0x01}, + {0xc2e5, CRL_REG_LEN_08BIT, 0x5e}, + {0xc2e9, CRL_REG_LEN_08BIT, 0x01}, + {0xc2ea, CRL_REG_LEN_08BIT, 0x7a}, + {0xc2eb, CRL_REG_LEN_08BIT, 0x90}, + {0xc2ed, CRL_REG_LEN_08BIT, 0x00}, + {0xc2ee, CRL_REG_LEN_08BIT, 0x7a}, + {0xc2ef, CRL_REG_LEN_08BIT, 0x64}, + {0xc308, CRL_REG_LEN_08BIT, 0x00}, + {0xc309, CRL_REG_LEN_08BIT, 0x00}, + {0xc30a, CRL_REG_LEN_08BIT, 0x00}, + {0xc30c, CRL_REG_LEN_08BIT, 0x00}, + {0xc30d, CRL_REG_LEN_08BIT, 0x01}, + {0xc30e, CRL_REG_LEN_08BIT, 0x00}, + {0xc30f, CRL_REG_LEN_08BIT, 0x00}, + {0xc310, CRL_REG_LEN_08BIT, 0x01}, + {0xc311, CRL_REG_LEN_08BIT, 0x60}, + {0xc312, CRL_REG_LEN_08BIT, 0xff}, + {0xc313, CRL_REG_LEN_08BIT, 0x08}, + {0xc314, CRL_REG_LEN_08BIT, 0x01}, + {0xc315, CRL_REG_LEN_08BIT, 0x7f}, + {0xc316, CRL_REG_LEN_08BIT, 0xff}, + {0xc317, CRL_REG_LEN_08BIT, 0x0b}, + {0xc318, CRL_REG_LEN_08BIT, 0x00}, + {0xc319, CRL_REG_LEN_08BIT, 0x0c}, + {0xc31a, CRL_REG_LEN_08BIT, 0x00}, + {0xc31b, CRL_REG_LEN_08BIT, 0xe0}, + {0xc31c, CRL_REG_LEN_08BIT, 0x00}, + {0xc31d, CRL_REG_LEN_08BIT, 0x14}, + {0xc31e, CRL_REG_LEN_08BIT, 0x00}, + {0xc31f, CRL_REG_LEN_08BIT, 0xc5}, + {0xc320, CRL_REG_LEN_08BIT, 0xff}, + {0xc321, CRL_REG_LEN_08BIT, 0x4b}, + {0xc322, CRL_REG_LEN_08BIT, 0xff}, + {0xc323, CRL_REG_LEN_08BIT, 0xf0}, + {0xc324, CRL_REG_LEN_08BIT, 0xff}, + {0xc325, CRL_REG_LEN_08BIT, 0xe8}, + {0xc326, CRL_REG_LEN_08BIT, 0x00}, + {0xc327, CRL_REG_LEN_08BIT, 0x46}, + {0xc328, CRL_REG_LEN_08BIT, 0xff}, + {0xc329, CRL_REG_LEN_08BIT, 0xd2}, + {0xc32a, CRL_REG_LEN_08BIT, 0xff}, + {0xc32b, CRL_REG_LEN_08BIT, 0xe4}, + {0xc32c, CRL_REG_LEN_08BIT, 0xff}, + {0xc32d, CRL_REG_LEN_08BIT, 0xbb}, + {0xc32e, CRL_REG_LEN_08BIT, 0x00}, + {0xc32f, CRL_REG_LEN_08BIT, 0x61}, + {0xc330, CRL_REG_LEN_08BIT, 0xff}, + {0xc331, CRL_REG_LEN_08BIT, 0xf9}, + {0xc332, CRL_REG_LEN_08BIT, 0x00}, + {0xc333, CRL_REG_LEN_08BIT, 0xd9}, + {0xc334, CRL_REG_LEN_08BIT, 0x00}, + {0xc335, CRL_REG_LEN_08BIT, 0x2e}, + {0xc336, CRL_REG_LEN_08BIT, 0x00}, + {0xc337, 
CRL_REG_LEN_08BIT, 0xb1}, + {0xc338, CRL_REG_LEN_08BIT, 0xff}, + {0xc339, CRL_REG_LEN_08BIT, 0x64}, + {0xc33a, CRL_REG_LEN_08BIT, 0xff}, + {0xc33b, CRL_REG_LEN_08BIT, 0xeb}, + {0xc33c, CRL_REG_LEN_08BIT, 0xff}, + {0xc33d, CRL_REG_LEN_08BIT, 0xe8}, + {0xc33e, CRL_REG_LEN_08BIT, 0x00}, + {0xc33f, CRL_REG_LEN_08BIT, 0x48}, + {0xc340, CRL_REG_LEN_08BIT, 0xff}, + {0xc341, CRL_REG_LEN_08BIT, 0xd0}, + {0xc342, CRL_REG_LEN_08BIT, 0xff}, + {0xc343, CRL_REG_LEN_08BIT, 0xed}, + {0xc344, CRL_REG_LEN_08BIT, 0xff}, + {0xc345, CRL_REG_LEN_08BIT, 0xad}, + {0xc346, CRL_REG_LEN_08BIT, 0x00}, + {0xc347, CRL_REG_LEN_08BIT, 0x66}, + {0xc348, CRL_REG_LEN_08BIT, 0x01}, + {0xc349, CRL_REG_LEN_08BIT, 0x00}, + {0x6700, CRL_REG_LEN_08BIT, 0x04}, + {0x6701, CRL_REG_LEN_08BIT, 0x7b}, + {0x6702, CRL_REG_LEN_08BIT, 0xfd}, + {0x6703, CRL_REG_LEN_08BIT, 0xf9}, + {0x6704, CRL_REG_LEN_08BIT, 0x3d}, + {0x6705, CRL_REG_LEN_08BIT, 0x71}, + {0x6706, CRL_REG_LEN_08BIT, 0x78}, + {0x6708, CRL_REG_LEN_08BIT, 0x05}, + {0x6f06, CRL_REG_LEN_08BIT, 0x6f}, + {0x6f07, CRL_REG_LEN_08BIT, 0x00}, + {0x6f0a, CRL_REG_LEN_08BIT, 0x6f}, + {0x6f0b, CRL_REG_LEN_08BIT, 0x00}, + {0x6f00, CRL_REG_LEN_08BIT, 0x03}, + {0xc34c, CRL_REG_LEN_08BIT, 0x01}, + {0xc34d, CRL_REG_LEN_08BIT, 0x00}, + {0xc34e, CRL_REG_LEN_08BIT, 0x46}, + {0xc34f, CRL_REG_LEN_08BIT, 0x55}, + {0xc350, CRL_REG_LEN_08BIT, 0x00}, + {0xc351, CRL_REG_LEN_08BIT, 0x40}, + {0xc352, CRL_REG_LEN_08BIT, 0x00}, + {0xc353, CRL_REG_LEN_08BIT, 0xff}, + {0xc354, CRL_REG_LEN_08BIT, 0x04}, + {0xc355, CRL_REG_LEN_08BIT, 0x08}, + {0xc356, CRL_REG_LEN_08BIT, 0x01}, + {0xc357, CRL_REG_LEN_08BIT, 0xef}, + {0xc358, CRL_REG_LEN_08BIT, 0x30}, + {0xc359, CRL_REG_LEN_08BIT, 0x01}, + {0xc35a, CRL_REG_LEN_08BIT, 0x64}, + {0xc35b, CRL_REG_LEN_08BIT, 0x46}, + {0xc35c, CRL_REG_LEN_08BIT, 0x00}, + {0x3621, CRL_REG_LEN_08BIT, 0x73}, + {0x3702, CRL_REG_LEN_08BIT, 0x20}, + {0x3703, CRL_REG_LEN_08BIT, 0x48}, + {0x3704, CRL_REG_LEN_08BIT, 0x32}, + {0x3800, CRL_REG_LEN_08BIT, 0x00}, + {0x3801, 
CRL_REG_LEN_08BIT, 0x00}, + {0x3802, CRL_REG_LEN_08BIT, 0x00}, + {0x3803, CRL_REG_LEN_08BIT, 0xA4}, + {0x3804, CRL_REG_LEN_08BIT, 0x00}, + {0x3805, CRL_REG_LEN_08BIT, 0xFF}, + {0x3806, CRL_REG_LEN_08BIT, 0x02}, + {0x3807, CRL_REG_LEN_08BIT, 0x89}, + {0x3808, CRL_REG_LEN_08BIT, 0x02}, + {0x3809, CRL_REG_LEN_08BIT, 0x80}, + {0x380a, CRL_REG_LEN_08BIT, 0x01}, + {0x380b, CRL_REG_LEN_08BIT, 0xE0}, + {0x380c, CRL_REG_LEN_08BIT, 0x04}, + {0x380d, CRL_REG_LEN_08BIT, 0xAC}, + {0x6e42, CRL_REG_LEN_08BIT, 0x05}, + {0x6e43, CRL_REG_LEN_08BIT, 0x3A}, + {0x3810, CRL_REG_LEN_08BIT, 0x00}, + {0x3811, CRL_REG_LEN_08BIT, 0x08}, + {0x3812, CRL_REG_LEN_08BIT, 0x00}, + {0x3813, CRL_REG_LEN_08BIT, 0x02}, + {0x381c, CRL_REG_LEN_08BIT, 0x00}, + {0x381e, CRL_REG_LEN_08BIT, 0x00}, + {0x381f, CRL_REG_LEN_08BIT, 0x0C}, + {0x4001, CRL_REG_LEN_08BIT, 0x06}, + {0x4004, CRL_REG_LEN_08BIT, 0x04}, + {0x4050, CRL_REG_LEN_08BIT, 0x22}, + {0x4051, CRL_REG_LEN_08BIT, 0x24}, + {0x4605, CRL_REG_LEN_08BIT, 0x08}, + {0x4606, CRL_REG_LEN_08BIT, 0x09}, + {0x4607, CRL_REG_LEN_08BIT, 0x58}, + {0xc488, CRL_REG_LEN_08BIT, 0x53}, + {0xc489, CRL_REG_LEN_08BIT, 0x20}, + {0xc48a, CRL_REG_LEN_08BIT, 0x53}, + {0xc48b, CRL_REG_LEN_08BIT, 0x20}, + {0xc4cc, CRL_REG_LEN_08BIT, 0x04}, + {0xc4cd, CRL_REG_LEN_08BIT, 0xD8}, + {0xc4ce, CRL_REG_LEN_08BIT, 0x04}, + {0xc4cf, CRL_REG_LEN_08BIT, 0xD8}, + {0xc510, CRL_REG_LEN_08BIT, 0x00}, + {0xc511, CRL_REG_LEN_08BIT, 0x00}, + {0xc512, CRL_REG_LEN_08BIT, 0x4D}, + {0xc513, CRL_REG_LEN_08BIT, 0x84}, + {0x5005, CRL_REG_LEN_08BIT, 0x08}, + {0x3007, CRL_REG_LEN_08BIT, 0x01}, + {0xc518, CRL_REG_LEN_08BIT, 0x05}, + {0xc519, CRL_REG_LEN_08BIT, 0x3A}, + {0xc51a, CRL_REG_LEN_08BIT, 0x04}, + {0xc51b, CRL_REG_LEN_08BIT, 0xAC}, + {0x5608, CRL_REG_LEN_08BIT, 0x15}, + {0x3815, CRL_REG_LEN_08BIT, 0x8C}, + {0x3042, CRL_REG_LEN_08BIT, 0xf0}, + {0x3042, CRL_REG_LEN_08BIT, 0xf0}, + {0x3042, CRL_REG_LEN_08BIT, 0xf0}, + {0x3042, CRL_REG_LEN_08BIT, 0xf0}, + {0x3042, CRL_REG_LEN_08BIT, 0xf0}, + {0x3042, 
CRL_REG_LEN_08BIT, 0xf0}, + {0x3042, CRL_REG_LEN_08BIT, 0xf0}, + {0x3042, CRL_REG_LEN_08BIT, 0xf0}, + {0x3042, CRL_REG_LEN_08BIT, 0xf0}, + {0x3042, CRL_REG_LEN_08BIT, 0xf0}, + {0x3042, CRL_REG_LEN_08BIT, 0xf0}, + {0x3042, CRL_REG_LEN_08BIT, 0xf0}, + {0x3042, CRL_REG_LEN_08BIT, 0xf0}, + {0x3042, CRL_REG_LEN_08BIT, 0xf0}, + {0x3042, CRL_REG_LEN_08BIT, 0xf0}, + {0x3042, CRL_REG_LEN_08BIT, 0xf0}, + {0x3042, CRL_REG_LEN_08BIT, 0xf0}, + {0x3042, CRL_REG_LEN_08BIT, 0xf0}, + {0x3042, CRL_REG_LEN_08BIT, 0xf0}, + {0x3042, CRL_REG_LEN_08BIT, 0xf0}, + {0x3042, CRL_REG_LEN_08BIT, 0xf0}, + {0x3042, CRL_REG_LEN_08BIT, 0xf0}, + {0x3042, CRL_REG_LEN_08BIT, 0xf0}, + {0x3042, CRL_REG_LEN_08BIT, 0xf0}, + {0x3042, CRL_REG_LEN_08BIT, 0xf0}, + {0x3042, CRL_REG_LEN_08BIT, 0xf0}, + {0x302e, CRL_REG_LEN_08BIT, 0x00}, + {0x301b, CRL_REG_LEN_08BIT, 0xf0}, + {0x301c, CRL_REG_LEN_08BIT, 0xf0}, + {0x301a, CRL_REG_LEN_08BIT, 0xf0}, + {0xceb0, CRL_REG_LEN_08BIT, 0x00}, + {0xceb1, CRL_REG_LEN_08BIT, 0x00}, + {0xceb2, CRL_REG_LEN_08BIT, 0x00}, + {0xceb3, CRL_REG_LEN_08BIT, 0x00}, + {0xceb4, CRL_REG_LEN_08BIT, 0x00}, + {0xceb5, CRL_REG_LEN_08BIT, 0x00}, + {0xceb6, CRL_REG_LEN_08BIT, 0x00}, + {0xceb7, CRL_REG_LEN_08BIT, 0x00}, + {0xc4bc, CRL_REG_LEN_08BIT, 0x01}, + {0xc4bd, CRL_REG_LEN_08BIT, 0x60}, +}; + +static struct crl_dynamic_register_access ov10635_h_flip_regs[] = { + { + .address = 0x381d, + .len = CRL_REG_LEN_08BIT | CRL_REG_READ_AND_UPDATE, + .ops_items = 0, + .ops = 0, + .mask = 0x3, + } +}; + +static struct crl_dynamic_register_access ov10635_v_flip_regs[] = { + { + .address = 0x381c, + .len = CRL_REG_LEN_08BIT | CRL_REG_READ_AND_UPDATE, + .ops_items = 0, + .ops = 0, + .mask = 0xc0, + } +}; + +/* Needed for acpi support for runtime detection */ +static struct crl_sensor_detect_config ov10635_sensor_detect_regset[] = { + { + .reg = { 0x300A, CRL_REG_LEN_08BIT, 0x000000ff }, + .width = 8, + }, + { + .reg = { 0x300B, CRL_REG_LEN_08BIT, 0x000000ff }, + .width = 8, + } +}; + +static struct 
crl_pll_configuration ov10635_pll_configurations[] = { + { + .input_clk = 24000000, + .op_sys_clk = 400000000, + .bitsperpixel = 16, + .pixel_rate_csi = 529000000, + .pixel_rate_pa = 529000000, /* pixel_rate = MIPICLK*2 *4/12 */ + .csi_lanes = 4, + .comp_items = 0, + .ctrl_data = 0, + .pll_regs_items = 0, + .pll_regs = NULL, + }, + { + .input_clk = 24000000, + .op_sys_clk = 400000000, + .bitsperpixel = 10, + .pixel_rate_csi = 529000000, + .pixel_rate_pa = 529000000, /* pixel_rate = MIPICLK*2 *4/12 */ + .csi_lanes = 4, + .comp_items = 0, + .ctrl_data = 0, + .pll_regs_items = 0, + .pll_regs = NULL, + }, + { + .input_clk = 24000000, + .op_sys_clk = 400000000, + .bitsperpixel = 20, + .pixel_rate_csi = 529000000, + .pixel_rate_pa = 529000000, /* pixel_rate = MIPICLK*2 *4/12 */ + .csi_lanes = 4, + .comp_items = 0, + .ctrl_data = 0, + .pll_regs_items = 0, + .pll_regs = NULL, + } +}; + +static struct crl_subdev_rect_rep ov10635_1280_800_rects[] = { + { + .subdev_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .in_rect.left = 0, + .in_rect.top = 0, + .in_rect.width = 1280, + .in_rect.height = 800, + .out_rect.left = 0, + .out_rect.top = 0, + .out_rect.width = 1280, + .out_rect.height = 800, + }, + { + .subdev_type = CRL_SUBDEV_TYPE_BINNER, + .in_rect.left = 0, + .in_rect.top = 0, + .in_rect.width = 1280, + .in_rect.height = 800, + .out_rect.left = 0, + .out_rect.top = 0, + .out_rect.width = 1280, + .out_rect.height = 800, + }, +}; + +static struct crl_subdev_rect_rep ov10635_1280_720_rects_BT656[] = { + { + .subdev_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .in_rect.left = 0, + .in_rect.top = 0, + .in_rect.width = 1280, + .in_rect.height = 720, + .out_rect.left = 0, + .out_rect.top = 0, + .out_rect.width = 1280, + .out_rect.height = 720, + }, + { + .subdev_type = CRL_SUBDEV_TYPE_BINNER, + .in_rect.left = 0, + .in_rect.top = 0, + .in_rect.width = 1280, + .in_rect.height = 720, + .out_rect.left = 0, + .out_rect.top = 0, + .out_rect.width = 1280, + .out_rect.height = 720, + }, +}; + +static 
struct crl_subdev_rect_rep ov10635_640_480_rects[] = { + { + .subdev_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .in_rect.left = 0, + .in_rect.top = 0, + .in_rect.width = 1280, + .in_rect.height = 800, + .out_rect.left = 0, + .out_rect.top = 0, + .out_rect.width = 1280, + .out_rect.height = 800, + }, + { + .subdev_type = CRL_SUBDEV_TYPE_BINNER, + .in_rect.left = 0, + .in_rect.top = 0, + .in_rect.width = 1280, + .in_rect.height = 800, + .out_rect.left = 0, + .out_rect.top = 0, + .out_rect.width = 640, + .out_rect.height = 480, + }, +}; + +static struct crl_register_write_rep ov10635_powerup_regs[] = { + {OV10635_REG_RESET, CRL_REG_LEN_08BIT, 0x01}, + {0x300c, CRL_REG_LEN_08BIT, 0x61}, + {0x300c, CRL_REG_LEN_08BIT, 0x61}, + {0x300c, CRL_REG_LEN_08BIT, 0x61}, + {0x300c, CRL_REG_LEN_08BIT, 0x61}, + {0x300c, CRL_REG_LEN_08BIT, 0x61}, + {0x300c, CRL_REG_LEN_08BIT, 0x61}, + {0x300c, CRL_REG_LEN_08BIT, 0x61}, + {0x300c, CRL_REG_LEN_08BIT, 0x61}, + {0x300c, CRL_REG_LEN_08BIT, 0x61}, + {0x300c, CRL_REG_LEN_08BIT, 0x61}, + {0x300c, CRL_REG_LEN_08BIT, 0x61}, + {0x300c, CRL_REG_LEN_08BIT, 0x61}, + {0x300c, CRL_REG_LEN_08BIT, 0x61}, + {0x300c, CRL_REG_LEN_08BIT, 0x61}, + {0x300c, CRL_REG_LEN_08BIT, 0x61}, + {0x300c, CRL_REG_LEN_08BIT, 0x61}, + {0x300c, CRL_REG_LEN_08BIT, 0x61}, + {0x300c, CRL_REG_LEN_08BIT, 0x61}, + {0x300c, CRL_REG_LEN_08BIT, 0x61}, + {0x300c, CRL_REG_LEN_08BIT, 0x61}, + {0x300c, CRL_REG_LEN_08BIT, 0x61}, + {0x300c, CRL_REG_LEN_08BIT, 0x61}, + {0x300c, CRL_REG_LEN_08BIT, 0x61}, +}; + +static struct crl_register_write_rep ov10635_poweroff_regs[] = { + {OV10635_REG_RESET, CRL_REG_LEN_08BIT, 0x01}, +}; + +static struct crl_power_seq_entity ov10635_power_items[] = { + { + .type = CRL_POWER_ETY_GPIO_FROM_PDATA, + .val = 0x33, + .undo_val = 0x22, + }, +}; + +static struct crl_mode_rep ov10635_modes[] = { + { + .sd_rects_items = ARRAY_SIZE(ov10635_1280_800_rects), + .sd_rects = ov10635_1280_800_rects, + .binn_hor = 1, + .binn_vert = 1, + .scale_m = 1, + .width = 1280, + 
.height = 800, + .min_llp = 2250, + .min_fll = 1320, + .mode_regs_items = ARRAY_SIZE(ov10635_1280_800_YUV_HDR), + .mode_regs = ov10635_1280_800_YUV_HDR, + }, + { + .sd_rects_items = ARRAY_SIZE(ov10635_1280_720_rects_BT656), + .sd_rects = ov10635_1280_720_rects_BT656, + .binn_hor = 1, + .binn_vert = 1, + .scale_m = 1, + .width = 1280, + .height = 720, + .min_llp = 2250, + .min_fll = 1320, + .mode_regs_items = ARRAY_SIZE(ov10635_1280_720_YUV_HDR_BT656), + .mode_regs = ov10635_1280_720_YUV_HDR_BT656, + }, + { + .sd_rects_items = ARRAY_SIZE(ov10635_640_480_rects), + .sd_rects = ov10635_640_480_rects, + .binn_hor = 2, + .binn_vert = 1, + .scale_m = 1, + .width = 640, + .height = 480, + .min_llp = 2250, + .min_fll = 1320, + .mode_regs_items = ARRAY_SIZE(ov10635_640_480_YUV_HDR), + .mode_regs = ov10635_640_480_YUV_HDR, + }, +}; + +static struct crl_sensor_subdev_config ov10635_sensor_subdevs[] = { + { + .subdev_type = CRL_SUBDEV_TYPE_BINNER, + .name = "ov10635 binner", + }, + { + .subdev_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .name = "ov10635 pixel array", + } +}; + +static struct crl_sensor_limits ov10635_sensor_limits = { + .x_addr_min = 0, + .y_addr_min = 0, + .x_addr_max = 1280, + .y_addr_max = 800, + .min_frame_length_lines = 240, + .max_frame_length_lines = 65535, + .min_line_length_pixels = 320, + .max_line_length_pixels = 32752, +}; + +static struct crl_flip_data ov10635_flip_configurations[] = { + { + .flip = CRL_FLIP_DEFAULT_NONE, + .pixel_order = CRL_PIXEL_ORDER_IGNORE, + }, + { + .flip = CRL_FLIP_HFLIP, + .pixel_order = CRL_PIXEL_ORDER_IGNORE, + }, + { + .flip = CRL_FLIP_VFLIP, + .pixel_order = CRL_PIXEL_ORDER_IGNORE, + }, + { + .flip = CRL_FLIP_HFLIP_VFLIP, + .pixel_order = CRL_PIXEL_ORDER_IGNORE, + } +}; + +static struct crl_register_write_rep ov10635_yuyv_regs[] = { + {0x4300, CRL_REG_LEN_08BIT, 0x38}, +}; + +static struct crl_register_write_rep ov10635_uyvy_regs[] = { + {0x4300, CRL_REG_LEN_08BIT, 0x3a}, +}; + +static struct crl_csi_data_fmt 
ov10635_crl_csi_data_fmt[] = { + + { + .code = ICI_FORMAT_YUYV, + .pixel_order = CRL_PIXEL_ORDER_IGNORE, + .bits_per_pixel = 16, + .regs_items = ARRAY_SIZE(ov10635_yuyv_regs), + .regs = ov10635_yuyv_regs, + }, + + { + .code = ICI_FORMAT_UYVY, + .pixel_order = CRL_PIXEL_ORDER_IGNORE, + .bits_per_pixel = 16, + .regs_items = ARRAY_SIZE(ov10635_uyvy_regs), + .regs = ov10635_uyvy_regs, + }, +}; + +static struct crl_ctrl_data ov10635_ctrls[] = { + { + .sd_type = CRL_SUBDEV_TYPE_BINNER, + .op_type = CRL_CTRL_SET_OP, + .context = SENSOR_IDLE, + .ctrl_id = ICI_EXT_SD_PARAM_ID_LINK_FREQ, + .name = "CTRL_ID_LINK_FREQ", + .type = CRL_CTRL_TYPE_MENU_INT, + .data.int_menu.def = 0, + .data.int_menu.max = 0, + .data.int_menu.menu = 0, + .flags = 0, + .impact = CRL_IMPACTS_NO_IMPACT, + .regs_items = 0, + .regs = 0, + .dep_items = 0, + .dep_ctrls = 0, + }, + { + .sd_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .op_type = CRL_CTRL_GET_OP, + .context = SENSOR_POWERED_ON, + .ctrl_id = ICI_EXT_SD_PARAM_ID_PIXEL_RATE, + .name = "CTRL_ID_PIXEL_RATE_PA", + .type = CRL_CTRL_TYPE_INTEGER, + .data.std_data.min = 0, + .data.std_data.max = INT_MAX, + .data.std_data.step = 1, + .data.std_data.def = 0, + .flags = 0, + .impact = CRL_IMPACTS_NO_IMPACT, + .regs_items = 0, + .regs = 0, + .dep_items = 0, + .dep_ctrls = 0, + }, + { + .sd_type = CRL_SUBDEV_TYPE_BINNER, + .op_type = CRL_CTRL_GET_OP, + .context = SENSOR_POWERED_ON, + .ctrl_id = ICI_EXT_SD_PARAM_ID_PIXEL_RATE, + .name = "CTRL_ID_PIXEL_RATE_CSI", + .type = CRL_CTRL_TYPE_INTEGER, + .data.std_data.min = 0, + .data.std_data.max = INT_MAX, + .data.std_data.step = 1, + .data.std_data.def = 0, + .flags = 0, + .impact = CRL_IMPACTS_NO_IMPACT, + .regs_items = 0, + .regs = 0, + .dep_items = 0, + .dep_ctrls = 0, + }, + { + .sd_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .op_type = CRL_CTRL_SET_OP, + .context = SENSOR_POWERED_ON, + .ctrl_id = ICI_EXT_SD_PARAM_ID_HFLIP, + .name = "CTRL_ID_HFLIP", + .type = CRL_CTRL_TYPE_INTEGER, + .data.std_data.min = 0, + 
.data.std_data.max = 1, + .data.std_data.step = 1, + .data.std_data.def = 0, + .flags = 0, + .impact = CRL_IMPACTS_NO_IMPACT, + // .ctrl = 0, + .regs_items = ARRAY_SIZE(ov10635_h_flip_regs), + .regs = ov10635_h_flip_regs, + .dep_items = 0, + .dep_ctrls = 0, + }, + { + .sd_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .op_type = CRL_CTRL_SET_OP, + .context = SENSOR_POWERED_ON, + .ctrl_id = ICI_EXT_SD_PARAM_ID_VFLIP, + .name = "CTRL_ID_VFLIP", + .type = CRL_CTRL_TYPE_INTEGER, + .data.std_data.min = 0, + .data.std_data.max = 1, + .data.std_data.step = 1, + .data.std_data.def = 0, + .flags = 0, + .impact = CRL_IMPACTS_NO_IMPACT, + // .ctrl = 0, + .regs_items = ARRAY_SIZE(ov10635_v_flip_regs), + .regs = ov10635_v_flip_regs, + .dep_items = 0, + .dep_ctrls = 0, + }, +}; + +struct crl_sensor_configuration ov10635_crl_configuration = { + + .powerup_regs_items = ARRAY_SIZE(ov10635_powerup_regs), + .powerup_regs = ov10635_powerup_regs, + + .poweroff_regs_items = ARRAY_SIZE(ov10635_poweroff_regs), + .poweroff_regs = ov10635_poweroff_regs, + + .power_items = ARRAY_SIZE(ov10635_power_items), + .power_entities = ov10635_power_items, + + .id_reg_items = ARRAY_SIZE(ov10635_sensor_detect_regset), + .id_regs = ov10635_sensor_detect_regset, + + .subdev_items = ARRAY_SIZE(ov10635_sensor_subdevs), + .subdevs = ov10635_sensor_subdevs, + + .pll_config_items = ARRAY_SIZE(ov10635_pll_configurations), + .pll_configs = ov10635_pll_configurations, + + .sensor_limits = &ov10635_sensor_limits, + + .modes_items = ARRAY_SIZE(ov10635_modes), + .modes = ov10635_modes, + + .streamon_regs_items = 0, + .streamon_regs = 0, + + .streamoff_regs_items = 0, + .streamoff_regs = 0, + + .ctrl_items = ARRAY_SIZE(ov10635_ctrls), + .ctrl_bank = ov10635_ctrls, + + .csi_fmts_items = ARRAY_SIZE(ov10635_crl_csi_data_fmt), + .csi_fmts = ov10635_crl_csi_data_fmt, + + .flip_items = ARRAY_SIZE(ov10635_flip_configurations), + .flip_data = ov10635_flip_configurations, +}; + +#endif /* __CRLMODULE_OV10635_CONFIGURATION_H_ */ diff 
--git a/drivers/media/i2c/crlmodule-lite/crlmodule-core.c b/drivers/media/i2c/crlmodule-lite/crlmodule-core.c new file mode 100644 index 0000000000000..6120656258530 --- /dev/null +++ b/drivers/media/i2c/crlmodule-lite/crlmodule-core.c @@ -0,0 +1,2740 @@ +// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0) +/* + * Copyright (C) 2018 Intel Corporation + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#include "crlmodule.h" +#include "crlmodule-nvm.h" +#include "crlmodule-regs.h" +#include "crlmodule-msrlist.h" + +static int init_ext_sd(struct i2c_client *client, + struct crl_subdev *ssd, int idx); +static void crlmodule_update_current_mode(struct crl_sensor *sensor); + +static int __crlmodule_get_variable_ref(struct crl_sensor *sensor, + enum crl_member_data_reference_ids ref, + u32 *val) +{ + switch (ref) { + case CRL_VAR_REF_OUTPUT_WIDTH: + *val = sensor->src->crop[CRL_PAD_SRC].width; + break; + case CRL_VAR_REF_OUTPUT_HEIGHT: + *val = sensor->src->crop[CRL_PAD_SRC].height; + break; + case CRL_VAR_REF_BITSPERPIXEL: + *val = sensor->sensor_ds->csi_fmts[ + sensor->fmt_index].bits_per_pixel; + break; + default: + return -EINVAL; + }; + + return 0; +} + +/* + * Get the data format index from the configuration definition data + */ +static int __crlmodule_get_data_fmt_index(struct crl_sensor *sensor, + u32 code) +{ + unsigned int i; + + for (i = 0; i < sensor->sensor_ds->csi_fmts_items; i++) { + if (sensor->sensor_ds->csi_fmts[i].code == code) + return i; + } + + return -EINVAL; +} + +/* + * Find the index of the ctrl pointer from the array of ctrls + * maintained by the CRL module based on the ctrl id. 
+ */ +static int __crlmodule_get_crl_ctrl_index(struct crl_sensor *sensor, + u32 id, unsigned int *index) +{ + unsigned int i; + + for (i = 0; i < sensor->sensor_ds->ctrl_items; i++) + if (sensor->ctrl_bank[i].ctrl_id == id) + break; + + if (i >= sensor->sensor_ds->ctrl_items) + return -EINVAL; + + *index = i; + return 0; +} + +/* + * Finds the value of a specific ctrl based on the ctrl-id + */ +static int __crlmodule_get_param_value(struct crl_sensor *sensor, + u32 id, u32 *val) +{ + struct i2c_client *client = sensor->src->sd.client; + unsigned int i; + int ret; + struct ici_ext_sd_param* param; + + ret = __crlmodule_get_crl_ctrl_index(sensor, id, &i); + if (ret) + return ret; + + /* If no corresponding ctrl created, return */ + if (sensor->ctrl_bank[i].param.id != id) { + dev_dbg(&client->dev, + "%s ctrl_id: 0x%x desc: %s not ready\n", __func__, id, + sensor->ctrl_bank[i].name); + return -ENODATA; + } + + param = &sensor->ctrl_bank[i].param; + switch (sensor->ctrl_bank[i].type) { + case CRL_CTRL_TYPE_MENU_INT: + if (param->val <= sensor->ctrl_bank[i].data.int_menu.max) + *val = sensor->ctrl_bank[i].data.int_menu.menu[param->val]; + else + *val = 0; + break; + case CRL_CTRL_TYPE_INTEGER: + default: + *val = param->val; + break; + } + + + dev_dbg(&client->dev, "%s ctrl_id: 0x%x desc: %s val: %d\n", + __func__, id, + sensor->ctrl_bank[i].name, *val); + return 0; +} + +/* + * Finds the v4l2 based on the control id + */ +static struct crl_ctrl_data *__crlmodule_get_ctrl( + struct crl_sensor *sensor, + u32 id) +{ + unsigned int i; + + if (__crlmodule_get_crl_ctrl_index(sensor, id, &i)) + return NULL; + + return &sensor->ctrl_bank[i]; +} + +/* + * Grab / Release controls based on the ctrl update context + */ +static void __crlmodule_enable_param(struct crl_sensor *sensor, + enum crl_ctrl_update_context ctxt, + bool enable) +{ + struct crl_ctrl_data *crl_ctrl; + unsigned int i; + + for (i = 0; i < sensor->sensor_ds->ctrl_items; i++) { + crl_ctrl = &sensor->ctrl_bank[i]; 
+ + if (crl_ctrl->context == ctxt) + crl_ctrl->enabled = enable; + } +} + +/* + * Checks if the ctrl sepecific data is satisfied in the mode and PLL + * selection logic. + */ +static bool __crlmodule_compare_ctrl_specific_data( + struct crl_sensor *sensor, + unsigned int items, + struct crl_ctrl_data_pair *ctrl_val) +{ + struct i2c_client *client = sensor->src->sd.client; + unsigned int i; + u32 val; + int ret; + + /* Go through all the controls associated with this config */ + for (i = 0; i < items; i++) { + /* Get the value set for the control */ + ret = __crlmodule_get_param_value(sensor, ctrl_val[i].ctrl_id, + &val); + if (ret) { + dev_err(&client->dev, "%s ctrl_id: 0x%x not found\n", + __func__, ctrl_val[i].ctrl_id); + return false; + } + + /* Compare the value from the sensor definition file config */ + if (val != ctrl_val[i].data) { + dev_err(&client->dev, + "%s ctrl_id: 0x%x value not match %d != %d\n", + __func__, ctrl_val[i].ctrl_id, val, + ctrl_val[i].data); + return false; + } + } + + dev_dbg(&client->dev, "%s success\n", __func__); + return true; +} + +/* + * Finds the correct PLL settings index based on the parameters + */ +static int __crlmodule_update_pll_index(struct crl_sensor *sensor, + struct crl_ctrl_data *crl_ctrl) +{ + struct i2c_client *client = sensor->src->sd.client; + const struct crl_pll_configuration *pll_config; + const struct crl_csi_data_fmt *fmts = + &sensor->sensor_ds->csi_fmts[sensor->fmt_index]; + unsigned int i; + u32 link_freq = 0; + + if (!sensor->link_freq || + sensor->link_freq->type != CRL_CTRL_TYPE_MENU_INT) { + dev_err(&client->dev, "%s Invalid link freq ctrl\n", + __func__); + return -EINVAL; + } + + sensor->link_freq->param.val = crl_ctrl->param.val; + if (crl_ctrl->param.val <= + sensor->link_freq->data.int_menu.max) { + link_freq =sensor->link_freq->data.int_menu.menu[ + crl_ctrl->param.val]; + } + + dev_dbg(&client->dev, "%s PLL Items: %d link_freq: %d\n", + __func__, sensor->sensor_ds->pll_config_items, + 
link_freq); + + for (i = 0; i < sensor->sensor_ds->pll_config_items; i++) { + pll_config = &sensor->sensor_ds->pll_configs[i]; + + if (pll_config->op_sys_clk != link_freq) + continue; + + if (pll_config->input_clk != sensor->platform_data->ext_clk) + continue; + + /* if pll_config->csi_lanes == 0, lanes do not matter */ + if (pll_config->csi_lanes) + if (sensor->platform_data->lanes != pll_config->csi_lanes) + continue; + + /* PLL config must match to bpps*/ + if (fmts->bits_per_pixel != pll_config->bitsperpixel) + continue; + + /* Check if there are any dynamic compare items */ + if (sensor->ext_ctrl_impacts_pll_selection && + !__crlmodule_compare_ctrl_specific_data(sensor, + pll_config->comp_items, + pll_config->ctrl_data)) + continue; + + /* Found PLL index */ + dev_dbg(&client->dev, "%s Found PLL index: %d for freq: %d\n", + __func__, i, link_freq); + + sensor->pll_index = i; + + /* Update the control values for pixelrate_pa and csi */ + sensor->pixel_rate_pa->param.s64val = pll_config->pixel_rate_pa; + sensor->pixel_rate_csi->param.s64val = pll_config->pixel_rate_csi; + return 0; + } + + dev_err(&client->dev, "%s no configuration found for freq: %d\n", + __func__, link_freq); + return -EINVAL; +} + +/* + * Perform the action for the dependency control + */ +static void __crlmodule_dep_ctrl_perform_action( + struct crl_sensor *sensor, + struct crl_dep_ctrl_provision *prov, + u32 *val, u32 *dep_val) +{ + enum crl_dep_ctrl_condition cond; + unsigned int i; + u32 temp; + + if (*val > *dep_val) + cond = CRL_DEP_CTRL_CONDITION_GREATER; + else if (*val < *dep_val) + cond = CRL_DEP_CTRL_CONDITION_LESSER; + else + cond = CRL_DEP_CTRL_CONDITION_EQUAL; + + for (i = 0; i < prov->action_items; i++) { + if (prov->action[i].cond == cond) + break; + } + + /* No handler found-. 
Return completed */ + if (i >= prov->action_items) + return; + + /* if this is dependency control, switch val and dep val */ + if (prov->action_type == CRL_DEP_CTRL_ACTION_TYPE_DEP_CTRL) { + temp = *val; + *val = *dep_val; + *dep_val = temp; + } + + switch (prov->action[i].action) { + case CRL_DEP_CTRL_CONDITION_ADD: + *val = *dep_val + prov->action[i].action_value; + break; + case CRL_DEP_CTRL_CONDITION_SUBTRACT: + *val = *dep_val - prov->action[i].action_value; + break; + case CRL_DEP_CTRL_CONDITION_MULTIPLY: + *val = *dep_val * prov->action[i].action_value; + break; + case CRL_DEP_CTRL_CONDITION_DIVIDE: + *val = *dep_val / prov->action[i].action_value; + break; + } + + /* if this is dependency control, switch val and dep val back*/ + if (prov->action_type == CRL_DEP_CTRL_ACTION_TYPE_DEP_CTRL) { + temp = *val; + *val = *dep_val; + *dep_val = temp; + } + + return; +} + +/* + * Parse the dynamic entity based on the Operand type + */ +static int __crlmodule_parse_dynamic_entity(struct crl_sensor *sensor, + struct crl_dynamic_entity entity, + u32 *val) +{ + switch (entity.entity_type) { + case CRL_DYNAMIC_VAL_OPERAND_TYPE_CONST: + *val = entity.entity_val; + return 0; + case CRL_DYNAMIC_VAL_OPERAND_TYPE_VAR_REF: + return __crlmodule_get_variable_ref(sensor, + entity.entity_val, val); + case CRL_DYNAMIC_VAL_OPERAND_TYPE_CTRL_VAL: + return __crlmodule_get_param_value(sensor, + entity.entity_val, val); + case CRL_DYNAMIC_VAL_OPERAND_TYPE_REG_VAL: { + struct crl_register_read_rep reg; + + /* Note: Only 8bit registers are supported. 
*/ + reg.address = entity.entity_val; + reg.len = CRL_REG_LEN_08BIT; + reg.mask = 0xff; + reg.dev_i2c_addr = CRL_I2C_ADDRESS_NO_OVERRIDE; + return crlmodule_read_reg(sensor, reg, val); + } + default: + break; + }; + + return -EINVAL; +} + +static int __crlmodule_calc_dynamic_entity_values( + struct crl_sensor *sensor, + unsigned int ops_items, + struct crl_arithmetic_ops *ops_arr, + unsigned int *val) +{ + struct i2c_client *client = sensor->src->sd.client; + unsigned int i; + + /* perform the bitwise operation on val one by one */ + for (i = 0; i < ops_items; i++) { + struct crl_arithmetic_ops *ops = &ops_arr[i]; + u32 operand; + int ret = __crlmodule_parse_dynamic_entity(sensor, ops->operand, + &operand); + if (ret) { + dev_dbg(&client->dev, + "%s failed to parse dynamic entity: %d %d\n", + __func__, ops->operand.entity_type, + ops->operand.entity_val); + return ret; + } + + switch (ops->op) { + case CRL_BITWISE_AND: + *val &= operand; + break; + case CRL_BITWISE_OR: + *val |= operand; + break; + case CRL_BITWISE_LSHIFT: + *val <<= operand; + break; + case CRL_BITWISE_RSHIFT: + *val >>= operand; + break; + case CRL_BITWISE_XOR: + *val ^= operand; + break; + case CRL_BITWISE_COMPLEMENT: + *val = ~(*val); + break; + case CRL_ADD: + *val += operand; + break; + case CRL_SUBTRACT: + *val = *val > operand ? *val - operand : operand - *val; + break; + case CRL_MULTIPLY: + *val *= operand; + break; + case CRL_DIV: + if(operand==0) { + dev_err(&client->dev, "CRL_DIV error for operand returned is zero."); + return -EINVAL; + } + *val /= operand; + break; + case CRL_ASSIGNMENT: + *val = operand; + break; + default: + return -EINVAL; + } + } + + return 0; +} + +/* + * Dynamic registers' value is not direct but depends on a referrence value. + * This kind of registers are mainly used in crlmodule's ctrl logic. 
+ * + * This is to handle cases like the below examples, where mutliple registers + * need to be modified based on the input value "val" + * R3000 = val & 0xff and R3001 = val >> 8 & 0xff and R3002 = val >> 16 & 0xff + * R4001 = val and R4002 = val or + * R2800 = FLL - val and R2802 = LLP - val + */ +static int __crlmodule_update_dynamic_regs(struct crl_sensor *sensor, + struct crl_ctrl_data *crl_ctrl, + unsigned int val) +{ + unsigned int i; + + for (i = 0; i < crl_ctrl->regs_items; i++) { + struct crl_dynamic_register_access *reg = &crl_ctrl->regs[i]; + /* + * Each register group must start from the initial value, not + * as a continuation of the previous calculations. The sensor + * configurations must take care of this restriction. + */ + u32 val_t = val; + int ret; + + /* Get the value associated with the dynamic entity */ + ret = __crlmodule_calc_dynamic_entity_values(sensor, + reg->ops_items, + reg->ops, &val_t); + if (ret) + return ret; + + /* Now ready to write the value */ + ret = crlmodule_write_reg(sensor, reg->dev_i2c_addr, + reg->address, reg->len, + reg->mask, val_t); + if (ret) + return ret; + } + + return 0; +} + +/* + * Handles the dependency control actions. Dependency control is a control + * which' value depends on the current control. This information is encoded in + * the sensor configuration file. 
+ */ +static int __crlmodule_handle_dependency_ctrl( + struct crl_sensor *sensor, + struct crl_ctrl_data *crl_ctrl, + unsigned int *val, + enum crl_dep_ctrl_action_type type) +{ + struct i2c_client *client = sensor->src->sd.client; + struct crl_ctrl_data *dep_crl_ctrl; + struct crl_dep_ctrl_provision *dep_prov; + unsigned int i, idx; + u32 dep_val; + int ret; + + dev_dbg(&client->dev, "%s ctrl_id: 0x%x dependency controls: %d\n", + __func__, crl_ctrl->ctrl_id, + crl_ctrl->dep_items); + + for (i = 0; i < crl_ctrl->dep_items; i++) { + dep_prov = &crl_ctrl->dep_ctrls[i]; + + /* If not the type, continue */ + if (dep_prov->action_type != type) + continue; + + /* Get the value from the dependency ctrl */ + ret = __crlmodule_get_param_value(sensor, dep_prov->ctrl_id, + &dep_val); + if (ret) { + dev_err(&client->dev, "%s ctrl_id: 0x%x not found\n", + __func__, dep_prov->ctrl_id); + /* TODO! Shoud continue? */ + continue; + } + + /* Perform the action */ + __crlmodule_dep_ctrl_perform_action(sensor, dep_prov, val, + &dep_val); + + /* if this is dependency control, update the register */ + if (dep_prov->action_type == + CRL_DEP_CTRL_ACTION_TYPE_DEP_CTRL) { + ret = __crlmodule_get_crl_ctrl_index(sensor, + dep_prov->ctrl_id, &idx); + if (ret) + continue; + + dep_crl_ctrl = &sensor->ctrl_bank[idx]; + dev_dbg(&client->dev, + "%s crl_ctrl: 0x%p 0x%p\n", __func__, + &sensor->ctrl_bank[idx], + dep_crl_ctrl); + + ret = __crlmodule_update_dynamic_regs(sensor, + dep_crl_ctrl, dep_val); + if (ret) + continue; + } + } + return 0; +} + + +static int crlmodule_get_fmt_index(struct crl_sensor *sensor, + u8 pixel_order, u8 bpp) +{ + struct i2c_client *client = sensor->src->sd.client; + const struct crl_csi_data_fmt *f; + int i; + + /* + * Go through the fmt list and check if this format with matching bpp + * is supported by this module definition file + */ + for (i = 0; i < sensor->sensor_ds->csi_fmts_items; i++) { + f = &sensor->sensor_ds->csi_fmts[i]; + + if (f->pixel_order == 
pixel_order && f->bits_per_pixel == bpp) + return i; + } + + dev_err(&client->dev, "%s no supported format for order: %d bpp: %d\n", + __func__, pixel_order, bpp); + + return -EINVAL; +} + +static int __crlmodule_update_flip_info(struct crl_sensor *sensor, + struct crl_ctrl_data *crl_ctrl, + struct ici_ext_sd_param *param) +{ + struct i2c_client *client = sensor->src->sd.client; + const struct crl_csi_data_fmt *fmt = + &sensor->sensor_ds->csi_fmts[sensor->fmt_index]; + u8 bpp = fmt->bits_per_pixel; + u8 flip_info = sensor->flip_info; + u8 new_order = 0; + int i, ret; + + dev_dbg(&client->dev, "%s current flip_info: %d curr index: %d\n", + __func__, flip_info, sensor->fmt_index); + + switch (param->id) { + case ICI_EXT_SD_PARAM_ID_HFLIP: + flip_info &= CRL_FLIP_HFLIP_MASK; + flip_info |= param->val > 0 ? CRL_FLIP_HFLIP : 0; + break; + case ICI_EXT_SD_PARAM_ID_VFLIP: + flip_info &= CRL_FLIP_VFLIP_MASK; + flip_info |= param->val > 0 ? CRL_FLIP_VFLIP : 0; + break; + } + + dev_dbg(&client->dev, "%s flip success new flip_info: %d\n", + __func__, flip_info); + + /* First check if the module actually supports any pixelorder changes */ + for (i = 0; i < sensor->sensor_ds->flip_items; i++) { + if (flip_info == sensor->sensor_ds->flip_data[i].flip) { + new_order = sensor->sensor_ds->flip_data[i].pixel_order; + break; + } + } + + if (i >= sensor->sensor_ds->flip_items) { + dev_err(&client->dev, "%s flip not supported %d\n", + __func__, flip_info); + return -EINVAL; + } + + /* + * Flip changes only pixel order. 
So check if the supported format list + * has any format with new pixel order and current bits per pixel + */ + i = crlmodule_get_fmt_index(sensor, new_order, bpp); + if (i < 0) { + dev_err(&client->dev, "%s no format found order: %d bpp: %d\n", + __func__, new_order, bpp); + return -EINVAL; + } + + ret = __crlmodule_update_dynamic_regs(sensor, crl_ctrl, param->val); + if (ret) { + dev_err(&client->dev, "%s register access failed\n", __func__); + return ret; + } + + /* New format found. Update info */ + sensor->fmt_index = i; + sensor->flip_info = flip_info; + + dev_dbg(&client->dev, "%s flip success flip: %d new fmt index: %d\n", + __func__, flip_info, i); + + return 0; +} +static int __crlmodule_update_framesize(struct crl_sensor *sensor, + struct crl_ctrl_data *crl_ctrl, + struct ici_ext_sd_param *param) +{ + const struct crl_mode_rep *mode = sensor->current_mode; + unsigned int val; + + switch (param->id) { + case ICI_EXT_SD_PARAM_ID_FRAME_LENGTH_LINES: + val = max(param->val, mode->min_fll); + break; + case ICI_EXT_SD_PARAM_ID_LINE_LENGTH_PIXELS: + val = max(param->val, mode->min_llp); + break; + default: + return -EINVAL; + } + + return __crlmodule_update_dynamic_regs(sensor, crl_ctrl, val); +} +static int __crlmodule_update_blanking(struct crl_sensor *sensor, + struct crl_ctrl_data *crl_ctrl, + struct ici_ext_sd_param *param) +{ + unsigned int val; + + switch (param->id) { + case ICI_EXT_SD_PARAM_ID_HBLANK: + val = sensor->pixel_array->crop[CRL_PA_PAD_SRC].width + + param->val; + break; + case ICI_EXT_SD_PARAM_ID_VBLANK: + val = sensor->pixel_array->crop[CRL_PA_PAD_SRC].height + + param->val; + break; + default: + return -EINVAL; + } + + return __crlmodule_update_dynamic_regs(sensor, crl_ctrl, val); +} + +static void __crlmodule_update_selection_impact_flags( + struct crl_sensor *sensor, + struct crl_ctrl_data *crl_ctrl) +{ + if (crl_ctrl->impact & CRL_IMPACTS_PLL_SELECTION) + sensor->ext_ctrl_impacts_pll_selection = true; + + if (crl_ctrl->impact & 
CRL_IMPACTS_MODE_SELECTION) + sensor->ext_ctrl_impacts_mode_selection = true; +} + +static struct crl_ctrl_data *__crlmodule_find_crlctrl( + struct crl_sensor *sensor, + struct ici_ext_sd_param *param) +{ + struct crl_ctrl_data *crl_ctrl; + unsigned int i; + + for (i = 0; i < sensor->sensor_ds->ctrl_items; i++) { + crl_ctrl = &sensor->ctrl_bank[i]; + if (crl_ctrl->param.sd == param->sd && + crl_ctrl->ctrl_id == param->id) + return crl_ctrl; + } + + return NULL; +} + +static int crlmodule_set_param(struct ici_ext_sd_param *param) +{ + struct crl_sensor *sensor = to_crlmodule_sensor(param->sd); + struct i2c_client *client = sensor->src->sd.client; + struct crl_ctrl_data *crl_ctrl = NULL; + int ret = 0; + + dev_dbg(&client->dev, "%s id:%d val:%d\n", __func__, param->id, + param->val); + + /* + * Need to find the corresponding crlmodule wrapper for this param. + */ + crl_ctrl = __crlmodule_find_crlctrl(sensor, param); + if (!crl_ctrl) { + dev_err(&client->dev, "%s ctrl :0x%x not supported\n", + __func__, param->id); + return -EINVAL; + } + + dev_dbg(&client->dev, "%s id:0x%x name:%s\n", __func__, param->id, + crl_ctrl->name); + + if (!crl_ctrl->enabled || + crl_ctrl->flags & CRL_CTRL_FLAG_READ_ONLY) { + dev_err(&client->dev, "%s Control id:0x%x is not writeable\n", + __func__, param->id); + return -EINVAL; + } + + if (param->type != ICI_EXT_SD_PARAM_TYPE_INT32) { + dev_err(&client->dev, "%s Control id:0x%x only INT32 is supported\n", + __func__, param->id); + return -EINVAL; + } + + crl_ctrl->param.val = param->val; + + /* Then go through the mandatory controls */ + switch (param->id) { + case ICI_EXT_SD_PARAM_ID_LINK_FREQ: + /* Go through the supported list and compare the values */ + ret = __crlmodule_update_pll_index(sensor, crl_ctrl); + goto out; + }; + + /* update the selection impacts flags */ + __crlmodule_update_selection_impact_flags(sensor, crl_ctrl); + + /* + * Dependency control is a control whose value is affected by the value + * for the current control. 
For example, vblank can be a dependency + * control for exposure. Whenever exposure changes, the sensor can + * automatically adjust the vblank or rely on manual adjustment. In + * case of manual adjustment the sensor configuration file needs to + * specify the dependency control, the condition for an action and + * typs of action. + * + * Now check if there is any dependency controls for this. And if there + * are any we need to split the action to two. First if the current + * control needs to be changed, then do it before updating the register. + * If some other control is affected, then do it after wrriting the + * current values + * + * Now check in the dependency control list, if the action type is + * "self" and update the value accordingly now + */ + __crlmodule_handle_dependency_ctrl(sensor, crl_ctrl, ¶m->val, + CRL_DEP_CTRL_ACTION_TYPE_SELF); + + /* Handle specific controls */ + switch (param->id) { + case ICI_EXT_SD_PARAM_ID_HFLIP: + case ICI_EXT_SD_PARAM_ID_VFLIP: + ret = __crlmodule_update_flip_info(sensor, crl_ctrl, param); + goto out; + + case ICI_EXT_SD_PARAM_ID_VBLANK: + case ICI_EXT_SD_PARAM_ID_HBLANK: + if (sensor->blanking_ctrl_not_use) { + dev_info(&client->dev, "%s Blanking controls are not used \ + in this configuration, setting them has no effect\n", __func__); + /* Disable control*/ + crl_ctrl->enabled = false; + + } else { + ret = __crlmodule_update_blanking(sensor, crl_ctrl, param); + } + goto out; + + case ICI_EXT_SD_PARAM_ID_FRAME_LENGTH_LINES: + case ICI_EXT_SD_PARAM_ID_LINE_LENGTH_PIXELS: + ret = __crlmodule_update_framesize(sensor, crl_ctrl, param); + goto out; + + case ICI_EXT_SD_PARAM_ID_SENSOR_MODE: + sensor->sensor_mode = param->val; + crlmodule_update_current_mode(sensor); + goto out; + } + + ret = __crlmodule_update_dynamic_regs(sensor, crl_ctrl, param->val); + +out: + /* + * Now check in the dependency control list, if the action type is + * "dependency control" and update the value accordingly now + */ + if (!ret && crl_ctrl) 
+ __crlmodule_handle_dependency_ctrl(sensor, crl_ctrl, ¶m->val, + CRL_DEP_CTRL_ACTION_TYPE_DEP_CTRL); + + return ret; +} + +static int crlmodule_get_param(struct ici_ext_sd_param *param) +{ + struct crl_sensor *sensor = to_crlmodule_sensor(param->sd); + struct i2c_client *client = sensor->src->sd.client; + struct crl_ctrl_data *crl_ctrl; + struct crl_dynamic_register_access *reg; + + /* + * Need to find the corresponding crlmodule wrapper for this param. + */ + crl_ctrl = __crlmodule_find_crlctrl(sensor, param); + if (!crl_ctrl) { + dev_err(&client->dev, "%s ctrl :0x%x not supported\n", + __func__, param->id); + return -EINVAL; + } + + dev_dbg(&client->dev, "%s id:0x%x name:%s\n", __func__, param->id, + crl_ctrl->name); + + if (crl_ctrl->flags & CRL_CTRL_FLAG_WRITE_ONLY) { + dev_err(&client->dev, "%s Control id:0x%x is not readable\n", + __func__, param->id); + return -EINVAL; + } + + param->type = ICI_EXT_SD_PARAM_TYPE_INT32; + if (!(crl_ctrl->flags & CRL_CTRL_FLAG_READ_ONLY)) { + param->val = crl_ctrl->param.val; + return 0; + } + + /* + * Found the crl control wrapper. Use the dynamic entity information + * to calculate the value for this control. For get control, there + * could be only one item in the crl_dynamic_register_access. ctrl-> + * regs_items must be 1. Also the crl_dynamic_register_access.address + * and crl_dynamic_register_access.len are not used. + * Instead the values to be found or calculated need to be encoded into + * crl_dynamic_register_access.crl_arithmetic_ops. It has possibility + * to read from registers, existing control values and simple arithmetic + * operations etc. 
+ */ + if (!crl_ctrl->regs || !crl_ctrl->regs_items) { + dev_err(&client->dev, "%s no dynamic entities found\n", + __func__); + return -EINVAL; + } + if (crl_ctrl->regs_items > 1) + dev_warn(&client->dev, + "%s multiple dynamic entities, will skip the rest\n", + __func__); + reg = &crl_ctrl->regs[0]; + + /* Get the value associated with the dynamic entity */ + return __crlmodule_calc_dynamic_entity_values(sensor, reg->ops_items, + reg->ops, ¶m->val); +} + +static int crlmodule_get_menu_item( + struct ici_ext_sd_param *param, u32 idx) +{ + struct crl_sensor *sensor = to_crlmodule_sensor(param->sd); + struct i2c_client *client = sensor->src->sd.client; + struct crl_ctrl_data *crl_ctrl; + + crl_ctrl = __crlmodule_find_crlctrl(sensor, param); + if (!crl_ctrl) { + dev_err(&client->dev, "%s ctrl :0x%x not supported\n", + __func__, param->id); + return -EINVAL; + } + + if (idx > crl_ctrl->max) { + dev_err(&client->dev, "%s Control id:0x%x has invalid index %u\n", + __func__, param->id, idx); + return -EINVAL; + } + switch (crl_ctrl->type) + { + case CRL_CTRL_TYPE_MENU_INT: + param->type = ICI_EXT_SD_PARAM_TYPE_INT64; + param->s64val = crl_ctrl->data.int_menu.menu[idx]; + break; + case CRL_CTRL_TYPE_MENU_ITEMS: + if (!param->custom.size || !param->custom.data) { + dev_err(&client->dev, "%s Control id:0x%x param->custom.data must be preallocated by caller\n", + __func__, param->id); + return -EINVAL; + } + param->type = ICI_EXT_SD_PARAM_TYPE_STR; + strncpy(param->custom.data, + crl_ctrl->data.menu_items.menu[idx], + param->custom.size - 1); + param->custom.data[param->custom.size - 1] = '\0'; + break; + default: + dev_err(&client->dev, "%s Control id:0x%x does not have a menu\n", + __func__, param->id); + return -EINVAL; + } + return 0; +} + +static int __crlmodule_init_link_freq_ctrl_menu( + struct crl_sensor *sensor, + struct crl_ctrl_data *crl_ctrl) +{ + struct i2c_client *client = sensor->src->sd.client; + unsigned int items = 0; + unsigned int i; + + /* Cannot handle 
if the control type is not integer menu */ + if (crl_ctrl->type != CRL_CTRL_TYPE_MENU_INT) + return 0; + + /* If the menu contents exist, skip filling it dynamically */ + if (crl_ctrl->data.int_menu.menu) + return 0; + + sensor->link_freq_menu = devm_kzalloc(&client->dev, sizeof(s64) * + sensor->sensor_ds->pll_config_items, + GFP_KERNEL); + if (!sensor->link_freq_menu) + return -ENOMEM; + + for (i = 0; i < sensor->sensor_ds->pll_config_items; i++) { + bool dup = false; + unsigned int j; + + /* + * Skip the duplicate entries. We are using the value to match + * not the index + */ + for (j = 0; j < items && !dup; j++) + dup = (sensor->link_freq_menu[j] == + sensor->sensor_ds->pll_configs[i].op_sys_clk); + if (dup) + continue; + + sensor->link_freq_menu[items] = + sensor->sensor_ds->pll_configs[i].op_sys_clk; + items++; + } + + crl_ctrl->data.int_menu.menu = sensor->link_freq_menu; + + /* items will not be 0 as there will be atleast one pll_config_item */ + crl_ctrl->data.int_menu.max = items - 1; + + return 0; +} + +static int crlmodule_init_controls(struct crl_sensor *sensor) +{ + struct i2c_client *client = sensor->src->sd.client; + unsigned int pa_ctrls = 0; + unsigned int src_ctrls = 0; + struct crl_ctrl_data *crl_ctrl; + unsigned int i; + int rval; + + sensor->ctrl_bank = devm_kzalloc(&client->dev, + sizeof(struct crl_ctrl_data) * + sensor->sensor_ds->ctrl_items, + GFP_KERNEL); + if (!sensor->ctrl_bank) + return -ENOMEM; + + /* Prepare to initialise the ctrls from the crl wrapper */ + for (i = 0; i < sensor->sensor_ds->ctrl_items; i++) { + /* + * First copy the ctrls to the sensor as there could be + * more than one similar sensors in a product which could share + * the same configuration files + */ + sensor->ctrl_bank[i] = + sensor->sensor_ds->ctrl_bank[i]; + + crl_ctrl = &sensor->ctrl_bank[i]; + crl_ctrl->param.id = crl_ctrl->ctrl_id; + if (crl_ctrl->sd_type == CRL_SUBDEV_TYPE_PIXEL_ARRAY) { + if (sensor->pixel_array) { + crl_ctrl->param.sd = + 
&sensor->pixel_array->sd; + } + pa_ctrls++; + } + + if (crl_ctrl->sd_type == CRL_SUBDEV_TYPE_SCALER) { + if (sensor->scaler) { + crl_ctrl->param.sd = + &sensor->scaler->sd; + } + src_ctrls++; + } + if (crl_ctrl->sd_type == CRL_SUBDEV_TYPE_BINNER) { + if (sensor->binner) { + crl_ctrl->param.sd = + &sensor->binner->sd; + } + src_ctrls++; + } + + /* populate the ctrl for the Link_freq dynamically */ + if (crl_ctrl->ctrl_id == ICI_EXT_SD_PARAM_ID_LINK_FREQ && + (crl_ctrl->sd_type == CRL_SUBDEV_TYPE_SCALER || + crl_ctrl->sd_type == CRL_SUBDEV_TYPE_BINNER)) { + rval = __crlmodule_init_link_freq_ctrl_menu(sensor, + crl_ctrl); + if (rval) + return rval; + } + } + dev_dbg(&client->dev, "%s pa_ctrls: %d src_ctrls: %d\n", __func__, + pa_ctrls, src_ctrls); + for (i = 0; i < sensor->sensor_ds->ctrl_items; i++) { + crl_ctrl = &sensor->ctrl_bank[i]; + switch (crl_ctrl->type) { + case CRL_CTRL_TYPE_MENU_ITEMS: + crl_ctrl->max = crl_ctrl->data.menu_items.size - 1; + break; + case CRL_CTRL_TYPE_MENU_INT: + crl_ctrl->max = crl_ctrl->data.int_menu.max; + crl_ctrl->def = crl_ctrl->data.int_menu.def; + break; + case CRL_CTRL_TYPE_INTEGER64: + case CRL_CTRL_TYPE_INTEGER: + case CRL_CTRL_TYPE_CUSTOM: + crl_ctrl->min = crl_ctrl->data.std_data.min; + crl_ctrl->max = crl_ctrl->data.std_data.max; + crl_ctrl->step = crl_ctrl->data.std_data.step; + crl_ctrl->def = crl_ctrl->data.std_data.def; + break; + case CRL_CTRL_TYPE_BOOLEAN: + case CRL_CTRL_TYPE_BUTTON: + case CRL_CTRL_TYPE_CTRL_CLASS: + default: + dev_err(&client->dev, + "%s Invalid control type\n", __func__); + continue; + break; + } + + /* + * Blanking and framesize controls access to same register, + * Blank controls are disabled if framesize controls exists. 
+ */ + if (crl_ctrl->ctrl_id == ICI_EXT_SD_PARAM_ID_FRAME_LENGTH_LINES || + crl_ctrl->ctrl_id == ICI_EXT_SD_PARAM_ID_LINE_LENGTH_PIXELS) + sensor->blanking_ctrl_not_use = 1; + + if (crl_ctrl->ctrl_id == ICI_EXT_SD_PARAM_ID_SENSOR_MODE) + sensor->direct_mode_in_use = 1; + + /* Save mandatory control references - link_freq in src sd */ + if (crl_ctrl->ctrl_id == ICI_EXT_SD_PARAM_ID_LINK_FREQ && + (crl_ctrl->sd_type == CRL_SUBDEV_TYPE_SCALER || + crl_ctrl->sd_type == CRL_SUBDEV_TYPE_BINNER)) + sensor->link_freq = crl_ctrl; + + /* Save mandatory control references - pixel_rate_pa PA sd */ + if (crl_ctrl->ctrl_id == ICI_EXT_SD_PARAM_ID_PIXEL_RATE && + crl_ctrl->sd_type == CRL_SUBDEV_TYPE_PIXEL_ARRAY) + sensor->pixel_rate_pa = crl_ctrl; + + /* Save mandatory control references - pixel_rate_csi src sd */ + if (crl_ctrl->ctrl_id == ICI_EXT_SD_PARAM_ID_PIXEL_RATE && + (crl_ctrl->sd_type == CRL_SUBDEV_TYPE_SCALER || + crl_ctrl->sd_type == CRL_SUBDEV_TYPE_BINNER)) + sensor->pixel_rate_csi = crl_ctrl; + + dev_dbg(&client->dev, + "%s idx: %d ctrl_id: 0x%x ctrl_name: %s\n", + __func__, i, crl_ctrl->ctrl_id, crl_ctrl->name); + } + + return 0; +} + + +static bool __crlmodule_rect_matches(struct i2c_client *client, + const struct ici_rect *const rect1, + const struct ici_rect *const rect2) +{ + dev_dbg(&client->dev, "%s rect1 l:%d t:%d w:%d h:%d\n", __func__, + rect1->left, rect1->top, rect1->width, rect1->height); + dev_dbg(&client->dev, "%s rect2 l:%d t:%d w:%d h:%d\n", __func__, + rect2->left, rect2->top, rect2->width, rect2->height); + + return (rect1->left == rect2->left && + rect1->top == rect2->top && + rect1->width == rect2->width && + rect1->height == rect2->height); +} + +static int __crlmodule_update_hblank(struct crl_sensor *sensor, + struct crl_ctrl_data *hblank) +{ + const struct crl_mode_rep *mode = sensor->current_mode; + const struct crl_sensor_limits *limits = sensor->sensor_ds->sensor_limits; + unsigned int width = sensor->pixel_array->crop[CRL_PA_PAD_SRC].width; 
+	unsigned int min_llp, max_llp;
+
+	if (mode->min_llp)
+		min_llp = mode->min_llp; /* mode specific limit */
+	else if (limits->min_line_length_pixels)
+		min_llp = limits->min_line_length_pixels; /* sensor limit */
+	else /* No restrictions */
+		min_llp = width;
+
+	if (mode->max_llp)
+		max_llp = mode->max_llp; /* mode specific limit */
+	else if (limits->max_line_length_pixels)
+		max_llp = limits->max_line_length_pixels; /* sensor limit */
+	else /* No restrictions */
+		max_llp = USHRT_MAX;
+
+	hblank->min = min_llp - width;
+	hblank->max = max_llp - width;
+	hblank->def = hblank->min;
+	return 0;
+}
+
+static int __crlmodule_update_vblank(struct crl_sensor *sensor,
+				     struct crl_ctrl_data *vblank)
+{
+	const struct crl_mode_rep *mode = sensor->current_mode;
+	const struct crl_sensor_limits *limits = sensor->sensor_ds->sensor_limits;
+	unsigned int height = sensor->pixel_array->crop[CRL_PA_PAD_SRC].height;
+	unsigned int min_fll, max_fll;
+
+	if (mode->min_fll)
+		min_fll = mode->min_fll; /* mode specific limit */
+	else if (limits->min_frame_length_lines)
+		min_fll = limits->min_frame_length_lines; /* sensor limit */
+	else /* No restrictions */
+		min_fll = height;
+
+	if (mode->max_fll)
+		max_fll = mode->max_fll; /* mode specific limit */
+	else if (limits->max_frame_length_lines)
+		max_fll = limits->max_frame_length_lines; /* sensor limit */
+	else /* No restrictions */
+		max_fll = USHRT_MAX;
+
+	vblank->min = min_fll - height;
+	vblank->max = max_fll - height;
+	vblank->def = vblank->min;
+	return 0;
+}
+
+static void crlmodule_update_framesize(struct crl_sensor *sensor)
+{
+	const struct crl_mode_rep *mode = sensor->current_mode;
+	struct crl_ctrl_data *llength;
+	struct crl_ctrl_data *flength;
+
+	llength = __crlmodule_get_ctrl(sensor, ICI_EXT_SD_PARAM_ID_LINE_LENGTH_PIXELS);
+	flength = __crlmodule_get_ctrl(sensor, ICI_EXT_SD_PARAM_ID_FRAME_LENGTH_LINES);
+
+	if (llength) {
+		llength->min = mode->min_llp;
+		llength->def = llength->min;
+	}
+
+	if (flength) {
+ flength->min = mode->min_fll; + flength->def = flength->min; + } +} + +static int crlmodule_update_frame_blanking(struct crl_sensor *sensor) +{ + struct i2c_client *client = sensor->src->sd.client; + struct crl_ctrl_data *vblank; + struct crl_ctrl_data *hblank; + int ret; + + vblank = __crlmodule_get_ctrl(sensor, ICI_EXT_SD_PARAM_ID_VBLANK); + hblank = __crlmodule_get_ctrl(sensor, ICI_EXT_SD_PARAM_ID_HBLANK); + + if (hblank) { + ret = __crlmodule_update_hblank(sensor, hblank); + if (ret) + return ret; + dev_dbg(&client->dev, "%s hblank:%d\n", __func__, hblank->param.val); + } + + if (vblank) { + ret = __crlmodule_update_vblank(sensor, vblank); + if (ret) + return ret; + dev_dbg(&client->dev, "%s vblank:%d\n", __func__, vblank->param.val); + } + + return 0; +} + +static void crlmodule_update_mode_bysel(struct crl_sensor *sensor) +{ + struct i2c_client *client = sensor->src->sd.client; + const struct crl_mode_rep *this; + unsigned int i; + + dev_dbg(&client->dev, "%s look for w: %d, h: %d, in [%d] modes\n", + __func__, sensor->src->crop[CRL_PAD_SRC].width, + sensor->src->crop[CRL_PAD_SRC].height, + sensor->sensor_ds->modes_items); + + for (i = 0; i < sensor->sensor_ds->modes_items; i++) { + this = &sensor->sensor_ds->modes[i]; + + dev_dbg(&client->dev, "%s check mode list[%d] w: %d, h: %d\n", + __func__, i, this->width, this->height); + if (this->width != sensor->src->crop[CRL_PAD_SRC].width || + this->height != sensor->src->crop[CRL_PAD_SRC].height) + continue; + + if (sensor->pixel_array) { + dev_dbg(&client->dev, "%s Compare PA out rect\n", __func__); + if (!__crlmodule_rect_matches(client, + &sensor->pixel_array->crop[CRL_PA_PAD_SRC], + &this->sd_rects[CRL_SD_PA_INDEX].out_rect)) + continue; + } + if (sensor->binner) { + dev_dbg(&client->dev, "%s binning hor: %d vs. %d\n", + __func__, + sensor->binning_horizontal, + this->binn_hor); + if (sensor->binning_horizontal != this->binn_hor) + continue; + + dev_dbg(&client->dev, "%s binning vert: %d vs. 
%d\n", + __func__, + sensor->binning_vertical, + this->binn_vert); + if (sensor->binning_vertical != this->binn_vert) + continue; + + dev_dbg(&client->dev, "%s binner in rect\n", __func__); + if (!__crlmodule_rect_matches(client, + &sensor->binner->crop[CRL_PAD_SINK], + &this->sd_rects[CRL_SD_BINNER_INDEX].in_rect)) + continue; + + dev_dbg(&client->dev, "%s binner out rect\n", __func__); + if (!__crlmodule_rect_matches(client, + &sensor->binner->crop[CRL_PAD_SRC], + &this->sd_rects[CRL_SD_BINNER_INDEX].out_rect)) + continue; + } + + if (sensor->scaler) { + dev_dbg(&client->dev, "%s scaler scale_m %d vs. %d\n", + __func__, sensor->scale_m, + this->scale_m); + if (sensor->scale_m != this->scale_m) + continue; + + dev_dbg(&client->dev, "%s scaler in rect\n", __func__); + if (!__crlmodule_rect_matches(client, + &sensor->scaler->crop[CRL_PAD_SINK], + &this->sd_rects[CRL_SD_SCALER_INDEX].in_rect)) + continue; + + dev_dbg(&client->dev, "%s scaler out rect\n", __func__); + if (!__crlmodule_rect_matches(client, + &sensor->scaler->crop[CRL_PAD_SRC], + &this->sd_rects[CRL_SD_SCALER_INDEX].out_rect)) + continue; + } + + /* Check if there are any dynamic compare items */ + if (sensor->ext_ctrl_impacts_mode_selection && + !__crlmodule_compare_ctrl_specific_data(sensor, + this->comp_items, + this->ctrl_data)) + continue; + + /* Found a perfect match! */ + dev_dbg(&client->dev, "%s found mode. 
idx: %d\n", __func__, i); + break; + } + + /* If no modes found, fall back to the fail safe mode index */ + if (i >= sensor->sensor_ds->modes_items) { + i = sensor->sensor_ds->fail_safe_mode_index; + this = &sensor->sensor_ds->modes[i]; + dev_info(&client->dev, + "%s no matching mode, set to default: %d\n", + __func__, i); + } + + sensor->current_mode = this; +} + +static void crlmodule_update_mode_ctrl(struct crl_sensor *sensor) +{ + struct i2c_client *client = sensor->src->sd.client; + const struct crl_mode_rep *this; + int i; + + dev_dbg(&client->dev, "%s Sensor Mode :%d\n", + __func__, sensor->sensor_mode); + /* point to selected mode */ + this = &sensor->sensor_ds->modes[sensor->sensor_mode]; + sensor->current_mode = this; + + for (i = 0; i < this->sd_rects_items; i++) { + + if (CRL_SUBDEV_TYPE_PIXEL_ARRAY == + this->sd_rects[i].subdev_type) { + sensor->pixel_array->crop[CRL_PA_PAD_SRC] = + this->sd_rects[CRL_SD_PA_INDEX].out_rect; + } + + if (CRL_SUBDEV_TYPE_BINNER == + this->sd_rects[i].subdev_type) { + sensor->binner->sink_fmt = + this->sd_rects[i].in_rect; + sensor->binner->crop[CRL_PAD_SINK] = + this->sd_rects[i].in_rect; + sensor->binner->crop[CRL_PAD_SRC] = + this->sd_rects[i].out_rect; + sensor->binning_vertical = this->binn_vert; + sensor->binning_horizontal = this->binn_hor; + if (this->binn_vert > 1) + sensor->binner->compose = + this->sd_rects[i].out_rect; + } + + if (CRL_SUBDEV_TYPE_SCALER == + this->sd_rects[i].subdev_type) { + sensor->scaler->crop[CRL_PAD_SINK] = + this->sd_rects[i].in_rect; + sensor->scaler->crop[CRL_PAD_SRC] = + this->sd_rects[i].out_rect; + sensor->scaler->sink_fmt = + this->sd_rects[i].in_rect; + sensor->scale_m = this->scale_m; + if (this->scale_m != 1) + sensor->scaler->compose = + this->sd_rects[i].out_rect; + } + } + + /* Set source */ + sensor->src->crop[CRL_PAD_SRC].width = this->width; + sensor->src->crop[CRL_PAD_SRC].height = this->height; +} + +static void crlmodule_update_current_mode(struct crl_sensor *sensor) +{ 
+ const struct crl_mode_rep *this; + int i; + + if (sensor->direct_mode_in_use) + crlmodule_update_mode_ctrl(sensor); + else + crlmodule_update_mode_bysel(sensor); + + /* + * We have a valid mode now. If there are any mode specific "get" + * controls defined in the configuration it could be queried by the + * user space for any mode specific information. So go through the + * mode specific ctrls and update its value from the selected mode. + */ + + this = sensor->current_mode; + + for (i = 0; i < this->comp_items; i++) { + struct crl_ctrl_data_pair *ctrl_comp = &this->ctrl_data[i]; + unsigned int idx; + + /* Get the ctl_ctrl pointer corresponding ctrl id */ + if (__crlmodule_get_crl_ctrl_index(sensor, ctrl_comp->ctrl_id, + &idx)) + /* If not found, move to the next ctrl */ + continue; + + /* No need to update this control, if this is a set op ctrl */ + if (sensor->ctrl_bank[idx].op_type == CRL_CTRL_SET_OP) + continue; + + /* Update the control value */ + sensor->ctrl_bank[idx].param.val = ctrl_comp->data; + } + + if (sensor->blanking_ctrl_not_use) + crlmodule_update_framesize(sensor); + else + crlmodule_update_frame_blanking(sensor); +} + +/* + * Function main code replicated from /drivers/media/i2c/smiapp/smiapp-core.c + * Slightly modified based on the CRL Module changes + */ +static int __crlmodule_get_format( + struct ici_ext_subdev* subdev, + struct ici_pad_framefmt* pff) +{ + struct crl_subdev *ssd = to_crlmodule_subdev(subdev); + struct crl_sensor *sensor = to_crlmodule_sensor(subdev); + struct ici_rect *r; + + if (pff->pad.pad_idx == ssd->source_pad) + r = &ssd->crop[ssd->source_pad]; + else + r = &ssd->sink_fmt; + + pff->ffmt.width = r->width; + pff->ffmt.height = r->height; + pff->ffmt.pixelformat = + sensor->sensor_ds->csi_fmts[sensor->fmt_index].code; + pff->ffmt.field = + ((ssd->field == ICI_FIELD_ANY) ? 
+ ICI_FIELD_NONE : ssd->field); + return 0; +} + +/* + * Function main code replicated from /drivers/media/i2c/smiapp/smiapp-core.c + * Slightly modified based on the CRL Module changes + */ +static int crlmodule_enum_pixelformat( + struct ici_isys_node* node, + struct ici_pad_supported_format_desc* psfd) +{ + struct ici_ext_subdev* subdev = node->sd; + struct crl_sensor *sensor = to_crlmodule_sensor(subdev); + + if (psfd->idx >= sensor->sensor_ds->csi_fmts_items) + return -EINVAL; + + psfd->color_format = + sensor->sensor_ds->csi_fmts[psfd->idx].code; + psfd->min_width = sensor->sensor_ds->sensor_limits->x_addr_min; + psfd->max_width = sensor->sensor_ds->sensor_limits->x_addr_max; + psfd->min_height = sensor->sensor_ds->sensor_limits->y_addr_min; + psfd->max_height = sensor->sensor_ds->sensor_limits->y_addr_max; + return 0; +} + +/* + * Function main code replicated from /drivers/media/i2c/smiapp/smiapp-core.c + * Slightly modified based on the CRL Module changes + */ +static int crlmodule_get_format( + struct ici_isys_node* node, + struct ici_pad_framefmt* pff) +{ + struct ici_ext_subdev* subdev = node->sd; + struct crl_sensor *sensor = to_crlmodule_sensor(subdev); + int rval; + + mutex_lock(&sensor->mutex); + rval = __crlmodule_get_format(subdev, pff); + mutex_unlock(&sensor->mutex); + + return rval; +} + +static int __crlmodule_sel_supported( + struct ici_ext_subdev *subdev, + u32 pad, + u32 type) +{ + struct crl_subdev *ssd = to_crlmodule_subdev(subdev); + struct crl_sensor *sensor = to_crlmodule_sensor(subdev); + + if (ssd == sensor->pixel_array && pad == CRL_PA_PAD_SRC) { + switch (type) { + case ICI_EXT_SEL_TYPE_NATIVE: + case ICI_EXT_SEL_TYPE_CROP: + case ICI_EXT_SEL_TYPE_CROP_BOUNDS: + return 0; + } + } + if (ssd == sensor->binner) { + switch (type) { + case ICI_EXT_SEL_TYPE_COMPOSE: + case ICI_EXT_SEL_TYPE_COMPOSE_BOUNDS: + if (pad == CRL_PAD_SINK) + return 0; + break; + } + } + if (ssd == sensor->scaler) { + switch (type) { + case ICI_EXT_SEL_TYPE_CROP: 
+ case ICI_EXT_SEL_TYPE_CROP_BOUNDS: + if (pad == CRL_PAD_SRC) + return 0; + break; + case ICI_EXT_SEL_TYPE_COMPOSE: + case ICI_EXT_SEL_TYPE_COMPOSE_BOUNDS: + if (pad == CRL_PAD_SINK) + return 0; + break; + } + } + return -EINVAL; +} + +/* + * Function main code replicated from /drivers/media/i2c/smiapp/smiapp-core.c + * Slightly modified based on the CRL Module changes + */ +static void crlmodule_get_crop_compose( + struct ici_ext_subdev *subdev, + struct ici_rect **crops, + struct ici_rect **comps) +{ + struct crl_subdev *ssd = to_crlmodule_subdev(subdev); + unsigned int i; + + /* Currently we support only 2 pads */ + BUG_ON(subdev->num_pads > CRL_PADS); + + if (crops) + for (i = 0; i < subdev->num_pads; i++) + crops[i] = &ssd->crop[i]; + if (comps) + *comps = &ssd->compose; +} + +/* + * Function main code replicated from /drivers/media/i2c/smiapp/smiapp-core.c + * Slightly modified based on the CRL Module changes + */ +static int crlmodule_get_selection( + struct ici_isys_node *node, + struct ici_pad_selection* ps) +{ + struct ici_ext_subdev *subdev = node->sd; + struct crl_subdev *ssd = to_crlmodule_subdev(subdev); + struct crl_sensor *sensor = to_crlmodule_sensor(subdev); + struct ici_rect *comp, *crops[CRL_PADS]; + struct ici_rect sink_fmt; + int ret; + + ret = __crlmodule_sel_supported(subdev, ps->pad.pad_idx, + ps->sel_type); + if (ret) + return ret; + + crlmodule_get_crop_compose(subdev, crops, &comp); + + sink_fmt = ssd->sink_fmt; + + switch (ps->sel_type) { + case ICI_EXT_SEL_TYPE_CROP_BOUNDS: + case ICI_EXT_SEL_TYPE_NATIVE: + if (ssd == sensor->pixel_array) { + ps->rect.left = ps->rect.top = 0; + ps->rect.width = + sensor->sensor_ds->sensor_limits->x_addr_max; + ps->rect.height = + sensor->sensor_ds->sensor_limits->y_addr_max; + } else if (ps->pad.pad_idx == ssd->sink_pad) { + ps->rect = sink_fmt; + } else { + ps->rect = *comp; + } + break; + case ICI_EXT_SEL_TYPE_CROP: + case ICI_EXT_SEL_TYPE_COMPOSE_BOUNDS: + ps->rect = *crops[ps->pad.pad_idx]; + 
break; + case ICI_EXT_SEL_TYPE_COMPOSE: + ps->rect = *comp; + break; + } + return 0; +} + +/* + * Function main code replicated from /drivers/media/i2c/smiapp/smiapp-core.c + * Slightly modified based on the CRL Module changes + */ +static void crlmodule_propagate( + struct ici_ext_subdev *subdev, + u32 type) +{ + struct crl_sensor *sensor = to_crlmodule_sensor(subdev); + struct crl_subdev *ssd = to_crlmodule_subdev(subdev); + struct ici_rect *comp, *crops[CRL_PADS]; + + crlmodule_get_crop_compose(subdev, crops, &comp); + + switch (type) { + case ICI_EXT_SEL_TYPE_CROP: + comp->width = crops[CRL_PAD_SINK]->width; + comp->height = crops[CRL_PAD_SINK]->height; + if (ssd == sensor->scaler) { + sensor->scale_m = 1; + } else if (ssd == sensor->binner) { + sensor->binning_horizontal = 1; + sensor->binning_vertical = 1; + } + /* Fall through */ + case ICI_EXT_SEL_TYPE_COMPOSE: + *crops[CRL_PAD_SRC] = *comp; + break; + default: + BUG(); + } +} + +/* + * Function main code replicated from /drivers/media/i2c/smiapp/smiapp-core.c + * Slightly modified based on the CRL Module changes + */ +static int crlmodule_set_compose( + struct ici_ext_subdev *subdev, + struct ici_rect *r) +{ + struct crl_sensor *sensor = to_crlmodule_sensor(subdev); + struct crl_subdev *ssd = to_crlmodule_subdev(subdev); + struct ici_rect *comp, *crops[CRL_PADS]; + + crlmodule_get_crop_compose(subdev, crops, &comp); + + r->top = 0; + r->left = 0; + + if (ssd == sensor->binner) { + sensor->binning_horizontal = crops[CRL_PAD_SINK]->width / + r->width; + sensor->binning_vertical = crops[CRL_PAD_SINK]->height / + r->height; + } else { + sensor->scale_m = crops[CRL_PAD_SINK]->width * + sensor->sensor_ds->sensor_limits->scaler_m_min / + r->width; + } + + *comp = *r; + + crlmodule_propagate(subdev, + ICI_EXT_SEL_TYPE_COMPOSE); + + crlmodule_update_current_mode(sensor); + return 0; +} + +/* + * Function main code replicated from /drivers/media/i2c/smiapp/smiapp-core.c + * Slightly modified based on the CRL Module 
changes + */ +static int crlmodule_set_crop( + struct ici_ext_subdev *subdev, + u32 pad, + struct ici_rect *r) +{ + struct crl_subdev *ssd = to_crlmodule_subdev(subdev); + struct crl_sensor *sensor = to_crlmodule_sensor(subdev); + struct ici_rect *src_size, *crops[CRL_PADS]; + + crlmodule_get_crop_compose(subdev, crops, NULL); + + if (pad == ssd->sink_pad) + src_size = &ssd->sink_fmt; + else + src_size = &ssd->compose; + + if (ssd == sensor->src && pad == CRL_PAD_SRC) { + r->left = 0; + r->top = 0; + } + + r->width = min(r->width, src_size->width); + r->height = min(r->height, src_size->height); + + r->left = min_t(s32, r->left, src_size->width - r->width); + r->top = min_t(s32, r->top, src_size->height - r->height); + + *crops[pad] = *r; + + if (ssd != sensor->pixel_array && pad == CRL_PAD_SINK) + crlmodule_propagate(subdev, + ICI_EXT_SEL_TYPE_CROP); + + /* TODO! Should we short list supported mode? */ + + return 0; +} + +/* + * Function main code replicated from /drivers/media/i2c/smiapp/smiapp-core.c + * Modified based on the CRL Module changes + */ +static int crlmodule_set_format( + struct ici_isys_node *node, + struct ici_pad_framefmt *pff) +{ + struct ici_ext_subdev *subdev = node->sd; + struct crl_sensor *sensor = to_crlmodule_sensor(subdev); + struct crl_subdev *ssd = to_crlmodule_subdev(subdev); + struct i2c_client *client = sensor->src->sd.client; + struct ici_rect *crops[CRL_PADS]; + + dev_dbg(&client->dev, "%s sd_name: %s pad: %d w: %d, h: %d code: 0x%x", + __func__, node->name, pff->pad.pad_idx, + pff->ffmt.width, + pff->ffmt.height, + pff->ffmt.pixelformat); + + mutex_lock(&sensor->mutex); + + /* Currently we only support ALTERNATE interlaced mode. 
*/ + if (pff->ffmt.field != ICI_FIELD_ALTERNATE) + pff->ffmt.field = ICI_FIELD_NONE; + pff->ffmt.colorspace = 0; + memset(pff->ffmt.reserved, 0, sizeof(pff->ffmt.reserved)); + ssd->field = pff->ffmt.field; + + if (pff->pad.pad_idx == ssd->source_pad) { + u32 code = pff->ffmt.pixelformat; + int rval = __crlmodule_get_format(subdev, pff); + + if (!rval && subdev == &sensor->src->sd) { + /* Check if this code is supported, if yes get index */ + int idx = __crlmodule_get_data_fmt_index(sensor, code); + + if (idx < 0) { + dev_err(&client->dev, "%s invalid format\n", + __func__); + mutex_unlock(&sensor->mutex); + return -EINVAL; + } + + sensor->fmt_index = idx; + rval = __crlmodule_get_format(subdev, pff); + /* TODO! validate PLL? */ + } + mutex_unlock(&sensor->mutex); + return rval; + } + + pff->ffmt.width = + clamp_t(uint32_t, pff->ffmt.width, + sensor->sensor_ds->sensor_limits->x_addr_min, + sensor->sensor_ds->sensor_limits->x_addr_max); + pff->ffmt.height = + clamp_t(uint32_t, pff->ffmt.height, + sensor->sensor_ds->sensor_limits->y_addr_min, + sensor->sensor_ds->sensor_limits->y_addr_max); + + crlmodule_get_crop_compose(subdev, crops, NULL); + + crops[ssd->sink_pad]->left = 0; + crops[ssd->sink_pad]->top = 0; + crops[ssd->sink_pad]->width = pff->ffmt.width; + crops[ssd->sink_pad]->height = pff->ffmt.height; + ssd->sink_fmt = *crops[ssd->sink_pad]; + + crlmodule_propagate(subdev, ICI_EXT_SEL_TYPE_CROP); + + crlmodule_update_current_mode(sensor); + + mutex_unlock(&sensor->mutex); + + return 0; +} + +/* + * Function main code replicated from /drivers/media/i2c/smiapp/smiapp-core.c + * Slightly modified based on the CRL Module changes + */ +static int crlmodule_set_selection( + struct ici_isys_node *node, + struct ici_pad_selection* ps) +{ + struct ici_ext_subdev *subdev = node->sd; + struct crl_sensor *sensor = to_crlmodule_sensor(subdev); + struct i2c_client *client = sensor->src->sd.client; + int ret; + + dev_dbg(&client->dev, "%s sd_name: %s sel w: %d, h: %d", + 
__func__, node->name, ps->rect.width, + ps->rect.height); + + ret = __crlmodule_sel_supported(subdev, ps->pad.pad_idx, + ps->sel_type); + if (ret) { + dev_dbg(&client->dev, + "%s sd_name: %s w: %d, h: %d not supported", + __func__, node->name, ps->rect.width, + ps->rect.height); + return ret; + } + + mutex_lock(&sensor->mutex); + + ps->rect.width = max_t(unsigned int, + sensor->sensor_ds->sensor_limits->x_addr_min, + ps->rect.width); + ps->rect.height = max_t(unsigned int, + sensor->sensor_ds->sensor_limits->y_addr_min, + ps->rect.height); + switch (ps->sel_type) { + case ICI_EXT_SEL_TYPE_CROP: + ret = crlmodule_set_crop(subdev, ps->pad.pad_idx, + &ps->rect); + break; + case ICI_EXT_SEL_TYPE_COMPOSE: + ret = crlmodule_set_compose(subdev, &ps->rect); + break; + default: + ret = -EINVAL; + } + + crlmodule_update_current_mode(sensor); + + mutex_unlock(&sensor->mutex); + return ret; +} + +static int crlmodule_start_streaming(struct crl_sensor *sensor) +{ + struct i2c_client *client = sensor->src->sd.client; + const struct crl_pll_configuration *pll; + const struct crl_csi_data_fmt *fmt; + int rval; + + dev_dbg(&client->dev, "%s start streaming pll_idx: %d fmt_idx: %d\n", + __func__, sensor->pll_index, + sensor->fmt_index); + + pll = &sensor->sensor_ds->pll_configs[sensor->pll_index]; + fmt = &sensor->sensor_ds->csi_fmts[sensor->fmt_index]; + + crlmodule_update_current_mode(sensor); + + rval = crlmodule_write_regs(sensor, fmt->regs, fmt->regs_items); + if (rval) { + dev_err(&client->dev, "%s failed to set format\n", __func__); + return rval; + } + + rval = crlmodule_write_regs(sensor, pll->pll_regs, pll->pll_regs_items); + if (rval) { + dev_err(&client->dev, "%s failed to set plls\n", __func__); + return rval; + } + + /* Write mode list */ + rval = crlmodule_write_regs(sensor, + sensor->current_mode->mode_regs, + sensor->current_mode->mode_regs_items); + if (rval) { + dev_err(&client->dev, "%s failed to set mode\n", __func__); + return rval; + } + + /* Write stream on 
list */ + rval = crlmodule_write_regs(sensor, + sensor->sensor_ds->streamon_regs, + sensor->sensor_ds->streamon_regs_items); + if (rval) { + dev_err(&client->dev, "%s failed to set stream\n", __func__); + return rval; + } + + return 0; +} + +static int crlmodule_stop_streaming(struct crl_sensor *sensor) +{ + return crlmodule_write_regs(sensor, + sensor->sensor_ds->streamoff_regs, + sensor->sensor_ds->streamoff_regs_items); +} + +static int crlmodule_set_stream( + struct ici_isys_node* node, + void* ip, + int enable) +{ + struct ici_ext_subdev *subdev = node->sd; + struct crl_sensor *sensor = to_crlmodule_sensor(subdev); + struct i2c_client *client = sensor->src->sd.client; + int rval = 0; + + mutex_lock(&sensor->mutex); + + if (sensor->streaming == enable) + goto out; + + if (enable) { + + if (sensor->msr_list) { + rval = crlmodule_apply_msrlist(client, + sensor->msr_list); + if (rval) + dev_warn(&client->dev, "msrlist write error %d\n", + rval); + } + rval = crlmodule_start_streaming(sensor); + if (!rval) + sensor->streaming = 1; + } else { + rval = crlmodule_stop_streaming(sensor); + sensor->streaming = 0; + } + +out: + mutex_unlock(&sensor->mutex); + + /* SENSOR_IDLE control cannot be set when streaming*/ + __crlmodule_enable_param(sensor, SENSOR_IDLE, enable); + + /* SENSOR_STREAMING controls cannot be set when not streaming */ + __crlmodule_enable_param(sensor, SENSOR_STREAMING, !enable); + + /* SENSOR_POWERED_ON controls does not matter about streaming. */ + __crlmodule_enable_param(sensor, SENSOR_POWERED_ON, false); + + return rval; +} + +static int crlmodule_identify_module( + struct ici_ext_subdev *subdev) +{ + struct crl_sensor *sensor = to_crlmodule_sensor(subdev); + struct i2c_client *client = sensor->src->sd.client; + unsigned int size = 0; + char *id_string; + char *temp; + int i, ret; + u32 val; + + for (i = 0; i < sensor->sensor_ds->id_reg_items; i++) + size += sensor->sensor_ds->id_regs[i].width + 1; + + /* TODO! If no ID! return success? 
*/ + if (!size) + return 0; + + /* Create string variabel to append module ID */ + id_string = kzalloc(size, GFP_KERNEL); + if (!id_string) + return -ENOMEM; + *id_string = '\0'; + + /* Go through each regs in the list and append to id_string */ + for (i = 0; i < sensor->sensor_ds->id_reg_items; i++) { + ret = crlmodule_read_reg(sensor, + sensor->sensor_ds->id_regs[i].reg, + &val); + if (ret) + goto out; + + temp = kzalloc(sensor->sensor_ds->id_regs[i].width, GFP_KERNEL); + if (!temp) { + ret = -ENOMEM; + goto out; + } + snprintf(temp, sensor->sensor_ds->id_regs[i].width, "0x%x ", + val); + strcat(id_string, temp); + snprintf(id_string, sensor->sensor_ds->id_regs[i].width, + "%s 0x%x ", temp, val); + + kfree(temp); + } + + /* TODO! Check here if this module in the supported list + * Ideally the module manufacturer and id should be in platform + * data or ACPI and here the driver should read the value from the + * register and check if this matches to any in the supported + * platform data */ + +out: + dev_dbg(&client->dev, "%s module: %s", __func__, id_string); + kfree(id_string); + if (ret) + dev_err(&client->dev, "sensor detection failed\n"); + return ret; +} + +/* + * This function executes the initialisation routines after the power on + * is successfully completed. Following operations are done + * + * Initiases registers after sensor power up - if any such list is configured + * Ctrl handler framework intialisation + */ +static int crlmodule_run_poweron_init(struct crl_sensor *sensor) +{ + struct i2c_client *client = sensor->src->sd.client; + int rval; + + dev_dbg(&client->dev, "%s set power up registers: %d\n", + __func__, sensor->sensor_ds->powerup_regs_items); + + /* Write the power up registers */ + rval = crlmodule_write_regs(sensor, sensor->sensor_ds->powerup_regs, + sensor->sensor_ds->powerup_regs_items); + if (rval) { + dev_err(&client->dev, "%s failed to set powerup registers\n", + __func__); + return rval; + } + + /* Are we still initialising...? 
If yes, return here. */ + if (!sensor->pixel_array) + return 0; + + dev_dbg(&client->dev, "%s init controls", __func__); + + + /* SENSOR_IDLE control can be set only when not streaming*/ + __crlmodule_enable_param(sensor, SENSOR_IDLE, false); + + /* SENSOR_STREAMING controls can be set only when streaming */ + __crlmodule_enable_param(sensor, SENSOR_STREAMING, true); + + /* SENSOR_POWERED_ON controls can be set after power on */ + __crlmodule_enable_param(sensor, SENSOR_POWERED_ON, false); + + mutex_lock(&sensor->mutex); + crlmodule_update_current_mode(sensor); + mutex_unlock(&sensor->mutex); + + return rval; +} + + +/* + * This function handles sensor power up routine failure because of any failed + * step in the routine. The index "i" is the index to last successfull power + * sequence entity successfull completed. This function executes the power + * senquence entities in the reverse or with undo value. + */ +static void crlmodule_undo_poweron_entities( + struct crl_sensor *sensor, + int rev_idx) +{ + struct i2c_client *client = sensor->src->sd.client; + struct crl_power_seq_entity *entity; + int idx; + + for (idx = rev_idx; idx >= 0; idx--) { + entity = &sensor->pwr_entity[idx]; + dev_dbg(&client->dev, "%s power type %d index %d\n", + __func__, entity->type, idx); + + switch (entity->type) { + case CRL_POWER_ETY_GPIO_FROM_PDATA: + gpio_set_value(sensor->platform_data->xshutdown, + entity->undo_val); + break; + case CRL_POWER_ETY_GPIO_CUSTOM: + if (entity->gpiod_priv) { + if (gpiod_cansleep(entity->gpiod_priv)) + gpiod_set_raw_value_cansleep( + entity->gpiod_priv, + entity->undo_val); + else + gpiod_set_raw_value(entity->gpiod_priv, + entity->undo_val); + } else { + gpio_set_value(entity->ent_number, + entity->undo_val); + } + break; + case CRL_POWER_ETY_REGULATOR_FRAMEWORK: + regulator_disable(entity->regulator_priv); + break; + case CRL_POWER_ETY_CLK_FRAMEWORK: + clk_disable_unprepare(sensor->xclk); + break; + default: + dev_err(&client->dev, "%s Invalid power 
type\n", + __func__); + break; + } + + if (entity->delay) + usleep_range(entity->delay, entity->delay + 10); + } +} + +static int __crlmodule_powerup_sequence(struct crl_sensor *sensor) +{ + struct i2c_client *client = sensor->src->sd.client; + struct crl_power_seq_entity *entity; + unsigned idx; + int rval; + + dev_dbg(&client->dev, "%s platform_data->xshutdown: %d\n", __func__, sensor->platform_data->xshutdown); + for (idx = 0; idx < sensor->sensor_ds->power_items; idx++) { + entity = &sensor->pwr_entity[idx]; + dev_dbg(&client->dev, "%s power type %d index %d\n", + __func__, entity->type, idx); + + switch (entity->type) { + case CRL_POWER_ETY_GPIO_FROM_PDATA: + gpio_set_value(sensor->platform_data->xshutdown, + entity->val); + break; + case CRL_POWER_ETY_GPIO_CUSTOM: + if (entity->gpiod_priv) { + if (gpiod_cansleep(entity->gpiod_priv)) + gpiod_set_raw_value_cansleep( + entity->gpiod_priv, + entity->val); + else + gpiod_set_raw_value(entity->gpiod_priv, + entity->val); + } else { + gpio_set_value(entity->ent_number, entity->val); + } + break; + case CRL_POWER_ETY_REGULATOR_FRAMEWORK: + rval = regulator_enable(entity->regulator_priv); + if (rval) { + dev_err(&client->dev, + "Failed to enable regulator: %d\n", + rval); + devm_regulator_put(entity->regulator_priv); + entity->regulator_priv = NULL; + goto error; + } + break; + case CRL_POWER_ETY_CLK_FRAMEWORK: + rval = clk_set_rate(sensor->xclk, + sensor->platform_data->ext_clk); + if (rval < 0) { + dev_err(&client->dev, + "unable to set clock freq to %u\n", + sensor->platform_data->ext_clk); + goto error; + } + if (clk_get_rate(sensor->xclk) != + sensor->platform_data->ext_clk) + dev_warn(&client->dev, + "warning: unable to set \ + accurate clock freq %u\n", + sensor->platform_data->ext_clk); + rval = clk_prepare_enable(sensor->xclk); + if (rval) { + dev_err(&client->dev, "Failed to enable \ + clock: %d\n", rval); + goto error; + } + break; + default: + dev_err(&client->dev, "Invalid power type\n"); + rval = 
-ENODEV; + goto error; + break; + } + + if (entity->delay) + usleep_range(entity->delay, entity->delay + 10); + } + + return 0; +error: + dev_err(&client->dev, "Error:Power sequece failed\n"); + if (idx > 0) + crlmodule_undo_poweron_entities(sensor, idx-1); + return rval; +} + +static int crlmodule_set_power( + struct ici_isys_node* node, + int on) +{ + struct ici_ext_subdev *subdev = node->sd; + struct crl_sensor *sensor = to_crlmodule_sensor(subdev); + struct i2c_client *client = sensor->src->sd.client; + int ret = 0; + + dev_err(&client->dev, "crlmodule_set_power %d\n", on); + if (on) { + ret = pm_runtime_get_sync(&client->dev); + dev_err(&client->dev, "crlmodule_set_power val %d\n", ret); + if (ret < 0) { + pm_runtime_put(&client->dev); + return ret; + } + } + + mutex_lock(&sensor->power_mutex); + if (on && !sensor->power_count) { + usleep_range(2000, 3000); + ret = crlmodule_run_poweron_init(sensor); + if (ret < 0) { + dev_err(&client->dev, "crlmodule_set_power err (2) %d\n", ret); + pm_runtime_put(&client->dev); + goto out; + } + } + + /* Update the power count. */ + sensor->power_count += on ? 1 : -1; + WARN_ON(sensor->power_count < 0); + +out: + mutex_unlock(&sensor->power_mutex); + + if (!on) + pm_runtime_put(&client->dev); + + dev_err(&client->dev, "crlmodule_set_power ret %d\n", ret); + return ret; +} + +/* + * Function main code replicated from /drivers/media/i2c/smiapp/smiapp-core.c + * Modified based on the CRL Module changes + */ +static int crlmodule_init_subdevs( + struct ici_ext_subdev *subdev) +{ + struct crl_sensor *sensor = to_crlmodule_sensor(subdev); + struct i2c_client *client = sensor->src->sd.client; + struct crl_subdev *prev_sd = NULL; + int i = 0; + struct crl_subdev *sd; + int rval = 0; + + dev_dbg(&client->dev, "%s\n", __func__); + + /* + * The scaler, binner and PA order matters. Sensor configuration file + * must maintain this order. PA sub dev is a must and binner and + * scaler can be omitted based on the sensor. 
But if scaler is present + * it must be the first sd. + */ + if (sensor->sensor_ds->subdevs[i].subdev_type + == CRL_SUBDEV_TYPE_SCALER) { + sensor->scaler = &sensor->ssds[sensor->ssds_used]; + sensor->ssds_used++; + i++; + } + + if (sensor->sensor_ds->subdevs[i].subdev_type + == CRL_SUBDEV_TYPE_BINNER) { + sensor->binner = &sensor->ssds[sensor->ssds_used]; + sensor->ssds_used++; + i++; + } + + if (sensor->sensor_ds->subdevs[i].subdev_type + == CRL_SUBDEV_TYPE_PIXEL_ARRAY) { + sensor->pixel_array = &sensor->ssds[sensor->ssds_used]; + sensor->ssds_used++; + i++; + } + + /* CRL MediaCTL IF driver can't handle if none of these sd's present! */ + if (!sensor->ssds_used) { + dev_err(&client->dev, "%s no subdevs present\n", __func__); + return -ENODEV; + } + + if (!sensor->sensor_ds->pll_config_items) { + dev_err(&client->dev, "%s no pll configurations\n", __func__); + return -ENODEV; + } + + /* TODO validate rest of the settings from the sensor definition file */ + + dev_dbg(&client->dev, "%s subdevs: %d\n", __func__, i); + + for (i = 0; i < sensor->ssds_used; i++) { + sd = &sensor->ssds[i]; + + sd->sensor = sensor; + if (sd == sensor->pixel_array) { + sd->npads = 1; + } else { + sd->npads = 2; + sd->source_pad = 1; + } + + sd->sink_fmt.width = + sensor->sensor_ds->sensor_limits->x_addr_max; + sd->sink_fmt.height = + sensor->sensor_ds->sensor_limits->y_addr_max; + sd->compose.width = sd->sink_fmt.width; + sd->compose.height = sd->sink_fmt.height; + sd->crop[sd->source_pad] = sd->compose; + //sd->pads[sd->source_pad].flags = ICI_PAD_FLAGS_SOURCE; + if (sd != sensor->pixel_array) { + sd->crop[sd->sink_pad] = sd->compose; + //sd->pads[sd->sink_pad].flags = ICI_PAD_FLAGS_SINK; + } + rval = init_ext_sd(client, sd, i); + if (rval) + return rval; + + if (prev_sd == NULL) { + prev_sd = sd; + continue; + } + + if (sensor->reg.create_link) { + rval = sensor->reg.create_link(&sd->sd.node, + sd->source_pad, + &prev_sd->sd.node, + prev_sd->sink_pad, + 0); + if (rval) + return rval; + 
} + prev_sd = sd; + } + + return rval; +} + +static int __init_power_resources( + struct ici_ext_subdev *subdev) +{ + struct crl_sensor *sensor = to_crlmodule_sensor(subdev); + struct i2c_client *client = sensor->src->sd.client; + struct crl_power_seq_entity *entity; + unsigned idx; + + sensor->pwr_entity = devm_kzalloc(&client->dev, + sizeof(struct crl_power_seq_entity) * + sensor->sensor_ds->power_items, GFP_KERNEL); + + if (!sensor->pwr_entity) + return -ENOMEM; + + for (idx = 0; idx < sensor->sensor_ds->power_items; idx++) + sensor->pwr_entity[idx] = + sensor->sensor_ds->power_entities[idx]; + + dev_dbg(&client->dev, "%s\n", __func__); + dev_dbg(&client->dev, "%s platform_data->xshutdown: %d\n", __func__, sensor->platform_data->xshutdown); + + for (idx = 0; idx < sensor->sensor_ds->power_items; idx++) { + int rval; + entity = &sensor->pwr_entity[idx]; + + switch (entity->type) { + case CRL_POWER_ETY_GPIO_FROM_PDATA: + if (devm_gpio_request_one(&client->dev, + sensor->platform_data->xshutdown, 0, + "CRL xshutdown") != 0) { + dev_err(&client->dev, "unable to acquire xshutdown %d\n", + sensor->platform_data->xshutdown); + return -ENODEV; + } + break; + case CRL_POWER_ETY_GPIO_CUSTOM: + if (entity->ent_name[0]) { + entity->gpiod_priv = gpiod_get(NULL, + entity->ent_name, GPIOD_OUT_LOW); + if (IS_ERR(entity->gpiod_priv)) { + dev_err(&client->dev, + "Unable to acquire custom gpio %s\n", + entity->ent_name); + entity->gpiod_priv = NULL; + return -ENODEV; + } + } else { + if (devm_gpio_request_one(&client->dev, + entity->ent_number, 0, + "CRL Custom") != 0) { + dev_err(&client->dev, "unable to acquire custom gpio %d\n", + entity->ent_number); + return -ENODEV; + } + } + break; + case CRL_POWER_ETY_REGULATOR_FRAMEWORK: + entity->regulator_priv = devm_regulator_get(&client->dev, + entity->ent_name); + if (IS_ERR(entity->regulator_priv)) { + dev_err(&client->dev, "Failed to get regulator: %s\n", + entity->ent_name); + entity->regulator_priv = NULL; + return -ENODEV; + } + 
rval = regulator_set_voltage(entity->regulator_priv, + entity->val, + entity->val); + /* Not all regulator supports voltage change */ + if (rval < 0) + dev_info(&client->dev, + "Failed to set voltage %s %d\n", + entity->ent_name, entity->val); + break; + case CRL_POWER_ETY_CLK_FRAMEWORK: + sensor->xclk = devm_clk_get(&client->dev, NULL); + if (IS_ERR(sensor->xclk)) { + dev_err(&client->dev, "Cannot get sensor clk\n"); + return -ENODEV; + } + break; + default: + dev_err(&client->dev, "Invalid Power item\n"); + return -ENODEV; + } + } + return 0; +} + +static int crlmodule_registered( + struct ici_ext_subdev_register *reg) +{ + struct ici_ext_subdev* subdev = reg->sd; + struct crl_sensor *sensor = to_crlmodule_sensor(subdev); + struct i2c_client *client = sensor->src->sd.client; + + int rval; + + if (!reg->sd || !reg->setup_node || !reg->create_link) + return -EINVAL; + + rval = __init_power_resources(subdev); + if (rval) + return -ENODEV; + + + /* Power up the sensor */ + if (pm_runtime_get_sync(&client->dev) < 0) { + pm_runtime_put(&client->dev); + return -ENODEV; + } + + /* one time init */ + rval = crlmodule_write_regs(sensor, sensor->sensor_ds->onetime_init_regs, + sensor->sensor_ds->onetime_init_regs_items); + if (rval) { + dev_err(&client->dev, "%s failed to set powerup registers\n", + __func__); + return -ENODEV; + } + + /* sensor specific init */ + if (sensor->sensor_ds->sensor_init) { + rval = sensor->sensor_ds->sensor_init(client); + + if (rval) { + dev_err(&client->dev, "%s failed to run sensor specific init\n", + __func__); + return -ENODEV; + } + } + /* Identify the module */ + rval = crlmodule_identify_module(subdev); + if (rval) { + rval = -ENODEV; + goto out; + } + + sensor->reg = *reg; + + rval = crlmodule_init_subdevs(subdev); + if (rval) + goto out; + + sensor->binning_horizontal = 1; + sensor->binning_vertical = 1; + sensor->scale_m = 1; + sensor->flip_info = CRL_FLIP_DEFAULT_NONE; + sensor->ext_ctrl_impacts_pll_selection = false; + 
sensor->ext_ctrl_impacts_mode_selection = false; + + rval = crlmodule_init_controls(sensor); + if (rval) + goto out; + + mutex_lock(&sensor->mutex); + crlmodule_update_current_mode(sensor); + mutex_unlock(&sensor->mutex); + rval = crlmodule_nvm_init(sensor); + +out: + dev_dbg(&client->dev, "%s rval: %d\n", __func__, rval); + /* crlmodule_power_off(sensor); */ + pm_runtime_put(&client->dev); + + return rval; +} + +static void crlmodule_unregistered( + struct ici_ext_subdev *subdev) +{ + struct crl_sensor *sensor = to_crlmodule_sensor(subdev); + struct i2c_client *client = sensor->src->sd.client; + dev_dbg(&client->dev, "%s\n", __func__); +} + +static int init_ext_sd(struct i2c_client *client, + struct crl_subdev *ssd, int idx) +{ + int rval; + struct ici_ext_subdev* sd = &ssd->sd; + struct crl_sensor *sensor = ssd->sensor; + char name[ICI_MAX_NODE_NAME]; + if (sensor->platform_data->suffix) + snprintf(name, + sizeof(name), "%s %c", + sensor->sensor_ds->subdevs[idx].name, + sensor->platform_data->suffix); + else + snprintf(name, + sizeof(name), "%s", + sensor->sensor_ds->subdevs[idx].name); + sd->client = client; + sd->num_pads = ssd->npads; + sd->src_pad = ssd->source_pad; + sd->set_param = crlmodule_set_param; + sd->get_param = crlmodule_get_param; + sd->get_menu_item = crlmodule_get_menu_item; + if (sensor->reg.setup_node) { + rval = sensor->reg.setup_node(sensor->reg.ipu_data, + sd, name); + if (rval) + return rval; + } + sd->node.node_set_power = crlmodule_set_power; + sd->node.node_set_streaming = crlmodule_set_stream; + sd->node.node_get_pad_supported_format = + crlmodule_enum_pixelformat; + sd->node.node_set_pad_ffmt = crlmodule_set_format; + sd->node.node_get_pad_ffmt = crlmodule_get_format; + sd->node.node_set_pad_sel = crlmodule_set_selection; + sd->node.node_get_pad_sel = crlmodule_get_selection; + + return 0; +} + +#ifdef CONFIG_PM + +static int crlmodule_runtime_suspend(struct device *dev) +{ + struct i2c_client *client = to_i2c_client(dev); + struct 
ici_ext_subdev *sd = + i2c_get_clientdata(client); + struct crl_sensor *sensor = to_crlmodule_sensor(sd); + + crlmodule_undo_poweron_entities(sensor, + sensor->sensor_ds->power_items - 1); + return 0; +} + +static int crlmodule_suspend(struct device *dev) +{ + struct i2c_client *client = to_i2c_client(dev); + struct ici_ext_subdev *sd = + i2c_get_clientdata(client); + struct crl_subdev *ssd = to_crlmodule_subdev(sd); + struct crl_sensor *sensor = ssd->sensor; + + if (sensor->streaming) + crlmodule_stop_streaming(sensor); + + crlmodule_undo_poweron_entities(sensor, + sensor->sensor_ds->power_items - 1); + return 0; +} + +static int crlmodule_runtime_resume(struct device *dev) +{ + struct i2c_client *client = to_i2c_client(dev); + struct ici_ext_subdev *sd = + i2c_get_clientdata(client); + struct crl_sensor *sensor = to_crlmodule_sensor(sd); + + return __crlmodule_powerup_sequence(sensor); +} + +static int crlmodule_resume(struct device *dev) +{ + struct i2c_client *client = to_i2c_client(dev); + struct ici_ext_subdev *sd = + i2c_get_clientdata(client); + struct crl_subdev *ssd = to_crlmodule_subdev(sd); + struct crl_sensor *sensor = ssd->sensor; + int rval; + + rval = __crlmodule_powerup_sequence(sensor); + if (!rval && sensor->power_count) + rval = crlmodule_run_poweron_init(sensor); + if (!rval && sensor->streaming) + rval = crlmodule_start_streaming(sensor); + + return rval; +} + +#else + +#define crlmodule_runtime_suspend NULL +#define crlmodule_runtime_resume NULL +#define crlmodule_suspend NULL +#define crlmodule_resume NULL + +#endif /* CONFIG_PM */ + + +static int crlmodule_probe(struct i2c_client *client, + const struct i2c_device_id *devid) +{ + struct crl_sensor *sensor; + int ret; + pr_debug("%s, entry\n", __func__); + + if (client->dev.platform_data == NULL) { + pr_err("%s, platform_data is null\n", __func__); + return -ENODEV; + } + /* TODO! 
Create the sensor based on the interface */ + sensor = devm_kzalloc(&client->dev, sizeof(*sensor), GFP_KERNEL); + if (sensor == NULL) + return -ENOMEM; + + sensor->platform_data = client->dev.platform_data; + mutex_init(&sensor->mutex); + dev_dbg(&client->dev, "%s xshutdown: %d\n", __func__, sensor->platform_data->xshutdown); + mutex_init(&sensor->power_mutex); + + ret = crlmodule_populate_ds(sensor, &client->dev); + if (ret) + return -ENODEV; + + sensor->src = &sensor->ssds[sensor->ssds_used]; + sensor->src->sensor = sensor; + + sensor->src->sd.client = client; + sensor->src->sd.do_register = crlmodule_registered; + sensor->src->sd.do_unregister = crlmodule_unregistered; + i2c_set_clientdata(client, &sensor->src->sd); + + pm_runtime_enable(&client->dev); + + /* Load IQ tuning registers from drvb file*/ + if (sensor->sensor_ds->msr_file_name) { + ret = crlmodule_load_msrlist(client, + sensor->sensor_ds->msr_file_name, + &sensor->msr_list); + if (ret) + dev_warn(&client->dev, + "msrlist loading failed. 
Ignore, move on\n"); + } else { + /* sensor will still continue streaming */ + dev_warn(&client->dev, "No msrlists associated with sensor\n"); + } + + return 0; +} + +static int crlmodule_remove(struct i2c_client *client) +{ + struct ici_ext_subdev *subdev = + i2c_get_clientdata(client); + struct crl_sensor *sensor = to_crlmodule_sensor(subdev); + unsigned int i; + + if (sensor->sensor_ds->sensor_cleanup) + sensor->sensor_ds->sensor_cleanup(client); + + for (i = 0; i < sensor->ssds_used; i++) { + struct ici_ext_subdev *sd = + &sensor->ssds[i].sd; + if (sd->do_unregister) + sd->do_unregister(sd); + } + + i2c_set_clientdata(client, NULL); + + crlmodule_nvm_deinit(sensor); + crlmodule_release_ds(sensor); + crlmodule_release_msrlist(&sensor->msr_list); + + pm_runtime_disable(&client->dev); + + return 0; +} + + +static const struct i2c_device_id crlmodule_id_table[] = { + { CRLMODULE_LITE_NAME, 0 }, + { }, +}; +MODULE_DEVICE_TABLE(i2c, crlmodule_id_table); + +static const struct dev_pm_ops crlmodule_pm_ops = { + .runtime_suspend = crlmodule_runtime_suspend, + .runtime_resume = crlmodule_runtime_resume, + .suspend = crlmodule_suspend, + .resume = crlmodule_resume, +}; + +static struct i2c_driver crlmodule_i2c_driver = { + .driver = { + .name = CRLMODULE_LITE_NAME, + .pm = &crlmodule_pm_ops, + }, + .probe = crlmodule_probe, + .remove = crlmodule_remove, + .id_table = crlmodule_id_table, +}; + +module_i2c_driver(crlmodule_i2c_driver); + +MODULE_AUTHOR("Vinod Govindapillai "); +MODULE_AUTHOR("Jouni Ukkonen "); +MODULE_AUTHOR("Tommi Franttila "); +MODULE_DESCRIPTION("Generic driver for common register list based camera sensor modules"); +MODULE_LICENSE("GPL"); diff --git a/drivers/media/i2c/crlmodule-lite/crlmodule-data.c b/drivers/media/i2c/crlmodule-lite/crlmodule-data.c new file mode 100644 index 0000000000000..846d81254ba0d --- /dev/null +++ b/drivers/media/i2c/crlmodule-lite/crlmodule-data.c @@ -0,0 +1,77 @@ +// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0) +/* + * 
Copyright (C) 2018 Intel Corporation + */ + +#include "crlmodule.h" +#include "crl_adv7481_cvbs_configuration.h" +#include "crl_adv7481_hdmi_configuration.h" +#include "crl_adv7481_eval_configuration.h" +#include "crl_magna_configuration_ti964.h" +#include "crl_ov10635_configuration.h" +#include "crl_ar0231at_configuration.h" + +static const struct crlmodule_sensors supported_sensors[] = { + { "ADV7481 CVBS", "adv7481_cvbs", &adv7481_cvbs_crl_configuration }, + { "ADV7481 HDMI", "adv7481_hdmi", &adv7481_hdmi_crl_configuration }, + { "ADV7481_EVAL", "adv7481_eval", &adv7481_eval_crl_configuration }, + { "ADV7481B_EVAL", "adv7481b_eval", &adv7481b_eval_crl_configuration }, + { "MAGNA_TI964", "magna_ti964", &magna_ti964_crl_configuration }, + { "i2c-ADV7481A:00", "adv7481_hdmi", &adv7481_hdmi_crl_configuration }, + { "i2c-ADV7481B:00", "adv7481_cvbs", &adv7481_cvbs_crl_configuration }, + { "OV10635", "ov10635", &ov10635_crl_configuration }, + { "AR0231AT", "ar0231at", &ar0231at_crl_configuration }, +}; + +/* + * Function to populate the CRL data structure from the sensor configuration + * definition file + */ +int crlmodule_populate_ds(struct crl_sensor *sensor, struct device *dev) +{ + unsigned int i; + + for (i = 0; i < ARRAY_SIZE(supported_sensors); i++) { + /* Check the ACPI supported modules */ + if (!strcmp(dev_name(dev), supported_sensors[i].pname)) { + sensor->sensor_ds = supported_sensors[i].ds; + dev_info(dev, "%s %s selected\n", + __func__, supported_sensors[i].name); + return 0; + }; + + /* Check the non ACPI modules */ + if (!strcmp(sensor->platform_data->module_name, + supported_sensors[i].pname)) { + sensor->sensor_ds = supported_sensors[i].ds; + dev_info(dev, "%s %s selected\n", + __func__, supported_sensors[i].name); + return 0; + }; + } + + dev_err(dev, "%s No suitable configuration found for %s\n", + __func__, dev_name(dev)); + return -EINVAL; +} + +/* + * Function validate the contents CRL data structure to check if all the + * required fields are 
filled and are according to the limits. + */ +int crlmodule_validate_ds(struct crl_sensor *sensor) +{ + /* TODO! Revisit this. */ + return 0; +} + +/* Function to free all resources allocated for the CRL data structure */ +void crlmodule_release_ds(struct crl_sensor *sensor) +{ + /* + * TODO! Revisit this. + * Place for cleaning all the resources used for the generation + * of CRL data structure. + */ +} + diff --git a/drivers/media/i2c/crlmodule-lite/crlmodule-msrlist.c b/drivers/media/i2c/crlmodule-lite/crlmodule-msrlist.c new file mode 100644 index 0000000000000..c2ad74e59be98 --- /dev/null +++ b/drivers/media/i2c/crlmodule-lite/crlmodule-msrlist.c @@ -0,0 +1,160 @@ +// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0) +/* + * Copyright (C) 2018 Intel Corporation + */ + +#include +#include +#include "crlmodule-msrlist.h" +#include "crlmodule.h" + +/* + * + * DRVB file is part of the old structure of tagged + * binary container, which is used as such in crlmodule. + * Changes needs to be done in cameralibs to remove the + * tagged structure and convert to untagged drvb format. + * Below are the tagged binary data container structure + * definitions. Most of it is copied from libmsrlisthelper.c + * and some changes done for crlmodule. + * + */ + +static int crlmodule_write_msrlist(struct i2c_client *client, u8 *bufptr, + unsigned int size) +{ + /* + * + * The configuration data contains any number of sequences where + * the first byte (that is, u8) that marks the number of bytes + * in the sequence to follow, is indeed followed by the indicated + * number of bytes of actual data to be written to sensor. + * By convention, the first two bytes of actual data should be + * understood as an address in the sensor address space (hibyte + * followed by lobyte) where the remaining data in the sequence + * will be written. 
+ * + */ + + u8 *ptr = bufptr; + int ret; + + while (ptr < bufptr + size) { + struct i2c_msg msg = { + .addr = client->addr, + .flags = 0, + }; + + msg.len = *ptr++; + msg.buf = ptr; + ptr += msg.len; + + if (ptr > bufptr + size) + return -EINVAL; + + ret = i2c_transfer(client->adapter, &msg, 1); + if (ret < 0) { + dev_err(&client->dev, "i2c write error: %d", ret); + return ret; + } + } + return 0; +} + +static int crlmodule_parse_msrlist(struct i2c_client *client, u8 *buffer, + unsigned int size) +{ + u8 *endptr8 = buffer + size; + int ret; + unsigned int dataset = 0; + struct tbd_data_record_header *header = + (struct tbd_data_record_header *)buffer; + + do { + + if ((u8 *)header + sizeof(*header) > endptr8) + return -EINVAL; + + if ((u8 *)header + header->data_offset + + header->data_size > endptr8) + return -EINVAL; + + dataset++; + + if (header->data_size && (header->flags & 1)) { + + ret = crlmodule_write_msrlist(client, + buffer + header->data_offset, + header->data_size); + if (ret) + return ret; + } + header = (struct tbd_data_record_header *)(buffer + + header->next_offset); + } while (header->next_offset); + + return 0; +} + + +int crlmodule_apply_msrlist(struct i2c_client *client, + const struct firmware *fw) +{ + struct tbd_header *header; + struct tbd_record_header *record; + + header = (struct tbd_header *)fw->data; + record = (struct tbd_record_header *)(header + 1); + + if (record->size && record->class_id != TBD_CLASS_DRV_ID) + return -EINVAL; + + return crlmodule_parse_msrlist(client, (u8 *)(record + 1), + record->size); +} + + +int crlmodule_load_msrlist(struct i2c_client *client, char *name, + const struct firmware **fw) +{ + + struct tbd_header *header; + struct tbd_record_header *record; + int ret = -ENOENT; + + ret = request_firmware(fw, name, &client->dev); + if (ret) { + dev_err(&client->dev, + "Error %d while requesting firmware %s\n", + ret, name); + return ret; + } + header = (struct tbd_header *)(*fw)->data; + + if (sizeof(*header) > 
(*fw)->size) + goto out; + + /* Check that we have drvb block. */ + if (memcmp(&header->tag, "DRVB", 4)) + goto out; + + if (header->size != (*fw)->size) + goto out; + + if (sizeof(*header) + sizeof(*record) > (*fw)->size) + goto out; + + + return 0; + +out: + crlmodule_release_msrlist(fw); + return ret; +} + + +void crlmodule_release_msrlist(const struct firmware **fw) +{ + release_firmware(*fw); + *fw = NULL; +} diff --git a/drivers/media/i2c/crlmodule-lite/crlmodule-msrlist.h b/drivers/media/i2c/crlmodule-lite/crlmodule-msrlist.h new file mode 100644 index 0000000000000..2b296c9f9d747 --- /dev/null +++ b/drivers/media/i2c/crlmodule-lite/crlmodule-msrlist.h @@ -0,0 +1,53 @@ +/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0) */ +/* + * Copyright (C) 2018 Intel Corporation + */ + +#ifndef __CRLMODULE_MSRLIST_H__ +#define __CRLMODULE_MSRLIST_H__ + +#define TBD_CLASS_DRV_ID 2 + +struct i2c_client; +struct firmware; + +struct tbd_header { + /* Tag identifier, also checks endianness */ + u32 tag; + /* Container size including this header */ + u32 size; + /* Version, format 0xYYMMDDVV */ + u32 version; + /* Revision, format 0xYYMMDDVV */ + u32 revision; + /* Configuration flag bits set */ + u32 config_bits; + /* Global checksum, header included */ + u32 checksum; +} __packed; + +struct tbd_record_header { + /* Size of record including header */ + u32 size; + /* tbd_format_t enumeration values used */ + u8 format_id; + /* Packing method; 0 = no packing */ + u8 packing_key; + /* tbd_class_t enumeration values used */ + u16 class_id; +} __packed; + +struct tbd_data_record_header { + u16 next_offset; + u16 flags; + u16 data_offset; + u16 data_size; +} __packed; + +int crlmodule_load_msrlist(struct i2c_client *client, char *name, + const struct firmware **fw); +int crlmodule_apply_msrlist(struct i2c_client *client, + const struct firmware *fw); +void crlmodule_release_msrlist(const struct firmware **fw); + +#endif /* ifndef __CRLMODULE_MSRLIST_H__ */ diff --git 
a/drivers/media/i2c/crlmodule-lite/crlmodule-nvm.c b/drivers/media/i2c/crlmodule-lite/crlmodule-nvm.c new file mode 100644 index 0000000000000..935a967a525a8 --- /dev/null +++ b/drivers/media/i2c/crlmodule-lite/crlmodule-nvm.c @@ -0,0 +1,139 @@ +// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0) +/* + * Copyright (C) 2018 Intel Corporation + */ + +#include +#include "crlmodule.h" +#include "crlmodule-nvm.h" +#include "crlmodule-regs.h" + +static ssize_t crlmodule_sysfs_nvm_read(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct ici_ext_subdev *subdev = + i2c_get_clientdata(to_i2c_client(dev)); + struct crl_sensor *sensor = to_crlmodule_sensor(subdev); + + memcpy(buf, sensor->nvm_data, sensor->nvm_size); + return sensor->nvm_size; +} + +DEVICE_ATTR(nvm, S_IRUGO, crlmodule_sysfs_nvm_read, NULL); + +static unsigned int crlmodule_get_nvm_size(struct crl_sensor *sensor) +{ + + struct i2c_client *client = sensor->src->sd.client; + unsigned int i, size = 0; + + for (i = 0; i < sensor->sensor_ds->crl_nvm_info.nvm_blobs_items; i++) + size += sensor->sensor_ds->crl_nvm_info.nvm_config[i].size; + + if (size > PAGE_SIZE) { + dev_err(&client->dev, "nvm size too big\n"); + size = 0; + } + return size; +} + +static int crlmodule_get_nvm_data(struct crl_sensor *sensor) +{ + struct i2c_client *client = sensor->src->sd.client; + int i; + int rval = 0; + + u8 *nvm_data = sensor->nvm_data; + + if (sensor->sensor_ds->crl_nvm_info.nvm_preop_regs_items) { + dev_dbg(&client->dev, + "%s perform pre-operations\n", __func__); + + rval = crlmodule_write_regs( + sensor, + sensor->sensor_ds->crl_nvm_info.nvm_preop_regs, + sensor->sensor_ds->crl_nvm_info.nvm_preop_regs_items); + if (rval) { + dev_err(&client->dev, + "failed to perform nvm pre-operations\n"); + return rval; + } + } + + for (i = 0; i < sensor->sensor_ds->crl_nvm_info.nvm_blobs_items; i++) { + + dev_dbg(&client->dev, + "%s read blob %d dev_addr: 0x%x start_addr: 0x%x size: %d", + __func__, i, + 
sensor->sensor_ds->crl_nvm_info.nvm_config->dev_addr, + sensor->sensor_ds->crl_nvm_info.nvm_config->start_addr, + sensor->sensor_ds->crl_nvm_info.nvm_config->size); + + crlmodule_block_read(sensor, + sensor->sensor_ds->crl_nvm_info.nvm_config->dev_addr, + sensor->sensor_ds->crl_nvm_info.nvm_config->start_addr, + sensor->sensor_ds->crl_nvm_info.nvm_flags + & CRL_NVM_ADDR_MODE_MASK, + sensor->sensor_ds->crl_nvm_info.nvm_config->size, + nvm_data); + + nvm_data += sensor->sensor_ds->crl_nvm_info.nvm_config->size; + sensor->sensor_ds->crl_nvm_info.nvm_config++; + } + + if (sensor->sensor_ds->crl_nvm_info.nvm_postop_regs_items) { + dev_dbg(&client->dev, "%s perform post-operations\n", + __func__); + rval = crlmodule_write_regs( + sensor, + sensor->sensor_ds->crl_nvm_info.nvm_postop_regs, + sensor->sensor_ds->crl_nvm_info.nvm_postop_regs_items); + if (rval) { + dev_err(&client->dev, + "failed to perform nvm post-operations\n"); + return rval; + } + } + return rval; +} + +int crlmodule_nvm_init(struct crl_sensor *sensor) +{ + struct i2c_client *client = sensor->src->sd.client; + unsigned int size = crlmodule_get_nvm_size(sensor); + int rval; + + if (size) { + sensor->nvm_data = devm_kzalloc(&client->dev, size, GFP_KERNEL); + if (sensor->nvm_data == NULL) { + dev_err(&client->dev, "nvm buf allocation failed\n"); + return -ENOMEM; + } + sensor->nvm_size = size; + + rval = crlmodule_get_nvm_data(sensor); + if (rval) + goto err; + if (device_create_file(&client->dev, &dev_attr_nvm) != 0) { + dev_err(&client->dev, "sysfs nvm entry failed\n"); + rval = -EBUSY; + goto err; + } + } + + return 0; +err: + sensor->nvm_size = 0; + return rval; +} + +void crlmodule_nvm_deinit(struct crl_sensor *sensor) +{ + struct i2c_client *client = sensor->src->sd.client; + + if (sensor->nvm_size) { + device_remove_file(&client->dev, &dev_attr_nvm); + sensor->nvm_size = 0; + } +} diff --git a/drivers/media/i2c/crlmodule-lite/crlmodule-nvm.h b/drivers/media/i2c/crlmodule-lite/crlmodule-nvm.h new file 
mode 100644 index 0000000000000..9cbabfa950bd5 --- /dev/null +++ b/drivers/media/i2c/crlmodule-lite/crlmodule-nvm.h @@ -0,0 +1,21 @@ +/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0) */ +/* + * Copyright (C) 2018 Intel Corporation + */ + +#ifndef __CRLMODULE_NVM_H_ +#define __CRLMODULE_NVM_H_ + +#include "crlmodule.h" + +#define CRL_NVM_ADDR_MODE_8BIT 0x00000001 +#define CRL_NVM_ADDR_MODE_16BIT 0x00000002 + +#define CRL_NVM_ADDR_MODE_MASK (CRL_NVM_ADDR_MODE_8BIT | \ + CRL_NVM_ADDR_MODE_16BIT) + + +int crlmodule_nvm_init(struct crl_sensor *sensor); +void crlmodule_nvm_deinit(struct crl_sensor *sensor); + +#endif /* __CRLMODULE_NVM_H_ */ diff --git a/drivers/media/i2c/crlmodule-lite/crlmodule-regs.c b/drivers/media/i2c/crlmodule-lite/crlmodule-regs.c new file mode 100644 index 0000000000000..d7b6d01814100 --- /dev/null +++ b/drivers/media/i2c/crlmodule-lite/crlmodule-regs.c @@ -0,0 +1,330 @@ +// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0) +/* + * Copyright (C) 2018 Intel Corporation + */ + +#include + +#include "crlmodule.h" +#include "crlmodule-nvm.h" +#include "crlmodule-regs.h" + +static int crlmodule_i2c_read(struct crl_sensor *sensor, u16 dev_i2c_addr, u16 reg, + u8 len, u32 *val) +{ + struct i2c_client *client = sensor->src->sd.client; + struct i2c_msg msg[2]; + unsigned char data[4]; + int r; + + dev_dbg(&client->dev, "%s reg, len: [0x%04x, %d]", __func__, reg, len); + + if (len != CRL_REG_LEN_08BIT && len != CRL_REG_LEN_16BIT && + len != CRL_REG_LEN_24BIT && len != CRL_REG_LEN_32BIT) + return -EINVAL; + + if (dev_i2c_addr == CRL_I2C_ADDRESS_NO_OVERRIDE) + msg[0].addr = client->addr; + else + msg[0].addr = dev_i2c_addr; + + msg[1].addr = msg[0].addr; + + msg[0].flags = 0; + msg[0].buf = data; + + if (sensor->sensor_ds->addr_len == CRL_ADDR_7BIT) { + msg[0].addr = msg[0].addr>>1; + msg[1].addr = msg[1].addr>>1; + } + + if ((sensor->sensor_ds->addr_len == CRL_ADDR_8BIT) || + (sensor->sensor_ds->addr_len == CRL_ADDR_7BIT)) { + data[0] = (u8) (reg & 
0xff); + msg[0].len = 1; + } else { + /* high byte goes out first */ + data[0] = (u8) (reg >> 8); + data[1] = (u8) (reg & 0xff); + msg[0].len = 2; + } + + msg[1].flags = I2C_M_RD; + msg[1].buf = data; + msg[1].len = len; + + r = i2c_transfer(client->adapter, msg, 2); + + if (r < 0) { + goto err; + } + + *val = 0; + /* high byte comes first */ + switch (len) { + case CRL_REG_LEN_32BIT: + *val = (data[0] << 24) + (data[1] << 16) + (data[2] << 8) + + data[3]; + break; + case CRL_REG_LEN_24BIT: + *val = (data[0] << 16) + (data[1] << 8) + data[2]; + break; + case CRL_REG_LEN_16BIT: + *val = (data[0] << 8) + data[1]; + break; + case CRL_REG_LEN_08BIT: + *val = data[0]; + break; + } + + return 0; + +err: + dev_err(&client->dev, "read from offset 0x%x error %d\n", reg, r); + + return r; +} + +static int crlmodule_i2c_write(struct crl_sensor *sensor, u16 dev_i2c_addr, + u16 reg, u8 len, u32 val) +{ + struct i2c_client *client = sensor->src->sd.client; + struct i2c_msg msg; + unsigned char data[6]; + unsigned int retries; + int r; + unsigned char *data_offset; + + if (len != CRL_REG_LEN_08BIT && len != CRL_REG_LEN_16BIT && + len != CRL_REG_LEN_24BIT && len != CRL_REG_LEN_32BIT) + return -EINVAL; + + if (dev_i2c_addr == CRL_I2C_ADDRESS_NO_OVERRIDE) + msg.addr = client->addr; + else + msg.addr = dev_i2c_addr; + + msg.flags = 0; /* Write */ + msg.buf = data; + + if (sensor->sensor_ds->addr_len == CRL_ADDR_7BIT) + msg.addr = msg.addr>>1; + if ((sensor->sensor_ds->addr_len == CRL_ADDR_8BIT) || + (sensor->sensor_ds->addr_len == CRL_ADDR_7BIT)) { + data[0] = (u8) (reg & 0xff); + msg.len = 1 + len; + data_offset = &data[1]; + } else { + /* high byte goes out first */ + data[0] = (u8) (reg >> 8); + data[1] = (u8) (reg & 0xff); + msg.len = 2 + len; + data_offset = &data[2]; + } + + dev_dbg(&client->dev, "%s len reg, val: [%d, 0x%04x, 0x%04x]", + __func__, len, reg, val); + + switch (len) { + case CRL_REG_LEN_08BIT: + data_offset[0] = val; + break; + case CRL_REG_LEN_16BIT: + 
data_offset[0] = val >> 8; + data_offset[1] = val; + break; + case CRL_REG_LEN_24BIT: + data_offset[0] = val >> 16; + data_offset[1] = val >> 8; + data_offset[2] = val; + break; + case CRL_REG_LEN_32BIT: + data_offset[0] = val >> 24; + data_offset[1] = val >> 16; + data_offset[2] = val >> 8; + data_offset[3] = val; + break; + } + + for (retries = 0; retries < 5; retries++) { + /* + * Due to unknown reason sensor stops responding. This + * loop is a temporaty solution until the root cause + * is found. + */ + r = i2c_transfer(client->adapter, &msg, 1); + if (r == 1) { + if (retries) + dev_err(&client->dev, + "sensor i2c stall encountered. retries: %d\n", + retries); + return 0; + } + + usleep_range(2000, 2000); + } + + dev_err(&client->dev, + "wrote 0x%x to offset 0x%x error %d\n", val, reg, r); + + return r; +} + +int crlmodule_read_reg(struct crl_sensor *sensor, + const struct crl_register_read_rep reg, u32 *val) +{ + return crlmodule_i2c_read(sensor, reg.dev_i2c_addr, reg.address, + reg.len, val); +} + +int crlmodule_write_regs(struct crl_sensor *sensor, + const struct crl_register_write_rep *regs, int len) +{ + struct i2c_client *client = sensor->src->sd.client; + unsigned int i; + int ret; + u32 val; + + for (i = 0; i < len; i++) { + /* + * Sensor setting sequence may need some delay. + * delay value is specified by reg.val field + */ + if (regs[i].len == CRL_REG_LEN_DELAY) { + msleep(regs[i].val); + continue; + } + /* + * If the same register is being used for two settings, updating + * one value should not overwrite the other one. Such registers + * must be marked as CRL_REG_READ_AND_UPDATE. 
For such registers + * first read the register and update it + */ + val = regs[i].val; + if (regs[i].len & CRL_REG_READ_AND_UPDATE) { + ret = crlmodule_i2c_read(sensor, regs[i].dev_i2c_addr, + regs[i].address, + regs[i].len & CRL_REG_LEN_READ_MASK, &val); + if (ret) + return ret; + val |= regs[i].val; + } + + ret = crlmodule_i2c_write(sensor, regs[i].dev_i2c_addr, + regs[i].address, + regs[i].len & CRL_REG_LEN_READ_MASK, + val); + if (ret < 0) { + dev_err(&client->dev, + "error %d writing reg 0x%4.4x, val 0x%2.2x", + ret, regs[i].address, regs[i].val); + return ret; + } + }; + + return 0; +} + +int crlmodule_write_reg(struct crl_sensor *sensor, u16 dev_i2c_addr, u16 reg, + u8 len, u32 mask, u32 val) +{ + struct i2c_client *client = sensor->src->sd.client; + int ret; + u32 val2; + + /* + * Sensor setting sequence may need some delay. + * delay value is specified by reg.val field + */ + if (len == CRL_REG_LEN_DELAY) { + msleep(val); + return 0; + } + + /* + * If the same register is being used for two settings, updating + * one value should not overwrite the other one. Such registers + * must be marked as CRL_REG_READ_AND_UPDATE. 
For such registers + * first read the register and update it + */ + if (len & CRL_REG_READ_AND_UPDATE) { + u32 tmp; + + ret = crlmodule_i2c_read(sensor, dev_i2c_addr, reg, + len & CRL_REG_LEN_READ_MASK, &val2); + if (ret) + return ret; + + tmp = val2 & ~mask; + tmp |= val & mask; + val = tmp; + } else { + val &= mask; + } + + ret = crlmodule_i2c_write(sensor, dev_i2c_addr, reg, + len & CRL_REG_LEN_READ_MASK, val); + if (ret < 0) { + dev_err(&client->dev, + "error %d writing reg 0x%4.4x, val 0x%2.2x", + ret, reg, val); + return ret; + } + + return 0; +} + +int crlmodule_block_read(struct crl_sensor *sensor, u16 dev_i2c_addr, u16 addr, + u8 addr_mode, u16 len, u8 *buf) +{ + struct i2c_client *client = sensor->src->sd.client; + struct i2c_msg msg[2]; + u8 data[2]; + u16 offset = 0; + int r; + + memset(msg, 0, sizeof(msg)); + + if (dev_i2c_addr == CRL_I2C_ADDRESS_NO_OVERRIDE) { + msg[0].addr = client->addr; + msg[1].addr = client->addr; + } else { + msg[0].addr = dev_i2c_addr; + msg[1].addr = dev_i2c_addr; + } + + if (addr_mode & CRL_NVM_ADDR_MODE_8BIT) + msg[0].len = 1; + else if (addr_mode & CRL_NVM_ADDR_MODE_16BIT) + msg[0].len = 2; + else + return -EINVAL; + + msg[0].flags = 0; + msg[1].flags = I2C_M_RD; + + while (offset < len) { + if (addr_mode & CRL_NVM_ADDR_MODE_8BIT) { + data[0] = addr & 0xff; + } else { + data[0] = (addr >> 8) & 0xff; + data[1] = addr & 0xff; + } + + msg[0].buf = data; + msg[1].len = min(CRLMODULE_I2C_BLOCK_SIZE, len - offset); + msg[1].buf = &buf[offset]; + r = i2c_transfer(client->adapter, msg, ARRAY_SIZE(msg)); + if (r != ARRAY_SIZE(msg)) { + if (r >= 0) + r = -EIO; + goto err; + } + addr += msg[1].len; + offset += msg[1].len; + } + return 0; +err: + dev_err(&client->dev, "read from offset 0x%x error %d\n", offset, r); + return r; +} diff --git a/drivers/media/i2c/crlmodule-lite/crlmodule-regs.h b/drivers/media/i2c/crlmodule-lite/crlmodule-regs.h new file mode 100644 index 0000000000000..45341a16025d8 --- /dev/null +++ 
b/drivers/media/i2c/crlmodule-lite/crlmodule-regs.h @@ -0,0 +1,24 @@ +/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0) */ +/* + * Copyright (C) 2018 Intel Corporation + */ + +#ifndef __CRLMODULE_REGS_H_ +#define __CRLMODULE_REGS_H_ + +struct crl_sensor; +struct crl_register_read_rep; +struct crl_register_write_rep; + +#define CRLMODULE_I2C_BLOCK_SIZE 0x20 + +int crlmodule_read_reg(struct crl_sensor *sensor, + const struct crl_register_read_rep reg, u32 *val); +int crlmodule_write_regs(struct crl_sensor *sensor, + const struct crl_register_write_rep *regs, int len); +int crlmodule_write_reg(struct crl_sensor *sensor, u16 dev_i2c_addr, u16 reg, + u8 len, u32 mask, u32 val); +int crlmodule_block_read(struct crl_sensor *sensor, u16 dev_i2c_addr, u16 addr, + u8 addr_mode, u16 len, u8 *buf); + +#endif /* __CRLMODULE_REGS_H_ */ diff --git a/drivers/media/i2c/crlmodule-lite/crlmodule-sensor-ds.h b/drivers/media/i2c/crlmodule-lite/crlmodule-sensor-ds.h new file mode 100644 index 0000000000000..e34deb2430092 --- /dev/null +++ b/drivers/media/i2c/crlmodule-lite/crlmodule-sensor-ds.h @@ -0,0 +1,553 @@ +/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0) */ +/* + * Copyright (C) 2018 Intel Corporation + */ + +#ifndef __CRLMODULE_SENSOR_DS_H_ +#define __CRLMODULE_SENSOR_DS_H_ + +#include "crlmodule.h" + +#define CRL_SUBDEVS 3 + +/* Index for subdevs in any structure with multiple SDs */ +#define CRL_SD_PA_INDEX 0 +#define CRL_SD_BINNER_INDEX 1 +#define CRL_SD_SCALER_INDEX 2 + +#define CRL_REG_LEN_08BIT 1 +#define CRL_REG_LEN_16BIT 2 +#define CRL_REG_LEN_24BIT 3 +#define CRL_REG_LEN_32BIT 4 + +#define CRL_REG_READ_AND_UPDATE (1 << 3) +#define CRL_REG_LEN_READ_MASK 0x07 +#define CRL_REG_LEN_DELAY 0x10 + +#define CRL_FLIP_DEFAULT_NONE 0 +#define CRL_FLIP_HFLIP 1 +#define CRL_FLIP_VFLIP 2 +#define CRL_FLIP_HFLIP_VFLIP 3 + +#define CRL_FLIP_HFLIP_MASK 0xfe +#define CRL_FLIP_VFLIP_MASK 0xfd + +#define CRL_PIXEL_ORDER_GRBG 0 +#define CRL_PIXEL_ORDER_RGGB 1 +#define 
CRL_PIXEL_ORDER_BGGR 2 +#define CRL_PIXEL_ORDER_GBRG 3 +#define CRL_PIXEL_ORDER_IGNORE 255 + +/* Flag to notify configuration selction imact from Ctrls */ +#define CRL_IMPACTS_NO_IMPACT 0 +#define CRL_IMPACTS_PLL_SELECTION (1 << 1) +#define CRL_IMPACTS_MODE_SELECTION (1 << 2) + +/* + * In crl_dynamic_entity::entity_type is denoted by bits 6 and 7 + * 0 -> crl_dynamic_entity:entity_value is a constant + * 1 -> crl_dynamic_entity:entity_value is a referene to variable + * 2 -> crl_dynamic_entity:entity_value is a ctrl value + * 3 -> crl_dynamic_entity:entity_value is a 8 bit register address + */ +enum crl_dynamic_entity_type { + CRL_DYNAMIC_VAL_OPERAND_TYPE_CONST = 0, + CRL_DYNAMIC_VAL_OPERAND_TYPE_VAR_REF, + CRL_DYNAMIC_VAL_OPERAND_TYPE_CTRL_VAL, + CRL_DYNAMIC_VAL_OPERAND_TYPE_REG_VAL, /* Only 8bit registers */ +}; + +/* + * For some combo device which has some devices inside itself with different + * i2c address, adding flag to specify whether current device needs i2c + * address override. + * For back-compatibility, making flag equals 0. So existing sensor configure + * doesn't need to be modified. + */ +#define CRL_I2C_ADDRESS_NO_OVERRIDE 0 + +struct crl_sensor; +struct i2c_client; + +enum crl_subdev_type { + CRL_SUBDEV_TYPE_SCALER, + CRL_SUBDEV_TYPE_BINNER, + CRL_SUBDEV_TYPE_PIXEL_ARRAY, +}; + +enum crl_ctrl_op_type { + CRL_CTRL_SET_OP, + CRL_CTRL_GET_OP, +}; + +enum crl_ctrl_update_context { + SENSOR_IDLE, /* Powered on. 
But not streamind */ + SENSOR_STREAMING, /* Sensor streaming */ + SENSOR_POWERED_ON, /* streaming or idle */ +}; + +enum crl_operators { + CRL_BITWISE_AND = 0, + CRL_BITWISE_OR, + CRL_BITWISE_LSHIFT, + CRL_BITWISE_RSHIFT, + CRL_BITWISE_XOR, + CRL_BITWISE_COMPLEMENT, + CRL_ADD, + CRL_SUBTRACT, + CRL_MULTIPLY, + CRL_DIV, + CRL_ASSIGNMENT, +}; + +/* Replicated from videodev2.h */ +enum crl_ctrl_type { + CRL_CTRL_TYPE_INTEGER = 1, + CRL_CTRL_TYPE_BOOLEAN, + CRL_CTRL_TYPE_MENU_INT, + CRL_CTRL_TYPE_MENU_ITEMS, + CRL_CTRL_TYPE_BUTTON, + CRL_CTRL_TYPE_INTEGER64, + CRL_CTRL_TYPE_CTRL_CLASS, + CRL_CTRL_TYPE_CUSTOM, +}; + +enum crl_addr_len { + CRL_ADDR_16BIT = 0, + CRL_ADDR_8BIT, + CRL_ADDR_7BIT, +}; + +enum crl_operands { + CRL_CONSTANT = 0, + CRL_VARIABLE, + CRL_CONTROL, +}; + +/* References to the CRL driver member variables */ +enum crl_member_data_reference_ids { + CRL_VAR_REF_OUTPUT_WIDTH = 1, + CRL_VAR_REF_OUTPUT_HEIGHT, + CRL_VAR_REF_PA_CROP_WIDTH, + CRL_VAR_REF_PA_CROP_HEIGHT, + CRL_VAR_REF_FRAME_TIMING_WIDTH, + CRL_VAR_REF_FRAME_TIMING_HEIGHT, + CRL_VAR_REF_BINNER_WIDTH, + CRL_VAR_REF_BINNER_HEIGHT, + CRL_VAR_REF_H_BINN_FACTOR, + CRL_VAR_REF_V_BINN_FACTOR, + CRL_VAR_REF_SCALE_FACTOR, + CRL_VAR_REF_BITSPERPIXEL, + CRL_VAR_REF_PIXELRATE_PA, + CRL_VAR_REF_PIXELRATE_CSI, + CRL_VAR_REF_PIXELRATE_LINK_FREQ, +}; + +enum crl_frame_desc_type { + CRL_MBUS_FRAME_DESC_TYPE_PLATFORM, + CRL_MBUS_FRAME_DESC_TYPE_PARALLEL, + CRL_MBUS_FRAME_DESC_TYPE_CCP2, + CRL_MBUS_FRAME_DESC_TYPE_CSI2, +}; + +enum crl_pwr_ent_type { + CRL_POWER_ETY_GPIO_FROM_PDATA = 1, + CRL_POWER_ETY_GPIO_CUSTOM, + CRL_POWER_ETY_REGULATOR_FRAMEWORK, + CRL_POWER_ETY_CLK_FRAMEWORK, +}; + +struct crl_dynamic_entity { + enum crl_dynamic_entity_type entity_type; + u32 entity_val; +}; + +struct crl_arithmetic_ops { + enum crl_operators op; + struct crl_dynamic_entity operand; +}; + +struct crl_dynamic_calculated_entity { + u8 ops_items; + struct crl_arithmetic_ops *ops; +}; + +struct crl_register_write_rep { + u16 
address; + u8 len; + u32 val; + u16 dev_i2c_addr; + u32 mask; +}; + +struct crl_register_read_rep { + u16 address; + u8 len; + u32 mask; + u16 dev_i2c_addr; +}; + +/* + * crl_dynamic_register_access is used mainly in the ctrl context. + * This is intended to provide some generic arithmetic operations on the values + * to be written to a control's register or on the values read from a register. + * These arithmetic operations are controlled using struct crl_arithmetic_ops. + * + * One important information is that this structure behave differently for the + * set controls and volatile get controls. + * + * For the set control operation, the usage of the members are straight forward. + * The set control can result into multiple register write operations. Hence + * there can be more than one crl_dynamic_register_access entries associated + * with a control which results into separate register writes. + * + * But for the volatile get control operation, where a control is used + * to query read only information from the sensor, there could be only one + * crl_dynamic_register_access entry. Because the result of a get control is + * a single value. crl_dynamic_register_access.address, len and mask values are + * not used in volatile get control context. Instead all the needed information + * must be encoded into member -> ops (struct crl_arithmetic_ops) + */ +struct crl_dynamic_register_access { + u16 address; + u8 len; + u32 mask; + u8 ops_items; + struct crl_arithmetic_ops *ops; + u16 dev_i2c_addr; +}; + +struct crl_sensor_detect_config { + struct crl_register_read_rep reg; /* Register to read */ + unsigned int width; /* width of the value in chars*/ +}; + +struct crl_sensor_subdev_config { + enum crl_subdev_type subdev_type; + char name[32]; +}; + +enum crl_ctrl_flag { + CRL_CTRL_FLAG_UPDATE = 1, + CRL_CTRL_FLAG_READ_ONLY = 2, + CRL_CTRL_FLAG_WRITE_ONLY = 4, +}; + +/* + * The ctrl id value pair which should be compared when selecting a + * configuration. 
This gives flexibility to provide any data through set ctrl + * and provide selection mechanism for a particular configuration + */ +struct crl_ctrl_data_pair { + u32 ctrl_id; + u32 data; +}; + +enum crl_dep_ctrl_action_type { + CRL_DEP_CTRL_ACTION_TYPE_SELF = 0, + CRL_DEP_CTRL_ACTION_TYPE_DEP_CTRL, +}; + +enum crl_dep_ctrl_condition { + CRL_DEP_CTRL_CONDITION_GREATER = 0, + CRL_DEP_CTRL_CONDITION_LESSER, + CRL_DEP_CTRL_CONDITION_EQUAL, +}; + +enum crl_dep_ctrl_action { + CRL_DEP_CTRL_CONDITION_ADD = 0, + CRL_DEP_CTRL_CONDITION_SUBTRACT, + CRL_DEP_CTRL_CONDITION_MULTIPLY, + CRL_DEP_CTRL_CONDITION_DIVIDE, +}; + +struct crl_dep_ctrl_cond_action { + enum crl_dep_ctrl_condition cond; + u32 cond_value; + enum crl_dep_ctrl_action action; + u32 action_value; +}; + +/* Dependency control provision */ +struct crl_dep_ctrl_provision { + u32 ctrl_id; + enum crl_dep_ctrl_action_type action_type; + unsigned int action_items; + struct crl_dep_ctrl_cond_action *action; +}; + +struct crl_sensor_limits { + unsigned int x_addr_max; + unsigned int y_addr_max; + unsigned int x_addr_min; + unsigned int y_addr_min; + unsigned int min_frame_length_lines; + unsigned int max_frame_length_lines; + unsigned int min_line_length_pixels; + unsigned int max_line_length_pixels; + u8 scaler_m_min; + u8 scaler_m_max; + u8 scaler_n_min; + u8 scaler_n_max; + u8 min_even_inc; + u8 max_even_inc; + u8 min_odd_inc; + u8 max_odd_inc; +}; + +struct crl_ctrl_data_std { + s64 min; + s64 max; + u64 step; + s64 def; +}; + +struct crl_ctrl_data_menu_items { + const char *const *menu; + unsigned int size; +}; + +struct crl_ctrl_data_int_menu { + const s64 *menu; + s64 max; + s64 def; +}; + +union crl_ctrl_data_types { + struct crl_ctrl_data_std std_data; + struct crl_ctrl_data_menu_items menu_items; + struct crl_ctrl_data_int_menu int_menu; +}; + +/* + * Please note a difference in the usage of "regs" member in case of a + * volatile get control for read only purpose. 
Please check the + * "struct crl_dynamic_register_access" declaration comments for more details. + * + * Read only controls must have "flags" CRL_CTRL_FLAG_READ_ONLY set. + */ +struct crl_ctrl_data { + enum crl_subdev_type sd_type; + enum crl_ctrl_op_type op_type; + enum crl_ctrl_update_context context; + char name[32]; + u32 ctrl_id; + enum crl_ctrl_type type; + union crl_ctrl_data_types data; + unsigned long flags; + u32 impact; /* If this control impact any config selection */ + struct ici_ext_sd_param param; + bool enabled; + unsigned int regs_items; + struct crl_dynamic_register_access *regs; + unsigned int dep_items; + struct crl_dep_ctrl_provision *dep_ctrls; + s64 min; + s64 max; + u64 step; + s64 def; +}; + +struct crl_pll_configuration { + s64 input_clk; + s64 op_sys_clk; + u8 bitsperpixel; + u32 pixel_rate_csi; + u32 pixel_rate_pa; + u8 csi_lanes; + unsigned int comp_items; + struct crl_ctrl_data_pair *ctrl_data; + unsigned int pll_regs_items; + const struct crl_register_write_rep *pll_regs; +}; + +struct crl_subdev_rect_rep { + enum crl_subdev_type subdev_type; + struct ici_rect in_rect; + struct ici_rect out_rect; +}; + +struct crl_mode_rep { + unsigned int sd_rects_items; + const struct crl_subdev_rect_rep *sd_rects; + u8 binn_hor; + u8 binn_vert; + u8 scale_m; + s32 width; + s32 height; + unsigned int comp_items; + struct crl_ctrl_data_pair *ctrl_data; + unsigned int mode_regs_items; + const struct crl_register_write_rep *mode_regs; + + /* + * Minimum and maximum value for line length pixels and frame length + * lines are added for modes. This facilitates easy handling of + * modes which binning skipping and affects the calculation of vblank and + * hblank values. 
+ * + * The blank values are limited based on the following logic + * + * If mode specific limits are available + * vblank = clamp(min_llp - PA_width, max_llp - PA_width) + * hblank = clamp(min_fll - PA_Height, max_fll - PA_Height + * + * If mode specific blanking limits are not available, then the sensor + * limits will be used in the same manner. + * + * If sensor mode limits are not available, then the values will be + * written directly to the associated control registers. + */ + s32 min_llp; /* minimum/maximum value for line length pixels */ + s32 max_llp; + s32 min_fll; + s32 max_fll; /* minimum/maximum value for frame length lines */ +}; + +struct crl_csi_data_fmt { + u32 code; + u8 pixel_order; + u8 bits_per_pixel; + unsigned int regs_items; + const struct crl_register_write_rep *regs; +}; + +struct crl_flip_data { + u8 flip; + u8 pixel_order; +}; + +struct crl_power_seq_entity { + enum crl_pwr_ent_type type; + char ent_name[12]; + int ent_number; + u16 address; + unsigned int val; + unsigned int undo_val; /* Undo value if any previous step failed */ + unsigned int delay; /* delay in micro seconds */ + struct regulator *regulator_priv; /* R/W */ + struct gpio_desc *gpiod_priv; +}; + +struct crl_nvm_blob { + u8 dev_addr; + u16 start_addr; + u16 size; +}; + +struct crl_nvm { + unsigned int nvm_preop_regs_items; + const struct crl_register_write_rep *nvm_preop_regs; + + unsigned int nvm_postop_regs_items; + const struct crl_register_write_rep *nvm_postop_regs; + + unsigned int nvm_blobs_items; + struct crl_nvm_blob *nvm_config; + u32 nvm_flags; +}; + +/* Representation for v4l2_mbus_frame_desc_entry */ +struct crl_frame_desc { + struct crl_dynamic_entity flags; + struct crl_dynamic_entity bpp; + struct crl_dynamic_entity pixelcode; + struct crl_dynamic_entity start_line; + struct crl_dynamic_entity start_pixel; + struct crl_dynamic_calculated_entity width; + struct crl_dynamic_calculated_entity height; + struct crl_dynamic_entity length; + struct 
crl_dynamic_entity csi2_channel; + struct crl_dynamic_entity csi2_data_type; +}; + +typedef int (*sensor_specific_init)(struct i2c_client*); +typedef int (*sensor_specific_cleanup)(struct i2c_client*); + +struct crl_sensor_configuration { + + const struct crl_clock_entity *clock_entity; + + const unsigned int power_items; + const struct crl_power_seq_entity *power_entities; + const unsigned int power_delay; /* in micro seconds */ + + const unsigned int onetime_init_regs_items; + const struct crl_register_write_rep *onetime_init_regs; + + const unsigned int powerup_regs_items; + const struct crl_register_write_rep *powerup_regs; + + const unsigned int poweroff_regs_items; + const struct crl_register_write_rep *poweroff_regs; + + const unsigned int id_reg_items; + const struct crl_sensor_detect_config *id_regs; + + const unsigned int subdev_items; + const struct crl_sensor_subdev_config *subdevs; + + const struct crl_sensor_limits *sensor_limits; + + const unsigned int pll_config_items; + const struct crl_pll_configuration *pll_configs; + + const unsigned int modes_items; + const struct crl_mode_rep *modes; + /* + * Fail safe mode should be the largest resolution available in the + * mode list. If none of the mode parameters are matched, the driver + * will select this mode for streaming. 
+ */ + const unsigned int fail_safe_mode_index; + + const unsigned int streamon_regs_items; + const struct crl_register_write_rep *streamon_regs; + + const unsigned int streamoff_regs_items; + const struct crl_register_write_rep *streamoff_regs; + + const unsigned int ctrl_items; + const struct crl_ctrl_data *ctrl_bank; + + const unsigned int csi_fmts_items; + const struct crl_csi_data_fmt *csi_fmts; + + const unsigned int flip_items; + const struct crl_flip_data *flip_data; + + struct crl_nvm crl_nvm_info; + + enum crl_addr_len addr_len; + + unsigned int frame_desc_entries; + enum crl_frame_desc_type frame_desc_type; + struct crl_frame_desc *frame_desc; + char *msr_file_name; + + sensor_specific_init sensor_init; + sensor_specific_cleanup sensor_cleanup; +}; + +struct crlmodule_sensors { + char *pname; + char *name; + struct crl_sensor_configuration *ds; +}; + +/* + * Function to populate the CRL data structure from the sensor configuration + * definition file + */ +int crlmodule_populate_ds(struct crl_sensor *sensor, struct device *dev); + +/* + * Function validate the contents CRL data structure to check if all the + * required fields are filled and are according to the limits. 
+ */ +int crlmodule_validate_ds(struct crl_sensor *sensor); + +/* Function to free all resources allocated for the CRL data structure */ +void crlmodule_release_ds(struct crl_sensor *sensor); + +#endif /* __CRLMODULE_SENSOR_DS_H_ */ diff --git a/drivers/media/i2c/crlmodule-lite/crlmodule.h b/drivers/media/i2c/crlmodule-lite/crlmodule.h new file mode 100644 index 0000000000000..f522409cb22e2 --- /dev/null +++ b/drivers/media/i2c/crlmodule-lite/crlmodule.h @@ -0,0 +1,114 @@ +/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0) */ +/* + * Copyright (C) 2018 Intel Corporation + */ + +#ifndef __CRLMODULE_PRIV_H_ +#define __CRLMODULE_PRIV_H_ + +#include +#include +#include +#include +#include "../../../../include/media/crlmodule-lite.h" +#include +#include "crlmodule-sensor-ds.h" + +#define CRL_SUBDEVS 3 + +#define CRL_PA_PAD_SRC 0 +#define CRL_PAD_SINK 0 +#define CRL_PAD_SRC 1 +#define CRL_PADS 2 + +struct crl_subdev { + struct ici_ext_subdev sd; + struct ici_rect sink_fmt; + struct ici_rect crop[2]; + struct ici_rect compose; /* compose on sink */ + unsigned short sink_pad; + unsigned short source_pad; + int npads; + struct crl_sensor *sensor; + unsigned int field; +}; + +struct crl_sensor { + /* + * "mutex" is used to serialise access to all fields here + * except ctrls at the end of the struct. "mutex" is also + * used to serialise access to file handle specific + * information. The exception to this rule is the power_mutex + * below. + */ + struct mutex mutex; + /* + * power mutex became necessity because of the v4l2_ctrl_handler_setup + * is being called from power on function which needs to be serialised + * but v4l2_ctrl_handler setup uses "mutex" so it cannot be used. 
+ */ + struct mutex power_mutex; + + struct crl_subdev ssds[CRL_SUBDEVS]; + u32 ssds_used; + struct crl_subdev *src; + struct crl_subdev *binner; + struct crl_subdev *scaler; + struct crl_subdev *pixel_array; + + struct crlmodule_lite_platform_data *platform_data; + + u8 binning_horizontal; + u8 binning_vertical; + + u8 sensor_mode; + u8 scale_m; + u8 fmt_index; + u8 flip_info; + u8 pll_index; + + + int power_count; + + bool streaming; + + struct crl_sensor_configuration *sensor_ds; + struct crl_ctrl_data *ctrl_bank; + + /* These are mandatory controls. So good to have reference to these */ + struct crl_ctrl_data *pixel_rate_pa; + struct crl_ctrl_data *link_freq; + struct crl_ctrl_data *pixel_rate_csi; + + s64 *link_freq_menu; + + /* If extra v4l2 contrl has an impact on PLL selection */ + bool ext_ctrl_impacts_pll_selection; + bool ext_ctrl_impacts_mode_selection; + bool blanking_ctrl_not_use; + bool direct_mode_in_use; + const struct crl_mode_rep *current_mode; + + struct clk *xclk; + struct crl_power_seq_entity *pwr_entity; + + u8 *nvm_data; + u16 nvm_size; + + /* Pointer to binary file which contains + * tunable IQ parameters like NR, DPC, BLC + * Not all MSR's are moved to the binary + * at the moment. + */ + const struct firmware *msr_list; + + struct ici_ext_subdev_register reg; +}; + +#define to_crlmodule_subdev(_sd) \ + container_of(_sd, struct crl_subdev, sd) + +#define to_crlmodule_sensor(_sd) \ + (to_crlmodule_subdev(_sd)->sensor) + +#endif /* __CRLMODULE_PRIV_H_ */ diff --git a/drivers/media/i2c/crlmodule/Kconfig b/drivers/media/i2c/crlmodule/Kconfig new file mode 100644 index 0000000000000..2da6dafcc2c61 --- /dev/null +++ b/drivers/media/i2c/crlmodule/Kconfig @@ -0,0 +1,6 @@ +config VIDEO_CRLMODULE + tristate "CRL Module sensor support" + depends on I2C && VIDEO_V4L2 && VIDEO_V4L2_SUBDEV_API + depends on MEDIA_CAMERA_SUPPORT + ---help--- + This is a generic driver for CRL based camera modules. 
diff --git a/drivers/media/i2c/crlmodule/Makefile b/drivers/media/i2c/crlmodule/Makefile new file mode 100644 index 0000000000000..c3a1fed9c6bb0 --- /dev/null +++ b/drivers/media/i2c/crlmodule/Makefile @@ -0,0 +1,18 @@ +# SPDX-License-Identifier: GPL-2.0 +# Copyright (c) 2010 - 2018, Intel Corporation. + +# force check the compile warning to make sure zero warnings +# note we may have build issue when gcc upgraded. +ccflags-y := -Wall -Wextra +ccflags-y += $(call cc-disable-warning, unused-parameter) +ccflags-y += $(call cc-disable-warning, implicit-fallthrough) +ccflags-y += $(call cc-disable-warning, missing-field-initializers) +ccflags-$(CONFIG_VIDEO_INTEL_IPU_WERROR) += -Werror + +crlmodule-objs += crlmodule-core.o crlmodule-data.o \ + crlmodule-regs.o crlmodule-nvm.o \ + crl_adv7481_hdmi_configuration.o \ + crlmodule-msrlist.o +obj-$(CONFIG_VIDEO_CRLMODULE) += crlmodule.o + +ccflags-y += -Idrivers/media/i2c diff --git a/drivers/media/i2c/crlmodule/crl_adv7481_configuration.h b/drivers/media/i2c/crlmodule/crl_adv7481_configuration.h new file mode 100644 index 0000000000000..9cf6e37074a86 --- /dev/null +++ b/drivers/media/i2c/crlmodule/crl_adv7481_configuration.h @@ -0,0 +1,706 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (C) 2015 - 2018 Intel Corporation + * + * Author: Jianxu Zheng + * + */ + +#ifndef __CRLMODULE_ADV7481_CONFIGURATION_H_ +#define __CRLMODULE_ADV7481_CONFIGURATION_H_ + +#include "crlmodule-sensor-ds.h" + +static struct crl_register_write_rep adv7481_powerup_regset[] = { + {0xFF, CRL_REG_LEN_08BIT, 0xFF, 0xE0}, /* SW reset */ + {0x00, CRL_REG_LEN_DELAY, 0x05, 0x00}, /* Delay 5ms */ + {0x01, CRL_REG_LEN_08BIT, 0x76, 0xE0}, /* ADI recommended setting */ + {0xF2, CRL_REG_LEN_08BIT, 0x01, 0xE0}, /* I2C Rd Auto-Increment=1 */ + {0xF3, CRL_REG_LEN_08BIT, 0x4C, 0xE0}, /* DPLL Map Address */ + {0xF4, CRL_REG_LEN_08BIT, 0x44, 0xE0}, /* CP Map Address */ + {0xF5, CRL_REG_LEN_08BIT, 0x68, 0xE0}, /* HDMI RX Map Address */ + {0xF6, 
CRL_REG_LEN_08BIT, 0x6C, 0xE0}, /* EDID Map Address */ + {0xF7, CRL_REG_LEN_08BIT, 0x64, 0xE0}, /* HDMI RX Repeater Map Addr */ + {0xF8, CRL_REG_LEN_08BIT, 0x62, 0xE0}, /* HDMI RX Infoframe Map Addr */ + {0xF9, CRL_REG_LEN_08BIT, 0xF0, 0xE0}, /* CBUS Map Address Set */ + {0xFA, CRL_REG_LEN_08BIT, 0x82, 0xE0}, /* CEC Map Address Set */ + {0xFB, CRL_REG_LEN_08BIT, 0xF2, 0xE0}, /* SDP Main Map Address */ + {0xFC, CRL_REG_LEN_08BIT, 0x90, 0xE0}, /* CSI-TXB Map Address */ + {0xFD, CRL_REG_LEN_08BIT, 0x94, 0xE0}, /* CSI-TXA Map Address */ + {0x00, CRL_REG_LEN_08BIT, 0x50, 0xE0}, /* Disable Chip Powerdown & + HDMI Rx Block */ + {0x40, CRL_REG_LEN_08BIT, 0x83, 0x64}, /* Enable HDCP 1.1 */ + {0x00, CRL_REG_LEN_08BIT, 0x08, 0x68}, /* ADI recommended setting */ + {0x3D, CRL_REG_LEN_08BIT, 0x10, 0x68}, /* ADI recommended setting */ + {0x3E, CRL_REG_LEN_08BIT, 0x69, 0x68}, /* ADI recommended setting */ + {0x3F, CRL_REG_LEN_08BIT, 0x46, 0x68}, /* ADI recommended setting */ + {0x4E, CRL_REG_LEN_08BIT, 0xFE, 0x68}, /* ADI recommended setting */ + {0x4F, CRL_REG_LEN_08BIT, 0x08, 0x68}, /* ADI recommended setting */ + {0x57, CRL_REG_LEN_08BIT, 0xA3, 0x68}, /* ADI recommended setting */ + {0x58, CRL_REG_LEN_08BIT, 0x04, 0x68}, /* ADI recommended setting */ + {0x85, CRL_REG_LEN_08BIT, 0x10, 0x68}, /* ADI recommended setting */ + {0x83, CRL_REG_LEN_08BIT, 0x00, 0x68}, /* Enable All Terminations */ + {0xBE, CRL_REG_LEN_08BIT, 0x00, 0x68}, /* ADI recommended setting */ + {0x6C, CRL_REG_LEN_08BIT, 0x01, 0x68}, /* HPA Manual Enable */ + {0xF8, CRL_REG_LEN_08BIT, 0x01, 0x68}, /* HPA Asserted */ + {0x0F, CRL_REG_LEN_08BIT, 0x00, 0x68}, /* Audio Mute Speed = + Fastest Smallest Step Size */ + {0x70, CRL_REG_LEN_08BIT, 0xA0, 0x64}, /* Write primary edid size */ + {0x74, CRL_REG_LEN_08BIT, 0x01, 0x64}, /* Enable manual edid */ + {0x7A, CRL_REG_LEN_08BIT, 0x00, 0x64}, /* Write edid sram select */ + {0xF6, CRL_REG_LEN_08BIT, 0x6C, 0xE0}, /* Write edid map bus address */ + + {0x00*4, 
CRL_REG_LEN_32BIT, 0x00FFFFFF, 0x6C}, /* EDID programming */ + {0x01*4, CRL_REG_LEN_32BIT, 0xFFFFFF00, 0x6C}, /* EDID programming */ + {0x02*4, CRL_REG_LEN_32BIT, 0x4DD90100, 0x6C}, /* EDID programming */ + {0x03*4, CRL_REG_LEN_32BIT, 0x00000000, 0x6C}, /* EDID programming */ + {0x04*4, CRL_REG_LEN_32BIT, 0x00110103, 0x6C}, /* EDID programming */ + {0x05*4, CRL_REG_LEN_32BIT, 0x80000078, 0x6C}, /* EDID programming */ + {0x06*4, CRL_REG_LEN_32BIT, 0x0A0DC9A0, 0x6C}, /* EDID programming */ + {0x07*4, CRL_REG_LEN_32BIT, 0x57479827, 0x6C}, /* EDID programming */ + {0x08*4, CRL_REG_LEN_32BIT, 0x12484C00, 0x6C}, /* EDID programming */ + {0x09*4, CRL_REG_LEN_32BIT, 0x00000101, 0x6C}, /* EDID programming */ + {0x0A*4, CRL_REG_LEN_32BIT, 0x01010101, 0x6C}, /* EDID programming */ + {0x0B*4, CRL_REG_LEN_32BIT, 0x01010101, 0x6C}, /* EDID programming */ + {0x0C*4, CRL_REG_LEN_32BIT, 0x01010101, 0x6C}, /* EDID programming */ + {0x0D*4, CRL_REG_LEN_32BIT, 0x0101011D, 0x6C}, /* EDID programming */ + {0x0E*4, CRL_REG_LEN_32BIT, 0x80D0721C, 0x6C}, /* EDID programming */ + {0x0F*4, CRL_REG_LEN_32BIT, 0x1620102C, 0x6C}, /* EDID programming */ + {0x10*4, CRL_REG_LEN_32BIT, 0x2580C48E, 0x6C}, /* EDID programming */ + {0x11*4, CRL_REG_LEN_32BIT, 0x2100009E, 0x6C}, /* EDID programming */ + {0x12*4, CRL_REG_LEN_32BIT, 0x011D8018, 0x6C}, /* EDID programming */ + {0x13*4, CRL_REG_LEN_32BIT, 0x711C1620, 0x6C}, /* EDID programming */ + {0x14*4, CRL_REG_LEN_32BIT, 0x582C2500, 0x6C}, /* EDID programming */ + {0x15*4, CRL_REG_LEN_32BIT, 0xC48E2100, 0x6C}, /* EDID programming */ + {0x16*4, CRL_REG_LEN_32BIT, 0x009E0000, 0x6C}, /* EDID programming */ + {0x17*4, CRL_REG_LEN_32BIT, 0x00FC0048, 0x6C}, /* EDID programming */ + {0x18*4, CRL_REG_LEN_32BIT, 0x444D4920, 0x6C}, /* EDID programming */ + {0x19*4, CRL_REG_LEN_32BIT, 0x4C4C430A, 0x6C}, /* EDID programming */ + {0x1A*4, CRL_REG_LEN_32BIT, 0x20202020, 0x6C}, /* EDID programming */ + {0x1B*4, CRL_REG_LEN_32BIT, 0x000000FD, 0x6C}, /* EDID 
programming */ + {0x1C*4, CRL_REG_LEN_32BIT, 0x003B3D0F, 0x6C}, /* EDID programming */ + {0x1D*4, CRL_REG_LEN_32BIT, 0x2D08000A, 0x6C}, /* EDID programming */ + {0x1E*4, CRL_REG_LEN_32BIT, 0x20202020, 0x6C}, /* EDID programming */ + {0x1F*4, CRL_REG_LEN_32BIT, 0x202001C1, 0x6C}, /* EDID programming */ + {0x20*4, CRL_REG_LEN_32BIT, 0x02031E77, 0x6C}, /* EDID programming */ + {0x21*4, CRL_REG_LEN_32BIT, 0x4F941305, 0x6C}, /* EDID programming */ + {0x22*4, CRL_REG_LEN_32BIT, 0x03040201, 0x6C}, /* EDID programming */ + {0x23*4, CRL_REG_LEN_32BIT, 0x16150706, 0x6C}, /* EDID programming */ + {0x24*4, CRL_REG_LEN_32BIT, 0x1110121F, 0x6C}, /* EDID programming */ + {0x25*4, CRL_REG_LEN_32BIT, 0x23090701, 0x6C}, /* EDID programming */ + {0x26*4, CRL_REG_LEN_32BIT, 0x65030C00, 0x6C}, /* EDID programming */ + {0x27*4, CRL_REG_LEN_32BIT, 0x10008C0A, 0x6C}, /* EDID programming */ + {0x28*4, CRL_REG_LEN_32BIT, 0xD0902040, 0x6C}, /* EDID programming */ + {0x29*4, CRL_REG_LEN_32BIT, 0x31200C40, 0x6C}, /* EDID programming */ + {0x2A*4, CRL_REG_LEN_32BIT, 0x5500138E, 0x6C}, /* EDID programming */ + {0x2B*4, CRL_REG_LEN_32BIT, 0x21000018, 0x6C}, /* EDID programming */ + {0x2C*4, CRL_REG_LEN_32BIT, 0x011D00BC, 0x6C}, /* EDID programming */ + {0x2D*4, CRL_REG_LEN_32BIT, 0x52D01E20, 0x6C}, /* EDID programming */ + {0x2E*4, CRL_REG_LEN_32BIT, 0xB8285540, 0x6C}, /* EDID programming */ + {0x2F*4, CRL_REG_LEN_32BIT, 0xC48E2100, 0x6C}, /* EDID programming */ + {0x30*4, CRL_REG_LEN_32BIT, 0x001E8C0A, 0x6C}, /* EDID programming */ + {0x31*4, CRL_REG_LEN_32BIT, 0xD08A20E0, 0x6C}, /* EDID programming */ + {0x32*4, CRL_REG_LEN_32BIT, 0x2D10103E, 0x6C}, /* EDID programming */ + {0x33*4, CRL_REG_LEN_32BIT, 0x9600C48E, 0x6C}, /* EDID programming */ + {0x34*4, CRL_REG_LEN_32BIT, 0x21000018, 0x6C}, /* EDID programming */ + {0x35*4, CRL_REG_LEN_32BIT, 0x011D0072, 0x6C}, /* EDID programming */ + {0x36*4, CRL_REG_LEN_32BIT, 0x51D01E20, 0x6C}, /* EDID programming */ + {0x37*4, CRL_REG_LEN_32BIT, 
0x6E285500, 0x6C}, /* EDID programming */ + {0x38*4, CRL_REG_LEN_32BIT, 0xC48E2100, 0x6C}, /* EDID programming */ + {0x39*4, CRL_REG_LEN_32BIT, 0x001E8C0A, 0x6C}, /* EDID programming */ + {0x3A*4, CRL_REG_LEN_32BIT, 0xD08A20E0, 0x6C}, /* EDID programming */ + {0x3B*4, CRL_REG_LEN_32BIT, 0x2D10103E, 0x6C}, /* EDID programming */ + {0x3C*4, CRL_REG_LEN_32BIT, 0x9600138E, 0x6C}, /* EDID programming */ + {0x3D*4, CRL_REG_LEN_32BIT, 0x21000018, 0x6C}, /* EDID programming */ + {0x3E*4, CRL_REG_LEN_32BIT, 0x00000000, 0x6C}, /* EDID programming */ + {0x3F*4, CRL_REG_LEN_32BIT, 0x000000CB, 0x6C}, /* EDID programming */ + + {0x00, CRL_REG_LEN_08BIT, 0x84, 0x94}, /* Enable 4-lane MIPI */ + {0x1E, CRL_REG_LEN_08BIT, 0x80, 0x94}, /* No MIPI frame start */ + {0x26, CRL_REG_LEN_08BIT, 0x55, 0x94}, /* Disable sleep mode */ + {0x27, CRL_REG_LEN_08BIT, 0x55, 0x94}, /* Disable escape mode */ + {0x7E, CRL_REG_LEN_08BIT, 0xA0, 0x94}, /* ADI recommended setting */ + {0xF0, CRL_REG_LEN_08BIT, 0x00, 0x90}, /* ADI recommended setting */ + {0xDA, CRL_REG_LEN_08BIT, 0x01, 0x90}, /* ADI recommended setting */ + {0xD6, CRL_REG_LEN_08BIT, 0x07, 0x94}, /* ADI recommended setting */ + {0x34, CRL_REG_LEN_08BIT, 0x55, 0x94}, /* ADI recommended setting */ + {0xC4, CRL_REG_LEN_08BIT, 0x0A, 0x94}, /* ADI recommended setting */ + {0xCA, CRL_REG_LEN_08BIT, 0x02, 0x94}, /* ADI recommended setting */ + {0x71, CRL_REG_LEN_08BIT, 0x33, 0x94}, /* ADI recommended setting */ + {0x72, CRL_REG_LEN_08BIT, 0x11, 0x94}, /* ADI recommended setting */ + {0xF0, CRL_REG_LEN_08BIT, 0x00, 0x94}, /* Power up DPHY */ + {0x31, CRL_REG_LEN_08BIT, 0x82, 0x94}, /* ADI recommended setting */ + {0xDA, CRL_REG_LEN_08BIT, 0x01, 0x94}, /* ADI recommended setting */ + {0x00, CRL_REG_LEN_08BIT, 0xA4, 0x94}, /* ADI recommended setting */ +}; + +static struct crl_register_write_rep adv7481_mode_1080p[] = { + {0x04, CRL_REG_LEN_08BIT, 0x00, 0xE0}, /* YCrCb output */ + {0x05, CRL_REG_LEN_08BIT, 0x5E, 0xE0}, /* Select Resolution 1080P */ 
+ {0x12, CRL_REG_LEN_08BIT, 0xF2, 0xE0}, /* CSC Depends on ip Packets - SDR422 set */ + {0x17, CRL_REG_LEN_08BIT, 0x80, 0xE0}, /* ADI recommended setting */ + {0x03, CRL_REG_LEN_08BIT, 0x86, 0xE0}, /* CP-Insert_AV_Code */ + {0x7C, CRL_REG_LEN_08BIT, 0x00, 0x44}, /* ADI recommended setting */ + + {0x8B, CRL_REG_LEN_08BIT, 0x43, 0x44}, /* 1080P shift left 44 pixel */ + {0x8C, CRL_REG_LEN_08BIT, 0xD4, 0x44}, /* 1080P shift left 44 pixel */ + {0x8B, CRL_REG_LEN_08BIT, 0x4F, 0x44}, /* 1080P shift left 44 pixel */ + {0x8D, CRL_REG_LEN_08BIT, 0xD4, 0x44}, /* 1080P shift left 44 pixel */ + + {0x0C, CRL_REG_LEN_08BIT, 0xE0, 0xE0}, /* Enable LLC_DLL & Double LLC Timing */ + {0x0E, CRL_REG_LEN_08BIT, 0xFF, 0xE0}, /* LLC/PIX/AUD/SPI PINS TRISTATED */ + {0x10, CRL_REG_LEN_08BIT, 0xA0, 0xE0}, /* Enable 4-lane CSI Tx & Pixel Port */ + {0x1C, CRL_REG_LEN_08BIT, 0x3A, 0xE0}, /* ADI recommended setting */ +}; + +static struct crl_register_write_rep adv7481_mode_720p[] = { + {0x04, CRL_REG_LEN_08BIT, 0x00, 0xE0}, /* YCrCb output */ + {0x05, CRL_REG_LEN_08BIT, 0x53, 0xE0}, /* Select Resolution 720P */ + {0x12, CRL_REG_LEN_08BIT, 0xF2, 0xE0}, /* CSC Depends on ip Packets - SDR422 set */ + {0x17, CRL_REG_LEN_08BIT, 0x80, 0xE0}, /* ADI recommended setting */ + {0x03, CRL_REG_LEN_08BIT, 0x86, 0xE0}, /* CP-Insert_AV_Code */ + {0x7C, CRL_REG_LEN_08BIT, 0x00, 0x44}, /* ADI recommended setting */ + + {0x8B, CRL_REG_LEN_08BIT, 0x43, 0x44}, /* 720P shift left 40 pixel */ + {0x8C, CRL_REG_LEN_08BIT, 0xD8, 0x44}, /* 720P shift left 40 pixel */ + {0x8B, CRL_REG_LEN_08BIT, 0x4F, 0x44}, /* 720P shift left 40 pixel */ + {0x8D, CRL_REG_LEN_08BIT, 0xD8, 0x44}, /* 720P shift left 40 pixel */ + + {0x0C, CRL_REG_LEN_08BIT, 0xE0, 0xE0}, /* Enable LLC_DLL & Double LLC Timing */ + {0x0E, CRL_REG_LEN_08BIT, 0xFF, 0xE0}, /* LLC/PIX/AUD/SPI PINS TRISTATED */ + {0x10, CRL_REG_LEN_08BIT, 0xA0, 0xE0}, /* Enable 4-lane CSI Tx & Pixel Port */ + {0x1C, CRL_REG_LEN_08BIT, 0x3A, 0xE0}, /* ADI recommended setting */ +}; 
+ +static struct crl_register_write_rep adv7481_mode_VGA[] = { + {0x04, CRL_REG_LEN_08BIT, 0x00, 0xE0}, /* YCrCb output */ + {0x05, CRL_REG_LEN_08BIT, 0x88, 0xE0}, /* Select Resolution VGA */ + {0x12, CRL_REG_LEN_08BIT, 0xF2, 0xE0}, /* CSC Depends on ip Packets - SDR422 set */ + {0x17, CRL_REG_LEN_08BIT, 0x80, 0xE0}, /* ADI recommended setting */ + {0x03, CRL_REG_LEN_08BIT, 0x86, 0xE0}, /* CP-Insert_AV_Code */ + {0x7C, CRL_REG_LEN_08BIT, 0x00, 0x44}, /* ADI recommended setting */ + + {0x0C, CRL_REG_LEN_08BIT, 0xE0, 0xE0}, /* Enable LLC_DLL & Double LLC Timing */ + {0x0E, CRL_REG_LEN_08BIT, 0xFF, 0xE0}, /* LLC/PIX/AUD/SPI PINS TRISTATED */ + {0x10, CRL_REG_LEN_08BIT, 0xA0, 0xE0}, /* Enable 4-lane CSI Tx & Pixel Port */ + {0x1C, CRL_REG_LEN_08BIT, 0x3A, 0xE0}, /* ADI recommended setting */ +}; + +static struct crl_register_write_rep adv7481_mode_1080i[] = { + {0x04, CRL_REG_LEN_08BIT, 0x00, 0xE0}, /* YCrCb output */ + {0x05, CRL_REG_LEN_08BIT, 0x54, 0xE0}, /* Select Resolution 1080i*/ + {0x12, CRL_REG_LEN_08BIT, 0xF2, 0xE0}, /* CSC Depends on ip Packets - SDR422 set */ + {0x17, CRL_REG_LEN_08BIT, 0x80, 0xE0}, /* ADI recommended setting */ + {0x03, CRL_REG_LEN_08BIT, 0x86, 0xE0}, /* CP-Insert_AV_Code */ + {0x7C, CRL_REG_LEN_08BIT, 0x00, 0x44}, /* ADI recommended setting */ + + {0x8B, CRL_REG_LEN_08BIT, 0x43, 0x44}, /* 1080i shift left 44 pixel */ + {0x8C, CRL_REG_LEN_08BIT, 0xD4, 0x44}, /* 1080i shift left 44 pixel */ + {0x8B, CRL_REG_LEN_08BIT, 0x4F, 0x44}, /* 1080i shift left 44 pixel */ + {0x8D, CRL_REG_LEN_08BIT, 0xD4, 0x44}, /* 1080i shift left 44 pixel */ + + {0x0C, CRL_REG_LEN_08BIT, 0xE0, 0xE0}, /* Enable LLC_DLL & Double LLC Timing */ + {0x0E, CRL_REG_LEN_08BIT, 0xFF, 0xE0}, /* LLC/PIX/AUD/SPI PINS TRISTATED */ + {0x10, CRL_REG_LEN_08BIT, 0xA0, 0xE0}, /* Enable 4-lane CSI Tx & Pixel Port */ + {0x1C, CRL_REG_LEN_08BIT, 0x3A, 0xE0}, /* ADI recommended setting */ +}; + +static struct crl_register_write_rep adv7481_mode_480i[] = { + {0x04, CRL_REG_LEN_08BIT, 
0x00, 0xE0}, /* YCrCb output */ + {0x05, CRL_REG_LEN_08BIT, 0x40, 0xE0}, /* Select Resolution 480i */ + {0x12, CRL_REG_LEN_08BIT, 0xF2, 0xE0}, /* CSC Depends on ip Packets - SDR422 set */ + {0x17, CRL_REG_LEN_08BIT, 0x80, 0xE0}, /* ADI recommended setting */ + {0x03, CRL_REG_LEN_08BIT, 0x86, 0xE0}, /* CP-Insert_AV_Code */ + {0x7C, CRL_REG_LEN_08BIT, 0x00, 0x44}, /* ADI recommended setting */ + + {0x0C, CRL_REG_LEN_08BIT, 0xE0, 0xE0}, /* Enable LLC_DLL & Double LLC Timing */ + {0x0E, CRL_REG_LEN_08BIT, 0xFF, 0xE0}, /* LLC/PIX/AUD/SPI PINS TRISTATED */ + {0x10, CRL_REG_LEN_08BIT, 0xA0, 0xE0}, /* Enable 4-lane CSI Tx & Pixel Port */ + {0x1C, CRL_REG_LEN_08BIT, 0x3A, 0xE0}, /* ADI recommended setting */ +}; + +static struct crl_register_write_rep adv7481_mode_576p[] = { + {0x04, CRL_REG_LEN_08BIT, 0x00, 0xE0}, /* YCrCb output */ + {0x05, CRL_REG_LEN_08BIT, 0x4B, 0xE0}, /* Select Resolution 576p*/ + {0x12, CRL_REG_LEN_08BIT, 0xF2, 0xE0}, /* CSC Depends on ip Packets - SDR422 set */ + {0x17, CRL_REG_LEN_08BIT, 0x80, 0xE0}, /* ADI recommended setting */ + {0x03, CRL_REG_LEN_08BIT, 0x86, 0xE0}, /* CP-Insert_AV_Code */ + {0x7C, CRL_REG_LEN_08BIT, 0x00, 0x44}, /* ADI recommended setting */ + + {0x0C, CRL_REG_LEN_08BIT, 0xE0, 0xE0}, /* Enable LLC_DLL & Double LLC Timing */ + {0x0E, CRL_REG_LEN_08BIT, 0xFF, 0xE0}, /* LLC/PIX/AUD/SPI PINS TRISTATED */ + {0x10, CRL_REG_LEN_08BIT, 0xA0, 0xE0}, /* Enable 4-lane CSI Tx & Pixel Port */ + {0x1C, CRL_REG_LEN_08BIT, 0x3A, 0xE0}, /* ADI recommended setting */ +}; + +static struct crl_register_write_rep adv7481_mode_576i[] = { + {0x04, CRL_REG_LEN_08BIT, 0x00, 0xE0}, /* YCrCb output */ + {0x05, CRL_REG_LEN_08BIT, 0x41, 0xE0}, /* Select Resolution 576i*/ + {0x12, CRL_REG_LEN_08BIT, 0xF2, 0xE0}, /* CSC Depends on ip Packets - SDR422 set */ + {0x17, CRL_REG_LEN_08BIT, 0x80, 0xE0}, /* ADI recommended setting */ + {0x03, CRL_REG_LEN_08BIT, 0x86, 0xE0}, /* CP-Insert_AV_Code */ + {0x7C, CRL_REG_LEN_08BIT, 0x00, 0x44}, /* ADI recommended setting 
*/ + + {0x0C, CRL_REG_LEN_08BIT, 0xE0, 0xE0}, /* Enable LLC_DLL & Double LLC Timing */ + {0x0E, CRL_REG_LEN_08BIT, 0xFF, 0xE0}, /* LLC/PIX/AUD/SPI PINS TRISTATED */ + {0x10, CRL_REG_LEN_08BIT, 0xA0, 0xE0}, /* Enable 4-lane CSI Tx & Pixel Port */ + {0x1C, CRL_REG_LEN_08BIT, 0x3A, 0xE0}, /* ADI recommended setting */ +}; + +static struct crl_register_write_rep adv7481_streamon_regs[] = { + {0x00, CRL_REG_LEN_DELAY, 0x02, 0x00}, + {0x00, CRL_REG_LEN_08BIT, 0x24, 0x94}, /* Power-up CSI-TX */ + {0x00, CRL_REG_LEN_DELAY, 0x01, 0x00}, + {0xC1, CRL_REG_LEN_08BIT, 0x2B, 0x94}, /* ADI recommended setting */ + {0x00, CRL_REG_LEN_DELAY, 0x01, 0x00}, + {0x31, CRL_REG_LEN_08BIT, 0x80, 0x94}, /* ADI recommended setting */ +}; + +static struct crl_register_write_rep adv7481_streamoff_regs[] = { + {0x31, CRL_REG_LEN_08BIT, 0x82, 0x94}, /* ADI Recommended Write */ + {0x1E, CRL_REG_LEN_08BIT, 0x00, 0x94}, /* Reset the clock Lane */ + {0x00, CRL_REG_LEN_08BIT, 0xA4, 0x94}, + {0xDA, CRL_REG_LEN_08BIT, 0x00, 0x94}, /* i2c_mipi_pll_en - 1'b0 Disable MIPI PLL */ + {0xC1, CRL_REG_LEN_08BIT, 0x3B, 0x94}, +}; + +static struct crl_sensor_detect_config adv7481_sensor_detect_regset[] = { + { + .reg = { 0x0019, CRL_REG_LEN_08BIT, 0x000000ff }, + .width = 5, + }, + { + .reg = { 0x0016, CRL_REG_LEN_16BIT, 0x0000ffff }, + .width = 7, + }, +}; + +static struct crl_pll_configuration adv7481_pll_configurations[] = { + { + .input_clk = 24000000, + .op_sys_clk = 400000000, + .bitsperpixel = 16, + .pixel_rate_csi = 800000000, + .pixel_rate_pa = 800000000, + .comp_items = 0, + .ctrl_data = 0, + .pll_regs_items = 0, + .pll_regs = NULL, + }, + +}; + +static struct crl_subdev_rect_rep adv7481_1080p_rects[] = { + { + .subdev_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .in_rect.left = 0, + .in_rect.top = 0, + .in_rect.width = 1920, + .in_rect.height = 1080, + .out_rect.left = 0, + .out_rect.top = 0, + .out_rect.width = 1920, + .out_rect.height = 1080, + }, + { + .subdev_type = CRL_SUBDEV_TYPE_BINNER, + .in_rect.left 
= 0, + .in_rect.top = 0, + .in_rect.width = 1920, + .in_rect.height = 1080, + .out_rect.left = 0, + .out_rect.top = 0, + .out_rect.width = 1920, + .out_rect.height = 1080, + }, +}; + +static struct crl_subdev_rect_rep adv7481_720p_rects[] = { + { + .subdev_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .in_rect.left = 0, + .in_rect.top = 0, + .in_rect.width = 1920, + .in_rect.height = 1080, + .out_rect.left = 0, + .out_rect.top = 0, + .out_rect.width = 1920, + .out_rect.height = 1080, + }, + { + .subdev_type = CRL_SUBDEV_TYPE_BINNER, + .in_rect.left = 0, + .in_rect.top = 0, + .in_rect.width = 1920, + .in_rect.height = 1080, + .out_rect.left = 0, + .out_rect.top = 0, + .out_rect.width = 1280, + .out_rect.height = 720, + }, +}; + +static struct crl_subdev_rect_rep adv7481_VGA_rects[] = { + { + .subdev_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .in_rect.left = 0, + .in_rect.top = 0, + .in_rect.width = 1920, + .in_rect.height = 1080, + .out_rect.left = 0, + .out_rect.top = 0, + .out_rect.width = 1920, + .out_rect.height = 1080, + }, + { + .subdev_type = CRL_SUBDEV_TYPE_BINNER, + .in_rect.left = 0, + .in_rect.top = 0, + .in_rect.width = 1920, + .in_rect.height = 1080, + .out_rect.left = 0, + .out_rect.top = 0, + .out_rect.width = 640, + .out_rect.height = 480, + }, +}; + +static struct crl_subdev_rect_rep adv7481_1080i_rects[] = { + { + .subdev_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .in_rect.left = 0, + .in_rect.top = 0, + .in_rect.width = 1920, + .in_rect.height = 1080, + .out_rect.left = 0, + .out_rect.top = 0, + .out_rect.width = 1920, + .out_rect.height = 1080, + }, + { + .subdev_type = CRL_SUBDEV_TYPE_BINNER, + .in_rect.left = 0, + .in_rect.top = 0, + .in_rect.width = 1920, + .in_rect.height = 1080, + .out_rect.left = 0, + .out_rect.top = 0, + .out_rect.width = 1920, + .out_rect.height = 540, + }, +}; + +static struct crl_subdev_rect_rep adv7481_480i_rects[] = { + { + .subdev_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .in_rect.left = 0, + .in_rect.top = 0, + .in_rect.width = 1920, + 
.in_rect.height = 1080, + .out_rect.left = 0, + .out_rect.top = 0, + .out_rect.width = 1920, + .out_rect.height = 1080, + }, + { + .subdev_type = CRL_SUBDEV_TYPE_BINNER, + .in_rect.left = 0, + .in_rect.top = 0, + .in_rect.width = 1920, + .in_rect.height = 1080, + .out_rect.left = 0, + .out_rect.top = 0, + .out_rect.width = 720, + .out_rect.height = 240, + }, +}; + +static struct crl_subdev_rect_rep adv7481_576p_rects[] = { + { + .subdev_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .in_rect.left = 0, + .in_rect.top = 0, + .in_rect.width = 1920, + .in_rect.height = 1080, + .out_rect.left = 0, + .out_rect.top = 0, + .out_rect.width = 1920, + .out_rect.height = 1080, + }, + { + .subdev_type = CRL_SUBDEV_TYPE_BINNER, + .in_rect.left = 0, + .in_rect.top = 0, + .in_rect.width = 1920, + .in_rect.height = 1080, + .out_rect.left = 0, + .out_rect.top = 0, + .out_rect.width = 720, + .out_rect.height = 576, + }, +}; + +static struct crl_subdev_rect_rep adv7481_576i_rects[] = { + { + .subdev_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .in_rect.left = 0, + .in_rect.top = 0, + .in_rect.width = 1920, + .in_rect.height = 1080, + .out_rect.left = 0, + .out_rect.top = 0, + .out_rect.width = 1920, + .out_rect.height = 1080, + }, + { + .subdev_type = CRL_SUBDEV_TYPE_BINNER, + .in_rect.left = 0, + .in_rect.top = 0, + .in_rect.width = 1920, + .in_rect.height = 1080, + .out_rect.left = 0, + .out_rect.top = 0, + .out_rect.width = 720, + .out_rect.height = 288, + }, +}; +static struct crl_mode_rep adv7481_modes[] = { + { + .sd_rects_items = ARRAY_SIZE(adv7481_1080p_rects), + .sd_rects = adv7481_1080p_rects, + .binn_hor = 1, + .binn_vert = 1, + .scale_m = 1, + .width = 1920, + .height = 1080, + .comp_items = 0, + .ctrl_data = 0, + .mode_regs_items = ARRAY_SIZE(adv7481_mode_1080p), + .mode_regs = adv7481_mode_1080p, + }, + { + .sd_rects_items = ARRAY_SIZE(adv7481_720p_rects), + .sd_rects = adv7481_720p_rects, + .binn_hor = 1, + .binn_vert = 1, + .scale_m = 1, + .width = 1280, + .height = 720, + .comp_items 
= 0, + .ctrl_data = 0, + .mode_regs_items = ARRAY_SIZE(adv7481_mode_720p), + .mode_regs = adv7481_mode_720p, + }, + { + .sd_rects_items = ARRAY_SIZE(adv7481_VGA_rects), + .sd_rects = adv7481_VGA_rects, + .binn_hor = 3, + .binn_vert = 2, + .scale_m = 1, + .width = 640, + .height = 480, + .comp_items = 0, + .ctrl_data = 0, + .mode_regs_items = ARRAY_SIZE(adv7481_mode_VGA), + .mode_regs = adv7481_mode_VGA, + }, + { + .sd_rects_items = ARRAY_SIZE(adv7481_1080i_rects), + .sd_rects = adv7481_1080i_rects, + .binn_hor = 1, + .binn_vert = 2, + .scale_m = 1, + .width = 1920, + .height = 540, + .comp_items = 0, + .ctrl_data = 0, + .mode_regs_items = ARRAY_SIZE(adv7481_mode_1080i), + .mode_regs = adv7481_mode_1080i, + }, + { + .sd_rects_items = ARRAY_SIZE(adv7481_480i_rects), + .sd_rects = adv7481_480i_rects, + .binn_hor = 2, + .binn_vert = 4, + .scale_m = 1, + .width = 720, + .height = 240, + .comp_items = 0, + .ctrl_data = 0, + .mode_regs_items = ARRAY_SIZE(adv7481_mode_480i), + .mode_regs = adv7481_mode_480i, + }, + { + .sd_rects_items = ARRAY_SIZE(adv7481_576p_rects), + .sd_rects = adv7481_576p_rects, + .binn_hor = 2, + .binn_vert = 1, + .scale_m = 1, + .width = 720, + .height = 576, + .comp_items = 0, + .ctrl_data = 0, + .mode_regs_items = ARRAY_SIZE(adv7481_mode_576p), + .mode_regs = adv7481_mode_576p, + }, + { + .sd_rects_items = ARRAY_SIZE(adv7481_576i_rects), + .sd_rects = adv7481_576i_rects, + .binn_hor = 2, + .binn_vert = 3, + .scale_m = 1, + .width = 720, + .height = 288, + .comp_items = 0, + .ctrl_data = 0, + .mode_regs_items = ARRAY_SIZE(adv7481_mode_576i), + .mode_regs = adv7481_mode_576i, + }, +}; + +static struct crl_sensor_subdev_config adv7481_sensor_subdevs[] = { + { + .subdev_type = CRL_SUBDEV_TYPE_BINNER, + .name = "adv7481 binner", + }, + { + .subdev_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .name = "adv7481 pixel array", + }, +}; + +static struct crl_sensor_limits adv7481_sensor_limits = { + .x_addr_min = 0, + .y_addr_min = 0, + .x_addr_max = 1920, + .y_addr_max = 1080, + 
.min_frame_length_lines = 160, + .max_frame_length_lines = 65535, + .min_line_length_pixels = 6024, + .max_line_length_pixels = 32752, + .scaler_m_min = 1, + .scaler_m_max = 1, + .scaler_n_min = 1, + .scaler_n_max = 1, + .min_even_inc = 1, + .max_even_inc = 1, + .min_odd_inc = 1, + .max_odd_inc = 1, +}; + +static struct crl_csi_data_fmt adv7481_crl_csi_data_fmt[] = { + { + .code = MEDIA_BUS_FMT_UYVY8_1X16, + .pixel_order = CRL_PIXEL_ORDER_GRBG, + .bits_per_pixel = 16, + .regs_items = ARRAY_SIZE(adv7481_mode_1080p), + .regs = adv7481_mode_1080p, /* default yuv422 format */ + }, +}; + +static struct crl_v4l2_ctrl adv7481_v4l2_ctrls[] = { + { + .sd_type = CRL_SUBDEV_TYPE_BINNER, + .op_type = CRL_V4L2_CTRL_SET_OP, + .context = SENSOR_IDLE, + .ctrl_id = V4L2_CID_LINK_FREQ, + .name = "V4L2_CID_LINK_FREQ", + .type = CRL_V4L2_CTRL_TYPE_MENU_INT, + .data.v4l2_int_menu.def = 0, + .data.v4l2_int_menu.max = 0, + .data.v4l2_int_menu.menu = 0, + .flags = 0, + .impact = CRL_IMPACTS_NO_IMPACT, + .regs_items = 0, + .regs = 0, + .dep_items = 0, + .dep_ctrls = 0, + }, + { + .sd_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .op_type = CRL_V4L2_CTRL_GET_OP, + .context = SENSOR_POWERED_ON, + .ctrl_id = V4L2_CID_PIXEL_RATE, + .name = "V4L2_CID_PIXEL_RATE_PA", + .type = CRL_V4L2_CTRL_TYPE_INTEGER, + .data.std_data.min = 0, + .data.std_data.max = 0, + .data.std_data.step = 1, + .data.std_data.def = 0, + .flags = 0, + .impact = CRL_IMPACTS_NO_IMPACT, + .regs_items = 0, + .regs = 0, + .dep_items = 0, + .dep_ctrls = 0, + }, + { + .sd_type = CRL_SUBDEV_TYPE_BINNER, + .op_type = CRL_V4L2_CTRL_GET_OP, + .context = SENSOR_POWERED_ON, + .ctrl_id = V4L2_CID_PIXEL_RATE, + .name = "V4L2_CID_PIXEL_RATE_CSI", + .type = CRL_V4L2_CTRL_TYPE_INTEGER, + .data.std_data.min = 0, + .data.std_data.max = 0, + .data.std_data.step = 1, + .data.std_data.def = 0, + .flags = 0, + .impact = CRL_IMPACTS_NO_IMPACT, + .regs_items = 0, + .regs = 0, + .dep_items = 0, + .dep_ctrls = 0, + }, +}; + +/* Power items, they are enabled 
in the order they are listed here */ +static struct crl_power_seq_entity adv7481_power_items[] = { + { + .type = CRL_POWER_ETY_CLK_FRAMEWORK, + .val = 24000000, + }, + { + .type = CRL_POWER_ETY_GPIO_FROM_PDATA, + .val = 1, + }, +}; + +static struct crl_sensor_configuration adv7481_crl_configuration = { + + .power_items = ARRAY_SIZE(adv7481_power_items), + .power_entities = adv7481_power_items, + + .powerup_regs_items = ARRAY_SIZE(adv7481_powerup_regset), + .powerup_regs = adv7481_powerup_regset, + + .poweroff_regs_items = ARRAY_SIZE(adv7481_streamoff_regs), + .poweroff_regs = adv7481_streamoff_regs, + + .id_reg_items = ARRAY_SIZE(adv7481_sensor_detect_regset), + .id_regs = adv7481_sensor_detect_regset, + + .subdev_items = ARRAY_SIZE(adv7481_sensor_subdevs), + .subdevs = adv7481_sensor_subdevs, + + .sensor_limits = &adv7481_sensor_limits, + + .pll_config_items = ARRAY_SIZE(adv7481_pll_configurations), + .pll_configs = adv7481_pll_configurations, + + .modes_items = ARRAY_SIZE(adv7481_modes), + .modes = adv7481_modes, + + .streamon_regs_items = ARRAY_SIZE(adv7481_streamon_regs), + .streamon_regs = adv7481_streamon_regs, + + .streamoff_regs_items = ARRAY_SIZE(adv7481_streamoff_regs), + .streamoff_regs = adv7481_streamoff_regs, + + .v4l2_ctrls_items = ARRAY_SIZE(adv7481_v4l2_ctrls), + .v4l2_ctrl_bank = adv7481_v4l2_ctrls, + + .csi_fmts_items = ARRAY_SIZE(adv7481_crl_csi_data_fmt), + .csi_fmts = adv7481_crl_csi_data_fmt, +}; + +#endif /* __CRLMODULE_ADV7481_CONFIGURATION_H_ */ diff --git a/drivers/media/i2c/crlmodule/crl_adv7481_cvbs_configuration.h b/drivers/media/i2c/crlmodule/crl_adv7481_cvbs_configuration.h new file mode 100644 index 0000000000000..2d8b4f22edf3a --- /dev/null +++ b/drivers/media/i2c/crlmodule/crl_adv7481_cvbs_configuration.h @@ -0,0 +1,285 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (C) 2015 - 2018 Intel Corporation + * + * Author: Jianxu Zheng + * + */ + +#ifndef __CRLMODULE_ADV7481_CVBS_CONFIGURATION_H_ +#define 
__CRLMODULE_ADV7481_CVBS_CONFIGURATION_H_ + +#include "crlmodule-sensor-ds.h" + +static struct crl_register_write_rep adv7481_cvbs_powerup_regset[] = { + {0x0E, CRL_REG_LEN_08BIT, 0xFF, 0xE0}, /* LLC/PIX/AUD/ + SPI PINS TRISTATED */ + {0x0F, CRL_REG_LEN_08BIT, 0x00, 0xF2}, /* Exit Power Down Mode */ + {0x52, CRL_REG_LEN_08BIT, 0xC0, 0xF2}, /* ADI Required Write */ + {0x00, CRL_REG_LEN_08BIT, 0x0E, 0xF2}, /* INSEL = CVBS in on Ain 1 */ + {0x0E, CRL_REG_LEN_08BIT, 0x80, 0xF2}, /* ADI Required Write */ + {0x9C, CRL_REG_LEN_08BIT, 0x00, 0xF2}, /* ADI Required Write */ + {0x9C, CRL_REG_LEN_08BIT, 0xFF, 0xF2}, /* ADI Required Write */ + {0x0E, CRL_REG_LEN_08BIT, 0x00, 0xF2}, /* ADI Required Write */ + {0x5A, CRL_REG_LEN_08BIT, 0x90, 0xF2}, /* ADI Required Write */ + {0x60, CRL_REG_LEN_08BIT, 0xA0, 0xF2}, /* ADI Required Write */ + {0x00, CRL_REG_LEN_DELAY, 0x19, 0x00}, /* Delay 25*/ + {0x60, CRL_REG_LEN_08BIT, 0xB0, 0xF2}, /* ADI Required Write */ + {0x5F, CRL_REG_LEN_08BIT, 0xA8, 0xF2}, + {0x0E, CRL_REG_LEN_08BIT, 0x80, 0xF2}, /* ADI Required Write */ + {0xB6, CRL_REG_LEN_08BIT, 0x08, 0xF2}, /* ADI Required Write */ + {0xC0, CRL_REG_LEN_08BIT, 0xA0, 0xF2}, /* ADI Required Write */ + {0xD9, CRL_REG_LEN_08BIT, 0x44, 0xF2}, + {0x0E, CRL_REG_LEN_08BIT, 0x40, 0xF2}, + {0xE0, CRL_REG_LEN_08BIT, 0x01, 0xF2}, /* Fast Lock enable*/ + {0x0E, CRL_REG_LEN_08BIT, 0x00, 0xF2}, /* ADI Required Write */ + {0x80, CRL_REG_LEN_08BIT, 0x51, 0xF2}, /* ADI Required Write */ + {0x81, CRL_REG_LEN_08BIT, 0x51, 0xF2}, /* ADI Required Write */ + {0x82, CRL_REG_LEN_08BIT, 0x68, 0xF2}, /* ADI Required Write */ + {0x03, CRL_REG_LEN_08BIT, 0x42, 0xF2}, /* Tri-S Output Drivers, + PwrDwn 656 pads */ + {0x04, CRL_REG_LEN_08BIT, 0x07, 0xF2}, /* Power-up INTRQ pad, + & Enable SFL */ + {0x13, CRL_REG_LEN_08BIT, 0x00, 0xF2}, /* ADI Required Write */ + {0x17, CRL_REG_LEN_08BIT, 0x41, 0xF2}, /* Select SH1 */ + {0x31, CRL_REG_LEN_08BIT, 0x12, 0xF2}, /* ADI Required Write */ + {0x10, CRL_REG_LEN_08BIT | 
CRL_REG_READ_AND_UPDATE, 0x70, 0xE0, 0x70 }, + /* Enable 1-Lane MIPI Tx, + enable pixel output and route + SD through Pixel port */ + {0x00, CRL_REG_LEN_08BIT, 0x81, 0x90}, /* Enable 1-lane MIPI */ + {0x00, CRL_REG_LEN_08BIT, 0xA1, 0x90}, /* Set Auto DPHY Timing */ + {0xF0, CRL_REG_LEN_08BIT, 0x00, 0x94}, /* ADI Required Write */ + {0xD2, CRL_REG_LEN_08BIT, 0x40, 0x90}, /* ADI Required Write */ + {0xC4, CRL_REG_LEN_08BIT, 0x0A, 0x90}, /* ADI Required Write */ + {0x71, CRL_REG_LEN_08BIT, 0x33, 0x90}, /* ADI Required Write */ + {0x72, CRL_REG_LEN_08BIT, 0x11, 0x90}, /* ADI Required Write */ + {0xF0, CRL_REG_LEN_08BIT, 0x00, 0x90}, /* i2c_dphy_pwdn - 1'b0 */ + {0x31, CRL_REG_LEN_08BIT, 0x82, 0x90}, /* ADI Required Write */ + {0x1E, CRL_REG_LEN_08BIT, 0xC0, 0x90}, /* ADI Required Write */ +}; + + +static struct crl_register_write_rep adv7481_cvbs_streamon_regs[] = { + {0xC1, CRL_REG_LEN_08BIT, 0x2B, 0x90}, /* ADI Required Write */ + {0x00, CRL_REG_LEN_DELAY, 0x01, 0x00}, + {0xDA, CRL_REG_LEN_08BIT, 0x01, 0x90}, /* i2c_mipi_pll_en - 1'b1 */ + {0x00, CRL_REG_LEN_DELAY, 0x02, 0x00}, + {0x00, CRL_REG_LEN_08BIT, 0x21, 0x90}, /* Power-up CSI-TX 21 */ + {0x00, CRL_REG_LEN_DELAY, 0x01, 0x00}, + {0x31, CRL_REG_LEN_08BIT, 0x80, 0x90}, /* ADI Required Write */ +}; + +static struct crl_register_write_rep adv7481_cvbs_streamoff_regs[] = { + {0x31, CRL_REG_LEN_08BIT, 0x82, 0x90}, /* ADI Recommended Write */ + {0x1E, CRL_REG_LEN_08BIT, 0x00, 0x90}, /* Reset the clock Lane */ + {0x00, CRL_REG_LEN_08BIT, 0x81, 0x90}, + {0xDA, CRL_REG_LEN_08BIT, 0x00, 0x90}, /* i2c_mipi_pll_en - + 1'b0 Disable MIPI PLL */ + {0xC1, CRL_REG_LEN_08BIT, 0x3B, 0x90}, +}; + + +static struct crl_pll_configuration adv7481_cvbs_pll_configurations[] = { + { + .input_clk = 286363636, + .op_sys_clk = 216000000, + .bitsperpixel = 16, + .pixel_rate_csi = 27000000, + .pixel_rate_pa = 27000000, + .csi_lanes = 1, + }, + { + .input_clk = 24000000, + .op_sys_clk = 130000000, + .bitsperpixel = 16, + .pixel_rate_csi = 
130000000, + .pixel_rate_pa = 130000000, + .csi_lanes = 1, + }, +}; + +static struct crl_subdev_rect_rep adv7481_cvbs_ntsc_rects[] = { + { + .subdev_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .in_rect.left = 0, + .in_rect.top = 0, + .in_rect.width = 720, + .in_rect.height = 288, + .out_rect.left = 0, + .out_rect.top = 0, + .out_rect.width = 720, + .out_rect.height = 288, + }, + { + .subdev_type = CRL_SUBDEV_TYPE_BINNER, + .in_rect.left = 0, + .in_rect.top = 0, + .in_rect.width = 720, + .in_rect.height = 288, + .out_rect.left = 0, + .out_rect.top = 0, + .out_rect.width = 720, + .out_rect.height = 240, + }, +}; + +static struct crl_subdev_rect_rep adv7481_cvbs_pal_rects[] = { + { + .subdev_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .in_rect.left = 0, + .in_rect.top = 0, + .in_rect.width = 720, + .in_rect.height = 288, + .out_rect.left = 0, + .out_rect.top = 0, + .out_rect.width = 720, + .out_rect.height = 288, + }, + { + .subdev_type = CRL_SUBDEV_TYPE_BINNER, + .in_rect.left = 0, + .in_rect.top = 0, + .in_rect.width = 720, + .in_rect.height = 288, + .out_rect.left = 0, + .out_rect.top = 0, + .out_rect.width = 720, + .out_rect.height = 288, + }, +}; + +static struct crl_mode_rep adv7481_cvbs_modes[] = { + { + .sd_rects_items = ARRAY_SIZE(adv7481_cvbs_ntsc_rects), + .sd_rects = adv7481_cvbs_ntsc_rects, + .binn_hor = 1, + .binn_vert = 1, + .scale_m = 1, + .width = 720, + .height = 240, + }, + { + .sd_rects_items = ARRAY_SIZE(adv7481_cvbs_pal_rects), + .sd_rects = adv7481_cvbs_pal_rects, + .binn_hor = 1, + .binn_vert = 1, + .scale_m = 1, + .width = 720, + .height = 288, + }, +}; + +static struct crl_sensor_subdev_config adv7481_cvbs_sensor_subdevs[] = { + { + .subdev_type = CRL_SUBDEV_TYPE_BINNER, + .name = "adv7481-cvbs binner", + }, + { + .subdev_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .name = "adv7481-cvbs pixel array", + }, +}; + +static struct crl_sensor_limits adv7481_cvbs_sensor_limits = { + .x_addr_min = 0, + .y_addr_min = 0, + .x_addr_max = 720, + .y_addr_max = 288, + 
.min_frame_length_lines = 160, + .max_frame_length_lines = 65535, + .min_line_length_pixels = 6024, + .max_line_length_pixels = 32752, + .scaler_m_min = 1, + .scaler_m_max = 1, + .scaler_n_min = 1, + .scaler_n_max = 1, + .min_even_inc = 1, + .max_even_inc = 1, + .min_odd_inc = 1, + .max_odd_inc = 1, +}; + +static struct crl_csi_data_fmt adv7481_cvbs_crl_csi_data_fmt[] = { + { + .code = MEDIA_BUS_FMT_UYVY8_1X16, + .pixel_order = CRL_PIXEL_ORDER_GRBG, + .bits_per_pixel = 16, + }, +}; + +static struct crl_v4l2_ctrl adv7481_cvbs_v4l2_ctrls[] = { + { + .sd_type = CRL_SUBDEV_TYPE_BINNER, + .op_type = CRL_V4L2_CTRL_SET_OP, + .context = SENSOR_IDLE, + .ctrl_id = V4L2_CID_LINK_FREQ, + .name = "V4L2_CID_LINK_FREQ", + .type = CRL_V4L2_CTRL_TYPE_MENU_INT, + .impact = CRL_IMPACTS_NO_IMPACT, + }, + { + .sd_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .op_type = CRL_V4L2_CTRL_GET_OP, + .context = SENSOR_POWERED_ON, + .ctrl_id = V4L2_CID_PIXEL_RATE, + .name = "V4L2_CID_PIXEL_RATE_PA", + .type = CRL_V4L2_CTRL_TYPE_INTEGER, + .data.std_data.min = 0, + .data.std_data.max = 0, + .data.std_data.step = 1, + .data.std_data.def = 0, + .impact = CRL_IMPACTS_NO_IMPACT, + }, + { + .sd_type = CRL_SUBDEV_TYPE_BINNER, + .op_type = CRL_V4L2_CTRL_GET_OP, + .context = SENSOR_POWERED_ON, + .ctrl_id = V4L2_CID_PIXEL_RATE, + .name = "V4L2_CID_PIXEL_RATE_CSI", + .type = CRL_V4L2_CTRL_TYPE_INTEGER, + .data.std_data.min = 0, + .data.std_data.max = 0, + .data.std_data.step = 1, + .data.std_data.def = 0, + .impact = CRL_IMPACTS_NO_IMPACT, + }, +}; + +static struct crl_sensor_configuration adv7481_cvbs_crl_configuration = { + + /* one time initialization is done by HDMI part */ + + .powerup_regs_items = ARRAY_SIZE(adv7481_cvbs_powerup_regset), + .powerup_regs = adv7481_cvbs_powerup_regset, + + .poweroff_regs_items = ARRAY_SIZE(adv7481_cvbs_streamoff_regs), + .poweroff_regs = adv7481_cvbs_streamoff_regs, + + .subdev_items = ARRAY_SIZE(adv7481_cvbs_sensor_subdevs), + .subdevs = adv7481_cvbs_sensor_subdevs, + + 
.sensor_limits = &adv7481_cvbs_sensor_limits, + + .pll_config_items = ARRAY_SIZE(adv7481_cvbs_pll_configurations), + .pll_configs = adv7481_cvbs_pll_configurations, + + .modes_items = ARRAY_SIZE(adv7481_cvbs_modes), + .modes = adv7481_cvbs_modes, + + .streamon_regs_items = ARRAY_SIZE(adv7481_cvbs_streamon_regs), + .streamon_regs = adv7481_cvbs_streamon_regs, + + .streamoff_regs_items = ARRAY_SIZE(adv7481_cvbs_streamoff_regs), + .streamoff_regs = adv7481_cvbs_streamoff_regs, + + .v4l2_ctrls_items = ARRAY_SIZE(adv7481_cvbs_v4l2_ctrls), + .v4l2_ctrl_bank = adv7481_cvbs_v4l2_ctrls, + + .csi_fmts_items = ARRAY_SIZE(adv7481_cvbs_crl_csi_data_fmt), + .csi_fmts = adv7481_cvbs_crl_csi_data_fmt, + + .addr_len = CRL_ADDR_7BIT, + .i2c_mutex_in_use = true, +}; + +#endif /* __CRLMODULE_ADV7481_CVBS_CONFIGURATION_H_ */ diff --git a/drivers/media/i2c/crlmodule/crl_adv7481_eval_configuration.h b/drivers/media/i2c/crlmodule/crl_adv7481_eval_configuration.h new file mode 100644 index 0000000000000..c7781706d1f3e --- /dev/null +++ b/drivers/media/i2c/crlmodule/crl_adv7481_eval_configuration.h @@ -0,0 +1,577 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (C) 2015 - 2018 Intel Corporation + * + * Author: Jianxu Zheng + * + */ + +#ifndef __CRLMODULE_ADV7481_EVAL_CONFIGURATION_H_ +#define __CRLMODULE_ADV7481_EVAL_CONFIGURATION_H_ + +#include "crlmodule-sensor-ds.h" + + +static struct crl_ctrl_data_pair ctrl_data_lanes[] = { + { + .ctrl_id = V4L2_CID_MIPI_LANES, + .data = 4, + }, + { + .ctrl_id = V4L2_CID_MIPI_LANES, + .data = 2, + }, + { + .ctrl_id = V4L2_CID_MIPI_LANES, + .data = 1, + }, +}; + +static struct crl_pll_configuration adv7481_eval_pll_configurations[] = { + { + .input_clk = 24000000, + .op_sys_clk = 400000000, + .bitsperpixel = 16, + .pixel_rate_csi = 800000000, + .pixel_rate_pa = 800000000, + .comp_items = 0, + .ctrl_data = 0, + .pll_regs_items = 0, + .pll_regs = NULL, + }, + { + .input_clk = 24000000, + .op_sys_clk = 400000000, + .bitsperpixel = 24, + .pixel_rate_csi = 
800000000, + .pixel_rate_pa = 800000000, + .comp_items = 0, + .ctrl_data = 0, + .pll_regs_items = 0, + .pll_regs = NULL, + }, +}; + +static struct crl_subdev_rect_rep adv7481_eval_1080p_rects[] = { + { + .subdev_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .in_rect.left = 0, + .in_rect.top = 0, + .in_rect.width = 1920, + .in_rect.height = 1080, + .out_rect.left = 0, + .out_rect.top = 0, + .out_rect.width = 1920, + .out_rect.height = 1080, + }, + { + .subdev_type = CRL_SUBDEV_TYPE_BINNER, + .in_rect.left = 0, + .in_rect.top = 0, + .in_rect.width = 1920, + .in_rect.height = 1080, + .out_rect.left = 0, + .out_rect.top = 0, + .out_rect.width = 1920, + .out_rect.height = 1080, + }, +}; + +static struct crl_subdev_rect_rep adv7481_eval_720p_rects[] = { + { + .subdev_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .in_rect.left = 0, + .in_rect.top = 0, + .in_rect.width = 1920, + .in_rect.height = 1080, + .out_rect.left = 0, + .out_rect.top = 0, + .out_rect.width = 1920, + .out_rect.height = 1080, + }, + { + .subdev_type = CRL_SUBDEV_TYPE_BINNER, + .in_rect.left = 0, + .in_rect.top = 0, + .in_rect.width = 1920, + .in_rect.height = 1080, + .out_rect.left = 0, + .out_rect.top = 0, + .out_rect.width = 1280, + .out_rect.height = 720, + }, +}; + +static struct crl_subdev_rect_rep adv7481_eval_VGA_rects[] = { + { + .subdev_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .in_rect.left = 0, + .in_rect.top = 0, + .in_rect.width = 1920, + .in_rect.height = 1080, + .out_rect.left = 0, + .out_rect.top = 0, + .out_rect.width = 1920, + .out_rect.height = 1080, + }, + { + .subdev_type = CRL_SUBDEV_TYPE_BINNER, + .in_rect.left = 0, + .in_rect.top = 0, + .in_rect.width = 1920, + .in_rect.height = 1080, + .out_rect.left = 0, + .out_rect.top = 0, + .out_rect.width = 640, + .out_rect.height = 480, + }, +}; + +static struct crl_subdev_rect_rep adv7481_eval_WVGA_rects[] = { + { + .subdev_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .in_rect.left = 0, + .in_rect.top = 0, + .in_rect.width = 1920, + .in_rect.height = 1080, + 
.out_rect.left = 0, + .out_rect.top = 0, + .out_rect.width = 1920, + .out_rect.height = 1080, + }, + { + .subdev_type = CRL_SUBDEV_TYPE_BINNER, + .in_rect.left = 0, + .in_rect.top = 0, + .in_rect.width = 1920, + .in_rect.height = 1080, + .out_rect.left = 0, + .out_rect.top = 0, + .out_rect.width = 800, + .out_rect.height = 480, + }, +}; + +static struct crl_subdev_rect_rep adv7481_eval_1080i_rects[] = { + { + .subdev_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .in_rect.left = 0, + .in_rect.top = 0, + .in_rect.width = 1920, + .in_rect.height = 1080, + .out_rect.left = 0, + .out_rect.top = 0, + .out_rect.width = 1920, + .out_rect.height = 1080, + }, + { + .subdev_type = CRL_SUBDEV_TYPE_BINNER, + .in_rect.left = 0, + .in_rect.top = 0, + .in_rect.width = 1920, + .in_rect.height = 1080, + .out_rect.left = 0, + .out_rect.top = 0, + .out_rect.width = 1920, + .out_rect.height = 540, + }, +}; + +static struct crl_subdev_rect_rep adv7481_eval_480i_rects[] = { + { + .subdev_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .in_rect.left = 0, + .in_rect.top = 0, + .in_rect.width = 1920, + .in_rect.height = 1080, + .out_rect.left = 0, + .out_rect.top = 0, + .out_rect.width = 1920, + .out_rect.height = 1080, + }, + { + .subdev_type = CRL_SUBDEV_TYPE_BINNER, + .in_rect.left = 0, + .in_rect.top = 0, + .in_rect.width = 1920, + .in_rect.height = 1080, + .out_rect.left = 0, + .out_rect.top = 0, + .out_rect.width = 720, + .out_rect.height = 240, + }, +}; + +static struct crl_subdev_rect_rep adv7481_eval_576p_rects[] = { + { + .subdev_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .in_rect.left = 0, + .in_rect.top = 0, + .in_rect.width = 1920, + .in_rect.height = 1080, + .out_rect.left = 0, + .out_rect.top = 0, + .out_rect.width = 1920, + .out_rect.height = 1080, + }, + { + .subdev_type = CRL_SUBDEV_TYPE_BINNER, + .in_rect.left = 0, + .in_rect.top = 0, + .in_rect.width = 1920, + .in_rect.height = 1080, + .out_rect.left = 0, + .out_rect.top = 0, + .out_rect.width = 720, + .out_rect.height = 576, + }, +}; + 
+static struct crl_subdev_rect_rep adv7481_eval_576i_rects[] = { + { + .subdev_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .in_rect.left = 0, + .in_rect.top = 0, + .in_rect.width = 1920, + .in_rect.height = 1080, + .out_rect.left = 0, + .out_rect.top = 0, + .out_rect.width = 1920, + .out_rect.height = 1080, + }, + { + .subdev_type = CRL_SUBDEV_TYPE_BINNER, + .in_rect.left = 0, + .in_rect.top = 0, + .in_rect.width = 1920, + .in_rect.height = 1080, + .out_rect.left = 0, + .out_rect.top = 0, + .out_rect.width = 720, + .out_rect.height = 288, + }, +}; +static struct crl_mode_rep adv7481_eval_modes[] = { + { + .sd_rects_items = ARRAY_SIZE(adv7481_eval_1080p_rects), + .sd_rects = adv7481_eval_1080p_rects, + .binn_hor = 1, + .binn_vert = 1, + .scale_m = 1, + .width = 1920, + .height = 1080, + .comp_items = 1, + .ctrl_data = &ctrl_data_lanes[0], + .mode_regs_items = 0, + .mode_regs = NULL, + }, + { + .sd_rects_items = ARRAY_SIZE(adv7481_eval_720p_rects), + .sd_rects = adv7481_eval_720p_rects, + .binn_hor = 1, + .binn_vert = 1, + .scale_m = 1, + .width = 1280, + .height = 720, + .comp_items = 1, + .ctrl_data = &ctrl_data_lanes[0], + .mode_regs_items = 0, + .mode_regs = NULL, + }, + { + .sd_rects_items = ARRAY_SIZE(adv7481_eval_WVGA_rects), + .sd_rects = adv7481_eval_WVGA_rects, + .binn_hor = 2, + .binn_vert = 2, + .scale_m = 1, + .width = 800, + .height = 480, + .comp_items = 1, + .ctrl_data = &ctrl_data_lanes[2], + .mode_regs_items = 0, + .mode_regs = NULL, + }, + { + .sd_rects_items = ARRAY_SIZE(adv7481_eval_VGA_rects), + .sd_rects = adv7481_eval_VGA_rects, + .binn_hor = 3, + .binn_vert = 2, + .scale_m = 1, + .width = 640, + .height = 480, + .comp_items = 1, + .ctrl_data = &ctrl_data_lanes[2], + .mode_regs_items = 0, + .mode_regs = NULL, + }, + { + .sd_rects_items = ARRAY_SIZE(adv7481_eval_1080i_rects), + .sd_rects = adv7481_eval_1080i_rects, + .binn_hor = 1, + .binn_vert = 2, + .scale_m = 1, + .width = 1920, + .height = 540, + .comp_items = 1, + .ctrl_data = 
&ctrl_data_lanes[1], + .mode_regs_items = 0, + .mode_regs = NULL, + }, + { + .sd_rects_items = ARRAY_SIZE(adv7481_eval_480i_rects), + .sd_rects = adv7481_eval_480i_rects, + .binn_hor = 2, + .binn_vert = 4, + .scale_m = 1, + .width = 720, + .height = 240, + .comp_items = 1, + .ctrl_data = &ctrl_data_lanes[2], + .mode_regs_items = 0, + .mode_regs = NULL, + }, + { + .sd_rects_items = ARRAY_SIZE(adv7481_eval_576p_rects), + .sd_rects = adv7481_eval_576p_rects, + .binn_hor = 2, + .binn_vert = 1, + .scale_m = 1, + .width = 720, + .height = 576, + .comp_items = 1, + .ctrl_data = &ctrl_data_lanes[2], + .mode_regs_items = 0, + .mode_regs = NULL, + }, + { + .sd_rects_items = ARRAY_SIZE(adv7481_eval_576i_rects), + .sd_rects = adv7481_eval_576i_rects, + .binn_hor = 2, + .binn_vert = 3, + .scale_m = 1, + .width = 720, + .height = 288, + .comp_items = 1, + .ctrl_data = &ctrl_data_lanes[2], + .mode_regs_items = 0, + .mode_regs = NULL, + }, +}; + +static struct crl_sensor_subdev_config adv7481_eval_sensor_subdevs[] = { + { + .subdev_type = CRL_SUBDEV_TYPE_BINNER, + .name = "adv7481 binner", + }, + { + .subdev_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .name = "adv7481 pixel array", + }, +}; + +static struct crl_sensor_subdev_config adv7481b_eval_sensor_subdevs[] = { + { + .subdev_type = CRL_SUBDEV_TYPE_BINNER, + .name = "adv7481b binner", + }, + { + .subdev_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .name = "adv7481b pixel array", + }, +}; + +static struct crl_sensor_limits adv7481_eval_sensor_limits = { + .x_addr_min = 0, + .y_addr_min = 0, + .x_addr_max = 1920, + .y_addr_max = 1080, + .min_frame_length_lines = 160, + .max_frame_length_lines = 65535, + .min_line_length_pixels = 6024, + .max_line_length_pixels = 32752, + .scaler_m_min = 1, + .scaler_m_max = 1, + .scaler_n_min = 1, + .scaler_n_max = 1, + .min_even_inc = 1, + .max_even_inc = 1, + .min_odd_inc = 1, + .max_odd_inc = 1, +}; + +static struct crl_csi_data_fmt adv7481_eval_crl_csi_data_fmt[] = { + { + .code = 
MEDIA_BUS_FMT_YUYV8_1X16, + .pixel_order = CRL_PIXEL_ORDER_GRBG, + .bits_per_pixel = 16, + .regs_items = 0, + .regs = NULL, + }, + { + .code = MEDIA_BUS_FMT_UYVY8_1X16, + .pixel_order = CRL_PIXEL_ORDER_GRBG, + .bits_per_pixel = 16, + .regs_items = 0, + .regs = NULL, + }, + { + .code = MEDIA_BUS_FMT_RGB565_1X16, + .pixel_order = CRL_PIXEL_ORDER_GRBG, + .bits_per_pixel = 16, + .regs_items = 0, + .regs = NULL, + }, + { + .code = MEDIA_BUS_FMT_RGB888_1X24, + .pixel_order = CRL_PIXEL_ORDER_GRBG, + .bits_per_pixel = 24, + .regs_items = 0, + .regs = NULL, + }, +}; + +static struct crl_v4l2_ctrl adv7481_eval_v4l2_ctrls[] = { + { + .sd_type = CRL_SUBDEV_TYPE_BINNER, + .op_type = CRL_V4L2_CTRL_SET_OP, + .context = SENSOR_IDLE, + .ctrl_id = V4L2_CID_LINK_FREQ, + .name = "V4L2_CID_LINK_FREQ", + .type = CRL_V4L2_CTRL_TYPE_MENU_INT, + .data.v4l2_int_menu.def = 0, + .data.v4l2_int_menu.max = 0, + .data.v4l2_int_menu.menu = 0, + .flags = 0, + .impact = CRL_IMPACTS_NO_IMPACT, + .regs_items = 0, + .regs = 0, + .dep_items = 0, + .dep_ctrls = 0, + }, + { + .sd_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .op_type = CRL_V4L2_CTRL_GET_OP, + .context = SENSOR_POWERED_ON, + .ctrl_id = V4L2_CID_PIXEL_RATE, + .name = "V4L2_CID_PIXEL_RATE_PA", + .type = CRL_V4L2_CTRL_TYPE_INTEGER, + .data.std_data.min = 0, + .data.std_data.max = 0, + .data.std_data.step = 1, + .data.std_data.def = 0, + .flags = 0, + .impact = CRL_IMPACTS_NO_IMPACT, + .regs_items = 0, + .regs = 0, + .dep_items = 0, + .dep_ctrls = 0, + }, + { + .sd_type = CRL_SUBDEV_TYPE_BINNER, + .op_type = CRL_V4L2_CTRL_GET_OP, + .context = SENSOR_POWERED_ON, + .ctrl_id = V4L2_CID_PIXEL_RATE, + .name = "V4L2_CID_PIXEL_RATE_CSI", + .type = CRL_V4L2_CTRL_TYPE_INTEGER, + .data.std_data.min = 0, + .data.std_data.max = 0, + .data.std_data.step = 1, + .data.std_data.def = 0, + .flags = 0, + .impact = CRL_IMPACTS_NO_IMPACT, + .regs_items = 0, + .regs = 0, + .dep_items = 0, + .dep_ctrls = 0, + }, + { + .sd_type = CRL_SUBDEV_TYPE_BINNER, + .op_type = 
CRL_V4L2_CTRL_GET_OP, + .context = SENSOR_POWERED_ON, + .ctrl_id = V4L2_CID_MIPI_LANES, + .name = "V4L2_CID_MIPI_LANES", + .type = CRL_V4L2_CTRL_TYPE_CUSTOM, + .data.std_data.min = 1, + .data.std_data.max = 4, + .data.std_data.step = 1, + .data.std_data.def = 4, + .flags = 0, + .impact = CRL_IMPACTS_NO_IMPACT, + .regs_items = 0, + .regs = 0, + .dep_items = 0, + .dep_ctrls = 0, + .v4l2_type = V4L2_CTRL_TYPE_INTEGER, + }, +}; + +static struct crl_sensor_configuration adv7481_eval_crl_configuration = { + + .powerup_regs_items = 0, + .powerup_regs = NULL, + + .poweroff_regs_items = 0, + .poweroff_regs = NULL, + + .id_reg_items = 0, + .id_regs = NULL, + + .subdev_items = ARRAY_SIZE(adv7481_eval_sensor_subdevs), + .subdevs = adv7481_eval_sensor_subdevs, + + .sensor_limits = &adv7481_eval_sensor_limits, + + .pll_config_items = ARRAY_SIZE(adv7481_eval_pll_configurations), + .pll_configs = adv7481_eval_pll_configurations, + + .modes_items = ARRAY_SIZE(adv7481_eval_modes), + .modes = adv7481_eval_modes, + + .streamon_regs_items = 0, + .streamon_regs = NULL, + + .streamoff_regs_items = 0, + .streamoff_regs = NULL, + + .v4l2_ctrls_items = ARRAY_SIZE(adv7481_eval_v4l2_ctrls), + .v4l2_ctrl_bank = adv7481_eval_v4l2_ctrls, + + .csi_fmts_items = ARRAY_SIZE(adv7481_eval_crl_csi_data_fmt), + .csi_fmts = adv7481_eval_crl_csi_data_fmt, +}; + +static struct crl_sensor_configuration adv7481b_eval_crl_configuration = { + + .powerup_regs_items = 0, + .powerup_regs = NULL, + + .poweroff_regs_items = 0, + .poweroff_regs = NULL, + + .id_reg_items = 0, + .id_regs = NULL, + + .subdev_items = ARRAY_SIZE(adv7481b_eval_sensor_subdevs), + .subdevs = adv7481b_eval_sensor_subdevs, + + .sensor_limits = &adv7481_eval_sensor_limits, + + .pll_config_items = ARRAY_SIZE(adv7481_eval_pll_configurations), + .pll_configs = adv7481_eval_pll_configurations, + + .modes_items = ARRAY_SIZE(adv7481_eval_modes), + .modes = adv7481_eval_modes, + + .streamon_regs_items = 0, + .streamon_regs = NULL, + + 
.streamoff_regs_items = 0, + .streamoff_regs = NULL, + + .v4l2_ctrls_items = ARRAY_SIZE(adv7481_eval_v4l2_ctrls), + .v4l2_ctrl_bank = adv7481_eval_v4l2_ctrls, + + .csi_fmts_items = ARRAY_SIZE(adv7481_eval_crl_csi_data_fmt), + .csi_fmts = adv7481_eval_crl_csi_data_fmt, +}; + +#endif /* __CRLMODULE_ADV7481_EVAL_CONFIGURATION_H_ */ diff --git a/drivers/media/i2c/crlmodule/crl_adv7481_hdmi_configuration.c b/drivers/media/i2c/crlmodule/crl_adv7481_hdmi_configuration.c new file mode 100644 index 0000000000000..d8d9c7b930a88 --- /dev/null +++ b/drivers/media/i2c/crlmodule/crl_adv7481_hdmi_configuration.c @@ -0,0 +1,615 @@ +// SPDX-License-Identifier: GPL-2.0 +// Copyright (C) 2016 - 2018 Intel Corporation + +#include +#include +#include +#include +#include +#include +#include + +#include "crlmodule.h" +#include "crlmodule-regs.h" + +/* Size of the mondello KSV buffer in bytes */ +#define ADV7481_KSV_BUFFER_SIZE 0x80 +/* Size of a single KSV */ +#define ADV7481_KSV_SIZE 0x05 +/* Max number of devices (MAX_MONDELO_KSV_SIZE / HDCP_KSV_SIZE */ +#define ADV7481_MAX_DEVICES 0x19 +#define ADV7481_AKSV_UPDATE_A_ST 0x08 +#define ADV7481_CABLE_DET_A_ST 0x40 +#define ADV7481_V_LOCKED_A_ST 0x02 +#define ADV7481_DE_REGEN_A_ST 0x01 + +struct crl_adv7481_hdmi { + unsigned int in_hot_plug_reset; + int hdmi_res_width; + int hdmi_res_height; + int hdmi_res_interlaced; + int hdmi_cable_connected; + struct delayed_work work; + struct mutex hot_plug_reset_lock; + struct i2c_client *client; +}; + +/* ADV7481 HDCP B-status register */ +struct v4l2_adv7481_bstatus { + union { + __u8 bstatus[2]; + struct { + __u8 device_count:7; + __u8 max_devs_exceeded:1; + __u8 depth:3; + __u8 max_cascade_exceeded:1; + __u8 hdmi_mode:1; + __u8 hdmi_reserved_2:1; + __u8 rsvd:2; + }; + }; +}; + +struct v4l2_adv7481_dev_info { + struct v4l2_adv7481_bstatus bstatus; + __u8 ksv[ADV7481_KSV_BUFFER_SIZE]; +}; + +struct v4l2_adv7481_bcaps { + union { + __u8 bcaps; + struct { + __u8 fast_reauth:1; + __u8 features:1; + 
__u8 reserved:2; + __u8 fast:1; + __u8 ksv_fifo_ready:1; + __u8 repeater:1; + __u8 hdmi_reserved:1; + }; + }; +}; + +static int adv_i2c_write(struct i2c_client *client, + u16 i2c_addr, + u16 reg, + u8 val) +{ + struct v4l2_subdev *subdev = i2c_get_clientdata(client); + struct crl_sensor *sensor = to_crlmodule_sensor(subdev); + + return crlmodule_write_reg(sensor, i2c_addr, reg, + CRL_REG_LEN_08BIT, 0xFF, val); +} + +static int adv_i2c_read(struct i2c_client *client, + u16 i2c_addr, + u16 reg, + u32 *val) +{ + struct v4l2_subdev *subdev = i2c_get_clientdata(client); + struct crl_sensor *sensor = to_crlmodule_sensor(subdev); + struct crl_register_read_rep read_reg; + + read_reg.address = reg; + read_reg.len = CRL_REG_LEN_08BIT; + read_reg.dev_i2c_addr = i2c_addr; + return crlmodule_read_reg(sensor, read_reg, val); +} + +/* + * Writes the HDCP BKSV list & status when the system acts + * as an HDCP 1.4 repeater + */ +static long adv_write_bksv(struct i2c_client *client, + struct v4l2_adv7481_dev_info *dev_info) +{ + unsigned int k = 0; + int ret = 0; + u32 reg; + struct v4l2_subdev *subdev = i2c_get_clientdata(client); + struct crl_sensor *sensor = to_crlmodule_sensor(subdev); + + dev_dbg(&client->dev, "%s: Writing ADV7481 BKSV list.\n", __func__); + + /* Clear BCAPS KSV list ready */ + ret = adv_i2c_write(client, 0x64, 0x78, 0x01); + if (ret) { + dev_err(&client->dev, + "%s: Error clearing BCAPS KSV list ready!\n", + __func__); + return ret; + } + + /* KSV_LIST_READY_PORT_A KSV list not ready */ + ret = adv_i2c_write(client, 0x64, 0x69, 0x00); + if (ret) { + dev_err(&client->dev, + "%s: Error clearing KSV_LIST_READY_PORT_A register!\n", + __func__); + return ret; + } + + /* Write the BSKV list, one device at a time */ + /* Writing the entire list in one call exceeds frame size */ + for (k = 0; k < ADV7481_MAX_DEVICES; ++k) { + unsigned int j = k * ADV7481_KSV_SIZE; + struct crl_register_write_rep adv_ksv_cmd[] = { + {0x80 + j, CRL_REG_LEN_08BIT, + dev_info->ksv[j + 
0], 0x64}, + {0x81 + j, CRL_REG_LEN_08BIT, + dev_info->ksv[j + 1], 0x64}, + {0x82 + j, CRL_REG_LEN_08BIT, + dev_info->ksv[j + 2], 0x64}, + {0x83 + j, CRL_REG_LEN_08BIT, + dev_info->ksv[j + 3], 0x64}, + {0x84 + j, CRL_REG_LEN_08BIT, + dev_info->ksv[j + 4], 0x64}, + }; + ret = crlmodule_write_regs(sensor, adv_ksv_cmd, + ARRAY_SIZE(adv_ksv_cmd)); + + if (ret) { + dev_err(&client->dev, + "%s: Error while writing BKSV list!\n", + __func__); + return ret; + } + } + + /* Finally update the bstatus registers */ + ret = adv_i2c_read(client, 0x64, 0x42, ®); + + if (ret) { + dev_err(&client->dev, + "%s: Error reading bstatus register!\n", + __func__); + return ret; + } + + /* ADV recommendation: only update bits [0:11] */ + /* Take the lower nibble (bits [11:8]) of the input bstatus */ + /* Take the upper nibble (bits [15:12]) of the current register */ + dev_info->bstatus.bstatus[1] = + (dev_info->bstatus.bstatus[1] & 0x0F) | (reg & 0xF0); + { + struct crl_register_write_rep adv_cmd[] = { + {0x41, CRL_REG_LEN_08BIT, + dev_info->bstatus.bstatus[0], 0x64}, + {0x42, CRL_REG_LEN_08BIT, + dev_info->bstatus.bstatus[1], 0x64}, + /* KSV_LIST_READY_PORT_A */ + {0x69, CRL_REG_LEN_08BIT, 0x01, 0x64}, + }; + + ret = crlmodule_write_regs(sensor, adv_cmd, + ARRAY_SIZE(adv_cmd)); + } + + return ret; +} + +static ssize_t adv_bcaps_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + u32 val; + int ret; + struct i2c_client *client = container_of(dev, struct i2c_client, dev); + + ret = adv_i2c_read(client, 0x64, 0x40, &val); + + if (ret != 0) + return -EIO; + + val = val & 0xFF; + *buf = val; + return 1; +} + +/* Declares bcaps attribute that will be exposed to user space via sysfs */ +static DEVICE_ATTR(bcaps, S_IRUGO, adv_bcaps_show, NULL); + +/* + * Writes provided BKSV value from user space to chip. 
+ * BKSV should be formatted as v4l2_adv7481_dev_info struct, + * it does basic validation and checks if provided buffer size matches + * size of v4l2_adv7481_dev_info struct. In case of error return EIO. + */ +static ssize_t adv_bksv_store(struct device *dev, + struct device_attribute *attr, + const char *buf, + size_t count) +{ + int ret; + struct v4l2_adv7481_dev_info dev_info; + struct i2c_client *client = container_of(dev, struct i2c_client, dev); + + dev_dbg(&client->dev, "%s\n", __func__); + if (count != sizeof(struct v4l2_adv7481_dev_info)) + return -EIO; + + dev_info = *((struct v4l2_adv7481_dev_info *) buf); + + ret = adv_write_bksv(client, &dev_info); + + if (ret != 0) + return -EIO; + + return count; +} + +/* Declares bksv attribute that will be exposed to user space via sysfs */ +static DEVICE_ATTR(bksv, S_IWUSR | S_IWGRP, NULL, adv_bksv_store); + +/* + * Enables HPA_MAN_VALUE_PORT_A to enable hot plug detection. + */ +static void adv_hpa_assert(struct work_struct *work) +{ + + struct crl_adv7481_hdmi *adv7481_hdmi + = container_of(work, struct crl_adv7481_hdmi, work.work); + struct i2c_client *client = adv7481_hdmi->client; + + adv_i2c_write(client, 0x68, 0xF8, 0x01); + adv7481_hdmi->in_hot_plug_reset = 0; +} + +/* + * Reauthenticates HDCP by disabling hot plug detection for 2 seconds. + * It can be triggered by user space by writing any value to "reauthenticate" + * attribute. After that time connected source will automatically ask for HDCP + * authentication once again. To prevent sleep, timer is used to delay enabling + * of hot plug by 2 seconds. + * In case that previous reauthentication is not completed, returns EBUSY. + * In case of error returns EIO. 
+ */ +static ssize_t adv_reauthenticate_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + int ret; + struct crl_adv7481_hdmi *adv7481_hdmi; + struct i2c_client *client = to_i2c_client(dev); + struct v4l2_subdev *sd = i2c_get_clientdata(client); + struct crl_subdev *ssd = to_crlmodule_subdev(sd); + struct crl_sensor *sensor = ssd->sensor; + + adv7481_hdmi = sensor->sensor_specific_data; + + dev_dbg(&client->dev, "%s\n", __func__); + + mutex_lock(&adv7481_hdmi->hot_plug_reset_lock); + + if (adv7481_hdmi->in_hot_plug_reset) { + mutex_unlock(&adv7481_hdmi->hot_plug_reset_lock); + return -EBUSY; + } + + /* Clear BCAPS KSV list ready */ + ret = adv_i2c_write(client, 0x64, 0x78, 0x01); + if (ret != 0) { + dev_err(&client->dev, + "%s: Error clearing BCAPS KSV list ready!\n", __func__); + mutex_unlock(&adv7481_hdmi->hot_plug_reset_lock); + return -EIO; + } + + /* KSV_LIST_READY_PORT_A KSV list not ready */ + ret = adv_i2c_write(client, 0x64, 0x69, 0x00); + if (ret != 0) { + dev_err(&client->dev, + "%s: Error clearing KSV_LIST_READY_PORT_A register!\n", + __func__); + mutex_unlock(&adv7481_hdmi->hot_plug_reset_lock); + return -EIO; + } + + ret = adv_i2c_write(client, 0x68, 0xF8, 0x00); + + if (ret != 0) { + mutex_unlock(&adv7481_hdmi->hot_plug_reset_lock); + return -EIO; + } + + adv7481_hdmi->in_hot_plug_reset = 1; + schedule_delayed_work(&adv7481_hdmi->work, msecs_to_jiffies(2000)); + + mutex_unlock(&adv7481_hdmi->hot_plug_reset_lock); + return count; +} + +/* Declares reauthenticate attribute that will be exposed + * to user space via sysfs + */ +static DEVICE_ATTR(reauthenticate, S_IWUSR | S_IWGRP, NULL, + adv_reauthenticate_store); + +/* Dummy show to prevent WARN when registering aksv attribute */ +static ssize_t adv_aksv_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + (void) dev; + (void) attr; + (void) buf; + + return -EIO; +} + +/* Declares aksv attribute that will be exposed to user space via 
sysfs, + * to notify about AKSV events. + */ +static DEVICE_ATTR(aksv, S_IRUGO, adv_aksv_show, NULL); + + +static ssize_t adv_hdmi_cable_connected_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct crl_adv7481_hdmi *adv7481_hdmi; + struct i2c_client *client = to_i2c_client(dev); + struct v4l2_subdev *sd = i2c_get_clientdata(client); + struct crl_subdev *ssd = to_crlmodule_subdev(sd); + struct crl_sensor *sensor = ssd->sensor; + char interlaced = 'p'; + adv7481_hdmi = sensor->sensor_specific_data; + + if (adv7481_hdmi->hdmi_res_interlaced) + interlaced = 'i'; + + return snprintf(buf, PAGE_SIZE, "%dx%d%c", + adv7481_hdmi->hdmi_res_width, + adv7481_hdmi->hdmi_res_height, interlaced); +} +static DEVICE_ATTR(hdmi_cable_connected, S_IRUGO, + adv_hdmi_cable_connected_show, NULL); + +static ssize_t adv_bstatus_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + u32 b0, b1; + int ret; + struct i2c_client *client = container_of(dev, struct i2c_client, dev); + + dev_dbg(&client->dev, "Getting bstatus\n"); + ret = adv_i2c_read(client, 0x64, 0x41, &b0); + if (ret != 0) { + dev_err(&client->dev, "Error getting bstatus(0)\n"); + return -EIO; + } + dev_dbg(&client->dev, "btatus(0): 0x%x\n", b0 & 0xff); + ret = adv_i2c_read(client, 0x64, 0x42, &b1); + if (ret != 0) { + dev_err(&client->dev, "Error getting bstatus(1)\n"); + return -EIO; + } + dev_dbg(&client->dev, "bstatus(1): 0x%x\n", b1 & 0xff); + *buf = b0 & 0xff; + buf++; + *buf = b1 & 0xff; + return 2; +} +static DEVICE_ATTR(bstatus, S_IRUGO, adv_bstatus_show, NULL); + +irqreturn_t crl_adv7481_threaded_irq_fn(int irq, void *sensor_struct) +{ + struct crl_sensor *sensor = sensor_struct; + struct i2c_client *client = v4l2_get_subdevdata(&sensor->src->sd); + + u32 interrupt_st; + u32 raw_value; + u32 temp[3]; + int ret = 0; + struct crl_register_read_rep reg; + struct crl_adv7481_hdmi *adv7481_hdmi; + + adv7481_hdmi = sensor->sensor_specific_data; + reg.address = 0x90; + 
reg.len = CRL_REG_LEN_08BIT; + reg.mask = 0xFF; + reg.dev_i2c_addr = 0xE0; + + dev_dbg(&client->dev, "%s\n", __func__); + + if (!adv7481_hdmi) + return IRQ_HANDLED; + + /* AKSV_UPDATE_A_ST: check interrupt status */ + ret = adv_i2c_read(client, 0xE0, 0x90, &interrupt_st); + + if (interrupt_st & 0x08 /*ADV7481_AKSV_UPDATE_A_ST*/) { + dev_dbg(&client->dev, + "%s: ADV7481 ISR: AKSV_UPDATE_A_ST: 0x%x\n", + __func__, interrupt_st); + + /* Notify user space about AKSV event */ + sysfs_notify(&client->dev.kobj, NULL, "aksv"); + + /* Clear interrupt bit */ + ret = adv_i2c_write(client, 0xE0, 0x91, 0x08); + } + + /* + * Check interrupt status for: CABLE_DET_A_ST, + * V_LOCKED_A_ST and DE_REGEN_LCK_A_ST + */ + ret = adv_i2c_read(client, 0xE0, 0x72, &interrupt_st); + + /* If any of CABLE_DET_A_ST, V_LOCKED_A_ST and DE_REGEN_LCK_A_ST + * interrupts was set, get updated values of CABLE_DET_RAW, + * V_LOCKED_RAW and DE_REGEN_LCK_RAW + */ + if (interrupt_st) { + ret = adv_i2c_read(client, 0xE0, 0x71, &raw_value); + } + + /* Check CABLE_DET_A_ST interrupt */ + if ((interrupt_st & ADV7481_CABLE_DET_A_ST)) { + /* Clear interrupt bit */ + ret = adv_i2c_write(client, 0xE0, 0x73, 0x40); + + /* HDMI cable is connected */ + if (raw_value & ADV7481_CABLE_DET_A_ST) { + dev_dbg(&client->dev, + "%s: ADV7481 ISR: HDMI cable connected\n", + __func__); + ret = adv_i2c_write(client, 0xE0, 0x10, 0xA1); + } else { + dev_dbg(&client->dev, + "%s: ADV7481 ISR: HDMI cable disconnected\n", + __func__); + } + } + + /* Check V_LOCKED_A_ST interrupt */ + if ((interrupt_st & ADV7481_V_LOCKED_A_ST)) { + /* Clear interrupt bit */ + ret = adv_i2c_write(client, 0xE0, 0x73, 0x02); + /* Vertical sync filter has been locked, + * resolution height can be read + */ + if (raw_value & ADV7481_V_LOCKED_A_ST) { + dev_dbg(&client->dev, + "%s: ADV7481 ISR: Vertical Sync Filter Locked\n", + __func__); + reg.dev_i2c_addr = 0x68; /* HDMI_RX_MAP; */ + reg.address = 0x09; + adv_i2c_read(client, 0x68, 0x09, &temp[0]); + 
adv_i2c_read(client, 0x68, 0x0A, &temp[1]); + adv_i2c_read(client, 0x68, 0x0B, &temp[2]); + + temp[0] = temp[0] & 0x1F; + adv7481_hdmi->hdmi_res_height = + (temp[0] << 8) + temp[1]; + if (temp[2] & 0x20) { + adv7481_hdmi->hdmi_res_height = + adv7481_hdmi->hdmi_res_height << 1; + adv7481_hdmi->hdmi_res_interlaced = 1; + } else { + adv7481_hdmi->hdmi_res_interlaced = 0; + } + + /* + * If resolution width was already read, + * notify user space about new resolution + */ + if (adv7481_hdmi->hdmi_res_width) { + sysfs_notify(&client->dev.kobj, NULL, + "hdmi_cable_connected"); + } + } else { + dev_dbg(&client->dev, + "%s: ADV7481 ISR: Vertical Sync Filte Lost\n", + __func__); + adv7481_hdmi->hdmi_res_height = 0; + /* Notify user space about losing resolution */ + if (!adv7481_hdmi->hdmi_res_width) { + sysfs_notify(&client->dev.kobj, NULL, + "hdmi_cable_connected"); + } + } + } + + /* Check DE_REGEN_A_ST interrupt */ + if ((interrupt_st & ADV7481_DE_REGEN_A_ST)) { + /* Clear interrupt bit */ + ret = adv_i2c_write(client, 0xE0, 0x73, 0x01); + + /* DE regeneration has been locked, + * resolution height can be read + */ + if (raw_value & ADV7481_DE_REGEN_A_ST) { + dev_dbg(&client->dev, + "%s: ADV7481 ISR: DE Regeneration Locked\n", + __func__); + reg.dev_i2c_addr = 0x68; /* HDMI_RX_MAP; */ + reg.address = 0x07; + adv_i2c_read(client, 0x68, 0x07, &temp[0]); + adv_i2c_read(client, 0x68, 0x08, &temp[1]); + + temp[0] = temp[0] & 0x1F; + adv7481_hdmi->hdmi_res_width = (temp[0] << 8) + temp[1]; + + /* If resolution height was already read back, + notify user space about new resolution */ + if (adv7481_hdmi->hdmi_res_height) { + sysfs_notify(&client->dev.kobj, NULL, + "hdmi_cable_connected"); + } + } else { + dev_dbg(&client->dev, + "%s: ADV7481 ISR: DE Regeneration Lost\n", + __func__); + adv7481_hdmi->hdmi_res_width = 0; + /* Notfiy user space about losing resolution */ + if (!adv7481_hdmi->hdmi_res_height) { + sysfs_notify(&client->dev.kobj, NULL, + "hdmi_cable_connected"); + } + 
} + } + return IRQ_HANDLED; +} + +static struct attribute *adv7481_attributes[] = { + &dev_attr_bstatus.attr, + &dev_attr_hdmi_cable_connected.attr, + &dev_attr_aksv.attr, + &dev_attr_reauthenticate.attr, + &dev_attr_bksv.attr, + &dev_attr_bcaps.attr, + NULL +}; + +static const struct attribute_group adv7481_attr_group = { + .attrs = adv7481_attributes, +}; + +int adv7481_sensor_init(struct i2c_client *client) +{ + struct crl_adv7481_hdmi *adv7481_hdmi; + struct v4l2_subdev *sd = i2c_get_clientdata(client); + struct crl_subdev *ssd = to_crlmodule_subdev(sd); + struct crl_sensor *sensor = ssd->sensor; + + adv7481_hdmi = devm_kzalloc(&client->dev, + sizeof(*adv7481_hdmi), GFP_KERNEL); + + if (!adv7481_hdmi) + return -ENOMEM; + + sensor->sensor_specific_data = adv7481_hdmi; + adv7481_hdmi->client = client; + mutex_init(&adv7481_hdmi->hot_plug_reset_lock); + INIT_DELAYED_WORK(&adv7481_hdmi->work, adv_hpa_assert); + dev_dbg(&client->dev, "%s ADV7481_sensor_init\n", __func__); + + return sysfs_create_group(&client->dev.kobj, &adv7481_attr_group); + +} + +int adv7481_sensor_cleanup(struct i2c_client *client) +{ + struct crl_adv7481_hdmi *adv7481_hdmi; + struct v4l2_subdev *sd = i2c_get_clientdata(client); + struct crl_subdev *ssd = to_crlmodule_subdev(sd); + struct crl_sensor *sensor = ssd->sensor; + + adv7481_hdmi = sensor->sensor_specific_data; + + /* + * This can be NULL if crlmodule_registered call failed before + * sensor_init call. 
+ */ + if (!adv7481_hdmi) + return 0; + + dev_dbg(&client->dev, "%s: ADV7481_sensor_cleanup\n", __func__); + cancel_delayed_work_sync(&adv7481_hdmi->work); + + sysfs_remove_group(&client->dev.kobj, &adv7481_attr_group); + return 0; +} diff --git a/drivers/media/i2c/crlmodule/crl_adv7481_hdmi_configuration.h b/drivers/media/i2c/crlmodule/crl_adv7481_hdmi_configuration.h new file mode 100644 index 0000000000000..0a33945695095 --- /dev/null +++ b/drivers/media/i2c/crlmodule/crl_adv7481_hdmi_configuration.h @@ -0,0 +1,1025 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (C) 2016 - 2018 Intel Corporation */ + +#ifndef __CRLMODULE_ADV7481_HDMI_CONFIGURATION_H_ +#define __CRLMODULE_ADV7481_HDMI_CONFIGURATION_H_ + +#include "crlmodule-sensor-ds.h" +irqreturn_t crl_adv7481_threaded_irq_fn(int irq, void *sensor_struct); + +struct crl_ctrl_data_pair hdmi_ctrl_data_lanes[] = { + { + .ctrl_id = V4L2_CID_MIPI_LANES, + .data = 4, + }, + { + .ctrl_id = V4L2_CID_MIPI_LANES, + .data = 2, + }, + { + .ctrl_id = V4L2_CID_MIPI_LANES, + .data = 1, + }, +}; + + +static struct crl_register_write_rep adv7481_hdmi_onetime_init_regset[] = { + {0xFF, CRL_REG_LEN_08BIT, 0xFF, 0xE0}, + {0x00, CRL_REG_LEN_DELAY, 0x05, 0x00}, + {0x01, CRL_REG_LEN_08BIT, 0x76, 0xE0}, /* ADI Required Write */ + {0x05, CRL_REG_LEN_08BIT, 0x96, 0xE0}, /* Setting Vid_Std to + 1600x1200(UXGA)@60 */ + {0xF2, CRL_REG_LEN_08BIT, 0x01, 0xE0}, /* Enable I2C Read + Auto-Increment */ + {0xF3, CRL_REG_LEN_08BIT, 0x4C, 0xE0}, /* DPLL Map Address + Set to 0x4C */ + {0xF4, CRL_REG_LEN_08BIT, 0x44, 0xE0}, /* CP Map Address + Set to 0x44 */ + {0xF5, CRL_REG_LEN_08BIT, 0x68, 0xE0}, /* HDMI RX Map Address + Set to 0x68 */ + {0xF6, CRL_REG_LEN_08BIT, 0x6C, 0xE0}, /* EDID Map Address + Set to 0x6C */ + {0xF7, CRL_REG_LEN_08BIT, 0x64, 0xE0}, /* HDMI RX Repeater Map Address + Set to 0x64 */ + {0xF8, CRL_REG_LEN_08BIT, 0x62, 0xE0}, /* HDMI RX Infoframe Map Address + Set to 0x62 */ + {0xF9, CRL_REG_LEN_08BIT, 0xF0, 0xE0}, /* CBUS 
Map Address + Set to 0xF0 */ + {0xFA, CRL_REG_LEN_08BIT, 0x82, 0xE0}, /* CEC Map Address + Set to 0x82 */ + {0xFB, CRL_REG_LEN_08BIT, 0xF2, 0xE0}, /* SDP Main Map Address + Set to 0xF2 */ + {0xFC, CRL_REG_LEN_08BIT, 0x90, 0xE0}, /* CSI-TXB Map Address + Set to 0x90 */ + {0xFD, CRL_REG_LEN_08BIT, 0x94, 0xE0}, /* CSI-TXA Map Address + Set to 0x94 */ + {0x00, CRL_REG_LEN_08BIT, 0x40, 0xE0}, /* Disable chip powerdown & + Enable HDMI Rx block */ + + {0x40, CRL_REG_LEN_08BIT, 0xC3, 0x64}, /* Enable HDCP 1.1 Repeater */ + {0x69, CRL_REG_LEN_08BIT, 0x00, 0x64}, /* KSV List not ready port A */ + {0x77, CRL_REG_LEN_08BIT, 0x08, 0x64}, /* Clear KSV List */ + {0x78, CRL_REG_LEN_08BIT, 0x01, 0x64}, /* KSV_LIST_READY_CLR_A: + Clears the BCAPS ready bit */ + {0x68, CRL_REG_LEN_08BIT, 0x00, 0x64}, /* Disable dual ksv list + for port A */ + {0x41, CRL_REG_LEN_08BIT, 0x00, 0x64}, /* Reset b-status (1) */ + {0x42, CRL_REG_LEN_08BIT, 0x00, 0x64}, /* Reset b-status (2) */ + {0x91, CRL_REG_LEN_08BIT, 0x08, 0xE0}, /* AKSV Update Clear */ + + {0x00, CRL_REG_LEN_08BIT, 0x08, 0x68}, /* Foreground Channel = A */ + {0x98, CRL_REG_LEN_08BIT, 0xFF, 0x68}, /* ADI Required Write */ + {0x99, CRL_REG_LEN_08BIT, 0xA3, 0x68}, /* ADI Required Write */ + {0x9A, CRL_REG_LEN_08BIT, 0x00, 0x68}, /* ADI Required Write */ + {0x9B, CRL_REG_LEN_08BIT, 0x0A, 0x68}, /* ADI Required Write */ + {0x9D, CRL_REG_LEN_08BIT, 0x40, 0x68}, /* ADI Required Write */ + {0xCB, CRL_REG_LEN_08BIT, 0x09, 0x68}, /* ADI Required Write */ + {0x3D, CRL_REG_LEN_08BIT, 0x10, 0x68}, /* ADI Required Write */ + {0x3E, CRL_REG_LEN_08BIT, 0x7B, 0x68}, /* ADI Required Write */ + {0x3F, CRL_REG_LEN_08BIT, 0x5E, 0x68}, /* ADI Required Write */ + {0x4E, CRL_REG_LEN_08BIT, 0xFE, 0x68}, /* ADI Required Write */ + {0x4F, CRL_REG_LEN_08BIT, 0x18, 0x68}, /* ADI Required Write */ + {0x57, CRL_REG_LEN_08BIT, 0xA3, 0x68}, /* ADI Required Write */ + {0x58, CRL_REG_LEN_08BIT, 0x04, 0x68}, /* ADI Required Write */ + {0x85, CRL_REG_LEN_08BIT, 0x10, 
0x68}, /* ADI Required Write */ + {0x83, CRL_REG_LEN_08BIT, 0x00, 0x68}, /* Enable All Terminatio ns */ + {0xA3, CRL_REG_LEN_08BIT, 0x01, 0x68}, /* ADI Required Write */ + {0xBE, CRL_REG_LEN_08BIT, 0x00, 0x68}, /* ADI Required Write */ + {0x6C, CRL_REG_LEN_08BIT, 0x01, 0x68}, /* HPA Manual Enable */ + {0xF8, CRL_REG_LEN_08BIT, 0x01, 0x68}, /* HPA Asserted */ + {0x0F, CRL_REG_LEN_08BIT, 0x00, 0x68}, /* Audio Mute Speed + Set to Fastest (Smallest Step Size) */ + {0x0E, CRL_REG_LEN_08BIT, 0xFF, 0xE0}, /* LLC/PIX/AUD/SPI PINS + TRISTATED */ + + {0x74, CRL_REG_LEN_08BIT, 0x43, 0xE0}, /* Enable interrupts */ + {0x75, CRL_REG_LEN_08BIT, 0x43, 0xE0}, + + {0x70, CRL_REG_LEN_08BIT, 0xA0, 0x64}, /* Write primary edid size */ + {0x74, CRL_REG_LEN_08BIT, 0x01, 0x64}, /* Enable manual edid */ + {0x7A, CRL_REG_LEN_08BIT, 0x00, 0x64}, /* Write edid sram select */ + {0xF6, CRL_REG_LEN_08BIT, 0x6C, 0xE0}, /* Write edid map bus address */ + + {0x00*4, CRL_REG_LEN_32BIT, 0x00FFFFFF, 0x6C}, /* EDID programming */ + {0x01*4, CRL_REG_LEN_32BIT, 0xFFFFFF00, 0x6C}, /* EDID programming */ + {0x02*4, CRL_REG_LEN_32BIT, 0x4DD90100, 0x6C}, /* EDID programming */ + {0x03*4, CRL_REG_LEN_32BIT, 0x00000000, 0x6C}, /* EDID programming */ + {0x04*4, CRL_REG_LEN_32BIT, 0x00110103, 0x6C}, /* EDID programming */ + {0x05*4, CRL_REG_LEN_32BIT, 0x80000078, 0x6C}, /* EDID programming */ + {0x06*4, CRL_REG_LEN_32BIT, 0x0A0DC9A0, 0x6C}, /* EDID programming */ + {0x07*4, CRL_REG_LEN_32BIT, 0x57479827, 0x6C}, /* EDID programming */ + {0x08*4, CRL_REG_LEN_32BIT, 0x12484C00, 0x6C}, /* EDID programming */ + {0x09*4, CRL_REG_LEN_32BIT, 0x00000101, 0x6C}, /* EDID programming */ + {0x0A*4, CRL_REG_LEN_32BIT, 0x01010101, 0x6C}, /* EDID programming */ + {0x0B*4, CRL_REG_LEN_32BIT, 0x01010101, 0x6C}, /* EDID programming */ + {0x0C*4, CRL_REG_LEN_32BIT, 0x01010101, 0x6C}, /* EDID programming */ + {0x0D*4, CRL_REG_LEN_32BIT, 0x0101011D, 0x6C}, /* EDID programming */ + {0x0E*4, CRL_REG_LEN_32BIT, 0x80D0721C, 0x6C}, /* 
EDID programming */ + {0x0F*4, CRL_REG_LEN_32BIT, 0x1620102C, 0x6C}, /* EDID programming */ + {0x10*4, CRL_REG_LEN_32BIT, 0x2580C48E, 0x6C}, /* EDID programming */ + {0x11*4, CRL_REG_LEN_32BIT, 0x2100009E, 0x6C}, /* EDID programming */ + {0x12*4, CRL_REG_LEN_32BIT, 0x011D8018, 0x6C}, /* EDID programming */ + {0x13*4, CRL_REG_LEN_32BIT, 0x711C1620, 0x6C}, /* EDID programming */ + {0x14*4, CRL_REG_LEN_32BIT, 0x582C2500, 0x6C}, /* EDID programming */ + {0x15*4, CRL_REG_LEN_32BIT, 0xC48E2100, 0x6C}, /* EDID programming */ + {0x16*4, CRL_REG_LEN_32BIT, 0x009E0000, 0x6C}, /* EDID programming */ + {0x17*4, CRL_REG_LEN_32BIT, 0x00FC0048, 0x6C}, /* EDID programming */ + {0x18*4, CRL_REG_LEN_32BIT, 0x444D4920, 0x6C}, /* EDID programming */ + {0x19*4, CRL_REG_LEN_32BIT, 0x4C4C430A, 0x6C}, /* EDID programming */ + {0x1A*4, CRL_REG_LEN_32BIT, 0x20202020, 0x6C}, /* EDID programming */ + {0x1B*4, CRL_REG_LEN_32BIT, 0x000000FD, 0x6C}, /* EDID programming */ + {0x1C*4, CRL_REG_LEN_32BIT, 0x003B3D0F, 0x6C}, /* EDID programming */ + {0x1D*4, CRL_REG_LEN_32BIT, 0x2D08000A, 0x6C}, /* EDID programming */ + {0x1E*4, CRL_REG_LEN_32BIT, 0x20202020, 0x6C}, /* EDID programming */ + {0x1F*4, CRL_REG_LEN_32BIT, 0x202001C1, 0x6C}, /* EDID programming */ + {0x20*4, CRL_REG_LEN_32BIT, 0x02031E77, 0x6C}, /* EDID programming */ + {0x21*4, CRL_REG_LEN_32BIT, 0x4F941305, 0x6C}, /* EDID programming */ + {0x22*4, CRL_REG_LEN_32BIT, 0x03040201, 0x6C}, /* EDID programming */ + {0x23*4, CRL_REG_LEN_32BIT, 0x16150706, 0x6C}, /* EDID programming */ + {0x24*4, CRL_REG_LEN_32BIT, 0x1110121F, 0x6C}, /* EDID programming */ + {0x25*4, CRL_REG_LEN_32BIT, 0x23090701, 0x6C}, /* EDID programming */ + {0x26*4, CRL_REG_LEN_32BIT, 0x65030C00, 0x6C}, /* EDID programming */ + {0x27*4, CRL_REG_LEN_32BIT, 0x10008C0A, 0x6C}, /* EDID programming */ + {0x28*4, CRL_REG_LEN_32BIT, 0xD0902040, 0x6C}, /* EDID programming */ + {0x29*4, CRL_REG_LEN_32BIT, 0x31200C40, 0x6C}, /* EDID programming */ + {0x2A*4, CRL_REG_LEN_32BIT, 
0x5500138E, 0x6C}, /* EDID programming */ + {0x2B*4, CRL_REG_LEN_32BIT, 0x21000018, 0x6C}, /* EDID programming */ + {0x2C*4, CRL_REG_LEN_32BIT, 0x011D00BC, 0x6C}, /* EDID programming */ + {0x2D*4, CRL_REG_LEN_32BIT, 0x52D01E20, 0x6C}, /* EDID programming */ + {0x2E*4, CRL_REG_LEN_32BIT, 0xB8285540, 0x6C}, /* EDID programming */ + {0x2F*4, CRL_REG_LEN_32BIT, 0xC48E2100, 0x6C}, /* EDID programming */ + {0x30*4, CRL_REG_LEN_32BIT, 0x001E8C0A, 0x6C}, /* EDID programming */ + {0x31*4, CRL_REG_LEN_32BIT, 0xD08A20E0, 0x6C}, /* EDID programming */ + {0x32*4, CRL_REG_LEN_32BIT, 0x2D10103E, 0x6C}, /* EDID programming */ + {0x33*4, CRL_REG_LEN_32BIT, 0x9600C48E, 0x6C}, /* EDID programming */ + {0x34*4, CRL_REG_LEN_32BIT, 0x21000018, 0x6C}, /* EDID programming */ + {0x35*4, CRL_REG_LEN_32BIT, 0x011D0072, 0x6C}, /* EDID programming */ + {0x36*4, CRL_REG_LEN_32BIT, 0x51D01E20, 0x6C}, /* EDID programming */ + {0x37*4, CRL_REG_LEN_32BIT, 0x6E285500, 0x6C}, /* EDID programming */ + {0x38*4, CRL_REG_LEN_32BIT, 0xC48E2100, 0x6C}, /* EDID programming */ + {0x39*4, CRL_REG_LEN_32BIT, 0x001E8C0A, 0x6C}, /* EDID programming */ + {0x3A*4, CRL_REG_LEN_32BIT, 0xD08A20E0, 0x6C}, /* EDID programming */ + {0x3B*4, CRL_REG_LEN_32BIT, 0x2D10103E, 0x6C}, /* EDID programming */ + {0x3C*4, CRL_REG_LEN_32BIT, 0x9600138E, 0x6C}, /* EDID programming */ + {0x3D*4, CRL_REG_LEN_32BIT, 0x21000018, 0x6C}, /* EDID programming */ + {0x3E*4, CRL_REG_LEN_32BIT, 0x00000000, 0x6C}, /* EDID programming */ + {0x3F*4, CRL_REG_LEN_32BIT, 0x000000CB, 0x6C}, /* EDID programming */ +}; + +static struct crl_register_write_rep adv7481_hdmi_mode_rgb565[] = { + {0x04, CRL_REG_LEN_08BIT, 0x02, 0xE0}, /* RGB Out of CP */ + {0x12, CRL_REG_LEN_08BIT, 0xF0, 0xE0}, /* CSC Depends on ip Packets - SDR 444 */ + {0x17, CRL_REG_LEN_08BIT, 0xB8, 0xE0}, /* Luma & Chroma Values Can Reach 254d */ + {0x7C, CRL_REG_LEN_08BIT, 0x00, 0x44}, /* ADI Required Write */ + {0x0C, CRL_REG_LEN_08BIT, 0xE0, 0xE0}, /* Enable LLC_DLL & Double LLC 
Timing */ + {0x0E, CRL_REG_LEN_08BIT, 0xDD, 0xE0}, + /* LLC/PIX/SPI PINS TRISTATED AUD Outputs Enabled */ + {0xDB, CRL_REG_LEN_08BIT, 0x10, 0x94}, /* ADI Required Write */ + /* Enable 4-lane CSI TXB & Pixel Port */ + {0x7E, CRL_REG_LEN_08BIT, 0x98, 0x94}, /* ADI Required Write */ +}; + +static struct crl_register_write_rep adv7481_hdmi_mode_rgb888[] = { + {0x04, CRL_REG_LEN_08BIT, 0x02, 0xE0}, /* RGB Out of CP */ + {0x12, CRL_REG_LEN_08BIT, 0xF0, 0xE0}, /* CSC Depends on ip Packets - SDR 444 */ + {0x17, CRL_REG_LEN_08BIT, 0x80, 0xE0}, /* Luma & Chroma Values Can Reach 254d */ + {0x7C, CRL_REG_LEN_08BIT, 0x00, 0x44}, /* ADI Required Write */ + {0x0C, CRL_REG_LEN_08BIT, 0xE0, 0xE0}, /* Enable LLC_DLL & Double LLC Timing */ + {0x0E, CRL_REG_LEN_08BIT, 0xDD, 0xE0}, + /* LLC/PIX/SPI PINS TRISTATED AUD Outputs Enabled */ + {0xDB, CRL_REG_LEN_08BIT, 0x10, 0x94}, /* ADI Required Write */ + {0x7E, CRL_REG_LEN_08BIT, 0x1B, 0x94}, /* ADI Required Write */ +}; + + +static struct crl_register_write_rep adv7481_hdmi_mode_uyvy[] = { + {0x1C, CRL_REG_LEN_08BIT, 0x00, 0xE0}, /* ADI Require Write*/ + {0x04, CRL_REG_LEN_08BIT, 0x00, 0xE0}, /* YCrCb output */ + {0x12, CRL_REG_LEN_08BIT, 0xF2, 0xE0}, /* CSC Depends on ip Packets - SDR422 set */ + {0x17, CRL_REG_LEN_08BIT, 0x80, 0xE0}, /* Luma & Chroma Values Can Reach 254d */ + {0x7C, CRL_REG_LEN_08BIT, 0x00, 0x44}, /* ADI Required Write */ + {0x0C, CRL_REG_LEN_08BIT, 0xE0, 0xE0}, /* Enable LLC_DLL & Double LLC Timing */ + {0x0E, CRL_REG_LEN_08BIT, 0xDD, 0xE0}, /* LLC/PIX/SPI PINS TRISTATED AUD Outputs Enabled */ + {0x10, CRL_REG_LEN_08BIT | CRL_REG_READ_AND_UPDATE, 0xA0, 0xE0, 0xA0}, + /* Enable 4-lane CSI TXB & Pixel Port */ + {0x00, CRL_REG_LEN_08BIT, 0x84, 0x94}, /* Enable 4-lane MIPI */ + {0x00, CRL_REG_LEN_08BIT, 0xA4, 0x94}, /* Set Auto DPHY Timing */ + {0xDB, CRL_REG_LEN_08BIT, 0x10, 0x94}, /* ADI Required Write */ + {0x7E, CRL_REG_LEN_08BIT, 0x00, 0x94}, /* ADI Required Write */ +}; + +static struct crl_register_write_rep 
adv7481_hdmi_mode_yuyv[] = { + {0x1C, CRL_REG_LEN_08BIT, 0x3A, 0xE0}, /* Enable Interrupt*/ + {0x04, CRL_REG_LEN_08BIT, 0x40, 0xE0}, /* YCrCb output good=0xE0*/ + {0x12, CRL_REG_LEN_08BIT, 0xF2, 0xE0}, /* CSC Depends on ip Packets - SDR422 set */ + {0x17, CRL_REG_LEN_08BIT, 0x80, 0xE0}, /* Luma & Chroma Values Can Reach 254d */ + {0x7C, CRL_REG_LEN_08BIT, 0x00, 0x44}, /* ADI Required Write */ + {0x3E, CRL_REG_LEN_08BIT, 0x08, 0x44}, /* Invert order of Cb and Cr*/ + {0x0C, CRL_REG_LEN_08BIT, 0xE0, 0xE0}, /* Enable LLC_DLL & Double LLC Timing */ + {0x0E, CRL_REG_LEN_08BIT, 0xDD, 0xE0}, /* LLC/PIX/SPI PINS TRISTATED AUD Outputs Enabled */ + {0x10, CRL_REG_LEN_08BIT | CRL_REG_READ_AND_UPDATE, 0xA0, 0xE0, 0xA0}, + /* Enable 4-lane CSI TXB & Pixel Port */ + {0x00, CRL_REG_LEN_08BIT, 0x84, 0x94}, /* Enable 4-lane MIPI */ + {0x00, CRL_REG_LEN_08BIT, 0xA4, 0x94}, /* Set Auto DPHY Timing */ + {0xDB, CRL_REG_LEN_08BIT, 0x10, 0x94}, /* ADI Required Write */ + {0x7E, CRL_REG_LEN_08BIT, 0x00, 0x94}, /* ADI Required Write */ +}; + +static struct crl_register_write_rep adv7481_hdmi_mode_1080p[] = { + {0x00, CRL_REG_LEN_08BIT, 0x84, 0x94}, /* Enable 4-lane MIPI */ + {0x00, CRL_REG_LEN_08BIT, 0xA4, 0x94}, /* Set Auto DPHY Timing */ + {0x10, CRL_REG_LEN_08BIT | CRL_REG_READ_AND_UPDATE, 0xA0, 0xE0, 0xA0}, + {0xD6, CRL_REG_LEN_08BIT, 0x07, 0x94}, + {0xC4, CRL_REG_LEN_08BIT, 0x0A, 0x94}, + {0x71, CRL_REG_LEN_08BIT, 0x33, 0x94}, + {0x72, CRL_REG_LEN_08BIT, 0x11, 0x94}, + {0xF0, CRL_REG_LEN_08BIT, 0x00, 0x94}, + {0x31, CRL_REG_LEN_08BIT, 0x82, 0x94}, + {0x1E, CRL_REG_LEN_08BIT, 0x80, 0x94}, + {0xDA, CRL_REG_LEN_08BIT, 0x01, 0x94}, + {0x00, CRL_REG_LEN_08BIT, 0x24, 0x94}, + {0xC1, CRL_REG_LEN_08BIT, 0x2B, 0x94}, + {0x31, CRL_REG_LEN_08BIT, 0x80, 0x94}, + {0xC9, CRL_REG_LEN_08BIT, 0x2D, 0x44}, + {0x05, CRL_REG_LEN_08BIT, 0x5E, 0xE0}, + {0x8B, CRL_REG_LEN_08BIT, 0x43, 0x44}, /* shift 44 pixel to right */ + {0x8C, CRL_REG_LEN_08BIT, 0xD4, 0x44}, + {0x8B, CRL_REG_LEN_08BIT, 0x4F, 0x44}, + 
{0x8D, CRL_REG_LEN_08BIT, 0xD4, 0x44}, +}; + +static struct crl_register_write_rep adv7481_hdmi_mode_1080i[] = { + {0x00, CRL_REG_LEN_08BIT, 0x84, 0x94}, /* Enable 4-lane MIPI */ + {0x00, CRL_REG_LEN_08BIT, 0xA4, 0x94}, /* Set Auto DPHY Timing */ + {0x10, CRL_REG_LEN_08BIT | CRL_REG_READ_AND_UPDATE, 0xA0, 0xE0, 0xA0}, + {0xD6, CRL_REG_LEN_08BIT, 0x07, 0x94}, + {0xC4, CRL_REG_LEN_08BIT, 0x0A, 0x94}, + {0x71, CRL_REG_LEN_08BIT, 0x33, 0x94}, + {0x72, CRL_REG_LEN_08BIT, 0x11, 0x94}, + {0xF0, CRL_REG_LEN_08BIT, 0x00, 0x94}, + {0x31, CRL_REG_LEN_08BIT, 0x82, 0x94}, + {0x1E, CRL_REG_LEN_08BIT, 0x80, 0x94}, + {0xDA, CRL_REG_LEN_08BIT, 0x01, 0x94}, + {0x00, CRL_REG_LEN_08BIT, 0x24, 0x94}, + {0xC1, CRL_REG_LEN_08BIT, 0x2B, 0x94}, + {0x31, CRL_REG_LEN_08BIT, 0x80, 0x94}, + {0xC9, CRL_REG_LEN_08BIT, 0x2D, 0x44}, + {0x05, CRL_REG_LEN_08BIT, 0x54, 0xE0}, + {0x8B, CRL_REG_LEN_08BIT, 0x43, 0x44}, /* shift 44 pixel to right */ + {0x8C, CRL_REG_LEN_08BIT, 0xD4, 0x44}, + {0x8B, CRL_REG_LEN_08BIT, 0x4F, 0x44}, + {0x8D, CRL_REG_LEN_08BIT, 0xD4, 0x44}, +}; + +static struct crl_register_write_rep adv7481_hdmi_mode_480p[] = { + {0x00, CRL_REG_LEN_08BIT, 0x84, 0x94}, /* Enable 4-lane MIPI */ + {0x00, CRL_REG_LEN_08BIT, 0xA4, 0x94}, /* Set Auto DPHY Timing */ + {0x10, CRL_REG_LEN_08BIT | CRL_REG_READ_AND_UPDATE, 0xA0, 0xE0, 0xA0}, + {0xD6, CRL_REG_LEN_08BIT, 0x07, 0x94}, + {0xC4, CRL_REG_LEN_08BIT, 0x0A, 0x94}, + {0x71, CRL_REG_LEN_08BIT, 0x33, 0x94}, + {0x72, CRL_REG_LEN_08BIT, 0x11, 0x94}, + {0xF0, CRL_REG_LEN_08BIT, 0x00, 0x94}, + {0x31, CRL_REG_LEN_08BIT, 0x82, 0x94}, + {0x1E, CRL_REG_LEN_08BIT, 0x80, 0x94}, + {0xDA, CRL_REG_LEN_08BIT, 0x01, 0x94}, + {0x00, CRL_REG_LEN_08BIT, 0x24, 0x94}, + {0xC1, CRL_REG_LEN_08BIT, 0x2B, 0x94}, + {0x31, CRL_REG_LEN_08BIT, 0x80, 0x94}, + {0xC9, CRL_REG_LEN_08BIT, 0x2D, 0x44}, + {0x05, CRL_REG_LEN_08BIT, 0x4A, 0xE0}, +}; + +static struct crl_register_write_rep adv7481_hdmi_mode_720p[] = { + {0x00, CRL_REG_LEN_08BIT, 0x84, 0x94}, /* Enable 4-lane MIPI */ 
+ {0x00, CRL_REG_LEN_08BIT, 0xA4, 0x94}, /* Set Auto DPHY Timing */ + {0x10, CRL_REG_LEN_08BIT | CRL_REG_READ_AND_UPDATE, 0xA0, 0xE0, 0xA0}, + {0xD6, CRL_REG_LEN_08BIT, 0x07, 0x94}, + {0xC4, CRL_REG_LEN_08BIT, 0x0A, 0x94}, + {0x71, CRL_REG_LEN_08BIT, 0x33, 0x94}, + {0x72, CRL_REG_LEN_08BIT, 0x11, 0x94}, + {0xF0, CRL_REG_LEN_08BIT, 0x00, 0x94}, + {0x31, CRL_REG_LEN_08BIT, 0x82, 0x94}, + {0x1E, CRL_REG_LEN_08BIT, 0x80, 0x94}, + {0xDA, CRL_REG_LEN_08BIT, 0x01, 0x94}, + {0x00, CRL_REG_LEN_08BIT, 0x24, 0x94}, + {0xC1, CRL_REG_LEN_08BIT, 0x2B, 0x94}, + {0x31, CRL_REG_LEN_08BIT, 0x80, 0x94}, + {0xC9, CRL_REG_LEN_08BIT, 0x2D, 0x44}, + {0x05, CRL_REG_LEN_08BIT, 0x53, 0xE0}, + {0x8B, CRL_REG_LEN_08BIT, 0x43, 0x44}, /* shift 40 pixel to right */ + {0x8C, CRL_REG_LEN_08BIT, 0xD8, 0x44}, + {0x8B, CRL_REG_LEN_08BIT, 0x4F, 0x44}, + {0x8D, CRL_REG_LEN_08BIT, 0xD8, 0x44}, +}; + +static struct crl_register_write_rep adv7481_hdmi_mode_576p[] = { + {0x00, CRL_REG_LEN_08BIT, 0x84, 0x94}, /* Enable 4-lane MIPI */ + {0x00, CRL_REG_LEN_08BIT, 0xA4, 0x94}, /* Set Auto DPHY Timing */ + {0x10, CRL_REG_LEN_08BIT | CRL_REG_READ_AND_UPDATE, 0xA0, 0xE0, 0xA0}, + {0xD6, CRL_REG_LEN_08BIT, 0x07, 0x94}, + {0xC4, CRL_REG_LEN_08BIT, 0x0A, 0x94}, + {0x71, CRL_REG_LEN_08BIT, 0x33, 0x94}, + {0x72, CRL_REG_LEN_08BIT, 0x11, 0x94}, + {0xF0, CRL_REG_LEN_08BIT, 0x00, 0x94}, + {0x31, CRL_REG_LEN_08BIT, 0x82, 0x94}, + {0x1E, CRL_REG_LEN_08BIT, 0x80, 0x94}, + {0xDA, CRL_REG_LEN_08BIT, 0x01, 0x94}, + {0x00, CRL_REG_LEN_08BIT, 0x24, 0x94}, + {0xC1, CRL_REG_LEN_08BIT, 0x2B, 0x94}, + {0x31, CRL_REG_LEN_08BIT, 0x80, 0x94}, + {0xC9, CRL_REG_LEN_08BIT, 0x2D, 0x44}, + {0x05, CRL_REG_LEN_08BIT, 0x4B, 0xE0}, +}; + +static struct crl_register_write_rep adv7481_hdmi_mode_576i[] = { + {0x00, CRL_REG_LEN_08BIT, 0x81, 0x94}, /* Enable 1-lane MIPI */ + {0x00, CRL_REG_LEN_08BIT, 0xA1, 0x94}, /* Set Auto DPHY Timing */ + {0x10, CRL_REG_LEN_08BIT | CRL_REG_READ_AND_UPDATE, 0xA0, 0xE0, 0xA0}, + {0xD6, CRL_REG_LEN_08BIT, 0x07, 
0x94}, + {0xC4, CRL_REG_LEN_08BIT, 0x0A, 0x94}, + {0x71, CRL_REG_LEN_08BIT, 0x33, 0x94}, + {0x72, CRL_REG_LEN_08BIT, 0x11, 0x94}, + {0xF0, CRL_REG_LEN_08BIT, 0x00, 0x94}, + {0x31, CRL_REG_LEN_08BIT, 0x82, 0x94}, + {0x1E, CRL_REG_LEN_08BIT, 0x80, 0x94}, + {0xDA, CRL_REG_LEN_08BIT, 0x01, 0x94}, + {0x00, CRL_REG_LEN_08BIT, 0x21, 0x94}, + {0xC1, CRL_REG_LEN_08BIT, 0x2B, 0x94}, + {0x31, CRL_REG_LEN_08BIT, 0x80, 0x94}, + {0xC9, CRL_REG_LEN_08BIT, 0x2D, 0x44}, + {0x05, CRL_REG_LEN_08BIT, 0x41, 0xE0}, +}; + +static struct crl_register_write_rep adv7481_hdmi_mode_480i[] = { + {0x00, CRL_REG_LEN_08BIT, 0x81, 0x94}, /* Enable 1-lane MIPI */ + {0x00, CRL_REG_LEN_08BIT, 0xA1, 0x94}, /* Set Auto DPHY Timing */ + {0x10, CRL_REG_LEN_08BIT | CRL_REG_READ_AND_UPDATE, 0xA0, 0xE0, 0xA0}, + {0xD6, CRL_REG_LEN_08BIT, 0x07, 0x94}, + {0xC4, CRL_REG_LEN_08BIT, 0x0A, 0x94}, + {0x71, CRL_REG_LEN_08BIT, 0x33, 0x94}, + {0x72, CRL_REG_LEN_08BIT, 0x11, 0x94}, + {0xF0, CRL_REG_LEN_08BIT, 0x00, 0x94}, + {0x31, CRL_REG_LEN_08BIT, 0x82, 0x94}, + {0x1E, CRL_REG_LEN_08BIT, 0x80, 0x94}, + {0xDA, CRL_REG_LEN_08BIT, 0x01, 0x94}, + {0x00, CRL_REG_LEN_08BIT, 0x21, 0x94}, + {0xC1, CRL_REG_LEN_08BIT, 0x2B, 0x94}, + {0x31, CRL_REG_LEN_08BIT, 0x80, 0x94}, + {0xC9, CRL_REG_LEN_08BIT, 0x2D, 0x44}, + {0x05, CRL_REG_LEN_08BIT, 0x40, 0xE0}, +}; + +static struct crl_register_write_rep adv7481_hdmi_mode_vga[] = { + {0x00, CRL_REG_LEN_08BIT, 0x84, 0x94}, /* Enable 4-lane MIPI */ + {0x00, CRL_REG_LEN_08BIT, 0xA4, 0x94}, /* Set Auto DPHY Timing */ + {0x10, CRL_REG_LEN_08BIT | CRL_REG_READ_AND_UPDATE, 0xA0, 0xE0, 0xA0}, + {0xD6, CRL_REG_LEN_08BIT, 0x07, 0x94}, + {0xC4, CRL_REG_LEN_08BIT, 0x0A, 0x94}, + {0x71, CRL_REG_LEN_08BIT, 0x33, 0x94}, + {0x72, CRL_REG_LEN_08BIT, 0x11, 0x94}, + {0xF0, CRL_REG_LEN_08BIT, 0x00, 0x94}, + {0x31, CRL_REG_LEN_08BIT, 0x82, 0x94}, + {0x1E, CRL_REG_LEN_08BIT, 0x80, 0x94}, + {0xDA, CRL_REG_LEN_08BIT, 0x01, 0x94}, + {0x00, CRL_REG_LEN_08BIT, 0x24, 0x94}, + {0xC1, CRL_REG_LEN_08BIT, 0x2B, 
0x94}, + {0x31, CRL_REG_LEN_08BIT, 0x80, 0x94}, + {0xC9, CRL_REG_LEN_08BIT, 0x2D, 0x44}, + {0x05, CRL_REG_LEN_08BIT, 0x88, 0xE0}, +}; + +static struct crl_register_write_rep adv7481_hdmi_powerup_regset[] = { + {0x00, CRL_REG_LEN_08BIT, 0x84, 0x94}, /* Enable 4-lane MIPI */ + {0x00, CRL_REG_LEN_08BIT, 0xA4, 0x94}, /* Set Auto DPHY Timing */ + {0xDB, CRL_REG_LEN_08BIT, 0x10, 0x94}, /* ADI Required Write */ + {0xD6, CRL_REG_LEN_08BIT, 0x07, 0x94}, /* ADI Required Write */ + {0xC4, CRL_REG_LEN_08BIT, 0x0A, 0x94}, /* ADI Required Write */ + {0x71, CRL_REG_LEN_08BIT, 0x33, 0x94}, /* ADI Required Write */ + {0x72, CRL_REG_LEN_08BIT, 0x11, 0x94}, /* ADI Required Write */ + {0xF0, CRL_REG_LEN_08BIT, 0x00, 0x94}, /* i2c_dphy_pwdn - 1'b0 */ + {0x31, CRL_REG_LEN_08BIT, 0x82, 0x94}, /* ADI Required Write */ + {0x1E, CRL_REG_LEN_08BIT, 0xC0, 0x94}, + /* ADI Required Write, transmit only Frame Start/End packets */ + {0xDA, CRL_REG_LEN_08BIT, 0x01, 0x94}, /* i2c_mipi_pll_en - 1'b1 */ +}; + +static struct crl_register_write_rep adv7481_hdmi_streamon_regs[] = { + {0x00, CRL_REG_LEN_DELAY, 0x02, 0x00}, + {0x00, CRL_REG_LEN_08BIT | CRL_REG_READ_AND_UPDATE, 0x21, 0x94, 0xF8}, + /* Power-up CSI-TX */ + {0x00, CRL_REG_LEN_DELAY, 0x01, 0x00}, + {0xC1, CRL_REG_LEN_08BIT, 0x2B, 0x94}, /* ADI recommended setting */ + {0x00, CRL_REG_LEN_DELAY, 0x01, 0x00}, + {0x31, CRL_REG_LEN_08BIT, 0x80, 0x94}, /* ADI recommended setting */ +}; + +static struct crl_register_write_rep adv7481_hdmi_streamoff_regs[] = { + {0x31, CRL_REG_LEN_08BIT, 0x82, 0x94}, /* ADI Recommended Write */ + {0x1E, CRL_REG_LEN_08BIT, 0x00, 0x94}, /* Reset the clock Lane */ + {0x00, CRL_REG_LEN_08BIT, 0xA1, 0x94}, + {0xDA, CRL_REG_LEN_08BIT, 0x00, 0x94}, + /* i2c_mipi_pll_en -1'b0 Disable MIPI PLL */ + {0xC1, CRL_REG_LEN_08BIT, 0x3B, 0x94}, +}; + +static struct crl_pll_configuration adv7481_hdmi_pll_configurations[] = { + { + .input_clk = 24000000, + .op_sys_clk = 297000000, + .bitsperpixel = 16, + .pixel_rate_csi = 594000000, + 
.pixel_rate_pa = 594000000, + }, + { + .input_clk = 24000000, + .op_sys_clk = 445500000, + .bitsperpixel = 24, + .pixel_rate_csi = 891000000, + .pixel_rate_pa = 891000000, + }, + /* 28.636 input clock */ + { + .input_clk = 286363636, + .op_sys_clk = 297000000, + .bitsperpixel = 16, + .pixel_rate_csi = 148500000, + .pixel_rate_pa = 297000000, + }, + { + .input_clk = 286363636, + .op_sys_clk = 297000000, + .bitsperpixel = 24, + .pixel_rate_csi = 148500000, + .pixel_rate_pa = 297000000, + }, + { + .input_clk = 286363636, + .op_sys_clk = 148500000, + .bitsperpixel = 16, + .pixel_rate_csi = 74250000, + .pixel_rate_pa = 148500000, + .csi_lanes = 4, + }, +}; + +static struct crl_subdev_rect_rep adv7481_hdmi_1080p_rects[] = { + { + .subdev_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .in_rect.left = 0, + .in_rect.top = 0, + .in_rect.width = 1920, + .in_rect.height = 1080, + .out_rect.left = 0, + .out_rect.top = 0, + .out_rect.width = 1920, + .out_rect.height = 1080, + }, + { + .subdev_type = CRL_SUBDEV_TYPE_BINNER, + .in_rect.left = 0, + .in_rect.top = 0, + .in_rect.width = 1920, + .in_rect.height = 1080, + .out_rect.left = 0, + .out_rect.top = 0, + .out_rect.width = 1920, + .out_rect.height = 1080, + }, +}; + +static struct crl_subdev_rect_rep adv7481_hdmi_720p_rects[] = { + { + .subdev_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .in_rect.left = 0, + .in_rect.top = 0, + .in_rect.width = 1920, + .in_rect.height = 1080, + .out_rect.left = 0, + .out_rect.top = 0, + .out_rect.width = 1920, + .out_rect.height = 1080, + }, + { + .subdev_type = CRL_SUBDEV_TYPE_BINNER, + .in_rect.left = 0, + .in_rect.top = 0, + .in_rect.width = 1920, + .in_rect.height = 1080, + .out_rect.left = 0, + .out_rect.top = 0, + .out_rect.width = 1280, + .out_rect.height = 720, + }, +}; + +static struct crl_subdev_rect_rep adv7481_hdmi_VGA_rects[] = { + { + .subdev_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .in_rect.left = 0, + .in_rect.top = 0, + .in_rect.width = 1920, + .in_rect.height = 1080, + .out_rect.left = 0, + 
.out_rect.top = 0, + .out_rect.width = 1920, + .out_rect.height = 1080, + }, + { + .subdev_type = CRL_SUBDEV_TYPE_BINNER, + .in_rect.left = 0, + .in_rect.top = 0, + .in_rect.width = 1920, + .in_rect.height = 1080, + .out_rect.left = 0, + .out_rect.top = 0, + .out_rect.width = 640, + .out_rect.height = 480, + }, +}; + +static struct crl_subdev_rect_rep adv7481_hdmi_1080i_rects[] = { + { + .subdev_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .in_rect.left = 0, + .in_rect.top = 0, + .in_rect.width = 1920, + .in_rect.height = 1080, + .out_rect.left = 0, + .out_rect.top = 0, + .out_rect.width = 1920, + .out_rect.height = 1080, + }, + { + .subdev_type = CRL_SUBDEV_TYPE_BINNER, + .in_rect.left = 0, + .in_rect.top = 0, + .in_rect.width = 1920, + .in_rect.height = 1080, + .out_rect.left = 0, + .out_rect.top = 0, + .out_rect.width = 1920, + .out_rect.height = 540, + }, +}; + +static struct crl_subdev_rect_rep adv7481_hdmi_480p_rects[] = { + { + .subdev_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .in_rect.left = 0, + .in_rect.top = 0, + .in_rect.width = 1920, + .in_rect.height = 1080, + .out_rect.left = 0, + .out_rect.top = 0, + .out_rect.width = 1920, + .out_rect.height = 1080, + }, + { + .subdev_type = CRL_SUBDEV_TYPE_BINNER, + .in_rect.left = 0, + .in_rect.top = 0, + .in_rect.width = 1920, + .in_rect.height = 1080, + .out_rect.left = 0, + .out_rect.top = 0, + .out_rect.width = 720, + .out_rect.height = 480, + }, +}; + +static struct crl_subdev_rect_rep adv7481_hdmi_480i_rects[] = { + { + .subdev_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .in_rect.left = 0, + .in_rect.top = 0, + .in_rect.width = 1920, + .in_rect.height = 1080, + .out_rect.left = 0, + .out_rect.top = 0, + .out_rect.width = 1920, + .out_rect.height = 1080, + }, + { + .subdev_type = CRL_SUBDEV_TYPE_BINNER, + .in_rect.left = 0, + .in_rect.top = 0, + .in_rect.width = 1920, + .in_rect.height = 1080, + .out_rect.left = 0, + .out_rect.top = 0, + .out_rect.width = 720, + .out_rect.height = 240, + }, +}; + +static struct 
crl_subdev_rect_rep adv7481_hdmi_576p_rects[] = { + { + .subdev_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .in_rect.left = 0, + .in_rect.top = 0, + .in_rect.width = 1920, + .in_rect.height = 1080, + .out_rect.left = 0, + .out_rect.top = 0, + .out_rect.width = 1920, + .out_rect.height = 1080, + }, + { + .subdev_type = CRL_SUBDEV_TYPE_BINNER, + .in_rect.left = 0, + .in_rect.top = 0, + .in_rect.width = 1920, + .in_rect.height = 1080, + .out_rect.left = 0, + .out_rect.top = 0, + .out_rect.width = 720, + .out_rect.height = 576, + }, +}; + +static struct crl_subdev_rect_rep adv7481_hdmi_576i_rects[] = { + { + .subdev_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .in_rect.left = 0, + .in_rect.top = 0, + .in_rect.width = 1920, + .in_rect.height = 1080, + .out_rect.left = 0, + .out_rect.top = 0, + .out_rect.width = 1920, + .out_rect.height = 1080, + }, + { + .subdev_type = CRL_SUBDEV_TYPE_BINNER, + .in_rect.left = 0, + .in_rect.top = 0, + .in_rect.width = 1920, + .in_rect.height = 1080, + .out_rect.left = 0, + .out_rect.top = 0, + .out_rect.width = 720, + .out_rect.height = 288, + }, +}; +static struct crl_mode_rep adv7481_hdmi_modes[] = { + { + .sd_rects_items = ARRAY_SIZE(adv7481_hdmi_1080p_rects), + .sd_rects = adv7481_hdmi_1080p_rects, + .binn_hor = 1, + .binn_vert = 1, + .scale_m = 1, + .width = 1920, + .height = 1080, + .mode_regs_items = ARRAY_SIZE(adv7481_hdmi_mode_1080p), + .mode_regs = adv7481_hdmi_mode_1080p, + .comp_items = 1, + .ctrl_data = &hdmi_ctrl_data_lanes[0], + }, + { + .sd_rects_items = ARRAY_SIZE(adv7481_hdmi_720p_rects), + .sd_rects = adv7481_hdmi_720p_rects, + .binn_hor = 1, + .binn_vert = 1, + .scale_m = 1, + .width = 1280, + .height = 720, + .mode_regs_items = ARRAY_SIZE(adv7481_hdmi_mode_720p), + .mode_regs = adv7481_hdmi_mode_720p, + .comp_items = 1, + .ctrl_data = &hdmi_ctrl_data_lanes[0], + }, + { + .sd_rects_items = ARRAY_SIZE(adv7481_hdmi_VGA_rects), + .sd_rects = adv7481_hdmi_VGA_rects, + .binn_hor = 3, + .binn_vert = 2, + .scale_m = 1, + .width = 640, + 
.height = 480, + .mode_regs_items = ARRAY_SIZE(adv7481_hdmi_mode_vga), + .mode_regs = adv7481_hdmi_mode_vga, + .comp_items = 1, + .ctrl_data = &hdmi_ctrl_data_lanes[0], + }, + { + .sd_rects_items = ARRAY_SIZE(adv7481_hdmi_1080i_rects), + .sd_rects = adv7481_hdmi_1080i_rects, + .binn_hor = 1, + .binn_vert = 2, + .scale_m = 1, + .width = 1920, + .height = 540, + .mode_regs_items = ARRAY_SIZE(adv7481_hdmi_mode_1080i), + .mode_regs = adv7481_hdmi_mode_1080i, + .comp_items = 1, + .ctrl_data = &hdmi_ctrl_data_lanes[0], + }, + { + .sd_rects_items = ARRAY_SIZE(adv7481_hdmi_480p_rects), + .sd_rects = adv7481_hdmi_480p_rects, + .binn_hor = 2, + .binn_vert = 2, + .scale_m = 1, + .width = 720, + .height = 480, + .mode_regs_items = ARRAY_SIZE(adv7481_hdmi_mode_480p), + .mode_regs = adv7481_hdmi_mode_480p, + .comp_items = 1, + .ctrl_data = &hdmi_ctrl_data_lanes[0], + }, + { + .sd_rects_items = ARRAY_SIZE(adv7481_hdmi_480i_rects), + .sd_rects = adv7481_hdmi_480i_rects, + .binn_hor = 2, + .binn_vert = 4, + .scale_m = 1, + .width = 720, + .height = 240, + .mode_regs_items = ARRAY_SIZE(adv7481_hdmi_mode_480i), + .mode_regs = adv7481_hdmi_mode_480i, + .comp_items = 1, + .ctrl_data = &hdmi_ctrl_data_lanes[2], + }, + { + .sd_rects_items = ARRAY_SIZE(adv7481_hdmi_576p_rects), + .sd_rects = adv7481_hdmi_576p_rects, + .binn_hor = 2, + .binn_vert = 1, + .scale_m = 1, + .width = 720, + .height = 576, + .mode_regs_items = ARRAY_SIZE(adv7481_hdmi_mode_576p), + .mode_regs = adv7481_hdmi_mode_576p, + .comp_items = 1, + .ctrl_data = &hdmi_ctrl_data_lanes[0], + }, + { + .sd_rects_items = ARRAY_SIZE(adv7481_hdmi_576i_rects), + .sd_rects = adv7481_hdmi_576i_rects, + .binn_hor = 2, + .binn_vert = 3, + .scale_m = 1, + .width = 720, + .height = 288, + .mode_regs_items = ARRAY_SIZE(adv7481_hdmi_mode_576i), + .mode_regs = adv7481_hdmi_mode_576i, + .comp_items = 1, + .ctrl_data = &hdmi_ctrl_data_lanes[2], + }, +}; + +static struct crl_sensor_subdev_config adv7481_hdmi_sensor_subdevs[] = { + { + 
.subdev_type = CRL_SUBDEV_TYPE_BINNER, + .name = "adv7481-hdmi binner", + }, + { + .subdev_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .name = "adv7481-hdmi pixel array", + }, +}; + +static struct crl_sensor_limits adv7481_hdmi_sensor_limits = { + .x_addr_min = 0, + .y_addr_min = 0, + .x_addr_max = 1920, + .y_addr_max = 1080, + .min_frame_length_lines = 160, + .max_frame_length_lines = 65535, + .min_line_length_pixels = 6024, + .max_line_length_pixels = 32752, + .scaler_m_min = 1, + .scaler_m_max = 1, + .scaler_n_min = 1, + .scaler_n_max = 1, + .min_even_inc = 1, + .max_even_inc = 1, + .min_odd_inc = 1, + .max_odd_inc = 1, +}; + +static struct crl_csi_data_fmt adv7481_hdmi_crl_csi_data_fmt[] = { + { + .code = MEDIA_BUS_FMT_RGB565_1X16, + .pixel_order = CRL_PIXEL_ORDER_GRBG, + .bits_per_pixel = 16, + .regs_items = ARRAY_SIZE(adv7481_hdmi_mode_rgb565), + .regs = adv7481_hdmi_mode_rgb565, + }, + { + .code = MEDIA_BUS_FMT_UYVY8_1X16, + .pixel_order = CRL_PIXEL_ORDER_IGNORE, + .bits_per_pixel = 16, + .regs_items = ARRAY_SIZE(adv7481_hdmi_mode_uyvy), + .regs = adv7481_hdmi_mode_uyvy, + }, + { + .code = MEDIA_BUS_FMT_RGB888_1X24, + .pixel_order = CRL_PIXEL_ORDER_GRBG, + .bits_per_pixel = 24, + .regs_items = ARRAY_SIZE(adv7481_hdmi_mode_rgb888), + .regs = adv7481_hdmi_mode_rgb888, + }, + { + .code = MEDIA_BUS_FMT_YUYV8_1X16, + .pixel_order = CRL_PIXEL_ORDER_IGNORE, + .bits_per_pixel = 16, + .regs_items = ARRAY_SIZE(adv7481_hdmi_mode_yuyv), + .regs = adv7481_hdmi_mode_yuyv, + }, +}; + +static const char * const adv7481_hdmi_test_pattern_menu[] = { + "default", + "30fps", + "50fps", + "60fps", + "real", +}; + +static struct crl_register_write_rep adv7481_hdmi_test_pattern_default_mode[] = { + {0x03, CRL_REG_LEN_08BIT, 0x86, 0xE0}, + {0x37, CRL_REG_LEN_08BIT, 0x81, 0x44}, + {0x00, CRL_REG_LEN_08BIT, 0x00, 0xE0}, +}; + +static struct crl_register_write_rep adv7481_hdmi_test_pattern_30fps_mode[] = { + {0x00, CRL_REG_LEN_08BIT, 0x00, 0xE0}, + {0x03, CRL_REG_LEN_08BIT, 0xA6, 0xE0}, + 
{0x04, CRL_REG_LEN_08BIT | CRL_REG_READ_AND_UPDATE, 0x80, 0xE0, 0xFD}, + {0x37, CRL_REG_LEN_08BIT, 0x85, 0x44}, +}; + +static struct crl_register_write_rep adv7481_hdmi_test_pattern_50fps_mode[] = { + {0x00, CRL_REG_LEN_08BIT, 0x00, 0xE0}, + {0x03, CRL_REG_LEN_08BIT, 0x96, 0xE0}, + {0x04, CRL_REG_LEN_08BIT | CRL_REG_READ_AND_UPDATE, 0x80, 0xE0, 0xFD}, + {0x37, CRL_REG_LEN_08BIT, 0x85, 0x44}, +}; + +static struct crl_register_write_rep adv7481_hdmi_test_pattern_60fps_mode[] = { + {0x00, CRL_REG_LEN_08BIT, 0x00, 0xE0}, + {0x03, CRL_REG_LEN_08BIT, 0x86, 0xE0}, + {0x04, CRL_REG_LEN_08BIT | CRL_REG_READ_AND_UPDATE, 0x80, 0xE0, 0xFD}, + {0x37, CRL_REG_LEN_08BIT, 0x85, 0x44}, +}; + +static struct crl_register_write_rep adv7481_hdmi_real_mode[] = { + {0x00, CRL_REG_LEN_DELAY, 0x05, 0x00}, + {0x03, CRL_REG_LEN_08BIT, 0x00, 0xE0}, + {0x04, CRL_REG_LEN_08BIT | CRL_REG_READ_AND_UPDATE, 0x00, 0xE0, 0xFD}, + {0x37, CRL_REG_LEN_08BIT, 0x00, 0x44}, +}; + +static struct crl_dep_reg_list adv7481_hdmi_test_pattern_fps_types_regs[] = { + { CRL_DEP_CTRL_CONDITION_EQUAL, + { CRL_DYNAMIC_VAL_OPERAND_TYPE_CONST, 0 }, + ARRAY_SIZE(adv7481_hdmi_test_pattern_default_mode), + adv7481_hdmi_test_pattern_default_mode, 0, 0 }, + { CRL_DEP_CTRL_CONDITION_EQUAL, + { CRL_DYNAMIC_VAL_OPERAND_TYPE_CONST, 1 }, + ARRAY_SIZE(adv7481_hdmi_test_pattern_30fps_mode), + adv7481_hdmi_test_pattern_30fps_mode, 0, 0 }, + { CRL_DEP_CTRL_CONDITION_EQUAL, + { CRL_DYNAMIC_VAL_OPERAND_TYPE_CONST, 2 }, + ARRAY_SIZE(adv7481_hdmi_test_pattern_50fps_mode), + adv7481_hdmi_test_pattern_50fps_mode, 0, 0 }, + { CRL_DEP_CTRL_CONDITION_EQUAL, + { CRL_DYNAMIC_VAL_OPERAND_TYPE_CONST, 3 }, + ARRAY_SIZE(adv7481_hdmi_test_pattern_60fps_mode), + adv7481_hdmi_test_pattern_60fps_mode, 0, 0 }, + { CRL_DEP_CTRL_CONDITION_EQUAL, + { CRL_DYNAMIC_VAL_OPERAND_TYPE_CONST, 4 }, + ARRAY_SIZE(adv7481_hdmi_real_mode), + adv7481_hdmi_real_mode, 0, 0 }, +}; + +static struct crl_v4l2_ctrl adv7481_hdmi_v4l2_ctrls[] = { + { + .sd_type = 
CRL_SUBDEV_TYPE_BINNER, + .op_type = CRL_V4L2_CTRL_SET_OP, + .context = SENSOR_IDLE, + .ctrl_id = V4L2_CID_LINK_FREQ, + .name = "V4L2_CID_LINK_FREQ", + .type = CRL_V4L2_CTRL_TYPE_MENU_INT, + .impact = CRL_IMPACTS_NO_IMPACT, + }, + { + .sd_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .op_type = CRL_V4L2_CTRL_GET_OP, + .context = SENSOR_POWERED_ON, + .ctrl_id = V4L2_CID_PIXEL_RATE, + .name = "V4L2_CID_PIXEL_RATE_PA", + .type = CRL_V4L2_CTRL_TYPE_INTEGER, + .data.std_data.min = 0, + .data.std_data.max = 0, + .data.std_data.step = 1, + .data.std_data.def = 0, + .flags = 0, + .impact = CRL_IMPACTS_NO_IMPACT, + }, + { + .sd_type = CRL_SUBDEV_TYPE_BINNER, + .op_type = CRL_V4L2_CTRL_GET_OP, + .context = SENSOR_POWERED_ON, + .ctrl_id = V4L2_CID_MIPI_LANES, + .name = "V4L2_CID_MIPI_LANES", + .type = CRL_V4L2_CTRL_TYPE_CUSTOM, + .data.std_data.min = 1, + .data.std_data.max = 4, + .data.std_data.step = 1, + .data.std_data.def = 4, + .flags = 0, + .impact = CRL_IMPACTS_NO_IMPACT, + .regs_items = 0, + .regs = 0, + .dep_items = 0, + .dep_ctrls = 0, + .v4l2_type = V4L2_CTRL_TYPE_INTEGER, + }, + { + .sd_type = CRL_SUBDEV_TYPE_BINNER, + .op_type = CRL_V4L2_CTRL_GET_OP, + .context = SENSOR_POWERED_ON, + .ctrl_id = V4L2_CID_PIXEL_RATE, + .name = "V4L2_CID_PIXEL_RATE_CSI", + .type = CRL_V4L2_CTRL_TYPE_INTEGER, + .data.std_data.min = 0, + .data.std_data.max = 0, + .data.std_data.step = 1, + .data.std_data.def = 0, + .flags = 0, + .impact = CRL_IMPACTS_NO_IMPACT, + }, + { + .sd_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .op_type = CRL_V4L2_CTRL_SET_OP, + .context = SENSOR_POWERED_ON, + .ctrl_id = V4L2_CID_TEST_PATTERN, + .name = "V4L2_CID_TEST_PATTERN", + .type = CRL_V4L2_CTRL_TYPE_MENU_ITEMS, + .data.v4l2_menu_items.menu = adv7481_hdmi_test_pattern_menu, + .data.v4l2_menu_items.size = ARRAY_SIZE(adv7481_hdmi_test_pattern_menu), + .impact = CRL_IMPACTS_NO_IMPACT, + .flags = V4L2_CTRL_FLAG_UPDATE, + .ctrl = 0, + .regs_items = 0, + .regs = 0, + .dep_items = 0, + .dep_ctrls = 0, + 
.crl_ctrl_dep_reg_list = ARRAY_SIZE(adv7481_hdmi_test_pattern_fps_types_regs), + .dep_regs = adv7481_hdmi_test_pattern_fps_types_regs, + }, +}; + +int adv7481_sensor_init(struct i2c_client *); +int adv7481_sensor_cleanup(struct i2c_client *); + +static struct crl_sensor_configuration adv7481_hdmi_crl_configuration = { + + .sensor_init = adv7481_sensor_init, + .sensor_cleanup = adv7481_sensor_cleanup, + + .onetime_init_regs_items = + ARRAY_SIZE(adv7481_hdmi_onetime_init_regset), + .onetime_init_regs = adv7481_hdmi_onetime_init_regset, + + .powerup_regs_items = ARRAY_SIZE(adv7481_hdmi_powerup_regset), + .powerup_regs = adv7481_hdmi_powerup_regset, + + .poweroff_regs_items = ARRAY_SIZE(adv7481_hdmi_streamoff_regs), + .poweroff_regs = adv7481_hdmi_streamoff_regs, + + .subdev_items = ARRAY_SIZE(adv7481_hdmi_sensor_subdevs), + .subdevs = adv7481_hdmi_sensor_subdevs, + + .sensor_limits = &adv7481_hdmi_sensor_limits, + + .pll_config_items = ARRAY_SIZE(adv7481_hdmi_pll_configurations), + .pll_configs = adv7481_hdmi_pll_configurations, + + .modes_items = ARRAY_SIZE(adv7481_hdmi_modes), + .modes = adv7481_hdmi_modes, + + .streamon_regs_items = ARRAY_SIZE(adv7481_hdmi_streamon_regs), + .streamon_regs = adv7481_hdmi_streamon_regs, + + .streamoff_regs_items = ARRAY_SIZE(adv7481_hdmi_streamoff_regs), + .streamoff_regs = adv7481_hdmi_streamoff_regs, + + .v4l2_ctrls_items = ARRAY_SIZE(adv7481_hdmi_v4l2_ctrls), + .v4l2_ctrl_bank = adv7481_hdmi_v4l2_ctrls, + + .csi_fmts_items = ARRAY_SIZE(adv7481_hdmi_crl_csi_data_fmt), + .csi_fmts = adv7481_hdmi_crl_csi_data_fmt, + + .irq_in_use = false, + .crl_irq_fn = NULL, + .crl_threaded_irq_fn = crl_adv7481_threaded_irq_fn, + + .addr_len = CRL_ADDR_7BIT, + .i2c_mutex_in_use = true, +}; + +#endif /* __CRLMODULE_ADV7481_HDMI_CONFIGURATION_H_ */ diff --git a/drivers/media/i2c/crlmodule/crl_ar0231at_configuration.h b/drivers/media/i2c/crlmodule/crl_ar0231at_configuration.h new file mode 100644 index 0000000000000..1905a9e051927 --- /dev/null +++ 
b/drivers/media/i2c/crlmodule/crl_ar0231at_configuration.h @@ -0,0 +1,2409 @@ +/* + * Copyright (c) 2018 Intel Corporation. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License version + * 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + +#ifndef __CRLMODULE_AR0231AT_CONFIGURATION_H_ +#define __CRLMODULE_AR0231AT_CONFIGURATION_H_ + +#include "crlmodule-sensor-ds.h" + +struct crl_pll_configuration ar0231at_pll_configurations[] = { + { + .input_clk = 27000000, + .op_sys_clk = 87750000, + .bitsperpixel = 12, + .pixel_rate_csi = 176000000, + .pixel_rate_pa = 176000000, /* pixel_rate = op_sys_clk*2 *csi_lanes/bitsperpixel */ + .csi_lanes = 4, + .comp_items = 0, + .ctrl_data = 0, + .pll_regs_items = 0, + .pll_regs = 0, + }, + { + .input_clk = 27000000, + .op_sys_clk = 87750000, + .bitsperpixel = 10, + .pixel_rate_csi = 211200000, + .pixel_rate_pa = 211200000, /* pixel_rate = op_sys_clk*2 *csi_lanes/bitsperpixel */ + .csi_lanes = 4, + .comp_items = 0, + .ctrl_data = 0, + .pll_regs_items = 0, + .pll_regs = 0, + }, +}; + +struct crl_sensor_subdev_config ar0231at_sensor_subdevs[] = { + { + .subdev_type = CRL_SUBDEV_TYPE_BINNER, + .name = "ar0231at binner", + }, + { + .subdev_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .name = "ar0231at pixel array", + }, +}; + +struct crl_subdev_rect_rep ar0231at_1920_1088_rects[] = { + { + .subdev_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .in_rect.left = 0, + .in_rect.top = 0, + .in_rect.width = 1920, + .in_rect.height = 1088, + .out_rect.left = 0, + .out_rect.top = 0, + .out_rect.width = 1920, + .out_rect.height = 1088, + }, + { + .subdev_type = CRL_SUBDEV_TYPE_BINNER, + .in_rect.left = 0, + .in_rect.top = 0, + 
.in_rect.width = 1920, + .in_rect.height = 1088, + .out_rect.left = 0, + .out_rect.top = 0, + .out_rect.width = 1920, + .out_rect.height = 1088, + } +}; + +/* + * Exposure mode: + * 0: Linear mode + * 1: 2-HDR mode + * 2: 3-HDR mode + * 3: 4-HDR mode + */ +struct crl_ctrl_data_pair ar0231at_ctrl_data_modes[] = { + { + .ctrl_id = CRL_CID_EXPOSURE_MODE, + .data = 0, + }, + { + .ctrl_id = CRL_CID_EXPOSURE_MODE, + .data = 1, + }, + { + .ctrl_id = CRL_CID_EXPOSURE_MODE, + .data = 2, + }, + { + .ctrl_id = CRL_CID_EXPOSURE_MODE, + .data = 3, + }, + { + .ctrl_id = CRL_CID_EXPOSURE_MODE, + .data = 4, + }, +}; + +static struct crl_register_write_rep ar0231at_1920_1088_10bit_linear_mode[] = { + { 0x301A, CRL_REG_LEN_16BIT, 0x1058, 0x10 }, + { 0x0000, CRL_REG_LEN_DELAY, 200, 0x10 }, + { 0x3092, CRL_REG_LEN_16BIT, 0x0C24, 0x10 }, + { 0x337A, CRL_REG_LEN_16BIT, 0x0C80, 0x10 }, + { 0x3520, CRL_REG_LEN_16BIT, 0x1288, 0x10 }, + { 0x3522, CRL_REG_LEN_16BIT, 0x880C, 0x10 }, + { 0x3524, CRL_REG_LEN_16BIT, 0x0C12, 0x10 }, + { 0x352C, CRL_REG_LEN_16BIT, 0x1212, 0x10 }, + { 0x354A, CRL_REG_LEN_16BIT, 0x007F, 0x10 }, + { 0x350C, CRL_REG_LEN_16BIT, 0x055C, 0x10 }, + { 0x3506, CRL_REG_LEN_16BIT, 0x3333, 0x10 }, + { 0x3508, CRL_REG_LEN_16BIT, 0x3333, 0x10 }, + { 0x3100, CRL_REG_LEN_16BIT, 0x4000, 0x10 }, + { 0x3280, CRL_REG_LEN_16BIT, 0x0FA0, 0x10 }, + { 0x3282, CRL_REG_LEN_16BIT, 0x0FA0, 0x10 }, + { 0x3284, CRL_REG_LEN_16BIT, 0x0FA0, 0x10 }, + { 0x3286, CRL_REG_LEN_16BIT, 0x0FA0, 0x10 }, + { 0x3288, CRL_REG_LEN_16BIT, 0x0FA0, 0x10 }, + { 0x328A, CRL_REG_LEN_16BIT, 0x0FA0, 0x10 }, + { 0x328C, CRL_REG_LEN_16BIT, 0x0FA0, 0x10 }, + { 0x328E, CRL_REG_LEN_16BIT, 0x0FA0, 0x10 }, + { 0x3290, CRL_REG_LEN_16BIT, 0x0FA0, 0x10 }, + { 0x3292, CRL_REG_LEN_16BIT, 0x0FA0, 0x10 }, + { 0x3294, CRL_REG_LEN_16BIT, 0x0FA0, 0x10 }, + { 0x3296, CRL_REG_LEN_16BIT, 0x0FA0, 0x10 }, + { 0x3298, CRL_REG_LEN_16BIT, 0x0FA0, 0x10 }, + { 0x329A, CRL_REG_LEN_16BIT, 0x0FA0, 0x10 }, + { 0x329C, CRL_REG_LEN_16BIT, 0x0FA0, 
0x10 }, + { 0x329E, CRL_REG_LEN_16BIT, 0x0FA0, 0x10 }, + { 0x301A, CRL_REG_LEN_16BIT, 0x10D8, 0x10 }, + { 0x0000, CRL_REG_LEN_DELAY, 200, 0x10 }, + { 0x2512, CRL_REG_LEN_16BIT, 0x8000, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0905, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x3350, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x2004, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1460, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1578, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0901, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x7B24, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0xFF24, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0xFF24, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0xEA24, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1022, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x2410, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x155A, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0901, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1400, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x24FF, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x24FF, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x24EA, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x2324, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x647A, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x2404, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x052C, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x400A, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0xFF0A, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0xFF0A, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1008, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x3851, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1440, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0004, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0801, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0408, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1180, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x2652, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1518, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0906, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1348, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1002, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1016, 0x10 }, + { 0x2510, 
CRL_REG_LEN_16BIT, 0x1181, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1189, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1056, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1210, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0901, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0D09, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1413, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x8809, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x2B15, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x8809, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0311, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0xD909, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1214, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x4109, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0312, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1409, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0110, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0xD612, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1012, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1212, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1011, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0xDD11, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0xD910, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x5609, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1511, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0xDB09, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1511, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x9B09, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0F11, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0xBB12, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1A12, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1014, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x6012, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x5010, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x7610, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0xE609, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0812, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x4012, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x6009, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x290B, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0904, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1440, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 
0x0923, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x15C8, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x13C8, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x092C, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1588, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1388, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0C09, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0C14, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x4109, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1112, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x6212, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x6011, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0xBF11, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0xBB10, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x6611, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0xFB09, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x3511, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0xBB12, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x6312, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x6014, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0015, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0011, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0xB812, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0xA012, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0010, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x2610, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0013, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0011, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0008, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x3053, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x4215, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x4013, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x4010, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0210, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1611, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x8111, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x8910, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x5612, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1009, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x010D, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0815, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0xC015, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0xD013, 0x10 }, + { 
0x2510, CRL_REG_LEN_16BIT, 0x5009, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1313, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0xD009, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0215, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0xC015, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0xC813, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0xC009, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0515, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x8813, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x8009, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0213, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x8809, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0411, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0xC909, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0814, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0109, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0B11, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0xD908, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1400, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x091A, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1440, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0903, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1214, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0901, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x10D6, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1210, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1212, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1210, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x11DD, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x11D9, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1056, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0917, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x11DB, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0913, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x11FB, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0905, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x11BB, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x121A, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1210, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1460, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1250, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1076, 0x10 }, + { 0x2510, 
CRL_REG_LEN_16BIT, 0x10E6, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0901, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x15A8, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0901, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x13A8, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1240, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1260, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0925, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x13AD, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0902, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0907, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1588, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0901, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x138D, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0B09, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0914, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x4009, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0B13, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x8809, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1C0C, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0920, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1262, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1260, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x11BF, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x11BB, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1066, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x090A, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x11FB, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x093B, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x11BB, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1263, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1260, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1400, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1508, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x11B8, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x12A0, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1200, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1026, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1000, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1300, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1100, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x437A, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 
0x0609, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0B05, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0708, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x4137, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x502C, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x2CFE, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x15FE, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0C2C, 0x10 }, + { 0x32E6, CRL_REG_LEN_16BIT, 0x00E0, 0x10 }, + { 0x1008, CRL_REG_LEN_16BIT, 0x036F, 0x10 }, + { 0x100C, CRL_REG_LEN_16BIT, 0x058F, 0x10 }, + { 0x100E, CRL_REG_LEN_16BIT, 0x07AF, 0x10 }, + { 0x1010, CRL_REG_LEN_16BIT, 0x014F, 0x10 }, + { 0x3230, CRL_REG_LEN_16BIT, 0x0312, 0x10 }, + { 0x3232, CRL_REG_LEN_16BIT, 0x0532, 0x10 }, + { 0x3234, CRL_REG_LEN_16BIT, 0x0752, 0x10 }, + { 0x3236, CRL_REG_LEN_16BIT, 0x00F2, 0x10 }, + { 0x3566, CRL_REG_LEN_16BIT, 0x3328, 0x10 }, + { 0x32D0, CRL_REG_LEN_16BIT, 0x3A02, 0x10 }, + { 0x32D2, CRL_REG_LEN_16BIT, 0x3508, 0x10 }, + { 0x32D4, CRL_REG_LEN_16BIT, 0x3702, 0x10 }, + { 0x32D6, CRL_REG_LEN_16BIT, 0x3C04, 0x10 }, + { 0x32DC, CRL_REG_LEN_16BIT, 0x370A, 0x10 }, + { 0x30B0, CRL_REG_LEN_16BIT, 0x0800, 0x10 }, + { 0x302A, CRL_REG_LEN_16BIT, 0x0008, 0x10 }, + { 0x302C, CRL_REG_LEN_16BIT, 0x0001, 0x10 }, + { 0x302E, CRL_REG_LEN_16BIT, 0x0003, 0x10 }, + { 0x3030, CRL_REG_LEN_16BIT, 0x004E, 0x10 }, + { 0x3036, CRL_REG_LEN_16BIT, 0x0008, 0x10 }, + { 0x3038, CRL_REG_LEN_16BIT, 0x0001, 0x10 }, + { 0x30B0, CRL_REG_LEN_16BIT, 0x0800, 0x10 }, + { 0x30A2, CRL_REG_LEN_16BIT, 0x0001, 0x10 }, + { 0x30A6, CRL_REG_LEN_16BIT, 0x0001, 0x10 }, + { 0x3040, CRL_REG_LEN_16BIT, 0x0000, 0x10 }, + { 0x3082, CRL_REG_LEN_16BIT, 0x0008, 0x10 }, + { 0x30BA, CRL_REG_LEN_16BIT, 0x11F2, 0x10 }, + { 0x3044, CRL_REG_LEN_16BIT, 0x0400, 0x10 }, + { 0x3064, CRL_REG_LEN_16BIT, 0x1802, 0x10 }, + { 0x33E0, CRL_REG_LEN_16BIT, 0x0C80, 0x10 }, + { 0x33E0, CRL_REG_LEN_16BIT, 0x0C80, 0x10 }, + { 0x3180, CRL_REG_LEN_16BIT, 0x0080, 0x10 }, + { 0x33E4, CRL_REG_LEN_16BIT, 0x0080, 0x10 }, + { 0x33E0, CRL_REG_LEN_16BIT, 0x0C80, 0x10 }, + { 
0x33E0, CRL_REG_LEN_16BIT, 0x0C80, 0x10 }, + { 0x3004, CRL_REG_LEN_16BIT, 0x0004, 0x10 }, + { 0x3008, CRL_REG_LEN_16BIT, 0x0783, 0x10 }, + { 0x3002, CRL_REG_LEN_16BIT, 0x003C, 0x10 }, + { 0x3006, CRL_REG_LEN_16BIT, 0x047B, 0x10 }, + { 0x3032, CRL_REG_LEN_16BIT, 0x0000, 0x10 }, + { 0x3400, CRL_REG_LEN_16BIT, 0x0010, 0x10 }, + { 0x3402, CRL_REG_LEN_16BIT, 0x0F10, 0x10 }, + { 0x3404, CRL_REG_LEN_16BIT, 0x0970, 0x10 }, + { 0x3082, CRL_REG_LEN_16BIT, 0x0000, 0x10 }, + { 0x30BA, CRL_REG_LEN_16BIT, 0x11F1, 0x10 }, + { 0x0000, CRL_REG_LEN_DELAY, 200, 0x10 }, + { 0x30BA, CRL_REG_LEN_16BIT, 0x11F0, 0x10 }, + { 0x300C, CRL_REG_LEN_16BIT, 0x0872, 0x10 }, + { 0x300A, CRL_REG_LEN_16BIT, 0x054A, 0x10 }, + { 0x3042, CRL_REG_LEN_16BIT, 0x0000, 0x10 }, + { 0x3238, CRL_REG_LEN_16BIT, 0x0222, 0x10 }, + { 0x3012, CRL_REG_LEN_16BIT, 0x0163, 0x10 }, + { 0x3014, CRL_REG_LEN_16BIT, 0x014F, 0x10 }, + { 0x30B0, CRL_REG_LEN_16BIT, 0x0800, 0x10 }, + { 0x32EA, CRL_REG_LEN_16BIT, 0x3C08, 0x10 }, + { 0x32EC, CRL_REG_LEN_16BIT, 0x72A1, 0x10 }, + { 0x31D0, CRL_REG_LEN_16BIT, 0x0001, 0x10 }, + { 0x31AE, CRL_REG_LEN_16BIT, 0x0001, 0x10 }, + { 0x31AC, CRL_REG_LEN_16BIT, 0x0C0A, 0x10 }, + /* try sync mode */ + { 0x340A, CRL_REG_LEN_16BIT, 0x0077, 0x10 }, + { 0x340C, CRL_REG_LEN_16BIT, 0x0080, 0x10 }, + { 0x30CE, CRL_REG_LEN_16BIT, 0x0120, 0x10 }, + { 0x301A, CRL_REG_LEN_16BIT, 0x19DC, 0x10 }, + { 0x3370, CRL_REG_LEN_16BIT, 0x0231, 0x10 }, +}; + +static struct crl_register_write_rep ar0231at_1920_1088_linear_mode[] = { + { 0x301A, CRL_REG_LEN_16BIT, 0x1058, 0x10 }, + { 0x0000, CRL_REG_LEN_DELAY, 200, 0x10 }, + { 0x3092, CRL_REG_LEN_16BIT, 0x0C24, 0x10 }, + { 0x337A, CRL_REG_LEN_16BIT, 0x0C80, 0x10 }, + { 0x3520, CRL_REG_LEN_16BIT, 0x1288, 0x10 }, + { 0x3522, CRL_REG_LEN_16BIT, 0x880C, 0x10 }, + { 0x3524, CRL_REG_LEN_16BIT, 0x0C12, 0x10 }, + { 0x352C, CRL_REG_LEN_16BIT, 0x1212, 0x10 }, + { 0x354A, CRL_REG_LEN_16BIT, 0x007F, 0x10 }, + { 0x350C, CRL_REG_LEN_16BIT, 0x055C, 0x10 }, + { 0x3506, 
CRL_REG_LEN_16BIT, 0x3333, 0x10 }, + { 0x3508, CRL_REG_LEN_16BIT, 0x3333, 0x10 }, + { 0x3100, CRL_REG_LEN_16BIT, 0x4000, 0x10 }, + { 0x3280, CRL_REG_LEN_16BIT, 0x0FA0, 0x10 }, + { 0x3282, CRL_REG_LEN_16BIT, 0x0FA0, 0x10 }, + { 0x3284, CRL_REG_LEN_16BIT, 0x0FA0, 0x10 }, + { 0x3286, CRL_REG_LEN_16BIT, 0x0FA0, 0x10 }, + { 0x3288, CRL_REG_LEN_16BIT, 0x0FA0, 0x10 }, + { 0x328A, CRL_REG_LEN_16BIT, 0x0FA0, 0x10 }, + { 0x328C, CRL_REG_LEN_16BIT, 0x0FA0, 0x10 }, + { 0x328E, CRL_REG_LEN_16BIT, 0x0FA0, 0x10 }, + { 0x3290, CRL_REG_LEN_16BIT, 0x0FA0, 0x10 }, + { 0x3292, CRL_REG_LEN_16BIT, 0x0FA0, 0x10 }, + { 0x3294, CRL_REG_LEN_16BIT, 0x0FA0, 0x10 }, + { 0x3296, CRL_REG_LEN_16BIT, 0x0FA0, 0x10 }, + { 0x3298, CRL_REG_LEN_16BIT, 0x0FA0, 0x10 }, + { 0x329A, CRL_REG_LEN_16BIT, 0x0FA0, 0x10 }, + { 0x329C, CRL_REG_LEN_16BIT, 0x0FA0, 0x10 }, + { 0x329E, CRL_REG_LEN_16BIT, 0x0FA0, 0x10 }, + { 0x301A, CRL_REG_LEN_16BIT, 0x10D8, 0x10 }, + { 0x0000, CRL_REG_LEN_DELAY, 200, 0x10 }, + { 0x2512, CRL_REG_LEN_16BIT, 0x8000, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0905, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x3350, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x2004, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1460, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1578, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0901, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x7B24, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0xFF24, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0xFF24, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0xEA24, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1022, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x2410, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x155A, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0901, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1400, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x24FF, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x24FF, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x24EA, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x2324, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x647A, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x2404, 
0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x052C, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x400A, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0xFF0A, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0xFF0A, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1008, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x3851, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1440, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0004, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0801, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0408, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1180, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x2652, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1518, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0906, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1348, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1002, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1016, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1181, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1189, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1056, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1210, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0901, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0D09, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1413, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x8809, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x2B15, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x8809, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0311, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0xD909, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1214, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x4109, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0312, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1409, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0110, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0xD612, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1012, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1212, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1011, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0xDD11, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0xD910, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x5609, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1511, 0x10 }, + { 0x2510, 
CRL_REG_LEN_16BIT, 0xDB09, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1511, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x9B09, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0F11, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0xBB12, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1A12, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1014, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x6012, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x5010, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x7610, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0xE609, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0812, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x4012, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x6009, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x290B, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0904, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1440, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0923, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x15C8, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x13C8, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x092C, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1588, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1388, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0C09, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0C14, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x4109, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1112, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x6212, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x6011, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0xBF11, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0xBB10, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x6611, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0xFB09, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x3511, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0xBB12, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x6312, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x6014, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0015, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0011, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0xB812, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0xA012, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0010, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 
0x2610, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0013, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0011, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0008, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x3053, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x4215, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x4013, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x4010, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0210, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1611, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x8111, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x8910, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x5612, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1009, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x010D, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0815, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0xC015, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0xD013, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x5009, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1313, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0xD009, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0215, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0xC015, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0xC813, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0xC009, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0515, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x8813, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x8009, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0213, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x8809, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0411, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0xC909, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0814, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0109, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0B11, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0xD908, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1400, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x091A, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1440, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0903, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1214, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0901, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x10D6, 0x10 }, + { 
0x2510, CRL_REG_LEN_16BIT, 0x1210, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1212, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1210, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x11DD, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x11D9, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1056, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0917, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x11DB, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0913, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x11FB, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0905, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x11BB, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x121A, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1210, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1460, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1250, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1076, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x10E6, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0901, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x15A8, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0901, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x13A8, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1240, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1260, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0925, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x13AD, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0902, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0907, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1588, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0901, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x138D, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0B09, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0914, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x4009, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0B13, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x8809, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1C0C, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0920, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1262, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1260, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x11BF, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x11BB, 0x10 }, + { 0x2510, 
CRL_REG_LEN_16BIT, 0x1066, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x090A, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x11FB, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x093B, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x11BB, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1263, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1260, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1400, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1508, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x11B8, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x12A0, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1200, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1026, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1000, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1300, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1100, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x437A, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0609, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0B05, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0708, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x4137, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x502C, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x2CFE, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x15FE, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0C2C, 0x10 }, + { 0x32E6, CRL_REG_LEN_16BIT, 0x00E0, 0x10 }, + { 0x1008, CRL_REG_LEN_16BIT, 0x036F, 0x10 }, + { 0x100C, CRL_REG_LEN_16BIT, 0x058F, 0x10 }, + { 0x100E, CRL_REG_LEN_16BIT, 0x07AF, 0x10 }, + { 0x1010, CRL_REG_LEN_16BIT, 0x014F, 0x10 }, + { 0x3230, CRL_REG_LEN_16BIT, 0x0312, 0x10 }, + { 0x3232, CRL_REG_LEN_16BIT, 0x0532, 0x10 }, + { 0x3234, CRL_REG_LEN_16BIT, 0x0752, 0x10 }, + { 0x3236, CRL_REG_LEN_16BIT, 0x00F2, 0x10 }, + { 0x3566, CRL_REG_LEN_16BIT, 0x3328, 0x10 }, + { 0x32D0, CRL_REG_LEN_16BIT, 0x3A02, 0x10 }, + { 0x32D2, CRL_REG_LEN_16BIT, 0x3508, 0x10 }, + { 0x32D4, CRL_REG_LEN_16BIT, 0x3702, 0x10 }, + { 0x32D6, CRL_REG_LEN_16BIT, 0x3C04, 0x10 }, + { 0x32DC, CRL_REG_LEN_16BIT, 0x370A, 0x10 }, + { 0x30B0, CRL_REG_LEN_16BIT, 0x0800, 0x10 }, + { 0x302A, CRL_REG_LEN_16BIT, 0x0008, 0x10 }, + { 0x302C, CRL_REG_LEN_16BIT, 
0x0001, 0x10 }, + { 0x302E, CRL_REG_LEN_16BIT, 0x0003, 0x10 }, + { 0x3030, CRL_REG_LEN_16BIT, 0x004E, 0x10 }, + { 0x3036, CRL_REG_LEN_16BIT, 0x0008, 0x10 }, + { 0x3038, CRL_REG_LEN_16BIT, 0x0001, 0x10 }, + { 0x30B0, CRL_REG_LEN_16BIT, 0x0800, 0x10 }, + { 0x30A2, CRL_REG_LEN_16BIT, 0x0001, 0x10 }, + { 0x30A6, CRL_REG_LEN_16BIT, 0x0001, 0x10 }, + { 0x3040, CRL_REG_LEN_16BIT, 0x0000, 0x10 }, + { 0x3082, CRL_REG_LEN_16BIT, 0x0008, 0x10 }, + { 0x30BA, CRL_REG_LEN_16BIT, 0x11F2, 0x10 }, + { 0x3044, CRL_REG_LEN_16BIT, 0x0400, 0x10 }, + { 0x3064, CRL_REG_LEN_16BIT, 0x1802, 0x10 }, + { 0x33E0, CRL_REG_LEN_16BIT, 0x0C80, 0x10 }, + { 0x33E0, CRL_REG_LEN_16BIT, 0x0C80, 0x10 }, + { 0x3180, CRL_REG_LEN_16BIT, 0x0080, 0x10 }, + { 0x33E4, CRL_REG_LEN_16BIT, 0x0080, 0x10 }, + { 0x33E0, CRL_REG_LEN_16BIT, 0x0C80, 0x10 }, + { 0x33E0, CRL_REG_LEN_16BIT, 0x0C80, 0x10 }, + { 0x3004, CRL_REG_LEN_16BIT, 0x0004, 0x10 }, + { 0x3008, CRL_REG_LEN_16BIT, 0x0783, 0x10 }, + { 0x3002, CRL_REG_LEN_16BIT, 0x003C, 0x10 }, + { 0x3006, CRL_REG_LEN_16BIT, 0x047B, 0x10 }, + { 0x3032, CRL_REG_LEN_16BIT, 0x0000, 0x10 }, + { 0x3400, CRL_REG_LEN_16BIT, 0x0010, 0x10 }, + { 0x3402, CRL_REG_LEN_16BIT, 0x0F10, 0x10 }, + { 0x3404, CRL_REG_LEN_16BIT, 0x0970, 0x10 }, + { 0x3082, CRL_REG_LEN_16BIT, 0x0000, 0x10 }, + { 0x30BA, CRL_REG_LEN_16BIT, 0x11F1, 0x10 }, + { 0x0000, CRL_REG_LEN_DELAY, 200, 0x10 }, + { 0x30BA, CRL_REG_LEN_16BIT, 0x11F0, 0x10 }, + { 0x300C, CRL_REG_LEN_16BIT, 0x0872, 0x10 }, + { 0x300A, CRL_REG_LEN_16BIT, 0x054A, 0x10 }, + { 0x3042, CRL_REG_LEN_16BIT, 0x0000, 0x10 }, + { 0x3238, CRL_REG_LEN_16BIT, 0x0222, 0x10 }, + { 0x3012, CRL_REG_LEN_16BIT, 0x0163, 0x10 }, + { 0x3014, CRL_REG_LEN_16BIT, 0x014F, 0x10 }, + { 0x30B0, CRL_REG_LEN_16BIT, 0x0800, 0x10 }, + { 0x32EA, CRL_REG_LEN_16BIT, 0x3C08, 0x10 }, + { 0x32EC, CRL_REG_LEN_16BIT, 0x72A1, 0x10 }, + { 0x31D0, CRL_REG_LEN_16BIT, 0x0000, 0x10 }, + { 0x31AE, CRL_REG_LEN_16BIT, 0x0001, 0x10 }, + { 0x31AC, CRL_REG_LEN_16BIT, 0x0C0C, 0x10 }, + /* try 
sync mode */ + { 0x340A, CRL_REG_LEN_16BIT, 0x0077, 0x10 }, + { 0x340C, CRL_REG_LEN_16BIT, 0x0080, 0x10 }, + { 0x30CE, CRL_REG_LEN_16BIT, 0x0120, 0x10 }, + { 0x301A, CRL_REG_LEN_16BIT, 0x19DC, 0x10 }, + { 0x3370, CRL_REG_LEN_16BIT, 0x0231, 0x10 }, +}; + +static struct crl_register_write_rep ar0231at_1920_1088_2hdr_mode[] = { + { 0x301A, CRL_REG_LEN_16BIT, 0x10D8, 0x10 }, + { 0x0000, CRL_REG_LEN_DELAY, 100, 0x10 }, + { 0x3092, CRL_REG_LEN_16BIT, 0x0C24, 0x10 }, + { 0x337A, CRL_REG_LEN_16BIT, 0x0C80, 0x10 }, + { 0x3520, CRL_REG_LEN_16BIT, 0x1288, 0x10 }, + { 0x3522, CRL_REG_LEN_16BIT, 0x880C, 0x10 }, + { 0x3524, CRL_REG_LEN_16BIT, 0x0C12, 0x10 }, + { 0x352C, CRL_REG_LEN_16BIT, 0x1212, 0x10 }, + { 0x354A, CRL_REG_LEN_16BIT, 0x007F, 0x10 }, + { 0x350C, CRL_REG_LEN_16BIT, 0x055C, 0x10 }, + { 0x3506, CRL_REG_LEN_16BIT, 0x3333, 0x10 }, + { 0x3508, CRL_REG_LEN_16BIT, 0x3333, 0x10 }, + { 0x3100, CRL_REG_LEN_16BIT, 0x4000, 0x10 }, + { 0x3280, CRL_REG_LEN_16BIT, 0x0FA0, 0x10 }, + { 0x3282, CRL_REG_LEN_16BIT, 0x0FA0, 0x10 }, + { 0x3284, CRL_REG_LEN_16BIT, 0x0FA0, 0x10 }, + { 0x3286, CRL_REG_LEN_16BIT, 0x0FA0, 0x10 }, + { 0x3288, CRL_REG_LEN_16BIT, 0x0FA0, 0x10 }, + { 0x328A, CRL_REG_LEN_16BIT, 0x0FA0, 0x10 }, + { 0x328C, CRL_REG_LEN_16BIT, 0x0FA0, 0x10 }, + { 0x328E, CRL_REG_LEN_16BIT, 0x0FA0, 0x10 }, + { 0x3290, CRL_REG_LEN_16BIT, 0x0FA0, 0x10 }, + { 0x3292, CRL_REG_LEN_16BIT, 0x0FA0, 0x10 }, + { 0x3294, CRL_REG_LEN_16BIT, 0x0FA0, 0x10 }, + { 0x3296, CRL_REG_LEN_16BIT, 0x0FA0, 0x10 }, + { 0x3298, CRL_REG_LEN_16BIT, 0x0FA0, 0x10 }, + { 0x329A, CRL_REG_LEN_16BIT, 0x0FA0, 0x10 }, + { 0x329C, CRL_REG_LEN_16BIT, 0x0FA0, 0x10 }, + { 0x329E, CRL_REG_LEN_16BIT, 0x0FA0, 0x10 }, + { 0x301A, CRL_REG_LEN_16BIT, 0x10D8, 0x10 }, + { 0x0000, CRL_REG_LEN_DELAY, 200, 0x10 }, + { 0x2512, CRL_REG_LEN_16BIT, 0x8000, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0905, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x3350, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x2004, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 
0x1460, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1578, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0901, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x7B24, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0xFF24, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0xFF24, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0xEA24, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1022, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x2410, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x155A, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0901, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1400, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x24FF, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x24FF, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x24EA, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x2324, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x647A, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x2404, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x052C, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x400A, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0xFF0A, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0xFF0A, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1008, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x3851, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1440, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0004, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0801, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0408, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1180, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x2652, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1518, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0906, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1348, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1002, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1016, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1181, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1189, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1056, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1210, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0901, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0D09, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1413, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x8809, 0x10 }, + { 
0x2510, CRL_REG_LEN_16BIT, 0x2B15, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x8809, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0311, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0xD909, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1214, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x4109, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0312, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1409, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0110, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0xD612, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1012, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1212, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1011, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0xDD11, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0xD910, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x5609, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1511, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0xDB09, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1511, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x9B09, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0F11, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0xBB12, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1A12, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1014, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x6012, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x5010, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x7610, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0xE609, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0812, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x4012, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x6009, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x290B, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0904, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1440, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0923, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x15C8, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x13C8, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x092C, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1588, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1388, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0C09, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0C14, 0x10 }, + { 0x2510, 
CRL_REG_LEN_16BIT, 0x4109, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1112, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x6212, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x6011, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0xBF11, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0xBB10, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x6611, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0xFB09, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x3511, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0xBB12, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x6312, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x6014, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0015, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0011, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0xB812, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0xA012, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0010, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x2610, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0013, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0011, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0008, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x3053, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x4215, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x4013, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x4010, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0210, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1611, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x8111, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x8910, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x5612, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1009, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x010D, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0815, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0xC015, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0xD013, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x5009, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1313, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0xD009, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0215, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0xC015, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0xC813, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0xC009, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 
0x0515, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x8813, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x8009, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0213, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x8809, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0411, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0xC909, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0814, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0109, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0B11, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0xD908, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1400, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x091A, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1440, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0903, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1214, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0901, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x10D6, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1210, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1212, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1210, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x11DD, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x11D9, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1056, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0917, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x11DB, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0913, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x11FB, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0905, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x11BB, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x121A, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1210, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1460, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1250, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1076, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x10E6, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0901, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x15A8, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0901, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x13A8, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1240, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1260, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0925, 0x10 }, + { 
0x2510, CRL_REG_LEN_16BIT, 0x13AD, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0902, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0907, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1588, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0901, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x138D, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0B09, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0914, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x4009, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0B13, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x8809, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1C0C, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0920, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1262, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1260, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x11BF, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x11BB, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1066, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x090A, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x11FB, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x093B, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x11BB, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1263, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1260, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1400, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1508, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x11B8, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x12A0, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1200, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1026, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1000, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1300, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1100, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x437A, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0609, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0B05, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0708, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x4137, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x502C, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x2CFE, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x15FE, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0C2C, 0x10 }, + { 0x32E6, 
CRL_REG_LEN_16BIT, 0x00E0, 0x10 }, + { 0x1008, CRL_REG_LEN_16BIT, 0x036F, 0x10 }, + { 0x100C, CRL_REG_LEN_16BIT, 0x058F, 0x10 }, + { 0x100E, CRL_REG_LEN_16BIT, 0x07AF, 0x10 }, + { 0x1010, CRL_REG_LEN_16BIT, 0x014F, 0x10 }, + { 0x3230, CRL_REG_LEN_16BIT, 0x0312, 0x10 }, + { 0x3232, CRL_REG_LEN_16BIT, 0x0532, 0x10 }, + { 0x3234, CRL_REG_LEN_16BIT, 0x0752, 0x10 }, + { 0x3236, CRL_REG_LEN_16BIT, 0x00F2, 0x10 }, + { 0x3566, CRL_REG_LEN_16BIT, 0x3328, 0x10 }, + { 0x32D0, CRL_REG_LEN_16BIT, 0x3A02, 0x10 }, + { 0x32D2, CRL_REG_LEN_16BIT, 0x3508, 0x10 }, + { 0x32D4, CRL_REG_LEN_16BIT, 0x3702, 0x10 }, + { 0x32D6, CRL_REG_LEN_16BIT, 0x3C04, 0x10 }, + { 0x32DC, CRL_REG_LEN_16BIT, 0x370A, 0x10 }, + { 0x30B0, CRL_REG_LEN_16BIT, 0x0800, 0x10 }, + { 0x302A, CRL_REG_LEN_16BIT, 0x0008, 0x10 }, + { 0x302C, CRL_REG_LEN_16BIT, 0x0001, 0x10 }, + { 0x302E, CRL_REG_LEN_16BIT, 0x0003, 0x10 }, + { 0x3030, CRL_REG_LEN_16BIT, 0x004E, 0x10 }, + { 0x3036, CRL_REG_LEN_16BIT, 0x0008, 0x10 }, + { 0x3038, CRL_REG_LEN_16BIT, 0x0001, 0x10 }, + { 0x30B0, CRL_REG_LEN_16BIT, 0x0800, 0x10 }, + { 0x30A2, CRL_REG_LEN_16BIT, 0x0001, 0x10 }, + { 0x30A6, CRL_REG_LEN_16BIT, 0x0001, 0x10 }, + { 0x3040, CRL_REG_LEN_16BIT, 0x0000, 0x10 }, + { 0x3040, CRL_REG_LEN_16BIT, 0x0000, 0x10 }, + { 0x3082, CRL_REG_LEN_16BIT, 0x0004, 0x10 }, + { 0x30BA, CRL_REG_LEN_16BIT, 0x11F1, 0x10 }, + { 0x3044, CRL_REG_LEN_16BIT, 0x0400, 0x10 }, + { 0x3044, CRL_REG_LEN_16BIT, 0x0400, 0x10 }, + { 0x3044, CRL_REG_LEN_16BIT, 0x0400, 0x10 }, + { 0x3044, CRL_REG_LEN_16BIT, 0x0400, 0x10 }, + { 0x3064, CRL_REG_LEN_16BIT, 0x1802, 0x10 }, + { 0x3064, CRL_REG_LEN_16BIT, 0x1802, 0x10 }, + { 0x3064, CRL_REG_LEN_16BIT, 0x1802, 0x10 }, + { 0x3064, CRL_REG_LEN_16BIT, 0x1802, 0x10 }, + { 0x33C0, CRL_REG_LEN_16BIT, 0x2000, 0x10 }, + { 0x33C2, CRL_REG_LEN_16BIT, 0x3440, 0x10 }, + { 0x33C4, CRL_REG_LEN_16BIT, 0x4890, 0x10 }, + { 0x33C6, CRL_REG_LEN_16BIT, 0x5CE0, 0x10 }, + { 0x33C8, CRL_REG_LEN_16BIT, 0x7140, 0x10 }, + { 0x33CA, CRL_REG_LEN_16BIT, 
0x8590, 0x10 }, + { 0x33CC, CRL_REG_LEN_16BIT, 0x99E0, 0x10 }, + { 0x33CE, CRL_REG_LEN_16BIT, 0xAE40, 0x10 }, + { 0x33D0, CRL_REG_LEN_16BIT, 0xC290, 0x10 }, + { 0x33D2, CRL_REG_LEN_16BIT, 0xD6F0, 0x10 }, + { 0x33D4, CRL_REG_LEN_16BIT, 0xEB40, 0x10 }, + { 0x33D6, CRL_REG_LEN_16BIT, 0x0000, 0x10 }, + { 0x33DA, CRL_REG_LEN_16BIT, 0x0000, 0x10 }, + { 0x33E0, CRL_REG_LEN_16BIT, 0x0C80, 0x10 }, + { 0x33E0, CRL_REG_LEN_16BIT, 0x0C80, 0x10 }, + { 0x3180, CRL_REG_LEN_16BIT, 0x0080, 0x10 }, + { 0x33E4, CRL_REG_LEN_16BIT, 0x0080, 0x10 }, + { 0x33E0, CRL_REG_LEN_16BIT, 0x0C80, 0x10 }, + { 0x33E0, CRL_REG_LEN_16BIT, 0x0C80, 0x10 }, + { 0x3004, CRL_REG_LEN_16BIT, 0x0004, 0x10 }, + { 0x3008, CRL_REG_LEN_16BIT, 0x0783, 0x10 }, + { 0x3002, CRL_REG_LEN_16BIT, 0x003C, 0x10 }, + { 0x3006, CRL_REG_LEN_16BIT, 0x047B, 0x10 }, + { 0x3032, CRL_REG_LEN_16BIT, 0x0000, 0x10 }, + { 0x3400, CRL_REG_LEN_16BIT, 0x0010, 0x10 }, + { 0x3402, CRL_REG_LEN_16BIT, 0x0788, 0x10 }, + { 0x3402, CRL_REG_LEN_16BIT, 0x0F10, 0x10 }, + { 0x3404, CRL_REG_LEN_16BIT, 0x04B8, 0x10 }, + { 0x3404, CRL_REG_LEN_16BIT, 0x0970, 0x10 }, + { 0x3082, CRL_REG_LEN_16BIT, 0x0004, 0x10 }, + { 0x30BA, CRL_REG_LEN_16BIT, 0x11F1, 0x10 }, + { 0x300C, CRL_REG_LEN_16BIT, 0x0872, 0x10 }, + { 0x300A, CRL_REG_LEN_16BIT, 0x054A, 0x10 }, + { 0x3042, CRL_REG_LEN_16BIT, 0x0000, 0x10 }, + { 0x3238, CRL_REG_LEN_16BIT, 0x0222, 0x10 }, + { 0x3238, CRL_REG_LEN_16BIT, 0x0222, 0x10 }, + { 0x3238, CRL_REG_LEN_16BIT, 0x0222, 0x10 }, + { 0x3238, CRL_REG_LEN_16BIT, 0x0222, 0x10 }, + { 0x3012, CRL_REG_LEN_16BIT, 0x0163, 0x10 }, + { 0x3014, CRL_REG_LEN_16BIT, 0x0882, 0x10 }, + { 0x321E, CRL_REG_LEN_16BIT, 0x0882, 0x10 }, + { 0x3222, CRL_REG_LEN_16BIT, 0x0882, 0x10 }, + { 0x30B0, CRL_REG_LEN_16BIT, 0x0800, 0x10 }, + { 0x32EA, CRL_REG_LEN_16BIT, 0x3C0E, 0x10 }, + { 0x32EA, CRL_REG_LEN_16BIT, 0x3C0E, 0x10 }, + { 0x32EA, CRL_REG_LEN_16BIT, 0x3C0E, 0x10 }, + { 0x32EC, CRL_REG_LEN_16BIT, 0x72A1, 0x10 }, + { 0x32EC, CRL_REG_LEN_16BIT, 0x72A1, 0x10 }, + { 
0x32EC, CRL_REG_LEN_16BIT, 0x72A1, 0x10 }, + { 0x32EC, CRL_REG_LEN_16BIT, 0x72A1, 0x10 }, + { 0x32EC, CRL_REG_LEN_16BIT, 0x72A1, 0x10 }, + { 0x32EC, CRL_REG_LEN_16BIT, 0x72A1, 0x10 }, + { 0x31D0, CRL_REG_LEN_16BIT, 0x0001, 0x10 }, + { 0x31AE, CRL_REG_LEN_16BIT, 0x0201, 0x10 }, + { 0x31AE, CRL_REG_LEN_16BIT, 0x0001, 0x10 }, + { 0x31AC, CRL_REG_LEN_16BIT, 0x140C, 0x10 }, + { 0x340A, CRL_REG_LEN_16BIT, 0x0077, 0x10 }, + { 0x340C, CRL_REG_LEN_16BIT, 0x0080, 0x10 }, + { 0x30CE, CRL_REG_LEN_16BIT, 0x0120, 0x10 }, + { 0x301A, CRL_REG_LEN_16BIT, 0x19DC, 0x10 }, + { 0x3370, CRL_REG_LEN_16BIT, 0x0231, 0x10 }, +}; + +static struct crl_register_write_rep ar0231at_1920_1088_3hdr_mode[] = { + { 0x301A, CRL_REG_LEN_16BIT, 0x10D8, 0x10 }, + { 0x0000, CRL_REG_LEN_DELAY, 100, 0x10 }, + { 0x3092, CRL_REG_LEN_16BIT, 0x0C24, 0x10 }, + { 0x337A, CRL_REG_LEN_16BIT, 0x0C80, 0x10 }, + { 0x3520, CRL_REG_LEN_16BIT, 0x1288, 0x10 }, + { 0x3522, CRL_REG_LEN_16BIT, 0x880C, 0x10 }, + { 0x3524, CRL_REG_LEN_16BIT, 0x0C12, 0x10 }, + { 0x352C, CRL_REG_LEN_16BIT, 0x1212, 0x10 }, + { 0x354A, CRL_REG_LEN_16BIT, 0x007F, 0x10 }, + { 0x350C, CRL_REG_LEN_16BIT, 0x055C, 0x10 }, + { 0x3506, CRL_REG_LEN_16BIT, 0x3333, 0x10 }, + { 0x3508, CRL_REG_LEN_16BIT, 0x3333, 0x10 }, + { 0x3100, CRL_REG_LEN_16BIT, 0x4000, 0x10 }, + { 0x3280, CRL_REG_LEN_16BIT, 0x0FA0, 0x10 }, + { 0x3282, CRL_REG_LEN_16BIT, 0x0FA0, 0x10 }, + { 0x3284, CRL_REG_LEN_16BIT, 0x0FA0, 0x10 }, + { 0x3286, CRL_REG_LEN_16BIT, 0x0FA0, 0x10 }, + { 0x3288, CRL_REG_LEN_16BIT, 0x0FA0, 0x10 }, + { 0x328A, CRL_REG_LEN_16BIT, 0x0FA0, 0x10 }, + { 0x328C, CRL_REG_LEN_16BIT, 0x0FA0, 0x10 }, + { 0x328E, CRL_REG_LEN_16BIT, 0x0FA0, 0x10 }, + { 0x3290, CRL_REG_LEN_16BIT, 0x0FA0, 0x10 }, + { 0x3292, CRL_REG_LEN_16BIT, 0x0FA0, 0x10 }, + { 0x3294, CRL_REG_LEN_16BIT, 0x0FA0, 0x10 }, + { 0x3296, CRL_REG_LEN_16BIT, 0x0FA0, 0x10 }, + { 0x3298, CRL_REG_LEN_16BIT, 0x0FA0, 0x10 }, + { 0x329A, CRL_REG_LEN_16BIT, 0x0FA0, 0x10 }, + { 0x329C, CRL_REG_LEN_16BIT, 0x0FA0, 0x10 }, 
+ { 0x329E, CRL_REG_LEN_16BIT, 0x0FA0, 0x10 }, + { 0x301A, CRL_REG_LEN_16BIT, 0x10D8, 0x10 }, + { 0x0000, CRL_REG_LEN_DELAY, 200, 0x10 }, + { 0x2512, CRL_REG_LEN_16BIT, 0x8000, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0905, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x3350, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x2004, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1460, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1578, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0901, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x7B24, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0xFF24, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0xFF24, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0xEA24, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1022, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x2410, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x155A, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0901, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1400, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x24FF, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x24FF, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x24EA, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x2324, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x647A, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x2404, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x052C, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x400A, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0xFF0A, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0xFF0A, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1008, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x3851, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1440, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0004, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0801, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0408, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1180, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x2652, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1518, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0906, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1348, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1002, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1016, 0x10 }, + { 0x2510, 
CRL_REG_LEN_16BIT, 0x1181, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1189, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1056, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1210, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0901, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0D09, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1413, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x8809, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x2B15, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x8809, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0311, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0xD909, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1214, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x4109, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0312, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1409, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0110, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0xD612, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1012, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1212, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1011, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0xDD11, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0xD910, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x5609, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1511, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0xDB09, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1511, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x9B09, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0F11, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0xBB12, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1A12, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1014, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x6012, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x5010, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x7610, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0xE609, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0812, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x4012, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x6009, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x290B, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0904, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1440, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 
0x0923, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x15C8, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x13C8, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x092C, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1588, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1388, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0C09, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0C14, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x4109, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1112, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x6212, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x6011, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0xBF11, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0xBB10, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x6611, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0xFB09, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x3511, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0xBB12, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x6312, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x6014, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0015, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0011, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0xB812, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0xA012, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0010, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x2610, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0013, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0011, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0008, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x3053, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x4215, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x4013, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x4010, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0210, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1611, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x8111, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x8910, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x5612, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1009, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x010D, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0815, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0xC015, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0xD013, 0x10 }, + { 
0x2510, CRL_REG_LEN_16BIT, 0x5009, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1313, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0xD009, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0215, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0xC015, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0xC813, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0xC009, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0515, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x8813, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x8009, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0213, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x8809, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0411, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0xC909, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0814, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0109, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0B11, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0xD908, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1400, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x091A, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1440, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0903, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1214, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0901, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x10D6, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1210, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1212, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1210, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x11DD, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x11D9, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1056, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0917, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x11DB, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0913, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x11FB, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0905, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x11BB, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x121A, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1210, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1460, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1250, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1076, 0x10 }, + { 0x2510, 
CRL_REG_LEN_16BIT, 0x10E6, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0901, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x15A8, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0901, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x13A8, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1240, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1260, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0925, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x13AD, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0902, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0907, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1588, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0901, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x138D, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0B09, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0914, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x4009, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0B13, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x8809, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1C0C, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0920, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1262, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1260, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x11BF, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x11BB, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1066, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x090A, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x11FB, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x093B, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x11BB, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1263, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1260, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1400, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1508, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x11B8, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x12A0, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1200, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1026, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1000, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1300, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1100, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x437A, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 
0x0609, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0B05, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0708, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x4137, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x502C, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x2CFE, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x15FE, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0C2C, 0x10 }, + { 0x32E6, CRL_REG_LEN_16BIT, 0x00E0, 0x10 }, + { 0x1008, CRL_REG_LEN_16BIT, 0x036F, 0x10 }, + { 0x100C, CRL_REG_LEN_16BIT, 0x058F, 0x10 }, + { 0x100E, CRL_REG_LEN_16BIT, 0x07AF, 0x10 }, + { 0x1010, CRL_REG_LEN_16BIT, 0x014F, 0x10 }, + { 0x3230, CRL_REG_LEN_16BIT, 0x0312, 0x10 }, + { 0x3232, CRL_REG_LEN_16BIT, 0x0532, 0x10 }, + { 0x3234, CRL_REG_LEN_16BIT, 0x0752, 0x10 }, + { 0x3236, CRL_REG_LEN_16BIT, 0x00F2, 0x10 }, + { 0x3566, CRL_REG_LEN_16BIT, 0x3328, 0x10 }, + { 0x32D0, CRL_REG_LEN_16BIT, 0x3A02, 0x10 }, + { 0x32D2, CRL_REG_LEN_16BIT, 0x3508, 0x10 }, + { 0x32D4, CRL_REG_LEN_16BIT, 0x3702, 0x10 }, + { 0x32D6, CRL_REG_LEN_16BIT, 0x3C04, 0x10 }, + { 0x32DC, CRL_REG_LEN_16BIT, 0x370A, 0x10 }, + { 0x30B0, CRL_REG_LEN_16BIT, 0x0800, 0x10 }, + { 0x302A, CRL_REG_LEN_16BIT, 0x0008, 0x10 }, + { 0x302C, CRL_REG_LEN_16BIT, 0x0001, 0x10 }, + { 0x302E, CRL_REG_LEN_16BIT, 0x0003, 0x10 }, + { 0x3030, CRL_REG_LEN_16BIT, 0x004E, 0x10 }, + { 0x3036, CRL_REG_LEN_16BIT, 0x0008, 0x10 }, + { 0x3038, CRL_REG_LEN_16BIT, 0x0001, 0x10 }, + { 0x30B0, CRL_REG_LEN_16BIT, 0x0800, 0x10 }, + { 0x30A2, CRL_REG_LEN_16BIT, 0x0001, 0x10 }, + { 0x30A6, CRL_REG_LEN_16BIT, 0x0001, 0x10 }, + { 0x3040, CRL_REG_LEN_16BIT, 0x0000, 0x10 }, + { 0x3040, CRL_REG_LEN_16BIT, 0x0000, 0x10 }, + { 0x3082, CRL_REG_LEN_16BIT, 0x0008, 0x10 }, + { 0x3082, CRL_REG_LEN_16BIT, 0x0008, 0x10 }, + { 0x3082, CRL_REG_LEN_16BIT, 0x0008, 0x10 }, + { 0x3082, CRL_REG_LEN_16BIT, 0x0008, 0x10 }, + { 0x30BA, CRL_REG_LEN_16BIT, 0x11F2, 0x10 }, + { 0x30BA, CRL_REG_LEN_16BIT, 0x11F2, 0x10 }, + { 0x30BA, CRL_REG_LEN_16BIT, 0x11F2, 0x10 }, + { 0x3044, CRL_REG_LEN_16BIT, 0x0400, 0x10 }, + { 
0x3044, CRL_REG_LEN_16BIT, 0x0400, 0x10 }, + { 0x3044, CRL_REG_LEN_16BIT, 0x0400, 0x10 }, + { 0x3044, CRL_REG_LEN_16BIT, 0x0400, 0x10 }, + { 0x3064, CRL_REG_LEN_16BIT, 0x1802, 0x10 }, + { 0x3064, CRL_REG_LEN_16BIT, 0x1802, 0x10 }, + { 0x3064, CRL_REG_LEN_16BIT, 0x1802, 0x10 }, + { 0x3064, CRL_REG_LEN_16BIT, 0x1802, 0x10 }, + { 0x33C0, CRL_REG_LEN_16BIT, 0x2000, 0x10 }, + { 0x33C2, CRL_REG_LEN_16BIT, 0x3440, 0x10 }, + { 0x33C4, CRL_REG_LEN_16BIT, 0x4890, 0x10 }, + { 0x33C6, CRL_REG_LEN_16BIT, 0x5CE0, 0x10 }, + { 0x33C8, CRL_REG_LEN_16BIT, 0x7140, 0x10 }, + { 0x33CA, CRL_REG_LEN_16BIT, 0x8590, 0x10 }, + { 0x33CC, CRL_REG_LEN_16BIT, 0x99E0, 0x10 }, + { 0x33CE, CRL_REG_LEN_16BIT, 0xAE40, 0x10 }, + { 0x33D0, CRL_REG_LEN_16BIT, 0xC290, 0x10 }, + { 0x33D2, CRL_REG_LEN_16BIT, 0xD6F0, 0x10 }, + { 0x33D4, CRL_REG_LEN_16BIT, 0xEB40, 0x10 }, + { 0x33D6, CRL_REG_LEN_16BIT, 0x0000, 0x10 }, + { 0x33DA, CRL_REG_LEN_16BIT, 0x0000, 0x10 }, + { 0x33E0, CRL_REG_LEN_16BIT, 0x0C80, 0x10 }, + { 0x33E0, CRL_REG_LEN_16BIT, 0x0C80, 0x10 }, + { 0x3180, CRL_REG_LEN_16BIT, 0x0080, 0x10 }, + { 0x33E4, CRL_REG_LEN_16BIT, 0x0080, 0x10 }, + { 0x33E0, CRL_REG_LEN_16BIT, 0x0C80, 0x10 }, + { 0x33E0, CRL_REG_LEN_16BIT, 0x0C80, 0x10 }, + { 0x3004, CRL_REG_LEN_16BIT, 0x0004, 0x10 }, + { 0x3008, CRL_REG_LEN_16BIT, 0x0783, 0x10 }, + { 0x3002, CRL_REG_LEN_16BIT, 0x003C, 0x10 }, + { 0x3006, CRL_REG_LEN_16BIT, 0x047B, 0x10 }, + { 0x3032, CRL_REG_LEN_16BIT, 0x0000, 0x10 }, + { 0x3400, CRL_REG_LEN_16BIT, 0x0010, 0x10 }, + { 0x3402, CRL_REG_LEN_16BIT, 0x0788, 0x10 }, + { 0x3402, CRL_REG_LEN_16BIT, 0x0F10, 0x10 }, + { 0x3404, CRL_REG_LEN_16BIT, 0x04B8, 0x10 }, + { 0x3404, CRL_REG_LEN_16BIT, 0x0970, 0x10 }, + { 0x3082, CRL_REG_LEN_16BIT, 0x0008, 0x10 }, + { 0x30BA, CRL_REG_LEN_16BIT, 0x11F2, 0x10 }, + { 0x300C, CRL_REG_LEN_16BIT, 0x0872, 0x10 }, + { 0x300A, CRL_REG_LEN_16BIT, 0x054A, 0x10 }, + { 0x3042, CRL_REG_LEN_16BIT, 0x0000, 0x10 }, + { 0x3238, CRL_REG_LEN_16BIT, 0x0222, 0x10 }, + { 0x3238, 
CRL_REG_LEN_16BIT, 0x0222, 0x10 }, + { 0x3238, CRL_REG_LEN_16BIT, 0x0222, 0x10 }, + { 0x3238, CRL_REG_LEN_16BIT, 0x0222, 0x10 }, + { 0x3012, CRL_REG_LEN_16BIT, 0x0163, 0x10 }, + { 0x3014, CRL_REG_LEN_16BIT, 0x0882, 0x10 }, + { 0x321E, CRL_REG_LEN_16BIT, 0x0882, 0x10 }, + { 0x3222, CRL_REG_LEN_16BIT, 0x0882, 0x10 }, + { 0x30B0, CRL_REG_LEN_16BIT, 0x0800, 0x10 }, + { 0x32EA, CRL_REG_LEN_16BIT, 0x3C0E, 0x10 }, + { 0x32EA, CRL_REG_LEN_16BIT, 0x3C0E, 0x10 }, + { 0x32EA, CRL_REG_LEN_16BIT, 0x3C0E, 0x10 }, + { 0x32EC, CRL_REG_LEN_16BIT, 0x72A1, 0x10 }, + { 0x32EC, CRL_REG_LEN_16BIT, 0x72A1, 0x10 }, + { 0x32EC, CRL_REG_LEN_16BIT, 0x72A1, 0x10 }, + { 0x32EC, CRL_REG_LEN_16BIT, 0x72A1, 0x10 }, + { 0x32EC, CRL_REG_LEN_16BIT, 0x72A1, 0x10 }, + { 0x32EC, CRL_REG_LEN_16BIT, 0x72A1, 0x10 }, + { 0x31D0, CRL_REG_LEN_16BIT, 0x0001, 0x10 }, + { 0x31AE, CRL_REG_LEN_16BIT, 0x0201, 0x10 }, + { 0x31AE, CRL_REG_LEN_16BIT, 0x0001, 0x10 }, + { 0x31AC, CRL_REG_LEN_16BIT, 0x140C, 0x10 }, + { 0x340A, CRL_REG_LEN_16BIT, 0x0077, 0x10 }, + { 0x340C, CRL_REG_LEN_16BIT, 0x0080, 0x10 }, + { 0x30CE, CRL_REG_LEN_16BIT, 0x0120, 0x10 }, + { 0x301A, CRL_REG_LEN_16BIT, 0x19DC, 0x10 }, + { 0x3370, CRL_REG_LEN_16BIT, 0x0231, 0x10 }, +}; + +static struct crl_register_write_rep ar0231at_1920_1088_4hdr_mode[] = { + { 0x301A, CRL_REG_LEN_16BIT, 0x10D8, 0x10 }, + { 0x0000, CRL_REG_LEN_DELAY, 100, 0x10 }, + { 0x3092, CRL_REG_LEN_16BIT, 0x0C24, 0x10 }, + { 0x337A, CRL_REG_LEN_16BIT, 0x0C80, 0x10 }, + { 0x3520, CRL_REG_LEN_16BIT, 0x1288, 0x10 }, + { 0x3522, CRL_REG_LEN_16BIT, 0x880C, 0x10 }, + { 0x3524, CRL_REG_LEN_16BIT, 0x0C12, 0x10 }, + { 0x352C, CRL_REG_LEN_16BIT, 0x1212, 0x10 }, + { 0x354A, CRL_REG_LEN_16BIT, 0x007F, 0x10 }, + { 0x350C, CRL_REG_LEN_16BIT, 0x055C, 0x10 }, + { 0x3506, CRL_REG_LEN_16BIT, 0x3333, 0x10 }, + { 0x3508, CRL_REG_LEN_16BIT, 0x3333, 0x10 }, + { 0x3100, CRL_REG_LEN_16BIT, 0x4000, 0x10 }, + { 0x3280, CRL_REG_LEN_16BIT, 0x0FA0, 0x10 }, + { 0x3282, CRL_REG_LEN_16BIT, 0x0FA0, 0x10 }, + { 
0x3284, CRL_REG_LEN_16BIT, 0x0FA0, 0x10 }, + { 0x3286, CRL_REG_LEN_16BIT, 0x0FA0, 0x10 }, + { 0x3288, CRL_REG_LEN_16BIT, 0x0FA0, 0x10 }, + { 0x328A, CRL_REG_LEN_16BIT, 0x0FA0, 0x10 }, + { 0x328C, CRL_REG_LEN_16BIT, 0x0FA0, 0x10 }, + { 0x328E, CRL_REG_LEN_16BIT, 0x0FA0, 0x10 }, + { 0x3290, CRL_REG_LEN_16BIT, 0x0FA0, 0x10 }, + { 0x3292, CRL_REG_LEN_16BIT, 0x0FA0, 0x10 }, + { 0x3294, CRL_REG_LEN_16BIT, 0x0FA0, 0x10 }, + { 0x3296, CRL_REG_LEN_16BIT, 0x0FA0, 0x10 }, + { 0x3298, CRL_REG_LEN_16BIT, 0x0FA0, 0x10 }, + { 0x329A, CRL_REG_LEN_16BIT, 0x0FA0, 0x10 }, + { 0x329C, CRL_REG_LEN_16BIT, 0x0FA0, 0x10 }, + { 0x329E, CRL_REG_LEN_16BIT, 0x0FA0, 0x10 }, + { 0x301A, CRL_REG_LEN_16BIT, 0x10D8, 0x10 }, + { 0x0000, CRL_REG_LEN_DELAY, 200, 0x10 }, + { 0x2512, CRL_REG_LEN_16BIT, 0x8000, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0905, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x3350, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x2004, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1460, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1578, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0901, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x7B24, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0xFF24, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0xFF24, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0xEA24, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1022, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x2410, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x155A, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0901, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1400, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x24FF, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x24FF, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x24EA, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x2324, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x647A, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x2404, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x052C, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x400A, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0xFF0A, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0xFF0A, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 
0x1008, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x3851, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1440, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0004, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0801, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0408, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1180, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x2652, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1518, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0906, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1348, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1002, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1016, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1181, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1189, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1056, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1210, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0901, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0D09, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1413, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x8809, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x2B15, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x8809, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0311, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0xD909, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1214, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x4109, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0312, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1409, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0110, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0xD612, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1012, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1212, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1011, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0xDD11, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0xD910, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x5609, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1511, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0xDB09, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1511, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x9B09, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0F11, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0xBB12, 0x10 }, + { 
0x2510, CRL_REG_LEN_16BIT, 0x1A12, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1014, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x6012, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x5010, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x7610, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0xE609, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0812, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x4012, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x6009, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x290B, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0904, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1440, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0923, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x15C8, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x13C8, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x092C, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1588, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1388, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0C09, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0C14, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x4109, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1112, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x6212, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x6011, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0xBF11, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0xBB10, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x6611, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0xFB09, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x3511, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0xBB12, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x6312, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x6014, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0015, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0011, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0xB812, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0xA012, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0010, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x2610, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0013, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0011, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0008, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x3053, 0x10 }, + { 0x2510, 
CRL_REG_LEN_16BIT, 0x4215, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x4013, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x4010, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0210, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1611, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x8111, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x8910, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x5612, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1009, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x010D, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0815, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0xC015, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0xD013, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x5009, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1313, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0xD009, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0215, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0xC015, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0xC813, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0xC009, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0515, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x8813, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x8009, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0213, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x8809, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0411, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0xC909, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0814, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0109, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0B11, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0xD908, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1400, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x091A, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1440, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0903, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1214, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0901, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x10D6, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1210, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1212, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1210, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x11DD, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 
0x11D9, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1056, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0917, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x11DB, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0913, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x11FB, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0905, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x11BB, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x121A, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1210, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1460, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1250, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1076, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x10E6, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0901, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x15A8, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0901, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x13A8, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1240, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1260, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0925, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x13AD, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0902, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0907, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1588, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0901, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x138D, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0B09, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0914, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x4009, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0B13, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x8809, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1C0C, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0920, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1262, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1260, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x11BF, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x11BB, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1066, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x090A, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x11FB, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x093B, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x11BB, 0x10 }, + { 
0x2510, CRL_REG_LEN_16BIT, 0x1263, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1260, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1400, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1508, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x11B8, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x12A0, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1200, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1026, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1000, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1300, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1100, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x437A, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0609, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0B05, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0708, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x4137, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x502C, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x2CFE, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x15FE, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0C2C, 0x10 }, + { 0x32E6, CRL_REG_LEN_16BIT, 0x00E0, 0x10 }, + { 0x1008, CRL_REG_LEN_16BIT, 0x036F, 0x10 }, + { 0x100C, CRL_REG_LEN_16BIT, 0x058F, 0x10 }, + { 0x100E, CRL_REG_LEN_16BIT, 0x07AF, 0x10 }, + { 0x1010, CRL_REG_LEN_16BIT, 0x014F, 0x10 }, + { 0x3230, CRL_REG_LEN_16BIT, 0x0312, 0x10 }, + { 0x3232, CRL_REG_LEN_16BIT, 0x0532, 0x10 }, + { 0x3234, CRL_REG_LEN_16BIT, 0x0752, 0x10 }, + { 0x3236, CRL_REG_LEN_16BIT, 0x00F2, 0x10 }, + { 0x3566, CRL_REG_LEN_16BIT, 0x3328, 0x10 }, + { 0x32D0, CRL_REG_LEN_16BIT, 0x3A02, 0x10 }, + { 0x32D2, CRL_REG_LEN_16BIT, 0x3508, 0x10 }, + { 0x32D4, CRL_REG_LEN_16BIT, 0x3702, 0x10 }, + { 0x32D6, CRL_REG_LEN_16BIT, 0x3C04, 0x10 }, + { 0x32DC, CRL_REG_LEN_16BIT, 0x370A, 0x10 }, + { 0x30B0, CRL_REG_LEN_16BIT, 0x0800, 0x10 }, + { 0x302A, CRL_REG_LEN_16BIT, 0x0008, 0x10 }, + { 0x302C, CRL_REG_LEN_16BIT, 0x0001, 0x10 }, + { 0x302E, CRL_REG_LEN_16BIT, 0x0003, 0x10 }, + { 0x3030, CRL_REG_LEN_16BIT, 0x004E, 0x10 }, + { 0x3036, CRL_REG_LEN_16BIT, 0x0008, 0x10 }, + { 0x3038, CRL_REG_LEN_16BIT, 0x0001, 0x10 }, + { 0x30B0, 
CRL_REG_LEN_16BIT, 0x0800, 0x10 }, + { 0x30A2, CRL_REG_LEN_16BIT, 0x0001, 0x10 }, + { 0x30A6, CRL_REG_LEN_16BIT, 0x0001, 0x10 }, + { 0x3040, CRL_REG_LEN_16BIT, 0x0000, 0x10 }, + { 0x3040, CRL_REG_LEN_16BIT, 0x0000, 0x10 }, + { 0x3082, CRL_REG_LEN_16BIT, 0x0008, 0x10 }, + { 0x3082, CRL_REG_LEN_16BIT, 0x0008, 0x10 }, + { 0x3082, CRL_REG_LEN_16BIT, 0x0008, 0x10 }, + { 0x3082, CRL_REG_LEN_16BIT, 0x0008, 0x10 }, + { 0x30BA, CRL_REG_LEN_16BIT, 0x11F2, 0x10 }, + { 0x30BA, CRL_REG_LEN_16BIT, 0x11F2, 0x10 }, + { 0x30BA, CRL_REG_LEN_16BIT, 0x11F2, 0x10 }, + { 0x3044, CRL_REG_LEN_16BIT, 0x0400, 0x10 }, + { 0x3044, CRL_REG_LEN_16BIT, 0x0400, 0x10 }, + { 0x3044, CRL_REG_LEN_16BIT, 0x0400, 0x10 }, + { 0x3044, CRL_REG_LEN_16BIT, 0x0400, 0x10 }, + { 0x3064, CRL_REG_LEN_16BIT, 0x1802, 0x10 }, + { 0x3064, CRL_REG_LEN_16BIT, 0x1802, 0x10 }, + { 0x3064, CRL_REG_LEN_16BIT, 0x1802, 0x10 }, + { 0x3064, CRL_REG_LEN_16BIT, 0x1802, 0x10 }, + { 0x33C0, CRL_REG_LEN_16BIT, 0x2000, 0x10 }, + { 0x33C2, CRL_REG_LEN_16BIT, 0x3440, 0x10 }, + { 0x33C4, CRL_REG_LEN_16BIT, 0x4890, 0x10 }, + { 0x33C6, CRL_REG_LEN_16BIT, 0x5CE0, 0x10 }, + { 0x33C8, CRL_REG_LEN_16BIT, 0x7140, 0x10 }, + { 0x33CA, CRL_REG_LEN_16BIT, 0x8590, 0x10 }, + { 0x33CC, CRL_REG_LEN_16BIT, 0x99E0, 0x10 }, + { 0x33CE, CRL_REG_LEN_16BIT, 0xAE40, 0x10 }, + { 0x33D0, CRL_REG_LEN_16BIT, 0xC290, 0x10 }, + { 0x33D2, CRL_REG_LEN_16BIT, 0xD6F0, 0x10 }, + { 0x33D4, CRL_REG_LEN_16BIT, 0xEB40, 0x10 }, + { 0x33D6, CRL_REG_LEN_16BIT, 0x0000, 0x10 }, + { 0x33DA, CRL_REG_LEN_16BIT, 0x0000, 0x10 }, + { 0x33E0, CRL_REG_LEN_16BIT, 0x0C80, 0x10 }, + { 0x33E0, CRL_REG_LEN_16BIT, 0x0C80, 0x10 }, + { 0x3180, CRL_REG_LEN_16BIT, 0x0080, 0x10 }, + { 0x33E4, CRL_REG_LEN_16BIT, 0x0080, 0x10 }, + { 0x33E0, CRL_REG_LEN_16BIT, 0x0C80, 0x10 }, + { 0x33E0, CRL_REG_LEN_16BIT, 0x0C80, 0x10 }, + { 0x3004, CRL_REG_LEN_16BIT, 0x0004, 0x10 }, + { 0x3008, CRL_REG_LEN_16BIT, 0x0783, 0x10 }, + { 0x3002, CRL_REG_LEN_16BIT, 0x003C, 0x10 }, + { 0x3006, CRL_REG_LEN_16BIT, 
0x047B, 0x10 }, + { 0x3032, CRL_REG_LEN_16BIT, 0x0000, 0x10 }, + { 0x3400, CRL_REG_LEN_16BIT, 0x0010, 0x10 }, + { 0x3402, CRL_REG_LEN_16BIT, 0x0788, 0x10 }, + { 0x3402, CRL_REG_LEN_16BIT, 0x0F10, 0x10 }, + { 0x3404, CRL_REG_LEN_16BIT, 0x04B8, 0x10 }, + { 0x3404, CRL_REG_LEN_16BIT, 0x0970, 0x10 }, + { 0x3082, CRL_REG_LEN_16BIT, 0x000C, 0x10 }, + { 0x30BA, CRL_REG_LEN_16BIT, 0x11F3, 0x10 }, + { 0x300C, CRL_REG_LEN_16BIT, 0x09B8, 0x10 }, + { 0x300A, CRL_REG_LEN_16BIT, 0x0498, 0x10 }, + { 0x3042, CRL_REG_LEN_16BIT, 0x0000, 0x10 }, + { 0x3238, CRL_REG_LEN_16BIT, 0x0222, 0x10 }, + { 0x3238, CRL_REG_LEN_16BIT, 0x0222, 0x10 }, + { 0x3238, CRL_REG_LEN_16BIT, 0x0222, 0x10 }, + { 0x3238, CRL_REG_LEN_16BIT, 0x0222, 0x10 }, + { 0x3012, CRL_REG_LEN_16BIT, 0x0131, 0x10 }, + { 0x3014, CRL_REG_LEN_16BIT, 0x098E, 0x10 }, + { 0x321E, CRL_REG_LEN_16BIT, 0x098E, 0x10 }, + { 0x3222, CRL_REG_LEN_16BIT, 0x098E, 0x10 }, + { 0x3226, CRL_REG_LEN_16BIT, 0x098E, 0x10 }, + { 0x30B0, CRL_REG_LEN_16BIT, 0x0800, 0x10 }, + { 0x32EA, CRL_REG_LEN_16BIT, 0x3C0E, 0x10 }, + { 0x32EA, CRL_REG_LEN_16BIT, 0x3C0E, 0x10 }, + { 0x32EA, CRL_REG_LEN_16BIT, 0x3C0E, 0x10 }, + { 0x32EC, CRL_REG_LEN_16BIT, 0x72A1, 0x10 }, + { 0x32EC, CRL_REG_LEN_16BIT, 0x72A1, 0x10 }, + { 0x32EC, CRL_REG_LEN_16BIT, 0x72A1, 0x10 }, + { 0x32EC, CRL_REG_LEN_16BIT, 0x72A1, 0x10 }, + { 0x32EC, CRL_REG_LEN_16BIT, 0x72A1, 0x10 }, + { 0x32EC, CRL_REG_LEN_16BIT, 0x72A1, 0x10 }, + { 0x31D0, CRL_REG_LEN_16BIT, 0x0001, 0x10 }, + { 0x31AE, CRL_REG_LEN_16BIT, 0x0201, 0x10 }, + { 0x31AE, CRL_REG_LEN_16BIT, 0x0001, 0x10 }, + { 0x31AC, CRL_REG_LEN_16BIT, 0x140C, 0x10 }, + { 0x340A, CRL_REG_LEN_16BIT, 0x0077, 0x10 }, + { 0x340C, CRL_REG_LEN_16BIT, 0x0080, 0x10 }, + { 0x30CE, CRL_REG_LEN_16BIT, 0x0120, 0x10 }, + { 0x301A, CRL_REG_LEN_16BIT, 0x19DC, 0x10 }, + { 0x3370, CRL_REG_LEN_16BIT, 0x0231, 0x10 }, +}; + +struct crl_mode_rep ar0231at_modes[] = { + { + .sd_rects_items = ARRAY_SIZE(ar0231at_1920_1088_rects), + .sd_rects = ar0231at_1920_1088_rects, 
+ .binn_hor = 1, + .binn_vert = 1, + .scale_m = 1, + .width = 1920, + .height = 1088, + .min_llp = 2162, + .min_fll = 1354, + .comp_items = 1, + .ctrl_data = &ar0231at_ctrl_data_modes[0], + .mode_regs_items = ARRAY_SIZE(ar0231at_1920_1088_linear_mode), + .mode_regs = ar0231at_1920_1088_linear_mode, + }, + { + .sd_rects_items = ARRAY_SIZE(ar0231at_1920_1088_rects), + .sd_rects = ar0231at_1920_1088_rects, + .binn_hor = 1, + .binn_vert = 1, + .scale_m = 1, + .width = 1920, + .height = 1088, + .min_llp = 1978, + .min_fll = 1480, + .comp_items = 1, + .ctrl_data = &ar0231at_ctrl_data_modes[1], + .mode_regs_items = ARRAY_SIZE(ar0231at_1920_1088_2hdr_mode), + .mode_regs = ar0231at_1920_1088_2hdr_mode, + }, + { + .sd_rects_items = ARRAY_SIZE(ar0231at_1920_1088_rects), + .sd_rects = ar0231at_1920_1088_rects, + .binn_hor = 1, + .binn_vert = 1, + .scale_m = 1, + .width = 1920, + .height = 1088, + .min_llp = 1978, + .min_fll = 1480, + .comp_items = 1, + .ctrl_data = &ar0231at_ctrl_data_modes[2], + .mode_regs_items = ARRAY_SIZE(ar0231at_1920_1088_3hdr_mode), + .mode_regs = ar0231at_1920_1088_3hdr_mode, + }, + { + .sd_rects_items = ARRAY_SIZE(ar0231at_1920_1088_rects), + .sd_rects = ar0231at_1920_1088_rects, + .binn_hor = 1, + .binn_vert = 1, + .scale_m = 1, + .width = 1920, + .height = 1088, + .min_llp = 2246, + .min_fll = 1304, + .comp_items = 1, + .ctrl_data = &ar0231at_ctrl_data_modes[3], + .mode_regs_items = ARRAY_SIZE(ar0231at_1920_1088_4hdr_mode), + .mode_regs = ar0231at_1920_1088_4hdr_mode, + }, + { + .sd_rects_items = ARRAY_SIZE(ar0231at_1920_1088_rects), + .sd_rects = ar0231at_1920_1088_rects, + .binn_hor = 1, + .binn_vert = 1, + .scale_m = 1, + .width = 1920, + .height = 1088, + .min_llp = 2162, + .min_fll = 1354, + .comp_items = 1, + .ctrl_data = &ar0231at_ctrl_data_modes[4], + .mode_regs_items = ARRAY_SIZE(ar0231at_1920_1088_10bit_linear_mode), + .mode_regs = ar0231at_1920_1088_10bit_linear_mode, + }, +}; + +struct crl_csi_data_fmt ar0231at_crl_csi_data_fmt[] = { + { 
+ .code = MEDIA_BUS_FMT_SGRBG12_1X12, + .pixel_order = CRL_PIXEL_ORDER_GRBG, + .bits_per_pixel = 12, + .regs_items = 0, + .regs = 0, + }, + { + .code = MEDIA_BUS_FMT_SRGGB12_1X12, + .pixel_order = CRL_PIXEL_ORDER_RGGB, + .bits_per_pixel = 12, + .regs_items = 0, + .regs = 0, + }, + { + .code = MEDIA_BUS_FMT_SBGGR12_1X12, + .pixel_order = CRL_PIXEL_ORDER_BGGR, + .bits_per_pixel = 12, + .regs_items = 0, + .regs = 0, + }, + { + .code = MEDIA_BUS_FMT_SGBRG12_1X12, + .pixel_order = CRL_PIXEL_ORDER_GBRG, + .bits_per_pixel = 12, + .regs_items = 0, + .regs = 0, + }, + { + .code = MEDIA_BUS_FMT_SGRBG10_1X10, + .pixel_order = CRL_PIXEL_ORDER_GRBG, + .bits_per_pixel = 10, + .regs_items = 0, + .regs = 0, + }, + { + .code = MEDIA_BUS_FMT_SRGGB10_1X10, + .pixel_order = CRL_PIXEL_ORDER_RGGB, + .bits_per_pixel = 10, + .regs_items = 0, + .regs = 0, + }, + { + .code = MEDIA_BUS_FMT_SBGGR10_1X10, + .pixel_order = CRL_PIXEL_ORDER_BGGR, + .bits_per_pixel = 10, + .regs_items = 0, + .regs = 0, + }, + { + .code = MEDIA_BUS_FMT_SGBRG10_1X10, + .pixel_order = CRL_PIXEL_ORDER_GBRG, + .bits_per_pixel = 10, + .regs_items = 0, + .regs = 0, + }, +}; + +static struct crl_arithmetic_ops ar0231at_ls2_ops[] = { + { + .op = CRL_BITWISE_LSHIFT, + .operand.entity_val = 2, + } +}; + +/* Line length pixel */ +static struct crl_dynamic_register_access ar0231at_llp_regs[] = { + { + .address = 0x300C, + .len = CRL_REG_LEN_16BIT, + .ops_items = 0, + .ops = 0, + .mask = 0xffff, + }, +}; + +/* Frame length lines */ +static struct crl_dynamic_register_access ar0231at_fll_regs[] = { + { + .address = 0x300A, + .len = CRL_REG_LEN_16BIT, + .ops_items = 0, + .ops = 0, + .mask = 0xffff, + }, +}; + +/* Analog gain register, also used in linear(non-HDR) mode */ +static struct crl_dynamic_register_access ar0231at_ana_gain_regs[] = { + { + .address = 0x3366, /* analog gain */ + .len = CRL_REG_LEN_16BIT, + .ops_items = 0, + .ops = 0, + .mask = 0xffff, + }, +}; + +/* Digital gain register */ +static struct 
crl_dynamic_register_access ar0231at_gl_regs[] = { + { + .address = 0x305E, /* global digital gain */ + .len = CRL_REG_LEN_16BIT, + .ops_items = 0, + .ops = 0, + .mask = 0x07ff, + }, +}; + +/* + * Exposure mode: + * 0: Linear mode + * 1: 2-HDR mode + * 2: 3-HDR mode + * 3: 4-HDR mode + */ +static struct crl_dynamic_register_access ar0231at_exposure_mode_regs[] = { + { + .address = 0x3082, + .len = CRL_REG_LEN_16BIT | CRL_REG_READ_AND_UPDATE, + .ops_items = ARRAY_SIZE(ar0231at_ls2_ops), + .ops = ar0231at_ls2_ops, + .mask = 0x000C, + }, +}; + +/* + * Exposure Ratio in HDR mode + * 0x8000: + * Select exposure ratio mode or + * configure exposure time for each x-HDR individually. + * 0x0222: + * Selected exposure ratio mode and each ratio is 4x. + * The ratio also can be 2x, 8x, 16x + */ +static struct crl_dynamic_register_access ar0231at_hdr_exposure_ratio_regs[] = { + { + .address = 0x3238, + .len = CRL_REG_LEN_16BIT | CRL_REG_READ_AND_UPDATE, + .ops_items = 0, + .ops = 0, + .mask = 0x8777, + }, +}; + +/* t1 exposure register, also used in linear(non-HDR) mode */ +static struct crl_dynamic_register_access ar0231at_t1expotime_regs[] = { + { + .address = 0x3012, /* coarse integration time T1 */ + .len = CRL_REG_LEN_16BIT, + .ops_items = 0, + .ops = 0, + .mask = 0xffff, + }, +}; + +/* t2 exposure register, only used in HDR mode */ +static struct crl_dynamic_register_access ar0231at_t2expotime_regs[] = { + { + .address = 0x3212, /* coarse integration time T2 */ + .len = CRL_REG_LEN_16BIT, + .ops_items = 0, + .ops = 0, + .mask = 0xffff, + }, +}; + +/* t3 exposure register, only used in HDR mode */ +static struct crl_dynamic_register_access ar0231at_t3expotime_regs[] = { + { + .address = 0x3216, /* coarse integration time T3 */ + .len = CRL_REG_LEN_16BIT, + .ops_items = 0, + .ops = 0, + .mask = 0xffff, + }, +}; + +/* t4 exposure register, only used in HDR mode */ +static struct crl_dynamic_register_access ar0231at_t4expotime_regs[] = { + { + .address = 0x321A, /* coarse 
integration time T4 */ + .len = CRL_REG_LEN_16BIT, + .ops_items = 0, + .ops = 0, + .mask = 0xffff, + }, +}; + +static struct crl_dynamic_register_access ar0231at_test_pattern_regs[] = { + { + .address = 0x3070, + .len = CRL_REG_LEN_16BIT, + .ops_items = 0, + .ops = 0, + .mask = 0xffff, + }, +}; + +static const char * const ar0231at_test_patterns[] = { + "Disabled", + "Solid Color", + "100% Vertical Color Bar", +}; + +struct crl_v4l2_ctrl ar0231at_v4l2_ctrls[] = { + { + .sd_type = CRL_SUBDEV_TYPE_BINNER, + .op_type = CRL_V4L2_CTRL_SET_OP, + .context = SENSOR_IDLE, + .ctrl_id = V4L2_CID_LINK_FREQ, + .name = "V4L2_CID_LINK_FREQ", + .type = CRL_V4L2_CTRL_TYPE_MENU_INT, + .data.v4l2_int_menu.def = 0, + .data.v4l2_int_menu.max = 0, + .data.v4l2_int_menu.menu = 0, + .flags = 0, + .impact = CRL_IMPACTS_NO_IMPACT, + .regs_items = 0, + .regs = 0, + .dep_items = 0, + .dep_ctrls = 0, + }, + { + .sd_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .op_type = CRL_V4L2_CTRL_GET_OP, + .context = SENSOR_POWERED_ON, + .ctrl_id = V4L2_CID_PIXEL_RATE, + .name = "V4L2_CID_PIXEL_RATE_PA", + .type = CRL_V4L2_CTRL_TYPE_INTEGER, + .data.std_data.min = 0, + .data.std_data.max = INT_MAX, + .data.std_data.step = 1, + .data.std_data.def = 0, + .flags = 0, + .impact = CRL_IMPACTS_NO_IMPACT, + .regs_items = 0, + .regs = 0, + .dep_items = 0, + .dep_ctrls = 0, + }, + { + .sd_type = CRL_SUBDEV_TYPE_BINNER, + .op_type = CRL_V4L2_CTRL_GET_OP, + .context = SENSOR_POWERED_ON, + .ctrl_id = V4L2_CID_PIXEL_RATE, + .name = "V4L2_CID_PIXEL_RATE_CSI", + .type = CRL_V4L2_CTRL_TYPE_INTEGER, + .data.std_data.min = 0, + .data.std_data.max = INT_MAX, + .data.std_data.step = 1, + .data.std_data.def = 0, + .flags = 0, + .impact = CRL_IMPACTS_NO_IMPACT, + .regs_items = 0, + .regs = 0, + .dep_items = 0, + .dep_ctrls = 0, + }, + { + .sd_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .op_type = CRL_V4L2_CTRL_SET_OP, + .context = SENSOR_POWERED_ON, + .ctrl_id = V4L2_CID_LINE_LENGTH_PIXELS, + .name = "Line Length Pixels", + .type = 
CRL_V4L2_CTRL_TYPE_CUSTOM, + .data.std_data.min = 1920, + .data.std_data.max = 65535, + .data.std_data.step = 1, + .data.std_data.def = 1978, + .flags = V4L2_CTRL_FLAG_UPDATE, + .impact = CRL_IMPACTS_NO_IMPACT, + .ctrl = 0, + .regs_items = ARRAY_SIZE(ar0231at_llp_regs), + .regs = ar0231at_llp_regs, + .dep_items = 0, + .dep_ctrls = 0, + .v4l2_type = V4L2_CTRL_TYPE_INTEGER, + }, + { + .sd_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .op_type = CRL_V4L2_CTRL_SET_OP, + .context = SENSOR_POWERED_ON, + .ctrl_id = V4L2_CID_FRAME_LENGTH_LINES, + .name = "Frame Length Lines", + .type = CRL_V4L2_CTRL_TYPE_CUSTOM, + .data.std_data.min = 1088, + .data.std_data.max = 65535, + .data.std_data.step = 1, + .data.std_data.def = 1480, + .flags = V4L2_CTRL_FLAG_UPDATE, + .impact = CRL_IMPACTS_NO_IMPACT, + .ctrl = 0, + .regs_items = ARRAY_SIZE(ar0231at_fll_regs), + .regs = ar0231at_fll_regs, + .dep_items = 0, + .dep_ctrls = 0, + .v4l2_type = V4L2_CTRL_TYPE_INTEGER, + }, + { + .sd_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .op_type = CRL_V4L2_CTRL_SET_OP, + .context = SENSOR_POWERED_ON, + .ctrl_id = V4L2_CID_ANALOGUE_GAIN, + .name = "V4L2_CID_ANALOGUE_GAIN", + .type = CRL_V4L2_CTRL_TYPE_INTEGER, + .data.std_data.min = 0x0000, + .data.std_data.max = 0xFFFF, + .data.std_data.step = 1, + .data.std_data.def = 0xAAAA, + .flags = 0, + .impact = CRL_IMPACTS_NO_IMPACT, + .ctrl = 0, + .regs_items = ARRAY_SIZE(ar0231at_ana_gain_regs), + .regs = ar0231at_ana_gain_regs, + .dep_items = 0, + .dep_ctrls = 0, + }, + { + .sd_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .op_type = CRL_V4L2_CTRL_SET_OP, + .context = SENSOR_POWERED_ON, + .ctrl_id = V4L2_CID_GAIN, + .name = "Digital Gain", + .type = CRL_V4L2_CTRL_TYPE_INTEGER, + .data.std_data.min = 0x0080, + .data.std_data.max = 0x07FF, + .data.std_data.step = 1, + .data.std_data.def = 0x0080, + .flags = 0, + .impact = CRL_IMPACTS_NO_IMPACT, + .ctrl = 0, + .regs_items = ARRAY_SIZE(ar0231at_gl_regs), + .regs = ar0231at_gl_regs, + .dep_items = 0, + .dep_ctrls = 0, + }, + { + 
.sd_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .op_type = CRL_V4L2_CTRL_SET_OP, + .context = SENSOR_POWERED_ON, + .ctrl_id = CRL_CID_EXPOSURE_MODE, + .name = "CRL_CID_EXPOSURE_MODE", + .type = CRL_V4L2_CTRL_TYPE_CUSTOM, + .data.std_data.min = 0, + .data.std_data.max = ARRAY_SIZE(ar0231at_ctrl_data_modes)-1, + .data.std_data.step = 1, + .data.std_data.def = 0x0, + .flags = V4L2_CTRL_FLAG_UPDATE, + .impact = CRL_IMPACTS_MODE_SELECTION, + .ctrl = 0, + .regs_items = ARRAY_SIZE(ar0231at_exposure_mode_regs), + .regs = ar0231at_exposure_mode_regs, + .dep_items = 0, + .dep_ctrls = 0, + .v4l2_type = V4L2_CTRL_TYPE_INTEGER, + }, + { + .sd_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .op_type = CRL_V4L2_CTRL_SET_OP, + .context = SENSOR_POWERED_ON, + .ctrl_id = CRL_CID_EXPOSURE_HDR_RATIO, + .name = "CRL_CID_EXPOSURE_HDR_RATIO", + .type = CRL_V4L2_CTRL_TYPE_CUSTOM, + .data.std_data.min = 0, + .data.std_data.max = 65535, + .data.std_data.step = 1, + .data.std_data.def = 0x0222, + .flags = V4L2_CTRL_FLAG_UPDATE, + .impact = CRL_IMPACTS_NO_IMPACT, + .ctrl = 0, + .regs_items = ARRAY_SIZE(ar0231at_hdr_exposure_ratio_regs), + .regs = ar0231at_hdr_exposure_ratio_regs, + .dep_items = 0, + .dep_ctrls = 0, + .v4l2_type = V4L2_CTRL_TYPE_INTEGER, + }, + { + .sd_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .op_type = CRL_V4L2_CTRL_SET_OP, + .context = SENSOR_POWERED_ON, + .ctrl_id = V4L2_CID_EXPOSURE, + .name = "T1_COARSE_EXPOSURE_TIME", + .type = CRL_V4L2_CTRL_TYPE_INTEGER, + .data.std_data.min = 0x0002, + .data.std_data.max = 0x04FF, + .data.std_data.step = 1, + .data.std_data.def = 0x0163, + .flags = 0, + .impact = CRL_IMPACTS_NO_IMPACT, + .ctrl = 0, + .regs_items = ARRAY_SIZE(ar0231at_t1expotime_regs), + .regs = ar0231at_t1expotime_regs, + .dep_items = 0, + .dep_ctrls = 0, + }, + { + .sd_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .op_type = CRL_V4L2_CTRL_SET_OP, + .context = SENSOR_POWERED_ON, + .ctrl_id = CRL_CID_EXPOSURE_SHS1, + .name = "T2_COARSE_EXPOSURE_TIME", + .type = CRL_V4L2_CTRL_TYPE_CUSTOM, + 
.data.std_data.min = 0x0002, + .data.std_data.max = 0x0300, + .data.std_data.step = 1, + .data.std_data.def = 0x0002, + .flags = V4L2_CTRL_FLAG_UPDATE, + .impact = CRL_IMPACTS_NO_IMPACT, + .ctrl = 0, + .regs_items = ARRAY_SIZE(ar0231at_t2expotime_regs), + .regs = ar0231at_t2expotime_regs, + .dep_items = 0, + .dep_ctrls = 0, + .v4l2_type = V4L2_CTRL_TYPE_INTEGER, + }, + { + .sd_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .op_type = CRL_V4L2_CTRL_SET_OP, + .context = SENSOR_POWERED_ON, + .ctrl_id = CRL_CID_EXPOSURE_SHS2, + .name = "T3_COARSE_EXPOSURE_TIME", + .type = CRL_V4L2_CTRL_TYPE_CUSTOM, + .data.std_data.min = 0x0002, + .data.std_data.max = 0x0180, + .data.std_data.step = 1, + .data.std_data.def = 0x0002, + .flags = V4L2_CTRL_FLAG_UPDATE, + .impact = CRL_IMPACTS_NO_IMPACT, + .ctrl = 0, + .regs_items = ARRAY_SIZE(ar0231at_t3expotime_regs), + .regs = ar0231at_t3expotime_regs, + .dep_items = 0, + .dep_ctrls = 0, + .v4l2_type = V4L2_CTRL_TYPE_INTEGER, + }, + { + .sd_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .op_type = CRL_V4L2_CTRL_SET_OP, + .context = SENSOR_POWERED_ON, + .ctrl_id = CRL_CID_EXPOSURE_SHS3, + .name = "T4_COARSE_EXPOSURE_TIME", + .type = CRL_V4L2_CTRL_TYPE_CUSTOM, + .data.std_data.min = 0x0, + .data.std_data.max = 0x0500, + .data.std_data.step = 1, + .data.std_data.def = 0x0, + .flags = V4L2_CTRL_FLAG_UPDATE, + .impact = CRL_IMPACTS_NO_IMPACT, + .ctrl = 0, + .regs_items = ARRAY_SIZE(ar0231at_t4expotime_regs), + .regs = ar0231at_t4expotime_regs, + .dep_items = 0, + .dep_ctrls = 0, + .v4l2_type = V4L2_CTRL_TYPE_INTEGER, + }, + { + .sd_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .op_type = CRL_V4L2_CTRL_SET_OP, + .context = SENSOR_POWERED_ON, + .ctrl_id = V4L2_CID_TEST_PATTERN, + .name = "V4L2_CID_TEST_PATTERN", + .type = CRL_V4L2_CTRL_TYPE_MENU_ITEMS, + .data.v4l2_menu_items.menu = ar0231at_test_patterns, + .data.v4l2_menu_items.size = ARRAY_SIZE(ar0231at_test_patterns)-1, + .flags = V4L2_CTRL_FLAG_UPDATE, + .impact = CRL_IMPACTS_NO_IMPACT, + .ctrl = 0, + .regs_items 
= ARRAY_SIZE(ar0231at_test_pattern_regs), + .regs = ar0231at_test_pattern_regs, + .dep_items = 0, + .dep_ctrls = 0, + }, +}; + +struct crl_sensor_detect_config ar0231at_sensor_detect_regset[] = { + { + .reg = { 0x3000, CRL_REG_LEN_16BIT, 0xFFFF }, + .width = 15, + }, +}; + +static struct crl_sensor_limits ar0231at_maxim_sensor_limits = { + .x_addr_min = 0, + .y_addr_min = 0, + .x_addr_max = 1920, + .y_addr_max = 1088, + .min_frame_length_lines = 240, + .max_frame_length_lines = 65535, + .min_line_length_pixels = 320, + .max_line_length_pixels = 32752, +}; + +struct crl_sensor_configuration ar0231at_crl_configuration = { + .powerup_regs_items = 0, + .powerup_regs = 0, + + .poweroff_regs_items = 0, + .poweroff_regs = 0, + + .power_items = 0, + .power_entities = 0, + + .pll_config_items = ARRAY_SIZE(ar0231at_pll_configurations), + .pll_configs = ar0231at_pll_configurations, + + .id_reg_items = ARRAY_SIZE(ar0231at_sensor_detect_regset), + .id_regs = ar0231at_sensor_detect_regset, + + .subdev_items = ARRAY_SIZE(ar0231at_sensor_subdevs), + .subdevs = ar0231at_sensor_subdevs, + + .modes_items = ARRAY_SIZE(ar0231at_modes), + .modes = ar0231at_modes, + + .csi_fmts_items = ARRAY_SIZE(ar0231at_crl_csi_data_fmt), + .csi_fmts = ar0231at_crl_csi_data_fmt, + + .v4l2_ctrls_items = ARRAY_SIZE(ar0231at_v4l2_ctrls), + .v4l2_ctrl_bank = ar0231at_v4l2_ctrls, + + .streamon_regs_items = 0, + .streamon_regs = 0, + .streamoff_regs_items = 0, + .streamoff_regs = 0, + + .sensor_limits = &ar0231at_maxim_sensor_limits, + +}; + +#endif /* __CRLMODULE_AR0231AT_CONFIGURATION_H_ */ diff --git a/drivers/media/i2c/crlmodule/crl_ar023z_configuration.h b/drivers/media/i2c/crlmodule/crl_ar023z_configuration.h new file mode 100644 index 0000000000000..2bd2b0f06b186 --- /dev/null +++ b/drivers/media/i2c/crlmodule/crl_ar023z_configuration.h @@ -0,0 +1,1903 @@ +/* + * Copyright (c) 2018 Intel Corporation. 
+ * + * Author: Alexei Zavjalov + * Author: Kiran Kumar + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License version + * 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + +#ifndef __CRLMODULE_AR023Z_CONFIGURATION_H_ +#define __CRLMODULE_AR023Z_CONFIGURATION_H_ + +#include "crlmodule-sensor-ds.h" + +#define TC358778_I2C_ADDRESS 0x0E /* Toshiba TC358778 Parallel-MIPI Bridge */ +#define AR023Z_I2C_ADDRESS 0x48 /* OnSemi AP0202AT ISP */ + +static struct crl_register_write_rep ar023z_1920_1080[] = { + { 0x0004, CRL_REG_LEN_16BIT, 0x0004, TC358778_I2C_ADDRESS }, + { 0x0002, CRL_REG_LEN_16BIT, 0x0001, TC358778_I2C_ADDRESS }, + { 0x0002, CRL_REG_LEN_16BIT, 0x0000, TC358778_I2C_ADDRESS }, + { 0x0016, CRL_REG_LEN_16BIT, 0x50cd, TC358778_I2C_ADDRESS }, + { 0x0018, CRL_REG_LEN_16BIT, 0x0213, TC358778_I2C_ADDRESS }, + + { 0x0006, CRL_REG_LEN_16BIT, 0x0040, TC358778_I2C_ADDRESS }, + { 0x0008, CRL_REG_LEN_16BIT, 0x0060, TC358778_I2C_ADDRESS }, + { 0x0022, CRL_REG_LEN_16BIT, 0x0F00, TC358778_I2C_ADDRESS }, + + { 0x0140, CRL_REG_LEN_32BIT, 0x00000000, TC358778_I2C_ADDRESS }, + { 0x0144, CRL_REG_LEN_32BIT, 0x00000000, TC358778_I2C_ADDRESS }, + { 0x0148, CRL_REG_LEN_32BIT, 0x00000000, TC358778_I2C_ADDRESS }, + { 0x014C, CRL_REG_LEN_32BIT, 0x00010000, TC358778_I2C_ADDRESS }, + { 0x0150, CRL_REG_LEN_32BIT, 0x00010000, TC358778_I2C_ADDRESS }, + + { 0x0210, CRL_REG_LEN_32BIT, 0x21000000, TC358778_I2C_ADDRESS }, + { 0x0214, CRL_REG_LEN_32BIT, 0x00040000, TC358778_I2C_ADDRESS }, + { 0x0218, CRL_REG_LEN_32BIT, 0x17050000, TC358778_I2C_ADDRESS }, + { 0x021C, CRL_REG_LEN_32BIT, 0x00020000, TC358778_I2C_ADDRESS }, + { 0x0220, CRL_REG_LEN_32BIT, 0x0a070000, 
TC358778_I2C_ADDRESS }, + { 0x0224, CRL_REG_LEN_32BIT, 0x41880000, TC358778_I2C_ADDRESS }, + { 0x0228, CRL_REG_LEN_32BIT, 0x00080000, TC358778_I2C_ADDRESS }, + { 0x022C, CRL_REG_LEN_32BIT, 0x00020000, TC358778_I2C_ADDRESS }, + { 0x0234, CRL_REG_LEN_32BIT, 0x00070000, TC358778_I2C_ADDRESS }, + { 0x0238, CRL_REG_LEN_32BIT, 0x00010000, TC358778_I2C_ADDRESS }, + { 0x0204, CRL_REG_LEN_32BIT, 0x00010000, TC358778_I2C_ADDRESS }, + + { 0x0518, CRL_REG_LEN_32BIT, 0x00010000, TC358778_I2C_ADDRESS }, + { 0x0500, CRL_REG_LEN_32BIT, 0x80A3A300, TC358778_I2C_ADDRESS }, + + { 0x0004, CRL_REG_LEN_16BIT, 0x0245, TC358778_I2C_ADDRESS }, + + { 0x0040, CRL_REG_LEN_16BIT, 0x8E00, AR023Z_I2C_ADDRESS }, + { 0x00, CRL_REG_LEN_DELAY, 20, 0x00 }, + { 0x098E, CRL_REG_LEN_16BIT, 0x7C00, AR023Z_I2C_ADDRESS }, + { 0xFC00, CRL_REG_LEN_16BIT, 0x0000, AR023Z_I2C_ADDRESS }, + { 0xFC02, CRL_REG_LEN_16BIT, 0x0054, AR023Z_I2C_ADDRESS }, + { 0x0040, CRL_REG_LEN_16BIT, 0x8706, AR023Z_I2C_ADDRESS }, + { 0x00, CRL_REG_LEN_DELAY, 20, 0x00 }, + { 0x0982, CRL_REG_LEN_16BIT, 0x0001, AR023Z_I2C_ADDRESS }, + { 0x098A, CRL_REG_LEN_16BIT, 0x4750, AR023Z_I2C_ADDRESS }, + { 0xC750, CRL_REG_LEN_16BIT, 0xC0F1, AR023Z_I2C_ADDRESS }, + { 0xC752, CRL_REG_LEN_16BIT, 0x0CEA, AR023Z_I2C_ADDRESS }, + { 0xC754, CRL_REG_LEN_16BIT, 0x1340, AR023Z_I2C_ADDRESS }, + { 0xC756, CRL_REG_LEN_16BIT, 0x75CF, AR023Z_I2C_ADDRESS }, + { 0xC758, CRL_REG_LEN_16BIT, 0xFFFF, AR023Z_I2C_ADDRESS }, + { 0xC75A, CRL_REG_LEN_16BIT, 0xA1A8, AR023Z_I2C_ADDRESS }, + { 0xC75C, CRL_REG_LEN_16BIT, 0x8DC4, AR023Z_I2C_ADDRESS }, + { 0xC75E, CRL_REG_LEN_16BIT, 0x0E0B, AR023Z_I2C_ADDRESS }, + { 0xC760, CRL_REG_LEN_16BIT, 0x10D1, AR023Z_I2C_ADDRESS }, + { 0xC762, CRL_REG_LEN_16BIT, 0xD804, AR023Z_I2C_ADDRESS }, + { 0xC764, CRL_REG_LEN_16BIT, 0xAD04, AR023Z_I2C_ADDRESS }, + { 0xC766, CRL_REG_LEN_16BIT, 0x70CF, AR023Z_I2C_ADDRESS }, + { 0xC768, CRL_REG_LEN_16BIT, 0x0001, AR023Z_I2C_ADDRESS }, + { 0xC76A, CRL_REG_LEN_16BIT, 0x7C06, AR023Z_I2C_ADDRESS }, + { 
0xC76C, CRL_REG_LEN_16BIT, 0x7840, AR023Z_I2C_ADDRESS }, + { 0xC76E, CRL_REG_LEN_16BIT, 0x0E0F, AR023Z_I2C_ADDRESS }, + { 0xC770, CRL_REG_LEN_16BIT, 0x1111, AR023Z_I2C_ADDRESS }, + { 0xC772, CRL_REG_LEN_16BIT, 0xD800, AR023Z_I2C_ADDRESS }, + { 0xC774, CRL_REG_LEN_16BIT, 0x0CEE, AR023Z_I2C_ADDRESS }, + { 0xC776, CRL_REG_LEN_16BIT, 0x0760, AR023Z_I2C_ADDRESS }, + { 0xC778, CRL_REG_LEN_16BIT, 0xAD04, AR023Z_I2C_ADDRESS }, + { 0xC77A, CRL_REG_LEN_16BIT, 0x0531, AR023Z_I2C_ADDRESS }, + { 0xC77C, CRL_REG_LEN_16BIT, 0x1340, AR023Z_I2C_ADDRESS }, + { 0xC77E, CRL_REG_LEN_16BIT, 0x78E0, AR023Z_I2C_ADDRESS }, + { 0xC780, CRL_REG_LEN_16BIT, 0xD900, AR023Z_I2C_ADDRESS }, + { 0xC782, CRL_REG_LEN_16BIT, 0xF00A, AR023Z_I2C_ADDRESS }, + { 0xC784, CRL_REG_LEN_16BIT, 0x70CF, AR023Z_I2C_ADDRESS }, + { 0xC786, CRL_REG_LEN_16BIT, 0xFFFF, AR023Z_I2C_ADDRESS }, + { 0xC788, CRL_REG_LEN_16BIT, 0xC79C, AR023Z_I2C_ADDRESS }, + { 0xC78A, CRL_REG_LEN_16BIT, 0x7835, AR023Z_I2C_ADDRESS }, + { 0xC78C, CRL_REG_LEN_16BIT, 0x8041, AR023Z_I2C_ADDRESS }, + { 0xC78E, CRL_REG_LEN_16BIT, 0x8000, AR023Z_I2C_ADDRESS }, + { 0xC790, CRL_REG_LEN_16BIT, 0xE102, AR023Z_I2C_ADDRESS }, + { 0xC792, CRL_REG_LEN_16BIT, 0xA040, AR023Z_I2C_ADDRESS }, + { 0xC794, CRL_REG_LEN_16BIT, 0x09F1, AR023Z_I2C_ADDRESS }, + { 0xC796, CRL_REG_LEN_16BIT, 0x8094, AR023Z_I2C_ADDRESS }, + { 0xC798, CRL_REG_LEN_16BIT, 0x7FE0, AR023Z_I2C_ADDRESS }, + { 0xC79A, CRL_REG_LEN_16BIT, 0xD800, AR023Z_I2C_ADDRESS }, + { 0xC79C, CRL_REG_LEN_16BIT, 0xFFFF, AR023Z_I2C_ADDRESS }, + { 0xC79E, CRL_REG_LEN_16BIT, 0xC160, AR023Z_I2C_ADDRESS }, + { 0xC7A0, CRL_REG_LEN_16BIT, 0xFFFF, AR023Z_I2C_ADDRESS }, + { 0xC7A2, CRL_REG_LEN_16BIT, 0xC750, AR023Z_I2C_ADDRESS }, + { 0x098E, CRL_REG_LEN_16BIT, 0x0000, AR023Z_I2C_ADDRESS }, + { 0xFC00, CRL_REG_LEN_16BIT, 0x0030, AR023Z_I2C_ADDRESS }, + { 0xFC02, CRL_REG_LEN_16BIT, 0x0140, AR023Z_I2C_ADDRESS }, + { 0xFC04, CRL_REG_LEN_16BIT, 0xA103, AR023Z_I2C_ADDRESS }, + { 0xFC06, CRL_REG_LEN_16BIT, 0x0204, 
AR023Z_I2C_ADDRESS }, + { 0xFC08, CRL_REG_LEN_16BIT, 0x0054, AR023Z_I2C_ADDRESS }, + { 0x0040, CRL_REG_LEN_16BIT, 0x8702, AR023Z_I2C_ADDRESS }, + { 0x00, CRL_REG_LEN_DELAY, 20, 0x00 }, + { 0x0040, CRL_REG_LEN_16BIT, 0x8701, AR023Z_I2C_ADDRESS }, + { 0x00, CRL_REG_LEN_DELAY, 20, 0x00 }, + { 0xFC00, CRL_REG_LEN_16BIT, 0x0054, AR023Z_I2C_ADDRESS }, + { 0xFC02, CRL_REG_LEN_16BIT, 0x01CC, AR023Z_I2C_ADDRESS }, + { 0x0040, CRL_REG_LEN_16BIT, 0x8706, AR023Z_I2C_ADDRESS }, + { 0x00, CRL_REG_LEN_DELAY, 20, 0x00 }, + { 0x0982, CRL_REG_LEN_16BIT, 0x0001, AR023Z_I2C_ADDRESS }, + { 0x098A, CRL_REG_LEN_16BIT, 0x47A4, AR023Z_I2C_ADDRESS }, + { 0xC7A4, CRL_REG_LEN_16BIT, 0xC0F1, AR023Z_I2C_ADDRESS }, + { 0xC7A6, CRL_REG_LEN_16BIT, 0x0C96, AR023Z_I2C_ADDRESS }, + { 0xC7A8, CRL_REG_LEN_16BIT, 0x1360, AR023Z_I2C_ADDRESS }, + { 0xC7AA, CRL_REG_LEN_16BIT, 0xD900, AR023Z_I2C_ADDRESS }, + { 0xC7AC, CRL_REG_LEN_16BIT, 0xC1A1, AR023Z_I2C_ADDRESS }, + { 0xC7AE, CRL_REG_LEN_16BIT, 0x75CF, AR023Z_I2C_ADDRESS }, + { 0xC7B0, CRL_REG_LEN_16BIT, 0xFFFF, AR023Z_I2C_ADDRESS }, + { 0xC7B2, CRL_REG_LEN_16BIT, 0x82A4, AR023Z_I2C_ADDRESS }, + { 0xC7B4, CRL_REG_LEN_16BIT, 0x8DC0, AR023Z_I2C_ADDRESS }, + { 0xC7B6, CRL_REG_LEN_16BIT, 0x0BEE, AR023Z_I2C_ADDRESS }, + { 0xC7B8, CRL_REG_LEN_16BIT, 0x03E0, AR023Z_I2C_ADDRESS }, + { 0xC7BA, CRL_REG_LEN_16BIT, 0x708B, AR023Z_I2C_ADDRESS }, + { 0xC7BC, CRL_REG_LEN_16BIT, 0x71CF, AR023Z_I2C_ADDRESS }, + { 0xC7BE, CRL_REG_LEN_16BIT, 0x0001, AR023Z_I2C_ADDRESS }, + { 0xC7C0, CRL_REG_LEN_16BIT, 0x7E2A, AR023Z_I2C_ADDRESS }, + { 0xC7C2, CRL_REG_LEN_16BIT, 0x081B, AR023Z_I2C_ADDRESS }, + { 0xC7C4, CRL_REG_LEN_16BIT, 0x0051, AR023Z_I2C_ADDRESS }, + { 0xC7C6, CRL_REG_LEN_16BIT, 0xC020, AR023Z_I2C_ADDRESS }, + { 0xC7C8, CRL_REG_LEN_16BIT, 0xE080, AR023Z_I2C_ADDRESS }, + { 0xC7CA, CRL_REG_LEN_16BIT, 0x20CC, AR023Z_I2C_ADDRESS }, + { 0xC7CC, CRL_REG_LEN_16BIT, 0x8062, AR023Z_I2C_ADDRESS }, + { 0xC7CE, CRL_REG_LEN_16BIT, 0xF407, AR023Z_I2C_ADDRESS }, + { 0xC7D0, 
CRL_REG_LEN_16BIT, 0xD802, AR023Z_I2C_ADDRESS }, + { 0xC7D2, CRL_REG_LEN_16BIT, 0x7960, AR023Z_I2C_ADDRESS }, + { 0xC7D4, CRL_REG_LEN_16BIT, 0xAD00, AR023Z_I2C_ADDRESS }, + { 0xC7D6, CRL_REG_LEN_16BIT, 0xADC0, AR023Z_I2C_ADDRESS }, + { 0xC7D8, CRL_REG_LEN_16BIT, 0xF002, AR023Z_I2C_ADDRESS }, + { 0xC7DA, CRL_REG_LEN_16BIT, 0x7940, AR023Z_I2C_ADDRESS }, + { 0xC7DC, CRL_REG_LEN_16BIT, 0x04CD, AR023Z_I2C_ADDRESS }, + { 0xC7DE, CRL_REG_LEN_16BIT, 0x1360, AR023Z_I2C_ADDRESS }, + { 0xC7E0, CRL_REG_LEN_16BIT, 0xC0A1, AR023Z_I2C_ADDRESS }, + { 0xC7E2, CRL_REG_LEN_16BIT, 0x78E0, AR023Z_I2C_ADDRESS }, + { 0xC7E4, CRL_REG_LEN_16BIT, 0xC0F1, AR023Z_I2C_ADDRESS }, + { 0xC7E6, CRL_REG_LEN_16BIT, 0x0C4E, AR023Z_I2C_ADDRESS }, + { 0xC7E8, CRL_REG_LEN_16BIT, 0x1340, AR023Z_I2C_ADDRESS }, + { 0xC7EA, CRL_REG_LEN_16BIT, 0x0CE6, AR023Z_I2C_ADDRESS }, + { 0xC7EC, CRL_REG_LEN_16BIT, 0x03C0, AR023Z_I2C_ADDRESS }, + { 0xC7EE, CRL_REG_LEN_16BIT, 0x701A, AR023Z_I2C_ADDRESS }, + { 0xC7F0, CRL_REG_LEN_16BIT, 0x0D0A, AR023Z_I2C_ADDRESS }, + { 0xC7F2, CRL_REG_LEN_16BIT, 0x1360, AR023Z_I2C_ADDRESS }, + { 0xC7F4, CRL_REG_LEN_16BIT, 0x218A, AR023Z_I2C_ADDRESS }, + { 0xC7F6, CRL_REG_LEN_16BIT, 0x0A0F, AR023Z_I2C_ADDRESS }, + { 0xC7F8, CRL_REG_LEN_16BIT, 0x7708, AR023Z_I2C_ADDRESS }, + { 0xC7FA, CRL_REG_LEN_16BIT, 0x75CF, AR023Z_I2C_ADDRESS }, + { 0xC7FC, CRL_REG_LEN_16BIT, 0xFFFF, AR023Z_I2C_ADDRESS }, + { 0xC7FE, CRL_REG_LEN_16BIT, 0xA168, AR023Z_I2C_ADDRESS }, + { 0xC800, CRL_REG_LEN_16BIT, 0x70CF, AR023Z_I2C_ADDRESS }, + { 0xC802, CRL_REG_LEN_16BIT, 0x0001, AR023Z_I2C_ADDRESS }, + { 0xC804, CRL_REG_LEN_16BIT, 0x8712, AR023Z_I2C_ADDRESS }, + { 0xC806, CRL_REG_LEN_16BIT, 0x7840, AR023Z_I2C_ADDRESS }, + { 0xC808, CRL_REG_LEN_16BIT, 0x1524, AR023Z_I2C_ADDRESS }, + { 0xC80A, CRL_REG_LEN_16BIT, 0x1080, AR023Z_I2C_ADDRESS }, + { 0xC80C, CRL_REG_LEN_16BIT, 0xE82D, AR023Z_I2C_ADDRESS }, + { 0xC80E, CRL_REG_LEN_16BIT, 0x76CF, AR023Z_I2C_ADDRESS }, + { 0xC810, CRL_REG_LEN_16BIT, 0xFFFF, AR023Z_I2C_ADDRESS 
}, + { 0xC812, CRL_REG_LEN_16BIT, 0xB530, AR023Z_I2C_ADDRESS }, + { 0xC814, CRL_REG_LEN_16BIT, 0x9623, AR023Z_I2C_ADDRESS }, + { 0xC816, CRL_REG_LEN_16BIT, 0x75CF, AR023Z_I2C_ADDRESS }, + { 0xC818, CRL_REG_LEN_16BIT, 0xFFFF, AR023Z_I2C_ADDRESS }, + { 0xC81A, CRL_REG_LEN_16BIT, 0xB5B0, AR023Z_I2C_ADDRESS }, + { 0xC81C, CRL_REG_LEN_16BIT, 0x9516, AR023Z_I2C_ADDRESS }, + { 0xC81E, CRL_REG_LEN_16BIT, 0xDB7D, AR023Z_I2C_ADDRESS }, + { 0xC820, CRL_REG_LEN_16BIT, 0xBB0A, AR023Z_I2C_ADDRESS }, + { 0xC822, CRL_REG_LEN_16BIT, 0x782C, AR023Z_I2C_ADDRESS }, + { 0xC824, CRL_REG_LEN_16BIT, 0x2942, AR023Z_I2C_ADDRESS }, + { 0xC826, CRL_REG_LEN_16BIT, 0x77C0, AR023Z_I2C_ADDRESS }, + { 0xC828, CRL_REG_LEN_16BIT, 0x712F, AR023Z_I2C_ADDRESS }, + { 0xC82A, CRL_REG_LEN_16BIT, 0x0EFE, AR023Z_I2C_ADDRESS }, + { 0xC82C, CRL_REG_LEN_16BIT, 0x1360, AR023Z_I2C_ADDRESS }, + { 0xC82E, CRL_REG_LEN_16BIT, 0xDA00, AR023Z_I2C_ADDRESS }, + { 0xC830, CRL_REG_LEN_16BIT, 0x730A, AR023Z_I2C_ADDRESS }, + { 0xC832, CRL_REG_LEN_16BIT, 0x0E1A, AR023Z_I2C_ADDRESS }, + { 0xC834, CRL_REG_LEN_16BIT, 0x1360, AR023Z_I2C_ADDRESS }, + { 0xC836, CRL_REG_LEN_16BIT, 0xDA00, AR023Z_I2C_ADDRESS }, + { 0xC838, CRL_REG_LEN_16BIT, 0x72CF, AR023Z_I2C_ADDRESS }, + { 0xC83A, CRL_REG_LEN_16BIT, 0x0000, AR023Z_I2C_ADDRESS }, + { 0xC83C, CRL_REG_LEN_16BIT, 0xFFFF, AR023Z_I2C_ADDRESS }, + { 0xC83E, CRL_REG_LEN_16BIT, 0x7150, AR023Z_I2C_ADDRESS }, + { 0xC840, CRL_REG_LEN_16BIT, 0x22CA, AR023Z_I2C_ADDRESS }, + { 0xC842, CRL_REG_LEN_16BIT, 0x0045, AR023Z_I2C_ADDRESS }, + { 0xC844, CRL_REG_LEN_16BIT, 0x71CF, AR023Z_I2C_ADDRESS }, + { 0xC846, CRL_REG_LEN_16BIT, 0xFFFF, AR023Z_I2C_ADDRESS }, + { 0xC848, CRL_REG_LEN_16BIT, 0xADB4, AR023Z_I2C_ADDRESS }, + { 0xC84A, CRL_REG_LEN_16BIT, 0x9122, AR023Z_I2C_ADDRESS }, + { 0xC84C, CRL_REG_LEN_16BIT, 0x1EC0, AR023Z_I2C_ADDRESS }, + { 0xC84E, CRL_REG_LEN_16BIT, 0x1084, AR023Z_I2C_ADDRESS }, + { 0xC850, CRL_REG_LEN_16BIT, 0x854A, AR023Z_I2C_ADDRESS }, + { 0xC852, CRL_REG_LEN_16BIT, 0x7230, 
AR023Z_I2C_ADDRESS }, + { 0xC854, CRL_REG_LEN_16BIT, 0x21CA, AR023Z_I2C_ADDRESS }, + { 0xC856, CRL_REG_LEN_16BIT, 0x008D, AR023Z_I2C_ADDRESS }, + { 0xC858, CRL_REG_LEN_16BIT, 0xB907, AR023Z_I2C_ADDRESS }, + { 0xC85A, CRL_REG_LEN_16BIT, 0x61F8, AR023Z_I2C_ADDRESS }, + { 0xC85C, CRL_REG_LEN_16BIT, 0xB861, AR023Z_I2C_ADDRESS }, + { 0xC85E, CRL_REG_LEN_16BIT, 0x0C9E, AR023Z_I2C_ADDRESS }, + { 0xC860, CRL_REG_LEN_16BIT, 0x1360, AR023Z_I2C_ADDRESS }, + { 0xC862, CRL_REG_LEN_16BIT, 0x71E9, AR023Z_I2C_ADDRESS }, + { 0xC864, CRL_REG_LEN_16BIT, 0xB51F, AR023Z_I2C_ADDRESS }, + { 0xC866, CRL_REG_LEN_16BIT, 0x0435, AR023Z_I2C_ADDRESS }, + { 0xC868, CRL_REG_LEN_16BIT, 0x1340, AR023Z_I2C_ADDRESS }, + { 0xC86A, CRL_REG_LEN_16BIT, 0x78E0, AR023Z_I2C_ADDRESS }, + { 0xC86C, CRL_REG_LEN_16BIT, 0x8850, AR023Z_I2C_ADDRESS }, + { 0xC86E, CRL_REG_LEN_16BIT, 0xD980, AR023Z_I2C_ADDRESS }, + { 0xC870, CRL_REG_LEN_16BIT, 0xEA08, AR023Z_I2C_ADDRESS }, + { 0xC872, CRL_REG_LEN_16BIT, 0x71CF, AR023Z_I2C_ADDRESS }, + { 0xC874, CRL_REG_LEN_16BIT, 0xFFFF, AR023Z_I2C_ADDRESS }, + { 0xC876, CRL_REG_LEN_16BIT, 0xAD10, AR023Z_I2C_ADDRESS }, + { 0xC878, CRL_REG_LEN_16BIT, 0x21F4, AR023Z_I2C_ADDRESS }, + { 0xC87A, CRL_REG_LEN_16BIT, 0x0081, AR023Z_I2C_ADDRESS }, + { 0xC87C, CRL_REG_LEN_16BIT, 0xB907, AR023Z_I2C_ADDRESS }, + { 0xC87E, CRL_REG_LEN_16BIT, 0xB925, AR023Z_I2C_ADDRESS }, + { 0xC880, CRL_REG_LEN_16BIT, 0x8851, AR023Z_I2C_ADDRESS }, + { 0xC882, CRL_REG_LEN_16BIT, 0xEA09, AR023Z_I2C_ADDRESS }, + { 0xC884, CRL_REG_LEN_16BIT, 0x72CF, AR023Z_I2C_ADDRESS }, + { 0xC886, CRL_REG_LEN_16BIT, 0xFFFF, AR023Z_I2C_ADDRESS }, + { 0xC888, CRL_REG_LEN_16BIT, 0xACF4, AR023Z_I2C_ADDRESS }, + { 0xC88A, CRL_REG_LEN_16BIT, 0x9254, AR023Z_I2C_ADDRESS }, + { 0xC88C, CRL_REG_LEN_16BIT, 0x7A2C, AR023Z_I2C_ADDRESS }, + { 0xC88E, CRL_REG_LEN_16BIT, 0x2941, AR023Z_I2C_ADDRESS }, + { 0xC890, CRL_REG_LEN_16BIT, 0x7141, AR023Z_I2C_ADDRESS }, + { 0xC892, CRL_REG_LEN_16BIT, 0x9043, AR023Z_I2C_ADDRESS }, + { 0xC894, 
CRL_REG_LEN_16BIT, 0x7A2C, AR023Z_I2C_ADDRESS }, + { 0xC896, CRL_REG_LEN_16BIT, 0x9011, AR023Z_I2C_ADDRESS }, + { 0xC898, CRL_REG_LEN_16BIT, 0x2941, AR023Z_I2C_ADDRESS }, + { 0xC89A, CRL_REG_LEN_16BIT, 0x7141, AR023Z_I2C_ADDRESS }, + { 0xC89C, CRL_REG_LEN_16BIT, 0x782C, AR023Z_I2C_ADDRESS }, + { 0xC89E, CRL_REG_LEN_16BIT, 0x7FE0, AR023Z_I2C_ADDRESS }, + { 0xC8A0, CRL_REG_LEN_16BIT, 0x2941, AR023Z_I2C_ADDRESS }, + { 0xC8A2, CRL_REG_LEN_16BIT, 0x71C0, AR023Z_I2C_ADDRESS }, + { 0xC8A4, CRL_REG_LEN_16BIT, 0xC0F1, AR023Z_I2C_ADDRESS }, + { 0xC8A6, CRL_REG_LEN_16BIT, 0x0B92, AR023Z_I2C_ADDRESS }, + { 0xC8A8, CRL_REG_LEN_16BIT, 0x1340, AR023Z_I2C_ADDRESS }, + { 0xC8AA, CRL_REG_LEN_16BIT, 0x7508, AR023Z_I2C_ADDRESS }, + { 0xC8AC, CRL_REG_LEN_16BIT, 0xFFF0, AR023Z_I2C_ADDRESS }, + { 0xC8AE, CRL_REG_LEN_16BIT, 0xB807, AR023Z_I2C_ADDRESS }, + { 0xC8B0, CRL_REG_LEN_16BIT, 0x71CF, AR023Z_I2C_ADDRESS }, + { 0xC8B2, CRL_REG_LEN_16BIT, 0xFFFF, AR023Z_I2C_ADDRESS }, + { 0xC8B4, CRL_REG_LEN_16BIT, 0xB5B0, AR023Z_I2C_ADDRESS }, + { 0xC8B6, CRL_REG_LEN_16BIT, 0x0C46, AR023Z_I2C_ADDRESS }, + { 0xC8B8, CRL_REG_LEN_16BIT, 0x1360, AR023Z_I2C_ADDRESS }, + { 0xC8BA, CRL_REG_LEN_16BIT, 0x913D, AR023Z_I2C_ADDRESS }, + { 0xC8BC, CRL_REG_LEN_16BIT, 0x7708, AR023Z_I2C_ADDRESS }, + { 0xC8BE, CRL_REG_LEN_16BIT, 0x9500, AR023Z_I2C_ADDRESS }, + { 0xC8C0, CRL_REG_LEN_16BIT, 0x9521, AR023Z_I2C_ADDRESS }, + { 0xC8C2, CRL_REG_LEN_16BIT, 0x0A16, AR023Z_I2C_ADDRESS }, + { 0xC8C4, CRL_REG_LEN_16BIT, 0x13E0, AR023Z_I2C_ADDRESS }, + { 0xC8C6, CRL_REG_LEN_16BIT, 0x9547, AR023Z_I2C_ADDRESS }, + { 0xC8C8, CRL_REG_LEN_16BIT, 0x7608, AR023Z_I2C_ADDRESS }, + { 0xC8CA, CRL_REG_LEN_16BIT, 0x70E9, AR023Z_I2C_ADDRESS }, + { 0xC8CC, CRL_REG_LEN_16BIT, 0x0A56, AR023Z_I2C_ADDRESS }, + { 0xC8CE, CRL_REG_LEN_16BIT, 0x10E0, AR023Z_I2C_ADDRESS }, + { 0xC8D0, CRL_REG_LEN_16BIT, 0xD908, AR023Z_I2C_ADDRESS }, + { 0xC8D2, CRL_REG_LEN_16BIT, 0x7508, AR023Z_I2C_ADDRESS }, + { 0xC8D4, CRL_REG_LEN_16BIT, 0x2582, AR023Z_I2C_ADDRESS 
}, + { 0xC8D6, CRL_REG_LEN_16BIT, 0x101C, AR023Z_I2C_ADDRESS }, + { 0xC8D8, CRL_REG_LEN_16BIT, 0x70C9, AR023Z_I2C_ADDRESS }, + { 0xC8DA, CRL_REG_LEN_16BIT, 0x0A4A, AR023Z_I2C_ADDRESS }, + { 0xC8DC, CRL_REG_LEN_16BIT, 0x10E0, AR023Z_I2C_ADDRESS }, + { 0xC8DE, CRL_REG_LEN_16BIT, 0xD908, AR023Z_I2C_ADDRESS }, + { 0xC8E0, CRL_REG_LEN_16BIT, 0x03C1, AR023Z_I2C_ADDRESS }, + { 0xC8E2, CRL_REG_LEN_16BIT, 0x1360, AR023Z_I2C_ADDRESS }, + { 0xC8E4, CRL_REG_LEN_16BIT, 0x60B8, AR023Z_I2C_ADDRESS }, + { 0xC8E6, CRL_REG_LEN_16BIT, 0x78E0, AR023Z_I2C_ADDRESS }, + { 0xC8E8, CRL_REG_LEN_16BIT, 0xC0F1, AR023Z_I2C_ADDRESS }, + { 0xC8EA, CRL_REG_LEN_16BIT, 0x0B4E, AR023Z_I2C_ADDRESS }, + { 0xC8EC, CRL_REG_LEN_16BIT, 0x1340, AR023Z_I2C_ADDRESS }, + { 0xC8EE, CRL_REG_LEN_16BIT, 0x77CF, AR023Z_I2C_ADDRESS }, + { 0xC8F0, CRL_REG_LEN_16BIT, 0xFFFF, AR023Z_I2C_ADDRESS }, + { 0xC8F2, CRL_REG_LEN_16BIT, 0x8004, AR023Z_I2C_ADDRESS }, + { 0xC8F4, CRL_REG_LEN_16BIT, 0x0BC2, AR023Z_I2C_ADDRESS }, + { 0xC8F6, CRL_REG_LEN_16BIT, 0x03C0, AR023Z_I2C_ADDRESS }, + { 0xC8F8, CRL_REG_LEN_16BIT, 0x75CF, AR023Z_I2C_ADDRESS }, + { 0xC8FA, CRL_REG_LEN_16BIT, 0xFFFF, AR023Z_I2C_ADDRESS }, + { 0xC8FC, CRL_REG_LEN_16BIT, 0xAC00, AR023Z_I2C_ADDRESS }, + { 0xC8FE, CRL_REG_LEN_16BIT, 0x7608, AR023Z_I2C_ADDRESS }, + { 0xC900, CRL_REG_LEN_16BIT, 0x8F05, AR023Z_I2C_ADDRESS }, + { 0xC902, CRL_REG_LEN_16BIT, 0x9522, AR023Z_I2C_ADDRESS }, + { 0xC904, CRL_REG_LEN_16BIT, 0x7610, AR023Z_I2C_ADDRESS }, + { 0xC906, CRL_REG_LEN_16BIT, 0x21D1, AR023Z_I2C_ADDRESS }, + { 0xC908, CRL_REG_LEN_16BIT, 0x80A2, AR023Z_I2C_ADDRESS }, + { 0xC90A, CRL_REG_LEN_16BIT, 0xF213, AR023Z_I2C_ADDRESS }, + { 0xC90C, CRL_REG_LEN_16BIT, 0xE680, AR023Z_I2C_ADDRESS }, + { 0xC90E, CRL_REG_LEN_16BIT, 0x26CC, AR023Z_I2C_ADDRESS }, + { 0xC910, CRL_REG_LEN_16BIT, 0x9062, AR023Z_I2C_ADDRESS }, + { 0xC912, CRL_REG_LEN_16BIT, 0xF40F, AR023Z_I2C_ADDRESS }, + { 0xC914, CRL_REG_LEN_16BIT, 0x70CF, AR023Z_I2C_ADDRESS }, + { 0xC916, CRL_REG_LEN_16BIT, 0xFFFF, 
AR023Z_I2C_ADDRESS }, + { 0xC918, CRL_REG_LEN_16BIT, 0xB960, AR023Z_I2C_ADDRESS }, + { 0xC91A, CRL_REG_LEN_16BIT, 0xFFE3, AR023Z_I2C_ADDRESS }, + { 0xC91C, CRL_REG_LEN_16BIT, 0xB504, AR023Z_I2C_ADDRESS }, + { 0xC91E, CRL_REG_LEN_16BIT, 0x08DE, AR023Z_I2C_ADDRESS }, + { 0xC920, CRL_REG_LEN_16BIT, 0x0220, AR023Z_I2C_ADDRESS }, + { 0xC922, CRL_REG_LEN_16BIT, 0xD800, AR023Z_I2C_ADDRESS }, + { 0xC924, CRL_REG_LEN_16BIT, 0xD801, AR023Z_I2C_ADDRESS }, + { 0xC926, CRL_REG_LEN_16BIT, 0xAD0E, AR023Z_I2C_ADDRESS }, + { 0xC928, CRL_REG_LEN_16BIT, 0xAFC5, AR023Z_I2C_ADDRESS }, + { 0xC92A, CRL_REG_LEN_16BIT, 0xD800, AR023Z_I2C_ADDRESS }, + { 0xC92C, CRL_REG_LEN_16BIT, 0xF005, AR023Z_I2C_ADDRESS }, + { 0xC92E, CRL_REG_LEN_16BIT, 0x70CF, AR023Z_I2C_ADDRESS }, + { 0xC930, CRL_REG_LEN_16BIT, 0x0000, AR023Z_I2C_ADDRESS }, + { 0xC932, CRL_REG_LEN_16BIT, 0x0F7E, AR023Z_I2C_ADDRESS }, + { 0xC934, CRL_REG_LEN_16BIT, 0x7840, AR023Z_I2C_ADDRESS }, + { 0xC936, CRL_REG_LEN_16BIT, 0x036D, AR023Z_I2C_ADDRESS }, + { 0xC938, CRL_REG_LEN_16BIT, 0x1340, AR023Z_I2C_ADDRESS }, + { 0xC93A, CRL_REG_LEN_16BIT, 0x78E0, AR023Z_I2C_ADDRESS }, + { 0xC93C, CRL_REG_LEN_16BIT, 0xD900, AR023Z_I2C_ADDRESS }, + { 0xC93E, CRL_REG_LEN_16BIT, 0xF00A, AR023Z_I2C_ADDRESS }, + { 0xC940, CRL_REG_LEN_16BIT, 0x70CF, AR023Z_I2C_ADDRESS }, + { 0xC942, CRL_REG_LEN_16BIT, 0xFFFF, AR023Z_I2C_ADDRESS }, + { 0xC944, CRL_REG_LEN_16BIT, 0xC958, AR023Z_I2C_ADDRESS }, + { 0xC946, CRL_REG_LEN_16BIT, 0x7835, AR023Z_I2C_ADDRESS }, + { 0xC948, CRL_REG_LEN_16BIT, 0x8041, AR023Z_I2C_ADDRESS }, + { 0xC94A, CRL_REG_LEN_16BIT, 0x8000, AR023Z_I2C_ADDRESS }, + { 0xC94C, CRL_REG_LEN_16BIT, 0xE102, AR023Z_I2C_ADDRESS }, + { 0xC94E, CRL_REG_LEN_16BIT, 0xA040, AR023Z_I2C_ADDRESS }, + { 0xC950, CRL_REG_LEN_16BIT, 0x09F1, AR023Z_I2C_ADDRESS }, + { 0xC952, CRL_REG_LEN_16BIT, 0x8194, AR023Z_I2C_ADDRESS }, + { 0xC954, CRL_REG_LEN_16BIT, 0x7FE0, AR023Z_I2C_ADDRESS }, + { 0xC956, CRL_REG_LEN_16BIT, 0xD800, AR023Z_I2C_ADDRESS }, + { 0xC958, 
CRL_REG_LEN_16BIT, 0xFFFF, AR023Z_I2C_ADDRESS }, + { 0xC95A, CRL_REG_LEN_16BIT, 0xC164, AR023Z_I2C_ADDRESS }, + { 0xC95C, CRL_REG_LEN_16BIT, 0xFFFF, AR023Z_I2C_ADDRESS }, + { 0xC95E, CRL_REG_LEN_16BIT, 0xC7A4, AR023Z_I2C_ADDRESS }, + { 0xC960, CRL_REG_LEN_16BIT, 0xFFFF, AR023Z_I2C_ADDRESS }, + { 0xC962, CRL_REG_LEN_16BIT, 0xC198, AR023Z_I2C_ADDRESS }, + { 0xC964, CRL_REG_LEN_16BIT, 0xFFFF, AR023Z_I2C_ADDRESS }, + { 0xC966, CRL_REG_LEN_16BIT, 0xC7E4, AR023Z_I2C_ADDRESS }, + { 0xC968, CRL_REG_LEN_16BIT, 0xFFFF, AR023Z_I2C_ADDRESS }, + { 0xC96A, CRL_REG_LEN_16BIT, 0xBB6C, AR023Z_I2C_ADDRESS }, + { 0xC96C, CRL_REG_LEN_16BIT, 0xFFFF, AR023Z_I2C_ADDRESS }, + { 0xC96E, CRL_REG_LEN_16BIT, 0xC8E8, AR023Z_I2C_ADDRESS }, + { 0x098E, CRL_REG_LEN_16BIT, 0x0000, AR023Z_I2C_ADDRESS }, + { 0xFC00, CRL_REG_LEN_16BIT, 0x01EC, AR023Z_I2C_ADDRESS }, + { 0xFC02, CRL_REG_LEN_16BIT, 0x0240, AR023Z_I2C_ADDRESS }, + { 0xFC04, CRL_REG_LEN_16BIT, 0xA103, AR023Z_I2C_ADDRESS }, + { 0xFC06, CRL_REG_LEN_16BIT, 0x0204, AR023Z_I2C_ADDRESS }, + { 0xFC08, CRL_REG_LEN_16BIT, 0x01CC, AR023Z_I2C_ADDRESS }, + { 0x0040, CRL_REG_LEN_16BIT, 0x8702, AR023Z_I2C_ADDRESS }, + { 0x00, CRL_REG_LEN_DELAY, 20, 0x00 }, + { 0x0040, CRL_REG_LEN_16BIT, 0x8701, AR023Z_I2C_ADDRESS }, + { 0x00, CRL_REG_LEN_DELAY, 20, 0x00 }, + { 0xFC00, CRL_REG_LEN_16BIT, 0x0220, AR023Z_I2C_ADDRESS }, + { 0xFC02, CRL_REG_LEN_16BIT, 0x005C, AR023Z_I2C_ADDRESS }, + { 0x0040, CRL_REG_LEN_16BIT, 0x8706, AR023Z_I2C_ADDRESS }, + { 0x00, CRL_REG_LEN_DELAY, 20, 0x00 }, + { 0x0982, CRL_REG_LEN_16BIT, 0x0001, AR023Z_I2C_ADDRESS }, + { 0x098A, CRL_REG_LEN_16BIT, 0x4970, AR023Z_I2C_ADDRESS }, + { 0xC970, CRL_REG_LEN_16BIT, 0xC0F1, AR023Z_I2C_ADDRESS }, + { 0xC972, CRL_REG_LEN_16BIT, 0x0ACA, AR023Z_I2C_ADDRESS }, + { 0xC974, CRL_REG_LEN_16BIT, 0x1340, AR023Z_I2C_ADDRESS }, + { 0xC976, CRL_REG_LEN_16BIT, 0x71CF, AR023Z_I2C_ADDRESS }, + { 0xC978, CRL_REG_LEN_16BIT, 0x0001, AR023Z_I2C_ADDRESS }, + { 0xC97A, CRL_REG_LEN_16BIT, 0x2896, AR023Z_I2C_ADDRESS 
}, + { 0xC97C, CRL_REG_LEN_16BIT, 0x7940, AR023Z_I2C_ADDRESS }, + { 0xC97E, CRL_REG_LEN_16BIT, 0x250A, AR023Z_I2C_ADDRESS }, + { 0xC980, CRL_REG_LEN_16BIT, 0x9000, AR023Z_I2C_ADDRESS }, + { 0xC982, CRL_REG_LEN_16BIT, 0x76CF, AR023Z_I2C_ADDRESS }, + { 0xC984, CRL_REG_LEN_16BIT, 0xFFFF, AR023Z_I2C_ADDRESS }, + { 0xC986, CRL_REG_LEN_16BIT, 0xB51C, AR023Z_I2C_ADDRESS }, + { 0xC988, CRL_REG_LEN_16BIT, 0xF407, AR023Z_I2C_ADDRESS }, + { 0xC98A, CRL_REG_LEN_16BIT, 0x0D4A, AR023Z_I2C_ADDRESS }, + { 0xC98C, CRL_REG_LEN_16BIT, 0x0B20, AR023Z_I2C_ADDRESS }, + { 0xC98E, CRL_REG_LEN_16BIT, 0x8E12, AR023Z_I2C_ADDRESS }, + { 0xC990, CRL_REG_LEN_16BIT, 0x0C6A, AR023Z_I2C_ADDRESS }, + { 0xC992, CRL_REG_LEN_16BIT, 0x0AE0, AR023Z_I2C_ADDRESS }, + { 0xC994, CRL_REG_LEN_16BIT, 0xD801, AR023Z_I2C_ADDRESS }, + { 0xC996, CRL_REG_LEN_16BIT, 0x0315, AR023Z_I2C_ADDRESS }, + { 0xC998, CRL_REG_LEN_16BIT, 0x1360, AR023Z_I2C_ADDRESS }, + { 0xC99A, CRL_REG_LEN_16BIT, 0x70A9, AR023Z_I2C_ADDRESS }, + { 0xC99C, CRL_REG_LEN_16BIT, 0xD900, AR023Z_I2C_ADDRESS }, + { 0xC99E, CRL_REG_LEN_16BIT, 0xF00A, AR023Z_I2C_ADDRESS }, + { 0xC9A0, CRL_REG_LEN_16BIT, 0x70CF, AR023Z_I2C_ADDRESS }, + { 0xC9A2, CRL_REG_LEN_16BIT, 0xFFFF, AR023Z_I2C_ADDRESS }, + { 0xC9A4, CRL_REG_LEN_16BIT, 0xC9C4, AR023Z_I2C_ADDRESS }, + { 0xC9A6, CRL_REG_LEN_16BIT, 0x7835, AR023Z_I2C_ADDRESS }, + { 0xC9A8, CRL_REG_LEN_16BIT, 0x8041, AR023Z_I2C_ADDRESS }, + { 0xC9AA, CRL_REG_LEN_16BIT, 0x8000, AR023Z_I2C_ADDRESS }, + { 0xC9AC, CRL_REG_LEN_16BIT, 0xE102, AR023Z_I2C_ADDRESS }, + { 0xC9AE, CRL_REG_LEN_16BIT, 0xA040, AR023Z_I2C_ADDRESS }, + { 0xC9B0, CRL_REG_LEN_16BIT, 0x09F1, AR023Z_I2C_ADDRESS }, + { 0xC9B2, CRL_REG_LEN_16BIT, 0x8094, AR023Z_I2C_ADDRESS }, + { 0xC9B4, CRL_REG_LEN_16BIT, 0x71CF, AR023Z_I2C_ADDRESS }, + { 0xC9B6, CRL_REG_LEN_16BIT, 0xFFFF, AR023Z_I2C_ADDRESS }, + { 0xC9B8, CRL_REG_LEN_16BIT, 0xB51C, AR023Z_I2C_ADDRESS }, + { 0xC9BA, CRL_REG_LEN_16BIT, 0xD808, AR023Z_I2C_ADDRESS }, + { 0xC9BC, CRL_REG_LEN_16BIT, 0xA912, 
AR023Z_I2C_ADDRESS }, + { 0xC9BE, CRL_REG_LEN_16BIT, 0x7FE0, AR023Z_I2C_ADDRESS }, + { 0xC9C0, CRL_REG_LEN_16BIT, 0xD800, AR023Z_I2C_ADDRESS }, + { 0xC9C2, CRL_REG_LEN_16BIT, 0x0000, AR023Z_I2C_ADDRESS }, + { 0xC9C4, CRL_REG_LEN_16BIT, 0xFFFF, AR023Z_I2C_ADDRESS }, + { 0xC9C6, CRL_REG_LEN_16BIT, 0xBFE4, AR023Z_I2C_ADDRESS }, + { 0xC9C8, CRL_REG_LEN_16BIT, 0xFFFF, AR023Z_I2C_ADDRESS }, + { 0xC9CA, CRL_REG_LEN_16BIT, 0xC970, AR023Z_I2C_ADDRESS }, + { 0x098E, CRL_REG_LEN_16BIT, 0x0000, AR023Z_I2C_ADDRESS }, + { 0xFC00, CRL_REG_LEN_16BIT, 0x024C, AR023Z_I2C_ADDRESS }, + { 0xFC02, CRL_REG_LEN_16BIT, 0x0340, AR023Z_I2C_ADDRESS }, + { 0xFC04, CRL_REG_LEN_16BIT, 0xA103, AR023Z_I2C_ADDRESS }, + { 0xFC06, CRL_REG_LEN_16BIT, 0x0204, AR023Z_I2C_ADDRESS }, + { 0xFC08, CRL_REG_LEN_16BIT, 0x005C, AR023Z_I2C_ADDRESS }, + { 0x0040, CRL_REG_LEN_16BIT, 0x8702, AR023Z_I2C_ADDRESS }, + { 0x00, CRL_REG_LEN_DELAY, 20, 0x00 }, + { 0x0040, CRL_REG_LEN_16BIT, 0x8701, AR023Z_I2C_ADDRESS }, + { 0x00, CRL_REG_LEN_DELAY, 20, 0x00 }, + { 0xCC02, CRL_REG_LEN_16BIT, 0x0493, AR023Z_I2C_ADDRESS }, + { 0x0040, CRL_REG_LEN_16BIT, 0x8D00, AR023Z_I2C_ADDRESS }, + { 0x00, CRL_REG_LEN_DELAY, 20, 0x00 }, + { 0x0040, CRL_REG_LEN_16BIT, 0x8D01, AR023Z_I2C_ADDRESS }, + { 0x00, CRL_REG_LEN_DELAY, 20, 0x00 }, + { 0xFC00, CRL_REG_LEN_16BIT, 0x3088, AR023Z_I2C_ADDRESS }, + { 0xFC02, CRL_REG_LEN_16BIT, 0x0280, AR023Z_I2C_ADDRESS }, + { 0xFC04, CRL_REG_LEN_16BIT, 0x0000, AR023Z_I2C_ADDRESS }, + { 0x0040, CRL_REG_LEN_16BIT, 0x8D06, AR023Z_I2C_ADDRESS }, + { 0x00, CRL_REG_LEN_DELAY, 20, 0x00 }, + { 0x0040, CRL_REG_LEN_16BIT, 0x8D08, AR023Z_I2C_ADDRESS }, + { 0x00, CRL_REG_LEN_DELAY, 20, 0x00 }, + { 0xFC00, CRL_REG_LEN_16BIT, 0x3086, AR023Z_I2C_ADDRESS }, + { 0xFC02, CRL_REG_LEN_16BIT, 0x0C45, AR023Z_I2C_ADDRESS }, + { 0xFC04, CRL_REG_LEN_16BIT, 0x5872, AR023Z_I2C_ADDRESS }, + { 0xFC06, CRL_REG_LEN_16BIT, 0x9B4A, AR023Z_I2C_ADDRESS }, + { 0xFC08, CRL_REG_LEN_16BIT, 0x3143, AR023Z_I2C_ADDRESS }, + { 0xFC0A, 
CRL_REG_LEN_16BIT, 0x428E, AR023Z_I2C_ADDRESS }, + { 0xFC0C, CRL_REG_LEN_16BIT, 0x032A, AR023Z_I2C_ADDRESS }, + { 0xFC0E, CRL_REG_LEN_16BIT, 0x1400, AR023Z_I2C_ADDRESS }, + { 0x0040, CRL_REG_LEN_16BIT, 0x8D06, AR023Z_I2C_ADDRESS }, + { 0x00, CRL_REG_LEN_DELAY, 20, 0x00 }, + { 0x0040, CRL_REG_LEN_16BIT, 0x8D08, AR023Z_I2C_ADDRESS }, + { 0x00, CRL_REG_LEN_DELAY, 20, 0x00 }, + { 0xFC00, CRL_REG_LEN_16BIT, 0x3086, AR023Z_I2C_ADDRESS }, + { 0xFC02, CRL_REG_LEN_16BIT, 0x0C45, AR023Z_I2C_ADDRESS }, + { 0xFC04, CRL_REG_LEN_16BIT, 0x787B, AR023Z_I2C_ADDRESS }, + { 0xFC06, CRL_REG_LEN_16BIT, 0x3DFF, AR023Z_I2C_ADDRESS }, + { 0xFC08, CRL_REG_LEN_16BIT, 0x3DFF, AR023Z_I2C_ADDRESS }, + { 0xFC0A, CRL_REG_LEN_16BIT, 0x3DEA, AR023Z_I2C_ADDRESS }, + { 0xFC0C, CRL_REG_LEN_16BIT, 0x2A04, AR023Z_I2C_ADDRESS }, + { 0xFC0E, CRL_REG_LEN_16BIT, 0x3D00, AR023Z_I2C_ADDRESS }, + { 0x0040, CRL_REG_LEN_16BIT, 0x8D06, AR023Z_I2C_ADDRESS }, + { 0x0040, CRL_REG_LEN_16BIT, 0x8D08, AR023Z_I2C_ADDRESS }, + { 0xFC00, CRL_REG_LEN_16BIT, 0x3086, AR023Z_I2C_ADDRESS }, + { 0xFC02, CRL_REG_LEN_16BIT, 0x0C10, AR023Z_I2C_ADDRESS }, + { 0xFC04, CRL_REG_LEN_16BIT, 0x2A05, AR023Z_I2C_ADDRESS }, + { 0xFC06, CRL_REG_LEN_16BIT, 0x2A15, AR023Z_I2C_ADDRESS }, + { 0xFC08, CRL_REG_LEN_16BIT, 0x352A, AR023Z_I2C_ADDRESS }, + { 0xFC0A, CRL_REG_LEN_16BIT, 0x053D, AR023Z_I2C_ADDRESS }, + { 0xFC0C, CRL_REG_LEN_16BIT, 0x1045, AR023Z_I2C_ADDRESS }, + { 0xFC0E, CRL_REG_LEN_16BIT, 0x5800, AR023Z_I2C_ADDRESS }, + { 0x0040, CRL_REG_LEN_16BIT, 0x8D06, AR023Z_I2C_ADDRESS }, + { 0x00, CRL_REG_LEN_DELAY, 20, 0x00 }, + { 0x0040, CRL_REG_LEN_16BIT, 0x8D08, AR023Z_I2C_ADDRESS }, + { 0x00, CRL_REG_LEN_DELAY, 20, 0x00 }, + { 0xFC00, CRL_REG_LEN_16BIT, 0x3086, AR023Z_I2C_ADDRESS }, + { 0xFC02, CRL_REG_LEN_16BIT, 0x0C2A, AR023Z_I2C_ADDRESS }, + { 0xFC04, CRL_REG_LEN_16BIT, 0x042A, AR023Z_I2C_ADDRESS }, + { 0xFC06, CRL_REG_LEN_16BIT, 0x143D, AR023Z_I2C_ADDRESS }, + { 0xFC08, CRL_REG_LEN_16BIT, 0xFF3D, AR023Z_I2C_ADDRESS }, + { 0xFC0A, 
CRL_REG_LEN_16BIT, 0xFF3D, AR023Z_I2C_ADDRESS }, + { 0xFC0C, CRL_REG_LEN_16BIT, 0xEA2A, AR023Z_I2C_ADDRESS }, + { 0xFC0E, CRL_REG_LEN_16BIT, 0x0400, AR023Z_I2C_ADDRESS }, + { 0x0040, CRL_REG_LEN_16BIT, 0x8D06, AR023Z_I2C_ADDRESS }, + { 0x00, CRL_REG_LEN_DELAY, 20, 0x00 }, + { 0x0040, CRL_REG_LEN_16BIT, 0x8D08, AR023Z_I2C_ADDRESS }, + { 0x00, CRL_REG_LEN_DELAY, 20, 0x00 }, + { 0xFC00, CRL_REG_LEN_16BIT, 0x3086, AR023Z_I2C_ADDRESS }, + { 0xFC02, CRL_REG_LEN_16BIT, 0x0C62, AR023Z_I2C_ADDRESS }, + { 0xFC04, CRL_REG_LEN_16BIT, 0x2A28, AR023Z_I2C_ADDRESS }, + { 0xFC06, CRL_REG_LEN_16BIT, 0x8E00, AR023Z_I2C_ADDRESS }, + { 0xFC08, CRL_REG_LEN_16BIT, 0x362A, AR023Z_I2C_ADDRESS }, + { 0xFC0A, CRL_REG_LEN_16BIT, 0x083D, AR023Z_I2C_ADDRESS }, + { 0xFC0C, CRL_REG_LEN_16BIT, 0x647A, AR023Z_I2C_ADDRESS }, + { 0xFC0E, CRL_REG_LEN_16BIT, 0x3D00, AR023Z_I2C_ADDRESS }, + { 0x0040, CRL_REG_LEN_16BIT, 0x8D06, AR023Z_I2C_ADDRESS }, + { 0x00, CRL_REG_LEN_DELAY, 20, 0x00 }, + { 0x0040, CRL_REG_LEN_16BIT, 0x8D08, AR023Z_I2C_ADDRESS }, + { 0x00, CRL_REG_LEN_DELAY, 20, 0x00 }, + { 0xFC00, CRL_REG_LEN_16BIT, 0x3086, AR023Z_I2C_ADDRESS }, + { 0xFC02, CRL_REG_LEN_16BIT, 0x0C04, AR023Z_I2C_ADDRESS }, + { 0xFC04, CRL_REG_LEN_16BIT, 0x442C, AR023Z_I2C_ADDRESS }, + { 0xFC06, CRL_REG_LEN_16BIT, 0x4B8F, AR023Z_I2C_ADDRESS }, + { 0xFC08, CRL_REG_LEN_16BIT, 0x0043, AR023Z_I2C_ADDRESS }, + { 0xFC0A, CRL_REG_LEN_16BIT, 0x0C2D, AR023Z_I2C_ADDRESS }, + { 0xFC0C, CRL_REG_LEN_16BIT, 0x6343, AR023Z_I2C_ADDRESS }, + { 0xFC0E, CRL_REG_LEN_16BIT, 0x1600, AR023Z_I2C_ADDRESS }, + { 0x0040, CRL_REG_LEN_16BIT, 0x8D06, AR023Z_I2C_ADDRESS }, + { 0x00, CRL_REG_LEN_DELAY, 20, 0x00 }, + { 0x0040, CRL_REG_LEN_16BIT, 0x8D08, AR023Z_I2C_ADDRESS }, + { 0x00, CRL_REG_LEN_DELAY, 20, 0x00 }, + { 0xFC00, CRL_REG_LEN_16BIT, 0x3086, AR023Z_I2C_ADDRESS }, + { 0xFC02, CRL_REG_LEN_16BIT, 0x0C8E, AR023Z_I2C_ADDRESS }, + { 0xFC04, CRL_REG_LEN_16BIT, 0x032A, AR023Z_I2C_ADDRESS }, + { 0xFC06, CRL_REG_LEN_16BIT, 0xFC5C, AR023Z_I2C_ADDRESS 
}, + { 0xFC08, CRL_REG_LEN_16BIT, 0x1D57, AR023Z_I2C_ADDRESS }, + { 0xFC0A, CRL_REG_LEN_16BIT, 0x5449, AR023Z_I2C_ADDRESS }, + { 0xFC0C, CRL_REG_LEN_16BIT, 0x5F53, AR023Z_I2C_ADDRESS }, + { 0xFC0E, CRL_REG_LEN_16BIT, 0x0500, AR023Z_I2C_ADDRESS }, + { 0x0040, CRL_REG_LEN_16BIT, 0x8D06, AR023Z_I2C_ADDRESS }, + { 0x00, CRL_REG_LEN_DELAY, 20, 0x00 }, + { 0x0040, CRL_REG_LEN_16BIT, 0x8D08, AR023Z_I2C_ADDRESS }, + { 0x00, CRL_REG_LEN_DELAY, 20, 0x00 }, + { 0xFC00, CRL_REG_LEN_16BIT, 0x3086, AR023Z_I2C_ADDRESS }, + { 0xFC02, CRL_REG_LEN_16BIT, 0x0C53, AR023Z_I2C_ADDRESS }, + { 0xFC04, CRL_REG_LEN_16BIT, 0x074D, AR023Z_I2C_ADDRESS }, + { 0xFC06, CRL_REG_LEN_16BIT, 0x2BF8, AR023Z_I2C_ADDRESS }, + { 0xFC08, CRL_REG_LEN_16BIT, 0x1016, AR023Z_I2C_ADDRESS }, + { 0xFC0A, CRL_REG_LEN_16BIT, 0x4C08, AR023Z_I2C_ADDRESS }, + { 0xFC0C, CRL_REG_LEN_16BIT, 0x5556, AR023Z_I2C_ADDRESS }, + { 0xFC0E, CRL_REG_LEN_16BIT, 0x2B00, AR023Z_I2C_ADDRESS }, + { 0x0040, CRL_REG_LEN_16BIT, 0x8D06, AR023Z_I2C_ADDRESS }, + { 0x00, CRL_REG_LEN_DELAY, 20, 0x00 }, + { 0x0040, CRL_REG_LEN_16BIT, 0x8D08, AR023Z_I2C_ADDRESS }, + { 0x00, CRL_REG_LEN_DELAY, 20, 0x00 }, + { 0xFC00, CRL_REG_LEN_16BIT, 0x3086, AR023Z_I2C_ADDRESS }, + { 0xFC02, CRL_REG_LEN_16BIT, 0x0CB8, AR023Z_I2C_ADDRESS }, + { 0xFC04, CRL_REG_LEN_16BIT, 0x2B98, AR023Z_I2C_ADDRESS }, + { 0xFC06, CRL_REG_LEN_16BIT, 0x4E11, AR023Z_I2C_ADDRESS }, + { 0xFC08, CRL_REG_LEN_16BIT, 0x2904, AR023Z_I2C_ADDRESS }, + { 0xFC0A, CRL_REG_LEN_16BIT, 0x2984, AR023Z_I2C_ADDRESS }, + { 0xFC0C, CRL_REG_LEN_16BIT, 0x2994, AR023Z_I2C_ADDRESS }, + { 0xFC0E, CRL_REG_LEN_16BIT, 0x6000, AR023Z_I2C_ADDRESS }, + { 0x0040, CRL_REG_LEN_16BIT, 0x8D06, AR023Z_I2C_ADDRESS }, + { 0x00, CRL_REG_LEN_DELAY, 20, 0x00 }, + { 0x0040, CRL_REG_LEN_16BIT, 0x8D08, AR023Z_I2C_ADDRESS }, + { 0x00, CRL_REG_LEN_DELAY, 20, 0x00 }, + { 0xFC00, CRL_REG_LEN_16BIT, 0x3086, AR023Z_I2C_ADDRESS }, + { 0xFC02, CRL_REG_LEN_16BIT, 0x0C5C, AR023Z_I2C_ADDRESS }, + { 0xFC04, CRL_REG_LEN_16BIT, 0x195C, 
AR023Z_I2C_ADDRESS }, + { 0xFC06, CRL_REG_LEN_16BIT, 0x1B45, AR023Z_I2C_ADDRESS }, + { 0xFC08, CRL_REG_LEN_16BIT, 0x4845, AR023Z_I2C_ADDRESS }, + { 0xFC0A, CRL_REG_LEN_16BIT, 0x0845, AR023Z_I2C_ADDRESS }, + { 0xFC0C, CRL_REG_LEN_16BIT, 0x8829, AR023Z_I2C_ADDRESS }, + { 0xFC0E, CRL_REG_LEN_16BIT, 0xB600, AR023Z_I2C_ADDRESS }, + { 0x0040, CRL_REG_LEN_16BIT, 0x8D06, AR023Z_I2C_ADDRESS }, + { 0x00, CRL_REG_LEN_DELAY, 20, 0x00 }, + { 0x0040, CRL_REG_LEN_16BIT, 0x8D08, AR023Z_I2C_ADDRESS }, + { 0x00, CRL_REG_LEN_DELAY, 20, 0x00 }, + { 0xFC00, CRL_REG_LEN_16BIT, 0x3086, AR023Z_I2C_ADDRESS }, + { 0xFC02, CRL_REG_LEN_16BIT, 0x0C8E, AR023Z_I2C_ADDRESS }, + { 0xFC04, CRL_REG_LEN_16BIT, 0x012A, AR023Z_I2C_ADDRESS }, + { 0xFC06, CRL_REG_LEN_16BIT, 0xF83E, AR023Z_I2C_ADDRESS }, + { 0xFC08, CRL_REG_LEN_16BIT, 0x022A, AR023Z_I2C_ADDRESS }, + { 0xFC0A, CRL_REG_LEN_16BIT, 0xFA3F, AR023Z_I2C_ADDRESS }, + { 0xFC0C, CRL_REG_LEN_16BIT, 0x095C, AR023Z_I2C_ADDRESS }, + { 0xFC0E, CRL_REG_LEN_16BIT, 0x1B00, AR023Z_I2C_ADDRESS }, + { 0x0040, CRL_REG_LEN_16BIT, 0x8D06, AR023Z_I2C_ADDRESS }, + { 0x00, CRL_REG_LEN_DELAY, 20, 0x00 }, + { 0x0040, CRL_REG_LEN_16BIT, 0x8D08, AR023Z_I2C_ADDRESS }, + { 0x00, CRL_REG_LEN_DELAY, 20, 0x00 }, + { 0xFC00, CRL_REG_LEN_16BIT, 0x3086, AR023Z_I2C_ADDRESS }, + { 0xFC02, CRL_REG_LEN_16BIT, 0x0C29, AR023Z_I2C_ADDRESS }, + { 0xFC04, CRL_REG_LEN_16BIT, 0xB23F, AR023Z_I2C_ADDRESS }, + { 0xFC06, CRL_REG_LEN_16BIT, 0x0C3E, AR023Z_I2C_ADDRESS }, + { 0xFC08, CRL_REG_LEN_16BIT, 0x023E, AR023Z_I2C_ADDRESS }, + { 0xFC0A, CRL_REG_LEN_16BIT, 0x135C, AR023Z_I2C_ADDRESS }, + { 0xFC0C, CRL_REG_LEN_16BIT, 0x133F, AR023Z_I2C_ADDRESS }, + { 0xFC0E, CRL_REG_LEN_16BIT, 0x1100, AR023Z_I2C_ADDRESS }, + { 0x0040, CRL_REG_LEN_16BIT, 0x8D06, AR023Z_I2C_ADDRESS }, + { 0x00, CRL_REG_LEN_DELAY, 20, 0x00 }, + { 0x0040, CRL_REG_LEN_16BIT, 0x8D08, AR023Z_I2C_ADDRESS }, + { 0x00, CRL_REG_LEN_DELAY, 20, 0x00 }, + { 0xFC00, CRL_REG_LEN_16BIT, 0x3086, AR023Z_I2C_ADDRESS }, + { 0xFC02, 
CRL_REG_LEN_16BIT, 0x0C3E, AR023Z_I2C_ADDRESS }, + { 0xFC04, CRL_REG_LEN_16BIT, 0x0B5F, AR023Z_I2C_ADDRESS }, + { 0xFC06, CRL_REG_LEN_16BIT, 0x2B90, AR023Z_I2C_ADDRESS }, + { 0xFC08, CRL_REG_LEN_16BIT, 0x2AF2, AR023Z_I2C_ADDRESS }, + { 0xFC0A, CRL_REG_LEN_16BIT, 0x2B80, AR023Z_I2C_ADDRESS }, + { 0xFC0C, CRL_REG_LEN_16BIT, 0x3E04, AR023Z_I2C_ADDRESS }, + { 0xFC0E, CRL_REG_LEN_16BIT, 0x3F00, AR023Z_I2C_ADDRESS }, + { 0x0040, CRL_REG_LEN_16BIT, 0x8D06, AR023Z_I2C_ADDRESS }, + { 0x00, CRL_REG_LEN_DELAY, 20, 0x00 }, + { 0x0040, CRL_REG_LEN_16BIT, 0x8D08, AR023Z_I2C_ADDRESS }, + { 0x00, CRL_REG_LEN_DELAY, 20, 0x00 }, + { 0xFC00, CRL_REG_LEN_16BIT, 0x3086, AR023Z_I2C_ADDRESS }, + { 0xFC02, CRL_REG_LEN_16BIT, 0x0C06, AR023Z_I2C_ADDRESS }, + { 0xFC04, CRL_REG_LEN_16BIT, 0x6029, AR023Z_I2C_ADDRESS }, + { 0xFC06, CRL_REG_LEN_16BIT, 0xA229, AR023Z_I2C_ADDRESS }, + { 0xFC08, CRL_REG_LEN_16BIT, 0xA35F, AR023Z_I2C_ADDRESS }, + { 0xFC0A, CRL_REG_LEN_16BIT, 0x4D19, AR023Z_I2C_ADDRESS }, + { 0xFC0C, CRL_REG_LEN_16BIT, 0x2AFA, AR023Z_I2C_ADDRESS }, + { 0xFC0E, CRL_REG_LEN_16BIT, 0x2900, AR023Z_I2C_ADDRESS }, + { 0x0040, CRL_REG_LEN_16BIT, 0x8D06, AR023Z_I2C_ADDRESS }, + { 0x00, CRL_REG_LEN_DELAY, 20, 0x00 }, + { 0x0040, CRL_REG_LEN_16BIT, 0x8D08, AR023Z_I2C_ADDRESS }, + { 0x00, CRL_REG_LEN_DELAY, 20, 0x00 }, + { 0xFC00, CRL_REG_LEN_16BIT, 0x3086, AR023Z_I2C_ADDRESS }, + { 0xFC02, CRL_REG_LEN_16BIT, 0x0C83, AR023Z_I2C_ADDRESS }, + { 0xFC04, CRL_REG_LEN_16BIT, 0x45A8, AR023Z_I2C_ADDRESS }, + { 0xFC06, CRL_REG_LEN_16BIT, 0x3E07, AR023Z_I2C_ADDRESS }, + { 0xFC08, CRL_REG_LEN_16BIT, 0x2AFB, AR023Z_I2C_ADDRESS }, + { 0xFC0A, CRL_REG_LEN_16BIT, 0x3E29, AR023Z_I2C_ADDRESS }, + { 0xFC0C, CRL_REG_LEN_16BIT, 0x4588, AR023Z_I2C_ADDRESS }, + { 0xFC0E, CRL_REG_LEN_16BIT, 0x2100, AR023Z_I2C_ADDRESS }, + { 0x0040, CRL_REG_LEN_16BIT, 0x8D06, AR023Z_I2C_ADDRESS }, + { 0x00, CRL_REG_LEN_DELAY, 20, 0x00 }, + { 0x0040, CRL_REG_LEN_16BIT, 0x8D08, AR023Z_I2C_ADDRESS }, + { 0x00, CRL_REG_LEN_DELAY, 20, 0x00 
}, + { 0xFC00, CRL_REG_LEN_16BIT, 0x3086, AR023Z_I2C_ADDRESS }, + { 0xFC02, CRL_REG_LEN_16BIT, 0x0C3E, AR023Z_I2C_ADDRESS }, + { 0xFC04, CRL_REG_LEN_16BIT, 0x082A, AR023Z_I2C_ADDRESS }, + { 0xFC06, CRL_REG_LEN_16BIT, 0xFA5D, AR023Z_I2C_ADDRESS }, + { 0xFC08, CRL_REG_LEN_16BIT, 0x2992, AR023Z_I2C_ADDRESS }, + { 0xFC0A, CRL_REG_LEN_16BIT, 0x8810, AR023Z_I2C_ADDRESS }, + { 0xFC0C, CRL_REG_LEN_16BIT, 0x2B04, AR023Z_I2C_ADDRESS }, + { 0xFC0E, CRL_REG_LEN_16BIT, 0x8B00, AR023Z_I2C_ADDRESS }, + { 0x0040, CRL_REG_LEN_16BIT, 0x8D06, AR023Z_I2C_ADDRESS }, + { 0x00, CRL_REG_LEN_DELAY, 20, 0x00 }, + { 0x0040, CRL_REG_LEN_16BIT, 0x8D08, AR023Z_I2C_ADDRESS }, + { 0x00, CRL_REG_LEN_DELAY, 20, 0x00 }, + { 0xFC00, CRL_REG_LEN_16BIT, 0x3086, AR023Z_I2C_ADDRESS }, + { 0xFC02, CRL_REG_LEN_16BIT, 0x0C16, AR023Z_I2C_ADDRESS }, + { 0xFC04, CRL_REG_LEN_16BIT, 0x858D, AR023Z_I2C_ADDRESS }, + { 0xFC06, CRL_REG_LEN_16BIT, 0x484D, AR023Z_I2C_ADDRESS }, + { 0xFC08, CRL_REG_LEN_16BIT, 0x4E2B, AR023Z_I2C_ADDRESS }, + { 0xFC0A, CRL_REG_LEN_16BIT, 0x804C, AR023Z_I2C_ADDRESS }, + { 0xFC0C, CRL_REG_LEN_16BIT, 0x0B60, AR023Z_I2C_ADDRESS }, + { 0xFC0E, CRL_REG_LEN_16BIT, 0x3F00, AR023Z_I2C_ADDRESS }, + { 0x0040, CRL_REG_LEN_16BIT, 0x8D06, AR023Z_I2C_ADDRESS }, + { 0x00, CRL_REG_LEN_DELAY, 20, 0x00 }, + { 0x0040, CRL_REG_LEN_16BIT, 0x8D08, AR023Z_I2C_ADDRESS }, + { 0x00, CRL_REG_LEN_DELAY, 20, 0x00 }, + { 0xFC00, CRL_REG_LEN_16BIT, 0x3086, AR023Z_I2C_ADDRESS }, + { 0xFC02, CRL_REG_LEN_16BIT, 0x0C28, AR023Z_I2C_ADDRESS }, + { 0xFC04, CRL_REG_LEN_16BIT, 0x2AF2, AR023Z_I2C_ADDRESS }, + { 0xFC06, CRL_REG_LEN_16BIT, 0x3F0F, AR023Z_I2C_ADDRESS }, + { 0xFC08, CRL_REG_LEN_16BIT, 0x2982, AR023Z_I2C_ADDRESS }, + { 0xFC0A, CRL_REG_LEN_16BIT, 0x2983, AR023Z_I2C_ADDRESS }, + { 0xFC0C, CRL_REG_LEN_16BIT, 0x2943, AR023Z_I2C_ADDRESS }, + { 0xFC0E, CRL_REG_LEN_16BIT, 0x5C00, AR023Z_I2C_ADDRESS }, + { 0x0040, CRL_REG_LEN_16BIT, 0x8D06, AR023Z_I2C_ADDRESS }, + { 0x00, CRL_REG_LEN_DELAY, 20, 0x00 }, + { 0x0040, 
CRL_REG_LEN_16BIT, 0x8D08, AR023Z_I2C_ADDRESS }, + { 0x00, CRL_REG_LEN_DELAY, 20, 0x00 }, + { 0xFC00, CRL_REG_LEN_16BIT, 0x3086, AR023Z_I2C_ADDRESS }, + { 0xFC02, CRL_REG_LEN_16BIT, 0x0C15, AR023Z_I2C_ADDRESS }, + { 0xFC04, CRL_REG_LEN_16BIT, 0x5F4D, AR023Z_I2C_ADDRESS }, + { 0xFC06, CRL_REG_LEN_16BIT, 0x192A, AR023Z_I2C_ADDRESS }, + { 0xFC08, CRL_REG_LEN_16BIT, 0xFA45, AR023Z_I2C_ADDRESS }, + { 0xFC0A, CRL_REG_LEN_16BIT, 0x588E, AR023Z_I2C_ADDRESS }, + { 0xFC0C, CRL_REG_LEN_16BIT, 0x002A, AR023Z_I2C_ADDRESS }, + { 0xFC0E, CRL_REG_LEN_16BIT, 0x9800, AR023Z_I2C_ADDRESS }, + { 0x0040, CRL_REG_LEN_16BIT, 0x8D06, AR023Z_I2C_ADDRESS }, + { 0x00, CRL_REG_LEN_DELAY, 20, 0x00 }, + { 0x0040, CRL_REG_LEN_16BIT, 0x8D08, AR023Z_I2C_ADDRESS }, + { 0x00, CRL_REG_LEN_DELAY, 20, 0x00 }, + { 0xFC00, CRL_REG_LEN_16BIT, 0x3086, AR023Z_I2C_ADDRESS }, + { 0xFC02, CRL_REG_LEN_16BIT, 0x0C3F, AR023Z_I2C_ADDRESS }, + { 0xFC04, CRL_REG_LEN_16BIT, 0x0612, AR023Z_I2C_ADDRESS }, + { 0xFC06, CRL_REG_LEN_16BIT, 0x444A, AR023Z_I2C_ADDRESS }, + { 0xFC08, CRL_REG_LEN_16BIT, 0x0443, AR023Z_I2C_ADDRESS }, + { 0xFC0A, CRL_REG_LEN_16BIT, 0x1605, AR023Z_I2C_ADDRESS }, + { 0xFC0C, CRL_REG_LEN_16BIT, 0x4316, AR023Z_I2C_ADDRESS }, + { 0xFC0E, CRL_REG_LEN_16BIT, 0x5800, AR023Z_I2C_ADDRESS }, + { 0x0040, CRL_REG_LEN_16BIT, 0x8D06, AR023Z_I2C_ADDRESS }, + { 0x00, CRL_REG_LEN_DELAY, 20, 0x00 }, + { 0x0040, CRL_REG_LEN_16BIT, 0x8D08, AR023Z_I2C_ADDRESS }, + { 0x00, CRL_REG_LEN_DELAY, 20, 0x00 }, + { 0xFC00, CRL_REG_LEN_16BIT, 0x3086, AR023Z_I2C_ADDRESS }, + { 0xFC02, CRL_REG_LEN_16BIT, 0x0C43, AR023Z_I2C_ADDRESS }, + { 0xFC04, CRL_REG_LEN_16BIT, 0x165A, AR023Z_I2C_ADDRESS }, + { 0xFC06, CRL_REG_LEN_16BIT, 0x4316, AR023Z_I2C_ADDRESS }, + { 0xFC08, CRL_REG_LEN_16BIT, 0x0643, AR023Z_I2C_ADDRESS }, + { 0xFC0A, CRL_REG_LEN_16BIT, 0x1607, AR023Z_I2C_ADDRESS }, + { 0xFC0C, CRL_REG_LEN_16BIT, 0x4316, AR023Z_I2C_ADDRESS }, + { 0xFC0E, CRL_REG_LEN_16BIT, 0x8E00, AR023Z_I2C_ADDRESS }, + { 0x0040, CRL_REG_LEN_16BIT, 
0x8D06, AR023Z_I2C_ADDRESS }, + { 0x00, CRL_REG_LEN_DELAY, 20, 0x00 }, + { 0x0040, CRL_REG_LEN_16BIT, 0x8D08, AR023Z_I2C_ADDRESS }, + { 0x00, CRL_REG_LEN_DELAY, 20, 0x00 }, + { 0xFC00, CRL_REG_LEN_16BIT, 0x3086, AR023Z_I2C_ADDRESS }, + { 0xFC02, CRL_REG_LEN_16BIT, 0x0C03, AR023Z_I2C_ADDRESS }, + { 0xFC04, CRL_REG_LEN_16BIT, 0x2A9C, AR023Z_I2C_ADDRESS }, + { 0xFC06, CRL_REG_LEN_16BIT, 0x4578, AR023Z_I2C_ADDRESS }, + { 0xFC08, CRL_REG_LEN_16BIT, 0x7B3F, AR023Z_I2C_ADDRESS }, + { 0xFC0A, CRL_REG_LEN_16BIT, 0x072A, AR023Z_I2C_ADDRESS }, + { 0xFC0C, CRL_REG_LEN_16BIT, 0x9D3E, AR023Z_I2C_ADDRESS }, + { 0xFC0E, CRL_REG_LEN_16BIT, 0x2E00, AR023Z_I2C_ADDRESS }, + { 0x0040, CRL_REG_LEN_16BIT, 0x8D06, AR023Z_I2C_ADDRESS }, + { 0x00, CRL_REG_LEN_DELAY, 20, 0x00 }, + { 0x0040, CRL_REG_LEN_16BIT, 0x8D08, AR023Z_I2C_ADDRESS }, + { 0x00, CRL_REG_LEN_DELAY, 20, 0x00 }, + { 0xFC00, CRL_REG_LEN_16BIT, 0x3086, AR023Z_I2C_ADDRESS }, + { 0xFC02, CRL_REG_LEN_16BIT, 0x0C45, AR023Z_I2C_ADDRESS }, + { 0xFC04, CRL_REG_LEN_16BIT, 0x5825, AR023Z_I2C_ADDRESS }, + { 0xFC06, CRL_REG_LEN_16BIT, 0x3E06, AR023Z_I2C_ADDRESS }, + { 0xFC08, CRL_REG_LEN_16BIT, 0x8E01, AR023Z_I2C_ADDRESS }, + { 0xFC0A, CRL_REG_LEN_16BIT, 0x2A98, AR023Z_I2C_ADDRESS }, + { 0xFC0C, CRL_REG_LEN_16BIT, 0x8E00, AR023Z_I2C_ADDRESS }, + { 0xFC0E, CRL_REG_LEN_16BIT, 0x1200, AR023Z_I2C_ADDRESS }, + { 0x0040, CRL_REG_LEN_16BIT, 0x8D06, AR023Z_I2C_ADDRESS }, + { 0x00, CRL_REG_LEN_DELAY, 20, 0x00 }, + { 0x0040, CRL_REG_LEN_16BIT, 0x8D08, AR023Z_I2C_ADDRESS }, + { 0x00, CRL_REG_LEN_DELAY, 20, 0x00 }, + { 0xFC00, CRL_REG_LEN_16BIT, 0x3086, AR023Z_I2C_ADDRESS }, + { 0xFC02, CRL_REG_LEN_16BIT, 0x0C44, AR023Z_I2C_ADDRESS }, + { 0xFC04, CRL_REG_LEN_16BIT, 0x4B03, AR023Z_I2C_ADDRESS }, + { 0xFC06, CRL_REG_LEN_16BIT, 0x432D, AR023Z_I2C_ADDRESS }, + { 0xFC08, CRL_REG_LEN_16BIT, 0x4643, AR023Z_I2C_ADDRESS }, + { 0xFC0A, CRL_REG_LEN_16BIT, 0x16A3, AR023Z_I2C_ADDRESS }, + { 0xFC0C, CRL_REG_LEN_16BIT, 0x4316, AR023Z_I2C_ADDRESS }, + { 0xFC0E, 
CRL_REG_LEN_16BIT, 0x5D00, AR023Z_I2C_ADDRESS }, + { 0x0040, CRL_REG_LEN_16BIT, 0x8D06, AR023Z_I2C_ADDRESS }, + { 0x00, CRL_REG_LEN_DELAY, 20, 0x00 }, + { 0x0040, CRL_REG_LEN_16BIT, 0x8D08, AR023Z_I2C_ADDRESS }, + { 0x00, CRL_REG_LEN_DELAY, 20, 0x00 }, + { 0xFC00, CRL_REG_LEN_16BIT, 0x3086, AR023Z_I2C_ADDRESS }, + { 0xFC02, CRL_REG_LEN_16BIT, 0x0C0D, AR023Z_I2C_ADDRESS }, + { 0xFC04, CRL_REG_LEN_16BIT, 0x2944, AR023Z_I2C_ADDRESS }, + { 0xFC06, CRL_REG_LEN_16BIT, 0x8810, AR023Z_I2C_ADDRESS }, + { 0xFC08, CRL_REG_LEN_16BIT, 0x2B04, AR023Z_I2C_ADDRESS }, + { 0xFC0A, CRL_REG_LEN_16BIT, 0x530D, AR023Z_I2C_ADDRESS }, + { 0xFC0C, CRL_REG_LEN_16BIT, 0x8B16, AR023Z_I2C_ADDRESS }, + { 0xFC0E, CRL_REG_LEN_16BIT, 0x8500, AR023Z_I2C_ADDRESS }, + { 0x0040, CRL_REG_LEN_16BIT, 0x8D06, AR023Z_I2C_ADDRESS }, + { 0x00, CRL_REG_LEN_DELAY, 20, 0x00 }, + { 0x0040, CRL_REG_LEN_16BIT, 0x8D08, AR023Z_I2C_ADDRESS }, + { 0x00, CRL_REG_LEN_DELAY, 20, 0x00 }, + { 0xFC00, CRL_REG_LEN_16BIT, 0x3086, AR023Z_I2C_ADDRESS }, + { 0xFC02, CRL_REG_LEN_16BIT, 0x0C44, AR023Z_I2C_ADDRESS }, + { 0xFC04, CRL_REG_LEN_16BIT, 0x8E03, AR023Z_I2C_ADDRESS }, + { 0xFC06, CRL_REG_LEN_16BIT, 0x2AFC, AR023Z_I2C_ADDRESS }, + { 0xFC08, CRL_REG_LEN_16BIT, 0x5C1D, AR023Z_I2C_ADDRESS }, + { 0xFC0A, CRL_REG_LEN_16BIT, 0x8D60, AR023Z_I2C_ADDRESS }, + { 0xFC0C, CRL_REG_LEN_16BIT, 0x5754, AR023Z_I2C_ADDRESS }, + { 0xFC0E, CRL_REG_LEN_16BIT, 0x4900, AR023Z_I2C_ADDRESS }, + { 0x0040, CRL_REG_LEN_16BIT, 0x8D06, AR023Z_I2C_ADDRESS }, + { 0x00, CRL_REG_LEN_DELAY, 20, 0x00 }, + { 0x0040, CRL_REG_LEN_16BIT, 0x8D08, AR023Z_I2C_ADDRESS }, + { 0x00, CRL_REG_LEN_DELAY, 20, 0x00 }, + { 0xFC00, CRL_REG_LEN_16BIT, 0x3086, AR023Z_I2C_ADDRESS }, + { 0xFC02, CRL_REG_LEN_16BIT, 0x0C5F, AR023Z_I2C_ADDRESS }, + { 0xFC04, CRL_REG_LEN_16BIT, 0x5305, AR023Z_I2C_ADDRESS }, + { 0xFC06, CRL_REG_LEN_16BIT, 0x5307, AR023Z_I2C_ADDRESS }, + { 0xFC08, CRL_REG_LEN_16BIT, 0x4D2B, AR023Z_I2C_ADDRESS }, + { 0xFC0A, CRL_REG_LEN_16BIT, 0xF810, AR023Z_I2C_ADDRESS 
}, + { 0xFC0C, CRL_REG_LEN_16BIT, 0x164C, AR023Z_I2C_ADDRESS }, + { 0xFC0E, CRL_REG_LEN_16BIT, 0x0800, AR023Z_I2C_ADDRESS }, + { 0x0040, CRL_REG_LEN_16BIT, 0x8D06, AR023Z_I2C_ADDRESS }, + { 0x00, CRL_REG_LEN_DELAY, 20, 0x00 }, + { 0x0040, CRL_REG_LEN_16BIT, 0x8D08, AR023Z_I2C_ADDRESS }, + { 0x00, CRL_REG_LEN_DELAY, 20, 0x00 }, + { 0xFC00, CRL_REG_LEN_16BIT, 0x3086, AR023Z_I2C_ADDRESS }, + { 0xFC02, CRL_REG_LEN_16BIT, 0x0C55, AR023Z_I2C_ADDRESS }, + { 0xFC04, CRL_REG_LEN_16BIT, 0x562B, AR023Z_I2C_ADDRESS }, + { 0xFC06, CRL_REG_LEN_16BIT, 0xB82B, AR023Z_I2C_ADDRESS }, + { 0xFC08, CRL_REG_LEN_16BIT, 0x984E, AR023Z_I2C_ADDRESS }, + { 0xFC0A, CRL_REG_LEN_16BIT, 0x1129, AR023Z_I2C_ADDRESS }, + { 0xFC0C, CRL_REG_LEN_16BIT, 0x0429, AR023Z_I2C_ADDRESS }, + { 0xFC0E, CRL_REG_LEN_16BIT, 0x8400, AR023Z_I2C_ADDRESS }, + { 0x0040, CRL_REG_LEN_16BIT, 0x8D06, AR023Z_I2C_ADDRESS }, + { 0x00, CRL_REG_LEN_DELAY, 20, 0x00 }, + { 0x0040, CRL_REG_LEN_16BIT, 0x8D08, AR023Z_I2C_ADDRESS }, + { 0x00, CRL_REG_LEN_DELAY, 20, 0x00 }, + { 0xFC00, CRL_REG_LEN_16BIT, 0x3086, AR023Z_I2C_ADDRESS }, + { 0xFC02, CRL_REG_LEN_16BIT, 0x0C29, AR023Z_I2C_ADDRESS }, + { 0xFC04, CRL_REG_LEN_16BIT, 0x9460, AR023Z_I2C_ADDRESS }, + { 0xFC06, CRL_REG_LEN_16BIT, 0x5C19, AR023Z_I2C_ADDRESS }, + { 0xFC08, CRL_REG_LEN_16BIT, 0x5C1B, AR023Z_I2C_ADDRESS }, + { 0xFC0A, CRL_REG_LEN_16BIT, 0x4548, AR023Z_I2C_ADDRESS }, + { 0xFC0C, CRL_REG_LEN_16BIT, 0x4508, AR023Z_I2C_ADDRESS }, + { 0xFC0E, CRL_REG_LEN_16BIT, 0x4500, AR023Z_I2C_ADDRESS }, + { 0x0040, CRL_REG_LEN_16BIT, 0x8D06, AR023Z_I2C_ADDRESS }, + { 0x00, CRL_REG_LEN_DELAY, 20, 0x00 }, + { 0x0040, CRL_REG_LEN_16BIT, 0x8D08, AR023Z_I2C_ADDRESS }, + { 0x00, CRL_REG_LEN_DELAY, 20, 0x00 }, + { 0xFC00, CRL_REG_LEN_16BIT, 0x3086, AR023Z_I2C_ADDRESS }, + { 0xFC02, CRL_REG_LEN_16BIT, 0x0C88, AR023Z_I2C_ADDRESS }, + { 0xFC04, CRL_REG_LEN_16BIT, 0x29B6, AR023Z_I2C_ADDRESS }, + { 0xFC06, CRL_REG_LEN_16BIT, 0x8E01, AR023Z_I2C_ADDRESS }, + { 0xFC08, CRL_REG_LEN_16BIT, 0x2AF8, 
AR023Z_I2C_ADDRESS }, + { 0xFC0A, CRL_REG_LEN_16BIT, 0x3E02, AR023Z_I2C_ADDRESS }, + { 0xFC0C, CRL_REG_LEN_16BIT, 0x2AFA, AR023Z_I2C_ADDRESS }, + { 0xFC0E, CRL_REG_LEN_16BIT, 0x3F00, AR023Z_I2C_ADDRESS }, + { 0x0040, CRL_REG_LEN_16BIT, 0x8D06, AR023Z_I2C_ADDRESS }, + { 0x00, CRL_REG_LEN_DELAY, 20, 0x00 }, + { 0x0040, CRL_REG_LEN_16BIT, 0x8D08, AR023Z_I2C_ADDRESS }, + { 0x00, CRL_REG_LEN_DELAY, 20, 0x00 }, + { 0xFC00, CRL_REG_LEN_16BIT, 0x3086, AR023Z_I2C_ADDRESS }, + { 0xFC02, CRL_REG_LEN_16BIT, 0x0C09, AR023Z_I2C_ADDRESS }, + { 0xFC04, CRL_REG_LEN_16BIT, 0x5C1B, AR023Z_I2C_ADDRESS }, + { 0xFC06, CRL_REG_LEN_16BIT, 0x29B2, AR023Z_I2C_ADDRESS }, + { 0xFC08, CRL_REG_LEN_16BIT, 0x3F0C, AR023Z_I2C_ADDRESS }, + { 0xFC0A, CRL_REG_LEN_16BIT, 0x3E02, AR023Z_I2C_ADDRESS }, + { 0xFC0C, CRL_REG_LEN_16BIT, 0x3E13, AR023Z_I2C_ADDRESS }, + { 0xFC0E, CRL_REG_LEN_16BIT, 0x5C00, AR023Z_I2C_ADDRESS }, + { 0x0040, CRL_REG_LEN_16BIT, 0x8D06, AR023Z_I2C_ADDRESS }, + { 0x00, CRL_REG_LEN_DELAY, 20, 0x00 }, + { 0x0040, CRL_REG_LEN_16BIT, 0x8D08, AR023Z_I2C_ADDRESS }, + { 0x00, CRL_REG_LEN_DELAY, 20, 0x00 }, + { 0xFC00, CRL_REG_LEN_16BIT, 0x3086, AR023Z_I2C_ADDRESS }, + { 0xFC02, CRL_REG_LEN_16BIT, 0x0C13, AR023Z_I2C_ADDRESS }, + { 0xFC04, CRL_REG_LEN_16BIT, 0x3F11, AR023Z_I2C_ADDRESS }, + { 0xFC06, CRL_REG_LEN_16BIT, 0x3E0B, AR023Z_I2C_ADDRESS }, + { 0xFC08, CRL_REG_LEN_16BIT, 0x5F2B, AR023Z_I2C_ADDRESS }, + { 0xFC0A, CRL_REG_LEN_16BIT, 0x902A, AR023Z_I2C_ADDRESS }, + { 0xFC0C, CRL_REG_LEN_16BIT, 0xF22B, AR023Z_I2C_ADDRESS }, + { 0xFC0E, CRL_REG_LEN_16BIT, 0x8000, AR023Z_I2C_ADDRESS }, + { 0x0040, CRL_REG_LEN_16BIT, 0x8D06, AR023Z_I2C_ADDRESS }, + { 0x00, CRL_REG_LEN_DELAY, 20, 0x00 }, + { 0x0040, CRL_REG_LEN_16BIT, 0x8D08, AR023Z_I2C_ADDRESS }, + { 0x00, CRL_REG_LEN_DELAY, 20, 0x00 }, + { 0xFC00, CRL_REG_LEN_16BIT, 0x3086, AR023Z_I2C_ADDRESS }, + { 0xFC02, CRL_REG_LEN_16BIT, 0x0C3E, AR023Z_I2C_ADDRESS }, + { 0xFC04, CRL_REG_LEN_16BIT, 0x043F, AR023Z_I2C_ADDRESS }, + { 0xFC06, 
CRL_REG_LEN_16BIT, 0x0660, AR023Z_I2C_ADDRESS }, + { 0xFC08, CRL_REG_LEN_16BIT, 0x29A2, AR023Z_I2C_ADDRESS }, + { 0xFC0A, CRL_REG_LEN_16BIT, 0x29A3, AR023Z_I2C_ADDRESS }, + { 0xFC0C, CRL_REG_LEN_16BIT, 0x5F4D, AR023Z_I2C_ADDRESS }, + { 0xFC0E, CRL_REG_LEN_16BIT, 0x1C00, AR023Z_I2C_ADDRESS }, + { 0x0040, CRL_REG_LEN_16BIT, 0x8D06, AR023Z_I2C_ADDRESS }, + { 0x00, CRL_REG_LEN_DELAY, 20, 0x00 }, + { 0x0040, CRL_REG_LEN_16BIT, 0x8D08, AR023Z_I2C_ADDRESS }, + { 0x00, CRL_REG_LEN_DELAY, 20, 0x00 }, + { 0xFC00, CRL_REG_LEN_16BIT, 0x3086, AR023Z_I2C_ADDRESS }, + { 0xFC02, CRL_REG_LEN_16BIT, 0x0C2A, AR023Z_I2C_ADDRESS }, + { 0xFC04, CRL_REG_LEN_16BIT, 0xFA29, AR023Z_I2C_ADDRESS }, + { 0xFC06, CRL_REG_LEN_16BIT, 0x8345, AR023Z_I2C_ADDRESS }, + { 0xFC08, CRL_REG_LEN_16BIT, 0xA83E, AR023Z_I2C_ADDRESS }, + { 0xFC0A, CRL_REG_LEN_16BIT, 0x072A, AR023Z_I2C_ADDRESS }, + { 0xFC0C, CRL_REG_LEN_16BIT, 0xFB3E, AR023Z_I2C_ADDRESS }, + { 0xFC0E, CRL_REG_LEN_16BIT, 0x2900, AR023Z_I2C_ADDRESS }, + { 0x0040, CRL_REG_LEN_16BIT, 0x8D06, AR023Z_I2C_ADDRESS }, + { 0x00, CRL_REG_LEN_DELAY, 20, 0x00 }, + { 0x0040, CRL_REG_LEN_16BIT, 0x8D08, AR023Z_I2C_ADDRESS }, + { 0x00, CRL_REG_LEN_DELAY, 20, 0x00 }, + { 0xFC00, CRL_REG_LEN_16BIT, 0x3086, AR023Z_I2C_ADDRESS }, + { 0xFC02, CRL_REG_LEN_16BIT, 0x0C45, AR023Z_I2C_ADDRESS }, + { 0xFC04, CRL_REG_LEN_16BIT, 0x8824, AR023Z_I2C_ADDRESS }, + { 0xFC06, CRL_REG_LEN_16BIT, 0x3E08, AR023Z_I2C_ADDRESS }, + { 0xFC08, CRL_REG_LEN_16BIT, 0x2AFA, AR023Z_I2C_ADDRESS }, + { 0xFC0A, CRL_REG_LEN_16BIT, 0x5D29, AR023Z_I2C_ADDRESS }, + { 0xFC0C, CRL_REG_LEN_16BIT, 0x9288, AR023Z_I2C_ADDRESS }, + { 0xFC0E, CRL_REG_LEN_16BIT, 0x1000, AR023Z_I2C_ADDRESS }, + { 0x0040, CRL_REG_LEN_16BIT, 0x8D06, AR023Z_I2C_ADDRESS }, + { 0x00, CRL_REG_LEN_DELAY, 20, 0x00 }, + { 0x0040, CRL_REG_LEN_16BIT, 0x8D08, AR023Z_I2C_ADDRESS }, + { 0x00, CRL_REG_LEN_DELAY, 20, 0x00 }, + { 0xFC00, CRL_REG_LEN_16BIT, 0x3086, AR023Z_I2C_ADDRESS }, + { 0xFC02, CRL_REG_LEN_16BIT, 0x0C2B, AR023Z_I2C_ADDRESS 
}, + { 0xFC04, CRL_REG_LEN_16BIT, 0x048B, AR023Z_I2C_ADDRESS }, + { 0xFC06, CRL_REG_LEN_16BIT, 0x1686, AR023Z_I2C_ADDRESS }, + { 0xFC08, CRL_REG_LEN_16BIT, 0x8D48, AR023Z_I2C_ADDRESS }, + { 0xFC0A, CRL_REG_LEN_16BIT, 0x4D4E, AR023Z_I2C_ADDRESS }, + { 0xFC0C, CRL_REG_LEN_16BIT, 0x2B80, AR023Z_I2C_ADDRESS }, + { 0xFC0E, CRL_REG_LEN_16BIT, 0x4C00, AR023Z_I2C_ADDRESS }, + { 0x0040, CRL_REG_LEN_16BIT, 0x8D06, AR023Z_I2C_ADDRESS }, + { 0x00, CRL_REG_LEN_DELAY, 20, 0x00 }, + { 0x0040, CRL_REG_LEN_16BIT, 0x8D08, AR023Z_I2C_ADDRESS }, + { 0x00, CRL_REG_LEN_DELAY, 20, 0x00 }, + { 0xFC00, CRL_REG_LEN_16BIT, 0x3086, AR023Z_I2C_ADDRESS }, + { 0xFC02, CRL_REG_LEN_16BIT, 0x0C0B, AR023Z_I2C_ADDRESS }, + { 0xFC04, CRL_REG_LEN_16BIT, 0x603F, AR023Z_I2C_ADDRESS }, + { 0xFC06, CRL_REG_LEN_16BIT, 0x282A, AR023Z_I2C_ADDRESS }, + { 0xFC08, CRL_REG_LEN_16BIT, 0xF23F, AR023Z_I2C_ADDRESS }, + { 0xFC0A, CRL_REG_LEN_16BIT, 0x0F29, AR023Z_I2C_ADDRESS }, + { 0xFC0C, CRL_REG_LEN_16BIT, 0x8229, AR023Z_I2C_ADDRESS }, + { 0xFC0E, CRL_REG_LEN_16BIT, 0x8300, AR023Z_I2C_ADDRESS }, + { 0x0040, CRL_REG_LEN_16BIT, 0x8D06, AR023Z_I2C_ADDRESS }, + { 0x00, CRL_REG_LEN_DELAY, 20, 0x00 }, + { 0x0040, CRL_REG_LEN_16BIT, 0x8D08, AR023Z_I2C_ADDRESS }, + { 0x00, CRL_REG_LEN_DELAY, 20, 0x00 }, + { 0xFC00, CRL_REG_LEN_16BIT, 0x3086, AR023Z_I2C_ADDRESS }, + { 0xFC02, CRL_REG_LEN_16BIT, 0x0C29, AR023Z_I2C_ADDRESS }, + { 0xFC04, CRL_REG_LEN_16BIT, 0x435C, AR023Z_I2C_ADDRESS }, + { 0xFC06, CRL_REG_LEN_16BIT, 0x155F, AR023Z_I2C_ADDRESS }, + { 0xFC08, CRL_REG_LEN_16BIT, 0x4D1C, AR023Z_I2C_ADDRESS }, + { 0xFC0A, CRL_REG_LEN_16BIT, 0x2AFA, AR023Z_I2C_ADDRESS }, + { 0xFC0C, CRL_REG_LEN_16BIT, 0x4558, AR023Z_I2C_ADDRESS }, + { 0xFC0E, CRL_REG_LEN_16BIT, 0x8E00, AR023Z_I2C_ADDRESS }, + { 0x0040, CRL_REG_LEN_16BIT, 0x8D06, AR023Z_I2C_ADDRESS }, + { 0x00, CRL_REG_LEN_DELAY, 20, 0x00 }, + { 0x0040, CRL_REG_LEN_16BIT, 0x8D08, AR023Z_I2C_ADDRESS }, + { 0x00, CRL_REG_LEN_DELAY, 20, 0x00 }, + { 0xFC00, CRL_REG_LEN_16BIT, 0x3086, 
AR023Z_I2C_ADDRESS }, + { 0xFC02, CRL_REG_LEN_16BIT, 0x0C00, AR023Z_I2C_ADDRESS }, + { 0xFC04, CRL_REG_LEN_16BIT, 0x2A98, AR023Z_I2C_ADDRESS }, + { 0xFC06, CRL_REG_LEN_16BIT, 0x3F06, AR023Z_I2C_ADDRESS }, + { 0xFC08, CRL_REG_LEN_16BIT, 0x4A73, AR023Z_I2C_ADDRESS }, + { 0xFC0A, CRL_REG_LEN_16BIT, 0x9D0A, AR023Z_I2C_ADDRESS }, + { 0xFC0C, CRL_REG_LEN_16BIT, 0x4316, AR023Z_I2C_ADDRESS }, + { 0xFC0E, CRL_REG_LEN_16BIT, 0x0B00, AR023Z_I2C_ADDRESS }, + { 0x0040, CRL_REG_LEN_16BIT, 0x8D06, AR023Z_I2C_ADDRESS }, + { 0x00, CRL_REG_LEN_DELAY, 20, 0x00 }, + { 0x0040, CRL_REG_LEN_16BIT, 0x8D08, AR023Z_I2C_ADDRESS }, + { 0x00, CRL_REG_LEN_DELAY, 20, 0x00 }, + { 0xFC00, CRL_REG_LEN_16BIT, 0x3086, AR023Z_I2C_ADDRESS }, + { 0xFC02, CRL_REG_LEN_16BIT, 0x0C43, AR023Z_I2C_ADDRESS }, + { 0xFC04, CRL_REG_LEN_16BIT, 0x168E, AR023Z_I2C_ADDRESS }, + { 0xFC06, CRL_REG_LEN_16BIT, 0x032A, AR023Z_I2C_ADDRESS }, + { 0xFC08, CRL_REG_LEN_16BIT, 0x9C45, AR023Z_I2C_ADDRESS }, + { 0xFC0A, CRL_REG_LEN_16BIT, 0x783F, AR023Z_I2C_ADDRESS }, + { 0xFC0C, CRL_REG_LEN_16BIT, 0x072A, AR023Z_I2C_ADDRESS }, + { 0xFC0E, CRL_REG_LEN_16BIT, 0x9D00, AR023Z_I2C_ADDRESS }, + { 0x0040, CRL_REG_LEN_16BIT, 0x8D06, AR023Z_I2C_ADDRESS }, + { 0x00, CRL_REG_LEN_DELAY, 20, 0x00 }, + { 0x0040, CRL_REG_LEN_16BIT, 0x8D08, AR023Z_I2C_ADDRESS }, + { 0x00, CRL_REG_LEN_DELAY, 20, 0x00 }, + { 0xFC00, CRL_REG_LEN_16BIT, 0x3086, AR023Z_I2C_ADDRESS }, + { 0xFC02, CRL_REG_LEN_16BIT, 0x0C3E, AR023Z_I2C_ADDRESS }, + { 0xFC04, CRL_REG_LEN_16BIT, 0x1245, AR023Z_I2C_ADDRESS }, + { 0xFC06, CRL_REG_LEN_16BIT, 0x583F, AR023Z_I2C_ADDRESS }, + { 0xFC08, CRL_REG_LEN_16BIT, 0x048E, AR023Z_I2C_ADDRESS }, + { 0xFC0A, CRL_REG_LEN_16BIT, 0x012A, AR023Z_I2C_ADDRESS }, + { 0xFC0C, CRL_REG_LEN_16BIT, 0x988E, AR023Z_I2C_ADDRESS }, + { 0xFC0E, CRL_REG_LEN_16BIT, 0x0000, AR023Z_I2C_ADDRESS }, + { 0x0040, CRL_REG_LEN_16BIT, 0x8D06, AR023Z_I2C_ADDRESS }, + { 0x00, CRL_REG_LEN_DELAY, 20, 0x00 }, + { 0x0040, CRL_REG_LEN_16BIT, 0x8D08, AR023Z_I2C_ADDRESS }, + { 
0x00, CRL_REG_LEN_DELAY, 20, 0x00 }, + { 0xFC00, CRL_REG_LEN_16BIT, 0x3086, AR023Z_I2C_ADDRESS }, + { 0xFC02, CRL_REG_LEN_16BIT, 0x0C91, AR023Z_I2C_ADDRESS }, + { 0xFC04, CRL_REG_LEN_16BIT, 0x769C, AR023Z_I2C_ADDRESS }, + { 0xFC06, CRL_REG_LEN_16BIT, 0x779C, AR023Z_I2C_ADDRESS }, + { 0xFC08, CRL_REG_LEN_16BIT, 0x4644, AR023Z_I2C_ADDRESS }, + { 0xFC0A, CRL_REG_LEN_16BIT, 0x1616, AR023Z_I2C_ADDRESS }, + { 0xFC0C, CRL_REG_LEN_16BIT, 0x907A, AR023Z_I2C_ADDRESS }, + { 0xFC0E, CRL_REG_LEN_16BIT, 0x1200, AR023Z_I2C_ADDRESS }, + { 0x0040, CRL_REG_LEN_16BIT, 0x8D06, AR023Z_I2C_ADDRESS }, + { 0x00, CRL_REG_LEN_DELAY, 20, 0x00 }, + { 0x0040, CRL_REG_LEN_16BIT, 0x8D08, AR023Z_I2C_ADDRESS }, + { 0x00, CRL_REG_LEN_DELAY, 20, 0x00 }, + { 0xFC00, CRL_REG_LEN_16BIT, 0x3086, AR023Z_I2C_ADDRESS }, + { 0xFC02, CRL_REG_LEN_16BIT, 0x0C44, AR023Z_I2C_ADDRESS }, + { 0xFC04, CRL_REG_LEN_16BIT, 0x4B4A, AR023Z_I2C_ADDRESS }, + { 0xFC06, CRL_REG_LEN_16BIT, 0x0043, AR023Z_I2C_ADDRESS }, + { 0xFC08, CRL_REG_LEN_16BIT, 0x1663, AR023Z_I2C_ADDRESS }, + { 0xFC0A, CRL_REG_LEN_16BIT, 0x4316, AR023Z_I2C_ADDRESS }, + { 0xFC0C, CRL_REG_LEN_16BIT, 0x0843, AR023Z_I2C_ADDRESS }, + { 0xFC0E, CRL_REG_LEN_16BIT, 0x1600, AR023Z_I2C_ADDRESS }, + { 0x0040, CRL_REG_LEN_16BIT, 0x8D06, AR023Z_I2C_ADDRESS }, + { 0x00, CRL_REG_LEN_DELAY, 20, 0x00 }, + { 0x0040, CRL_REG_LEN_16BIT, 0x8D08, AR023Z_I2C_ADDRESS }, + { 0x00, CRL_REG_LEN_DELAY, 20, 0x00 }, + { 0xFC00, CRL_REG_LEN_16BIT, 0x3086, AR023Z_I2C_ADDRESS }, + { 0xFC02, CRL_REG_LEN_16BIT, 0x0C50, AR023Z_I2C_ADDRESS }, + { 0xFC04, CRL_REG_LEN_16BIT, 0x4316, AR023Z_I2C_ADDRESS }, + { 0xFC06, CRL_REG_LEN_16BIT, 0x6543, AR023Z_I2C_ADDRESS }, + { 0xFC08, CRL_REG_LEN_16BIT, 0x1666, AR023Z_I2C_ADDRESS }, + { 0xFC0A, CRL_REG_LEN_16BIT, 0x4316, AR023Z_I2C_ADDRESS }, + { 0xFC0C, CRL_REG_LEN_16BIT, 0x8E03, AR023Z_I2C_ADDRESS }, + { 0xFC0E, CRL_REG_LEN_16BIT, 0x2A00, AR023Z_I2C_ADDRESS }, + { 0x0040, CRL_REG_LEN_16BIT, 0x8D06, AR023Z_I2C_ADDRESS }, + { 0x00, CRL_REG_LEN_DELAY, 
20, 0x00 }, + { 0x0040, CRL_REG_LEN_16BIT, 0x8D08, AR023Z_I2C_ADDRESS }, + { 0x00, CRL_REG_LEN_DELAY, 20, 0x00 }, + { 0xFC00, CRL_REG_LEN_16BIT, 0x3086, AR023Z_I2C_ADDRESS }, + { 0xFC02, CRL_REG_LEN_16BIT, 0x0C9C, AR023Z_I2C_ADDRESS }, + { 0xFC04, CRL_REG_LEN_16BIT, 0x4578, AR023Z_I2C_ADDRESS }, + { 0xFC06, CRL_REG_LEN_16BIT, 0x3F07, AR023Z_I2C_ADDRESS }, + { 0xFC08, CRL_REG_LEN_16BIT, 0x2A9D, AR023Z_I2C_ADDRESS }, + { 0xFC0A, CRL_REG_LEN_16BIT, 0x5D0C, AR023Z_I2C_ADDRESS }, + { 0xFC0C, CRL_REG_LEN_16BIT, 0x2944, AR023Z_I2C_ADDRESS }, + { 0xFC0E, CRL_REG_LEN_16BIT, 0x8800, AR023Z_I2C_ADDRESS }, + { 0x0040, CRL_REG_LEN_16BIT, 0x8D06, AR023Z_I2C_ADDRESS }, + { 0x00, CRL_REG_LEN_DELAY, 20, 0x00 }, + { 0x0040, CRL_REG_LEN_16BIT, 0x8D08, AR023Z_I2C_ADDRESS }, + { 0x00, CRL_REG_LEN_DELAY, 20, 0x00 }, + { 0xFC00, CRL_REG_LEN_16BIT, 0x3086, AR023Z_I2C_ADDRESS }, + { 0xFC02, CRL_REG_LEN_16BIT, 0x0C10, AR023Z_I2C_ADDRESS }, + { 0xFC04, CRL_REG_LEN_16BIT, 0x2B04, AR023Z_I2C_ADDRESS }, + { 0xFC06, CRL_REG_LEN_16BIT, 0x530D, AR023Z_I2C_ADDRESS }, + { 0xFC08, CRL_REG_LEN_16BIT, 0x8B16, AR023Z_I2C_ADDRESS }, + { 0xFC0A, CRL_REG_LEN_16BIT, 0x863E, AR023Z_I2C_ADDRESS }, + { 0xFC0C, CRL_REG_LEN_16BIT, 0x1F45, AR023Z_I2C_ADDRESS }, + { 0xFC0E, CRL_REG_LEN_16BIT, 0x5800, AR023Z_I2C_ADDRESS }, + { 0x0040, CRL_REG_LEN_16BIT, 0x8D06, AR023Z_I2C_ADDRESS }, + { 0x00, CRL_REG_LEN_DELAY, 20, 0x00 }, + { 0x0040, CRL_REG_LEN_16BIT, 0x8D08, AR023Z_I2C_ADDRESS }, + { 0x00, CRL_REG_LEN_DELAY, 20, 0x00 }, + { 0xFC00, CRL_REG_LEN_16BIT, 0x3086, AR023Z_I2C_ADDRESS }, + { 0xFC02, CRL_REG_LEN_16BIT, 0x0C28, AR023Z_I2C_ADDRESS }, + { 0xFC04, CRL_REG_LEN_16BIT, 0x3E06, AR023Z_I2C_ADDRESS }, + { 0xFC06, CRL_REG_LEN_16BIT, 0x8E01, AR023Z_I2C_ADDRESS }, + { 0xFC08, CRL_REG_LEN_16BIT, 0x2A98, AR023Z_I2C_ADDRESS }, + { 0xFC0A, CRL_REG_LEN_16BIT, 0x8E00, AR023Z_I2C_ADDRESS }, + { 0xFC0C, CRL_REG_LEN_16BIT, 0x8D60, AR023Z_I2C_ADDRESS }, + { 0xFC0E, CRL_REG_LEN_16BIT, 0x1200, AR023Z_I2C_ADDRESS }, + { 0x0040, 
CRL_REG_LEN_16BIT, 0x8D06, AR023Z_I2C_ADDRESS }, + { 0x00, CRL_REG_LEN_DELAY, 20, 0x00 }, + { 0x0040, CRL_REG_LEN_16BIT, 0x8D08, AR023Z_I2C_ADDRESS }, + { 0x00, CRL_REG_LEN_DELAY, 20, 0x00 }, + { 0xFC00, CRL_REG_LEN_16BIT, 0x3086, AR023Z_I2C_ADDRESS }, + { 0xFC02, CRL_REG_LEN_16BIT, 0x0444, AR023Z_I2C_ADDRESS }, + { 0xFC04, CRL_REG_LEN_16BIT, 0x4B2C, AR023Z_I2C_ADDRESS }, + { 0xFC06, CRL_REG_LEN_16BIT, 0x2C00, AR023Z_I2C_ADDRESS }, + { 0xFC08, CRL_REG_LEN_16BIT, 0x2A98, AR023Z_I2C_ADDRESS }, + { 0xFC0A, CRL_REG_LEN_16BIT, 0x8E00, AR023Z_I2C_ADDRESS }, + { 0xFC0C, CRL_REG_LEN_16BIT, 0x8D60, AR023Z_I2C_ADDRESS }, + { 0xFC0E, CRL_REG_LEN_16BIT, 0x1200, AR023Z_I2C_ADDRESS }, + { 0x0040, CRL_REG_LEN_16BIT, 0x8D06, AR023Z_I2C_ADDRESS }, + { 0x00, CRL_REG_LEN_DELAY, 20, 0x00 }, + { 0x0040, CRL_REG_LEN_16BIT, 0x8D08, AR023Z_I2C_ADDRESS }, + { 0x00, CRL_REG_LEN_DELAY, 20, 0x00 }, + { 0x0040, CRL_REG_LEN_16BIT, 0x8D02, AR023Z_I2C_ADDRESS }, + { 0x00, CRL_REG_LEN_DELAY, 20, 0x00 }, + { 0x0040, CRL_REG_LEN_16BIT, 0x8E01, AR023Z_I2C_ADDRESS }, + { 0x00, CRL_REG_LEN_DELAY, 20, 0x00 }, + { 0xCCCC, CRL_REG_LEN_08BIT, 0x69, AR023Z_I2C_ADDRESS }, + { 0x0040, CRL_REG_LEN_16BIT, 0x8D00, AR023Z_I2C_ADDRESS }, + { 0x00, CRL_REG_LEN_DELAY, 20, 0x00 }, + { 0x0040, CRL_REG_LEN_16BIT, 0x8D01, AR023Z_I2C_ADDRESS }, + { 0x00, CRL_REG_LEN_DELAY, 20, 0x00 }, + { 0xFC00, CRL_REG_LEN_16BIT, 0x3ED6, AR023Z_I2C_ADDRESS }, + { 0xFC02, CRL_REG_LEN_16BIT, 0x0234, AR023Z_I2C_ADDRESS }, + { 0xFC04, CRL_REG_LEN_16BIT, 0xB300, AR023Z_I2C_ADDRESS }, + { 0x0040, CRL_REG_LEN_16BIT, 0x8D06, AR023Z_I2C_ADDRESS }, + { 0x00, CRL_REG_LEN_DELAY, 20, 0x00 }, + { 0x0040, CRL_REG_LEN_16BIT, 0x8D08, AR023Z_I2C_ADDRESS }, + { 0x00, CRL_REG_LEN_DELAY, 20, 0x00 }, + { 0xFC00, CRL_REG_LEN_16BIT, 0x2436, AR023Z_I2C_ADDRESS }, + { 0xFC02, CRL_REG_LEN_16BIT, 0x0200, AR023Z_I2C_ADDRESS }, + { 0xFC04, CRL_REG_LEN_16BIT, 0x0E00, AR023Z_I2C_ADDRESS }, + { 0x0040, CRL_REG_LEN_16BIT, 0x8D06, AR023Z_I2C_ADDRESS }, + { 0x00, 
CRL_REG_LEN_DELAY, 20, 0x00 }, + { 0x0040, CRL_REG_LEN_16BIT, 0x8D08, AR023Z_I2C_ADDRESS }, + { 0x00, CRL_REG_LEN_DELAY, 20, 0x00 }, + { 0xFC00, CRL_REG_LEN_16BIT, 0x320C, AR023Z_I2C_ADDRESS }, + { 0xFC02, CRL_REG_LEN_16BIT, 0x0201, AR023Z_I2C_ADDRESS }, + { 0xFC04, CRL_REG_LEN_16BIT, 0x8000, AR023Z_I2C_ADDRESS }, + { 0x0040, CRL_REG_LEN_16BIT, 0x8D06, AR023Z_I2C_ADDRESS }, + { 0x00, CRL_REG_LEN_DELAY, 20, 0x00 }, + { 0x0040, CRL_REG_LEN_16BIT, 0x8D08, AR023Z_I2C_ADDRESS }, + { 0x00, CRL_REG_LEN_DELAY, 20, 0x00 }, + { 0xFC00, CRL_REG_LEN_16BIT, 0x320E, AR023Z_I2C_ADDRESS }, + { 0xFC02, CRL_REG_LEN_16BIT, 0x0203, AR023Z_I2C_ADDRESS }, + { 0xFC04, CRL_REG_LEN_16BIT, 0x0000, AR023Z_I2C_ADDRESS }, + { 0x0040, CRL_REG_LEN_16BIT, 0x8D06, AR023Z_I2C_ADDRESS }, + { 0x00, CRL_REG_LEN_DELAY, 20, 0x00 }, + { 0x0040, CRL_REG_LEN_16BIT, 0x8D08, AR023Z_I2C_ADDRESS }, + { 0x00, CRL_REG_LEN_DELAY, 20, 0x00 }, + { 0xFC00, CRL_REG_LEN_16BIT, 0x3210, AR023Z_I2C_ADDRESS }, + { 0xFC02, CRL_REG_LEN_16BIT, 0x0205, AR023Z_I2C_ADDRESS }, + { 0xFC04, CRL_REG_LEN_16BIT, 0x0000, AR023Z_I2C_ADDRESS }, + { 0x0040, CRL_REG_LEN_16BIT, 0x8D06, AR023Z_I2C_ADDRESS }, + { 0x00, CRL_REG_LEN_DELAY, 20, 0x00 }, + { 0x0040, CRL_REG_LEN_16BIT, 0x8D08, AR023Z_I2C_ADDRESS }, + { 0x00, CRL_REG_LEN_DELAY, 20, 0x00 }, + { 0xFC00, CRL_REG_LEN_16BIT, 0x3204, AR023Z_I2C_ADDRESS }, + { 0xFC02, CRL_REG_LEN_16BIT, 0x020B, AR023Z_I2C_ADDRESS }, + { 0xFC04, CRL_REG_LEN_16BIT, 0x6D00, AR023Z_I2C_ADDRESS }, + { 0x0040, CRL_REG_LEN_16BIT, 0x8D06, AR023Z_I2C_ADDRESS }, + { 0x00, CRL_REG_LEN_DELAY, 20, 0x00 }, + { 0x0040, CRL_REG_LEN_16BIT, 0x8D08, AR023Z_I2C_ADDRESS }, + { 0x00, CRL_REG_LEN_DELAY, 20, 0x00 }, + { 0xFC00, CRL_REG_LEN_16BIT, 0x30FE, AR023Z_I2C_ADDRESS }, + { 0xFC02, CRL_REG_LEN_16BIT, 0x0200, AR023Z_I2C_ADDRESS }, + { 0xFC04, CRL_REG_LEN_16BIT, 0x8000, AR023Z_I2C_ADDRESS }, + { 0x0040, CRL_REG_LEN_16BIT, 0x8D06, AR023Z_I2C_ADDRESS }, + { 0x00, CRL_REG_LEN_DELAY, 20, 0x00 }, + { 0x0040, CRL_REG_LEN_16BIT, 
0x8D08, AR023Z_I2C_ADDRESS }, + { 0x00, CRL_REG_LEN_DELAY, 20, 0x00 }, + { 0xFC00, CRL_REG_LEN_16BIT, 0x3ED8, AR023Z_I2C_ADDRESS }, + { 0xFC02, CRL_REG_LEN_16BIT, 0x027B, AR023Z_I2C_ADDRESS }, + { 0xFC04, CRL_REG_LEN_16BIT, 0x9900, AR023Z_I2C_ADDRESS }, + { 0x0040, CRL_REG_LEN_16BIT, 0x8D06, AR023Z_I2C_ADDRESS }, + { 0x00, CRL_REG_LEN_DELAY, 20, 0x00 }, + { 0x0040, CRL_REG_LEN_16BIT, 0x8D08, AR023Z_I2C_ADDRESS }, + { 0x00, CRL_REG_LEN_DELAY, 20, 0x00 }, + { 0xFC00, CRL_REG_LEN_16BIT, 0x3EDC, AR023Z_I2C_ADDRESS }, + { 0xFC02, CRL_REG_LEN_16BIT, 0x029B, AR023Z_I2C_ADDRESS }, + { 0xFC04, CRL_REG_LEN_16BIT, 0xA800, AR023Z_I2C_ADDRESS }, + { 0x0040, CRL_REG_LEN_16BIT, 0x8D06, AR023Z_I2C_ADDRESS }, + { 0x00, CRL_REG_LEN_DELAY, 20, 0x00 }, + { 0x0040, CRL_REG_LEN_16BIT, 0x8D08, AR023Z_I2C_ADDRESS }, + { 0x00, CRL_REG_LEN_DELAY, 20, 0x00 }, + { 0xFC00, CRL_REG_LEN_16BIT, 0x3EDA, AR023Z_I2C_ADDRESS }, + { 0xFC02, CRL_REG_LEN_16BIT, 0x029B, AR023Z_I2C_ADDRESS }, + { 0xFC04, CRL_REG_LEN_16BIT, 0x9B00, AR023Z_I2C_ADDRESS }, + { 0x0040, CRL_REG_LEN_16BIT, 0x8D06, AR023Z_I2C_ADDRESS }, + { 0x00, CRL_REG_LEN_DELAY, 20, 0x00 }, + { 0x0040, CRL_REG_LEN_16BIT, 0x8D08, AR023Z_I2C_ADDRESS }, + { 0x00, CRL_REG_LEN_DELAY, 20, 0x00 }, + { 0xFC00, CRL_REG_LEN_16BIT, 0x3092, AR023Z_I2C_ADDRESS }, + { 0xFC02, CRL_REG_LEN_16BIT, 0x0200, AR023Z_I2C_ADDRESS }, + { 0xFC04, CRL_REG_LEN_16BIT, 0x6F00, AR023Z_I2C_ADDRESS }, + { 0x0040, CRL_REG_LEN_16BIT, 0x8D06, AR023Z_I2C_ADDRESS }, + { 0x00, CRL_REG_LEN_DELAY, 20, 0x00 }, + { 0x0040, CRL_REG_LEN_16BIT, 0x8D08, AR023Z_I2C_ADDRESS }, + { 0x00, CRL_REG_LEN_DELAY, 20, 0x00 }, + { 0xFC00, CRL_REG_LEN_16BIT, 0x3EEC, AR023Z_I2C_ADDRESS }, + { 0xFC02, CRL_REG_LEN_16BIT, 0x021C, AR023Z_I2C_ADDRESS }, + { 0xFC04, CRL_REG_LEN_16BIT, 0x0400, AR023Z_I2C_ADDRESS }, + { 0x0040, CRL_REG_LEN_16BIT, 0x8D06, AR023Z_I2C_ADDRESS }, + { 0x00, CRL_REG_LEN_DELAY, 20, 0x00 }, + { 0x0040, CRL_REG_LEN_16BIT, 0x8D08, AR023Z_I2C_ADDRESS }, + { 0x00, CRL_REG_LEN_DELAY, 20, 
0x00 }, + { 0xFC00, CRL_REG_LEN_16BIT, 0x30BA, AR023Z_I2C_ADDRESS }, + { 0xFC02, CRL_REG_LEN_16BIT, 0x0277, AR023Z_I2C_ADDRESS }, + { 0xFC04, CRL_REG_LEN_16BIT, 0x9C00, AR023Z_I2C_ADDRESS }, + { 0x0040, CRL_REG_LEN_16BIT, 0x8D06, AR023Z_I2C_ADDRESS }, + { 0x00, CRL_REG_LEN_DELAY, 20, 0x00 }, + { 0x0040, CRL_REG_LEN_16BIT, 0x8D08, AR023Z_I2C_ADDRESS }, + { 0x00, CRL_REG_LEN_DELAY, 20, 0x00 }, + { 0xFC00, CRL_REG_LEN_16BIT, 0x3EF6, AR023Z_I2C_ADDRESS }, + { 0xFC02, CRL_REG_LEN_16BIT, 0x02A7, AR023Z_I2C_ADDRESS }, + { 0xFC04, CRL_REG_LEN_16BIT, 0x0F00, AR023Z_I2C_ADDRESS }, + { 0x0040, CRL_REG_LEN_16BIT, 0x8D06, AR023Z_I2C_ADDRESS }, + { 0x00, CRL_REG_LEN_DELAY, 20, 0x00 }, + { 0x0040, CRL_REG_LEN_16BIT, 0x8D08, AR023Z_I2C_ADDRESS }, + { 0x00, CRL_REG_LEN_DELAY, 20, 0x00 }, + { 0xFC00, CRL_REG_LEN_16BIT, 0x3044, AR023Z_I2C_ADDRESS }, + { 0xFC02, CRL_REG_LEN_16BIT, 0x0204, AR023Z_I2C_ADDRESS }, + { 0xFC04, CRL_REG_LEN_16BIT, 0x1000, AR023Z_I2C_ADDRESS }, + { 0x0040, CRL_REG_LEN_16BIT, 0x8D06, AR023Z_I2C_ADDRESS }, + { 0x00, CRL_REG_LEN_DELAY, 20, 0x00 }, + { 0x0040, CRL_REG_LEN_16BIT, 0x8D08, AR023Z_I2C_ADDRESS }, + { 0x00, CRL_REG_LEN_DELAY, 20, 0x00 }, + { 0xFC00, CRL_REG_LEN_16BIT, 0x3ED0, AR023Z_I2C_ADDRESS }, + { 0xFC02, CRL_REG_LEN_16BIT, 0x02FF, AR023Z_I2C_ADDRESS }, + { 0xFC04, CRL_REG_LEN_16BIT, 0x4400, AR023Z_I2C_ADDRESS }, + { 0x0040, CRL_REG_LEN_16BIT, 0x8D06, AR023Z_I2C_ADDRESS }, + { 0x00, CRL_REG_LEN_DELAY, 20, 0x00 }, + { 0x0040, CRL_REG_LEN_16BIT, 0x8D08, AR023Z_I2C_ADDRESS }, + { 0x00, CRL_REG_LEN_DELAY, 20, 0x00 }, + { 0xFC00, CRL_REG_LEN_16BIT, 0x3ED4, AR023Z_I2C_ADDRESS }, + { 0xFC02, CRL_REG_LEN_16BIT, 0x0203, AR023Z_I2C_ADDRESS }, + { 0xFC04, CRL_REG_LEN_16BIT, 0x1F00, AR023Z_I2C_ADDRESS }, + { 0x0040, CRL_REG_LEN_16BIT, 0x8D06, AR023Z_I2C_ADDRESS }, + { 0x00, CRL_REG_LEN_DELAY, 20, 0x00 }, + { 0x0040, CRL_REG_LEN_16BIT, 0x8D08, AR023Z_I2C_ADDRESS }, + { 0x00, CRL_REG_LEN_DELAY, 20, 0x00 }, + { 0xFC00, CRL_REG_LEN_16BIT, 0x30FE, 
AR023Z_I2C_ADDRESS }, + { 0xFC02, CRL_REG_LEN_16BIT, 0x0200, AR023Z_I2C_ADDRESS }, + { 0xFC04, CRL_REG_LEN_16BIT, 0x8000, AR023Z_I2C_ADDRESS }, + { 0x0040, CRL_REG_LEN_16BIT, 0x8D06, AR023Z_I2C_ADDRESS }, + { 0x00, CRL_REG_LEN_DELAY, 20, 0x00 }, + { 0x0040, CRL_REG_LEN_16BIT, 0x8D08, AR023Z_I2C_ADDRESS }, + { 0x00, CRL_REG_LEN_DELAY, 20, 0x00 }, + { 0xFC00, CRL_REG_LEN_16BIT, 0x3EE2, AR023Z_I2C_ADDRESS }, + { 0xFC02, CRL_REG_LEN_16BIT, 0x0288, AR023Z_I2C_ADDRESS }, + { 0xFC04, CRL_REG_LEN_16BIT, 0x6600, AR023Z_I2C_ADDRESS }, + { 0x0040, CRL_REG_LEN_16BIT, 0x8D06, AR023Z_I2C_ADDRESS }, + { 0x00, CRL_REG_LEN_DELAY, 20, 0x00 }, + { 0x0040, CRL_REG_LEN_16BIT, 0x8D08, AR023Z_I2C_ADDRESS }, + { 0x00, CRL_REG_LEN_DELAY, 20, 0x00 }, + { 0xFC00, CRL_REG_LEN_16BIT, 0x3EE4, AR023Z_I2C_ADDRESS }, + { 0xFC02, CRL_REG_LEN_16BIT, 0x0266, AR023Z_I2C_ADDRESS }, + { 0xFC04, CRL_REG_LEN_16BIT, 0x2300, AR023Z_I2C_ADDRESS }, + { 0x0040, CRL_REG_LEN_16BIT, 0x8D06, AR023Z_I2C_ADDRESS }, + { 0x00, CRL_REG_LEN_DELAY, 20, 0x00 }, + { 0x0040, CRL_REG_LEN_16BIT, 0x8D08, AR023Z_I2C_ADDRESS }, + { 0x00, CRL_REG_LEN_DELAY, 20, 0x00 }, + { 0xFC00, CRL_REG_LEN_16BIT, 0x3EE6, AR023Z_I2C_ADDRESS }, + { 0xFC02, CRL_REG_LEN_16BIT, 0x0222, AR023Z_I2C_ADDRESS }, + { 0xFC04, CRL_REG_LEN_16BIT, 0x6300, AR023Z_I2C_ADDRESS }, + { 0x0040, CRL_REG_LEN_16BIT, 0x8D06, AR023Z_I2C_ADDRESS }, + { 0x00, CRL_REG_LEN_DELAY, 20, 0x00 }, + { 0x0040, CRL_REG_LEN_16BIT, 0x8D08, AR023Z_I2C_ADDRESS }, + { 0x00, CRL_REG_LEN_DELAY, 20, 0x00 }, + { 0xFC00, CRL_REG_LEN_16BIT, 0x30E0, AR023Z_I2C_ADDRESS }, + { 0xFC02, CRL_REG_LEN_16BIT, 0x0242, AR023Z_I2C_ADDRESS }, + { 0xFC04, CRL_REG_LEN_16BIT, 0x8300, AR023Z_I2C_ADDRESS }, + { 0x0040, CRL_REG_LEN_16BIT, 0x8D06, AR023Z_I2C_ADDRESS }, + { 0x00, CRL_REG_LEN_DELAY, 20, 0x00 }, + { 0x0040, CRL_REG_LEN_16BIT, 0x8D08, AR023Z_I2C_ADDRESS }, + { 0x00, CRL_REG_LEN_DELAY, 20, 0x00 }, + { 0xFC00, CRL_REG_LEN_16BIT, 0x30F0, AR023Z_I2C_ADDRESS }, + { 0xFC02, CRL_REG_LEN_16BIT, 0x0212, 
AR023Z_I2C_ADDRESS }, + { 0xFC04, CRL_REG_LEN_16BIT, 0x8300, AR023Z_I2C_ADDRESS }, + { 0x0040, CRL_REG_LEN_16BIT, 0x8D06, AR023Z_I2C_ADDRESS }, + { 0x00, CRL_REG_LEN_DELAY, 20, 0x00 }, + { 0x0040, CRL_REG_LEN_16BIT, 0x8D08, AR023Z_I2C_ADDRESS }, + { 0x00, CRL_REG_LEN_DELAY, 20, 0x00 }, + { 0x0040, CRL_REG_LEN_16BIT, 0x8D02, AR023Z_I2C_ADDRESS }, + { 0x00, CRL_REG_LEN_DELAY, 20, 0x00 }, + { 0xCAC8, CRL_REG_LEN_08BIT, 0x41, AR023Z_I2C_ADDRESS }, + { 0xCACA, CRL_REG_LEN_16BIT, 0x022F, AR023Z_I2C_ADDRESS }, + { 0xCACE, CRL_REG_LEN_16BIT, 0x010E, AR023Z_I2C_ADDRESS }, + { 0xCAD0, CRL_REG_LEN_16BIT, 0x0033, AR023Z_I2C_ADDRESS }, + { 0xCAD4, CRL_REG_LEN_16BIT, 0x001F, AR023Z_I2C_ADDRESS }, + { 0xCAD4, CRL_REG_LEN_16BIT, 0x001F, AR023Z_I2C_ADDRESS }, + { 0xCAD8, CRL_REG_LEN_16BIT, 0x0000, AR023Z_I2C_ADDRESS }, + { 0xCADA, CRL_REG_LEN_16BIT, 0x0000, AR023Z_I2C_ADDRESS }, + { 0xC806, CRL_REG_LEN_16BIT, 0x000C, AR023Z_I2C_ADDRESS }, + { 0xC80A, CRL_REG_LEN_16BIT, 0x078B, AR023Z_I2C_ADDRESS }, + { 0xC804, CRL_REG_LEN_16BIT, 0x0000, AR023Z_I2C_ADDRESS }, + { 0xC808, CRL_REG_LEN_16BIT, 0x0437, AR023Z_I2C_ADDRESS }, + { 0xC838, CRL_REG_LEN_16BIT, 0x0302, AR023Z_I2C_ADDRESS }, + { 0xC83A, CRL_REG_LEN_16BIT, 0x211B, AR023Z_I2C_ADDRESS }, + { 0xC840, CRL_REG_LEN_16BIT, 0x010C, AR023Z_I2C_ADDRESS }, + { 0xC844, CRL_REG_LEN_16BIT, 0x0802, AR023Z_I2C_ADDRESS }, + { 0xC844, CRL_REG_LEN_16BIT, 0x0801, AR023Z_I2C_ADDRESS }, + { 0xC80C, CRL_REG_LEN_16BIT, 0x04BA, AR023Z_I2C_ADDRESS }, + { 0xC80E, CRL_REG_LEN_16BIT, 0x3674, AR023Z_I2C_ADDRESS }, + { 0xC814, CRL_REG_LEN_16BIT, 0x049E, AR023Z_I2C_ADDRESS }, + { 0xC816, CRL_REG_LEN_16BIT, 0x08BC, AR023Z_I2C_ADDRESS }, + { 0xC846, CRL_REG_LEN_16BIT, 0x0000, AR023Z_I2C_ADDRESS }, + { 0xC890, CRL_REG_LEN_08BIT, 0x00, AR023Z_I2C_ADDRESS }, + { 0xC8A0, CRL_REG_LEN_16BIT, 0x0000, AR023Z_I2C_ADDRESS }, + { 0xC8A2, CRL_REG_LEN_16BIT, 0x0000, AR023Z_I2C_ADDRESS }, + { 0xC8A4, CRL_REG_LEN_16BIT, 0x0780, AR023Z_I2C_ADDRESS }, + { 0xC8A6, 
CRL_REG_LEN_16BIT, 0x0438, AR023Z_I2C_ADDRESS }, + { 0xC9F8, CRL_REG_LEN_16BIT, 0x0000, AR023Z_I2C_ADDRESS }, + { 0xC9FA, CRL_REG_LEN_16BIT, 0x0000, AR023Z_I2C_ADDRESS }, + { 0xC9FC, CRL_REG_LEN_16BIT, 0x0780, AR023Z_I2C_ADDRESS }, + { 0xC9FE, CRL_REG_LEN_16BIT, 0x0438, AR023Z_I2C_ADDRESS }, + { 0xCA00, CRL_REG_LEN_16BIT, 0x0000, AR023Z_I2C_ADDRESS }, + { 0xCA02, CRL_REG_LEN_16BIT, 0x0000, AR023Z_I2C_ADDRESS }, + { 0xCA04, CRL_REG_LEN_16BIT, 0x0780, AR023Z_I2C_ADDRESS }, + { 0xCA06, CRL_REG_LEN_16BIT, 0x0438, AR023Z_I2C_ADDRESS }, + { 0xCAE4, CRL_REG_LEN_16BIT, 0x0780, AR023Z_I2C_ADDRESS }, + { 0xCAE6, CRL_REG_LEN_16BIT, 0x0438, AR023Z_I2C_ADDRESS }, + { 0xCAE8, CRL_REG_LEN_16BIT, 0x0011, AR023Z_I2C_ADDRESS }, + { 0xCAE8, CRL_REG_LEN_16BIT, 0x0011, AR023Z_I2C_ADDRESS }, + { 0xCAEA, CRL_REG_LEN_08BIT, 0x00, AR023Z_I2C_ADDRESS }, + { 0xCAEB, CRL_REG_LEN_08BIT, 0x00, AR023Z_I2C_ADDRESS }, + { 0xCAF4, CRL_REG_LEN_16BIT, 0x249F, AR023Z_I2C_ADDRESS }, + { 0xCAF8, CRL_REG_LEN_08BIT, 0x0E, AR023Z_I2C_ADDRESS }, + { 0xCAFC, CRL_REG_LEN_16BIT, 0x4201, AR023Z_I2C_ADDRESS }, + { 0xCAFE, CRL_REG_LEN_16BIT, 0x08BC, AR023Z_I2C_ADDRESS }, + { 0xCB00, CRL_REG_LEN_16BIT, 0x0800, AR023Z_I2C_ADDRESS }, + { 0x8C16, CRL_REG_LEN_08BIT, 0x19, AR023Z_I2C_ADDRESS }, + { 0xCAC4, CRL_REG_LEN_16BIT, 0x0000, AR023Z_I2C_ADDRESS }, + { 0x3600, CRL_REG_LEN_16BIT, 0x00F0, AR023Z_I2C_ADDRESS }, + { 0x3602, CRL_REG_LEN_16BIT, 0xD789, AR023Z_I2C_ADDRESS }, + { 0x3604, CRL_REG_LEN_16BIT, 0x4D70, AR023Z_I2C_ADDRESS }, + { 0x3606, CRL_REG_LEN_16BIT, 0x6A8D, AR023Z_I2C_ADDRESS }, + { 0x3608, CRL_REG_LEN_16BIT, 0x7CEE, AR023Z_I2C_ADDRESS }, + { 0x360A, CRL_REG_LEN_16BIT, 0x00D0, AR023Z_I2C_ADDRESS }, + { 0x360C, CRL_REG_LEN_16BIT, 0x8F0B, AR023Z_I2C_ADDRESS }, + { 0x360E, CRL_REG_LEN_16BIT, 0x58B0, AR023Z_I2C_ADDRESS }, + { 0x3610, CRL_REG_LEN_16BIT, 0x2E2D, AR023Z_I2C_ADDRESS }, + { 0x3612, CRL_REG_LEN_16BIT, 0x0BCF, AR023Z_I2C_ADDRESS }, + { 0x3614, CRL_REG_LEN_16BIT, 0x00B0, AR023Z_I2C_ADDRESS }, + { 
0x3616, CRL_REG_LEN_16BIT, 0xC149, AR023Z_I2C_ADDRESS }, + { 0x3618, CRL_REG_LEN_16BIT, 0x4950, AR023Z_I2C_ADDRESS }, + { 0x361A, CRL_REG_LEN_16BIT, 0x024E, AR023Z_I2C_ADDRESS }, + { 0x361C, CRL_REG_LEN_16BIT, 0x0B4E, AR023Z_I2C_ADDRESS }, + { 0x361E, CRL_REG_LEN_16BIT, 0x00D0, AR023Z_I2C_ADDRESS }, + { 0x3620, CRL_REG_LEN_16BIT, 0xD2E9, AR023Z_I2C_ADDRESS }, + { 0x3622, CRL_REG_LEN_16BIT, 0x4D10, AR023Z_I2C_ADDRESS }, + { 0x3624, CRL_REG_LEN_16BIT, 0x67ED, AR023Z_I2C_ADDRESS }, + { 0x3626, CRL_REG_LEN_16BIT, 0x1ACF, AR023Z_I2C_ADDRESS }, + { 0x3628, CRL_REG_LEN_16BIT, 0x406B, AR023Z_I2C_ADDRESS }, + { 0x362A, CRL_REG_LEN_16BIT, 0x1FC9, AR023Z_I2C_ADDRESS }, + { 0x362C, CRL_REG_LEN_16BIT, 0x6750, AR023Z_I2C_ADDRESS }, + { 0x362E, CRL_REG_LEN_16BIT, 0x4E0F, AR023Z_I2C_ADDRESS }, + { 0x3630, CRL_REG_LEN_16BIT, 0xBCF3, AR023Z_I2C_ADDRESS }, + { 0x3632, CRL_REG_LEN_16BIT, 0x138C, AR023Z_I2C_ADDRESS }, + { 0x3634, CRL_REG_LEN_16BIT, 0x366A, AR023Z_I2C_ADDRESS }, + { 0x3636, CRL_REG_LEN_16BIT, 0x6390, AR023Z_I2C_ADDRESS }, + { 0x3638, CRL_REG_LEN_16BIT, 0x2E2F, AR023Z_I2C_ADDRESS }, + { 0x363A, CRL_REG_LEN_16BIT, 0xB9D3, AR023Z_I2C_ADDRESS }, + { 0x363C, CRL_REG_LEN_16BIT, 0x2B4A, AR023Z_I2C_ADDRESS }, + { 0x363E, CRL_REG_LEN_16BIT, 0x008B, AR023Z_I2C_ADDRESS }, + { 0x3640, CRL_REG_LEN_16BIT, 0x6B30, AR023Z_I2C_ADDRESS }, + { 0x3642, CRL_REG_LEN_16BIT, 0x710F, AR023Z_I2C_ADDRESS }, + { 0x3644, CRL_REG_LEN_16BIT, 0xC413, AR023Z_I2C_ADDRESS }, + { 0x3646, CRL_REG_LEN_16BIT, 0x2A4B, AR023Z_I2C_ADDRESS }, + { 0x3648, CRL_REG_LEN_16BIT, 0x080A, AR023Z_I2C_ADDRESS }, + { 0x364A, CRL_REG_LEN_16BIT, 0x6BD0, AR023Z_I2C_ADDRESS }, + { 0x364C, CRL_REG_LEN_16BIT, 0x0050, AR023Z_I2C_ADDRESS }, + { 0x364E, CRL_REG_LEN_16BIT, 0xC4D3, AR023Z_I2C_ADDRESS }, + { 0x3650, CRL_REG_LEN_16BIT, 0x6F90, AR023Z_I2C_ADDRESS }, + { 0x3652, CRL_REG_LEN_16BIT, 0x5A2F, AR023Z_I2C_ADDRESS }, + { 0x3654, CRL_REG_LEN_16BIT, 0xE631, AR023Z_I2C_ADDRESS }, + { 0x3656, CRL_REG_LEN_16BIT, 0x8812, 
AR023Z_I2C_ADDRESS }, + { 0x3658, CRL_REG_LEN_16BIT, 0x2155, AR023Z_I2C_ADDRESS }, + { 0x365A, CRL_REG_LEN_16BIT, 0x6A30, AR023Z_I2C_ADDRESS }, + { 0x365C, CRL_REG_LEN_16BIT, 0x7FCF, AR023Z_I2C_ADDRESS }, + { 0x365E, CRL_REG_LEN_16BIT, 0xE291, AR023Z_I2C_ADDRESS }, + { 0x3660, CRL_REG_LEN_16BIT, 0x9C92, AR023Z_I2C_ADDRESS }, + { 0x3662, CRL_REG_LEN_16BIT, 0x2C75, AR023Z_I2C_ADDRESS }, + { 0x3664, CRL_REG_LEN_16BIT, 0x5F90, AR023Z_I2C_ADDRESS }, + { 0x3666, CRL_REG_LEN_16BIT, 0x618F, AR023Z_I2C_ADDRESS }, + { 0x3668, CRL_REG_LEN_16BIT, 0xED91, AR023Z_I2C_ADDRESS }, + { 0x366A, CRL_REG_LEN_16BIT, 0x9FB2, AR023Z_I2C_ADDRESS }, + { 0x366C, CRL_REG_LEN_16BIT, 0x1915, AR023Z_I2C_ADDRESS }, + { 0x366E, CRL_REG_LEN_16BIT, 0x6E90, AR023Z_I2C_ADDRESS }, + { 0x3670, CRL_REG_LEN_16BIT, 0x5C0F, AR023Z_I2C_ADDRESS }, + { 0x3672, CRL_REG_LEN_16BIT, 0xE111, AR023Z_I2C_ADDRESS }, + { 0x3674, CRL_REG_LEN_16BIT, 0x9352, AR023Z_I2C_ADDRESS }, + { 0x3676, CRL_REG_LEN_16BIT, 0x2135, AR023Z_I2C_ADDRESS }, + { 0x3678, CRL_REG_LEN_16BIT, 0x7230, AR023Z_I2C_ADDRESS }, + { 0x367A, CRL_REG_LEN_16BIT, 0x2D92, AR023Z_I2C_ADDRESS }, + { 0x367C, CRL_REG_LEN_16BIT, 0xEEB5, AR023Z_I2C_ADDRESS }, + { 0x367E, CRL_REG_LEN_16BIT, 0x8495, AR023Z_I2C_ADDRESS }, + { 0x3680, CRL_REG_LEN_16BIT, 0x3C38, AR023Z_I2C_ADDRESS }, + { 0x3682, CRL_REG_LEN_16BIT, 0x7B50, AR023Z_I2C_ADDRESS }, + { 0x3684, CRL_REG_LEN_16BIT, 0x2332, AR023Z_I2C_ADDRESS }, + { 0x3686, CRL_REG_LEN_16BIT, 0xED55, AR023Z_I2C_ADDRESS }, + { 0x3688, CRL_REG_LEN_16BIT, 0x8355, AR023Z_I2C_ADDRESS }, + { 0x368A, CRL_REG_LEN_16BIT, 0x3978, AR023Z_I2C_ADDRESS }, + { 0x368C, CRL_REG_LEN_16BIT, 0x74F0, AR023Z_I2C_ADDRESS }, + { 0x368E, CRL_REG_LEN_16BIT, 0x4032, AR023Z_I2C_ADDRESS }, + { 0x3690, CRL_REG_LEN_16BIT, 0xF9B5, AR023Z_I2C_ADDRESS }, + { 0x3692, CRL_REG_LEN_16BIT, 0x8D75, AR023Z_I2C_ADDRESS }, + { 0x3694, CRL_REG_LEN_16BIT, 0x4338, AR023Z_I2C_ADDRESS }, + { 0x3696, CRL_REG_LEN_16BIT, 0x7550, AR023Z_I2C_ADDRESS }, + { 0x3698, 
CRL_REG_LEN_16BIT, 0x2CB2, AR023Z_I2C_ADDRESS }, + { 0x369A, CRL_REG_LEN_16BIT, 0xF135, AR023Z_I2C_ADDRESS }, + { 0x369C, CRL_REG_LEN_16BIT, 0x80F5, AR023Z_I2C_ADDRESS }, + { 0x369E, CRL_REG_LEN_16BIT, 0x3B98, AR023Z_I2C_ADDRESS }, + { 0x36A0, CRL_REG_LEN_16BIT, 0x90F2, AR023Z_I2C_ADDRESS }, + { 0x36A2, CRL_REG_LEN_16BIT, 0xD4D2, AR023Z_I2C_ADDRESS }, + { 0x36A4, CRL_REG_LEN_16BIT, 0x35B7, AR023Z_I2C_ADDRESS }, + { 0x36A6, CRL_REG_LEN_16BIT, 0x1A75, AR023Z_I2C_ADDRESS }, + { 0x36A8, CRL_REG_LEN_16BIT, 0x9B5A, AR023Z_I2C_ADDRESS }, + { 0x36AA, CRL_REG_LEN_16BIT, 0xFF71, AR023Z_I2C_ADDRESS }, + { 0x36AC, CRL_REG_LEN_16BIT, 0xC832, AR023Z_I2C_ADDRESS }, + { 0x36AE, CRL_REG_LEN_16BIT, 0x3277, AR023Z_I2C_ADDRESS }, + { 0x36B0, CRL_REG_LEN_16BIT, 0x16F5, AR023Z_I2C_ADDRESS }, + { 0x36B2, CRL_REG_LEN_16BIT, 0x97BA, AR023Z_I2C_ADDRESS }, + { 0x36B4, CRL_REG_LEN_16BIT, 0x95B2, AR023Z_I2C_ADDRESS }, + { 0x36B6, CRL_REG_LEN_16BIT, 0x9373, AR023Z_I2C_ADDRESS }, + { 0x36B8, CRL_REG_LEN_16BIT, 0x3C77, AR023Z_I2C_ADDRESS }, + { 0x36BA, CRL_REG_LEN_16BIT, 0x6115, AR023Z_I2C_ADDRESS }, + { 0x36BC, CRL_REG_LEN_16BIT, 0xA0BA, AR023Z_I2C_ADDRESS }, + { 0x36BE, CRL_REG_LEN_16BIT, 0x95B2, AR023Z_I2C_ADDRESS }, + { 0x36C0, CRL_REG_LEN_16BIT, 0xC492, AR023Z_I2C_ADDRESS }, + { 0x36C2, CRL_REG_LEN_16BIT, 0x3517, AR023Z_I2C_ADDRESS }, + { 0x36C4, CRL_REG_LEN_16BIT, 0x15B5, AR023Z_I2C_ADDRESS }, + { 0x36C6, CRL_REG_LEN_16BIT, 0x9A9A, AR023Z_I2C_ADDRESS }, + { 0x36C8, CRL_REG_LEN_16BIT, 0x018A, AR023Z_I2C_ADDRESS }, + { 0x36CA, CRL_REG_LEN_16BIT, 0x03BE, AR023Z_I2C_ADDRESS }, + { 0xCAC4, CRL_REG_LEN_16BIT, 0x0001, AR023Z_I2C_ADDRESS }, + { 0xC91E, CRL_REG_LEN_16BIT, 0x0A8C, AR023Z_I2C_ADDRESS }, + { 0xC920, CRL_REG_LEN_16BIT, 0x0FA0, AR023Z_I2C_ADDRESS }, + { 0xC922, CRL_REG_LEN_16BIT, 0x1964, AR023Z_I2C_ADDRESS }, + { 0xC924, CRL_REG_LEN_16BIT, 0x09C4, AR023Z_I2C_ADDRESS }, + { 0xC926, CRL_REG_LEN_16BIT, 0x1964, AR023Z_I2C_ADDRESS }, + { 0xC912, CRL_REG_LEN_16BIT, 0x005F, AR023Z_I2C_ADDRESS 
}, + { 0xC914, CRL_REG_LEN_16BIT, 0x016D, AR023Z_I2C_ADDRESS }, + { 0xC916, CRL_REG_LEN_16BIT, 0x00AF, AR023Z_I2C_ADDRESS }, + { 0xC918, CRL_REG_LEN_16BIT, 0x0148, AR023Z_I2C_ADDRESS }, + { 0xC91A, CRL_REG_LEN_16BIT, 0x0096, AR023Z_I2C_ADDRESS }, + { 0xC91C, CRL_REG_LEN_16BIT, 0x00B4, AR023Z_I2C_ADDRESS }, + { 0xC982, CRL_REG_LEN_08BIT, 0x82, AR023Z_I2C_ADDRESS }, + { 0xC983, CRL_REG_LEN_08BIT, 0x80, AR023Z_I2C_ADDRESS }, + { 0xC984, CRL_REG_LEN_08BIT, 0x86, AR023Z_I2C_ADDRESS }, + { 0xC985, CRL_REG_LEN_08BIT, 0x84, AR023Z_I2C_ADDRESS }, + { 0xC986, CRL_REG_LEN_08BIT, 0x82, AR023Z_I2C_ADDRESS }, + { 0xC987, CRL_REG_LEN_08BIT, 0x80, AR023Z_I2C_ADDRESS }, + { 0xC980, CRL_REG_LEN_16BIT, 0x1450, AR023Z_I2C_ADDRESS }, + { 0xC8DC, CRL_REG_LEN_16BIT, 0x013E, AR023Z_I2C_ADDRESS }, + { 0xC8DE, CRL_REG_LEN_16BIT, 0xFFDB, AR023Z_I2C_ADDRESS }, + { 0xC8E0, CRL_REG_LEN_16BIT, 0xFFE7, AR023Z_I2C_ADDRESS }, + { 0xC8E2, CRL_REG_LEN_16BIT, 0xFF75, AR023Z_I2C_ADDRESS }, + { 0xC8E4, CRL_REG_LEN_16BIT, 0x01B8, AR023Z_I2C_ADDRESS }, + { 0xC8E6, CRL_REG_LEN_16BIT, 0xFFD2, AR023Z_I2C_ADDRESS }, + { 0xC8E8, CRL_REG_LEN_16BIT, 0xFF52, AR023Z_I2C_ADDRESS }, + { 0xC8EA, CRL_REG_LEN_16BIT, 0xFF1A, AR023Z_I2C_ADDRESS }, + { 0xC8EC, CRL_REG_LEN_16BIT, 0x0295, AR023Z_I2C_ADDRESS }, + { 0xC8EE, CRL_REG_LEN_16BIT, 0x01B0, AR023Z_I2C_ADDRESS }, + { 0xC8F0, CRL_REG_LEN_16BIT, 0xFF40, AR023Z_I2C_ADDRESS }, + { 0xC8F2, CRL_REG_LEN_16BIT, 0x0010, AR023Z_I2C_ADDRESS }, + { 0xC8F4, CRL_REG_LEN_16BIT, 0xFF87, AR023Z_I2C_ADDRESS }, + { 0xC8F6, CRL_REG_LEN_16BIT, 0x01A2, AR023Z_I2C_ADDRESS }, + { 0xC8F8, CRL_REG_LEN_16BIT, 0xFFD7, AR023Z_I2C_ADDRESS }, + { 0xC8FA, CRL_REG_LEN_16BIT, 0xFFD3, AR023Z_I2C_ADDRESS }, + { 0xC8FC, CRL_REG_LEN_16BIT, 0xFF63, AR023Z_I2C_ADDRESS }, + { 0xC8FE, CRL_REG_LEN_16BIT, 0x01CB, AR023Z_I2C_ADDRESS }, + { 0xC900, CRL_REG_LEN_16BIT, 0x0154, AR023Z_I2C_ADDRESS }, + { 0xC902, CRL_REG_LEN_16BIT, 0xFFCD, AR023Z_I2C_ADDRESS }, + { 0xC904, CRL_REG_LEN_16BIT, 0xFFDE, 
AR023Z_I2C_ADDRESS }, + { 0xC906, CRL_REG_LEN_16BIT, 0xFFB1, AR023Z_I2C_ADDRESS }, + { 0xC908, CRL_REG_LEN_16BIT, 0x013B, AR023Z_I2C_ADDRESS }, + { 0xC90A, CRL_REG_LEN_16BIT, 0xFFEC, AR023Z_I2C_ADDRESS }, + { 0xC90C, CRL_REG_LEN_16BIT, 0xFFD9, AR023Z_I2C_ADDRESS }, + { 0xC90E, CRL_REG_LEN_16BIT, 0xFF9C, AR023Z_I2C_ADDRESS }, + { 0xC910, CRL_REG_LEN_16BIT, 0x018B, AR023Z_I2C_ADDRESS }, + { 0xC97D, CRL_REG_LEN_08BIT, 0x10, AR023Z_I2C_ADDRESS }, + { 0xC92A, CRL_REG_LEN_16BIT, 0x0020, AR023Z_I2C_ADDRESS }, + { 0xC92C, CRL_REG_LEN_16BIT, 0x0018, AR023Z_I2C_ADDRESS }, + { 0xC92E, CRL_REG_LEN_16BIT, 0x0080, AR023Z_I2C_ADDRESS }, + { 0xC930, CRL_REG_LEN_16BIT, 0x0080, AR023Z_I2C_ADDRESS }, + { 0xC932, CRL_REG_LEN_16BIT, 0x0005, AR023Z_I2C_ADDRESS }, + { 0xC934, CRL_REG_LEN_16BIT, 0xFFE0, AR023Z_I2C_ADDRESS }, + { 0xC936, CRL_REG_LEN_08BIT, 0x33, AR023Z_I2C_ADDRESS }, + { 0xC937, CRL_REG_LEN_08BIT, 0x26, AR023Z_I2C_ADDRESS }, + { 0xC938, CRL_REG_LEN_16BIT, 0x0000, AR023Z_I2C_ADDRESS }, + { 0xC93A, CRL_REG_LEN_16BIT, 0x0047, AR023Z_I2C_ADDRESS }, + { 0xC93C, CRL_REG_LEN_16BIT, 0x0000, AR023Z_I2C_ADDRESS }, + { 0xC93E, CRL_REG_LEN_16BIT, 0x0000, AR023Z_I2C_ADDRESS }, + { 0xC93E, CRL_REG_LEN_16BIT, 0x0000, AR023Z_I2C_ADDRESS }, + { 0xC940, CRL_REG_LEN_16BIT, 0x0000, AR023Z_I2C_ADDRESS }, + { 0xC942, CRL_REG_LEN_16BIT, 0x0022, AR023Z_I2C_ADDRESS }, + { 0xC944, CRL_REG_LEN_16BIT, 0x0000, AR023Z_I2C_ADDRESS }, + { 0xC946, CRL_REG_LEN_16BIT, 0x0000, AR023Z_I2C_ADDRESS }, + { 0xC948, CRL_REG_LEN_16BIT, 0x0000, AR023Z_I2C_ADDRESS }, + { 0xC94A, CRL_REG_LEN_16BIT, 0x0002, AR023Z_I2C_ADDRESS }, + { 0xC94C, CRL_REG_LEN_16BIT, 0x3000, AR023Z_I2C_ADDRESS }, + { 0xC94E, CRL_REG_LEN_16BIT, 0x0000, AR023Z_I2C_ADDRESS }, + { 0xC950, CRL_REG_LEN_16BIT, 0x0000, AR023Z_I2C_ADDRESS }, + { 0xC952, CRL_REG_LEN_16BIT, 0x0123, AR023Z_I2C_ADDRESS }, + { 0xC954, CRL_REG_LEN_16BIT, 0x2000, AR023Z_I2C_ADDRESS }, + { 0xC956, CRL_REG_LEN_16BIT, 0x0000, AR023Z_I2C_ADDRESS }, + { 0xC958, CRL_REG_LEN_16BIT, 
0x0150, AR023Z_I2C_ADDRESS }, + { 0xC95A, CRL_REG_LEN_16BIT, 0x5300, AR023Z_I2C_ADDRESS }, + { 0xC95C, CRL_REG_LEN_16BIT, 0x1112, AR023Z_I2C_ADDRESS }, + { 0xC95E, CRL_REG_LEN_16BIT, 0x2010, AR023Z_I2C_ADDRESS }, + { 0xC960, CRL_REG_LEN_16BIT, 0x5574, AR023Z_I2C_ADDRESS }, + { 0xC962, CRL_REG_LEN_16BIT, 0x5000, AR023Z_I2C_ADDRESS }, + { 0xC964, CRL_REG_LEN_16BIT, 0x0202, AR023Z_I2C_ADDRESS }, + { 0xC966, CRL_REG_LEN_16BIT, 0x5300, AR023Z_I2C_ADDRESS }, + { 0xC968, CRL_REG_LEN_16BIT, 0x0371, AR023Z_I2C_ADDRESS }, + { 0xC96A, CRL_REG_LEN_16BIT, 0x0400, AR023Z_I2C_ADDRESS }, + { 0xC96C, CRL_REG_LEN_16BIT, 0x0002, AR023Z_I2C_ADDRESS }, + { 0xC96E, CRL_REG_LEN_16BIT, 0x2000, AR023Z_I2C_ADDRESS }, + { 0xC970, CRL_REG_LEN_16BIT, 0x0023, AR023Z_I2C_ADDRESS }, + { 0xC972, CRL_REG_LEN_16BIT, 0x0330, AR023Z_I2C_ADDRESS }, + { 0xC974, CRL_REG_LEN_16BIT, 0x0001, AR023Z_I2C_ADDRESS }, + { 0xC976, CRL_REG_LEN_16BIT, 0x0000, AR023Z_I2C_ADDRESS }, + { 0xCC02, CRL_REG_LEN_16BIT, 0x0083, AR023Z_I2C_ADDRESS }, + { 0xC88C, CRL_REG_LEN_16BIT, 0x0080, AR023Z_I2C_ADDRESS }, + { 0xC84A, CRL_REG_LEN_16BIT, 0x0BA0, AR023Z_I2C_ADDRESS }, + { 0xC84C, CRL_REG_LEN_16BIT, 0x0FA0, AR023Z_I2C_ADDRESS }, + { 0xC84E, CRL_REG_LEN_16BIT, 0x0800, AR023Z_I2C_ADDRESS }, + { 0xCA0C, CRL_REG_LEN_16BIT, 0xF8C0, AR023Z_I2C_ADDRESS }, + { 0xC846, CRL_REG_LEN_16BIT, 0x0000, AR023Z_I2C_ADDRESS }, + { 0xCAE8, CRL_REG_LEN_16BIT, 0x0010, AR023Z_I2C_ADDRESS }, + { 0x3210, CRL_REG_LEN_16BIT, 0x0EB0, AR023Z_I2C_ADDRESS }, + { 0xBC02, CRL_REG_LEN_16BIT, 0x03C5, AR023Z_I2C_ADDRESS }, + { 0xA802, CRL_REG_LEN_16BIT, 0x001C, AR023Z_I2C_ADDRESS }, + { 0xA812, CRL_REG_LEN_08BIT, 0x08, AR023Z_I2C_ADDRESS }, + { 0xA81C, CRL_REG_LEN_08BIT, 0x8C, AR023Z_I2C_ADDRESS }, + { 0xC8CE, CRL_REG_LEN_16BIT, 0x0035, AR023Z_I2C_ADDRESS }, + { 0xC8CA, CRL_REG_LEN_16BIT, 0x0030, AR023Z_I2C_ADDRESS }, + { 0xC8CC, CRL_REG_LEN_16BIT, 0x0180, AR023Z_I2C_ADDRESS }, + { 0xC8C6, CRL_REG_LEN_16BIT, 0x008C, AR023Z_I2C_ADDRESS }, + { 0xC8C8, 
CRL_REG_LEN_16BIT, 0x03FF, AR023Z_I2C_ADDRESS }, + { 0xC8BE, CRL_REG_LEN_16BIT, 0x0000, AR023Z_I2C_ADDRESS }, + { 0xA83C, CRL_REG_LEN_16BIT, 0x03E6, AR023Z_I2C_ADDRESS }, + { 0xA83E, CRL_REG_LEN_16BIT, 0x0300, AR023Z_I2C_ADDRESS }, + { 0xA840, CRL_REG_LEN_16BIT, 0x0133, AR023Z_I2C_ADDRESS }, + { 0xC988, CRL_REG_LEN_16BIT, 0x0E17, AR023Z_I2C_ADDRESS }, + { 0x2402, CRL_REG_LEN_16BIT, 0x0008, AR023Z_I2C_ADDRESS }, + { 0xBCBE, CRL_REG_LEN_16BIT, 0xFFFF, AR023Z_I2C_ADDRESS }, + { 0xBCC0, CRL_REG_LEN_16BIT, 0x00C8, AR023Z_I2C_ADDRESS }, + { 0xBCBA, CRL_REG_LEN_16BIT, 0x0010, AR023Z_I2C_ADDRESS }, + { 0xBCBC, CRL_REG_LEN_16BIT, 0x0017, AR023Z_I2C_ADDRESS }, + { 0xBCC2, CRL_REG_LEN_16BIT, 0x0000, AR023Z_I2C_ADDRESS }, + { 0xBCC4, CRL_REG_LEN_16BIT, 0x003B, AR023Z_I2C_ADDRESS }, + { 0xC9CC, CRL_REG_LEN_16BIT, 0xFD00, AR023Z_I2C_ADDRESS }, + { 0xC9CE, CRL_REG_LEN_16BIT, 0x0100, AR023Z_I2C_ADDRESS }, + { 0xC99A, CRL_REG_LEN_16BIT, 0x0600, AR023Z_I2C_ADDRESS }, + { 0xC99C, CRL_REG_LEN_16BIT, 0x0B00, AR023Z_I2C_ADDRESS }, + { 0xC9A0, CRL_REG_LEN_16BIT, 0x00C8, AR023Z_I2C_ADDRESS }, + { 0xC9A2, CRL_REG_LEN_16BIT, 0x0B54, AR023Z_I2C_ADDRESS }, + { 0x2414, CRL_REG_LEN_16BIT, 0x0BA0, AR023Z_I2C_ADDRESS }, + { 0x2416, CRL_REG_LEN_16BIT, 0x0FA0, AR023Z_I2C_ADDRESS }, + { 0x2418, CRL_REG_LEN_16BIT, 0xC350, AR023Z_I2C_ADDRESS }, + { 0x241A, CRL_REG_LEN_16BIT, 0xFA00, AR023Z_I2C_ADDRESS }, + { 0x241C, CRL_REG_LEN_16BIT, 0x0005, AR023Z_I2C_ADDRESS }, + { 0x241E, CRL_REG_LEN_16BIT, 0x0050, AR023Z_I2C_ADDRESS }, + { 0x2420, CRL_REG_LEN_16BIT, 0x00A5, AR023Z_I2C_ADDRESS }, + { 0x2422, CRL_REG_LEN_16BIT, 0x00A5, AR023Z_I2C_ADDRESS }, + { 0x2424, CRL_REG_LEN_16BIT, 0x00A5, AR023Z_I2C_ADDRESS }, + { 0x2426, CRL_REG_LEN_16BIT, 0x0001, AR023Z_I2C_ADDRESS }, + { 0xC996, CRL_REG_LEN_16BIT, 0x03E8, AR023Z_I2C_ADDRESS }, + { 0xC998, CRL_REG_LEN_16BIT, 0x03E8, AR023Z_I2C_ADDRESS }, + { 0xC98A, CRL_REG_LEN_16BIT, 0x000F, AR023Z_I2C_ADDRESS }, + { 0xC9E6, CRL_REG_LEN_16BIT, 0x0AF0, AR023Z_I2C_ADDRESS 
}, + { 0xCA2A, CRL_REG_LEN_08BIT, 0x32, AR023Z_I2C_ADDRESS }, + { 0xCA2B, CRL_REG_LEN_08BIT, 0x05, AR023Z_I2C_ADDRESS }, + { 0xCA2E, CRL_REG_LEN_08BIT, 0x32, AR023Z_I2C_ADDRESS }, + { 0xCA2F, CRL_REG_LEN_08BIT, 0x0A, AR023Z_I2C_ADDRESS }, + { 0x3222, CRL_REG_LEN_16BIT, 0x0912, AR023Z_I2C_ADDRESS }, + { 0x3224, CRL_REG_LEN_16BIT, 0x0612, AR023Z_I2C_ADDRESS }, + { 0xCAB4, CRL_REG_LEN_16BIT, 0x0000, AR023Z_I2C_ADDRESS }, + { 0xCAAE, CRL_REG_LEN_16BIT, 0x0022, AR023Z_I2C_ADDRESS }, + { 0x3414, CRL_REG_LEN_16BIT, 0x3700, AR023Z_I2C_ADDRESS }, + { 0x3408, CRL_REG_LEN_16BIT, 0x3700, AR023Z_I2C_ADDRESS }, + { 0x340C, CRL_REG_LEN_16BIT, 0x2A00, AR023Z_I2C_ADDRESS }, + { 0x3412, CRL_REG_LEN_16BIT, 0x0400, AR023Z_I2C_ADDRESS }, + { 0x3416, CRL_REG_LEN_16BIT, 0x0036, AR023Z_I2C_ADDRESS }, + { 0x341E, CRL_REG_LEN_16BIT, 0x0004, AR023Z_I2C_ADDRESS }, + { 0x3420, CRL_REG_LEN_16BIT, 0x2A3B, AR023Z_I2C_ADDRESS }, + { 0x341A, CRL_REG_LEN_16BIT, 0x0A00, AR023Z_I2C_ADDRESS }, + { 0x3400, CRL_REG_LEN_16BIT, 0x0800, AR023Z_I2C_ADDRESS }, + { 0x3402, CRL_REG_LEN_16BIT, 0x073B, AR023Z_I2C_ADDRESS }, + { 0x3406, CRL_REG_LEN_16BIT, 0x0500, AR023Z_I2C_ADDRESS }, + { 0x3404, CRL_REG_LEN_16BIT, 0x3E1E, AR023Z_I2C_ADDRESS }, + { 0x3454, CRL_REG_LEN_16BIT, 0x0004, AR023Z_I2C_ADDRESS }, + { 0x3432, CRL_REG_LEN_16BIT, 0x000B, AR023Z_I2C_ADDRESS }, + { 0x3452, CRL_REG_LEN_16BIT, 0x000B, AR023Z_I2C_ADDRESS }, + { 0x345A, CRL_REG_LEN_16BIT, 0x000B, AR023Z_I2C_ADDRESS }, + { 0x3462, CRL_REG_LEN_16BIT, 0x0000, AR023Z_I2C_ADDRESS }, + { 0x344A, CRL_REG_LEN_16BIT, 0x0007, AR023Z_I2C_ADDRESS }, + { 0x342E, CRL_REG_LEN_16BIT, 0x0006, AR023Z_I2C_ADDRESS }, + { 0xCA20, CRL_REG_LEN_16BIT, 0x0100, AR023Z_I2C_ADDRESS }, + { 0xCA22, CRL_REG_LEN_16BIT, 0x0800, AR023Z_I2C_ADDRESS }, + { 0xCA24, CRL_REG_LEN_16BIT, 0x0C80, AR023Z_I2C_ADDRESS }, + { 0xCA26, CRL_REG_LEN_16BIT, 0x002D, AR023Z_I2C_ADDRESS }, + { 0xCA78, CRL_REG_LEN_16BIT, 0x0030, AR023Z_I2C_ADDRESS }, + { 0xCA80, CRL_REG_LEN_16BIT, 0x0056, 
AR023Z_I2C_ADDRESS }, + { 0xCA88, CRL_REG_LEN_16BIT, 0x0100, AR023Z_I2C_ADDRESS }, + { 0xCA90, CRL_REG_LEN_16BIT, 0x0200, AR023Z_I2C_ADDRESS }, + { 0xCA7A, CRL_REG_LEN_16BIT, 0x002D, AR023Z_I2C_ADDRESS }, + { 0xCA7C, CRL_REG_LEN_16BIT, 0x007D, AR023Z_I2C_ADDRESS }, + { 0xCA82, CRL_REG_LEN_16BIT, 0x0050, AR023Z_I2C_ADDRESS }, + { 0xCA84, CRL_REG_LEN_16BIT, 0x007D, AR023Z_I2C_ADDRESS }, + { 0xCA8A, CRL_REG_LEN_16BIT, 0x00B8, AR023Z_I2C_ADDRESS }, + { 0xCA8C, CRL_REG_LEN_16BIT, 0x007D, AR023Z_I2C_ADDRESS }, + { 0xCA92, CRL_REG_LEN_16BIT, 0x0173, AR023Z_I2C_ADDRESS }, + { 0xCA94, CRL_REG_LEN_16BIT, 0x007D, AR023Z_I2C_ADDRESS }, + { 0xCB20, CRL_REG_LEN_16BIT, 0x002D, AR023Z_I2C_ADDRESS }, + { 0xCB22, CRL_REG_LEN_16BIT, 0x007D, AR023Z_I2C_ADDRESS }, + { 0xCB24, CRL_REG_LEN_16BIT, 0x0050, AR023Z_I2C_ADDRESS }, + { 0xCB26, CRL_REG_LEN_16BIT, 0x007D, AR023Z_I2C_ADDRESS }, + { 0xCB28, CRL_REG_LEN_16BIT, 0x00B8, AR023Z_I2C_ADDRESS }, + { 0xCB2A, CRL_REG_LEN_16BIT, 0x007D, AR023Z_I2C_ADDRESS }, + { 0xCB2C, CRL_REG_LEN_16BIT, 0x0180, AR023Z_I2C_ADDRESS }, + { 0xCB2E, CRL_REG_LEN_16BIT, 0x007D, AR023Z_I2C_ADDRESS }, + { 0xCB40, CRL_REG_LEN_16BIT, 0x0000, AR023Z_I2C_ADDRESS }, + { 0xCB42, CRL_REG_LEN_16BIT, 0x07D0, AR023Z_I2C_ADDRESS }, + { 0xCB44, CRL_REG_LEN_16BIT, 0x0000, AR023Z_I2C_ADDRESS }, + { 0xCB46, CRL_REG_LEN_16BIT, 0x0DAC, AR023Z_I2C_ADDRESS }, + { 0xCA70, CRL_REG_LEN_16BIT, 0x0003, AR023Z_I2C_ADDRESS }, + { 0xCA72, CRL_REG_LEN_16BIT, 0x0003, AR023Z_I2C_ADDRESS }, + { 0xCA74, CRL_REG_LEN_16BIT, 0x03E8, AR023Z_I2C_ADDRESS }, + { 0xCA76, CRL_REG_LEN_16BIT, 0x0D00, AR023Z_I2C_ADDRESS }, + { 0xCA42, CRL_REG_LEN_08BIT, 0x02, AR023Z_I2C_ADDRESS }, + { 0xCA43, CRL_REG_LEN_08BIT, 0x16, AR023Z_I2C_ADDRESS }, + { 0xCA48, CRL_REG_LEN_08BIT, 0x02, AR023Z_I2C_ADDRESS }, + { 0xCA49, CRL_REG_LEN_08BIT, 0x16, AR023Z_I2C_ADDRESS }, + { 0xCA4E, CRL_REG_LEN_08BIT, 0x14, AR023Z_I2C_ADDRESS }, + { 0xCA4F, CRL_REG_LEN_08BIT, 0x04, AR023Z_I2C_ADDRESS }, + { 0xCA5E, CRL_REG_LEN_08BIT, 0x01, 
AR023Z_I2C_ADDRESS }, + { 0xCA5F, CRL_REG_LEN_08BIT, 0x16, AR023Z_I2C_ADDRESS }, + { 0xCA64, CRL_REG_LEN_08BIT, 0x01, AR023Z_I2C_ADDRESS }, + { 0xCA65, CRL_REG_LEN_08BIT, 0x16, AR023Z_I2C_ADDRESS }, + { 0xCA6A, CRL_REG_LEN_08BIT, 0x1E, AR023Z_I2C_ADDRESS }, + { 0xCA6B, CRL_REG_LEN_08BIT, 0x05, AR023Z_I2C_ADDRESS }, + { 0xBC0A, CRL_REG_LEN_16BIT, 0x0000, AR023Z_I2C_ADDRESS }, + { 0xBC0C, CRL_REG_LEN_16BIT, 0x000A, AR023Z_I2C_ADDRESS }, + { 0xBC0E, CRL_REG_LEN_16BIT, 0x000B, AR023Z_I2C_ADDRESS }, + { 0xBC10, CRL_REG_LEN_16BIT, 0x001A, AR023Z_I2C_ADDRESS }, + { 0xBC12, CRL_REG_LEN_16BIT, 0x0027, AR023Z_I2C_ADDRESS }, + { 0xBC14, CRL_REG_LEN_16BIT, 0x0032, AR023Z_I2C_ADDRESS }, + { 0xBC16, CRL_REG_LEN_16BIT, 0x003D, AR023Z_I2C_ADDRESS }, + { 0xBC18, CRL_REG_LEN_16BIT, 0x0046, AR023Z_I2C_ADDRESS }, + { 0xBC1A, CRL_REG_LEN_16BIT, 0x004F, AR023Z_I2C_ADDRESS }, + { 0xBC1C, CRL_REG_LEN_16BIT, 0x005F, AR023Z_I2C_ADDRESS }, + { 0xBC1E, CRL_REG_LEN_16BIT, 0x006D, AR023Z_I2C_ADDRESS }, + { 0xBC20, CRL_REG_LEN_16BIT, 0x007A, AR023Z_I2C_ADDRESS }, + { 0xBC22, CRL_REG_LEN_16BIT, 0x0087, AR023Z_I2C_ADDRESS }, + { 0xBC24, CRL_REG_LEN_16BIT, 0x009D, AR023Z_I2C_ADDRESS }, + { 0xBC26, CRL_REG_LEN_16BIT, 0x00B1, AR023Z_I2C_ADDRESS }, + { 0xBC28, CRL_REG_LEN_16BIT, 0x00C4, AR023Z_I2C_ADDRESS }, + { 0xBC2A, CRL_REG_LEN_16BIT, 0x00D6, AR023Z_I2C_ADDRESS }, + { 0xBC2C, CRL_REG_LEN_16BIT, 0x00F5, AR023Z_I2C_ADDRESS }, + { 0xBC2E, CRL_REG_LEN_16BIT, 0x0112, AR023Z_I2C_ADDRESS }, + { 0xBC30, CRL_REG_LEN_16BIT, 0x012D, AR023Z_I2C_ADDRESS }, + { 0xBC32, CRL_REG_LEN_16BIT, 0x0145, AR023Z_I2C_ADDRESS }, + { 0xBC34, CRL_REG_LEN_16BIT, 0x0172, AR023Z_I2C_ADDRESS }, + { 0xBC36, CRL_REG_LEN_16BIT, 0x019B, AR023Z_I2C_ADDRESS }, + { 0xBC38, CRL_REG_LEN_16BIT, 0x01C1, AR023Z_I2C_ADDRESS }, + { 0xBC3A, CRL_REG_LEN_16BIT, 0x01E3, AR023Z_I2C_ADDRESS }, + { 0xBC3C, CRL_REG_LEN_16BIT, 0x0223, AR023Z_I2C_ADDRESS }, + { 0xBC3E, CRL_REG_LEN_16BIT, 0x025D, AR023Z_I2C_ADDRESS }, + { 0xBC40, CRL_REG_LEN_16BIT, 
0x0292, AR023Z_I2C_ADDRESS }, + { 0xBC42, CRL_REG_LEN_16BIT, 0x02C3, AR023Z_I2C_ADDRESS }, + { 0xBC44, CRL_REG_LEN_16BIT, 0x031D, AR023Z_I2C_ADDRESS }, + { 0xBC46, CRL_REG_LEN_16BIT, 0x036F, AR023Z_I2C_ADDRESS }, + { 0xBC48, CRL_REG_LEN_16BIT, 0x03B9, AR023Z_I2C_ADDRESS }, + { 0xBC4A, CRL_REG_LEN_16BIT, 0x03FF, AR023Z_I2C_ADDRESS }, + { 0xBC4C, CRL_REG_LEN_16BIT, 0x0000, AR023Z_I2C_ADDRESS }, + { 0xBC4E, CRL_REG_LEN_16BIT, 0x0002, AR023Z_I2C_ADDRESS }, + { 0xBC50, CRL_REG_LEN_16BIT, 0x0004, AR023Z_I2C_ADDRESS }, + { 0xBC52, CRL_REG_LEN_16BIT, 0x0007, AR023Z_I2C_ADDRESS }, + { 0xBC54, CRL_REG_LEN_16BIT, 0x0009, AR023Z_I2C_ADDRESS }, + { 0xBC56, CRL_REG_LEN_16BIT, 0x000B, AR023Z_I2C_ADDRESS }, + { 0xBC58, CRL_REG_LEN_16BIT, 0x000D, AR023Z_I2C_ADDRESS }, + { 0xBC5A, CRL_REG_LEN_16BIT, 0x000F, AR023Z_I2C_ADDRESS }, + { 0xBC5C, CRL_REG_LEN_16BIT, 0x0011, AR023Z_I2C_ADDRESS }, + { 0xBC5E, CRL_REG_LEN_16BIT, 0x0016, AR023Z_I2C_ADDRESS }, + { 0xBC60, CRL_REG_LEN_16BIT, 0x001A, AR023Z_I2C_ADDRESS }, + { 0xBC62, CRL_REG_LEN_16BIT, 0x001F, AR023Z_I2C_ADDRESS }, + { 0xBC64, CRL_REG_LEN_16BIT, 0x0023, AR023Z_I2C_ADDRESS }, + { 0xBC66, CRL_REG_LEN_16BIT, 0x002C, AR023Z_I2C_ADDRESS }, + { 0xBC68, CRL_REG_LEN_16BIT, 0x0034, AR023Z_I2C_ADDRESS }, + { 0xBC6A, CRL_REG_LEN_16BIT, 0x003D, AR023Z_I2C_ADDRESS }, + { 0xBC6C, CRL_REG_LEN_16BIT, 0x0046, AR023Z_I2C_ADDRESS }, + { 0xBC6E, CRL_REG_LEN_16BIT, 0x0057, AR023Z_I2C_ADDRESS }, + { 0xBC70, CRL_REG_LEN_16BIT, 0x0069, AR023Z_I2C_ADDRESS }, + { 0xBC72, CRL_REG_LEN_16BIT, 0x007A, AR023Z_I2C_ADDRESS }, + { 0xBC74, CRL_REG_LEN_16BIT, 0x008C, AR023Z_I2C_ADDRESS }, + { 0xBC76, CRL_REG_LEN_16BIT, 0x00AF, AR023Z_I2C_ADDRESS }, + { 0xBC78, CRL_REG_LEN_16BIT, 0x00D2, AR023Z_I2C_ADDRESS }, + { 0xBC7A, CRL_REG_LEN_16BIT, 0x00F5, AR023Z_I2C_ADDRESS }, + { 0xBC7C, CRL_REG_LEN_16BIT, 0x0118, AR023Z_I2C_ADDRESS }, + { 0xBC7E, CRL_REG_LEN_16BIT, 0x015E, AR023Z_I2C_ADDRESS }, + { 0xBC80, CRL_REG_LEN_16BIT, 0x01A4, AR023Z_I2C_ADDRESS }, + { 0xBC82, 
CRL_REG_LEN_16BIT, 0x01EA, AR023Z_I2C_ADDRESS }, + { 0xBC84, CRL_REG_LEN_16BIT, 0x022F, AR023Z_I2C_ADDRESS }, + { 0xBC86, CRL_REG_LEN_16BIT, 0x02B4, AR023Z_I2C_ADDRESS }, + { 0xBC88, CRL_REG_LEN_16BIT, 0x032B, AR023Z_I2C_ADDRESS }, + { 0xBC8A, CRL_REG_LEN_16BIT, 0x0399, AR023Z_I2C_ADDRESS }, + { 0xBC8C, CRL_REG_LEN_16BIT, 0x03FF, AR023Z_I2C_ADDRESS }, + { 0xCA30, CRL_REG_LEN_16BIT, 0x0B00, AR023Z_I2C_ADDRESS }, + { 0xCA32, CRL_REG_LEN_16BIT, 0x0100, AR023Z_I2C_ADDRESS }, + { 0xCA08, CRL_REG_LEN_16BIT, 0x0001, AR023Z_I2C_ADDRESS }, + { 0xC9C0, CRL_REG_LEN_16BIT, 0x0000, AR023Z_I2C_ADDRESS }, + { 0xC9C2, CRL_REG_LEN_16BIT, 0x0800, AR023Z_I2C_ADDRESS }, + { 0xC9C8, CRL_REG_LEN_16BIT, 0x0080, AR023Z_I2C_ADDRESS }, + { 0xC9CA, CRL_REG_LEN_16BIT, 0x0800, AR023Z_I2C_ADDRESS }, + { 0xC9BC, CRL_REG_LEN_16BIT, 0x0028, AR023Z_I2C_ADDRESS }, + { 0xC9BE, CRL_REG_LEN_16BIT, 0x0023, AR023Z_I2C_ADDRESS }, + { 0xC9C4, CRL_REG_LEN_16BIT, 0x0046, AR023Z_I2C_ADDRESS }, + { 0xC9C6, CRL_REG_LEN_16BIT, 0x0046, AR023Z_I2C_ADDRESS }, + { 0xC9A4, CRL_REG_LEN_16BIT, 0x0002, AR023Z_I2C_ADDRESS }, + { 0xC9A6, CRL_REG_LEN_16BIT, 0x001E, AR023Z_I2C_ADDRESS }, + { 0xCA2C, CRL_REG_LEN_08BIT, 0x01, AR023Z_I2C_ADDRESS }, + { 0xCA2D, CRL_REG_LEN_08BIT, 0x03, AR023Z_I2C_ADDRESS }, + { 0xCA9C, CRL_REG_LEN_16BIT, 0x0700, AR023Z_I2C_ADDRESS }, + { 0xCAA8, CRL_REG_LEN_16BIT, 0x0100, AR023Z_I2C_ADDRESS }, + { 0xCAA4, CRL_REG_LEN_16BIT, 0x01C0, AR023Z_I2C_ADDRESS }, + { 0xCAB0, CRL_REG_LEN_16BIT, 0x00B3, AR023Z_I2C_ADDRESS }, + { 0xCA28, CRL_REG_LEN_08BIT, 0x5A, AR023Z_I2C_ADDRESS }, + { 0xA82C, CRL_REG_LEN_16BIT, 0x0880, AR023Z_I2C_ADDRESS }, + { 0xA82E, CRL_REG_LEN_16BIT, 0x095A, AR023Z_I2C_ADDRESS }, + { 0xA830, CRL_REG_LEN_16BIT, 0x0980, AR023Z_I2C_ADDRESS }, + { 0xA832, CRL_REG_LEN_16BIT, 0x0980, AR023Z_I2C_ADDRESS }, + { 0xA834, CRL_REG_LEN_16BIT, 0x0980, AR023Z_I2C_ADDRESS }, + { 0xA836, CRL_REG_LEN_16BIT, 0x0980, AR023Z_I2C_ADDRESS }, + { 0xA838, CRL_REG_LEN_16BIT, 0x0980, AR023Z_I2C_ADDRESS }, + { 
0xA83A, CRL_REG_LEN_16BIT, 0x0980, AR023Z_I2C_ADDRESS }, + { 0xC88C, CRL_REG_LEN_16BIT, 0x0080, AR023Z_I2C_ADDRESS }, + { 0xB00C, CRL_REG_LEN_08BIT, 0x00, AR023Z_I2C_ADDRESS }, + { 0xC8BE, CRL_REG_LEN_16BIT, 0x0000, AR023Z_I2C_ADDRESS }, + { 0xB00D, CRL_REG_LEN_08BIT, 0x1E, AR023Z_I2C_ADDRESS }, + { 0x0040, CRL_REG_LEN_16BIT, 0x8D00, AR023Z_I2C_ADDRESS }, + { 0x00, CRL_REG_LEN_DELAY, 20, 0x00 }, + { 0x0040, CRL_REG_LEN_16BIT, 0x8D01, AR023Z_I2C_ADDRESS }, + { 0x00, CRL_REG_LEN_DELAY, 20, 0x00 }, + { 0xFC00, CRL_REG_LEN_16BIT, 0x3028, AR023Z_I2C_ADDRESS }, + { 0xFC02, CRL_REG_LEN_16BIT, 0x0200, AR023Z_I2C_ADDRESS }, + { 0xFC04, CRL_REG_LEN_16BIT, 0x2000, AR023Z_I2C_ADDRESS }, + { 0x0040, CRL_REG_LEN_16BIT, 0x8D06, AR023Z_I2C_ADDRESS }, + { 0x00, CRL_REG_LEN_DELAY, 20, 0x00 }, + { 0x0040, CRL_REG_LEN_16BIT, 0x8D08, AR023Z_I2C_ADDRESS }, + { 0x00, CRL_REG_LEN_DELAY, 20, 0x00 }, + { 0x0040, CRL_REG_LEN_16BIT, 0x8D02, AR023Z_I2C_ADDRESS }, + { 0x00, CRL_REG_LEN_DELAY, 20, 0x00 }, + { 0xFC00, CRL_REG_LEN_16BIT, 0x0001, AR023Z_I2C_ADDRESS }, + { 0xFC02, CRL_REG_LEN_16BIT, 0x0101, AR023Z_I2C_ADDRESS }, + { 0xFC04, CRL_REG_LEN_16BIT, 0x0101, AR023Z_I2C_ADDRESS }, + { 0x0040, CRL_REG_LEN_16BIT, 0x8102, AR023Z_I2C_ADDRESS }, + { 0x00, CRL_REG_LEN_DELAY, 20, 0x00 }, + { 0xFC00, CRL_REG_LEN_16BIT, 0x2800, AR023Z_I2C_ADDRESS }, + { 0x0040, CRL_REG_LEN_16BIT, 0x8100, AR023Z_I2C_ADDRESS }, + { 0x00, CRL_REG_LEN_DELAY, 20, 0x00 }, +}; + +struct crl_sensor_detect_config ar023z_sensor_detect_regset[] = { + { + .reg = { 0x0000, CRL_REG_LEN_16BIT, 0xFFFF, TC358778_I2C_ADDRESS }, + .width = 15, + }, + { + .reg = { 0x0000, CRL_REG_LEN_16BIT, 0xFFFF, AR023Z_I2C_ADDRESS }, + .width = 16, + }, +}; + +struct crl_pll_configuration ar023z_pll_configurations[] = { + { + .input_clk = 27000000, + .op_sys_clk = 317250000, + .bitsperpixel = 16, + .pixel_rate_csi = 79312500, + .pixel_rate_pa = 79312500, + .csi_lanes = 2, + .comp_items = 0, + .ctrl_data = 0, + .pll_regs_items = 0, + .pll_regs = NULL, 
+ }, +}; + +struct crl_sensor_subdev_config ar023z_sensor_subdevs[] = { + { + .subdev_type = CRL_SUBDEV_TYPE_BINNER, + .name = "ar023z binner", + }, + { + .subdev_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .name = "ar023z pixel array", + }, +}; + +struct crl_register_write_rep ar023z_poweroff_regset[] = { + {0xFC00, CRL_REG_LEN_16BIT, 0x5000, AR023Z_I2C_ADDRESS}, + {0x0040, CRL_REG_LEN_16BIT, 0x8100, AR023Z_I2C_ADDRESS}, + {0x0002, CRL_REG_LEN_16BIT, 0x0001, TC358778_I2C_ADDRESS}, +}; + +struct crl_register_write_rep ar023z_streamon_regs[] = { + /* Turn on D-Phy clock and enable MIPI lanes 2 and 3 */ + {0x0140, CRL_REG_LEN_32BIT, 0x00000000, TC358778_I2C_ADDRESS}, /* CLK On */ + {0x0144, CRL_REG_LEN_32BIT, 0x00000000, TC358778_I2C_ADDRESS}, /* lane 0 */ + {0x0148, CRL_REG_LEN_32BIT, 0x00000000, TC358778_I2C_ADDRESS}, /* lane 1 */ + {0x014C, CRL_REG_LEN_32BIT, 0x00010000, TC358778_I2C_ADDRESS}, /* lane 2 */ + {0x0150, CRL_REG_LEN_32BIT, 0x00010000, TC358778_I2C_ADDRESS}, /* lane 3 */ +}; + +struct crl_register_write_rep ar023z_streamoff_regs[] = { + {0x0140, CRL_REG_LEN_32BIT, 0x00010000, TC358778_I2C_ADDRESS}, /* CLK Off */ + {0x0144, CRL_REG_LEN_32BIT, 0x00010000, TC358778_I2C_ADDRESS}, /* lane 0 */ + {0x0148, CRL_REG_LEN_32BIT, 0x00010000, TC358778_I2C_ADDRESS}, /* lane 1 */ + {0x014C, CRL_REG_LEN_32BIT, 0x00010000, TC358778_I2C_ADDRESS}, /* lane 2 */ + {0x0150, CRL_REG_LEN_32BIT, 0x00010000, TC358778_I2C_ADDRESS}, /* lane 3 */ +}; + +struct crl_subdev_rect_rep ar023z_1920_1080_rects[] = { + { + .subdev_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .in_rect.left = 0, + .in_rect.top = 0, + .in_rect.width = 1920, + .in_rect.height = 1080, + .out_rect.left = 0, + .out_rect.top = 0, + .out_rect.width = 1920, + .out_rect.height = 1080, + }, + { + .subdev_type = CRL_SUBDEV_TYPE_BINNER, + .in_rect.left = 0, + .in_rect.top = 0, + .in_rect.width = 1920, + .in_rect.height = 1080, + .out_rect.left = 0, + .out_rect.top = 0, + .out_rect.width = 1920, + .out_rect.height = 1080, + } +}; + 
+struct crl_mode_rep ar023z_modes[] = { + { + .sd_rects_items = ARRAY_SIZE(ar023z_1920_1080_rects), + .sd_rects = ar023z_1920_1080_rects, + .binn_hor = 1, + .binn_vert = 1, + .scale_m = 1, + .width = 1920, + .height = 1080, + .min_llp = 2350, + .min_fll = 1320, + .comp_items = 0, + .ctrl_data = 0, + .mode_regs_items = ARRAY_SIZE(ar023z_1920_1080), + .mode_regs = ar023z_1920_1080, + }, +}; + +struct crl_csi_data_fmt ar023z_crl_csi_data_fmt[] = { + { + .code = MEDIA_BUS_FMT_UYVY8_1X16, + .pixel_order = CRL_PIXEL_ORDER_IGNORE, + .bits_per_pixel = 16, + .regs_items = 0, + .regs = NULL, + }, +}; + +struct crl_sensor_limits ar023z_mipi_bridge_limits = { + .x_addr_min = 0, + .y_addr_min = 0, + .x_addr_max = 1920, + .y_addr_max = 1080, + .min_frame_length_lines = 320, + .max_frame_length_lines = 65535, + .min_line_length_pixels = 380, + .max_line_length_pixels = 32752, +}; + +/* Power items, they are enabled in the order they are listed here */ +struct crl_power_seq_entity ar023z_power_items[] = { + { + .type = CRL_POWER_ETY_CLK_FRAMEWORK, + .val = 27000000, + }, + { + .type = CRL_POWER_ETY_GPIO_FROM_PDATA, + .val = 1, + }, +}; + +struct crl_v4l2_ctrl ar023z_v4l2_ctrls[] ={ + { + .sd_type = CRL_SUBDEV_TYPE_BINNER, + .op_type = CRL_V4L2_CTRL_SET_OP, + .context = SENSOR_IDLE, + .ctrl_id = V4L2_CID_LINK_FREQ, + .name = "V4L2_CID_LINK_FREQ", + .type = CRL_V4L2_CTRL_TYPE_MENU_INT, + .data.v4l2_int_menu.def = 0, + .data.v4l2_int_menu.max = 0, + .data.v4l2_int_menu.menu = 0, + .flags = 0, + .impact = CRL_IMPACTS_NO_IMPACT, + .regs_items = 0, + .regs = 0, + .dep_items = 0, + .dep_ctrls = 0, + }, + { + .sd_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .op_type = CRL_V4L2_CTRL_GET_OP, + .context = SENSOR_POWERED_ON, + .ctrl_id = V4L2_CID_PIXEL_RATE, + .name = "V4L2_CID_PIXEL_RATE_PA", + .type = CRL_V4L2_CTRL_TYPE_INTEGER, + .data.std_data.min = 0, + .data.std_data.max = INT_MAX, + .data.std_data.step = 1, + .data.std_data.def = 0, + .flags = 0, + .impact = CRL_IMPACTS_NO_IMPACT, + 
.regs_items = 0, + .regs = 0, + .dep_items = 0, + .dep_ctrls = 0, + }, + { + .sd_type = CRL_SUBDEV_TYPE_BINNER, + .op_type = CRL_V4L2_CTRL_GET_OP, + .context = SENSOR_POWERED_ON, + .ctrl_id = V4L2_CID_PIXEL_RATE, + .name = "V4L2_CID_PIXEL_RATE_CSI", + .type = CRL_V4L2_CTRL_TYPE_INTEGER, + .data.std_data.min = 0, + .data.std_data.max = INT_MAX, + .data.std_data.step = 1, + .data.std_data.def = 0, + .flags = 0, + .impact = CRL_IMPACTS_NO_IMPACT, + .regs_items = 0, + .regs = 0, + .dep_items = 0, + .dep_ctrls = 0, + }, +}; + +struct crl_sensor_configuration ar023z_crl_configuration = { + .power_items = ARRAY_SIZE(ar023z_power_items), + .power_entities = ar023z_power_items, + + .poweroff_regs_items = ARRAY_SIZE(ar023z_poweroff_regset), + .poweroff_regs = ar023z_poweroff_regset, + + .id_reg_items = ARRAY_SIZE(ar023z_sensor_detect_regset), + .id_regs = ar023z_sensor_detect_regset, + + .onetime_init_regs_items = 0, + .onetime_init_regs = NULL, + + .subdev_items = ARRAY_SIZE(ar023z_sensor_subdevs), + .subdevs = ar023z_sensor_subdevs, + + .sensor_limits = &ar023z_mipi_bridge_limits, + + .pll_config_items = ARRAY_SIZE(ar023z_pll_configurations), + .pll_configs = ar023z_pll_configurations, + + .modes_items = ARRAY_SIZE(ar023z_modes), + .modes = ar023z_modes, + + .streamon_regs_items = ARRAY_SIZE(ar023z_streamon_regs), + .streamon_regs = ar023z_streamon_regs, + + .streamoff_regs_items = ARRAY_SIZE(ar023z_streamoff_regs), + .streamoff_regs = ar023z_streamoff_regs, + + .v4l2_ctrls_items = ARRAY_SIZE(ar023z_v4l2_ctrls), + .v4l2_ctrl_bank = ar023z_v4l2_ctrls, + + .csi_fmts_items = ARRAY_SIZE(ar023z_crl_csi_data_fmt), + .csi_fmts = ar023z_crl_csi_data_fmt, + + .flip_items = 0, + .flip_data = NULL, + + .frame_desc_entries = 0, + .frame_desc_type = 0, + .frame_desc = 0, +}; + +#endif /* __CRLMODULE_AR023Z_CONFIGURATION_H_ */ diff --git a/drivers/media/i2c/crlmodule/crl_imx132_configuration.h b/drivers/media/i2c/crlmodule/crl_imx132_configuration.h new file mode 100644 index 
0000000000000..128ccb50e4f8b --- /dev/null +++ b/drivers/media/i2c/crlmodule/crl_imx132_configuration.h @@ -0,0 +1,699 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (C) 2015 - 2018 Intel Corporation */ + +#ifndef __CRLMODULE_IMX132_CONFIGURATION_H_ +#define __CRLMODULE_IMX132_CONFIGURATION_H_ + +#include "crlmodule-sensor-ds.h" + +struct crl_register_write_rep imx132_powerup_regset[] = { + { 0x3087, CRL_REG_LEN_08BIT, 0x53 }, + { 0x308B, CRL_REG_LEN_08BIT, 0x5A }, + { 0x3094, CRL_REG_LEN_08BIT, 0x11 }, + { 0x309D, CRL_REG_LEN_08BIT, 0xA4 }, + { 0x30AA, CRL_REG_LEN_08BIT, 0x01 }, + { 0x30C6, CRL_REG_LEN_08BIT, 0x00 }, + { 0x30C7, CRL_REG_LEN_08BIT, 0x00 }, + { 0x3118, CRL_REG_LEN_08BIT, 0x2F }, + { 0x312A, CRL_REG_LEN_08BIT, 0x00 }, + { 0x312B, CRL_REG_LEN_08BIT, 0x0B }, + { 0x312C, CRL_REG_LEN_08BIT, 0x0B }, + { 0x312D, CRL_REG_LEN_08BIT, 0x13 }, + { 0x303D, CRL_REG_LEN_08BIT, 0x10 }, + { 0x303E, CRL_REG_LEN_08BIT, 0x5A }, + { 0x3040, CRL_REG_LEN_08BIT, 0x00 }, + { 0x3041, CRL_REG_LEN_08BIT, 0x00 }, + { 0x3048, CRL_REG_LEN_08BIT, 0x00 }, + { 0x304C, CRL_REG_LEN_08BIT, 0x2F }, + { 0x304D, CRL_REG_LEN_08BIT, 0x02 }, + { 0x3064, CRL_REG_LEN_08BIT, 0x92 }, + { 0x306A, CRL_REG_LEN_08BIT, 0x10 }, + { 0x309B, CRL_REG_LEN_08BIT, 0x00 }, + { 0x309E, CRL_REG_LEN_08BIT, 0x41 }, + { 0x30A0, CRL_REG_LEN_08BIT, 0x10 }, + { 0x30A1, CRL_REG_LEN_08BIT, 0x0B }, + { 0x30B2, CRL_REG_LEN_08BIT, 0x00 }, + { 0x30D5, CRL_REG_LEN_08BIT, 0x00 }, + { 0x30D6, CRL_REG_LEN_08BIT, 0x00 }, + { 0x30D7, CRL_REG_LEN_08BIT, 0x00 }, + { 0x30D8, CRL_REG_LEN_08BIT, 0x00 }, + { 0x30D9, CRL_REG_LEN_08BIT, 0x00 }, + { 0x30DA, CRL_REG_LEN_08BIT, 0x00 }, + { 0x30DB, CRL_REG_LEN_08BIT, 0x00 }, + { 0x30DC, CRL_REG_LEN_08BIT, 0x00 }, + { 0x30DD, CRL_REG_LEN_08BIT, 0x00 }, + { 0x30DE, CRL_REG_LEN_08BIT, 0x00 }, + { 0x3102, CRL_REG_LEN_08BIT, 0x0C }, + { 0x3103, CRL_REG_LEN_08BIT, 0x33 }, + { 0x3104, CRL_REG_LEN_08BIT, 0x18 }, + { 0x3105, CRL_REG_LEN_08BIT, 0x00 }, + { 0x3106, CRL_REG_LEN_08BIT, 0x65 
}, + { 0x3107, CRL_REG_LEN_08BIT, 0x00 }, + { 0x3108, CRL_REG_LEN_08BIT, 0x06 }, + { 0x3109, CRL_REG_LEN_08BIT, 0x04 }, + { 0x310A, CRL_REG_LEN_08BIT, 0x04 }, + { 0x315C, CRL_REG_LEN_08BIT, 0x3D }, + { 0x315D, CRL_REG_LEN_08BIT, 0x3C }, + { 0x316E, CRL_REG_LEN_08BIT, 0x3E }, + { 0x316F, CRL_REG_LEN_08BIT, 0x3D }, + { 0x020e, CRL_REG_LEN_16BIT, 0x0100 }, + { 0x0210, CRL_REG_LEN_16BIT, 0x01a0 }, + { 0x0212, CRL_REG_LEN_16BIT, 0x0200 }, + { 0x0214, CRL_REG_LEN_16BIT, 0x0100 }, + { 0x0204, CRL_REG_LEN_16BIT, 0x0000 }, + { 0x0202, CRL_REG_LEN_16BIT, 0x0000 }, + { 0x0600, CRL_REG_LEN_16BIT, 0x0000 }, + { 0x0602, CRL_REG_LEN_16BIT, 0x03ff }, + { 0x0604, CRL_REG_LEN_16BIT, 0x03ff }, + { 0x0606, CRL_REG_LEN_16BIT, 0x03ff }, + { 0x0608, CRL_REG_LEN_16BIT, 0x03ff }, + { 0x0100, CRL_REG_LEN_08BIT, 0x00 }, +}; + +/* + .input_clk = 24000000, + .op_sys_clk = 405000000, + .bitsperpixel = 10, + .pixel_rate_csi = 810000000, + .pixel_rate_pa = 768000000, + .comp_items = 0, + .ctrl_data = 0, + .pll_regs_items = ARRAY_SIZE(imx132_pll_384), + .pll_regs = imx132_pll_384, +*/ +struct crl_register_write_rep imx132_pll_405[] = { + /* PLL setting */ + { 0x0305, CRL_REG_LEN_08BIT, 0x04 }, + { 0x0307, CRL_REG_LEN_08BIT, 0x87 }, + { 0x30A4, CRL_REG_LEN_08BIT, 0x01 }, + { 0x303C, CRL_REG_LEN_08BIT, 0x4B }, + /* Global timing */ + { 0x3304, CRL_REG_LEN_08BIT, 0x07 }, + { 0x3305, CRL_REG_LEN_08BIT, 0x06 }, + { 0x3306, CRL_REG_LEN_08BIT, 0x19 }, + { 0x3307, CRL_REG_LEN_08BIT, 0x03 }, + { 0x3308, CRL_REG_LEN_08BIT, 0x0F }, + { 0x3309, CRL_REG_LEN_08BIT, 0x07 }, + { 0x330A, CRL_REG_LEN_08BIT, 0x0C }, + { 0x330B, CRL_REG_LEN_08BIT, 0x06 }, + { 0x330C, CRL_REG_LEN_08BIT, 0x0B }, + { 0x330D, CRL_REG_LEN_08BIT, 0x07 }, + { 0x330E, CRL_REG_LEN_08BIT, 0x03 }, + { 0x3318, CRL_REG_LEN_08BIT, 0x62 }, + { 0x3322, CRL_REG_LEN_08BIT, 0x09 }, + { 0x3342, CRL_REG_LEN_08BIT, 0x00 }, + { 0x3348, CRL_REG_LEN_08BIT, 0xE0 }, + { 0x3301, CRL_REG_LEN_08BIT, 0x00 }, /* Lanes = 2*/ +}; + +struct crl_register_write_rep 
imx132_pll_312[] = { + /* PLL setting */ + { 0x0305, CRL_REG_LEN_08BIT, 0x01 }, + { 0x0307, CRL_REG_LEN_08BIT, 0x0D }, + { 0x30A4, CRL_REG_LEN_08BIT, 0x02 }, + { 0x303C, CRL_REG_LEN_08BIT, 0x4B }, + /* Global timing */ + { 0x3304, CRL_REG_LEN_08BIT, 0x07 }, + { 0x3305, CRL_REG_LEN_08BIT, 0x06 }, + { 0x3306, CRL_REG_LEN_08BIT, 0x19 }, + { 0x3307, CRL_REG_LEN_08BIT, 0x03 }, + { 0x3308, CRL_REG_LEN_08BIT, 0x0F }, + { 0x3309, CRL_REG_LEN_08BIT, 0x07 }, + { 0x330A, CRL_REG_LEN_08BIT, 0x0C }, + { 0x330B, CRL_REG_LEN_08BIT, 0x06 }, + { 0x330C, CRL_REG_LEN_08BIT, 0x0B }, + { 0x330D, CRL_REG_LEN_08BIT, 0x07 }, + { 0x330E, CRL_REG_LEN_08BIT, 0x03 }, + { 0x3318, CRL_REG_LEN_08BIT, 0x62 }, + { 0x3322, CRL_REG_LEN_08BIT, 0x09 }, + { 0x3342, CRL_REG_LEN_08BIT, 0x00 }, + { 0x3348, CRL_REG_LEN_08BIT, 0xE0 }, + { 0x3301, CRL_REG_LEN_08BIT, 0x01 }, /* Lanes = 1*/ +}; + +struct crl_register_write_rep imx132_mode_1080P[] = { + {0x0344, CRL_REG_LEN_08BIT, 0x00}, + {0x0345, CRL_REG_LEN_08BIT, 0x14}, + {0x0346, CRL_REG_LEN_08BIT, 0x00}, + {0x0347, CRL_REG_LEN_08BIT, 0x32}, + {0x0348, CRL_REG_LEN_08BIT, 0x07}, + {0x0349, CRL_REG_LEN_08BIT, 0xA3}, + {0x034A, CRL_REG_LEN_08BIT, 0x04}, + {0x034B, CRL_REG_LEN_08BIT, 0x79}, + {0x034C, CRL_REG_LEN_08BIT, 0x07}, + {0x034D, CRL_REG_LEN_08BIT, 0x90}, + {0x034E, CRL_REG_LEN_08BIT, 0x04}, + {0x034F, CRL_REG_LEN_08BIT, 0x48}, + {0x0381, CRL_REG_LEN_08BIT, 0x01}, + {0x0383, CRL_REG_LEN_08BIT, 0x01}, + {0x0385, CRL_REG_LEN_08BIT, 0x01}, + {0x0387, CRL_REG_LEN_08BIT, 0x01}, +}; + +struct crl_register_write_rep imx132_mode_1636x1096[] = { + {0x0344, CRL_REG_LEN_08BIT, 0x00}, + {0x0345, CRL_REG_LEN_08BIT, 0xAA}, + {0x0346, CRL_REG_LEN_08BIT, 0x00}, + {0x0347, CRL_REG_LEN_08BIT, 0x32}, + {0x0348, CRL_REG_LEN_08BIT, 0x07}, + {0x0349, CRL_REG_LEN_08BIT, 0x0D}, + {0x034A, CRL_REG_LEN_08BIT, 0x04}, + {0x034B, CRL_REG_LEN_08BIT, 0x79}, + {0x034C, CRL_REG_LEN_08BIT, 0x06}, + {0x034D, CRL_REG_LEN_08BIT, 0x64}, + {0x034E, CRL_REG_LEN_08BIT, 0x04}, + {0x034F, 
CRL_REG_LEN_08BIT, 0x48}, + {0x0381, CRL_REG_LEN_08BIT, 0x01}, + {0x0383, CRL_REG_LEN_08BIT, 0x01}, + {0x0385, CRL_REG_LEN_08BIT, 0x01}, + {0x0387, CRL_REG_LEN_08BIT, 0x01}, +}; + +struct crl_register_write_rep imx132_fll_regs[] = { + { 0x0340, CRL_REG_LEN_16BIT, 0x045c }, /* LLP and FLL */ +}; + +struct crl_register_write_rep imx132_llp_regs[] = { + { 0x0342, CRL_REG_LEN_16BIT, 0x08fc }, /* LLP and FLL */ +}; + +struct crl_register_write_rep imx132_streamon_regs[] = { + { 0x0100, CRL_REG_LEN_08BIT, 0x01 } +}; + +struct crl_register_write_rep imx132_streamoff_regs[] = { + { 0x0100, CRL_REG_LEN_08BIT, 0x00 } +}; + +struct crl_register_write_rep imx132_data_fmt_width10[] = { + { 0x0112, CRL_REG_LEN_16BIT, 0x0a0a } +}; + +struct crl_register_write_rep imx132_data_fmt_width8[] = { + { 0x0112, CRL_REG_LEN_16BIT, 0x0808 } +}; + +struct crl_subdev_rect_rep imx132_1080P_rects[] = { + { + .subdev_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .in_rect.left = 0, + .in_rect.top = 0, + .in_rect.width = 1976, + .in_rect.height = 1200, + .out_rect.left = 20, + .out_rect.top = 50, + .out_rect.width = 1936, + .out_rect.height = 1096, + }, + { + .subdev_type = CRL_SUBDEV_TYPE_BINNER, + .in_rect.left = 0, + .in_rect.top = 0, + .in_rect.width = 1936, + .in_rect.height = 1096, + .out_rect.left = 0, + .out_rect.top = 0, + .out_rect.width = 1936, + .out_rect.height = 1096, + }, +}; + +struct crl_subdev_rect_rep imx132_1636x1096_rects[] = { + { + .subdev_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .in_rect.left = 0, + .in_rect.top = 0, + .in_rect.width = 1976, + .in_rect.height = 1200, + .out_rect.left = 170, + .out_rect.top = 50, + .out_rect.width = 1636, + .out_rect.height = 1096, + }, + { + .subdev_type = CRL_SUBDEV_TYPE_BINNER, + .in_rect.left = 0, + .in_rect.top = 0, + .in_rect.width = 1636, + .in_rect.height = 1096, + .out_rect.left = 0, + .out_rect.top = 0, + .out_rect.width = 1636, + .out_rect.height = 1096, + }, +}; + +struct crl_mode_rep imx132_modes[] = { + { + .sd_rects_items = 
ARRAY_SIZE(imx132_1636x1096_rects), + .sd_rects = imx132_1636x1096_rects, + .binn_hor = 1, + .binn_vert = 1, + .scale_m = 1, + .width = 1636, + .height = 1096, + .comp_items = 0, + .ctrl_data = 0, + .mode_regs_items = ARRAY_SIZE(imx132_mode_1636x1096), + .mode_regs = imx132_mode_1636x1096, + }, + { + .sd_rects_items = ARRAY_SIZE(imx132_1080P_rects), + .sd_rects = imx132_1080P_rects, + .binn_hor = 1, + .binn_vert = 1, + .scale_m = 1, + .width = 1936, + .height = 1096, + .comp_items = 0, + .ctrl_data = 0, + .mode_regs_items = ARRAY_SIZE(imx132_mode_1080P), + .mode_regs = imx132_mode_1080P, + }, +}; + +struct crl_register_write_rep imx132_poweroff_regset[] = { + { 0x0103, CRL_REG_LEN_08BIT, 0x01 }, +}; + +struct crl_sensor_detect_config imx132_sensor_detect_regset[] = { + { + .reg = { 0x0003, CRL_REG_LEN_08BIT, 0x000000ff }, + .width = 5, + }, + { + .reg = { 0x0000, CRL_REG_LEN_16BIT, 0x0000ffff }, + .width = 7, + }, +}; + +struct crl_sensor_subdev_config imx132_sensor_subdevs[] = { + { + .subdev_type = CRL_SUBDEV_TYPE_BINNER, + .name = "imx132 binner", + }, + { + .subdev_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .name = "imx132 pixel array", + }, +}; + +struct crl_pll_configuration imx132_pll_configurations[] = { + { + .input_clk = 24000000, + .op_sys_clk = 312000000, + .bitsperpixel = 8, + .pixel_rate_csi = 624000000, + .pixel_rate_pa = 624000000, + .csi_lanes = 1, + .comp_items = 0, + .ctrl_data = 0, + .pll_regs_items = ARRAY_SIZE(imx132_pll_312), + .pll_regs = imx132_pll_312, + }, + { + .input_clk = 24000000, + .op_sys_clk = 405000000, + .bitsperpixel = 10, + .pixel_rate_csi = 810000000, + .pixel_rate_pa = 810000000, + .csi_lanes = 2, + .comp_items = 0, + .ctrl_data = 0, + .pll_regs_items = ARRAY_SIZE(imx132_pll_405), + .pll_regs = imx132_pll_405, + }, +}; + +struct crl_sensor_limits imx132_sensor_limits = { + .x_addr_min = 0, + .y_addr_min = 0, + .x_addr_max = 1976, + .y_addr_max = 1200, + .min_frame_length_lines = 202, + .max_frame_length_lines = 65535, + 
.min_line_length_pixels = 560, + .max_line_length_pixels = 65520, +}; + +struct crl_flip_data imx132_flip_configurations[] = { + { + .flip = CRL_FLIP_DEFAULT_NONE, + .pixel_order = CRL_PIXEL_ORDER_RGGB, + }, + { + .flip = CRL_FLIP_HFLIP, + .pixel_order = CRL_PIXEL_ORDER_GRBG, + }, + { + .flip = CRL_FLIP_VFLIP, + .pixel_order = CRL_PIXEL_ORDER_GBRG, + }, + { + .flip = CRL_FLIP_HFLIP_VFLIP, + .pixel_order = CRL_PIXEL_ORDER_BGGR, + }, +}; + +struct crl_csi_data_fmt imx132_crl_csi_data_fmt[] = { + { + .code = MEDIA_BUS_FMT_SGRBG10_1X10, + .pixel_order = CRL_PIXEL_ORDER_GRBG, + .bits_per_pixel = 10, + .regs_items = 1, + .regs = imx132_data_fmt_width10, + }, + { + .code = MEDIA_BUS_FMT_SRGGB10_1X10, + .pixel_order = CRL_PIXEL_ORDER_RGGB, + .regs_items = 1, + .bits_per_pixel = 10, + .regs = imx132_data_fmt_width10, + }, + { + .code = MEDIA_BUS_FMT_SBGGR10_1X10, + .pixel_order = CRL_PIXEL_ORDER_BGGR, + .regs_items = 1, + .bits_per_pixel = 10, + .regs = imx132_data_fmt_width10, + }, + { + .code = MEDIA_BUS_FMT_SGBRG10_1X10, + .pixel_order = CRL_PIXEL_ORDER_GBRG, + .regs_items = 1, + .bits_per_pixel = 10, + .regs = imx132_data_fmt_width10, + }, + { + .code = MEDIA_BUS_FMT_SGRBG8_1X8, + .pixel_order = CRL_PIXEL_ORDER_GRBG, + .regs_items = 1, + .bits_per_pixel = 8, + .regs = imx132_data_fmt_width8, + }, + { + .code = MEDIA_BUS_FMT_SRGGB8_1X8, + .pixel_order = CRL_PIXEL_ORDER_RGGB, + .regs_items = 1, + .bits_per_pixel = 8, + .regs = imx132_data_fmt_width8, + }, + { + .code = MEDIA_BUS_FMT_SBGGR8_1X8, + .pixel_order = CRL_PIXEL_ORDER_BGGR, + .regs_items = 1, + .bits_per_pixel = 8, + .regs = imx132_data_fmt_width8, + }, + { + .code = MEDIA_BUS_FMT_SGBRG8_1X8, + .pixel_order = CRL_PIXEL_ORDER_GBRG, + .regs_items = 1, + .bits_per_pixel = 8, + .regs = imx132_data_fmt_width8, + }, +}; + +struct crl_dynamic_register_access imx132_flip_regs[] = { + { + .address = 0x0101, + .len = CRL_REG_LEN_08BIT | CRL_REG_READ_AND_UPDATE, + .ops_items = 0, + .ops = 0, + }, +}; + + +struct 
crl_dynamic_register_access imx132_ana_gain_global_regs[] = { + { + .address = 0x0204, + .len = CRL_REG_LEN_16BIT, + .ops_items = 0, + .ops = 0, + }, +}; + +struct crl_dynamic_register_access imx132_exposure_regs[] = { + { + .address = 0x0202, + .len = CRL_REG_LEN_16BIT, + .ops_items = 0, + .ops = 0, + } +}; + +struct crl_dynamic_register_access imx132_vblank_regs[] = { + { + .address = 0x0340, + .len = CRL_REG_LEN_16BIT, + .ops_items = 0, + .ops = 0, + }, +}; + +struct crl_dynamic_register_access imx132_hblank_regs[] = { + { + .address = 0x0342, + .len = CRL_REG_LEN_16BIT, + .ops_items = 0, + .ops = 0, + }, +}; + +static struct crl_dynamic_register_access imx132_test_pattern_regs[] = { + { + .address = 0x0600, + .len = CRL_REG_LEN_16BIT, + .ops_items = 0, + .ops = 0, + }, +}; + +static const char * const imx132_test_patterns[] = { + "Disabled", + "Solid Colour", + "Eight Vertical Colour Bars", + "Fade to Gray", + "PN9", +}; + +struct crl_v4l2_ctrl imx132_v4l2_ctrls[] = { + { + .sd_type = CRL_SUBDEV_TYPE_BINNER, + .op_type = CRL_V4L2_CTRL_SET_OP, + .context = SENSOR_IDLE, + .ctrl_id = V4L2_CID_LINK_FREQ, + .name = "V4L2_CID_LINK_FREQ", + .type = CRL_V4L2_CTRL_TYPE_MENU_INT, + .data.v4l2_int_menu.def = 0, + .data.v4l2_int_menu.max = 0, + .data.v4l2_int_menu.menu = 0, + .flags = 0, + .impact = CRL_IMPACTS_NO_IMPACT, + .regs_items = 0, + .regs = 0, + .dep_items = 0, + .dep_ctrls = 0, + }, + { + .sd_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .op_type = CRL_V4L2_CTRL_GET_OP, + .context = SENSOR_POWERED_ON, + .ctrl_id = V4L2_CID_PIXEL_RATE, + .name = "V4L2_CID_PIXEL_RATE_PA", + .type = CRL_V4L2_CTRL_TYPE_INTEGER, + .data.std_data.min = 0, + .data.std_data.max = INT_MAX, + .data.std_data.step = 1, + .data.std_data.def = 0, + .flags = 0, + .impact = CRL_IMPACTS_NO_IMPACT, + .regs_items = 0, + .regs = 0, + .dep_items = 0, + .dep_ctrls = 0, + }, + { + .sd_type = CRL_SUBDEV_TYPE_BINNER, + .op_type = CRL_V4L2_CTRL_GET_OP, + .context = SENSOR_POWERED_ON, + .ctrl_id = 
V4L2_CID_PIXEL_RATE, + .name = "V4L2_CID_PIXEL_RATE_CSI", + .type = CRL_V4L2_CTRL_TYPE_INTEGER, + .data.std_data.min = 0, + .data.std_data.max = INT_MAX, + .data.std_data.step = 1, + .data.std_data.def = 0, + .flags = 0, + .impact = CRL_IMPACTS_NO_IMPACT, + .regs_items = 0, + .regs = 0, + .dep_items = 0, + .dep_ctrls = 0, + }, + { + .sd_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .op_type = CRL_V4L2_CTRL_SET_OP, + .context = SENSOR_POWERED_ON, + .ctrl_id = V4L2_CID_ANALOGUE_GAIN, + .name = "V4L2_CID_ANALOGUE_GAIN", + .type = CRL_V4L2_CTRL_TYPE_INTEGER, + .data.std_data.min = 0, + .data.std_data.max = 220, + .data.std_data.step = 1, + .data.std_data.def = 0, + .flags = 0, + .impact = CRL_IMPACTS_NO_IMPACT, + .regs_items = ARRAY_SIZE(imx132_ana_gain_global_regs), + .regs = imx132_ana_gain_global_regs, + .dep_items = 0, + .dep_ctrls = 0, + }, + { + .sd_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .op_type = CRL_V4L2_CTRL_SET_OP, + .context = SENSOR_POWERED_ON, + .ctrl_id = V4L2_CID_EXPOSURE, + .name = "V4L2_CID_EXPOSURE", + .type = CRL_V4L2_CTRL_TYPE_INTEGER, + .data.std_data.min = 0, + .data.std_data.max = 65500, + .data.std_data.step = 1, + .data.std_data.def = 0, + .flags = 0, + .impact = CRL_IMPACTS_NO_IMPACT, + .regs_items = ARRAY_SIZE(imx132_exposure_regs), + .regs = imx132_exposure_regs, + .dep_items = 0, + .dep_ctrls = 0, + }, + { + .sd_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .op_type = CRL_V4L2_CTRL_SET_OP, + .context = SENSOR_IDLE, + .ctrl_id = V4L2_CID_HFLIP, + .name = "V4L2_CID_HFLIP", + .type = CRL_V4L2_CTRL_TYPE_INTEGER, + .data.std_data.min = 0, + .data.std_data.max = 1, + .data.std_data.step = 1, + .data.std_data.def = 0, + .flags = 0, + .impact = CRL_IMPACTS_NO_IMPACT, + .regs_items = ARRAY_SIZE(imx132_flip_regs), + .regs = imx132_flip_regs, + .dep_items = 0, + .dep_ctrls = 0, + }, + { + .sd_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .op_type = CRL_V4L2_CTRL_SET_OP, + .context = SENSOR_IDLE, + .ctrl_id = V4L2_CID_VFLIP, + .name = "V4L2_CID_VFLIP", + .type = 
CRL_V4L2_CTRL_TYPE_INTEGER, + .data.std_data.min = 0, + .data.std_data.max = 1, + .data.std_data.step = 1, + .data.std_data.def = 0, + .flags = 0, + .impact = CRL_IMPACTS_NO_IMPACT, + .regs_items = ARRAY_SIZE(imx132_flip_regs), + .regs = imx132_flip_regs, + .dep_items = 0, + .dep_ctrls = 0, + }, + { + .sd_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .op_type = CRL_V4L2_CTRL_SET_OP, + .context = SENSOR_POWERED_ON, + .ctrl_id = V4L2_CID_VBLANK, + .name = "V4L2_CID_VBLANK", + .type = CRL_V4L2_CTRL_TYPE_INTEGER, + .data.std_data.min = -65535, + .data.std_data.max = 65535, + .data.std_data.step = 1, + .data.std_data.def = 0, + .flags = V4L2_CTRL_FLAG_UPDATE, + .impact = CRL_IMPACTS_NO_IMPACT, + .regs_items = ARRAY_SIZE(imx132_vblank_regs), + .regs = imx132_vblank_regs, + .dep_items = 0, + .dep_ctrls = 0, + }, + { + .sd_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .op_type = CRL_V4L2_CTRL_SET_OP, + .context = SENSOR_IDLE, + .ctrl_id = V4L2_CID_HBLANK, + .name = "V4L2_CID_HBLANK", + .type = CRL_V4L2_CTRL_TYPE_INTEGER, + .data.std_data.min = 0, + .data.std_data.max = 65520, + .data.std_data.step = 1, + .data.std_data.def = 0, + .flags = V4L2_CTRL_FLAG_UPDATE, + .impact = CRL_IMPACTS_NO_IMPACT, + .regs_items = ARRAY_SIZE(imx132_hblank_regs), + .regs = imx132_hblank_regs, + .dep_items = 0, + .dep_ctrls = 0, + }, + { + .sd_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .op_type = CRL_V4L2_CTRL_SET_OP, + .context = SENSOR_POWERED_ON, + .ctrl_id = V4L2_CID_TEST_PATTERN, + .name = "V4L2_CID_TEST_PATTERN", + .type = CRL_V4L2_CTRL_TYPE_MENU_ITEMS, + .data.v4l2_menu_items.menu = imx132_test_patterns, + .data.v4l2_menu_items.size = ARRAY_SIZE(imx132_test_patterns), + .flags = V4L2_CTRL_FLAG_UPDATE, + .impact = CRL_IMPACTS_NO_IMPACT, + .ctrl = 0, + .regs_items = ARRAY_SIZE(imx132_test_pattern_regs), + .regs = imx132_test_pattern_regs, + .dep_items = 0, + .dep_ctrls = 0, + }, +}; + +struct crl_sensor_configuration imx132_crl_configuration = { + + .powerup_regs_items = ARRAY_SIZE(imx132_powerup_regset), + 
.powerup_regs = imx132_powerup_regset, + + .poweroff_regs_items = ARRAY_SIZE(imx132_poweroff_regset), + .poweroff_regs = imx132_poweroff_regset, + + .id_reg_items = ARRAY_SIZE(imx132_sensor_detect_regset), + .id_regs = imx132_sensor_detect_regset, + + .subdev_items = ARRAY_SIZE(imx132_sensor_subdevs), + .subdevs = imx132_sensor_subdevs, + + .sensor_limits = &imx132_sensor_limits, + + .pll_config_items = ARRAY_SIZE(imx132_pll_configurations), + .pll_configs = imx132_pll_configurations, + + .modes_items = ARRAY_SIZE(imx132_modes), + .modes = imx132_modes, + + .streamon_regs_items = ARRAY_SIZE(imx132_streamon_regs), + .streamon_regs = imx132_streamon_regs, + + .streamoff_regs_items = ARRAY_SIZE(imx132_streamoff_regs), + .streamoff_regs = imx132_streamoff_regs, + + .v4l2_ctrls_items = ARRAY_SIZE(imx132_v4l2_ctrls), + .v4l2_ctrl_bank = imx132_v4l2_ctrls, + + .csi_fmts_items = ARRAY_SIZE(imx132_crl_csi_data_fmt), + .csi_fmts = imx132_crl_csi_data_fmt, + + .flip_items = ARRAY_SIZE(imx132_flip_configurations), + .flip_data = imx132_flip_configurations, +}; + +#endif /* __CRLMODULE_IMX132_CONFIGURATION_H_ */ diff --git a/drivers/media/i2c/crlmodule/crl_imx135_configuration.h b/drivers/media/i2c/crlmodule/crl_imx135_configuration.h new file mode 100644 index 0000000000000..26a38b14864aa --- /dev/null +++ b/drivers/media/i2c/crlmodule/crl_imx135_configuration.h @@ -0,0 +1,779 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (C) 2015 - 2018 Intel Corporation */ + +#ifndef __CRLMODULE_IMX135_CONFIGURATION_H_ +#define __CRLMODULE_IMX135_CONFIGURATION_H_ + +#include "crlmodule-sensor-ds.h" + +/* MIPI 451.2MHz 902.4mbps PIXCLK: 360.96MHz */ +static struct crl_register_write_rep imx135_pll_451[] = { + { 0x011e, CRL_REG_LEN_08BIT, 0x13 }, /* This is not correct for 24MHz* */ + { 0x011f, CRL_REG_LEN_08BIT, 0x33 }, /* But it is that way in vendor sheets */ + { 0x0301, CRL_REG_LEN_08BIT, 0x05 }, + { 0x0303, CRL_REG_LEN_08BIT, 0x01 }, + { 0x0305, CRL_REG_LEN_08BIT, 0x0f }, + { 
0x0309, CRL_REG_LEN_08BIT, 0x05 }, + { 0x030b, CRL_REG_LEN_08BIT, 0x01 }, + { 0x030c, CRL_REG_LEN_08BIT, 0x02 }, + { 0x030d, CRL_REG_LEN_08BIT, 0x34 }, + { 0x030e, CRL_REG_LEN_08BIT, 0x01 }, + { 0x3a06, CRL_REG_LEN_08BIT, 0x11 }, + { 0x0830, CRL_REG_LEN_08BIT, 0x87 }, + { 0x0831, CRL_REG_LEN_08BIT, 0x3f }, + { 0x0832, CRL_REG_LEN_08BIT, 0x67 }, + { 0x0833, CRL_REG_LEN_08BIT, 0x3f }, + { 0x0834, CRL_REG_LEN_08BIT, 0x3f }, + { 0x0835, CRL_REG_LEN_08BIT, 0x4f }, + { 0x0836, CRL_REG_LEN_08BIT, 0xdf }, + { 0x0837, CRL_REG_LEN_08BIT, 0x47 }, + { 0x0839, CRL_REG_LEN_08BIT, 0x1f }, + { 0x083a, CRL_REG_LEN_08BIT, 0x17 }, + { 0x083b, CRL_REG_LEN_08BIT, 0x02 }, + { 0x0108, CRL_REG_LEN_08BIT, 0x03 }, /* CSI lane */ +}; + + +static struct crl_register_write_rep imx135_powerup_regset[] = { + { 0x0101, CRL_REG_LEN_08BIT, 0x00 }, + { 0x0105, CRL_REG_LEN_08BIT, 0x01 }, + { 0x0110, CRL_REG_LEN_08BIT, 0x00 }, + { 0x0220, CRL_REG_LEN_08BIT, 0x01 }, + { 0x3302, CRL_REG_LEN_08BIT, 0x11 }, + { 0x3833, CRL_REG_LEN_08BIT, 0x20 }, + { 0x3893, CRL_REG_LEN_08BIT, 0x00 }, + { 0x3906, CRL_REG_LEN_08BIT, 0x08 }, + { 0x3907, CRL_REG_LEN_08BIT, 0x01 }, + { 0x391B, CRL_REG_LEN_08BIT, 0x01 }, + { 0x3C09, CRL_REG_LEN_08BIT, 0x01 }, + { 0x600A, CRL_REG_LEN_08BIT, 0x00 }, + { 0x3008, CRL_REG_LEN_08BIT, 0xB0 }, + { 0x320A, CRL_REG_LEN_08BIT, 0x01 }, + { 0x320D, CRL_REG_LEN_08BIT, 0x10 }, + { 0x3216, CRL_REG_LEN_08BIT, 0x2E }, + { 0x322C, CRL_REG_LEN_08BIT, 0x02 }, + { 0x3409, CRL_REG_LEN_08BIT, 0x0C }, + { 0x340C, CRL_REG_LEN_08BIT, 0x2D }, + { 0x3411, CRL_REG_LEN_08BIT, 0x39 }, + { 0x3414, CRL_REG_LEN_08BIT, 0x1E }, + { 0x3427, CRL_REG_LEN_08BIT, 0x04 }, + { 0x3480, CRL_REG_LEN_08BIT, 0x1E }, + { 0x3484, CRL_REG_LEN_08BIT, 0x1E }, + { 0x3488, CRL_REG_LEN_08BIT, 0x1E }, + { 0x348C, CRL_REG_LEN_08BIT, 0x1E }, + { 0x3490, CRL_REG_LEN_08BIT, 0x1E }, + { 0x3494, CRL_REG_LEN_08BIT, 0x1E }, + { 0x3511, CRL_REG_LEN_08BIT, 0x8F }, + { 0x364F, CRL_REG_LEN_08BIT, 0x2D }, + { 0x0700, CRL_REG_LEN_08BIT, 0x00 }, + { 
0x3a63, CRL_REG_LEN_08BIT, 0x00 }, + { 0x4100, CRL_REG_LEN_08BIT, 0xf8 }, + { 0x4203, CRL_REG_LEN_08BIT, 0xff }, + { 0x4344, CRL_REG_LEN_08BIT, 0x00 }, + { 0x4100, CRL_REG_LEN_08BIT, 0xf8 }, + { 0x441c, CRL_REG_LEN_08BIT, 0x01 }, + { 0x020e, CRL_REG_LEN_08BIT, 0x01 }, + { 0x020f, CRL_REG_LEN_08BIT, 0x00 }, + { 0x0210, CRL_REG_LEN_08BIT, 0x01 }, + { 0x0211, CRL_REG_LEN_08BIT, 0x00 }, + { 0x0212, CRL_REG_LEN_08BIT, 0x01 }, + { 0x0213, CRL_REG_LEN_08BIT, 0x00 }, + { 0x0214, CRL_REG_LEN_08BIT, 0x01 }, + { 0x0215, CRL_REG_LEN_08BIT, 0x00 }, +}; + +static struct crl_register_write_rep imx135_mode_13M[] = { + { 0x0108, CRL_REG_LEN_08BIT, 0x03 }, /* lanes */ + { 0x0381, CRL_REG_LEN_08BIT, 0x01 }, + { 0x0383, CRL_REG_LEN_08BIT, 0x01 }, + { 0x0385, CRL_REG_LEN_08BIT, 0x01 }, + { 0x0387, CRL_REG_LEN_08BIT, 0x01 }, + { 0x0390, CRL_REG_LEN_08BIT, 0x00 }, + { 0x0391, CRL_REG_LEN_08BIT, 0x11 }, + { 0x0392, CRL_REG_LEN_08BIT, 0x00 }, + { 0x0401, CRL_REG_LEN_08BIT, 0x00 }, + { 0x0404, CRL_REG_LEN_08BIT, 0x00 }, + { 0x0405, CRL_REG_LEN_08BIT, 0x10 }, + { 0x4082, CRL_REG_LEN_08BIT, 0x00 }, + { 0x4083, CRL_REG_LEN_08BIT, 0x11 }, /* Sony settings do not work */ + { 0x4203, CRL_REG_LEN_08BIT, 0xFF }, + { 0x7006, CRL_REG_LEN_08BIT, 0x04 }, + { 0x0344, CRL_REG_LEN_08BIT, 0x00 }, + { 0x0345, CRL_REG_LEN_08BIT, 0x00 }, + { 0x0346, CRL_REG_LEN_08BIT, 0x00 }, + { 0x0347, CRL_REG_LEN_08BIT, 0x00 }, + { 0x0348, CRL_REG_LEN_08BIT, 0x10 }, + { 0x0349, CRL_REG_LEN_08BIT, 0x6F }, + { 0x034A, CRL_REG_LEN_08BIT, 0x0C }, + { 0x034B, CRL_REG_LEN_08BIT, 0x2F }, + { 0x034C, CRL_REG_LEN_08BIT, 0x10 }, + { 0x034D, CRL_REG_LEN_08BIT, 0x70 }, + { 0x034E, CRL_REG_LEN_08BIT, 0x0C }, + { 0x034F, CRL_REG_LEN_08BIT, 0x30 }, + { 0x0350, CRL_REG_LEN_08BIT, 0x00 }, + { 0x0351, CRL_REG_LEN_08BIT, 0x00 }, + { 0x0352, CRL_REG_LEN_08BIT, 0x00 }, + { 0x0353, CRL_REG_LEN_08BIT, 0x00 }, + { 0x0354, CRL_REG_LEN_08BIT, 0x10 }, + { 0x0355, CRL_REG_LEN_08BIT, 0x70 }, + { 0x0356, CRL_REG_LEN_08BIT, 0x0C }, + { 0x0357, 
CRL_REG_LEN_08BIT, 0x30 }, + { 0x301D, CRL_REG_LEN_08BIT, 0x30 }, + { 0x3310, CRL_REG_LEN_08BIT, 0x10 }, + { 0x3311, CRL_REG_LEN_08BIT, 0x70 }, + { 0x3312, CRL_REG_LEN_08BIT, 0x0C }, + { 0x3313, CRL_REG_LEN_08BIT, 0x30 }, + { 0x331C, CRL_REG_LEN_08BIT, 0x00 }, + { 0x331D, CRL_REG_LEN_08BIT, 0x10 }, + { 0x4084, CRL_REG_LEN_08BIT, 0x00 }, + { 0x4085, CRL_REG_LEN_08BIT, 0x00 }, + { 0x4086, CRL_REG_LEN_08BIT, 0x00 }, + { 0x4087, CRL_REG_LEN_08BIT, 0x00 }, + { 0x4400, CRL_REG_LEN_08BIT, 0x00 }, +}; + +static struct crl_register_write_rep imx135_mode_1936M_binn_scale[] = { + + { 0x0381, CRL_REG_LEN_08BIT, 0x01 }, + { 0x0383, CRL_REG_LEN_08BIT, 0x01 }, + { 0x0385, CRL_REG_LEN_08BIT, 0x01 }, + { 0x0387, CRL_REG_LEN_08BIT, 0x01 }, + { 0x0390, CRL_REG_LEN_08BIT, 0x01 }, + { 0x0391, CRL_REG_LEN_08BIT, 0x22 }, + { 0x0392, CRL_REG_LEN_08BIT, 0x00 }, + { 0x0401, CRL_REG_LEN_08BIT, 0x02 }, + { 0x0404, CRL_REG_LEN_08BIT, 0x00 }, + { 0x0405, CRL_REG_LEN_08BIT, 0x11 }, + { 0x4082, CRL_REG_LEN_08BIT, 0x00 }, + { 0x4083, CRL_REG_LEN_08BIT, 0x00 }, + { 0x7006, CRL_REG_LEN_08BIT, 0x04 }, + { 0x0344, CRL_REG_LEN_08BIT, 0x00 }, + { 0x0345, CRL_REG_LEN_08BIT, 0x2E }, + { 0x0346, CRL_REG_LEN_08BIT, 0x01 }, + { 0x0347, CRL_REG_LEN_08BIT, 0x8C }, + { 0x0348, CRL_REG_LEN_08BIT, 0x10 }, + { 0x0349, CRL_REG_LEN_08BIT, 0x41 }, + { 0x034A, CRL_REG_LEN_08BIT, 0x0A }, + { 0x034B, CRL_REG_LEN_08BIT, 0xA7 }, + { 0x034C, CRL_REG_LEN_08BIT, 0x07 }, + { 0x034D, CRL_REG_LEN_08BIT, 0x90 }, + { 0x034E, CRL_REG_LEN_08BIT, 0x04 }, + { 0x034F, CRL_REG_LEN_08BIT, 0x48 }, + { 0x0350, CRL_REG_LEN_08BIT, 0x00 }, + { 0x0351, CRL_REG_LEN_08BIT, 0x00 }, + { 0x0352, CRL_REG_LEN_08BIT, 0x00 }, + { 0x0353, CRL_REG_LEN_08BIT, 0x00 }, + { 0x0354, CRL_REG_LEN_08BIT, 0x08 }, + { 0x0355, CRL_REG_LEN_08BIT, 0x0A }, + { 0x0356, CRL_REG_LEN_08BIT, 0x04 }, + { 0x0357, CRL_REG_LEN_08BIT, 0x8E }, + { 0x301D, CRL_REG_LEN_08BIT, 0x30 }, + { 0x3310, CRL_REG_LEN_08BIT, 0x07 }, + { 0x3311, CRL_REG_LEN_08BIT, 0x90 }, + { 0x3312, 
CRL_REG_LEN_08BIT, 0x04 }, + { 0x3313, CRL_REG_LEN_08BIT, 0x48 }, + { 0x331C, CRL_REG_LEN_08BIT, 0x04 }, + { 0x331D, CRL_REG_LEN_08BIT, 0xB0 }, + { 0x4084, CRL_REG_LEN_08BIT, 0x07 }, + { 0x4085, CRL_REG_LEN_08BIT, 0x90 }, + { 0x4086, CRL_REG_LEN_08BIT, 0x04 }, + { 0x4087, CRL_REG_LEN_08BIT, 0x48 }, + { 0x4400, CRL_REG_LEN_08BIT, 0x00 }, +}; + +static struct crl_register_write_rep imx135_streamon_regs[] = { + { 0x0100, CRL_REG_LEN_08BIT, 0x01 } +}; + +static struct crl_register_write_rep imx135_streamoff_regs[] = { + { 0x0100, CRL_REG_LEN_08BIT, 0x00 } +}; + +static struct crl_register_write_rep imx135_data_fmt_width10[] = { + { 0x0112, CRL_REG_LEN_16BIT, 0x0a0a } +}; + +static struct crl_register_write_rep imx135_data_fmt_width8[] = { + { 0x0112, CRL_REG_LEN_16BIT, 0x0808 } +}; + +static struct crl_arithmetic_ops imx135_vflip_ops[] = { + { + .op = CRL_BITWISE_LSHIFT, + .operand.entity_val = 1, + }, +}; + +static struct crl_dynamic_register_access imx135_h_flip_regs[] = { + { + .address = 0x0101, + .len = CRL_REG_LEN_08BIT | CRL_REG_READ_AND_UPDATE, + .ops_items = 0, + .ops = 0, + .mask = 0x1, + }, +}; + +static struct crl_dynamic_register_access imx135_v_flip_regs[] = { + { + .address = 0x0101, + .len = CRL_REG_LEN_08BIT | CRL_REG_READ_AND_UPDATE, + .ops_items = ARRAY_SIZE(imx135_vflip_ops), + .ops = imx135_vflip_ops, + .mask = 0x2, + }, +}; + + +static struct crl_dynamic_register_access imx135_ana_gain_global_regs[] = { + { + .address = 0x0205, + .len = CRL_REG_LEN_08BIT, + .ops_items = 0, + .ops = 0, + .mask = 0xff, + }, +}; + +static struct crl_dynamic_register_access imx135_exposure_regs[] = { + { + .address = 0x0202, + .len = CRL_REG_LEN_16BIT, + .ops_items = 0, + .ops = 0, + .mask = 0xffff, + } +}; + +static struct crl_dynamic_register_access imx135_vblank_regs[] = { + { + .address = 0x0340, + .len = CRL_REG_LEN_16BIT, + .ops_items = 0, + .ops = 0, + .mask = 0xffff, + }, +}; + +static struct crl_dynamic_register_access imx135_hblank_regs[] = { + { + .address 
= 0x0342, + .len = CRL_REG_LEN_16BIT, + .ops_items = 0, + .ops = 0, + .mask = 0xffff, + }, +}; +static struct crl_sensor_detect_config imx135_sensor_detect_regset[] = { + { + .reg = { 0x0019, CRL_REG_LEN_08BIT, 0x000000ff }, + .width = 7, + }, + { + .reg = { 0x0016, CRL_REG_LEN_16BIT, 0x0000ffff }, + .width = 7, + }, +}; + +static struct crl_pll_configuration imx135_pll_configurations[] = { + { + .input_clk = 24000000, + .op_sys_clk = 451200000, + .pixel_rate_csi = 360960000, + .pixel_rate_pa = 360960000, + .bitsperpixel = 10, + .comp_items = 0, + .ctrl_data = 0, + .csi_lanes = 4, + .pll_regs_items = ARRAY_SIZE(imx135_pll_451), + .pll_regs = imx135_pll_451, + }, +}; + +static struct crl_subdev_rect_rep imx135_13M_rects[] = { + { + .subdev_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .in_rect.left = 0, + .in_rect.top = 0, + .in_rect.width = 4208, + .in_rect.height = 3120, + .out_rect.left = 0, + .out_rect.top = 0, + .out_rect.width = 4208, + .out_rect.height = 3120, + }, + { + .subdev_type = CRL_SUBDEV_TYPE_BINNER, + .in_rect.left = 0, + .in_rect.top = 0, + .in_rect.width = 4208, + .in_rect.height = 3120, + .out_rect.left = 0, + .out_rect.top = 0, + .out_rect.width = 4208, + .out_rect.height = 3120, + }, + { + .subdev_type = CRL_SUBDEV_TYPE_SCALER, + .in_rect.left = 0, + .in_rect.top = 0, + .in_rect.width = 4208, + .in_rect.height = 3120, + .out_rect.left = 0, + .out_rect.top = 0, + .out_rect.width = 4208, + .out_rect.height = 3120, + }, +}; + +static struct crl_subdev_rect_rep imx135_mode_1936M_binn_scale_rects[] = { + { + .subdev_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .in_rect.left = 0, + .in_rect.top = 0, + .in_rect.width = 4208, + .in_rect.height = 3120, + .out_rect.left = 46, + .out_rect.top = 396, + .out_rect.width = 4116, + .out_rect.height = 2332, + }, + { + .subdev_type = CRL_SUBDEV_TYPE_BINNER, + .in_rect.left = 0, + .in_rect.top = 0, + .in_rect.width = 4116, + .in_rect.height = 2332, + .out_rect.left = 0, + .out_rect.top = 0, + .out_rect.width = 2058, + 
.out_rect.height = 1166, + }, + { + .subdev_type = CRL_SUBDEV_TYPE_SCALER, + .in_rect.left = 0, + .in_rect.top = 0, + .in_rect.width = 2058, + .in_rect.height = 1166, + .out_rect.left = 0, + .out_rect.top = 0, + .out_rect.width = 1936, + .out_rect.height = 1096, + }, +}; + +static struct crl_mode_rep imx135_modes[] = { + { + .sd_rects_items = ARRAY_SIZE(imx135_13M_rects), + .sd_rects = imx135_13M_rects, + .binn_hor = 1, + .binn_vert = 1, + .scale_m = 1, + .width = 4208, + .height = 3120, + .comp_items = 0, + .ctrl_data = 0, + .mode_regs_items = ARRAY_SIZE(imx135_mode_13M), + .mode_regs = imx135_mode_13M, + }, + { + .sd_rects_items = + ARRAY_SIZE(imx135_mode_1936M_binn_scale_rects), + .sd_rects = imx135_mode_1936M_binn_scale_rects, + .binn_hor = 2, + .binn_vert = 2, + .scale_m = 17, + .width = 1936, + .height = 1096, + .comp_items = 0, + .ctrl_data = 0, + .mode_regs_items = ARRAY_SIZE(imx135_mode_1936M_binn_scale), + .mode_regs = imx135_mode_1936M_binn_scale, + }, +}; + +static struct crl_sensor_subdev_config imx135_sensor_subdevs[] = { + { + .subdev_type = CRL_SUBDEV_TYPE_SCALER, + .name = "imx135 scaler", + }, + { + .subdev_type = CRL_SUBDEV_TYPE_BINNER, + .name = "imx135 binner", + }, + { + .subdev_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .name = "imx135 pixel array", + }, +}; + +static struct crl_sensor_limits imx135_sensor_limits = { + .x_addr_min = 0, + .y_addr_min = 0, + .x_addr_max = 4208, + .y_addr_max = 3120, + .min_frame_length_lines = 160, + .max_frame_length_lines = 65535, + .min_line_length_pixels = 4572, + .max_line_length_pixels = 32752, + .scaler_m_min = 16, + .scaler_m_max = 255, + .min_even_inc = 1, + .max_even_inc = 1, + .min_odd_inc = 1, + .max_odd_inc = 3, +}; + +static struct crl_flip_data imx135_flip_configurations[] = { + { + .flip = CRL_FLIP_DEFAULT_NONE, + .pixel_order = CRL_PIXEL_ORDER_RGGB, + }, + { + .flip = CRL_FLIP_HFLIP, + .pixel_order = CRL_PIXEL_ORDER_GRBG, + }, + { + .flip = CRL_FLIP_VFLIP, + .pixel_order = CRL_PIXEL_ORDER_GBRG, + }, 
+ { + .flip = CRL_FLIP_HFLIP_VFLIP, + .pixel_order = CRL_PIXEL_ORDER_BGGR, + }, +}; + +static struct crl_csi_data_fmt imx135_crl_csi_data_fmt[] = { + { + .code = MEDIA_BUS_FMT_SGRBG10_1X10, + .pixel_order = CRL_PIXEL_ORDER_GRBG, + .bits_per_pixel = 10, + .regs_items = 1, + .regs = imx135_data_fmt_width10, + }, + { + .code = MEDIA_BUS_FMT_SRGGB10_1X10, + .pixel_order = CRL_PIXEL_ORDER_RGGB, + .regs_items = 1, + .bits_per_pixel = 10, + .regs = imx135_data_fmt_width10, + }, + { + .code = MEDIA_BUS_FMT_SBGGR10_1X10, + .pixel_order = CRL_PIXEL_ORDER_BGGR, + .regs_items = 1, + .bits_per_pixel = 10, + .regs = imx135_data_fmt_width10, + }, + { + .code = MEDIA_BUS_FMT_SGBRG10_1X10, + .pixel_order = CRL_PIXEL_ORDER_GBRG, + .regs_items = 1, + .bits_per_pixel = 10, + .regs = imx135_data_fmt_width10, + }, + { + .code = MEDIA_BUS_FMT_SGRBG8_1X8, + .pixel_order = CRL_PIXEL_ORDER_GRBG, + .regs_items = 1, + .bits_per_pixel = 8, + .regs = imx135_data_fmt_width8, + }, + { + .code = MEDIA_BUS_FMT_SRGGB8_1X8, + .pixel_order = CRL_PIXEL_ORDER_RGGB, + .regs_items = 1, + .bits_per_pixel = 8, + .regs = imx135_data_fmt_width8, + }, + { + .code = MEDIA_BUS_FMT_SBGGR8_1X8, + .pixel_order = CRL_PIXEL_ORDER_BGGR, + .regs_items = 1, + .bits_per_pixel = 8, + .regs = imx135_data_fmt_width8, + }, + { + .code = MEDIA_BUS_FMT_SGBRG8_1X8, + .pixel_order = CRL_PIXEL_ORDER_GBRG, + .regs_items = 1, + .bits_per_pixel = 8, + .regs = imx135_data_fmt_width8, + }, +}; + +static struct crl_dynamic_register_access imx135_test_pattern_regs[] = { + { + .address = 0x0600, + .len = CRL_REG_LEN_16BIT, + .ops_items = 0, + .ops = 0, + .mask = 0xffff, + }, +}; + +static const char * const imx135_test_patterns[] = { + "Disabled", + "Solid Colour", + "Eight Vertical Colour Bars", +}; + +static const s64 imx135_op_sys_clock[] = { 451200000 }; + +static struct crl_v4l2_ctrl imx135_v4l2_ctrls[] = { + { + .sd_type = CRL_SUBDEV_TYPE_SCALER, + .op_type = CRL_V4L2_CTRL_SET_OP, + .context = SENSOR_IDLE, + .ctrl_id = 
V4L2_CID_LINK_FREQ, + .name = "V4L2_CID_LINK_FREQ", + .type = CRL_V4L2_CTRL_TYPE_MENU_INT, + .data.v4l2_int_menu.def = 0, + .data.v4l2_int_menu.max = + ARRAY_SIZE(imx135_pll_configurations) - 1, + .data.v4l2_int_menu.menu = imx135_op_sys_clock, + .flags = 0, + .impact = CRL_IMPACTS_NO_IMPACT, + .regs_items = 0, + .regs = 0, + .dep_items = 0, + .dep_ctrls = 0, + }, + { + .sd_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .op_type = CRL_V4L2_CTRL_GET_OP, + .context = SENSOR_POWERED_ON, + .ctrl_id = V4L2_CID_PIXEL_RATE, + .name = "V4L2_CID_PIXEL_RATE_PA", + .type = CRL_V4L2_CTRL_TYPE_INTEGER, + .data.std_data.min = 0, + .data.std_data.max = INT_MAX, + .data.std_data.step = 1, + .data.std_data.def = 0, + .flags = 0, + .impact = CRL_IMPACTS_NO_IMPACT, + .regs_items = 0, + .regs = 0, + .dep_items = 0, + .dep_ctrls = 0, + }, + { + .sd_type = CRL_SUBDEV_TYPE_SCALER, + .op_type = CRL_V4L2_CTRL_GET_OP, + .context = SENSOR_POWERED_ON, + .ctrl_id = V4L2_CID_PIXEL_RATE, + .name = "V4L2_CID_PIXEL_RATE_CSI", + .type = CRL_V4L2_CTRL_TYPE_INTEGER, + .data.std_data.min = 0, + .data.std_data.max = INT_MAX, + .data.std_data.step = 1, + .data.std_data.def = 0, + .flags = 0, + .impact = CRL_IMPACTS_NO_IMPACT, + .regs_items = 0, + .regs = 0, + .dep_items = 0, + .dep_ctrls = 0, + }, + { + .sd_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .op_type = CRL_V4L2_CTRL_SET_OP, + .context = SENSOR_POWERED_ON, + .ctrl_id = V4L2_CID_ANALOGUE_GAIN, + .type = CRL_V4L2_CTRL_TYPE_INTEGER, + .name = "V4L2_CID_ANALOGUE_GAIN", + .data.std_data.min = 0, + .data.std_data.max = 224, + .data.std_data.step = 1, + .data.std_data.def = 0, + .flags = 0, + .impact = CRL_IMPACTS_NO_IMPACT, + .ctrl = 0, + .regs_items = ARRAY_SIZE(imx135_ana_gain_global_regs), + .regs = imx135_ana_gain_global_regs, + .dep_items = 0, + .dep_ctrls = 0, + }, + { + .sd_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .op_type = CRL_V4L2_CTRL_SET_OP, + .context = SENSOR_POWERED_ON, + .ctrl_id = V4L2_CID_EXPOSURE, + .type = CRL_V4L2_CTRL_TYPE_INTEGER, + .name = 
"V4L2_CID_EXPOSURE", + .data.std_data.min = 0, + .data.std_data.max = 65500, + .data.std_data.step = 1, + .data.std_data.def = 4500, + .flags = 0, + .impact = CRL_IMPACTS_NO_IMPACT, + .ctrl = 0, + .regs_items = ARRAY_SIZE(imx135_exposure_regs), + .regs = imx135_exposure_regs, + .dep_items = 0, + .dep_ctrls = 0, + }, + { + .sd_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .op_type = CRL_V4L2_CTRL_SET_OP, + .context = SENSOR_POWERED_ON, + .ctrl_id = V4L2_CID_HFLIP, + .type = CRL_V4L2_CTRL_TYPE_INTEGER, + .name = "V4L2_CID_HFLIP", + .data.std_data.min = 0, + .data.std_data.max = 1, + .data.std_data.step = 1, + .data.std_data.def = 0, + .flags = 0, + .impact = CRL_IMPACTS_NO_IMPACT, + .ctrl = 0, + .regs_items = ARRAY_SIZE(imx135_h_flip_regs), + .regs = imx135_h_flip_regs, + .dep_items = 0, + .dep_ctrls = 0, + }, + { + .sd_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .op_type = CRL_V4L2_CTRL_SET_OP, + .context = SENSOR_POWERED_ON, + .ctrl_id = V4L2_CID_VFLIP, + .type = CRL_V4L2_CTRL_TYPE_INTEGER, + .name = "V4L2_CID_VFLIP", + .data.std_data.min = 0, + .data.std_data.max = 1, + .data.std_data.step = 1, + .data.std_data.def = 0, + .flags = 0, + .impact = CRL_IMPACTS_NO_IMPACT, + .ctrl = 0, + .regs_items = ARRAY_SIZE(imx135_v_flip_regs), + .regs = imx135_v_flip_regs, + .dep_items = 0, + .dep_ctrls = 0, + }, + { + .sd_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .op_type = CRL_V4L2_CTRL_SET_OP, + .context = SENSOR_POWERED_ON, + .ctrl_id = V4L2_CID_TEST_PATTERN, + .name = "V4L2_CID_TEST_PATTERN", + .type = CRL_V4L2_CTRL_TYPE_MENU_ITEMS, + .data.v4l2_menu_items.menu = imx135_test_patterns, + .data.v4l2_menu_items.size = ARRAY_SIZE(imx135_test_patterns), + .flags = V4L2_CTRL_FLAG_UPDATE, + .impact = CRL_IMPACTS_NO_IMPACT, + .ctrl = 0, + .regs_items = ARRAY_SIZE(imx135_test_pattern_regs), + .regs = imx135_test_pattern_regs, + .dep_items = 0, + .dep_ctrls = 0, + }, + { + .sd_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .op_type = CRL_V4L2_CTRL_SET_OP, + .context = SENSOR_POWERED_ON, + .ctrl_id = 
V4L2_CID_FRAME_LENGTH_LINES, + .name = "Frame length lines", + .type = CRL_V4L2_CTRL_TYPE_CUSTOM, + .data.std_data.min = 160, + .data.std_data.max = 65535, + .data.std_data.step = 1, + .data.std_data.def = 3800, + .flags = V4L2_CTRL_FLAG_UPDATE, + .impact = CRL_IMPACTS_NO_IMPACT, + .ctrl = 0, + .regs_items = ARRAY_SIZE(imx135_vblank_regs), + .regs = imx135_vblank_regs, + .dep_items = 0, + .dep_ctrls = 0, + .v4l2_type = V4L2_CTRL_TYPE_INTEGER, + }, + { + .sd_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .op_type = CRL_V4L2_CTRL_SET_OP, + .context = SENSOR_POWERED_ON, + .ctrl_id = V4L2_CID_LINE_LENGTH_PIXELS, + .name = "Line Length Pixels", + .type = CRL_V4L2_CTRL_TYPE_CUSTOM, + .data.std_data.min = 4280, + .data.std_data.max = 65520, + .data.std_data.step = 1, + .data.std_data.def = 4600, + .flags = V4L2_CTRL_FLAG_UPDATE, + .impact = CRL_IMPACTS_NO_IMPACT, + .ctrl = 0, + .regs_items = ARRAY_SIZE(imx135_hblank_regs), + .regs = imx135_hblank_regs, + .dep_items = 0, + .dep_ctrls = 0, + .v4l2_type = V4L2_CTRL_TYPE_INTEGER, + }, +}; + +/* Power items, they are enabled in the order they are listed here */ +static struct crl_power_seq_entity imx135_power_items[] = { + { + .type = CRL_POWER_ETY_REGULATOR_FRAMEWORK, + .ent_name = "VANA", + .val = 2700000, + .delay = 0, + }, + { + .type = CRL_POWER_ETY_REGULATOR_FRAMEWORK, + .ent_name = "VDIG", + .val = 1100000, + .delay = 0, + }, + { + .type = CRL_POWER_ETY_CLK_FRAMEWORK, + .val = 24000000, + .delay = 2000, + }, + { + .type = CRL_POWER_ETY_GPIO_FROM_PDATA, + .val = 1, + .delay = 250, + }, +}; + + +static struct crl_sensor_configuration imx135_crl_configuration = { + + .powerup_regs_items = ARRAY_SIZE(imx135_powerup_regset), + .powerup_regs = imx135_powerup_regset, + + .power_items = ARRAY_SIZE(imx135_power_items), + .power_entities = imx135_power_items, + + .id_reg_items = ARRAY_SIZE(imx135_sensor_detect_regset), + .id_regs = imx135_sensor_detect_regset, + + .subdev_items = ARRAY_SIZE(imx135_sensor_subdevs), + .subdevs = 
imx135_sensor_subdevs, + + .sensor_limits = &imx135_sensor_limits, + + .pll_config_items = ARRAY_SIZE(imx135_pll_configurations), + .pll_configs = imx135_pll_configurations, + + .modes_items = ARRAY_SIZE(imx135_modes), + .modes = imx135_modes, + + .streamon_regs_items = ARRAY_SIZE(imx135_streamon_regs), + .streamon_regs = imx135_streamon_regs, + + .streamoff_regs_items = ARRAY_SIZE(imx135_streamoff_regs), + .streamoff_regs = imx135_streamoff_regs, + + .v4l2_ctrls_items = ARRAY_SIZE(imx135_v4l2_ctrls), + .v4l2_ctrl_bank = imx135_v4l2_ctrls, + + .csi_fmts_items = ARRAY_SIZE(imx135_crl_csi_data_fmt), + .csi_fmts = imx135_crl_csi_data_fmt, + + .flip_items = ARRAY_SIZE(imx135_flip_configurations), + .flip_data = imx135_flip_configurations, +}; + + +#endif /* __CRLMODULE_IMX135_CONFIGURATION_H_ */ diff --git a/drivers/media/i2c/crlmodule/crl_imx185_configuration.h b/drivers/media/i2c/crlmodule/crl_imx185_configuration.h new file mode 100644 index 0000000000000..168455b63d208 --- /dev/null +++ b/drivers/media/i2c/crlmodule/crl_imx185_configuration.h @@ -0,0 +1,1772 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (C) 2017 - 2018 Intel Corporation + * + * Author: Shuguang Gong + * + */ + +#ifndef __CRLMODULE_IMX185_CONFIGURATION_H_ +#define __CRLMODULE_IMX185_CONFIGURATION_H_ + +#include "crlmodule-sensor-ds.h" + +#define IMX185_REG_STANDBY 0x3000 +#define IMX185_REG_XMSTA 0x3002 +#define IMX185_REG_SW_RESET 0x3003 + +#define IMX185_HMAX 65535 +#define IMX185_VMAX 131071 +#define IMX185_MAX_SHS1 (IMX185_VMAX - 2) + +struct crl_ctrl_data_pair ctrl_data_modes[] = { + { + .ctrl_id = V4L2_CID_WDR_MODE, + .data = 0, + }, + { + .ctrl_id = V4L2_CID_WDR_MODE, + .data = 1, + }, +}; + +/* 111Mbps for imx185 720p 30fps */ +static struct crl_register_write_rep imx185_pll_111mbps[] = { + {0x3009, CRL_REG_LEN_08BIT, 0x02}, /* frame speed */ + {0x300A, CRL_REG_LEN_08BIT, 0x3C}, + {0x300C, CRL_REG_LEN_08BIT, 0x00}, + {0x3018, CRL_REG_LEN_08BIT, 0xee}, + {0x3019, CRL_REG_LEN_08BIT, 
0x02}, + {0x301b, CRL_REG_LEN_08BIT, 0xe4}, + {0x301c, CRL_REG_LEN_08BIT, 0x0C}, + {0x300F, CRL_REG_LEN_08BIT, 0x01}, + {0x3010, CRL_REG_LEN_08BIT, 0x39}, + {0x3012, CRL_REG_LEN_08BIT, 0x50}, + {0x3056, CRL_REG_LEN_08BIT, 0xC9}, + {0x3057, CRL_REG_LEN_08BIT, 0x64}, + {0x3065, CRL_REG_LEN_08BIT, 0x00}, + {0x3084, CRL_REG_LEN_08BIT, 0x0F}, + {0x3086, CRL_REG_LEN_08BIT, 0x10}, + {0x30CF, CRL_REG_LEN_08BIT, 0xE1}, + {0x30D0, CRL_REG_LEN_08BIT, 0x29}, + {0x30D2, CRL_REG_LEN_08BIT, 0x9B}, + {0x30D3, CRL_REG_LEN_08BIT, 0x01}, + {0x30E1, CRL_REG_LEN_08BIT, 0xFF}, + {0x3303, CRL_REG_LEN_08BIT, 0x20}, /* repetation */ + {0x3305, CRL_REG_LEN_08BIT, 0x03}, /* 1: 2lanes, 3: 4lanes */ + {0x332C, CRL_REG_LEN_08BIT, 0x28}, /* mipi timing */ + {0x332D, CRL_REG_LEN_08BIT, 0x20}, + {0x3341, CRL_REG_LEN_08BIT, 0x00}, + {0x3342, CRL_REG_LEN_08BIT, 0x1B}, + {0x3343, CRL_REG_LEN_08BIT, 0x58}, + {0x3344, CRL_REG_LEN_08BIT, 0x0C}, + {0x3345, CRL_REG_LEN_08BIT, 0x24}, + {0x3346, CRL_REG_LEN_08BIT, 0x10}, + {0x3347, CRL_REG_LEN_08BIT, 0x0B}, + {0x3348, CRL_REG_LEN_08BIT, 0x08}, + {0x3349, CRL_REG_LEN_08BIT, 0x30}, + {0x334A, CRL_REG_LEN_08BIT, 0x20}, +}; + +/* 222Mbps for imx185 1080p 30fps */ +static struct crl_register_write_rep imx185_pll_222mbps[] = { + {0x3009, CRL_REG_LEN_08BIT, 0x02}, /* frame speed */ + {0x300A, CRL_REG_LEN_08BIT, 0x3C}, + {0x300C, CRL_REG_LEN_08BIT, 0x00}, + {0x301b, CRL_REG_LEN_08BIT, 0x98}, + {0x301c, CRL_REG_LEN_08BIT, 0x08}, + {0x300F, CRL_REG_LEN_08BIT, 0x01}, + {0x3010, CRL_REG_LEN_08BIT, 0x39}, + {0x3012, CRL_REG_LEN_08BIT, 0x50}, + {0x3056, CRL_REG_LEN_08BIT, 0xC9}, + {0x3057, CRL_REG_LEN_08BIT, 0x64}, + {0x3065, CRL_REG_LEN_08BIT, 0x00}, + {0x3084, CRL_REG_LEN_08BIT, 0x00}, + {0x3086, CRL_REG_LEN_08BIT, 0x01}, + {0x30CF, CRL_REG_LEN_08BIT, 0xD1}, + {0x30D0, CRL_REG_LEN_08BIT, 0x1B}, + {0x30D2, CRL_REG_LEN_08BIT, 0x5F}, + {0x30D3, CRL_REG_LEN_08BIT, 0x00}, + {0x30E1, CRL_REG_LEN_08BIT, 0xFF}, + {0x3303, CRL_REG_LEN_08BIT, 0x10}, /* repetation */ + {0x3305, 
CRL_REG_LEN_08BIT, 0x03}, /* 1: 2lanes, 3: 4lanes */ + {0x332C, CRL_REG_LEN_08BIT, 0x30}, /* mipi timing */ + {0x332D, CRL_REG_LEN_08BIT, 0x20}, + {0x3341, CRL_REG_LEN_08BIT, 0x00}, + {0x3342, CRL_REG_LEN_08BIT, 0x1B}, + {0x3343, CRL_REG_LEN_08BIT, 0x58}, + {0x3344, CRL_REG_LEN_08BIT, 0x10}, + {0x3345, CRL_REG_LEN_08BIT, 0x30}, + {0x3346, CRL_REG_LEN_08BIT, 0x18}, + {0x3347, CRL_REG_LEN_08BIT, 0x10}, + {0x3348, CRL_REG_LEN_08BIT, 0x10}, + {0x3349, CRL_REG_LEN_08BIT, 0x48}, + {0x334A, CRL_REG_LEN_08BIT, 0x28}, +}; + +/* 445Mbps for imx185 1080p 60fps */ +static struct crl_register_write_rep imx185_pll_445mbps[] = { + {0x3009, CRL_REG_LEN_08BIT, 0x01}, /* frame speed */ + {0x300A, CRL_REG_LEN_08BIT, 0x3C}, /* BLK */ + {0x300C, CRL_REG_LEN_08BIT, 0x00}, /* fixed settings */ + {0x3018, CRL_REG_LEN_08BIT, 0x65}, + {0x3019, CRL_REG_LEN_08BIT, 0x04}, + {0x301B, CRL_REG_LEN_08BIT, 0x4C}, + {0x301C, CRL_REG_LEN_08BIT, 0x04}, + {0x300F, CRL_REG_LEN_08BIT, 0x01}, + {0x3010, CRL_REG_LEN_08BIT, 0x39}, + {0x3012, CRL_REG_LEN_08BIT, 0x50}, + {0x3056, CRL_REG_LEN_08BIT, 0xC9}, + {0x3057, CRL_REG_LEN_08BIT, 0x64}, + {0x3065, CRL_REG_LEN_08BIT, 0x20}, + {0x3084, CRL_REG_LEN_08BIT, 0x00}, + {0x3086, CRL_REG_LEN_08BIT, 0x01}, + {0x30CF, CRL_REG_LEN_08BIT, 0xD1}, + {0x30D0, CRL_REG_LEN_08BIT, 0x1B}, + {0x30D2, CRL_REG_LEN_08BIT, 0x5F}, + {0x30D3, CRL_REG_LEN_08BIT, 0x00}, + {0x30E1, CRL_REG_LEN_08BIT, 0xFF}, + {0x3303, CRL_REG_LEN_08BIT, 0x00}, /* repetation */ + {0x3305, CRL_REG_LEN_08BIT, 0x03}, + {0x332C, CRL_REG_LEN_08BIT, 0x40}, /* mipi timing */ + {0x332D, CRL_REG_LEN_08BIT, 0x20}, + {0x3341, CRL_REG_LEN_08BIT, 0x00}, + {0x3342, CRL_REG_LEN_08BIT, 0x1B}, + {0x3343, CRL_REG_LEN_08BIT, 0x68}, + {0x3344, CRL_REG_LEN_08BIT, 0x20}, + {0x3345, CRL_REG_LEN_08BIT, 0x40}, + {0x3346, CRL_REG_LEN_08BIT, 0x28}, + {0x3347, CRL_REG_LEN_08BIT, 0x20}, + {0x3348, CRL_REG_LEN_08BIT, 0x18}, + {0x3349, CRL_REG_LEN_08BIT, 0x78}, + {0x334A, CRL_REG_LEN_08BIT, 0x28}, +}; + +static struct 
crl_register_write_rep imx185_fmt_raw10[] = { + {0x333E, CRL_REG_LEN_08BIT, 0x0a}, /* FMT RAW10 */ + {0x333F, CRL_REG_LEN_08BIT, 0x0a}, +}; + +static struct crl_register_write_rep imx185_fmt_raw12[] = { + {0x333E, CRL_REG_LEN_08BIT, 0x0c}, /* FMT RAW12 */ + {0x333F, CRL_REG_LEN_08BIT, 0x0c}, +}; + +static struct crl_register_write_rep imx185_powerup_standby[] = { + {0x3000, CRL_REG_LEN_08BIT, 0x01}, + {0x00, CRL_REG_LEN_DELAY, 50, 0x00}, + {0x3002, CRL_REG_LEN_08BIT, 0x01}, + {0x00, CRL_REG_LEN_DELAY, 200, 0x00}, +}; + +static struct crl_register_write_rep imx185_1312_728_27MHZ_CROPPING[] = { + /* 0x02h */ + {0x3005, CRL_REG_LEN_08BIT, 0x00}, /* ADBIT: 10/12 ADBIT: + 10/12 , raw 10 */ + {0x3007, CRL_REG_LEN_08BIT, 0x60}, /* mode selection */ + {0x301D, CRL_REG_LEN_08BIT, 0x08}, + {0x301E, CRL_REG_LEN_08BIT, 0x02}, + {0x3044, CRL_REG_LEN_08BIT, 0xE1}, + {0x3048, CRL_REG_LEN_08BIT, 0x33}, + {0x305C, CRL_REG_LEN_08BIT, 0x2c}, + {0x305E, CRL_REG_LEN_08BIT, 0x21}, + {0x3063, CRL_REG_LEN_08BIT, 0x54}, + /* Crop settings */ + {0x3038, CRL_REG_LEN_08BIT, 0x00}, /* WPV = 0 */ + {0x3039, CRL_REG_LEN_08BIT, 0x00}, + {0x303A, CRL_REG_LEN_08BIT, 0xE0}, /* WV = PIC_SIZE + 8 */ + {0x303B, CRL_REG_LEN_08BIT, 0x02}, + {0x303C, CRL_REG_LEN_08BIT, 0x04}, /* WPH = 4 */ + {0x303D, CRL_REG_LEN_08BIT, 0x00}, + {0x303E, CRL_REG_LEN_08BIT, 0x1C}, /* Effective size = 1308*/ + {0x303F, CRL_REG_LEN_08BIT, 0x05}, + /* 0x03h */ + {0x311D, CRL_REG_LEN_08BIT, 0x0A}, + {0x3123, CRL_REG_LEN_08BIT, 0x0F}, + {0x3147, CRL_REG_LEN_08BIT, 0x87}, + {0x31E1, CRL_REG_LEN_08BIT, 0x9E}, + {0x31E2, CRL_REG_LEN_08BIT, 0x01}, + {0x31E5, CRL_REG_LEN_08BIT, 0x05}, + {0x31E6, CRL_REG_LEN_08BIT, 0x05}, + {0x31E7, CRL_REG_LEN_08BIT, 0x3A}, + {0x31E8, CRL_REG_LEN_08BIT, 0x3A}, + /* 0x04h */ + {0x3203, CRL_REG_LEN_08BIT, 0xC8}, + {0x3207, CRL_REG_LEN_08BIT, 0x54}, + {0x3213, CRL_REG_LEN_08BIT, 0x16}, + {0x3215, CRL_REG_LEN_08BIT, 0xF6}, + {0x321A, CRL_REG_LEN_08BIT, 0x14}, + {0x321B, CRL_REG_LEN_08BIT, 0x51}, + 
{0x3229, CRL_REG_LEN_08BIT, 0xE7}, + {0x322A, CRL_REG_LEN_08BIT, 0xF0}, + {0x322B, CRL_REG_LEN_08BIT, 0x10}, + {0x3231, CRL_REG_LEN_08BIT, 0xE7}, + {0x3232, CRL_REG_LEN_08BIT, 0xF0}, + {0x3233, CRL_REG_LEN_08BIT, 0x10}, + {0x323C, CRL_REG_LEN_08BIT, 0xE8}, + {0x323D, CRL_REG_LEN_08BIT, 0x70}, + {0x3243, CRL_REG_LEN_08BIT, 0x08}, + {0x3244, CRL_REG_LEN_08BIT, 0xE1}, + {0x3245, CRL_REG_LEN_08BIT, 0x10}, + {0x3247, CRL_REG_LEN_08BIT, 0xE7}, + {0x3248, CRL_REG_LEN_08BIT, 0x60}, + {0x3249, CRL_REG_LEN_08BIT, 0x1E}, + {0x324B, CRL_REG_LEN_08BIT, 0x00}, + {0x324C, CRL_REG_LEN_08BIT, 0x41}, + {0x3250, CRL_REG_LEN_08BIT, 0x30}, + {0x3251, CRL_REG_LEN_08BIT, 0x0A}, + {0x3252, CRL_REG_LEN_08BIT, 0xFF}, + {0x3253, CRL_REG_LEN_08BIT, 0xFF}, + {0x3254, CRL_REG_LEN_08BIT, 0xFF}, + {0x3255, CRL_REG_LEN_08BIT, 0x02}, + {0x3257, CRL_REG_LEN_08BIT, 0xF0}, + {0x325A, CRL_REG_LEN_08BIT, 0xA6}, + {0x325D, CRL_REG_LEN_08BIT, 0x14}, + {0x325E, CRL_REG_LEN_08BIT, 0x51}, + {0x3261, CRL_REG_LEN_08BIT, 0x61}, + {0x3266, CRL_REG_LEN_08BIT, 0x30}, + {0x3267, CRL_REG_LEN_08BIT, 0x05}, + {0x3275, CRL_REG_LEN_08BIT, 0xE7}, + {0x3281, CRL_REG_LEN_08BIT, 0xEA}, + {0x3282, CRL_REG_LEN_08BIT, 0x70}, + {0x3285, CRL_REG_LEN_08BIT, 0xFF}, + {0x328A, CRL_REG_LEN_08BIT, 0xF0}, + {0x328D, CRL_REG_LEN_08BIT, 0xB6}, + {0x328E, CRL_REG_LEN_08BIT, 0x40}, + {0x3290, CRL_REG_LEN_08BIT, 0x42}, + {0x3291, CRL_REG_LEN_08BIT, 0x51}, + {0x3292, CRL_REG_LEN_08BIT, 0x1E}, + {0x3294, CRL_REG_LEN_08BIT, 0xC4}, + {0x3295, CRL_REG_LEN_08BIT, 0x20}, + {0x3297, CRL_REG_LEN_08BIT, 0x50}, + {0x3298, CRL_REG_LEN_08BIT, 0x31}, + {0x3299, CRL_REG_LEN_08BIT, 0x1F}, + {0x329B, CRL_REG_LEN_08BIT, 0xC0}, + {0x329C, CRL_REG_LEN_08BIT, 0x60}, + {0x329E, CRL_REG_LEN_08BIT, 0x4C}, + {0x329F, CRL_REG_LEN_08BIT, 0x71}, + {0x32A0, CRL_REG_LEN_08BIT, 0x1F}, + {0x32A2, CRL_REG_LEN_08BIT, 0xB6}, + {0x32A3, CRL_REG_LEN_08BIT, 0xC0}, + {0x32A4, CRL_REG_LEN_08BIT, 0x0B}, + {0x32A9, CRL_REG_LEN_08BIT, 0x24}, + {0x32AA, CRL_REG_LEN_08BIT, 0x41}, + 
{0x32B0, CRL_REG_LEN_08BIT, 0x25}, + {0x32B1, CRL_REG_LEN_08BIT, 0x51}, + {0x32B7, CRL_REG_LEN_08BIT, 0x1C}, + {0x32B8, CRL_REG_LEN_08BIT, 0xC1}, + {0x32B9, CRL_REG_LEN_08BIT, 0x12}, + {0x32BE, CRL_REG_LEN_08BIT, 0x1D}, + {0x32BF, CRL_REG_LEN_08BIT, 0xD1}, + {0x32C0, CRL_REG_LEN_08BIT, 0x12}, + {0x32C2, CRL_REG_LEN_08BIT, 0xA8}, + {0x32C3, CRL_REG_LEN_08BIT, 0xC0}, + {0x32C4, CRL_REG_LEN_08BIT, 0x0A}, + {0x32C5, CRL_REG_LEN_08BIT, 0x1E}, + {0x32C6, CRL_REG_LEN_08BIT, 0x21}, + {0x32C9, CRL_REG_LEN_08BIT, 0xB0}, + {0x32CA, CRL_REG_LEN_08BIT, 0x40}, + {0x32CC, CRL_REG_LEN_08BIT, 0x26}, + {0x32CD, CRL_REG_LEN_08BIT, 0xA1}, + {0x32D0, CRL_REG_LEN_08BIT, 0xB6}, + {0x32D1, CRL_REG_LEN_08BIT, 0xC0}, + {0x32D2, CRL_REG_LEN_08BIT, 0x0B}, + {0x32D4, CRL_REG_LEN_08BIT, 0xE2}, + {0x32D5, CRL_REG_LEN_08BIT, 0x40}, + {0x32D8, CRL_REG_LEN_08BIT, 0x4E}, + {0x32D9, CRL_REG_LEN_08BIT, 0xA1}, + {0x32EC, CRL_REG_LEN_08BIT, 0xF0}, + /* 0x05h */ + {0x3316, CRL_REG_LEN_08BIT, 0x02}, + {0x3317, CRL_REG_LEN_08BIT, 0x02}, + {0x3318, CRL_REG_LEN_08BIT, 0xD8}, /* PIC_SIZE = 728 */ + {0x3319, CRL_REG_LEN_08BIT, 0x02}, + {0x334E, CRL_REG_LEN_08BIT, 0x3D}, /* INCL selection 27MHz */ + {0x334F, CRL_REG_LEN_08BIT, 0x01}, +}; + +static struct crl_register_write_rep imx185_1952_1096_27MHZ[] = { + /* 0x02h */ + {0x3005, CRL_REG_LEN_08BIT, 0x01}, /* ADBIT: 10/12 */ + {0x3007, CRL_REG_LEN_08BIT, 0x10}, /* 1080p mode */ + {0x300A, CRL_REG_LEN_08BIT, 0xF0}, + {0x301D, CRL_REG_LEN_08BIT, 0x08}, + {0x301E, CRL_REG_LEN_08BIT, 0x02}, + {0x3048, CRL_REG_LEN_08BIT, 0x33}, + {0x305C, CRL_REG_LEN_08BIT, 0x2c}, /* INCLKSEL default */ + {0x305E, CRL_REG_LEN_08BIT, 0x21}, + {0x3063, CRL_REG_LEN_08BIT, 0x54}, + /* Crop settings */ + {0x3038, CRL_REG_LEN_08BIT, 0x00}, /* WPV = 0 */ + {0x3039, CRL_REG_LEN_08BIT, 0x00}, + {0x303A, CRL_REG_LEN_08BIT, 0xC0}, /* WV = PIC_SIZE + 8 */ + {0x303B, CRL_REG_LEN_08BIT, 0x04}, + {0x303C, CRL_REG_LEN_08BIT, 0x00}, /* WPH = 0 */ + {0x303D, CRL_REG_LEN_08BIT, 0x00}, + {0x303E, 
CRL_REG_LEN_08BIT, 0x9C}, + {0x303F, CRL_REG_LEN_08BIT, 0x07}, + /* 0x03h */ + {0x311D, CRL_REG_LEN_08BIT, 0x0A}, + {0x3123, CRL_REG_LEN_08BIT, 0x0F}, + {0x3126, CRL_REG_LEN_08BIT, 0xDF}, + {0x3147, CRL_REG_LEN_08BIT, 0x87}, + {0x31E0, CRL_REG_LEN_08BIT, 0x01}, + {0x31E1, CRL_REG_LEN_08BIT, 0x9E}, + {0x31E2, CRL_REG_LEN_08BIT, 0x01}, + {0x31E5, CRL_REG_LEN_08BIT, 0x05}, + {0x31E6, CRL_REG_LEN_08BIT, 0x05}, + {0x31E7, CRL_REG_LEN_08BIT, 0x3A}, + {0x31E8, CRL_REG_LEN_08BIT, 0x3A}, + /* 0x04h */ + {0x3203, CRL_REG_LEN_08BIT, 0xC8}, + {0x3207, CRL_REG_LEN_08BIT, 0x54}, + {0x3213, CRL_REG_LEN_08BIT, 0x16}, + {0x3215, CRL_REG_LEN_08BIT, 0xF6}, + {0x321A, CRL_REG_LEN_08BIT, 0x14}, + {0x321B, CRL_REG_LEN_08BIT, 0x51}, + {0x3229, CRL_REG_LEN_08BIT, 0xE7}, + {0x322A, CRL_REG_LEN_08BIT, 0xF0}, + {0x322B, CRL_REG_LEN_08BIT, 0x10}, + {0x3231, CRL_REG_LEN_08BIT, 0xE7}, + {0x3232, CRL_REG_LEN_08BIT, 0xF0}, + {0x3233, CRL_REG_LEN_08BIT, 0x10}, + {0x323C, CRL_REG_LEN_08BIT, 0xE8}, + {0x323D, CRL_REG_LEN_08BIT, 0x70}, + {0x3243, CRL_REG_LEN_08BIT, 0x08}, + {0x3244, CRL_REG_LEN_08BIT, 0xE1}, + {0x3245, CRL_REG_LEN_08BIT, 0x10}, + {0x3247, CRL_REG_LEN_08BIT, 0xE7}, + {0x3248, CRL_REG_LEN_08BIT, 0x60}, + {0x3249, CRL_REG_LEN_08BIT, 0x1E}, + {0x324B, CRL_REG_LEN_08BIT, 0x00}, + {0x324C, CRL_REG_LEN_08BIT, 0x41}, + {0x3250, CRL_REG_LEN_08BIT, 0x30}, + {0x3251, CRL_REG_LEN_08BIT, 0x0A}, + {0x3252, CRL_REG_LEN_08BIT, 0xFF}, + {0x3253, CRL_REG_LEN_08BIT, 0xFF}, + {0x3254, CRL_REG_LEN_08BIT, 0xFF}, + {0x3255, CRL_REG_LEN_08BIT, 0x02}, + {0x3257, CRL_REG_LEN_08BIT, 0xF0}, + {0x325A, CRL_REG_LEN_08BIT, 0xA6}, + {0x325D, CRL_REG_LEN_08BIT, 0x14}, + {0x325E, CRL_REG_LEN_08BIT, 0x51}, + {0x3261, CRL_REG_LEN_08BIT, 0x61}, + {0x3266, CRL_REG_LEN_08BIT, 0x30}, + {0x3267, CRL_REG_LEN_08BIT, 0x05}, + {0x3275, CRL_REG_LEN_08BIT, 0xE7}, + {0x3281, CRL_REG_LEN_08BIT, 0xEA}, + {0x3282, CRL_REG_LEN_08BIT, 0x70}, + {0x3285, CRL_REG_LEN_08BIT, 0xFF}, + {0x328A, CRL_REG_LEN_08BIT, 0xF0}, + {0x328D, 
CRL_REG_LEN_08BIT, 0xB6}, + {0x328E, CRL_REG_LEN_08BIT, 0x40}, + {0x3290, CRL_REG_LEN_08BIT, 0x42}, + {0x3291, CRL_REG_LEN_08BIT, 0x51}, + {0x3292, CRL_REG_LEN_08BIT, 0x1E}, + {0x3294, CRL_REG_LEN_08BIT, 0xC4}, + {0x3295, CRL_REG_LEN_08BIT, 0x20}, + {0x3297, CRL_REG_LEN_08BIT, 0x50}, + {0x3298, CRL_REG_LEN_08BIT, 0x31}, + {0x3299, CRL_REG_LEN_08BIT, 0x1F}, + {0x329B, CRL_REG_LEN_08BIT, 0xC0}, + {0x329C, CRL_REG_LEN_08BIT, 0x60}, + {0x329E, CRL_REG_LEN_08BIT, 0x4C}, + {0x329F, CRL_REG_LEN_08BIT, 0x71}, + {0x32A0, CRL_REG_LEN_08BIT, 0x1F}, + {0x32A2, CRL_REG_LEN_08BIT, 0xB6}, + {0x32A3, CRL_REG_LEN_08BIT, 0xC0}, + {0x32A4, CRL_REG_LEN_08BIT, 0x0B}, + {0x32A9, CRL_REG_LEN_08BIT, 0x24}, + {0x32AA, CRL_REG_LEN_08BIT, 0x41}, + {0x32B0, CRL_REG_LEN_08BIT, 0x25}, + {0x32B1, CRL_REG_LEN_08BIT, 0x51}, + {0x32B7, CRL_REG_LEN_08BIT, 0x1C}, + {0x32B8, CRL_REG_LEN_08BIT, 0xC1}, + {0x32B9, CRL_REG_LEN_08BIT, 0x12}, + {0x32BE, CRL_REG_LEN_08BIT, 0x1D}, + {0x32BF, CRL_REG_LEN_08BIT, 0xD1}, + {0x32C0, CRL_REG_LEN_08BIT, 0x12}, + {0x32C2, CRL_REG_LEN_08BIT, 0xA8}, + {0x32C3, CRL_REG_LEN_08BIT, 0xC0}, + {0x32C4, CRL_REG_LEN_08BIT, 0x0A}, + {0x32C5, CRL_REG_LEN_08BIT, 0x1E}, + {0x32C6, CRL_REG_LEN_08BIT, 0x21}, + {0x32C9, CRL_REG_LEN_08BIT, 0xB0}, + {0x32CA, CRL_REG_LEN_08BIT, 0x40}, + {0x32CC, CRL_REG_LEN_08BIT, 0x26}, + {0x32CD, CRL_REG_LEN_08BIT, 0xA1}, + {0x32D0, CRL_REG_LEN_08BIT, 0xB6}, + {0x32D1, CRL_REG_LEN_08BIT, 0xC0}, + {0x32D2, CRL_REG_LEN_08BIT, 0x0B}, + {0x32D4, CRL_REG_LEN_08BIT, 0xE2}, + {0x32D5, CRL_REG_LEN_08BIT, 0x40}, + {0x32D8, CRL_REG_LEN_08BIT, 0x4E}, + {0x32D9, CRL_REG_LEN_08BIT, 0xA1}, + {0x32EC, CRL_REG_LEN_08BIT, 0xF0}, + /* 0x05h */ + {0x3316, CRL_REG_LEN_08BIT, 0x04}, + {0x3317, CRL_REG_LEN_08BIT, 0x04}, + {0x3318, CRL_REG_LEN_08BIT, 0x48}, /* PIC_SIZE = 1096 */ + {0x3319, CRL_REG_LEN_08BIT, 0x04}, + {0x334E, CRL_REG_LEN_08BIT, 0x3D}, /* INCL selection 27MHz */ + {0x334F, CRL_REG_LEN_08BIT, 0x01}, +}; + +static struct crl_register_write_rep 
imx185_1952_1096_BUILD_IN_WDR_27MHZ[] = { + /* 0x02h */ + {0x3005, CRL_REG_LEN_08BIT, 0x01}, /* ADBIT: 10/12 */ + {0x3007, CRL_REG_LEN_08BIT, 0x10}, /* mode selection */ + {0x300A, CRL_REG_LEN_08BIT, 0xF0}, + {0x300C, CRL_REG_LEN_08BIT, 0x02}, + {0x300F, CRL_REG_LEN_08BIT, 0x05}, + {0x3010, CRL_REG_LEN_08BIT, 0x38}, + {0x3012, CRL_REG_LEN_08BIT, 0x0F}, + {0x301B, CRL_REG_LEN_08BIT, 0x98}, + {0x301C, CRL_REG_LEN_08BIT, 0x08}, + {0x301D, CRL_REG_LEN_08BIT, 0x08}, + {0x301E, CRL_REG_LEN_08BIT, 0x02}, + {0x3048, CRL_REG_LEN_08BIT, 0x33}, + {0x3056, CRL_REG_LEN_08BIT, 0xC9}, + {0x3057, CRL_REG_LEN_08BIT, 0x64}, + {0x305C, CRL_REG_LEN_08BIT, 0x2c}, /* INCLKSEL default */ + {0x305E, CRL_REG_LEN_08BIT, 0x21}, + {0x3063, CRL_REG_LEN_08BIT, 0x54}, + /* Crop settings */ + {0x3038, CRL_REG_LEN_08BIT, 0x00}, /* WPV = 0 */ + {0x3039, CRL_REG_LEN_08BIT, 0x00}, + {0x303A, CRL_REG_LEN_08BIT, 0x4C}, /* WV = PIC_SIZE + 8 */ + {0x303B, CRL_REG_LEN_08BIT, 0x04}, + {0x303C, CRL_REG_LEN_08BIT, 0x00}, /* WPH = 0 */ + {0x303D, CRL_REG_LEN_08BIT, 0x00}, + {0x303E, CRL_REG_LEN_08BIT, 0x9C}, /* Effective size = 1948*/ + {0x303F, CRL_REG_LEN_08BIT, 0x07}, + /* 0x03h */ + {0x311D, CRL_REG_LEN_08BIT, 0x0A}, + {0x3123, CRL_REG_LEN_08BIT, 0x0F}, + {0x3126, CRL_REG_LEN_08BIT, 0xDF}, + {0x3147, CRL_REG_LEN_08BIT, 0x87}, + {0x31E0, CRL_REG_LEN_08BIT, 0x01}, + {0x31E1, CRL_REG_LEN_08BIT, 0x9E}, + {0x31E2, CRL_REG_LEN_08BIT, 0x01}, + {0x31E5, CRL_REG_LEN_08BIT, 0x05}, + {0x31E6, CRL_REG_LEN_08BIT, 0x05}, + {0x31E7, CRL_REG_LEN_08BIT, 0x3A}, + {0x31E8, CRL_REG_LEN_08BIT, 0x3A}, + /* 0x04h */ + {0x3203, CRL_REG_LEN_08BIT, 0xC8}, + {0x3207, CRL_REG_LEN_08BIT, 0x54}, + {0x3213, CRL_REG_LEN_08BIT, 0x16}, + {0x3215, CRL_REG_LEN_08BIT, 0xF6}, + {0x321A, CRL_REG_LEN_08BIT, 0x14}, + {0x321B, CRL_REG_LEN_08BIT, 0x51}, + {0x3229, CRL_REG_LEN_08BIT, 0xE7}, + {0x322A, CRL_REG_LEN_08BIT, 0xF0}, + {0x322B, CRL_REG_LEN_08BIT, 0x10}, + {0x3231, CRL_REG_LEN_08BIT, 0xE7}, + {0x3232, CRL_REG_LEN_08BIT, 0xF0}, + {0x3233, 
CRL_REG_LEN_08BIT, 0x10}, + {0x323C, CRL_REG_LEN_08BIT, 0xE8}, + {0x323D, CRL_REG_LEN_08BIT, 0x70}, + {0x3243, CRL_REG_LEN_08BIT, 0x08}, + {0x3244, CRL_REG_LEN_08BIT, 0xE1}, + {0x3245, CRL_REG_LEN_08BIT, 0x10}, + {0x3247, CRL_REG_LEN_08BIT, 0xE7}, + {0x3248, CRL_REG_LEN_08BIT, 0x60}, + {0x3249, CRL_REG_LEN_08BIT, 0x1E}, + {0x324B, CRL_REG_LEN_08BIT, 0x00}, + {0x324C, CRL_REG_LEN_08BIT, 0x41}, + {0x3250, CRL_REG_LEN_08BIT, 0x30}, + {0x3251, CRL_REG_LEN_08BIT, 0x0A}, + {0x3252, CRL_REG_LEN_08BIT, 0xFF}, + {0x3253, CRL_REG_LEN_08BIT, 0xFF}, + {0x3254, CRL_REG_LEN_08BIT, 0xFF}, + {0x3255, CRL_REG_LEN_08BIT, 0x02}, + {0x3257, CRL_REG_LEN_08BIT, 0xF0}, + {0x325A, CRL_REG_LEN_08BIT, 0xA6}, + {0x325D, CRL_REG_LEN_08BIT, 0x14}, + {0x325E, CRL_REG_LEN_08BIT, 0x51}, + {0x3261, CRL_REG_LEN_08BIT, 0x61}, + {0x3266, CRL_REG_LEN_08BIT, 0x30}, + {0x3267, CRL_REG_LEN_08BIT, 0x05}, + {0x3275, CRL_REG_LEN_08BIT, 0xE7}, + {0x3281, CRL_REG_LEN_08BIT, 0xEA}, + {0x3282, CRL_REG_LEN_08BIT, 0x70}, + {0x3285, CRL_REG_LEN_08BIT, 0xFF}, + {0x328A, CRL_REG_LEN_08BIT, 0xF0}, + {0x328D, CRL_REG_LEN_08BIT, 0xB6}, + {0x328E, CRL_REG_LEN_08BIT, 0x40}, + {0x3290, CRL_REG_LEN_08BIT, 0x42}, + {0x3291, CRL_REG_LEN_08BIT, 0x51}, + {0x3292, CRL_REG_LEN_08BIT, 0x1E}, + {0x3294, CRL_REG_LEN_08BIT, 0xC4}, + {0x3295, CRL_REG_LEN_08BIT, 0x20}, + {0x3297, CRL_REG_LEN_08BIT, 0x50}, + {0x3298, CRL_REG_LEN_08BIT, 0x31}, + {0x3299, CRL_REG_LEN_08BIT, 0x1F}, + {0x329B, CRL_REG_LEN_08BIT, 0xC0}, + {0x329C, CRL_REG_LEN_08BIT, 0x60}, + {0x329E, CRL_REG_LEN_08BIT, 0x4C}, + {0x329F, CRL_REG_LEN_08BIT, 0x71}, + {0x32A0, CRL_REG_LEN_08BIT, 0x1F}, + {0x32A2, CRL_REG_LEN_08BIT, 0xB6}, + {0x32A3, CRL_REG_LEN_08BIT, 0xC0}, + {0x32A4, CRL_REG_LEN_08BIT, 0x0B}, + {0x32A9, CRL_REG_LEN_08BIT, 0x24}, + {0x32AA, CRL_REG_LEN_08BIT, 0x41}, + {0x32B0, CRL_REG_LEN_08BIT, 0x25}, + {0x32B1, CRL_REG_LEN_08BIT, 0x51}, + {0x32B7, CRL_REG_LEN_08BIT, 0x1C}, + {0x32B8, CRL_REG_LEN_08BIT, 0xC1}, + {0x32B9, CRL_REG_LEN_08BIT, 0x12}, + {0x32BE, 
CRL_REG_LEN_08BIT, 0x1D}, + {0x32BF, CRL_REG_LEN_08BIT, 0xD1}, + {0x32C0, CRL_REG_LEN_08BIT, 0x12}, + {0x32C2, CRL_REG_LEN_08BIT, 0xA8}, + {0x32C3, CRL_REG_LEN_08BIT, 0xC0}, + {0x32C4, CRL_REG_LEN_08BIT, 0x0A}, + {0x32C5, CRL_REG_LEN_08BIT, 0x1E}, + {0x32C6, CRL_REG_LEN_08BIT, 0x21}, + {0x32C9, CRL_REG_LEN_08BIT, 0xB0}, + {0x32CA, CRL_REG_LEN_08BIT, 0x40}, + {0x32CC, CRL_REG_LEN_08BIT, 0x26}, + {0x32CD, CRL_REG_LEN_08BIT, 0xA1}, + {0x32D0, CRL_REG_LEN_08BIT, 0xB6}, + {0x32D1, CRL_REG_LEN_08BIT, 0xC0}, + {0x32D2, CRL_REG_LEN_08BIT, 0x0B}, + {0x32D4, CRL_REG_LEN_08BIT, 0xE2}, + {0x32D5, CRL_REG_LEN_08BIT, 0x40}, + {0x32D8, CRL_REG_LEN_08BIT, 0x4E}, + {0x32D9, CRL_REG_LEN_08BIT, 0xA1}, + {0x32EC, CRL_REG_LEN_08BIT, 0xF0}, + /* 0x05h */ + {0x3303, CRL_REG_LEN_08BIT, 0x10}, /* repetation wdr */ + {0x3314, CRL_REG_LEN_08BIT, 0x08}, + {0x3316, CRL_REG_LEN_08BIT, 0x04}, + {0x3317, CRL_REG_LEN_08BIT, 0x04}, + {0x3318, CRL_REG_LEN_08BIT, 0x48}, /* PIC_SIZE = 1096 */ + {0x3319, CRL_REG_LEN_08BIT, 0x04}, + {0x334E, CRL_REG_LEN_08BIT, 0x3D}, /* INCL selection 27MHz */ + {0x334F, CRL_REG_LEN_08BIT, 0x01}, +}; + +static struct crl_register_write_rep imx185_1952_1208_27MHZ[] = { + /* 0x02h */ + {0x3005, CRL_REG_LEN_08BIT, 0x00}, /* ADBIT: 10/12 */ + {0x3007, CRL_REG_LEN_08BIT, 0x00}, /* WUXGA cropping */ + {0x3018, CRL_REG_LEN_08BIT, 0x28}, + {0x3019, CRL_REG_LEN_08BIT, 0x05}, + {0x301B, CRL_REG_LEN_08BIT, 0x53}, + {0x301C, CRL_REG_LEN_08BIT, 0x07}, + {0x301D, CRL_REG_LEN_08BIT, 0x08}, + {0x301E, CRL_REG_LEN_08BIT, 0x02}, + {0x3048, CRL_REG_LEN_08BIT, 0x33}, + {0x305C, CRL_REG_LEN_08BIT, 0x2c}, /* INCLKSEL default */ + {0x305E, CRL_REG_LEN_08BIT, 0x21}, + {0x3063, CRL_REG_LEN_08BIT, 0x54}, + /* Crop settings */ + {0x3038, CRL_REG_LEN_08BIT, 0x00}, /* WPV = 0 */ + {0x3039, CRL_REG_LEN_08BIT, 0x00}, + {0x303A, CRL_REG_LEN_08BIT, 0xC0}, /* WV = PIC_SIZE + 8 */ + {0x303B, CRL_REG_LEN_08BIT, 0x04}, + {0x303C, CRL_REG_LEN_08BIT, 0x00}, /* WPH = 0 */ + {0x303D, CRL_REG_LEN_08BIT, 0x00}, 
+ {0x303E, CRL_REG_LEN_08BIT, 0x9C}, + {0x303F, CRL_REG_LEN_08BIT, 0x07}, + /* 0x03h */ + {0x311D, CRL_REG_LEN_08BIT, 0x0A}, + {0x3123, CRL_REG_LEN_08BIT, 0x0F}, + {0x3126, CRL_REG_LEN_08BIT, 0x00}, + {0x3147, CRL_REG_LEN_08BIT, 0x87}, + {0x31E0, CRL_REG_LEN_08BIT, 0x00}, + {0x31E1, CRL_REG_LEN_08BIT, 0x9E}, + {0x31E2, CRL_REG_LEN_08BIT, 0x01}, + {0x31E5, CRL_REG_LEN_08BIT, 0x05}, + {0x31E6, CRL_REG_LEN_08BIT, 0x05}, + {0x31E7, CRL_REG_LEN_08BIT, 0x3A}, + {0x31E8, CRL_REG_LEN_08BIT, 0x3A}, + /* 0x04h */ + {0x3203, CRL_REG_LEN_08BIT, 0xC8}, + {0x3207, CRL_REG_LEN_08BIT, 0x54}, + {0x3213, CRL_REG_LEN_08BIT, 0x16}, + {0x3215, CRL_REG_LEN_08BIT, 0xF6}, + {0x321A, CRL_REG_LEN_08BIT, 0x14}, + {0x321B, CRL_REG_LEN_08BIT, 0x51}, + {0x3229, CRL_REG_LEN_08BIT, 0xE7}, + {0x322A, CRL_REG_LEN_08BIT, 0xF0}, + {0x322B, CRL_REG_LEN_08BIT, 0x10}, + {0x3231, CRL_REG_LEN_08BIT, 0xE7}, + {0x3232, CRL_REG_LEN_08BIT, 0xF0}, + {0x3233, CRL_REG_LEN_08BIT, 0x10}, + {0x323C, CRL_REG_LEN_08BIT, 0xE8}, + {0x323D, CRL_REG_LEN_08BIT, 0x70}, + {0x3243, CRL_REG_LEN_08BIT, 0x08}, + {0x3244, CRL_REG_LEN_08BIT, 0xE1}, + {0x3245, CRL_REG_LEN_08BIT, 0x10}, + {0x3247, CRL_REG_LEN_08BIT, 0xE7}, + {0x3248, CRL_REG_LEN_08BIT, 0x60}, + {0x3249, CRL_REG_LEN_08BIT, 0x1E}, + {0x324B, CRL_REG_LEN_08BIT, 0x00}, + {0x324C, CRL_REG_LEN_08BIT, 0x41}, + {0x3250, CRL_REG_LEN_08BIT, 0x30}, + {0x3251, CRL_REG_LEN_08BIT, 0x0A}, + {0x3252, CRL_REG_LEN_08BIT, 0xFF}, + {0x3253, CRL_REG_LEN_08BIT, 0xFF}, + {0x3254, CRL_REG_LEN_08BIT, 0xFF}, + {0x3255, CRL_REG_LEN_08BIT, 0x02}, + {0x3257, CRL_REG_LEN_08BIT, 0xF0}, + {0x325A, CRL_REG_LEN_08BIT, 0xA6}, + {0x325D, CRL_REG_LEN_08BIT, 0x14}, + {0x325E, CRL_REG_LEN_08BIT, 0x51}, + {0x3261, CRL_REG_LEN_08BIT, 0x61}, + {0x3266, CRL_REG_LEN_08BIT, 0x30}, + {0x3267, CRL_REG_LEN_08BIT, 0x05}, + {0x3275, CRL_REG_LEN_08BIT, 0xE7}, + {0x3281, CRL_REG_LEN_08BIT, 0xEA}, + {0x3282, CRL_REG_LEN_08BIT, 0x70}, + {0x3285, CRL_REG_LEN_08BIT, 0xFF}, + {0x328A, CRL_REG_LEN_08BIT, 0xF0}, + {0x328D, 
CRL_REG_LEN_08BIT, 0xB6}, + {0x328E, CRL_REG_LEN_08BIT, 0x40}, + {0x3290, CRL_REG_LEN_08BIT, 0x42}, + {0x3291, CRL_REG_LEN_08BIT, 0x51}, + {0x3292, CRL_REG_LEN_08BIT, 0x1E}, + {0x3294, CRL_REG_LEN_08BIT, 0xC4}, + {0x3295, CRL_REG_LEN_08BIT, 0x20}, + {0x3297, CRL_REG_LEN_08BIT, 0x50}, + {0x3298, CRL_REG_LEN_08BIT, 0x31}, + {0x3299, CRL_REG_LEN_08BIT, 0x1F}, + {0x329B, CRL_REG_LEN_08BIT, 0xC0}, + {0x329C, CRL_REG_LEN_08BIT, 0x60}, + {0x329E, CRL_REG_LEN_08BIT, 0x4C}, + {0x329F, CRL_REG_LEN_08BIT, 0x71}, + {0x32A0, CRL_REG_LEN_08BIT, 0x1F}, + {0x32A2, CRL_REG_LEN_08BIT, 0xB6}, + {0x32A3, CRL_REG_LEN_08BIT, 0xC0}, + {0x32A4, CRL_REG_LEN_08BIT, 0x0B}, + {0x32A9, CRL_REG_LEN_08BIT, 0x24}, + {0x32AA, CRL_REG_LEN_08BIT, 0x41}, + {0x32B0, CRL_REG_LEN_08BIT, 0x25}, + {0x32B1, CRL_REG_LEN_08BIT, 0x51}, + {0x32B7, CRL_REG_LEN_08BIT, 0x1C}, + {0x32B8, CRL_REG_LEN_08BIT, 0xC1}, + {0x32B9, CRL_REG_LEN_08BIT, 0x12}, + {0x32BE, CRL_REG_LEN_08BIT, 0x1D}, + {0x32BF, CRL_REG_LEN_08BIT, 0xD1}, + {0x32C0, CRL_REG_LEN_08BIT, 0x12}, + {0x32C2, CRL_REG_LEN_08BIT, 0xA8}, + {0x32C3, CRL_REG_LEN_08BIT, 0xC0}, + {0x32C4, CRL_REG_LEN_08BIT, 0x0A}, + {0x32C5, CRL_REG_LEN_08BIT, 0x1E}, + {0x32C6, CRL_REG_LEN_08BIT, 0x21}, + {0x32C9, CRL_REG_LEN_08BIT, 0xB0}, + {0x32CA, CRL_REG_LEN_08BIT, 0x40}, + {0x32CC, CRL_REG_LEN_08BIT, 0x26}, + {0x32CD, CRL_REG_LEN_08BIT, 0xA1}, + {0x32D0, CRL_REG_LEN_08BIT, 0xB6}, + {0x32D1, CRL_REG_LEN_08BIT, 0xC0}, + {0x32D2, CRL_REG_LEN_08BIT, 0x0B}, + {0x32D4, CRL_REG_LEN_08BIT, 0xE2}, + {0x32D5, CRL_REG_LEN_08BIT, 0x40}, + {0x32D8, CRL_REG_LEN_08BIT, 0x4E}, + {0x32D9, CRL_REG_LEN_08BIT, 0xA1}, + {0x32EC, CRL_REG_LEN_08BIT, 0xF0}, + /* 0x05h */ + {0x3316, CRL_REG_LEN_08BIT, 0x04}, + {0x3317, CRL_REG_LEN_08BIT, 0x04}, + {0x3318, CRL_REG_LEN_08BIT, 0xB8}, /* PIC_SIZE = 1208 */ + {0x3319, CRL_REG_LEN_08BIT, 0x04}, + {0x334E, CRL_REG_LEN_08BIT, 0x3D}, /* INCL selection 27MHz */ + {0x334F, CRL_REG_LEN_08BIT, 0x01}, +}; + +static struct crl_register_write_rep 
imx185_1952_1208_BUILD_IN_WDR_27MHZ[] = { + /* 0x02h */ + {0x3005, CRL_REG_LEN_08BIT, 0x00}, /* ADBIT: 10/12 */ + {0x3007, CRL_REG_LEN_08BIT, 0x00}, /* WUXGA cropping */ + {0x300C, CRL_REG_LEN_08BIT, 0x02}, + {0x300F, CRL_REG_LEN_08BIT, 0x05}, + {0x3010, CRL_REG_LEN_08BIT, 0x38}, + {0x3012, CRL_REG_LEN_08BIT, 0x0F}, + {0x3018, CRL_REG_LEN_08BIT, 0x98}, + {0x3019, CRL_REG_LEN_08BIT, 0x08}, + {0x301B, CRL_REG_LEN_08BIT, 0x65}, + {0x301C, CRL_REG_LEN_08BIT, 0x04}, + {0x301D, CRL_REG_LEN_08BIT, 0x08}, + {0x301E, CRL_REG_LEN_08BIT, 0x02}, + {0x3048, CRL_REG_LEN_08BIT, 0x33}, + {0x3056, CRL_REG_LEN_08BIT, 0xC9}, + {0x3057, CRL_REG_LEN_08BIT, 0x33}, + {0x305C, CRL_REG_LEN_08BIT, 0x2c}, /* INCLKSEL default */ + {0x305E, CRL_REG_LEN_08BIT, 0x21}, + {0x3063, CRL_REG_LEN_08BIT, 0x54}, + {0x30E1, CRL_REG_LEN_08BIT, 0xE1}, + /* Crop settings */ + {0x3038, CRL_REG_LEN_08BIT, 0x00}, /* WPV = 0 */ + {0x3039, CRL_REG_LEN_08BIT, 0x00}, + {0x303A, CRL_REG_LEN_08BIT, 0xC9}, + {0x303B, CRL_REG_LEN_08BIT, 0x04}, + {0x303C, CRL_REG_LEN_08BIT, 0x00}, + {0x303D, CRL_REG_LEN_08BIT, 0x00}, + {0x303E, CRL_REG_LEN_08BIT, 0x9C}, + {0x303F, CRL_REG_LEN_08BIT, 0x07}, + /* 0x03h */ + {0x311D, CRL_REG_LEN_08BIT, 0x0A}, + {0x3123, CRL_REG_LEN_08BIT, 0x0F}, + {0x3126, CRL_REG_LEN_08BIT, 0xDF}, + {0x3147, CRL_REG_LEN_08BIT, 0x87}, + {0x31E0, CRL_REG_LEN_08BIT, 0x01}, + {0x31E1, CRL_REG_LEN_08BIT, 0x9E}, + {0x31E2, CRL_REG_LEN_08BIT, 0x01}, + {0x31E5, CRL_REG_LEN_08BIT, 0x05}, + {0x31E6, CRL_REG_LEN_08BIT, 0x05}, + {0x31E7, CRL_REG_LEN_08BIT, 0x3A}, + {0x31E8, CRL_REG_LEN_08BIT, 0x3A}, + /* 0x04h */ + {0x3203, CRL_REG_LEN_08BIT, 0xC8}, + {0x3207, CRL_REG_LEN_08BIT, 0x54}, + {0x3213, CRL_REG_LEN_08BIT, 0x16}, + {0x3215, CRL_REG_LEN_08BIT, 0xF6}, + {0x321A, CRL_REG_LEN_08BIT, 0x14}, + {0x321B, CRL_REG_LEN_08BIT, 0x51}, + {0x3229, CRL_REG_LEN_08BIT, 0xE7}, + {0x322A, CRL_REG_LEN_08BIT, 0xF0}, + {0x322B, CRL_REG_LEN_08BIT, 0x10}, + {0x3231, CRL_REG_LEN_08BIT, 0xE7}, + {0x3232, CRL_REG_LEN_08BIT, 0xF0}, + 
{0x3233, CRL_REG_LEN_08BIT, 0x10}, + {0x323C, CRL_REG_LEN_08BIT, 0xE8}, + {0x323D, CRL_REG_LEN_08BIT, 0x70}, + {0x3243, CRL_REG_LEN_08BIT, 0x08}, + {0x3244, CRL_REG_LEN_08BIT, 0xE1}, + {0x3245, CRL_REG_LEN_08BIT, 0x10}, + {0x3247, CRL_REG_LEN_08BIT, 0xE7}, + {0x3248, CRL_REG_LEN_08BIT, 0x60}, + {0x3249, CRL_REG_LEN_08BIT, 0x1E}, + {0x324B, CRL_REG_LEN_08BIT, 0x00}, + {0x324C, CRL_REG_LEN_08BIT, 0x41}, + {0x3250, CRL_REG_LEN_08BIT, 0x30}, + {0x3251, CRL_REG_LEN_08BIT, 0x0A}, + {0x3252, CRL_REG_LEN_08BIT, 0xFF}, + {0x3253, CRL_REG_LEN_08BIT, 0xFF}, + {0x3254, CRL_REG_LEN_08BIT, 0xFF}, + {0x3255, CRL_REG_LEN_08BIT, 0x02}, + {0x3257, CRL_REG_LEN_08BIT, 0xF0}, + {0x325A, CRL_REG_LEN_08BIT, 0xA6}, + {0x325D, CRL_REG_LEN_08BIT, 0x14}, + {0x325E, CRL_REG_LEN_08BIT, 0x51}, + {0x3261, CRL_REG_LEN_08BIT, 0x61}, + {0x3266, CRL_REG_LEN_08BIT, 0x30}, + {0x3267, CRL_REG_LEN_08BIT, 0x05}, + {0x3275, CRL_REG_LEN_08BIT, 0xE7}, + {0x3281, CRL_REG_LEN_08BIT, 0xEA}, + {0x3282, CRL_REG_LEN_08BIT, 0x70}, + {0x3285, CRL_REG_LEN_08BIT, 0xFF}, + {0x328A, CRL_REG_LEN_08BIT, 0xF0}, + {0x328D, CRL_REG_LEN_08BIT, 0xB6}, + {0x328E, CRL_REG_LEN_08BIT, 0x40}, + {0x3290, CRL_REG_LEN_08BIT, 0x42}, + {0x3291, CRL_REG_LEN_08BIT, 0x51}, + {0x3292, CRL_REG_LEN_08BIT, 0x1E}, + {0x3294, CRL_REG_LEN_08BIT, 0xC4}, + {0x3295, CRL_REG_LEN_08BIT, 0x20}, + {0x3297, CRL_REG_LEN_08BIT, 0x50}, + {0x3298, CRL_REG_LEN_08BIT, 0x31}, + {0x3299, CRL_REG_LEN_08BIT, 0x1F}, + {0x329B, CRL_REG_LEN_08BIT, 0xC0}, + {0x329C, CRL_REG_LEN_08BIT, 0x60}, + {0x329E, CRL_REG_LEN_08BIT, 0x4C}, + {0x329F, CRL_REG_LEN_08BIT, 0x71}, + {0x32A0, CRL_REG_LEN_08BIT, 0x1F}, + {0x32A2, CRL_REG_LEN_08BIT, 0xB6}, + {0x32A3, CRL_REG_LEN_08BIT, 0xC0}, + {0x32A4, CRL_REG_LEN_08BIT, 0x0B}, + {0x32A9, CRL_REG_LEN_08BIT, 0x24}, + {0x32AA, CRL_REG_LEN_08BIT, 0x41}, + {0x32B0, CRL_REG_LEN_08BIT, 0x25}, + {0x32B1, CRL_REG_LEN_08BIT, 0x51}, + {0x32B7, CRL_REG_LEN_08BIT, 0x1C}, + {0x32B8, CRL_REG_LEN_08BIT, 0xC1}, + {0x32B9, CRL_REG_LEN_08BIT, 0x12}, + 
{0x32BE, CRL_REG_LEN_08BIT, 0x1D}, + {0x32BF, CRL_REG_LEN_08BIT, 0xD1}, + {0x32C0, CRL_REG_LEN_08BIT, 0x12}, + {0x32C2, CRL_REG_LEN_08BIT, 0xA8}, + {0x32C3, CRL_REG_LEN_08BIT, 0xC0}, + {0x32C4, CRL_REG_LEN_08BIT, 0x0A}, + {0x32C5, CRL_REG_LEN_08BIT, 0x1E}, + {0x32C6, CRL_REG_LEN_08BIT, 0x21}, + {0x32C9, CRL_REG_LEN_08BIT, 0xB0}, + {0x32CA, CRL_REG_LEN_08BIT, 0x40}, + {0x32CC, CRL_REG_LEN_08BIT, 0x26}, + {0x32CD, CRL_REG_LEN_08BIT, 0xA1}, + {0x32D0, CRL_REG_LEN_08BIT, 0xB6}, + {0x32D1, CRL_REG_LEN_08BIT, 0xC0}, + {0x32D2, CRL_REG_LEN_08BIT, 0x0B}, + {0x32D4, CRL_REG_LEN_08BIT, 0xE2}, + {0x32D5, CRL_REG_LEN_08BIT, 0x40}, + {0x32D8, CRL_REG_LEN_08BIT, 0x4E}, + {0x32D9, CRL_REG_LEN_08BIT, 0xA1}, + {0x32EC, CRL_REG_LEN_08BIT, 0xF0}, + /* 0x05h */ + {0x3303, CRL_REG_LEN_08BIT, 0x00}, + {0x3314, CRL_REG_LEN_08BIT, 0x08}, + {0x3316, CRL_REG_LEN_08BIT, 0x04}, + {0x3317, CRL_REG_LEN_08BIT, 0x04}, + {0x3318, CRL_REG_LEN_08BIT, 0xB8}, /* PIC_SIZE = 1208 */ + {0x3319, CRL_REG_LEN_08BIT, 0x04}, + {0x334E, CRL_REG_LEN_08BIT, 0x3D}, /* INCL selection 27MHz */ + {0x334F, CRL_REG_LEN_08BIT, 0x01}, +}; + +static struct crl_register_write_rep imx185_streamon_regs[] = { + {IMX185_REG_STANDBY, CRL_REG_LEN_08BIT, 0x00}, + {0x00, CRL_REG_LEN_DELAY, 30, 0x00}, /* Delay 30ms */ + {IMX185_REG_XMSTA, CRL_REG_LEN_08BIT, 0x00}, + {0x00, CRL_REG_LEN_DELAY, 30, 0x00}, /* Delay 30ms */ +}; + +static struct crl_register_write_rep imx185_streamoff_regs[] = { + {IMX185_REG_STANDBY, CRL_REG_LEN_08BIT, 0x01}, + {0x00, CRL_REG_LEN_DELAY, 30, 0x00}, /* Delay 30ms */ + {IMX185_REG_XMSTA, CRL_REG_LEN_08BIT, 0x01}, + {0x00, CRL_REG_LEN_DELAY, 30, 0x00}, /* Delay 30ms */ +}; + +static struct crl_arithmetic_ops imx185_hflip_ops[] = { + { + .op = CRL_BITWISE_LSHIFT, + .operand.entity_val = 1, + } +}; + +/* shs1 = fll - exposure -1 */ +static struct crl_arithmetic_ops imx185_shs1_lsb_ops[] = { + { + .op = CRL_SUBTRACT, + .operand.entity_type = CRL_DYNAMIC_VAL_OPERAND_TYPE_CTRL_VAL, + .operand.entity_val = 
V4L2_CID_FRAME_LENGTH_LINES, + }, + { + .op = CRL_SUBTRACT, + .operand.entity_type = CRL_DYNAMIC_VAL_OPERAND_TYPE_CONST, + .operand.entity_val = 1, + } +}; + +static struct crl_arithmetic_ops imx185_shs1_msb_ops[] = { + { + .op = CRL_SUBTRACT, + .operand.entity_type = CRL_DYNAMIC_VAL_OPERAND_TYPE_CTRL_VAL, + .operand.entity_val = V4L2_CID_FRAME_LENGTH_LINES, + }, + { + .op = CRL_SUBTRACT, + .operand.entity_type = CRL_DYNAMIC_VAL_OPERAND_TYPE_CONST, + .operand.entity_val = 1, + }, + { + .op = CRL_BITWISE_RSHIFT, + .operand.entity_type = CRL_DYNAMIC_VAL_OPERAND_TYPE_CONST, + .operand.entity_val = 8, + } +}; + +static struct crl_arithmetic_ops imx185_shs1_hsb_ops[] = { + { + .op = CRL_SUBTRACT, + .operand.entity_type = CRL_DYNAMIC_VAL_OPERAND_TYPE_CTRL_VAL, + .operand.entity_val = V4L2_CID_FRAME_LENGTH_LINES, + }, + { + .op = CRL_SUBTRACT, + .operand.entity_type = CRL_DYNAMIC_VAL_OPERAND_TYPE_CONST, + .operand.entity_val = 1, + }, + { + .op = CRL_BITWISE_RSHIFT, + .operand.entity_type = CRL_DYNAMIC_VAL_OPERAND_TYPE_CONST, + .operand.entity_val = 16, + } +}; + +/* shs2 = fll - exposure * 16 -1 */ +static struct crl_arithmetic_ops imx185_shs2_lsb_ops[] = { + { + .op = CRL_BITWISE_LSHIFT, + .operand.entity_type = CRL_DYNAMIC_VAL_OPERAND_TYPE_CONST, + .operand.entity_val = 4, + }, + { + .op = CRL_SUBTRACT, + .operand.entity_type = CRL_DYNAMIC_VAL_OPERAND_TYPE_CTRL_VAL, + .operand.entity_val = V4L2_CID_FRAME_LENGTH_LINES, + }, + { + .op = CRL_SUBTRACT, + .operand.entity_type = CRL_DYNAMIC_VAL_OPERAND_TYPE_CONST, + .operand.entity_val = 1, + } +}; + +static struct crl_arithmetic_ops imx185_shs2_msb_ops[] = { + { + .op = CRL_BITWISE_LSHIFT, + .operand.entity_type = CRL_DYNAMIC_VAL_OPERAND_TYPE_CONST, + .operand.entity_val = 4, + }, + { + .op = CRL_SUBTRACT, + .operand.entity_type = CRL_DYNAMIC_VAL_OPERAND_TYPE_CTRL_VAL, + .operand.entity_val = V4L2_CID_FRAME_LENGTH_LINES, + }, + { + .op = CRL_SUBTRACT, + .operand.entity_type = CRL_DYNAMIC_VAL_OPERAND_TYPE_CONST, + 
.operand.entity_val = 1, + }, + { + .op = CRL_BITWISE_RSHIFT, + .operand.entity_type = CRL_DYNAMIC_VAL_OPERAND_TYPE_CONST, + .operand.entity_val = 8, + } +}; + +static struct crl_arithmetic_ops imx185_shs2_hsb_ops[] = { + { + .op = CRL_BITWISE_LSHIFT, + .operand.entity_type = CRL_DYNAMIC_VAL_OPERAND_TYPE_CONST, + .operand.entity_val = 4, + }, + { + .op = CRL_SUBTRACT, + .operand.entity_type = CRL_DYNAMIC_VAL_OPERAND_TYPE_CTRL_VAL, + .operand.entity_val = V4L2_CID_FRAME_LENGTH_LINES, + }, + { + .op = CRL_SUBTRACT, + .operand.entity_type = CRL_DYNAMIC_VAL_OPERAND_TYPE_CONST, + .operand.entity_val = 1, + }, + { + .op = CRL_BITWISE_RSHIFT, + .operand.entity_type = CRL_DYNAMIC_VAL_OPERAND_TYPE_CONST, + .operand.entity_val = 16, + } +}; + +static struct crl_arithmetic_ops imx185_fll_msb_ops[] = { + { + .op = CRL_BITWISE_RSHIFT, + .operand.entity_val = 8, + } +}; + +static struct crl_arithmetic_ops imx185_llp_msb_ops[] = { + { + .op = CRL_BITWISE_RSHIFT, + .operand.entity_val = 8, + } +}; + +static struct crl_arithmetic_ops imx185_fll_hsb_ops[] = { + { + .op = CRL_BITWISE_RSHIFT, + .operand.entity_val = 16, + } +}; + +static struct crl_dynamic_register_access imx185_h_flip_regs[] = { + { + .address = 0x3007, + .len = CRL_REG_LEN_08BIT | CRL_REG_READ_AND_UPDATE, + .ops_items = ARRAY_SIZE(imx185_hflip_ops), + .ops = imx185_hflip_ops, + .mask = 0x2, + } +}; + +static struct crl_dynamic_register_access imx185_v_flip_regs[] = { + { + .address = 0x3007, + .len = CRL_REG_LEN_08BIT | CRL_REG_READ_AND_UPDATE, + .ops_items = 0, + .ops = 0, + .mask = 0x1, + } +}; + +static struct crl_dynamic_register_access imx185_ana_gain_global_regs[] = { + { + .address = 0x3014, + .len = CRL_REG_LEN_08BIT, + .ops_items = 0, + .ops = 0, + .mask = 0xff, + } +}; + +static struct crl_dynamic_register_access imx185_shs_regs[] = { + /* + * Use 8bits access since 24bits or 32bits access will fail + * TODO: root cause the 24bits and 32bits access issues + */ + { + .address = 0x3020, + .len = 
CRL_REG_LEN_08BIT, + .ops_items = ARRAY_SIZE(imx185_shs1_lsb_ops), + .ops = imx185_shs1_lsb_ops, + .mask = 0xff, + }, + { + .address = 0x3021, + .len = CRL_REG_LEN_08BIT, + .ops_items = ARRAY_SIZE(imx185_shs1_msb_ops), + .ops = imx185_shs1_msb_ops, + .mask = 0xff, + }, + { + .address = 0x3022, + .len = CRL_REG_LEN_08BIT, + .ops_items = ARRAY_SIZE(imx185_shs1_hsb_ops), + .ops = imx185_shs1_hsb_ops, + .mask = 0x1, + }, + { + .address = 0x3023, + .len = CRL_REG_LEN_08BIT, + .ops_items = ARRAY_SIZE(imx185_shs2_lsb_ops), + .ops = imx185_shs2_lsb_ops, + .mask = 0xff, + }, + { + .address = 0x3024, + .len = CRL_REG_LEN_08BIT, + .ops_items = ARRAY_SIZE(imx185_shs2_msb_ops), + .ops = imx185_shs2_msb_ops, + .mask = 0xff, + }, + { + .address = 0x3025, + .len = CRL_REG_LEN_08BIT, + .ops_items = ARRAY_SIZE(imx185_shs2_hsb_ops), + .ops = imx185_shs2_hsb_ops, + .mask = 0x1, + } +}; + +static struct crl_dynamic_register_access imx185_fll_regs[] = { + /* + * Use 8bits access since 24bits or 32bits access will fail + * TODO: root cause the 24bits and 32bits access issues + */ + { + .address = 0x3018, + .len = CRL_REG_LEN_08BIT, + .ops_items = 0, + .ops = 0, + .mask = 0xff, + }, + { + .address = 0x3019, + .len = CRL_REG_LEN_08BIT, + .ops_items = ARRAY_SIZE(imx185_fll_msb_ops), + .ops = imx185_fll_msb_ops, + .mask = 0xff, + }, + { + .address = 0x301a, + .len = CRL_REG_LEN_08BIT, + .ops_items = ARRAY_SIZE(imx185_fll_hsb_ops), + .ops = imx185_fll_hsb_ops, + .mask = 0x1, + }, +}; + +static struct crl_dynamic_register_access imx185_llp_regs[] = { + { + .address = 0x301b, + .len = CRL_REG_LEN_08BIT, + .ops_items = 0, + .ops = 0, + .mask = 0xff, + }, + { + .address = 0x301c, + .len = CRL_REG_LEN_08BIT, + .ops_items = ARRAY_SIZE(imx185_llp_msb_ops), + .ops = imx185_llp_msb_ops, + .mask = 0xff, + }, +}; + +/* ctrl-val == 1 ? 
1 * 0x02 : 0 * 0x02 -> 2 and 0 */ +static struct crl_arithmetic_ops imx185_wdr_switch_r300c_ops[] = { + { + .op = CRL_MULTIPLY, + .operand.entity_type = CRL_DYNAMIC_VAL_OPERAND_TYPE_CONST, + .operand.entity_val = 0x02, + } +}; + +/* ctrl-val == 1 ? (1 * 0x04 + 0x1) : (0 * 0x04 + 0x1) -> 0x05 and 0x01 */ +static struct crl_arithmetic_ops imx185_wdr_switch_r300f_ops[] = { + { + .op = CRL_MULTIPLY, + .operand.entity_type = CRL_DYNAMIC_VAL_OPERAND_TYPE_CONST, + .operand.entity_val = 0x04, + }, + { + .op = CRL_ADD, + .operand.entity_type = CRL_DYNAMIC_VAL_OPERAND_TYPE_CONST, + .operand.entity_val = 0x01, + } +}; + +/* ctrl-val == 1 ? (0x39 - 1 * 0x01) : (0x39 - 0 * 0x01) -> 0x38 and 0x39 */ +static struct crl_arithmetic_ops imx185_wdr_switch_r3010_ops[] = { + { + .op = CRL_MULTIPLY, + .operand.entity_type = CRL_DYNAMIC_VAL_OPERAND_TYPE_CONST, + .operand.entity_val = 0x01, + }, + { + .op = CRL_SUBTRACT, + .operand.entity_type = CRL_DYNAMIC_VAL_OPERAND_TYPE_CONST, + .operand.entity_val = 0x39, + } +}; + +/* ctrl-val == 1 ? 
(0x50 - 1 * 0x41) : (0x50 - 0 * 0x41) -> 0x0f and 0x50 */ +static struct crl_arithmetic_ops imx185_wdr_switch_r3012_ops[] = { + { + .op = CRL_MULTIPLY, + .operand.entity_type = CRL_DYNAMIC_VAL_OPERAND_TYPE_CONST, + .operand.entity_val = 0x41, + }, + { + .op = CRL_SUBTRACT, + .operand.entity_type = CRL_DYNAMIC_VAL_OPERAND_TYPE_CONST, + .operand.entity_val = 0x50, + } +}; + +static struct crl_dynamic_register_access imx185_wdr_switch_regs[] = { + { 0x300c, CRL_REG_LEN_08BIT, 0xff, + ARRAY_SIZE(imx185_wdr_switch_r300c_ops), + imx185_wdr_switch_r300c_ops, 0 }, + { 0x300f, CRL_REG_LEN_08BIT, 0xff, + ARRAY_SIZE(imx185_wdr_switch_r300f_ops), + imx185_wdr_switch_r300f_ops, 0 }, + { 0x3010, CRL_REG_LEN_08BIT, 0xff, + ARRAY_SIZE(imx185_wdr_switch_r3010_ops), + imx185_wdr_switch_r3010_ops, 0 }, + { 0x3012, CRL_REG_LEN_08BIT, 0xff, + ARRAY_SIZE(imx185_wdr_switch_r3012_ops), + imx185_wdr_switch_r3012_ops, 0 }, +}; + +/* Needed for acpi support for runtime detection */ +static struct crl_sensor_detect_config imx185_sensor_detect_regset[] = { + { + .reg = { 0x3385, CRL_REG_LEN_08BIT, 0x000000ff }, + .width = 7, + }, + { + .reg = { 0x3384, CRL_REG_LEN_08BIT, 0x000000ff }, + .width = 7, + } +}; + +static struct crl_pll_configuration imx185_pll_configurations[] = { + { + .input_clk = 27000000, + .op_sys_clk = 56250000, + .bitsperpixel = 10, + .pixel_rate_csi = 45000000, + .pixel_rate_pa = 45000000, /* pixel_rate = MIPICLK*2 *4/10 */ + .csi_lanes = 4, + .comp_items = 0, + .ctrl_data = 0, + .pll_regs_items = ARRAY_SIZE(imx185_pll_111mbps), + .pll_regs = imx185_pll_111mbps, + }, + { + .input_clk = 27000000, + .op_sys_clk = 112500000, + .bitsperpixel = 10, + .pixel_rate_csi = 90000000, + .pixel_rate_pa = 90000000, + .csi_lanes = 4, + .comp_items = 0, + .ctrl_data = 0, + .pll_regs_items = ARRAY_SIZE(imx185_pll_222mbps), + .pll_regs = imx185_pll_222mbps, + }, + { + .input_clk = 27000000, + .op_sys_clk = 112500000, + .bitsperpixel = 12, + .pixel_rate_csi = 75000000, + .pixel_rate_pa = 
75000000, + .csi_lanes = 4, + .comp_items = 0, + .ctrl_data = 0, + .pll_regs_items = ARRAY_SIZE(imx185_pll_222mbps), + .pll_regs = imx185_pll_222mbps, + }, + { + .input_clk = 27000000, + .op_sys_clk = 225000000, + .bitsperpixel = 12, + .pixel_rate_csi = 150000000, + .pixel_rate_pa = 150000000, + .csi_lanes = 4, + .comp_items = 0, + .ctrl_data = 0, + .pll_regs_items = ARRAY_SIZE(imx185_pll_445mbps), + .pll_regs = imx185_pll_445mbps, + } +}; + +static struct crl_subdev_rect_rep imx185_1952_1208_rects[] = { + { + .subdev_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .in_rect.left = 0, + .in_rect.top = 0, + .in_rect.width = 1952, + .in_rect.height = 1208, + .out_rect.left = 0, + .out_rect.top = 0, + .out_rect.width = 1952, + .out_rect.height = 1208, + }, + { + .subdev_type = CRL_SUBDEV_TYPE_BINNER, + .in_rect.left = 0, + .in_rect.top = 0, + .in_rect.width = 1952, + .in_rect.height = 1208, + .out_rect.left = 0, + .out_rect.top = 0, + .out_rect.width = 1952, + .out_rect.height = 1208, + } +}; + +static struct crl_subdev_rect_rep imx185_1952_1096_rects[] = { + { + .subdev_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .in_rect.left = 0, + .in_rect.top = 0, + .in_rect.width = 1952, + .in_rect.height = 1208, + .out_rect.left = 0, + .out_rect.top = 0, + .out_rect.width = 1952, + .out_rect.height = 1208, + }, + { + .subdev_type = CRL_SUBDEV_TYPE_BINNER, + .in_rect.left = 0, + .in_rect.top = 0, + .in_rect.width = 1952, + .in_rect.height = 1208, + .out_rect.left = 0, + .out_rect.top = 0, + .out_rect.width = 1952, + .out_rect.height = 1096, + } +}; + +static struct crl_subdev_rect_rep imx185_1312_728_rects[] = { + { + .subdev_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .in_rect.left = 0, + .in_rect.top = 0, + .in_rect.width = 1952, + .in_rect.height = 1208, + .out_rect.left = 0, + .out_rect.top = 0, + .out_rect.width = 1952, + .out_rect.height = 1208, + }, + { + .subdev_type = CRL_SUBDEV_TYPE_BINNER, + .in_rect.left = 0, + .in_rect.top = 0, + .in_rect.width = 1952, + .in_rect.height = 1208, + 
.out_rect.left = 0, + .out_rect.top = 0, + .out_rect.width = 1312, + .out_rect.height = 728, + } +}; + +static struct crl_mode_rep imx185_modes[] = { + { + .sd_rects_items = ARRAY_SIZE(imx185_1952_1208_rects), + .sd_rects = imx185_1952_1208_rects, + .binn_hor = 1, + .binn_vert = 1, + .scale_m = 1, + .width = 1952, + .height = 1208, + .min_llp = 2250, + .min_fll = 1333, + .comp_items = 1, + .ctrl_data = &ctrl_data_modes[0], + .mode_regs_items = ARRAY_SIZE(imx185_1952_1208_27MHZ), + .mode_regs = imx185_1952_1208_27MHZ, + }, + { + .sd_rects_items = ARRAY_SIZE(imx185_1952_1208_rects), + .sd_rects = imx185_1952_1208_rects, + .binn_hor = 1, + .binn_vert = 1, + .scale_m = 1, + .width = 1952, + .height = 1208, + .min_llp = 2250, + .min_fll = 1333, + .comp_items = 1, + .ctrl_data = &ctrl_data_modes[1], + .mode_regs_items = + ARRAY_SIZE(imx185_1952_1208_BUILD_IN_WDR_27MHZ), + .mode_regs = imx185_1952_1208_BUILD_IN_WDR_27MHZ, + }, + { + .sd_rects_items = ARRAY_SIZE(imx185_1952_1096_rects), + .sd_rects = imx185_1952_1096_rects, + .binn_hor = 1, + .binn_vert = 1, + .scale_m = 1, + .width = 1952, + .height = 1096, + .min_llp = 2200, + .min_fll = 1135, + .comp_items = 1, + .ctrl_data = &ctrl_data_modes[0], + .mode_regs_items = ARRAY_SIZE(imx185_1952_1096_27MHZ), + .mode_regs = imx185_1952_1096_27MHZ, + }, + { + .sd_rects_items = ARRAY_SIZE(imx185_1952_1096_rects), + .sd_rects = imx185_1952_1096_rects, + .binn_hor = 1, + .binn_vert = 1, + .scale_m = 1, + .width = 1952, + .height = 1096, + .min_llp = 2200, + .min_fll = 1135, + .comp_items = 1, + .ctrl_data = &ctrl_data_modes[1], + .mode_regs_items = + ARRAY_SIZE(imx185_1952_1096_BUILD_IN_WDR_27MHZ), + .mode_regs = imx185_1952_1096_BUILD_IN_WDR_27MHZ, + }, + { + .sd_rects_items = ARRAY_SIZE(imx185_1312_728_rects), + .sd_rects = imx185_1312_728_rects, + .binn_hor = 1, + .binn_vert = 1, + .scale_m = 1, + .width = 1312, + .height = 728, + .min_llp = 1300, + .min_fll = 787, + .comp_items = 1, + .ctrl_data = &ctrl_data_modes[0], + 
.mode_regs_items = ARRAY_SIZE(imx185_1312_728_27MHZ_CROPPING), + .mode_regs = imx185_1312_728_27MHZ_CROPPING, + } +}; + +static struct crl_sensor_subdev_config imx185_sensor_subdevs[] = { + { + .subdev_type = CRL_SUBDEV_TYPE_BINNER, + .name = "imx185 binner", + }, + { + .subdev_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .name = "imx185 pixel array", + } +}; + +static struct crl_sensor_limits imx185_sensor_limits = { + .x_addr_min = 0, + .y_addr_min = 0, + .x_addr_max = 1952, + .y_addr_max = 1208, + .min_frame_length_lines = 320, + .max_frame_length_lines = 65535, + .min_line_length_pixels = 380, + .max_line_length_pixels = 32752, +}; + +static struct crl_flip_data imx185_flip_configurations[] = { + { + .flip = CRL_FLIP_DEFAULT_NONE, + .pixel_order = CRL_PIXEL_ORDER_RGGB, + }, + { + .flip = CRL_FLIP_HFLIP, + .pixel_order = CRL_PIXEL_ORDER_GRBG, + }, + { + .flip = CRL_FLIP_VFLIP, + .pixel_order = CRL_PIXEL_ORDER_GBRG, + }, + { + .flip = CRL_FLIP_HFLIP_VFLIP, + .pixel_order = CRL_PIXEL_ORDER_BGGR, + } +}; + +static struct crl_csi_data_fmt imx185_crl_csi_data_fmt[] = { + { + .code = MEDIA_BUS_FMT_SGRBG10_1X10, + .pixel_order = CRL_PIXEL_ORDER_GRBG, + .bits_per_pixel = 10, + .regs_items = ARRAY_SIZE(imx185_fmt_raw10), + .regs = imx185_fmt_raw10, + }, + { + .code = MEDIA_BUS_FMT_SRGGB10_1X10, + .pixel_order = CRL_PIXEL_ORDER_RGGB, + .bits_per_pixel = 10, + .regs_items = ARRAY_SIZE(imx185_fmt_raw10), + .regs = imx185_fmt_raw10, + }, + { + .code = MEDIA_BUS_FMT_SBGGR10_1X10, + .pixel_order = CRL_PIXEL_ORDER_BGGR, + .bits_per_pixel = 10, + .regs_items = ARRAY_SIZE(imx185_fmt_raw10), + .regs = imx185_fmt_raw10, + }, + { + .code = MEDIA_BUS_FMT_SGBRG10_1X10, + .pixel_order = CRL_PIXEL_ORDER_GBRG, + .bits_per_pixel = 10, + .regs_items = ARRAY_SIZE(imx185_fmt_raw10), + .regs = imx185_fmt_raw10, + }, + { + .code = MEDIA_BUS_FMT_SGRBG12_1X12, + .pixel_order = CRL_PIXEL_ORDER_GRBG, + .bits_per_pixel = 12, + .regs_items = ARRAY_SIZE(imx185_fmt_raw12), + .regs = imx185_fmt_raw12, + }, + 
{ + .code = MEDIA_BUS_FMT_SRGGB12_1X12, + .pixel_order = CRL_PIXEL_ORDER_RGGB, + .bits_per_pixel = 12, + .regs_items = ARRAY_SIZE(imx185_fmt_raw12), + .regs = imx185_fmt_raw12, + }, + { + .code = MEDIA_BUS_FMT_SBGGR12_1X12, + .pixel_order = CRL_PIXEL_ORDER_BGGR, + .bits_per_pixel = 12, + .regs_items = ARRAY_SIZE(imx185_fmt_raw12), + .regs = imx185_fmt_raw12, + }, + { + .code = MEDIA_BUS_FMT_SGBRG12_1X12, + .pixel_order = CRL_PIXEL_ORDER_GBRG, + .bits_per_pixel = 12, + .regs_items = ARRAY_SIZE(imx185_fmt_raw12), + .regs = imx185_fmt_raw12, + } +}; + +static struct crl_v4l2_ctrl imx185_v4l2_ctrls[] = { + { + .sd_type = CRL_SUBDEV_TYPE_BINNER, + .op_type = CRL_V4L2_CTRL_SET_OP, + .context = SENSOR_IDLE, + .ctrl_id = V4L2_CID_LINK_FREQ, + .name = "V4L2_CID_LINK_FREQ", + .type = CRL_V4L2_CTRL_TYPE_MENU_INT, + .data.v4l2_int_menu.def = 0, + .data.v4l2_int_menu.max = 0, + .data.v4l2_int_menu.menu = 0, + .flags = 0, + .impact = CRL_IMPACTS_NO_IMPACT, + .regs_items = 0, + .regs = 0, + .dep_items = 0, + .dep_ctrls = 0, + }, + { + .sd_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .op_type = CRL_V4L2_CTRL_GET_OP, + .context = SENSOR_POWERED_ON, + .ctrl_id = V4L2_CID_PIXEL_RATE, + .name = "V4L2_CID_PIXEL_RATE_PA", + .type = CRL_V4L2_CTRL_TYPE_INTEGER, + .data.std_data.min = 0, + .data.std_data.max = INT_MAX, + .data.std_data.step = 1, + .data.std_data.def = 0, + .flags = 0, + .impact = CRL_IMPACTS_NO_IMPACT, + .regs_items = 0, + .regs = 0, + .dep_items = 0, + .dep_ctrls = 0, + }, + { + .sd_type = CRL_SUBDEV_TYPE_BINNER, + .op_type = CRL_V4L2_CTRL_GET_OP, + .context = SENSOR_POWERED_ON, + .ctrl_id = V4L2_CID_PIXEL_RATE, + .name = "V4L2_CID_PIXEL_RATE_CSI", + .type = CRL_V4L2_CTRL_TYPE_INTEGER, + .data.std_data.min = 0, + .data.std_data.max = INT_MAX, + .data.std_data.step = 1, + .data.std_data.def = 0, + .flags = 0, + .impact = CRL_IMPACTS_NO_IMPACT, + .regs_items = 0, + .regs = 0, + .dep_items = 0, + .dep_ctrls = 0, + }, + { + .sd_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .op_type = 
CRL_V4L2_CTRL_SET_OP, + .context = SENSOR_POWERED_ON, + .ctrl_id = V4L2_CID_HFLIP, + .name = "V4L2_CID_HFLIP", + .type = CRL_V4L2_CTRL_TYPE_INTEGER, + .data.std_data.min = 0, + .data.std_data.max = 1, + .data.std_data.step = 1, + .data.std_data.def = 0, + .flags = 0, + .impact = CRL_IMPACTS_NO_IMPACT, + .ctrl = 0, + .regs_items = ARRAY_SIZE(imx185_h_flip_regs), + .regs = imx185_h_flip_regs, + .dep_items = 0, + .dep_ctrls = 0, + }, + { + .sd_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .op_type = CRL_V4L2_CTRL_SET_OP, + .context = SENSOR_POWERED_ON, + .ctrl_id = V4L2_CID_VFLIP, + .name = "V4L2_CID_VFLIP", + .type = CRL_V4L2_CTRL_TYPE_INTEGER, + .data.std_data.min = 0, + .data.std_data.max = 1, + .data.std_data.step = 1, + .data.std_data.def = 0, + .flags = 0, + .impact = CRL_IMPACTS_NO_IMPACT, + .ctrl = 0, + .regs_items = ARRAY_SIZE(imx185_v_flip_regs), + .regs = imx185_v_flip_regs, + .dep_items = 0, + .dep_ctrls = 0, + }, + { + .sd_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .op_type = CRL_V4L2_CTRL_SET_OP, + .context = SENSOR_POWERED_ON, + .ctrl_id = V4L2_CID_ANALOGUE_GAIN, + .name = "V4L2_CID_ANALOGUE_GAIN", + .type = CRL_V4L2_CTRL_TYPE_INTEGER, + .data.std_data.min = 0, + .data.std_data.max = 160, + .data.std_data.step = 1, + .data.std_data.def = 0, + .flags = 0, + .impact = CRL_IMPACTS_NO_IMPACT, + .ctrl = 0, + .regs_items = ARRAY_SIZE(imx185_ana_gain_global_regs), + .regs = imx185_ana_gain_global_regs, + .dep_items = 0, + .dep_ctrls = 0, + }, + { + .sd_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .op_type = CRL_V4L2_CTRL_SET_OP, + .context = SENSOR_POWERED_ON, + .ctrl_id = V4L2_CID_EXPOSURE, + .name = "V4L2_CID_EXPOSURE", + .type = CRL_V4L2_CTRL_TYPE_INTEGER, + .data.std_data.min = 0, + .data.std_data.max = IMX185_MAX_SHS1, + .data.std_data.step = 1, + .data.std_data.def = 0x47, + .flags = 0, + .impact = CRL_IMPACTS_NO_IMPACT, + .ctrl = 0, + .regs_items = ARRAY_SIZE(imx185_shs_regs), + .regs = imx185_shs_regs, + .dep_items = 0, + .dep_ctrls = 0, + }, + { + .sd_type = 
CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .op_type = CRL_V4L2_CTRL_SET_OP, + .context = SENSOR_POWERED_ON, + .ctrl_id = V4L2_CID_FRAME_LENGTH_LINES, + .name = "Frame length lines", + .type = CRL_V4L2_CTRL_TYPE_CUSTOM, + .data.std_data.min = 720, + .data.std_data.max = IMX185_VMAX, + .data.std_data.step = 1, + .data.std_data.def = 0x465, + .flags = V4L2_CTRL_FLAG_UPDATE, + .impact = CRL_IMPACTS_NO_IMPACT, + .ctrl = 0, + .regs_items = ARRAY_SIZE(imx185_fll_regs), + .regs = imx185_fll_regs, + .dep_items = 0, + .dep_ctrls = 0, + .v4l2_type = V4L2_CTRL_TYPE_INTEGER, + }, + { + .sd_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .op_type = CRL_V4L2_CTRL_SET_OP, + .context = SENSOR_POWERED_ON, + .ctrl_id = V4L2_CID_LINE_LENGTH_PIXELS, + .name = "Line Length Pixels", + .type = CRL_V4L2_CTRL_TYPE_CUSTOM, + .data.std_data.min = 0x898, + .data.std_data.max = IMX185_HMAX, + .data.std_data.step = 1, + .data.std_data.def = 0x898, + .flags = V4L2_CTRL_FLAG_UPDATE, + .impact = CRL_IMPACTS_NO_IMPACT, + .ctrl = 0, + .regs_items = ARRAY_SIZE(imx185_llp_regs), + .regs = imx185_llp_regs, + .dep_items = 0, + .dep_ctrls = 0, + .v4l2_type = V4L2_CTRL_TYPE_INTEGER, + }, + { + .sd_type = CRL_SUBDEV_TYPE_BINNER, + .op_type = CRL_V4L2_CTRL_SET_OP, + .context = SENSOR_POWERED_ON, + .ctrl_id = V4L2_CID_WDR_MODE, + .name = "V4L2_CID_WDR_MODE", + .type = CRL_V4L2_CTRL_TYPE_CUSTOM, + .data.std_data.min = 0, + .data.std_data.max = 1, + .data.std_data.step = 1, + .data.std_data.def = 0, + .flags = V4L2_CTRL_FLAG_UPDATE, + .impact = CRL_IMPACTS_MODE_SELECTION, + .ctrl = 0, + .regs_items = ARRAY_SIZE(imx185_wdr_switch_regs), + .regs = imx185_wdr_switch_regs, + .dep_items = 0, + .dep_ctrls = 0, + .v4l2_type = V4L2_CTRL_TYPE_INTEGER, + }, +}; + +static struct crl_arithmetic_ops imx185_frame_desc_width_ops[] = { + { + .op = CRL_ASSIGNMENT, + .operand.entity_type = CRL_DYNAMIC_VAL_OPERAND_TYPE_VAR_REF, + .operand.entity_val = CRL_VAR_REF_OUTPUT_WIDTH, + }, +}; + +static struct crl_arithmetic_ops imx185_frame_desc_height_ops[] 
= { + { + .op = CRL_ASSIGNMENT, + .operand.entity_type = CRL_DYNAMIC_VAL_OPERAND_TYPE_CONST, + .operand.entity_val = 1, + }, +}; + +static struct crl_frame_desc imx185_frame_desc[] = { + { + .flags.entity_val = 0, + .bpp.entity_type = CRL_DYNAMIC_VAL_OPERAND_TYPE_VAR_REF, + .bpp.entity_val = CRL_VAR_REF_BITSPERPIXEL, + .pixelcode.entity_val = MEDIA_BUS_FMT_FIXED, + .length.entity_val = 0, + .start_line.entity_val = 0, + .start_pixel.entity_val = 0, + .width = { + .ops_items = ARRAY_SIZE(imx185_frame_desc_width_ops), + .ops = imx185_frame_desc_width_ops, + }, + .height = { + .ops_items = ARRAY_SIZE(imx185_frame_desc_height_ops), + .ops = imx185_frame_desc_height_ops, + }, + .csi2_channel.entity_val = 0, + .csi2_data_type.entity_val = 0x12, + }, +}; + +/* Power items, they are enabled in the order they are listed here */ +static struct crl_power_seq_entity imx185_power_items[] = { + { + .type = CRL_POWER_ETY_CLK_FRAMEWORK, + .val = 27000000, + }, + { + .type = CRL_POWER_ETY_GPIO_FROM_PDATA, + .val = 1, + }, +}; + +struct crl_sensor_configuration imx185_crl_configuration = { + + .power_items = ARRAY_SIZE(imx185_power_items), + .power_entities = imx185_power_items, + + .powerup_regs_items = ARRAY_SIZE(imx185_powerup_standby), + .powerup_regs = imx185_powerup_standby, + + .poweroff_regs_items = 0, + .poweroff_regs = 0, + + .id_reg_items = ARRAY_SIZE(imx185_sensor_detect_regset), + .id_regs = imx185_sensor_detect_regset, + + .subdev_items = ARRAY_SIZE(imx185_sensor_subdevs), + .subdevs = imx185_sensor_subdevs, + + .sensor_limits = &imx185_sensor_limits, + + .pll_config_items = ARRAY_SIZE(imx185_pll_configurations), + .pll_configs = imx185_pll_configurations, + + .modes_items = ARRAY_SIZE(imx185_modes), + .modes = imx185_modes, + + .streamon_regs_items = ARRAY_SIZE(imx185_streamon_regs), + .streamon_regs = imx185_streamon_regs, + + .streamoff_regs_items = ARRAY_SIZE(imx185_streamoff_regs), + .streamoff_regs = imx185_streamoff_regs, + + .v4l2_ctrls_items = 
ARRAY_SIZE(imx185_v4l2_ctrls), + .v4l2_ctrl_bank = imx185_v4l2_ctrls, + + .csi_fmts_items = ARRAY_SIZE(imx185_crl_csi_data_fmt), + .csi_fmts = imx185_crl_csi_data_fmt, + + .flip_items = ARRAY_SIZE(imx185_flip_configurations), + .flip_data = imx185_flip_configurations, + + .frame_desc_entries = ARRAY_SIZE(imx185_frame_desc), + .frame_desc_type = CRL_V4L2_MBUS_FRAME_DESC_TYPE_CSI2, + .frame_desc = imx185_frame_desc, +}; + +#endif /* __CRLMODULE_IMX185_CONFIGURATION_H_ */ diff --git a/drivers/media/i2c/crlmodule/crl_imx214_configuration.h b/drivers/media/i2c/crlmodule/crl_imx214_configuration.h new file mode 100644 index 0000000000000..f49deefade25f --- /dev/null +++ b/drivers/media/i2c/crlmodule/crl_imx214_configuration.h @@ -0,0 +1,1428 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (C) 2014 - 2018 Intel Corporation + * + * Author: Vinod Govindapillai + * + */ + +#ifndef __CRLMODULE_imx214_CONFIGURATION_H_ +#define __CRLMODULE_imx214_CONFIGURATION_H_ + +#include "crlmodule-nvm.h" +#include "crlmodule-sensor-ds.h" + +static struct crl_register_write_rep imx214_pll_1080mbps[] = { + { 0x0301, CRL_REG_LEN_08BIT, 0x05 }, + { 0x0303, CRL_REG_LEN_08BIT, 0x02 }, + { 0x0305, CRL_REG_LEN_08BIT, 0x03 }, + { 0x0306, CRL_REG_LEN_08BIT, 0x00 }, + { 0x0307, CRL_REG_LEN_08BIT, 0x87 }, + { 0x0309, CRL_REG_LEN_08BIT, 0x0a }, + { 0x030B, CRL_REG_LEN_08BIT, 0x01 }, + { 0x0310, CRL_REG_LEN_08BIT, 0x00 }, + { 0x0820, CRL_REG_LEN_08BIT, 0x10 }, + { 0x0821, CRL_REG_LEN_08BIT, 0xe0 }, + { 0x0822, CRL_REG_LEN_08BIT, 0x66 }, + { 0x0823, CRL_REG_LEN_08BIT, 0x66 }, + { 0x3A03, CRL_REG_LEN_08BIT, 0x09 }, + { 0x3A04, CRL_REG_LEN_08BIT, 0x40 }, + { 0x3A05, CRL_REG_LEN_08BIT, 0x01 }, +}; + +static struct crl_register_write_rep imx214_pll_8_1080mbps[] = { + { 0x0301, CRL_REG_LEN_08BIT, 0x05 }, + { 0x0303, CRL_REG_LEN_08BIT, 0x02 }, + { 0x0305, CRL_REG_LEN_08BIT, 0x03 }, + { 0x0306, CRL_REG_LEN_08BIT, 0x00 }, + { 0x0307, CRL_REG_LEN_08BIT, 0x87 }, + { 0x030B, CRL_REG_LEN_08BIT, 0x01 }, + { 
0x0309, CRL_REG_LEN_08BIT, 0x08 }, + { 0x0310, CRL_REG_LEN_08BIT, 0x00 }, + { 0x0820, CRL_REG_LEN_08BIT, 0x10 }, + { 0x0821, CRL_REG_LEN_08BIT, 0xe0 }, + { 0x0822, CRL_REG_LEN_08BIT, 0x00 }, + { 0x0823, CRL_REG_LEN_08BIT, 0x00 }, + { 0x3A03, CRL_REG_LEN_08BIT, 0x09 }, + { 0x3A04, CRL_REG_LEN_08BIT, 0x40 }, + { 0x3A05, CRL_REG_LEN_08BIT, 0x01 }, +}; + +static struct crl_register_write_rep imx214_pll_1200mbps[] = { + { 0x0301, CRL_REG_LEN_08BIT, 0x05 }, + { 0x0303, CRL_REG_LEN_08BIT, 0x02 }, + { 0x0305, CRL_REG_LEN_08BIT, 0x03 }, + { 0x0306, CRL_REG_LEN_08BIT, 0x00 }, + { 0x0307, CRL_REG_LEN_08BIT, 0x96 }, + { 0x0309, CRL_REG_LEN_08BIT, 0x0a }, + { 0x030B, CRL_REG_LEN_08BIT, 0x01 }, + { 0x0310, CRL_REG_LEN_08BIT, 0x00 }, + { 0x0820, CRL_REG_LEN_08BIT, 0x12}, + { 0x0821, CRL_REG_LEN_08BIT, 0xC0 }, + { 0x0822, CRL_REG_LEN_08BIT, 0x66 }, + { 0x0823, CRL_REG_LEN_08BIT, 0x66 }, + { 0x3A03, CRL_REG_LEN_08BIT, 0x09 }, + { 0x3A04, CRL_REG_LEN_08BIT, 0x40 }, + { 0x3A05, CRL_REG_LEN_08BIT, 0x01 }, +}; + +static struct crl_register_write_rep imx214_pll_8_1200mbps[] = { + { 0x0301, CRL_REG_LEN_08BIT, 0x05 }, + { 0x0303, CRL_REG_LEN_08BIT, 0x02 }, + { 0x0305, CRL_REG_LEN_08BIT, 0x03 }, + { 0x0306, CRL_REG_LEN_08BIT, 0x00 }, + { 0x0307, CRL_REG_LEN_08BIT, 0x96 }, + { 0x030B, CRL_REG_LEN_08BIT, 0x01 }, + { 0x0309, CRL_REG_LEN_08BIT, 0x08 }, + { 0x0310, CRL_REG_LEN_08BIT, 0x00 }, + { 0x0820, CRL_REG_LEN_08BIT, 0x12}, + { 0x0821, CRL_REG_LEN_08BIT, 0xC0 }, + { 0x0822, CRL_REG_LEN_08BIT, 0x00 }, + { 0x0823, CRL_REG_LEN_08BIT, 0x00 }, + { 0x3A03, CRL_REG_LEN_08BIT, 0x09 }, + { 0x3A04, CRL_REG_LEN_08BIT, 0x40 }, + { 0x3A05, CRL_REG_LEN_08BIT, 0x01 }, +}; + +static struct crl_register_write_rep imx214_powerup_regset[] = { + { 0x0103, CRL_REG_LEN_08BIT, 0x01 }, + { 0x0100, CRL_REG_LEN_08BIT, 0x00 }, + { 0x0136, CRL_REG_LEN_08BIT, 0x18 }, /*24Mhz*/ + { 0x0137, CRL_REG_LEN_08BIT, 0x00 }, + { 0x0101, CRL_REG_LEN_08BIT, 0x00 }, + { 0x0105, CRL_REG_LEN_08BIT, 0x01 }, + { 0x0138, 
CRL_REG_LEN_08BIT, 0x01 }, + { 0x0106, CRL_REG_LEN_08BIT, 0x01 }, + { 0x4550, CRL_REG_LEN_08BIT, 0x02 }, + { 0x4601, CRL_REG_LEN_08BIT, 0x00 }, + { 0x4642, CRL_REG_LEN_08BIT, 0x05 }, + { 0x6227, CRL_REG_LEN_08BIT, 0x11 }, + { 0x6276, CRL_REG_LEN_08BIT, 0x00 }, + { 0x900E, CRL_REG_LEN_08BIT, 0x06 }, + { 0xA802, CRL_REG_LEN_08BIT, 0x90 }, + { 0xA803, CRL_REG_LEN_08BIT, 0x11 }, + { 0xA804, CRL_REG_LEN_08BIT, 0x62 }, + { 0xA805, CRL_REG_LEN_08BIT, 0x77 }, + { 0xA806, CRL_REG_LEN_08BIT, 0xAE }, + { 0xA807, CRL_REG_LEN_08BIT, 0x34 }, + { 0xA808, CRL_REG_LEN_08BIT, 0xAE }, + { 0xA809, CRL_REG_LEN_08BIT, 0x35 }, + { 0xA80A, CRL_REG_LEN_08BIT, 0x62 }, + { 0xA80B, CRL_REG_LEN_08BIT, 0x83 }, + { 0xAE33, CRL_REG_LEN_08BIT, 0x00 }, + { 0x4174, CRL_REG_LEN_08BIT, 0x00 }, + { 0x4175, CRL_REG_LEN_08BIT, 0x11 }, + { 0x4612, CRL_REG_LEN_08BIT, 0x29 }, + { 0x461B, CRL_REG_LEN_08BIT, 0x12 }, + { 0x461F, CRL_REG_LEN_08BIT, 0x06 }, + { 0x4635, CRL_REG_LEN_08BIT, 0x07 }, + { 0x4637, CRL_REG_LEN_08BIT, 0x30 }, + { 0x463F, CRL_REG_LEN_08BIT, 0x18 }, + { 0x4641, CRL_REG_LEN_08BIT, 0x0D }, + { 0x465B, CRL_REG_LEN_08BIT, 0x12 }, + { 0x465F, CRL_REG_LEN_08BIT, 0x11 }, + { 0x4663, CRL_REG_LEN_08BIT, 0x11 }, + { 0x4667, CRL_REG_LEN_08BIT, 0x0F }, + { 0x466F, CRL_REG_LEN_08BIT, 0x0F }, + { 0x470E, CRL_REG_LEN_08BIT, 0x09 }, + { 0x4909, CRL_REG_LEN_08BIT, 0xAB }, + { 0x490B, CRL_REG_LEN_08BIT, 0x95 }, + { 0x4915, CRL_REG_LEN_08BIT, 0x5D }, + { 0x4A5F, CRL_REG_LEN_08BIT, 0xFF }, + { 0x4A61, CRL_REG_LEN_08BIT, 0xFF }, + { 0x4A73, CRL_REG_LEN_08BIT, 0x62 }, + { 0x4A85, CRL_REG_LEN_08BIT, 0x00 }, + { 0x4A87, CRL_REG_LEN_08BIT, 0xFF }, + { 0x583C, CRL_REG_LEN_08BIT, 0x04 }, + { 0x620E, CRL_REG_LEN_08BIT, 0x04 }, + { 0x6EB2, CRL_REG_LEN_08BIT, 0x01 }, + { 0x6EB3, CRL_REG_LEN_08BIT, 0x00 }, + { 0x9300, CRL_REG_LEN_08BIT, 0x02 }, +}; + +/* + * 0, 4207, 0, 3119 + * 4208, 3120 + * 4208x3120 + */ +static struct crl_register_write_rep imx214_mode_13m[] = { + { 0x0114, CRL_REG_LEN_08BIT, 0x03 }, + { 0x0220, 
CRL_REG_LEN_08BIT, 0x00 }, + { 0x0221, CRL_REG_LEN_08BIT, 0x11 }, + { 0x0222, CRL_REG_LEN_08BIT, 0x01 }, + { 0x0344, CRL_REG_LEN_08BIT, 0x00 }, + { 0x0345, CRL_REG_LEN_08BIT, 0x00 }, + { 0x0346, CRL_REG_LEN_08BIT, 0x00 }, + { 0x0347, CRL_REG_LEN_08BIT, 0x00 }, + { 0x0348, CRL_REG_LEN_08BIT, 0x10 }, + { 0x0349, CRL_REG_LEN_08BIT, 0x6F }, + { 0x034A, CRL_REG_LEN_08BIT, 0x0C }, + { 0x034B, CRL_REG_LEN_08BIT, 0x2F }, + { 0x0381, CRL_REG_LEN_08BIT, 0x01 }, + { 0x0383, CRL_REG_LEN_08BIT, 0x01 }, + { 0x0385, CRL_REG_LEN_08BIT, 0x01 }, + { 0x0387, CRL_REG_LEN_08BIT, 0x01 }, + { 0x0900, CRL_REG_LEN_08BIT, 0x00 }, + { 0x0901, CRL_REG_LEN_08BIT, 0x00 }, + { 0x0902, CRL_REG_LEN_08BIT, 0x00 }, + { 0x3000, CRL_REG_LEN_08BIT, 0x35 }, + { 0x3054, CRL_REG_LEN_08BIT, 0x01 }, + { 0x305C, CRL_REG_LEN_08BIT, 0x11 }, + { 0x034C, CRL_REG_LEN_08BIT, 0x10 }, + { 0x034D, CRL_REG_LEN_08BIT, 0x70 }, + { 0x034E, CRL_REG_LEN_08BIT, 0x0C }, + { 0x034F, CRL_REG_LEN_08BIT, 0x30 }, + { 0x0401, CRL_REG_LEN_08BIT, 0x00 }, + { 0x0404, CRL_REG_LEN_08BIT, 0x00 }, + { 0x0405, CRL_REG_LEN_08BIT, 0x10 }, + { 0x0408, CRL_REG_LEN_08BIT, 0x00 }, + { 0x0409, CRL_REG_LEN_08BIT, 0x00 }, + { 0x040A, CRL_REG_LEN_08BIT, 0x00 }, + { 0x040B, CRL_REG_LEN_08BIT, 0x00 }, + { 0x040C, CRL_REG_LEN_08BIT, 0x10 }, + { 0x040D, CRL_REG_LEN_08BIT, 0x70 }, + { 0x040E, CRL_REG_LEN_08BIT, 0x0C }, + { 0x040F, CRL_REG_LEN_08BIT, 0x30 }, + { 0x3A03, CRL_REG_LEN_08BIT, 0x09 }, + { 0x3A04, CRL_REG_LEN_08BIT, 0x40 }, + { 0x3A05, CRL_REG_LEN_08BIT, 0x01 }, + { 0x0B06, CRL_REG_LEN_08BIT, 0x01 }, + { 0x30A2, CRL_REG_LEN_08BIT, 0x00 }, + { 0x30B4, CRL_REG_LEN_08BIT, 0x00 }, + { 0x3A02, CRL_REG_LEN_08BIT, 0xFF }, + { 0x3011, CRL_REG_LEN_08BIT, 0x00 }, + { 0x3013, CRL_REG_LEN_08BIT, 0x01 }, + { 0x0224, CRL_REG_LEN_08BIT, 0x01 }, + { 0x0225, CRL_REG_LEN_08BIT, 0xF4 }, + { 0x020E, CRL_REG_LEN_08BIT, 0x01 }, + { 0x020F, CRL_REG_LEN_08BIT, 0x00 }, + { 0x0210, CRL_REG_LEN_08BIT, 0x01 }, + { 0x0211, CRL_REG_LEN_08BIT, 0x00 }, + { 0x0212, 
CRL_REG_LEN_08BIT, 0x01 }, + { 0x0213, CRL_REG_LEN_08BIT, 0x00 }, + { 0x0214, CRL_REG_LEN_08BIT, 0x01 }, + { 0x0215, CRL_REG_LEN_08BIT, 0x00 }, + { 0x0216, CRL_REG_LEN_08BIT, 0x00 }, + { 0x0217, CRL_REG_LEN_08BIT, 0x00 }, + { 0x4170, CRL_REG_LEN_08BIT, 0x00 }, + { 0x4171, CRL_REG_LEN_08BIT, 0x10 }, + { 0x4176, CRL_REG_LEN_08BIT, 0x00 }, + { 0x4177, CRL_REG_LEN_08BIT, 0x3C }, + { 0xAE20, CRL_REG_LEN_08BIT, 0x04 }, + { 0xAE21, CRL_REG_LEN_08BIT, 0x5C }, + { 0x0138, CRL_REG_LEN_08BIT, 0x01 }, +}; + +static struct crl_register_write_rep imx214_mode_2k[] = { + { 0x0114, CRL_REG_LEN_08BIT, 0x03 }, + { 0x0220, CRL_REG_LEN_08BIT, 0x00 }, + { 0x0221, CRL_REG_LEN_08BIT, 0x11 }, + { 0x0222, CRL_REG_LEN_08BIT, 0x01 }, + { 0x0344, CRL_REG_LEN_08BIT, 0x00 }, + { 0x0345, CRL_REG_LEN_08BIT, 0x00 }, + { 0x0346, CRL_REG_LEN_08BIT, 0x00 }, + { 0x0347, CRL_REG_LEN_08BIT, 0x00 }, + { 0x0348, CRL_REG_LEN_08BIT, 0x10 }, + { 0x0349, CRL_REG_LEN_08BIT, 0x6F }, + { 0x034A, CRL_REG_LEN_08BIT, 0x0C }, + { 0x034B, CRL_REG_LEN_08BIT, 0x2F }, + { 0x0381, CRL_REG_LEN_08BIT, 0x01 }, + { 0x0383, CRL_REG_LEN_08BIT, 0x01 }, + { 0x0385, CRL_REG_LEN_08BIT, 0x01 }, + { 0x0387, CRL_REG_LEN_08BIT, 0x01 }, + { 0x0900, CRL_REG_LEN_08BIT, 0x01 }, + { 0x0901, CRL_REG_LEN_08BIT, 0x22 }, + { 0x0902, CRL_REG_LEN_08BIT, 0x02 }, + { 0x3000, CRL_REG_LEN_08BIT, 0x35 }, + { 0x3054, CRL_REG_LEN_08BIT, 0x01 }, + { 0x305C, CRL_REG_LEN_08BIT, 0x11 }, + { 0x034C, CRL_REG_LEN_08BIT, 0x08 }, + { 0x034D, CRL_REG_LEN_08BIT, 0x38 }, + { 0x034E, CRL_REG_LEN_08BIT, 0x06 }, + { 0x034F, CRL_REG_LEN_08BIT, 0x18 }, + { 0x0401, CRL_REG_LEN_08BIT, 0x00 }, + { 0x0404, CRL_REG_LEN_08BIT, 0x00 }, + { 0x0405, CRL_REG_LEN_08BIT, 0x10 }, + { 0x0408, CRL_REG_LEN_08BIT, 0x00 }, + { 0x0409, CRL_REG_LEN_08BIT, 0x00 }, + { 0x040A, CRL_REG_LEN_08BIT, 0x00 }, + { 0x040B, CRL_REG_LEN_08BIT, 0x00 }, + { 0x040C, CRL_REG_LEN_08BIT, 0x08 }, + { 0x040D, CRL_REG_LEN_08BIT, 0x38 }, + { 0x040E, CRL_REG_LEN_08BIT, 0x06 }, + { 0x040F, CRL_REG_LEN_08BIT, 0x18 
}, + { 0x0B06, CRL_REG_LEN_08BIT, 0x01 }, + { 0x30A2, CRL_REG_LEN_08BIT, 0x00 }, + { 0x30B4, CRL_REG_LEN_08BIT, 0x00 }, + { 0x3A02, CRL_REG_LEN_08BIT, 0xFF }, + { 0x3011, CRL_REG_LEN_08BIT, 0x00 }, + { 0x3013, CRL_REG_LEN_08BIT, 0x01 }, + { 0x0224, CRL_REG_LEN_08BIT, 0x01 }, + { 0x0225, CRL_REG_LEN_08BIT, 0xF4 }, + { 0x020E, CRL_REG_LEN_08BIT, 0x01 }, + { 0x020F, CRL_REG_LEN_08BIT, 0x00 }, + { 0x0210, CRL_REG_LEN_08BIT, 0x01 }, + { 0x0211, CRL_REG_LEN_08BIT, 0x00 }, + { 0x0212, CRL_REG_LEN_08BIT, 0x01 }, + { 0x0213, CRL_REG_LEN_08BIT, 0x00 }, + { 0x0214, CRL_REG_LEN_08BIT, 0x01 }, + { 0x0215, CRL_REG_LEN_08BIT, 0x00 }, + { 0x0216, CRL_REG_LEN_08BIT, 0x00 }, + { 0x0217, CRL_REG_LEN_08BIT, 0x00 }, + { 0x4170, CRL_REG_LEN_08BIT, 0x00 }, + { 0x4171, CRL_REG_LEN_08BIT, 0x10 }, + { 0x4176, CRL_REG_LEN_08BIT, 0x00 }, + { 0x4177, CRL_REG_LEN_08BIT, 0x3C }, + { 0xAE20, CRL_REG_LEN_08BIT, 0x04 }, + { 0xAE21, CRL_REG_LEN_08BIT, 0x5C }, + { 0x0138, CRL_REG_LEN_08BIT, 0x01 }, +}; + +static struct crl_register_write_rep imx214_mode_4k2k[] = { + { 0x0114, CRL_REG_LEN_08BIT, 0x03 }, + { 0x0220, CRL_REG_LEN_08BIT, 0x00 }, + { 0x0221, CRL_REG_LEN_08BIT, 0x11 }, + { 0x0222, CRL_REG_LEN_08BIT, 0x01 }, + { 0x0344, CRL_REG_LEN_08BIT, 0x00 }, + { 0x0345, CRL_REG_LEN_08BIT, 0x00 }, + { 0x0346, CRL_REG_LEN_08BIT, 0x01 }, + { 0x0347, CRL_REG_LEN_08BIT, 0x78 }, + { 0x0348, CRL_REG_LEN_08BIT, 0x10 }, + { 0x0349, CRL_REG_LEN_08BIT, 0x6F }, + { 0x034A, CRL_REG_LEN_08BIT, 0x0A }, + { 0x034B, CRL_REG_LEN_08BIT, 0xB8 }, + { 0x0381, CRL_REG_LEN_08BIT, 0x01 }, + { 0x0383, CRL_REG_LEN_08BIT, 0x01 }, + { 0x0385, CRL_REG_LEN_08BIT, 0x01 }, + { 0x0387, CRL_REG_LEN_08BIT, 0x01 }, + { 0x0900, CRL_REG_LEN_08BIT, 0x00 }, + { 0x0901, CRL_REG_LEN_08BIT, 0x00 }, + { 0x0902, CRL_REG_LEN_08BIT, 0x00 }, + { 0x3000, CRL_REG_LEN_08BIT, 0x35 }, + { 0x3054, CRL_REG_LEN_08BIT, 0x01 }, + { 0x305C, CRL_REG_LEN_08BIT, 0x11 }, + { 0x034C, CRL_REG_LEN_08BIT, 0x10 }, + { 0x034D, CRL_REG_LEN_08BIT, 0x70 }, + { 0x034E, 
CRL_REG_LEN_08BIT, 0x09 }, + { 0x034F, CRL_REG_LEN_08BIT, 0x40 }, + { 0x0401, CRL_REG_LEN_08BIT, 0x00 }, + { 0x0404, CRL_REG_LEN_08BIT, 0x00 }, + { 0x0405, CRL_REG_LEN_08BIT, 0x10 }, + { 0x0408, CRL_REG_LEN_08BIT, 0x00 }, + { 0x0409, CRL_REG_LEN_08BIT, 0x00 }, + { 0x040A, CRL_REG_LEN_08BIT, 0x00 }, + { 0x040B, CRL_REG_LEN_08BIT, 0x00 }, + { 0x040C, CRL_REG_LEN_08BIT, 0x10 }, + { 0x040D, CRL_REG_LEN_08BIT, 0x70 }, + { 0x040E, CRL_REG_LEN_08BIT, 0x09 }, + { 0x040F, CRL_REG_LEN_08BIT, 0x40 }, + { 0x3A03, CRL_REG_LEN_08BIT, 0x09 }, + { 0x3A04, CRL_REG_LEN_08BIT, 0x40 }, + { 0x3A05, CRL_REG_LEN_08BIT, 0x01 }, + { 0x0B06, CRL_REG_LEN_08BIT, 0x01 }, + { 0x30A2, CRL_REG_LEN_08BIT, 0x00 }, + { 0x30B4, CRL_REG_LEN_08BIT, 0x00 }, + { 0x3A02, CRL_REG_LEN_08BIT, 0xFF }, + { 0x3011, CRL_REG_LEN_08BIT, 0x00 }, + { 0x3013, CRL_REG_LEN_08BIT, 0x01 }, + { 0x0224, CRL_REG_LEN_08BIT, 0x01 }, + { 0x0225, CRL_REG_LEN_08BIT, 0xF4 }, + { 0x020E, CRL_REG_LEN_08BIT, 0x01 }, + { 0x020F, CRL_REG_LEN_08BIT, 0x00 }, + { 0x0210, CRL_REG_LEN_08BIT, 0x01 }, + { 0x0211, CRL_REG_LEN_08BIT, 0x00 }, + { 0x0212, CRL_REG_LEN_08BIT, 0x01 }, + { 0x0213, CRL_REG_LEN_08BIT, 0x00 }, + { 0x0214, CRL_REG_LEN_08BIT, 0x01 }, + { 0x0215, CRL_REG_LEN_08BIT, 0x00 }, + { 0x0216, CRL_REG_LEN_08BIT, 0x00 }, + { 0x0217, CRL_REG_LEN_08BIT, 0x00 }, + { 0x4170, CRL_REG_LEN_08BIT, 0x00 }, + { 0x4171, CRL_REG_LEN_08BIT, 0x10 }, + { 0x4176, CRL_REG_LEN_08BIT, 0x00 }, + { 0x4177, CRL_REG_LEN_08BIT, 0x3C }, + { 0xAE20, CRL_REG_LEN_08BIT, 0x04 }, + { 0xAE21, CRL_REG_LEN_08BIT, 0x5C }, + { 0x0138, CRL_REG_LEN_08BIT, 0x01 }, +}; + +static struct crl_register_write_rep imx214_mode_1120[] = { + { 0x0114, CRL_REG_LEN_08BIT, 0x03 }, + { 0x0220, CRL_REG_LEN_08BIT, 0x00 }, + { 0x0221, CRL_REG_LEN_08BIT, 0x11 }, + { 0x0222, CRL_REG_LEN_08BIT, 0x01 }, + { 0x0344, CRL_REG_LEN_08BIT, 0x00 }, + { 0x0345, CRL_REG_LEN_08BIT, 0x38 }, + { 0x0346, CRL_REG_LEN_08BIT, 0x01 }, + { 0x0347, CRL_REG_LEN_08BIT, 0xB8 }, + { 0x0348, CRL_REG_LEN_08BIT, 
0x10 }, + { 0x0349, CRL_REG_LEN_08BIT, 0x6F }, + { 0x034A, CRL_REG_LEN_08BIT, 0x0A }, + { 0x034B, CRL_REG_LEN_08BIT, 0xB8 }, + { 0x0381, CRL_REG_LEN_08BIT, 0x01 }, + { 0x0383, CRL_REG_LEN_08BIT, 0x01 }, + { 0x0385, CRL_REG_LEN_08BIT, 0x01 }, + { 0x0387, CRL_REG_LEN_08BIT, 0x01 }, + { 0x0900, CRL_REG_LEN_08BIT, 0x01 }, + { 0x0901, CRL_REG_LEN_08BIT, 0x22 }, + { 0x0902, CRL_REG_LEN_08BIT, 0x02 }, + { 0x3000, CRL_REG_LEN_08BIT, 0x35 }, + { 0x3054, CRL_REG_LEN_08BIT, 0x01 }, + { 0x305C, CRL_REG_LEN_08BIT, 0x11 }, + { 0x034C, CRL_REG_LEN_08BIT, 0x08 }, + { 0x034D, CRL_REG_LEN_08BIT, 0x00 }, + { 0x034E, CRL_REG_LEN_08BIT, 0x04 }, + { 0x034F, CRL_REG_LEN_08BIT, 0x60 }, + { 0x0401, CRL_REG_LEN_08BIT, 0x00 }, + { 0x0404, CRL_REG_LEN_08BIT, 0x00 }, + { 0x0405, CRL_REG_LEN_08BIT, 0x10 }, + { 0x0408, CRL_REG_LEN_08BIT, 0x00 }, + { 0x0409, CRL_REG_LEN_08BIT, 0x00 }, + { 0x040A, CRL_REG_LEN_08BIT, 0x00 }, + { 0x040B, CRL_REG_LEN_08BIT, 0x00 }, + { 0x040C, CRL_REG_LEN_08BIT, 0x08 }, + { 0x040D, CRL_REG_LEN_08BIT, 0x00 }, + { 0x040E, CRL_REG_LEN_08BIT, 0x04 }, + { 0x040F, CRL_REG_LEN_08BIT, 0x60 }, + { 0x3A03, CRL_REG_LEN_08BIT, 0x08 }, + { 0x3A04, CRL_REG_LEN_08BIT, 0x68 }, + { 0x3A05, CRL_REG_LEN_08BIT, 0x01 }, + { 0x0B06, CRL_REG_LEN_08BIT, 0x01 }, + { 0x30A2, CRL_REG_LEN_08BIT, 0x00 }, + { 0x30B4, CRL_REG_LEN_08BIT, 0x00 }, + { 0x3A02, CRL_REG_LEN_08BIT, 0xFF }, + { 0x3011, CRL_REG_LEN_08BIT, 0x00 }, + { 0x3013, CRL_REG_LEN_08BIT, 0x01 }, + { 0x0224, CRL_REG_LEN_08BIT, 0x01 }, + { 0x0225, CRL_REG_LEN_08BIT, 0xF4 }, + { 0x020E, CRL_REG_LEN_08BIT, 0x01 }, + { 0x020F, CRL_REG_LEN_08BIT, 0x00 }, + { 0x0210, CRL_REG_LEN_08BIT, 0x01 }, + { 0x0211, CRL_REG_LEN_08BIT, 0x00 }, + { 0x0212, CRL_REG_LEN_08BIT, 0x01 }, + { 0x0213, CRL_REG_LEN_08BIT, 0x00 }, + { 0x0214, CRL_REG_LEN_08BIT, 0x01 }, + { 0x0215, CRL_REG_LEN_08BIT, 0x00 }, + { 0x0216, CRL_REG_LEN_08BIT, 0x00 }, + { 0x0217, CRL_REG_LEN_08BIT, 0x00 }, + { 0x4170, CRL_REG_LEN_08BIT, 0x00 }, + { 0x4171, CRL_REG_LEN_08BIT, 0x10 }, + 
{ 0x4176, CRL_REG_LEN_08BIT, 0x00 }, + { 0x4177, CRL_REG_LEN_08BIT, 0x3C }, + { 0xAE20, CRL_REG_LEN_08BIT, 0x04 }, + { 0xAE21, CRL_REG_LEN_08BIT, 0x5C }, + { 0x0138, CRL_REG_LEN_08BIT, 0x01 }, +}; + +static struct crl_register_write_rep imx214_mode_1080[] = { + { 0x0114, CRL_REG_LEN_08BIT, 0x03 }, + { 0x0220, CRL_REG_LEN_08BIT, 0x00 }, + { 0x0221, CRL_REG_LEN_08BIT, 0x11 }, + { 0x0222, CRL_REG_LEN_08BIT, 0x01 }, + { 0x0344, CRL_REG_LEN_08BIT, 0x00 }, + { 0x0345, CRL_REG_LEN_08BIT, 0x00 }, + { 0x0346, CRL_REG_LEN_08BIT, 0x01 }, + { 0x0347, CRL_REG_LEN_08BIT, 0x78 }, + { 0x0348, CRL_REG_LEN_08BIT, 0x10 }, + { 0x0349, CRL_REG_LEN_08BIT, 0x6F }, + { 0x034A, CRL_REG_LEN_08BIT, 0x0A }, + { 0x034B, CRL_REG_LEN_08BIT, 0xB8 }, + { 0x0381, CRL_REG_LEN_08BIT, 0x01 }, + { 0x0383, CRL_REG_LEN_08BIT, 0x01 }, + { 0x0385, CRL_REG_LEN_08BIT, 0x01 }, + { 0x0387, CRL_REG_LEN_08BIT, 0x01 }, + { 0x0900, CRL_REG_LEN_08BIT, 0x01 }, + { 0x0901, CRL_REG_LEN_08BIT, 0x22 }, + { 0x0902, CRL_REG_LEN_08BIT, 0x02 }, + { 0x3000, CRL_REG_LEN_08BIT, 0x35 }, + { 0x3054, CRL_REG_LEN_08BIT, 0x01 }, + { 0x305C, CRL_REG_LEN_08BIT, 0x11 }, + { 0x034C, CRL_REG_LEN_08BIT, 0x07 }, + { 0x034D, CRL_REG_LEN_08BIT, 0x80 }, + { 0x034E, CRL_REG_LEN_08BIT, 0x04 }, + { 0x034F, CRL_REG_LEN_08BIT, 0x38 }, + { 0x0401, CRL_REG_LEN_08BIT, 0x00 }, + { 0x0404, CRL_REG_LEN_08BIT, 0x00 }, + { 0x0405, CRL_REG_LEN_08BIT, 0x10 }, + { 0x0408, CRL_REG_LEN_08BIT, 0x00 }, + { 0x0409, CRL_REG_LEN_08BIT, 0x00 }, + { 0x040A, CRL_REG_LEN_08BIT, 0x00 }, + { 0x040B, CRL_REG_LEN_08BIT, 0x00 }, + { 0x040C, CRL_REG_LEN_08BIT, 0x07 }, + { 0x040D, CRL_REG_LEN_08BIT, 0x80 }, + { 0x040E, CRL_REG_LEN_08BIT, 0x04 }, + { 0x040F, CRL_REG_LEN_08BIT, 0x38 }, + { 0x3A03, CRL_REG_LEN_08BIT, 0x08 }, + { 0x3A04, CRL_REG_LEN_08BIT, 0x68 }, + { 0x3A05, CRL_REG_LEN_08BIT, 0x01 }, + { 0x0B06, CRL_REG_LEN_08BIT, 0x01 }, + { 0x30A2, CRL_REG_LEN_08BIT, 0x00 }, + { 0x30B4, CRL_REG_LEN_08BIT, 0x00 }, + { 0x3A02, CRL_REG_LEN_08BIT, 0xFF }, + { 0x3011, 
CRL_REG_LEN_08BIT, 0x00 }, + { 0x3013, CRL_REG_LEN_08BIT, 0x01 }, + { 0x0224, CRL_REG_LEN_08BIT, 0x01 }, + { 0x0225, CRL_REG_LEN_08BIT, 0xF4 }, + { 0x020E, CRL_REG_LEN_08BIT, 0x01 }, + { 0x020F, CRL_REG_LEN_08BIT, 0x00 }, + { 0x0210, CRL_REG_LEN_08BIT, 0x01 }, + { 0x0211, CRL_REG_LEN_08BIT, 0x00 }, + { 0x0212, CRL_REG_LEN_08BIT, 0x01 }, + { 0x0213, CRL_REG_LEN_08BIT, 0x00 }, + { 0x0214, CRL_REG_LEN_08BIT, 0x01 }, + { 0x0215, CRL_REG_LEN_08BIT, 0x00 }, + { 0x0216, CRL_REG_LEN_08BIT, 0x00 }, + { 0x0217, CRL_REG_LEN_08BIT, 0x00 }, + { 0x4170, CRL_REG_LEN_08BIT, 0x00 }, + { 0x4171, CRL_REG_LEN_08BIT, 0x10 }, + { 0x4176, CRL_REG_LEN_08BIT, 0x00 }, + { 0x4177, CRL_REG_LEN_08BIT, 0x3C }, + { 0xAE20, CRL_REG_LEN_08BIT, 0x04 }, + { 0xAE21, CRL_REG_LEN_08BIT, 0x5C }, + { 0x0138, CRL_REG_LEN_08BIT, 0x01 }, +}; + +static struct crl_register_write_rep imx214_mode_720[] = { + { 0x0114, CRL_REG_LEN_08BIT, 0x03 }, + { 0x0220, CRL_REG_LEN_08BIT, 0x00 }, + { 0x0221, CRL_REG_LEN_08BIT, 0x11 }, + { 0x0222, CRL_REG_LEN_08BIT, 0x01 }, + { 0x0344, CRL_REG_LEN_08BIT, 0x05 }, + { 0x0345, CRL_REG_LEN_08BIT, 0x78 }, + { 0x0346, CRL_REG_LEN_08BIT, 0x04 }, + { 0x0347, CRL_REG_LEN_08BIT, 0x8E }, + { 0x0348, CRL_REG_LEN_08BIT, 0x0A }, + { 0x0349, CRL_REG_LEN_08BIT, 0xF7 }, + { 0x034A, CRL_REG_LEN_08BIT, 0x07 }, + { 0x034B, CRL_REG_LEN_08BIT, 0xA1 }, + { 0x0381, CRL_REG_LEN_08BIT, 0x01 }, + { 0x0383, CRL_REG_LEN_08BIT, 0x01 }, + { 0x0385, CRL_REG_LEN_08BIT, 0x01 }, + { 0x0387, CRL_REG_LEN_08BIT, 0x01 }, + { 0x0900, CRL_REG_LEN_08BIT, 0x00 }, + { 0x0901, CRL_REG_LEN_08BIT, 0x00 }, + { 0x0902, CRL_REG_LEN_08BIT, 0x00 }, + { 0x3000, CRL_REG_LEN_08BIT, 0x35 }, + { 0x3054, CRL_REG_LEN_08BIT, 0x01 }, + { 0x305C, CRL_REG_LEN_08BIT, 0x11 }, + { 0x034C, CRL_REG_LEN_08BIT, 0x05 }, + { 0x034D, CRL_REG_LEN_08BIT, 0x00 }, + { 0x034E, CRL_REG_LEN_08BIT, 0x02 }, + { 0x034F, CRL_REG_LEN_08BIT, 0xD0 }, + { 0x0401, CRL_REG_LEN_08BIT, 0x00 }, + { 0x0404, CRL_REG_LEN_08BIT, 0x00 }, + { 0x0405, CRL_REG_LEN_08BIT, 
0x10 }, + { 0x0408, CRL_REG_LEN_08BIT, 0x00 }, + { 0x0409, CRL_REG_LEN_08BIT, 0x00 }, + { 0x040A, CRL_REG_LEN_08BIT, 0x00 }, + { 0x040B, CRL_REG_LEN_08BIT, 0x00 }, + { 0x040C, CRL_REG_LEN_08BIT, 0x05 }, + { 0x040D, CRL_REG_LEN_08BIT, 0x80 }, + { 0x040E, CRL_REG_LEN_08BIT, 0x03 }, + { 0x040F, CRL_REG_LEN_08BIT, 0x14 }, + { 0x3A03, CRL_REG_LEN_08BIT, 0x02 }, + { 0x3A04, CRL_REG_LEN_08BIT, 0xF8 }, + { 0x3A05, CRL_REG_LEN_08BIT, 0x00 }, + { 0x0B06, CRL_REG_LEN_08BIT, 0x01 }, + { 0x30A2, CRL_REG_LEN_08BIT, 0x00 }, + { 0x30B4, CRL_REG_LEN_08BIT, 0x00 }, + { 0x3A02, CRL_REG_LEN_08BIT, 0xFF }, + { 0x3011, CRL_REG_LEN_08BIT, 0x00 }, + { 0x3013, CRL_REG_LEN_08BIT, 0x01 }, + { 0x0224, CRL_REG_LEN_08BIT, 0x01 }, + { 0x0225, CRL_REG_LEN_08BIT, 0xF4 }, + { 0x020E, CRL_REG_LEN_08BIT, 0x01 }, + { 0x020F, CRL_REG_LEN_08BIT, 0x00 }, + { 0x0210, CRL_REG_LEN_08BIT, 0x01 }, + { 0x0211, CRL_REG_LEN_08BIT, 0x00 }, + { 0x0212, CRL_REG_LEN_08BIT, 0x01 }, + { 0x0213, CRL_REG_LEN_08BIT, 0x00 }, + { 0x0214, CRL_REG_LEN_08BIT, 0x01 }, + { 0x0215, CRL_REG_LEN_08BIT, 0x00 }, + { 0x0216, CRL_REG_LEN_08BIT, 0x00 }, + { 0x0217, CRL_REG_LEN_08BIT, 0x00 }, + { 0x4170, CRL_REG_LEN_08BIT, 0x00 }, + { 0x4171, CRL_REG_LEN_08BIT, 0x10 }, + { 0x4176, CRL_REG_LEN_08BIT, 0x00 }, + { 0x4177, CRL_REG_LEN_08BIT, 0x3C }, + { 0xAE20, CRL_REG_LEN_08BIT, 0x04 }, + { 0xAE21, CRL_REG_LEN_08BIT, 0x5C }, + { 0x0138, CRL_REG_LEN_08BIT, 0x01 }, +}; + +static struct crl_register_write_rep imx214_streamon_regs[] = { + { 0x0100, CRL_REG_LEN_08BIT, 0x01 } +}; + +static struct crl_register_write_rep imx214_streamoff_regs[] = { + { 0x0100, CRL_REG_LEN_08BIT, 0x00 } +}; + +static struct crl_register_write_rep imx214_data_fmt_width10[] = { + { 0x0112, CRL_REG_LEN_16BIT, 0x0a0a } +}; + +static struct crl_register_write_rep imx214_data_fmt_width8[] = { + { 0x0112, CRL_REG_LEN_16BIT, 0x0808 } +}; + +static struct crl_arithmetic_ops imx214_vflip_ops[] = { + { + .op = CRL_BITWISE_LSHIFT, + .operand.entity_val = 1, + }, +}; + +static 
struct crl_dynamic_register_access imx214_h_flip_regs[] = { + { + .address = 0x0101, + .len = CRL_REG_LEN_08BIT | CRL_REG_READ_AND_UPDATE, + .ops_items = 0, + .ops = 0, + .mask = 0x1, + }, +}; + +static struct crl_dynamic_register_access imx214_v_flip_regs[] = { + { + .address = 0x0101, + .len = CRL_REG_LEN_08BIT | CRL_REG_READ_AND_UPDATE, + .ops_items = ARRAY_SIZE(imx214_vflip_ops), + .ops = imx214_vflip_ops, + .mask = 0x2, + }, +}; + +struct crl_register_write_rep imx214_poweroff_regset[] = { + { 0x0103, CRL_REG_LEN_08BIT, 0x01 }, +}; + + +static struct crl_dynamic_register_access imx214_ana_gain_global_regs[] = { + { + .address = 0x0204, + .len = CRL_REG_LEN_16BIT, + .ops_items = 0, + .ops = 0, + .mask = 0xffff, + }, +}; + +static struct crl_dynamic_register_access imx214_exposure_regs[] = { + { + .address = 0x0202, + .len = CRL_REG_LEN_16BIT, + .ops_items = 0, + .ops = 0, + .mask = 0xffff, + } +}; + +static struct crl_dynamic_register_access imx214_vblank_regs[] = { + { + .address = 0x0340, + .len = CRL_REG_LEN_16BIT, + .ops_items = 0, + .ops = 0, + .mask = 0xffff, + }, +}; + +static struct crl_dynamic_register_access imx214_hblank_regs[] = { + { + .address = 0x0342, + .len = CRL_REG_LEN_16BIT, + .ops_items = 0, + .ops = 0, + .mask = 0xffff, + }, +}; + +static struct crl_dynamic_register_access imx214_test_pattern_regs[] = { + { + .address = 0x0600, + .len = CRL_REG_LEN_16BIT, + .ops_items = 0, + .ops = 0, + .mask = 0xffff + }, +}; + +static struct crl_sensor_detect_config imx214_sensor_detect_regset[] = { + { + .reg = { 0x0019, CRL_REG_LEN_08BIT, 0x000000ff }, + .width = 5, + }, + { + .reg = { 0x0016, CRL_REG_LEN_16BIT, 0x0000ffff }, + .width = 7, + }, +}; + +const s64 imx214_op_sys_clock[] = { 504000000, 504000000, 600000000, + 600000000}; + +static struct crl_pll_configuration imx214_pll_configurations[] = { + { + .input_clk = 24000000, + .op_sys_clk = 504000000, + .bitsperpixel = 8, + .pixel_rate_csi = 432000000, + .pixel_rate_pa = 432000000, + .comp_items 
= 0, + .ctrl_data = 0, + .csi_lanes = 4, + .pll_regs_items = ARRAY_SIZE(imx214_pll_8_1080mbps), + .pll_regs = imx214_pll_8_1080mbps, + }, + { + .input_clk = 24000000, + .op_sys_clk = 504000000, + .bitsperpixel = 10, + .pixel_rate_csi = 432000000, + .pixel_rate_pa = 432000000, + .csi_lanes = 4, + .comp_items = 0, + .ctrl_data = 0, + .pll_regs_items = ARRAY_SIZE(imx214_pll_1080mbps), + .pll_regs = imx214_pll_1080mbps, + }, + { + .input_clk = 24000000, + .op_sys_clk = 600000000, + .bitsperpixel = 8, + .pixel_rate_csi = 480000000, + .pixel_rate_pa = 480000000, + .csi_lanes = 4, + .comp_items = 0, + .ctrl_data = 0, + .pll_regs_items = ARRAY_SIZE(imx214_pll_8_1200mbps), + .pll_regs = imx214_pll_8_1200mbps, + }, + { + .input_clk = 24000000, + .op_sys_clk = 600000000, + .bitsperpixel = 10, + .pixel_rate_csi = 480000000, + .pixel_rate_pa = 480000000, + .csi_lanes = 4, + .comp_items = 0, + .ctrl_data = 0, + .pll_regs_items = ARRAY_SIZE(imx214_pll_1200mbps), + .pll_regs = imx214_pll_1200mbps, + }, + +}; + +/* + * 0,5343,448,3567 + * 5344, 3120 + * Dig Crop: (568,0)->4208x3120 + * Scale_m 16 + * 4208x3120 + */ + +static struct crl_subdev_rect_rep imx214_13m_rects[] = { + { + .subdev_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .in_rect.left = 0, + .in_rect.top = 0, + .in_rect.width = 4208, + .in_rect.height = 3120, + .out_rect.left = 0, + .out_rect.top = 0, + .out_rect.width = 4208, + .out_rect.height = 3120, + }, + { + .subdev_type = CRL_SUBDEV_TYPE_BINNER, + .in_rect.left = 0, + .in_rect.top = 0, + .in_rect.width = 4208, + .in_rect.height = 3120, + .out_rect.left = 0, + .out_rect.top = 0, + .out_rect.width = 4208, + .out_rect.height = 3120, + }, + { + .subdev_type = CRL_SUBDEV_TYPE_SCALER, + .in_rect.left = 0, + .in_rect.top = 0, + .in_rect.width = 4208, + .in_rect.height = 3120, + .out_rect.left = 0, + .out_rect.top = 0, + .out_rect.width = 4208, + .out_rect.height = 3120, + }, +}; + +static struct crl_subdev_rect_rep imx214_4k2k_rects[] = { + { + .subdev_type = 
CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .in_rect.left = 0, + .in_rect.top = 0, + .in_rect.width = 4208, + .in_rect.height = 3120, + .out_rect.left = 0, + .out_rect.top = 376, + .out_rect.width = 4208, + .out_rect.height = 2368, + }, + { + .subdev_type = CRL_SUBDEV_TYPE_BINNER, + .in_rect.left = 0, + .in_rect.top = 0, + .in_rect.width = 4208, + .in_rect.height = 2368, + .out_rect.left = 0, + .out_rect.top = 0, + .out_rect.width = 4208, + .out_rect.height = 2368, + }, + { + .subdev_type = CRL_SUBDEV_TYPE_SCALER, + .in_rect.left = 0, + .in_rect.top = 0, + .in_rect.width = 4208, + .in_rect.height = 2368, + .out_rect.left = 0, + .out_rect.top = 0, + .out_rect.width = 4208, + .out_rect.height = 2368, + }, +}; + +static struct crl_subdev_rect_rep imx214_2k_rects[] = { + { + .subdev_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .in_rect.left = 0, + .in_rect.top = 0, + .in_rect.width = 4208, + .in_rect.height = 3120, + .out_rect.left = 0, + .out_rect.top = 0, + .out_rect.width = 4208, + .out_rect.height = 3120, + }, + { + .subdev_type = CRL_SUBDEV_TYPE_BINNER, + .in_rect.left = 0, + .in_rect.top = 0, + .in_rect.width = 4208, + .in_rect.height = 3120, + .out_rect.left = 0, + .out_rect.top = 0, + .out_rect.width = 2104, + .out_rect.height = 1560, + }, + { + .subdev_type = CRL_SUBDEV_TYPE_SCALER, + .in_rect.left = 0, + .in_rect.top = 0, + .in_rect.width = 2104, + .in_rect.height = 1560, + .out_rect.left = 0, + .out_rect.top = 0, + .out_rect.width = 2104, + .out_rect.height = 1560, + }, +}; + +static struct crl_subdev_rect_rep imx214_1120_rects[] = { + { + .subdev_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .in_rect.left = 0, + .in_rect.top = 0, + .in_rect.width = 4208, + .in_rect.height = 3120, + .out_rect.left = 56, + .out_rect.top = 440, + .out_rect.width = 4096, + .out_rect.height = 2240, + }, + { + .subdev_type = CRL_SUBDEV_TYPE_BINNER, + .in_rect.left = 0, + .in_rect.top = 0, + .in_rect.width = 4096, + .in_rect.height = 2240, + .out_rect.left = 0, + .out_rect.top = 0, + .out_rect.width = 2048, 
+ .out_rect.height = 1120, + }, + { + .subdev_type = CRL_SUBDEV_TYPE_SCALER, + .in_rect.left = 0, + .in_rect.top = 0, + .in_rect.width = 2048, + .in_rect.height = 1120, + .out_rect.left = 0, + .out_rect.top = 0, + .out_rect.width = 2048, + .out_rect.height = 1120, + }, +}; + +static struct crl_subdev_rect_rep imx214_1080_rects[] = { + { + .subdev_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .in_rect.left = 0, + .in_rect.top = 0, + .in_rect.width = 4208, + .in_rect.height = 3120, + .out_rect.left = 0, + .out_rect.top = 376, + .out_rect.width = 4208, + .out_rect.height = 2368, + }, + { + .subdev_type = CRL_SUBDEV_TYPE_BINNER, + .in_rect.left = 0, + .in_rect.top = 0, + .in_rect.width = 4208, + .in_rect.height = 2368, + .out_rect.left = 0, + .out_rect.top = 0, + .out_rect.width = 2104, + .out_rect.height = 1184, + }, + { + .subdev_type = CRL_SUBDEV_TYPE_SCALER, + .in_rect.left = 0, + .in_rect.top = 0, + .in_rect.width = 2104, + .in_rect.height = 1184, + .out_rect.left = 0, + .out_rect.top = 0, + .out_rect.width = 1920, + .out_rect.height = 1080, + }, +}; + +static struct crl_subdev_rect_rep imx214_720_rects[] = { + { + .subdev_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .in_rect.left = 0, + .in_rect.top = 0, + .in_rect.width = 4208, + .in_rect.height = 3120, + .out_rect.left = 1400, + .out_rect.top = 1166, + .out_rect.width = 1408, + .out_rect.height = 788, + }, + { + .subdev_type = CRL_SUBDEV_TYPE_BINNER, + .in_rect.left = 0, + .in_rect.top = 0, + .in_rect.width = 1408, + .in_rect.height = 788, + .out_rect.left = 0, + .out_rect.top = 0, + .out_rect.width = 1408, + .out_rect.height = 788, + }, + { + .subdev_type = CRL_SUBDEV_TYPE_SCALER, + .in_rect.left = 0, + .in_rect.top = 0, + .in_rect.width = 1408, + .in_rect.height = 788, + .out_rect.left = 0, + .out_rect.top = 0, + .out_rect.width = 1280, + .out_rect.height = 720, + }, +}; + +static struct crl_mode_rep imx214_modes[] = { + { + .sd_rects_items = ARRAY_SIZE(imx214_13m_rects), + .sd_rects = imx214_13m_rects, + .binn_hor = 1, + 
.binn_vert = 1, + .scale_m = 1, + .width = 4208, + .height = 3120, + .min_llp = 5008, + .min_fll = 3180, + .comp_items = 0, + .ctrl_data = 0, + .mode_regs_items = ARRAY_SIZE(imx214_mode_13m), + .mode_regs = imx214_mode_13m, + }, + { + .sd_rects_items = ARRAY_SIZE(imx214_4k2k_rects), + .sd_rects = imx214_4k2k_rects, + .binn_hor = 1, + .binn_vert = 1, + .scale_m = 1, + .width = 4208, + .height = 2368, + .min_llp = 5008, + .min_fll = 2408, + .comp_items = 0, + .ctrl_data = 0, + .mode_regs_items = ARRAY_SIZE(imx214_mode_4k2k), + .mode_regs = imx214_mode_4k2k, + }, + { + .sd_rects_items = ARRAY_SIZE(imx214_2k_rects), + .sd_rects = imx214_2k_rects, + .binn_hor = 2, + .binn_vert = 2, + .scale_m = 1, + .width = 2104, + .height = 1560, + .min_llp = 5008, + .min_fll = 1700, + .comp_items = 0, + .ctrl_data = 0, + .mode_regs_items = ARRAY_SIZE(imx214_mode_2k), + .mode_regs = imx214_mode_2k, + }, + { + .sd_rects_items = ARRAY_SIZE(imx214_1120_rects), + .sd_rects = imx214_1120_rects, + .binn_hor = 2, + .binn_vert = 2, + .scale_m = 1, + .width = 2048, + .height = 1120, + .min_llp = 5008, + .min_fll = 1600, + .comp_items = 0, + .ctrl_data = 0, + .mode_regs_items = ARRAY_SIZE(imx214_mode_1120), + .mode_regs = imx214_mode_1120, + }, + { + .sd_rects_items = ARRAY_SIZE(imx214_1080_rects), + .sd_rects = imx214_1080_rects, + .binn_hor = 2, + .binn_vert = 2, + .scale_m = 1, + .width = 1920, + .height = 1080, + .min_llp = 5008, + .min_fll = 1200, + .comp_items = 0, + .ctrl_data = 0, + .mode_regs_items = ARRAY_SIZE(imx214_mode_1080), + .mode_regs = imx214_mode_1080, + }, + { + .sd_rects_items = ARRAY_SIZE(imx214_720_rects), + .sd_rects = imx214_720_rects, + .binn_hor = 1, + .binn_vert = 1, + .scale_m = 1, + .width = 1280, + .height = 720, + .min_llp = 5008, + .min_fll = 828, + .comp_items = 0, + .ctrl_data = 0, + .mode_regs_items = ARRAY_SIZE(imx214_mode_720), + .mode_regs = imx214_mode_720, + }, +}; + +static struct crl_sensor_subdev_config imx214_sensor_subdevs[] = { + { + .subdev_type = 
CRL_SUBDEV_TYPE_SCALER, + .name = "imx214 scaler", + }, + { + .subdev_type = CRL_SUBDEV_TYPE_BINNER, + .name = "imx214 binner", + }, + { + .subdev_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .name = "imx214 pixel array", + }, +}; + +static struct crl_sensor_limits imx214_sensor_limits = { + .x_addr_min = 0, + .y_addr_min = 0, + .x_addr_max = 4208, + .y_addr_max = 3120, + .min_frame_length_lines = 184, + .max_frame_length_lines = 65535, + .min_line_length_pixels = 5008, + .max_line_length_pixels = 32752, + .scaler_m_min = 16, + .scaler_m_max = 255, + .scaler_n_min = 16, + .scaler_n_max = 16, + .min_even_inc = 1, + .max_even_inc = 1, + .min_odd_inc = 1, + .max_odd_inc = 3, +}; + +static struct crl_flip_data imx214_flip_configurations[] = { + { + .flip = CRL_FLIP_DEFAULT_NONE, + .pixel_order = CRL_PIXEL_ORDER_RGGB, + }, + { + .flip = CRL_FLIP_HFLIP, + .pixel_order = CRL_PIXEL_ORDER_GRBG, + }, + { + .flip = CRL_FLIP_VFLIP, + .pixel_order = CRL_PIXEL_ORDER_GBRG, + }, + { + .flip = CRL_FLIP_HFLIP_VFLIP, + .pixel_order = CRL_PIXEL_ORDER_BGGR, + }, +}; + +static struct crl_csi_data_fmt imx214_crl_csi_data_fmt[] = { + { + .code = MEDIA_BUS_FMT_SGRBG10_1X10, + .pixel_order = CRL_PIXEL_ORDER_GRBG, + .bits_per_pixel = 10, + .regs_items = ARRAY_SIZE(imx214_data_fmt_width10), + .regs = imx214_data_fmt_width10, + }, + { + .code = MEDIA_BUS_FMT_SRGGB10_1X10, + .pixel_order = CRL_PIXEL_ORDER_RGGB, + .regs_items = ARRAY_SIZE(imx214_data_fmt_width10), + .bits_per_pixel = 10, + .regs = imx214_data_fmt_width10, + }, + { + .code = MEDIA_BUS_FMT_SBGGR10_1X10, + .pixel_order = CRL_PIXEL_ORDER_BGGR, + .regs_items = ARRAY_SIZE(imx214_data_fmt_width10), + .bits_per_pixel = 10, + .regs = imx214_data_fmt_width10, + }, + { + .code = MEDIA_BUS_FMT_SGBRG10_1X10, + .pixel_order = CRL_PIXEL_ORDER_GBRG, + .regs_items = ARRAY_SIZE(imx214_data_fmt_width10), + .bits_per_pixel = 10, + .regs = imx214_data_fmt_width10, + }, + { + .code = MEDIA_BUS_FMT_SGRBG8_1X8, + .pixel_order = CRL_PIXEL_ORDER_GRBG, + 
.regs_items = ARRAY_SIZE(imx214_data_fmt_width8), + .bits_per_pixel = 8, + .regs = imx214_data_fmt_width8, + }, + { + .code = MEDIA_BUS_FMT_SRGGB8_1X8, + .pixel_order = CRL_PIXEL_ORDER_RGGB, + .regs_items = ARRAY_SIZE(imx214_data_fmt_width8), + .bits_per_pixel = 8, + .regs = imx214_data_fmt_width8, + }, + { + .code = MEDIA_BUS_FMT_SBGGR8_1X8, + .pixel_order = CRL_PIXEL_ORDER_BGGR, + .regs_items = ARRAY_SIZE(imx214_data_fmt_width8), + .bits_per_pixel = 8, + .regs = imx214_data_fmt_width8, + }, + { + .code = MEDIA_BUS_FMT_SGBRG8_1X8, + .pixel_order = CRL_PIXEL_ORDER_GBRG, + .regs_items = ARRAY_SIZE(imx214_data_fmt_width8), + .bits_per_pixel = 8, + .regs = imx214_data_fmt_width8, + }, +}; + + +static const char * const imx214_test_patterns[] = { + "Disabled", + "Solid Colour", + "Eight Vertical Colour Bars", +}; + + +static struct crl_v4l2_ctrl imx214_v4l2_ctrls[] = { + { + .sd_type = CRL_SUBDEV_TYPE_SCALER, + .op_type = CRL_V4L2_CTRL_SET_OP, + .context = SENSOR_IDLE, + .ctrl_id = V4L2_CID_LINK_FREQ, + .name = "V4L2_CID_LINK_FREQ", + .type = CRL_V4L2_CTRL_TYPE_MENU_INT, + .data.v4l2_int_menu.def = 0, + .data.v4l2_int_menu.max = + ARRAY_SIZE(imx214_pll_configurations) - 1, + .data.v4l2_int_menu.menu = imx214_op_sys_clock, + .flags = 0, + .impact = CRL_IMPACTS_NO_IMPACT, + .regs_items = 0, + .regs = 0, + .dep_items = 0, + .dep_ctrls = 0, + }, + { + .sd_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .op_type = CRL_V4L2_CTRL_GET_OP, + .context = SENSOR_POWERED_ON, + .ctrl_id = V4L2_CID_PIXEL_RATE, + .name = "V4L2_CID_PIXEL_RATE_PA", + .type = CRL_V4L2_CTRL_TYPE_INTEGER, + .data.std_data.min = 0, + .data.std_data.max = INT_MAX, + .data.std_data.step = 1, + .data.std_data.def = 0, + .flags = 0, + .impact = CRL_IMPACTS_NO_IMPACT, + .regs_items = 0, + .regs = 0, + .dep_items = 0, + .dep_ctrls = 0, + }, + { + .sd_type = CRL_SUBDEV_TYPE_SCALER, + .op_type = CRL_V4L2_CTRL_GET_OP, + .context = SENSOR_POWERED_ON, + .ctrl_id = V4L2_CID_PIXEL_RATE, + .name = "V4L2_CID_PIXEL_RATE_CSI", + 
.type = CRL_V4L2_CTRL_TYPE_INTEGER, + .data.std_data.min = 0, + .data.std_data.max = INT_MAX, + .data.std_data.step = 1, + .data.std_data.def = 0, + .flags = 0, + .impact = CRL_IMPACTS_NO_IMPACT, + .regs_items = 0, + .regs = 0, + .dep_items = 0, + .dep_ctrls = 0, + }, + { + .sd_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .op_type = CRL_V4L2_CTRL_SET_OP, + .context = SENSOR_POWERED_ON, + .ctrl_id = V4L2_CID_ANALOGUE_GAIN, + .name = "V4L2_CID_ANALOGUE_GAIN", + .type = CRL_V4L2_CTRL_TYPE_INTEGER, + .data.std_data.min = 0, + .data.std_data.max = 480, + .data.std_data.step = 1, + .data.std_data.def = 0, + .flags = 0, + .impact = CRL_IMPACTS_NO_IMPACT, + .ctrl = 0, + .regs_items = ARRAY_SIZE(imx214_ana_gain_global_regs), + .regs = imx214_ana_gain_global_regs, + .dep_items = 0, + .dep_ctrls = 0, + }, + { + .sd_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .op_type = CRL_V4L2_CTRL_SET_OP, + .context = SENSOR_POWERED_ON, + .ctrl_id = V4L2_CID_EXPOSURE, + .name = "V4L2_CID_EXPOSURE", + .type = CRL_V4L2_CTRL_TYPE_INTEGER, + .data.std_data.min = 0, + .data.std_data.max = 65500, + .data.std_data.step = 1, + .data.std_data.def = 1700, + .flags = 0, + .impact = CRL_IMPACTS_NO_IMPACT, + .ctrl = 0, + .regs_items = ARRAY_SIZE(imx214_exposure_regs), + .regs = imx214_exposure_regs, + .dep_items = 0, /* FLL is changes automatically */ + .dep_ctrls = 0, + }, + { + .sd_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .op_type = CRL_V4L2_CTRL_SET_OP, + .context = SENSOR_POWERED_ON, + .ctrl_id = V4L2_CID_HFLIP, + .name = "V4L2_CID_HFLIP", + .type = CRL_V4L2_CTRL_TYPE_INTEGER, + .data.std_data.min = 0, + .data.std_data.max = 1, + .data.std_data.step = 1, + .data.std_data.def = 0, + .flags = 0, + .impact = CRL_IMPACTS_NO_IMPACT, + .ctrl = 0, + .regs_items = ARRAY_SIZE(imx214_h_flip_regs), + .regs = imx214_h_flip_regs, + .dep_items = 0, + .dep_ctrls = 0, + }, + { + .sd_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .op_type = CRL_V4L2_CTRL_SET_OP, + .context = SENSOR_POWERED_ON, + .ctrl_id = V4L2_CID_VFLIP, + .name = 
"V4L2_CID_VFLIP", + .type = CRL_V4L2_CTRL_TYPE_INTEGER, + .data.std_data.min = 0, + .data.std_data.max = 1, + .data.std_data.step = 1, + .data.std_data.def = 0, + .flags = 0, + .impact = CRL_IMPACTS_NO_IMPACT, + .ctrl = 0, + .regs_items = ARRAY_SIZE(imx214_v_flip_regs), + .regs = imx214_v_flip_regs, + .dep_items = 0, + .dep_ctrls = 0, + }, + { + .sd_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .op_type = CRL_V4L2_CTRL_SET_OP, + .context = SENSOR_POWERED_ON, + .ctrl_id = V4L2_CID_FRAME_LENGTH_LINES, + .name = "Frame length lines", + .type = CRL_V4L2_CTRL_TYPE_CUSTOM, + .data.std_data.min = 160, + .data.std_data.max = 65535, + .data.std_data.step = 1, + .data.std_data.def = 4130, + .flags = V4L2_CTRL_FLAG_UPDATE, + .impact = CRL_IMPACTS_NO_IMPACT, + .ctrl = 0, + .regs_items = ARRAY_SIZE(imx214_vblank_regs), + .regs = imx214_vblank_regs, + .dep_items = 0, + .dep_ctrls = 0, + .v4l2_type = V4L2_CTRL_TYPE_INTEGER, + }, + { + .sd_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .op_type = CRL_V4L2_CTRL_SET_OP, + .context = SENSOR_POWERED_ON, + .ctrl_id = V4L2_CID_LINE_LENGTH_PIXELS, + .name = "Line Length Pixels", + .type = CRL_V4L2_CTRL_TYPE_CUSTOM, + .data.std_data.min = 5008, + .data.std_data.max = 65520, + .data.std_data.step = 1, + .data.std_data.def = 5008, + .flags = V4L2_CTRL_FLAG_UPDATE, + .impact = CRL_IMPACTS_NO_IMPACT, + .ctrl = 0, + .regs_items = ARRAY_SIZE(imx214_hblank_regs), + .regs = imx214_hblank_regs, + .dep_items = 0, + .dep_ctrls = 0, + .v4l2_type = V4L2_CTRL_TYPE_INTEGER, + }, + { + .sd_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .op_type = CRL_V4L2_CTRL_SET_OP, + .context = SENSOR_POWERED_ON, + .ctrl_id = V4L2_CID_TEST_PATTERN, + .name = "V4L2_CID_TEST_PATTERN", + .type = CRL_V4L2_CTRL_TYPE_MENU_ITEMS, + .data.v4l2_menu_items.menu = imx214_test_patterns, + .data.v4l2_menu_items.size = ARRAY_SIZE(imx214_test_patterns), + .flags = V4L2_CTRL_FLAG_UPDATE, + .impact = CRL_IMPACTS_NO_IMPACT, + .ctrl = 0, + .regs_items = ARRAY_SIZE(imx214_test_pattern_regs), + .regs = 
imx214_test_pattern_regs, + .dep_items = 0, + .dep_ctrls = 0, + }, +}; + +/* Power items, they are enabled in the order they are listed here */ +static struct crl_power_seq_entity imx214_power_items[] = { + { + .type = CRL_POWER_ETY_REGULATOR_FRAMEWORK, + .ent_name = "VANA", + .val = 2700000, + .delay = 30000, + }, + { + .type = CRL_POWER_ETY_REGULATOR_FRAMEWORK, + .ent_name = "VDIG", + .val = 1100000, + .delay = 0, + }, + { + .type = CRL_POWER_ETY_CLK_FRAMEWORK, + .val = 24000000, + }, + { + .type = CRL_POWER_ETY_GPIO_FROM_PDATA, + .val = 1, + .undo_val = 0, + }, +}; + +static struct crl_nvm_blob imx214_nvm_blobs[] = { + { 0x50, 0x00, 0x100 }, + { 0x51, 0x00, 0x100 }, + { 0x52, 0x00, 0x20 }, +}; + +struct crl_sensor_configuration imx214_crl_configuration = { + + .power_items = ARRAY_SIZE(imx214_power_items), + .power_entities = imx214_power_items, + + .powerup_regs_items = ARRAY_SIZE(imx214_powerup_regset), + .powerup_regs = imx214_powerup_regset, + + .poweroff_regs_items = 0, + .poweroff_regs = 0, + + + .id_reg_items = ARRAY_SIZE(imx214_sensor_detect_regset), + .id_regs = imx214_sensor_detect_regset, + + .subdev_items = ARRAY_SIZE(imx214_sensor_subdevs), + .subdevs = imx214_sensor_subdevs, + + .sensor_limits = &imx214_sensor_limits, + + .pll_config_items = ARRAY_SIZE(imx214_pll_configurations), + .pll_configs = imx214_pll_configurations, + + .modes_items = ARRAY_SIZE(imx214_modes), + .modes = imx214_modes, + .fail_safe_mode_index = 3, + + .streamon_regs_items = ARRAY_SIZE(imx214_streamon_regs), + .streamon_regs = imx214_streamon_regs, + + .streamoff_regs_items = ARRAY_SIZE(imx214_streamoff_regs), + .streamoff_regs = imx214_streamoff_regs, + + .v4l2_ctrls_items = ARRAY_SIZE(imx214_v4l2_ctrls), + .v4l2_ctrl_bank = imx214_v4l2_ctrls, + + .csi_fmts_items = ARRAY_SIZE(imx214_crl_csi_data_fmt), + .csi_fmts = imx214_crl_csi_data_fmt, + + .flip_items = ARRAY_SIZE(imx214_flip_configurations), + .flip_data = imx214_flip_configurations, + + .crl_nvm_info.nvm_flags = 
CRL_NVM_ADDR_MODE_8BIT, + .crl_nvm_info.nvm_preop_regs_items = 0, + .crl_nvm_info.nvm_postop_regs_items = 0, + .crl_nvm_info.nvm_blobs_items = ARRAY_SIZE(imx214_nvm_blobs), + .crl_nvm_info.nvm_config = imx214_nvm_blobs, +}; + + + + + + +#endif /* __CRLMODULE_DUMMY_imx230_CONFIGURATION_H_ */ diff --git a/drivers/media/i2c/crlmodule/crl_imx230_configuration.h b/drivers/media/i2c/crlmodule/crl_imx230_configuration.h new file mode 100644 index 0000000000000..c19afad589e5e --- /dev/null +++ b/drivers/media/i2c/crlmodule/crl_imx230_configuration.h @@ -0,0 +1,2367 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (C) 2014 - 2018 Intel Corporation + * + * Author: Vinod Govindapillai + * + */ + +#ifndef __CRLMODULE_imx230_CONFIGURATION_H_ +#define __CRLMODULE_imx230_CONFIGURATION_H_ + +#include "crlmodule-nvm.h" +#include "crlmodule-sensor-ds.h" + + +static struct crl_register_write_rep imx230_pll_1500mbps[] = { + { 0x0136, CRL_REG_LEN_08BIT, 0x18 }, /* EXT clock 24 MHz*/ + { 0x0137, CRL_REG_LEN_08BIT, 0x00 }, + { 0x0301, CRL_REG_LEN_08BIT, 0x04 }, + { 0x0303, CRL_REG_LEN_08BIT, 0x02 }, + { 0x0305, CRL_REG_LEN_08BIT, 0x04 }, + { 0x0306, CRL_REG_LEN_08BIT, 0x00 }, + { 0x0307, CRL_REG_LEN_08BIT, 0xc8 }, + { 0x0309, CRL_REG_LEN_08BIT, 0x0A }, + { 0x030B, CRL_REG_LEN_08BIT, 0x01 }, + { 0x030D, CRL_REG_LEN_08BIT, 0x0F }, + { 0x030E, CRL_REG_LEN_08BIT, 0x03 }, + { 0x030F, CRL_REG_LEN_08BIT, 0xa9 }, + { 0x0310, CRL_REG_LEN_08BIT, 0x01 }, + { 0x0114, CRL_REG_LEN_08BIT, 0x03 }, /* Mipi settings, 4 lane */ + { 0x0820, CRL_REG_LEN_08BIT, 0x17 }, /*Data rate setting*/ + { 0x0821, CRL_REG_LEN_08BIT, 0x6c }, + { 0x0822, CRL_REG_LEN_08BIT, 0xcc }, + { 0x0823, CRL_REG_LEN_08BIT, 0xcc }, + { 0x0808, CRL_REG_LEN_08BIT, 0x01 }, +}; + +/* PLL settings for CSI lanes: 4, RAW14 output */ +static struct crl_register_write_rep imx230_pll_4_14_1500mbps[] = { + { 0x0136, CRL_REG_LEN_08BIT, 0x18 }, /* EXT clock 24 MHz*/ + { 0x0137, CRL_REG_LEN_08BIT, 0x00 }, + { 0x0301, CRL_REG_LEN_08BIT, 0x04 
}, + { 0x0303, CRL_REG_LEN_08BIT, 0x02 }, + { 0x0305, CRL_REG_LEN_08BIT, 0x04 }, + { 0x0306, CRL_REG_LEN_08BIT, 0x00 }, + { 0x0307, CRL_REG_LEN_08BIT, 0xbf }, + { 0x0309, CRL_REG_LEN_08BIT, 0x0e }, + { 0x030B, CRL_REG_LEN_08BIT, 0x01 }, + { 0x030D, CRL_REG_LEN_08BIT, 0x04 }, + { 0x030E, CRL_REG_LEN_08BIT, 0x00 }, + { 0x030F, CRL_REG_LEN_08BIT, 0xfa }, + { 0x0310, CRL_REG_LEN_08BIT, 0x01 }, + { 0x0114, CRL_REG_LEN_08BIT, 0x03 }, /* Mipi settings, 4 lane */ + { 0x0820, CRL_REG_LEN_08BIT, 0x17 }, /*Data rate setting*/ + { 0x0821, CRL_REG_LEN_08BIT, 0x70 }, + { 0x0822, CRL_REG_LEN_08BIT, 0x00 }, + { 0x0823, CRL_REG_LEN_08BIT, 0x00 }, + { 0x0808, CRL_REG_LEN_08BIT, 0x01 }, +}; + +static struct crl_register_write_rep imx230_pll_2_10_1500mbps[] = { + { 0x0136, CRL_REG_LEN_08BIT, 0x18 }, /* EXT clock 24 MHz*/ + { 0x0137, CRL_REG_LEN_08BIT, 0x00 }, + { 0x0301, CRL_REG_LEN_08BIT, 0x04 }, + { 0x0303, CRL_REG_LEN_08BIT, 0x02 }, + { 0x0305, CRL_REG_LEN_08BIT, 0x04 }, + { 0x0306, CRL_REG_LEN_08BIT, 0x00 }, + { 0x0307, CRL_REG_LEN_08BIT, 0xc8 }, + { 0x0309, CRL_REG_LEN_08BIT, 0x0A }, + { 0x030B, CRL_REG_LEN_08BIT, 0x01 }, + { 0x030D, CRL_REG_LEN_08BIT, 0x0F }, + { 0x030E, CRL_REG_LEN_08BIT, 0x03 }, + { 0x030F, CRL_REG_LEN_08BIT, 0xa9 }, + { 0x0310, CRL_REG_LEN_08BIT, 0x01 }, + { 0x0114, CRL_REG_LEN_08BIT, 0x01 }, /* Mipi settings, 2 lane */ + { 0x0820, CRL_REG_LEN_08BIT, 0x09 }, /*Data rate setting*/ + { 0x0821, CRL_REG_LEN_08BIT, 0x60 }, + { 0x0822, CRL_REG_LEN_08BIT, 0x00 }, + { 0x0823, CRL_REG_LEN_08BIT, 0x00 }, + { 0x0808, CRL_REG_LEN_08BIT, 0x01 }, +}; + +static struct crl_register_write_rep imx230_pll_2_8_1500mbps[] = { + { 0x0136, CRL_REG_LEN_08BIT, 0x18 }, /* EXT clock 24 MHz*/ + { 0x0137, CRL_REG_LEN_08BIT, 0x00 }, + { 0x0301, CRL_REG_LEN_08BIT, 0x04 }, + { 0x0303, CRL_REG_LEN_08BIT, 0x02 }, + { 0x0305, CRL_REG_LEN_08BIT, 0x04 }, + { 0x0306, CRL_REG_LEN_08BIT, 0x00 }, + { 0x0307, CRL_REG_LEN_08BIT, 0xc8 }, + { 0x0309, CRL_REG_LEN_08BIT, 0x08 }, + { 0x030B, 
CRL_REG_LEN_08BIT, 0x01 }, + { 0x030D, CRL_REG_LEN_08BIT, 0x0F }, + { 0x030E, CRL_REG_LEN_08BIT, 0x03 }, + { 0x030F, CRL_REG_LEN_08BIT, 0xa9 }, + { 0x0310, CRL_REG_LEN_08BIT, 0x01 }, + { 0x0114, CRL_REG_LEN_08BIT, 0x01 }, /* Mipi settings, 2 lane */ + { 0x0820, CRL_REG_LEN_08BIT, 0x09 }, /*Data rate setting*/ + { 0x0821, CRL_REG_LEN_08BIT, 0x60 }, + { 0x0822, CRL_REG_LEN_08BIT, 0x00 }, + { 0x0823, CRL_REG_LEN_08BIT, 0x00 }, + { 0x0808, CRL_REG_LEN_08BIT, 0x01 }, +}; + +static struct crl_register_write_rep imx230_powerup_regset[] = { + { 0x4800, CRL_REG_LEN_08BIT, 0x0E }, + { 0x4890, CRL_REG_LEN_08BIT, 0x01 }, + { 0x4D1E, CRL_REG_LEN_08BIT, 0x01 }, + { 0x4D1F, CRL_REG_LEN_08BIT, 0xFF }, + { 0x4FA0, CRL_REG_LEN_08BIT, 0x00 }, + { 0x4FA1, CRL_REG_LEN_08BIT, 0x00 }, + { 0x4FA2, CRL_REG_LEN_08BIT, 0x00 }, + { 0x4FA3, CRL_REG_LEN_08BIT, 0x83 }, + { 0x6153, CRL_REG_LEN_08BIT, 0x01 }, + { 0x6156, CRL_REG_LEN_08BIT, 0x01 }, + { 0x69BB, CRL_REG_LEN_08BIT, 0x01 }, + { 0x69BC, CRL_REG_LEN_08BIT, 0x05 }, + { 0x69BD, CRL_REG_LEN_08BIT, 0x05 }, + { 0x69C1, CRL_REG_LEN_08BIT, 0x00 }, + { 0x69C4, CRL_REG_LEN_08BIT, 0x01 }, + { 0x69C6, CRL_REG_LEN_08BIT, 0x01 }, + { 0x7300, CRL_REG_LEN_08BIT, 0x00 }, + { 0x9009, CRL_REG_LEN_08BIT, 0x1A }, + { 0xB040, CRL_REG_LEN_08BIT, 0x90 }, + { 0xB041, CRL_REG_LEN_08BIT, 0x14 }, + { 0xB042, CRL_REG_LEN_08BIT, 0x6B }, + { 0xB043, CRL_REG_LEN_08BIT, 0x43 }, + { 0xB044, CRL_REG_LEN_08BIT, 0x63 }, + { 0xB045, CRL_REG_LEN_08BIT, 0x2A }, + { 0xB046, CRL_REG_LEN_08BIT, 0x68 }, + { 0xB047, CRL_REG_LEN_08BIT, 0x06 }, + { 0xB048, CRL_REG_LEN_08BIT, 0x68 }, + { 0xB049, CRL_REG_LEN_08BIT, 0x07 }, + { 0xB04A, CRL_REG_LEN_08BIT, 0x68 }, + { 0xB04B, CRL_REG_LEN_08BIT, 0x04 }, + { 0xB04C, CRL_REG_LEN_08BIT, 0x68 }, + { 0xB04D, CRL_REG_LEN_08BIT, 0x05 }, + { 0xB04E, CRL_REG_LEN_08BIT, 0x68 }, + { 0xB04F, CRL_REG_LEN_08BIT, 0x16 }, + { 0xB050, CRL_REG_LEN_08BIT, 0x68 }, + { 0xB051, CRL_REG_LEN_08BIT, 0x17 }, + { 0xB052, CRL_REG_LEN_08BIT, 0x68 }, + { 0xB053, 
CRL_REG_LEN_08BIT, 0x74 }, + { 0xB054, CRL_REG_LEN_08BIT, 0x68 }, + { 0xB055, CRL_REG_LEN_08BIT, 0x75 }, + { 0xB056, CRL_REG_LEN_08BIT, 0x68 }, + { 0xB057, CRL_REG_LEN_08BIT, 0x76 }, + { 0xB058, CRL_REG_LEN_08BIT, 0x68 }, + { 0xB059, CRL_REG_LEN_08BIT, 0x77 }, + { 0xB05A, CRL_REG_LEN_08BIT, 0x68 }, + { 0xB05B, CRL_REG_LEN_08BIT, 0x7A }, + { 0xB05C, CRL_REG_LEN_08BIT, 0x68 }, + { 0xB05D, CRL_REG_LEN_08BIT, 0x7B }, + { 0xB05E, CRL_REG_LEN_08BIT, 0x68 }, + { 0xB05F, CRL_REG_LEN_08BIT, 0x0A }, + { 0xB060, CRL_REG_LEN_08BIT, 0x68 }, + { 0xB061, CRL_REG_LEN_08BIT, 0x0B }, + { 0xB062, CRL_REG_LEN_08BIT, 0x68 }, + { 0xB063, CRL_REG_LEN_08BIT, 0x08 }, + { 0xB064, CRL_REG_LEN_08BIT, 0x68 }, + { 0xB065, CRL_REG_LEN_08BIT, 0x09 }, + { 0xB066, CRL_REG_LEN_08BIT, 0x68 }, + { 0xB067, CRL_REG_LEN_08BIT, 0x0E }, + { 0xB068, CRL_REG_LEN_08BIT, 0x68 }, + { 0xB069, CRL_REG_LEN_08BIT, 0x0F }, + { 0xB06A, CRL_REG_LEN_08BIT, 0x68 }, + { 0xB06B, CRL_REG_LEN_08BIT, 0x0C }, + { 0xB06C, CRL_REG_LEN_08BIT, 0x68 }, + { 0xB06D, CRL_REG_LEN_08BIT, 0x0D }, + { 0xB06E, CRL_REG_LEN_08BIT, 0x68 }, + { 0xB06F, CRL_REG_LEN_08BIT, 0x13 }, + { 0xB070, CRL_REG_LEN_08BIT, 0x68 }, + { 0xB071, CRL_REG_LEN_08BIT, 0x12 }, + { 0xB072, CRL_REG_LEN_08BIT, 0x90 }, + { 0xB073, CRL_REG_LEN_08BIT, 0x0E }, + { 0xD000, CRL_REG_LEN_08BIT, 0xDA }, + { 0xD001, CRL_REG_LEN_08BIT, 0xDA }, + { 0xD002, CRL_REG_LEN_08BIT, 0xAF }, + { 0xD003, CRL_REG_LEN_08BIT, 0xE1 }, + { 0xD004, CRL_REG_LEN_08BIT, 0x55 }, + { 0xD005, CRL_REG_LEN_08BIT, 0x34 }, + { 0xD006, CRL_REG_LEN_08BIT, 0x21 }, + { 0xD007, CRL_REG_LEN_08BIT, 0x00 }, + { 0xD008, CRL_REG_LEN_08BIT, 0x1C }, + { 0xD009, CRL_REG_LEN_08BIT, 0x80 }, + { 0xD00A, CRL_REG_LEN_08BIT, 0xFE }, + { 0xD00B, CRL_REG_LEN_08BIT, 0xC5 }, + { 0xD00C, CRL_REG_LEN_08BIT, 0x55 }, + { 0xD00D, CRL_REG_LEN_08BIT, 0xDC }, + { 0xD00E, CRL_REG_LEN_08BIT, 0xB6 }, + { 0xD00F, CRL_REG_LEN_08BIT, 0x00 }, + { 0xD010, CRL_REG_LEN_08BIT, 0x31 }, + { 0xD011, CRL_REG_LEN_08BIT, 0x02 }, + { 0xD012, 
CRL_REG_LEN_08BIT, 0x4A }, + { 0xD013, CRL_REG_LEN_08BIT, 0x0E }, + { 0xD014, CRL_REG_LEN_08BIT, 0x55 }, + { 0xD015, CRL_REG_LEN_08BIT, 0xF0 }, + { 0xD016, CRL_REG_LEN_08BIT, 0x1B }, + { 0xD017, CRL_REG_LEN_08BIT, 0x00 }, + { 0xD018, CRL_REG_LEN_08BIT, 0xFA }, + { 0xD019, CRL_REG_LEN_08BIT, 0x2C }, + { 0xD01A, CRL_REG_LEN_08BIT, 0xF1 }, + { 0xD01B, CRL_REG_LEN_08BIT, 0x7E }, + { 0xD01C, CRL_REG_LEN_08BIT, 0x55 }, + { 0xD01D, CRL_REG_LEN_08BIT, 0x1C }, + { 0xD01E, CRL_REG_LEN_08BIT, 0xD8 }, + { 0xD01F, CRL_REG_LEN_08BIT, 0x00 }, + { 0xD020, CRL_REG_LEN_08BIT, 0x76 }, + { 0xD021, CRL_REG_LEN_08BIT, 0xC1 }, + { 0xD022, CRL_REG_LEN_08BIT, 0xBF }, + { 0xD044, CRL_REG_LEN_08BIT, 0x40 }, + { 0xD045, CRL_REG_LEN_08BIT, 0xBA }, + { 0xD046, CRL_REG_LEN_08BIT, 0x70 }, + { 0xD047, CRL_REG_LEN_08BIT, 0x47 }, + { 0xD048, CRL_REG_LEN_08BIT, 0xC0 }, + { 0xD049, CRL_REG_LEN_08BIT, 0xBA }, + { 0xD04A, CRL_REG_LEN_08BIT, 0x70 }, + { 0xD04B, CRL_REG_LEN_08BIT, 0x47 }, + { 0xD04C, CRL_REG_LEN_08BIT, 0x82 }, + { 0xD04D, CRL_REG_LEN_08BIT, 0xF6 }, + { 0xD04E, CRL_REG_LEN_08BIT, 0xDA }, + { 0xD04F, CRL_REG_LEN_08BIT, 0xFA }, + { 0xD050, CRL_REG_LEN_08BIT, 0x00 }, + { 0xD051, CRL_REG_LEN_08BIT, 0xF0 }, + { 0xD052, CRL_REG_LEN_08BIT, 0x02 }, + { 0xD053, CRL_REG_LEN_08BIT, 0xF8 }, + { 0xD054, CRL_REG_LEN_08BIT, 0x81 }, + { 0xD055, CRL_REG_LEN_08BIT, 0xF6 }, + { 0xD056, CRL_REG_LEN_08BIT, 0xCE }, + { 0xD057, CRL_REG_LEN_08BIT, 0xFD }, + { 0xD058, CRL_REG_LEN_08BIT, 0x10 }, + { 0xD059, CRL_REG_LEN_08BIT, 0xB5 }, + { 0xD05A, CRL_REG_LEN_08BIT, 0x0D }, + { 0xD05B, CRL_REG_LEN_08BIT, 0x48 }, + { 0xD05C, CRL_REG_LEN_08BIT, 0x40 }, + { 0xD05D, CRL_REG_LEN_08BIT, 0x7A }, + { 0xD05E, CRL_REG_LEN_08BIT, 0x01 }, + { 0xD05F, CRL_REG_LEN_08BIT, 0x28 }, + { 0xD060, CRL_REG_LEN_08BIT, 0x15 }, + { 0xD061, CRL_REG_LEN_08BIT, 0xD1 }, + { 0xD062, CRL_REG_LEN_08BIT, 0x0C }, + { 0xD063, CRL_REG_LEN_08BIT, 0x49 }, + { 0xD064, CRL_REG_LEN_08BIT, 0x0C }, + { 0xD065, CRL_REG_LEN_08BIT, 0x46 }, + { 0xD066, 
CRL_REG_LEN_08BIT, 0x40 }, + { 0xD067, CRL_REG_LEN_08BIT, 0x3C }, + { 0xD068, CRL_REG_LEN_08BIT, 0x48 }, + { 0xD069, CRL_REG_LEN_08BIT, 0x8A }, + { 0xD06A, CRL_REG_LEN_08BIT, 0x62 }, + { 0xD06B, CRL_REG_LEN_08BIT, 0x8A }, + { 0xD06C, CRL_REG_LEN_08BIT, 0x80 }, + { 0xD06D, CRL_REG_LEN_08BIT, 0x1A }, + { 0xD06E, CRL_REG_LEN_08BIT, 0x8A }, + { 0xD06F, CRL_REG_LEN_08BIT, 0x89 }, + { 0xD070, CRL_REG_LEN_08BIT, 0x00 }, + { 0xD071, CRL_REG_LEN_08BIT, 0xB2 }, + { 0xD072, CRL_REG_LEN_08BIT, 0x10 }, + { 0xD073, CRL_REG_LEN_08BIT, 0x18 }, + { 0xD074, CRL_REG_LEN_08BIT, 0x0A }, + { 0xD075, CRL_REG_LEN_08BIT, 0x46 }, + { 0xD076, CRL_REG_LEN_08BIT, 0x20 }, + { 0xD077, CRL_REG_LEN_08BIT, 0x32 }, + { 0xD078, CRL_REG_LEN_08BIT, 0x12 }, + { 0xD079, CRL_REG_LEN_08BIT, 0x88 }, + { 0xD07A, CRL_REG_LEN_08BIT, 0x90 }, + { 0xD07B, CRL_REG_LEN_08BIT, 0x42 }, + { 0xD07C, CRL_REG_LEN_08BIT, 0x00 }, + { 0xD07D, CRL_REG_LEN_08BIT, 0xDA }, + { 0xD07E, CRL_REG_LEN_08BIT, 0x10 }, + { 0xD07F, CRL_REG_LEN_08BIT, 0x46 }, + { 0xD080, CRL_REG_LEN_08BIT, 0x80 }, + { 0xD081, CRL_REG_LEN_08BIT, 0xB2 }, + { 0xD082, CRL_REG_LEN_08BIT, 0x88 }, + { 0xD083, CRL_REG_LEN_08BIT, 0x81 }, + { 0xD084, CRL_REG_LEN_08BIT, 0x84 }, + { 0xD085, CRL_REG_LEN_08BIT, 0xF6 }, + { 0xD086, CRL_REG_LEN_08BIT, 0x06 }, + { 0xD087, CRL_REG_LEN_08BIT, 0xF8 }, + { 0xD088, CRL_REG_LEN_08BIT, 0xE0 }, + { 0xD089, CRL_REG_LEN_08BIT, 0x67 }, + { 0xD08A, CRL_REG_LEN_08BIT, 0x85 }, + { 0xD08B, CRL_REG_LEN_08BIT, 0xF6 }, + { 0xD08C, CRL_REG_LEN_08BIT, 0x4B }, + { 0xD08D, CRL_REG_LEN_08BIT, 0xFC }, + { 0xD08E, CRL_REG_LEN_08BIT, 0x10 }, + { 0xD08F, CRL_REG_LEN_08BIT, 0xBD }, + { 0xD090, CRL_REG_LEN_08BIT, 0x00 }, + { 0xD091, CRL_REG_LEN_08BIT, 0x18 }, + { 0xD092, CRL_REG_LEN_08BIT, 0x1E }, + { 0xD093, CRL_REG_LEN_08BIT, 0x78 }, + { 0xD094, CRL_REG_LEN_08BIT, 0x00 }, + { 0xD095, CRL_REG_LEN_08BIT, 0x18 }, + { 0xD096, CRL_REG_LEN_08BIT, 0x17 }, + { 0xD097, CRL_REG_LEN_08BIT, 0x98 }, + { 0x5869, CRL_REG_LEN_08BIT, 0x01 }, /*Global settings 
done*/ + { 0x0216, CRL_REG_LEN_08BIT, 0x00 }, + { 0x0217, CRL_REG_LEN_08BIT, 0x00 }, + { 0x020E, CRL_REG_LEN_08BIT, 0x01 }, + { 0x020F, CRL_REG_LEN_08BIT, 0x00 }, + { 0x0210, CRL_REG_LEN_08BIT, 0x01 }, + { 0x0211, CRL_REG_LEN_08BIT, 0x00 }, + { 0x0212, CRL_REG_LEN_08BIT, 0x01 }, + { 0x0213, CRL_REG_LEN_08BIT, 0x00 }, + { 0x0214, CRL_REG_LEN_08BIT, 0x01 }, + { 0x0215, CRL_REG_LEN_08BIT, 0x00 }, + { 0x3A21, CRL_REG_LEN_08BIT, 0x00 }, /* LSC setting */ + { 0x3011, CRL_REG_LEN_08BIT, 0x00 }, /* STATS Calc enable/disable */ + { 0x3013, CRL_REG_LEN_08BIT, 0x00 }, /*stats output enable/disable */ + { 0x5041, CRL_REG_LEN_08BIT, 0x04 }, /*embedded data on/off, 4 lines */ + { 0x0138, CRL_REG_LEN_08BIT, 0x01 }, /* Temperature control enable */ +}; + +static struct crl_register_write_rep imx230_mode_2k2k[] = { + { 0x0344, CRL_REG_LEN_08BIT, 0x00 }, + { 0x0345, CRL_REG_LEN_08BIT, 0x00 }, + { 0x0346, CRL_REG_LEN_08BIT, 0x00 }, + { 0x0347, CRL_REG_LEN_08BIT, 0x00 }, + { 0x0348, CRL_REG_LEN_08BIT, 0x14 }, + { 0x0349, CRL_REG_LEN_08BIT, 0xDF }, + { 0x034A, CRL_REG_LEN_08BIT, 0x0F }, + { 0x034B, CRL_REG_LEN_08BIT, 0xAF }, + { 0x0381, CRL_REG_LEN_08BIT, 0x01 }, + { 0x0383, CRL_REG_LEN_08BIT, 0x01 }, + { 0x0385, CRL_REG_LEN_08BIT, 0x01 }, + { 0x0387, CRL_REG_LEN_08BIT, 0x01 }, + { 0x0900, CRL_REG_LEN_08BIT, 0x01 }, + { 0x0901, CRL_REG_LEN_08BIT, 0x22 }, + { 0x0902, CRL_REG_LEN_08BIT, 0x00 }, + { 0x305C, CRL_REG_LEN_08BIT, 0x11 }, + { 0x034C, CRL_REG_LEN_08BIT, 0x0A }, /*Output*/ + { 0x034D, CRL_REG_LEN_08BIT, 0x70 }, + { 0x034E, CRL_REG_LEN_08BIT, 0x07 }, + { 0x034F, CRL_REG_LEN_08BIT, 0xD7 }, + { 0x0401, CRL_REG_LEN_08BIT, 0x00 }, + { 0x0404, CRL_REG_LEN_08BIT, 0x00 }, + { 0x0405, CRL_REG_LEN_08BIT, 0x10 }, + { 0x0408, CRL_REG_LEN_08BIT, 0x00 }, + { 0x0409, CRL_REG_LEN_08BIT, 0x00 }, + { 0x040A, CRL_REG_LEN_08BIT, 0x00 }, + { 0x040B, CRL_REG_LEN_08BIT, 0x00 }, + { 0x040C, CRL_REG_LEN_08BIT, 0x0A }, + { 0x040D, CRL_REG_LEN_08BIT, 0x70 }, + { 0x040E, CRL_REG_LEN_08BIT, 0x07 }, + { 
0x040F, CRL_REG_LEN_08BIT, 0xD7 }, + { 0x697D, CRL_REG_LEN_08BIT, 0x02 }, /* PAF settings */ + { 0x6985, CRL_REG_LEN_08BIT, 0x02 }, + { 0x698D, CRL_REG_LEN_08BIT, 0x0B }, + { 0x6995, CRL_REG_LEN_08BIT, 0x0B }, + { 0x699D, CRL_REG_LEN_08BIT, 0x16 }, + { 0x69A5, CRL_REG_LEN_08BIT, 0x16 }, + { 0x69AD, CRL_REG_LEN_08BIT, 0x1F }, + { 0x69B5, CRL_REG_LEN_08BIT, 0x1F }, + { 0x3A22, CRL_REG_LEN_08BIT, 0x20 }, /* DPC2D settings */ + { 0x3A23, CRL_REG_LEN_08BIT, 0x14 }, + { 0x3A24, CRL_REG_LEN_08BIT, 0xE0 }, + { 0x3A25, CRL_REG_LEN_08BIT, 0x07 }, + { 0x3A26, CRL_REG_LEN_08BIT, 0xD8 }, + { 0x3A2F, CRL_REG_LEN_08BIT, 0x00 }, + { 0x3A30, CRL_REG_LEN_08BIT, 0x00 }, + { 0x3A31, CRL_REG_LEN_08BIT, 0x00 }, + { 0x3A32, CRL_REG_LEN_08BIT, 0x00 }, + { 0x3A33, CRL_REG_LEN_08BIT, 0x14 }, + { 0x3A34, CRL_REG_LEN_08BIT, 0xDF }, + { 0x3A35, CRL_REG_LEN_08BIT, 0x0F }, + { 0x3A36, CRL_REG_LEN_08BIT, 0xAF }, + { 0x3A37, CRL_REG_LEN_08BIT, 0x00 }, + { 0x3A38, CRL_REG_LEN_08BIT, 0x01 }, + { 0x3A39, CRL_REG_LEN_08BIT, 0x00 }, +}; + +/* UHD Scale */ +static struct crl_register_write_rep imx230_mode_4k2k[] = { + { 0x0344, CRL_REG_LEN_08BIT, 0x00 }, + { 0x0345, CRL_REG_LEN_08BIT, 0x00 }, + { 0x0346, CRL_REG_LEN_08BIT, 0x01 }, + { 0x0347, CRL_REG_LEN_08BIT, 0xF8 }, + { 0x0348, CRL_REG_LEN_08BIT, 0x14 }, + { 0x0349, CRL_REG_LEN_08BIT, 0xDF }, + { 0x034A, CRL_REG_LEN_08BIT, 0x0D }, + { 0x034B, CRL_REG_LEN_08BIT, 0xB7 }, + { 0x0381, CRL_REG_LEN_08BIT, 0x01 }, + { 0x0383, CRL_REG_LEN_08BIT, 0x01 }, + { 0x0385, CRL_REG_LEN_08BIT, 0x01 }, + { 0x0387, CRL_REG_LEN_08BIT, 0x01 }, + { 0x0900, CRL_REG_LEN_08BIT, 0x00 }, + { 0x0901, CRL_REG_LEN_08BIT, 0x11 }, + { 0x0902, CRL_REG_LEN_08BIT, 0x00 }, + { 0x305C, CRL_REG_LEN_08BIT, 0x11 }, + { 0x034C, CRL_REG_LEN_08BIT, 0x0F }, + { 0x034D, CRL_REG_LEN_08BIT, 0x2E }, + { 0x034E, CRL_REG_LEN_08BIT, 0x08 }, + { 0x034F, CRL_REG_LEN_08BIT, 0x88 }, + { 0x0401, CRL_REG_LEN_08BIT, 0x02 }, + { 0x0404, CRL_REG_LEN_08BIT, 0x00 }, + { 0x0405, CRL_REG_LEN_08BIT, 0x16 }, + { 
0x0408, CRL_REG_LEN_08BIT, 0x00 }, + { 0x0409, CRL_REG_LEN_08BIT, 0x00 }, + { 0x040A, CRL_REG_LEN_08BIT, 0x00 }, + { 0x040B, CRL_REG_LEN_08BIT, 0x00 }, + { 0x040C, CRL_REG_LEN_08BIT, 0x14 }, + { 0x040D, CRL_REG_LEN_08BIT, 0xE0 }, + { 0x040E, CRL_REG_LEN_08BIT, 0x0B }, + { 0x040F, CRL_REG_LEN_08BIT, 0xC0 }, + { 0x3A22, CRL_REG_LEN_08BIT, 0x20 }, /* DPC2D settings */ + { 0x3A23, CRL_REG_LEN_08BIT, 0x14 }, + { 0x3A24, CRL_REG_LEN_08BIT, 0xE0 }, + { 0x3A25, CRL_REG_LEN_08BIT, 0x0B }, + { 0x3A26, CRL_REG_LEN_08BIT, 0xC0 }, + { 0x3A2F, CRL_REG_LEN_08BIT, 0x00 }, + { 0x3A30, CRL_REG_LEN_08BIT, 0x00 }, + { 0x3A31, CRL_REG_LEN_08BIT, 0x01 }, + { 0x3A32, CRL_REG_LEN_08BIT, 0xF8 }, + { 0x3A33, CRL_REG_LEN_08BIT, 0x14 }, + { 0x3A34, CRL_REG_LEN_08BIT, 0xDF }, + { 0x3A35, CRL_REG_LEN_08BIT, 0x0D }, + { 0x3A36, CRL_REG_LEN_08BIT, 0xB7 }, + { 0x3A37, CRL_REG_LEN_08BIT, 0x00 }, + { 0x3A38, CRL_REG_LEN_08BIT, 0x00 }, + { 0x3A39, CRL_REG_LEN_08BIT, 0x00 }, +}; + + +/* UHD crop*/ +static struct crl_register_write_rep imx230_mode_uhd_crop[] = { + { 0x0344, CRL_REG_LEN_08BIT, 0x00 }, + { 0x0345, CRL_REG_LEN_08BIT, 0x00 }, + { 0x0346, CRL_REG_LEN_08BIT, 0x03 }, + { 0x0347, CRL_REG_LEN_08BIT, 0xA0 }, + { 0x0348, CRL_REG_LEN_08BIT, 0x14 }, + { 0x0349, CRL_REG_LEN_08BIT, 0xDF }, + { 0x034A, CRL_REG_LEN_08BIT, 0x0C }, + { 0x034B, CRL_REG_LEN_08BIT, 0x0F }, + { 0x0381, CRL_REG_LEN_08BIT, 0x01 }, + { 0x0383, CRL_REG_LEN_08BIT, 0x01 }, + { 0x0385, CRL_REG_LEN_08BIT, 0x01 }, + { 0x0387, CRL_REG_LEN_08BIT, 0x01 }, + { 0x0900, CRL_REG_LEN_08BIT, 0x00 }, + { 0x0901, CRL_REG_LEN_08BIT, 0x11 }, + { 0x0902, CRL_REG_LEN_08BIT, 0x00 }, + { 0x305C, CRL_REG_LEN_08BIT, 0x11 }, + { 0x034C, CRL_REG_LEN_08BIT, 0x0F }, + { 0x034D, CRL_REG_LEN_08BIT, 0x00 }, + { 0x034E, CRL_REG_LEN_08BIT, 0x08 }, + { 0x034F, CRL_REG_LEN_08BIT, 0x70 }, + { 0x0401, CRL_REG_LEN_08BIT, 0x00 }, + { 0x0404, CRL_REG_LEN_08BIT, 0x00 }, + { 0x0405, CRL_REG_LEN_08BIT, 0x10 }, + { 0x0408, CRL_REG_LEN_08BIT, 0x00 }, + { 0x0409, 
CRL_REG_LEN_08BIT, 0x00 }, + { 0x040A, CRL_REG_LEN_08BIT, 0x00 }, + { 0x040B, CRL_REG_LEN_08BIT, 0x00 }, + { 0x040C, CRL_REG_LEN_08BIT, 0x0F }, + { 0x040D, CRL_REG_LEN_08BIT, 0x00 }, + { 0x040E, CRL_REG_LEN_08BIT, 0x08 }, + { 0x040F, CRL_REG_LEN_08BIT, 0x70 }, + { 0x3A22, CRL_REG_LEN_08BIT, 0x20 }, /* DPC2D settings */ + { 0x3A23, CRL_REG_LEN_08BIT, 0x14 }, + { 0x3A24, CRL_REG_LEN_08BIT, 0xE0 }, + { 0x3A25, CRL_REG_LEN_08BIT, 0x08 }, + { 0x3A26, CRL_REG_LEN_08BIT, 0x70 }, + { 0x3A2F, CRL_REG_LEN_08BIT, 0x00 }, + { 0x3A30, CRL_REG_LEN_08BIT, 0x00 }, + { 0x3A31, CRL_REG_LEN_08BIT, 0x03 }, + { 0x3A32, CRL_REG_LEN_08BIT, 0xA0 }, + { 0x3A33, CRL_REG_LEN_08BIT, 0x14 }, + { 0x3A34, CRL_REG_LEN_08BIT, 0xDF }, + { 0x3A35, CRL_REG_LEN_08BIT, 0x0C }, + { 0x3A36, CRL_REG_LEN_08BIT, 0x0F }, + { 0x3A37, CRL_REG_LEN_08BIT, 0x00 }, + { 0x3A38, CRL_REG_LEN_08BIT, 0x00 }, + { 0x3A39, CRL_REG_LEN_08BIT, 0x00 }, +}; + + +/* 5344 x 4016 (full-resolution 4:3) */ +static struct crl_register_write_rep imx230_mode_full_4_3[] = { + { 0x0344, CRL_REG_LEN_08BIT, 0x00 }, + { 0x0345, CRL_REG_LEN_08BIT, 0x00 }, + { 0x0346, CRL_REG_LEN_08BIT, 0x00 }, + { 0x0347, CRL_REG_LEN_08BIT, 0x00 }, + { 0x0348, CRL_REG_LEN_08BIT, 0x14 }, + { 0x0349, CRL_REG_LEN_08BIT, 0xDF }, + { 0x034A, CRL_REG_LEN_08BIT, 0x0F }, + { 0x034B, CRL_REG_LEN_08BIT, 0xAF }, + { 0x0381, CRL_REG_LEN_08BIT, 0x01 }, + { 0x0383, CRL_REG_LEN_08BIT, 0x01 }, + { 0x0385, CRL_REG_LEN_08BIT, 0x01 }, + { 0x0387, CRL_REG_LEN_08BIT, 0x01 }, + { 0x0900, CRL_REG_LEN_08BIT, 0x00 }, + { 0x0901, CRL_REG_LEN_08BIT, 0x11 }, + { 0x0902, CRL_REG_LEN_08BIT, 0x00 }, + { 0x305C, CRL_REG_LEN_08BIT, 0x11 }, + { 0x034C, CRL_REG_LEN_08BIT, 0x14 }, + { 0x034D, CRL_REG_LEN_08BIT, 0xE0 }, + { 0x034E, CRL_REG_LEN_08BIT, 0x0F }, + { 0x034F, CRL_REG_LEN_08BIT, 0xB0 }, + { 0x0401, CRL_REG_LEN_08BIT, 0x00 }, + { 0x0404, CRL_REG_LEN_08BIT, 0x00 }, + { 0x0405, CRL_REG_LEN_08BIT, 0x10 }, + { 0x0408, CRL_REG_LEN_08BIT, 0x00 }, + { 0x0409, CRL_REG_LEN_08BIT, 0x00 }, + { 0x040A,
CRL_REG_LEN_08BIT, 0x00 }, + { 0x040B, CRL_REG_LEN_08BIT, 0x00 }, + { 0x040C, CRL_REG_LEN_08BIT, 0x14 }, + { 0x040D, CRL_REG_LEN_08BIT, 0xE0 }, + { 0x040E, CRL_REG_LEN_08BIT, 0x0F }, + { 0x040F, CRL_REG_LEN_08BIT, 0xB0 }, + { 0x3A22, CRL_REG_LEN_08BIT, 0x00 }, /* DPC2D settings */ + { 0x3A23, CRL_REG_LEN_08BIT, 0x14 }, + { 0x3A24, CRL_REG_LEN_08BIT, 0xE0 }, + { 0x3A25, CRL_REG_LEN_08BIT, 0x0f }, + { 0x3A26, CRL_REG_LEN_08BIT, 0xB0 }, + { 0x3A2F, CRL_REG_LEN_08BIT, 0x00 }, + { 0x3A30, CRL_REG_LEN_08BIT, 0x00 }, + { 0x3A31, CRL_REG_LEN_08BIT, 0x00 }, + { 0x3A32, CRL_REG_LEN_08BIT, 0x00 }, + { 0x3A33, CRL_REG_LEN_08BIT, 0x14 }, + { 0x3A34, CRL_REG_LEN_08BIT, 0xDF }, + { 0x3A35, CRL_REG_LEN_08BIT, 0x0f }, + { 0x3A36, CRL_REG_LEN_08BIT, 0xAF }, + { 0x3A37, CRL_REG_LEN_08BIT, 0x00 }, + { 0x3A38, CRL_REG_LEN_08BIT, 0x00 }, + { 0x3A39, CRL_REG_LEN_08BIT, 0x00 }, + { 0x3A21, CRL_REG_LEN_08BIT, 0x02 }, +}; + +/* 5344 x 3008 (full-width 16:9) — output size regs 0x034C-0x034F = 0x14E0 x 0x0BC0 */ +static struct crl_register_write_rep imx230_mode_full_16_9[] = { + { 0x0344, CRL_REG_LEN_08BIT, 0x00 }, + { 0x0345, CRL_REG_LEN_08BIT, 0x00 }, + { 0x0346, CRL_REG_LEN_08BIT, 0x01 }, + { 0x0347, CRL_REG_LEN_08BIT, 0xF8 }, + { 0x0348, CRL_REG_LEN_08BIT, 0x14 }, + { 0x0349, CRL_REG_LEN_08BIT, 0xDF }, + { 0x034A, CRL_REG_LEN_08BIT, 0x0D }, + { 0x034B, CRL_REG_LEN_08BIT, 0xB7 }, + { 0x0381, CRL_REG_LEN_08BIT, 0x01 }, + { 0x0383, CRL_REG_LEN_08BIT, 0x01 }, + { 0x0385, CRL_REG_LEN_08BIT, 0x01 }, + { 0x0387, CRL_REG_LEN_08BIT, 0x01 }, + { 0x0900, CRL_REG_LEN_08BIT, 0x00 }, + { 0x0901, CRL_REG_LEN_08BIT, 0x11 }, + { 0x0902, CRL_REG_LEN_08BIT, 0x00 }, + { 0x305C, CRL_REG_LEN_08BIT, 0x11 }, + { 0x034C, CRL_REG_LEN_08BIT, 0x14 }, + { 0x034D, CRL_REG_LEN_08BIT, 0xE0 }, + { 0x034E, CRL_REG_LEN_08BIT, 0x0B }, + { 0x034F, CRL_REG_LEN_08BIT, 0xC0 }, + { 0x0401, CRL_REG_LEN_08BIT, 0x00 }, + { 0x0404, CRL_REG_LEN_08BIT, 0x00 }, + { 0x0405, CRL_REG_LEN_08BIT, 0x10 }, + { 0x0408, CRL_REG_LEN_08BIT, 0x00 }, + { 0x0409, CRL_REG_LEN_08BIT, 0x00 }, + { 0x040A,
CRL_REG_LEN_08BIT, 0x00 }, + { 0x040B, CRL_REG_LEN_08BIT, 0x00 }, + { 0x040C, CRL_REG_LEN_08BIT, 0x14 }, + { 0x040D, CRL_REG_LEN_08BIT, 0xE0 }, + { 0x040E, CRL_REG_LEN_08BIT, 0x0B }, + { 0x040F, CRL_REG_LEN_08BIT, 0xC0 }, + { 0x3A22, CRL_REG_LEN_08BIT, 0x20 }, /* DPC2D settings */ + { 0x3A23, CRL_REG_LEN_08BIT, 0x14 }, + { 0x3A24, CRL_REG_LEN_08BIT, 0xE0 }, + { 0x3A25, CRL_REG_LEN_08BIT, 0x0B }, + { 0x3A26, CRL_REG_LEN_08BIT, 0xC0 }, + { 0x3A2F, CRL_REG_LEN_08BIT, 0x00 }, + { 0x3A30, CRL_REG_LEN_08BIT, 0x00 }, + { 0x3A31, CRL_REG_LEN_08BIT, 0x01 }, + { 0x3A32, CRL_REG_LEN_08BIT, 0xF8 }, + { 0x3A33, CRL_REG_LEN_08BIT, 0x14 }, + { 0x3A34, CRL_REG_LEN_08BIT, 0xDF }, + { 0x3A35, CRL_REG_LEN_08BIT, 0x0D }, + { 0x3A36, CRL_REG_LEN_08BIT, 0xB7 }, + { 0x3A37, CRL_REG_LEN_08BIT, 0x00 }, + { 0x3A38, CRL_REG_LEN_08BIT, 0x00 }, + { 0x3A39, CRL_REG_LEN_08BIT, 0x00 }, +}; + +static struct crl_register_write_rep imx230_mode_3264x2448_crop[] = { + { 0x0344, CRL_REG_LEN_08BIT, 0x00 }, + { 0x0345, CRL_REG_LEN_08BIT, 0x00 }, + { 0x0346, CRL_REG_LEN_08BIT, 0x03 }, + { 0x0347, CRL_REG_LEN_08BIT, 0x10 }, + { 0x0348, CRL_REG_LEN_08BIT, 0x14 }, + { 0x0349, CRL_REG_LEN_08BIT, 0xDF }, + { 0x034A, CRL_REG_LEN_08BIT, 0x0C }, + { 0x034B, CRL_REG_LEN_08BIT, 0x9F }, + { 0x0381, CRL_REG_LEN_08BIT, 0x01 }, + { 0x0383, CRL_REG_LEN_08BIT, 0x01 }, + { 0x0385, CRL_REG_LEN_08BIT, 0x01 }, + { 0x0387, CRL_REG_LEN_08BIT, 0x01 }, + { 0x0900, CRL_REG_LEN_08BIT, 0x00 }, + { 0x0901, CRL_REG_LEN_08BIT, 0x11 }, + { 0x0902, CRL_REG_LEN_08BIT, 0x00 }, + { 0x305C, CRL_REG_LEN_08BIT, 0x11 }, + { 0x034C, CRL_REG_LEN_08BIT, 0x0C }, + { 0x034D, CRL_REG_LEN_08BIT, 0xC0 }, + { 0x034E, CRL_REG_LEN_08BIT, 0x09 }, + { 0x034F, CRL_REG_LEN_08BIT, 0x90 }, + { 0x0401, CRL_REG_LEN_08BIT, 0x00 }, + { 0x0404, CRL_REG_LEN_08BIT, 0x00 }, + { 0x0405, CRL_REG_LEN_08BIT, 0x10 }, + { 0x0408, CRL_REG_LEN_08BIT, 0x04 }, + { 0x0409, CRL_REG_LEN_08BIT, 0x10 }, + { 0x040A, CRL_REG_LEN_08BIT, 0x00 }, + { 0x040B, CRL_REG_LEN_08BIT, 0x00 }, + 
{ 0x040C, CRL_REG_LEN_08BIT, 0x0C }, + { 0x040D, CRL_REG_LEN_08BIT, 0xC0 }, + { 0x040E, CRL_REG_LEN_08BIT, 0x09 }, + { 0x040F, CRL_REG_LEN_08BIT, 0x90 }, + { 0x3A22, CRL_REG_LEN_08BIT, 0x20 }, /* DPC2D settings */ + { 0x3A23, CRL_REG_LEN_08BIT, 0x14 }, + { 0x3A24, CRL_REG_LEN_08BIT, 0xE0 }, + { 0x3A25, CRL_REG_LEN_08BIT, 0x09 }, + { 0x3A26, CRL_REG_LEN_08BIT, 0x90 }, + { 0x3A2F, CRL_REG_LEN_08BIT, 0x00 }, + { 0x3A30, CRL_REG_LEN_08BIT, 0x00 }, + { 0x3A31, CRL_REG_LEN_08BIT, 0x03 }, + { 0x3A32, CRL_REG_LEN_08BIT, 0x10 }, + { 0x3A33, CRL_REG_LEN_08BIT, 0x14 }, + { 0x3A34, CRL_REG_LEN_08BIT, 0xDF }, + { 0x3A35, CRL_REG_LEN_08BIT, 0x0C }, + { 0x3A36, CRL_REG_LEN_08BIT, 0x9F }, + { 0x3A37, CRL_REG_LEN_08BIT, 0x00 }, + { 0x3A38, CRL_REG_LEN_08BIT, 0x00 }, + { 0x3A39, CRL_REG_LEN_08BIT, 0x00 }, +}; + +static struct crl_register_write_rep imx230_mode_3264x2448_scale[] = { + { 0x0344, CRL_REG_LEN_08BIT, 0x00 }, + { 0x0345, CRL_REG_LEN_08BIT, 0x00 }, + { 0x0346, CRL_REG_LEN_08BIT, 0x00 }, + { 0x0347, CRL_REG_LEN_08BIT, 0x00 }, + { 0x0348, CRL_REG_LEN_08BIT, 0x14 }, + { 0x0349, CRL_REG_LEN_08BIT, 0xDF }, + { 0x034A, CRL_REG_LEN_08BIT, 0x0F }, + { 0x034B, CRL_REG_LEN_08BIT, 0xAF }, + { 0x0381, CRL_REG_LEN_08BIT, 0x01 }, + { 0x0383, CRL_REG_LEN_08BIT, 0x01 }, + { 0x0385, CRL_REG_LEN_08BIT, 0x01 }, + { 0x0387, CRL_REG_LEN_08BIT, 0x01 }, + { 0x0900, CRL_REG_LEN_08BIT, 0x00 }, + { 0x0901, CRL_REG_LEN_08BIT, 0x11 }, + { 0x0902, CRL_REG_LEN_08BIT, 0x00 }, + { 0x305C, CRL_REG_LEN_08BIT, 0x11 }, + { 0x034C, CRL_REG_LEN_08BIT, 0x0C }, + { 0x034D, CRL_REG_LEN_08BIT, 0xC0 }, + { 0x034E, CRL_REG_LEN_08BIT, 0x09 }, + { 0x034F, CRL_REG_LEN_08BIT, 0x90 }, + { 0x0401, CRL_REG_LEN_08BIT, 0x02 }, + { 0x0404, CRL_REG_LEN_08BIT, 0x00 }, + { 0x0405, CRL_REG_LEN_08BIT, 0x1A }, + { 0x0408, CRL_REG_LEN_08BIT, 0x00 }, + { 0x0409, CRL_REG_LEN_08BIT, 0x00 }, + { 0x040A, CRL_REG_LEN_08BIT, 0x00 }, + { 0x040B, CRL_REG_LEN_08BIT, 0x00 }, + { 0x040C, CRL_REG_LEN_08BIT, 0x14 }, + { 0x040D, CRL_REG_LEN_08BIT, 
0xE0 }, + { 0x040E, CRL_REG_LEN_08BIT, 0x0F }, /*dig crop y*/ + { 0x040F, CRL_REG_LEN_08BIT, 0xB0 }, + { 0x3A22, CRL_REG_LEN_08BIT, 0x20 }, /* DPC2D settings */ + { 0x3A23, CRL_REG_LEN_08BIT, 0x14 }, + { 0x3A24, CRL_REG_LEN_08BIT, 0xE0 }, + { 0x3A25, CRL_REG_LEN_08BIT, 0x0f }, + { 0x3A26, CRL_REG_LEN_08BIT, 0xB0 }, + { 0x3A2F, CRL_REG_LEN_08BIT, 0x00 }, + { 0x3A30, CRL_REG_LEN_08BIT, 0x00 }, + { 0x3A31, CRL_REG_LEN_08BIT, 0x01 }, + { 0x3A32, CRL_REG_LEN_08BIT, 0xC0 }, + { 0x3A33, CRL_REG_LEN_08BIT, 0x14 }, + { 0x3A34, CRL_REG_LEN_08BIT, 0xDF }, + { 0x3A35, CRL_REG_LEN_08BIT, 0x0f }, + { 0x3A36, CRL_REG_LEN_08BIT, 0xAF }, + { 0x3A37, CRL_REG_LEN_08BIT, 0x00 }, + { 0x3A38, CRL_REG_LEN_08BIT, 0x00 }, + { 0x3A39, CRL_REG_LEN_08BIT, 0x00 }, +}; + +static struct crl_register_write_rep imx230_mode_3280x2460_scale[] = { + { 0x0344, CRL_REG_LEN_08BIT, 0x00 }, + { 0x0345, CRL_REG_LEN_08BIT, 0x00 }, + { 0x0346, CRL_REG_LEN_08BIT, 0x00 }, + { 0x0347, CRL_REG_LEN_08BIT, 0x00 }, + { 0x0348, CRL_REG_LEN_08BIT, 0x14 }, + { 0x0349, CRL_REG_LEN_08BIT, 0xDF }, + { 0x034A, CRL_REG_LEN_08BIT, 0x0F }, + { 0x034B, CRL_REG_LEN_08BIT, 0xAF }, + { 0x0381, CRL_REG_LEN_08BIT, 0x01 }, + { 0x0383, CRL_REG_LEN_08BIT, 0x01 }, + { 0x0385, CRL_REG_LEN_08BIT, 0x01 }, + { 0x0387, CRL_REG_LEN_08BIT, 0x01 }, + { 0x0900, CRL_REG_LEN_08BIT, 0x00 }, + { 0x0901, CRL_REG_LEN_08BIT, 0x11 }, + { 0x0902, CRL_REG_LEN_08BIT, 0x00 }, + { 0x305C, CRL_REG_LEN_08BIT, 0x11 }, + { 0x034C, CRL_REG_LEN_08BIT, 0x0C }, + { 0x034D, CRL_REG_LEN_08BIT, 0xD0 }, + { 0x034E, CRL_REG_LEN_08BIT, 0x09 }, + { 0x034F, CRL_REG_LEN_08BIT, 0x9C }, + { 0x0401, CRL_REG_LEN_08BIT, 0x02 }, + { 0x0404, CRL_REG_LEN_08BIT, 0x00 }, + { 0x0405, CRL_REG_LEN_08BIT, 0x1A }, + { 0x0408, CRL_REG_LEN_08BIT, 0x00 }, + { 0x0409, CRL_REG_LEN_08BIT, 0x00 }, + { 0x040A, CRL_REG_LEN_08BIT, 0x00 }, + { 0x040B, CRL_REG_LEN_08BIT, 0x00 }, + { 0x040C, CRL_REG_LEN_08BIT, 0x14 }, + { 0x040D, CRL_REG_LEN_08BIT, 0xE0 }, + { 0x040E, CRL_REG_LEN_08BIT, 0x0F }, + { 
0x040F, CRL_REG_LEN_08BIT, 0xB0 }, + { 0x3A22, CRL_REG_LEN_08BIT, 0x20 }, /* DPC2D settings */ + { 0x3A23, CRL_REG_LEN_08BIT, 0x14 }, + { 0x3A24, CRL_REG_LEN_08BIT, 0xE0 }, + { 0x3A25, CRL_REG_LEN_08BIT, 0x0f }, + { 0x3A26, CRL_REG_LEN_08BIT, 0xB0 }, + { 0x3A2F, CRL_REG_LEN_08BIT, 0x00 }, + { 0x3A30, CRL_REG_LEN_08BIT, 0x00 }, + { 0x3A31, CRL_REG_LEN_08BIT, 0x01 }, + { 0x3A32, CRL_REG_LEN_08BIT, 0xC0 }, + { 0x3A33, CRL_REG_LEN_08BIT, 0x14 }, + { 0x3A34, CRL_REG_LEN_08BIT, 0xDF }, + { 0x3A35, CRL_REG_LEN_08BIT, 0x0f }, + { 0x3A36, CRL_REG_LEN_08BIT, 0xAF }, + { 0x3A37, CRL_REG_LEN_08BIT, 0x00 }, + { 0x3A38, CRL_REG_LEN_08BIT, 0x00 }, + { 0x3A39, CRL_REG_LEN_08BIT, 0x00 }, +}; + +static struct crl_register_write_rep imx230_mode_3336x2502_scale[] = { + { 0x0344, CRL_REG_LEN_08BIT, 0x00 }, + { 0x0345, CRL_REG_LEN_08BIT, 0x00 }, + { 0x0346, CRL_REG_LEN_08BIT, 0x00 }, + { 0x0347, CRL_REG_LEN_08BIT, 0x00 }, + { 0x0348, CRL_REG_LEN_08BIT, 0x14 }, + { 0x0349, CRL_REG_LEN_08BIT, 0xDF }, + { 0x034A, CRL_REG_LEN_08BIT, 0x0F }, + { 0x034B, CRL_REG_LEN_08BIT, 0xAF }, + { 0x0381, CRL_REG_LEN_08BIT, 0x01 }, + { 0x0383, CRL_REG_LEN_08BIT, 0x01 }, + { 0x0385, CRL_REG_LEN_08BIT, 0x01 }, + { 0x0387, CRL_REG_LEN_08BIT, 0x01 }, + { 0x0900, CRL_REG_LEN_08BIT, 0x00 }, + { 0x0901, CRL_REG_LEN_08BIT, 0x11 }, + { 0x0902, CRL_REG_LEN_08BIT, 0x00 }, + { 0x305C, CRL_REG_LEN_08BIT, 0x11 }, + { 0x034C, CRL_REG_LEN_08BIT, 0x0D }, + { 0x034D, CRL_REG_LEN_08BIT, 0x08 }, + { 0x034E, CRL_REG_LEN_08BIT, 0x09 }, + { 0x034F, CRL_REG_LEN_08BIT, 0xC6 }, + { 0x0401, CRL_REG_LEN_08BIT, 0x02 }, + { 0x0404, CRL_REG_LEN_08BIT, 0x00 }, + { 0x0405, CRL_REG_LEN_08BIT, 0x19 }, + { 0x0408, CRL_REG_LEN_08BIT, 0x00 }, + { 0x0409, CRL_REG_LEN_08BIT, 0x00 }, + { 0x040A, CRL_REG_LEN_08BIT, 0x00 }, + { 0x040B, CRL_REG_LEN_08BIT, 0x00 }, + { 0x040C, CRL_REG_LEN_08BIT, 0x14 }, + { 0x040D, CRL_REG_LEN_08BIT, 0xE0 }, + { 0x040E, CRL_REG_LEN_08BIT, 0x0F }, + { 0x040F, CRL_REG_LEN_08BIT, 0xB0 }, + { 0x3A22, CRL_REG_LEN_08BIT, 
0x20 }, /* DPC2D settings */ + { 0x3A23, CRL_REG_LEN_08BIT, 0x14 }, + { 0x3A24, CRL_REG_LEN_08BIT, 0xE0 }, + { 0x3A25, CRL_REG_LEN_08BIT, 0x0f }, + { 0x3A26, CRL_REG_LEN_08BIT, 0xB0 }, + { 0x3A2F, CRL_REG_LEN_08BIT, 0x00 }, + { 0x3A30, CRL_REG_LEN_08BIT, 0x00 }, + { 0x3A31, CRL_REG_LEN_08BIT, 0x01 }, + { 0x3A32, CRL_REG_LEN_08BIT, 0xC0 }, + { 0x3A33, CRL_REG_LEN_08BIT, 0x14 }, + { 0x3A34, CRL_REG_LEN_08BIT, 0xDF }, + { 0x3A35, CRL_REG_LEN_08BIT, 0x0f }, + { 0x3A36, CRL_REG_LEN_08BIT, 0xAF }, + { 0x3A37, CRL_REG_LEN_08BIT, 0x00 }, + { 0x3A38, CRL_REG_LEN_08BIT, 0x00 }, + { 0x3A39, CRL_REG_LEN_08BIT, 0x00 }, +}; + +static struct crl_register_write_rep imx230_mode_2672x1504[] = { + { 0x0344, CRL_REG_LEN_08BIT, 0x00 }, + { 0x0345, CRL_REG_LEN_08BIT, 0x00 }, + { 0x0346, CRL_REG_LEN_08BIT, 0x01 }, + { 0x0347, CRL_REG_LEN_08BIT, 0xF8 }, + { 0x0348, CRL_REG_LEN_08BIT, 0x14 }, + { 0x0349, CRL_REG_LEN_08BIT, 0xDF }, + { 0x034A, CRL_REG_LEN_08BIT, 0x0D }, + { 0x034B, CRL_REG_LEN_08BIT, 0xB7 }, + { 0x0381, CRL_REG_LEN_08BIT, 0x01 }, + { 0x0383, CRL_REG_LEN_08BIT, 0x01 }, + { 0x0385, CRL_REG_LEN_08BIT, 0x01 }, + { 0x0387, CRL_REG_LEN_08BIT, 0x01 }, + { 0x0900, CRL_REG_LEN_08BIT, 0x01 }, + { 0x0901, CRL_REG_LEN_08BIT, 0x22 }, + { 0x0902, CRL_REG_LEN_08BIT, 0x00 }, + { 0x3000, CRL_REG_LEN_08BIT, 0x74 }, + { 0x305C, CRL_REG_LEN_08BIT, 0x11 }, + { 0x034C, CRL_REG_LEN_08BIT, 0x0A }, + { 0x034D, CRL_REG_LEN_08BIT, 0x70 }, + { 0x034E, CRL_REG_LEN_08BIT, 0x05 }, + { 0x034F, CRL_REG_LEN_08BIT, 0xE0 }, + { 0x0401, CRL_REG_LEN_08BIT, 0x00 }, + { 0x0404, CRL_REG_LEN_08BIT, 0x00 }, + { 0x0405, CRL_REG_LEN_08BIT, 0x10 }, + { 0x0408, CRL_REG_LEN_08BIT, 0x00 }, + { 0x0409, CRL_REG_LEN_08BIT, 0x00 }, + { 0x040A, CRL_REG_LEN_08BIT, 0x00 }, + { 0x040B, CRL_REG_LEN_08BIT, 0x00 }, + { 0x040C, CRL_REG_LEN_08BIT, 0x0A }, + { 0x040D, CRL_REG_LEN_08BIT, 0x70 }, + { 0x040E, CRL_REG_LEN_08BIT, 0x05 }, + { 0x040F, CRL_REG_LEN_08BIT, 0xE0 }, + { 0x3A22, CRL_REG_LEN_08BIT, 0x20 }, /* DPC2D settings */ + { 
0x3A23, CRL_REG_LEN_08BIT, 0x14 }, + { 0x3A24, CRL_REG_LEN_08BIT, 0xE0 }, + { 0x3A25, CRL_REG_LEN_08BIT, 0x05 }, + { 0x3A26, CRL_REG_LEN_08BIT, 0xE0 }, + { 0x3A2F, CRL_REG_LEN_08BIT, 0x00 }, + { 0x3A30, CRL_REG_LEN_08BIT, 0x00 }, + { 0x3A31, CRL_REG_LEN_08BIT, 0x01 }, + { 0x3A32, CRL_REG_LEN_08BIT, 0xF8 }, + { 0x3A33, CRL_REG_LEN_08BIT, 0x14 }, + { 0x3A34, CRL_REG_LEN_08BIT, 0xDF }, + { 0x3A35, CRL_REG_LEN_08BIT, 0x0D }, + { 0x3A36, CRL_REG_LEN_08BIT, 0xB7 }, + { 0x3A37, CRL_REG_LEN_08BIT, 0x00 }, + { 0x3A38, CRL_REG_LEN_08BIT, 0x01 }, + { 0x3A39, CRL_REG_LEN_08BIT, 0x00 }, +}; + +static struct crl_register_write_rep imx230_mode_1940x1092[] = { + { 0x0344, CRL_REG_LEN_08BIT, 0x00 }, + { 0x0345, CRL_REG_LEN_08BIT, 0x00 }, + { 0x0346, CRL_REG_LEN_08BIT, 0x01 }, + { 0x0347, CRL_REG_LEN_08BIT, 0xF8 }, + { 0x0348, CRL_REG_LEN_08BIT, 0x14 }, + { 0x0349, CRL_REG_LEN_08BIT, 0xDF }, + { 0x034A, CRL_REG_LEN_08BIT, 0x0D }, + { 0x034B, CRL_REG_LEN_08BIT, 0xB7 }, + { 0x0381, CRL_REG_LEN_08BIT, 0x01 }, + { 0x0383, CRL_REG_LEN_08BIT, 0x01 }, + { 0x0385, CRL_REG_LEN_08BIT, 0x01 }, + { 0x0387, CRL_REG_LEN_08BIT, 0x01 }, + { 0x0900, CRL_REG_LEN_08BIT, 0x01 }, + { 0x0901, CRL_REG_LEN_08BIT, 0x22 }, + { 0x0902, CRL_REG_LEN_08BIT, 0x00 }, + { 0x305C, CRL_REG_LEN_08BIT, 0x11 }, + { 0x034C, CRL_REG_LEN_08BIT, 0x07 } /*1920 x 1080*/, + { 0x034D, CRL_REG_LEN_08BIT, 0x94 }, + { 0x034E, CRL_REG_LEN_08BIT, 0x04 }, + { 0x034F, CRL_REG_LEN_08BIT, 0x44 }, + { 0x0401, CRL_REG_LEN_08BIT, 0x02 }, + { 0x0404, CRL_REG_LEN_08BIT, 0x00 }, + { 0x0405, CRL_REG_LEN_08BIT, 0x16 }, + { 0x0408, CRL_REG_LEN_08BIT, 0x00 }, + { 0x0409, CRL_REG_LEN_08BIT, 0x00 }, + { 0x040A, CRL_REG_LEN_08BIT, 0x00 }, + { 0x040B, CRL_REG_LEN_08BIT, 0x00 }, + { 0x040C, CRL_REG_LEN_08BIT, 0x0A }, + { 0x040D, CRL_REG_LEN_08BIT, 0x70 }, + { 0x040E, CRL_REG_LEN_08BIT, 0x05 }, + { 0x040F, CRL_REG_LEN_08BIT, 0xE0 }, + { 0x3A22, CRL_REG_LEN_08BIT, 0x20 }, /* DPC2D settings */ + { 0x3A23, CRL_REG_LEN_08BIT, 0x14 }, + { 0x3A24, 
CRL_REG_LEN_08BIT, 0xE0 }, + { 0x3A25, CRL_REG_LEN_08BIT, 0x05 }, + { 0x3A26, CRL_REG_LEN_08BIT, 0xE0 }, + { 0x3A2F, CRL_REG_LEN_08BIT, 0x00 }, + { 0x3A30, CRL_REG_LEN_08BIT, 0x00 }, + { 0x3A31, CRL_REG_LEN_08BIT, 0x01 }, + { 0x3A32, CRL_REG_LEN_08BIT, 0xF8 }, + { 0x3A33, CRL_REG_LEN_08BIT, 0x14 }, + { 0x3A34, CRL_REG_LEN_08BIT, 0xDF }, + { 0x3A35, CRL_REG_LEN_08BIT, 0x0D }, + { 0x3A36, CRL_REG_LEN_08BIT, 0xB7 }, + { 0x3A37, CRL_REG_LEN_08BIT, 0x00 }, + { 0x3A38, CRL_REG_LEN_08BIT, 0x01 }, + { 0x3A39, CRL_REG_LEN_08BIT, 0x00 }, +}; + +static struct crl_register_write_rep imx230_mode_1440[] = { + { 0x0344, CRL_REG_LEN_08BIT, 0x00 }, + { 0x0345, CRL_REG_LEN_08BIT, 0x00 }, + { 0x0346, CRL_REG_LEN_08BIT, 0x05 }, + { 0x0347, CRL_REG_LEN_08BIT, 0x08 }, + { 0x0348, CRL_REG_LEN_08BIT, 0x14 }, + { 0x0349, CRL_REG_LEN_08BIT, 0xDF }, + { 0x034A, CRL_REG_LEN_08BIT, 0x0A }, + { 0x034B, CRL_REG_LEN_08BIT, 0xA7 }, + { 0x0381, CRL_REG_LEN_08BIT, 0x01 }, + { 0x0383, CRL_REG_LEN_08BIT, 0x01 }, + { 0x0385, CRL_REG_LEN_08BIT, 0x01 }, + { 0x0387, CRL_REG_LEN_08BIT, 0x01 }, + { 0x0900, CRL_REG_LEN_08BIT, 0x01 }, + { 0x0901, CRL_REG_LEN_08BIT, 0x22 }, + { 0x0902, CRL_REG_LEN_08BIT, 0x00 }, + { 0x305C, CRL_REG_LEN_08BIT, 0x11 }, + { 0x034C, CRL_REG_LEN_08BIT, 0x0A }, /* 2560 x 1440 */ + { 0x034D, CRL_REG_LEN_08BIT, 0x00 }, + { 0x034E, CRL_REG_LEN_08BIT, 0x05 }, + { 0x034F, CRL_REG_LEN_08BIT, 0xA0 }, + { 0x0401, CRL_REG_LEN_08BIT, 0x00 }, + { 0x0404, CRL_REG_LEN_08BIT, 0x00 }, + { 0x0405, CRL_REG_LEN_08BIT, 0x10 }, + { 0x0408, CRL_REG_LEN_08BIT, 0x05 }, + { 0x0409, CRL_REG_LEN_08BIT, 0x70 }, + { 0x040A, CRL_REG_LEN_08BIT, 0x00 }, + { 0x040B, CRL_REG_LEN_08BIT, 0x00 }, + { 0x040C, CRL_REG_LEN_08BIT, 0x0A }, + { 0x040D, CRL_REG_LEN_08BIT, 0x00 }, + { 0x040E, CRL_REG_LEN_08BIT, 0x05 }, + { 0x040F, CRL_REG_LEN_08BIT, 0xA0 }, + { 0x3A22, CRL_REG_LEN_08BIT, 0x20 }, /* DPC2D settings */ + { 0x3A23, CRL_REG_LEN_08BIT, 0x14 }, + { 0x3A24, CRL_REG_LEN_08BIT, 0xE0 }, + { 0x3A25, CRL_REG_LEN_08BIT, 
0x05 }, + { 0x3A26, CRL_REG_LEN_08BIT, 0xA0 }, + { 0x3A2F, CRL_REG_LEN_08BIT, 0x00 }, + { 0x3A30, CRL_REG_LEN_08BIT, 0x00 }, + { 0x3A31, CRL_REG_LEN_08BIT, 0x05 }, + { 0x3A32, CRL_REG_LEN_08BIT, 0x08 }, + { 0x3A33, CRL_REG_LEN_08BIT, 0x14 }, + { 0x3A34, CRL_REG_LEN_08BIT, 0xDF }, + { 0x3A35, CRL_REG_LEN_08BIT, 0x0A }, + { 0x3A36, CRL_REG_LEN_08BIT, 0xA7 }, + { 0x3A37, CRL_REG_LEN_08BIT, 0x00 }, + { 0x3A38, CRL_REG_LEN_08BIT, 0x00 }, + { 0x3A39, CRL_REG_LEN_08BIT, 0x00 }, +}; + + +static struct crl_register_write_rep imx230_mode_720[] = { + { 0x0344, CRL_REG_LEN_08BIT, 0x00 }, + { 0x0345, CRL_REG_LEN_08BIT, 0x00 }, + { 0x0346, CRL_REG_LEN_08BIT, 0x02 }, + { 0x0347, CRL_REG_LEN_08BIT, 0x18 }, + { 0x0348, CRL_REG_LEN_08BIT, 0x14 }, + { 0x0349, CRL_REG_LEN_08BIT, 0xDF }, + { 0x034A, CRL_REG_LEN_08BIT, 0x0D }, + { 0x034B, CRL_REG_LEN_08BIT, 0x97 }, + { 0x0381, CRL_REG_LEN_08BIT, 0x01 }, + { 0x0383, CRL_REG_LEN_08BIT, 0x01 }, + { 0x0385, CRL_REG_LEN_08BIT, 0x01 }, + { 0x0387, CRL_REG_LEN_08BIT, 0x01 }, + { 0x0900, CRL_REG_LEN_08BIT, 0x01 }, + { 0x0901, CRL_REG_LEN_08BIT, 0x44 }, + { 0x0902, CRL_REG_LEN_08BIT, 0x00 }, + { 0x305C, CRL_REG_LEN_08BIT, 0x11 }, + { 0x034C, CRL_REG_LEN_08BIT, 0x05 } /* 1296 x 736 */, + { 0x034D, CRL_REG_LEN_08BIT, 0x10 }, + { 0x034E, CRL_REG_LEN_08BIT, 0x02 }, + { 0x034F, CRL_REG_LEN_08BIT, 0xE0 }, + { 0x0401, CRL_REG_LEN_08BIT, 0x00 }, + { 0x0404, CRL_REG_LEN_08BIT, 0x00 }, + { 0x0405, CRL_REG_LEN_08BIT, 0x10 }, + { 0x0408, CRL_REG_LEN_08BIT, 0x00 }, + { 0x0409, CRL_REG_LEN_08BIT, 0x14 }, + { 0x040A, CRL_REG_LEN_08BIT, 0x00 }, + { 0x040B, CRL_REG_LEN_08BIT, 0x00 }, + { 0x040C, CRL_REG_LEN_08BIT, 0x05 }, + { 0x040D, CRL_REG_LEN_08BIT, 0x10 }, + { 0x040E, CRL_REG_LEN_08BIT, 0x02 }, + { 0x040F, CRL_REG_LEN_08BIT, 0xE0 }, + { 0x3A22, CRL_REG_LEN_08BIT, 0x20 }, /* DPC2D settings */ + { 0x3A23, CRL_REG_LEN_08BIT, 0x14 }, + { 0x3A24, CRL_REG_LEN_08BIT, 0xE0 }, + { 0x3A25, CRL_REG_LEN_08BIT, 0x05 }, + { 0x3A26, CRL_REG_LEN_08BIT, 0xE0 }, + { 0x3A2F, 
CRL_REG_LEN_08BIT, 0x00 }, + { 0x3A30, CRL_REG_LEN_08BIT, 0x00 }, + { 0x3A31, CRL_REG_LEN_08BIT, 0x02 }, + { 0x3A32, CRL_REG_LEN_08BIT, 0x18 }, + { 0x3A33, CRL_REG_LEN_08BIT, 0x14 }, + { 0x3A34, CRL_REG_LEN_08BIT, 0xDF }, + { 0x3A35, CRL_REG_LEN_08BIT, 0x0D }, + { 0x3A36, CRL_REG_LEN_08BIT, 0x97 }, + { 0x3A37, CRL_REG_LEN_08BIT, 0x00 }, + { 0x3A38, CRL_REG_LEN_08BIT, 0x02 }, + { 0x3A39, CRL_REG_LEN_08BIT, 0x00 }, +}; + + +static struct crl_register_write_rep imx230_streamon_regs[] = { + { 0x0100, CRL_REG_LEN_08BIT, 0x01 } +}; + +static struct crl_register_write_rep imx230_streamoff_regs[] = { + { 0x0100, CRL_REG_LEN_08BIT, 0x00 } +}; + +static struct crl_register_write_rep imx230_data_fmt_width10[] = { + { 0x0112, CRL_REG_LEN_16BIT, 0x0a0a } +}; + +static struct crl_register_write_rep imx230_data_fmt_width8[] = { + { 0x0112, CRL_REG_LEN_16BIT, 0x0808 } +}; + +static struct crl_register_write_rep imx230_data_fmt_width14[] = { + { 0x0112, CRL_REG_LEN_16BIT, 0x0e0e } +}; + +static struct crl_arithmetic_ops imx230_vflip_ops[] = { + { + .op = CRL_BITWISE_LSHIFT, + .operand.entity_val = 1, + }, +}; + +static struct crl_dynamic_register_access imx230_h_flip_regs[] = { + { + .address = 0x0101, + .len = CRL_REG_LEN_08BIT | CRL_REG_READ_AND_UPDATE, + .ops_items = 0, + .ops = 0, + .mask = 0x1, + }, +}; + +static struct crl_dynamic_register_access imx230_v_flip_regs[] = { + { + .address = 0x0101, + .len = CRL_REG_LEN_08BIT | CRL_REG_READ_AND_UPDATE, + .ops_items = ARRAY_SIZE(imx230_vflip_ops), + .ops = imx230_vflip_ops, + .mask = 0x2, + }, +}; + + +static struct crl_dynamic_register_access imx230_ana_gain_global_regs[] = { + { + .address = 0x0204, + .len = CRL_REG_LEN_16BIT, + .ops_items = 0, + .ops = 0, + .mask = 0xffff, + }, +}; + +static struct crl_dynamic_register_access imx230_dig_gain_regs[] = { + { + .address = 0x020e, + .len = CRL_REG_LEN_16BIT, + .ops_items = 0, + .ops = 0, + .mask = 0xfff, + }, + { + .address = 0x0210, + .len = CRL_REG_LEN_16BIT, + .ops_items = 0, + 
.ops = 0, + .mask = 0xfff, + }, + { + .address = 0x0212, + .len = CRL_REG_LEN_16BIT, + .ops_items = 0, + .ops = 0, + .mask = 0xfff, + }, + { + .address = 0x0214, + .len = CRL_REG_LEN_16BIT, + .ops_items = 0, + .ops = 0, + .mask = 0xfff, + }, +}; + + + +static struct crl_dynamic_register_access imx230_exposure_regs[] = { + { + .address = 0x0202, + .len = CRL_REG_LEN_16BIT, + .ops_items = 0, + .ops = 0, + .mask = 0xffff, + } +}; + +static struct crl_dynamic_register_access imx230_fll_regs[] = { + { + .address = 0x0340, + .len = CRL_REG_LEN_16BIT, + .ops_items = 0, + .ops = 0, + .mask = 0xffff, + }, +}; + +static struct crl_dynamic_register_access imx230_llp_regs[] = { + { + .address = 0x0342, + .len = CRL_REG_LEN_16BIT, + .ops_items = 0, + .ops = 0, + .mask = 0xffff, + }, +}; + +static struct crl_dynamic_register_access imx230_hdr_et_ratio_regs[] = { + { 0x0222, CRL_REG_LEN_08BIT, 0xff, 0, NULL, 0 }, +}; + +static struct crl_register_write_rep imx230_hdr_mode_off[] = { + { 0x0220, CRL_REG_LEN_08BIT, 0x00 }, + { 0x0221, CRL_REG_LEN_08BIT, 0x11 }, + { 0x0224, CRL_REG_LEN_08BIT, 0x01 }, + { 0x0225, CRL_REG_LEN_08BIT, 0xF4 }, + { 0x3000, CRL_REG_LEN_08BIT, 0x74 }, /* HDR output control */ + { 0x3001, CRL_REG_LEN_08BIT, 0x00 }, + { 0x3006, CRL_REG_LEN_08BIT, 0x01 }, + { 0x3007, CRL_REG_LEN_08BIT, 0x02 }, + { 0x31e0, CRL_REG_LEN_08BIT, 0x03 }, + { 0x31e1, CRL_REG_LEN_08BIT, 0xff }, + { 0x31e4, CRL_REG_LEN_08BIT, 0x02 }, + { 0x30b4, CRL_REG_LEN_08BIT, 0x00 }, + { 0x30b5, CRL_REG_LEN_08BIT, 0x00 }, + { 0x30b6, CRL_REG_LEN_08BIT, 0x00 }, + { 0x30b7, CRL_REG_LEN_08BIT, 0x00 }, + { 0x30b8, CRL_REG_LEN_08BIT, 0x00 }, + { 0x30b9, CRL_REG_LEN_08BIT, 0x00 }, + { 0x30ba, CRL_REG_LEN_08BIT, 0x00 }, + { 0x30bb, CRL_REG_LEN_08BIT, 0x00 }, + { 0x30bc, CRL_REG_LEN_08BIT, 0x00 }, +}; + +/* HDR Type3 ZIGZAG */ +static struct crl_register_write_rep imx230_hdr_mode_type3[] = { + /* + * 0x220 HDR control register + * bit 0: 0:HDR Disable 1:HDR enable *1-> below + * bit 1: 0:Combined gain 
1:separate gain *0-> below + * bit 5: 0:Use ET Ratio 1:Short exposure by direct control *0-> below + */ + { 0x0220, CRL_REG_LEN_08BIT, 0x01 }, + { 0x0221, CRL_REG_LEN_08BIT, 0x11 }, + { 0x0224, CRL_REG_LEN_08BIT, 0x01 }, + { 0x0225, CRL_REG_LEN_08BIT, 0xF4 }, + /* Enable ATR 0x3000 bit 0 */ + { 0x3000, CRL_REG_LEN_08BIT, 0x01 }, + { 0x3001, CRL_REG_LEN_08BIT, 0x00 }, + { 0x3006, CRL_REG_LEN_08BIT, 0x01 }, + { 0x3007, CRL_REG_LEN_08BIT, 0x02 }, + { 0x31e0, CRL_REG_LEN_08BIT, 0x03 }, + { 0x31e4, CRL_REG_LEN_08BIT, 0x02 }, + { 0x30b4, CRL_REG_LEN_08BIT, 0x01 }, + { 0x30b5, CRL_REG_LEN_08BIT, 0x01 }, + { 0x30b6, CRL_REG_LEN_08BIT, 0x01 }, + { 0x30b7, CRL_REG_LEN_08BIT, 0x01 }, + { 0x30b8, CRL_REG_LEN_08BIT, 0x01 }, + { 0x30b9, CRL_REG_LEN_08BIT, 0x01 }, + { 0x30ba, CRL_REG_LEN_08BIT, 0x01 }, + { 0x30bb, CRL_REG_LEN_08BIT, 0x01 }, + { 0x30bc, CRL_REG_LEN_08BIT, 0x01 }, +}; + +/* HDR Type2 */ +static struct crl_register_write_rep imx230_hdr_mode_type2[] = { + /* + * 0x220 HDR control register + * bit 0: 0:HDR Disable 1:HDR enable *1-> below + * bit 1: 0:Combined gain 1:separate gain *0-> below + * bit 5: 0:Use ET Ratio 1:Short exposure by direct control *0-> below + */ + { 0x0220, CRL_REG_LEN_08BIT, 0x01 }, + { 0x0221, CRL_REG_LEN_08BIT, 0x11 }, + { 0x0224, CRL_REG_LEN_08BIT, 0x01 }, + { 0x0225, CRL_REG_LEN_08BIT, 0xF4 }, + + /* Disable ATR for Type 2 0x3000 bit 0 */ + { 0x3000, CRL_REG_LEN_08BIT, 0x64 }, + { 0x3001, CRL_REG_LEN_08BIT, 0x00 }, + { 0x3006, CRL_REG_LEN_08BIT, 0x01 }, + { 0x3007, CRL_REG_LEN_08BIT, 0x01 }, + { 0x31e0, CRL_REG_LEN_08BIT, 0x3f }, + { 0x31e4, CRL_REG_LEN_08BIT, 0x02 }, + { 0x30b4, CRL_REG_LEN_08BIT, 0x00 }, + { 0x30b5, CRL_REG_LEN_08BIT, 0x00 }, + { 0x30b6, CRL_REG_LEN_08BIT, 0x00 }, + { 0x30b7, CRL_REG_LEN_08BIT, 0x00 }, + { 0x30b8, CRL_REG_LEN_08BIT, 0x00 }, + { 0x30b9, CRL_REG_LEN_08BIT, 0x00 }, + { 0x30ba, CRL_REG_LEN_08BIT, 0x00 }, + { 0x30bb, CRL_REG_LEN_08BIT, 0x00 }, + { 0x30bc, CRL_REG_LEN_08BIT, 0x00 }, +}; + +/* HDR Type1 */ +static 
struct crl_register_write_rep imx230_hdr_mode_type1[] = { + /* + * 0x220 HDR control register + * bit 0: 0:HDR Disable 1:HDR enable *1-> below + * bit 1: 0:Combined gain 1:separate gain *0-> below + * bit 5: 0:Use ET Ratio 1:Short exposure by direct control *0-> below + */ + { 0x0220, CRL_REG_LEN_08BIT, 0x01 }, + { 0x0221, CRL_REG_LEN_08BIT, 0x11 }, + { 0x0224, CRL_REG_LEN_08BIT, 0x01 }, + { 0x0225, CRL_REG_LEN_08BIT, 0xF4 }, + /* ATR is enabled 0x3000 bit 0 */ + { 0x3000, CRL_REG_LEN_08BIT, 0x75 }, + { 0x3001, CRL_REG_LEN_08BIT, 0x00 }, + { 0x3006, CRL_REG_LEN_08BIT, 0x01 }, + { 0x3007, CRL_REG_LEN_08BIT, 0x01 }, + { 0x31e0, CRL_REG_LEN_08BIT, 0x3f }, + { 0x31e4, CRL_REG_LEN_08BIT, 0x02 }, + { 0x30b4, CRL_REG_LEN_08BIT, 0x00 }, + { 0x30b5, CRL_REG_LEN_08BIT, 0x00 }, + { 0x30b6, CRL_REG_LEN_08BIT, 0x00 }, + { 0x30b7, CRL_REG_LEN_08BIT, 0x00 }, + { 0x30b8, CRL_REG_LEN_08BIT, 0x00 }, + { 0x30b9, CRL_REG_LEN_08BIT, 0x00 }, + { 0x30ba, CRL_REG_LEN_08BIT, 0x00 }, + { 0x30bb, CRL_REG_LEN_08BIT, 0x00 }, + { 0x30bc, CRL_REG_LEN_08BIT, 0x00 }, +}; + +/* + * IMX230 HDR types + * Type 1 10bit output after HDR and ATR blocks + * Type 2 14bit RAW after HDR block + * Type 3 10bit ZIGZAG pattern + */ +static struct crl_dep_reg_list imx230_hdr_types_regs[] = { + { CRL_DEP_CTRL_CONDITION_EQUAL, + { CRL_DYNAMIC_VAL_OPERAND_TYPE_CONST, 0 }, + ARRAY_SIZE(imx230_hdr_mode_off), imx230_hdr_mode_off, 0, 0 }, + { CRL_DEP_CTRL_CONDITION_EQUAL, + { CRL_DYNAMIC_VAL_OPERAND_TYPE_CONST, 1 }, + ARRAY_SIZE(imx230_hdr_mode_type1), imx230_hdr_mode_type1, 0, 0 }, + { CRL_DEP_CTRL_CONDITION_EQUAL, + { CRL_DYNAMIC_VAL_OPERAND_TYPE_CONST, 2 }, + ARRAY_SIZE(imx230_hdr_mode_type2), imx230_hdr_mode_type2, 0, 0 }, + { CRL_DEP_CTRL_CONDITION_EQUAL, + { CRL_DYNAMIC_VAL_OPERAND_TYPE_CONST, 3 }, + ARRAY_SIZE(imx230_hdr_mode_type3), imx230_hdr_mode_type3, 0, 0 }, +}; + +/* PDAF ON -> 0X3121 = 1 when HDR is off and 0x3121 = 0 when HDR is on */ +static struct crl_arithmetic_ops imx230_reg3121_pdaf_on[] = { + { 
CRL_ASSIGNMENT, + { CRL_DYNAMIC_VAL_OPERAND_TYPE_CTRL_VAL, CRL_CID_IMX230_HDR_MODE } }, + { CRL_BITWISE_COMPLEMENT, { CRL_DYNAMIC_VAL_OPERAND_TYPE_CONST, 0 } }, + { CRL_BITWISE_AND, { CRL_DYNAMIC_VAL_OPERAND_TYPE_CONST, 1 } }, +}; + +/* PDAF ON -> 0X3001 = 0 when HDR is off and 0x3001 = 1 when HDR is on */ +static struct crl_arithmetic_ops imx230_reg3001_pdaf_on[] = { + { CRL_ASSIGNMENT, + { CRL_DYNAMIC_VAL_OPERAND_TYPE_CTRL_VAL, CRL_CID_IMX230_HDR_MODE } }, + { CRL_BITWISE_AND, { CRL_DYNAMIC_VAL_OPERAND_TYPE_CONST, 1 } }, +}; + +/* 0x3001 and 0x3121 behaves differently when HDR is ON or OFF */ +static struct crl_dynamic_register_access imx230_pdaf_on[] = { + { 0x3121, CRL_REG_LEN_08BIT, 0xff, + ARRAY_SIZE(imx230_reg3121_pdaf_on), imx230_reg3121_pdaf_on, 0 }, + { 0x3001, CRL_REG_LEN_08BIT, 0xff, + ARRAY_SIZE(imx230_reg3001_pdaf_on), imx230_reg3001_pdaf_on, 0 }, + { 0x3123, CRL_REG_LEN_08BIT, 0xff, 0, 0, 0 }, +}; + +/* All the following registers are set to 0 when PDAF is Off*/ +static struct crl_dynamic_register_access imx230_pdaf_off[] = { + { 0x3121, CRL_REG_LEN_08BIT, 0xff, 0, 0, 0 }, + { 0x3001, CRL_REG_LEN_08BIT, 0xff, 0, 0, 0 }, + { 0x3123, CRL_REG_LEN_08BIT, 0xff, 0, 0, 0 }, +}; + +/* + * There are two different registers to enable/disable PDAF with HDR On and Off + * + * PDAF On, HDR Off -> 0x3121: 1, 0x3001: 0, 0x3123: 1 + * PDAF Off, HDR Off-> 0x3121: 0, 0x3001: 0, 0x3123: 0 + * PDAF Off, HDR On -> 0x3121: 0, 0x3001: 0, 0x3123: 0 + * PDAF On, HDR On -> 0x3121: 0, 0x3001: 1, 0x3123: 1 + */ +static struct crl_dep_reg_list imx230_pdaf_ctrl_regs[] = { + { CRL_DEP_CTRL_CONDITION_EQUAL, + { CRL_DYNAMIC_VAL_OPERAND_TYPE_CONST, 1 }, 0, 0, + ARRAY_SIZE(imx230_pdaf_on), imx230_pdaf_on }, + { CRL_DEP_CTRL_CONDITION_EQUAL, + { CRL_DYNAMIC_VAL_OPERAND_TYPE_CONST, 0 }, 0, 0, + ARRAY_SIZE(imx230_pdaf_off), imx230_pdaf_off }, +}; + +/* PDAF enable controls are dependent on HDR on or OFF */ +struct crl_dep_ctrl_provision imx230_hdr_dep_controls[] = { + /* Self update PDAF 
settins after change in HDR settings */ + { CRL_CID_SENSOR_PDAF, CRL_DEP_CTRL_ACTION_TYPE_DEP_CTRL, 0, 0 }, +}; + +static struct crl_sensor_detect_config imx230_sensor_detect_regset[] = { + { + .reg = { 0x0019, CRL_REG_LEN_08BIT, 0x000000ff }, + .width = 5, + }, + { + .reg = { 0x0018, CRL_REG_LEN_08BIT, 0x000000ff }, + .width = 5, + }, + { + .reg = { 0x0016, CRL_REG_LEN_16BIT, 0x0000ffff }, + .width = 7, + }, +}; + +static struct crl_arithmetic_ops imx230_thermal_ops[] = { + { + .op = CRL_ASSIGNMENT, + .operand.entity_type = CRL_DYNAMIC_VAL_OPERAND_TYPE_REG_VAL, + .operand.entity_val = 0x013a, + }, +}; + +static struct crl_dynamic_register_access imx230_thermal_regs[] = { + { + .address = 0x013a, + .len = CRL_REG_LEN_08BIT, + .ops_items = ARRAY_SIZE(imx230_thermal_ops), + .ops = imx230_thermal_ops, + .mask = 0xff, + }, +}; + +static struct crl_pll_configuration imx230_pll_configurations[] = { + { + .input_clk = 24000000, + .op_sys_clk = 749600000, + .bitsperpixel = 10, + .pixel_rate_csi = 599680000, + .pixel_rate_pa = 600000000, + .csi_lanes = 4, + .comp_items = 0, + .ctrl_data = 0, + .pll_regs_items = ARRAY_SIZE(imx230_pll_1500mbps), + .pll_regs = imx230_pll_1500mbps, + }, + { + .input_clk = 24000000, + .op_sys_clk = 749600000, /* Actual value is 750000000 */ + .bitsperpixel = 14, + .pixel_rate_csi = 428570000, + .pixel_rate_pa = 573000000, + .csi_lanes = 4, + .comp_items = 0, + .ctrl_data = 0, + .pll_regs_items = ARRAY_SIZE(imx230_pll_4_14_1500mbps), + .pll_regs = imx230_pll_4_14_1500mbps, + }, + { + .input_clk = 24000000, + .op_sys_clk = 749600000, + .bitsperpixel = 8, + .pixel_rate_csi = 374800000, + .pixel_rate_pa = 600000000, + .csi_lanes = 2, + .comp_items = 0, + .ctrl_data = 0, + .pll_regs_items = ARRAY_SIZE(imx230_pll_2_8_1500mbps), + .pll_regs = imx230_pll_2_8_1500mbps, + }, + { + .input_clk = 24000000, + .op_sys_clk = 749600000, + .bitsperpixel = 10, + .pixel_rate_csi = 299840000, + .pixel_rate_pa = 600000000, + .csi_lanes = 2, + .comp_items = 0, + 
.ctrl_data = 0, + .pll_regs_items = ARRAY_SIZE(imx230_pll_2_10_1500mbps), + .pll_regs = imx230_pll_2_10_1500mbps, + }, +}; + +static struct crl_subdev_rect_rep imx230_full_4_3_rects[] = { + { + .subdev_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .in_rect = { 0, 0, 5344, 4016 }, + .out_rect = { 0, 0, 5344, 4016 }, + }, + { + .subdev_type = CRL_SUBDEV_TYPE_BINNER, + .in_rect = { 0, 0, 5344, 4016 }, + .out_rect = { 0, 0, 5344, 4016 }, + }, + { + .subdev_type = CRL_SUBDEV_TYPE_SCALER, + .in_rect = { 0, 0, 5344, 4016 }, + .out_rect = { 0, 0, 5344, 4016 }, + }, +}; + +static struct crl_subdev_rect_rep imx230_3280x2460_s_rects[] = { + { + .subdev_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .in_rect = { 0, 0, 5344, 4016 }, + .out_rect = { 0, 0, 5344, 4016 }, + }, + { + .subdev_type = CRL_SUBDEV_TYPE_BINNER, + .in_rect = { 0, 0, 5344, 4016 }, + .out_rect = { 0, 0, 5344, 4016 }, + }, + { + .subdev_type = CRL_SUBDEV_TYPE_SCALER, + .in_rect = { 0, 0, 5344, 4016 }, + .out_rect = { 0, 0, 3280, 2460 }, + }, +}; + +static struct crl_subdev_rect_rep imx230_3264x2448_s_rects[] = { + { + .subdev_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .in_rect = { 0, 0, 5344, 4016 }, + .out_rect = { 0, 0, 5344, 4016 }, + }, + { + .subdev_type = CRL_SUBDEV_TYPE_BINNER, + .in_rect = { 0, 0, 5344, 4016 }, + .out_rect = { 0, 0, 5344, 4016 }, + }, + { + .subdev_type = CRL_SUBDEV_TYPE_SCALER, + .in_rect = { 0, 0, 5344, 4016 }, + .out_rect = { 0, 0, 3264, 2448 }, + }, +}; + +static struct crl_subdev_rect_rep imx230_3336x2502_s_rects[] = { + { + .subdev_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .in_rect = { 0, 0, 5344, 4016 }, + .out_rect = { 0, 0, 5344, 4016 }, + }, + { + .subdev_type = CRL_SUBDEV_TYPE_BINNER, + .in_rect = { 0, 0, 5344, 4016 }, + .out_rect = { 0, 0, 5344, 4016 }, + }, + { + .subdev_type = CRL_SUBDEV_TYPE_SCALER, + .in_rect = { 0, 0, 5344, 4016 }, + .out_rect = { 0, 0, 3336, 2502 }, + }, +}; + +static struct crl_subdev_rect_rep imx230_3264x2448_c_rects[] = { + { + .subdev_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + 
.in_rect = { 0, 0, 5344, 4016 }, + .out_rect = { 0, 504, 5344, 3008 }, + }, + { + .subdev_type = CRL_SUBDEV_TYPE_BINNER, + .in_rect = { 0, 0, 5344, 3008 }, + .out_rect = { 0, 0, 5344, 3008 }, + }, + { + .subdev_type = CRL_SUBDEV_TYPE_SCALER, + .in_rect = { 0, 0, 5344, 3008 }, + .out_rect = { 0, 0, 3264, 2448 }, + }, +}; + +static struct crl_subdev_rect_rep imx230_full_16_9_rects[] = { + { + .subdev_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .in_rect = { 0, 0, 5344, 4016 }, + .out_rect = { 0, 504, 5344, 3008 }, + }, + { + .subdev_type = CRL_SUBDEV_TYPE_BINNER, + .in_rect = { 0, 0, 5344, 3008 }, + .out_rect = { 0, 0, 5344, 3008 }, + }, + { + .subdev_type = CRL_SUBDEV_TYPE_SCALER, + .in_rect = { 0, 0, 5344, 3008 }, + .out_rect = { 0, 0, 5344, 3008 }, + }, +}; + +static struct crl_subdev_rect_rep imx230_4k2k_rects[] = { + { + .subdev_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .in_rect = { 0, 0, 5344, 4016 }, + .out_rect = { 0, 504, 5344, 3008 }, + }, + { + .subdev_type = CRL_SUBDEV_TYPE_BINNER, + .in_rect = { 0, 0, 5344, 3008 }, + .out_rect = { 0, 0, 5344, 3008 }, + }, + { + .subdev_type = CRL_SUBDEV_TYPE_SCALER, + .in_rect = { 0, 0, 5344, 3008 }, + .out_rect = { 0, 0, 3886, 2184 }, + }, +}; + +static struct crl_subdev_rect_rep imx230_uhd_crop_rects[] = { + { + .subdev_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .in_rect = { 0, 0, 5344, 4016 }, + .out_rect = { 0, 928, 5344, 2160 }, + }, + { + .subdev_type = CRL_SUBDEV_TYPE_BINNER, + .in_rect = { 0, 0, 5344, 2160 }, + .out_rect = { 0, 0, 5344, 2160 }, + }, + { + .subdev_type = CRL_SUBDEV_TYPE_SCALER, + .in_rect = { 0, 0, 5344, 2160 }, + .out_rect = { 0, 0, 3840, 2160 }, + }, +}; + +static struct crl_subdev_rect_rep imx230_2k2k_rects[] = { + { + .subdev_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .in_rect = { 0, 0, 5344, 4016 }, + .out_rect = { 0, 0, 5344, 4016 }, + }, + { + .subdev_type = CRL_SUBDEV_TYPE_BINNER, + .in_rect = { 0, 0, 5344, 4016 }, + .out_rect = { 0, 0, 2672, 2008 }, + }, + { + .subdev_type = CRL_SUBDEV_TYPE_SCALER, + .in_rect 
= { 0, 0, 2672, 2008 }, + .out_rect = { 0, 0, 2672, 2008 }, + }, +}; + + +static struct crl_subdev_rect_rep imx230_1940x1092_rects[] = { + { + .subdev_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .in_rect = { 0, 0, 5344, 4016 }, + .out_rect = { 0, 504, 5344, 3008 }, + }, + { + .subdev_type = CRL_SUBDEV_TYPE_BINNER, + .in_rect = { 0, 0, 5344, 3008 }, + .out_rect = { 0, 0, 2672, 1504 }, + }, + { + .subdev_type = CRL_SUBDEV_TYPE_SCALER, + .in_rect = { 0, 0, 2672, 1504 }, + .out_rect = { 0, 0, 1940, 1092 }, + }, +}; + +static struct crl_subdev_rect_rep imx230_2672x1504_rects[] = { + { + .subdev_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .in_rect = { 0, 0, 5344, 4016 }, + .out_rect = { 0, 504, 5344, 3008 }, + }, + { + .subdev_type = CRL_SUBDEV_TYPE_BINNER, + .in_rect = { 0, 0, 5344, 3008 }, + .out_rect = { 0, 0, 2672, 1504 }, + }, + { + .subdev_type = CRL_SUBDEV_TYPE_SCALER, + .in_rect = { 0, 0, 2672, 1504 }, + .out_rect = { 0, 0, 2672, 1504 }, + }, +}; + +static struct crl_subdev_rect_rep imx230_1440_rects[] = { + { + .subdev_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .in_rect = { 0, 0, 5344, 4016 }, + .out_rect = { 0, 1288, 5344, 1440 }, + }, + { + .subdev_type = CRL_SUBDEV_TYPE_BINNER, + .in_rect = { 0, 0, 5344, 1440 }, + .out_rect = { 0, 0, 5344, 1440 }, + }, + { + .subdev_type = CRL_SUBDEV_TYPE_SCALER, + .in_rect = { 0, 0, 5344, 1440 }, + .out_rect = { 0, 0, 2560, 1440 }, + }, +}; + +static struct crl_subdev_rect_rep imx230_720_rects[] = { + { + .subdev_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .in_rect = { 0, 0, 5344, 4016 }, + .out_rect = { 0, 0, 5344, 3008 }, + }, + { + .subdev_type = CRL_SUBDEV_TYPE_BINNER, + .in_rect = { 0, 0, 5344, 3008 }, + .out_rect = { 0, 0, 1336, 752 }, + }, + { + .subdev_type = CRL_SUBDEV_TYPE_SCALER, + .in_rect = { 0, 0, 1336, 752 }, + .out_rect = { 0, 0, 1296, 736 }, + }, +}; + +static struct crl_mode_rep imx230_modes[] = { + { + .sd_rects_items = ARRAY_SIZE(imx230_full_4_3_rects), + .sd_rects = imx230_full_4_3_rects, + .binn_hor = 1, + .binn_vert = 1, + 
.scale_m = 1, + .width = 5344, + .height = 4016, + .min_llp = 6024, + .min_fll = 4106, + .comp_items = 0, + .ctrl_data = 0, + .mode_regs_items = ARRAY_SIZE(imx230_mode_full_4_3), + .mode_regs = imx230_mode_full_4_3, + }, + { + .sd_rects_items = ARRAY_SIZE(imx230_3280x2460_s_rects), + .sd_rects = imx230_3280x2460_s_rects, + .binn_hor = 1, + .binn_vert = 1, + .scale_m = 26, + .width = 3280, + .height = 2460, + .min_llp = 6024, + .min_fll = 4106, + .comp_items = 0, + .ctrl_data = 0, + .mode_regs_items = ARRAY_SIZE(imx230_mode_3280x2460_scale), + .mode_regs = imx230_mode_3280x2460_scale, + }, + { + .sd_rects_items = ARRAY_SIZE(imx230_3264x2448_s_rects), + .sd_rects = imx230_3264x2448_s_rects, + .binn_hor = 1, + .binn_vert = 1, + .scale_m = 26, + .width = 3264, + .height = 2448, + .min_llp = 6024, + .min_fll = 4106, + .comp_items = 0, + .ctrl_data = 0, + .mode_regs_items = ARRAY_SIZE(imx230_mode_3264x2448_scale), + .mode_regs = imx230_mode_3264x2448_scale, + }, + { + .sd_rects_items = ARRAY_SIZE(imx230_3336x2502_s_rects), + .sd_rects = imx230_3336x2502_s_rects, + .binn_hor = 1, + .binn_vert = 1, + .scale_m = 25, + .width = 3336, + .height = 2502, + .min_llp = 6024, + .min_fll = 4106, + .comp_items = 0, + .ctrl_data = 0, + .mode_regs_items = ARRAY_SIZE(imx230_mode_3336x2502_scale), + .mode_regs = imx230_mode_3336x2502_scale, + }, + { + .sd_rects_items = ARRAY_SIZE(imx230_3264x2448_c_rects), + .sd_rects = imx230_3264x2448_c_rects, + .binn_hor = 1, + .binn_vert = 1, + .scale_m = 1, + .width = 3264, + .height = 2448, + .min_llp = 6024, + .min_fll = 2538, + .comp_items = 0, + .ctrl_data = 0, + .mode_regs_items = ARRAY_SIZE(imx230_mode_3264x2448_crop), + .mode_regs = imx230_mode_3264x2448_crop, + }, + { + .sd_rects_items = ARRAY_SIZE(imx230_full_16_9_rects), + .sd_rects = imx230_full_16_9_rects, + .binn_hor = 1, + .binn_vert = 1, + .scale_m = 1, + .width = 5344, + .height = 3008, + .min_llp = 6024, + .min_fll = 3098, + .comp_items = 0, + .ctrl_data = 0, + .mode_regs_items = 
ARRAY_SIZE(imx230_mode_full_16_9), + .mode_regs = imx230_mode_full_16_9, + }, + { + .sd_rects_items = ARRAY_SIZE(imx230_4k2k_rects), + .sd_rects = imx230_4k2k_rects, + .binn_hor = 1, + .binn_vert = 1, + .scale_m = 22, + .width = 3886, + .height = 2184, + .min_llp = 6024, + .min_fll = 3300, + .comp_items = 0, + .ctrl_data = 0, + .mode_regs_items = ARRAY_SIZE(imx230_mode_4k2k), + .mode_regs = imx230_mode_4k2k, + }, + { + .sd_rects_items = ARRAY_SIZE(imx230_uhd_crop_rects), + .sd_rects = imx230_uhd_crop_rects, + .binn_hor = 1, + .binn_vert = 1, + .scale_m = 1, + .width = 3840, + .height = 2160, + .min_llp = 6024, + .min_fll = 2250, + .comp_items = 0, + .ctrl_data = 0, + .mode_regs_items = ARRAY_SIZE(imx230_mode_uhd_crop), + .mode_regs = imx230_mode_uhd_crop, + }, + { + .sd_rects_items = ARRAY_SIZE(imx230_2k2k_rects), + .sd_rects = imx230_2k2k_rects, + .binn_hor = 2, + .binn_vert = 2, + .scale_m = 1, + .width = 2672, + .height = 2008, + .min_llp = 6024, + .min_fll = 2108, + .comp_items = 0, + .ctrl_data = 0, + .mode_regs_items = ARRAY_SIZE(imx230_mode_2k2k), + .mode_regs = imx230_mode_2k2k, + }, + { + .sd_rects_items = ARRAY_SIZE(imx230_2672x1504_rects), + .sd_rects = imx230_2672x1504_rects, + .binn_hor = 2, + .binn_vert = 2, + .scale_m = 1, + .width = 2672, + .height = 1504, + .min_llp = 6024, + .min_fll = 1660, + .comp_items = 0, + .ctrl_data = 0, + .mode_regs_items = ARRAY_SIZE(imx230_mode_2672x1504), + .mode_regs = imx230_mode_2672x1504, + }, + { + .sd_rects_items = ARRAY_SIZE(imx230_1940x1092_rects), + .sd_rects = imx230_1940x1092_rects, + .binn_hor = 2, + .binn_vert = 2, + .scale_m = 22, + .width = 1940, + .height = 1092, + .min_llp = 6024, + .min_fll = 1660, + .comp_items = 0, + .ctrl_data = 0, + .mode_regs_items = ARRAY_SIZE(imx230_mode_1940x1092), + .mode_regs = imx230_mode_1940x1092, + }, + { + .sd_rects_items = ARRAY_SIZE(imx230_1440_rects), + .sd_rects = imx230_1440_rects, + .binn_hor = 1, + .binn_vert = 1, + .scale_m = 1, + .width = 2560, + .height = 1440, 
+ .min_llp = 6024, + .min_fll = 1530, + .comp_items = 0, + .ctrl_data = 0, + .mode_regs_items = ARRAY_SIZE(imx230_mode_1440), + .mode_regs = imx230_mode_1440, + }, + { + .sd_rects_items = ARRAY_SIZE(imx230_720_rects), + .sd_rects = imx230_720_rects, + .binn_hor = 4, + .binn_vert = 4, + .scale_m = 1, + .width = 1296, + .height = 736, + .min_llp = 6024, + .min_fll = 826, + .comp_items = 0, + .ctrl_data = 0, + .mode_regs_items = ARRAY_SIZE(imx230_mode_720), + .mode_regs = imx230_mode_720, + }, +}; + +static struct crl_sensor_subdev_config imx230_sensor_subdevs[] = { + { + .subdev_type = CRL_SUBDEV_TYPE_SCALER, + .name = "imx230 scaler", + }, + { + .subdev_type = CRL_SUBDEV_TYPE_BINNER, + .name = "imx230 binner", + }, + { + .subdev_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .name = "imx230 pixel array", + }, +}; + +static struct crl_sensor_limits imx230_sensor_limits = { + .x_addr_min = 0, + .y_addr_min = 0, + .x_addr_max = 5344, + .y_addr_max = 4016, + .min_frame_length_lines = 160, + .max_frame_length_lines = 65535, + .min_line_length_pixels = 6024, + .max_line_length_pixels = 32752, + .scaler_m_min = 16, + .scaler_m_max = 255, + .scaler_n_min = 16, + .scaler_n_max = 16, + .min_even_inc = 1, + .max_even_inc = 1, + .min_odd_inc = 1, + .max_odd_inc = 3, +}; + +static struct crl_flip_data imx230_flip_configurations[] = { + { + .flip = CRL_FLIP_DEFAULT_NONE, + .pixel_order = CRL_PIXEL_ORDER_RGGB, + }, + { + .flip = CRL_FLIP_HFLIP, + .pixel_order = CRL_PIXEL_ORDER_GRBG, + }, + { + .flip = CRL_FLIP_VFLIP, + .pixel_order = CRL_PIXEL_ORDER_GBRG, + }, + { + .flip = CRL_FLIP_HFLIP_VFLIP, + .pixel_order = CRL_PIXEL_ORDER_BGGR, + }, +}; + +static struct crl_csi_data_fmt imx230_crl_csi_data_fmt[] = { + { + .code = MEDIA_BUS_FMT_SGRBG10_1X10, + .pixel_order = CRL_PIXEL_ORDER_GRBG, + .bits_per_pixel = 10, + .regs_items = 1, + .regs = imx230_data_fmt_width10, + }, + { + .code = MEDIA_BUS_FMT_SRGGB10_1X10, + .pixel_order = CRL_PIXEL_ORDER_RGGB, + .regs_items = 1, + .bits_per_pixel = 10, + 
.regs = imx230_data_fmt_width10, + }, + { + .code = MEDIA_BUS_FMT_SBGGR10_1X10, + .pixel_order = CRL_PIXEL_ORDER_BGGR, + .regs_items = 1, + .bits_per_pixel = 10, + .regs = imx230_data_fmt_width10, + }, + { + .code = MEDIA_BUS_FMT_SGBRG10_1X10, + .pixel_order = CRL_PIXEL_ORDER_GBRG, + .regs_items = 1, + .bits_per_pixel = 10, + .regs = imx230_data_fmt_width10, + }, + { + .code = MEDIA_BUS_FMT_SGRBG8_1X8, + .pixel_order = CRL_PIXEL_ORDER_GRBG, + .regs_items = 1, + .bits_per_pixel = 8, + .regs = imx230_data_fmt_width8, + }, + { + .code = MEDIA_BUS_FMT_SRGGB8_1X8, + .pixel_order = CRL_PIXEL_ORDER_RGGB, + .regs_items = 1, + .bits_per_pixel = 8, + .regs = imx230_data_fmt_width8, + }, + { + .code = MEDIA_BUS_FMT_SBGGR8_1X8, + .pixel_order = CRL_PIXEL_ORDER_BGGR, + .regs_items = 1, + .bits_per_pixel = 8, + .regs = imx230_data_fmt_width8, + }, + { + .code = MEDIA_BUS_FMT_SGBRG8_1X8, + .pixel_order = CRL_PIXEL_ORDER_GBRG, + .regs_items = 1, + .bits_per_pixel = 8, + .regs = imx230_data_fmt_width8, + }, + { + .code = MEDIA_BUS_FMT_SGRBG14_1X14, + .pixel_order = CRL_PIXEL_ORDER_GRBG, + .regs_items = 1, + .bits_per_pixel = 14, + .regs = imx230_data_fmt_width14, + }, + { + .code = MEDIA_BUS_FMT_SRGGB14_1X14, + .pixel_order = CRL_PIXEL_ORDER_RGGB, + .regs_items = 1, + .bits_per_pixel = 14, + .regs = imx230_data_fmt_width14, + }, + { + .code = MEDIA_BUS_FMT_SBGGR14_1X14, + .pixel_order = CRL_PIXEL_ORDER_BGGR, + .regs_items = 1, + .bits_per_pixel = 14, + .regs = imx230_data_fmt_width14, + }, + { + .code = MEDIA_BUS_FMT_SGBRG14_1X14, + .pixel_order = CRL_PIXEL_ORDER_GBRG, + .regs_items = 1, + .bits_per_pixel = 14, + .regs = imx230_data_fmt_width14, + }, +}; + +static const char * const imx132_hdr_types[] = { + "HDR Off", + "HDR Type1", + "HDR Type2", + "HDRC Type3", +}; + +static struct crl_v4l2_ctrl imx230_vl42_ctrls[] = { + { + .sd_type = CRL_SUBDEV_TYPE_SCALER, + .op_type = CRL_V4L2_CTRL_SET_OP, + .context = SENSOR_IDLE, + .ctrl_id = V4L2_CID_LINK_FREQ, + .name = 
"V4L2_CID_LINK_FREQ", + .type = CRL_V4L2_CTRL_TYPE_MENU_INT, + .data.v4l2_int_menu.def = 0, + .data.v4l2_int_menu.max = 0, + .data.v4l2_int_menu.menu = 0, + .flags = 0, + .impact = CRL_IMPACTS_NO_IMPACT, + .regs_items = 0, + .regs = 0, + .dep_items = 0, + .dep_ctrls = 0, + }, + { + .sd_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .op_type = CRL_V4L2_CTRL_GET_OP, + .context = SENSOR_POWERED_ON, + .ctrl_id = V4L2_CID_PIXEL_RATE, + .name = "V4L2_CID_PIXEL_RATE_PA", + .type = CRL_V4L2_CTRL_TYPE_INTEGER, + .data.std_data.min = 0, + .data.std_data.max = INT_MAX, + .data.std_data.step = 1, + .data.std_data.def = 0, + .flags = 0, + .impact = CRL_IMPACTS_NO_IMPACT, + .regs_items = 0, + .regs = 0, + .dep_items = 0, + .dep_ctrls = 0, + }, + { + .sd_type = CRL_SUBDEV_TYPE_SCALER, + .op_type = CRL_V4L2_CTRL_GET_OP, + .context = SENSOR_POWERED_ON, + .ctrl_id = V4L2_CID_PIXEL_RATE, + .name = "V4L2_CID_PIXEL_RATE_CSI", + .type = CRL_V4L2_CTRL_TYPE_INTEGER, + .data.std_data.min = 0, + .data.std_data.max = INT_MAX, + .data.std_data.step = 1, + .data.std_data.def = 0, + .flags = 0, + .impact = CRL_IMPACTS_NO_IMPACT, + .regs_items = 0, + .regs = 0, + .dep_items = 0, + .dep_ctrls = 0, + }, + { + .sd_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .op_type = CRL_V4L2_CTRL_SET_OP, + .context = SENSOR_POWERED_ON, + .ctrl_id = V4L2_CID_ANALOGUE_GAIN, + .name = "V4L2_CID_ANALOGUE_GAIN", + .type = CRL_V4L2_CTRL_TYPE_INTEGER, + .data.std_data.min = 0, + .data.std_data.max = 448, + .data.std_data.step = 1, + .data.std_data.def = 0, + .flags = 0, + .impact = CRL_IMPACTS_NO_IMPACT, + .ctrl = 0, + .regs_items = ARRAY_SIZE(imx230_ana_gain_global_regs), + .regs = imx230_ana_gain_global_regs, + .dep_items = 0, + .dep_ctrls = 0, + }, + { + .sd_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .op_type = CRL_V4L2_CTRL_SET_OP, + .context = SENSOR_POWERED_ON, + .ctrl_id = V4L2_CID_EXPOSURE, + .name = "V4L2_CID_EXPOSURE", + .type = CRL_V4L2_CTRL_TYPE_INTEGER, + .data.std_data.min = 0, + .data.std_data.max = 65500, + 
.data.std_data.step = 1, + .data.std_data.def = 0, + .flags = 0, + .impact = CRL_IMPACTS_NO_IMPACT, + .ctrl = 0, + .regs_items = ARRAY_SIZE(imx230_exposure_regs), + .regs = imx230_exposure_regs, + .dep_items = 0, /* FLL is changes automatically */ + .dep_ctrls = 0, + }, + { + .sd_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .op_type = CRL_V4L2_CTRL_SET_OP, + .context = SENSOR_POWERED_ON, + .ctrl_id = V4L2_CID_HFLIP, + .name = "V4L2_CID_HFLIP", + .type = CRL_V4L2_CTRL_TYPE_INTEGER, + .data.std_data.min = 0, + .data.std_data.max = 1, + .data.std_data.step = 1, + .data.std_data.def = 0, + .flags = 0, + .impact = CRL_IMPACTS_NO_IMPACT, + .ctrl = 0, + .regs_items = ARRAY_SIZE(imx230_h_flip_regs), + .regs = imx230_h_flip_regs, + .dep_items = 0, + .dep_ctrls = 0, + }, + { + .sd_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .op_type = CRL_V4L2_CTRL_SET_OP, + .context = SENSOR_POWERED_ON, + .ctrl_id = V4L2_CID_VFLIP, + .name = "V4L2_CID_VFLIP", + .type = CRL_V4L2_CTRL_TYPE_INTEGER, + .data.std_data.min = 0, + .data.std_data.max = 1, + .data.std_data.step = 1, + .data.std_data.def = 0, + .flags = 0, + .impact = CRL_IMPACTS_NO_IMPACT, + .ctrl = 0, + .regs_items = ARRAY_SIZE(imx230_v_flip_regs), + .regs = imx230_v_flip_regs, + .dep_items = 0, + .dep_ctrls = 0, + }, + { + .sd_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .op_type = CRL_V4L2_CTRL_SET_OP, + .context = SENSOR_POWERED_ON, + .ctrl_id = V4L2_CID_FRAME_LENGTH_LINES, + .name = "Frame length lines", + .type = CRL_V4L2_CTRL_TYPE_CUSTOM, + .data.std_data.min = 160, + .data.std_data.max = 65535, + .data.std_data.step = 1, + .data.std_data.def = 4130, + .flags = V4L2_CTRL_FLAG_UPDATE, + .impact = CRL_IMPACTS_NO_IMPACT, + .ctrl = 0, + .regs_items = ARRAY_SIZE(imx230_fll_regs), + .regs = imx230_fll_regs, + .dep_items = 0, + .dep_ctrls = 0, + .v4l2_type = V4L2_CTRL_TYPE_INTEGER, + }, + { + .sd_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .op_type = CRL_V4L2_CTRL_SET_OP, + .context = SENSOR_POWERED_ON, + .ctrl_id = V4L2_CID_LINE_LENGTH_PIXELS, + .name = 
"Line Length Pixels", + .type = CRL_V4L2_CTRL_TYPE_CUSTOM, + .data.std_data.min = 6024, + .data.std_data.max = 65520, + .data.std_data.step = 1, + .data.std_data.def = 6024, + .flags = V4L2_CTRL_FLAG_UPDATE, + .impact = CRL_IMPACTS_NO_IMPACT, + .ctrl = 0, + .regs_items = ARRAY_SIZE(imx230_llp_regs), + .regs = imx230_llp_regs, + .dep_items = 0, + .dep_ctrls = 0, + .v4l2_type = V4L2_CTRL_TYPE_INTEGER, + }, + { + .sd_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .op_type = CRL_V4L2_CTRL_SET_OP, + .context = SENSOR_POWERED_ON, + .ctrl_id = V4L2_CID_GAIN, + .name = "Digital Gain", + .type = CRL_V4L2_CTRL_TYPE_INTEGER, + .data.std_data.min = 0, + .data.std_data.max = 4095, + .data.std_data.step = 1, + .data.std_data.def = 256, + .flags = 0, + .impact = CRL_IMPACTS_NO_IMPACT, + .ctrl = 0, + .regs_items = ARRAY_SIZE(imx230_dig_gain_regs), + .regs = imx230_dig_gain_regs, + .dep_items = 0, + .dep_ctrls = 0, + }, + { + .sd_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .op_type = CRL_V4L2_CTRL_GET_OP, + .context = SENSOR_STREAMING, + .ctrl_id = CRL_CID_SENSOR_THERMAL_DATA, + .name = "Sensor Thermal Data", + .type = CRL_V4L2_CTRL_TYPE_CUSTOM, + .data.std_data.min = 0, + .data.std_data.max = INT_MAX, + .data.std_data.step = 1, + .data.std_data.def = 0, + .flags = V4L2_CTRL_FLAG_VOLATILE | V4L2_CTRL_FLAG_READ_ONLY, + .impact = CRL_IMPACTS_NO_IMPACT, + .ctrl = 0, + .regs_items = ARRAY_SIZE(imx230_thermal_regs), + .regs = imx230_thermal_regs, + .dep_items = 0, + .dep_ctrls = 0, + .v4l2_type = V4L2_CTRL_TYPE_INTEGER, + }, + { + .sd_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .op_type = CRL_V4L2_CTRL_SET_OP, + .context = SENSOR_IDLE, /* Cannot be set when streaming? 
*/ + .ctrl_id = CRL_CID_IMX230_HDR_ET_RATIO, + .name = "imx230 HDR ET Ratio", + .type = CRL_V4L2_CTRL_TYPE_CUSTOM, + .data.std_data.min = 1, + .data.std_data.max = 16, + .data.std_data.step = 1, + .data.std_data.def = 1, + .flags = V4L2_CTRL_FLAG_UPDATE, + .impact = CRL_IMPACTS_NO_IMPACT, + .ctrl = 0, + .regs_items = ARRAY_SIZE(imx230_hdr_et_ratio_regs), + .regs = imx230_hdr_et_ratio_regs, + .dep_items = 0, + .dep_ctrls = 0, + .v4l2_type = V4L2_CTRL_TYPE_INTEGER, + }, + { + .sd_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .op_type = CRL_V4L2_CTRL_SET_OP, + .context = SENSOR_IDLE, + .ctrl_id = CRL_CID_IMX230_HDR_MODE, + .name = "imx230 HDR mode", + .type = CRL_V4L2_CTRL_TYPE_CUSTOM, + .data.v4l2_menu_items.menu = imx132_hdr_types, + .data.v4l2_menu_items.size = ARRAY_SIZE(imx132_hdr_types), + .flags = V4L2_CTRL_FLAG_UPDATE, + .impact = CRL_IMPACTS_NO_IMPACT, + .ctrl = 0, + .regs_items = 0, + .regs = 0, + .dep_items = ARRAY_SIZE(imx230_hdr_dep_controls), + .dep_ctrls = imx230_hdr_dep_controls, + .v4l2_type = V4L2_CTRL_TYPE_MENU, + .crl_ctrl_dep_reg_list = ARRAY_SIZE(imx230_hdr_types_regs), + .dep_regs = imx230_hdr_types_regs, + }, + { + .sd_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .op_type = CRL_V4L2_CTRL_SET_OP, + .context = SENSOR_POWERED_ON, + .ctrl_id = CRL_CID_SENSOR_PDAF, + .name = "CRL_CID_SENSOR_PDAF", + .type = CRL_V4L2_CTRL_TYPE_CUSTOM, + .data.std_data.min = 0, + .data.std_data.max = 1, + .data.std_data.step = 1, + .data.std_data.def = 0, + .flags = 0, + .impact = CRL_IMPACTS_NO_IMPACT, + .ctrl = 0, + .regs_items = 0, + .regs = NULL, + .dep_items = 0, + .dep_ctrls = 0, + .v4l2_type = V4L2_CTRL_TYPE_INTEGER, + .crl_ctrl_dep_reg_list = ARRAY_SIZE(imx230_pdaf_ctrl_regs), + .dep_regs = imx230_pdaf_ctrl_regs, + }, +}; + +/* Power items, they are enabled in the order they are listed here */ +static struct crl_power_seq_entity imx230_power_items[] = { + { + .type = CRL_POWER_ETY_REGULATOR_FRAMEWORK, + .ent_name = "VANA", + .val = 2500000, + .delay = 0, + }, + { + .type = 
CRL_POWER_ETY_REGULATOR_FRAMEWORK, + .ent_name = "VDIG", + .val = 1100000, + .delay = 0, + }, + { + .type = CRL_POWER_ETY_REGULATOR_FRAMEWORK, + .ent_name = "VIO", + .val = 1800000, + .delay = 0, + }, + { + .type = CRL_POWER_ETY_REGULATOR_FRAMEWORK, + .ent_name = "VAF", + .val = 3000000, + .delay = 2000, + }, + { + .type = CRL_POWER_ETY_CLK_FRAMEWORK, + .val = 24000000, + }, + { + .type = CRL_POWER_ETY_GPIO_FROM_PDATA, + .val = 1, + .delay = 10700, + }, +}; + +static struct crl_nvm_blob imx230_nvm_blobs[] = { + { 0x54, 0x00, 0x100 }, + { 0x55, 0x00, 0x100 }, + { 0x56, 0x00, 0x021 }, +}; + +static struct crl_arithmetic_ops imx230_frame_desc_width_ops[] = { + { + .op = CRL_ASSIGNMENT, + .operand.entity_type = CRL_DYNAMIC_VAL_OPERAND_TYPE_VAR_REF, + .operand.entity_val = CRL_VAR_REF_OUTPUT_WIDTH, + }, +}; + +static struct crl_arithmetic_ops imx230_frame_desc_height_ops[] = { + { + .op = CRL_ASSIGNMENT, + .operand.entity_type = CRL_DYNAMIC_VAL_OPERAND_TYPE_CONST, + .operand.entity_val = 4, + }, +}; + +static struct crl_frame_desc imx230_frame_desc[] = { + { + .flags.entity_val = 0, + .bpp.entity_type = CRL_DYNAMIC_VAL_OPERAND_TYPE_VAR_REF, + .bpp.entity_val = CRL_VAR_REF_BITSPERPIXEL, + .pixelcode.entity_val = MEDIA_BUS_FMT_FIXED, + .length.entity_val = 0, + .start_line.entity_val = 0, + .start_pixel.entity_val = 0, + .width = { + .ops_items = ARRAY_SIZE(imx230_frame_desc_width_ops), + .ops = imx230_frame_desc_width_ops, + }, + .height = { + .ops_items = ARRAY_SIZE(imx230_frame_desc_height_ops), + .ops = imx230_frame_desc_height_ops, + }, + .csi2_channel.entity_val = 0, + .csi2_data_type.entity_val = 0x12, + }, +}; + +struct crl_sensor_configuration imx230_crl_configuration = { + + + .power_items = ARRAY_SIZE(imx230_power_items), + .power_entities = imx230_power_items, + + .powerup_regs_items = ARRAY_SIZE(imx230_powerup_regset), + .powerup_regs = imx230_powerup_regset, + + .poweroff_regs_items = 0, + .poweroff_regs = 0, + + .id_reg_items = 
ARRAY_SIZE(imx230_sensor_detect_regset), + .id_regs = imx230_sensor_detect_regset, + + .subdev_items = ARRAY_SIZE(imx230_sensor_subdevs), + .subdevs = imx230_sensor_subdevs, + + .sensor_limits = &imx230_sensor_limits, + + .pll_config_items = ARRAY_SIZE(imx230_pll_configurations), + .pll_configs = imx230_pll_configurations, + + .modes_items = ARRAY_SIZE(imx230_modes), + .modes = imx230_modes, + .fail_safe_mode_index = 3, + + .streamon_regs_items = ARRAY_SIZE(imx230_streamon_regs), + .streamon_regs = imx230_streamon_regs, + + .streamoff_regs_items = ARRAY_SIZE(imx230_streamoff_regs), + .streamoff_regs = imx230_streamoff_regs, + + .v4l2_ctrls_items = ARRAY_SIZE(imx230_vl42_ctrls), + .v4l2_ctrl_bank = imx230_vl42_ctrls, + + .csi_fmts_items = ARRAY_SIZE(imx230_crl_csi_data_fmt), + .csi_fmts = imx230_crl_csi_data_fmt, + + .flip_items = ARRAY_SIZE(imx230_flip_configurations), + .flip_data = imx230_flip_configurations, + + .crl_nvm_info.nvm_flags = CRL_NVM_ADDR_MODE_8BIT, + .crl_nvm_info.nvm_preop_regs_items = 0, + .crl_nvm_info.nvm_postop_regs_items = 0, + .crl_nvm_info.nvm_blobs_items = ARRAY_SIZE(imx230_nvm_blobs), + .crl_nvm_info.nvm_config = imx230_nvm_blobs, + + .frame_desc_entries = ARRAY_SIZE(imx230_frame_desc), + .frame_desc_type = CRL_V4L2_MBUS_FRAME_DESC_TYPE_CSI2, + .frame_desc = imx230_frame_desc, + + .msr_file_name = "00imx230.bxt_rvp.drvb", +}; + +#endif /* __CRLMODULE_imx230_CONFIGURATION_H_ */ diff --git a/drivers/media/i2c/crlmodule/crl_imx274_configuration.h b/drivers/media/i2c/crlmodule/crl_imx274_configuration.h new file mode 100644 index 0000000000000..6ec84fb42a32d --- /dev/null +++ b/drivers/media/i2c/crlmodule/crl_imx274_configuration.h @@ -0,0 +1,1272 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (C) 2016 - 2018 Intel Corporation + * + * Author: Yuning Pu + * + */ + +#ifndef __CRLMODULE_IMX274_CONFIGURATION_H_ +#define __CRLMODULE_IMX274_CONFIGURATION_H_ + +#include "crlmodule-sensor-ds.h" + +#define IMX274_REG_STANDBY 0x3000 /* 
STBLOGIC STBMIPI STBDV */ + +#define IMX274_HMAX 65535 +#define IMX274_VMAX 1048575 +#define IMX274_MAX_SHS1 65535 +#define IMX274_MAX_SHS2 65535 +#define IMX274_MAX_RHS1 65535 + +/* imx274 mode standby cancel sequence */ +static struct crl_register_write_rep imx274_powerup_standby[] = { + {IMX274_REG_STANDBY, CRL_REG_LEN_08BIT, 0x12}, +}; + +/* 1440Mbps for imx274 4K 30fps 1080p 60fps */ +static struct crl_register_write_rep imx274_pll_1440mbps[] = { + {0x3120, CRL_REG_LEN_08BIT, 0xF0}, + {0x3121, CRL_REG_LEN_08BIT, 0x00}, + {0x3122, CRL_REG_LEN_08BIT, 0x02}, + {0x3129, CRL_REG_LEN_08BIT, 0x9C}, + {0x312A, CRL_REG_LEN_08BIT, 0x02}, + {0x312D, CRL_REG_LEN_08BIT, 0x02}, + {0x310B, CRL_REG_LEN_08BIT, 0x00}, /* PLL standby */ + {0x304C, CRL_REG_LEN_08BIT, 0x00}, /* PLSTMG01 */ + {0x304D, CRL_REG_LEN_08BIT, 0x03}, + {0x331C, CRL_REG_LEN_08BIT, 0x1A}, + {0x331D, CRL_REG_LEN_08BIT, 0x00}, + {0x3502, CRL_REG_LEN_08BIT, 0x02}, + {0x3529, CRL_REG_LEN_08BIT, 0x0E}, + {0x352A, CRL_REG_LEN_08BIT, 0x0E}, + {0x352B, CRL_REG_LEN_08BIT, 0x0E}, + {0x3538, CRL_REG_LEN_08BIT, 0x0E}, + {0x3539, CRL_REG_LEN_08BIT, 0x0E}, + {0x3553, CRL_REG_LEN_08BIT, 0x00}, + {0x357D, CRL_REG_LEN_08BIT, 0x05}, + {0x357F, CRL_REG_LEN_08BIT, 0x05}, + {0x3581, CRL_REG_LEN_08BIT, 0x04}, + {0x3583, CRL_REG_LEN_08BIT, 0x76}, + {0x3587, CRL_REG_LEN_08BIT, 0x01}, + {0x35BB, CRL_REG_LEN_08BIT, 0x0E}, + {0x35BC, CRL_REG_LEN_08BIT, 0x0E}, + {0x35BD, CRL_REG_LEN_08BIT, 0x0E}, + {0x35BE, CRL_REG_LEN_08BIT, 0x0E}, + {0x35BF, CRL_REG_LEN_08BIT, 0x0E}, + {0x366E, CRL_REG_LEN_08BIT, 0x00}, + {0x366F, CRL_REG_LEN_08BIT, 0x00}, + {0x3670, CRL_REG_LEN_08BIT, 0x00}, + {0x3671, CRL_REG_LEN_08BIT, 0x00}, /* PLSTMG01 */ + {0x30EE, CRL_REG_LEN_08BIT, 0x01}, + {0x3304, CRL_REG_LEN_08BIT, 0x32}, /* For Mipi */ + {0x3305, CRL_REG_LEN_08BIT, 0x00}, + {0x3306, CRL_REG_LEN_08BIT, 0x32}, + {0x3307, CRL_REG_LEN_08BIT, 0x00}, + {0x3590, CRL_REG_LEN_08BIT, 0x32}, + {0x3591, CRL_REG_LEN_08BIT, 0x00}, + {0x3686, CRL_REG_LEN_08BIT, 0x32}, 
+ {0x3687, CRL_REG_LEN_08BIT, 0x00}, +}; + +static struct crl_register_write_rep imx274_3864_2202_RAW12_NORMAL[] = { + {0x30E2, CRL_REG_LEN_08BIT, 0x00}, /* VCUTMODE */ + {0x3130, CRL_REG_LEN_08BIT, 0xAA}, /* WRITE_VSIZE */ + {0x3131, CRL_REG_LEN_08BIT, 0x08}, + {0x3132, CRL_REG_LEN_08BIT, 0x9A}, /* Y_OUT_SIZE */ + {0x3133, CRL_REG_LEN_08BIT, 0x08}, + {0x3004, CRL_REG_LEN_08BIT, 0x01}, /* MDSEL */ + {0x3005, CRL_REG_LEN_08BIT, 0x07}, + {0x3006, CRL_REG_LEN_08BIT, 0x00}, + {0x3007, CRL_REG_LEN_08BIT, 0x02}, + {0x3A41, CRL_REG_LEN_08BIT, 0x10}, /* MDSEL5 */ + {0x3342, CRL_REG_LEN_08BIT, 0xFF}, /* MDPLS01 */ + {0x3343, CRL_REG_LEN_08BIT, 0x01}, + {0x3344, CRL_REG_LEN_08BIT, 0xFF}, + {0x3345, CRL_REG_LEN_08BIT, 0x01}, + {0x3528, CRL_REG_LEN_08BIT, 0x0F}, /* MDPLS03 */ + {0x3A54, CRL_REG_LEN_08BIT, 0x18}, /* Metadata Size */ + {0x3A55, CRL_REG_LEN_08BIT, 0x0F}, + {0x3554, CRL_REG_LEN_08BIT, 0x00}, + {0x3555, CRL_REG_LEN_08BIT, 0x00}, + {0x3556, CRL_REG_LEN_08BIT, 0x00}, + {0x3557, CRL_REG_LEN_08BIT, 0x00}, + {0x3558, CRL_REG_LEN_08BIT, 0x00}, + {0x3559, CRL_REG_LEN_08BIT, 0x1F}, + {0x355A, CRL_REG_LEN_08BIT, 0x1F}, + {0x35BA, CRL_REG_LEN_08BIT, 0x0F}, + {0x366A, CRL_REG_LEN_08BIT, 0x00}, + {0x366B, CRL_REG_LEN_08BIT, 0x00}, + {0x366C, CRL_REG_LEN_08BIT, 0x00}, + {0x366D, CRL_REG_LEN_08BIT, 0x00}, + {0x33A6, CRL_REG_LEN_08BIT, 0x01}, + {0x306B, CRL_REG_LEN_08BIT, 0x07}, /* MDPLS17 */ + {0x3019, CRL_REG_LEN_08BIT, 0x00}, /* Disable DOL */ +}; + +static struct crl_register_write_rep imx274_3864_2174_RAW10_NORMAL[] = { + {0x30E2, CRL_REG_LEN_08BIT, 0x01}, /* VCUTMODE */ + {0x3130, CRL_REG_LEN_08BIT, 0x86}, /* WRITE_VSIZE */ + {0x3131, CRL_REG_LEN_08BIT, 0x08}, + {0x3132, CRL_REG_LEN_08BIT, 0x7E}, /* Y_OUT_SIZE */ + {0x3133, CRL_REG_LEN_08BIT, 0x08}, + {0x3004, CRL_REG_LEN_08BIT, 0x01}, /* MDSEL */ + {0x3005, CRL_REG_LEN_08BIT, 0x01}, + {0x3006, CRL_REG_LEN_08BIT, 0x00}, + {0x3007, CRL_REG_LEN_08BIT, 0x02}, + {0x3A41, CRL_REG_LEN_08BIT, 0x08}, /* MDSEL5 */ + {0x3342, 
CRL_REG_LEN_08BIT, 0x0A}, /* MDPLS01 */ + {0x3343, CRL_REG_LEN_08BIT, 0x00}, + {0x3344, CRL_REG_LEN_08BIT, 0x16}, + {0x3345, CRL_REG_LEN_08BIT, 0x00}, + {0x3528, CRL_REG_LEN_08BIT, 0x0E}, /* MDPLS03 */ + {0x3A54, CRL_REG_LEN_08BIT, 0x18}, /* Metadata Size */ + {0x3A55, CRL_REG_LEN_08BIT, 0x0F}, + {0x3554, CRL_REG_LEN_08BIT, 0x1F}, + {0x3555, CRL_REG_LEN_08BIT, 0x01}, + {0x3556, CRL_REG_LEN_08BIT, 0x01}, + {0x3557, CRL_REG_LEN_08BIT, 0x01}, + {0x3558, CRL_REG_LEN_08BIT, 0x01}, + {0x3559, CRL_REG_LEN_08BIT, 0x00}, + {0x355A, CRL_REG_LEN_08BIT, 0x00}, + {0x35BA, CRL_REG_LEN_08BIT, 0x0E}, + {0x366A, CRL_REG_LEN_08BIT, 0x1B}, + {0x366B, CRL_REG_LEN_08BIT, 0x1A}, + {0x366C, CRL_REG_LEN_08BIT, 0x19}, + {0x366D, CRL_REG_LEN_08BIT, 0x17}, + {0x33A6, CRL_REG_LEN_08BIT, 0x01}, + {0x306B, CRL_REG_LEN_08BIT, 0x05}, /* MDPLS17 */ + {0x3019, CRL_REG_LEN_08BIT, 0x00}, /* Disable DOL */ +}; + +static struct crl_register_write_rep imx274_3868_4536_RAW10_DOL[] = { + {0x30E2, CRL_REG_LEN_08BIT, 0x01}, /* VCUTMODE */ + {0x3130, CRL_REG_LEN_08BIT, 0x86}, /* WRITE_VSIZE */ + {0x3131, CRL_REG_LEN_08BIT, 0x08}, + {0x3132, CRL_REG_LEN_08BIT, 0x8E}, /* Y_OUT_SIZE */ + {0x3133, CRL_REG_LEN_08BIT, 0x08}, + {0x3004, CRL_REG_LEN_08BIT, 0x06}, /* MDSEL */ + {0x3005, CRL_REG_LEN_08BIT, 0x01}, + {0x3006, CRL_REG_LEN_08BIT, 0x00}, + {0x3007, CRL_REG_LEN_08BIT, 0x02}, + {0x3A41, CRL_REG_LEN_08BIT, 0x00}, /* MDSEL5 */ + {0x3342, CRL_REG_LEN_08BIT, 0x0A}, /* MDPLS01 */ + {0x3343, CRL_REG_LEN_08BIT, 0x00}, + {0x3344, CRL_REG_LEN_08BIT, 0x16}, + {0x3345, CRL_REG_LEN_08BIT, 0x00}, + {0x3528, CRL_REG_LEN_08BIT, 0x0E}, /* MDPLS03 */ + {0x3A54, CRL_REG_LEN_08BIT, 0x1C}, /* Metadata Size */ + {0x3A55, CRL_REG_LEN_08BIT, 0x0F}, + {0x3554, CRL_REG_LEN_08BIT, 0x1F}, + {0x3555, CRL_REG_LEN_08BIT, 0x01}, + {0x3556, CRL_REG_LEN_08BIT, 0x01}, + {0x3557, CRL_REG_LEN_08BIT, 0x01}, + {0x3558, CRL_REG_LEN_08BIT, 0x01}, + {0x3559, CRL_REG_LEN_08BIT, 0x00}, + {0x355A, CRL_REG_LEN_08BIT, 0x00}, + {0x35BA, 
CRL_REG_LEN_08BIT, 0x0E}, + {0x366A, CRL_REG_LEN_08BIT, 0x1B}, + {0x366B, CRL_REG_LEN_08BIT, 0x1A}, + {0x366C, CRL_REG_LEN_08BIT, 0x19}, + {0x366D, CRL_REG_LEN_08BIT, 0x17}, + {0x33A6, CRL_REG_LEN_08BIT, 0x01}, + {0x306B, CRL_REG_LEN_08BIT, 0x05}, /* MDPLS17 */ + /* DOL mode settings */ + {0x3019, CRL_REG_LEN_08BIT, 0x01}, /* DOLMODE,DOLSCDEN,HINFOEN */ + {0x3041, CRL_REG_LEN_08BIT, 0x31}, /* DOLSET1 */ + {0x3042, CRL_REG_LEN_08BIT, 0x04}, /* HCYCLE */ + {0x3043, CRL_REG_LEN_08BIT, 0x01}, + {0x30E9, CRL_REG_LEN_08BIT, 0x01}, /* DOLSET2 */ +}; + +static struct crl_register_write_rep imx274_1932_1094_RAW10_NORMAL[] = { + {0x30E2, CRL_REG_LEN_08BIT, 0x02}, /* VCUTMODE */ + {0x3130, CRL_REG_LEN_08BIT, 0x4E}, /* WRITE_VSIZE */ + {0x3131, CRL_REG_LEN_08BIT, 0x04}, + {0x3132, CRL_REG_LEN_08BIT, 0x46}, /* Y_OUT_SIZE */ + {0x3133, CRL_REG_LEN_08BIT, 0x04}, + {0x3004, CRL_REG_LEN_08BIT, 0x02}, /* MDSEL */ + {0x3005, CRL_REG_LEN_08BIT, 0x21}, + {0x3006, CRL_REG_LEN_08BIT, 0x00}, + {0x3007, CRL_REG_LEN_08BIT, 0x11}, + {0x3A41, CRL_REG_LEN_08BIT, 0x08}, /* MDSEL5 */ + {0x3342, CRL_REG_LEN_08BIT, 0x0A}, /* MDPLS01 */ + {0x3343, CRL_REG_LEN_08BIT, 0x00}, + {0x3344, CRL_REG_LEN_08BIT, 0x1A}, + {0x3345, CRL_REG_LEN_08BIT, 0x00}, + {0x3528, CRL_REG_LEN_08BIT, 0x0E}, /* MDPLS03 */ + {0x3A54, CRL_REG_LEN_08BIT, 0x8C}, /* Metadata Size */ + {0x3A55, CRL_REG_LEN_08BIT, 0x07}, + {0x3554, CRL_REG_LEN_08BIT, 0x00}, + {0x3555, CRL_REG_LEN_08BIT, 0x01}, + {0x3556, CRL_REG_LEN_08BIT, 0x01}, + {0x3557, CRL_REG_LEN_08BIT, 0x01}, + {0x3558, CRL_REG_LEN_08BIT, 0x01}, + {0x3559, CRL_REG_LEN_08BIT, 0x00}, + {0x355A, CRL_REG_LEN_08BIT, 0x00}, + {0x35BA, CRL_REG_LEN_08BIT, 0x0E}, + {0x366A, CRL_REG_LEN_08BIT, 0x1B}, + {0x366B, CRL_REG_LEN_08BIT, 0x1A}, + {0x366C, CRL_REG_LEN_08BIT, 0x19}, + {0x366D, CRL_REG_LEN_08BIT, 0x17}, + {0x33A6, CRL_REG_LEN_08BIT, 0x01}, + {0x306B, CRL_REG_LEN_08BIT, 0x05}, /* MDPLS17 */ + {0x3019, CRL_REG_LEN_08BIT, 0x00}, /* Disable DOL */ +}; + +static struct 
crl_register_write_rep imx274_1932_1094_RAW12_NORMAL[] = { + {0x30E2, CRL_REG_LEN_08BIT, 0x02}, /* VCUTMODE */ + {0x3130, CRL_REG_LEN_08BIT, 0x4E}, /* WRITE_VSIZE */ + {0x3131, CRL_REG_LEN_08BIT, 0x04}, + {0x3132, CRL_REG_LEN_08BIT, 0x46}, /* Y_OUT_SIZE */ + {0x3133, CRL_REG_LEN_08BIT, 0x04}, + {0x3004, CRL_REG_LEN_08BIT, 0x02}, /* MDSEL */ + {0x3005, CRL_REG_LEN_08BIT, 0x27}, + {0x3006, CRL_REG_LEN_08BIT, 0x00}, + {0x3007, CRL_REG_LEN_08BIT, 0x11}, + {0x3A41, CRL_REG_LEN_08BIT, 0x08}, /* MDSEL5 */ + {0x3342, CRL_REG_LEN_08BIT, 0xFF}, /* MDPLS01 */ + {0x3343, CRL_REG_LEN_08BIT, 0x01}, + {0x3344, CRL_REG_LEN_08BIT, 0xFF}, + {0x3345, CRL_REG_LEN_08BIT, 0x01}, + {0x3528, CRL_REG_LEN_08BIT, 0x0F}, /* MDPLS03 */ + {0x3A54, CRL_REG_LEN_08BIT, 0x8C}, /* Metadata Size */ + {0x3A55, CRL_REG_LEN_08BIT, 0x07}, + {0x3554, CRL_REG_LEN_08BIT, 0x00}, + {0x3555, CRL_REG_LEN_08BIT, 0x00}, + {0x3556, CRL_REG_LEN_08BIT, 0x00}, + {0x3557, CRL_REG_LEN_08BIT, 0x00}, + {0x3558, CRL_REG_LEN_08BIT, 0x00}, + {0x3559, CRL_REG_LEN_08BIT, 0x1F}, + {0x355A, CRL_REG_LEN_08BIT, 0x1F}, + {0x35BA, CRL_REG_LEN_08BIT, 0x0F}, + {0x366A, CRL_REG_LEN_08BIT, 0x00}, + {0x366B, CRL_REG_LEN_08BIT, 0x00}, + {0x366C, CRL_REG_LEN_08BIT, 0x00}, + {0x366D, CRL_REG_LEN_08BIT, 0x00}, + {0x33A6, CRL_REG_LEN_08BIT, 0x01}, + {0x306B, CRL_REG_LEN_08BIT, 0x07}, /* MDPLS17 */ + {0x3019, CRL_REG_LEN_08BIT, 0x00}, /* Disable DOL */ +}; + +static struct crl_register_write_rep imx274_1936_2376_RAW10_DOL[] = { + {0x30E2, CRL_REG_LEN_08BIT, 0x02}, /* VCUTMODE */ + {0x3130, CRL_REG_LEN_08BIT, 0x4E}, /* WRITE_VSIZE */ + {0x3131, CRL_REG_LEN_08BIT, 0x04}, + {0x3132, CRL_REG_LEN_08BIT, 0x54}, /* Y_OUT_SIZE */ + {0x3133, CRL_REG_LEN_08BIT, 0x04}, + {0x3004, CRL_REG_LEN_08BIT, 0x07}, /* MDSEL */ + {0x3005, CRL_REG_LEN_08BIT, 0x21}, + {0x3006, CRL_REG_LEN_08BIT, 0x00}, + {0x3007, CRL_REG_LEN_08BIT, 0x11}, + {0x3A41, CRL_REG_LEN_08BIT, 0x08}, /* MDSEL5 */ + {0x3342, CRL_REG_LEN_08BIT, 0x0A}, /* MDPLS01 */ + {0x3343, 
CRL_REG_LEN_08BIT, 0x00}, + {0x3344, CRL_REG_LEN_08BIT, 0x1A}, + {0x3345, CRL_REG_LEN_08BIT, 0x00}, + {0x3528, CRL_REG_LEN_08BIT, 0x0E}, /* MDPLS03 */ + {0x3A54, CRL_REG_LEN_08BIT, 0x90}, /* Metadata Size */ + {0x3A55, CRL_REG_LEN_08BIT, 0x07}, + {0x3554, CRL_REG_LEN_08BIT, 0x00}, + {0x3555, CRL_REG_LEN_08BIT, 0x01}, + {0x3556, CRL_REG_LEN_08BIT, 0x01}, + {0x3557, CRL_REG_LEN_08BIT, 0x01}, + {0x3558, CRL_REG_LEN_08BIT, 0x01}, + {0x3559, CRL_REG_LEN_08BIT, 0x00}, + {0x355A, CRL_REG_LEN_08BIT, 0x00}, + {0x35BA, CRL_REG_LEN_08BIT, 0x0E}, + {0x366A, CRL_REG_LEN_08BIT, 0x1B}, + {0x366B, CRL_REG_LEN_08BIT, 0x1A}, + {0x366C, CRL_REG_LEN_08BIT, 0x19}, + {0x366D, CRL_REG_LEN_08BIT, 0x17}, + {0x33A6, CRL_REG_LEN_08BIT, 0x01}, + {0x306B, CRL_REG_LEN_08BIT, 0x05}, /* MDPLS17 */ + /* DOL mode settings */ + {0x3019, CRL_REG_LEN_08BIT, 0x01}, /* DOLMODE,DOLSCDEN,HINFOEN */ + {0x3041, CRL_REG_LEN_08BIT, 0x31}, /* DOLSET1 */ + {0x3042, CRL_REG_LEN_08BIT, 0x04}, /* HCYCLE */ + {0x3043, CRL_REG_LEN_08BIT, 0x01}, + {0x30E9, CRL_REG_LEN_08BIT, 0x01}, /* DOLSET2 */ +}; + +static struct crl_register_write_rep imx274_1288_738_RAW10_NORMAL[] = { + {0x30E2, CRL_REG_LEN_08BIT, 0x03}, /* VCUTMODE */ + {0x3130, CRL_REG_LEN_08BIT, 0xE2}, /* WRITE_VSIZE */ + {0x3131, CRL_REG_LEN_08BIT, 0x02}, + {0x3132, CRL_REG_LEN_08BIT, 0xDE}, /* Y_OUT_SIZE */ + {0x3133, CRL_REG_LEN_08BIT, 0x02}, + {0x3004, CRL_REG_LEN_08BIT, 0x03}, /* MDSEL */ + {0x3005, CRL_REG_LEN_08BIT, 0x31}, + {0x3006, CRL_REG_LEN_08BIT, 0x00}, + {0x3007, CRL_REG_LEN_08BIT, 0x09}, + {0x3A41, CRL_REG_LEN_08BIT, 0x04}, /* MDSEL5 */ + {0x3342, CRL_REG_LEN_08BIT, 0x0A}, /* MDPLS01 */ + {0x3343, CRL_REG_LEN_08BIT, 0x00}, + {0x3344, CRL_REG_LEN_08BIT, 0x1B}, + {0x3345, CRL_REG_LEN_08BIT, 0x00}, + {0x3528, CRL_REG_LEN_08BIT, 0x0E}, /* MDPLS03 */ + {0x3A54, CRL_REG_LEN_08BIT, 0x8C}, /* Metadata Size */ + {0x3A55, CRL_REG_LEN_08BIT, 0x00}, + {0x3554, CRL_REG_LEN_08BIT, 0x00}, + {0x3555, CRL_REG_LEN_08BIT, 0x01}, + {0x3556, CRL_REG_LEN_08BIT, 
0x01}, + {0x3557, CRL_REG_LEN_08BIT, 0x01}, + {0x3558, CRL_REG_LEN_08BIT, 0x01}, + {0x3559, CRL_REG_LEN_08BIT, 0x00}, + {0x355A, CRL_REG_LEN_08BIT, 0x00}, + {0x35BA, CRL_REG_LEN_08BIT, 0x0E}, + {0x366A, CRL_REG_LEN_08BIT, 0x1B}, + {0x366B, CRL_REG_LEN_08BIT, 0x19}, + {0x366C, CRL_REG_LEN_08BIT, 0x17}, + {0x366D, CRL_REG_LEN_08BIT, 0x17}, + {0x33A6, CRL_REG_LEN_08BIT, 0x01}, + {0x306B, CRL_REG_LEN_08BIT, 0x05}, /* MDPLS17 */ + {0x3019, CRL_REG_LEN_08BIT, 0x00}, /* Disable DOL */ +}; + +static struct crl_register_write_rep imx274_streamon_regs[] = { + {0x00, CRL_REG_LEN_DELAY, 10, 0x00}, /* Add a pre 10ms delay */ + {IMX274_REG_STANDBY, CRL_REG_LEN_08BIT, 0x00}, + {0x303E, CRL_REG_LEN_08BIT, 0x02}, + {0x00, CRL_REG_LEN_DELAY, 7, 0x00}, /* Add a 7ms delay */ + {0x30F4, CRL_REG_LEN_08BIT, 0x00}, + {0x3018, CRL_REG_LEN_08BIT, 0x02}, +}; + +static struct crl_register_write_rep imx274_streamoff_regs[] = { + {0x00, CRL_REG_LEN_DELAY, 10, 0x00}, /* Add a pre 10ms delay */ + {IMX274_REG_STANDBY, CRL_REG_LEN_08BIT, 0x01}, + {0x303E, CRL_REG_LEN_08BIT, 0x02}, + {0x00, CRL_REG_LEN_DELAY, 7, 0x00}, /* Add a delay */ + {0x30F4, CRL_REG_LEN_08BIT, 0x01}, + {0x3018, CRL_REG_LEN_08BIT, 0x02}, +}; + +static struct crl_arithmetic_ops imx274_rshift8_ops[] = { + { + .op = CRL_BITWISE_RSHIFT, + .operand.entity_val = 8, + } +}; + +static struct crl_arithmetic_ops imx274_rshift16_ops[] = { + { + .op = CRL_BITWISE_RSHIFT, + .operand.entity_val = 16, + } +}; + +static struct crl_arithmetic_ops imx274_nan_gain_ops[] = { + { + .op = CRL_BITWISE_RSHIFT, + .operand.entity_val = 8, + }, + { + .op = CRL_BITWISE_AND, + .operand.entity_val = 0x07, + } +}; + +/* imx274 use register PGC[10:0] 300A 300B to indicate analog gain */ +static struct crl_dynamic_register_access imx274_ana_gain_global_regs[] = { + { + .address = 0x300A, + .len = CRL_REG_LEN_08BIT, + .ops_items = 0, + .ops = 0, + }, + { + .address = 0x300B, + .len = CRL_REG_LEN_08BIT, + .ops_items = ARRAY_SIZE(imx274_nan_gain_ops), + .ops = 
imx274_nan_gain_ops, + }, +}; + +static struct crl_dynamic_register_access imx274_dig_gain_regs[] = { + { + .address = 0x3012, + .len = CRL_REG_LEN_08BIT, + .ops_items = 0, + .ops = 0, + .mask = 0xf, + }, +}; + +/* shr = fll - exposure */ +static struct crl_arithmetic_ops imx274_shr_lsb_ops[] = { + { + .op = CRL_SUBTRACT, + .operand.entity_type = CRL_DYNAMIC_VAL_OPERAND_TYPE_CTRL_VAL, + .operand.entity_val = V4L2_CID_FRAME_LENGTH_LINES, + } +}; + +static struct crl_arithmetic_ops imx274_shr_msb_ops[] = { + { + .op = CRL_SUBTRACT, + .operand.entity_type = CRL_DYNAMIC_VAL_OPERAND_TYPE_CTRL_VAL, + .operand.entity_val = V4L2_CID_FRAME_LENGTH_LINES, + }, + { + .op = CRL_BITWISE_RSHIFT, + .operand.entity_type = CRL_DYNAMIC_VAL_OPERAND_TYPE_CONST, + .operand.entity_val = 8, + } +}; + +static struct crl_dynamic_register_access imx274_shr_regs[] = { + { + .address = 0x300C, + .len = CRL_REG_LEN_08BIT, + .ops_items = ARRAY_SIZE(imx274_shr_lsb_ops), + .ops = imx274_shr_lsb_ops, + .mask = 0xff, + }, + { + .address = 0x300D, + .len = CRL_REG_LEN_08BIT, + .ops_items = ARRAY_SIZE(imx274_shr_msb_ops), + .ops = imx274_shr_msb_ops, + .mask = 0xff, + }, +}; + +/* Short exposure for DOL */ +static struct crl_dynamic_register_access imx274_shs1_regs[] = { + { + .address = 0x302E, + .len = CRL_REG_LEN_08BIT, + .ops_items = 0, + .ops = 0, + .mask = 0xff, + }, + { + .address = 0x302F, + .len = CRL_REG_LEN_08BIT, + .ops_items = ARRAY_SIZE(imx274_rshift8_ops), + .ops = imx274_rshift8_ops, + .mask = 0xff, + }, +}; + +/* Long exposure for DOL */ +static struct crl_dynamic_register_access imx274_shs2_regs[] = { + { + .address = 0x3030, + .len = CRL_REG_LEN_08BIT, + .ops_items = 0, + .ops = 0, + .mask = 0xff, + }, + { + .address = 0x3031, + .len = CRL_REG_LEN_08BIT, + .ops_items = ARRAY_SIZE(imx274_rshift8_ops), + .ops = imx274_rshift8_ops, + .mask = 0xff, + }, +}; + +static struct crl_dynamic_register_access imx274_rhs1_regs[] = { + { + .address = 0x3032, + .len = CRL_REG_LEN_08BIT, + 
.ops_items = 0, + .ops = 0, + .mask = 0xff, + }, + { + .address = 0x3033, + .len = CRL_REG_LEN_08BIT, + .ops_items = ARRAY_SIZE(imx274_rshift8_ops), + .ops = imx274_rshift8_ops, + .mask = 0xff, + }, +}; + +static struct crl_dynamic_register_access imx274_fll_regs[] = { + /* + * Use 8bits access since 24bits or 32bits access will fail + * TODO: root cause the 24bits and 32bits access issues + */ + { + .address = 0x30F8, + .len = CRL_REG_LEN_08BIT, + .ops_items = 0, + .ops = 0, + .mask = 0xff, + }, + { + .address = 0x30F9, + .len = CRL_REG_LEN_08BIT, + .ops_items = ARRAY_SIZE(imx274_rshift8_ops), + .ops = imx274_rshift8_ops, + .mask = 0xff, + }, + { + .address = 0x30FA, + .len = CRL_REG_LEN_08BIT, + .ops_items = ARRAY_SIZE(imx274_rshift16_ops), + .ops = imx274_rshift16_ops, + .mask = 0xf, + }, +}; + +static struct crl_dynamic_register_access imx274_llp_regs[] = { + { + .address = 0x30F6, + .len = CRL_REG_LEN_08BIT, + .ops_items = 0, + .ops = 0, + .mask = 0xff, + }, + { + .address = 0x30F7, + .len = CRL_REG_LEN_08BIT, + .ops_items = ARRAY_SIZE(imx274_rshift8_ops), + .ops = imx274_rshift8_ops, + .mask = 0xff, + }, +}; + +static struct crl_sensor_detect_config imx274_sensor_detect_regset[] = { + { + .reg = { 0x30F8, CRL_REG_LEN_08BIT, 0x000000ff }, + .width = 7, + }, + { + .reg = { 0x30F9, CRL_REG_LEN_08BIT, 0x000000ff }, + .width = 7, + }, +}; + +static struct crl_pll_configuration imx274_pll_configurations[] = { + { + .input_clk = 24000000, + .op_sys_clk = 720000000, /* 1440000000/2 */ + .bitsperpixel = 10, + .pixel_rate_csi = 72000000, + .pixel_rate_pa = 72000000, /* 72MHz */ + .csi_lanes = 4, + .comp_items = 0, + .ctrl_data = 0, + .pll_regs_items = ARRAY_SIZE(imx274_pll_1440mbps), + .pll_regs = imx274_pll_1440mbps, + }, + { + .input_clk = 24000000, + .op_sys_clk = 720000000, /* 1440000000/2 */ + .bitsperpixel = 12, + .pixel_rate_csi = 72000000, + .pixel_rate_pa = 72000000, /* 72MHz */ + .csi_lanes = 4, + .comp_items = 0, + .ctrl_data = 0, + .pll_regs_items = 
ARRAY_SIZE(imx274_pll_1440mbps), + .pll_regs = imx274_pll_1440mbps, + } +}; + +static struct crl_subdev_rect_rep imx274_3864_2202_rects[] = { + { + .subdev_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .in_rect.left = 0, + .in_rect.top = 0, + .in_rect.width = 3868, + .in_rect.height = 4536, + .out_rect.left = 0, + .out_rect.top = 0, + .out_rect.width = 3868, + .out_rect.height = 4536, + }, + { + .subdev_type = CRL_SUBDEV_TYPE_BINNER, + .in_rect.left = 0, + .in_rect.top = 0, + .in_rect.width = 3868, + .in_rect.height = 4536, + .out_rect.left = 0, + .out_rect.top = 0, + .out_rect.width = 3864, + .out_rect.height = 2202, + } +}; + +static struct crl_subdev_rect_rep imx274_3864_2174_rects[] = { + { + .subdev_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .in_rect.left = 0, + .in_rect.top = 0, + .in_rect.width = 3868, + .in_rect.height = 4536, + .out_rect.left = 0, + .out_rect.top = 0, + .out_rect.width = 3868, + .out_rect.height = 4536, + }, + { + .subdev_type = CRL_SUBDEV_TYPE_BINNER, + .in_rect.left = 0, + .in_rect.top = 0, + .in_rect.width = 3868, + .in_rect.height = 4536, + .out_rect.left = 0, + .out_rect.top = 0, + .out_rect.width = 3864, + .out_rect.height = 2174, + } +}; + +/* DOL pixel array includes 4 pixel sync code each line */ +static struct crl_subdev_rect_rep imx274_3868_4536_rects[] = { + { + .subdev_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .in_rect.left = 0, + .in_rect.top = 0, + .in_rect.width = 3868, + .in_rect.height = 4536, + .out_rect.left = 0, + .out_rect.top = 0, + .out_rect.width = 3868, + .out_rect.height = 4536, + }, + { + .subdev_type = CRL_SUBDEV_TYPE_BINNER, + .in_rect.left = 0, + .in_rect.top = 0, + .in_rect.width = 3868, + .in_rect.height = 4536, + .out_rect.left = 0, + .out_rect.top = 0, + .out_rect.width = 3868, + .out_rect.height = 4536, + } +}; + +static struct crl_subdev_rect_rep imx274_1932_1094_rects[] = { + { + .subdev_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .in_rect.left = 0, + .in_rect.top = 0, + .in_rect.width = 3868, + .in_rect.height = 4536, + 
.out_rect.left = 0, + .out_rect.top = 0, + .out_rect.width = 3868, + .out_rect.height = 4536, + }, + { + .subdev_type = CRL_SUBDEV_TYPE_BINNER, + .in_rect.left = 0, + .in_rect.top = 0, + .in_rect.width = 3868, + .in_rect.height = 4536, + .out_rect.left = 0, + .out_rect.top = 0, + .out_rect.width = 1932, + .out_rect.height = 1094, + } +}; + +/* DOL pixel array includes 4 pixel sync code each line */ +static struct crl_subdev_rect_rep imx274_1936_2376_rects[] = { + { + .subdev_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .in_rect.left = 0, + .in_rect.top = 0, + .in_rect.width = 3868, + .in_rect.height = 4536, + .out_rect.left = 0, + .out_rect.top = 0, + .out_rect.width = 3868, + .out_rect.height = 4536, + }, + { + .subdev_type = CRL_SUBDEV_TYPE_BINNER, + .in_rect.left = 0, + .in_rect.top = 0, + .in_rect.width = 3868, + .in_rect.height = 4536, + .out_rect.left = 0, + .out_rect.top = 0, + .out_rect.width = 1936, + .out_rect.height = 2376, + } +}; + +static struct crl_subdev_rect_rep imx274_1288_738_rects[] = { + { + .subdev_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .in_rect.left = 0, + .in_rect.top = 0, + .in_rect.width = 3868, + .in_rect.height = 4536, + .out_rect.left = 0, + .out_rect.top = 0, + .out_rect.width = 3868, + .out_rect.height = 4536, + }, + { + .subdev_type = CRL_SUBDEV_TYPE_BINNER, + .in_rect.left = 0, + .in_rect.top = 0, + .in_rect.width = 3868, + .in_rect.height = 4536, + .out_rect.left = 0, + .out_rect.top = 0, + .out_rect.width = 1288, + .out_rect.height = 738, + } +}; + +static struct crl_mode_rep imx274_modes[] = { + { + /* mode 0 12bit all pixel scan per datasheet */ + .sd_rects_items = ARRAY_SIZE(imx274_3864_2202_rects), + .sd_rects = imx274_3864_2202_rects, + .binn_hor = 1, + .binn_vert = 2, + .scale_m = 1, + .width = 3864, + .height = 2202, + .min_llp = 493, /* 01EDh */ + .min_fll = 4868, /* default 30fps */ + .comp_items = 0, + .ctrl_data = 0, + .mode_regs_items = + ARRAY_SIZE(imx274_3864_2202_RAW12_NORMAL), + .mode_regs = imx274_3864_2202_RAW12_NORMAL, 
+ }, + { + /* mode 1 10bit all pixel scan per datasheet */ + .sd_rects_items = ARRAY_SIZE(imx274_3864_2174_rects), + .sd_rects = imx274_3864_2174_rects, + .binn_hor = 1, + .binn_vert = 2, + .scale_m = 1, + .width = 3864, + .height = 2174, + .min_llp = 493, /* 01EDh */ + .min_fll = 4868, /* default 30fps */ + .comp_items = 0, + .ctrl_data = 0, + .mode_regs_items = + ARRAY_SIZE(imx274_3864_2174_RAW10_NORMAL), + .mode_regs = imx274_3864_2174_RAW10_NORMAL, + }, + { + /* mode 1 DOL 10bit per datasheet */ + .sd_rects_items = ARRAY_SIZE(imx274_3868_4536_rects), + .sd_rects = imx274_3868_4536_rects, + .binn_hor = 1, + .binn_vert = 1, + .scale_m = 1, + .width = 3868, + .height = 4536, /* 2*(2160+22+VBP) */ + .min_llp = 1052, /* 041Ch */ + .min_fll = 2281, /* 30fps */ + .comp_items = 0, + .ctrl_data = 0, + .mode_regs_items = + ARRAY_SIZE(imx274_3868_4536_RAW10_DOL), + .mode_regs = imx274_3868_4536_RAW10_DOL, + }, + { + /* mode 3 10bit all pixel scan per datasheet */ + .sd_rects_items = ARRAY_SIZE(imx274_1932_1094_rects), + .sd_rects = imx274_1932_1094_rects, + .binn_hor = 2, + .binn_vert = 4, + .scale_m = 1, + .width = 1932, + .height = 1094, + .min_llp = 493, /* 01EDh */ + .min_fll = 4868, /* default 30fps */ + .comp_items = 0, + .ctrl_data = 0, + .mode_regs_items = ARRAY_SIZE( + imx274_1932_1094_RAW10_NORMAL), + .mode_regs = imx274_1932_1094_RAW10_NORMAL, + }, + { + /* mode 3 12bit all pixel scan per datasheet */ + .sd_rects_items = ARRAY_SIZE(imx274_1932_1094_rects), + .sd_rects = imx274_1932_1094_rects, + .binn_hor = 2, + .binn_vert = 4, + .scale_m = 1, + .width = 1932, + .height = 1094, + .min_llp = 493, /* 01EDh */ + .min_fll = 4868, /* default 30fps */ + .comp_items = 0, + .ctrl_data = 0, + .mode_regs_items = ARRAY_SIZE( + imx274_1932_1094_RAW12_NORMAL), + .mode_regs = imx274_1932_1094_RAW12_NORMAL, + }, + { + /* mode 3 DOL bit10 per datasheet */ + .sd_rects_items = ARRAY_SIZE(imx274_1936_2376_rects), + .sd_rects = imx274_1936_2376_rects, + .binn_hor = 1, + .binn_vert 
= 1, + .scale_m = 1, + .width = 1936, + .height = 2376, + .min_llp = 1052, /* 041Ch */ + .min_fll = 2281, /* 30fps */ + .comp_items = 0, + .ctrl_data = 0, + .mode_regs_items = ARRAY_SIZE( + imx274_1936_2376_RAW10_DOL), + .mode_regs = imx274_1936_2376_RAW10_DOL, + }, + { + /* mode 5 bit10 per datasheet */ + .sd_rects_items = ARRAY_SIZE(imx274_1288_738_rects), + .sd_rects = imx274_1288_738_rects, + .binn_hor = 3, + .binn_vert = 6, + .scale_m = 1, + .width = 1288, + .height = 738, + .min_llp = 260, + .min_fll = 2310, + .comp_items = 0, + .ctrl_data = 0, + .mode_regs_items = ARRAY_SIZE( + imx274_1288_738_RAW10_NORMAL), + .mode_regs = imx274_1288_738_RAW10_NORMAL, + }, +}; + +struct crl_sensor_subdev_config imx274_sensor_subdevs[] = { + { + .subdev_type = CRL_SUBDEV_TYPE_BINNER, + .name = "imx274 binner", + }, + { + .subdev_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .name = "imx274 pixel array", + } +}; + +static struct crl_sensor_limits imx274_sensor_limits = { + .x_addr_min = 0, + .y_addr_min = 0, + .x_addr_max = 3868, /* pixel area length and width */ + .y_addr_max = 4536, + .min_frame_length_lines = 1111, + .max_frame_length_lines = 65535, + .min_line_length_pixels = 260, + .max_line_length_pixels = 32752, +}; + +static struct crl_flip_data imx274_flip_configurations[] = { + { + .flip = CRL_FLIP_DEFAULT_NONE, + .pixel_order = CRL_PIXEL_ORDER_RGGB, + }, + { + .flip = CRL_FLIP_HFLIP, + .pixel_order = CRL_PIXEL_ORDER_GRBG, + }, + { + .flip = CRL_FLIP_VFLIP, + .pixel_order = CRL_PIXEL_ORDER_GBRG, + }, + { + .flip = CRL_FLIP_HFLIP_VFLIP, + .pixel_order = CRL_PIXEL_ORDER_BGGR, + } +}; + +static struct crl_csi_data_fmt imx274_crl_csi_data_fmt[] = { + { + .code = MEDIA_BUS_FMT_SGRBG10_1X10, + .pixel_order = CRL_PIXEL_ORDER_GRBG, + .bits_per_pixel = 10, + .regs_items = 0, + .regs = 0, + }, + { + .code = MEDIA_BUS_FMT_SRGGB10_1X10, + .pixel_order = CRL_PIXEL_ORDER_RGGB, /* default order */ + .bits_per_pixel = 10, + .regs_items = 0, + .regs = 0, + }, + { + .code = 
MEDIA_BUS_FMT_SBGGR10_1X10, + .pixel_order = CRL_PIXEL_ORDER_BGGR, + .bits_per_pixel = 10, + .regs_items = 0, + .regs = 0, + }, + { + .code = MEDIA_BUS_FMT_SGBRG10_1X10, + .pixel_order = CRL_PIXEL_ORDER_GBRG, + .bits_per_pixel = 10, + .regs_items = 0, + .regs = 0, + }, + { + .code = MEDIA_BUS_FMT_SGRBG12_1X12, + .pixel_order = CRL_PIXEL_ORDER_GRBG, + .bits_per_pixel = 12, + .regs_items = 0, + .regs = 0, + }, + { + .code = MEDIA_BUS_FMT_SRGGB12_1X12, + .pixel_order = CRL_PIXEL_ORDER_RGGB, /* default order */ + .bits_per_pixel = 12, + .regs_items = 0, + .regs = 0, + }, + { + .code = MEDIA_BUS_FMT_SBGGR12_1X12, + .pixel_order = CRL_PIXEL_ORDER_BGGR, + .bits_per_pixel = 12, + .regs_items = 0, + .regs = 0, + }, + { + .code = MEDIA_BUS_FMT_SGBRG12_1X12, + .pixel_order = CRL_PIXEL_ORDER_GBRG, + .bits_per_pixel = 12, + .regs_items = 0, + .regs = 0, + } +}; + +static struct crl_v4l2_ctrl imx274_v4l2_ctrls[] = { + { + .sd_type = CRL_SUBDEV_TYPE_BINNER, + .op_type = CRL_V4L2_CTRL_SET_OP, + .context = SENSOR_IDLE, + .ctrl_id = V4L2_CID_LINK_FREQ, + .name = "V4L2_CID_LINK_FREQ", + .type = CRL_V4L2_CTRL_TYPE_MENU_INT, + .data.v4l2_int_menu.def = 0, + .data.v4l2_int_menu.max = 0, + .data.v4l2_int_menu.menu = 0, + .flags = 0, + .impact = CRL_IMPACTS_NO_IMPACT, + .regs_items = 0, + .regs = 0, + .dep_items = 0, + .dep_ctrls = 0, + }, + { + .sd_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .op_type = CRL_V4L2_CTRL_GET_OP, + .context = SENSOR_POWERED_ON, + .ctrl_id = V4L2_CID_PIXEL_RATE, + .name = "V4L2_CID_PIXEL_RATE_PA", + .type = CRL_V4L2_CTRL_TYPE_INTEGER, + .data.std_data.min = 0, + .data.std_data.max = INT_MAX, + .data.std_data.step = 1, + .data.std_data.def = 0, + .flags = 0, + .impact = CRL_IMPACTS_NO_IMPACT, + .regs_items = 0, + .regs = 0, + .dep_items = 0, + .dep_ctrls = 0, + }, + { + .sd_type = CRL_SUBDEV_TYPE_BINNER, + .op_type = CRL_V4L2_CTRL_GET_OP, + .context = SENSOR_POWERED_ON, + .ctrl_id = V4L2_CID_PIXEL_RATE, + .name = "V4L2_CID_PIXEL_RATE_CSI", + .type = 
CRL_V4L2_CTRL_TYPE_INTEGER, + .data.std_data.min = 0, + .data.std_data.max = INT_MAX, + .data.std_data.step = 1, + .data.std_data.def = 0, + .flags = 0, + .impact = CRL_IMPACTS_NO_IMPACT, + .regs_items = 0, + .regs = 0, + .dep_items = 0, + .dep_ctrls = 0, + }, + { + .sd_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .op_type = CRL_V4L2_CTRL_SET_OP, + .context = SENSOR_POWERED_ON, + .ctrl_id = V4L2_CID_FRAME_LENGTH_LINES, + .name = "Frame length lines", + .type = CRL_V4L2_CTRL_TYPE_CUSTOM, + .data.std_data.min = 1111, + .data.std_data.max = IMX274_VMAX, + .data.std_data.step = 1, + .data.std_data.def = 1111, + .flags = V4L2_CTRL_FLAG_UPDATE, + .impact = CRL_IMPACTS_NO_IMPACT, + .ctrl = 0, + .regs_items = ARRAY_SIZE(imx274_fll_regs), + .regs = imx274_fll_regs, + .dep_items = 0, + .dep_ctrls = 0, + .v4l2_type = V4L2_CTRL_TYPE_INTEGER, + }, + { + .sd_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .op_type = CRL_V4L2_CTRL_SET_OP, + .context = SENSOR_POWERED_ON, + .ctrl_id = V4L2_CID_LINE_LENGTH_PIXELS, + .name = "Line Length Pixels", + .type = CRL_V4L2_CTRL_TYPE_CUSTOM, + .data.std_data.min = 260, + .data.std_data.max = IMX274_HMAX, + .data.std_data.step = 1, + .data.std_data.def = 260, + .flags = V4L2_CTRL_FLAG_UPDATE, + .impact = CRL_IMPACTS_NO_IMPACT, + .ctrl = 0, + .regs_items = ARRAY_SIZE(imx274_llp_regs), + .regs = imx274_llp_regs, + .dep_items = 0, + .dep_ctrls = 0, + .v4l2_type = V4L2_CTRL_TYPE_INTEGER, + }, + { + .sd_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .op_type = CRL_V4L2_CTRL_SET_OP, + .context = SENSOR_POWERED_ON, + .ctrl_id = V4L2_CID_GAIN, + .name = "Digital Gain", + .type = CRL_V4L2_CTRL_TYPE_INTEGER, + .data.std_data.min = 0, + .data.std_data.max = 6, + .data.std_data.step = 1, + .data.std_data.def = 0, + .flags = 0, + .impact = CRL_IMPACTS_NO_IMPACT, + .ctrl = 0, + .regs_items = ARRAY_SIZE(imx274_dig_gain_regs), + .regs = imx274_dig_gain_regs, + .dep_items = 0, + .dep_ctrls = 0, + }, + { + .sd_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .op_type = CRL_V4L2_CTRL_SET_OP, + 
.context = SENSOR_POWERED_ON, + .ctrl_id = V4L2_CID_ANALOGUE_GAIN, + .name = "V4L2_CID_ANALOGUE_GAIN", + .type = CRL_V4L2_CTRL_TYPE_INTEGER, + .data.std_data.min = 0, + .data.std_data.max = 0x7A5, + .data.std_data.step = 1, + .data.std_data.def = 0, + .flags = 0, + .impact = CRL_IMPACTS_NO_IMPACT, + .ctrl = 0, + .regs_items = ARRAY_SIZE(imx274_ana_gain_global_regs), + .regs = imx274_ana_gain_global_regs, + .dep_items = 0, + .dep_ctrls = 0, + }, + { + .sd_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .op_type = CRL_V4L2_CTRL_SET_OP, + .context = SENSOR_POWERED_ON, + .ctrl_id = V4L2_CID_EXPOSURE, + .name = "V4L2_CID_EXPOSURE", + .type = CRL_V4L2_CTRL_TYPE_INTEGER, + .data.std_data.min = 4, + .data.std_data.max = IMX274_MAX_SHS2, + .data.std_data.step = 1, + .data.std_data.def = 0x400, + .flags = 0, + .impact = CRL_IMPACTS_NO_IMPACT, + .ctrl = 0, + .regs_items = ARRAY_SIZE(imx274_shr_regs), + .regs = imx274_shr_regs, + .dep_items = 0, + .dep_ctrls = 0, + }, + { + .sd_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .op_type = CRL_V4L2_CTRL_SET_OP, + .context = SENSOR_POWERED_ON, + .ctrl_id = CRL_CID_EXPOSURE_SHS1, + .name = "CRL_CID_EXPOSURE_SHS1", + .type = CRL_V4L2_CTRL_TYPE_CUSTOM, + .data.std_data.min = 4, + .data.std_data.max = IMX274_MAX_SHS1, + .data.std_data.step = 1, + .data.std_data.def = 0x06, + .flags = V4L2_CTRL_FLAG_UPDATE, + .impact = CRL_IMPACTS_NO_IMPACT, + .ctrl = 0, + .regs_items = ARRAY_SIZE(imx274_shs1_regs), + .regs = imx274_shs1_regs, + .dep_items = 0, + .dep_ctrls = 0, + .v4l2_type = V4L2_CTRL_TYPE_INTEGER, + }, + { + .sd_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .op_type = CRL_V4L2_CTRL_SET_OP, + .context = SENSOR_POWERED_ON, + .ctrl_id = CRL_CID_EXPOSURE_SHS2, + .name = "CRL_CID_EXPOSURE_SHS2", + .type = CRL_V4L2_CTRL_TYPE_CUSTOM, + .data.std_data.min = 10, + .data.std_data.max = IMX274_MAX_SHS2, + .data.std_data.step = 1, + .data.std_data.def = 0x2d, + .flags = V4L2_CTRL_FLAG_UPDATE, + .impact = CRL_IMPACTS_NO_IMPACT, + .ctrl = 0, + .regs_items = 
ARRAY_SIZE(imx274_shs2_regs), + .regs = imx274_shs2_regs, + .dep_items = 0, + .dep_ctrls = 0, + .v4l2_type = V4L2_CTRL_TYPE_INTEGER, + }, + { + .sd_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .op_type = CRL_V4L2_CTRL_SET_OP, + .context = SENSOR_POWERED_ON, + .ctrl_id = CRL_CID_EXPOSURE_RHS1, + .name = "CRL_CID_EXPOSURE_RHS1", + .type = CRL_V4L2_CTRL_TYPE_CUSTOM, + .data.std_data.min = 6, + .data.std_data.max = IMX274_MAX_RHS1, + .data.std_data.step = 1, + .data.std_data.def = 0x56, /* Fixed to 86 by default */ + .flags = V4L2_CTRL_FLAG_UPDATE, + .impact = CRL_IMPACTS_NO_IMPACT, + .ctrl = 0, + .regs_items = ARRAY_SIZE(imx274_rhs1_regs), + .regs = imx274_rhs1_regs, + .dep_items = 0, + .dep_ctrls = 0, + .v4l2_type = V4L2_CTRL_TYPE_INTEGER, + }, + { + .sd_type = CRL_SUBDEV_TYPE_BINNER, + .op_type = CRL_V4L2_CTRL_SET_OP, + .context = SENSOR_POWERED_ON, + .ctrl_id = CRL_CID_SENSOR_MODE, + .name = "CRL_CID_SENSOR_MODE", + .type = CRL_V4L2_CTRL_TYPE_CUSTOM, + .data.std_data.min = 0, + .data.std_data.max = ARRAY_SIZE(imx274_modes) - 1, + .data.std_data.step = 1, + .data.std_data.def = 0, + .flags = V4L2_CTRL_FLAG_UPDATE, + .impact = CRL_IMPACTS_MODE_SELECTION, + .ctrl = 0, + .regs_items = 0, + .regs = 0, + .dep_items = 0, + .dep_ctrls = 0, + .v4l2_type = V4L2_CTRL_TYPE_INTEGER, + }, +}; + +static struct crl_arithmetic_ops imx274_frame_desc_width_ops[] = { + { + .op = CRL_ASSIGNMENT, + .operand.entity_type = CRL_DYNAMIC_VAL_OPERAND_TYPE_VAR_REF, + .operand.entity_val = CRL_VAR_REF_OUTPUT_WIDTH, + }, +}; + +static struct crl_arithmetic_ops imx274_frame_desc_height_ops[] = { + { + .op = CRL_ASSIGNMENT, + .operand.entity_type = CRL_DYNAMIC_VAL_OPERAND_TYPE_CONST, + .operand.entity_val = 1, + }, +}; + +static struct crl_frame_desc imx274_frame_desc[] = { + { + .flags.entity_val = 0, + .bpp.entity_type = CRL_DYNAMIC_VAL_OPERAND_TYPE_VAR_REF, + .bpp.entity_val = CRL_VAR_REF_BITSPERPIXEL, + .pixelcode.entity_val = MEDIA_BUS_FMT_FIXED, + .length.entity_val = 0, + .start_line.entity_val = 
0, + .start_pixel.entity_val = 0, + .width = { + .ops_items = ARRAY_SIZE(imx274_frame_desc_width_ops), + .ops = imx274_frame_desc_width_ops, + }, + .height = { + .ops_items = ARRAY_SIZE(imx274_frame_desc_height_ops), + .ops = imx274_frame_desc_height_ops, + }, + .csi2_channel.entity_val = 0, + .csi2_data_type.entity_val = 0x12, + }, +}; + +static struct crl_power_seq_entity imx274_power_items[] = { + { + .type = CRL_POWER_ETY_CLK_FRAMEWORK, + .val = 24000000, + }, + { + .type = CRL_POWER_ETY_GPIO_FROM_PDATA, + .val = 1, + }, +}; + +struct crl_sensor_configuration imx274_crl_configuration = { + + .power_items = ARRAY_SIZE(imx274_power_items), + .power_entities = imx274_power_items, + + .powerup_regs_items = ARRAY_SIZE(imx274_powerup_standby), + .powerup_regs = imx274_powerup_standby, + + .poweroff_regs_items = 0, + .poweroff_regs = 0, + + .id_reg_items = ARRAY_SIZE(imx274_sensor_detect_regset), + .id_regs = imx274_sensor_detect_regset, + + .subdev_items = ARRAY_SIZE(imx274_sensor_subdevs), + .subdevs = imx274_sensor_subdevs, + + .sensor_limits = &imx274_sensor_limits, + + .pll_config_items = ARRAY_SIZE(imx274_pll_configurations), + .pll_configs = imx274_pll_configurations, + + .modes_items = ARRAY_SIZE(imx274_modes), + .modes = imx274_modes, + + .streamon_regs_items = ARRAY_SIZE(imx274_streamon_regs), + .streamon_regs = imx274_streamon_regs, + + .streamoff_regs_items = ARRAY_SIZE(imx274_streamoff_regs), + .streamoff_regs = imx274_streamoff_regs, + + .v4l2_ctrls_items = ARRAY_SIZE(imx274_v4l2_ctrls), + .v4l2_ctrl_bank = imx274_v4l2_ctrls, + + .csi_fmts_items = ARRAY_SIZE(imx274_crl_csi_data_fmt), + .csi_fmts = imx274_crl_csi_data_fmt, + + .flip_items = ARRAY_SIZE(imx274_flip_configurations), + .flip_data = imx274_flip_configurations, + + .frame_desc_entries = ARRAY_SIZE(imx274_frame_desc), + .frame_desc_type = CRL_V4L2_MBUS_FRAME_DESC_TYPE_CSI2, + .frame_desc = imx274_frame_desc, + +}; + +#endif /* __CRLMODULE_IMX274_CONFIGURATION_H_ */ diff --git 
a/drivers/media/i2c/crlmodule/crl_imx290_configuration.h b/drivers/media/i2c/crlmodule/crl_imx290_configuration.h new file mode 100644 index 0000000000000..6a3561bea27fe --- /dev/null +++ b/drivers/media/i2c/crlmodule/crl_imx290_configuration.h @@ -0,0 +1,1078 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (C) 2017 - 2018 Intel Corporation + * + * Author: Yuning Pu + * + */ + +#ifndef __CRLMODULE_IMX290_CONFIGURATION_H_ +#define __CRLMODULE_IMX290_CONFIGURATION_H_ + +#include "crlmodule-sensor-ds.h" + +#define IMX290_REG_STANDBY 0x3000 +#define IMX290_REG_XMSTA 0x3002 + +#define IMX290_HMAX 65535 +#define IMX290_VMAX 131071 +#define IMX290_MAX_SHS1 (IMX290_VMAX - 2) + +static struct crl_register_write_rep imx290_pll_891mbps[] = { + {0x3405, CRL_REG_LEN_08BIT, 0x00}, /* repetition */ + {0x3407, CRL_REG_LEN_08BIT, 0x03}, /* physical lane num(fixed) */ + {0x3009, CRL_REG_LEN_08BIT, 0x00}, /* FRSEL FDG_SEL */ + {0x300F, CRL_REG_LEN_08BIT, 0x00}, /* fixed setting */ + {0x3010, CRL_REG_LEN_08BIT, 0x21}, + {0x3012, CRL_REG_LEN_08BIT, 0x64}, + {0x3414, CRL_REG_LEN_08BIT, 0x0A}, + {0x3415, CRL_REG_LEN_08BIT, 0x00}, + {0x3016, CRL_REG_LEN_08BIT, 0x09}, /* changed */ + {0x3119, CRL_REG_LEN_08BIT, 0x9E}, + {0x311C, CRL_REG_LEN_08BIT, 0x1E}, + {0x311E, CRL_REG_LEN_08BIT, 0x08}, + {0x3128, CRL_REG_LEN_08BIT, 0x05}, + {0x332C, CRL_REG_LEN_08BIT, 0xD3}, + {0x332D, CRL_REG_LEN_08BIT, 0x10}, + {0x332E, CRL_REG_LEN_08BIT, 0x0D}, + {0x313D, CRL_REG_LEN_08BIT, 0x83}, + {0x3443, CRL_REG_LEN_08BIT, 0x03}, /* csi_lane_mode(fixed) */ + {0x3444, CRL_REG_LEN_08BIT, 0x20}, /* extck_freq */ + {0x3445, CRL_REG_LEN_08BIT, 0x25}, + {0x3446, CRL_REG_LEN_08BIT, 0x77}, /* tclkpost */ + {0x3447, CRL_REG_LEN_08BIT, 0x00}, + {0x3448, CRL_REG_LEN_08BIT, 0x67}, /* thszero */ + {0x3449, CRL_REG_LEN_08BIT, 0x00}, + {0x344A, CRL_REG_LEN_08BIT, 0x47}, /* thsprepare */ + {0x344B, CRL_REG_LEN_08BIT, 0x00}, + {0x344C, CRL_REG_LEN_08BIT, 0x37}, /* thstrail */ + {0x344D, CRL_REG_LEN_08BIT, 0x00}, + 
{0x344E, CRL_REG_LEN_08BIT, 0x3F}, /* thstrail */ + {0x344F, CRL_REG_LEN_08BIT, 0x00}, + {0x3150, CRL_REG_LEN_08BIT, 0x03}, + {0x3450, CRL_REG_LEN_08BIT, 0xFF}, /* tclkzero */ + {0x3451, CRL_REG_LEN_08BIT, 0x00}, + {0x3452, CRL_REG_LEN_08BIT, 0x3F}, /* tclkprepare */ + {0x3453, CRL_REG_LEN_08BIT, 0x00}, + {0x3454, CRL_REG_LEN_08BIT, 0x37}, /* tlpx */ + {0x3455, CRL_REG_LEN_08BIT, 0x00}, + {0x3358, CRL_REG_LEN_08BIT, 0x06}, /* fixed setting */ + {0x3359, CRL_REG_LEN_08BIT, 0xE1}, + {0x335A, CRL_REG_LEN_08BIT, 0x11}, + {0x305C, CRL_REG_LEN_08BIT, 0x18}, /* incksel1 */ + {0x305D, CRL_REG_LEN_08BIT, 0x03}, /* incksel2 */ + {0x305E, CRL_REG_LEN_08BIT, 0x20}, /* incksel3 */ + {0x315E, CRL_REG_LEN_08BIT, 0x1A}, /* incksel5 */ + {0x305F, CRL_REG_LEN_08BIT, 0x01}, /* incksel4 */ + {0x3360, CRL_REG_LEN_08BIT, 0x1E}, + {0x3361, CRL_REG_LEN_08BIT, 0x61}, + {0x3362, CRL_REG_LEN_08BIT, 0x10}, + {0x3164, CRL_REG_LEN_08BIT, 0x1A}, /* incksel6 */ + {0x3070, CRL_REG_LEN_08BIT, 0x02}, + {0x3071, CRL_REG_LEN_08BIT, 0x11}, + {0x317E, CRL_REG_LEN_08BIT, 0x00}, + {0x3480, CRL_REG_LEN_08BIT, 0x49}, /* inclsel7 */ + {0x309B, CRL_REG_LEN_08BIT, 0x10}, + {0x309C, CRL_REG_LEN_08BIT, 0x22}, + {0x30A2, CRL_REG_LEN_08BIT, 0x02}, + {0x30A6, CRL_REG_LEN_08BIT, 0x20}, + {0x30A8, CRL_REG_LEN_08BIT, 0x20}, + {0x30AA, CRL_REG_LEN_08BIT, 0x20}, + {0x30AC, CRL_REG_LEN_08BIT, 0x20}, + {0x30B0, CRL_REG_LEN_08BIT, 0x43}, + {0x33B0, CRL_REG_LEN_08BIT, 0x50}, + {0x33B2, CRL_REG_LEN_08BIT, 0x1A}, + {0x33B3, CRL_REG_LEN_08BIT, 0x04}, + {0x32B8, CRL_REG_LEN_08BIT, 0x50}, + {0x32B9, CRL_REG_LEN_08BIT, 0x10}, + {0x32BA, CRL_REG_LEN_08BIT, 0x00}, + {0x32BB, CRL_REG_LEN_08BIT, 0x04}, + {0x32C8, CRL_REG_LEN_08BIT, 0x50}, + {0x32C9, CRL_REG_LEN_08BIT, 0x10}, + {0x32CA, CRL_REG_LEN_08BIT, 0x00}, + {0x32CB, CRL_REG_LEN_08BIT, 0x04}, +}; + +/* 445Mbps for imx290 1080p 30fps */ +static struct crl_register_write_rep imx290_pll_445mbps[] = { + {0x3405, CRL_REG_LEN_08BIT, 0x20}, /* repetition */ + {0x3407, 
CRL_REG_LEN_08BIT, 0x03}, /* physical lane num(fixed) */ + {0x3009, CRL_REG_LEN_08BIT, 0x02}, /* FRSEL FDG_SEL */ + {0x300F, CRL_REG_LEN_08BIT, 0x00}, /* fixed setting */ + {0x3010, CRL_REG_LEN_08BIT, 0x21}, + {0x3012, CRL_REG_LEN_08BIT, 0x64}, + {0x3414, CRL_REG_LEN_08BIT, 0x0A}, + {0x3016, CRL_REG_LEN_08BIT, 0x09}, /* changed */ + {0x3119, CRL_REG_LEN_08BIT, 0x9E}, + {0x311C, CRL_REG_LEN_08BIT, 0x1E}, + {0x311E, CRL_REG_LEN_08BIT, 0x08}, + {0x3128, CRL_REG_LEN_08BIT, 0x05}, + {0x332C, CRL_REG_LEN_08BIT, 0xD3}, + {0x332D, CRL_REG_LEN_08BIT, 0x10}, + {0x332E, CRL_REG_LEN_08BIT, 0x0D}, + {0x313D, CRL_REG_LEN_08BIT, 0x83}, + {0x3443, CRL_REG_LEN_08BIT, 0x03}, /* csi_lane_mode(fixed) */ + {0x3444, CRL_REG_LEN_08BIT, 0x20}, /* extck_freq */ + {0x3445, CRL_REG_LEN_08BIT, 0x25}, + {0x3446, CRL_REG_LEN_08BIT, 0x47}, /* tclkpost */ + {0x3447, CRL_REG_LEN_08BIT, 0x00}, + {0x3448, CRL_REG_LEN_08BIT, 0x1F}, /* thszero */ + {0x3449, CRL_REG_LEN_08BIT, 0x00}, + {0x344A, CRL_REG_LEN_08BIT, 0x17}, /* thsprepare */ + {0x344B, CRL_REG_LEN_08BIT, 0x00}, + {0x344C, CRL_REG_LEN_08BIT, 0x0F}, /* thstrail */ + {0x344D, CRL_REG_LEN_08BIT, 0x00}, + {0x344E, CRL_REG_LEN_08BIT, 0x17}, /* thstrail */ + {0x344F, CRL_REG_LEN_08BIT, 0x00}, + {0x3150, CRL_REG_LEN_08BIT, 0x03}, + {0x3450, CRL_REG_LEN_08BIT, 0x47}, /* tclkzero */ + {0x3451, CRL_REG_LEN_08BIT, 0x00}, + {0x3452, CRL_REG_LEN_08BIT, 0x0F}, /* tclkprepare */ + {0x3453, CRL_REG_LEN_08BIT, 0x00}, + {0x3454, CRL_REG_LEN_08BIT, 0x0F}, /* tlpx */ + {0x3455, CRL_REG_LEN_08BIT, 0x00}, + {0x3358, CRL_REG_LEN_08BIT, 0x06}, /* fixed setting */ + {0x3359, CRL_REG_LEN_08BIT, 0xE1}, + {0x335A, CRL_REG_LEN_08BIT, 0x11}, + {0x305C, CRL_REG_LEN_08BIT, 0x18}, /* incksel1 */ + {0x305D, CRL_REG_LEN_08BIT, 0x03}, /* incksel2 */ + {0x305E, CRL_REG_LEN_08BIT, 0x20}, /* incksel3 */ + {0x315E, CRL_REG_LEN_08BIT, 0x1A}, /* incksel5 */ + {0x305F, CRL_REG_LEN_08BIT, 0x01}, /* incksel4 */ + {0x3360, CRL_REG_LEN_08BIT, 0x1E}, + {0x3361, CRL_REG_LEN_08BIT, 0x61}, + 
{0x3362, CRL_REG_LEN_08BIT, 0x10}, + {0x3164, CRL_REG_LEN_08BIT, 0x1A}, /* incksel6 */ + {0x3070, CRL_REG_LEN_08BIT, 0x02}, + {0x3071, CRL_REG_LEN_08BIT, 0x11}, + {0x317E, CRL_REG_LEN_08BIT, 0x00}, + {0x3480, CRL_REG_LEN_08BIT, 0x49}, /* inclsel7 */ + {0x309B, CRL_REG_LEN_08BIT, 0x10}, + {0x309C, CRL_REG_LEN_08BIT, 0x22}, + {0x30A2, CRL_REG_LEN_08BIT, 0x02}, + {0x30A6, CRL_REG_LEN_08BIT, 0x20}, + {0x30A8, CRL_REG_LEN_08BIT, 0x20}, + {0x30AA, CRL_REG_LEN_08BIT, 0x20}, + {0x30AC, CRL_REG_LEN_08BIT, 0x20}, + {0x30B0, CRL_REG_LEN_08BIT, 0x43}, + {0x33B0, CRL_REG_LEN_08BIT, 0x50}, + {0x33B2, CRL_REG_LEN_08BIT, 0x1A}, + {0x33B3, CRL_REG_LEN_08BIT, 0x04}, + {0x32B8, CRL_REG_LEN_08BIT, 0x50}, + {0x32B9, CRL_REG_LEN_08BIT, 0x10}, + {0x32BA, CRL_REG_LEN_08BIT, 0x00}, + {0x32BB, CRL_REG_LEN_08BIT, 0x04}, + {0x32C8, CRL_REG_LEN_08BIT, 0x50}, + {0x32C9, CRL_REG_LEN_08BIT, 0x10}, + {0x32CA, CRL_REG_LEN_08BIT, 0x00}, + {0x32CB, CRL_REG_LEN_08BIT, 0x04}, +}; + +static struct crl_register_write_rep imx290_fmt_raw10[] = { + {0x3005, CRL_REG_LEN_08BIT, 0x00}, /* ADBIT */ + {0x300A, CRL_REG_LEN_08BIT, 0x3C}, /* BLKLEVEL */ + {0x3129, CRL_REG_LEN_08BIT, 0x1D}, /* ADBIT1 */ + {0x3441, CRL_REG_LEN_08BIT, 0x0A}, /* CSI_DT_FMT */ + {0x3442, CRL_REG_LEN_08BIT, 0x0A}, + {0x3046, CRL_REG_LEN_08BIT, 0x00}, /* ODBIT OPORTSEL */ + {0x317C, CRL_REG_LEN_08BIT, 0x12}, /* ADBIT2 */ + {0x31EC, CRL_REG_LEN_08BIT, 0x37}, /* ADBIT3 */ +}; + +static struct crl_register_write_rep imx290_fmt_raw12[] = { + {0x3005, CRL_REG_LEN_08BIT, 0x01}, /* ADBIT */ + {0x300A, CRL_REG_LEN_08BIT, 0xF0}, /* BLKLEVEL */ + {0x3129, CRL_REG_LEN_08BIT, 0x00}, /* ADBIT1 */ + {0x3441, CRL_REG_LEN_08BIT, 0x0C}, /* CSI_DT_FMT */ + {0x3442, CRL_REG_LEN_08BIT, 0x0C}, + {0x3046, CRL_REG_LEN_08BIT, 0x01}, /* ODBIT OPORTSEL */ + {0x317C, CRL_REG_LEN_08BIT, 0x00}, /* ADBIT2 */ + {0x31EC, CRL_REG_LEN_08BIT, 0x0E}, /* ADBIT3 */ +}; + +static struct crl_register_write_rep imx290_powerup_standby[] = { + {IMX290_REG_STANDBY, 
CRL_REG_LEN_08BIT, 0x01}, + {0x00, CRL_REG_LEN_DELAY, 20, 0x00}, + {IMX290_REG_XMSTA, CRL_REG_LEN_08BIT, 0x01}, +}; + +/* Horizontal dumpy added 1097(1094+3) */ +static struct crl_register_write_rep imx290_1948_1096_37MHZ_CROPPING[] = { + /*TODO need a test if necessary to open XMSTA*/ + {0x3000, CRL_REG_LEN_08BIT, 0x01}, /* reset to standby mode */ + {0x3002, CRL_REG_LEN_08BIT, 0x01}, /* default:reset slave mode */ + {0x3005, CRL_REG_LEN_08BIT, 0x01}, /* ADBIT */ + {0x3405, CRL_REG_LEN_08BIT, 0x20}, /* repetition */ + {0x3007, CRL_REG_LEN_08BIT, 0x04}, /* H/V verse and WINMODE */ + {0x3407, CRL_REG_LEN_08BIT, 0x03}, /* physical lane num(fixed) */ + {0x3009, CRL_REG_LEN_08BIT, 0x02}, /* FRSEL FDG_SEL */ + {0x300A, CRL_REG_LEN_08BIT, 0xF0}, /* BLKLEVEL */ + {0x300F, CRL_REG_LEN_08BIT, 0x00}, /* fixed setting */ + {0x3010, CRL_REG_LEN_08BIT, 0x21}, + {0x3012, CRL_REG_LEN_08BIT, 0x64}, + {0x3414, CRL_REG_LEN_08BIT, 0x0A}, /* OPB_SIZE_V */ + {0x3016, CRL_REG_LEN_08BIT, 0x09}, + {0x3018, CRL_REG_LEN_08BIT, 0x65}, /* VMAX */ + {0x3019, CRL_REG_LEN_08BIT, 0x04}, + {0x3418, CRL_REG_LEN_08BIT, 0x49}, /* Y_OUT_SIZE */ + {0x3419, CRL_REG_LEN_08BIT, 0x04}, + {0x3119, CRL_REG_LEN_08BIT, 0x9E}, + {0x301C, CRL_REG_LEN_08BIT, 0x30}, /* HMAX */ + {0x301D, CRL_REG_LEN_08BIT, 0x11}, + {0x311C, CRL_REG_LEN_08BIT, 0x1E}, + {0x311E, CRL_REG_LEN_08BIT, 0x08}, + {0x3128, CRL_REG_LEN_08BIT, 0x05}, + {0x3129, CRL_REG_LEN_08BIT, 0x00}, /* ADBIT1 */ + {0x332C, CRL_REG_LEN_08BIT, 0xD3}, + {0x332D, CRL_REG_LEN_08BIT, 0x10}, + {0x332E, CRL_REG_LEN_08BIT, 0x0D}, + {0x313D, CRL_REG_LEN_08BIT, 0x83}, + {0x3441, CRL_REG_LEN_08BIT, 0x0C}, /* CSI_DT_FMT */ + {0x3442, CRL_REG_LEN_08BIT, 0x0C}, + {0x3443, CRL_REG_LEN_08BIT, 0x03}, /* csi_lane_mode(fixed) */ + {0x3444, CRL_REG_LEN_08BIT, 0x20}, /* extck_freq */ + {0x3445, CRL_REG_LEN_08BIT, 0x25}, + {0x3046, CRL_REG_LEN_08BIT, 0x01}, /* ODBIT OPORTSEL */ + {0x3446, CRL_REG_LEN_08BIT, 0x47}, /* tclkpost */ + {0x3447, CRL_REG_LEN_08BIT, 0x00}, + {0x3448, 
CRL_REG_LEN_08BIT, 0x1F}, /* thszero */ + {0x3449, CRL_REG_LEN_08BIT, 0x00}, + {0x304B, CRL_REG_LEN_08BIT, 0x0A}, /* XH/VS OUTSEL */ + {0x344A, CRL_REG_LEN_08BIT, 0x17}, /* thsprepare */ + {0x344B, CRL_REG_LEN_08BIT, 0x00}, + {0x344C, CRL_REG_LEN_08BIT, 0x0F}, /* thstrail */ + {0x344D, CRL_REG_LEN_08BIT, 0x00}, + {0x344E, CRL_REG_LEN_08BIT, 0x17}, /* thstrail */ + {0x344F, CRL_REG_LEN_08BIT, 0x00}, + {0x3150, CRL_REG_LEN_08BIT, 0x03}, + {0x3450, CRL_REG_LEN_08BIT, 0x47}, /* tclkzero */ + {0x3451, CRL_REG_LEN_08BIT, 0x00}, + {0x3452, CRL_REG_LEN_08BIT, 0x0F}, /* tclkprepare */ + {0x3453, CRL_REG_LEN_08BIT, 0x00}, + {0x3454, CRL_REG_LEN_08BIT, 0x0F}, /* tlpx */ + {0x3455, CRL_REG_LEN_08BIT, 0x00}, + {0x3358, CRL_REG_LEN_08BIT, 0x06}, /* fixed setting */ + {0x3359, CRL_REG_LEN_08BIT, 0xE1}, + {0x335A, CRL_REG_LEN_08BIT, 0x11}, + {0x305C, CRL_REG_LEN_08BIT, 0x18}, /* incksel1 */ + {0x305D, CRL_REG_LEN_08BIT, 0x03}, /* incksel2 */ + {0x305E, CRL_REG_LEN_08BIT, 0x20}, /* incksel3 */ + {0x315E, CRL_REG_LEN_08BIT, 0x1A}, /* incksel5 */ + {0x305F, CRL_REG_LEN_08BIT, 0x01}, /* incksel4 */ + {0x3360, CRL_REG_LEN_08BIT, 0x1E}, + {0x3361, CRL_REG_LEN_08BIT, 0x61}, + {0x3362, CRL_REG_LEN_08BIT, 0x10}, + {0x3164, CRL_REG_LEN_08BIT, 0x1A}, /* incksel6 */ + {0x3070, CRL_REG_LEN_08BIT, 0x02}, + {0x3071, CRL_REG_LEN_08BIT, 0x11}, + {0x3472, CRL_REG_LEN_08BIT, 0x9C}, /* X_OUT_SIZE */ + {0x3473, CRL_REG_LEN_08BIT, 0x07}, + {0x317C, CRL_REG_LEN_08BIT, 0x00}, /* ADBIT2 */ + {0x317E, CRL_REG_LEN_08BIT, 0x00}, + {0x3480, CRL_REG_LEN_08BIT, 0x49}, /* inclsel7 */ + {0x309B, CRL_REG_LEN_08BIT, 0x10}, + {0x309C, CRL_REG_LEN_08BIT, 0x22}, + {0x30A2, CRL_REG_LEN_08BIT, 0x02}, + {0x30A6, CRL_REG_LEN_08BIT, 0x20}, + {0x30A8, CRL_REG_LEN_08BIT, 0x20}, + {0x30AA, CRL_REG_LEN_08BIT, 0x20}, + {0x30AC, CRL_REG_LEN_08BIT, 0x20}, + {0x30B0, CRL_REG_LEN_08BIT, 0x43}, + {0x33B0, CRL_REG_LEN_08BIT, 0x50}, + {0x33B2, CRL_REG_LEN_08BIT, 0x1A}, + {0x33B3, CRL_REG_LEN_08BIT, 0x04}, + {0x32B8, CRL_REG_LEN_08BIT, 
0x50}, + {0x32B9, CRL_REG_LEN_08BIT, 0x10}, + {0x32BA, CRL_REG_LEN_08BIT, 0x00}, + {0x32BB, CRL_REG_LEN_08BIT, 0x04}, + {0x32C8, CRL_REG_LEN_08BIT, 0x50}, + {0x32C9, CRL_REG_LEN_08BIT, 0x10}, + {0x32CA, CRL_REG_LEN_08BIT, 0x00}, + {0x32CB, CRL_REG_LEN_08BIT, 0x04}, + {0x31EC, CRL_REG_LEN_08BIT, 0x0E}, /* ADBIT3 */ + /* WINDOW CROPPING */ + {0x303C, CRL_REG_LEN_08BIT, 0x01}, + {0x303D, CRL_REG_LEN_08BIT, 0x00}, + {0x303E, CRL_REG_LEN_08BIT, 0x48}, + {0x303F, CRL_REG_LEN_08BIT, 0x04}, +}; + +static struct crl_register_write_rep imx290_1952_3435_37MHZ_CROPPING[] = { + /*TODO need a test if necessary to open XMSTA*/ + {0x3000, CRL_REG_LEN_08BIT, 0x01}, /* reset to standby mode */ + {0x3002, CRL_REG_LEN_08BIT, 0x01}, /* default:reset to slave mode */ + {0x3005, CRL_REG_LEN_08BIT, 0x00}, /* ADBIT */ + {0x3405, CRL_REG_LEN_08BIT, 0x00}, /* repetition */ + {0x3106, CRL_REG_LEN_08BIT, 0x33}, + {0x3007, CRL_REG_LEN_08BIT, 0x00}, /* H/V verse and WINMODE */ + {0x3407, CRL_REG_LEN_08BIT, 0x03}, /* physical lane num(fixed) */ + {0x3009, CRL_REG_LEN_08BIT, 0x00}, /* FRSEL FDG_SEL */ + {0x300A, CRL_REG_LEN_08BIT, 0x3C}, /* BLKLEVEL */ + {0x300C, CRL_REG_LEN_08BIT, 0x21}, + {0x300F, CRL_REG_LEN_08BIT, 0x00}, /* fixed setting */ + {0x3010, CRL_REG_LEN_08BIT, 0x21}, + {0x3012, CRL_REG_LEN_08BIT, 0x64}, + {0x3414, CRL_REG_LEN_08BIT, 0x0A}, /* OPB_SIZE_V */ + {0x3415, CRL_REG_LEN_08BIT, 0x00}, + {0x3016, CRL_REG_LEN_08BIT, 0x09}, + {0x3018, CRL_REG_LEN_08BIT, 0x65}, /* VMAX */ + {0x3019, CRL_REG_LEN_08BIT, 0x04}, + {0x3418, CRL_REG_LEN_08BIT, 0x55}, /* Y_OUT_SIZE */ + {0x3419, CRL_REG_LEN_08BIT, 0x11}, + {0x3119, CRL_REG_LEN_08BIT, 0x9E}, + {0x301C, CRL_REG_LEN_08BIT, 0x4C}, /* HMAX */ + {0x301D, CRL_REG_LEN_08BIT, 0x04}, + {0x311C, CRL_REG_LEN_08BIT, 0x1E}, + {0x311E, CRL_REG_LEN_08BIT, 0x08}, + {0x3020, CRL_REG_LEN_08BIT, 0x04}, /* SHS1 */ + {0x3021, CRL_REG_LEN_08BIT, 0x00}, + {0x3024, CRL_REG_LEN_08BIT, 0x89}, /* SHS2 */ + {0x3025, CRL_REG_LEN_08BIT, 0x00}, + {0x3028, 
CRL_REG_LEN_08BIT, 0x93}, /* SHS3 */ + {0x3029, CRL_REG_LEN_08BIT, 0x01}, + {0x3128, CRL_REG_LEN_08BIT, 0x05}, + {0x3129, CRL_REG_LEN_08BIT, 0x1D}, /* ADBIT1 */ + {0x332C, CRL_REG_LEN_08BIT, 0xD3}, + {0x332D, CRL_REG_LEN_08BIT, 0x10}, + {0x332E, CRL_REG_LEN_08BIT, 0x0D}, + {0x3030, CRL_REG_LEN_08BIT, 0x85}, /* RHS1 */ + {0x3031, CRL_REG_LEN_08BIT, 0x00}, + {0x3034, CRL_REG_LEN_08BIT, 0x92}, /* RHS2 */ + {0x3035, CRL_REG_LEN_08BIT, 0x00}, + {0x313D, CRL_REG_LEN_08BIT, 0x83}, + {0x3441, CRL_REG_LEN_08BIT, 0x0A}, /* CSI_DT_FMT */ + {0x3442, CRL_REG_LEN_08BIT, 0x0A}, + {0x3443, CRL_REG_LEN_08BIT, 0x03}, /* csi_lane_mode(fixed) */ + {0x3444, CRL_REG_LEN_08BIT, 0x20}, /* extck_freq */ + {0x3045, CRL_REG_LEN_08BIT, 0x05}, /* DOL sp */ + {0x3445, CRL_REG_LEN_08BIT, 0x25}, + {0x3046, CRL_REG_LEN_08BIT, 0x00}, /* ODBIT OPORTSEL */ + {0x3446, CRL_REG_LEN_08BIT, 0x77}, /* tclkpost */ + {0x3447, CRL_REG_LEN_08BIT, 0x00}, + {0x3448, CRL_REG_LEN_08BIT, 0x67}, /* thszero */ + {0x3449, CRL_REG_LEN_08BIT, 0x00}, + {0x304B, CRL_REG_LEN_08BIT, 0x0A}, /* XH/VS OUTSEL */ + {0x344A, CRL_REG_LEN_08BIT, 0x47}, /* thsprepare */ + {0x344B, CRL_REG_LEN_08BIT, 0x00}, + {0x344C, CRL_REG_LEN_08BIT, 0x37}, /* thstrail */ + {0x344D, CRL_REG_LEN_08BIT, 0x00}, + {0x344E, CRL_REG_LEN_08BIT, 0x3F}, /* thstrail */ + {0x344F, CRL_REG_LEN_08BIT, 0x00}, + {0x3150, CRL_REG_LEN_08BIT, 0x03}, + {0x3450, CRL_REG_LEN_08BIT, 0xFF}, /* tclkzero */ + {0x3451, CRL_REG_LEN_08BIT, 0x00}, + {0x3452, CRL_REG_LEN_08BIT, 0x3F}, /* tclkprepare */ + {0x3453, CRL_REG_LEN_08BIT, 0x00}, + {0x3454, CRL_REG_LEN_08BIT, 0x37}, /* tlpx */ + {0x3455, CRL_REG_LEN_08BIT, 0x00}, + {0x3358, CRL_REG_LEN_08BIT, 0x06}, /* fixed setting */ + {0x3359, CRL_REG_LEN_08BIT, 0xE1}, + {0x335A, CRL_REG_LEN_08BIT, 0x11}, + {0x305C, CRL_REG_LEN_08BIT, 0x18}, /* incksel1 */ + {0x305D, CRL_REG_LEN_08BIT, 0x03}, /* incksel2 */ + {0x305E, CRL_REG_LEN_08BIT, 0x20}, /* incksel3 */ + {0x315E, CRL_REG_LEN_08BIT, 0x1A}, /* incksel5 */ + {0x305F, 
CRL_REG_LEN_08BIT, 0x01}, /* incksel4 */ + {0x3360, CRL_REG_LEN_08BIT, 0x1E}, + {0x3361, CRL_REG_LEN_08BIT, 0x61}, + {0x3362, CRL_REG_LEN_08BIT, 0x10}, + {0x3164, CRL_REG_LEN_08BIT, 0x1A}, /* incksel6 */ + {0x3070, CRL_REG_LEN_08BIT, 0x02}, + {0x3071, CRL_REG_LEN_08BIT, 0x11}, + {0x3472, CRL_REG_LEN_08BIT, 0xA0}, /* X_OUT_SIZE */ + {0x3473, CRL_REG_LEN_08BIT, 0x07}, + {0x347B, CRL_REG_LEN_08BIT, 0x23}, + {0x317C, CRL_REG_LEN_08BIT, 0x12}, /* ADBIT2 */ + {0x317E, CRL_REG_LEN_08BIT, 0x00}, + {0x3480, CRL_REG_LEN_08BIT, 0x49}, /* inclsel7 */ + {0x309B, CRL_REG_LEN_08BIT, 0x10}, + {0x309C, CRL_REG_LEN_08BIT, 0x22}, + {0x30A2, CRL_REG_LEN_08BIT, 0x02}, + {0x30A6, CRL_REG_LEN_08BIT, 0x20}, + {0x30A8, CRL_REG_LEN_08BIT, 0x20}, + {0x30AA, CRL_REG_LEN_08BIT, 0x20}, + {0x30AC, CRL_REG_LEN_08BIT, 0x20}, + {0x30B0, CRL_REG_LEN_08BIT, 0x43}, + {0x33B0, CRL_REG_LEN_08BIT, 0x50}, + {0x33B2, CRL_REG_LEN_08BIT, 0x1A}, + {0x33B3, CRL_REG_LEN_08BIT, 0x04}, + {0x32B8, CRL_REG_LEN_08BIT, 0x50}, + {0x32B9, CRL_REG_LEN_08BIT, 0x10}, + {0x32BA, CRL_REG_LEN_08BIT, 0x00}, + {0x32BB, CRL_REG_LEN_08BIT, 0x04}, + {0x32C8, CRL_REG_LEN_08BIT, 0x50}, + {0x32C9, CRL_REG_LEN_08BIT, 0x10}, + {0x32CA, CRL_REG_LEN_08BIT, 0x00}, + {0x32CB, CRL_REG_LEN_08BIT, 0x04}, + {0x31EC, CRL_REG_LEN_08BIT, 0x37}, /* ADBIT3 */ +}; + +static struct crl_register_write_rep imx290_streamon_regs[] = { + {IMX290_REG_STANDBY, CRL_REG_LEN_08BIT, 0x00}, + {0x00, CRL_REG_LEN_DELAY, 50, 0x00}, /* Add a 50ms delay */ + {IMX290_REG_XMSTA, CRL_REG_LEN_08BIT, 0x00}, +}; + +static struct crl_register_write_rep imx290_streamoff_regs[] = { + {IMX290_REG_STANDBY, CRL_REG_LEN_08BIT, 0x01}, + {IMX290_REG_XMSTA, CRL_REG_LEN_08BIT, 0x01}, +}; + +static struct crl_arithmetic_ops imx290_hflip_ops[] = { + { + .op = CRL_BITWISE_LSHIFT, + .operand.entity_val = 1, + } +}; + +static struct crl_dynamic_register_access imx290_h_flip_regs[] = { + { + .address = 0x3007, + .len = CRL_REG_LEN_08BIT | CRL_REG_READ_AND_UPDATE, + .ops_items = 
ARRAY_SIZE(imx290_hflip_ops), + .ops = imx290_hflip_ops, + .mask = 0x2, + } +}; + +static struct crl_dynamic_register_access imx290_v_flip_regs[] = { + { + .address = 0x3007, + .len = CRL_REG_LEN_08BIT | CRL_REG_READ_AND_UPDATE, + .ops_items = 0, + .ops = 0, + .mask = 0x1, + } +}; + +static struct crl_dynamic_register_access imx290_ana_gain_global_regs[] = { + { + .address = 0x3014, + .len = CRL_REG_LEN_08BIT, + .ops_items = 0, + .ops = 0, + .mask = 0xff, + }, +}; + +/* shs1[17:0] = fll - exposure - 1 */ +static struct crl_arithmetic_ops imx290_shs1_lsb_ops[] = { + { + .op = CRL_SUBTRACT, + .operand.entity_type = CRL_DYNAMIC_VAL_OPERAND_TYPE_CTRL_VAL, + .operand.entity_val = V4L2_CID_FRAME_LENGTH_LINES, + }, + { + .op = CRL_SUBTRACT, + .operand.entity_type = CRL_DYNAMIC_VAL_OPERAND_TYPE_CONST, + .operand.entity_val = 1, + } +}; + +static struct crl_arithmetic_ops imx290_shs1_msb0_ops[] = { + { + .op = CRL_SUBTRACT, + .operand.entity_type = CRL_DYNAMIC_VAL_OPERAND_TYPE_CTRL_VAL, + .operand.entity_val = V4L2_CID_FRAME_LENGTH_LINES, + }, + { + .op = CRL_SUBTRACT, + .operand.entity_type = CRL_DYNAMIC_VAL_OPERAND_TYPE_CONST, + .operand.entity_val = 1, + }, + { + .op = CRL_BITWISE_RSHIFT, + .operand.entity_type = CRL_DYNAMIC_VAL_OPERAND_TYPE_CONST, + .operand.entity_val = 8, + } +}; + +static struct crl_arithmetic_ops imx290_shs1_msb1_ops[] = { + { + .op = CRL_SUBTRACT, + .operand.entity_type = CRL_DYNAMIC_VAL_OPERAND_TYPE_CTRL_VAL, + .operand.entity_val = V4L2_CID_FRAME_LENGTH_LINES, + }, + { + .op = CRL_SUBTRACT, + .operand.entity_type = CRL_DYNAMIC_VAL_OPERAND_TYPE_CONST, + .operand.entity_val = 1, + }, + { + .op = CRL_BITWISE_RSHIFT, + .operand.entity_type = CRL_DYNAMIC_VAL_OPERAND_TYPE_CONST, + .operand.entity_val = 16, + }, + { + .op = CRL_BITWISE_AND, + .operand.entity_val = 0x03, + } +}; + +static struct crl_dynamic_register_access imx290_shs1_regs[] = { + { + .address = 0x3020, + .len = CRL_REG_LEN_08BIT, + .ops_items = ARRAY_SIZE(imx290_shs1_lsb_ops), + .ops = 
imx290_shs1_lsb_ops, + .mask = 0xff, + }, + { + .address = 0x3021, + .len = CRL_REG_LEN_08BIT, + .ops_items = ARRAY_SIZE(imx290_shs1_msb0_ops), + .ops = imx290_shs1_msb0_ops, + .mask = 0xff, + }, + { + .address = 0x3022, + .len = CRL_REG_LEN_08BIT, + .ops_items = ARRAY_SIZE(imx290_shs1_msb1_ops), + .ops = imx290_shs1_msb1_ops, + .mask = 0xff, + }, +}; + +static struct crl_arithmetic_ops imx290_fll_msb_ops[] = { + { + .op = CRL_BITWISE_RSHIFT, + .operand.entity_val = 8, + } +}; + +static struct crl_arithmetic_ops imx290_fll_hsb_ops[] = { + { + .op = CRL_BITWISE_RSHIFT, + .operand.entity_val = 16, + } +}; + +static struct crl_dynamic_register_access imx290_fll_regs[] = { + /* + * Use 8bits access since 24bits or 32bits access will fail + * TODO: root cause the 24bits and 32bits access issues + */ + { + .address = 0x3018, + .len = CRL_REG_LEN_08BIT, + .ops_items = 0, + .ops = 0, + .mask = 0xff, + }, + { + .address = 0x3019, + .len = CRL_REG_LEN_08BIT, + .ops_items = ARRAY_SIZE(imx290_fll_msb_ops), + .ops = imx290_fll_msb_ops, + .mask = 0xff, + }, + { + .address = 0x301A, + .len = CRL_REG_LEN_08BIT, + .ops_items = ARRAY_SIZE(imx290_fll_hsb_ops), + .ops = imx290_fll_hsb_ops, + .mask = 0x3, + }, +}; + +static struct crl_dynamic_register_access imx290_llp_regs[] = { + { + .address = 0x301C, + .len = CRL_REG_LEN_16BIT, + .ops_items = 0, + .ops = 0, + .mask = 0xffff, + } +}; + +static struct crl_sensor_detect_config imx290_sensor_detect_regset[] = { + { + .reg = { 0x348F, CRL_REG_LEN_08BIT, 0x000000ff }, + .width = 7, + }, + { + .reg = { 0x348E, CRL_REG_LEN_08BIT, 0x000000ff }, + .width = 7, + }, +}; + +static struct crl_pll_configuration imx290_pll_configurations[] = { + /* + * IMX290 supports only 37.125MHz and 72.25MHz input clocks. + * IPU4 supports up to 38.4MHz, however the sensor module we use + * has its own oscillator. + * The "input_clk" value is specified here for the reference. 
+ */ + { + .input_clk = 37125000, + .op_sys_clk = 222750000,/* 445500000/2 */ + .bitsperpixel = 12, + .pixel_rate_csi = 148500000, + .pixel_rate_pa = 148500000, + .csi_lanes = 4, + .comp_items = 0, + .ctrl_data = 0, + .pll_regs_items = ARRAY_SIZE(imx290_pll_445mbps), + .pll_regs = imx290_pll_445mbps, + }, + { + .input_clk = 37125000, + .op_sys_clk = 445500000,/* 891000000/2 */ + .bitsperpixel = 10, + .pixel_rate_csi = 356400000, + .pixel_rate_pa = 356400000, + .csi_lanes = 4, + .comp_items = 0, + .ctrl_data = 0, + .pll_regs_items = ARRAY_SIZE(imx290_pll_891mbps), + .pll_regs = imx290_pll_891mbps, + } +}; + +/* Temporary use a single rect range */ +static struct crl_subdev_rect_rep imx290_1948_1096_rects[] = { + { + .subdev_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .in_rect.left = 0, + .in_rect.top = 0, + .in_rect.width = 1952, + .in_rect.height = 3435, + .out_rect.left = 0, + .out_rect.top = 0, + .out_rect.width = 1952, + .out_rect.height = 3435, + }, + { + .subdev_type = CRL_SUBDEV_TYPE_BINNER, + .in_rect.left = 0, + .in_rect.top = 0, + .in_rect.width = 1952, + .in_rect.height = 3435, + .out_rect.left = 0, + .out_rect.top = 0, + .out_rect.width = 1948, + .out_rect.height = 1096, + } +}; + +static struct crl_subdev_rect_rep imx290_1952_3435_rects[] = { + { + .subdev_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .in_rect.left = 0, + .in_rect.top = 0, + .in_rect.width = 1952, + .in_rect.height = 3435, + .out_rect.left = 0, + .out_rect.top = 0, + .out_rect.width = 1952, + .out_rect.height = 3435, + }, + { + .subdev_type = CRL_SUBDEV_TYPE_BINNER, + .in_rect.left = 0, + .in_rect.top = 0, + .in_rect.width = 1952, + .in_rect.height = 3435, + .out_rect.left = 0, + .out_rect.top = 0, + .out_rect.width = 1952, + .out_rect.height = 3435, + } +}; + +static struct crl_mode_rep imx290_modes[] = { + { + .sd_rects_items = ARRAY_SIZE(imx290_1948_1096_rects), + .sd_rects = imx290_1948_1096_rects, + .binn_hor = 1, + .binn_vert = 3, + .scale_m = 1, + .width = 1948, + .height = 1096, + .min_llp = 
2220, + .min_fll = 1112, + .comp_items = 0, + .ctrl_data = 0, + .mode_regs_items = ARRAY_SIZE(imx290_1948_1096_37MHZ_CROPPING), + .mode_regs = imx290_1948_1096_37MHZ_CROPPING, + }, + { + .sd_rects_items = ARRAY_SIZE(imx290_1952_3435_rects), + .sd_rects = imx290_1952_3435_rects, + .binn_hor = 1, + .binn_vert = 1, + .scale_m = 1, + .width = 1952, + .height = 3435, + .min_llp = 2220, + .min_fll = 1112, + .comp_items = 0, + .ctrl_data = 0, + .mode_regs_items = ARRAY_SIZE(imx290_1952_3435_37MHZ_CROPPING), + .mode_regs = imx290_1952_3435_37MHZ_CROPPING, + }, +}; + +struct crl_sensor_subdev_config imx290_sensor_subdevs[] = { + { + .subdev_type = CRL_SUBDEV_TYPE_BINNER, + .name = "imx290 binner", + }, + { + .subdev_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .name = "imx290 pixel array", + } +}; + +static struct crl_sensor_limits imx290_sensor_limits = { + .x_addr_min = 0, + .y_addr_min = 0, + .x_addr_max = 1952, + .y_addr_max = 3435, + .min_frame_length_lines = 320, + .max_frame_length_lines = 65535, + .min_line_length_pixels = 380, + .max_line_length_pixels = 32752, +}; + +static struct crl_flip_data imx290_flip_configurations[] = { + { + .flip = CRL_FLIP_DEFAULT_NONE, + .pixel_order = CRL_PIXEL_ORDER_RGGB, + }, + { + .flip = CRL_FLIP_HFLIP, + .pixel_order = CRL_PIXEL_ORDER_GRBG, + }, + { + .flip = CRL_FLIP_VFLIP, + .pixel_order = CRL_PIXEL_ORDER_GBRG, + }, + { + .flip = CRL_FLIP_HFLIP_VFLIP, + .pixel_order = CRL_PIXEL_ORDER_BGGR, + } +}; + +static struct crl_csi_data_fmt imx290_crl_csi_data_fmt[] = { + { + .code = MEDIA_BUS_FMT_SGRBG10_1X10, + .pixel_order = CRL_PIXEL_ORDER_GRBG, + .bits_per_pixel = 10, + .regs_items = ARRAY_SIZE(imx290_fmt_raw10), + .regs = imx290_fmt_raw10, + }, + { + .code = MEDIA_BUS_FMT_SRGGB10_1X10, + .pixel_order = CRL_PIXEL_ORDER_RGGB, + .bits_per_pixel = 10, + .regs_items = ARRAY_SIZE(imx290_fmt_raw10), + .regs = imx290_fmt_raw10, + }, + { + .code = MEDIA_BUS_FMT_SBGGR10_1X10, + .pixel_order = CRL_PIXEL_ORDER_BGGR, + .bits_per_pixel = 10, + 
.regs_items = ARRAY_SIZE(imx290_fmt_raw10), + .regs = imx290_fmt_raw10, + }, + { + .code = MEDIA_BUS_FMT_SGBRG10_1X10, + .pixel_order = CRL_PIXEL_ORDER_GBRG, + .bits_per_pixel = 10, + .regs_items = ARRAY_SIZE(imx290_fmt_raw10), + .regs = imx290_fmt_raw10, + }, + { + .code = MEDIA_BUS_FMT_SGRBG12_1X12, + .pixel_order = CRL_PIXEL_ORDER_GRBG, + .bits_per_pixel = 12, + .regs_items = ARRAY_SIZE(imx290_fmt_raw12), + .regs = imx290_fmt_raw12, + }, + { + .code = MEDIA_BUS_FMT_SRGGB12_1X12, + .pixel_order = CRL_PIXEL_ORDER_RGGB, /*default pixel order*/ + .bits_per_pixel = 12, + .regs_items = ARRAY_SIZE(imx290_fmt_raw12), + .regs = imx290_fmt_raw12, + }, + { + .code = MEDIA_BUS_FMT_SBGGR12_1X12, + .pixel_order = CRL_PIXEL_ORDER_BGGR, + .bits_per_pixel = 12, + .regs_items = ARRAY_SIZE(imx290_fmt_raw12), + .regs = imx290_fmt_raw12, + }, + { + .code = MEDIA_BUS_FMT_SGBRG12_1X12, + .pixel_order = CRL_PIXEL_ORDER_GBRG, + .bits_per_pixel = 12, + .regs_items = ARRAY_SIZE(imx290_fmt_raw12), + .regs = imx290_fmt_raw12, + } +}; + +static struct crl_v4l2_ctrl imx290_v4l2_ctrls[] = { + { + .sd_type = CRL_SUBDEV_TYPE_BINNER, + .op_type = CRL_V4L2_CTRL_SET_OP, + .context = SENSOR_IDLE, + .ctrl_id = V4L2_CID_LINK_FREQ, + .name = "V4L2_CID_LINK_FREQ", + .type = CRL_V4L2_CTRL_TYPE_MENU_INT, + .data.v4l2_int_menu.def = 0, + .data.v4l2_int_menu.max = 0, + .data.v4l2_int_menu.menu = 0, + .flags = 0, + .impact = CRL_IMPACTS_NO_IMPACT, + .regs_items = 0, + .regs = 0, + .dep_items = 0, + .dep_ctrls = 0, + }, + { + .sd_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .op_type = CRL_V4L2_CTRL_GET_OP, + .context = SENSOR_POWERED_ON, + .ctrl_id = V4L2_CID_PIXEL_RATE, + .name = "V4L2_CID_PIXEL_RATE_PA", + .type = CRL_V4L2_CTRL_TYPE_INTEGER, + .data.std_data.min = 0, + .data.std_data.max = INT_MAX, + .data.std_data.step = 1, + .data.std_data.def = 0, + .flags = 0, + .impact = CRL_IMPACTS_NO_IMPACT, + .regs_items = 0, + .regs = 0, + .dep_items = 0, + .dep_ctrls = 0, + }, + { + .sd_type = CRL_SUBDEV_TYPE_BINNER, + 
.op_type = CRL_V4L2_CTRL_GET_OP, + .context = SENSOR_POWERED_ON, + .ctrl_id = V4L2_CID_PIXEL_RATE, + .name = "V4L2_CID_PIXEL_RATE_CSI", + .type = CRL_V4L2_CTRL_TYPE_INTEGER, + .data.std_data.min = 0, + .data.std_data.max = INT_MAX, + .data.std_data.step = 1, + .data.std_data.def = 0, + .flags = 0, + .impact = CRL_IMPACTS_NO_IMPACT, + .regs_items = 0, + .regs = 0, + .dep_items = 0, + .dep_ctrls = 0, + }, + { + .sd_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .op_type = CRL_V4L2_CTRL_SET_OP, + .context = SENSOR_POWERED_ON, + .ctrl_id = V4L2_CID_HFLIP, + .name = "V4L2_CID_HFLIP", + .type = CRL_V4L2_CTRL_TYPE_INTEGER, + .data.std_data.min = 0, + .data.std_data.max = 1, + .data.std_data.step = 1, + .data.std_data.def = 0, + .flags = 0, + .impact = CRL_IMPACTS_NO_IMPACT, + .ctrl = 0, + .regs_items = ARRAY_SIZE(imx290_h_flip_regs), + .regs = imx290_h_flip_regs, + .dep_items = 0, + .dep_ctrls = 0, + }, + { + .sd_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .op_type = CRL_V4L2_CTRL_SET_OP, + .context = SENSOR_POWERED_ON, + .ctrl_id = V4L2_CID_VFLIP, + .name = "V4L2_CID_VFLIP", + .type = CRL_V4L2_CTRL_TYPE_INTEGER, + .data.std_data.min = 0, + .data.std_data.max = 1, + .data.std_data.step = 1, + .data.std_data.def = 0, + .flags = 0, + .impact = CRL_IMPACTS_NO_IMPACT, + .ctrl = 0, + .regs_items = ARRAY_SIZE(imx290_v_flip_regs), + .regs = imx290_v_flip_regs, + .dep_items = 0, + .dep_ctrls = 0, + }, + { + .sd_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .op_type = CRL_V4L2_CTRL_SET_OP, + .context = SENSOR_POWERED_ON, + .ctrl_id = V4L2_CID_ANALOGUE_GAIN, + .name = "V4L2_CID_ANALOGUE_GAIN", + .type = CRL_V4L2_CTRL_TYPE_INTEGER, + .data.std_data.min = 0, + .data.std_data.max = 240, + .data.std_data.step = 1, + .data.std_data.def = 0, + .flags = 0, + .impact = CRL_IMPACTS_NO_IMPACT, + .ctrl = 0, + .regs_items = ARRAY_SIZE(imx290_ana_gain_global_regs), + .regs = imx290_ana_gain_global_regs, + .dep_items = 0, + .dep_ctrls = 0, + }, + { + .sd_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .op_type = 
CRL_V4L2_CTRL_SET_OP, + .context = SENSOR_POWERED_ON, + .ctrl_id = V4L2_CID_EXPOSURE, + .name = "V4L2_CID_EXPOSURE", + .type = CRL_V4L2_CTRL_TYPE_INTEGER, + .data.std_data.min = 0, + .data.std_data.max = IMX290_MAX_SHS1, + .data.std_data.step = 1, + .data.std_data.def = 0x264, + .flags = 0, + .impact = CRL_IMPACTS_NO_IMPACT, + .ctrl = 0, + .regs_items = ARRAY_SIZE(imx290_shs1_regs), + .regs = imx290_shs1_regs, + .dep_items = 0, + .dep_ctrls = 0, + }, + { + .sd_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .op_type = CRL_V4L2_CTRL_SET_OP, + .context = SENSOR_POWERED_ON, + .ctrl_id = V4L2_CID_FRAME_LENGTH_LINES, + .name = "Frame length lines", + .type = CRL_V4L2_CTRL_TYPE_CUSTOM, + .data.std_data.min = 720, + .data.std_data.max = IMX290_VMAX, + .data.std_data.step = 1, + .data.std_data.def = 1097, + .flags = V4L2_CTRL_FLAG_UPDATE, + .impact = CRL_IMPACTS_NO_IMPACT, + .ctrl = 0, + .regs_items = ARRAY_SIZE(imx290_fll_regs), + .regs = imx290_fll_regs, + .dep_items = 0, + .dep_ctrls = 0, + .v4l2_type = V4L2_CTRL_TYPE_INTEGER, + }, + { + .sd_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .op_type = CRL_V4L2_CTRL_SET_OP, + .context = SENSOR_POWERED_ON, + .ctrl_id = V4L2_CID_LINE_LENGTH_PIXELS, + .name = "Line Length Pixels", + .type = CRL_V4L2_CTRL_TYPE_CUSTOM, + .data.std_data.min = 1948, + .data.std_data.max = IMX290_HMAX, + .data.std_data.step = 1, + .data.std_data.def = 1948, + .flags = V4L2_CTRL_FLAG_UPDATE, + .impact = CRL_IMPACTS_NO_IMPACT, + .ctrl = 0, + .regs_items = ARRAY_SIZE(imx290_llp_regs), + .regs = imx290_llp_regs, + .dep_items = 0, + .dep_ctrls = 0, + .v4l2_type = V4L2_CTRL_TYPE_INTEGER, + }, +}; + +static struct crl_arithmetic_ops imx290_frame_desc_width_ops[] = { + { + .op = CRL_ASSIGNMENT, + .operand.entity_type = CRL_DYNAMIC_VAL_OPERAND_TYPE_VAR_REF, + .operand.entity_val = CRL_VAR_REF_OUTPUT_WIDTH, + }, +}; + +static struct crl_arithmetic_ops imx290_frame_desc_height_ops[] = { + { + .op = CRL_ASSIGNMENT, + .operand.entity_type = CRL_DYNAMIC_VAL_OPERAND_TYPE_CONST, + 
.operand.entity_val = 1, + }, +}; + +static struct crl_frame_desc imx290_frame_desc[] = { + { + .flags.entity_val = 0, + .bpp.entity_type = CRL_DYNAMIC_VAL_OPERAND_TYPE_VAR_REF, + .bpp.entity_val = CRL_VAR_REF_BITSPERPIXEL, + .pixelcode.entity_val = MEDIA_BUS_FMT_FIXED, + .length.entity_val = 0, + .start_line.entity_val = 0, + .start_pixel.entity_val = 0, + .width = { + .ops_items = ARRAY_SIZE(imx290_frame_desc_width_ops), + .ops = imx290_frame_desc_width_ops, + }, + .height = { + .ops_items = ARRAY_SIZE(imx290_frame_desc_height_ops), + .ops = imx290_frame_desc_height_ops, + }, + .csi2_channel.entity_val = 0, + .csi2_data_type.entity_val = 0x12, + }, +}; + +static struct crl_power_seq_entity imx290_power_items[] = { + /* If your sensor uses IPU reference clock, make sure it's enabled here. */ + { + .type = CRL_POWER_ETY_GPIO_FROM_PDATA, + .val = 1, + }, +}; + +struct crl_sensor_configuration imx290_crl_configuration = { + + .power_items = ARRAY_SIZE(imx290_power_items), + .power_entities = imx290_power_items, + + .powerup_regs_items = ARRAY_SIZE(imx290_powerup_standby), + .powerup_regs = imx290_powerup_standby, + + .poweroff_regs_items = 0, + .poweroff_regs = 0, + + .id_reg_items = ARRAY_SIZE(imx290_sensor_detect_regset), + .id_regs = imx290_sensor_detect_regset, + + .subdev_items = ARRAY_SIZE(imx290_sensor_subdevs), + .subdevs = imx290_sensor_subdevs, + + .sensor_limits = &imx290_sensor_limits, + + .pll_config_items = ARRAY_SIZE(imx290_pll_configurations), + .pll_configs = imx290_pll_configurations, + + .modes_items = ARRAY_SIZE(imx290_modes), + .modes = imx290_modes, + + .streamon_regs_items = ARRAY_SIZE(imx290_streamon_regs), + .streamon_regs = imx290_streamon_regs, + + .streamoff_regs_items = ARRAY_SIZE(imx290_streamoff_regs), + .streamoff_regs = imx290_streamoff_regs, + + .v4l2_ctrls_items = ARRAY_SIZE(imx290_v4l2_ctrls), + .v4l2_ctrl_bank = imx290_v4l2_ctrls, + + .csi_fmts_items = ARRAY_SIZE(imx290_crl_csi_data_fmt), + .csi_fmts = imx290_crl_csi_data_fmt, + + 
.flip_items = ARRAY_SIZE(imx290_flip_configurations), + .flip_data = imx290_flip_configurations, + + .frame_desc_entries = ARRAY_SIZE(imx290_frame_desc), + .frame_desc_type = CRL_V4L2_MBUS_FRAME_DESC_TYPE_CSI2, + .frame_desc = imx290_frame_desc, +}; + +#endif /* __CRLMODULE_IMX290_CONFIGURATION_H_ */ diff --git a/drivers/media/i2c/crlmodule/crl_imx318_configuration.h b/drivers/media/i2c/crlmodule/crl_imx318_configuration.h new file mode 100644 index 0000000000000..631a6d10400fd --- /dev/null +++ b/drivers/media/i2c/crlmodule/crl_imx318_configuration.h @@ -0,0 +1,1050 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (C) 2016 - 2018 Intel Corporation + * + * Author: Jouni Ukkonen + * + */ +#ifndef __CRLMODULE_IMX318_CONFIGURATION_H_ +#define __CRLMODULE_IMX318_CONFIGURATION_H_ + +#include "crlmodule-nvm.h" +#include "crlmodule-sensor-ds.h" + +static const struct crl_register_write_rep imx318_pll_1164mbps[] = { + { 0x0136, CRL_REG_LEN_08BIT, 0x18 }, /* 24 Mhz */ + { 0x0137, CRL_REG_LEN_08BIT, 0x00 }, + { 0x0111, CRL_REG_LEN_08BIT, 0x02 }, /* 2 = DPHY, 3 = CPHY */ + { 0x0112, CRL_REG_LEN_08BIT, 0x0A }, + { 0x0113, CRL_REG_LEN_08BIT, 0x0A }, + { 0x0114, CRL_REG_LEN_08BIT, 0x03 }, + { 0x0301, CRL_REG_LEN_08BIT, 0x05 }, + { 0x0303, CRL_REG_LEN_08BIT, 0x02 }, + { 0x0305, CRL_REG_LEN_08BIT, 0x04 }, + { 0x0306, CRL_REG_LEN_08BIT, 0x01 }, + { 0x0307, CRL_REG_LEN_08BIT, 0x4D }, + { 0x0309, CRL_REG_LEN_08BIT, 0x0A }, + { 0x030B, CRL_REG_LEN_08BIT, 0x02 }, + { 0x030D, CRL_REG_LEN_08BIT, 0x04 }, + { 0x030E, CRL_REG_LEN_08BIT, 0x01 }, + { 0x030F, CRL_REG_LEN_08BIT, 0x84 }, + { 0x0820, CRL_REG_LEN_08BIT, 0x12 }, + { 0x0821, CRL_REG_LEN_08BIT, 0x30 }, + { 0x0822, CRL_REG_LEN_08BIT, 0x00 }, + { 0x0823, CRL_REG_LEN_08BIT, 0x00 }, +}; + +static const struct crl_register_write_rep imx318_pll_8_1164mbps[] = { + { 0x0136, CRL_REG_LEN_08BIT, 0x18 }, /* 24 Mhz */ + { 0x0137, CRL_REG_LEN_08BIT, 0x00 }, + { 0x0111, CRL_REG_LEN_08BIT, 0x02 }, /* 2 = DPHY, 3 = CPHY */ + { 0x0112, 
CRL_REG_LEN_08BIT, 0x0A }, + { 0x0113, CRL_REG_LEN_08BIT, 0x0A }, + { 0x0114, CRL_REG_LEN_08BIT, 0x03 }, + { 0x0301, CRL_REG_LEN_08BIT, 0x05 }, + { 0x0303, CRL_REG_LEN_08BIT, 0x02 }, + { 0x0305, CRL_REG_LEN_08BIT, 0x04 }, + { 0x0306, CRL_REG_LEN_08BIT, 0x01 }, + { 0x0307, CRL_REG_LEN_08BIT, 0x4D }, + { 0x0309, CRL_REG_LEN_08BIT, 0x08 }, + { 0x030B, CRL_REG_LEN_08BIT, 0x02 }, + { 0x030D, CRL_REG_LEN_08BIT, 0x04 }, + { 0x030E, CRL_REG_LEN_08BIT, 0x01 }, + { 0x030F, CRL_REG_LEN_08BIT, 0x84 }, + { 0x0820, CRL_REG_LEN_08BIT, 0x12 }, + { 0x0821, CRL_REG_LEN_08BIT, 0x30 }, + { 0x0822, CRL_REG_LEN_08BIT, 0x00 }, + { 0x0823, CRL_REG_LEN_08BIT, 0x00 }, +}; + +static const struct crl_register_write_rep imx318_pll_1920mbps[] = { + { 0x0136, CRL_REG_LEN_08BIT, 0x18 }, /* 24 Mhz */ + { 0x0137, CRL_REG_LEN_08BIT, 0x00 }, + { 0x0111, CRL_REG_LEN_08BIT, 0x02 }, /* 2 = DPHY, 3 = CPHY */ + { 0x0112, CRL_REG_LEN_08BIT, 0x0A }, + { 0x0113, CRL_REG_LEN_08BIT, 0x0A }, + { 0x0114, CRL_REG_LEN_08BIT, 0x03 }, + { 0x0301, CRL_REG_LEN_08BIT, 0x05 }, + { 0x0303, CRL_REG_LEN_08BIT, 0x02 }, + { 0x0305, CRL_REG_LEN_08BIT, 0x04 }, + { 0x0306, CRL_REG_LEN_08BIT, 0x01 }, + { 0x0307, CRL_REG_LEN_08BIT, 0x4D }, + { 0x0309, CRL_REG_LEN_08BIT, 0x0A }, + { 0x030B, CRL_REG_LEN_08BIT, 0x01 }, + { 0x030D, CRL_REG_LEN_08BIT, 0x04 }, + { 0x030E, CRL_REG_LEN_08BIT, 0x01 }, + { 0x030F, CRL_REG_LEN_08BIT, 0x40 }, + { 0x0820, CRL_REG_LEN_08BIT, 0x1E }, + { 0x0821, CRL_REG_LEN_08BIT, 0x00 }, + { 0x0822, CRL_REG_LEN_08BIT, 0x00 }, + { 0x0823, CRL_REG_LEN_08BIT, 0x00 }, +}; + +static const struct crl_register_write_rep imx318_pll_8_1920mbps[] = { + { 0x0136, CRL_REG_LEN_08BIT, 0x18 }, /* 24 Mhz */ + { 0x0137, CRL_REG_LEN_08BIT, 0x00 }, + { 0x0111, CRL_REG_LEN_08BIT, 0x02 }, /* 2 = DPHY, 3 = CPHY */ + { 0x0112, CRL_REG_LEN_08BIT, 0x0A }, + { 0x0113, CRL_REG_LEN_08BIT, 0x0A }, + { 0x0114, CRL_REG_LEN_08BIT, 0x03 }, + { 0x0301, CRL_REG_LEN_08BIT, 0x05 }, + { 0x0303, CRL_REG_LEN_08BIT, 0x02 }, + { 0x0305, 
CRL_REG_LEN_08BIT, 0x04 }, + { 0x0306, CRL_REG_LEN_08BIT, 0x01 }, + { 0x0307, CRL_REG_LEN_08BIT, 0x4D }, + { 0x0309, CRL_REG_LEN_08BIT, 0x08 }, + { 0x030B, CRL_REG_LEN_08BIT, 0x01 }, + { 0x030D, CRL_REG_LEN_08BIT, 0x04 }, + { 0x030E, CRL_REG_LEN_08BIT, 0x01 }, + { 0x030F, CRL_REG_LEN_08BIT, 0x40 }, + { 0x0820, CRL_REG_LEN_08BIT, 0x1E }, + { 0x0821, CRL_REG_LEN_08BIT, 0x00 }, + { 0x0822, CRL_REG_LEN_08BIT, 0x00 }, + { 0x0823, CRL_REG_LEN_08BIT, 0x00 }, +}; + + +static const struct crl_register_write_rep imx318_powerup_regset[] = { + { 0x3067, CRL_REG_LEN_08BIT, 0x00 }, + { 0x4600, CRL_REG_LEN_08BIT, 0x1B }, + { 0x46C2, CRL_REG_LEN_08BIT, 0x00 }, + { 0x4877, CRL_REG_LEN_08BIT, 0x11 }, + { 0x487B, CRL_REG_LEN_08BIT, 0x4D }, + { 0x487F, CRL_REG_LEN_08BIT, 0x3B }, + { 0x4883, CRL_REG_LEN_08BIT, 0xB4 }, + { 0x4C6F, CRL_REG_LEN_08BIT, 0x5E }, + { 0x5113, CRL_REG_LEN_08BIT, 0xF4 }, + { 0x5115, CRL_REG_LEN_08BIT, 0xF6 }, + { 0x5125, CRL_REG_LEN_08BIT, 0xF4 }, + { 0x5127, CRL_REG_LEN_08BIT, 0xF8 }, + { 0x51CF, CRL_REG_LEN_08BIT, 0xF4 }, + { 0x51E9, CRL_REG_LEN_08BIT, 0xF4 }, + { 0x5483, CRL_REG_LEN_08BIT, 0x7A }, + { 0x5485, CRL_REG_LEN_08BIT, 0x7C }, + { 0x5495, CRL_REG_LEN_08BIT, 0x7A }, + { 0x5497, CRL_REG_LEN_08BIT, 0x7F }, + { 0x5515, CRL_REG_LEN_08BIT, 0xC3 }, + { 0x5517, CRL_REG_LEN_08BIT, 0xC7 }, + { 0x552B, CRL_REG_LEN_08BIT, 0x7A }, + { 0x5535, CRL_REG_LEN_08BIT, 0x7A }, + { 0x5A35, CRL_REG_LEN_08BIT, 0x1B }, + { 0x5C13, CRL_REG_LEN_08BIT, 0x00 }, + { 0x5D89, CRL_REG_LEN_08BIT, 0xB1 }, + { 0x5D8B, CRL_REG_LEN_08BIT, 0x2C }, + { 0x5D8D, CRL_REG_LEN_08BIT, 0x61 }, + { 0x5D8F, CRL_REG_LEN_08BIT, 0xE1 }, + { 0x5D91, CRL_REG_LEN_08BIT, 0x4D }, + { 0x5D93, CRL_REG_LEN_08BIT, 0xB4 }, + { 0x5D95, CRL_REG_LEN_08BIT, 0x41 }, + { 0x5D97, CRL_REG_LEN_08BIT, 0x96 }, + { 0x5D99, CRL_REG_LEN_08BIT, 0x37 }, + { 0x5D9B, CRL_REG_LEN_08BIT, 0x81 }, + { 0x5D9D, CRL_REG_LEN_08BIT, 0x31 }, + { 0x5D9F, CRL_REG_LEN_08BIT, 0x71 }, + { 0x5DA1, CRL_REG_LEN_08BIT, 0x2B }, + { 0x5DA3, 
CRL_REG_LEN_08BIT, 0x64 }, + { 0x5DA5, CRL_REG_LEN_08BIT, 0x27 }, + { 0x5DA7, CRL_REG_LEN_08BIT, 0x5A }, + { 0x6009, CRL_REG_LEN_08BIT, 0x03 }, + { 0x613A, CRL_REG_LEN_08BIT, 0x05 }, + { 0x613C, CRL_REG_LEN_08BIT, 0x23 }, + { 0x6142, CRL_REG_LEN_08BIT, 0x02 }, + { 0x6143, CRL_REG_LEN_08BIT, 0x62 }, + { 0x6144, CRL_REG_LEN_08BIT, 0x89 }, + { 0x6145, CRL_REG_LEN_08BIT, 0x0A }, + { 0x6146, CRL_REG_LEN_08BIT, 0x24 }, + { 0x6147, CRL_REG_LEN_08BIT, 0x28 }, + { 0x6148, CRL_REG_LEN_08BIT, 0x90 }, + { 0x6149, CRL_REG_LEN_08BIT, 0xA2 }, + { 0x614A, CRL_REG_LEN_08BIT, 0x40 }, + { 0x614B, CRL_REG_LEN_08BIT, 0x8A }, + { 0x614C, CRL_REG_LEN_08BIT, 0x01 }, + { 0x614D, CRL_REG_LEN_08BIT, 0x12 }, + { 0x614E, CRL_REG_LEN_08BIT, 0x2C }, + { 0x614F, CRL_REG_LEN_08BIT, 0x98 }, + { 0x6150, CRL_REG_LEN_08BIT, 0xA2 }, + { 0x615D, CRL_REG_LEN_08BIT, 0x37 }, + { 0x615E, CRL_REG_LEN_08BIT, 0xE6 }, + { 0x615F, CRL_REG_LEN_08BIT, 0x4B }, + { 0x616C, CRL_REG_LEN_08BIT, 0x41 }, + { 0x616D, CRL_REG_LEN_08BIT, 0x05 }, + { 0x616E, CRL_REG_LEN_08BIT, 0x48 }, + { 0x616F, CRL_REG_LEN_08BIT, 0xC5 }, + { 0x6174, CRL_REG_LEN_08BIT, 0xB9 }, + { 0x6175, CRL_REG_LEN_08BIT, 0x42 }, + { 0x6176, CRL_REG_LEN_08BIT, 0x44 }, + { 0x6177, CRL_REG_LEN_08BIT, 0xC3 }, + { 0x6178, CRL_REG_LEN_08BIT, 0x81 }, + { 0x6179, CRL_REG_LEN_08BIT, 0x78 }, + { 0x6182, CRL_REG_LEN_08BIT, 0x15 }, + { 0x6A5F, CRL_REG_LEN_08BIT, 0x03 }, + { 0x9302, CRL_REG_LEN_08BIT, 0xFF }, +}; + +static const struct crl_register_write_rep imx318_mode_full[] = { + { 0x0344, CRL_REG_LEN_08BIT, 0x00 }, + { 0x0345, CRL_REG_LEN_08BIT, 0x00 }, + { 0x0346, CRL_REG_LEN_08BIT, 0x00 }, + { 0x0347, CRL_REG_LEN_08BIT, 0x00 }, + { 0x0348, CRL_REG_LEN_08BIT, 0x15 }, + { 0x0349, CRL_REG_LEN_08BIT, 0x6F }, + { 0x034A, CRL_REG_LEN_08BIT, 0x10 }, + { 0x034B, CRL_REG_LEN_08BIT, 0x0F }, + { 0x0220, CRL_REG_LEN_08BIT, 0x00 }, + { 0x0221, CRL_REG_LEN_08BIT, 0x11 }, + { 0x0222, CRL_REG_LEN_08BIT, 0x01 }, + { 0x0381, CRL_REG_LEN_08BIT, 0x01 }, + { 0x0383, 
CRL_REG_LEN_08BIT, 0x01 }, + { 0x0385, CRL_REG_LEN_08BIT, 0x01 }, + { 0x0387, CRL_REG_LEN_08BIT, 0x01 }, + { 0x0900, CRL_REG_LEN_08BIT, 0x00 }, + { 0x0901, CRL_REG_LEN_08BIT, 0x11 }, + { 0x0902, CRL_REG_LEN_08BIT, 0x00 }, + { 0x3010, CRL_REG_LEN_08BIT, 0x65 }, + { 0x3011, CRL_REG_LEN_08BIT, 0x11 }, + { 0x30FC, CRL_REG_LEN_08BIT, 0x00 }, + { 0x30FD, CRL_REG_LEN_08BIT, 0x00 }, + { 0x3194, CRL_REG_LEN_08BIT, 0x01 }, + { 0x31A0, CRL_REG_LEN_08BIT, 0x00 }, + { 0x31A1, CRL_REG_LEN_08BIT, 0x00 }, + { 0x4711, CRL_REG_LEN_08BIT, 0x00 }, + { 0x6669, CRL_REG_LEN_08BIT, 0x00 }, + { 0x0401, CRL_REG_LEN_08BIT, 0x00 }, + { 0x0404, CRL_REG_LEN_08BIT, 0x00 }, + { 0x0405, CRL_REG_LEN_08BIT, 0x10 }, + { 0x0408, CRL_REG_LEN_08BIT, 0x00 }, + { 0x0409, CRL_REG_LEN_08BIT, 0x00 }, + { 0x040A, CRL_REG_LEN_08BIT, 0x00 }, + { 0x040B, CRL_REG_LEN_08BIT, 0x00 }, + { 0x040C, CRL_REG_LEN_08BIT, 0x15 }, + { 0x040D, CRL_REG_LEN_08BIT, 0x70 }, + { 0x040E, CRL_REG_LEN_08BIT, 0x10 }, + { 0x040F, CRL_REG_LEN_08BIT, 0x10 }, + { 0x034C, CRL_REG_LEN_08BIT, 0x15 }, + { 0x034D, CRL_REG_LEN_08BIT, 0x70 }, + { 0x034E, CRL_REG_LEN_08BIT, 0x10 }, + { 0x034F, CRL_REG_LEN_08BIT, 0x10 }, + { 0x3031, CRL_REG_LEN_08BIT, 0x00 }, + { 0x3033, CRL_REG_LEN_08BIT, 0x00 }, + { 0x3035, CRL_REG_LEN_08BIT, 0x00 }, + { 0x3037, CRL_REG_LEN_08BIT, 0x00 }, + { 0x3039, CRL_REG_LEN_08BIT, 0x00 }, + { 0x303B, CRL_REG_LEN_08BIT, 0x00 }, + { 0x306C, CRL_REG_LEN_08BIT, 0x00 }, + { 0x306E, CRL_REG_LEN_08BIT, 0x0D }, + { 0x306F, CRL_REG_LEN_08BIT, 0x56 }, + { 0x6636, CRL_REG_LEN_08BIT, 0x00 }, + { 0x6637, CRL_REG_LEN_08BIT, 0x14 }, + { 0x3066, CRL_REG_LEN_08BIT, 0x00 }, + { 0x7B63, CRL_REG_LEN_08BIT, 0x00 }, + { 0x56FB, CRL_REG_LEN_08BIT, 0x50 }, + { 0x56FF, CRL_REG_LEN_08BIT, 0x50 }, + { 0x9323, CRL_REG_LEN_08BIT, 0x10 }, +}; + + +static const struct crl_register_write_rep imx318_mode_uhd[] = { + { 0x0344, CRL_REG_LEN_08BIT, 0x00 }, + { 0x0345, CRL_REG_LEN_08BIT, 0x00 }, + { 0x0346, CRL_REG_LEN_08BIT, 0x02 }, + { 0x0347, 
CRL_REG_LEN_08BIT, 0x00 }, + { 0x0348, CRL_REG_LEN_08BIT, 0x15 }, + { 0x0349, CRL_REG_LEN_08BIT, 0x6F }, + { 0x034A, CRL_REG_LEN_08BIT, 0x0E }, + { 0x034B, CRL_REG_LEN_08BIT, 0x0F }, + { 0x0220, CRL_REG_LEN_08BIT, 0x00 }, + { 0x0221, CRL_REG_LEN_08BIT, 0x11 }, + { 0x0222, CRL_REG_LEN_08BIT, 0x01 }, + { 0x0381, CRL_REG_LEN_08BIT, 0x01 }, + { 0x0383, CRL_REG_LEN_08BIT, 0x01 }, + { 0x0385, CRL_REG_LEN_08BIT, 0x01 }, + { 0x0387, CRL_REG_LEN_08BIT, 0x01 }, + { 0x0900, CRL_REG_LEN_08BIT, 0x00 }, + { 0x0901, CRL_REG_LEN_08BIT, 0x11 }, + { 0x0902, CRL_REG_LEN_08BIT, 0x00 }, + { 0x3010, CRL_REG_LEN_08BIT, 0x65 }, + { 0x3011, CRL_REG_LEN_08BIT, 0x11 }, + { 0x30FC, CRL_REG_LEN_08BIT, 0x00 }, + { 0x30FD, CRL_REG_LEN_08BIT, 0x00 }, + { 0x3194, CRL_REG_LEN_08BIT, 0x00 }, + { 0x31A0, CRL_REG_LEN_08BIT, 0x00 }, + { 0x31A1, CRL_REG_LEN_08BIT, 0x00 }, + { 0x4711, CRL_REG_LEN_08BIT, 0x00 }, + { 0x6669, CRL_REG_LEN_08BIT, 0x00 }, + { 0x0401, CRL_REG_LEN_08BIT, 0x02 }, + { 0x0404, CRL_REG_LEN_08BIT, 0x00 }, + { 0x0405, CRL_REG_LEN_08BIT, 0x16 }, + { 0x0408, CRL_REG_LEN_08BIT, 0x00 }, + { 0x0409, CRL_REG_LEN_08BIT, 0x68 }, + { 0x040A, CRL_REG_LEN_08BIT, 0x00 }, + { 0x040B, CRL_REG_LEN_08BIT, 0x3A }, + { 0x040C, CRL_REG_LEN_08BIT, 0x14 }, + { 0x040D, CRL_REG_LEN_08BIT, 0xA0 }, + { 0x040E, CRL_REG_LEN_08BIT, 0x0B }, + { 0x040F, CRL_REG_LEN_08BIT, 0x9C }, + { 0x034C, CRL_REG_LEN_08BIT, 0x0F }, + { 0x034D, CRL_REG_LEN_08BIT, 0x00 }, + { 0x034E, CRL_REG_LEN_08BIT, 0x08 }, + { 0x034F, CRL_REG_LEN_08BIT, 0x70 }, + { 0x3031, CRL_REG_LEN_08BIT, 0x00 }, + { 0x3033, CRL_REG_LEN_08BIT, 0x00 }, + { 0x3035, CRL_REG_LEN_08BIT, 0x00 }, + { 0x3037, CRL_REG_LEN_08BIT, 0x00 }, + { 0x3039, CRL_REG_LEN_08BIT, 0x00 }, + { 0x303B, CRL_REG_LEN_08BIT, 0x00 }, + { 0x306C, CRL_REG_LEN_08BIT, 0x00 }, + { 0x306E, CRL_REG_LEN_08BIT, 0x0D }, + { 0x306F, CRL_REG_LEN_08BIT, 0x56 }, + { 0x6636, CRL_REG_LEN_08BIT, 0x00 }, + { 0x6637, CRL_REG_LEN_08BIT, 0x14 }, + { 0x3066, CRL_REG_LEN_08BIT, 0x00 }, + { 0x7B63, 
CRL_REG_LEN_08BIT, 0x00 }, + { 0x56FB, CRL_REG_LEN_08BIT, 0x33 }, + { 0x56FF, CRL_REG_LEN_08BIT, 0x33 }, + { 0x9323, CRL_REG_LEN_08BIT, 0x16 }, +}; + +static const struct crl_register_write_rep imx318_mode_1080[] = { + { 0x0344, CRL_REG_LEN_08BIT, 0x00 }, + { 0x0345, CRL_REG_LEN_08BIT, 0x00 }, + { 0x0346, CRL_REG_LEN_08BIT, 0x02 }, + { 0x0347, CRL_REG_LEN_08BIT, 0x00 }, + { 0x0348, CRL_REG_LEN_08BIT, 0x15 }, + { 0x0349, CRL_REG_LEN_08BIT, 0x6F }, + { 0x034A, CRL_REG_LEN_08BIT, 0x0E }, + { 0x034B, CRL_REG_LEN_08BIT, 0x0F }, + { 0x0220, CRL_REG_LEN_08BIT, 0x00 }, + { 0x0221, CRL_REG_LEN_08BIT, 0x11 }, + { 0x0222, CRL_REG_LEN_08BIT, 0x01 }, + { 0x0381, CRL_REG_LEN_08BIT, 0x01 }, + { 0x0383, CRL_REG_LEN_08BIT, 0x01 }, + { 0x0385, CRL_REG_LEN_08BIT, 0x01 }, + { 0x0387, CRL_REG_LEN_08BIT, 0x01 }, + { 0x0900, CRL_REG_LEN_08BIT, 0x01 }, + { 0x0901, CRL_REG_LEN_08BIT, 0x22 }, + { 0x0902, CRL_REG_LEN_08BIT, 0x02 }, + { 0x3010, CRL_REG_LEN_08BIT, 0x65 }, + { 0x3011, CRL_REG_LEN_08BIT, 0x11 }, + { 0x30FC, CRL_REG_LEN_08BIT, 0x00 }, + { 0x30FD, CRL_REG_LEN_08BIT, 0x00 }, + { 0x3194, CRL_REG_LEN_08BIT, 0x00 }, + { 0x31A0, CRL_REG_LEN_08BIT, 0x00 }, + { 0x31A1, CRL_REG_LEN_08BIT, 0x00 }, + { 0x4711, CRL_REG_LEN_08BIT, 0x00 }, + { 0x6669, CRL_REG_LEN_08BIT, 0x00 }, + { 0x0401, CRL_REG_LEN_08BIT, 0x02 }, + { 0x0404, CRL_REG_LEN_08BIT, 0x00 }, + { 0x0405, CRL_REG_LEN_08BIT, 0x16 }, + { 0x0408, CRL_REG_LEN_08BIT, 0x00 }, + { 0x0409, CRL_REG_LEN_08BIT, 0x34 }, + { 0x040A, CRL_REG_LEN_08BIT, 0x00 }, + { 0x040B, CRL_REG_LEN_08BIT, 0x1C }, + { 0x040C, CRL_REG_LEN_08BIT, 0x0A }, + { 0x040D, CRL_REG_LEN_08BIT, 0x50 }, + { 0x040E, CRL_REG_LEN_08BIT, 0x05 }, + { 0x040F, CRL_REG_LEN_08BIT, 0xCE }, + { 0x034C, CRL_REG_LEN_08BIT, 0x07 }, + { 0x034D, CRL_REG_LEN_08BIT, 0x80 }, + { 0x034E, CRL_REG_LEN_08BIT, 0x04 }, + { 0x034F, CRL_REG_LEN_08BIT, 0x38 }, + { 0x3031, CRL_REG_LEN_08BIT, 0x00 }, + { 0x3033, CRL_REG_LEN_08BIT, 0x00 }, + { 0x3035, CRL_REG_LEN_08BIT, 0x00 }, + { 0x3037, 
CRL_REG_LEN_08BIT, 0x00 }, + { 0x3039, CRL_REG_LEN_08BIT, 0x00 }, + { 0x303B, CRL_REG_LEN_08BIT, 0x00 }, + { 0x306C, CRL_REG_LEN_08BIT, 0x00 }, + { 0x306E, CRL_REG_LEN_08BIT, 0x0D }, + { 0x306F, CRL_REG_LEN_08BIT, 0x56 }, + { 0x6636, CRL_REG_LEN_08BIT, 0x00 }, + { 0x6637, CRL_REG_LEN_08BIT, 0x14 }, + { 0x3066, CRL_REG_LEN_08BIT, 0x00 }, + { 0x7B63, CRL_REG_LEN_08BIT, 0x00 }, + { 0x56FB, CRL_REG_LEN_08BIT, 0x33 }, + { 0x56FF, CRL_REG_LEN_08BIT, 0x33 }, + { 0x9323, CRL_REG_LEN_08BIT, 0x16 }, +}; + +static const struct crl_register_write_rep imx318_mode_720[] = { + { 0x0344, CRL_REG_LEN_08BIT, 0x00 }, + { 0x0345, CRL_REG_LEN_08BIT, 0x00 }, + { 0x0346, CRL_REG_LEN_08BIT, 0x02 }, + { 0x0347, CRL_REG_LEN_08BIT, 0x04 }, + { 0x0348, CRL_REG_LEN_08BIT, 0x15 }, + { 0x0349, CRL_REG_LEN_08BIT, 0x6F }, + { 0x034A, CRL_REG_LEN_08BIT, 0x0E }, + { 0x034B, CRL_REG_LEN_08BIT, 0x13 }, + { 0x0220, CRL_REG_LEN_08BIT, 0x00 }, + { 0x0221, CRL_REG_LEN_08BIT, 0x11 }, + { 0x0222, CRL_REG_LEN_08BIT, 0x01 }, + { 0x0381, CRL_REG_LEN_08BIT, 0x01 }, + { 0x0383, CRL_REG_LEN_08BIT, 0x01 }, + { 0x0385, CRL_REG_LEN_08BIT, 0x01 }, + { 0x0387, CRL_REG_LEN_08BIT, 0x01 }, + { 0x0900, CRL_REG_LEN_08BIT, 0x01 }, + { 0x0901, CRL_REG_LEN_08BIT, 0x44 }, + { 0x0902, CRL_REG_LEN_08BIT, 0x02 }, + { 0x3010, CRL_REG_LEN_08BIT, 0x65 }, + { 0x3011, CRL_REG_LEN_08BIT, 0x11 }, + { 0x30FC, CRL_REG_LEN_08BIT, 0x00 }, + { 0x30FD, CRL_REG_LEN_08BIT, 0x00 }, + { 0x3194, CRL_REG_LEN_08BIT, 0x00 }, + { 0x31A0, CRL_REG_LEN_08BIT, 0x00 }, + { 0x31A1, CRL_REG_LEN_08BIT, 0x00 }, + { 0x4711, CRL_REG_LEN_08BIT, 0x00 }, + { 0x6669, CRL_REG_LEN_08BIT, 0x00 }, + { 0x0401, CRL_REG_LEN_08BIT, 0x02 }, + { 0x0404, CRL_REG_LEN_08BIT, 0x00 }, + { 0x0405, CRL_REG_LEN_08BIT, 0x11 }, + { 0x0408, CRL_REG_LEN_08BIT, 0x00 }, + { 0x0409, CRL_REG_LEN_08BIT, 0x04 }, + { 0x040A, CRL_REG_LEN_08BIT, 0x00 }, + { 0x040B, CRL_REG_LEN_08BIT, 0x02 }, + { 0x040C, CRL_REG_LEN_08BIT, 0x05 }, + { 0x040D, CRL_REG_LEN_08BIT, 0x52 }, + { 0x040E, 
CRL_REG_LEN_08BIT, 0x02 }, + { 0x040F, CRL_REG_LEN_08BIT, 0xFE }, + { 0x034C, CRL_REG_LEN_08BIT, 0x05 }, + { 0x034D, CRL_REG_LEN_08BIT, 0x00 }, + { 0x034E, CRL_REG_LEN_08BIT, 0x02 }, + { 0x034F, CRL_REG_LEN_08BIT, 0xD0 }, + { 0x3031, CRL_REG_LEN_08BIT, 0x00 }, + { 0x3033, CRL_REG_LEN_08BIT, 0x00 }, + { 0x3035, CRL_REG_LEN_08BIT, 0x00 }, + { 0x3037, CRL_REG_LEN_08BIT, 0x00 }, + { 0x3039, CRL_REG_LEN_08BIT, 0x00 }, + { 0x303B, CRL_REG_LEN_08BIT, 0x00 }, + { 0x306C, CRL_REG_LEN_08BIT, 0x00 }, + { 0x306E, CRL_REG_LEN_08BIT, 0x0D }, + { 0x306F, CRL_REG_LEN_08BIT, 0x56 }, + { 0x6636, CRL_REG_LEN_08BIT, 0x00 }, + { 0x6637, CRL_REG_LEN_08BIT, 0x14 }, + { 0x3066, CRL_REG_LEN_08BIT, 0x00 }, + { 0x7B63, CRL_REG_LEN_08BIT, 0x00 }, + { 0x56FB, CRL_REG_LEN_08BIT, 0x33 }, + { 0x56FF, CRL_REG_LEN_08BIT, 0x33 }, + { 0x9323, CRL_REG_LEN_08BIT, 0x16 }, +}; + +static struct crl_register_write_rep imx318_streamon_regs[] = { + { 0x0100, CRL_REG_LEN_08BIT, 0x01 } +}; + +static struct crl_register_write_rep imx318_streamoff_regs[] = { + { 0x0100, CRL_REG_LEN_08BIT, 0x00 } +}; + +static struct crl_register_write_rep imx318_data_fmt_width10[] = { + { 0x0112, CRL_REG_LEN_16BIT, 0x0a0a } +}; + +static struct crl_register_write_rep imx318_data_fmt_width8[] = { + { 0x0112, CRL_REG_LEN_16BIT, 0x0808 } +}; + +static struct crl_arithmetic_ops imx318_vflip_ops[] = { + { + .op = CRL_BITWISE_LSHIFT, + .operand.entity_val = 1, + }, +}; + +static struct crl_dynamic_register_access imx318_h_flip_regs[] = { + { + .address = 0x0101, + .len = CRL_REG_LEN_08BIT | CRL_REG_READ_AND_UPDATE, + .ops_items = 0, + .ops = 0, + .mask = 0x1, + }, +}; + +static struct crl_dynamic_register_access imx318_v_flip_regs[] = { + { + .address = 0x0101, + .len = CRL_REG_LEN_08BIT | CRL_REG_READ_AND_UPDATE, + .ops_items = ARRAY_SIZE(imx318_vflip_ops), + .ops = imx318_vflip_ops, + .mask = 0x2, + }, +}; + +static struct crl_dynamic_register_access imx318_ana_gain_global_regs[] = { + { + .address = 0x0204, + .len = 
CRL_REG_LEN_16BIT, + .ops_items = 0, + .ops = 0, + .mask = 0xffff, + }, +}; + +static struct crl_dynamic_register_access imx318_exposure_regs[] = { + { + .address = 0x0202, + .len = CRL_REG_LEN_16BIT, + .ops_items = 0, + .ops = 0, + .mask = 0xffff, + } +}; + +static struct crl_dynamic_register_access imx318_fll_regs[] = { + { + .address = 0x0340, + .len = CRL_REG_LEN_16BIT, + .ops_items = 0, + .ops = 0, + .mask = 0xffff, + }, +}; + +static struct crl_dynamic_register_access imx318_llp_regs[] = { + { + .address = 0x0342, + .len = CRL_REG_LEN_16BIT, + .ops_items = 0, + .ops = 0, + .mask = 0xffff, + }, +}; + +static struct crl_sensor_detect_config imx318_sensor_detect_regset[] = { + { + .reg = { 0x0019, CRL_REG_LEN_08BIT, 0x000000ff }, + .width = 5, + }, + { + .reg = { 0x0018, CRL_REG_LEN_08BIT, 0x000000ff }, + .width = 5, + }, + { + .reg = { 0x0016, CRL_REG_LEN_16BIT, 0x0000ffff }, + .width = 7, + }, +}; + +static struct crl_pll_configuration imx318_pll_configurations[] = { + { + .input_clk = 24000000, + .op_sys_clk = 582000000, + .bitsperpixel = 10, + .pixel_rate_csi = 465600000, + .pixel_rate_pa = 799206000, + .csi_lanes = 4, + .comp_items = 0, + .ctrl_data = 0, + .pll_regs_items = ARRAY_SIZE(imx318_pll_1164mbps), + .pll_regs = imx318_pll_1164mbps, + }, + { + .input_clk = 24000000, + .op_sys_clk = 582000000, + .bitsperpixel = 8, + .pixel_rate_csi = 465600000, + .pixel_rate_pa = 799206000, + .csi_lanes = 4, + .comp_items = 0, + .ctrl_data = 0, + .pll_regs_items = ARRAY_SIZE(imx318_pll_8_1164mbps), + .pll_regs = imx318_pll_8_1164mbps, + }, + { + .input_clk = 24000000, + .op_sys_clk = 960000000, + .bitsperpixel = 10, + .pixel_rate_csi = 768000000, + .pixel_rate_pa = 799206000, + .csi_lanes = 4, + .comp_items = 0, + .ctrl_data = 0, + .pll_regs_items = ARRAY_SIZE(imx318_pll_1920mbps), + .pll_regs = imx318_pll_1920mbps, + }, + { + .input_clk = 24000000, + .op_sys_clk = 960000000, + .bitsperpixel = 8, + .pixel_rate_csi = 960000000, + .pixel_rate_pa = 799206000, + 
.csi_lanes = 4, + .comp_items = 0, + .ctrl_data = 0, + .pll_regs_items = ARRAY_SIZE(imx318_pll_8_1920mbps), + .pll_regs = imx318_pll_8_1920mbps, + }, + +}; + +static struct crl_subdev_rect_rep imx318_full_rects[] = { + { + .subdev_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .in_rect = { 0, 0, 5488, 4112 }, + .out_rect = { 0, 0, 5488, 4112 }, + }, + { + .subdev_type = CRL_SUBDEV_TYPE_BINNER, + .in_rect = { 0, 0, 5488, 4112 }, + .out_rect = { 0, 0, 5488, 4112 }, + }, + { + .subdev_type = CRL_SUBDEV_TYPE_SCALER, + .in_rect = { 0, 0, 5488, 4112 }, + .out_rect = { 0, 0, 5488, 4112 }, + }, +}; + + +static struct crl_subdev_rect_rep imx318_uhd_rects[] = { + { + .subdev_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .in_rect = { 0, 0, 5488, 4112 }, + .out_rect = { 0, 512, 5280, 3088 }, + }, + { + .subdev_type = CRL_SUBDEV_TYPE_BINNER, + .in_rect = { 0, 0, 5280, 3088 }, + .out_rect = { 0, 0, 5280, 3088 }, + }, + { + .subdev_type = CRL_SUBDEV_TYPE_SCALER, + .in_rect = { 0, 0, 5280, 3088 }, + .out_rect = { 0, 0, 3840, 2160 }, + }, +}; + + +static struct crl_subdev_rect_rep imx318_1080_rects[] = { + { + .subdev_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .in_rect = { 0, 0, 5488, 4112 }, + .out_rect = { 0, 512, 5488, 3088 }, + }, + { + .subdev_type = CRL_SUBDEV_TYPE_BINNER, + .in_rect = { 0, 0, 5488, 3088 }, + .out_rect = { 0, 0, 2744, 1544 }, + }, + { + .subdev_type = CRL_SUBDEV_TYPE_SCALER, + .in_rect = { 0, 0, 2744, 1544 }, + .out_rect = { 0, 0, 1920, 1080 }, + }, +}; + +static struct crl_subdev_rect_rep imx318_720_rects[] = { + { + .subdev_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .in_rect = { 0, 0, 5488, 4112 }, + .out_rect = { 0, 516, 5488, 3088 }, + }, + { + .subdev_type = CRL_SUBDEV_TYPE_BINNER, + .in_rect = { 0, 0, 5488, 3088 }, + .out_rect = { 0, 0, 1372, 772 }, + }, + { + .subdev_type = CRL_SUBDEV_TYPE_SCALER, + .in_rect = { 0, 0, 1372, 772 }, + .out_rect = { 0, 0, 1280, 720 }, + }, +}; + +static struct crl_mode_rep imx318_modes[] = { + { + .sd_rects_items = ARRAY_SIZE(imx318_full_rects), + 
.sd_rects = imx318_full_rects, + .binn_hor = 1, + .binn_vert = 1, + .scale_m = 1, + .width = 5488, + .height = 4112, + .min_llp = 6224, + .min_fll = 4280, + .comp_items = 0, + .ctrl_data = 0, + .mode_regs_items = ARRAY_SIZE(imx318_mode_full), + .mode_regs = imx318_mode_full, + }, + { + .sd_rects_items = ARRAY_SIZE(imx318_uhd_rects), + .sd_rects = imx318_uhd_rects, + .binn_hor = 1, + .binn_vert = 1, + .scale_m = 22, + .width = 3840, + .height = 2160, + .min_llp = 6224, + .min_fll = 3622, + .comp_items = 0, + .ctrl_data = 0, + .mode_regs_items = ARRAY_SIZE(imx318_mode_uhd), + .mode_regs = imx318_mode_uhd, + }, + { + .sd_rects_items = ARRAY_SIZE(imx318_1080_rects), + .sd_rects = imx318_1080_rects, + .binn_hor = 2, + .binn_vert = 2, + .scale_m = 22, + .width = 1920, + .height = 1080, + .min_llp = 6224, + .min_fll = 1600, + .comp_items = 0, + .ctrl_data = 0, + .mode_regs_items = ARRAY_SIZE(imx318_mode_1080), + .mode_regs = imx318_mode_1080, + }, + { + .sd_rects_items = ARRAY_SIZE(imx318_720_rects), + .sd_rects = imx318_720_rects, + .binn_hor = 4, + .binn_vert = 4, + .scale_m = 17, + .width = 1280, + .height = 720, + .min_llp = 6224, + .min_fll = 904, + .comp_items = 0, + .ctrl_data = 0, + .mode_regs_items = ARRAY_SIZE(imx318_mode_720), + .mode_regs = imx318_mode_720, + }, +}; + +static struct crl_sensor_subdev_config imx318_sensor_subdevs[] = { + { + .subdev_type = CRL_SUBDEV_TYPE_SCALER, + .name = "imx318 scaler", + }, + { + .subdev_type = CRL_SUBDEV_TYPE_BINNER, + .name = "imx318 binner", + }, + { + .subdev_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .name = "imx318 pixel array", + }, +}; + +static struct crl_sensor_limits imx318_sensor_limits = { + .x_addr_min = 0, + .y_addr_min = 0, + .x_addr_max = 5488, + .y_addr_max = 4112, + .min_frame_length_lines = 160, + .max_frame_length_lines = 65535, + .min_line_length_pixels = 6224, /*TBD*/ + .max_line_length_pixels = 32752, + .scaler_m_min = 16, + .scaler_m_max = 255, + .min_even_inc = 1, + .max_even_inc = 1, + .min_odd_inc = 
1, + .max_odd_inc = 3, +}; + +static struct crl_flip_data imx318_flip_configurations[] = { + { + .flip = CRL_FLIP_DEFAULT_NONE, + .pixel_order = CRL_PIXEL_ORDER_RGGB, + }, + { + .flip = CRL_FLIP_HFLIP, + .pixel_order = CRL_PIXEL_ORDER_GRBG, + }, + { + .flip = CRL_FLIP_VFLIP, + .pixel_order = CRL_PIXEL_ORDER_GBRG, + }, + { + .flip = CRL_FLIP_HFLIP_VFLIP, + .pixel_order = CRL_PIXEL_ORDER_BGGR, + }, +}; + +static struct crl_csi_data_fmt imx318_crl_csi_data_fmt[] = { + { + .code = MEDIA_BUS_FMT_SGRBG10_1X10, + .pixel_order = CRL_PIXEL_ORDER_GRBG, + .bits_per_pixel = 10, + .regs_items = 1, + .regs = imx318_data_fmt_width10, + }, + { + .code = MEDIA_BUS_FMT_SRGGB10_1X10, + .pixel_order = CRL_PIXEL_ORDER_RGGB, + .regs_items = 1, + .bits_per_pixel = 10, + .regs = imx318_data_fmt_width10, + }, + { + .code = MEDIA_BUS_FMT_SBGGR10_1X10, + .pixel_order = CRL_PIXEL_ORDER_BGGR, + .regs_items = 1, + .bits_per_pixel = 10, + .regs = imx318_data_fmt_width10, + }, + { + .code = MEDIA_BUS_FMT_SGBRG10_1X10, + .pixel_order = CRL_PIXEL_ORDER_GBRG, + .regs_items = 1, + .bits_per_pixel = 10, + .regs = imx318_data_fmt_width10, + }, + { + .code = MEDIA_BUS_FMT_SGRBG8_1X8, + .pixel_order = CRL_PIXEL_ORDER_GRBG, + .regs_items = 1, + .bits_per_pixel = 8, + .regs = imx318_data_fmt_width8, + }, + { + .code = MEDIA_BUS_FMT_SRGGB8_1X8, + .pixel_order = CRL_PIXEL_ORDER_RGGB, + .regs_items = 1, + .bits_per_pixel = 8, + .regs = imx318_data_fmt_width8, + }, + { + .code = MEDIA_BUS_FMT_SBGGR8_1X8, + .pixel_order = CRL_PIXEL_ORDER_BGGR, + .regs_items = 1, + .bits_per_pixel = 8, + .regs = imx318_data_fmt_width8, + }, + { + .code = MEDIA_BUS_FMT_SGBRG8_1X8, + .pixel_order = CRL_PIXEL_ORDER_GBRG, + .regs_items = 1, + .bits_per_pixel = 8, + .regs = imx318_data_fmt_width8, + }, +}; + +static const s64 imx318_op_sys_clock[] = { 582000000, + 582000000, + 960000000, + 960000000, }; + +static struct crl_v4l2_ctrl imx318_vl42_ctrls[] = { + { + .sd_type = CRL_SUBDEV_TYPE_SCALER, + .op_type = CRL_V4L2_CTRL_SET_OP, + 
.context = SENSOR_IDLE, + .ctrl_id = V4L2_CID_LINK_FREQ, + .name = "V4L2_CID_LINK_FREQ", + .type = CRL_V4L2_CTRL_TYPE_MENU_INT, + .data.v4l2_int_menu.def = 0, + .data.v4l2_int_menu.max = + ARRAY_SIZE(imx318_pll_configurations) - 1, + .data.v4l2_int_menu.menu = imx318_op_sys_clock, + .flags = 0, + .impact = CRL_IMPACTS_NO_IMPACT, + }, + { + .sd_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .op_type = CRL_V4L2_CTRL_GET_OP, + .context = SENSOR_POWERED_ON, + .ctrl_id = V4L2_CID_PIXEL_RATE, + .name = "V4L2_CID_PIXEL_RATE_PA", + .type = CRL_V4L2_CTRL_TYPE_INTEGER, + .data.std_data.min = 0, + .data.std_data.max = INT_MAX, + .data.std_data.step = 1, + .data.std_data.def = 0, + .impact = CRL_IMPACTS_NO_IMPACT, + }, + { + .sd_type = CRL_SUBDEV_TYPE_SCALER, + .op_type = CRL_V4L2_CTRL_GET_OP, + .context = SENSOR_POWERED_ON, + .ctrl_id = V4L2_CID_PIXEL_RATE, + .name = "V4L2_CID_PIXEL_RATE_CSI", + .type = CRL_V4L2_CTRL_TYPE_INTEGER, + .data.std_data.min = 0, + .data.std_data.max = INT_MAX, + .data.std_data.step = 1, + .data.std_data.def = 0, + .impact = CRL_IMPACTS_NO_IMPACT, + }, + { + .sd_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .op_type = CRL_V4L2_CTRL_SET_OP, + .context = SENSOR_POWERED_ON, + .ctrl_id = V4L2_CID_ANALOGUE_GAIN, + .name = "V4L2_CID_ANALOGUE_GAIN", + .type = CRL_V4L2_CTRL_TYPE_INTEGER, + .data.std_data.min = 0, + .data.std_data.max = 480, + .data.std_data.step = 1, + .data.std_data.def = 0, + .impact = CRL_IMPACTS_NO_IMPACT, + .regs_items = ARRAY_SIZE(imx318_ana_gain_global_regs), + .regs = imx318_ana_gain_global_regs, + }, + { + .sd_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .op_type = CRL_V4L2_CTRL_SET_OP, + .context = SENSOR_POWERED_ON, + .ctrl_id = V4L2_CID_EXPOSURE, + .name = "V4L2_CID_EXPOSURE", + .type = CRL_V4L2_CTRL_TYPE_INTEGER, + .data.std_data.min = 0, + .data.std_data.max = 65500, + .data.std_data.step = 1, + .data.std_data.def = 0, + .impact = CRL_IMPACTS_NO_IMPACT, + .regs_items = ARRAY_SIZE(imx318_exposure_regs), + .regs = imx318_exposure_regs, + }, + { + 
.sd_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .op_type = CRL_V4L2_CTRL_SET_OP, + .context = SENSOR_POWERED_ON, + .ctrl_id = V4L2_CID_HFLIP, + .name = "V4L2_CID_HFLIP", + .type = CRL_V4L2_CTRL_TYPE_INTEGER, + .data.std_data.min = 0, + .data.std_data.max = 1, + .data.std_data.step = 1, + .data.std_data.def = 0, + .flags = 0, + .impact = CRL_IMPACTS_NO_IMPACT, + .ctrl = 0, + .regs_items = ARRAY_SIZE(imx318_h_flip_regs), + .regs = imx318_h_flip_regs, + }, + { + .sd_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .op_type = CRL_V4L2_CTRL_SET_OP, + .context = SENSOR_POWERED_ON, + .ctrl_id = V4L2_CID_VFLIP, + .name = "V4L2_CID_VFLIP", + .type = CRL_V4L2_CTRL_TYPE_INTEGER, + .data.std_data.min = 0, + .data.std_data.max = 1, + .data.std_data.step = 1, + .data.std_data.def = 0, + .impact = CRL_IMPACTS_NO_IMPACT, + .regs_items = ARRAY_SIZE(imx318_v_flip_regs), + .regs = imx318_v_flip_regs, + }, + { + .sd_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .op_type = CRL_V4L2_CTRL_SET_OP, + .context = SENSOR_POWERED_ON, + .ctrl_id = V4L2_CID_FRAME_LENGTH_LINES, + .name = "Frame length lines", + .type = CRL_V4L2_CTRL_TYPE_CUSTOM, + .data.std_data.min = 160, + .data.std_data.max = 65535, + .data.std_data.step = 1, + .data.std_data.def = 4130, + .flags = V4L2_CTRL_FLAG_UPDATE, + .impact = CRL_IMPACTS_NO_IMPACT, + .regs_items = ARRAY_SIZE(imx318_fll_regs), + .regs = imx318_fll_regs, + .v4l2_type = V4L2_CTRL_TYPE_INTEGER, + }, + { + .sd_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .op_type = CRL_V4L2_CTRL_SET_OP, + .context = SENSOR_POWERED_ON, + .ctrl_id = V4L2_CID_LINE_LENGTH_PIXELS, + .name = "Line Length Pixels", + .type = CRL_V4L2_CTRL_TYPE_CUSTOM, + .data.std_data.min = 6024, + .data.std_data.max = 65520, + .data.std_data.step = 1, + .data.std_data.def = 6024, + .flags = V4L2_CTRL_FLAG_UPDATE, + .impact = CRL_IMPACTS_NO_IMPACT, + .regs_items = ARRAY_SIZE(imx318_llp_regs), + .regs = imx318_llp_regs, + .v4l2_type = V4L2_CTRL_TYPE_INTEGER, + }, +}; + +/* Power items, they are enabled in the order they are listed 
here */ +static struct crl_power_seq_entity imx318_power_items[] = { + { + .type = CRL_POWER_ETY_REGULATOR_FRAMEWORK, + .ent_name = "VANA", + .val = 2800000, + .delay = 0, + }, + { + .type = CRL_POWER_ETY_REGULATOR_FRAMEWORK, + .ent_name = "VDIG", + .val = 1050000, + .delay = 0, + }, + { + .type = CRL_POWER_ETY_REGULATOR_FRAMEWORK, + .ent_name = "VIO", + .val = 1800000, + .delay = 0, + }, + { + .type = CRL_POWER_ETY_REGULATOR_FRAMEWORK, + .ent_name = "VAF", + .val = 3000000, + .delay = 2000, + }, + { + .type = CRL_POWER_ETY_CLK_FRAMEWORK, + .val = 24000000, + }, + { + .type = CRL_POWER_ETY_GPIO_FROM_PDATA, + .val = 1, + .delay = 10700, + }, +}; + + +struct crl_sensor_configuration imx318_crl_configuration = { + + .power_items = ARRAY_SIZE(imx318_power_items), + .power_entities = imx318_power_items, + + .powerup_regs_items = ARRAY_SIZE(imx318_powerup_regset), + .powerup_regs = imx318_powerup_regset, + + .poweroff_regs_items = 0, + .poweroff_regs = 0, + + .id_reg_items = ARRAY_SIZE(imx318_sensor_detect_regset), + .id_regs = imx318_sensor_detect_regset, + + .subdev_items = ARRAY_SIZE(imx318_sensor_subdevs), + .subdevs = imx318_sensor_subdevs, + + .sensor_limits = &imx318_sensor_limits, + + .pll_config_items = ARRAY_SIZE(imx318_pll_configurations), + .pll_configs = imx318_pll_configurations, + + .modes_items = ARRAY_SIZE(imx318_modes), + .modes = imx318_modes, + .fail_safe_mode_index = 0, + + .streamon_regs_items = ARRAY_SIZE(imx318_streamon_regs), + .streamon_regs = imx318_streamon_regs, + + .streamoff_regs_items = ARRAY_SIZE(imx318_streamoff_regs), + .streamoff_regs = imx318_streamoff_regs, + + .v4l2_ctrls_items = ARRAY_SIZE(imx318_vl42_ctrls), + .v4l2_ctrl_bank = imx318_vl42_ctrls, + + .csi_fmts_items = ARRAY_SIZE(imx318_crl_csi_data_fmt), + .csi_fmts = imx318_crl_csi_data_fmt, + + .flip_items = ARRAY_SIZE(imx318_flip_configurations), + .flip_data = imx318_flip_configurations, + +}; + +#endif /* __CRLMODULE_imx318_CONFIGURATION_H_ */ diff --git 
a/drivers/media/i2c/crlmodule/crl_imx477_common_regs.h b/drivers/media/i2c/crlmodule/crl_imx477_common_regs.h new file mode 100644 index 0000000000000..eebb884df8c80 --- /dev/null +++ b/drivers/media/i2c/crlmodule/crl_imx477_common_regs.h @@ -0,0 +1,1096 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (C) 2017 - 2018 Intel Corporation + * + * Author: Alexei Zavjalov + * + */ + +#ifndef __CRLMODULE_IMX477_COMMON_REGS_H_ +#define __CRLMODULE_IMX477_COMMON_REGS_H_ + +#include "crlmodule-sensor-ds.h" + +#define IMX477_CAPTURE_MODE_MAX 10 + +static struct crl_dynamic_register_access imx477_fll_regs[] = { + { + .address = 0x0340, + .len = CRL_REG_LEN_16BIT, + .ops_items = 0, + .ops = 0, + .mask = 0xffff, + }, +}; + +static struct crl_dynamic_register_access imx477_llp_regs[] = { + { + .address = 0x0342, + .len = CRL_REG_LEN_16BIT, + .ops_items = 0, + .ops = 0, + .mask = 0xffff, + }, +}; + +static struct crl_dynamic_register_access imx477_exposure_regs[] = { + { + .address = 0x0202, + .len = CRL_REG_LEN_16BIT, + .ops_items = 0, + .ops = 0, + .mask = 0xffff, + } +}; + +static struct crl_dynamic_register_access imx477_ana_gain_global_regs[] = { + { + .address = 0x0204, + .len = CRL_REG_LEN_16BIT, + .ops_items = 0, + .ops = 0, + .mask = 0xfff, + }, +}; + +static struct crl_dynamic_register_access imx477_wdr_switch_regs[] = { +}; + +static struct crl_arithmetic_ops imx477_vflip_ops[] = { + { + .op = CRL_BITWISE_LSHIFT, + .operand.entity_val = 1, + }, +}; + +static struct crl_dynamic_register_access imx477_h_flip_regs[] = { + { + .address = 0x0101, + .len = CRL_REG_LEN_08BIT | CRL_REG_READ_AND_UPDATE, + .ops_items = 0, + .ops = 0, + .mask = 0x1, + }, +}; + +static struct crl_dynamic_register_access imx477_v_flip_regs[] = { + { + .address = 0x0101, + .len = CRL_REG_LEN_08BIT | CRL_REG_READ_AND_UPDATE, + .ops_items = ARRAY_SIZE(imx477_vflip_ops), + .ops = imx477_vflip_ops, + .mask = 0x2, + }, +}; + +static struct crl_dynamic_register_access imx477_test_pattern_regs[] = 
{ + { + .address = 0x0600, + .len = CRL_REG_LEN_16BIT, + .ops_items = 0, + .ops = 0, + .mask = 0xffff, + }, +}; + +/* 1st exposure for DOL */ +static struct crl_dynamic_register_access imx477_shs1_regs[] = { + { + .address = 0x00EA, + .len = CRL_REG_LEN_16BIT, + .ops_items = 0, + .ops = 0, + .mask = 0xffff, + }, +}; + +/* 2nd exposure for DOL */ +static struct crl_dynamic_register_access imx477_shs2_regs[] = { + { + .address = 0x00EC, + .len = CRL_REG_LEN_16BIT, + .ops_items = 0, + .ops = 0, + .mask = 0xffff, + }, +}; + +/* 3rd exposure for DOL */ +static struct crl_dynamic_register_access imx477_shs3_regs[] = { + { + .address = 0x00EE, + .len = CRL_REG_LEN_16BIT, + .ops_items = 0, + .ops = 0, + .mask = 0xffff, + }, +}; + +/* Line number of 2nd frame readout start from XVS for DOL */ +static struct crl_dynamic_register_access imx477_rhs1_regs[] = { + { + .address = 0x00E6, + .len = CRL_REG_LEN_16BIT, + .ops_items = 0, + .ops = 0, + .mask = 0xffff, + }, +}; + +/* Line number of 3rd frame readout start from XVS for DOL */ +static struct crl_dynamic_register_access imx477_rhs2_regs[] = { + { + .address = 0x00E8, + .len = CRL_REG_LEN_16BIT, + .ops_items = 0, + .ops = 0, + .mask = 0xffff, + }, +}; + +/* set analog gain for 1st HDR frame */ +static struct crl_dynamic_register_access imx477_ana_gain_1st_regs[] = { + { + .address = 0x00F0, + .len = CRL_REG_LEN_16BIT, + .ops_items = 0, + .ops = 0, + .mask = 0xffff, + }, +}; + +/* set analog gain for 2nd HDR frames */ +static struct crl_dynamic_register_access imx477_ana_gain_2nd_regs[] = { + { + .address = 0x00F2, + .len = CRL_REG_LEN_16BIT, + .ops_items = 0, + .ops = 0, + .mask = 0xffff, + }, +}; + +/* set analog gain for 3rd HDR frames */ +static struct crl_dynamic_register_access imx477_ana_gain_3rd_regs[] = { + { + .address = 0x00F4, + .len = CRL_REG_LEN_16BIT, + .ops_items = 0, + .ops = 0, + .mask = 0xffff, + }, +}; + +/* set digital gain for 1st HDR frames */ +static struct crl_dynamic_register_access 
imx477_dig_gain_1st_regs[] = { + { + .address = 0x00F6, + .len = CRL_REG_LEN_16BIT, + .ops_items = 0, + .ops = 0, + .mask = 0xffff, + }, +}; + +/* set digital gain for 2nd HDR frames */ +static struct crl_dynamic_register_access imx477_dig_gain_2nd_regs[] = { + { + .address = 0x00F8, + .len = CRL_REG_LEN_16BIT, + .ops_items = 0, + .ops = 0, + .mask = 0xffff, + }, +}; + +/* set digital gain for 3rd HDR frames */ +static struct crl_dynamic_register_access imx477_dig_gain_3rd_regs[] = { + { + .address = 0x00FA, + .len = CRL_REG_LEN_16BIT, + .ops_items = 0, + .ops = 0, + .mask = 0xffff, + }, +}; + +static const char * const imx477_test_patterns[] = { + "Disabled", + "Solid Colour", + "Eight Vertical Color Bars", + "Fade to Grey Color Bars", + "PN9", +}; + +static struct crl_v4l2_ctrl imx477_v4l2_ctrls[] = { + { + .sd_type = CRL_SUBDEV_TYPE_BINNER, + .op_type = CRL_V4L2_CTRL_SET_OP, + .context = SENSOR_IDLE, + .ctrl_id = V4L2_CID_LINK_FREQ, + .name = "V4L2_CID_LINK_FREQ", + .type = CRL_V4L2_CTRL_TYPE_MENU_INT, + .data.v4l2_int_menu.def = 0, + .data.v4l2_int_menu.max = 0, + .data.v4l2_int_menu.menu = 0, + .flags = 0, + .impact = CRL_IMPACTS_NO_IMPACT, + .regs_items = 0, + .regs = 0, + .dep_items = 0, + .dep_ctrls = 0, + }, + { + .sd_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .op_type = CRL_V4L2_CTRL_GET_OP, + .context = SENSOR_POWERED_ON, + .ctrl_id = V4L2_CID_PIXEL_RATE, + .name = "V4L2_CID_PIXEL_RATE_PA", + .type = CRL_V4L2_CTRL_TYPE_INTEGER, + .data.std_data.min = 0, + .data.std_data.max = INT_MAX, + .data.std_data.step = 1, + .data.std_data.def = 0, + .flags = 0, + .impact = CRL_IMPACTS_NO_IMPACT, + .regs_items = 0, + .regs = 0, + .dep_items = 0, + .dep_ctrls = 0, + }, + { + .sd_type = CRL_SUBDEV_TYPE_BINNER, + .op_type = CRL_V4L2_CTRL_GET_OP, + .context = SENSOR_POWERED_ON, + .ctrl_id = V4L2_CID_PIXEL_RATE, + .name = "V4L2_CID_PIXEL_RATE_CSI", + .type = CRL_V4L2_CTRL_TYPE_INTEGER, + .data.std_data.min = 0, + .data.std_data.max = INT_MAX, + .data.std_data.step = 1, + 
.data.std_data.def = 0, + .flags = 0, + .impact = CRL_IMPACTS_NO_IMPACT, + .regs_items = 0, + .regs = 0, + .dep_items = 0, + .dep_ctrls = 0, + }, + { + .sd_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .op_type = CRL_V4L2_CTRL_SET_OP, + .context = SENSOR_POWERED_ON, + .ctrl_id = V4L2_CID_FRAME_LENGTH_LINES, + .name = "Frame length lines", + .type = CRL_V4L2_CTRL_TYPE_CUSTOM, + .data.std_data.min = 720, + .data.std_data.max = 131071, + .data.std_data.step = 1, + .data.std_data.def = 8209, + .flags = V4L2_CTRL_FLAG_UPDATE, + .impact = CRL_IMPACTS_NO_IMPACT, + .ctrl = 0, + .regs_items = ARRAY_SIZE(imx477_fll_regs), + .regs = imx477_fll_regs, + .dep_items = 0, + .dep_ctrls = 0, + .v4l2_type = V4L2_CTRL_TYPE_INTEGER, + }, + { + .sd_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .op_type = CRL_V4L2_CTRL_SET_OP, + .context = SENSOR_POWERED_ON, + .ctrl_id = V4L2_CID_LINE_LENGTH_PIXELS, + .name = "Line Length Pixels", + .type = CRL_V4L2_CTRL_TYPE_CUSTOM, + .data.std_data.min = 1280, + .data.std_data.max = 65535, + .data.std_data.step = 1, + .data.std_data.def = 14612, + .flags = V4L2_CTRL_FLAG_UPDATE, + .impact = CRL_IMPACTS_NO_IMPACT, + .ctrl = 0, + .regs_items = ARRAY_SIZE(imx477_llp_regs), + .regs = imx477_llp_regs, + .dep_items = 0, + .dep_ctrls = 0, + .v4l2_type = V4L2_CTRL_TYPE_INTEGER, + }, + { + .sd_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .op_type = CRL_V4L2_CTRL_SET_OP, + .context = SENSOR_POWERED_ON, + .ctrl_id = V4L2_CID_HFLIP, + .type = CRL_V4L2_CTRL_TYPE_INTEGER, + .name = "V4L2_CID_HFLIP", + .data.std_data.min = 0, + .data.std_data.max = 1, + .data.std_data.step = 1, + .data.std_data.def = 0, + .flags = 0, + .impact = CRL_IMPACTS_NO_IMPACT, + .ctrl = 0, + .regs_items = ARRAY_SIZE(imx477_h_flip_regs), + .regs = imx477_h_flip_regs, + .dep_items = 0, + .dep_ctrls = 0, + }, + { + .sd_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .op_type = CRL_V4L2_CTRL_SET_OP, + .context = SENSOR_POWERED_ON, + .ctrl_id = V4L2_CID_VFLIP, + .type = CRL_V4L2_CTRL_TYPE_INTEGER, + .name = "V4L2_CID_VFLIP", + 
.data.std_data.min = 0, + .data.std_data.max = 1, + .data.std_data.step = 1, + .data.std_data.def = 0, + .flags = 0, + .impact = CRL_IMPACTS_NO_IMPACT, + .ctrl = 0, + .regs_items = ARRAY_SIZE(imx477_v_flip_regs), + .regs = imx477_v_flip_regs, + .dep_items = 0, + .dep_ctrls = 0, + }, + { + .sd_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .op_type = CRL_V4L2_CTRL_SET_OP, + .context = SENSOR_POWERED_ON, + .ctrl_id = V4L2_CID_TEST_PATTERN, + .name = "V4L2_CID_TEST_PATTERN", + .type = CRL_V4L2_CTRL_TYPE_MENU_ITEMS, + .data.v4l2_menu_items.menu = imx477_test_patterns, + .data.v4l2_menu_items.size = ARRAY_SIZE(imx477_test_patterns), + .flags = V4L2_CTRL_FLAG_UPDATE, + .impact = CRL_IMPACTS_NO_IMPACT, + .ctrl = 0, + .regs_items = ARRAY_SIZE(imx477_test_pattern_regs), + .regs = imx477_test_pattern_regs, + .dep_items = 0, + .dep_ctrls = 0, + }, + { + .sd_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .op_type = CRL_V4L2_CTRL_SET_OP, + .context = SENSOR_POWERED_ON, + .ctrl_id = V4L2_CID_ANALOGUE_GAIN, + .type = CRL_V4L2_CTRL_TYPE_INTEGER, + .name = "V4L2_CID_ANALOGUE_GAIN", + .data.std_data.min = 0, + .data.std_data.max = 0x978, + .data.std_data.step = 1, + .data.std_data.def = 0, + .flags = 0, + .impact = CRL_IMPACTS_NO_IMPACT, + .ctrl = 0, + .regs_items = ARRAY_SIZE(imx477_ana_gain_global_regs), + .regs = imx477_ana_gain_global_regs, + .dep_items = 0, + .dep_ctrls = 0, + }, + { + .sd_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .op_type = CRL_V4L2_CTRL_SET_OP, + .context = SENSOR_POWERED_ON, + .ctrl_id = V4L2_CID_EXPOSURE, + .type = CRL_V4L2_CTRL_TYPE_INTEGER, + .name = "V4L2_CID_EXPOSURE", + .data.std_data.min = 0, + .data.std_data.max = 65500, + .data.std_data.step = 1, + .data.std_data.def = 5500, + .flags = 0, + .impact = CRL_IMPACTS_NO_IMPACT, + .ctrl = 0, + .regs_items = ARRAY_SIZE(imx477_exposure_regs), + .regs = imx477_exposure_regs, + .dep_items = 0, + .dep_ctrls = 0, + }, + { + .sd_type = CRL_SUBDEV_TYPE_BINNER, + .op_type = CRL_V4L2_CTRL_SET_OP, + .context = SENSOR_POWERED_ON, + 
.ctrl_id = V4L2_CID_WDR_MODE, + .name = "V4L2_CID_WDR_MODE", + .type = CRL_V4L2_CTRL_TYPE_CUSTOM, + .data.std_data.min = 0, + .data.std_data.max = 1, + .data.std_data.step = 1, + .data.std_data.def = 0, + .flags = V4L2_CTRL_FLAG_UPDATE, + .impact = CRL_IMPACTS_MODE_SELECTION, + .ctrl = 0, + .regs_items = ARRAY_SIZE(imx477_wdr_switch_regs), + .regs = imx477_wdr_switch_regs, + .dep_items = 0, + .dep_ctrls = 0, + .v4l2_type = V4L2_CTRL_TYPE_INTEGER, + }, + { + .sd_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .op_type = CRL_V4L2_CTRL_SET_OP, + .context = SENSOR_POWERED_ON, + .ctrl_id = CRL_CID_EXPOSURE_SHS1, + .type = CRL_V4L2_CTRL_TYPE_CUSTOM, + .name = "CRL_CID_EXPOSURE_SHS1", + .data.std_data.min = 4, + .data.std_data.max = 65500, + .data.std_data.step = 1, + .data.std_data.def = 0X5500, + .flags = V4L2_CTRL_FLAG_UPDATE, + .impact = CRL_IMPACTS_NO_IMPACT, + .ctrl = 0, + .regs_items = ARRAY_SIZE(imx477_shs1_regs), + .regs = imx477_shs1_regs, + .dep_items = 0, + .dep_ctrls = 0, + .v4l2_type = V4L2_CTRL_TYPE_INTEGER + }, + { + .sd_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .op_type = CRL_V4L2_CTRL_SET_OP, + .context = SENSOR_POWERED_ON, + .ctrl_id = CRL_CID_EXPOSURE_SHS2, + .type = CRL_V4L2_CTRL_TYPE_CUSTOM, + .name = "CRL_CID_EXPOSURE_SHS2", + .data.std_data.min = 4, + .data.std_data.max = 65500, + .data.std_data.step = 1, + .data.std_data.def = 0X500, + .flags = V4L2_CTRL_FLAG_UPDATE, + .impact = CRL_IMPACTS_NO_IMPACT, + .ctrl = 0, + .regs_items = ARRAY_SIZE(imx477_shs2_regs), + .regs = imx477_shs2_regs, + .dep_items = 0, + .dep_ctrls = 0, + .v4l2_type = V4L2_CTRL_TYPE_INTEGER + }, + { + .sd_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .op_type = CRL_V4L2_CTRL_SET_OP, + .context = SENSOR_POWERED_ON, + .ctrl_id = CRL_CID_EXPOSURE_SHS3, + .type = CRL_V4L2_CTRL_TYPE_CUSTOM, + .name = "CRL_CID_EXPOSURE_SHS3", + .data.std_data.min = 4, + .data.std_data.max = 65500, + .data.std_data.step = 1, + .data.std_data.def = 0X1000, + .flags = V4L2_CTRL_FLAG_UPDATE, + .impact = CRL_IMPACTS_NO_IMPACT, 
+ .ctrl = 0, + .regs_items = ARRAY_SIZE(imx477_shs3_regs), + .regs = imx477_shs3_regs, + .dep_items = 0, + .dep_ctrls = 0, + .v4l2_type = V4L2_CTRL_TYPE_INTEGER + }, + { + .sd_type = CRL_SUBDEV_TYPE_BINNER, + .op_type = CRL_V4L2_CTRL_SET_OP, + .context = SENSOR_POWERED_ON, + .ctrl_id = CRL_CID_SENSOR_MODE, + .name = "CRL_CID_SENSOR_MODE", + .type = CRL_V4L2_CTRL_TYPE_CUSTOM, + .data.std_data.min = 0, + .data.std_data.max = IMX477_CAPTURE_MODE_MAX - 1, + .data.std_data.step = 1, + .data.std_data.def = 0, + .flags = V4L2_CTRL_FLAG_UPDATE, + .impact = CRL_IMPACTS_MODE_SELECTION, + .ctrl = 0, + .regs_items = 0, + .regs = 0, + .dep_items = 0, + .dep_ctrls = 0, + .v4l2_type = V4L2_CTRL_TYPE_INTEGER, + }, + { + .sd_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .op_type = CRL_V4L2_CTRL_SET_OP, + .context = SENSOR_POWERED_ON, + .ctrl_id = CRL_CID_EXPOSURE_RHS1, + .name = "CRL_CID_EXPOSURE_RHS1", + .type = CRL_V4L2_CTRL_TYPE_CUSTOM, + .data.std_data.min = 6, + .data.std_data.max = 65535, + .data.std_data.step = 1, + .data.std_data.def = 0x1000, + .flags = V4L2_CTRL_FLAG_UPDATE, + .impact = CRL_IMPACTS_NO_IMPACT, + .ctrl = 0, + .regs_items = ARRAY_SIZE(imx477_rhs1_regs), + .regs = imx477_rhs1_regs, + .dep_items = 0, + .dep_ctrls = 0, + .v4l2_type = V4L2_CTRL_TYPE_INTEGER, + }, + { + .sd_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .op_type = CRL_V4L2_CTRL_SET_OP, + .context = SENSOR_POWERED_ON, + .ctrl_id = CRL_CID_EXPOSURE_RHS2, + .name = "CRL_CID_EXPOSURE_RHS2", + .type = CRL_V4L2_CTRL_TYPE_CUSTOM, + .data.std_data.min = 6, + .data.std_data.max = 65535, + .data.std_data.step = 1, + .data.std_data.def = 0x1500, + .flags = V4L2_CTRL_FLAG_UPDATE, + .impact = CRL_IMPACTS_NO_IMPACT, + .ctrl = 0, + .regs_items = ARRAY_SIZE(imx477_rhs2_regs), + .regs = imx477_rhs2_regs, + .dep_items = 0, + .dep_ctrls = 0, + .v4l2_type = V4L2_CTRL_TYPE_INTEGER, + }, + { + .sd_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .op_type = CRL_V4L2_CTRL_SET_OP, + .context = SENSOR_POWERED_ON, + .ctrl_id = CRL_CID_ANALOG_GAIN_L, 
+ .name = "CRL_CID_ANALOG_GAIN_L", + .type = CRL_V4L2_CTRL_TYPE_CUSTOM, + .data.std_data.min = 0, + .data.std_data.max = 0x978, + .data.std_data.step = 1, + .data.std_data.def = 0, + .flags = 0, + .impact = CRL_IMPACTS_NO_IMPACT, + .ctrl = 0, + .regs_items = ARRAY_SIZE(imx477_ana_gain_1st_regs), + .regs = imx477_ana_gain_1st_regs, + .dep_items = 0, + .dep_ctrls = 0, + .v4l2_type = V4L2_CTRL_TYPE_INTEGER, + }, + { + .sd_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .op_type = CRL_V4L2_CTRL_SET_OP, + .context = SENSOR_POWERED_ON, + .ctrl_id = CRL_CID_ANALOG_GAIN_S, + .name = "CRL_CID_ANALOG_GAIN_S", + .type = CRL_V4L2_CTRL_TYPE_CUSTOM, + .data.std_data.min = 0, + .data.std_data.max = 0x978, + .data.std_data.step = 1, + .data.std_data.def = 0, + .flags = V4L2_CTRL_FLAG_UPDATE, + .impact = CRL_IMPACTS_NO_IMPACT, + .ctrl = 0, + .regs_items = ARRAY_SIZE(imx477_ana_gain_2nd_regs), + .regs = imx477_ana_gain_2nd_regs, + .dep_items = 0, + .dep_ctrls = 0, + .v4l2_type = V4L2_CTRL_TYPE_INTEGER, + }, + { + .sd_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .op_type = CRL_V4L2_CTRL_SET_OP, + .context = SENSOR_POWERED_ON, + .ctrl_id = CRL_CID_ANALOG_GAIN_VS, + .name = "CRL_CID_ANALOG_GAIN_VS", + .type = CRL_V4L2_CTRL_TYPE_CUSTOM, + .data.std_data.min = 0, + .data.std_data.max = 0x978, + .data.std_data.step = 1, + .data.std_data.def = 0, + .flags = V4L2_CTRL_FLAG_UPDATE, + .impact = CRL_IMPACTS_NO_IMPACT, + .ctrl = 0, + .regs_items = ARRAY_SIZE(imx477_ana_gain_3rd_regs), + .regs = imx477_ana_gain_3rd_regs, + .dep_items = 0, + .dep_ctrls = 0, + .v4l2_type = V4L2_CTRL_TYPE_INTEGER, + }, + { + .sd_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .op_type = CRL_V4L2_CTRL_SET_OP, + .context = SENSOR_POWERED_ON, + .ctrl_id = CRL_CID_DIGITAL_GAIN_L, + .name = "CRL_CID_DIGITAL_GAIN_L", + .type = CRL_V4L2_CTRL_TYPE_CUSTOM, + .data.std_data.min = 0, + .data.std_data.max = 0x978, + .data.std_data.step = 1, + .data.std_data.def = 64, + .flags = V4L2_CTRL_FLAG_UPDATE, + .impact = CRL_IMPACTS_NO_IMPACT, + .ctrl = 0, + 
.regs_items = ARRAY_SIZE(imx477_dig_gain_1st_regs), + .regs = imx477_dig_gain_1st_regs, + .dep_items = 0, + .dep_ctrls = 0, + .v4l2_type = V4L2_CTRL_TYPE_INTEGER, + }, + { + .sd_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .op_type = CRL_V4L2_CTRL_SET_OP, + .context = SENSOR_POWERED_ON, + .ctrl_id = CRL_CID_DIGITAL_GAIN_S, + .name = "CRL_CID_DIGITAL_GAIN_S", + .type = CRL_V4L2_CTRL_TYPE_CUSTOM, + .data.std_data.min = 0, + .data.std_data.max = 0x978, + .data.std_data.step = 1, + .data.std_data.def = 64, + .flags = V4L2_CTRL_FLAG_UPDATE, + .impact = CRL_IMPACTS_NO_IMPACT, + .ctrl = 0, + .regs_items = ARRAY_SIZE(imx477_dig_gain_2nd_regs), + .regs = imx477_dig_gain_2nd_regs, + .dep_items = 0, + .dep_ctrls = 0, + .v4l2_type = V4L2_CTRL_TYPE_INTEGER, + }, + { + .sd_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .op_type = CRL_V4L2_CTRL_SET_OP, + .context = SENSOR_POWERED_ON, + .ctrl_id = CRL_CID_DIGITAL_GAIN_VS, + .name = "CRL_CID_DIGITAL_GAIN_VS", + .type = CRL_V4L2_CTRL_TYPE_CUSTOM, + .data.std_data.min = 0, + .data.std_data.max = 0x978, + .data.std_data.step = 1, + .data.std_data.def = 64, + .flags = V4L2_CTRL_FLAG_UPDATE, + .impact = CRL_IMPACTS_NO_IMPACT, + .ctrl = 0, + .regs_items = ARRAY_SIZE(imx477_dig_gain_3rd_regs), + .regs = imx477_dig_gain_3rd_regs, + .dep_items = 0, + .dep_ctrls = 0, + .v4l2_type = V4L2_CTRL_TYPE_INTEGER, + }, +}; + +static struct crl_register_write_rep imx477_streamon_regs[] = { + {0x0100, CRL_REG_LEN_08BIT, 0x01}, + {0x00, CRL_REG_LEN_DELAY, 20, 0x00}, /* Delay 20ms */ +}; + +static struct crl_register_write_rep imx477_streamoff_regs[] = { + {0x0100, CRL_REG_LEN_08BIT, 0x00}, + {0x00, CRL_REG_LEN_DELAY, 20, 0x00}, /* Delay 20ms */ +}; + +static struct crl_register_write_rep imx477_fmt_raw10[] = { + {0x0112, CRL_REG_LEN_08BIT, 0x0a}, /* FMT RAW10 */ + {0x0113, CRL_REG_LEN_08BIT, 0x0a}, + {0x3F0D, CRL_REG_LEN_08BIT, 0x00}, + {0x00FC, CRL_REG_LEN_08BIT, 0x0A}, /* The output data fmt for CSI: RAW10 */ + {0x00FD, CRL_REG_LEN_08BIT, 0x0A}, /* The output data 
fmt for CSI: RAW10 */ + {0x00FE, CRL_REG_LEN_08BIT, 0x0A}, /* The output data fmt for CSI: RAW10 */ + {0x00FF, CRL_REG_LEN_08BIT, 0x0A}, /* The output data fmt for CSI: RAW10 */ +}; + +static struct crl_register_write_rep imx477_fmt_raw12[] = { + {0x0112, CRL_REG_LEN_08BIT, 0x0c}, /* FMT RAW12 */ + {0x0113, CRL_REG_LEN_08BIT, 0x0c}, + {0x3F0D, CRL_REG_LEN_08BIT, 0x01}, +}; + +static struct crl_csi_data_fmt imx477_crl_csi_data_fmt[] = { + { + .code = MEDIA_BUS_FMT_SGRBG10_1X10, + .pixel_order = CRL_PIXEL_ORDER_GRBG, + .bits_per_pixel = 10, + .regs_items = ARRAY_SIZE(imx477_fmt_raw10), + .regs = imx477_fmt_raw10, + }, + { + .code = MEDIA_BUS_FMT_SRGGB10_1X10, + .pixel_order = CRL_PIXEL_ORDER_RGGB, + .bits_per_pixel = 10, + .regs_items = ARRAY_SIZE(imx477_fmt_raw10), + .regs = imx477_fmt_raw10, + }, + { + .code = MEDIA_BUS_FMT_SBGGR10_1X10, + .pixel_order = CRL_PIXEL_ORDER_BGGR, + .bits_per_pixel = 10, + .regs_items = ARRAY_SIZE(imx477_fmt_raw10), + .regs = imx477_fmt_raw10, + }, + { + .code = MEDIA_BUS_FMT_SGBRG10_1X10, + .pixel_order = CRL_PIXEL_ORDER_GBRG, + .bits_per_pixel = 10, + .regs_items = ARRAY_SIZE(imx477_fmt_raw10), + .regs = imx477_fmt_raw10, + }, + { + .code = MEDIA_BUS_FMT_SRGGB12_1X12, + .pixel_order = CRL_PIXEL_ORDER_RGGB, + .bits_per_pixel = 12, + .regs_items = ARRAY_SIZE(imx477_fmt_raw12), + .regs = imx477_fmt_raw12, + }, +}; + +static struct crl_subdev_rect_rep imx477_4056_3040_rects[] = { + { + .subdev_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .in_rect.left = 0, + .in_rect.top = 0, + .in_rect.width = 4056, + .in_rect.height = 3040, + .out_rect.left = 0, + .out_rect.top = 0, + .out_rect.width = 4056, + .out_rect.height = 3040, + }, + { + .subdev_type = CRL_SUBDEV_TYPE_BINNER, + .in_rect.left = 0, + .in_rect.top = 0, + .in_rect.width = 4056, + .in_rect.height = 3040, + .out_rect.left = 0, + .out_rect.top = 0, + .out_rect.width = 4056, + .out_rect.height = 3040, + } +}; + +static struct crl_subdev_rect_rep imx477_4056_2288_rects[] = { + { + .subdev_type 
= CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .in_rect.left = 0, + .in_rect.top = 0, + .in_rect.width = 4056, + .in_rect.height = 3040, + .out_rect.left = 0, + .out_rect.top = 0, + .out_rect.width = 4056, + .out_rect.height = 3040, + }, + { + .subdev_type = CRL_SUBDEV_TYPE_BINNER, + .in_rect.left = 0, + .in_rect.top = 0, + .in_rect.width = 4056, + .in_rect.height = 3040, + .out_rect.left = 0, + .out_rect.top = 0, + .out_rect.width = 4056, + .out_rect.height = 2288, + } +}; + +static struct crl_subdev_rect_rep imx477_2832_1632_rects[] = { + { + .subdev_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .in_rect.left = 0, + .in_rect.top = 0, + .in_rect.width = 4056, + .in_rect.height = 3040, + .out_rect.left = 0, + .out_rect.top = 0, + .out_rect.width = 4056, + .out_rect.height = 3040, + }, + { + .subdev_type = CRL_SUBDEV_TYPE_BINNER, + .in_rect.left = 0, + .in_rect.top = 0, + .in_rect.width = 4056, + .in_rect.height = 3040, + .out_rect.left = 0, + .out_rect.top = 0, + .out_rect.width = 2832, + .out_rect.height = 1632, + } +}; + +static struct crl_subdev_rect_rep imx477_2028_1128_rects[] = { + { + .subdev_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .in_rect.left = 0, + .in_rect.top = 0, + .in_rect.width = 4056, + .in_rect.height = 3040, + .out_rect.left = 0, + .out_rect.top = 0, + .out_rect.width = 4056, + .out_rect.height = 3040, + }, + { + .subdev_type = CRL_SUBDEV_TYPE_BINNER, + .in_rect.left = 0, + .in_rect.top = 0, + .in_rect.width = 4056, + .in_rect.height = 3040, + .out_rect.left = 0, + .out_rect.top = 0, + .out_rect.width = 2028, + .out_rect.height = 1128, + } +}; + +static struct crl_subdev_rect_rep imx477_1296_768_rects[] = { + { + .subdev_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .in_rect.left = 0, + .in_rect.top = 0, + .in_rect.width = 4056, + .in_rect.height = 3040, + .out_rect.left = 0, + .out_rect.top = 0, + .out_rect.width = 4056, + .out_rect.height = 3040, + }, + { + .subdev_type = CRL_SUBDEV_TYPE_BINNER, + .in_rect.left = 0, + .in_rect.top = 0, + .in_rect.width = 4056, + 
.in_rect.height = 3040, + .out_rect.left = 0, + .out_rect.top = 0, + .out_rect.width = 1296, + .out_rect.height = 768, + } +}; + +static struct crl_subdev_rect_rep imx477_656_512_rects[] = { + { + .subdev_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .in_rect.left = 0, + .in_rect.top = 0, + .in_rect.width = 4056, + .in_rect.height = 3040, + .out_rect.left = 0, + .out_rect.top = 0, + .out_rect.width = 4056, + .out_rect.height = 3040, + }, + { + .subdev_type = CRL_SUBDEV_TYPE_BINNER, + .in_rect.left = 0, + .in_rect.top = 0, + .in_rect.width = 4056, + .in_rect.height = 3040, + .out_rect.left = 0, + .out_rect.top = 0, + .out_rect.width = 656, + .out_rect.height = 512, + } +}; + +static struct crl_register_write_rep imx477_pll_1200mbps[] = { + /* MIPI Settings */ + {0x0114, CRL_REG_LEN_08BIT, 0x01}, /* 2-lane Mode */ + + /* Clock Setting */ + {0x0301, CRL_REG_LEN_08BIT, 0x05}, /* The Pixel Clock Divider for IVTS */ + {0x0303, CRL_REG_LEN_08BIT, 0x02}, /* The System Clock Divider for IVTS */ + {0x0305, CRL_REG_LEN_08BIT, 0x03}, /* The pre-PLL Clock Divider for IVTS */ + {0x0306, CRL_REG_LEN_08BIT, 0x01}, /* The PLL multiplier for IVTS [10:8] */ + {0x0307, CRL_REG_LEN_08BIT, 0x48}, /* The PLL multiplier for IVTS [7:0] */ + {0x0309, CRL_REG_LEN_08BIT, 0x0A}, /* The Pixel Clock Divider for IOPS */ + {0x030B, CRL_REG_LEN_08BIT, 0x01}, /* The System Clock Divider for IOPS */ + {0x030D, CRL_REG_LEN_08BIT, 0x02}, /* The pre-PLL Clock Divider for IOPS */ + {0x030E, CRL_REG_LEN_08BIT, 0x00}, /* The PLL multiplier for IOPS [10:8] */ + {0x030F, CRL_REG_LEN_08BIT, 0x7D}, /* The PLL multiplier for IOPS [7:0] */ + {0x0310, CRL_REG_LEN_08BIT, 0x01}, /* PLL mode select: Dual Mode */ + {0x0820, CRL_REG_LEN_08BIT, 0x09}, /* Output Data Rate, Mbps [31:24] */ + {0x0821, CRL_REG_LEN_08BIT, 0x60}, /* Output Data Rate, Mbps [23:16] */ + {0x0822, CRL_REG_LEN_08BIT, 0x00}, /* Output Data Rate, Mbps [15:8] */ + {0x0823, CRL_REG_LEN_08BIT, 0x00}, /* Output Data Rate, Mbps [7:0] */ + + /* Global Timing 
Setting */ + {0x080A, CRL_REG_LEN_08BIT, 0x00}, /* MIPI Global Timing (Tclk) [9:8] */ + {0x080B, CRL_REG_LEN_08BIT, 0x87}, /* MIPI Global Timing (Tclk) [7:0] */ + {0x080C, CRL_REG_LEN_08BIT, 0x00}, /* MIPI Global Timing (ths_prepare) */ + {0x080D, CRL_REG_LEN_08BIT, 0x4F}, /* MIPI Global Timing (ths_prepare) */ + {0x080E, CRL_REG_LEN_08BIT, 0x00}, /* MIPI Global Timing (ths_zero_min) */ + {0x080F, CRL_REG_LEN_08BIT, 0x87}, /* MIPI Global Timing (ths_zero_min) */ + {0x0810, CRL_REG_LEN_08BIT, 0x00}, /* MIPI Global Timing (ths_trail) */ + {0x0811, CRL_REG_LEN_08BIT, 0x5F}, /* MIPI Global Timing (ths_trail) */ + {0x0812, CRL_REG_LEN_08BIT, 0x00}, /* MIPI Global Timing (Tclk_trail_min)*/ + {0x0813, CRL_REG_LEN_08BIT, 0x5F}, /* MIPI Global Timing (Tclk_trail_min)*/ + {0x0814, CRL_REG_LEN_08BIT, 0x00}, /* MIPI Global Timing (Tclk_prepare) */ + {0x0815, CRL_REG_LEN_08BIT, 0x4F}, /* MIPI Global Timing (Tclk_prepare) */ + {0x0816, CRL_REG_LEN_08BIT, 0x01}, /* MIPI Global Timing (Tclk_zero) */ + {0x0817, CRL_REG_LEN_08BIT, 0x3F}, /* MIPI Global Timing (Tclk_zero) */ + {0x0818, CRL_REG_LEN_08BIT, 0x00}, /* MIPI Global Timing (Tlpx) */ + {0x0819, CRL_REG_LEN_08BIT, 0x3F}, /* MIPI Global Timing (Tlpx) */ + {0xE04C, CRL_REG_LEN_08BIT, 0x00}, /* Undocumented */ + {0xE04D, CRL_REG_LEN_08BIT, 0x87}, /* Undocumented */ + {0xE04E, CRL_REG_LEN_08BIT, 0x00}, /* Undocumented */ + {0xE04F, CRL_REG_LEN_08BIT, 0x1F}, /* Undocumented */ + + /* Output Data Select Setting */ + {0x3E20, CRL_REG_LEN_08BIT, 0x01}, /* Undocumented */ + {0x3E37, CRL_REG_LEN_08BIT, 0x00}, /* Undocumented */ + + /* PowerSave Setting */ + {0x3F50, CRL_REG_LEN_08BIT, 0x00}, /* Power save: Disable */ + {0x3F56, CRL_REG_LEN_08BIT, 0x01}, + {0x3F57, CRL_REG_LEN_08BIT, 0x4F}, +}; + +static struct crl_pll_configuration imx477_pll_configurations[] = { + { + .input_clk = 19200000, + .op_sys_clk = 600000000, /* 1200mbps / 2 */ + .bitsperpixel = 10, + .pixel_rate_csi = 240000000, + /* pixel_rate = (MIPICLK*2 * CSILANES)/10 */ 
+ .pixel_rate_pa = 240000000, + .csi_lanes = 2, + .comp_items = 0, + .ctrl_data = 0, + .pll_regs_items = ARRAY_SIZE(imx477_pll_1200mbps), + .pll_regs = imx477_pll_1200mbps, + }, + { + .input_clk = 19200000, + .op_sys_clk = 600000000, /* 1200mbps / 2 */ + .bitsperpixel = 12, + .pixel_rate_csi = 240000000, + /* pixel_rate = (MIPICLK*2 * CSILANES)/10 */ + .pixel_rate_pa = 240000000, + .csi_lanes = 2, + .comp_items = 0, + .ctrl_data = 0, + .pll_regs_items = ARRAY_SIZE(imx477_pll_1200mbps), + .pll_regs = imx477_pll_1200mbps, + }, +}; + +static struct crl_sensor_subdev_config imx477_sensor_subdevs[] = { + { + .subdev_type = CRL_SUBDEV_TYPE_BINNER, + .name = "imx477 binner", + }, + { + .subdev_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .name = "imx477 pixel array", + } +}; + +static struct crl_sensor_limits imx477_sensor_limits = { + .x_addr_min = 0, + .y_addr_min = 0, + .x_addr_max = 4056, + .y_addr_max = 3040, + .min_frame_length_lines = 320, + .max_frame_length_lines = 65535, + .min_line_length_pixels = 380, + .max_line_length_pixels = 32752, +}; + +static struct crl_sensor_detect_config imx477_sensor_detect_regset[] = { + { + .reg = { 0x0016, CRL_REG_LEN_08BIT, 0x000000ff }, + .width = 7, + }, + { + .reg = { 0x0017, CRL_REG_LEN_08BIT, 0x000000ff }, + .width = 7, + } +}; + +static struct crl_register_write_rep imx477_powerup_standby[] = { + {0x0100, CRL_REG_LEN_08BIT, 0x00}, + {0x00, CRL_REG_LEN_DELAY, 20, 0x00}, /* Delay 20ms */ +}; + +/* Power items, they are enabled in the order they are listed here */ +static struct crl_power_seq_entity imx477_power_items[] = { + { + .type = CRL_POWER_ETY_CLK_FRAMEWORK, + .val = 19200000, + }, + { + .type = CRL_POWER_ETY_GPIO_FROM_PDATA, + .val = 1, + .undo_val = 1, + }, +}; + +static struct crl_arithmetic_ops imx477_frame_desc_width_ops[] = { + { + .op = CRL_ASSIGNMENT, + .operand.entity_type = CRL_DYNAMIC_VAL_OPERAND_TYPE_VAR_REF, + .operand.entity_val = CRL_VAR_REF_OUTPUT_WIDTH, + }, +}; + +static struct crl_arithmetic_ops 
imx477_frame_desc_height_ops[] = { + { + .op = CRL_ASSIGNMENT, + .operand.entity_type = CRL_DYNAMIC_VAL_OPERAND_TYPE_CONST, + .operand.entity_val = 1, + }, +}; + +static struct crl_frame_desc imx477_frame_desc[] = { + { + .flags.entity_val = 0, + .bpp.entity_type = CRL_DYNAMIC_VAL_OPERAND_TYPE_VAR_REF, + .bpp.entity_val = CRL_VAR_REF_BITSPERPIXEL, + .pixelcode.entity_val = MEDIA_BUS_FMT_FIXED, + .length.entity_val = 0, + .start_line.entity_val = 0, + .start_pixel.entity_val = 0, + .width = { + .ops_items = ARRAY_SIZE(imx477_frame_desc_width_ops), + .ops = imx477_frame_desc_width_ops, + }, + .height = { + .ops_items = ARRAY_SIZE(imx477_frame_desc_height_ops), + .ops = imx477_frame_desc_height_ops, + }, + .csi2_channel.entity_val = 0, + .csi2_data_type.entity_val = 0x12, + }, + { + .flags.entity_val = 0, + .bpp.entity_type = CRL_DYNAMIC_VAL_OPERAND_TYPE_VAR_REF, + .bpp.entity_val = CRL_VAR_REF_BITSPERPIXEL, + .pixelcode.entity_val = MEDIA_BUS_FMT_FIXED, + .length.entity_val = 0, + .start_line.entity_val = 0, + .start_pixel.entity_val = 0, + .width = { + .ops_items = ARRAY_SIZE(imx477_frame_desc_width_ops), + .ops = imx477_frame_desc_width_ops, + }, + .height = { + .ops_items = ARRAY_SIZE(imx477_frame_desc_height_ops), + .ops = imx477_frame_desc_height_ops, + }, + .csi2_channel.entity_val = 1, + .csi2_data_type.entity_val = 0x12, + }, + { + .flags.entity_val = 0, + .bpp.entity_type = CRL_DYNAMIC_VAL_OPERAND_TYPE_VAR_REF, + .bpp.entity_val = CRL_VAR_REF_BITSPERPIXEL, + .pixelcode.entity_val = MEDIA_BUS_FMT_FIXED, + .length.entity_val = 0, + .start_line.entity_val = 0, + .start_pixel.entity_val = 0, + .width = { + .ops_items = ARRAY_SIZE(imx477_frame_desc_width_ops), + .ops = imx477_frame_desc_width_ops, + }, + .height = { + .ops_items = ARRAY_SIZE(imx477_frame_desc_height_ops), + .ops = imx477_frame_desc_height_ops, + }, + .csi2_channel.entity_val = 2, + .csi2_data_type.entity_val = 0x12, + }, +}; + +#endif /* __CRLMODULE_IMX477_COMMON_REGS_H_ */ diff --git 
a/drivers/media/i2c/crlmodule/crl_imx477_master_configuration.h b/drivers/media/i2c/crlmodule/crl_imx477_master_configuration.h new file mode 100644 index 0000000000000..10be93b072153 --- /dev/null +++ b/drivers/media/i2c/crlmodule/crl_imx477_master_configuration.h @@ -0,0 +1,1375 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (C) 2017 - 2018 Intel Corporation + * + * Author: Alexei Zavjalov + * + */ + +#ifndef __CRLMODULE_IMX477_MASTER_CONFIGURATION_H_ +#define __CRLMODULE_IMX477_MASTER_CONFIGURATION_H_ + +#include "crl_imx477_common_regs.h" + +static struct crl_register_write_rep imx477_onetime_init_regset_master[] = { + {0x0103, CRL_REG_LEN_08BIT, 0x01}, /* Software reset */ + + {0x3010, CRL_REG_LEN_08BIT, 0x01}, /* SLAVE_ADD_EN_2ND */ + {0x3011, CRL_REG_LEN_08BIT, 0x01}, /* SLAVE_ADD_ACKEN_2ND */ + + {0x3F0B, CRL_REG_LEN_08BIT, 0x01}, /* Multi camera mode: on */ + + {0x3041, CRL_REG_LEN_08BIT, 0x01}, /* Mode: Master */ + {0x3040, CRL_REG_LEN_08BIT, 0x01}, /* XVS pin: out */ + {0x4B81, CRL_REG_LEN_08BIT, 0x01}, /* Mode: Master */ + + {0x3042, CRL_REG_LEN_08BIT, 0x00}, /* VSYNC Delay in lines [15:8] */ + {0x3043, CRL_REG_LEN_08BIT, 0x00}, /* VSYNC Delay in lines [7:0] */ + {0x3044, CRL_REG_LEN_08BIT, 0x00}, /* VSYNC Delay in clocks [15:8] */ + {0x3045, CRL_REG_LEN_08BIT, 0x00}, /* VSYNC Delay in clocks [7:0] */ + {0x3045, CRL_REG_LEN_08BIT, 0x00}, /* VSYNC thin down setting */ + + /* External Clock Setting */ + {0x0136, CRL_REG_LEN_08BIT, 0x13}, /* External clock freq (dec) [15:8] */ + {0x0137, CRL_REG_LEN_08BIT, 0x33}, /* External clock freq (dec) [7:0] */ + + /* Global Setting */ + {0x0808, CRL_REG_LEN_08BIT, 0x02}, /* MIPI Global Timing: Register Control */ + {0xE07A, CRL_REG_LEN_08BIT, 0x01}, + {0xE000, CRL_REG_LEN_08BIT, 0x00}, /* RUN/STOP of CSI2 during Frame Blanking: HS */ + {0x4AE9, CRL_REG_LEN_08BIT, 0x18}, + {0x4AEA, CRL_REG_LEN_08BIT, 0x08}, + {0xF61C, CRL_REG_LEN_08BIT, 0x04}, + {0xF61E, CRL_REG_LEN_08BIT, 0x04}, + {0x4AE9, 
CRL_REG_LEN_08BIT, 0x21}, + {0x4AEA, CRL_REG_LEN_08BIT, 0x80}, + {0x38A8, CRL_REG_LEN_08BIT, 0x1F}, + {0x38A9, CRL_REG_LEN_08BIT, 0xFF}, + {0x38AA, CRL_REG_LEN_08BIT, 0x1F}, + {0x38AB, CRL_REG_LEN_08BIT, 0xFF}, + {0x420B, CRL_REG_LEN_08BIT, 0x01}, + {0x55D4, CRL_REG_LEN_08BIT, 0x00}, + {0x55D5, CRL_REG_LEN_08BIT, 0x00}, + {0x55D6, CRL_REG_LEN_08BIT, 0x07}, + {0x55D7, CRL_REG_LEN_08BIT, 0xFF}, + {0x55E8, CRL_REG_LEN_08BIT, 0x07}, + {0x55E9, CRL_REG_LEN_08BIT, 0xFF}, + {0x55EA, CRL_REG_LEN_08BIT, 0x00}, + {0x55EB, CRL_REG_LEN_08BIT, 0x00}, + {0x574C, CRL_REG_LEN_08BIT, 0x07}, + {0x574D, CRL_REG_LEN_08BIT, 0xFF}, + {0x574E, CRL_REG_LEN_08BIT, 0x00}, + {0x574F, CRL_REG_LEN_08BIT, 0x00}, + {0x5754, CRL_REG_LEN_08BIT, 0x00}, + {0x5755, CRL_REG_LEN_08BIT, 0x00}, + {0x5756, CRL_REG_LEN_08BIT, 0x07}, + {0x5757, CRL_REG_LEN_08BIT, 0xFF}, + {0x5973, CRL_REG_LEN_08BIT, 0x04}, + {0x5974, CRL_REG_LEN_08BIT, 0x01}, + {0x5D13, CRL_REG_LEN_08BIT, 0xC3}, + {0x5D14, CRL_REG_LEN_08BIT, 0x58}, + {0x5D15, CRL_REG_LEN_08BIT, 0xA3}, + {0x5D16, CRL_REG_LEN_08BIT, 0x1D}, + {0x5D17, CRL_REG_LEN_08BIT, 0x65}, + {0x5D18, CRL_REG_LEN_08BIT, 0x8C}, + {0x5D1A, CRL_REG_LEN_08BIT, 0x06}, + {0x5D1B, CRL_REG_LEN_08BIT, 0xA9}, + {0x5D1C, CRL_REG_LEN_08BIT, 0x45}, + {0x5D1D, CRL_REG_LEN_08BIT, 0x3A}, + {0x5D1E, CRL_REG_LEN_08BIT, 0xAB}, + {0x5D1F, CRL_REG_LEN_08BIT, 0x15}, + {0x5D21, CRL_REG_LEN_08BIT, 0x0E}, + {0x5D22, CRL_REG_LEN_08BIT, 0x52}, + {0x5D23, CRL_REG_LEN_08BIT, 0xAA}, + {0x5D24, CRL_REG_LEN_08BIT, 0x7D}, + {0x5D25, CRL_REG_LEN_08BIT, 0x57}, + {0x5D26, CRL_REG_LEN_08BIT, 0xA8}, + {0x5D37, CRL_REG_LEN_08BIT, 0x5A}, + {0x5D38, CRL_REG_LEN_08BIT, 0x5A}, + {0x5D77, CRL_REG_LEN_08BIT, 0x7F}, + {0x7B7C, CRL_REG_LEN_08BIT, 0x00}, + {0x7B7D, CRL_REG_LEN_08BIT, 0x00}, + {0x8D1F, CRL_REG_LEN_08BIT, 0x00}, + {0x8D27, CRL_REG_LEN_08BIT, 0x00}, + {0x9004, CRL_REG_LEN_08BIT, 0x03}, + {0x9200, CRL_REG_LEN_08BIT, 0x50}, + {0x9201, CRL_REG_LEN_08BIT, 0x6C}, + {0x9202, CRL_REG_LEN_08BIT, 0x71}, + {0x9203, 
CRL_REG_LEN_08BIT, 0x00}, + {0x9204, CRL_REG_LEN_08BIT, 0x71}, + {0x9205, CRL_REG_LEN_08BIT, 0x01}, + {0x9371, CRL_REG_LEN_08BIT, 0x6A}, + {0x9373, CRL_REG_LEN_08BIT, 0x6A}, + {0x9375, CRL_REG_LEN_08BIT, 0x64}, + {0x990C, CRL_REG_LEN_08BIT, 0x00}, + {0x990D, CRL_REG_LEN_08BIT, 0x08}, + {0x9956, CRL_REG_LEN_08BIT, 0x8C}, + {0x9957, CRL_REG_LEN_08BIT, 0x64}, + {0x9958, CRL_REG_LEN_08BIT, 0x50}, + {0x9A48, CRL_REG_LEN_08BIT, 0x06}, + {0x9A49, CRL_REG_LEN_08BIT, 0x06}, + {0x9A4A, CRL_REG_LEN_08BIT, 0x06}, + {0x9A4B, CRL_REG_LEN_08BIT, 0x06}, + {0x9A4C, CRL_REG_LEN_08BIT, 0x06}, + {0x9A4D, CRL_REG_LEN_08BIT, 0x06}, + {0xA001, CRL_REG_LEN_08BIT, 0x0A}, + {0xA003, CRL_REG_LEN_08BIT, 0x0A}, + {0xA005, CRL_REG_LEN_08BIT, 0x0A}, + {0xA006, CRL_REG_LEN_08BIT, 0x01}, + {0xA007, CRL_REG_LEN_08BIT, 0xC0}, + {0xA009, CRL_REG_LEN_08BIT, 0xC0}, + + /* Image Tuning */ + {0x3D8A, CRL_REG_LEN_08BIT, 0x01}, + {0x7B3B, CRL_REG_LEN_08BIT, 0x01}, + {0x7B4C, CRL_REG_LEN_08BIT, 0x00}, + {0x9905, CRL_REG_LEN_08BIT, 0x00}, + {0x9907, CRL_REG_LEN_08BIT, 0x00}, + {0x9909, CRL_REG_LEN_08BIT, 0x00}, + {0x990B, CRL_REG_LEN_08BIT, 0x00}, + {0x9944, CRL_REG_LEN_08BIT, 0x3C}, + {0x9947, CRL_REG_LEN_08BIT, 0x3C}, + {0x994A, CRL_REG_LEN_08BIT, 0x8C}, + {0x994B, CRL_REG_LEN_08BIT, 0x50}, + {0x994C, CRL_REG_LEN_08BIT, 0x1B}, + {0x994D, CRL_REG_LEN_08BIT, 0x8C}, + {0x994E, CRL_REG_LEN_08BIT, 0x50}, + {0x994F, CRL_REG_LEN_08BIT, 0x1B}, + {0x9950, CRL_REG_LEN_08BIT, 0x8C}, + {0x9951, CRL_REG_LEN_08BIT, 0x1B}, + {0x9952, CRL_REG_LEN_08BIT, 0x0A}, + {0x9953, CRL_REG_LEN_08BIT, 0x8C}, + {0x9954, CRL_REG_LEN_08BIT, 0x1B}, + {0x9955, CRL_REG_LEN_08BIT, 0x0A}, + {0x9A13, CRL_REG_LEN_08BIT, 0x04}, + {0x9A14, CRL_REG_LEN_08BIT, 0x04}, + {0x9A19, CRL_REG_LEN_08BIT, 0x00}, + {0x9A1C, CRL_REG_LEN_08BIT, 0x04}, + {0x9A1D, CRL_REG_LEN_08BIT, 0x04}, + {0x9A26, CRL_REG_LEN_08BIT, 0x05}, + {0x9A27, CRL_REG_LEN_08BIT, 0x05}, + {0x9A2C, CRL_REG_LEN_08BIT, 0x01}, + {0x9A2D, CRL_REG_LEN_08BIT, 0x03}, + {0x9A2F, 
CRL_REG_LEN_08BIT, 0x05}, + {0x9A30, CRL_REG_LEN_08BIT, 0x05}, + {0x9A41, CRL_REG_LEN_08BIT, 0x00}, + {0x9A46, CRL_REG_LEN_08BIT, 0x00}, + {0x9A47, CRL_REG_LEN_08BIT, 0x00}, + {0x9C17, CRL_REG_LEN_08BIT, 0x35}, + {0x9C1D, CRL_REG_LEN_08BIT, 0x31}, + {0x9C29, CRL_REG_LEN_08BIT, 0x50}, + {0x9C3B, CRL_REG_LEN_08BIT, 0x2F}, + {0x9C41, CRL_REG_LEN_08BIT, 0x6B}, + {0x9C47, CRL_REG_LEN_08BIT, 0x2D}, + {0x9C4D, CRL_REG_LEN_08BIT, 0x40}, + {0x9C6B, CRL_REG_LEN_08BIT, 0x00}, + {0x9C71, CRL_REG_LEN_08BIT, 0xC8}, + {0x9C73, CRL_REG_LEN_08BIT, 0x32}, + {0x9C75, CRL_REG_LEN_08BIT, 0x04}, + {0x9C7D, CRL_REG_LEN_08BIT, 0x2D}, + {0x9C83, CRL_REG_LEN_08BIT, 0x40}, + {0x9C94, CRL_REG_LEN_08BIT, 0x3F}, + {0x9C95, CRL_REG_LEN_08BIT, 0x3F}, + {0x9C96, CRL_REG_LEN_08BIT, 0x3F}, + {0x9C97, CRL_REG_LEN_08BIT, 0x00}, + {0x9C98, CRL_REG_LEN_08BIT, 0x00}, + {0x9C99, CRL_REG_LEN_08BIT, 0x00}, + {0x9C9A, CRL_REG_LEN_08BIT, 0x3F}, + {0x9C9B, CRL_REG_LEN_08BIT, 0x3F}, + {0x9C9C, CRL_REG_LEN_08BIT, 0x3F}, + {0x9CA0, CRL_REG_LEN_08BIT, 0x0F}, + {0x9CA1, CRL_REG_LEN_08BIT, 0x0F}, + {0x9CA2, CRL_REG_LEN_08BIT, 0x0F}, + {0x9CA3, CRL_REG_LEN_08BIT, 0x00}, + {0x9CA4, CRL_REG_LEN_08BIT, 0x00}, + {0x9CA5, CRL_REG_LEN_08BIT, 0x00}, + {0x9CA6, CRL_REG_LEN_08BIT, 0x1E}, + {0x9CA7, CRL_REG_LEN_08BIT, 0x1E}, + {0x9CA8, CRL_REG_LEN_08BIT, 0x1E}, + {0x9CA9, CRL_REG_LEN_08BIT, 0x00}, + {0x9CAA, CRL_REG_LEN_08BIT, 0x00}, + {0x9CAB, CRL_REG_LEN_08BIT, 0x00}, + {0x9CAC, CRL_REG_LEN_08BIT, 0x09}, + {0x9CAD, CRL_REG_LEN_08BIT, 0x09}, + {0x9CAE, CRL_REG_LEN_08BIT, 0x09}, + {0x9CBD, CRL_REG_LEN_08BIT, 0x50}, + {0x9CBF, CRL_REG_LEN_08BIT, 0x50}, + {0x9CC1, CRL_REG_LEN_08BIT, 0x50}, + {0x9CC3, CRL_REG_LEN_08BIT, 0x40}, + {0x9CC5, CRL_REG_LEN_08BIT, 0x40}, + {0x9CC7, CRL_REG_LEN_08BIT, 0x40}, + {0x9CC9, CRL_REG_LEN_08BIT, 0x0A}, + {0x9CCB, CRL_REG_LEN_08BIT, 0x0A}, + {0x9CCD, CRL_REG_LEN_08BIT, 0x0A}, + {0x9D17, CRL_REG_LEN_08BIT, 0x35}, + {0x9D1D, CRL_REG_LEN_08BIT, 0x31}, + {0x9D29, CRL_REG_LEN_08BIT, 0x50}, + {0x9D3B, 
CRL_REG_LEN_08BIT, 0x2F}, + {0x9D41, CRL_REG_LEN_08BIT, 0x6B}, + {0x9D47, CRL_REG_LEN_08BIT, 0x42}, + {0x9D4D, CRL_REG_LEN_08BIT, 0x5A}, + {0x9D6B, CRL_REG_LEN_08BIT, 0x00}, + {0x9D71, CRL_REG_LEN_08BIT, 0xC8}, + {0x9D73, CRL_REG_LEN_08BIT, 0x32}, + {0x9D75, CRL_REG_LEN_08BIT, 0x04}, + {0x9D7D, CRL_REG_LEN_08BIT, 0x42}, + {0x9D83, CRL_REG_LEN_08BIT, 0x5A}, + {0x9D94, CRL_REG_LEN_08BIT, 0x3F}, + {0x9D95, CRL_REG_LEN_08BIT, 0x3F}, + {0x9D96, CRL_REG_LEN_08BIT, 0x3F}, + {0x9D97, CRL_REG_LEN_08BIT, 0x00}, + {0x9D98, CRL_REG_LEN_08BIT, 0x00}, + {0x9D99, CRL_REG_LEN_08BIT, 0x00}, + {0x9D9A, CRL_REG_LEN_08BIT, 0x3F}, + {0x9D9B, CRL_REG_LEN_08BIT, 0x3F}, + {0x9D9C, CRL_REG_LEN_08BIT, 0x3F}, + {0x9D9D, CRL_REG_LEN_08BIT, 0x1F}, + {0x9D9E, CRL_REG_LEN_08BIT, 0x1F}, + {0x9D9F, CRL_REG_LEN_08BIT, 0x1F}, + {0x9DA0, CRL_REG_LEN_08BIT, 0x0F}, + {0x9DA1, CRL_REG_LEN_08BIT, 0x0F}, + {0x9DA2, CRL_REG_LEN_08BIT, 0x0F}, + {0x9DA3, CRL_REG_LEN_08BIT, 0x00}, + {0x9DA4, CRL_REG_LEN_08BIT, 0x00}, + {0x9DA5, CRL_REG_LEN_08BIT, 0x00}, + {0x9DA6, CRL_REG_LEN_08BIT, 0x1E}, + {0x9DA7, CRL_REG_LEN_08BIT, 0x1E}, + {0x9DA8, CRL_REG_LEN_08BIT, 0x1E}, + {0x9DA9, CRL_REG_LEN_08BIT, 0x00}, + {0x9DAA, CRL_REG_LEN_08BIT, 0x00}, + {0x9DAB, CRL_REG_LEN_08BIT, 0x00}, + {0x9DAC, CRL_REG_LEN_08BIT, 0x09}, + {0x9DAD, CRL_REG_LEN_08BIT, 0x09}, + {0x9DAE, CRL_REG_LEN_08BIT, 0x09}, + {0x9DC9, CRL_REG_LEN_08BIT, 0x0A}, + {0x9DCB, CRL_REG_LEN_08BIT, 0x0A}, + {0x9DCD, CRL_REG_LEN_08BIT, 0x0A}, + {0x9E17, CRL_REG_LEN_08BIT, 0x35}, + {0x9E1D, CRL_REG_LEN_08BIT, 0x31}, + {0x9E29, CRL_REG_LEN_08BIT, 0x50}, + {0x9E3B, CRL_REG_LEN_08BIT, 0x2F}, + {0x9E41, CRL_REG_LEN_08BIT, 0x6B}, + {0x9E47, CRL_REG_LEN_08BIT, 0x2D}, + {0x9E4D, CRL_REG_LEN_08BIT, 0x40}, + {0x9E6B, CRL_REG_LEN_08BIT, 0x00}, + {0x9E71, CRL_REG_LEN_08BIT, 0xC8}, + {0x9E73, CRL_REG_LEN_08BIT, 0x32}, + {0x9E75, CRL_REG_LEN_08BIT, 0x04}, + {0x9E94, CRL_REG_LEN_08BIT, 0x0F}, + {0x9E95, CRL_REG_LEN_08BIT, 0x0F}, + {0x9E96, CRL_REG_LEN_08BIT, 0x0F}, + {0x9E97, 
CRL_REG_LEN_08BIT, 0x00}, + {0x9E98, CRL_REG_LEN_08BIT, 0x00}, + {0x9E99, CRL_REG_LEN_08BIT, 0x00}, + {0x9EA0, CRL_REG_LEN_08BIT, 0x0F}, + {0x9EA1, CRL_REG_LEN_08BIT, 0x0F}, + {0x9EA2, CRL_REG_LEN_08BIT, 0x0F}, + {0x9EA3, CRL_REG_LEN_08BIT, 0x00}, + {0x9EA4, CRL_REG_LEN_08BIT, 0x00}, + {0x9EA5, CRL_REG_LEN_08BIT, 0x00}, + {0x9EA6, CRL_REG_LEN_08BIT, 0x3F}, + {0x9EA7, CRL_REG_LEN_08BIT, 0x3F}, + {0x9EA8, CRL_REG_LEN_08BIT, 0x3F}, + {0x9EA9, CRL_REG_LEN_08BIT, 0x00}, + {0x9EAA, CRL_REG_LEN_08BIT, 0x00}, + {0x9EAB, CRL_REG_LEN_08BIT, 0x00}, + {0x9EAC, CRL_REG_LEN_08BIT, 0x09}, + {0x9EAD, CRL_REG_LEN_08BIT, 0x09}, + {0x9EAE, CRL_REG_LEN_08BIT, 0x09}, + {0x9EC9, CRL_REG_LEN_08BIT, 0x0A}, + {0x9ECB, CRL_REG_LEN_08BIT, 0x0A}, + {0x9ECD, CRL_REG_LEN_08BIT, 0x0A}, + {0x9F17, CRL_REG_LEN_08BIT, 0x35}, + {0x9F1D, CRL_REG_LEN_08BIT, 0x31}, + {0x9F29, CRL_REG_LEN_08BIT, 0x50}, + {0x9F3B, CRL_REG_LEN_08BIT, 0x2F}, + {0x9F41, CRL_REG_LEN_08BIT, 0x6B}, + {0x9F47, CRL_REG_LEN_08BIT, 0x42}, + {0x9F4D, CRL_REG_LEN_08BIT, 0x5A}, + {0x9F6B, CRL_REG_LEN_08BIT, 0x00}, + {0x9F71, CRL_REG_LEN_08BIT, 0xC8}, + {0x9F73, CRL_REG_LEN_08BIT, 0x32}, + {0x9F75, CRL_REG_LEN_08BIT, 0x04}, + {0x9F94, CRL_REG_LEN_08BIT, 0x0F}, + {0x9F95, CRL_REG_LEN_08BIT, 0x0F}, + {0x9F96, CRL_REG_LEN_08BIT, 0x0F}, + {0x9F97, CRL_REG_LEN_08BIT, 0x00}, + {0x9F98, CRL_REG_LEN_08BIT, 0x00}, + {0x9F99, CRL_REG_LEN_08BIT, 0x00}, + {0x9F9A, CRL_REG_LEN_08BIT, 0x2F}, + {0x9F9B, CRL_REG_LEN_08BIT, 0x2F}, + {0x9F9C, CRL_REG_LEN_08BIT, 0x2F}, + {0x9F9D, CRL_REG_LEN_08BIT, 0x00}, + {0x9F9E, CRL_REG_LEN_08BIT, 0x00}, + {0x9F9F, CRL_REG_LEN_08BIT, 0x00}, + {0x9FA0, CRL_REG_LEN_08BIT, 0x0F}, + {0x9FA1, CRL_REG_LEN_08BIT, 0x0F}, + {0x9FA2, CRL_REG_LEN_08BIT, 0x0F}, + {0x9FA3, CRL_REG_LEN_08BIT, 0x00}, + {0x9FA4, CRL_REG_LEN_08BIT, 0x00}, + {0x9FA5, CRL_REG_LEN_08BIT, 0x00}, + {0x9FA6, CRL_REG_LEN_08BIT, 0x1E}, + {0x9FA7, CRL_REG_LEN_08BIT, 0x1E}, + {0x9FA8, CRL_REG_LEN_08BIT, 0x1E}, + {0x9FA9, CRL_REG_LEN_08BIT, 0x00}, + {0x9FAA, 
CRL_REG_LEN_08BIT, 0x00}, + {0x9FAB, CRL_REG_LEN_08BIT, 0x00}, + {0x9FAC, CRL_REG_LEN_08BIT, 0x09}, + {0x9FAD, CRL_REG_LEN_08BIT, 0x09}, + {0x9FAE, CRL_REG_LEN_08BIT, 0x09}, + {0x9FC9, CRL_REG_LEN_08BIT, 0x0A}, + {0x9FCB, CRL_REG_LEN_08BIT, 0x0A}, + {0x9FCD, CRL_REG_LEN_08BIT, 0x0A}, + {0xA14B, CRL_REG_LEN_08BIT, 0xFF}, + {0xA151, CRL_REG_LEN_08BIT, 0x0C}, + {0xA153, CRL_REG_LEN_08BIT, 0x50}, + {0xA155, CRL_REG_LEN_08BIT, 0x02}, + {0xA157, CRL_REG_LEN_08BIT, 0x00}, + {0xA1AD, CRL_REG_LEN_08BIT, 0xFF}, + {0xA1B3, CRL_REG_LEN_08BIT, 0x0C}, + {0xA1B5, CRL_REG_LEN_08BIT, 0x50}, + {0xA1B9, CRL_REG_LEN_08BIT, 0x00}, + {0xA24B, CRL_REG_LEN_08BIT, 0xFF}, + {0xA257, CRL_REG_LEN_08BIT, 0x00}, + {0xA2AD, CRL_REG_LEN_08BIT, 0xFF}, + {0xA2B9, CRL_REG_LEN_08BIT, 0x00}, + {0xB21F, CRL_REG_LEN_08BIT, 0x04}, + {0xB35C, CRL_REG_LEN_08BIT, 0x00}, + {0xB35E, CRL_REG_LEN_08BIT, 0x08}, +}; + +static struct crl_register_write_rep imx477_4056_3040_19MHZ_master[] = { + /* Frame Horizontal Clock Count */ + {0x0342, CRL_REG_LEN_08BIT, 0x39}, /* Line length [15:8] */ + {0x0343, CRL_REG_LEN_08BIT, 0x14}, /* Line length [7:0] */ + + /* Frame Vertical Clock Count */ + {0x0340, CRL_REG_LEN_08BIT, 0x20}, /* Frame length [15:8] */ + {0x0341, CRL_REG_LEN_08BIT, 0x11}, /* Frame length [7:0] */ + + /* Visible Size */ + {0x0344, CRL_REG_LEN_08BIT, 0x00}, /* Analog cropping start X [12:8] */ + {0x0345, CRL_REG_LEN_08BIT, 0x00}, /* Analog cropping start X [7:0] */ + {0x0346, CRL_REG_LEN_08BIT, 0x00}, /* Analog cropping start Y [12:8] */ + {0x0347, CRL_REG_LEN_08BIT, 0x00}, /* Analog cropping start Y [7:0] */ + {0x0348, CRL_REG_LEN_08BIT, 0x0F}, /* Analog cropping end X [12:8] */ + {0x0349, CRL_REG_LEN_08BIT, 0xD7}, /* Analog cropping end X [7:0] */ + {0x034A, CRL_REG_LEN_08BIT, 0x0B}, /* Analog cropping end Y [12:8] */ + {0x034B, CRL_REG_LEN_08BIT, 0xDF}, /* Analog cropping end Y [7:0] */ + + /* Mode Setting */ + {0x00E3, CRL_REG_LEN_08BIT, 0x00}, /* DOL-HDR Disable */ + {0x00E4, CRL_REG_LEN_08BIT, 
0x00}, /* DOL Mode: DOL-HDR Disable */ + {0x0220, CRL_REG_LEN_08BIT, 0x00}, /* Undocumented */ + {0x0221, CRL_REG_LEN_08BIT, 0x11}, /* Undocumented */ + {0x0381, CRL_REG_LEN_08BIT, 0x01}, /* Num of pixels skipped, even -> odd */ + {0x0383, CRL_REG_LEN_08BIT, 0x01}, /* Num of pixels skipped, odd -> even */ + {0x0385, CRL_REG_LEN_08BIT, 0x01}, /* Num of lines skipped, even -> odd */ + {0x0387, CRL_REG_LEN_08BIT, 0x01}, /* Num of lines skipped, odd -> even */ + {0x0900, CRL_REG_LEN_08BIT, 0x00}, /* Binning mode: Disable */ + {0x0901, CRL_REG_LEN_08BIT, 0x11}, /* Binning Type for Horizontal */ + {0x0902, CRL_REG_LEN_08BIT, 0x02}, /* Binning Type for Vertical */ + {0x3140, CRL_REG_LEN_08BIT, 0x02}, /* Undocumented */ + {0x3C00, CRL_REG_LEN_08BIT, 0x00}, /* Undocumented */ + {0x3C01, CRL_REG_LEN_08BIT, 0x03}, /* Undocumented */ + {0x3C02, CRL_REG_LEN_08BIT, 0xDC}, /* Undocumented */ + {0x3F0D, CRL_REG_LEN_08BIT, 0x00}, /* AD converter: 10 bit */ + {0x5748, CRL_REG_LEN_08BIT, 0x07}, /* Undocumented */ + {0x5749, CRL_REG_LEN_08BIT, 0xFF}, /* Undocumented */ + {0x574A, CRL_REG_LEN_08BIT, 0x00}, /* Undocumented */ + {0x574B, CRL_REG_LEN_08BIT, 0x00}, /* Undocumented */ + {0x7B75, CRL_REG_LEN_08BIT, 0x0E}, /* Undocumented */ + {0x7B76, CRL_REG_LEN_08BIT, 0x09}, /* Undocumented */ + {0x7B77, CRL_REG_LEN_08BIT, 0x0C}, /* Undocumented */ + {0x7B78, CRL_REG_LEN_08BIT, 0x06}, /* Undocumented */ + {0x7B79, CRL_REG_LEN_08BIT, 0x3B}, /* Undocumented */ + {0x7B53, CRL_REG_LEN_08BIT, 0x01}, /* Undocumented */ + {0x9369, CRL_REG_LEN_08BIT, 0x5A}, /* Undocumented */ + {0x936B, CRL_REG_LEN_08BIT, 0x55}, /* Undocumented */ + {0x936D, CRL_REG_LEN_08BIT, 0x28}, /* Undocumented */ + {0x9304, CRL_REG_LEN_08BIT, 0x03}, /* Undocumented */ + {0x9305, CRL_REG_LEN_08BIT, 0x00}, /* Undocumented */ + {0x9E9A, CRL_REG_LEN_08BIT, 0x2F}, /* Undocumented */ + {0x9E9B, CRL_REG_LEN_08BIT, 0x2F}, /* Undocumented */ + {0x9E9C, CRL_REG_LEN_08BIT, 0x2F}, /* Undocumented */ + {0x9E9D, CRL_REG_LEN_08BIT, 0x00}, 
/* Undocumented */ + {0x9E9E, CRL_REG_LEN_08BIT, 0x00}, /* Undocumented */ + {0x9E9F, CRL_REG_LEN_08BIT, 0x00}, /* Undocumented */ + {0xA2A9, CRL_REG_LEN_08BIT, 0x60}, /* Undocumented */ + {0xA2B7, CRL_REG_LEN_08BIT, 0x00}, /* Undocumented */ + + /* Digital Crop & Scaling */ + {0x0401, CRL_REG_LEN_08BIT, 0x00}, /* Scaling mode: No Scaling */ + {0x0404, CRL_REG_LEN_08BIT, 0x00}, /* Down Scaling Factor M [8] */ + {0x0405, CRL_REG_LEN_08BIT, 0x10}, /* Down Scaling Factor M [7:0] */ + {0x0408, CRL_REG_LEN_08BIT, 0x00}, /* Crop Offset from X [12:8] */ + {0x0409, CRL_REG_LEN_08BIT, 0x00}, /* Crop Offset from X [7:0] */ + {0x040A, CRL_REG_LEN_08BIT, 0x00}, /* Crop Offset from Y [12:8] */ + {0x040B, CRL_REG_LEN_08BIT, 0x00}, /* Crop Offset from Y [7:0] */ + {0x040C, CRL_REG_LEN_08BIT, 0x0F}, /* Width after cropping [12:8] */ + {0x040D, CRL_REG_LEN_08BIT, 0xD8}, /* Width after cropping [7:0] */ + {0x040E, CRL_REG_LEN_08BIT, 0x0B}, /* Height after cropping [12:8] */ + {0x040F, CRL_REG_LEN_08BIT, 0xE0}, /* Height after cropping [7:0] */ + + /* Output Crop */ + {0x034C, CRL_REG_LEN_08BIT, 0x0F}, /* X output size [12:8] */ + {0x034D, CRL_REG_LEN_08BIT, 0xD8}, /* X output size [7:0] */ + {0x034E, CRL_REG_LEN_08BIT, 0x0B}, /* Y output size [12:8] */ + {0x034F, CRL_REG_LEN_08BIT, 0xE0}, /* Y output size [7:0] */ +}; + +static struct crl_register_write_rep imx477_4056_3040_19MHZ_DOL_2f_master[] = { + /* Frame Horizontal Clock Count */ + {0x0342, CRL_REG_LEN_08BIT, 0x39}, /* Line length [15:8] */ + {0x0343, CRL_REG_LEN_08BIT, 0x14}, /* Line length [7:0] */ + /* Frame Vertical Clock Count */ + {0x0340, CRL_REG_LEN_08BIT, 0x20}, /* Frame length [15:8] */ + {0x0341, CRL_REG_LEN_08BIT, 0x11}, /* Frame length [7:0] */ + /* Visible Size */ + {0x0344, CRL_REG_LEN_08BIT, 0x00}, /* Analog cropping start X [12:8] */ + {0x0345, CRL_REG_LEN_08BIT, 0x00}, /* Analog cropping start X [7:0] */ + {0x0346, CRL_REG_LEN_08BIT, 0x00}, /* Analog cropping start Y [12:8] */ + {0x0347, CRL_REG_LEN_08BIT, 
0x00}, /* Analog cropping start Y [7:0] */ + {0x0348, CRL_REG_LEN_08BIT, 0x0F}, /* Analog cropping end X [12:8] */ + {0x0349, CRL_REG_LEN_08BIT, 0xD7}, /* Analog cropping end X [7:0] */ + {0x034A, CRL_REG_LEN_08BIT, 0x0B}, /* Analog cropping end Y [12:8] */ + {0x034B, CRL_REG_LEN_08BIT, 0xDF}, /* Analog cropping end Y [7:0] */ + /* Mode Setting */ + {0x00E3, CRL_REG_LEN_08BIT, 0x01}, /* DOL-HDR Enable */ + {0x00E4, CRL_REG_LEN_08BIT, 0x01}, /* DOL Mode: 2 frames in DOL-HDR */ + /* virtual channel ID of visible line and embedded line of DOL 2nd frame */ + {0x3E10, CRL_REG_LEN_08BIT, 0x01}, + {0x0220, CRL_REG_LEN_08BIT, 0x00}, /* Undocumented */ + {0x0221, CRL_REG_LEN_08BIT, 0x11}, /* Undocumented */ + {0x0381, CRL_REG_LEN_08BIT, 0x01}, /* Num of pixels skipped, even -> odd */ + {0x0383, CRL_REG_LEN_08BIT, 0x01}, /* Num of pixels skipped, odd -> even */ + {0x0385, CRL_REG_LEN_08BIT, 0x01}, /* Num of lines skipped, even -> odd */ + {0x0387, CRL_REG_LEN_08BIT, 0x01}, /* Num of lines skipped, odd -> even */ + {0x0900, CRL_REG_LEN_08BIT, 0x00}, /* Binning mode: Disable */ + {0x0901, CRL_REG_LEN_08BIT, 0x11}, /* Binning Type for Horizontal */ + {0x0902, CRL_REG_LEN_08BIT, 0x02}, /* Binning Type for Vertical */ + {0x3140, CRL_REG_LEN_08BIT, 0x02}, /* Undocumented */ + {0x3C00, CRL_REG_LEN_08BIT, 0x00}, /* Undocumented */ + {0x3C01, CRL_REG_LEN_08BIT, 0x03}, /* Undocumented */ + {0x3C02, CRL_REG_LEN_08BIT, 0xDC}, /* Undocumented */ + {0x3F0D, CRL_REG_LEN_08BIT, 0x00}, /* AD converter: 10 bit */ + {0x5748, CRL_REG_LEN_08BIT, 0x07}, /* Undocumented */ + {0x5749, CRL_REG_LEN_08BIT, 0xFF}, /* Undocumented */ + {0x574A, CRL_REG_LEN_08BIT, 0x00}, /* Undocumented */ + {0x574B, CRL_REG_LEN_08BIT, 0x00}, /* Undocumented */ + {0x7B75, CRL_REG_LEN_08BIT, 0x0E}, /* Undocumented */ + {0x7B76, CRL_REG_LEN_08BIT, 0x09}, /* Undocumented */ + {0x7B77, CRL_REG_LEN_08BIT, 0x0C}, /* Undocumented */ + {0x7B78, CRL_REG_LEN_08BIT, 0x06}, /* Undocumented */ + {0x7B79, CRL_REG_LEN_08BIT, 0x3B}, /* 
Undocumented */ + {0x7B53, CRL_REG_LEN_08BIT, 0x01}, /* Undocumented */ + {0x9369, CRL_REG_LEN_08BIT, 0x5A}, /* Undocumented */ + {0x936B, CRL_REG_LEN_08BIT, 0x55}, /* Undocumented */ + {0x936D, CRL_REG_LEN_08BIT, 0x28}, /* Undocumented */ + {0x9304, CRL_REG_LEN_08BIT, 0x03}, /* Undocumented */ + {0x9305, CRL_REG_LEN_08BIT, 0x00}, /* Undocumented */ + {0x9E9A, CRL_REG_LEN_08BIT, 0x2F}, /* Undocumented */ + {0x9E9B, CRL_REG_LEN_08BIT, 0x2F}, /* Undocumented */ + {0x9E9C, CRL_REG_LEN_08BIT, 0x2F}, /* Undocumented */ + {0x9E9D, CRL_REG_LEN_08BIT, 0x00}, /* Undocumented */ + {0x9E9E, CRL_REG_LEN_08BIT, 0x00}, /* Undocumented */ + {0x9E9F, CRL_REG_LEN_08BIT, 0x00}, /* Undocumented */ + {0xA2A9, CRL_REG_LEN_08BIT, 0x60}, /* Undocumented */ + {0xA2B7, CRL_REG_LEN_08BIT, 0x00}, /* Undocumented */ + /* Digital Crop & Scaling */ + {0x0401, CRL_REG_LEN_08BIT, 0x00}, /* Scaling mode: No Scaling */ + {0x0404, CRL_REG_LEN_08BIT, 0x00}, /* Down Scaling Factor M [8] */ + {0x0405, CRL_REG_LEN_08BIT, 0x10}, /* Down Scaling Factor M [7:0] */ + {0x0408, CRL_REG_LEN_08BIT, 0x00}, /* Crop Offset from X [12:8] */ + {0x0409, CRL_REG_LEN_08BIT, 0x00}, /* Crop Offset from X [7:0] */ + {0x040A, CRL_REG_LEN_08BIT, 0x00}, /* Crop Offset from Y [12:8] */ + {0x040B, CRL_REG_LEN_08BIT, 0x00}, /* Crop Offset from Y [7:0] */ + {0x040C, CRL_REG_LEN_08BIT, 0x0F}, /* Width after cropping [12:8] */ + {0x040D, CRL_REG_LEN_08BIT, 0xD8}, /* Width after cropping [7:0] */ + {0x040E, CRL_REG_LEN_08BIT, 0x0B}, /* Height after cropping [12:8] */ + {0x040F, CRL_REG_LEN_08BIT, 0xE0}, /* Height after cropping [7:0] */ + /* Output Crop */ + {0x034C, CRL_REG_LEN_08BIT, 0x0F}, /* X output size [12:8] */ + {0x034D, CRL_REG_LEN_08BIT, 0xD8}, /* X output size [7:0] */ + {0x034E, CRL_REG_LEN_08BIT, 0x0B}, /* Y output size [12:8] */ + {0x034F, CRL_REG_LEN_08BIT, 0xE0}, /* Y output size [7:0] */ +}; + +static struct crl_register_write_rep imx477_4056_3040_19MHZ_DOL_3f_master[] = { + /* Frame Horizontal Clock Count */ + 
{0x0342, CRL_REG_LEN_08BIT, 0x39}, /* Line length [15:8] */ + {0x0343, CRL_REG_LEN_08BIT, 0x14}, /* Line length [7:0] */ + /* Frame Vertical Clock Count */ + {0x0340, CRL_REG_LEN_08BIT, 0x20}, /* Frame length [15:8] */ + {0x0341, CRL_REG_LEN_08BIT, 0x11}, /* Frame length [7:0] */ + /* Visible Size */ + {0x0344, CRL_REG_LEN_08BIT, 0x00}, /* Analog cropping start X [12:8] */ + {0x0345, CRL_REG_LEN_08BIT, 0x00}, /* Analog cropping start X [7:0] */ + {0x0346, CRL_REG_LEN_08BIT, 0x00}, /* Analog cropping start Y [12:8] */ + {0x0347, CRL_REG_LEN_08BIT, 0x00}, /* Analog cropping start Y [7:0] */ + {0x0348, CRL_REG_LEN_08BIT, 0x0F}, /* Analog cropping end X [12:8] */ + {0x0349, CRL_REG_LEN_08BIT, 0xD7}, /* Analog cropping end X [7:0] */ + {0x034A, CRL_REG_LEN_08BIT, 0x0B}, /* Analog cropping end Y [12:8] */ + {0x034B, CRL_REG_LEN_08BIT, 0xDF}, /* Analog cropping end Y [7:0] */ + /* Mode Setting */ + {0x00E3, CRL_REG_LEN_08BIT, 0x01}, /* DOL-HDR Enable */ + {0x00E4, CRL_REG_LEN_08BIT, 0x02}, /* DOL Mode: 2 frames in DOL-HDR */ + /* virtual channel ID of visible line and embedded line of DOL 2nd frame */ + {0x3E10, CRL_REG_LEN_08BIT, 0x01}, + /* virtual channel ID of visible line and embedded line of DOL 3rd frame */ + {0x3E11, CRL_REG_LEN_08BIT, 0x02}, + {0x0220, CRL_REG_LEN_08BIT, 0x00}, /* Undocumented */ + {0x0221, CRL_REG_LEN_08BIT, 0x11}, /* Undocumented */ + {0x0381, CRL_REG_LEN_08BIT, 0x01}, /* Num of pixels skipped, even -> odd */ + {0x0383, CRL_REG_LEN_08BIT, 0x01}, /* Num of pixels skipped, odd -> even */ + {0x0385, CRL_REG_LEN_08BIT, 0x01}, /* Num of lines skipped, even -> odd */ + {0x0387, CRL_REG_LEN_08BIT, 0x01}, /* Num of lines skipped, odd -> even */ + {0x0900, CRL_REG_LEN_08BIT, 0x00}, /* Binning mode: Disable */ + {0x0901, CRL_REG_LEN_08BIT, 0x11}, /* Binning Type for Horizontal */ + {0x0902, CRL_REG_LEN_08BIT, 0x02}, /* Binning Type for Vertical */ + {0x3140, CRL_REG_LEN_08BIT, 0x02}, /* Undocumented */ + {0x3C00, CRL_REG_LEN_08BIT, 0x00}, /* Undocumented 
*/ + {0x3C01, CRL_REG_LEN_08BIT, 0x03}, /* Undocumented */ + {0x3C02, CRL_REG_LEN_08BIT, 0xDC}, /* Undocumented */ + {0x3F0D, CRL_REG_LEN_08BIT, 0x00}, /* AD converter: 10 bit */ + {0x5748, CRL_REG_LEN_08BIT, 0x07}, /* Undocumented */ + {0x5749, CRL_REG_LEN_08BIT, 0xFF}, /* Undocumented */ + {0x574A, CRL_REG_LEN_08BIT, 0x00}, /* Undocumented */ + {0x574B, CRL_REG_LEN_08BIT, 0x00}, /* Undocumented */ + {0x7B75, CRL_REG_LEN_08BIT, 0x0E}, /* Undocumented */ + {0x7B76, CRL_REG_LEN_08BIT, 0x09}, /* Undocumented */ + {0x7B77, CRL_REG_LEN_08BIT, 0x0C}, /* Undocumented */ + {0x7B78, CRL_REG_LEN_08BIT, 0x06}, /* Undocumented */ + {0x7B79, CRL_REG_LEN_08BIT, 0x3B}, /* Undocumented */ + {0x7B53, CRL_REG_LEN_08BIT, 0x01}, /* Undocumented */ + {0x9369, CRL_REG_LEN_08BIT, 0x5A}, /* Undocumented */ + {0x936B, CRL_REG_LEN_08BIT, 0x55}, /* Undocumented */ + {0x936D, CRL_REG_LEN_08BIT, 0x28}, /* Undocumented */ + {0x9304, CRL_REG_LEN_08BIT, 0x03}, /* Undocumented */ + {0x9305, CRL_REG_LEN_08BIT, 0x00}, /* Undocumented */ + {0x9E9A, CRL_REG_LEN_08BIT, 0x2F}, /* Undocumented */ + {0x9E9B, CRL_REG_LEN_08BIT, 0x2F}, /* Undocumented */ + {0x9E9C, CRL_REG_LEN_08BIT, 0x2F}, /* Undocumented */ + {0x9E9D, CRL_REG_LEN_08BIT, 0x00}, /* Undocumented */ + {0x9E9E, CRL_REG_LEN_08BIT, 0x00}, /* Undocumented */ + {0x9E9F, CRL_REG_LEN_08BIT, 0x00}, /* Undocumented */ + {0xA2A9, CRL_REG_LEN_08BIT, 0x60}, /* Undocumented */ + {0xA2B7, CRL_REG_LEN_08BIT, 0x00}, /* Undocumented */ + /* Digital Crop & Scaling */ + {0x0401, CRL_REG_LEN_08BIT, 0x00}, /* Scaling mode: No Scaling */ + {0x0404, CRL_REG_LEN_08BIT, 0x00}, /* Down Scaling Factor M [8] */ + {0x0405, CRL_REG_LEN_08BIT, 0x10}, /* Down Scaling Factor M [7:0] */ + {0x0408, CRL_REG_LEN_08BIT, 0x00}, /* Crop Offset from X [12:8] */ + {0x0409, CRL_REG_LEN_08BIT, 0x00}, /* Crop Offset from X [7:0] */ + {0x040A, CRL_REG_LEN_08BIT, 0x00}, /* Crop Offset from Y [12:8] */ + {0x040B, CRL_REG_LEN_08BIT, 0x00}, /* Crop Offset from Y [7:0] */ + {0x040C, 
CRL_REG_LEN_08BIT, 0x0F}, /* Width after cropping [12:8] */ + {0x040D, CRL_REG_LEN_08BIT, 0xD8}, /* Width after cropping [7:0] */ + {0x040E, CRL_REG_LEN_08BIT, 0x0B}, /* Height after cropping [12:8] */ + {0x040F, CRL_REG_LEN_08BIT, 0xE0}, /* Height after cropping [7:0] */ + /* Output Crop */ + {0x034C, CRL_REG_LEN_08BIT, 0x0F}, /* X output size [12:8] */ + {0x034D, CRL_REG_LEN_08BIT, 0xD8}, /* X output size [7:0] */ + {0x034E, CRL_REG_LEN_08BIT, 0x0B}, /* Y output size [12:8] */ + {0x034F, CRL_REG_LEN_08BIT, 0xE0}, /* Y output size [7:0] */ +}; + +static struct crl_register_write_rep imx477_4056_2288_19MHZ_master[] = { + /* Frame Horizontal Clock Count */ + {0x0342, CRL_REG_LEN_08BIT, 0x39}, /* Line length [15:8] */ + {0x0343, CRL_REG_LEN_08BIT, 0x14}, /* Line length [7:0] */ + + /* Frame Vertical Clock Count */ + {0x0340, CRL_REG_LEN_08BIT, 0x20}, /* Frame length [15:8] */ + {0x0341, CRL_REG_LEN_08BIT, 0x11}, /* Frame length [7:0] */ + + /* Visible Size */ + /* (0,376) to (4055, 2664) */ + {0x0344, CRL_REG_LEN_08BIT, 0x00}, /* Analog cropping start X [12:8] */ + {0x0345, CRL_REG_LEN_08BIT, 0x00}, /* Analog cropping start X [7:0] */ + {0x0346, CRL_REG_LEN_08BIT, 0x01}, /* Analog cropping start Y [12:8] */ + {0x0347, CRL_REG_LEN_08BIT, 0x78}, /* Analog cropping start Y [7:0] */ + {0x0348, CRL_REG_LEN_08BIT, 0x0F}, /* Analog cropping end X [12:8] */ + {0x0349, CRL_REG_LEN_08BIT, 0xD7}, /* Analog cropping end X [7:0] */ + {0x034A, CRL_REG_LEN_08BIT, 0x0A}, /* Analog cropping end Y [12:8] */ + {0x034B, CRL_REG_LEN_08BIT, 0x68}, /* Analog cropping end Y [7:0] */ + + /* Mode Setting */ + {0x00E3, CRL_REG_LEN_08BIT, 0x00}, /* DOL-HDR Disable */ + {0x00E4, CRL_REG_LEN_08BIT, 0x00}, /* DOL Mode: DOL-HDR Disable */ + {0x0220, CRL_REG_LEN_08BIT, 0x00}, /* Undocumented */ + {0x0221, CRL_REG_LEN_08BIT, 0x11}, /* Undocumented */ + {0x0381, CRL_REG_LEN_08BIT, 0x01}, /* Num of pixels skipped, even -> odd */ + {0x0383, CRL_REG_LEN_08BIT, 0x01}, /* Num of pixels skipped, odd -> even 
*/ + {0x0385, CRL_REG_LEN_08BIT, 0x01}, /* Num of lines skipped, even -> odd */ + {0x0387, CRL_REG_LEN_08BIT, 0x01}, /* Num of lines skipped, odd -> even */ + {0x0900, CRL_REG_LEN_08BIT, 0x00}, /* Binning mode: Disable */ + {0x0901, CRL_REG_LEN_08BIT, 0x11}, /* Binning Type for Horizontal */ + {0x0902, CRL_REG_LEN_08BIT, 0x02}, /* Binning Type for Vertical */ + {0x3140, CRL_REG_LEN_08BIT, 0x02}, /* Undocumented */ + {0x3C00, CRL_REG_LEN_08BIT, 0x00}, /* Undocumented */ + {0x3C01, CRL_REG_LEN_08BIT, 0x03}, /* Undocumented */ + {0x3C02, CRL_REG_LEN_08BIT, 0xDC}, /* Undocumented */ + {0x3F0D, CRL_REG_LEN_08BIT, 0x00}, /* AD converter: 10 bit */ + {0x5748, CRL_REG_LEN_08BIT, 0x07}, /* Undocumented */ + {0x5749, CRL_REG_LEN_08BIT, 0xFF}, /* Undocumented */ + {0x574A, CRL_REG_LEN_08BIT, 0x00}, /* Undocumented */ + {0x574B, CRL_REG_LEN_08BIT, 0x00}, /* Undocumented */ + {0x7B75, CRL_REG_LEN_08BIT, 0x0E}, /* Undocumented */ + {0x7B76, CRL_REG_LEN_08BIT, 0x09}, /* Undocumented */ + {0x7B77, CRL_REG_LEN_08BIT, 0x0C}, /* Undocumented */ + {0x7B78, CRL_REG_LEN_08BIT, 0x06}, /* Undocumented */ + {0x7B79, CRL_REG_LEN_08BIT, 0x3B}, /* Undocumented */ + {0x7B53, CRL_REG_LEN_08BIT, 0x01}, /* Undocumented */ + {0x9369, CRL_REG_LEN_08BIT, 0x5A}, /* Undocumented */ + {0x936B, CRL_REG_LEN_08BIT, 0x55}, /* Undocumented */ + {0x936D, CRL_REG_LEN_08BIT, 0x28}, /* Undocumented */ + {0x9304, CRL_REG_LEN_08BIT, 0x03}, /* Undocumented */ + {0x9305, CRL_REG_LEN_08BIT, 0x00}, /* Undocumented */ + {0x9E9A, CRL_REG_LEN_08BIT, 0x2F}, /* Undocumented */ + {0x9E9B, CRL_REG_LEN_08BIT, 0x2F}, /* Undocumented */ + {0x9E9C, CRL_REG_LEN_08BIT, 0x2F}, /* Undocumented */ + {0x9E9D, CRL_REG_LEN_08BIT, 0x00}, /* Undocumented */ + {0x9E9E, CRL_REG_LEN_08BIT, 0x00}, /* Undocumented */ + {0x9E9F, CRL_REG_LEN_08BIT, 0x00}, /* Undocumented */ + {0xA2A9, CRL_REG_LEN_08BIT, 0x60}, /* Undocumented */ + {0xA2B7, CRL_REG_LEN_08BIT, 0x00}, /* Undocumented */ + + /* Digital Crop & Scaling */ + {0x0401, 
CRL_REG_LEN_08BIT, 0x00}, /* Scaling mode: No Scaling */ + {0x0404, CRL_REG_LEN_08BIT, 0x00}, /* Down Scaling Factor M [8] */ + {0x0405, CRL_REG_LEN_08BIT, 0x10}, /* Down Scaling Factor M [7:0] */ + {0x0408, CRL_REG_LEN_08BIT, 0x00}, /* Crop Offset from X [12:8] */ + {0x0409, CRL_REG_LEN_08BIT, 0x00}, /* Crop Offset from X [7:0] */ + {0x040A, CRL_REG_LEN_08BIT, 0x00}, /* Crop Offset from Y [12:8] */ + {0x040B, CRL_REG_LEN_08BIT, 0x00}, /* Crop Offset from Y [7:0] */ + {0x040C, CRL_REG_LEN_08BIT, 0x0F}, /* Width after cropping [12:8] */ + {0x040D, CRL_REG_LEN_08BIT, 0xD8}, /* Width after cropping [7:0] */ + {0x040E, CRL_REG_LEN_08BIT, 0x08}, /* Height after cropping [12:8] */ + {0x040F, CRL_REG_LEN_08BIT, 0xF0}, /* Height after cropping [7:0] */ + + /* Output Crop */ + {0x034C, CRL_REG_LEN_08BIT, 0x0F}, /* X output size [12:8] */ + {0x034D, CRL_REG_LEN_08BIT, 0xD8}, /* X output size [7:0] */ + {0x034E, CRL_REG_LEN_08BIT, 0x08}, /* Y output size [12:8] */ + {0x034F, CRL_REG_LEN_08BIT, 0xF0}, /* Y output size [7:0] */ +}; + + +static struct crl_register_write_rep imx477_2832_1632_19MHZ_master[] = { + /* Frame Horizontal Clock Count */ + {0x0342, CRL_REG_LEN_08BIT, 0x39}, /* Line length [15:8] */ + {0x0343, CRL_REG_LEN_08BIT, 0x14}, /* Line length [7:0] */ + + /* Frame Vertical Clock Count */ + {0x0340, CRL_REG_LEN_08BIT, 0x20}, /* Frame length [15:8] */ + {0x0341, CRL_REG_LEN_08BIT, 0x11}, /* Frame length [7:0] */ + + /* Visible Size */ + {0x0344, CRL_REG_LEN_08BIT, 0x00}, /* Analog cropping start X [12:8] */ + {0x0345, CRL_REG_LEN_08BIT, 0x00}, /* Analog cropping start X [7:0] */ + {0x0346, CRL_REG_LEN_08BIT, 0x00}, /* Analog cropping start Y [12:8] */ + {0x0347, CRL_REG_LEN_08BIT, 0x00}, /* Analog cropping start Y [7:0] */ + {0x0348, CRL_REG_LEN_08BIT, 0x0F}, /* Analog cropping end X [12:8] */ + {0x0349, CRL_REG_LEN_08BIT, 0xD7}, /* Analog cropping end X [7:0] */ + {0x034A, CRL_REG_LEN_08BIT, 0x0B}, /* Analog cropping end Y [12:8] */ + {0x034B, CRL_REG_LEN_08BIT, 
0xDF}, /* Analog cropping end Y [7:0] */ + + /* Mode Setting */ + {0x00E3, CRL_REG_LEN_08BIT, 0x00}, /* DOL-HDR Disable */ + {0x00E4, CRL_REG_LEN_08BIT, 0x00}, /* DOL Mode: DOL-HDR Disable */ + {0x0220, CRL_REG_LEN_08BIT, 0x00}, /* Undocumented */ + {0x0221, CRL_REG_LEN_08BIT, 0x11}, /* Undocumented */ + {0x0381, CRL_REG_LEN_08BIT, 0x01}, /* Num of pixels skipped, even -> odd */ + {0x0383, CRL_REG_LEN_08BIT, 0x01}, /* Num of pixels skipped, odd -> even */ + {0x0385, CRL_REG_LEN_08BIT, 0x01}, /* Num of lines skipped, even -> odd */ + {0x0387, CRL_REG_LEN_08BIT, 0x01}, /* Num of lines skipped, odd -> even */ + {0x0900, CRL_REG_LEN_08BIT, 0x00}, /* Binning mode: Disable */ + {0x0901, CRL_REG_LEN_08BIT, 0x11}, /* Binning Type for Horizontal */ + {0x0902, CRL_REG_LEN_08BIT, 0x02}, /* Binning Type for Vertical */ + {0x3140, CRL_REG_LEN_08BIT, 0x02}, /* Undocumented */ + {0x3C00, CRL_REG_LEN_08BIT, 0x00}, /* Undocumented */ + {0x3C01, CRL_REG_LEN_08BIT, 0x03}, /* Undocumented */ + {0x3C02, CRL_REG_LEN_08BIT, 0xDC}, /* Undocumented */ + {0x3F0D, CRL_REG_LEN_08BIT, 0x00}, /* AD converter: 10 bit */ + {0x5748, CRL_REG_LEN_08BIT, 0x07}, /* Undocumented */ + {0x5749, CRL_REG_LEN_08BIT, 0xFF}, /* Undocumented */ + {0x574A, CRL_REG_LEN_08BIT, 0x00}, /* Undocumented */ + {0x574B, CRL_REG_LEN_08BIT, 0x00}, /* Undocumented */ + {0x7B75, CRL_REG_LEN_08BIT, 0x0E}, /* Undocumented */ + {0x7B76, CRL_REG_LEN_08BIT, 0x09}, /* Undocumented */ + {0x7B77, CRL_REG_LEN_08BIT, 0x0C}, /* Undocumented */ + {0x7B78, CRL_REG_LEN_08BIT, 0x06}, /* Undocumented */ + {0x7B79, CRL_REG_LEN_08BIT, 0x3B}, /* Undocumented */ + {0x7B53, CRL_REG_LEN_08BIT, 0x01}, /* Undocumented */ + {0x9369, CRL_REG_LEN_08BIT, 0x5A}, /* Undocumented */ + {0x936B, CRL_REG_LEN_08BIT, 0x55}, /* Undocumented */ + {0x936D, CRL_REG_LEN_08BIT, 0x28}, /* Undocumented */ + {0x9304, CRL_REG_LEN_08BIT, 0x03}, /* Undocumented */ + {0x9305, CRL_REG_LEN_08BIT, 0x00}, /* Undocumented */ + {0x9E9A, CRL_REG_LEN_08BIT, 0x2F}, /* Undocumented 
*/ + {0x9E9B, CRL_REG_LEN_08BIT, 0x2F}, /* Undocumented */ + {0x9E9C, CRL_REG_LEN_08BIT, 0x2F}, /* Undocumented */ + {0x9E9D, CRL_REG_LEN_08BIT, 0x00}, /* Undocumented */ + {0x9E9E, CRL_REG_LEN_08BIT, 0x00}, /* Undocumented */ + {0x9E9F, CRL_REG_LEN_08BIT, 0x00}, /* Undocumented */ + {0xA2A9, CRL_REG_LEN_08BIT, 0x60}, /* Undocumented */ + {0xA2B7, CRL_REG_LEN_08BIT, 0x00}, /* Undocumented */ + + /* Digital Crop & Scaling */ + /* scale factor 16/22, 3894x2244 to 2832x1632 */ + {0x0401, CRL_REG_LEN_08BIT, 0x02}, /* Scaling mode: Scaling */ + {0x0404, CRL_REG_LEN_08BIT, 0x00}, /* Down Scaling Factor M [8] */ + {0x0405, CRL_REG_LEN_08BIT, 0x16}, /* Down Scaling Factor M [7:0] */ + {0x0408, CRL_REG_LEN_08BIT, 0x00}, /* Crop Offset from X [12:8] */ + {0x0409, CRL_REG_LEN_08BIT, 0x52}, /* Crop Offset from X [7:0] */ + {0x040A, CRL_REG_LEN_08BIT, 0x01}, /* Crop Offset from Y [12:8] */ + {0x040B, CRL_REG_LEN_08BIT, 0x8E}, /* Crop Offset from Y [7:0] */ + {0x040C, CRL_REG_LEN_08BIT, 0x0F}, /* Width after cropping [12:8] */ + {0x040D, CRL_REG_LEN_08BIT, 0x36}, /* Width after cropping [7:0] */ + {0x040E, CRL_REG_LEN_08BIT, 0x08}, /* Height after cropping [12:8] */ + {0x040F, CRL_REG_LEN_08BIT, 0xC4}, /* Height after cropping [7:0] */ + + /* Output Crop */ + {0x034C, CRL_REG_LEN_08BIT, 0x0B}, /* X output size [12:8] */ + {0x034D, CRL_REG_LEN_08BIT, 0x10}, /* X output size [7:0] */ + {0x034E, CRL_REG_LEN_08BIT, 0x06}, /* Y output size [12:8] */ + {0x034F, CRL_REG_LEN_08BIT, 0x60}, /* Y output size [7:0] */ +}; + + +static struct crl_register_write_rep imx477_2028_1128_19MHZ_master[] = { + /* Frame Horizontal Clock Count */ + {0x0342, CRL_REG_LEN_08BIT, 0x39}, /* Line length [15:8] */ + {0x0343, CRL_REG_LEN_08BIT, 0x14}, /* Line length [7:0] */ + + /* Frame Vertical Clock Count */ + {0x0340, CRL_REG_LEN_08BIT, 0x20}, /* Frame length [15:8] */ + {0x0341, CRL_REG_LEN_08BIT, 0x11}, /* Frame length [7:0] */ + + /* Visible Size */ + {0x0344, CRL_REG_LEN_08BIT, 0x00}, /* Analog 
cropping start X [12:8] */ + {0x0345, CRL_REG_LEN_08BIT, 0x00}, /* Analog cropping start X [7:0] */ + {0x0346, CRL_REG_LEN_08BIT, 0x01}, /* Analog cropping start Y [12:8] */ + {0x0347, CRL_REG_LEN_08BIT, 0x88}, /* Analog cropping start Y [7:0] */ + {0x0348, CRL_REG_LEN_08BIT, 0x0F}, /* Analog cropping end X [12:8] */ + {0x0349, CRL_REG_LEN_08BIT, 0xD7}, /* Analog cropping end X [7:0] */ + {0x034A, CRL_REG_LEN_08BIT, 0x0A}, /* Analog cropping end Y [12:8] */ + {0x034B, CRL_REG_LEN_08BIT, 0x58}, /* Analog cropping end Y [7:0] */ + + /* Mode Setting */ + {0x00E3, CRL_REG_LEN_08BIT, 0x00}, /* DOL-HDR Disable */ + {0x00E4, CRL_REG_LEN_08BIT, 0x00}, /* DOL Mode: DOL-HDR Disable */ + {0x0220, CRL_REG_LEN_08BIT, 0x00}, /* Undocumented */ + {0x0221, CRL_REG_LEN_08BIT, 0x11}, /* Undocumented */ + {0x0381, CRL_REG_LEN_08BIT, 0x01}, /* Num of pixels skipped, even -> odd */ + {0x0383, CRL_REG_LEN_08BIT, 0x01}, /* Num of pixels skipped, odd -> even */ + {0x0385, CRL_REG_LEN_08BIT, 0x01}, /* Num of lines skipped, even -> odd */ + {0x0387, CRL_REG_LEN_08BIT, 0x01}, /* Num of lines skipped, odd -> even */ + {0x0900, CRL_REG_LEN_08BIT, 0x01}, /* Binning mode: Disable */ + {0x0901, CRL_REG_LEN_08BIT, 0x22}, /* Binning Type for Horizontal */ + {0x0902, CRL_REG_LEN_08BIT, 0x02}, /* Binning Type for Vertical */ + {0x3140, CRL_REG_LEN_08BIT, 0x02}, /* Undocumented */ + {0x3C00, CRL_REG_LEN_08BIT, 0x00}, /* Undocumented */ + {0x3C01, CRL_REG_LEN_08BIT, 0x03}, /* Undocumented */ + {0x3C02, CRL_REG_LEN_08BIT, 0xDC}, /* Undocumented */ + {0x3F0D, CRL_REG_LEN_08BIT, 0x00}, /* AD converter: 10 bit */ + {0x5748, CRL_REG_LEN_08BIT, 0x07}, /* Undocumented */ + {0x5749, CRL_REG_LEN_08BIT, 0xFF}, /* Undocumented */ + {0x574A, CRL_REG_LEN_08BIT, 0x00}, /* Undocumented */ + {0x574B, CRL_REG_LEN_08BIT, 0x00}, /* Undocumented */ + {0x7B75, CRL_REG_LEN_08BIT, 0x0E}, /* Undocumented */ + {0x7B76, CRL_REG_LEN_08BIT, 0x09}, /* Undocumented */ + {0x7B77, CRL_REG_LEN_08BIT, 0x0C}, /* Undocumented */ + 
{0x7B78, CRL_REG_LEN_08BIT, 0x06}, /* Undocumented */ + {0x7B79, CRL_REG_LEN_08BIT, 0x3B}, /* Undocumented */ + {0x7B53, CRL_REG_LEN_08BIT, 0x01}, /* Undocumented */ + {0x9369, CRL_REG_LEN_08BIT, 0x5A}, /* Undocumented */ + {0x936B, CRL_REG_LEN_08BIT, 0x55}, /* Undocumented */ + {0x936D, CRL_REG_LEN_08BIT, 0x28}, /* Undocumented */ + {0x9304, CRL_REG_LEN_08BIT, 0x03}, /* Undocumented */ + {0x9305, CRL_REG_LEN_08BIT, 0x00}, /* Undocumented */ + {0x9E9A, CRL_REG_LEN_08BIT, 0x2F}, /* Undocumented */ + {0x9E9B, CRL_REG_LEN_08BIT, 0x2F}, /* Undocumented */ + {0x9E9C, CRL_REG_LEN_08BIT, 0x2F}, /* Undocumented */ + {0x9E9D, CRL_REG_LEN_08BIT, 0x00}, /* Undocumented */ + {0x9E9E, CRL_REG_LEN_08BIT, 0x00}, /* Undocumented */ + {0x9E9F, CRL_REG_LEN_08BIT, 0x00}, /* Undocumented */ + {0xA2A9, CRL_REG_LEN_08BIT, 0x60}, /* Undocumented */ + {0xA2B7, CRL_REG_LEN_08BIT, 0x00}, /* Undocumented */ + + /* Digital Crop & Scaling */ + {0x0401, CRL_REG_LEN_08BIT, 0x00}, /* Scaling mode: No Scaling */ + {0x0404, CRL_REG_LEN_08BIT, 0x00}, /* Down Scaling Factor M [8] */ + {0x0405, CRL_REG_LEN_08BIT, 0x10}, /* Down Scaling Factor M [7:0] */ + {0x0408, CRL_REG_LEN_08BIT, 0x00}, /* Crop Offset from X [12:8] */ + {0x0409, CRL_REG_LEN_08BIT, 0x00}, /* Crop Offset from X [7:0] */ + {0x040A, CRL_REG_LEN_08BIT, 0x00}, /* Crop Offset from Y [12:8] */ + {0x040B, CRL_REG_LEN_08BIT, 0x00}, /* Crop Offset from Y [7:0] */ + {0x040C, CRL_REG_LEN_08BIT, 0x07}, /* Width after cropping [12:8] */ + {0x040D, CRL_REG_LEN_08BIT, 0xEC}, /* Width after cropping [7:0] */ + {0x040E, CRL_REG_LEN_08BIT, 0x04}, /* Height after cropping [12:8] */ + {0x040F, CRL_REG_LEN_08BIT, 0x68}, /* Height after cropping [7:0] */ + + /* Output Crop */ + {0x034C, CRL_REG_LEN_08BIT, 0x07}, /* X output size [12:8] */ + {0x034D, CRL_REG_LEN_08BIT, 0xEC}, /* X output size [7:0] */ + {0x034E, CRL_REG_LEN_08BIT, 0x04}, /* Y output size [12:8] */ + {0x034F, CRL_REG_LEN_08BIT, 0x68}, /* Y output size [7:0] */ +}; + +static struct 
crl_register_write_rep imx477_1296_768_19MHZ_master[] = { + /* Frame Horizontal Clock Count */ + {0x0342, CRL_REG_LEN_08BIT, 0x39}, /* Line length [15:8] */ + {0x0343, CRL_REG_LEN_08BIT, 0x14}, /* Line length [7:0] */ + + /* Frame Vertical Clock Count */ + {0x0340, CRL_REG_LEN_08BIT, 0x20}, /* Frame length [15:8] */ + {0x0341, CRL_REG_LEN_08BIT, 0x11}, /* Frame length [7:0] */ + + /* Visible Size */ + {0x0344, CRL_REG_LEN_08BIT, 0x00}, /* Analog cropping start X [12:8] */ + {0x0345, CRL_REG_LEN_08BIT, 0x00}, /* Analog cropping start X [7:0] */ + {0x0346, CRL_REG_LEN_08BIT, 0x00}, /* Analog cropping start Y [12:8] */ + {0x0347, CRL_REG_LEN_08BIT, 0x00}, /* Analog cropping start Y [7:0] */ + {0x0348, CRL_REG_LEN_08BIT, 0x0F}, /* Analog cropping end X [12:8] */ + {0x0349, CRL_REG_LEN_08BIT, 0xD7}, /* Analog cropping end X [7:0] */ + {0x034A, CRL_REG_LEN_08BIT, 0x0B}, /* Analog cropping end Y [12:8] */ + {0x034B, CRL_REG_LEN_08BIT, 0xDF}, /* Analog cropping end Y [7:0] */ + + /* Mode Setting */ + {0x00E3, CRL_REG_LEN_08BIT, 0x00}, /* DOL-HDR Disable */ + {0x00E4, CRL_REG_LEN_08BIT, 0x00}, /* DOL Mode: DOL-HDR Disable */ + {0x0220, CRL_REG_LEN_08BIT, 0x00}, /* Undocumented */ + {0x0221, CRL_REG_LEN_08BIT, 0x11}, /* Undocumented */ + {0x0381, CRL_REG_LEN_08BIT, 0x01}, /* Num of pixels skipped, even -> odd */ + {0x0383, CRL_REG_LEN_08BIT, 0x01}, /* Num of pixels skipped, odd -> even */ + {0x0385, CRL_REG_LEN_08BIT, 0x01}, /* Num of lines skipped, even -> odd */ + {0x0387, CRL_REG_LEN_08BIT, 0x01}, /* Num of lines skipped, odd -> even */ + {0x0900, CRL_REG_LEN_08BIT, 0x00}, /* Binning mode: Disable */ + {0x0901, CRL_REG_LEN_08BIT, 0x11}, /* Binning Type for Horizontal */ + {0x0902, CRL_REG_LEN_08BIT, 0x02}, /* Binning Type for Vertical */ + {0x3140, CRL_REG_LEN_08BIT, 0x02}, /* Undocumented */ + {0x3C00, CRL_REG_LEN_08BIT, 0x00}, /* Undocumented */ + {0x3C01, CRL_REG_LEN_08BIT, 0x03}, /* Undocumented */ + {0x3C02, CRL_REG_LEN_08BIT, 0xDC}, /* Undocumented */ + {0x3F0D, 
CRL_REG_LEN_08BIT, 0x00}, /* AD converter: 10 bit */ + {0x5748, CRL_REG_LEN_08BIT, 0x07}, /* Undocumented */ + {0x5749, CRL_REG_LEN_08BIT, 0xFF}, /* Undocumented */ + {0x574A, CRL_REG_LEN_08BIT, 0x00}, /* Undocumented */ + {0x574B, CRL_REG_LEN_08BIT, 0x00}, /* Undocumented */ + {0x7B75, CRL_REG_LEN_08BIT, 0x0E}, /* Undocumented */ + {0x7B76, CRL_REG_LEN_08BIT, 0x09}, /* Undocumented */ + {0x7B77, CRL_REG_LEN_08BIT, 0x0C}, /* Undocumented */ + {0x7B78, CRL_REG_LEN_08BIT, 0x06}, /* Undocumented */ + {0x7B79, CRL_REG_LEN_08BIT, 0x3B}, /* Undocumented */ + {0x7B53, CRL_REG_LEN_08BIT, 0x01}, /* Undocumented */ + {0x9369, CRL_REG_LEN_08BIT, 0x5A}, /* Undocumented */ + {0x936B, CRL_REG_LEN_08BIT, 0x55}, /* Undocumented */ + {0x936D, CRL_REG_LEN_08BIT, 0x28}, /* Undocumented */ + {0x9304, CRL_REG_LEN_08BIT, 0x03}, /* Undocumented */ + {0x9305, CRL_REG_LEN_08BIT, 0x00}, /* Undocumented */ + {0x9E9A, CRL_REG_LEN_08BIT, 0x2F}, /* Undocumented */ + {0x9E9B, CRL_REG_LEN_08BIT, 0x2F}, /* Undocumented */ + {0x9E9C, CRL_REG_LEN_08BIT, 0x2F}, /* Undocumented */ + {0x9E9D, CRL_REG_LEN_08BIT, 0x00}, /* Undocumented */ + {0x9E9E, CRL_REG_LEN_08BIT, 0x00}, /* Undocumented */ + {0x9E9F, CRL_REG_LEN_08BIT, 0x00}, /* Undocumented */ + {0xA2A9, CRL_REG_LEN_08BIT, 0x60}, /* Undocumented */ + {0xA2B7, CRL_REG_LEN_08BIT, 0x00}, /* Undocumented */ + + /* Digital Crop & Scaling */ + /* scale factor 16/50, 4050x2400 to 1296x768 */ + {0x0401, CRL_REG_LEN_08BIT, 0x02}, /* Scaling mode: Scaling */ + {0x0404, CRL_REG_LEN_08BIT, 0x00}, /* Down Scaling Factor M [8] */ + {0x0405, CRL_REG_LEN_08BIT, 0x32}, /* Down Scaling Factor M [7:0] */ + {0x0408, CRL_REG_LEN_08BIT, 0x00}, /* Crop Offset from X [12:8] */ + {0x0409, CRL_REG_LEN_08BIT, 0x04}, /* Crop Offset from X [7:0] */ + {0x040A, CRL_REG_LEN_08BIT, 0x01}, /* Crop Offset from Y [12:8] */ + {0x040B, CRL_REG_LEN_08BIT, 0x40}, /* Crop Offset from Y [7:0] */ + {0x040C, CRL_REG_LEN_08BIT, 0x0F}, /* Width after cropping [12:8] */ + {0x040D, 
CRL_REG_LEN_08BIT, 0xD2}, /* Width after cropping [7:0] */ + {0x040E, CRL_REG_LEN_08BIT, 0x09}, /* Height after cropping [12:8] */ + {0x040F, CRL_REG_LEN_08BIT, 0x60}, /* Height after cropping [7:0] */ + + /* Output Crop */ + {0x034C, CRL_REG_LEN_08BIT, 0x05}, /* X output size [12:8] */ + {0x034D, CRL_REG_LEN_08BIT, 0x10}, /* X output size [7:0] */ + {0x034E, CRL_REG_LEN_08BIT, 0x03}, /* Y output size [12:8] */ + {0x034F, CRL_REG_LEN_08BIT, 0x00}, /* Y output size [7:0] */ +}; + +static struct crl_register_write_rep imx477_656_512_19MHZ_master[] = { + /* Frame Horizontal Clock Count */ + {0x0342, CRL_REG_LEN_08BIT, 0x39}, /* Line length [15:8] */ + {0x0343, CRL_REG_LEN_08BIT, 0x14}, /* Line length [7:0] */ + + /* Frame Vertical Clock Count */ + {0x0340, CRL_REG_LEN_08BIT, 0x20}, /* Frame length [15:8] */ + {0x0341, CRL_REG_LEN_08BIT, 0x11}, /* Frame length [7:0] */ + + /* Visible Size */ + {0x0344, CRL_REG_LEN_08BIT, 0x00}, /* Analog cropping start X [12:8] */ + {0x0345, CRL_REG_LEN_08BIT, 0x00}, /* Analog cropping start X [7:0] */ + {0x0346, CRL_REG_LEN_08BIT, 0x00}, /* Analog cropping start Y [12:8] */ + {0x0347, CRL_REG_LEN_08BIT, 0x00}, /* Analog cropping start Y [7:0] */ + {0x0348, CRL_REG_LEN_08BIT, 0x0F}, /* Analog cropping end X [12:8] */ + {0x0349, CRL_REG_LEN_08BIT, 0xD7}, /* Analog cropping end X [7:0] */ + {0x034A, CRL_REG_LEN_08BIT, 0x0B}, /* Analog cropping end Y [12:8] */ + {0x034B, CRL_REG_LEN_08BIT, 0xDF}, /* Analog cropping end Y [7:0] */ + + /* Mode Setting */ + {0x00E3, CRL_REG_LEN_08BIT, 0x00}, /* DOL-HDR Disable */ + {0x00E4, CRL_REG_LEN_08BIT, 0x00}, /* DOL Mode: DOL-HDR Disable */ + {0x0220, CRL_REG_LEN_08BIT, 0x00}, /* Undocumented */ + {0x0221, CRL_REG_LEN_08BIT, 0x11}, /* Undocumented */ + {0x0381, CRL_REG_LEN_08BIT, 0x01}, /* Num of pixels skipped, even -> odd */ + {0x0383, CRL_REG_LEN_08BIT, 0x01}, /* Num of pixels skipped, odd -> even */ + {0x0385, CRL_REG_LEN_08BIT, 0x01}, /* Num of lines skipped, even -> odd */ + {0x0387, 
CRL_REG_LEN_08BIT, 0x01}, /* Num of lines skipped, odd -> even */ + {0x0900, CRL_REG_LEN_08BIT, 0x00}, /* Binning mode: Disable */ + {0x0901, CRL_REG_LEN_08BIT, 0x11}, /* Binning Type for Horizontal */ + {0x0902, CRL_REG_LEN_08BIT, 0x02}, /* Binning Type for Vertical */ + {0x3140, CRL_REG_LEN_08BIT, 0x02}, /* Undocumented */ + {0x3C00, CRL_REG_LEN_08BIT, 0x00}, /* Undocumented */ + {0x3C01, CRL_REG_LEN_08BIT, 0x03}, /* Undocumented */ + {0x3C02, CRL_REG_LEN_08BIT, 0xDC}, /* Undocumented */ + {0x3F0D, CRL_REG_LEN_08BIT, 0x00}, /* AD converter: 10 bit */ + {0x5748, CRL_REG_LEN_08BIT, 0x07}, /* Undocumented */ + {0x5749, CRL_REG_LEN_08BIT, 0xFF}, /* Undocumented */ + {0x574A, CRL_REG_LEN_08BIT, 0x00}, /* Undocumented */ + {0x574B, CRL_REG_LEN_08BIT, 0x00}, /* Undocumented */ + {0x7B75, CRL_REG_LEN_08BIT, 0x0E}, /* Undocumented */ + {0x7B76, CRL_REG_LEN_08BIT, 0x09}, /* Undocumented */ + {0x7B77, CRL_REG_LEN_08BIT, 0x0C}, /* Undocumented */ + {0x7B78, CRL_REG_LEN_08BIT, 0x06}, /* Undocumented */ + {0x7B79, CRL_REG_LEN_08BIT, 0x3B}, /* Undocumented */ + {0x7B53, CRL_REG_LEN_08BIT, 0x01}, /* Undocumented */ + {0x9369, CRL_REG_LEN_08BIT, 0x5A}, /* Undocumented */ + {0x936B, CRL_REG_LEN_08BIT, 0x55}, /* Undocumented */ + {0x936D, CRL_REG_LEN_08BIT, 0x28}, /* Undocumented */ + {0x9304, CRL_REG_LEN_08BIT, 0x03}, /* Undocumented */ + {0x9305, CRL_REG_LEN_08BIT, 0x00}, /* Undocumented */ + {0x9E9A, CRL_REG_LEN_08BIT, 0x2F}, /* Undocumented */ + {0x9E9B, CRL_REG_LEN_08BIT, 0x2F}, /* Undocumented */ + {0x9E9C, CRL_REG_LEN_08BIT, 0x2F}, /* Undocumented */ + {0x9E9D, CRL_REG_LEN_08BIT, 0x00}, /* Undocumented */ + {0x9E9E, CRL_REG_LEN_08BIT, 0x00}, /* Undocumented */ + {0x9E9F, CRL_REG_LEN_08BIT, 0x00}, /* Undocumented */ + {0xA2A9, CRL_REG_LEN_08BIT, 0x60}, /* Undocumented */ + {0xA2B7, CRL_REG_LEN_08BIT, 0x00}, /* Undocumented */ + + /* Digital Crop & Scaling */ + /* scale factor 16/95, 3895x3040 to 656x512 */ + {0x0401, CRL_REG_LEN_08BIT, 0x02}, /* Scaling mode: Scaling */ + 
{0x0404, CRL_REG_LEN_08BIT, 0x00}, /* Down Scaling Factor M [8] */ + {0x0405, CRL_REG_LEN_08BIT, 0x5F}, /* Down Scaling Factor M [7:0] */ + {0x0408, CRL_REG_LEN_08BIT, 0x00}, /* Crop Offset from X [12:8] */ + {0x0409, CRL_REG_LEN_08BIT, 0x50}, /* Crop Offset from X [7:0] */ + {0x040A, CRL_REG_LEN_08BIT, 0x00}, /* Crop Offset from Y [12:8] */ + {0x040B, CRL_REG_LEN_08BIT, 0x00}, /* Crop Offset from Y [7:0] */ + {0x040C, CRL_REG_LEN_08BIT, 0x0F}, /* Width after cropping [12:8] */ + {0x040D, CRL_REG_LEN_08BIT, 0x37}, /* Width after cropping [7:0] */ + {0x040E, CRL_REG_LEN_08BIT, 0x0B}, /* Height after cropping [12:8] */ + {0x040F, CRL_REG_LEN_08BIT, 0xE0}, /* Height after cropping [7:0] */ + + /* Output Crop */ + {0x034C, CRL_REG_LEN_08BIT, 0x02}, /* X output size [12:8] */ + {0x034D, CRL_REG_LEN_08BIT, 0x90}, /* X output size [7:0] */ + {0x034E, CRL_REG_LEN_08BIT, 0x02}, /* Y output size [12:8] */ + {0x034F, CRL_REG_LEN_08BIT, 0x00}, /* Y output size [7:0] */ +}; + +static struct crl_register_write_rep imx477_4056_2288_19MHZ_DOL_2f_master[] = { + /* Frame Horizontal Clock Count */ + {0x0342, CRL_REG_LEN_08BIT, 0x39}, /* Line length [15:8] */ + {0x0343, CRL_REG_LEN_08BIT, 0x14}, /* Line length [7:0] */ + + /* Frame Vertical Clock Count */ + {0x0340, CRL_REG_LEN_08BIT, 0x20}, /* Frame length [15:8] */ + {0x0341, CRL_REG_LEN_08BIT, 0x11}, /* Frame length [7:0] */ + + /* Visible Size */ + /* (0,376) to (4055, 2664) */ + {0x0344, CRL_REG_LEN_08BIT, 0x00}, /* Analog cropping start X [12:8] */ + {0x0345, CRL_REG_LEN_08BIT, 0x00}, /* Analog cropping start X [7:0] */ + {0x0346, CRL_REG_LEN_08BIT, 0x01}, /* Analog cropping start Y [12:8] */ + {0x0347, CRL_REG_LEN_08BIT, 0x78}, /* Analog cropping start Y [7:0] */ + {0x0348, CRL_REG_LEN_08BIT, 0x0F}, /* Analog cropping end X [12:8] */ + {0x0349, CRL_REG_LEN_08BIT, 0xD7}, /* Analog cropping end X [7:0] */ + {0x034A, CRL_REG_LEN_08BIT, 0x0A}, /* Analog cropping end Y [12:8] */ + {0x034B, CRL_REG_LEN_08BIT, 0x68}, /* Analog 
cropping end Y [7:0] */ + + /* Mode Setting */ + {0x00E3, CRL_REG_LEN_08BIT, 0x01}, /* DOL-HDR enabled */ + {0x00E4, CRL_REG_LEN_08BIT, 0x01}, /* DOL Mode: DOL2 */ + {0x00FC, CRL_REG_LEN_08BIT, 0x0A}, /* The output data fmt for CSI: RAW10 */ + {0x00FD, CRL_REG_LEN_08BIT, 0x0A}, /* The output data fmt for CSI: RAW10 */ + {0x3E10, CRL_REG_LEN_08BIT, 0x01}, /* VC ID of DOL 2nd frame */ + + {0x0220, CRL_REG_LEN_08BIT, 0x00}, /* Undocumented */ + {0x0221, CRL_REG_LEN_08BIT, 0x11}, /* Undocumented */ + {0x0381, CRL_REG_LEN_08BIT, 0x01}, /* Num of pixels skipped, even -> odd */ + {0x0383, CRL_REG_LEN_08BIT, 0x01}, /* Num of pixels skipped, odd -> even */ + {0x0385, CRL_REG_LEN_08BIT, 0x01}, /* Num of lines skipped, even -> odd */ + {0x0387, CRL_REG_LEN_08BIT, 0x01}, /* Num of lines skipped, odd -> even */ + {0x0900, CRL_REG_LEN_08BIT, 0x00}, /* Binning mode: Disable */ + {0x0901, CRL_REG_LEN_08BIT, 0x11}, /* Binning Type for Horizontal */ + {0x0902, CRL_REG_LEN_08BIT, 0x02}, /* Binning Type for Vertical */ + {0x3140, CRL_REG_LEN_08BIT, 0x02}, /* Undocumented */ + {0x3C00, CRL_REG_LEN_08BIT, 0x00}, /* Undocumented */ + {0x3C01, CRL_REG_LEN_08BIT, 0x03}, /* Undocumented */ + {0x3C02, CRL_REG_LEN_08BIT, 0xDC}, /* Undocumented */ + {0x3F0D, CRL_REG_LEN_08BIT, 0x00}, /* AD converter: 10 bit */ + {0x5748, CRL_REG_LEN_08BIT, 0x07}, /* Undocumented */ + {0x5749, CRL_REG_LEN_08BIT, 0xFF}, /* Undocumented */ + {0x574A, CRL_REG_LEN_08BIT, 0x00}, /* Undocumented */ + {0x574B, CRL_REG_LEN_08BIT, 0x00}, /* Undocumented */ + {0x7B75, CRL_REG_LEN_08BIT, 0x0E}, /* Undocumented */ + {0x7B76, CRL_REG_LEN_08BIT, 0x09}, /* Undocumented */ + {0x7B77, CRL_REG_LEN_08BIT, 0x0C}, /* Undocumented */ + {0x7B78, CRL_REG_LEN_08BIT, 0x06}, /* Undocumented */ + {0x7B79, CRL_REG_LEN_08BIT, 0x3B}, /* Undocumented */ + {0x7B53, CRL_REG_LEN_08BIT, 0x01}, /* Undocumented */ + {0x9369, CRL_REG_LEN_08BIT, 0x5A}, /* Undocumented */ + {0x936B, CRL_REG_LEN_08BIT, 0x55}, /* Undocumented */ + {0x936D, 
CRL_REG_LEN_08BIT, 0x28}, /* Undocumented */ + {0x9304, CRL_REG_LEN_08BIT, 0x03}, /* Undocumented */ + {0x9305, CRL_REG_LEN_08BIT, 0x00}, /* Undocumented */ + {0x9E9A, CRL_REG_LEN_08BIT, 0x2F}, /* Undocumented */ + {0x9E9B, CRL_REG_LEN_08BIT, 0x2F}, /* Undocumented */ + {0x9E9C, CRL_REG_LEN_08BIT, 0x2F}, /* Undocumented */ + {0x9E9D, CRL_REG_LEN_08BIT, 0x00}, /* Undocumented */ + {0x9E9E, CRL_REG_LEN_08BIT, 0x00}, /* Undocumented */ + {0x9E9F, CRL_REG_LEN_08BIT, 0x00}, /* Undocumented */ + {0xA2A9, CRL_REG_LEN_08BIT, 0x60}, /* Undocumented */ + {0xA2B7, CRL_REG_LEN_08BIT, 0x00}, /* Undocumented */ + + /* Digital Crop & Scaling */ + {0x0401, CRL_REG_LEN_08BIT, 0x00}, /* Scaling mode: No Scaling */ + {0x0404, CRL_REG_LEN_08BIT, 0x00}, /* Down Scaling Factor M [8] */ + {0x0405, CRL_REG_LEN_08BIT, 0x10}, /* Down Scaling Factor M [7:0] */ + {0x0408, CRL_REG_LEN_08BIT, 0x00}, /* Crop Offset from X [12:8] */ + {0x0409, CRL_REG_LEN_08BIT, 0x00}, /* Crop Offset from X [7:0] */ + {0x040A, CRL_REG_LEN_08BIT, 0x00}, /* Crop Offset from Y [12:8] */ + {0x040B, CRL_REG_LEN_08BIT, 0x00}, /* Crop Offset from Y [7:0] */ + {0x040C, CRL_REG_LEN_08BIT, 0x0F}, /* Width after cropping [12:8] */ + {0x040D, CRL_REG_LEN_08BIT, 0xD8}, /* Width after cropping [7:0] */ + {0x040E, CRL_REG_LEN_08BIT, 0x08}, /* Height after cropping [12:8] */ + {0x040F, CRL_REG_LEN_08BIT, 0xF0}, /* Height after cropping [7:0] */ + + /* Output Crop */ + {0x034C, CRL_REG_LEN_08BIT, 0x0F}, /* X output size [12:8] */ + {0x034D, CRL_REG_LEN_08BIT, 0xD8}, /* X output size [7:0] */ + {0x034E, CRL_REG_LEN_08BIT, 0x08}, /* Y output size [12:8] */ + {0x034F, CRL_REG_LEN_08BIT, 0xF0}, /* Y output size [7:0] */ +}; + +static struct crl_register_write_rep imx477_4056_2288_19MHZ_DOL_3f_master[] = { + /* Frame Horizontal Clock Count */ + {0x0342, CRL_REG_LEN_08BIT, 0x39}, /* Line length [15:8] */ + {0x0343, CRL_REG_LEN_08BIT, 0x14}, /* Line length [7:0] */ + + /* Frame Vertical Clock Count */ + {0x0340, CRL_REG_LEN_08BIT, 
0x20}, /* Frame length [15:8] */ + {0x0341, CRL_REG_LEN_08BIT, 0x11}, /* Frame length [7:0] */ + + /* Visible Size */ + /* (0,376) to (4055, 2664) */ + {0x0344, CRL_REG_LEN_08BIT, 0x00}, /* Analog cropping start X [12:8] */ + {0x0345, CRL_REG_LEN_08BIT, 0x00}, /* Analog cropping start X [7:0] */ + {0x0346, CRL_REG_LEN_08BIT, 0x01}, /* Analog cropping start Y [12:8] */ + {0x0347, CRL_REG_LEN_08BIT, 0x78}, /* Analog cropping start Y [7:0] */ + {0x0348, CRL_REG_LEN_08BIT, 0x0F}, /* Analog cropping end X [12:8] */ + {0x0349, CRL_REG_LEN_08BIT, 0xD7}, /* Analog cropping end X [7:0] */ + {0x034A, CRL_REG_LEN_08BIT, 0x0A}, /* Analog cropping end Y [12:8] */ + {0x034B, CRL_REG_LEN_08BIT, 0x68}, /* Analog cropping end Y [7:0] */ + + /* Mode Setting */ + {0x00E3, CRL_REG_LEN_08BIT, 0x01}, /* DOL-HDR enabled */ + {0x00E4, CRL_REG_LEN_08BIT, 0x02}, /* DOL Mode: DOL3 */ + {0x00FC, CRL_REG_LEN_08BIT, 0x0A}, /* The output data fmt for CSI: RAW10 */ + {0x00FD, CRL_REG_LEN_08BIT, 0x0A}, /* The output data fmt for CSI: RAW10 */ + {0x00FE, CRL_REG_LEN_08BIT, 0x0A}, /* The output data fmt for CSI: RAW10 */ + {0x00FF, CRL_REG_LEN_08BIT, 0x0A}, /* The output data fmt for CSI: RAW10 */ + {0x3E10, CRL_REG_LEN_08BIT, 0x01}, /* VC ID of DOL 2nd frame */ + {0x3E11, CRL_REG_LEN_08BIT, 0x02}, /* VC ID of DOL 3rd frame */ + + {0x0220, CRL_REG_LEN_08BIT, 0x00}, /* Undocumented */ + {0x0221, CRL_REG_LEN_08BIT, 0x11}, /* Undocumented */ + {0x0381, CRL_REG_LEN_08BIT, 0x01}, /* Num of pixels skipped, even -> odd */ + {0x0383, CRL_REG_LEN_08BIT, 0x01}, /* Num of pixels skipped, odd -> even */ + {0x0385, CRL_REG_LEN_08BIT, 0x01}, /* Num of lines skipped, even -> odd */ + {0x0387, CRL_REG_LEN_08BIT, 0x01}, /* Num of lines skipped, odd -> even */ + {0x0900, CRL_REG_LEN_08BIT, 0x00}, /* Binning mode: Disable */ + {0x0901, CRL_REG_LEN_08BIT, 0x11}, /* Binning Type for Horizontal */ + {0x0902, CRL_REG_LEN_08BIT, 0x02}, /* Binning Type for Vertical */ + {0x3140, CRL_REG_LEN_08BIT, 0x02}, /* Undocumented */ 
+ {0x3C00, CRL_REG_LEN_08BIT, 0x00}, /* Undocumented */ + {0x3C01, CRL_REG_LEN_08BIT, 0x03}, /* Undocumented */ + {0x3C02, CRL_REG_LEN_08BIT, 0xDC}, /* Undocumented */ + {0x3F0D, CRL_REG_LEN_08BIT, 0x00}, /* AD converter: 10 bit */ + {0x5748, CRL_REG_LEN_08BIT, 0x07}, /* Undocumented */ + {0x5749, CRL_REG_LEN_08BIT, 0xFF}, /* Undocumented */ + {0x574A, CRL_REG_LEN_08BIT, 0x00}, /* Undocumented */ + {0x574B, CRL_REG_LEN_08BIT, 0x00}, /* Undocumented */ + {0x7B75, CRL_REG_LEN_08BIT, 0x0E}, /* Undocumented */ + {0x7B76, CRL_REG_LEN_08BIT, 0x09}, /* Undocumented */ + {0x7B77, CRL_REG_LEN_08BIT, 0x0C}, /* Undocumented */ + {0x7B78, CRL_REG_LEN_08BIT, 0x06}, /* Undocumented */ + {0x7B79, CRL_REG_LEN_08BIT, 0x3B}, /* Undocumented */ + {0x7B53, CRL_REG_LEN_08BIT, 0x01}, /* Undocumented */ + {0x9369, CRL_REG_LEN_08BIT, 0x5A}, /* Undocumented */ + {0x936B, CRL_REG_LEN_08BIT, 0x55}, /* Undocumented */ + {0x936D, CRL_REG_LEN_08BIT, 0x28}, /* Undocumented */ + {0x9304, CRL_REG_LEN_08BIT, 0x03}, /* Undocumented */ + {0x9305, CRL_REG_LEN_08BIT, 0x00}, /* Undocumented */ + {0x9E9A, CRL_REG_LEN_08BIT, 0x2F}, /* Undocumented */ + {0x9E9B, CRL_REG_LEN_08BIT, 0x2F}, /* Undocumented */ + {0x9E9C, CRL_REG_LEN_08BIT, 0x2F}, /* Undocumented */ + {0x9E9D, CRL_REG_LEN_08BIT, 0x00}, /* Undocumented */ + {0x9E9E, CRL_REG_LEN_08BIT, 0x00}, /* Undocumented */ + {0x9E9F, CRL_REG_LEN_08BIT, 0x00}, /* Undocumented */ + {0xA2A9, CRL_REG_LEN_08BIT, 0x60}, /* Undocumented */ + {0xA2B7, CRL_REG_LEN_08BIT, 0x00}, /* Undocumented */ + + /* Digital Crop & Scaling */ + {0x0401, CRL_REG_LEN_08BIT, 0x00}, /* Scaling mode: No Scaling */ + {0x0404, CRL_REG_LEN_08BIT, 0x00}, /* Down Scaling Factor M [8] */ + {0x0405, CRL_REG_LEN_08BIT, 0x10}, /* Down Scaling Factor M [7:0] */ + {0x0408, CRL_REG_LEN_08BIT, 0x00}, /* Crop Offset from X [12:8] */ + {0x0409, CRL_REG_LEN_08BIT, 0x00}, /* Crop Offset from X [7:0] */ + {0x040A, CRL_REG_LEN_08BIT, 0x00}, /* Crop Offset from Y [12:8] */ + {0x040B, CRL_REG_LEN_08BIT, 
0x00}, /* Crop Offset from Y [7:0] */ + {0x040C, CRL_REG_LEN_08BIT, 0x0F}, /* Width after cropping [12:8] */ + {0x040D, CRL_REG_LEN_08BIT, 0xD8}, /* Width after cropping [7:0] */ + {0x040E, CRL_REG_LEN_08BIT, 0x08}, /* Height after cropping [12:8] */ + {0x040F, CRL_REG_LEN_08BIT, 0xF0}, /* Height after cropping [7:0] */ + + /* Output Crop */ + {0x034C, CRL_REG_LEN_08BIT, 0x0F}, /* X output size [12:8] */ + {0x034D, CRL_REG_LEN_08BIT, 0xD8}, /* X output size [7:0] */ + {0x034E, CRL_REG_LEN_08BIT, 0x08}, /* Y output size [12:8] */ + {0x034F, CRL_REG_LEN_08BIT, 0xF0}, /* Y output size [7:0] */ +}; + +static struct crl_mode_rep imx477_modes_master[] = { + { + .sd_rects_items = ARRAY_SIZE(imx477_4056_3040_rects), + .sd_rects = imx477_4056_3040_rects, + .binn_hor = 1, + .binn_vert = 1, + .scale_m = 1, + .width = 4056, + .height = 3040, + .min_llp = 14612, + .min_fll = 8209, + .comp_items = 0, + .ctrl_data = 0, + .mode_regs_items = ARRAY_SIZE(imx477_4056_3040_19MHZ_master), + .mode_regs = imx477_4056_3040_19MHZ_master, + }, + { + .sd_rects_items = ARRAY_SIZE(imx477_4056_3040_rects), + .sd_rects = imx477_4056_3040_rects, + .binn_hor = 1, + .binn_vert = 1, + .scale_m = 1, + .width = 4056, + .height = 3040, + .min_llp = 14612, + .min_fll = 8209, + .comp_items = 0, + .ctrl_data = 0, + .mode_regs_items = ARRAY_SIZE(imx477_4056_3040_19MHZ_DOL_2f_master), + .mode_regs = imx477_4056_3040_19MHZ_DOL_2f_master, + }, + { + .sd_rects_items = ARRAY_SIZE(imx477_4056_3040_rects), + .sd_rects = imx477_4056_3040_rects, + .binn_hor = 1, + .binn_vert = 1, + .scale_m = 1, + .width = 4056, + .height = 3040, + .min_llp = 14612, + .min_fll = 8209, + .comp_items = 0, + .ctrl_data = 0, + .mode_regs_items = ARRAY_SIZE(imx477_4056_3040_19MHZ_DOL_3f_master), + .mode_regs = imx477_4056_3040_19MHZ_DOL_3f_master, + }, + { + .sd_rects_items = ARRAY_SIZE(imx477_4056_2288_rects), + .sd_rects = imx477_4056_2288_rects, + .binn_hor = 1, + .binn_vert = 1, + .scale_m = 1, + .width = 4056, + .height = 2288, + 
.min_llp = 14612, + .min_fll = 8209, + .comp_items = 0, + .ctrl_data = 0, + .mode_regs_items = ARRAY_SIZE(imx477_4056_2288_19MHZ_master), + .mode_regs = imx477_4056_2288_19MHZ_master, + }, + { + .sd_rects_items = ARRAY_SIZE(imx477_2832_1632_rects), + .sd_rects = imx477_2832_1632_rects, + .binn_hor = 1, + .binn_vert = 1, + .scale_m = 1, + .width = 2832, + .height = 1632, + .min_llp = 14612, + .min_fll = 8209, + .comp_items = 0, + .ctrl_data = 0, + .mode_regs_items = ARRAY_SIZE(imx477_2832_1632_19MHZ_master), + .mode_regs = imx477_2832_1632_19MHZ_master, + }, + { + .sd_rects_items = ARRAY_SIZE(imx477_2028_1128_rects), + .sd_rects = imx477_2028_1128_rects, + .binn_hor = 1, + .binn_vert = 1, + .scale_m = 1, + .width = 2028, + .height = 1128, + .min_llp = 14612, + .min_fll = 8209, + .comp_items = 0, + .ctrl_data = 0, + .mode_regs_items = ARRAY_SIZE(imx477_2028_1128_19MHZ_master), + .mode_regs = imx477_2028_1128_19MHZ_master, + }, + { + .sd_rects_items = ARRAY_SIZE(imx477_1296_768_rects), + .sd_rects = imx477_1296_768_rects, + .binn_hor = 1, + .binn_vert = 1, + .scale_m = 1, + .width = 1296, + .height = 768, + .min_llp = 14612, + .min_fll = 8209, + .comp_items = 0, + .ctrl_data = 0, + .mode_regs_items = ARRAY_SIZE(imx477_1296_768_19MHZ_master), + .mode_regs = imx477_1296_768_19MHZ_master, + }, + { + .sd_rects_items = ARRAY_SIZE(imx477_656_512_rects), + .sd_rects = imx477_656_512_rects, + .binn_hor = 1, + .binn_vert = 1, + .scale_m = 1, + .width = 656, + .height = 512, + .min_llp = 14612, + .min_fll = 8209, + .comp_items = 0, + .ctrl_data = 0, + .mode_regs_items = ARRAY_SIZE(imx477_656_512_19MHZ_master), + .mode_regs = imx477_656_512_19MHZ_master, + }, + { + .sd_rects_items = ARRAY_SIZE(imx477_4056_2288_rects), + .sd_rects = imx477_4056_2288_rects, + .binn_hor = 1, + .binn_vert = 1, + .scale_m = 1, + .width = 4056, + .height = 2288, + .min_llp = 14612, + .min_fll = 8209, + .comp_items = 0, + .ctrl_data = 0, + .mode_regs_items = 
ARRAY_SIZE(imx477_4056_2288_19MHZ_DOL_2f_master), + .mode_regs = imx477_4056_2288_19MHZ_DOL_2f_master, + }, + { + .sd_rects_items = ARRAY_SIZE(imx477_4056_2288_rects), + .sd_rects = imx477_4056_2288_rects, + .binn_hor = 1, + .binn_vert = 1, + .scale_m = 1, + .width = 4056, + .height = 2288, + .min_llp = 14612, + .min_fll = 8209, + .comp_items = 0, + .ctrl_data = 0, + .mode_regs_items = ARRAY_SIZE(imx477_4056_2288_19MHZ_DOL_3f_master), + .mode_regs = imx477_4056_2288_19MHZ_DOL_3f_master, + }, +}; + +static struct crl_flip_data imx477_flip_configurations_master[] = { + { + .flip = CRL_FLIP_DEFAULT_NONE, + .pixel_order = CRL_PIXEL_ORDER_RGGB, + }, + { + .flip = CRL_FLIP_HFLIP, + .pixel_order = CRL_PIXEL_ORDER_GRBG, + }, + { + .flip = CRL_FLIP_VFLIP, + .pixel_order = CRL_PIXEL_ORDER_GBRG, + }, + { + .flip = CRL_FLIP_HFLIP_VFLIP, + .pixel_order = CRL_PIXEL_ORDER_BGGR, + } +}; + +struct crl_sensor_configuration imx477_master_crl_configuration = { + + .power_items = ARRAY_SIZE(imx477_power_items), + .power_entities = imx477_power_items, + + .onetime_init_regs_items = ARRAY_SIZE(imx477_onetime_init_regset_master), + .onetime_init_regs = imx477_onetime_init_regset_master, + + .powerup_regs_items = ARRAY_SIZE(imx477_powerup_standby), + .powerup_regs = imx477_powerup_standby, + + .poweroff_regs_items = 0, + .poweroff_regs = 0, + + .id_reg_items = ARRAY_SIZE(imx477_sensor_detect_regset), + .id_regs = imx477_sensor_detect_regset, + + .subdev_items = ARRAY_SIZE(imx477_sensor_subdevs), + .subdevs = imx477_sensor_subdevs, + + .sensor_limits = &imx477_sensor_limits, + + .pll_config_items = ARRAY_SIZE(imx477_pll_configurations), + .pll_configs = imx477_pll_configurations, + + .modes_items = ARRAY_SIZE(imx477_modes_master), + .modes = imx477_modes_master, + + .streamon_regs_items = ARRAY_SIZE(imx477_streamon_regs), + .streamon_regs = imx477_streamon_regs, + + .streamoff_regs_items = ARRAY_SIZE(imx477_streamoff_regs), + .streamoff_regs = imx477_streamoff_regs, + + .v4l2_ctrls_items = 
ARRAY_SIZE(imx477_v4l2_ctrls), + .v4l2_ctrl_bank = imx477_v4l2_ctrls, + + .csi_fmts_items = ARRAY_SIZE(imx477_crl_csi_data_fmt), + .csi_fmts = imx477_crl_csi_data_fmt, + + .flip_items = ARRAY_SIZE(imx477_flip_configurations_master), + .flip_data = imx477_flip_configurations_master, + + .frame_desc_entries = ARRAY_SIZE(imx477_frame_desc), + .frame_desc_type = CRL_V4L2_MBUS_FRAME_DESC_TYPE_CSI2, + .frame_desc = imx477_frame_desc, +}; + +#endif /* __CRLMODULE_IMX477_MASTER_CONFIGURATION_H_ */ diff --git a/drivers/media/i2c/crlmodule/crl_imx477_slave_configuration.h b/drivers/media/i2c/crlmodule/crl_imx477_slave_configuration.h new file mode 100644 index 0000000000000..b8dc15c0f1f8e --- /dev/null +++ b/drivers/media/i2c/crlmodule/crl_imx477_slave_configuration.h @@ -0,0 +1,509 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (C) 2017 - 2018 Intel Corporation + * + * Author: Alexei Zavjalov + * + */ + +#ifndef __CRLMODULE_IMX477_SLAVE_CONFIGURATION_H_ +#define __CRLMODULE_IMX477_SLAVE_CONFIGURATION_H_ + +#include "crl_imx477_common_regs.h" + +static struct crl_register_write_rep imx477_onetime_init_regset_slave[] = { + {0x0103, CRL_REG_LEN_08BIT, 0x01}, /* Software reset */ + + {0x3010, CRL_REG_LEN_08BIT, 0x01}, /* SLAVE_ADD_EN_2ND */ + {0x3011, CRL_REG_LEN_08BIT, 0x01}, /* SLAVE_ADD_ACKEN_2ND */ + + {0x3F0B, CRL_REG_LEN_08BIT, 0x01}, /* Multi camera mode: on */ + + {0x3041, CRL_REG_LEN_08BIT, 0x00}, /* Mode: Slave */ + {0x3040, CRL_REG_LEN_08BIT, 0x00}, /* XVS pin: in */ + {0x4B81, CRL_REG_LEN_08BIT, 0x00}, /* Mode: Slave */ + + {0x3042, CRL_REG_LEN_08BIT, 0x00}, /* VSYNC Delay in lines [15:8] */ + {0x3043, CRL_REG_LEN_08BIT, 0x00}, /* VSYNC Delay in lines [7:0] */ + {0x3044, CRL_REG_LEN_08BIT, 0x00}, /* VSYNC Delay in clocks [15:8] */ + {0x3045, CRL_REG_LEN_08BIT, 0x00}, /* VSYNC Delay in clocks [7:0] */ + {0x3045, CRL_REG_LEN_08BIT, 0x00}, /* VSYNC thin down setting */ + + /* External Clock Setting */ + {0x0136, CRL_REG_LEN_08BIT, 0x13}, /* External clock 
freq (dec) [15:8] */ + {0x0137, CRL_REG_LEN_08BIT, 0x33}, /* External clock freq (dec) [7:0] */ + + /* Global Setting */ + {0x0808, CRL_REG_LEN_08BIT, 0x02}, /* MIPI Global Timing: Register Control */ + {0xE07A, CRL_REG_LEN_08BIT, 0x01}, + {0xE000, CRL_REG_LEN_08BIT, 0x00}, /* RUN/STOP of CSI2 during Frame Blanking: HS */ + {0x4AE9, CRL_REG_LEN_08BIT, 0x18}, + {0x4AEA, CRL_REG_LEN_08BIT, 0x08}, + {0xF61C, CRL_REG_LEN_08BIT, 0x04}, + {0xF61E, CRL_REG_LEN_08BIT, 0x04}, + {0x4AE9, CRL_REG_LEN_08BIT, 0x21}, + {0x4AEA, CRL_REG_LEN_08BIT, 0x80}, + {0x38A8, CRL_REG_LEN_08BIT, 0x1F}, + {0x38A9, CRL_REG_LEN_08BIT, 0xFF}, + {0x38AA, CRL_REG_LEN_08BIT, 0x1F}, + {0x38AB, CRL_REG_LEN_08BIT, 0xFF}, + {0x420B, CRL_REG_LEN_08BIT, 0x01}, + {0x55D4, CRL_REG_LEN_08BIT, 0x00}, + {0x55D5, CRL_REG_LEN_08BIT, 0x00}, + {0x55D6, CRL_REG_LEN_08BIT, 0x07}, + {0x55D7, CRL_REG_LEN_08BIT, 0xFF}, + {0x55E8, CRL_REG_LEN_08BIT, 0x07}, + {0x55E9, CRL_REG_LEN_08BIT, 0xFF}, + {0x55EA, CRL_REG_LEN_08BIT, 0x00}, + {0x55EB, CRL_REG_LEN_08BIT, 0x00}, + {0x574C, CRL_REG_LEN_08BIT, 0x07}, + {0x574D, CRL_REG_LEN_08BIT, 0xFF}, + {0x574E, CRL_REG_LEN_08BIT, 0x00}, + {0x574F, CRL_REG_LEN_08BIT, 0x00}, + {0x5754, CRL_REG_LEN_08BIT, 0x00}, + {0x5755, CRL_REG_LEN_08BIT, 0x00}, + {0x5756, CRL_REG_LEN_08BIT, 0x07}, + {0x5757, CRL_REG_LEN_08BIT, 0xFF}, + {0x5973, CRL_REG_LEN_08BIT, 0x04}, + {0x5974, CRL_REG_LEN_08BIT, 0x01}, + {0x5D13, CRL_REG_LEN_08BIT, 0xC3}, + {0x5D14, CRL_REG_LEN_08BIT, 0x58}, + {0x5D15, CRL_REG_LEN_08BIT, 0xA3}, + {0x5D16, CRL_REG_LEN_08BIT, 0x1D}, + {0x5D17, CRL_REG_LEN_08BIT, 0x65}, + {0x5D18, CRL_REG_LEN_08BIT, 0x8C}, + {0x5D1A, CRL_REG_LEN_08BIT, 0x06}, + {0x5D1B, CRL_REG_LEN_08BIT, 0xA9}, + {0x5D1C, CRL_REG_LEN_08BIT, 0x45}, + {0x5D1D, CRL_REG_LEN_08BIT, 0x3A}, + {0x5D1E, CRL_REG_LEN_08BIT, 0xAB}, + {0x5D1F, CRL_REG_LEN_08BIT, 0x15}, + {0x5D21, CRL_REG_LEN_08BIT, 0x0E}, + {0x5D22, CRL_REG_LEN_08BIT, 0x52}, + {0x5D23, CRL_REG_LEN_08BIT, 0xAA}, + {0x5D24, CRL_REG_LEN_08BIT, 0x7D}, + {0x5D25, 
CRL_REG_LEN_08BIT, 0x57}, + {0x5D26, CRL_REG_LEN_08BIT, 0xA8}, + {0x5D37, CRL_REG_LEN_08BIT, 0x5A}, + {0x5D38, CRL_REG_LEN_08BIT, 0x5A}, + {0x5D77, CRL_REG_LEN_08BIT, 0x7F}, + {0x7B7C, CRL_REG_LEN_08BIT, 0x00}, + {0x7B7D, CRL_REG_LEN_08BIT, 0x00}, + {0x8D1F, CRL_REG_LEN_08BIT, 0x00}, + {0x8D27, CRL_REG_LEN_08BIT, 0x00}, + {0x9004, CRL_REG_LEN_08BIT, 0x03}, + {0x9200, CRL_REG_LEN_08BIT, 0x50}, + {0x9201, CRL_REG_LEN_08BIT, 0x6C}, + {0x9202, CRL_REG_LEN_08BIT, 0x71}, + {0x9203, CRL_REG_LEN_08BIT, 0x00}, + {0x9204, CRL_REG_LEN_08BIT, 0x71}, + {0x9205, CRL_REG_LEN_08BIT, 0x01}, + {0x9371, CRL_REG_LEN_08BIT, 0x6A}, + {0x9373, CRL_REG_LEN_08BIT, 0x6A}, + {0x9375, CRL_REG_LEN_08BIT, 0x64}, + {0x990C, CRL_REG_LEN_08BIT, 0x00}, + {0x990D, CRL_REG_LEN_08BIT, 0x08}, + {0x9956, CRL_REG_LEN_08BIT, 0x8C}, + {0x9957, CRL_REG_LEN_08BIT, 0x64}, + {0x9958, CRL_REG_LEN_08BIT, 0x50}, + {0x9A48, CRL_REG_LEN_08BIT, 0x06}, + {0x9A49, CRL_REG_LEN_08BIT, 0x06}, + {0x9A4A, CRL_REG_LEN_08BIT, 0x06}, + {0x9A4B, CRL_REG_LEN_08BIT, 0x06}, + {0x9A4C, CRL_REG_LEN_08BIT, 0x06}, + {0x9A4D, CRL_REG_LEN_08BIT, 0x06}, + {0xA001, CRL_REG_LEN_08BIT, 0x0A}, + {0xA003, CRL_REG_LEN_08BIT, 0x0A}, + {0xA005, CRL_REG_LEN_08BIT, 0x0A}, + {0xA006, CRL_REG_LEN_08BIT, 0x01}, + {0xA007, CRL_REG_LEN_08BIT, 0xC0}, + {0xA009, CRL_REG_LEN_08BIT, 0xC0}, + + /* Image Tuning */ + {0x3D8A, CRL_REG_LEN_08BIT, 0x01}, + {0x7B3B, CRL_REG_LEN_08BIT, 0x01}, + {0x7B4C, CRL_REG_LEN_08BIT, 0x00}, + {0x9905, CRL_REG_LEN_08BIT, 0x00}, + {0x9907, CRL_REG_LEN_08BIT, 0x00}, + {0x9909, CRL_REG_LEN_08BIT, 0x00}, + {0x990B, CRL_REG_LEN_08BIT, 0x00}, + {0x9944, CRL_REG_LEN_08BIT, 0x3C}, + {0x9947, CRL_REG_LEN_08BIT, 0x3C}, + {0x994A, CRL_REG_LEN_08BIT, 0x8C}, + {0x994B, CRL_REG_LEN_08BIT, 0x50}, + {0x994C, CRL_REG_LEN_08BIT, 0x1B}, + {0x994D, CRL_REG_LEN_08BIT, 0x8C}, + {0x994E, CRL_REG_LEN_08BIT, 0x50}, + {0x994F, CRL_REG_LEN_08BIT, 0x1B}, + {0x9950, CRL_REG_LEN_08BIT, 0x8C}, + {0x9951, CRL_REG_LEN_08BIT, 0x1B}, + {0x9952, 
CRL_REG_LEN_08BIT, 0x0A}, + {0x9953, CRL_REG_LEN_08BIT, 0x8C}, + {0x9954, CRL_REG_LEN_08BIT, 0x1B}, + {0x9955, CRL_REG_LEN_08BIT, 0x0A}, + {0x9A13, CRL_REG_LEN_08BIT, 0x04}, + {0x9A14, CRL_REG_LEN_08BIT, 0x04}, + {0x9A19, CRL_REG_LEN_08BIT, 0x00}, + {0x9A1C, CRL_REG_LEN_08BIT, 0x04}, + {0x9A1D, CRL_REG_LEN_08BIT, 0x04}, + {0x9A26, CRL_REG_LEN_08BIT, 0x05}, + {0x9A27, CRL_REG_LEN_08BIT, 0x05}, + {0x9A2C, CRL_REG_LEN_08BIT, 0x01}, + {0x9A2D, CRL_REG_LEN_08BIT, 0x03}, + {0x9A2F, CRL_REG_LEN_08BIT, 0x05}, + {0x9A30, CRL_REG_LEN_08BIT, 0x05}, + {0x9A41, CRL_REG_LEN_08BIT, 0x00}, + {0x9A46, CRL_REG_LEN_08BIT, 0x00}, + {0x9A47, CRL_REG_LEN_08BIT, 0x00}, + {0x9C17, CRL_REG_LEN_08BIT, 0x35}, + {0x9C1D, CRL_REG_LEN_08BIT, 0x31}, + {0x9C29, CRL_REG_LEN_08BIT, 0x50}, + {0x9C3B, CRL_REG_LEN_08BIT, 0x2F}, + {0x9C41, CRL_REG_LEN_08BIT, 0x6B}, + {0x9C47, CRL_REG_LEN_08BIT, 0x2D}, + {0x9C4D, CRL_REG_LEN_08BIT, 0x40}, + {0x9C6B, CRL_REG_LEN_08BIT, 0x00}, + {0x9C71, CRL_REG_LEN_08BIT, 0xC8}, + {0x9C73, CRL_REG_LEN_08BIT, 0x32}, + {0x9C75, CRL_REG_LEN_08BIT, 0x04}, + {0x9C7D, CRL_REG_LEN_08BIT, 0x2D}, + {0x9C83, CRL_REG_LEN_08BIT, 0x40}, + {0x9C94, CRL_REG_LEN_08BIT, 0x3F}, + {0x9C95, CRL_REG_LEN_08BIT, 0x3F}, + {0x9C96, CRL_REG_LEN_08BIT, 0x3F}, + {0x9C97, CRL_REG_LEN_08BIT, 0x00}, + {0x9C98, CRL_REG_LEN_08BIT, 0x00}, + {0x9C99, CRL_REG_LEN_08BIT, 0x00}, + {0x9C9A, CRL_REG_LEN_08BIT, 0x3F}, + {0x9C9B, CRL_REG_LEN_08BIT, 0x3F}, + {0x9C9C, CRL_REG_LEN_08BIT, 0x3F}, + {0x9CA0, CRL_REG_LEN_08BIT, 0x0F}, + {0x9CA1, CRL_REG_LEN_08BIT, 0x0F}, + {0x9CA2, CRL_REG_LEN_08BIT, 0x0F}, + {0x9CA3, CRL_REG_LEN_08BIT, 0x00}, + {0x9CA4, CRL_REG_LEN_08BIT, 0x00}, + {0x9CA5, CRL_REG_LEN_08BIT, 0x00}, + {0x9CA6, CRL_REG_LEN_08BIT, 0x1E}, + {0x9CA7, CRL_REG_LEN_08BIT, 0x1E}, + {0x9CA8, CRL_REG_LEN_08BIT, 0x1E}, + {0x9CA9, CRL_REG_LEN_08BIT, 0x00}, + {0x9CAA, CRL_REG_LEN_08BIT, 0x00}, + {0x9CAB, CRL_REG_LEN_08BIT, 0x00}, + {0x9CAC, CRL_REG_LEN_08BIT, 0x09}, + {0x9CAD, CRL_REG_LEN_08BIT, 0x09}, + {0x9CAE, 
CRL_REG_LEN_08BIT, 0x09}, + {0x9CBD, CRL_REG_LEN_08BIT, 0x50}, + {0x9CBF, CRL_REG_LEN_08BIT, 0x50}, + {0x9CC1, CRL_REG_LEN_08BIT, 0x50}, + {0x9CC3, CRL_REG_LEN_08BIT, 0x40}, + {0x9CC5, CRL_REG_LEN_08BIT, 0x40}, + {0x9CC7, CRL_REG_LEN_08BIT, 0x40}, + {0x9CC9, CRL_REG_LEN_08BIT, 0x0A}, + {0x9CCB, CRL_REG_LEN_08BIT, 0x0A}, + {0x9CCD, CRL_REG_LEN_08BIT, 0x0A}, + {0x9D17, CRL_REG_LEN_08BIT, 0x35}, + {0x9D1D, CRL_REG_LEN_08BIT, 0x31}, + {0x9D29, CRL_REG_LEN_08BIT, 0x50}, + {0x9D3B, CRL_REG_LEN_08BIT, 0x2F}, + {0x9D41, CRL_REG_LEN_08BIT, 0x6B}, + {0x9D47, CRL_REG_LEN_08BIT, 0x42}, + {0x9D4D, CRL_REG_LEN_08BIT, 0x5A}, + {0x9D6B, CRL_REG_LEN_08BIT, 0x00}, + {0x9D71, CRL_REG_LEN_08BIT, 0xC8}, + {0x9D73, CRL_REG_LEN_08BIT, 0x32}, + {0x9D75, CRL_REG_LEN_08BIT, 0x04}, + {0x9D7D, CRL_REG_LEN_08BIT, 0x42}, + {0x9D83, CRL_REG_LEN_08BIT, 0x5A}, + {0x9D94, CRL_REG_LEN_08BIT, 0x3F}, + {0x9D95, CRL_REG_LEN_08BIT, 0x3F}, + {0x9D96, CRL_REG_LEN_08BIT, 0x3F}, + {0x9D97, CRL_REG_LEN_08BIT, 0x00}, + {0x9D98, CRL_REG_LEN_08BIT, 0x00}, + {0x9D99, CRL_REG_LEN_08BIT, 0x00}, + {0x9D9A, CRL_REG_LEN_08BIT, 0x3F}, + {0x9D9B, CRL_REG_LEN_08BIT, 0x3F}, + {0x9D9C, CRL_REG_LEN_08BIT, 0x3F}, + {0x9D9D, CRL_REG_LEN_08BIT, 0x1F}, + {0x9D9E, CRL_REG_LEN_08BIT, 0x1F}, + {0x9D9F, CRL_REG_LEN_08BIT, 0x1F}, + {0x9DA0, CRL_REG_LEN_08BIT, 0x0F}, + {0x9DA1, CRL_REG_LEN_08BIT, 0x0F}, + {0x9DA2, CRL_REG_LEN_08BIT, 0x0F}, + {0x9DA3, CRL_REG_LEN_08BIT, 0x00}, + {0x9DA4, CRL_REG_LEN_08BIT, 0x00}, + {0x9DA5, CRL_REG_LEN_08BIT, 0x00}, + {0x9DA6, CRL_REG_LEN_08BIT, 0x1E}, + {0x9DA7, CRL_REG_LEN_08BIT, 0x1E}, + {0x9DA8, CRL_REG_LEN_08BIT, 0x1E}, + {0x9DA9, CRL_REG_LEN_08BIT, 0x00}, + {0x9DAA, CRL_REG_LEN_08BIT, 0x00}, + {0x9DAB, CRL_REG_LEN_08BIT, 0x00}, + {0x9DAC, CRL_REG_LEN_08BIT, 0x09}, + {0x9DAD, CRL_REG_LEN_08BIT, 0x09}, + {0x9DAE, CRL_REG_LEN_08BIT, 0x09}, + {0x9DC9, CRL_REG_LEN_08BIT, 0x0A}, + {0x9DCB, CRL_REG_LEN_08BIT, 0x0A}, + {0x9DCD, CRL_REG_LEN_08BIT, 0x0A}, + {0x9E17, CRL_REG_LEN_08BIT, 0x35}, + {0x9E1D, 
CRL_REG_LEN_08BIT, 0x31}, + {0x9E29, CRL_REG_LEN_08BIT, 0x50}, + {0x9E3B, CRL_REG_LEN_08BIT, 0x2F}, + {0x9E41, CRL_REG_LEN_08BIT, 0x6B}, + {0x9E47, CRL_REG_LEN_08BIT, 0x2D}, + {0x9E4D, CRL_REG_LEN_08BIT, 0x40}, + {0x9E6B, CRL_REG_LEN_08BIT, 0x00}, + {0x9E71, CRL_REG_LEN_08BIT, 0xC8}, + {0x9E73, CRL_REG_LEN_08BIT, 0x32}, + {0x9E75, CRL_REG_LEN_08BIT, 0x04}, + {0x9E94, CRL_REG_LEN_08BIT, 0x0F}, + {0x9E95, CRL_REG_LEN_08BIT, 0x0F}, + {0x9E96, CRL_REG_LEN_08BIT, 0x0F}, + {0x9E97, CRL_REG_LEN_08BIT, 0x00}, + {0x9E98, CRL_REG_LEN_08BIT, 0x00}, + {0x9E99, CRL_REG_LEN_08BIT, 0x00}, + {0x9EA0, CRL_REG_LEN_08BIT, 0x0F}, + {0x9EA1, CRL_REG_LEN_08BIT, 0x0F}, + {0x9EA2, CRL_REG_LEN_08BIT, 0x0F}, + {0x9EA3, CRL_REG_LEN_08BIT, 0x00}, + {0x9EA4, CRL_REG_LEN_08BIT, 0x00}, + {0x9EA5, CRL_REG_LEN_08BIT, 0x00}, + {0x9EA6, CRL_REG_LEN_08BIT, 0x3F}, + {0x9EA7, CRL_REG_LEN_08BIT, 0x3F}, + {0x9EA8, CRL_REG_LEN_08BIT, 0x3F}, + {0x9EA9, CRL_REG_LEN_08BIT, 0x00}, + {0x9EAA, CRL_REG_LEN_08BIT, 0x00}, + {0x9EAB, CRL_REG_LEN_08BIT, 0x00}, + {0x9EAC, CRL_REG_LEN_08BIT, 0x09}, + {0x9EAD, CRL_REG_LEN_08BIT, 0x09}, + {0x9EAE, CRL_REG_LEN_08BIT, 0x09}, + {0x9EC9, CRL_REG_LEN_08BIT, 0x0A}, + {0x9ECB, CRL_REG_LEN_08BIT, 0x0A}, + {0x9ECD, CRL_REG_LEN_08BIT, 0x0A}, + {0x9F17, CRL_REG_LEN_08BIT, 0x35}, + {0x9F1D, CRL_REG_LEN_08BIT, 0x31}, + {0x9F29, CRL_REG_LEN_08BIT, 0x50}, + {0x9F3B, CRL_REG_LEN_08BIT, 0x2F}, + {0x9F41, CRL_REG_LEN_08BIT, 0x6B}, + {0x9F47, CRL_REG_LEN_08BIT, 0x42}, + {0x9F4D, CRL_REG_LEN_08BIT, 0x5A}, + {0x9F6B, CRL_REG_LEN_08BIT, 0x00}, + {0x9F71, CRL_REG_LEN_08BIT, 0xC8}, + {0x9F73, CRL_REG_LEN_08BIT, 0x32}, + {0x9F75, CRL_REG_LEN_08BIT, 0x04}, + {0x9F94, CRL_REG_LEN_08BIT, 0x0F}, + {0x9F95, CRL_REG_LEN_08BIT, 0x0F}, + {0x9F96, CRL_REG_LEN_08BIT, 0x0F}, + {0x9F97, CRL_REG_LEN_08BIT, 0x00}, + {0x9F98, CRL_REG_LEN_08BIT, 0x00}, + {0x9F99, CRL_REG_LEN_08BIT, 0x00}, + {0x9F9A, CRL_REG_LEN_08BIT, 0x2F}, + {0x9F9B, CRL_REG_LEN_08BIT, 0x2F}, + {0x9F9C, CRL_REG_LEN_08BIT, 0x2F}, + {0x9F9D, 
CRL_REG_LEN_08BIT, 0x00}, + {0x9F9E, CRL_REG_LEN_08BIT, 0x00}, + {0x9F9F, CRL_REG_LEN_08BIT, 0x00}, + {0x9FA0, CRL_REG_LEN_08BIT, 0x0F}, + {0x9FA1, CRL_REG_LEN_08BIT, 0x0F}, + {0x9FA2, CRL_REG_LEN_08BIT, 0x0F}, + {0x9FA3, CRL_REG_LEN_08BIT, 0x00}, + {0x9FA4, CRL_REG_LEN_08BIT, 0x00}, + {0x9FA5, CRL_REG_LEN_08BIT, 0x00}, + {0x9FA6, CRL_REG_LEN_08BIT, 0x1E}, + {0x9FA7, CRL_REG_LEN_08BIT, 0x1E}, + {0x9FA8, CRL_REG_LEN_08BIT, 0x1E}, + {0x9FA9, CRL_REG_LEN_08BIT, 0x00}, + {0x9FAA, CRL_REG_LEN_08BIT, 0x00}, + {0x9FAB, CRL_REG_LEN_08BIT, 0x00}, + {0x9FAC, CRL_REG_LEN_08BIT, 0x09}, + {0x9FAD, CRL_REG_LEN_08BIT, 0x09}, + {0x9FAE, CRL_REG_LEN_08BIT, 0x09}, + {0x9FC9, CRL_REG_LEN_08BIT, 0x0A}, + {0x9FCB, CRL_REG_LEN_08BIT, 0x0A}, + {0x9FCD, CRL_REG_LEN_08BIT, 0x0A}, + {0xA14B, CRL_REG_LEN_08BIT, 0xFF}, + {0xA151, CRL_REG_LEN_08BIT, 0x0C}, + {0xA153, CRL_REG_LEN_08BIT, 0x50}, + {0xA155, CRL_REG_LEN_08BIT, 0x02}, + {0xA157, CRL_REG_LEN_08BIT, 0x00}, + {0xA1AD, CRL_REG_LEN_08BIT, 0xFF}, + {0xA1B3, CRL_REG_LEN_08BIT, 0x0C}, + {0xA1B5, CRL_REG_LEN_08BIT, 0x50}, + {0xA1B9, CRL_REG_LEN_08BIT, 0x00}, + {0xA24B, CRL_REG_LEN_08BIT, 0xFF}, + {0xA257, CRL_REG_LEN_08BIT, 0x00}, + {0xA2AD, CRL_REG_LEN_08BIT, 0xFF}, + {0xA2B9, CRL_REG_LEN_08BIT, 0x00}, + {0xB21F, CRL_REG_LEN_08BIT, 0x04}, + {0xB35C, CRL_REG_LEN_08BIT, 0x00}, + {0xB35E, CRL_REG_LEN_08BIT, 0x08}, +}; + +static struct crl_register_write_rep imx477_4056_3040_19MHZ_slave[] = { + /* Frame Horizontal Clock Count */ + {0x0342, CRL_REG_LEN_08BIT, 0x39}, /* Line length [15:8] */ + {0x0343, CRL_REG_LEN_08BIT, 0x14}, /* Line length [7:0] */ + + /* Frame Vertical Clock Count */ + {0x0340, CRL_REG_LEN_08BIT, 0x20}, /* Frame length [15:8] */ + {0x0341, CRL_REG_LEN_08BIT, 0x11}, /* Frame length [7:0] */ + + /* Visible Size */ + {0x0344, CRL_REG_LEN_08BIT, 0x00}, /* Analog cropping start X [12:8] */ + {0x0345, CRL_REG_LEN_08BIT, 0x00}, /* Analog cropping start X [7:0] */ + {0x0346, CRL_REG_LEN_08BIT, 0x00}, /* Analog cropping start Y [12:8] 
*/ + {0x0347, CRL_REG_LEN_08BIT, 0x00}, /* Analog cropping start Y [7:0] */ + {0x0348, CRL_REG_LEN_08BIT, 0x0F}, /* Analog cropping end X [12:8] */ + {0x0349, CRL_REG_LEN_08BIT, 0xD7}, /* Analog cropping end X [7:0] */ + {0x034A, CRL_REG_LEN_08BIT, 0x0B}, /* Analog cropping end Y [12:8] */ + {0x034B, CRL_REG_LEN_08BIT, 0xDF}, /* Analog cropping end Y [7:0] */ + + /* Mode Setting */ + {0x00E3, CRL_REG_LEN_08BIT, 0x00}, /* DOL-HDR Disable */ + {0x00E4, CRL_REG_LEN_08BIT, 0x00}, /* DOL Mode: DOL-HDR Disable */ + {0x00FC, CRL_REG_LEN_08BIT, 0x0A}, /* The output data fmt for CSI: RAW10 */ + {0x00FD, CRL_REG_LEN_08BIT, 0x0A}, /* The output data fmt for CSI: RAW10 */ + {0x00FE, CRL_REG_LEN_08BIT, 0x0A}, /* The output data fmt for CSI: RAW10 */ + {0x00FF, CRL_REG_LEN_08BIT, 0x0A}, /* The output data fmt for CSI: RAW10 */ + {0x0220, CRL_REG_LEN_08BIT, 0x00}, /* Undocumented */ + {0x0221, CRL_REG_LEN_08BIT, 0x11}, /* Undocumented */ + {0x0381, CRL_REG_LEN_08BIT, 0x01}, /* Num of pixels skipped, even -> odd */ + {0x0383, CRL_REG_LEN_08BIT, 0x01}, /* Num of pixels skipped, odd -> even */ + {0x0385, CRL_REG_LEN_08BIT, 0x01}, /* Num of lines skipped, even -> odd */ + {0x0387, CRL_REG_LEN_08BIT, 0x01}, /* Num of lines skipped, odd -> even */ + {0x0900, CRL_REG_LEN_08BIT, 0x00}, /* Binning mode: Disable */ + {0x0901, CRL_REG_LEN_08BIT, 0x11}, /* Binning Type for Horizontal */ + {0x0902, CRL_REG_LEN_08BIT, 0x02}, /* Binning Type for Vertical */ + {0x3140, CRL_REG_LEN_08BIT, 0x02}, /* Undocumented */ + {0x3C00, CRL_REG_LEN_08BIT, 0x00}, /* Undocumented */ + {0x3C01, CRL_REG_LEN_08BIT, 0x03}, /* Undocumented */ + {0x3C02, CRL_REG_LEN_08BIT, 0xDC}, /* Undocumented */ + {0x3F0D, CRL_REG_LEN_08BIT, 0x00}, /* AD converter: 10 bit */ + {0x5748, CRL_REG_LEN_08BIT, 0x07}, /* Undocumented */ + {0x5749, CRL_REG_LEN_08BIT, 0xFF}, /* Undocumented */ + {0x574A, CRL_REG_LEN_08BIT, 0x00}, /* Undocumented */ + {0x574B, CRL_REG_LEN_08BIT, 0x00}, /* Undocumented */ + {0x7B75, CRL_REG_LEN_08BIT, 
0x0E}, /* Undocumented */ + {0x7B76, CRL_REG_LEN_08BIT, 0x09}, /* Undocumented */ + {0x7B77, CRL_REG_LEN_08BIT, 0x0C}, /* Undocumented */ + {0x7B78, CRL_REG_LEN_08BIT, 0x06}, /* Undocumented */ + {0x7B79, CRL_REG_LEN_08BIT, 0x3B}, /* Undocumented */ + {0x7B53, CRL_REG_LEN_08BIT, 0x01}, /* Undocumented */ + {0x9369, CRL_REG_LEN_08BIT, 0x5A}, /* Undocumented */ + {0x936B, CRL_REG_LEN_08BIT, 0x55}, /* Undocumented */ + {0x936D, CRL_REG_LEN_08BIT, 0x28}, /* Undocumented */ + {0x9304, CRL_REG_LEN_08BIT, 0x03}, /* Undocumented */ + {0x9305, CRL_REG_LEN_08BIT, 0x00}, /* Undocumented */ + {0x9E9A, CRL_REG_LEN_08BIT, 0x2F}, /* Undocumented */ + {0x9E9B, CRL_REG_LEN_08BIT, 0x2F}, /* Undocumented */ + {0x9E9C, CRL_REG_LEN_08BIT, 0x2F}, /* Undocumented */ + {0x9E9D, CRL_REG_LEN_08BIT, 0x00}, /* Undocumented */ + {0x9E9E, CRL_REG_LEN_08BIT, 0x00}, /* Undocumented */ + {0x9E9F, CRL_REG_LEN_08BIT, 0x00}, /* Undocumented */ + {0xA2A9, CRL_REG_LEN_08BIT, 0x60}, /* Undocumented */ + {0xA2B7, CRL_REG_LEN_08BIT, 0x00}, /* Undocumented */ + + /* Digital Crop & Scaling */ + {0x0401, CRL_REG_LEN_08BIT, 0x00}, /* Scaling mode: No Scaling */ + {0x0404, CRL_REG_LEN_08BIT, 0x00}, /* Down Scaling Factor M [8] */ + {0x0405, CRL_REG_LEN_08BIT, 0x10}, /* Down Scaling Factor M [7:0] */ + {0x0408, CRL_REG_LEN_08BIT, 0x00}, /* Crop Offset from X [12:8] */ + {0x0409, CRL_REG_LEN_08BIT, 0x00}, /* Crop Offset from X [7:0] */ + {0x040A, CRL_REG_LEN_08BIT, 0x00}, /* Crop Offset from Y [12:8] */ + {0x040B, CRL_REG_LEN_08BIT, 0x00}, /* Crop Offset from Y [7:0] */ + {0x040C, CRL_REG_LEN_08BIT, 0x0F}, /* Width after cropping [12:8] */ + {0x040D, CRL_REG_LEN_08BIT, 0xD8}, /* Width after cropping [7:0] */ + {0x040E, CRL_REG_LEN_08BIT, 0x0B}, /* Height after cropping [12:8] */ + {0x040F, CRL_REG_LEN_08BIT, 0xE0}, /* Height after cropping [7:0] */ + + /* Output Crop */ + {0x034C, CRL_REG_LEN_08BIT, 0x0F}, /* X output size [12:8] */ + {0x034D, CRL_REG_LEN_08BIT, 0xD8}, /* X output size [7:0] */ + {0x034E, 
CRL_REG_LEN_08BIT, 0x0B}, /* Y output size [12:8] */ + {0x034F, CRL_REG_LEN_08BIT, 0xE0}, /* Y output size [7:0] */ +}; + +static struct crl_mode_rep imx477_modes_slave[] = { + { + .sd_rects_items = ARRAY_SIZE(imx477_4056_3040_rects), + .sd_rects = imx477_4056_3040_rects, + .binn_hor = 1, + .binn_vert = 1, + .scale_m = 1, + .width = 4056, + .height = 3040, + .min_llp = 14612, + .min_fll = 8209, + .comp_items = 0, + .ctrl_data = 0, + .mode_regs_items = ARRAY_SIZE(imx477_4056_3040_19MHZ_slave), + .mode_regs = imx477_4056_3040_19MHZ_slave, + }, +}; + +static struct crl_flip_data imx477_flip_configurations_slave[] = { + { + .flip = CRL_FLIP_DEFAULT_NONE, + .pixel_order = CRL_PIXEL_ORDER_RGGB, + }, + { + .flip = CRL_FLIP_HFLIP, + .pixel_order = CRL_PIXEL_ORDER_GRBG, + }, + { + .flip = CRL_FLIP_VFLIP, + .pixel_order = CRL_PIXEL_ORDER_GBRG, + }, + { + .flip = CRL_FLIP_HFLIP_VFLIP, + .pixel_order = CRL_PIXEL_ORDER_BGGR, + } +}; + +struct crl_sensor_configuration imx477_slave_crl_configuration = { + + .power_items = ARRAY_SIZE(imx477_power_items), + .power_entities = imx477_power_items, + + .onetime_init_regs_items = ARRAY_SIZE(imx477_onetime_init_regset_slave), + .onetime_init_regs = imx477_onetime_init_regset_slave, + + .powerup_regs_items = ARRAY_SIZE(imx477_powerup_standby), + .powerup_regs = imx477_powerup_standby, + + .poweroff_regs_items = 0, + .poweroff_regs = 0, + + .id_reg_items = ARRAY_SIZE(imx477_sensor_detect_regset), + .id_regs = imx477_sensor_detect_regset, + + .subdev_items = ARRAY_SIZE(imx477_sensor_subdevs), + .subdevs = imx477_sensor_subdevs, + + .sensor_limits = &imx477_sensor_limits, + + .pll_config_items = ARRAY_SIZE(imx477_pll_configurations), + .pll_configs = imx477_pll_configurations, + + .modes_items = ARRAY_SIZE(imx477_modes_slave), + .modes = imx477_modes_slave, + + .streamon_regs_items = ARRAY_SIZE(imx477_streamon_regs), + .streamon_regs = imx477_streamon_regs, + + .streamoff_regs_items = ARRAY_SIZE(imx477_streamoff_regs), + .streamoff_regs = 
imx477_streamoff_regs, + + .v4l2_ctrls_items = ARRAY_SIZE(imx477_v4l2_ctrls), + .v4l2_ctrl_bank = imx477_v4l2_ctrls, + + .csi_fmts_items = ARRAY_SIZE(imx477_crl_csi_data_fmt), + .csi_fmts = imx477_crl_csi_data_fmt, + + .flip_items = ARRAY_SIZE(imx477_flip_configurations_slave), + .flip_data = imx477_flip_configurations_slave, + + .frame_desc_entries = ARRAY_SIZE(imx477_frame_desc), + .frame_desc_type = CRL_V4L2_MBUS_FRAME_DESC_TYPE_CSI2, + .frame_desc = imx477_frame_desc, +}; + +#endif /* __CRLMODULE_IMX477_SLAVE_CONFIGURATION_H_ */ diff --git a/drivers/media/i2c/crlmodule/crl_magna_configuration.h b/drivers/media/i2c/crlmodule/crl_magna_configuration.h new file mode 100644 index 0000000000000..cd1e316a2cabf --- /dev/null +++ b/drivers/media/i2c/crlmodule/crl_magna_configuration.h @@ -0,0 +1,209 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (C) 2017 - 2018 Intel Corporation + * + * Author: Kishore Bodke + * + */ + +#ifndef __CRLMODULE_MAGNA_CONFIGURATION_H_ +#define __CRLMODULE_MAGNA_CONFIGURATION_H_ + +#include "crlmodule-sensor-ds.h" + +static struct crl_pll_configuration magna_pll_configurations[] = { + { + .input_clk = 24000000, + .op_sys_clk = 400000000, + .bitsperpixel = 16, + .pixel_rate_csi = 529000000, + .pixel_rate_pa = 529000000, /* pixel_rate = MIPICLK*2 *4/12 */ + .csi_lanes = 4, + .comp_items = 0, + .ctrl_data = 0, + .pll_regs_items = 0, + .pll_regs = NULL, + }, + { + .input_clk = 24000000, + .op_sys_clk = 400000000, + .bitsperpixel = 10, + .pixel_rate_csi = 529000000, + .pixel_rate_pa = 529000000, /* pixel_rate = MIPICLK*2 *4/12 */ + .csi_lanes = 4, + .comp_items = 0, + .ctrl_data = 0, + .pll_regs_items = 0, + .pll_regs = NULL, + }, + { + .input_clk = 24000000, + .op_sys_clk = 400000000, + .bitsperpixel = 20, + .pixel_rate_csi = 529000000, + .pixel_rate_pa = 529000000, /* pixel_rate = MIPICLK*2 *4/12 */ + .csi_lanes = 4, + .comp_items = 0, + .ctrl_data = 0, + .pll_regs_items = 0, + .pll_regs = NULL, + } +}; + +static struct 
crl_subdev_rect_rep magna_1280_720_rects[] = { + { + .subdev_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .in_rect.left = 0, + .in_rect.top = 0, + .in_rect.width = 1280, + .in_rect.height = 720, + .out_rect.left = 0, + .out_rect.top = 0, + .out_rect.width = 1280, + .out_rect.height = 720, + }, + { + .subdev_type = CRL_SUBDEV_TYPE_BINNER, + .in_rect.left = 0, + .in_rect.top = 0, + .in_rect.width = 1280, + .in_rect.height = 720, + .out_rect.left = 0, + .out_rect.top = 0, + .out_rect.width = 1280, + .out_rect.height = 720, + }, +}; + +static struct crl_mode_rep magna_modes[] = { + { + .sd_rects_items = ARRAY_SIZE(magna_1280_720_rects), + .sd_rects = magna_1280_720_rects, + .binn_hor = 1, + .binn_vert = 1, + .scale_m = 1, + .width = 1280, + .height = 720, + .min_llp = 2250, + .min_fll = 1320, + }, +}; + +static struct crl_sensor_subdev_config magna_sensor_subdevs[] = { + { + .subdev_type = CRL_SUBDEV_TYPE_BINNER, + .name = "magna binner", + }, + { + .subdev_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .name = "magna pixel array", + } +}; + +static struct crl_sensor_limits magna_sensor_limits = { + .x_addr_min = 0, + .y_addr_min = 0, + .x_addr_max = 1280, + .y_addr_max = 720, + .min_frame_length_lines = 240, + .max_frame_length_lines = 65535, + .min_line_length_pixels = 320, + .max_line_length_pixels = 32752, +}; + +static struct crl_csi_data_fmt magna_crl_csi_data_fmt[] = { + { + .code = MEDIA_BUS_FMT_YUYV8_1X16, + .pixel_order = CRL_PIXEL_ORDER_IGNORE, + .bits_per_pixel = 16, + }, + { + .code = MEDIA_BUS_FMT_UYVY8_1X16, + .pixel_order = CRL_PIXEL_ORDER_IGNORE, + .bits_per_pixel = 16, + }, +}; + +static struct crl_v4l2_ctrl magna_v4l2_ctrls[] = { + { + .sd_type = CRL_SUBDEV_TYPE_BINNER, + .op_type = CRL_V4L2_CTRL_SET_OP, + .context = SENSOR_IDLE, + .ctrl_id = V4L2_CID_LINK_FREQ, + .name = "V4L2_CID_LINK_FREQ", + .type = CRL_V4L2_CTRL_TYPE_MENU_INT, + .data.v4l2_int_menu.def = 0, + .data.v4l2_int_menu.max = 0, + .data.v4l2_int_menu.menu = 0, + .flags = 0, + .impact = 
CRL_IMPACTS_NO_IMPACT, + .regs_items = 0, + .regs = 0, + .dep_items = 0, + .dep_ctrls = 0, + }, + { + .sd_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .op_type = CRL_V4L2_CTRL_GET_OP, + .context = SENSOR_POWERED_ON, + .ctrl_id = V4L2_CID_PIXEL_RATE, + .name = "V4L2_CID_PIXEL_RATE_PA", + .type = CRL_V4L2_CTRL_TYPE_INTEGER, + .data.std_data.min = 0, + .data.std_data.max = INT_MAX, + .data.std_data.step = 1, + .data.std_data.def = 0, + .flags = 0, + .impact = CRL_IMPACTS_NO_IMPACT, + .regs_items = 0, + .regs = 0, + .dep_items = 0, + .dep_ctrls = 0, + }, + { + .sd_type = CRL_SUBDEV_TYPE_BINNER, + .op_type = CRL_V4L2_CTRL_GET_OP, + .context = SENSOR_POWERED_ON, + .ctrl_id = V4L2_CID_PIXEL_RATE, + .name = "V4L2_CID_PIXEL_RATE_CSI", + .type = CRL_V4L2_CTRL_TYPE_INTEGER, + .data.std_data.min = 0, + .data.std_data.max = INT_MAX, + .data.std_data.step = 1, + .data.std_data.def = 0, + .flags = 0, + .impact = CRL_IMPACTS_NO_IMPACT, + .regs_items = 0, + .regs = 0, + .dep_items = 0, + .dep_ctrls = 0, + }, +}; + +struct crl_sensor_configuration magna_crl_configuration = { + + .subdev_items = ARRAY_SIZE(magna_sensor_subdevs), + .subdevs = magna_sensor_subdevs, + + .pll_config_items = ARRAY_SIZE(magna_pll_configurations), + .pll_configs = magna_pll_configurations, + + .sensor_limits = &magna_sensor_limits, + + .modes_items = ARRAY_SIZE(magna_modes), + .modes = magna_modes, + + .streamon_regs_items = 0, + .streamon_regs = 0, + + .streamoff_regs_items = 0, + .streamoff_regs = 0, + + .v4l2_ctrls_items = ARRAY_SIZE(magna_v4l2_ctrls), + .v4l2_ctrl_bank = magna_v4l2_ctrls, + + .csi_fmts_items = ARRAY_SIZE(magna_crl_csi_data_fmt), + .csi_fmts = magna_crl_csi_data_fmt, + +}; + +#endif /* __CRLMODULE_MAGNA_CONFIGURATION_H_ */ diff --git a/drivers/media/i2c/crlmodule/crl_ov10635_configuration.h b/drivers/media/i2c/crlmodule/crl_ov10635_configuration.h new file mode 100644 index 0000000000000..0f81f7de90e61 --- /dev/null +++ b/drivers/media/i2c/crlmodule/crl_ov10635_configuration.h @@ -0,0 +1,6368 
@@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (C) 2016 - 2018 Intel Corporation + * + * Author: Yunliang Ding + * + */ + +#ifndef __CRLMODULE_OV10635_CONFIGURATION_H_ +#define __CRLMODULE_OV10635_CONFIGURATION_H_ + +#include "crlmodule-sensor-ds.h" + +#define OV10635_REG_RESET 0x0103 + +static struct crl_register_write_rep ov10635_1280_800_YUV_HDR[] = { + {0x301b, CRL_REG_LEN_08BIT, 0xff}, + {0x301c, CRL_REG_LEN_08BIT, 0xff}, + {0x301a, CRL_REG_LEN_08BIT, 0xff}, + {0x3011, CRL_REG_LEN_08BIT, 0x42}, + {0x6900, CRL_REG_LEN_08BIT, 0x0c}, + {0x6901, CRL_REG_LEN_08BIT, 0x11}, + {0x3503, CRL_REG_LEN_08BIT, 0x10}, + {0x3025, CRL_REG_LEN_08BIT, 0x03}, + {0x3003, CRL_REG_LEN_08BIT, 0x20}, + {0x3004, CRL_REG_LEN_08BIT, 0x21}, + {0x3005, CRL_REG_LEN_08BIT, 0x20}, + {0x3006, CRL_REG_LEN_08BIT, 0x91}, + {0x3600, CRL_REG_LEN_08BIT, 0x74}, + {0x3601, CRL_REG_LEN_08BIT, 0x2b}, + {0x3612, CRL_REG_LEN_08BIT, 0x00}, + {0x3611, CRL_REG_LEN_08BIT, 0x67}, + {0x3633, CRL_REG_LEN_08BIT, 0xca}, + {0x3602, CRL_REG_LEN_08BIT, 0x2f}, + {0x3603, CRL_REG_LEN_08BIT, 0x00}, + {0x3630, CRL_REG_LEN_08BIT, 0x28}, + {0x3631, CRL_REG_LEN_08BIT, 0x16}, + {0x3714, CRL_REG_LEN_08BIT, 0x10}, + {0x371d, CRL_REG_LEN_08BIT, 0x01}, + {0x3007, CRL_REG_LEN_08BIT, 0x01}, + {0x3024, CRL_REG_LEN_08BIT, 0x01}, + {0x3020, CRL_REG_LEN_08BIT, 0x0b}, + {0x3702, CRL_REG_LEN_08BIT, 0x20}, + {0x3703, CRL_REG_LEN_08BIT, 0x48}, + {0x3704, CRL_REG_LEN_08BIT, 0x32}, + {0x3709, CRL_REG_LEN_08BIT, 0xa8}, + {0x3709, CRL_REG_LEN_08BIT, 0xa8}, + {0x370c, CRL_REG_LEN_08BIT, 0xc7}, + {0x370d, CRL_REG_LEN_08BIT, 0x80}, + {0x3712, CRL_REG_LEN_08BIT, 0x00}, + {0x3713, CRL_REG_LEN_08BIT, 0x20}, + {0x3715, CRL_REG_LEN_08BIT, 0x04}, + {0x381d, CRL_REG_LEN_08BIT, 0x40}, + {0x381c, CRL_REG_LEN_08BIT, 0x00}, + {0x3822, CRL_REG_LEN_08BIT, 0x50}, + {0x3824, CRL_REG_LEN_08BIT, 0x50}, + {0x3815, CRL_REG_LEN_08BIT, 0x8c}, + {0x3804, CRL_REG_LEN_08BIT, 0x05}, + {0x3805, CRL_REG_LEN_08BIT, 0x1f}, + {0x3800, CRL_REG_LEN_08BIT, 0x00}, + 
{0x3801, CRL_REG_LEN_08BIT, 0x00}, + {0x3806, CRL_REG_LEN_08BIT, 0x03}, + {0x3807, CRL_REG_LEN_08BIT, 0x29}, + {0x3802, CRL_REG_LEN_08BIT, 0x00}, + {0x3803, CRL_REG_LEN_08BIT, 0x04}, + {0x3808, CRL_REG_LEN_08BIT, 0x05}, + {0x3809, CRL_REG_LEN_08BIT, 0x00}, + {0x380a, CRL_REG_LEN_08BIT, 0x03}, + {0x380b, CRL_REG_LEN_08BIT, 0x20}, + {0x380c, CRL_REG_LEN_08BIT, 0x07}, + {0x380d, CRL_REG_LEN_08BIT, 0x71}, + {0x6e42, CRL_REG_LEN_08BIT, 0x03}, + {0x6e43, CRL_REG_LEN_08BIT, 0x48}, + {0x380e, CRL_REG_LEN_08BIT, 0x03}, + {0x380f, CRL_REG_LEN_08BIT, 0x48}, + {0x3813, CRL_REG_LEN_08BIT, 0x02}, + {0x3811, CRL_REG_LEN_08BIT, 0x10}, + {0x381f, CRL_REG_LEN_08BIT, 0x0c}, + {0x3828, CRL_REG_LEN_08BIT, 0x03}, + {0x3829, CRL_REG_LEN_08BIT, 0x10}, + {0x382a, CRL_REG_LEN_08BIT, 0x10}, + {0x382b, CRL_REG_LEN_08BIT, 0x10}, + {0x3621, CRL_REG_LEN_08BIT, 0x64}, + {0x5005, CRL_REG_LEN_08BIT, 0x08}, + {0x56d5, CRL_REG_LEN_08BIT, 0x00}, + {0x56d6, CRL_REG_LEN_08BIT, 0x80}, + {0x56d7, CRL_REG_LEN_08BIT, 0x00}, + {0x56d8, CRL_REG_LEN_08BIT, 0x00}, + {0x56d9, CRL_REG_LEN_08BIT, 0x00}, + {0x56da, CRL_REG_LEN_08BIT, 0x80}, + {0x56db, CRL_REG_LEN_08BIT, 0x00}, + {0x56dc, CRL_REG_LEN_08BIT, 0x00}, + {0x56e8, CRL_REG_LEN_08BIT, 0x00}, + {0x56e9, CRL_REG_LEN_08BIT, 0x7f}, + {0x56ea, CRL_REG_LEN_08BIT, 0x00}, + {0x56eb, CRL_REG_LEN_08BIT, 0x7f}, + {0x5100, CRL_REG_LEN_08BIT, 0x00}, + {0x5101, CRL_REG_LEN_08BIT, 0x80}, + {0x5102, CRL_REG_LEN_08BIT, 0x00}, + {0x5103, CRL_REG_LEN_08BIT, 0x80}, + {0x5104, CRL_REG_LEN_08BIT, 0x00}, + {0x5105, CRL_REG_LEN_08BIT, 0x80}, + {0x5106, CRL_REG_LEN_08BIT, 0x00}, + {0x5107, CRL_REG_LEN_08BIT, 0x80}, + {0x5108, CRL_REG_LEN_08BIT, 0x00}, + {0x5109, CRL_REG_LEN_08BIT, 0x00}, + {0x510a, CRL_REG_LEN_08BIT, 0x00}, + {0x510b, CRL_REG_LEN_08BIT, 0x00}, + {0x510c, CRL_REG_LEN_08BIT, 0x00}, + {0x510d, CRL_REG_LEN_08BIT, 0x00}, + {0x510e, CRL_REG_LEN_08BIT, 0x00}, + {0x510f, CRL_REG_LEN_08BIT, 0x00}, + {0x5110, CRL_REG_LEN_08BIT, 0x00}, + {0x5111, CRL_REG_LEN_08BIT, 0x80}, + 
{0x5112, CRL_REG_LEN_08BIT, 0x00}, + {0x5113, CRL_REG_LEN_08BIT, 0x80}, + {0x5114, CRL_REG_LEN_08BIT, 0x00}, + {0x5115, CRL_REG_LEN_08BIT, 0x80}, + {0x5116, CRL_REG_LEN_08BIT, 0x00}, + {0x5117, CRL_REG_LEN_08BIT, 0x80}, + {0x5118, CRL_REG_LEN_08BIT, 0x00}, + {0x5119, CRL_REG_LEN_08BIT, 0x00}, + {0x511a, CRL_REG_LEN_08BIT, 0x00}, + {0x511b, CRL_REG_LEN_08BIT, 0x00}, + {0x511c, CRL_REG_LEN_08BIT, 0x00}, + {0x511d, CRL_REG_LEN_08BIT, 0x00}, + {0x511e, CRL_REG_LEN_08BIT, 0x00}, + {0x511f, CRL_REG_LEN_08BIT, 0x00}, + {0x56d0, CRL_REG_LEN_08BIT, 0x00}, + {0x5006, CRL_REG_LEN_08BIT, 0x24}, + {0x5608, CRL_REG_LEN_08BIT, 0x0d}, + {0x52d7, CRL_REG_LEN_08BIT, 0x06}, + {0x528d, CRL_REG_LEN_08BIT, 0x08}, + {0x5293, CRL_REG_LEN_08BIT, 0x12}, + {0x52d3, CRL_REG_LEN_08BIT, 0x12}, + {0x5288, CRL_REG_LEN_08BIT, 0x06}, + {0x5289, CRL_REG_LEN_08BIT, 0x20}, + {0x52c8, CRL_REG_LEN_08BIT, 0x06}, + {0x52c9, CRL_REG_LEN_08BIT, 0x20}, + {0x52cd, CRL_REG_LEN_08BIT, 0x04}, + {0x5381, CRL_REG_LEN_08BIT, 0x00}, + {0x5382, CRL_REG_LEN_08BIT, 0xff}, + {0x5589, CRL_REG_LEN_08BIT, 0x76}, + {0x558a, CRL_REG_LEN_08BIT, 0x47}, + {0x558b, CRL_REG_LEN_08BIT, 0xef}, + {0x558c, CRL_REG_LEN_08BIT, 0xc9}, + {0x558d, CRL_REG_LEN_08BIT, 0x49}, + {0x558e, CRL_REG_LEN_08BIT, 0x30}, + {0x558f, CRL_REG_LEN_08BIT, 0x67}, + {0x5590, CRL_REG_LEN_08BIT, 0x3f}, + {0x5591, CRL_REG_LEN_08BIT, 0xf0}, + {0x5592, CRL_REG_LEN_08BIT, 0x10}, + {0x55a2, CRL_REG_LEN_08BIT, 0x6d}, + {0x55a3, CRL_REG_LEN_08BIT, 0x55}, + {0x55a4, CRL_REG_LEN_08BIT, 0xc3}, + {0x55a5, CRL_REG_LEN_08BIT, 0xb5}, + {0x55a6, CRL_REG_LEN_08BIT, 0x43}, + {0x55a7, CRL_REG_LEN_08BIT, 0x38}, + {0x55a8, CRL_REG_LEN_08BIT, 0x5f}, + {0x55a9, CRL_REG_LEN_08BIT, 0x4b}, + {0x55aa, CRL_REG_LEN_08BIT, 0xf0}, + {0x55ab, CRL_REG_LEN_08BIT, 0x10}, + {0x5581, CRL_REG_LEN_08BIT, 0x52}, + {0x5300, CRL_REG_LEN_08BIT, 0x01}, + {0x5301, CRL_REG_LEN_08BIT, 0x00}, + {0x5302, CRL_REG_LEN_08BIT, 0x00}, + {0x5303, CRL_REG_LEN_08BIT, 0x0e}, + {0x5304, CRL_REG_LEN_08BIT, 0x00}, + 
{0x5305, CRL_REG_LEN_08BIT, 0x0e}, + {0x5306, CRL_REG_LEN_08BIT, 0x00}, + {0x5307, CRL_REG_LEN_08BIT, 0x36}, + {0x5308, CRL_REG_LEN_08BIT, 0x00}, + {0x5309, CRL_REG_LEN_08BIT, 0xd9}, + {0x530a, CRL_REG_LEN_08BIT, 0x00}, + {0x530b, CRL_REG_LEN_08BIT, 0x0f}, + {0x530c, CRL_REG_LEN_08BIT, 0x00}, + {0x530d, CRL_REG_LEN_08BIT, 0x2c}, + {0x530e, CRL_REG_LEN_08BIT, 0x00}, + {0x530f, CRL_REG_LEN_08BIT, 0x59}, + {0x5310, CRL_REG_LEN_08BIT, 0x00}, + {0x5311, CRL_REG_LEN_08BIT, 0x7b}, + {0x5312, CRL_REG_LEN_08BIT, 0x00}, + {0x5313, CRL_REG_LEN_08BIT, 0x22}, + {0x5314, CRL_REG_LEN_08BIT, 0x00}, + {0x5315, CRL_REG_LEN_08BIT, 0xd5}, + {0x5316, CRL_REG_LEN_08BIT, 0x00}, + {0x5317, CRL_REG_LEN_08BIT, 0x13}, + {0x5318, CRL_REG_LEN_08BIT, 0x00}, + {0x5319, CRL_REG_LEN_08BIT, 0x18}, + {0x531a, CRL_REG_LEN_08BIT, 0x00}, + {0x531b, CRL_REG_LEN_08BIT, 0x26}, + {0x531c, CRL_REG_LEN_08BIT, 0x00}, + {0x531d, CRL_REG_LEN_08BIT, 0xdc}, + {0x531e, CRL_REG_LEN_08BIT, 0x00}, + {0x531f, CRL_REG_LEN_08BIT, 0x02}, + {0x5320, CRL_REG_LEN_08BIT, 0x00}, + {0x5321, CRL_REG_LEN_08BIT, 0x24}, + {0x5322, CRL_REG_LEN_08BIT, 0x00}, + {0x5323, CRL_REG_LEN_08BIT, 0x56}, + {0x5324, CRL_REG_LEN_08BIT, 0x00}, + {0x5325, CRL_REG_LEN_08BIT, 0x85}, + {0x5326, CRL_REG_LEN_08BIT, 0x00}, + {0x5327, CRL_REG_LEN_08BIT, 0x20}, + {0x5609, CRL_REG_LEN_08BIT, 0x01}, + {0x560a, CRL_REG_LEN_08BIT, 0x40}, + {0x560b, CRL_REG_LEN_08BIT, 0x01}, + {0x560c, CRL_REG_LEN_08BIT, 0x40}, + {0x560d, CRL_REG_LEN_08BIT, 0x00}, + {0x560e, CRL_REG_LEN_08BIT, 0xfa}, + {0x560f, CRL_REG_LEN_08BIT, 0x00}, + {0x5610, CRL_REG_LEN_08BIT, 0xfa}, + {0x5611, CRL_REG_LEN_08BIT, 0x02}, + {0x5612, CRL_REG_LEN_08BIT, 0x80}, + {0x5613, CRL_REG_LEN_08BIT, 0x02}, + {0x5614, CRL_REG_LEN_08BIT, 0x80}, + {0x5615, CRL_REG_LEN_08BIT, 0x01}, + {0x5616, CRL_REG_LEN_08BIT, 0x2c}, + {0x5617, CRL_REG_LEN_08BIT, 0x01}, + {0x5618, CRL_REG_LEN_08BIT, 0x2c}, + {0x563b, CRL_REG_LEN_08BIT, 0x01}, + {0x563c, CRL_REG_LEN_08BIT, 0x01}, + {0x563d, CRL_REG_LEN_08BIT, 0x01}, + 
{0x563e, CRL_REG_LEN_08BIT, 0x01}, + {0x563f, CRL_REG_LEN_08BIT, 0x03}, + {0x5640, CRL_REG_LEN_08BIT, 0x03}, + {0x5641, CRL_REG_LEN_08BIT, 0x03}, + {0x5642, CRL_REG_LEN_08BIT, 0x05}, + {0x5643, CRL_REG_LEN_08BIT, 0x09}, + {0x5644, CRL_REG_LEN_08BIT, 0x05}, + {0x5645, CRL_REG_LEN_08BIT, 0x05}, + {0x5646, CRL_REG_LEN_08BIT, 0x05}, + {0x5647, CRL_REG_LEN_08BIT, 0x05}, + {0x5651, CRL_REG_LEN_08BIT, 0x00}, + {0x5652, CRL_REG_LEN_08BIT, 0x80}, + {0x521a, CRL_REG_LEN_08BIT, 0x01}, + {0x521b, CRL_REG_LEN_08BIT, 0x03}, + {0x521c, CRL_REG_LEN_08BIT, 0x06}, + {0x521d, CRL_REG_LEN_08BIT, 0x0a}, + {0x521e, CRL_REG_LEN_08BIT, 0x0e}, + {0x521f, CRL_REG_LEN_08BIT, 0x12}, + {0x5220, CRL_REG_LEN_08BIT, 0x16}, + {0x5223, CRL_REG_LEN_08BIT, 0x02}, + {0x5225, CRL_REG_LEN_08BIT, 0x04}, + {0x5227, CRL_REG_LEN_08BIT, 0x08}, + {0x5229, CRL_REG_LEN_08BIT, 0x0c}, + {0x522b, CRL_REG_LEN_08BIT, 0x12}, + {0x522d, CRL_REG_LEN_08BIT, 0x18}, + {0x522f, CRL_REG_LEN_08BIT, 0x1e}, + {0x5241, CRL_REG_LEN_08BIT, 0x04}, + {0x5242, CRL_REG_LEN_08BIT, 0x01}, + {0x5243, CRL_REG_LEN_08BIT, 0x03}, + {0x5244, CRL_REG_LEN_08BIT, 0x06}, + {0x5245, CRL_REG_LEN_08BIT, 0x0a}, + {0x5246, CRL_REG_LEN_08BIT, 0x0e}, + {0x5247, CRL_REG_LEN_08BIT, 0x12}, + {0x5248, CRL_REG_LEN_08BIT, 0x16}, + {0x524a, CRL_REG_LEN_08BIT, 0x03}, + {0x524c, CRL_REG_LEN_08BIT, 0x04}, + {0x524e, CRL_REG_LEN_08BIT, 0x08}, + {0x5250, CRL_REG_LEN_08BIT, 0x0c}, + {0x5252, CRL_REG_LEN_08BIT, 0x12}, + {0x5254, CRL_REG_LEN_08BIT, 0x18}, + {0x5256, CRL_REG_LEN_08BIT, 0x1e}, + {0x4606, CRL_REG_LEN_08BIT, 0x07}, + {0x4607, CRL_REG_LEN_08BIT, 0x71}, + {0x460a, CRL_REG_LEN_08BIT, 0x02}, + {0x460b, CRL_REG_LEN_08BIT, 0x70}, + {0x460c, CRL_REG_LEN_08BIT, 0x00}, + {0x4620, CRL_REG_LEN_08BIT, 0x0e}, + {0x4700, CRL_REG_LEN_08BIT, 0x04}, + {0x4701, CRL_REG_LEN_08BIT, 0x00}, + {0x4702, CRL_REG_LEN_08BIT, 0x01}, + {0x4004, CRL_REG_LEN_08BIT, 0x04}, + {0x4005, CRL_REG_LEN_08BIT, 0x18}, + {0x4001, CRL_REG_LEN_08BIT, 0x06}, + {0x4050, CRL_REG_LEN_08BIT, 0x22}, + 
{0x4051, CRL_REG_LEN_08BIT, 0x24}, + {0x4052, CRL_REG_LEN_08BIT, 0x02}, + {0x4057, CRL_REG_LEN_08BIT, 0x9c}, + {0x405a, CRL_REG_LEN_08BIT, 0x00}, + {0x3832, CRL_REG_LEN_08BIT, 0x00}, + {0x3833, CRL_REG_LEN_08BIT, 0x02}, + {0x3834, CRL_REG_LEN_08BIT, 0x03}, + {0x3835, CRL_REG_LEN_08BIT, 0x48}, + {0x302e, CRL_REG_LEN_08BIT, 0x00}, + {0x4202, CRL_REG_LEN_08BIT, 0x02}, + {0x3023, CRL_REG_LEN_08BIT, 0x10}, + {0x0100, CRL_REG_LEN_08BIT, 0x01}, + {0x0100, CRL_REG_LEN_08BIT, 0x01}, + {0x6f10, CRL_REG_LEN_08BIT, 0x07}, + {0x6f11, CRL_REG_LEN_08BIT, 0x82}, + {0x6f12, CRL_REG_LEN_08BIT, 0x04}, + {0x6f13, CRL_REG_LEN_08BIT, 0x00}, + {0x6f14, CRL_REG_LEN_08BIT, 0x1f}, + {0x6f15, CRL_REG_LEN_08BIT, 0xdd}, + {0x6f16, CRL_REG_LEN_08BIT, 0x04}, + {0x6f17, CRL_REG_LEN_08BIT, 0x04}, + {0x6f18, CRL_REG_LEN_08BIT, 0x36}, + {0x6f19, CRL_REG_LEN_08BIT, 0x66}, + {0x6f1a, CRL_REG_LEN_08BIT, 0x04}, + {0x6f1b, CRL_REG_LEN_08BIT, 0x08}, + {0x6f1c, CRL_REG_LEN_08BIT, 0x0c}, + {0x6f1d, CRL_REG_LEN_08BIT, 0xe7}, + {0x6f1e, CRL_REG_LEN_08BIT, 0x04}, + {0x6f1f, CRL_REG_LEN_08BIT, 0x0c}, + {0xd000, CRL_REG_LEN_08BIT, 0x19}, + {0xd001, CRL_REG_LEN_08BIT, 0xa0}, + {0xd002, CRL_REG_LEN_08BIT, 0x00}, + {0xd003, CRL_REG_LEN_08BIT, 0x01}, + {0xd004, CRL_REG_LEN_08BIT, 0xa9}, + {0xd005, CRL_REG_LEN_08BIT, 0xad}, + {0xd006, CRL_REG_LEN_08BIT, 0x10}, + {0xd007, CRL_REG_LEN_08BIT, 0x40}, + {0xd008, CRL_REG_LEN_08BIT, 0x44}, + {0xd009, CRL_REG_LEN_08BIT, 0x00}, + {0xd00a, CRL_REG_LEN_08BIT, 0x68}, + {0xd00b, CRL_REG_LEN_08BIT, 0x00}, + {0xd00c, CRL_REG_LEN_08BIT, 0x15}, + {0xd00d, CRL_REG_LEN_08BIT, 0x00}, + {0xd00e, CRL_REG_LEN_08BIT, 0x00}, + {0xd00f, CRL_REG_LEN_08BIT, 0x00}, + {0xd010, CRL_REG_LEN_08BIT, 0x19}, + {0xd011, CRL_REG_LEN_08BIT, 0xa0}, + {0xd012, CRL_REG_LEN_08BIT, 0x00}, + {0xd013, CRL_REG_LEN_08BIT, 0x01}, + {0xd014, CRL_REG_LEN_08BIT, 0xa9}, + {0xd015, CRL_REG_LEN_08BIT, 0xad}, + {0xd016, CRL_REG_LEN_08BIT, 0x13}, + {0xd017, CRL_REG_LEN_08BIT, 0xd0}, + {0xd018, CRL_REG_LEN_08BIT, 0x44}, + 
{0xd019, CRL_REG_LEN_08BIT, 0x00}, + {0xd01a, CRL_REG_LEN_08BIT, 0x68}, + {0xd01b, CRL_REG_LEN_08BIT, 0x00}, + {0xd01c, CRL_REG_LEN_08BIT, 0x15}, + {0xd01d, CRL_REG_LEN_08BIT, 0x00}, + {0xd01e, CRL_REG_LEN_08BIT, 0x00}, + {0xd01f, CRL_REG_LEN_08BIT, 0x00}, + {0xd020, CRL_REG_LEN_08BIT, 0x19}, + {0xd021, CRL_REG_LEN_08BIT, 0xa0}, + {0xd022, CRL_REG_LEN_08BIT, 0x00}, + {0xd023, CRL_REG_LEN_08BIT, 0x01}, + {0xd024, CRL_REG_LEN_08BIT, 0xa9}, + {0xd025, CRL_REG_LEN_08BIT, 0xad}, + {0xd026, CRL_REG_LEN_08BIT, 0x14}, + {0xd027, CRL_REG_LEN_08BIT, 0xb8}, + {0xd028, CRL_REG_LEN_08BIT, 0x44}, + {0xd029, CRL_REG_LEN_08BIT, 0x00}, + {0xd02a, CRL_REG_LEN_08BIT, 0x68}, + {0xd02b, CRL_REG_LEN_08BIT, 0x00}, + {0xd02c, CRL_REG_LEN_08BIT, 0x15}, + {0xd02d, CRL_REG_LEN_08BIT, 0x00}, + {0xd02e, CRL_REG_LEN_08BIT, 0x00}, + {0xd02f, CRL_REG_LEN_08BIT, 0x00}, + {0xd030, CRL_REG_LEN_08BIT, 0x19}, + {0xd031, CRL_REG_LEN_08BIT, 0xa0}, + {0xd032, CRL_REG_LEN_08BIT, 0x00}, + {0xd033, CRL_REG_LEN_08BIT, 0x01}, + {0xd034, CRL_REG_LEN_08BIT, 0xa9}, + {0xd035, CRL_REG_LEN_08BIT, 0xad}, + {0xd036, CRL_REG_LEN_08BIT, 0x14}, + {0xd037, CRL_REG_LEN_08BIT, 0xdc}, + {0xd038, CRL_REG_LEN_08BIT, 0x44}, + {0xd039, CRL_REG_LEN_08BIT, 0x00}, + {0xd03a, CRL_REG_LEN_08BIT, 0x68}, + {0xd03b, CRL_REG_LEN_08BIT, 0x00}, + {0xd03c, CRL_REG_LEN_08BIT, 0x15}, + {0xd03d, CRL_REG_LEN_08BIT, 0x00}, + {0xd03e, CRL_REG_LEN_08BIT, 0x00}, + {0xd03f, CRL_REG_LEN_08BIT, 0x00}, + {0xd040, CRL_REG_LEN_08BIT, 0x9c}, + {0xd041, CRL_REG_LEN_08BIT, 0x21}, + {0xd042, CRL_REG_LEN_08BIT, 0xff}, + {0xd043, CRL_REG_LEN_08BIT, 0xe4}, + {0xd044, CRL_REG_LEN_08BIT, 0xd4}, + {0xd045, CRL_REG_LEN_08BIT, 0x01}, + {0xd046, CRL_REG_LEN_08BIT, 0x48}, + {0xd047, CRL_REG_LEN_08BIT, 0x00}, + {0xd048, CRL_REG_LEN_08BIT, 0xd4}, + {0xd049, CRL_REG_LEN_08BIT, 0x01}, + {0xd04a, CRL_REG_LEN_08BIT, 0x50}, + {0xd04b, CRL_REG_LEN_08BIT, 0x04}, + {0xd04c, CRL_REG_LEN_08BIT, 0xd4}, + {0xd04d, CRL_REG_LEN_08BIT, 0x01}, + {0xd04e, CRL_REG_LEN_08BIT, 0x60}, + 
{0xd04f, CRL_REG_LEN_08BIT, 0x08}, + {0xd050, CRL_REG_LEN_08BIT, 0xd4}, + {0xd051, CRL_REG_LEN_08BIT, 0x01}, + {0xd052, CRL_REG_LEN_08BIT, 0x70}, + {0xd053, CRL_REG_LEN_08BIT, 0x0c}, + {0xd054, CRL_REG_LEN_08BIT, 0xd4}, + {0xd055, CRL_REG_LEN_08BIT, 0x01}, + {0xd056, CRL_REG_LEN_08BIT, 0x80}, + {0xd057, CRL_REG_LEN_08BIT, 0x10}, + {0xd058, CRL_REG_LEN_08BIT, 0x19}, + {0xd059, CRL_REG_LEN_08BIT, 0xc0}, + {0xd05a, CRL_REG_LEN_08BIT, 0x00}, + {0xd05b, CRL_REG_LEN_08BIT, 0x01}, + {0xd05c, CRL_REG_LEN_08BIT, 0xa9}, + {0xd05d, CRL_REG_LEN_08BIT, 0xce}, + {0xd05e, CRL_REG_LEN_08BIT, 0x02}, + {0xd05f, CRL_REG_LEN_08BIT, 0xa4}, + {0xd060, CRL_REG_LEN_08BIT, 0x9c}, + {0xd061, CRL_REG_LEN_08BIT, 0xa0}, + {0xd062, CRL_REG_LEN_08BIT, 0x00}, + {0xd063, CRL_REG_LEN_08BIT, 0x00}, + {0xd064, CRL_REG_LEN_08BIT, 0x84}, + {0xd065, CRL_REG_LEN_08BIT, 0x6e}, + {0xd066, CRL_REG_LEN_08BIT, 0x00}, + {0xd067, CRL_REG_LEN_08BIT, 0x00}, + {0xd068, CRL_REG_LEN_08BIT, 0xd8}, + {0xd069, CRL_REG_LEN_08BIT, 0x03}, + {0xd06a, CRL_REG_LEN_08BIT, 0x28}, + {0xd06b, CRL_REG_LEN_08BIT, 0x76}, + {0xd06c, CRL_REG_LEN_08BIT, 0x1a}, + {0xd06d, CRL_REG_LEN_08BIT, 0x00}, + {0xd06e, CRL_REG_LEN_08BIT, 0x00}, + {0xd06f, CRL_REG_LEN_08BIT, 0x01}, + {0xd070, CRL_REG_LEN_08BIT, 0xaa}, + {0xd071, CRL_REG_LEN_08BIT, 0x10}, + {0xd072, CRL_REG_LEN_08BIT, 0x03}, + {0xd073, CRL_REG_LEN_08BIT, 0xf0}, + {0xd074, CRL_REG_LEN_08BIT, 0x18}, + {0xd075, CRL_REG_LEN_08BIT, 0x60}, + {0xd076, CRL_REG_LEN_08BIT, 0x00}, + {0xd077, CRL_REG_LEN_08BIT, 0x01}, + {0xd078, CRL_REG_LEN_08BIT, 0xa8}, + {0xd079, CRL_REG_LEN_08BIT, 0x63}, + {0xd07a, CRL_REG_LEN_08BIT, 0x07}, + {0xd07b, CRL_REG_LEN_08BIT, 0x80}, + {0xd07c, CRL_REG_LEN_08BIT, 0xe0}, + {0xd07d, CRL_REG_LEN_08BIT, 0xa0}, + {0xd07e, CRL_REG_LEN_08BIT, 0x00}, + {0xd07f, CRL_REG_LEN_08BIT, 0x04}, + {0xd080, CRL_REG_LEN_08BIT, 0x18}, + {0xd081, CRL_REG_LEN_08BIT, 0xc0}, + {0xd082, CRL_REG_LEN_08BIT, 0x00}, + {0xd083, CRL_REG_LEN_08BIT, 0x00}, + {0xd084, CRL_REG_LEN_08BIT, 0xa8}, + 
{0xd085, CRL_REG_LEN_08BIT, 0xc6}, + {0xd086, CRL_REG_LEN_08BIT, 0x00}, + {0xd087, CRL_REG_LEN_08BIT, 0x00}, + {0xd088, CRL_REG_LEN_08BIT, 0x8c}, + {0xd089, CRL_REG_LEN_08BIT, 0x63}, + {0xd08a, CRL_REG_LEN_08BIT, 0x00}, + {0xd08b, CRL_REG_LEN_08BIT, 0x00}, + {0xd08c, CRL_REG_LEN_08BIT, 0xd4}, + {0xd08d, CRL_REG_LEN_08BIT, 0x01}, + {0xd08e, CRL_REG_LEN_08BIT, 0x28}, + {0xd08f, CRL_REG_LEN_08BIT, 0x14}, + {0xd090, CRL_REG_LEN_08BIT, 0xd4}, + {0xd091, CRL_REG_LEN_08BIT, 0x01}, + {0xd092, CRL_REG_LEN_08BIT, 0x30}, + {0xd093, CRL_REG_LEN_08BIT, 0x18}, + {0xd094, CRL_REG_LEN_08BIT, 0x07}, + {0xd095, CRL_REG_LEN_08BIT, 0xff}, + {0xd096, CRL_REG_LEN_08BIT, 0xf8}, + {0xd097, CRL_REG_LEN_08BIT, 0xfd}, + {0xd098, CRL_REG_LEN_08BIT, 0x9c}, + {0xd099, CRL_REG_LEN_08BIT, 0x80}, + {0xd09a, CRL_REG_LEN_08BIT, 0x00}, + {0xd09b, CRL_REG_LEN_08BIT, 0x03}, + {0xd09c, CRL_REG_LEN_08BIT, 0xa5}, + {0xd09d, CRL_REG_LEN_08BIT, 0x6b}, + {0xd09e, CRL_REG_LEN_08BIT, 0x00}, + {0xd09f, CRL_REG_LEN_08BIT, 0xff}, + {0xd0a0, CRL_REG_LEN_08BIT, 0x18}, + {0xd0a1, CRL_REG_LEN_08BIT, 0xc0}, + {0xd0a2, CRL_REG_LEN_08BIT, 0x00}, + {0xd0a3, CRL_REG_LEN_08BIT, 0x01}, + {0xd0a4, CRL_REG_LEN_08BIT, 0xa8}, + {0xd0a5, CRL_REG_LEN_08BIT, 0xc6}, + {0xd0a6, CRL_REG_LEN_08BIT, 0x01}, + {0xd0a7, CRL_REG_LEN_08BIT, 0x02}, + {0xd0a8, CRL_REG_LEN_08BIT, 0xe1}, + {0xd0a9, CRL_REG_LEN_08BIT, 0x6b}, + {0xd0aa, CRL_REG_LEN_08BIT, 0x58}, + {0xd0ab, CRL_REG_LEN_08BIT, 0x00}, + {0xd0ac, CRL_REG_LEN_08BIT, 0x84}, + {0xd0ad, CRL_REG_LEN_08BIT, 0x8e}, + {0xd0ae, CRL_REG_LEN_08BIT, 0x00}, + {0xd0af, CRL_REG_LEN_08BIT, 0x00}, + {0xd0b0, CRL_REG_LEN_08BIT, 0xe1}, + {0xd0b1, CRL_REG_LEN_08BIT, 0x6b}, + {0xd0b2, CRL_REG_LEN_08BIT, 0x30}, + {0xd0b3, CRL_REG_LEN_08BIT, 0x00}, + {0xd0b4, CRL_REG_LEN_08BIT, 0x98}, + {0xd0b5, CRL_REG_LEN_08BIT, 0xb0}, + {0xd0b6, CRL_REG_LEN_08BIT, 0x00}, + {0xd0b7, CRL_REG_LEN_08BIT, 0x00}, + {0xd0b8, CRL_REG_LEN_08BIT, 0x8c}, + {0xd0b9, CRL_REG_LEN_08BIT, 0x64}, + {0xd0ba, CRL_REG_LEN_08BIT, 0x00}, + 
{0xd0bb, CRL_REG_LEN_08BIT, 0x6e}, + {0xd0bc, CRL_REG_LEN_08BIT, 0xe5}, + {0xd0bd, CRL_REG_LEN_08BIT, 0xa5}, + {0xd0be, CRL_REG_LEN_08BIT, 0x18}, + {0xd0bf, CRL_REG_LEN_08BIT, 0x00}, + {0xd0c0, CRL_REG_LEN_08BIT, 0x10}, + {0xd0c1, CRL_REG_LEN_08BIT, 0x00}, + {0xd0c2, CRL_REG_LEN_08BIT, 0x00}, + {0xd0c3, CRL_REG_LEN_08BIT, 0x06}, + {0xd0c4, CRL_REG_LEN_08BIT, 0x95}, + {0xd0c5, CRL_REG_LEN_08BIT, 0x8b}, + {0xd0c6, CRL_REG_LEN_08BIT, 0x00}, + {0xd0c7, CRL_REG_LEN_08BIT, 0x00}, + {0xd0c8, CRL_REG_LEN_08BIT, 0x94}, + {0xd0c9, CRL_REG_LEN_08BIT, 0xa4}, + {0xd0ca, CRL_REG_LEN_08BIT, 0x00}, + {0xd0cb, CRL_REG_LEN_08BIT, 0x70}, + {0xd0cc, CRL_REG_LEN_08BIT, 0xe5}, + {0xd0cd, CRL_REG_LEN_08BIT, 0x65}, + {0xd0ce, CRL_REG_LEN_08BIT, 0x60}, + {0xd0cf, CRL_REG_LEN_08BIT, 0x00}, + {0xd0d0, CRL_REG_LEN_08BIT, 0x0c}, + {0xd0d1, CRL_REG_LEN_08BIT, 0x00}, + {0xd0d2, CRL_REG_LEN_08BIT, 0x00}, + {0xd0d3, CRL_REG_LEN_08BIT, 0x62}, + {0xd0d4, CRL_REG_LEN_08BIT, 0x15}, + {0xd0d5, CRL_REG_LEN_08BIT, 0x00}, + {0xd0d6, CRL_REG_LEN_08BIT, 0x00}, + {0xd0d7, CRL_REG_LEN_08BIT, 0x00}, + {0xd0d8, CRL_REG_LEN_08BIT, 0x18}, + {0xd0d9, CRL_REG_LEN_08BIT, 0x60}, + {0xd0da, CRL_REG_LEN_08BIT, 0x80}, + {0xd0db, CRL_REG_LEN_08BIT, 0x06}, + {0xd0dc, CRL_REG_LEN_08BIT, 0xa8}, + {0xd0dd, CRL_REG_LEN_08BIT, 0x83}, + {0xd0de, CRL_REG_LEN_08BIT, 0x38}, + {0xd0df, CRL_REG_LEN_08BIT, 0x29}, + {0xd0e0, CRL_REG_LEN_08BIT, 0xa8}, + {0xd0e1, CRL_REG_LEN_08BIT, 0xe3}, + {0xd0e2, CRL_REG_LEN_08BIT, 0x40}, + {0xd0e3, CRL_REG_LEN_08BIT, 0x08}, + {0xd0e4, CRL_REG_LEN_08BIT, 0x8c}, + {0xd0e5, CRL_REG_LEN_08BIT, 0x84}, + {0xd0e6, CRL_REG_LEN_08BIT, 0x00}, + {0xd0e7, CRL_REG_LEN_08BIT, 0x00}, + {0xd0e8, CRL_REG_LEN_08BIT, 0xa8}, + {0xd0e9, CRL_REG_LEN_08BIT, 0xa3}, + {0xd0ea, CRL_REG_LEN_08BIT, 0x40}, + {0xd0eb, CRL_REG_LEN_08BIT, 0x09}, + {0xd0ec, CRL_REG_LEN_08BIT, 0xa8}, + {0xd0ed, CRL_REG_LEN_08BIT, 0xc3}, + {0xd0ee, CRL_REG_LEN_08BIT, 0x38}, + {0xd0ef, CRL_REG_LEN_08BIT, 0x2a}, + {0xd0f0, CRL_REG_LEN_08BIT, 0xd8}, + 
{0xd0f1, CRL_REG_LEN_08BIT, 0x07}, + {0xd0f2, CRL_REG_LEN_08BIT, 0x20}, + {0xd0f3, CRL_REG_LEN_08BIT, 0x00}, + {0xd0f4, CRL_REG_LEN_08BIT, 0x8c}, + {0xd0f5, CRL_REG_LEN_08BIT, 0x66}, + {0xd0f6, CRL_REG_LEN_08BIT, 0x00}, + {0xd0f7, CRL_REG_LEN_08BIT, 0x00}, + {0xd0f8, CRL_REG_LEN_08BIT, 0xd8}, + {0xd0f9, CRL_REG_LEN_08BIT, 0x05}, + {0xd0fa, CRL_REG_LEN_08BIT, 0x18}, + {0xd0fb, CRL_REG_LEN_08BIT, 0x00}, + {0xd0fc, CRL_REG_LEN_08BIT, 0x18}, + {0xd0fd, CRL_REG_LEN_08BIT, 0x60}, + {0xd0fe, CRL_REG_LEN_08BIT, 0x00}, + {0xd0ff, CRL_REG_LEN_08BIT, 0x01}, + {0xd100, CRL_REG_LEN_08BIT, 0x98}, + {0xd101, CRL_REG_LEN_08BIT, 0x90}, + {0xd102, CRL_REG_LEN_08BIT, 0x00}, + {0xd103, CRL_REG_LEN_08BIT, 0x00}, + {0xd104, CRL_REG_LEN_08BIT, 0x84}, + {0xd105, CRL_REG_LEN_08BIT, 0xae}, + {0xd106, CRL_REG_LEN_08BIT, 0x00}, + {0xd107, CRL_REG_LEN_08BIT, 0x00}, + {0xd108, CRL_REG_LEN_08BIT, 0xa8}, + {0xd109, CRL_REG_LEN_08BIT, 0x63}, + {0xd10a, CRL_REG_LEN_08BIT, 0x06}, + {0xd10b, CRL_REG_LEN_08BIT, 0x4c}, + {0xd10c, CRL_REG_LEN_08BIT, 0x9c}, + {0xd10d, CRL_REG_LEN_08BIT, 0xc0}, + {0xd10e, CRL_REG_LEN_08BIT, 0x00}, + {0xd10f, CRL_REG_LEN_08BIT, 0x00}, + {0xd110, CRL_REG_LEN_08BIT, 0xd8}, + {0xd111, CRL_REG_LEN_08BIT, 0x03}, + {0xd112, CRL_REG_LEN_08BIT, 0x30}, + {0xd113, CRL_REG_LEN_08BIT, 0x00}, + {0xd114, CRL_REG_LEN_08BIT, 0x8c}, + {0xd115, CRL_REG_LEN_08BIT, 0x65}, + {0xd116, CRL_REG_LEN_08BIT, 0x00}, + {0xd117, CRL_REG_LEN_08BIT, 0x6e}, + {0xd118, CRL_REG_LEN_08BIT, 0xe5}, + {0xd119, CRL_REG_LEN_08BIT, 0x84}, + {0xd11a, CRL_REG_LEN_08BIT, 0x18}, + {0xd11b, CRL_REG_LEN_08BIT, 0x00}, + {0xd11c, CRL_REG_LEN_08BIT, 0x10}, + {0xd11d, CRL_REG_LEN_08BIT, 0x00}, + {0xd11e, CRL_REG_LEN_08BIT, 0x00}, + {0xd11f, CRL_REG_LEN_08BIT, 0x07}, + {0xd120, CRL_REG_LEN_08BIT, 0x18}, + {0xd121, CRL_REG_LEN_08BIT, 0x80}, + {0xd122, CRL_REG_LEN_08BIT, 0x80}, + {0xd123, CRL_REG_LEN_08BIT, 0x06}, + {0xd124, CRL_REG_LEN_08BIT, 0x94}, + {0xd125, CRL_REG_LEN_08BIT, 0x65}, + {0xd126, CRL_REG_LEN_08BIT, 0x00}, + 
{0xd127, CRL_REG_LEN_08BIT, 0x70}, + {0xd128, CRL_REG_LEN_08BIT, 0xe5}, + {0xd129, CRL_REG_LEN_08BIT, 0x43}, + {0xd12a, CRL_REG_LEN_08BIT, 0x60}, + {0xd12b, CRL_REG_LEN_08BIT, 0x00}, + {0xd12c, CRL_REG_LEN_08BIT, 0x0c}, + {0xd12d, CRL_REG_LEN_08BIT, 0x00}, + {0xd12e, CRL_REG_LEN_08BIT, 0x00}, + {0xd12f, CRL_REG_LEN_08BIT, 0x3e}, + {0xd130, CRL_REG_LEN_08BIT, 0xa8}, + {0xd131, CRL_REG_LEN_08BIT, 0x64}, + {0xd132, CRL_REG_LEN_08BIT, 0x38}, + {0xd133, CRL_REG_LEN_08BIT, 0x24}, + {0xd134, CRL_REG_LEN_08BIT, 0x18}, + {0xd135, CRL_REG_LEN_08BIT, 0x80}, + {0xd136, CRL_REG_LEN_08BIT, 0x80}, + {0xd137, CRL_REG_LEN_08BIT, 0x06}, + {0xd138, CRL_REG_LEN_08BIT, 0xa8}, + {0xd139, CRL_REG_LEN_08BIT, 0x64}, + {0xd13a, CRL_REG_LEN_08BIT, 0x38}, + {0xd13b, CRL_REG_LEN_08BIT, 0x24}, + {0xd13c, CRL_REG_LEN_08BIT, 0x8c}, + {0xd13d, CRL_REG_LEN_08BIT, 0x63}, + {0xd13e, CRL_REG_LEN_08BIT, 0x00}, + {0xd13f, CRL_REG_LEN_08BIT, 0x00}, + {0xd140, CRL_REG_LEN_08BIT, 0xa4}, + {0xd141, CRL_REG_LEN_08BIT, 0x63}, + {0xd142, CRL_REG_LEN_08BIT, 0x00}, + {0xd143, CRL_REG_LEN_08BIT, 0x40}, + {0xd144, CRL_REG_LEN_08BIT, 0xbc}, + {0xd145, CRL_REG_LEN_08BIT, 0x23}, + {0xd146, CRL_REG_LEN_08BIT, 0x00}, + {0xd147, CRL_REG_LEN_08BIT, 0x00}, + {0xd148, CRL_REG_LEN_08BIT, 0x0c}, + {0xd149, CRL_REG_LEN_08BIT, 0x00}, + {0xd14a, CRL_REG_LEN_08BIT, 0x00}, + {0xd14b, CRL_REG_LEN_08BIT, 0x2a}, + {0xd14c, CRL_REG_LEN_08BIT, 0xa8}, + {0xd14d, CRL_REG_LEN_08BIT, 0x64}, + {0xd14e, CRL_REG_LEN_08BIT, 0x6e}, + {0xd14f, CRL_REG_LEN_08BIT, 0x44}, + {0xd150, CRL_REG_LEN_08BIT, 0x19}, + {0xd151, CRL_REG_LEN_08BIT, 0x00}, + {0xd152, CRL_REG_LEN_08BIT, 0x80}, + {0xd153, CRL_REG_LEN_08BIT, 0x06}, + {0xd154, CRL_REG_LEN_08BIT, 0xa8}, + {0xd155, CRL_REG_LEN_08BIT, 0xe8}, + {0xd156, CRL_REG_LEN_08BIT, 0x3d}, + {0xd157, CRL_REG_LEN_08BIT, 0x05}, + {0xd158, CRL_REG_LEN_08BIT, 0x8c}, + {0xd159, CRL_REG_LEN_08BIT, 0x67}, + {0xd15a, CRL_REG_LEN_08BIT, 0x00}, + {0xd15b, CRL_REG_LEN_08BIT, 0x00}, + {0xd15c, CRL_REG_LEN_08BIT, 0xb8}, + 
{0xd15d, CRL_REG_LEN_08BIT, 0x63}, + {0xd15e, CRL_REG_LEN_08BIT, 0x00}, + {0xd15f, CRL_REG_LEN_08BIT, 0x18}, + {0xd160, CRL_REG_LEN_08BIT, 0xb8}, + {0xd161, CRL_REG_LEN_08BIT, 0x63}, + {0xd162, CRL_REG_LEN_08BIT, 0x00}, + {0xd163, CRL_REG_LEN_08BIT, 0x98}, + {0xd164, CRL_REG_LEN_08BIT, 0xbc}, + {0xd165, CRL_REG_LEN_08BIT, 0x03}, + {0xd166, CRL_REG_LEN_08BIT, 0x00}, + {0xd167, CRL_REG_LEN_08BIT, 0x00}, + {0xd168, CRL_REG_LEN_08BIT, 0x10}, + {0xd169, CRL_REG_LEN_08BIT, 0x00}, + {0xd16a, CRL_REG_LEN_08BIT, 0x00}, + {0xd16b, CRL_REG_LEN_08BIT, 0x10}, + {0xd16c, CRL_REG_LEN_08BIT, 0xa9}, + {0xd16d, CRL_REG_LEN_08BIT, 0x48}, + {0xd16e, CRL_REG_LEN_08BIT, 0x67}, + {0xd16f, CRL_REG_LEN_08BIT, 0x02}, + {0xd170, CRL_REG_LEN_08BIT, 0xb8}, + {0xd171, CRL_REG_LEN_08BIT, 0xa3}, + {0xd172, CRL_REG_LEN_08BIT, 0x00}, + {0xd173, CRL_REG_LEN_08BIT, 0x19}, + {0xd174, CRL_REG_LEN_08BIT, 0x8c}, + {0xd175, CRL_REG_LEN_08BIT, 0x8a}, + {0xd176, CRL_REG_LEN_08BIT, 0x00}, + {0xd177, CRL_REG_LEN_08BIT, 0x00}, + {0xd178, CRL_REG_LEN_08BIT, 0xa9}, + {0xd179, CRL_REG_LEN_08BIT, 0x68}, + {0xd17a, CRL_REG_LEN_08BIT, 0x67}, + {0xd17b, CRL_REG_LEN_08BIT, 0x03}, + {0xd17c, CRL_REG_LEN_08BIT, 0xb8}, + {0xd17d, CRL_REG_LEN_08BIT, 0xc4}, + {0xd17e, CRL_REG_LEN_08BIT, 0x00}, + {0xd17f, CRL_REG_LEN_08BIT, 0x08}, + {0xd180, CRL_REG_LEN_08BIT, 0x8c}, + {0xd181, CRL_REG_LEN_08BIT, 0x6b}, + {0xd182, CRL_REG_LEN_08BIT, 0x00}, + {0xd183, CRL_REG_LEN_08BIT, 0x00}, + {0xd184, CRL_REG_LEN_08BIT, 0xb8}, + {0xd185, CRL_REG_LEN_08BIT, 0x85}, + {0xd186, CRL_REG_LEN_08BIT, 0x00}, + {0xd187, CRL_REG_LEN_08BIT, 0x98}, + {0xd188, CRL_REG_LEN_08BIT, 0xe0}, + {0xd189, CRL_REG_LEN_08BIT, 0x63}, + {0xd18a, CRL_REG_LEN_08BIT, 0x30}, + {0xd18b, CRL_REG_LEN_08BIT, 0x04}, + {0xd18c, CRL_REG_LEN_08BIT, 0xe0}, + {0xd18d, CRL_REG_LEN_08BIT, 0x64}, + {0xd18e, CRL_REG_LEN_08BIT, 0x18}, + {0xd18f, CRL_REG_LEN_08BIT, 0x00}, + {0xd190, CRL_REG_LEN_08BIT, 0xa4}, + {0xd191, CRL_REG_LEN_08BIT, 0x83}, + {0xd192, CRL_REG_LEN_08BIT, 0xff}, + 
{0xd193, CRL_REG_LEN_08BIT, 0xff}, + {0xd194, CRL_REG_LEN_08BIT, 0xb8}, + {0xd195, CRL_REG_LEN_08BIT, 0x64}, + {0xd196, CRL_REG_LEN_08BIT, 0x00}, + {0xd197, CRL_REG_LEN_08BIT, 0x48}, + {0xd198, CRL_REG_LEN_08BIT, 0xd8}, + {0xd199, CRL_REG_LEN_08BIT, 0x0a}, + {0xd19a, CRL_REG_LEN_08BIT, 0x18}, + {0xd19b, CRL_REG_LEN_08BIT, 0x00}, + {0xd19c, CRL_REG_LEN_08BIT, 0xd8}, + {0xd19d, CRL_REG_LEN_08BIT, 0x0b}, + {0xd19e, CRL_REG_LEN_08BIT, 0x20}, + {0xd19f, CRL_REG_LEN_08BIT, 0x00}, + {0xd1a0, CRL_REG_LEN_08BIT, 0x9c}, + {0xd1a1, CRL_REG_LEN_08BIT, 0x60}, + {0xd1a2, CRL_REG_LEN_08BIT, 0x00}, + {0xd1a3, CRL_REG_LEN_08BIT, 0x00}, + {0xd1a4, CRL_REG_LEN_08BIT, 0xd8}, + {0xd1a5, CRL_REG_LEN_08BIT, 0x07}, + {0xd1a6, CRL_REG_LEN_08BIT, 0x18}, + {0xd1a7, CRL_REG_LEN_08BIT, 0x00}, + {0xd1a8, CRL_REG_LEN_08BIT, 0xa8}, + {0xd1a9, CRL_REG_LEN_08BIT, 0x68}, + {0xd1aa, CRL_REG_LEN_08BIT, 0x38}, + {0xd1ab, CRL_REG_LEN_08BIT, 0x22}, + {0xd1ac, CRL_REG_LEN_08BIT, 0x9c}, + {0xd1ad, CRL_REG_LEN_08BIT, 0x80}, + {0xd1ae, CRL_REG_LEN_08BIT, 0x00}, + {0xd1af, CRL_REG_LEN_08BIT, 0x70}, + {0xd1b0, CRL_REG_LEN_08BIT, 0xa8}, + {0xd1b1, CRL_REG_LEN_08BIT, 0xe8}, + {0xd1b2, CRL_REG_LEN_08BIT, 0x38}, + {0xd1b3, CRL_REG_LEN_08BIT, 0x43}, + {0xd1b4, CRL_REG_LEN_08BIT, 0xd8}, + {0xd1b5, CRL_REG_LEN_08BIT, 0x03}, + {0xd1b6, CRL_REG_LEN_08BIT, 0x20}, + {0xd1b7, CRL_REG_LEN_08BIT, 0x00}, + {0xd1b8, CRL_REG_LEN_08BIT, 0x9c}, + {0xd1b9, CRL_REG_LEN_08BIT, 0xa0}, + {0xd1ba, CRL_REG_LEN_08BIT, 0x00}, + {0xd1bb, CRL_REG_LEN_08BIT, 0x00}, + {0xd1bc, CRL_REG_LEN_08BIT, 0xa8}, + {0xd1bd, CRL_REG_LEN_08BIT, 0xc8}, + {0xd1be, CRL_REG_LEN_08BIT, 0x38}, + {0xd1bf, CRL_REG_LEN_08BIT, 0x42}, + {0xd1c0, CRL_REG_LEN_08BIT, 0x8c}, + {0xd1c1, CRL_REG_LEN_08BIT, 0x66}, + {0xd1c2, CRL_REG_LEN_08BIT, 0x00}, + {0xd1c3, CRL_REG_LEN_08BIT, 0x00}, + {0xd1c4, CRL_REG_LEN_08BIT, 0x9c}, + {0xd1c5, CRL_REG_LEN_08BIT, 0xa5}, + {0xd1c6, CRL_REG_LEN_08BIT, 0x00}, + {0xd1c7, CRL_REG_LEN_08BIT, 0x01}, + {0xd1c8, CRL_REG_LEN_08BIT, 0xb8}, + 
{0xd1c9, CRL_REG_LEN_08BIT, 0x83}, + {0xd1ca, CRL_REG_LEN_08BIT, 0x00}, + {0xd1cb, CRL_REG_LEN_08BIT, 0x08}, + {0xd1cc, CRL_REG_LEN_08BIT, 0xa4}, + {0xd1cd, CRL_REG_LEN_08BIT, 0xa5}, + {0xd1ce, CRL_REG_LEN_08BIT, 0x00}, + {0xd1cf, CRL_REG_LEN_08BIT, 0xff}, + {0xd1d0, CRL_REG_LEN_08BIT, 0x8c}, + {0xd1d1, CRL_REG_LEN_08BIT, 0x67}, + {0xd1d2, CRL_REG_LEN_08BIT, 0x00}, + {0xd1d3, CRL_REG_LEN_08BIT, 0x00}, + {0xd1d4, CRL_REG_LEN_08BIT, 0xe0}, + {0xd1d5, CRL_REG_LEN_08BIT, 0x63}, + {0xd1d6, CRL_REG_LEN_08BIT, 0x20}, + {0xd1d7, CRL_REG_LEN_08BIT, 0x00}, + {0xd1d8, CRL_REG_LEN_08BIT, 0xa4}, + {0xd1d9, CRL_REG_LEN_08BIT, 0x63}, + {0xd1da, CRL_REG_LEN_08BIT, 0xff}, + {0xd1db, CRL_REG_LEN_08BIT, 0xff}, + {0xd1dc, CRL_REG_LEN_08BIT, 0xbc}, + {0xd1dd, CRL_REG_LEN_08BIT, 0x43}, + {0xd1de, CRL_REG_LEN_08BIT, 0x00}, + {0xd1df, CRL_REG_LEN_08BIT, 0x07}, + {0xd1e0, CRL_REG_LEN_08BIT, 0x0c}, + {0xd1e1, CRL_REG_LEN_08BIT, 0x00}, + {0xd1e2, CRL_REG_LEN_08BIT, 0x00}, + {0xd1e3, CRL_REG_LEN_08BIT, 0x5b}, + {0xd1e4, CRL_REG_LEN_08BIT, 0xbc}, + {0xd1e5, CRL_REG_LEN_08BIT, 0x05}, + {0xd1e6, CRL_REG_LEN_08BIT, 0x00}, + {0xd1e7, CRL_REG_LEN_08BIT, 0x02}, + {0xd1e8, CRL_REG_LEN_08BIT, 0x03}, + {0xd1e9, CRL_REG_LEN_08BIT, 0xff}, + {0xd1ea, CRL_REG_LEN_08BIT, 0xff}, + {0xd1eb, CRL_REG_LEN_08BIT, 0xf6}, + {0xd1ec, CRL_REG_LEN_08BIT, 0x9c}, + {0xd1ed, CRL_REG_LEN_08BIT, 0xa0}, + {0xd1ee, CRL_REG_LEN_08BIT, 0x00}, + {0xd1ef, CRL_REG_LEN_08BIT, 0x00}, + {0xd1f0, CRL_REG_LEN_08BIT, 0xa8}, + {0xd1f1, CRL_REG_LEN_08BIT, 0xa4}, + {0xd1f2, CRL_REG_LEN_08BIT, 0x55}, + {0xd1f3, CRL_REG_LEN_08BIT, 0x86}, + {0xd1f4, CRL_REG_LEN_08BIT, 0x8c}, + {0xd1f5, CRL_REG_LEN_08BIT, 0x63}, + {0xd1f6, CRL_REG_LEN_08BIT, 0x00}, + {0xd1f7, CRL_REG_LEN_08BIT, 0x00}, + {0xd1f8, CRL_REG_LEN_08BIT, 0xa8}, + {0xd1f9, CRL_REG_LEN_08BIT, 0xc4}, + {0xd1fa, CRL_REG_LEN_08BIT, 0x6e}, + {0xd1fb, CRL_REG_LEN_08BIT, 0x45}, + {0xd1fc, CRL_REG_LEN_08BIT, 0xa8}, + {0xd1fd, CRL_REG_LEN_08BIT, 0xe4}, + {0xd1fe, CRL_REG_LEN_08BIT, 0x55}, + 
{0xd1ff, CRL_REG_LEN_08BIT, 0x87}, + {0xd200, CRL_REG_LEN_08BIT, 0xd8}, + {0xd201, CRL_REG_LEN_08BIT, 0x05}, + {0xd202, CRL_REG_LEN_08BIT, 0x18}, + {0xd203, CRL_REG_LEN_08BIT, 0x00}, + {0xd204, CRL_REG_LEN_08BIT, 0x8c}, + {0xd205, CRL_REG_LEN_08BIT, 0x66}, + {0xd206, CRL_REG_LEN_08BIT, 0x00}, + {0xd207, CRL_REG_LEN_08BIT, 0x00}, + {0xd208, CRL_REG_LEN_08BIT, 0xa8}, + {0xd209, CRL_REG_LEN_08BIT, 0xa4}, + {0xd20a, CRL_REG_LEN_08BIT, 0x6e}, + {0xd20b, CRL_REG_LEN_08BIT, 0x46}, + {0xd20c, CRL_REG_LEN_08BIT, 0xd8}, + {0xd20d, CRL_REG_LEN_08BIT, 0x07}, + {0xd20e, CRL_REG_LEN_08BIT, 0x18}, + {0xd20f, CRL_REG_LEN_08BIT, 0x00}, + {0xd210, CRL_REG_LEN_08BIT, 0xa8}, + {0xd211, CRL_REG_LEN_08BIT, 0x84}, + {0xd212, CRL_REG_LEN_08BIT, 0x55}, + {0xd213, CRL_REG_LEN_08BIT, 0x88}, + {0xd214, CRL_REG_LEN_08BIT, 0x8c}, + {0xd215, CRL_REG_LEN_08BIT, 0x65}, + {0xd216, CRL_REG_LEN_08BIT, 0x00}, + {0xd217, CRL_REG_LEN_08BIT, 0x00}, + {0xd218, CRL_REG_LEN_08BIT, 0xd8}, + {0xd219, CRL_REG_LEN_08BIT, 0x04}, + {0xd21a, CRL_REG_LEN_08BIT, 0x18}, + {0xd21b, CRL_REG_LEN_08BIT, 0x00}, + {0xd21c, CRL_REG_LEN_08BIT, 0x03}, + {0xd21d, CRL_REG_LEN_08BIT, 0xff}, + {0xd21e, CRL_REG_LEN_08BIT, 0xff}, + {0xd21f, CRL_REG_LEN_08BIT, 0xce}, + {0xd220, CRL_REG_LEN_08BIT, 0x19}, + {0xd221, CRL_REG_LEN_08BIT, 0x00}, + {0xd222, CRL_REG_LEN_08BIT, 0x80}, + {0xd223, CRL_REG_LEN_08BIT, 0x06}, + {0xd224, CRL_REG_LEN_08BIT, 0x8c}, + {0xd225, CRL_REG_LEN_08BIT, 0x63}, + {0xd226, CRL_REG_LEN_08BIT, 0x00}, + {0xd227, CRL_REG_LEN_08BIT, 0x00}, + {0xd228, CRL_REG_LEN_08BIT, 0xa4}, + {0xd229, CRL_REG_LEN_08BIT, 0x63}, + {0xd22a, CRL_REG_LEN_08BIT, 0x00}, + {0xd22b, CRL_REG_LEN_08BIT, 0x40}, + {0xd22c, CRL_REG_LEN_08BIT, 0xbc}, + {0xd22d, CRL_REG_LEN_08BIT, 0x23}, + {0xd22e, CRL_REG_LEN_08BIT, 0x00}, + {0xd22f, CRL_REG_LEN_08BIT, 0x00}, + {0xd230, CRL_REG_LEN_08BIT, 0x13}, + {0xd231, CRL_REG_LEN_08BIT, 0xff}, + {0xd232, CRL_REG_LEN_08BIT, 0xff}, + {0xd233, CRL_REG_LEN_08BIT, 0xc8}, + {0xd234, CRL_REG_LEN_08BIT, 0x9d}, + 
{0xd235, CRL_REG_LEN_08BIT, 0x00}, + {0xd236, CRL_REG_LEN_08BIT, 0x00}, + {0xd237, CRL_REG_LEN_08BIT, 0x40}, + {0xd238, CRL_REG_LEN_08BIT, 0xa8}, + {0xd239, CRL_REG_LEN_08BIT, 0x64}, + {0xd23a, CRL_REG_LEN_08BIT, 0x55}, + {0xd23b, CRL_REG_LEN_08BIT, 0x86}, + {0xd23c, CRL_REG_LEN_08BIT, 0xa8}, + {0xd23d, CRL_REG_LEN_08BIT, 0xa4}, + {0xd23e, CRL_REG_LEN_08BIT, 0x55}, + {0xd23f, CRL_REG_LEN_08BIT, 0x87}, + {0xd240, CRL_REG_LEN_08BIT, 0xd8}, + {0xd241, CRL_REG_LEN_08BIT, 0x03}, + {0xd242, CRL_REG_LEN_08BIT, 0x40}, + {0xd243, CRL_REG_LEN_08BIT, 0x00}, + {0xd244, CRL_REG_LEN_08BIT, 0xa8}, + {0xd245, CRL_REG_LEN_08BIT, 0x64}, + {0xd246, CRL_REG_LEN_08BIT, 0x55}, + {0xd247, CRL_REG_LEN_08BIT, 0x88}, + {0xd248, CRL_REG_LEN_08BIT, 0xd8}, + {0xd249, CRL_REG_LEN_08BIT, 0x05}, + {0xd24a, CRL_REG_LEN_08BIT, 0x40}, + {0xd24b, CRL_REG_LEN_08BIT, 0x00}, + {0xd24c, CRL_REG_LEN_08BIT, 0xd8}, + {0xd24d, CRL_REG_LEN_08BIT, 0x03}, + {0xd24e, CRL_REG_LEN_08BIT, 0x40}, + {0xd24f, CRL_REG_LEN_08BIT, 0x00}, + {0xd250, CRL_REG_LEN_08BIT, 0x03}, + {0xd251, CRL_REG_LEN_08BIT, 0xff}, + {0xd252, CRL_REG_LEN_08BIT, 0xff}, + {0xd253, CRL_REG_LEN_08BIT, 0xc1}, + {0xd254, CRL_REG_LEN_08BIT, 0x19}, + {0xd255, CRL_REG_LEN_08BIT, 0x00}, + {0xd256, CRL_REG_LEN_08BIT, 0x80}, + {0xd257, CRL_REG_LEN_08BIT, 0x06}, + {0xd258, CRL_REG_LEN_08BIT, 0x94}, + {0xd259, CRL_REG_LEN_08BIT, 0x84}, + {0xd25a, CRL_REG_LEN_08BIT, 0x00}, + {0xd25b, CRL_REG_LEN_08BIT, 0x72}, + {0xd25c, CRL_REG_LEN_08BIT, 0xe5}, + {0xd25d, CRL_REG_LEN_08BIT, 0xa4}, + {0xd25e, CRL_REG_LEN_08BIT, 0x60}, + {0xd25f, CRL_REG_LEN_08BIT, 0x00}, + {0xd260, CRL_REG_LEN_08BIT, 0x0c}, + {0xd261, CRL_REG_LEN_08BIT, 0x00}, + {0xd262, CRL_REG_LEN_08BIT, 0x00}, + {0xd263, CRL_REG_LEN_08BIT, 0x3f}, + {0xd264, CRL_REG_LEN_08BIT, 0x9d}, + {0xd265, CRL_REG_LEN_08BIT, 0x60}, + {0xd266, CRL_REG_LEN_08BIT, 0x01}, + {0xd267, CRL_REG_LEN_08BIT, 0x00}, + {0xd268, CRL_REG_LEN_08BIT, 0x85}, + {0xd269, CRL_REG_LEN_08BIT, 0x4e}, + {0xd26a, CRL_REG_LEN_08BIT, 0x00}, + 
{0xd26b, CRL_REG_LEN_08BIT, 0x00}, + {0xd26c, CRL_REG_LEN_08BIT, 0x98}, + {0xd26d, CRL_REG_LEN_08BIT, 0x70}, + {0xd26e, CRL_REG_LEN_08BIT, 0x00}, + {0xd26f, CRL_REG_LEN_08BIT, 0x00}, + {0xd270, CRL_REG_LEN_08BIT, 0x8c}, + {0xd271, CRL_REG_LEN_08BIT, 0x8a}, + {0xd272, CRL_REG_LEN_08BIT, 0x00}, + {0xd273, CRL_REG_LEN_08BIT, 0x6f}, + {0xd274, CRL_REG_LEN_08BIT, 0xe5}, + {0xd275, CRL_REG_LEN_08BIT, 0x63}, + {0xd276, CRL_REG_LEN_08BIT, 0x20}, + {0xd277, CRL_REG_LEN_08BIT, 0x00}, + {0xd278, CRL_REG_LEN_08BIT, 0x10}, + {0xd279, CRL_REG_LEN_08BIT, 0x00}, + {0xd27a, CRL_REG_LEN_08BIT, 0x00}, + {0xd27b, CRL_REG_LEN_08BIT, 0x07}, + {0xd27c, CRL_REG_LEN_08BIT, 0x15}, + {0xd27d, CRL_REG_LEN_08BIT, 0x00}, + {0xd27e, CRL_REG_LEN_08BIT, 0x00}, + {0xd27f, CRL_REG_LEN_08BIT, 0x00}, + {0xd280, CRL_REG_LEN_08BIT, 0x8c}, + {0xd281, CRL_REG_LEN_08BIT, 0xaa}, + {0xd282, CRL_REG_LEN_08BIT, 0x00}, + {0xd283, CRL_REG_LEN_08BIT, 0x6e}, + {0xd284, CRL_REG_LEN_08BIT, 0xe0}, + {0xd285, CRL_REG_LEN_08BIT, 0x63}, + {0xd286, CRL_REG_LEN_08BIT, 0x28}, + {0xd287, CRL_REG_LEN_08BIT, 0x02}, + {0xd288, CRL_REG_LEN_08BIT, 0xe0}, + {0xd289, CRL_REG_LEN_08BIT, 0x84}, + {0xd28a, CRL_REG_LEN_08BIT, 0x28}, + {0xd28b, CRL_REG_LEN_08BIT, 0x02}, + {0xd28c, CRL_REG_LEN_08BIT, 0x07}, + {0xd28d, CRL_REG_LEN_08BIT, 0xff}, + {0xd28e, CRL_REG_LEN_08BIT, 0xf8}, + {0xd28f, CRL_REG_LEN_08BIT, 0x66}, + {0xd290, CRL_REG_LEN_08BIT, 0xe0}, + {0xd291, CRL_REG_LEN_08BIT, 0x63}, + {0xd292, CRL_REG_LEN_08BIT, 0x5b}, + {0xd293, CRL_REG_LEN_08BIT, 0x06}, + {0xd294, CRL_REG_LEN_08BIT, 0x8c}, + {0xd295, CRL_REG_LEN_08BIT, 0x6a}, + {0xd296, CRL_REG_LEN_08BIT, 0x00}, + {0xd297, CRL_REG_LEN_08BIT, 0x77}, + {0xd298, CRL_REG_LEN_08BIT, 0xe0}, + {0xd299, CRL_REG_LEN_08BIT, 0x63}, + {0xd29a, CRL_REG_LEN_08BIT, 0x5b}, + {0xd29b, CRL_REG_LEN_08BIT, 0x06}, + {0xd29c, CRL_REG_LEN_08BIT, 0xbd}, + {0xd29d, CRL_REG_LEN_08BIT, 0x63}, + {0xd29e, CRL_REG_LEN_08BIT, 0x00}, + {0xd29f, CRL_REG_LEN_08BIT, 0x00}, + {0xd2a0, CRL_REG_LEN_08BIT, 0x0c}, + 
{0xd2a1, CRL_REG_LEN_08BIT, 0x00}, + {0xd2a2, CRL_REG_LEN_08BIT, 0x00}, + {0xd2a3, CRL_REG_LEN_08BIT, 0x3c}, + {0xd2a4, CRL_REG_LEN_08BIT, 0x15}, + {0xd2a5, CRL_REG_LEN_08BIT, 0x00}, + {0xd2a6, CRL_REG_LEN_08BIT, 0x00}, + {0xd2a7, CRL_REG_LEN_08BIT, 0x00}, + {0xd2a8, CRL_REG_LEN_08BIT, 0x8c}, + {0xd2a9, CRL_REG_LEN_08BIT, 0x8a}, + {0xd2aa, CRL_REG_LEN_08BIT, 0x00}, + {0xd2ab, CRL_REG_LEN_08BIT, 0x78}, + {0xd2ac, CRL_REG_LEN_08BIT, 0xb8}, + {0xd2ad, CRL_REG_LEN_08BIT, 0x63}, + {0xd2ae, CRL_REG_LEN_08BIT, 0x00}, + {0xd2af, CRL_REG_LEN_08BIT, 0x88}, + {0xd2b0, CRL_REG_LEN_08BIT, 0xe1}, + {0xd2b1, CRL_REG_LEN_08BIT, 0x64}, + {0xd2b2, CRL_REG_LEN_08BIT, 0x5b}, + {0xd2b3, CRL_REG_LEN_08BIT, 0x06}, + {0xd2b4, CRL_REG_LEN_08BIT, 0xbd}, + {0xd2b5, CRL_REG_LEN_08BIT, 0x6b}, + {0xd2b6, CRL_REG_LEN_08BIT, 0x00}, + {0xd2b7, CRL_REG_LEN_08BIT, 0x00}, + {0xd2b8, CRL_REG_LEN_08BIT, 0x0c}, + {0xd2b9, CRL_REG_LEN_08BIT, 0x00}, + {0xd2ba, CRL_REG_LEN_08BIT, 0x00}, + {0xd2bb, CRL_REG_LEN_08BIT, 0x34}, + {0xd2bc, CRL_REG_LEN_08BIT, 0xd4}, + {0xd2bd, CRL_REG_LEN_08BIT, 0x01}, + {0xd2be, CRL_REG_LEN_08BIT, 0x18}, + {0xd2bf, CRL_REG_LEN_08BIT, 0x14}, + {0xd2c0, CRL_REG_LEN_08BIT, 0xb9}, + {0xd2c1, CRL_REG_LEN_08BIT, 0x6b}, + {0xd2c2, CRL_REG_LEN_08BIT, 0x00}, + {0xd2c3, CRL_REG_LEN_08BIT, 0x88}, + {0xd2c4, CRL_REG_LEN_08BIT, 0x85}, + {0xd2c5, CRL_REG_LEN_08BIT, 0x01}, + {0xd2c6, CRL_REG_LEN_08BIT, 0x00}, + {0xd2c7, CRL_REG_LEN_08BIT, 0x14}, + {0xd2c8, CRL_REG_LEN_08BIT, 0xbd}, + {0xd2c9, CRL_REG_LEN_08BIT, 0x68}, + {0xd2ca, CRL_REG_LEN_08BIT, 0x00}, + {0xd2cb, CRL_REG_LEN_08BIT, 0x00}, + {0xd2cc, CRL_REG_LEN_08BIT, 0x0c}, + {0xd2cd, CRL_REG_LEN_08BIT, 0x00}, + {0xd2ce, CRL_REG_LEN_08BIT, 0x00}, + {0xd2cf, CRL_REG_LEN_08BIT, 0x2c}, + {0xd2d0, CRL_REG_LEN_08BIT, 0xd4}, + {0xd2d1, CRL_REG_LEN_08BIT, 0x01}, + {0xd2d2, CRL_REG_LEN_08BIT, 0x58}, + {0xd2d3, CRL_REG_LEN_08BIT, 0x18}, + {0xd2d4, CRL_REG_LEN_08BIT, 0x84}, + {0xd2d5, CRL_REG_LEN_08BIT, 0x81}, + {0xd2d6, CRL_REG_LEN_08BIT, 0x00}, + 
{0xd2d7, CRL_REG_LEN_08BIT, 0x14}, + {0xd2d8, CRL_REG_LEN_08BIT, 0xbd}, + {0xd2d9, CRL_REG_LEN_08BIT, 0xa4}, + {0xd2da, CRL_REG_LEN_08BIT, 0x01}, + {0xd2db, CRL_REG_LEN_08BIT, 0x00}, + {0xd2dc, CRL_REG_LEN_08BIT, 0x10}, + {0xd2dd, CRL_REG_LEN_08BIT, 0x00}, + {0xd2de, CRL_REG_LEN_08BIT, 0x00}, + {0xd2df, CRL_REG_LEN_08BIT, 0x05}, + {0xd2e0, CRL_REG_LEN_08BIT, 0x84}, + {0xd2e1, CRL_REG_LEN_08BIT, 0xc1}, + {0xd2e2, CRL_REG_LEN_08BIT, 0x00}, + {0xd2e3, CRL_REG_LEN_08BIT, 0x18}, + {0xd2e4, CRL_REG_LEN_08BIT, 0x9c}, + {0xd2e5, CRL_REG_LEN_08BIT, 0xa0}, + {0xd2e6, CRL_REG_LEN_08BIT, 0x01}, + {0xd2e7, CRL_REG_LEN_08BIT, 0x00}, + {0xd2e8, CRL_REG_LEN_08BIT, 0xd4}, + {0xd2e9, CRL_REG_LEN_08BIT, 0x01}, + {0xd2ea, CRL_REG_LEN_08BIT, 0x28}, + {0xd2eb, CRL_REG_LEN_08BIT, 0x14}, + {0xd2ec, CRL_REG_LEN_08BIT, 0x84}, + {0xd2ed, CRL_REG_LEN_08BIT, 0xc1}, + {0xd2ee, CRL_REG_LEN_08BIT, 0x00}, + {0xd2ef, CRL_REG_LEN_08BIT, 0x18}, + {0xd2f0, CRL_REG_LEN_08BIT, 0xbd}, + {0xd2f1, CRL_REG_LEN_08BIT, 0x66}, + {0xd2f2, CRL_REG_LEN_08BIT, 0x00}, + {0xd2f3, CRL_REG_LEN_08BIT, 0x00}, + {0xd2f4, CRL_REG_LEN_08BIT, 0x0c}, + {0xd2f5, CRL_REG_LEN_08BIT, 0x00}, + {0xd2f6, CRL_REG_LEN_08BIT, 0x00}, + {0xd2f7, CRL_REG_LEN_08BIT, 0x20}, + {0xd2f8, CRL_REG_LEN_08BIT, 0x9d}, + {0xd2f9, CRL_REG_LEN_08BIT, 0x00}, + {0xd2fa, CRL_REG_LEN_08BIT, 0x00}, + {0xd2fb, CRL_REG_LEN_08BIT, 0x00}, + {0xd2fc, CRL_REG_LEN_08BIT, 0x84}, + {0xd2fd, CRL_REG_LEN_08BIT, 0x61}, + {0xd2fe, CRL_REG_LEN_08BIT, 0x00}, + {0xd2ff, CRL_REG_LEN_08BIT, 0x18}, + {0xd300, CRL_REG_LEN_08BIT, 0xbd}, + {0xd301, CRL_REG_LEN_08BIT, 0xa3}, + {0xd302, CRL_REG_LEN_08BIT, 0x01}, + {0xd303, CRL_REG_LEN_08BIT, 0x00}, + {0xd304, CRL_REG_LEN_08BIT, 0x10}, + {0xd305, CRL_REG_LEN_08BIT, 0x00}, + {0xd306, CRL_REG_LEN_08BIT, 0x00}, + {0xd307, CRL_REG_LEN_08BIT, 0x03}, + {0xd308, CRL_REG_LEN_08BIT, 0x9c}, + {0xd309, CRL_REG_LEN_08BIT, 0x80}, + {0xd30a, CRL_REG_LEN_08BIT, 0x01}, + {0xd30b, CRL_REG_LEN_08BIT, 0x00}, + {0xd30c, CRL_REG_LEN_08BIT, 0xd4}, + 
{0xd30d, CRL_REG_LEN_08BIT, 0x01}, + {0xd30e, CRL_REG_LEN_08BIT, 0x20}, + {0xd30f, CRL_REG_LEN_08BIT, 0x18}, + {0xd310, CRL_REG_LEN_08BIT, 0x18}, + {0xd311, CRL_REG_LEN_08BIT, 0x60}, + {0xd312, CRL_REG_LEN_08BIT, 0x80}, + {0xd313, CRL_REG_LEN_08BIT, 0x06}, + {0xd314, CRL_REG_LEN_08BIT, 0x85}, + {0xd315, CRL_REG_LEN_08BIT, 0x01}, + {0xd316, CRL_REG_LEN_08BIT, 0x00}, + {0xd317, CRL_REG_LEN_08BIT, 0x14}, + {0xd318, CRL_REG_LEN_08BIT, 0xa8}, + {0xd319, CRL_REG_LEN_08BIT, 0x83}, + {0xd31a, CRL_REG_LEN_08BIT, 0x38}, + {0xd31b, CRL_REG_LEN_08BIT, 0x29}, + {0xd31c, CRL_REG_LEN_08BIT, 0xa8}, + {0xd31d, CRL_REG_LEN_08BIT, 0xc3}, + {0xd31e, CRL_REG_LEN_08BIT, 0x40}, + {0xd31f, CRL_REG_LEN_08BIT, 0x08}, + {0xd320, CRL_REG_LEN_08BIT, 0x8c}, + {0xd321, CRL_REG_LEN_08BIT, 0x84}, + {0xd322, CRL_REG_LEN_08BIT, 0x00}, + {0xd323, CRL_REG_LEN_08BIT, 0x00}, + {0xd324, CRL_REG_LEN_08BIT, 0xa8}, + {0xd325, CRL_REG_LEN_08BIT, 0xa3}, + {0xd326, CRL_REG_LEN_08BIT, 0x38}, + {0xd327, CRL_REG_LEN_08BIT, 0x2a}, + {0xd328, CRL_REG_LEN_08BIT, 0xa8}, + {0xd329, CRL_REG_LEN_08BIT, 0xe3}, + {0xd32a, CRL_REG_LEN_08BIT, 0x40}, + {0xd32b, CRL_REG_LEN_08BIT, 0x09}, + {0xd32c, CRL_REG_LEN_08BIT, 0xe0}, + {0xd32d, CRL_REG_LEN_08BIT, 0x64}, + {0xd32e, CRL_REG_LEN_08BIT, 0x40}, + {0xd32f, CRL_REG_LEN_08BIT, 0x00}, + {0xd330, CRL_REG_LEN_08BIT, 0xd8}, + {0xd331, CRL_REG_LEN_08BIT, 0x06}, + {0xd332, CRL_REG_LEN_08BIT, 0x18}, + {0xd333, CRL_REG_LEN_08BIT, 0x00}, + {0xd334, CRL_REG_LEN_08BIT, 0x8c}, + {0xd335, CRL_REG_LEN_08BIT, 0x65}, + {0xd336, CRL_REG_LEN_08BIT, 0x00}, + {0xd337, CRL_REG_LEN_08BIT, 0x00}, + {0xd338, CRL_REG_LEN_08BIT, 0x84}, + {0xd339, CRL_REG_LEN_08BIT, 0x81}, + {0xd33a, CRL_REG_LEN_08BIT, 0x00}, + {0xd33b, CRL_REG_LEN_08BIT, 0x18}, + {0xd33c, CRL_REG_LEN_08BIT, 0xe3}, + {0xd33d, CRL_REG_LEN_08BIT, 0xe3}, + {0xd33e, CRL_REG_LEN_08BIT, 0x20}, + {0xd33f, CRL_REG_LEN_08BIT, 0x00}, + {0xd340, CRL_REG_LEN_08BIT, 0xd8}, + {0xd341, CRL_REG_LEN_08BIT, 0x07}, + {0xd342, CRL_REG_LEN_08BIT, 0xf8}, + 
{0xd343, CRL_REG_LEN_08BIT, 0x00}, + {0xd344, CRL_REG_LEN_08BIT, 0x03}, + {0xd345, CRL_REG_LEN_08BIT, 0xff}, + {0xd346, CRL_REG_LEN_08BIT, 0xff}, + {0xd347, CRL_REG_LEN_08BIT, 0x6f}, + {0xd348, CRL_REG_LEN_08BIT, 0x18}, + {0xd349, CRL_REG_LEN_08BIT, 0x60}, + {0xd34a, CRL_REG_LEN_08BIT, 0x00}, + {0xd34b, CRL_REG_LEN_08BIT, 0x01}, + {0xd34c, CRL_REG_LEN_08BIT, 0x0f}, + {0xd34d, CRL_REG_LEN_08BIT, 0xff}, + {0xd34e, CRL_REG_LEN_08BIT, 0xff}, + {0xd34f, CRL_REG_LEN_08BIT, 0x9d}, + {0xd350, CRL_REG_LEN_08BIT, 0x18}, + {0xd351, CRL_REG_LEN_08BIT, 0x60}, + {0xd352, CRL_REG_LEN_08BIT, 0x80}, + {0xd353, CRL_REG_LEN_08BIT, 0x06}, + {0xd354, CRL_REG_LEN_08BIT, 0x00}, + {0xd355, CRL_REG_LEN_08BIT, 0x00}, + {0xd356, CRL_REG_LEN_08BIT, 0x00}, + {0xd357, CRL_REG_LEN_08BIT, 0x11}, + {0xd358, CRL_REG_LEN_08BIT, 0xa8}, + {0xd359, CRL_REG_LEN_08BIT, 0x83}, + {0xd35a, CRL_REG_LEN_08BIT, 0x6e}, + {0xd35b, CRL_REG_LEN_08BIT, 0x43}, + {0xd35c, CRL_REG_LEN_08BIT, 0xe0}, + {0xd35d, CRL_REG_LEN_08BIT, 0x6c}, + {0xd35e, CRL_REG_LEN_08BIT, 0x28}, + {0xd35f, CRL_REG_LEN_08BIT, 0x02}, + {0xd360, CRL_REG_LEN_08BIT, 0xe0}, + {0xd361, CRL_REG_LEN_08BIT, 0x84}, + {0xd362, CRL_REG_LEN_08BIT, 0x28}, + {0xd363, CRL_REG_LEN_08BIT, 0x02}, + {0xd364, CRL_REG_LEN_08BIT, 0x07}, + {0xd365, CRL_REG_LEN_08BIT, 0xff}, + {0xd366, CRL_REG_LEN_08BIT, 0xf8}, + {0xd367, CRL_REG_LEN_08BIT, 0x30}, + {0xd368, CRL_REG_LEN_08BIT, 0xb8}, + {0xd369, CRL_REG_LEN_08BIT, 0x63}, + {0xd36a, CRL_REG_LEN_08BIT, 0x00}, + {0xd36b, CRL_REG_LEN_08BIT, 0x08}, + {0xd36c, CRL_REG_LEN_08BIT, 0x03}, + {0xd36d, CRL_REG_LEN_08BIT, 0xff}, + {0xd36e, CRL_REG_LEN_08BIT, 0xff}, + {0xd36f, CRL_REG_LEN_08BIT, 0xc0}, + {0xd370, CRL_REG_LEN_08BIT, 0x85}, + {0xd371, CRL_REG_LEN_08BIT, 0x4e}, + {0xd372, CRL_REG_LEN_08BIT, 0x00}, + {0xd373, CRL_REG_LEN_08BIT, 0x00}, + {0xd374, CRL_REG_LEN_08BIT, 0x03}, + {0xd375, CRL_REG_LEN_08BIT, 0xff}, + {0xd376, CRL_REG_LEN_08BIT, 0xff}, + {0xd377, CRL_REG_LEN_08BIT, 0xe7}, + {0xd378, CRL_REG_LEN_08BIT, 0xd4}, + 
{0xd379, CRL_REG_LEN_08BIT, 0x01}, + {0xd37a, CRL_REG_LEN_08BIT, 0x40}, + {0xd37b, CRL_REG_LEN_08BIT, 0x18}, + {0xd37c, CRL_REG_LEN_08BIT, 0x9c}, + {0xd37d, CRL_REG_LEN_08BIT, 0x60}, + {0xd37e, CRL_REG_LEN_08BIT, 0x00}, + {0xd37f, CRL_REG_LEN_08BIT, 0x00}, + {0xd380, CRL_REG_LEN_08BIT, 0x03}, + {0xd381, CRL_REG_LEN_08BIT, 0xff}, + {0xd382, CRL_REG_LEN_08BIT, 0xff}, + {0xd383, CRL_REG_LEN_08BIT, 0xdb}, + {0xd384, CRL_REG_LEN_08BIT, 0xd4}, + {0xd385, CRL_REG_LEN_08BIT, 0x01}, + {0xd386, CRL_REG_LEN_08BIT, 0x18}, + {0xd387, CRL_REG_LEN_08BIT, 0x14}, + {0xd388, CRL_REG_LEN_08BIT, 0x03}, + {0xd389, CRL_REG_LEN_08BIT, 0xff}, + {0xd38a, CRL_REG_LEN_08BIT, 0xff}, + {0xd38b, CRL_REG_LEN_08BIT, 0xce}, + {0xd38c, CRL_REG_LEN_08BIT, 0x9d}, + {0xd38d, CRL_REG_LEN_08BIT, 0x6b}, + {0xd38e, CRL_REG_LEN_08BIT, 0x00}, + {0xd38f, CRL_REG_LEN_08BIT, 0xff}, + {0xd390, CRL_REG_LEN_08BIT, 0x03}, + {0xd391, CRL_REG_LEN_08BIT, 0xff}, + {0xd392, CRL_REG_LEN_08BIT, 0xff}, + {0xd393, CRL_REG_LEN_08BIT, 0xc6}, + {0xd394, CRL_REG_LEN_08BIT, 0x9c}, + {0xd395, CRL_REG_LEN_08BIT, 0x63}, + {0xd396, CRL_REG_LEN_08BIT, 0x00}, + {0xd397, CRL_REG_LEN_08BIT, 0xff}, + {0xd398, CRL_REG_LEN_08BIT, 0xa8}, + {0xd399, CRL_REG_LEN_08BIT, 0xe3}, + {0xd39a, CRL_REG_LEN_08BIT, 0x38}, + {0xd39b, CRL_REG_LEN_08BIT, 0x0f}, + {0xd39c, CRL_REG_LEN_08BIT, 0x8c}, + {0xd39d, CRL_REG_LEN_08BIT, 0x84}, + {0xd39e, CRL_REG_LEN_08BIT, 0x00}, + {0xd39f, CRL_REG_LEN_08BIT, 0x00}, + {0xd3a0, CRL_REG_LEN_08BIT, 0xa8}, + {0xd3a1, CRL_REG_LEN_08BIT, 0xa3}, + {0xd3a2, CRL_REG_LEN_08BIT, 0x38}, + {0xd3a3, CRL_REG_LEN_08BIT, 0x0e}, + {0xd3a4, CRL_REG_LEN_08BIT, 0xa8}, + {0xd3a5, CRL_REG_LEN_08BIT, 0xc3}, + {0xd3a6, CRL_REG_LEN_08BIT, 0x6e}, + {0xd3a7, CRL_REG_LEN_08BIT, 0x42}, + {0xd3a8, CRL_REG_LEN_08BIT, 0xd8}, + {0xd3a9, CRL_REG_LEN_08BIT, 0x07}, + {0xd3aa, CRL_REG_LEN_08BIT, 0x20}, + {0xd3ab, CRL_REG_LEN_08BIT, 0x00}, + {0xd3ac, CRL_REG_LEN_08BIT, 0x8c}, + {0xd3ad, CRL_REG_LEN_08BIT, 0x66}, + {0xd3ae, CRL_REG_LEN_08BIT, 0x00}, + 
{0xd3af, CRL_REG_LEN_08BIT, 0x00}, + {0xd3b0, CRL_REG_LEN_08BIT, 0xd8}, + {0xd3b1, CRL_REG_LEN_08BIT, 0x05}, + {0xd3b2, CRL_REG_LEN_08BIT, 0x18}, + {0xd3b3, CRL_REG_LEN_08BIT, 0x00}, + {0xd3b4, CRL_REG_LEN_08BIT, 0x85}, + {0xd3b5, CRL_REG_LEN_08BIT, 0x21}, + {0xd3b6, CRL_REG_LEN_08BIT, 0x00}, + {0xd3b7, CRL_REG_LEN_08BIT, 0x00}, + {0xd3b8, CRL_REG_LEN_08BIT, 0x85}, + {0xd3b9, CRL_REG_LEN_08BIT, 0x41}, + {0xd3ba, CRL_REG_LEN_08BIT, 0x00}, + {0xd3bb, CRL_REG_LEN_08BIT, 0x04}, + {0xd3bc, CRL_REG_LEN_08BIT, 0x85}, + {0xd3bd, CRL_REG_LEN_08BIT, 0x81}, + {0xd3be, CRL_REG_LEN_08BIT, 0x00}, + {0xd3bf, CRL_REG_LEN_08BIT, 0x08}, + {0xd3c0, CRL_REG_LEN_08BIT, 0x85}, + {0xd3c1, CRL_REG_LEN_08BIT, 0xc1}, + {0xd3c2, CRL_REG_LEN_08BIT, 0x00}, + {0xd3c3, CRL_REG_LEN_08BIT, 0x0c}, + {0xd3c4, CRL_REG_LEN_08BIT, 0x86}, + {0xd3c5, CRL_REG_LEN_08BIT, 0x01}, + {0xd3c6, CRL_REG_LEN_08BIT, 0x00}, + {0xd3c7, CRL_REG_LEN_08BIT, 0x10}, + {0xd3c8, CRL_REG_LEN_08BIT, 0x44}, + {0xd3c9, CRL_REG_LEN_08BIT, 0x00}, + {0xd3ca, CRL_REG_LEN_08BIT, 0x48}, + {0xd3cb, CRL_REG_LEN_08BIT, 0x00}, + {0xd3cc, CRL_REG_LEN_08BIT, 0x9c}, + {0xd3cd, CRL_REG_LEN_08BIT, 0x21}, + {0xd3ce, CRL_REG_LEN_08BIT, 0x00}, + {0xd3cf, CRL_REG_LEN_08BIT, 0x1c}, + {0xd3d0, CRL_REG_LEN_08BIT, 0x9c}, + {0xd3d1, CRL_REG_LEN_08BIT, 0x21}, + {0xd3d2, CRL_REG_LEN_08BIT, 0xff}, + {0xd3d3, CRL_REG_LEN_08BIT, 0xfc}, + {0xd3d4, CRL_REG_LEN_08BIT, 0xd4}, + {0xd3d5, CRL_REG_LEN_08BIT, 0x01}, + {0xd3d6, CRL_REG_LEN_08BIT, 0x48}, + {0xd3d7, CRL_REG_LEN_08BIT, 0x00}, + {0xd3d8, CRL_REG_LEN_08BIT, 0x18}, + {0xd3d9, CRL_REG_LEN_08BIT, 0x60}, + {0xd3da, CRL_REG_LEN_08BIT, 0x00}, + {0xd3db, CRL_REG_LEN_08BIT, 0x01}, + {0xd3dc, CRL_REG_LEN_08BIT, 0xa8}, + {0xd3dd, CRL_REG_LEN_08BIT, 0x63}, + {0xd3de, CRL_REG_LEN_08BIT, 0x07}, + {0xd3df, CRL_REG_LEN_08BIT, 0x80}, + {0xd3e0, CRL_REG_LEN_08BIT, 0x8c}, + {0xd3e1, CRL_REG_LEN_08BIT, 0x63}, + {0xd3e2, CRL_REG_LEN_08BIT, 0x00}, + {0xd3e3, CRL_REG_LEN_08BIT, 0x68}, + {0xd3e4, CRL_REG_LEN_08BIT, 0xbc}, + 
{0xd3e5, CRL_REG_LEN_08BIT, 0x03}, + {0xd3e6, CRL_REG_LEN_08BIT, 0x00}, + {0xd3e7, CRL_REG_LEN_08BIT, 0x00}, + {0xd3e8, CRL_REG_LEN_08BIT, 0x10}, + {0xd3e9, CRL_REG_LEN_08BIT, 0x00}, + {0xd3ea, CRL_REG_LEN_08BIT, 0x00}, + {0xd3eb, CRL_REG_LEN_08BIT, 0x0c}, + {0xd3ec, CRL_REG_LEN_08BIT, 0x15}, + {0xd3ed, CRL_REG_LEN_08BIT, 0x00}, + {0xd3ee, CRL_REG_LEN_08BIT, 0x00}, + {0xd3ef, CRL_REG_LEN_08BIT, 0x00}, + {0xd3f0, CRL_REG_LEN_08BIT, 0x07}, + {0xd3f1, CRL_REG_LEN_08BIT, 0xff}, + {0xd3f2, CRL_REG_LEN_08BIT, 0xd9}, + {0xd3f3, CRL_REG_LEN_08BIT, 0x98}, + {0xd3f4, CRL_REG_LEN_08BIT, 0x15}, + {0xd3f5, CRL_REG_LEN_08BIT, 0x00}, + {0xd3f6, CRL_REG_LEN_08BIT, 0x00}, + {0xd3f7, CRL_REG_LEN_08BIT, 0x00}, + {0xd3f8, CRL_REG_LEN_08BIT, 0x18}, + {0xd3f9, CRL_REG_LEN_08BIT, 0x60}, + {0xd3fa, CRL_REG_LEN_08BIT, 0x80}, + {0xd3fb, CRL_REG_LEN_08BIT, 0x06}, + {0xd3fc, CRL_REG_LEN_08BIT, 0xa8}, + {0xd3fd, CRL_REG_LEN_08BIT, 0x63}, + {0xd3fe, CRL_REG_LEN_08BIT, 0xc4}, + {0xd3ff, CRL_REG_LEN_08BIT, 0xb8}, + {0xd400, CRL_REG_LEN_08BIT, 0x8c}, + {0xd401, CRL_REG_LEN_08BIT, 0x63}, + {0xd402, CRL_REG_LEN_08BIT, 0x00}, + {0xd403, CRL_REG_LEN_08BIT, 0x00}, + {0xd404, CRL_REG_LEN_08BIT, 0xbc}, + {0xd405, CRL_REG_LEN_08BIT, 0x23}, + {0xd406, CRL_REG_LEN_08BIT, 0x00}, + {0xd407, CRL_REG_LEN_08BIT, 0x01}, + {0xd408, CRL_REG_LEN_08BIT, 0x10}, + {0xd409, CRL_REG_LEN_08BIT, 0x00}, + {0xd40a, CRL_REG_LEN_08BIT, 0x00}, + {0xd40b, CRL_REG_LEN_08BIT, 0x25}, + {0xd40c, CRL_REG_LEN_08BIT, 0x9d}, + {0xd40d, CRL_REG_LEN_08BIT, 0x00}, + {0xd40e, CRL_REG_LEN_08BIT, 0x00}, + {0xd40f, CRL_REG_LEN_08BIT, 0x00}, + {0xd410, CRL_REG_LEN_08BIT, 0x00}, + {0xd411, CRL_REG_LEN_08BIT, 0x00}, + {0xd412, CRL_REG_LEN_08BIT, 0x00}, + {0xd413, CRL_REG_LEN_08BIT, 0x0b}, + {0xd414, CRL_REG_LEN_08BIT, 0xb8}, + {0xd415, CRL_REG_LEN_08BIT, 0xe8}, + {0xd416, CRL_REG_LEN_08BIT, 0x00}, + {0xd417, CRL_REG_LEN_08BIT, 0x02}, + {0xd418, CRL_REG_LEN_08BIT, 0x07}, + {0xd419, CRL_REG_LEN_08BIT, 0xff}, + {0xd41a, CRL_REG_LEN_08BIT, 0xd6}, + 
{0xd41b, CRL_REG_LEN_08BIT, 0x24}, + {0xd41c, CRL_REG_LEN_08BIT, 0x15}, + {0xd41d, CRL_REG_LEN_08BIT, 0x00}, + {0xd41e, CRL_REG_LEN_08BIT, 0x00}, + {0xd41f, CRL_REG_LEN_08BIT, 0x00}, + {0xd420, CRL_REG_LEN_08BIT, 0x18}, + {0xd421, CRL_REG_LEN_08BIT, 0x60}, + {0xd422, CRL_REG_LEN_08BIT, 0x80}, + {0xd423, CRL_REG_LEN_08BIT, 0x06}, + {0xd424, CRL_REG_LEN_08BIT, 0xa8}, + {0xd425, CRL_REG_LEN_08BIT, 0x63}, + {0xd426, CRL_REG_LEN_08BIT, 0xc4}, + {0xd427, CRL_REG_LEN_08BIT, 0xb8}, + {0xd428, CRL_REG_LEN_08BIT, 0x8c}, + {0xd429, CRL_REG_LEN_08BIT, 0x63}, + {0xd42a, CRL_REG_LEN_08BIT, 0x00}, + {0xd42b, CRL_REG_LEN_08BIT, 0x00}, + {0xd42c, CRL_REG_LEN_08BIT, 0xbc}, + {0xd42d, CRL_REG_LEN_08BIT, 0x23}, + {0xd42e, CRL_REG_LEN_08BIT, 0x00}, + {0xd42f, CRL_REG_LEN_08BIT, 0x01}, + {0xd430, CRL_REG_LEN_08BIT, 0x10}, + {0xd431, CRL_REG_LEN_08BIT, 0x00}, + {0xd432, CRL_REG_LEN_08BIT, 0x00}, + {0xd433, CRL_REG_LEN_08BIT, 0x1b}, + {0xd434, CRL_REG_LEN_08BIT, 0x9d}, + {0xd435, CRL_REG_LEN_08BIT, 0x00}, + {0xd436, CRL_REG_LEN_08BIT, 0x00}, + {0xd437, CRL_REG_LEN_08BIT, 0x00}, + {0xd438, CRL_REG_LEN_08BIT, 0xb8}, + {0xd439, CRL_REG_LEN_08BIT, 0xe8}, + {0xd43a, CRL_REG_LEN_08BIT, 0x00}, + {0xd43b, CRL_REG_LEN_08BIT, 0x02}, + {0xd43c, CRL_REG_LEN_08BIT, 0x9c}, + {0xd43d, CRL_REG_LEN_08BIT, 0xc0}, + {0xd43e, CRL_REG_LEN_08BIT, 0x00}, + {0xd43f, CRL_REG_LEN_08BIT, 0x00}, + {0xd440, CRL_REG_LEN_08BIT, 0x18}, + {0xd441, CRL_REG_LEN_08BIT, 0xa0}, + {0xd442, CRL_REG_LEN_08BIT, 0x80}, + {0xd443, CRL_REG_LEN_08BIT, 0x06}, + {0xd444, CRL_REG_LEN_08BIT, 0xe0}, + {0xd445, CRL_REG_LEN_08BIT, 0x67}, + {0xd446, CRL_REG_LEN_08BIT, 0x30}, + {0xd447, CRL_REG_LEN_08BIT, 0x00}, + {0xd448, CRL_REG_LEN_08BIT, 0xa8}, + {0xd449, CRL_REG_LEN_08BIT, 0xa5}, + {0xd44a, CRL_REG_LEN_08BIT, 0xce}, + {0xd44b, CRL_REG_LEN_08BIT, 0xb0}, + {0xd44c, CRL_REG_LEN_08BIT, 0x19}, + {0xd44d, CRL_REG_LEN_08BIT, 0x60}, + {0xd44e, CRL_REG_LEN_08BIT, 0x00}, + {0xd44f, CRL_REG_LEN_08BIT, 0x01}, + {0xd450, CRL_REG_LEN_08BIT, 0xa9}, + 
{0xd451, CRL_REG_LEN_08BIT, 0x6b}, + {0xd452, CRL_REG_LEN_08BIT, 0x06}, + {0xd453, CRL_REG_LEN_08BIT, 0x14}, + {0xd454, CRL_REG_LEN_08BIT, 0xe0}, + {0xd455, CRL_REG_LEN_08BIT, 0x83}, + {0xd456, CRL_REG_LEN_08BIT, 0x28}, + {0xd457, CRL_REG_LEN_08BIT, 0x00}, + {0xd458, CRL_REG_LEN_08BIT, 0x9c}, + {0xd459, CRL_REG_LEN_08BIT, 0xc6}, + {0xd45a, CRL_REG_LEN_08BIT, 0x00}, + {0xd45b, CRL_REG_LEN_08BIT, 0x01}, + {0xd45c, CRL_REG_LEN_08BIT, 0xe0}, + {0xd45d, CRL_REG_LEN_08BIT, 0x63}, + {0xd45e, CRL_REG_LEN_08BIT, 0x18}, + {0xd45f, CRL_REG_LEN_08BIT, 0x00}, + {0xd460, CRL_REG_LEN_08BIT, 0x8c}, + {0xd461, CRL_REG_LEN_08BIT, 0x84}, + {0xd462, CRL_REG_LEN_08BIT, 0x00}, + {0xd463, CRL_REG_LEN_08BIT, 0x00}, + {0xd464, CRL_REG_LEN_08BIT, 0xe0}, + {0xd465, CRL_REG_LEN_08BIT, 0xa3}, + {0xd466, CRL_REG_LEN_08BIT, 0x58}, + {0xd467, CRL_REG_LEN_08BIT, 0x00}, + {0xd468, CRL_REG_LEN_08BIT, 0xa4}, + {0xd469, CRL_REG_LEN_08BIT, 0xc6}, + {0xd46a, CRL_REG_LEN_08BIT, 0x00}, + {0xd46b, CRL_REG_LEN_08BIT, 0xff}, + {0xd46c, CRL_REG_LEN_08BIT, 0xb8}, + {0xd46d, CRL_REG_LEN_08BIT, 0x64}, + {0xd46e, CRL_REG_LEN_08BIT, 0x00}, + {0xd46f, CRL_REG_LEN_08BIT, 0x18}, + {0xd470, CRL_REG_LEN_08BIT, 0xbc}, + {0xd471, CRL_REG_LEN_08BIT, 0x46}, + {0xd472, CRL_REG_LEN_08BIT, 0x00}, + {0xd473, CRL_REG_LEN_08BIT, 0x03}, + {0xd474, CRL_REG_LEN_08BIT, 0x94}, + {0xd475, CRL_REG_LEN_08BIT, 0x85}, + {0xd476, CRL_REG_LEN_08BIT, 0x00}, + {0xd477, CRL_REG_LEN_08BIT, 0x00}, + {0xd478, CRL_REG_LEN_08BIT, 0xb8}, + {0xd479, CRL_REG_LEN_08BIT, 0x63}, + {0xd47a, CRL_REG_LEN_08BIT, 0x00}, + {0xd47b, CRL_REG_LEN_08BIT, 0x98}, + {0xd47c, CRL_REG_LEN_08BIT, 0xe0}, + {0xd47d, CRL_REG_LEN_08BIT, 0x64}, + {0xd47e, CRL_REG_LEN_08BIT, 0x18}, + {0xd47f, CRL_REG_LEN_08BIT, 0x00}, + {0xd480, CRL_REG_LEN_08BIT, 0x0f}, + {0xd481, CRL_REG_LEN_08BIT, 0xff}, + {0xd482, CRL_REG_LEN_08BIT, 0xff}, + {0xd483, CRL_REG_LEN_08BIT, 0xf0}, + {0xd484, CRL_REG_LEN_08BIT, 0xdc}, + {0xd485, CRL_REG_LEN_08BIT, 0x05}, + {0xd486, CRL_REG_LEN_08BIT, 0x18}, + 
{0xd487, CRL_REG_LEN_08BIT, 0x00}, + {0xd488, CRL_REG_LEN_08BIT, 0x9c}, + {0xd489, CRL_REG_LEN_08BIT, 0x68}, + {0xd48a, CRL_REG_LEN_08BIT, 0x00}, + {0xd48b, CRL_REG_LEN_08BIT, 0x01}, + {0xd48c, CRL_REG_LEN_08BIT, 0xa5}, + {0xd48d, CRL_REG_LEN_08BIT, 0x03}, + {0xd48e, CRL_REG_LEN_08BIT, 0x00}, + {0xd48f, CRL_REG_LEN_08BIT, 0xff}, + {0xd490, CRL_REG_LEN_08BIT, 0xbc}, + {0xd491, CRL_REG_LEN_08BIT, 0x48}, + {0xd492, CRL_REG_LEN_08BIT, 0x00}, + {0xd493, CRL_REG_LEN_08BIT, 0x01}, + {0xd494, CRL_REG_LEN_08BIT, 0x0f}, + {0xd495, CRL_REG_LEN_08BIT, 0xff}, + {0xd496, CRL_REG_LEN_08BIT, 0xff}, + {0xd497, CRL_REG_LEN_08BIT, 0xea}, + {0xd498, CRL_REG_LEN_08BIT, 0xb8}, + {0xd499, CRL_REG_LEN_08BIT, 0xe8}, + {0xd49a, CRL_REG_LEN_08BIT, 0x00}, + {0xd49b, CRL_REG_LEN_08BIT, 0x02}, + {0xd49c, CRL_REG_LEN_08BIT, 0x18}, + {0xd49d, CRL_REG_LEN_08BIT, 0x60}, + {0xd49e, CRL_REG_LEN_08BIT, 0x00}, + {0xd49f, CRL_REG_LEN_08BIT, 0x01}, + {0xd4a0, CRL_REG_LEN_08BIT, 0xa8}, + {0xd4a1, CRL_REG_LEN_08BIT, 0x63}, + {0xd4a2, CRL_REG_LEN_08BIT, 0x06}, + {0xd4a3, CRL_REG_LEN_08BIT, 0x14}, + {0xd4a4, CRL_REG_LEN_08BIT, 0x07}, + {0xd4a5, CRL_REG_LEN_08BIT, 0xff}, + {0xd4a6, CRL_REG_LEN_08BIT, 0xe4}, + {0xd4a7, CRL_REG_LEN_08BIT, 0x05}, + {0xd4a8, CRL_REG_LEN_08BIT, 0x9c}, + {0xd4a9, CRL_REG_LEN_08BIT, 0x83}, + {0xd4aa, CRL_REG_LEN_08BIT, 0x00}, + {0xd4ab, CRL_REG_LEN_08BIT, 0x10}, + {0xd4ac, CRL_REG_LEN_08BIT, 0x85}, + {0xd4ad, CRL_REG_LEN_08BIT, 0x21}, + {0xd4ae, CRL_REG_LEN_08BIT, 0x00}, + {0xd4af, CRL_REG_LEN_08BIT, 0x00}, + {0xd4b0, CRL_REG_LEN_08BIT, 0x44}, + {0xd4b1, CRL_REG_LEN_08BIT, 0x00}, + {0xd4b2, CRL_REG_LEN_08BIT, 0x48}, + {0xd4b3, CRL_REG_LEN_08BIT, 0x00}, + {0xd4b4, CRL_REG_LEN_08BIT, 0x9c}, + {0xd4b5, CRL_REG_LEN_08BIT, 0x21}, + {0xd4b6, CRL_REG_LEN_08BIT, 0x00}, + {0xd4b7, CRL_REG_LEN_08BIT, 0x04}, + {0xd4b8, CRL_REG_LEN_08BIT, 0x18}, + {0xd4b9, CRL_REG_LEN_08BIT, 0x60}, + {0xd4ba, CRL_REG_LEN_08BIT, 0x00}, + {0xd4bb, CRL_REG_LEN_08BIT, 0x01}, + {0xd4bc, CRL_REG_LEN_08BIT, 0x9c}, + 
{0xd4bd, CRL_REG_LEN_08BIT, 0x80}, + {0xd4be, CRL_REG_LEN_08BIT, 0xff}, + {0xd4bf, CRL_REG_LEN_08BIT, 0xff}, + {0xd4c0, CRL_REG_LEN_08BIT, 0xa8}, + {0xd4c1, CRL_REG_LEN_08BIT, 0x63}, + {0xd4c2, CRL_REG_LEN_08BIT, 0x09}, + {0xd4c3, CRL_REG_LEN_08BIT, 0xef}, + {0xd4c4, CRL_REG_LEN_08BIT, 0xd8}, + {0xd4c5, CRL_REG_LEN_08BIT, 0x03}, + {0xd4c6, CRL_REG_LEN_08BIT, 0x20}, + {0xd4c7, CRL_REG_LEN_08BIT, 0x00}, + {0xd4c8, CRL_REG_LEN_08BIT, 0x18}, + {0xd4c9, CRL_REG_LEN_08BIT, 0x60}, + {0xd4ca, CRL_REG_LEN_08BIT, 0x80}, + {0xd4cb, CRL_REG_LEN_08BIT, 0x06}, + {0xd4cc, CRL_REG_LEN_08BIT, 0xa8}, + {0xd4cd, CRL_REG_LEN_08BIT, 0x63}, + {0xd4ce, CRL_REG_LEN_08BIT, 0xc9}, + {0xd4cf, CRL_REG_LEN_08BIT, 0xef}, + {0xd4d0, CRL_REG_LEN_08BIT, 0xd8}, + {0xd4d1, CRL_REG_LEN_08BIT, 0x03}, + {0xd4d2, CRL_REG_LEN_08BIT, 0x20}, + {0xd4d3, CRL_REG_LEN_08BIT, 0x00}, + {0xd4d4, CRL_REG_LEN_08BIT, 0x44}, + {0xd4d5, CRL_REG_LEN_08BIT, 0x00}, + {0xd4d6, CRL_REG_LEN_08BIT, 0x48}, + {0xd4d7, CRL_REG_LEN_08BIT, 0x00}, + {0xd4d8, CRL_REG_LEN_08BIT, 0x15}, + {0xd4d9, CRL_REG_LEN_08BIT, 0x00}, + {0xd4da, CRL_REG_LEN_08BIT, 0x00}, + {0xd4db, CRL_REG_LEN_08BIT, 0x00}, + {0xd4dc, CRL_REG_LEN_08BIT, 0x18}, + {0xd4dd, CRL_REG_LEN_08BIT, 0x80}, + {0xd4de, CRL_REG_LEN_08BIT, 0x00}, + {0xd4df, CRL_REG_LEN_08BIT, 0x01}, + {0xd4e0, CRL_REG_LEN_08BIT, 0xa8}, + {0xd4e1, CRL_REG_LEN_08BIT, 0x84}, + {0xd4e2, CRL_REG_LEN_08BIT, 0x0a}, + {0xd4e3, CRL_REG_LEN_08BIT, 0x12}, + {0xd4e4, CRL_REG_LEN_08BIT, 0x8c}, + {0xd4e5, CRL_REG_LEN_08BIT, 0x64}, + {0xd4e6, CRL_REG_LEN_08BIT, 0x00}, + {0xd4e7, CRL_REG_LEN_08BIT, 0x00}, + {0xd4e8, CRL_REG_LEN_08BIT, 0xbc}, + {0xd4e9, CRL_REG_LEN_08BIT, 0x03}, + {0xd4ea, CRL_REG_LEN_08BIT, 0x00}, + {0xd4eb, CRL_REG_LEN_08BIT, 0x00}, + {0xd4ec, CRL_REG_LEN_08BIT, 0x13}, + {0xd4ed, CRL_REG_LEN_08BIT, 0xff}, + {0xd4ee, CRL_REG_LEN_08BIT, 0xff}, + {0xd4ef, CRL_REG_LEN_08BIT, 0xfe}, + {0xd4f0, CRL_REG_LEN_08BIT, 0x15}, + {0xd4f1, CRL_REG_LEN_08BIT, 0x00}, + {0xd4f2, CRL_REG_LEN_08BIT, 0x00}, + 
{0xd4f3, CRL_REG_LEN_08BIT, 0x00}, + {0xd4f4, CRL_REG_LEN_08BIT, 0x44}, + {0xd4f5, CRL_REG_LEN_08BIT, 0x00}, + {0xd4f6, CRL_REG_LEN_08BIT, 0x48}, + {0xd4f7, CRL_REG_LEN_08BIT, 0x00}, + {0xd4f8, CRL_REG_LEN_08BIT, 0x15}, + {0xd4f9, CRL_REG_LEN_08BIT, 0x00}, + {0xd4fa, CRL_REG_LEN_08BIT, 0x00}, + {0xd4fb, CRL_REG_LEN_08BIT, 0x00}, + {0xd4fc, CRL_REG_LEN_08BIT, 0x00}, + {0xd4fd, CRL_REG_LEN_08BIT, 0x00}, + {0xd4fe, CRL_REG_LEN_08BIT, 0x00}, + {0xd4ff, CRL_REG_LEN_08BIT, 0x00}, + {0xd500, CRL_REG_LEN_08BIT, 0x00}, + {0xd501, CRL_REG_LEN_08BIT, 0x00}, + {0xd502, CRL_REG_LEN_08BIT, 0x00}, + {0xd503, CRL_REG_LEN_08BIT, 0x00}, + {0x6f0e, CRL_REG_LEN_08BIT, 0x33}, + {0x6f0f, CRL_REG_LEN_08BIT, 0x33}, + {0x460e, CRL_REG_LEN_08BIT, 0x08}, + {0x460f, CRL_REG_LEN_08BIT, 0x01}, + {0x4610, CRL_REG_LEN_08BIT, 0x00}, + {0x4611, CRL_REG_LEN_08BIT, 0x01}, + {0x4612, CRL_REG_LEN_08BIT, 0x00}, + {0x4613, CRL_REG_LEN_08BIT, 0x01}, + {0x4605, CRL_REG_LEN_08BIT, 0x08}, + {0x4608, CRL_REG_LEN_08BIT, 0x00}, + {0x4609, CRL_REG_LEN_08BIT, 0x08}, + {0x6804, CRL_REG_LEN_08BIT, 0x00}, + {0x6805, CRL_REG_LEN_08BIT, 0x06}, + {0x6806, CRL_REG_LEN_08BIT, 0x00}, + {0x5120, CRL_REG_LEN_08BIT, 0x00}, + {0x3510, CRL_REG_LEN_08BIT, 0x00}, + {0x3504, CRL_REG_LEN_08BIT, 0x00}, + {0x6800, CRL_REG_LEN_08BIT, 0x00}, + {0x6f0d, CRL_REG_LEN_08BIT, 0x0f}, + {0x5000, CRL_REG_LEN_08BIT, 0xff}, + {0x5001, CRL_REG_LEN_08BIT, 0xbf}, + {0x5002, CRL_REG_LEN_08BIT, 0x7e}, + {0x5003, CRL_REG_LEN_08BIT, 0x0c}, + {0x503d, CRL_REG_LEN_08BIT, 0x00}, + {0xc450, CRL_REG_LEN_08BIT, 0x01}, + {0xc452, CRL_REG_LEN_08BIT, 0x04}, + {0xc453, CRL_REG_LEN_08BIT, 0x00}, + {0xc454, CRL_REG_LEN_08BIT, 0x01}, + {0xc455, CRL_REG_LEN_08BIT, 0x00}, + {0xc456, CRL_REG_LEN_08BIT, 0x00}, + {0xc457, CRL_REG_LEN_08BIT, 0x00}, + {0xc458, CRL_REG_LEN_08BIT, 0x00}, + {0xc459, CRL_REG_LEN_08BIT, 0x00}, + {0xc45b, CRL_REG_LEN_08BIT, 0x00}, + {0xc45c, CRL_REG_LEN_08BIT, 0x00}, + {0xc45d, CRL_REG_LEN_08BIT, 0x00}, + {0xc45e, CRL_REG_LEN_08BIT, 0x02}, + 
{0xc45f, CRL_REG_LEN_08BIT, 0x01}, + {0xc460, CRL_REG_LEN_08BIT, 0x01}, + {0xc461, CRL_REG_LEN_08BIT, 0x01}, + {0xc462, CRL_REG_LEN_08BIT, 0x01}, + {0xc464, CRL_REG_LEN_08BIT, 0x88}, + {0xc465, CRL_REG_LEN_08BIT, 0x00}, + {0xc466, CRL_REG_LEN_08BIT, 0x8a}, + {0xc467, CRL_REG_LEN_08BIT, 0x00}, + {0xc468, CRL_REG_LEN_08BIT, 0x86}, + {0xc469, CRL_REG_LEN_08BIT, 0x00}, + {0xc46a, CRL_REG_LEN_08BIT, 0x40}, + {0xc46b, CRL_REG_LEN_08BIT, 0x50}, + {0xc46c, CRL_REG_LEN_08BIT, 0x30}, + {0xc46d, CRL_REG_LEN_08BIT, 0x28}, + {0xc46e, CRL_REG_LEN_08BIT, 0x60}, + {0xc46f, CRL_REG_LEN_08BIT, 0x40}, + {0xc47c, CRL_REG_LEN_08BIT, 0x01}, + {0xc47d, CRL_REG_LEN_08BIT, 0x38}, + {0xc47e, CRL_REG_LEN_08BIT, 0x00}, + {0xc47f, CRL_REG_LEN_08BIT, 0x00}, + {0xc480, CRL_REG_LEN_08BIT, 0x00}, + {0xc481, CRL_REG_LEN_08BIT, 0xff}, + {0xc482, CRL_REG_LEN_08BIT, 0x00}, + {0xc483, CRL_REG_LEN_08BIT, 0x40}, + {0xc484, CRL_REG_LEN_08BIT, 0x00}, + {0xc485, CRL_REG_LEN_08BIT, 0x18}, + {0xc486, CRL_REG_LEN_08BIT, 0x00}, + {0xc487, CRL_REG_LEN_08BIT, 0x18}, + {0xc488, CRL_REG_LEN_08BIT, 0x34}, + {0xc489, CRL_REG_LEN_08BIT, 0x00}, + {0xc48a, CRL_REG_LEN_08BIT, 0x34}, + {0xc48b, CRL_REG_LEN_08BIT, 0x00}, + {0xc48c, CRL_REG_LEN_08BIT, 0x00}, + {0xc48d, CRL_REG_LEN_08BIT, 0x04}, + {0xc48e, CRL_REG_LEN_08BIT, 0x00}, + {0xc48f, CRL_REG_LEN_08BIT, 0x04}, + {0xc490, CRL_REG_LEN_08BIT, 0x07}, + {0xc492, CRL_REG_LEN_08BIT, 0x20}, + {0xc493, CRL_REG_LEN_08BIT, 0x08}, + {0xc498, CRL_REG_LEN_08BIT, 0x02}, + {0xc499, CRL_REG_LEN_08BIT, 0x00}, + {0xc49a, CRL_REG_LEN_08BIT, 0x02}, + {0xc49b, CRL_REG_LEN_08BIT, 0x00}, + {0xc49c, CRL_REG_LEN_08BIT, 0x02}, + {0xc49d, CRL_REG_LEN_08BIT, 0x00}, + {0xc49e, CRL_REG_LEN_08BIT, 0x02}, + {0xc49f, CRL_REG_LEN_08BIT, 0x60}, + {0xc4a0, CRL_REG_LEN_08BIT, 0x03}, + {0xc4a1, CRL_REG_LEN_08BIT, 0x00}, + {0xc4a2, CRL_REG_LEN_08BIT, 0x04}, + {0xc4a3, CRL_REG_LEN_08BIT, 0x00}, + {0xc4a4, CRL_REG_LEN_08BIT, 0x00}, + {0xc4a5, CRL_REG_LEN_08BIT, 0x10}, + {0xc4a6, CRL_REG_LEN_08BIT, 0x00}, + 
{0xc4a7, CRL_REG_LEN_08BIT, 0x40}, + {0xc4a8, CRL_REG_LEN_08BIT, 0x00}, + {0xc4a9, CRL_REG_LEN_08BIT, 0x80}, + {0xc4aa, CRL_REG_LEN_08BIT, 0x0d}, + {0xc4ab, CRL_REG_LEN_08BIT, 0x00}, + {0xc4ac, CRL_REG_LEN_08BIT, 0x03}, + {0xc4ad, CRL_REG_LEN_08BIT, 0xf0}, + {0xc4b4, CRL_REG_LEN_08BIT, 0x01}, + {0xc4b5, CRL_REG_LEN_08BIT, 0x01}, + {0xc4b6, CRL_REG_LEN_08BIT, 0x00}, + {0xc4b7, CRL_REG_LEN_08BIT, 0x01}, + {0xc4b8, CRL_REG_LEN_08BIT, 0x00}, + {0xc4b9, CRL_REG_LEN_08BIT, 0x01}, + {0xc4ba, CRL_REG_LEN_08BIT, 0x01}, + {0xc4bb, CRL_REG_LEN_08BIT, 0x00}, + {0xc4bc, CRL_REG_LEN_08BIT, 0x01}, + {0xc4bd, CRL_REG_LEN_08BIT, 0x60}, + {0xc4be, CRL_REG_LEN_08BIT, 0x02}, + {0xc4bf, CRL_REG_LEN_08BIT, 0x33}, + {0xc4c8, CRL_REG_LEN_08BIT, 0x03}, + {0xc4c9, CRL_REG_LEN_08BIT, 0xd0}, + {0xc4ca, CRL_REG_LEN_08BIT, 0x0e}, + {0xc4cb, CRL_REG_LEN_08BIT, 0x00}, + {0xc4cc, CRL_REG_LEN_08BIT, 0x10}, + {0xc4cd, CRL_REG_LEN_08BIT, 0x18}, + {0xc4ce, CRL_REG_LEN_08BIT, 0x10}, + {0xc4cf, CRL_REG_LEN_08BIT, 0x18}, + {0xc4d0, CRL_REG_LEN_08BIT, 0x04}, + {0xc4d1, CRL_REG_LEN_08BIT, 0x80}, + {0xc4e0, CRL_REG_LEN_08BIT, 0x04}, + {0xc4e1, CRL_REG_LEN_08BIT, 0x02}, + {0xc4e2, CRL_REG_LEN_08BIT, 0x01}, + {0xc4e4, CRL_REG_LEN_08BIT, 0x10}, + {0xc4e5, CRL_REG_LEN_08BIT, 0x20}, + {0xc4e6, CRL_REG_LEN_08BIT, 0x30}, + {0xc4e7, CRL_REG_LEN_08BIT, 0x40}, + {0xc4e8, CRL_REG_LEN_08BIT, 0x50}, + {0xc4e9, CRL_REG_LEN_08BIT, 0x60}, + {0xc4ea, CRL_REG_LEN_08BIT, 0x70}, + {0xc4eb, CRL_REG_LEN_08BIT, 0x80}, + {0xc4ec, CRL_REG_LEN_08BIT, 0x90}, + {0xc4ed, CRL_REG_LEN_08BIT, 0xa0}, + {0xc4ee, CRL_REG_LEN_08BIT, 0xb0}, + {0xc4ef, CRL_REG_LEN_08BIT, 0xc0}, + {0xc4f0, CRL_REG_LEN_08BIT, 0xd0}, + {0xc4f1, CRL_REG_LEN_08BIT, 0xe0}, + {0xc4f2, CRL_REG_LEN_08BIT, 0xf0}, + {0xc4f3, CRL_REG_LEN_08BIT, 0x80}, + {0xc4f4, CRL_REG_LEN_08BIT, 0x00}, + {0xc4f5, CRL_REG_LEN_08BIT, 0x20}, + {0xc4f6, CRL_REG_LEN_08BIT, 0x02}, + {0xc4f7, CRL_REG_LEN_08BIT, 0x00}, + {0xc4f8, CRL_REG_LEN_08BIT, 0x04}, + {0xc4f9, CRL_REG_LEN_08BIT, 0x0b}, + 
{0xc4fa, CRL_REG_LEN_08BIT, 0x00}, + {0xc4fb, CRL_REG_LEN_08BIT, 0x00}, + {0xc4fc, CRL_REG_LEN_08BIT, 0x01}, + {0xc4fd, CRL_REG_LEN_08BIT, 0x00}, + {0xc4fe, CRL_REG_LEN_08BIT, 0x04}, + {0xc4ff, CRL_REG_LEN_08BIT, 0x02}, + {0xc500, CRL_REG_LEN_08BIT, 0x48}, + {0xc501, CRL_REG_LEN_08BIT, 0x74}, + {0xc502, CRL_REG_LEN_08BIT, 0x58}, + {0xc503, CRL_REG_LEN_08BIT, 0x80}, + {0xc504, CRL_REG_LEN_08BIT, 0x05}, + {0xc505, CRL_REG_LEN_08BIT, 0x80}, + {0xc506, CRL_REG_LEN_08BIT, 0x03}, + {0xc507, CRL_REG_LEN_08BIT, 0x80}, + {0xc508, CRL_REG_LEN_08BIT, 0x01}, + {0xc509, CRL_REG_LEN_08BIT, 0xc0}, + {0xc50a, CRL_REG_LEN_08BIT, 0x01}, + {0xc50b, CRL_REG_LEN_08BIT, 0xa0}, + {0xc50c, CRL_REG_LEN_08BIT, 0x01}, + {0xc50d, CRL_REG_LEN_08BIT, 0x2c}, + {0xc50e, CRL_REG_LEN_08BIT, 0x01}, + {0xc50f, CRL_REG_LEN_08BIT, 0x0a}, + {0xc510, CRL_REG_LEN_08BIT, 0x00}, + {0xc511, CRL_REG_LEN_08BIT, 0x01}, + {0xc512, CRL_REG_LEN_08BIT, 0x01}, + {0xc513, CRL_REG_LEN_08BIT, 0x80}, + {0xc514, CRL_REG_LEN_08BIT, 0x04}, + {0xc515, CRL_REG_LEN_08BIT, 0x00}, + {0xc518, CRL_REG_LEN_08BIT, 0x03}, + {0xc519, CRL_REG_LEN_08BIT, 0x48}, + {0xc51a, CRL_REG_LEN_08BIT, 0x07}, + {0xc51b, CRL_REG_LEN_08BIT, 0x70}, + {0xc2e0, CRL_REG_LEN_08BIT, 0x00}, + {0xc2e1, CRL_REG_LEN_08BIT, 0x51}, + {0xc2e2, CRL_REG_LEN_08BIT, 0x00}, + {0xc2e3, CRL_REG_LEN_08BIT, 0xd6}, + {0xc2e4, CRL_REG_LEN_08BIT, 0x01}, + {0xc2e5, CRL_REG_LEN_08BIT, 0x5e}, + {0xc2e9, CRL_REG_LEN_08BIT, 0x01}, + {0xc2ea, CRL_REG_LEN_08BIT, 0x7a}, + {0xc2eb, CRL_REG_LEN_08BIT, 0x90}, + {0xc2ed, CRL_REG_LEN_08BIT, 0x00}, + {0xc2ee, CRL_REG_LEN_08BIT, 0x7a}, + {0xc2ef, CRL_REG_LEN_08BIT, 0x64}, + {0xc308, CRL_REG_LEN_08BIT, 0x00}, + {0xc309, CRL_REG_LEN_08BIT, 0x00}, + {0xc30a, CRL_REG_LEN_08BIT, 0x00}, + {0xc30c, CRL_REG_LEN_08BIT, 0x00}, + {0xc30d, CRL_REG_LEN_08BIT, 0x01}, + {0xc30e, CRL_REG_LEN_08BIT, 0x00}, + {0xc30f, CRL_REG_LEN_08BIT, 0x00}, + {0xc310, CRL_REG_LEN_08BIT, 0x01}, + {0xc311, CRL_REG_LEN_08BIT, 0x60}, + {0xc312, CRL_REG_LEN_08BIT, 0xff}, + 
{0xc313, CRL_REG_LEN_08BIT, 0x08}, + {0xc314, CRL_REG_LEN_08BIT, 0x01}, + {0xc315, CRL_REG_LEN_08BIT, 0x7f}, + {0xc316, CRL_REG_LEN_08BIT, 0xff}, + {0xc317, CRL_REG_LEN_08BIT, 0x0b}, + {0xc318, CRL_REG_LEN_08BIT, 0x00}, + {0xc319, CRL_REG_LEN_08BIT, 0x0c}, + {0xc31a, CRL_REG_LEN_08BIT, 0x00}, + {0xc31b, CRL_REG_LEN_08BIT, 0xe0}, + {0xc31c, CRL_REG_LEN_08BIT, 0x00}, + {0xc31d, CRL_REG_LEN_08BIT, 0x14}, + {0xc31e, CRL_REG_LEN_08BIT, 0x00}, + {0xc31f, CRL_REG_LEN_08BIT, 0xc5}, + {0xc320, CRL_REG_LEN_08BIT, 0xff}, + {0xc321, CRL_REG_LEN_08BIT, 0x4b}, + {0xc322, CRL_REG_LEN_08BIT, 0xff}, + {0xc323, CRL_REG_LEN_08BIT, 0xf0}, + {0xc324, CRL_REG_LEN_08BIT, 0xff}, + {0xc325, CRL_REG_LEN_08BIT, 0xe8}, + {0xc326, CRL_REG_LEN_08BIT, 0x00}, + {0xc327, CRL_REG_LEN_08BIT, 0x46}, + {0xc328, CRL_REG_LEN_08BIT, 0xff}, + {0xc329, CRL_REG_LEN_08BIT, 0xd2}, + {0xc32a, CRL_REG_LEN_08BIT, 0xff}, + {0xc32b, CRL_REG_LEN_08BIT, 0xe4}, + {0xc32c, CRL_REG_LEN_08BIT, 0xff}, + {0xc32d, CRL_REG_LEN_08BIT, 0xbb}, + {0xc32e, CRL_REG_LEN_08BIT, 0x00}, + {0xc32f, CRL_REG_LEN_08BIT, 0x61}, + {0xc330, CRL_REG_LEN_08BIT, 0xff}, + {0xc331, CRL_REG_LEN_08BIT, 0xf9}, + {0xc332, CRL_REG_LEN_08BIT, 0x00}, + {0xc333, CRL_REG_LEN_08BIT, 0xd9}, + {0xc334, CRL_REG_LEN_08BIT, 0x00}, + {0xc335, CRL_REG_LEN_08BIT, 0x2e}, + {0xc336, CRL_REG_LEN_08BIT, 0x00}, + {0xc337, CRL_REG_LEN_08BIT, 0xb1}, + {0xc338, CRL_REG_LEN_08BIT, 0xff}, + {0xc339, CRL_REG_LEN_08BIT, 0x64}, + {0xc33a, CRL_REG_LEN_08BIT, 0xff}, + {0xc33b, CRL_REG_LEN_08BIT, 0xeb}, + {0xc33c, CRL_REG_LEN_08BIT, 0xff}, + {0xc33d, CRL_REG_LEN_08BIT, 0xe8}, + {0xc33e, CRL_REG_LEN_08BIT, 0x00}, + {0xc33f, CRL_REG_LEN_08BIT, 0x48}, + {0xc340, CRL_REG_LEN_08BIT, 0xff}, + {0xc341, CRL_REG_LEN_08BIT, 0xd0}, + {0xc342, CRL_REG_LEN_08BIT, 0xff}, + {0xc343, CRL_REG_LEN_08BIT, 0xed}, + {0xc344, CRL_REG_LEN_08BIT, 0xff}, + {0xc345, CRL_REG_LEN_08BIT, 0xad}, + {0xc346, CRL_REG_LEN_08BIT, 0x00}, + {0xc347, CRL_REG_LEN_08BIT, 0x66}, + {0xc348, CRL_REG_LEN_08BIT, 0x01}, + 
{0xc349, CRL_REG_LEN_08BIT, 0x00}, + {0x6700, CRL_REG_LEN_08BIT, 0x04}, + {0x6701, CRL_REG_LEN_08BIT, 0x7b}, + {0x6702, CRL_REG_LEN_08BIT, 0xfd}, + {0x6703, CRL_REG_LEN_08BIT, 0xf9}, + {0x6704, CRL_REG_LEN_08BIT, 0x3d}, + {0x6705, CRL_REG_LEN_08BIT, 0x71}, + {0x6706, CRL_REG_LEN_08BIT, 0x78}, + {0x6708, CRL_REG_LEN_08BIT, 0x05}, + {0x6f06, CRL_REG_LEN_08BIT, 0x6f}, + {0x6f07, CRL_REG_LEN_08BIT, 0x00}, + {0x6f0a, CRL_REG_LEN_08BIT, 0x6f}, + {0x6f0b, CRL_REG_LEN_08BIT, 0x00}, + {0x6f00, CRL_REG_LEN_08BIT, 0x03}, + {0xc34c, CRL_REG_LEN_08BIT, 0x01}, + {0xc34d, CRL_REG_LEN_08BIT, 0x00}, + {0xc34e, CRL_REG_LEN_08BIT, 0x46}, + {0xc34f, CRL_REG_LEN_08BIT, 0x55}, + {0xc350, CRL_REG_LEN_08BIT, 0x00}, + {0xc351, CRL_REG_LEN_08BIT, 0x40}, + {0xc352, CRL_REG_LEN_08BIT, 0x00}, + {0xc353, CRL_REG_LEN_08BIT, 0xff}, + {0xc354, CRL_REG_LEN_08BIT, 0x04}, + {0xc355, CRL_REG_LEN_08BIT, 0x08}, + {0xc356, CRL_REG_LEN_08BIT, 0x01}, + {0xc357, CRL_REG_LEN_08BIT, 0xef}, + {0xc358, CRL_REG_LEN_08BIT, 0x30}, + {0xc359, CRL_REG_LEN_08BIT, 0x01}, + {0xc35a, CRL_REG_LEN_08BIT, 0x64}, + {0xc35b, CRL_REG_LEN_08BIT, 0x46}, + {0xc35c, CRL_REG_LEN_08BIT, 0x00}, + {0x3042, CRL_REG_LEN_08BIT, 0xf0}, + {0x3042, CRL_REG_LEN_08BIT, 0xf0}, + {0x3042, CRL_REG_LEN_08BIT, 0xf0}, + {0x3042, CRL_REG_LEN_08BIT, 0xf0}, + {0x3042, CRL_REG_LEN_08BIT, 0xf0}, + {0x3042, CRL_REG_LEN_08BIT, 0xf0}, + {0x3042, CRL_REG_LEN_08BIT, 0xf0}, + {0x3042, CRL_REG_LEN_08BIT, 0xf0}, + {0x3042, CRL_REG_LEN_08BIT, 0xf0}, + {0x3042, CRL_REG_LEN_08BIT, 0xf0}, + {0x3042, CRL_REG_LEN_08BIT, 0xf0}, + {0x3042, CRL_REG_LEN_08BIT, 0xf0}, + {0x3042, CRL_REG_LEN_08BIT, 0xf0}, + {0x3042, CRL_REG_LEN_08BIT, 0xf0}, + {0x3042, CRL_REG_LEN_08BIT, 0xf0}, + {0x3042, CRL_REG_LEN_08BIT, 0xf0}, + {0x3042, CRL_REG_LEN_08BIT, 0xf0}, + {0x3042, CRL_REG_LEN_08BIT, 0xf0}, + {0x3042, CRL_REG_LEN_08BIT, 0xf0}, + {0x3042, CRL_REG_LEN_08BIT, 0xf0}, + {0x3042, CRL_REG_LEN_08BIT, 0xf0}, + {0x3042, CRL_REG_LEN_08BIT, 0xf0}, + {0x3042, CRL_REG_LEN_08BIT, 0xf0}, + 
{0x3042, CRL_REG_LEN_08BIT, 0xf0}, + {0x3042, CRL_REG_LEN_08BIT, 0xf0}, + {0x3042, CRL_REG_LEN_08BIT, 0xf0}, + {0x302e, CRL_REG_LEN_08BIT, 0x00}, + {0x301b, CRL_REG_LEN_08BIT, 0xf0}, + {0x301c, CRL_REG_LEN_08BIT, 0xf0}, + {0x301a, CRL_REG_LEN_08BIT, 0xf0}, + {0xceb0, CRL_REG_LEN_08BIT, 0x00}, + {0xceb1, CRL_REG_LEN_08BIT, 0x00}, + {0xceb2, CRL_REG_LEN_08BIT, 0x00}, + {0xceb3, CRL_REG_LEN_08BIT, 0x00}, + {0xceb4, CRL_REG_LEN_08BIT, 0x00}, + {0xceb5, CRL_REG_LEN_08BIT, 0x00}, + {0x0000, CRL_REG_LEN_DELAY, 0x0c}, + {0xceb6, CRL_REG_LEN_08BIT, 0x00}, + {0x0000, CRL_REG_LEN_DELAY, 0x0c}, + {0xceb7, CRL_REG_LEN_08BIT, 0x00}, + {0x0000, CRL_REG_LEN_DELAY, 0x0c}, + {0xc4bc, CRL_REG_LEN_08BIT, 0x01}, + {0x0000, CRL_REG_LEN_DELAY, 0x0c}, + {0xc4bd, CRL_REG_LEN_08BIT, 0x60}, + {0x0000, CRL_REG_LEN_DELAY, 0x0c}, +}; + +static struct crl_register_write_rep ov10635_1280_720_YUV_HDR_BT656[] = { + {0x0103, CRL_REG_LEN_08BIT, 0x01}, + {0x300c, CRL_REG_LEN_08BIT, 0x61}, + {0x300c, CRL_REG_LEN_08BIT, 0x61}, + {0x300c, CRL_REG_LEN_08BIT, 0x61}, + {0x300c, CRL_REG_LEN_08BIT, 0x61}, + {0x300c, CRL_REG_LEN_08BIT, 0x61}, + {0x300c, CRL_REG_LEN_08BIT, 0x61}, + {0x300c, CRL_REG_LEN_08BIT, 0x61}, + {0x300c, CRL_REG_LEN_08BIT, 0x61}, + {0x300c, CRL_REG_LEN_08BIT, 0x61}, + {0x300c, CRL_REG_LEN_08BIT, 0x61}, + {0x300c, CRL_REG_LEN_08BIT, 0x61}, + {0x300c, CRL_REG_LEN_08BIT, 0x61}, + {0x300c, CRL_REG_LEN_08BIT, 0x61}, + {0x300c, CRL_REG_LEN_08BIT, 0x61}, + {0x300c, CRL_REG_LEN_08BIT, 0x61}, + {0x300c, CRL_REG_LEN_08BIT, 0x61}, + {0x300c, CRL_REG_LEN_08BIT, 0x61}, + {0x300c, CRL_REG_LEN_08BIT, 0x61}, + {0x300c, CRL_REG_LEN_08BIT, 0x61}, + {0x300c, CRL_REG_LEN_08BIT, 0x61}, + {0x300c, CRL_REG_LEN_08BIT, 0x61}, + {0x300c, CRL_REG_LEN_08BIT, 0x61}, + {0x300c, CRL_REG_LEN_08BIT, 0x61}, + {0x301b, CRL_REG_LEN_08BIT, 0xff}, + {0x301c, CRL_REG_LEN_08BIT, 0xff}, + {0x301a, CRL_REG_LEN_08BIT, 0xff}, + {0x3011, CRL_REG_LEN_08BIT, 0x42}, + {0x6900, CRL_REG_LEN_08BIT, 0x0c}, + {0x6901, CRL_REG_LEN_08BIT, 
0x11}, + {0x3503, CRL_REG_LEN_08BIT, 0x10}, + {0x3025, CRL_REG_LEN_08BIT, 0x03}, + {0x3003, CRL_REG_LEN_08BIT, 0x14}, + {0x3004, CRL_REG_LEN_08BIT, 0x11}, + {0x3005, CRL_REG_LEN_08BIT, 0x20}, + {0x3006, CRL_REG_LEN_08BIT, 0x91}, + {0x3600, CRL_REG_LEN_08BIT, 0x74}, + {0x3601, CRL_REG_LEN_08BIT, 0x2b}, + {0x3612, CRL_REG_LEN_08BIT, 0x00}, + {0x3611, CRL_REG_LEN_08BIT, 0x67}, + {0x3633, CRL_REG_LEN_08BIT, 0xca}, + {0x3602, CRL_REG_LEN_08BIT, 0x2f}, + {0x3603, CRL_REG_LEN_08BIT, 0x00}, + {0x3630, CRL_REG_LEN_08BIT, 0x28}, + {0x3631, CRL_REG_LEN_08BIT, 0x16}, + {0x3714, CRL_REG_LEN_08BIT, 0x10}, + {0x371d, CRL_REG_LEN_08BIT, 0x01}, + {0x4300, CRL_REG_LEN_08BIT, 0x3a}, + {0x3007, CRL_REG_LEN_08BIT, 0x01}, + {0x3024, CRL_REG_LEN_08BIT, 0x01}, + {0x3020, CRL_REG_LEN_08BIT, 0x0b}, + {0x3702, CRL_REG_LEN_08BIT, 0x1a}, + {0x3703, CRL_REG_LEN_08BIT, 0x40}, + {0x3704, CRL_REG_LEN_08BIT, 0x2a}, + {0x3709, CRL_REG_LEN_08BIT, 0xa8}, + {0x3709, CRL_REG_LEN_08BIT, 0xa8}, + {0x370c, CRL_REG_LEN_08BIT, 0xc7}, + {0x370d, CRL_REG_LEN_08BIT, 0x80}, + {0x3712, CRL_REG_LEN_08BIT, 0x00}, + {0x3713, CRL_REG_LEN_08BIT, 0x20}, + {0x3715, CRL_REG_LEN_08BIT, 0x04}, + {0x381d, CRL_REG_LEN_08BIT, 0x40}, + {0x381c, CRL_REG_LEN_08BIT, 0x00}, + {0x3822, CRL_REG_LEN_08BIT, 0x50}, + {0x3824, CRL_REG_LEN_08BIT, 0x50}, + {0x3815, CRL_REG_LEN_08BIT, 0x8c}, + {0x3804, CRL_REG_LEN_08BIT, 0x05}, + {0x3805, CRL_REG_LEN_08BIT, 0x1f}, + {0x3800, CRL_REG_LEN_08BIT, 0x00}, + {0x3801, CRL_REG_LEN_08BIT, 0x00}, + {0x3806, CRL_REG_LEN_08BIT, 0x02}, + {0x3807, CRL_REG_LEN_08BIT, 0xfd}, + {0x3802, CRL_REG_LEN_08BIT, 0x00}, + {0x3803, CRL_REG_LEN_08BIT, 0x2c}, + {0x3808, CRL_REG_LEN_08BIT, 0x05}, + {0x3809, CRL_REG_LEN_08BIT, 0x00}, + {0x380a, CRL_REG_LEN_08BIT, 0x02}, + {0x380b, CRL_REG_LEN_08BIT, 0xd0}, + {0x380c, CRL_REG_LEN_08BIT, 0x06}, + {0x380d, CRL_REG_LEN_08BIT, 0xf6}, + {0x6e42, CRL_REG_LEN_08BIT, 0x02}, + {0x6e43, CRL_REG_LEN_08BIT, 0xec}, + {0x380e, CRL_REG_LEN_08BIT, 0x02}, + {0x380f, CRL_REG_LEN_08BIT, 
0xec}, + {0x3813, CRL_REG_LEN_08BIT, 0x02}, + {0x3811, CRL_REG_LEN_08BIT, 0x10}, + {0x381f, CRL_REG_LEN_08BIT, 0x0c}, + {0x3828, CRL_REG_LEN_08BIT, 0x03}, + {0x3829, CRL_REG_LEN_08BIT, 0x10}, + {0x382a, CRL_REG_LEN_08BIT, 0x10}, + {0x382b, CRL_REG_LEN_08BIT, 0x10}, + {0x3621, CRL_REG_LEN_08BIT, 0x64}, + {0x5005, CRL_REG_LEN_08BIT, 0x08}, + {0x56d5, CRL_REG_LEN_08BIT, 0x00}, + {0x56d6, CRL_REG_LEN_08BIT, 0x80}, + {0x56d7, CRL_REG_LEN_08BIT, 0x00}, + {0x56d8, CRL_REG_LEN_08BIT, 0x00}, + {0x56d9, CRL_REG_LEN_08BIT, 0x00}, + {0x56da, CRL_REG_LEN_08BIT, 0x80}, + {0x56db, CRL_REG_LEN_08BIT, 0x00}, + {0x56dc, CRL_REG_LEN_08BIT, 0x00}, + {0x56e8, CRL_REG_LEN_08BIT, 0x00}, + {0x56e9, CRL_REG_LEN_08BIT, 0x7f}, + {0x56ea, CRL_REG_LEN_08BIT, 0x00}, + {0x56eb, CRL_REG_LEN_08BIT, 0x7f}, + {0x5100, CRL_REG_LEN_08BIT, 0x00}, + {0x5101, CRL_REG_LEN_08BIT, 0x80}, + {0x5102, CRL_REG_LEN_08BIT, 0x00}, + {0x5103, CRL_REG_LEN_08BIT, 0x80}, + {0x5104, CRL_REG_LEN_08BIT, 0x00}, + {0x5105, CRL_REG_LEN_08BIT, 0x80}, + {0x5106, CRL_REG_LEN_08BIT, 0x00}, + {0x5107, CRL_REG_LEN_08BIT, 0x80}, + {0x5108, CRL_REG_LEN_08BIT, 0x00}, + {0x5109, CRL_REG_LEN_08BIT, 0x00}, + {0x510a, CRL_REG_LEN_08BIT, 0x00}, + {0x510b, CRL_REG_LEN_08BIT, 0x00}, + {0x510c, CRL_REG_LEN_08BIT, 0x00}, + {0x510d, CRL_REG_LEN_08BIT, 0x00}, + {0x510e, CRL_REG_LEN_08BIT, 0x00}, + {0x510f, CRL_REG_LEN_08BIT, 0x00}, + {0x5110, CRL_REG_LEN_08BIT, 0x00}, + {0x5111, CRL_REG_LEN_08BIT, 0x80}, + {0x5112, CRL_REG_LEN_08BIT, 0x00}, + {0x5113, CRL_REG_LEN_08BIT, 0x80}, + {0x5114, CRL_REG_LEN_08BIT, 0x00}, + {0x5115, CRL_REG_LEN_08BIT, 0x80}, + {0x5116, CRL_REG_LEN_08BIT, 0x00}, + {0x5117, CRL_REG_LEN_08BIT, 0x80}, + {0x5118, CRL_REG_LEN_08BIT, 0x00}, + {0x5119, CRL_REG_LEN_08BIT, 0x00}, + {0x511a, CRL_REG_LEN_08BIT, 0x00}, + {0x511b, CRL_REG_LEN_08BIT, 0x00}, + {0x511c, CRL_REG_LEN_08BIT, 0x00}, + {0x511d, CRL_REG_LEN_08BIT, 0x00}, + {0x511e, CRL_REG_LEN_08BIT, 0x00}, + {0x511f, CRL_REG_LEN_08BIT, 0x00}, + {0x56d0, CRL_REG_LEN_08BIT, 
0x00}, + {0x5006, CRL_REG_LEN_08BIT, 0x24}, + {0x5608, CRL_REG_LEN_08BIT, 0x0e}, + {0x52d7, CRL_REG_LEN_08BIT, 0x06}, + {0x528d, CRL_REG_LEN_08BIT, 0x08}, + {0x5293, CRL_REG_LEN_08BIT, 0x12}, + {0x52d3, CRL_REG_LEN_08BIT, 0x12}, + {0x5288, CRL_REG_LEN_08BIT, 0x06}, + {0x5289, CRL_REG_LEN_08BIT, 0x20}, + {0x52c8, CRL_REG_LEN_08BIT, 0x06}, + {0x52c9, CRL_REG_LEN_08BIT, 0x20}, + {0x52cd, CRL_REG_LEN_08BIT, 0x04}, + {0x5381, CRL_REG_LEN_08BIT, 0x00}, + {0x5382, CRL_REG_LEN_08BIT, 0xff}, + {0x5589, CRL_REG_LEN_08BIT, 0x76}, + {0x558a, CRL_REG_LEN_08BIT, 0x47}, + {0x558b, CRL_REG_LEN_08BIT, 0xef}, + {0x558c, CRL_REG_LEN_08BIT, 0xc9}, + {0x558d, CRL_REG_LEN_08BIT, 0x49}, + {0x558e, CRL_REG_LEN_08BIT, 0x30}, + {0x558f, CRL_REG_LEN_08BIT, 0x67}, + {0x5590, CRL_REG_LEN_08BIT, 0x3f}, + {0x5591, CRL_REG_LEN_08BIT, 0xf0}, + {0x5592, CRL_REG_LEN_08BIT, 0x10}, + {0x55a2, CRL_REG_LEN_08BIT, 0x6d}, + {0x55a3, CRL_REG_LEN_08BIT, 0x55}, + {0x55a4, CRL_REG_LEN_08BIT, 0xc3}, + {0x55a5, CRL_REG_LEN_08BIT, 0xb5}, + {0x55a6, CRL_REG_LEN_08BIT, 0x43}, + {0x55a7, CRL_REG_LEN_08BIT, 0x38}, + {0x55a8, CRL_REG_LEN_08BIT, 0x5f}, + {0x55a9, CRL_REG_LEN_08BIT, 0x4b}, + {0x55aa, CRL_REG_LEN_08BIT, 0xf0}, + {0x55ab, CRL_REG_LEN_08BIT, 0x10}, + {0x5581, CRL_REG_LEN_08BIT, 0x52}, + {0x5300, CRL_REG_LEN_08BIT, 0x01}, + {0x5301, CRL_REG_LEN_08BIT, 0x00}, + {0x5302, CRL_REG_LEN_08BIT, 0x00}, + {0x5303, CRL_REG_LEN_08BIT, 0x0e}, + {0x5304, CRL_REG_LEN_08BIT, 0x00}, + {0x5305, CRL_REG_LEN_08BIT, 0x0e}, + {0x5306, CRL_REG_LEN_08BIT, 0x00}, + {0x5307, CRL_REG_LEN_08BIT, 0x36}, + {0x5308, CRL_REG_LEN_08BIT, 0x00}, + {0x5309, CRL_REG_LEN_08BIT, 0xd9}, + {0x530a, CRL_REG_LEN_08BIT, 0x00}, + {0x530b, CRL_REG_LEN_08BIT, 0x0f}, + {0x530c, CRL_REG_LEN_08BIT, 0x00}, + {0x530d, CRL_REG_LEN_08BIT, 0x2c}, + {0x530e, CRL_REG_LEN_08BIT, 0x00}, + {0x530f, CRL_REG_LEN_08BIT, 0x59}, + {0x5310, CRL_REG_LEN_08BIT, 0x00}, + {0x5311, CRL_REG_LEN_08BIT, 0x7b}, + {0x5312, CRL_REG_LEN_08BIT, 0x00}, + {0x5313, CRL_REG_LEN_08BIT, 
0x22}, + {0x5314, CRL_REG_LEN_08BIT, 0x00}, + {0x5315, CRL_REG_LEN_08BIT, 0xd5}, + {0x5316, CRL_REG_LEN_08BIT, 0x00}, + {0x5317, CRL_REG_LEN_08BIT, 0x13}, + {0x5318, CRL_REG_LEN_08BIT, 0x00}, + {0x5319, CRL_REG_LEN_08BIT, 0x18}, + {0x531a, CRL_REG_LEN_08BIT, 0x00}, + {0x531b, CRL_REG_LEN_08BIT, 0x26}, + {0x531c, CRL_REG_LEN_08BIT, 0x00}, + {0x531d, CRL_REG_LEN_08BIT, 0xdc}, + {0x531e, CRL_REG_LEN_08BIT, 0x00}, + {0x531f, CRL_REG_LEN_08BIT, 0x02}, + {0x5320, CRL_REG_LEN_08BIT, 0x00}, + {0x5321, CRL_REG_LEN_08BIT, 0x24}, + {0x5322, CRL_REG_LEN_08BIT, 0x00}, + {0x5323, CRL_REG_LEN_08BIT, 0x56}, + {0x5324, CRL_REG_LEN_08BIT, 0x00}, + {0x5325, CRL_REG_LEN_08BIT, 0x85}, + {0x5326, CRL_REG_LEN_08BIT, 0x00}, + {0x5327, CRL_REG_LEN_08BIT, 0x20}, + {0x5609, CRL_REG_LEN_08BIT, 0x01}, + {0x560a, CRL_REG_LEN_08BIT, 0x40}, + {0x560b, CRL_REG_LEN_08BIT, 0x01}, + {0x560c, CRL_REG_LEN_08BIT, 0x40}, + {0x560d, CRL_REG_LEN_08BIT, 0x00}, + {0x560e, CRL_REG_LEN_08BIT, 0xfa}, + {0x560f, CRL_REG_LEN_08BIT, 0x00}, + {0x5610, CRL_REG_LEN_08BIT, 0xfa}, + {0x5611, CRL_REG_LEN_08BIT, 0x02}, + {0x5612, CRL_REG_LEN_08BIT, 0x80}, + {0x5613, CRL_REG_LEN_08BIT, 0x02}, + {0x5614, CRL_REG_LEN_08BIT, 0x80}, + {0x5615, CRL_REG_LEN_08BIT, 0x01}, + {0x5616, CRL_REG_LEN_08BIT, 0x2c}, + {0x5617, CRL_REG_LEN_08BIT, 0x01}, + {0x5618, CRL_REG_LEN_08BIT, 0x2c}, + {0x563b, CRL_REG_LEN_08BIT, 0x01}, + {0x563c, CRL_REG_LEN_08BIT, 0x01}, + {0x563d, CRL_REG_LEN_08BIT, 0x01}, + {0x563e, CRL_REG_LEN_08BIT, 0x01}, + {0x563f, CRL_REG_LEN_08BIT, 0x03}, + {0x5640, CRL_REG_LEN_08BIT, 0x03}, + {0x5641, CRL_REG_LEN_08BIT, 0x03}, + {0x5642, CRL_REG_LEN_08BIT, 0x05}, + {0x5643, CRL_REG_LEN_08BIT, 0x09}, + {0x5644, CRL_REG_LEN_08BIT, 0x05}, + {0x5645, CRL_REG_LEN_08BIT, 0x05}, + {0x5646, CRL_REG_LEN_08BIT, 0x05}, + {0x5647, CRL_REG_LEN_08BIT, 0x05}, + {0x5651, CRL_REG_LEN_08BIT, 0x00}, + {0x5652, CRL_REG_LEN_08BIT, 0x80}, + {0x521a, CRL_REG_LEN_08BIT, 0x01}, + {0x521b, CRL_REG_LEN_08BIT, 0x03}, + {0x521c, CRL_REG_LEN_08BIT, 
0x06}, + {0x521d, CRL_REG_LEN_08BIT, 0x0a}, + {0x521e, CRL_REG_LEN_08BIT, 0x0e}, + {0x521f, CRL_REG_LEN_08BIT, 0x12}, + {0x5220, CRL_REG_LEN_08BIT, 0x16}, + {0x5223, CRL_REG_LEN_08BIT, 0x02}, + {0x5225, CRL_REG_LEN_08BIT, 0x04}, + {0x5227, CRL_REG_LEN_08BIT, 0x08}, + {0x5229, CRL_REG_LEN_08BIT, 0x0c}, + {0x522b, CRL_REG_LEN_08BIT, 0x12}, + {0x522d, CRL_REG_LEN_08BIT, 0x18}, + {0x522f, CRL_REG_LEN_08BIT, 0x1e}, + {0x5241, CRL_REG_LEN_08BIT, 0x04}, + {0x5242, CRL_REG_LEN_08BIT, 0x01}, + {0x5243, CRL_REG_LEN_08BIT, 0x03}, + {0x5244, CRL_REG_LEN_08BIT, 0x06}, + {0x5245, CRL_REG_LEN_08BIT, 0x0a}, + {0x5246, CRL_REG_LEN_08BIT, 0x0e}, + {0x5247, CRL_REG_LEN_08BIT, 0x12}, + {0x5248, CRL_REG_LEN_08BIT, 0x16}, + {0x524a, CRL_REG_LEN_08BIT, 0x03}, + {0x524c, CRL_REG_LEN_08BIT, 0x04}, + {0x524e, CRL_REG_LEN_08BIT, 0x08}, + {0x5250, CRL_REG_LEN_08BIT, 0x0c}, + {0x5252, CRL_REG_LEN_08BIT, 0x12}, + {0x5254, CRL_REG_LEN_08BIT, 0x18}, + {0x5256, CRL_REG_LEN_08BIT, 0x1e}, + {0x4606, CRL_REG_LEN_08BIT, 0x07}, + {0x4607, CRL_REG_LEN_08BIT, 0x71}, + {0x460a, CRL_REG_LEN_08BIT, 0x03}, + {0x460b, CRL_REG_LEN_08BIT, 0xe7}, + {0x460c, CRL_REG_LEN_08BIT, 0x40}, + {0x4620, CRL_REG_LEN_08BIT, 0x0e}, + {0x4700, CRL_REG_LEN_08BIT, 0x06}, + {0x4701, CRL_REG_LEN_08BIT, 0x00}, + {0x4702, CRL_REG_LEN_08BIT, 0x01}, + {0x4004, CRL_REG_LEN_08BIT, 0x04}, + {0x4005, CRL_REG_LEN_08BIT, 0x18}, + {0x4001, CRL_REG_LEN_08BIT, 0x06}, + {0x4050, CRL_REG_LEN_08BIT, 0x22}, + {0x4051, CRL_REG_LEN_08BIT, 0x24}, + {0x4052, CRL_REG_LEN_08BIT, 0x02}, + {0x4057, CRL_REG_LEN_08BIT, 0x9c}, + {0x405a, CRL_REG_LEN_08BIT, 0x00}, + {0x4302, CRL_REG_LEN_08BIT, 0x03}, + {0x4303, CRL_REG_LEN_08BIT, 0xff}, + {0x4304, CRL_REG_LEN_08BIT, 0x00}, + {0x4305, CRL_REG_LEN_08BIT, 0x10}, + {0x4306, CRL_REG_LEN_08BIT, 0x03}, + {0x4307, CRL_REG_LEN_08BIT, 0xff}, + {0x4308, CRL_REG_LEN_08BIT, 0x00}, + {0x4309, CRL_REG_LEN_08BIT, 0x10}, + {0x4202, CRL_REG_LEN_08BIT, 0x02}, + {0x3023, CRL_REG_LEN_08BIT, 0x10}, + {0x0100, CRL_REG_LEN_08BIT, 
0x01}, + {0x0100, CRL_REG_LEN_08BIT, 0x01}, + {0x6f10, CRL_REG_LEN_08BIT, 0x07}, + {0x6f11, CRL_REG_LEN_08BIT, 0x82}, + {0x6f12, CRL_REG_LEN_08BIT, 0x04}, + {0x6f13, CRL_REG_LEN_08BIT, 0x00}, + {0x6f14, CRL_REG_LEN_08BIT, 0x1f}, + {0x6f15, CRL_REG_LEN_08BIT, 0xdd}, + {0x6f16, CRL_REG_LEN_08BIT, 0x04}, + {0x6f17, CRL_REG_LEN_08BIT, 0x04}, + {0x6f18, CRL_REG_LEN_08BIT, 0x36}, + {0x6f19, CRL_REG_LEN_08BIT, 0x66}, + {0x6f1a, CRL_REG_LEN_08BIT, 0x04}, + {0x6f1b, CRL_REG_LEN_08BIT, 0x08}, + {0x6f1c, CRL_REG_LEN_08BIT, 0x0c}, + {0x6f1d, CRL_REG_LEN_08BIT, 0xe7}, + {0x6f1e, CRL_REG_LEN_08BIT, 0x04}, + {0x6f1f, CRL_REG_LEN_08BIT, 0x0c}, + {0xd000, CRL_REG_LEN_08BIT, 0x19}, + {0xd001, CRL_REG_LEN_08BIT, 0xa0}, + {0xd002, CRL_REG_LEN_08BIT, 0x00}, + {0xd003, CRL_REG_LEN_08BIT, 0x01}, + {0xd004, CRL_REG_LEN_08BIT, 0xa9}, + {0xd005, CRL_REG_LEN_08BIT, 0xad}, + {0xd006, CRL_REG_LEN_08BIT, 0x10}, + {0xd007, CRL_REG_LEN_08BIT, 0x40}, + {0xd008, CRL_REG_LEN_08BIT, 0x44}, + {0xd009, CRL_REG_LEN_08BIT, 0x00}, + {0xd00a, CRL_REG_LEN_08BIT, 0x68}, + {0xd00b, CRL_REG_LEN_08BIT, 0x00}, + {0xd00c, CRL_REG_LEN_08BIT, 0x15}, + {0xd00d, CRL_REG_LEN_08BIT, 0x00}, + {0xd00e, CRL_REG_LEN_08BIT, 0x00}, + {0xd00f, CRL_REG_LEN_08BIT, 0x00}, + {0xd010, CRL_REG_LEN_08BIT, 0x19}, + {0xd011, CRL_REG_LEN_08BIT, 0xa0}, + {0xd012, CRL_REG_LEN_08BIT, 0x00}, + {0xd013, CRL_REG_LEN_08BIT, 0x01}, + {0xd014, CRL_REG_LEN_08BIT, 0xa9}, + {0xd015, CRL_REG_LEN_08BIT, 0xad}, + {0xd016, CRL_REG_LEN_08BIT, 0x14}, + {0xd017, CRL_REG_LEN_08BIT, 0x40}, + {0xd018, CRL_REG_LEN_08BIT, 0x44}, + {0xd019, CRL_REG_LEN_08BIT, 0x00}, + {0xd01a, CRL_REG_LEN_08BIT, 0x68}, + {0xd01b, CRL_REG_LEN_08BIT, 0x00}, + {0xd01c, CRL_REG_LEN_08BIT, 0x15}, + {0xd01d, CRL_REG_LEN_08BIT, 0x00}, + {0xd01e, CRL_REG_LEN_08BIT, 0x00}, + {0xd01f, CRL_REG_LEN_08BIT, 0x00}, + {0xd020, CRL_REG_LEN_08BIT, 0x19}, + {0xd021, CRL_REG_LEN_08BIT, 0xa0}, + {0xd022, CRL_REG_LEN_08BIT, 0x00}, + {0xd023, CRL_REG_LEN_08BIT, 0x01}, + {0xd024, CRL_REG_LEN_08BIT, 
0xa9}, + {0xd025, CRL_REG_LEN_08BIT, 0xad}, + {0xd026, CRL_REG_LEN_08BIT, 0x15}, + {0xd027, CRL_REG_LEN_08BIT, 0x28}, + {0xd028, CRL_REG_LEN_08BIT, 0x44}, + {0xd029, CRL_REG_LEN_08BIT, 0x00}, + {0xd02a, CRL_REG_LEN_08BIT, 0x68}, + {0xd02b, CRL_REG_LEN_08BIT, 0x00}, + {0xd02c, CRL_REG_LEN_08BIT, 0x15}, + {0xd02d, CRL_REG_LEN_08BIT, 0x00}, + {0xd02e, CRL_REG_LEN_08BIT, 0x00}, + {0xd02f, CRL_REG_LEN_08BIT, 0x00}, + {0xd030, CRL_REG_LEN_08BIT, 0x19}, + {0xd031, CRL_REG_LEN_08BIT, 0xa0}, + {0xd032, CRL_REG_LEN_08BIT, 0x00}, + {0xd033, CRL_REG_LEN_08BIT, 0x01}, + {0xd034, CRL_REG_LEN_08BIT, 0xa9}, + {0xd035, CRL_REG_LEN_08BIT, 0xad}, + {0xd036, CRL_REG_LEN_08BIT, 0x15}, + {0xd037, CRL_REG_LEN_08BIT, 0x4c}, + {0xd038, CRL_REG_LEN_08BIT, 0x44}, + {0xd039, CRL_REG_LEN_08BIT, 0x00}, + {0xd03a, CRL_REG_LEN_08BIT, 0x68}, + {0xd03b, CRL_REG_LEN_08BIT, 0x00}, + {0xd03c, CRL_REG_LEN_08BIT, 0x15}, + {0xd03d, CRL_REG_LEN_08BIT, 0x00}, + {0xd03e, CRL_REG_LEN_08BIT, 0x00}, + {0xd03f, CRL_REG_LEN_08BIT, 0x00}, + {0xd040, CRL_REG_LEN_08BIT, 0x9c}, + {0xd041, CRL_REG_LEN_08BIT, 0x21}, + {0xd042, CRL_REG_LEN_08BIT, 0xff}, + {0xd043, CRL_REG_LEN_08BIT, 0xe4}, + {0xd044, CRL_REG_LEN_08BIT, 0xd4}, + {0xd045, CRL_REG_LEN_08BIT, 0x01}, + {0xd046, CRL_REG_LEN_08BIT, 0x48}, + {0xd047, CRL_REG_LEN_08BIT, 0x00}, + {0xd048, CRL_REG_LEN_08BIT, 0xd4}, + {0xd049, CRL_REG_LEN_08BIT, 0x01}, + {0xd04a, CRL_REG_LEN_08BIT, 0x50}, + {0xd04b, CRL_REG_LEN_08BIT, 0x04}, + {0xd04c, CRL_REG_LEN_08BIT, 0xd4}, + {0xd04d, CRL_REG_LEN_08BIT, 0x01}, + {0xd04e, CRL_REG_LEN_08BIT, 0x60}, + {0xd04f, CRL_REG_LEN_08BIT, 0x08}, + {0xd050, CRL_REG_LEN_08BIT, 0xd4}, + {0xd051, CRL_REG_LEN_08BIT, 0x01}, + {0xd052, CRL_REG_LEN_08BIT, 0x70}, + {0xd053, CRL_REG_LEN_08BIT, 0x0c}, + {0xd054, CRL_REG_LEN_08BIT, 0xd4}, + {0xd055, CRL_REG_LEN_08BIT, 0x01}, + {0xd056, CRL_REG_LEN_08BIT, 0x80}, + {0xd057, CRL_REG_LEN_08BIT, 0x10}, + {0xd058, CRL_REG_LEN_08BIT, 0x19}, + {0xd059, CRL_REG_LEN_08BIT, 0xc0}, + {0xd05a, CRL_REG_LEN_08BIT, 
0x00}, + {0xd05b, CRL_REG_LEN_08BIT, 0x01}, + {0xd05c, CRL_REG_LEN_08BIT, 0xa9}, + {0xd05d, CRL_REG_LEN_08BIT, 0xce}, + {0xd05e, CRL_REG_LEN_08BIT, 0x02}, + {0xd05f, CRL_REG_LEN_08BIT, 0xa4}, + {0xd060, CRL_REG_LEN_08BIT, 0x9c}, + {0xd061, CRL_REG_LEN_08BIT, 0xa0}, + {0xd062, CRL_REG_LEN_08BIT, 0x00}, + {0xd063, CRL_REG_LEN_08BIT, 0x00}, + {0xd064, CRL_REG_LEN_08BIT, 0x84}, + {0xd065, CRL_REG_LEN_08BIT, 0x6e}, + {0xd066, CRL_REG_LEN_08BIT, 0x00}, + {0xd067, CRL_REG_LEN_08BIT, 0x00}, + {0xd068, CRL_REG_LEN_08BIT, 0xd8}, + {0xd069, CRL_REG_LEN_08BIT, 0x03}, + {0xd06a, CRL_REG_LEN_08BIT, 0x28}, + {0xd06b, CRL_REG_LEN_08BIT, 0x76}, + {0xd06c, CRL_REG_LEN_08BIT, 0x1a}, + {0xd06d, CRL_REG_LEN_08BIT, 0x00}, + {0xd06e, CRL_REG_LEN_08BIT, 0x00}, + {0xd06f, CRL_REG_LEN_08BIT, 0x01}, + {0xd070, CRL_REG_LEN_08BIT, 0xaa}, + {0xd071, CRL_REG_LEN_08BIT, 0x10}, + {0xd072, CRL_REG_LEN_08BIT, 0x03}, + {0xd073, CRL_REG_LEN_08BIT, 0xf0}, + {0xd074, CRL_REG_LEN_08BIT, 0x18}, + {0xd075, CRL_REG_LEN_08BIT, 0x60}, + {0xd076, CRL_REG_LEN_08BIT, 0x00}, + {0xd077, CRL_REG_LEN_08BIT, 0x01}, + {0xd078, CRL_REG_LEN_08BIT, 0xa8}, + {0xd079, CRL_REG_LEN_08BIT, 0x63}, + {0xd07a, CRL_REG_LEN_08BIT, 0x07}, + {0xd07b, CRL_REG_LEN_08BIT, 0x80}, + {0xd07c, CRL_REG_LEN_08BIT, 0xe0}, + {0xd07d, CRL_REG_LEN_08BIT, 0xa0}, + {0xd07e, CRL_REG_LEN_08BIT, 0x00}, + {0xd07f, CRL_REG_LEN_08BIT, 0x04}, + {0xd080, CRL_REG_LEN_08BIT, 0x18}, + {0xd081, CRL_REG_LEN_08BIT, 0xc0}, + {0xd082, CRL_REG_LEN_08BIT, 0x00}, + {0xd083, CRL_REG_LEN_08BIT, 0x00}, + {0xd084, CRL_REG_LEN_08BIT, 0xa8}, + {0xd085, CRL_REG_LEN_08BIT, 0xc6}, + {0xd086, CRL_REG_LEN_08BIT, 0x00}, + {0xd087, CRL_REG_LEN_08BIT, 0x00}, + {0xd088, CRL_REG_LEN_08BIT, 0x8c}, + {0xd089, CRL_REG_LEN_08BIT, 0x63}, + {0xd08a, CRL_REG_LEN_08BIT, 0x00}, + {0xd08b, CRL_REG_LEN_08BIT, 0x00}, + {0xd08c, CRL_REG_LEN_08BIT, 0xd4}, + {0xd08d, CRL_REG_LEN_08BIT, 0x01}, + {0xd08e, CRL_REG_LEN_08BIT, 0x28}, + {0xd08f, CRL_REG_LEN_08BIT, 0x14}, + {0xd090, CRL_REG_LEN_08BIT, 
0xd4}, + {0xd091, CRL_REG_LEN_08BIT, 0x01}, + {0xd092, CRL_REG_LEN_08BIT, 0x30}, + {0xd093, CRL_REG_LEN_08BIT, 0x18}, + {0xd094, CRL_REG_LEN_08BIT, 0x07}, + {0xd095, CRL_REG_LEN_08BIT, 0xff}, + {0xd096, CRL_REG_LEN_08BIT, 0xf8}, + {0xd097, CRL_REG_LEN_08BIT, 0xfd}, + {0xd098, CRL_REG_LEN_08BIT, 0x9c}, + {0xd099, CRL_REG_LEN_08BIT, 0x80}, + {0xd09a, CRL_REG_LEN_08BIT, 0x00}, + {0xd09b, CRL_REG_LEN_08BIT, 0x03}, + {0xd09c, CRL_REG_LEN_08BIT, 0xa5}, + {0xd09d, CRL_REG_LEN_08BIT, 0x6b}, + {0xd09e, CRL_REG_LEN_08BIT, 0x00}, + {0xd09f, CRL_REG_LEN_08BIT, 0xff}, + {0xd0a0, CRL_REG_LEN_08BIT, 0x18}, + {0xd0a1, CRL_REG_LEN_08BIT, 0xc0}, + {0xd0a2, CRL_REG_LEN_08BIT, 0x00}, + {0xd0a3, CRL_REG_LEN_08BIT, 0x01}, + {0xd0a4, CRL_REG_LEN_08BIT, 0xa8}, + {0xd0a5, CRL_REG_LEN_08BIT, 0xc6}, + {0xd0a6, CRL_REG_LEN_08BIT, 0x01}, + {0xd0a7, CRL_REG_LEN_08BIT, 0x02}, + {0xd0a8, CRL_REG_LEN_08BIT, 0xe1}, + {0xd0a9, CRL_REG_LEN_08BIT, 0x6b}, + {0xd0aa, CRL_REG_LEN_08BIT, 0x58}, + {0xd0ab, CRL_REG_LEN_08BIT, 0x00}, + {0xd0ac, CRL_REG_LEN_08BIT, 0x84}, + {0xd0ad, CRL_REG_LEN_08BIT, 0x8e}, + {0xd0ae, CRL_REG_LEN_08BIT, 0x00}, + {0xd0af, CRL_REG_LEN_08BIT, 0x00}, + {0xd0b0, CRL_REG_LEN_08BIT, 0xe1}, + {0xd0b1, CRL_REG_LEN_08BIT, 0x6b}, + {0xd0b2, CRL_REG_LEN_08BIT, 0x30}, + {0xd0b3, CRL_REG_LEN_08BIT, 0x00}, + {0xd0b4, CRL_REG_LEN_08BIT, 0x98}, + {0xd0b5, CRL_REG_LEN_08BIT, 0xb0}, + {0xd0b6, CRL_REG_LEN_08BIT, 0x00}, + {0xd0b7, CRL_REG_LEN_08BIT, 0x00}, + {0xd0b8, CRL_REG_LEN_08BIT, 0x8c}, + {0xd0b9, CRL_REG_LEN_08BIT, 0x64}, + {0xd0ba, CRL_REG_LEN_08BIT, 0x00}, + {0xd0bb, CRL_REG_LEN_08BIT, 0x6e}, + {0xd0bc, CRL_REG_LEN_08BIT, 0xe5}, + {0xd0bd, CRL_REG_LEN_08BIT, 0xa5}, + {0xd0be, CRL_REG_LEN_08BIT, 0x18}, + {0xd0bf, CRL_REG_LEN_08BIT, 0x00}, + {0xd0c0, CRL_REG_LEN_08BIT, 0x10}, + {0xd0c1, CRL_REG_LEN_08BIT, 0x00}, + {0xd0c2, CRL_REG_LEN_08BIT, 0x00}, + {0xd0c3, CRL_REG_LEN_08BIT, 0x06}, + {0xd0c4, CRL_REG_LEN_08BIT, 0x95}, + {0xd0c5, CRL_REG_LEN_08BIT, 0x8b}, + {0xd0c6, CRL_REG_LEN_08BIT, 
0x00}, + {0xd0c7, CRL_REG_LEN_08BIT, 0x00}, + {0xd0c8, CRL_REG_LEN_08BIT, 0x94}, + {0xd0c9, CRL_REG_LEN_08BIT, 0xa4}, + {0xd0ca, CRL_REG_LEN_08BIT, 0x00}, + {0xd0cb, CRL_REG_LEN_08BIT, 0x70}, + {0xd0cc, CRL_REG_LEN_08BIT, 0xe5}, + {0xd0cd, CRL_REG_LEN_08BIT, 0x65}, + {0xd0ce, CRL_REG_LEN_08BIT, 0x60}, + {0xd0cf, CRL_REG_LEN_08BIT, 0x00}, + {0xd0d0, CRL_REG_LEN_08BIT, 0x0c}, + {0xd0d1, CRL_REG_LEN_08BIT, 0x00}, + {0xd0d2, CRL_REG_LEN_08BIT, 0x00}, + {0xd0d3, CRL_REG_LEN_08BIT, 0x62}, + {0xd0d4, CRL_REG_LEN_08BIT, 0x15}, + {0xd0d5, CRL_REG_LEN_08BIT, 0x00}, + {0xd0d6, CRL_REG_LEN_08BIT, 0x00}, + {0xd0d7, CRL_REG_LEN_08BIT, 0x00}, + {0xd0d8, CRL_REG_LEN_08BIT, 0x18}, + {0xd0d9, CRL_REG_LEN_08BIT, 0x60}, + {0xd0da, CRL_REG_LEN_08BIT, 0x80}, + {0xd0db, CRL_REG_LEN_08BIT, 0x06}, + {0xd0dc, CRL_REG_LEN_08BIT, 0xa8}, + {0xd0dd, CRL_REG_LEN_08BIT, 0x83}, + {0xd0de, CRL_REG_LEN_08BIT, 0x38}, + {0xd0df, CRL_REG_LEN_08BIT, 0x29}, + {0xd0e0, CRL_REG_LEN_08BIT, 0xa8}, + {0xd0e1, CRL_REG_LEN_08BIT, 0xe3}, + {0xd0e2, CRL_REG_LEN_08BIT, 0x40}, + {0xd0e3, CRL_REG_LEN_08BIT, 0x08}, + {0xd0e4, CRL_REG_LEN_08BIT, 0x8c}, + {0xd0e5, CRL_REG_LEN_08BIT, 0x84}, + {0xd0e6, CRL_REG_LEN_08BIT, 0x00}, + {0xd0e7, CRL_REG_LEN_08BIT, 0x00}, + {0xd0e8, CRL_REG_LEN_08BIT, 0xa8}, + {0xd0e9, CRL_REG_LEN_08BIT, 0xa3}, + {0xd0ea, CRL_REG_LEN_08BIT, 0x40}, + {0xd0eb, CRL_REG_LEN_08BIT, 0x09}, + {0xd0ec, CRL_REG_LEN_08BIT, 0xa8}, + {0xd0ed, CRL_REG_LEN_08BIT, 0xc3}, + {0xd0ee, CRL_REG_LEN_08BIT, 0x38}, + {0xd0ef, CRL_REG_LEN_08BIT, 0x2a}, + {0xd0f0, CRL_REG_LEN_08BIT, 0xd8}, + {0xd0f1, CRL_REG_LEN_08BIT, 0x07}, + {0xd0f2, CRL_REG_LEN_08BIT, 0x20}, + {0xd0f3, CRL_REG_LEN_08BIT, 0x00}, + {0xd0f4, CRL_REG_LEN_08BIT, 0x8c}, + {0xd0f5, CRL_REG_LEN_08BIT, 0x66}, + {0xd0f6, CRL_REG_LEN_08BIT, 0x00}, + {0xd0f7, CRL_REG_LEN_08BIT, 0x00}, + {0xd0f8, CRL_REG_LEN_08BIT, 0xd8}, + {0xd0f9, CRL_REG_LEN_08BIT, 0x05}, + {0xd0fa, CRL_REG_LEN_08BIT, 0x18}, + {0xd0fb, CRL_REG_LEN_08BIT, 0x00}, + {0xd0fc, CRL_REG_LEN_08BIT, 
0x18}, + {0xd0fd, CRL_REG_LEN_08BIT, 0x60}, + {0xd0fe, CRL_REG_LEN_08BIT, 0x00}, + {0xd0ff, CRL_REG_LEN_08BIT, 0x01}, + {0xd100, CRL_REG_LEN_08BIT, 0x98}, + {0xd101, CRL_REG_LEN_08BIT, 0x90}, + {0xd102, CRL_REG_LEN_08BIT, 0x00}, + {0xd103, CRL_REG_LEN_08BIT, 0x00}, + {0xd104, CRL_REG_LEN_08BIT, 0x84}, + {0xd105, CRL_REG_LEN_08BIT, 0xae}, + {0xd106, CRL_REG_LEN_08BIT, 0x00}, + {0xd107, CRL_REG_LEN_08BIT, 0x00}, + {0xd108, CRL_REG_LEN_08BIT, 0xa8}, + {0xd109, CRL_REG_LEN_08BIT, 0x63}, + {0xd10a, CRL_REG_LEN_08BIT, 0x06}, + {0xd10b, CRL_REG_LEN_08BIT, 0x4c}, + {0xd10c, CRL_REG_LEN_08BIT, 0x9c}, + {0xd10d, CRL_REG_LEN_08BIT, 0xc0}, + {0xd10e, CRL_REG_LEN_08BIT, 0x00}, + {0xd10f, CRL_REG_LEN_08BIT, 0x00}, + {0xd110, CRL_REG_LEN_08BIT, 0xd8}, + {0xd111, CRL_REG_LEN_08BIT, 0x03}, + {0xd112, CRL_REG_LEN_08BIT, 0x30}, + {0xd113, CRL_REG_LEN_08BIT, 0x00}, + {0xd114, CRL_REG_LEN_08BIT, 0x8c}, + {0xd115, CRL_REG_LEN_08BIT, 0x65}, + {0xd116, CRL_REG_LEN_08BIT, 0x00}, + {0xd117, CRL_REG_LEN_08BIT, 0x6e}, + {0xd118, CRL_REG_LEN_08BIT, 0xe5}, + {0xd119, CRL_REG_LEN_08BIT, 0x84}, + {0xd11a, CRL_REG_LEN_08BIT, 0x18}, + {0xd11b, CRL_REG_LEN_08BIT, 0x00}, + {0xd11c, CRL_REG_LEN_08BIT, 0x10}, + {0xd11d, CRL_REG_LEN_08BIT, 0x00}, + {0xd11e, CRL_REG_LEN_08BIT, 0x00}, + {0xd11f, CRL_REG_LEN_08BIT, 0x07}, + {0xd120, CRL_REG_LEN_08BIT, 0x18}, + {0xd121, CRL_REG_LEN_08BIT, 0x80}, + {0xd122, CRL_REG_LEN_08BIT, 0x80}, + {0xd123, CRL_REG_LEN_08BIT, 0x06}, + {0xd124, CRL_REG_LEN_08BIT, 0x94}, + {0xd125, CRL_REG_LEN_08BIT, 0x65}, + {0xd126, CRL_REG_LEN_08BIT, 0x00}, + {0xd127, CRL_REG_LEN_08BIT, 0x70}, + {0xd128, CRL_REG_LEN_08BIT, 0xe5}, + {0xd129, CRL_REG_LEN_08BIT, 0x43}, + {0xd12a, CRL_REG_LEN_08BIT, 0x60}, + {0xd12b, CRL_REG_LEN_08BIT, 0x00}, + {0xd12c, CRL_REG_LEN_08BIT, 0x0c}, + {0xd12d, CRL_REG_LEN_08BIT, 0x00}, + {0xd12e, CRL_REG_LEN_08BIT, 0x00}, + {0xd12f, CRL_REG_LEN_08BIT, 0x3e}, + {0xd130, CRL_REG_LEN_08BIT, 0xa8}, + {0xd131, CRL_REG_LEN_08BIT, 0x64}, + {0xd132, CRL_REG_LEN_08BIT, 
0x38}, + {0xd133, CRL_REG_LEN_08BIT, 0x24}, + {0xd134, CRL_REG_LEN_08BIT, 0x18}, + {0xd135, CRL_REG_LEN_08BIT, 0x80}, + {0xd136, CRL_REG_LEN_08BIT, 0x80}, + {0xd137, CRL_REG_LEN_08BIT, 0x06}, + {0xd138, CRL_REG_LEN_08BIT, 0xa8}, + {0xd139, CRL_REG_LEN_08BIT, 0x64}, + {0xd13a, CRL_REG_LEN_08BIT, 0x38}, + {0xd13b, CRL_REG_LEN_08BIT, 0x24}, + {0xd13c, CRL_REG_LEN_08BIT, 0x8c}, + {0xd13d, CRL_REG_LEN_08BIT, 0x63}, + {0xd13e, CRL_REG_LEN_08BIT, 0x00}, + {0xd13f, CRL_REG_LEN_08BIT, 0x00}, + {0xd140, CRL_REG_LEN_08BIT, 0xa4}, + {0xd141, CRL_REG_LEN_08BIT, 0x63}, + {0xd142, CRL_REG_LEN_08BIT, 0x00}, + {0xd143, CRL_REG_LEN_08BIT, 0x40}, + {0xd144, CRL_REG_LEN_08BIT, 0xbc}, + {0xd145, CRL_REG_LEN_08BIT, 0x23}, + {0xd146, CRL_REG_LEN_08BIT, 0x00}, + {0xd147, CRL_REG_LEN_08BIT, 0x00}, + {0xd148, CRL_REG_LEN_08BIT, 0x0c}, + {0xd149, CRL_REG_LEN_08BIT, 0x00}, + {0xd14a, CRL_REG_LEN_08BIT, 0x00}, + {0xd14b, CRL_REG_LEN_08BIT, 0x2a}, + {0xd14c, CRL_REG_LEN_08BIT, 0xa8}, + {0xd14d, CRL_REG_LEN_08BIT, 0x64}, + {0xd14e, CRL_REG_LEN_08BIT, 0x6e}, + {0xd14f, CRL_REG_LEN_08BIT, 0x44}, + {0xd150, CRL_REG_LEN_08BIT, 0x19}, + {0xd151, CRL_REG_LEN_08BIT, 0x00}, + {0xd152, CRL_REG_LEN_08BIT, 0x80}, + {0xd153, CRL_REG_LEN_08BIT, 0x06}, + {0xd154, CRL_REG_LEN_08BIT, 0xa8}, + {0xd155, CRL_REG_LEN_08BIT, 0xe8}, + {0xd156, CRL_REG_LEN_08BIT, 0x3d}, + {0xd157, CRL_REG_LEN_08BIT, 0x05}, + {0xd158, CRL_REG_LEN_08BIT, 0x8c}, + {0xd159, CRL_REG_LEN_08BIT, 0x67}, + {0xd15a, CRL_REG_LEN_08BIT, 0x00}, + {0xd15b, CRL_REG_LEN_08BIT, 0x00}, + {0xd15c, CRL_REG_LEN_08BIT, 0xb8}, + {0xd15d, CRL_REG_LEN_08BIT, 0x63}, + {0xd15e, CRL_REG_LEN_08BIT, 0x00}, + {0xd15f, CRL_REG_LEN_08BIT, 0x18}, + {0xd160, CRL_REG_LEN_08BIT, 0xb8}, + {0xd161, CRL_REG_LEN_08BIT, 0x63}, + {0xd162, CRL_REG_LEN_08BIT, 0x00}, + {0xd163, CRL_REG_LEN_08BIT, 0x98}, + {0xd164, CRL_REG_LEN_08BIT, 0xbc}, + {0xd165, CRL_REG_LEN_08BIT, 0x03}, + {0xd166, CRL_REG_LEN_08BIT, 0x00}, + {0xd167, CRL_REG_LEN_08BIT, 0x00}, + {0xd168, CRL_REG_LEN_08BIT, 
0x10}, + {0xd169, CRL_REG_LEN_08BIT, 0x00}, + {0xd16a, CRL_REG_LEN_08BIT, 0x00}, + {0xd16b, CRL_REG_LEN_08BIT, 0x10}, + {0xd16c, CRL_REG_LEN_08BIT, 0xa9}, + {0xd16d, CRL_REG_LEN_08BIT, 0x48}, + {0xd16e, CRL_REG_LEN_08BIT, 0x67}, + {0xd16f, CRL_REG_LEN_08BIT, 0x02}, + {0xd170, CRL_REG_LEN_08BIT, 0xb8}, + {0xd171, CRL_REG_LEN_08BIT, 0xa3}, + {0xd172, CRL_REG_LEN_08BIT, 0x00}, + {0xd173, CRL_REG_LEN_08BIT, 0x19}, + {0xd174, CRL_REG_LEN_08BIT, 0x8c}, + {0xd175, CRL_REG_LEN_08BIT, 0x8a}, + {0xd176, CRL_REG_LEN_08BIT, 0x00}, + {0xd177, CRL_REG_LEN_08BIT, 0x00}, + {0xd178, CRL_REG_LEN_08BIT, 0xa9}, + {0xd179, CRL_REG_LEN_08BIT, 0x68}, + {0xd17a, CRL_REG_LEN_08BIT, 0x67}, + {0xd17b, CRL_REG_LEN_08BIT, 0x03}, + {0xd17c, CRL_REG_LEN_08BIT, 0xb8}, + {0xd17d, CRL_REG_LEN_08BIT, 0xc4}, + {0xd17e, CRL_REG_LEN_08BIT, 0x00}, + {0xd17f, CRL_REG_LEN_08BIT, 0x08}, + {0xd180, CRL_REG_LEN_08BIT, 0x8c}, + {0xd181, CRL_REG_LEN_08BIT, 0x6b}, + {0xd182, CRL_REG_LEN_08BIT, 0x00}, + {0xd183, CRL_REG_LEN_08BIT, 0x00}, + {0xd184, CRL_REG_LEN_08BIT, 0xb8}, + {0xd185, CRL_REG_LEN_08BIT, 0x85}, + {0xd186, CRL_REG_LEN_08BIT, 0x00}, + {0xd187, CRL_REG_LEN_08BIT, 0x98}, + {0xd188, CRL_REG_LEN_08BIT, 0xe0}, + {0xd189, CRL_REG_LEN_08BIT, 0x63}, + {0xd18a, CRL_REG_LEN_08BIT, 0x30}, + {0xd18b, CRL_REG_LEN_08BIT, 0x04}, + {0xd18c, CRL_REG_LEN_08BIT, 0xe0}, + {0xd18d, CRL_REG_LEN_08BIT, 0x64}, + {0xd18e, CRL_REG_LEN_08BIT, 0x18}, + {0xd18f, CRL_REG_LEN_08BIT, 0x00}, + {0xd190, CRL_REG_LEN_08BIT, 0xa4}, + {0xd191, CRL_REG_LEN_08BIT, 0x83}, + {0xd192, CRL_REG_LEN_08BIT, 0xff}, + {0xd193, CRL_REG_LEN_08BIT, 0xff}, + {0xd194, CRL_REG_LEN_08BIT, 0xb8}, + {0xd195, CRL_REG_LEN_08BIT, 0x64}, + {0xd196, CRL_REG_LEN_08BIT, 0x00}, + {0xd197, CRL_REG_LEN_08BIT, 0x48}, + {0xd198, CRL_REG_LEN_08BIT, 0xd8}, + {0xd199, CRL_REG_LEN_08BIT, 0x0a}, + {0xd19a, CRL_REG_LEN_08BIT, 0x18}, + {0xd19b, CRL_REG_LEN_08BIT, 0x00}, + {0xd19c, CRL_REG_LEN_08BIT, 0xd8}, + {0xd19d, CRL_REG_LEN_08BIT, 0x0b}, + {0xd19e, CRL_REG_LEN_08BIT, 
0x20}, + {0xd19f, CRL_REG_LEN_08BIT, 0x00}, + {0xd1a0, CRL_REG_LEN_08BIT, 0x9c}, + {0xd1a1, CRL_REG_LEN_08BIT, 0x60}, + {0xd1a2, CRL_REG_LEN_08BIT, 0x00}, + {0xd1a3, CRL_REG_LEN_08BIT, 0x00}, + {0xd1a4, CRL_REG_LEN_08BIT, 0xd8}, + {0xd1a5, CRL_REG_LEN_08BIT, 0x07}, + {0xd1a6, CRL_REG_LEN_08BIT, 0x18}, + {0xd1a7, CRL_REG_LEN_08BIT, 0x00}, + {0xd1a8, CRL_REG_LEN_08BIT, 0xa8}, + {0xd1a9, CRL_REG_LEN_08BIT, 0x68}, + {0xd1aa, CRL_REG_LEN_08BIT, 0x38}, + {0xd1ab, CRL_REG_LEN_08BIT, 0x22}, + {0xd1ac, CRL_REG_LEN_08BIT, 0x9c}, + {0xd1ad, CRL_REG_LEN_08BIT, 0x80}, + {0xd1ae, CRL_REG_LEN_08BIT, 0x00}, + {0xd1af, CRL_REG_LEN_08BIT, 0x70}, + {0xd1b0, CRL_REG_LEN_08BIT, 0xa8}, + {0xd1b1, CRL_REG_LEN_08BIT, 0xe8}, + {0xd1b2, CRL_REG_LEN_08BIT, 0x38}, + {0xd1b3, CRL_REG_LEN_08BIT, 0x43}, + {0xd1b4, CRL_REG_LEN_08BIT, 0xd8}, + {0xd1b5, CRL_REG_LEN_08BIT, 0x03}, + {0xd1b6, CRL_REG_LEN_08BIT, 0x20}, + {0xd1b7, CRL_REG_LEN_08BIT, 0x00}, + {0xd1b8, CRL_REG_LEN_08BIT, 0x9c}, + {0xd1b9, CRL_REG_LEN_08BIT, 0xa0}, + {0xd1ba, CRL_REG_LEN_08BIT, 0x00}, + {0xd1bb, CRL_REG_LEN_08BIT, 0x00}, + {0xd1bc, CRL_REG_LEN_08BIT, 0xa8}, + {0xd1bd, CRL_REG_LEN_08BIT, 0xc8}, + {0xd1be, CRL_REG_LEN_08BIT, 0x38}, + {0xd1bf, CRL_REG_LEN_08BIT, 0x42}, + {0xd1c0, CRL_REG_LEN_08BIT, 0x8c}, + {0xd1c1, CRL_REG_LEN_08BIT, 0x66}, + {0xd1c2, CRL_REG_LEN_08BIT, 0x00}, + {0xd1c3, CRL_REG_LEN_08BIT, 0x00}, + {0xd1c4, CRL_REG_LEN_08BIT, 0x9c}, + {0xd1c5, CRL_REG_LEN_08BIT, 0xa5}, + {0xd1c6, CRL_REG_LEN_08BIT, 0x00}, + {0xd1c7, CRL_REG_LEN_08BIT, 0x01}, + {0xd1c8, CRL_REG_LEN_08BIT, 0xb8}, + {0xd1c9, CRL_REG_LEN_08BIT, 0x83}, + {0xd1ca, CRL_REG_LEN_08BIT, 0x00}, + {0xd1cb, CRL_REG_LEN_08BIT, 0x08}, + {0xd1cc, CRL_REG_LEN_08BIT, 0xa4}, + {0xd1cd, CRL_REG_LEN_08BIT, 0xa5}, + {0xd1ce, CRL_REG_LEN_08BIT, 0x00}, + {0xd1cf, CRL_REG_LEN_08BIT, 0xff}, + {0xd1d0, CRL_REG_LEN_08BIT, 0x8c}, + {0xd1d1, CRL_REG_LEN_08BIT, 0x67}, + {0xd1d2, CRL_REG_LEN_08BIT, 0x00}, + {0xd1d3, CRL_REG_LEN_08BIT, 0x00}, + {0xd1d4, CRL_REG_LEN_08BIT, 
0xe0}, + {0xd1d5, CRL_REG_LEN_08BIT, 0x63}, + {0xd1d6, CRL_REG_LEN_08BIT, 0x20}, + {0xd1d7, CRL_REG_LEN_08BIT, 0x00}, + {0xd1d8, CRL_REG_LEN_08BIT, 0xa4}, + {0xd1d9, CRL_REG_LEN_08BIT, 0x63}, + {0xd1da, CRL_REG_LEN_08BIT, 0xff}, + {0xd1db, CRL_REG_LEN_08BIT, 0xff}, + {0xd1dc, CRL_REG_LEN_08BIT, 0xbc}, + {0xd1dd, CRL_REG_LEN_08BIT, 0x43}, + {0xd1de, CRL_REG_LEN_08BIT, 0x00}, + {0xd1df, CRL_REG_LEN_08BIT, 0x07}, + {0xd1e0, CRL_REG_LEN_08BIT, 0x0c}, + {0xd1e1, CRL_REG_LEN_08BIT, 0x00}, + {0xd1e2, CRL_REG_LEN_08BIT, 0x00}, + {0xd1e3, CRL_REG_LEN_08BIT, 0x5b}, + {0xd1e4, CRL_REG_LEN_08BIT, 0xbc}, + {0xd1e5, CRL_REG_LEN_08BIT, 0x05}, + {0xd1e6, CRL_REG_LEN_08BIT, 0x00}, + {0xd1e7, CRL_REG_LEN_08BIT, 0x02}, + {0xd1e8, CRL_REG_LEN_08BIT, 0x03}, + {0xd1e9, CRL_REG_LEN_08BIT, 0xff}, + {0xd1ea, CRL_REG_LEN_08BIT, 0xff}, + {0xd1eb, CRL_REG_LEN_08BIT, 0xf6}, + {0xd1ec, CRL_REG_LEN_08BIT, 0x9c}, + {0xd1ed, CRL_REG_LEN_08BIT, 0xa0}, + {0xd1ee, CRL_REG_LEN_08BIT, 0x00}, + {0xd1ef, CRL_REG_LEN_08BIT, 0x00}, + {0xd1f0, CRL_REG_LEN_08BIT, 0xa8}, + {0xd1f1, CRL_REG_LEN_08BIT, 0xa4}, + {0xd1f2, CRL_REG_LEN_08BIT, 0x55}, + {0xd1f3, CRL_REG_LEN_08BIT, 0x86}, + {0xd1f4, CRL_REG_LEN_08BIT, 0x8c}, + {0xd1f5, CRL_REG_LEN_08BIT, 0x63}, + {0xd1f6, CRL_REG_LEN_08BIT, 0x00}, + {0xd1f7, CRL_REG_LEN_08BIT, 0x00}, + {0xd1f8, CRL_REG_LEN_08BIT, 0xa8}, + {0xd1f9, CRL_REG_LEN_08BIT, 0xc4}, + {0xd1fa, CRL_REG_LEN_08BIT, 0x6e}, + {0xd1fb, CRL_REG_LEN_08BIT, 0x45}, + {0xd1fc, CRL_REG_LEN_08BIT, 0xa8}, + {0xd1fd, CRL_REG_LEN_08BIT, 0xe4}, + {0xd1fe, CRL_REG_LEN_08BIT, 0x55}, + {0xd1ff, CRL_REG_LEN_08BIT, 0x87}, + {0xd200, CRL_REG_LEN_08BIT, 0xd8}, + {0xd201, CRL_REG_LEN_08BIT, 0x05}, + {0xd202, CRL_REG_LEN_08BIT, 0x18}, + {0xd203, CRL_REG_LEN_08BIT, 0x00}, + {0xd204, CRL_REG_LEN_08BIT, 0x8c}, + {0xd205, CRL_REG_LEN_08BIT, 0x66}, + {0xd206, CRL_REG_LEN_08BIT, 0x00}, + {0xd207, CRL_REG_LEN_08BIT, 0x00}, + {0xd208, CRL_REG_LEN_08BIT, 0xa8}, + {0xd209, CRL_REG_LEN_08BIT, 0xa4}, + {0xd20a, CRL_REG_LEN_08BIT, 
0x6e}, + {0xd20b, CRL_REG_LEN_08BIT, 0x46}, + {0xd20c, CRL_REG_LEN_08BIT, 0xd8}, + {0xd20d, CRL_REG_LEN_08BIT, 0x07}, + {0xd20e, CRL_REG_LEN_08BIT, 0x18}, + {0xd20f, CRL_REG_LEN_08BIT, 0x00}, + {0xd210, CRL_REG_LEN_08BIT, 0xa8}, + {0xd211, CRL_REG_LEN_08BIT, 0x84}, + {0xd212, CRL_REG_LEN_08BIT, 0x55}, + {0xd213, CRL_REG_LEN_08BIT, 0x88}, + {0xd214, CRL_REG_LEN_08BIT, 0x8c}, + {0xd215, CRL_REG_LEN_08BIT, 0x65}, + {0xd216, CRL_REG_LEN_08BIT, 0x00}, + {0xd217, CRL_REG_LEN_08BIT, 0x00}, + {0xd218, CRL_REG_LEN_08BIT, 0xd8}, + {0xd219, CRL_REG_LEN_08BIT, 0x04}, + {0xd21a, CRL_REG_LEN_08BIT, 0x18}, + {0xd21b, CRL_REG_LEN_08BIT, 0x00}, + {0xd21c, CRL_REG_LEN_08BIT, 0x03}, + {0xd21d, CRL_REG_LEN_08BIT, 0xff}, + {0xd21e, CRL_REG_LEN_08BIT, 0xff}, + {0xd21f, CRL_REG_LEN_08BIT, 0xce}, + {0xd220, CRL_REG_LEN_08BIT, 0x19}, + {0xd221, CRL_REG_LEN_08BIT, 0x00}, + {0xd222, CRL_REG_LEN_08BIT, 0x80}, + {0xd223, CRL_REG_LEN_08BIT, 0x06}, + {0xd224, CRL_REG_LEN_08BIT, 0x8c}, + {0xd225, CRL_REG_LEN_08BIT, 0x63}, + {0xd226, CRL_REG_LEN_08BIT, 0x00}, + {0xd227, CRL_REG_LEN_08BIT, 0x00}, + {0xd228, CRL_REG_LEN_08BIT, 0xa4}, + {0xd229, CRL_REG_LEN_08BIT, 0x63}, + {0xd22a, CRL_REG_LEN_08BIT, 0x00}, + {0xd22b, CRL_REG_LEN_08BIT, 0x40}, + {0xd22c, CRL_REG_LEN_08BIT, 0xbc}, + {0xd22d, CRL_REG_LEN_08BIT, 0x23}, + {0xd22e, CRL_REG_LEN_08BIT, 0x00}, + {0xd22f, CRL_REG_LEN_08BIT, 0x00}, + {0xd230, CRL_REG_LEN_08BIT, 0x13}, + {0xd231, CRL_REG_LEN_08BIT, 0xff}, + {0xd232, CRL_REG_LEN_08BIT, 0xff}, + {0xd233, CRL_REG_LEN_08BIT, 0xc8}, + {0xd234, CRL_REG_LEN_08BIT, 0x9d}, + {0xd235, CRL_REG_LEN_08BIT, 0x00}, + {0xd236, CRL_REG_LEN_08BIT, 0x00}, + {0xd237, CRL_REG_LEN_08BIT, 0x40}, + {0xd238, CRL_REG_LEN_08BIT, 0xa8}, + {0xd239, CRL_REG_LEN_08BIT, 0x64}, + {0xd23a, CRL_REG_LEN_08BIT, 0x55}, + {0xd23b, CRL_REG_LEN_08BIT, 0x86}, + {0xd23c, CRL_REG_LEN_08BIT, 0xa8}, + {0xd23d, CRL_REG_LEN_08BIT, 0xa4}, + {0xd23e, CRL_REG_LEN_08BIT, 0x55}, + {0xd23f, CRL_REG_LEN_08BIT, 0x87}, + {0xd240, CRL_REG_LEN_08BIT, 
0xd8}, + {0xd241, CRL_REG_LEN_08BIT, 0x03}, + {0xd242, CRL_REG_LEN_08BIT, 0x40}, + {0xd243, CRL_REG_LEN_08BIT, 0x00}, + {0xd244, CRL_REG_LEN_08BIT, 0xa8}, + {0xd245, CRL_REG_LEN_08BIT, 0x64}, + {0xd246, CRL_REG_LEN_08BIT, 0x55}, + {0xd247, CRL_REG_LEN_08BIT, 0x88}, + {0xd248, CRL_REG_LEN_08BIT, 0xd8}, + {0xd249, CRL_REG_LEN_08BIT, 0x05}, + {0xd24a, CRL_REG_LEN_08BIT, 0x40}, + {0xd24b, CRL_REG_LEN_08BIT, 0x00}, + {0xd24c, CRL_REG_LEN_08BIT, 0xd8}, + {0xd24d, CRL_REG_LEN_08BIT, 0x03}, + {0xd24e, CRL_REG_LEN_08BIT, 0x40}, + {0xd24f, CRL_REG_LEN_08BIT, 0x00}, + {0xd250, CRL_REG_LEN_08BIT, 0x03}, + {0xd251, CRL_REG_LEN_08BIT, 0xff}, + {0xd252, CRL_REG_LEN_08BIT, 0xff}, + {0xd253, CRL_REG_LEN_08BIT, 0xc1}, + {0xd254, CRL_REG_LEN_08BIT, 0x19}, + {0xd255, CRL_REG_LEN_08BIT, 0x00}, + {0xd256, CRL_REG_LEN_08BIT, 0x80}, + {0xd257, CRL_REG_LEN_08BIT, 0x06}, + {0xd258, CRL_REG_LEN_08BIT, 0x94}, + {0xd259, CRL_REG_LEN_08BIT, 0x84}, + {0xd25a, CRL_REG_LEN_08BIT, 0x00}, + {0xd25b, CRL_REG_LEN_08BIT, 0x72}, + {0xd25c, CRL_REG_LEN_08BIT, 0xe5}, + {0xd25d, CRL_REG_LEN_08BIT, 0xa4}, + {0xd25e, CRL_REG_LEN_08BIT, 0x60}, + {0xd25f, CRL_REG_LEN_08BIT, 0x00}, + {0xd260, CRL_REG_LEN_08BIT, 0x0c}, + {0xd261, CRL_REG_LEN_08BIT, 0x00}, + {0xd262, CRL_REG_LEN_08BIT, 0x00}, + {0xd263, CRL_REG_LEN_08BIT, 0x4d}, + {0xd264, CRL_REG_LEN_08BIT, 0x9d}, + {0xd265, CRL_REG_LEN_08BIT, 0x60}, + {0xd266, CRL_REG_LEN_08BIT, 0x01}, + {0xd267, CRL_REG_LEN_08BIT, 0x00}, + {0xd268, CRL_REG_LEN_08BIT, 0x85}, + {0xd269, CRL_REG_LEN_08BIT, 0x4e}, + {0xd26a, CRL_REG_LEN_08BIT, 0x00}, + {0xd26b, CRL_REG_LEN_08BIT, 0x00}, + {0xd26c, CRL_REG_LEN_08BIT, 0x98}, + {0xd26d, CRL_REG_LEN_08BIT, 0x70}, + {0xd26e, CRL_REG_LEN_08BIT, 0x00}, + {0xd26f, CRL_REG_LEN_08BIT, 0x00}, + {0xd270, CRL_REG_LEN_08BIT, 0x8c}, + {0xd271, CRL_REG_LEN_08BIT, 0x8a}, + {0xd272, CRL_REG_LEN_08BIT, 0x00}, + {0xd273, CRL_REG_LEN_08BIT, 0x6f}, + {0xd274, CRL_REG_LEN_08BIT, 0xe5}, + {0xd275, CRL_REG_LEN_08BIT, 0x63}, + {0xd276, CRL_REG_LEN_08BIT, 
0x20}, + {0xd277, CRL_REG_LEN_08BIT, 0x00}, + {0xd278, CRL_REG_LEN_08BIT, 0x10}, + {0xd279, CRL_REG_LEN_08BIT, 0x00}, + {0xd27a, CRL_REG_LEN_08BIT, 0x00}, + {0xd27b, CRL_REG_LEN_08BIT, 0x07}, + {0xd27c, CRL_REG_LEN_08BIT, 0x15}, + {0xd27d, CRL_REG_LEN_08BIT, 0x00}, + {0xd27e, CRL_REG_LEN_08BIT, 0x00}, + {0xd27f, CRL_REG_LEN_08BIT, 0x00}, + {0xd280, CRL_REG_LEN_08BIT, 0x8c}, + {0xd281, CRL_REG_LEN_08BIT, 0xaa}, + {0xd282, CRL_REG_LEN_08BIT, 0x00}, + {0xd283, CRL_REG_LEN_08BIT, 0x6e}, + {0xd284, CRL_REG_LEN_08BIT, 0xe0}, + {0xd285, CRL_REG_LEN_08BIT, 0x63}, + {0xd286, CRL_REG_LEN_08BIT, 0x28}, + {0xd287, CRL_REG_LEN_08BIT, 0x02}, + {0xd288, CRL_REG_LEN_08BIT, 0xe0}, + {0xd289, CRL_REG_LEN_08BIT, 0x84}, + {0xd28a, CRL_REG_LEN_08BIT, 0x28}, + {0xd28b, CRL_REG_LEN_08BIT, 0x02}, + {0xd28c, CRL_REG_LEN_08BIT, 0x07}, + {0xd28d, CRL_REG_LEN_08BIT, 0xff}, + {0xd28e, CRL_REG_LEN_08BIT, 0xf8}, + {0xd28f, CRL_REG_LEN_08BIT, 0x66}, + {0xd290, CRL_REG_LEN_08BIT, 0xe0}, + {0xd291, CRL_REG_LEN_08BIT, 0x63}, + {0xd292, CRL_REG_LEN_08BIT, 0x5b}, + {0xd293, CRL_REG_LEN_08BIT, 0x06}, + {0xd294, CRL_REG_LEN_08BIT, 0x8c}, + {0xd295, CRL_REG_LEN_08BIT, 0x6a}, + {0xd296, CRL_REG_LEN_08BIT, 0x00}, + {0xd297, CRL_REG_LEN_08BIT, 0x77}, + {0xd298, CRL_REG_LEN_08BIT, 0xe0}, + {0xd299, CRL_REG_LEN_08BIT, 0x63}, + {0xd29a, CRL_REG_LEN_08BIT, 0x5b}, + {0xd29b, CRL_REG_LEN_08BIT, 0x06}, + {0xd29c, CRL_REG_LEN_08BIT, 0xbd}, + {0xd29d, CRL_REG_LEN_08BIT, 0x63}, + {0xd29e, CRL_REG_LEN_08BIT, 0x00}, + {0xd29f, CRL_REG_LEN_08BIT, 0x00}, + {0xd2a0, CRL_REG_LEN_08BIT, 0x0c}, + {0xd2a1, CRL_REG_LEN_08BIT, 0x00}, + {0xd2a2, CRL_REG_LEN_08BIT, 0x00}, + {0xd2a3, CRL_REG_LEN_08BIT, 0x5a}, + {0xd2a4, CRL_REG_LEN_08BIT, 0x15}, + {0xd2a5, CRL_REG_LEN_08BIT, 0x00}, + {0xd2a6, CRL_REG_LEN_08BIT, 0x00}, + {0xd2a7, CRL_REG_LEN_08BIT, 0x00}, + {0xd2a8, CRL_REG_LEN_08BIT, 0x8c}, + {0xd2a9, CRL_REG_LEN_08BIT, 0x8a}, + {0xd2aa, CRL_REG_LEN_08BIT, 0x00}, + {0xd2ab, CRL_REG_LEN_08BIT, 0x78}, + {0xd2ac, CRL_REG_LEN_08BIT, 
0xb8}, + {0xd2ad, CRL_REG_LEN_08BIT, 0x63}, + {0xd2ae, CRL_REG_LEN_08BIT, 0x00}, + {0xd2af, CRL_REG_LEN_08BIT, 0x88}, + {0xd2b0, CRL_REG_LEN_08BIT, 0xe1}, + {0xd2b1, CRL_REG_LEN_08BIT, 0x64}, + {0xd2b2, CRL_REG_LEN_08BIT, 0x5b}, + {0xd2b3, CRL_REG_LEN_08BIT, 0x06}, + {0xd2b4, CRL_REG_LEN_08BIT, 0xbd}, + {0xd2b5, CRL_REG_LEN_08BIT, 0x6b}, + {0xd2b6, CRL_REG_LEN_08BIT, 0x00}, + {0xd2b7, CRL_REG_LEN_08BIT, 0x00}, + {0xd2b8, CRL_REG_LEN_08BIT, 0x0c}, + {0xd2b9, CRL_REG_LEN_08BIT, 0x00}, + {0xd2ba, CRL_REG_LEN_08BIT, 0x00}, + {0xd2bb, CRL_REG_LEN_08BIT, 0x59}, + {0xd2bc, CRL_REG_LEN_08BIT, 0xd4}, + {0xd2bd, CRL_REG_LEN_08BIT, 0x01}, + {0xd2be, CRL_REG_LEN_08BIT, 0x18}, + {0xd2bf, CRL_REG_LEN_08BIT, 0x14}, + {0xd2c0, CRL_REG_LEN_08BIT, 0xb9}, + {0xd2c1, CRL_REG_LEN_08BIT, 0x6b}, + {0xd2c2, CRL_REG_LEN_08BIT, 0x00}, + {0xd2c3, CRL_REG_LEN_08BIT, 0x88}, + {0xd2c4, CRL_REG_LEN_08BIT, 0x85}, + {0xd2c5, CRL_REG_LEN_08BIT, 0x01}, + {0xd2c6, CRL_REG_LEN_08BIT, 0x00}, + {0xd2c7, CRL_REG_LEN_08BIT, 0x14}, + {0xd2c8, CRL_REG_LEN_08BIT, 0xbd}, + {0xd2c9, CRL_REG_LEN_08BIT, 0x68}, + {0xd2ca, CRL_REG_LEN_08BIT, 0x00}, + {0xd2cb, CRL_REG_LEN_08BIT, 0x00}, + {0xd2cc, CRL_REG_LEN_08BIT, 0x0c}, + {0xd2cd, CRL_REG_LEN_08BIT, 0x00}, + {0xd2ce, CRL_REG_LEN_08BIT, 0x00}, + {0xd2cf, CRL_REG_LEN_08BIT, 0x51}, + {0xd2d0, CRL_REG_LEN_08BIT, 0xd4}, + {0xd2d1, CRL_REG_LEN_08BIT, 0x01}, + {0xd2d2, CRL_REG_LEN_08BIT, 0x58}, + {0xd2d3, CRL_REG_LEN_08BIT, 0x18}, + {0xd2d4, CRL_REG_LEN_08BIT, 0x84}, + {0xd2d5, CRL_REG_LEN_08BIT, 0x81}, + {0xd2d6, CRL_REG_LEN_08BIT, 0x00}, + {0xd2d7, CRL_REG_LEN_08BIT, 0x14}, + {0xd2d8, CRL_REG_LEN_08BIT, 0xbd}, + {0xd2d9, CRL_REG_LEN_08BIT, 0xa4}, + {0xd2da, CRL_REG_LEN_08BIT, 0x01}, + {0xd2db, CRL_REG_LEN_08BIT, 0x00}, + {0xd2dc, CRL_REG_LEN_08BIT, 0x10}, + {0xd2dd, CRL_REG_LEN_08BIT, 0x00}, + {0xd2de, CRL_REG_LEN_08BIT, 0x00}, + {0xd2df, CRL_REG_LEN_08BIT, 0x05}, + {0xd2e0, CRL_REG_LEN_08BIT, 0x84}, + {0xd2e1, CRL_REG_LEN_08BIT, 0xc1}, + {0xd2e2, CRL_REG_LEN_08BIT, 
0x00}, + {0xd2e3, CRL_REG_LEN_08BIT, 0x18}, + {0xd2e4, CRL_REG_LEN_08BIT, 0x9c}, + {0xd2e5, CRL_REG_LEN_08BIT, 0xa0}, + {0xd2e6, CRL_REG_LEN_08BIT, 0x01}, + {0xd2e7, CRL_REG_LEN_08BIT, 0x00}, + {0xd2e8, CRL_REG_LEN_08BIT, 0xd4}, + {0xd2e9, CRL_REG_LEN_08BIT, 0x01}, + {0xd2ea, CRL_REG_LEN_08BIT, 0x28}, + {0xd2eb, CRL_REG_LEN_08BIT, 0x14}, + {0xd2ec, CRL_REG_LEN_08BIT, 0x84}, + {0xd2ed, CRL_REG_LEN_08BIT, 0xc1}, + {0xd2ee, CRL_REG_LEN_08BIT, 0x00}, + {0xd2ef, CRL_REG_LEN_08BIT, 0x18}, + {0xd2f0, CRL_REG_LEN_08BIT, 0xbd}, + {0xd2f1, CRL_REG_LEN_08BIT, 0x66}, + {0xd2f2, CRL_REG_LEN_08BIT, 0x00}, + {0xd2f3, CRL_REG_LEN_08BIT, 0x00}, + {0xd2f4, CRL_REG_LEN_08BIT, 0x0c}, + {0xd2f5, CRL_REG_LEN_08BIT, 0x00}, + {0xd2f6, CRL_REG_LEN_08BIT, 0x00}, + {0xd2f7, CRL_REG_LEN_08BIT, 0x43}, + {0xd2f8, CRL_REG_LEN_08BIT, 0x9d}, + {0xd2f9, CRL_REG_LEN_08BIT, 0x00}, + {0xd2fa, CRL_REG_LEN_08BIT, 0x00}, + {0xd2fb, CRL_REG_LEN_08BIT, 0x00}, + {0xd2fc, CRL_REG_LEN_08BIT, 0x84}, + {0xd2fd, CRL_REG_LEN_08BIT, 0x61}, + {0xd2fe, CRL_REG_LEN_08BIT, 0x00}, + {0xd2ff, CRL_REG_LEN_08BIT, 0x18}, + {0xd300, CRL_REG_LEN_08BIT, 0xbd}, + {0xd301, CRL_REG_LEN_08BIT, 0xa3}, + {0xd302, CRL_REG_LEN_08BIT, 0x01}, + {0xd303, CRL_REG_LEN_08BIT, 0x00}, + {0xd304, CRL_REG_LEN_08BIT, 0x10}, + {0xd305, CRL_REG_LEN_08BIT, 0x00}, + {0xd306, CRL_REG_LEN_08BIT, 0x00}, + {0xd307, CRL_REG_LEN_08BIT, 0x03}, + {0xd308, CRL_REG_LEN_08BIT, 0x9c}, + {0xd309, CRL_REG_LEN_08BIT, 0x80}, + {0xd30a, CRL_REG_LEN_08BIT, 0x01}, + {0xd30b, CRL_REG_LEN_08BIT, 0x00}, + {0xd30c, CRL_REG_LEN_08BIT, 0xd4}, + {0xd30d, CRL_REG_LEN_08BIT, 0x01}, + {0xd30e, CRL_REG_LEN_08BIT, 0x20}, + {0xd30f, CRL_REG_LEN_08BIT, 0x18}, + {0xd310, CRL_REG_LEN_08BIT, 0x18}, + {0xd311, CRL_REG_LEN_08BIT, 0x60}, + {0xd312, CRL_REG_LEN_08BIT, 0x80}, + {0xd313, CRL_REG_LEN_08BIT, 0x06}, + {0xd314, CRL_REG_LEN_08BIT, 0x85}, + {0xd315, CRL_REG_LEN_08BIT, 0x01}, + {0xd316, CRL_REG_LEN_08BIT, 0x00}, + {0xd317, CRL_REG_LEN_08BIT, 0x14}, + {0xd318, CRL_REG_LEN_08BIT, 
0xa8}, + {0xd319, CRL_REG_LEN_08BIT, 0x83}, + {0xd31a, CRL_REG_LEN_08BIT, 0x38}, + {0xd31b, CRL_REG_LEN_08BIT, 0x29}, + {0xd31c, CRL_REG_LEN_08BIT, 0xa8}, + {0xd31d, CRL_REG_LEN_08BIT, 0xc3}, + {0xd31e, CRL_REG_LEN_08BIT, 0x40}, + {0xd31f, CRL_REG_LEN_08BIT, 0x08}, + {0xd320, CRL_REG_LEN_08BIT, 0x8c}, + {0xd321, CRL_REG_LEN_08BIT, 0x84}, + {0xd322, CRL_REG_LEN_08BIT, 0x00}, + {0xd323, CRL_REG_LEN_08BIT, 0x00}, + {0xd324, CRL_REG_LEN_08BIT, 0xa8}, + {0xd325, CRL_REG_LEN_08BIT, 0xa3}, + {0xd326, CRL_REG_LEN_08BIT, 0x38}, + {0xd327, CRL_REG_LEN_08BIT, 0x2a}, + {0xd328, CRL_REG_LEN_08BIT, 0xa8}, + {0xd329, CRL_REG_LEN_08BIT, 0xe3}, + {0xd32a, CRL_REG_LEN_08BIT, 0x40}, + {0xd32b, CRL_REG_LEN_08BIT, 0x09}, + {0xd32c, CRL_REG_LEN_08BIT, 0xe0}, + {0xd32d, CRL_REG_LEN_08BIT, 0x64}, + {0xd32e, CRL_REG_LEN_08BIT, 0x40}, + {0xd32f, CRL_REG_LEN_08BIT, 0x00}, + {0xd330, CRL_REG_LEN_08BIT, 0xd8}, + {0xd331, CRL_REG_LEN_08BIT, 0x06}, + {0xd332, CRL_REG_LEN_08BIT, 0x18}, + {0xd333, CRL_REG_LEN_08BIT, 0x00}, + {0xd334, CRL_REG_LEN_08BIT, 0x8c}, + {0xd335, CRL_REG_LEN_08BIT, 0x65}, + {0xd336, CRL_REG_LEN_08BIT, 0x00}, + {0xd337, CRL_REG_LEN_08BIT, 0x00}, + {0xd338, CRL_REG_LEN_08BIT, 0x84}, + {0xd339, CRL_REG_LEN_08BIT, 0x81}, + {0xd33a, CRL_REG_LEN_08BIT, 0x00}, + {0xd33b, CRL_REG_LEN_08BIT, 0x18}, + {0xd33c, CRL_REG_LEN_08BIT, 0xe3}, + {0xd33d, CRL_REG_LEN_08BIT, 0xe3}, + {0xd33e, CRL_REG_LEN_08BIT, 0x20}, + {0xd33f, CRL_REG_LEN_08BIT, 0x00}, + {0xd340, CRL_REG_LEN_08BIT, 0xd8}, + {0xd341, CRL_REG_LEN_08BIT, 0x07}, + {0xd342, CRL_REG_LEN_08BIT, 0xf8}, + {0xd343, CRL_REG_LEN_08BIT, 0x00}, + {0xd344, CRL_REG_LEN_08BIT, 0x03}, + {0xd345, CRL_REG_LEN_08BIT, 0xff}, + {0xd346, CRL_REG_LEN_08BIT, 0xff}, + {0xd347, CRL_REG_LEN_08BIT, 0x6f}, + {0xd348, CRL_REG_LEN_08BIT, 0x18}, + {0xd349, CRL_REG_LEN_08BIT, 0x60}, + {0xd34a, CRL_REG_LEN_08BIT, 0x00}, + {0xd34b, CRL_REG_LEN_08BIT, 0x01}, + {0xd34c, CRL_REG_LEN_08BIT, 0x0f}, + {0xd34d, CRL_REG_LEN_08BIT, 0xff}, + {0xd34e, CRL_REG_LEN_08BIT, 
0xff}, + {0xd34f, CRL_REG_LEN_08BIT, 0x9d}, + {0xd350, CRL_REG_LEN_08BIT, 0x18}, + {0xd351, CRL_REG_LEN_08BIT, 0x60}, + {0xd352, CRL_REG_LEN_08BIT, 0x80}, + {0xd353, CRL_REG_LEN_08BIT, 0x06}, + {0xd354, CRL_REG_LEN_08BIT, 0xa8}, + {0xd355, CRL_REG_LEN_08BIT, 0x83}, + {0xd356, CRL_REG_LEN_08BIT, 0x6e}, + {0xd357, CRL_REG_LEN_08BIT, 0x43}, + {0xd358, CRL_REG_LEN_08BIT, 0xa8}, + {0xd359, CRL_REG_LEN_08BIT, 0xa3}, + {0xd35a, CRL_REG_LEN_08BIT, 0x38}, + {0xd35b, CRL_REG_LEN_08BIT, 0x0f}, + {0xd35c, CRL_REG_LEN_08BIT, 0x8c}, + {0xd35d, CRL_REG_LEN_08BIT, 0x84}, + {0xd35e, CRL_REG_LEN_08BIT, 0x00}, + {0xd35f, CRL_REG_LEN_08BIT, 0x00}, + {0xd360, CRL_REG_LEN_08BIT, 0xa8}, + {0xd361, CRL_REG_LEN_08BIT, 0xc3}, + {0xd362, CRL_REG_LEN_08BIT, 0x38}, + {0xd363, CRL_REG_LEN_08BIT, 0x0e}, + {0xd364, CRL_REG_LEN_08BIT, 0xa8}, + {0xd365, CRL_REG_LEN_08BIT, 0xe3}, + {0xd366, CRL_REG_LEN_08BIT, 0x6e}, + {0xd367, CRL_REG_LEN_08BIT, 0x42}, + {0xd368, CRL_REG_LEN_08BIT, 0xd8}, + {0xd369, CRL_REG_LEN_08BIT, 0x05}, + {0xd36a, CRL_REG_LEN_08BIT, 0x20}, + {0xd36b, CRL_REG_LEN_08BIT, 0x00}, + {0xd36c, CRL_REG_LEN_08BIT, 0x8c}, + {0xd36d, CRL_REG_LEN_08BIT, 0x67}, + {0xd36e, CRL_REG_LEN_08BIT, 0x00}, + {0xd36f, CRL_REG_LEN_08BIT, 0x00}, + {0xd370, CRL_REG_LEN_08BIT, 0xd8}, + {0xd371, CRL_REG_LEN_08BIT, 0x06}, + {0xd372, CRL_REG_LEN_08BIT, 0x18}, + {0xd373, CRL_REG_LEN_08BIT, 0x00}, + {0xd374, CRL_REG_LEN_08BIT, 0x18}, + {0xd375, CRL_REG_LEN_08BIT, 0x60}, + {0xd376, CRL_REG_LEN_08BIT, 0x80}, + {0xd377, CRL_REG_LEN_08BIT, 0x01}, + {0xd378, CRL_REG_LEN_08BIT, 0xa8}, + {0xd379, CRL_REG_LEN_08BIT, 0x63}, + {0xd37a, CRL_REG_LEN_08BIT, 0x00}, + {0xd37b, CRL_REG_LEN_08BIT, 0xc8}, + {0xd37c, CRL_REG_LEN_08BIT, 0x8c}, + {0xd37d, CRL_REG_LEN_08BIT, 0x63}, + {0xd37e, CRL_REG_LEN_08BIT, 0x00}, + {0xd37f, CRL_REG_LEN_08BIT, 0x00}, + {0xd380, CRL_REG_LEN_08BIT, 0xbc}, + {0xd381, CRL_REG_LEN_08BIT, 0x23}, + {0xd382, CRL_REG_LEN_08BIT, 0x00}, + {0xd383, CRL_REG_LEN_08BIT, 0x01}, + {0xd384, CRL_REG_LEN_08BIT, 
0x10}, + {0xd385, CRL_REG_LEN_08BIT, 0x00}, + {0xd386, CRL_REG_LEN_08BIT, 0x00}, + {0xd387, CRL_REG_LEN_08BIT, 0x28}, + {0xd388, CRL_REG_LEN_08BIT, 0x9c}, + {0xd389, CRL_REG_LEN_08BIT, 0xa0}, + {0xd38a, CRL_REG_LEN_08BIT, 0x00}, + {0xd38b, CRL_REG_LEN_08BIT, 0x00}, + {0xd38c, CRL_REG_LEN_08BIT, 0x00}, + {0xd38d, CRL_REG_LEN_08BIT, 0x00}, + {0xd38e, CRL_REG_LEN_08BIT, 0x00}, + {0xd38f, CRL_REG_LEN_08BIT, 0x08}, + {0xd390, CRL_REG_LEN_08BIT, 0x15}, + {0xd391, CRL_REG_LEN_08BIT, 0x00}, + {0xd392, CRL_REG_LEN_08BIT, 0x00}, + {0xd393, CRL_REG_LEN_08BIT, 0x00}, + {0xd394, CRL_REG_LEN_08BIT, 0xe0}, + {0xd395, CRL_REG_LEN_08BIT, 0x6c}, + {0xd396, CRL_REG_LEN_08BIT, 0x28}, + {0xd397, CRL_REG_LEN_08BIT, 0x02}, + {0xd398, CRL_REG_LEN_08BIT, 0xe0}, + {0xd399, CRL_REG_LEN_08BIT, 0x84}, + {0xd39a, CRL_REG_LEN_08BIT, 0x28}, + {0xd39b, CRL_REG_LEN_08BIT, 0x02}, + {0xd39c, CRL_REG_LEN_08BIT, 0x07}, + {0xd39d, CRL_REG_LEN_08BIT, 0xff}, + {0xd39e, CRL_REG_LEN_08BIT, 0xf8}, + {0xd39f, CRL_REG_LEN_08BIT, 0x22}, + {0xd3a0, CRL_REG_LEN_08BIT, 0xb8}, + {0xd3a1, CRL_REG_LEN_08BIT, 0x63}, + {0xd3a2, CRL_REG_LEN_08BIT, 0x00}, + {0xd3a3, CRL_REG_LEN_08BIT, 0x08}, + {0xd3a4, CRL_REG_LEN_08BIT, 0x03}, + {0xd3a5, CRL_REG_LEN_08BIT, 0xff}, + {0xd3a6, CRL_REG_LEN_08BIT, 0xff}, + {0xd3a7, CRL_REG_LEN_08BIT, 0xb2}, + {0xd3a8, CRL_REG_LEN_08BIT, 0x85}, + {0xd3a9, CRL_REG_LEN_08BIT, 0x4e}, + {0xd3aa, CRL_REG_LEN_08BIT, 0x00}, + {0xd3ab, CRL_REG_LEN_08BIT, 0x00}, + {0xd3ac, CRL_REG_LEN_08BIT, 0x18}, + {0xd3ad, CRL_REG_LEN_08BIT, 0xe0}, + {0xd3ae, CRL_REG_LEN_08BIT, 0x00}, + {0xd3af, CRL_REG_LEN_08BIT, 0x01}, + {0xd3b0, CRL_REG_LEN_08BIT, 0xa8}, + {0xd3b1, CRL_REG_LEN_08BIT, 0xe7}, + {0xd3b2, CRL_REG_LEN_08BIT, 0x06}, + {0xd3b3, CRL_REG_LEN_08BIT, 0x55}, + {0xd3b4, CRL_REG_LEN_08BIT, 0x8c}, + {0xd3b5, CRL_REG_LEN_08BIT, 0x87}, + {0xd3b6, CRL_REG_LEN_08BIT, 0x00}, + {0xd3b7, CRL_REG_LEN_08BIT, 0x00}, + {0xd3b8, CRL_REG_LEN_08BIT, 0xb8}, + {0xd3b9, CRL_REG_LEN_08BIT, 0x64}, + {0xd3ba, CRL_REG_LEN_08BIT, 
0x00}, + {0xd3bb, CRL_REG_LEN_08BIT, 0x02}, + {0xd3bc, CRL_REG_LEN_08BIT, 0x19}, + {0xd3bd, CRL_REG_LEN_08BIT, 0x00}, + {0xd3be, CRL_REG_LEN_08BIT, 0x80}, + {0xd3bf, CRL_REG_LEN_08BIT, 0x06}, + {0xd3c0, CRL_REG_LEN_08BIT, 0xe0}, + {0xd3c1, CRL_REG_LEN_08BIT, 0x63}, + {0xd3c2, CRL_REG_LEN_08BIT, 0x20}, + {0xd3c3, CRL_REG_LEN_08BIT, 0x00}, + {0xd3c4, CRL_REG_LEN_08BIT, 0xa9}, + {0xd3c5, CRL_REG_LEN_08BIT, 0x08}, + {0xd3c6, CRL_REG_LEN_08BIT, 0x56}, + {0xd3c7, CRL_REG_LEN_08BIT, 0x01}, + {0xd3c8, CRL_REG_LEN_08BIT, 0xb8}, + {0xd3c9, CRL_REG_LEN_08BIT, 0x63}, + {0xd3ca, CRL_REG_LEN_08BIT, 0x00}, + {0xd3cb, CRL_REG_LEN_08BIT, 0x04}, + {0xd3cc, CRL_REG_LEN_08BIT, 0x18}, + {0xd3cd, CRL_REG_LEN_08BIT, 0x80}, + {0xd3ce, CRL_REG_LEN_08BIT, 0x80}, + {0xd3cf, CRL_REG_LEN_08BIT, 0x01}, + {0xd3d0, CRL_REG_LEN_08BIT, 0xe0}, + {0xd3d1, CRL_REG_LEN_08BIT, 0xc5}, + {0xd3d2, CRL_REG_LEN_08BIT, 0x40}, + {0xd3d3, CRL_REG_LEN_08BIT, 0x00}, + {0xd3d4, CRL_REG_LEN_08BIT, 0xe0}, + {0xd3d5, CRL_REG_LEN_08BIT, 0x63}, + {0xd3d6, CRL_REG_LEN_08BIT, 0x28}, + {0xd3d7, CRL_REG_LEN_08BIT, 0x00}, + {0xd3d8, CRL_REG_LEN_08BIT, 0xa8}, + {0xd3d9, CRL_REG_LEN_08BIT, 0x84}, + {0xd3da, CRL_REG_LEN_08BIT, 0x1d}, + {0xd3db, CRL_REG_LEN_08BIT, 0x00}, + {0xd3dc, CRL_REG_LEN_08BIT, 0x9c}, + {0xd3dd, CRL_REG_LEN_08BIT, 0xa5}, + {0xd3de, CRL_REG_LEN_08BIT, 0x00}, + {0xd3df, CRL_REG_LEN_08BIT, 0x01}, + {0xd3e0, CRL_REG_LEN_08BIT, 0xe0}, + {0xd3e1, CRL_REG_LEN_08BIT, 0x63}, + {0xd3e2, CRL_REG_LEN_08BIT, 0x20}, + {0xd3e3, CRL_REG_LEN_08BIT, 0x00}, + {0xd3e4, CRL_REG_LEN_08BIT, 0xbd}, + {0xd3e5, CRL_REG_LEN_08BIT, 0x45}, + {0xd3e6, CRL_REG_LEN_08BIT, 0x00}, + {0xd3e7, CRL_REG_LEN_08BIT, 0x48}, + {0xd3e8, CRL_REG_LEN_08BIT, 0x8c}, + {0xd3e9, CRL_REG_LEN_08BIT, 0x63}, + {0xd3ea, CRL_REG_LEN_08BIT, 0x00}, + {0xd3eb, CRL_REG_LEN_08BIT, 0x00}, + {0xd3ec, CRL_REG_LEN_08BIT, 0xd8}, + {0xd3ed, CRL_REG_LEN_08BIT, 0x06}, + {0xd3ee, CRL_REG_LEN_08BIT, 0x18}, + {0xd3ef, CRL_REG_LEN_08BIT, 0x00}, + {0xd3f0, CRL_REG_LEN_08BIT, 
0x0f}, + {0xd3f1, CRL_REG_LEN_08BIT, 0xff}, + {0xd3f2, CRL_REG_LEN_08BIT, 0xff}, + {0xd3f3, CRL_REG_LEN_08BIT, 0xf1}, + {0xd3f4, CRL_REG_LEN_08BIT, 0x15}, + {0xd3f5, CRL_REG_LEN_08BIT, 0x00}, + {0xd3f6, CRL_REG_LEN_08BIT, 0x00}, + {0xd3f7, CRL_REG_LEN_08BIT, 0x00}, + {0xd3f8, CRL_REG_LEN_08BIT, 0x00}, + {0xd3f9, CRL_REG_LEN_08BIT, 0x00}, + {0xd3fa, CRL_REG_LEN_08BIT, 0x00}, + {0xd3fb, CRL_REG_LEN_08BIT, 0x0b}, + {0xd3fc, CRL_REG_LEN_08BIT, 0x15}, + {0xd3fd, CRL_REG_LEN_08BIT, 0x00}, + {0xd3fe, CRL_REG_LEN_08BIT, 0x00}, + {0xd3ff, CRL_REG_LEN_08BIT, 0x00}, + {0xd400, CRL_REG_LEN_08BIT, 0x03}, + {0xd401, CRL_REG_LEN_08BIT, 0xff}, + {0xd402, CRL_REG_LEN_08BIT, 0xff}, + {0xd403, CRL_REG_LEN_08BIT, 0xc4}, + {0xd404, CRL_REG_LEN_08BIT, 0xd4}, + {0xd405, CRL_REG_LEN_08BIT, 0x01}, + {0xd406, CRL_REG_LEN_08BIT, 0x40}, + {0xd407, CRL_REG_LEN_08BIT, 0x18}, + {0xd408, CRL_REG_LEN_08BIT, 0x03}, + {0xd409, CRL_REG_LEN_08BIT, 0xff}, + {0xd40a, CRL_REG_LEN_08BIT, 0xff}, + {0xd40b, CRL_REG_LEN_08BIT, 0xa8}, + {0xd40c, CRL_REG_LEN_08BIT, 0x9c}, + {0xd40d, CRL_REG_LEN_08BIT, 0x63}, + {0xd40e, CRL_REG_LEN_08BIT, 0x00}, + {0xd40f, CRL_REG_LEN_08BIT, 0xff}, + {0xd410, CRL_REG_LEN_08BIT, 0x9c}, + {0xd411, CRL_REG_LEN_08BIT, 0x60}, + {0xd412, CRL_REG_LEN_08BIT, 0x00}, + {0xd413, CRL_REG_LEN_08BIT, 0x00}, + {0xd414, CRL_REG_LEN_08BIT, 0x03}, + {0xd415, CRL_REG_LEN_08BIT, 0xff}, + {0xd416, CRL_REG_LEN_08BIT, 0xff}, + {0xd417, CRL_REG_LEN_08BIT, 0xb6}, + {0xd418, CRL_REG_LEN_08BIT, 0xd4}, + {0xd419, CRL_REG_LEN_08BIT, 0x01}, + {0xd41a, CRL_REG_LEN_08BIT, 0x18}, + {0xd41b, CRL_REG_LEN_08BIT, 0x14}, + {0xd41c, CRL_REG_LEN_08BIT, 0x03}, + {0xd41d, CRL_REG_LEN_08BIT, 0xff}, + {0xd41e, CRL_REG_LEN_08BIT, 0xff}, + {0xd41f, CRL_REG_LEN_08BIT, 0xa9}, + {0xd420, CRL_REG_LEN_08BIT, 0x9d}, + {0xd421, CRL_REG_LEN_08BIT, 0x6b}, + {0xd422, CRL_REG_LEN_08BIT, 0x00}, + {0xd423, CRL_REG_LEN_08BIT, 0xff}, + {0xd424, CRL_REG_LEN_08BIT, 0x85}, + {0xd425, CRL_REG_LEN_08BIT, 0x21}, + {0xd426, CRL_REG_LEN_08BIT, 
0x00}, + {0xd427, CRL_REG_LEN_08BIT, 0x00}, + {0xd428, CRL_REG_LEN_08BIT, 0x85}, + {0xd429, CRL_REG_LEN_08BIT, 0x41}, + {0xd42a, CRL_REG_LEN_08BIT, 0x00}, + {0xd42b, CRL_REG_LEN_08BIT, 0x04}, + {0xd42c, CRL_REG_LEN_08BIT, 0x85}, + {0xd42d, CRL_REG_LEN_08BIT, 0x81}, + {0xd42e, CRL_REG_LEN_08BIT, 0x00}, + {0xd42f, CRL_REG_LEN_08BIT, 0x08}, + {0xd430, CRL_REG_LEN_08BIT, 0x85}, + {0xd431, CRL_REG_LEN_08BIT, 0xc1}, + {0xd432, CRL_REG_LEN_08BIT, 0x00}, + {0xd433, CRL_REG_LEN_08BIT, 0x0c}, + {0xd434, CRL_REG_LEN_08BIT, 0x86}, + {0xd435, CRL_REG_LEN_08BIT, 0x01}, + {0xd436, CRL_REG_LEN_08BIT, 0x00}, + {0xd437, CRL_REG_LEN_08BIT, 0x10}, + {0xd438, CRL_REG_LEN_08BIT, 0x44}, + {0xd439, CRL_REG_LEN_08BIT, 0x00}, + {0xd43a, CRL_REG_LEN_08BIT, 0x48}, + {0xd43b, CRL_REG_LEN_08BIT, 0x00}, + {0xd43c, CRL_REG_LEN_08BIT, 0x9c}, + {0xd43d, CRL_REG_LEN_08BIT, 0x21}, + {0xd43e, CRL_REG_LEN_08BIT, 0x00}, + {0xd43f, CRL_REG_LEN_08BIT, 0x1c}, + {0xd440, CRL_REG_LEN_08BIT, 0x9c}, + {0xd441, CRL_REG_LEN_08BIT, 0x21}, + {0xd442, CRL_REG_LEN_08BIT, 0xff}, + {0xd443, CRL_REG_LEN_08BIT, 0xfc}, + {0xd444, CRL_REG_LEN_08BIT, 0xd4}, + {0xd445, CRL_REG_LEN_08BIT, 0x01}, + {0xd446, CRL_REG_LEN_08BIT, 0x48}, + {0xd447, CRL_REG_LEN_08BIT, 0x00}, + {0xd448, CRL_REG_LEN_08BIT, 0x18}, + {0xd449, CRL_REG_LEN_08BIT, 0x60}, + {0xd44a, CRL_REG_LEN_08BIT, 0x00}, + {0xd44b, CRL_REG_LEN_08BIT, 0x01}, + {0xd44c, CRL_REG_LEN_08BIT, 0xa8}, + {0xd44d, CRL_REG_LEN_08BIT, 0x63}, + {0xd44e, CRL_REG_LEN_08BIT, 0x07}, + {0xd44f, CRL_REG_LEN_08BIT, 0x80}, + {0xd450, CRL_REG_LEN_08BIT, 0x8c}, + {0xd451, CRL_REG_LEN_08BIT, 0x63}, + {0xd452, CRL_REG_LEN_08BIT, 0x00}, + {0xd453, CRL_REG_LEN_08BIT, 0x68}, + {0xd454, CRL_REG_LEN_08BIT, 0xbc}, + {0xd455, CRL_REG_LEN_08BIT, 0x03}, + {0xd456, CRL_REG_LEN_08BIT, 0x00}, + {0xd457, CRL_REG_LEN_08BIT, 0x00}, + {0xd458, CRL_REG_LEN_08BIT, 0x10}, + {0xd459, CRL_REG_LEN_08BIT, 0x00}, + {0xd45a, CRL_REG_LEN_08BIT, 0x00}, + {0xd45b, CRL_REG_LEN_08BIT, 0x0c}, + {0xd45c, CRL_REG_LEN_08BIT, 
0x15}, + {0xd45d, CRL_REG_LEN_08BIT, 0x00}, + {0xd45e, CRL_REG_LEN_08BIT, 0x00}, + {0xd45f, CRL_REG_LEN_08BIT, 0x00}, + {0xd460, CRL_REG_LEN_08BIT, 0x07}, + {0xd461, CRL_REG_LEN_08BIT, 0xff}, + {0xd462, CRL_REG_LEN_08BIT, 0xd9}, + {0xd463, CRL_REG_LEN_08BIT, 0x7c}, + {0xd464, CRL_REG_LEN_08BIT, 0x15}, + {0xd465, CRL_REG_LEN_08BIT, 0x00}, + {0xd466, CRL_REG_LEN_08BIT, 0x00}, + {0xd467, CRL_REG_LEN_08BIT, 0x00}, + {0xd468, CRL_REG_LEN_08BIT, 0x18}, + {0xd469, CRL_REG_LEN_08BIT, 0x60}, + {0xd46a, CRL_REG_LEN_08BIT, 0x80}, + {0xd46b, CRL_REG_LEN_08BIT, 0x06}, + {0xd46c, CRL_REG_LEN_08BIT, 0xa8}, + {0xd46d, CRL_REG_LEN_08BIT, 0x63}, + {0xd46e, CRL_REG_LEN_08BIT, 0xc4}, + {0xd46f, CRL_REG_LEN_08BIT, 0xb8}, + {0xd470, CRL_REG_LEN_08BIT, 0x8c}, + {0xd471, CRL_REG_LEN_08BIT, 0x63}, + {0xd472, CRL_REG_LEN_08BIT, 0x00}, + {0xd473, CRL_REG_LEN_08BIT, 0x00}, + {0xd474, CRL_REG_LEN_08BIT, 0xbc}, + {0xd475, CRL_REG_LEN_08BIT, 0x23}, + {0xd476, CRL_REG_LEN_08BIT, 0x00}, + {0xd477, CRL_REG_LEN_08BIT, 0x01}, + {0xd478, CRL_REG_LEN_08BIT, 0x10}, + {0xd479, CRL_REG_LEN_08BIT, 0x00}, + {0xd47a, CRL_REG_LEN_08BIT, 0x00}, + {0xd47b, CRL_REG_LEN_08BIT, 0x25}, + {0xd47c, CRL_REG_LEN_08BIT, 0x9d}, + {0xd47d, CRL_REG_LEN_08BIT, 0x00}, + {0xd47e, CRL_REG_LEN_08BIT, 0x00}, + {0xd47f, CRL_REG_LEN_08BIT, 0x00}, + {0xd480, CRL_REG_LEN_08BIT, 0x00}, + {0xd481, CRL_REG_LEN_08BIT, 0x00}, + {0xd482, CRL_REG_LEN_08BIT, 0x00}, + {0xd483, CRL_REG_LEN_08BIT, 0x0b}, + {0xd484, CRL_REG_LEN_08BIT, 0xb8}, + {0xd485, CRL_REG_LEN_08BIT, 0xe8}, + {0xd486, CRL_REG_LEN_08BIT, 0x00}, + {0xd487, CRL_REG_LEN_08BIT, 0x02}, + {0xd488, CRL_REG_LEN_08BIT, 0x07}, + {0xd489, CRL_REG_LEN_08BIT, 0xff}, + {0xd48a, CRL_REG_LEN_08BIT, 0xd6}, + {0xd48b, CRL_REG_LEN_08BIT, 0x08}, + {0xd48c, CRL_REG_LEN_08BIT, 0x15}, + {0xd48d, CRL_REG_LEN_08BIT, 0x00}, + {0xd48e, CRL_REG_LEN_08BIT, 0x00}, + {0xd48f, CRL_REG_LEN_08BIT, 0x00}, + {0xd490, CRL_REG_LEN_08BIT, 0x18}, + {0xd491, CRL_REG_LEN_08BIT, 0x60}, + {0xd492, CRL_REG_LEN_08BIT, 
0x80}, + {0xd493, CRL_REG_LEN_08BIT, 0x06}, + {0xd494, CRL_REG_LEN_08BIT, 0xa8}, + {0xd495, CRL_REG_LEN_08BIT, 0x63}, + {0xd496, CRL_REG_LEN_08BIT, 0xc4}, + {0xd497, CRL_REG_LEN_08BIT, 0xb8}, + {0xd498, CRL_REG_LEN_08BIT, 0x8c}, + {0xd499, CRL_REG_LEN_08BIT, 0x63}, + {0xd49a, CRL_REG_LEN_08BIT, 0x00}, + {0xd49b, CRL_REG_LEN_08BIT, 0x00}, + {0xd49c, CRL_REG_LEN_08BIT, 0xbc}, + {0xd49d, CRL_REG_LEN_08BIT, 0x23}, + {0xd49e, CRL_REG_LEN_08BIT, 0x00}, + {0xd49f, CRL_REG_LEN_08BIT, 0x01}, + {0xd4a0, CRL_REG_LEN_08BIT, 0x10}, + {0xd4a1, CRL_REG_LEN_08BIT, 0x00}, + {0xd4a2, CRL_REG_LEN_08BIT, 0x00}, + {0xd4a3, CRL_REG_LEN_08BIT, 0x1b}, + {0xd4a4, CRL_REG_LEN_08BIT, 0x9d}, + {0xd4a5, CRL_REG_LEN_08BIT, 0x00}, + {0xd4a6, CRL_REG_LEN_08BIT, 0x00}, + {0xd4a7, CRL_REG_LEN_08BIT, 0x00}, + {0xd4a8, CRL_REG_LEN_08BIT, 0xb8}, + {0xd4a9, CRL_REG_LEN_08BIT, 0xe8}, + {0xd4aa, CRL_REG_LEN_08BIT, 0x00}, + {0xd4ab, CRL_REG_LEN_08BIT, 0x02}, + {0xd4ac, CRL_REG_LEN_08BIT, 0x9c}, + {0xd4ad, CRL_REG_LEN_08BIT, 0xc0}, + {0xd4ae, CRL_REG_LEN_08BIT, 0x00}, + {0xd4af, CRL_REG_LEN_08BIT, 0x00}, + {0xd4b0, CRL_REG_LEN_08BIT, 0x18}, + {0xd4b1, CRL_REG_LEN_08BIT, 0xa0}, + {0xd4b2, CRL_REG_LEN_08BIT, 0x80}, + {0xd4b3, CRL_REG_LEN_08BIT, 0x06}, + {0xd4b4, CRL_REG_LEN_08BIT, 0xe0}, + {0xd4b5, CRL_REG_LEN_08BIT, 0x67}, + {0xd4b6, CRL_REG_LEN_08BIT, 0x30}, + {0xd4b7, CRL_REG_LEN_08BIT, 0x00}, + {0xd4b8, CRL_REG_LEN_08BIT, 0xa8}, + {0xd4b9, CRL_REG_LEN_08BIT, 0xa5}, + {0xd4ba, CRL_REG_LEN_08BIT, 0xce}, + {0xd4bb, CRL_REG_LEN_08BIT, 0xb0}, + {0xd4bc, CRL_REG_LEN_08BIT, 0x19}, + {0xd4bd, CRL_REG_LEN_08BIT, 0x60}, + {0xd4be, CRL_REG_LEN_08BIT, 0x00}, + {0xd4bf, CRL_REG_LEN_08BIT, 0x01}, + {0xd4c0, CRL_REG_LEN_08BIT, 0xa9}, + {0xd4c1, CRL_REG_LEN_08BIT, 0x6b}, + {0xd4c2, CRL_REG_LEN_08BIT, 0x06}, + {0xd4c3, CRL_REG_LEN_08BIT, 0x14}, + {0xd4c4, CRL_REG_LEN_08BIT, 0xe0}, + {0xd4c5, CRL_REG_LEN_08BIT, 0x83}, + {0xd4c6, CRL_REG_LEN_08BIT, 0x28}, + {0xd4c7, CRL_REG_LEN_08BIT, 0x00}, + {0xd4c8, CRL_REG_LEN_08BIT, 
0x9c}, + {0xd4c9, CRL_REG_LEN_08BIT, 0xc6}, + {0xd4ca, CRL_REG_LEN_08BIT, 0x00}, + {0xd4cb, CRL_REG_LEN_08BIT, 0x01}, + {0xd4cc, CRL_REG_LEN_08BIT, 0xe0}, + {0xd4cd, CRL_REG_LEN_08BIT, 0x63}, + {0xd4ce, CRL_REG_LEN_08BIT, 0x18}, + {0xd4cf, CRL_REG_LEN_08BIT, 0x00}, + {0xd4d0, CRL_REG_LEN_08BIT, 0x8c}, + {0xd4d1, CRL_REG_LEN_08BIT, 0x84}, + {0xd4d2, CRL_REG_LEN_08BIT, 0x00}, + {0xd4d3, CRL_REG_LEN_08BIT, 0x00}, + {0xd4d4, CRL_REG_LEN_08BIT, 0xe0}, + {0xd4d5, CRL_REG_LEN_08BIT, 0xa3}, + {0xd4d6, CRL_REG_LEN_08BIT, 0x58}, + {0xd4d7, CRL_REG_LEN_08BIT, 0x00}, + {0xd4d8, CRL_REG_LEN_08BIT, 0xa4}, + {0xd4d9, CRL_REG_LEN_08BIT, 0xc6}, + {0xd4da, CRL_REG_LEN_08BIT, 0x00}, + {0xd4db, CRL_REG_LEN_08BIT, 0xff}, + {0xd4dc, CRL_REG_LEN_08BIT, 0xb8}, + {0xd4dd, CRL_REG_LEN_08BIT, 0x64}, + {0xd4de, CRL_REG_LEN_08BIT, 0x00}, + {0xd4df, CRL_REG_LEN_08BIT, 0x18}, + {0xd4e0, CRL_REG_LEN_08BIT, 0xbc}, + {0xd4e1, CRL_REG_LEN_08BIT, 0x46}, + {0xd4e2, CRL_REG_LEN_08BIT, 0x00}, + {0xd4e3, CRL_REG_LEN_08BIT, 0x03}, + {0xd4e4, CRL_REG_LEN_08BIT, 0x94}, + {0xd4e5, CRL_REG_LEN_08BIT, 0x85}, + {0xd4e6, CRL_REG_LEN_08BIT, 0x00}, + {0xd4e7, CRL_REG_LEN_08BIT, 0x00}, + {0xd4e8, CRL_REG_LEN_08BIT, 0xb8}, + {0xd4e9, CRL_REG_LEN_08BIT, 0x63}, + {0xd4ea, CRL_REG_LEN_08BIT, 0x00}, + {0xd4eb, CRL_REG_LEN_08BIT, 0x98}, + {0xd4ec, CRL_REG_LEN_08BIT, 0xe0}, + {0xd4ed, CRL_REG_LEN_08BIT, 0x64}, + {0xd4ee, CRL_REG_LEN_08BIT, 0x18}, + {0xd4ef, CRL_REG_LEN_08BIT, 0x00}, + {0xd4f0, CRL_REG_LEN_08BIT, 0x0f}, + {0xd4f1, CRL_REG_LEN_08BIT, 0xff}, + {0xd4f2, CRL_REG_LEN_08BIT, 0xff}, + {0xd4f3, CRL_REG_LEN_08BIT, 0xf0}, + {0xd4f4, CRL_REG_LEN_08BIT, 0xdc}, + {0xd4f5, CRL_REG_LEN_08BIT, 0x05}, + {0xd4f6, CRL_REG_LEN_08BIT, 0x18}, + {0xd4f7, CRL_REG_LEN_08BIT, 0x00}, + {0xd4f8, CRL_REG_LEN_08BIT, 0x9c}, + {0xd4f9, CRL_REG_LEN_08BIT, 0x68}, + {0xd4fa, CRL_REG_LEN_08BIT, 0x00}, + {0xd4fb, CRL_REG_LEN_08BIT, 0x01}, + {0xd4fc, CRL_REG_LEN_08BIT, 0xa5}, + {0xd4fd, CRL_REG_LEN_08BIT, 0x03}, + {0xd4fe, CRL_REG_LEN_08BIT, 
0x00}, + {0xd4ff, CRL_REG_LEN_08BIT, 0xff}, + {0xd500, CRL_REG_LEN_08BIT, 0xbc}, + {0xd501, CRL_REG_LEN_08BIT, 0x48}, + {0xd502, CRL_REG_LEN_08BIT, 0x00}, + {0xd503, CRL_REG_LEN_08BIT, 0x01}, + {0xd504, CRL_REG_LEN_08BIT, 0x0f}, + {0xd505, CRL_REG_LEN_08BIT, 0xff}, + {0xd506, CRL_REG_LEN_08BIT, 0xff}, + {0xd507, CRL_REG_LEN_08BIT, 0xea}, + {0xd508, CRL_REG_LEN_08BIT, 0xb8}, + {0xd509, CRL_REG_LEN_08BIT, 0xe8}, + {0xd50a, CRL_REG_LEN_08BIT, 0x00}, + {0xd50b, CRL_REG_LEN_08BIT, 0x02}, + {0xd50c, CRL_REG_LEN_08BIT, 0x18}, + {0xd50d, CRL_REG_LEN_08BIT, 0x60}, + {0xd50e, CRL_REG_LEN_08BIT, 0x00}, + {0xd50f, CRL_REG_LEN_08BIT, 0x01}, + {0xd510, CRL_REG_LEN_08BIT, 0xa8}, + {0xd511, CRL_REG_LEN_08BIT, 0x63}, + {0xd512, CRL_REG_LEN_08BIT, 0x06}, + {0xd513, CRL_REG_LEN_08BIT, 0x14}, + {0xd514, CRL_REG_LEN_08BIT, 0x07}, + {0xd515, CRL_REG_LEN_08BIT, 0xff}, + {0xd516, CRL_REG_LEN_08BIT, 0xe3}, + {0xd517, CRL_REG_LEN_08BIT, 0xe9}, + {0xd518, CRL_REG_LEN_08BIT, 0x9c}, + {0xd519, CRL_REG_LEN_08BIT, 0x83}, + {0xd51a, CRL_REG_LEN_08BIT, 0x00}, + {0xd51b, CRL_REG_LEN_08BIT, 0x10}, + {0xd51c, CRL_REG_LEN_08BIT, 0x85}, + {0xd51d, CRL_REG_LEN_08BIT, 0x21}, + {0xd51e, CRL_REG_LEN_08BIT, 0x00}, + {0xd51f, CRL_REG_LEN_08BIT, 0x00}, + {0xd520, CRL_REG_LEN_08BIT, 0x44}, + {0xd521, CRL_REG_LEN_08BIT, 0x00}, + {0xd522, CRL_REG_LEN_08BIT, 0x48}, + {0xd523, CRL_REG_LEN_08BIT, 0x00}, + {0xd524, CRL_REG_LEN_08BIT, 0x9c}, + {0xd525, CRL_REG_LEN_08BIT, 0x21}, + {0xd526, CRL_REG_LEN_08BIT, 0x00}, + {0xd527, CRL_REG_LEN_08BIT, 0x04}, + {0xd528, CRL_REG_LEN_08BIT, 0x18}, + {0xd529, CRL_REG_LEN_08BIT, 0x60}, + {0xd52a, CRL_REG_LEN_08BIT, 0x00}, + {0xd52b, CRL_REG_LEN_08BIT, 0x01}, + {0xd52c, CRL_REG_LEN_08BIT, 0x9c}, + {0xd52d, CRL_REG_LEN_08BIT, 0x80}, + {0xd52e, CRL_REG_LEN_08BIT, 0xff}, + {0xd52f, CRL_REG_LEN_08BIT, 0xff}, + {0xd530, CRL_REG_LEN_08BIT, 0xa8}, + {0xd531, CRL_REG_LEN_08BIT, 0x63}, + {0xd532, CRL_REG_LEN_08BIT, 0x09}, + {0xd533, CRL_REG_LEN_08BIT, 0xef}, + {0xd534, CRL_REG_LEN_08BIT, 
0xd8}, + {0xd535, CRL_REG_LEN_08BIT, 0x03}, + {0xd536, CRL_REG_LEN_08BIT, 0x20}, + {0xd537, CRL_REG_LEN_08BIT, 0x00}, + {0xd538, CRL_REG_LEN_08BIT, 0x18}, + {0xd539, CRL_REG_LEN_08BIT, 0x60}, + {0xd53a, CRL_REG_LEN_08BIT, 0x80}, + {0xd53b, CRL_REG_LEN_08BIT, 0x06}, + {0xd53c, CRL_REG_LEN_08BIT, 0xa8}, + {0xd53d, CRL_REG_LEN_08BIT, 0x63}, + {0xd53e, CRL_REG_LEN_08BIT, 0xc9}, + {0xd53f, CRL_REG_LEN_08BIT, 0xef}, + {0xd540, CRL_REG_LEN_08BIT, 0xd8}, + {0xd541, CRL_REG_LEN_08BIT, 0x03}, + {0xd542, CRL_REG_LEN_08BIT, 0x20}, + {0xd543, CRL_REG_LEN_08BIT, 0x00}, + {0xd544, CRL_REG_LEN_08BIT, 0x44}, + {0xd545, CRL_REG_LEN_08BIT, 0x00}, + {0xd546, CRL_REG_LEN_08BIT, 0x48}, + {0xd547, CRL_REG_LEN_08BIT, 0x00}, + {0xd548, CRL_REG_LEN_08BIT, 0x15}, + {0xd549, CRL_REG_LEN_08BIT, 0x00}, + {0xd54a, CRL_REG_LEN_08BIT, 0x00}, + {0xd54b, CRL_REG_LEN_08BIT, 0x00}, + {0xd54c, CRL_REG_LEN_08BIT, 0x18}, + {0xd54d, CRL_REG_LEN_08BIT, 0x80}, + {0xd54e, CRL_REG_LEN_08BIT, 0x00}, + {0xd54f, CRL_REG_LEN_08BIT, 0x01}, + {0xd550, CRL_REG_LEN_08BIT, 0xa8}, + {0xd551, CRL_REG_LEN_08BIT, 0x84}, + {0xd552, CRL_REG_LEN_08BIT, 0x0a}, + {0xd553, CRL_REG_LEN_08BIT, 0x12}, + {0xd554, CRL_REG_LEN_08BIT, 0x8c}, + {0xd555, CRL_REG_LEN_08BIT, 0x64}, + {0xd556, CRL_REG_LEN_08BIT, 0x00}, + {0xd557, CRL_REG_LEN_08BIT, 0x00}, + {0xd558, CRL_REG_LEN_08BIT, 0xbc}, + {0xd559, CRL_REG_LEN_08BIT, 0x03}, + {0xd55a, CRL_REG_LEN_08BIT, 0x00}, + {0xd55b, CRL_REG_LEN_08BIT, 0x00}, + {0xd55c, CRL_REG_LEN_08BIT, 0x13}, + {0xd55d, CRL_REG_LEN_08BIT, 0xff}, + {0xd55e, CRL_REG_LEN_08BIT, 0xff}, + {0xd55f, CRL_REG_LEN_08BIT, 0xfe}, + {0xd560, CRL_REG_LEN_08BIT, 0x15}, + {0xd561, CRL_REG_LEN_08BIT, 0x00}, + {0xd562, CRL_REG_LEN_08BIT, 0x00}, + {0xd563, CRL_REG_LEN_08BIT, 0x00}, + {0xd564, CRL_REG_LEN_08BIT, 0x44}, + {0xd565, CRL_REG_LEN_08BIT, 0x00}, + {0xd566, CRL_REG_LEN_08BIT, 0x48}, + {0xd567, CRL_REG_LEN_08BIT, 0x00}, + {0xd568, CRL_REG_LEN_08BIT, 0x15}, + {0xd569, CRL_REG_LEN_08BIT, 0x00}, + {0xd56a, CRL_REG_LEN_08BIT, 
0x00}, + {0xd56b, CRL_REG_LEN_08BIT, 0x00}, + {0xd56c, CRL_REG_LEN_08BIT, 0x00}, + {0xd56d, CRL_REG_LEN_08BIT, 0x00}, + {0xd56e, CRL_REG_LEN_08BIT, 0x00}, + {0xd56f, CRL_REG_LEN_08BIT, 0x00}, + {0xd570, CRL_REG_LEN_08BIT, 0x00}, + {0xd571, CRL_REG_LEN_08BIT, 0x00}, + {0xd572, CRL_REG_LEN_08BIT, 0x00}, + {0xd573, CRL_REG_LEN_08BIT, 0x00}, + {0x6f0e, CRL_REG_LEN_08BIT, 0x33}, + {0x6f0f, CRL_REG_LEN_08BIT, 0x33}, + {0x460e, CRL_REG_LEN_08BIT, 0x08}, + {0x460f, CRL_REG_LEN_08BIT, 0x01}, + {0x4610, CRL_REG_LEN_08BIT, 0x00}, + {0x4611, CRL_REG_LEN_08BIT, 0x01}, + {0x4612, CRL_REG_LEN_08BIT, 0x00}, + {0x4613, CRL_REG_LEN_08BIT, 0x01}, + {0x4605, CRL_REG_LEN_08BIT, 0x0b}, + {0x4608, CRL_REG_LEN_08BIT, 0x00}, + {0x4609, CRL_REG_LEN_08BIT, 0x08}, + {0x4602, CRL_REG_LEN_08BIT, 0x02}, + {0x4603, CRL_REG_LEN_08BIT, 0xd8}, + {0x6804, CRL_REG_LEN_08BIT, 0x00}, + {0x6805, CRL_REG_LEN_08BIT, 0x06}, + {0x6806, CRL_REG_LEN_08BIT, 0x00}, + {0x5120, CRL_REG_LEN_08BIT, 0x00}, + {0x3510, CRL_REG_LEN_08BIT, 0x00}, + {0x3504, CRL_REG_LEN_08BIT, 0x00}, + {0x6800, CRL_REG_LEN_08BIT, 0x00}, + {0x6f0d, CRL_REG_LEN_08BIT, 0x0f}, + {0x5000, CRL_REG_LEN_08BIT, 0xff}, + {0x5001, CRL_REG_LEN_08BIT, 0xbf}, + {0x5002, CRL_REG_LEN_08BIT, 0x7e}, + {0x5003, CRL_REG_LEN_08BIT, 0x0c}, + {0x503d, CRL_REG_LEN_08BIT, 0x00}, + {0xc450, CRL_REG_LEN_08BIT, 0x01}, + {0xc452, CRL_REG_LEN_08BIT, 0x04}, + {0xc453, CRL_REG_LEN_08BIT, 0x00}, + {0xc454, CRL_REG_LEN_08BIT, 0x00}, + {0xc455, CRL_REG_LEN_08BIT, 0x00}, + {0xc456, CRL_REG_LEN_08BIT, 0x00}, + {0xc457, CRL_REG_LEN_08BIT, 0x00}, + {0xc458, CRL_REG_LEN_08BIT, 0x00}, + {0xc459, CRL_REG_LEN_08BIT, 0x00}, + {0xc45b, CRL_REG_LEN_08BIT, 0x00}, + {0xc45c, CRL_REG_LEN_08BIT, 0x00}, + {0xc45d, CRL_REG_LEN_08BIT, 0x00}, + {0xc45e, CRL_REG_LEN_08BIT, 0x02}, + {0xc45f, CRL_REG_LEN_08BIT, 0x01}, + {0xc460, CRL_REG_LEN_08BIT, 0x01}, + {0xc461, CRL_REG_LEN_08BIT, 0x01}, + {0xc462, CRL_REG_LEN_08BIT, 0x01}, + {0xc464, CRL_REG_LEN_08BIT, 0x88}, + {0xc465, CRL_REG_LEN_08BIT, 
0x00}, + {0xc466, CRL_REG_LEN_08BIT, 0x8a}, + {0xc467, CRL_REG_LEN_08BIT, 0x00}, + {0xc468, CRL_REG_LEN_08BIT, 0x86}, + {0xc469, CRL_REG_LEN_08BIT, 0x00}, + {0xc46a, CRL_REG_LEN_08BIT, 0x40}, + {0xc46b, CRL_REG_LEN_08BIT, 0x50}, + {0xc46c, CRL_REG_LEN_08BIT, 0x30}, + {0xc46d, CRL_REG_LEN_08BIT, 0x28}, + {0xc46e, CRL_REG_LEN_08BIT, 0x60}, + {0xc46f, CRL_REG_LEN_08BIT, 0x40}, + {0xc47c, CRL_REG_LEN_08BIT, 0x01}, + {0xc47d, CRL_REG_LEN_08BIT, 0x38}, + {0xc47e, CRL_REG_LEN_08BIT, 0x00}, + {0xc47f, CRL_REG_LEN_08BIT, 0x00}, + {0xc480, CRL_REG_LEN_08BIT, 0x00}, + {0xc481, CRL_REG_LEN_08BIT, 0xff}, + {0xc482, CRL_REG_LEN_08BIT, 0x00}, + {0xc483, CRL_REG_LEN_08BIT, 0x40}, + {0xc484, CRL_REG_LEN_08BIT, 0x00}, + {0xc485, CRL_REG_LEN_08BIT, 0x18}, + {0xc486, CRL_REG_LEN_08BIT, 0x00}, + {0xc487, CRL_REG_LEN_08BIT, 0x18}, + {0xc488, CRL_REG_LEN_08BIT, 0x2e}, + {0xc489, CRL_REG_LEN_08BIT, 0x40}, + {0xc48a, CRL_REG_LEN_08BIT, 0x2e}, + {0xc48b, CRL_REG_LEN_08BIT, 0x40}, + {0xc48c, CRL_REG_LEN_08BIT, 0x00}, + {0xc48d, CRL_REG_LEN_08BIT, 0x04}, + {0xc48e, CRL_REG_LEN_08BIT, 0x00}, + {0xc48f, CRL_REG_LEN_08BIT, 0x04}, + {0xc490, CRL_REG_LEN_08BIT, 0x07}, + {0xc492, CRL_REG_LEN_08BIT, 0x20}, + {0xc493, CRL_REG_LEN_08BIT, 0x08}, + {0xc498, CRL_REG_LEN_08BIT, 0x02}, + {0xc499, CRL_REG_LEN_08BIT, 0x00}, + {0xc49a, CRL_REG_LEN_08BIT, 0x02}, + {0xc49b, CRL_REG_LEN_08BIT, 0x00}, + {0xc49c, CRL_REG_LEN_08BIT, 0x02}, + {0xc49d, CRL_REG_LEN_08BIT, 0x00}, + {0xc49e, CRL_REG_LEN_08BIT, 0x02}, + {0xc49f, CRL_REG_LEN_08BIT, 0x60}, + {0xc4a0, CRL_REG_LEN_08BIT, 0x03}, + {0xc4a1, CRL_REG_LEN_08BIT, 0x00}, + {0xc4a2, CRL_REG_LEN_08BIT, 0x04}, + {0xc4a3, CRL_REG_LEN_08BIT, 0x00}, + {0xc4a4, CRL_REG_LEN_08BIT, 0x00}, + {0xc4a5, CRL_REG_LEN_08BIT, 0x10}, + {0xc4a6, CRL_REG_LEN_08BIT, 0x00}, + {0xc4a7, CRL_REG_LEN_08BIT, 0x40}, + {0xc4a8, CRL_REG_LEN_08BIT, 0x00}, + {0xc4a9, CRL_REG_LEN_08BIT, 0x80}, + {0xc4aa, CRL_REG_LEN_08BIT, 0x0d}, + {0xc4ab, CRL_REG_LEN_08BIT, 0x00}, + {0xc4ac, CRL_REG_LEN_08BIT, 
0x03}, + {0xc4ad, CRL_REG_LEN_08BIT, 0xf0}, + {0xc4b4, CRL_REG_LEN_08BIT, 0x01}, + {0xc4b5, CRL_REG_LEN_08BIT, 0x01}, + {0xc4b6, CRL_REG_LEN_08BIT, 0x00}, + {0xc4b7, CRL_REG_LEN_08BIT, 0x01}, + {0xc4b8, CRL_REG_LEN_08BIT, 0x00}, + {0xc4b9, CRL_REG_LEN_08BIT, 0x01}, + {0xc4ba, CRL_REG_LEN_08BIT, 0x01}, + {0xc4bb, CRL_REG_LEN_08BIT, 0x00}, + {0xc4bc, CRL_REG_LEN_08BIT, 0x01}, + {0xc4bd, CRL_REG_LEN_08BIT, 0x60}, + {0xc4be, CRL_REG_LEN_08BIT, 0x02}, + {0xc4bf, CRL_REG_LEN_08BIT, 0x33}, + {0xc4c8, CRL_REG_LEN_08BIT, 0x03}, + {0xc4c9, CRL_REG_LEN_08BIT, 0xd0}, + {0xc4ca, CRL_REG_LEN_08BIT, 0x0e}, + {0xc4cb, CRL_REG_LEN_08BIT, 0x00}, + {0xc4cc, CRL_REG_LEN_08BIT, 0x0e}, + {0xc4cd, CRL_REG_LEN_08BIT, 0x51}, + {0xc4ce, CRL_REG_LEN_08BIT, 0x0e}, + {0xc4cf, CRL_REG_LEN_08BIT, 0x51}, + {0xc4d0, CRL_REG_LEN_08BIT, 0x04}, + {0xc4d1, CRL_REG_LEN_08BIT, 0x80}, + {0xc4e0, CRL_REG_LEN_08BIT, 0x04}, + {0xc4e1, CRL_REG_LEN_08BIT, 0x02}, + {0xc4e2, CRL_REG_LEN_08BIT, 0x01}, + {0xc4e4, CRL_REG_LEN_08BIT, 0x10}, + {0xc4e5, CRL_REG_LEN_08BIT, 0x20}, + {0xc4e6, CRL_REG_LEN_08BIT, 0x30}, + {0xc4e7, CRL_REG_LEN_08BIT, 0x40}, + {0xc4e8, CRL_REG_LEN_08BIT, 0x50}, + {0xc4e9, CRL_REG_LEN_08BIT, 0x60}, + {0xc4ea, CRL_REG_LEN_08BIT, 0x70}, + {0xc4eb, CRL_REG_LEN_08BIT, 0x80}, + {0xc4ec, CRL_REG_LEN_08BIT, 0x90}, + {0xc4ed, CRL_REG_LEN_08BIT, 0xa0}, + {0xc4ee, CRL_REG_LEN_08BIT, 0xb0}, + {0xc4ef, CRL_REG_LEN_08BIT, 0xc0}, + {0xc4f0, CRL_REG_LEN_08BIT, 0xd0}, + {0xc4f1, CRL_REG_LEN_08BIT, 0xe0}, + {0xc4f2, CRL_REG_LEN_08BIT, 0xf0}, + {0xc4f3, CRL_REG_LEN_08BIT, 0x80}, + {0xc4f4, CRL_REG_LEN_08BIT, 0x00}, + {0xc4f5, CRL_REG_LEN_08BIT, 0x20}, + {0xc4f6, CRL_REG_LEN_08BIT, 0x02}, + {0xc4f7, CRL_REG_LEN_08BIT, 0x00}, + {0xc4f8, CRL_REG_LEN_08BIT, 0x04}, + {0xc4f9, CRL_REG_LEN_08BIT, 0x0b}, + {0xc4fa, CRL_REG_LEN_08BIT, 0x00}, + {0xc4fb, CRL_REG_LEN_08BIT, 0x00}, + {0xc4fc, CRL_REG_LEN_08BIT, 0x01}, + {0xc4fd, CRL_REG_LEN_08BIT, 0x00}, + {0xc4fe, CRL_REG_LEN_08BIT, 0x04}, + {0xc4ff, CRL_REG_LEN_08BIT, 
0x02}, + {0xc500, CRL_REG_LEN_08BIT, 0x48}, + {0xc501, CRL_REG_LEN_08BIT, 0x74}, + {0xc502, CRL_REG_LEN_08BIT, 0x58}, + {0xc503, CRL_REG_LEN_08BIT, 0x80}, + {0xc504, CRL_REG_LEN_08BIT, 0x05}, + {0xc505, CRL_REG_LEN_08BIT, 0x80}, + {0xc506, CRL_REG_LEN_08BIT, 0x03}, + {0xc507, CRL_REG_LEN_08BIT, 0x80}, + {0xc508, CRL_REG_LEN_08BIT, 0x01}, + {0xc509, CRL_REG_LEN_08BIT, 0xc0}, + {0xc50a, CRL_REG_LEN_08BIT, 0x01}, + {0xc50b, CRL_REG_LEN_08BIT, 0xa0}, + {0xc50c, CRL_REG_LEN_08BIT, 0x01}, + {0xc50d, CRL_REG_LEN_08BIT, 0x2c}, + {0xc50e, CRL_REG_LEN_08BIT, 0x01}, + {0xc50f, CRL_REG_LEN_08BIT, 0x0a}, + {0xc510, CRL_REG_LEN_08BIT, 0x00}, + {0xc511, CRL_REG_LEN_08BIT, 0x00}, + {0xc512, CRL_REG_LEN_08BIT, 0xe5}, + {0xc513, CRL_REG_LEN_08BIT, 0x14}, + {0xc514, CRL_REG_LEN_08BIT, 0x04}, + {0xc515, CRL_REG_LEN_08BIT, 0x00}, + {0xc518, CRL_REG_LEN_08BIT, 0x03}, + {0xc519, CRL_REG_LEN_08BIT, 0x48}, + {0xc51a, CRL_REG_LEN_08BIT, 0x07}, + {0xc51b, CRL_REG_LEN_08BIT, 0x70}, + {0xc2e0, CRL_REG_LEN_08BIT, 0x00}, + {0xc2e1, CRL_REG_LEN_08BIT, 0x51}, + {0xc2e2, CRL_REG_LEN_08BIT, 0x00}, + {0xc2e3, CRL_REG_LEN_08BIT, 0xd6}, + {0xc2e4, CRL_REG_LEN_08BIT, 0x01}, + {0xc2e5, CRL_REG_LEN_08BIT, 0x5e}, + {0xc2e9, CRL_REG_LEN_08BIT, 0x01}, + {0xc2ea, CRL_REG_LEN_08BIT, 0x7a}, + {0xc2eb, CRL_REG_LEN_08BIT, 0x90}, + {0xc2ed, CRL_REG_LEN_08BIT, 0x00}, + {0xc2ee, CRL_REG_LEN_08BIT, 0x7a}, + {0xc2ef, CRL_REG_LEN_08BIT, 0x64}, + {0xc308, CRL_REG_LEN_08BIT, 0x00}, + {0xc309, CRL_REG_LEN_08BIT, 0x00}, + {0xc30a, CRL_REG_LEN_08BIT, 0x00}, + {0xc30c, CRL_REG_LEN_08BIT, 0x00}, + {0xc30d, CRL_REG_LEN_08BIT, 0x01}, + {0xc30e, CRL_REG_LEN_08BIT, 0x00}, + {0xc30f, CRL_REG_LEN_08BIT, 0x00}, + {0xc310, CRL_REG_LEN_08BIT, 0x01}, + {0xc311, CRL_REG_LEN_08BIT, 0x60}, + {0xc312, CRL_REG_LEN_08BIT, 0xff}, + {0xc313, CRL_REG_LEN_08BIT, 0x08}, + {0xc314, CRL_REG_LEN_08BIT, 0x01}, + {0xc315, CRL_REG_LEN_08BIT, 0x7f}, + {0xc316, CRL_REG_LEN_08BIT, 0xff}, + {0xc317, CRL_REG_LEN_08BIT, 0x0b}, + {0xc318, CRL_REG_LEN_08BIT, 
0x00}, + {0xc319, CRL_REG_LEN_08BIT, 0x0c}, + {0xc31a, CRL_REG_LEN_08BIT, 0x00}, + {0xc31b, CRL_REG_LEN_08BIT, 0xe0}, + {0xc31c, CRL_REG_LEN_08BIT, 0x00}, + {0xc31d, CRL_REG_LEN_08BIT, 0x14}, + {0xc31e, CRL_REG_LEN_08BIT, 0x00}, + {0xc31f, CRL_REG_LEN_08BIT, 0xc5}, + {0xc320, CRL_REG_LEN_08BIT, 0xff}, + {0xc321, CRL_REG_LEN_08BIT, 0x4b}, + {0xc322, CRL_REG_LEN_08BIT, 0xff}, + {0xc323, CRL_REG_LEN_08BIT, 0xf0}, + {0xc324, CRL_REG_LEN_08BIT, 0xff}, + {0xc325, CRL_REG_LEN_08BIT, 0xe8}, + {0xc326, CRL_REG_LEN_08BIT, 0x00}, + {0xc327, CRL_REG_LEN_08BIT, 0x46}, + {0xc328, CRL_REG_LEN_08BIT, 0xff}, + {0xc329, CRL_REG_LEN_08BIT, 0xd2}, + {0xc32a, CRL_REG_LEN_08BIT, 0xff}, + {0xc32b, CRL_REG_LEN_08BIT, 0xe4}, + {0xc32c, CRL_REG_LEN_08BIT, 0xff}, + {0xc32d, CRL_REG_LEN_08BIT, 0xbb}, + {0xc32e, CRL_REG_LEN_08BIT, 0x00}, + {0xc32f, CRL_REG_LEN_08BIT, 0x61}, + {0xc330, CRL_REG_LEN_08BIT, 0xff}, + {0xc331, CRL_REG_LEN_08BIT, 0xf9}, + {0xc332, CRL_REG_LEN_08BIT, 0x00}, + {0xc333, CRL_REG_LEN_08BIT, 0xd9}, + {0xc334, CRL_REG_LEN_08BIT, 0x00}, + {0xc335, CRL_REG_LEN_08BIT, 0x2e}, + {0xc336, CRL_REG_LEN_08BIT, 0x00}, + {0xc337, CRL_REG_LEN_08BIT, 0xb1}, + {0xc338, CRL_REG_LEN_08BIT, 0xff}, + {0xc339, CRL_REG_LEN_08BIT, 0x64}, + {0xc33a, CRL_REG_LEN_08BIT, 0xff}, + {0xc33b, CRL_REG_LEN_08BIT, 0xeb}, + {0xc33c, CRL_REG_LEN_08BIT, 0xff}, + {0xc33d, CRL_REG_LEN_08BIT, 0xe8}, + {0xc33e, CRL_REG_LEN_08BIT, 0x00}, + {0xc33f, CRL_REG_LEN_08BIT, 0x48}, + {0xc340, CRL_REG_LEN_08BIT, 0xff}, + {0xc341, CRL_REG_LEN_08BIT, 0xd0}, + {0xc342, CRL_REG_LEN_08BIT, 0xff}, + {0xc343, CRL_REG_LEN_08BIT, 0xed}, + {0xc344, CRL_REG_LEN_08BIT, 0xff}, + {0xc345, CRL_REG_LEN_08BIT, 0xad}, + {0xc346, CRL_REG_LEN_08BIT, 0x00}, + {0xc347, CRL_REG_LEN_08BIT, 0x66}, + {0xc348, CRL_REG_LEN_08BIT, 0x01}, + {0xc349, CRL_REG_LEN_08BIT, 0x00}, + {0x6700, CRL_REG_LEN_08BIT, 0x04}, + {0x6701, CRL_REG_LEN_08BIT, 0x7b}, + {0x6702, CRL_REG_LEN_08BIT, 0xfd}, + {0x6703, CRL_REG_LEN_08BIT, 0xf9}, + {0x6704, CRL_REG_LEN_08BIT, 
0x3d}, + {0x6705, CRL_REG_LEN_08BIT, 0x71}, + {0x6706, CRL_REG_LEN_08BIT, 0x78}, + {0x6708, CRL_REG_LEN_08BIT, 0x05}, + {0x6f06, CRL_REG_LEN_08BIT, 0x6f}, + {0x6f07, CRL_REG_LEN_08BIT, 0x00}, + {0x6f0a, CRL_REG_LEN_08BIT, 0x6f}, + {0x6f0b, CRL_REG_LEN_08BIT, 0x00}, + {0x6f00, CRL_REG_LEN_08BIT, 0x03}, + {0xc34c, CRL_REG_LEN_08BIT, 0x01}, + {0xc34d, CRL_REG_LEN_08BIT, 0x00}, + {0xc34e, CRL_REG_LEN_08BIT, 0x46}, + {0xc34f, CRL_REG_LEN_08BIT, 0x55}, + {0xc350, CRL_REG_LEN_08BIT, 0x00}, + {0xc351, CRL_REG_LEN_08BIT, 0x40}, + {0xc352, CRL_REG_LEN_08BIT, 0x00}, + {0xc353, CRL_REG_LEN_08BIT, 0xff}, + {0xc354, CRL_REG_LEN_08BIT, 0x04}, + {0xc355, CRL_REG_LEN_08BIT, 0x08}, + {0xc356, CRL_REG_LEN_08BIT, 0x01}, + {0xc357, CRL_REG_LEN_08BIT, 0xef}, + {0xc358, CRL_REG_LEN_08BIT, 0x30}, + {0xc359, CRL_REG_LEN_08BIT, 0x01}, + {0xc35a, CRL_REG_LEN_08BIT, 0x64}, + {0xc35b, CRL_REG_LEN_08BIT, 0x46}, + {0xc35c, CRL_REG_LEN_08BIT, 0x00}, + {0x3042, CRL_REG_LEN_08BIT, 0xf0}, + {0x3042, CRL_REG_LEN_08BIT, 0xf0}, + {0x3042, CRL_REG_LEN_08BIT, 0xf0}, + {0x3042, CRL_REG_LEN_08BIT, 0xf0}, + {0x3042, CRL_REG_LEN_08BIT, 0xf0}, + {0x3042, CRL_REG_LEN_08BIT, 0xf0}, + {0x3042, CRL_REG_LEN_08BIT, 0xf0}, + {0x3042, CRL_REG_LEN_08BIT, 0xf0}, + {0x3042, CRL_REG_LEN_08BIT, 0xf0}, + {0x3042, CRL_REG_LEN_08BIT, 0xf0}, + {0x3042, CRL_REG_LEN_08BIT, 0xf0}, + {0x3042, CRL_REG_LEN_08BIT, 0xf0}, + {0x3042, CRL_REG_LEN_08BIT, 0xf0}, + {0x3042, CRL_REG_LEN_08BIT, 0xf0}, + {0x3042, CRL_REG_LEN_08BIT, 0xf0}, + {0x3042, CRL_REG_LEN_08BIT, 0xf0}, + {0x3042, CRL_REG_LEN_08BIT, 0xf0}, + {0x3042, CRL_REG_LEN_08BIT, 0xf0}, + {0x3042, CRL_REG_LEN_08BIT, 0xf0}, + {0x3042, CRL_REG_LEN_08BIT, 0xf0}, + {0x3042, CRL_REG_LEN_08BIT, 0xf0}, + {0x3042, CRL_REG_LEN_08BIT, 0xf0}, + {0x3042, CRL_REG_LEN_08BIT, 0xf0}, + {0x3042, CRL_REG_LEN_08BIT, 0xf0}, + {0x3042, CRL_REG_LEN_08BIT, 0xf0}, + {0x3042, CRL_REG_LEN_08BIT, 0xf0}, + {0x301b, CRL_REG_LEN_08BIT, 0xf0}, + {0x301c, CRL_REG_LEN_08BIT, 0xf0}, + {0x301a, CRL_REG_LEN_08BIT, 
0xf0}, + {0xceb0, CRL_REG_LEN_08BIT, 0x00}, + {0xceb1, CRL_REG_LEN_08BIT, 0x00}, + {0xceb2, CRL_REG_LEN_08BIT, 0x00}, + {0xceb3, CRL_REG_LEN_08BIT, 0x00}, + {0xceb4, CRL_REG_LEN_08BIT, 0x00}, + {0xceb5, CRL_REG_LEN_08BIT, 0x00}, + {0x0000, CRL_REG_LEN_DELAY, 0x0c}, + {0xceb6, CRL_REG_LEN_08BIT, 0x00}, + {0x0000, CRL_REG_LEN_DELAY, 0x0c}, + {0xceb7, CRL_REG_LEN_08BIT, 0x00}, + {0x0000, CRL_REG_LEN_DELAY, 0x0c}, + {0xc4bc, CRL_REG_LEN_08BIT, 0x01}, + {0x0000, CRL_REG_LEN_DELAY, 0x0c}, + {0xc4bd, CRL_REG_LEN_08BIT, 0x60}, + {0x0000, CRL_REG_LEN_DELAY, 0x0c}, + {0xc4a0, CRL_REG_LEN_08BIT, 0x03}, + {0x0000, CRL_REG_LEN_DELAY, 0x0c}, + {0xc4a2, CRL_REG_LEN_08BIT, 0x04}, + {0x0000, CRL_REG_LEN_DELAY, 0x0c}, + {0x3011, CRL_REG_LEN_08BIT, 0x42}, + {0x0000, CRL_REG_LEN_DELAY, 0x0c}, + {0x5608, CRL_REG_LEN_08BIT, 0x0d}, + {0x0000, CRL_REG_LEN_DELAY, 0x0c}, +}; + +static struct crl_register_write_rep ov10635_640_480_YUV_HDR[] = { + {0x301b, CRL_REG_LEN_08BIT, 0xff}, + {0x301c, CRL_REG_LEN_08BIT, 0xff}, + {0x301a, CRL_REG_LEN_08BIT, 0xff}, + {0x3011, CRL_REG_LEN_08BIT, 0x42}, + {0x6900, CRL_REG_LEN_08BIT, 0x0c}, + {0x6901, CRL_REG_LEN_08BIT, 0x11}, + {0x3503, CRL_REG_LEN_08BIT, 0x10}, + {0x3025, CRL_REG_LEN_08BIT, 0x03}, + {0x3003, CRL_REG_LEN_08BIT, 0x14}, + {0x3004, CRL_REG_LEN_08BIT, 0x23}, + {0x3005, CRL_REG_LEN_08BIT, 0x20}, + {0x3006, CRL_REG_LEN_08BIT, 0x91}, + {0x3600, CRL_REG_LEN_08BIT, 0x74}, + {0x3601, CRL_REG_LEN_08BIT, 0x2b}, + {0x3612, CRL_REG_LEN_08BIT, 0x00}, + {0x3611, CRL_REG_LEN_08BIT, 0x67}, + {0x3633, CRL_REG_LEN_08BIT, 0xca}, + {0x3602, CRL_REG_LEN_08BIT, 0x2f}, + {0x3603, CRL_REG_LEN_08BIT, 0x00}, + {0x3630, CRL_REG_LEN_08BIT, 0x28}, + {0x3631, CRL_REG_LEN_08BIT, 0x16}, + {0x3714, CRL_REG_LEN_08BIT, 0x10}, + {0x371d, CRL_REG_LEN_08BIT, 0x01}, + {0x3007, CRL_REG_LEN_08BIT, 0x01}, + {0x3024, CRL_REG_LEN_08BIT, 0x01}, + {0x3020, CRL_REG_LEN_08BIT, 0x0b}, + {0x3702, CRL_REG_LEN_08BIT, 0x0a}, + {0x3703, CRL_REG_LEN_08BIT, 0x17}, + {0x3704, CRL_REG_LEN_08BIT, 
0x0f}, + {0x3709, CRL_REG_LEN_08BIT, 0xa8}, + {0x3709, CRL_REG_LEN_08BIT, 0xa8}, + {0x370c, CRL_REG_LEN_08BIT, 0xc7}, + {0x370d, CRL_REG_LEN_08BIT, 0x80}, + {0x3712, CRL_REG_LEN_08BIT, 0x00}, + {0x3713, CRL_REG_LEN_08BIT, 0x20}, + {0x3715, CRL_REG_LEN_08BIT, 0x04}, + {0x381d, CRL_REG_LEN_08BIT, 0x40}, + {0x381c, CRL_REG_LEN_08BIT, 0x00}, + {0x3822, CRL_REG_LEN_08BIT, 0x50}, + {0x3824, CRL_REG_LEN_08BIT, 0x50}, + {0x3815, CRL_REG_LEN_08BIT, 0x8c}, + {0x3804, CRL_REG_LEN_08BIT, 0x05}, + {0x3805, CRL_REG_LEN_08BIT, 0x1f}, + {0x3800, CRL_REG_LEN_08BIT, 0x00}, + {0x3801, CRL_REG_LEN_08BIT, 0x00}, + {0x3806, CRL_REG_LEN_08BIT, 0x02}, + {0x3807, CRL_REG_LEN_08BIT, 0x89}, + {0x3802, CRL_REG_LEN_08BIT, 0x00}, + {0x3803, CRL_REG_LEN_08BIT, 0xa4}, + {0x3808, CRL_REG_LEN_08BIT, 0x02}, + {0x3809, CRL_REG_LEN_08BIT, 0x80}, + {0x380a, CRL_REG_LEN_08BIT, 0x01}, + {0x380b, CRL_REG_LEN_08BIT, 0xe0}, + {0x380c, CRL_REG_LEN_08BIT, 0x03}, + {0x380d, CRL_REG_LEN_08BIT, 0xc0}, + {0x6e42, CRL_REG_LEN_08BIT, 0x02}, + {0x6e43, CRL_REG_LEN_08BIT, 0x08}, + {0x380e, CRL_REG_LEN_08BIT, 0x02}, + {0x380f, CRL_REG_LEN_08BIT, 0x08}, + {0x3813, CRL_REG_LEN_08BIT, 0x02}, + {0x3811, CRL_REG_LEN_08BIT, 0x08}, + {0x381f, CRL_REG_LEN_08BIT, 0x0c}, + {0x3828, CRL_REG_LEN_08BIT, 0x03}, + {0x3829, CRL_REG_LEN_08BIT, 0x10}, + {0x382a, CRL_REG_LEN_08BIT, 0x10}, + {0x382b, CRL_REG_LEN_08BIT, 0x10}, + {0x3621, CRL_REG_LEN_08BIT, 0x74}, + {0x5005, CRL_REG_LEN_08BIT, 0x08}, + {0x56d5, CRL_REG_LEN_08BIT, 0x00}, + {0x56d6, CRL_REG_LEN_08BIT, 0x80}, + {0x56d7, CRL_REG_LEN_08BIT, 0x00}, + {0x56d8, CRL_REG_LEN_08BIT, 0x00}, + {0x56d9, CRL_REG_LEN_08BIT, 0x00}, + {0x56da, CRL_REG_LEN_08BIT, 0x80}, + {0x56db, CRL_REG_LEN_08BIT, 0x00}, + {0x56dc, CRL_REG_LEN_08BIT, 0x00}, + {0x56e8, CRL_REG_LEN_08BIT, 0x00}, + {0x56e9, CRL_REG_LEN_08BIT, 0x7f}, + {0x56ea, CRL_REG_LEN_08BIT, 0x00}, + {0x56eb, CRL_REG_LEN_08BIT, 0x7f}, + {0x5100, CRL_REG_LEN_08BIT, 0x00}, + {0x5101, CRL_REG_LEN_08BIT, 0x80}, + {0x5102, CRL_REG_LEN_08BIT, 
0x00}, + {0x5103, CRL_REG_LEN_08BIT, 0x80}, + {0x5104, CRL_REG_LEN_08BIT, 0x00}, + {0x5105, CRL_REG_LEN_08BIT, 0x80}, + {0x5106, CRL_REG_LEN_08BIT, 0x00}, + {0x5107, CRL_REG_LEN_08BIT, 0x80}, + {0x5108, CRL_REG_LEN_08BIT, 0x00}, + {0x5109, CRL_REG_LEN_08BIT, 0x00}, + {0x510a, CRL_REG_LEN_08BIT, 0x00}, + {0x510b, CRL_REG_LEN_08BIT, 0x00}, + {0x510c, CRL_REG_LEN_08BIT, 0x00}, + {0x510d, CRL_REG_LEN_08BIT, 0x00}, + {0x510e, CRL_REG_LEN_08BIT, 0x00}, + {0x510f, CRL_REG_LEN_08BIT, 0x00}, + {0x5110, CRL_REG_LEN_08BIT, 0x00}, + {0x5111, CRL_REG_LEN_08BIT, 0x80}, + {0x5112, CRL_REG_LEN_08BIT, 0x00}, + {0x5113, CRL_REG_LEN_08BIT, 0x80}, + {0x5114, CRL_REG_LEN_08BIT, 0x00}, + {0x5115, CRL_REG_LEN_08BIT, 0x80}, + {0x5116, CRL_REG_LEN_08BIT, 0x00}, + {0x5117, CRL_REG_LEN_08BIT, 0x80}, + {0x5118, CRL_REG_LEN_08BIT, 0x00}, + {0x5119, CRL_REG_LEN_08BIT, 0x00}, + {0x511a, CRL_REG_LEN_08BIT, 0x00}, + {0x511b, CRL_REG_LEN_08BIT, 0x00}, + {0x511c, CRL_REG_LEN_08BIT, 0x00}, + {0x511d, CRL_REG_LEN_08BIT, 0x00}, + {0x511e, CRL_REG_LEN_08BIT, 0x00}, + {0x511f, CRL_REG_LEN_08BIT, 0x00}, + {0x56d0, CRL_REG_LEN_08BIT, 0x00}, + {0x5006, CRL_REG_LEN_08BIT, 0x24}, + {0x5608, CRL_REG_LEN_08BIT, 0x19}, + {0x52d7, CRL_REG_LEN_08BIT, 0x06}, + {0x528d, CRL_REG_LEN_08BIT, 0x08}, + {0x5293, CRL_REG_LEN_08BIT, 0x12}, + {0x52d3, CRL_REG_LEN_08BIT, 0x12}, + {0x5288, CRL_REG_LEN_08BIT, 0x06}, + {0x5289, CRL_REG_LEN_08BIT, 0x20}, + {0x52c8, CRL_REG_LEN_08BIT, 0x06}, + {0x52c9, CRL_REG_LEN_08BIT, 0x20}, + {0x52cd, CRL_REG_LEN_08BIT, 0x04}, + {0x5381, CRL_REG_LEN_08BIT, 0x00}, + {0x5382, CRL_REG_LEN_08BIT, 0xff}, + {0x5589, CRL_REG_LEN_08BIT, 0x76}, + {0x558a, CRL_REG_LEN_08BIT, 0x47}, + {0x558b, CRL_REG_LEN_08BIT, 0xef}, + {0x558c, CRL_REG_LEN_08BIT, 0xc9}, + {0x558d, CRL_REG_LEN_08BIT, 0x49}, + {0x558e, CRL_REG_LEN_08BIT, 0x30}, + {0x558f, CRL_REG_LEN_08BIT, 0x67}, + {0x5590, CRL_REG_LEN_08BIT, 0x3f}, + {0x5591, CRL_REG_LEN_08BIT, 0xf0}, + {0x5592, CRL_REG_LEN_08BIT, 0x10}, + {0x55a2, CRL_REG_LEN_08BIT, 
0x6d}, + {0x55a3, CRL_REG_LEN_08BIT, 0x55}, + {0x55a4, CRL_REG_LEN_08BIT, 0xc3}, + {0x55a5, CRL_REG_LEN_08BIT, 0xb5}, + {0x55a6, CRL_REG_LEN_08BIT, 0x43}, + {0x55a7, CRL_REG_LEN_08BIT, 0x38}, + {0x55a8, CRL_REG_LEN_08BIT, 0x5f}, + {0x55a9, CRL_REG_LEN_08BIT, 0x4b}, + {0x55aa, CRL_REG_LEN_08BIT, 0xf0}, + {0x55ab, CRL_REG_LEN_08BIT, 0x10}, + {0x5581, CRL_REG_LEN_08BIT, 0x52}, + {0x5300, CRL_REG_LEN_08BIT, 0x01}, + {0x5301, CRL_REG_LEN_08BIT, 0x00}, + {0x5302, CRL_REG_LEN_08BIT, 0x00}, + {0x5303, CRL_REG_LEN_08BIT, 0x0e}, + {0x5304, CRL_REG_LEN_08BIT, 0x00}, + {0x5305, CRL_REG_LEN_08BIT, 0x0e}, + {0x5306, CRL_REG_LEN_08BIT, 0x00}, + {0x5307, CRL_REG_LEN_08BIT, 0x36}, + {0x5308, CRL_REG_LEN_08BIT, 0x00}, + {0x5309, CRL_REG_LEN_08BIT, 0xd9}, + {0x530a, CRL_REG_LEN_08BIT, 0x00}, + {0x530b, CRL_REG_LEN_08BIT, 0x0f}, + {0x530c, CRL_REG_LEN_08BIT, 0x00}, + {0x530d, CRL_REG_LEN_08BIT, 0x2c}, + {0x530e, CRL_REG_LEN_08BIT, 0x00}, + {0x530f, CRL_REG_LEN_08BIT, 0x59}, + {0x5310, CRL_REG_LEN_08BIT, 0x00}, + {0x5311, CRL_REG_LEN_08BIT, 0x7b}, + {0x5312, CRL_REG_LEN_08BIT, 0x00}, + {0x5313, CRL_REG_LEN_08BIT, 0x22}, + {0x5314, CRL_REG_LEN_08BIT, 0x00}, + {0x5315, CRL_REG_LEN_08BIT, 0xd5}, + {0x5316, CRL_REG_LEN_08BIT, 0x00}, + {0x5317, CRL_REG_LEN_08BIT, 0x13}, + {0x5318, CRL_REG_LEN_08BIT, 0x00}, + {0x5319, CRL_REG_LEN_08BIT, 0x18}, + {0x531a, CRL_REG_LEN_08BIT, 0x00}, + {0x531b, CRL_REG_LEN_08BIT, 0x26}, + {0x531c, CRL_REG_LEN_08BIT, 0x00}, + {0x531d, CRL_REG_LEN_08BIT, 0xdc}, + {0x531e, CRL_REG_LEN_08BIT, 0x00}, + {0x531f, CRL_REG_LEN_08BIT, 0x02}, + {0x5320, CRL_REG_LEN_08BIT, 0x00}, + {0x5321, CRL_REG_LEN_08BIT, 0x24}, + {0x5322, CRL_REG_LEN_08BIT, 0x00}, + {0x5323, CRL_REG_LEN_08BIT, 0x56}, + {0x5324, CRL_REG_LEN_08BIT, 0x00}, + {0x5325, CRL_REG_LEN_08BIT, 0x85}, + {0x5326, CRL_REG_LEN_08BIT, 0x00}, + {0x5327, CRL_REG_LEN_08BIT, 0x20}, + {0x5609, CRL_REG_LEN_08BIT, 0x01}, + {0x560a, CRL_REG_LEN_08BIT, 0x40}, + {0x560b, CRL_REG_LEN_08BIT, 0x01}, + {0x560c, CRL_REG_LEN_08BIT, 
0x40}, + {0x560d, CRL_REG_LEN_08BIT, 0x00}, + {0x560e, CRL_REG_LEN_08BIT, 0xfa}, + {0x560f, CRL_REG_LEN_08BIT, 0x00}, + {0x5610, CRL_REG_LEN_08BIT, 0xfa}, + {0x5611, CRL_REG_LEN_08BIT, 0x02}, + {0x5612, CRL_REG_LEN_08BIT, 0x80}, + {0x5613, CRL_REG_LEN_08BIT, 0x02}, + {0x5614, CRL_REG_LEN_08BIT, 0x80}, + {0x5615, CRL_REG_LEN_08BIT, 0x01}, + {0x5616, CRL_REG_LEN_08BIT, 0x2c}, + {0x5617, CRL_REG_LEN_08BIT, 0x01}, + {0x5618, CRL_REG_LEN_08BIT, 0x2c}, + {0x563b, CRL_REG_LEN_08BIT, 0x01}, + {0x563c, CRL_REG_LEN_08BIT, 0x01}, + {0x563d, CRL_REG_LEN_08BIT, 0x01}, + {0x563e, CRL_REG_LEN_08BIT, 0x01}, + {0x563f, CRL_REG_LEN_08BIT, 0x03}, + {0x5640, CRL_REG_LEN_08BIT, 0x03}, + {0x5641, CRL_REG_LEN_08BIT, 0x03}, + {0x5642, CRL_REG_LEN_08BIT, 0x05}, + {0x5643, CRL_REG_LEN_08BIT, 0x09}, + {0x5644, CRL_REG_LEN_08BIT, 0x05}, + {0x5645, CRL_REG_LEN_08BIT, 0x05}, + {0x5646, CRL_REG_LEN_08BIT, 0x05}, + {0x5647, CRL_REG_LEN_08BIT, 0x05}, + {0x5651, CRL_REG_LEN_08BIT, 0x00}, + {0x5652, CRL_REG_LEN_08BIT, 0x80}, + {0x521a, CRL_REG_LEN_08BIT, 0x01}, + {0x521b, CRL_REG_LEN_08BIT, 0x03}, + {0x521c, CRL_REG_LEN_08BIT, 0x06}, + {0x521d, CRL_REG_LEN_08BIT, 0x0a}, + {0x521e, CRL_REG_LEN_08BIT, 0x0e}, + {0x521f, CRL_REG_LEN_08BIT, 0x12}, + {0x5220, CRL_REG_LEN_08BIT, 0x16}, + {0x5223, CRL_REG_LEN_08BIT, 0x02}, + {0x5225, CRL_REG_LEN_08BIT, 0x04}, + {0x5227, CRL_REG_LEN_08BIT, 0x08}, + {0x5229, CRL_REG_LEN_08BIT, 0x0c}, + {0x522b, CRL_REG_LEN_08BIT, 0x12}, + {0x522d, CRL_REG_LEN_08BIT, 0x18}, + {0x522f, CRL_REG_LEN_08BIT, 0x1e}, + {0x5241, CRL_REG_LEN_08BIT, 0x04}, + {0x5242, CRL_REG_LEN_08BIT, 0x01}, + {0x5243, CRL_REG_LEN_08BIT, 0x03}, + {0x5244, CRL_REG_LEN_08BIT, 0x06}, + {0x5245, CRL_REG_LEN_08BIT, 0x0a}, + {0x5246, CRL_REG_LEN_08BIT, 0x0e}, + {0x5247, CRL_REG_LEN_08BIT, 0x12}, + {0x5248, CRL_REG_LEN_08BIT, 0x16}, + {0x524a, CRL_REG_LEN_08BIT, 0x03}, + {0x524c, CRL_REG_LEN_08BIT, 0x04}, + {0x524e, CRL_REG_LEN_08BIT, 0x08}, + {0x5250, CRL_REG_LEN_08BIT, 0x0c}, + {0x5252, CRL_REG_LEN_08BIT, 
0x12}, + {0x5254, CRL_REG_LEN_08BIT, 0x18}, + {0x5256, CRL_REG_LEN_08BIT, 0x1e}, + {0x4606, CRL_REG_LEN_08BIT, 0x07}, + {0x4607, CRL_REG_LEN_08BIT, 0x71}, + {0x460a, CRL_REG_LEN_08BIT, 0x02}, + {0x460b, CRL_REG_LEN_08BIT, 0x70}, + {0x460c, CRL_REG_LEN_08BIT, 0x00}, + {0x4620, CRL_REG_LEN_08BIT, 0x0e}, + {0x4700, CRL_REG_LEN_08BIT, 0x04}, + {0x4701, CRL_REG_LEN_08BIT, 0x00}, + {0x4702, CRL_REG_LEN_08BIT, 0x01}, + {0x4004, CRL_REG_LEN_08BIT, 0x04}, + {0x4005, CRL_REG_LEN_08BIT, 0x18}, + {0x4001, CRL_REG_LEN_08BIT, 0x06}, + {0x4050, CRL_REG_LEN_08BIT, 0x22}, + {0x4051, CRL_REG_LEN_08BIT, 0x24}, + {0x4052, CRL_REG_LEN_08BIT, 0x02}, + {0x4057, CRL_REG_LEN_08BIT, 0x9c}, + {0x405a, CRL_REG_LEN_08BIT, 0x00}, + /*FSIN enable*/ + {0x3832, CRL_REG_LEN_08BIT, 0x00}, + {0x3833, CRL_REG_LEN_08BIT, 0x02}, + {0x3834, CRL_REG_LEN_08BIT, 0x02}, + {0x3835, CRL_REG_LEN_08BIT, 0x08}, + {0x302e, CRL_REG_LEN_08BIT, 0x00}, + /*FSIN end*/ + {0x4202, CRL_REG_LEN_08BIT, 0x02}, + {0x3023, CRL_REG_LEN_08BIT, 0x10}, + {0x3003, CRL_REG_LEN_08BIT, 0x20}, + {0x3004, CRL_REG_LEN_08BIT, 0x21}, + {0x3005, CRL_REG_LEN_08BIT, 0x14}, + {0x3006, CRL_REG_LEN_08BIT, 0x11}, + {0x3024, CRL_REG_LEN_08BIT, 0x01}, + {0x0100, CRL_REG_LEN_08BIT, 0x01}, + {0x0100, CRL_REG_LEN_08BIT, 0x01}, + {0x6f10, CRL_REG_LEN_08BIT, 0x07}, + {0x6f11, CRL_REG_LEN_08BIT, 0x82}, + {0x6f12, CRL_REG_LEN_08BIT, 0x04}, + {0x6f13, CRL_REG_LEN_08BIT, 0x00}, + {0x6f14, CRL_REG_LEN_08BIT, 0x1f}, + {0x6f15, CRL_REG_LEN_08BIT, 0xdd}, + {0x6f16, CRL_REG_LEN_08BIT, 0x04}, + {0x6f17, CRL_REG_LEN_08BIT, 0x04}, + {0x6f18, CRL_REG_LEN_08BIT, 0x36}, + {0x6f19, CRL_REG_LEN_08BIT, 0x66}, + {0x6f1a, CRL_REG_LEN_08BIT, 0x04}, + {0x6f1b, CRL_REG_LEN_08BIT, 0x08}, + {0x6f1c, CRL_REG_LEN_08BIT, 0x0c}, + {0x6f1d, CRL_REG_LEN_08BIT, 0xe7}, + {0x6f1e, CRL_REG_LEN_08BIT, 0x04}, + {0x6f1f, CRL_REG_LEN_08BIT, 0x0c}, + {0xd000, CRL_REG_LEN_08BIT, 0x19}, + {0xd001, CRL_REG_LEN_08BIT, 0xa0}, + {0xd002, CRL_REG_LEN_08BIT, 0x00}, + {0xd003, CRL_REG_LEN_08BIT, 
0x01}, + {0xd004, CRL_REG_LEN_08BIT, 0xa9}, + {0xd005, CRL_REG_LEN_08BIT, 0xad}, + {0xd006, CRL_REG_LEN_08BIT, 0x10}, + {0xd007, CRL_REG_LEN_08BIT, 0x40}, + {0xd008, CRL_REG_LEN_08BIT, 0x44}, + {0xd009, CRL_REG_LEN_08BIT, 0x00}, + {0xd00a, CRL_REG_LEN_08BIT, 0x68}, + {0xd00b, CRL_REG_LEN_08BIT, 0x00}, + {0xd00c, CRL_REG_LEN_08BIT, 0x15}, + {0xd00d, CRL_REG_LEN_08BIT, 0x00}, + {0xd00e, CRL_REG_LEN_08BIT, 0x00}, + {0xd00f, CRL_REG_LEN_08BIT, 0x00}, + {0xd010, CRL_REG_LEN_08BIT, 0x19}, + {0xd011, CRL_REG_LEN_08BIT, 0xa0}, + {0xd012, CRL_REG_LEN_08BIT, 0x00}, + {0xd013, CRL_REG_LEN_08BIT, 0x01}, + {0xd014, CRL_REG_LEN_08BIT, 0xa9}, + {0xd015, CRL_REG_LEN_08BIT, 0xad}, + {0xd016, CRL_REG_LEN_08BIT, 0x13}, + {0xd017, CRL_REG_LEN_08BIT, 0xd0}, + {0xd018, CRL_REG_LEN_08BIT, 0x44}, + {0xd019, CRL_REG_LEN_08BIT, 0x00}, + {0xd01a, CRL_REG_LEN_08BIT, 0x68}, + {0xd01b, CRL_REG_LEN_08BIT, 0x00}, + {0xd01c, CRL_REG_LEN_08BIT, 0x15}, + {0xd01d, CRL_REG_LEN_08BIT, 0x00}, + {0xd01e, CRL_REG_LEN_08BIT, 0x00}, + {0xd01f, CRL_REG_LEN_08BIT, 0x00}, + {0xd020, CRL_REG_LEN_08BIT, 0x19}, + {0xd021, CRL_REG_LEN_08BIT, 0xa0}, + {0xd022, CRL_REG_LEN_08BIT, 0x00}, + {0xd023, CRL_REG_LEN_08BIT, 0x01}, + {0xd024, CRL_REG_LEN_08BIT, 0xa9}, + {0xd025, CRL_REG_LEN_08BIT, 0xad}, + {0xd026, CRL_REG_LEN_08BIT, 0x14}, + {0xd027, CRL_REG_LEN_08BIT, 0xb8}, + {0xd028, CRL_REG_LEN_08BIT, 0x44}, + {0xd029, CRL_REG_LEN_08BIT, 0x00}, + {0xd02a, CRL_REG_LEN_08BIT, 0x68}, + {0xd02b, CRL_REG_LEN_08BIT, 0x00}, + {0xd02c, CRL_REG_LEN_08BIT, 0x15}, + {0xd02d, CRL_REG_LEN_08BIT, 0x00}, + {0xd02e, CRL_REG_LEN_08BIT, 0x00}, + {0xd02f, CRL_REG_LEN_08BIT, 0x00}, + {0xd030, CRL_REG_LEN_08BIT, 0x19}, + {0xd031, CRL_REG_LEN_08BIT, 0xa0}, + {0xd032, CRL_REG_LEN_08BIT, 0x00}, + {0xd033, CRL_REG_LEN_08BIT, 0x01}, + {0xd034, CRL_REG_LEN_08BIT, 0xa9}, + {0xd035, CRL_REG_LEN_08BIT, 0xad}, + {0xd036, CRL_REG_LEN_08BIT, 0x14}, + {0xd037, CRL_REG_LEN_08BIT, 0xdc}, + {0xd038, CRL_REG_LEN_08BIT, 0x44}, + {0xd039, CRL_REG_LEN_08BIT, 
0x00}, + {0xd03a, CRL_REG_LEN_08BIT, 0x68}, + {0xd03b, CRL_REG_LEN_08BIT, 0x00}, + {0xd03c, CRL_REG_LEN_08BIT, 0x15}, + {0xd03d, CRL_REG_LEN_08BIT, 0x00}, + {0xd03e, CRL_REG_LEN_08BIT, 0x00}, + {0xd03f, CRL_REG_LEN_08BIT, 0x00}, + {0xd040, CRL_REG_LEN_08BIT, 0x9c}, + {0xd041, CRL_REG_LEN_08BIT, 0x21}, + {0xd042, CRL_REG_LEN_08BIT, 0xff}, + {0xd043, CRL_REG_LEN_08BIT, 0xe4}, + {0xd044, CRL_REG_LEN_08BIT, 0xd4}, + {0xd045, CRL_REG_LEN_08BIT, 0x01}, + {0xd046, CRL_REG_LEN_08BIT, 0x48}, + {0xd047, CRL_REG_LEN_08BIT, 0x00}, + {0xd048, CRL_REG_LEN_08BIT, 0xd4}, + {0xd049, CRL_REG_LEN_08BIT, 0x01}, + {0xd04a, CRL_REG_LEN_08BIT, 0x50}, + {0xd04b, CRL_REG_LEN_08BIT, 0x04}, + {0xd04c, CRL_REG_LEN_08BIT, 0xd4}, + {0xd04d, CRL_REG_LEN_08BIT, 0x01}, + {0xd04e, CRL_REG_LEN_08BIT, 0x60}, + {0xd04f, CRL_REG_LEN_08BIT, 0x08}, + {0xd050, CRL_REG_LEN_08BIT, 0xd4}, + {0xd051, CRL_REG_LEN_08BIT, 0x01}, + {0xd052, CRL_REG_LEN_08BIT, 0x70}, + {0xd053, CRL_REG_LEN_08BIT, 0x0c}, + {0xd054, CRL_REG_LEN_08BIT, 0xd4}, + {0xd055, CRL_REG_LEN_08BIT, 0x01}, + {0xd056, CRL_REG_LEN_08BIT, 0x80}, + {0xd057, CRL_REG_LEN_08BIT, 0x10}, + {0xd058, CRL_REG_LEN_08BIT, 0x19}, + {0xd059, CRL_REG_LEN_08BIT, 0xc0}, + {0xd05a, CRL_REG_LEN_08BIT, 0x00}, + {0xd05b, CRL_REG_LEN_08BIT, 0x01}, + {0xd05c, CRL_REG_LEN_08BIT, 0xa9}, + {0xd05d, CRL_REG_LEN_08BIT, 0xce}, + {0xd05e, CRL_REG_LEN_08BIT, 0x02}, + {0xd05f, CRL_REG_LEN_08BIT, 0xa4}, + {0xd060, CRL_REG_LEN_08BIT, 0x9c}, + {0xd061, CRL_REG_LEN_08BIT, 0xa0}, + {0xd062, CRL_REG_LEN_08BIT, 0x00}, + {0xd063, CRL_REG_LEN_08BIT, 0x00}, + {0xd064, CRL_REG_LEN_08BIT, 0x84}, + {0xd065, CRL_REG_LEN_08BIT, 0x6e}, + {0xd066, CRL_REG_LEN_08BIT, 0x00}, + {0xd067, CRL_REG_LEN_08BIT, 0x00}, + {0xd068, CRL_REG_LEN_08BIT, 0xd8}, + {0xd069, CRL_REG_LEN_08BIT, 0x03}, + {0xd06a, CRL_REG_LEN_08BIT, 0x28}, + {0xd06b, CRL_REG_LEN_08BIT, 0x76}, + {0xd06c, CRL_REG_LEN_08BIT, 0x1a}, + {0xd06d, CRL_REG_LEN_08BIT, 0x00}, + {0xd06e, CRL_REG_LEN_08BIT, 0x00}, + {0xd06f, CRL_REG_LEN_08BIT, 
0x01}, + {0xd070, CRL_REG_LEN_08BIT, 0xaa}, + {0xd071, CRL_REG_LEN_08BIT, 0x10}, + {0xd072, CRL_REG_LEN_08BIT, 0x03}, + {0xd073, CRL_REG_LEN_08BIT, 0xf0}, + {0xd074, CRL_REG_LEN_08BIT, 0x18}, + {0xd075, CRL_REG_LEN_08BIT, 0x60}, + {0xd076, CRL_REG_LEN_08BIT, 0x00}, + {0xd077, CRL_REG_LEN_08BIT, 0x01}, + {0xd078, CRL_REG_LEN_08BIT, 0xa8}, + {0xd079, CRL_REG_LEN_08BIT, 0x63}, + {0xd07a, CRL_REG_LEN_08BIT, 0x07}, + {0xd07b, CRL_REG_LEN_08BIT, 0x80}, + {0xd07c, CRL_REG_LEN_08BIT, 0xe0}, + {0xd07d, CRL_REG_LEN_08BIT, 0xa0}, + {0xd07e, CRL_REG_LEN_08BIT, 0x00}, + {0xd07f, CRL_REG_LEN_08BIT, 0x04}, + {0xd080, CRL_REG_LEN_08BIT, 0x18}, + {0xd081, CRL_REG_LEN_08BIT, 0xc0}, + {0xd082, CRL_REG_LEN_08BIT, 0x00}, + {0xd083, CRL_REG_LEN_08BIT, 0x00}, + {0xd084, CRL_REG_LEN_08BIT, 0xa8}, + {0xd085, CRL_REG_LEN_08BIT, 0xc6}, + {0xd086, CRL_REG_LEN_08BIT, 0x00}, + {0xd087, CRL_REG_LEN_08BIT, 0x00}, + {0xd088, CRL_REG_LEN_08BIT, 0x8c}, + {0xd089, CRL_REG_LEN_08BIT, 0x63}, + {0xd08a, CRL_REG_LEN_08BIT, 0x00}, + {0xd08b, CRL_REG_LEN_08BIT, 0x00}, + {0xd08c, CRL_REG_LEN_08BIT, 0xd4}, + {0xd08d, CRL_REG_LEN_08BIT, 0x01}, + {0xd08e, CRL_REG_LEN_08BIT, 0x28}, + {0xd08f, CRL_REG_LEN_08BIT, 0x14}, + {0xd090, CRL_REG_LEN_08BIT, 0xd4}, + {0xd091, CRL_REG_LEN_08BIT, 0x01}, + {0xd092, CRL_REG_LEN_08BIT, 0x30}, + {0xd093, CRL_REG_LEN_08BIT, 0x18}, + {0xd094, CRL_REG_LEN_08BIT, 0x07}, + {0xd095, CRL_REG_LEN_08BIT, 0xff}, + {0xd096, CRL_REG_LEN_08BIT, 0xf8}, + {0xd097, CRL_REG_LEN_08BIT, 0xfd}, + {0xd098, CRL_REG_LEN_08BIT, 0x9c}, + {0xd099, CRL_REG_LEN_08BIT, 0x80}, + {0xd09a, CRL_REG_LEN_08BIT, 0x00}, + {0xd09b, CRL_REG_LEN_08BIT, 0x03}, + {0xd09c, CRL_REG_LEN_08BIT, 0xa5}, + {0xd09d, CRL_REG_LEN_08BIT, 0x6b}, + {0xd09e, CRL_REG_LEN_08BIT, 0x00}, + {0xd09f, CRL_REG_LEN_08BIT, 0xff}, + {0xd0a0, CRL_REG_LEN_08BIT, 0x18}, + {0xd0a1, CRL_REG_LEN_08BIT, 0xc0}, + {0xd0a2, CRL_REG_LEN_08BIT, 0x00}, + {0xd0a3, CRL_REG_LEN_08BIT, 0x01}, + {0xd0a4, CRL_REG_LEN_08BIT, 0xa8}, + {0xd0a5, CRL_REG_LEN_08BIT, 
0xc6}, + {0xd0a6, CRL_REG_LEN_08BIT, 0x01}, + {0xd0a7, CRL_REG_LEN_08BIT, 0x02}, + {0xd0a8, CRL_REG_LEN_08BIT, 0xe1}, + {0xd0a9, CRL_REG_LEN_08BIT, 0x6b}, + {0xd0aa, CRL_REG_LEN_08BIT, 0x58}, + {0xd0ab, CRL_REG_LEN_08BIT, 0x00}, + {0xd0ac, CRL_REG_LEN_08BIT, 0x84}, + {0xd0ad, CRL_REG_LEN_08BIT, 0x8e}, + {0xd0ae, CRL_REG_LEN_08BIT, 0x00}, + {0xd0af, CRL_REG_LEN_08BIT, 0x00}, + {0xd0b0, CRL_REG_LEN_08BIT, 0xe1}, + {0xd0b1, CRL_REG_LEN_08BIT, 0x6b}, + {0xd0b2, CRL_REG_LEN_08BIT, 0x30}, + {0xd0b3, CRL_REG_LEN_08BIT, 0x00}, + {0xd0b4, CRL_REG_LEN_08BIT, 0x98}, + {0xd0b5, CRL_REG_LEN_08BIT, 0xb0}, + {0xd0b6, CRL_REG_LEN_08BIT, 0x00}, + {0xd0b7, CRL_REG_LEN_08BIT, 0x00}, + {0xd0b8, CRL_REG_LEN_08BIT, 0x8c}, + {0xd0b9, CRL_REG_LEN_08BIT, 0x64}, + {0xd0ba, CRL_REG_LEN_08BIT, 0x00}, + {0xd0bb, CRL_REG_LEN_08BIT, 0x6e}, + {0xd0bc, CRL_REG_LEN_08BIT, 0xe5}, + {0xd0bd, CRL_REG_LEN_08BIT, 0xa5}, + {0xd0be, CRL_REG_LEN_08BIT, 0x18}, + {0xd0bf, CRL_REG_LEN_08BIT, 0x00}, + {0xd0c0, CRL_REG_LEN_08BIT, 0x10}, + {0xd0c1, CRL_REG_LEN_08BIT, 0x00}, + {0xd0c2, CRL_REG_LEN_08BIT, 0x00}, + {0xd0c3, CRL_REG_LEN_08BIT, 0x06}, + {0xd0c4, CRL_REG_LEN_08BIT, 0x95}, + {0xd0c5, CRL_REG_LEN_08BIT, 0x8b}, + {0xd0c6, CRL_REG_LEN_08BIT, 0x00}, + {0xd0c7, CRL_REG_LEN_08BIT, 0x00}, + {0xd0c8, CRL_REG_LEN_08BIT, 0x94}, + {0xd0c9, CRL_REG_LEN_08BIT, 0xa4}, + {0xd0ca, CRL_REG_LEN_08BIT, 0x00}, + {0xd0cb, CRL_REG_LEN_08BIT, 0x70}, + {0xd0cc, CRL_REG_LEN_08BIT, 0xe5}, + {0xd0cd, CRL_REG_LEN_08BIT, 0x65}, + {0xd0ce, CRL_REG_LEN_08BIT, 0x60}, + {0xd0cf, CRL_REG_LEN_08BIT, 0x00}, + {0xd0d0, CRL_REG_LEN_08BIT, 0x0c}, + {0xd0d1, CRL_REG_LEN_08BIT, 0x00}, + {0xd0d2, CRL_REG_LEN_08BIT, 0x00}, + {0xd0d3, CRL_REG_LEN_08BIT, 0x62}, + {0xd0d4, CRL_REG_LEN_08BIT, 0x15}, + {0xd0d5, CRL_REG_LEN_08BIT, 0x00}, + {0xd0d6, CRL_REG_LEN_08BIT, 0x00}, + {0xd0d7, CRL_REG_LEN_08BIT, 0x00}, + {0xd0d8, CRL_REG_LEN_08BIT, 0x18}, + {0xd0d9, CRL_REG_LEN_08BIT, 0x60}, + {0xd0da, CRL_REG_LEN_08BIT, 0x80}, + {0xd0db, CRL_REG_LEN_08BIT, 
0x06}, + {0xd0dc, CRL_REG_LEN_08BIT, 0xa8}, + {0xd0dd, CRL_REG_LEN_08BIT, 0x83}, + {0xd0de, CRL_REG_LEN_08BIT, 0x38}, + {0xd0df, CRL_REG_LEN_08BIT, 0x29}, + {0xd0e0, CRL_REG_LEN_08BIT, 0xa8}, + {0xd0e1, CRL_REG_LEN_08BIT, 0xe3}, + {0xd0e2, CRL_REG_LEN_08BIT, 0x40}, + {0xd0e3, CRL_REG_LEN_08BIT, 0x08}, + {0xd0e4, CRL_REG_LEN_08BIT, 0x8c}, + {0xd0e5, CRL_REG_LEN_08BIT, 0x84}, + {0xd0e6, CRL_REG_LEN_08BIT, 0x00}, + {0xd0e7, CRL_REG_LEN_08BIT, 0x00}, + {0xd0e8, CRL_REG_LEN_08BIT, 0xa8}, + {0xd0e9, CRL_REG_LEN_08BIT, 0xa3}, + {0xd0ea, CRL_REG_LEN_08BIT, 0x40}, + {0xd0eb, CRL_REG_LEN_08BIT, 0x09}, + {0xd0ec, CRL_REG_LEN_08BIT, 0xa8}, + {0xd0ed, CRL_REG_LEN_08BIT, 0xc3}, + {0xd0ee, CRL_REG_LEN_08BIT, 0x38}, + {0xd0ef, CRL_REG_LEN_08BIT, 0x2a}, + {0xd0f0, CRL_REG_LEN_08BIT, 0xd8}, + {0xd0f1, CRL_REG_LEN_08BIT, 0x07}, + {0xd0f2, CRL_REG_LEN_08BIT, 0x20}, + {0xd0f3, CRL_REG_LEN_08BIT, 0x00}, + {0xd0f4, CRL_REG_LEN_08BIT, 0x8c}, + {0xd0f5, CRL_REG_LEN_08BIT, 0x66}, + {0xd0f6, CRL_REG_LEN_08BIT, 0x00}, + {0xd0f7, CRL_REG_LEN_08BIT, 0x00}, + {0xd0f8, CRL_REG_LEN_08BIT, 0xd8}, + {0xd0f9, CRL_REG_LEN_08BIT, 0x05}, + {0xd0fa, CRL_REG_LEN_08BIT, 0x18}, + {0xd0fb, CRL_REG_LEN_08BIT, 0x00}, + {0xd0fc, CRL_REG_LEN_08BIT, 0x18}, + {0xd0fd, CRL_REG_LEN_08BIT, 0x60}, + {0xd0fe, CRL_REG_LEN_08BIT, 0x00}, + {0xd0ff, CRL_REG_LEN_08BIT, 0x01}, + {0xd100, CRL_REG_LEN_08BIT, 0x98}, + {0xd101, CRL_REG_LEN_08BIT, 0x90}, + {0xd102, CRL_REG_LEN_08BIT, 0x00}, + {0xd103, CRL_REG_LEN_08BIT, 0x00}, + {0xd104, CRL_REG_LEN_08BIT, 0x84}, + {0xd105, CRL_REG_LEN_08BIT, 0xae}, + {0xd106, CRL_REG_LEN_08BIT, 0x00}, + {0xd107, CRL_REG_LEN_08BIT, 0x00}, + {0xd108, CRL_REG_LEN_08BIT, 0xa8}, + {0xd109, CRL_REG_LEN_08BIT, 0x63}, + {0xd10a, CRL_REG_LEN_08BIT, 0x06}, + {0xd10b, CRL_REG_LEN_08BIT, 0x4c}, + {0xd10c, CRL_REG_LEN_08BIT, 0x9c}, + {0xd10d, CRL_REG_LEN_08BIT, 0xc0}, + {0xd10e, CRL_REG_LEN_08BIT, 0x00}, + {0xd10f, CRL_REG_LEN_08BIT, 0x00}, + {0xd110, CRL_REG_LEN_08BIT, 0xd8}, + {0xd111, CRL_REG_LEN_08BIT, 
0x03}, + {0xd112, CRL_REG_LEN_08BIT, 0x30}, + {0xd113, CRL_REG_LEN_08BIT, 0x00}, + {0xd114, CRL_REG_LEN_08BIT, 0x8c}, + {0xd115, CRL_REG_LEN_08BIT, 0x65}, + {0xd116, CRL_REG_LEN_08BIT, 0x00}, + {0xd117, CRL_REG_LEN_08BIT, 0x6e}, + {0xd118, CRL_REG_LEN_08BIT, 0xe5}, + {0xd119, CRL_REG_LEN_08BIT, 0x84}, + {0xd11a, CRL_REG_LEN_08BIT, 0x18}, + {0xd11b, CRL_REG_LEN_08BIT, 0x00}, + {0xd11c, CRL_REG_LEN_08BIT, 0x10}, + {0xd11d, CRL_REG_LEN_08BIT, 0x00}, + {0xd11e, CRL_REG_LEN_08BIT, 0x00}, + {0xd11f, CRL_REG_LEN_08BIT, 0x07}, + {0xd120, CRL_REG_LEN_08BIT, 0x18}, + {0xd121, CRL_REG_LEN_08BIT, 0x80}, + {0xd122, CRL_REG_LEN_08BIT, 0x80}, + {0xd123, CRL_REG_LEN_08BIT, 0x06}, + {0xd124, CRL_REG_LEN_08BIT, 0x94}, + {0xd125, CRL_REG_LEN_08BIT, 0x65}, + {0xd126, CRL_REG_LEN_08BIT, 0x00}, + {0xd127, CRL_REG_LEN_08BIT, 0x70}, + {0xd128, CRL_REG_LEN_08BIT, 0xe5}, + {0xd129, CRL_REG_LEN_08BIT, 0x43}, + {0xd12a, CRL_REG_LEN_08BIT, 0x60}, + {0xd12b, CRL_REG_LEN_08BIT, 0x00}, + {0xd12c, CRL_REG_LEN_08BIT, 0x0c}, + {0xd12d, CRL_REG_LEN_08BIT, 0x00}, + {0xd12e, CRL_REG_LEN_08BIT, 0x00}, + {0xd12f, CRL_REG_LEN_08BIT, 0x3e}, + {0xd130, CRL_REG_LEN_08BIT, 0xa8}, + {0xd131, CRL_REG_LEN_08BIT, 0x64}, + {0xd132, CRL_REG_LEN_08BIT, 0x38}, + {0xd133, CRL_REG_LEN_08BIT, 0x24}, + {0xd134, CRL_REG_LEN_08BIT, 0x18}, + {0xd135, CRL_REG_LEN_08BIT, 0x80}, + {0xd136, CRL_REG_LEN_08BIT, 0x80}, + {0xd137, CRL_REG_LEN_08BIT, 0x06}, + {0xd138, CRL_REG_LEN_08BIT, 0xa8}, + {0xd139, CRL_REG_LEN_08BIT, 0x64}, + {0xd13a, CRL_REG_LEN_08BIT, 0x38}, + {0xd13b, CRL_REG_LEN_08BIT, 0x24}, + {0xd13c, CRL_REG_LEN_08BIT, 0x8c}, + {0xd13d, CRL_REG_LEN_08BIT, 0x63}, + {0xd13e, CRL_REG_LEN_08BIT, 0x00}, + {0xd13f, CRL_REG_LEN_08BIT, 0x00}, + {0xd140, CRL_REG_LEN_08BIT, 0xa4}, + {0xd141, CRL_REG_LEN_08BIT, 0x63}, + {0xd142, CRL_REG_LEN_08BIT, 0x00}, + {0xd143, CRL_REG_LEN_08BIT, 0x40}, + {0xd144, CRL_REG_LEN_08BIT, 0xbc}, + {0xd145, CRL_REG_LEN_08BIT, 0x23}, + {0xd146, CRL_REG_LEN_08BIT, 0x00}, + {0xd147, CRL_REG_LEN_08BIT, 
0x00}, + {0xd148, CRL_REG_LEN_08BIT, 0x0c}, + {0xd149, CRL_REG_LEN_08BIT, 0x00}, + {0xd14a, CRL_REG_LEN_08BIT, 0x00}, + {0xd14b, CRL_REG_LEN_08BIT, 0x2a}, + {0xd14c, CRL_REG_LEN_08BIT, 0xa8}, + {0xd14d, CRL_REG_LEN_08BIT, 0x64}, + {0xd14e, CRL_REG_LEN_08BIT, 0x6e}, + {0xd14f, CRL_REG_LEN_08BIT, 0x44}, + {0xd150, CRL_REG_LEN_08BIT, 0x19}, + {0xd151, CRL_REG_LEN_08BIT, 0x00}, + {0xd152, CRL_REG_LEN_08BIT, 0x80}, + {0xd153, CRL_REG_LEN_08BIT, 0x06}, + {0xd154, CRL_REG_LEN_08BIT, 0xa8}, + {0xd155, CRL_REG_LEN_08BIT, 0xe8}, + {0xd156, CRL_REG_LEN_08BIT, 0x3d}, + {0xd157, CRL_REG_LEN_08BIT, 0x05}, + {0xd158, CRL_REG_LEN_08BIT, 0x8c}, + {0xd159, CRL_REG_LEN_08BIT, 0x67}, + {0xd15a, CRL_REG_LEN_08BIT, 0x00}, + {0xd15b, CRL_REG_LEN_08BIT, 0x00}, + {0xd15c, CRL_REG_LEN_08BIT, 0xb8}, + {0xd15d, CRL_REG_LEN_08BIT, 0x63}, + {0xd15e, CRL_REG_LEN_08BIT, 0x00}, + {0xd15f, CRL_REG_LEN_08BIT, 0x18}, + {0xd160, CRL_REG_LEN_08BIT, 0xb8}, + {0xd161, CRL_REG_LEN_08BIT, 0x63}, + {0xd162, CRL_REG_LEN_08BIT, 0x00}, + {0xd163, CRL_REG_LEN_08BIT, 0x98}, + {0xd164, CRL_REG_LEN_08BIT, 0xbc}, + {0xd165, CRL_REG_LEN_08BIT, 0x03}, + {0xd166, CRL_REG_LEN_08BIT, 0x00}, + {0xd167, CRL_REG_LEN_08BIT, 0x00}, + {0xd168, CRL_REG_LEN_08BIT, 0x10}, + {0xd169, CRL_REG_LEN_08BIT, 0x00}, + {0xd16a, CRL_REG_LEN_08BIT, 0x00}, + {0xd16b, CRL_REG_LEN_08BIT, 0x10}, + {0xd16c, CRL_REG_LEN_08BIT, 0xa9}, + {0xd16d, CRL_REG_LEN_08BIT, 0x48}, + {0xd16e, CRL_REG_LEN_08BIT, 0x67}, + {0xd16f, CRL_REG_LEN_08BIT, 0x02}, + {0xd170, CRL_REG_LEN_08BIT, 0xb8}, + {0xd171, CRL_REG_LEN_08BIT, 0xa3}, + {0xd172, CRL_REG_LEN_08BIT, 0x00}, + {0xd173, CRL_REG_LEN_08BIT, 0x19}, + {0xd174, CRL_REG_LEN_08BIT, 0x8c}, + {0xd175, CRL_REG_LEN_08BIT, 0x8a}, + {0xd176, CRL_REG_LEN_08BIT, 0x00}, + {0xd177, CRL_REG_LEN_08BIT, 0x00}, + {0xd178, CRL_REG_LEN_08BIT, 0xa9}, + {0xd179, CRL_REG_LEN_08BIT, 0x68}, + {0xd17a, CRL_REG_LEN_08BIT, 0x67}, + {0xd17b, CRL_REG_LEN_08BIT, 0x03}, + {0xd17c, CRL_REG_LEN_08BIT, 0xb8}, + {0xd17d, CRL_REG_LEN_08BIT, 
0xc4}, + {0xd17e, CRL_REG_LEN_08BIT, 0x00}, + {0xd17f, CRL_REG_LEN_08BIT, 0x08}, + {0xd180, CRL_REG_LEN_08BIT, 0x8c}, + {0xd181, CRL_REG_LEN_08BIT, 0x6b}, + {0xd182, CRL_REG_LEN_08BIT, 0x00}, + {0xd183, CRL_REG_LEN_08BIT, 0x00}, + {0xd184, CRL_REG_LEN_08BIT, 0xb8}, + {0xd185, CRL_REG_LEN_08BIT, 0x85}, + {0xd186, CRL_REG_LEN_08BIT, 0x00}, + {0xd187, CRL_REG_LEN_08BIT, 0x98}, + {0xd188, CRL_REG_LEN_08BIT, 0xe0}, + {0xd189, CRL_REG_LEN_08BIT, 0x63}, + {0xd18a, CRL_REG_LEN_08BIT, 0x30}, + {0xd18b, CRL_REG_LEN_08BIT, 0x04}, + {0xd18c, CRL_REG_LEN_08BIT, 0xe0}, + {0xd18d, CRL_REG_LEN_08BIT, 0x64}, + {0xd18e, CRL_REG_LEN_08BIT, 0x18}, + {0xd18f, CRL_REG_LEN_08BIT, 0x00}, + {0xd190, CRL_REG_LEN_08BIT, 0xa4}, + {0xd191, CRL_REG_LEN_08BIT, 0x83}, + {0xd192, CRL_REG_LEN_08BIT, 0xff}, + {0xd193, CRL_REG_LEN_08BIT, 0xff}, + {0xd194, CRL_REG_LEN_08BIT, 0xb8}, + {0xd195, CRL_REG_LEN_08BIT, 0x64}, + {0xd196, CRL_REG_LEN_08BIT, 0x00}, + {0xd197, CRL_REG_LEN_08BIT, 0x48}, + {0xd198, CRL_REG_LEN_08BIT, 0xd8}, + {0xd199, CRL_REG_LEN_08BIT, 0x0a}, + {0xd19a, CRL_REG_LEN_08BIT, 0x18}, + {0xd19b, CRL_REG_LEN_08BIT, 0x00}, + {0xd19c, CRL_REG_LEN_08BIT, 0xd8}, + {0xd19d, CRL_REG_LEN_08BIT, 0x0b}, + {0xd19e, CRL_REG_LEN_08BIT, 0x20}, + {0xd19f, CRL_REG_LEN_08BIT, 0x00}, + {0xd1a0, CRL_REG_LEN_08BIT, 0x9c}, + {0xd1a1, CRL_REG_LEN_08BIT, 0x60}, + {0xd1a2, CRL_REG_LEN_08BIT, 0x00}, + {0xd1a3, CRL_REG_LEN_08BIT, 0x00}, + {0xd1a4, CRL_REG_LEN_08BIT, 0xd8}, + {0xd1a5, CRL_REG_LEN_08BIT, 0x07}, + {0xd1a6, CRL_REG_LEN_08BIT, 0x18}, + {0xd1a7, CRL_REG_LEN_08BIT, 0x00}, + {0xd1a8, CRL_REG_LEN_08BIT, 0xa8}, + {0xd1a9, CRL_REG_LEN_08BIT, 0x68}, + {0xd1aa, CRL_REG_LEN_08BIT, 0x38}, + {0xd1ab, CRL_REG_LEN_08BIT, 0x22}, + {0xd1ac, CRL_REG_LEN_08BIT, 0x9c}, + {0xd1ad, CRL_REG_LEN_08BIT, 0x80}, + {0xd1ae, CRL_REG_LEN_08BIT, 0x00}, + {0xd1af, CRL_REG_LEN_08BIT, 0x70}, + {0xd1b0, CRL_REG_LEN_08BIT, 0xa8}, + {0xd1b1, CRL_REG_LEN_08BIT, 0xe8}, + {0xd1b2, CRL_REG_LEN_08BIT, 0x38}, + {0xd1b3, CRL_REG_LEN_08BIT, 
0x43}, + {0xd1b4, CRL_REG_LEN_08BIT, 0xd8}, + {0xd1b5, CRL_REG_LEN_08BIT, 0x03}, + {0xd1b6, CRL_REG_LEN_08BIT, 0x20}, + {0xd1b7, CRL_REG_LEN_08BIT, 0x00}, + {0xd1b8, CRL_REG_LEN_08BIT, 0x9c}, + {0xd1b9, CRL_REG_LEN_08BIT, 0xa0}, + {0xd1ba, CRL_REG_LEN_08BIT, 0x00}, + {0xd1bb, CRL_REG_LEN_08BIT, 0x00}, + {0xd1bc, CRL_REG_LEN_08BIT, 0xa8}, + {0xd1bd, CRL_REG_LEN_08BIT, 0xc8}, + {0xd1be, CRL_REG_LEN_08BIT, 0x38}, + {0xd1bf, CRL_REG_LEN_08BIT, 0x42}, + {0xd1c0, CRL_REG_LEN_08BIT, 0x8c}, + {0xd1c1, CRL_REG_LEN_08BIT, 0x66}, + {0xd1c2, CRL_REG_LEN_08BIT, 0x00}, + {0xd1c3, CRL_REG_LEN_08BIT, 0x00}, + {0xd1c4, CRL_REG_LEN_08BIT, 0x9c}, + {0xd1c5, CRL_REG_LEN_08BIT, 0xa5}, + {0xd1c6, CRL_REG_LEN_08BIT, 0x00}, + {0xd1c7, CRL_REG_LEN_08BIT, 0x01}, + {0xd1c8, CRL_REG_LEN_08BIT, 0xb8}, + {0xd1c9, CRL_REG_LEN_08BIT, 0x83}, + {0xd1ca, CRL_REG_LEN_08BIT, 0x00}, + {0xd1cb, CRL_REG_LEN_08BIT, 0x08}, + {0xd1cc, CRL_REG_LEN_08BIT, 0xa4}, + {0xd1cd, CRL_REG_LEN_08BIT, 0xa5}, + {0xd1ce, CRL_REG_LEN_08BIT, 0x00}, + {0xd1cf, CRL_REG_LEN_08BIT, 0xff}, + {0xd1d0, CRL_REG_LEN_08BIT, 0x8c}, + {0xd1d1, CRL_REG_LEN_08BIT, 0x67}, + {0xd1d2, CRL_REG_LEN_08BIT, 0x00}, + {0xd1d3, CRL_REG_LEN_08BIT, 0x00}, + {0xd1d4, CRL_REG_LEN_08BIT, 0xe0}, + {0xd1d5, CRL_REG_LEN_08BIT, 0x63}, + {0xd1d6, CRL_REG_LEN_08BIT, 0x20}, + {0xd1d7, CRL_REG_LEN_08BIT, 0x00}, + {0xd1d8, CRL_REG_LEN_08BIT, 0xa4}, + {0xd1d9, CRL_REG_LEN_08BIT, 0x63}, + {0xd1da, CRL_REG_LEN_08BIT, 0xff}, + {0xd1db, CRL_REG_LEN_08BIT, 0xff}, + {0xd1dc, CRL_REG_LEN_08BIT, 0xbc}, + {0xd1dd, CRL_REG_LEN_08BIT, 0x43}, + {0xd1de, CRL_REG_LEN_08BIT, 0x00}, + {0xd1df, CRL_REG_LEN_08BIT, 0x07}, + {0xd1e0, CRL_REG_LEN_08BIT, 0x0c}, + {0xd1e1, CRL_REG_LEN_08BIT, 0x00}, + {0xd1e2, CRL_REG_LEN_08BIT, 0x00}, + {0xd1e3, CRL_REG_LEN_08BIT, 0x5b}, + {0xd1e4, CRL_REG_LEN_08BIT, 0xbc}, + {0xd1e5, CRL_REG_LEN_08BIT, 0x05}, + {0xd1e6, CRL_REG_LEN_08BIT, 0x00}, + {0xd1e7, CRL_REG_LEN_08BIT, 0x02}, + {0xd1e8, CRL_REG_LEN_08BIT, 0x03}, + {0xd1e9, CRL_REG_LEN_08BIT, 
0xff}, + {0xd1ea, CRL_REG_LEN_08BIT, 0xff}, + {0xd1eb, CRL_REG_LEN_08BIT, 0xf6}, + {0xd1ec, CRL_REG_LEN_08BIT, 0x9c}, + {0xd1ed, CRL_REG_LEN_08BIT, 0xa0}, + {0xd1ee, CRL_REG_LEN_08BIT, 0x00}, + {0xd1ef, CRL_REG_LEN_08BIT, 0x00}, + {0xd1f0, CRL_REG_LEN_08BIT, 0xa8}, + {0xd1f1, CRL_REG_LEN_08BIT, 0xa4}, + {0xd1f2, CRL_REG_LEN_08BIT, 0x55}, + {0xd1f3, CRL_REG_LEN_08BIT, 0x86}, + {0xd1f4, CRL_REG_LEN_08BIT, 0x8c}, + {0xd1f5, CRL_REG_LEN_08BIT, 0x63}, + {0xd1f6, CRL_REG_LEN_08BIT, 0x00}, + {0xd1f7, CRL_REG_LEN_08BIT, 0x00}, + {0xd1f8, CRL_REG_LEN_08BIT, 0xa8}, + {0xd1f9, CRL_REG_LEN_08BIT, 0xc4}, + {0xd1fa, CRL_REG_LEN_08BIT, 0x6e}, + {0xd1fb, CRL_REG_LEN_08BIT, 0x45}, + {0xd1fc, CRL_REG_LEN_08BIT, 0xa8}, + {0xd1fd, CRL_REG_LEN_08BIT, 0xe4}, + {0xd1fe, CRL_REG_LEN_08BIT, 0x55}, + {0xd1ff, CRL_REG_LEN_08BIT, 0x87}, + {0xd200, CRL_REG_LEN_08BIT, 0xd8}, + {0xd201, CRL_REG_LEN_08BIT, 0x05}, + {0xd202, CRL_REG_LEN_08BIT, 0x18}, + {0xd203, CRL_REG_LEN_08BIT, 0x00}, + {0xd204, CRL_REG_LEN_08BIT, 0x8c}, + {0xd205, CRL_REG_LEN_08BIT, 0x66}, + {0xd206, CRL_REG_LEN_08BIT, 0x00}, + {0xd207, CRL_REG_LEN_08BIT, 0x00}, + {0xd208, CRL_REG_LEN_08BIT, 0xa8}, + {0xd209, CRL_REG_LEN_08BIT, 0xa4}, + {0xd20a, CRL_REG_LEN_08BIT, 0x6e}, + {0xd20b, CRL_REG_LEN_08BIT, 0x46}, + {0xd20c, CRL_REG_LEN_08BIT, 0xd8}, + {0xd20d, CRL_REG_LEN_08BIT, 0x07}, + {0xd20e, CRL_REG_LEN_08BIT, 0x18}, + {0xd20f, CRL_REG_LEN_08BIT, 0x00}, + {0xd210, CRL_REG_LEN_08BIT, 0xa8}, + {0xd211, CRL_REG_LEN_08BIT, 0x84}, + {0xd212, CRL_REG_LEN_08BIT, 0x55}, + {0xd213, CRL_REG_LEN_08BIT, 0x88}, + {0xd214, CRL_REG_LEN_08BIT, 0x8c}, + {0xd215, CRL_REG_LEN_08BIT, 0x65}, + {0xd216, CRL_REG_LEN_08BIT, 0x00}, + {0xd217, CRL_REG_LEN_08BIT, 0x00}, + {0xd218, CRL_REG_LEN_08BIT, 0xd8}, + {0xd219, CRL_REG_LEN_08BIT, 0x04}, + {0xd21a, CRL_REG_LEN_08BIT, 0x18}, + {0xd21b, CRL_REG_LEN_08BIT, 0x00}, + {0xd21c, CRL_REG_LEN_08BIT, 0x03}, + {0xd21d, CRL_REG_LEN_08BIT, 0xff}, + {0xd21e, CRL_REG_LEN_08BIT, 0xff}, + {0xd21f, CRL_REG_LEN_08BIT, 
0xce}, + {0xd220, CRL_REG_LEN_08BIT, 0x19}, + {0xd221, CRL_REG_LEN_08BIT, 0x00}, + {0xd222, CRL_REG_LEN_08BIT, 0x80}, + {0xd223, CRL_REG_LEN_08BIT, 0x06}, + {0xd224, CRL_REG_LEN_08BIT, 0x8c}, + {0xd225, CRL_REG_LEN_08BIT, 0x63}, + {0xd226, CRL_REG_LEN_08BIT, 0x00}, + {0xd227, CRL_REG_LEN_08BIT, 0x00}, + {0xd228, CRL_REG_LEN_08BIT, 0xa4}, + {0xd229, CRL_REG_LEN_08BIT, 0x63}, + {0xd22a, CRL_REG_LEN_08BIT, 0x00}, + {0xd22b, CRL_REG_LEN_08BIT, 0x40}, + {0xd22c, CRL_REG_LEN_08BIT, 0xbc}, + {0xd22d, CRL_REG_LEN_08BIT, 0x23}, + {0xd22e, CRL_REG_LEN_08BIT, 0x00}, + {0xd22f, CRL_REG_LEN_08BIT, 0x00}, + {0xd230, CRL_REG_LEN_08BIT, 0x13}, + {0xd231, CRL_REG_LEN_08BIT, 0xff}, + {0xd232, CRL_REG_LEN_08BIT, 0xff}, + {0xd233, CRL_REG_LEN_08BIT, 0xc8}, + {0xd234, CRL_REG_LEN_08BIT, 0x9d}, + {0xd235, CRL_REG_LEN_08BIT, 0x00}, + {0xd236, CRL_REG_LEN_08BIT, 0x00}, + {0xd237, CRL_REG_LEN_08BIT, 0x40}, + {0xd238, CRL_REG_LEN_08BIT, 0xa8}, + {0xd239, CRL_REG_LEN_08BIT, 0x64}, + {0xd23a, CRL_REG_LEN_08BIT, 0x55}, + {0xd23b, CRL_REG_LEN_08BIT, 0x86}, + {0xd23c, CRL_REG_LEN_08BIT, 0xa8}, + {0xd23d, CRL_REG_LEN_08BIT, 0xa4}, + {0xd23e, CRL_REG_LEN_08BIT, 0x55}, + {0xd23f, CRL_REG_LEN_08BIT, 0x87}, + {0xd240, CRL_REG_LEN_08BIT, 0xd8}, + {0xd241, CRL_REG_LEN_08BIT, 0x03}, + {0xd242, CRL_REG_LEN_08BIT, 0x40}, + {0xd243, CRL_REG_LEN_08BIT, 0x00}, + {0xd244, CRL_REG_LEN_08BIT, 0xa8}, + {0xd245, CRL_REG_LEN_08BIT, 0x64}, + {0xd246, CRL_REG_LEN_08BIT, 0x55}, + {0xd247, CRL_REG_LEN_08BIT, 0x88}, + {0xd248, CRL_REG_LEN_08BIT, 0xd8}, + {0xd249, CRL_REG_LEN_08BIT, 0x05}, + {0xd24a, CRL_REG_LEN_08BIT, 0x40}, + {0xd24b, CRL_REG_LEN_08BIT, 0x00}, + {0xd24c, CRL_REG_LEN_08BIT, 0xd8}, + {0xd24d, CRL_REG_LEN_08BIT, 0x03}, + {0xd24e, CRL_REG_LEN_08BIT, 0x40}, + {0xd24f, CRL_REG_LEN_08BIT, 0x00}, + {0xd250, CRL_REG_LEN_08BIT, 0x03}, + {0xd251, CRL_REG_LEN_08BIT, 0xff}, + {0xd252, CRL_REG_LEN_08BIT, 0xff}, + {0xd253, CRL_REG_LEN_08BIT, 0xc1}, + {0xd254, CRL_REG_LEN_08BIT, 0x19}, + {0xd255, CRL_REG_LEN_08BIT, 
0x00}, + {0xd256, CRL_REG_LEN_08BIT, 0x80}, + {0xd257, CRL_REG_LEN_08BIT, 0x06}, + {0xd258, CRL_REG_LEN_08BIT, 0x94}, + {0xd259, CRL_REG_LEN_08BIT, 0x84}, + {0xd25a, CRL_REG_LEN_08BIT, 0x00}, + {0xd25b, CRL_REG_LEN_08BIT, 0x72}, + {0xd25c, CRL_REG_LEN_08BIT, 0xe5}, + {0xd25d, CRL_REG_LEN_08BIT, 0xa4}, + {0xd25e, CRL_REG_LEN_08BIT, 0x60}, + {0xd25f, CRL_REG_LEN_08BIT, 0x00}, + {0xd260, CRL_REG_LEN_08BIT, 0x0c}, + {0xd261, CRL_REG_LEN_08BIT, 0x00}, + {0xd262, CRL_REG_LEN_08BIT, 0x00}, + {0xd263, CRL_REG_LEN_08BIT, 0x3f}, + {0xd264, CRL_REG_LEN_08BIT, 0x9d}, + {0xd265, CRL_REG_LEN_08BIT, 0x60}, + {0xd266, CRL_REG_LEN_08BIT, 0x01}, + {0xd267, CRL_REG_LEN_08BIT, 0x00}, + {0xd268, CRL_REG_LEN_08BIT, 0x85}, + {0xd269, CRL_REG_LEN_08BIT, 0x4e}, + {0xd26a, CRL_REG_LEN_08BIT, 0x00}, + {0xd26b, CRL_REG_LEN_08BIT, 0x00}, + {0xd26c, CRL_REG_LEN_08BIT, 0x98}, + {0xd26d, CRL_REG_LEN_08BIT, 0x70}, + {0xd26e, CRL_REG_LEN_08BIT, 0x00}, + {0xd26f, CRL_REG_LEN_08BIT, 0x00}, + {0xd270, CRL_REG_LEN_08BIT, 0x8c}, + {0xd271, CRL_REG_LEN_08BIT, 0x8a}, + {0xd272, CRL_REG_LEN_08BIT, 0x00}, + {0xd273, CRL_REG_LEN_08BIT, 0x6f}, + {0xd274, CRL_REG_LEN_08BIT, 0xe5}, + {0xd275, CRL_REG_LEN_08BIT, 0x63}, + {0xd276, CRL_REG_LEN_08BIT, 0x20}, + {0xd277, CRL_REG_LEN_08BIT, 0x00}, + {0xd278, CRL_REG_LEN_08BIT, 0x10}, + {0xd279, CRL_REG_LEN_08BIT, 0x00}, + {0xd27a, CRL_REG_LEN_08BIT, 0x00}, + {0xd27b, CRL_REG_LEN_08BIT, 0x07}, + {0xd27c, CRL_REG_LEN_08BIT, 0x15}, + {0xd27d, CRL_REG_LEN_08BIT, 0x00}, + {0xd27e, CRL_REG_LEN_08BIT, 0x00}, + {0xd27f, CRL_REG_LEN_08BIT, 0x00}, + {0xd280, CRL_REG_LEN_08BIT, 0x8c}, + {0xd281, CRL_REG_LEN_08BIT, 0xaa}, + {0xd282, CRL_REG_LEN_08BIT, 0x00}, + {0xd283, CRL_REG_LEN_08BIT, 0x6e}, + {0xd284, CRL_REG_LEN_08BIT, 0xe0}, + {0xd285, CRL_REG_LEN_08BIT, 0x63}, + {0xd286, CRL_REG_LEN_08BIT, 0x28}, + {0xd287, CRL_REG_LEN_08BIT, 0x02}, + {0xd288, CRL_REG_LEN_08BIT, 0xe0}, + {0xd289, CRL_REG_LEN_08BIT, 0x84}, + {0xd28a, CRL_REG_LEN_08BIT, 0x28}, + {0xd28b, CRL_REG_LEN_08BIT, 
0x02}, + {0xd28c, CRL_REG_LEN_08BIT, 0x07}, + {0xd28d, CRL_REG_LEN_08BIT, 0xff}, + {0xd28e, CRL_REG_LEN_08BIT, 0xf8}, + {0xd28f, CRL_REG_LEN_08BIT, 0x66}, + {0xd290, CRL_REG_LEN_08BIT, 0xe0}, + {0xd291, CRL_REG_LEN_08BIT, 0x63}, + {0xd292, CRL_REG_LEN_08BIT, 0x5b}, + {0xd293, CRL_REG_LEN_08BIT, 0x06}, + {0xd294, CRL_REG_LEN_08BIT, 0x8c}, + {0xd295, CRL_REG_LEN_08BIT, 0x6a}, + {0xd296, CRL_REG_LEN_08BIT, 0x00}, + {0xd297, CRL_REG_LEN_08BIT, 0x77}, + {0xd298, CRL_REG_LEN_08BIT, 0xe0}, + {0xd299, CRL_REG_LEN_08BIT, 0x63}, + {0xd29a, CRL_REG_LEN_08BIT, 0x5b}, + {0xd29b, CRL_REG_LEN_08BIT, 0x06}, + {0xd29c, CRL_REG_LEN_08BIT, 0xbd}, + {0xd29d, CRL_REG_LEN_08BIT, 0x63}, + {0xd29e, CRL_REG_LEN_08BIT, 0x00}, + {0xd29f, CRL_REG_LEN_08BIT, 0x00}, + {0xd2a0, CRL_REG_LEN_08BIT, 0x0c}, + {0xd2a1, CRL_REG_LEN_08BIT, 0x00}, + {0xd2a2, CRL_REG_LEN_08BIT, 0x00}, + {0xd2a3, CRL_REG_LEN_08BIT, 0x3c}, + {0xd2a4, CRL_REG_LEN_08BIT, 0x15}, + {0xd2a5, CRL_REG_LEN_08BIT, 0x00}, + {0xd2a6, CRL_REG_LEN_08BIT, 0x00}, + {0xd2a7, CRL_REG_LEN_08BIT, 0x00}, + {0xd2a8, CRL_REG_LEN_08BIT, 0x8c}, + {0xd2a9, CRL_REG_LEN_08BIT, 0x8a}, + {0xd2aa, CRL_REG_LEN_08BIT, 0x00}, + {0xd2ab, CRL_REG_LEN_08BIT, 0x78}, + {0xd2ac, CRL_REG_LEN_08BIT, 0xb8}, + {0xd2ad, CRL_REG_LEN_08BIT, 0x63}, + {0xd2ae, CRL_REG_LEN_08BIT, 0x00}, + {0xd2af, CRL_REG_LEN_08BIT, 0x88}, + {0xd2b0, CRL_REG_LEN_08BIT, 0xe1}, + {0xd2b1, CRL_REG_LEN_08BIT, 0x64}, + {0xd2b2, CRL_REG_LEN_08BIT, 0x5b}, + {0xd2b3, CRL_REG_LEN_08BIT, 0x06}, + {0xd2b4, CRL_REG_LEN_08BIT, 0xbd}, + {0xd2b5, CRL_REG_LEN_08BIT, 0x6b}, + {0xd2b6, CRL_REG_LEN_08BIT, 0x00}, + {0xd2b7, CRL_REG_LEN_08BIT, 0x00}, + {0xd2b8, CRL_REG_LEN_08BIT, 0x0c}, + {0xd2b9, CRL_REG_LEN_08BIT, 0x00}, + {0xd2ba, CRL_REG_LEN_08BIT, 0x00}, + {0xd2bb, CRL_REG_LEN_08BIT, 0x34}, + {0xd2bc, CRL_REG_LEN_08BIT, 0xd4}, + {0xd2bd, CRL_REG_LEN_08BIT, 0x01}, + {0xd2be, CRL_REG_LEN_08BIT, 0x18}, + {0xd2bf, CRL_REG_LEN_08BIT, 0x14}, + {0xd2c0, CRL_REG_LEN_08BIT, 0xb9}, + {0xd2c1, CRL_REG_LEN_08BIT, 
0x6b}, + {0xd2c2, CRL_REG_LEN_08BIT, 0x00}, + {0xd2c3, CRL_REG_LEN_08BIT, 0x88}, + {0xd2c4, CRL_REG_LEN_08BIT, 0x85}, + {0xd2c5, CRL_REG_LEN_08BIT, 0x01}, + {0xd2c6, CRL_REG_LEN_08BIT, 0x00}, + {0xd2c7, CRL_REG_LEN_08BIT, 0x14}, + {0xd2c8, CRL_REG_LEN_08BIT, 0xbd}, + {0xd2c9, CRL_REG_LEN_08BIT, 0x68}, + {0xd2ca, CRL_REG_LEN_08BIT, 0x00}, + {0xd2cb, CRL_REG_LEN_08BIT, 0x00}, + {0xd2cc, CRL_REG_LEN_08BIT, 0x0c}, + {0xd2cd, CRL_REG_LEN_08BIT, 0x00}, + {0xd2ce, CRL_REG_LEN_08BIT, 0x00}, + {0xd2cf, CRL_REG_LEN_08BIT, 0x2c}, + {0xd2d0, CRL_REG_LEN_08BIT, 0xd4}, + {0xd2d1, CRL_REG_LEN_08BIT, 0x01}, + {0xd2d2, CRL_REG_LEN_08BIT, 0x58}, + {0xd2d3, CRL_REG_LEN_08BIT, 0x18}, + {0xd2d4, CRL_REG_LEN_08BIT, 0x84}, + {0xd2d5, CRL_REG_LEN_08BIT, 0x81}, + {0xd2d6, CRL_REG_LEN_08BIT, 0x00}, + {0xd2d7, CRL_REG_LEN_08BIT, 0x14}, + {0xd2d8, CRL_REG_LEN_08BIT, 0xbd}, + {0xd2d9, CRL_REG_LEN_08BIT, 0xa4}, + {0xd2da, CRL_REG_LEN_08BIT, 0x01}, + {0xd2db, CRL_REG_LEN_08BIT, 0x00}, + {0xd2dc, CRL_REG_LEN_08BIT, 0x10}, + {0xd2dd, CRL_REG_LEN_08BIT, 0x00}, + {0xd2de, CRL_REG_LEN_08BIT, 0x00}, + {0xd2df, CRL_REG_LEN_08BIT, 0x05}, + {0xd2e0, CRL_REG_LEN_08BIT, 0x84}, + {0xd2e1, CRL_REG_LEN_08BIT, 0xc1}, + {0xd2e2, CRL_REG_LEN_08BIT, 0x00}, + {0xd2e3, CRL_REG_LEN_08BIT, 0x18}, + {0xd2e4, CRL_REG_LEN_08BIT, 0x9c}, + {0xd2e5, CRL_REG_LEN_08BIT, 0xa0}, + {0xd2e6, CRL_REG_LEN_08BIT, 0x01}, + {0xd2e7, CRL_REG_LEN_08BIT, 0x00}, + {0xd2e8, CRL_REG_LEN_08BIT, 0xd4}, + {0xd2e9, CRL_REG_LEN_08BIT, 0x01}, + {0xd2ea, CRL_REG_LEN_08BIT, 0x28}, + {0xd2eb, CRL_REG_LEN_08BIT, 0x14}, + {0xd2ec, CRL_REG_LEN_08BIT, 0x84}, + {0xd2ed, CRL_REG_LEN_08BIT, 0xc1}, + {0xd2ee, CRL_REG_LEN_08BIT, 0x00}, + {0xd2ef, CRL_REG_LEN_08BIT, 0x18}, + {0xd2f0, CRL_REG_LEN_08BIT, 0xbd}, + {0xd2f1, CRL_REG_LEN_08BIT, 0x66}, + {0xd2f2, CRL_REG_LEN_08BIT, 0x00}, + {0xd2f3, CRL_REG_LEN_08BIT, 0x00}, + {0xd2f4, CRL_REG_LEN_08BIT, 0x0c}, + {0xd2f5, CRL_REG_LEN_08BIT, 0x00}, + {0xd2f6, CRL_REG_LEN_08BIT, 0x00}, + {0xd2f7, CRL_REG_LEN_08BIT, 
0x20}, + {0xd2f8, CRL_REG_LEN_08BIT, 0x9d}, + {0xd2f9, CRL_REG_LEN_08BIT, 0x00}, + {0xd2fa, CRL_REG_LEN_08BIT, 0x00}, + {0xd2fb, CRL_REG_LEN_08BIT, 0x00}, + {0xd2fc, CRL_REG_LEN_08BIT, 0x84}, + {0xd2fd, CRL_REG_LEN_08BIT, 0x61}, + {0xd2fe, CRL_REG_LEN_08BIT, 0x00}, + {0xd2ff, CRL_REG_LEN_08BIT, 0x18}, + {0xd300, CRL_REG_LEN_08BIT, 0xbd}, + {0xd301, CRL_REG_LEN_08BIT, 0xa3}, + {0xd302, CRL_REG_LEN_08BIT, 0x01}, + {0xd303, CRL_REG_LEN_08BIT, 0x00}, + {0xd304, CRL_REG_LEN_08BIT, 0x10}, + {0xd305, CRL_REG_LEN_08BIT, 0x00}, + {0xd306, CRL_REG_LEN_08BIT, 0x00}, + {0xd307, CRL_REG_LEN_08BIT, 0x03}, + {0xd308, CRL_REG_LEN_08BIT, 0x9c}, + {0xd309, CRL_REG_LEN_08BIT, 0x80}, + {0xd30a, CRL_REG_LEN_08BIT, 0x01}, + {0xd30b, CRL_REG_LEN_08BIT, 0x00}, + {0xd30c, CRL_REG_LEN_08BIT, 0xd4}, + {0xd30d, CRL_REG_LEN_08BIT, 0x01}, + {0xd30e, CRL_REG_LEN_08BIT, 0x20}, + {0xd30f, CRL_REG_LEN_08BIT, 0x18}, + {0xd310, CRL_REG_LEN_08BIT, 0x18}, + {0xd311, CRL_REG_LEN_08BIT, 0x60}, + {0xd312, CRL_REG_LEN_08BIT, 0x80}, + {0xd313, CRL_REG_LEN_08BIT, 0x06}, + {0xd314, CRL_REG_LEN_08BIT, 0x85}, + {0xd315, CRL_REG_LEN_08BIT, 0x01}, + {0xd316, CRL_REG_LEN_08BIT, 0x00}, + {0xd317, CRL_REG_LEN_08BIT, 0x14}, + {0xd318, CRL_REG_LEN_08BIT, 0xa8}, + {0xd319, CRL_REG_LEN_08BIT, 0x83}, + {0xd31a, CRL_REG_LEN_08BIT, 0x38}, + {0xd31b, CRL_REG_LEN_08BIT, 0x29}, + {0xd31c, CRL_REG_LEN_08BIT, 0xa8}, + {0xd31d, CRL_REG_LEN_08BIT, 0xc3}, + {0xd31e, CRL_REG_LEN_08BIT, 0x40}, + {0xd31f, CRL_REG_LEN_08BIT, 0x08}, + {0xd320, CRL_REG_LEN_08BIT, 0x8c}, + {0xd321, CRL_REG_LEN_08BIT, 0x84}, + {0xd322, CRL_REG_LEN_08BIT, 0x00}, + {0xd323, CRL_REG_LEN_08BIT, 0x00}, + {0xd324, CRL_REG_LEN_08BIT, 0xa8}, + {0xd325, CRL_REG_LEN_08BIT, 0xa3}, + {0xd326, CRL_REG_LEN_08BIT, 0x38}, + {0xd327, CRL_REG_LEN_08BIT, 0x2a}, + {0xd328, CRL_REG_LEN_08BIT, 0xa8}, + {0xd329, CRL_REG_LEN_08BIT, 0xe3}, + {0xd32a, CRL_REG_LEN_08BIT, 0x40}, + {0xd32b, CRL_REG_LEN_08BIT, 0x09}, + {0xd32c, CRL_REG_LEN_08BIT, 0xe0}, + {0xd32d, CRL_REG_LEN_08BIT, 
0x64}, + {0xd32e, CRL_REG_LEN_08BIT, 0x40}, + {0xd32f, CRL_REG_LEN_08BIT, 0x00}, + {0xd330, CRL_REG_LEN_08BIT, 0xd8}, + {0xd331, CRL_REG_LEN_08BIT, 0x06}, + {0xd332, CRL_REG_LEN_08BIT, 0x18}, + {0xd333, CRL_REG_LEN_08BIT, 0x00}, + {0xd334, CRL_REG_LEN_08BIT, 0x8c}, + {0xd335, CRL_REG_LEN_08BIT, 0x65}, + {0xd336, CRL_REG_LEN_08BIT, 0x00}, + {0xd337, CRL_REG_LEN_08BIT, 0x00}, + {0xd338, CRL_REG_LEN_08BIT, 0x84}, + {0xd339, CRL_REG_LEN_08BIT, 0x81}, + {0xd33a, CRL_REG_LEN_08BIT, 0x00}, + {0xd33b, CRL_REG_LEN_08BIT, 0x18}, + {0xd33c, CRL_REG_LEN_08BIT, 0xe3}, + {0xd33d, CRL_REG_LEN_08BIT, 0xe3}, + {0xd33e, CRL_REG_LEN_08BIT, 0x20}, + {0xd33f, CRL_REG_LEN_08BIT, 0x00}, + {0xd340, CRL_REG_LEN_08BIT, 0xd8}, + {0xd341, CRL_REG_LEN_08BIT, 0x07}, + {0xd342, CRL_REG_LEN_08BIT, 0xf8}, + {0xd343, CRL_REG_LEN_08BIT, 0x00}, + {0xd344, CRL_REG_LEN_08BIT, 0x03}, + {0xd345, CRL_REG_LEN_08BIT, 0xff}, + {0xd346, CRL_REG_LEN_08BIT, 0xff}, + {0xd347, CRL_REG_LEN_08BIT, 0x6f}, + {0xd348, CRL_REG_LEN_08BIT, 0x18}, + {0xd349, CRL_REG_LEN_08BIT, 0x60}, + {0xd34a, CRL_REG_LEN_08BIT, 0x00}, + {0xd34b, CRL_REG_LEN_08BIT, 0x01}, + {0xd34c, CRL_REG_LEN_08BIT, 0x0f}, + {0xd34d, CRL_REG_LEN_08BIT, 0xff}, + {0xd34e, CRL_REG_LEN_08BIT, 0xff}, + {0xd34f, CRL_REG_LEN_08BIT, 0x9d}, + {0xd350, CRL_REG_LEN_08BIT, 0x18}, + {0xd351, CRL_REG_LEN_08BIT, 0x60}, + {0xd352, CRL_REG_LEN_08BIT, 0x80}, + {0xd353, CRL_REG_LEN_08BIT, 0x06}, + {0xd354, CRL_REG_LEN_08BIT, 0x00}, + {0xd355, CRL_REG_LEN_08BIT, 0x00}, + {0xd356, CRL_REG_LEN_08BIT, 0x00}, + {0xd357, CRL_REG_LEN_08BIT, 0x11}, + {0xd358, CRL_REG_LEN_08BIT, 0xa8}, + {0xd359, CRL_REG_LEN_08BIT, 0x83}, + {0xd35a, CRL_REG_LEN_08BIT, 0x6e}, + {0xd35b, CRL_REG_LEN_08BIT, 0x43}, + {0xd35c, CRL_REG_LEN_08BIT, 0xe0}, + {0xd35d, CRL_REG_LEN_08BIT, 0x6c}, + {0xd35e, CRL_REG_LEN_08BIT, 0x28}, + {0xd35f, CRL_REG_LEN_08BIT, 0x02}, + {0xd360, CRL_REG_LEN_08BIT, 0xe0}, + {0xd361, CRL_REG_LEN_08BIT, 0x84}, + {0xd362, CRL_REG_LEN_08BIT, 0x28}, + {0xd363, CRL_REG_LEN_08BIT, 
0x02}, + {0xd364, CRL_REG_LEN_08BIT, 0x07}, + {0xd365, CRL_REG_LEN_08BIT, 0xff}, + {0xd366, CRL_REG_LEN_08BIT, 0xf8}, + {0xd367, CRL_REG_LEN_08BIT, 0x30}, + {0xd368, CRL_REG_LEN_08BIT, 0xb8}, + {0xd369, CRL_REG_LEN_08BIT, 0x63}, + {0xd36a, CRL_REG_LEN_08BIT, 0x00}, + {0xd36b, CRL_REG_LEN_08BIT, 0x08}, + {0xd36c, CRL_REG_LEN_08BIT, 0x03}, + {0xd36d, CRL_REG_LEN_08BIT, 0xff}, + {0xd36e, CRL_REG_LEN_08BIT, 0xff}, + {0xd36f, CRL_REG_LEN_08BIT, 0xc0}, + {0xd370, CRL_REG_LEN_08BIT, 0x85}, + {0xd371, CRL_REG_LEN_08BIT, 0x4e}, + {0xd372, CRL_REG_LEN_08BIT, 0x00}, + {0xd373, CRL_REG_LEN_08BIT, 0x00}, + {0xd374, CRL_REG_LEN_08BIT, 0x03}, + {0xd375, CRL_REG_LEN_08BIT, 0xff}, + {0xd376, CRL_REG_LEN_08BIT, 0xff}, + {0xd377, CRL_REG_LEN_08BIT, 0xe7}, + {0xd378, CRL_REG_LEN_08BIT, 0xd4}, + {0xd379, CRL_REG_LEN_08BIT, 0x01}, + {0xd37a, CRL_REG_LEN_08BIT, 0x40}, + {0xd37b, CRL_REG_LEN_08BIT, 0x18}, + {0xd37c, CRL_REG_LEN_08BIT, 0x9c}, + {0xd37d, CRL_REG_LEN_08BIT, 0x60}, + {0xd37e, CRL_REG_LEN_08BIT, 0x00}, + {0xd37f, CRL_REG_LEN_08BIT, 0x00}, + {0xd380, CRL_REG_LEN_08BIT, 0x03}, + {0xd381, CRL_REG_LEN_08BIT, 0xff}, + {0xd382, CRL_REG_LEN_08BIT, 0xff}, + {0xd383, CRL_REG_LEN_08BIT, 0xdb}, + {0xd384, CRL_REG_LEN_08BIT, 0xd4}, + {0xd385, CRL_REG_LEN_08BIT, 0x01}, + {0xd386, CRL_REG_LEN_08BIT, 0x18}, + {0xd387, CRL_REG_LEN_08BIT, 0x14}, + {0xd388, CRL_REG_LEN_08BIT, 0x03}, + {0xd389, CRL_REG_LEN_08BIT, 0xff}, + {0xd38a, CRL_REG_LEN_08BIT, 0xff}, + {0xd38b, CRL_REG_LEN_08BIT, 0xce}, + {0xd38c, CRL_REG_LEN_08BIT, 0x9d}, + {0xd38d, CRL_REG_LEN_08BIT, 0x6b}, + {0xd38e, CRL_REG_LEN_08BIT, 0x00}, + {0xd38f, CRL_REG_LEN_08BIT, 0xff}, + {0xd390, CRL_REG_LEN_08BIT, 0x03}, + {0xd391, CRL_REG_LEN_08BIT, 0xff}, + {0xd392, CRL_REG_LEN_08BIT, 0xff}, + {0xd393, CRL_REG_LEN_08BIT, 0xc6}, + {0xd394, CRL_REG_LEN_08BIT, 0x9c}, + {0xd395, CRL_REG_LEN_08BIT, 0x63}, + {0xd396, CRL_REG_LEN_08BIT, 0x00}, + {0xd397, CRL_REG_LEN_08BIT, 0xff}, + {0xd398, CRL_REG_LEN_08BIT, 0xa8}, + {0xd399, CRL_REG_LEN_08BIT, 
0xe3}, + {0xd39a, CRL_REG_LEN_08BIT, 0x38}, + {0xd39b, CRL_REG_LEN_08BIT, 0x0f}, + {0xd39c, CRL_REG_LEN_08BIT, 0x8c}, + {0xd39d, CRL_REG_LEN_08BIT, 0x84}, + {0xd39e, CRL_REG_LEN_08BIT, 0x00}, + {0xd39f, CRL_REG_LEN_08BIT, 0x00}, + {0xd3a0, CRL_REG_LEN_08BIT, 0xa8}, + {0xd3a1, CRL_REG_LEN_08BIT, 0xa3}, + {0xd3a2, CRL_REG_LEN_08BIT, 0x38}, + {0xd3a3, CRL_REG_LEN_08BIT, 0x0e}, + {0xd3a4, CRL_REG_LEN_08BIT, 0xa8}, + {0xd3a5, CRL_REG_LEN_08BIT, 0xc3}, + {0xd3a6, CRL_REG_LEN_08BIT, 0x6e}, + {0xd3a7, CRL_REG_LEN_08BIT, 0x42}, + {0xd3a8, CRL_REG_LEN_08BIT, 0xd8}, + {0xd3a9, CRL_REG_LEN_08BIT, 0x07}, + {0xd3aa, CRL_REG_LEN_08BIT, 0x20}, + {0xd3ab, CRL_REG_LEN_08BIT, 0x00}, + {0xd3ac, CRL_REG_LEN_08BIT, 0x8c}, + {0xd3ad, CRL_REG_LEN_08BIT, 0x66}, + {0xd3ae, CRL_REG_LEN_08BIT, 0x00}, + {0xd3af, CRL_REG_LEN_08BIT, 0x00}, + {0xd3b0, CRL_REG_LEN_08BIT, 0xd8}, + {0xd3b1, CRL_REG_LEN_08BIT, 0x05}, + {0xd3b2, CRL_REG_LEN_08BIT, 0x18}, + {0xd3b3, CRL_REG_LEN_08BIT, 0x00}, + {0xd3b4, CRL_REG_LEN_08BIT, 0x85}, + {0xd3b5, CRL_REG_LEN_08BIT, 0x21}, + {0xd3b6, CRL_REG_LEN_08BIT, 0x00}, + {0xd3b7, CRL_REG_LEN_08BIT, 0x00}, + {0xd3b8, CRL_REG_LEN_08BIT, 0x85}, + {0xd3b9, CRL_REG_LEN_08BIT, 0x41}, + {0xd3ba, CRL_REG_LEN_08BIT, 0x00}, + {0xd3bb, CRL_REG_LEN_08BIT, 0x04}, + {0xd3bc, CRL_REG_LEN_08BIT, 0x85}, + {0xd3bd, CRL_REG_LEN_08BIT, 0x81}, + {0xd3be, CRL_REG_LEN_08BIT, 0x00}, + {0xd3bf, CRL_REG_LEN_08BIT, 0x08}, + {0xd3c0, CRL_REG_LEN_08BIT, 0x85}, + {0xd3c1, CRL_REG_LEN_08BIT, 0xc1}, + {0xd3c2, CRL_REG_LEN_08BIT, 0x00}, + {0xd3c3, CRL_REG_LEN_08BIT, 0x0c}, + {0xd3c4, CRL_REG_LEN_08BIT, 0x86}, + {0xd3c5, CRL_REG_LEN_08BIT, 0x01}, + {0xd3c6, CRL_REG_LEN_08BIT, 0x00}, + {0xd3c7, CRL_REG_LEN_08BIT, 0x10}, + {0xd3c8, CRL_REG_LEN_08BIT, 0x44}, + {0xd3c9, CRL_REG_LEN_08BIT, 0x00}, + {0xd3ca, CRL_REG_LEN_08BIT, 0x48}, + {0xd3cb, CRL_REG_LEN_08BIT, 0x00}, + {0xd3cc, CRL_REG_LEN_08BIT, 0x9c}, + {0xd3cd, CRL_REG_LEN_08BIT, 0x21}, + {0xd3ce, CRL_REG_LEN_08BIT, 0x00}, + {0xd3cf, CRL_REG_LEN_08BIT, 
0x1c}, + {0xd3d0, CRL_REG_LEN_08BIT, 0x9c}, + {0xd3d1, CRL_REG_LEN_08BIT, 0x21}, + {0xd3d2, CRL_REG_LEN_08BIT, 0xff}, + {0xd3d3, CRL_REG_LEN_08BIT, 0xfc}, + {0xd3d4, CRL_REG_LEN_08BIT, 0xd4}, + {0xd3d5, CRL_REG_LEN_08BIT, 0x01}, + {0xd3d6, CRL_REG_LEN_08BIT, 0x48}, + {0xd3d7, CRL_REG_LEN_08BIT, 0x00}, + {0xd3d8, CRL_REG_LEN_08BIT, 0x18}, + {0xd3d9, CRL_REG_LEN_08BIT, 0x60}, + {0xd3da, CRL_REG_LEN_08BIT, 0x00}, + {0xd3db, CRL_REG_LEN_08BIT, 0x01}, + {0xd3dc, CRL_REG_LEN_08BIT, 0xa8}, + {0xd3dd, CRL_REG_LEN_08BIT, 0x63}, + {0xd3de, CRL_REG_LEN_08BIT, 0x07}, + {0xd3df, CRL_REG_LEN_08BIT, 0x80}, + {0xd3e0, CRL_REG_LEN_08BIT, 0x8c}, + {0xd3e1, CRL_REG_LEN_08BIT, 0x63}, + {0xd3e2, CRL_REG_LEN_08BIT, 0x00}, + {0xd3e3, CRL_REG_LEN_08BIT, 0x68}, + {0xd3e4, CRL_REG_LEN_08BIT, 0xbc}, + {0xd3e5, CRL_REG_LEN_08BIT, 0x03}, + {0xd3e6, CRL_REG_LEN_08BIT, 0x00}, + {0xd3e7, CRL_REG_LEN_08BIT, 0x00}, + {0xd3e8, CRL_REG_LEN_08BIT, 0x10}, + {0xd3e9, CRL_REG_LEN_08BIT, 0x00}, + {0xd3ea, CRL_REG_LEN_08BIT, 0x00}, + {0xd3eb, CRL_REG_LEN_08BIT, 0x0c}, + {0xd3ec, CRL_REG_LEN_08BIT, 0x15}, + {0xd3ed, CRL_REG_LEN_08BIT, 0x00}, + {0xd3ee, CRL_REG_LEN_08BIT, 0x00}, + {0xd3ef, CRL_REG_LEN_08BIT, 0x00}, + {0xd3f0, CRL_REG_LEN_08BIT, 0x07}, + {0xd3f1, CRL_REG_LEN_08BIT, 0xff}, + {0xd3f2, CRL_REG_LEN_08BIT, 0xd9}, + {0xd3f3, CRL_REG_LEN_08BIT, 0x98}, + {0xd3f4, CRL_REG_LEN_08BIT, 0x15}, + {0xd3f5, CRL_REG_LEN_08BIT, 0x00}, + {0xd3f6, CRL_REG_LEN_08BIT, 0x00}, + {0xd3f7, CRL_REG_LEN_08BIT, 0x00}, + {0xd3f8, CRL_REG_LEN_08BIT, 0x18}, + {0xd3f9, CRL_REG_LEN_08BIT, 0x60}, + {0xd3fa, CRL_REG_LEN_08BIT, 0x80}, + {0xd3fb, CRL_REG_LEN_08BIT, 0x06}, + {0xd3fc, CRL_REG_LEN_08BIT, 0xa8}, + {0xd3fd, CRL_REG_LEN_08BIT, 0x63}, + {0xd3fe, CRL_REG_LEN_08BIT, 0xc4}, + {0xd3ff, CRL_REG_LEN_08BIT, 0xb8}, + {0xd400, CRL_REG_LEN_08BIT, 0x8c}, + {0xd401, CRL_REG_LEN_08BIT, 0x63}, + {0xd402, CRL_REG_LEN_08BIT, 0x00}, + {0xd403, CRL_REG_LEN_08BIT, 0x00}, + {0xd404, CRL_REG_LEN_08BIT, 0xbc}, + {0xd405, CRL_REG_LEN_08BIT, 
0x23}, + {0xd406, CRL_REG_LEN_08BIT, 0x00}, + {0xd407, CRL_REG_LEN_08BIT, 0x01}, + {0xd408, CRL_REG_LEN_08BIT, 0x10}, + {0xd409, CRL_REG_LEN_08BIT, 0x00}, + {0xd40a, CRL_REG_LEN_08BIT, 0x00}, + {0xd40b, CRL_REG_LEN_08BIT, 0x25}, + {0xd40c, CRL_REG_LEN_08BIT, 0x9d}, + {0xd40d, CRL_REG_LEN_08BIT, 0x00}, + {0xd40e, CRL_REG_LEN_08BIT, 0x00}, + {0xd40f, CRL_REG_LEN_08BIT, 0x00}, + {0xd410, CRL_REG_LEN_08BIT, 0x00}, + {0xd411, CRL_REG_LEN_08BIT, 0x00}, + {0xd412, CRL_REG_LEN_08BIT, 0x00}, + {0xd413, CRL_REG_LEN_08BIT, 0x0b}, + {0xd414, CRL_REG_LEN_08BIT, 0xb8}, + {0xd415, CRL_REG_LEN_08BIT, 0xe8}, + {0xd416, CRL_REG_LEN_08BIT, 0x00}, + {0xd417, CRL_REG_LEN_08BIT, 0x02}, + {0xd418, CRL_REG_LEN_08BIT, 0x07}, + {0xd419, CRL_REG_LEN_08BIT, 0xff}, + {0xd41a, CRL_REG_LEN_08BIT, 0xd6}, + {0xd41b, CRL_REG_LEN_08BIT, 0x24}, + {0xd41c, CRL_REG_LEN_08BIT, 0x15}, + {0xd41d, CRL_REG_LEN_08BIT, 0x00}, + {0xd41e, CRL_REG_LEN_08BIT, 0x00}, + {0xd41f, CRL_REG_LEN_08BIT, 0x00}, + {0xd420, CRL_REG_LEN_08BIT, 0x18}, + {0xd421, CRL_REG_LEN_08BIT, 0x60}, + {0xd422, CRL_REG_LEN_08BIT, 0x80}, + {0xd423, CRL_REG_LEN_08BIT, 0x06}, + {0xd424, CRL_REG_LEN_08BIT, 0xa8}, + {0xd425, CRL_REG_LEN_08BIT, 0x63}, + {0xd426, CRL_REG_LEN_08BIT, 0xc4}, + {0xd427, CRL_REG_LEN_08BIT, 0xb8}, + {0xd428, CRL_REG_LEN_08BIT, 0x8c}, + {0xd429, CRL_REG_LEN_08BIT, 0x63}, + {0xd42a, CRL_REG_LEN_08BIT, 0x00}, + {0xd42b, CRL_REG_LEN_08BIT, 0x00}, + {0xd42c, CRL_REG_LEN_08BIT, 0xbc}, + {0xd42d, CRL_REG_LEN_08BIT, 0x23}, + {0xd42e, CRL_REG_LEN_08BIT, 0x00}, + {0xd42f, CRL_REG_LEN_08BIT, 0x01}, + {0xd430, CRL_REG_LEN_08BIT, 0x10}, + {0xd431, CRL_REG_LEN_08BIT, 0x00}, + {0xd432, CRL_REG_LEN_08BIT, 0x00}, + {0xd433, CRL_REG_LEN_08BIT, 0x1b}, + {0xd434, CRL_REG_LEN_08BIT, 0x9d}, + {0xd435, CRL_REG_LEN_08BIT, 0x00}, + {0xd436, CRL_REG_LEN_08BIT, 0x00}, + {0xd437, CRL_REG_LEN_08BIT, 0x00}, + {0xd438, CRL_REG_LEN_08BIT, 0xb8}, + {0xd439, CRL_REG_LEN_08BIT, 0xe8}, + {0xd43a, CRL_REG_LEN_08BIT, 0x00}, + {0xd43b, CRL_REG_LEN_08BIT, 
0x02}, + {0xd43c, CRL_REG_LEN_08BIT, 0x9c}, + {0xd43d, CRL_REG_LEN_08BIT, 0xc0}, + {0xd43e, CRL_REG_LEN_08BIT, 0x00}, + {0xd43f, CRL_REG_LEN_08BIT, 0x00}, + {0xd440, CRL_REG_LEN_08BIT, 0x18}, + {0xd441, CRL_REG_LEN_08BIT, 0xa0}, + {0xd442, CRL_REG_LEN_08BIT, 0x80}, + {0xd443, CRL_REG_LEN_08BIT, 0x06}, + {0xd444, CRL_REG_LEN_08BIT, 0xe0}, + {0xd445, CRL_REG_LEN_08BIT, 0x67}, + {0xd446, CRL_REG_LEN_08BIT, 0x30}, + {0xd447, CRL_REG_LEN_08BIT, 0x00}, + {0xd448, CRL_REG_LEN_08BIT, 0xa8}, + {0xd449, CRL_REG_LEN_08BIT, 0xa5}, + {0xd44a, CRL_REG_LEN_08BIT, 0xce}, + {0xd44b, CRL_REG_LEN_08BIT, 0xb0}, + {0xd44c, CRL_REG_LEN_08BIT, 0x19}, + {0xd44d, CRL_REG_LEN_08BIT, 0x60}, + {0xd44e, CRL_REG_LEN_08BIT, 0x00}, + {0xd44f, CRL_REG_LEN_08BIT, 0x01}, + {0xd450, CRL_REG_LEN_08BIT, 0xa9}, + {0xd451, CRL_REG_LEN_08BIT, 0x6b}, + {0xd452, CRL_REG_LEN_08BIT, 0x06}, + {0xd453, CRL_REG_LEN_08BIT, 0x14}, + {0xd454, CRL_REG_LEN_08BIT, 0xe0}, + {0xd455, CRL_REG_LEN_08BIT, 0x83}, + {0xd456, CRL_REG_LEN_08BIT, 0x28}, + {0xd457, CRL_REG_LEN_08BIT, 0x00}, + {0xd458, CRL_REG_LEN_08BIT, 0x9c}, + {0xd459, CRL_REG_LEN_08BIT, 0xc6}, + {0xd45a, CRL_REG_LEN_08BIT, 0x00}, + {0xd45b, CRL_REG_LEN_08BIT, 0x01}, + {0xd45c, CRL_REG_LEN_08BIT, 0xe0}, + {0xd45d, CRL_REG_LEN_08BIT, 0x63}, + {0xd45e, CRL_REG_LEN_08BIT, 0x18}, + {0xd45f, CRL_REG_LEN_08BIT, 0x00}, + {0xd460, CRL_REG_LEN_08BIT, 0x8c}, + {0xd461, CRL_REG_LEN_08BIT, 0x84}, + {0xd462, CRL_REG_LEN_08BIT, 0x00}, + {0xd463, CRL_REG_LEN_08BIT, 0x00}, + {0xd464, CRL_REG_LEN_08BIT, 0xe0}, + {0xd465, CRL_REG_LEN_08BIT, 0xa3}, + {0xd466, CRL_REG_LEN_08BIT, 0x58}, + {0xd467, CRL_REG_LEN_08BIT, 0x00}, + {0xd468, CRL_REG_LEN_08BIT, 0xa4}, + {0xd469, CRL_REG_LEN_08BIT, 0xc6}, + {0xd46a, CRL_REG_LEN_08BIT, 0x00}, + {0xd46b, CRL_REG_LEN_08BIT, 0xff}, + {0xd46c, CRL_REG_LEN_08BIT, 0xb8}, + {0xd46d, CRL_REG_LEN_08BIT, 0x64}, + {0xd46e, CRL_REG_LEN_08BIT, 0x00}, + {0xd46f, CRL_REG_LEN_08BIT, 0x18}, + {0xd470, CRL_REG_LEN_08BIT, 0xbc}, + {0xd471, CRL_REG_LEN_08BIT, 
0x46}, + {0xd472, CRL_REG_LEN_08BIT, 0x00}, + {0xd473, CRL_REG_LEN_08BIT, 0x03}, + {0xd474, CRL_REG_LEN_08BIT, 0x94}, + {0xd475, CRL_REG_LEN_08BIT, 0x85}, + {0xd476, CRL_REG_LEN_08BIT, 0x00}, + {0xd477, CRL_REG_LEN_08BIT, 0x00}, + {0xd478, CRL_REG_LEN_08BIT, 0xb8}, + {0xd479, CRL_REG_LEN_08BIT, 0x63}, + {0xd47a, CRL_REG_LEN_08BIT, 0x00}, + {0xd47b, CRL_REG_LEN_08BIT, 0x98}, + {0xd47c, CRL_REG_LEN_08BIT, 0xe0}, + {0xd47d, CRL_REG_LEN_08BIT, 0x64}, + {0xd47e, CRL_REG_LEN_08BIT, 0x18}, + {0xd47f, CRL_REG_LEN_08BIT, 0x00}, + {0xd480, CRL_REG_LEN_08BIT, 0x0f}, + {0xd481, CRL_REG_LEN_08BIT, 0xff}, + {0xd482, CRL_REG_LEN_08BIT, 0xff}, + {0xd483, CRL_REG_LEN_08BIT, 0xf0}, + {0xd484, CRL_REG_LEN_08BIT, 0xdc}, + {0xd485, CRL_REG_LEN_08BIT, 0x05}, + {0xd486, CRL_REG_LEN_08BIT, 0x18}, + {0xd487, CRL_REG_LEN_08BIT, 0x00}, + {0xd488, CRL_REG_LEN_08BIT, 0x9c}, + {0xd489, CRL_REG_LEN_08BIT, 0x68}, + {0xd48a, CRL_REG_LEN_08BIT, 0x00}, + {0xd48b, CRL_REG_LEN_08BIT, 0x01}, + {0xd48c, CRL_REG_LEN_08BIT, 0xa5}, + {0xd48d, CRL_REG_LEN_08BIT, 0x03}, + {0xd48e, CRL_REG_LEN_08BIT, 0x00}, + {0xd48f, CRL_REG_LEN_08BIT, 0xff}, + {0xd490, CRL_REG_LEN_08BIT, 0xbc}, + {0xd491, CRL_REG_LEN_08BIT, 0x48}, + {0xd492, CRL_REG_LEN_08BIT, 0x00}, + {0xd493, CRL_REG_LEN_08BIT, 0x01}, + {0xd494, CRL_REG_LEN_08BIT, 0x0f}, + {0xd495, CRL_REG_LEN_08BIT, 0xff}, + {0xd496, CRL_REG_LEN_08BIT, 0xff}, + {0xd497, CRL_REG_LEN_08BIT, 0xea}, + {0xd498, CRL_REG_LEN_08BIT, 0xb8}, + {0xd499, CRL_REG_LEN_08BIT, 0xe8}, + {0xd49a, CRL_REG_LEN_08BIT, 0x00}, + {0xd49b, CRL_REG_LEN_08BIT, 0x02}, + {0xd49c, CRL_REG_LEN_08BIT, 0x18}, + {0xd49d, CRL_REG_LEN_08BIT, 0x60}, + {0xd49e, CRL_REG_LEN_08BIT, 0x00}, + {0xd49f, CRL_REG_LEN_08BIT, 0x01}, + {0xd4a0, CRL_REG_LEN_08BIT, 0xa8}, + {0xd4a1, CRL_REG_LEN_08BIT, 0x63}, + {0xd4a2, CRL_REG_LEN_08BIT, 0x06}, + {0xd4a3, CRL_REG_LEN_08BIT, 0x14}, + {0xd4a4, CRL_REG_LEN_08BIT, 0x07}, + {0xd4a5, CRL_REG_LEN_08BIT, 0xff}, + {0xd4a6, CRL_REG_LEN_08BIT, 0xe4}, + {0xd4a7, CRL_REG_LEN_08BIT, 
0x05}, + {0xd4a8, CRL_REG_LEN_08BIT, 0x9c}, + {0xd4a9, CRL_REG_LEN_08BIT, 0x83}, + {0xd4aa, CRL_REG_LEN_08BIT, 0x00}, + {0xd4ab, CRL_REG_LEN_08BIT, 0x10}, + {0xd4ac, CRL_REG_LEN_08BIT, 0x85}, + {0xd4ad, CRL_REG_LEN_08BIT, 0x21}, + {0xd4ae, CRL_REG_LEN_08BIT, 0x00}, + {0xd4af, CRL_REG_LEN_08BIT, 0x00}, + {0xd4b0, CRL_REG_LEN_08BIT, 0x44}, + {0xd4b1, CRL_REG_LEN_08BIT, 0x00}, + {0xd4b2, CRL_REG_LEN_08BIT, 0x48}, + {0xd4b3, CRL_REG_LEN_08BIT, 0x00}, + {0xd4b4, CRL_REG_LEN_08BIT, 0x9c}, + {0xd4b5, CRL_REG_LEN_08BIT, 0x21}, + {0xd4b6, CRL_REG_LEN_08BIT, 0x00}, + {0xd4b7, CRL_REG_LEN_08BIT, 0x04}, + {0xd4b8, CRL_REG_LEN_08BIT, 0x18}, + {0xd4b9, CRL_REG_LEN_08BIT, 0x60}, + {0xd4ba, CRL_REG_LEN_08BIT, 0x00}, + {0xd4bb, CRL_REG_LEN_08BIT, 0x01}, + {0xd4bc, CRL_REG_LEN_08BIT, 0x9c}, + {0xd4bd, CRL_REG_LEN_08BIT, 0x80}, + {0xd4be, CRL_REG_LEN_08BIT, 0xff}, + {0xd4bf, CRL_REG_LEN_08BIT, 0xff}, + {0xd4c0, CRL_REG_LEN_08BIT, 0xa8}, + {0xd4c1, CRL_REG_LEN_08BIT, 0x63}, + {0xd4c2, CRL_REG_LEN_08BIT, 0x09}, + {0xd4c3, CRL_REG_LEN_08BIT, 0xef}, + {0xd4c4, CRL_REG_LEN_08BIT, 0xd8}, + {0xd4c5, CRL_REG_LEN_08BIT, 0x03}, + {0xd4c6, CRL_REG_LEN_08BIT, 0x20}, + {0xd4c7, CRL_REG_LEN_08BIT, 0x00}, + {0xd4c8, CRL_REG_LEN_08BIT, 0x18}, + {0xd4c9, CRL_REG_LEN_08BIT, 0x60}, + {0xd4ca, CRL_REG_LEN_08BIT, 0x80}, + {0xd4cb, CRL_REG_LEN_08BIT, 0x06}, + {0xd4cc, CRL_REG_LEN_08BIT, 0xa8}, + {0xd4cd, CRL_REG_LEN_08BIT, 0x63}, + {0xd4ce, CRL_REG_LEN_08BIT, 0xc9}, + {0xd4cf, CRL_REG_LEN_08BIT, 0xef}, + {0xd4d0, CRL_REG_LEN_08BIT, 0xd8}, + {0xd4d1, CRL_REG_LEN_08BIT, 0x03}, + {0xd4d2, CRL_REG_LEN_08BIT, 0x20}, + {0xd4d3, CRL_REG_LEN_08BIT, 0x00}, + {0xd4d4, CRL_REG_LEN_08BIT, 0x44}, + {0xd4d5, CRL_REG_LEN_08BIT, 0x00}, + {0xd4d6, CRL_REG_LEN_08BIT, 0x48}, + {0xd4d7, CRL_REG_LEN_08BIT, 0x00}, + {0xd4d8, CRL_REG_LEN_08BIT, 0x15}, + {0xd4d9, CRL_REG_LEN_08BIT, 0x00}, + {0xd4da, CRL_REG_LEN_08BIT, 0x00}, + {0xd4db, CRL_REG_LEN_08BIT, 0x00}, + {0xd4dc, CRL_REG_LEN_08BIT, 0x18}, + {0xd4dd, CRL_REG_LEN_08BIT, 
0x80}, + {0xd4de, CRL_REG_LEN_08BIT, 0x00}, + {0xd4df, CRL_REG_LEN_08BIT, 0x01}, + {0xd4e0, CRL_REG_LEN_08BIT, 0xa8}, + {0xd4e1, CRL_REG_LEN_08BIT, 0x84}, + {0xd4e2, CRL_REG_LEN_08BIT, 0x0a}, + {0xd4e3, CRL_REG_LEN_08BIT, 0x12}, + {0xd4e4, CRL_REG_LEN_08BIT, 0x8c}, + {0xd4e5, CRL_REG_LEN_08BIT, 0x64}, + {0xd4e6, CRL_REG_LEN_08BIT, 0x00}, + {0xd4e7, CRL_REG_LEN_08BIT, 0x00}, + {0xd4e8, CRL_REG_LEN_08BIT, 0xbc}, + {0xd4e9, CRL_REG_LEN_08BIT, 0x03}, + {0xd4ea, CRL_REG_LEN_08BIT, 0x00}, + {0xd4eb, CRL_REG_LEN_08BIT, 0x00}, + {0xd4ec, CRL_REG_LEN_08BIT, 0x13}, + {0xd4ed, CRL_REG_LEN_08BIT, 0xff}, + {0xd4ee, CRL_REG_LEN_08BIT, 0xff}, + {0xd4ef, CRL_REG_LEN_08BIT, 0xfe}, + {0xd4f0, CRL_REG_LEN_08BIT, 0x15}, + {0xd4f1, CRL_REG_LEN_08BIT, 0x00}, + {0xd4f2, CRL_REG_LEN_08BIT, 0x00}, + {0xd4f3, CRL_REG_LEN_08BIT, 0x00}, + {0xd4f4, CRL_REG_LEN_08BIT, 0x44}, + {0xd4f5, CRL_REG_LEN_08BIT, 0x00}, + {0xd4f6, CRL_REG_LEN_08BIT, 0x48}, + {0xd4f7, CRL_REG_LEN_08BIT, 0x00}, + {0xd4f8, CRL_REG_LEN_08BIT, 0x15}, + {0xd4f9, CRL_REG_LEN_08BIT, 0x00}, + {0xd4fa, CRL_REG_LEN_08BIT, 0x00}, + {0xd4fb, CRL_REG_LEN_08BIT, 0x00}, + {0xd4fc, CRL_REG_LEN_08BIT, 0x00}, + {0xd4fd, CRL_REG_LEN_08BIT, 0x00}, + {0xd4fe, CRL_REG_LEN_08BIT, 0x00}, + {0xd4ff, CRL_REG_LEN_08BIT, 0x00}, + {0xd500, CRL_REG_LEN_08BIT, 0x00}, + {0xd501, CRL_REG_LEN_08BIT, 0x00}, + {0xd502, CRL_REG_LEN_08BIT, 0x00}, + {0xd503, CRL_REG_LEN_08BIT, 0x00}, + {0x6f0e, CRL_REG_LEN_08BIT, 0x33}, + {0x6f0f, CRL_REG_LEN_08BIT, 0x33}, + {0x460e, CRL_REG_LEN_08BIT, 0x08}, + {0x460f, CRL_REG_LEN_08BIT, 0x01}, + {0x4610, CRL_REG_LEN_08BIT, 0x00}, + {0x4611, CRL_REG_LEN_08BIT, 0x01}, + {0x4612, CRL_REG_LEN_08BIT, 0x00}, + {0x4613, CRL_REG_LEN_08BIT, 0x01}, + {0x4605, CRL_REG_LEN_08BIT, 0x08},/*YUV 8bit*/ + {0x4608, CRL_REG_LEN_08BIT, 0x00}, + {0x4609, CRL_REG_LEN_08BIT, 0x08}, + {0x6804, CRL_REG_LEN_08BIT, 0x00}, + {0x6805, CRL_REG_LEN_08BIT, 0x06}, + {0x6806, CRL_REG_LEN_08BIT, 0x00}, + {0x5120, CRL_REG_LEN_08BIT, 0x00}, + {0x3510, 
CRL_REG_LEN_08BIT, 0x00}, + {0x3504, CRL_REG_LEN_08BIT, 0x00}, + {0x6800, CRL_REG_LEN_08BIT, 0x00}, + {0x6f0d, CRL_REG_LEN_08BIT, 0x0f}, + {0x5000, CRL_REG_LEN_08BIT, 0xff}, + {0x5001, CRL_REG_LEN_08BIT, 0xbf}, + {0x5002, CRL_REG_LEN_08BIT, 0x7e}, + {0x5003, CRL_REG_LEN_08BIT, 0x0c}, + {0x503d, CRL_REG_LEN_08BIT, 0x00}, + {0xc450, CRL_REG_LEN_08BIT, 0x01}, + {0xc452, CRL_REG_LEN_08BIT, 0x04}, + {0xc453, CRL_REG_LEN_08BIT, 0x00}, + {0xc454, CRL_REG_LEN_08BIT, 0x01}, + {0xc455, CRL_REG_LEN_08BIT, 0x00}, + {0xc456, CRL_REG_LEN_08BIT, 0x00}, + {0xc457, CRL_REG_LEN_08BIT, 0x00}, + {0xc458, CRL_REG_LEN_08BIT, 0x00}, + {0xc459, CRL_REG_LEN_08BIT, 0x00}, + {0xc45b, CRL_REG_LEN_08BIT, 0x00}, + {0xc45c, CRL_REG_LEN_08BIT, 0x00}, + {0xc45d, CRL_REG_LEN_08BIT, 0x00}, + {0xc45e, CRL_REG_LEN_08BIT, 0x02}, + {0xc45f, CRL_REG_LEN_08BIT, 0x01}, + {0xc460, CRL_REG_LEN_08BIT, 0x01}, + {0xc461, CRL_REG_LEN_08BIT, 0x01}, + {0xc462, CRL_REG_LEN_08BIT, 0x01}, + {0xc464, CRL_REG_LEN_08BIT, 0x88}, + {0xc465, CRL_REG_LEN_08BIT, 0x00}, + {0xc466, CRL_REG_LEN_08BIT, 0x8a}, + {0xc467, CRL_REG_LEN_08BIT, 0x00}, + {0xc468, CRL_REG_LEN_08BIT, 0x86}, + {0xc469, CRL_REG_LEN_08BIT, 0x00}, + {0xc46a, CRL_REG_LEN_08BIT, 0x40}, + {0xc46b, CRL_REG_LEN_08BIT, 0x50}, + {0xc46c, CRL_REG_LEN_08BIT, 0x30}, + {0xc46d, CRL_REG_LEN_08BIT, 0x28}, + {0xc46e, CRL_REG_LEN_08BIT, 0x60}, + {0xc46f, CRL_REG_LEN_08BIT, 0x40}, + {0xc47c, CRL_REG_LEN_08BIT, 0x01}, + {0xc47d, CRL_REG_LEN_08BIT, 0x38}, + {0xc47e, CRL_REG_LEN_08BIT, 0x00}, + {0xc47f, CRL_REG_LEN_08BIT, 0x00}, + {0xc480, CRL_REG_LEN_08BIT, 0x00}, + {0xc481, CRL_REG_LEN_08BIT, 0xff}, + {0xc482, CRL_REG_LEN_08BIT, 0x00}, + {0xc483, CRL_REG_LEN_08BIT, 0x40}, + {0xc484, CRL_REG_LEN_08BIT, 0x00}, + {0xc485, CRL_REG_LEN_08BIT, 0x18}, + {0xc486, CRL_REG_LEN_08BIT, 0x00}, + {0xc487, CRL_REG_LEN_08BIT, 0x18}, + {0xc488, CRL_REG_LEN_08BIT, 0x20}, + {0xc489, CRL_REG_LEN_08BIT, 0x00}, + {0xc48a, CRL_REG_LEN_08BIT, 0x20}, + {0xc48b, CRL_REG_LEN_08BIT, 0x00}, + {0xc48c, 
CRL_REG_LEN_08BIT, 0x00}, + {0xc48d, CRL_REG_LEN_08BIT, 0x04}, + {0xc48e, CRL_REG_LEN_08BIT, 0x00}, + {0xc48f, CRL_REG_LEN_08BIT, 0x04}, + {0xc490, CRL_REG_LEN_08BIT, 0x07}, + {0xc492, CRL_REG_LEN_08BIT, 0x20}, + {0xc493, CRL_REG_LEN_08BIT, 0x08}, + {0xc498, CRL_REG_LEN_08BIT, 0x02}, + {0xc499, CRL_REG_LEN_08BIT, 0x00}, + {0xc49a, CRL_REG_LEN_08BIT, 0x02}, + {0xc49b, CRL_REG_LEN_08BIT, 0x00}, + {0xc49c, CRL_REG_LEN_08BIT, 0x02}, + {0xc49d, CRL_REG_LEN_08BIT, 0x00}, + {0xc49e, CRL_REG_LEN_08BIT, 0x02}, + {0xc49f, CRL_REG_LEN_08BIT, 0x60}, + {0xc4a0, CRL_REG_LEN_08BIT, 0x03}, + {0xc4a1, CRL_REG_LEN_08BIT, 0x00}, + {0xc4a2, CRL_REG_LEN_08BIT, 0x04}, + {0xc4a3, CRL_REG_LEN_08BIT, 0x00}, + {0xc4a4, CRL_REG_LEN_08BIT, 0x00}, + {0xc4a5, CRL_REG_LEN_08BIT, 0x10}, + {0xc4a6, CRL_REG_LEN_08BIT, 0x00}, + {0xc4a7, CRL_REG_LEN_08BIT, 0x40}, + {0xc4a8, CRL_REG_LEN_08BIT, 0x00}, + {0xc4a9, CRL_REG_LEN_08BIT, 0x80}, + {0xc4aa, CRL_REG_LEN_08BIT, 0x0d}, + {0xc4ab, CRL_REG_LEN_08BIT, 0x00}, + {0xc4ac, CRL_REG_LEN_08BIT, 0x03}, + {0xc4ad, CRL_REG_LEN_08BIT, 0xf0}, + {0xc4b4, CRL_REG_LEN_08BIT, 0x01}, + {0xc4b5, CRL_REG_LEN_08BIT, 0x01}, + {0xc4b6, CRL_REG_LEN_08BIT, 0x00}, + {0xc4b7, CRL_REG_LEN_08BIT, 0x01}, + {0xc4b8, CRL_REG_LEN_08BIT, 0x00}, + {0xc4b9, CRL_REG_LEN_08BIT, 0x01}, + {0xc4ba, CRL_REG_LEN_08BIT, 0x01}, + {0xc4bb, CRL_REG_LEN_08BIT, 0x00}, + {0xc4bc, CRL_REG_LEN_08BIT, 0x01}, + {0xc4bd, CRL_REG_LEN_08BIT, 0x60}, + {0xc4be, CRL_REG_LEN_08BIT, 0x02}, + {0xc4bf, CRL_REG_LEN_08BIT, 0x33}, + {0xc4c8, CRL_REG_LEN_08BIT, 0x03}, + {0xc4c9, CRL_REG_LEN_08BIT, 0xd0}, + {0xc4ca, CRL_REG_LEN_08BIT, 0x0e}, + {0xc4cb, CRL_REG_LEN_08BIT, 0x00}, + {0xc4cc, CRL_REG_LEN_08BIT, 0x04}, + {0xc4cd, CRL_REG_LEN_08BIT, 0xd8}, + {0xc4ce, CRL_REG_LEN_08BIT, 0x04}, + {0xc4cf, CRL_REG_LEN_08BIT, 0xd8}, + {0xc4d0, CRL_REG_LEN_08BIT, 0x04}, + {0xc4d1, CRL_REG_LEN_08BIT, 0x80}, + {0xc4e0, CRL_REG_LEN_08BIT, 0x04}, + {0xc4e1, CRL_REG_LEN_08BIT, 0x02}, + {0xc4e2, CRL_REG_LEN_08BIT, 0x01}, + {0xc4e4, 
CRL_REG_LEN_08BIT, 0x10}, + {0xc4e5, CRL_REG_LEN_08BIT, 0x20}, + {0xc4e6, CRL_REG_LEN_08BIT, 0x30}, + {0xc4e7, CRL_REG_LEN_08BIT, 0x40}, + {0xc4e8, CRL_REG_LEN_08BIT, 0x50}, + {0xc4e9, CRL_REG_LEN_08BIT, 0x60}, + {0xc4ea, CRL_REG_LEN_08BIT, 0x70}, + {0xc4eb, CRL_REG_LEN_08BIT, 0x80}, + {0xc4ec, CRL_REG_LEN_08BIT, 0x90}, + {0xc4ed, CRL_REG_LEN_08BIT, 0xa0}, + {0xc4ee, CRL_REG_LEN_08BIT, 0xb0}, + {0xc4ef, CRL_REG_LEN_08BIT, 0xc0}, + {0xc4f0, CRL_REG_LEN_08BIT, 0xd0}, + {0xc4f1, CRL_REG_LEN_08BIT, 0xe0}, + {0xc4f2, CRL_REG_LEN_08BIT, 0xf0}, + {0xc4f3, CRL_REG_LEN_08BIT, 0x80}, + {0xc4f4, CRL_REG_LEN_08BIT, 0x00}, + {0xc4f5, CRL_REG_LEN_08BIT, 0x20}, + {0xc4f6, CRL_REG_LEN_08BIT, 0x02}, + {0xc4f7, CRL_REG_LEN_08BIT, 0x00}, + {0xc4f8, CRL_REG_LEN_08BIT, 0x04}, + {0xc4f9, CRL_REG_LEN_08BIT, 0x0b}, + {0xc4fa, CRL_REG_LEN_08BIT, 0x00}, + {0xc4fb, CRL_REG_LEN_08BIT, 0x00}, + {0xc4fc, CRL_REG_LEN_08BIT, 0x01}, + {0xc4fd, CRL_REG_LEN_08BIT, 0x00}, + {0xc4fe, CRL_REG_LEN_08BIT, 0x04}, + {0xc4ff, CRL_REG_LEN_08BIT, 0x02}, + {0xc500, CRL_REG_LEN_08BIT, 0x48}, + {0xc501, CRL_REG_LEN_08BIT, 0x74}, + {0xc502, CRL_REG_LEN_08BIT, 0x58}, + {0xc503, CRL_REG_LEN_08BIT, 0x80}, + {0xc504, CRL_REG_LEN_08BIT, 0x05}, + {0xc505, CRL_REG_LEN_08BIT, 0x80}, + {0xc506, CRL_REG_LEN_08BIT, 0x03}, + {0xc507, CRL_REG_LEN_08BIT, 0x80}, + {0xc508, CRL_REG_LEN_08BIT, 0x01}, + {0xc509, CRL_REG_LEN_08BIT, 0xc0}, + {0xc50a, CRL_REG_LEN_08BIT, 0x01}, + {0xc50b, CRL_REG_LEN_08BIT, 0xa0}, + {0xc50c, CRL_REG_LEN_08BIT, 0x01}, + {0xc50d, CRL_REG_LEN_08BIT, 0x2c}, + {0xc50e, CRL_REG_LEN_08BIT, 0x01}, + {0xc50f, CRL_REG_LEN_08BIT, 0x0a}, + {0xc510, CRL_REG_LEN_08BIT, 0x00}, + {0xc511, CRL_REG_LEN_08BIT, 0x00}, + {0xc512, CRL_REG_LEN_08BIT, 0x4d}, + {0xc513, CRL_REG_LEN_08BIT, 0x84}, + {0xc514, CRL_REG_LEN_08BIT, 0x04}, + {0xc515, CRL_REG_LEN_08BIT, 0x00}, + {0xc518, CRL_REG_LEN_08BIT, 0x03}, + {0xc519, CRL_REG_LEN_08BIT, 0x48}, + {0xc51a, CRL_REG_LEN_08BIT, 0x07}, + {0xc51b, CRL_REG_LEN_08BIT, 0x70}, + {0xc2e0, 
CRL_REG_LEN_08BIT, 0x00}, + {0xc2e1, CRL_REG_LEN_08BIT, 0x51}, + {0xc2e2, CRL_REG_LEN_08BIT, 0x00}, + {0xc2e3, CRL_REG_LEN_08BIT, 0xd6}, + {0xc2e4, CRL_REG_LEN_08BIT, 0x01}, + {0xc2e5, CRL_REG_LEN_08BIT, 0x5e}, + {0xc2e9, CRL_REG_LEN_08BIT, 0x01}, + {0xc2ea, CRL_REG_LEN_08BIT, 0x7a}, + {0xc2eb, CRL_REG_LEN_08BIT, 0x90}, + {0xc2ed, CRL_REG_LEN_08BIT, 0x00}, + {0xc2ee, CRL_REG_LEN_08BIT, 0x7a}, + {0xc2ef, CRL_REG_LEN_08BIT, 0x64}, + {0xc308, CRL_REG_LEN_08BIT, 0x00}, + {0xc309, CRL_REG_LEN_08BIT, 0x00}, + {0xc30a, CRL_REG_LEN_08BIT, 0x00}, + {0xc30c, CRL_REG_LEN_08BIT, 0x00}, + {0xc30d, CRL_REG_LEN_08BIT, 0x01}, + {0xc30e, CRL_REG_LEN_08BIT, 0x00}, + {0xc30f, CRL_REG_LEN_08BIT, 0x00}, + {0xc310, CRL_REG_LEN_08BIT, 0x01}, + {0xc311, CRL_REG_LEN_08BIT, 0x60}, + {0xc312, CRL_REG_LEN_08BIT, 0xff}, + {0xc313, CRL_REG_LEN_08BIT, 0x08}, + {0xc314, CRL_REG_LEN_08BIT, 0x01}, + {0xc315, CRL_REG_LEN_08BIT, 0x7f}, + {0xc316, CRL_REG_LEN_08BIT, 0xff}, + {0xc317, CRL_REG_LEN_08BIT, 0x0b}, + {0xc318, CRL_REG_LEN_08BIT, 0x00}, + {0xc319, CRL_REG_LEN_08BIT, 0x0c}, + {0xc31a, CRL_REG_LEN_08BIT, 0x00}, + {0xc31b, CRL_REG_LEN_08BIT, 0xe0}, + {0xc31c, CRL_REG_LEN_08BIT, 0x00}, + {0xc31d, CRL_REG_LEN_08BIT, 0x14}, + {0xc31e, CRL_REG_LEN_08BIT, 0x00}, + {0xc31f, CRL_REG_LEN_08BIT, 0xc5}, + {0xc320, CRL_REG_LEN_08BIT, 0xff}, + {0xc321, CRL_REG_LEN_08BIT, 0x4b}, + {0xc322, CRL_REG_LEN_08BIT, 0xff}, + {0xc323, CRL_REG_LEN_08BIT, 0xf0}, + {0xc324, CRL_REG_LEN_08BIT, 0xff}, + {0xc325, CRL_REG_LEN_08BIT, 0xe8}, + {0xc326, CRL_REG_LEN_08BIT, 0x00}, + {0xc327, CRL_REG_LEN_08BIT, 0x46}, + {0xc328, CRL_REG_LEN_08BIT, 0xff}, + {0xc329, CRL_REG_LEN_08BIT, 0xd2}, + {0xc32a, CRL_REG_LEN_08BIT, 0xff}, + {0xc32b, CRL_REG_LEN_08BIT, 0xe4}, + {0xc32c, CRL_REG_LEN_08BIT, 0xff}, + {0xc32d, CRL_REG_LEN_08BIT, 0xbb}, + {0xc32e, CRL_REG_LEN_08BIT, 0x00}, + {0xc32f, CRL_REG_LEN_08BIT, 0x61}, + {0xc330, CRL_REG_LEN_08BIT, 0xff}, + {0xc331, CRL_REG_LEN_08BIT, 0xf9}, + {0xc332, CRL_REG_LEN_08BIT, 0x00}, + {0xc333, 
CRL_REG_LEN_08BIT, 0xd9}, + {0xc334, CRL_REG_LEN_08BIT, 0x00}, + {0xc335, CRL_REG_LEN_08BIT, 0x2e}, + {0xc336, CRL_REG_LEN_08BIT, 0x00}, + {0xc337, CRL_REG_LEN_08BIT, 0xb1}, + {0xc338, CRL_REG_LEN_08BIT, 0xff}, + {0xc339, CRL_REG_LEN_08BIT, 0x64}, + {0xc33a, CRL_REG_LEN_08BIT, 0xff}, + {0xc33b, CRL_REG_LEN_08BIT, 0xeb}, + {0xc33c, CRL_REG_LEN_08BIT, 0xff}, + {0xc33d, CRL_REG_LEN_08BIT, 0xe8}, + {0xc33e, CRL_REG_LEN_08BIT, 0x00}, + {0xc33f, CRL_REG_LEN_08BIT, 0x48}, + {0xc340, CRL_REG_LEN_08BIT, 0xff}, + {0xc341, CRL_REG_LEN_08BIT, 0xd0}, + {0xc342, CRL_REG_LEN_08BIT, 0xff}, + {0xc343, CRL_REG_LEN_08BIT, 0xed}, + {0xc344, CRL_REG_LEN_08BIT, 0xff}, + {0xc345, CRL_REG_LEN_08BIT, 0xad}, + {0xc346, CRL_REG_LEN_08BIT, 0x00}, + {0xc347, CRL_REG_LEN_08BIT, 0x66}, + {0xc348, CRL_REG_LEN_08BIT, 0x01}, + {0xc349, CRL_REG_LEN_08BIT, 0x00}, + {0x6700, CRL_REG_LEN_08BIT, 0x04}, + {0x6701, CRL_REG_LEN_08BIT, 0x7b}, + {0x6702, CRL_REG_LEN_08BIT, 0xfd}, + {0x6703, CRL_REG_LEN_08BIT, 0xf9}, + {0x6704, CRL_REG_LEN_08BIT, 0x3d}, + {0x6705, CRL_REG_LEN_08BIT, 0x71}, + {0x6706, CRL_REG_LEN_08BIT, 0x78}, + {0x6708, CRL_REG_LEN_08BIT, 0x05}, + {0x6f06, CRL_REG_LEN_08BIT, 0x6f}, + {0x6f07, CRL_REG_LEN_08BIT, 0x00}, + {0x6f0a, CRL_REG_LEN_08BIT, 0x6f}, + {0x6f0b, CRL_REG_LEN_08BIT, 0x00}, + {0x6f00, CRL_REG_LEN_08BIT, 0x03}, + {0xc34c, CRL_REG_LEN_08BIT, 0x01}, + {0xc34d, CRL_REG_LEN_08BIT, 0x00}, + {0xc34e, CRL_REG_LEN_08BIT, 0x46}, + {0xc34f, CRL_REG_LEN_08BIT, 0x55}, + {0xc350, CRL_REG_LEN_08BIT, 0x00}, + {0xc351, CRL_REG_LEN_08BIT, 0x40}, + {0xc352, CRL_REG_LEN_08BIT, 0x00}, + {0xc353, CRL_REG_LEN_08BIT, 0xff}, + {0xc354, CRL_REG_LEN_08BIT, 0x04}, + {0xc355, CRL_REG_LEN_08BIT, 0x08}, + {0xc356, CRL_REG_LEN_08BIT, 0x01}, + {0xc357, CRL_REG_LEN_08BIT, 0xef}, + {0xc358, CRL_REG_LEN_08BIT, 0x30}, + {0xc359, CRL_REG_LEN_08BIT, 0x01}, + {0xc35a, CRL_REG_LEN_08BIT, 0x64}, + {0xc35b, CRL_REG_LEN_08BIT, 0x46}, + {0xc35c, CRL_REG_LEN_08BIT, 0x00}, + {0x3621, CRL_REG_LEN_08BIT, 0x73}, + {0x3702, 
CRL_REG_LEN_08BIT, 0x20}, + {0x3703, CRL_REG_LEN_08BIT, 0x48}, + {0x3704, CRL_REG_LEN_08BIT, 0x32}, + {0x3800, CRL_REG_LEN_08BIT, 0x00}, + {0x3801, CRL_REG_LEN_08BIT, 0x00}, + {0x3802, CRL_REG_LEN_08BIT, 0x00}, + {0x3803, CRL_REG_LEN_08BIT, 0xA4}, + {0x3804, CRL_REG_LEN_08BIT, 0x00}, + {0x3805, CRL_REG_LEN_08BIT, 0xFF}, + {0x3806, CRL_REG_LEN_08BIT, 0x02}, + {0x3807, CRL_REG_LEN_08BIT, 0x89}, + {0x3808, CRL_REG_LEN_08BIT, 0x02}, + {0x3809, CRL_REG_LEN_08BIT, 0x80}, + {0x380a, CRL_REG_LEN_08BIT, 0x01}, + {0x380b, CRL_REG_LEN_08BIT, 0xE0}, + {0x380c, CRL_REG_LEN_08BIT, 0x04}, + {0x380d, CRL_REG_LEN_08BIT, 0xAC}, + {0x6e42, CRL_REG_LEN_08BIT, 0x05}, + {0x6e43, CRL_REG_LEN_08BIT, 0x3A}, + {0x3810, CRL_REG_LEN_08BIT, 0x00}, + {0x3811, CRL_REG_LEN_08BIT, 0x08}, + {0x3812, CRL_REG_LEN_08BIT, 0x00}, + {0x3813, CRL_REG_LEN_08BIT, 0x02}, + {0x381c, CRL_REG_LEN_08BIT, 0x00}, + {0x381e, CRL_REG_LEN_08BIT, 0x00}, + {0x381f, CRL_REG_LEN_08BIT, 0x0C}, + {0x4001, CRL_REG_LEN_08BIT, 0x06}, + {0x4004, CRL_REG_LEN_08BIT, 0x04}, + {0x4050, CRL_REG_LEN_08BIT, 0x22}, + {0x4051, CRL_REG_LEN_08BIT, 0x24}, + {0x4605, CRL_REG_LEN_08BIT, 0x08}, + {0x4606, CRL_REG_LEN_08BIT, 0x09}, + {0x4607, CRL_REG_LEN_08BIT, 0x58}, + {0xc488, CRL_REG_LEN_08BIT, 0x53}, + {0xc489, CRL_REG_LEN_08BIT, 0x20}, + {0xc48a, CRL_REG_LEN_08BIT, 0x53}, + {0xc48b, CRL_REG_LEN_08BIT, 0x20}, + {0xc4cc, CRL_REG_LEN_08BIT, 0x04}, + {0xc4cd, CRL_REG_LEN_08BIT, 0xD8}, + {0xc4ce, CRL_REG_LEN_08BIT, 0x04}, + {0xc4cf, CRL_REG_LEN_08BIT, 0xD8}, + {0xc510, CRL_REG_LEN_08BIT, 0x00}, + {0xc511, CRL_REG_LEN_08BIT, 0x00}, + {0xc512, CRL_REG_LEN_08BIT, 0x4D}, + {0xc513, CRL_REG_LEN_08BIT, 0x84}, + {0x5005, CRL_REG_LEN_08BIT, 0x08}, + {0x3007, CRL_REG_LEN_08BIT, 0x01}, + {0xc518, CRL_REG_LEN_08BIT, 0x05}, + {0xc519, CRL_REG_LEN_08BIT, 0x3A}, + {0xc51a, CRL_REG_LEN_08BIT, 0x04}, + {0xc51b, CRL_REG_LEN_08BIT, 0xAC}, + {0x5608, CRL_REG_LEN_08BIT, 0x15}, + {0x3815, CRL_REG_LEN_08BIT, 0x8C}, + {0x3042, CRL_REG_LEN_08BIT, 0xf0}, + {0x3042, 
CRL_REG_LEN_08BIT, 0xf0}, + {0x3042, CRL_REG_LEN_08BIT, 0xf0}, + {0x3042, CRL_REG_LEN_08BIT, 0xf0}, + {0x3042, CRL_REG_LEN_08BIT, 0xf0}, + {0x3042, CRL_REG_LEN_08BIT, 0xf0}, + {0x3042, CRL_REG_LEN_08BIT, 0xf0}, + {0x3042, CRL_REG_LEN_08BIT, 0xf0}, + {0x3042, CRL_REG_LEN_08BIT, 0xf0}, + {0x3042, CRL_REG_LEN_08BIT, 0xf0}, + {0x3042, CRL_REG_LEN_08BIT, 0xf0}, + {0x3042, CRL_REG_LEN_08BIT, 0xf0}, + {0x3042, CRL_REG_LEN_08BIT, 0xf0}, + {0x3042, CRL_REG_LEN_08BIT, 0xf0}, + {0x3042, CRL_REG_LEN_08BIT, 0xf0}, + {0x3042, CRL_REG_LEN_08BIT, 0xf0}, + {0x3042, CRL_REG_LEN_08BIT, 0xf0}, + {0x3042, CRL_REG_LEN_08BIT, 0xf0}, + {0x3042, CRL_REG_LEN_08BIT, 0xf0}, + {0x3042, CRL_REG_LEN_08BIT, 0xf0}, + {0x3042, CRL_REG_LEN_08BIT, 0xf0}, + {0x3042, CRL_REG_LEN_08BIT, 0xf0}, + {0x3042, CRL_REG_LEN_08BIT, 0xf0}, + {0x3042, CRL_REG_LEN_08BIT, 0xf0}, + {0x3042, CRL_REG_LEN_08BIT, 0xf0}, + {0x3042, CRL_REG_LEN_08BIT, 0xf0}, + {0x302e, CRL_REG_LEN_08BIT, 0x00}, + {0x301b, CRL_REG_LEN_08BIT, 0xf0}, + {0x301c, CRL_REG_LEN_08BIT, 0xf0}, + {0x301a, CRL_REG_LEN_08BIT, 0xf0}, + {0xceb0, CRL_REG_LEN_08BIT, 0x00}, + {0xceb1, CRL_REG_LEN_08BIT, 0x00}, + {0xceb2, CRL_REG_LEN_08BIT, 0x00}, + {0xceb3, CRL_REG_LEN_08BIT, 0x00}, + {0xceb4, CRL_REG_LEN_08BIT, 0x00}, + {0xceb5, CRL_REG_LEN_08BIT, 0x00}, + {0x0000, CRL_REG_LEN_DELAY, 0x0c}, + {0xceb6, CRL_REG_LEN_08BIT, 0x00}, + {0x0000, CRL_REG_LEN_DELAY, 0x0c}, + {0xceb7, CRL_REG_LEN_08BIT, 0x00}, + {0x0000, CRL_REG_LEN_DELAY, 0x0c}, + {0xc4bc, CRL_REG_LEN_08BIT, 0x01}, + {0x0000, CRL_REG_LEN_DELAY, 0x0c}, + {0xc4bd, CRL_REG_LEN_08BIT, 0x60}, + {0x0000, CRL_REG_LEN_DELAY, 0x0c}, +}; + +static struct crl_dynamic_register_access ov10635_h_flip_regs[] = { + { + .address = 0x381d, + .len = CRL_REG_LEN_08BIT | CRL_REG_READ_AND_UPDATE, + .ops_items = 0, + .ops = 0, + .mask = 0x3, + } +}; + +static struct crl_dynamic_register_access ov10635_v_flip_regs[] = { + { + .address = 0x381c, + .len = CRL_REG_LEN_08BIT | CRL_REG_READ_AND_UPDATE, + .ops_items = 0, + .ops 
= 0, + .mask = 0xc0, + } +}; + +/* Needed for acpi support for runtime detection */ +static struct crl_sensor_detect_config ov10635_sensor_detect_regset[] = { + { + .reg = { 0x300A, CRL_REG_LEN_08BIT, 0x000000ff }, + .width = 8, + }, + { + .reg = { 0x300B, CRL_REG_LEN_08BIT, 0x000000ff }, + .width = 8, + } +}; + +static struct crl_pll_configuration ov10635_pll_configurations[] = { + { + .input_clk = 24000000, + .op_sys_clk = 400000000, + .bitsperpixel = 16, + .pixel_rate_csi = 529000000, + .pixel_rate_pa = 529000000, /* pixel_rate = MIPICLK*2 *4/12 */ + .csi_lanes = 4, + .comp_items = 0, + .ctrl_data = 0, + .pll_regs_items = 0, + .pll_regs = NULL, + }, + { + .input_clk = 24000000, + .op_sys_clk = 400000000, + .bitsperpixel = 10, + .pixel_rate_csi = 529000000, + .pixel_rate_pa = 529000000, /* pixel_rate = MIPICLK*2 *4/12 */ + .csi_lanes = 4, + .comp_items = 0, + .ctrl_data = 0, + .pll_regs_items = 0, + .pll_regs = NULL, + }, + { + .input_clk = 24000000, + .op_sys_clk = 400000000, + .bitsperpixel = 20, + .pixel_rate_csi = 529000000, + .pixel_rate_pa = 529000000, /* pixel_rate = MIPICLK*2 *4/12 */ + .csi_lanes = 4, + .comp_items = 0, + .ctrl_data = 0, + .pll_regs_items = 0, + .pll_regs = NULL, + } +}; + +static struct crl_subdev_rect_rep ov10635_1280_800_rects[] = { + { + .subdev_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .in_rect.left = 0, + .in_rect.top = 0, + .in_rect.width = 1280, + .in_rect.height = 800, + .out_rect.left = 0, + .out_rect.top = 0, + .out_rect.width = 1280, + .out_rect.height = 800, + }, + { + .subdev_type = CRL_SUBDEV_TYPE_BINNER, + .in_rect.left = 0, + .in_rect.top = 0, + .in_rect.width = 1280, + .in_rect.height = 800, + .out_rect.left = 0, + .out_rect.top = 0, + .out_rect.width = 1280, + .out_rect.height = 800, + }, +}; + +static struct crl_subdev_rect_rep ov10635_1280_720_rects_BT656[] = { + { + .subdev_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .in_rect.left = 0, + .in_rect.top = 0, + .in_rect.width = 1280, + .in_rect.height = 720, + .out_rect.left = 0, 
+ .out_rect.top = 0, + .out_rect.width = 1280, + .out_rect.height = 720, + }, + { + .subdev_type = CRL_SUBDEV_TYPE_BINNER, + .in_rect.left = 0, + .in_rect.top = 0, + .in_rect.width = 1280, + .in_rect.height = 720, + .out_rect.left = 0, + .out_rect.top = 0, + .out_rect.width = 1280, + .out_rect.height = 720, + }, +}; + +static struct crl_subdev_rect_rep ov10635_640_480_rects[] = { + { + .subdev_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .in_rect.left = 0, + .in_rect.top = 0, + .in_rect.width = 1280, + .in_rect.height = 800, + .out_rect.left = 0, + .out_rect.top = 0, + .out_rect.width = 1280, + .out_rect.height = 800, + }, + { + .subdev_type = CRL_SUBDEV_TYPE_BINNER, + .in_rect.left = 0, + .in_rect.top = 0, + .in_rect.width = 1280, + .in_rect.height = 800, + .out_rect.left = 0, + .out_rect.top = 0, + .out_rect.width = 640, + .out_rect.height = 480, + }, +}; + +static struct crl_register_write_rep ov10635_powerup_regs[] = { + {OV10635_REG_RESET, CRL_REG_LEN_08BIT, 0x01}, + {0x300c, CRL_REG_LEN_08BIT, 0x61}, + {0x300c, CRL_REG_LEN_08BIT, 0x61}, + {0x300c, CRL_REG_LEN_08BIT, 0x61}, + {0x300c, CRL_REG_LEN_08BIT, 0x61}, + {0x300c, CRL_REG_LEN_08BIT, 0x61}, + {0x300c, CRL_REG_LEN_08BIT, 0x61}, + {0x300c, CRL_REG_LEN_08BIT, 0x61}, + {0x300c, CRL_REG_LEN_08BIT, 0x61}, + {0x300c, CRL_REG_LEN_08BIT, 0x61}, + {0x300c, CRL_REG_LEN_08BIT, 0x61}, + {0x300c, CRL_REG_LEN_08BIT, 0x61}, + {0x300c, CRL_REG_LEN_08BIT, 0x61}, + {0x300c, CRL_REG_LEN_08BIT, 0x61}, + {0x300c, CRL_REG_LEN_08BIT, 0x61}, + {0x300c, CRL_REG_LEN_08BIT, 0x61}, + {0x300c, CRL_REG_LEN_08BIT, 0x61}, + {0x300c, CRL_REG_LEN_08BIT, 0x61}, + {0x300c, CRL_REG_LEN_08BIT, 0x61}, + {0x300c, CRL_REG_LEN_08BIT, 0x61}, + {0x300c, CRL_REG_LEN_08BIT, 0x61}, + {0x300c, CRL_REG_LEN_08BIT, 0x61}, + {0x300c, CRL_REG_LEN_08BIT, 0x61}, + {0x300c, CRL_REG_LEN_08BIT, 0x61}, +}; + +static struct crl_register_write_rep ov10635_poweroff_regs[] = { + {OV10635_REG_RESET, CRL_REG_LEN_08BIT, 0x01}, +}; + +static struct crl_power_seq_entity 
ov10635_power_items[] = { + { + .type = CRL_POWER_ETY_GPIO_FROM_PDATA, + .val = 1, + .undo_val = 0, + }, +}; + +static struct crl_mode_rep ov10635_modes[] = { + { + .sd_rects_items = ARRAY_SIZE(ov10635_1280_800_rects), + .sd_rects = ov10635_1280_800_rects, + .binn_hor = 1, + .binn_vert = 1, + .scale_m = 1, + .width = 1280, + .height = 800, + .min_llp = 2250, + .min_fll = 1320, + .mode_regs_items = ARRAY_SIZE(ov10635_1280_800_YUV_HDR), + .mode_regs = ov10635_1280_800_YUV_HDR, + }, + { + .sd_rects_items = ARRAY_SIZE(ov10635_1280_720_rects_BT656), + .sd_rects = ov10635_1280_720_rects_BT656, + .binn_hor = 1, + .binn_vert = 1, + .scale_m = 1, + .width = 1280, + .height = 720, + .min_llp = 2250, + .min_fll = 1320, + .mode_regs_items = ARRAY_SIZE(ov10635_1280_720_YUV_HDR_BT656), + .mode_regs = ov10635_1280_720_YUV_HDR_BT656, + }, + { + .sd_rects_items = ARRAY_SIZE(ov10635_640_480_rects), + .sd_rects = ov10635_640_480_rects, + .binn_hor = 2, + .binn_vert = 1, + .scale_m = 1, + .width = 640, + .height = 480, + .min_llp = 2250, + .min_fll = 1320, + .mode_regs_items = ARRAY_SIZE(ov10635_640_480_YUV_HDR), + .mode_regs = ov10635_640_480_YUV_HDR, + }, +}; + +static struct crl_sensor_subdev_config ov10635_sensor_subdevs[] = { + { + .subdev_type = CRL_SUBDEV_TYPE_BINNER, + .name = "ov10635 binner", + }, + { + .subdev_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .name = "ov10635 pixel array", + } +}; + +static struct crl_sensor_limits ov10635_sensor_limits = { + .x_addr_min = 0, + .y_addr_min = 0, + .x_addr_max = 1280, + .y_addr_max = 800, + .min_frame_length_lines = 240, + .max_frame_length_lines = 65535, + .min_line_length_pixels = 320, + .max_line_length_pixels = 32752, +}; + +static struct crl_flip_data ov10635_flip_configurations[] = { + { + .flip = CRL_FLIP_DEFAULT_NONE, + .pixel_order = CRL_PIXEL_ORDER_IGNORE, + }, + { + .flip = CRL_FLIP_HFLIP, + .pixel_order = CRL_PIXEL_ORDER_IGNORE, + }, + { + .flip = CRL_FLIP_VFLIP, + .pixel_order = CRL_PIXEL_ORDER_IGNORE, + }, + { + .flip = 
CRL_FLIP_HFLIP_VFLIP, + .pixel_order = CRL_PIXEL_ORDER_IGNORE, + } +}; + +static struct crl_register_write_rep ov10635_yuyv_regs[] = { + {0x4300, CRL_REG_LEN_08BIT, 0x38}, +}; + +static struct crl_register_write_rep ov10635_uyvy_regs[] = { + {0x4300, CRL_REG_LEN_08BIT, 0x3a}, +}; + +static struct crl_csi_data_fmt ov10635_crl_csi_data_fmt[] = { + { + .code = MEDIA_BUS_FMT_YUYV8_1X16, + .pixel_order = CRL_PIXEL_ORDER_IGNORE, + .bits_per_pixel = 16, + .regs_items = ARRAY_SIZE(ov10635_yuyv_regs), + .regs = ov10635_yuyv_regs, + }, + { + .code = MEDIA_BUS_FMT_UYVY8_1X16, + .pixel_order = CRL_PIXEL_ORDER_IGNORE, + .bits_per_pixel = 16, + .regs_items = ARRAY_SIZE(ov10635_uyvy_regs), + .regs = ov10635_uyvy_regs, + }, +}; + +static struct crl_v4l2_ctrl ov10635_v4l2_ctrls[] = { + { + .sd_type = CRL_SUBDEV_TYPE_BINNER, + .op_type = CRL_V4L2_CTRL_SET_OP, + .context = SENSOR_IDLE, + .ctrl_id = V4L2_CID_LINK_FREQ, + .name = "V4L2_CID_LINK_FREQ", + .type = CRL_V4L2_CTRL_TYPE_MENU_INT, + .data.v4l2_int_menu.def = 0, + .data.v4l2_int_menu.max = 0, + .data.v4l2_int_menu.menu = 0, + .flags = 0, + .impact = CRL_IMPACTS_NO_IMPACT, + .regs_items = 0, + .regs = 0, + .dep_items = 0, + .dep_ctrls = 0, + }, + { + .sd_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .op_type = CRL_V4L2_CTRL_GET_OP, + .context = SENSOR_POWERED_ON, + .ctrl_id = V4L2_CID_PIXEL_RATE, + .name = "V4L2_CID_PIXEL_RATE_PA", + .type = CRL_V4L2_CTRL_TYPE_INTEGER, + .data.std_data.min = 0, + .data.std_data.max = INT_MAX, + .data.std_data.step = 1, + .data.std_data.def = 0, + .flags = 0, + .impact = CRL_IMPACTS_NO_IMPACT, + .regs_items = 0, + .regs = 0, + .dep_items = 0, + .dep_ctrls = 0, + }, + { + .sd_type = CRL_SUBDEV_TYPE_BINNER, + .op_type = CRL_V4L2_CTRL_GET_OP, + .context = SENSOR_POWERED_ON, + .ctrl_id = V4L2_CID_PIXEL_RATE, + .name = "V4L2_CID_PIXEL_RATE_CSI", + .type = CRL_V4L2_CTRL_TYPE_INTEGER, + .data.std_data.min = 0, + .data.std_data.max = INT_MAX, + .data.std_data.step = 1, + .data.std_data.def = 0, + .flags = 0, + 
.impact = CRL_IMPACTS_NO_IMPACT, + .regs_items = 0, + .regs = 0, + .dep_items = 0, + .dep_ctrls = 0, + }, + { + .sd_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .op_type = CRL_V4L2_CTRL_SET_OP, + .context = SENSOR_POWERED_ON, + .ctrl_id = V4L2_CID_HFLIP, + .name = "V4L2_CID_HFLIP", + .type = CRL_V4L2_CTRL_TYPE_INTEGER, + .data.std_data.min = 0, + .data.std_data.max = 1, + .data.std_data.step = 1, + .data.std_data.def = 0, + .flags = 0, + .impact = CRL_IMPACTS_NO_IMPACT, + .ctrl = 0, + .regs_items = ARRAY_SIZE(ov10635_h_flip_regs), + .regs = ov10635_h_flip_regs, + .dep_items = 0, + .dep_ctrls = 0, + }, + { + .sd_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .op_type = CRL_V4L2_CTRL_SET_OP, + .context = SENSOR_POWERED_ON, + .ctrl_id = V4L2_CID_VFLIP, + .name = "V4L2_CID_VFLIP", + .type = CRL_V4L2_CTRL_TYPE_INTEGER, + .data.std_data.min = 0, + .data.std_data.max = 1, + .data.std_data.step = 1, + .data.std_data.def = 0, + .flags = 0, + .impact = CRL_IMPACTS_NO_IMPACT, + .ctrl = 0, + .regs_items = ARRAY_SIZE(ov10635_v_flip_regs), + .regs = ov10635_v_flip_regs, + .dep_items = 0, + .dep_ctrls = 0, + }, +}; + +struct crl_sensor_configuration ov10635_crl_configuration = { + + .powerup_regs_items = ARRAY_SIZE(ov10635_powerup_regs), + .powerup_regs = ov10635_powerup_regs, + + .poweroff_regs_items = ARRAY_SIZE(ov10635_poweroff_regs), + .poweroff_regs = ov10635_poweroff_regs, + + .power_items = ARRAY_SIZE(ov10635_power_items), + .power_entities = ov10635_power_items, + + .id_reg_items = ARRAY_SIZE(ov10635_sensor_detect_regset), + .id_regs = ov10635_sensor_detect_regset, + + .subdev_items = ARRAY_SIZE(ov10635_sensor_subdevs), + .subdevs = ov10635_sensor_subdevs, + + .pll_config_items = ARRAY_SIZE(ov10635_pll_configurations), + .pll_configs = ov10635_pll_configurations, + + .sensor_limits = &ov10635_sensor_limits, + + .modes_items = ARRAY_SIZE(ov10635_modes), + .modes = ov10635_modes, + + .streamon_regs_items = 0, + .streamon_regs = 0, + + .streamoff_regs_items = 0, + .streamoff_regs = 0, + + 
.v4l2_ctrls_items = ARRAY_SIZE(ov10635_v4l2_ctrls), + .v4l2_ctrl_bank = ov10635_v4l2_ctrls, + + .csi_fmts_items = ARRAY_SIZE(ov10635_crl_csi_data_fmt), + .csi_fmts = ov10635_crl_csi_data_fmt, + + .flip_items = ARRAY_SIZE(ov10635_flip_configurations), + .flip_data = ov10635_flip_configurations, +}; + +#endif /* __CRLMODULE_OV10635_CONFIGURATION_H_ */ diff --git a/drivers/media/i2c/crlmodule/crl_ov10640_configuration.h b/drivers/media/i2c/crlmodule/crl_ov10640_configuration.h new file mode 100644 index 0000000000000..ab8378bc09883 --- /dev/null +++ b/drivers/media/i2c/crlmodule/crl_ov10640_configuration.h @@ -0,0 +1,3235 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (C) 2017 - 2018 Intel Corporation + * + * Author: Shuguang Gong + * + */ + +#ifndef __CRLMODULE_OV10640_CONFIGURATION_H_ +#define __CRLMODULE_OV10640_CONFIGURATION_H_ + +#include "crlmodule-sensor-ds.h" + +#define OV10640_REG_STREAM 0x3012 +#define OV10640_REG_RESET 0x3013 + +#define OV10640_HMAX 65535 +#define OV10640_VMAX 65535 +#define OV10640_MAX_SHS1 (OV10640_VMAX - 6) +#define OV10640_MAX_SHS3 0x7F +#define OV10640_MAX_DGAIN 0x3FFF + +/* 800Mbps for ov10640 1280x1080 30fps */ +static struct crl_register_write_rep ov10640_pll_800mbps[] = { + {0x3000, CRL_REG_LEN_08BIT, 0x03}, + {0x3001, CRL_REG_LEN_08BIT, 0x48}, + {0x3002, CRL_REG_LEN_08BIT, 0x07}, + {0x3004, CRL_REG_LEN_08BIT, 0x03}, + {0x3005, CRL_REG_LEN_08BIT, 0x48}, + {0x3006, CRL_REG_LEN_08BIT, 0x07}, + {0x3007, CRL_REG_LEN_08BIT, 0x01}, +}; + +static struct crl_register_write_rep ov10640_powerup_standby[] = { + {OV10640_REG_RESET, CRL_REG_LEN_08BIT, 0x01}, +}; + +static struct crl_power_seq_entity ov10640_power_items[] = { + { + .type = CRL_POWER_ETY_GPIO_FROM_PDATA, + .val = 1, + .undo_val = 0, + }, +}; + +static struct crl_register_write_rep ov10640_1280_1080_LONG_RAW[] = { + {0x328a, CRL_REG_LEN_08BIT, 0x11}, + {0x313f, CRL_REG_LEN_08BIT, 0x80}, + {0x3132, CRL_REG_LEN_08BIT, 0x24}, + {0x3014, CRL_REG_LEN_08BIT, 0x03}, + {0x3023, 
CRL_REG_LEN_08BIT, 0x05}, + {0x3032, CRL_REG_LEN_08BIT, 0x35}, + {0x3033, CRL_REG_LEN_08BIT, 0x04}, + {0x3054, CRL_REG_LEN_08BIT, 0x00}, + {0x3055, CRL_REG_LEN_08BIT, 0x08}, + {0x3056, CRL_REG_LEN_08BIT, 0x01}, + {0x3057, CRL_REG_LEN_08BIT, 0xff}, + {0x3058, CRL_REG_LEN_08BIT, 0xaf}, + {0x3059, CRL_REG_LEN_08BIT, 0x44}, + {0x305a, CRL_REG_LEN_08BIT, 0x02}, + {0x305b, CRL_REG_LEN_08BIT, 0x00}, + {0x305c, CRL_REG_LEN_08BIT, 0x30}, + {0x305d, CRL_REG_LEN_08BIT, 0x9e}, + {0x305e, CRL_REG_LEN_08BIT, 0x19}, + {0x305f, CRL_REG_LEN_08BIT, 0x18}, + {0x3060, CRL_REG_LEN_08BIT, 0xf9}, + {0x3061, CRL_REG_LEN_08BIT, 0xf0}, + {0x308c, CRL_REG_LEN_08BIT, 0xB3}, + {0x308f, CRL_REG_LEN_08BIT, 0x10}, + {0x3091, CRL_REG_LEN_08BIT, 0x00}, + {0x3093, CRL_REG_LEN_08BIT, 0x01}, + {0x30a3, CRL_REG_LEN_08BIT, 0x08}, + {0x30ad, CRL_REG_LEN_08BIT, 0x03}, + {0x30ae, CRL_REG_LEN_08BIT, 0x80}, + {0x30af, CRL_REG_LEN_08BIT, 0x80}, + {0x30b0, CRL_REG_LEN_08BIT, 0xff}, + {0x30b1, CRL_REG_LEN_08BIT, 0x3f}, + {0x30b2, CRL_REG_LEN_08BIT, 0x22}, + {0x30b9, CRL_REG_LEN_08BIT, 0x22}, + {0x30bb, CRL_REG_LEN_08BIT, 0x00}, + {0x30bc, CRL_REG_LEN_08BIT, 0x00}, + {0x30bd, CRL_REG_LEN_08BIT, 0x00}, + {0x30be, CRL_REG_LEN_08BIT, 0x00}, + {0x30bf, CRL_REG_LEN_08BIT, 0x00}, + {0x30c0, CRL_REG_LEN_08BIT, 0x00}, + {0x30c1, CRL_REG_LEN_08BIT, 0x00}, + {0x30c2, CRL_REG_LEN_08BIT, 0x00}, + {0x30c3, CRL_REG_LEN_08BIT, 0x00}, + {0x30c4, CRL_REG_LEN_08BIT, 0x80}, + {0x30c5, CRL_REG_LEN_08BIT, 0x00}, + {0x30c6, CRL_REG_LEN_08BIT, 0x80}, + {0x30c7, CRL_REG_LEN_08BIT, 0x00}, + {0x30c8, CRL_REG_LEN_08BIT, 0x80}, + {0x3119, CRL_REG_LEN_08BIT, 0x45}, + {0x311a, CRL_REG_LEN_08BIT, 0x01}, + {0x311b, CRL_REG_LEN_08BIT, 0x4a}, + {0x3074, CRL_REG_LEN_08BIT, 0x00}, + {0x3075, CRL_REG_LEN_08BIT, 0x00}, + {0x3076, CRL_REG_LEN_08BIT, 0x00}, + {0x3077, CRL_REG_LEN_08BIT, 0x02}, + {0x3078, CRL_REG_LEN_08BIT, 0x05}, + {0x3079, CRL_REG_LEN_08BIT, 0x07}, + {0x307a, CRL_REG_LEN_08BIT, 0x04}, + {0x307b, CRL_REG_LEN_08BIT, 0x41}, + {0x307c, 
CRL_REG_LEN_08BIT, 0x05}, + {0x307d, CRL_REG_LEN_08BIT, 0x00}, + {0x307e, CRL_REG_LEN_08BIT, 0x04}, + {0x307f, CRL_REG_LEN_08BIT, 0x38}, + {0x3084, CRL_REG_LEN_08BIT, 0x00}, + {0x3085, CRL_REG_LEN_08BIT, 0x04}, + {0x3086, CRL_REG_LEN_08BIT, 0x00}, + {0x3087, CRL_REG_LEN_08BIT, 0x04}, + {0x3088, CRL_REG_LEN_08BIT, 0x00}, + {0x3089, CRL_REG_LEN_08BIT, 0x40}, + {0x308d, CRL_REG_LEN_08BIT, 0x92}, + {0x3094, CRL_REG_LEN_08BIT, 0xa5}, + {0x30fa, CRL_REG_LEN_08BIT, 0x06}, + {0x3120, CRL_REG_LEN_08BIT, 0x00}, + {0x3121, CRL_REG_LEN_08BIT, 0x01}, + {0x3122, CRL_REG_LEN_08BIT, 0x00}, + {0x3127, CRL_REG_LEN_08BIT, 0x63}, + {0x3128, CRL_REG_LEN_08BIT, 0xc0}, + {0x3129, CRL_REG_LEN_08BIT, 0x00}, + {0x31be, CRL_REG_LEN_08BIT, 0x01}, + {0x30a5, CRL_REG_LEN_08BIT, 0x78}, + {0x30a6, CRL_REG_LEN_08BIT, 0x40}, + {0x30a7, CRL_REG_LEN_08BIT, 0x78}, + {0x30a8, CRL_REG_LEN_08BIT, 0x80}, + {0x30a9, CRL_REG_LEN_08BIT, 0x78}, + {0x30aa, CRL_REG_LEN_08BIT, 0xe0}, + {0x30ab, CRL_REG_LEN_08BIT, 0xf9}, + {0x30ac, CRL_REG_LEN_08BIT, 0xc0}, + {0x3440, CRL_REG_LEN_08BIT, 0x04}, + {0x3444, CRL_REG_LEN_08BIT, 0x28}, + {0x344e, CRL_REG_LEN_08BIT, 0x2c}, + {0x3457, CRL_REG_LEN_08BIT, 0x33}, + {0x345e, CRL_REG_LEN_08BIT, 0x38}, + {0x3461, CRL_REG_LEN_08BIT, 0xa8}, + {0x7002, CRL_REG_LEN_08BIT, 0xaa}, + {0x7001, CRL_REG_LEN_08BIT, 0xdf}, + {0x7048, CRL_REG_LEN_08BIT, 0x00}, + {0x7049, CRL_REG_LEN_08BIT, 0x02}, + {0x704a, CRL_REG_LEN_08BIT, 0x02}, + {0x704b, CRL_REG_LEN_08BIT, 0x00}, + {0x704c, CRL_REG_LEN_08BIT, 0x01}, + {0x704d, CRL_REG_LEN_08BIT, 0x00}, + {0x7043, CRL_REG_LEN_08BIT, 0x04}, + {0x7040, CRL_REG_LEN_08BIT, 0x3c}, + {0x7047, CRL_REG_LEN_08BIT, 0x00}, + {0x7044, CRL_REG_LEN_08BIT, 0x01}, + {0x7000, CRL_REG_LEN_08BIT, 0x1f}, + {0x7084, CRL_REG_LEN_08BIT, 0x01}, + {0x7085, CRL_REG_LEN_08BIT, 0x03}, + {0x7086, CRL_REG_LEN_08BIT, 0x02}, + {0x7087, CRL_REG_LEN_08BIT, 0x40}, + {0x7088, CRL_REG_LEN_08BIT, 0x01}, + {0x7089, CRL_REG_LEN_08BIT, 0x20}, + {0x707f, CRL_REG_LEN_08BIT, 0x04}, + {0x707c, 
CRL_REG_LEN_08BIT, 0x3c}, + {0x7083, CRL_REG_LEN_08BIT, 0x00}, + {0x7080, CRL_REG_LEN_08BIT, 0x01}, + {0x7003, CRL_REG_LEN_08BIT, 0xdf}, + {0x70c0, CRL_REG_LEN_08BIT, 0x00}, + {0x70c1, CRL_REG_LEN_08BIT, 0x02}, + {0x70c2, CRL_REG_LEN_08BIT, 0x02}, + {0x70c3, CRL_REG_LEN_08BIT, 0x00}, + {0x70c4, CRL_REG_LEN_08BIT, 0x01}, + {0x70c5, CRL_REG_LEN_08BIT, 0x00}, + {0x70b8, CRL_REG_LEN_08BIT, 0x03}, + {0x70b9, CRL_REG_LEN_08BIT, 0x98}, + {0x70bc, CRL_REG_LEN_08BIT, 0x00}, + {0x70bd, CRL_REG_LEN_08BIT, 0x80}, + {0x7004, CRL_REG_LEN_08BIT, 0x02}, + {0x7005, CRL_REG_LEN_08BIT, 0x00}, + {0x7006, CRL_REG_LEN_08BIT, 0x01}, + {0x7007, CRL_REG_LEN_08BIT, 0x80}, + {0x7008, CRL_REG_LEN_08BIT, 0x02}, + {0x7009, CRL_REG_LEN_08BIT, 0x00}, + {0x700a, CRL_REG_LEN_08BIT, 0x04}, + {0x700b, CRL_REG_LEN_08BIT, 0x00}, + {0x700e, CRL_REG_LEN_08BIT, 0x00}, + {0x700f, CRL_REG_LEN_08BIT, 0x60}, + {0x701a, CRL_REG_LEN_08BIT, 0x02}, + {0x701b, CRL_REG_LEN_08BIT, 0x00}, + {0x701c, CRL_REG_LEN_08BIT, 0x01}, + {0x701d, CRL_REG_LEN_08BIT, 0x80}, + {0x701e, CRL_REG_LEN_08BIT, 0x02}, + {0x701f, CRL_REG_LEN_08BIT, 0x00}, + {0x7020, CRL_REG_LEN_08BIT, 0x04}, + {0x7021, CRL_REG_LEN_08BIT, 0x00}, + {0x7024, CRL_REG_LEN_08BIT, 0x00}, + {0x7025, CRL_REG_LEN_08BIT, 0x60}, + {0x70e7, CRL_REG_LEN_08BIT, 0x00}, + {0x70e4, CRL_REG_LEN_08BIT, 0x10}, + {0x70e5, CRL_REG_LEN_08BIT, 0x00}, + {0x70e6, CRL_REG_LEN_08BIT, 0x00}, + {0x70eb, CRL_REG_LEN_08BIT, 0x00}, + {0x70e8, CRL_REG_LEN_08BIT, 0x10}, + {0x70e9, CRL_REG_LEN_08BIT, 0x00}, + {0x70ea, CRL_REG_LEN_08BIT, 0x00}, + {0x70ef, CRL_REG_LEN_08BIT, 0x00}, + {0x70ec, CRL_REG_LEN_08BIT, 0xfd}, + {0x70ed, CRL_REG_LEN_08BIT, 0x00}, + {0x70ee, CRL_REG_LEN_08BIT, 0x00}, + {0x70eb, CRL_REG_LEN_08BIT, 0x00}, + {0x70f0, CRL_REG_LEN_08BIT, 0xfd}, + {0x70f1, CRL_REG_LEN_08BIT, 0x00}, + {0x70f2, CRL_REG_LEN_08BIT, 0x00}, + {0x30fb, CRL_REG_LEN_08BIT, 0x06}, + {0x30fc, CRL_REG_LEN_08BIT, 0x80}, + {0x30fd, CRL_REG_LEN_08BIT, 0x02}, + {0x30fe, CRL_REG_LEN_08BIT, 0x93}, + {0x6000, 
CRL_REG_LEN_08BIT, 0xc1}, + {0x6001, CRL_REG_LEN_08BIT, 0xb9}, + {0x6002, CRL_REG_LEN_08BIT, 0xba}, + {0x6003, CRL_REG_LEN_08BIT, 0xa4}, + {0x6004, CRL_REG_LEN_08BIT, 0xb5}, + {0x6005, CRL_REG_LEN_08BIT, 0xa0}, + {0x6006, CRL_REG_LEN_08BIT, 0x82}, + {0x6007, CRL_REG_LEN_08BIT, 0xa7}, + {0x6008, CRL_REG_LEN_08BIT, 0xb7}, + {0x6009, CRL_REG_LEN_08BIT, 0x5c}, + {0x600a, CRL_REG_LEN_08BIT, 0x9e}, + {0x600b, CRL_REG_LEN_08BIT, 0xc0}, + {0x600c, CRL_REG_LEN_08BIT, 0xd2}, + {0x600d, CRL_REG_LEN_08BIT, 0x33}, + {0x600e, CRL_REG_LEN_08BIT, 0xcc}, + {0x600f, CRL_REG_LEN_08BIT, 0xe2}, + {0x6010, CRL_REG_LEN_08BIT, 0xc1}, + {0x6011, CRL_REG_LEN_08BIT, 0xab}, + {0x6012, CRL_REG_LEN_08BIT, 0xb7}, + {0x6013, CRL_REG_LEN_08BIT, 0x00}, + {0x6014, CRL_REG_LEN_08BIT, 0x00}, + {0x6015, CRL_REG_LEN_08BIT, 0x00}, + {0x6016, CRL_REG_LEN_08BIT, 0x00}, + {0x6017, CRL_REG_LEN_08BIT, 0x00}, + {0x6018, CRL_REG_LEN_08BIT, 0x00}, + {0x6019, CRL_REG_LEN_08BIT, 0x00}, + {0x601a, CRL_REG_LEN_08BIT, 0x00}, + {0x601b, CRL_REG_LEN_08BIT, 0x00}, + {0x601c, CRL_REG_LEN_08BIT, 0x00}, + {0x601d, CRL_REG_LEN_08BIT, 0x00}, + {0x601e, CRL_REG_LEN_08BIT, 0x9c}, + {0x601f, CRL_REG_LEN_08BIT, 0x94}, + {0x6020, CRL_REG_LEN_08BIT, 0x90}, + {0x6021, CRL_REG_LEN_08BIT, 0xc5}, + {0x6022, CRL_REG_LEN_08BIT, 0x01}, + {0x6023, CRL_REG_LEN_08BIT, 0x54}, + {0x6024, CRL_REG_LEN_08BIT, 0x2a}, + {0x6025, CRL_REG_LEN_08BIT, 0x61}, + {0x6026, CRL_REG_LEN_08BIT, 0xd2}, + {0x6027, CRL_REG_LEN_08BIT, 0xcc}, + {0x6028, CRL_REG_LEN_08BIT, 0x04}, + {0x6029, CRL_REG_LEN_08BIT, 0x35}, + {0x602a, CRL_REG_LEN_08BIT, 0xb1}, + {0x602b, CRL_REG_LEN_08BIT, 0xb2}, + {0x602c, CRL_REG_LEN_08BIT, 0xb3}, + {0x602d, CRL_REG_LEN_08BIT, 0xd2}, + {0x602e, CRL_REG_LEN_08BIT, 0xd3}, + {0x602f, CRL_REG_LEN_08BIT, 0x12}, + {0x6030, CRL_REG_LEN_08BIT, 0x31}, + {0x6031, CRL_REG_LEN_08BIT, 0xcc}, + {0x6032, CRL_REG_LEN_08BIT, 0x06}, + {0x6033, CRL_REG_LEN_08BIT, 0xd2}, + {0x6034, CRL_REG_LEN_08BIT, 0xc4}, + {0x6035, CRL_REG_LEN_08BIT, 0xce}, + {0x6036, 
CRL_REG_LEN_08BIT, 0x18}, + {0x6037, CRL_REG_LEN_08BIT, 0xcf}, + {0x6038, CRL_REG_LEN_08BIT, 0x1e}, + {0x6039, CRL_REG_LEN_08BIT, 0xd0}, + {0x603a, CRL_REG_LEN_08BIT, 0x24}, + {0x603b, CRL_REG_LEN_08BIT, 0xc5}, + {0x603c, CRL_REG_LEN_08BIT, 0xd2}, + {0x603d, CRL_REG_LEN_08BIT, 0xbc}, + {0x603e, CRL_REG_LEN_08BIT, 0xcc}, + {0x603f, CRL_REG_LEN_08BIT, 0x52}, + {0x6040, CRL_REG_LEN_08BIT, 0x2b}, + {0x6041, CRL_REG_LEN_08BIT, 0xd2}, + {0x6042, CRL_REG_LEN_08BIT, 0xd3}, + {0x6043, CRL_REG_LEN_08BIT, 0x02}, + {0x6044, CRL_REG_LEN_08BIT, 0xcc}, + {0x6045, CRL_REG_LEN_08BIT, 0x0a}, + {0x6046, CRL_REG_LEN_08BIT, 0xd2}, + {0x6047, CRL_REG_LEN_08BIT, 0xd3}, + {0x6048, CRL_REG_LEN_08BIT, 0x0f}, + {0x6049, CRL_REG_LEN_08BIT, 0x1a}, + {0x604a, CRL_REG_LEN_08BIT, 0x2a}, + {0x604b, CRL_REG_LEN_08BIT, 0xd4}, + {0x604c, CRL_REG_LEN_08BIT, 0xf6}, + {0x604d, CRL_REG_LEN_08BIT, 0xba}, + {0x604e, CRL_REG_LEN_08BIT, 0x56}, + {0x604f, CRL_REG_LEN_08BIT, 0xd3}, + {0x6050, CRL_REG_LEN_08BIT, 0x2e}, + {0x6051, CRL_REG_LEN_08BIT, 0x54}, + {0x6052, CRL_REG_LEN_08BIT, 0x26}, + {0x6053, CRL_REG_LEN_08BIT, 0xd2}, + {0x6054, CRL_REG_LEN_08BIT, 0xcc}, + {0x6055, CRL_REG_LEN_08BIT, 0x60}, + {0x6056, CRL_REG_LEN_08BIT, 0xd2}, + {0x6057, CRL_REG_LEN_08BIT, 0xd3}, + {0x6058, CRL_REG_LEN_08BIT, 0x27}, + {0x6059, CRL_REG_LEN_08BIT, 0x27}, + {0x605a, CRL_REG_LEN_08BIT, 0x08}, + {0x605b, CRL_REG_LEN_08BIT, 0x1a}, + {0x605c, CRL_REG_LEN_08BIT, 0xcc}, + {0x605d, CRL_REG_LEN_08BIT, 0x88}, + {0x605e, CRL_REG_LEN_08BIT, 0x00}, + {0x605f, CRL_REG_LEN_08BIT, 0x12}, + {0x6060, CRL_REG_LEN_08BIT, 0x2c}, + {0x6061, CRL_REG_LEN_08BIT, 0x60}, + {0x6062, CRL_REG_LEN_08BIT, 0xc2}, + {0x6063, CRL_REG_LEN_08BIT, 0xb9}, + {0x6064, CRL_REG_LEN_08BIT, 0xa5}, + {0x6065, CRL_REG_LEN_08BIT, 0xb5}, + {0x6066, CRL_REG_LEN_08BIT, 0xa0}, + {0x6067, CRL_REG_LEN_08BIT, 0x82}, + {0x6068, CRL_REG_LEN_08BIT, 0x5c}, + {0x6069, CRL_REG_LEN_08BIT, 0xd4}, + {0x606a, CRL_REG_LEN_08BIT, 0xbe}, + {0x606b, CRL_REG_LEN_08BIT, 0xd4}, + {0x606c, 
CRL_REG_LEN_08BIT, 0xbe}, + {0x606d, CRL_REG_LEN_08BIT, 0xd3}, + {0x606e, CRL_REG_LEN_08BIT, 0x01}, + {0x606f, CRL_REG_LEN_08BIT, 0x7c}, + {0x6070, CRL_REG_LEN_08BIT, 0x74}, + {0x6071, CRL_REG_LEN_08BIT, 0x00}, + {0x6072, CRL_REG_LEN_08BIT, 0x61}, + {0x6073, CRL_REG_LEN_08BIT, 0x2a}, + {0x6074, CRL_REG_LEN_08BIT, 0xd2}, + {0x6075, CRL_REG_LEN_08BIT, 0xcc}, + {0x6076, CRL_REG_LEN_08BIT, 0xdf}, + {0x6077, CRL_REG_LEN_08BIT, 0xc6}, + {0x6078, CRL_REG_LEN_08BIT, 0x35}, + {0x6079, CRL_REG_LEN_08BIT, 0xd2}, + {0x607a, CRL_REG_LEN_08BIT, 0xcc}, + {0x607b, CRL_REG_LEN_08BIT, 0x06}, + {0x607c, CRL_REG_LEN_08BIT, 0x31}, + {0x607d, CRL_REG_LEN_08BIT, 0xd2}, + {0x607e, CRL_REG_LEN_08BIT, 0xc5}, + {0x607f, CRL_REG_LEN_08BIT, 0xbb}, + {0x6080, CRL_REG_LEN_08BIT, 0xcc}, + {0x6081, CRL_REG_LEN_08BIT, 0x18}, + {0x6082, CRL_REG_LEN_08BIT, 0xc6}, + {0x6083, CRL_REG_LEN_08BIT, 0xd2}, + {0x6084, CRL_REG_LEN_08BIT, 0xbd}, + {0x6085, CRL_REG_LEN_08BIT, 0xcc}, + {0x6086, CRL_REG_LEN_08BIT, 0x52}, + {0x6087, CRL_REG_LEN_08BIT, 0x2b}, + {0x6088, CRL_REG_LEN_08BIT, 0xd2}, + {0x6089, CRL_REG_LEN_08BIT, 0xd3}, + {0x608a, CRL_REG_LEN_08BIT, 0x01}, + {0x608b, CRL_REG_LEN_08BIT, 0xcc}, + {0x608c, CRL_REG_LEN_08BIT, 0x0a}, + {0x608d, CRL_REG_LEN_08BIT, 0xd2}, + {0x608e, CRL_REG_LEN_08BIT, 0xd3}, + {0x608f, CRL_REG_LEN_08BIT, 0x0f}, + {0x6090, CRL_REG_LEN_08BIT, 0x1a}, + {0x6091, CRL_REG_LEN_08BIT, 0x71}, + {0x6092, CRL_REG_LEN_08BIT, 0x2a}, + {0x6093, CRL_REG_LEN_08BIT, 0xd4}, + {0x6094, CRL_REG_LEN_08BIT, 0xf6}, + {0x6095, CRL_REG_LEN_08BIT, 0xd3}, + {0x6096, CRL_REG_LEN_08BIT, 0x22}, + {0x6097, CRL_REG_LEN_08BIT, 0x70}, + {0x6098, CRL_REG_LEN_08BIT, 0xca}, + {0x6099, CRL_REG_LEN_08BIT, 0x26}, + {0x609a, CRL_REG_LEN_08BIT, 0xd2}, + {0x609b, CRL_REG_LEN_08BIT, 0xcc}, + {0x609c, CRL_REG_LEN_08BIT, 0x60}, + {0x609d, CRL_REG_LEN_08BIT, 0xd2}, + {0x609e, CRL_REG_LEN_08BIT, 0xd3}, + {0x609f, CRL_REG_LEN_08BIT, 0x27}, + {0x60a0, CRL_REG_LEN_08BIT, 0x27}, + {0x60a1, CRL_REG_LEN_08BIT, 0x08}, + {0x60a2, 
CRL_REG_LEN_08BIT, 0x1a}, + {0x60a3, CRL_REG_LEN_08BIT, 0xcc}, + {0x60a4, CRL_REG_LEN_08BIT, 0x88}, + {0x60a5, CRL_REG_LEN_08BIT, 0x12}, + {0x60a6, CRL_REG_LEN_08BIT, 0x2c}, + {0x60a7, CRL_REG_LEN_08BIT, 0x60}, + {0x60a8, CRL_REG_LEN_08BIT, 0x00}, + {0x60a9, CRL_REG_LEN_08BIT, 0x00}, + {0x60aa, CRL_REG_LEN_08BIT, 0xc0}, + {0x60ab, CRL_REG_LEN_08BIT, 0xb9}, + {0x60ac, CRL_REG_LEN_08BIT, 0xa3}, + {0x60ad, CRL_REG_LEN_08BIT, 0xb5}, + {0x60ae, CRL_REG_LEN_08BIT, 0x00}, + {0x60af, CRL_REG_LEN_08BIT, 0xa0}, + {0x60b0, CRL_REG_LEN_08BIT, 0x82}, + {0x60b1, CRL_REG_LEN_08BIT, 0x5c}, + {0x60b2, CRL_REG_LEN_08BIT, 0xd4}, + {0x60b3, CRL_REG_LEN_08BIT, 0xa0}, + {0x60b4, CRL_REG_LEN_08BIT, 0x9d}, + {0x60b5, CRL_REG_LEN_08BIT, 0xd3}, + {0x60b6, CRL_REG_LEN_08BIT, 0x26}, + {0x60b7, CRL_REG_LEN_08BIT, 0xb0}, + {0x60b8, CRL_REG_LEN_08BIT, 0xb7}, + {0x60b9, CRL_REG_LEN_08BIT, 0x00}, + {0x60ba, CRL_REG_LEN_08BIT, 0xd3}, + {0x60bb, CRL_REG_LEN_08BIT, 0x0a}, + {0x60bc, CRL_REG_LEN_08BIT, 0xd3}, + {0x60bd, CRL_REG_LEN_08BIT, 0x10}, + {0x60be, CRL_REG_LEN_08BIT, 0x9c}, + {0x60bf, CRL_REG_LEN_08BIT, 0x94}, + {0x60c0, CRL_REG_LEN_08BIT, 0x90}, + {0x60c1, CRL_REG_LEN_08BIT, 0xc8}, + {0x60c2, CRL_REG_LEN_08BIT, 0xba}, + {0x60c3, CRL_REG_LEN_08BIT, 0x7c}, + {0x60c4, CRL_REG_LEN_08BIT, 0x74}, + {0x60c5, CRL_REG_LEN_08BIT, 0x00}, + {0x60c6, CRL_REG_LEN_08BIT, 0x61}, + {0x60c7, CRL_REG_LEN_08BIT, 0x2a}, + {0x60c8, CRL_REG_LEN_08BIT, 0x00}, + {0x60c9, CRL_REG_LEN_08BIT, 0xd2}, + {0x60ca, CRL_REG_LEN_08BIT, 0xcc}, + {0x60cb, CRL_REG_LEN_08BIT, 0xdf}, + {0x60cc, CRL_REG_LEN_08BIT, 0xc4}, + {0x60cd, CRL_REG_LEN_08BIT, 0x35}, + {0x60ce, CRL_REG_LEN_08BIT, 0xd2}, + {0x60cf, CRL_REG_LEN_08BIT, 0xcc}, + {0x60d0, CRL_REG_LEN_08BIT, 0x06}, + {0x60d1, CRL_REG_LEN_08BIT, 0x31}, + {0x60d2, CRL_REG_LEN_08BIT, 0xd2}, + {0x60d3, CRL_REG_LEN_08BIT, 0xcc}, + {0x60d4, CRL_REG_LEN_08BIT, 0x15}, + {0x60d5, CRL_REG_LEN_08BIT, 0xd2}, + {0x60d6, CRL_REG_LEN_08BIT, 0xbb}, + {0x60d7, CRL_REG_LEN_08BIT, 0xcc}, + {0x60d8, 
CRL_REG_LEN_08BIT, 0x1a}, + {0x60d9, CRL_REG_LEN_08BIT, 0xd2}, + {0x60da, CRL_REG_LEN_08BIT, 0xbe}, + {0x60db, CRL_REG_LEN_08BIT, 0xce}, + {0x60dc, CRL_REG_LEN_08BIT, 0x52}, + {0x60dd, CRL_REG_LEN_08BIT, 0xcf}, + {0x60de, CRL_REG_LEN_08BIT, 0x56}, + {0x60df, CRL_REG_LEN_08BIT, 0xd0}, + {0x60e0, CRL_REG_LEN_08BIT, 0x5b}, + {0x60e1, CRL_REG_LEN_08BIT, 0x2b}, + {0x60e2, CRL_REG_LEN_08BIT, 0xd2}, + {0x60e3, CRL_REG_LEN_08BIT, 0xd3}, + {0x60e4, CRL_REG_LEN_08BIT, 0x01}, + {0x60e5, CRL_REG_LEN_08BIT, 0xcc}, + {0x60e6, CRL_REG_LEN_08BIT, 0x0a}, + {0x60e7, CRL_REG_LEN_08BIT, 0xd2}, + {0x60e8, CRL_REG_LEN_08BIT, 0xd3}, + {0x60e9, CRL_REG_LEN_08BIT, 0x0f}, + {0x60ea, CRL_REG_LEN_08BIT, 0xd9}, + {0x60eb, CRL_REG_LEN_08BIT, 0xc7}, + {0x60ec, CRL_REG_LEN_08BIT, 0xda}, + {0x60ed, CRL_REG_LEN_08BIT, 0xce}, + {0x60ee, CRL_REG_LEN_08BIT, 0x1a}, + {0x60ef, CRL_REG_LEN_08BIT, 0xd4}, + {0x60f0, CRL_REG_LEN_08BIT, 0xf6}, + {0x60f1, CRL_REG_LEN_08BIT, 0xd4}, + {0x60f2, CRL_REG_LEN_08BIT, 0xa9}, + {0x60f3, CRL_REG_LEN_08BIT, 0x27}, + {0x60f4, CRL_REG_LEN_08BIT, 0x00}, + {0x60f5, CRL_REG_LEN_08BIT, 0xd2}, + {0x60f6, CRL_REG_LEN_08BIT, 0xcc}, + {0x60f7, CRL_REG_LEN_08BIT, 0x60}, + {0x60f8, CRL_REG_LEN_08BIT, 0xd2}, + {0x60f9, CRL_REG_LEN_08BIT, 0xd3}, + {0x60fa, CRL_REG_LEN_08BIT, 0x2d}, + {0x60fb, CRL_REG_LEN_08BIT, 0xd9}, + {0x60fc, CRL_REG_LEN_08BIT, 0xdf}, + {0x60fd, CRL_REG_LEN_08BIT, 0xda}, + {0x60fe, CRL_REG_LEN_08BIT, 0xe5}, + {0x60ff, CRL_REG_LEN_08BIT, 0x1a}, + {0x6100, CRL_REG_LEN_08BIT, 0x12}, + {0x6101, CRL_REG_LEN_08BIT, 0xcc}, + {0x6102, CRL_REG_LEN_08BIT, 0x88}, + {0x6103, CRL_REG_LEN_08BIT, 0xd6}, + {0x6104, CRL_REG_LEN_08BIT, 0xb1}, + {0x6105, CRL_REG_LEN_08BIT, 0xb9}, + {0x6106, CRL_REG_LEN_08BIT, 0xba}, + {0x6107, CRL_REG_LEN_08BIT, 0xaf}, + {0x6108, CRL_REG_LEN_08BIT, 0xdc}, + {0x6109, CRL_REG_LEN_08BIT, 0x00}, + {0x610a, CRL_REG_LEN_08BIT, 0xcb}, + {0x610b, CRL_REG_LEN_08BIT, 0xc3}, + {0x610c, CRL_REG_LEN_08BIT, 0xb9}, + {0x610d, CRL_REG_LEN_08BIT, 0xa4}, + {0x610e, 
CRL_REG_LEN_08BIT, 0xb5}, + {0x610f, CRL_REG_LEN_08BIT, 0x5c}, + {0x6110, CRL_REG_LEN_08BIT, 0x12}, + {0x6111, CRL_REG_LEN_08BIT, 0x2a}, + {0x6112, CRL_REG_LEN_08BIT, 0x61}, + {0x6113, CRL_REG_LEN_08BIT, 0xd2}, + {0x6114, CRL_REG_LEN_08BIT, 0xcc}, + {0x6115, CRL_REG_LEN_08BIT, 0xdf}, + {0x6116, CRL_REG_LEN_08BIT, 0xc7}, + {0x6117, CRL_REG_LEN_08BIT, 0x35}, + {0x6118, CRL_REG_LEN_08BIT, 0xd2}, + {0x6119, CRL_REG_LEN_08BIT, 0xcc}, + {0x611a, CRL_REG_LEN_08BIT, 0x06}, + {0x611b, CRL_REG_LEN_08BIT, 0x31}, + {0x611c, CRL_REG_LEN_08BIT, 0xc6}, + {0x611d, CRL_REG_LEN_08BIT, 0xbb}, + {0x611e, CRL_REG_LEN_08BIT, 0xd2}, + {0x611f, CRL_REG_LEN_08BIT, 0xcc}, + {0x6120, CRL_REG_LEN_08BIT, 0x18}, + {0x6121, CRL_REG_LEN_08BIT, 0xd2}, + {0x6122, CRL_REG_LEN_08BIT, 0xbe}, + {0x6123, CRL_REG_LEN_08BIT, 0xcc}, + {0x6124, CRL_REG_LEN_08BIT, 0x52}, + {0x6125, CRL_REG_LEN_08BIT, 0xc7}, + {0x6126, CRL_REG_LEN_08BIT, 0xd2}, + {0x6127, CRL_REG_LEN_08BIT, 0xcc}, + {0x6128, CRL_REG_LEN_08BIT, 0x0a}, + {0x6129, CRL_REG_LEN_08BIT, 0xb4}, + {0x612a, CRL_REG_LEN_08BIT, 0xb7}, + {0x612b, CRL_REG_LEN_08BIT, 0x94}, + {0x612c, CRL_REG_LEN_08BIT, 0xd2}, + {0x612d, CRL_REG_LEN_08BIT, 0x12}, + {0x612e, CRL_REG_LEN_08BIT, 0x26}, + {0x612f, CRL_REG_LEN_08BIT, 0x42}, + {0x6130, CRL_REG_LEN_08BIT, 0x46}, + {0x6131, CRL_REG_LEN_08BIT, 0x42}, + {0x6132, CRL_REG_LEN_08BIT, 0xd3}, + {0x6133, CRL_REG_LEN_08BIT, 0x20}, + {0x6134, CRL_REG_LEN_08BIT, 0x27}, + {0x6135, CRL_REG_LEN_08BIT, 0x00}, + {0x6136, CRL_REG_LEN_08BIT, 0x1a}, + {0x6137, CRL_REG_LEN_08BIT, 0xcc}, + {0x6138, CRL_REG_LEN_08BIT, 0x88}, + {0x6139, CRL_REG_LEN_08BIT, 0x60}, + {0x613a, CRL_REG_LEN_08BIT, 0x2c}, + {0x613b, CRL_REG_LEN_08BIT, 0x12}, + {0x613c, CRL_REG_LEN_08BIT, 0x40}, + {0x613d, CRL_REG_LEN_08BIT, 0xb8}, + {0x613e, CRL_REG_LEN_08BIT, 0x90}, + {0x613f, CRL_REG_LEN_08BIT, 0xd5}, + {0x6140, CRL_REG_LEN_08BIT, 0xba}, + {0x6141, CRL_REG_LEN_08BIT, 0x00}, + {0x6142, CRL_REG_LEN_08BIT, 0x00}, + {0x6143, CRL_REG_LEN_08BIT, 0x00}, + {0x6144, 
CRL_REG_LEN_08BIT, 0x00}, + {0x6145, CRL_REG_LEN_08BIT, 0x00}, + {0x6146, CRL_REG_LEN_08BIT, 0x00}, + {0x6147, CRL_REG_LEN_08BIT, 0xaa}, + {0x6148, CRL_REG_LEN_08BIT, 0xb7}, + {0x6149, CRL_REG_LEN_08BIT, 0x00}, + {0x614a, CRL_REG_LEN_08BIT, 0x00}, + {0x614b, CRL_REG_LEN_08BIT, 0x00}, + {0x614c, CRL_REG_LEN_08BIT, 0x00}, + {0x614d, CRL_REG_LEN_08BIT, 0xa6}, + {0x614e, CRL_REG_LEN_08BIT, 0xb7}, + {0x614f, CRL_REG_LEN_08BIT, 0x00}, + {0x6150, CRL_REG_LEN_08BIT, 0xd5}, + {0x6151, CRL_REG_LEN_08BIT, 0x00}, + {0x6152, CRL_REG_LEN_08BIT, 0x71}, + {0x6153, CRL_REG_LEN_08BIT, 0xd3}, + {0x6154, CRL_REG_LEN_08BIT, 0x30}, + {0x6155, CRL_REG_LEN_08BIT, 0xba}, + {0x6156, CRL_REG_LEN_08BIT, 0x00}, + {0x6157, CRL_REG_LEN_08BIT, 0x00}, + {0x6158, CRL_REG_LEN_08BIT, 0x00}, + {0x6159, CRL_REG_LEN_08BIT, 0x00}, + {0x615a, CRL_REG_LEN_08BIT, 0xd3}, + {0x615b, CRL_REG_LEN_08BIT, 0x10}, + {0x615c, CRL_REG_LEN_08BIT, 0x70}, + {0x615d, CRL_REG_LEN_08BIT, 0x00}, + {0x615e, CRL_REG_LEN_08BIT, 0x00}, + {0x615f, CRL_REG_LEN_08BIT, 0x00}, + {0x6160, CRL_REG_LEN_08BIT, 0x00}, + {0x6161, CRL_REG_LEN_08BIT, 0xd5}, + {0x6162, CRL_REG_LEN_08BIT, 0xba}, + {0x6163, CRL_REG_LEN_08BIT, 0xb0}, + {0x6164, CRL_REG_LEN_08BIT, 0xb7}, + {0x6165, CRL_REG_LEN_08BIT, 0x00}, + {0x6166, CRL_REG_LEN_08BIT, 0x9d}, + {0x6167, CRL_REG_LEN_08BIT, 0xd3}, + {0x6168, CRL_REG_LEN_08BIT, 0x0a}, + {0x6169, CRL_REG_LEN_08BIT, 0x9d}, + {0x616a, CRL_REG_LEN_08BIT, 0x9d}, + {0x616b, CRL_REG_LEN_08BIT, 0xd3}, + {0x616c, CRL_REG_LEN_08BIT, 0x10}, + {0x616d, CRL_REG_LEN_08BIT, 0x9c}, + {0x616e, CRL_REG_LEN_08BIT, 0x94}, + {0x616f, CRL_REG_LEN_08BIT, 0x90}, + {0x6170, CRL_REG_LEN_08BIT, 0xc8}, + {0x6171, CRL_REG_LEN_08BIT, 0xba}, + {0x6172, CRL_REG_LEN_08BIT, 0xd2}, + {0x6173, CRL_REG_LEN_08BIT, 0x60}, + {0x6174, CRL_REG_LEN_08BIT, 0x2c}, + {0x6175, CRL_REG_LEN_08BIT, 0x50}, + {0x6176, CRL_REG_LEN_08BIT, 0x11}, + {0x6177, CRL_REG_LEN_08BIT, 0xcc}, + {0x6178, CRL_REG_LEN_08BIT, 0x00}, + {0x6179, CRL_REG_LEN_08BIT, 0x30}, + {0x617a, 
CRL_REG_LEN_08BIT, 0xd5}, + {0x617b, CRL_REG_LEN_08BIT, 0x00}, + {0x617c, CRL_REG_LEN_08BIT, 0xba}, + {0x617d, CRL_REG_LEN_08BIT, 0xb0}, + {0x617e, CRL_REG_LEN_08BIT, 0xb7}, + {0x617f, CRL_REG_LEN_08BIT, 0x00}, + {0x6180, CRL_REG_LEN_08BIT, 0x9d}, + {0x6181, CRL_REG_LEN_08BIT, 0xd3}, + {0x6182, CRL_REG_LEN_08BIT, 0x0a}, + {0x6183, CRL_REG_LEN_08BIT, 0x9d}, + {0x6184, CRL_REG_LEN_08BIT, 0x9d}, + {0x6185, CRL_REG_LEN_08BIT, 0xd3}, + {0x6186, CRL_REG_LEN_08BIT, 0x10}, + {0x6187, CRL_REG_LEN_08BIT, 0x9c}, + {0x6188, CRL_REG_LEN_08BIT, 0x94}, + {0x6189, CRL_REG_LEN_08BIT, 0x90}, + {0x618a, CRL_REG_LEN_08BIT, 0xc8}, + {0x618b, CRL_REG_LEN_08BIT, 0xba}, + {0x618c, CRL_REG_LEN_08BIT, 0xd5}, + {0x618d, CRL_REG_LEN_08BIT, 0x00}, + {0x618e, CRL_REG_LEN_08BIT, 0x01}, + {0x618f, CRL_REG_LEN_08BIT, 0x1a}, + {0x6190, CRL_REG_LEN_08BIT, 0xcc}, + {0x6191, CRL_REG_LEN_08BIT, 0x12}, + {0x6192, CRL_REG_LEN_08BIT, 0x12}, + {0x6193, CRL_REG_LEN_08BIT, 0x00}, + {0x6194, CRL_REG_LEN_08BIT, 0xcc}, + {0x6195, CRL_REG_LEN_08BIT, 0x9c}, + {0x6196, CRL_REG_LEN_08BIT, 0xd2}, + {0x6197, CRL_REG_LEN_08BIT, 0xcc}, + {0x6198, CRL_REG_LEN_08BIT, 0x60}, + {0x6199, CRL_REG_LEN_08BIT, 0xd2}, + {0x619a, CRL_REG_LEN_08BIT, 0x04}, + {0x619b, CRL_REG_LEN_08BIT, 0xd5}, + {0x619c, CRL_REG_LEN_08BIT, 0x1a}, + {0x619d, CRL_REG_LEN_08BIT, 0xcc}, + {0x619e, CRL_REG_LEN_08BIT, 0x12}, + {0x619f, CRL_REG_LEN_08BIT, 0x00}, + {0x61a0, CRL_REG_LEN_08BIT, 0x12}, + {0x61a1, CRL_REG_LEN_08BIT, 0xcc}, + {0x61a2, CRL_REG_LEN_08BIT, 0x9c}, + {0x61a3, CRL_REG_LEN_08BIT, 0xd2}, + {0x61a4, CRL_REG_LEN_08BIT, 0xcc}, + {0x61a5, CRL_REG_LEN_08BIT, 0x60}, + {0x61a6, CRL_REG_LEN_08BIT, 0xd2}, + {0x61a7, CRL_REG_LEN_08BIT, 0x1a}, + {0x61a8, CRL_REG_LEN_08BIT, 0xcc}, + {0x61a9, CRL_REG_LEN_08BIT, 0x12}, + {0x61aa, CRL_REG_LEN_08BIT, 0x00}, + {0x61ab, CRL_REG_LEN_08BIT, 0x12}, + {0x61ac, CRL_REG_LEN_08BIT, 0xcc}, + {0x61ad, CRL_REG_LEN_08BIT, 0x9c}, + {0x61ae, CRL_REG_LEN_08BIT, 0xd2}, + {0x61af, CRL_REG_LEN_08BIT, 0xcc}, + {0x61b0, 
CRL_REG_LEN_08BIT, 0x60}, + {0x61b1, CRL_REG_LEN_08BIT, 0xd2}, + {0x61b2, CRL_REG_LEN_08BIT, 0x1a}, + {0x61b3, CRL_REG_LEN_08BIT, 0xcc}, + {0x61b4, CRL_REG_LEN_08BIT, 0x12}, + {0x61b5, CRL_REG_LEN_08BIT, 0x00}, + {0x61b6, CRL_REG_LEN_08BIT, 0x12}, + {0x61b7, CRL_REG_LEN_08BIT, 0xcc}, + {0x61b8, CRL_REG_LEN_08BIT, 0x9c}, + {0x61b9, CRL_REG_LEN_08BIT, 0xd2}, + {0x61ba, CRL_REG_LEN_08BIT, 0xcc}, + {0x61bb, CRL_REG_LEN_08BIT, 0x60}, + {0x61bc, CRL_REG_LEN_08BIT, 0xd2}, + {0x61bd, CRL_REG_LEN_08BIT, 0xd5}, + {0x61be, CRL_REG_LEN_08BIT, 0x1a}, + {0x61bf, CRL_REG_LEN_08BIT, 0xcc}, + {0x61c0, CRL_REG_LEN_08BIT, 0x12}, + {0x61c1, CRL_REG_LEN_08BIT, 0x12}, + {0x61c2, CRL_REG_LEN_08BIT, 0x00}, + {0x61c3, CRL_REG_LEN_08BIT, 0xcc}, + {0x61c4, CRL_REG_LEN_08BIT, 0x8a}, + {0x61c5, CRL_REG_LEN_08BIT, 0xd2}, + {0x61c6, CRL_REG_LEN_08BIT, 0xcc}, + {0x61c7, CRL_REG_LEN_08BIT, 0x74}, + {0x61c8, CRL_REG_LEN_08BIT, 0xd2}, + {0x61c9, CRL_REG_LEN_08BIT, 0xd5}, + {0x61ca, CRL_REG_LEN_08BIT, 0x1a}, + {0x61cb, CRL_REG_LEN_08BIT, 0xcc}, + {0x61cc, CRL_REG_LEN_08BIT, 0x12}, + {0x61cd, CRL_REG_LEN_08BIT, 0x00}, + {0x61ce, CRL_REG_LEN_08BIT, 0x12}, + {0x61cf, CRL_REG_LEN_08BIT, 0xcc}, + {0x61d0, CRL_REG_LEN_08BIT, 0x8a}, + {0x61d1, CRL_REG_LEN_08BIT, 0xd2}, + {0x61d2, CRL_REG_LEN_08BIT, 0xcc}, + {0x61d3, CRL_REG_LEN_08BIT, 0x74}, + {0x61d4, CRL_REG_LEN_08BIT, 0xd2}, + {0x61d5, CRL_REG_LEN_08BIT, 0x1a}, + {0x61d6, CRL_REG_LEN_08BIT, 0xcc}, + {0x61d7, CRL_REG_LEN_08BIT, 0x12}, + {0x61d8, CRL_REG_LEN_08BIT, 0x00}, + {0x61d9, CRL_REG_LEN_08BIT, 0x12}, + {0x61da, CRL_REG_LEN_08BIT, 0xcc}, + {0x61db, CRL_REG_LEN_08BIT, 0x8a}, + {0x61dc, CRL_REG_LEN_08BIT, 0xd2}, + {0x61dd, CRL_REG_LEN_08BIT, 0xcc}, + {0x61de, CRL_REG_LEN_08BIT, 0x74}, + {0x61df, CRL_REG_LEN_08BIT, 0xd2}, + {0x61e0, CRL_REG_LEN_08BIT, 0x1a}, + {0x61e1, CRL_REG_LEN_08BIT, 0xcc}, + {0x61e2, CRL_REG_LEN_08BIT, 0x12}, + {0x61e3, CRL_REG_LEN_08BIT, 0x00}, + {0x61e4, CRL_REG_LEN_08BIT, 0x12}, + {0x61e5, CRL_REG_LEN_08BIT, 0xcc}, + {0x61e6, 
CRL_REG_LEN_08BIT, 0x8a}, + {0x61e7, CRL_REG_LEN_08BIT, 0xd2}, + {0x61e8, CRL_REG_LEN_08BIT, 0xcc}, + {0x61e9, CRL_REG_LEN_08BIT, 0x74}, + {0x61ea, CRL_REG_LEN_08BIT, 0xd2}, + {0x61eb, CRL_REG_LEN_08BIT, 0xd5}, + {0x61ec, CRL_REG_LEN_08BIT, 0xcc}, + {0x61ed, CRL_REG_LEN_08BIT, 0x12}, + {0x61ee, CRL_REG_LEN_08BIT, 0x00}, + {0x61ef, CRL_REG_LEN_08BIT, 0x12}, + {0x61f0, CRL_REG_LEN_08BIT, 0xcc}, + {0x61f1, CRL_REG_LEN_08BIT, 0x9c}, + {0x61f2, CRL_REG_LEN_08BIT, 0xd5}, + {0x6400, CRL_REG_LEN_08BIT, 0x04}, + {0x6401, CRL_REG_LEN_08BIT, 0x04}, + {0x6402, CRL_REG_LEN_08BIT, 0x00}, + {0x6403, CRL_REG_LEN_08BIT, 0xff}, + {0x6404, CRL_REG_LEN_08BIT, 0x00}, + {0x6405, CRL_REG_LEN_08BIT, 0x08}, + {0x6406, CRL_REG_LEN_08BIT, 0x00}, + {0x6407, CRL_REG_LEN_08BIT, 0xff}, + {0x6408, CRL_REG_LEN_08BIT, 0x04}, + {0x6409, CRL_REG_LEN_08BIT, 0x70}, + {0x640a, CRL_REG_LEN_08BIT, 0x00}, + {0x640b, CRL_REG_LEN_08BIT, 0xff}, + {0x640c, CRL_REG_LEN_08BIT, 0x05}, + {0x640d, CRL_REG_LEN_08BIT, 0x14}, + {0x640e, CRL_REG_LEN_08BIT, 0x04}, + {0x640f, CRL_REG_LEN_08BIT, 0x71}, + {0x6410, CRL_REG_LEN_08BIT, 0x05}, + {0x6411, CRL_REG_LEN_08BIT, 0x74}, + {0x6412, CRL_REG_LEN_08BIT, 0x00}, + {0x6413, CRL_REG_LEN_08BIT, 0xff}, + {0x6414, CRL_REG_LEN_08BIT, 0x05}, + {0x6415, CRL_REG_LEN_08BIT, 0x54}, + {0x6416, CRL_REG_LEN_08BIT, 0x05}, + {0x6417, CRL_REG_LEN_08BIT, 0x44}, + {0x6418, CRL_REG_LEN_08BIT, 0x04}, + {0x6419, CRL_REG_LEN_08BIT, 0x30}, + {0x641a, CRL_REG_LEN_08BIT, 0x05}, + {0x641b, CRL_REG_LEN_08BIT, 0x46}, + {0x641c, CRL_REG_LEN_08BIT, 0x00}, + {0x641d, CRL_REG_LEN_08BIT, 0xff}, + {0x641e, CRL_REG_LEN_08BIT, 0x04}, + {0x641f, CRL_REG_LEN_08BIT, 0x31}, + {0x6420, CRL_REG_LEN_08BIT, 0x04}, + {0x6421, CRL_REG_LEN_08BIT, 0x30}, + {0x6422, CRL_REG_LEN_08BIT, 0x00}, + {0x6423, CRL_REG_LEN_08BIT, 0xff}, + {0x6424, CRL_REG_LEN_08BIT, 0x04}, + {0x6425, CRL_REG_LEN_08BIT, 0x20}, + {0x6426, CRL_REG_LEN_08BIT, 0x05}, + {0x6427, CRL_REG_LEN_08BIT, 0x06}, + {0x6428, CRL_REG_LEN_08BIT, 0x00}, + {0x6429, 
CRL_REG_LEN_08BIT, 0xff}, + {0x642a, CRL_REG_LEN_08BIT, 0x08}, + {0x642b, CRL_REG_LEN_08BIT, 0x2a}, + {0x642c, CRL_REG_LEN_08BIT, 0x08}, + {0x642d, CRL_REG_LEN_08BIT, 0x31}, + {0x642e, CRL_REG_LEN_08BIT, 0x00}, + {0x642f, CRL_REG_LEN_08BIT, 0xff}, + {0x6430, CRL_REG_LEN_08BIT, 0x08}, + {0x6431, CRL_REG_LEN_08BIT, 0x2a}, + {0x6432, CRL_REG_LEN_08BIT, 0x08}, + {0x6433, CRL_REG_LEN_08BIT, 0x31}, + {0x6434, CRL_REG_LEN_08BIT, 0x06}, + {0x6435, CRL_REG_LEN_08BIT, 0x20}, + {0x6436, CRL_REG_LEN_08BIT, 0x07}, + {0x6437, CRL_REG_LEN_08BIT, 0x00}, + {0x6438, CRL_REG_LEN_08BIT, 0x08}, + {0x6439, CRL_REG_LEN_08BIT, 0x40}, + {0x643a, CRL_REG_LEN_08BIT, 0x00}, + {0x643b, CRL_REG_LEN_08BIT, 0xff}, + {0x643c, CRL_REG_LEN_08BIT, 0x08}, + {0x643d, CRL_REG_LEN_08BIT, 0x2a}, + {0x643e, CRL_REG_LEN_08BIT, 0x08}, + {0x643f, CRL_REG_LEN_08BIT, 0x36}, + {0x6440, CRL_REG_LEN_08BIT, 0x06}, + {0x6441, CRL_REG_LEN_08BIT, 0x10}, + {0x6442, CRL_REG_LEN_08BIT, 0x07}, + {0x6443, CRL_REG_LEN_08BIT, 0x00}, + {0x6444, CRL_REG_LEN_08BIT, 0x08}, + {0x6445, CRL_REG_LEN_08BIT, 0x40}, + {0x6446, CRL_REG_LEN_08BIT, 0x00}, + {0x6447, CRL_REG_LEN_08BIT, 0xff}, + {0x6448, CRL_REG_LEN_08BIT, 0x08}, + {0x6449, CRL_REG_LEN_08BIT, 0x2a}, + {0x644a, CRL_REG_LEN_08BIT, 0x08}, + {0x644b, CRL_REG_LEN_08BIT, 0x3b}, + {0x644c, CRL_REG_LEN_08BIT, 0x06}, + {0x644d, CRL_REG_LEN_08BIT, 0x00}, + {0x644e, CRL_REG_LEN_08BIT, 0x07}, + {0x644f, CRL_REG_LEN_08BIT, 0x00}, + {0x6450, CRL_REG_LEN_08BIT, 0x08}, + {0x6451, CRL_REG_LEN_08BIT, 0x40}, + {0x6452, CRL_REG_LEN_08BIT, 0x00}, + {0x6453, CRL_REG_LEN_08BIT, 0xff}, + {0x6454, CRL_REG_LEN_08BIT, 0x06}, + {0x6455, CRL_REG_LEN_08BIT, 0x00}, + {0x6456, CRL_REG_LEN_08BIT, 0x07}, + {0x6457, CRL_REG_LEN_08BIT, 0x05}, + {0x6458, CRL_REG_LEN_08BIT, 0x01}, + {0x6459, CRL_REG_LEN_08BIT, 0xaf}, + {0x645a, CRL_REG_LEN_08BIT, 0x01}, + {0x645b, CRL_REG_LEN_08BIT, 0x0f}, + {0x645c, CRL_REG_LEN_08BIT, 0x01}, + {0x645d, CRL_REG_LEN_08BIT, 0x90}, + {0x645e, CRL_REG_LEN_08BIT, 0x01}, + {0x645f, 
CRL_REG_LEN_08BIT, 0xc8}, + {0x6460, CRL_REG_LEN_08BIT, 0x00}, + {0x6461, CRL_REG_LEN_08BIT, 0xff}, + {0x6462, CRL_REG_LEN_08BIT, 0x01}, + {0x6463, CRL_REG_LEN_08BIT, 0xac}, + {0x6464, CRL_REG_LEN_08BIT, 0x01}, + {0x6465, CRL_REG_LEN_08BIT, 0x0c}, + {0x6466, CRL_REG_LEN_08BIT, 0x01}, + {0x6467, CRL_REG_LEN_08BIT, 0x90}, + {0x6468, CRL_REG_LEN_08BIT, 0x01}, + {0x6469, CRL_REG_LEN_08BIT, 0xe8}, + {0x646a, CRL_REG_LEN_08BIT, 0x00}, + {0x646b, CRL_REG_LEN_08BIT, 0xff}, + {0x646c, CRL_REG_LEN_08BIT, 0x01}, + {0x646d, CRL_REG_LEN_08BIT, 0xad}, + {0x646e, CRL_REG_LEN_08BIT, 0x01}, + {0x646f, CRL_REG_LEN_08BIT, 0x0d}, + {0x6470, CRL_REG_LEN_08BIT, 0x01}, + {0x6471, CRL_REG_LEN_08BIT, 0x90}, + {0x6472, CRL_REG_LEN_08BIT, 0x01}, + {0x6473, CRL_REG_LEN_08BIT, 0xe8}, + {0x6474, CRL_REG_LEN_08BIT, 0x00}, + {0x6475, CRL_REG_LEN_08BIT, 0xff}, + {0x6476, CRL_REG_LEN_08BIT, 0x01}, + {0x6477, CRL_REG_LEN_08BIT, 0xae}, + {0x6478, CRL_REG_LEN_08BIT, 0x01}, + {0x6479, CRL_REG_LEN_08BIT, 0x0e}, + {0x647a, CRL_REG_LEN_08BIT, 0x01}, + {0x647b, CRL_REG_LEN_08BIT, 0x90}, + {0x647c, CRL_REG_LEN_08BIT, 0x01}, + {0x647d, CRL_REG_LEN_08BIT, 0xe8}, + {0x647e, CRL_REG_LEN_08BIT, 0x00}, + {0x647f, CRL_REG_LEN_08BIT, 0xff}, + {0x6480, CRL_REG_LEN_08BIT, 0x01}, + {0x6481, CRL_REG_LEN_08BIT, 0xb0}, + {0x6482, CRL_REG_LEN_08BIT, 0x01}, + {0x6483, CRL_REG_LEN_08BIT, 0xb1}, + {0x6484, CRL_REG_LEN_08BIT, 0x01}, + {0x6485, CRL_REG_LEN_08BIT, 0xb2}, + {0x6486, CRL_REG_LEN_08BIT, 0x01}, + {0x6487, CRL_REG_LEN_08BIT, 0xb3}, + {0x6488, CRL_REG_LEN_08BIT, 0x01}, + {0x6489, CRL_REG_LEN_08BIT, 0xb4}, + {0x648a, CRL_REG_LEN_08BIT, 0x01}, + {0x648b, CRL_REG_LEN_08BIT, 0xb5}, + {0x648c, CRL_REG_LEN_08BIT, 0x01}, + {0x648d, CRL_REG_LEN_08BIT, 0xb6}, + {0x648e, CRL_REG_LEN_08BIT, 0x01}, + {0x648f, CRL_REG_LEN_08BIT, 0xb7}, + {0x6490, CRL_REG_LEN_08BIT, 0x01}, + {0x6491, CRL_REG_LEN_08BIT, 0xb8}, + {0x6492, CRL_REG_LEN_08BIT, 0x01}, + {0x6493, CRL_REG_LEN_08BIT, 0xb9}, + {0x6494, CRL_REG_LEN_08BIT, 0x01}, + {0x6495, 
CRL_REG_LEN_08BIT, 0xba}, + {0x6496, CRL_REG_LEN_08BIT, 0x01}, + {0x6497, CRL_REG_LEN_08BIT, 0xbb}, + {0x6498, CRL_REG_LEN_08BIT, 0x01}, + {0x6499, CRL_REG_LEN_08BIT, 0xbc}, + {0x649a, CRL_REG_LEN_08BIT, 0x01}, + {0x649b, CRL_REG_LEN_08BIT, 0xbd}, + {0x649c, CRL_REG_LEN_08BIT, 0x01}, + {0x649d, CRL_REG_LEN_08BIT, 0xbe}, + {0x649e, CRL_REG_LEN_08BIT, 0x01}, + {0x649f, CRL_REG_LEN_08BIT, 0xbf}, + {0x64a0, CRL_REG_LEN_08BIT, 0x01}, + {0x64a1, CRL_REG_LEN_08BIT, 0xc0}, + {0x64a2, CRL_REG_LEN_08BIT, 0x00}, + {0x64a3, CRL_REG_LEN_08BIT, 0xff}, + {0x64a4, CRL_REG_LEN_08BIT, 0x06}, + {0x64a5, CRL_REG_LEN_08BIT, 0x00}, + {0x64a6, CRL_REG_LEN_08BIT, 0x01}, + {0x64a7, CRL_REG_LEN_08BIT, 0xf6}, + {0x64a8, CRL_REG_LEN_08BIT, 0x04}, + {0x64a9, CRL_REG_LEN_08BIT, 0x30}, + {0x64aa, CRL_REG_LEN_08BIT, 0x00}, + {0x64ab, CRL_REG_LEN_08BIT, 0xff}, + {0x64ac, CRL_REG_LEN_08BIT, 0x06}, + {0x64ad, CRL_REG_LEN_08BIT, 0x10}, + {0x64ae, CRL_REG_LEN_08BIT, 0x01}, + {0x64af, CRL_REG_LEN_08BIT, 0xf6}, + {0x64b0, CRL_REG_LEN_08BIT, 0x04}, + {0x64b1, CRL_REG_LEN_08BIT, 0x30}, + {0x64b2, CRL_REG_LEN_08BIT, 0x06}, + {0x64b3, CRL_REG_LEN_08BIT, 0x00}, + {0x64b4, CRL_REG_LEN_08BIT, 0x00}, + {0x64b5, CRL_REG_LEN_08BIT, 0xff}, + {0x64b6, CRL_REG_LEN_08BIT, 0x06}, + {0x64b7, CRL_REG_LEN_08BIT, 0x20}, + {0x64b8, CRL_REG_LEN_08BIT, 0x01}, + {0x64b9, CRL_REG_LEN_08BIT, 0xf6}, + {0x64ba, CRL_REG_LEN_08BIT, 0x04}, + {0x64bb, CRL_REG_LEN_08BIT, 0x30}, + {0x64bc, CRL_REG_LEN_08BIT, 0x06}, + {0x64bd, CRL_REG_LEN_08BIT, 0x00}, + {0x64be, CRL_REG_LEN_08BIT, 0x00}, + {0x64bf, CRL_REG_LEN_08BIT, 0xff}, + {0x64c0, CRL_REG_LEN_08BIT, 0x04}, + {0x64c1, CRL_REG_LEN_08BIT, 0x31}, + {0x64c2, CRL_REG_LEN_08BIT, 0x04}, + {0x64c3, CRL_REG_LEN_08BIT, 0x30}, + {0x64c4, CRL_REG_LEN_08BIT, 0x01}, + {0x64c5, CRL_REG_LEN_08BIT, 0x20}, + {0x64c6, CRL_REG_LEN_08BIT, 0x01}, + {0x64c7, CRL_REG_LEN_08BIT, 0x31}, + {0x64c8, CRL_REG_LEN_08BIT, 0x01}, + {0x64c9, CRL_REG_LEN_08BIT, 0x32}, + {0x64ca, CRL_REG_LEN_08BIT, 0x01}, + {0x64cb, 
CRL_REG_LEN_08BIT, 0x33}, + {0x64cc, CRL_REG_LEN_08BIT, 0x01}, + {0x64cd, CRL_REG_LEN_08BIT, 0x34}, + {0x64ce, CRL_REG_LEN_08BIT, 0x01}, + {0x64cf, CRL_REG_LEN_08BIT, 0x35}, + {0x64d0, CRL_REG_LEN_08BIT, 0x01}, + {0x64d1, CRL_REG_LEN_08BIT, 0x36}, + {0x64d2, CRL_REG_LEN_08BIT, 0x01}, + {0x64d3, CRL_REG_LEN_08BIT, 0x37}, + {0x64d4, CRL_REG_LEN_08BIT, 0x01}, + {0x64d5, CRL_REG_LEN_08BIT, 0x38}, + {0x64d6, CRL_REG_LEN_08BIT, 0x01}, + {0x64d7, CRL_REG_LEN_08BIT, 0x39}, + {0x64d8, CRL_REG_LEN_08BIT, 0x01}, + {0x64d9, CRL_REG_LEN_08BIT, 0x3a}, + {0x64da, CRL_REG_LEN_08BIT, 0x01}, + {0x64db, CRL_REG_LEN_08BIT, 0x3b}, + {0x64dc, CRL_REG_LEN_08BIT, 0x01}, + {0x64dd, CRL_REG_LEN_08BIT, 0x3c}, + {0x64de, CRL_REG_LEN_08BIT, 0x01}, + {0x64df, CRL_REG_LEN_08BIT, 0x3d}, + {0x64e0, CRL_REG_LEN_08BIT, 0x01}, + {0x64e1, CRL_REG_LEN_08BIT, 0x3e}, + {0x64e2, CRL_REG_LEN_08BIT, 0x01}, + {0x64e3, CRL_REG_LEN_08BIT, 0x3f}, + {0x64e4, CRL_REG_LEN_08BIT, 0x02}, + {0x64e5, CRL_REG_LEN_08BIT, 0xa0}, + {0x64e6, CRL_REG_LEN_08BIT, 0x00}, + {0x64e7, CRL_REG_LEN_08BIT, 0xff}, + {0x64e8, CRL_REG_LEN_08BIT, 0x04}, + {0x64e9, CRL_REG_LEN_08BIT, 0x31}, + {0x64ea, CRL_REG_LEN_08BIT, 0x04}, + {0x64eb, CRL_REG_LEN_08BIT, 0x30}, + {0x64ec, CRL_REG_LEN_08BIT, 0x01}, + {0x64ed, CRL_REG_LEN_08BIT, 0x00}, + {0x64ee, CRL_REG_LEN_08BIT, 0x01}, + {0x64ef, CRL_REG_LEN_08BIT, 0x11}, + {0x64f0, CRL_REG_LEN_08BIT, 0x01}, + {0x64f1, CRL_REG_LEN_08BIT, 0x12}, + {0x64f2, CRL_REG_LEN_08BIT, 0x01}, + {0x64f3, CRL_REG_LEN_08BIT, 0x13}, + {0x64f4, CRL_REG_LEN_08BIT, 0x01}, + {0x64f5, CRL_REG_LEN_08BIT, 0x14}, + {0x64f6, CRL_REG_LEN_08BIT, 0x01}, + {0x64f7, CRL_REG_LEN_08BIT, 0x15}, + {0x64f8, CRL_REG_LEN_08BIT, 0x01}, + {0x64f9, CRL_REG_LEN_08BIT, 0x16}, + {0x64fa, CRL_REG_LEN_08BIT, 0x01}, + {0x64fb, CRL_REG_LEN_08BIT, 0x17}, + {0x64fc, CRL_REG_LEN_08BIT, 0x01}, + {0x64fd, CRL_REG_LEN_08BIT, 0x18}, + {0x64fe, CRL_REG_LEN_08BIT, 0x01}, + {0x64ff, CRL_REG_LEN_08BIT, 0x19}, + {0x6500, CRL_REG_LEN_08BIT, 0x01}, + {0x6501, 
CRL_REG_LEN_08BIT, 0x1a}, + {0x6502, CRL_REG_LEN_08BIT, 0x01}, + {0x6503, CRL_REG_LEN_08BIT, 0x1b}, + {0x6504, CRL_REG_LEN_08BIT, 0x01}, + {0x6505, CRL_REG_LEN_08BIT, 0x1c}, + {0x6506, CRL_REG_LEN_08BIT, 0x01}, + {0x6507, CRL_REG_LEN_08BIT, 0x1d}, + {0x6508, CRL_REG_LEN_08BIT, 0x01}, + {0x6509, CRL_REG_LEN_08BIT, 0x1e}, + {0x650a, CRL_REG_LEN_08BIT, 0x01}, + {0x650b, CRL_REG_LEN_08BIT, 0x1f}, + {0x650c, CRL_REG_LEN_08BIT, 0x02}, + {0x650d, CRL_REG_LEN_08BIT, 0xa0}, + {0x650e, CRL_REG_LEN_08BIT, 0x00}, + {0x650f, CRL_REG_LEN_08BIT, 0xff}, + {0x6510, CRL_REG_LEN_08BIT, 0x04}, + {0x6511, CRL_REG_LEN_08BIT, 0x20}, + {0x6512, CRL_REG_LEN_08BIT, 0x05}, + {0x6513, CRL_REG_LEN_08BIT, 0x86}, + {0x6514, CRL_REG_LEN_08BIT, 0x03}, + {0x6515, CRL_REG_LEN_08BIT, 0x0b}, + {0x6516, CRL_REG_LEN_08BIT, 0x05}, + {0x6517, CRL_REG_LEN_08BIT, 0x86}, + {0x6518, CRL_REG_LEN_08BIT, 0x00}, + {0x6519, CRL_REG_LEN_08BIT, 0x00}, + {0x651a, CRL_REG_LEN_08BIT, 0x05}, + {0x651b, CRL_REG_LEN_08BIT, 0x06}, + {0x651c, CRL_REG_LEN_08BIT, 0x00}, + {0x651d, CRL_REG_LEN_08BIT, 0x04}, + {0x651e, CRL_REG_LEN_08BIT, 0x05}, + {0x651f, CRL_REG_LEN_08BIT, 0x04}, + {0x6520, CRL_REG_LEN_08BIT, 0x00}, + {0x6521, CRL_REG_LEN_08BIT, 0x04}, + {0x6522, CRL_REG_LEN_08BIT, 0x05}, + {0x6523, CRL_REG_LEN_08BIT, 0x00}, + {0x6524, CRL_REG_LEN_08BIT, 0x05}, + {0x6525, CRL_REG_LEN_08BIT, 0x0a}, + {0x6526, CRL_REG_LEN_08BIT, 0x03}, + {0x6527, CRL_REG_LEN_08BIT, 0x9a}, + {0x6528, CRL_REG_LEN_08BIT, 0x05}, + {0x6529, CRL_REG_LEN_08BIT, 0x86}, + {0x652a, CRL_REG_LEN_08BIT, 0x00}, + {0x652b, CRL_REG_LEN_08BIT, 0x00}, + {0x652c, CRL_REG_LEN_08BIT, 0x05}, + {0x652d, CRL_REG_LEN_08BIT, 0x06}, + {0x652e, CRL_REG_LEN_08BIT, 0x00}, + {0x652f, CRL_REG_LEN_08BIT, 0x01}, + {0x6530, CRL_REG_LEN_08BIT, 0x05}, + {0x6531, CRL_REG_LEN_08BIT, 0x04}, + {0x6532, CRL_REG_LEN_08BIT, 0x00}, + {0x6533, CRL_REG_LEN_08BIT, 0x04}, + {0x6534, CRL_REG_LEN_08BIT, 0x05}, + {0x6535, CRL_REG_LEN_08BIT, 0x00}, + {0x6536, CRL_REG_LEN_08BIT, 0x05}, + {0x6537, 
CRL_REG_LEN_08BIT, 0x0a}, + {0x6538, CRL_REG_LEN_08BIT, 0x03}, + {0x6539, CRL_REG_LEN_08BIT, 0x99}, + {0x653a, CRL_REG_LEN_08BIT, 0x05}, + {0x653b, CRL_REG_LEN_08BIT, 0x06}, + {0x653c, CRL_REG_LEN_08BIT, 0x00}, + {0x653d, CRL_REG_LEN_08BIT, 0x00}, + {0x653e, CRL_REG_LEN_08BIT, 0x05}, + {0x653f, CRL_REG_LEN_08BIT, 0x04}, + {0x6540, CRL_REG_LEN_08BIT, 0x00}, + {0x6541, CRL_REG_LEN_08BIT, 0x04}, + {0x6542, CRL_REG_LEN_08BIT, 0x05}, + {0x6543, CRL_REG_LEN_08BIT, 0x00}, + {0x6544, CRL_REG_LEN_08BIT, 0x05}, + {0x6545, CRL_REG_LEN_08BIT, 0x0a}, + {0x6546, CRL_REG_LEN_08BIT, 0x03}, + {0x6547, CRL_REG_LEN_08BIT, 0x98}, + {0x6548, CRL_REG_LEN_08BIT, 0x05}, + {0x6549, CRL_REG_LEN_08BIT, 0x06}, + {0x654a, CRL_REG_LEN_08BIT, 0x00}, + {0x654b, CRL_REG_LEN_08BIT, 0x00}, + {0x654c, CRL_REG_LEN_08BIT, 0x05}, + {0x654d, CRL_REG_LEN_08BIT, 0x04}, + {0x654e, CRL_REG_LEN_08BIT, 0x00}, + {0x654f, CRL_REG_LEN_08BIT, 0x04}, + {0x6550, CRL_REG_LEN_08BIT, 0x05}, + {0x6551, CRL_REG_LEN_08BIT, 0x00}, + {0x6552, CRL_REG_LEN_08BIT, 0x05}, + {0x6553, CRL_REG_LEN_08BIT, 0x0a}, + {0x6554, CRL_REG_LEN_08BIT, 0x03}, + {0x6555, CRL_REG_LEN_08BIT, 0x97}, + {0x6556, CRL_REG_LEN_08BIT, 0x05}, + {0x6557, CRL_REG_LEN_08BIT, 0x06}, + {0x6558, CRL_REG_LEN_08BIT, 0x05}, + {0x6559, CRL_REG_LEN_08BIT, 0x04}, + {0x655a, CRL_REG_LEN_08BIT, 0x00}, + {0x655b, CRL_REG_LEN_08BIT, 0x04}, + {0x655c, CRL_REG_LEN_08BIT, 0x05}, + {0x655d, CRL_REG_LEN_08BIT, 0x00}, + {0x655e, CRL_REG_LEN_08BIT, 0x05}, + {0x655f, CRL_REG_LEN_08BIT, 0x0a}, + {0x6560, CRL_REG_LEN_08BIT, 0x03}, + {0x6561, CRL_REG_LEN_08BIT, 0x96}, + {0x6562, CRL_REG_LEN_08BIT, 0x05}, + {0x6563, CRL_REG_LEN_08BIT, 0x06}, + {0x6564, CRL_REG_LEN_08BIT, 0x05}, + {0x6565, CRL_REG_LEN_08BIT, 0x04}, + {0x6566, CRL_REG_LEN_08BIT, 0x00}, + {0x6567, CRL_REG_LEN_08BIT, 0x04}, + {0x6568, CRL_REG_LEN_08BIT, 0x05}, + {0x6569, CRL_REG_LEN_08BIT, 0x00}, + {0x656a, CRL_REG_LEN_08BIT, 0x05}, + {0x656b, CRL_REG_LEN_08BIT, 0x0a}, + {0x656c, CRL_REG_LEN_08BIT, 0x03}, + {0x656d, 
CRL_REG_LEN_08BIT, 0x95}, + {0x656e, CRL_REG_LEN_08BIT, 0x05}, + {0x656f, CRL_REG_LEN_08BIT, 0x06}, + {0x6570, CRL_REG_LEN_08BIT, 0x05}, + {0x6571, CRL_REG_LEN_08BIT, 0x04}, + {0x6572, CRL_REG_LEN_08BIT, 0x00}, + {0x6573, CRL_REG_LEN_08BIT, 0x04}, + {0x6574, CRL_REG_LEN_08BIT, 0x05}, + {0x6575, CRL_REG_LEN_08BIT, 0x00}, + {0x6576, CRL_REG_LEN_08BIT, 0x05}, + {0x6577, CRL_REG_LEN_08BIT, 0x0a}, + {0x6578, CRL_REG_LEN_08BIT, 0x03}, + {0x6579, CRL_REG_LEN_08BIT, 0x94}, + {0x657a, CRL_REG_LEN_08BIT, 0x05}, + {0x657b, CRL_REG_LEN_08BIT, 0x06}, + {0x657c, CRL_REG_LEN_08BIT, 0x00}, + {0x657d, CRL_REG_LEN_08BIT, 0x00}, + {0x657e, CRL_REG_LEN_08BIT, 0x05}, + {0x657f, CRL_REG_LEN_08BIT, 0x04}, + {0x6580, CRL_REG_LEN_08BIT, 0x00}, + {0x6581, CRL_REG_LEN_08BIT, 0x04}, + {0x6582, CRL_REG_LEN_08BIT, 0x05}, + {0x6583, CRL_REG_LEN_08BIT, 0x00}, + {0x6584, CRL_REG_LEN_08BIT, 0x05}, + {0x6585, CRL_REG_LEN_08BIT, 0x0a}, + {0x6586, CRL_REG_LEN_08BIT, 0x03}, + {0x6587, CRL_REG_LEN_08BIT, 0x93}, + {0x6588, CRL_REG_LEN_08BIT, 0x05}, + {0x6589, CRL_REG_LEN_08BIT, 0x06}, + {0x658a, CRL_REG_LEN_08BIT, 0x00}, + {0x658b, CRL_REG_LEN_08BIT, 0x00}, + {0x658c, CRL_REG_LEN_08BIT, 0x05}, + {0x658d, CRL_REG_LEN_08BIT, 0x04}, + {0x658e, CRL_REG_LEN_08BIT, 0x00}, + {0x658f, CRL_REG_LEN_08BIT, 0x04}, + {0x6590, CRL_REG_LEN_08BIT, 0x05}, + {0x6591, CRL_REG_LEN_08BIT, 0x00}, + {0x6592, CRL_REG_LEN_08BIT, 0x05}, + {0x6593, CRL_REG_LEN_08BIT, 0x0a}, + {0x6594, CRL_REG_LEN_08BIT, 0x03}, + {0x6595, CRL_REG_LEN_08BIT, 0x92}, + {0x6596, CRL_REG_LEN_08BIT, 0x05}, + {0x6597, CRL_REG_LEN_08BIT, 0x06}, + {0x6598, CRL_REG_LEN_08BIT, 0x05}, + {0x6599, CRL_REG_LEN_08BIT, 0x04}, + {0x659a, CRL_REG_LEN_08BIT, 0x00}, + {0x659b, CRL_REG_LEN_08BIT, 0x04}, + {0x659c, CRL_REG_LEN_08BIT, 0x05}, + {0x659d, CRL_REG_LEN_08BIT, 0x00}, + {0x659e, CRL_REG_LEN_08BIT, 0x05}, + {0x659f, CRL_REG_LEN_08BIT, 0x0a}, + {0x65a0, CRL_REG_LEN_08BIT, 0x03}, + {0x65a1, CRL_REG_LEN_08BIT, 0x91}, + {0x65a2, CRL_REG_LEN_08BIT, 0x05}, + {0x65a3, 
CRL_REG_LEN_08BIT, 0x06}, + {0x65a4, CRL_REG_LEN_08BIT, 0x05}, + {0x65a5, CRL_REG_LEN_08BIT, 0x04}, + {0x65a6, CRL_REG_LEN_08BIT, 0x00}, + {0x65a7, CRL_REG_LEN_08BIT, 0x04}, + {0x65a8, CRL_REG_LEN_08BIT, 0x05}, + {0x65a9, CRL_REG_LEN_08BIT, 0x00}, + {0x65aa, CRL_REG_LEN_08BIT, 0x05}, + {0x65ab, CRL_REG_LEN_08BIT, 0x0a}, + {0x65ac, CRL_REG_LEN_08BIT, 0x03}, + {0x65ad, CRL_REG_LEN_08BIT, 0x90}, + {0x65ae, CRL_REG_LEN_08BIT, 0x05}, + {0x65af, CRL_REG_LEN_08BIT, 0x06}, + {0x65b0, CRL_REG_LEN_08BIT, 0x05}, + {0x65b1, CRL_REG_LEN_08BIT, 0x04}, + {0x65b2, CRL_REG_LEN_08BIT, 0x00}, + {0x65b3, CRL_REG_LEN_08BIT, 0x04}, + {0x65b4, CRL_REG_LEN_08BIT, 0x05}, + {0x65b5, CRL_REG_LEN_08BIT, 0x00}, + {0x65b6, CRL_REG_LEN_08BIT, 0x05}, + {0x65b7, CRL_REG_LEN_08BIT, 0x0a}, + {0x65b8, CRL_REG_LEN_08BIT, 0x02}, + {0x65b9, CRL_REG_LEN_08BIT, 0x90}, + {0x65ba, CRL_REG_LEN_08BIT, 0x05}, + {0x65bb, CRL_REG_LEN_08BIT, 0x06}, + {0x65bc, CRL_REG_LEN_08BIT, 0x00}, + {0x65bd, CRL_REG_LEN_08BIT, 0xff}, + {0x65be, CRL_REG_LEN_08BIT, 0x04}, + {0x65bf, CRL_REG_LEN_08BIT, 0x70}, + {0x65c0, CRL_REG_LEN_08BIT, 0x08}, + {0x65c1, CRL_REG_LEN_08BIT, 0x76}, + {0x65c2, CRL_REG_LEN_08BIT, 0x00}, + {0x65c3, CRL_REG_LEN_08BIT, 0xff}, + {0x65c4, CRL_REG_LEN_08BIT, 0x08}, + {0x65c5, CRL_REG_LEN_08BIT, 0x76}, + {0x65c6, CRL_REG_LEN_08BIT, 0x04}, + {0x65c7, CRL_REG_LEN_08BIT, 0x0c}, + {0x65c8, CRL_REG_LEN_08BIT, 0x05}, + {0x65c9, CRL_REG_LEN_08BIT, 0x07}, + {0x65ca, CRL_REG_LEN_08BIT, 0x04}, + {0x65cb, CRL_REG_LEN_08BIT, 0x04}, + {0x65cc, CRL_REG_LEN_08BIT, 0x00}, + {0x65cd, CRL_REG_LEN_08BIT, 0xff}, + {0x65ce, CRL_REG_LEN_08BIT, 0x00}, + {0x65cf, CRL_REG_LEN_08BIT, 0xff}, + {0x65d0, CRL_REG_LEN_08BIT, 0x00}, + {0x65d1, CRL_REG_LEN_08BIT, 0xff}, + {0x303a, CRL_REG_LEN_08BIT, 0x04}, + {0x303b, CRL_REG_LEN_08BIT, 0x7f}, + {0x303c, CRL_REG_LEN_08BIT, 0xfe}, + {0x303d, CRL_REG_LEN_08BIT, 0x19}, + {0x303e, CRL_REG_LEN_08BIT, 0xd7}, + {0x303f, CRL_REG_LEN_08BIT, 0x09}, + {0x3040, CRL_REG_LEN_08BIT, 0x78}, + {0x3042, 
CRL_REG_LEN_08BIT, 0x05}, + {0x328a, CRL_REG_LEN_08BIT, 0x10}, +}; + +static struct crl_register_write_rep ov10640_1280_1088_LONG_RAW[] = { + {0x328a, CRL_REG_LEN_08BIT, 0x11}, + {0x313f, CRL_REG_LEN_08BIT, 0x80}, + {0x3132, CRL_REG_LEN_08BIT, 0x24}, + {0x3014, CRL_REG_LEN_08BIT, 0x03}, + {0x3023, CRL_REG_LEN_08BIT, 0x05}, + {0x3032, CRL_REG_LEN_08BIT, 0x35}, + {0x3033, CRL_REG_LEN_08BIT, 0x04}, + {0x3054, CRL_REG_LEN_08BIT, 0x00}, + {0x3055, CRL_REG_LEN_08BIT, 0x08}, + {0x3056, CRL_REG_LEN_08BIT, 0x01}, + {0x3057, CRL_REG_LEN_08BIT, 0xff}, + {0x3058, CRL_REG_LEN_08BIT, 0xaf}, + {0x3059, CRL_REG_LEN_08BIT, 0x44}, + {0x305a, CRL_REG_LEN_08BIT, 0x02}, + {0x305b, CRL_REG_LEN_08BIT, 0x00}, + {0x305c, CRL_REG_LEN_08BIT, 0x30}, + {0x305d, CRL_REG_LEN_08BIT, 0x9e}, + {0x305e, CRL_REG_LEN_08BIT, 0x19}, + {0x305f, CRL_REG_LEN_08BIT, 0x18}, + {0x3060, CRL_REG_LEN_08BIT, 0xf9}, + {0x3061, CRL_REG_LEN_08BIT, 0xf0}, + {0x308c, CRL_REG_LEN_08BIT, 0xB3}, + {0x308f, CRL_REG_LEN_08BIT, 0x10}, + {0x3091, CRL_REG_LEN_08BIT, 0x00}, + {0x3093, CRL_REG_LEN_08BIT, 0x01}, + {0x30a3, CRL_REG_LEN_08BIT, 0x08}, + {0x30ad, CRL_REG_LEN_08BIT, 0x03}, + {0x30ae, CRL_REG_LEN_08BIT, 0x80}, + {0x30af, CRL_REG_LEN_08BIT, 0x80}, + {0x30b0, CRL_REG_LEN_08BIT, 0xff}, + {0x30b1, CRL_REG_LEN_08BIT, 0x3f}, + {0x30b2, CRL_REG_LEN_08BIT, 0x22}, + {0x30b9, CRL_REG_LEN_08BIT, 0x22}, + {0x30bb, CRL_REG_LEN_08BIT, 0x00}, + {0x30bc, CRL_REG_LEN_08BIT, 0x00}, + {0x30bd, CRL_REG_LEN_08BIT, 0x00}, + {0x30be, CRL_REG_LEN_08BIT, 0x00}, + {0x30bf, CRL_REG_LEN_08BIT, 0x00}, + {0x30c0, CRL_REG_LEN_08BIT, 0x00}, + {0x30c1, CRL_REG_LEN_08BIT, 0x00}, + {0x30c2, CRL_REG_LEN_08BIT, 0x00}, + {0x30c3, CRL_REG_LEN_08BIT, 0x00}, + {0x30c4, CRL_REG_LEN_08BIT, 0x80}, + {0x30c5, CRL_REG_LEN_08BIT, 0x00}, + {0x30c6, CRL_REG_LEN_08BIT, 0x80}, + {0x30c7, CRL_REG_LEN_08BIT, 0x00}, + {0x30c8, CRL_REG_LEN_08BIT, 0x80}, + {0x3119, CRL_REG_LEN_08BIT, 0x45}, + {0x311a, CRL_REG_LEN_08BIT, 0x01}, + {0x311b, CRL_REG_LEN_08BIT, 0x4a}, + 
{0x3074, CRL_REG_LEN_08BIT, 0x00}, + {0x3075, CRL_REG_LEN_08BIT, 0x00}, + {0x3076, CRL_REG_LEN_08BIT, 0x00}, + {0x3077, CRL_REG_LEN_08BIT, 0x02}, + {0x3078, CRL_REG_LEN_08BIT, 0x05}, + {0x3079, CRL_REG_LEN_08BIT, 0x07}, + {0x307a, CRL_REG_LEN_08BIT, 0x04}, + {0x307b, CRL_REG_LEN_08BIT, 0x45}, + {0x307c, CRL_REG_LEN_08BIT, 0x05}, + {0x307d, CRL_REG_LEN_08BIT, 0x00}, + {0x307e, CRL_REG_LEN_08BIT, 0x04}, + {0x307f, CRL_REG_LEN_08BIT, 0x40}, + {0x3084, CRL_REG_LEN_08BIT, 0x00}, + {0x3085, CRL_REG_LEN_08BIT, 0x04}, + {0x3086, CRL_REG_LEN_08BIT, 0x00}, + {0x3087, CRL_REG_LEN_08BIT, 0x04}, + {0x3088, CRL_REG_LEN_08BIT, 0x00}, + {0x3089, CRL_REG_LEN_08BIT, 0x40}, + {0x308d, CRL_REG_LEN_08BIT, 0x92}, + {0x3094, CRL_REG_LEN_08BIT, 0xa5}, + {0x30fa, CRL_REG_LEN_08BIT, 0x06}, + {0x3120, CRL_REG_LEN_08BIT, 0x00}, + {0x3121, CRL_REG_LEN_08BIT, 0x01}, + {0x3122, CRL_REG_LEN_08BIT, 0x00}, + {0x3127, CRL_REG_LEN_08BIT, 0x63}, + {0x3128, CRL_REG_LEN_08BIT, 0xc0}, + {0x3129, CRL_REG_LEN_08BIT, 0x00}, + {0x31be, CRL_REG_LEN_08BIT, 0x01}, + {0x30a5, CRL_REG_LEN_08BIT, 0x78}, + {0x30a6, CRL_REG_LEN_08BIT, 0x40}, + {0x30a7, CRL_REG_LEN_08BIT, 0x78}, + {0x30a8, CRL_REG_LEN_08BIT, 0x80}, + {0x30a9, CRL_REG_LEN_08BIT, 0x78}, + {0x30aa, CRL_REG_LEN_08BIT, 0xe0}, + {0x30ab, CRL_REG_LEN_08BIT, 0xf9}, + {0x30ac, CRL_REG_LEN_08BIT, 0xc0}, + {0x3440, CRL_REG_LEN_08BIT, 0x04}, + {0x3444, CRL_REG_LEN_08BIT, 0x28}, + {0x344e, CRL_REG_LEN_08BIT, 0x2c}, + {0x3457, CRL_REG_LEN_08BIT, 0x33}, + {0x345e, CRL_REG_LEN_08BIT, 0x38}, + {0x3461, CRL_REG_LEN_08BIT, 0xa8}, + {0x7002, CRL_REG_LEN_08BIT, 0xaa}, + {0x7001, CRL_REG_LEN_08BIT, 0xdf}, + {0x7048, CRL_REG_LEN_08BIT, 0x00}, + {0x7049, CRL_REG_LEN_08BIT, 0x02}, + {0x704a, CRL_REG_LEN_08BIT, 0x02}, + {0x704b, CRL_REG_LEN_08BIT, 0x00}, + {0x704c, CRL_REG_LEN_08BIT, 0x01}, + {0x704d, CRL_REG_LEN_08BIT, 0x00}, + {0x7043, CRL_REG_LEN_08BIT, 0x04}, + {0x7040, CRL_REG_LEN_08BIT, 0x3c}, + {0x7047, CRL_REG_LEN_08BIT, 0x00}, + {0x7044, CRL_REG_LEN_08BIT, 0x01}, + 
{0x7000, CRL_REG_LEN_08BIT, 0x1f}, + {0x7084, CRL_REG_LEN_08BIT, 0x01}, + {0x7085, CRL_REG_LEN_08BIT, 0x03}, + {0x7086, CRL_REG_LEN_08BIT, 0x02}, + {0x7087, CRL_REG_LEN_08BIT, 0x40}, + {0x7088, CRL_REG_LEN_08BIT, 0x01}, + {0x7089, CRL_REG_LEN_08BIT, 0x20}, + {0x707f, CRL_REG_LEN_08BIT, 0x04}, + {0x707c, CRL_REG_LEN_08BIT, 0x3c}, + {0x7083, CRL_REG_LEN_08BIT, 0x00}, + {0x7080, CRL_REG_LEN_08BIT, 0x01}, + {0x7003, CRL_REG_LEN_08BIT, 0xdf}, + {0x70c0, CRL_REG_LEN_08BIT, 0x00}, + {0x70c1, CRL_REG_LEN_08BIT, 0x02}, + {0x70c2, CRL_REG_LEN_08BIT, 0x02}, + {0x70c3, CRL_REG_LEN_08BIT, 0x00}, + {0x70c4, CRL_REG_LEN_08BIT, 0x01}, + {0x70c5, CRL_REG_LEN_08BIT, 0x00}, + {0x70b8, CRL_REG_LEN_08BIT, 0x03}, + {0x70b9, CRL_REG_LEN_08BIT, 0x98}, + {0x70bc, CRL_REG_LEN_08BIT, 0x00}, + {0x70bd, CRL_REG_LEN_08BIT, 0x80}, + {0x7004, CRL_REG_LEN_08BIT, 0x02}, + {0x7005, CRL_REG_LEN_08BIT, 0x00}, + {0x7006, CRL_REG_LEN_08BIT, 0x01}, + {0x7007, CRL_REG_LEN_08BIT, 0x80}, + {0x7008, CRL_REG_LEN_08BIT, 0x02}, + {0x7009, CRL_REG_LEN_08BIT, 0x00}, + {0x700a, CRL_REG_LEN_08BIT, 0x04}, + {0x700b, CRL_REG_LEN_08BIT, 0x00}, + {0x700e, CRL_REG_LEN_08BIT, 0x00}, + {0x700f, CRL_REG_LEN_08BIT, 0x60}, + {0x701a, CRL_REG_LEN_08BIT, 0x02}, + {0x701b, CRL_REG_LEN_08BIT, 0x00}, + {0x701c, CRL_REG_LEN_08BIT, 0x01}, + {0x701d, CRL_REG_LEN_08BIT, 0x80}, + {0x701e, CRL_REG_LEN_08BIT, 0x02}, + {0x701f, CRL_REG_LEN_08BIT, 0x00}, + {0x7020, CRL_REG_LEN_08BIT, 0x04}, + {0x7021, CRL_REG_LEN_08BIT, 0x00}, + {0x7024, CRL_REG_LEN_08BIT, 0x00}, + {0x7025, CRL_REG_LEN_08BIT, 0x60}, + {0x70e7, CRL_REG_LEN_08BIT, 0x00}, + {0x70e4, CRL_REG_LEN_08BIT, 0x10}, + {0x70e5, CRL_REG_LEN_08BIT, 0x00}, + {0x70e6, CRL_REG_LEN_08BIT, 0x00}, + {0x70eb, CRL_REG_LEN_08BIT, 0x00}, + {0x70e8, CRL_REG_LEN_08BIT, 0x10}, + {0x70e9, CRL_REG_LEN_08BIT, 0x00}, + {0x70ea, CRL_REG_LEN_08BIT, 0x00}, + {0x70ef, CRL_REG_LEN_08BIT, 0x00}, + {0x70ec, CRL_REG_LEN_08BIT, 0xfd}, + {0x70ed, CRL_REG_LEN_08BIT, 0x00}, + {0x70ee, CRL_REG_LEN_08BIT, 0x00}, + 
{0x70eb, CRL_REG_LEN_08BIT, 0x00}, + {0x70f0, CRL_REG_LEN_08BIT, 0xfd}, + {0x70f1, CRL_REG_LEN_08BIT, 0x00}, + {0x70f2, CRL_REG_LEN_08BIT, 0x00}, + {0x30fb, CRL_REG_LEN_08BIT, 0x06}, + {0x30fc, CRL_REG_LEN_08BIT, 0x80}, + {0x30fd, CRL_REG_LEN_08BIT, 0x02}, + {0x30fe, CRL_REG_LEN_08BIT, 0x93}, + {0x6000, CRL_REG_LEN_08BIT, 0xc1}, + {0x6001, CRL_REG_LEN_08BIT, 0xb9}, + {0x6002, CRL_REG_LEN_08BIT, 0xba}, + {0x6003, CRL_REG_LEN_08BIT, 0xa4}, + {0x6004, CRL_REG_LEN_08BIT, 0xb5}, + {0x6005, CRL_REG_LEN_08BIT, 0xa0}, + {0x6006, CRL_REG_LEN_08BIT, 0x82}, + {0x6007, CRL_REG_LEN_08BIT, 0xa7}, + {0x6008, CRL_REG_LEN_08BIT, 0xb7}, + {0x6009, CRL_REG_LEN_08BIT, 0x5c}, + {0x600a, CRL_REG_LEN_08BIT, 0x9e}, + {0x600b, CRL_REG_LEN_08BIT, 0xc0}, + {0x600c, CRL_REG_LEN_08BIT, 0xd2}, + {0x600d, CRL_REG_LEN_08BIT, 0x33}, + {0x600e, CRL_REG_LEN_08BIT, 0xcc}, + {0x600f, CRL_REG_LEN_08BIT, 0xe2}, + {0x6010, CRL_REG_LEN_08BIT, 0xc1}, + {0x6011, CRL_REG_LEN_08BIT, 0xab}, + {0x6012, CRL_REG_LEN_08BIT, 0xb7}, + {0x6013, CRL_REG_LEN_08BIT, 0x00}, + {0x6014, CRL_REG_LEN_08BIT, 0x00}, + {0x6015, CRL_REG_LEN_08BIT, 0x00}, + {0x6016, CRL_REG_LEN_08BIT, 0x00}, + {0x6017, CRL_REG_LEN_08BIT, 0x00}, + {0x6018, CRL_REG_LEN_08BIT, 0x00}, + {0x6019, CRL_REG_LEN_08BIT, 0x00}, + {0x601a, CRL_REG_LEN_08BIT, 0x00}, + {0x601b, CRL_REG_LEN_08BIT, 0x00}, + {0x601c, CRL_REG_LEN_08BIT, 0x00}, + {0x601d, CRL_REG_LEN_08BIT, 0x00}, + {0x601e, CRL_REG_LEN_08BIT, 0x9c}, + {0x601f, CRL_REG_LEN_08BIT, 0x94}, + {0x6020, CRL_REG_LEN_08BIT, 0x90}, + {0x6021, CRL_REG_LEN_08BIT, 0xc5}, + {0x6022, CRL_REG_LEN_08BIT, 0x01}, + {0x6023, CRL_REG_LEN_08BIT, 0x54}, + {0x6024, CRL_REG_LEN_08BIT, 0x2a}, + {0x6025, CRL_REG_LEN_08BIT, 0x61}, + {0x6026, CRL_REG_LEN_08BIT, 0xd2}, + {0x6027, CRL_REG_LEN_08BIT, 0xcc}, + {0x6028, CRL_REG_LEN_08BIT, 0x04}, + {0x6029, CRL_REG_LEN_08BIT, 0x35}, + {0x602a, CRL_REG_LEN_08BIT, 0xb1}, + {0x602b, CRL_REG_LEN_08BIT, 0xb2}, + {0x602c, CRL_REG_LEN_08BIT, 0xb3}, + {0x602d, CRL_REG_LEN_08BIT, 0xd2}, + 
{0x602e, CRL_REG_LEN_08BIT, 0xd3}, + {0x602f, CRL_REG_LEN_08BIT, 0x12}, + {0x6030, CRL_REG_LEN_08BIT, 0x31}, + {0x6031, CRL_REG_LEN_08BIT, 0xcc}, + {0x6032, CRL_REG_LEN_08BIT, 0x06}, + {0x6033, CRL_REG_LEN_08BIT, 0xd2}, + {0x6034, CRL_REG_LEN_08BIT, 0xc4}, + {0x6035, CRL_REG_LEN_08BIT, 0xce}, + {0x6036, CRL_REG_LEN_08BIT, 0x18}, + {0x6037, CRL_REG_LEN_08BIT, 0xcf}, + {0x6038, CRL_REG_LEN_08BIT, 0x1e}, + {0x6039, CRL_REG_LEN_08BIT, 0xd0}, + {0x603a, CRL_REG_LEN_08BIT, 0x24}, + {0x603b, CRL_REG_LEN_08BIT, 0xc5}, + {0x603c, CRL_REG_LEN_08BIT, 0xd2}, + {0x603d, CRL_REG_LEN_08BIT, 0xbc}, + {0x603e, CRL_REG_LEN_08BIT, 0xcc}, + {0x603f, CRL_REG_LEN_08BIT, 0x52}, + {0x6040, CRL_REG_LEN_08BIT, 0x2b}, + {0x6041, CRL_REG_LEN_08BIT, 0xd2}, + {0x6042, CRL_REG_LEN_08BIT, 0xd3}, + {0x6043, CRL_REG_LEN_08BIT, 0x02}, + {0x6044, CRL_REG_LEN_08BIT, 0xcc}, + {0x6045, CRL_REG_LEN_08BIT, 0x0a}, + {0x6046, CRL_REG_LEN_08BIT, 0xd2}, + {0x6047, CRL_REG_LEN_08BIT, 0xd3}, + {0x6048, CRL_REG_LEN_08BIT, 0x0f}, + {0x6049, CRL_REG_LEN_08BIT, 0x1a}, + {0x604a, CRL_REG_LEN_08BIT, 0x2a}, + {0x604b, CRL_REG_LEN_08BIT, 0xd4}, + {0x604c, CRL_REG_LEN_08BIT, 0xf6}, + {0x604d, CRL_REG_LEN_08BIT, 0xba}, + {0x604e, CRL_REG_LEN_08BIT, 0x56}, + {0x604f, CRL_REG_LEN_08BIT, 0xd3}, + {0x6050, CRL_REG_LEN_08BIT, 0x2e}, + {0x6051, CRL_REG_LEN_08BIT, 0x54}, + {0x6052, CRL_REG_LEN_08BIT, 0x26}, + {0x6053, CRL_REG_LEN_08BIT, 0xd2}, + {0x6054, CRL_REG_LEN_08BIT, 0xcc}, + {0x6055, CRL_REG_LEN_08BIT, 0x60}, + {0x6056, CRL_REG_LEN_08BIT, 0xd2}, + {0x6057, CRL_REG_LEN_08BIT, 0xd3}, + {0x6058, CRL_REG_LEN_08BIT, 0x27}, + {0x6059, CRL_REG_LEN_08BIT, 0x27}, + {0x605a, CRL_REG_LEN_08BIT, 0x08}, + {0x605b, CRL_REG_LEN_08BIT, 0x1a}, + {0x605c, CRL_REG_LEN_08BIT, 0xcc}, + {0x605d, CRL_REG_LEN_08BIT, 0x88}, + {0x605e, CRL_REG_LEN_08BIT, 0x00}, + {0x605f, CRL_REG_LEN_08BIT, 0x12}, + {0x6060, CRL_REG_LEN_08BIT, 0x2c}, + {0x6061, CRL_REG_LEN_08BIT, 0x60}, + {0x6062, CRL_REG_LEN_08BIT, 0xc2}, + {0x6063, CRL_REG_LEN_08BIT, 0xb9}, + 
{0x6064, CRL_REG_LEN_08BIT, 0xa5}, + {0x6065, CRL_REG_LEN_08BIT, 0xb5}, + {0x6066, CRL_REG_LEN_08BIT, 0xa0}, + {0x6067, CRL_REG_LEN_08BIT, 0x82}, + {0x6068, CRL_REG_LEN_08BIT, 0x5c}, + {0x6069, CRL_REG_LEN_08BIT, 0xd4}, + {0x606a, CRL_REG_LEN_08BIT, 0xbe}, + {0x606b, CRL_REG_LEN_08BIT, 0xd4}, + {0x606c, CRL_REG_LEN_08BIT, 0xbe}, + {0x606d, CRL_REG_LEN_08BIT, 0xd3}, + {0x606e, CRL_REG_LEN_08BIT, 0x01}, + {0x606f, CRL_REG_LEN_08BIT, 0x7c}, + {0x6070, CRL_REG_LEN_08BIT, 0x74}, + {0x6071, CRL_REG_LEN_08BIT, 0x00}, + {0x6072, CRL_REG_LEN_08BIT, 0x61}, + {0x6073, CRL_REG_LEN_08BIT, 0x2a}, + {0x6074, CRL_REG_LEN_08BIT, 0xd2}, + {0x6075, CRL_REG_LEN_08BIT, 0xcc}, + {0x6076, CRL_REG_LEN_08BIT, 0xdf}, + {0x6077, CRL_REG_LEN_08BIT, 0xc6}, + {0x6078, CRL_REG_LEN_08BIT, 0x35}, + {0x6079, CRL_REG_LEN_08BIT, 0xd2}, + {0x607a, CRL_REG_LEN_08BIT, 0xcc}, + {0x607b, CRL_REG_LEN_08BIT, 0x06}, + {0x607c, CRL_REG_LEN_08BIT, 0x31}, + {0x607d, CRL_REG_LEN_08BIT, 0xd2}, + {0x607e, CRL_REG_LEN_08BIT, 0xc5}, + {0x607f, CRL_REG_LEN_08BIT, 0xbb}, + {0x6080, CRL_REG_LEN_08BIT, 0xcc}, + {0x6081, CRL_REG_LEN_08BIT, 0x18}, + {0x6082, CRL_REG_LEN_08BIT, 0xc6}, + {0x6083, CRL_REG_LEN_08BIT, 0xd2}, + {0x6084, CRL_REG_LEN_08BIT, 0xbd}, + {0x6085, CRL_REG_LEN_08BIT, 0xcc}, + {0x6086, CRL_REG_LEN_08BIT, 0x52}, + {0x6087, CRL_REG_LEN_08BIT, 0x2b}, + {0x6088, CRL_REG_LEN_08BIT, 0xd2}, + {0x6089, CRL_REG_LEN_08BIT, 0xd3}, + {0x608a, CRL_REG_LEN_08BIT, 0x01}, + {0x608b, CRL_REG_LEN_08BIT, 0xcc}, + {0x608c, CRL_REG_LEN_08BIT, 0x0a}, + {0x608d, CRL_REG_LEN_08BIT, 0xd2}, + {0x608e, CRL_REG_LEN_08BIT, 0xd3}, + {0x608f, CRL_REG_LEN_08BIT, 0x0f}, + {0x6090, CRL_REG_LEN_08BIT, 0x1a}, + {0x6091, CRL_REG_LEN_08BIT, 0x71}, + {0x6092, CRL_REG_LEN_08BIT, 0x2a}, + {0x6093, CRL_REG_LEN_08BIT, 0xd4}, + {0x6094, CRL_REG_LEN_08BIT, 0xf6}, + {0x6095, CRL_REG_LEN_08BIT, 0xd3}, + {0x6096, CRL_REG_LEN_08BIT, 0x22}, + {0x6097, CRL_REG_LEN_08BIT, 0x70}, + {0x6098, CRL_REG_LEN_08BIT, 0xca}, + {0x6099, CRL_REG_LEN_08BIT, 0x26}, + 
{0x609a, CRL_REG_LEN_08BIT, 0xd2}, + {0x609b, CRL_REG_LEN_08BIT, 0xcc}, + {0x609c, CRL_REG_LEN_08BIT, 0x60}, + {0x609d, CRL_REG_LEN_08BIT, 0xd2}, + {0x609e, CRL_REG_LEN_08BIT, 0xd3}, + {0x609f, CRL_REG_LEN_08BIT, 0x27}, + {0x60a0, CRL_REG_LEN_08BIT, 0x27}, + {0x60a1, CRL_REG_LEN_08BIT, 0x08}, + {0x60a2, CRL_REG_LEN_08BIT, 0x1a}, + {0x60a3, CRL_REG_LEN_08BIT, 0xcc}, + {0x60a4, CRL_REG_LEN_08BIT, 0x88}, + {0x60a5, CRL_REG_LEN_08BIT, 0x12}, + {0x60a6, CRL_REG_LEN_08BIT, 0x2c}, + {0x60a7, CRL_REG_LEN_08BIT, 0x60}, + {0x60a8, CRL_REG_LEN_08BIT, 0x00}, + {0x60a9, CRL_REG_LEN_08BIT, 0x00}, + {0x60aa, CRL_REG_LEN_08BIT, 0xc0}, + {0x60ab, CRL_REG_LEN_08BIT, 0xb9}, + {0x60ac, CRL_REG_LEN_08BIT, 0xa3}, + {0x60ad, CRL_REG_LEN_08BIT, 0xb5}, + {0x60ae, CRL_REG_LEN_08BIT, 0x00}, + {0x60af, CRL_REG_LEN_08BIT, 0xa0}, + {0x60b0, CRL_REG_LEN_08BIT, 0x82}, + {0x60b1, CRL_REG_LEN_08BIT, 0x5c}, + {0x60b2, CRL_REG_LEN_08BIT, 0xd4}, + {0x60b3, CRL_REG_LEN_08BIT, 0xa0}, + {0x60b4, CRL_REG_LEN_08BIT, 0x9d}, + {0x60b5, CRL_REG_LEN_08BIT, 0xd3}, + {0x60b6, CRL_REG_LEN_08BIT, 0x26}, + {0x60b7, CRL_REG_LEN_08BIT, 0xb0}, + {0x60b8, CRL_REG_LEN_08BIT, 0xb7}, + {0x60b9, CRL_REG_LEN_08BIT, 0x00}, + {0x60ba, CRL_REG_LEN_08BIT, 0xd3}, + {0x60bb, CRL_REG_LEN_08BIT, 0x0a}, + {0x60bc, CRL_REG_LEN_08BIT, 0xd3}, + {0x60bd, CRL_REG_LEN_08BIT, 0x10}, + {0x60be, CRL_REG_LEN_08BIT, 0x9c}, + {0x60bf, CRL_REG_LEN_08BIT, 0x94}, + {0x60c0, CRL_REG_LEN_08BIT, 0x90}, + {0x60c1, CRL_REG_LEN_08BIT, 0xc8}, + {0x60c2, CRL_REG_LEN_08BIT, 0xba}, + {0x60c3, CRL_REG_LEN_08BIT, 0x7c}, + {0x60c4, CRL_REG_LEN_08BIT, 0x74}, + {0x60c5, CRL_REG_LEN_08BIT, 0x00}, + {0x60c6, CRL_REG_LEN_08BIT, 0x61}, + {0x60c7, CRL_REG_LEN_08BIT, 0x2a}, + {0x60c8, CRL_REG_LEN_08BIT, 0x00}, + {0x60c9, CRL_REG_LEN_08BIT, 0xd2}, + {0x60ca, CRL_REG_LEN_08BIT, 0xcc}, + {0x60cb, CRL_REG_LEN_08BIT, 0xdf}, + {0x60cc, CRL_REG_LEN_08BIT, 0xc4}, + {0x60cd, CRL_REG_LEN_08BIT, 0x35}, + {0x60ce, CRL_REG_LEN_08BIT, 0xd2}, + {0x60cf, CRL_REG_LEN_08BIT, 0xcc}, + 
{0x60d0, CRL_REG_LEN_08BIT, 0x06}, + {0x60d1, CRL_REG_LEN_08BIT, 0x31}, + {0x60d2, CRL_REG_LEN_08BIT, 0xd2}, + {0x60d3, CRL_REG_LEN_08BIT, 0xcc}, + {0x60d4, CRL_REG_LEN_08BIT, 0x15}, + {0x60d5, CRL_REG_LEN_08BIT, 0xd2}, + {0x60d6, CRL_REG_LEN_08BIT, 0xbb}, + {0x60d7, CRL_REG_LEN_08BIT, 0xcc}, + {0x60d8, CRL_REG_LEN_08BIT, 0x1a}, + {0x60d9, CRL_REG_LEN_08BIT, 0xd2}, + {0x60da, CRL_REG_LEN_08BIT, 0xbe}, + {0x60db, CRL_REG_LEN_08BIT, 0xce}, + {0x60dc, CRL_REG_LEN_08BIT, 0x52}, + {0x60dd, CRL_REG_LEN_08BIT, 0xcf}, + {0x60de, CRL_REG_LEN_08BIT, 0x56}, + {0x60df, CRL_REG_LEN_08BIT, 0xd0}, + {0x60e0, CRL_REG_LEN_08BIT, 0x5b}, + {0x60e1, CRL_REG_LEN_08BIT, 0x2b}, + {0x60e2, CRL_REG_LEN_08BIT, 0xd2}, + {0x60e3, CRL_REG_LEN_08BIT, 0xd3}, + {0x60e4, CRL_REG_LEN_08BIT, 0x01}, + {0x60e5, CRL_REG_LEN_08BIT, 0xcc}, + {0x60e6, CRL_REG_LEN_08BIT, 0x0a}, + {0x60e7, CRL_REG_LEN_08BIT, 0xd2}, + {0x60e8, CRL_REG_LEN_08BIT, 0xd3}, + {0x60e9, CRL_REG_LEN_08BIT, 0x0f}, + {0x60ea, CRL_REG_LEN_08BIT, 0xd9}, + {0x60eb, CRL_REG_LEN_08BIT, 0xc7}, + {0x60ec, CRL_REG_LEN_08BIT, 0xda}, + {0x60ed, CRL_REG_LEN_08BIT, 0xce}, + {0x60ee, CRL_REG_LEN_08BIT, 0x1a}, + {0x60ef, CRL_REG_LEN_08BIT, 0xd4}, + {0x60f0, CRL_REG_LEN_08BIT, 0xf6}, + {0x60f1, CRL_REG_LEN_08BIT, 0xd4}, + {0x60f2, CRL_REG_LEN_08BIT, 0xa9}, + {0x60f3, CRL_REG_LEN_08BIT, 0x27}, + {0x60f4, CRL_REG_LEN_08BIT, 0x00}, + {0x60f5, CRL_REG_LEN_08BIT, 0xd2}, + {0x60f6, CRL_REG_LEN_08BIT, 0xcc}, + {0x60f7, CRL_REG_LEN_08BIT, 0x60}, + {0x60f8, CRL_REG_LEN_08BIT, 0xd2}, + {0x60f9, CRL_REG_LEN_08BIT, 0xd3}, + {0x60fa, CRL_REG_LEN_08BIT, 0x2d}, + {0x60fb, CRL_REG_LEN_08BIT, 0xd9}, + {0x60fc, CRL_REG_LEN_08BIT, 0xdf}, + {0x60fd, CRL_REG_LEN_08BIT, 0xda}, + {0x60fe, CRL_REG_LEN_08BIT, 0xe5}, + {0x60ff, CRL_REG_LEN_08BIT, 0x1a}, + {0x6100, CRL_REG_LEN_08BIT, 0x12}, + {0x6101, CRL_REG_LEN_08BIT, 0xcc}, + {0x6102, CRL_REG_LEN_08BIT, 0x88}, + {0x6103, CRL_REG_LEN_08BIT, 0xd6}, + {0x6104, CRL_REG_LEN_08BIT, 0xb1}, + {0x6105, CRL_REG_LEN_08BIT, 0xb9}, + 
{0x6106, CRL_REG_LEN_08BIT, 0xba}, + {0x6107, CRL_REG_LEN_08BIT, 0xaf}, + {0x6108, CRL_REG_LEN_08BIT, 0xdc}, + {0x6109, CRL_REG_LEN_08BIT, 0x00}, + {0x610a, CRL_REG_LEN_08BIT, 0xcb}, + {0x610b, CRL_REG_LEN_08BIT, 0xc3}, + {0x610c, CRL_REG_LEN_08BIT, 0xb9}, + {0x610d, CRL_REG_LEN_08BIT, 0xa4}, + {0x610e, CRL_REG_LEN_08BIT, 0xb5}, + {0x610f, CRL_REG_LEN_08BIT, 0x5c}, + {0x6110, CRL_REG_LEN_08BIT, 0x12}, + {0x6111, CRL_REG_LEN_08BIT, 0x2a}, + {0x6112, CRL_REG_LEN_08BIT, 0x61}, + {0x6113, CRL_REG_LEN_08BIT, 0xd2}, + {0x6114, CRL_REG_LEN_08BIT, 0xcc}, + {0x6115, CRL_REG_LEN_08BIT, 0xdf}, + {0x6116, CRL_REG_LEN_08BIT, 0xc7}, + {0x6117, CRL_REG_LEN_08BIT, 0x35}, + {0x6118, CRL_REG_LEN_08BIT, 0xd2}, + {0x6119, CRL_REG_LEN_08BIT, 0xcc}, + {0x611a, CRL_REG_LEN_08BIT, 0x06}, + {0x611b, CRL_REG_LEN_08BIT, 0x31}, + {0x611c, CRL_REG_LEN_08BIT, 0xc6}, + {0x611d, CRL_REG_LEN_08BIT, 0xbb}, + {0x611e, CRL_REG_LEN_08BIT, 0xd2}, + {0x611f, CRL_REG_LEN_08BIT, 0xcc}, + {0x6120, CRL_REG_LEN_08BIT, 0x18}, + {0x6121, CRL_REG_LEN_08BIT, 0xd2}, + {0x6122, CRL_REG_LEN_08BIT, 0xbe}, + {0x6123, CRL_REG_LEN_08BIT, 0xcc}, + {0x6124, CRL_REG_LEN_08BIT, 0x52}, + {0x6125, CRL_REG_LEN_08BIT, 0xc7}, + {0x6126, CRL_REG_LEN_08BIT, 0xd2}, + {0x6127, CRL_REG_LEN_08BIT, 0xcc}, + {0x6128, CRL_REG_LEN_08BIT, 0x0a}, + {0x6129, CRL_REG_LEN_08BIT, 0xb4}, + {0x612a, CRL_REG_LEN_08BIT, 0xb7}, + {0x612b, CRL_REG_LEN_08BIT, 0x94}, + {0x612c, CRL_REG_LEN_08BIT, 0xd2}, + {0x612d, CRL_REG_LEN_08BIT, 0x12}, + {0x612e, CRL_REG_LEN_08BIT, 0x26}, + {0x612f, CRL_REG_LEN_08BIT, 0x42}, + {0x6130, CRL_REG_LEN_08BIT, 0x46}, + {0x6131, CRL_REG_LEN_08BIT, 0x42}, + {0x6132, CRL_REG_LEN_08BIT, 0xd3}, + {0x6133, CRL_REG_LEN_08BIT, 0x20}, + {0x6134, CRL_REG_LEN_08BIT, 0x27}, + {0x6135, CRL_REG_LEN_08BIT, 0x00}, + {0x6136, CRL_REG_LEN_08BIT, 0x1a}, + {0x6137, CRL_REG_LEN_08BIT, 0xcc}, + {0x6138, CRL_REG_LEN_08BIT, 0x88}, + {0x6139, CRL_REG_LEN_08BIT, 0x60}, + {0x613a, CRL_REG_LEN_08BIT, 0x2c}, + {0x613b, CRL_REG_LEN_08BIT, 0x12}, + 
{0x613c, CRL_REG_LEN_08BIT, 0x40}, + {0x613d, CRL_REG_LEN_08BIT, 0xb8}, + {0x613e, CRL_REG_LEN_08BIT, 0x90}, + {0x613f, CRL_REG_LEN_08BIT, 0xd5}, + {0x6140, CRL_REG_LEN_08BIT, 0xba}, + {0x6141, CRL_REG_LEN_08BIT, 0x00}, + {0x6142, CRL_REG_LEN_08BIT, 0x00}, + {0x6143, CRL_REG_LEN_08BIT, 0x00}, + {0x6144, CRL_REG_LEN_08BIT, 0x00}, + {0x6145, CRL_REG_LEN_08BIT, 0x00}, + {0x6146, CRL_REG_LEN_08BIT, 0x00}, + {0x6147, CRL_REG_LEN_08BIT, 0xaa}, + {0x6148, CRL_REG_LEN_08BIT, 0xb7}, + {0x6149, CRL_REG_LEN_08BIT, 0x00}, + {0x614a, CRL_REG_LEN_08BIT, 0x00}, + {0x614b, CRL_REG_LEN_08BIT, 0x00}, + {0x614c, CRL_REG_LEN_08BIT, 0x00}, + {0x614d, CRL_REG_LEN_08BIT, 0xa6}, + {0x614e, CRL_REG_LEN_08BIT, 0xb7}, + {0x614f, CRL_REG_LEN_08BIT, 0x00}, + {0x6150, CRL_REG_LEN_08BIT, 0xd5}, + {0x6151, CRL_REG_LEN_08BIT, 0x00}, + {0x6152, CRL_REG_LEN_08BIT, 0x71}, + {0x6153, CRL_REG_LEN_08BIT, 0xd3}, + {0x6154, CRL_REG_LEN_08BIT, 0x30}, + {0x6155, CRL_REG_LEN_08BIT, 0xba}, + {0x6156, CRL_REG_LEN_08BIT, 0x00}, + {0x6157, CRL_REG_LEN_08BIT, 0x00}, + {0x6158, CRL_REG_LEN_08BIT, 0x00}, + {0x6159, CRL_REG_LEN_08BIT, 0x00}, + {0x615a, CRL_REG_LEN_08BIT, 0xd3}, + {0x615b, CRL_REG_LEN_08BIT, 0x10}, + {0x615c, CRL_REG_LEN_08BIT, 0x70}, + {0x615d, CRL_REG_LEN_08BIT, 0x00}, + {0x615e, CRL_REG_LEN_08BIT, 0x00}, + {0x615f, CRL_REG_LEN_08BIT, 0x00}, + {0x6160, CRL_REG_LEN_08BIT, 0x00}, + {0x6161, CRL_REG_LEN_08BIT, 0xd5}, + {0x6162, CRL_REG_LEN_08BIT, 0xba}, + {0x6163, CRL_REG_LEN_08BIT, 0xb0}, + {0x6164, CRL_REG_LEN_08BIT, 0xb7}, + {0x6165, CRL_REG_LEN_08BIT, 0x00}, + {0x6166, CRL_REG_LEN_08BIT, 0x9d}, + {0x6167, CRL_REG_LEN_08BIT, 0xd3}, + {0x6168, CRL_REG_LEN_08BIT, 0x0a}, + {0x6169, CRL_REG_LEN_08BIT, 0x9d}, + {0x616a, CRL_REG_LEN_08BIT, 0x9d}, + {0x616b, CRL_REG_LEN_08BIT, 0xd3}, + {0x616c, CRL_REG_LEN_08BIT, 0x10}, + {0x616d, CRL_REG_LEN_08BIT, 0x9c}, + {0x616e, CRL_REG_LEN_08BIT, 0x94}, + {0x616f, CRL_REG_LEN_08BIT, 0x90}, + {0x6170, CRL_REG_LEN_08BIT, 0xc8}, + {0x6171, CRL_REG_LEN_08BIT, 0xba}, + 
{0x6172, CRL_REG_LEN_08BIT, 0xd2}, + {0x6173, CRL_REG_LEN_08BIT, 0x60}, + {0x6174, CRL_REG_LEN_08BIT, 0x2c}, + {0x6175, CRL_REG_LEN_08BIT, 0x50}, + {0x6176, CRL_REG_LEN_08BIT, 0x11}, + {0x6177, CRL_REG_LEN_08BIT, 0xcc}, + {0x6178, CRL_REG_LEN_08BIT, 0x00}, + {0x6179, CRL_REG_LEN_08BIT, 0x30}, + {0x617a, CRL_REG_LEN_08BIT, 0xd5}, + {0x617b, CRL_REG_LEN_08BIT, 0x00}, + {0x617c, CRL_REG_LEN_08BIT, 0xba}, + {0x617d, CRL_REG_LEN_08BIT, 0xb0}, + {0x617e, CRL_REG_LEN_08BIT, 0xb7}, + {0x617f, CRL_REG_LEN_08BIT, 0x00}, + {0x6180, CRL_REG_LEN_08BIT, 0x9d}, + {0x6181, CRL_REG_LEN_08BIT, 0xd3}, + {0x6182, CRL_REG_LEN_08BIT, 0x0a}, + {0x6183, CRL_REG_LEN_08BIT, 0x9d}, + {0x6184, CRL_REG_LEN_08BIT, 0x9d}, + {0x6185, CRL_REG_LEN_08BIT, 0xd3}, + {0x6186, CRL_REG_LEN_08BIT, 0x10}, + {0x6187, CRL_REG_LEN_08BIT, 0x9c}, + {0x6188, CRL_REG_LEN_08BIT, 0x94}, + {0x6189, CRL_REG_LEN_08BIT, 0x90}, + {0x618a, CRL_REG_LEN_08BIT, 0xc8}, + {0x618b, CRL_REG_LEN_08BIT, 0xba}, + {0x618c, CRL_REG_LEN_08BIT, 0xd5}, + {0x618d, CRL_REG_LEN_08BIT, 0x00}, + {0x618e, CRL_REG_LEN_08BIT, 0x01}, + {0x618f, CRL_REG_LEN_08BIT, 0x1a}, + {0x6190, CRL_REG_LEN_08BIT, 0xcc}, + {0x6191, CRL_REG_LEN_08BIT, 0x12}, + {0x6192, CRL_REG_LEN_08BIT, 0x12}, + {0x6193, CRL_REG_LEN_08BIT, 0x00}, + {0x6194, CRL_REG_LEN_08BIT, 0xcc}, + {0x6195, CRL_REG_LEN_08BIT, 0x9c}, + {0x6196, CRL_REG_LEN_08BIT, 0xd2}, + {0x6197, CRL_REG_LEN_08BIT, 0xcc}, + {0x6198, CRL_REG_LEN_08BIT, 0x60}, + {0x6199, CRL_REG_LEN_08BIT, 0xd2}, + {0x619a, CRL_REG_LEN_08BIT, 0x04}, + {0x619b, CRL_REG_LEN_08BIT, 0xd5}, + {0x619c, CRL_REG_LEN_08BIT, 0x1a}, + {0x619d, CRL_REG_LEN_08BIT, 0xcc}, + {0x619e, CRL_REG_LEN_08BIT, 0x12}, + {0x619f, CRL_REG_LEN_08BIT, 0x00}, + {0x61a0, CRL_REG_LEN_08BIT, 0x12}, + {0x61a1, CRL_REG_LEN_08BIT, 0xcc}, + {0x61a2, CRL_REG_LEN_08BIT, 0x9c}, + {0x61a3, CRL_REG_LEN_08BIT, 0xd2}, + {0x61a4, CRL_REG_LEN_08BIT, 0xcc}, + {0x61a5, CRL_REG_LEN_08BIT, 0x60}, + {0x61a6, CRL_REG_LEN_08BIT, 0xd2}, + {0x61a7, CRL_REG_LEN_08BIT, 0x1a}, + 
{0x61a8, CRL_REG_LEN_08BIT, 0xcc}, + {0x61a9, CRL_REG_LEN_08BIT, 0x12}, + {0x61aa, CRL_REG_LEN_08BIT, 0x00}, + {0x61ab, CRL_REG_LEN_08BIT, 0x12}, + {0x61ac, CRL_REG_LEN_08BIT, 0xcc}, + {0x61ad, CRL_REG_LEN_08BIT, 0x9c}, + {0x61ae, CRL_REG_LEN_08BIT, 0xd2}, + {0x61af, CRL_REG_LEN_08BIT, 0xcc}, + {0x61b0, CRL_REG_LEN_08BIT, 0x60}, + {0x61b1, CRL_REG_LEN_08BIT, 0xd2}, + {0x61b2, CRL_REG_LEN_08BIT, 0x1a}, + {0x61b3, CRL_REG_LEN_08BIT, 0xcc}, + {0x61b4, CRL_REG_LEN_08BIT, 0x12}, + {0x61b5, CRL_REG_LEN_08BIT, 0x00}, + {0x61b6, CRL_REG_LEN_08BIT, 0x12}, + {0x61b7, CRL_REG_LEN_08BIT, 0xcc}, + {0x61b8, CRL_REG_LEN_08BIT, 0x9c}, + {0x61b9, CRL_REG_LEN_08BIT, 0xd2}, + {0x61ba, CRL_REG_LEN_08BIT, 0xcc}, + {0x61bb, CRL_REG_LEN_08BIT, 0x60}, + {0x61bc, CRL_REG_LEN_08BIT, 0xd2}, + {0x61bd, CRL_REG_LEN_08BIT, 0xd5}, + {0x61be, CRL_REG_LEN_08BIT, 0x1a}, + {0x61bf, CRL_REG_LEN_08BIT, 0xcc}, + {0x61c0, CRL_REG_LEN_08BIT, 0x12}, + {0x61c1, CRL_REG_LEN_08BIT, 0x12}, + {0x61c2, CRL_REG_LEN_08BIT, 0x00}, + {0x61c3, CRL_REG_LEN_08BIT, 0xcc}, + {0x61c4, CRL_REG_LEN_08BIT, 0x8a}, + {0x61c5, CRL_REG_LEN_08BIT, 0xd2}, + {0x61c6, CRL_REG_LEN_08BIT, 0xcc}, + {0x61c7, CRL_REG_LEN_08BIT, 0x74}, + {0x61c8, CRL_REG_LEN_08BIT, 0xd2}, + {0x61c9, CRL_REG_LEN_08BIT, 0xd5}, + {0x61ca, CRL_REG_LEN_08BIT, 0x1a}, + {0x61cb, CRL_REG_LEN_08BIT, 0xcc}, + {0x61cc, CRL_REG_LEN_08BIT, 0x12}, + {0x61cd, CRL_REG_LEN_08BIT, 0x00}, + {0x61ce, CRL_REG_LEN_08BIT, 0x12}, + {0x61cf, CRL_REG_LEN_08BIT, 0xcc}, + {0x61d0, CRL_REG_LEN_08BIT, 0x8a}, + {0x61d1, CRL_REG_LEN_08BIT, 0xd2}, + {0x61d2, CRL_REG_LEN_08BIT, 0xcc}, + {0x61d3, CRL_REG_LEN_08BIT, 0x74}, + {0x61d4, CRL_REG_LEN_08BIT, 0xd2}, + {0x61d5, CRL_REG_LEN_08BIT, 0x1a}, + {0x61d6, CRL_REG_LEN_08BIT, 0xcc}, + {0x61d7, CRL_REG_LEN_08BIT, 0x12}, + {0x61d8, CRL_REG_LEN_08BIT, 0x00}, + {0x61d9, CRL_REG_LEN_08BIT, 0x12}, + {0x61da, CRL_REG_LEN_08BIT, 0xcc}, + {0x61db, CRL_REG_LEN_08BIT, 0x8a}, + {0x61dc, CRL_REG_LEN_08BIT, 0xd2}, + {0x61dd, CRL_REG_LEN_08BIT, 0xcc}, + 
{0x61de, CRL_REG_LEN_08BIT, 0x74}, + {0x61df, CRL_REG_LEN_08BIT, 0xd2}, + {0x61e0, CRL_REG_LEN_08BIT, 0x1a}, + {0x61e1, CRL_REG_LEN_08BIT, 0xcc}, + {0x61e2, CRL_REG_LEN_08BIT, 0x12}, + {0x61e3, CRL_REG_LEN_08BIT, 0x00}, + {0x61e4, CRL_REG_LEN_08BIT, 0x12}, + {0x61e5, CRL_REG_LEN_08BIT, 0xcc}, + {0x61e6, CRL_REG_LEN_08BIT, 0x8a}, + {0x61e7, CRL_REG_LEN_08BIT, 0xd2}, + {0x61e8, CRL_REG_LEN_08BIT, 0xcc}, + {0x61e9, CRL_REG_LEN_08BIT, 0x74}, + {0x61ea, CRL_REG_LEN_08BIT, 0xd2}, + {0x61eb, CRL_REG_LEN_08BIT, 0xd5}, + {0x61ec, CRL_REG_LEN_08BIT, 0xcc}, + {0x61ed, CRL_REG_LEN_08BIT, 0x12}, + {0x61ee, CRL_REG_LEN_08BIT, 0x00}, + {0x61ef, CRL_REG_LEN_08BIT, 0x12}, + {0x61f0, CRL_REG_LEN_08BIT, 0xcc}, + {0x61f1, CRL_REG_LEN_08BIT, 0x9c}, + {0x61f2, CRL_REG_LEN_08BIT, 0xd5}, + {0x6400, CRL_REG_LEN_08BIT, 0x04}, + {0x6401, CRL_REG_LEN_08BIT, 0x04}, + {0x6402, CRL_REG_LEN_08BIT, 0x00}, + {0x6403, CRL_REG_LEN_08BIT, 0xff}, + {0x6404, CRL_REG_LEN_08BIT, 0x00}, + {0x6405, CRL_REG_LEN_08BIT, 0x08}, + {0x6406, CRL_REG_LEN_08BIT, 0x00}, + {0x6407, CRL_REG_LEN_08BIT, 0xff}, + {0x6408, CRL_REG_LEN_08BIT, 0x04}, + {0x6409, CRL_REG_LEN_08BIT, 0x70}, + {0x640a, CRL_REG_LEN_08BIT, 0x00}, + {0x640b, CRL_REG_LEN_08BIT, 0xff}, + {0x640c, CRL_REG_LEN_08BIT, 0x05}, + {0x640d, CRL_REG_LEN_08BIT, 0x14}, + {0x640e, CRL_REG_LEN_08BIT, 0x04}, + {0x640f, CRL_REG_LEN_08BIT, 0x71}, + {0x6410, CRL_REG_LEN_08BIT, 0x05}, + {0x6411, CRL_REG_LEN_08BIT, 0x74}, + {0x6412, CRL_REG_LEN_08BIT, 0x00}, + {0x6413, CRL_REG_LEN_08BIT, 0xff}, + {0x6414, CRL_REG_LEN_08BIT, 0x05}, + {0x6415, CRL_REG_LEN_08BIT, 0x54}, + {0x6416, CRL_REG_LEN_08BIT, 0x05}, + {0x6417, CRL_REG_LEN_08BIT, 0x44}, + {0x6418, CRL_REG_LEN_08BIT, 0x04}, + {0x6419, CRL_REG_LEN_08BIT, 0x30}, + {0x641a, CRL_REG_LEN_08BIT, 0x05}, + {0x641b, CRL_REG_LEN_08BIT, 0x46}, + {0x641c, CRL_REG_LEN_08BIT, 0x00}, + {0x641d, CRL_REG_LEN_08BIT, 0xff}, + {0x641e, CRL_REG_LEN_08BIT, 0x04}, + {0x641f, CRL_REG_LEN_08BIT, 0x31}, + {0x6420, CRL_REG_LEN_08BIT, 0x04}, + 
{0x6421, CRL_REG_LEN_08BIT, 0x30}, + {0x6422, CRL_REG_LEN_08BIT, 0x00}, + {0x6423, CRL_REG_LEN_08BIT, 0xff}, + {0x6424, CRL_REG_LEN_08BIT, 0x04}, + {0x6425, CRL_REG_LEN_08BIT, 0x20}, + {0x6426, CRL_REG_LEN_08BIT, 0x05}, + {0x6427, CRL_REG_LEN_08BIT, 0x06}, + {0x6428, CRL_REG_LEN_08BIT, 0x00}, + {0x6429, CRL_REG_LEN_08BIT, 0xff}, + {0x642a, CRL_REG_LEN_08BIT, 0x08}, + {0x642b, CRL_REG_LEN_08BIT, 0x2a}, + {0x642c, CRL_REG_LEN_08BIT, 0x08}, + {0x642d, CRL_REG_LEN_08BIT, 0x31}, + {0x642e, CRL_REG_LEN_08BIT, 0x00}, + {0x642f, CRL_REG_LEN_08BIT, 0xff}, + {0x6430, CRL_REG_LEN_08BIT, 0x08}, + {0x6431, CRL_REG_LEN_08BIT, 0x2a}, + {0x6432, CRL_REG_LEN_08BIT, 0x08}, + {0x6433, CRL_REG_LEN_08BIT, 0x31}, + {0x6434, CRL_REG_LEN_08BIT, 0x06}, + {0x6435, CRL_REG_LEN_08BIT, 0x20}, + {0x6436, CRL_REG_LEN_08BIT, 0x07}, + {0x6437, CRL_REG_LEN_08BIT, 0x00}, + {0x6438, CRL_REG_LEN_08BIT, 0x08}, + {0x6439, CRL_REG_LEN_08BIT, 0x40}, + {0x643a, CRL_REG_LEN_08BIT, 0x00}, + {0x643b, CRL_REG_LEN_08BIT, 0xff}, + {0x643c, CRL_REG_LEN_08BIT, 0x08}, + {0x643d, CRL_REG_LEN_08BIT, 0x2a}, + {0x643e, CRL_REG_LEN_08BIT, 0x08}, + {0x643f, CRL_REG_LEN_08BIT, 0x36}, + {0x6440, CRL_REG_LEN_08BIT, 0x06}, + {0x6441, CRL_REG_LEN_08BIT, 0x10}, + {0x6442, CRL_REG_LEN_08BIT, 0x07}, + {0x6443, CRL_REG_LEN_08BIT, 0x00}, + {0x6444, CRL_REG_LEN_08BIT, 0x08}, + {0x6445, CRL_REG_LEN_08BIT, 0x40}, + {0x6446, CRL_REG_LEN_08BIT, 0x00}, + {0x6447, CRL_REG_LEN_08BIT, 0xff}, + {0x6448, CRL_REG_LEN_08BIT, 0x08}, + {0x6449, CRL_REG_LEN_08BIT, 0x2a}, + {0x644a, CRL_REG_LEN_08BIT, 0x08}, + {0x644b, CRL_REG_LEN_08BIT, 0x3b}, + {0x644c, CRL_REG_LEN_08BIT, 0x06}, + {0x644d, CRL_REG_LEN_08BIT, 0x00}, + {0x644e, CRL_REG_LEN_08BIT, 0x07}, + {0x644f, CRL_REG_LEN_08BIT, 0x00}, + {0x6450, CRL_REG_LEN_08BIT, 0x08}, + {0x6451, CRL_REG_LEN_08BIT, 0x40}, + {0x6452, CRL_REG_LEN_08BIT, 0x00}, + {0x6453, CRL_REG_LEN_08BIT, 0xff}, + {0x6454, CRL_REG_LEN_08BIT, 0x06}, + {0x6455, CRL_REG_LEN_08BIT, 0x00}, + {0x6456, CRL_REG_LEN_08BIT, 0x07}, + 
{0x6457, CRL_REG_LEN_08BIT, 0x05}, + {0x6458, CRL_REG_LEN_08BIT, 0x01}, + {0x6459, CRL_REG_LEN_08BIT, 0xaf}, + {0x645a, CRL_REG_LEN_08BIT, 0x01}, + {0x645b, CRL_REG_LEN_08BIT, 0x0f}, + {0x645c, CRL_REG_LEN_08BIT, 0x01}, + {0x645d, CRL_REG_LEN_08BIT, 0x90}, + {0x645e, CRL_REG_LEN_08BIT, 0x01}, + {0x645f, CRL_REG_LEN_08BIT, 0xc8}, + {0x6460, CRL_REG_LEN_08BIT, 0x00}, + {0x6461, CRL_REG_LEN_08BIT, 0xff}, + {0x6462, CRL_REG_LEN_08BIT, 0x01}, + {0x6463, CRL_REG_LEN_08BIT, 0xac}, + {0x6464, CRL_REG_LEN_08BIT, 0x01}, + {0x6465, CRL_REG_LEN_08BIT, 0x0c}, + {0x6466, CRL_REG_LEN_08BIT, 0x01}, + {0x6467, CRL_REG_LEN_08BIT, 0x90}, + {0x6468, CRL_REG_LEN_08BIT, 0x01}, + {0x6469, CRL_REG_LEN_08BIT, 0xe8}, + {0x646a, CRL_REG_LEN_08BIT, 0x00}, + {0x646b, CRL_REG_LEN_08BIT, 0xff}, + {0x646c, CRL_REG_LEN_08BIT, 0x01}, + {0x646d, CRL_REG_LEN_08BIT, 0xad}, + {0x646e, CRL_REG_LEN_08BIT, 0x01}, + {0x646f, CRL_REG_LEN_08BIT, 0x0d}, + {0x6470, CRL_REG_LEN_08BIT, 0x01}, + {0x6471, CRL_REG_LEN_08BIT, 0x90}, + {0x6472, CRL_REG_LEN_08BIT, 0x01}, + {0x6473, CRL_REG_LEN_08BIT, 0xe8}, + {0x6474, CRL_REG_LEN_08BIT, 0x00}, + {0x6475, CRL_REG_LEN_08BIT, 0xff}, + {0x6476, CRL_REG_LEN_08BIT, 0x01}, + {0x6477, CRL_REG_LEN_08BIT, 0xae}, + {0x6478, CRL_REG_LEN_08BIT, 0x01}, + {0x6479, CRL_REG_LEN_08BIT, 0x0e}, + {0x647a, CRL_REG_LEN_08BIT, 0x01}, + {0x647b, CRL_REG_LEN_08BIT, 0x90}, + {0x647c, CRL_REG_LEN_08BIT, 0x01}, + {0x647d, CRL_REG_LEN_08BIT, 0xe8}, + {0x647e, CRL_REG_LEN_08BIT, 0x00}, + {0x647f, CRL_REG_LEN_08BIT, 0xff}, + {0x6480, CRL_REG_LEN_08BIT, 0x01}, + {0x6481, CRL_REG_LEN_08BIT, 0xb0}, + {0x6482, CRL_REG_LEN_08BIT, 0x01}, + {0x6483, CRL_REG_LEN_08BIT, 0xb1}, + {0x6484, CRL_REG_LEN_08BIT, 0x01}, + {0x6485, CRL_REG_LEN_08BIT, 0xb2}, + {0x6486, CRL_REG_LEN_08BIT, 0x01}, + {0x6487, CRL_REG_LEN_08BIT, 0xb3}, + {0x6488, CRL_REG_LEN_08BIT, 0x01}, + {0x6489, CRL_REG_LEN_08BIT, 0xb4}, + {0x648a, CRL_REG_LEN_08BIT, 0x01}, + {0x648b, CRL_REG_LEN_08BIT, 0xb5}, + {0x648c, CRL_REG_LEN_08BIT, 0x01}, + 
{0x648d, CRL_REG_LEN_08BIT, 0xb6}, + {0x648e, CRL_REG_LEN_08BIT, 0x01}, + {0x648f, CRL_REG_LEN_08BIT, 0xb7}, + {0x6490, CRL_REG_LEN_08BIT, 0x01}, + {0x6491, CRL_REG_LEN_08BIT, 0xb8}, + {0x6492, CRL_REG_LEN_08BIT, 0x01}, + {0x6493, CRL_REG_LEN_08BIT, 0xb9}, + {0x6494, CRL_REG_LEN_08BIT, 0x01}, + {0x6495, CRL_REG_LEN_08BIT, 0xba}, + {0x6496, CRL_REG_LEN_08BIT, 0x01}, + {0x6497, CRL_REG_LEN_08BIT, 0xbb}, + {0x6498, CRL_REG_LEN_08BIT, 0x01}, + {0x6499, CRL_REG_LEN_08BIT, 0xbc}, + {0x649a, CRL_REG_LEN_08BIT, 0x01}, + {0x649b, CRL_REG_LEN_08BIT, 0xbd}, + {0x649c, CRL_REG_LEN_08BIT, 0x01}, + {0x649d, CRL_REG_LEN_08BIT, 0xbe}, + {0x649e, CRL_REG_LEN_08BIT, 0x01}, + {0x649f, CRL_REG_LEN_08BIT, 0xbf}, + {0x64a0, CRL_REG_LEN_08BIT, 0x01}, + {0x64a1, CRL_REG_LEN_08BIT, 0xc0}, + {0x64a2, CRL_REG_LEN_08BIT, 0x00}, + {0x64a3, CRL_REG_LEN_08BIT, 0xff}, + {0x64a4, CRL_REG_LEN_08BIT, 0x06}, + {0x64a5, CRL_REG_LEN_08BIT, 0x00}, + {0x64a6, CRL_REG_LEN_08BIT, 0x01}, + {0x64a7, CRL_REG_LEN_08BIT, 0xf6}, + {0x64a8, CRL_REG_LEN_08BIT, 0x04}, + {0x64a9, CRL_REG_LEN_08BIT, 0x30}, + {0x64aa, CRL_REG_LEN_08BIT, 0x00}, + {0x64ab, CRL_REG_LEN_08BIT, 0xff}, + {0x64ac, CRL_REG_LEN_08BIT, 0x06}, + {0x64ad, CRL_REG_LEN_08BIT, 0x10}, + {0x64ae, CRL_REG_LEN_08BIT, 0x01}, + {0x64af, CRL_REG_LEN_08BIT, 0xf6}, + {0x64b0, CRL_REG_LEN_08BIT, 0x04}, + {0x64b1, CRL_REG_LEN_08BIT, 0x30}, + {0x64b2, CRL_REG_LEN_08BIT, 0x06}, + {0x64b3, CRL_REG_LEN_08BIT, 0x00}, + {0x64b4, CRL_REG_LEN_08BIT, 0x00}, + {0x64b5, CRL_REG_LEN_08BIT, 0xff}, + {0x64b6, CRL_REG_LEN_08BIT, 0x06}, + {0x64b7, CRL_REG_LEN_08BIT, 0x20}, + {0x64b8, CRL_REG_LEN_08BIT, 0x01}, + {0x64b9, CRL_REG_LEN_08BIT, 0xf6}, + {0x64ba, CRL_REG_LEN_08BIT, 0x04}, + {0x64bb, CRL_REG_LEN_08BIT, 0x30}, + {0x64bc, CRL_REG_LEN_08BIT, 0x06}, + {0x64bd, CRL_REG_LEN_08BIT, 0x00}, + {0x64be, CRL_REG_LEN_08BIT, 0x00}, + {0x64bf, CRL_REG_LEN_08BIT, 0xff}, + {0x64c0, CRL_REG_LEN_08BIT, 0x04}, + {0x64c1, CRL_REG_LEN_08BIT, 0x31}, + {0x64c2, CRL_REG_LEN_08BIT, 0x04}, + 
{0x64c3, CRL_REG_LEN_08BIT, 0x30}, + {0x64c4, CRL_REG_LEN_08BIT, 0x01}, + {0x64c5, CRL_REG_LEN_08BIT, 0x20}, + {0x64c6, CRL_REG_LEN_08BIT, 0x01}, + {0x64c7, CRL_REG_LEN_08BIT, 0x31}, + {0x64c8, CRL_REG_LEN_08BIT, 0x01}, + {0x64c9, CRL_REG_LEN_08BIT, 0x32}, + {0x64ca, CRL_REG_LEN_08BIT, 0x01}, + {0x64cb, CRL_REG_LEN_08BIT, 0x33}, + {0x64cc, CRL_REG_LEN_08BIT, 0x01}, + {0x64cd, CRL_REG_LEN_08BIT, 0x34}, + {0x64ce, CRL_REG_LEN_08BIT, 0x01}, + {0x64cf, CRL_REG_LEN_08BIT, 0x35}, + {0x64d0, CRL_REG_LEN_08BIT, 0x01}, + {0x64d1, CRL_REG_LEN_08BIT, 0x36}, + {0x64d2, CRL_REG_LEN_08BIT, 0x01}, + {0x64d3, CRL_REG_LEN_08BIT, 0x37}, + {0x64d4, CRL_REG_LEN_08BIT, 0x01}, + {0x64d5, CRL_REG_LEN_08BIT, 0x38}, + {0x64d6, CRL_REG_LEN_08BIT, 0x01}, + {0x64d7, CRL_REG_LEN_08BIT, 0x39}, + {0x64d8, CRL_REG_LEN_08BIT, 0x01}, + {0x64d9, CRL_REG_LEN_08BIT, 0x3a}, + {0x64da, CRL_REG_LEN_08BIT, 0x01}, + {0x64db, CRL_REG_LEN_08BIT, 0x3b}, + {0x64dc, CRL_REG_LEN_08BIT, 0x01}, + {0x64dd, CRL_REG_LEN_08BIT, 0x3c}, + {0x64de, CRL_REG_LEN_08BIT, 0x01}, + {0x64df, CRL_REG_LEN_08BIT, 0x3d}, + {0x64e0, CRL_REG_LEN_08BIT, 0x01}, + {0x64e1, CRL_REG_LEN_08BIT, 0x3e}, + {0x64e2, CRL_REG_LEN_08BIT, 0x01}, + {0x64e3, CRL_REG_LEN_08BIT, 0x3f}, + {0x64e4, CRL_REG_LEN_08BIT, 0x02}, + {0x64e5, CRL_REG_LEN_08BIT, 0xa0}, + {0x64e6, CRL_REG_LEN_08BIT, 0x00}, + {0x64e7, CRL_REG_LEN_08BIT, 0xff}, + {0x64e8, CRL_REG_LEN_08BIT, 0x04}, + {0x64e9, CRL_REG_LEN_08BIT, 0x31}, + {0x64ea, CRL_REG_LEN_08BIT, 0x04}, + {0x64eb, CRL_REG_LEN_08BIT, 0x30}, + {0x64ec, CRL_REG_LEN_08BIT, 0x01}, + {0x64ed, CRL_REG_LEN_08BIT, 0x00}, + {0x64ee, CRL_REG_LEN_08BIT, 0x01}, + {0x64ef, CRL_REG_LEN_08BIT, 0x11}, + {0x64f0, CRL_REG_LEN_08BIT, 0x01}, + {0x64f1, CRL_REG_LEN_08BIT, 0x12}, + {0x64f2, CRL_REG_LEN_08BIT, 0x01}, + {0x64f3, CRL_REG_LEN_08BIT, 0x13}, + {0x64f4, CRL_REG_LEN_08BIT, 0x01}, + {0x64f5, CRL_REG_LEN_08BIT, 0x14}, + {0x64f6, CRL_REG_LEN_08BIT, 0x01}, + {0x64f7, CRL_REG_LEN_08BIT, 0x15}, + {0x64f8, CRL_REG_LEN_08BIT, 0x01}, + 
{0x64f9, CRL_REG_LEN_08BIT, 0x16}, + {0x64fa, CRL_REG_LEN_08BIT, 0x01}, + {0x64fb, CRL_REG_LEN_08BIT, 0x17}, + {0x64fc, CRL_REG_LEN_08BIT, 0x01}, + {0x64fd, CRL_REG_LEN_08BIT, 0x18}, + {0x64fe, CRL_REG_LEN_08BIT, 0x01}, + {0x64ff, CRL_REG_LEN_08BIT, 0x19}, + {0x6500, CRL_REG_LEN_08BIT, 0x01}, + {0x6501, CRL_REG_LEN_08BIT, 0x1a}, + {0x6502, CRL_REG_LEN_08BIT, 0x01}, + {0x6503, CRL_REG_LEN_08BIT, 0x1b}, + {0x6504, CRL_REG_LEN_08BIT, 0x01}, + {0x6505, CRL_REG_LEN_08BIT, 0x1c}, + {0x6506, CRL_REG_LEN_08BIT, 0x01}, + {0x6507, CRL_REG_LEN_08BIT, 0x1d}, + {0x6508, CRL_REG_LEN_08BIT, 0x01}, + {0x6509, CRL_REG_LEN_08BIT, 0x1e}, + {0x650a, CRL_REG_LEN_08BIT, 0x01}, + {0x650b, CRL_REG_LEN_08BIT, 0x1f}, + {0x650c, CRL_REG_LEN_08BIT, 0x02}, + {0x650d, CRL_REG_LEN_08BIT, 0xa0}, + {0x650e, CRL_REG_LEN_08BIT, 0x00}, + {0x650f, CRL_REG_LEN_08BIT, 0xff}, + {0x6510, CRL_REG_LEN_08BIT, 0x04}, + {0x6511, CRL_REG_LEN_08BIT, 0x20}, + {0x6512, CRL_REG_LEN_08BIT, 0x05}, + {0x6513, CRL_REG_LEN_08BIT, 0x86}, + {0x6514, CRL_REG_LEN_08BIT, 0x03}, + {0x6515, CRL_REG_LEN_08BIT, 0x0b}, + {0x6516, CRL_REG_LEN_08BIT, 0x05}, + {0x6517, CRL_REG_LEN_08BIT, 0x86}, + {0x6518, CRL_REG_LEN_08BIT, 0x00}, + {0x6519, CRL_REG_LEN_08BIT, 0x00}, + {0x651a, CRL_REG_LEN_08BIT, 0x05}, + {0x651b, CRL_REG_LEN_08BIT, 0x06}, + {0x651c, CRL_REG_LEN_08BIT, 0x00}, + {0x651d, CRL_REG_LEN_08BIT, 0x04}, + {0x651e, CRL_REG_LEN_08BIT, 0x05}, + {0x651f, CRL_REG_LEN_08BIT, 0x04}, + {0x6520, CRL_REG_LEN_08BIT, 0x00}, + {0x6521, CRL_REG_LEN_08BIT, 0x04}, + {0x6522, CRL_REG_LEN_08BIT, 0x05}, + {0x6523, CRL_REG_LEN_08BIT, 0x00}, + {0x6524, CRL_REG_LEN_08BIT, 0x05}, + {0x6525, CRL_REG_LEN_08BIT, 0x0a}, + {0x6526, CRL_REG_LEN_08BIT, 0x03}, + {0x6527, CRL_REG_LEN_08BIT, 0x9a}, + {0x6528, CRL_REG_LEN_08BIT, 0x05}, + {0x6529, CRL_REG_LEN_08BIT, 0x86}, + {0x652a, CRL_REG_LEN_08BIT, 0x00}, + {0x652b, CRL_REG_LEN_08BIT, 0x00}, + {0x652c, CRL_REG_LEN_08BIT, 0x05}, + {0x652d, CRL_REG_LEN_08BIT, 0x06}, + {0x652e, CRL_REG_LEN_08BIT, 0x00}, + 
{0x652f, CRL_REG_LEN_08BIT, 0x01}, + {0x6530, CRL_REG_LEN_08BIT, 0x05}, + {0x6531, CRL_REG_LEN_08BIT, 0x04}, + {0x6532, CRL_REG_LEN_08BIT, 0x00}, + {0x6533, CRL_REG_LEN_08BIT, 0x04}, + {0x6534, CRL_REG_LEN_08BIT, 0x05}, + {0x6535, CRL_REG_LEN_08BIT, 0x00}, + {0x6536, CRL_REG_LEN_08BIT, 0x05}, + {0x6537, CRL_REG_LEN_08BIT, 0x0a}, + {0x6538, CRL_REG_LEN_08BIT, 0x03}, + {0x6539, CRL_REG_LEN_08BIT, 0x99}, + {0x653a, CRL_REG_LEN_08BIT, 0x05}, + {0x653b, CRL_REG_LEN_08BIT, 0x06}, + {0x653c, CRL_REG_LEN_08BIT, 0x00}, + {0x653d, CRL_REG_LEN_08BIT, 0x00}, + {0x653e, CRL_REG_LEN_08BIT, 0x05}, + {0x653f, CRL_REG_LEN_08BIT, 0x04}, + {0x6540, CRL_REG_LEN_08BIT, 0x00}, + {0x6541, CRL_REG_LEN_08BIT, 0x04}, + {0x6542, CRL_REG_LEN_08BIT, 0x05}, + {0x6543, CRL_REG_LEN_08BIT, 0x00}, + {0x6544, CRL_REG_LEN_08BIT, 0x05}, + {0x6545, CRL_REG_LEN_08BIT, 0x0a}, + {0x6546, CRL_REG_LEN_08BIT, 0x03}, + {0x6547, CRL_REG_LEN_08BIT, 0x98}, + {0x6548, CRL_REG_LEN_08BIT, 0x05}, + {0x6549, CRL_REG_LEN_08BIT, 0x06}, + {0x654a, CRL_REG_LEN_08BIT, 0x00}, + {0x654b, CRL_REG_LEN_08BIT, 0x00}, + {0x654c, CRL_REG_LEN_08BIT, 0x05}, + {0x654d, CRL_REG_LEN_08BIT, 0x04}, + {0x654e, CRL_REG_LEN_08BIT, 0x00}, + {0x654f, CRL_REG_LEN_08BIT, 0x04}, + {0x6550, CRL_REG_LEN_08BIT, 0x05}, + {0x6551, CRL_REG_LEN_08BIT, 0x00}, + {0x6552, CRL_REG_LEN_08BIT, 0x05}, + {0x6553, CRL_REG_LEN_08BIT, 0x0a}, + {0x6554, CRL_REG_LEN_08BIT, 0x03}, + {0x6555, CRL_REG_LEN_08BIT, 0x97}, + {0x6556, CRL_REG_LEN_08BIT, 0x05}, + {0x6557, CRL_REG_LEN_08BIT, 0x06}, + {0x6558, CRL_REG_LEN_08BIT, 0x05}, + {0x6559, CRL_REG_LEN_08BIT, 0x04}, + {0x655a, CRL_REG_LEN_08BIT, 0x00}, + {0x655b, CRL_REG_LEN_08BIT, 0x04}, + {0x655c, CRL_REG_LEN_08BIT, 0x05}, + {0x655d, CRL_REG_LEN_08BIT, 0x00}, + {0x655e, CRL_REG_LEN_08BIT, 0x05}, + {0x655f, CRL_REG_LEN_08BIT, 0x0a}, + {0x6560, CRL_REG_LEN_08BIT, 0x03}, + {0x6561, CRL_REG_LEN_08BIT, 0x96}, + {0x6562, CRL_REG_LEN_08BIT, 0x05}, + {0x6563, CRL_REG_LEN_08BIT, 0x06}, + {0x6564, CRL_REG_LEN_08BIT, 0x05}, + 
{0x6565, CRL_REG_LEN_08BIT, 0x04}, + {0x6566, CRL_REG_LEN_08BIT, 0x00}, + {0x6567, CRL_REG_LEN_08BIT, 0x04}, + {0x6568, CRL_REG_LEN_08BIT, 0x05}, + {0x6569, CRL_REG_LEN_08BIT, 0x00}, + {0x656a, CRL_REG_LEN_08BIT, 0x05}, + {0x656b, CRL_REG_LEN_08BIT, 0x0a}, + {0x656c, CRL_REG_LEN_08BIT, 0x03}, + {0x656d, CRL_REG_LEN_08BIT, 0x95}, + {0x656e, CRL_REG_LEN_08BIT, 0x05}, + {0x656f, CRL_REG_LEN_08BIT, 0x06}, + {0x6570, CRL_REG_LEN_08BIT, 0x05}, + {0x6571, CRL_REG_LEN_08BIT, 0x04}, + {0x6572, CRL_REG_LEN_08BIT, 0x00}, + {0x6573, CRL_REG_LEN_08BIT, 0x04}, + {0x6574, CRL_REG_LEN_08BIT, 0x05}, + {0x6575, CRL_REG_LEN_08BIT, 0x00}, + {0x6576, CRL_REG_LEN_08BIT, 0x05}, + {0x6577, CRL_REG_LEN_08BIT, 0x0a}, + {0x6578, CRL_REG_LEN_08BIT, 0x03}, + {0x6579, CRL_REG_LEN_08BIT, 0x94}, + {0x657a, CRL_REG_LEN_08BIT, 0x05}, + {0x657b, CRL_REG_LEN_08BIT, 0x06}, + {0x657c, CRL_REG_LEN_08BIT, 0x00}, + {0x657d, CRL_REG_LEN_08BIT, 0x00}, + {0x657e, CRL_REG_LEN_08BIT, 0x05}, + {0x657f, CRL_REG_LEN_08BIT, 0x04}, + {0x6580, CRL_REG_LEN_08BIT, 0x00}, + {0x6581, CRL_REG_LEN_08BIT, 0x04}, + {0x6582, CRL_REG_LEN_08BIT, 0x05}, + {0x6583, CRL_REG_LEN_08BIT, 0x00}, + {0x6584, CRL_REG_LEN_08BIT, 0x05}, + {0x6585, CRL_REG_LEN_08BIT, 0x0a}, + {0x6586, CRL_REG_LEN_08BIT, 0x03}, + {0x6587, CRL_REG_LEN_08BIT, 0x93}, + {0x6588, CRL_REG_LEN_08BIT, 0x05}, + {0x6589, CRL_REG_LEN_08BIT, 0x06}, + {0x658a, CRL_REG_LEN_08BIT, 0x00}, + {0x658b, CRL_REG_LEN_08BIT, 0x00}, + {0x658c, CRL_REG_LEN_08BIT, 0x05}, + {0x658d, CRL_REG_LEN_08BIT, 0x04}, + {0x658e, CRL_REG_LEN_08BIT, 0x00}, + {0x658f, CRL_REG_LEN_08BIT, 0x04}, + {0x6590, CRL_REG_LEN_08BIT, 0x05}, + {0x6591, CRL_REG_LEN_08BIT, 0x00}, + {0x6592, CRL_REG_LEN_08BIT, 0x05}, + {0x6593, CRL_REG_LEN_08BIT, 0x0a}, + {0x6594, CRL_REG_LEN_08BIT, 0x03}, + {0x6595, CRL_REG_LEN_08BIT, 0x92}, + {0x6596, CRL_REG_LEN_08BIT, 0x05}, + {0x6597, CRL_REG_LEN_08BIT, 0x06}, + {0x6598, CRL_REG_LEN_08BIT, 0x05}, + {0x6599, CRL_REG_LEN_08BIT, 0x04}, + {0x659a, CRL_REG_LEN_08BIT, 0x00}, + 
{0x659b, CRL_REG_LEN_08BIT, 0x04}, + {0x659c, CRL_REG_LEN_08BIT, 0x05}, + {0x659d, CRL_REG_LEN_08BIT, 0x00}, + {0x659e, CRL_REG_LEN_08BIT, 0x05}, + {0x659f, CRL_REG_LEN_08BIT, 0x0a}, + {0x65a0, CRL_REG_LEN_08BIT, 0x03}, + {0x65a1, CRL_REG_LEN_08BIT, 0x91}, + {0x65a2, CRL_REG_LEN_08BIT, 0x05}, + {0x65a3, CRL_REG_LEN_08BIT, 0x06}, + {0x65a4, CRL_REG_LEN_08BIT, 0x05}, + {0x65a5, CRL_REG_LEN_08BIT, 0x04}, + {0x65a6, CRL_REG_LEN_08BIT, 0x00}, + {0x65a7, CRL_REG_LEN_08BIT, 0x04}, + {0x65a8, CRL_REG_LEN_08BIT, 0x05}, + {0x65a9, CRL_REG_LEN_08BIT, 0x00}, + {0x65aa, CRL_REG_LEN_08BIT, 0x05}, + {0x65ab, CRL_REG_LEN_08BIT, 0x0a}, + {0x65ac, CRL_REG_LEN_08BIT, 0x03}, + {0x65ad, CRL_REG_LEN_08BIT, 0x90}, + {0x65ae, CRL_REG_LEN_08BIT, 0x05}, + {0x65af, CRL_REG_LEN_08BIT, 0x06}, + {0x65b0, CRL_REG_LEN_08BIT, 0x05}, + {0x65b1, CRL_REG_LEN_08BIT, 0x04}, + {0x65b2, CRL_REG_LEN_08BIT, 0x00}, + {0x65b3, CRL_REG_LEN_08BIT, 0x04}, + {0x65b4, CRL_REG_LEN_08BIT, 0x05}, + {0x65b5, CRL_REG_LEN_08BIT, 0x00}, + {0x65b6, CRL_REG_LEN_08BIT, 0x05}, + {0x65b7, CRL_REG_LEN_08BIT, 0x0a}, + {0x65b8, CRL_REG_LEN_08BIT, 0x02}, + {0x65b9, CRL_REG_LEN_08BIT, 0x90}, + {0x65ba, CRL_REG_LEN_08BIT, 0x05}, + {0x65bb, CRL_REG_LEN_08BIT, 0x06}, + {0x65bc, CRL_REG_LEN_08BIT, 0x00}, + {0x65bd, CRL_REG_LEN_08BIT, 0xff}, + {0x65be, CRL_REG_LEN_08BIT, 0x04}, + {0x65bf, CRL_REG_LEN_08BIT, 0x70}, + {0x65c0, CRL_REG_LEN_08BIT, 0x08}, + {0x65c1, CRL_REG_LEN_08BIT, 0x76}, + {0x65c2, CRL_REG_LEN_08BIT, 0x00}, + {0x65c3, CRL_REG_LEN_08BIT, 0xff}, + {0x65c4, CRL_REG_LEN_08BIT, 0x08}, + {0x65c5, CRL_REG_LEN_08BIT, 0x76}, + {0x65c6, CRL_REG_LEN_08BIT, 0x04}, + {0x65c7, CRL_REG_LEN_08BIT, 0x0c}, + {0x65c8, CRL_REG_LEN_08BIT, 0x05}, + {0x65c9, CRL_REG_LEN_08BIT, 0x07}, + {0x65ca, CRL_REG_LEN_08BIT, 0x04}, + {0x65cb, CRL_REG_LEN_08BIT, 0x04}, + {0x65cc, CRL_REG_LEN_08BIT, 0x00}, + {0x65cd, CRL_REG_LEN_08BIT, 0xff}, + {0x65ce, CRL_REG_LEN_08BIT, 0x00}, + {0x65cf, CRL_REG_LEN_08BIT, 0xff}, + {0x65d0, CRL_REG_LEN_08BIT, 0x00}, + 
{0x65d1, CRL_REG_LEN_08BIT, 0xff}, + {0x303a, CRL_REG_LEN_08BIT, 0x04}, + {0x303b, CRL_REG_LEN_08BIT, 0x7f}, + {0x303c, CRL_REG_LEN_08BIT, 0xfe}, + {0x303d, CRL_REG_LEN_08BIT, 0x19}, + {0x303e, CRL_REG_LEN_08BIT, 0xd7}, + {0x303f, CRL_REG_LEN_08BIT, 0x09}, + {0x3040, CRL_REG_LEN_08BIT, 0x78}, + {0x3042, CRL_REG_LEN_08BIT, 0x05}, + {0x328a, CRL_REG_LEN_08BIT, 0x10}, +}; + +static struct crl_register_write_rep ov10640_streamon_regs[] = { + {OV10640_REG_STREAM, CRL_REG_LEN_08BIT, 0x01}, +}; + +static struct crl_register_write_rep ov10640_streamoff_regs[] = { + {OV10640_REG_STREAM, CRL_REG_LEN_08BIT, 0x00}, +}; + +static struct crl_arithmetic_ops ov10640_ls2_ops[] = { + { + .op = CRL_BITWISE_LSHIFT, + .operand.entity_val = 2, + } +}; + +static struct crl_dynamic_register_access ov10640_h_flip_regs[] = { + { + .address = 0x3090, + .len = CRL_REG_LEN_08BIT | CRL_REG_READ_AND_UPDATE, + .ops_items = ARRAY_SIZE(ov10640_ls2_ops), + .ops = ov10640_ls2_ops, + .mask = 0x04, + } +}; + +static struct crl_arithmetic_ops ov10640_ls3_ops[] = { + { + .op = CRL_BITWISE_LSHIFT, + .operand.entity_val = 3, + } +}; + +static struct crl_dynamic_register_access ov10640_v_flip_regs[] = { + { + .address = 0x3090, + .len = CRL_REG_LEN_08BIT | CRL_REG_READ_AND_UPDATE, + .ops_items = ARRAY_SIZE(ov10640_ls3_ops), + .ops = ov10640_ls3_ops, + .mask = 0x08, + } +}; + +static struct crl_arithmetic_ops ov10640_hsb_ops[] = { + { + .op = CRL_BITWISE_RSHIFT, + .operand.entity_val = 8, + } +}; + +static struct crl_dynamic_register_access ov10640_llp_regs[] = { + { + .address = 0x3080, + .len = CRL_REG_LEN_08BIT, + .ops_items = ARRAY_SIZE(ov10640_hsb_ops), + .ops = ov10640_hsb_ops, + .mask = 0xff, + }, + { + .address = 0x3081, + .len = CRL_REG_LEN_08BIT, + .ops_items = 0, + .ops = 0, + .mask = 0xff, + }, +}; + +static struct crl_dynamic_register_access ov10640_fll_regs[] = { + { + .address = 0x3082, + .len = CRL_REG_LEN_08BIT, + .ops_items = ARRAY_SIZE(ov10640_hsb_ops), + .ops = ov10640_hsb_ops, + .mask = 
0xff, + }, + { + .address = 0x3083, + .len = CRL_REG_LEN_08BIT, + .ops_items = 0, + .ops = 0, + .mask = 0xff, + }, +}; + +static struct crl_dynamic_register_access ov10640_ana_gain_regs[] = { + { + .address = 0x30EB, + .len = CRL_REG_LEN_08BIT, + .ops_items = 0, + .ops = 0, + .mask = 0xff, + } +}; + +/* Long digital gain register */ +static struct crl_dynamic_register_access ov10640_gl_regs[] = { + { + .address = 0x30EC, /* High Byte */ + .len = CRL_REG_LEN_08BIT, + .ops_items = ARRAY_SIZE(ov10640_hsb_ops), + .ops = ov10640_hsb_ops, + .mask = 0x3f, + }, + { + .address = 0x30ED, /* Low Byte */ + .len = CRL_REG_LEN_08BIT, + .ops_items = 0, + .ops = 0, + .mask = 0xff, + }, +}; + +static struct crl_arithmetic_ops ov10640_ls1_ops[] = { + { + .op = CRL_BITWISE_LSHIFT, + .operand.entity_val = 1, + } +}; + +static struct crl_arithmetic_ops ov10640_ls5_ops[] = { + { + .op = CRL_BITWISE_LSHIFT, + .operand.entity_val = 5, + } +}; + +/* enable ae debug */ +static struct crl_dynamic_register_access ov10640_ae_debug_regs[] = { + { + .address = 0x30FA, + .len = CRL_REG_LEN_08BIT | CRL_REG_READ_AND_UPDATE, + .ops_items = ARRAY_SIZE(ov10640_ls5_ops), + .ops = ov10640_ls5_ops, + .mask = 0x60, + }, +}; + +/* Short digital gain register */ +static struct crl_dynamic_register_access ov10640_gs_regs[] = { + { + .address = 0x30EE, + .len = CRL_REG_LEN_08BIT, + .ops_items = ARRAY_SIZE(ov10640_hsb_ops), + .ops = ov10640_hsb_ops, + .mask = 0x3f, + }, + { + .address = 0x30EF, + .len = CRL_REG_LEN_08BIT, + .ops_items = 0, + .ops = 0, + .mask = 0xff, + }, +}; + +/* Very short digital gain register */ +static struct crl_dynamic_register_access ov10640_gvs_regs[] = { + { + .address = 0x30F0, + .len = CRL_REG_LEN_08BIT, + .ops_items = ARRAY_SIZE(ov10640_hsb_ops), + .ops = ov10640_hsb_ops, + .mask = 0x3f, + }, + { + .address = 0x30F1, + .len = CRL_REG_LEN_08BIT, + .ops_items = 0, + .ops = 0, + .mask = 0xff, + }, +}; + +/* Long exposure register, also used in linear(non-HDR) mode */ +static struct 
crl_dynamic_register_access ov10640_el_regs[] = { + { + .address = 0x30E6, /* High Byte */ + .len = CRL_REG_LEN_08BIT, + .ops_items = ARRAY_SIZE(ov10640_hsb_ops), + .ops = ov10640_hsb_ops, + .mask = 0xff, + }, + { + .address = 0x30E7, /* Low Byte */ + .len = CRL_REG_LEN_08BIT, + .ops_items = 0, + .ops = 0, + .mask = 0xff, + }, +}; + +/* Short exposure register */ +static struct crl_dynamic_register_access ov10640_es_regs[] = { + { + .address = 0x30E8, + .len = CRL_REG_LEN_08BIT, + .ops_items = ARRAY_SIZE(ov10640_hsb_ops), + .ops = ov10640_hsb_ops, + .mask = 0xff, + }, + { + .address = 0x30E9, + .len = CRL_REG_LEN_08BIT, + .ops_items = 0, + .ops = 0, + .mask = 0xff, + }, +}; + +/* Very short exposure register */ +static struct crl_dynamic_register_access ov10640_evs_regs[] = { + { + .address = 0x30EA, + .len = CRL_REG_LEN_08BIT, + .ops_items = 0, + .ops = 0, + .mask = 0xff, + }, +}; + +/* MSB register */ +static struct crl_dynamic_register_access ov10640_msb_regs[] = { + { + .address = 0x328a, + .len = CRL_REG_LEN_08BIT | CRL_REG_READ_AND_UPDATE, + .ops_items = ARRAY_SIZE(ov10640_ls1_ops), + .ops = ov10640_ls1_ops, + .mask = 0x02, + }, +}; + +/* Needed for acpi support for runtime detection */ +static struct crl_sensor_detect_config ov10640_sensor_detect_regset[] = { + { + .reg = { 0x300A, CRL_REG_LEN_08BIT, 0x000000ff }, + .width = 8, + }, + { + .reg = { 0x300B, CRL_REG_LEN_08BIT, 0x000000ff }, + .width = 8, + }, +}; + +/* ctrl-val == 1 ? 
(1 * 0x0F + 0x45) : (0 * 0x0F + 0x45) -> 0x54 and 0x45 */ +static struct crl_arithmetic_ops ov10640_wdr_ops[] = { + { + .op = CRL_MULTIPLY, + .operand.entity_type = CRL_DYNAMIC_VAL_OPERAND_TYPE_CONST, + .operand.entity_val = 0x0F, + }, + { + .op = CRL_ADD, + .operand.entity_type = CRL_DYNAMIC_VAL_OPERAND_TYPE_CONST, + .operand.entity_val = 0x45, + } +}; + +static struct crl_dynamic_register_access ov10640_wdr_regs[] = { + { 0x3119, CRL_REG_LEN_08BIT, 0xff, + ARRAY_SIZE(ov10640_wdr_ops), + ov10640_wdr_ops, 0 }, +}; + +static struct crl_arithmetic_ops ov10640_linear_ops[] = { + { + .op = CRL_ASSIGNMENT, + .operand.entity_type = CRL_DYNAMIC_VAL_OPERAND_TYPE_REG_VAL, + .operand.entity_val = 0x31BE, + }, + { + .op = CRL_BITWISE_AND, + .operand.entity_type = CRL_DYNAMIC_VAL_OPERAND_TYPE_CONST, + .operand.entity_val = 0x01, + }, +}; + +static struct crl_dynamic_register_access ov10640_linear_regs[] = { + { + .address = 0x31BE, + .len = CRL_REG_LEN_08BIT, + .ops_items = ARRAY_SIZE(ov10640_linear_ops), + .ops = ov10640_linear_ops, + .mask = 0xff, + }, +}; + +static struct crl_pll_configuration ov10640_pll_configurations[] = { + { + .input_clk = 24000000, + .op_sys_clk = 400000000, + .bitsperpixel = 12, + .pixel_rate_csi = 72000000, /* Ignore the value here, no use */ + .pixel_rate_pa = 72000000, /* pixel_rate = MIPICLK*2 *4/12 */ + .csi_lanes = 4, + .comp_items = 0, + .ctrl_data = 0, + .pll_regs_items = ARRAY_SIZE(ov10640_pll_800mbps), + .pll_regs = ov10640_pll_800mbps, + } +}; + +static struct crl_subdev_rect_rep ov10640_1280_1080_rects[] = { + { + .subdev_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .in_rect = {0, 0, 1280, 1080}, + .out_rect = {0, 0, 1280, 1080}, + }, + { + .subdev_type = CRL_SUBDEV_TYPE_BINNER, + .in_rect = {0, 0, 1280, 1080}, + .out_rect = {0, 0, 1280, 1080}, + } +}; + +static struct crl_subdev_rect_rep ov10640_1280_1088_rects[] = { + { + .subdev_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .in_rect = {0, 0, 1280, 1088}, + .out_rect = {0, 0, 1280, 1088}, + }, + { + 
.subdev_type = CRL_SUBDEV_TYPE_BINNER, + .in_rect = {0, 0, 1280, 1088}, + .out_rect = {0, 0, 1280, 1088}, + } +}; + +static struct crl_mode_rep ov10640_modes[] = { + { + .sd_rects_items = ARRAY_SIZE(ov10640_1280_1080_rects), + .sd_rects = ov10640_1280_1080_rects, + .binn_hor = 1, + .binn_vert = 1, + .scale_m = 1, + .width = 1280, + .height = 1080, + .min_llp = 2000, + .min_fll = 1200, + .mode_regs_items = ARRAY_SIZE(ov10640_1280_1080_LONG_RAW), + .mode_regs = ov10640_1280_1080_LONG_RAW, + }, + { + .sd_rects_items = ARRAY_SIZE(ov10640_1280_1088_rects), + .sd_rects = ov10640_1280_1088_rects, + .binn_hor = 1, + .binn_vert = 1, + .scale_m = 1, + .width = 1280, + .height = 1088, + .min_llp = 2000, + .min_fll = 1200, + .mode_regs_items = ARRAY_SIZE(ov10640_1280_1088_LONG_RAW), + .mode_regs = ov10640_1280_1088_LONG_RAW, + }, +}; + +static struct crl_sensor_subdev_config ov10640_sensor_subdevs[] = { + { + .subdev_type = CRL_SUBDEV_TYPE_BINNER, + .name = "ov10640 binner", + }, + { + .subdev_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .name = "ov10640 pixel array", + } +}; + +static struct crl_sensor_limits ov10640_sensor_limits = { + .x_addr_min = 0, + .y_addr_min = 0, + .x_addr_max = 1280, + .y_addr_max = 1088, + .min_frame_length_lines = 320, + .max_frame_length_lines = 65535, + .min_line_length_pixels = 380, + .max_line_length_pixels = 32752, +}; + +static struct crl_flip_data ov10640_flip_configurations[] = { + { + .flip = CRL_FLIP_DEFAULT_NONE, + .pixel_order = CRL_PIXEL_ORDER_BGGR, + }, + { + .flip = CRL_FLIP_HFLIP, + .pixel_order = CRL_PIXEL_ORDER_GBRG, + }, + { + .flip = CRL_FLIP_VFLIP, + .pixel_order = CRL_PIXEL_ORDER_GRBG, + }, + { + .flip = CRL_FLIP_HFLIP_VFLIP, + .pixel_order = CRL_PIXEL_ORDER_RGGB, + } +}; + +static struct crl_csi_data_fmt ov10640_crl_csi_data_fmt[] = { + { + .code = MEDIA_BUS_FMT_SGRBG12_1X12, + .pixel_order = CRL_PIXEL_ORDER_GRBG, + .bits_per_pixel = 12, + .regs_items = 0, + .regs = 0, + }, + { + .code = MEDIA_BUS_FMT_SRGGB12_1X12, + .pixel_order = 
CRL_PIXEL_ORDER_RGGB, + .bits_per_pixel = 12, + .regs_items = 0, + .regs = 0, + }, + { + .code = MEDIA_BUS_FMT_SBGGR12_1X12, + .pixel_order = CRL_PIXEL_ORDER_BGGR, + .bits_per_pixel = 12, + .regs_items = 0, + .regs = 0, + }, + { + .code = MEDIA_BUS_FMT_SGBRG12_1X12, + .pixel_order = CRL_PIXEL_ORDER_GBRG, + .bits_per_pixel = 12, + .regs_items = 0, + .regs = 0, + } +}; + +static struct crl_v4l2_ctrl ov10640_v4l2_ctrls[] = { + { + .sd_type = CRL_SUBDEV_TYPE_BINNER, + .op_type = CRL_V4L2_CTRL_SET_OP, + .context = SENSOR_IDLE, + .ctrl_id = V4L2_CID_LINK_FREQ, + .name = "V4L2_CID_LINK_FREQ", + .type = CRL_V4L2_CTRL_TYPE_MENU_INT, + .data.v4l2_int_menu.def = 0, + .data.v4l2_int_menu.max = 0, + .data.v4l2_int_menu.menu = 0, + .flags = 0, + .impact = CRL_IMPACTS_NO_IMPACT, + .regs_items = 0, + .regs = 0, + .dep_items = 0, + .dep_ctrls = 0, + }, + { + .sd_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .op_type = CRL_V4L2_CTRL_GET_OP, + .context = SENSOR_POWERED_ON, + .ctrl_id = V4L2_CID_PIXEL_RATE, + .name = "V4L2_CID_PIXEL_RATE_PA", + .type = CRL_V4L2_CTRL_TYPE_INTEGER, + .data.std_data.min = 0, + .data.std_data.max = INT_MAX, + .data.std_data.step = 1, + .data.std_data.def = 0, + .flags = 0, + .impact = CRL_IMPACTS_NO_IMPACT, + .regs_items = 0, + .regs = 0, + .dep_items = 0, + .dep_ctrls = 0, + }, + { + .sd_type = CRL_SUBDEV_TYPE_BINNER, + .op_type = CRL_V4L2_CTRL_GET_OP, + .context = SENSOR_POWERED_ON, + .ctrl_id = V4L2_CID_PIXEL_RATE, + .name = "V4L2_CID_PIXEL_RATE_CSI", + .type = CRL_V4L2_CTRL_TYPE_INTEGER, + .data.std_data.min = 0, + .data.std_data.max = INT_MAX, + .data.std_data.step = 1, + .data.std_data.def = 0, + .flags = 0, + .impact = CRL_IMPACTS_NO_IMPACT, + .regs_items = 0, + .regs = 0, + .dep_items = 0, + .dep_ctrls = 0, + }, + { + .sd_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .op_type = CRL_V4L2_CTRL_SET_OP, + .context = SENSOR_POWERED_ON, + .ctrl_id = V4L2_CID_HFLIP, + .name = "V4L2_CID_HFLIP", + .type = CRL_V4L2_CTRL_TYPE_INTEGER, + .data.std_data.min = 0, + 
.data.std_data.max = 1, + .data.std_data.step = 1, + .data.std_data.def = 0, + .flags = 0, + .impact = CRL_IMPACTS_NO_IMPACT, + .ctrl = 0, + .regs_items = ARRAY_SIZE(ov10640_h_flip_regs), + .regs = ov10640_h_flip_regs, + .dep_items = 0, + .dep_ctrls = 0, + }, + { + .sd_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .op_type = CRL_V4L2_CTRL_SET_OP, + .context = SENSOR_POWERED_ON, + .ctrl_id = V4L2_CID_VFLIP, + .name = "V4L2_CID_VFLIP", + .type = CRL_V4L2_CTRL_TYPE_INTEGER, + .data.std_data.min = 0, + .data.std_data.max = 1, + .data.std_data.step = 1, + .data.std_data.def = 1, + .flags = 0, + .impact = CRL_IMPACTS_NO_IMPACT, + .ctrl = 0, + .regs_items = ARRAY_SIZE(ov10640_v_flip_regs), + .regs = ov10640_v_flip_regs, + .dep_items = 0, + .dep_ctrls = 0, + }, + { + .sd_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .op_type = CRL_V4L2_CTRL_SET_OP, + .context = SENSOR_POWERED_ON, + .ctrl_id = V4L2_CID_LINE_LENGTH_PIXELS, + .name = "Line Length Pixels", + .type = CRL_V4L2_CTRL_TYPE_CUSTOM, + .data.std_data.min = 1280, + .data.std_data.max = OV10640_HMAX, + .data.std_data.step = 1, + .data.std_data.def = 2000, + .flags = V4L2_CTRL_FLAG_UPDATE, + .impact = CRL_IMPACTS_NO_IMPACT, + .ctrl = 0, + .regs_items = ARRAY_SIZE(ov10640_llp_regs), + .regs = ov10640_llp_regs, + .dep_items = 0, + .dep_ctrls = 0, + .v4l2_type = V4L2_CTRL_TYPE_INTEGER, + }, + { + .sd_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .op_type = CRL_V4L2_CTRL_SET_OP, + .context = SENSOR_POWERED_ON, + .ctrl_id = V4L2_CID_FRAME_LENGTH_LINES, + .name = "Frame Length Lines", + .type = CRL_V4L2_CTRL_TYPE_CUSTOM, + .data.std_data.min = 1088, + .data.std_data.max = OV10640_VMAX, + .data.std_data.step = 1, + .data.std_data.def = 1200, + .flags = V4L2_CTRL_FLAG_UPDATE, + .impact = CRL_IMPACTS_NO_IMPACT, + .ctrl = 0, + .regs_items = ARRAY_SIZE(ov10640_fll_regs), + .regs = ov10640_fll_regs, + .dep_items = 0, + .dep_ctrls = 0, + .v4l2_type = V4L2_CTRL_TYPE_INTEGER, + }, + { + .sd_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .op_type = 
CRL_V4L2_CTRL_SET_OP, + .context = SENSOR_POWERED_ON, + .ctrl_id = V4L2_CID_ANALOGUE_GAIN, + .name = "V4L2_CID_ANALOGUE_GAIN", + .type = CRL_V4L2_CTRL_TYPE_INTEGER, + .data.std_data.min = 0, + .data.std_data.max = 160, + .data.std_data.step = 1, + .data.std_data.def = 0, + .flags = 0, + .impact = CRL_IMPACTS_NO_IMPACT, + .ctrl = 0, + .regs_items = ARRAY_SIZE(ov10640_ana_gain_regs), + .regs = ov10640_ana_gain_regs, + .dep_items = 0, + .dep_ctrls = 0, + }, + { + .sd_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .op_type = CRL_V4L2_CTRL_SET_OP, + .context = SENSOR_POWERED_ON, + .ctrl_id = CRL_CID_AUTO_EXPOSURE_DEBUG, + .name = "CRL_CID_AUTO_EXPOSURE_DEBUG", + .type = CRL_V4L2_CTRL_TYPE_CUSTOM, + .data.std_data.min = 0, + .data.std_data.max = 3, + .data.std_data.step = 1, + .data.std_data.def = 0x0, + .flags = V4L2_CTRL_FLAG_UPDATE, + .impact = CRL_IMPACTS_NO_IMPACT, + .ctrl = 0, + .regs_items = ARRAY_SIZE(ov10640_ae_debug_regs), + .regs = ov10640_ae_debug_regs, + .dep_items = 0, + .dep_ctrls = 0, + .v4l2_type = V4L2_CTRL_TYPE_INTEGER, + }, + { + .sd_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .op_type = CRL_V4L2_CTRL_SET_OP, + .context = SENSOR_POWERED_ON, + .ctrl_id = V4L2_CID_EXPOSURE, + .name = "V4L2_CID_EXPOSURE", + .type = CRL_V4L2_CTRL_TYPE_INTEGER, + .data.std_data.min = 1, + .data.std_data.max = OV10640_MAX_SHS1, + .data.std_data.step = 1, + .data.std_data.def = 0x040, + .flags = 0, + .impact = CRL_IMPACTS_NO_IMPACT, + .ctrl = 0, + .regs_items = ARRAY_SIZE(ov10640_el_regs), + .regs = ov10640_el_regs, + .dep_items = 0, + .dep_ctrls = 0, + }, + { + .sd_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .op_type = CRL_V4L2_CTRL_SET_OP, + .context = SENSOR_POWERED_ON, + .ctrl_id = V4L2_CID_GAIN, + .name = "Digital Gain", + .type = CRL_V4L2_CTRL_TYPE_INTEGER, + .data.std_data.min = 0, + .data.std_data.max = OV10640_MAX_DGAIN, + .data.std_data.step = 1, + .data.std_data.def = 0x100, + .flags = 0, + .impact = CRL_IMPACTS_NO_IMPACT, + .ctrl = 0, + .regs_items = ARRAY_SIZE(ov10640_gl_regs), + 
.regs = ov10640_gl_regs, + .dep_items = 0, + .dep_ctrls = 0, + }, + { + .sd_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .op_type = CRL_V4L2_CTRL_SET_OP, + .context = SENSOR_POWERED_ON, + .ctrl_id = CRL_CID_DIGITAL_GAIN_S, + .name = "CRL_CID_DIGITAL_GAIN_S", + .type = CRL_V4L2_CTRL_TYPE_CUSTOM, + .data.std_data.min = 0, + .data.std_data.max = OV10640_MAX_DGAIN, + .data.std_data.step = 1, + .data.std_data.def = 0x100, + .flags = V4L2_CTRL_FLAG_UPDATE, + .impact = CRL_IMPACTS_NO_IMPACT, + .ctrl = 0, + .regs_items = ARRAY_SIZE(ov10640_gs_regs), + .regs = ov10640_gs_regs, + .dep_items = 0, + .dep_ctrls = 0, + .v4l2_type = V4L2_CTRL_TYPE_INTEGER, + }, + { + .sd_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .op_type = CRL_V4L2_CTRL_SET_OP, + .context = SENSOR_POWERED_ON, + .ctrl_id = CRL_CID_DIGITAL_GAIN_VS, + .name = "CRL_CID_DIGITAL_GAIN_VS", + .type = CRL_V4L2_CTRL_TYPE_CUSTOM, + .data.std_data.min = 0, + .data.std_data.max = OV10640_MAX_DGAIN, + .data.std_data.step = 1, + .data.std_data.def = 0x100, + .flags = V4L2_CTRL_FLAG_UPDATE, + .impact = CRL_IMPACTS_NO_IMPACT, + .ctrl = 0, + .regs_items = ARRAY_SIZE(ov10640_gvs_regs), + .regs = ov10640_gvs_regs, + .dep_items = 0, + .dep_ctrls = 0, + .v4l2_type = V4L2_CTRL_TYPE_INTEGER, + }, + { + .sd_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .op_type = CRL_V4L2_CTRL_GET_OP, + .context = SENSOR_STREAMING, + .ctrl_id = CRL_CID_SENSOR_BIT_LINEAR, + .name = "Sensor bit linear", + .type = CRL_V4L2_CTRL_TYPE_CUSTOM, + .data.std_data.min = 0, + .data.std_data.max = INT_MAX, + .data.std_data.step = 1, + .data.std_data.def = 0, + .flags = V4L2_CTRL_FLAG_VOLATILE | V4L2_CTRL_FLAG_READ_ONLY, + .impact = CRL_IMPACTS_NO_IMPACT, + .ctrl = 0, + .regs_items = ARRAY_SIZE(ov10640_linear_regs), + .regs = ov10640_linear_regs, + .dep_items = 0, + .dep_ctrls = 0, + .v4l2_type = V4L2_CTRL_TYPE_INTEGER, + }, + { + .sd_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .op_type = CRL_V4L2_CTRL_SET_OP, + .context = SENSOR_POWERED_ON, + .ctrl_id = CRL_CID_MSB_ALIGN, + .name = 
"CRL_CID_MSB_ALIGN", + .type = CRL_V4L2_CTRL_TYPE_CUSTOM, + .data.std_data.min = 0, + .data.std_data.max = 1, + .data.std_data.step = 1, + .data.std_data.def = 1, + .flags = V4L2_CTRL_FLAG_UPDATE, + .impact = CRL_IMPACTS_NO_IMPACT, + .ctrl = 0, + .regs_items = ARRAY_SIZE(ov10640_msb_regs), + .regs = ov10640_msb_regs, + .dep_items = 0, + .dep_ctrls = 0, + .v4l2_type = V4L2_CTRL_TYPE_INTEGER, + }, + { + .sd_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .op_type = CRL_V4L2_CTRL_SET_OP, + .context = SENSOR_POWERED_ON, + .ctrl_id = CRL_CID_EXPOSURE_SHS1, + .name = "CRL_CID_EXPOSURE_SHS1", + .type = CRL_V4L2_CTRL_TYPE_CUSTOM, + .data.std_data.min = 1, + .data.std_data.max = OV10640_MAX_SHS1, + .data.std_data.step = 1, + .data.std_data.def = 0x40, + .flags = V4L2_CTRL_FLAG_UPDATE, + .impact = CRL_IMPACTS_NO_IMPACT, + .ctrl = 0, + .regs_items = ARRAY_SIZE(ov10640_es_regs), + .regs = ov10640_es_regs, + .dep_items = 0, + .dep_ctrls = 0, + .v4l2_type = V4L2_CTRL_TYPE_INTEGER, + }, + { + .sd_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .op_type = CRL_V4L2_CTRL_SET_OP, + .context = SENSOR_POWERED_ON, + .ctrl_id = CRL_CID_EXPOSURE_SHS3, + .name = "CRL_CID_EXPOSURE_SHS3", + .type = CRL_V4L2_CTRL_TYPE_CUSTOM, + .data.std_data.min = 0x11, + .data.std_data.max = OV10640_MAX_SHS3, + .data.std_data.step = 1, + .data.std_data.def = 0x20, + .flags = V4L2_CTRL_FLAG_UPDATE, + .impact = CRL_IMPACTS_NO_IMPACT, + .ctrl = 0, + .regs_items = ARRAY_SIZE(ov10640_evs_regs), + .regs = ov10640_evs_regs, + .dep_items = 0, + .dep_ctrls = 0, + .v4l2_type = V4L2_CTRL_TYPE_INTEGER, + }, + { + .sd_type = CRL_SUBDEV_TYPE_BINNER, + .op_type = CRL_V4L2_CTRL_SET_OP, + .context = SENSOR_POWERED_ON, + .ctrl_id = V4L2_CID_WDR_MODE, + .name = "V4L2_CID_WDR_MODE", + .type = CRL_V4L2_CTRL_TYPE_CUSTOM, + .data.std_data.min = 0, + .data.std_data.max = 1, + .data.std_data.step = 1, + .data.std_data.def = 0, + .flags = V4L2_CTRL_FLAG_UPDATE, + .impact = CRL_IMPACTS_MODE_SELECTION, + .ctrl = 0, + .regs_items = 
ARRAY_SIZE(ov10640_wdr_regs), + .regs = ov10640_wdr_regs, + .dep_items = 0, + .dep_ctrls = 0, + .v4l2_type = V4L2_CTRL_TYPE_INTEGER, + }, +}; + +#define OV10640_OTP_BLANK0_START_ADDR 0x349E +#define OV10640_OTP_BLANK0_END_ADDR 0x34AD +#define OV10640_OTP_BLANK1_START_ADDR 0x34AE +#define OV10640_OTP_BLANK1_END_ADDR 0x34BD +#define OV10640_OTP_BLANK0_LEN (OV10640_OTP_BLANK0_END_ADDR - \ + OV10640_OTP_BLANK0_START_ADDR + 1) +#define OV10640_OTP_BLANK1_LEN (OV10640_OTP_BLANK1_END_ADDR - \ + OV10640_OTP_BLANK1_START_ADDR + 1) + +static struct crl_register_write_rep ov10640_nvm_preop_regset[] = { + /* Start streaming */ + {OV10640_REG_STREAM, CRL_REG_LEN_08BIT, 0x01}, + /* clear blank 0 data registers buffer */ + { 0x349E, CRL_REG_LEN_08BIT, 0x00 }, + { 0x349F, CRL_REG_LEN_08BIT, 0x00 }, + { 0x34A0, CRL_REG_LEN_08BIT, 0x00 }, + { 0x34A1, CRL_REG_LEN_08BIT, 0x00 }, + { 0x34A2, CRL_REG_LEN_08BIT, 0x00 }, + { 0x34A3, CRL_REG_LEN_08BIT, 0x00 }, + { 0x34A4, CRL_REG_LEN_08BIT, 0x00 }, + { 0x34A5, CRL_REG_LEN_08BIT, 0x00 }, + { 0x34A6, CRL_REG_LEN_08BIT, 0x00 }, + { 0x34A7, CRL_REG_LEN_08BIT, 0x00 }, + { 0x34A8, CRL_REG_LEN_08BIT, 0x00 }, + { 0x34A9, CRL_REG_LEN_08BIT, 0x00 }, + { 0x34AA, CRL_REG_LEN_08BIT, 0x00 }, + { 0x34AB, CRL_REG_LEN_08BIT, 0x00 }, + { 0x34AC, CRL_REG_LEN_08BIT, 0x00 }, + { 0x34AD, CRL_REG_LEN_08BIT, 0x00 }, + /* set registers buffer range */ + { 0x3496, CRL_REG_LEN_08BIT, 0x00 }, + { 0x3497, CRL_REG_LEN_08BIT, 0x0F }, + /* select blank 0 */ + { 0x3495, CRL_REG_LEN_08BIT | CRL_REG_READ_AND_UPDATE, 0x00, 0x00, 0x01 }, + /* enable read strobe */ + { 0x349C, CRL_REG_LEN_08BIT, 0x01 }, + /* Wait for the data to load into the buffer */ + { 0x0000, CRL_REG_LEN_DELAY, 25 }, + + /* clear blank 1 data registers buffer */ + { 0x34AE, CRL_REG_LEN_08BIT, 0x00 }, + { 0x34AF, CRL_REG_LEN_08BIT, 0x00 }, + { 0x34B0, CRL_REG_LEN_08BIT, 0x00 }, + { 0x34B1, CRL_REG_LEN_08BIT, 0x00 }, + { 0x34B2, CRL_REG_LEN_08BIT, 0x00 }, + { 0x34B3, CRL_REG_LEN_08BIT, 0x00 }, + { 0x34B4, 
CRL_REG_LEN_08BIT, 0x00 }, + { 0x34B5, CRL_REG_LEN_08BIT, 0x00 }, + { 0x34B6, CRL_REG_LEN_08BIT, 0x00 }, + { 0x34B7, CRL_REG_LEN_08BIT, 0x00 }, + { 0x34B8, CRL_REG_LEN_08BIT, 0x00 }, + { 0x34B9, CRL_REG_LEN_08BIT, 0x00 }, + { 0x34BA, CRL_REG_LEN_08BIT, 0x00 }, + { 0x34BB, CRL_REG_LEN_08BIT, 0x00 }, + { 0x34BC, CRL_REG_LEN_08BIT, 0x00 }, + { 0x34BD, CRL_REG_LEN_08BIT, 0x00 }, + /* set registers buffer range */ + { 0x3496, CRL_REG_LEN_08BIT, 0x00 }, + { 0x3497, CRL_REG_LEN_08BIT, 0x0F }, + /* select blank 1 */ + { 0x3495, CRL_REG_LEN_08BIT | CRL_REG_READ_AND_UPDATE, 0x01, 0x00, 0x01 }, + /* enable read strobe */ + { 0x349C, CRL_REG_LEN_08BIT, 0x01 }, + /* Wait for the data to load into the buffer */ + { 0x0000, CRL_REG_LEN_DELAY, 25 }, +}; + +static struct crl_register_write_rep ov10640_nvm_postop_regset[] = { + {OV10640_REG_STREAM, CRL_REG_LEN_08BIT, 0x00} /* Stop streaming */ +}; + +static struct crl_nvm_blob ov10640_nvm_blobs[] = { + {CRL_I2C_ADDRESS_NO_OVERRIDE, OV10640_OTP_BLANK0_START_ADDR, OV10640_OTP_BLANK0_LEN}, + {CRL_I2C_ADDRESS_NO_OVERRIDE, OV10640_OTP_BLANK1_START_ADDR, OV10640_OTP_BLANK1_LEN}, +}; + +struct crl_sensor_configuration ov10640_crl_configuration = { + .powerup_regs_items = ARRAY_SIZE(ov10640_powerup_standby), + .powerup_regs = ov10640_powerup_standby, + + .poweroff_regs_items = 0, + .poweroff_regs = 0, + + .power_items = ARRAY_SIZE(ov10640_power_items), + .power_entities = ov10640_power_items, + + .id_reg_items = ARRAY_SIZE(ov10640_sensor_detect_regset), + .id_regs = ov10640_sensor_detect_regset, + + .subdev_items = ARRAY_SIZE(ov10640_sensor_subdevs), + .subdevs = ov10640_sensor_subdevs, + + .sensor_limits = &ov10640_sensor_limits, + + .pll_config_items = ARRAY_SIZE(ov10640_pll_configurations), + .pll_configs = ov10640_pll_configurations, + + .modes_items = ARRAY_SIZE(ov10640_modes), + .modes = ov10640_modes, + + .streamon_regs_items = ARRAY_SIZE(ov10640_streamon_regs), + .streamon_regs = ov10640_streamon_regs, + + .streamoff_regs_items = 
ARRAY_SIZE(ov10640_streamoff_regs), + .streamoff_regs = ov10640_streamoff_regs, + + .v4l2_ctrls_items = ARRAY_SIZE(ov10640_v4l2_ctrls), + .v4l2_ctrl_bank = ov10640_v4l2_ctrls, + + .csi_fmts_items = ARRAY_SIZE(ov10640_crl_csi_data_fmt), + .csi_fmts = ov10640_crl_csi_data_fmt, + + .flip_items = ARRAY_SIZE(ov10640_flip_configurations), + .flip_data = ov10640_flip_configurations, + + .crl_nvm_info.nvm_flags = CRL_NVM_ADDR_MODE_8BIT, + .crl_nvm_info.nvm_preop_regs_items = + ARRAY_SIZE(ov10640_nvm_preop_regset), + .crl_nvm_info.nvm_preop_regs = ov10640_nvm_preop_regset, + .crl_nvm_info.nvm_postop_regs_items = + ARRAY_SIZE(ov10640_nvm_postop_regset), + .crl_nvm_info.nvm_postop_regs = ov10640_nvm_postop_regset, + .crl_nvm_info.nvm_blobs_items = ARRAY_SIZE(ov10640_nvm_blobs), + .crl_nvm_info.nvm_config = ov10640_nvm_blobs, +}; + +#endif /* __CRLMODULE_OV10640_CONFIGURATION_H_ */ diff --git a/drivers/media/i2c/crlmodule/crl_ov13860_configuration.h b/drivers/media/i2c/crlmodule/crl_ov13860_configuration.h new file mode 100644 index 0000000000000..5b701f44fea3d --- /dev/null +++ b/drivers/media/i2c/crlmodule/crl_ov13860_configuration.h @@ -0,0 +1,1537 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (C) 2015 - 2018 Intel Corporation + * + * Author: Kamal Ramamoorthy + * + */ + +#ifndef __CRLMODULE_OV13860_CONFIGURATION_H_ +#define __CRLMODULE_OV13860_CONFIGURATION_H_ + +#include "crlmodule-sensor-ds.h" + +static struct crl_register_write_rep ov13860_pll_600mbps[] = { + { 0x0300, CRL_REG_LEN_08BIT, 0x00 },/* pll1_pre_div = default*/ + { 0x0301, CRL_REG_LEN_08BIT, 0x00 },/* pll1_multiplier Bit[8-9] = default */ + { 0x0302, CRL_REG_LEN_08BIT, 0x32 },/* pll1_multiplier Bit[0-7] = default */ + { 0x0303, CRL_REG_LEN_08BIT, 0x01 },/* pll1_divm = /(1 + 1) */ + { 0x0304, CRL_REG_LEN_08BIT, 0x07 },/* pll1_div_mipi = default */ + { 0x0305, CRL_REG_LEN_08BIT, 0x01 },/* pll1 pix clock div */ + { 0x0306, CRL_REG_LEN_08BIT, 0x01 },/* pll1 sys clock div */ + { 0x0308, 
CRL_REG_LEN_08BIT, 0x00 },/* pll1 bypass = default */ + { 0x0309, CRL_REG_LEN_08BIT, 0x01 },/* pll1 cp = default */ + { 0x030A, CRL_REG_LEN_08BIT, 0x00 },/* pll1 ctr = default */ + { 0x030B, CRL_REG_LEN_08BIT, 0x00 },/* pll2_pre_div = default */ + { 0x030c, CRL_REG_LEN_08BIT, 0x00 }, + { 0x030D, CRL_REG_LEN_08BIT, 0x28 },/* pll2_r_divp = default */ + { 0x030E, CRL_REG_LEN_08BIT, 0x02 },/* pll2_r_divs = default */ + { 0x030F, CRL_REG_LEN_08BIT, 0x07 },/* pll2_r_divsp = default */ + { 0x0310, CRL_REG_LEN_08BIT, 0x01 },/* pll2_cp = default */ + { 0x0311, CRL_REG_LEN_08BIT, 0x00 },/* pll2_cp = default */ + { 0x0312, CRL_REG_LEN_08BIT, 0x03 }, + { 0x0313, CRL_REG_LEN_08BIT, 0x03 }, + { 0x031B, CRL_REG_LEN_08BIT, 0x00 },/* pll1 rst = default */ + { 0x031C, CRL_REG_LEN_08BIT, 0x00 },/* pll2 rst = default */ + { 0x031E, CRL_REG_LEN_08BIT, 0x01 },/* pll ctr::mipi_bitsel_man = 1 */ + { 0x4837, CRL_REG_LEN_08BIT, 0x1a },/* pclk period */ +}; + +static struct crl_register_write_rep ov13860_pll_1200mbps[] = { + { 0x0300, CRL_REG_LEN_08BIT, 0x00 },/* pll1_pre_div = default*/ + { 0x0301, CRL_REG_LEN_08BIT, 0x00 },/* pll1_multiplier Bit[8-9] = default */ + { 0x0302, CRL_REG_LEN_08BIT, 0x32 },/* pll1_multiplier Bit[0-7] = default */ + { 0x0303, CRL_REG_LEN_08BIT, 0x00 },/* pll1_divm = /(1 + 0) */ + { 0x0304, CRL_REG_LEN_08BIT, 0x07 },/* pll1_div_mipi = default */ + { 0x0305, CRL_REG_LEN_08BIT, 0x01 },/* pll1 pix clock div */ + { 0x0306, CRL_REG_LEN_08BIT, 0x01 },/* pll1 sys clock div */ + { 0x0308, CRL_REG_LEN_08BIT, 0x00 },/* pll1 bypass = default */ + { 0x0309, CRL_REG_LEN_08BIT, 0x01 },/* pll1 cp = default */ + { 0x030A, CRL_REG_LEN_08BIT, 0x00 },/* pll1 ctr = default */ + { 0x030B, CRL_REG_LEN_08BIT, 0x00 },/* pll2_pre_div = default */ + { 0x030c, CRL_REG_LEN_08BIT, 0x00 }, + { 0x030D, CRL_REG_LEN_08BIT, 0x28 },/* pll2_r_divp = default */ + { 0x030E, CRL_REG_LEN_08BIT, 0x02 },/* pll2_r_divs = default */ + { 0x030F, CRL_REG_LEN_08BIT, 0x07 },/* pll2_r_divsp = default */ + { 
0x0310, CRL_REG_LEN_08BIT, 0x01 },/* pll2_cp = default */ + { 0x0311, CRL_REG_LEN_08BIT, 0x00 },/* pll2_cp = default */ + { 0x0312, CRL_REG_LEN_08BIT, 0x03 }, + { 0x0313, CRL_REG_LEN_08BIT, 0x03 }, + { 0x031B, CRL_REG_LEN_08BIT, 0x00 },/* pll1 rst = default */ + { 0x031C, CRL_REG_LEN_08BIT, 0x00 },/* pll2 rst = default */ + { 0x031E, CRL_REG_LEN_08BIT, 0x01 },/* pll ctr::mipi_bitsel_man = 1 */ + { 0x4837, CRL_REG_LEN_08BIT, 0x0d },/* pclk period */ +}; + +static struct crl_register_write_rep ov13860_powerup_regset[] = { + { 0x3010, CRL_REG_LEN_08BIT, 0x01 }, /* MIPI PHY1 = 1 */ + + /* + * MIPI sc ctrl = 1 + * Bit [7:4] num lane + * Bit [0] phy pad enable + */ + { 0x3012, CRL_REG_LEN_08BIT, 0x21 }, + + { 0x340C, CRL_REG_LEN_08BIT, 0xff }, + { 0x340D, CRL_REG_LEN_08BIT, 0xff }, + + /* + * R Manual + * Bit 0:aec_manual, Bit 1:acg_manual, Bit 2 vts manual + * Bit 4:delay option, Bit 5:gain delay option + */ + { 0x3503, CRL_REG_LEN_08BIT, 0x00 }, + + { 0x3507, CRL_REG_LEN_08BIT, 0x00 },/* MEC Median Exposure Bit[15:8] */ + { 0x3508, CRL_REG_LEN_08BIT, 0x00 },/* MEC Median Exposure Bit[7:8] */ + + { 0x3509, CRL_REG_LEN_08BIT, 0x12 },/* R CTRL9 = default */ + + { 0x350A, CRL_REG_LEN_08BIT, 0x00 },/* MEC Long gain [10:8] */ + { 0x350B, CRL_REG_LEN_08BIT, 0xff },/* MEC Long gain [7:0] */ + + { 0x350F, CRL_REG_LEN_08BIT, 0x10 },/* Median gain [7:0] */ + + { 0x3541, CRL_REG_LEN_08BIT, 0x02 },/* MEC Short exposure [15:8] */ + { 0x3542, CRL_REG_LEN_08BIT, 0x00 },/* Median gain [7:0] */ + { 0x3543, CRL_REG_LEN_08BIT, 0x00 },/* Magic */ + + /* + * HDR related setting + */ + { 0x3547, CRL_REG_LEN_08BIT, 0x00 },/* Very short exposure */ + { 0x3548, CRL_REG_LEN_08BIT, 0x00 },/* Very short exposure */ + { 0x3549, CRL_REG_LEN_08BIT, 0x12 },/* Magic */ + { 0x354B, CRL_REG_LEN_08BIT, 0x10 },/* MEC short gain [7:0] */ + { 0x354F, CRL_REG_LEN_08BIT, 0x10 },/* MEC very short gain [7:0] */ + + /* Analog setting control */ + { 0x3600, CRL_REG_LEN_08BIT, 0x41 }, + { 0x3601, CRL_REG_LEN_08BIT, 
0xd4 }, + { 0x3603, CRL_REG_LEN_08BIT, 0x97 }, + { 0x3604, CRL_REG_LEN_08BIT, 0x08 }, + { 0x360A, CRL_REG_LEN_08BIT, 0x35 }, + { 0x360C, CRL_REG_LEN_08BIT, 0xA0 }, + { 0x360D, CRL_REG_LEN_08BIT, 0x53 }, + { 0x3618, CRL_REG_LEN_08BIT, 0x0C }, + { 0x3620, CRL_REG_LEN_08BIT, 0x55 }, + { 0x3622, CRL_REG_LEN_08BIT, 0x8C }, + { 0x3623, CRL_REG_LEN_08BIT, 0x30 }, + { 0x3628, CRL_REG_LEN_08BIT, 0xC0 }, + { 0x3660, CRL_REG_LEN_08BIT, 0xC0 }, + { 0x3662, CRL_REG_LEN_08BIT, 0x00 }, + { 0x3663, CRL_REG_LEN_08BIT, 0x00 }, + { 0x3664, CRL_REG_LEN_08BIT, 0x04 }, + { 0x366B, CRL_REG_LEN_08BIT, 0x00 }, + { 0x3701, CRL_REG_LEN_08BIT, 0x20 }, + { 0x3702, CRL_REG_LEN_08BIT, 0x30 }, + { 0x3703, CRL_REG_LEN_08BIT, 0x3B }, + { 0x3704, CRL_REG_LEN_08BIT, 0x26 }, + { 0x3705, CRL_REG_LEN_08BIT, 0x07 }, + { 0x3706, CRL_REG_LEN_08BIT, 0x3F }, + { 0x3708, CRL_REG_LEN_08BIT, 0x3C }, + { 0x3709, CRL_REG_LEN_08BIT, 0x18 }, + { 0x370E, CRL_REG_LEN_08BIT, 0x32 }, + { 0x3710, CRL_REG_LEN_08BIT, 0x10 }, + { 0x3712, CRL_REG_LEN_08BIT, 0x12 }, + { 0x3714, CRL_REG_LEN_08BIT, 0x00 }, + { 0x3717, CRL_REG_LEN_08BIT, 0x00 }, + { 0x3719, CRL_REG_LEN_08BIT, 0x03 }, + { 0x371E, CRL_REG_LEN_08BIT, 0x31 }, + { 0x371F, CRL_REG_LEN_08BIT, 0x7F }, + { 0x3720, CRL_REG_LEN_08BIT, 0x18 }, + { 0x3721, CRL_REG_LEN_08BIT, 0x0A }, + { 0x3726, CRL_REG_LEN_08BIT, 0x22 }, + { 0x3727, CRL_REG_LEN_08BIT, 0x44 }, + { 0x3728, CRL_REG_LEN_08BIT, 0x40 }, + { 0x3729, CRL_REG_LEN_08BIT, 0x00 }, + { 0x372A, CRL_REG_LEN_08BIT, 0x20 }, + { 0x372B, CRL_REG_LEN_08BIT, 0x00 }, + { 0x372E, CRL_REG_LEN_08BIT, 0x2B }, + { 0x3730, CRL_REG_LEN_08BIT, 0x00 }, + { 0x3731, CRL_REG_LEN_08BIT, 0x00 }, + { 0x3732, CRL_REG_LEN_08BIT, 0x00 }, + { 0x3733, CRL_REG_LEN_08BIT, 0x00 }, + { 0x3734, CRL_REG_LEN_08BIT, 0x00 }, + { 0x3735, CRL_REG_LEN_08BIT, 0x00 }, + { 0x3736, CRL_REG_LEN_08BIT, 0x00 }, + { 0x3737, CRL_REG_LEN_08BIT, 0x00 }, + { 0x3744, CRL_REG_LEN_08BIT, 0x01 }, + { 0x3745, CRL_REG_LEN_08BIT, 0x5E }, + { 0x3746, CRL_REG_LEN_08BIT, 0x01 }, + 
{ 0x3747, CRL_REG_LEN_08BIT, 0x1F }, + { 0x3748, CRL_REG_LEN_08BIT, 0x00 }, + { 0x374A, CRL_REG_LEN_08BIT, 0x00 }, + { 0x3760, CRL_REG_LEN_08BIT, 0xD1 }, + { 0x3761, CRL_REG_LEN_08BIT, 0x31 }, + { 0x3762, CRL_REG_LEN_08BIT, 0x53 }, + { 0x3763, CRL_REG_LEN_08BIT, 0x14 }, + { 0x3767, CRL_REG_LEN_08BIT, 0x24 }, + { 0x3768, CRL_REG_LEN_08BIT, 0x0C }, + { 0x3769, CRL_REG_LEN_08BIT, 0x24 }, + { 0x376C, CRL_REG_LEN_08BIT, 0x43 }, + { 0x376D, CRL_REG_LEN_08BIT, 0x01 }, + { 0x376E, CRL_REG_LEN_08BIT, 0x53 }, + { 0x378C, CRL_REG_LEN_08BIT, 0x1F }, + { 0x378D, CRL_REG_LEN_08BIT, 0x13 }, + { 0x378F, CRL_REG_LEN_08BIT, 0x88 }, + { 0x3790, CRL_REG_LEN_08BIT, 0x5A }, + { 0x3791, CRL_REG_LEN_08BIT, 0x5A }, + { 0x3792, CRL_REG_LEN_08BIT, 0x21 }, + { 0x3794, CRL_REG_LEN_08BIT, 0x71 }, + { 0x3796, CRL_REG_LEN_08BIT, 0x01 }, + { 0x379F, CRL_REG_LEN_08BIT, 0x3E }, + { 0x37A0, CRL_REG_LEN_08BIT, 0x44 }, + { 0x37A1, CRL_REG_LEN_08BIT, 0x00 }, + { 0x37A2, CRL_REG_LEN_08BIT, 0x44 }, + { 0x37A3, CRL_REG_LEN_08BIT, 0x41 }, + { 0x37A4, CRL_REG_LEN_08BIT, 0x88 }, + { 0x37A5, CRL_REG_LEN_08BIT, 0xA9 }, + { 0x37B3, CRL_REG_LEN_08BIT, 0xDC }, + { 0x37B4, CRL_REG_LEN_08BIT, 0x0E }, + { 0x37B7, CRL_REG_LEN_08BIT, 0x84 }, + { 0x37B9, CRL_REG_LEN_08BIT, 0x08 }, + { 0x3821, CRL_REG_LEN_08BIT, 0x04 }, + { 0x382A, CRL_REG_LEN_08BIT, 0x04 }, + { 0x382F, CRL_REG_LEN_08BIT, 0x84 }, + { 0x3835, CRL_REG_LEN_08BIT, 0x04 }, + { 0x3837, CRL_REG_LEN_08BIT, 0x02 }, + { 0x383C, CRL_REG_LEN_08BIT, 0x88 }, + { 0x383D, CRL_REG_LEN_08BIT, 0xFF }, + { 0x3845, CRL_REG_LEN_08BIT, 0x10 }, + + { 0x3D85, CRL_REG_LEN_08BIT, 0x16 },/* OTP_REGS */ + { 0x3D8C, CRL_REG_LEN_08BIT, 0x79 },/* OTP_REGS */ + { 0x3D8D, CRL_REG_LEN_08BIT, 0x7F },/* OTP_REGS */ + + { 0x4000, CRL_REG_LEN_08BIT, 0x17 },/* BLC_00 */ + + /* + * Magic Registers + */ + { 0x400F, CRL_REG_LEN_08BIT, 0x80 }, + { 0x4011, CRL_REG_LEN_08BIT, 0xFB }, + { 0x4017, CRL_REG_LEN_08BIT, 0x08 }, + { 0x401A, CRL_REG_LEN_08BIT, 0x0E }, + { 0x4020, CRL_REG_LEN_08BIT, 0x08 }, 
+ { 0x4022, CRL_REG_LEN_08BIT, 0x08 }, + { 0x4024, CRL_REG_LEN_08BIT, 0x08 }, + { 0x4026, CRL_REG_LEN_08BIT, 0x08 }, + { 0x4028, CRL_REG_LEN_08BIT, 0x08 }, + { 0x402A, CRL_REG_LEN_08BIT, 0x08 }, + { 0x402C, CRL_REG_LEN_08BIT, 0x08 }, + { 0x402E, CRL_REG_LEN_08BIT, 0x08 }, + { 0x4030, CRL_REG_LEN_08BIT, 0x08 }, + { 0x4032, CRL_REG_LEN_08BIT, 0x08 }, + { 0x4034, CRL_REG_LEN_08BIT, 0x08 }, + { 0x4036, CRL_REG_LEN_08BIT, 0x08 }, + { 0x4038, CRL_REG_LEN_08BIT, 0x08 }, + { 0x403A, CRL_REG_LEN_08BIT, 0x08 }, + { 0x403C, CRL_REG_LEN_08BIT, 0x08 }, + { 0x403E, CRL_REG_LEN_08BIT, 0x08 }, + { 0x4052, CRL_REG_LEN_08BIT, 0x00 }, + { 0x4053, CRL_REG_LEN_08BIT, 0x80 }, + { 0x4054, CRL_REG_LEN_08BIT, 0x00 }, + { 0x4055, CRL_REG_LEN_08BIT, 0x80 }, + { 0x4056, CRL_REG_LEN_08BIT, 0x00 }, + { 0x4057, CRL_REG_LEN_08BIT, 0x80 }, + { 0x4058, CRL_REG_LEN_08BIT, 0x00 }, + { 0x4059, CRL_REG_LEN_08BIT, 0x80 }, + { 0x4202, CRL_REG_LEN_08BIT, 0x00 }, + { 0x4203, CRL_REG_LEN_08BIT, 0x00 }, + { 0x4d00, CRL_REG_LEN_08BIT, 0x05 }, + { 0x4d01, CRL_REG_LEN_08BIT, 0x05 }, + { 0x4d02, CRL_REG_LEN_08BIT, 0xCA }, + { 0x4d03, CRL_REG_LEN_08BIT, 0xD7 }, + { 0x4d04, CRL_REG_LEN_08BIT, 0xAE }, + { 0x4d05, CRL_REG_LEN_08BIT, 0x13 }, + { 0x4813, CRL_REG_LEN_08BIT, 0x10 }, + { 0x4815, CRL_REG_LEN_08BIT, 0x40 }, + { 0x4837, CRL_REG_LEN_08BIT, 0x0D }, + { 0x486E, CRL_REG_LEN_08BIT, 0x03 }, + { 0x4B01, CRL_REG_LEN_08BIT, 0x80 }, + { 0x4B06, CRL_REG_LEN_08BIT, 0x00 }, + { 0x4C01, CRL_REG_LEN_08BIT, 0xDF }, + + /* + * DSP control related registers required for RAW + * Sensor path + */ + { 0x5001, CRL_REG_LEN_08BIT, 0x40 }, + { 0x5002, CRL_REG_LEN_08BIT, 0x04 }, + { 0x5003, CRL_REG_LEN_08BIT, 0x00 }, + { 0x5004, CRL_REG_LEN_08BIT, 0x80 }, + { 0x5005, CRL_REG_LEN_08BIT, 0x00 }, + { 0x501D, CRL_REG_LEN_08BIT, 0x00 }, + { 0x501F, CRL_REG_LEN_08BIT, 0x06 }, + { 0x5021, CRL_REG_LEN_08BIT, 0x00 }, + { 0x5022, CRL_REG_LEN_08BIT, 0x13 }, + { 0x5058, CRL_REG_LEN_08BIT, 0x00 }, + { 0x5200, CRL_REG_LEN_08BIT, 0x00 }, + { 
0x5209, CRL_REG_LEN_08BIT, 0x00 }, + { 0x520A, CRL_REG_LEN_08BIT, 0x80 }, + { 0x520B, CRL_REG_LEN_08BIT, 0x04 }, + { 0x520C, CRL_REG_LEN_08BIT, 0x01 }, + { 0x520E, CRL_REG_LEN_08BIT, 0x34 }, + { 0x5210, CRL_REG_LEN_08BIT, 0x10 }, + { 0x5211, CRL_REG_LEN_08BIT, 0xA0 }, + { 0x5280, CRL_REG_LEN_08BIT, 0x00 }, + { 0x5292, CRL_REG_LEN_08BIT, 0x00 }, + { 0x5C80, CRL_REG_LEN_08BIT, 0x05 }, + { 0x5C81, CRL_REG_LEN_08BIT, 0x90 }, + { 0x5C82, CRL_REG_LEN_08BIT, 0x09 }, + { 0x5C83, CRL_REG_LEN_08BIT, 0x5F }, + { 0x5D00, CRL_REG_LEN_08BIT, 0x00 }, + { 0x4001, CRL_REG_LEN_08BIT, 0x60 }, /* BLC control */ + + /* + * Magic Registers + */ + { 0x560F, CRL_REG_LEN_08BIT, 0xFC }, + { 0x5610, CRL_REG_LEN_08BIT, 0xF0 }, + { 0x5611, CRL_REG_LEN_08BIT, 0x10 }, + { 0x562F, CRL_REG_LEN_08BIT, 0xFC }, + { 0x5630, CRL_REG_LEN_08BIT, 0xF0 }, + { 0x5631, CRL_REG_LEN_08BIT, 0x10 }, + { 0x564F, CRL_REG_LEN_08BIT, 0xFC }, + { 0x5650, CRL_REG_LEN_08BIT, 0xF0 }, + { 0x5651, CRL_REG_LEN_08BIT, 0x10 }, + { 0x566F, CRL_REG_LEN_08BIT, 0xFC }, + { 0x5670, CRL_REG_LEN_08BIT, 0xF0 }, + { 0x5671, CRL_REG_LEN_08BIT, 0x10 }, +}; + +static struct crl_register_write_rep ov13860_mode_13m[] = { + + /* + * Exposure & Gain + */ + { 0x3501, CRL_REG_LEN_08BIT, 0x0D },/* Long Exposure */ + { 0x3502, CRL_REG_LEN_08BIT, 0x88 },/* Long Exposure */ + + { 0x370A, CRL_REG_LEN_08BIT, 0x23 }, + { 0x372F, CRL_REG_LEN_08BIT, 0xA0 }, + + /* + * Windowing + */ + { 0x3800, CRL_REG_LEN_08BIT, 0x00 },/* h_crop_start high */ + { 0x3801, CRL_REG_LEN_08BIT, 0x14 },/* h_crop_start low */ + { 0x3802, CRL_REG_LEN_08BIT, 0x00 },/* v_crop_start high */ + { 0x3803, CRL_REG_LEN_08BIT, 0x0C },/* v_crop_start low */ + { 0x3804, CRL_REG_LEN_08BIT, 0x10 },/* h_crop_end high */ + { 0x3805, CRL_REG_LEN_08BIT, 0x8B },/* h_crop_end low */ + { 0x3806, CRL_REG_LEN_08BIT, 0x0C },/* v_crop_end high */ + { 0x3807, CRL_REG_LEN_08BIT, 0x43 },/* v_crop_end low */ + { 0x3808, CRL_REG_LEN_08BIT, 0x10 },/* h_output_size high 4208 x 3120 */ + { 0x3809, 
CRL_REG_LEN_08BIT, 0x70 },/* h_output_size low */ + { 0x380A, CRL_REG_LEN_08BIT, 0x0C },/* v_output_size high */ + { 0x380B, CRL_REG_LEN_08BIT, 0x30 },/* v_output_size low */ + { 0x3810, CRL_REG_LEN_08BIT, 0x00 },/* Manual horizontal window offset high */ + { 0x3811, CRL_REG_LEN_08BIT, 0x04 },/* Manual horizontal window offset low */ + { 0x3813, CRL_REG_LEN_08BIT, 0x04 },/* Manual vertical window offset low */ + { 0x3814, CRL_REG_LEN_08BIT, 0x11 },/* Horizontal sub-sample odd inc */ + { 0x3815, CRL_REG_LEN_08BIT, 0x11 },/* Vertical sub-sample odd inc */ + { 0x383D, CRL_REG_LEN_08BIT, 0xFF },/* Vertical sub-sample odd inc */ + + { 0x3820, CRL_REG_LEN_08BIT, 0x00 },/* Binning */ + { 0x3842, CRL_REG_LEN_08BIT, 0x00 },/* Binning */ + { 0x5000, CRL_REG_LEN_08BIT, 0x99 },/* Binning */ + + { 0x3836, CRL_REG_LEN_08BIT, 0x0C }, /* ablc_use_num */ + { 0x383C, CRL_REG_LEN_08BIT, 0x88 }, /* Boundary Pix num */ + + { 0x4008, CRL_REG_LEN_08BIT, 0x00 }, /* Magic */ + { 0x4009, CRL_REG_LEN_08BIT, 0x13 }, /* Magic */ + { 0x4019, CRL_REG_LEN_08BIT, 0x18 }, /* Magic */ + { 0x4051, CRL_REG_LEN_08BIT, 0x03 }, /* Magic */ + { 0x4066, CRL_REG_LEN_08BIT, 0x04 }, /* Magic */ + + { 0x5201, CRL_REG_LEN_08BIT, 0x80 }, /* Magic */ + { 0x5204, CRL_REG_LEN_08BIT, 0x01 }, /* Magic */ + { 0x5205, CRL_REG_LEN_08BIT, 0x00 }, /* Magic */ +}; + +static struct crl_register_write_rep ov13860_mode_8m[] = { + + /* + * Exposure & Gain + */ + { 0x3501, CRL_REG_LEN_08BIT, 0x0D },/* Long Exposure */ + { 0x3502, CRL_REG_LEN_08BIT, 0x88 },/* Long Exposure */ + + { 0x370A, CRL_REG_LEN_08BIT, 0x23 }, + { 0x372F, CRL_REG_LEN_08BIT, 0xA0 }, + + /* + * Windowing + */ + { 0x3800, CRL_REG_LEN_08BIT, 0x00 },/* h_crop_start high */ + { 0x3801, CRL_REG_LEN_08BIT, 0x14 },/* h_crop_start low */ + { 0x3802, CRL_REG_LEN_08BIT, 0x00 },/* v_crop_start high */ + { 0x3803, CRL_REG_LEN_08BIT, 0x0C },/* v_crop_start low */ + { 0x3804, CRL_REG_LEN_08BIT, 0x10 },/* h_crop_end high */ + { 0x3805, CRL_REG_LEN_08BIT, 0x8B },/* 
h_crop_end low */ + { 0x3806, CRL_REG_LEN_08BIT, 0x0C },/* v_crop_end high */ + { 0x3807, CRL_REG_LEN_08BIT, 0x43 },/* v_crop_end low */ + { 0x3808, CRL_REG_LEN_08BIT, 0x0C },/* h_output_size high 3264 x 2448 */ + { 0x3809, CRL_REG_LEN_08BIT, 0xC0 },/* h_output_size low */ + { 0x380A, CRL_REG_LEN_08BIT, 0x09 },/* v_output_size high */ + { 0x380B, CRL_REG_LEN_08BIT, 0x90 },/* v_output_size low */ + { 0x3810, CRL_REG_LEN_08BIT, 0x00 },/* Manual horizontal window offset high */ + { 0x3811, CRL_REG_LEN_08BIT, 0x04 },/* Manual horizontal window offset low */ + { 0x3813, CRL_REG_LEN_08BIT, 0x04 },/* Manual vertical window offset low */ + { 0x3814, CRL_REG_LEN_08BIT, 0x11 },/* Horizontal sub-sample odd inc */ + { 0x3815, CRL_REG_LEN_08BIT, 0x11 },/* Vertical sub-sample odd inc */ + { 0x383D, CRL_REG_LEN_08BIT, 0xFF },/* Vertical sub-sample odd inc */ + + { 0x3820, CRL_REG_LEN_08BIT, 0x00 },/* Binning */ + { 0x3842, CRL_REG_LEN_08BIT, 0x00 },/* Binning */ + { 0x5000, CRL_REG_LEN_08BIT, 0x99 },/* Binning */ + + { 0x3836, CRL_REG_LEN_08BIT, 0x0C }, /* ablc_use_num */ + { 0x383C, CRL_REG_LEN_08BIT, 0x88 }, /* Boundary Pix num */ + + { 0x4008, CRL_REG_LEN_08BIT, 0x00 }, /* Magic */ + { 0x4009, CRL_REG_LEN_08BIT, 0x13 }, /* Magic */ + { 0x4019, CRL_REG_LEN_08BIT, 0x18 }, /* Magic */ + { 0x4051, CRL_REG_LEN_08BIT, 0x03 }, /* Magic */ + { 0x4066, CRL_REG_LEN_08BIT, 0x04 }, /* Magic */ + + { 0x5201, CRL_REG_LEN_08BIT, 0x80 }, /* Magic */ + { 0x5204, CRL_REG_LEN_08BIT, 0x01 }, /* Magic */ + { 0x5205, CRL_REG_LEN_08BIT, 0x00 }, /* Magic */ +}; + +static struct crl_register_write_rep ov13860_mode_4k2k[] = { + + /* + * Exposure & Gain + */ + { 0x3501, CRL_REG_LEN_08BIT, 0x0D },/* Long Exposure */ + { 0x3502, CRL_REG_LEN_08BIT, 0x88 },/* Long Exposure */ + + { 0x370A, CRL_REG_LEN_08BIT, 0x23 }, + { 0x372F, CRL_REG_LEN_08BIT, 0xA0 }, + + /* + * Windowing + */ + { 0x3800, CRL_REG_LEN_08BIT, 0x00 },/* h_crop_start high */ + { 0x3801, CRL_REG_LEN_08BIT, 0x14 },/* h_crop_start low */ + { 
0x3802, CRL_REG_LEN_08BIT, 0x00 },/* v_crop_start high */ + { 0x3803, CRL_REG_LEN_08BIT, 0x0C },/* v_crop_start low */ + { 0x3804, CRL_REG_LEN_08BIT, 0x10 },/* h_crop_end high */ + { 0x3805, CRL_REG_LEN_08BIT, 0x8B },/* h_crop_end low */ + { 0x3806, CRL_REG_LEN_08BIT, 0x0C },/* v_crop_end high */ + { 0x3807, CRL_REG_LEN_08BIT, 0x43 },/* v_crop_end low */ + { 0x3808, CRL_REG_LEN_08BIT, 0x10 },/* h_output_size high 4096 x 2160 */ + { 0x3809, CRL_REG_LEN_08BIT, 0x00 },/* h_output_size low */ + { 0x380A, CRL_REG_LEN_08BIT, 0x08 },/* v_output_size high */ + { 0x380B, CRL_REG_LEN_08BIT, 0x70 },/* v_output_size low */ + { 0x3810, CRL_REG_LEN_08BIT, 0x00 },/* Manual horizontal window offset high */ + { 0x3811, CRL_REG_LEN_08BIT, 0x04 },/* Manual horizontal window offset low */ + { 0x3813, CRL_REG_LEN_08BIT, 0x04 },/* Manual vertical window offset low */ + { 0x3814, CRL_REG_LEN_08BIT, 0x11 },/* Horizontal sub-sample odd inc */ + { 0x3815, CRL_REG_LEN_08BIT, 0x11 },/* Vertical sub-sample odd inc */ + { 0x383D, CRL_REG_LEN_08BIT, 0xFF },/* Vertical sub-sample odd inc */ + + { 0x3820, CRL_REG_LEN_08BIT, 0x00 },/* Binning */ + { 0x3842, CRL_REG_LEN_08BIT, 0x00 },/* Binning */ + { 0x5000, CRL_REG_LEN_08BIT, 0x99 },/* Binning */ + + { 0x3836, CRL_REG_LEN_08BIT, 0x0C }, /* ablc_use_num */ + { 0x383C, CRL_REG_LEN_08BIT, 0x88 }, /* Boundary Pix num */ + + { 0x4008, CRL_REG_LEN_08BIT, 0x00 }, /* Magic */ + { 0x4009, CRL_REG_LEN_08BIT, 0x13 }, /* Magic */ + { 0x4019, CRL_REG_LEN_08BIT, 0x18 }, /* Magic */ + { 0x4051, CRL_REG_LEN_08BIT, 0x03 }, /* Magic */ + { 0x4066, CRL_REG_LEN_08BIT, 0x04 }, /* Magic */ + + { 0x5201, CRL_REG_LEN_08BIT, 0x80 }, /* Magic */ + { 0x5204, CRL_REG_LEN_08BIT, 0x01 }, /* Magic */ + { 0x5205, CRL_REG_LEN_08BIT, 0x00 }, /* Magic */ +}; + +static struct crl_register_write_rep ov13860_mode_uhd[] = { + + /* + * Exposure & Gain + */ + { 0x3501, CRL_REG_LEN_08BIT, 0x0D },/* Long Exposure */ + { 0x3502, CRL_REG_LEN_08BIT, 0x88 },/* Long Exposure */ + + { 0x370A, 
CRL_REG_LEN_08BIT, 0x23 }, + { 0x372F, CRL_REG_LEN_08BIT, 0xA0 }, + + /* + * Windowing + */ + { 0x3800, CRL_REG_LEN_08BIT, 0x00 },/* h_crop_start high */ + { 0x3801, CRL_REG_LEN_08BIT, 0x14 },/* h_crop_start low */ + { 0x3802, CRL_REG_LEN_08BIT, 0x00 },/* v_crop_start high */ + { 0x3803, CRL_REG_LEN_08BIT, 0x0C },/* v_crop_start low */ + { 0x3804, CRL_REG_LEN_08BIT, 0x10 },/* h_crop_end high */ + { 0x3805, CRL_REG_LEN_08BIT, 0x8B },/* h_crop_end low */ + { 0x3806, CRL_REG_LEN_08BIT, 0x0C },/* v_crop_end high */ + { 0x3807, CRL_REG_LEN_08BIT, 0x43 },/* v_crop_end low */ + { 0x3808, CRL_REG_LEN_08BIT, 0x0F },/* h_output_size high 3840 x 2160 */ + { 0x3809, CRL_REG_LEN_08BIT, 0x00 },/* h_output_size low */ + { 0x380A, CRL_REG_LEN_08BIT, 0x08 },/* v_output_size high */ + { 0x380B, CRL_REG_LEN_08BIT, 0x70 },/* v_output_size low */ + { 0x3810, CRL_REG_LEN_08BIT, 0x00 },/* Manual horizontal window offset high */ + { 0x3811, CRL_REG_LEN_08BIT, 0x04 },/* Manual horizontal window offset low */ + { 0x3813, CRL_REG_LEN_08BIT, 0x04 },/* Manual vertical window offset low */ + { 0x3814, CRL_REG_LEN_08BIT, 0x11 },/* Horizontal sub-sample odd inc */ + { 0x3815, CRL_REG_LEN_08BIT, 0x11 },/* Vertical sub-sample odd inc */ + { 0x383D, CRL_REG_LEN_08BIT, 0xFF },/* Vertical sub-sample odd inc */ + + { 0x3820, CRL_REG_LEN_08BIT, 0x00 },/* Binning */ + { 0x3842, CRL_REG_LEN_08BIT, 0x00 },/* Binning */ + { 0x5000, CRL_REG_LEN_08BIT, 0x99 },/* Binning */ + + { 0x3836, CRL_REG_LEN_08BIT, 0x0C }, /* ablc_use_num */ + { 0x383C, CRL_REG_LEN_08BIT, 0x88 }, /* Boundary Pix num */ + + { 0x4008, CRL_REG_LEN_08BIT, 0x00 }, /* Magic */ + { 0x4009, CRL_REG_LEN_08BIT, 0x13 }, /* Magic */ + { 0x4019, CRL_REG_LEN_08BIT, 0x18 }, /* Magic */ + { 0x4051, CRL_REG_LEN_08BIT, 0x03 }, /* Magic */ + { 0x4066, CRL_REG_LEN_08BIT, 0x04 }, /* Magic */ + + { 0x5201, CRL_REG_LEN_08BIT, 0x80 }, /* Magic */ + { 0x5204, CRL_REG_LEN_08BIT, 0x01 }, /* Magic */ + { 0x5205, CRL_REG_LEN_08BIT, 0x00 }, /* Magic */ +}; + 
+static struct crl_register_write_rep ov13860_mode_6m[] = { + + /* + * Exposure & Gain + */ + { 0x3501, CRL_REG_LEN_08BIT, 0x0D },/* Long Exposure */ + { 0x3502, CRL_REG_LEN_08BIT, 0x88 },/* Long Exposure */ + + { 0x370A, CRL_REG_LEN_08BIT, 0x23 }, + { 0x372F, CRL_REG_LEN_08BIT, 0xA0 }, + + /* + * Windowing + */ + { 0x3800, CRL_REG_LEN_08BIT, 0x00 },/* h_crop_start high */ + { 0x3801, CRL_REG_LEN_08BIT, 0x14 },/* h_crop_start low */ + { 0x3802, CRL_REG_LEN_08BIT, 0x00 },/* v_crop_start high */ + { 0x3803, CRL_REG_LEN_08BIT, 0x0C },/* v_crop_start low */ + { 0x3804, CRL_REG_LEN_08BIT, 0x10 },/* h_crop_end high */ + { 0x3805, CRL_REG_LEN_08BIT, 0x8B },/* h_crop_end low */ + { 0x3806, CRL_REG_LEN_08BIT, 0x0C },/* v_crop_end high */ + { 0x3807, CRL_REG_LEN_08BIT, 0x43 },/* v_crop_end low */ + { 0x3808, CRL_REG_LEN_08BIT, 0x0C },/* h_output_size high 3264 x 1836 */ + { 0x3809, CRL_REG_LEN_08BIT, 0xC0 },/* h_output_size low */ + { 0x380A, CRL_REG_LEN_08BIT, 0x07 },/* v_output_size high */ + { 0x380B, CRL_REG_LEN_08BIT, 0x2C },/* v_output_size low */ + { 0x3810, CRL_REG_LEN_08BIT, 0x00 },/* Manual horizontal window offset high */ + { 0x3811, CRL_REG_LEN_08BIT, 0x04 },/* Manual horizontal window offset low */ + { 0x3813, CRL_REG_LEN_08BIT, 0x04 },/* Manual vertical window offset low */ + { 0x3814, CRL_REG_LEN_08BIT, 0x11 },/* Horizontal sub-sample odd inc */ + { 0x3815, CRL_REG_LEN_08BIT, 0x11 },/* Vertical sub-sample odd inc */ + { 0x383D, CRL_REG_LEN_08BIT, 0xFF },/* Vertical sub-sample odd inc */ + + { 0x3820, CRL_REG_LEN_08BIT, 0x00 },/* Binning */ + { 0x3842, CRL_REG_LEN_08BIT, 0x00 },/* Binning */ + { 0x5000, CRL_REG_LEN_08BIT, 0x99 },/* Binning */ + + { 0x3836, CRL_REG_LEN_08BIT, 0x0C }, /* ablc_use_num */ + { 0x383C, CRL_REG_LEN_08BIT, 0x88 }, /* Boundary Pix num */ + + { 0x4008, CRL_REG_LEN_08BIT, 0x00 }, /* Magic */ + { 0x4009, CRL_REG_LEN_08BIT, 0x13 }, /* Magic */ + { 0x4019, CRL_REG_LEN_08BIT, 0x18 }, /* Magic */ + { 0x4051, CRL_REG_LEN_08BIT, 0x03 }, /* Magic 
*/ + { 0x4066, CRL_REG_LEN_08BIT, 0x04 }, /* Magic */ + + { 0x5201, CRL_REG_LEN_08BIT, 0x80 }, /* Magic */ + { 0x5204, CRL_REG_LEN_08BIT, 0x01 }, /* Magic */ + { 0x5205, CRL_REG_LEN_08BIT, 0x00 }, /* Magic */ +}; + +static struct crl_register_write_rep ov13860_mode_3m[] = { + + /* + * Exposure & Gain + */ + { 0x3501, CRL_REG_LEN_08BIT, 0x06 },/* Long Exposure */ + { 0x3502, CRL_REG_LEN_08BIT, 0xB8 },/* Long Exposure */ + + { 0x370A, CRL_REG_LEN_08BIT, 0x63 }, + { 0x372F, CRL_REG_LEN_08BIT, 0x90 }, + + /* + * Windowing + */ + { 0x3800, CRL_REG_LEN_08BIT, 0x00 },/* h_crop_start high */ + { 0x3801, CRL_REG_LEN_08BIT, 0x14 },/* h_crop_start low */ + { 0x3802, CRL_REG_LEN_08BIT, 0x00 },/* v_crop_start high */ + { 0x3803, CRL_REG_LEN_08BIT, 0x0C },/* v_crop_start low */ + { 0x3804, CRL_REG_LEN_08BIT, 0x10 },/* h_crop_end high */ + { 0x3805, CRL_REG_LEN_08BIT, 0x8B },/* h_crop_end low */ + { 0x3806, CRL_REG_LEN_08BIT, 0x0C },/* v_crop_end high */ + { 0x3807, CRL_REG_LEN_08BIT, 0x43 },/* v_crop_end low */ + { 0x3808, CRL_REG_LEN_08BIT, 0x08 },/* h_output_size high 2048 x 1536 */ + { 0x3809, CRL_REG_LEN_08BIT, 0x00 },/* h_output_size low */ + { 0x380A, CRL_REG_LEN_08BIT, 0x06 },/* v_output_size high */ + { 0x380B, CRL_REG_LEN_08BIT, 0x00 },/* v_output_size low */ + { 0x3810, CRL_REG_LEN_08BIT, 0x00 },/* Manual horizontal window offset high */ + { 0x3811, CRL_REG_LEN_08BIT, 0x04 },/* Manual horizontal window offset low */ + { 0x3813, CRL_REG_LEN_08BIT, 0x04 },/* Manual vertical window offset low */ + { 0x3814, CRL_REG_LEN_08BIT, 0x11 },/* Horizontal sub-sample odd inc */ + { 0x3815, CRL_REG_LEN_08BIT, 0x31 },/* Vertical sub-sample odd inc */ + { 0x383D, CRL_REG_LEN_08BIT, 0xFF },/* Vertical sub-sample odd inc */ + + { 0x3820, CRL_REG_LEN_08BIT, 0x02 },/* Binning */ + { 0x3842, CRL_REG_LEN_08BIT, 0x40 },/* Binning */ + { 0x5000, CRL_REG_LEN_08BIT, 0xD9 },/* Binning */ + + { 0x3836, CRL_REG_LEN_08BIT, 0x0C }, /* ablc_use_num */ + { 0x383C, CRL_REG_LEN_08BIT, 0x48 }, /* 
Boundary Pix num */ + + { 0x4008, CRL_REG_LEN_08BIT, 0x02 }, /* Magic */ + { 0x4009, CRL_REG_LEN_08BIT, 0x09 }, /* Magic */ + { 0x4019, CRL_REG_LEN_08BIT, 0x0C }, /* Magic */ + { 0x4051, CRL_REG_LEN_08BIT, 0x01 }, /* Magic */ + { 0x4066, CRL_REG_LEN_08BIT, 0x02 }, /* Magic */ + + { 0x5201, CRL_REG_LEN_08BIT, 0x71 }, /* Magic */ + { 0x5204, CRL_REG_LEN_08BIT, 0x00 }, /* Magic */ + { 0x5205, CRL_REG_LEN_08BIT, 0x80 }, /* Magic */ +}; + +static struct crl_register_write_rep ov13860_mode_1952_1088[] = { + + /* + * Exposure & Gain + */ + { 0x3501, CRL_REG_LEN_08BIT, 0x06 },/* Long Exposure */ + { 0x3502, CRL_REG_LEN_08BIT, 0xB8 },/* Long Exposure */ + + { 0x370A, CRL_REG_LEN_08BIT, 0x63 }, + { 0x372F, CRL_REG_LEN_08BIT, 0x90 }, + + /* + * Windowing + */ + { 0x3800, CRL_REG_LEN_08BIT, 0x00 },/* h_crop_start high */ + { 0x3801, CRL_REG_LEN_08BIT, 0x14 },/* h_crop_start low */ + { 0x3802, CRL_REG_LEN_08BIT, 0x00 },/* v_crop_start high */ + { 0x3803, CRL_REG_LEN_08BIT, 0x0C },/* v_crop_start low */ + { 0x3804, CRL_REG_LEN_08BIT, 0x10 },/* h_crop_end high */ + { 0x3805, CRL_REG_LEN_08BIT, 0x8B },/* h_crop_end low */ + { 0x3806, CRL_REG_LEN_08BIT, 0x0C },/* v_crop_end high */ + { 0x3807, CRL_REG_LEN_08BIT, 0x43 },/* v_crop_end low */ + { 0x3808, CRL_REG_LEN_08BIT, 0x07 },/* h_output_size high 1952 x 1088 */ + { 0x3809, CRL_REG_LEN_08BIT, 0xA0 },/* h_output_size low */ + { 0x380A, CRL_REG_LEN_08BIT, 0x04 },/* v_output_size high */ + { 0x380B, CRL_REG_LEN_08BIT, 0x40 },/* v_output_size low */ + { 0x3810, CRL_REG_LEN_08BIT, 0x00 },/* Manual horizontal window offset high */ + { 0x3811, CRL_REG_LEN_08BIT, 0x04 },/* Manual horizontal window offset low */ + { 0x3813, CRL_REG_LEN_08BIT, 0x04 },/* Manual vertical window offset low */ + { 0x3814, CRL_REG_LEN_08BIT, 0x11 },/* Horizontal sub-sample odd inc */ + { 0x3815, CRL_REG_LEN_08BIT, 0x31 },/* Vertical sub-sample odd inc */ + { 0x383D, CRL_REG_LEN_08BIT, 0xFF },/* Vertical sub-sample odd inc */ + + { 0x3820, CRL_REG_LEN_08BIT, 0x02 
},/* Binning */ + { 0x3842, CRL_REG_LEN_08BIT, 0x40 },/* Binning */ + { 0x5000, CRL_REG_LEN_08BIT, 0xD9 },/* Binning */ + + { 0x3836, CRL_REG_LEN_08BIT, 0x0C }, /* ablc_use_num */ + { 0x383C, CRL_REG_LEN_08BIT, 0x48 }, /* Boundary Pix num */ + + { 0x4008, CRL_REG_LEN_08BIT, 0x02 }, /* Magic */ + { 0x4009, CRL_REG_LEN_08BIT, 0x09 }, /* Magic */ + { 0x4019, CRL_REG_LEN_08BIT, 0x0C }, /* Magic */ + { 0x4051, CRL_REG_LEN_08BIT, 0x01 }, /* Magic */ + { 0x4066, CRL_REG_LEN_08BIT, 0x02 }, /* Magic */ + + { 0x5201, CRL_REG_LEN_08BIT, 0x71 }, /* Magic */ + { 0x5204, CRL_REG_LEN_08BIT, 0x00 }, /* Magic */ + { 0x5205, CRL_REG_LEN_08BIT, 0x80 }, /* Magic */ +}; + +static struct crl_register_write_rep ov13860_mode_720[] = { + + /* + * Exposure & Gain + */ + { 0x3501, CRL_REG_LEN_08BIT, 0x03 },/* Long Exposure */ + { 0x3502, CRL_REG_LEN_08BIT, 0x44 },/* Long Exposure */ + + { 0x370A, CRL_REG_LEN_08BIT, 0x63 }, + { 0x372F, CRL_REG_LEN_08BIT, 0x90 }, + + /* + * Windowing + */ + { 0x3800, CRL_REG_LEN_08BIT, 0x00 },/* h_crop_start high */ + { 0x3801, CRL_REG_LEN_08BIT, 0x14 },/* h_crop_start low */ + { 0x3802, CRL_REG_LEN_08BIT, 0x00 },/* v_crop_start high */ + { 0x3803, CRL_REG_LEN_08BIT, 0x0C },/* v_crop_start low */ + { 0x3804, CRL_REG_LEN_08BIT, 0x10 },/* h_crop_end high */ + { 0x3805, CRL_REG_LEN_08BIT, 0x8B },/* h_crop_end low */ + { 0x3806, CRL_REG_LEN_08BIT, 0x0C },/* v_crop_end high */ + { 0x3807, CRL_REG_LEN_08BIT, 0x43 },/* v_crop_end low */ + { 0x3808, CRL_REG_LEN_08BIT, 0x05 },/* h_output_size high 1280 x 720 */ + { 0x3809, CRL_REG_LEN_08BIT, 0x00 },/* h_output_size low */ + { 0x380A, CRL_REG_LEN_08BIT, 0x02 },/* v_output_size high */ + { 0x380B, CRL_REG_LEN_08BIT, 0xD0 },/* v_output_size low */ + { 0x3810, CRL_REG_LEN_08BIT, 0x00 },/* Manual horizontal window offset high */ + { 0x3811, CRL_REG_LEN_08BIT, 0x04 },/* Manual horizontal window offset low */ + { 0x3813, CRL_REG_LEN_08BIT, 0x04 },/* Manual vertical window offset low */ + { 0x3814, CRL_REG_LEN_08BIT, 0x11 },/* 
Horizontal sub-sample odd inc */ + { 0x3815, CRL_REG_LEN_08BIT, 0x31 },/* Vertical sub-sample odd inc */ + { 0x383D, CRL_REG_LEN_08BIT, 0xFF },/* Vertical sub-sample odd inc */ + + { 0x3820, CRL_REG_LEN_08BIT, 0x02 },/* Binning */ + { 0x3842, CRL_REG_LEN_08BIT, 0x40 },/* Binning */ + { 0x5000, CRL_REG_LEN_08BIT, 0xD9 },/* Binning */ + + { 0x3836, CRL_REG_LEN_08BIT, 0x0C }, /* ablc_use_num */ + { 0x383C, CRL_REG_LEN_08BIT, 0x48 }, /* Boundary Pix num */ + + { 0x4008, CRL_REG_LEN_08BIT, 0x02 }, /* Magic */ + { 0x4009, CRL_REG_LEN_08BIT, 0x09 }, /* Magic */ + { 0x4019, CRL_REG_LEN_08BIT, 0x0C }, /* Magic */ + { 0x4051, CRL_REG_LEN_08BIT, 0x01 }, /* Magic */ + { 0x4066, CRL_REG_LEN_08BIT, 0x02 }, /* Magic */ + + { 0x5201, CRL_REG_LEN_08BIT, 0x71 }, /* Magic */ + { 0x5204, CRL_REG_LEN_08BIT, 0x00 }, /* Magic */ + { 0x5205, CRL_REG_LEN_08BIT, 0x80 }, /* Magic */ +}; + +static struct crl_register_write_rep ov13860_mode_480[] = { + + /* + * Exposure & Gain + */ + { 0x3501, CRL_REG_LEN_08BIT, 0x03 },/* Long Exposure */ + { 0x3502, CRL_REG_LEN_08BIT, 0x44 },/* Long Exposure */ + + { 0x370A, CRL_REG_LEN_08BIT, 0x63 }, + { 0x372F, CRL_REG_LEN_08BIT, 0x90 }, + + /* + * Windowing + */ + { 0x3800, CRL_REG_LEN_08BIT, 0x00 },/* h_crop_start high */ + { 0x3801, CRL_REG_LEN_08BIT, 0x14 },/* h_crop_start low */ + { 0x3802, CRL_REG_LEN_08BIT, 0x00 },/* v_crop_start high */ + { 0x3803, CRL_REG_LEN_08BIT, 0x0C },/* v_crop_start low */ + { 0x3804, CRL_REG_LEN_08BIT, 0x10 },/* h_crop_end high */ + { 0x3805, CRL_REG_LEN_08BIT, 0x8B },/* h_crop_end low */ + { 0x3806, CRL_REG_LEN_08BIT, 0x0C },/* v_crop_end high */ + { 0x3807, CRL_REG_LEN_08BIT, 0x43 },/* v_crop_end low */ + { 0x3808, CRL_REG_LEN_08BIT, 0x02 },/* h_output_size high 640 x 480 */ + { 0x3809, CRL_REG_LEN_08BIT, 0x80 },/* h_output_size low */ + { 0x380A, CRL_REG_LEN_08BIT, 0x01 },/* v_output_size high */ + { 0x380B, CRL_REG_LEN_08BIT, 0xE0 },/* v_output_size low */ + { 0x3810, CRL_REG_LEN_08BIT, 0x00 },/* Manual horizontal window 
offset high */ + { 0x3811, CRL_REG_LEN_08BIT, 0x04 },/* Manual horizontal window offset low */ + { 0x3813, CRL_REG_LEN_08BIT, 0x04 },/* Manual vertical window offset low */ + { 0x3814, CRL_REG_LEN_08BIT, 0x11 },/* Horizontal sub-sample odd inc */ + { 0x3815, CRL_REG_LEN_08BIT, 0x31 },/* Vertical sub-sample odd inc */ + { 0x383D, CRL_REG_LEN_08BIT, 0xFF },/* Vertical sub-sample odd inc */ + + { 0x3820, CRL_REG_LEN_08BIT, 0x02 },/* Binning */ + { 0x3842, CRL_REG_LEN_08BIT, 0x40 },/* Binning */ + { 0x5000, CRL_REG_LEN_08BIT, 0xD9 },/* Binning */ + + { 0x3836, CRL_REG_LEN_08BIT, 0x0C }, /* ablc_use_num */ + { 0x383C, CRL_REG_LEN_08BIT, 0x48 }, /* Boundary Pix num */ + + { 0x4008, CRL_REG_LEN_08BIT, 0x02 }, /* Magic */ + { 0x4009, CRL_REG_LEN_08BIT, 0x09 }, /* Magic */ + { 0x4019, CRL_REG_LEN_08BIT, 0x0C }, /* Magic */ + { 0x4051, CRL_REG_LEN_08BIT, 0x01 }, /* Magic */ + { 0x4066, CRL_REG_LEN_08BIT, 0x02 }, /* Magic */ + + { 0x5201, CRL_REG_LEN_08BIT, 0x71 }, /* Magic */ + { 0x5204, CRL_REG_LEN_08BIT, 0x00 }, /* Magic */ + { 0x5205, CRL_REG_LEN_08BIT, 0x80 }, /* Magic */ +}; + +static struct crl_register_write_rep ov13860_streamon_regs[] = { + { 0x0100, CRL_REG_LEN_08BIT, 0x01 } +}; + +static struct crl_register_write_rep ov13860_streamoff_regs[] = { + { 0x0100, CRL_REG_LEN_08BIT, 0x00 } +}; + +static struct crl_arithmetic_ops ov13860_vflip_ops[] = { + { + .op = CRL_BITWISE_LSHIFT, + .operand.entity_val = 1, + }, +}; + +static struct crl_arithmetic_ops ov13860_hflip_ops[] = { + { + .op = CRL_BITWISE_LSHIFT, + .operand.entity_val = 1, + }, +}; + +static struct crl_dynamic_register_access ov13860_v_flip_regs[] = { + { + .address = 0x3820, + .len = CRL_REG_LEN_08BIT | CRL_REG_READ_AND_UPDATE, + .ops_items = ARRAY_SIZE(ov13860_vflip_ops), + .ops = ov13860_vflip_ops, + .mask = 0x2, + }, +}; + +static struct crl_dynamic_register_access ov13860_h_flip_regs[] = { + { + .address = 0x3821, + .len = CRL_REG_LEN_08BIT | CRL_REG_READ_AND_UPDATE, + .ops_items = 
ARRAY_SIZE(ov13860_hflip_ops), + .ops = ov13860_hflip_ops, + .mask = 0x2, + }, +}; + +struct crl_register_write_rep ov13860_poweroff_regset[] = { + { 0x0103, CRL_REG_LEN_08BIT, 0x01 }, +}; + +static struct crl_dynamic_register_access ov13860_ana_gain_global_regs[] = { + { + .address = 0x350A, + .len = CRL_REG_LEN_16BIT, + .ops_items = 0, + .ops = 0, + .mask = 0x7ff, + }, +}; + +static struct crl_dynamic_register_access ov13860_exposure_regs[] = { + { + .address = 0x3501, + .len = CRL_REG_LEN_16BIT, + .ops_items = 0, + .ops = 0, + .mask = 0xffff, + } +}; + +static struct crl_dynamic_register_access ov13860_vblank_regs[] = { + { + .address = 0x380E, + .len = CRL_REG_LEN_16BIT, + .ops_items = 0, + .ops = 0, + .mask = 0xffff, + }, +}; + +static struct crl_dynamic_register_access ov13860_hblank_regs[] = { + { + .address = 0x380C, + .len = CRL_REG_LEN_16BIT, + .ops_items = 0, + .ops = 0, + .mask = 0xffff, + }, +}; + +static struct crl_sensor_detect_config ov13860_sensor_detect_regset[] = { + { + .reg = { 0x300A, CRL_REG_LEN_08BIT, 0x000000ff }, + .width = 7, + }, + { + .reg = { 0x300B, CRL_REG_LEN_08BIT, 0x000000ff }, + .width = 7, + }, + { + .reg = { 0x300C, CRL_REG_LEN_08BIT, 0x000000ff }, + .width = 7, + }, +}; + +static struct crl_pll_configuration ov13860_pll_configurations[] = { + { + .input_clk = 24000000, + .op_sys_clk = 600000000, + .bitsperpixel = 10, + .pixel_rate_csi = 150000000, + .pixel_rate_pa = 240000000, + .comp_items = 0, + .ctrl_data = 0, + .pll_regs_items = ARRAY_SIZE(ov13860_pll_1200mbps), + .pll_regs = ov13860_pll_1200mbps, + }, + { + .input_clk = 24000000, + .op_sys_clk = 300000000, + .bitsperpixel = 10, + .pixel_rate_csi = 75000000, + .pixel_rate_pa = 240000000, + .comp_items = 0, + .ctrl_data = 0, + .pll_regs_items = ARRAY_SIZE(ov13860_pll_600mbps), + .pll_regs = ov13860_pll_600mbps, + } +}; + +static struct crl_subdev_rect_rep ov13860_13m_rects[] = { + { + .subdev_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .in_rect.left = 0, + .in_rect.top = 0, + 
.in_rect.width = 4224, + .in_rect.height = 3120, + .out_rect.left = 0, + .out_rect.top = 0, + .out_rect.width = 4224, + .out_rect.height = 3120, + }, + { + .subdev_type = CRL_SUBDEV_TYPE_BINNER, + .in_rect.left = 0, + .in_rect.top = 0, + .in_rect.width = 4224, + .in_rect.height = 3120, + .out_rect.left = 0, + .out_rect.top = 0, + .out_rect.width = 4208, + .out_rect.height = 3120, + }, +}; + +static struct crl_subdev_rect_rep ov13860_8m_rects[] = { + { + .subdev_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .in_rect.left = 0, + .in_rect.top = 0, + .in_rect.width = 4224, + .in_rect.height = 3120, + .out_rect.left = 0, + .out_rect.top = 0, + .out_rect.width = 4224, + .out_rect.height = 3120, + }, + { + .subdev_type = CRL_SUBDEV_TYPE_BINNER, + .in_rect.left = 0, + .in_rect.top = 0, + .in_rect.width = 4224, + .in_rect.height = 3120, + .out_rect.left = 0, + .out_rect.top = 0, + .out_rect.width = 3280, + .out_rect.height = 2448, + }, +}; + +static struct crl_subdev_rect_rep ov13860_4k2k_rects[] = { + { + .subdev_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .in_rect.left = 0, + .in_rect.top = 0, + .in_rect.width = 4224, + .in_rect.height = 3120, + .out_rect.left = 0, + .out_rect.top = 0, + .out_rect.width = 4224, + .out_rect.height = 3120, + }, + { + .subdev_type = CRL_SUBDEV_TYPE_BINNER, + .in_rect.left = 0, + .in_rect.top = 0, + .in_rect.width = 4224, + .in_rect.height = 3120, + .out_rect.left = 0, + .out_rect.top = 0, + .out_rect.width = 4096, + .out_rect.height = 2160, + }, +}; + +static struct crl_subdev_rect_rep ov13860_uhd_rects[] = { + { + .subdev_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .in_rect.left = 0, + .in_rect.top = 0, + .in_rect.width = 4224, + .in_rect.height = 3120, + .out_rect.left = 0, + .out_rect.top = 0, + .out_rect.width = 4224, + .out_rect.height = 3120, + }, + { + .subdev_type = CRL_SUBDEV_TYPE_BINNER, + .in_rect.left = 0, + .in_rect.top = 0, + .in_rect.width = 4224, + .in_rect.height = 3120, + .out_rect.left = 0, + .out_rect.top = 0, + .out_rect.width = 3840, + 
.out_rect.height = 2160, + }, +}; + +static struct crl_subdev_rect_rep ov13860_6m_rects[] = { + { + .subdev_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .in_rect.left = 0, + .in_rect.top = 0, + .in_rect.width = 4224, + .in_rect.height = 3120, + .out_rect.left = 0, + .out_rect.top = 0, + .out_rect.width = 4224, + .out_rect.height = 3120, + }, + { + .subdev_type = CRL_SUBDEV_TYPE_BINNER, + .in_rect.left = 0, + .in_rect.top = 0, + .in_rect.width = 4224, + .in_rect.height = 3120, + .out_rect.left = 0, + .out_rect.top = 0, + .out_rect.width = 3280, + .out_rect.height = 1836, + }, +}; + +static struct crl_subdev_rect_rep ov13860_3m_rects[] = { + { + .subdev_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .in_rect.left = 0, + .in_rect.top = 0, + .in_rect.width = 4224, + .in_rect.height = 3120, + .out_rect.left = 0, + .out_rect.top = 0, + .out_rect.width = 4224, + .out_rect.height = 3120, + }, + { + .subdev_type = CRL_SUBDEV_TYPE_BINNER, + .in_rect.left = 0, + .in_rect.top = 0, + .in_rect.width = 4224, + .in_rect.height = 3120, + .out_rect.left = 0, + .out_rect.top = 0, + .out_rect.width = 2048, + .out_rect.height = 1536, + }, +}; + +static struct crl_subdev_rect_rep ov13860_1952_1088_rects[] = { + { + .subdev_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .in_rect.left = 0, + .in_rect.top = 0, + .in_rect.width = 4224, + .in_rect.height = 3120, + .out_rect.left = 0, + .out_rect.top = 0, + .out_rect.width = 4224, + .out_rect.height = 3120, + }, + { + .subdev_type = CRL_SUBDEV_TYPE_BINNER, + .in_rect.left = 0, + .in_rect.top = 0, + .in_rect.width = 4224, + .in_rect.height = 3120, + .out_rect.left = 0, + .out_rect.top = 0, + .out_rect.width = 1952, + .out_rect.height = 1088, + }, +}; + +static struct crl_subdev_rect_rep ov13860_720_rects[] = { + { + .subdev_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .in_rect.left = 0, + .in_rect.top = 0, + .in_rect.width = 4224, + .in_rect.height = 3120, + .out_rect.left = 0, + .out_rect.top = 0, + .out_rect.width = 4224, + .out_rect.height = 3120, + }, + { + .subdev_type 
= CRL_SUBDEV_TYPE_BINNER, + .in_rect.left = 0, + .in_rect.top = 0, + .in_rect.width = 4224, + .in_rect.height = 3120, + .out_rect.left = 0, + .out_rect.top = 0, + .out_rect.width = 1280, + .out_rect.height = 720, + }, +}; + +static struct crl_subdev_rect_rep ov13860_480_rects[] = { + { + .subdev_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .in_rect.left = 0, + .in_rect.top = 0, + .in_rect.width = 4224, + .in_rect.height = 3120, + .out_rect.left = 0, + .out_rect.top = 0, + .out_rect.width = 4224, + .out_rect.height = 3120, + }, + { + .subdev_type = CRL_SUBDEV_TYPE_BINNER, + .in_rect.left = 0, + .in_rect.top = 0, + .in_rect.width = 4224, + .in_rect.height = 3120, + .out_rect.left = 0, + .out_rect.top = 0, + .out_rect.width = 640, + .out_rect.height = 480, + }, +}; + +static struct crl_mode_rep ov13860_modes[] = { + { + .sd_rects_items = ARRAY_SIZE(ov13860_13m_rects), + .sd_rects = ov13860_13m_rects, + .binn_hor = 1, + .binn_vert = 1, + .scale_m = 1, + .width = 4208, + .height = 3120, + .comp_items = 0, + .ctrl_data = 0, + .mode_regs_items = ARRAY_SIZE(ov13860_mode_13m), + .mode_regs = ov13860_mode_13m, + }, + { + .sd_rects_items = ARRAY_SIZE(ov13860_8m_rects), + .sd_rects = ov13860_8m_rects, + .binn_hor = 1, + .binn_vert = 1, + .scale_m = 1, + .width = 3280, + .height = 2448, + .comp_items = 0, + .ctrl_data = 0, + .mode_regs_items = ARRAY_SIZE(ov13860_mode_8m), + .mode_regs = ov13860_mode_8m, + }, + { + .sd_rects_items = ARRAY_SIZE(ov13860_4k2k_rects), + .sd_rects = ov13860_4k2k_rects, + .binn_hor = 1, + .binn_vert = 1, + .scale_m = 1, + .width = 4096, + .height = 2160, + .comp_items = 0, + .ctrl_data = 0, + .mode_regs_items = ARRAY_SIZE(ov13860_mode_4k2k), + .mode_regs = ov13860_mode_4k2k, + }, + { + .sd_rects_items = ARRAY_SIZE(ov13860_uhd_rects), + .sd_rects = ov13860_uhd_rects, + .binn_hor = 1, + .binn_vert = 1, + .scale_m = 1, + .width = 3840, + .height = 2160, + .comp_items = 0, + .ctrl_data = 0, + .mode_regs_items = ARRAY_SIZE(ov13860_mode_uhd), + .mode_regs = 
ov13860_mode_uhd, + }, + { + .sd_rects_items = ARRAY_SIZE(ov13860_6m_rects), + .sd_rects = ov13860_6m_rects, + .binn_hor = 1, + .binn_vert = 1, + .scale_m = 1, + .width = 3280, + .height = 1836, + .comp_items = 0, + .ctrl_data = 0, + .mode_regs_items = ARRAY_SIZE(ov13860_mode_6m), + .mode_regs = ov13860_mode_6m, + }, + { + .sd_rects_items = ARRAY_SIZE(ov13860_3m_rects), + .sd_rects = ov13860_3m_rects, + .binn_hor = 2, + .binn_vert = 2, + .scale_m = 1, + .width = 2048, + .height = 1536, + .comp_items = 0, + .ctrl_data = 0, + .mode_regs_items = ARRAY_SIZE(ov13860_mode_3m), + .mode_regs = ov13860_mode_3m, + }, + { + .sd_rects_items = ARRAY_SIZE(ov13860_1952_1088_rects), + .sd_rects = ov13860_1952_1088_rects, + .binn_hor = 2, + .binn_vert = 2, + .scale_m = 1, + .width = 1952, + .height = 1088, + .comp_items = 0, + .ctrl_data = 0, + .mode_regs_items = ARRAY_SIZE(ov13860_mode_1952_1088), + .mode_regs = ov13860_mode_1952_1088, + }, + { + .sd_rects_items = ARRAY_SIZE(ov13860_720_rects), + .sd_rects = ov13860_720_rects, + .binn_hor = 2, + .binn_vert = 2, + .scale_m = 1, + .width = 1280, + .height = 720, + .comp_items = 0, + .ctrl_data = 0, + .mode_regs_items = ARRAY_SIZE(ov13860_mode_720), + .mode_regs = ov13860_mode_720, + }, + { + .sd_rects_items = ARRAY_SIZE(ov13860_480_rects), + .sd_rects = ov13860_480_rects, + .binn_hor = 2, + .binn_vert = 2, + .scale_m = 1, + .width = 640, + .height = 480, + .comp_items = 0, + .ctrl_data = 0, + .mode_regs_items = ARRAY_SIZE(ov13860_mode_480), + .mode_regs = ov13860_mode_480, + }, +}; + +static struct crl_sensor_subdev_config ov13860_sensor_subdevs[] = { + { + .subdev_type = CRL_SUBDEV_TYPE_BINNER, + .name = "ov13860 binner", + }, + { + .subdev_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .name = "ov13860 pixel array", + }, +}; + +static struct crl_sensor_limits ov13860_sensor_limits = { + .x_addr_min = 0, + .y_addr_min = 0, + .x_addr_max = 4224, + .y_addr_max = 3120, + .min_frame_length_lines = 160, + .max_frame_length_lines = 65535, + 
.min_line_length_pixels = 6024, + .max_line_length_pixels = 32752, + .scaler_m_min = 16, + .scaler_m_max = 16, + .scaler_n_min = 16, + .scaler_n_max = 16, + .min_even_inc = 1, + .max_even_inc = 1, + .min_odd_inc = 1, + .max_odd_inc = 3, +}; + +static struct crl_flip_data ov13860_flip_configurations[] = { + { + .flip = CRL_FLIP_DEFAULT_NONE, + .pixel_order = CRL_PIXEL_ORDER_BGGR, + }, + { + .flip = CRL_FLIP_HFLIP, + .pixel_order = CRL_PIXEL_ORDER_BGGR, + }, + { + .flip = CRL_FLIP_VFLIP, + .pixel_order = CRL_PIXEL_ORDER_BGGR, + }, + { + .flip = CRL_FLIP_HFLIP_VFLIP, + .pixel_order = CRL_PIXEL_ORDER_BGGR, + }, +}; + +static struct crl_csi_data_fmt ov13860_crl_csi_data_fmt[] = { + { + .code = MEDIA_BUS_FMT_SGRBG10_1X10, + .pixel_order = CRL_PIXEL_ORDER_GRBG, + .bits_per_pixel = 10, + .regs_items = 0, + .regs = 0, + }, + { + .code = MEDIA_BUS_FMT_SRGGB10_1X10, + .pixel_order = CRL_PIXEL_ORDER_RGGB, + .bits_per_pixel = 10, + .regs_items = 0, + .regs = 0, + }, + { + .code = MEDIA_BUS_FMT_SBGGR10_1X10, + .pixel_order = CRL_PIXEL_ORDER_BGGR, + .bits_per_pixel = 10, + .regs_items = 0, + .regs = 0, + }, + { + .code = MEDIA_BUS_FMT_SGBRG10_1X10, + .pixel_order = CRL_PIXEL_ORDER_GBRG, + .regs_items = 1, + .bits_per_pixel = 10, + .regs = 0, + }, +}; + +static struct crl_v4l2_ctrl ov13860_v4l2_ctrls[] = { + { + .sd_type = CRL_SUBDEV_TYPE_BINNER, + .op_type = CRL_V4L2_CTRL_SET_OP, + .context = SENSOR_IDLE, + .ctrl_id = V4L2_CID_LINK_FREQ, + .name = "V4L2_CID_LINK_FREQ", + .type = CRL_V4L2_CTRL_TYPE_MENU_INT, + .data.v4l2_int_menu.def = 0, + .data.v4l2_int_menu.max = 0, + .data.v4l2_int_menu.menu = 0, + .flags = 0, + .impact = CRL_IMPACTS_NO_IMPACT, + .regs_items = 0, + .regs = 0, + .dep_items = 0, + .dep_ctrls = 0, + }, + { + .sd_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .op_type = CRL_V4L2_CTRL_GET_OP, + .context = SENSOR_POWERED_ON, + .ctrl_id = V4L2_CID_PIXEL_RATE, + .name = "V4L2_CID_PIXEL_RATE_PA", + .type = CRL_V4L2_CTRL_TYPE_INTEGER, + .data.std_data.min = 0, + 
.data.std_data.max = INT_MAX, + .data.std_data.step = 1, + .data.std_data.def = 0, + .flags = 0, + .impact = CRL_IMPACTS_NO_IMPACT, + .regs_items = 0, + .regs = 0, + .dep_items = 0, + .dep_ctrls = 0, + }, + { + .sd_type = CRL_SUBDEV_TYPE_BINNER, + .op_type = CRL_V4L2_CTRL_GET_OP, + .context = SENSOR_POWERED_ON, + .ctrl_id = V4L2_CID_PIXEL_RATE, + .name = "V4L2_CID_PIXEL_RATE_CSI", + .type = CRL_V4L2_CTRL_TYPE_INTEGER, + .data.std_data.min = 0, + .data.std_data.max = INT_MAX, + .data.std_data.step = 1, + .data.std_data.def = 0, + .flags = 0, + .impact = CRL_IMPACTS_NO_IMPACT, + .regs_items = 0, + .regs = 0, + .dep_items = 0, + .dep_ctrls = 0, + }, + { + .sd_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .op_type = CRL_V4L2_CTRL_SET_OP, + .context = SENSOR_POWERED_ON, + .ctrl_id = V4L2_CID_ANALOGUE_GAIN, + .name = "V4L2_CID_ANALOGUE_GAIN", + .type = CRL_V4L2_CTRL_TYPE_INTEGER, + .data.std_data.min = 0, + .data.std_data.max = 4096, + .data.std_data.step = 1, + .data.std_data.def = 0, + .flags = 0, + .impact = CRL_IMPACTS_NO_IMPACT, + .ctrl = 0, + .regs_items = ARRAY_SIZE(ov13860_ana_gain_global_regs), + .regs = ov13860_ana_gain_global_regs, + .dep_items = 0, + .dep_ctrls = 0, + }, + { + .sd_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .op_type = CRL_V4L2_CTRL_SET_OP, + .context = SENSOR_POWERED_ON, + .ctrl_id = V4L2_CID_EXPOSURE, + .name = "V4L2_CID_EXPOSURE", + .type = CRL_V4L2_CTRL_TYPE_INTEGER, + .data.std_data.min = 0, + .data.std_data.max = 65535, + .data.std_data.step = 1, + .data.std_data.def = 0, + .flags = 0, + .impact = CRL_IMPACTS_NO_IMPACT, + .ctrl = 0, + .regs_items = ARRAY_SIZE(ov13860_exposure_regs), + .regs = ov13860_exposure_regs, + .dep_items = 0, /* FLL is changes automatically */ + .dep_ctrls = 0, + }, + { + .sd_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .op_type = CRL_V4L2_CTRL_SET_OP, + .context = SENSOR_POWERED_ON, + .ctrl_id = V4L2_CID_HFLIP, + .name = "V4L2_CID_HFLIP", + .type = CRL_V4L2_CTRL_TYPE_INTEGER, + .data.std_data.min = 0, + .data.std_data.max = 1, + 
.data.std_data.step = 1, + .data.std_data.def = 0, + .flags = 0, + .impact = CRL_IMPACTS_NO_IMPACT, + .ctrl = 0, + .regs_items = ARRAY_SIZE(ov13860_h_flip_regs), + .regs = ov13860_h_flip_regs, + .dep_items = 0, + .dep_ctrls = 0, + }, + { + .sd_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .op_type = CRL_V4L2_CTRL_SET_OP, + .context = SENSOR_POWERED_ON, + .ctrl_id = V4L2_CID_VFLIP, + .name = "V4L2_CID_VFLIP", + .type = CRL_V4L2_CTRL_TYPE_INTEGER, + .data.std_data.min = 0, + .data.std_data.max = 1, + .data.std_data.step = 1, + .data.std_data.def = 0, + .flags = 0, + .impact = CRL_IMPACTS_NO_IMPACT, + .ctrl = 0, + .regs_items = ARRAY_SIZE(ov13860_v_flip_regs), + .regs = ov13860_v_flip_regs, + .dep_items = 0, + .dep_ctrls = 0, + }, + { + .sd_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .op_type = CRL_V4L2_CTRL_SET_OP, + .context = SENSOR_POWERED_ON, + .ctrl_id = V4L2_CID_VBLANK, + .name = "V4L2_CID_VBLANK", + .type = CRL_V4L2_CTRL_TYPE_INTEGER, + .data.std_data.min = 0, + .data.std_data.max = 65535, + .data.std_data.step = 1, + .data.std_data.def = 0, + .flags = V4L2_CTRL_FLAG_UPDATE, + .impact = CRL_IMPACTS_NO_IMPACT, + .ctrl = 0, + .regs_items = ARRAY_SIZE(ov13860_vblank_regs), + .regs = ov13860_vblank_regs, + .dep_items = 0, /* FLL changed automatically */ + .dep_ctrls = 0, + }, + { + .sd_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .op_type = CRL_V4L2_CTRL_SET_OP, + .context = SENSOR_POWERED_ON, + .ctrl_id = V4L2_CID_HBLANK, + .name = "V4L2_CID_HBLANK", + .type = CRL_V4L2_CTRL_TYPE_INTEGER, + .data.std_data.min = 0, + .data.std_data.max = 65520, + .data.std_data.step = 1, + .data.std_data.def = 0, + .flags = V4L2_CTRL_FLAG_UPDATE, + .impact = CRL_IMPACTS_NO_IMPACT, + .ctrl = 0, + .regs_items = ARRAY_SIZE(ov13860_hblank_regs), + .regs = ov13860_hblank_regs, + .dep_items = 0, + .dep_ctrls = 0, + }, +}; + +/* Power items, they are enabled in the order they are listed here */ +static struct crl_power_seq_entity ov13860_power_items[] = { + { + .type = CRL_POWER_ETY_CLK_FRAMEWORK, + .val = 
24000000, + }, + { + .type = CRL_POWER_ETY_GPIO_FROM_PDATA, + .val = 1, + }, +}; + +struct crl_sensor_configuration ov13860_crl_configuration = { + + .power_items = ARRAY_SIZE(ov13860_power_items), + .power_entities = ov13860_power_items, + + .powerup_regs_items = ARRAY_SIZE(ov13860_powerup_regset), + .powerup_regs = ov13860_powerup_regset, + + .poweroff_regs_items = 0, + .poweroff_regs = 0, + + .id_reg_items = ARRAY_SIZE(ov13860_sensor_detect_regset), + .id_regs = ov13860_sensor_detect_regset, + + .subdev_items = ARRAY_SIZE(ov13860_sensor_subdevs), + .subdevs = ov13860_sensor_subdevs, + + .sensor_limits = &ov13860_sensor_limits, + + .pll_config_items = ARRAY_SIZE(ov13860_pll_configurations), + .pll_configs = ov13860_pll_configurations, + + .modes_items = ARRAY_SIZE(ov13860_modes), + .modes = ov13860_modes, + + .streamon_regs_items = ARRAY_SIZE(ov13860_streamon_regs), + .streamon_regs = ov13860_streamon_regs, + + .streamoff_regs_items = ARRAY_SIZE(ov13860_streamoff_regs), + .streamoff_regs = ov13860_streamoff_regs, + + .v4l2_ctrls_items = ARRAY_SIZE(ov13860_v4l2_ctrls), + .v4l2_ctrl_bank = ov13860_v4l2_ctrls, + + .csi_fmts_items = ARRAY_SIZE(ov13860_crl_csi_data_fmt), + .csi_fmts = ov13860_crl_csi_data_fmt, + + .flip_items = ARRAY_SIZE(ov13860_flip_configurations), + .flip_data = ov13860_flip_configurations, +}; + +#endif /* __CRLMODULE_OV13860_CONFIGURATION_H_ */ diff --git a/drivers/media/i2c/crlmodule/crl_ov2740_configuration.h b/drivers/media/i2c/crlmodule/crl_ov2740_configuration.h new file mode 100644 index 0000000000000..1060d6b62f041 --- /dev/null +++ b/drivers/media/i2c/crlmodule/crl_ov2740_configuration.h @@ -0,0 +1,761 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (C) 2017 - 2018 Intel Corporation + * + * Author: Roy Yang + * + */ + +#ifndef __CRLMODULE_OV2740_CONFIGURATION_H_ +#define __CRLMODULE_OV2740_CONFIGURATION_H_ + +#include "crlmodule-nvm.h" +#include "crlmodule-sensor-ds.h" + +static struct crl_register_write_rep 
ov2740_powerup_regset[] = { + /*Reset*/ + {0x0103, CRL_REG_LEN_08BIT, 0x01}, + {0x0302, CRL_REG_LEN_08BIT, 0x4b},/* 26;1e */ + {0x030d, CRL_REG_LEN_08BIT, 0x4b},/* 26;1e */ + {0x030e, CRL_REG_LEN_08BIT, 0x02}, + {0x030a, CRL_REG_LEN_08BIT, 0x01}, + {0x0312, CRL_REG_LEN_08BIT, 0x11},/* 01 */ + {0x3000, CRL_REG_LEN_08BIT, 0x00}, + {0x3018, CRL_REG_LEN_08BIT, 0x32},/* 12(2 lane for 32; 1lane for 12) */ + {0x3031, CRL_REG_LEN_08BIT, 0x0a}, + {0x3080, CRL_REG_LEN_08BIT, 0x08}, + {0x3083, CRL_REG_LEN_08BIT, 0xB4}, + {0x3103, CRL_REG_LEN_08BIT, 0x00}, + {0x3104, CRL_REG_LEN_08BIT, 0x01}, + {0x3106, CRL_REG_LEN_08BIT, 0x01}, + {0x3500, CRL_REG_LEN_08BIT, 0x00}, + {0x3501, CRL_REG_LEN_08BIT, 0x44}, + {0x3502, CRL_REG_LEN_08BIT, 0x40}, + {0x3503, CRL_REG_LEN_08BIT, 0x88}, + {0x3507, CRL_REG_LEN_08BIT, 0x00}, + {0x3508, CRL_REG_LEN_08BIT, 0x00}, + {0x3509, CRL_REG_LEN_08BIT, 0x80}, + {0x350c, CRL_REG_LEN_08BIT, 0x00}, + {0x350d, CRL_REG_LEN_08BIT, 0x80}, + {0x3510, CRL_REG_LEN_08BIT, 0x00}, + {0x3511, CRL_REG_LEN_08BIT, 0x00}, + {0x3512, CRL_REG_LEN_08BIT, 0x20}, + {0x3632, CRL_REG_LEN_08BIT, 0x00}, + {0x3633, CRL_REG_LEN_08BIT, 0x10}, + {0x3634, CRL_REG_LEN_08BIT, 0x10}, + {0x3635, CRL_REG_LEN_08BIT, 0x10}, + {0x3645, CRL_REG_LEN_08BIT, 0x13}, + {0x3646, CRL_REG_LEN_08BIT, 0x81}, + {0x3636, CRL_REG_LEN_08BIT, 0x10}, + {0x3651, CRL_REG_LEN_08BIT, 0x0a}, + {0x3656, CRL_REG_LEN_08BIT, 0x02}, + {0x3659, CRL_REG_LEN_08BIT, 0x04}, + {0x365a, CRL_REG_LEN_08BIT, 0xda}, + {0x365b, CRL_REG_LEN_08BIT, 0xa2}, + {0x365c, CRL_REG_LEN_08BIT, 0x04}, + {0x365d, CRL_REG_LEN_08BIT, 0x1d}, + {0x365e, CRL_REG_LEN_08BIT, 0x1a}, + {0x3662, CRL_REG_LEN_08BIT, 0xd7}, + {0x3667, CRL_REG_LEN_08BIT, 0x78}, + {0x3669, CRL_REG_LEN_08BIT, 0x0a}, + {0x366a, CRL_REG_LEN_08BIT, 0x92}, + {0x3700, CRL_REG_LEN_08BIT, 0x54}, + {0x3702, CRL_REG_LEN_08BIT, 0x10}, + {0x3706, CRL_REG_LEN_08BIT, 0x42}, + {0x3709, CRL_REG_LEN_08BIT, 0x30}, + {0x370b, CRL_REG_LEN_08BIT, 0xc2}, + {0x3714, CRL_REG_LEN_08BIT, 0x63}, + 
{0x3715, CRL_REG_LEN_08BIT, 0x01}, + {0x3716, CRL_REG_LEN_08BIT, 0x00}, + {0x371a, CRL_REG_LEN_08BIT, 0x3e}, + {0x3732, CRL_REG_LEN_08BIT, 0x0e}, + {0x3733, CRL_REG_LEN_08BIT, 0x10}, + {0x375f, CRL_REG_LEN_08BIT, 0x0e}, + {0x3768, CRL_REG_LEN_08BIT, 0x30}, + {0x3769, CRL_REG_LEN_08BIT, 0x44}, + {0x376a, CRL_REG_LEN_08BIT, 0x22}, + {0x377b, CRL_REG_LEN_08BIT, 0x20}, + {0x377c, CRL_REG_LEN_08BIT, 0x00}, + {0x377d, CRL_REG_LEN_08BIT, 0x0c}, + {0x3798, CRL_REG_LEN_08BIT, 0x00}, + {0x37a1, CRL_REG_LEN_08BIT, 0x55}, + {0x37a8, CRL_REG_LEN_08BIT, 0x6d}, + {0x37c2, CRL_REG_LEN_08BIT, 0x04}, + {0x37c5, CRL_REG_LEN_08BIT, 0x00}, + {0x37c8, CRL_REG_LEN_08BIT, 0x00}, + {0x3800, CRL_REG_LEN_08BIT, 0x00}, + {0x3801, CRL_REG_LEN_08BIT, 0x00}, + {0x3802, CRL_REG_LEN_08BIT, 0x00}, + {0x3803, CRL_REG_LEN_08BIT, 0x00}, + {0x3804, CRL_REG_LEN_08BIT, 0x07}, + {0x3805, CRL_REG_LEN_08BIT, 0x8f}, + {0x3806, CRL_REG_LEN_08BIT, 0x04}, + {0x3807, CRL_REG_LEN_08BIT, 0x47}, + {0x3808, CRL_REG_LEN_08BIT, 0x07}, + {0x3809, CRL_REG_LEN_08BIT, 0x88}, + {0x380a, CRL_REG_LEN_08BIT, 0x04}, + {0x380b, CRL_REG_LEN_08BIT, 0x40}, + {0x380c, CRL_REG_LEN_08BIT, 0x08}, + {0x380d, CRL_REG_LEN_08BIT, 0x70}, + {0x380e, CRL_REG_LEN_08BIT, 0x04}, + {0x380f, CRL_REG_LEN_08BIT, 0x56}, + {0x3810, CRL_REG_LEN_08BIT, 0x00}, + {0x3811, CRL_REG_LEN_08BIT, 0x04}, + {0x3812, CRL_REG_LEN_08BIT, 0x00}, + {0x3813, CRL_REG_LEN_08BIT, 0x04}, + {0x3814, CRL_REG_LEN_08BIT, 0x01}, + {0x3815, CRL_REG_LEN_08BIT, 0x01}, + {0x3820, CRL_REG_LEN_08BIT, 0x80}, + {0x3821, CRL_REG_LEN_08BIT, 0x46}, + {0x3822, CRL_REG_LEN_08BIT, 0x84}, + {0x3829, CRL_REG_LEN_08BIT, 0x00}, + {0x382a, CRL_REG_LEN_08BIT, 0x01}, + {0x382b, CRL_REG_LEN_08BIT, 0x01}, + {0x3830, CRL_REG_LEN_08BIT, 0x04}, + {0x3836, CRL_REG_LEN_08BIT, 0x01}, + {0x3837, CRL_REG_LEN_08BIT, 0x08}, + {0x3839, CRL_REG_LEN_08BIT, 0x01}, + {0x383a, CRL_REG_LEN_08BIT, 0x00}, + {0x383b, CRL_REG_LEN_08BIT, 0x08}, + {0x383c, CRL_REG_LEN_08BIT, 0x00}, + {0x3f0b, CRL_REG_LEN_08BIT, 0x00}, + 
{0x4001, CRL_REG_LEN_08BIT, 0x20}, + {0x4009, CRL_REG_LEN_08BIT, 0x07}, + {0x4003, CRL_REG_LEN_08BIT, 0x10}, + {0x4010, CRL_REG_LEN_08BIT, 0xe0}, + {0x4016, CRL_REG_LEN_08BIT, 0x00}, + {0x4017, CRL_REG_LEN_08BIT, 0x10}, + {0x4044, CRL_REG_LEN_08BIT, 0x02}, + {0x4304, CRL_REG_LEN_08BIT, 0x08}, + {0x4307, CRL_REG_LEN_08BIT, 0x30}, + {0x4320, CRL_REG_LEN_08BIT, 0x80}, + {0x4322, CRL_REG_LEN_08BIT, 0x00}, + {0x4323, CRL_REG_LEN_08BIT, 0x00}, + {0x4324, CRL_REG_LEN_08BIT, 0x00}, + {0x4325, CRL_REG_LEN_08BIT, 0x00}, + {0x4326, CRL_REG_LEN_08BIT, 0x00}, + {0x4327, CRL_REG_LEN_08BIT, 0x00}, + {0x4328, CRL_REG_LEN_08BIT, 0x00}, + {0x4329, CRL_REG_LEN_08BIT, 0x00}, + {0x432c, CRL_REG_LEN_08BIT, 0x03}, + {0x432d, CRL_REG_LEN_08BIT, 0x81}, + {0x4501, CRL_REG_LEN_08BIT, 0x84}, + {0x4502, CRL_REG_LEN_08BIT, 0x40}, + {0x4503, CRL_REG_LEN_08BIT, 0x18}, + {0x4504, CRL_REG_LEN_08BIT, 0x04}, + {0x4508, CRL_REG_LEN_08BIT, 0x02}, + {0x4601, CRL_REG_LEN_08BIT, 0x10}, + {0x4800, CRL_REG_LEN_08BIT, 0x00}, + {0x4816, CRL_REG_LEN_08BIT, 0x52}, + {0x4837, CRL_REG_LEN_08BIT, 0x16}, + {0x5000, CRL_REG_LEN_08BIT, 0x7f}, + {0x5001, CRL_REG_LEN_08BIT, 0x00}, + {0x5005, CRL_REG_LEN_08BIT, 0x38}, + {0x501e, CRL_REG_LEN_08BIT, 0x0d}, + {0x5040, CRL_REG_LEN_08BIT, 0x00}, + {0x5901, CRL_REG_LEN_08BIT, 0x00}, + {0x3800, CRL_REG_LEN_08BIT, 0x00}, + {0x3801, CRL_REG_LEN_08BIT, 0x00}, + {0x3802, CRL_REG_LEN_08BIT, 0x00}, + {0x3803, CRL_REG_LEN_08BIT, 0x00}, + {0x3804, CRL_REG_LEN_08BIT, 0x07}, + {0x3805, CRL_REG_LEN_08BIT, 0x8f}, + {0x3806, CRL_REG_LEN_08BIT, 0x04}, + {0x3807, CRL_REG_LEN_08BIT, 0x47}, + {0x3808, CRL_REG_LEN_08BIT, 0x07}, + {0x3809, CRL_REG_LEN_08BIT, 0x8c}, + {0x380a, CRL_REG_LEN_08BIT, 0x04}, + {0x380b, CRL_REG_LEN_08BIT, 0x44}, + {0x3810, CRL_REG_LEN_08BIT, 0x00}, + {0x3811, CRL_REG_LEN_08BIT, 0x00},/* 00 */ + {0x3812, CRL_REG_LEN_08BIT, 0x00}, + {0x3813, CRL_REG_LEN_08BIT, 0x02},/* 00 */ + {0x4003, CRL_REG_LEN_08BIT, 0x40},/* set Black level to 0x40 */ +}; + +static struct 
crl_register_write_rep ov2740_streamon_regs[] = { + { 0x0100, CRL_REG_LEN_08BIT, 0x01 } +}; + +static struct crl_register_write_rep ov2740_streamoff_regs[] = { + { 0x0100, CRL_REG_LEN_08BIT, 0x00 } +}; + +static struct crl_register_write_rep ov2740_data_fmt_width10[] = { + { 0x3031, CRL_REG_LEN_08BIT, 0x0a } +}; + +static struct crl_arithmetic_ops ov2740_vflip_ops[] = { + { + .op = CRL_BITWISE_LSHIFT, + .operand.entity_val = 1, + }, +}; + +static struct crl_arithmetic_ops ov2740_hflip_ops[] = { + { + .op = CRL_BITWISE_LSHIFT, + .operand.entity_val = 1, + }, +}; + +static struct crl_arithmetic_ops ov2740_hblank_ops[] = { + { + .op = CRL_BITWISE_RSHIFT, + .operand.entity_val = 1, + }, +}; + +static struct crl_arithmetic_ops ov2740_exposure_ops[] = { + { + .op = CRL_BITWISE_LSHIFT, + .operand.entity_val = 4, + }, +}; + +static struct crl_dynamic_register_access ov2740_v_flip_regs[] = { + { + .address = 0x3820, + .len = CRL_REG_LEN_08BIT | CRL_REG_READ_AND_UPDATE, + .ops_items = ARRAY_SIZE(ov2740_vflip_ops), + .ops = ov2740_vflip_ops, + .mask = 0x1, + }, +}; + +static struct crl_dynamic_register_access ov2740_h_flip_regs[] = { + { + .address = 0x3821, + .len = CRL_REG_LEN_08BIT | CRL_REG_READ_AND_UPDATE, + .ops_items = ARRAY_SIZE(ov2740_hflip_ops), + .ops = ov2740_hflip_ops, + .mask = 0x1, + }, +}; + +static struct crl_dynamic_register_access ov2740_dig_gain_regs[] = { + { + .address = 0x500A, + .len = CRL_REG_LEN_16BIT, + .ops_items = 0, + .ops = 0, + .mask = 0xffff, + }, + { + .address = 0x500C, + .len = CRL_REG_LEN_16BIT, + .ops_items = 0, + .ops = 0, + .mask = 0xffff, + }, + { + .address = 0x500E, + .len = CRL_REG_LEN_16BIT, + .ops_items = 0, + .ops = 0, + .mask = 0xffff, + }, +}; + +struct crl_register_write_rep ov2740_poweroff_regset[] = { + { 0x0103, CRL_REG_LEN_08BIT, 0x01 }, +}; + +static struct crl_dynamic_register_access ov2740_ana_gain_global_regs[] = { + { + .address = 0x3508, + .len = CRL_REG_LEN_16BIT, + .ops_items = 0, + .ops = 0, + .mask = 0x7ff, + }, 
+}; + +static struct crl_dynamic_register_access ov2740_exposure_regs[] = { + { + .address = 0x3500, + .len = CRL_REG_LEN_24BIT, + .ops_items = ARRAY_SIZE(ov2740_exposure_ops), + .ops = ov2740_exposure_ops, + .mask = 0x0ffff0, + }, +}; + +static struct crl_dynamic_register_access ov2740_vblank_regs[] = { + { + .address = 0x380E, + .len = CRL_REG_LEN_16BIT, + .ops_items = 0, + .ops = 0, + .mask = 0xffff, + }, +}; + +static struct crl_dynamic_register_access ov2740_hblank_regs[] = { + { + .address = 0x380C, + .len = CRL_REG_LEN_16BIT, + .ops_items = ARRAY_SIZE(ov2740_hblank_ops), + .ops = ov2740_hblank_ops, + .mask = 0xffff, + }, +}; + +static struct crl_sensor_detect_config ov2740_sensor_detect_regset[] = { + { + .reg = { 0x300B, CRL_REG_LEN_08BIT, 0x000000ff }, + .width = 7, + }, + { + .reg = { 0x300C, CRL_REG_LEN_08BIT, 0x000000ff }, + .width = 7, + }, +}; + +static struct crl_pll_configuration ov2740_pll_configurations[] = { + { + .input_clk = 19200000, + .op_sys_clk = 72000000, + .bitsperpixel = 10, + .pixel_rate_csi = 28800000, + .pixel_rate_pa = 28800000, + .csi_lanes = 2, + .comp_items = 0, + .ctrl_data = 0, + .pll_regs_items = 0, + .pll_regs = 0, + }, + +}; + +static struct crl_subdev_rect_rep ov2740_1932x1092_rects_native[] = { + { + .subdev_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .in_rect.left = 0, + .in_rect.top = 0, + .in_rect.width = 1932, + .in_rect.height = 1092, + .out_rect.left = 0, + .out_rect.top = 0, + .out_rect.width = 1932, + .out_rect.height = 1092, + }, + { + .subdev_type = CRL_SUBDEV_TYPE_BINNER, + .in_rect.left = 0, + .in_rect.top = 0, + .in_rect.width = 1932, + .in_rect.height = 1092, + .out_rect.left = 0, + .out_rect.top = 0, + .out_rect.width = 1932, + .out_rect.height = 1092, + }, +}; + +static struct crl_mode_rep ov2740_modes[] = { + { + .sd_rects_items = ARRAY_SIZE(ov2740_1932x1092_rects_native), + .sd_rects = ov2740_1932x1092_rects_native, + .binn_hor = 1, + .binn_vert = 1, + .scale_m = 1, + .width = 1932, + .height = 1092, + .min_llp 
= 2160, + .min_fll = 1110, + .comp_items = 0, + .ctrl_data = 0, + .mode_regs_items = 0, + .mode_regs = 0, + }, +}; + +static struct crl_sensor_subdev_config ov2740_sensor_subdevs[] = { + { + .subdev_type = CRL_SUBDEV_TYPE_BINNER, + .name = "ov2740 binner", + }, + { + .subdev_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .name = "ov2740 pixel array", + }, +}; + +static struct crl_sensor_limits ov2740_sensor_limits = { + .x_addr_min = 0, + .y_addr_min = 0, + .x_addr_max = 1932, + .y_addr_max = 1092, + .min_frame_length_lines = 160, + .max_frame_length_lines = 65535, + .min_line_length_pixels = 2160, + .max_line_length_pixels = 32752, +}; + +static struct crl_flip_data ov2740_flip_configurations[] = { + { + .flip = CRL_FLIP_DEFAULT_NONE, + .pixel_order = CRL_PIXEL_ORDER_BGGR, + }, + { + .flip = CRL_FLIP_VFLIP, + .pixel_order = CRL_PIXEL_ORDER_GRBG, + }, + { + .flip = CRL_FLIP_HFLIP, + .pixel_order = CRL_PIXEL_ORDER_GBRG, + }, + { + .flip = CRL_FLIP_HFLIP_VFLIP, + .pixel_order = CRL_PIXEL_ORDER_RGGB, + }, +}; + +static struct crl_csi_data_fmt ov2740_crl_csi_data_fmt[] = { + { + .code = MEDIA_BUS_FMT_SGRBG10_1X10, + .pixel_order = CRL_PIXEL_ORDER_GRBG, + .bits_per_pixel = 10, + .regs_items = 1, + .regs = ov2740_data_fmt_width10, + }, + { + .code = MEDIA_BUS_FMT_SRGGB10_1X10, + .pixel_order = CRL_PIXEL_ORDER_RGGB, + .regs_items = 1, + .bits_per_pixel = 10, + .regs = ov2740_data_fmt_width10, + }, + { + .code = MEDIA_BUS_FMT_SBGGR10_1X10, + .pixel_order = CRL_PIXEL_ORDER_BGGR, + .regs_items = 1, + .bits_per_pixel = 10, + .regs = ov2740_data_fmt_width10, + }, + { + .code = MEDIA_BUS_FMT_SGBRG10_1X10, + .pixel_order = CRL_PIXEL_ORDER_GBRG, + .regs_items = 1, + .bits_per_pixel = 10, + .regs = ov2740_data_fmt_width10, + }, +}; + +static struct crl_v4l2_ctrl ov2740_v4l2_ctrls[] = { + { + .sd_type = CRL_SUBDEV_TYPE_BINNER, + .op_type = CRL_V4L2_CTRL_SET_OP, + .context = SENSOR_IDLE, + .ctrl_id = V4L2_CID_LINK_FREQ, + .name = "V4L2_CID_LINK_FREQ", + .type = CRL_V4L2_CTRL_TYPE_MENU_INT, + 
.data.v4l2_int_menu.def = 0, + .data.v4l2_int_menu.max = 0, + .data.v4l2_int_menu.menu = 0, + .flags = 0, + .impact = CRL_IMPACTS_NO_IMPACT, + .regs_items = 0, + .regs = 0, + .dep_items = 0, + .dep_ctrls = 0, + }, + { + .sd_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .op_type = CRL_V4L2_CTRL_GET_OP, + .context = SENSOR_POWERED_ON, + .ctrl_id = V4L2_CID_PIXEL_RATE, + .name = "V4L2_CID_PIXEL_RATE_PA", + .type = CRL_V4L2_CTRL_TYPE_INTEGER, + .data.std_data.min = 0, + .data.std_data.max = INT_MAX, + .data.std_data.step = 1, + .data.std_data.def = 0, + .flags = 0, + .impact = CRL_IMPACTS_NO_IMPACT, + .regs_items = 0, + .regs = 0, + .dep_items = 0, + .dep_ctrls = 0, + }, + { + .sd_type = CRL_SUBDEV_TYPE_BINNER, + .op_type = CRL_V4L2_CTRL_GET_OP, + .context = SENSOR_POWERED_ON, + .ctrl_id = V4L2_CID_PIXEL_RATE, + .name = "V4L2_CID_PIXEL_RATE_CSI", + .type = CRL_V4L2_CTRL_TYPE_INTEGER, + .data.std_data.min = 0, + .data.std_data.max = INT_MAX, + .data.std_data.step = 1, + .data.std_data.def = 0, + .flags = 0, + .impact = CRL_IMPACTS_NO_IMPACT, + .regs_items = 0, + .regs = 0, + .dep_items = 0, + .dep_ctrls = 0, + }, + { + .sd_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .op_type = CRL_V4L2_CTRL_SET_OP, + .context = SENSOR_POWERED_ON, + .ctrl_id = V4L2_CID_ANALOGUE_GAIN, + .name = "V4L2_CID_ANALOGUE_GAIN", + .type = CRL_V4L2_CTRL_TYPE_INTEGER, + .data.std_data.min = 0, + .data.std_data.max = 4096, + .data.std_data.step = 1, + .data.std_data.def = 128, + .flags = 0, + .impact = CRL_IMPACTS_NO_IMPACT, + .ctrl = 0, + .regs_items = ARRAY_SIZE(ov2740_ana_gain_global_regs), + .regs = ov2740_ana_gain_global_regs, + .dep_items = 0, + .dep_ctrls = 0, + }, + { + .sd_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .op_type = CRL_V4L2_CTRL_SET_OP, + .context = SENSOR_POWERED_ON, + .ctrl_id = V4L2_CID_EXPOSURE, + .name = "V4L2_CID_EXPOSURE", + .type = CRL_V4L2_CTRL_TYPE_INTEGER, + .data.std_data.min = 0, + .data.std_data.max = 65500, + .data.std_data.step = 1, + .data.std_data.def = 0, + .flags = 0, + .impact = 
CRL_IMPACTS_NO_IMPACT, + .ctrl = 0, + .regs_items = ARRAY_SIZE(ov2740_exposure_regs), + .regs = ov2740_exposure_regs, + .dep_items = 0, /* FLL is changes automatically */ + .dep_ctrls = 0, + }, + { + .sd_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .op_type = CRL_V4L2_CTRL_SET_OP, + .context = SENSOR_POWERED_ON, + .ctrl_id = V4L2_CID_HFLIP, + .name = "V4L2_CID_HFLIP", + .type = CRL_V4L2_CTRL_TYPE_INTEGER, + .data.std_data.min = 0, + .data.std_data.max = 1, + .data.std_data.step = 1, + .data.std_data.def = 0, + .flags = 0, + .impact = CRL_IMPACTS_NO_IMPACT, + .ctrl = 0, + .regs_items = ARRAY_SIZE(ov2740_h_flip_regs), + .regs = ov2740_h_flip_regs, + .dep_items = 0, + .dep_ctrls = 0, + }, + { + .sd_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .op_type = CRL_V4L2_CTRL_SET_OP, + .context = SENSOR_POWERED_ON, + .ctrl_id = V4L2_CID_VFLIP, + .name = "V4L2_CID_VFLIP", + .type = CRL_V4L2_CTRL_TYPE_INTEGER, + .data.std_data.min = 0, + .data.std_data.max = 1, + .data.std_data.step = 1, + .data.std_data.def = 0, + .flags = 0, + .impact = CRL_IMPACTS_NO_IMPACT, + .ctrl = 0, + .regs_items = ARRAY_SIZE(ov2740_v_flip_regs), + .regs = ov2740_v_flip_regs, + .dep_items = 0, + .dep_ctrls = 0, + }, + { + .sd_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .op_type = CRL_V4L2_CTRL_SET_OP, + .context = SENSOR_POWERED_ON, + .ctrl_id = V4L2_CID_FRAME_LENGTH_LINES, + .name = "Frame Length Lines", + .type = CRL_V4L2_CTRL_TYPE_CUSTOM, + .data.std_data.min = 160, + .data.std_data.max = 65535, + .data.std_data.step = 1, + .data.std_data.def = 1110, + .flags = V4L2_CTRL_FLAG_UPDATE, + .impact = CRL_IMPACTS_NO_IMPACT, + .ctrl = 0, + .regs_items = ARRAY_SIZE(ov2740_vblank_regs), + .regs = ov2740_vblank_regs, + .dep_items = 0, + .dep_ctrls = 0, + .v4l2_type = V4L2_CTRL_TYPE_INTEGER, + }, + { + .sd_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .op_type = CRL_V4L2_CTRL_SET_OP, + .context = SENSOR_POWERED_ON, + .ctrl_id = V4L2_CID_LINE_LENGTH_PIXELS, + .name = "Line Length Pixels", + .type = CRL_V4L2_CTRL_TYPE_CUSTOM, + 
.data.std_data.min = 1024, + .data.std_data.max = 65520, + .data.std_data.step = 1, + .data.std_data.def = 2160, + .flags = V4L2_CTRL_FLAG_UPDATE, + .impact = CRL_IMPACTS_NO_IMPACT, + .ctrl = 0, + .regs_items = ARRAY_SIZE(ov2740_hblank_regs), + .regs = ov2740_hblank_regs, + .dep_items = 0, + .dep_ctrls = 0, + .v4l2_type = V4L2_CTRL_TYPE_INTEGER, + }, + { + .sd_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .op_type = CRL_V4L2_CTRL_SET_OP, + .context = SENSOR_POWERED_ON, + .ctrl_id = V4L2_CID_GAIN, + .name = "Digital Gain", + .type = CRL_V4L2_CTRL_TYPE_INTEGER, + .data.std_data.min = 0, + .data.std_data.max = 4095, + .data.std_data.step = 1, + .data.std_data.def = 1024, + .flags = 0, + .impact = CRL_IMPACTS_NO_IMPACT, + .ctrl = 0, + .regs_items = ARRAY_SIZE(ov2740_dig_gain_regs), + .regs = ov2740_dig_gain_regs, + .dep_items = 0, + .dep_ctrls = 0, + }, + { + .sd_type = CRL_SUBDEV_TYPE_BINNER, + .op_type = CRL_V4L2_CTRL_SET_OP, + .context = SENSOR_POWERED_ON, + .ctrl_id = V4L2_CID_WDR_MODE, + .name = "V4L2_CID_WDR_MODE", + .type = CRL_V4L2_CTRL_TYPE_CUSTOM, + .data.std_data.min = 0, + .data.std_data.max = 1, + .data.std_data.step = 1, + .data.std_data.def = 0, + .flags = V4L2_CTRL_FLAG_UPDATE, + .impact = CRL_IMPACTS_MODE_SELECTION, + .ctrl = 0, + .regs_items = 0, + .regs = 0, + .dep_items = 0, + .dep_ctrls = 0, + .v4l2_type = V4L2_CTRL_TYPE_INTEGER, + }, +}; + +static struct crl_arithmetic_ops ov2740_frame_desc_width_ops[] = { + { + .op = CRL_ASSIGNMENT, + .operand.entity_type = CRL_DYNAMIC_VAL_OPERAND_TYPE_VAR_REF, + .operand.entity_val = CRL_VAR_REF_OUTPUT_WIDTH, + }, +}; + +static struct crl_arithmetic_ops ov2740_frame_desc_height_ops[] = { + { + .op = CRL_ASSIGNMENT, + .operand.entity_type = CRL_DYNAMIC_VAL_OPERAND_TYPE_CONST, + .operand.entity_val = 1, + }, +}; + +static struct crl_frame_desc ov2740_frame_desc[] = { + { + .flags.entity_val = 0, + .bpp.entity_type = CRL_DYNAMIC_VAL_OPERAND_TYPE_VAR_REF, + .bpp.entity_val = CRL_VAR_REF_BITSPERPIXEL, + .pixelcode.entity_val 
= MEDIA_BUS_FMT_FIXED, + .length.entity_val = 0, + .start_line.entity_val = 0, + .start_pixel.entity_val = 0, + .width = { + .ops_items = ARRAY_SIZE(ov2740_frame_desc_width_ops), + .ops = ov2740_frame_desc_width_ops, + }, + .height = { + .ops_items = ARRAY_SIZE(ov2740_frame_desc_height_ops), + .ops = ov2740_frame_desc_height_ops, + }, + .csi2_channel.entity_val = 0, + .csi2_data_type.entity_val = 0x12, + }, +}; + +/* Power items, they are enabled in the order they are listed here */ +static struct crl_power_seq_entity ov2740_power_items[] = { + { + .type = CRL_POWER_ETY_CLK_FRAMEWORK, + .val = 19200000, + }, + { + .type = CRL_POWER_ETY_GPIO_FROM_PDATA, + .val = 1, + .undo_val = 0, + .delay = 1000, + }, +}; + +static struct crl_sensor_configuration ov2740_crl_configuration = { + + .power_items = ARRAY_SIZE(ov2740_power_items), + .power_entities = ov2740_power_items, + + .powerup_regs_items = ARRAY_SIZE(ov2740_powerup_regset), + .powerup_regs = ov2740_powerup_regset, + + .poweroff_regs_items = 0, + .poweroff_regs = 0, + + + .id_reg_items = ARRAY_SIZE(ov2740_sensor_detect_regset), + .id_regs = ov2740_sensor_detect_regset, + + .subdev_items = ARRAY_SIZE(ov2740_sensor_subdevs), + .subdevs = ov2740_sensor_subdevs, + + .sensor_limits = &ov2740_sensor_limits, + + .pll_config_items = ARRAY_SIZE(ov2740_pll_configurations), + .pll_configs = ov2740_pll_configurations, + + .modes_items = ARRAY_SIZE(ov2740_modes), + .modes = ov2740_modes, + + .streamon_regs_items = ARRAY_SIZE(ov2740_streamon_regs), + .streamon_regs = ov2740_streamon_regs, + + .streamoff_regs_items = ARRAY_SIZE(ov2740_streamoff_regs), + .streamoff_regs = ov2740_streamoff_regs, + + .v4l2_ctrls_items = ARRAY_SIZE(ov2740_v4l2_ctrls), + .v4l2_ctrl_bank = ov2740_v4l2_ctrls, + + .csi_fmts_items = ARRAY_SIZE(ov2740_crl_csi_data_fmt), + .csi_fmts = ov2740_crl_csi_data_fmt, + + .flip_items = ARRAY_SIZE(ov2740_flip_configurations), + .flip_data = ov2740_flip_configurations, + + .crl_nvm_info.nvm_flags = 
CRL_NVM_ADDR_MODE_16BIT, + .crl_nvm_info.nvm_preop_regs_items = 0, + .crl_nvm_info.nvm_postop_regs_items = 0, + .crl_nvm_info.nvm_blobs_items = 0, + + .frame_desc_entries = ARRAY_SIZE(ov2740_frame_desc), + .frame_desc_type = CRL_V4L2_MBUS_FRAME_DESC_TYPE_CSI2, + .frame_desc = ov2740_frame_desc, + + .msr_file_name = "", +}; + +#endif /* __CRLMODULE_OV2740_CONFIGURATION_H_ */ diff --git a/drivers/media/i2c/crlmodule/crl_ov2775_configuration.h b/drivers/media/i2c/crlmodule/crl_ov2775_configuration.h new file mode 100644 index 0000000000000..1734ed6867659 --- /dev/null +++ b/drivers/media/i2c/crlmodule/crl_ov2775_configuration.h @@ -0,0 +1,8145 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (C) 2018 Intel Corporation + * + * Author: Leo Zhao + * Author: Mingda Xu + * + */ + +#ifndef __CRLMODULE_OV2775_CONFIGURATION_H_ +#define __CRLMODULE_OV2775_CONFIGURATION_H_ + +#include "crlmodule-nvm.h" +#include "crlmodule-sensor-ds.h" + +#define OV2775_CAPTURE_MODE_MAX 4 + +static struct crl_register_write_rep + ov2775_linear_hcg_30fps_mipi960_regset[] = { + {0x3000, CRL_REG_LEN_08BIT, 0x02}, + {0x3001, CRL_REG_LEN_08BIT, 0x28}, + {0x3002, CRL_REG_LEN_08BIT, 0x03}, + {0x3003, CRL_REG_LEN_08BIT, 0x01}, + {0x3004, CRL_REG_LEN_08BIT, 0x02}, + {0x3005, CRL_REG_LEN_08BIT, 0x26}, + {0x3006, CRL_REG_LEN_08BIT, 0x00}, + {0x3007, CRL_REG_LEN_08BIT, 0x07}, + {0x3008, CRL_REG_LEN_08BIT, 0x01}, + {0x3009, CRL_REG_LEN_08BIT, 0x00}, + {0x300c, CRL_REG_LEN_08BIT, 0x6c}, + {0x300e, CRL_REG_LEN_08BIT, 0x80}, + {0x300f, CRL_REG_LEN_08BIT, 0x00}, + {0x3012, CRL_REG_LEN_08BIT, 0x00}, + {0x3014, CRL_REG_LEN_08BIT, 0xc4}, + {0x3015, CRL_REG_LEN_08BIT, 0x00}, + {0x3017, CRL_REG_LEN_08BIT, 0x00}, + {0x3018, CRL_REG_LEN_08BIT, 0x00}, + {0x3019, CRL_REG_LEN_08BIT, 0x00}, + {0x301a, CRL_REG_LEN_08BIT, 0x00}, + {0x301b, CRL_REG_LEN_08BIT, 0x0e}, + {0x301e, CRL_REG_LEN_08BIT, 0x17}, + {0x301f, CRL_REG_LEN_08BIT, 0xe1}, + {0x3030, CRL_REG_LEN_08BIT, 0x02}, + {0x3031, CRL_REG_LEN_08BIT, 0x62}, + 
{0x3032, CRL_REG_LEN_08BIT, 0xf0}, + {0x3033, CRL_REG_LEN_08BIT, 0x30}, + {0x3034, CRL_REG_LEN_08BIT, 0x3f}, + {0x3035, CRL_REG_LEN_08BIT, 0x5f}, + {0x3036, CRL_REG_LEN_08BIT, 0x02}, + {0x3037, CRL_REG_LEN_08BIT, 0x9f}, + {0x3038, CRL_REG_LEN_08BIT, 0x04}, + {0x3039, CRL_REG_LEN_08BIT, 0xb7}, + {0x303a, CRL_REG_LEN_08BIT, 0x04}, + {0x303b, CRL_REG_LEN_08BIT, 0x07}, + {0x303c, CRL_REG_LEN_08BIT, 0xf0}, + {0x303d, CRL_REG_LEN_08BIT, 0x00}, + {0x303e, CRL_REG_LEN_08BIT, 0x0b}, + {0x303f, CRL_REG_LEN_08BIT, 0xe3}, + {0x3040, CRL_REG_LEN_08BIT, 0xf3}, + {0x3041, CRL_REG_LEN_08BIT, 0x29}, + {0x3042, CRL_REG_LEN_08BIT, 0xf6}, + {0x3043, CRL_REG_LEN_08BIT, 0x65}, + {0x3044, CRL_REG_LEN_08BIT, 0x06}, + {0x3045, CRL_REG_LEN_08BIT, 0x0f}, + {0x3046, CRL_REG_LEN_08BIT, 0x59}, + {0x3047, CRL_REG_LEN_08BIT, 0x07}, + {0x3048, CRL_REG_LEN_08BIT, 0x82}, + {0x3049, CRL_REG_LEN_08BIT, 0xcf}, + {0x304a, CRL_REG_LEN_08BIT, 0x12}, + {0x304b, CRL_REG_LEN_08BIT, 0x40}, + {0x304c, CRL_REG_LEN_08BIT, 0x33}, + {0x304d, CRL_REG_LEN_08BIT, 0xa4}, + {0x304e, CRL_REG_LEN_08BIT, 0x0b}, + {0x304f, CRL_REG_LEN_08BIT, 0x3d}, + {0x3050, CRL_REG_LEN_08BIT, 0x10}, + {0x3060, CRL_REG_LEN_08BIT, 0x00}, + {0x3061, CRL_REG_LEN_08BIT, 0x64}, + {0x3062, CRL_REG_LEN_08BIT, 0x00}, + {0x3063, CRL_REG_LEN_08BIT, 0xe4}, + {0x3066, CRL_REG_LEN_08BIT, 0x80}, + {0x3080, CRL_REG_LEN_08BIT, 0x00}, + {0x3081, CRL_REG_LEN_08BIT, 0x00}, + {0x3082, CRL_REG_LEN_08BIT, 0x01}, + {0x3083, CRL_REG_LEN_08BIT, 0xe3}, + {0x3084, CRL_REG_LEN_08BIT, 0x06}, + {0x3085, CRL_REG_LEN_08BIT, 0x00}, + {0x3086, CRL_REG_LEN_08BIT, 0x10}, + {0x3087, CRL_REG_LEN_08BIT, 0x10}, + {0x3089, CRL_REG_LEN_08BIT, 0x00}, + {0x308a, CRL_REG_LEN_08BIT, 0x01}, + {0x3093, CRL_REG_LEN_08BIT, 0x00}, + {0x30a0, CRL_REG_LEN_08BIT, 0x00}, + {0x30a1, CRL_REG_LEN_08BIT, 0x04}, + {0x30a2, CRL_REG_LEN_08BIT, 0x00}, + {0x30a3, CRL_REG_LEN_08BIT, 0x08}, + {0x30a4, CRL_REG_LEN_08BIT, 0x07}, + {0x30a5, CRL_REG_LEN_08BIT, 0x8b}, + {0x30a6, CRL_REG_LEN_08BIT, 0x04}, + 
{0x30a7, CRL_REG_LEN_08BIT, 0x3f}, + {0x30a8, CRL_REG_LEN_08BIT, 0x00}, + {0x30a9, CRL_REG_LEN_08BIT, 0x04}, + {0x30aa, CRL_REG_LEN_08BIT, 0x00}, + {0x30ab, CRL_REG_LEN_08BIT, 0x00}, + {0x30ac, CRL_REG_LEN_08BIT, 0x07}, + {0x30ad, CRL_REG_LEN_08BIT, 0x80}, + {0x30ae, CRL_REG_LEN_08BIT, 0x04}, + {0x30af, CRL_REG_LEN_08BIT, 0x40}, + {0x30b4, CRL_REG_LEN_08BIT, 0x00}, + {0x30b5, CRL_REG_LEN_08BIT, 0x00}, + {0x30ba, CRL_REG_LEN_08BIT, 0x10}, + {0x30bc, CRL_REG_LEN_08BIT, 0x00}, + {0x30bd, CRL_REG_LEN_08BIT, 0x03}, + {0x30be, CRL_REG_LEN_08BIT, 0x5c}, + {0x30bf, CRL_REG_LEN_08BIT, 0x00}, + {0x30c0, CRL_REG_LEN_08BIT, 0x01}, + {0x30c1, CRL_REG_LEN_08BIT, 0x00}, + {0x30c2, CRL_REG_LEN_08BIT, 0x20}, + {0x30c3, CRL_REG_LEN_08BIT, 0x00}, + {0x30c4, CRL_REG_LEN_08BIT, 0x4a}, + {0x30c5, CRL_REG_LEN_08BIT, 0x00}, + {0x30c7, CRL_REG_LEN_08BIT, 0x00}, + {0x30c8, CRL_REG_LEN_08BIT, 0x00}, + {0x30d1, CRL_REG_LEN_08BIT, 0x00}, + {0x30d2, CRL_REG_LEN_08BIT, 0x00}, + {0x30d3, CRL_REG_LEN_08BIT, 0x80}, + {0x30d4, CRL_REG_LEN_08BIT, 0x00}, + {0x30d9, CRL_REG_LEN_08BIT, 0x09}, + {0x30da, CRL_REG_LEN_08BIT, 0x64}, + {0x30dd, CRL_REG_LEN_08BIT, 0x00}, + {0x30de, CRL_REG_LEN_08BIT, 0x16}, + {0x30df, CRL_REG_LEN_08BIT, 0x00}, + {0x30e0, CRL_REG_LEN_08BIT, 0x17}, + {0x30e1, CRL_REG_LEN_08BIT, 0x00}, + {0x30e2, CRL_REG_LEN_08BIT, 0x18}, + {0x30e3, CRL_REG_LEN_08BIT, 0x10}, + {0x30e4, CRL_REG_LEN_08BIT, 0x04}, + {0x30e5, CRL_REG_LEN_08BIT, 0x00}, + {0x30e6, CRL_REG_LEN_08BIT, 0x00}, + {0x30e7, CRL_REG_LEN_08BIT, 0x00}, + {0x30e8, CRL_REG_LEN_08BIT, 0x00}, + {0x30e9, CRL_REG_LEN_08BIT, 0x00}, + {0x30ea, CRL_REG_LEN_08BIT, 0x00}, + {0x30eb, CRL_REG_LEN_08BIT, 0x00}, + {0x30ec, CRL_REG_LEN_08BIT, 0x00}, + {0x30ed, CRL_REG_LEN_08BIT, 0x00}, + {0x3101, CRL_REG_LEN_08BIT, 0x00}, + {0x3102, CRL_REG_LEN_08BIT, 0x00}, + {0x3103, CRL_REG_LEN_08BIT, 0x00}, + {0x3104, CRL_REG_LEN_08BIT, 0x00}, + {0x3105, CRL_REG_LEN_08BIT, 0x8c}, + {0x3106, CRL_REG_LEN_08BIT, 0x87}, + {0x3107, CRL_REG_LEN_08BIT, 0xc0}, + 
{0x3108, CRL_REG_LEN_08BIT, 0x9d}, + {0x3109, CRL_REG_LEN_08BIT, 0x8d}, + {0x310a, CRL_REG_LEN_08BIT, 0x8d}, + {0x310b, CRL_REG_LEN_08BIT, 0x6a}, + {0x310c, CRL_REG_LEN_08BIT, 0x3a}, + {0x310d, CRL_REG_LEN_08BIT, 0x5a}, + {0x310e, CRL_REG_LEN_08BIT, 0x00}, + {0x3120, CRL_REG_LEN_08BIT, 0x00}, + {0x3121, CRL_REG_LEN_08BIT, 0x00}, + {0x3122, CRL_REG_LEN_08BIT, 0x00}, + {0x3123, CRL_REG_LEN_08BIT, 0xf0}, + {0x3124, CRL_REG_LEN_08BIT, 0x00}, + {0x3125, CRL_REG_LEN_08BIT, 0x70}, + {0x3126, CRL_REG_LEN_08BIT, 0x1f}, + {0x3127, CRL_REG_LEN_08BIT, 0x0f}, + {0x3128, CRL_REG_LEN_08BIT, 0x00}, + {0x3129, CRL_REG_LEN_08BIT, 0x3a}, + {0x312a, CRL_REG_LEN_08BIT, 0x02}, + {0x312b, CRL_REG_LEN_08BIT, 0x0f}, + {0x312c, CRL_REG_LEN_08BIT, 0x00}, + {0x312d, CRL_REG_LEN_08BIT, 0x0f}, + {0x312e, CRL_REG_LEN_08BIT, 0x1d}, + {0x312f, CRL_REG_LEN_08BIT, 0x00}, + {0x3130, CRL_REG_LEN_08BIT, 0x00}, + {0x3131, CRL_REG_LEN_08BIT, 0x00}, + {0x3132, CRL_REG_LEN_08BIT, 0x00}, + {0x3140, CRL_REG_LEN_08BIT, 0x0a}, + {0x3141, CRL_REG_LEN_08BIT, 0x03}, + {0x3142, CRL_REG_LEN_08BIT, 0x00}, + {0x3143, CRL_REG_LEN_08BIT, 0x00}, + {0x3144, CRL_REG_LEN_08BIT, 0x00}, + {0x3145, CRL_REG_LEN_08BIT, 0x00}, + {0x3146, CRL_REG_LEN_08BIT, 0x00}, + {0x3147, CRL_REG_LEN_08BIT, 0x00}, + {0x3148, CRL_REG_LEN_08BIT, 0x00}, + {0x3149, CRL_REG_LEN_08BIT, 0x00}, + {0x314a, CRL_REG_LEN_08BIT, 0x00}, + {0x314b, CRL_REG_LEN_08BIT, 0x00}, + {0x314c, CRL_REG_LEN_08BIT, 0x00}, + {0x314d, CRL_REG_LEN_08BIT, 0x00}, + {0x314e, CRL_REG_LEN_08BIT, 0x1c}, + {0x314f, CRL_REG_LEN_08BIT, 0xff}, + {0x3150, CRL_REG_LEN_08BIT, 0xff}, + {0x3151, CRL_REG_LEN_08BIT, 0xff}, + {0x3152, CRL_REG_LEN_08BIT, 0x10}, + {0x3153, CRL_REG_LEN_08BIT, 0x10}, + {0x3154, CRL_REG_LEN_08BIT, 0x10}, + {0x3155, CRL_REG_LEN_08BIT, 0x00}, + {0x3156, CRL_REG_LEN_08BIT, 0x03}, + {0x3157, CRL_REG_LEN_08BIT, 0x00}, + {0x3158, CRL_REG_LEN_08BIT, 0x0f}, + {0x3159, CRL_REG_LEN_08BIT, 0xff}, + {0x3160, CRL_REG_LEN_08BIT, 0x01}, + {0x3161, CRL_REG_LEN_08BIT, 0x00}, + 
{0x3162, CRL_REG_LEN_08BIT, 0x01}, + {0x3163, CRL_REG_LEN_08BIT, 0x00}, + {0x3164, CRL_REG_LEN_08BIT, 0x01}, + {0x3165, CRL_REG_LEN_08BIT, 0x00}, + {0x3190, CRL_REG_LEN_08BIT, 0x08}, + {0x3191, CRL_REG_LEN_08BIT, 0x99}, + {0x3193, CRL_REG_LEN_08BIT, 0x08}, + {0x3194, CRL_REG_LEN_08BIT, 0x13}, + {0x3195, CRL_REG_LEN_08BIT, 0x33}, + {0x3196, CRL_REG_LEN_08BIT, 0x00}, + {0x3197, CRL_REG_LEN_08BIT, 0x10}, + {0x3198, CRL_REG_LEN_08BIT, 0x00}, + {0x3199, CRL_REG_LEN_08BIT, 0x7f}, + {0x319a, CRL_REG_LEN_08BIT, 0x80}, + {0x319b, CRL_REG_LEN_08BIT, 0xff}, + {0x319c, CRL_REG_LEN_08BIT, 0x80}, + {0x319d, CRL_REG_LEN_08BIT, 0xbf}, + {0x319e, CRL_REG_LEN_08BIT, 0xc0}, + {0x319f, CRL_REG_LEN_08BIT, 0xff}, + {0x31a0, CRL_REG_LEN_08BIT, 0x24}, + {0x31a1, CRL_REG_LEN_08BIT, 0x55}, + {0x31a2, CRL_REG_LEN_08BIT, 0x00}, + {0x31a3, CRL_REG_LEN_08BIT, 0x08}, + {0x31a6, CRL_REG_LEN_08BIT, 0x00}, + {0x31a7, CRL_REG_LEN_08BIT, 0x00}, + {0x31b0, CRL_REG_LEN_08BIT, 0x00}, + {0x31b1, CRL_REG_LEN_08BIT, 0x00}, + {0x31b2, CRL_REG_LEN_08BIT, 0x02}, + {0x31b3, CRL_REG_LEN_08BIT, 0x00}, + {0x31b4, CRL_REG_LEN_08BIT, 0x00}, + {0x31b5, CRL_REG_LEN_08BIT, 0x01}, + {0x31b6, CRL_REG_LEN_08BIT, 0x00}, + {0x31b7, CRL_REG_LEN_08BIT, 0x00}, + {0x31b8, CRL_REG_LEN_08BIT, 0x00}, + {0x31b9, CRL_REG_LEN_08BIT, 0x00}, + {0x31ba, CRL_REG_LEN_08BIT, 0x00}, + {0x31d0, CRL_REG_LEN_08BIT, 0x3c}, + {0x31d1, CRL_REG_LEN_08BIT, 0x34}, + {0x31d2, CRL_REG_LEN_08BIT, 0x3c}, + {0x31d3, CRL_REG_LEN_08BIT, 0x00}, + {0x31d4, CRL_REG_LEN_08BIT, 0x2d}, + {0x31d5, CRL_REG_LEN_08BIT, 0x00}, + {0x31d6, CRL_REG_LEN_08BIT, 0x01}, + {0x31d7, CRL_REG_LEN_08BIT, 0x06}, + {0x31d8, CRL_REG_LEN_08BIT, 0x00}, + {0x31d9, CRL_REG_LEN_08BIT, 0x64}, + {0x31da, CRL_REG_LEN_08BIT, 0x00}, + {0x31db, CRL_REG_LEN_08BIT, 0x30}, + {0x31dc, CRL_REG_LEN_08BIT, 0x04}, + {0x31dd, CRL_REG_LEN_08BIT, 0x69}, + {0x31de, CRL_REG_LEN_08BIT, 0x0a}, + {0x31df, CRL_REG_LEN_08BIT, 0x3c}, + {0x31e0, CRL_REG_LEN_08BIT, 0x04}, + {0x31e1, CRL_REG_LEN_08BIT, 0x32}, + 
{0x31e2, CRL_REG_LEN_08BIT, 0x00}, + {0x31e3, CRL_REG_LEN_08BIT, 0x00}, + {0x31e4, CRL_REG_LEN_08BIT, 0x08}, + {0x31e5, CRL_REG_LEN_08BIT, 0x80}, + {0x31e6, CRL_REG_LEN_08BIT, 0x00}, + {0x31e7, CRL_REG_LEN_08BIT, 0x2c}, + {0x31e8, CRL_REG_LEN_08BIT, 0x6c}, + {0x31e9, CRL_REG_LEN_08BIT, 0xac}, + {0x31ea, CRL_REG_LEN_08BIT, 0xec}, + {0x31eb, CRL_REG_LEN_08BIT, 0x3f}, + {0x31ec, CRL_REG_LEN_08BIT, 0x0f}, + {0x31ed, CRL_REG_LEN_08BIT, 0x20}, + {0x31ee, CRL_REG_LEN_08BIT, 0x04}, + {0x31ef, CRL_REG_LEN_08BIT, 0x48}, + {0x31f0, CRL_REG_LEN_08BIT, 0x07}, + {0x31f1, CRL_REG_LEN_08BIT, 0x90}, + {0x31f2, CRL_REG_LEN_08BIT, 0x04}, + {0x31f3, CRL_REG_LEN_08BIT, 0x48}, + {0x31f4, CRL_REG_LEN_08BIT, 0x07}, + {0x31f5, CRL_REG_LEN_08BIT, 0x90}, + {0x31f6, CRL_REG_LEN_08BIT, 0x04}, + {0x31f7, CRL_REG_LEN_08BIT, 0x48}, + {0x31f8, CRL_REG_LEN_08BIT, 0x07}, + {0x31f9, CRL_REG_LEN_08BIT, 0x90}, + {0x31fa, CRL_REG_LEN_08BIT, 0x04}, + {0x31fb, CRL_REG_LEN_08BIT, 0x48}, + {0x31fd, CRL_REG_LEN_08BIT, 0xcb}, + {0x31fe, CRL_REG_LEN_08BIT, 0x01}, + {0x31ff, CRL_REG_LEN_08BIT, 0x03}, + {0x3200, CRL_REG_LEN_08BIT, 0x00}, + {0x3201, CRL_REG_LEN_08BIT, 0xff}, + {0x3202, CRL_REG_LEN_08BIT, 0x00}, + {0x3203, CRL_REG_LEN_08BIT, 0xff}, + {0x3204, CRL_REG_LEN_08BIT, 0xff}, + {0x3205, CRL_REG_LEN_08BIT, 0xff}, + {0x3206, CRL_REG_LEN_08BIT, 0xff}, + {0x3207, CRL_REG_LEN_08BIT, 0xff}, + {0x3208, CRL_REG_LEN_08BIT, 0xff}, + {0x3209, CRL_REG_LEN_08BIT, 0xff}, + {0x320a, CRL_REG_LEN_08BIT, 0xff}, + {0x320b, CRL_REG_LEN_08BIT, 0x1b}, + {0x320c, CRL_REG_LEN_08BIT, 0x1f}, + {0x320d, CRL_REG_LEN_08BIT, 0x1e}, + {0x320e, CRL_REG_LEN_08BIT, 0x30}, + {0x320f, CRL_REG_LEN_08BIT, 0x2d}, + {0x3210, CRL_REG_LEN_08BIT, 0x2c}, + {0x3211, CRL_REG_LEN_08BIT, 0x2b}, + {0x3212, CRL_REG_LEN_08BIT, 0x2a}, + {0x3213, CRL_REG_LEN_08BIT, 0x24}, + {0x3214, CRL_REG_LEN_08BIT, 0x22}, + {0x3215, CRL_REG_LEN_08BIT, 0x00}, + {0x3216, CRL_REG_LEN_08BIT, 0x04}, + {0x3217, CRL_REG_LEN_08BIT, 0x2c}, + {0x3218, CRL_REG_LEN_08BIT, 0x6c}, + 
{0x3219, CRL_REG_LEN_08BIT, 0xac}, + {0x321a, CRL_REG_LEN_08BIT, 0xec}, + {0x321b, CRL_REG_LEN_08BIT, 0x00}, + {0x3230, CRL_REG_LEN_08BIT, 0x3a}, + {0x3231, CRL_REG_LEN_08BIT, 0x00}, + {0x3232, CRL_REG_LEN_08BIT, 0x80}, + {0x3233, CRL_REG_LEN_08BIT, 0x00}, + {0x3234, CRL_REG_LEN_08BIT, 0x10}, + {0x3235, CRL_REG_LEN_08BIT, 0xaa}, + {0x3236, CRL_REG_LEN_08BIT, 0x55}, + {0x3237, CRL_REG_LEN_08BIT, 0x99}, + {0x3238, CRL_REG_LEN_08BIT, 0x66}, + {0x3239, CRL_REG_LEN_08BIT, 0x08}, + {0x323a, CRL_REG_LEN_08BIT, 0x88}, + {0x323b, CRL_REG_LEN_08BIT, 0x00}, + {0x323c, CRL_REG_LEN_08BIT, 0x00}, + {0x323d, CRL_REG_LEN_08BIT, 0x03}, + {0x3250, CRL_REG_LEN_08BIT, 0x33}, + {0x3251, CRL_REG_LEN_08BIT, 0x00}, + {0x3252, CRL_REG_LEN_08BIT, 0x20}, + {0x3253, CRL_REG_LEN_08BIT, 0x00}, + {0x3254, CRL_REG_LEN_08BIT, 0x00}, + {0x3255, CRL_REG_LEN_08BIT, 0x01}, + {0x3256, CRL_REG_LEN_08BIT, 0x00}, + {0x3257, CRL_REG_LEN_08BIT, 0x00}, + {0x3258, CRL_REG_LEN_08BIT, 0x00}, + {0x3270, CRL_REG_LEN_08BIT, 0x01}, + {0x3271, CRL_REG_LEN_08BIT, 0xc0}, + {0x3272, CRL_REG_LEN_08BIT, 0xf0}, + {0x3273, CRL_REG_LEN_08BIT, 0x01}, + {0x3274, CRL_REG_LEN_08BIT, 0x00}, + {0x3275, CRL_REG_LEN_08BIT, 0x40}, + {0x3276, CRL_REG_LEN_08BIT, 0x02}, + {0x3277, CRL_REG_LEN_08BIT, 0x08}, + {0x3278, CRL_REG_LEN_08BIT, 0x10}, + {0x3279, CRL_REG_LEN_08BIT, 0x04}, + {0x327a, CRL_REG_LEN_08BIT, 0x00}, + {0x327b, CRL_REG_LEN_08BIT, 0x03}, + {0x327c, CRL_REG_LEN_08BIT, 0x10}, + {0x327d, CRL_REG_LEN_08BIT, 0x60}, + {0x327e, CRL_REG_LEN_08BIT, 0xc0}, + {0x327f, CRL_REG_LEN_08BIT, 0x06}, + {0x3288, CRL_REG_LEN_08BIT, 0x10}, + {0x3289, CRL_REG_LEN_08BIT, 0x00}, + {0x328a, CRL_REG_LEN_08BIT, 0x08}, + {0x328b, CRL_REG_LEN_08BIT, 0x00}, + {0x328c, CRL_REG_LEN_08BIT, 0x04}, + {0x328d, CRL_REG_LEN_08BIT, 0x00}, + {0x328e, CRL_REG_LEN_08BIT, 0x02}, + {0x328f, CRL_REG_LEN_08BIT, 0x00}, + {0x3290, CRL_REG_LEN_08BIT, 0x20}, + {0x3291, CRL_REG_LEN_08BIT, 0x00}, + {0x3292, CRL_REG_LEN_08BIT, 0x10}, + {0x3293, CRL_REG_LEN_08BIT, 0x00}, + 
{0x3294, CRL_REG_LEN_08BIT, 0x08}, + {0x3295, CRL_REG_LEN_08BIT, 0x00}, + {0x3296, CRL_REG_LEN_08BIT, 0x04}, + {0x3297, CRL_REG_LEN_08BIT, 0x00}, + {0x3298, CRL_REG_LEN_08BIT, 0x40}, + {0x3299, CRL_REG_LEN_08BIT, 0x00}, + {0x329a, CRL_REG_LEN_08BIT, 0x20}, + {0x329b, CRL_REG_LEN_08BIT, 0x00}, + {0x329c, CRL_REG_LEN_08BIT, 0x10}, + {0x329d, CRL_REG_LEN_08BIT, 0x00}, + {0x329e, CRL_REG_LEN_08BIT, 0x08}, + {0x329f, CRL_REG_LEN_08BIT, 0x00}, + {0x32a0, CRL_REG_LEN_08BIT, 0x7f}, + {0x32a1, CRL_REG_LEN_08BIT, 0xff}, + {0x32a2, CRL_REG_LEN_08BIT, 0x40}, + {0x32a3, CRL_REG_LEN_08BIT, 0x00}, + {0x32a4, CRL_REG_LEN_08BIT, 0x20}, + {0x32a5, CRL_REG_LEN_08BIT, 0x00}, + {0x32a6, CRL_REG_LEN_08BIT, 0x10}, + {0x32a7, CRL_REG_LEN_08BIT, 0x00}, + {0x32a8, CRL_REG_LEN_08BIT, 0x00}, + {0x32a9, CRL_REG_LEN_08BIT, 0x00}, + {0x32aa, CRL_REG_LEN_08BIT, 0x00}, + {0x32ab, CRL_REG_LEN_08BIT, 0x00}, + {0x32ac, CRL_REG_LEN_08BIT, 0x00}, + {0x32ad, CRL_REG_LEN_08BIT, 0x00}, + {0x32ae, CRL_REG_LEN_08BIT, 0x00}, + {0x32af, CRL_REG_LEN_08BIT, 0x00}, + {0x32b0, CRL_REG_LEN_08BIT, 0x00}, + {0x32b1, CRL_REG_LEN_08BIT, 0x00}, + {0x32b2, CRL_REG_LEN_08BIT, 0x00}, + {0x32b3, CRL_REG_LEN_08BIT, 0x00}, + {0x32b4, CRL_REG_LEN_08BIT, 0x00}, + {0x32b5, CRL_REG_LEN_08BIT, 0x00}, + {0x32b6, CRL_REG_LEN_08BIT, 0x00}, + {0x32b7, CRL_REG_LEN_08BIT, 0x00}, + {0x32b8, CRL_REG_LEN_08BIT, 0x00}, + {0x32b9, CRL_REG_LEN_08BIT, 0x00}, + {0x32ba, CRL_REG_LEN_08BIT, 0x00}, + {0x32bb, CRL_REG_LEN_08BIT, 0x00}, + {0x32bc, CRL_REG_LEN_08BIT, 0x00}, + {0x32bd, CRL_REG_LEN_08BIT, 0x00}, + {0x32be, CRL_REG_LEN_08BIT, 0x00}, + {0x32bf, CRL_REG_LEN_08BIT, 0x00}, + {0x32c0, CRL_REG_LEN_08BIT, 0x00}, + {0x32c1, CRL_REG_LEN_08BIT, 0x00}, + {0x32c2, CRL_REG_LEN_08BIT, 0x00}, + {0x32c3, CRL_REG_LEN_08BIT, 0x00}, + {0x32c4, CRL_REG_LEN_08BIT, 0x00}, + {0x32c5, CRL_REG_LEN_08BIT, 0x00}, + {0x32c6, CRL_REG_LEN_08BIT, 0x00}, + {0x32c7, CRL_REG_LEN_08BIT, 0x00}, + {0x32c8, CRL_REG_LEN_08BIT, 0x87}, + {0x32c9, CRL_REG_LEN_08BIT, 0x00}, + 
{0x3330, CRL_REG_LEN_08BIT, 0x03}, + {0x3331, CRL_REG_LEN_08BIT, 0xc8}, + {0x3332, CRL_REG_LEN_08BIT, 0x02}, + {0x3333, CRL_REG_LEN_08BIT, 0x24}, + {0x3334, CRL_REG_LEN_08BIT, 0x00}, + {0x3335, CRL_REG_LEN_08BIT, 0x00}, + {0x3336, CRL_REG_LEN_08BIT, 0x00}, + {0x3337, CRL_REG_LEN_08BIT, 0x00}, + {0x3338, CRL_REG_LEN_08BIT, 0x03}, + {0x3339, CRL_REG_LEN_08BIT, 0xc8}, + {0x333a, CRL_REG_LEN_08BIT, 0x02}, + {0x333b, CRL_REG_LEN_08BIT, 0x24}, + {0x333c, CRL_REG_LEN_08BIT, 0x00}, + {0x333d, CRL_REG_LEN_08BIT, 0x00}, + {0x333e, CRL_REG_LEN_08BIT, 0x00}, + {0x333f, CRL_REG_LEN_08BIT, 0x00}, + {0x3340, CRL_REG_LEN_08BIT, 0x03}, + {0x3341, CRL_REG_LEN_08BIT, 0xc8}, + {0x3342, CRL_REG_LEN_08BIT, 0x02}, + {0x3343, CRL_REG_LEN_08BIT, 0x24}, + {0x3344, CRL_REG_LEN_08BIT, 0x00}, + {0x3345, CRL_REG_LEN_08BIT, 0x00}, + {0x3346, CRL_REG_LEN_08BIT, 0x00}, + {0x3347, CRL_REG_LEN_08BIT, 0x00}, + {0x3348, CRL_REG_LEN_08BIT, 0x40}, + {0x3349, CRL_REG_LEN_08BIT, 0x00}, + {0x334a, CRL_REG_LEN_08BIT, 0x00}, + {0x334b, CRL_REG_LEN_08BIT, 0x00}, + {0x334c, CRL_REG_LEN_08BIT, 0x00}, + {0x334d, CRL_REG_LEN_08BIT, 0x00}, + {0x334e, CRL_REG_LEN_08BIT, 0x80}, + {0x3360, CRL_REG_LEN_08BIT, 0x01}, + {0x3361, CRL_REG_LEN_08BIT, 0x00}, + {0x3362, CRL_REG_LEN_08BIT, 0x01}, + {0x3363, CRL_REG_LEN_08BIT, 0x00}, + {0x3364, CRL_REG_LEN_08BIT, 0x01}, + {0x3365, CRL_REG_LEN_08BIT, 0x00}, + {0x3366, CRL_REG_LEN_08BIT, 0x01}, + {0x3367, CRL_REG_LEN_08BIT, 0x00}, + {0x3368, CRL_REG_LEN_08BIT, 0x01}, + {0x3369, CRL_REG_LEN_08BIT, 0x00}, + {0x336a, CRL_REG_LEN_08BIT, 0x01}, + {0x336b, CRL_REG_LEN_08BIT, 0x00}, + {0x336c, CRL_REG_LEN_08BIT, 0x01}, + {0x336d, CRL_REG_LEN_08BIT, 0x00}, + {0x336e, CRL_REG_LEN_08BIT, 0x01}, + {0x336f, CRL_REG_LEN_08BIT, 0x00}, + {0x3370, CRL_REG_LEN_08BIT, 0x01}, + {0x3371, CRL_REG_LEN_08BIT, 0x00}, + {0x3372, CRL_REG_LEN_08BIT, 0x01}, + {0x3373, CRL_REG_LEN_08BIT, 0x00}, + {0x3374, CRL_REG_LEN_08BIT, 0x01}, + {0x3375, CRL_REG_LEN_08BIT, 0x00}, + {0x3376, CRL_REG_LEN_08BIT, 0x01}, + 
{0x3377, CRL_REG_LEN_08BIT, 0x00}, + {0x3378, CRL_REG_LEN_08BIT, 0x00}, + {0x3379, CRL_REG_LEN_08BIT, 0x00}, + {0x337a, CRL_REG_LEN_08BIT, 0x00}, + {0x337b, CRL_REG_LEN_08BIT, 0x00}, + {0x337c, CRL_REG_LEN_08BIT, 0x00}, + {0x337d, CRL_REG_LEN_08BIT, 0x00}, + {0x337e, CRL_REG_LEN_08BIT, 0x00}, + {0x337f, CRL_REG_LEN_08BIT, 0x00}, + {0x3380, CRL_REG_LEN_08BIT, 0x00}, + {0x3381, CRL_REG_LEN_08BIT, 0x00}, + {0x3382, CRL_REG_LEN_08BIT, 0x00}, + {0x3383, CRL_REG_LEN_08BIT, 0x00}, + {0x3384, CRL_REG_LEN_08BIT, 0x00}, + {0x3385, CRL_REG_LEN_08BIT, 0x00}, + {0x3386, CRL_REG_LEN_08BIT, 0x00}, + {0x3387, CRL_REG_LEN_08BIT, 0x00}, + {0x3388, CRL_REG_LEN_08BIT, 0x00}, + {0x3389, CRL_REG_LEN_08BIT, 0x00}, + {0x338a, CRL_REG_LEN_08BIT, 0x00}, + {0x338b, CRL_REG_LEN_08BIT, 0x00}, + {0x338c, CRL_REG_LEN_08BIT, 0x00}, + {0x338d, CRL_REG_LEN_08BIT, 0x00}, + {0x338e, CRL_REG_LEN_08BIT, 0x00}, + {0x338f, CRL_REG_LEN_08BIT, 0x00}, + {0x3390, CRL_REG_LEN_08BIT, 0x00}, + {0x3391, CRL_REG_LEN_08BIT, 0x00}, + {0x3392, CRL_REG_LEN_08BIT, 0x00}, + {0x3393, CRL_REG_LEN_08BIT, 0x00}, + {0x3394, CRL_REG_LEN_08BIT, 0x00}, + {0x3395, CRL_REG_LEN_08BIT, 0x00}, + {0x3396, CRL_REG_LEN_08BIT, 0x00}, + {0x3397, CRL_REG_LEN_08BIT, 0x00}, + {0x3398, CRL_REG_LEN_08BIT, 0x00}, + {0x3399, CRL_REG_LEN_08BIT, 0x00}, + {0x339a, CRL_REG_LEN_08BIT, 0x00}, + {0x339b, CRL_REG_LEN_08BIT, 0x00}, + {0x33b0, CRL_REG_LEN_08BIT, 0x00}, + {0x33b1, CRL_REG_LEN_08BIT, 0x50}, + {0x33b2, CRL_REG_LEN_08BIT, 0x01}, + {0x33b3, CRL_REG_LEN_08BIT, 0xff}, + {0x33b4, CRL_REG_LEN_08BIT, 0xe0}, + {0x33b5, CRL_REG_LEN_08BIT, 0x6b}, + {0x33b6, CRL_REG_LEN_08BIT, 0x00}, + {0x33b7, CRL_REG_LEN_08BIT, 0x00}, + {0x33b8, CRL_REG_LEN_08BIT, 0x00}, + {0x33b9, CRL_REG_LEN_08BIT, 0x00}, + {0x33ba, CRL_REG_LEN_08BIT, 0x00}, + {0x33bb, CRL_REG_LEN_08BIT, 0x1f}, + {0x33bc, CRL_REG_LEN_08BIT, 0x01}, + {0x33bd, CRL_REG_LEN_08BIT, 0x01}, + {0x33be, CRL_REG_LEN_08BIT, 0x01}, + {0x33bf, CRL_REG_LEN_08BIT, 0x01}, + {0x33c0, CRL_REG_LEN_08BIT, 0x00}, + 
{0x33c1, CRL_REG_LEN_08BIT, 0x00}, + {0x33c2, CRL_REG_LEN_08BIT, 0x00}, + {0x33c3, CRL_REG_LEN_08BIT, 0x00}, + {0x33e0, CRL_REG_LEN_08BIT, 0x14}, + {0x33e1, CRL_REG_LEN_08BIT, 0x0f}, + {0x33e2, CRL_REG_LEN_08BIT, 0x02}, + {0x33e3, CRL_REG_LEN_08BIT, 0x01}, + {0x33e4, CRL_REG_LEN_08BIT, 0x01}, + {0x33e5, CRL_REG_LEN_08BIT, 0x01}, + {0x33e6, CRL_REG_LEN_08BIT, 0x00}, + {0x33e7, CRL_REG_LEN_08BIT, 0x04}, + {0x33e8, CRL_REG_LEN_08BIT, 0x0c}, + {0x33e9, CRL_REG_LEN_08BIT, 0x02}, + {0x33ea, CRL_REG_LEN_08BIT, 0x02}, + {0x33eb, CRL_REG_LEN_08BIT, 0x02}, + {0x33ec, CRL_REG_LEN_08BIT, 0x03}, + {0x33ed, CRL_REG_LEN_08BIT, 0x01}, + {0x33ee, CRL_REG_LEN_08BIT, 0x02}, + {0x33ef, CRL_REG_LEN_08BIT, 0x08}, + {0x33f0, CRL_REG_LEN_08BIT, 0x08}, + {0x33f1, CRL_REG_LEN_08BIT, 0x04}, + {0x33f2, CRL_REG_LEN_08BIT, 0x04}, + {0x33f3, CRL_REG_LEN_08BIT, 0x00}, + {0x33f4, CRL_REG_LEN_08BIT, 0x03}, + {0x33f5, CRL_REG_LEN_08BIT, 0x14}, + {0x33f6, CRL_REG_LEN_08BIT, 0x0f}, + {0x33f7, CRL_REG_LEN_08BIT, 0x02}, + {0x33f8, CRL_REG_LEN_08BIT, 0x01}, + {0x33f9, CRL_REG_LEN_08BIT, 0x01}, + {0x33fa, CRL_REG_LEN_08BIT, 0x01}, + {0x33fb, CRL_REG_LEN_08BIT, 0x00}, + {0x33fc, CRL_REG_LEN_08BIT, 0x04}, + {0x33fd, CRL_REG_LEN_08BIT, 0x0c}, + {0x33fe, CRL_REG_LEN_08BIT, 0x02}, + {0x33ff, CRL_REG_LEN_08BIT, 0x02}, + {0x3400, CRL_REG_LEN_08BIT, 0x02}, + {0x3401, CRL_REG_LEN_08BIT, 0x03}, + {0x3402, CRL_REG_LEN_08BIT, 0x01}, + {0x3403, CRL_REG_LEN_08BIT, 0x02}, + {0x3404, CRL_REG_LEN_08BIT, 0x08}, + {0x3405, CRL_REG_LEN_08BIT, 0x08}, + {0x3406, CRL_REG_LEN_08BIT, 0x04}, + {0x3407, CRL_REG_LEN_08BIT, 0x04}, + {0x3408, CRL_REG_LEN_08BIT, 0x00}, + {0x3409, CRL_REG_LEN_08BIT, 0x03}, + {0x340a, CRL_REG_LEN_08BIT, 0x14}, + {0x340b, CRL_REG_LEN_08BIT, 0x0f}, + {0x340c, CRL_REG_LEN_08BIT, 0x04}, + {0x340d, CRL_REG_LEN_08BIT, 0x02}, + {0x340e, CRL_REG_LEN_08BIT, 0x01}, + {0x340f, CRL_REG_LEN_08BIT, 0x01}, + {0x3410, CRL_REG_LEN_08BIT, 0x00}, + {0x3411, CRL_REG_LEN_08BIT, 0x04}, + {0x3412, CRL_REG_LEN_08BIT, 0x0c}, + 
{0x3413, CRL_REG_LEN_08BIT, 0x02}, + {0x3414, CRL_REG_LEN_08BIT, 0x02}, + {0x3415, CRL_REG_LEN_08BIT, 0x02}, + {0x3416, CRL_REG_LEN_08BIT, 0x03}, + {0x3417, CRL_REG_LEN_08BIT, 0x02}, + {0x3418, CRL_REG_LEN_08BIT, 0x05}, + {0x3419, CRL_REG_LEN_08BIT, 0x0a}, + {0x341a, CRL_REG_LEN_08BIT, 0x08}, + {0x341b, CRL_REG_LEN_08BIT, 0x04}, + {0x341c, CRL_REG_LEN_08BIT, 0x04}, + {0x341d, CRL_REG_LEN_08BIT, 0x00}, + {0x341e, CRL_REG_LEN_08BIT, 0x03}, + {0x3440, CRL_REG_LEN_08BIT, 0x00}, + {0x3441, CRL_REG_LEN_08BIT, 0x00}, + {0x3442, CRL_REG_LEN_08BIT, 0x00}, + {0x3443, CRL_REG_LEN_08BIT, 0x00}, + {0x3444, CRL_REG_LEN_08BIT, 0x02}, + {0x3445, CRL_REG_LEN_08BIT, 0xf0}, + {0x3446, CRL_REG_LEN_08BIT, 0x02}, + {0x3447, CRL_REG_LEN_08BIT, 0x08}, + {0x3448, CRL_REG_LEN_08BIT, 0x00}, + {0x3460, CRL_REG_LEN_08BIT, 0x40}, + {0x3461, CRL_REG_LEN_08BIT, 0x40}, + {0x3462, CRL_REG_LEN_08BIT, 0x40}, + {0x3463, CRL_REG_LEN_08BIT, 0x40}, + {0x3464, CRL_REG_LEN_08BIT, 0x03}, + {0x3465, CRL_REG_LEN_08BIT, 0x01}, + {0x3466, CRL_REG_LEN_08BIT, 0x01}, + {0x3467, CRL_REG_LEN_08BIT, 0x02}, + {0x3468, CRL_REG_LEN_08BIT, 0x30}, + {0x3469, CRL_REG_LEN_08BIT, 0x00}, + {0x346a, CRL_REG_LEN_08BIT, 0x33}, + {0x346b, CRL_REG_LEN_08BIT, 0xbf}, + {0x3480, CRL_REG_LEN_08BIT, 0x40}, + {0x3481, CRL_REG_LEN_08BIT, 0x00}, + {0x3482, CRL_REG_LEN_08BIT, 0x00}, + {0x3483, CRL_REG_LEN_08BIT, 0x00}, + {0x3484, CRL_REG_LEN_08BIT, 0x0d}, + {0x3485, CRL_REG_LEN_08BIT, 0x00}, + {0x3486, CRL_REG_LEN_08BIT, 0x00}, + {0x3487, CRL_REG_LEN_08BIT, 0x00}, + {0x3488, CRL_REG_LEN_08BIT, 0x00}, + {0x3489, CRL_REG_LEN_08BIT, 0x00}, + {0x348a, CRL_REG_LEN_08BIT, 0x00}, + {0x348b, CRL_REG_LEN_08BIT, 0x04}, + {0x348c, CRL_REG_LEN_08BIT, 0x00}, + {0x348d, CRL_REG_LEN_08BIT, 0x01}, + {0x348f, CRL_REG_LEN_08BIT, 0x01}, + {0x3030, CRL_REG_LEN_08BIT, 0x0a}, + {0x3030, CRL_REG_LEN_08BIT, 0x02}, + {0x7000, CRL_REG_LEN_08BIT, 0x58}, + {0x7001, CRL_REG_LEN_08BIT, 0x7a}, + {0x7002, CRL_REG_LEN_08BIT, 0x1a}, + {0x7003, CRL_REG_LEN_08BIT, 0xc1}, + 
{0x7004, CRL_REG_LEN_08BIT, 0x03}, + {0x7005, CRL_REG_LEN_08BIT, 0xda}, + {0x7006, CRL_REG_LEN_08BIT, 0xbd}, + {0x7007, CRL_REG_LEN_08BIT, 0x03}, + {0x7008, CRL_REG_LEN_08BIT, 0xbd}, + {0x7009, CRL_REG_LEN_08BIT, 0x06}, + {0x700a, CRL_REG_LEN_08BIT, 0xe6}, + {0x700b, CRL_REG_LEN_08BIT, 0xec}, + {0x700c, CRL_REG_LEN_08BIT, 0xbc}, + {0x700d, CRL_REG_LEN_08BIT, 0xff}, + {0x700e, CRL_REG_LEN_08BIT, 0xbc}, + {0x700f, CRL_REG_LEN_08BIT, 0x73}, + {0x7010, CRL_REG_LEN_08BIT, 0xda}, + {0x7011, CRL_REG_LEN_08BIT, 0x72}, + {0x7012, CRL_REG_LEN_08BIT, 0x76}, + {0x7013, CRL_REG_LEN_08BIT, 0xb6}, + {0x7014, CRL_REG_LEN_08BIT, 0xee}, + {0x7015, CRL_REG_LEN_08BIT, 0xcf}, + {0x7016, CRL_REG_LEN_08BIT, 0xac}, + {0x7017, CRL_REG_LEN_08BIT, 0xd0}, + {0x7018, CRL_REG_LEN_08BIT, 0xac}, + {0x7019, CRL_REG_LEN_08BIT, 0xd1}, + {0x701a, CRL_REG_LEN_08BIT, 0x50}, + {0x701b, CRL_REG_LEN_08BIT, 0xac}, + {0x701c, CRL_REG_LEN_08BIT, 0xd2}, + {0x701d, CRL_REG_LEN_08BIT, 0xbc}, + {0x701e, CRL_REG_LEN_08BIT, 0x2e}, + {0x701f, CRL_REG_LEN_08BIT, 0xb4}, + {0x7020, CRL_REG_LEN_08BIT, 0x00}, + {0x7021, CRL_REG_LEN_08BIT, 0xdc}, + {0x7022, CRL_REG_LEN_08BIT, 0xdf}, + {0x7023, CRL_REG_LEN_08BIT, 0xb0}, + {0x7024, CRL_REG_LEN_08BIT, 0x6e}, + {0x7025, CRL_REG_LEN_08BIT, 0xbd}, + {0x7026, CRL_REG_LEN_08BIT, 0x01}, + {0x7027, CRL_REG_LEN_08BIT, 0xd7}, + {0x7028, CRL_REG_LEN_08BIT, 0xed}, + {0x7029, CRL_REG_LEN_08BIT, 0xe1}, + {0x702a, CRL_REG_LEN_08BIT, 0x36}, + {0x702b, CRL_REG_LEN_08BIT, 0x30}, + {0x702c, CRL_REG_LEN_08BIT, 0xd3}, + {0x702d, CRL_REG_LEN_08BIT, 0x2e}, + {0x702e, CRL_REG_LEN_08BIT, 0x54}, + {0x702f, CRL_REG_LEN_08BIT, 0x46}, + {0x7030, CRL_REG_LEN_08BIT, 0xbc}, + {0x7031, CRL_REG_LEN_08BIT, 0x22}, + {0x7032, CRL_REG_LEN_08BIT, 0x66}, + {0x7033, CRL_REG_LEN_08BIT, 0xbc}, + {0x7034, CRL_REG_LEN_08BIT, 0x24}, + {0x7035, CRL_REG_LEN_08BIT, 0x2c}, + {0x7036, CRL_REG_LEN_08BIT, 0x28}, + {0x7037, CRL_REG_LEN_08BIT, 0xbc}, + {0x7038, CRL_REG_LEN_08BIT, 0x3c}, + {0x7039, CRL_REG_LEN_08BIT, 0xa1}, + 
{0x703a, CRL_REG_LEN_08BIT, 0xac}, + {0x703b, CRL_REG_LEN_08BIT, 0xd8}, + {0x703c, CRL_REG_LEN_08BIT, 0xd6}, + {0x703d, CRL_REG_LEN_08BIT, 0xb4}, + {0x703e, CRL_REG_LEN_08BIT, 0x04}, + {0x703f, CRL_REG_LEN_08BIT, 0x46}, + {0x7040, CRL_REG_LEN_08BIT, 0xb7}, + {0x7041, CRL_REG_LEN_08BIT, 0x04}, + {0x7042, CRL_REG_LEN_08BIT, 0xbe}, + {0x7043, CRL_REG_LEN_08BIT, 0x08}, + {0x7044, CRL_REG_LEN_08BIT, 0xc3}, + {0x7045, CRL_REG_LEN_08BIT, 0xd9}, + {0x7046, CRL_REG_LEN_08BIT, 0xad}, + {0x7047, CRL_REG_LEN_08BIT, 0xc3}, + {0x7048, CRL_REG_LEN_08BIT, 0xbc}, + {0x7049, CRL_REG_LEN_08BIT, 0x19}, + {0x704a, CRL_REG_LEN_08BIT, 0xc1}, + {0x704b, CRL_REG_LEN_08BIT, 0x27}, + {0x704c, CRL_REG_LEN_08BIT, 0xe7}, + {0x704d, CRL_REG_LEN_08BIT, 0x00}, + {0x704e, CRL_REG_LEN_08BIT, 0x50}, + {0x704f, CRL_REG_LEN_08BIT, 0x20}, + {0x7050, CRL_REG_LEN_08BIT, 0xb8}, + {0x7051, CRL_REG_LEN_08BIT, 0x02}, + {0x7052, CRL_REG_LEN_08BIT, 0xbc}, + {0x7053, CRL_REG_LEN_08BIT, 0x17}, + {0x7054, CRL_REG_LEN_08BIT, 0xdb}, + {0x7055, CRL_REG_LEN_08BIT, 0xc7}, + {0x7056, CRL_REG_LEN_08BIT, 0xb8}, + {0x7057, CRL_REG_LEN_08BIT, 0x00}, + {0x7058, CRL_REG_LEN_08BIT, 0x28}, + {0x7059, CRL_REG_LEN_08BIT, 0x54}, + {0x705a, CRL_REG_LEN_08BIT, 0xb4}, + {0x705b, CRL_REG_LEN_08BIT, 0x14}, + {0x705c, CRL_REG_LEN_08BIT, 0xab}, + {0x705d, CRL_REG_LEN_08BIT, 0xbe}, + {0x705e, CRL_REG_LEN_08BIT, 0x06}, + {0x705f, CRL_REG_LEN_08BIT, 0xd8}, + {0x7060, CRL_REG_LEN_08BIT, 0xd6}, + {0x7061, CRL_REG_LEN_08BIT, 0x00}, + {0x7062, CRL_REG_LEN_08BIT, 0xb4}, + {0x7063, CRL_REG_LEN_08BIT, 0xc7}, + {0x7064, CRL_REG_LEN_08BIT, 0x07}, + {0x7065, CRL_REG_LEN_08BIT, 0xb9}, + {0x7066, CRL_REG_LEN_08BIT, 0x05}, + {0x7067, CRL_REG_LEN_08BIT, 0xee}, + {0x7068, CRL_REG_LEN_08BIT, 0xe6}, + {0x7069, CRL_REG_LEN_08BIT, 0xad}, + {0x706a, CRL_REG_LEN_08BIT, 0xb4}, + {0x706b, CRL_REG_LEN_08BIT, 0x26}, + {0x706c, CRL_REG_LEN_08BIT, 0x19}, + {0x706d, CRL_REG_LEN_08BIT, 0xc1}, + {0x706e, CRL_REG_LEN_08BIT, 0x3a}, + {0x706f, CRL_REG_LEN_08BIT, 0xc3}, + 
{0x7070, CRL_REG_LEN_08BIT, 0xaf}, + {0x7071, CRL_REG_LEN_08BIT, 0x00}, + {0x7072, CRL_REG_LEN_08BIT, 0xc0}, + {0x7073, CRL_REG_LEN_08BIT, 0x3c}, + {0x7074, CRL_REG_LEN_08BIT, 0xc3}, + {0x7075, CRL_REG_LEN_08BIT, 0xbe}, + {0x7076, CRL_REG_LEN_08BIT, 0xe7}, + {0x7077, CRL_REG_LEN_08BIT, 0x00}, + {0x7078, CRL_REG_LEN_08BIT, 0x15}, + {0x7079, CRL_REG_LEN_08BIT, 0xc2}, + {0x707a, CRL_REG_LEN_08BIT, 0x40}, + {0x707b, CRL_REG_LEN_08BIT, 0xc3}, + {0x707c, CRL_REG_LEN_08BIT, 0xa4}, + {0x707d, CRL_REG_LEN_08BIT, 0xc0}, + {0x707e, CRL_REG_LEN_08BIT, 0x3c}, + {0x707f, CRL_REG_LEN_08BIT, 0x00}, + {0x7080, CRL_REG_LEN_08BIT, 0xb9}, + {0x7081, CRL_REG_LEN_08BIT, 0x64}, + {0x7082, CRL_REG_LEN_08BIT, 0x29}, + {0x7083, CRL_REG_LEN_08BIT, 0x00}, + {0x7084, CRL_REG_LEN_08BIT, 0xb8}, + {0x7085, CRL_REG_LEN_08BIT, 0x12}, + {0x7086, CRL_REG_LEN_08BIT, 0xbe}, + {0x7087, CRL_REG_LEN_08BIT, 0x01}, + {0x7088, CRL_REG_LEN_08BIT, 0xd0}, + {0x7089, CRL_REG_LEN_08BIT, 0xbc}, + {0x708a, CRL_REG_LEN_08BIT, 0x01}, + {0x708b, CRL_REG_LEN_08BIT, 0xac}, + {0x708c, CRL_REG_LEN_08BIT, 0x37}, + {0x708d, CRL_REG_LEN_08BIT, 0xd2}, + {0x708e, CRL_REG_LEN_08BIT, 0xac}, + {0x708f, CRL_REG_LEN_08BIT, 0x45}, + {0x7090, CRL_REG_LEN_08BIT, 0xad}, + {0x7091, CRL_REG_LEN_08BIT, 0x28}, + {0x7092, CRL_REG_LEN_08BIT, 0x00}, + {0x7093, CRL_REG_LEN_08BIT, 0xb8}, + {0x7094, CRL_REG_LEN_08BIT, 0x00}, + {0x7095, CRL_REG_LEN_08BIT, 0xbc}, + {0x7096, CRL_REG_LEN_08BIT, 0x01}, + {0x7097, CRL_REG_LEN_08BIT, 0x36}, + {0x7098, CRL_REG_LEN_08BIT, 0xd3}, + {0x7099, CRL_REG_LEN_08BIT, 0x30}, + {0x709a, CRL_REG_LEN_08BIT, 0x04}, + {0x709b, CRL_REG_LEN_08BIT, 0xe0}, + {0x709c, CRL_REG_LEN_08BIT, 0xd8}, + {0x709d, CRL_REG_LEN_08BIT, 0xb4}, + {0x709e, CRL_REG_LEN_08BIT, 0xe9}, + {0x709f, CRL_REG_LEN_08BIT, 0x00}, + {0x70a0, CRL_REG_LEN_08BIT, 0xbe}, + {0x70a1, CRL_REG_LEN_08BIT, 0x05}, + {0x70a2, CRL_REG_LEN_08BIT, 0x62}, + {0x70a3, CRL_REG_LEN_08BIT, 0x07}, + {0x70a4, CRL_REG_LEN_08BIT, 0xb9}, + {0x70a5, CRL_REG_LEN_08BIT, 0x05}, + 
{0x70a6, CRL_REG_LEN_08BIT, 0xad}, + {0x70a7, CRL_REG_LEN_08BIT, 0xc3}, + {0x70a8, CRL_REG_LEN_08BIT, 0xcf}, + {0x70a9, CRL_REG_LEN_08BIT, 0x00}, + {0x70aa, CRL_REG_LEN_08BIT, 0x15}, + {0x70ab, CRL_REG_LEN_08BIT, 0xc2}, + {0x70ac, CRL_REG_LEN_08BIT, 0x59}, + {0x70ad, CRL_REG_LEN_08BIT, 0xc3}, + {0x70ae, CRL_REG_LEN_08BIT, 0xc9}, + {0x70af, CRL_REG_LEN_08BIT, 0xc0}, + {0x70b0, CRL_REG_LEN_08BIT, 0x55}, + {0x70b1, CRL_REG_LEN_08BIT, 0x00}, + {0x70b2, CRL_REG_LEN_08BIT, 0x46}, + {0x70b3, CRL_REG_LEN_08BIT, 0xa1}, + {0x70b4, CRL_REG_LEN_08BIT, 0xb9}, + {0x70b5, CRL_REG_LEN_08BIT, 0x64}, + {0x70b6, CRL_REG_LEN_08BIT, 0x29}, + {0x70b7, CRL_REG_LEN_08BIT, 0x00}, + {0x70b8, CRL_REG_LEN_08BIT, 0xb8}, + {0x70b9, CRL_REG_LEN_08BIT, 0x02}, + {0x70ba, CRL_REG_LEN_08BIT, 0xbe}, + {0x70bb, CRL_REG_LEN_08BIT, 0x02}, + {0x70bc, CRL_REG_LEN_08BIT, 0xd0}, + {0x70bd, CRL_REG_LEN_08BIT, 0xdc}, + {0x70be, CRL_REG_LEN_08BIT, 0xac}, + {0x70bf, CRL_REG_LEN_08BIT, 0xbc}, + {0x70c0, CRL_REG_LEN_08BIT, 0x01}, + {0x70c1, CRL_REG_LEN_08BIT, 0x37}, + {0x70c2, CRL_REG_LEN_08BIT, 0xac}, + {0x70c3, CRL_REG_LEN_08BIT, 0xd2}, + {0x70c4, CRL_REG_LEN_08BIT, 0x45}, + {0x70c5, CRL_REG_LEN_08BIT, 0xad}, + {0x70c6, CRL_REG_LEN_08BIT, 0x28}, + {0x70c7, CRL_REG_LEN_08BIT, 0x00}, + {0x70c8, CRL_REG_LEN_08BIT, 0xb8}, + {0x70c9, CRL_REG_LEN_08BIT, 0x00}, + {0x70ca, CRL_REG_LEN_08BIT, 0xbc}, + {0x70cb, CRL_REG_LEN_08BIT, 0x01}, + {0x70cc, CRL_REG_LEN_08BIT, 0x36}, + {0x70cd, CRL_REG_LEN_08BIT, 0x30}, + {0x70ce, CRL_REG_LEN_08BIT, 0xe0}, + {0x70cf, CRL_REG_LEN_08BIT, 0xd8}, + {0x70d0, CRL_REG_LEN_08BIT, 0xb5}, + {0x70d1, CRL_REG_LEN_08BIT, 0x0b}, + {0x70d2, CRL_REG_LEN_08BIT, 0xd6}, + {0x70d3, CRL_REG_LEN_08BIT, 0xbe}, + {0x70d4, CRL_REG_LEN_08BIT, 0x07}, + {0x70d5, CRL_REG_LEN_08BIT, 0x00}, + {0x70d6, CRL_REG_LEN_08BIT, 0x62}, + {0x70d7, CRL_REG_LEN_08BIT, 0x07}, + {0x70d8, CRL_REG_LEN_08BIT, 0xb9}, + {0x70d9, CRL_REG_LEN_08BIT, 0x05}, + {0x70da, CRL_REG_LEN_08BIT, 0xad}, + {0x70db, CRL_REG_LEN_08BIT, 0xc3}, + 
{0x70dc, CRL_REG_LEN_08BIT, 0xcf}, + {0x70dd, CRL_REG_LEN_08BIT, 0x46}, + {0x70de, CRL_REG_LEN_08BIT, 0xcd}, + {0x70df, CRL_REG_LEN_08BIT, 0x07}, + {0x70e0, CRL_REG_LEN_08BIT, 0xcd}, + {0x70e1, CRL_REG_LEN_08BIT, 0x00}, + {0x70e2, CRL_REG_LEN_08BIT, 0xe3}, + {0x70e3, CRL_REG_LEN_08BIT, 0x18}, + {0x70e4, CRL_REG_LEN_08BIT, 0xc2}, + {0x70e5, CRL_REG_LEN_08BIT, 0xa2}, + {0x70e6, CRL_REG_LEN_08BIT, 0xb9}, + {0x70e7, CRL_REG_LEN_08BIT, 0x64}, + {0x70e8, CRL_REG_LEN_08BIT, 0xd1}, + {0x70e9, CRL_REG_LEN_08BIT, 0xdd}, + {0x70ea, CRL_REG_LEN_08BIT, 0xac}, + {0x70eb, CRL_REG_LEN_08BIT, 0xcf}, + {0x70ec, CRL_REG_LEN_08BIT, 0xdf}, + {0x70ed, CRL_REG_LEN_08BIT, 0xb5}, + {0x70ee, CRL_REG_LEN_08BIT, 0x19}, + {0x70ef, CRL_REG_LEN_08BIT, 0x46}, + {0x70f0, CRL_REG_LEN_08BIT, 0x50}, + {0x70f1, CRL_REG_LEN_08BIT, 0xb6}, + {0x70f2, CRL_REG_LEN_08BIT, 0xee}, + {0x70f3, CRL_REG_LEN_08BIT, 0xe8}, + {0x70f4, CRL_REG_LEN_08BIT, 0xe6}, + {0x70f5, CRL_REG_LEN_08BIT, 0xbc}, + {0x70f6, CRL_REG_LEN_08BIT, 0x31}, + {0x70f7, CRL_REG_LEN_08BIT, 0xe1}, + {0x70f8, CRL_REG_LEN_08BIT, 0x36}, + {0x70f9, CRL_REG_LEN_08BIT, 0x30}, + {0x70fa, CRL_REG_LEN_08BIT, 0xd3}, + {0x70fb, CRL_REG_LEN_08BIT, 0x2e}, + {0x70fc, CRL_REG_LEN_08BIT, 0x54}, + {0x70fd, CRL_REG_LEN_08BIT, 0xbd}, + {0x70fe, CRL_REG_LEN_08BIT, 0x03}, + {0x70ff, CRL_REG_LEN_08BIT, 0xec}, + {0x7100, CRL_REG_LEN_08BIT, 0x2c}, + {0x7101, CRL_REG_LEN_08BIT, 0x50}, + {0x7102, CRL_REG_LEN_08BIT, 0x20}, + {0x7103, CRL_REG_LEN_08BIT, 0x04}, + {0x7104, CRL_REG_LEN_08BIT, 0xb8}, + {0x7105, CRL_REG_LEN_08BIT, 0x02}, + {0x7106, CRL_REG_LEN_08BIT, 0xbc}, + {0x7107, CRL_REG_LEN_08BIT, 0x18}, + {0x7108, CRL_REG_LEN_08BIT, 0xc7}, + {0x7109, CRL_REG_LEN_08BIT, 0xb8}, + {0x710a, CRL_REG_LEN_08BIT, 0x00}, + {0x710b, CRL_REG_LEN_08BIT, 0x28}, + {0x710c, CRL_REG_LEN_08BIT, 0x54}, + {0x710d, CRL_REG_LEN_08BIT, 0xbc}, + {0x710e, CRL_REG_LEN_08BIT, 0x02}, + {0x710f, CRL_REG_LEN_08BIT, 0xb4}, + {0x7110, CRL_REG_LEN_08BIT, 0xda}, + {0x7111, CRL_REG_LEN_08BIT, 0xbe}, + 
{0x7112, CRL_REG_LEN_08BIT, 0x04}, + {0x7113, CRL_REG_LEN_08BIT, 0xd6}, + {0x7114, CRL_REG_LEN_08BIT, 0xd8}, + {0x7115, CRL_REG_LEN_08BIT, 0xab}, + {0x7116, CRL_REG_LEN_08BIT, 0x00}, + {0x7117, CRL_REG_LEN_08BIT, 0x62}, + {0x7118, CRL_REG_LEN_08BIT, 0x07}, + {0x7119, CRL_REG_LEN_08BIT, 0xb9}, + {0x711a, CRL_REG_LEN_08BIT, 0x05}, + {0x711b, CRL_REG_LEN_08BIT, 0xad}, + {0x711c, CRL_REG_LEN_08BIT, 0xc3}, + {0x711d, CRL_REG_LEN_08BIT, 0xbc}, + {0x711e, CRL_REG_LEN_08BIT, 0xe7}, + {0x711f, CRL_REG_LEN_08BIT, 0xb9}, + {0x7120, CRL_REG_LEN_08BIT, 0x64}, + {0x7121, CRL_REG_LEN_08BIT, 0x29}, + {0x7122, CRL_REG_LEN_08BIT, 0x00}, + {0x7123, CRL_REG_LEN_08BIT, 0xb8}, + {0x7124, CRL_REG_LEN_08BIT, 0x02}, + {0x7125, CRL_REG_LEN_08BIT, 0xbe}, + {0x7126, CRL_REG_LEN_08BIT, 0x00}, + {0x7127, CRL_REG_LEN_08BIT, 0x45}, + {0x7128, CRL_REG_LEN_08BIT, 0xad}, + {0x7129, CRL_REG_LEN_08BIT, 0xe2}, + {0x712a, CRL_REG_LEN_08BIT, 0x28}, + {0x712b, CRL_REG_LEN_08BIT, 0x00}, + {0x712c, CRL_REG_LEN_08BIT, 0xb8}, + {0x712d, CRL_REG_LEN_08BIT, 0x00}, + {0x712e, CRL_REG_LEN_08BIT, 0xe0}, + {0x712f, CRL_REG_LEN_08BIT, 0xd8}, + {0x7130, CRL_REG_LEN_08BIT, 0xb4}, + {0x7131, CRL_REG_LEN_08BIT, 0xe9}, + {0x7132, CRL_REG_LEN_08BIT, 0xbe}, + {0x7133, CRL_REG_LEN_08BIT, 0x03}, + {0x7134, CRL_REG_LEN_08BIT, 0x00}, + {0x7135, CRL_REG_LEN_08BIT, 0x30}, + {0x7136, CRL_REG_LEN_08BIT, 0x62}, + {0x7137, CRL_REG_LEN_08BIT, 0x07}, + {0x7138, CRL_REG_LEN_08BIT, 0xb9}, + {0x7139, CRL_REG_LEN_08BIT, 0x05}, + {0x713a, CRL_REG_LEN_08BIT, 0xad}, + {0x713b, CRL_REG_LEN_08BIT, 0xc3}, + {0x713c, CRL_REG_LEN_08BIT, 0xcf}, + {0x713d, CRL_REG_LEN_08BIT, 0x42}, + {0x713e, CRL_REG_LEN_08BIT, 0xe4}, + {0x713f, CRL_REG_LEN_08BIT, 0xcd}, + {0x7140, CRL_REG_LEN_08BIT, 0x07}, + {0x7141, CRL_REG_LEN_08BIT, 0xcd}, + {0x7142, CRL_REG_LEN_08BIT, 0x00}, + {0x7143, CRL_REG_LEN_08BIT, 0x00}, + {0x7144, CRL_REG_LEN_08BIT, 0x17}, + {0x7145, CRL_REG_LEN_08BIT, 0xc2}, + {0x7146, CRL_REG_LEN_08BIT, 0xbb}, + {0x7147, CRL_REG_LEN_08BIT, 0xde}, + 
{0x7148, CRL_REG_LEN_08BIT, 0xcf}, + {0x7149, CRL_REG_LEN_08BIT, 0xdf}, + {0x714a, CRL_REG_LEN_08BIT, 0xac}, + {0x714b, CRL_REG_LEN_08BIT, 0xd1}, + {0x714c, CRL_REG_LEN_08BIT, 0x44}, + {0x714d, CRL_REG_LEN_08BIT, 0xac}, + {0x714e, CRL_REG_LEN_08BIT, 0xb9}, + {0x714f, CRL_REG_LEN_08BIT, 0x76}, + {0x7150, CRL_REG_LEN_08BIT, 0xb8}, + {0x7151, CRL_REG_LEN_08BIT, 0x08}, + {0x7152, CRL_REG_LEN_08BIT, 0xb6}, + {0x7153, CRL_REG_LEN_08BIT, 0xfe}, + {0x7154, CRL_REG_LEN_08BIT, 0xb4}, + {0x7155, CRL_REG_LEN_08BIT, 0xca}, + {0x7156, CRL_REG_LEN_08BIT, 0xd6}, + {0x7157, CRL_REG_LEN_08BIT, 0xd8}, + {0x7158, CRL_REG_LEN_08BIT, 0xab}, + {0x7159, CRL_REG_LEN_08BIT, 0x00}, + {0x715a, CRL_REG_LEN_08BIT, 0xe1}, + {0x715b, CRL_REG_LEN_08BIT, 0x36}, + {0x715c, CRL_REG_LEN_08BIT, 0x30}, + {0x715d, CRL_REG_LEN_08BIT, 0xd3}, + {0x715e, CRL_REG_LEN_08BIT, 0xbc}, + {0x715f, CRL_REG_LEN_08BIT, 0x29}, + {0x7160, CRL_REG_LEN_08BIT, 0xb4}, + {0x7161, CRL_REG_LEN_08BIT, 0x1f}, + {0x7162, CRL_REG_LEN_08BIT, 0xaa}, + {0x7163, CRL_REG_LEN_08BIT, 0xbd}, + {0x7164, CRL_REG_LEN_08BIT, 0x01}, + {0x7165, CRL_REG_LEN_08BIT, 0xb8}, + {0x7166, CRL_REG_LEN_08BIT, 0x0c}, + {0x7167, CRL_REG_LEN_08BIT, 0x45}, + {0x7168, CRL_REG_LEN_08BIT, 0xa4}, + {0x7169, CRL_REG_LEN_08BIT, 0xbd}, + {0x716a, CRL_REG_LEN_08BIT, 0x03}, + {0x716b, CRL_REG_LEN_08BIT, 0xec}, + {0x716c, CRL_REG_LEN_08BIT, 0xbc}, + {0x716d, CRL_REG_LEN_08BIT, 0x3d}, + {0x716e, CRL_REG_LEN_08BIT, 0xc3}, + {0x716f, CRL_REG_LEN_08BIT, 0xcf}, + {0x7170, CRL_REG_LEN_08BIT, 0x42}, + {0x7171, CRL_REG_LEN_08BIT, 0xb8}, + {0x7172, CRL_REG_LEN_08BIT, 0x00}, + {0x7173, CRL_REG_LEN_08BIT, 0xe4}, + {0x7174, CRL_REG_LEN_08BIT, 0xd5}, + {0x7175, CRL_REG_LEN_08BIT, 0x00}, + {0x7176, CRL_REG_LEN_08BIT, 0xb6}, + {0x7177, CRL_REG_LEN_08BIT, 0x00}, + {0x7178, CRL_REG_LEN_08BIT, 0x74}, + {0x7179, CRL_REG_LEN_08BIT, 0xbd}, + {0x717a, CRL_REG_LEN_08BIT, 0x03}, + {0x717b, CRL_REG_LEN_08BIT, 0xb5}, + {0x717c, CRL_REG_LEN_08BIT, 0x39}, + {0x717d, CRL_REG_LEN_08BIT, 0x40}, + 
{0x717e, CRL_REG_LEN_08BIT, 0x58}, + {0x717f, CRL_REG_LEN_08BIT, 0xdd}, + {0x7180, CRL_REG_LEN_08BIT, 0x19}, + {0x7181, CRL_REG_LEN_08BIT, 0xc1}, + {0x7182, CRL_REG_LEN_08BIT, 0xc8}, + {0x7183, CRL_REG_LEN_08BIT, 0xbd}, + {0x7184, CRL_REG_LEN_08BIT, 0x06}, + {0x7185, CRL_REG_LEN_08BIT, 0x17}, + {0x7186, CRL_REG_LEN_08BIT, 0xc1}, + {0x7187, CRL_REG_LEN_08BIT, 0xc6}, + {0x7188, CRL_REG_LEN_08BIT, 0xe8}, + {0x7189, CRL_REG_LEN_08BIT, 0x00}, + {0x718a, CRL_REG_LEN_08BIT, 0xc0}, + {0x718b, CRL_REG_LEN_08BIT, 0xc8}, + {0x718c, CRL_REG_LEN_08BIT, 0xe6}, + {0x718d, CRL_REG_LEN_08BIT, 0x95}, + {0x718e, CRL_REG_LEN_08BIT, 0x15}, + {0x718f, CRL_REG_LEN_08BIT, 0x00}, + {0x7190, CRL_REG_LEN_08BIT, 0xbc}, + {0x7191, CRL_REG_LEN_08BIT, 0x19}, + {0x7192, CRL_REG_LEN_08BIT, 0xb9}, + {0x7193, CRL_REG_LEN_08BIT, 0xf6}, + {0x7194, CRL_REG_LEN_08BIT, 0x14}, + {0x7195, CRL_REG_LEN_08BIT, 0xc1}, + {0x7196, CRL_REG_LEN_08BIT, 0xd0}, + {0x7197, CRL_REG_LEN_08BIT, 0xd1}, + {0x7198, CRL_REG_LEN_08BIT, 0xac}, + {0x7199, CRL_REG_LEN_08BIT, 0x37}, + {0x719a, CRL_REG_LEN_08BIT, 0xbc}, + {0x719b, CRL_REG_LEN_08BIT, 0x35}, + {0x719c, CRL_REG_LEN_08BIT, 0x36}, + {0x719d, CRL_REG_LEN_08BIT, 0x30}, + {0x719e, CRL_REG_LEN_08BIT, 0xe1}, + {0x719f, CRL_REG_LEN_08BIT, 0xd3}, + {0x71a0, CRL_REG_LEN_08BIT, 0x7a}, + {0x71a1, CRL_REG_LEN_08BIT, 0xb6}, + {0x71a2, CRL_REG_LEN_08BIT, 0x0c}, + {0x71a3, CRL_REG_LEN_08BIT, 0xff}, + {0x71a4, CRL_REG_LEN_08BIT, 0xb4}, + {0x71a5, CRL_REG_LEN_08BIT, 0xc7}, + {0x71a6, CRL_REG_LEN_08BIT, 0xd9}, + {0x71a7, CRL_REG_LEN_08BIT, 0x00}, + {0x71a8, CRL_REG_LEN_08BIT, 0xbd}, + {0x71a9, CRL_REG_LEN_08BIT, 0x01}, + {0x71aa, CRL_REG_LEN_08BIT, 0x56}, + {0x71ab, CRL_REG_LEN_08BIT, 0xc0}, + {0x71ac, CRL_REG_LEN_08BIT, 0xda}, + {0x71ad, CRL_REG_LEN_08BIT, 0xb4}, + {0x71ae, CRL_REG_LEN_08BIT, 0x1f}, + {0x71af, CRL_REG_LEN_08BIT, 0x56}, + {0x71b0, CRL_REG_LEN_08BIT, 0xaa}, + {0x71b1, CRL_REG_LEN_08BIT, 0xbc}, + {0x71b2, CRL_REG_LEN_08BIT, 0x08}, + {0x71b3, CRL_REG_LEN_08BIT, 0x00}, + 
{0x71b4, CRL_REG_LEN_08BIT, 0x57}, + {0x71b5, CRL_REG_LEN_08BIT, 0xe8}, + {0x71b6, CRL_REG_LEN_08BIT, 0xb5}, + {0x71b7, CRL_REG_LEN_08BIT, 0x36}, + {0x71b8, CRL_REG_LEN_08BIT, 0x00}, + {0x71b9, CRL_REG_LEN_08BIT, 0x54}, + {0x71ba, CRL_REG_LEN_08BIT, 0xe7}, + {0x71bb, CRL_REG_LEN_08BIT, 0xc8}, + {0x71bc, CRL_REG_LEN_08BIT, 0xb4}, + {0x71bd, CRL_REG_LEN_08BIT, 0x1f}, + {0x71be, CRL_REG_LEN_08BIT, 0x56}, + {0x71bf, CRL_REG_LEN_08BIT, 0xaa}, + {0x71c0, CRL_REG_LEN_08BIT, 0xbc}, + {0x71c1, CRL_REG_LEN_08BIT, 0x08}, + {0x71c2, CRL_REG_LEN_08BIT, 0x57}, + {0x71c3, CRL_REG_LEN_08BIT, 0x00}, + {0x71c4, CRL_REG_LEN_08BIT, 0xb5}, + {0x71c5, CRL_REG_LEN_08BIT, 0x36}, + {0x71c6, CRL_REG_LEN_08BIT, 0x00}, + {0x71c7, CRL_REG_LEN_08BIT, 0x54}, + {0x71c8, CRL_REG_LEN_08BIT, 0xc8}, + {0x71c9, CRL_REG_LEN_08BIT, 0xb5}, + {0x71ca, CRL_REG_LEN_08BIT, 0x18}, + {0x71cb, CRL_REG_LEN_08BIT, 0xd9}, + {0x71cc, CRL_REG_LEN_08BIT, 0x00}, + {0x71cd, CRL_REG_LEN_08BIT, 0xbd}, + {0x71ce, CRL_REG_LEN_08BIT, 0x01}, + {0x71cf, CRL_REG_LEN_08BIT, 0x56}, + {0x71d0, CRL_REG_LEN_08BIT, 0x08}, + {0x71d1, CRL_REG_LEN_08BIT, 0x57}, + {0x71d2, CRL_REG_LEN_08BIT, 0xe8}, + {0x71d3, CRL_REG_LEN_08BIT, 0xb4}, + {0x71d4, CRL_REG_LEN_08BIT, 0x42}, + {0x71d5, CRL_REG_LEN_08BIT, 0x00}, + {0x71d6, CRL_REG_LEN_08BIT, 0x54}, + {0x71d7, CRL_REG_LEN_08BIT, 0xe7}, + {0x71d8, CRL_REG_LEN_08BIT, 0xc8}, + {0x71d9, CRL_REG_LEN_08BIT, 0xab}, + {0x71da, CRL_REG_LEN_08BIT, 0x00}, + {0x71db, CRL_REG_LEN_08BIT, 0x66}, + {0x71dc, CRL_REG_LEN_08BIT, 0x62}, + {0x71dd, CRL_REG_LEN_08BIT, 0x06}, + {0x71de, CRL_REG_LEN_08BIT, 0x74}, + {0x71df, CRL_REG_LEN_08BIT, 0xb9}, + {0x71e0, CRL_REG_LEN_08BIT, 0x05}, + {0x71e1, CRL_REG_LEN_08BIT, 0xb7}, + {0x71e2, CRL_REG_LEN_08BIT, 0x14}, + {0x71e3, CRL_REG_LEN_08BIT, 0x0e}, + {0x71e4, CRL_REG_LEN_08BIT, 0xb7}, + {0x71e5, CRL_REG_LEN_08BIT, 0x04}, + {0x71e6, CRL_REG_LEN_08BIT, 0xc8}, + {0x7600, CRL_REG_LEN_08BIT, 0x04}, + {0x7601, CRL_REG_LEN_08BIT, 0x80}, + {0x7602, CRL_REG_LEN_08BIT, 0x07}, + 
{0x7603, CRL_REG_LEN_08BIT, 0x44}, + {0x7604, CRL_REG_LEN_08BIT, 0x05}, + {0x7605, CRL_REG_LEN_08BIT, 0x33}, + {0x7606, CRL_REG_LEN_08BIT, 0x0f}, + {0x7607, CRL_REG_LEN_08BIT, 0x00}, + {0x7608, CRL_REG_LEN_08BIT, 0x07}, + {0x7609, CRL_REG_LEN_08BIT, 0x40}, + {0x760a, CRL_REG_LEN_08BIT, 0x04}, + {0x760b, CRL_REG_LEN_08BIT, 0xe5}, + {0x760c, CRL_REG_LEN_08BIT, 0x06}, + {0x760d, CRL_REG_LEN_08BIT, 0x50}, + {0x760e, CRL_REG_LEN_08BIT, 0x04}, + {0x760f, CRL_REG_LEN_08BIT, 0xe4}, + {0x7610, CRL_REG_LEN_08BIT, 0x00}, + {0x7611, CRL_REG_LEN_08BIT, 0x00}, + {0x7612, CRL_REG_LEN_08BIT, 0x06}, + {0x7613, CRL_REG_LEN_08BIT, 0x5c}, + {0x7614, CRL_REG_LEN_08BIT, 0x00}, + {0x7615, CRL_REG_LEN_08BIT, 0x0f}, + {0x7616, CRL_REG_LEN_08BIT, 0x06}, + {0x7617, CRL_REG_LEN_08BIT, 0x1c}, + {0x7618, CRL_REG_LEN_08BIT, 0x00}, + {0x7619, CRL_REG_LEN_08BIT, 0x02}, + {0x761a, CRL_REG_LEN_08BIT, 0x06}, + {0x761b, CRL_REG_LEN_08BIT, 0xa2}, + {0x761c, CRL_REG_LEN_08BIT, 0x00}, + {0x761d, CRL_REG_LEN_08BIT, 0x01}, + {0x761e, CRL_REG_LEN_08BIT, 0x06}, + {0x761f, CRL_REG_LEN_08BIT, 0xae}, + {0x7620, CRL_REG_LEN_08BIT, 0x00}, + {0x7621, CRL_REG_LEN_08BIT, 0x0e}, + {0x7622, CRL_REG_LEN_08BIT, 0x05}, + {0x7623, CRL_REG_LEN_08BIT, 0x30}, + {0x7624, CRL_REG_LEN_08BIT, 0x07}, + {0x7625, CRL_REG_LEN_08BIT, 0x00}, + {0x7626, CRL_REG_LEN_08BIT, 0x0f}, + {0x7627, CRL_REG_LEN_08BIT, 0x00}, + {0x7628, CRL_REG_LEN_08BIT, 0x04}, + {0x7629, CRL_REG_LEN_08BIT, 0xe5}, + {0x762a, CRL_REG_LEN_08BIT, 0x05}, + {0x762b, CRL_REG_LEN_08BIT, 0x33}, + {0x762c, CRL_REG_LEN_08BIT, 0x06}, + {0x762d, CRL_REG_LEN_08BIT, 0x12}, + {0x762e, CRL_REG_LEN_08BIT, 0x00}, + {0x762f, CRL_REG_LEN_08BIT, 0x01}, + {0x7630, CRL_REG_LEN_08BIT, 0x06}, + {0x7631, CRL_REG_LEN_08BIT, 0x52}, + {0x7632, CRL_REG_LEN_08BIT, 0x00}, + {0x7633, CRL_REG_LEN_08BIT, 0x01}, + {0x7634, CRL_REG_LEN_08BIT, 0x06}, + {0x7635, CRL_REG_LEN_08BIT, 0x5e}, + {0x7636, CRL_REG_LEN_08BIT, 0x04}, + {0x7637, CRL_REG_LEN_08BIT, 0xe4}, + {0x7638, CRL_REG_LEN_08BIT, 0x00}, + 
{0x7639, CRL_REG_LEN_08BIT, 0x01}, + {0x763a, CRL_REG_LEN_08BIT, 0x05}, + {0x763b, CRL_REG_LEN_08BIT, 0x30}, + {0x763c, CRL_REG_LEN_08BIT, 0x0f}, + {0x763d, CRL_REG_LEN_08BIT, 0x00}, + {0x763e, CRL_REG_LEN_08BIT, 0x06}, + {0x763f, CRL_REG_LEN_08BIT, 0xa6}, + {0x7640, CRL_REG_LEN_08BIT, 0x00}, + {0x7641, CRL_REG_LEN_08BIT, 0x02}, + {0x7642, CRL_REG_LEN_08BIT, 0x06}, + {0x7643, CRL_REG_LEN_08BIT, 0x26}, + {0x7644, CRL_REG_LEN_08BIT, 0x00}, + {0x7645, CRL_REG_LEN_08BIT, 0x02}, + {0x7646, CRL_REG_LEN_08BIT, 0x05}, + {0x7647, CRL_REG_LEN_08BIT, 0x33}, + {0x7648, CRL_REG_LEN_08BIT, 0x06}, + {0x7649, CRL_REG_LEN_08BIT, 0x20}, + {0x764a, CRL_REG_LEN_08BIT, 0x0f}, + {0x764b, CRL_REG_LEN_08BIT, 0x00}, + {0x764c, CRL_REG_LEN_08BIT, 0x06}, + {0x764d, CRL_REG_LEN_08BIT, 0x56}, + {0x764e, CRL_REG_LEN_08BIT, 0x00}, + {0x764f, CRL_REG_LEN_08BIT, 0x02}, + {0x7650, CRL_REG_LEN_08BIT, 0x06}, + {0x7651, CRL_REG_LEN_08BIT, 0x16}, + {0x7652, CRL_REG_LEN_08BIT, 0x05}, + {0x7653, CRL_REG_LEN_08BIT, 0x33}, + {0x7654, CRL_REG_LEN_08BIT, 0x06}, + {0x7655, CRL_REG_LEN_08BIT, 0x10}, + {0x7656, CRL_REG_LEN_08BIT, 0x0f}, + {0x7657, CRL_REG_LEN_08BIT, 0x00}, + {0x7658, CRL_REG_LEN_08BIT, 0x06}, + {0x7659, CRL_REG_LEN_08BIT, 0x10}, + {0x765a, CRL_REG_LEN_08BIT, 0x0f}, + {0x765b, CRL_REG_LEN_08BIT, 0x00}, + {0x765c, CRL_REG_LEN_08BIT, 0x06}, + {0x765d, CRL_REG_LEN_08BIT, 0x20}, + {0x765e, CRL_REG_LEN_08BIT, 0x0f}, + {0x765f, CRL_REG_LEN_08BIT, 0x00}, + {0x7660, CRL_REG_LEN_08BIT, 0x00}, + {0x7661, CRL_REG_LEN_08BIT, 0x00}, + {0x7662, CRL_REG_LEN_08BIT, 0x00}, + {0x7663, CRL_REG_LEN_08BIT, 0x02}, + {0x7664, CRL_REG_LEN_08BIT, 0x04}, + {0x7665, CRL_REG_LEN_08BIT, 0xe5}, + {0x7666, CRL_REG_LEN_08BIT, 0x04}, + {0x7667, CRL_REG_LEN_08BIT, 0xe4}, + {0x7668, CRL_REG_LEN_08BIT, 0x0f}, + {0x7669, CRL_REG_LEN_08BIT, 0x00}, + {0x766a, CRL_REG_LEN_08BIT, 0x00}, + {0x766b, CRL_REG_LEN_08BIT, 0x00}, + {0x766c, CRL_REG_LEN_08BIT, 0x00}, + {0x766d, CRL_REG_LEN_08BIT, 0x01}, + {0x766e, CRL_REG_LEN_08BIT, 0x04}, + 
{0x766f, CRL_REG_LEN_08BIT, 0xe5}, + {0x7670, CRL_REG_LEN_08BIT, 0x04}, + {0x7671, CRL_REG_LEN_08BIT, 0xe4}, + {0x7672, CRL_REG_LEN_08BIT, 0x0f}, + {0x7673, CRL_REG_LEN_08BIT, 0x00}, + {0x7674, CRL_REG_LEN_08BIT, 0x00}, + {0x7675, CRL_REG_LEN_08BIT, 0x02}, + {0x7676, CRL_REG_LEN_08BIT, 0x04}, + {0x7677, CRL_REG_LEN_08BIT, 0xe4}, + {0x7678, CRL_REG_LEN_08BIT, 0x00}, + {0x7679, CRL_REG_LEN_08BIT, 0x02}, + {0x767a, CRL_REG_LEN_08BIT, 0x04}, + {0x767b, CRL_REG_LEN_08BIT, 0xc4}, + {0x767c, CRL_REG_LEN_08BIT, 0x00}, + {0x767d, CRL_REG_LEN_08BIT, 0x02}, + {0x767e, CRL_REG_LEN_08BIT, 0x04}, + {0x767f, CRL_REG_LEN_08BIT, 0xc4}, + {0x7680, CRL_REG_LEN_08BIT, 0x05}, + {0x7681, CRL_REG_LEN_08BIT, 0x83}, + {0x7682, CRL_REG_LEN_08BIT, 0x0f}, + {0x7683, CRL_REG_LEN_08BIT, 0x00}, + {0x7684, CRL_REG_LEN_08BIT, 0x00}, + {0x7685, CRL_REG_LEN_08BIT, 0x02}, + {0x7686, CRL_REG_LEN_08BIT, 0x04}, + {0x7687, CRL_REG_LEN_08BIT, 0xe4}, + {0x7688, CRL_REG_LEN_08BIT, 0x00}, + {0x7689, CRL_REG_LEN_08BIT, 0x02}, + {0x768a, CRL_REG_LEN_08BIT, 0x04}, + {0x768b, CRL_REG_LEN_08BIT, 0xc4}, + {0x768c, CRL_REG_LEN_08BIT, 0x00}, + {0x768d, CRL_REG_LEN_08BIT, 0x02}, + {0x768e, CRL_REG_LEN_08BIT, 0x04}, + {0x768f, CRL_REG_LEN_08BIT, 0xc4}, + {0x7690, CRL_REG_LEN_08BIT, 0x05}, + {0x7691, CRL_REG_LEN_08BIT, 0x83}, + {0x7692, CRL_REG_LEN_08BIT, 0x03}, + {0x7693, CRL_REG_LEN_08BIT, 0x0b}, + {0x7694, CRL_REG_LEN_08BIT, 0x05}, + {0x7695, CRL_REG_LEN_08BIT, 0x83}, + {0x7696, CRL_REG_LEN_08BIT, 0x00}, + {0x7697, CRL_REG_LEN_08BIT, 0x07}, + {0x7698, CRL_REG_LEN_08BIT, 0x05}, + {0x7699, CRL_REG_LEN_08BIT, 0x03}, + {0x769a, CRL_REG_LEN_08BIT, 0x00}, + {0x769b, CRL_REG_LEN_08BIT, 0x05}, + {0x769c, CRL_REG_LEN_08BIT, 0x05}, + {0x769d, CRL_REG_LEN_08BIT, 0x32}, + {0x769e, CRL_REG_LEN_08BIT, 0x05}, + {0x769f, CRL_REG_LEN_08BIT, 0x30}, + {0x76a0, CRL_REG_LEN_08BIT, 0x00}, + {0x76a1, CRL_REG_LEN_08BIT, 0x02}, + {0x76a2, CRL_REG_LEN_08BIT, 0x05}, + {0x76a3, CRL_REG_LEN_08BIT, 0x78}, + {0x76a4, CRL_REG_LEN_08BIT, 0x00}, + 
{0x76a5, CRL_REG_LEN_08BIT, 0x01}, + {0x76a6, CRL_REG_LEN_08BIT, 0x05}, + {0x76a7, CRL_REG_LEN_08BIT, 0x7c}, + {0x76a8, CRL_REG_LEN_08BIT, 0x03}, + {0x76a9, CRL_REG_LEN_08BIT, 0x9a}, + {0x76aa, CRL_REG_LEN_08BIT, 0x05}, + {0x76ab, CRL_REG_LEN_08BIT, 0x83}, + {0x76ac, CRL_REG_LEN_08BIT, 0x00}, + {0x76ad, CRL_REG_LEN_08BIT, 0x04}, + {0x76ae, CRL_REG_LEN_08BIT, 0x05}, + {0x76af, CRL_REG_LEN_08BIT, 0x03}, + {0x76b0, CRL_REG_LEN_08BIT, 0x00}, + {0x76b1, CRL_REG_LEN_08BIT, 0x03}, + {0x76b2, CRL_REG_LEN_08BIT, 0x05}, + {0x76b3, CRL_REG_LEN_08BIT, 0x32}, + {0x76b4, CRL_REG_LEN_08BIT, 0x05}, + {0x76b5, CRL_REG_LEN_08BIT, 0x30}, + {0x76b6, CRL_REG_LEN_08BIT, 0x00}, + {0x76b7, CRL_REG_LEN_08BIT, 0x02}, + {0x76b8, CRL_REG_LEN_08BIT, 0x05}, + {0x76b9, CRL_REG_LEN_08BIT, 0x78}, + {0x76ba, CRL_REG_LEN_08BIT, 0x00}, + {0x76bb, CRL_REG_LEN_08BIT, 0x01}, + {0x76bc, CRL_REG_LEN_08BIT, 0x05}, + {0x76bd, CRL_REG_LEN_08BIT, 0x7c}, + {0x76be, CRL_REG_LEN_08BIT, 0x03}, + {0x76bf, CRL_REG_LEN_08BIT, 0x99}, + {0x76c0, CRL_REG_LEN_08BIT, 0x05}, + {0x76c1, CRL_REG_LEN_08BIT, 0x83}, + {0x76c2, CRL_REG_LEN_08BIT, 0x00}, + {0x76c3, CRL_REG_LEN_08BIT, 0x03}, + {0x76c4, CRL_REG_LEN_08BIT, 0x05}, + {0x76c5, CRL_REG_LEN_08BIT, 0x03}, + {0x76c6, CRL_REG_LEN_08BIT, 0x00}, + {0x76c7, CRL_REG_LEN_08BIT, 0x01}, + {0x76c8, CRL_REG_LEN_08BIT, 0x05}, + {0x76c9, CRL_REG_LEN_08BIT, 0x32}, + {0x76ca, CRL_REG_LEN_08BIT, 0x05}, + {0x76cb, CRL_REG_LEN_08BIT, 0x30}, + {0x76cc, CRL_REG_LEN_08BIT, 0x00}, + {0x76cd, CRL_REG_LEN_08BIT, 0x02}, + {0x76ce, CRL_REG_LEN_08BIT, 0x05}, + {0x76cf, CRL_REG_LEN_08BIT, 0x78}, + {0x76d0, CRL_REG_LEN_08BIT, 0x00}, + {0x76d1, CRL_REG_LEN_08BIT, 0x01}, + {0x76d2, CRL_REG_LEN_08BIT, 0x05}, + {0x76d3, CRL_REG_LEN_08BIT, 0x7c}, + {0x76d4, CRL_REG_LEN_08BIT, 0x03}, + {0x76d5, CRL_REG_LEN_08BIT, 0x98}, + {0x76d6, CRL_REG_LEN_08BIT, 0x05}, + {0x76d7, CRL_REG_LEN_08BIT, 0x83}, + {0x76d8, CRL_REG_LEN_08BIT, 0x00}, + {0x76d9, CRL_REG_LEN_08BIT, 0x00}, + {0x76da, CRL_REG_LEN_08BIT, 0x05}, + 
{0x76db, CRL_REG_LEN_08BIT, 0x03}, + {0x76dc, CRL_REG_LEN_08BIT, 0x00}, + {0x76dd, CRL_REG_LEN_08BIT, 0x01}, + {0x76de, CRL_REG_LEN_08BIT, 0x05}, + {0x76df, CRL_REG_LEN_08BIT, 0x32}, + {0x76e0, CRL_REG_LEN_08BIT, 0x05}, + {0x76e1, CRL_REG_LEN_08BIT, 0x30}, + {0x76e2, CRL_REG_LEN_08BIT, 0x00}, + {0x76e3, CRL_REG_LEN_08BIT, 0x02}, + {0x76e4, CRL_REG_LEN_08BIT, 0x05}, + {0x76e5, CRL_REG_LEN_08BIT, 0x78}, + {0x76e6, CRL_REG_LEN_08BIT, 0x00}, + {0x76e7, CRL_REG_LEN_08BIT, 0x01}, + {0x76e8, CRL_REG_LEN_08BIT, 0x05}, + {0x76e9, CRL_REG_LEN_08BIT, 0x7c}, + {0x76ea, CRL_REG_LEN_08BIT, 0x03}, + {0x76eb, CRL_REG_LEN_08BIT, 0x97}, + {0x76ec, CRL_REG_LEN_08BIT, 0x05}, + {0x76ed, CRL_REG_LEN_08BIT, 0x83}, + {0x76ee, CRL_REG_LEN_08BIT, 0x00}, + {0x76ef, CRL_REG_LEN_08BIT, 0x00}, + {0x76f0, CRL_REG_LEN_08BIT, 0x05}, + {0x76f1, CRL_REG_LEN_08BIT, 0x03}, + {0x76f2, CRL_REG_LEN_08BIT, 0x05}, + {0x76f3, CRL_REG_LEN_08BIT, 0x32}, + {0x76f4, CRL_REG_LEN_08BIT, 0x05}, + {0x76f5, CRL_REG_LEN_08BIT, 0x30}, + {0x76f6, CRL_REG_LEN_08BIT, 0x00}, + {0x76f7, CRL_REG_LEN_08BIT, 0x02}, + {0x76f8, CRL_REG_LEN_08BIT, 0x05}, + {0x76f9, CRL_REG_LEN_08BIT, 0x78}, + {0x76fa, CRL_REG_LEN_08BIT, 0x00}, + {0x76fb, CRL_REG_LEN_08BIT, 0x01}, + {0x76fc, CRL_REG_LEN_08BIT, 0x05}, + {0x76fd, CRL_REG_LEN_08BIT, 0x7c}, + {0x76fe, CRL_REG_LEN_08BIT, 0x03}, + {0x76ff, CRL_REG_LEN_08BIT, 0x96}, + {0x7700, CRL_REG_LEN_08BIT, 0x05}, + {0x7701, CRL_REG_LEN_08BIT, 0x83}, + {0x7702, CRL_REG_LEN_08BIT, 0x05}, + {0x7703, CRL_REG_LEN_08BIT, 0x03}, + {0x7704, CRL_REG_LEN_08BIT, 0x05}, + {0x7705, CRL_REG_LEN_08BIT, 0x32}, + {0x7706, CRL_REG_LEN_08BIT, 0x05}, + {0x7707, CRL_REG_LEN_08BIT, 0x30}, + {0x7708, CRL_REG_LEN_08BIT, 0x00}, + {0x7709, CRL_REG_LEN_08BIT, 0x02}, + {0x770a, CRL_REG_LEN_08BIT, 0x05}, + {0x770b, CRL_REG_LEN_08BIT, 0x78}, + {0x770c, CRL_REG_LEN_08BIT, 0x00}, + {0x770d, CRL_REG_LEN_08BIT, 0x01}, + {0x770e, CRL_REG_LEN_08BIT, 0x05}, + {0x770f, CRL_REG_LEN_08BIT, 0x7c}, + {0x7710, CRL_REG_LEN_08BIT, 0x03}, + 
{0x7711, CRL_REG_LEN_08BIT, 0x95}, + {0x7712, CRL_REG_LEN_08BIT, 0x05}, + {0x7713, CRL_REG_LEN_08BIT, 0x83}, + {0x7714, CRL_REG_LEN_08BIT, 0x05}, + {0x7715, CRL_REG_LEN_08BIT, 0x03}, + {0x7716, CRL_REG_LEN_08BIT, 0x05}, + {0x7717, CRL_REG_LEN_08BIT, 0x32}, + {0x7718, CRL_REG_LEN_08BIT, 0x05}, + {0x7719, CRL_REG_LEN_08BIT, 0x30}, + {0x771a, CRL_REG_LEN_08BIT, 0x00}, + {0x771b, CRL_REG_LEN_08BIT, 0x02}, + {0x771c, CRL_REG_LEN_08BIT, 0x05}, + {0x771d, CRL_REG_LEN_08BIT, 0x78}, + {0x771e, CRL_REG_LEN_08BIT, 0x00}, + {0x771f, CRL_REG_LEN_08BIT, 0x01}, + {0x7720, CRL_REG_LEN_08BIT, 0x05}, + {0x7721, CRL_REG_LEN_08BIT, 0x7c}, + {0x7722, CRL_REG_LEN_08BIT, 0x03}, + {0x7723, CRL_REG_LEN_08BIT, 0x94}, + {0x7724, CRL_REG_LEN_08BIT, 0x05}, + {0x7725, CRL_REG_LEN_08BIT, 0x83}, + {0x7726, CRL_REG_LEN_08BIT, 0x00}, + {0x7727, CRL_REG_LEN_08BIT, 0x01}, + {0x7728, CRL_REG_LEN_08BIT, 0x05}, + {0x7729, CRL_REG_LEN_08BIT, 0x03}, + {0x772a, CRL_REG_LEN_08BIT, 0x00}, + {0x772b, CRL_REG_LEN_08BIT, 0x01}, + {0x772c, CRL_REG_LEN_08BIT, 0x05}, + {0x772d, CRL_REG_LEN_08BIT, 0x32}, + {0x772e, CRL_REG_LEN_08BIT, 0x05}, + {0x772f, CRL_REG_LEN_08BIT, 0x30}, + {0x7730, CRL_REG_LEN_08BIT, 0x00}, + {0x7731, CRL_REG_LEN_08BIT, 0x02}, + {0x7732, CRL_REG_LEN_08BIT, 0x05}, + {0x7733, CRL_REG_LEN_08BIT, 0x78}, + {0x7734, CRL_REG_LEN_08BIT, 0x00}, + {0x7735, CRL_REG_LEN_08BIT, 0x01}, + {0x7736, CRL_REG_LEN_08BIT, 0x05}, + {0x7737, CRL_REG_LEN_08BIT, 0x7c}, + {0x7738, CRL_REG_LEN_08BIT, 0x03}, + {0x7739, CRL_REG_LEN_08BIT, 0x93}, + {0x773a, CRL_REG_LEN_08BIT, 0x05}, + {0x773b, CRL_REG_LEN_08BIT, 0x83}, + {0x773c, CRL_REG_LEN_08BIT, 0x00}, + {0x773d, CRL_REG_LEN_08BIT, 0x00}, + {0x773e, CRL_REG_LEN_08BIT, 0x05}, + {0x773f, CRL_REG_LEN_08BIT, 0x03}, + {0x7740, CRL_REG_LEN_08BIT, 0x00}, + {0x7741, CRL_REG_LEN_08BIT, 0x00}, + {0x7742, CRL_REG_LEN_08BIT, 0x05}, + {0x7743, CRL_REG_LEN_08BIT, 0x32}, + {0x7744, CRL_REG_LEN_08BIT, 0x05}, + {0x7745, CRL_REG_LEN_08BIT, 0x30}, + {0x7746, CRL_REG_LEN_08BIT, 0x00}, + 
{0x7747, CRL_REG_LEN_08BIT, 0x02}, + {0x7748, CRL_REG_LEN_08BIT, 0x05}, + {0x7749, CRL_REG_LEN_08BIT, 0x78}, + {0x774a, CRL_REG_LEN_08BIT, 0x00}, + {0x774b, CRL_REG_LEN_08BIT, 0x01}, + {0x774c, CRL_REG_LEN_08BIT, 0x05}, + {0x774d, CRL_REG_LEN_08BIT, 0x7c}, + {0x774e, CRL_REG_LEN_08BIT, 0x03}, + {0x774f, CRL_REG_LEN_08BIT, 0x92}, + {0x7750, CRL_REG_LEN_08BIT, 0x05}, + {0x7751, CRL_REG_LEN_08BIT, 0x83}, + {0x7752, CRL_REG_LEN_08BIT, 0x05}, + {0x7753, CRL_REG_LEN_08BIT, 0x03}, + {0x7754, CRL_REG_LEN_08BIT, 0x00}, + {0x7755, CRL_REG_LEN_08BIT, 0x00}, + {0x7756, CRL_REG_LEN_08BIT, 0x05}, + {0x7757, CRL_REG_LEN_08BIT, 0x32}, + {0x7758, CRL_REG_LEN_08BIT, 0x05}, + {0x7759, CRL_REG_LEN_08BIT, 0x30}, + {0x775a, CRL_REG_LEN_08BIT, 0x00}, + {0x775b, CRL_REG_LEN_08BIT, 0x02}, + {0x775c, CRL_REG_LEN_08BIT, 0x05}, + {0x775d, CRL_REG_LEN_08BIT, 0x78}, + {0x775e, CRL_REG_LEN_08BIT, 0x00}, + {0x775f, CRL_REG_LEN_08BIT, 0x01}, + {0x7760, CRL_REG_LEN_08BIT, 0x05}, + {0x7761, CRL_REG_LEN_08BIT, 0x7c}, + {0x7762, CRL_REG_LEN_08BIT, 0x03}, + {0x7763, CRL_REG_LEN_08BIT, 0x91}, + {0x7764, CRL_REG_LEN_08BIT, 0x05}, + {0x7765, CRL_REG_LEN_08BIT, 0x83}, + {0x7766, CRL_REG_LEN_08BIT, 0x05}, + {0x7767, CRL_REG_LEN_08BIT, 0x03}, + {0x7768, CRL_REG_LEN_08BIT, 0x05}, + {0x7769, CRL_REG_LEN_08BIT, 0x32}, + {0x776a, CRL_REG_LEN_08BIT, 0x05}, + {0x776b, CRL_REG_LEN_08BIT, 0x30}, + {0x776c, CRL_REG_LEN_08BIT, 0x00}, + {0x776d, CRL_REG_LEN_08BIT, 0x02}, + {0x776e, CRL_REG_LEN_08BIT, 0x05}, + {0x776f, CRL_REG_LEN_08BIT, 0x78}, + {0x7770, CRL_REG_LEN_08BIT, 0x00}, + {0x7771, CRL_REG_LEN_08BIT, 0x01}, + {0x7772, CRL_REG_LEN_08BIT, 0x05}, + {0x7773, CRL_REG_LEN_08BIT, 0x7c}, + {0x7774, CRL_REG_LEN_08BIT, 0x03}, + {0x7775, CRL_REG_LEN_08BIT, 0x90}, + {0x7776, CRL_REG_LEN_08BIT, 0x05}, + {0x7777, CRL_REG_LEN_08BIT, 0x83}, + {0x7778, CRL_REG_LEN_08BIT, 0x05}, + {0x7779, CRL_REG_LEN_08BIT, 0x03}, + {0x777a, CRL_REG_LEN_08BIT, 0x05}, + {0x777b, CRL_REG_LEN_08BIT, 0x32}, + {0x777c, CRL_REG_LEN_08BIT, 0x05}, + 
{0x777d, CRL_REG_LEN_08BIT, 0x30}, + {0x777e, CRL_REG_LEN_08BIT, 0x00}, + {0x777f, CRL_REG_LEN_08BIT, 0x02}, + {0x7780, CRL_REG_LEN_08BIT, 0x05}, + {0x7781, CRL_REG_LEN_08BIT, 0x78}, + {0x7782, CRL_REG_LEN_08BIT, 0x00}, + {0x7783, CRL_REG_LEN_08BIT, 0x01}, + {0x7784, CRL_REG_LEN_08BIT, 0x05}, + {0x7785, CRL_REG_LEN_08BIT, 0x7c}, + {0x7786, CRL_REG_LEN_08BIT, 0x02}, + {0x7787, CRL_REG_LEN_08BIT, 0x90}, + {0x7788, CRL_REG_LEN_08BIT, 0x05}, + {0x7789, CRL_REG_LEN_08BIT, 0x03}, + {0x778a, CRL_REG_LEN_08BIT, 0x07}, + {0x778b, CRL_REG_LEN_08BIT, 0x00}, + {0x778c, CRL_REG_LEN_08BIT, 0x0f}, + {0x778d, CRL_REG_LEN_08BIT, 0x00}, + {0x778e, CRL_REG_LEN_08BIT, 0x08}, + {0x778f, CRL_REG_LEN_08BIT, 0x30}, + {0x7790, CRL_REG_LEN_08BIT, 0x08}, + {0x7791, CRL_REG_LEN_08BIT, 0xee}, + {0x7792, CRL_REG_LEN_08BIT, 0x0f}, + {0x7793, CRL_REG_LEN_08BIT, 0x00}, + {0x7794, CRL_REG_LEN_08BIT, 0x05}, + {0x7795, CRL_REG_LEN_08BIT, 0x33}, + {0x7796, CRL_REG_LEN_08BIT, 0x04}, + {0x7797, CRL_REG_LEN_08BIT, 0xe5}, + {0x7798, CRL_REG_LEN_08BIT, 0x06}, + {0x7799, CRL_REG_LEN_08BIT, 0x52}, + {0x779a, CRL_REG_LEN_08BIT, 0x04}, + {0x779b, CRL_REG_LEN_08BIT, 0xe4}, + {0x779c, CRL_REG_LEN_08BIT, 0x00}, + {0x779d, CRL_REG_LEN_08BIT, 0x00}, + {0x779e, CRL_REG_LEN_08BIT, 0x06}, + {0x779f, CRL_REG_LEN_08BIT, 0x5e}, + {0x77a0, CRL_REG_LEN_08BIT, 0x00}, + {0x77a1, CRL_REG_LEN_08BIT, 0x0f}, + {0x77a2, CRL_REG_LEN_08BIT, 0x06}, + {0x77a3, CRL_REG_LEN_08BIT, 0x1e}, + {0x77a4, CRL_REG_LEN_08BIT, 0x00}, + {0x77a5, CRL_REG_LEN_08BIT, 0x02}, + {0x77a6, CRL_REG_LEN_08BIT, 0x06}, + {0x77a7, CRL_REG_LEN_08BIT, 0xa2}, + {0x77a8, CRL_REG_LEN_08BIT, 0x00}, + {0x77a9, CRL_REG_LEN_08BIT, 0x01}, + {0x77aa, CRL_REG_LEN_08BIT, 0x06}, + {0x77ab, CRL_REG_LEN_08BIT, 0xae}, + {0x77ac, CRL_REG_LEN_08BIT, 0x00}, + {0x77ad, CRL_REG_LEN_08BIT, 0x03}, + {0x77ae, CRL_REG_LEN_08BIT, 0x05}, + {0x77af, CRL_REG_LEN_08BIT, 0x30}, + {0x77b0, CRL_REG_LEN_08BIT, 0x09}, + {0x77b1, CRL_REG_LEN_08BIT, 0x19}, + {0x77b2, CRL_REG_LEN_08BIT, 0x0f}, + 
{0x77b3, CRL_REG_LEN_08BIT, 0x00}, + {0x77b4, CRL_REG_LEN_08BIT, 0x05}, + {0x77b5, CRL_REG_LEN_08BIT, 0x33}, + {0x77b6, CRL_REG_LEN_08BIT, 0x04}, + {0x77b7, CRL_REG_LEN_08BIT, 0xe5}, + {0x77b8, CRL_REG_LEN_08BIT, 0x06}, + {0x77b9, CRL_REG_LEN_08BIT, 0x52}, + {0x77ba, CRL_REG_LEN_08BIT, 0x04}, + {0x77bb, CRL_REG_LEN_08BIT, 0xe4}, + {0x77bc, CRL_REG_LEN_08BIT, 0x00}, + {0x77bd, CRL_REG_LEN_08BIT, 0x00}, + {0x77be, CRL_REG_LEN_08BIT, 0x06}, + {0x77bf, CRL_REG_LEN_08BIT, 0x5e}, + {0x77c0, CRL_REG_LEN_08BIT, 0x00}, + {0x77c1, CRL_REG_LEN_08BIT, 0x0f}, + {0x77c2, CRL_REG_LEN_08BIT, 0x06}, + {0x77c3, CRL_REG_LEN_08BIT, 0x1e}, + {0x77c4, CRL_REG_LEN_08BIT, 0x00}, + {0x77c5, CRL_REG_LEN_08BIT, 0x02}, + {0x77c6, CRL_REG_LEN_08BIT, 0x06}, + {0x77c7, CRL_REG_LEN_08BIT, 0xa2}, + {0x77c8, CRL_REG_LEN_08BIT, 0x00}, + {0x77c9, CRL_REG_LEN_08BIT, 0x01}, + {0x77ca, CRL_REG_LEN_08BIT, 0x06}, + {0x77cb, CRL_REG_LEN_08BIT, 0xae}, + {0x77cc, CRL_REG_LEN_08BIT, 0x00}, + {0x77cd, CRL_REG_LEN_08BIT, 0x03}, + {0x77ce, CRL_REG_LEN_08BIT, 0x05}, + {0x77cf, CRL_REG_LEN_08BIT, 0x30}, + {0x77d0, CRL_REG_LEN_08BIT, 0x0f}, + {0x77d1, CRL_REG_LEN_08BIT, 0x00}, + {0x77d2, CRL_REG_LEN_08BIT, 0x00}, + {0x77d3, CRL_REG_LEN_08BIT, 0x00}, + {0x77d4, CRL_REG_LEN_08BIT, 0x00}, + {0x77d5, CRL_REG_LEN_08BIT, 0x02}, + {0x77d6, CRL_REG_LEN_08BIT, 0x04}, + {0x77d7, CRL_REG_LEN_08BIT, 0xe5}, + {0x77d8, CRL_REG_LEN_08BIT, 0x04}, + {0x77d9, CRL_REG_LEN_08BIT, 0xe4}, + {0x77da, CRL_REG_LEN_08BIT, 0x05}, + {0x77db, CRL_REG_LEN_08BIT, 0x33}, + {0x77dc, CRL_REG_LEN_08BIT, 0x07}, + {0x77dd, CRL_REG_LEN_08BIT, 0x10}, + {0x77de, CRL_REG_LEN_08BIT, 0x00}, + {0x77df, CRL_REG_LEN_08BIT, 0x00}, + {0x77e0, CRL_REG_LEN_08BIT, 0x01}, + {0x77e1, CRL_REG_LEN_08BIT, 0xbb}, + {0x77e2, CRL_REG_LEN_08BIT, 0x00}, + {0x77e3, CRL_REG_LEN_08BIT, 0x00}, + {0x77e4, CRL_REG_LEN_08BIT, 0x01}, + {0x77e5, CRL_REG_LEN_08BIT, 0xaa}, + {0x77e6, CRL_REG_LEN_08BIT, 0x00}, + {0x77e7, CRL_REG_LEN_08BIT, 0x00}, + {0x77e8, CRL_REG_LEN_08BIT, 0x01}, + 
{0x77e9, CRL_REG_LEN_08BIT, 0x99}, + {0x77ea, CRL_REG_LEN_08BIT, 0x00}, + {0x77eb, CRL_REG_LEN_08BIT, 0x00}, + {0x77ec, CRL_REG_LEN_08BIT, 0x01}, + {0x77ed, CRL_REG_LEN_08BIT, 0x88}, + {0x77ee, CRL_REG_LEN_08BIT, 0x00}, + {0x77ef, CRL_REG_LEN_08BIT, 0x00}, + {0x77f0, CRL_REG_LEN_08BIT, 0x01}, + {0x77f1, CRL_REG_LEN_08BIT, 0x77}, + {0x77f2, CRL_REG_LEN_08BIT, 0x00}, + {0x77f3, CRL_REG_LEN_08BIT, 0x00}, + {0x77f4, CRL_REG_LEN_08BIT, 0x01}, + {0x77f5, CRL_REG_LEN_08BIT, 0x66}, + {0x77f6, CRL_REG_LEN_08BIT, 0x00}, + {0x77f7, CRL_REG_LEN_08BIT, 0x00}, + {0x77f8, CRL_REG_LEN_08BIT, 0x01}, + {0x77f9, CRL_REG_LEN_08BIT, 0x55}, + {0x77fa, CRL_REG_LEN_08BIT, 0x00}, + {0x77fb, CRL_REG_LEN_08BIT, 0x00}, + {0x77fc, CRL_REG_LEN_08BIT, 0x01}, + {0x77fd, CRL_REG_LEN_08BIT, 0x44}, + {0x77fe, CRL_REG_LEN_08BIT, 0x00}, + {0x77ff, CRL_REG_LEN_08BIT, 0x00}, + {0x7800, CRL_REG_LEN_08BIT, 0x01}, + {0x7801, CRL_REG_LEN_08BIT, 0x33}, + {0x7802, CRL_REG_LEN_08BIT, 0x00}, + {0x7803, CRL_REG_LEN_08BIT, 0x00}, + {0x7804, CRL_REG_LEN_08BIT, 0x01}, + {0x7805, CRL_REG_LEN_08BIT, 0x22}, + {0x7806, CRL_REG_LEN_08BIT, 0x00}, + {0x7807, CRL_REG_LEN_08BIT, 0x00}, + {0x7808, CRL_REG_LEN_08BIT, 0x01}, + {0x7809, CRL_REG_LEN_08BIT, 0x11}, + {0x780a, CRL_REG_LEN_08BIT, 0x00}, + {0x780b, CRL_REG_LEN_08BIT, 0x00}, + {0x780c, CRL_REG_LEN_08BIT, 0x01}, + {0x780d, CRL_REG_LEN_08BIT, 0x00}, + {0x780e, CRL_REG_LEN_08BIT, 0x01}, + {0x780f, CRL_REG_LEN_08BIT, 0xff}, + {0x7810, CRL_REG_LEN_08BIT, 0x07}, + {0x7811, CRL_REG_LEN_08BIT, 0x00}, + {0x7812, CRL_REG_LEN_08BIT, 0x02}, + {0x7813, CRL_REG_LEN_08BIT, 0xa0}, + {0x7814, CRL_REG_LEN_08BIT, 0x0f}, + {0x7815, CRL_REG_LEN_08BIT, 0x00}, + {0x7816, CRL_REG_LEN_08BIT, 0x08}, + {0x7817, CRL_REG_LEN_08BIT, 0x35}, + {0x7818, CRL_REG_LEN_08BIT, 0x06}, + {0x7819, CRL_REG_LEN_08BIT, 0x52}, + {0x781a, CRL_REG_LEN_08BIT, 0x04}, + {0x781b, CRL_REG_LEN_08BIT, 0xe4}, + {0x781c, CRL_REG_LEN_08BIT, 0x00}, + {0x781d, CRL_REG_LEN_08BIT, 0x00}, + {0x781e, CRL_REG_LEN_08BIT, 0x06}, + 
{0x781f, CRL_REG_LEN_08BIT, 0x5e}, + {0x7820, CRL_REG_LEN_08BIT, 0x05}, + {0x7821, CRL_REG_LEN_08BIT, 0x33}, + {0x7822, CRL_REG_LEN_08BIT, 0x09}, + {0x7823, CRL_REG_LEN_08BIT, 0x19}, + {0x7824, CRL_REG_LEN_08BIT, 0x06}, + {0x7825, CRL_REG_LEN_08BIT, 0x1e}, + {0x7826, CRL_REG_LEN_08BIT, 0x05}, + {0x7827, CRL_REG_LEN_08BIT, 0x33}, + {0x7828, CRL_REG_LEN_08BIT, 0x00}, + {0x7829, CRL_REG_LEN_08BIT, 0x01}, + {0x782a, CRL_REG_LEN_08BIT, 0x06}, + {0x782b, CRL_REG_LEN_08BIT, 0x24}, + {0x782c, CRL_REG_LEN_08BIT, 0x06}, + {0x782d, CRL_REG_LEN_08BIT, 0x20}, + {0x782e, CRL_REG_LEN_08BIT, 0x0f}, + {0x782f, CRL_REG_LEN_08BIT, 0x00}, + {0x7830, CRL_REG_LEN_08BIT, 0x08}, + {0x7831, CRL_REG_LEN_08BIT, 0x35}, + {0x7832, CRL_REG_LEN_08BIT, 0x07}, + {0x7833, CRL_REG_LEN_08BIT, 0x10}, + {0x7834, CRL_REG_LEN_08BIT, 0x00}, + {0x7835, CRL_REG_LEN_08BIT, 0x00}, + {0x7836, CRL_REG_LEN_08BIT, 0x01}, + {0x7837, CRL_REG_LEN_08BIT, 0xbb}, + {0x7838, CRL_REG_LEN_08BIT, 0x00}, + {0x7839, CRL_REG_LEN_08BIT, 0x00}, + {0x783a, CRL_REG_LEN_08BIT, 0x01}, + {0x783b, CRL_REG_LEN_08BIT, 0xaa}, + {0x783c, CRL_REG_LEN_08BIT, 0x00}, + {0x783d, CRL_REG_LEN_08BIT, 0x00}, + {0x783e, CRL_REG_LEN_08BIT, 0x01}, + {0x783f, CRL_REG_LEN_08BIT, 0x99}, + {0x7840, CRL_REG_LEN_08BIT, 0x00}, + {0x7841, CRL_REG_LEN_08BIT, 0x00}, + {0x7842, CRL_REG_LEN_08BIT, 0x01}, + {0x7843, CRL_REG_LEN_08BIT, 0x88}, + {0x7844, CRL_REG_LEN_08BIT, 0x00}, + {0x7845, CRL_REG_LEN_08BIT, 0x00}, + {0x7846, CRL_REG_LEN_08BIT, 0x01}, + {0x7847, CRL_REG_LEN_08BIT, 0x77}, + {0x7848, CRL_REG_LEN_08BIT, 0x00}, + {0x7849, CRL_REG_LEN_08BIT, 0x00}, + {0x784a, CRL_REG_LEN_08BIT, 0x01}, + {0x784b, CRL_REG_LEN_08BIT, 0x66}, + {0x784c, CRL_REG_LEN_08BIT, 0x00}, + {0x784d, CRL_REG_LEN_08BIT, 0x00}, + {0x784e, CRL_REG_LEN_08BIT, 0x01}, + {0x784f, CRL_REG_LEN_08BIT, 0x55}, + {0x7850, CRL_REG_LEN_08BIT, 0x00}, + {0x7851, CRL_REG_LEN_08BIT, 0x00}, + {0x7852, CRL_REG_LEN_08BIT, 0x01}, + {0x7853, CRL_REG_LEN_08BIT, 0x44}, + {0x7854, CRL_REG_LEN_08BIT, 0x00}, + 
{0x7855, CRL_REG_LEN_08BIT, 0x00}, + {0x7856, CRL_REG_LEN_08BIT, 0x01}, + {0x7857, CRL_REG_LEN_08BIT, 0x33}, + {0x7858, CRL_REG_LEN_08BIT, 0x00}, + {0x7859, CRL_REG_LEN_08BIT, 0x00}, + {0x785a, CRL_REG_LEN_08BIT, 0x01}, + {0x785b, CRL_REG_LEN_08BIT, 0x22}, + {0x785c, CRL_REG_LEN_08BIT, 0x00}, + {0x785d, CRL_REG_LEN_08BIT, 0x00}, + {0x785e, CRL_REG_LEN_08BIT, 0x01}, + {0x785f, CRL_REG_LEN_08BIT, 0x11}, + {0x7860, CRL_REG_LEN_08BIT, 0x00}, + {0x7861, CRL_REG_LEN_08BIT, 0x00}, + {0x7862, CRL_REG_LEN_08BIT, 0x01}, + {0x7863, CRL_REG_LEN_08BIT, 0x00}, + {0x7864, CRL_REG_LEN_08BIT, 0x07}, + {0x7865, CRL_REG_LEN_08BIT, 0x00}, + {0x7866, CRL_REG_LEN_08BIT, 0x01}, + {0x7867, CRL_REG_LEN_08BIT, 0xff}, + {0x7868, CRL_REG_LEN_08BIT, 0x02}, + {0x7869, CRL_REG_LEN_08BIT, 0xa0}, + {0x786a, CRL_REG_LEN_08BIT, 0x0f}, + {0x786b, CRL_REG_LEN_08BIT, 0x00}, + {0x786c, CRL_REG_LEN_08BIT, 0x08}, + {0x786d, CRL_REG_LEN_08BIT, 0x3a}, + {0x786e, CRL_REG_LEN_08BIT, 0x08}, + {0x786f, CRL_REG_LEN_08BIT, 0x6a}, + {0x7870, CRL_REG_LEN_08BIT, 0x0f}, + {0x7871, CRL_REG_LEN_08BIT, 0x00}, + {0x7872, CRL_REG_LEN_08BIT, 0x04}, + {0x7873, CRL_REG_LEN_08BIT, 0xc0}, + {0x7874, CRL_REG_LEN_08BIT, 0x09}, + {0x7875, CRL_REG_LEN_08BIT, 0x19}, + {0x7876, CRL_REG_LEN_08BIT, 0x04}, + {0x7877, CRL_REG_LEN_08BIT, 0x99}, + {0x7878, CRL_REG_LEN_08BIT, 0x07}, + {0x7879, CRL_REG_LEN_08BIT, 0x14}, + {0x787a, CRL_REG_LEN_08BIT, 0x00}, + {0x787b, CRL_REG_LEN_08BIT, 0x01}, + {0x787c, CRL_REG_LEN_08BIT, 0x04}, + {0x787d, CRL_REG_LEN_08BIT, 0xa4}, + {0x787e, CRL_REG_LEN_08BIT, 0x00}, + {0x787f, CRL_REG_LEN_08BIT, 0x07}, + {0x7880, CRL_REG_LEN_08BIT, 0x04}, + {0x7881, CRL_REG_LEN_08BIT, 0xa6}, + {0x7882, CRL_REG_LEN_08BIT, 0x00}, + {0x7883, CRL_REG_LEN_08BIT, 0x00}, + {0x7884, CRL_REG_LEN_08BIT, 0x04}, + {0x7885, CRL_REG_LEN_08BIT, 0xa0}, + {0x7886, CRL_REG_LEN_08BIT, 0x04}, + {0x7887, CRL_REG_LEN_08BIT, 0x80}, + {0x7888, CRL_REG_LEN_08BIT, 0x04}, + {0x7889, CRL_REG_LEN_08BIT, 0x00}, + {0x788a, CRL_REG_LEN_08BIT, 0x05}, + 
{0x788b, CRL_REG_LEN_08BIT, 0x03}, + {0x788c, CRL_REG_LEN_08BIT, 0x06}, + {0x788d, CRL_REG_LEN_08BIT, 0x00}, + {0x788e, CRL_REG_LEN_08BIT, 0x0f}, + {0x788f, CRL_REG_LEN_08BIT, 0x00}, + {0x7890, CRL_REG_LEN_08BIT, 0x0f}, + {0x7891, CRL_REG_LEN_08BIT, 0x00}, + {0x7892, CRL_REG_LEN_08BIT, 0x0f}, + {0x7893, CRL_REG_LEN_08BIT, 0x00}, + {0x30a3, CRL_REG_LEN_08BIT, 0x00}, + {0x30a7, CRL_REG_LEN_08BIT, 0x48}, + {0x30ab, CRL_REG_LEN_08BIT, 0x04}, + {0x30af, CRL_REG_LEN_08BIT, 0x40}, + {0x3001, CRL_REG_LEN_08BIT, 0x32}, + {0x3005, CRL_REG_LEN_08BIT, 0x13}, + {0x3014, CRL_REG_LEN_08BIT, 0x44}, + {0x3196, CRL_REG_LEN_08BIT, 0x00}, + {0x3197, CRL_REG_LEN_08BIT, 0x0a}, + {0x3195, CRL_REG_LEN_08BIT, 0x04}, + {0x31e3, CRL_REG_LEN_08BIT, 0x02}, + {0x31e4, CRL_REG_LEN_08BIT, 0x10}, + {0x3250, CRL_REG_LEN_08BIT, 0xf7}, +}; + +/* ov2775_1928x1088_linearlcg_30fps_mipi960_regset */ +static struct crl_register_write_rep + ov2775_linear_lcg_30fps_mipi960_regset[] = { + {0x3000, CRL_REG_LEN_08BIT, 0x02}, + {0x3001, CRL_REG_LEN_08BIT, 0x28}, + {0x3002, CRL_REG_LEN_08BIT, 0x03}, + {0x3003, CRL_REG_LEN_08BIT, 0x01}, + {0x3004, CRL_REG_LEN_08BIT, 0x02}, + {0x3005, CRL_REG_LEN_08BIT, 0x26}, + {0x3006, CRL_REG_LEN_08BIT, 0x00}, + {0x3007, CRL_REG_LEN_08BIT, 0x07}, + {0x3008, CRL_REG_LEN_08BIT, 0x01}, + {0x3009, CRL_REG_LEN_08BIT, 0x00}, + {0x300c, CRL_REG_LEN_08BIT, 0x6c}, + {0x300e, CRL_REG_LEN_08BIT, 0x80}, + {0x300f, CRL_REG_LEN_08BIT, 0x00}, + {0x3012, CRL_REG_LEN_08BIT, 0x00}, + {0x3014, CRL_REG_LEN_08BIT, 0xc4}, + {0x3015, CRL_REG_LEN_08BIT, 0x00}, + {0x3017, CRL_REG_LEN_08BIT, 0x00}, + {0x3018, CRL_REG_LEN_08BIT, 0x00}, + {0x3019, CRL_REG_LEN_08BIT, 0x00}, + {0x301a, CRL_REG_LEN_08BIT, 0x00}, + {0x301b, CRL_REG_LEN_08BIT, 0x0e}, + {0x301e, CRL_REG_LEN_08BIT, 0x17}, + {0x301f, CRL_REG_LEN_08BIT, 0xe1}, + {0x3030, CRL_REG_LEN_08BIT, 0x02}, + {0x3031, CRL_REG_LEN_08BIT, 0x62}, + {0x3032, CRL_REG_LEN_08BIT, 0xf0}, + {0x3033, CRL_REG_LEN_08BIT, 0x30}, + {0x3034, CRL_REG_LEN_08BIT, 0x3f}, + 
{0x3035, CRL_REG_LEN_08BIT, 0x5f}, + {0x3036, CRL_REG_LEN_08BIT, 0x02}, + {0x3037, CRL_REG_LEN_08BIT, 0x9f}, + {0x3038, CRL_REG_LEN_08BIT, 0x04}, + {0x3039, CRL_REG_LEN_08BIT, 0xb7}, + {0x303a, CRL_REG_LEN_08BIT, 0x04}, + {0x303b, CRL_REG_LEN_08BIT, 0x07}, + {0x303c, CRL_REG_LEN_08BIT, 0xf0}, + {0x303d, CRL_REG_LEN_08BIT, 0x00}, + {0x303e, CRL_REG_LEN_08BIT, 0x0b}, + {0x303f, CRL_REG_LEN_08BIT, 0xe3}, + {0x3040, CRL_REG_LEN_08BIT, 0xf3}, + {0x3041, CRL_REG_LEN_08BIT, 0x29}, + {0x3042, CRL_REG_LEN_08BIT, 0xf6}, + {0x3043, CRL_REG_LEN_08BIT, 0x65}, + {0x3044, CRL_REG_LEN_08BIT, 0x06}, + {0x3045, CRL_REG_LEN_08BIT, 0x0f}, + {0x3046, CRL_REG_LEN_08BIT, 0x59}, + {0x3047, CRL_REG_LEN_08BIT, 0x07}, + {0x3048, CRL_REG_LEN_08BIT, 0x82}, + {0x3049, CRL_REG_LEN_08BIT, 0xcf}, + {0x304a, CRL_REG_LEN_08BIT, 0x12}, + {0x304b, CRL_REG_LEN_08BIT, 0x40}, + {0x304c, CRL_REG_LEN_08BIT, 0x33}, + {0x304d, CRL_REG_LEN_08BIT, 0xa4}, + {0x304e, CRL_REG_LEN_08BIT, 0x0b}, + {0x304f, CRL_REG_LEN_08BIT, 0x3d}, + {0x3050, CRL_REG_LEN_08BIT, 0x10}, + {0x3060, CRL_REG_LEN_08BIT, 0x00}, + {0x3061, CRL_REG_LEN_08BIT, 0x64}, + {0x3062, CRL_REG_LEN_08BIT, 0x00}, + {0x3063, CRL_REG_LEN_08BIT, 0xe4}, + {0x3066, CRL_REG_LEN_08BIT, 0x80}, + {0x3080, CRL_REG_LEN_08BIT, 0x00}, + {0x3081, CRL_REG_LEN_08BIT, 0x00}, + {0x3082, CRL_REG_LEN_08BIT, 0x01}, + {0x3083, CRL_REG_LEN_08BIT, 0xe3}, + {0x3084, CRL_REG_LEN_08BIT, 0x06}, + {0x3085, CRL_REG_LEN_08BIT, 0x00}, + {0x3086, CRL_REG_LEN_08BIT, 0x10}, + {0x3087, CRL_REG_LEN_08BIT, 0x10}, + {0x3089, CRL_REG_LEN_08BIT, 0x00}, + {0x308a, CRL_REG_LEN_08BIT, 0x01}, + {0x3093, CRL_REG_LEN_08BIT, 0x00}, + {0x30a0, CRL_REG_LEN_08BIT, 0x00}, + {0x30a1, CRL_REG_LEN_08BIT, 0x04}, + {0x30a2, CRL_REG_LEN_08BIT, 0x00}, + {0x30a3, CRL_REG_LEN_08BIT, 0x08}, + {0x30a4, CRL_REG_LEN_08BIT, 0x07}, + {0x30a5, CRL_REG_LEN_08BIT, 0x8b}, + {0x30a6, CRL_REG_LEN_08BIT, 0x04}, + {0x30a7, CRL_REG_LEN_08BIT, 0x3f}, + {0x30a8, CRL_REG_LEN_08BIT, 0x00}, + {0x30a9, CRL_REG_LEN_08BIT, 0x04}, + 
{0x30aa, CRL_REG_LEN_08BIT, 0x00}, + {0x30ab, CRL_REG_LEN_08BIT, 0x00}, + {0x30ac, CRL_REG_LEN_08BIT, 0x07}, + {0x30ad, CRL_REG_LEN_08BIT, 0x80}, + {0x30ae, CRL_REG_LEN_08BIT, 0x04}, + {0x30af, CRL_REG_LEN_08BIT, 0x40}, + {0x30b4, CRL_REG_LEN_08BIT, 0x00}, + {0x30b5, CRL_REG_LEN_08BIT, 0x00}, + {0x30ba, CRL_REG_LEN_08BIT, 0x10}, + {0x30bc, CRL_REG_LEN_08BIT, 0x00}, + {0x30bd, CRL_REG_LEN_08BIT, 0x03}, + {0x30be, CRL_REG_LEN_08BIT, 0x5c}, + {0x30bf, CRL_REG_LEN_08BIT, 0x00}, + {0x30c0, CRL_REG_LEN_08BIT, 0x01}, + {0x30c1, CRL_REG_LEN_08BIT, 0x00}, + {0x30c2, CRL_REG_LEN_08BIT, 0x20}, + {0x30c3, CRL_REG_LEN_08BIT, 0x00}, + {0x30c4, CRL_REG_LEN_08BIT, 0x4a}, + {0x30c5, CRL_REG_LEN_08BIT, 0x00}, + {0x30c7, CRL_REG_LEN_08BIT, 0x00}, + {0x30c8, CRL_REG_LEN_08BIT, 0x00}, + {0x30d1, CRL_REG_LEN_08BIT, 0x00}, + {0x30d2, CRL_REG_LEN_08BIT, 0x00}, + {0x30d3, CRL_REG_LEN_08BIT, 0x80}, + {0x30d4, CRL_REG_LEN_08BIT, 0x00}, + {0x30d9, CRL_REG_LEN_08BIT, 0x09}, + {0x30da, CRL_REG_LEN_08BIT, 0x64}, + {0x30dd, CRL_REG_LEN_08BIT, 0x00}, + {0x30de, CRL_REG_LEN_08BIT, 0x16}, + {0x30df, CRL_REG_LEN_08BIT, 0x00}, + {0x30e0, CRL_REG_LEN_08BIT, 0x17}, + {0x30e1, CRL_REG_LEN_08BIT, 0x00}, + {0x30e2, CRL_REG_LEN_08BIT, 0x18}, + {0x30e3, CRL_REG_LEN_08BIT, 0x10}, + {0x30e4, CRL_REG_LEN_08BIT, 0x04}, + {0x30e5, CRL_REG_LEN_08BIT, 0x00}, + {0x30e6, CRL_REG_LEN_08BIT, 0x00}, + {0x30e7, CRL_REG_LEN_08BIT, 0x00}, + {0x30e8, CRL_REG_LEN_08BIT, 0x00}, + {0x30e9, CRL_REG_LEN_08BIT, 0x00}, + {0x30ea, CRL_REG_LEN_08BIT, 0x00}, + {0x30eb, CRL_REG_LEN_08BIT, 0x00}, + {0x30ec, CRL_REG_LEN_08BIT, 0x00}, + {0x30ed, CRL_REG_LEN_08BIT, 0x00}, + {0x3101, CRL_REG_LEN_08BIT, 0x00}, + {0x3102, CRL_REG_LEN_08BIT, 0x00}, + {0x3103, CRL_REG_LEN_08BIT, 0x00}, + {0x3104, CRL_REG_LEN_08BIT, 0x00}, + {0x3105, CRL_REG_LEN_08BIT, 0x8c}, + {0x3106, CRL_REG_LEN_08BIT, 0x87}, + {0x3107, CRL_REG_LEN_08BIT, 0xc0}, + {0x3108, CRL_REG_LEN_08BIT, 0x9d}, + {0x3109, CRL_REG_LEN_08BIT, 0x8d}, + {0x310a, CRL_REG_LEN_08BIT, 0x8d}, + 
{0x310b, CRL_REG_LEN_08BIT, 0x6a}, + {0x310c, CRL_REG_LEN_08BIT, 0x3a}, + {0x310d, CRL_REG_LEN_08BIT, 0x5a}, + {0x310e, CRL_REG_LEN_08BIT, 0x00}, + {0x3120, CRL_REG_LEN_08BIT, 0x00}, + {0x3121, CRL_REG_LEN_08BIT, 0x00}, + {0x3122, CRL_REG_LEN_08BIT, 0x00}, + {0x3123, CRL_REG_LEN_08BIT, 0xf0}, + {0x3124, CRL_REG_LEN_08BIT, 0x00}, + {0x3125, CRL_REG_LEN_08BIT, 0x70}, + {0x3126, CRL_REG_LEN_08BIT, 0x1f}, + {0x3127, CRL_REG_LEN_08BIT, 0x0f}, + {0x3128, CRL_REG_LEN_08BIT, 0x00}, + {0x3129, CRL_REG_LEN_08BIT, 0x3a}, + {0x312a, CRL_REG_LEN_08BIT, 0x02}, + {0x312b, CRL_REG_LEN_08BIT, 0x0f}, + {0x312c, CRL_REG_LEN_08BIT, 0x00}, + {0x312d, CRL_REG_LEN_08BIT, 0x0f}, + {0x312e, CRL_REG_LEN_08BIT, 0x1d}, + {0x312f, CRL_REG_LEN_08BIT, 0x00}, + {0x3130, CRL_REG_LEN_08BIT, 0x00}, + {0x3131, CRL_REG_LEN_08BIT, 0x00}, + {0x3132, CRL_REG_LEN_08BIT, 0x00}, + {0x3140, CRL_REG_LEN_08BIT, 0x0a}, + {0x3141, CRL_REG_LEN_08BIT, 0x03}, + {0x3142, CRL_REG_LEN_08BIT, 0x00}, + {0x3143, CRL_REG_LEN_08BIT, 0x00}, + {0x3144, CRL_REG_LEN_08BIT, 0x00}, + {0x3145, CRL_REG_LEN_08BIT, 0x00}, + {0x3146, CRL_REG_LEN_08BIT, 0x00}, + {0x3147, CRL_REG_LEN_08BIT, 0x00}, + {0x3148, CRL_REG_LEN_08BIT, 0x00}, + {0x3149, CRL_REG_LEN_08BIT, 0x00}, + {0x314a, CRL_REG_LEN_08BIT, 0x00}, + {0x314b, CRL_REG_LEN_08BIT, 0x00}, + {0x314c, CRL_REG_LEN_08BIT, 0x00}, + {0x314d, CRL_REG_LEN_08BIT, 0x00}, + {0x314e, CRL_REG_LEN_08BIT, 0x1c}, + {0x314f, CRL_REG_LEN_08BIT, 0xff}, + {0x3150, CRL_REG_LEN_08BIT, 0xff}, + {0x3151, CRL_REG_LEN_08BIT, 0xff}, + {0x3152, CRL_REG_LEN_08BIT, 0x10}, + {0x3153, CRL_REG_LEN_08BIT, 0x10}, + {0x3154, CRL_REG_LEN_08BIT, 0x10}, + {0x3155, CRL_REG_LEN_08BIT, 0x00}, + {0x3156, CRL_REG_LEN_08BIT, 0x03}, + {0x3157, CRL_REG_LEN_08BIT, 0x00}, + {0x3158, CRL_REG_LEN_08BIT, 0x0f}, + {0x3159, CRL_REG_LEN_08BIT, 0xff}, + {0x315a, CRL_REG_LEN_08BIT, 0x01}, + {0x315b, CRL_REG_LEN_08BIT, 0x00}, + {0x315c, CRL_REG_LEN_08BIT, 0x01}, + {0x315d, CRL_REG_LEN_08BIT, 0x00}, + {0x315e, CRL_REG_LEN_08BIT, 0x01}, + 
{0x315f, CRL_REG_LEN_08BIT, 0x00}, + {0x3160, CRL_REG_LEN_08BIT, 0x01}, + {0x3161, CRL_REG_LEN_08BIT, 0x00}, + {0x3162, CRL_REG_LEN_08BIT, 0x01}, + {0x3163, CRL_REG_LEN_08BIT, 0x00}, + {0x3164, CRL_REG_LEN_08BIT, 0x01}, + {0x3165, CRL_REG_LEN_08BIT, 0x00}, + {0x3190, CRL_REG_LEN_08BIT, 0x08}, + {0x3191, CRL_REG_LEN_08BIT, 0x99}, + {0x3193, CRL_REG_LEN_08BIT, 0x08}, + {0x3194, CRL_REG_LEN_08BIT, 0x13}, + {0x3195, CRL_REG_LEN_08BIT, 0x33}, + {0x3196, CRL_REG_LEN_08BIT, 0x00}, + {0x3197, CRL_REG_LEN_08BIT, 0x10}, + {0x3198, CRL_REG_LEN_08BIT, 0x00}, + {0x3199, CRL_REG_LEN_08BIT, 0x7f}, + {0x319a, CRL_REG_LEN_08BIT, 0x80}, + {0x319b, CRL_REG_LEN_08BIT, 0xff}, + {0x319c, CRL_REG_LEN_08BIT, 0x80}, + {0x319d, CRL_REG_LEN_08BIT, 0xbf}, + {0x319e, CRL_REG_LEN_08BIT, 0xc0}, + {0x319f, CRL_REG_LEN_08BIT, 0xff}, + {0x31a0, CRL_REG_LEN_08BIT, 0x24}, + {0x31a1, CRL_REG_LEN_08BIT, 0x55}, + {0x31a2, CRL_REG_LEN_08BIT, 0x00}, + {0x31a3, CRL_REG_LEN_08BIT, 0x08}, + {0x31a6, CRL_REG_LEN_08BIT, 0x00}, + {0x31a7, CRL_REG_LEN_08BIT, 0x00}, + {0x31b0, CRL_REG_LEN_08BIT, 0x00}, + {0x31b1, CRL_REG_LEN_08BIT, 0x00}, + {0x31b2, CRL_REG_LEN_08BIT, 0x02}, + {0x31b3, CRL_REG_LEN_08BIT, 0x00}, + {0x31b4, CRL_REG_LEN_08BIT, 0x00}, + {0x31b5, CRL_REG_LEN_08BIT, 0x01}, + {0x31b6, CRL_REG_LEN_08BIT, 0x00}, + {0x31b7, CRL_REG_LEN_08BIT, 0x00}, + {0x31b8, CRL_REG_LEN_08BIT, 0x00}, + {0x31b9, CRL_REG_LEN_08BIT, 0x00}, + {0x31ba, CRL_REG_LEN_08BIT, 0x00}, + {0x31d0, CRL_REG_LEN_08BIT, 0x3c}, + {0x31d1, CRL_REG_LEN_08BIT, 0x34}, + {0x31d2, CRL_REG_LEN_08BIT, 0x3c}, + {0x31d3, CRL_REG_LEN_08BIT, 0x00}, + {0x31d4, CRL_REG_LEN_08BIT, 0x2d}, + {0x31d5, CRL_REG_LEN_08BIT, 0x00}, + {0x31d6, CRL_REG_LEN_08BIT, 0x01}, + {0x31d7, CRL_REG_LEN_08BIT, 0x06}, + {0x31d8, CRL_REG_LEN_08BIT, 0x00}, + {0x31d9, CRL_REG_LEN_08BIT, 0x64}, + {0x31da, CRL_REG_LEN_08BIT, 0x00}, + {0x31db, CRL_REG_LEN_08BIT, 0x30}, + {0x31dc, CRL_REG_LEN_08BIT, 0x04}, + {0x31dd, CRL_REG_LEN_08BIT, 0x69}, + {0x31de, CRL_REG_LEN_08BIT, 0x0a}, + 
{0x31df, CRL_REG_LEN_08BIT, 0x3c}, + {0x31e0, CRL_REG_LEN_08BIT, 0x04}, + {0x31e1, CRL_REG_LEN_08BIT, 0x32}, + {0x31e2, CRL_REG_LEN_08BIT, 0x00}, + {0x31e3, CRL_REG_LEN_08BIT, 0x00}, + {0x31e4, CRL_REG_LEN_08BIT, 0x08}, + {0x31e5, CRL_REG_LEN_08BIT, 0x80}, + {0x31e6, CRL_REG_LEN_08BIT, 0x00}, + {0x31e7, CRL_REG_LEN_08BIT, 0x2c}, + {0x31e8, CRL_REG_LEN_08BIT, 0x6c}, + {0x31e9, CRL_REG_LEN_08BIT, 0xac}, + {0x31ea, CRL_REG_LEN_08BIT, 0xec}, + {0x31eb, CRL_REG_LEN_08BIT, 0x3f}, + {0x31ec, CRL_REG_LEN_08BIT, 0x0f}, + {0x31ed, CRL_REG_LEN_08BIT, 0x20}, + {0x31ee, CRL_REG_LEN_08BIT, 0x04}, + {0x31ef, CRL_REG_LEN_08BIT, 0x48}, + {0x31f0, CRL_REG_LEN_08BIT, 0x07}, + {0x31f1, CRL_REG_LEN_08BIT, 0x90}, + {0x31f2, CRL_REG_LEN_08BIT, 0x04}, + {0x31f3, CRL_REG_LEN_08BIT, 0x48}, + {0x31f4, CRL_REG_LEN_08BIT, 0x07}, + {0x31f5, CRL_REG_LEN_08BIT, 0x90}, + {0x31f6, CRL_REG_LEN_08BIT, 0x04}, + {0x31f7, CRL_REG_LEN_08BIT, 0x48}, + {0x31f8, CRL_REG_LEN_08BIT, 0x07}, + {0x31f9, CRL_REG_LEN_08BIT, 0x90}, + {0x31fa, CRL_REG_LEN_08BIT, 0x04}, + {0x31fb, CRL_REG_LEN_08BIT, 0x48}, + {0x31fd, CRL_REG_LEN_08BIT, 0xcb}, + {0x31fe, CRL_REG_LEN_08BIT, 0x01}, + {0x31ff, CRL_REG_LEN_08BIT, 0x03}, + {0x3200, CRL_REG_LEN_08BIT, 0x00}, + {0x3201, CRL_REG_LEN_08BIT, 0xff}, + {0x3202, CRL_REG_LEN_08BIT, 0x00}, + {0x3203, CRL_REG_LEN_08BIT, 0xff}, + {0x3204, CRL_REG_LEN_08BIT, 0xff}, + {0x3205, CRL_REG_LEN_08BIT, 0xff}, + {0x3206, CRL_REG_LEN_08BIT, 0xff}, + {0x3207, CRL_REG_LEN_08BIT, 0xff}, + {0x3208, CRL_REG_LEN_08BIT, 0xff}, + {0x3209, CRL_REG_LEN_08BIT, 0xff}, + {0x320a, CRL_REG_LEN_08BIT, 0xff}, + {0x320b, CRL_REG_LEN_08BIT, 0x1b}, + {0x320c, CRL_REG_LEN_08BIT, 0x1f}, + {0x320d, CRL_REG_LEN_08BIT, 0x1e}, + {0x320e, CRL_REG_LEN_08BIT, 0x30}, + {0x320f, CRL_REG_LEN_08BIT, 0x2d}, + {0x3210, CRL_REG_LEN_08BIT, 0x2c}, + {0x3211, CRL_REG_LEN_08BIT, 0x2b}, + {0x3212, CRL_REG_LEN_08BIT, 0x2a}, + {0x3213, CRL_REG_LEN_08BIT, 0x24}, + {0x3214, CRL_REG_LEN_08BIT, 0x22}, + {0x3215, CRL_REG_LEN_08BIT, 0x00}, + 
{0x3216, CRL_REG_LEN_08BIT, 0x04}, + {0x3217, CRL_REG_LEN_08BIT, 0x2c}, + {0x3218, CRL_REG_LEN_08BIT, 0x6c}, + {0x3219, CRL_REG_LEN_08BIT, 0xac}, + {0x321a, CRL_REG_LEN_08BIT, 0xec}, + {0x321b, CRL_REG_LEN_08BIT, 0x00}, + {0x3230, CRL_REG_LEN_08BIT, 0x3a}, + {0x3231, CRL_REG_LEN_08BIT, 0x00}, + {0x3232, CRL_REG_LEN_08BIT, 0x80}, + {0x3233, CRL_REG_LEN_08BIT, 0x00}, + {0x3234, CRL_REG_LEN_08BIT, 0x10}, + {0x3235, CRL_REG_LEN_08BIT, 0xaa}, + {0x3236, CRL_REG_LEN_08BIT, 0x55}, + {0x3237, CRL_REG_LEN_08BIT, 0x99}, + {0x3238, CRL_REG_LEN_08BIT, 0x66}, + {0x3239, CRL_REG_LEN_08BIT, 0x08}, + {0x323a, CRL_REG_LEN_08BIT, 0x88}, + {0x323b, CRL_REG_LEN_08BIT, 0x00}, + {0x323c, CRL_REG_LEN_08BIT, 0x00}, + {0x323d, CRL_REG_LEN_08BIT, 0x03}, + {0x3250, CRL_REG_LEN_08BIT, 0x33}, + {0x3251, CRL_REG_LEN_08BIT, 0x00}, + {0x3252, CRL_REG_LEN_08BIT, 0x20}, + {0x3253, CRL_REG_LEN_08BIT, 0x00}, + {0x3254, CRL_REG_LEN_08BIT, 0x00}, + {0x3255, CRL_REG_LEN_08BIT, 0x01}, + {0x3256, CRL_REG_LEN_08BIT, 0x00}, + {0x3257, CRL_REG_LEN_08BIT, 0x00}, + {0x3258, CRL_REG_LEN_08BIT, 0x00}, + {0x3270, CRL_REG_LEN_08BIT, 0x01}, + {0x3271, CRL_REG_LEN_08BIT, 0xc0}, + {0x3272, CRL_REG_LEN_08BIT, 0xf0}, + {0x3273, CRL_REG_LEN_08BIT, 0x01}, + {0x3274, CRL_REG_LEN_08BIT, 0x00}, + {0x3275, CRL_REG_LEN_08BIT, 0x40}, + {0x3276, CRL_REG_LEN_08BIT, 0x02}, + {0x3277, CRL_REG_LEN_08BIT, 0x08}, + {0x3278, CRL_REG_LEN_08BIT, 0x10}, + {0x3279, CRL_REG_LEN_08BIT, 0x04}, + {0x327a, CRL_REG_LEN_08BIT, 0x00}, + {0x327b, CRL_REG_LEN_08BIT, 0x03}, + {0x327c, CRL_REG_LEN_08BIT, 0x10}, + {0x327d, CRL_REG_LEN_08BIT, 0x60}, + {0x327e, CRL_REG_LEN_08BIT, 0xc0}, + {0x327f, CRL_REG_LEN_08BIT, 0x06}, + {0x3288, CRL_REG_LEN_08BIT, 0x10}, + {0x3289, CRL_REG_LEN_08BIT, 0x00}, + {0x328a, CRL_REG_LEN_08BIT, 0x08}, + {0x328b, CRL_REG_LEN_08BIT, 0x00}, + {0x328c, CRL_REG_LEN_08BIT, 0x04}, + {0x328d, CRL_REG_LEN_08BIT, 0x00}, + {0x328e, CRL_REG_LEN_08BIT, 0x02}, + {0x328f, CRL_REG_LEN_08BIT, 0x00}, + {0x3290, CRL_REG_LEN_08BIT, 0x20}, + 
{0x3291, CRL_REG_LEN_08BIT, 0x00}, + {0x3292, CRL_REG_LEN_08BIT, 0x10}, + {0x3293, CRL_REG_LEN_08BIT, 0x00}, + {0x3294, CRL_REG_LEN_08BIT, 0x08}, + {0x3295, CRL_REG_LEN_08BIT, 0x00}, + {0x3296, CRL_REG_LEN_08BIT, 0x04}, + {0x3297, CRL_REG_LEN_08BIT, 0x00}, + {0x3298, CRL_REG_LEN_08BIT, 0x40}, + {0x3299, CRL_REG_LEN_08BIT, 0x00}, + {0x329a, CRL_REG_LEN_08BIT, 0x20}, + {0x329b, CRL_REG_LEN_08BIT, 0x00}, + {0x329c, CRL_REG_LEN_08BIT, 0x10}, + {0x329d, CRL_REG_LEN_08BIT, 0x00}, + {0x329e, CRL_REG_LEN_08BIT, 0x08}, + {0x329f, CRL_REG_LEN_08BIT, 0x00}, + {0x32a0, CRL_REG_LEN_08BIT, 0x7f}, + {0x32a1, CRL_REG_LEN_08BIT, 0xff}, + {0x32a2, CRL_REG_LEN_08BIT, 0x40}, + {0x32a3, CRL_REG_LEN_08BIT, 0x00}, + {0x32a4, CRL_REG_LEN_08BIT, 0x20}, + {0x32a5, CRL_REG_LEN_08BIT, 0x00}, + {0x32a6, CRL_REG_LEN_08BIT, 0x10}, + {0x32a7, CRL_REG_LEN_08BIT, 0x00}, + {0x32a8, CRL_REG_LEN_08BIT, 0x00}, + {0x32a9, CRL_REG_LEN_08BIT, 0x00}, + {0x32aa, CRL_REG_LEN_08BIT, 0x00}, + {0x32ab, CRL_REG_LEN_08BIT, 0x00}, + {0x32ac, CRL_REG_LEN_08BIT, 0x00}, + {0x32ad, CRL_REG_LEN_08BIT, 0x00}, + {0x32ae, CRL_REG_LEN_08BIT, 0x00}, + {0x32af, CRL_REG_LEN_08BIT, 0x00}, + {0x32b0, CRL_REG_LEN_08BIT, 0x00}, + {0x32b1, CRL_REG_LEN_08BIT, 0x00}, + {0x32b2, CRL_REG_LEN_08BIT, 0x00}, + {0x32b3, CRL_REG_LEN_08BIT, 0x00}, + {0x32b4, CRL_REG_LEN_08BIT, 0x00}, + {0x32b5, CRL_REG_LEN_08BIT, 0x00}, + {0x32b6, CRL_REG_LEN_08BIT, 0x00}, + {0x32b7, CRL_REG_LEN_08BIT, 0x00}, + {0x32b8, CRL_REG_LEN_08BIT, 0x00}, + {0x32b9, CRL_REG_LEN_08BIT, 0x00}, + {0x32ba, CRL_REG_LEN_08BIT, 0x00}, + {0x32bb, CRL_REG_LEN_08BIT, 0x00}, + {0x32bc, CRL_REG_LEN_08BIT, 0x00}, + {0x32bd, CRL_REG_LEN_08BIT, 0x00}, + {0x32be, CRL_REG_LEN_08BIT, 0x00}, + {0x32bf, CRL_REG_LEN_08BIT, 0x00}, + {0x32c0, CRL_REG_LEN_08BIT, 0x00}, + {0x32c1, CRL_REG_LEN_08BIT, 0x00}, + {0x32c2, CRL_REG_LEN_08BIT, 0x00}, + {0x32c3, CRL_REG_LEN_08BIT, 0x00}, + {0x32c4, CRL_REG_LEN_08BIT, 0x00}, + {0x32c5, CRL_REG_LEN_08BIT, 0x00}, + {0x32c6, CRL_REG_LEN_08BIT, 0x00}, + 
{0x32c7, CRL_REG_LEN_08BIT, 0x00}, + {0x32c8, CRL_REG_LEN_08BIT, 0x87}, + {0x32c9, CRL_REG_LEN_08BIT, 0x00}, + {0x3330, CRL_REG_LEN_08BIT, 0x03}, + {0x3331, CRL_REG_LEN_08BIT, 0xc8}, + {0x3332, CRL_REG_LEN_08BIT, 0x02}, + {0x3333, CRL_REG_LEN_08BIT, 0x24}, + {0x3334, CRL_REG_LEN_08BIT, 0x00}, + {0x3335, CRL_REG_LEN_08BIT, 0x00}, + {0x3336, CRL_REG_LEN_08BIT, 0x00}, + {0x3337, CRL_REG_LEN_08BIT, 0x00}, + {0x3338, CRL_REG_LEN_08BIT, 0x03}, + {0x3339, CRL_REG_LEN_08BIT, 0xc8}, + {0x333a, CRL_REG_LEN_08BIT, 0x02}, + {0x333b, CRL_REG_LEN_08BIT, 0x24}, + {0x333c, CRL_REG_LEN_08BIT, 0x00}, + {0x333d, CRL_REG_LEN_08BIT, 0x00}, + {0x333e, CRL_REG_LEN_08BIT, 0x00}, + {0x333f, CRL_REG_LEN_08BIT, 0x00}, + {0x3340, CRL_REG_LEN_08BIT, 0x03}, + {0x3341, CRL_REG_LEN_08BIT, 0xc8}, + {0x3342, CRL_REG_LEN_08BIT, 0x02}, + {0x3343, CRL_REG_LEN_08BIT, 0x24}, + {0x3344, CRL_REG_LEN_08BIT, 0x00}, + {0x3345, CRL_REG_LEN_08BIT, 0x00}, + {0x3346, CRL_REG_LEN_08BIT, 0x00}, + {0x3347, CRL_REG_LEN_08BIT, 0x00}, + {0x3348, CRL_REG_LEN_08BIT, 0x40}, + {0x3349, CRL_REG_LEN_08BIT, 0x00}, + {0x334a, CRL_REG_LEN_08BIT, 0x00}, + {0x334b, CRL_REG_LEN_08BIT, 0x00}, + {0x334c, CRL_REG_LEN_08BIT, 0x00}, + {0x334d, CRL_REG_LEN_08BIT, 0x00}, + {0x334e, CRL_REG_LEN_08BIT, 0x80}, + {0x3360, CRL_REG_LEN_08BIT, 0x01}, + {0x3361, CRL_REG_LEN_08BIT, 0x00}, + {0x3362, CRL_REG_LEN_08BIT, 0x01}, + {0x3363, CRL_REG_LEN_08BIT, 0x00}, + {0x3364, CRL_REG_LEN_08BIT, 0x01}, + {0x3365, CRL_REG_LEN_08BIT, 0x00}, + {0x3366, CRL_REG_LEN_08BIT, 0x01}, + {0x3367, CRL_REG_LEN_08BIT, 0x00}, + {0x3368, CRL_REG_LEN_08BIT, 0x01}, + {0x3369, CRL_REG_LEN_08BIT, 0x00}, + {0x336a, CRL_REG_LEN_08BIT, 0x01}, + {0x336b, CRL_REG_LEN_08BIT, 0x00}, + {0x336c, CRL_REG_LEN_08BIT, 0x01}, + {0x336d, CRL_REG_LEN_08BIT, 0x00}, + {0x336e, CRL_REG_LEN_08BIT, 0x01}, + {0x336f, CRL_REG_LEN_08BIT, 0x00}, + {0x3370, CRL_REG_LEN_08BIT, 0x01}, + {0x3371, CRL_REG_LEN_08BIT, 0x00}, + {0x3372, CRL_REG_LEN_08BIT, 0x01}, + {0x3373, CRL_REG_LEN_08BIT, 0x00}, + 
{0x3374, CRL_REG_LEN_08BIT, 0x01}, + {0x3375, CRL_REG_LEN_08BIT, 0x00}, + {0x3376, CRL_REG_LEN_08BIT, 0x01}, + {0x3377, CRL_REG_LEN_08BIT, 0x00}, + {0x3378, CRL_REG_LEN_08BIT, 0x00}, + {0x3379, CRL_REG_LEN_08BIT, 0x00}, + {0x337a, CRL_REG_LEN_08BIT, 0x00}, + {0x337b, CRL_REG_LEN_08BIT, 0x00}, + {0x337c, CRL_REG_LEN_08BIT, 0x00}, + {0x337d, CRL_REG_LEN_08BIT, 0x00}, + {0x337e, CRL_REG_LEN_08BIT, 0x00}, + {0x337f, CRL_REG_LEN_08BIT, 0x00}, + {0x3380, CRL_REG_LEN_08BIT, 0x00}, + {0x3381, CRL_REG_LEN_08BIT, 0x00}, + {0x3382, CRL_REG_LEN_08BIT, 0x00}, + {0x3383, CRL_REG_LEN_08BIT, 0x00}, + {0x3384, CRL_REG_LEN_08BIT, 0x00}, + {0x3385, CRL_REG_LEN_08BIT, 0x00}, + {0x3386, CRL_REG_LEN_08BIT, 0x00}, + {0x3387, CRL_REG_LEN_08BIT, 0x00}, + {0x3388, CRL_REG_LEN_08BIT, 0x00}, + {0x3389, CRL_REG_LEN_08BIT, 0x00}, + {0x338a, CRL_REG_LEN_08BIT, 0x00}, + {0x338b, CRL_REG_LEN_08BIT, 0x00}, + {0x338c, CRL_REG_LEN_08BIT, 0x00}, + {0x338d, CRL_REG_LEN_08BIT, 0x00}, + {0x338e, CRL_REG_LEN_08BIT, 0x00}, + {0x338f, CRL_REG_LEN_08BIT, 0x00}, + {0x3390, CRL_REG_LEN_08BIT, 0x00}, + {0x3391, CRL_REG_LEN_08BIT, 0x00}, + {0x3392, CRL_REG_LEN_08BIT, 0x00}, + {0x3393, CRL_REG_LEN_08BIT, 0x00}, + {0x3394, CRL_REG_LEN_08BIT, 0x00}, + {0x3395, CRL_REG_LEN_08BIT, 0x00}, + {0x3396, CRL_REG_LEN_08BIT, 0x00}, + {0x3397, CRL_REG_LEN_08BIT, 0x00}, + {0x3398, CRL_REG_LEN_08BIT, 0x00}, + {0x3399, CRL_REG_LEN_08BIT, 0x00}, + {0x339a, CRL_REG_LEN_08BIT, 0x00}, + {0x339b, CRL_REG_LEN_08BIT, 0x00}, + {0x33b0, CRL_REG_LEN_08BIT, 0x00}, + {0x33b1, CRL_REG_LEN_08BIT, 0x50}, + {0x33b2, CRL_REG_LEN_08BIT, 0x01}, + {0x33b3, CRL_REG_LEN_08BIT, 0xff}, + {0x33b4, CRL_REG_LEN_08BIT, 0xe0}, + {0x33b5, CRL_REG_LEN_08BIT, 0x6b}, + {0x33b6, CRL_REG_LEN_08BIT, 0x00}, + {0x33b7, CRL_REG_LEN_08BIT, 0x00}, + {0x33b8, CRL_REG_LEN_08BIT, 0x00}, + {0x33b9, CRL_REG_LEN_08BIT, 0x00}, + {0x33ba, CRL_REG_LEN_08BIT, 0x00}, + {0x33bb, CRL_REG_LEN_08BIT, 0x1f}, + {0x33bc, CRL_REG_LEN_08BIT, 0x01}, + {0x33bd, CRL_REG_LEN_08BIT, 0x01}, + 
{0x33be, CRL_REG_LEN_08BIT, 0x01}, + {0x33bf, CRL_REG_LEN_08BIT, 0x01}, + {0x33c0, CRL_REG_LEN_08BIT, 0x00}, + {0x33c1, CRL_REG_LEN_08BIT, 0x00}, + {0x33c2, CRL_REG_LEN_08BIT, 0x00}, + {0x33c3, CRL_REG_LEN_08BIT, 0x00}, + {0x33e0, CRL_REG_LEN_08BIT, 0x14}, + {0x33e1, CRL_REG_LEN_08BIT, 0x0f}, + {0x33e2, CRL_REG_LEN_08BIT, 0x02}, + {0x33e3, CRL_REG_LEN_08BIT, 0x01}, + {0x33e4, CRL_REG_LEN_08BIT, 0x01}, + {0x33e5, CRL_REG_LEN_08BIT, 0x01}, + {0x33e6, CRL_REG_LEN_08BIT, 0x00}, + {0x33e7, CRL_REG_LEN_08BIT, 0x04}, + {0x33e8, CRL_REG_LEN_08BIT, 0x0c}, + {0x33e9, CRL_REG_LEN_08BIT, 0x02}, + {0x33ea, CRL_REG_LEN_08BIT, 0x02}, + {0x33eb, CRL_REG_LEN_08BIT, 0x02}, + {0x33ec, CRL_REG_LEN_08BIT, 0x03}, + {0x33ed, CRL_REG_LEN_08BIT, 0x01}, + {0x33ee, CRL_REG_LEN_08BIT, 0x02}, + {0x33ef, CRL_REG_LEN_08BIT, 0x08}, + {0x33f0, CRL_REG_LEN_08BIT, 0x08}, + {0x33f1, CRL_REG_LEN_08BIT, 0x04}, + {0x33f2, CRL_REG_LEN_08BIT, 0x04}, + {0x33f3, CRL_REG_LEN_08BIT, 0x00}, + {0x33f4, CRL_REG_LEN_08BIT, 0x03}, + {0x33f5, CRL_REG_LEN_08BIT, 0x14}, + {0x33f6, CRL_REG_LEN_08BIT, 0x0f}, + {0x33f7, CRL_REG_LEN_08BIT, 0x02}, + {0x33f8, CRL_REG_LEN_08BIT, 0x01}, + {0x33f9, CRL_REG_LEN_08BIT, 0x01}, + {0x33fa, CRL_REG_LEN_08BIT, 0x01}, + {0x33fb, CRL_REG_LEN_08BIT, 0x00}, + {0x33fc, CRL_REG_LEN_08BIT, 0x04}, + {0x33fd, CRL_REG_LEN_08BIT, 0x0c}, + {0x33fe, CRL_REG_LEN_08BIT, 0x02}, + {0x33ff, CRL_REG_LEN_08BIT, 0x02}, + {0x3400, CRL_REG_LEN_08BIT, 0x02}, + {0x3401, CRL_REG_LEN_08BIT, 0x03}, + {0x3402, CRL_REG_LEN_08BIT, 0x01}, + {0x3403, CRL_REG_LEN_08BIT, 0x02}, + {0x3404, CRL_REG_LEN_08BIT, 0x08}, + {0x3405, CRL_REG_LEN_08BIT, 0x08}, + {0x3406, CRL_REG_LEN_08BIT, 0x04}, + {0x3407, CRL_REG_LEN_08BIT, 0x04}, + {0x3408, CRL_REG_LEN_08BIT, 0x00}, + {0x3409, CRL_REG_LEN_08BIT, 0x03}, + {0x340a, CRL_REG_LEN_08BIT, 0x14}, + {0x340b, CRL_REG_LEN_08BIT, 0x0f}, + {0x340c, CRL_REG_LEN_08BIT, 0x04}, + {0x340d, CRL_REG_LEN_08BIT, 0x02}, + {0x340e, CRL_REG_LEN_08BIT, 0x01}, + {0x340f, CRL_REG_LEN_08BIT, 0x01}, + 
{0x3410, CRL_REG_LEN_08BIT, 0x00}, + {0x3411, CRL_REG_LEN_08BIT, 0x04}, + {0x3412, CRL_REG_LEN_08BIT, 0x0c}, + {0x3413, CRL_REG_LEN_08BIT, 0x02}, + {0x3414, CRL_REG_LEN_08BIT, 0x02}, + {0x3415, CRL_REG_LEN_08BIT, 0x02}, + {0x3416, CRL_REG_LEN_08BIT, 0x03}, + {0x3417, CRL_REG_LEN_08BIT, 0x02}, + {0x3418, CRL_REG_LEN_08BIT, 0x05}, + {0x3419, CRL_REG_LEN_08BIT, 0x0a}, + {0x341a, CRL_REG_LEN_08BIT, 0x08}, + {0x341b, CRL_REG_LEN_08BIT, 0x04}, + {0x341c, CRL_REG_LEN_08BIT, 0x04}, + {0x341d, CRL_REG_LEN_08BIT, 0x00}, + {0x341e, CRL_REG_LEN_08BIT, 0x03}, + {0x3440, CRL_REG_LEN_08BIT, 0x00}, + {0x3441, CRL_REG_LEN_08BIT, 0x00}, + {0x3442, CRL_REG_LEN_08BIT, 0x00}, + {0x3443, CRL_REG_LEN_08BIT, 0x00}, + {0x3444, CRL_REG_LEN_08BIT, 0x02}, + {0x3445, CRL_REG_LEN_08BIT, 0xf0}, + {0x3446, CRL_REG_LEN_08BIT, 0x02}, + {0x3447, CRL_REG_LEN_08BIT, 0x08}, + {0x3448, CRL_REG_LEN_08BIT, 0x00}, + {0x3460, CRL_REG_LEN_08BIT, 0x40}, + {0x3461, CRL_REG_LEN_08BIT, 0x40}, + {0x3462, CRL_REG_LEN_08BIT, 0x40}, + {0x3463, CRL_REG_LEN_08BIT, 0x40}, + {0x3464, CRL_REG_LEN_08BIT, 0x03}, + {0x3465, CRL_REG_LEN_08BIT, 0x01}, + {0x3466, CRL_REG_LEN_08BIT, 0x01}, + {0x3467, CRL_REG_LEN_08BIT, 0x02}, + {0x3468, CRL_REG_LEN_08BIT, 0x30}, + {0x3469, CRL_REG_LEN_08BIT, 0x00}, + {0x346a, CRL_REG_LEN_08BIT, 0x33}, + {0x346b, CRL_REG_LEN_08BIT, 0xbf}, + {0x3480, CRL_REG_LEN_08BIT, 0x40}, + {0x3481, CRL_REG_LEN_08BIT, 0x00}, + {0x3482, CRL_REG_LEN_08BIT, 0x00}, + {0x3483, CRL_REG_LEN_08BIT, 0x00}, + {0x3484, CRL_REG_LEN_08BIT, 0x0d}, + {0x3485, CRL_REG_LEN_08BIT, 0x00}, + {0x3486, CRL_REG_LEN_08BIT, 0x00}, + {0x3487, CRL_REG_LEN_08BIT, 0x00}, + {0x3488, CRL_REG_LEN_08BIT, 0x00}, + {0x3489, CRL_REG_LEN_08BIT, 0x00}, + {0x348a, CRL_REG_LEN_08BIT, 0x00}, + {0x348b, CRL_REG_LEN_08BIT, 0x04}, + {0x348c, CRL_REG_LEN_08BIT, 0x00}, + {0x348d, CRL_REG_LEN_08BIT, 0x01}, + {0x348f, CRL_REG_LEN_08BIT, 0x01}, + {0x3030, CRL_REG_LEN_08BIT, 0x0a}, + {0x3030, CRL_REG_LEN_08BIT, 0x02}, + {0x7000, CRL_REG_LEN_08BIT, 0x58}, + 
{0x7001, CRL_REG_LEN_08BIT, 0x7a}, + {0x7002, CRL_REG_LEN_08BIT, 0x1a}, + {0x7003, CRL_REG_LEN_08BIT, 0xc1}, + {0x7004, CRL_REG_LEN_08BIT, 0x03}, + {0x7005, CRL_REG_LEN_08BIT, 0xda}, + {0x7006, CRL_REG_LEN_08BIT, 0xbd}, + {0x7007, CRL_REG_LEN_08BIT, 0x03}, + {0x7008, CRL_REG_LEN_08BIT, 0xbd}, + {0x7009, CRL_REG_LEN_08BIT, 0x06}, + {0x700a, CRL_REG_LEN_08BIT, 0xe6}, + {0x700b, CRL_REG_LEN_08BIT, 0xec}, + {0x700c, CRL_REG_LEN_08BIT, 0xbc}, + {0x700d, CRL_REG_LEN_08BIT, 0xff}, + {0x700e, CRL_REG_LEN_08BIT, 0xbc}, + {0x700f, CRL_REG_LEN_08BIT, 0x73}, + {0x7010, CRL_REG_LEN_08BIT, 0xda}, + {0x7011, CRL_REG_LEN_08BIT, 0x72}, + {0x7012, CRL_REG_LEN_08BIT, 0x76}, + {0x7013, CRL_REG_LEN_08BIT, 0xb6}, + {0x7014, CRL_REG_LEN_08BIT, 0xee}, + {0x7015, CRL_REG_LEN_08BIT, 0xcf}, + {0x7016, CRL_REG_LEN_08BIT, 0xac}, + {0x7017, CRL_REG_LEN_08BIT, 0xd0}, + {0x7018, CRL_REG_LEN_08BIT, 0xac}, + {0x7019, CRL_REG_LEN_08BIT, 0xd1}, + {0x701a, CRL_REG_LEN_08BIT, 0x50}, + {0x701b, CRL_REG_LEN_08BIT, 0xac}, + {0x701c, CRL_REG_LEN_08BIT, 0xd2}, + {0x701d, CRL_REG_LEN_08BIT, 0xbc}, + {0x701e, CRL_REG_LEN_08BIT, 0x2e}, + {0x701f, CRL_REG_LEN_08BIT, 0xb4}, + {0x7020, CRL_REG_LEN_08BIT, 0x00}, + {0x7021, CRL_REG_LEN_08BIT, 0xdc}, + {0x7022, CRL_REG_LEN_08BIT, 0xdf}, + {0x7023, CRL_REG_LEN_08BIT, 0xb0}, + {0x7024, CRL_REG_LEN_08BIT, 0x6e}, + {0x7025, CRL_REG_LEN_08BIT, 0xbd}, + {0x7026, CRL_REG_LEN_08BIT, 0x01}, + {0x7027, CRL_REG_LEN_08BIT, 0xd7}, + {0x7028, CRL_REG_LEN_08BIT, 0xed}, + {0x7029, CRL_REG_LEN_08BIT, 0xe1}, + {0x702a, CRL_REG_LEN_08BIT, 0x36}, + {0x702b, CRL_REG_LEN_08BIT, 0x30}, + {0x702c, CRL_REG_LEN_08BIT, 0xd3}, + {0x702d, CRL_REG_LEN_08BIT, 0x2e}, + {0x702e, CRL_REG_LEN_08BIT, 0x54}, + {0x702f, CRL_REG_LEN_08BIT, 0x46}, + {0x7030, CRL_REG_LEN_08BIT, 0xbc}, + {0x7031, CRL_REG_LEN_08BIT, 0x22}, + {0x7032, CRL_REG_LEN_08BIT, 0x66}, + {0x7033, CRL_REG_LEN_08BIT, 0xbc}, + {0x7034, CRL_REG_LEN_08BIT, 0x24}, + {0x7035, CRL_REG_LEN_08BIT, 0x2c}, + {0x7036, CRL_REG_LEN_08BIT, 0x28}, + 
{0x7037, CRL_REG_LEN_08BIT, 0xbc}, + {0x7038, CRL_REG_LEN_08BIT, 0x3c}, + {0x7039, CRL_REG_LEN_08BIT, 0xa1}, + {0x703a, CRL_REG_LEN_08BIT, 0xac}, + {0x703b, CRL_REG_LEN_08BIT, 0xd8}, + {0x703c, CRL_REG_LEN_08BIT, 0xd6}, + {0x703d, CRL_REG_LEN_08BIT, 0xb4}, + {0x703e, CRL_REG_LEN_08BIT, 0x04}, + {0x703f, CRL_REG_LEN_08BIT, 0x46}, + {0x7040, CRL_REG_LEN_08BIT, 0xb7}, + {0x7041, CRL_REG_LEN_08BIT, 0x04}, + {0x7042, CRL_REG_LEN_08BIT, 0xbe}, + {0x7043, CRL_REG_LEN_08BIT, 0x08}, + {0x7044, CRL_REG_LEN_08BIT, 0xc3}, + {0x7045, CRL_REG_LEN_08BIT, 0xd9}, + {0x7046, CRL_REG_LEN_08BIT, 0xad}, + {0x7047, CRL_REG_LEN_08BIT, 0xc3}, + {0x7048, CRL_REG_LEN_08BIT, 0xbc}, + {0x7049, CRL_REG_LEN_08BIT, 0x19}, + {0x704a, CRL_REG_LEN_08BIT, 0xc1}, + {0x704b, CRL_REG_LEN_08BIT, 0x27}, + {0x704c, CRL_REG_LEN_08BIT, 0xe7}, + {0x704d, CRL_REG_LEN_08BIT, 0x00}, + {0x704e, CRL_REG_LEN_08BIT, 0x50}, + {0x704f, CRL_REG_LEN_08BIT, 0x20}, + {0x7050, CRL_REG_LEN_08BIT, 0xb8}, + {0x7051, CRL_REG_LEN_08BIT, 0x02}, + {0x7052, CRL_REG_LEN_08BIT, 0xbc}, + {0x7053, CRL_REG_LEN_08BIT, 0x17}, + {0x7054, CRL_REG_LEN_08BIT, 0xdb}, + {0x7055, CRL_REG_LEN_08BIT, 0xc7}, + {0x7056, CRL_REG_LEN_08BIT, 0xb8}, + {0x7057, CRL_REG_LEN_08BIT, 0x00}, + {0x7058, CRL_REG_LEN_08BIT, 0x28}, + {0x7059, CRL_REG_LEN_08BIT, 0x54}, + {0x705a, CRL_REG_LEN_08BIT, 0xb4}, + {0x705b, CRL_REG_LEN_08BIT, 0x14}, + {0x705c, CRL_REG_LEN_08BIT, 0xab}, + {0x705d, CRL_REG_LEN_08BIT, 0xbe}, + {0x705e, CRL_REG_LEN_08BIT, 0x06}, + {0x705f, CRL_REG_LEN_08BIT, 0xd8}, + {0x7060, CRL_REG_LEN_08BIT, 0xd6}, + {0x7061, CRL_REG_LEN_08BIT, 0x00}, + {0x7062, CRL_REG_LEN_08BIT, 0xb4}, + {0x7063, CRL_REG_LEN_08BIT, 0xc7}, + {0x7064, CRL_REG_LEN_08BIT, 0x07}, + {0x7065, CRL_REG_LEN_08BIT, 0xb9}, + {0x7066, CRL_REG_LEN_08BIT, 0x05}, + {0x7067, CRL_REG_LEN_08BIT, 0xee}, + {0x7068, CRL_REG_LEN_08BIT, 0xe6}, + {0x7069, CRL_REG_LEN_08BIT, 0xad}, + {0x706a, CRL_REG_LEN_08BIT, 0xb4}, + {0x706b, CRL_REG_LEN_08BIT, 0x26}, + {0x706c, CRL_REG_LEN_08BIT, 0x19}, + 
{0x706d, CRL_REG_LEN_08BIT, 0xc1}, + {0x706e, CRL_REG_LEN_08BIT, 0x3a}, + {0x706f, CRL_REG_LEN_08BIT, 0xc3}, + {0x7070, CRL_REG_LEN_08BIT, 0xaf}, + {0x7071, CRL_REG_LEN_08BIT, 0x00}, + {0x7072, CRL_REG_LEN_08BIT, 0xc0}, + {0x7073, CRL_REG_LEN_08BIT, 0x3c}, + {0x7074, CRL_REG_LEN_08BIT, 0xc3}, + {0x7075, CRL_REG_LEN_08BIT, 0xbe}, + {0x7076, CRL_REG_LEN_08BIT, 0xe7}, + {0x7077, CRL_REG_LEN_08BIT, 0x00}, + {0x7078, CRL_REG_LEN_08BIT, 0x15}, + {0x7079, CRL_REG_LEN_08BIT, 0xc2}, + {0x707a, CRL_REG_LEN_08BIT, 0x40}, + {0x707b, CRL_REG_LEN_08BIT, 0xc3}, + {0x707c, CRL_REG_LEN_08BIT, 0xa4}, + {0x707d, CRL_REG_LEN_08BIT, 0xc0}, + {0x707e, CRL_REG_LEN_08BIT, 0x3c}, + {0x707f, CRL_REG_LEN_08BIT, 0x00}, + {0x7080, CRL_REG_LEN_08BIT, 0xb9}, + {0x7081, CRL_REG_LEN_08BIT, 0x64}, + {0x7082, CRL_REG_LEN_08BIT, 0x29}, + {0x7083, CRL_REG_LEN_08BIT, 0x00}, + {0x7084, CRL_REG_LEN_08BIT, 0xb8}, + {0x7085, CRL_REG_LEN_08BIT, 0x12}, + {0x7086, CRL_REG_LEN_08BIT, 0xbe}, + {0x7087, CRL_REG_LEN_08BIT, 0x01}, + {0x7088, CRL_REG_LEN_08BIT, 0xd0}, + {0x7089, CRL_REG_LEN_08BIT, 0xbc}, + {0x708a, CRL_REG_LEN_08BIT, 0x01}, + {0x708b, CRL_REG_LEN_08BIT, 0xac}, + {0x708c, CRL_REG_LEN_08BIT, 0x37}, + {0x708d, CRL_REG_LEN_08BIT, 0xd2}, + {0x708e, CRL_REG_LEN_08BIT, 0xac}, + {0x708f, CRL_REG_LEN_08BIT, 0x45}, + {0x7090, CRL_REG_LEN_08BIT, 0xad}, + {0x7091, CRL_REG_LEN_08BIT, 0x28}, + {0x7092, CRL_REG_LEN_08BIT, 0x00}, + {0x7093, CRL_REG_LEN_08BIT, 0xb8}, + {0x7094, CRL_REG_LEN_08BIT, 0x00}, + {0x7095, CRL_REG_LEN_08BIT, 0xbc}, + {0x7096, CRL_REG_LEN_08BIT, 0x01}, + {0x7097, CRL_REG_LEN_08BIT, 0x36}, + {0x7098, CRL_REG_LEN_08BIT, 0xd3}, + {0x7099, CRL_REG_LEN_08BIT, 0x30}, + {0x709a, CRL_REG_LEN_08BIT, 0x04}, + {0x709b, CRL_REG_LEN_08BIT, 0xe0}, + {0x709c, CRL_REG_LEN_08BIT, 0xd8}, + {0x709d, CRL_REG_LEN_08BIT, 0xb4}, + {0x709e, CRL_REG_LEN_08BIT, 0xe9}, + {0x709f, CRL_REG_LEN_08BIT, 0x00}, + {0x70a0, CRL_REG_LEN_08BIT, 0xbe}, + {0x70a1, CRL_REG_LEN_08BIT, 0x05}, + {0x70a2, CRL_REG_LEN_08BIT, 0x62}, + 
{0x70a3, CRL_REG_LEN_08BIT, 0x07}, + {0x70a4, CRL_REG_LEN_08BIT, 0xb9}, + {0x70a5, CRL_REG_LEN_08BIT, 0x05}, + {0x70a6, CRL_REG_LEN_08BIT, 0xad}, + {0x70a7, CRL_REG_LEN_08BIT, 0xc3}, + {0x70a8, CRL_REG_LEN_08BIT, 0xcf}, + {0x70a9, CRL_REG_LEN_08BIT, 0x00}, + {0x70aa, CRL_REG_LEN_08BIT, 0x15}, + {0x70ab, CRL_REG_LEN_08BIT, 0xc2}, + {0x70ac, CRL_REG_LEN_08BIT, 0x59}, + {0x70ad, CRL_REG_LEN_08BIT, 0xc3}, + {0x70ae, CRL_REG_LEN_08BIT, 0xc9}, + {0x70af, CRL_REG_LEN_08BIT, 0xc0}, + {0x70b0, CRL_REG_LEN_08BIT, 0x55}, + {0x70b1, CRL_REG_LEN_08BIT, 0x00}, + {0x70b2, CRL_REG_LEN_08BIT, 0x46}, + {0x70b3, CRL_REG_LEN_08BIT, 0xa1}, + {0x70b4, CRL_REG_LEN_08BIT, 0xb9}, + {0x70b5, CRL_REG_LEN_08BIT, 0x64}, + {0x70b6, CRL_REG_LEN_08BIT, 0x29}, + {0x70b7, CRL_REG_LEN_08BIT, 0x00}, + {0x70b8, CRL_REG_LEN_08BIT, 0xb8}, + {0x70b9, CRL_REG_LEN_08BIT, 0x02}, + {0x70ba, CRL_REG_LEN_08BIT, 0xbe}, + {0x70bb, CRL_REG_LEN_08BIT, 0x02}, + {0x70bc, CRL_REG_LEN_08BIT, 0xd0}, + {0x70bd, CRL_REG_LEN_08BIT, 0xdc}, + {0x70be, CRL_REG_LEN_08BIT, 0xac}, + {0x70bf, CRL_REG_LEN_08BIT, 0xbc}, + {0x70c0, CRL_REG_LEN_08BIT, 0x01}, + {0x70c1, CRL_REG_LEN_08BIT, 0x37}, + {0x70c2, CRL_REG_LEN_08BIT, 0xac}, + {0x70c3, CRL_REG_LEN_08BIT, 0xd2}, + {0x70c4, CRL_REG_LEN_08BIT, 0x45}, + {0x70c5, CRL_REG_LEN_08BIT, 0xad}, + {0x70c6, CRL_REG_LEN_08BIT, 0x28}, + {0x70c7, CRL_REG_LEN_08BIT, 0x00}, + {0x70c8, CRL_REG_LEN_08BIT, 0xb8}, + {0x70c9, CRL_REG_LEN_08BIT, 0x00}, + {0x70ca, CRL_REG_LEN_08BIT, 0xbc}, + {0x70cb, CRL_REG_LEN_08BIT, 0x01}, + {0x70cc, CRL_REG_LEN_08BIT, 0x36}, + {0x70cd, CRL_REG_LEN_08BIT, 0x30}, + {0x70ce, CRL_REG_LEN_08BIT, 0xe0}, + {0x70cf, CRL_REG_LEN_08BIT, 0xd8}, + {0x70d0, CRL_REG_LEN_08BIT, 0xb5}, + {0x70d1, CRL_REG_LEN_08BIT, 0x0b}, + {0x70d2, CRL_REG_LEN_08BIT, 0xd6}, + {0x70d3, CRL_REG_LEN_08BIT, 0xbe}, + {0x70d4, CRL_REG_LEN_08BIT, 0x07}, + {0x70d5, CRL_REG_LEN_08BIT, 0x00}, + {0x70d6, CRL_REG_LEN_08BIT, 0x62}, + {0x70d7, CRL_REG_LEN_08BIT, 0x07}, + {0x70d8, CRL_REG_LEN_08BIT, 0xb9}, + 
{0x70d9, CRL_REG_LEN_08BIT, 0x05}, + {0x70da, CRL_REG_LEN_08BIT, 0xad}, + {0x70db, CRL_REG_LEN_08BIT, 0xc3}, + {0x70dc, CRL_REG_LEN_08BIT, 0xcf}, + {0x70dd, CRL_REG_LEN_08BIT, 0x46}, + {0x70de, CRL_REG_LEN_08BIT, 0xcd}, + {0x70df, CRL_REG_LEN_08BIT, 0x07}, + {0x70e0, CRL_REG_LEN_08BIT, 0xcd}, + {0x70e1, CRL_REG_LEN_08BIT, 0x00}, + {0x70e2, CRL_REG_LEN_08BIT, 0xe3}, + {0x70e3, CRL_REG_LEN_08BIT, 0x18}, + {0x70e4, CRL_REG_LEN_08BIT, 0xc2}, + {0x70e5, CRL_REG_LEN_08BIT, 0xa2}, + {0x70e6, CRL_REG_LEN_08BIT, 0xb9}, + {0x70e7, CRL_REG_LEN_08BIT, 0x64}, + {0x70e8, CRL_REG_LEN_08BIT, 0xd1}, + {0x70e9, CRL_REG_LEN_08BIT, 0xdd}, + {0x70ea, CRL_REG_LEN_08BIT, 0xac}, + {0x70eb, CRL_REG_LEN_08BIT, 0xcf}, + {0x70ec, CRL_REG_LEN_08BIT, 0xdf}, + {0x70ed, CRL_REG_LEN_08BIT, 0xb5}, + {0x70ee, CRL_REG_LEN_08BIT, 0x19}, + {0x70ef, CRL_REG_LEN_08BIT, 0x46}, + {0x70f0, CRL_REG_LEN_08BIT, 0x50}, + {0x70f1, CRL_REG_LEN_08BIT, 0xb6}, + {0x70f2, CRL_REG_LEN_08BIT, 0xee}, + {0x70f3, CRL_REG_LEN_08BIT, 0xe8}, + {0x70f4, CRL_REG_LEN_08BIT, 0xe6}, + {0x70f5, CRL_REG_LEN_08BIT, 0xbc}, + {0x70f6, CRL_REG_LEN_08BIT, 0x31}, + {0x70f7, CRL_REG_LEN_08BIT, 0xe1}, + {0x70f8, CRL_REG_LEN_08BIT, 0x36}, + {0x70f9, CRL_REG_LEN_08BIT, 0x30}, + {0x70fa, CRL_REG_LEN_08BIT, 0xd3}, + {0x70fb, CRL_REG_LEN_08BIT, 0x2e}, + {0x70fc, CRL_REG_LEN_08BIT, 0x54}, + {0x70fd, CRL_REG_LEN_08BIT, 0xbd}, + {0x70fe, CRL_REG_LEN_08BIT, 0x03}, + {0x70ff, CRL_REG_LEN_08BIT, 0xec}, + {0x7100, CRL_REG_LEN_08BIT, 0x2c}, + {0x7101, CRL_REG_LEN_08BIT, 0x50}, + {0x7102, CRL_REG_LEN_08BIT, 0x20}, + {0x7103, CRL_REG_LEN_08BIT, 0x04}, + {0x7104, CRL_REG_LEN_08BIT, 0xb8}, + {0x7105, CRL_REG_LEN_08BIT, 0x02}, + {0x7106, CRL_REG_LEN_08BIT, 0xbc}, + {0x7107, CRL_REG_LEN_08BIT, 0x18}, + {0x7108, CRL_REG_LEN_08BIT, 0xc7}, + {0x7109, CRL_REG_LEN_08BIT, 0xb8}, + {0x710a, CRL_REG_LEN_08BIT, 0x00}, + {0x710b, CRL_REG_LEN_08BIT, 0x28}, + {0x710c, CRL_REG_LEN_08BIT, 0x54}, + {0x710d, CRL_REG_LEN_08BIT, 0xbc}, + {0x710e, CRL_REG_LEN_08BIT, 0x02}, + 
{0x710f, CRL_REG_LEN_08BIT, 0xb4}, + {0x7110, CRL_REG_LEN_08BIT, 0xda}, + {0x7111, CRL_REG_LEN_08BIT, 0xbe}, + {0x7112, CRL_REG_LEN_08BIT, 0x04}, + {0x7113, CRL_REG_LEN_08BIT, 0xd6}, + {0x7114, CRL_REG_LEN_08BIT, 0xd8}, + {0x7115, CRL_REG_LEN_08BIT, 0xab}, + {0x7116, CRL_REG_LEN_08BIT, 0x00}, + {0x7117, CRL_REG_LEN_08BIT, 0x62}, + {0x7118, CRL_REG_LEN_08BIT, 0x07}, + {0x7119, CRL_REG_LEN_08BIT, 0xb9}, + {0x711a, CRL_REG_LEN_08BIT, 0x05}, + {0x711b, CRL_REG_LEN_08BIT, 0xad}, + {0x711c, CRL_REG_LEN_08BIT, 0xc3}, + {0x711d, CRL_REG_LEN_08BIT, 0xbc}, + {0x711e, CRL_REG_LEN_08BIT, 0xe7}, + {0x711f, CRL_REG_LEN_08BIT, 0xb9}, + {0x7120, CRL_REG_LEN_08BIT, 0x64}, + {0x7121, CRL_REG_LEN_08BIT, 0x29}, + {0x7122, CRL_REG_LEN_08BIT, 0x00}, + {0x7123, CRL_REG_LEN_08BIT, 0xb8}, + {0x7124, CRL_REG_LEN_08BIT, 0x02}, + {0x7125, CRL_REG_LEN_08BIT, 0xbe}, + {0x7126, CRL_REG_LEN_08BIT, 0x00}, + {0x7127, CRL_REG_LEN_08BIT, 0x45}, + {0x7128, CRL_REG_LEN_08BIT, 0xad}, + {0x7129, CRL_REG_LEN_08BIT, 0xe2}, + {0x712a, CRL_REG_LEN_08BIT, 0x28}, + {0x712b, CRL_REG_LEN_08BIT, 0x00}, + {0x712c, CRL_REG_LEN_08BIT, 0xb8}, + {0x712d, CRL_REG_LEN_08BIT, 0x00}, + {0x712e, CRL_REG_LEN_08BIT, 0xe0}, + {0x712f, CRL_REG_LEN_08BIT, 0xd8}, + {0x7130, CRL_REG_LEN_08BIT, 0xb4}, + {0x7131, CRL_REG_LEN_08BIT, 0xe9}, + {0x7132, CRL_REG_LEN_08BIT, 0xbe}, + {0x7133, CRL_REG_LEN_08BIT, 0x03}, + {0x7134, CRL_REG_LEN_08BIT, 0x00}, + {0x7135, CRL_REG_LEN_08BIT, 0x30}, + {0x7136, CRL_REG_LEN_08BIT, 0x62}, + {0x7137, CRL_REG_LEN_08BIT, 0x07}, + {0x7138, CRL_REG_LEN_08BIT, 0xb9}, + {0x7139, CRL_REG_LEN_08BIT, 0x05}, + {0x713a, CRL_REG_LEN_08BIT, 0xad}, + {0x713b, CRL_REG_LEN_08BIT, 0xc3}, + {0x713c, CRL_REG_LEN_08BIT, 0xcf}, + {0x713d, CRL_REG_LEN_08BIT, 0x42}, + {0x713e, CRL_REG_LEN_08BIT, 0xe4}, + {0x713f, CRL_REG_LEN_08BIT, 0xcd}, + {0x7140, CRL_REG_LEN_08BIT, 0x07}, + {0x7141, CRL_REG_LEN_08BIT, 0xcd}, + {0x7142, CRL_REG_LEN_08BIT, 0x00}, + {0x7143, CRL_REG_LEN_08BIT, 0x00}, + {0x7144, CRL_REG_LEN_08BIT, 0x17}, + 
{0x7145, CRL_REG_LEN_08BIT, 0xc2}, + {0x7146, CRL_REG_LEN_08BIT, 0xbb}, + {0x7147, CRL_REG_LEN_08BIT, 0xde}, + {0x7148, CRL_REG_LEN_08BIT, 0xcf}, + {0x7149, CRL_REG_LEN_08BIT, 0xdf}, + {0x714a, CRL_REG_LEN_08BIT, 0xac}, + {0x714b, CRL_REG_LEN_08BIT, 0xd1}, + {0x714c, CRL_REG_LEN_08BIT, 0x44}, + {0x714d, CRL_REG_LEN_08BIT, 0xac}, + {0x714e, CRL_REG_LEN_08BIT, 0xb9}, + {0x714f, CRL_REG_LEN_08BIT, 0x76}, + {0x7150, CRL_REG_LEN_08BIT, 0xb8}, + {0x7151, CRL_REG_LEN_08BIT, 0x08}, + {0x7152, CRL_REG_LEN_08BIT, 0xb6}, + {0x7153, CRL_REG_LEN_08BIT, 0xfe}, + {0x7154, CRL_REG_LEN_08BIT, 0xb4}, + {0x7155, CRL_REG_LEN_08BIT, 0xca}, + {0x7156, CRL_REG_LEN_08BIT, 0xd6}, + {0x7157, CRL_REG_LEN_08BIT, 0xd8}, + {0x7158, CRL_REG_LEN_08BIT, 0xab}, + {0x7159, CRL_REG_LEN_08BIT, 0x00}, + {0x715a, CRL_REG_LEN_08BIT, 0xe1}, + {0x715b, CRL_REG_LEN_08BIT, 0x36}, + {0x715c, CRL_REG_LEN_08BIT, 0x30}, + {0x715d, CRL_REG_LEN_08BIT, 0xd3}, + {0x715e, CRL_REG_LEN_08BIT, 0xbc}, + {0x715f, CRL_REG_LEN_08BIT, 0x29}, + {0x7160, CRL_REG_LEN_08BIT, 0xb4}, + {0x7161, CRL_REG_LEN_08BIT, 0x1f}, + {0x7162, CRL_REG_LEN_08BIT, 0xaa}, + {0x7163, CRL_REG_LEN_08BIT, 0xbd}, + {0x7164, CRL_REG_LEN_08BIT, 0x01}, + {0x7165, CRL_REG_LEN_08BIT, 0xb8}, + {0x7166, CRL_REG_LEN_08BIT, 0x0c}, + {0x7167, CRL_REG_LEN_08BIT, 0x45}, + {0x7168, CRL_REG_LEN_08BIT, 0xa4}, + {0x7169, CRL_REG_LEN_08BIT, 0xbd}, + {0x716a, CRL_REG_LEN_08BIT, 0x03}, + {0x716b, CRL_REG_LEN_08BIT, 0xec}, + {0x716c, CRL_REG_LEN_08BIT, 0xbc}, + {0x716d, CRL_REG_LEN_08BIT, 0x3d}, + {0x716e, CRL_REG_LEN_08BIT, 0xc3}, + {0x716f, CRL_REG_LEN_08BIT, 0xcf}, + {0x7170, CRL_REG_LEN_08BIT, 0x42}, + {0x7171, CRL_REG_LEN_08BIT, 0xb8}, + {0x7172, CRL_REG_LEN_08BIT, 0x00}, + {0x7173, CRL_REG_LEN_08BIT, 0xe4}, + {0x7174, CRL_REG_LEN_08BIT, 0xd5}, + {0x7175, CRL_REG_LEN_08BIT, 0x00}, + {0x7176, CRL_REG_LEN_08BIT, 0xb6}, + {0x7177, CRL_REG_LEN_08BIT, 0x00}, + {0x7178, CRL_REG_LEN_08BIT, 0x74}, + {0x7179, CRL_REG_LEN_08BIT, 0xbd}, + {0x717a, CRL_REG_LEN_08BIT, 0x03}, + 
{0x717b, CRL_REG_LEN_08BIT, 0xb5}, + {0x717c, CRL_REG_LEN_08BIT, 0x39}, + {0x717d, CRL_REG_LEN_08BIT, 0x40}, + {0x717e, CRL_REG_LEN_08BIT, 0x58}, + {0x717f, CRL_REG_LEN_08BIT, 0xdd}, + {0x7180, CRL_REG_LEN_08BIT, 0x19}, + {0x7181, CRL_REG_LEN_08BIT, 0xc1}, + {0x7182, CRL_REG_LEN_08BIT, 0xc8}, + {0x7183, CRL_REG_LEN_08BIT, 0xbd}, + {0x7184, CRL_REG_LEN_08BIT, 0x06}, + {0x7185, CRL_REG_LEN_08BIT, 0x17}, + {0x7186, CRL_REG_LEN_08BIT, 0xc1}, + {0x7187, CRL_REG_LEN_08BIT, 0xc6}, + {0x7188, CRL_REG_LEN_08BIT, 0xe8}, + {0x7189, CRL_REG_LEN_08BIT, 0x00}, + {0x718a, CRL_REG_LEN_08BIT, 0xc0}, + {0x718b, CRL_REG_LEN_08BIT, 0xc8}, + {0x718c, CRL_REG_LEN_08BIT, 0xe6}, + {0x718d, CRL_REG_LEN_08BIT, 0x95}, + {0x718e, CRL_REG_LEN_08BIT, 0x15}, + {0x718f, CRL_REG_LEN_08BIT, 0x00}, + {0x7190, CRL_REG_LEN_08BIT, 0xbc}, + {0x7191, CRL_REG_LEN_08BIT, 0x19}, + {0x7192, CRL_REG_LEN_08BIT, 0xb9}, + {0x7193, CRL_REG_LEN_08BIT, 0xf6}, + {0x7194, CRL_REG_LEN_08BIT, 0x14}, + {0x7195, CRL_REG_LEN_08BIT, 0xc1}, + {0x7196, CRL_REG_LEN_08BIT, 0xd0}, + {0x7197, CRL_REG_LEN_08BIT, 0xd1}, + {0x7198, CRL_REG_LEN_08BIT, 0xac}, + {0x7199, CRL_REG_LEN_08BIT, 0x37}, + {0x719a, CRL_REG_LEN_08BIT, 0xbc}, + {0x719b, CRL_REG_LEN_08BIT, 0x35}, + {0x719c, CRL_REG_LEN_08BIT, 0x36}, + {0x719d, CRL_REG_LEN_08BIT, 0x30}, + {0x719e, CRL_REG_LEN_08BIT, 0xe1}, + {0x719f, CRL_REG_LEN_08BIT, 0xd3}, + {0x71a0, CRL_REG_LEN_08BIT, 0x7a}, + {0x71a1, CRL_REG_LEN_08BIT, 0xb6}, + {0x71a2, CRL_REG_LEN_08BIT, 0x0c}, + {0x71a3, CRL_REG_LEN_08BIT, 0xff}, + {0x71a4, CRL_REG_LEN_08BIT, 0xb4}, + {0x71a5, CRL_REG_LEN_08BIT, 0xc7}, + {0x71a6, CRL_REG_LEN_08BIT, 0xd9}, + {0x71a7, CRL_REG_LEN_08BIT, 0x00}, + {0x71a8, CRL_REG_LEN_08BIT, 0xbd}, + {0x71a9, CRL_REG_LEN_08BIT, 0x01}, + {0x71aa, CRL_REG_LEN_08BIT, 0x56}, + {0x71ab, CRL_REG_LEN_08BIT, 0xc0}, + {0x71ac, CRL_REG_LEN_08BIT, 0xda}, + {0x71ad, CRL_REG_LEN_08BIT, 0xb4}, + {0x71ae, CRL_REG_LEN_08BIT, 0x1f}, + {0x71af, CRL_REG_LEN_08BIT, 0x56}, + {0x71b0, CRL_REG_LEN_08BIT, 0xaa}, + 
{0x71b1, CRL_REG_LEN_08BIT, 0xbc}, + {0x71b2, CRL_REG_LEN_08BIT, 0x08}, + {0x71b3, CRL_REG_LEN_08BIT, 0x00}, + {0x71b4, CRL_REG_LEN_08BIT, 0x57}, + {0x71b5, CRL_REG_LEN_08BIT, 0xe8}, + {0x71b6, CRL_REG_LEN_08BIT, 0xb5}, + {0x71b7, CRL_REG_LEN_08BIT, 0x36}, + {0x71b8, CRL_REG_LEN_08BIT, 0x00}, + {0x71b9, CRL_REG_LEN_08BIT, 0x54}, + {0x71ba, CRL_REG_LEN_08BIT, 0xe7}, + {0x71bb, CRL_REG_LEN_08BIT, 0xc8}, + {0x71bc, CRL_REG_LEN_08BIT, 0xb4}, + {0x71bd, CRL_REG_LEN_08BIT, 0x1f}, + {0x71be, CRL_REG_LEN_08BIT, 0x56}, + {0x71bf, CRL_REG_LEN_08BIT, 0xaa}, + {0x71c0, CRL_REG_LEN_08BIT, 0xbc}, + {0x71c1, CRL_REG_LEN_08BIT, 0x08}, + {0x71c2, CRL_REG_LEN_08BIT, 0x57}, + {0x71c3, CRL_REG_LEN_08BIT, 0x00}, + {0x71c4, CRL_REG_LEN_08BIT, 0xb5}, + {0x71c5, CRL_REG_LEN_08BIT, 0x36}, + {0x71c6, CRL_REG_LEN_08BIT, 0x00}, + {0x71c7, CRL_REG_LEN_08BIT, 0x54}, + {0x71c8, CRL_REG_LEN_08BIT, 0xc8}, + {0x71c9, CRL_REG_LEN_08BIT, 0xb5}, + {0x71ca, CRL_REG_LEN_08BIT, 0x18}, + {0x71cb, CRL_REG_LEN_08BIT, 0xd9}, + {0x71cc, CRL_REG_LEN_08BIT, 0x00}, + {0x71cd, CRL_REG_LEN_08BIT, 0xbd}, + {0x71ce, CRL_REG_LEN_08BIT, 0x01}, + {0x71cf, CRL_REG_LEN_08BIT, 0x56}, + {0x71d0, CRL_REG_LEN_08BIT, 0x08}, + {0x71d1, CRL_REG_LEN_08BIT, 0x57}, + {0x71d2, CRL_REG_LEN_08BIT, 0xe8}, + {0x71d3, CRL_REG_LEN_08BIT, 0xb4}, + {0x71d4, CRL_REG_LEN_08BIT, 0x42}, + {0x71d5, CRL_REG_LEN_08BIT, 0x00}, + {0x71d6, CRL_REG_LEN_08BIT, 0x54}, + {0x71d7, CRL_REG_LEN_08BIT, 0xe7}, + {0x71d8, CRL_REG_LEN_08BIT, 0xc8}, + {0x71d9, CRL_REG_LEN_08BIT, 0xab}, + {0x71da, CRL_REG_LEN_08BIT, 0x00}, + {0x71db, CRL_REG_LEN_08BIT, 0x66}, + {0x71dc, CRL_REG_LEN_08BIT, 0x62}, + {0x71dd, CRL_REG_LEN_08BIT, 0x06}, + {0x71de, CRL_REG_LEN_08BIT, 0x74}, + {0x71df, CRL_REG_LEN_08BIT, 0xb9}, + {0x71e0, CRL_REG_LEN_08BIT, 0x05}, + {0x71e1, CRL_REG_LEN_08BIT, 0xb7}, + {0x71e2, CRL_REG_LEN_08BIT, 0x14}, + {0x71e3, CRL_REG_LEN_08BIT, 0x0e}, + {0x71e4, CRL_REG_LEN_08BIT, 0xb7}, + {0x71e5, CRL_REG_LEN_08BIT, 0x04}, + {0x71e6, CRL_REG_LEN_08BIT, 0xc8}, + 
{0x7600, CRL_REG_LEN_08BIT, 0x04}, + {0x7601, CRL_REG_LEN_08BIT, 0x80}, + {0x7602, CRL_REG_LEN_08BIT, 0x07}, + {0x7603, CRL_REG_LEN_08BIT, 0x44}, + {0x7604, CRL_REG_LEN_08BIT, 0x05}, + {0x7605, CRL_REG_LEN_08BIT, 0x33}, + {0x7606, CRL_REG_LEN_08BIT, 0x0f}, + {0x7607, CRL_REG_LEN_08BIT, 0x00}, + {0x7608, CRL_REG_LEN_08BIT, 0x07}, + {0x7609, CRL_REG_LEN_08BIT, 0x40}, + {0x760a, CRL_REG_LEN_08BIT, 0x04}, + {0x760b, CRL_REG_LEN_08BIT, 0xe5}, + {0x760c, CRL_REG_LEN_08BIT, 0x06}, + {0x760d, CRL_REG_LEN_08BIT, 0x50}, + {0x760e, CRL_REG_LEN_08BIT, 0x04}, + {0x760f, CRL_REG_LEN_08BIT, 0xe4}, + {0x7610, CRL_REG_LEN_08BIT, 0x00}, + {0x7611, CRL_REG_LEN_08BIT, 0x00}, + {0x7612, CRL_REG_LEN_08BIT, 0x06}, + {0x7613, CRL_REG_LEN_08BIT, 0x5c}, + {0x7614, CRL_REG_LEN_08BIT, 0x00}, + {0x7615, CRL_REG_LEN_08BIT, 0x0f}, + {0x7616, CRL_REG_LEN_08BIT, 0x06}, + {0x7617, CRL_REG_LEN_08BIT, 0x1c}, + {0x7618, CRL_REG_LEN_08BIT, 0x00}, + {0x7619, CRL_REG_LEN_08BIT, 0x02}, + {0x761a, CRL_REG_LEN_08BIT, 0x06}, + {0x761b, CRL_REG_LEN_08BIT, 0xa2}, + {0x761c, CRL_REG_LEN_08BIT, 0x00}, + {0x761d, CRL_REG_LEN_08BIT, 0x01}, + {0x761e, CRL_REG_LEN_08BIT, 0x06}, + {0x761f, CRL_REG_LEN_08BIT, 0xae}, + {0x7620, CRL_REG_LEN_08BIT, 0x00}, + {0x7621, CRL_REG_LEN_08BIT, 0x0e}, + {0x7622, CRL_REG_LEN_08BIT, 0x05}, + {0x7623, CRL_REG_LEN_08BIT, 0x30}, + {0x7624, CRL_REG_LEN_08BIT, 0x07}, + {0x7625, CRL_REG_LEN_08BIT, 0x00}, + {0x7626, CRL_REG_LEN_08BIT, 0x0f}, + {0x7627, CRL_REG_LEN_08BIT, 0x00}, + {0x7628, CRL_REG_LEN_08BIT, 0x04}, + {0x7629, CRL_REG_LEN_08BIT, 0xe5}, + {0x762a, CRL_REG_LEN_08BIT, 0x05}, + {0x762b, CRL_REG_LEN_08BIT, 0x33}, + {0x762c, CRL_REG_LEN_08BIT, 0x06}, + {0x762d, CRL_REG_LEN_08BIT, 0x12}, + {0x762e, CRL_REG_LEN_08BIT, 0x00}, + {0x762f, CRL_REG_LEN_08BIT, 0x01}, + {0x7630, CRL_REG_LEN_08BIT, 0x06}, + {0x7631, CRL_REG_LEN_08BIT, 0x52}, + {0x7632, CRL_REG_LEN_08BIT, 0x00}, + {0x7633, CRL_REG_LEN_08BIT, 0x01}, + {0x7634, CRL_REG_LEN_08BIT, 0x06}, + {0x7635, CRL_REG_LEN_08BIT, 0x5e}, + 
{0x7636, CRL_REG_LEN_08BIT, 0x04}, + {0x7637, CRL_REG_LEN_08BIT, 0xe4}, + {0x7638, CRL_REG_LEN_08BIT, 0x00}, + {0x7639, CRL_REG_LEN_08BIT, 0x01}, + {0x763a, CRL_REG_LEN_08BIT, 0x05}, + {0x763b, CRL_REG_LEN_08BIT, 0x30}, + {0x763c, CRL_REG_LEN_08BIT, 0x0f}, + {0x763d, CRL_REG_LEN_08BIT, 0x00}, + {0x763e, CRL_REG_LEN_08BIT, 0x06}, + {0x763f, CRL_REG_LEN_08BIT, 0xa6}, + {0x7640, CRL_REG_LEN_08BIT, 0x00}, + {0x7641, CRL_REG_LEN_08BIT, 0x02}, + {0x7642, CRL_REG_LEN_08BIT, 0x06}, + {0x7643, CRL_REG_LEN_08BIT, 0x26}, + {0x7644, CRL_REG_LEN_08BIT, 0x00}, + {0x7645, CRL_REG_LEN_08BIT, 0x02}, + {0x7646, CRL_REG_LEN_08BIT, 0x05}, + {0x7647, CRL_REG_LEN_08BIT, 0x33}, + {0x7648, CRL_REG_LEN_08BIT, 0x06}, + {0x7649, CRL_REG_LEN_08BIT, 0x20}, + {0x764a, CRL_REG_LEN_08BIT, 0x0f}, + {0x764b, CRL_REG_LEN_08BIT, 0x00}, + {0x764c, CRL_REG_LEN_08BIT, 0x06}, + {0x764d, CRL_REG_LEN_08BIT, 0x56}, + {0x764e, CRL_REG_LEN_08BIT, 0x00}, + {0x764f, CRL_REG_LEN_08BIT, 0x02}, + {0x7650, CRL_REG_LEN_08BIT, 0x06}, + {0x7651, CRL_REG_LEN_08BIT, 0x16}, + {0x7652, CRL_REG_LEN_08BIT, 0x05}, + {0x7653, CRL_REG_LEN_08BIT, 0x33}, + {0x7654, CRL_REG_LEN_08BIT, 0x06}, + {0x7655, CRL_REG_LEN_08BIT, 0x10}, + {0x7656, CRL_REG_LEN_08BIT, 0x0f}, + {0x7657, CRL_REG_LEN_08BIT, 0x00}, + {0x7658, CRL_REG_LEN_08BIT, 0x06}, + {0x7659, CRL_REG_LEN_08BIT, 0x10}, + {0x765a, CRL_REG_LEN_08BIT, 0x0f}, + {0x765b, CRL_REG_LEN_08BIT, 0x00}, + {0x765c, CRL_REG_LEN_08BIT, 0x06}, + {0x765d, CRL_REG_LEN_08BIT, 0x20}, + {0x765e, CRL_REG_LEN_08BIT, 0x0f}, + {0x765f, CRL_REG_LEN_08BIT, 0x00}, + {0x7660, CRL_REG_LEN_08BIT, 0x00}, + {0x7661, CRL_REG_LEN_08BIT, 0x00}, + {0x7662, CRL_REG_LEN_08BIT, 0x00}, + {0x7663, CRL_REG_LEN_08BIT, 0x02}, + {0x7664, CRL_REG_LEN_08BIT, 0x04}, + {0x7665, CRL_REG_LEN_08BIT, 0xe5}, + {0x7666, CRL_REG_LEN_08BIT, 0x04}, + {0x7667, CRL_REG_LEN_08BIT, 0xe4}, + {0x7668, CRL_REG_LEN_08BIT, 0x0f}, + {0x7669, CRL_REG_LEN_08BIT, 0x00}, + {0x766a, CRL_REG_LEN_08BIT, 0x00}, + {0x766b, CRL_REG_LEN_08BIT, 0x00}, + 
{0x766c, CRL_REG_LEN_08BIT, 0x00}, + {0x766d, CRL_REG_LEN_08BIT, 0x01}, + {0x766e, CRL_REG_LEN_08BIT, 0x04}, + {0x766f, CRL_REG_LEN_08BIT, 0xe5}, + {0x7670, CRL_REG_LEN_08BIT, 0x04}, + {0x7671, CRL_REG_LEN_08BIT, 0xe4}, + {0x7672, CRL_REG_LEN_08BIT, 0x0f}, + {0x7673, CRL_REG_LEN_08BIT, 0x00}, + {0x7674, CRL_REG_LEN_08BIT, 0x00}, + {0x7675, CRL_REG_LEN_08BIT, 0x02}, + {0x7676, CRL_REG_LEN_08BIT, 0x04}, + {0x7677, CRL_REG_LEN_08BIT, 0xe4}, + {0x7678, CRL_REG_LEN_08BIT, 0x00}, + {0x7679, CRL_REG_LEN_08BIT, 0x02}, + {0x767a, CRL_REG_LEN_08BIT, 0x04}, + {0x767b, CRL_REG_LEN_08BIT, 0xc4}, + {0x767c, CRL_REG_LEN_08BIT, 0x00}, + {0x767d, CRL_REG_LEN_08BIT, 0x02}, + {0x767e, CRL_REG_LEN_08BIT, 0x04}, + {0x767f, CRL_REG_LEN_08BIT, 0xc4}, + {0x7680, CRL_REG_LEN_08BIT, 0x05}, + {0x7681, CRL_REG_LEN_08BIT, 0x83}, + {0x7682, CRL_REG_LEN_08BIT, 0x0f}, + {0x7683, CRL_REG_LEN_08BIT, 0x00}, + {0x7684, CRL_REG_LEN_08BIT, 0x00}, + {0x7685, CRL_REG_LEN_08BIT, 0x02}, + {0x7686, CRL_REG_LEN_08BIT, 0x04}, + {0x7687, CRL_REG_LEN_08BIT, 0xe4}, + {0x7688, CRL_REG_LEN_08BIT, 0x00}, + {0x7689, CRL_REG_LEN_08BIT, 0x02}, + {0x768a, CRL_REG_LEN_08BIT, 0x04}, + {0x768b, CRL_REG_LEN_08BIT, 0xc4}, + {0x768c, CRL_REG_LEN_08BIT, 0x00}, + {0x768d, CRL_REG_LEN_08BIT, 0x02}, + {0x768e, CRL_REG_LEN_08BIT, 0x04}, + {0x768f, CRL_REG_LEN_08BIT, 0xc4}, + {0x7690, CRL_REG_LEN_08BIT, 0x05}, + {0x7691, CRL_REG_LEN_08BIT, 0x83}, + {0x7692, CRL_REG_LEN_08BIT, 0x03}, + {0x7693, CRL_REG_LEN_08BIT, 0x0b}, + {0x7694, CRL_REG_LEN_08BIT, 0x05}, + {0x7695, CRL_REG_LEN_08BIT, 0x83}, + {0x7696, CRL_REG_LEN_08BIT, 0x00}, + {0x7697, CRL_REG_LEN_08BIT, 0x07}, + {0x7698, CRL_REG_LEN_08BIT, 0x05}, + {0x7699, CRL_REG_LEN_08BIT, 0x03}, + {0x769a, CRL_REG_LEN_08BIT, 0x00}, + {0x769b, CRL_REG_LEN_08BIT, 0x05}, + {0x769c, CRL_REG_LEN_08BIT, 0x05}, + {0x769d, CRL_REG_LEN_08BIT, 0x32}, + {0x769e, CRL_REG_LEN_08BIT, 0x05}, + {0x769f, CRL_REG_LEN_08BIT, 0x30}, + {0x76a0, CRL_REG_LEN_08BIT, 0x00}, + {0x76a1, CRL_REG_LEN_08BIT, 0x02}, + 
{0x76a2, CRL_REG_LEN_08BIT, 0x05}, + {0x76a3, CRL_REG_LEN_08BIT, 0x78}, + {0x76a4, CRL_REG_LEN_08BIT, 0x00}, + {0x76a5, CRL_REG_LEN_08BIT, 0x01}, + {0x76a6, CRL_REG_LEN_08BIT, 0x05}, + {0x76a7, CRL_REG_LEN_08BIT, 0x7c}, + {0x76a8, CRL_REG_LEN_08BIT, 0x03}, + {0x76a9, CRL_REG_LEN_08BIT, 0x9a}, + {0x76aa, CRL_REG_LEN_08BIT, 0x05}, + {0x76ab, CRL_REG_LEN_08BIT, 0x83}, + {0x76ac, CRL_REG_LEN_08BIT, 0x00}, + {0x76ad, CRL_REG_LEN_08BIT, 0x04}, + {0x76ae, CRL_REG_LEN_08BIT, 0x05}, + {0x76af, CRL_REG_LEN_08BIT, 0x03}, + {0x76b0, CRL_REG_LEN_08BIT, 0x00}, + {0x76b1, CRL_REG_LEN_08BIT, 0x03}, + {0x76b2, CRL_REG_LEN_08BIT, 0x05}, + {0x76b3, CRL_REG_LEN_08BIT, 0x32}, + {0x76b4, CRL_REG_LEN_08BIT, 0x05}, + {0x76b5, CRL_REG_LEN_08BIT, 0x30}, + {0x76b6, CRL_REG_LEN_08BIT, 0x00}, + {0x76b7, CRL_REG_LEN_08BIT, 0x02}, + {0x76b8, CRL_REG_LEN_08BIT, 0x05}, + {0x76b9, CRL_REG_LEN_08BIT, 0x78}, + {0x76ba, CRL_REG_LEN_08BIT, 0x00}, + {0x76bb, CRL_REG_LEN_08BIT, 0x01}, + {0x76bc, CRL_REG_LEN_08BIT, 0x05}, + {0x76bd, CRL_REG_LEN_08BIT, 0x7c}, + {0x76be, CRL_REG_LEN_08BIT, 0x03}, + {0x76bf, CRL_REG_LEN_08BIT, 0x99}, + {0x76c0, CRL_REG_LEN_08BIT, 0x05}, + {0x76c1, CRL_REG_LEN_08BIT, 0x83}, + {0x76c2, CRL_REG_LEN_08BIT, 0x00}, + {0x76c3, CRL_REG_LEN_08BIT, 0x03}, + {0x76c4, CRL_REG_LEN_08BIT, 0x05}, + {0x76c5, CRL_REG_LEN_08BIT, 0x03}, + {0x76c6, CRL_REG_LEN_08BIT, 0x00}, + {0x76c7, CRL_REG_LEN_08BIT, 0x01}, + {0x76c8, CRL_REG_LEN_08BIT, 0x05}, + {0x76c9, CRL_REG_LEN_08BIT, 0x32}, + {0x76ca, CRL_REG_LEN_08BIT, 0x05}, + {0x76cb, CRL_REG_LEN_08BIT, 0x30}, + {0x76cc, CRL_REG_LEN_08BIT, 0x00}, + {0x76cd, CRL_REG_LEN_08BIT, 0x02}, + {0x76ce, CRL_REG_LEN_08BIT, 0x05}, + {0x76cf, CRL_REG_LEN_08BIT, 0x78}, + {0x76d0, CRL_REG_LEN_08BIT, 0x00}, + {0x76d1, CRL_REG_LEN_08BIT, 0x01}, + {0x76d2, CRL_REG_LEN_08BIT, 0x05}, + {0x76d3, CRL_REG_LEN_08BIT, 0x7c}, + {0x76d4, CRL_REG_LEN_08BIT, 0x03}, + {0x76d5, CRL_REG_LEN_08BIT, 0x98}, + {0x76d6, CRL_REG_LEN_08BIT, 0x05}, + {0x76d7, CRL_REG_LEN_08BIT, 0x83}, + 
{0x76d8, CRL_REG_LEN_08BIT, 0x00}, + {0x76d9, CRL_REG_LEN_08BIT, 0x00}, + {0x76da, CRL_REG_LEN_08BIT, 0x05}, + {0x76db, CRL_REG_LEN_08BIT, 0x03}, + {0x76dc, CRL_REG_LEN_08BIT, 0x00}, + {0x76dd, CRL_REG_LEN_08BIT, 0x01}, + {0x76de, CRL_REG_LEN_08BIT, 0x05}, + {0x76df, CRL_REG_LEN_08BIT, 0x32}, + {0x76e0, CRL_REG_LEN_08BIT, 0x05}, + {0x76e1, CRL_REG_LEN_08BIT, 0x30}, + {0x76e2, CRL_REG_LEN_08BIT, 0x00}, + {0x76e3, CRL_REG_LEN_08BIT, 0x02}, + {0x76e4, CRL_REG_LEN_08BIT, 0x05}, + {0x76e5, CRL_REG_LEN_08BIT, 0x78}, + {0x76e6, CRL_REG_LEN_08BIT, 0x00}, + {0x76e7, CRL_REG_LEN_08BIT, 0x01}, + {0x76e8, CRL_REG_LEN_08BIT, 0x05}, + {0x76e9, CRL_REG_LEN_08BIT, 0x7c}, + {0x76ea, CRL_REG_LEN_08BIT, 0x03}, + {0x76eb, CRL_REG_LEN_08BIT, 0x97}, + {0x76ec, CRL_REG_LEN_08BIT, 0x05}, + {0x76ed, CRL_REG_LEN_08BIT, 0x83}, + {0x76ee, CRL_REG_LEN_08BIT, 0x00}, + {0x76ef, CRL_REG_LEN_08BIT, 0x00}, + {0x76f0, CRL_REG_LEN_08BIT, 0x05}, + {0x76f1, CRL_REG_LEN_08BIT, 0x03}, + {0x76f2, CRL_REG_LEN_08BIT, 0x05}, + {0x76f3, CRL_REG_LEN_08BIT, 0x32}, + {0x76f4, CRL_REG_LEN_08BIT, 0x05}, + {0x76f5, CRL_REG_LEN_08BIT, 0x30}, + {0x76f6, CRL_REG_LEN_08BIT, 0x00}, + {0x76f7, CRL_REG_LEN_08BIT, 0x02}, + {0x76f8, CRL_REG_LEN_08BIT, 0x05}, + {0x76f9, CRL_REG_LEN_08BIT, 0x78}, + {0x76fa, CRL_REG_LEN_08BIT, 0x00}, + {0x76fb, CRL_REG_LEN_08BIT, 0x01}, + {0x76fc, CRL_REG_LEN_08BIT, 0x05}, + {0x76fd, CRL_REG_LEN_08BIT, 0x7c}, + {0x76fe, CRL_REG_LEN_08BIT, 0x03}, + {0x76ff, CRL_REG_LEN_08BIT, 0x96}, + {0x7700, CRL_REG_LEN_08BIT, 0x05}, + {0x7701, CRL_REG_LEN_08BIT, 0x83}, + {0x7702, CRL_REG_LEN_08BIT, 0x05}, + {0x7703, CRL_REG_LEN_08BIT, 0x03}, + {0x7704, CRL_REG_LEN_08BIT, 0x05}, + {0x7705, CRL_REG_LEN_08BIT, 0x32}, + {0x7706, CRL_REG_LEN_08BIT, 0x05}, + {0x7707, CRL_REG_LEN_08BIT, 0x30}, + {0x7708, CRL_REG_LEN_08BIT, 0x00}, + {0x7709, CRL_REG_LEN_08BIT, 0x02}, + {0x770a, CRL_REG_LEN_08BIT, 0x05}, + {0x770b, CRL_REG_LEN_08BIT, 0x78}, + {0x770c, CRL_REG_LEN_08BIT, 0x00}, + {0x770d, CRL_REG_LEN_08BIT, 0x01}, + 
{0x770e, CRL_REG_LEN_08BIT, 0x05}, + {0x770f, CRL_REG_LEN_08BIT, 0x7c}, + {0x7710, CRL_REG_LEN_08BIT, 0x03}, + {0x7711, CRL_REG_LEN_08BIT, 0x95}, + {0x7712, CRL_REG_LEN_08BIT, 0x05}, + {0x7713, CRL_REG_LEN_08BIT, 0x83}, + {0x7714, CRL_REG_LEN_08BIT, 0x05}, + {0x7715, CRL_REG_LEN_08BIT, 0x03}, + {0x7716, CRL_REG_LEN_08BIT, 0x05}, + {0x7717, CRL_REG_LEN_08BIT, 0x32}, + {0x7718, CRL_REG_LEN_08BIT, 0x05}, + {0x7719, CRL_REG_LEN_08BIT, 0x30}, + {0x771a, CRL_REG_LEN_08BIT, 0x00}, + {0x771b, CRL_REG_LEN_08BIT, 0x02}, + {0x771c, CRL_REG_LEN_08BIT, 0x05}, + {0x771d, CRL_REG_LEN_08BIT, 0x78}, + {0x771e, CRL_REG_LEN_08BIT, 0x00}, + {0x771f, CRL_REG_LEN_08BIT, 0x01}, + {0x7720, CRL_REG_LEN_08BIT, 0x05}, + {0x7721, CRL_REG_LEN_08BIT, 0x7c}, + {0x7722, CRL_REG_LEN_08BIT, 0x03}, + {0x7723, CRL_REG_LEN_08BIT, 0x94}, + {0x7724, CRL_REG_LEN_08BIT, 0x05}, + {0x7725, CRL_REG_LEN_08BIT, 0x83}, + {0x7726, CRL_REG_LEN_08BIT, 0x00}, + {0x7727, CRL_REG_LEN_08BIT, 0x01}, + {0x7728, CRL_REG_LEN_08BIT, 0x05}, + {0x7729, CRL_REG_LEN_08BIT, 0x03}, + {0x772a, CRL_REG_LEN_08BIT, 0x00}, + {0x772b, CRL_REG_LEN_08BIT, 0x01}, + {0x772c, CRL_REG_LEN_08BIT, 0x05}, + {0x772d, CRL_REG_LEN_08BIT, 0x32}, + {0x772e, CRL_REG_LEN_08BIT, 0x05}, + {0x772f, CRL_REG_LEN_08BIT, 0x30}, + {0x7730, CRL_REG_LEN_08BIT, 0x00}, + {0x7731, CRL_REG_LEN_08BIT, 0x02}, + {0x7732, CRL_REG_LEN_08BIT, 0x05}, + {0x7733, CRL_REG_LEN_08BIT, 0x78}, + {0x7734, CRL_REG_LEN_08BIT, 0x00}, + {0x7735, CRL_REG_LEN_08BIT, 0x01}, + {0x7736, CRL_REG_LEN_08BIT, 0x05}, + {0x7737, CRL_REG_LEN_08BIT, 0x7c}, + {0x7738, CRL_REG_LEN_08BIT, 0x03}, + {0x7739, CRL_REG_LEN_08BIT, 0x93}, + {0x773a, CRL_REG_LEN_08BIT, 0x05}, + {0x773b, CRL_REG_LEN_08BIT, 0x83}, + {0x773c, CRL_REG_LEN_08BIT, 0x00}, + {0x773d, CRL_REG_LEN_08BIT, 0x00}, + {0x773e, CRL_REG_LEN_08BIT, 0x05}, + {0x773f, CRL_REG_LEN_08BIT, 0x03}, + {0x7740, CRL_REG_LEN_08BIT, 0x00}, + {0x7741, CRL_REG_LEN_08BIT, 0x00}, + {0x7742, CRL_REG_LEN_08BIT, 0x05}, + {0x7743, CRL_REG_LEN_08BIT, 0x32}, + 
{0x7744, CRL_REG_LEN_08BIT, 0x05}, + {0x7745, CRL_REG_LEN_08BIT, 0x30}, + {0x7746, CRL_REG_LEN_08BIT, 0x00}, + {0x7747, CRL_REG_LEN_08BIT, 0x02}, + {0x7748, CRL_REG_LEN_08BIT, 0x05}, + {0x7749, CRL_REG_LEN_08BIT, 0x78}, + {0x774a, CRL_REG_LEN_08BIT, 0x00}, + {0x774b, CRL_REG_LEN_08BIT, 0x01}, + {0x774c, CRL_REG_LEN_08BIT, 0x05}, + {0x774d, CRL_REG_LEN_08BIT, 0x7c}, + {0x774e, CRL_REG_LEN_08BIT, 0x03}, + {0x774f, CRL_REG_LEN_08BIT, 0x92}, + {0x7750, CRL_REG_LEN_08BIT, 0x05}, + {0x7751, CRL_REG_LEN_08BIT, 0x83}, + {0x7752, CRL_REG_LEN_08BIT, 0x05}, + {0x7753, CRL_REG_LEN_08BIT, 0x03}, + {0x7754, CRL_REG_LEN_08BIT, 0x00}, + {0x7755, CRL_REG_LEN_08BIT, 0x00}, + {0x7756, CRL_REG_LEN_08BIT, 0x05}, + {0x7757, CRL_REG_LEN_08BIT, 0x32}, + {0x7758, CRL_REG_LEN_08BIT, 0x05}, + {0x7759, CRL_REG_LEN_08BIT, 0x30}, + {0x775a, CRL_REG_LEN_08BIT, 0x00}, + {0x775b, CRL_REG_LEN_08BIT, 0x02}, + {0x775c, CRL_REG_LEN_08BIT, 0x05}, + {0x775d, CRL_REG_LEN_08BIT, 0x78}, + {0x775e, CRL_REG_LEN_08BIT, 0x00}, + {0x775f, CRL_REG_LEN_08BIT, 0x01}, + {0x7760, CRL_REG_LEN_08BIT, 0x05}, + {0x7761, CRL_REG_LEN_08BIT, 0x7c}, + {0x7762, CRL_REG_LEN_08BIT, 0x03}, + {0x7763, CRL_REG_LEN_08BIT, 0x91}, + {0x7764, CRL_REG_LEN_08BIT, 0x05}, + {0x7765, CRL_REG_LEN_08BIT, 0x83}, + {0x7766, CRL_REG_LEN_08BIT, 0x05}, + {0x7767, CRL_REG_LEN_08BIT, 0x03}, + {0x7768, CRL_REG_LEN_08BIT, 0x05}, + {0x7769, CRL_REG_LEN_08BIT, 0x32}, + {0x776a, CRL_REG_LEN_08BIT, 0x05}, + {0x776b, CRL_REG_LEN_08BIT, 0x30}, + {0x776c, CRL_REG_LEN_08BIT, 0x00}, + {0x776d, CRL_REG_LEN_08BIT, 0x02}, + {0x776e, CRL_REG_LEN_08BIT, 0x05}, + {0x776f, CRL_REG_LEN_08BIT, 0x78}, + {0x7770, CRL_REG_LEN_08BIT, 0x00}, + {0x7771, CRL_REG_LEN_08BIT, 0x01}, + {0x7772, CRL_REG_LEN_08BIT, 0x05}, + {0x7773, CRL_REG_LEN_08BIT, 0x7c}, + {0x7774, CRL_REG_LEN_08BIT, 0x03}, + {0x7775, CRL_REG_LEN_08BIT, 0x90}, + {0x7776, CRL_REG_LEN_08BIT, 0x05}, + {0x7777, CRL_REG_LEN_08BIT, 0x83}, + {0x7778, CRL_REG_LEN_08BIT, 0x05}, + {0x7779, CRL_REG_LEN_08BIT, 0x03}, + 
{0x777a, CRL_REG_LEN_08BIT, 0x05}, + {0x777b, CRL_REG_LEN_08BIT, 0x32}, + {0x777c, CRL_REG_LEN_08BIT, 0x05}, + {0x777d, CRL_REG_LEN_08BIT, 0x30}, + {0x777e, CRL_REG_LEN_08BIT, 0x00}, + {0x777f, CRL_REG_LEN_08BIT, 0x02}, + {0x7780, CRL_REG_LEN_08BIT, 0x05}, + {0x7781, CRL_REG_LEN_08BIT, 0x78}, + {0x7782, CRL_REG_LEN_08BIT, 0x00}, + {0x7783, CRL_REG_LEN_08BIT, 0x01}, + {0x7784, CRL_REG_LEN_08BIT, 0x05}, + {0x7785, CRL_REG_LEN_08BIT, 0x7c}, + {0x7786, CRL_REG_LEN_08BIT, 0x02}, + {0x7787, CRL_REG_LEN_08BIT, 0x90}, + {0x7788, CRL_REG_LEN_08BIT, 0x05}, + {0x7789, CRL_REG_LEN_08BIT, 0x03}, + {0x778a, CRL_REG_LEN_08BIT, 0x07}, + {0x778b, CRL_REG_LEN_08BIT, 0x00}, + {0x778c, CRL_REG_LEN_08BIT, 0x0f}, + {0x778d, CRL_REG_LEN_08BIT, 0x00}, + {0x778e, CRL_REG_LEN_08BIT, 0x08}, + {0x778f, CRL_REG_LEN_08BIT, 0x30}, + {0x7790, CRL_REG_LEN_08BIT, 0x08}, + {0x7791, CRL_REG_LEN_08BIT, 0xee}, + {0x7792, CRL_REG_LEN_08BIT, 0x0f}, + {0x7793, CRL_REG_LEN_08BIT, 0x00}, + {0x7794, CRL_REG_LEN_08BIT, 0x05}, + {0x7795, CRL_REG_LEN_08BIT, 0x33}, + {0x7796, CRL_REG_LEN_08BIT, 0x04}, + {0x7797, CRL_REG_LEN_08BIT, 0xe5}, + {0x7798, CRL_REG_LEN_08BIT, 0x06}, + {0x7799, CRL_REG_LEN_08BIT, 0x52}, + {0x779a, CRL_REG_LEN_08BIT, 0x04}, + {0x779b, CRL_REG_LEN_08BIT, 0xe4}, + {0x779c, CRL_REG_LEN_08BIT, 0x00}, + {0x779d, CRL_REG_LEN_08BIT, 0x00}, + {0x779e, CRL_REG_LEN_08BIT, 0x06}, + {0x779f, CRL_REG_LEN_08BIT, 0x5e}, + {0x77a0, CRL_REG_LEN_08BIT, 0x00}, + {0x77a1, CRL_REG_LEN_08BIT, 0x0f}, + {0x77a2, CRL_REG_LEN_08BIT, 0x06}, + {0x77a3, CRL_REG_LEN_08BIT, 0x1e}, + {0x77a4, CRL_REG_LEN_08BIT, 0x00}, + {0x77a5, CRL_REG_LEN_08BIT, 0x02}, + {0x77a6, CRL_REG_LEN_08BIT, 0x06}, + {0x77a7, CRL_REG_LEN_08BIT, 0xa2}, + {0x77a8, CRL_REG_LEN_08BIT, 0x00}, + {0x77a9, CRL_REG_LEN_08BIT, 0x01}, + {0x77aa, CRL_REG_LEN_08BIT, 0x06}, + {0x77ab, CRL_REG_LEN_08BIT, 0xae}, + {0x77ac, CRL_REG_LEN_08BIT, 0x00}, + {0x77ad, CRL_REG_LEN_08BIT, 0x03}, + {0x77ae, CRL_REG_LEN_08BIT, 0x05}, + {0x77af, CRL_REG_LEN_08BIT, 0x30}, + 
{0x77b0, CRL_REG_LEN_08BIT, 0x09}, + {0x77b1, CRL_REG_LEN_08BIT, 0x19}, + {0x77b2, CRL_REG_LEN_08BIT, 0x0f}, + {0x77b3, CRL_REG_LEN_08BIT, 0x00}, + {0x77b4, CRL_REG_LEN_08BIT, 0x05}, + {0x77b5, CRL_REG_LEN_08BIT, 0x33}, + {0x77b6, CRL_REG_LEN_08BIT, 0x04}, + {0x77b7, CRL_REG_LEN_08BIT, 0xe5}, + {0x77b8, CRL_REG_LEN_08BIT, 0x06}, + {0x77b9, CRL_REG_LEN_08BIT, 0x52}, + {0x77ba, CRL_REG_LEN_08BIT, 0x04}, + {0x77bb, CRL_REG_LEN_08BIT, 0xe4}, + {0x77bc, CRL_REG_LEN_08BIT, 0x00}, + {0x77bd, CRL_REG_LEN_08BIT, 0x00}, + {0x77be, CRL_REG_LEN_08BIT, 0x06}, + {0x77bf, CRL_REG_LEN_08BIT, 0x5e}, + {0x77c0, CRL_REG_LEN_08BIT, 0x00}, + {0x77c1, CRL_REG_LEN_08BIT, 0x0f}, + {0x77c2, CRL_REG_LEN_08BIT, 0x06}, + {0x77c3, CRL_REG_LEN_08BIT, 0x1e}, + {0x77c4, CRL_REG_LEN_08BIT, 0x00}, + {0x77c5, CRL_REG_LEN_08BIT, 0x02}, + {0x77c6, CRL_REG_LEN_08BIT, 0x06}, + {0x77c7, CRL_REG_LEN_08BIT, 0xa2}, + {0x77c8, CRL_REG_LEN_08BIT, 0x00}, + {0x77c9, CRL_REG_LEN_08BIT, 0x01}, + {0x77ca, CRL_REG_LEN_08BIT, 0x06}, + {0x77cb, CRL_REG_LEN_08BIT, 0xae}, + {0x77cc, CRL_REG_LEN_08BIT, 0x00}, + {0x77cd, CRL_REG_LEN_08BIT, 0x03}, + {0x77ce, CRL_REG_LEN_08BIT, 0x05}, + {0x77cf, CRL_REG_LEN_08BIT, 0x30}, + {0x77d0, CRL_REG_LEN_08BIT, 0x0f}, + {0x77d1, CRL_REG_LEN_08BIT, 0x00}, + {0x77d2, CRL_REG_LEN_08BIT, 0x00}, + {0x77d3, CRL_REG_LEN_08BIT, 0x00}, + {0x77d4, CRL_REG_LEN_08BIT, 0x00}, + {0x77d5, CRL_REG_LEN_08BIT, 0x02}, + {0x77d6, CRL_REG_LEN_08BIT, 0x04}, + {0x77d7, CRL_REG_LEN_08BIT, 0xe5}, + {0x77d8, CRL_REG_LEN_08BIT, 0x04}, + {0x77d9, CRL_REG_LEN_08BIT, 0xe4}, + {0x77da, CRL_REG_LEN_08BIT, 0x05}, + {0x77db, CRL_REG_LEN_08BIT, 0x33}, + {0x77dc, CRL_REG_LEN_08BIT, 0x07}, + {0x77dd, CRL_REG_LEN_08BIT, 0x10}, + {0x77de, CRL_REG_LEN_08BIT, 0x00}, + {0x77df, CRL_REG_LEN_08BIT, 0x00}, + {0x77e0, CRL_REG_LEN_08BIT, 0x01}, + {0x77e1, CRL_REG_LEN_08BIT, 0xbb}, + {0x77e2, CRL_REG_LEN_08BIT, 0x00}, + {0x77e3, CRL_REG_LEN_08BIT, 0x00}, + {0x77e4, CRL_REG_LEN_08BIT, 0x01}, + {0x77e5, CRL_REG_LEN_08BIT, 0xaa}, + 
{0x77e6, CRL_REG_LEN_08BIT, 0x00}, + {0x77e7, CRL_REG_LEN_08BIT, 0x00}, + {0x77e8, CRL_REG_LEN_08BIT, 0x01}, + {0x77e9, CRL_REG_LEN_08BIT, 0x99}, + {0x77ea, CRL_REG_LEN_08BIT, 0x00}, + {0x77eb, CRL_REG_LEN_08BIT, 0x00}, + {0x77ec, CRL_REG_LEN_08BIT, 0x01}, + {0x77ed, CRL_REG_LEN_08BIT, 0x88}, + {0x77ee, CRL_REG_LEN_08BIT, 0x00}, + {0x77ef, CRL_REG_LEN_08BIT, 0x00}, + {0x77f0, CRL_REG_LEN_08BIT, 0x01}, + {0x77f1, CRL_REG_LEN_08BIT, 0x77}, + {0x77f2, CRL_REG_LEN_08BIT, 0x00}, + {0x77f3, CRL_REG_LEN_08BIT, 0x00}, + {0x77f4, CRL_REG_LEN_08BIT, 0x01}, + {0x77f5, CRL_REG_LEN_08BIT, 0x66}, + {0x77f6, CRL_REG_LEN_08BIT, 0x00}, + {0x77f7, CRL_REG_LEN_08BIT, 0x00}, + {0x77f8, CRL_REG_LEN_08BIT, 0x01}, + {0x77f9, CRL_REG_LEN_08BIT, 0x55}, + {0x77fa, CRL_REG_LEN_08BIT, 0x00}, + {0x77fb, CRL_REG_LEN_08BIT, 0x00}, + {0x77fc, CRL_REG_LEN_08BIT, 0x01}, + {0x77fd, CRL_REG_LEN_08BIT, 0x44}, + {0x77fe, CRL_REG_LEN_08BIT, 0x00}, + {0x77ff, CRL_REG_LEN_08BIT, 0x00}, + {0x7800, CRL_REG_LEN_08BIT, 0x01}, + {0x7801, CRL_REG_LEN_08BIT, 0x33}, + {0x7802, CRL_REG_LEN_08BIT, 0x00}, + {0x7803, CRL_REG_LEN_08BIT, 0x00}, + {0x7804, CRL_REG_LEN_08BIT, 0x01}, + {0x7805, CRL_REG_LEN_08BIT, 0x22}, + {0x7806, CRL_REG_LEN_08BIT, 0x00}, + {0x7807, CRL_REG_LEN_08BIT, 0x00}, + {0x7808, CRL_REG_LEN_08BIT, 0x01}, + {0x7809, CRL_REG_LEN_08BIT, 0x11}, + {0x780a, CRL_REG_LEN_08BIT, 0x00}, + {0x780b, CRL_REG_LEN_08BIT, 0x00}, + {0x780c, CRL_REG_LEN_08BIT, 0x01}, + {0x780d, CRL_REG_LEN_08BIT, 0x00}, + {0x780e, CRL_REG_LEN_08BIT, 0x01}, + {0x780f, CRL_REG_LEN_08BIT, 0xff}, + {0x7810, CRL_REG_LEN_08BIT, 0x07}, + {0x7811, CRL_REG_LEN_08BIT, 0x00}, + {0x7812, CRL_REG_LEN_08BIT, 0x02}, + {0x7813, CRL_REG_LEN_08BIT, 0xa0}, + {0x7814, CRL_REG_LEN_08BIT, 0x0f}, + {0x7815, CRL_REG_LEN_08BIT, 0x00}, + {0x7816, CRL_REG_LEN_08BIT, 0x08}, + {0x7817, CRL_REG_LEN_08BIT, 0x35}, + {0x7818, CRL_REG_LEN_08BIT, 0x06}, + {0x7819, CRL_REG_LEN_08BIT, 0x52}, + {0x781a, CRL_REG_LEN_08BIT, 0x04}, + {0x781b, CRL_REG_LEN_08BIT, 0xe4}, + 
{0x781c, CRL_REG_LEN_08BIT, 0x00}, + {0x781d, CRL_REG_LEN_08BIT, 0x00}, + {0x781e, CRL_REG_LEN_08BIT, 0x06}, + {0x781f, CRL_REG_LEN_08BIT, 0x5e}, + {0x7820, CRL_REG_LEN_08BIT, 0x05}, + {0x7821, CRL_REG_LEN_08BIT, 0x33}, + {0x7822, CRL_REG_LEN_08BIT, 0x09}, + {0x7823, CRL_REG_LEN_08BIT, 0x19}, + {0x7824, CRL_REG_LEN_08BIT, 0x06}, + {0x7825, CRL_REG_LEN_08BIT, 0x1e}, + {0x7826, CRL_REG_LEN_08BIT, 0x05}, + {0x7827, CRL_REG_LEN_08BIT, 0x33}, + {0x7828, CRL_REG_LEN_08BIT, 0x00}, + {0x7829, CRL_REG_LEN_08BIT, 0x01}, + {0x782a, CRL_REG_LEN_08BIT, 0x06}, + {0x782b, CRL_REG_LEN_08BIT, 0x24}, + {0x782c, CRL_REG_LEN_08BIT, 0x06}, + {0x782d, CRL_REG_LEN_08BIT, 0x20}, + {0x782e, CRL_REG_LEN_08BIT, 0x0f}, + {0x782f, CRL_REG_LEN_08BIT, 0x00}, + {0x7830, CRL_REG_LEN_08BIT, 0x08}, + {0x7831, CRL_REG_LEN_08BIT, 0x35}, + {0x7832, CRL_REG_LEN_08BIT, 0x07}, + {0x7833, CRL_REG_LEN_08BIT, 0x10}, + {0x7834, CRL_REG_LEN_08BIT, 0x00}, + {0x7835, CRL_REG_LEN_08BIT, 0x00}, + {0x7836, CRL_REG_LEN_08BIT, 0x01}, + {0x7837, CRL_REG_LEN_08BIT, 0xbb}, + {0x7838, CRL_REG_LEN_08BIT, 0x00}, + {0x7839, CRL_REG_LEN_08BIT, 0x00}, + {0x783a, CRL_REG_LEN_08BIT, 0x01}, + {0x783b, CRL_REG_LEN_08BIT, 0xaa}, + {0x783c, CRL_REG_LEN_08BIT, 0x00}, + {0x783d, CRL_REG_LEN_08BIT, 0x00}, + {0x783e, CRL_REG_LEN_08BIT, 0x01}, + {0x783f, CRL_REG_LEN_08BIT, 0x99}, + {0x7840, CRL_REG_LEN_08BIT, 0x00}, + {0x7841, CRL_REG_LEN_08BIT, 0x00}, + {0x7842, CRL_REG_LEN_08BIT, 0x01}, + {0x7843, CRL_REG_LEN_08BIT, 0x88}, + {0x7844, CRL_REG_LEN_08BIT, 0x00}, + {0x7845, CRL_REG_LEN_08BIT, 0x00}, + {0x7846, CRL_REG_LEN_08BIT, 0x01}, + {0x7847, CRL_REG_LEN_08BIT, 0x77}, + {0x7848, CRL_REG_LEN_08BIT, 0x00}, + {0x7849, CRL_REG_LEN_08BIT, 0x00}, + {0x784a, CRL_REG_LEN_08BIT, 0x01}, + {0x784b, CRL_REG_LEN_08BIT, 0x66}, + {0x784c, CRL_REG_LEN_08BIT, 0x00}, + {0x784d, CRL_REG_LEN_08BIT, 0x00}, + {0x784e, CRL_REG_LEN_08BIT, 0x01}, + {0x784f, CRL_REG_LEN_08BIT, 0x55}, + {0x7850, CRL_REG_LEN_08BIT, 0x00}, + {0x7851, CRL_REG_LEN_08BIT, 0x00}, + 
{0x7852, CRL_REG_LEN_08BIT, 0x01}, + {0x7853, CRL_REG_LEN_08BIT, 0x44}, + {0x7854, CRL_REG_LEN_08BIT, 0x00}, + {0x7855, CRL_REG_LEN_08BIT, 0x00}, + {0x7856, CRL_REG_LEN_08BIT, 0x01}, + {0x7857, CRL_REG_LEN_08BIT, 0x33}, + {0x7858, CRL_REG_LEN_08BIT, 0x00}, + {0x7859, CRL_REG_LEN_08BIT, 0x00}, + {0x785a, CRL_REG_LEN_08BIT, 0x01}, + {0x785b, CRL_REG_LEN_08BIT, 0x22}, + {0x785c, CRL_REG_LEN_08BIT, 0x00}, + {0x785d, CRL_REG_LEN_08BIT, 0x00}, + {0x785e, CRL_REG_LEN_08BIT, 0x01}, + {0x785f, CRL_REG_LEN_08BIT, 0x11}, + {0x7860, CRL_REG_LEN_08BIT, 0x00}, + {0x7861, CRL_REG_LEN_08BIT, 0x00}, + {0x7862, CRL_REG_LEN_08BIT, 0x01}, + {0x7863, CRL_REG_LEN_08BIT, 0x00}, + {0x7864, CRL_REG_LEN_08BIT, 0x07}, + {0x7865, CRL_REG_LEN_08BIT, 0x00}, + {0x7866, CRL_REG_LEN_08BIT, 0x01}, + {0x7867, CRL_REG_LEN_08BIT, 0xff}, + {0x7868, CRL_REG_LEN_08BIT, 0x02}, + {0x7869, CRL_REG_LEN_08BIT, 0xa0}, + {0x786a, CRL_REG_LEN_08BIT, 0x0f}, + {0x786b, CRL_REG_LEN_08BIT, 0x00}, + {0x786c, CRL_REG_LEN_08BIT, 0x08}, + {0x786d, CRL_REG_LEN_08BIT, 0x3a}, + {0x786e, CRL_REG_LEN_08BIT, 0x08}, + {0x786f, CRL_REG_LEN_08BIT, 0x6a}, + {0x7870, CRL_REG_LEN_08BIT, 0x0f}, + {0x7871, CRL_REG_LEN_08BIT, 0x00}, + {0x7872, CRL_REG_LEN_08BIT, 0x04}, + {0x7873, CRL_REG_LEN_08BIT, 0xc0}, + {0x7874, CRL_REG_LEN_08BIT, 0x09}, + {0x7875, CRL_REG_LEN_08BIT, 0x19}, + {0x7876, CRL_REG_LEN_08BIT, 0x04}, + {0x7877, CRL_REG_LEN_08BIT, 0x99}, + {0x7878, CRL_REG_LEN_08BIT, 0x07}, + {0x7879, CRL_REG_LEN_08BIT, 0x14}, + {0x787a, CRL_REG_LEN_08BIT, 0x00}, + {0x787b, CRL_REG_LEN_08BIT, 0x01}, + {0x787c, CRL_REG_LEN_08BIT, 0x04}, + {0x787d, CRL_REG_LEN_08BIT, 0xa4}, + {0x787e, CRL_REG_LEN_08BIT, 0x00}, + {0x787f, CRL_REG_LEN_08BIT, 0x07}, + {0x7880, CRL_REG_LEN_08BIT, 0x04}, + {0x7881, CRL_REG_LEN_08BIT, 0xa6}, + {0x7882, CRL_REG_LEN_08BIT, 0x00}, + {0x7883, CRL_REG_LEN_08BIT, 0x00}, + {0x7884, CRL_REG_LEN_08BIT, 0x04}, + {0x7885, CRL_REG_LEN_08BIT, 0xa0}, + {0x7886, CRL_REG_LEN_08BIT, 0x04}, + {0x7887, CRL_REG_LEN_08BIT, 0x80}, + 
{0x7888, CRL_REG_LEN_08BIT, 0x04}, + {0x7889, CRL_REG_LEN_08BIT, 0x00}, + {0x788a, CRL_REG_LEN_08BIT, 0x05}, + {0x788b, CRL_REG_LEN_08BIT, 0x03}, + {0x788c, CRL_REG_LEN_08BIT, 0x06}, + {0x788d, CRL_REG_LEN_08BIT, 0x00}, + {0x788e, CRL_REG_LEN_08BIT, 0x0f}, + {0x788f, CRL_REG_LEN_08BIT, 0x00}, + {0x7890, CRL_REG_LEN_08BIT, 0x0f}, + {0x7891, CRL_REG_LEN_08BIT, 0x00}, + {0x7892, CRL_REG_LEN_08BIT, 0x0f}, + {0x7893, CRL_REG_LEN_08BIT, 0x00}, + {0x30a3, CRL_REG_LEN_08BIT, 0x00}, + {0x30a7, CRL_REG_LEN_08BIT, 0x48}, + {0x30ab, CRL_REG_LEN_08BIT, 0x04}, + {0x30af, CRL_REG_LEN_08BIT, 0x40}, + {0x3001, CRL_REG_LEN_08BIT, 0x32}, + {0x3005, CRL_REG_LEN_08BIT, 0x13}, + {0x3014, CRL_REG_LEN_08BIT, 0x44}, + {0x3196, CRL_REG_LEN_08BIT, 0x00}, + {0x3197, CRL_REG_LEN_08BIT, 0x0a}, + {0x3195, CRL_REG_LEN_08BIT, 0x04}, + {0x31e3, CRL_REG_LEN_08BIT, 0x02}, + {0x31e4, CRL_REG_LEN_08BIT, 0x10}, + {0x3250, CRL_REG_LEN_08BIT, 0xf7}, +}; + +/* ov2775_1928x1088_2x12_30fps_mipi960_regset */ +static struct crl_register_write_rep ov2775_2x12_30fps_mipi960_regset[] = { + {0x3000, CRL_REG_LEN_08BIT, 0x02}, + {0x3001, CRL_REG_LEN_08BIT, 0x28}, + {0x3002, CRL_REG_LEN_08BIT, 0x03}, + {0x3003, CRL_REG_LEN_08BIT, 0x01}, + {0x3004, CRL_REG_LEN_08BIT, 0x02}, + {0x3005, CRL_REG_LEN_08BIT, 0x26}, + {0x3006, CRL_REG_LEN_08BIT, 0x00}, + {0x3007, CRL_REG_LEN_08BIT, 0x07}, + {0x3008, CRL_REG_LEN_08BIT, 0x01}, + {0x3009, CRL_REG_LEN_08BIT, 0x00}, + {0x300c, CRL_REG_LEN_08BIT, 0x6c}, + {0x300e, CRL_REG_LEN_08BIT, 0x80}, + {0x300f, CRL_REG_LEN_08BIT, 0x00}, + {0x3012, CRL_REG_LEN_08BIT, 0x00}, + {0x3014, CRL_REG_LEN_08BIT, 0xc4}, + {0x3015, CRL_REG_LEN_08BIT, 0x00}, + {0x3017, CRL_REG_LEN_08BIT, 0x00}, + {0x3018, CRL_REG_LEN_08BIT, 0x00}, + {0x3019, CRL_REG_LEN_08BIT, 0x00}, + {0x301a, CRL_REG_LEN_08BIT, 0x00}, + {0x301b, CRL_REG_LEN_08BIT, 0x0e}, + {0x301e, CRL_REG_LEN_08BIT, 0x17}, + {0x301f, CRL_REG_LEN_08BIT, 0xe1}, + {0x3030, CRL_REG_LEN_08BIT, 0x02}, + {0x3031, CRL_REG_LEN_08BIT, 0x62}, + {0x3032, 
CRL_REG_LEN_08BIT, 0xf0}, + {0x3033, CRL_REG_LEN_08BIT, 0x30}, + {0x3034, CRL_REG_LEN_08BIT, 0x3f}, + {0x3035, CRL_REG_LEN_08BIT, 0x5f}, + {0x3036, CRL_REG_LEN_08BIT, 0x02}, + {0x3037, CRL_REG_LEN_08BIT, 0x9f}, + {0x3038, CRL_REG_LEN_08BIT, 0x04}, + {0x3039, CRL_REG_LEN_08BIT, 0xb7}, + {0x303a, CRL_REG_LEN_08BIT, 0x04}, + {0x303b, CRL_REG_LEN_08BIT, 0x07}, + {0x303c, CRL_REG_LEN_08BIT, 0xf0}, + {0x303d, CRL_REG_LEN_08BIT, 0x00}, + {0x303e, CRL_REG_LEN_08BIT, 0x0b}, + {0x303f, CRL_REG_LEN_08BIT, 0xe3}, + {0x3040, CRL_REG_LEN_08BIT, 0xf3}, + {0x3041, CRL_REG_LEN_08BIT, 0x29}, + {0x3042, CRL_REG_LEN_08BIT, 0xf6}, + {0x3043, CRL_REG_LEN_08BIT, 0x65}, + {0x3044, CRL_REG_LEN_08BIT, 0x06}, + {0x3045, CRL_REG_LEN_08BIT, 0x0f}, + {0x3046, CRL_REG_LEN_08BIT, 0x59}, + {0x3047, CRL_REG_LEN_08BIT, 0x07}, + {0x3048, CRL_REG_LEN_08BIT, 0x82}, + {0x3049, CRL_REG_LEN_08BIT, 0xcf}, + {0x304a, CRL_REG_LEN_08BIT, 0x12}, + {0x304b, CRL_REG_LEN_08BIT, 0x40}, + {0x304c, CRL_REG_LEN_08BIT, 0x33}, + {0x304d, CRL_REG_LEN_08BIT, 0xa4}, + {0x304e, CRL_REG_LEN_08BIT, 0x0b}, + {0x304f, CRL_REG_LEN_08BIT, 0x3d}, + {0x3050, CRL_REG_LEN_08BIT, 0x10}, + {0x3060, CRL_REG_LEN_08BIT, 0x00}, + {0x3061, CRL_REG_LEN_08BIT, 0x64}, + {0x3062, CRL_REG_LEN_08BIT, 0x00}, + {0x3063, CRL_REG_LEN_08BIT, 0xe4}, + {0x3066, CRL_REG_LEN_08BIT, 0x80}, + {0x3080, CRL_REG_LEN_08BIT, 0x00}, + {0x3081, CRL_REG_LEN_08BIT, 0x00}, + {0x3082, CRL_REG_LEN_08BIT, 0x01}, + {0x3083, CRL_REG_LEN_08BIT, 0xe3}, + {0x3084, CRL_REG_LEN_08BIT, 0x06}, + {0x3085, CRL_REG_LEN_08BIT, 0x00}, + {0x3086, CRL_REG_LEN_08BIT, 0x10}, + {0x3087, CRL_REG_LEN_08BIT, 0x10}, + {0x3089, CRL_REG_LEN_08BIT, 0x00}, + {0x308a, CRL_REG_LEN_08BIT, 0x01}, + {0x3093, CRL_REG_LEN_08BIT, 0x00}, + {0x30a0, CRL_REG_LEN_08BIT, 0x00}, + {0x30a1, CRL_REG_LEN_08BIT, 0x04}, + {0x30a2, CRL_REG_LEN_08BIT, 0x00}, + {0x30a3, CRL_REG_LEN_08BIT, 0x08}, + {0x30a4, CRL_REG_LEN_08BIT, 0x07}, + {0x30a5, CRL_REG_LEN_08BIT, 0x8b}, + {0x30a6, CRL_REG_LEN_08BIT, 0x04}, + {0x30a7, 
CRL_REG_LEN_08BIT, 0x3f}, + {0x30a8, CRL_REG_LEN_08BIT, 0x00}, + {0x30a9, CRL_REG_LEN_08BIT, 0x04}, + {0x30aa, CRL_REG_LEN_08BIT, 0x00}, + {0x30ab, CRL_REG_LEN_08BIT, 0x00}, + {0x30ac, CRL_REG_LEN_08BIT, 0x07}, + {0x30ad, CRL_REG_LEN_08BIT, 0x80}, + {0x30ae, CRL_REG_LEN_08BIT, 0x04}, + {0x30af, CRL_REG_LEN_08BIT, 0x40}, + {0x30b4, CRL_REG_LEN_08BIT, 0x00}, + {0x30b5, CRL_REG_LEN_08BIT, 0x00}, + {0x30ba, CRL_REG_LEN_08BIT, 0x10}, + {0x30bc, CRL_REG_LEN_08BIT, 0x00}, + {0x30bd, CRL_REG_LEN_08BIT, 0x03}, + {0x30be, CRL_REG_LEN_08BIT, 0x5c}, + {0x30bf, CRL_REG_LEN_08BIT, 0x00}, + {0x30c0, CRL_REG_LEN_08BIT, 0x01}, + {0x30c1, CRL_REG_LEN_08BIT, 0x00}, + {0x30c2, CRL_REG_LEN_08BIT, 0x20}, + {0x30c3, CRL_REG_LEN_08BIT, 0x00}, + {0x30c4, CRL_REG_LEN_08BIT, 0x4a}, + {0x30c5, CRL_REG_LEN_08BIT, 0x00}, + {0x30c7, CRL_REG_LEN_08BIT, 0x00}, + {0x30c8, CRL_REG_LEN_08BIT, 0x00}, + {0x30d1, CRL_REG_LEN_08BIT, 0x00}, + {0x30d2, CRL_REG_LEN_08BIT, 0x00}, + {0x30d3, CRL_REG_LEN_08BIT, 0x80}, + {0x30d4, CRL_REG_LEN_08BIT, 0x00}, + {0x30d9, CRL_REG_LEN_08BIT, 0x09}, + {0x30da, CRL_REG_LEN_08BIT, 0x64}, + {0x30dd, CRL_REG_LEN_08BIT, 0x00}, + {0x30de, CRL_REG_LEN_08BIT, 0x16}, + {0x30df, CRL_REG_LEN_08BIT, 0x00}, + {0x30e0, CRL_REG_LEN_08BIT, 0x17}, + {0x30e1, CRL_REG_LEN_08BIT, 0x00}, + {0x30e2, CRL_REG_LEN_08BIT, 0x18}, + {0x30e3, CRL_REG_LEN_08BIT, 0x10}, + {0x30e4, CRL_REG_LEN_08BIT, 0x04}, + {0x30e5, CRL_REG_LEN_08BIT, 0x00}, + {0x30e6, CRL_REG_LEN_08BIT, 0x00}, + {0x30e7, CRL_REG_LEN_08BIT, 0x00}, + {0x30e8, CRL_REG_LEN_08BIT, 0x00}, + {0x30e9, CRL_REG_LEN_08BIT, 0x00}, + {0x30ea, CRL_REG_LEN_08BIT, 0x00}, + {0x30eb, CRL_REG_LEN_08BIT, 0x00}, + {0x30ec, CRL_REG_LEN_08BIT, 0x00}, + {0x30ed, CRL_REG_LEN_08BIT, 0x00}, + {0x3101, CRL_REG_LEN_08BIT, 0x00}, + {0x3102, CRL_REG_LEN_08BIT, 0x00}, + {0x3103, CRL_REG_LEN_08BIT, 0x00}, + {0x3104, CRL_REG_LEN_08BIT, 0x00}, + {0x3105, CRL_REG_LEN_08BIT, 0x8c}, + {0x3106, CRL_REG_LEN_08BIT, 0x87}, + {0x3107, CRL_REG_LEN_08BIT, 0xc0}, + {0x3108, 
CRL_REG_LEN_08BIT, 0x9d}, + {0x3109, CRL_REG_LEN_08BIT, 0x8d}, + {0x310a, CRL_REG_LEN_08BIT, 0x8d}, + {0x310b, CRL_REG_LEN_08BIT, 0x6a}, + {0x310c, CRL_REG_LEN_08BIT, 0x3a}, + {0x310d, CRL_REG_LEN_08BIT, 0x5a}, + {0x310e, CRL_REG_LEN_08BIT, 0x00}, + {0x3120, CRL_REG_LEN_08BIT, 0x00}, + {0x3121, CRL_REG_LEN_08BIT, 0x00}, + {0x3122, CRL_REG_LEN_08BIT, 0x00}, + {0x3123, CRL_REG_LEN_08BIT, 0x00}, + {0x3124, CRL_REG_LEN_08BIT, 0x00}, + {0x3125, CRL_REG_LEN_08BIT, 0x70}, + {0x3126, CRL_REG_LEN_08BIT, 0x1f}, + {0x3127, CRL_REG_LEN_08BIT, 0x0f}, + {0x3128, CRL_REG_LEN_08BIT, 0x00}, + {0x3129, CRL_REG_LEN_08BIT, 0x3a}, + {0x312a, CRL_REG_LEN_08BIT, 0x02}, + {0x312b, CRL_REG_LEN_08BIT, 0x0f}, + {0x312c, CRL_REG_LEN_08BIT, 0x00}, + {0x312d, CRL_REG_LEN_08BIT, 0x0f}, + {0x312e, CRL_REG_LEN_08BIT, 0x1d}, + {0x312f, CRL_REG_LEN_08BIT, 0x00}, + {0x3130, CRL_REG_LEN_08BIT, 0x00}, + {0x3131, CRL_REG_LEN_08BIT, 0x00}, + {0x3132, CRL_REG_LEN_08BIT, 0x00}, + {0x3140, CRL_REG_LEN_08BIT, 0x0a}, + {0x3141, CRL_REG_LEN_08BIT, 0x03}, + {0x3142, CRL_REG_LEN_08BIT, 0x00}, + {0x3143, CRL_REG_LEN_08BIT, 0x00}, + {0x3144, CRL_REG_LEN_08BIT, 0x00}, + {0x3145, CRL_REG_LEN_08BIT, 0x00}, + {0x3146, CRL_REG_LEN_08BIT, 0x00}, + {0x3147, CRL_REG_LEN_08BIT, 0x00}, + {0x3148, CRL_REG_LEN_08BIT, 0x00}, + {0x3149, CRL_REG_LEN_08BIT, 0x00}, + {0x314a, CRL_REG_LEN_08BIT, 0x00}, + {0x314b, CRL_REG_LEN_08BIT, 0x00}, + {0x314c, CRL_REG_LEN_08BIT, 0x00}, + {0x314d, CRL_REG_LEN_08BIT, 0x00}, + {0x314e, CRL_REG_LEN_08BIT, 0x1c}, + {0x314f, CRL_REG_LEN_08BIT, 0xff}, + {0x3150, CRL_REG_LEN_08BIT, 0xff}, + {0x3151, CRL_REG_LEN_08BIT, 0xff}, + {0x3152, CRL_REG_LEN_08BIT, 0x10}, + {0x3153, CRL_REG_LEN_08BIT, 0x10}, + {0x3154, CRL_REG_LEN_08BIT, 0x10}, + {0x3155, CRL_REG_LEN_08BIT, 0x00}, + {0x3156, CRL_REG_LEN_08BIT, 0x03}, + {0x3157, CRL_REG_LEN_08BIT, 0x00}, + {0x3158, CRL_REG_LEN_08BIT, 0x0f}, + {0x3159, CRL_REG_LEN_08BIT, 0xff}, + {0x315a, CRL_REG_LEN_08BIT, 0x01}, + {0x315b, CRL_REG_LEN_08BIT, 0x00}, + {0x315c, 
CRL_REG_LEN_08BIT, 0x01}, + {0x315d, CRL_REG_LEN_08BIT, 0x00}, + {0x315e, CRL_REG_LEN_08BIT, 0x01}, + {0x315f, CRL_REG_LEN_08BIT, 0x00}, + {0x3160, CRL_REG_LEN_08BIT, 0x01}, + {0x3161, CRL_REG_LEN_08BIT, 0x00}, + {0x3162, CRL_REG_LEN_08BIT, 0x01}, + {0x3163, CRL_REG_LEN_08BIT, 0x00}, + {0x3164, CRL_REG_LEN_08BIT, 0x01}, + {0x3165, CRL_REG_LEN_08BIT, 0x00}, + {0x3190, CRL_REG_LEN_08BIT, 0x01}, + {0x3191, CRL_REG_LEN_08BIT, 0x99}, + {0x3193, CRL_REG_LEN_08BIT, 0x08}, + {0x3194, CRL_REG_LEN_08BIT, 0x13}, + {0x3195, CRL_REG_LEN_08BIT, 0x33}, + {0x3196, CRL_REG_LEN_08BIT, 0x00}, + {0x3197, CRL_REG_LEN_08BIT, 0x10}, + {0x3198, CRL_REG_LEN_08BIT, 0x00}, + {0x3199, CRL_REG_LEN_08BIT, 0x3f}, + {0x319a, CRL_REG_LEN_08BIT, 0x40}, + {0x319b, CRL_REG_LEN_08BIT, 0x7f}, + {0x319c, CRL_REG_LEN_08BIT, 0x80}, + {0x319d, CRL_REG_LEN_08BIT, 0xbf}, + {0x319e, CRL_REG_LEN_08BIT, 0xc0}, + {0x319f, CRL_REG_LEN_08BIT, 0xff}, + {0x31a0, CRL_REG_LEN_08BIT, 0x24}, + {0x31a1, CRL_REG_LEN_08BIT, 0x55}, + {0x31a2, CRL_REG_LEN_08BIT, 0x00}, + /* vfifo manual override */ + {0x31a3, CRL_REG_LEN_08BIT, 0x08}, + {0x31a6, CRL_REG_LEN_08BIT, 0x00}, + {0x31a7, CRL_REG_LEN_08BIT, 0x00}, + {0x31b0, CRL_REG_LEN_08BIT, 0x00}, + {0x31b1, CRL_REG_LEN_08BIT, 0x00}, + {0x31b2, CRL_REG_LEN_08BIT, 0x02}, + {0x31b3, CRL_REG_LEN_08BIT, 0x00}, + {0x31b4, CRL_REG_LEN_08BIT, 0x00}, + {0x31b5, CRL_REG_LEN_08BIT, 0x01}, + {0x31b6, CRL_REG_LEN_08BIT, 0x00}, + {0x31b7, CRL_REG_LEN_08BIT, 0x00}, + {0x31b8, CRL_REG_LEN_08BIT, 0x00}, + {0x31b9, CRL_REG_LEN_08BIT, 0x00}, + {0x31ba, CRL_REG_LEN_08BIT, 0x00}, + {0x31d0, CRL_REG_LEN_08BIT, 0x3c}, + {0x31d1, CRL_REG_LEN_08BIT, 0x34}, + {0x31d2, CRL_REG_LEN_08BIT, 0x3c}, + {0x31d3, CRL_REG_LEN_08BIT, 0x00}, + {0x31d4, CRL_REG_LEN_08BIT, 0x2d}, + {0x31d5, CRL_REG_LEN_08BIT, 0x00}, + {0x31d6, CRL_REG_LEN_08BIT, 0x01}, + {0x31d7, CRL_REG_LEN_08BIT, 0x06}, + {0x31d8, CRL_REG_LEN_08BIT, 0x00}, + {0x31d9, CRL_REG_LEN_08BIT, 0x64}, + {0x31da, CRL_REG_LEN_08BIT, 0x00}, + {0x31db, 
CRL_REG_LEN_08BIT, 0x30}, + {0x31dc, CRL_REG_LEN_08BIT, 0x04}, + {0x31dd, CRL_REG_LEN_08BIT, 0x69}, + {0x31de, CRL_REG_LEN_08BIT, 0x0a}, + {0x31df, CRL_REG_LEN_08BIT, 0x3c}, + {0x31e0, CRL_REG_LEN_08BIT, 0x04}, + {0x31e1, CRL_REG_LEN_08BIT, 0x32}, + {0x31e2, CRL_REG_LEN_08BIT, 0x00}, + {0x31e3, CRL_REG_LEN_08BIT, 0x00}, + {0x31e4, CRL_REG_LEN_08BIT, 0x08}, + {0x31e5, CRL_REG_LEN_08BIT, 0x80}, + {0x31e6, CRL_REG_LEN_08BIT, 0x00}, + /* MIPI data type, 0x31e7-0x31eb */ + {0x31e7, CRL_REG_LEN_08BIT, 0x2c}, + {0x31e8, CRL_REG_LEN_08BIT, 0x6c}, + {0x31e9, CRL_REG_LEN_08BIT, 0xac}, + {0x31ea, CRL_REG_LEN_08BIT, 0xec}, + {0x31eb, CRL_REG_LEN_08BIT, 0x3f}, + {0x31ec, CRL_REG_LEN_08BIT, 0x0f}, + {0x31ed, CRL_REG_LEN_08BIT, 0x20}, + {0x31ee, CRL_REG_LEN_08BIT, 0x04}, + {0x31ef, CRL_REG_LEN_08BIT, 0x48}, + {0x31f0, CRL_REG_LEN_08BIT, 0x07}, + {0x31f1, CRL_REG_LEN_08BIT, 0x90}, + {0x31f2, CRL_REG_LEN_08BIT, 0x04}, + {0x31f3, CRL_REG_LEN_08BIT, 0x48}, + {0x31f4, CRL_REG_LEN_08BIT, 0x07}, + {0x31f5, CRL_REG_LEN_08BIT, 0x90}, + {0x31f6, CRL_REG_LEN_08BIT, 0x04}, + {0x31f7, CRL_REG_LEN_08BIT, 0x48}, + {0x31f8, CRL_REG_LEN_08BIT, 0x07}, + {0x31f9, CRL_REG_LEN_08BIT, 0x90}, + {0x31fa, CRL_REG_LEN_08BIT, 0x04}, + {0x31fb, CRL_REG_LEN_08BIT, 0x48}, + {0x31fd, CRL_REG_LEN_08BIT, 0xcb}, + {0x31fe, CRL_REG_LEN_08BIT, 0x01}, + {0x31ff, CRL_REG_LEN_08BIT, 0x03}, + {0x3200, CRL_REG_LEN_08BIT, 0x00}, + {0x3201, CRL_REG_LEN_08BIT, 0xff}, + {0x3202, CRL_REG_LEN_08BIT, 0x00}, + {0x3203, CRL_REG_LEN_08BIT, 0xff}, + {0x3204, CRL_REG_LEN_08BIT, 0xff}, + {0x3205, CRL_REG_LEN_08BIT, 0xff}, + {0x3206, CRL_REG_LEN_08BIT, 0xff}, + {0x3207, CRL_REG_LEN_08BIT, 0xff}, + {0x3208, CRL_REG_LEN_08BIT, 0xff}, + {0x3209, CRL_REG_LEN_08BIT, 0xff}, + {0x320a, CRL_REG_LEN_08BIT, 0xff}, + {0x320b, CRL_REG_LEN_08BIT, 0x1b}, + {0x320c, CRL_REG_LEN_08BIT, 0x1f}, + {0x320d, CRL_REG_LEN_08BIT, 0x1e}, + {0x320e, CRL_REG_LEN_08BIT, 0x30}, + {0x320f, CRL_REG_LEN_08BIT, 0x2d}, + {0x3210, CRL_REG_LEN_08BIT, 0x2c}, + {0x3211, 
CRL_REG_LEN_08BIT, 0x2b}, + {0x3212, CRL_REG_LEN_08BIT, 0x2a}, + {0x3213, CRL_REG_LEN_08BIT, 0x24}, + {0x3214, CRL_REG_LEN_08BIT, 0x22}, + {0x3215, CRL_REG_LEN_08BIT, 0x00}, + {0x3216, CRL_REG_LEN_08BIT, 0x04}, + /* mipi data tag, 0x3217-0x321a */ + {0x3217, CRL_REG_LEN_08BIT, 0x2c}, + {0x3218, CRL_REG_LEN_08BIT, 0x6c}, + {0x3219, CRL_REG_LEN_08BIT, 0xac}, + {0x321a, CRL_REG_LEN_08BIT, 0xec}, + {0x321b, CRL_REG_LEN_08BIT, 0x00}, + {0x3230, CRL_REG_LEN_08BIT, 0x3a}, + {0x3231, CRL_REG_LEN_08BIT, 0x00}, + {0x3232, CRL_REG_LEN_08BIT, 0x80}, + {0x3233, CRL_REG_LEN_08BIT, 0x00}, + {0x3234, CRL_REG_LEN_08BIT, 0x10}, + {0x3235, CRL_REG_LEN_08BIT, 0xaa}, + {0x3236, CRL_REG_LEN_08BIT, 0x55}, + {0x3237, CRL_REG_LEN_08BIT, 0x99}, + {0x3238, CRL_REG_LEN_08BIT, 0x66}, + {0x3239, CRL_REG_LEN_08BIT, 0x08}, + {0x323a, CRL_REG_LEN_08BIT, 0x88}, + {0x323b, CRL_REG_LEN_08BIT, 0x00}, + {0x323c, CRL_REG_LEN_08BIT, 0x00}, + {0x323d, CRL_REG_LEN_08BIT, 0x03}, + {0x3250, CRL_REG_LEN_08BIT, 0x33}, + {0x3251, CRL_REG_LEN_08BIT, 0x00}, + {0x3252, CRL_REG_LEN_08BIT, 0x20}, + {0x3253, CRL_REG_LEN_08BIT, 0x00}, + {0x3254, CRL_REG_LEN_08BIT, 0x11}, + {0x3255, CRL_REG_LEN_08BIT, 0x01}, + {0x3256, CRL_REG_LEN_08BIT, 0x00}, + {0x3257, CRL_REG_LEN_08BIT, 0x00}, + {0x3258, CRL_REG_LEN_08BIT, 0x00}, + {0x3270, CRL_REG_LEN_08BIT, 0x01}, + {0x3271, CRL_REG_LEN_08BIT, 0xc0}, + {0x3272, CRL_REG_LEN_08BIT, 0xf0}, + {0x3273, CRL_REG_LEN_08BIT, 0x01}, + {0x3274, CRL_REG_LEN_08BIT, 0x00}, + {0x3275, CRL_REG_LEN_08BIT, 0x40}, + {0x3276, CRL_REG_LEN_08BIT, 0x02}, + {0x3277, CRL_REG_LEN_08BIT, 0x08}, + {0x3278, CRL_REG_LEN_08BIT, 0x10}, + {0x3279, CRL_REG_LEN_08BIT, 0x04}, + {0x327a, CRL_REG_LEN_08BIT, 0x00}, + {0x327b, CRL_REG_LEN_08BIT, 0x03}, + {0x327c, CRL_REG_LEN_08BIT, 0x10}, + {0x327d, CRL_REG_LEN_08BIT, 0x60}, + {0x327e, CRL_REG_LEN_08BIT, 0xc0}, + {0x327f, CRL_REG_LEN_08BIT, 0x06}, + {0x3288, CRL_REG_LEN_08BIT, 0x10}, + {0x3289, CRL_REG_LEN_08BIT, 0x00}, + {0x328a, CRL_REG_LEN_08BIT, 0x08}, + {0x328b, 
CRL_REG_LEN_08BIT, 0x00}, + {0x328c, CRL_REG_LEN_08BIT, 0x04}, + {0x328d, CRL_REG_LEN_08BIT, 0x00}, + {0x328e, CRL_REG_LEN_08BIT, 0x02}, + {0x328f, CRL_REG_LEN_08BIT, 0x00}, + {0x3290, CRL_REG_LEN_08BIT, 0x20}, + {0x3291, CRL_REG_LEN_08BIT, 0x00}, + {0x3292, CRL_REG_LEN_08BIT, 0x10}, + {0x3293, CRL_REG_LEN_08BIT, 0x00}, + {0x3294, CRL_REG_LEN_08BIT, 0x08}, + {0x3295, CRL_REG_LEN_08BIT, 0x00}, + {0x3296, CRL_REG_LEN_08BIT, 0x04}, + {0x3297, CRL_REG_LEN_08BIT, 0x00}, + {0x3298, CRL_REG_LEN_08BIT, 0x40}, + {0x3299, CRL_REG_LEN_08BIT, 0x00}, + {0x329a, CRL_REG_LEN_08BIT, 0x20}, + {0x329b, CRL_REG_LEN_08BIT, 0x00}, + {0x329c, CRL_REG_LEN_08BIT, 0x10}, + {0x329d, CRL_REG_LEN_08BIT, 0x00}, + {0x329e, CRL_REG_LEN_08BIT, 0x08}, + {0x329f, CRL_REG_LEN_08BIT, 0x00}, + {0x32a0, CRL_REG_LEN_08BIT, 0x7f}, + {0x32a1, CRL_REG_LEN_08BIT, 0xff}, + {0x32a2, CRL_REG_LEN_08BIT, 0x40}, + {0x32a3, CRL_REG_LEN_08BIT, 0x00}, + {0x32a4, CRL_REG_LEN_08BIT, 0x20}, + {0x32a5, CRL_REG_LEN_08BIT, 0x00}, + {0x32a6, CRL_REG_LEN_08BIT, 0x10}, + {0x32a7, CRL_REG_LEN_08BIT, 0x00}, + {0x32a8, CRL_REG_LEN_08BIT, 0x00}, + {0x32a9, CRL_REG_LEN_08BIT, 0x00}, + {0x32aa, CRL_REG_LEN_08BIT, 0x00}, + {0x32ab, CRL_REG_LEN_08BIT, 0x00}, + {0x32ac, CRL_REG_LEN_08BIT, 0x00}, + {0x32ad, CRL_REG_LEN_08BIT, 0x00}, + {0x32ae, CRL_REG_LEN_08BIT, 0x00}, + {0x32af, CRL_REG_LEN_08BIT, 0x00}, + {0x32b0, CRL_REG_LEN_08BIT, 0x00}, + {0x32b1, CRL_REG_LEN_08BIT, 0x00}, + {0x32b2, CRL_REG_LEN_08BIT, 0x00}, + {0x32b3, CRL_REG_LEN_08BIT, 0x00}, + {0x32b4, CRL_REG_LEN_08BIT, 0x00}, + {0x32b5, CRL_REG_LEN_08BIT, 0x00}, + {0x32b6, CRL_REG_LEN_08BIT, 0x00}, + {0x32b7, CRL_REG_LEN_08BIT, 0x00}, + {0x32b8, CRL_REG_LEN_08BIT, 0x00}, + {0x32b9, CRL_REG_LEN_08BIT, 0x00}, + {0x32ba, CRL_REG_LEN_08BIT, 0x00}, + {0x32bb, CRL_REG_LEN_08BIT, 0x00}, + {0x32bc, CRL_REG_LEN_08BIT, 0x00}, + {0x32bd, CRL_REG_LEN_08BIT, 0x00}, + {0x32be, CRL_REG_LEN_08BIT, 0x00}, + {0x32bf, CRL_REG_LEN_08BIT, 0x00}, + {0x32c0, CRL_REG_LEN_08BIT, 0x00}, + {0x32c1, 
CRL_REG_LEN_08BIT, 0x00}, + {0x32c2, CRL_REG_LEN_08BIT, 0x00}, + {0x32c3, CRL_REG_LEN_08BIT, 0x00}, + {0x32c4, CRL_REG_LEN_08BIT, 0x00}, + {0x32c5, CRL_REG_LEN_08BIT, 0x00}, + {0x32c6, CRL_REG_LEN_08BIT, 0x00}, + {0x32c7, CRL_REG_LEN_08BIT, 0x00}, + {0x32c8, CRL_REG_LEN_08BIT, 0x87}, + {0x32c9, CRL_REG_LEN_08BIT, 0x00}, + {0x3330, CRL_REG_LEN_08BIT, 0x03}, + {0x3331, CRL_REG_LEN_08BIT, 0xc8}, + {0x3332, CRL_REG_LEN_08BIT, 0x02}, + {0x3333, CRL_REG_LEN_08BIT, 0x24}, + {0x3334, CRL_REG_LEN_08BIT, 0x00}, + {0x3335, CRL_REG_LEN_08BIT, 0x00}, + {0x3336, CRL_REG_LEN_08BIT, 0x00}, + {0x3337, CRL_REG_LEN_08BIT, 0x00}, + {0x3338, CRL_REG_LEN_08BIT, 0x03}, + {0x3339, CRL_REG_LEN_08BIT, 0xc8}, + {0x333a, CRL_REG_LEN_08BIT, 0x02}, + {0x333b, CRL_REG_LEN_08BIT, 0x24}, + {0x333c, CRL_REG_LEN_08BIT, 0x00}, + {0x333d, CRL_REG_LEN_08BIT, 0x00}, + {0x333e, CRL_REG_LEN_08BIT, 0x00}, + {0x333f, CRL_REG_LEN_08BIT, 0x00}, + {0x3340, CRL_REG_LEN_08BIT, 0x03}, + {0x3341, CRL_REG_LEN_08BIT, 0xc8}, + {0x3342, CRL_REG_LEN_08BIT, 0x02}, + {0x3343, CRL_REG_LEN_08BIT, 0x24}, + {0x3344, CRL_REG_LEN_08BIT, 0x00}, + {0x3345, CRL_REG_LEN_08BIT, 0x00}, + {0x3346, CRL_REG_LEN_08BIT, 0x00}, + {0x3347, CRL_REG_LEN_08BIT, 0x00}, + {0x3348, CRL_REG_LEN_08BIT, 0x40}, + {0x3349, CRL_REG_LEN_08BIT, 0x00}, + {0x334a, CRL_REG_LEN_08BIT, 0x00}, + {0x334b, CRL_REG_LEN_08BIT, 0x00}, + {0x334c, CRL_REG_LEN_08BIT, 0x00}, + {0x334d, CRL_REG_LEN_08BIT, 0x00}, + {0x334e, CRL_REG_LEN_08BIT, 0x80}, + {0x3360, CRL_REG_LEN_08BIT, 0x01}, + {0x3361, CRL_REG_LEN_08BIT, 0x00}, + {0x3362, CRL_REG_LEN_08BIT, 0x01}, + {0x3363, CRL_REG_LEN_08BIT, 0x00}, + {0x3364, CRL_REG_LEN_08BIT, 0x01}, + {0x3365, CRL_REG_LEN_08BIT, 0x00}, + {0x3366, CRL_REG_LEN_08BIT, 0x01}, + {0x3367, CRL_REG_LEN_08BIT, 0x00}, + {0x3368, CRL_REG_LEN_08BIT, 0x01}, + {0x3369, CRL_REG_LEN_08BIT, 0x00}, + {0x336a, CRL_REG_LEN_08BIT, 0x01}, + {0x336b, CRL_REG_LEN_08BIT, 0x00}, + {0x336c, CRL_REG_LEN_08BIT, 0x01}, + {0x336d, CRL_REG_LEN_08BIT, 0x00}, + {0x336e, 
CRL_REG_LEN_08BIT, 0x01}, + {0x336f, CRL_REG_LEN_08BIT, 0x00}, + {0x3370, CRL_REG_LEN_08BIT, 0x01}, + {0x3371, CRL_REG_LEN_08BIT, 0x00}, + {0x3372, CRL_REG_LEN_08BIT, 0x01}, + {0x3373, CRL_REG_LEN_08BIT, 0x00}, + {0x3374, CRL_REG_LEN_08BIT, 0x01}, + {0x3375, CRL_REG_LEN_08BIT, 0x00}, + {0x3376, CRL_REG_LEN_08BIT, 0x01}, + {0x3377, CRL_REG_LEN_08BIT, 0x00}, + {0x3378, CRL_REG_LEN_08BIT, 0x00}, + {0x3379, CRL_REG_LEN_08BIT, 0x00}, + {0x337a, CRL_REG_LEN_08BIT, 0x00}, + {0x337b, CRL_REG_LEN_08BIT, 0x00}, + {0x337c, CRL_REG_LEN_08BIT, 0x00}, + {0x337d, CRL_REG_LEN_08BIT, 0x00}, + {0x337e, CRL_REG_LEN_08BIT, 0x00}, + {0x337f, CRL_REG_LEN_08BIT, 0x00}, + {0x3380, CRL_REG_LEN_08BIT, 0x00}, + {0x3381, CRL_REG_LEN_08BIT, 0x00}, + {0x3382, CRL_REG_LEN_08BIT, 0x00}, + {0x3383, CRL_REG_LEN_08BIT, 0x00}, + {0x3384, CRL_REG_LEN_08BIT, 0x00}, + {0x3385, CRL_REG_LEN_08BIT, 0x00}, + {0x3386, CRL_REG_LEN_08BIT, 0x00}, + {0x3387, CRL_REG_LEN_08BIT, 0x00}, + {0x3388, CRL_REG_LEN_08BIT, 0x00}, + {0x3389, CRL_REG_LEN_08BIT, 0x00}, + {0x338a, CRL_REG_LEN_08BIT, 0x00}, + {0x338b, CRL_REG_LEN_08BIT, 0x00}, + {0x338c, CRL_REG_LEN_08BIT, 0x00}, + {0x338d, CRL_REG_LEN_08BIT, 0x00}, + {0x338e, CRL_REG_LEN_08BIT, 0x00}, + {0x338f, CRL_REG_LEN_08BIT, 0x00}, + {0x3390, CRL_REG_LEN_08BIT, 0x00}, + {0x3391, CRL_REG_LEN_08BIT, 0x00}, + {0x3392, CRL_REG_LEN_08BIT, 0x00}, + {0x3393, CRL_REG_LEN_08BIT, 0x00}, + {0x3394, CRL_REG_LEN_08BIT, 0x00}, + {0x3395, CRL_REG_LEN_08BIT, 0x00}, + {0x3396, CRL_REG_LEN_08BIT, 0x00}, + {0x3397, CRL_REG_LEN_08BIT, 0x00}, + {0x3398, CRL_REG_LEN_08BIT, 0x00}, + {0x3399, CRL_REG_LEN_08BIT, 0x00}, + {0x339a, CRL_REG_LEN_08BIT, 0x00}, + {0x339b, CRL_REG_LEN_08BIT, 0x00}, + {0x33b0, CRL_REG_LEN_08BIT, 0x00}, + {0x33b1, CRL_REG_LEN_08BIT, 0x50}, + {0x33b2, CRL_REG_LEN_08BIT, 0x01}, + {0x33b3, CRL_REG_LEN_08BIT, 0xff}, + {0x33b4, CRL_REG_LEN_08BIT, 0xe0}, + {0x33b5, CRL_REG_LEN_08BIT, 0x6b}, + {0x33b6, CRL_REG_LEN_08BIT, 0x00}, + {0x33b7, CRL_REG_LEN_08BIT, 0x00}, + {0x33b8, 
CRL_REG_LEN_08BIT, 0x00}, + {0x33b9, CRL_REG_LEN_08BIT, 0x00}, + {0x33ba, CRL_REG_LEN_08BIT, 0x00}, + {0x33bb, CRL_REG_LEN_08BIT, 0x1f}, + {0x33bc, CRL_REG_LEN_08BIT, 0x01}, + {0x33bd, CRL_REG_LEN_08BIT, 0x01}, + {0x33be, CRL_REG_LEN_08BIT, 0x01}, + {0x33bf, CRL_REG_LEN_08BIT, 0x01}, + {0x33c0, CRL_REG_LEN_08BIT, 0x00}, + {0x33c1, CRL_REG_LEN_08BIT, 0x00}, + {0x33c2, CRL_REG_LEN_08BIT, 0x00}, + {0x33c3, CRL_REG_LEN_08BIT, 0x00}, + {0x33e0, CRL_REG_LEN_08BIT, 0x14}, + {0x33e1, CRL_REG_LEN_08BIT, 0x0f}, + {0x33e2, CRL_REG_LEN_08BIT, 0x04}, + {0x33e3, CRL_REG_LEN_08BIT, 0x02}, + {0x33e4, CRL_REG_LEN_08BIT, 0x01}, + {0x33e5, CRL_REG_LEN_08BIT, 0x01}, + {0x33e6, CRL_REG_LEN_08BIT, 0x00}, + {0x33e7, CRL_REG_LEN_08BIT, 0x04}, + {0x33e8, CRL_REG_LEN_08BIT, 0x0c}, + {0x33e9, CRL_REG_LEN_08BIT, 0x02}, + {0x33ea, CRL_REG_LEN_08BIT, 0x02}, + {0x33eb, CRL_REG_LEN_08BIT, 0x02}, + {0x33ec, CRL_REG_LEN_08BIT, 0x03}, + {0x33ed, CRL_REG_LEN_08BIT, 0x02}, + {0x33ee, CRL_REG_LEN_08BIT, 0x05}, + {0x33ef, CRL_REG_LEN_08BIT, 0x0a}, + {0x33f0, CRL_REG_LEN_08BIT, 0x08}, + {0x33f1, CRL_REG_LEN_08BIT, 0x04}, + {0x33f2, CRL_REG_LEN_08BIT, 0x04}, + {0x33f3, CRL_REG_LEN_08BIT, 0x00}, + {0x33f4, CRL_REG_LEN_08BIT, 0x03}, + {0x33f5, CRL_REG_LEN_08BIT, 0x14}, + {0x33f6, CRL_REG_LEN_08BIT, 0x0f}, + {0x33f7, CRL_REG_LEN_08BIT, 0x02}, + {0x33f8, CRL_REG_LEN_08BIT, 0x01}, + {0x33f9, CRL_REG_LEN_08BIT, 0x01}, + {0x33fa, CRL_REG_LEN_08BIT, 0x01}, + {0x33fb, CRL_REG_LEN_08BIT, 0x00}, + {0x33fc, CRL_REG_LEN_08BIT, 0x04}, + {0x33fd, CRL_REG_LEN_08BIT, 0x0c}, + {0x33fe, CRL_REG_LEN_08BIT, 0x02}, + {0x33ff, CRL_REG_LEN_08BIT, 0x02}, + {0x3400, CRL_REG_LEN_08BIT, 0x02}, + {0x3401, CRL_REG_LEN_08BIT, 0x03}, + {0x3402, CRL_REG_LEN_08BIT, 0x01}, + {0x3403, CRL_REG_LEN_08BIT, 0x02}, + {0x3404, CRL_REG_LEN_08BIT, 0x08}, + {0x3405, CRL_REG_LEN_08BIT, 0x08}, + {0x3406, CRL_REG_LEN_08BIT, 0x04}, + {0x3407, CRL_REG_LEN_08BIT, 0x04}, + {0x3408, CRL_REG_LEN_08BIT, 0x00}, + {0x3409, CRL_REG_LEN_08BIT, 0x03}, + {0x340a, 
CRL_REG_LEN_08BIT, 0x14}, + {0x340b, CRL_REG_LEN_08BIT, 0x0f}, + {0x340c, CRL_REG_LEN_08BIT, 0x04}, + {0x340d, CRL_REG_LEN_08BIT, 0x02}, + {0x340e, CRL_REG_LEN_08BIT, 0x01}, + {0x340f, CRL_REG_LEN_08BIT, 0x01}, + {0x3410, CRL_REG_LEN_08BIT, 0x00}, + {0x3411, CRL_REG_LEN_08BIT, 0x04}, + {0x3412, CRL_REG_LEN_08BIT, 0x0c}, + {0x3413, CRL_REG_LEN_08BIT, 0x02}, + {0x3414, CRL_REG_LEN_08BIT, 0x02}, + {0x3415, CRL_REG_LEN_08BIT, 0x02}, + {0x3416, CRL_REG_LEN_08BIT, 0x03}, + {0x3417, CRL_REG_LEN_08BIT, 0x02}, + {0x3418, CRL_REG_LEN_08BIT, 0x05}, + {0x3419, CRL_REG_LEN_08BIT, 0x0a}, + {0x341a, CRL_REG_LEN_08BIT, 0x08}, + {0x341b, CRL_REG_LEN_08BIT, 0x04}, + {0x341c, CRL_REG_LEN_08BIT, 0x04}, + {0x341d, CRL_REG_LEN_08BIT, 0x00}, + {0x341e, CRL_REG_LEN_08BIT, 0x03}, + {0x3440, CRL_REG_LEN_08BIT, 0x00}, + {0x3441, CRL_REG_LEN_08BIT, 0x00}, + {0x3442, CRL_REG_LEN_08BIT, 0x00}, + {0x3443, CRL_REG_LEN_08BIT, 0x00}, + {0x3444, CRL_REG_LEN_08BIT, 0x02}, + {0x3445, CRL_REG_LEN_08BIT, 0xf0}, + {0x3446, CRL_REG_LEN_08BIT, 0x02}, + {0x3447, CRL_REG_LEN_08BIT, 0x08}, + {0x3448, CRL_REG_LEN_08BIT, 0x00}, + {0x3460, CRL_REG_LEN_08BIT, 0x40}, + {0x3461, CRL_REG_LEN_08BIT, 0x40}, + {0x3462, CRL_REG_LEN_08BIT, 0x40}, + {0x3463, CRL_REG_LEN_08BIT, 0x40}, + {0x3464, CRL_REG_LEN_08BIT, 0x03}, + {0x3465, CRL_REG_LEN_08BIT, 0x01}, + {0x3466, CRL_REG_LEN_08BIT, 0x01}, + {0x3467, CRL_REG_LEN_08BIT, 0x02}, + {0x3468, CRL_REG_LEN_08BIT, 0x30}, + {0x3469, CRL_REG_LEN_08BIT, 0x00}, + {0x346a, CRL_REG_LEN_08BIT, 0x35}, + {0x346b, CRL_REG_LEN_08BIT, 0x00}, + {0x3480, CRL_REG_LEN_08BIT, 0x40}, + {0x3481, CRL_REG_LEN_08BIT, 0x00}, + {0x3482, CRL_REG_LEN_08BIT, 0x00}, + {0x3483, CRL_REG_LEN_08BIT, 0x00}, + {0x3484, CRL_REG_LEN_08BIT, 0x0d}, + {0x3485, CRL_REG_LEN_08BIT, 0x00}, + {0x3486, CRL_REG_LEN_08BIT, 0x00}, + {0x3487, CRL_REG_LEN_08BIT, 0x00}, + {0x3488, CRL_REG_LEN_08BIT, 0x00}, + {0x3489, CRL_REG_LEN_08BIT, 0x00}, + {0x348a, CRL_REG_LEN_08BIT, 0x00}, + {0x348b, CRL_REG_LEN_08BIT, 0x04}, + {0x348c, 
CRL_REG_LEN_08BIT, 0x00}, + {0x348d, CRL_REG_LEN_08BIT, 0x01}, + {0x348f, CRL_REG_LEN_08BIT, 0x01}, + {0x3030, CRL_REG_LEN_08BIT, 0x0a}, + {0x3030, CRL_REG_LEN_08BIT, 0x02}, + {0x7000, CRL_REG_LEN_08BIT, 0x58}, + {0x7001, CRL_REG_LEN_08BIT, 0x7a}, + {0x7002, CRL_REG_LEN_08BIT, 0x1a}, + {0x7003, CRL_REG_LEN_08BIT, 0xc1}, + {0x7004, CRL_REG_LEN_08BIT, 0x03}, + {0x7005, CRL_REG_LEN_08BIT, 0xda}, + {0x7006, CRL_REG_LEN_08BIT, 0xbd}, + {0x7007, CRL_REG_LEN_08BIT, 0x03}, + {0x7008, CRL_REG_LEN_08BIT, 0xbd}, + {0x7009, CRL_REG_LEN_08BIT, 0x06}, + {0x700a, CRL_REG_LEN_08BIT, 0xe6}, + {0x700b, CRL_REG_LEN_08BIT, 0xec}, + {0x700c, CRL_REG_LEN_08BIT, 0xbc}, + {0x700d, CRL_REG_LEN_08BIT, 0xff}, + {0x700e, CRL_REG_LEN_08BIT, 0xbc}, + {0x700f, CRL_REG_LEN_08BIT, 0x73}, + {0x7010, CRL_REG_LEN_08BIT, 0xda}, + {0x7011, CRL_REG_LEN_08BIT, 0x72}, + {0x7012, CRL_REG_LEN_08BIT, 0x76}, + {0x7013, CRL_REG_LEN_08BIT, 0xb6}, + {0x7014, CRL_REG_LEN_08BIT, 0xee}, + {0x7015, CRL_REG_LEN_08BIT, 0xcf}, + {0x7016, CRL_REG_LEN_08BIT, 0xac}, + {0x7017, CRL_REG_LEN_08BIT, 0xd0}, + {0x7018, CRL_REG_LEN_08BIT, 0xac}, + {0x7019, CRL_REG_LEN_08BIT, 0xd1}, + {0x701a, CRL_REG_LEN_08BIT, 0x50}, + {0x701b, CRL_REG_LEN_08BIT, 0xac}, + {0x701c, CRL_REG_LEN_08BIT, 0xd2}, + {0x701d, CRL_REG_LEN_08BIT, 0xbc}, + {0x701e, CRL_REG_LEN_08BIT, 0x2e}, + {0x701f, CRL_REG_LEN_08BIT, 0xb4}, + {0x7020, CRL_REG_LEN_08BIT, 0x00}, + {0x7021, CRL_REG_LEN_08BIT, 0xdc}, + {0x7022, CRL_REG_LEN_08BIT, 0xdf}, + {0x7023, CRL_REG_LEN_08BIT, 0xb0}, + {0x7024, CRL_REG_LEN_08BIT, 0x6e}, + {0x7025, CRL_REG_LEN_08BIT, 0xbd}, + {0x7026, CRL_REG_LEN_08BIT, 0x01}, + {0x7027, CRL_REG_LEN_08BIT, 0xd7}, + {0x7028, CRL_REG_LEN_08BIT, 0xed}, + {0x7029, CRL_REG_LEN_08BIT, 0xe1}, + {0x702a, CRL_REG_LEN_08BIT, 0x36}, + {0x702b, CRL_REG_LEN_08BIT, 0x30}, + {0x702c, CRL_REG_LEN_08BIT, 0xd3}, + {0x702d, CRL_REG_LEN_08BIT, 0x2e}, + {0x702e, CRL_REG_LEN_08BIT, 0x54}, + {0x702f, CRL_REG_LEN_08BIT, 0x46}, + {0x7030, CRL_REG_LEN_08BIT, 0xbc}, + {0x7031, 
CRL_REG_LEN_08BIT, 0x22}, + {0x7032, CRL_REG_LEN_08BIT, 0x66}, + {0x7033, CRL_REG_LEN_08BIT, 0xbc}, + {0x7034, CRL_REG_LEN_08BIT, 0x24}, + {0x7035, CRL_REG_LEN_08BIT, 0x2c}, + {0x7036, CRL_REG_LEN_08BIT, 0x28}, + {0x7037, CRL_REG_LEN_08BIT, 0xbc}, + {0x7038, CRL_REG_LEN_08BIT, 0x3c}, + {0x7039, CRL_REG_LEN_08BIT, 0xa1}, + {0x703a, CRL_REG_LEN_08BIT, 0xac}, + {0x703b, CRL_REG_LEN_08BIT, 0xd8}, + {0x703c, CRL_REG_LEN_08BIT, 0xd6}, + {0x703d, CRL_REG_LEN_08BIT, 0xb4}, + {0x703e, CRL_REG_LEN_08BIT, 0x04}, + {0x703f, CRL_REG_LEN_08BIT, 0x46}, + {0x7040, CRL_REG_LEN_08BIT, 0xb7}, + {0x7041, CRL_REG_LEN_08BIT, 0x04}, + {0x7042, CRL_REG_LEN_08BIT, 0xbe}, + {0x7043, CRL_REG_LEN_08BIT, 0x08}, + {0x7044, CRL_REG_LEN_08BIT, 0xc3}, + {0x7045, CRL_REG_LEN_08BIT, 0xd9}, + {0x7046, CRL_REG_LEN_08BIT, 0xad}, + {0x7047, CRL_REG_LEN_08BIT, 0xc3}, + {0x7048, CRL_REG_LEN_08BIT, 0xbc}, + {0x7049, CRL_REG_LEN_08BIT, 0x19}, + {0x704a, CRL_REG_LEN_08BIT, 0xc1}, + {0x704b, CRL_REG_LEN_08BIT, 0x27}, + {0x704c, CRL_REG_LEN_08BIT, 0xe7}, + {0x704d, CRL_REG_LEN_08BIT, 0x00}, + {0x704e, CRL_REG_LEN_08BIT, 0x50}, + {0x704f, CRL_REG_LEN_08BIT, 0x20}, + {0x7050, CRL_REG_LEN_08BIT, 0xb8}, + {0x7051, CRL_REG_LEN_08BIT, 0x02}, + {0x7052, CRL_REG_LEN_08BIT, 0xbc}, + {0x7053, CRL_REG_LEN_08BIT, 0x17}, + {0x7054, CRL_REG_LEN_08BIT, 0xdb}, + {0x7055, CRL_REG_LEN_08BIT, 0xc7}, + {0x7056, CRL_REG_LEN_08BIT, 0xb8}, + {0x7057, CRL_REG_LEN_08BIT, 0x00}, + {0x7058, CRL_REG_LEN_08BIT, 0x28}, + {0x7059, CRL_REG_LEN_08BIT, 0x54}, + {0x705a, CRL_REG_LEN_08BIT, 0xb4}, + {0x705b, CRL_REG_LEN_08BIT, 0x14}, + {0x705c, CRL_REG_LEN_08BIT, 0xab}, + {0x705d, CRL_REG_LEN_08BIT, 0xbe}, + {0x705e, CRL_REG_LEN_08BIT, 0x06}, + {0x705f, CRL_REG_LEN_08BIT, 0xd8}, + {0x7060, CRL_REG_LEN_08BIT, 0xd6}, + {0x7061, CRL_REG_LEN_08BIT, 0x00}, + {0x7062, CRL_REG_LEN_08BIT, 0xb4}, + {0x7063, CRL_REG_LEN_08BIT, 0xc7}, + {0x7064, CRL_REG_LEN_08BIT, 0x07}, + {0x7065, CRL_REG_LEN_08BIT, 0xb9}, + {0x7066, CRL_REG_LEN_08BIT, 0x05}, + {0x7067, 
CRL_REG_LEN_08BIT, 0xee}, + {0x7068, CRL_REG_LEN_08BIT, 0xe6}, + {0x7069, CRL_REG_LEN_08BIT, 0xad}, + {0x706a, CRL_REG_LEN_08BIT, 0xb4}, + {0x706b, CRL_REG_LEN_08BIT, 0x26}, + {0x706c, CRL_REG_LEN_08BIT, 0x19}, + {0x706d, CRL_REG_LEN_08BIT, 0xc1}, + {0x706e, CRL_REG_LEN_08BIT, 0x3a}, + {0x706f, CRL_REG_LEN_08BIT, 0xc3}, + {0x7070, CRL_REG_LEN_08BIT, 0xaf}, + {0x7071, CRL_REG_LEN_08BIT, 0x00}, + {0x7072, CRL_REG_LEN_08BIT, 0xc0}, + {0x7073, CRL_REG_LEN_08BIT, 0x3c}, + {0x7074, CRL_REG_LEN_08BIT, 0xc3}, + {0x7075, CRL_REG_LEN_08BIT, 0xbe}, + {0x7076, CRL_REG_LEN_08BIT, 0xe7}, + {0x7077, CRL_REG_LEN_08BIT, 0x00}, + {0x7078, CRL_REG_LEN_08BIT, 0x15}, + {0x7079, CRL_REG_LEN_08BIT, 0xc2}, + {0x707a, CRL_REG_LEN_08BIT, 0x40}, + {0x707b, CRL_REG_LEN_08BIT, 0xc3}, + {0x707c, CRL_REG_LEN_08BIT, 0xa4}, + {0x707d, CRL_REG_LEN_08BIT, 0xc0}, + {0x707e, CRL_REG_LEN_08BIT, 0x3c}, + {0x707f, CRL_REG_LEN_08BIT, 0x00}, + {0x7080, CRL_REG_LEN_08BIT, 0xb9}, + {0x7081, CRL_REG_LEN_08BIT, 0x64}, + {0x7082, CRL_REG_LEN_08BIT, 0x29}, + {0x7083, CRL_REG_LEN_08BIT, 0x00}, + {0x7084, CRL_REG_LEN_08BIT, 0xb8}, + {0x7085, CRL_REG_LEN_08BIT, 0x12}, + {0x7086, CRL_REG_LEN_08BIT, 0xbe}, + {0x7087, CRL_REG_LEN_08BIT, 0x01}, + {0x7088, CRL_REG_LEN_08BIT, 0xd0}, + {0x7089, CRL_REG_LEN_08BIT, 0xbc}, + {0x708a, CRL_REG_LEN_08BIT, 0x01}, + {0x708b, CRL_REG_LEN_08BIT, 0xac}, + {0x708c, CRL_REG_LEN_08BIT, 0x37}, + {0x708d, CRL_REG_LEN_08BIT, 0xd2}, + {0x708e, CRL_REG_LEN_08BIT, 0xac}, + {0x708f, CRL_REG_LEN_08BIT, 0x45}, + {0x7090, CRL_REG_LEN_08BIT, 0xad}, + {0x7091, CRL_REG_LEN_08BIT, 0x28}, + {0x7092, CRL_REG_LEN_08BIT, 0x00}, + {0x7093, CRL_REG_LEN_08BIT, 0xb8}, + {0x7094, CRL_REG_LEN_08BIT, 0x00}, + {0x7095, CRL_REG_LEN_08BIT, 0xbc}, + {0x7096, CRL_REG_LEN_08BIT, 0x01}, + {0x7097, CRL_REG_LEN_08BIT, 0x36}, + {0x7098, CRL_REG_LEN_08BIT, 0xd3}, + {0x7099, CRL_REG_LEN_08BIT, 0x30}, + {0x709a, CRL_REG_LEN_08BIT, 0x04}, + {0x709b, CRL_REG_LEN_08BIT, 0xe0}, + {0x709c, CRL_REG_LEN_08BIT, 0xd8}, + {0x709d, 
CRL_REG_LEN_08BIT, 0xb4}, + {0x709e, CRL_REG_LEN_08BIT, 0xe9}, + {0x709f, CRL_REG_LEN_08BIT, 0x00}, + {0x70a0, CRL_REG_LEN_08BIT, 0xbe}, + {0x70a1, CRL_REG_LEN_08BIT, 0x05}, + {0x70a2, CRL_REG_LEN_08BIT, 0x62}, + {0x70a3, CRL_REG_LEN_08BIT, 0x07}, + {0x70a4, CRL_REG_LEN_08BIT, 0xb9}, + {0x70a5, CRL_REG_LEN_08BIT, 0x05}, + {0x70a6, CRL_REG_LEN_08BIT, 0xad}, + {0x70a7, CRL_REG_LEN_08BIT, 0xc3}, + {0x70a8, CRL_REG_LEN_08BIT, 0xcf}, + {0x70a9, CRL_REG_LEN_08BIT, 0x00}, + {0x70aa, CRL_REG_LEN_08BIT, 0x15}, + {0x70ab, CRL_REG_LEN_08BIT, 0xc2}, + {0x70ac, CRL_REG_LEN_08BIT, 0x59}, + {0x70ad, CRL_REG_LEN_08BIT, 0xc3}, + {0x70ae, CRL_REG_LEN_08BIT, 0xc9}, + {0x70af, CRL_REG_LEN_08BIT, 0xc0}, + {0x70b0, CRL_REG_LEN_08BIT, 0x55}, + {0x70b1, CRL_REG_LEN_08BIT, 0x00}, + {0x70b2, CRL_REG_LEN_08BIT, 0x46}, + {0x70b3, CRL_REG_LEN_08BIT, 0xa1}, + {0x70b4, CRL_REG_LEN_08BIT, 0xb9}, + {0x70b5, CRL_REG_LEN_08BIT, 0x64}, + {0x70b6, CRL_REG_LEN_08BIT, 0x29}, + {0x70b7, CRL_REG_LEN_08BIT, 0x00}, + {0x70b8, CRL_REG_LEN_08BIT, 0xb8}, + {0x70b9, CRL_REG_LEN_08BIT, 0x02}, + {0x70ba, CRL_REG_LEN_08BIT, 0xbe}, + {0x70bb, CRL_REG_LEN_08BIT, 0x02}, + {0x70bc, CRL_REG_LEN_08BIT, 0xd0}, + {0x70bd, CRL_REG_LEN_08BIT, 0xdc}, + {0x70be, CRL_REG_LEN_08BIT, 0xac}, + {0x70bf, CRL_REG_LEN_08BIT, 0xbc}, + {0x70c0, CRL_REG_LEN_08BIT, 0x01}, + {0x70c1, CRL_REG_LEN_08BIT, 0x37}, + {0x70c2, CRL_REG_LEN_08BIT, 0xac}, + {0x70c3, CRL_REG_LEN_08BIT, 0xd2}, + {0x70c4, CRL_REG_LEN_08BIT, 0x45}, + {0x70c5, CRL_REG_LEN_08BIT, 0xad}, + {0x70c6, CRL_REG_LEN_08BIT, 0x28}, + {0x70c7, CRL_REG_LEN_08BIT, 0x00}, + {0x70c8, CRL_REG_LEN_08BIT, 0xb8}, + {0x70c9, CRL_REG_LEN_08BIT, 0x00}, + {0x70ca, CRL_REG_LEN_08BIT, 0xbc}, + {0x70cb, CRL_REG_LEN_08BIT, 0x01}, + {0x70cc, CRL_REG_LEN_08BIT, 0x36}, + {0x70cd, CRL_REG_LEN_08BIT, 0x30}, + {0x70ce, CRL_REG_LEN_08BIT, 0xe0}, + {0x70cf, CRL_REG_LEN_08BIT, 0xd8}, + {0x70d0, CRL_REG_LEN_08BIT, 0xb5}, + {0x70d1, CRL_REG_LEN_08BIT, 0x0b}, + {0x70d2, CRL_REG_LEN_08BIT, 0xd6}, + {0x70d3, 
CRL_REG_LEN_08BIT, 0xbe}, + {0x70d4, CRL_REG_LEN_08BIT, 0x07}, + {0x70d5, CRL_REG_LEN_08BIT, 0x00}, + {0x70d6, CRL_REG_LEN_08BIT, 0x62}, + {0x70d7, CRL_REG_LEN_08BIT, 0x07}, + {0x70d8, CRL_REG_LEN_08BIT, 0xb9}, + {0x70d9, CRL_REG_LEN_08BIT, 0x05}, + {0x70da, CRL_REG_LEN_08BIT, 0xad}, + {0x70db, CRL_REG_LEN_08BIT, 0xc3}, + {0x70dc, CRL_REG_LEN_08BIT, 0xcf}, + {0x70dd, CRL_REG_LEN_08BIT, 0x46}, + {0x70de, CRL_REG_LEN_08BIT, 0xcd}, + {0x70df, CRL_REG_LEN_08BIT, 0x07}, + {0x70e0, CRL_REG_LEN_08BIT, 0xcd}, + {0x70e1, CRL_REG_LEN_08BIT, 0x00}, + {0x70e2, CRL_REG_LEN_08BIT, 0xe3}, + {0x70e3, CRL_REG_LEN_08BIT, 0x18}, + {0x70e4, CRL_REG_LEN_08BIT, 0xc2}, + {0x70e5, CRL_REG_LEN_08BIT, 0xa2}, + {0x70e6, CRL_REG_LEN_08BIT, 0xb9}, + {0x70e7, CRL_REG_LEN_08BIT, 0x64}, + {0x70e8, CRL_REG_LEN_08BIT, 0xd1}, + {0x70e9, CRL_REG_LEN_08BIT, 0xdd}, + {0x70ea, CRL_REG_LEN_08BIT, 0xac}, + {0x70eb, CRL_REG_LEN_08BIT, 0xcf}, + {0x70ec, CRL_REG_LEN_08BIT, 0xdf}, + {0x70ed, CRL_REG_LEN_08BIT, 0xb5}, + {0x70ee, CRL_REG_LEN_08BIT, 0x19}, + {0x70ef, CRL_REG_LEN_08BIT, 0x46}, + {0x70f0, CRL_REG_LEN_08BIT, 0x50}, + {0x70f1, CRL_REG_LEN_08BIT, 0xb6}, + {0x70f2, CRL_REG_LEN_08BIT, 0xee}, + {0x70f3, CRL_REG_LEN_08BIT, 0xe8}, + {0x70f4, CRL_REG_LEN_08BIT, 0xe6}, + {0x70f5, CRL_REG_LEN_08BIT, 0xbc}, + {0x70f6, CRL_REG_LEN_08BIT, 0x31}, + {0x70f7, CRL_REG_LEN_08BIT, 0xe1}, + {0x70f8, CRL_REG_LEN_08BIT, 0x36}, + {0x70f9, CRL_REG_LEN_08BIT, 0x30}, + {0x70fa, CRL_REG_LEN_08BIT, 0xd3}, + {0x70fb, CRL_REG_LEN_08BIT, 0x2e}, + {0x70fc, CRL_REG_LEN_08BIT, 0x54}, + {0x70fd, CRL_REG_LEN_08BIT, 0xbd}, + {0x70fe, CRL_REG_LEN_08BIT, 0x03}, + {0x70ff, CRL_REG_LEN_08BIT, 0xec}, + {0x7100, CRL_REG_LEN_08BIT, 0x2c}, + {0x7101, CRL_REG_LEN_08BIT, 0x50}, + {0x7102, CRL_REG_LEN_08BIT, 0x20}, + {0x7103, CRL_REG_LEN_08BIT, 0x04}, + {0x7104, CRL_REG_LEN_08BIT, 0xb8}, + {0x7105, CRL_REG_LEN_08BIT, 0x02}, + {0x7106, CRL_REG_LEN_08BIT, 0xbc}, + {0x7107, CRL_REG_LEN_08BIT, 0x18}, + {0x7108, CRL_REG_LEN_08BIT, 0xc7}, + {0x7109, 
CRL_REG_LEN_08BIT, 0xb8}, + {0x710a, CRL_REG_LEN_08BIT, 0x00}, + {0x710b, CRL_REG_LEN_08BIT, 0x28}, + {0x710c, CRL_REG_LEN_08BIT, 0x54}, + {0x710d, CRL_REG_LEN_08BIT, 0xbc}, + {0x710e, CRL_REG_LEN_08BIT, 0x02}, + {0x710f, CRL_REG_LEN_08BIT, 0xb4}, + {0x7110, CRL_REG_LEN_08BIT, 0xda}, + {0x7111, CRL_REG_LEN_08BIT, 0xbe}, + {0x7112, CRL_REG_LEN_08BIT, 0x04}, + {0x7113, CRL_REG_LEN_08BIT, 0xd6}, + {0x7114, CRL_REG_LEN_08BIT, 0xd8}, + {0x7115, CRL_REG_LEN_08BIT, 0xab}, + {0x7116, CRL_REG_LEN_08BIT, 0x00}, + {0x7117, CRL_REG_LEN_08BIT, 0x62}, + {0x7118, CRL_REG_LEN_08BIT, 0x07}, + {0x7119, CRL_REG_LEN_08BIT, 0xb9}, + {0x711a, CRL_REG_LEN_08BIT, 0x05}, + {0x711b, CRL_REG_LEN_08BIT, 0xad}, + {0x711c, CRL_REG_LEN_08BIT, 0xc3}, + {0x711d, CRL_REG_LEN_08BIT, 0xbc}, + {0x711e, CRL_REG_LEN_08BIT, 0xe7}, + {0x711f, CRL_REG_LEN_08BIT, 0xb9}, + {0x7120, CRL_REG_LEN_08BIT, 0x64}, + {0x7121, CRL_REG_LEN_08BIT, 0x29}, + {0x7122, CRL_REG_LEN_08BIT, 0x00}, + {0x7123, CRL_REG_LEN_08BIT, 0xb8}, + {0x7124, CRL_REG_LEN_08BIT, 0x02}, + {0x7125, CRL_REG_LEN_08BIT, 0xbe}, + {0x7126, CRL_REG_LEN_08BIT, 0x00}, + {0x7127, CRL_REG_LEN_08BIT, 0x45}, + {0x7128, CRL_REG_LEN_08BIT, 0xad}, + {0x7129, CRL_REG_LEN_08BIT, 0xe2}, + {0x712a, CRL_REG_LEN_08BIT, 0x28}, + {0x712b, CRL_REG_LEN_08BIT, 0x00}, + {0x712c, CRL_REG_LEN_08BIT, 0xb8}, + {0x712d, CRL_REG_LEN_08BIT, 0x00}, + {0x712e, CRL_REG_LEN_08BIT, 0xe0}, + {0x712f, CRL_REG_LEN_08BIT, 0xd8}, + {0x7130, CRL_REG_LEN_08BIT, 0xb4}, + {0x7131, CRL_REG_LEN_08BIT, 0xe9}, + {0x7132, CRL_REG_LEN_08BIT, 0xbe}, + {0x7133, CRL_REG_LEN_08BIT, 0x03}, + {0x7134, CRL_REG_LEN_08BIT, 0x00}, + {0x7135, CRL_REG_LEN_08BIT, 0x30}, + {0x7136, CRL_REG_LEN_08BIT, 0x62}, + {0x7137, CRL_REG_LEN_08BIT, 0x07}, + {0x7138, CRL_REG_LEN_08BIT, 0xb9}, + {0x7139, CRL_REG_LEN_08BIT, 0x05}, + {0x713a, CRL_REG_LEN_08BIT, 0xad}, + {0x713b, CRL_REG_LEN_08BIT, 0xc3}, + {0x713c, CRL_REG_LEN_08BIT, 0xcf}, + {0x713d, CRL_REG_LEN_08BIT, 0x42}, + {0x713e, CRL_REG_LEN_08BIT, 0xe4}, + {0x713f, 
CRL_REG_LEN_08BIT, 0xcd}, + {0x7140, CRL_REG_LEN_08BIT, 0x07}, + {0x7141, CRL_REG_LEN_08BIT, 0xcd}, + {0x7142, CRL_REG_LEN_08BIT, 0x00}, + {0x7143, CRL_REG_LEN_08BIT, 0x00}, + {0x7144, CRL_REG_LEN_08BIT, 0x17}, + {0x7145, CRL_REG_LEN_08BIT, 0xc2}, + {0x7146, CRL_REG_LEN_08BIT, 0xbb}, + {0x7147, CRL_REG_LEN_08BIT, 0xde}, + {0x7148, CRL_REG_LEN_08BIT, 0xcf}, + {0x7149, CRL_REG_LEN_08BIT, 0xdf}, + {0x714a, CRL_REG_LEN_08BIT, 0xac}, + {0x714b, CRL_REG_LEN_08BIT, 0xd1}, + {0x714c, CRL_REG_LEN_08BIT, 0x44}, + {0x714d, CRL_REG_LEN_08BIT, 0xac}, + {0x714e, CRL_REG_LEN_08BIT, 0xb9}, + {0x714f, CRL_REG_LEN_08BIT, 0x76}, + {0x7150, CRL_REG_LEN_08BIT, 0xb8}, + {0x7151, CRL_REG_LEN_08BIT, 0x08}, + {0x7152, CRL_REG_LEN_08BIT, 0xb6}, + {0x7153, CRL_REG_LEN_08BIT, 0xfe}, + {0x7154, CRL_REG_LEN_08BIT, 0xb4}, + {0x7155, CRL_REG_LEN_08BIT, 0xca}, + {0x7156, CRL_REG_LEN_08BIT, 0xd6}, + {0x7157, CRL_REG_LEN_08BIT, 0xd8}, + {0x7158, CRL_REG_LEN_08BIT, 0xab}, + {0x7159, CRL_REG_LEN_08BIT, 0x00}, + {0x715a, CRL_REG_LEN_08BIT, 0xe1}, + {0x715b, CRL_REG_LEN_08BIT, 0x36}, + {0x715c, CRL_REG_LEN_08BIT, 0x30}, + {0x715d, CRL_REG_LEN_08BIT, 0xd3}, + {0x715e, CRL_REG_LEN_08BIT, 0xbc}, + {0x715f, CRL_REG_LEN_08BIT, 0x29}, + {0x7160, CRL_REG_LEN_08BIT, 0xb4}, + {0x7161, CRL_REG_LEN_08BIT, 0x1f}, + {0x7162, CRL_REG_LEN_08BIT, 0xaa}, + {0x7163, CRL_REG_LEN_08BIT, 0xbd}, + {0x7164, CRL_REG_LEN_08BIT, 0x01}, + {0x7165, CRL_REG_LEN_08BIT, 0xb8}, + {0x7166, CRL_REG_LEN_08BIT, 0x0c}, + {0x7167, CRL_REG_LEN_08BIT, 0x45}, + {0x7168, CRL_REG_LEN_08BIT, 0xa4}, + {0x7169, CRL_REG_LEN_08BIT, 0xbd}, + {0x716a, CRL_REG_LEN_08BIT, 0x03}, + {0x716b, CRL_REG_LEN_08BIT, 0xec}, + {0x716c, CRL_REG_LEN_08BIT, 0xbc}, + {0x716d, CRL_REG_LEN_08BIT, 0x3d}, + {0x716e, CRL_REG_LEN_08BIT, 0xc3}, + {0x716f, CRL_REG_LEN_08BIT, 0xcf}, + {0x7170, CRL_REG_LEN_08BIT, 0x42}, + {0x7171, CRL_REG_LEN_08BIT, 0xb8}, + {0x7172, CRL_REG_LEN_08BIT, 0x00}, + {0x7173, CRL_REG_LEN_08BIT, 0xe4}, + {0x7174, CRL_REG_LEN_08BIT, 0xd5}, + {0x7175, 
CRL_REG_LEN_08BIT, 0x00}, + {0x7176, CRL_REG_LEN_08BIT, 0xb6}, + {0x7177, CRL_REG_LEN_08BIT, 0x00}, + {0x7178, CRL_REG_LEN_08BIT, 0x74}, + {0x7179, CRL_REG_LEN_08BIT, 0xbd}, + {0x717a, CRL_REG_LEN_08BIT, 0x03}, + {0x717b, CRL_REG_LEN_08BIT, 0xb5}, + {0x717c, CRL_REG_LEN_08BIT, 0x39}, + {0x717d, CRL_REG_LEN_08BIT, 0x40}, + {0x717e, CRL_REG_LEN_08BIT, 0x58}, + {0x717f, CRL_REG_LEN_08BIT, 0xdd}, + {0x7180, CRL_REG_LEN_08BIT, 0x19}, + {0x7181, CRL_REG_LEN_08BIT, 0xc1}, + {0x7182, CRL_REG_LEN_08BIT, 0xc8}, + {0x7183, CRL_REG_LEN_08BIT, 0xbd}, + {0x7184, CRL_REG_LEN_08BIT, 0x06}, + {0x7185, CRL_REG_LEN_08BIT, 0x17}, + {0x7186, CRL_REG_LEN_08BIT, 0xc1}, + {0x7187, CRL_REG_LEN_08BIT, 0xc6}, + {0x7188, CRL_REG_LEN_08BIT, 0xe8}, + {0x7189, CRL_REG_LEN_08BIT, 0x00}, + {0x718a, CRL_REG_LEN_08BIT, 0xc0}, + {0x718b, CRL_REG_LEN_08BIT, 0xc8}, + {0x718c, CRL_REG_LEN_08BIT, 0xe6}, + {0x718d, CRL_REG_LEN_08BIT, 0x95}, + {0x718e, CRL_REG_LEN_08BIT, 0x15}, + {0x718f, CRL_REG_LEN_08BIT, 0x00}, + {0x7190, CRL_REG_LEN_08BIT, 0xbc}, + {0x7191, CRL_REG_LEN_08BIT, 0x19}, + {0x7192, CRL_REG_LEN_08BIT, 0xb9}, + {0x7193, CRL_REG_LEN_08BIT, 0xf6}, + {0x7194, CRL_REG_LEN_08BIT, 0x14}, + {0x7195, CRL_REG_LEN_08BIT, 0xc1}, + {0x7196, CRL_REG_LEN_08BIT, 0xd0}, + {0x7197, CRL_REG_LEN_08BIT, 0xd1}, + {0x7198, CRL_REG_LEN_08BIT, 0xac}, + {0x7199, CRL_REG_LEN_08BIT, 0x37}, + {0x719a, CRL_REG_LEN_08BIT, 0xbc}, + {0x719b, CRL_REG_LEN_08BIT, 0x35}, + {0x719c, CRL_REG_LEN_08BIT, 0x36}, + {0x719d, CRL_REG_LEN_08BIT, 0x30}, + {0x719e, CRL_REG_LEN_08BIT, 0xe1}, + {0x719f, CRL_REG_LEN_08BIT, 0xd3}, + {0x71a0, CRL_REG_LEN_08BIT, 0x7a}, + {0x71a1, CRL_REG_LEN_08BIT, 0xb6}, + {0x71a2, CRL_REG_LEN_08BIT, 0x0c}, + {0x71a3, CRL_REG_LEN_08BIT, 0xff}, + {0x71a4, CRL_REG_LEN_08BIT, 0xb4}, + {0x71a5, CRL_REG_LEN_08BIT, 0xc7}, + {0x71a6, CRL_REG_LEN_08BIT, 0xd9}, + {0x71a7, CRL_REG_LEN_08BIT, 0x00}, + {0x71a8, CRL_REG_LEN_08BIT, 0xbd}, + {0x71a9, CRL_REG_LEN_08BIT, 0x01}, + {0x71aa, CRL_REG_LEN_08BIT, 0x56}, + {0x71ab, 
CRL_REG_LEN_08BIT, 0xc0}, + {0x71ac, CRL_REG_LEN_08BIT, 0xda}, + {0x71ad, CRL_REG_LEN_08BIT, 0xb4}, + {0x71ae, CRL_REG_LEN_08BIT, 0x1f}, + {0x71af, CRL_REG_LEN_08BIT, 0x56}, + {0x71b0, CRL_REG_LEN_08BIT, 0xaa}, + {0x71b1, CRL_REG_LEN_08BIT, 0xbc}, + {0x71b2, CRL_REG_LEN_08BIT, 0x08}, + {0x71b3, CRL_REG_LEN_08BIT, 0x00}, + {0x71b4, CRL_REG_LEN_08BIT, 0x57}, + {0x71b5, CRL_REG_LEN_08BIT, 0xe8}, + {0x71b6, CRL_REG_LEN_08BIT, 0xb5}, + {0x71b7, CRL_REG_LEN_08BIT, 0x36}, + {0x71b8, CRL_REG_LEN_08BIT, 0x00}, + {0x71b9, CRL_REG_LEN_08BIT, 0x54}, + {0x71ba, CRL_REG_LEN_08BIT, 0xe7}, + {0x71bb, CRL_REG_LEN_08BIT, 0xc8}, + {0x71bc, CRL_REG_LEN_08BIT, 0xb4}, + {0x71bd, CRL_REG_LEN_08BIT, 0x1f}, + {0x71be, CRL_REG_LEN_08BIT, 0x56}, + {0x71bf, CRL_REG_LEN_08BIT, 0xaa}, + {0x71c0, CRL_REG_LEN_08BIT, 0xbc}, + {0x71c1, CRL_REG_LEN_08BIT, 0x08}, + {0x71c2, CRL_REG_LEN_08BIT, 0x57}, + {0x71c3, CRL_REG_LEN_08BIT, 0x00}, + {0x71c4, CRL_REG_LEN_08BIT, 0xb5}, + {0x71c5, CRL_REG_LEN_08BIT, 0x36}, + {0x71c6, CRL_REG_LEN_08BIT, 0x00}, + {0x71c7, CRL_REG_LEN_08BIT, 0x54}, + {0x71c8, CRL_REG_LEN_08BIT, 0xc8}, + {0x71c9, CRL_REG_LEN_08BIT, 0xb5}, + {0x71ca, CRL_REG_LEN_08BIT, 0x18}, + {0x71cb, CRL_REG_LEN_08BIT, 0xd9}, + {0x71cc, CRL_REG_LEN_08BIT, 0x00}, + {0x71cd, CRL_REG_LEN_08BIT, 0xbd}, + {0x71ce, CRL_REG_LEN_08BIT, 0x01}, + {0x71cf, CRL_REG_LEN_08BIT, 0x56}, + {0x71d0, CRL_REG_LEN_08BIT, 0x08}, + {0x71d1, CRL_REG_LEN_08BIT, 0x57}, + {0x71d2, CRL_REG_LEN_08BIT, 0xe8}, + {0x71d3, CRL_REG_LEN_08BIT, 0xb4}, + {0x71d4, CRL_REG_LEN_08BIT, 0x42}, + {0x71d5, CRL_REG_LEN_08BIT, 0x00}, + {0x71d6, CRL_REG_LEN_08BIT, 0x54}, + {0x71d7, CRL_REG_LEN_08BIT, 0xe7}, + {0x71d8, CRL_REG_LEN_08BIT, 0xc8}, + {0x71d9, CRL_REG_LEN_08BIT, 0xab}, + {0x71da, CRL_REG_LEN_08BIT, 0x00}, + {0x71db, CRL_REG_LEN_08BIT, 0x66}, + {0x71dc, CRL_REG_LEN_08BIT, 0x62}, + {0x71dd, CRL_REG_LEN_08BIT, 0x06}, + {0x71de, CRL_REG_LEN_08BIT, 0x74}, + {0x71df, CRL_REG_LEN_08BIT, 0xb9}, + {0x71e0, CRL_REG_LEN_08BIT, 0x05}, + {0x71e1, 
CRL_REG_LEN_08BIT, 0xb7}, + {0x71e2, CRL_REG_LEN_08BIT, 0x14}, + {0x71e3, CRL_REG_LEN_08BIT, 0x0e}, + {0x71e4, CRL_REG_LEN_08BIT, 0xb7}, + {0x71e5, CRL_REG_LEN_08BIT, 0x04}, + {0x71e6, CRL_REG_LEN_08BIT, 0xc8}, + {0x7600, CRL_REG_LEN_08BIT, 0x04}, + {0x7601, CRL_REG_LEN_08BIT, 0x80}, + {0x7602, CRL_REG_LEN_08BIT, 0x07}, + {0x7603, CRL_REG_LEN_08BIT, 0x44}, + {0x7604, CRL_REG_LEN_08BIT, 0x05}, + {0x7605, CRL_REG_LEN_08BIT, 0x33}, + {0x7606, CRL_REG_LEN_08BIT, 0x0f}, + {0x7607, CRL_REG_LEN_08BIT, 0x00}, + {0x7608, CRL_REG_LEN_08BIT, 0x07}, + {0x7609, CRL_REG_LEN_08BIT, 0x40}, + {0x760a, CRL_REG_LEN_08BIT, 0x04}, + {0x760b, CRL_REG_LEN_08BIT, 0xe5}, + {0x760c, CRL_REG_LEN_08BIT, 0x06}, + {0x760d, CRL_REG_LEN_08BIT, 0x50}, + {0x760e, CRL_REG_LEN_08BIT, 0x04}, + {0x760f, CRL_REG_LEN_08BIT, 0xe4}, + {0x7610, CRL_REG_LEN_08BIT, 0x00}, + {0x7611, CRL_REG_LEN_08BIT, 0x00}, + {0x7612, CRL_REG_LEN_08BIT, 0x06}, + {0x7613, CRL_REG_LEN_08BIT, 0x5c}, + {0x7614, CRL_REG_LEN_08BIT, 0x00}, + {0x7615, CRL_REG_LEN_08BIT, 0x0f}, + {0x7616, CRL_REG_LEN_08BIT, 0x06}, + {0x7617, CRL_REG_LEN_08BIT, 0x1c}, + {0x7618, CRL_REG_LEN_08BIT, 0x00}, + {0x7619, CRL_REG_LEN_08BIT, 0x02}, + {0x761a, CRL_REG_LEN_08BIT, 0x06}, + {0x761b, CRL_REG_LEN_08BIT, 0xa2}, + {0x761c, CRL_REG_LEN_08BIT, 0x00}, + {0x761d, CRL_REG_LEN_08BIT, 0x01}, + {0x761e, CRL_REG_LEN_08BIT, 0x06}, + {0x761f, CRL_REG_LEN_08BIT, 0xae}, + {0x7620, CRL_REG_LEN_08BIT, 0x00}, + {0x7621, CRL_REG_LEN_08BIT, 0x0e}, + {0x7622, CRL_REG_LEN_08BIT, 0x05}, + {0x7623, CRL_REG_LEN_08BIT, 0x30}, + {0x7624, CRL_REG_LEN_08BIT, 0x07}, + {0x7625, CRL_REG_LEN_08BIT, 0x00}, + {0x7626, CRL_REG_LEN_08BIT, 0x0f}, + {0x7627, CRL_REG_LEN_08BIT, 0x00}, + {0x7628, CRL_REG_LEN_08BIT, 0x04}, + {0x7629, CRL_REG_LEN_08BIT, 0xe5}, + {0x762a, CRL_REG_LEN_08BIT, 0x05}, + {0x762b, CRL_REG_LEN_08BIT, 0x33}, + {0x762c, CRL_REG_LEN_08BIT, 0x06}, + {0x762d, CRL_REG_LEN_08BIT, 0x12}, + {0x762e, CRL_REG_LEN_08BIT, 0x00}, + {0x762f, CRL_REG_LEN_08BIT, 0x01}, + {0x7630, 
CRL_REG_LEN_08BIT, 0x06}, + {0x7631, CRL_REG_LEN_08BIT, 0x52}, + {0x7632, CRL_REG_LEN_08BIT, 0x00}, + {0x7633, CRL_REG_LEN_08BIT, 0x01}, + {0x7634, CRL_REG_LEN_08BIT, 0x06}, + {0x7635, CRL_REG_LEN_08BIT, 0x5e}, + {0x7636, CRL_REG_LEN_08BIT, 0x04}, + {0x7637, CRL_REG_LEN_08BIT, 0xe4}, + {0x7638, CRL_REG_LEN_08BIT, 0x00}, + {0x7639, CRL_REG_LEN_08BIT, 0x01}, + {0x763a, CRL_REG_LEN_08BIT, 0x05}, + {0x763b, CRL_REG_LEN_08BIT, 0x30}, + {0x763c, CRL_REG_LEN_08BIT, 0x0f}, + {0x763d, CRL_REG_LEN_08BIT, 0x00}, + {0x763e, CRL_REG_LEN_08BIT, 0x06}, + {0x763f, CRL_REG_LEN_08BIT, 0xa6}, + {0x7640, CRL_REG_LEN_08BIT, 0x00}, + {0x7641, CRL_REG_LEN_08BIT, 0x02}, + {0x7642, CRL_REG_LEN_08BIT, 0x06}, + {0x7643, CRL_REG_LEN_08BIT, 0x26}, + {0x7644, CRL_REG_LEN_08BIT, 0x00}, + {0x7645, CRL_REG_LEN_08BIT, 0x02}, + {0x7646, CRL_REG_LEN_08BIT, 0x05}, + {0x7647, CRL_REG_LEN_08BIT, 0x33}, + {0x7648, CRL_REG_LEN_08BIT, 0x06}, + {0x7649, CRL_REG_LEN_08BIT, 0x20}, + {0x764a, CRL_REG_LEN_08BIT, 0x0f}, + {0x764b, CRL_REG_LEN_08BIT, 0x00}, + {0x764c, CRL_REG_LEN_08BIT, 0x06}, + {0x764d, CRL_REG_LEN_08BIT, 0x56}, + {0x764e, CRL_REG_LEN_08BIT, 0x00}, + {0x764f, CRL_REG_LEN_08BIT, 0x02}, + {0x7650, CRL_REG_LEN_08BIT, 0x06}, + {0x7651, CRL_REG_LEN_08BIT, 0x16}, + {0x7652, CRL_REG_LEN_08BIT, 0x05}, + {0x7653, CRL_REG_LEN_08BIT, 0x33}, + {0x7654, CRL_REG_LEN_08BIT, 0x06}, + {0x7655, CRL_REG_LEN_08BIT, 0x10}, + {0x7656, CRL_REG_LEN_08BIT, 0x0f}, + {0x7657, CRL_REG_LEN_08BIT, 0x00}, + {0x7658, CRL_REG_LEN_08BIT, 0x06}, + {0x7659, CRL_REG_LEN_08BIT, 0x10}, + {0x765a, CRL_REG_LEN_08BIT, 0x0f}, + {0x765b, CRL_REG_LEN_08BIT, 0x00}, + {0x765c, CRL_REG_LEN_08BIT, 0x06}, + {0x765d, CRL_REG_LEN_08BIT, 0x20}, + {0x765e, CRL_REG_LEN_08BIT, 0x0f}, + {0x765f, CRL_REG_LEN_08BIT, 0x00}, + {0x7660, CRL_REG_LEN_08BIT, 0x00}, + {0x7661, CRL_REG_LEN_08BIT, 0x00}, + {0x7662, CRL_REG_LEN_08BIT, 0x00}, + {0x7663, CRL_REG_LEN_08BIT, 0x02}, + {0x7664, CRL_REG_LEN_08BIT, 0x04}, + {0x7665, CRL_REG_LEN_08BIT, 0xe5}, + {0x7666, 
CRL_REG_LEN_08BIT, 0x04}, + {0x7667, CRL_REG_LEN_08BIT, 0xe4}, + {0x7668, CRL_REG_LEN_08BIT, 0x0f}, + {0x7669, CRL_REG_LEN_08BIT, 0x00}, + {0x766a, CRL_REG_LEN_08BIT, 0x00}, + {0x766b, CRL_REG_LEN_08BIT, 0x00}, + {0x766c, CRL_REG_LEN_08BIT, 0x00}, + {0x766d, CRL_REG_LEN_08BIT, 0x01}, + {0x766e, CRL_REG_LEN_08BIT, 0x04}, + {0x766f, CRL_REG_LEN_08BIT, 0xe5}, + {0x7670, CRL_REG_LEN_08BIT, 0x04}, + {0x7671, CRL_REG_LEN_08BIT, 0xe4}, + {0x7672, CRL_REG_LEN_08BIT, 0x0f}, + {0x7673, CRL_REG_LEN_08BIT, 0x00}, + {0x7674, CRL_REG_LEN_08BIT, 0x00}, + {0x7675, CRL_REG_LEN_08BIT, 0x02}, + {0x7676, CRL_REG_LEN_08BIT, 0x04}, + {0x7677, CRL_REG_LEN_08BIT, 0xe4}, + {0x7678, CRL_REG_LEN_08BIT, 0x00}, + {0x7679, CRL_REG_LEN_08BIT, 0x02}, + {0x767a, CRL_REG_LEN_08BIT, 0x04}, + {0x767b, CRL_REG_LEN_08BIT, 0xc4}, + {0x767c, CRL_REG_LEN_08BIT, 0x00}, + {0x767d, CRL_REG_LEN_08BIT, 0x02}, + {0x767e, CRL_REG_LEN_08BIT, 0x04}, + {0x767f, CRL_REG_LEN_08BIT, 0xc4}, + {0x7680, CRL_REG_LEN_08BIT, 0x05}, + {0x7681, CRL_REG_LEN_08BIT, 0x83}, + {0x7682, CRL_REG_LEN_08BIT, 0x0f}, + {0x7683, CRL_REG_LEN_08BIT, 0x00}, + {0x7684, CRL_REG_LEN_08BIT, 0x00}, + {0x7685, CRL_REG_LEN_08BIT, 0x02}, + {0x7686, CRL_REG_LEN_08BIT, 0x04}, + {0x7687, CRL_REG_LEN_08BIT, 0xe4}, + {0x7688, CRL_REG_LEN_08BIT, 0x00}, + {0x7689, CRL_REG_LEN_08BIT, 0x02}, + {0x768a, CRL_REG_LEN_08BIT, 0x04}, + {0x768b, CRL_REG_LEN_08BIT, 0xc4}, + {0x768c, CRL_REG_LEN_08BIT, 0x00}, + {0x768d, CRL_REG_LEN_08BIT, 0x02}, + {0x768e, CRL_REG_LEN_08BIT, 0x04}, + {0x768f, CRL_REG_LEN_08BIT, 0xc4}, + {0x7690, CRL_REG_LEN_08BIT, 0x05}, + {0x7691, CRL_REG_LEN_08BIT, 0x83}, + {0x7692, CRL_REG_LEN_08BIT, 0x03}, + {0x7693, CRL_REG_LEN_08BIT, 0x0b}, + {0x7694, CRL_REG_LEN_08BIT, 0x05}, + {0x7695, CRL_REG_LEN_08BIT, 0x83}, + {0x7696, CRL_REG_LEN_08BIT, 0x00}, + {0x7697, CRL_REG_LEN_08BIT, 0x07}, + {0x7698, CRL_REG_LEN_08BIT, 0x05}, + {0x7699, CRL_REG_LEN_08BIT, 0x03}, + {0x769a, CRL_REG_LEN_08BIT, 0x00}, + {0x769b, CRL_REG_LEN_08BIT, 0x05}, + {0x769c, 
CRL_REG_LEN_08BIT, 0x05}, + {0x769d, CRL_REG_LEN_08BIT, 0x32}, + {0x769e, CRL_REG_LEN_08BIT, 0x05}, + {0x769f, CRL_REG_LEN_08BIT, 0x30}, + {0x76a0, CRL_REG_LEN_08BIT, 0x00}, + {0x76a1, CRL_REG_LEN_08BIT, 0x02}, + {0x76a2, CRL_REG_LEN_08BIT, 0x05}, + {0x76a3, CRL_REG_LEN_08BIT, 0x78}, + {0x76a4, CRL_REG_LEN_08BIT, 0x00}, + {0x76a5, CRL_REG_LEN_08BIT, 0x01}, + {0x76a6, CRL_REG_LEN_08BIT, 0x05}, + {0x76a7, CRL_REG_LEN_08BIT, 0x7c}, + {0x76a8, CRL_REG_LEN_08BIT, 0x03}, + {0x76a9, CRL_REG_LEN_08BIT, 0x9a}, + {0x76aa, CRL_REG_LEN_08BIT, 0x05}, + {0x76ab, CRL_REG_LEN_08BIT, 0x83}, + {0x76ac, CRL_REG_LEN_08BIT, 0x00}, + {0x76ad, CRL_REG_LEN_08BIT, 0x04}, + {0x76ae, CRL_REG_LEN_08BIT, 0x05}, + {0x76af, CRL_REG_LEN_08BIT, 0x03}, + {0x76b0, CRL_REG_LEN_08BIT, 0x00}, + {0x76b1, CRL_REG_LEN_08BIT, 0x03}, + {0x76b2, CRL_REG_LEN_08BIT, 0x05}, + {0x76b3, CRL_REG_LEN_08BIT, 0x32}, + {0x76b4, CRL_REG_LEN_08BIT, 0x05}, + {0x76b5, CRL_REG_LEN_08BIT, 0x30}, + {0x76b6, CRL_REG_LEN_08BIT, 0x00}, + {0x76b7, CRL_REG_LEN_08BIT, 0x02}, + {0x76b8, CRL_REG_LEN_08BIT, 0x05}, + {0x76b9, CRL_REG_LEN_08BIT, 0x78}, + {0x76ba, CRL_REG_LEN_08BIT, 0x00}, + {0x76bb, CRL_REG_LEN_08BIT, 0x01}, + {0x76bc, CRL_REG_LEN_08BIT, 0x05}, + {0x76bd, CRL_REG_LEN_08BIT, 0x7c}, + {0x76be, CRL_REG_LEN_08BIT, 0x03}, + {0x76bf, CRL_REG_LEN_08BIT, 0x99}, + {0x76c0, CRL_REG_LEN_08BIT, 0x05}, + {0x76c1, CRL_REG_LEN_08BIT, 0x83}, + {0x76c2, CRL_REG_LEN_08BIT, 0x00}, + {0x76c3, CRL_REG_LEN_08BIT, 0x03}, + {0x76c4, CRL_REG_LEN_08BIT, 0x05}, + {0x76c5, CRL_REG_LEN_08BIT, 0x03}, + {0x76c6, CRL_REG_LEN_08BIT, 0x00}, + {0x76c7, CRL_REG_LEN_08BIT, 0x01}, + {0x76c8, CRL_REG_LEN_08BIT, 0x05}, + {0x76c9, CRL_REG_LEN_08BIT, 0x32}, + {0x76ca, CRL_REG_LEN_08BIT, 0x05}, + {0x76cb, CRL_REG_LEN_08BIT, 0x30}, + {0x76cc, CRL_REG_LEN_08BIT, 0x00}, + {0x76cd, CRL_REG_LEN_08BIT, 0x02}, + {0x76ce, CRL_REG_LEN_08BIT, 0x05}, + {0x76cf, CRL_REG_LEN_08BIT, 0x78}, + {0x76d0, CRL_REG_LEN_08BIT, 0x00}, + {0x76d1, CRL_REG_LEN_08BIT, 0x01}, + {0x76d2, 
CRL_REG_LEN_08BIT, 0x05}, + {0x76d3, CRL_REG_LEN_08BIT, 0x7c}, + {0x76d4, CRL_REG_LEN_08BIT, 0x03}, + {0x76d5, CRL_REG_LEN_08BIT, 0x98}, + {0x76d6, CRL_REG_LEN_08BIT, 0x05}, + {0x76d7, CRL_REG_LEN_08BIT, 0x83}, + {0x76d8, CRL_REG_LEN_08BIT, 0x00}, + {0x76d9, CRL_REG_LEN_08BIT, 0x00}, + {0x76da, CRL_REG_LEN_08BIT, 0x05}, + {0x76db, CRL_REG_LEN_08BIT, 0x03}, + {0x76dc, CRL_REG_LEN_08BIT, 0x00}, + {0x76dd, CRL_REG_LEN_08BIT, 0x01}, + {0x76de, CRL_REG_LEN_08BIT, 0x05}, + {0x76df, CRL_REG_LEN_08BIT, 0x32}, + {0x76e0, CRL_REG_LEN_08BIT, 0x05}, + {0x76e1, CRL_REG_LEN_08BIT, 0x30}, + {0x76e2, CRL_REG_LEN_08BIT, 0x00}, + {0x76e3, CRL_REG_LEN_08BIT, 0x02}, + {0x76e4, CRL_REG_LEN_08BIT, 0x05}, + {0x76e5, CRL_REG_LEN_08BIT, 0x78}, + {0x76e6, CRL_REG_LEN_08BIT, 0x00}, + {0x76e7, CRL_REG_LEN_08BIT, 0x01}, + {0x76e8, CRL_REG_LEN_08BIT, 0x05}, + {0x76e9, CRL_REG_LEN_08BIT, 0x7c}, + {0x76ea, CRL_REG_LEN_08BIT, 0x03}, + {0x76eb, CRL_REG_LEN_08BIT, 0x97}, + {0x76ec, CRL_REG_LEN_08BIT, 0x05}, + {0x76ed, CRL_REG_LEN_08BIT, 0x83}, + {0x76ee, CRL_REG_LEN_08BIT, 0x00}, + {0x76ef, CRL_REG_LEN_08BIT, 0x00}, + {0x76f0, CRL_REG_LEN_08BIT, 0x05}, + {0x76f1, CRL_REG_LEN_08BIT, 0x03}, + {0x76f2, CRL_REG_LEN_08BIT, 0x05}, + {0x76f3, CRL_REG_LEN_08BIT, 0x32}, + {0x76f4, CRL_REG_LEN_08BIT, 0x05}, + {0x76f5, CRL_REG_LEN_08BIT, 0x30}, + {0x76f6, CRL_REG_LEN_08BIT, 0x00}, + {0x76f7, CRL_REG_LEN_08BIT, 0x02}, + {0x76f8, CRL_REG_LEN_08BIT, 0x05}, + {0x76f9, CRL_REG_LEN_08BIT, 0x78}, + {0x76fa, CRL_REG_LEN_08BIT, 0x00}, + {0x76fb, CRL_REG_LEN_08BIT, 0x01}, + {0x76fc, CRL_REG_LEN_08BIT, 0x05}, + {0x76fd, CRL_REG_LEN_08BIT, 0x7c}, + {0x76fe, CRL_REG_LEN_08BIT, 0x03}, + {0x76ff, CRL_REG_LEN_08BIT, 0x96}, + {0x7700, CRL_REG_LEN_08BIT, 0x05}, + {0x7701, CRL_REG_LEN_08BIT, 0x83}, + {0x7702, CRL_REG_LEN_08BIT, 0x05}, + {0x7703, CRL_REG_LEN_08BIT, 0x03}, + {0x7704, CRL_REG_LEN_08BIT, 0x05}, + {0x7705, CRL_REG_LEN_08BIT, 0x32}, + {0x7706, CRL_REG_LEN_08BIT, 0x05}, + {0x7707, CRL_REG_LEN_08BIT, 0x30}, + {0x7708, 
CRL_REG_LEN_08BIT, 0x00}, + {0x7709, CRL_REG_LEN_08BIT, 0x02}, + {0x770a, CRL_REG_LEN_08BIT, 0x05}, + {0x770b, CRL_REG_LEN_08BIT, 0x78}, + {0x770c, CRL_REG_LEN_08BIT, 0x00}, + {0x770d, CRL_REG_LEN_08BIT, 0x01}, + {0x770e, CRL_REG_LEN_08BIT, 0x05}, + {0x770f, CRL_REG_LEN_08BIT, 0x7c}, + {0x7710, CRL_REG_LEN_08BIT, 0x03}, + {0x7711, CRL_REG_LEN_08BIT, 0x95}, + {0x7712, CRL_REG_LEN_08BIT, 0x05}, + {0x7713, CRL_REG_LEN_08BIT, 0x83}, + {0x7714, CRL_REG_LEN_08BIT, 0x05}, + {0x7715, CRL_REG_LEN_08BIT, 0x03}, + {0x7716, CRL_REG_LEN_08BIT, 0x05}, + {0x7717, CRL_REG_LEN_08BIT, 0x32}, + {0x7718, CRL_REG_LEN_08BIT, 0x05}, + {0x7719, CRL_REG_LEN_08BIT, 0x30}, + {0x771a, CRL_REG_LEN_08BIT, 0x00}, + {0x771b, CRL_REG_LEN_08BIT, 0x02}, + {0x771c, CRL_REG_LEN_08BIT, 0x05}, + {0x771d, CRL_REG_LEN_08BIT, 0x78}, + {0x771e, CRL_REG_LEN_08BIT, 0x00}, + {0x771f, CRL_REG_LEN_08BIT, 0x01}, + {0x7720, CRL_REG_LEN_08BIT, 0x05}, + {0x7721, CRL_REG_LEN_08BIT, 0x7c}, + {0x7722, CRL_REG_LEN_08BIT, 0x03}, + {0x7723, CRL_REG_LEN_08BIT, 0x94}, + {0x7724, CRL_REG_LEN_08BIT, 0x05}, + {0x7725, CRL_REG_LEN_08BIT, 0x83}, + {0x7726, CRL_REG_LEN_08BIT, 0x00}, + {0x7727, CRL_REG_LEN_08BIT, 0x01}, + {0x7728, CRL_REG_LEN_08BIT, 0x05}, + {0x7729, CRL_REG_LEN_08BIT, 0x03}, + {0x772a, CRL_REG_LEN_08BIT, 0x00}, + {0x772b, CRL_REG_LEN_08BIT, 0x01}, + {0x772c, CRL_REG_LEN_08BIT, 0x05}, + {0x772d, CRL_REG_LEN_08BIT, 0x32}, + {0x772e, CRL_REG_LEN_08BIT, 0x05}, + {0x772f, CRL_REG_LEN_08BIT, 0x30}, + {0x7730, CRL_REG_LEN_08BIT, 0x00}, + {0x7731, CRL_REG_LEN_08BIT, 0x02}, + {0x7732, CRL_REG_LEN_08BIT, 0x05}, + {0x7733, CRL_REG_LEN_08BIT, 0x78}, + {0x7734, CRL_REG_LEN_08BIT, 0x00}, + {0x7735, CRL_REG_LEN_08BIT, 0x01}, + {0x7736, CRL_REG_LEN_08BIT, 0x05}, + {0x7737, CRL_REG_LEN_08BIT, 0x7c}, + {0x7738, CRL_REG_LEN_08BIT, 0x03}, + {0x7739, CRL_REG_LEN_08BIT, 0x93}, + {0x773a, CRL_REG_LEN_08BIT, 0x05}, + {0x773b, CRL_REG_LEN_08BIT, 0x83}, + {0x773c, CRL_REG_LEN_08BIT, 0x00}, + {0x773d, CRL_REG_LEN_08BIT, 0x00}, + {0x773e, 
CRL_REG_LEN_08BIT, 0x05}, + {0x773f, CRL_REG_LEN_08BIT, 0x03}, + {0x7740, CRL_REG_LEN_08BIT, 0x00}, + {0x7741, CRL_REG_LEN_08BIT, 0x00}, + {0x7742, CRL_REG_LEN_08BIT, 0x05}, + {0x7743, CRL_REG_LEN_08BIT, 0x32}, + {0x7744, CRL_REG_LEN_08BIT, 0x05}, + {0x7745, CRL_REG_LEN_08BIT, 0x30}, + {0x7746, CRL_REG_LEN_08BIT, 0x00}, + {0x7747, CRL_REG_LEN_08BIT, 0x02}, + {0x7748, CRL_REG_LEN_08BIT, 0x05}, + {0x7749, CRL_REG_LEN_08BIT, 0x78}, + {0x774a, CRL_REG_LEN_08BIT, 0x00}, + {0x774b, CRL_REG_LEN_08BIT, 0x01}, + {0x774c, CRL_REG_LEN_08BIT, 0x05}, + {0x774d, CRL_REG_LEN_08BIT, 0x7c}, + {0x774e, CRL_REG_LEN_08BIT, 0x03}, + {0x774f, CRL_REG_LEN_08BIT, 0x92}, + {0x7750, CRL_REG_LEN_08BIT, 0x05}, + {0x7751, CRL_REG_LEN_08BIT, 0x83}, + {0x7752, CRL_REG_LEN_08BIT, 0x05}, + {0x7753, CRL_REG_LEN_08BIT, 0x03}, + {0x7754, CRL_REG_LEN_08BIT, 0x00}, + {0x7755, CRL_REG_LEN_08BIT, 0x00}, + {0x7756, CRL_REG_LEN_08BIT, 0x05}, + {0x7757, CRL_REG_LEN_08BIT, 0x32}, + {0x7758, CRL_REG_LEN_08BIT, 0x05}, + {0x7759, CRL_REG_LEN_08BIT, 0x30}, + {0x775a, CRL_REG_LEN_08BIT, 0x00}, + {0x775b, CRL_REG_LEN_08BIT, 0x02}, + {0x775c, CRL_REG_LEN_08BIT, 0x05}, + {0x775d, CRL_REG_LEN_08BIT, 0x78}, + {0x775e, CRL_REG_LEN_08BIT, 0x00}, + {0x775f, CRL_REG_LEN_08BIT, 0x01}, + {0x7760, CRL_REG_LEN_08BIT, 0x05}, + {0x7761, CRL_REG_LEN_08BIT, 0x7c}, + {0x7762, CRL_REG_LEN_08BIT, 0x03}, + {0x7763, CRL_REG_LEN_08BIT, 0x91}, + {0x7764, CRL_REG_LEN_08BIT, 0x05}, + {0x7765, CRL_REG_LEN_08BIT, 0x83}, + {0x7766, CRL_REG_LEN_08BIT, 0x05}, + {0x7767, CRL_REG_LEN_08BIT, 0x03}, + {0x7768, CRL_REG_LEN_08BIT, 0x05}, + {0x7769, CRL_REG_LEN_08BIT, 0x32}, + {0x776a, CRL_REG_LEN_08BIT, 0x05}, + {0x776b, CRL_REG_LEN_08BIT, 0x30}, + {0x776c, CRL_REG_LEN_08BIT, 0x00}, + {0x776d, CRL_REG_LEN_08BIT, 0x02}, + {0x776e, CRL_REG_LEN_08BIT, 0x05}, + {0x776f, CRL_REG_LEN_08BIT, 0x78}, + {0x7770, CRL_REG_LEN_08BIT, 0x00}, + {0x7771, CRL_REG_LEN_08BIT, 0x01}, + {0x7772, CRL_REG_LEN_08BIT, 0x05}, + {0x7773, CRL_REG_LEN_08BIT, 0x7c}, + {0x7774, 
CRL_REG_LEN_08BIT, 0x03}, + {0x7775, CRL_REG_LEN_08BIT, 0x90}, + {0x7776, CRL_REG_LEN_08BIT, 0x05}, + {0x7777, CRL_REG_LEN_08BIT, 0x83}, + {0x7778, CRL_REG_LEN_08BIT, 0x05}, + {0x7779, CRL_REG_LEN_08BIT, 0x03}, + {0x777a, CRL_REG_LEN_08BIT, 0x05}, + {0x777b, CRL_REG_LEN_08BIT, 0x32}, + {0x777c, CRL_REG_LEN_08BIT, 0x05}, + {0x777d, CRL_REG_LEN_08BIT, 0x30}, + {0x777e, CRL_REG_LEN_08BIT, 0x00}, + {0x777f, CRL_REG_LEN_08BIT, 0x02}, + {0x7780, CRL_REG_LEN_08BIT, 0x05}, + {0x7781, CRL_REG_LEN_08BIT, 0x78}, + {0x7782, CRL_REG_LEN_08BIT, 0x00}, + {0x7783, CRL_REG_LEN_08BIT, 0x01}, + {0x7784, CRL_REG_LEN_08BIT, 0x05}, + {0x7785, CRL_REG_LEN_08BIT, 0x7c}, + {0x7786, CRL_REG_LEN_08BIT, 0x02}, + {0x7787, CRL_REG_LEN_08BIT, 0x90}, + {0x7788, CRL_REG_LEN_08BIT, 0x05}, + {0x7789, CRL_REG_LEN_08BIT, 0x03}, + {0x778a, CRL_REG_LEN_08BIT, 0x07}, + {0x778b, CRL_REG_LEN_08BIT, 0x00}, + {0x778c, CRL_REG_LEN_08BIT, 0x0f}, + {0x778d, CRL_REG_LEN_08BIT, 0x00}, + {0x778e, CRL_REG_LEN_08BIT, 0x08}, + {0x778f, CRL_REG_LEN_08BIT, 0x30}, + {0x7790, CRL_REG_LEN_08BIT, 0x08}, + {0x7791, CRL_REG_LEN_08BIT, 0xee}, + {0x7792, CRL_REG_LEN_08BIT, 0x0f}, + {0x7793, CRL_REG_LEN_08BIT, 0x00}, + {0x7794, CRL_REG_LEN_08BIT, 0x05}, + {0x7795, CRL_REG_LEN_08BIT, 0x33}, + {0x7796, CRL_REG_LEN_08BIT, 0x04}, + {0x7797, CRL_REG_LEN_08BIT, 0xe5}, + {0x7798, CRL_REG_LEN_08BIT, 0x06}, + {0x7799, CRL_REG_LEN_08BIT, 0x52}, + {0x779a, CRL_REG_LEN_08BIT, 0x04}, + {0x779b, CRL_REG_LEN_08BIT, 0xe4}, + {0x779c, CRL_REG_LEN_08BIT, 0x00}, + {0x779d, CRL_REG_LEN_08BIT, 0x00}, + {0x779e, CRL_REG_LEN_08BIT, 0x06}, + {0x779f, CRL_REG_LEN_08BIT, 0x5e}, + {0x77a0, CRL_REG_LEN_08BIT, 0x00}, + {0x77a1, CRL_REG_LEN_08BIT, 0x0f}, + {0x77a2, CRL_REG_LEN_08BIT, 0x06}, + {0x77a3, CRL_REG_LEN_08BIT, 0x1e}, + {0x77a4, CRL_REG_LEN_08BIT, 0x00}, + {0x77a5, CRL_REG_LEN_08BIT, 0x02}, + {0x77a6, CRL_REG_LEN_08BIT, 0x06}, + {0x77a7, CRL_REG_LEN_08BIT, 0xa2}, + {0x77a8, CRL_REG_LEN_08BIT, 0x00}, + {0x77a9, CRL_REG_LEN_08BIT, 0x01}, + {0x77aa, 
CRL_REG_LEN_08BIT, 0x06}, + {0x77ab, CRL_REG_LEN_08BIT, 0xae}, + {0x77ac, CRL_REG_LEN_08BIT, 0x00}, + {0x77ad, CRL_REG_LEN_08BIT, 0x03}, + {0x77ae, CRL_REG_LEN_08BIT, 0x05}, + {0x77af, CRL_REG_LEN_08BIT, 0x30}, + {0x77b0, CRL_REG_LEN_08BIT, 0x09}, + {0x77b1, CRL_REG_LEN_08BIT, 0x19}, + {0x77b2, CRL_REG_LEN_08BIT, 0x0f}, + {0x77b3, CRL_REG_LEN_08BIT, 0x00}, + {0x77b4, CRL_REG_LEN_08BIT, 0x05}, + {0x77b5, CRL_REG_LEN_08BIT, 0x33}, + {0x77b6, CRL_REG_LEN_08BIT, 0x04}, + {0x77b7, CRL_REG_LEN_08BIT, 0xe5}, + {0x77b8, CRL_REG_LEN_08BIT, 0x06}, + {0x77b9, CRL_REG_LEN_08BIT, 0x52}, + {0x77ba, CRL_REG_LEN_08BIT, 0x04}, + {0x77bb, CRL_REG_LEN_08BIT, 0xe4}, + {0x77bc, CRL_REG_LEN_08BIT, 0x00}, + {0x77bd, CRL_REG_LEN_08BIT, 0x00}, + {0x77be, CRL_REG_LEN_08BIT, 0x06}, + {0x77bf, CRL_REG_LEN_08BIT, 0x5e}, + {0x77c0, CRL_REG_LEN_08BIT, 0x00}, + {0x77c1, CRL_REG_LEN_08BIT, 0x0f}, + {0x77c2, CRL_REG_LEN_08BIT, 0x06}, + {0x77c3, CRL_REG_LEN_08BIT, 0x1e}, + {0x77c4, CRL_REG_LEN_08BIT, 0x00}, + {0x77c5, CRL_REG_LEN_08BIT, 0x02}, + {0x77c6, CRL_REG_LEN_08BIT, 0x06}, + {0x77c7, CRL_REG_LEN_08BIT, 0xa2}, + {0x77c8, CRL_REG_LEN_08BIT, 0x00}, + {0x77c9, CRL_REG_LEN_08BIT, 0x01}, + {0x77ca, CRL_REG_LEN_08BIT, 0x06}, + {0x77cb, CRL_REG_LEN_08BIT, 0xae}, + {0x77cc, CRL_REG_LEN_08BIT, 0x00}, + {0x77cd, CRL_REG_LEN_08BIT, 0x03}, + {0x77ce, CRL_REG_LEN_08BIT, 0x05}, + {0x77cf, CRL_REG_LEN_08BIT, 0x30}, + {0x77d0, CRL_REG_LEN_08BIT, 0x0f}, + {0x77d1, CRL_REG_LEN_08BIT, 0x00}, + {0x77d2, CRL_REG_LEN_08BIT, 0x00}, + {0x77d3, CRL_REG_LEN_08BIT, 0x00}, + {0x77d4, CRL_REG_LEN_08BIT, 0x00}, + {0x77d5, CRL_REG_LEN_08BIT, 0x02}, + {0x77d6, CRL_REG_LEN_08BIT, 0x04}, + {0x77d7, CRL_REG_LEN_08BIT, 0xe5}, + {0x77d8, CRL_REG_LEN_08BIT, 0x04}, + {0x77d9, CRL_REG_LEN_08BIT, 0xe4}, + {0x77da, CRL_REG_LEN_08BIT, 0x05}, + {0x77db, CRL_REG_LEN_08BIT, 0x33}, + {0x77dc, CRL_REG_LEN_08BIT, 0x07}, + {0x77dd, CRL_REG_LEN_08BIT, 0x10}, + {0x77de, CRL_REG_LEN_08BIT, 0x00}, + {0x77df, CRL_REG_LEN_08BIT, 0x00}, + {0x77e0, 
CRL_REG_LEN_08BIT, 0x01}, + {0x77e1, CRL_REG_LEN_08BIT, 0xbb}, + {0x77e2, CRL_REG_LEN_08BIT, 0x00}, + {0x77e3, CRL_REG_LEN_08BIT, 0x00}, + {0x77e4, CRL_REG_LEN_08BIT, 0x01}, + {0x77e5, CRL_REG_LEN_08BIT, 0xaa}, + {0x77e6, CRL_REG_LEN_08BIT, 0x00}, + {0x77e7, CRL_REG_LEN_08BIT, 0x00}, + {0x77e8, CRL_REG_LEN_08BIT, 0x01}, + {0x77e9, CRL_REG_LEN_08BIT, 0x99}, + {0x77ea, CRL_REG_LEN_08BIT, 0x00}, + {0x77eb, CRL_REG_LEN_08BIT, 0x00}, + {0x77ec, CRL_REG_LEN_08BIT, 0x01}, + {0x77ed, CRL_REG_LEN_08BIT, 0x88}, + {0x77ee, CRL_REG_LEN_08BIT, 0x00}, + {0x77ef, CRL_REG_LEN_08BIT, 0x00}, + {0x77f0, CRL_REG_LEN_08BIT, 0x01}, + {0x77f1, CRL_REG_LEN_08BIT, 0x77}, + {0x77f2, CRL_REG_LEN_08BIT, 0x00}, + {0x77f3, CRL_REG_LEN_08BIT, 0x00}, + {0x77f4, CRL_REG_LEN_08BIT, 0x01}, + {0x77f5, CRL_REG_LEN_08BIT, 0x66}, + {0x77f6, CRL_REG_LEN_08BIT, 0x00}, + {0x77f7, CRL_REG_LEN_08BIT, 0x00}, + {0x77f8, CRL_REG_LEN_08BIT, 0x01}, + {0x77f9, CRL_REG_LEN_08BIT, 0x55}, + {0x77fa, CRL_REG_LEN_08BIT, 0x00}, + {0x77fb, CRL_REG_LEN_08BIT, 0x00}, + {0x77fc, CRL_REG_LEN_08BIT, 0x01}, + {0x77fd, CRL_REG_LEN_08BIT, 0x44}, + {0x77fe, CRL_REG_LEN_08BIT, 0x00}, + {0x77ff, CRL_REG_LEN_08BIT, 0x00}, + {0x7800, CRL_REG_LEN_08BIT, 0x01}, + {0x7801, CRL_REG_LEN_08BIT, 0x33}, + {0x7802, CRL_REG_LEN_08BIT, 0x00}, + {0x7803, CRL_REG_LEN_08BIT, 0x00}, + {0x7804, CRL_REG_LEN_08BIT, 0x01}, + {0x7805, CRL_REG_LEN_08BIT, 0x22}, + {0x7806, CRL_REG_LEN_08BIT, 0x00}, + {0x7807, CRL_REG_LEN_08BIT, 0x00}, + {0x7808, CRL_REG_LEN_08BIT, 0x01}, + {0x7809, CRL_REG_LEN_08BIT, 0x11}, + {0x780a, CRL_REG_LEN_08BIT, 0x00}, + {0x780b, CRL_REG_LEN_08BIT, 0x00}, + {0x780c, CRL_REG_LEN_08BIT, 0x01}, + {0x780d, CRL_REG_LEN_08BIT, 0x00}, + {0x780e, CRL_REG_LEN_08BIT, 0x01}, + {0x780f, CRL_REG_LEN_08BIT, 0xff}, + {0x7810, CRL_REG_LEN_08BIT, 0x07}, + {0x7811, CRL_REG_LEN_08BIT, 0x00}, + {0x7812, CRL_REG_LEN_08BIT, 0x02}, + {0x7813, CRL_REG_LEN_08BIT, 0xa0}, + {0x7814, CRL_REG_LEN_08BIT, 0x0f}, + {0x7815, CRL_REG_LEN_08BIT, 0x00}, + {0x7816, 
CRL_REG_LEN_08BIT, 0x08}, + {0x7817, CRL_REG_LEN_08BIT, 0x35}, + {0x7818, CRL_REG_LEN_08BIT, 0x06}, + {0x7819, CRL_REG_LEN_08BIT, 0x52}, + {0x781a, CRL_REG_LEN_08BIT, 0x04}, + {0x781b, CRL_REG_LEN_08BIT, 0xe4}, + {0x781c, CRL_REG_LEN_08BIT, 0x00}, + {0x781d, CRL_REG_LEN_08BIT, 0x00}, + {0x781e, CRL_REG_LEN_08BIT, 0x06}, + {0x781f, CRL_REG_LEN_08BIT, 0x5e}, + {0x7820, CRL_REG_LEN_08BIT, 0x05}, + {0x7821, CRL_REG_LEN_08BIT, 0x33}, + {0x7822, CRL_REG_LEN_08BIT, 0x09}, + {0x7823, CRL_REG_LEN_08BIT, 0x19}, + {0x7824, CRL_REG_LEN_08BIT, 0x06}, + {0x7825, CRL_REG_LEN_08BIT, 0x1e}, + {0x7826, CRL_REG_LEN_08BIT, 0x05}, + {0x7827, CRL_REG_LEN_08BIT, 0x33}, + {0x7828, CRL_REG_LEN_08BIT, 0x00}, + {0x7829, CRL_REG_LEN_08BIT, 0x01}, + {0x782a, CRL_REG_LEN_08BIT, 0x06}, + {0x782b, CRL_REG_LEN_08BIT, 0x24}, + {0x782c, CRL_REG_LEN_08BIT, 0x06}, + {0x782d, CRL_REG_LEN_08BIT, 0x20}, + {0x782e, CRL_REG_LEN_08BIT, 0x0f}, + {0x782f, CRL_REG_LEN_08BIT, 0x00}, + {0x7830, CRL_REG_LEN_08BIT, 0x08}, + {0x7831, CRL_REG_LEN_08BIT, 0x35}, + {0x7832, CRL_REG_LEN_08BIT, 0x07}, + {0x7833, CRL_REG_LEN_08BIT, 0x10}, + {0x7834, CRL_REG_LEN_08BIT, 0x00}, + {0x7835, CRL_REG_LEN_08BIT, 0x00}, + {0x7836, CRL_REG_LEN_08BIT, 0x01}, + {0x7837, CRL_REG_LEN_08BIT, 0xbb}, + {0x7838, CRL_REG_LEN_08BIT, 0x00}, + {0x7839, CRL_REG_LEN_08BIT, 0x00}, + {0x783a, CRL_REG_LEN_08BIT, 0x01}, + {0x783b, CRL_REG_LEN_08BIT, 0xaa}, + {0x783c, CRL_REG_LEN_08BIT, 0x00}, + {0x783d, CRL_REG_LEN_08BIT, 0x00}, + {0x783e, CRL_REG_LEN_08BIT, 0x01}, + {0x783f, CRL_REG_LEN_08BIT, 0x99}, + {0x7840, CRL_REG_LEN_08BIT, 0x00}, + {0x7841, CRL_REG_LEN_08BIT, 0x00}, + {0x7842, CRL_REG_LEN_08BIT, 0x01}, + {0x7843, CRL_REG_LEN_08BIT, 0x88}, + {0x7844, CRL_REG_LEN_08BIT, 0x00}, + {0x7845, CRL_REG_LEN_08BIT, 0x00}, + {0x7846, CRL_REG_LEN_08BIT, 0x01}, + {0x7847, CRL_REG_LEN_08BIT, 0x77}, + {0x7848, CRL_REG_LEN_08BIT, 0x00}, + {0x7849, CRL_REG_LEN_08BIT, 0x00}, + {0x784a, CRL_REG_LEN_08BIT, 0x01}, + {0x784b, CRL_REG_LEN_08BIT, 0x66}, + {0x784c, 
CRL_REG_LEN_08BIT, 0x00}, + {0x784d, CRL_REG_LEN_08BIT, 0x00}, + {0x784e, CRL_REG_LEN_08BIT, 0x01}, + {0x784f, CRL_REG_LEN_08BIT, 0x55}, + {0x7850, CRL_REG_LEN_08BIT, 0x00}, + {0x7851, CRL_REG_LEN_08BIT, 0x00}, + {0x7852, CRL_REG_LEN_08BIT, 0x01}, + {0x7853, CRL_REG_LEN_08BIT, 0x44}, + {0x7854, CRL_REG_LEN_08BIT, 0x00}, + {0x7855, CRL_REG_LEN_08BIT, 0x00}, + {0x7856, CRL_REG_LEN_08BIT, 0x01}, + {0x7857, CRL_REG_LEN_08BIT, 0x33}, + {0x7858, CRL_REG_LEN_08BIT, 0x00}, + {0x7859, CRL_REG_LEN_08BIT, 0x00}, + {0x785a, CRL_REG_LEN_08BIT, 0x01}, + {0x785b, CRL_REG_LEN_08BIT, 0x22}, + {0x785c, CRL_REG_LEN_08BIT, 0x00}, + {0x785d, CRL_REG_LEN_08BIT, 0x00}, + {0x785e, CRL_REG_LEN_08BIT, 0x01}, + {0x785f, CRL_REG_LEN_08BIT, 0x11}, + {0x7860, CRL_REG_LEN_08BIT, 0x00}, + {0x7861, CRL_REG_LEN_08BIT, 0x00}, + {0x7862, CRL_REG_LEN_08BIT, 0x01}, + {0x7863, CRL_REG_LEN_08BIT, 0x00}, + {0x7864, CRL_REG_LEN_08BIT, 0x07}, + {0x7865, CRL_REG_LEN_08BIT, 0x00}, + {0x7866, CRL_REG_LEN_08BIT, 0x01}, + {0x7867, CRL_REG_LEN_08BIT, 0xff}, + {0x7868, CRL_REG_LEN_08BIT, 0x02}, + {0x7869, CRL_REG_LEN_08BIT, 0xa0}, + {0x786a, CRL_REG_LEN_08BIT, 0x0f}, + {0x786b, CRL_REG_LEN_08BIT, 0x00}, + {0x786c, CRL_REG_LEN_08BIT, 0x08}, + {0x786d, CRL_REG_LEN_08BIT, 0x3a}, + {0x786e, CRL_REG_LEN_08BIT, 0x08}, + {0x786f, CRL_REG_LEN_08BIT, 0x6a}, + {0x7870, CRL_REG_LEN_08BIT, 0x0f}, + {0x7871, CRL_REG_LEN_08BIT, 0x00}, + {0x7872, CRL_REG_LEN_08BIT, 0x04}, + {0x7873, CRL_REG_LEN_08BIT, 0xc0}, + {0x7874, CRL_REG_LEN_08BIT, 0x09}, + {0x7875, CRL_REG_LEN_08BIT, 0x19}, + {0x7876, CRL_REG_LEN_08BIT, 0x04}, + {0x7877, CRL_REG_LEN_08BIT, 0x99}, + {0x7878, CRL_REG_LEN_08BIT, 0x07}, + {0x7879, CRL_REG_LEN_08BIT, 0x14}, + {0x787a, CRL_REG_LEN_08BIT, 0x00}, + {0x787b, CRL_REG_LEN_08BIT, 0x01}, + {0x787c, CRL_REG_LEN_08BIT, 0x04}, + {0x787d, CRL_REG_LEN_08BIT, 0xa4}, + {0x787e, CRL_REG_LEN_08BIT, 0x00}, + {0x787f, CRL_REG_LEN_08BIT, 0x07}, + {0x7880, CRL_REG_LEN_08BIT, 0x04}, + {0x7881, CRL_REG_LEN_08BIT, 0xa6}, + {0x7882, 
CRL_REG_LEN_08BIT, 0x00}, + {0x7883, CRL_REG_LEN_08BIT, 0x00}, + {0x7884, CRL_REG_LEN_08BIT, 0x04}, + {0x7885, CRL_REG_LEN_08BIT, 0xa0}, + {0x7886, CRL_REG_LEN_08BIT, 0x04}, + {0x7887, CRL_REG_LEN_08BIT, 0x80}, + {0x7888, CRL_REG_LEN_08BIT, 0x04}, + {0x7889, CRL_REG_LEN_08BIT, 0x00}, + {0x788a, CRL_REG_LEN_08BIT, 0x05}, + {0x788b, CRL_REG_LEN_08BIT, 0x03}, + {0x788c, CRL_REG_LEN_08BIT, 0x06}, + {0x788d, CRL_REG_LEN_08BIT, 0x00}, + {0x788e, CRL_REG_LEN_08BIT, 0x0f}, + {0x788f, CRL_REG_LEN_08BIT, 0x00}, + {0x7890, CRL_REG_LEN_08BIT, 0x0f}, + {0x7891, CRL_REG_LEN_08BIT, 0x00}, + {0x7892, CRL_REG_LEN_08BIT, 0x0f}, + {0x7893, CRL_REG_LEN_08BIT, 0x00}, + {0x30a3, CRL_REG_LEN_08BIT, 0x00}, + {0x30a7, CRL_REG_LEN_08BIT, 0x48}, + {0x30ab, CRL_REG_LEN_08BIT, 0x04}, + {0x30af, CRL_REG_LEN_08BIT, 0x40}, + {0x3001, CRL_REG_LEN_08BIT, 0x32}, + {0x3005, CRL_REG_LEN_08BIT, 0x13}, + {0x3014, CRL_REG_LEN_08BIT, 0x44}, + {0x3196, CRL_REG_LEN_08BIT, 0x00}, + {0x3197, CRL_REG_LEN_08BIT, 0x00}, + {0x3195, CRL_REG_LEN_08BIT, 0x04}, + {0x31e3, CRL_REG_LEN_08BIT, 0x03}, + {0x31e4, CRL_REG_LEN_08BIT, 0x13}, + {0x315a, CRL_REG_LEN_08BIT, 0x01}, + {0x315b, CRL_REG_LEN_08BIT, 0x00}, + {0x315c, CRL_REG_LEN_08BIT, 0x01}, + {0x315d, CRL_REG_LEN_08BIT, 0x00}, + {0x315e, CRL_REG_LEN_08BIT, 0x01}, + {0x315f, CRL_REG_LEN_08BIT, 0x00}, + {0x3250, CRL_REG_LEN_08BIT, 0xf7}, +}; + +/* ov2775_1928x1088_3x12_30fps_mipi960_regset */ +static struct crl_register_write_rep ov2775_3x12_30fps_mipi960_regset[] = { + {0x3000, CRL_REG_LEN_08BIT, 0x02}, + {0x3001, CRL_REG_LEN_08BIT, 0x28}, + {0x3002, CRL_REG_LEN_08BIT, 0x03}, + {0x3003, CRL_REG_LEN_08BIT, 0x01}, + {0x3004, CRL_REG_LEN_08BIT, 0x02}, + {0x3005, CRL_REG_LEN_08BIT, 0x26}, + {0x3006, CRL_REG_LEN_08BIT, 0x00}, + {0x3007, CRL_REG_LEN_08BIT, 0x07}, + {0x3008, CRL_REG_LEN_08BIT, 0x01}, + {0x3009, CRL_REG_LEN_08BIT, 0x00}, + {0x300c, CRL_REG_LEN_08BIT, 0x6c}, + {0x300e, CRL_REG_LEN_08BIT, 0x80}, + {0x300f, CRL_REG_LEN_08BIT, 0x00}, + {0x3014, 
CRL_REG_LEN_08BIT, 0xc4}, + {0x3015, CRL_REG_LEN_08BIT, 0x00}, + {0x3017, CRL_REG_LEN_08BIT, 0x00}, + {0x3018, CRL_REG_LEN_08BIT, 0x00}, + {0x3019, CRL_REG_LEN_08BIT, 0x00}, + {0x301a, CRL_REG_LEN_08BIT, 0x00}, + {0x301b, CRL_REG_LEN_08BIT, 0x0e}, + {0x301e, CRL_REG_LEN_08BIT, 0x17}, + {0x301f, CRL_REG_LEN_08BIT, 0xe1}, + {0x3030, CRL_REG_LEN_08BIT, 0x02}, + {0x3031, CRL_REG_LEN_08BIT, 0x62}, + {0x3032, CRL_REG_LEN_08BIT, 0xf0}, + {0x3033, CRL_REG_LEN_08BIT, 0x30}, + {0x3034, CRL_REG_LEN_08BIT, 0x3f}, + {0x3035, CRL_REG_LEN_08BIT, 0x5f}, + {0x3036, CRL_REG_LEN_08BIT, 0x02}, + {0x3037, CRL_REG_LEN_08BIT, 0x9f}, + {0x3038, CRL_REG_LEN_08BIT, 0x04}, + {0x3039, CRL_REG_LEN_08BIT, 0xb7}, + {0x303a, CRL_REG_LEN_08BIT, 0x04}, + {0x303b, CRL_REG_LEN_08BIT, 0x07}, + {0x303c, CRL_REG_LEN_08BIT, 0xf0}, + {0x303d, CRL_REG_LEN_08BIT, 0x00}, + {0x303e, CRL_REG_LEN_08BIT, 0x0b}, + {0x303f, CRL_REG_LEN_08BIT, 0xe3}, + {0x3040, CRL_REG_LEN_08BIT, 0xf3}, + {0x3041, CRL_REG_LEN_08BIT, 0x29}, + {0x3042, CRL_REG_LEN_08BIT, 0xf6}, + {0x3043, CRL_REG_LEN_08BIT, 0x65}, + {0x3044, CRL_REG_LEN_08BIT, 0x06}, + {0x3045, CRL_REG_LEN_08BIT, 0x0f}, + {0x3046, CRL_REG_LEN_08BIT, 0x59}, + {0x3047, CRL_REG_LEN_08BIT, 0x07}, + {0x3048, CRL_REG_LEN_08BIT, 0x82}, + {0x3049, CRL_REG_LEN_08BIT, 0xcf}, + {0x304a, CRL_REG_LEN_08BIT, 0x12}, + {0x304b, CRL_REG_LEN_08BIT, 0x40}, + {0x304c, CRL_REG_LEN_08BIT, 0x33}, + {0x304d, CRL_REG_LEN_08BIT, 0xa4}, + {0x304e, CRL_REG_LEN_08BIT, 0x0b}, + {0x304f, CRL_REG_LEN_08BIT, 0x3d}, + {0x3050, CRL_REG_LEN_08BIT, 0x10}, + {0x3060, CRL_REG_LEN_08BIT, 0x00}, + {0x3061, CRL_REG_LEN_08BIT, 0x64}, + {0x3062, CRL_REG_LEN_08BIT, 0x00}, + {0x3063, CRL_REG_LEN_08BIT, 0xe4}, + {0x3066, CRL_REG_LEN_08BIT, 0x80}, + {0x3080, CRL_REG_LEN_08BIT, 0x00}, + {0x3081, CRL_REG_LEN_08BIT, 0x00}, + {0x3082, CRL_REG_LEN_08BIT, 0x01}, + {0x3083, CRL_REG_LEN_08BIT, 0xe3}, + {0x3084, CRL_REG_LEN_08BIT, 0x06}, + {0x3085, CRL_REG_LEN_08BIT, 0x00}, + {0x3086, CRL_REG_LEN_08BIT, 0x10}, + {0x3087, 
CRL_REG_LEN_08BIT, 0x10}, + {0x3089, CRL_REG_LEN_08BIT, 0x00}, + {0x308a, CRL_REG_LEN_08BIT, 0x01}, + {0x3093, CRL_REG_LEN_08BIT, 0x00}, + {0x30a0, CRL_REG_LEN_08BIT, 0x00}, + {0x30a1, CRL_REG_LEN_08BIT, 0x04}, + {0x30a2, CRL_REG_LEN_08BIT, 0x00}, + {0x30a3, CRL_REG_LEN_08BIT, 0x08}, + {0x30a4, CRL_REG_LEN_08BIT, 0x07}, + {0x30a5, CRL_REG_LEN_08BIT, 0x8b}, + {0x30a6, CRL_REG_LEN_08BIT, 0x04}, + {0x30a7, CRL_REG_LEN_08BIT, 0x3f}, + {0x30a8, CRL_REG_LEN_08BIT, 0x00}, + {0x30a9, CRL_REG_LEN_08BIT, 0x04}, + {0x30aa, CRL_REG_LEN_08BIT, 0x00}, + {0x30ab, CRL_REG_LEN_08BIT, 0x00}, + {0x30ac, CRL_REG_LEN_08BIT, 0x07}, + {0x30ad, CRL_REG_LEN_08BIT, 0x80}, + {0x30ae, CRL_REG_LEN_08BIT, 0x04}, + {0x30af, CRL_REG_LEN_08BIT, 0x40}, + {0x30b4, CRL_REG_LEN_08BIT, 0x00}, + {0x30b5, CRL_REG_LEN_08BIT, 0x00}, + {0x30ba, CRL_REG_LEN_08BIT, 0x10}, + {0x30bc, CRL_REG_LEN_08BIT, 0x00}, + {0x30bd, CRL_REG_LEN_08BIT, 0x03}, + {0x30be, CRL_REG_LEN_08BIT, 0x5c}, + {0x30bf, CRL_REG_LEN_08BIT, 0x00}, + {0x30c0, CRL_REG_LEN_08BIT, 0x01}, + {0x30c1, CRL_REG_LEN_08BIT, 0x00}, + {0x30c2, CRL_REG_LEN_08BIT, 0x20}, + {0x30c3, CRL_REG_LEN_08BIT, 0x00}, + {0x30c4, CRL_REG_LEN_08BIT, 0x4a}, + {0x30c5, CRL_REG_LEN_08BIT, 0x00}, + {0x30c7, CRL_REG_LEN_08BIT, 0x00}, + {0x30c8, CRL_REG_LEN_08BIT, 0x00}, + {0x30d1, CRL_REG_LEN_08BIT, 0x00}, + {0x30d2, CRL_REG_LEN_08BIT, 0x00}, + {0x30d3, CRL_REG_LEN_08BIT, 0x80}, + {0x30d4, CRL_REG_LEN_08BIT, 0x00}, + {0x30d9, CRL_REG_LEN_08BIT, 0x09}, + {0x30da, CRL_REG_LEN_08BIT, 0x64}, + {0x30dd, CRL_REG_LEN_08BIT, 0x00}, + {0x30de, CRL_REG_LEN_08BIT, 0x16}, + {0x30df, CRL_REG_LEN_08BIT, 0x00}, + {0x30e0, CRL_REG_LEN_08BIT, 0x17}, + {0x30e1, CRL_REG_LEN_08BIT, 0x00}, + {0x30e2, CRL_REG_LEN_08BIT, 0x18}, + {0x30e3, CRL_REG_LEN_08BIT, 0x10}, + {0x30e4, CRL_REG_LEN_08BIT, 0x04}, + {0x30e5, CRL_REG_LEN_08BIT, 0x00}, + {0x30e6, CRL_REG_LEN_08BIT, 0x00}, + {0x30e7, CRL_REG_LEN_08BIT, 0x00}, + {0x30e8, CRL_REG_LEN_08BIT, 0x00}, + {0x30e9, CRL_REG_LEN_08BIT, 0x00}, + {0x30ea, 
CRL_REG_LEN_08BIT, 0x00}, + {0x30eb, CRL_REG_LEN_08BIT, 0x00}, + {0x30ec, CRL_REG_LEN_08BIT, 0x00}, + {0x30ed, CRL_REG_LEN_08BIT, 0x00}, + {0x3101, CRL_REG_LEN_08BIT, 0x00}, + {0x3102, CRL_REG_LEN_08BIT, 0x00}, + {0x3103, CRL_REG_LEN_08BIT, 0x00}, + {0x3104, CRL_REG_LEN_08BIT, 0x00}, + {0x3105, CRL_REG_LEN_08BIT, 0x8c}, + {0x3106, CRL_REG_LEN_08BIT, 0x87}, + {0x3107, CRL_REG_LEN_08BIT, 0xc0}, + {0x3108, CRL_REG_LEN_08BIT, 0x9d}, + {0x3109, CRL_REG_LEN_08BIT, 0x8d}, + {0x310a, CRL_REG_LEN_08BIT, 0x8d}, + {0x310b, CRL_REG_LEN_08BIT, 0x6a}, + {0x310c, CRL_REG_LEN_08BIT, 0x3a}, + {0x310d, CRL_REG_LEN_08BIT, 0x5a}, + {0x310e, CRL_REG_LEN_08BIT, 0x00}, + {0x3120, CRL_REG_LEN_08BIT, 0x00}, + {0x3121, CRL_REG_LEN_08BIT, 0x00}, + {0x3122, CRL_REG_LEN_08BIT, 0x00}, + {0x3123, CRL_REG_LEN_08BIT, 0x00}, + {0x3124, CRL_REG_LEN_08BIT, 0x00}, + {0x3125, CRL_REG_LEN_08BIT, 0x70}, + {0x3126, CRL_REG_LEN_08BIT, 0x1f}, + {0x3127, CRL_REG_LEN_08BIT, 0x0f}, + {0x3128, CRL_REG_LEN_08BIT, 0x00}, + {0x3129, CRL_REG_LEN_08BIT, 0x3a}, + {0x312a, CRL_REG_LEN_08BIT, 0x02}, + {0x312b, CRL_REG_LEN_08BIT, 0x0f}, + {0x312c, CRL_REG_LEN_08BIT, 0x00}, + {0x312d, CRL_REG_LEN_08BIT, 0x0f}, + {0x312e, CRL_REG_LEN_08BIT, 0x1d}, + {0x312f, CRL_REG_LEN_08BIT, 0x00}, + {0x3130, CRL_REG_LEN_08BIT, 0x00}, + {0x3131, CRL_REG_LEN_08BIT, 0x00}, + {0x3132, CRL_REG_LEN_08BIT, 0x00}, + {0x3140, CRL_REG_LEN_08BIT, 0x0a}, + {0x3141, CRL_REG_LEN_08BIT, 0x03}, + {0x3142, CRL_REG_LEN_08BIT, 0x00}, + {0x3143, CRL_REG_LEN_08BIT, 0x00}, + {0x3144, CRL_REG_LEN_08BIT, 0x00}, + {0x3145, CRL_REG_LEN_08BIT, 0x00}, + {0x3146, CRL_REG_LEN_08BIT, 0x00}, + {0x3147, CRL_REG_LEN_08BIT, 0x00}, + {0x3148, CRL_REG_LEN_08BIT, 0x00}, + {0x3149, CRL_REG_LEN_08BIT, 0x00}, + {0x314a, CRL_REG_LEN_08BIT, 0x00}, + {0x314b, CRL_REG_LEN_08BIT, 0x00}, + {0x314c, CRL_REG_LEN_08BIT, 0x00}, + {0x314d, CRL_REG_LEN_08BIT, 0x00}, + {0x314e, CRL_REG_LEN_08BIT, 0x1c}, + {0x314f, CRL_REG_LEN_08BIT, 0xff}, + {0x3150, CRL_REG_LEN_08BIT, 0xff}, + {0x3151, 
CRL_REG_LEN_08BIT, 0xff}, + {0x3152, CRL_REG_LEN_08BIT, 0x10}, + {0x3153, CRL_REG_LEN_08BIT, 0x10}, + {0x3154, CRL_REG_LEN_08BIT, 0x10}, + {0x3155, CRL_REG_LEN_08BIT, 0x00}, + {0x3156, CRL_REG_LEN_08BIT, 0x03}, + {0x3157, CRL_REG_LEN_08BIT, 0x00}, + {0x3158, CRL_REG_LEN_08BIT, 0x0f}, + {0x3159, CRL_REG_LEN_08BIT, 0xff}, + {0x315a, CRL_REG_LEN_08BIT, 0x01}, + {0x315b, CRL_REG_LEN_08BIT, 0x00}, + {0x315c, CRL_REG_LEN_08BIT, 0x01}, + {0x315d, CRL_REG_LEN_08BIT, 0x00}, + {0x315e, CRL_REG_LEN_08BIT, 0x01}, + {0x315f, CRL_REG_LEN_08BIT, 0x00}, + {0x3160, CRL_REG_LEN_08BIT, 0x01}, + {0x3161, CRL_REG_LEN_08BIT, 0x00}, + {0x3162, CRL_REG_LEN_08BIT, 0x01}, + {0x3163, CRL_REG_LEN_08BIT, 0x00}, + {0x3164, CRL_REG_LEN_08BIT, 0x01}, + {0x3165, CRL_REG_LEN_08BIT, 0x00}, + {0x3190, CRL_REG_LEN_08BIT, 0x05}, + {0x3191, CRL_REG_LEN_08BIT, 0x99}, + {0x3193, CRL_REG_LEN_08BIT, 0x08}, + {0x3194, CRL_REG_LEN_08BIT, 0x13}, + {0x3195, CRL_REG_LEN_08BIT, 0x33}, + {0x3196, CRL_REG_LEN_08BIT, 0x00}, + {0x3197, CRL_REG_LEN_08BIT, 0x10}, + {0x3198, CRL_REG_LEN_08BIT, 0x00}, + {0x3199, CRL_REG_LEN_08BIT, 0x3f}, + {0x319a, CRL_REG_LEN_08BIT, 0x40}, + {0x319b, CRL_REG_LEN_08BIT, 0x7f}, + {0x319c, CRL_REG_LEN_08BIT, 0x80}, + {0x319d, CRL_REG_LEN_08BIT, 0xbf}, + {0x319e, CRL_REG_LEN_08BIT, 0xc0}, + {0x319f, CRL_REG_LEN_08BIT, 0xff}, + {0x31a0, CRL_REG_LEN_08BIT, 0x24}, + {0x31a1, CRL_REG_LEN_08BIT, 0x55}, + {0x31a2, CRL_REG_LEN_08BIT, 0x00}, + {0x31a3, CRL_REG_LEN_08BIT, 0x08}, + {0x31a6, CRL_REG_LEN_08BIT, 0x00}, + {0x31a7, CRL_REG_LEN_08BIT, 0x00}, + {0x31b0, CRL_REG_LEN_08BIT, 0x00}, + {0x31b1, CRL_REG_LEN_08BIT, 0x00}, + {0x31b2, CRL_REG_LEN_08BIT, 0x02}, + {0x31b3, CRL_REG_LEN_08BIT, 0x00}, + {0x31b4, CRL_REG_LEN_08BIT, 0x00}, + {0x31b5, CRL_REG_LEN_08BIT, 0x01}, + {0x31b6, CRL_REG_LEN_08BIT, 0x00}, + {0x31b7, CRL_REG_LEN_08BIT, 0x00}, + {0x31b8, CRL_REG_LEN_08BIT, 0x00}, + {0x31b9, CRL_REG_LEN_08BIT, 0x00}, + {0x31ba, CRL_REG_LEN_08BIT, 0x00}, + {0x31d0, CRL_REG_LEN_08BIT, 0x3c}, + {0x31d1, 
CRL_REG_LEN_08BIT, 0x34}, + {0x31d2, CRL_REG_LEN_08BIT, 0x3c}, + {0x31d3, CRL_REG_LEN_08BIT, 0x00}, + {0x31d4, CRL_REG_LEN_08BIT, 0x2d}, + {0x31d5, CRL_REG_LEN_08BIT, 0x00}, + {0x31d6, CRL_REG_LEN_08BIT, 0x01}, + {0x31d7, CRL_REG_LEN_08BIT, 0x06}, + {0x31d8, CRL_REG_LEN_08BIT, 0x00}, + {0x31d9, CRL_REG_LEN_08BIT, 0x64}, + {0x31da, CRL_REG_LEN_08BIT, 0x00}, + {0x31db, CRL_REG_LEN_08BIT, 0x30}, + {0x31dc, CRL_REG_LEN_08BIT, 0x04}, + {0x31dd, CRL_REG_LEN_08BIT, 0x69}, + {0x31de, CRL_REG_LEN_08BIT, 0x0a}, + {0x31df, CRL_REG_LEN_08BIT, 0x3c}, + {0x31e0, CRL_REG_LEN_08BIT, 0x04}, + {0x31e1, CRL_REG_LEN_08BIT, 0x32}, + {0x31e2, CRL_REG_LEN_08BIT, 0x00}, + {0x31e3, CRL_REG_LEN_08BIT, 0x00}, + {0x31e4, CRL_REG_LEN_08BIT, 0x08}, + {0x31e5, CRL_REG_LEN_08BIT, 0x80}, + {0x31e6, CRL_REG_LEN_08BIT, 0x00}, + {0x31e7, CRL_REG_LEN_08BIT, 0x2c}, + {0x31e8, CRL_REG_LEN_08BIT, 0x6c}, + {0x31e9, CRL_REG_LEN_08BIT, 0xac}, + {0x31ea, CRL_REG_LEN_08BIT, 0xec}, + {0x31eb, CRL_REG_LEN_08BIT, 0x3f}, + {0x31ec, CRL_REG_LEN_08BIT, 0x0f}, + {0x31ed, CRL_REG_LEN_08BIT, 0x20}, + {0x31ee, CRL_REG_LEN_08BIT, 0x04}, + {0x31ef, CRL_REG_LEN_08BIT, 0x48}, + {0x31f0, CRL_REG_LEN_08BIT, 0x07}, + {0x31f1, CRL_REG_LEN_08BIT, 0x90}, + {0x31f2, CRL_REG_LEN_08BIT, 0x04}, + {0x31f3, CRL_REG_LEN_08BIT, 0x48}, + {0x31f4, CRL_REG_LEN_08BIT, 0x07}, + {0x31f5, CRL_REG_LEN_08BIT, 0x90}, + {0x31f6, CRL_REG_LEN_08BIT, 0x04}, + {0x31f7, CRL_REG_LEN_08BIT, 0x48}, + {0x31f8, CRL_REG_LEN_08BIT, 0x07}, + {0x31f9, CRL_REG_LEN_08BIT, 0x90}, + {0x31fa, CRL_REG_LEN_08BIT, 0x04}, + {0x31fb, CRL_REG_LEN_08BIT, 0x48}, + {0x31fd, CRL_REG_LEN_08BIT, 0xcb}, + {0x31fe, CRL_REG_LEN_08BIT, 0x01}, + {0x31ff, CRL_REG_LEN_08BIT, 0x03}, + {0x3200, CRL_REG_LEN_08BIT, 0x00}, + {0x3201, CRL_REG_LEN_08BIT, 0xff}, + {0x3202, CRL_REG_LEN_08BIT, 0x00}, + {0x3203, CRL_REG_LEN_08BIT, 0xff}, + {0x3204, CRL_REG_LEN_08BIT, 0xff}, + {0x3205, CRL_REG_LEN_08BIT, 0xff}, + {0x3206, CRL_REG_LEN_08BIT, 0xff}, + {0x3207, CRL_REG_LEN_08BIT, 0xff}, + {0x3208, 
CRL_REG_LEN_08BIT, 0xff}, + {0x3209, CRL_REG_LEN_08BIT, 0xff}, + {0x320a, CRL_REG_LEN_08BIT, 0xff}, + {0x320b, CRL_REG_LEN_08BIT, 0x1b}, + {0x320c, CRL_REG_LEN_08BIT, 0x1f}, + {0x320d, CRL_REG_LEN_08BIT, 0x1e}, + {0x320e, CRL_REG_LEN_08BIT, 0x30}, + {0x320f, CRL_REG_LEN_08BIT, 0x2d}, + {0x3210, CRL_REG_LEN_08BIT, 0x2c}, + {0x3211, CRL_REG_LEN_08BIT, 0x2b}, + {0x3212, CRL_REG_LEN_08BIT, 0x2a}, + {0x3213, CRL_REG_LEN_08BIT, 0x24}, + {0x3214, CRL_REG_LEN_08BIT, 0x22}, + {0x3215, CRL_REG_LEN_08BIT, 0x00}, + {0x3216, CRL_REG_LEN_08BIT, 0x04}, + {0x3217, CRL_REG_LEN_08BIT, 0x2c}, + {0x3218, CRL_REG_LEN_08BIT, 0x6c}, + {0x3219, CRL_REG_LEN_08BIT, 0xac}, + {0x321a, CRL_REG_LEN_08BIT, 0xec}, + {0x321b, CRL_REG_LEN_08BIT, 0x00}, + {0x3230, CRL_REG_LEN_08BIT, 0x3a}, + {0x3231, CRL_REG_LEN_08BIT, 0x00}, + {0x3232, CRL_REG_LEN_08BIT, 0x80}, + {0x3233, CRL_REG_LEN_08BIT, 0x00}, + {0x3234, CRL_REG_LEN_08BIT, 0x10}, + {0x3235, CRL_REG_LEN_08BIT, 0xaa}, + {0x3236, CRL_REG_LEN_08BIT, 0x55}, + {0x3237, CRL_REG_LEN_08BIT, 0x99}, + {0x3238, CRL_REG_LEN_08BIT, 0x66}, + {0x3239, CRL_REG_LEN_08BIT, 0x08}, + {0x323a, CRL_REG_LEN_08BIT, 0x88}, + {0x323b, CRL_REG_LEN_08BIT, 0x00}, + {0x323c, CRL_REG_LEN_08BIT, 0x00}, + {0x323d, CRL_REG_LEN_08BIT, 0x03}, + {0x3250, CRL_REG_LEN_08BIT, 0x33}, + {0x3251, CRL_REG_LEN_08BIT, 0x00}, + {0x3252, CRL_REG_LEN_08BIT, 0x20}, + {0x3253, CRL_REG_LEN_08BIT, 0x00}, + {0x3254, CRL_REG_LEN_08BIT, 0x11}, + {0x3255, CRL_REG_LEN_08BIT, 0x01}, + {0x3256, CRL_REG_LEN_08BIT, 0x00}, + {0x3257, CRL_REG_LEN_08BIT, 0x00}, + {0x3258, CRL_REG_LEN_08BIT, 0x00}, + {0x3270, CRL_REG_LEN_08BIT, 0x01}, + {0x3271, CRL_REG_LEN_08BIT, 0xc0}, + {0x3272, CRL_REG_LEN_08BIT, 0xf0}, + {0x3273, CRL_REG_LEN_08BIT, 0x01}, + {0x3274, CRL_REG_LEN_08BIT, 0x00}, + {0x3275, CRL_REG_LEN_08BIT, 0x40}, + {0x3276, CRL_REG_LEN_08BIT, 0x02}, + {0x3277, CRL_REG_LEN_08BIT, 0x08}, + {0x3278, CRL_REG_LEN_08BIT, 0x10}, + {0x3279, CRL_REG_LEN_08BIT, 0x04}, + {0x327a, CRL_REG_LEN_08BIT, 0x00}, + {0x327b, 
CRL_REG_LEN_08BIT, 0x03}, + {0x327c, CRL_REG_LEN_08BIT, 0x10}, + {0x327d, CRL_REG_LEN_08BIT, 0x60}, + {0x327e, CRL_REG_LEN_08BIT, 0xc0}, + {0x327f, CRL_REG_LEN_08BIT, 0x06}, + {0x3288, CRL_REG_LEN_08BIT, 0x10}, + {0x3289, CRL_REG_LEN_08BIT, 0x00}, + {0x328a, CRL_REG_LEN_08BIT, 0x08}, + {0x328b, CRL_REG_LEN_08BIT, 0x00}, + {0x328c, CRL_REG_LEN_08BIT, 0x04}, + {0x328d, CRL_REG_LEN_08BIT, 0x00}, + {0x328e, CRL_REG_LEN_08BIT, 0x02}, + {0x328f, CRL_REG_LEN_08BIT, 0x00}, + {0x3290, CRL_REG_LEN_08BIT, 0x20}, + {0x3291, CRL_REG_LEN_08BIT, 0x00}, + {0x3292, CRL_REG_LEN_08BIT, 0x10}, + {0x3293, CRL_REG_LEN_08BIT, 0x00}, + {0x3294, CRL_REG_LEN_08BIT, 0x08}, + {0x3295, CRL_REG_LEN_08BIT, 0x00}, + {0x3296, CRL_REG_LEN_08BIT, 0x04}, + {0x3297, CRL_REG_LEN_08BIT, 0x00}, + {0x3298, CRL_REG_LEN_08BIT, 0x40}, + {0x3299, CRL_REG_LEN_08BIT, 0x00}, + {0x329a, CRL_REG_LEN_08BIT, 0x20}, + {0x329b, CRL_REG_LEN_08BIT, 0x00}, + {0x329c, CRL_REG_LEN_08BIT, 0x10}, + {0x329d, CRL_REG_LEN_08BIT, 0x00}, + {0x329e, CRL_REG_LEN_08BIT, 0x08}, + {0x329f, CRL_REG_LEN_08BIT, 0x00}, + {0x32a0, CRL_REG_LEN_08BIT, 0x7f}, + {0x32a1, CRL_REG_LEN_08BIT, 0xff}, + {0x32a2, CRL_REG_LEN_08BIT, 0x40}, + {0x32a3, CRL_REG_LEN_08BIT, 0x00}, + {0x32a4, CRL_REG_LEN_08BIT, 0x20}, + {0x32a5, CRL_REG_LEN_08BIT, 0x00}, + {0x32a6, CRL_REG_LEN_08BIT, 0x10}, + {0x32a7, CRL_REG_LEN_08BIT, 0x00}, + {0x32a8, CRL_REG_LEN_08BIT, 0x00}, + {0x32a9, CRL_REG_LEN_08BIT, 0x00}, + {0x32aa, CRL_REG_LEN_08BIT, 0x00}, + {0x32ab, CRL_REG_LEN_08BIT, 0x00}, + {0x32ac, CRL_REG_LEN_08BIT, 0x00}, + {0x32ad, CRL_REG_LEN_08BIT, 0x00}, + {0x32ae, CRL_REG_LEN_08BIT, 0x00}, + {0x32af, CRL_REG_LEN_08BIT, 0x00}, + {0x32b0, CRL_REG_LEN_08BIT, 0x00}, + {0x32b1, CRL_REG_LEN_08BIT, 0x00}, + {0x32b2, CRL_REG_LEN_08BIT, 0x00}, + {0x32b3, CRL_REG_LEN_08BIT, 0x00}, + {0x32b4, CRL_REG_LEN_08BIT, 0x00}, + {0x32b5, CRL_REG_LEN_08BIT, 0x00}, + {0x32b6, CRL_REG_LEN_08BIT, 0x00}, + {0x32b7, CRL_REG_LEN_08BIT, 0x00}, + {0x32b8, CRL_REG_LEN_08BIT, 0x00}, + {0x32b9, 
CRL_REG_LEN_08BIT, 0x00}, + {0x32ba, CRL_REG_LEN_08BIT, 0x00}, + {0x32bb, CRL_REG_LEN_08BIT, 0x00}, + {0x32bc, CRL_REG_LEN_08BIT, 0x00}, + {0x32bd, CRL_REG_LEN_08BIT, 0x00}, + {0x32be, CRL_REG_LEN_08BIT, 0x00}, + {0x32bf, CRL_REG_LEN_08BIT, 0x00}, + {0x32c0, CRL_REG_LEN_08BIT, 0x00}, + {0x32c1, CRL_REG_LEN_08BIT, 0x00}, + {0x32c2, CRL_REG_LEN_08BIT, 0x00}, + {0x32c3, CRL_REG_LEN_08BIT, 0x00}, + {0x32c4, CRL_REG_LEN_08BIT, 0x00}, + {0x32c5, CRL_REG_LEN_08BIT, 0x00}, + {0x32c6, CRL_REG_LEN_08BIT, 0x00}, + {0x32c7, CRL_REG_LEN_08BIT, 0x00}, + {0x32c8, CRL_REG_LEN_08BIT, 0x87}, + {0x32c9, CRL_REG_LEN_08BIT, 0x00}, + {0x3330, CRL_REG_LEN_08BIT, 0x03}, + {0x3331, CRL_REG_LEN_08BIT, 0xc8}, + {0x3332, CRL_REG_LEN_08BIT, 0x02}, + {0x3333, CRL_REG_LEN_08BIT, 0x24}, + {0x3334, CRL_REG_LEN_08BIT, 0x00}, + {0x3335, CRL_REG_LEN_08BIT, 0x00}, + {0x3336, CRL_REG_LEN_08BIT, 0x00}, + {0x3337, CRL_REG_LEN_08BIT, 0x00}, + {0x3338, CRL_REG_LEN_08BIT, 0x03}, + {0x3339, CRL_REG_LEN_08BIT, 0xc8}, + {0x333a, CRL_REG_LEN_08BIT, 0x02}, + {0x333b, CRL_REG_LEN_08BIT, 0x24}, + {0x333c, CRL_REG_LEN_08BIT, 0x00}, + {0x333d, CRL_REG_LEN_08BIT, 0x00}, + {0x333e, CRL_REG_LEN_08BIT, 0x00}, + {0x333f, CRL_REG_LEN_08BIT, 0x00}, + {0x3340, CRL_REG_LEN_08BIT, 0x03}, + {0x3341, CRL_REG_LEN_08BIT, 0xc8}, + {0x3342, CRL_REG_LEN_08BIT, 0x02}, + {0x3343, CRL_REG_LEN_08BIT, 0x24}, + {0x3344, CRL_REG_LEN_08BIT, 0x00}, + {0x3345, CRL_REG_LEN_08BIT, 0x00}, + {0x3346, CRL_REG_LEN_08BIT, 0x00}, + {0x3347, CRL_REG_LEN_08BIT, 0x00}, + {0x3348, CRL_REG_LEN_08BIT, 0x40}, + {0x3349, CRL_REG_LEN_08BIT, 0x00}, + {0x334a, CRL_REG_LEN_08BIT, 0x00}, + {0x334b, CRL_REG_LEN_08BIT, 0x00}, + {0x334c, CRL_REG_LEN_08BIT, 0x00}, + {0x334d, CRL_REG_LEN_08BIT, 0x00}, + {0x334e, CRL_REG_LEN_08BIT, 0x80}, + {0x3360, CRL_REG_LEN_08BIT, 0x01}, + {0x3361, CRL_REG_LEN_08BIT, 0x00}, + {0x3362, CRL_REG_LEN_08BIT, 0x01}, + {0x3363, CRL_REG_LEN_08BIT, 0x00}, + {0x3364, CRL_REG_LEN_08BIT, 0x01}, + {0x3365, CRL_REG_LEN_08BIT, 0x00}, + {0x3366, 
CRL_REG_LEN_08BIT, 0x01}, + {0x3367, CRL_REG_LEN_08BIT, 0x00}, + {0x3368, CRL_REG_LEN_08BIT, 0x01}, + {0x3369, CRL_REG_LEN_08BIT, 0x00}, + {0x336a, CRL_REG_LEN_08BIT, 0x01}, + {0x336b, CRL_REG_LEN_08BIT, 0x00}, + {0x336c, CRL_REG_LEN_08BIT, 0x01}, + {0x336d, CRL_REG_LEN_08BIT, 0x00}, + {0x336e, CRL_REG_LEN_08BIT, 0x01}, + {0x336f, CRL_REG_LEN_08BIT, 0x00}, + {0x3370, CRL_REG_LEN_08BIT, 0x01}, + {0x3371, CRL_REG_LEN_08BIT, 0x00}, + {0x3372, CRL_REG_LEN_08BIT, 0x01}, + {0x3373, CRL_REG_LEN_08BIT, 0x00}, + {0x3374, CRL_REG_LEN_08BIT, 0x01}, + {0x3375, CRL_REG_LEN_08BIT, 0x00}, + {0x3376, CRL_REG_LEN_08BIT, 0x01}, + {0x3377, CRL_REG_LEN_08BIT, 0x00}, + {0x3378, CRL_REG_LEN_08BIT, 0x00}, + {0x3379, CRL_REG_LEN_08BIT, 0x00}, + {0x337a, CRL_REG_LEN_08BIT, 0x00}, + {0x337b, CRL_REG_LEN_08BIT, 0x00}, + {0x337c, CRL_REG_LEN_08BIT, 0x00}, + {0x337d, CRL_REG_LEN_08BIT, 0x00}, + {0x337e, CRL_REG_LEN_08BIT, 0x00}, + {0x337f, CRL_REG_LEN_08BIT, 0x00}, + {0x3380, CRL_REG_LEN_08BIT, 0x00}, + {0x3381, CRL_REG_LEN_08BIT, 0x00}, + {0x3382, CRL_REG_LEN_08BIT, 0x00}, + {0x3383, CRL_REG_LEN_08BIT, 0x00}, + {0x3384, CRL_REG_LEN_08BIT, 0x00}, + {0x3385, CRL_REG_LEN_08BIT, 0x00}, + {0x3386, CRL_REG_LEN_08BIT, 0x00}, + {0x3387, CRL_REG_LEN_08BIT, 0x00}, + {0x3388, CRL_REG_LEN_08BIT, 0x00}, + {0x3389, CRL_REG_LEN_08BIT, 0x00}, + {0x338a, CRL_REG_LEN_08BIT, 0x00}, + {0x338b, CRL_REG_LEN_08BIT, 0x00}, + {0x338c, CRL_REG_LEN_08BIT, 0x00}, + {0x338d, CRL_REG_LEN_08BIT, 0x00}, + {0x338e, CRL_REG_LEN_08BIT, 0x00}, + {0x338f, CRL_REG_LEN_08BIT, 0x00}, + {0x3390, CRL_REG_LEN_08BIT, 0x00}, + {0x3391, CRL_REG_LEN_08BIT, 0x00}, + {0x3392, CRL_REG_LEN_08BIT, 0x00}, + {0x3393, CRL_REG_LEN_08BIT, 0x00}, + {0x3394, CRL_REG_LEN_08BIT, 0x00}, + {0x3395, CRL_REG_LEN_08BIT, 0x00}, + {0x3396, CRL_REG_LEN_08BIT, 0x00}, + {0x3397, CRL_REG_LEN_08BIT, 0x00}, + {0x3398, CRL_REG_LEN_08BIT, 0x00}, + {0x3399, CRL_REG_LEN_08BIT, 0x00}, + {0x339a, CRL_REG_LEN_08BIT, 0x00}, + {0x339b, CRL_REG_LEN_08BIT, 0x00}, + {0x33b0, 
CRL_REG_LEN_08BIT, 0x00}, + {0x33b1, CRL_REG_LEN_08BIT, 0x50}, + {0x33b2, CRL_REG_LEN_08BIT, 0x01}, + {0x33b3, CRL_REG_LEN_08BIT, 0xff}, + {0x33b4, CRL_REG_LEN_08BIT, 0xe0}, + {0x33b5, CRL_REG_LEN_08BIT, 0x6b}, + {0x33b6, CRL_REG_LEN_08BIT, 0x00}, + {0x33b7, CRL_REG_LEN_08BIT, 0x00}, + {0x33b8, CRL_REG_LEN_08BIT, 0x00}, + {0x33b9, CRL_REG_LEN_08BIT, 0x00}, + {0x33ba, CRL_REG_LEN_08BIT, 0x00}, + {0x33bb, CRL_REG_LEN_08BIT, 0x1f}, + {0x33bc, CRL_REG_LEN_08BIT, 0x01}, + {0x33bd, CRL_REG_LEN_08BIT, 0x01}, + {0x33be, CRL_REG_LEN_08BIT, 0x01}, + {0x33bf, CRL_REG_LEN_08BIT, 0x01}, + {0x33c0, CRL_REG_LEN_08BIT, 0x00}, + {0x33c1, CRL_REG_LEN_08BIT, 0x00}, + {0x33c2, CRL_REG_LEN_08BIT, 0x00}, + {0x33c3, CRL_REG_LEN_08BIT, 0x00}, + {0x33e0, CRL_REG_LEN_08BIT, 0x14}, + {0x33e1, CRL_REG_LEN_08BIT, 0x0f}, + {0x33e2, CRL_REG_LEN_08BIT, 0x04}, + {0x33e3, CRL_REG_LEN_08BIT, 0x02}, + {0x33e4, CRL_REG_LEN_08BIT, 0x01}, + {0x33e5, CRL_REG_LEN_08BIT, 0x01}, + {0x33e6, CRL_REG_LEN_08BIT, 0x00}, + {0x33e7, CRL_REG_LEN_08BIT, 0x04}, + {0x33e8, CRL_REG_LEN_08BIT, 0x0c}, + {0x33e9, CRL_REG_LEN_08BIT, 0x02}, + {0x33ea, CRL_REG_LEN_08BIT, 0x02}, + {0x33eb, CRL_REG_LEN_08BIT, 0x02}, + {0x33ec, CRL_REG_LEN_08BIT, 0x03}, + {0x33ed, CRL_REG_LEN_08BIT, 0x02}, + {0x33ee, CRL_REG_LEN_08BIT, 0x05}, + {0x33ef, CRL_REG_LEN_08BIT, 0x0a}, + {0x33f0, CRL_REG_LEN_08BIT, 0x08}, + {0x33f1, CRL_REG_LEN_08BIT, 0x04}, + {0x33f2, CRL_REG_LEN_08BIT, 0x04}, + {0x33f3, CRL_REG_LEN_08BIT, 0x00}, + {0x33f4, CRL_REG_LEN_08BIT, 0x03}, + {0x33f5, CRL_REG_LEN_08BIT, 0x14}, + {0x33f6, CRL_REG_LEN_08BIT, 0x0f}, + {0x33f7, CRL_REG_LEN_08BIT, 0x02}, + {0x33f8, CRL_REG_LEN_08BIT, 0x01}, + {0x33f9, CRL_REG_LEN_08BIT, 0x01}, + {0x33fa, CRL_REG_LEN_08BIT, 0x01}, + {0x33fb, CRL_REG_LEN_08BIT, 0x00}, + {0x33fc, CRL_REG_LEN_08BIT, 0x04}, + {0x33fd, CRL_REG_LEN_08BIT, 0x0c}, + {0x33fe, CRL_REG_LEN_08BIT, 0x02}, + {0x33ff, CRL_REG_LEN_08BIT, 0x02}, + {0x3400, CRL_REG_LEN_08BIT, 0x02}, + {0x3401, CRL_REG_LEN_08BIT, 0x03}, + {0x3402, 
CRL_REG_LEN_08BIT, 0x01}, + {0x3403, CRL_REG_LEN_08BIT, 0x02}, + {0x3404, CRL_REG_LEN_08BIT, 0x08}, + {0x3405, CRL_REG_LEN_08BIT, 0x08}, + {0x3406, CRL_REG_LEN_08BIT, 0x04}, + {0x3407, CRL_REG_LEN_08BIT, 0x04}, + {0x3408, CRL_REG_LEN_08BIT, 0x00}, + {0x3409, CRL_REG_LEN_08BIT, 0x03}, + {0x340a, CRL_REG_LEN_08BIT, 0x14}, + {0x340b, CRL_REG_LEN_08BIT, 0x0f}, + {0x340c, CRL_REG_LEN_08BIT, 0x04}, + {0x340d, CRL_REG_LEN_08BIT, 0x02}, + {0x340e, CRL_REG_LEN_08BIT, 0x01}, + {0x340f, CRL_REG_LEN_08BIT, 0x01}, + {0x3410, CRL_REG_LEN_08BIT, 0x00}, + {0x3411, CRL_REG_LEN_08BIT, 0x04}, + {0x3412, CRL_REG_LEN_08BIT, 0x0c}, + {0x3413, CRL_REG_LEN_08BIT, 0x02}, + {0x3414, CRL_REG_LEN_08BIT, 0x02}, + {0x3415, CRL_REG_LEN_08BIT, 0x02}, + {0x3416, CRL_REG_LEN_08BIT, 0x03}, + {0x3417, CRL_REG_LEN_08BIT, 0x02}, + {0x3418, CRL_REG_LEN_08BIT, 0x05}, + {0x3419, CRL_REG_LEN_08BIT, 0x0a}, + {0x341a, CRL_REG_LEN_08BIT, 0x08}, + {0x341b, CRL_REG_LEN_08BIT, 0x04}, + {0x341c, CRL_REG_LEN_08BIT, 0x04}, + {0x341d, CRL_REG_LEN_08BIT, 0x00}, + {0x341e, CRL_REG_LEN_08BIT, 0x03}, + {0x3440, CRL_REG_LEN_08BIT, 0x00}, + {0x3441, CRL_REG_LEN_08BIT, 0x00}, + {0x3442, CRL_REG_LEN_08BIT, 0x00}, + {0x3443, CRL_REG_LEN_08BIT, 0x00}, + {0x3444, CRL_REG_LEN_08BIT, 0x02}, + {0x3445, CRL_REG_LEN_08BIT, 0xf0}, + {0x3446, CRL_REG_LEN_08BIT, 0x02}, + {0x3447, CRL_REG_LEN_08BIT, 0x08}, + {0x3448, CRL_REG_LEN_08BIT, 0x00}, + {0x3460, CRL_REG_LEN_08BIT, 0x40}, + {0x3461, CRL_REG_LEN_08BIT, 0x40}, + {0x3462, CRL_REG_LEN_08BIT, 0x40}, + {0x3463, CRL_REG_LEN_08BIT, 0x40}, + {0x3464, CRL_REG_LEN_08BIT, 0x03}, + {0x3465, CRL_REG_LEN_08BIT, 0x01}, + {0x3466, CRL_REG_LEN_08BIT, 0x01}, + {0x3467, CRL_REG_LEN_08BIT, 0x02}, + {0x3468, CRL_REG_LEN_08BIT, 0x30}, + {0x3469, CRL_REG_LEN_08BIT, 0x00}, + {0x346a, CRL_REG_LEN_08BIT, 0x35}, + {0x346b, CRL_REG_LEN_08BIT, 0x00}, + {0x3480, CRL_REG_LEN_08BIT, 0x40}, + {0x3481, CRL_REG_LEN_08BIT, 0x00}, + {0x3482, CRL_REG_LEN_08BIT, 0x00}, + {0x3483, CRL_REG_LEN_08BIT, 0x00}, + {0x3484, 
CRL_REG_LEN_08BIT, 0x0d}, + {0x3485, CRL_REG_LEN_08BIT, 0x00}, + {0x3486, CRL_REG_LEN_08BIT, 0x00}, + {0x3487, CRL_REG_LEN_08BIT, 0x00}, + {0x3488, CRL_REG_LEN_08BIT, 0x00}, + {0x3489, CRL_REG_LEN_08BIT, 0x00}, + {0x348a, CRL_REG_LEN_08BIT, 0x00}, + {0x348b, CRL_REG_LEN_08BIT, 0x04}, + {0x348c, CRL_REG_LEN_08BIT, 0x00}, + {0x348d, CRL_REG_LEN_08BIT, 0x01}, + {0x348f, CRL_REG_LEN_08BIT, 0x01}, + {0x3030, CRL_REG_LEN_08BIT, 0x0a}, + {0x3030, CRL_REG_LEN_08BIT, 0x02}, + {0x7000, CRL_REG_LEN_08BIT, 0x58}, + {0x7001, CRL_REG_LEN_08BIT, 0x7a}, + {0x7002, CRL_REG_LEN_08BIT, 0x1a}, + {0x7003, CRL_REG_LEN_08BIT, 0xc1}, + {0x7004, CRL_REG_LEN_08BIT, 0x03}, + {0x7005, CRL_REG_LEN_08BIT, 0xda}, + {0x7006, CRL_REG_LEN_08BIT, 0xbd}, + {0x7007, CRL_REG_LEN_08BIT, 0x03}, + {0x7008, CRL_REG_LEN_08BIT, 0xbd}, + {0x7009, CRL_REG_LEN_08BIT, 0x06}, + {0x700a, CRL_REG_LEN_08BIT, 0xe6}, + {0x700b, CRL_REG_LEN_08BIT, 0xec}, + {0x700c, CRL_REG_LEN_08BIT, 0xbc}, + {0x700d, CRL_REG_LEN_08BIT, 0xff}, + {0x700e, CRL_REG_LEN_08BIT, 0xbc}, + {0x700f, CRL_REG_LEN_08BIT, 0x73}, + {0x7010, CRL_REG_LEN_08BIT, 0xda}, + {0x7011, CRL_REG_LEN_08BIT, 0x72}, + {0x7012, CRL_REG_LEN_08BIT, 0x76}, + {0x7013, CRL_REG_LEN_08BIT, 0xb6}, + {0x7014, CRL_REG_LEN_08BIT, 0xee}, + {0x7015, CRL_REG_LEN_08BIT, 0xcf}, + {0x7016, CRL_REG_LEN_08BIT, 0xac}, + {0x7017, CRL_REG_LEN_08BIT, 0xd0}, + {0x7018, CRL_REG_LEN_08BIT, 0xac}, + {0x7019, CRL_REG_LEN_08BIT, 0xd1}, + {0x701a, CRL_REG_LEN_08BIT, 0x50}, + {0x701b, CRL_REG_LEN_08BIT, 0xac}, + {0x701c, CRL_REG_LEN_08BIT, 0xd2}, + {0x701d, CRL_REG_LEN_08BIT, 0xbc}, + {0x701e, CRL_REG_LEN_08BIT, 0x2e}, + {0x701f, CRL_REG_LEN_08BIT, 0xb4}, + {0x7020, CRL_REG_LEN_08BIT, 0x00}, + {0x7021, CRL_REG_LEN_08BIT, 0xdc}, + {0x7022, CRL_REG_LEN_08BIT, 0xdf}, + {0x7023, CRL_REG_LEN_08BIT, 0xb0}, + {0x7024, CRL_REG_LEN_08BIT, 0x6e}, + {0x7025, CRL_REG_LEN_08BIT, 0xbd}, + {0x7026, CRL_REG_LEN_08BIT, 0x01}, + {0x7027, CRL_REG_LEN_08BIT, 0xd7}, + {0x7028, CRL_REG_LEN_08BIT, 0xed}, + {0x7029, 
CRL_REG_LEN_08BIT, 0xe1}, + {0x702a, CRL_REG_LEN_08BIT, 0x36}, + {0x702b, CRL_REG_LEN_08BIT, 0x30}, + {0x702c, CRL_REG_LEN_08BIT, 0xd3}, + {0x702d, CRL_REG_LEN_08BIT, 0x2e}, + {0x702e, CRL_REG_LEN_08BIT, 0x54}, + {0x702f, CRL_REG_LEN_08BIT, 0x46}, + {0x7030, CRL_REG_LEN_08BIT, 0xbc}, + {0x7031, CRL_REG_LEN_08BIT, 0x22}, + {0x7032, CRL_REG_LEN_08BIT, 0x66}, + {0x7033, CRL_REG_LEN_08BIT, 0xbc}, + {0x7034, CRL_REG_LEN_08BIT, 0x24}, + {0x7035, CRL_REG_LEN_08BIT, 0x2c}, + {0x7036, CRL_REG_LEN_08BIT, 0x28}, + {0x7037, CRL_REG_LEN_08BIT, 0xbc}, + {0x7038, CRL_REG_LEN_08BIT, 0x3c}, + {0x7039, CRL_REG_LEN_08BIT, 0xa1}, + {0x703a, CRL_REG_LEN_08BIT, 0xac}, + {0x703b, CRL_REG_LEN_08BIT, 0xd8}, + {0x703c, CRL_REG_LEN_08BIT, 0xd6}, + {0x703d, CRL_REG_LEN_08BIT, 0xb4}, + {0x703e, CRL_REG_LEN_08BIT, 0x04}, + {0x703f, CRL_REG_LEN_08BIT, 0x46}, + {0x7040, CRL_REG_LEN_08BIT, 0xb7}, + {0x7041, CRL_REG_LEN_08BIT, 0x04}, + {0x7042, CRL_REG_LEN_08BIT, 0xbe}, + {0x7043, CRL_REG_LEN_08BIT, 0x08}, + {0x7044, CRL_REG_LEN_08BIT, 0xc3}, + {0x7045, CRL_REG_LEN_08BIT, 0xd9}, + {0x7046, CRL_REG_LEN_08BIT, 0xad}, + {0x7047, CRL_REG_LEN_08BIT, 0xc3}, + {0x7048, CRL_REG_LEN_08BIT, 0xbc}, + {0x7049, CRL_REG_LEN_08BIT, 0x19}, + {0x704a, CRL_REG_LEN_08BIT, 0xc1}, + {0x704b, CRL_REG_LEN_08BIT, 0x27}, + {0x704c, CRL_REG_LEN_08BIT, 0xe7}, + {0x704d, CRL_REG_LEN_08BIT, 0x00}, + {0x704e, CRL_REG_LEN_08BIT, 0x50}, + {0x704f, CRL_REG_LEN_08BIT, 0x20}, + {0x7050, CRL_REG_LEN_08BIT, 0xb8}, + {0x7051, CRL_REG_LEN_08BIT, 0x02}, + {0x7052, CRL_REG_LEN_08BIT, 0xbc}, + {0x7053, CRL_REG_LEN_08BIT, 0x17}, + {0x7054, CRL_REG_LEN_08BIT, 0xdb}, + {0x7055, CRL_REG_LEN_08BIT, 0xc7}, + {0x7056, CRL_REG_LEN_08BIT, 0xb8}, + {0x7057, CRL_REG_LEN_08BIT, 0x00}, + {0x7058, CRL_REG_LEN_08BIT, 0x28}, + {0x7059, CRL_REG_LEN_08BIT, 0x54}, + {0x705a, CRL_REG_LEN_08BIT, 0xb4}, + {0x705b, CRL_REG_LEN_08BIT, 0x14}, + {0x705c, CRL_REG_LEN_08BIT, 0xab}, + {0x705d, CRL_REG_LEN_08BIT, 0xbe}, + {0x705e, CRL_REG_LEN_08BIT, 0x06}, + {0x705f, 
CRL_REG_LEN_08BIT, 0xd8}, + {0x7060, CRL_REG_LEN_08BIT, 0xd6}, + {0x7061, CRL_REG_LEN_08BIT, 0x00}, + {0x7062, CRL_REG_LEN_08BIT, 0xb4}, + {0x7063, CRL_REG_LEN_08BIT, 0xc7}, + {0x7064, CRL_REG_LEN_08BIT, 0x07}, + {0x7065, CRL_REG_LEN_08BIT, 0xb9}, + {0x7066, CRL_REG_LEN_08BIT, 0x05}, + {0x7067, CRL_REG_LEN_08BIT, 0xee}, + {0x7068, CRL_REG_LEN_08BIT, 0xe6}, + {0x7069, CRL_REG_LEN_08BIT, 0xad}, + {0x706a, CRL_REG_LEN_08BIT, 0xb4}, + {0x706b, CRL_REG_LEN_08BIT, 0x26}, + {0x706c, CRL_REG_LEN_08BIT, 0x19}, + {0x706d, CRL_REG_LEN_08BIT, 0xc1}, + {0x706e, CRL_REG_LEN_08BIT, 0x3a}, + {0x706f, CRL_REG_LEN_08BIT, 0xc3}, + {0x7070, CRL_REG_LEN_08BIT, 0xaf}, + {0x7071, CRL_REG_LEN_08BIT, 0x00}, + {0x7072, CRL_REG_LEN_08BIT, 0xc0}, + {0x7073, CRL_REG_LEN_08BIT, 0x3c}, + {0x7074, CRL_REG_LEN_08BIT, 0xc3}, + {0x7075, CRL_REG_LEN_08BIT, 0xbe}, + {0x7076, CRL_REG_LEN_08BIT, 0xe7}, + {0x7077, CRL_REG_LEN_08BIT, 0x00}, + {0x7078, CRL_REG_LEN_08BIT, 0x15}, + {0x7079, CRL_REG_LEN_08BIT, 0xc2}, + {0x707a, CRL_REG_LEN_08BIT, 0x40}, + {0x707b, CRL_REG_LEN_08BIT, 0xc3}, + {0x707c, CRL_REG_LEN_08BIT, 0xa4}, + {0x707d, CRL_REG_LEN_08BIT, 0xc0}, + {0x707e, CRL_REG_LEN_08BIT, 0x3c}, + {0x707f, CRL_REG_LEN_08BIT, 0x00}, + {0x7080, CRL_REG_LEN_08BIT, 0xb9}, + {0x7081, CRL_REG_LEN_08BIT, 0x64}, + {0x7082, CRL_REG_LEN_08BIT, 0x29}, + {0x7083, CRL_REG_LEN_08BIT, 0x00}, + {0x7084, CRL_REG_LEN_08BIT, 0xb8}, + {0x7085, CRL_REG_LEN_08BIT, 0x12}, + {0x7086, CRL_REG_LEN_08BIT, 0xbe}, + {0x7087, CRL_REG_LEN_08BIT, 0x01}, + {0x7088, CRL_REG_LEN_08BIT, 0xd0}, + {0x7089, CRL_REG_LEN_08BIT, 0xbc}, + {0x708a, CRL_REG_LEN_08BIT, 0x01}, + {0x708b, CRL_REG_LEN_08BIT, 0xac}, + {0x708c, CRL_REG_LEN_08BIT, 0x37}, + {0x708d, CRL_REG_LEN_08BIT, 0xd2}, + {0x708e, CRL_REG_LEN_08BIT, 0xac}, + {0x708f, CRL_REG_LEN_08BIT, 0x45}, + {0x7090, CRL_REG_LEN_08BIT, 0xad}, + {0x7091, CRL_REG_LEN_08BIT, 0x28}, + {0x7092, CRL_REG_LEN_08BIT, 0x00}, + {0x7093, CRL_REG_LEN_08BIT, 0xb8}, + {0x7094, CRL_REG_LEN_08BIT, 0x00}, + {0x7095, 
CRL_REG_LEN_08BIT, 0xbc}, + {0x7096, CRL_REG_LEN_08BIT, 0x01}, + {0x7097, CRL_REG_LEN_08BIT, 0x36}, + {0x7098, CRL_REG_LEN_08BIT, 0xd3}, + {0x7099, CRL_REG_LEN_08BIT, 0x30}, + {0x709a, CRL_REG_LEN_08BIT, 0x04}, + {0x709b, CRL_REG_LEN_08BIT, 0xe0}, + {0x709c, CRL_REG_LEN_08BIT, 0xd8}, + {0x709d, CRL_REG_LEN_08BIT, 0xb4}, + {0x709e, CRL_REG_LEN_08BIT, 0xe9}, + {0x709f, CRL_REG_LEN_08BIT, 0x00}, + {0x70a0, CRL_REG_LEN_08BIT, 0xbe}, + {0x70a1, CRL_REG_LEN_08BIT, 0x05}, + {0x70a2, CRL_REG_LEN_08BIT, 0x62}, + {0x70a3, CRL_REG_LEN_08BIT, 0x07}, + {0x70a4, CRL_REG_LEN_08BIT, 0xb9}, + {0x70a5, CRL_REG_LEN_08BIT, 0x05}, + {0x70a6, CRL_REG_LEN_08BIT, 0xad}, + {0x70a7, CRL_REG_LEN_08BIT, 0xc3}, + {0x70a8, CRL_REG_LEN_08BIT, 0xcf}, + {0x70a9, CRL_REG_LEN_08BIT, 0x00}, + {0x70aa, CRL_REG_LEN_08BIT, 0x15}, + {0x70ab, CRL_REG_LEN_08BIT, 0xc2}, + {0x70ac, CRL_REG_LEN_08BIT, 0x59}, + {0x70ad, CRL_REG_LEN_08BIT, 0xc3}, + {0x70ae, CRL_REG_LEN_08BIT, 0xc9}, + {0x70af, CRL_REG_LEN_08BIT, 0xc0}, + {0x70b0, CRL_REG_LEN_08BIT, 0x55}, + {0x70b1, CRL_REG_LEN_08BIT, 0x00}, + {0x70b2, CRL_REG_LEN_08BIT, 0x46}, + {0x70b3, CRL_REG_LEN_08BIT, 0xa1}, + {0x70b4, CRL_REG_LEN_08BIT, 0xb9}, + {0x70b5, CRL_REG_LEN_08BIT, 0x64}, + {0x70b6, CRL_REG_LEN_08BIT, 0x29}, + {0x70b7, CRL_REG_LEN_08BIT, 0x00}, + {0x70b8, CRL_REG_LEN_08BIT, 0xb8}, + {0x70b9, CRL_REG_LEN_08BIT, 0x02}, + {0x70ba, CRL_REG_LEN_08BIT, 0xbe}, + {0x70bb, CRL_REG_LEN_08BIT, 0x02}, + {0x70bc, CRL_REG_LEN_08BIT, 0xd0}, + {0x70bd, CRL_REG_LEN_08BIT, 0xdc}, + {0x70be, CRL_REG_LEN_08BIT, 0xac}, + {0x70bf, CRL_REG_LEN_08BIT, 0xbc}, + {0x70c0, CRL_REG_LEN_08BIT, 0x01}, + {0x70c1, CRL_REG_LEN_08BIT, 0x37}, + {0x70c2, CRL_REG_LEN_08BIT, 0xac}, + {0x70c3, CRL_REG_LEN_08BIT, 0xd2}, + {0x70c4, CRL_REG_LEN_08BIT, 0x45}, + {0x70c5, CRL_REG_LEN_08BIT, 0xad}, + {0x70c6, CRL_REG_LEN_08BIT, 0x28}, + {0x70c7, CRL_REG_LEN_08BIT, 0x00}, + {0x70c8, CRL_REG_LEN_08BIT, 0xb8}, + {0x70c9, CRL_REG_LEN_08BIT, 0x00}, + {0x70ca, CRL_REG_LEN_08BIT, 0xbc}, + {0x70cb, 
CRL_REG_LEN_08BIT, 0x01}, + {0x70cc, CRL_REG_LEN_08BIT, 0x36}, + {0x70cd, CRL_REG_LEN_08BIT, 0x30}, + {0x70ce, CRL_REG_LEN_08BIT, 0xe0}, + {0x70cf, CRL_REG_LEN_08BIT, 0xd8}, + {0x70d0, CRL_REG_LEN_08BIT, 0xb5}, + {0x70d1, CRL_REG_LEN_08BIT, 0x0b}, + {0x70d2, CRL_REG_LEN_08BIT, 0xd6}, + {0x70d3, CRL_REG_LEN_08BIT, 0xbe}, + {0x70d4, CRL_REG_LEN_08BIT, 0x07}, + {0x70d5, CRL_REG_LEN_08BIT, 0x00}, + {0x70d6, CRL_REG_LEN_08BIT, 0x62}, + {0x70d7, CRL_REG_LEN_08BIT, 0x07}, + {0x70d8, CRL_REG_LEN_08BIT, 0xb9}, + {0x70d9, CRL_REG_LEN_08BIT, 0x05}, + {0x70da, CRL_REG_LEN_08BIT, 0xad}, + {0x70db, CRL_REG_LEN_08BIT, 0xc3}, + {0x70dc, CRL_REG_LEN_08BIT, 0xcf}, + {0x70dd, CRL_REG_LEN_08BIT, 0x46}, + {0x70de, CRL_REG_LEN_08BIT, 0xcd}, + {0x70df, CRL_REG_LEN_08BIT, 0x07}, + {0x70e0, CRL_REG_LEN_08BIT, 0xcd}, + {0x70e1, CRL_REG_LEN_08BIT, 0x00}, + {0x70e2, CRL_REG_LEN_08BIT, 0xe3}, + {0x70e3, CRL_REG_LEN_08BIT, 0x18}, + {0x70e4, CRL_REG_LEN_08BIT, 0xc2}, + {0x70e5, CRL_REG_LEN_08BIT, 0xa2}, + {0x70e6, CRL_REG_LEN_08BIT, 0xb9}, + {0x70e7, CRL_REG_LEN_08BIT, 0x64}, + {0x70e8, CRL_REG_LEN_08BIT, 0xd1}, + {0x70e9, CRL_REG_LEN_08BIT, 0xdd}, + {0x70ea, CRL_REG_LEN_08BIT, 0xac}, + {0x70eb, CRL_REG_LEN_08BIT, 0xcf}, + {0x70ec, CRL_REG_LEN_08BIT, 0xdf}, + {0x70ed, CRL_REG_LEN_08BIT, 0xb5}, + {0x70ee, CRL_REG_LEN_08BIT, 0x19}, + {0x70ef, CRL_REG_LEN_08BIT, 0x46}, + {0x70f0, CRL_REG_LEN_08BIT, 0x50}, + {0x70f1, CRL_REG_LEN_08BIT, 0xb6}, + {0x70f2, CRL_REG_LEN_08BIT, 0xee}, + {0x70f3, CRL_REG_LEN_08BIT, 0xe8}, + {0x70f4, CRL_REG_LEN_08BIT, 0xe6}, + {0x70f5, CRL_REG_LEN_08BIT, 0xbc}, + {0x70f6, CRL_REG_LEN_08BIT, 0x31}, + {0x70f7, CRL_REG_LEN_08BIT, 0xe1}, + {0x70f8, CRL_REG_LEN_08BIT, 0x36}, + {0x70f9, CRL_REG_LEN_08BIT, 0x30}, + {0x70fa, CRL_REG_LEN_08BIT, 0xd3}, + {0x70fb, CRL_REG_LEN_08BIT, 0x2e}, + {0x70fc, CRL_REG_LEN_08BIT, 0x54}, + {0x70fd, CRL_REG_LEN_08BIT, 0xbd}, + {0x70fe, CRL_REG_LEN_08BIT, 0x03}, + {0x70ff, CRL_REG_LEN_08BIT, 0xec}, + {0x7100, CRL_REG_LEN_08BIT, 0x2c}, + {0x7101, 
CRL_REG_LEN_08BIT, 0x50}, + {0x7102, CRL_REG_LEN_08BIT, 0x20}, + {0x7103, CRL_REG_LEN_08BIT, 0x04}, + {0x7104, CRL_REG_LEN_08BIT, 0xb8}, + {0x7105, CRL_REG_LEN_08BIT, 0x02}, + {0x7106, CRL_REG_LEN_08BIT, 0xbc}, + {0x7107, CRL_REG_LEN_08BIT, 0x18}, + {0x7108, CRL_REG_LEN_08BIT, 0xc7}, + {0x7109, CRL_REG_LEN_08BIT, 0xb8}, + {0x710a, CRL_REG_LEN_08BIT, 0x00}, + {0x710b, CRL_REG_LEN_08BIT, 0x28}, + {0x710c, CRL_REG_LEN_08BIT, 0x54}, + {0x710d, CRL_REG_LEN_08BIT, 0xbc}, + {0x710e, CRL_REG_LEN_08BIT, 0x02}, + {0x710f, CRL_REG_LEN_08BIT, 0xb4}, + {0x7110, CRL_REG_LEN_08BIT, 0xda}, + {0x7111, CRL_REG_LEN_08BIT, 0xbe}, + {0x7112, CRL_REG_LEN_08BIT, 0x04}, + {0x7113, CRL_REG_LEN_08BIT, 0xd6}, + {0x7114, CRL_REG_LEN_08BIT, 0xd8}, + {0x7115, CRL_REG_LEN_08BIT, 0xab}, + {0x7116, CRL_REG_LEN_08BIT, 0x00}, + {0x7117, CRL_REG_LEN_08BIT, 0x62}, + {0x7118, CRL_REG_LEN_08BIT, 0x07}, + {0x7119, CRL_REG_LEN_08BIT, 0xb9}, + {0x711a, CRL_REG_LEN_08BIT, 0x05}, + {0x711b, CRL_REG_LEN_08BIT, 0xad}, + {0x711c, CRL_REG_LEN_08BIT, 0xc3}, + {0x711d, CRL_REG_LEN_08BIT, 0xbc}, + {0x711e, CRL_REG_LEN_08BIT, 0xe7}, + {0x711f, CRL_REG_LEN_08BIT, 0xb9}, + {0x7120, CRL_REG_LEN_08BIT, 0x64}, + {0x7121, CRL_REG_LEN_08BIT, 0x29}, + {0x7122, CRL_REG_LEN_08BIT, 0x00}, + {0x7123, CRL_REG_LEN_08BIT, 0xb8}, + {0x7124, CRL_REG_LEN_08BIT, 0x02}, + {0x7125, CRL_REG_LEN_08BIT, 0xbe}, + {0x7126, CRL_REG_LEN_08BIT, 0x00}, + {0x7127, CRL_REG_LEN_08BIT, 0x45}, + {0x7128, CRL_REG_LEN_08BIT, 0xad}, + {0x7129, CRL_REG_LEN_08BIT, 0xe2}, + {0x712a, CRL_REG_LEN_08BIT, 0x28}, + {0x712b, CRL_REG_LEN_08BIT, 0x00}, + {0x712c, CRL_REG_LEN_08BIT, 0xb8}, + {0x712d, CRL_REG_LEN_08BIT, 0x00}, + {0x712e, CRL_REG_LEN_08BIT, 0xe0}, + {0x712f, CRL_REG_LEN_08BIT, 0xd8}, + {0x7130, CRL_REG_LEN_08BIT, 0xb4}, + {0x7131, CRL_REG_LEN_08BIT, 0xe9}, + {0x7132, CRL_REG_LEN_08BIT, 0xbe}, + {0x7133, CRL_REG_LEN_08BIT, 0x03}, + {0x7134, CRL_REG_LEN_08BIT, 0x00}, + {0x7135, CRL_REG_LEN_08BIT, 0x30}, + {0x7136, CRL_REG_LEN_08BIT, 0x62}, + {0x7137, 
CRL_REG_LEN_08BIT, 0x07}, + {0x7138, CRL_REG_LEN_08BIT, 0xb9}, + {0x7139, CRL_REG_LEN_08BIT, 0x05}, + {0x713a, CRL_REG_LEN_08BIT, 0xad}, + {0x713b, CRL_REG_LEN_08BIT, 0xc3}, + {0x713c, CRL_REG_LEN_08BIT, 0xcf}, + {0x713d, CRL_REG_LEN_08BIT, 0x42}, + {0x713e, CRL_REG_LEN_08BIT, 0xe4}, + {0x713f, CRL_REG_LEN_08BIT, 0xcd}, + {0x7140, CRL_REG_LEN_08BIT, 0x07}, + {0x7141, CRL_REG_LEN_08BIT, 0xcd}, + {0x7142, CRL_REG_LEN_08BIT, 0x00}, + {0x7143, CRL_REG_LEN_08BIT, 0x00}, + {0x7144, CRL_REG_LEN_08BIT, 0x17}, + {0x7145, CRL_REG_LEN_08BIT, 0xc2}, + {0x7146, CRL_REG_LEN_08BIT, 0xbb}, + {0x7147, CRL_REG_LEN_08BIT, 0xde}, + {0x7148, CRL_REG_LEN_08BIT, 0xcf}, + {0x7149, CRL_REG_LEN_08BIT, 0xdf}, + {0x714a, CRL_REG_LEN_08BIT, 0xac}, + {0x714b, CRL_REG_LEN_08BIT, 0xd1}, + {0x714c, CRL_REG_LEN_08BIT, 0x44}, + {0x714d, CRL_REG_LEN_08BIT, 0xac}, + {0x714e, CRL_REG_LEN_08BIT, 0xb9}, + {0x714f, CRL_REG_LEN_08BIT, 0x76}, + {0x7150, CRL_REG_LEN_08BIT, 0xb8}, + {0x7151, CRL_REG_LEN_08BIT, 0x08}, + {0x7152, CRL_REG_LEN_08BIT, 0xb6}, + {0x7153, CRL_REG_LEN_08BIT, 0xfe}, + {0x7154, CRL_REG_LEN_08BIT, 0xb4}, + {0x7155, CRL_REG_LEN_08BIT, 0xca}, + {0x7156, CRL_REG_LEN_08BIT, 0xd6}, + {0x7157, CRL_REG_LEN_08BIT, 0xd8}, + {0x7158, CRL_REG_LEN_08BIT, 0xab}, + {0x7159, CRL_REG_LEN_08BIT, 0x00}, + {0x715a, CRL_REG_LEN_08BIT, 0xe1}, + {0x715b, CRL_REG_LEN_08BIT, 0x36}, + {0x715c, CRL_REG_LEN_08BIT, 0x30}, + {0x715d, CRL_REG_LEN_08BIT, 0xd3}, + {0x715e, CRL_REG_LEN_08BIT, 0xbc}, + {0x715f, CRL_REG_LEN_08BIT, 0x29}, + {0x7160, CRL_REG_LEN_08BIT, 0xb4}, + {0x7161, CRL_REG_LEN_08BIT, 0x1f}, + {0x7162, CRL_REG_LEN_08BIT, 0xaa}, + {0x7163, CRL_REG_LEN_08BIT, 0xbd}, + {0x7164, CRL_REG_LEN_08BIT, 0x01}, + {0x7165, CRL_REG_LEN_08BIT, 0xb8}, + {0x7166, CRL_REG_LEN_08BIT, 0x0c}, + {0x7167, CRL_REG_LEN_08BIT, 0x45}, + {0x7168, CRL_REG_LEN_08BIT, 0xa4}, + {0x7169, CRL_REG_LEN_08BIT, 0xbd}, + {0x716a, CRL_REG_LEN_08BIT, 0x03}, + {0x716b, CRL_REG_LEN_08BIT, 0xec}, + {0x716c, CRL_REG_LEN_08BIT, 0xbc}, + {0x716d, 
CRL_REG_LEN_08BIT, 0x3d}, + {0x716e, CRL_REG_LEN_08BIT, 0xc3}, + {0x716f, CRL_REG_LEN_08BIT, 0xcf}, + {0x7170, CRL_REG_LEN_08BIT, 0x42}, + {0x7171, CRL_REG_LEN_08BIT, 0xb8}, + {0x7172, CRL_REG_LEN_08BIT, 0x00}, + {0x7173, CRL_REG_LEN_08BIT, 0xe4}, + {0x7174, CRL_REG_LEN_08BIT, 0xd5}, + {0x7175, CRL_REG_LEN_08BIT, 0x00}, + {0x7176, CRL_REG_LEN_08BIT, 0xb6}, + {0x7177, CRL_REG_LEN_08BIT, 0x00}, + {0x7178, CRL_REG_LEN_08BIT, 0x74}, + {0x7179, CRL_REG_LEN_08BIT, 0xbd}, + {0x717a, CRL_REG_LEN_08BIT, 0x03}, + {0x717b, CRL_REG_LEN_08BIT, 0xb5}, + {0x717c, CRL_REG_LEN_08BIT, 0x39}, + {0x717d, CRL_REG_LEN_08BIT, 0x40}, + {0x717e, CRL_REG_LEN_08BIT, 0x58}, + {0x717f, CRL_REG_LEN_08BIT, 0xdd}, + {0x7180, CRL_REG_LEN_08BIT, 0x19}, + {0x7181, CRL_REG_LEN_08BIT, 0xc1}, + {0x7182, CRL_REG_LEN_08BIT, 0xc8}, + {0x7183, CRL_REG_LEN_08BIT, 0xbd}, + {0x7184, CRL_REG_LEN_08BIT, 0x06}, + {0x7185, CRL_REG_LEN_08BIT, 0x17}, + {0x7186, CRL_REG_LEN_08BIT, 0xc1}, + {0x7187, CRL_REG_LEN_08BIT, 0xc6}, + {0x7188, CRL_REG_LEN_08BIT, 0xe8}, + {0x7189, CRL_REG_LEN_08BIT, 0x00}, + {0x718a, CRL_REG_LEN_08BIT, 0xc0}, + {0x718b, CRL_REG_LEN_08BIT, 0xc8}, + {0x718c, CRL_REG_LEN_08BIT, 0xe6}, + {0x718d, CRL_REG_LEN_08BIT, 0x95}, + {0x718e, CRL_REG_LEN_08BIT, 0x15}, + {0x718f, CRL_REG_LEN_08BIT, 0x00}, + {0x7190, CRL_REG_LEN_08BIT, 0xbc}, + {0x7191, CRL_REG_LEN_08BIT, 0x19}, + {0x7192, CRL_REG_LEN_08BIT, 0xb9}, + {0x7193, CRL_REG_LEN_08BIT, 0xf6}, + {0x7194, CRL_REG_LEN_08BIT, 0x14}, + {0x7195, CRL_REG_LEN_08BIT, 0xc1}, + {0x7196, CRL_REG_LEN_08BIT, 0xd0}, + {0x7197, CRL_REG_LEN_08BIT, 0xd1}, + {0x7198, CRL_REG_LEN_08BIT, 0xac}, + {0x7199, CRL_REG_LEN_08BIT, 0x37}, + {0x719a, CRL_REG_LEN_08BIT, 0xbc}, + {0x719b, CRL_REG_LEN_08BIT, 0x35}, + {0x719c, CRL_REG_LEN_08BIT, 0x36}, + {0x719d, CRL_REG_LEN_08BIT, 0x30}, + {0x719e, CRL_REG_LEN_08BIT, 0xe1}, + {0x719f, CRL_REG_LEN_08BIT, 0xd3}, + {0x71a0, CRL_REG_LEN_08BIT, 0x7a}, + {0x71a1, CRL_REG_LEN_08BIT, 0xb6}, + {0x71a2, CRL_REG_LEN_08BIT, 0x0c}, + {0x71a3, 
CRL_REG_LEN_08BIT, 0xff}, + {0x71a4, CRL_REG_LEN_08BIT, 0xb4}, + {0x71a5, CRL_REG_LEN_08BIT, 0xc7}, + {0x71a6, CRL_REG_LEN_08BIT, 0xd9}, + {0x71a7, CRL_REG_LEN_08BIT, 0x00}, + {0x71a8, CRL_REG_LEN_08BIT, 0xbd}, + {0x71a9, CRL_REG_LEN_08BIT, 0x01}, + {0x71aa, CRL_REG_LEN_08BIT, 0x56}, + {0x71ab, CRL_REG_LEN_08BIT, 0xc0}, + {0x71ac, CRL_REG_LEN_08BIT, 0xda}, + {0x71ad, CRL_REG_LEN_08BIT, 0xb4}, + {0x71ae, CRL_REG_LEN_08BIT, 0x1f}, + {0x71af, CRL_REG_LEN_08BIT, 0x56}, + {0x71b0, CRL_REG_LEN_08BIT, 0xaa}, + {0x71b1, CRL_REG_LEN_08BIT, 0xbc}, + {0x71b2, CRL_REG_LEN_08BIT, 0x08}, + {0x71b3, CRL_REG_LEN_08BIT, 0x00}, + {0x71b4, CRL_REG_LEN_08BIT, 0x57}, + {0x71b5, CRL_REG_LEN_08BIT, 0xe8}, + {0x71b6, CRL_REG_LEN_08BIT, 0xb5}, + {0x71b7, CRL_REG_LEN_08BIT, 0x36}, + {0x71b8, CRL_REG_LEN_08BIT, 0x00}, + {0x71b9, CRL_REG_LEN_08BIT, 0x54}, + {0x71ba, CRL_REG_LEN_08BIT, 0xe7}, + {0x71bb, CRL_REG_LEN_08BIT, 0xc8}, + {0x71bc, CRL_REG_LEN_08BIT, 0xb4}, + {0x71bd, CRL_REG_LEN_08BIT, 0x1f}, + {0x71be, CRL_REG_LEN_08BIT, 0x56}, + {0x71bf, CRL_REG_LEN_08BIT, 0xaa}, + {0x71c0, CRL_REG_LEN_08BIT, 0xbc}, + {0x71c1, CRL_REG_LEN_08BIT, 0x08}, + {0x71c2, CRL_REG_LEN_08BIT, 0x57}, + {0x71c3, CRL_REG_LEN_08BIT, 0x00}, + {0x71c4, CRL_REG_LEN_08BIT, 0xb5}, + {0x71c5, CRL_REG_LEN_08BIT, 0x36}, + {0x71c6, CRL_REG_LEN_08BIT, 0x00}, + {0x71c7, CRL_REG_LEN_08BIT, 0x54}, + {0x71c8, CRL_REG_LEN_08BIT, 0xc8}, + {0x71c9, CRL_REG_LEN_08BIT, 0xb5}, + {0x71ca, CRL_REG_LEN_08BIT, 0x18}, + {0x71cb, CRL_REG_LEN_08BIT, 0xd9}, + {0x71cc, CRL_REG_LEN_08BIT, 0x00}, + {0x71cd, CRL_REG_LEN_08BIT, 0xbd}, + {0x71ce, CRL_REG_LEN_08BIT, 0x01}, + {0x71cf, CRL_REG_LEN_08BIT, 0x56}, + {0x71d0, CRL_REG_LEN_08BIT, 0x08}, + {0x71d1, CRL_REG_LEN_08BIT, 0x57}, + {0x71d2, CRL_REG_LEN_08BIT, 0xe8}, + {0x71d3, CRL_REG_LEN_08BIT, 0xb4}, + {0x71d4, CRL_REG_LEN_08BIT, 0x42}, + {0x71d5, CRL_REG_LEN_08BIT, 0x00}, + {0x71d6, CRL_REG_LEN_08BIT, 0x54}, + {0x71d7, CRL_REG_LEN_08BIT, 0xe7}, + {0x71d8, CRL_REG_LEN_08BIT, 0xc8}, + {0x71d9, 
CRL_REG_LEN_08BIT, 0xab}, + {0x71da, CRL_REG_LEN_08BIT, 0x00}, + {0x71db, CRL_REG_LEN_08BIT, 0x66}, + {0x71dc, CRL_REG_LEN_08BIT, 0x62}, + {0x71dd, CRL_REG_LEN_08BIT, 0x06}, + {0x71de, CRL_REG_LEN_08BIT, 0x74}, + {0x71df, CRL_REG_LEN_08BIT, 0xb9}, + {0x71e0, CRL_REG_LEN_08BIT, 0x05}, + {0x71e1, CRL_REG_LEN_08BIT, 0xb7}, + {0x71e2, CRL_REG_LEN_08BIT, 0x14}, + {0x71e3, CRL_REG_LEN_08BIT, 0x0e}, + {0x71e4, CRL_REG_LEN_08BIT, 0xb7}, + {0x71e5, CRL_REG_LEN_08BIT, 0x04}, + {0x71e6, CRL_REG_LEN_08BIT, 0xc8}, + {0x7600, CRL_REG_LEN_08BIT, 0x04}, + {0x7601, CRL_REG_LEN_08BIT, 0x80}, + {0x7602, CRL_REG_LEN_08BIT, 0x07}, + {0x7603, CRL_REG_LEN_08BIT, 0x44}, + {0x7604, CRL_REG_LEN_08BIT, 0x05}, + {0x7605, CRL_REG_LEN_08BIT, 0x33}, + {0x7606, CRL_REG_LEN_08BIT, 0x0f}, + {0x7607, CRL_REG_LEN_08BIT, 0x00}, + {0x7608, CRL_REG_LEN_08BIT, 0x07}, + {0x7609, CRL_REG_LEN_08BIT, 0x40}, + {0x760a, CRL_REG_LEN_08BIT, 0x04}, + {0x760b, CRL_REG_LEN_08BIT, 0xe5}, + {0x760c, CRL_REG_LEN_08BIT, 0x06}, + {0x760d, CRL_REG_LEN_08BIT, 0x50}, + {0x760e, CRL_REG_LEN_08BIT, 0x04}, + {0x760f, CRL_REG_LEN_08BIT, 0xe4}, + {0x7610, CRL_REG_LEN_08BIT, 0x00}, + {0x7611, CRL_REG_LEN_08BIT, 0x00}, + {0x7612, CRL_REG_LEN_08BIT, 0x06}, + {0x7613, CRL_REG_LEN_08BIT, 0x5c}, + {0x7614, CRL_REG_LEN_08BIT, 0x00}, + {0x7615, CRL_REG_LEN_08BIT, 0x0f}, + {0x7616, CRL_REG_LEN_08BIT, 0x06}, + {0x7617, CRL_REG_LEN_08BIT, 0x1c}, + {0x7618, CRL_REG_LEN_08BIT, 0x00}, + {0x7619, CRL_REG_LEN_08BIT, 0x02}, + {0x761a, CRL_REG_LEN_08BIT, 0x06}, + {0x761b, CRL_REG_LEN_08BIT, 0xa2}, + {0x761c, CRL_REG_LEN_08BIT, 0x00}, + {0x761d, CRL_REG_LEN_08BIT, 0x01}, + {0x761e, CRL_REG_LEN_08BIT, 0x06}, + {0x761f, CRL_REG_LEN_08BIT, 0xae}, + {0x7620, CRL_REG_LEN_08BIT, 0x00}, + {0x7621, CRL_REG_LEN_08BIT, 0x0e}, + {0x7622, CRL_REG_LEN_08BIT, 0x05}, + {0x7623, CRL_REG_LEN_08BIT, 0x30}, + {0x7624, CRL_REG_LEN_08BIT, 0x07}, + {0x7625, CRL_REG_LEN_08BIT, 0x00}, + {0x7626, CRL_REG_LEN_08BIT, 0x0f}, + {0x7627, CRL_REG_LEN_08BIT, 0x00}, + {0x7628, 
CRL_REG_LEN_08BIT, 0x04}, + {0x7629, CRL_REG_LEN_08BIT, 0xe5}, + {0x762a, CRL_REG_LEN_08BIT, 0x05}, + {0x762b, CRL_REG_LEN_08BIT, 0x33}, + {0x762c, CRL_REG_LEN_08BIT, 0x06}, + {0x762d, CRL_REG_LEN_08BIT, 0x12}, + {0x762e, CRL_REG_LEN_08BIT, 0x00}, + {0x762f, CRL_REG_LEN_08BIT, 0x01}, + {0x7630, CRL_REG_LEN_08BIT, 0x06}, + {0x7631, CRL_REG_LEN_08BIT, 0x52}, + {0x7632, CRL_REG_LEN_08BIT, 0x00}, + {0x7633, CRL_REG_LEN_08BIT, 0x01}, + {0x7634, CRL_REG_LEN_08BIT, 0x06}, + {0x7635, CRL_REG_LEN_08BIT, 0x5e}, + {0x7636, CRL_REG_LEN_08BIT, 0x04}, + {0x7637, CRL_REG_LEN_08BIT, 0xe4}, + {0x7638, CRL_REG_LEN_08BIT, 0x00}, + {0x7639, CRL_REG_LEN_08BIT, 0x01}, + {0x763a, CRL_REG_LEN_08BIT, 0x05}, + {0x763b, CRL_REG_LEN_08BIT, 0x30}, + {0x763c, CRL_REG_LEN_08BIT, 0x0f}, + {0x763d, CRL_REG_LEN_08BIT, 0x00}, + {0x763e, CRL_REG_LEN_08BIT, 0x06}, + {0x763f, CRL_REG_LEN_08BIT, 0xa6}, + {0x7640, CRL_REG_LEN_08BIT, 0x00}, + {0x7641, CRL_REG_LEN_08BIT, 0x02}, + {0x7642, CRL_REG_LEN_08BIT, 0x06}, + {0x7643, CRL_REG_LEN_08BIT, 0x26}, + {0x7644, CRL_REG_LEN_08BIT, 0x00}, + {0x7645, CRL_REG_LEN_08BIT, 0x02}, + {0x7646, CRL_REG_LEN_08BIT, 0x05}, + {0x7647, CRL_REG_LEN_08BIT, 0x33}, + {0x7648, CRL_REG_LEN_08BIT, 0x06}, + {0x7649, CRL_REG_LEN_08BIT, 0x20}, + {0x764a, CRL_REG_LEN_08BIT, 0x0f}, + {0x764b, CRL_REG_LEN_08BIT, 0x00}, + {0x764c, CRL_REG_LEN_08BIT, 0x06}, + {0x764d, CRL_REG_LEN_08BIT, 0x56}, + {0x764e, CRL_REG_LEN_08BIT, 0x00}, + {0x764f, CRL_REG_LEN_08BIT, 0x02}, + {0x7650, CRL_REG_LEN_08BIT, 0x06}, + {0x7651, CRL_REG_LEN_08BIT, 0x16}, + {0x7652, CRL_REG_LEN_08BIT, 0x05}, + {0x7653, CRL_REG_LEN_08BIT, 0x33}, + {0x7654, CRL_REG_LEN_08BIT, 0x06}, + {0x7655, CRL_REG_LEN_08BIT, 0x10}, + {0x7656, CRL_REG_LEN_08BIT, 0x0f}, + {0x7657, CRL_REG_LEN_08BIT, 0x00}, + {0x7658, CRL_REG_LEN_08BIT, 0x06}, + {0x7659, CRL_REG_LEN_08BIT, 0x10}, + {0x765a, CRL_REG_LEN_08BIT, 0x0f}, + {0x765b, CRL_REG_LEN_08BIT, 0x00}, + {0x765c, CRL_REG_LEN_08BIT, 0x06}, + {0x765d, CRL_REG_LEN_08BIT, 0x20}, + {0x765e, 
CRL_REG_LEN_08BIT, 0x0f}, + {0x765f, CRL_REG_LEN_08BIT, 0x00}, + {0x7660, CRL_REG_LEN_08BIT, 0x00}, + {0x7661, CRL_REG_LEN_08BIT, 0x00}, + {0x7662, CRL_REG_LEN_08BIT, 0x00}, + {0x7663, CRL_REG_LEN_08BIT, 0x02}, + {0x7664, CRL_REG_LEN_08BIT, 0x04}, + {0x7665, CRL_REG_LEN_08BIT, 0xe5}, + {0x7666, CRL_REG_LEN_08BIT, 0x04}, + {0x7667, CRL_REG_LEN_08BIT, 0xe4}, + {0x7668, CRL_REG_LEN_08BIT, 0x0f}, + {0x7669, CRL_REG_LEN_08BIT, 0x00}, + {0x766a, CRL_REG_LEN_08BIT, 0x00}, + {0x766b, CRL_REG_LEN_08BIT, 0x00}, + {0x766c, CRL_REG_LEN_08BIT, 0x00}, + {0x766d, CRL_REG_LEN_08BIT, 0x01}, + {0x766e, CRL_REG_LEN_08BIT, 0x04}, + {0x766f, CRL_REG_LEN_08BIT, 0xe5}, + {0x7670, CRL_REG_LEN_08BIT, 0x04}, + {0x7671, CRL_REG_LEN_08BIT, 0xe4}, + {0x7672, CRL_REG_LEN_08BIT, 0x0f}, + {0x7673, CRL_REG_LEN_08BIT, 0x00}, + {0x7674, CRL_REG_LEN_08BIT, 0x00}, + {0x7675, CRL_REG_LEN_08BIT, 0x02}, + {0x7676, CRL_REG_LEN_08BIT, 0x04}, + {0x7677, CRL_REG_LEN_08BIT, 0xe4}, + {0x7678, CRL_REG_LEN_08BIT, 0x00}, + {0x7679, CRL_REG_LEN_08BIT, 0x02}, + {0x767a, CRL_REG_LEN_08BIT, 0x04}, + {0x767b, CRL_REG_LEN_08BIT, 0xc4}, + {0x767c, CRL_REG_LEN_08BIT, 0x00}, + {0x767d, CRL_REG_LEN_08BIT, 0x02}, + {0x767e, CRL_REG_LEN_08BIT, 0x04}, + {0x767f, CRL_REG_LEN_08BIT, 0xc4}, + {0x7680, CRL_REG_LEN_08BIT, 0x05}, + {0x7681, CRL_REG_LEN_08BIT, 0x83}, + {0x7682, CRL_REG_LEN_08BIT, 0x0f}, + {0x7683, CRL_REG_LEN_08BIT, 0x00}, + {0x7684, CRL_REG_LEN_08BIT, 0x00}, + {0x7685, CRL_REG_LEN_08BIT, 0x02}, + {0x7686, CRL_REG_LEN_08BIT, 0x04}, + {0x7687, CRL_REG_LEN_08BIT, 0xe4}, + {0x7688, CRL_REG_LEN_08BIT, 0x00}, + {0x7689, CRL_REG_LEN_08BIT, 0x02}, + {0x768a, CRL_REG_LEN_08BIT, 0x04}, + {0x768b, CRL_REG_LEN_08BIT, 0xc4}, + {0x768c, CRL_REG_LEN_08BIT, 0x00}, + {0x768d, CRL_REG_LEN_08BIT, 0x02}, + {0x768e, CRL_REG_LEN_08BIT, 0x04}, + {0x768f, CRL_REG_LEN_08BIT, 0xc4}, + {0x7690, CRL_REG_LEN_08BIT, 0x05}, + {0x7691, CRL_REG_LEN_08BIT, 0x83}, + {0x7692, CRL_REG_LEN_08BIT, 0x03}, + {0x7693, CRL_REG_LEN_08BIT, 0x0b}, + {0x7694, 
CRL_REG_LEN_08BIT, 0x05}, + {0x7695, CRL_REG_LEN_08BIT, 0x83}, + {0x7696, CRL_REG_LEN_08BIT, 0x00}, + {0x7697, CRL_REG_LEN_08BIT, 0x07}, + {0x7698, CRL_REG_LEN_08BIT, 0x05}, + {0x7699, CRL_REG_LEN_08BIT, 0x03}, + {0x769a, CRL_REG_LEN_08BIT, 0x00}, + {0x769b, CRL_REG_LEN_08BIT, 0x05}, + {0x769c, CRL_REG_LEN_08BIT, 0x05}, + {0x769d, CRL_REG_LEN_08BIT, 0x32}, + {0x769e, CRL_REG_LEN_08BIT, 0x05}, + {0x769f, CRL_REG_LEN_08BIT, 0x30}, + {0x76a0, CRL_REG_LEN_08BIT, 0x00}, + {0x76a1, CRL_REG_LEN_08BIT, 0x02}, + {0x76a2, CRL_REG_LEN_08BIT, 0x05}, + {0x76a3, CRL_REG_LEN_08BIT, 0x78}, + {0x76a4, CRL_REG_LEN_08BIT, 0x00}, + {0x76a5, CRL_REG_LEN_08BIT, 0x01}, + {0x76a6, CRL_REG_LEN_08BIT, 0x05}, + {0x76a7, CRL_REG_LEN_08BIT, 0x7c}, + {0x76a8, CRL_REG_LEN_08BIT, 0x03}, + {0x76a9, CRL_REG_LEN_08BIT, 0x9a}, + {0x76aa, CRL_REG_LEN_08BIT, 0x05}, + {0x76ab, CRL_REG_LEN_08BIT, 0x83}, + {0x76ac, CRL_REG_LEN_08BIT, 0x00}, + {0x76ad, CRL_REG_LEN_08BIT, 0x04}, + {0x76ae, CRL_REG_LEN_08BIT, 0x05}, + {0x76af, CRL_REG_LEN_08BIT, 0x03}, + {0x76b0, CRL_REG_LEN_08BIT, 0x00}, + {0x76b1, CRL_REG_LEN_08BIT, 0x03}, + {0x76b2, CRL_REG_LEN_08BIT, 0x05}, + {0x76b3, CRL_REG_LEN_08BIT, 0x32}, + {0x76b4, CRL_REG_LEN_08BIT, 0x05}, + {0x76b5, CRL_REG_LEN_08BIT, 0x30}, + {0x76b6, CRL_REG_LEN_08BIT, 0x00}, + {0x76b7, CRL_REG_LEN_08BIT, 0x02}, + {0x76b8, CRL_REG_LEN_08BIT, 0x05}, + {0x76b9, CRL_REG_LEN_08BIT, 0x78}, + {0x76ba, CRL_REG_LEN_08BIT, 0x00}, + {0x76bb, CRL_REG_LEN_08BIT, 0x01}, + {0x76bc, CRL_REG_LEN_08BIT, 0x05}, + {0x76bd, CRL_REG_LEN_08BIT, 0x7c}, + {0x76be, CRL_REG_LEN_08BIT, 0x03}, + {0x76bf, CRL_REG_LEN_08BIT, 0x99}, + {0x76c0, CRL_REG_LEN_08BIT, 0x05}, + {0x76c1, CRL_REG_LEN_08BIT, 0x83}, + {0x76c2, CRL_REG_LEN_08BIT, 0x00}, + {0x76c3, CRL_REG_LEN_08BIT, 0x03}, + {0x76c4, CRL_REG_LEN_08BIT, 0x05}, + {0x76c5, CRL_REG_LEN_08BIT, 0x03}, + {0x76c6, CRL_REG_LEN_08BIT, 0x00}, + {0x76c7, CRL_REG_LEN_08BIT, 0x01}, + {0x76c8, CRL_REG_LEN_08BIT, 0x05}, + {0x76c9, CRL_REG_LEN_08BIT, 0x32}, + {0x76ca, 
CRL_REG_LEN_08BIT, 0x05}, + {0x76cb, CRL_REG_LEN_08BIT, 0x30}, + {0x76cc, CRL_REG_LEN_08BIT, 0x00}, + {0x76cd, CRL_REG_LEN_08BIT, 0x02}, + {0x76ce, CRL_REG_LEN_08BIT, 0x05}, + {0x76cf, CRL_REG_LEN_08BIT, 0x78}, + {0x76d0, CRL_REG_LEN_08BIT, 0x00}, + {0x76d1, CRL_REG_LEN_08BIT, 0x01}, + {0x76d2, CRL_REG_LEN_08BIT, 0x05}, + {0x76d3, CRL_REG_LEN_08BIT, 0x7c}, + {0x76d4, CRL_REG_LEN_08BIT, 0x03}, + {0x76d5, CRL_REG_LEN_08BIT, 0x98}, + {0x76d6, CRL_REG_LEN_08BIT, 0x05}, + {0x76d7, CRL_REG_LEN_08BIT, 0x83}, + {0x76d8, CRL_REG_LEN_08BIT, 0x00}, + {0x76d9, CRL_REG_LEN_08BIT, 0x00}, + {0x76da, CRL_REG_LEN_08BIT, 0x05}, + {0x76db, CRL_REG_LEN_08BIT, 0x03}, + {0x76dc, CRL_REG_LEN_08BIT, 0x00}, + {0x76dd, CRL_REG_LEN_08BIT, 0x01}, + {0x76de, CRL_REG_LEN_08BIT, 0x05}, + {0x76df, CRL_REG_LEN_08BIT, 0x32}, + {0x76e0, CRL_REG_LEN_08BIT, 0x05}, + {0x76e1, CRL_REG_LEN_08BIT, 0x30}, + {0x76e2, CRL_REG_LEN_08BIT, 0x00}, + {0x76e3, CRL_REG_LEN_08BIT, 0x02}, + {0x76e4, CRL_REG_LEN_08BIT, 0x05}, + {0x76e5, CRL_REG_LEN_08BIT, 0x78}, + {0x76e6, CRL_REG_LEN_08BIT, 0x00}, + {0x76e7, CRL_REG_LEN_08BIT, 0x01}, + {0x76e8, CRL_REG_LEN_08BIT, 0x05}, + {0x76e9, CRL_REG_LEN_08BIT, 0x7c}, + {0x76ea, CRL_REG_LEN_08BIT, 0x03}, + {0x76eb, CRL_REG_LEN_08BIT, 0x97}, + {0x76ec, CRL_REG_LEN_08BIT, 0x05}, + {0x76ed, CRL_REG_LEN_08BIT, 0x83}, + {0x76ee, CRL_REG_LEN_08BIT, 0x00}, + {0x76ef, CRL_REG_LEN_08BIT, 0x00}, + {0x76f0, CRL_REG_LEN_08BIT, 0x05}, + {0x76f1, CRL_REG_LEN_08BIT, 0x03}, + {0x76f2, CRL_REG_LEN_08BIT, 0x05}, + {0x76f3, CRL_REG_LEN_08BIT, 0x32}, + {0x76f4, CRL_REG_LEN_08BIT, 0x05}, + {0x76f5, CRL_REG_LEN_08BIT, 0x30}, + {0x76f6, CRL_REG_LEN_08BIT, 0x00}, + {0x76f7, CRL_REG_LEN_08BIT, 0x02}, + {0x76f8, CRL_REG_LEN_08BIT, 0x05}, + {0x76f9, CRL_REG_LEN_08BIT, 0x78}, + {0x76fa, CRL_REG_LEN_08BIT, 0x00}, + {0x76fb, CRL_REG_LEN_08BIT, 0x01}, + {0x76fc, CRL_REG_LEN_08BIT, 0x05}, + {0x76fd, CRL_REG_LEN_08BIT, 0x7c}, + {0x76fe, CRL_REG_LEN_08BIT, 0x03}, + {0x76ff, CRL_REG_LEN_08BIT, 0x96}, + {0x7700, 
CRL_REG_LEN_08BIT, 0x05}, + {0x7701, CRL_REG_LEN_08BIT, 0x83}, + {0x7702, CRL_REG_LEN_08BIT, 0x05}, + {0x7703, CRL_REG_LEN_08BIT, 0x03}, + {0x7704, CRL_REG_LEN_08BIT, 0x05}, + {0x7705, CRL_REG_LEN_08BIT, 0x32}, + {0x7706, CRL_REG_LEN_08BIT, 0x05}, + {0x7707, CRL_REG_LEN_08BIT, 0x30}, + {0x7708, CRL_REG_LEN_08BIT, 0x00}, + {0x7709, CRL_REG_LEN_08BIT, 0x02}, + {0x770a, CRL_REG_LEN_08BIT, 0x05}, + {0x770b, CRL_REG_LEN_08BIT, 0x78}, + {0x770c, CRL_REG_LEN_08BIT, 0x00}, + {0x770d, CRL_REG_LEN_08BIT, 0x01}, + {0x770e, CRL_REG_LEN_08BIT, 0x05}, + {0x770f, CRL_REG_LEN_08BIT, 0x7c}, + {0x7710, CRL_REG_LEN_08BIT, 0x03}, + {0x7711, CRL_REG_LEN_08BIT, 0x95}, + {0x7712, CRL_REG_LEN_08BIT, 0x05}, + {0x7713, CRL_REG_LEN_08BIT, 0x83}, + {0x7714, CRL_REG_LEN_08BIT, 0x05}, + {0x7715, CRL_REG_LEN_08BIT, 0x03}, + {0x7716, CRL_REG_LEN_08BIT, 0x05}, + {0x7717, CRL_REG_LEN_08BIT, 0x32}, + {0x7718, CRL_REG_LEN_08BIT, 0x05}, + {0x7719, CRL_REG_LEN_08BIT, 0x30}, + {0x771a, CRL_REG_LEN_08BIT, 0x00}, + {0x771b, CRL_REG_LEN_08BIT, 0x02}, + {0x771c, CRL_REG_LEN_08BIT, 0x05}, + {0x771d, CRL_REG_LEN_08BIT, 0x78}, + {0x771e, CRL_REG_LEN_08BIT, 0x00}, + {0x771f, CRL_REG_LEN_08BIT, 0x01}, + {0x7720, CRL_REG_LEN_08BIT, 0x05}, + {0x7721, CRL_REG_LEN_08BIT, 0x7c}, + {0x7722, CRL_REG_LEN_08BIT, 0x03}, + {0x7723, CRL_REG_LEN_08BIT, 0x94}, + {0x7724, CRL_REG_LEN_08BIT, 0x05}, + {0x7725, CRL_REG_LEN_08BIT, 0x83}, + {0x7726, CRL_REG_LEN_08BIT, 0x00}, + {0x7727, CRL_REG_LEN_08BIT, 0x01}, + {0x7728, CRL_REG_LEN_08BIT, 0x05}, + {0x7729, CRL_REG_LEN_08BIT, 0x03}, + {0x772a, CRL_REG_LEN_08BIT, 0x00}, + {0x772b, CRL_REG_LEN_08BIT, 0x01}, + {0x772c, CRL_REG_LEN_08BIT, 0x05}, + {0x772d, CRL_REG_LEN_08BIT, 0x32}, + {0x772e, CRL_REG_LEN_08BIT, 0x05}, + {0x772f, CRL_REG_LEN_08BIT, 0x30}, + {0x7730, CRL_REG_LEN_08BIT, 0x00}, + {0x7731, CRL_REG_LEN_08BIT, 0x02}, + {0x7732, CRL_REG_LEN_08BIT, 0x05}, + {0x7733, CRL_REG_LEN_08BIT, 0x78}, + {0x7734, CRL_REG_LEN_08BIT, 0x00}, + {0x7735, CRL_REG_LEN_08BIT, 0x01}, + {0x7736, 
CRL_REG_LEN_08BIT, 0x05}, + {0x7737, CRL_REG_LEN_08BIT, 0x7c}, + {0x7738, CRL_REG_LEN_08BIT, 0x03}, + {0x7739, CRL_REG_LEN_08BIT, 0x93}, + {0x773a, CRL_REG_LEN_08BIT, 0x05}, + {0x773b, CRL_REG_LEN_08BIT, 0x83}, + {0x773c, CRL_REG_LEN_08BIT, 0x00}, + {0x773d, CRL_REG_LEN_08BIT, 0x00}, + {0x773e, CRL_REG_LEN_08BIT, 0x05}, + {0x773f, CRL_REG_LEN_08BIT, 0x03}, + {0x7740, CRL_REG_LEN_08BIT, 0x00}, + {0x7741, CRL_REG_LEN_08BIT, 0x00}, + {0x7742, CRL_REG_LEN_08BIT, 0x05}, + {0x7743, CRL_REG_LEN_08BIT, 0x32}, + {0x7744, CRL_REG_LEN_08BIT, 0x05}, + {0x7745, CRL_REG_LEN_08BIT, 0x30}, + {0x7746, CRL_REG_LEN_08BIT, 0x00}, + {0x7747, CRL_REG_LEN_08BIT, 0x02}, + {0x7748, CRL_REG_LEN_08BIT, 0x05}, + {0x7749, CRL_REG_LEN_08BIT, 0x78}, + {0x774a, CRL_REG_LEN_08BIT, 0x00}, + {0x774b, CRL_REG_LEN_08BIT, 0x01}, + {0x774c, CRL_REG_LEN_08BIT, 0x05}, + {0x774d, CRL_REG_LEN_08BIT, 0x7c}, + {0x774e, CRL_REG_LEN_08BIT, 0x03}, + {0x774f, CRL_REG_LEN_08BIT, 0x92}, + {0x7750, CRL_REG_LEN_08BIT, 0x05}, + {0x7751, CRL_REG_LEN_08BIT, 0x83}, + {0x7752, CRL_REG_LEN_08BIT, 0x05}, + {0x7753, CRL_REG_LEN_08BIT, 0x03}, + {0x7754, CRL_REG_LEN_08BIT, 0x00}, + {0x7755, CRL_REG_LEN_08BIT, 0x00}, + {0x7756, CRL_REG_LEN_08BIT, 0x05}, + {0x7757, CRL_REG_LEN_08BIT, 0x32}, + {0x7758, CRL_REG_LEN_08BIT, 0x05}, + {0x7759, CRL_REG_LEN_08BIT, 0x30}, + {0x775a, CRL_REG_LEN_08BIT, 0x00}, + {0x775b, CRL_REG_LEN_08BIT, 0x02}, + {0x775c, CRL_REG_LEN_08BIT, 0x05}, + {0x775d, CRL_REG_LEN_08BIT, 0x78}, + {0x775e, CRL_REG_LEN_08BIT, 0x00}, + {0x775f, CRL_REG_LEN_08BIT, 0x01}, + {0x7760, CRL_REG_LEN_08BIT, 0x05}, + {0x7761, CRL_REG_LEN_08BIT, 0x7c}, + {0x7762, CRL_REG_LEN_08BIT, 0x03}, + {0x7763, CRL_REG_LEN_08BIT, 0x91}, + {0x7764, CRL_REG_LEN_08BIT, 0x05}, + {0x7765, CRL_REG_LEN_08BIT, 0x83}, + {0x7766, CRL_REG_LEN_08BIT, 0x05}, + {0x7767, CRL_REG_LEN_08BIT, 0x03}, + {0x7768, CRL_REG_LEN_08BIT, 0x05}, + {0x7769, CRL_REG_LEN_08BIT, 0x32}, + {0x776a, CRL_REG_LEN_08BIT, 0x05}, + {0x776b, CRL_REG_LEN_08BIT, 0x30}, + {0x776c, 
CRL_REG_LEN_08BIT, 0x00}, + {0x776d, CRL_REG_LEN_08BIT, 0x02}, + {0x776e, CRL_REG_LEN_08BIT, 0x05}, + {0x776f, CRL_REG_LEN_08BIT, 0x78}, + {0x7770, CRL_REG_LEN_08BIT, 0x00}, + {0x7771, CRL_REG_LEN_08BIT, 0x01}, + {0x7772, CRL_REG_LEN_08BIT, 0x05}, + {0x7773, CRL_REG_LEN_08BIT, 0x7c}, + {0x7774, CRL_REG_LEN_08BIT, 0x03}, + {0x7775, CRL_REG_LEN_08BIT, 0x90}, + {0x7776, CRL_REG_LEN_08BIT, 0x05}, + {0x7777, CRL_REG_LEN_08BIT, 0x83}, + {0x7778, CRL_REG_LEN_08BIT, 0x05}, + {0x7779, CRL_REG_LEN_08BIT, 0x03}, + {0x777a, CRL_REG_LEN_08BIT, 0x05}, + {0x777b, CRL_REG_LEN_08BIT, 0x32}, + {0x777c, CRL_REG_LEN_08BIT, 0x05}, + {0x777d, CRL_REG_LEN_08BIT, 0x30}, + {0x777e, CRL_REG_LEN_08BIT, 0x00}, + {0x777f, CRL_REG_LEN_08BIT, 0x02}, + {0x7780, CRL_REG_LEN_08BIT, 0x05}, + {0x7781, CRL_REG_LEN_08BIT, 0x78}, + {0x7782, CRL_REG_LEN_08BIT, 0x00}, + {0x7783, CRL_REG_LEN_08BIT, 0x01}, + {0x7784, CRL_REG_LEN_08BIT, 0x05}, + {0x7785, CRL_REG_LEN_08BIT, 0x7c}, + {0x7786, CRL_REG_LEN_08BIT, 0x02}, + {0x7787, CRL_REG_LEN_08BIT, 0x90}, + {0x7788, CRL_REG_LEN_08BIT, 0x05}, + {0x7789, CRL_REG_LEN_08BIT, 0x03}, + {0x778a, CRL_REG_LEN_08BIT, 0x07}, + {0x778b, CRL_REG_LEN_08BIT, 0x00}, + {0x778c, CRL_REG_LEN_08BIT, 0x0f}, + {0x778d, CRL_REG_LEN_08BIT, 0x00}, + {0x778e, CRL_REG_LEN_08BIT, 0x08}, + {0x778f, CRL_REG_LEN_08BIT, 0x30}, + {0x7790, CRL_REG_LEN_08BIT, 0x08}, + {0x7791, CRL_REG_LEN_08BIT, 0xee}, + {0x7792, CRL_REG_LEN_08BIT, 0x0f}, + {0x7793, CRL_REG_LEN_08BIT, 0x00}, + {0x7794, CRL_REG_LEN_08BIT, 0x05}, + {0x7795, CRL_REG_LEN_08BIT, 0x33}, + {0x7796, CRL_REG_LEN_08BIT, 0x04}, + {0x7797, CRL_REG_LEN_08BIT, 0xe5}, + {0x7798, CRL_REG_LEN_08BIT, 0x06}, + {0x7799, CRL_REG_LEN_08BIT, 0x52}, + {0x779a, CRL_REG_LEN_08BIT, 0x04}, + {0x779b, CRL_REG_LEN_08BIT, 0xe4}, + {0x779c, CRL_REG_LEN_08BIT, 0x00}, + {0x779d, CRL_REG_LEN_08BIT, 0x00}, + {0x779e, CRL_REG_LEN_08BIT, 0x06}, + {0x779f, CRL_REG_LEN_08BIT, 0x5e}, + {0x77a0, CRL_REG_LEN_08BIT, 0x00}, + {0x77a1, CRL_REG_LEN_08BIT, 0x0f}, + {0x77a2, 
CRL_REG_LEN_08BIT, 0x06}, + {0x77a3, CRL_REG_LEN_08BIT, 0x1e}, + {0x77a4, CRL_REG_LEN_08BIT, 0x00}, + {0x77a5, CRL_REG_LEN_08BIT, 0x02}, + {0x77a6, CRL_REG_LEN_08BIT, 0x06}, + {0x77a7, CRL_REG_LEN_08BIT, 0xa2}, + {0x77a8, CRL_REG_LEN_08BIT, 0x00}, + {0x77a9, CRL_REG_LEN_08BIT, 0x01}, + {0x77aa, CRL_REG_LEN_08BIT, 0x06}, + {0x77ab, CRL_REG_LEN_08BIT, 0xae}, + {0x77ac, CRL_REG_LEN_08BIT, 0x00}, + {0x77ad, CRL_REG_LEN_08BIT, 0x03}, + {0x77ae, CRL_REG_LEN_08BIT, 0x05}, + {0x77af, CRL_REG_LEN_08BIT, 0x30}, + {0x77b0, CRL_REG_LEN_08BIT, 0x09}, + {0x77b1, CRL_REG_LEN_08BIT, 0x19}, + {0x77b2, CRL_REG_LEN_08BIT, 0x0f}, + {0x77b3, CRL_REG_LEN_08BIT, 0x00}, + {0x77b4, CRL_REG_LEN_08BIT, 0x05}, + {0x77b5, CRL_REG_LEN_08BIT, 0x33}, + {0x77b6, CRL_REG_LEN_08BIT, 0x04}, + {0x77b7, CRL_REG_LEN_08BIT, 0xe5}, + {0x77b8, CRL_REG_LEN_08BIT, 0x06}, + {0x77b9, CRL_REG_LEN_08BIT, 0x52}, + {0x77ba, CRL_REG_LEN_08BIT, 0x04}, + {0x77bb, CRL_REG_LEN_08BIT, 0xe4}, + {0x77bc, CRL_REG_LEN_08BIT, 0x00}, + {0x77bd, CRL_REG_LEN_08BIT, 0x00}, + {0x77be, CRL_REG_LEN_08BIT, 0x06}, + {0x77bf, CRL_REG_LEN_08BIT, 0x5e}, + {0x77c0, CRL_REG_LEN_08BIT, 0x00}, + {0x77c1, CRL_REG_LEN_08BIT, 0x0f}, + {0x77c2, CRL_REG_LEN_08BIT, 0x06}, + {0x77c3, CRL_REG_LEN_08BIT, 0x1e}, + {0x77c4, CRL_REG_LEN_08BIT, 0x00}, + {0x77c5, CRL_REG_LEN_08BIT, 0x02}, + {0x77c6, CRL_REG_LEN_08BIT, 0x06}, + {0x77c7, CRL_REG_LEN_08BIT, 0xa2}, + {0x77c8, CRL_REG_LEN_08BIT, 0x00}, + {0x77c9, CRL_REG_LEN_08BIT, 0x01}, + {0x77ca, CRL_REG_LEN_08BIT, 0x06}, + {0x77cb, CRL_REG_LEN_08BIT, 0xae}, + {0x77cc, CRL_REG_LEN_08BIT, 0x00}, + {0x77cd, CRL_REG_LEN_08BIT, 0x03}, + {0x77ce, CRL_REG_LEN_08BIT, 0x05}, + {0x77cf, CRL_REG_LEN_08BIT, 0x30}, + {0x77d0, CRL_REG_LEN_08BIT, 0x0f}, + {0x77d1, CRL_REG_LEN_08BIT, 0x00}, + {0x77d2, CRL_REG_LEN_08BIT, 0x00}, + {0x77d3, CRL_REG_LEN_08BIT, 0x00}, + {0x77d4, CRL_REG_LEN_08BIT, 0x00}, + {0x77d5, CRL_REG_LEN_08BIT, 0x02}, + {0x77d6, CRL_REG_LEN_08BIT, 0x04}, + {0x77d7, CRL_REG_LEN_08BIT, 0xe5}, + {0x77d8, 
CRL_REG_LEN_08BIT, 0x04}, + {0x77d9, CRL_REG_LEN_08BIT, 0xe4}, + {0x77da, CRL_REG_LEN_08BIT, 0x05}, + {0x77db, CRL_REG_LEN_08BIT, 0x33}, + {0x77dc, CRL_REG_LEN_08BIT, 0x07}, + {0x77dd, CRL_REG_LEN_08BIT, 0x10}, + {0x77de, CRL_REG_LEN_08BIT, 0x00}, + {0x77df, CRL_REG_LEN_08BIT, 0x00}, + {0x77e0, CRL_REG_LEN_08BIT, 0x01}, + {0x77e1, CRL_REG_LEN_08BIT, 0xbb}, + {0x77e2, CRL_REG_LEN_08BIT, 0x00}, + {0x77e3, CRL_REG_LEN_08BIT, 0x00}, + {0x77e4, CRL_REG_LEN_08BIT, 0x01}, + {0x77e5, CRL_REG_LEN_08BIT, 0xaa}, + {0x77e6, CRL_REG_LEN_08BIT, 0x00}, + {0x77e7, CRL_REG_LEN_08BIT, 0x00}, + {0x77e8, CRL_REG_LEN_08BIT, 0x01}, + {0x77e9, CRL_REG_LEN_08BIT, 0x99}, + {0x77ea, CRL_REG_LEN_08BIT, 0x00}, + {0x77eb, CRL_REG_LEN_08BIT, 0x00}, + {0x77ec, CRL_REG_LEN_08BIT, 0x01}, + {0x77ed, CRL_REG_LEN_08BIT, 0x88}, + {0x77ee, CRL_REG_LEN_08BIT, 0x00}, + {0x77ef, CRL_REG_LEN_08BIT, 0x00}, + {0x77f0, CRL_REG_LEN_08BIT, 0x01}, + {0x77f1, CRL_REG_LEN_08BIT, 0x77}, + {0x77f2, CRL_REG_LEN_08BIT, 0x00}, + {0x77f3, CRL_REG_LEN_08BIT, 0x00}, + {0x77f4, CRL_REG_LEN_08BIT, 0x01}, + {0x77f5, CRL_REG_LEN_08BIT, 0x66}, + {0x77f6, CRL_REG_LEN_08BIT, 0x00}, + {0x77f7, CRL_REG_LEN_08BIT, 0x00}, + {0x77f8, CRL_REG_LEN_08BIT, 0x01}, + {0x77f9, CRL_REG_LEN_08BIT, 0x55}, + {0x77fa, CRL_REG_LEN_08BIT, 0x00}, + {0x77fb, CRL_REG_LEN_08BIT, 0x00}, + {0x77fc, CRL_REG_LEN_08BIT, 0x01}, + {0x77fd, CRL_REG_LEN_08BIT, 0x44}, + {0x77fe, CRL_REG_LEN_08BIT, 0x00}, + {0x77ff, CRL_REG_LEN_08BIT, 0x00}, + {0x7800, CRL_REG_LEN_08BIT, 0x01}, + {0x7801, CRL_REG_LEN_08BIT, 0x33}, + {0x7802, CRL_REG_LEN_08BIT, 0x00}, + {0x7803, CRL_REG_LEN_08BIT, 0x00}, + {0x7804, CRL_REG_LEN_08BIT, 0x01}, + {0x7805, CRL_REG_LEN_08BIT, 0x22}, + {0x7806, CRL_REG_LEN_08BIT, 0x00}, + {0x7807, CRL_REG_LEN_08BIT, 0x00}, + {0x7808, CRL_REG_LEN_08BIT, 0x01}, + {0x7809, CRL_REG_LEN_08BIT, 0x11}, + {0x780a, CRL_REG_LEN_08BIT, 0x00}, + {0x780b, CRL_REG_LEN_08BIT, 0x00}, + {0x780c, CRL_REG_LEN_08BIT, 0x01}, + {0x780d, CRL_REG_LEN_08BIT, 0x00}, + {0x780e, 
CRL_REG_LEN_08BIT, 0x01}, + {0x780f, CRL_REG_LEN_08BIT, 0xff}, + {0x7810, CRL_REG_LEN_08BIT, 0x07}, + {0x7811, CRL_REG_LEN_08BIT, 0x00}, + {0x7812, CRL_REG_LEN_08BIT, 0x02}, + {0x7813, CRL_REG_LEN_08BIT, 0xa0}, + {0x7814, CRL_REG_LEN_08BIT, 0x0f}, + {0x7815, CRL_REG_LEN_08BIT, 0x00}, + {0x7816, CRL_REG_LEN_08BIT, 0x08}, + {0x7817, CRL_REG_LEN_08BIT, 0x35}, + {0x7818, CRL_REG_LEN_08BIT, 0x06}, + {0x7819, CRL_REG_LEN_08BIT, 0x52}, + {0x781a, CRL_REG_LEN_08BIT, 0x04}, + {0x781b, CRL_REG_LEN_08BIT, 0xe4}, + {0x781c, CRL_REG_LEN_08BIT, 0x00}, + {0x781d, CRL_REG_LEN_08BIT, 0x00}, + {0x781e, CRL_REG_LEN_08BIT, 0x06}, + {0x781f, CRL_REG_LEN_08BIT, 0x5e}, + {0x7820, CRL_REG_LEN_08BIT, 0x05}, + {0x7821, CRL_REG_LEN_08BIT, 0x33}, + {0x7822, CRL_REG_LEN_08BIT, 0x09}, + {0x7823, CRL_REG_LEN_08BIT, 0x19}, + {0x7824, CRL_REG_LEN_08BIT, 0x06}, + {0x7825, CRL_REG_LEN_08BIT, 0x1e}, + {0x7826, CRL_REG_LEN_08BIT, 0x05}, + {0x7827, CRL_REG_LEN_08BIT, 0x33}, + {0x7828, CRL_REG_LEN_08BIT, 0x00}, + {0x7829, CRL_REG_LEN_08BIT, 0x01}, + {0x782a, CRL_REG_LEN_08BIT, 0x06}, + {0x782b, CRL_REG_LEN_08BIT, 0x24}, + {0x782c, CRL_REG_LEN_08BIT, 0x06}, + {0x782d, CRL_REG_LEN_08BIT, 0x20}, + {0x782e, CRL_REG_LEN_08BIT, 0x0f}, + {0x782f, CRL_REG_LEN_08BIT, 0x00}, + {0x7830, CRL_REG_LEN_08BIT, 0x08}, + {0x7831, CRL_REG_LEN_08BIT, 0x35}, + {0x7832, CRL_REG_LEN_08BIT, 0x07}, + {0x7833, CRL_REG_LEN_08BIT, 0x10}, + {0x7834, CRL_REG_LEN_08BIT, 0x00}, + {0x7835, CRL_REG_LEN_08BIT, 0x00}, + {0x7836, CRL_REG_LEN_08BIT, 0x01}, + {0x7837, CRL_REG_LEN_08BIT, 0xbb}, + {0x7838, CRL_REG_LEN_08BIT, 0x00}, + {0x7839, CRL_REG_LEN_08BIT, 0x00}, + {0x783a, CRL_REG_LEN_08BIT, 0x01}, + {0x783b, CRL_REG_LEN_08BIT, 0xaa}, + {0x783c, CRL_REG_LEN_08BIT, 0x00}, + {0x783d, CRL_REG_LEN_08BIT, 0x00}, + {0x783e, CRL_REG_LEN_08BIT, 0x01}, + {0x783f, CRL_REG_LEN_08BIT, 0x99}, + {0x7840, CRL_REG_LEN_08BIT, 0x00}, + {0x7841, CRL_REG_LEN_08BIT, 0x00}, + {0x7842, CRL_REG_LEN_08BIT, 0x01}, + {0x7843, CRL_REG_LEN_08BIT, 0x88}, + {0x7844, 
CRL_REG_LEN_08BIT, 0x00}, + {0x7845, CRL_REG_LEN_08BIT, 0x00}, + {0x7846, CRL_REG_LEN_08BIT, 0x01}, + {0x7847, CRL_REG_LEN_08BIT, 0x77}, + {0x7848, CRL_REG_LEN_08BIT, 0x00}, + {0x7849, CRL_REG_LEN_08BIT, 0x00}, + {0x784a, CRL_REG_LEN_08BIT, 0x01}, + {0x784b, CRL_REG_LEN_08BIT, 0x66}, + {0x784c, CRL_REG_LEN_08BIT, 0x00}, + {0x784d, CRL_REG_LEN_08BIT, 0x00}, + {0x784e, CRL_REG_LEN_08BIT, 0x01}, + {0x784f, CRL_REG_LEN_08BIT, 0x55}, + {0x7850, CRL_REG_LEN_08BIT, 0x00}, + {0x7851, CRL_REG_LEN_08BIT, 0x00}, + {0x7852, CRL_REG_LEN_08BIT, 0x01}, + {0x7853, CRL_REG_LEN_08BIT, 0x44}, + {0x7854, CRL_REG_LEN_08BIT, 0x00}, + {0x7855, CRL_REG_LEN_08BIT, 0x00}, + {0x7856, CRL_REG_LEN_08BIT, 0x01}, + {0x7857, CRL_REG_LEN_08BIT, 0x33}, + {0x7858, CRL_REG_LEN_08BIT, 0x00}, + {0x7859, CRL_REG_LEN_08BIT, 0x00}, + {0x785a, CRL_REG_LEN_08BIT, 0x01}, + {0x785b, CRL_REG_LEN_08BIT, 0x22}, + {0x785c, CRL_REG_LEN_08BIT, 0x00}, + {0x785d, CRL_REG_LEN_08BIT, 0x00}, + {0x785e, CRL_REG_LEN_08BIT, 0x01}, + {0x785f, CRL_REG_LEN_08BIT, 0x11}, + {0x7860, CRL_REG_LEN_08BIT, 0x00}, + {0x7861, CRL_REG_LEN_08BIT, 0x00}, + {0x7862, CRL_REG_LEN_08BIT, 0x01}, + {0x7863, CRL_REG_LEN_08BIT, 0x00}, + {0x7864, CRL_REG_LEN_08BIT, 0x07}, + {0x7865, CRL_REG_LEN_08BIT, 0x00}, + {0x7866, CRL_REG_LEN_08BIT, 0x01}, + {0x7867, CRL_REG_LEN_08BIT, 0xff}, + {0x7868, CRL_REG_LEN_08BIT, 0x02}, + {0x7869, CRL_REG_LEN_08BIT, 0xa0}, + {0x786a, CRL_REG_LEN_08BIT, 0x0f}, + {0x786b, CRL_REG_LEN_08BIT, 0x00}, + {0x786c, CRL_REG_LEN_08BIT, 0x08}, + {0x786d, CRL_REG_LEN_08BIT, 0x3a}, + {0x786e, CRL_REG_LEN_08BIT, 0x08}, + {0x786f, CRL_REG_LEN_08BIT, 0x6a}, + {0x7870, CRL_REG_LEN_08BIT, 0x0f}, + {0x7871, CRL_REG_LEN_08BIT, 0x00}, + {0x7872, CRL_REG_LEN_08BIT, 0x04}, + {0x7873, CRL_REG_LEN_08BIT, 0xc0}, + {0x7874, CRL_REG_LEN_08BIT, 0x09}, + {0x7875, CRL_REG_LEN_08BIT, 0x19}, + {0x7876, CRL_REG_LEN_08BIT, 0x04}, + {0x7877, CRL_REG_LEN_08BIT, 0x99}, + {0x7878, CRL_REG_LEN_08BIT, 0x07}, + {0x7879, CRL_REG_LEN_08BIT, 0x14}, + {0x787a, 
CRL_REG_LEN_08BIT, 0x00}, + {0x787b, CRL_REG_LEN_08BIT, 0x01}, + {0x787c, CRL_REG_LEN_08BIT, 0x04}, + {0x787d, CRL_REG_LEN_08BIT, 0xa4}, + {0x787e, CRL_REG_LEN_08BIT, 0x00}, + {0x787f, CRL_REG_LEN_08BIT, 0x07}, + {0x7880, CRL_REG_LEN_08BIT, 0x04}, + {0x7881, CRL_REG_LEN_08BIT, 0xa6}, + {0x7882, CRL_REG_LEN_08BIT, 0x00}, + {0x7883, CRL_REG_LEN_08BIT, 0x00}, + {0x7884, CRL_REG_LEN_08BIT, 0x04}, + {0x7885, CRL_REG_LEN_08BIT, 0xa0}, + {0x7886, CRL_REG_LEN_08BIT, 0x04}, + {0x7887, CRL_REG_LEN_08BIT, 0x80}, + {0x7888, CRL_REG_LEN_08BIT, 0x04}, + {0x7889, CRL_REG_LEN_08BIT, 0x00}, + {0x788a, CRL_REG_LEN_08BIT, 0x05}, + {0x788b, CRL_REG_LEN_08BIT, 0x03}, + {0x788c, CRL_REG_LEN_08BIT, 0x06}, + {0x788d, CRL_REG_LEN_08BIT, 0x00}, + {0x788e, CRL_REG_LEN_08BIT, 0x0f}, + {0x788f, CRL_REG_LEN_08BIT, 0x00}, + {0x7890, CRL_REG_LEN_08BIT, 0x0f}, + {0x7891, CRL_REG_LEN_08BIT, 0x00}, + {0x7892, CRL_REG_LEN_08BIT, 0x0f}, + {0x7893, CRL_REG_LEN_08BIT, 0x00}, + {0x30a3, CRL_REG_LEN_08BIT, 0x00}, + {0x30a7, CRL_REG_LEN_08BIT, 0x48}, + {0x30ab, CRL_REG_LEN_08BIT, 0x04}, + {0x30af, CRL_REG_LEN_08BIT, 0x40}, + {0x3001, CRL_REG_LEN_08BIT, 0x23}, + {0x3005, CRL_REG_LEN_08BIT, 0x13}, + {0x3014, CRL_REG_LEN_08BIT, 0x44}, + {0x3196, CRL_REG_LEN_08BIT, 0x00}, + {0x3197, CRL_REG_LEN_08BIT, 0x00}, + {0x3195, CRL_REG_LEN_08BIT, 0x04}, + {0x31e3, CRL_REG_LEN_08BIT, 0x03}, + {0x31e4, CRL_REG_LEN_08BIT, 0x13}, + {0x315a, CRL_REG_LEN_08BIT, 0x01}, + {0x315b, CRL_REG_LEN_08BIT, 0x00}, + {0x315c, CRL_REG_LEN_08BIT, 0x01}, + {0x315d, CRL_REG_LEN_08BIT, 0x00}, + {0x315e, CRL_REG_LEN_08BIT, 0x01}, + {0x315f, CRL_REG_LEN_08BIT, 0x00}, + {0x3250, CRL_REG_LEN_08BIT, 0xf7}, +}; + +static struct crl_register_write_rep ov2775_powerup_standby_regset[] = { + { 0x3013, CRL_REG_LEN_08BIT, 0x01 }, + { 0x0000, CRL_REG_LEN_DELAY, 0x14 }, + { 0x3013, CRL_REG_LEN_08BIT, 0x00 }, +}; + +static struct crl_register_write_rep ov2775_streamon_regs[] = { + { 0x3012, CRL_REG_LEN_08BIT, 0x01 } +}; + +static struct 
crl_register_write_rep ov2775_streamoff_regs[] = { + { 0x3012, CRL_REG_LEN_08BIT, 0x00 } +}; + +static struct crl_arithmetic_ops ov2775_vflip_ops[] = { + { + .op = CRL_BITWISE_LSHIFT, + .operand.entity_val = 3, + }, +}; + +static struct crl_arithmetic_ops ov2775_vblank_ops[] = { + { + .op = CRL_BITWISE_RSHIFT, + .operand.entity_val = 8, + }, +}; + +static struct crl_arithmetic_ops ov2775_hflip_ops[] = { + { + .op = CRL_BITWISE_LSHIFT, + .operand.entity_val = 2, + }, +}; + +static struct crl_arithmetic_ops ov2775_hblank_ops[] = { + { + .op = CRL_BITWISE_RSHIFT, + .operand.entity_val = 8, + }, +}; + +static struct crl_arithmetic_ops ov2775_exposure_ops[] = { + { + .op = CRL_BITWISE_RSHIFT, + .operand.entity_val = 8, + }, +}; + +static struct crl_arithmetic_ops ov2775_ana_gain_l_ops[] = { + { + .op = CRL_BITWISE_LSHIFT, + .operand.entity_val = 2, + }, +}; + +static struct crl_arithmetic_ops ov2775_ana_gain_vs_ops[] = { + { + .op = CRL_BITWISE_LSHIFT, + .operand.entity_val = 4, + }, +}; + +static struct crl_arithmetic_ops ov2775_ana_gain_linear_ops[] = { + { + .op = CRL_BITWISE_LSHIFT, + .operand.entity_val = 6, + }, +}; + +static struct crl_arithmetic_ops ov2775_digital_gain_ops[] = { + { + .op = CRL_BITWISE_RSHIFT, + .operand.entity_val = 8, + }, +}; + +static struct crl_dynamic_register_access ov2775_v_flip_regs[] = { + { + .address = 0x30C0, + .len = CRL_REG_LEN_08BIT | CRL_REG_READ_AND_UPDATE, + .ops_items = ARRAY_SIZE(ov2775_vflip_ops), + .ops = ov2775_vflip_ops, + .mask = 0x08, + }, +}; + +static struct crl_dynamic_register_access ov2775_h_flip_regs[] = { + { + .address = 0x30C0, + .len = CRL_REG_LEN_08BIT | CRL_REG_READ_AND_UPDATE, + .ops_items = ARRAY_SIZE(ov2775_hflip_ops), + .ops = ov2775_hflip_ops, + .mask = 0x04, + }, +}; + +/* 0: 1x, 1: 2x, 2: 4x, 3: 8x + * linear mode analog gain uses ana_gain_h + */ +static struct crl_dynamic_register_access ov2775_ana_gain_h_regs[] = { + { + .address = 0x30BB, + .len = CRL_REG_LEN_08BIT | CRL_REG_READ_AND_UPDATE, + 
.ops_items = 0, + .ops = 0, + .mask = 0x03, + }, +}; + +static struct crl_dynamic_register_access ov2775_ana_gain_l_regs[] = { + { + .address = 0x30BB, + .len = CRL_REG_LEN_08BIT | CRL_REG_READ_AND_UPDATE, + .ops_items = ARRAY_SIZE(ov2775_ana_gain_l_ops), + .ops = ov2775_ana_gain_l_ops, + .mask = 0x0c, + }, +}; + +static struct crl_dynamic_register_access ov2775_ana_gain_vs_regs[] = { + { + .address = 0x30BB, + .len = CRL_REG_LEN_08BIT | CRL_REG_READ_AND_UPDATE, + .ops_items = ARRAY_SIZE(ov2775_ana_gain_vs_ops), + .ops = ov2775_ana_gain_vs_ops, + .mask = 0x30, + }, +}; + +static struct crl_dynamic_register_access ov2775_ana_gain_linear_cg_regs[] = { + { + .address = 0x30BB, + .len = CRL_REG_LEN_08BIT | CRL_REG_READ_AND_UPDATE, + .ops_items = ARRAY_SIZE(ov2775_ana_gain_linear_ops), + .ops = ov2775_ana_gain_linear_ops, + .mask = 0x40, + }, +}; + +static struct crl_dynamic_register_access ov2775_digital_gain_h_regs[] = { + { + .address = 0x315A, + .len = CRL_REG_LEN_08BIT, + .ops_items = ARRAY_SIZE(ov2775_digital_gain_ops), + .ops = ov2775_digital_gain_ops, + .mask = 0xff, + }, + { + .address = 0x315B, + .len = CRL_REG_LEN_08BIT, + .ops_items = 0, + .ops = 0, + .mask = 0xff, + }, +}; + +static struct crl_dynamic_register_access ov2775_digital_gain_l_regs[] = { + { + .address = 0x315C, + .len = CRL_REG_LEN_08BIT, + .ops_items = ARRAY_SIZE(ov2775_digital_gain_ops), + .ops = ov2775_digital_gain_ops, + .mask = 0xff, + }, + { + .address = 0x315D, + .len = CRL_REG_LEN_08BIT, + .ops_items = 0, + .ops = 0, + .mask = 0xff, + }, +}; + +static struct crl_dynamic_register_access ov2775_digital_gain_vs_regs[] = { + { + .address = 0x315E, + .len = CRL_REG_LEN_08BIT, + .ops_items = ARRAY_SIZE(ov2775_digital_gain_ops), + .ops = ov2775_digital_gain_ops, + .mask = 0xff, + }, + { + .address = 0x315F, + .len = CRL_REG_LEN_08BIT, + .ops_items = 0, + .ops = 0, + .mask = 0xff, + }, +}; + +static struct crl_dynamic_register_access ov2775_exposure_dcg_regs[] = { + { + .address = 0x30B6, + 
.len = CRL_REG_LEN_08BIT, + .ops_items = ARRAY_SIZE(ov2775_exposure_ops), + .ops = ov2775_exposure_ops, + .mask = 0xff, + }, + { + .address = 0x30B7, + .len = CRL_REG_LEN_08BIT, + .ops_items = 0, + .ops = 0, + .mask = 0xff, + }, +}; + +/* 03B8 and 03B9 are integer part, 03BA is fractional part with N/32 + * just use integer part + */ +static struct crl_dynamic_register_access ov2775_exposure_vs_regs[] = { + { + .address = 0x30B8, + .len = CRL_REG_LEN_08BIT, + .ops_items = ARRAY_SIZE(ov2775_exposure_ops), + .ops = ov2775_exposure_ops, + .mask = 0xff, + }, + { + .address = 0x30B9, + .len = CRL_REG_LEN_08BIT, + .ops_items = 0, + .ops = 0, + .mask = 0xff, + }, +}; + +static struct crl_dynamic_register_access ov2775_vblank_regs[] = { + { + .address = 0x30B2, + .len = CRL_REG_LEN_08BIT, + .ops_items = ARRAY_SIZE(ov2775_vblank_ops), + .ops = ov2775_vblank_ops, + .mask = 0xff, + }, + { + .address = 0x30B3, + .len = CRL_REG_LEN_08BIT, + .ops_items = 0, + .ops = 0, + .mask = 0xff, + }, +}; + +static struct crl_dynamic_register_access ov2775_hblank_regs[] = { + { + .address = 0x30B0, + .len = CRL_REG_LEN_08BIT, + .ops_items = ARRAY_SIZE(ov2775_hblank_ops), + .ops = ov2775_hblank_ops, + .mask = 0xff, + }, + { + .address = 0x30B1, + .len = CRL_REG_LEN_08BIT, + .ops_items = 0, + .ops = 0, + .mask = 0xff, + }, +}; + +static struct crl_sensor_detect_config ov2775_sensor_detect_regset[] = { + { + .reg = { 0x300A, CRL_REG_LEN_08BIT, 0x000000ff }, + .width = 7, + }, + { + .reg = { 0x300B, CRL_REG_LEN_08BIT, 0x000000ff }, + .width = 7, + }, +}; + +/* pixel_rate = op_sys_clk*2 * csi_lanes / bitsperpixel */ +static struct crl_pll_configuration ov2775_pll_configurations[] = { + { + .input_clk = 19200000, + .op_sys_clk = 202000000, + .bitsperpixel = 10, + .pixel_rate_csi = 50200000, + .pixel_rate_pa = 50200000, + .csi_lanes = 1, + .comp_items = 0, + .ctrl_data = 0, + .pll_regs_items = 0, + .pll_regs = 0, + }, + { + .input_clk = 19200000, + .op_sys_clk = 202000000, + .bitsperpixel = 12, + 
		.pixel_rate_csi = 50200000,
		.pixel_rate_pa = 50200000,
		.csi_lanes = 1,
		.comp_items = 0,
		.ctrl_data = 0,
		.pll_regs_items = 0,
		.pll_regs = 0,
	},
	{
		.input_clk = 24000000,
		.op_sys_clk = 480000000,
		.bitsperpixel = 10,
		.pixel_rate_csi = 80000000,
		.pixel_rate_pa = 80000000,
		.csi_lanes = 2,
		.comp_items = 0,
		.ctrl_data = 0,
		.pll_regs_items = 0,
		.pll_regs = 0,
	},
	{
		.input_clk = 24000000,
		.op_sys_clk = 480000000,
		.bitsperpixel = 12,
		.pixel_rate_csi = 80000000,
		.pixel_rate_pa = 80000000,
		.csi_lanes = 2,
		.comp_items = 0,
		.ctrl_data = 0,
		.pll_regs_items = 0,
		.pll_regs = 0,
	},
};

/* Native 1920x1088 geometry, identical for pixel array and binner
 * (no cropping or binning applied). */
static struct crl_subdev_rect_rep ov2775_1920x1088_rects_native[] = {
	{
		.subdev_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY,
		.in_rect.left = 0,
		.in_rect.top = 0,
		.in_rect.width = 1920,
		.in_rect.height = 1088,
		.out_rect.left = 0,
		.out_rect.top = 0,
		.out_rect.width = 1920,
		.out_rect.height = 1088,
	},
	{
		.subdev_type = CRL_SUBDEV_TYPE_BINNER,
		.in_rect.left = 0,
		.in_rect.top = 0,
		.in_rect.width = 1920,
		.in_rect.height = 1088,
		.out_rect.left = 0,
		.out_rect.top = 0,
		.out_rect.width = 1920,
		.out_rect.height = 1088,
	},
};

/* Four 1920x1088 modes sharing the same geometry and timing limits;
 * they differ only in the mode register set (linear HCG, linear LCG,
 * 2x12, 3x12 -- all 30 fps / MIPI 960 variants). Mode index maps to
 * the CRL_CID_SENSOR_MODE control below. */
static struct crl_mode_rep ov2775_modes[] = {
	{
		.sd_rects_items = ARRAY_SIZE(ov2775_1920x1088_rects_native),
		.sd_rects = ov2775_1920x1088_rects_native,
		.binn_hor = 1,
		.binn_vert = 1,
		.scale_m = 1,
		.width = 1920,
		.height = 1088,
		.min_llp = 3550,
		.min_fll = 1096,
		.comp_items = 0,
		.ctrl_data = 0,
		.mode_regs_items =
			ARRAY_SIZE(ov2775_linear_hcg_30fps_mipi960_regset),
		.mode_regs = ov2775_linear_hcg_30fps_mipi960_regset,
	},
	{
		.sd_rects_items = ARRAY_SIZE(ov2775_1920x1088_rects_native),
		.sd_rects = ov2775_1920x1088_rects_native,
		.binn_hor = 1,
		.binn_vert = 1,
		.scale_m = 1,
		.width = 1920,
		.height = 1088,
		.min_llp = 3550,
		.min_fll = 1096,
		.comp_items = 0,
		.ctrl_data = 0,
		.mode_regs_items =
			ARRAY_SIZE(ov2775_linear_lcg_30fps_mipi960_regset),
		.mode_regs = ov2775_linear_lcg_30fps_mipi960_regset,
	},
	{
		.sd_rects_items = ARRAY_SIZE(ov2775_1920x1088_rects_native),
		.sd_rects = ov2775_1920x1088_rects_native,
		.binn_hor = 1,
		.binn_vert = 1,
		.scale_m = 1,
		.width = 1920,
		.height = 1088,
		.min_llp = 3550,
		.min_fll = 1096,
		.comp_items = 0,
		.ctrl_data = 0,
		.mode_regs_items = ARRAY_SIZE(ov2775_2x12_30fps_mipi960_regset),
		.mode_regs = ov2775_2x12_30fps_mipi960_regset,
	},
	{
		.sd_rects_items = ARRAY_SIZE(ov2775_1920x1088_rects_native),
		.sd_rects = ov2775_1920x1088_rects_native,
		.binn_hor = 1,
		.binn_vert = 1,
		.scale_m = 1,
		.width = 1920,
		.height = 1088,
		.min_llp = 3550,
		.min_fll = 1096,
		.comp_items = 0,
		.ctrl_data = 0,
		.mode_regs_items = ARRAY_SIZE(ov2775_3x12_30fps_mipi960_regset),
		.mode_regs = ov2775_3x12_30fps_mipi960_regset,
	},
};

/* Exposed media entities for this sensor. */
static struct crl_sensor_subdev_config ov2775_sensor_subdevs[] = {
	{
		.subdev_type = CRL_SUBDEV_TYPE_BINNER,
		.name = "ov2775 binner",
	},
	{
		.subdev_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY,
		.name = "ov2775 pixel array",
	},
};

/* Addressable area and frame/line timing bounds. */
static struct crl_sensor_limits ov2775_sensor_limits = {
	.x_addr_min = 0,
	.y_addr_min = 0,
	.x_addr_max = 1936,
	.y_addr_max = 1096,
	.min_frame_length_lines = 1096,
	.max_frame_length_lines = 65535,
	.min_line_length_pixels = 3550,
	.max_line_length_pixels = 32752,
};

/* Bayer order changes with the H/V flip state. */
static struct crl_flip_data ov2775_flip_configurations[] = {
	{
		.flip = CRL_FLIP_DEFAULT_NONE,
		.pixel_order = CRL_PIXEL_ORDER_BGGR,
	},
	{
		.flip = CRL_FLIP_VFLIP,
		.pixel_order = CRL_PIXEL_ORDER_GRBG,
	},
	{
		.flip = CRL_FLIP_HFLIP,
		.pixel_order = CRL_PIXEL_ORDER_GBRG,
	},
	{
		.flip = CRL_FLIP_HFLIP_VFLIP,
		.pixel_order = CRL_PIXEL_ORDER_RGGB,
	},
};

/* Supported CSI-2 media-bus formats (10-bit BGGR plus all 12-bit
 * Bayer orders); no extra registers needed for format selection. */
static struct crl_csi_data_fmt ov2775_crl_csi_data_fmt[] = {
	{
		.code = MEDIA_BUS_FMT_SBGGR10_1X10,
		.pixel_order = CRL_PIXEL_ORDER_BGGR,
		.bits_per_pixel = 10,
		.regs_items = 0,
		.regs = 0,
	},
	{
		.code = MEDIA_BUS_FMT_SGRBG12_1X12,
.pixel_order = CRL_PIXEL_ORDER_GRBG, + .bits_per_pixel = 12, + .regs_items = 0, + .regs = 0, + }, + { + .code = MEDIA_BUS_FMT_SRGGB12_1X12, + .pixel_order = CRL_PIXEL_ORDER_RGGB, + .bits_per_pixel = 12, + .regs_items = 0, + .regs = 0, + }, + { + .code = MEDIA_BUS_FMT_SBGGR12_1X12, + .pixel_order = CRL_PIXEL_ORDER_BGGR, + .bits_per_pixel = 12, + .regs_items = 0, + .regs = 0, + }, + { + .code = MEDIA_BUS_FMT_SGBRG12_1X12, + .pixel_order = CRL_PIXEL_ORDER_GBRG, + .bits_per_pixel = 12, + .regs_items = 0, + .regs = 0, + }, +}; + +static struct crl_v4l2_ctrl ov2775_v4l2_ctrls[] = { + { + .sd_type = CRL_SUBDEV_TYPE_BINNER, + .op_type = CRL_V4L2_CTRL_SET_OP, + .context = SENSOR_IDLE, + .ctrl_id = V4L2_CID_LINK_FREQ, + .name = "V4L2_CID_LINK_FREQ", + .type = CRL_V4L2_CTRL_TYPE_MENU_INT, + .data.v4l2_int_menu.def = 0, + .data.v4l2_int_menu.max = 0, + .data.v4l2_int_menu.menu = 0, + .flags = 0, + .impact = CRL_IMPACTS_NO_IMPACT, + .regs_items = 0, + .regs = 0, + .dep_items = 0, + .dep_ctrls = 0, + }, + { + .sd_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .op_type = CRL_V4L2_CTRL_GET_OP, + .context = SENSOR_POWERED_ON, + .ctrl_id = V4L2_CID_PIXEL_RATE, + .name = "V4L2_CID_PIXEL_RATE_PA", + .type = CRL_V4L2_CTRL_TYPE_INTEGER, + .data.std_data.min = 0, + .data.std_data.max = INT_MAX, + .data.std_data.step = 1, + .data.std_data.def = 0, + .flags = 0, + .impact = CRL_IMPACTS_NO_IMPACT, + .regs_items = 0, + .regs = 0, + .dep_items = 0, + .dep_ctrls = 0, + }, + { + .sd_type = CRL_SUBDEV_TYPE_BINNER, + .op_type = CRL_V4L2_CTRL_GET_OP, + .context = SENSOR_POWERED_ON, + .ctrl_id = V4L2_CID_PIXEL_RATE, + .name = "V4L2_CID_PIXEL_RATE_CSI", + .type = CRL_V4L2_CTRL_TYPE_INTEGER, + .data.std_data.min = 0, + .data.std_data.max = INT_MAX, + .data.std_data.step = 1, + .data.std_data.def = 0, + .flags = 0, + .impact = CRL_IMPACTS_NO_IMPACT, + .regs_items = 0, + .regs = 0, + .dep_items = 0, + .dep_ctrls = 0, + }, + { + .sd_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .op_type = CRL_V4L2_CTRL_SET_OP, + 
.context = SENSOR_POWERED_ON, + .ctrl_id = V4L2_CID_HFLIP, + .name = "V4L2_CID_HFLIP", + .type = CRL_V4L2_CTRL_TYPE_INTEGER, + .data.std_data.min = 0, + .data.std_data.max = 1, + .data.std_data.step = 1, + .data.std_data.def = 0, + .flags = 0, + .impact = CRL_IMPACTS_NO_IMPACT, + .ctrl = 0, + .regs_items = ARRAY_SIZE(ov2775_h_flip_regs), + .regs = ov2775_h_flip_regs, + .dep_items = 0, + .dep_ctrls = 0, + }, + { + .sd_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .op_type = CRL_V4L2_CTRL_SET_OP, + .context = SENSOR_POWERED_ON, + .ctrl_id = V4L2_CID_VFLIP, + .name = "V4L2_CID_VFLIP", + .type = CRL_V4L2_CTRL_TYPE_INTEGER, + .data.std_data.min = 0, + .data.std_data.max = 1, + .data.std_data.step = 1, + .data.std_data.def = 0, + .flags = 0, + .impact = CRL_IMPACTS_NO_IMPACT, + .ctrl = 0, + .regs_items = ARRAY_SIZE(ov2775_v_flip_regs), + .regs = ov2775_v_flip_regs, + .dep_items = 0, + .dep_ctrls = 0, + }, + { + .sd_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .op_type = CRL_V4L2_CTRL_SET_OP, + .context = SENSOR_POWERED_ON, + .ctrl_id = V4L2_CID_FRAME_LENGTH_LINES, + .name = "Frame Length Lines", + .type = CRL_V4L2_CTRL_TYPE_CUSTOM, + .data.std_data.min = 160, + .data.std_data.max = 65535, + .data.std_data.step = 1, + .data.std_data.def = 1120, + .flags = V4L2_CTRL_FLAG_UPDATE, + .impact = CRL_IMPACTS_NO_IMPACT, + .ctrl = 0, + .regs_items = ARRAY_SIZE(ov2775_vblank_regs), + .regs = ov2775_vblank_regs, + .dep_items = 0, + .dep_ctrls = 0, + .v4l2_type = V4L2_CTRL_TYPE_INTEGER, + }, + { + .sd_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .op_type = CRL_V4L2_CTRL_SET_OP, + .context = SENSOR_POWERED_ON, + .ctrl_id = V4L2_CID_LINE_LENGTH_PIXELS, + .name = "Line Length Pixels", + .type = CRL_V4L2_CTRL_TYPE_CUSTOM, + .data.std_data.min = 1024, + .data.std_data.max = 65520, + .data.std_data.step = 1, + .data.std_data.def = 3550, + .flags = V4L2_CTRL_FLAG_UPDATE, + .impact = CRL_IMPACTS_NO_IMPACT, + .ctrl = 0, + .regs_items = ARRAY_SIZE(ov2775_hblank_regs), + .regs = ov2775_hblank_regs, + .dep_items = 0, 
+ .dep_ctrls = 0, + .v4l2_type = V4L2_CTRL_TYPE_INTEGER, + }, + { + .sd_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .op_type = CRL_V4L2_CTRL_SET_OP, + .context = SENSOR_POWERED_ON, + .ctrl_id = CRL_CID_ANALOG_GAIN_L, + .name = "CRL_CID_ANALOG_GAIN_HCG", + .type = CRL_V4L2_CTRL_TYPE_CUSTOM, + .data.std_data.min = 0, + .data.std_data.max = 0xFF, + .data.std_data.step = 1, + .data.std_data.def = 0, + .flags = 0, + .impact = CRL_IMPACTS_NO_IMPACT, + .ctrl = 0, + .regs_items = ARRAY_SIZE(ov2775_ana_gain_h_regs), + .regs = ov2775_ana_gain_h_regs, + .dep_items = 0, + .dep_ctrls = 0, + .v4l2_type = V4L2_CTRL_TYPE_INTEGER, + }, + { + .sd_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .op_type = CRL_V4L2_CTRL_SET_OP, + .context = SENSOR_POWERED_ON, + .ctrl_id = CRL_CID_ANALOG_GAIN_S, + .name = "CRL_CID_ANALOG_GAIN_LCG", + .type = CRL_V4L2_CTRL_TYPE_CUSTOM, + .data.std_data.min = 0, + .data.std_data.max = 0xFF, + .data.std_data.step = 1, + .data.std_data.def = 0, + .flags = 0, + .impact = CRL_IMPACTS_NO_IMPACT, + .ctrl = 0, + .regs_items = ARRAY_SIZE(ov2775_ana_gain_l_regs), + .regs = ov2775_ana_gain_l_regs, + .dep_items = 0, + .dep_ctrls = 0, + .v4l2_type = V4L2_CTRL_TYPE_INTEGER, + }, + { + .sd_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .op_type = CRL_V4L2_CTRL_SET_OP, + .context = SENSOR_POWERED_ON, + .ctrl_id = CRL_CID_ANALOG_GAIN_VS, + .name = "CRL_CID_ANALOG_GAIN_VS", + .type = CRL_V4L2_CTRL_TYPE_CUSTOM, + .data.std_data.min = 0, + .data.std_data.max = 0xFF, + .data.std_data.step = 1, + .data.std_data.def = 0, + .flags = 0, + .impact = CRL_IMPACTS_NO_IMPACT, + .ctrl = 0, + .regs_items = ARRAY_SIZE(ov2775_ana_gain_vs_regs), + .regs = ov2775_ana_gain_vs_regs, + .dep_items = 0, + .dep_ctrls = 0, + .v4l2_type = V4L2_CTRL_TYPE_INTEGER, + }, + { + .sd_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .op_type = CRL_V4L2_CTRL_SET_OP, + .context = SENSOR_POWERED_ON, + .ctrl_id = CRL_CID_ANALOG_LINEAR_CG, + .name = "CRL_CID_ANALOG_LINEAR_CG", + .type = CRL_V4L2_CTRL_TYPE_CUSTOM, + .data.std_data.min = 0, + 
.data.std_data.max = 0xFF, + .data.std_data.step = 1, + .data.std_data.def = 0, + .flags = 0, + .impact = CRL_IMPACTS_NO_IMPACT, + .ctrl = 0, + .regs_items = ARRAY_SIZE(ov2775_ana_gain_linear_cg_regs), + .regs = ov2775_ana_gain_linear_cg_regs, + .dep_items = 0, + .dep_ctrls = 0, + .v4l2_type = V4L2_CTRL_TYPE_INTEGER, + }, + { + .sd_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .op_type = CRL_V4L2_CTRL_SET_OP, + .context = SENSOR_POWERED_ON, + .ctrl_id = CRL_CID_DIGITAL_GAIN_L, + .name = "CRL_CID_DIGITAL_GAIN_HCG", + .type = CRL_V4L2_CTRL_TYPE_CUSTOM, + .data.std_data.min = 1, + .data.std_data.max = 0xFFFF, + .data.std_data.step = 1, + .data.std_data.def = 0x100, + .flags = 0, + .impact = CRL_IMPACTS_NO_IMPACT, + .ctrl = 0, + .regs_items = ARRAY_SIZE(ov2775_digital_gain_h_regs), + .regs = ov2775_digital_gain_h_regs, + .dep_items = 0, + .dep_ctrls = 0, + .v4l2_type = V4L2_CTRL_TYPE_INTEGER, + }, + { + .sd_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .op_type = CRL_V4L2_CTRL_SET_OP, + .context = SENSOR_POWERED_ON, + .ctrl_id = CRL_CID_DIGITAL_GAIN_S, + .name = "CRL_CID_DIGITAL_GAIN_LCG", + .type = CRL_V4L2_CTRL_TYPE_CUSTOM, + .data.std_data.min = 1, + .data.std_data.max = 0xFFFF, + .data.std_data.step = 1, + .data.std_data.def = 0x100, + .flags = 0, + .impact = CRL_IMPACTS_NO_IMPACT, + .ctrl = 0, + .regs_items = ARRAY_SIZE(ov2775_digital_gain_l_regs), + .regs = ov2775_digital_gain_l_regs, + .dep_items = 0, + .dep_ctrls = 0, + .v4l2_type = V4L2_CTRL_TYPE_INTEGER, + }, + { + .sd_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .op_type = CRL_V4L2_CTRL_SET_OP, + .context = SENSOR_POWERED_ON, + .ctrl_id = CRL_CID_DIGITAL_GAIN_VS, + .name = "CRL_CID_DIGITAL_GAIN_VS", + .type = CRL_V4L2_CTRL_TYPE_CUSTOM, + .data.std_data.min = 1, + .data.std_data.max = 0xFFFF, + .data.std_data.step = 1, + .data.std_data.def = 0x100, + .flags = 0, + .impact = CRL_IMPACTS_NO_IMPACT, + .ctrl = 0, + .regs_items = ARRAY_SIZE(ov2775_digital_gain_vs_regs), + .regs = ov2775_digital_gain_vs_regs, + .dep_items = 0, + 
.dep_ctrls = 0, + .v4l2_type = V4L2_CTRL_TYPE_INTEGER, + }, + { + .sd_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .op_type = CRL_V4L2_CTRL_SET_OP, + .context = SENSOR_POWERED_ON, + .ctrl_id = CRL_CID_EXPOSURE_SHS1, + .name = "CRL_CID_EXPOSURE_DCG", + .type = CRL_V4L2_CTRL_TYPE_CUSTOM, + .data.std_data.min = 1, + .data.std_data.max = 0xFFFF, + .data.std_data.step = 1, + .data.std_data.def = 0x10, + .flags = 0, + .impact = CRL_IMPACTS_NO_IMPACT, + .ctrl = 0, + .regs_items = ARRAY_SIZE(ov2775_exposure_dcg_regs), + .regs = ov2775_exposure_dcg_regs, + .dep_items = 0, /* FLL is changed automatically */ + .dep_ctrls = 0, + .v4l2_type = V4L2_CTRL_TYPE_INTEGER, + }, + { + .sd_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .op_type = CRL_V4L2_CTRL_SET_OP, + .context = SENSOR_POWERED_ON, + .ctrl_id = CRL_CID_EXPOSURE_SHS2, + .name = "CRL_CID_EXPOSURE_VS", + .type = CRL_V4L2_CTRL_TYPE_CUSTOM, + .data.std_data.min = 1, + .data.std_data.max = 0xFFFF, + .data.std_data.step = 1, + .data.std_data.def = 0x02, + .flags = 0, + .impact = CRL_IMPACTS_NO_IMPACT, + .ctrl = 0, + .regs_items = ARRAY_SIZE(ov2775_exposure_vs_regs), + .regs = ov2775_exposure_vs_regs, + .dep_items = 0, /* FLL is changed automatically */ + .dep_ctrls = 0, + .v4l2_type = V4L2_CTRL_TYPE_INTEGER, + }, + { + .sd_type = CRL_SUBDEV_TYPE_BINNER, + .op_type = CRL_V4L2_CTRL_SET_OP, + .context = SENSOR_POWERED_ON, + .ctrl_id = CRL_CID_SENSOR_MODE, + .name = "CRL_CID_SENSOR_MODE", + .type = CRL_V4L2_CTRL_TYPE_CUSTOM, + .data.std_data.min = 0, + .data.std_data.max = OV2775_CAPTURE_MODE_MAX - 1, + .data.std_data.step = 1, + .data.std_data.def = 0, + .flags = V4L2_CTRL_FLAG_UPDATE, + .impact = CRL_IMPACTS_MODE_SELECTION, + .ctrl = 0, + .regs_items = 0, + .regs = 0, + .dep_items = 0, + .dep_ctrls = 0, + .v4l2_type = V4L2_CTRL_TYPE_INTEGER, + }, +}; + +static struct crl_arithmetic_ops ov2775_frame_desc_width_ops[] = { + { + .op = CRL_ASSIGNMENT, + .operand.entity_type = CRL_DYNAMIC_VAL_OPERAND_TYPE_VAR_REF, + .operand.entity_val = 
CRL_VAR_REF_OUTPUT_WIDTH, + }, +}; + +static struct crl_arithmetic_ops ov2775_frame_desc_height_ops[] = { + { + .op = CRL_ASSIGNMENT, + .operand.entity_type = CRL_DYNAMIC_VAL_OPERAND_TYPE_CONST, + .operand.entity_val = 1, + }, +}; + +static struct crl_frame_desc ov2775_frame_desc[] = { + { + .flags.entity_val = 0, + .bpp.entity_type = CRL_DYNAMIC_VAL_OPERAND_TYPE_VAR_REF, + .bpp.entity_val = CRL_VAR_REF_BITSPERPIXEL, + .pixelcode.entity_val = MEDIA_BUS_FMT_FIXED, + .length.entity_val = 0, + .start_line.entity_val = 0, + .start_pixel.entity_val = 0, + .width = { + .ops_items = ARRAY_SIZE(ov2775_frame_desc_width_ops), + .ops = ov2775_frame_desc_width_ops, + }, + .height = { + .ops_items = ARRAY_SIZE(ov2775_frame_desc_height_ops), + .ops = ov2775_frame_desc_height_ops, + }, + .csi2_channel.entity_val = 0, + .csi2_data_type.entity_val = 0x12, + }, + { + .flags.entity_val = 0, + .bpp.entity_type = CRL_DYNAMIC_VAL_OPERAND_TYPE_VAR_REF, + .bpp.entity_val = CRL_VAR_REF_BITSPERPIXEL, + .pixelcode.entity_val = MEDIA_BUS_FMT_FIXED, + .length.entity_val = 0, + .start_line.entity_val = 0, + .start_pixel.entity_val = 0, + .width = { + .ops_items = ARRAY_SIZE(ov2775_frame_desc_width_ops), + .ops = ov2775_frame_desc_width_ops, + }, + .height = { + .ops_items = ARRAY_SIZE(ov2775_frame_desc_height_ops), + .ops = ov2775_frame_desc_height_ops, + }, + .csi2_channel.entity_val = 1, + .csi2_data_type.entity_val = 0x12, + }, + { + .flags.entity_val = 0, + .bpp.entity_type = CRL_DYNAMIC_VAL_OPERAND_TYPE_VAR_REF, + .bpp.entity_val = CRL_VAR_REF_BITSPERPIXEL, + .pixelcode.entity_val = MEDIA_BUS_FMT_FIXED, + .length.entity_val = 0, + .start_line.entity_val = 0, + .start_pixel.entity_val = 0, + .width = { + .ops_items = ARRAY_SIZE(ov2775_frame_desc_width_ops), + .ops = ov2775_frame_desc_width_ops, + }, + .height = { + .ops_items = ARRAY_SIZE(ov2775_frame_desc_height_ops), + .ops = ov2775_frame_desc_height_ops, + }, + .csi2_channel.entity_val = 2, + .csi2_data_type.entity_val = 0x12, + }, +}; + 
+/* Power items, they are enabled in the order they are listed here */ +static struct crl_power_seq_entity ov2775_power_items[] = { + { + .type = CRL_POWER_ETY_CLK_FRAMEWORK, + .val = 24000000, + .delay = 1000, + }, + { + .type = CRL_POWER_ETY_GPIO_CUSTOM, + .ent_number = 284, /* PWDN pin on CNL, 268 + 16 */ + .val = 1, + .undo_val = 1, + .delay = 0, + }, + { + .type = CRL_POWER_ETY_GPIO_FROM_PDATA, + .val = 1, + .undo_val = 1, + .delay = 5000, + }, +}; + +static struct crl_sensor_configuration ov2775_crl_configuration = { + + .power_items = ARRAY_SIZE(ov2775_power_items), + .power_entities = ov2775_power_items, + + .onetime_init_regs_items = 0, + .onetime_init_regs = 0, + + .powerup_regs_items = ARRAY_SIZE(ov2775_powerup_standby_regset), + .powerup_regs = ov2775_powerup_standby_regset, + + .poweroff_regs_items = 0, + .poweroff_regs = 0, + + .id_reg_items = ARRAY_SIZE(ov2775_sensor_detect_regset), + .id_regs = ov2775_sensor_detect_regset, + + .subdev_items = ARRAY_SIZE(ov2775_sensor_subdevs), + .subdevs = ov2775_sensor_subdevs, + + .sensor_limits = &ov2775_sensor_limits, + + .pll_config_items = ARRAY_SIZE(ov2775_pll_configurations), + .pll_configs = ov2775_pll_configurations, + + .modes_items = ARRAY_SIZE(ov2775_modes), + .modes = ov2775_modes, + + .streamon_regs_items = ARRAY_SIZE(ov2775_streamon_regs), + .streamon_regs = ov2775_streamon_regs, + + .streamoff_regs_items = ARRAY_SIZE(ov2775_streamoff_regs), + .streamoff_regs = ov2775_streamoff_regs, + + .v4l2_ctrls_items = ARRAY_SIZE(ov2775_v4l2_ctrls), + .v4l2_ctrl_bank = ov2775_v4l2_ctrls, + + .csi_fmts_items = ARRAY_SIZE(ov2775_crl_csi_data_fmt), + .csi_fmts = ov2775_crl_csi_data_fmt, + + .flip_items = ARRAY_SIZE(ov2775_flip_configurations), + .flip_data = ov2775_flip_configurations, + + .crl_nvm_info.nvm_flags = CRL_NVM_ADDR_MODE_16BIT, + .crl_nvm_info.nvm_preop_regs_items = 0, + .crl_nvm_info.nvm_postop_regs_items = 0, + .crl_nvm_info.nvm_blobs_items = 0, + + .frame_desc_entries = ARRAY_SIZE(ov2775_frame_desc), 
+ .frame_desc_type = CRL_V4L2_MBUS_FRAME_DESC_TYPE_CSI2, + .frame_desc = ov2775_frame_desc, +}; + +#endif /* __CRLMODULE_OV2775_CONFIGURATION_H_ */ diff --git a/drivers/media/i2c/crlmodule/crl_ov495_configuration.h b/drivers/media/i2c/crlmodule/crl_ov495_configuration.h new file mode 100644 index 0000000000000..6884cf503bb4f --- /dev/null +++ b/drivers/media/i2c/crlmodule/crl_ov495_configuration.h @@ -0,0 +1,284 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (C) 2017 - 2018 Intel Corporation + * + * Author: Ying Chang + * Meng J Chen + * Zhaox Li + * + */ + +#ifndef __CRLMODULE_OV495_CONFIGURATION_H_ +#define __CRLMODULE_OV495_CONFIGURATION_H_ + +#include "crlmodule-sensor-ds.h" + +struct crl_sensor_detect_config ov495_sensor_detect_regset[] = { + { + .reg = {0x3000, CRL_REG_LEN_08BIT, 0xFF}, + .width = 8, + }, + { + .reg = {0x3001, CRL_REG_LEN_08BIT, 0xFF}, + .width = 8, + }, + { + .reg = {0x3002, CRL_REG_LEN_08BIT, 0xFF}, + .width = 8, + }, + { + .reg = {0x3003, CRL_REG_LEN_08BIT, 0xFF}, + .width = 8, + }, +}; + +static struct crl_pll_configuration ov495_pll_configurations[] = { + { + .input_clk = 27000000, + .op_sys_clk = 400000000, + .bitsperpixel = 16, + .pixel_rate_csi = 108000000, + .pixel_rate_pa = 108000000, /* pixel_rate = op_sys_clk*2 *csi_lanes/bitsperpixel */ + .csi_lanes = 4, + .comp_items = 0, + .ctrl_data = 0, + .pll_regs_items = 0, + .pll_regs = 0, + }, +}; + +static struct crl_subdev_rect_rep ov495_1280_1080_rects[] = { + { + .subdev_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .in_rect.left = 0, + .in_rect.top = 0, + .in_rect.width = 1280, + .in_rect.height = 1080, + .out_rect.left = 0, + .out_rect.top = 0, + .out_rect.width = 1280, + .out_rect.height = 1080, + }, + { + .subdev_type = CRL_SUBDEV_TYPE_BINNER, + .in_rect.left = 0, + .in_rect.top = 0, + .in_rect.width = 1280, + .in_rect.height = 1080, + .out_rect.left = 0, + .out_rect.top = 0, + .out_rect.width = 1280, + .out_rect.height = 1080, + }, +}; + +static struct crl_subdev_rect_rep 
ov495_1920_1080_rects[] = { + { + .subdev_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .in_rect.left = 0, + .in_rect.top = 0, + .in_rect.width = 1920, + .in_rect.height = 1080, + .out_rect.left = 0, + .out_rect.top = 0, + .out_rect.width = 1920, + .out_rect.height = 1080, + }, + { + .subdev_type = CRL_SUBDEV_TYPE_BINNER, + .in_rect.left = 0, + .in_rect.top = 0, + .in_rect.width = 1920, + .in_rect.height = 1080, + .out_rect.left = 0, + .out_rect.top = 0, + .out_rect.width = 1920, + .out_rect.height = 1080, + }, +}; + +static struct crl_register_write_rep ov495_1920x1080_regs[] = { + {0x3516, CRL_REG_LEN_08BIT, 0x00}, + {0x354d, CRL_REG_LEN_08BIT, 0x10}, + {0x354a, CRL_REG_LEN_08BIT, 0x1d}, + {0x0500, CRL_REG_LEN_08BIT, 0x00}, + {0x30c0, CRL_REG_LEN_08BIT, 0xe2}, + {0x0000, CRL_REG_LEN_DELAY, 0x0a}, + + {0x3516, CRL_REG_LEN_08BIT, 0x00}, + {0x354d, CRL_REG_LEN_08BIT, 0x10}, + {0x354a, CRL_REG_LEN_08BIT, 0x1d}, + {0x0500, CRL_REG_LEN_08BIT, 0x01}, + {0x30c0, CRL_REG_LEN_08BIT, 0xe2}, + {0x0000, CRL_REG_LEN_DELAY, 0x0a}, +}; + +static struct crl_register_write_rep ov495_1280x1080_regs[] = { + {0x3516, CRL_REG_LEN_08BIT, 0x00}, + {0x354d, CRL_REG_LEN_08BIT, 0x10}, + {0x354a, CRL_REG_LEN_08BIT, 0x1d}, + {0x7800, CRL_REG_LEN_08BIT, 0x00}, + {0x0500, CRL_REG_LEN_08BIT, 0x00}, + {0x0501, CRL_REG_LEN_08BIT, 0x01}, + {0x0502, CRL_REG_LEN_08BIT, 0x01}, + {0x0503, CRL_REG_LEN_08BIT, 0x40}, + {0x0504, CRL_REG_LEN_08BIT, 0x00}, + {0x0505, CRL_REG_LEN_08BIT, 0x00}, + {0x0506, CRL_REG_LEN_08BIT, 0x05}, + {0x0507, CRL_REG_LEN_08BIT, 0x00}, + {0x0508, CRL_REG_LEN_08BIT, 0x04}, + {0x0509, CRL_REG_LEN_08BIT, 0x38}, + {0x30c0, CRL_REG_LEN_08BIT, 0xc3}, + {0x0000, CRL_REG_LEN_DELAY, 0x0a}, +}; + +static struct crl_mode_rep ov495_modes[] = { + { + .sd_rects_items = ARRAY_SIZE(ov495_1280_1080_rects), + .sd_rects = ov495_1280_1080_rects, + .binn_hor = 1, + .binn_vert = 1, + .scale_m = 1, + .width = 1280, + .height = 1080, + .min_llp = 2250, + .min_fll = 1320, + .mode_regs_items = 
ARRAY_SIZE(ov495_1280x1080_regs), + .mode_regs = ov495_1280x1080_regs, + }, + { + .sd_rects_items = ARRAY_SIZE(ov495_1920_1080_rects), + .sd_rects = ov495_1920_1080_rects, + .binn_hor = 1, + .binn_vert = 1, + .scale_m = 1, + .width = 1920, + .height = 1080, + .min_llp = 2250, + .min_fll = 1320, + .mode_regs_items = ARRAY_SIZE(ov495_1920x1080_regs), + .mode_regs = ov495_1920x1080_regs, + }, +}; + +static struct crl_sensor_subdev_config ov495_sensor_subdevs[] = { + { + .subdev_type = CRL_SUBDEV_TYPE_BINNER, + .name = "ov495 binner", + }, + { + .subdev_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .name = "ov495 pixel array", + } +}; + +static struct crl_sensor_limits ov495_sensor_limits = { + .x_addr_min = 0, + .y_addr_min = 0, + .x_addr_max = 1920, + .y_addr_max = 1080, + .min_frame_length_lines = 240, + .max_frame_length_lines = 65535, + .min_line_length_pixels = 320, + .max_line_length_pixels = 32752, +}; + +static struct crl_csi_data_fmt ov495_crl_csi_data_fmt[] = { + { + .code = MEDIA_BUS_FMT_YUYV8_1X16, + .pixel_order = CRL_PIXEL_ORDER_IGNORE, + .bits_per_pixel = 16, + }, + { + .code = MEDIA_BUS_FMT_UYVY8_1X16, + .pixel_order = CRL_PIXEL_ORDER_IGNORE, + .bits_per_pixel = 16, + }, +}; + +static struct crl_v4l2_ctrl ov495_v4l2_ctrls[] = { + { + .sd_type = CRL_SUBDEV_TYPE_BINNER, + .op_type = CRL_V4L2_CTRL_SET_OP, + .context = SENSOR_IDLE, + .ctrl_id = V4L2_CID_LINK_FREQ, + .name = "V4L2_CID_LINK_FREQ", + .type = CRL_V4L2_CTRL_TYPE_MENU_INT, + .data.v4l2_int_menu.def = 0, + .data.v4l2_int_menu.max = 0, + .data.v4l2_int_menu.menu = 0, + .flags = 0, + .impact = CRL_IMPACTS_NO_IMPACT, + .regs_items = 0, + .regs = 0, + .dep_items = 0, + .dep_ctrls = 0, + }, + { + .sd_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .op_type = CRL_V4L2_CTRL_GET_OP, + .context = SENSOR_POWERED_ON, + .ctrl_id = V4L2_CID_PIXEL_RATE, + .name = "V4L2_CID_PIXEL_RATE_PA", + .type = CRL_V4L2_CTRL_TYPE_INTEGER, + .data.std_data.min = 0, + .data.std_data.max = INT_MAX, + .data.std_data.step = 1, + 
.data.std_data.def = 0, + .flags = 0, + .impact = CRL_IMPACTS_NO_IMPACT, + .regs_items = 0, + .regs = 0, + .dep_items = 0, + .dep_ctrls = 0, + }, + { + .sd_type = CRL_SUBDEV_TYPE_BINNER, + .op_type = CRL_V4L2_CTRL_GET_OP, + .context = SENSOR_POWERED_ON, + .ctrl_id = V4L2_CID_PIXEL_RATE, + .name = "V4L2_CID_PIXEL_RATE_CSI", + .type = CRL_V4L2_CTRL_TYPE_INTEGER, + .data.std_data.min = 0, + .data.std_data.max = INT_MAX, + .data.std_data.step = 1, + .data.std_data.def = 0, + .flags = 0, + .impact = CRL_IMPACTS_NO_IMPACT, + .regs_items = 0, + .regs = 0, + .dep_items = 0, + .dep_ctrls = 0, + }, +}; + +struct crl_sensor_configuration ov495_crl_configuration = { + + .subdev_items = ARRAY_SIZE(ov495_sensor_subdevs), + .subdevs = ov495_sensor_subdevs, + + .pll_config_items = ARRAY_SIZE(ov495_pll_configurations), + .pll_configs = ov495_pll_configurations, + + .id_reg_items = ARRAY_SIZE(ov495_sensor_detect_regset), + .id_regs = ov495_sensor_detect_regset, + + .sensor_limits = &ov495_sensor_limits, + + .modes_items = ARRAY_SIZE(ov495_modes), + .modes = ov495_modes, + + .streamon_regs_items = 0, + .streamon_regs = 0, + + .streamoff_regs_items = 0, + .streamoff_regs = 0, + + .v4l2_ctrls_items = ARRAY_SIZE(ov495_v4l2_ctrls), + .v4l2_ctrl_bank = ov495_v4l2_ctrls, + + .csi_fmts_items = ARRAY_SIZE(ov495_crl_csi_data_fmt), + .csi_fmts = ov495_crl_csi_data_fmt, + +}; + +#endif /* __CRLMODULE_OV495_CONFIGURATION_H_ */ diff --git a/drivers/media/i2c/crlmodule/crl_ov5670_configuration.h b/drivers/media/i2c/crlmodule/crl_ov5670_configuration.h new file mode 100644 index 0000000000000..7badb609dd45b --- /dev/null +++ b/drivers/media/i2c/crlmodule/crl_ov5670_configuration.h @@ -0,0 +1,1136 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (C) 2016 - 2018 Intel Corporation + * + * Author: Tommi Franttila + * + */ + +#ifndef __CRLMODULE_ov5670_CONFIGURATION_H_ +#define __CRLMODULE_ov5670_CONFIGURATION_H_ + +#include "crlmodule-nvm.h" +#include "crlmodule-sensor-ds.h" + +static struct 
crl_register_write_rep ov5670_pll_840mbps[] = { + { 0x030a, CRL_REG_LEN_08BIT, 0x00 }, + { 0x0300, CRL_REG_LEN_08BIT, 0x04 }, + { 0x0301, CRL_REG_LEN_08BIT, 0x00 }, + { 0x0302, CRL_REG_LEN_08BIT, 0x78 }, + { 0x0304, CRL_REG_LEN_08BIT, 0x03 }, + { 0x0303, CRL_REG_LEN_08BIT, 0x00 }, + { 0x0305, CRL_REG_LEN_08BIT, 0x01 }, + { 0x0306, CRL_REG_LEN_08BIT, 0x01 }, + { 0x0312, CRL_REG_LEN_08BIT, 0x01 }, + { 0x030b, CRL_REG_LEN_08BIT, 0x00 }, + { 0x030c, CRL_REG_LEN_08BIT, 0x00 }, + { 0x030d, CRL_REG_LEN_08BIT, 0x1e }, + { 0x030f, CRL_REG_LEN_08BIT, 0x06 }, + { 0x030e, CRL_REG_LEN_08BIT, 0x00 }, +}; + +static struct crl_register_write_rep ov5670_powerup_regset[] = { + { 0x0103, CRL_REG_LEN_08BIT, 0x01 }, + { 0x0100, CRL_REG_LEN_08BIT, 0x00 }, + { 0x3000, CRL_REG_LEN_08BIT, 0x00 }, + { 0x3002, CRL_REG_LEN_08BIT, 0x21 }, + { 0x3005, CRL_REG_LEN_08BIT, 0xf0 }, + { 0x3007, CRL_REG_LEN_08BIT, 0x00 }, + { 0x3015, CRL_REG_LEN_08BIT, 0x0f }, + { 0x3018, CRL_REG_LEN_08BIT, 0x32 }, + { 0x301a, CRL_REG_LEN_08BIT, 0xf0 }, + { 0x301b, CRL_REG_LEN_08BIT, 0xf0 }, + { 0x301c, CRL_REG_LEN_08BIT, 0xf0 }, + { 0x301d, CRL_REG_LEN_08BIT, 0xf0 }, + { 0x301e, CRL_REG_LEN_08BIT, 0xf0 }, + { 0x3021, CRL_REG_LEN_08BIT, 0x03 }, + { 0x3030, CRL_REG_LEN_08BIT, 0x00 }, + { 0x3031, CRL_REG_LEN_08BIT, 0x0a }, + { 0x303c, CRL_REG_LEN_08BIT, 0xff }, + { 0x303e, CRL_REG_LEN_08BIT, 0xff }, + { 0x3040, CRL_REG_LEN_08BIT, 0xf0 }, + { 0x3041, CRL_REG_LEN_08BIT, 0x00 }, + { 0x3042, CRL_REG_LEN_08BIT, 0xf0 }, + { 0x3106, CRL_REG_LEN_08BIT, 0x11 }, + { 0x3500, CRL_REG_LEN_08BIT, 0x00 }, + { 0x3502, CRL_REG_LEN_08BIT, 0x00 }, + { 0x3503, CRL_REG_LEN_08BIT, 0x04 }, + { 0x3504, CRL_REG_LEN_08BIT, 0x03 }, + { 0x3505, CRL_REG_LEN_08BIT, 0x83 }, + { 0x3508, CRL_REG_LEN_08BIT, 0x04 }, + { 0x3509, CRL_REG_LEN_08BIT, 0x00 }, + { 0x350e, CRL_REG_LEN_08BIT, 0x04 }, + { 0x350f, CRL_REG_LEN_08BIT, 0x00 }, + { 0x3510, CRL_REG_LEN_08BIT, 0x00 }, + { 0x3511, CRL_REG_LEN_08BIT, 0x02 }, + { 0x3512, CRL_REG_LEN_08BIT, 0x00 }, + { 
0x3601, CRL_REG_LEN_08BIT, 0xc8 }, + { 0x3610, CRL_REG_LEN_08BIT, 0x88 }, + { 0x3612, CRL_REG_LEN_08BIT, 0x48 }, + { 0x3614, CRL_REG_LEN_08BIT, 0x5b }, + { 0x3615, CRL_REG_LEN_08BIT, 0x96 }, + { 0x3621, CRL_REG_LEN_08BIT, 0xd0 }, + { 0x3622, CRL_REG_LEN_08BIT, 0x00 }, + { 0x3623, CRL_REG_LEN_08BIT, 0x00 }, + { 0x3633, CRL_REG_LEN_08BIT, 0x13 }, + { 0x3634, CRL_REG_LEN_08BIT, 0x13 }, + { 0x3635, CRL_REG_LEN_08BIT, 0x13 }, + { 0x3636, CRL_REG_LEN_08BIT, 0x13 }, + { 0x3645, CRL_REG_LEN_08BIT, 0x13 }, + { 0x3646, CRL_REG_LEN_08BIT, 0x82 }, + { 0x3650, CRL_REG_LEN_08BIT, 0x00 }, + { 0x3652, CRL_REG_LEN_08BIT, 0xff }, + { 0x3655, CRL_REG_LEN_08BIT, 0x20 }, + { 0x3656, CRL_REG_LEN_08BIT, 0xff }, + { 0x365a, CRL_REG_LEN_08BIT, 0xff }, + { 0x365e, CRL_REG_LEN_08BIT, 0xff }, + { 0x3668, CRL_REG_LEN_08BIT, 0x00 }, + { 0x366a, CRL_REG_LEN_08BIT, 0x07 }, + { 0x366e, CRL_REG_LEN_08BIT, 0x10 }, + { 0x366d, CRL_REG_LEN_08BIT, 0x00 }, + { 0x366f, CRL_REG_LEN_08BIT, 0x80 }, + { 0x3700, CRL_REG_LEN_08BIT, 0x28 }, + { 0x3701, CRL_REG_LEN_08BIT, 0x10 }, + { 0x3702, CRL_REG_LEN_08BIT, 0x3a }, + { 0x3703, CRL_REG_LEN_08BIT, 0x19 }, + { 0x3704, CRL_REG_LEN_08BIT, 0x10 }, + { 0x3705, CRL_REG_LEN_08BIT, 0x00 }, + { 0x3706, CRL_REG_LEN_08BIT, 0x66 }, + { 0x3707, CRL_REG_LEN_08BIT, 0x08 }, + { 0x3708, CRL_REG_LEN_08BIT, 0x34 }, + { 0x3709, CRL_REG_LEN_08BIT, 0x40 }, + { 0x370a, CRL_REG_LEN_08BIT, 0x01 }, + { 0x370b, CRL_REG_LEN_08BIT, 0x1b }, + { 0x3714, CRL_REG_LEN_08BIT, 0x24 }, + { 0x371a, CRL_REG_LEN_08BIT, 0x3e }, + { 0x3733, CRL_REG_LEN_08BIT, 0x00 }, + { 0x3734, CRL_REG_LEN_08BIT, 0x00 }, + { 0x373a, CRL_REG_LEN_08BIT, 0x05 }, + { 0x373b, CRL_REG_LEN_08BIT, 0x06 }, + { 0x373c, CRL_REG_LEN_08BIT, 0x0a }, + { 0x373f, CRL_REG_LEN_08BIT, 0xa0 }, + { 0x3755, CRL_REG_LEN_08BIT, 0x00 }, + { 0x3758, CRL_REG_LEN_08BIT, 0x00 }, + { 0x375b, CRL_REG_LEN_08BIT, 0x0e }, + { 0x3766, CRL_REG_LEN_08BIT, 0x5f }, + { 0x3768, CRL_REG_LEN_08BIT, 0x00 }, + { 0x3769, CRL_REG_LEN_08BIT, 0x22 }, + { 0x3773, 
CRL_REG_LEN_08BIT, 0x08 }, + { 0x3774, CRL_REG_LEN_08BIT, 0x1f }, + { 0x3776, CRL_REG_LEN_08BIT, 0x06 }, + { 0x37a0, CRL_REG_LEN_08BIT, 0x88 }, + { 0x37a1, CRL_REG_LEN_08BIT, 0x5c }, + { 0x37a7, CRL_REG_LEN_08BIT, 0x88 }, + { 0x37a8, CRL_REG_LEN_08BIT, 0x70 }, + { 0x37aa, CRL_REG_LEN_08BIT, 0x88 }, + { 0x37ab, CRL_REG_LEN_08BIT, 0x48 }, + { 0x37b3, CRL_REG_LEN_08BIT, 0x66 }, + { 0x37c2, CRL_REG_LEN_08BIT, 0x04 }, + { 0x37c5, CRL_REG_LEN_08BIT, 0x00 }, + { 0x37c8, CRL_REG_LEN_08BIT, 0x00 }, + { 0x3800, CRL_REG_LEN_08BIT, 0x00 }, + { 0x3801, CRL_REG_LEN_08BIT, 0x0c }, + { 0x3802, CRL_REG_LEN_08BIT, 0x00 }, + { 0x3803, CRL_REG_LEN_08BIT, 0x04 }, + { 0x3804, CRL_REG_LEN_08BIT, 0x0a }, + { 0x3805, CRL_REG_LEN_08BIT, 0x33 }, + { 0x3806, CRL_REG_LEN_08BIT, 0x07 }, + { 0x3807, CRL_REG_LEN_08BIT, 0xa3 }, + { 0x3811, CRL_REG_LEN_08BIT, 0x04 }, + { 0x3813, CRL_REG_LEN_08BIT, 0x02 }, + { 0x3815, CRL_REG_LEN_08BIT, 0x01 }, + { 0x3816, CRL_REG_LEN_08BIT, 0x00 }, + { 0x3817, CRL_REG_LEN_08BIT, 0x00 }, + { 0x3818, CRL_REG_LEN_08BIT, 0x00 }, + { 0x3819, CRL_REG_LEN_08BIT, 0x00 }, + { 0x3820, CRL_REG_LEN_08BIT, 0x00 }, + { 0x3822, CRL_REG_LEN_08BIT, 0x48 }, + { 0x3826, CRL_REG_LEN_08BIT, 0x00 }, + { 0x3827, CRL_REG_LEN_08BIT, 0x08 }, + { 0x3830, CRL_REG_LEN_08BIT, 0x08 }, + { 0x3836, CRL_REG_LEN_08BIT, 0x02 }, + { 0x3837, CRL_REG_LEN_08BIT, 0x00 }, + { 0x3838, CRL_REG_LEN_08BIT, 0x10 }, + { 0x3841, CRL_REG_LEN_08BIT, 0xff }, /* Auto size function enabled */ + { 0x3846, CRL_REG_LEN_08BIT, 0x48 }, + { 0x3861, CRL_REG_LEN_08BIT, 0x00 }, + { 0x3862, CRL_REG_LEN_08BIT, 0x04 }, + { 0x3863, CRL_REG_LEN_08BIT, 0x06 }, + { 0x3a11, CRL_REG_LEN_08BIT, 0x01 }, + { 0x3a12, CRL_REG_LEN_08BIT, 0x78 }, + { 0x3b00, CRL_REG_LEN_08BIT, 0x00 }, + { 0x3b02, CRL_REG_LEN_08BIT, 0x00 }, + { 0x3b03, CRL_REG_LEN_08BIT, 0x00 }, + { 0x3b04, CRL_REG_LEN_08BIT, 0x00 }, + { 0x3b05, CRL_REG_LEN_08BIT, 0x00 }, + { 0x3c00, CRL_REG_LEN_08BIT, 0x89 }, + { 0x3c01, CRL_REG_LEN_08BIT, 0xab }, + { 0x3c02, 
CRL_REG_LEN_08BIT, 0x01 }, + { 0x3c03, CRL_REG_LEN_08BIT, 0x00 }, + { 0x3c04, CRL_REG_LEN_08BIT, 0x00 }, + { 0x3c05, CRL_REG_LEN_08BIT, 0x03 }, + { 0x3c06, CRL_REG_LEN_08BIT, 0x00 }, + { 0x3c07, CRL_REG_LEN_08BIT, 0x05 }, + { 0x3c0c, CRL_REG_LEN_08BIT, 0x00 }, + { 0x3c0d, CRL_REG_LEN_08BIT, 0x00 }, + { 0x3c0e, CRL_REG_LEN_08BIT, 0x00 }, + { 0x3c0f, CRL_REG_LEN_08BIT, 0x00 }, + { 0x3c40, CRL_REG_LEN_08BIT, 0x00 }, + { 0x3c41, CRL_REG_LEN_08BIT, 0xa3 }, + { 0x3c43, CRL_REG_LEN_08BIT, 0x7d }, + { 0x3c45, CRL_REG_LEN_08BIT, 0xd7 }, + { 0x3c47, CRL_REG_LEN_08BIT, 0xfc }, + { 0x3c50, CRL_REG_LEN_08BIT, 0x05 }, + { 0x3c52, CRL_REG_LEN_08BIT, 0xaa }, + { 0x3c54, CRL_REG_LEN_08BIT, 0x71 }, + { 0x3c56, CRL_REG_LEN_08BIT, 0x80 }, + { 0x3d85, CRL_REG_LEN_08BIT, 0x17 }, + { 0x3d8d, CRL_REG_LEN_08BIT, 0xea }, + { 0x3f03, CRL_REG_LEN_08BIT, 0x00 }, + { 0x3f0a, CRL_REG_LEN_08BIT, 0x00 }, + { 0x3f0b, CRL_REG_LEN_08BIT, 0x00 }, + { 0x4001, CRL_REG_LEN_08BIT, 0x60 }, + { 0x4009, CRL_REG_LEN_08BIT, 0x0d }, + { 0x4017, CRL_REG_LEN_08BIT, 0x08 }, + { 0x4020, CRL_REG_LEN_08BIT, 0x00 }, + { 0x4021, CRL_REG_LEN_08BIT, 0x00 }, + { 0x4022, CRL_REG_LEN_08BIT, 0x00 }, + { 0x4023, CRL_REG_LEN_08BIT, 0x00 }, + { 0x4024, CRL_REG_LEN_08BIT, 0x00 }, + { 0x4025, CRL_REG_LEN_08BIT, 0x00 }, + { 0x4026, CRL_REG_LEN_08BIT, 0x00 }, + { 0x4027, CRL_REG_LEN_08BIT, 0x00 }, + { 0x4028, CRL_REG_LEN_08BIT, 0x00 }, + { 0x4029, CRL_REG_LEN_08BIT, 0x00 }, + { 0x402a, CRL_REG_LEN_08BIT, 0x00 }, + { 0x402b, CRL_REG_LEN_08BIT, 0x00 }, + { 0x402c, CRL_REG_LEN_08BIT, 0x00 }, + { 0x402d, CRL_REG_LEN_08BIT, 0x00 }, + { 0x402e, CRL_REG_LEN_08BIT, 0x00 }, + { 0x402f, CRL_REG_LEN_08BIT, 0x00 }, + { 0x4040, CRL_REG_LEN_08BIT, 0x00 }, + { 0x4041, CRL_REG_LEN_08BIT, 0x03 }, + { 0x4042, CRL_REG_LEN_08BIT, 0x00 }, + { 0x4043, CRL_REG_LEN_08BIT, 0x7A }, + { 0x4044, CRL_REG_LEN_08BIT, 0x00 }, + { 0x4045, CRL_REG_LEN_08BIT, 0x7A }, + { 0x4046, CRL_REG_LEN_08BIT, 0x00 }, + { 0x4047, CRL_REG_LEN_08BIT, 0x7A }, + { 0x4048, 
CRL_REG_LEN_08BIT, 0x00 }, + { 0x4049, CRL_REG_LEN_08BIT, 0x7A }, + { 0x4303, CRL_REG_LEN_08BIT, 0x00 }, + { 0x4307, CRL_REG_LEN_08BIT, 0x30 }, + { 0x4500, CRL_REG_LEN_08BIT, 0x58 }, + { 0x4501, CRL_REG_LEN_08BIT, 0x04 }, + { 0x4502, CRL_REG_LEN_08BIT, 0x40 }, + { 0x4503, CRL_REG_LEN_08BIT, 0x10 }, + { 0x4508, CRL_REG_LEN_08BIT, 0xaa }, + { 0x4509, CRL_REG_LEN_08BIT, 0xaa }, + { 0x450a, CRL_REG_LEN_08BIT, 0x00 }, + { 0x450b, CRL_REG_LEN_08BIT, 0x00 }, + { 0x4700, CRL_REG_LEN_08BIT, 0xa4 }, + { 0x4800, CRL_REG_LEN_08BIT, 0x4c }, + { 0x4816, CRL_REG_LEN_08BIT, 0x53 }, + { 0x481f, CRL_REG_LEN_08BIT, 0x40 }, + { 0x4837, CRL_REG_LEN_08BIT, 0x13 }, + { 0x5000, CRL_REG_LEN_08BIT, 0x56 }, + { 0x5001, CRL_REG_LEN_08BIT, 0x01 }, + { 0x5002, CRL_REG_LEN_08BIT, 0x28 }, + { 0x5004, CRL_REG_LEN_08BIT, 0x0c }, + { 0x5006, CRL_REG_LEN_08BIT, 0x0c }, + { 0x5007, CRL_REG_LEN_08BIT, 0xe0 }, + { 0x5008, CRL_REG_LEN_08BIT, 0x01 }, + { 0x5009, CRL_REG_LEN_08BIT, 0xb0 }, + { 0x5901, CRL_REG_LEN_08BIT, 0x00 }, + { 0x5a01, CRL_REG_LEN_08BIT, 0x00 }, + { 0x5a03, CRL_REG_LEN_08BIT, 0x00 }, + { 0x5a04, CRL_REG_LEN_08BIT, 0x0c }, + { 0x5a05, CRL_REG_LEN_08BIT, 0xe0 }, + { 0x5a06, CRL_REG_LEN_08BIT, 0x09 }, + { 0x5a07, CRL_REG_LEN_08BIT, 0xb0 }, + { 0x5a08, CRL_REG_LEN_08BIT, 0x06 }, + { 0x5e00, CRL_REG_LEN_08BIT, 0x00 }, + { 0x3734, CRL_REG_LEN_08BIT, 0x40 }, + { 0x5b00, CRL_REG_LEN_08BIT, 0x01 }, + { 0x5b01, CRL_REG_LEN_08BIT, 0x10 }, + { 0x5b02, CRL_REG_LEN_08BIT, 0x01 }, + { 0x5b03, CRL_REG_LEN_08BIT, 0xdb }, + { 0x3d8c, CRL_REG_LEN_08BIT, 0x71 }, + { 0x370b, CRL_REG_LEN_08BIT, 0x05 }, + { 0x3618, CRL_REG_LEN_08BIT, 0x2a }, + { 0x5780, CRL_REG_LEN_08BIT, 0x3e }, + { 0x5781, CRL_REG_LEN_08BIT, 0x0f }, + { 0x5782, CRL_REG_LEN_08BIT, 0x44 }, + { 0x5783, CRL_REG_LEN_08BIT, 0x02 }, + { 0x5784, CRL_REG_LEN_08BIT, 0x01 }, + { 0x5785, CRL_REG_LEN_08BIT, 0x01 }, + { 0x5786, CRL_REG_LEN_08BIT, 0x00 }, + { 0x5787, CRL_REG_LEN_08BIT, 0x04 }, + { 0x5788, CRL_REG_LEN_08BIT, 0x02 }, + { 0x5789, 
CRL_REG_LEN_08BIT, 0x0f }, + { 0x578a, CRL_REG_LEN_08BIT, 0xfd }, + { 0x578b, CRL_REG_LEN_08BIT, 0xf5 }, + { 0x578c, CRL_REG_LEN_08BIT, 0xf5 }, + { 0x578d, CRL_REG_LEN_08BIT, 0x03 }, + { 0x578e, CRL_REG_LEN_08BIT, 0x08 }, + { 0x578f, CRL_REG_LEN_08BIT, 0x0c }, + { 0x5790, CRL_REG_LEN_08BIT, 0x08 }, + { 0x5791, CRL_REG_LEN_08BIT, 0x06 }, + { 0x5792, CRL_REG_LEN_08BIT, 0x00 }, + { 0x5793, CRL_REG_LEN_08BIT, 0x52 }, + { 0x5794, CRL_REG_LEN_08BIT, 0xa3 }, + { 0x3503, CRL_REG_LEN_08BIT, 0x00 }, + { 0x380e, CRL_REG_LEN_08BIT, 0x04 }, + { 0x380f, CRL_REG_LEN_08BIT, 0x60 }, + { 0x3002, CRL_REG_LEN_08BIT, 0x61 }, + { 0x3010, CRL_REG_LEN_08BIT, 0x40 }, + { 0x300D, CRL_REG_LEN_08BIT, 0x00 }, + { 0x5045, CRL_REG_LEN_08BIT, 0x05 }, + { 0x5048, CRL_REG_LEN_08BIT, 0x10 }, + { 0x3610, CRL_REG_LEN_08BIT, 0xa8 }, + { 0x3733, CRL_REG_LEN_08BIT, 0x10 }, + { 0x3734, CRL_REG_LEN_08BIT, 0x40 }, +}; + +static struct crl_register_write_rep ov5670_mode_1944[] = { + /* Auto size function in use, but no cropping in this mode */ + { 0x3808, CRL_REG_LEN_08BIT, 0x0a }, + { 0x3809, CRL_REG_LEN_08BIT, 0x20 }, + { 0x380a, CRL_REG_LEN_08BIT, 0x07 }, + { 0x380b, CRL_REG_LEN_08BIT, 0x98 }, + { 0x3821, CRL_REG_LEN_08BIT, 0x00 }, + { 0x4600, CRL_REG_LEN_08BIT, 0x01 }, + { 0x4601, CRL_REG_LEN_08BIT, 0x03 }, +}; + +static struct crl_register_write_rep ov5670_mode_1940[] = { + /* Auto size function in use, cropping from the centre of the image */ + { 0x3808, CRL_REG_LEN_08BIT, 0x0a }, + { 0x3809, CRL_REG_LEN_08BIT, 0x00 }, + { 0x380a, CRL_REG_LEN_08BIT, 0x07 }, + { 0x380b, CRL_REG_LEN_08BIT, 0x94 }, + { 0x3821, CRL_REG_LEN_08BIT, 0x00 }, + { 0x4600, CRL_REG_LEN_08BIT, 0x01 }, + { 0x4601, CRL_REG_LEN_08BIT, 0x00 }, +}; + +static struct crl_register_write_rep ov5670_mode_1458[] = { + /* Auto size function in use, cropping from the centre of the image */ + { 0x3808, CRL_REG_LEN_08BIT, 0x0a }, + { 0x3809, CRL_REG_LEN_08BIT, 0x20 }, + { 0x380a, CRL_REG_LEN_08BIT, 0x05 }, + { 0x380b, CRL_REG_LEN_08BIT, 0xB2 }, + 
{ 0x3821, CRL_REG_LEN_08BIT, 0x00 }, + { 0x4600, CRL_REG_LEN_08BIT, 0x01 }, + { 0x4601, CRL_REG_LEN_08BIT, 0x03 }, +}; + +static struct crl_register_write_rep ov5670_mode_1456[] = { + /* Auto size function in use, cropping from the centre of the image */ + { 0x3808, CRL_REG_LEN_08BIT, 0x0a }, + { 0x3809, CRL_REG_LEN_08BIT, 0x00 }, + { 0x380a, CRL_REG_LEN_08BIT, 0x05 }, + { 0x380b, CRL_REG_LEN_08BIT, 0xB0 }, + { 0x3821, CRL_REG_LEN_08BIT, 0x00 }, + { 0x4600, CRL_REG_LEN_08BIT, 0x01 }, + { 0x4601, CRL_REG_LEN_08BIT, 0x00 }, +}; + +static struct crl_register_write_rep ov5670_mode_1152[] = { + /* Auto size function in use, cropping from the centre of the image */ + { 0x3808, CRL_REG_LEN_08BIT, 0x07 }, + { 0x3809, CRL_REG_LEN_08BIT, 0xC0 }, + { 0x380a, CRL_REG_LEN_08BIT, 0x04 }, + { 0x380b, CRL_REG_LEN_08BIT, 0x80 }, + { 0x3821, CRL_REG_LEN_08BIT, 0x00 }, + { 0x4600, CRL_REG_LEN_08BIT, 0x00 }, + { 0x4601, CRL_REG_LEN_08BIT, 0xc6 }, +}; + +static struct crl_register_write_rep ov5670_mode_1080[] = { + /* Auto size function in use, cropping from the centre of the image */ + { 0x3808, CRL_REG_LEN_08BIT, 0x07 }, + { 0x3809, CRL_REG_LEN_08BIT, 0x80 }, + { 0x380a, CRL_REG_LEN_08BIT, 0x04 }, + { 0x380b, CRL_REG_LEN_08BIT, 0x38 }, + { 0x3821, CRL_REG_LEN_08BIT, 0x00 }, + { 0x4600, CRL_REG_LEN_08BIT, 0x00 }, + { 0x4601, CRL_REG_LEN_08BIT, 0xc0 }, +}; + +static struct crl_register_write_rep ov5670_streamon_regs[] = { + { 0x0100, CRL_REG_LEN_08BIT, 0x01 } +}; + +static struct crl_register_write_rep ov5670_streamoff_regs[] = { + /* MIPI stream off when current frame finish */ + { 0x4202, CRL_REG_LEN_08BIT, 0x0f }, + /* Wait to finish the current frame */ + { 0x0000, CRL_REG_LEN_DELAY, 0x40 }, + /* Sensor to standby */ + { 0x0100, CRL_REG_LEN_08BIT, 0x00 }, +}; + +static struct crl_register_write_rep ov5670_data_fmt_width10[] = { + { 0x3031, CRL_REG_LEN_08BIT, 0x0a } +}; + +static struct crl_arithmetic_ops ov5670_vflip_ops[] = { + { + .op = CRL_BITWISE_LSHIFT, + .operand.entity_val = 
1, + }, +}; + +static struct crl_arithmetic_ops ov5670_swap_flip_ops[] = { + { + .op = CRL_BITWISE_LSHIFT, + .operand.entity_val = 5, + }, +}; + +static struct crl_arithmetic_ops ov5670_hflip_ops[] = { + { + .op = CRL_BITWISE_LSHIFT, + .operand.entity_val = 1, + }, +}; + +static struct crl_arithmetic_ops ov5670_hblank_ops[] = { + { + .op = CRL_BITWISE_RSHIFT, + .operand.entity_val = 1, + }, +}; + +static struct crl_arithmetic_ops ov5670_exposure_ops[] = { + { + .op = CRL_BITWISE_LSHIFT, + .operand.entity_val = 4, + }, +}; + +static struct crl_dynamic_register_access ov5670_v_flip_regs[] = { + { + .address = 0x3820, + .len = CRL_REG_LEN_08BIT | CRL_REG_READ_AND_UPDATE, + .ops_items = ARRAY_SIZE(ov5670_vflip_ops), + .ops = ov5670_vflip_ops, + .mask = 0x2, + }, + { + .address = 0x450B, + .len = CRL_REG_LEN_08BIT | CRL_REG_READ_AND_UPDATE, + .ops_items = ARRAY_SIZE(ov5670_swap_flip_ops), + .ops = ov5670_swap_flip_ops, + .mask = 0x20, + }, +}; + +static struct crl_dynamic_register_access ov5670_h_flip_regs[] = { + { + .address = 0x3821, + .len = CRL_REG_LEN_08BIT | CRL_REG_READ_AND_UPDATE, + .ops_items = ARRAY_SIZE(ov5670_hflip_ops), + .ops = ov5670_hflip_ops, + .mask = 0x2, + }, +}; + +struct crl_register_write_rep ov5670_poweroff_regset[] = { + { 0x0103, CRL_REG_LEN_08BIT, 0x01 }, +}; + +static struct crl_dynamic_register_access ov5670_ana_gain_global_regs[] = { + { + .address = 0x3508, + .len = CRL_REG_LEN_16BIT, + .ops_items = 0, + .ops = 0, + .mask = 0x7ff, + }, +}; + +static struct crl_dynamic_register_access ov5670_exposure_regs[] = { + { + .address = 0x3500, + .len = CRL_REG_LEN_24BIT, + .ops_items = ARRAY_SIZE(ov5670_exposure_ops), + .ops = ov5670_exposure_ops, + .mask = 0x0ffff0, + }, +}; + +static struct crl_dynamic_register_access ov5670_vblank_regs[] = { + { + .address = 0x380E, + .len = CRL_REG_LEN_16BIT, + .ops_items = 0, + .ops = 0, + .mask = 0xffff, + }, +}; + +static struct crl_dynamic_register_access ov5670_hblank_regs[] = { + { + .address = 0x380C, + 
.len = CRL_REG_LEN_16BIT, + .ops_items = ARRAY_SIZE(ov5670_hblank_ops), + .ops = ov5670_hblank_ops, + .mask = 0xffff, + }, +}; + +static struct crl_sensor_detect_config ov5670_sensor_detect_regset[] = { + { + .reg = { 0x300B, CRL_REG_LEN_08BIT, 0x000000ff }, + .width = 7, + }, + { + .reg = { 0x300C, CRL_REG_LEN_08BIT, 0x000000ff }, + .width = 7, + }, +}; + +static const s64 ov5670_op_sys_clock[] = { 420000000, }; + +static struct crl_pll_configuration ov5670_pll_configurations[] = { + { + .input_clk = 24000000, + .op_sys_clk = 420000000, + .bitsperpixel = 10, + .pixel_rate_csi = 240000000, + .pixel_rate_pa = 199180800, + .csi_lanes = 2, + .comp_items = 0, + .ctrl_data = 0, + .pll_regs_items = ARRAY_SIZE(ov5670_pll_840mbps), + .pll_regs = ov5670_pll_840mbps, + }, +}; + +static struct crl_subdev_rect_rep ov5670_1944_rects[] = { + { + .subdev_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .in_rect = { 0, 0, 2592, 1944 }, + .out_rect = { 0, 0, 2592, 1944 }, + }, + { + .subdev_type = CRL_SUBDEV_TYPE_BINNER, + .in_rect = { 0, 0, 2592, 1944 }, + .out_rect = { 0, 0, 2592, 1944 }, + }, + { + .subdev_type = CRL_SUBDEV_TYPE_SCALER, + .in_rect = { 0, 0, 2592, 1944 }, + .out_rect = { 0, 0, 2592, 1944 }, + }, +}; + +static struct crl_subdev_rect_rep ov5670_1940_rects[] = { + { + .subdev_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .in_rect = { 0, 0, 2592, 1944 }, + .out_rect = { 16, 2, 2560, 1940 }, + }, + { + .subdev_type = CRL_SUBDEV_TYPE_BINNER, + .in_rect = { 0, 0, 2560, 1940 }, + .out_rect = { 0, 0, 2560, 1940 }, + }, + { + .subdev_type = CRL_SUBDEV_TYPE_SCALER, + .in_rect = { 0, 0, 2560, 1940 }, + .out_rect = { 0, 0, 2560, 1940 }, + }, +}; + +static struct crl_subdev_rect_rep ov5670_1458_rects[] = { + { + .subdev_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .in_rect = { 0, 0, 2592, 1944 }, + .out_rect = { 0, 244, 2592, 1458 }, + }, + { + .subdev_type = CRL_SUBDEV_TYPE_BINNER, + .in_rect = { 0, 0, 2592, 1458 }, + .out_rect = { 0, 0, 2592, 1458 }, + }, + { + .subdev_type = CRL_SUBDEV_TYPE_SCALER, 
+ .in_rect = { 0, 0, 2592, 1458 }, + .out_rect = { 0, 0, 2592, 1458 }, + }, +}; + +static struct crl_subdev_rect_rep ov5670_1456_rects[] = { + { + .subdev_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .in_rect = { 0, 0, 2592, 1944 }, + .out_rect = { 16, 244, 2560, 1456 }, + }, + { + .subdev_type = CRL_SUBDEV_TYPE_BINNER, + .in_rect = { 0, 0, 2560, 1456 }, + .out_rect = { 0, 0, 2560, 1456 }, + }, + { + .subdev_type = CRL_SUBDEV_TYPE_SCALER, + .in_rect = { 0, 0, 2560, 1456 }, + .out_rect = { 0, 0, 2560, 1456 }, + }, +}; + +static struct crl_subdev_rect_rep ov5670_1152_rects[] = { + { + .subdev_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .in_rect = { 0, 0, 2592, 1944 }, + .out_rect = { 304, 396, 1984, 1152 }, + }, + { + .subdev_type = CRL_SUBDEV_TYPE_BINNER, + .in_rect = { 0, 0, 1984, 1152 }, + .out_rect = { 0, 0, 1984, 1152 }, + }, + { + .subdev_type = CRL_SUBDEV_TYPE_SCALER, + .in_rect = { 0, 0, 1984, 1152 }, + .out_rect = { 0, 0, 1984, 1152 }, + }, +}; + +static struct crl_subdev_rect_rep ov5670_1080_rects[] = { + { + .subdev_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .in_rect = { 0, 0, 2592, 1944 }, + .out_rect = { 336, 432, 1920, 1080 }, + }, + { + .subdev_type = CRL_SUBDEV_TYPE_BINNER, + .in_rect = { 0, 0, 1920, 1080 }, + .out_rect = { 0, 0, 1920, 1080 }, + }, + { + .subdev_type = CRL_SUBDEV_TYPE_SCALER, + .in_rect = { 0, 0, 1920, 1080 }, + .out_rect = { 0, 0, 1920, 1080 }, + }, +}; + +static struct crl_mode_rep ov5670_modes[] = { + { + .sd_rects_items = ARRAY_SIZE(ov5670_1944_rects), + .sd_rects = ov5670_1944_rects, + .binn_hor = 1, + .binn_vert = 1, + .scale_m = 1, + .width = 2592, + .height = 1944, + .min_llp = 3360, + .min_fll = 1976, + .comp_items = 0, + .ctrl_data = 0, + .mode_regs_items = ARRAY_SIZE(ov5670_mode_1944), + .mode_regs = ov5670_mode_1944, + }, + { + .sd_rects_items = ARRAY_SIZE(ov5670_1940_rects), + .sd_rects = ov5670_1940_rects, + .binn_hor = 1, + .binn_vert = 1, + .scale_m = 1, + .width = 2560, + .height = 1940, + .min_llp = 3366, + .min_fll = 1972, + 
.comp_items = 0, + .ctrl_data = 0, + .mode_regs_items = ARRAY_SIZE(ov5670_mode_1940), + .mode_regs = ov5670_mode_1940, + }, + { + .sd_rects_items = ARRAY_SIZE(ov5670_1458_rects), + .sd_rects = ov5670_1458_rects, + .binn_hor = 1, + .binn_vert = 1, + .scale_m = 1, + .width = 2592, + .height = 1458, + .min_llp = 4455, + .min_fll = 1490, + .comp_items = 0, + .ctrl_data = 0, + .mode_regs_items = ARRAY_SIZE(ov5670_mode_1458), + .mode_regs = ov5670_mode_1458, + }, + { + .sd_rects_items = ARRAY_SIZE(ov5670_1456_rects), + .sd_rects = ov5670_1456_rects, + .binn_hor = 1, + .binn_vert = 1, + .scale_m = 1, + .width = 2560, + .height = 1456, + .min_llp = 4461, + .min_fll = 1488, + .comp_items = 0, + .ctrl_data = 0, + .mode_regs_items = ARRAY_SIZE(ov5670_mode_1456), + .mode_regs = ov5670_mode_1456, + }, + + { + .sd_rects_items = ARRAY_SIZE(ov5670_1152_rects), + .sd_rects = ov5670_1152_rects, + .binn_hor = 1, + .binn_vert = 1, + .scale_m = 1, + .width = 1984, + .height = 1152, + .min_llp = 2803, + .min_fll = 1184, + .comp_items = 0, + .ctrl_data = 0, + .mode_regs_items = ARRAY_SIZE(ov5670_mode_1152), + .mode_regs = ov5670_mode_1152, + }, + { + .sd_rects_items = ARRAY_SIZE(ov5670_1080_rects), + .sd_rects = ov5670_1080_rects, + .binn_hor = 1, + .binn_vert = 1, + .scale_m = 1, + .width = 1920, + .height = 1080, + .min_llp = 2985, + .min_fll = 1112, + .comp_items = 0, + .ctrl_data = 0, + .mode_regs_items = ARRAY_SIZE(ov5670_mode_1080), + .mode_regs = ov5670_mode_1080, + }, +}; + +static struct crl_sensor_subdev_config ov5670_sensor_subdevs[] = { + { + .subdev_type = CRL_SUBDEV_TYPE_SCALER, + .name = "ov5670 scaler", + }, + { + .subdev_type = CRL_SUBDEV_TYPE_BINNER, + .name = "ov5670 binner", + }, + { + .subdev_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .name = "ov5670 pixel array", + }, +}; + +static struct crl_sensor_limits ov5670_sensor_limits = { + .x_addr_min = 0, + .y_addr_min = 0, + .x_addr_max = 2592, + .y_addr_max = 1944, + .min_frame_length_lines = 160, + .max_frame_length_lines = 
65535, + .min_line_length_pixels = 2700, + .max_line_length_pixels = 32752, + .scaler_m_min = 16, + .scaler_m_max = 255, + .scaler_n_min = 16, + .scaler_n_max = 16, + .min_even_inc = 1, + .max_even_inc = 1, + .min_odd_inc = 1, + .max_odd_inc = 3, +}; + +static struct crl_flip_data ov5670_flip_configurations[] = { + { + .flip = CRL_FLIP_DEFAULT_NONE, + .pixel_order = CRL_PIXEL_ORDER_BGGR, + }, + { + .flip = CRL_FLIP_VFLIP, + .pixel_order = CRL_PIXEL_ORDER_GRBG, + }, + { + .flip = CRL_FLIP_HFLIP, + .pixel_order = CRL_PIXEL_ORDER_GBRG, + }, + { + .flip = CRL_FLIP_HFLIP_VFLIP, + .pixel_order = CRL_PIXEL_ORDER_RGGB, + }, +}; + +static struct crl_csi_data_fmt ov5670_crl_csi_data_fmt[] = { + { + .code = MEDIA_BUS_FMT_SGRBG10_1X10, + .pixel_order = CRL_PIXEL_ORDER_GRBG, + .bits_per_pixel = 10, + .regs_items = 1, + .regs = ov5670_data_fmt_width10, + }, + { + .code = MEDIA_BUS_FMT_SRGGB10_1X10, + .pixel_order = CRL_PIXEL_ORDER_RGGB, + .regs_items = 1, + .bits_per_pixel = 10, + .regs = ov5670_data_fmt_width10, + }, + { + .code = MEDIA_BUS_FMT_SBGGR10_1X10, + .pixel_order = CRL_PIXEL_ORDER_BGGR, + .regs_items = 1, + .bits_per_pixel = 10, + .regs = ov5670_data_fmt_width10, + }, + { + .code = MEDIA_BUS_FMT_SGBRG10_1X10, + .pixel_order = CRL_PIXEL_ORDER_GBRG, + .regs_items = 1, + .bits_per_pixel = 10, + .regs = ov5670_data_fmt_width10, + }, +}; + +static struct crl_v4l2_ctrl ov5670_v4l2_ctrls[] = { + { + .sd_type = CRL_SUBDEV_TYPE_SCALER, + .op_type = CRL_V4L2_CTRL_SET_OP, + .context = SENSOR_IDLE, + .ctrl_id = V4L2_CID_LINK_FREQ, + .name = "V4L2_CID_LINK_FREQ", + .type = CRL_V4L2_CTRL_TYPE_MENU_INT, + .data.v4l2_int_menu.def = 0, + .data.v4l2_int_menu.max = + ARRAY_SIZE(ov5670_pll_configurations) - 1, + .data.v4l2_int_menu.menu = ov5670_op_sys_clock, + .flags = 0, + .impact = CRL_IMPACTS_NO_IMPACT, + .regs_items = 0, + .regs = 0, + .dep_items = 0, + .dep_ctrls = 0, + }, + { + .sd_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .op_type = CRL_V4L2_CTRL_GET_OP, + .context = 
SENSOR_POWERED_ON, + .ctrl_id = V4L2_CID_PIXEL_RATE, + .name = "V4L2_CID_PIXEL_RATE_PA", + .type = CRL_V4L2_CTRL_TYPE_INTEGER, + .data.std_data.min = 0, + .data.std_data.max = INT_MAX, + .data.std_data.step = 1, + .data.std_data.def = 0, + .flags = 0, + .impact = CRL_IMPACTS_NO_IMPACT, + .regs_items = 0, + .regs = 0, + .dep_items = 0, + .dep_ctrls = 0, + }, + { + .sd_type = CRL_SUBDEV_TYPE_SCALER, + .op_type = CRL_V4L2_CTRL_GET_OP, + .context = SENSOR_POWERED_ON, + .ctrl_id = V4L2_CID_PIXEL_RATE, + .name = "V4L2_CID_PIXEL_RATE_CSI", + .type = CRL_V4L2_CTRL_TYPE_INTEGER, + .data.std_data.min = 0, + .data.std_data.max = INT_MAX, + .data.std_data.step = 1, + .data.std_data.def = 0, + .flags = 0, + .impact = CRL_IMPACTS_NO_IMPACT, + .regs_items = 0, + .regs = 0, + .dep_items = 0, + .dep_ctrls = 0, + }, + { + .sd_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .op_type = CRL_V4L2_CTRL_SET_OP, + .context = SENSOR_POWERED_ON, + .ctrl_id = V4L2_CID_ANALOGUE_GAIN, + .name = "V4L2_CID_ANALOGUE_GAIN", + .type = CRL_V4L2_CTRL_TYPE_INTEGER, + .data.std_data.min = 0, + .data.std_data.max = 4096, + .data.std_data.step = 1, + .data.std_data.def = 128, + .flags = 0, + .impact = CRL_IMPACTS_NO_IMPACT, + .ctrl = 0, + .regs_items = ARRAY_SIZE(ov5670_ana_gain_global_regs), + .regs = ov5670_ana_gain_global_regs, + .dep_items = 0, + .dep_ctrls = 0, + }, + { + .sd_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .op_type = CRL_V4L2_CTRL_SET_OP, + .context = SENSOR_POWERED_ON, + .ctrl_id = V4L2_CID_EXPOSURE, + .name = "V4L2_CID_EXPOSURE", + .type = CRL_V4L2_CTRL_TYPE_INTEGER, + .data.std_data.min = 0, + .data.std_data.max = 65500, + .data.std_data.step = 1, + .data.std_data.def = 0, + .flags = 0, + .impact = CRL_IMPACTS_NO_IMPACT, + .ctrl = 0, + .regs_items = ARRAY_SIZE(ov5670_exposure_regs), + .regs = ov5670_exposure_regs, + .dep_items = 0, /* FLL is changed automatically */ + .dep_ctrls = 0, + }, + { + .sd_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .op_type = CRL_V4L2_CTRL_SET_OP, + .context = SENSOR_POWERED_ON, 
+ .ctrl_id = V4L2_CID_HFLIP, + .name = "V4L2_CID_HFLIP", + .type = CRL_V4L2_CTRL_TYPE_INTEGER, + .data.std_data.min = 0, + .data.std_data.max = 1, + .data.std_data.step = 1, + .data.std_data.def = 0, + .flags = 0, + .impact = CRL_IMPACTS_NO_IMPACT, + .ctrl = 0, + .regs_items = ARRAY_SIZE(ov5670_h_flip_regs), + .regs = ov5670_h_flip_regs, + .dep_items = 0, + .dep_ctrls = 0, + }, + { + .sd_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .op_type = CRL_V4L2_CTRL_SET_OP, + .context = SENSOR_POWERED_ON, + .ctrl_id = V4L2_CID_VFLIP, + .name = "V4L2_CID_VFLIP", + .type = CRL_V4L2_CTRL_TYPE_INTEGER, + .data.std_data.min = 0, + .data.std_data.max = 1, + .data.std_data.step = 1, + .data.std_data.def = 0, + .flags = 0, + .impact = CRL_IMPACTS_NO_IMPACT, + .ctrl = 0, + .regs_items = ARRAY_SIZE(ov5670_v_flip_regs), + .regs = ov5670_v_flip_regs, + .dep_items = 0, + .dep_ctrls = 0, + }, + { + .sd_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .op_type = CRL_V4L2_CTRL_SET_OP, + .context = SENSOR_POWERED_ON, + .ctrl_id = V4L2_CID_FRAME_LENGTH_LINES, + .name = "Frame Length Lines", + .type = CRL_V4L2_CTRL_TYPE_CUSTOM, + .data.std_data.min = 160, + .data.std_data.max = 65535, + .data.std_data.step = 1, + .data.std_data.def = 2474, + .flags = V4L2_CTRL_FLAG_UPDATE, + .impact = CRL_IMPACTS_NO_IMPACT, + .ctrl = 0, + .regs_items = ARRAY_SIZE(ov5670_vblank_regs), + .regs = ov5670_vblank_regs, + .dep_items = 0, + .dep_ctrls = 0, + .v4l2_type = V4L2_CTRL_TYPE_INTEGER, + }, + { + .sd_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .op_type = CRL_V4L2_CTRL_SET_OP, + .context = SENSOR_POWERED_ON, + .ctrl_id = V4L2_CID_LINE_LENGTH_PIXELS, + .name = "Line Length Pixels", + .type = CRL_V4L2_CTRL_TYPE_CUSTOM, + .data.std_data.min = 1024, + .data.std_data.max = 65520, + .data.std_data.step = 1, + .data.std_data.def = 3880, + .flags = V4L2_CTRL_FLAG_UPDATE, + .impact = CRL_IMPACTS_NO_IMPACT, + .ctrl = 0, + .regs_items = ARRAY_SIZE(ov5670_hblank_regs), + .regs = ov5670_hblank_regs, + .dep_items = 0, + .dep_ctrls = 0, + 
.v4l2_type = V4L2_CTRL_TYPE_INTEGER, + }, +}; + +#define ov5670_OTP_START_ADDR 0x7010 +#define ov5670_OTP_END_ADDR 0x7063 + +#define ov5670_OTP_LEN (ov5670_OTP_END_ADDR - ov5670_OTP_START_ADDR + 1) +#define ov5670_OTP_L_ADDR(x) (x & 0xff) +#define ov5670_OTP_H_ADDR(x) ((x >> 8) & 0xff) + +static struct crl_register_write_rep ov5670_nvm_preop_regset[] = { + /* Start streaming */ + { 0x0100, CRL_REG_LEN_08BIT, 0x01 }, + /* Manual mode, program disable */ + { 0x3D84, CRL_REG_LEN_08BIT, 0xC0 }, + /* Manual OTP start address for access */ + { 0x3D88, CRL_REG_LEN_08BIT, ov5670_OTP_H_ADDR(ov5670_OTP_START_ADDR)}, + { 0x3D89, CRL_REG_LEN_08BIT, ov5670_OTP_L_ADDR(ov5670_OTP_START_ADDR)}, + /* Manual OTP end address for access */ + { 0x3D8A, CRL_REG_LEN_08BIT, ov5670_OTP_H_ADDR(ov5670_OTP_END_ADDR)}, + { 0x3D8B, CRL_REG_LEN_08BIT, ov5670_OTP_L_ADDR(ov5670_OTP_END_ADDR)}, + /* OTP load enable */ + { 0x3D81, CRL_REG_LEN_08BIT, 0x01 }, + /* Wait for the data to load into the buffer */ + { 0x0000, CRL_REG_LEN_DELAY, 0x05 }, +}; + +static struct crl_register_write_rep ov5670_nvm_postop_regset[] = { + { 0x0100, CRL_REG_LEN_08BIT, 0x00 }, /* Stop streaming */ +}; + +static struct crl_nvm_blob ov5670_nvm_blobs[] = { + {CRL_I2C_ADDRESS_NO_OVERRIDE, ov5670_OTP_START_ADDR, ov5670_OTP_LEN }, +}; + +static struct crl_arithmetic_ops ov5670_frame_desc_width_ops[] = { + { + .op = CRL_ASSIGNMENT, + .operand.entity_type = CRL_DYNAMIC_VAL_OPERAND_TYPE_VAR_REF, + .operand.entity_val = CRL_VAR_REF_OUTPUT_WIDTH, + }, +}; + +static struct crl_arithmetic_ops ov5670_frame_desc_height_ops[] = { + { + .op = CRL_ASSIGNMENT, + .operand.entity_type = CRL_DYNAMIC_VAL_OPERAND_TYPE_CONST, + .operand.entity_val = 1, + }, +}; + +static struct crl_frame_desc ov5670_frame_desc[] = { + { + .flags.entity_val = 0, + .bpp.entity_type = CRL_DYNAMIC_VAL_OPERAND_TYPE_VAR_REF, + .bpp.entity_val = CRL_VAR_REF_BITSPERPIXEL, + .pixelcode.entity_val = MEDIA_BUS_FMT_FIXED, + .length.entity_val = 0, + .start_line.entity_val 
= 0, + .start_pixel.entity_val = 0, + .width = { + .ops_items = ARRAY_SIZE(ov5670_frame_desc_width_ops), + .ops = ov5670_frame_desc_width_ops, + }, + .height = { + .ops_items = ARRAY_SIZE(ov5670_frame_desc_height_ops), + .ops = ov5670_frame_desc_height_ops, + }, + .csi2_channel.entity_val = 0, + .csi2_data_type.entity_val = 0x12, + }, +}; + +/* Power items, they are enabled in the order they are listed here */ +static const struct crl_power_seq_entity ov5670_power_items[] = { + { + .type = CRL_POWER_ETY_REGULATOR_FRAMEWORK, + .ent_name = "VANA", + .val = 2800000, + .delay = 0, + }, + { + .type = CRL_POWER_ETY_REGULATOR_FRAMEWORK, + .ent_name = "VDIG", + .val = 1200000, + .delay = 0, + }, + { + .type = CRL_POWER_ETY_REGULATOR_FRAMEWORK, + .ent_name = "VIO", + .val = 1800000, + .delay = 0, + }, + { + .type = CRL_POWER_ETY_REGULATOR_FRAMEWORK, + .ent_name = "VAF", + .val = 3000000, + .delay = 2000, + }, + { + .type = CRL_POWER_ETY_CLK_FRAMEWORK, + .val = 24000000, + }, + { + .type = CRL_POWER_ETY_GPIO_FROM_PDATA, + .val = 1, + .delay = 10700, + }, + { + .type = CRL_POWER_ETY_GPIO_FROM_PDATA_BY_NUMBER, + }, + +}; + +static struct crl_sensor_configuration ov5670_crl_configuration = { + + .power_items = ARRAY_SIZE(ov5670_power_items), + .power_entities = ov5670_power_items, + + .powerup_regs_items = ARRAY_SIZE(ov5670_powerup_regset), + .powerup_regs = ov5670_powerup_regset, + + .poweroff_regs_items = 0, + .poweroff_regs = 0, + + .id_reg_items = ARRAY_SIZE(ov5670_sensor_detect_regset), + .id_regs = ov5670_sensor_detect_regset, + + .subdev_items = ARRAY_SIZE(ov5670_sensor_subdevs), + .subdevs = ov5670_sensor_subdevs, + + .sensor_limits = &ov5670_sensor_limits, + + .pll_config_items = ARRAY_SIZE(ov5670_pll_configurations), + .pll_configs = ov5670_pll_configurations, + + .modes_items = ARRAY_SIZE(ov5670_modes), + .modes = ov5670_modes, + + .streamon_regs_items = ARRAY_SIZE(ov5670_streamon_regs), + .streamon_regs = ov5670_streamon_regs, + + .streamoff_regs_items = 
ARRAY_SIZE(ov5670_streamoff_regs), + .streamoff_regs = ov5670_streamoff_regs, + + .v4l2_ctrls_items = ARRAY_SIZE(ov5670_v4l2_ctrls), + .v4l2_ctrl_bank = ov5670_v4l2_ctrls, + + .csi_fmts_items = ARRAY_SIZE(ov5670_crl_csi_data_fmt), + .csi_fmts = ov5670_crl_csi_data_fmt, + + .flip_items = ARRAY_SIZE(ov5670_flip_configurations), + .flip_data = ov5670_flip_configurations, + + .crl_nvm_info.nvm_flags = CRL_NVM_ADDR_MODE_16BIT, + .crl_nvm_info.nvm_preop_regs_items = + ARRAY_SIZE(ov5670_nvm_preop_regset), + .crl_nvm_info.nvm_preop_regs = ov5670_nvm_preop_regset, + .crl_nvm_info.nvm_postop_regs_items = + ARRAY_SIZE(ov5670_nvm_postop_regset), + .crl_nvm_info.nvm_postop_regs = ov5670_nvm_postop_regset, + .crl_nvm_info.nvm_blobs_items = ARRAY_SIZE(ov5670_nvm_blobs), + .crl_nvm_info.nvm_config = ov5670_nvm_blobs, + + .frame_desc_entries = ARRAY_SIZE(ov5670_frame_desc), + .frame_desc_type = CRL_V4L2_MBUS_FRAME_DESC_TYPE_CSI2, + .frame_desc = ov5670_frame_desc, +}; + +#endif /* __CRLMODULE_ov5670_CONFIGURATION_H_ */ diff --git a/drivers/media/i2c/crlmodule/crl_ov8858_configuration.h b/drivers/media/i2c/crlmodule/crl_ov8858_configuration.h new file mode 100644 index 0000000000000..63faedcf85ce4 --- /dev/null +++ b/drivers/media/i2c/crlmodule/crl_ov8858_configuration.h @@ -0,0 +1,1429 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (C) 2014 - 2018 Intel Corporation + * + * Author: Vinod Govindapillai + * + */ + +#ifndef __CRLMODULE_OV8858_CONFIGURATION_H_ +#define __CRLMODULE_OV8858_CONFIGURATION_H_ + +#include "crlmodule-nvm.h" +#include "crlmodule-sensor-ds.h" + +static struct crl_register_write_rep ov8858_pll_360mbps[] = { + { 0x0300, CRL_REG_LEN_08BIT, 0x00 }, + { 0x0301, CRL_REG_LEN_08BIT, 0x00 }, + { 0x0302, CRL_REG_LEN_08BIT, 0x1e },/* pll1_multiplier = 30 */ + { 0x0303, CRL_REG_LEN_08BIT, 0x00 },/* pll1_divm = /(1 + 0) */ + { 0x0304, CRL_REG_LEN_08BIT, 0x03 },/* pll1_div_mipi = /8 */ + { 0x0305, CRL_REG_LEN_08BIT, 0x01 }, + { 0x0306, CRL_REG_LEN_08BIT, 0x01 }, + { 
0x030A, CRL_REG_LEN_08BIT, 0x02 }, + { 0x030B, CRL_REG_LEN_08BIT, 0x01 },/* pll2_pre_div = /2 */ + { 0x030c, CRL_REG_LEN_08BIT, 0x00 }, + { 0x030D, CRL_REG_LEN_08BIT, 0x44 },/* pll2_r_divp = 30 */ + { 0x030E, CRL_REG_LEN_08BIT, 0x01 },/* pll2_r_divs = /2 */ + { 0x030F, CRL_REG_LEN_08BIT, 0x04 },/* pll2_r_divsp = /(1 + 4) */ + /* pll2_pre_div0 = /1, pll2_r_divdac = /(1 + 1) */ + { 0x0312, CRL_REG_LEN_08BIT, 0x02 }, + /* mipi_lane_mode = 1+3, mipi_lvds_sel = 1 = MIPI enable, + * r_phy_pd_mipi_man = 0, lane_dis_option = 0 + */ + { 0x3018, CRL_REG_LEN_08BIT, 0x72 }, +}; + + +static struct crl_register_write_rep ov8858_powerup_regset[] = { + /*Reset*/ + { 0x0103, CRL_REG_LEN_08BIT, 0x01 }, + { 0x0100, CRL_REG_LEN_08BIT, 0x00 }, + { 0x3007, CRL_REG_LEN_08BIT, 0x80 }, + /* Npump clock div = /2, Ppump clock div = /4 */ + { 0x3015, CRL_REG_LEN_08BIT, 0x01 }, + /* Clock switch output = normal, pclk_div = /1 */ + { 0x3020, CRL_REG_LEN_08BIT, 0x93 }, + { 0x3031, CRL_REG_LEN_08BIT, 0x0a }, + { 0x3032, CRL_REG_LEN_08BIT, 0x80 }, + { 0x3022, CRL_REG_LEN_08BIT, 0x01 }, + { 0x3034, CRL_REG_LEN_08BIT, 0x00 }, + /* sclk_div = /1, sclk_pre_div = /1, chip debug = 1 */ + { 0x3106, CRL_REG_LEN_08BIT, 0x01 }, + { 0x3305, CRL_REG_LEN_08BIT, 0xF1 }, + { 0x3307, CRL_REG_LEN_08BIT, 0x04 }, + { 0x3308, CRL_REG_LEN_08BIT, 0x00 }, + { 0x3309, CRL_REG_LEN_08BIT, 0x28 }, + { 0x330A, CRL_REG_LEN_08BIT, 0x00 }, + { 0x330B, CRL_REG_LEN_08BIT, 0x20 }, + { 0x330C, CRL_REG_LEN_08BIT, 0x00 }, + { 0x330D, CRL_REG_LEN_08BIT, 0x00 }, + { 0x330E, CRL_REG_LEN_08BIT, 0x00 }, + { 0x330F, CRL_REG_LEN_08BIT, 0x40 }, + /* + * Digital fraction gain delay option = Delay 1 frame, + * Gain change delay option = Delay 1 frame, + * Gain delay option = Delay 1 frame, + * Gain manual as sensor gain = Input gain as real gain format, + * Exposure delay option (must be 0 = Delay 1 frame, + * Exposure change delay option (must be 0) = Delay 1 frame + */ + { 0x3503, CRL_REG_LEN_08BIT, 0x00 }, + { 0x3505, CRL_REG_LEN_08BIT, 
0x80 },/* gain conversation option */ + /* + * [10:7] are integer gain, [6:0] are fraction gain. For example: + * 0x80 is 1x gain, CRL_REG_LEN_08BIT, 0x100 is 2x gain, + * CRL_REG_LEN_08BIT, 0x1C0 is 3.5x gain + */ + { 0x3508, CRL_REG_LEN_08BIT, 0x02 },/* long gain = 0x0200 */ + { 0x3509, CRL_REG_LEN_08BIT, 0x00 },/* long gain = 0x0200 */ + { 0x350C, CRL_REG_LEN_08BIT, 0x00 },/* short gain = 0x0080 */ + { 0x350D, CRL_REG_LEN_08BIT, 0x80 },/* short gain = 0x0080 */ + { 0x3510, CRL_REG_LEN_08BIT, 0x00 },/* short exposure = 0x000200 */ + { 0x3511, CRL_REG_LEN_08BIT, 0x02 },/* short exposure = 0x000200 */ + { 0x3512, CRL_REG_LEN_08BIT, 0x00 },/* short exposure = 0x000200 */ + { 0x3600, CRL_REG_LEN_08BIT, 0x00 }, + { 0x3601, CRL_REG_LEN_08BIT, 0x00 }, + { 0x3602, CRL_REG_LEN_08BIT, 0x00 }, + { 0x3603, CRL_REG_LEN_08BIT, 0x00 }, + { 0x3604, CRL_REG_LEN_08BIT, 0x22 }, + { 0x3605, CRL_REG_LEN_08BIT, 0x30 }, + { 0x3606, CRL_REG_LEN_08BIT, 0x00 }, + { 0x3607, CRL_REG_LEN_08BIT, 0x20 }, + { 0x3608, CRL_REG_LEN_08BIT, 0x11 }, + { 0x3609, CRL_REG_LEN_08BIT, 0x28 }, + { 0x360A, CRL_REG_LEN_08BIT, 0x00 }, + { 0x360B, CRL_REG_LEN_08BIT, 0x06 }, + { 0x360C, CRL_REG_LEN_08BIT, 0xD4 }, + { 0x360D, CRL_REG_LEN_08BIT, 0x40 }, + { 0x360E, CRL_REG_LEN_08BIT, 0x0C }, + { 0x360F, CRL_REG_LEN_08BIT, 0x20 }, + { 0x3610, CRL_REG_LEN_08BIT, 0x07 }, + { 0x3611, CRL_REG_LEN_08BIT, 0x20 }, + { 0x3612, CRL_REG_LEN_08BIT, 0x88 }, + { 0x3613, CRL_REG_LEN_08BIT, 0x80 }, + { 0x3614, CRL_REG_LEN_08BIT, 0x58 }, + { 0x3615, CRL_REG_LEN_08BIT, 0x00 }, + { 0x3616, CRL_REG_LEN_08BIT, 0x4A }, + { 0x3617, CRL_REG_LEN_08BIT, 0x40 }, + { 0x3618, CRL_REG_LEN_08BIT, 0x5a }, + { 0x3619, CRL_REG_LEN_08BIT, 0x70 }, + { 0x361A, CRL_REG_LEN_08BIT, 0x99 }, + { 0x361B, CRL_REG_LEN_08BIT, 0x00 }, + { 0x361C, CRL_REG_LEN_08BIT, 0x07 }, + { 0x361D, CRL_REG_LEN_08BIT, 0x00 }, + { 0x361E, CRL_REG_LEN_08BIT, 0x00 }, + { 0x361F, CRL_REG_LEN_08BIT, 0x00 }, + { 0x3638, CRL_REG_LEN_08BIT, 0xFF }, + { 0x3633, CRL_REG_LEN_08BIT, 
0x0f }, + { 0x3634, CRL_REG_LEN_08BIT, 0x0f }, + { 0x3635, CRL_REG_LEN_08BIT, 0x0f }, + { 0x3636, CRL_REG_LEN_08BIT, 0x12 }, + { 0x3645, CRL_REG_LEN_08BIT, 0x13 }, + { 0x3646, CRL_REG_LEN_08BIT, 0x83 }, + { 0x364A, CRL_REG_LEN_08BIT, 0x07 }, + { 0x3700, CRL_REG_LEN_08BIT, 0x30 }, + { 0x3701, CRL_REG_LEN_08BIT, 0x18 }, + { 0x3702, CRL_REG_LEN_08BIT, 0x50 }, + { 0x3703, CRL_REG_LEN_08BIT, 0x32 }, + { 0x3704, CRL_REG_LEN_08BIT, 0x28 }, + { 0x3705, CRL_REG_LEN_08BIT, 0x00 }, + { 0x3706, CRL_REG_LEN_08BIT, 0x82 }, + { 0x3707, CRL_REG_LEN_08BIT, 0x08 }, + { 0x3708, CRL_REG_LEN_08BIT, 0x48 }, + { 0x3709, CRL_REG_LEN_08BIT, 0x66 }, + { 0x370A, CRL_REG_LEN_08BIT, 0x01 }, + { 0x370B, CRL_REG_LEN_08BIT, 0x82 }, + { 0x370C, CRL_REG_LEN_08BIT, 0x07 }, + { 0x3712, CRL_REG_LEN_08BIT, 0x44 }, + { 0x3714, CRL_REG_LEN_08BIT, 0x24 }, + { 0x3718, CRL_REG_LEN_08BIT, 0x14 }, + { 0x3719, CRL_REG_LEN_08BIT, 0x31 }, + { 0x371E, CRL_REG_LEN_08BIT, 0x31 }, + { 0x371F, CRL_REG_LEN_08BIT, 0x7F }, + { 0x3720, CRL_REG_LEN_08BIT, 0x0A }, + { 0x3721, CRL_REG_LEN_08BIT, 0x0A }, + { 0x3724, CRL_REG_LEN_08BIT, 0x0C }, + { 0x3725, CRL_REG_LEN_08BIT, 0x02 }, + { 0x3726, CRL_REG_LEN_08BIT, 0x0C }, + { 0x3728, CRL_REG_LEN_08BIT, 0x0A }, + { 0x3729, CRL_REG_LEN_08BIT, 0x03 }, + { 0x372A, CRL_REG_LEN_08BIT, 0x06 }, + { 0x372B, CRL_REG_LEN_08BIT, 0xA6 }, + { 0x372C, CRL_REG_LEN_08BIT, 0xA6 }, + { 0x372D, CRL_REG_LEN_08BIT, 0xA6 }, + { 0x372E, CRL_REG_LEN_08BIT, 0x0C }, + { 0x372F, CRL_REG_LEN_08BIT, 0x20 }, + { 0x3730, CRL_REG_LEN_08BIT, 0x02 }, + { 0x3731, CRL_REG_LEN_08BIT, 0x0C }, + { 0x3732, CRL_REG_LEN_08BIT, 0x28 }, + { 0x3733, CRL_REG_LEN_08BIT, 0x10 }, + { 0x3734, CRL_REG_LEN_08BIT, 0x40 }, + { 0x3736, CRL_REG_LEN_08BIT, 0x30 }, + { 0x373A, CRL_REG_LEN_08BIT, 0x0A }, + { 0x373B, CRL_REG_LEN_08BIT, 0x0B }, + { 0x373C, CRL_REG_LEN_08BIT, 0x14 }, + { 0x373E, CRL_REG_LEN_08BIT, 0x06 }, + { 0x3750, CRL_REG_LEN_08BIT, 0x0a }, + { 0x3751, CRL_REG_LEN_08BIT, 0x0e }, + { 0x3755, CRL_REG_LEN_08BIT, 0x10 }, + 
{ 0x3758, CRL_REG_LEN_08BIT, 0x00 }, + { 0x3759, CRL_REG_LEN_08BIT, 0x4C }, + { 0x375A, CRL_REG_LEN_08BIT, 0x0C }, + { 0x375B, CRL_REG_LEN_08BIT, 0x26 }, + { 0x375C, CRL_REG_LEN_08BIT, 0x20 }, + { 0x375D, CRL_REG_LEN_08BIT, 0x04 }, + { 0x375E, CRL_REG_LEN_08BIT, 0x00 }, + { 0x375F, CRL_REG_LEN_08BIT, 0x28 }, + { 0x3760, CRL_REG_LEN_08BIT, 0x00 }, + { 0x3761, CRL_REG_LEN_08BIT, 0x00 }, + { 0x3762, CRL_REG_LEN_08BIT, 0x00 }, + { 0x3763, CRL_REG_LEN_08BIT, 0x00 }, + { 0x3766, CRL_REG_LEN_08BIT, 0xFF }, + { 0x376B, CRL_REG_LEN_08BIT, 0x00 }, + { 0x3772, CRL_REG_LEN_08BIT, 0x46 }, + { 0x3773, CRL_REG_LEN_08BIT, 0x04 }, + { 0x3774, CRL_REG_LEN_08BIT, 0x2C }, + { 0x3775, CRL_REG_LEN_08BIT, 0x13 }, + { 0x3776, CRL_REG_LEN_08BIT, 0x08 }, + { 0x3777, CRL_REG_LEN_08BIT, 0x00 }, + { 0x3778, CRL_REG_LEN_08BIT, 0x17 }, + { 0x37A0, CRL_REG_LEN_08BIT, 0x88 }, + { 0x37A1, CRL_REG_LEN_08BIT, 0x7A }, + { 0x37A2, CRL_REG_LEN_08BIT, 0x7A }, + { 0x37A3, CRL_REG_LEN_08BIT, 0x00 }, + { 0x37A4, CRL_REG_LEN_08BIT, 0x00 }, + { 0x37A5, CRL_REG_LEN_08BIT, 0x00 }, + { 0x37A6, CRL_REG_LEN_08BIT, 0x00 }, + { 0x37A7, CRL_REG_LEN_08BIT, 0x88 }, + { 0x37A8, CRL_REG_LEN_08BIT, 0x98 }, + { 0x37A9, CRL_REG_LEN_08BIT, 0x98 }, + { 0x37AA, CRL_REG_LEN_08BIT, 0x88 }, + { 0x37AB, CRL_REG_LEN_08BIT, 0x5C }, + { 0x37AC, CRL_REG_LEN_08BIT, 0x5C }, + { 0x37AD, CRL_REG_LEN_08BIT, 0x55 }, + { 0x37AE, CRL_REG_LEN_08BIT, 0x19 }, + { 0x37AF, CRL_REG_LEN_08BIT, 0x19 }, + { 0x37B0, CRL_REG_LEN_08BIT, 0x00 }, + { 0x37B1, CRL_REG_LEN_08BIT, 0x00 }, + { 0x37B2, CRL_REG_LEN_08BIT, 0x00 }, + { 0x37B3, CRL_REG_LEN_08BIT, 0x84 }, + { 0x37B4, CRL_REG_LEN_08BIT, 0x84 }, + { 0x37B5, CRL_REG_LEN_08BIT, 0x60 }, + { 0x37B6, CRL_REG_LEN_08BIT, 0x00 }, + { 0x37B7, CRL_REG_LEN_08BIT, 0x00 }, + { 0x37B8, CRL_REG_LEN_08BIT, 0x00 }, + { 0x37B9, CRL_REG_LEN_08BIT, 0xFF }, + { 0x3800, CRL_REG_LEN_08BIT, 0x00 },/* h_crop_start high */ + { 0x3801, CRL_REG_LEN_08BIT, 0x0C },/* h_crop_start low */ + { 0x3802, CRL_REG_LEN_08BIT, 0x00 },/* 
v_crop_start high */ + { 0x3803, CRL_REG_LEN_08BIT, 0x0C },/* v_crop_start low */ + { 0x3804, CRL_REG_LEN_08BIT, 0x0C },/* h_crop_end high */ + { 0x3805, CRL_REG_LEN_08BIT, 0xD3 },/* h_crop_end low */ + { 0x3806, CRL_REG_LEN_08BIT, 0x09 },/* v_crop_end high */ + { 0x3807, CRL_REG_LEN_08BIT, 0xA3 },/* v_crop_end low */ + { 0x3808, CRL_REG_LEN_08BIT, 0x0C },/* h_output_size high */ + { 0x3809, CRL_REG_LEN_08BIT, 0xC0 },/* h_output_size low */ + { 0x380A, CRL_REG_LEN_08BIT, 0x09 },/* v_output_size high */ + { 0x380B, CRL_REG_LEN_08BIT, 0x90 },/* v_output_size low */ + { 0x380C, CRL_REG_LEN_08BIT, 0x07 },/* horizontal timing size high */ + { 0x380D, CRL_REG_LEN_08BIT, 0x94 },/* horizontal timing size low */ + { 0x380E, CRL_REG_LEN_08BIT, 0x0A },/* vertical timing size high */ + { 0x380F, CRL_REG_LEN_08BIT, 0x0D },/* vertical timing size low */ + { 0x3810, CRL_REG_LEN_08BIT, 0x00 },/* h_win offset high */ + { 0x3811, CRL_REG_LEN_08BIT, 0x04 },/* h_win offset low */ + { 0x3812, CRL_REG_LEN_08BIT, 0x00 },/* v_win offset high */ + { 0x3813, CRL_REG_LEN_08BIT, 0x02 },/* v_win offset low */ + { 0x3814, CRL_REG_LEN_08BIT, 0x01 },/* h_odd_inc */ + { 0x3815, CRL_REG_LEN_08BIT, 0x01 },/* h_even_inc */ + { 0x3820, CRL_REG_LEN_08BIT, 0x00 },/* format1 */ + { 0x3821, CRL_REG_LEN_08BIT, 0x40 },/* format2 */ + { 0x382A, CRL_REG_LEN_08BIT, 0x01 },/* v_odd_inc */ + { 0x382B, CRL_REG_LEN_08BIT, 0x01 },/* v_even_inc */ + { 0x3830, CRL_REG_LEN_08BIT, 0x06 }, + { 0x3836, CRL_REG_LEN_08BIT, 0x01 }, + { 0x3837, CRL_REG_LEN_08BIT, 0x18 }, + { 0x3841, CRL_REG_LEN_08BIT, 0xFF },/* AUTO_SIZE_CTRL */ + { 0x3846, CRL_REG_LEN_08BIT, 0x48 }, + { 0x3D85, CRL_REG_LEN_08BIT, 0x16 },/* OTP_REG85 */ + { 0x3D8C, CRL_REG_LEN_08BIT, 0x73 }, + { 0x3D8D, CRL_REG_LEN_08BIT, 0xde }, + { 0x3F08, CRL_REG_LEN_08BIT, 0x10 },/* PSRAM control register */ + { 0x4000, CRL_REG_LEN_08BIT, 0xF1 },/* BLC CTRL00 = default */ + { 0x4001, CRL_REG_LEN_08BIT, 0x00 },/* BLC CTRL01 */ + { 0x4002, CRL_REG_LEN_08BIT, 0x27 },/* BLC 
offset = 0x27 */ + { 0x4005, CRL_REG_LEN_08BIT, 0x10 },/* BLC target = 0x0010 */ + { 0x4009, CRL_REG_LEN_08BIT, 0x81 },/* BLC CTRL09 */ + { 0x400B, CRL_REG_LEN_08BIT, 0x0C },/* BLC CTRL0B = default */ + { 0x4011, CRL_REG_LEN_08BIT, 0x20 }, + { 0x401B, CRL_REG_LEN_08BIT, 0x00 },/* Zero line R coeff. = 0x0000 */ + { 0x401D, CRL_REG_LEN_08BIT, 0x00 },/* Zero line T coeff. = 0x0000 */ + { 0x401F, CRL_REG_LEN_08BIT, 0x00 },/* BLC CTRL1F */ + { 0x4020, CRL_REG_LEN_08BIT, 0x00 }, + { 0x4021, CRL_REG_LEN_08BIT, 0x04 }, + { 0x4022, CRL_REG_LEN_08BIT, 0x0C }, + { 0x4023, CRL_REG_LEN_08BIT, 0x60 }, + { 0x4024, CRL_REG_LEN_08BIT, 0x0f }, + { 0x4025, CRL_REG_LEN_08BIT, 0x36 }, + { 0x4026, CRL_REG_LEN_08BIT, 0x0f }, + { 0x4027, CRL_REG_LEN_08BIT, 0x37 }, + { 0x4028, CRL_REG_LEN_08BIT, 0x00 }, + { 0x4029, CRL_REG_LEN_08BIT, 0x02 }, + { 0x402A, CRL_REG_LEN_08BIT, 0x04 }, + { 0x402B, CRL_REG_LEN_08BIT, 0x08 }, + { 0x402C, CRL_REG_LEN_08BIT, 0x00 }, + { 0x402D, CRL_REG_LEN_08BIT, 0x02 }, + { 0x402E, CRL_REG_LEN_08BIT, 0x04 }, + { 0x402F, CRL_REG_LEN_08BIT, 0x08 }, + { 0x4034, CRL_REG_LEN_08BIT, 0x3F }, + { 0x403D, CRL_REG_LEN_08BIT, 0x04 },/* BLC CTRL3D */ + { 0x4300, CRL_REG_LEN_08BIT, 0xFF },/* clip_max[11:4] = 0xFFF */ + { 0x4301, CRL_REG_LEN_08BIT, 0x00 },/* clip_min[11:4] = 0 */ + { 0x4302, CRL_REG_LEN_08BIT, 0x0F },/* clip_min/max[3:0] */ + { 0x4316, CRL_REG_LEN_08BIT, 0x00 },/* CTRL16 = default */ + { 0x4503, CRL_REG_LEN_08BIT, 0x18 }, + { 0x4600, CRL_REG_LEN_08BIT, 0x01 }, + { 0x4601, CRL_REG_LEN_08BIT, 0x97 }, + /* wkup_dly = Mark1 wakeup delay/2^10 = 0x25 */ + { 0x4808, CRL_REG_LEN_08BIT, 0x25 }, + { 0x4816, CRL_REG_LEN_08BIT, 0x12 },/* Embedded data type */ + { 0x5A08, CRL_REG_LEN_08BIT, 0x02 },/* Data in beginning of the frame */ + { 0x5041, CRL_REG_LEN_08BIT, 0x01 },/* ISP CTRL41 - embedded data=on */ + { 0x4307, CRL_REG_LEN_08BIT, 0x31 },/* Embedded_en */ + { 0x481F, CRL_REG_LEN_08BIT, 0x32 },/* clk_prepare_min = 0x32 */ + { 0x4837, CRL_REG_LEN_08BIT, 0x16 },/* 
pclk_period = 0x14 */ + { 0x4850, CRL_REG_LEN_08BIT, 0x10 },/* LANE SEL01 */ + { 0x4851, CRL_REG_LEN_08BIT, 0x32 },/* LANE SEL02 */ + { 0x4B00, CRL_REG_LEN_08BIT, 0x2A }, + { 0x4B0D, CRL_REG_LEN_08BIT, 0x00 }, + { 0x4D00, CRL_REG_LEN_08BIT, 0x04 },/* TPM_CTRL_REG */ + { 0x4D01, CRL_REG_LEN_08BIT, 0x18 },/* TPM_CTRL_REG */ + { 0x4D02, CRL_REG_LEN_08BIT, 0xC3 },/* TPM_CTRL_REG */ + { 0x4D03, CRL_REG_LEN_08BIT, 0xFF },/* TPM_CTRL_REG */ + { 0x4D04, CRL_REG_LEN_08BIT, 0xFF },/* TPM_CTRL_REG */ + { 0x4D05, CRL_REG_LEN_08BIT, 0xFF },/* TPM_CTRL_REG */ + /* + * Lens correction (LENC) function enable = 0 + * Slave sensor AWB Gain function enable = 1 + * Slave sensor AWB Statistics function enable = 1 + * Master sensor AWB Gain function enable = 1 + * Master sensor AWB Statistics function enable = 1 + * Black DPC function enable = 1 + * White DPC function enable =1 + */ + { 0x5000, CRL_REG_LEN_08BIT, 0x7E }, + { 0x5001, CRL_REG_LEN_08BIT, 0x01 },/* BLC function enable = 1 */ + /* + * Horizontal scale function enable = 0 + * WBMATCH bypass mode = Select slave sensor's gain + * WBMATCH function enable = 0 + * Master MWB gain support RGBC = 0 + * OTP_DPC function enable = 1 + * Manual mode of VarioPixel function enable = 0 + * Manual enable of VarioPixel function enable = 0 + * Use VSYNC to latch ISP modules's function enable signals = 0 + */ + { 0x5002, CRL_REG_LEN_08BIT, 0x08 }, + /* + * Bypass all ISP modules after BLC module = 0 + * DPC_DBC buffer control enable = 1 + * WBMATCH VSYNC selection = Select master sensor's VSYNC fall + * Select master AWB gain to embed line = AWB gain before manual mode + * Enable BLC's input flip_i signal = 0 + */ + { 0x5003, CRL_REG_LEN_08BIT, 0x20 }, + { 0x5041, CRL_REG_LEN_08BIT, 0x1D },/* ISP CTRL41 - embedded data=on */ + { 0x5046, CRL_REG_LEN_08BIT, 0x12 },/* ISP CTRL46 = default */ + /* + * Tail enable = 1 + * Saturate cross cluster enable = 1 + * Remove cross cluster enable = 1 + * Enable to remove connected defect pixels in same 
channel = 1 + * Enable to remove connected defect pixels in different channel = 1 + * Smooth enable, use average G for recovery = 1 + * Black/white sensor mode enable = 0 + * Manual mode enable = 0 + */ + { 0x5780, CRL_REG_LEN_08BIT, 0x3e }, + { 0x5781, CRL_REG_LEN_08BIT, 0x0f }, + { 0x5782, CRL_REG_LEN_08BIT, 0x44 }, + { 0x5783, CRL_REG_LEN_08BIT, 0x02 }, + { 0x5784, CRL_REG_LEN_08BIT, 0x01 },/* DPC CTRL04 */ + { 0x5785, CRL_REG_LEN_08BIT, 0x00 }, + { 0x5786, CRL_REG_LEN_08BIT, 0x00 }, + { 0x5787, CRL_REG_LEN_08BIT, 0x04 },/* DPC CTRL07 */ + { 0x5788, CRL_REG_LEN_08BIT, 0x02 },/* DPC CTRL08 */ + { 0x5789, CRL_REG_LEN_08BIT, 0x0f }, + { 0x578A, CRL_REG_LEN_08BIT, 0xfd },/* DPC CTRL0A */ + { 0x578B, CRL_REG_LEN_08BIT, 0xf5 },/* DPC CTRL0B */ + { 0x578C, CRL_REG_LEN_08BIT, 0xf5 },/* DPC CTRL0C */ + { 0x578D, CRL_REG_LEN_08BIT, 0x03 },/* DPC CTRL0D */ + { 0x578E, CRL_REG_LEN_08BIT, 0x08 },/* DPC CTRL0E */ + { 0x578F, CRL_REG_LEN_08BIT, 0x0c },/* DPC CTRL0F */ + { 0x5790, CRL_REG_LEN_08BIT, 0x08 },/* DPC CTRL10 */ + { 0x5791, CRL_REG_LEN_08BIT, 0x04 }, + { 0x5792, CRL_REG_LEN_08BIT, 0x00 }, + { 0x5793, CRL_REG_LEN_08BIT, 0x52 }, + { 0x5794, CRL_REG_LEN_08BIT, 0xa3 }, + { 0x586E, CRL_REG_LEN_08BIT, 0x10 }, + { 0x586F, CRL_REG_LEN_08BIT, 0x08 }, + { 0x58F8, CRL_REG_LEN_08BIT, 0x3d }, + { 0x5871, CRL_REG_LEN_08BIT, 0x0d }, + { 0x5870, CRL_REG_LEN_08BIT, 0x18 }, + { 0x5901, CRL_REG_LEN_08BIT, 0x00 },/* VAP CTRL01 = default */ + { 0x5B00, CRL_REG_LEN_08BIT, 0x02 },/* OTP CTRL00 */ + { 0x5B01, CRL_REG_LEN_08BIT, 0x10 },/* OTP CTRL01 */ + { 0x5B02, CRL_REG_LEN_08BIT, 0x03 },/* OTP CTRL02 */ + { 0x5B03, CRL_REG_LEN_08BIT, 0xCF },/* OTP CTRL03 */ + { 0x5B05, CRL_REG_LEN_08BIT, 0x6C },/* OTP CTRL05 = default */ + { 0x5E00, CRL_REG_LEN_08BIT, 0x00 },/* PRE CTRL00 = default */ + { 0x5E01, CRL_REG_LEN_08BIT, 0x41 },/* PRE_CTRL01 = default */ + { 0x4825, CRL_REG_LEN_08BIT, 0x3a }, + { 0x4826, CRL_REG_LEN_08BIT, 0x40 }, + { 0x4808, CRL_REG_LEN_08BIT, 0x25 }, + { 0x3763, 
CRL_REG_LEN_08BIT, 0x18 }, + { 0x3768, CRL_REG_LEN_08BIT, 0xcc }, + { 0x470b, CRL_REG_LEN_08BIT, 0x28 }, + { 0x4202, CRL_REG_LEN_08BIT, 0x00 }, + { 0x400d, CRL_REG_LEN_08BIT, 0x10 }, + { 0x4040, CRL_REG_LEN_08BIT, 0x07 }, + { 0x403e, CRL_REG_LEN_08BIT, 0x08 }, + { 0x4041, CRL_REG_LEN_08BIT, 0xc6 }, + { 0x400a, CRL_REG_LEN_08BIT, 0x01 }, +}; + +static struct crl_register_write_rep ov8858_mode_8m_native[] = { + { 0x382d, CRL_REG_LEN_08BIT, 0x20 }, + { 0x3808, CRL_REG_LEN_08BIT, 0x0C },/* h_output_size high 3264 x 2448 */ + { 0x3809, CRL_REG_LEN_08BIT, 0xc0 },/* h_output_size low */ + { 0x380A, CRL_REG_LEN_08BIT, 0x09 },/* v_output_size high */ + { 0x380B, CRL_REG_LEN_08BIT, 0x90 },/* v_output_size low */ + { 0x4022, CRL_REG_LEN_08BIT, 0x0C }, + { 0x4023, CRL_REG_LEN_08BIT, 0x60 }, + { 0x4600, CRL_REG_LEN_08BIT, 0x01 }, + { 0x4601, CRL_REG_LEN_08BIT, 0x97 }, +}; + +static struct crl_register_write_rep ov8858_mode_6m_native[] = { + { 0x382d, CRL_REG_LEN_08BIT, 0x80 }, + { 0x3808, CRL_REG_LEN_08BIT, 0x0C },/* h_output_size high 3264 x 1836 */ + { 0x3809, CRL_REG_LEN_08BIT, 0xc0 },/* h_output_size low */ + { 0x380A, CRL_REG_LEN_08BIT, 0x07 },/* v_output_size high */ + { 0x380B, CRL_REG_LEN_08BIT, 0x2c },/* v_output_size low */ + { 0x4022, CRL_REG_LEN_08BIT, 0x0C }, + { 0x4023, CRL_REG_LEN_08BIT, 0x60 }, + { 0x4600, CRL_REG_LEN_08BIT, 0x01 }, + { 0x4601, CRL_REG_LEN_08BIT, 0x97 }, +}; + +static struct crl_register_write_rep ov8858_mode_8m_full[] = { + { 0x382d, CRL_REG_LEN_08BIT, 0x20 }, + { 0x3808, CRL_REG_LEN_08BIT, 0x0C },/* h_output_size high 3280 x 2464 */ + { 0x3809, CRL_REG_LEN_08BIT, 0xD0 },/* h_output_size low */ + { 0x380A, CRL_REG_LEN_08BIT, 0x09 },/* v_output_size high */ + { 0x380B, CRL_REG_LEN_08BIT, 0xA0 },/* v_output_size low */ + { 0x4022, CRL_REG_LEN_08BIT, 0x0C }, + { 0x4023, CRL_REG_LEN_08BIT, 0x60 }, + { 0x4600, CRL_REG_LEN_08BIT, 0x01 }, + { 0x4601, CRL_REG_LEN_08BIT, 0x97 }, +}; + +static struct crl_register_write_rep ov8858_mode_6m_full[] = { + { 
0x382d, CRL_REG_LEN_08BIT, 0x80 }, + { 0x3808, CRL_REG_LEN_08BIT, 0x0C },/* h_output_size high 3280 x 1852 */ + { 0x3809, CRL_REG_LEN_08BIT, 0xD0 },/* h_output_size low */ + { 0x380A, CRL_REG_LEN_08BIT, 0x07 },/* v_output_size high */ + { 0x380B, CRL_REG_LEN_08BIT, 0x3c },/* v_output_size low */ + { 0x4022, CRL_REG_LEN_08BIT, 0x0C }, + { 0x4023, CRL_REG_LEN_08BIT, 0x60 }, + { 0x4600, CRL_REG_LEN_08BIT, 0x01 }, + { 0x4601, CRL_REG_LEN_08BIT, 0x97 }, +}; + +static struct crl_register_write_rep ov8858_mode_1080[] = { + { 0x382d, CRL_REG_LEN_08BIT, 0xa0 }, + { 0x3808, CRL_REG_LEN_08BIT, 0x07 },/* h_output_size high*/ + { 0x3809, CRL_REG_LEN_08BIT, 0x80 },/* h_output_size low */ + { 0x380A, CRL_REG_LEN_08BIT, 0x04 },/* v_output_size high */ + { 0x380B, CRL_REG_LEN_08BIT, 0x38 },/* v_output_size low */ + { 0x4022, CRL_REG_LEN_08BIT, 0x07 }, + { 0x4023, CRL_REG_LEN_08BIT, 0x20 }, + { 0x4600, CRL_REG_LEN_08BIT, 0x00 }, + { 0x4601, CRL_REG_LEN_08BIT, 0xef }, +}; + +static struct crl_register_write_rep ov8858_mode_1920x1440_crop[] = { + { 0x382d, CRL_REG_LEN_08BIT, 0xa0 }, + { 0x3808, CRL_REG_LEN_08BIT, 0x07 },/* h_output_size high*/ + { 0x3809, CRL_REG_LEN_08BIT, 0x80 },/* h_output_size low */ + { 0x380A, CRL_REG_LEN_08BIT, 0x05 },/* v_output_size high */ + { 0x380B, CRL_REG_LEN_08BIT, 0xA0 },/* v_output_size low */ + { 0x4022, CRL_REG_LEN_08BIT, 0x07 }, + { 0x4023, CRL_REG_LEN_08BIT, 0x20 }, + { 0x4600, CRL_REG_LEN_08BIT, 0x00 }, + { 0x4601, CRL_REG_LEN_08BIT, 0xef }, +}; + +static struct crl_register_write_rep ov8858_mode_1984x1116_crop[] = { + { 0x382d, CRL_REG_LEN_08BIT, 0xa0 }, + { 0x3808, CRL_REG_LEN_08BIT, 0x07 },/* h_output_size high*/ + { 0x3809, CRL_REG_LEN_08BIT, 0xC0 },/* h_output_size low */ + { 0x380A, CRL_REG_LEN_08BIT, 0x04 },/* v_output_size high */ + { 0x380B, CRL_REG_LEN_08BIT, 0x5C },/* v_output_size low */ + { 0x4022, CRL_REG_LEN_08BIT, 0x07 }, + { 0x4023, CRL_REG_LEN_08BIT, 0x20 }, + { 0x4600, CRL_REG_LEN_08BIT, 0x00 }, + { 0x4601, CRL_REG_LEN_08BIT, 
0xef }, +}; + +static struct crl_register_write_rep ov8858_streamon_regs[] = { + { 0x0100, CRL_REG_LEN_08BIT, 0x01 } +}; + +static struct crl_register_write_rep ov8858_streamoff_regs[] = { + { 0x0100, CRL_REG_LEN_08BIT, 0x00 } +}; + +static struct crl_register_write_rep ov8858_data_fmt_width10[] = { + { 0x3031, CRL_REG_LEN_08BIT, 0x0a } +}; + +static struct crl_arithmetic_ops ov8858_vflip_ops[] = { + { + .op = CRL_BITWISE_LSHIFT, + .operand.entity_val = 1, + }, +}; + +static struct crl_arithmetic_ops ov8858_hflip_ops[] = { + { + .op = CRL_BITWISE_LSHIFT, + .operand.entity_val = 1, + }, +}; + +static struct crl_arithmetic_ops ov8858_hblank_ops[] = { + { + .op = CRL_BITWISE_RSHIFT, + .operand.entity_val = 1, + }, +}; + +static struct crl_arithmetic_ops ov8858_exposure_ops[] = { + { + .op = CRL_BITWISE_LSHIFT, + .operand.entity_val = 4, + }, +}; + +static struct crl_dynamic_register_access ov8858_v_flip_regs[] = { + { + .address = 0x3820, + .len = CRL_REG_LEN_08BIT | CRL_REG_READ_AND_UPDATE, + .ops_items = ARRAY_SIZE(ov8858_vflip_ops), + .ops = ov8858_vflip_ops, + .mask = 0x2, + }, +}; + +static struct crl_dynamic_register_access ov8858_dig_gain_regs[] = { + { + .address = 0x5032, + .len = CRL_REG_LEN_16BIT, + .ops_items = 0, + .ops = 0, + .mask = 0xffff, + }, + { + .address = 0x5034, + .len = CRL_REG_LEN_16BIT, + .ops_items = 0, + .ops = 0, + .mask = 0xffff, + }, + { + .address = 0x5036, + .len = CRL_REG_LEN_16BIT, + .ops_items = 0, + .ops = 0, + .mask = 0xffff, + }, +}; + +static struct crl_dynamic_register_access ov8858_h_flip_regs[] = { + { + .address = 0x3821, + .len = CRL_REG_LEN_08BIT | CRL_REG_READ_AND_UPDATE, + .ops_items = ARRAY_SIZE(ov8858_hflip_ops), + .ops = ov8858_hflip_ops, + .mask = 0x2, + }, +}; + +struct crl_register_write_rep ov8858_poweroff_regset[] = { + { 0x0103, CRL_REG_LEN_08BIT, 0x01 }, +}; + +static struct crl_dynamic_register_access ov8858_ana_gain_global_regs[] = { + { + .address = 0x3508, + .len = CRL_REG_LEN_16BIT, + .ops_items = 0, + 
.ops = 0, + .mask = 0x7ff, + }, +}; + +static struct crl_dynamic_register_access ov8858_exposure_regs[] = { + { + .address = 0x3500, + .len = CRL_REG_LEN_24BIT, + .ops_items = ARRAY_SIZE(ov8858_exposure_ops), + .ops = ov8858_exposure_ops, + .mask = 0x0ffff0, + }, +}; + +static struct crl_dynamic_register_access ov8858_vblank_regs[] = { + { + .address = 0x380E, + .len = CRL_REG_LEN_16BIT, + .ops_items = 0, + .ops = 0, + .mask = 0xffff, + }, +}; + +static struct crl_dynamic_register_access ov8858_hblank_regs[] = { + { + .address = 0x380C, + .len = CRL_REG_LEN_16BIT, + .ops_items = ARRAY_SIZE(ov8858_hblank_ops), + .ops = ov8858_hblank_ops, + .mask = 0xffff, + }, +}; + +static struct crl_sensor_detect_config ov8858_sensor_detect_regset[] = { + { + .reg = { 0x300B, CRL_REG_LEN_08BIT, 0x000000ff }, + .width = 7, + }, + { + .reg = { 0x300C, CRL_REG_LEN_08BIT, 0x000000ff }, + .width = 7, + }, +}; + +static struct crl_pll_configuration ov8858_pll_configurations[] = { + { + .input_clk = 24000000, + .op_sys_clk = 360000000, + .bitsperpixel = 10, + .pixel_rate_csi = 180000000, + .pixel_rate_pa = 290133334, + .csi_lanes = 4, + .comp_items = 0, + .ctrl_data = 0, + .pll_regs_items = ARRAY_SIZE(ov8858_pll_360mbps), + .pll_regs = ov8858_pll_360mbps, + }, + +}; + +static struct crl_subdev_rect_rep ov8858_8m_rects_native[] = { + { + .subdev_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .in_rect.left = 0, + .in_rect.top = 0, + .in_rect.width = 3264, + .in_rect.height = 2448, + .out_rect.left = 0, + .out_rect.top = 0, + .out_rect.width = 3264, + .out_rect.height = 2448, + }, + { + .subdev_type = CRL_SUBDEV_TYPE_BINNER, + .in_rect.left = 0, + .in_rect.top = 0, + .in_rect.width = 3264, + .in_rect.height = 2448, + .out_rect.left = 0, + .out_rect.top = 0, + .out_rect.width = 3264, + .out_rect.height = 2448, + }, + { + .subdev_type = CRL_SUBDEV_TYPE_SCALER, + .in_rect.left = 0, + .in_rect.top = 0, + .in_rect.width = 3264, + .in_rect.height = 2448, + .out_rect.left = 0, + .out_rect.top = 0, + 
.out_rect.width = 3264, + .out_rect.height = 2448, + }, +}; + +static struct crl_subdev_rect_rep ov8858_6m_rects_native[] = { + { + .subdev_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .in_rect.left = 0, + .in_rect.top = 0, + .in_rect.width = 3264, + .in_rect.height = 2448, + .out_rect.left = 0, + .out_rect.top = 0, + .out_rect.width = 3264, + .out_rect.height = 2448, + }, + { + .subdev_type = CRL_SUBDEV_TYPE_BINNER, + .in_rect.left = 0, + .in_rect.top = 0, + .in_rect.width = 3264, + .in_rect.height = 2448, + .out_rect.left = 0, + .out_rect.top = 0, + .out_rect.width = 3264, + .out_rect.height = 2448, + }, + { + .subdev_type = CRL_SUBDEV_TYPE_SCALER, + .in_rect.left = 0, + .in_rect.top = 0, + .in_rect.width = 3264, + .in_rect.height = 2448, + .out_rect.left = 0, + .out_rect.top = 0, + .out_rect.width = 3264, + .out_rect.height = 1836, + }, +}; + + +static struct crl_subdev_rect_rep ov8858_8m_rects_full[] = { + { + .subdev_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .in_rect.left = 0, + .in_rect.top = 0, + .in_rect.width = 3280, + .in_rect.height = 2464, + .out_rect.left = 0, + .out_rect.top = 0, + .out_rect.width = 3280, + .out_rect.height = 2464, + }, + { + .subdev_type = CRL_SUBDEV_TYPE_BINNER, + .in_rect.left = 0, + .in_rect.top = 0, + .in_rect.width = 3280, + .in_rect.height = 2464, + .out_rect.left = 0, + .out_rect.top = 0, + .out_rect.width = 3280, + .out_rect.height = 2464, + }, + { + .subdev_type = CRL_SUBDEV_TYPE_SCALER, + .in_rect.left = 0, + .in_rect.top = 0, + .in_rect.width = 3280, + .in_rect.height = 2464, + .out_rect.left = 0, + .out_rect.top = 0, + .out_rect.width = 3280, + .out_rect.height = 2464, + }, +}; + +static struct crl_subdev_rect_rep ov8858_6m_rects_full[] = { + { + .subdev_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .in_rect.left = 0, + .in_rect.top = 0, + .in_rect.width = 3280, + .in_rect.height = 2464, + .out_rect.left = 0, + .out_rect.top = 0, + .out_rect.width = 3280, + .out_rect.height = 2464, + }, + { + .subdev_type = CRL_SUBDEV_TYPE_BINNER, + 
.in_rect.left = 0, + .in_rect.top = 0, + .in_rect.width = 3280, + .in_rect.height = 2464, + .out_rect.left = 0, + .out_rect.top = 0, + .out_rect.width = 3280, + .out_rect.height = 2464, + }, + { + .subdev_type = CRL_SUBDEV_TYPE_SCALER, + .in_rect.left = 0, + .in_rect.top = 0, + .in_rect.width = 3280, + .in_rect.height = 2464, + .out_rect.left = 0, + .out_rect.top = 0, + .out_rect.width = 3280, + .out_rect.height = 1852, + }, +}; + +static struct crl_subdev_rect_rep ov8858_1080_rects[] = { + { + .subdev_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .in_rect.left = 0, + .in_rect.top = 0, + .in_rect.width = 3264, + .in_rect.height = 2448, + .out_rect.left = 0, + .out_rect.top = 0, + .out_rect.width = 3264, + .out_rect.height = 2448, + }, + { + .subdev_type = CRL_SUBDEV_TYPE_BINNER, + .in_rect.left = 0, + .in_rect.top = 0, + .in_rect.width = 3264, + .in_rect.height = 2448, + .out_rect.left = 0, + .out_rect.top = 0, + .out_rect.width = 3264, + .out_rect.height = 2448, + }, + { + .subdev_type = CRL_SUBDEV_TYPE_SCALER, + .in_rect.left = 0, + .in_rect.top = 0, + .in_rect.width = 3264, + .in_rect.height = 2448, + .out_rect.left = 0, + .out_rect.top = 0, + .out_rect.width = 1920, + .out_rect.height = 1080, + }, +}; + +static struct crl_subdev_rect_rep ov8858_1920x1440_rects_crop[] = { + { + .subdev_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .in_rect.left = 0, + .in_rect.top = 0, + .in_rect.width = 3264, + .in_rect.height = 2448, + .out_rect.left = 0, + .out_rect.top = 0, + .out_rect.width = 3264, + .out_rect.height = 2448, + }, + { + .subdev_type = CRL_SUBDEV_TYPE_BINNER, + .in_rect.left = 0, + .in_rect.top = 0, + .in_rect.width = 3264, + .in_rect.height = 2448, + .out_rect.left = 0, + .out_rect.top = 0, + .out_rect.width = 3264, + .out_rect.height = 2448, + }, + { + .subdev_type = CRL_SUBDEV_TYPE_SCALER, + .in_rect.left = 0, + .in_rect.top = 0, + .in_rect.width = 3264, + .in_rect.height = 2448, + .out_rect.left = 0, + .out_rect.top = 0, + .out_rect.width = 1920, + .out_rect.height = 
1440, + }, +}; + +static struct crl_subdev_rect_rep ov8858_1984x1116_rects_crop[] = { + { + .subdev_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .in_rect.left = 0, + .in_rect.top = 0, + .in_rect.width = 3264, + .in_rect.height = 2448, + .out_rect.left = 0, + .out_rect.top = 0, + .out_rect.width = 3264, + .out_rect.height = 2448, + }, + { + .subdev_type = CRL_SUBDEV_TYPE_BINNER, + .in_rect.left = 0, + .in_rect.top = 0, + .in_rect.width = 3264, + .in_rect.height = 2448, + .out_rect.left = 0, + .out_rect.top = 0, + .out_rect.width = 3264, + .out_rect.height = 2448, + }, + { + .subdev_type = CRL_SUBDEV_TYPE_SCALER, + .in_rect.left = 0, + .in_rect.top = 0, + .in_rect.width = 3264, + .in_rect.height = 2448, + .out_rect.left = 0, + .out_rect.top = 0, + .out_rect.width = 1984, + .out_rect.height = 1116, + }, +}; + +static struct crl_mode_rep ov8858_modes[] = { + { + .sd_rects_items = ARRAY_SIZE(ov8858_8m_rects_native), + .sd_rects = ov8858_8m_rects_native, + .binn_hor = 1, + .binn_vert = 1, + .scale_m = 1, + .width = 3264, + .height = 2448, + .min_llp = 3880, + .min_fll = 2474, + .comp_items = 0, + .ctrl_data = 0, + .mode_regs_items = ARRAY_SIZE(ov8858_mode_8m_native), + .mode_regs = ov8858_mode_8m_native, + }, + { + .sd_rects_items = ARRAY_SIZE(ov8858_6m_rects_native), + .sd_rects = ov8858_6m_rects_native, + .binn_hor = 1, + .binn_vert = 1, + .scale_m = 1, + .width = 3264, + .height = 1836, + .min_llp = 5132, + .min_fll = 1872, + .comp_items = 0, + .ctrl_data = 0, + .mode_regs_items = ARRAY_SIZE(ov8858_mode_6m_native), + .mode_regs = ov8858_mode_6m_native, + }, + { + .sd_rects_items = ARRAY_SIZE(ov8858_8m_rects_full), + .sd_rects = ov8858_8m_rects_full, + .binn_hor = 1, + .binn_vert = 1, + .scale_m = 1, + .width = 3280, + .height = 2464, + .min_llp = 3880, + .min_fll = 2474, + .comp_items = 0, + .ctrl_data = 0, + .mode_regs_items = ARRAY_SIZE(ov8858_mode_8m_full), + .mode_regs = ov8858_mode_8m_full, + }, + { + .sd_rects_items = ARRAY_SIZE(ov8858_6m_rects_full), + .sd_rects = 
ov8858_6m_rects_full, + .binn_hor = 1, + .binn_vert = 1, + .scale_m = 1, + .width = 3280, + .height = 1852, + .min_llp = 5132, + .min_fll = 1872, + .comp_items = 0, + .ctrl_data = 0, + .mode_regs_items = ARRAY_SIZE(ov8858_mode_6m_full), + .mode_regs = ov8858_mode_6m_full, + }, + { + .sd_rects_items = ARRAY_SIZE(ov8858_1080_rects), + .sd_rects = ov8858_1080_rects, + .binn_hor = 1, + .binn_vert = 1, + .scale_m = 1, + .width = 1920, + .height = 1080, + .min_llp = 4284, + .min_fll = 1120, + .comp_items = 0, + .ctrl_data = 0, + .mode_regs_items = ARRAY_SIZE(ov8858_mode_1080), + .mode_regs = ov8858_mode_1080, + }, + { + .sd_rects_items = ARRAY_SIZE(ov8858_1920x1440_rects_crop), + .sd_rects = ov8858_1920x1440_rects_crop, + .binn_hor = 1, + .binn_vert = 1, + .scale_m = 1, + .width = 1920, + .height = 1440, + .min_llp = 3880, + .min_fll = 1480, + .comp_items = 0, + .ctrl_data = 0, + .mode_regs_items = ARRAY_SIZE(ov8858_mode_1920x1440_crop), + .mode_regs = ov8858_mode_1920x1440_crop, + }, + { + .sd_rects_items = ARRAY_SIZE(ov8858_1984x1116_rects_crop), + .sd_rects = ov8858_1984x1116_rects_crop, + .binn_hor = 1, + .binn_vert = 1, + .scale_m = 1, + .width = 1984, + .height = 1116, + .min_llp = 3880, + .min_fll = 1120, + .comp_items = 0, + .ctrl_data = 0, + .mode_regs_items = ARRAY_SIZE(ov8858_mode_1984x1116_crop), + .mode_regs = ov8858_mode_1984x1116_crop, + }, + +}; + +static struct crl_sensor_subdev_config ov8858_sensor_subdevs[] = { + { + .subdev_type = CRL_SUBDEV_TYPE_SCALER, + .name = "ov8858 scaler", + }, + { + .subdev_type = CRL_SUBDEV_TYPE_BINNER, + .name = "ov8858 binner", + }, + { + .subdev_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .name = "ov8858 pixel array", + }, +}; + +static struct crl_sensor_limits ov8858_sensor_limits = { + .x_addr_min = 0, + .y_addr_min = 0, + .x_addr_max = 3280, + .y_addr_max = 2464, + .min_frame_length_lines = 160, + .max_frame_length_lines = 65535, + .min_line_length_pixels = 3880, + .max_line_length_pixels = 32752, + .scaler_m_min = 16, + 
.scaler_m_max = 255, + .scaler_n_min = 16, + .scaler_n_max = 16, + .min_even_inc = 1, + .max_even_inc = 1, + .min_odd_inc = 1, + .max_odd_inc = 3, +}; + +static struct crl_flip_data ov8858_flip_configurations[] = { + { + .flip = CRL_FLIP_DEFAULT_NONE, + .pixel_order = CRL_PIXEL_ORDER_BGGR, + }, + { + .flip = CRL_FLIP_VFLIP, + .pixel_order = CRL_PIXEL_ORDER_GRBG, + }, + { + .flip = CRL_FLIP_HFLIP, + .pixel_order = CRL_PIXEL_ORDER_GBRG, + }, + { + .flip = CRL_FLIP_HFLIP_VFLIP, + .pixel_order = CRL_PIXEL_ORDER_RGGB, + }, +}; + +static struct crl_csi_data_fmt ov8858_crl_csi_data_fmt[] = { + { + .code = MEDIA_BUS_FMT_SGRBG10_1X10, + .pixel_order = CRL_PIXEL_ORDER_GRBG, + .bits_per_pixel = 10, + .regs_items = 1, + .regs = ov8858_data_fmt_width10, + }, + { + .code = MEDIA_BUS_FMT_SRGGB10_1X10, + .pixel_order = CRL_PIXEL_ORDER_RGGB, + .regs_items = 1, + .bits_per_pixel = 10, + .regs = ov8858_data_fmt_width10, + }, + { + .code = MEDIA_BUS_FMT_SBGGR10_1X10, + .pixel_order = CRL_PIXEL_ORDER_BGGR, + .regs_items = 1, + .bits_per_pixel = 10, + .regs = ov8858_data_fmt_width10, + }, + { + .code = MEDIA_BUS_FMT_SGBRG10_1X10, + .pixel_order = CRL_PIXEL_ORDER_GBRG, + .regs_items = 1, + .bits_per_pixel = 10, + .regs = ov8858_data_fmt_width10, + }, +}; + +static struct crl_v4l2_ctrl ov8858_v4l2_ctrls[] = { + { + .sd_type = CRL_SUBDEV_TYPE_SCALER, + .op_type = CRL_V4L2_CTRL_SET_OP, + .context = SENSOR_IDLE, + .ctrl_id = V4L2_CID_LINK_FREQ, + .name = "V4L2_CID_LINK_FREQ", + .type = CRL_V4L2_CTRL_TYPE_MENU_INT, + .data.v4l2_int_menu.def = 0, + .data.v4l2_int_menu.max = 0, + .data.v4l2_int_menu.menu = 0, + .flags = 0, + .impact = CRL_IMPACTS_NO_IMPACT, + .regs_items = 0, + .regs = 0, + .dep_items = 0, + .dep_ctrls = 0, + }, + { + .sd_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .op_type = CRL_V4L2_CTRL_GET_OP, + .context = SENSOR_POWERED_ON, + .ctrl_id = V4L2_CID_PIXEL_RATE, + .name = "V4L2_CID_PIXEL_RATE_PA", + .type = CRL_V4L2_CTRL_TYPE_INTEGER, + .data.std_data.min = 0, + .data.std_data.max = 
INT_MAX, + .data.std_data.step = 1, + .data.std_data.def = 0, + .flags = 0, + .impact = CRL_IMPACTS_NO_IMPACT, + .regs_items = 0, + .regs = 0, + .dep_items = 0, + .dep_ctrls = 0, + }, + { + .sd_type = CRL_SUBDEV_TYPE_SCALER, + .op_type = CRL_V4L2_CTRL_GET_OP, + .context = SENSOR_POWERED_ON, + .ctrl_id = V4L2_CID_PIXEL_RATE, + .name = "V4L2_CID_PIXEL_RATE_CSI", + .type = CRL_V4L2_CTRL_TYPE_INTEGER, + .data.std_data.min = 0, + .data.std_data.max = INT_MAX, + .data.std_data.step = 1, + .data.std_data.def = 0, + .flags = 0, + .impact = CRL_IMPACTS_NO_IMPACT, + .regs_items = 0, + .regs = 0, + .dep_items = 0, + .dep_ctrls = 0, + }, + { + .sd_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .op_type = CRL_V4L2_CTRL_SET_OP, + .context = SENSOR_POWERED_ON, + .ctrl_id = V4L2_CID_ANALOGUE_GAIN, + .name = "V4L2_CID_ANALOGUE_GAIN", + .type = CRL_V4L2_CTRL_TYPE_INTEGER, + .data.std_data.min = 0, + .data.std_data.max = 4096, + .data.std_data.step = 1, + .data.std_data.def = 128, + .flags = 0, + .impact = CRL_IMPACTS_NO_IMPACT, + .ctrl = 0, + .regs_items = ARRAY_SIZE(ov8858_ana_gain_global_regs), + .regs = ov8858_ana_gain_global_regs, + .dep_items = 0, + .dep_ctrls = 0, + }, + { + .sd_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .op_type = CRL_V4L2_CTRL_SET_OP, + .context = SENSOR_POWERED_ON, + .ctrl_id = V4L2_CID_EXPOSURE, + .name = "V4L2_CID_EXPOSURE", + .type = CRL_V4L2_CTRL_TYPE_INTEGER, + .data.std_data.min = 0, + .data.std_data.max = 65500, + .data.std_data.step = 1, + .data.std_data.def = 0, + .flags = 0, + .impact = CRL_IMPACTS_NO_IMPACT, + .ctrl = 0, + .regs_items = ARRAY_SIZE(ov8858_exposure_regs), + .regs = ov8858_exposure_regs, + .dep_items = 0, /* FLL is changes automatically */ + .dep_ctrls = 0, + }, + { + .sd_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .op_type = CRL_V4L2_CTRL_SET_OP, + .context = SENSOR_POWERED_ON, + .ctrl_id = V4L2_CID_HFLIP, + .name = "V4L2_CID_HFLIP", + .type = CRL_V4L2_CTRL_TYPE_INTEGER, + .data.std_data.min = 0, + .data.std_data.max = 1, + .data.std_data.step = 1, + 
.data.std_data.def = 0, + .flags = 0, + .impact = CRL_IMPACTS_NO_IMPACT, + .ctrl = 0, + .regs_items = ARRAY_SIZE(ov8858_h_flip_regs), + .regs = ov8858_h_flip_regs, + .dep_items = 0, + .dep_ctrls = 0, + }, + { + .sd_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .op_type = CRL_V4L2_CTRL_SET_OP, + .context = SENSOR_POWERED_ON, + .ctrl_id = V4L2_CID_VFLIP, + .name = "V4L2_CID_VFLIP", + .type = CRL_V4L2_CTRL_TYPE_INTEGER, + .data.std_data.min = 0, + .data.std_data.max = 1, + .data.std_data.step = 1, + .data.std_data.def = 0, + .flags = 0, + .impact = CRL_IMPACTS_NO_IMPACT, + .ctrl = 0, + .regs_items = ARRAY_SIZE(ov8858_v_flip_regs), + .regs = ov8858_v_flip_regs, + .dep_items = 0, + .dep_ctrls = 0, + }, + { + .sd_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .op_type = CRL_V4L2_CTRL_SET_OP, + .context = SENSOR_POWERED_ON, + .ctrl_id = V4L2_CID_FRAME_LENGTH_LINES, + .name = "Frame Length Lines", + .type = CRL_V4L2_CTRL_TYPE_CUSTOM, + .data.std_data.min = 160, + .data.std_data.max = 65535, + .data.std_data.step = 1, + .data.std_data.def = 2474, + .flags = V4L2_CTRL_FLAG_UPDATE, + .impact = CRL_IMPACTS_NO_IMPACT, + .ctrl = 0, + .regs_items = ARRAY_SIZE(ov8858_vblank_regs), + .regs = ov8858_vblank_regs, + .dep_items = 0, + .dep_ctrls = 0, + .v4l2_type = V4L2_CTRL_TYPE_INTEGER, + }, + { + .sd_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .op_type = CRL_V4L2_CTRL_SET_OP, + .context = SENSOR_POWERED_ON, + .ctrl_id = V4L2_CID_LINE_LENGTH_PIXELS, + .name = "Line Length Pixels", + .type = CRL_V4L2_CTRL_TYPE_CUSTOM, + .data.std_data.min = 1024, + .data.std_data.max = 65520, + .data.std_data.step = 1, + .data.std_data.def = 3880, + .flags = V4L2_CTRL_FLAG_UPDATE, + .impact = CRL_IMPACTS_NO_IMPACT, + .ctrl = 0, + .regs_items = ARRAY_SIZE(ov8858_hblank_regs), + .regs = ov8858_hblank_regs, + .dep_items = 0, + .dep_ctrls = 0, + .v4l2_type = V4L2_CTRL_TYPE_INTEGER, + }, + { + .sd_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .op_type = CRL_V4L2_CTRL_SET_OP, + .context = SENSOR_POWERED_ON, + .ctrl_id = V4L2_CID_GAIN, + 
.name = "Digital Gain", + .type = CRL_V4L2_CTRL_TYPE_INTEGER, + .data.std_data.min = 0, + .data.std_data.max = 4095, + .data.std_data.step = 1, + .data.std_data.def = 1024, + .flags = 0, + .impact = CRL_IMPACTS_NO_IMPACT, + .ctrl = 0, + .regs_items = ARRAY_SIZE(ov8858_dig_gain_regs), + .regs = ov8858_dig_gain_regs, + .dep_items = 0, + .dep_ctrls = 0, + }, +}; + +#define OV8858_OTP_START_ADDR 0x7010 +#define OV8858_OTP_END_ADDR 0x7186 + +#define OV8858_OTP_LEN (OV8858_OTP_END_ADDR - OV8858_OTP_START_ADDR + 1) +#define OV8858_OTP_L_ADDR(x) (x & 0xff) +#define OV8858_OTP_H_ADDR(x) ((x >> 8) & 0xff) + +static struct crl_register_write_rep ov8858_nvm_preop_regset[] = { + /* Start streaming */ + { 0x0100, CRL_REG_LEN_08BIT, 0x01 }, + /* Manual mode, program disable */ + { 0x3D84, CRL_REG_LEN_08BIT, 0xC0 }, + /* Manual OTP start address for access */ + { 0x3D88, CRL_REG_LEN_08BIT, OV8858_OTP_H_ADDR(OV8858_OTP_START_ADDR)}, + { 0x0D89, CRL_REG_LEN_08BIT, OV8858_OTP_L_ADDR(OV8858_OTP_START_ADDR)}, + /* Manual OTP end address for access */ + { 0x3D8A, CRL_REG_LEN_08BIT, OV8858_OTP_H_ADDR(OV8858_OTP_END_ADDR)}, + { 0x3D8B, CRL_REG_LEN_08BIT, OV8858_OTP_L_ADDR(OV8858_OTP_END_ADDR)}, + /* OTP load enable */ + { 0x3D81, CRL_REG_LEN_08BIT, 0x01 }, + /* Wait for the data to load into the buffer */ + { 0x0000, CRL_REG_LEN_DELAY, 0x05 }, +}; + +static struct crl_register_write_rep ov8858_nvm_postop_regset[] = { + { 0x0100, CRL_REG_LEN_08BIT, 0x00 }, /* Stop streaming */ +}; + +static struct crl_nvm_blob ov8858_nvm_blobs[] = { + {CRL_I2C_ADDRESS_NO_OVERRIDE, OV8858_OTP_START_ADDR, OV8858_OTP_LEN }, +}; + +static struct crl_arithmetic_ops ov8858_frame_desc_width_ops[] = { + { + .op = CRL_ASSIGNMENT, + .operand.entity_type = CRL_DYNAMIC_VAL_OPERAND_TYPE_VAR_REF, + .operand.entity_val = CRL_VAR_REF_OUTPUT_WIDTH, + }, +}; + +static struct crl_arithmetic_ops ov8858_frame_desc_height_ops[] = { + { + .op = CRL_ASSIGNMENT, + .operand.entity_type = CRL_DYNAMIC_VAL_OPERAND_TYPE_CONST, + 
.operand.entity_val = 1, + }, +}; + +static struct crl_frame_desc ov8858_frame_desc[] = { + { + .flags.entity_val = 0, + .bpp.entity_type = CRL_DYNAMIC_VAL_OPERAND_TYPE_VAR_REF, + .bpp.entity_val = CRL_VAR_REF_BITSPERPIXEL, + .pixelcode.entity_val = MEDIA_BUS_FMT_FIXED, + .length.entity_val = 0, + .start_line.entity_val = 0, + .start_pixel.entity_val = 0, + .width = { + .ops_items = ARRAY_SIZE(ov8858_frame_desc_width_ops), + .ops = ov8858_frame_desc_width_ops, + }, + .height = { + .ops_items = ARRAY_SIZE(ov8858_frame_desc_height_ops), + .ops = ov8858_frame_desc_height_ops, + }, + .csi2_channel.entity_val = 0, + .csi2_data_type.entity_val = 0x12, + }, +}; + +/* Power items, they are enabled in the order they are listed here */ +static struct crl_power_seq_entity ov8858_power_items[] = { + { + .type = CRL_POWER_ETY_REGULATOR_FRAMEWORK, + .ent_name = "VANA", + .val = 2800000, + .delay = 0, + }, + { + .type = CRL_POWER_ETY_REGULATOR_FRAMEWORK, + .ent_name = "VDIG", + .val = 1200000, + .delay = 0, + }, + { + .type = CRL_POWER_ETY_REGULATOR_FRAMEWORK, + .ent_name = "VIO", + .val = 1800000, + .delay = 0, + }, + { + .type = CRL_POWER_ETY_CLK_FRAMEWORK, + .val = 24000000, + }, + { + .type = CRL_POWER_ETY_GPIO_FROM_PDATA, + .val = 1, + .delay = 10000, + }, +}; + +static struct crl_sensor_configuration ov8858_crl_configuration = { + + .power_items = ARRAY_SIZE(ov8858_power_items), + .power_entities = ov8858_power_items, + + .powerup_regs_items = ARRAY_SIZE(ov8858_powerup_regset), + .powerup_regs = ov8858_powerup_regset, + + .poweroff_regs_items = 0, + .poweroff_regs = 0, + + + .id_reg_items = ARRAY_SIZE(ov8858_sensor_detect_regset), + .id_regs = ov8858_sensor_detect_regset, + + .subdev_items = ARRAY_SIZE(ov8858_sensor_subdevs), + .subdevs = ov8858_sensor_subdevs, + + .sensor_limits = &ov8858_sensor_limits, + + .pll_config_items = ARRAY_SIZE(ov8858_pll_configurations), + .pll_configs = ov8858_pll_configurations, + + .modes_items = ARRAY_SIZE(ov8858_modes), + .modes = 
ov8858_modes, + + .streamon_regs_items = ARRAY_SIZE(ov8858_streamon_regs), + .streamon_regs = ov8858_streamon_regs, + + .streamoff_regs_items = ARRAY_SIZE(ov8858_streamoff_regs), + .streamoff_regs = ov8858_streamoff_regs, + + .v4l2_ctrls_items = ARRAY_SIZE(ov8858_v4l2_ctrls), + .v4l2_ctrl_bank = ov8858_v4l2_ctrls, + + .csi_fmts_items = ARRAY_SIZE(ov8858_crl_csi_data_fmt), + .csi_fmts = ov8858_crl_csi_data_fmt, + + .flip_items = ARRAY_SIZE(ov8858_flip_configurations), + .flip_data = ov8858_flip_configurations, + + .crl_nvm_info.nvm_flags = CRL_NVM_ADDR_MODE_16BIT, + .crl_nvm_info.nvm_preop_regs_items = + ARRAY_SIZE(ov8858_nvm_preop_regset), + .crl_nvm_info.nvm_preop_regs = ov8858_nvm_preop_regset, + .crl_nvm_info.nvm_postop_regs_items = + ARRAY_SIZE(ov8858_nvm_postop_regset), + .crl_nvm_info.nvm_postop_regs = ov8858_nvm_postop_regset, + .crl_nvm_info.nvm_blobs_items = ARRAY_SIZE(ov8858_nvm_blobs), + .crl_nvm_info.nvm_config = ov8858_nvm_blobs, + + .frame_desc_entries = ARRAY_SIZE(ov8858_frame_desc), + .frame_desc_type = CRL_V4L2_MBUS_FRAME_DESC_TYPE_CSI2, + .frame_desc = ov8858_frame_desc, + + .msr_file_name = "00ov8858.bxt_rvp.drvb", +}; + +#endif /* __CRLMODULE_OV8858_CONFIGURATION_H_ */ diff --git a/drivers/media/i2c/crlmodule/crl_ov9281_configuration.h b/drivers/media/i2c/crlmodule/crl_ov9281_configuration.h new file mode 100644 index 0000000000000..5fb1987ed08ff --- /dev/null +++ b/drivers/media/i2c/crlmodule/crl_ov9281_configuration.h @@ -0,0 +1,520 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (C) 2018 Intel Corporation + * + * Author: Wu Xia + * + */ + +#ifndef __CRLMODULE_OV9281_CONFIGURATION_H_ +#define __CRLMODULE_OV9281_CONFIGURATION_H_ + +#include "crlmodule-sensor-ds.h" + +static struct crl_register_write_rep ov9281_pll_800mbps[] = { + { 0x0302, CRL_REG_LEN_08BIT, 0x32 }, + { 0x030d, CRL_REG_LEN_08BIT, 0x50 }, + { 0x030e, CRL_REG_LEN_08BIT, 0x02 }, + { 0x00, CRL_REG_LEN_DELAY, 10, 0x00}, /* Add a pre 10ms delay */ +}; + +static struct 
crl_register_write_rep ov9281_powerup_regset[] = { + { 0x4f00, CRL_REG_LEN_08BIT, 0x00 }, + { 0x00, CRL_REG_LEN_DELAY, 10, 0x00}, /* Add a pre 10ms delay */ +}; + +static struct crl_register_write_rep ov9281_mode_1m[] = { + { 0x3001, CRL_REG_LEN_08BIT, 0x60 }, + { 0x3004, CRL_REG_LEN_08BIT, 0x00 }, + + { 0x3005, CRL_REG_LEN_08BIT, 0x00 }, + { 0x3006, CRL_REG_LEN_08BIT, 0x04 }, + + { 0x3011, CRL_REG_LEN_08BIT, 0x0a }, + { 0x3013, CRL_REG_LEN_08BIT, 0x18 }, + { 0x3022, CRL_REG_LEN_08BIT, 0x01 }, + { 0x3030, CRL_REG_LEN_08BIT, 0x10 }, + { 0x3039, CRL_REG_LEN_08BIT, 0x32 }, + { 0x303a, CRL_REG_LEN_08BIT, 0x00 }, + + { 0x3503, CRL_REG_LEN_08BIT, 0x08 }, + { 0x3505, CRL_REG_LEN_08BIT, 0x8c }, + { 0x3507, CRL_REG_LEN_08BIT, 0x03 }, + { 0x3508, CRL_REG_LEN_08BIT, 0x00 }, + { 0x3610, CRL_REG_LEN_08BIT, 0x80 }, + { 0x3611, CRL_REG_LEN_08BIT, 0xa0 }, + + { 0x3620, CRL_REG_LEN_08BIT, 0x6f }, + { 0x3632, CRL_REG_LEN_08BIT, 0x56 }, + { 0x3633, CRL_REG_LEN_08BIT, 0x78 }, + { 0x3662, CRL_REG_LEN_08BIT, 0x05 }, + { 0x3666, CRL_REG_LEN_08BIT, 0x00 }, + + { 0x366f, CRL_REG_LEN_08BIT, 0x5a }, + { 0x3680, CRL_REG_LEN_08BIT, 0x84 }, + + { 0x3712, CRL_REG_LEN_08BIT, 0x80 }, + { 0x372d, CRL_REG_LEN_08BIT, 0x22 }, + { 0x3731, CRL_REG_LEN_08BIT, 0x80 }, + { 0x3732, CRL_REG_LEN_08BIT, 0x30 }, + { 0x3778, CRL_REG_LEN_08BIT, 0x00 }, + + { 0x377d, CRL_REG_LEN_08BIT, 0x22 }, + { 0x3788, CRL_REG_LEN_08BIT, 0x02 }, + { 0x3789, CRL_REG_LEN_08BIT, 0xa4 }, + { 0x378a, CRL_REG_LEN_08BIT, 0x00 }, + { 0x378b, CRL_REG_LEN_08BIT, 0x4a }, + { 0x3799, CRL_REG_LEN_08BIT, 0x20 }, + + /* window setting*/ + { 0x3800, CRL_REG_LEN_08BIT, 0x00 }, + { 0x3801, CRL_REG_LEN_08BIT, 0x08 }, + { 0x3802, CRL_REG_LEN_08BIT, 0x00 }, + { 0x3803, CRL_REG_LEN_08BIT, 0x08 }, + { 0x3804, CRL_REG_LEN_08BIT, 0x05 }, + { 0x3805, CRL_REG_LEN_08BIT, 0x07 }, + { 0x3806, CRL_REG_LEN_08BIT, 0x03 }, + { 0x3807, CRL_REG_LEN_08BIT, 0x27 }, + + { 0x3808, CRL_REG_LEN_08BIT, 0x05 }, + { 0x3809, CRL_REG_LEN_08BIT, 0x00 }, + { 0x380a, 
CRL_REG_LEN_08BIT, 0x03 }, + { 0x380b, CRL_REG_LEN_08BIT, 0x20 }, + + { 0x380c, CRL_REG_LEN_08BIT, 0x02 }, + { 0x380d, CRL_REG_LEN_08BIT, 0xd8 }, + { 0x380e, CRL_REG_LEN_08BIT, 0x03 }, + { 0x380f, CRL_REG_LEN_08BIT, 0x8e }, + + { 0x3810, CRL_REG_LEN_08BIT, 0x00 }, + { 0x3811, CRL_REG_LEN_08BIT, 0x00 }, + + { 0x3812, CRL_REG_LEN_08BIT, 0x00 }, + { 0x3813, CRL_REG_LEN_08BIT, 0x00 }, + { 0x3814, CRL_REG_LEN_08BIT, 0x11 }, + { 0x3815, CRL_REG_LEN_08BIT, 0x11 }, + { 0x3820, CRL_REG_LEN_08BIT, 0x40 }, + { 0x3821, CRL_REG_LEN_08BIT, 0x00 }, + { 0x3881, CRL_REG_LEN_08BIT, 0x42 }, + { 0x38b1, CRL_REG_LEN_08BIT, 0x00 }, + { 0x3920, CRL_REG_LEN_08BIT, 0xff }, + { 0x4003, CRL_REG_LEN_08BIT, 0x40 }, + + { 0x4008, CRL_REG_LEN_08BIT, 0x04 }, + { 0x4009, CRL_REG_LEN_08BIT, 0x0b }, + { 0x400c, CRL_REG_LEN_08BIT, 0x00 }, + { 0x400d, CRL_REG_LEN_08BIT, 0x07 }, + { 0x4010, CRL_REG_LEN_08BIT, 0x40 }, + { 0x4043, CRL_REG_LEN_08BIT, 0x40 }, + { 0x4307, CRL_REG_LEN_08BIT, 0x30 }, + + { 0x4317, CRL_REG_LEN_08BIT, 0x00 }, + { 0x4501, CRL_REG_LEN_08BIT, 0x00 }, + { 0x4507, CRL_REG_LEN_08BIT, 0x00 }, + + { 0x4509, CRL_REG_LEN_08BIT, 0x00 }, + { 0x450a, CRL_REG_LEN_08BIT, 0x08 }, + { 0x4601, CRL_REG_LEN_08BIT, 0x04 }, + { 0x470f, CRL_REG_LEN_08BIT, 0x00 }, + { 0x4f07, CRL_REG_LEN_08BIT, 0x00 }, + { 0x4800, CRL_REG_LEN_08BIT, 0x00 }, + { 0x5000, CRL_REG_LEN_08BIT, 0x9f }, + { 0x5001, CRL_REG_LEN_08BIT, 0x00 }, + { 0x5e00, CRL_REG_LEN_08BIT, 0x00 }, + { 0x5d00, CRL_REG_LEN_08BIT, 0x07 }, + { 0x5d01, CRL_REG_LEN_08BIT, 0x00 }, +}; + +static struct crl_register_write_rep ov9281_streamon_regs[] = { + { 0x00, CRL_REG_LEN_DELAY, 10, 0x00}, /* Add a pre 10ms delay */ + { 0x0100, CRL_REG_LEN_08BIT, 0x01 }, + { 0x00, CRL_REG_LEN_DELAY, 10, 0x00}, /* Add a pre 10ms delay */ +}; + +static struct crl_register_write_rep ov9281_streamoff_regs[] = { + { 0x00, CRL_REG_LEN_DELAY, 10, 0x00}, /* Add a pre 10ms delay */ + { 0x0100, CRL_REG_LEN_08BIT, 0x00 }, + { 0x00, CRL_REG_LEN_DELAY, 10, 0x00}, /* Add a pre 
10ms delay */ +}; + +struct crl_register_write_rep ov9281_poweroff_regset[] = { + { 0x4f00, CRL_REG_LEN_08BIT, 0x01 }, + { 0x00, CRL_REG_LEN_DELAY, 10, 0x00}, /* Add a pre 10ms delay */ +}; + +static struct crl_dynamic_register_access ov9281_ana_gain_global_regs[] = { + { + .address = 0x3509, + .len = CRL_REG_LEN_08BIT, + .ops_items = 0, + .ops = 0, + .mask = 0xff, + }, +}; + +static struct crl_arithmetic_ops ov9281_expol_ops[] = { + { + .op = CRL_BITWISE_LSHIFT, + .operand.entity_val = 4, + }, +}; + +static struct crl_arithmetic_ops ov9281_expom_ops[] = { + { + .op = CRL_BITWISE_LSHIFT, + .operand.entity_val = 4, + }, + { + .op = CRL_BITWISE_RSHIFT, + .operand.entity_val = 8, + }, +}; + +static struct crl_arithmetic_ops ov9281_expoh_ops[] = { + { + .op = CRL_BITWISE_LSHIFT, + .operand.entity_val = 4, + }, + { + .op = CRL_BITWISE_RSHIFT, + .operand.entity_val = 16, + }, +}; + +static struct crl_dynamic_register_access ov9281_exposure_regs[] = { + { + .address = 0x3502, + .len = CRL_REG_LEN_08BIT, + .ops_items = ARRAY_SIZE(ov9281_expol_ops), + .ops = ov9281_expol_ops, + .mask = 0xff, + }, + { + .address = 0x3501, + .len = CRL_REG_LEN_08BIT, + .ops_items = ARRAY_SIZE(ov9281_expom_ops), + .ops = ov9281_expom_ops, + .mask = 0xff, + }, + { + .address = 0x3500, + .len = CRL_REG_LEN_08BIT, + .ops_items = ARRAY_SIZE(ov9281_expoh_ops), + .ops = ov9281_expoh_ops, + .mask = 0x0f, + }, +}; + +static struct crl_sensor_detect_config ov9281_sensor_detect_regset[] = { + { + .reg = { 0x300A, CRL_REG_LEN_08BIT, 0x000000ff }, + .width = 7, + }, + { + .reg = { 0x300B, CRL_REG_LEN_08BIT, 0x000000ff }, + .width = 7, + }, +}; + +static struct crl_pll_configuration ov9281_pll_configurations[] = { + { + .input_clk = 24000000, + .op_sys_clk = 400000000, + .bitsperpixel = 10, + .pixel_rate_csi = 80000000, + .pixel_rate_pa = 80000000, + .csi_lanes = 2, + .comp_items = 0, + .ctrl_data = 0, + .pll_regs_items = ARRAY_SIZE(ov9281_pll_800mbps), + .pll_regs = ov9281_pll_800mbps, + }, +}; + +static 
struct crl_subdev_rect_rep ov9281_1m_rects[] = { + { + .subdev_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .in_rect.left = 0, + .in_rect.top = 0, + .in_rect.width = 1280, + .in_rect.height = 800, + .out_rect.left = 0, + .out_rect.top = 0, + .out_rect.width = 1280, + .out_rect.height = 800, + }, + { + .subdev_type = CRL_SUBDEV_TYPE_BINNER, + .in_rect.left = 0, + .in_rect.top = 0, + .in_rect.width = 1280, + .in_rect.height = 800, + .out_rect.left = 0, + .out_rect.top = 0, + .out_rect.width = 1280, + .out_rect.height = 800, + }, +}; + +static struct crl_mode_rep ov9281_modes[] = { + { + .sd_rects_items = ARRAY_SIZE(ov9281_1m_rects), + .sd_rects = ov9281_1m_rects, + .binn_hor = 1, + .binn_vert = 1, + .scale_m = 1, + .width = 1280, + .height = 800, + .min_llp = 728, + .min_fll = 910, + .comp_items = 0, + .ctrl_data = 0, + .mode_regs_items = ARRAY_SIZE(ov9281_mode_1m), + .mode_regs = ov9281_mode_1m, + }, +}; + +static struct crl_sensor_subdev_config ov9281_sensor_subdevs[] = { + { + .subdev_type = CRL_SUBDEV_TYPE_BINNER, + .name = "ov9281 binner", + }, + { + .subdev_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .name = "ov9281 pixel array", + }, +}; + +static struct crl_sensor_limits ov9281_sensor_limits = { + .x_addr_min = 0, + .y_addr_min = 0, + .x_addr_max = 1280, + .y_addr_max = 800, + .min_frame_length_lines = 910, + .max_frame_length_lines = 910, + .min_line_length_pixels = 728, + .max_line_length_pixels = 728, + .scaler_m_min = 16, + .scaler_m_max = 16, + .scaler_n_min = 16, + .scaler_n_max = 16, + .min_even_inc = 1, + .max_even_inc = 1, + .min_odd_inc = 1, + .max_odd_inc = 3, +}; + +static struct crl_flip_data ov9281_flip_configurations[] = { + { + .flip = CRL_FLIP_DEFAULT_NONE, + .pixel_order = CRL_PIXEL_ORDER_GRBG, + }, + { + .flip = CRL_FLIP_HFLIP, + .pixel_order = CRL_PIXEL_ORDER_GRBG, + }, + { + .flip = CRL_FLIP_VFLIP, + .pixel_order = CRL_PIXEL_ORDER_GRBG, + }, + { + .flip = CRL_FLIP_HFLIP_VFLIP, + .pixel_order = CRL_PIXEL_ORDER_GRBG, + }, +}; + +static struct 
crl_csi_data_fmt ov9281_crl_csi_data_fmt[] = { + { + .code = MEDIA_BUS_FMT_SGRBG10_1X10, + .pixel_order = CRL_PIXEL_ORDER_GRBG, + .bits_per_pixel = 10, + .regs_items = 0, + .regs = 0, + }, +}; + +static struct crl_v4l2_ctrl ov9281_v4l2_ctrls[] = { + { + .sd_type = CRL_SUBDEV_TYPE_BINNER, + .op_type = CRL_V4L2_CTRL_SET_OP, + .context = SENSOR_IDLE, + .ctrl_id = V4L2_CID_LINK_FREQ, + .name = "V4L2_CID_LINK_FREQ", + .type = CRL_V4L2_CTRL_TYPE_MENU_INT, + .data.v4l2_int_menu.def = 0, + .data.v4l2_int_menu.max = 0, + .data.v4l2_int_menu.menu = 0, + .flags = 0, + .impact = CRL_IMPACTS_NO_IMPACT, + .regs_items = 0, + .regs = 0, + .dep_items = 0, + .dep_ctrls = 0, + }, + { + .sd_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .op_type = CRL_V4L2_CTRL_GET_OP, + .context = SENSOR_POWERED_ON, + .ctrl_id = V4L2_CID_PIXEL_RATE, + .name = "V4L2_CID_PIXEL_RATE_PA", + .type = CRL_V4L2_CTRL_TYPE_INTEGER, + .data.std_data.min = 0, + .data.std_data.max = INT_MAX, + .data.std_data.step = 1, + .data.std_data.def = 0, + .flags = 0, + .impact = CRL_IMPACTS_NO_IMPACT, + .regs_items = 0, + .regs = 0, + .dep_items = 0, + .dep_ctrls = 0, + }, + { + .sd_type = CRL_SUBDEV_TYPE_BINNER, + .op_type = CRL_V4L2_CTRL_GET_OP, + .context = SENSOR_POWERED_ON, + .ctrl_id = V4L2_CID_PIXEL_RATE, + .name = "V4L2_CID_PIXEL_RATE_CSI", + .type = CRL_V4L2_CTRL_TYPE_INTEGER, + .data.std_data.min = 0, + .data.std_data.max = INT_MAX, + .data.std_data.step = 1, + .data.std_data.def = 0, + .flags = 0, + .impact = CRL_IMPACTS_NO_IMPACT, + .regs_items = 0, + .regs = 0, + .dep_items = 0, + .dep_ctrls = 0, + }, + { + .sd_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .op_type = CRL_V4L2_CTRL_SET_OP, + .context = SENSOR_POWERED_ON, + .ctrl_id = V4L2_CID_ANALOGUE_GAIN, + .name = "V4L2_CID_ANALOGUE_GAIN", + .type = CRL_V4L2_CTRL_TYPE_INTEGER, + .data.std_data.min = 0, + .data.std_data.max = 0xFF, + .data.std_data.step = 1, + .data.std_data.def = 0, + .flags = 0, + .impact = CRL_IMPACTS_NO_IMPACT, + .ctrl = 0, + .regs_items = 
ARRAY_SIZE(ov9281_ana_gain_global_regs), + .regs = ov9281_ana_gain_global_regs, + .dep_items = 0, + .dep_ctrls = 0, + }, + { + .sd_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .op_type = CRL_V4L2_CTRL_SET_OP, + .context = SENSOR_POWERED_ON, + .ctrl_id = V4L2_CID_EXPOSURE, + .name = "V4L2_CID_EXPOSURE", + .type = CRL_V4L2_CTRL_TYPE_INTEGER, + .data.std_data.min = 0, + .data.std_data.max = 885, + .data.std_data.step = 1, + .data.std_data.def = 0, + .flags = 0, + .impact = CRL_IMPACTS_NO_IMPACT, + .ctrl = 0, + .regs_items = ARRAY_SIZE(ov9281_exposure_regs), + .regs = ov9281_exposure_regs, + .dep_items = 0, + .dep_ctrls = 0, + }, + { + .sd_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .op_type = CRL_V4L2_CTRL_SET_OP, + .context = SENSOR_POWERED_ON, + .ctrl_id = V4L2_CID_FRAME_LENGTH_LINES, + .name = "Frame length lines", + .type = CRL_V4L2_CTRL_TYPE_CUSTOM, + .data.std_data.min = 910, + .data.std_data.max = 910, + .data.std_data.step = 1, + .data.std_data.def = 910, + .flags = V4L2_CTRL_FLAG_UPDATE, + .impact = CRL_IMPACTS_NO_IMPACT, + .ctrl = 0, + .dep_items = 0, + .dep_ctrls = 0, + .v4l2_type = V4L2_CTRL_TYPE_INTEGER, + }, + { + .sd_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .op_type = CRL_V4L2_CTRL_SET_OP, + .context = SENSOR_POWERED_ON, + .ctrl_id = V4L2_CID_LINE_LENGTH_PIXELS, + .name = "Line Length Pixels", + .type = CRL_V4L2_CTRL_TYPE_CUSTOM, + .data.std_data.min = 728, + .data.std_data.max = 728, + .data.std_data.step = 1, + .data.std_data.def = 728, + .flags = V4L2_CTRL_FLAG_UPDATE, + .impact = CRL_IMPACTS_NO_IMPACT, + .ctrl = 0, + .dep_items = 0, + .dep_ctrls = 0, + .v4l2_type = V4L2_CTRL_TYPE_INTEGER, + }, +}; + +/* Power items, they are enabled in the order they are listed here */ +static struct crl_power_seq_entity ov9281_power_items[] = { + { + .type = CRL_POWER_ETY_CLK_FRAMEWORK, + .val = 24000000, + .delay = 500, + }, + { + .type = CRL_POWER_ETY_GPIO_FROM_PDATA, + .val = 1, + .delay = 5000, + }, +}; + +struct crl_sensor_configuration ov9281_crl_configuration = { + + 
.power_items = ARRAY_SIZE(ov9281_power_items), + .power_entities = ov9281_power_items, + + .powerup_regs_items = ARRAY_SIZE(ov9281_powerup_regset), + .powerup_regs = ov9281_powerup_regset, + + .poweroff_regs_items = 0, + .poweroff_regs = 0, + + .id_reg_items = ARRAY_SIZE(ov9281_sensor_detect_regset), + .id_regs = ov9281_sensor_detect_regset, + + .subdev_items = ARRAY_SIZE(ov9281_sensor_subdevs), + .subdevs = ov9281_sensor_subdevs, + + .sensor_limits = &ov9281_sensor_limits, + + .pll_config_items = ARRAY_SIZE(ov9281_pll_configurations), + .pll_configs = ov9281_pll_configurations, + + .modes_items = ARRAY_SIZE(ov9281_modes), + .modes = ov9281_modes, + + .streamon_regs_items = ARRAY_SIZE(ov9281_streamon_regs), + .streamon_regs = ov9281_streamon_regs, + + .streamoff_regs_items = ARRAY_SIZE(ov9281_streamoff_regs), + .streamoff_regs = ov9281_streamoff_regs, + + .v4l2_ctrls_items = ARRAY_SIZE(ov9281_v4l2_ctrls), + .v4l2_ctrl_bank = ov9281_v4l2_ctrls, + + .csi_fmts_items = ARRAY_SIZE(ov9281_crl_csi_data_fmt), + .csi_fmts = ov9281_crl_csi_data_fmt, + + .flip_items = ARRAY_SIZE(ov9281_flip_configurations), + .flip_data = ov9281_flip_configurations, +}; + +#endif /* __CRLMODULE_OV9281_CONFIGURATION_H_ */ diff --git a/drivers/media/i2c/crlmodule/crl_ox03a10_configuration.h b/drivers/media/i2c/crlmodule/crl_ox03a10_configuration.h new file mode 100644 index 0000000000000..2a28c075cda27 --- /dev/null +++ b/drivers/media/i2c/crlmodule/crl_ox03a10_configuration.h @@ -0,0 +1,1709 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (C) 2018 Intel Corporation + * + * Author: Chang Ying + * + */ + +#ifndef __CRLMODULE_OX03A10_CONFIGURATION_H_ +#define __CRLMODULE_OX03A10_CONFIGURATION_H_ + +#include "crlmodule-sensor-ds.h" + +struct crl_sensor_detect_config ox03a10_sensor_detect_regset[] = { + { + .reg = {0x300A, CRL_REG_LEN_08BIT, 0x58}, + .width = 12, + }, + { + .reg = {0x300B, CRL_REG_LEN_08BIT, 0x03}, + .width = 12, + }, + { + .reg = {0x300C, CRL_REG_LEN_08BIT, 0x41}, + 
.width = 12, + } +}; + +struct crl_sensor_subdev_config ox03a10_sensor_subdevs[] = { + { + .subdev_type = CRL_SUBDEV_TYPE_BINNER, + .name = "ox03a10 binner", + }, + { + .subdev_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .name = "ox03a10 pixel array", + } +}; + +struct crl_sensor_limits ox03a10_sensor_limits = { + .x_addr_min = 0, + .y_addr_min = 0, + .x_addr_max = 1920, + .y_addr_max = 1088, +}; + +struct crl_subdev_rect_rep ox03a10_1920_1088_rects[] = { + { + .subdev_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .in_rect.left = 0, + .in_rect.top = 0, + .in_rect.width = 1920, + .in_rect.height = 1088, + .out_rect.left = 0, + .out_rect.top = 0, + .out_rect.width = 1920, + .out_rect.height = 1088, + }, + { + .subdev_type = CRL_SUBDEV_TYPE_BINNER, + .in_rect.left = 0, + .in_rect.top = 0, + .in_rect.width = 1920, + .in_rect.height = 1088, + .out_rect.left = 0, + .out_rect.top = 0, + .out_rect.width = 1920, + .out_rect.height = 1088, + } +}; + +static struct crl_register_write_rep ox03a10_1920_1088_12DCG[] = { + { 0x4d09, CRL_REG_LEN_08BIT, 0x5f }, + { 0x0104, CRL_REG_LEN_08BIT, 0x04 }, + { 0x0303, CRL_REG_LEN_08BIT, 0x01 }, + { 0x0305, CRL_REG_LEN_08BIT, 0x32 }, + { 0x0307, CRL_REG_LEN_08BIT, 0x01 }, + { 0x0316, CRL_REG_LEN_08BIT, 0x00 }, + { 0x0317, CRL_REG_LEN_08BIT, 0x12 }, + { 0x0323, CRL_REG_LEN_08BIT, 0x02 }, + { 0x0325, CRL_REG_LEN_08BIT, 0x6c }, + { 0x0326, CRL_REG_LEN_08BIT, 0x00 }, + { 0x032b, CRL_REG_LEN_08BIT, 0x00 }, + { 0x0400, CRL_REG_LEN_08BIT, 0xe7 }, + { 0x0401, CRL_REG_LEN_08BIT, 0xff }, + { 0x0404, CRL_REG_LEN_08BIT, 0x2b }, + { 0x0405, CRL_REG_LEN_08BIT, 0x32 }, + { 0x0406, CRL_REG_LEN_08BIT, 0x33 }, + { 0x0407, CRL_REG_LEN_08BIT, 0x8f }, + { 0x0408, CRL_REG_LEN_08BIT, 0x0c }, + { 0x0410, CRL_REG_LEN_08BIT, 0xe7 }, + { 0x0411, CRL_REG_LEN_08BIT, 0xff }, + { 0x0414, CRL_REG_LEN_08BIT, 0x2b }, + { 0x0415, CRL_REG_LEN_08BIT, 0x32 }, + { 0x0416, CRL_REG_LEN_08BIT, 0x33 }, + { 0x0417, CRL_REG_LEN_08BIT, 0x8f }, + { 0x0418, CRL_REG_LEN_08BIT, 0x0c }, + { 0x3002, 
CRL_REG_LEN_08BIT, 0x03 }, + { 0x3012, CRL_REG_LEN_08BIT, 0x41 }, + { 0x3016, CRL_REG_LEN_08BIT, 0xf0 }, + { 0x3017, CRL_REG_LEN_08BIT, 0xf0 }, + { 0x3018, CRL_REG_LEN_08BIT, 0xf0 }, + { 0x3019, CRL_REG_LEN_08BIT, 0xf0 }, + { 0x301a, CRL_REG_LEN_08BIT, 0xf0 }, + { 0x301b, CRL_REG_LEN_08BIT, 0xb4 }, + { 0x301e, CRL_REG_LEN_08BIT, 0xb8 }, + { 0x301f, CRL_REG_LEN_08BIT, 0xe1 }, + { 0x3022, CRL_REG_LEN_08BIT, 0xf8 }, + { 0x3023, CRL_REG_LEN_08BIT, 0xf0 }, + { 0x3024, CRL_REG_LEN_08BIT, 0xf0 }, + { 0x3028, CRL_REG_LEN_08BIT, 0xf0 }, + { 0x3029, CRL_REG_LEN_08BIT, 0x80 }, + { 0x3706, CRL_REG_LEN_08BIT, 0x39 }, + { 0x370a, CRL_REG_LEN_08BIT, 0x00 }, + { 0x370b, CRL_REG_LEN_08BIT, 0xa3 }, + { 0x3712, CRL_REG_LEN_08BIT, 0x12 }, + { 0x3713, CRL_REG_LEN_08BIT, 0x00 }, + { 0x3716, CRL_REG_LEN_08BIT, 0x04 }, + { 0x371d, CRL_REG_LEN_08BIT, 0x08 }, + { 0x3721, CRL_REG_LEN_08BIT, 0x1c }, + { 0x372c, CRL_REG_LEN_08BIT, 0x17 }, + { 0x3733, CRL_REG_LEN_08BIT, 0x41 }, + { 0x3741, CRL_REG_LEN_08BIT, 0x44 }, + { 0x3742, CRL_REG_LEN_08BIT, 0x34 }, + { 0x3746, CRL_REG_LEN_08BIT, 0x03 }, + { 0x374b, CRL_REG_LEN_08BIT, 0x03 }, + { 0x3755, CRL_REG_LEN_08BIT, 0x00 }, + { 0x376c, CRL_REG_LEN_08BIT, 0x15 }, + { 0x376d, CRL_REG_LEN_08BIT, 0x08 }, + { 0x376f, CRL_REG_LEN_08BIT, 0x08 }, + { 0x3770, CRL_REG_LEN_08BIT, 0x91 }, + { 0x3771, CRL_REG_LEN_08BIT, 0x08 }, + { 0x3774, CRL_REG_LEN_08BIT, 0x82 }, + { 0x3777, CRL_REG_LEN_08BIT, 0x00 }, + { 0x3779, CRL_REG_LEN_08BIT, 0x22 }, + { 0x377a, CRL_REG_LEN_08BIT, 0x00 }, + { 0x377b, CRL_REG_LEN_08BIT, 0x00 }, + { 0x377c, CRL_REG_LEN_08BIT, 0x48 }, + { 0x3785, CRL_REG_LEN_08BIT, 0x08 }, + { 0x3790, CRL_REG_LEN_08BIT, 0x10 }, + { 0x3793, CRL_REG_LEN_08BIT, 0x04 }, + { 0x379c, CRL_REG_LEN_08BIT, 0x01 }, + { 0x37a1, CRL_REG_LEN_08BIT, 0x80 }, + { 0x37b3, CRL_REG_LEN_08BIT, 0x0a }, + { 0x37bb, CRL_REG_LEN_08BIT, 0x08 }, + { 0x37be, CRL_REG_LEN_08BIT, 0xe0 }, + { 0x37bf, CRL_REG_LEN_08BIT, 0x00 }, + { 0x37c6, CRL_REG_LEN_08BIT, 0x48 }, + { 0x37c7, 
CRL_REG_LEN_08BIT, 0x38 }, + { 0x37c9, CRL_REG_LEN_08BIT, 0x00 }, + { 0x37ca, CRL_REG_LEN_08BIT, 0x08 }, + { 0x37cb, CRL_REG_LEN_08BIT, 0x00 }, + { 0x37cc, CRL_REG_LEN_08BIT, 0x40 }, + { 0x37d1, CRL_REG_LEN_08BIT, 0x39 }, + { 0x37d2, CRL_REG_LEN_08BIT, 0x00 }, + { 0x37d3, CRL_REG_LEN_08BIT, 0xa3 }, + { 0x37d5, CRL_REG_LEN_08BIT, 0x39 }, + { 0x37d6, CRL_REG_LEN_08BIT, 0x00 }, + { 0x37d7, CRL_REG_LEN_08BIT, 0xa3 }, + { 0x3c06, CRL_REG_LEN_08BIT, 0x29 }, + { 0x3c0b, CRL_REG_LEN_08BIT, 0xa8 }, + { 0x3c12, CRL_REG_LEN_08BIT, 0x89 }, + { 0x3c14, CRL_REG_LEN_08BIT, 0x81 }, + { 0x3c18, CRL_REG_LEN_08BIT, 0x0c }, + { 0x3c3b, CRL_REG_LEN_08BIT, 0x38 }, + { 0x3c53, CRL_REG_LEN_08BIT, 0x08 }, + { 0x3c55, CRL_REG_LEN_08BIT, 0xeb }, + { 0x3101, CRL_REG_LEN_08BIT, 0x32 }, + { 0x3192, CRL_REG_LEN_08BIT, 0x00 }, + { 0x3193, CRL_REG_LEN_08BIT, 0x00 }, + { 0x3206, CRL_REG_LEN_08BIT, 0xc8 }, + { 0x3216, CRL_REG_LEN_08BIT, 0x01 }, + { 0x3304, CRL_REG_LEN_08BIT, 0x04 }, + { 0x3400, CRL_REG_LEN_08BIT, 0x08 }, + { 0x3409, CRL_REG_LEN_08BIT, 0x02 }, + { 0x3600, CRL_REG_LEN_08BIT, 0x00 }, + { 0x3601, CRL_REG_LEN_08BIT, 0x70 }, + { 0x3602, CRL_REG_LEN_08BIT, 0x42 }, + { 0x3603, CRL_REG_LEN_08BIT, 0xe3 }, + { 0x3604, CRL_REG_LEN_08BIT, 0x93 }, + { 0x3605, CRL_REG_LEN_08BIT, 0xff }, + { 0x3606, CRL_REG_LEN_08BIT, 0x80 }, + { 0x3607, CRL_REG_LEN_08BIT, 0x4a }, + { 0x3608, CRL_REG_LEN_08BIT, 0x98 }, + { 0x3609, CRL_REG_LEN_08BIT, 0x70 }, + { 0x360a, CRL_REG_LEN_08BIT, 0x90 }, + { 0x360b, CRL_REG_LEN_08BIT, 0x0a }, + { 0x360e, CRL_REG_LEN_08BIT, 0x88 }, + { 0x3610, CRL_REG_LEN_08BIT, 0x89 }, + { 0x3611, CRL_REG_LEN_08BIT, 0x4b }, + { 0x3612, CRL_REG_LEN_08BIT, 0x4e }, + { 0x3619, CRL_REG_LEN_08BIT, 0x00 }, + { 0x3620, CRL_REG_LEN_08BIT, 0x02 }, + { 0x3621, CRL_REG_LEN_08BIT, 0x80 }, + { 0x3626, CRL_REG_LEN_08BIT, 0x0e }, + { 0x362c, CRL_REG_LEN_08BIT, 0x0e }, + { 0x362d, CRL_REG_LEN_08BIT, 0x12 }, + { 0x362e, CRL_REG_LEN_08BIT, 0x00 }, + { 0x362f, CRL_REG_LEN_08BIT, 0x00 }, + { 0x3630, 
CRL_REG_LEN_08BIT, 0x00 }, + { 0x3631, CRL_REG_LEN_08BIT, 0x00 }, + { 0x3632, CRL_REG_LEN_08BIT, 0x99 }, + { 0x3633, CRL_REG_LEN_08BIT, 0x99 }, + { 0x3643, CRL_REG_LEN_08BIT, 0x0c }, + { 0x3644, CRL_REG_LEN_08BIT, 0x00 }, + { 0x3645, CRL_REG_LEN_08BIT, 0x0e }, + { 0x3646, CRL_REG_LEN_08BIT, 0x0f }, + { 0x3647, CRL_REG_LEN_08BIT, 0x0e }, + { 0x3648, CRL_REG_LEN_08BIT, 0x00 }, + { 0x3649, CRL_REG_LEN_08BIT, 0x11 }, + { 0x364a, CRL_REG_LEN_08BIT, 0x12 }, + { 0x364c, CRL_REG_LEN_08BIT, 0x0e }, + { 0x364d, CRL_REG_LEN_08BIT, 0x0e }, + { 0x364e, CRL_REG_LEN_08BIT, 0x12 }, + { 0x364f, CRL_REG_LEN_08BIT, 0x0e }, + { 0x3652, CRL_REG_LEN_08BIT, 0xc5 }, + { 0x3654, CRL_REG_LEN_08BIT, 0x40 }, + { 0x3656, CRL_REG_LEN_08BIT, 0xcf }, + { 0x3657, CRL_REG_LEN_08BIT, 0x88 }, + { 0x3658, CRL_REG_LEN_08BIT, 0x08 }, + { 0x365a, CRL_REG_LEN_08BIT, 0x00 }, + { 0x365b, CRL_REG_LEN_08BIT, 0x00 }, + { 0x365c, CRL_REG_LEN_08BIT, 0x00 }, + { 0x365d, CRL_REG_LEN_08BIT, 0x00 }, + { 0x3660, CRL_REG_LEN_08BIT, 0x01 }, + { 0x3661, CRL_REG_LEN_08BIT, 0x07 }, + { 0x3662, CRL_REG_LEN_08BIT, 0x00 }, + { 0x3663, CRL_REG_LEN_08BIT, 0x20 }, + { 0x3665, CRL_REG_LEN_08BIT, 0x12 }, + { 0x3666, CRL_REG_LEN_08BIT, 0x13 }, + { 0x3667, CRL_REG_LEN_08BIT, 0x14 }, + { 0x3668, CRL_REG_LEN_08BIT, 0x95 }, + { 0x3669, CRL_REG_LEN_08BIT, 0x16 }, + { 0x366f, CRL_REG_LEN_08BIT, 0xc4 }, + { 0x3671, CRL_REG_LEN_08BIT, 0x37 }, + { 0x3673, CRL_REG_LEN_08BIT, 0x6a }, + { 0x3678, CRL_REG_LEN_08BIT, 0x88 }, + { 0x3800, CRL_REG_LEN_08BIT, 0x00 }, + { 0x3801, CRL_REG_LEN_08BIT, 0x00 }, + { 0x3802, CRL_REG_LEN_08BIT, 0x00 }, + { 0x3803, CRL_REG_LEN_08BIT, 0x05 }, + { 0x3804, CRL_REG_LEN_08BIT, 0x07 }, + { 0x3805, CRL_REG_LEN_08BIT, 0x8f }, + { 0x3806, CRL_REG_LEN_08BIT, 0x05 }, + { 0x3807, CRL_REG_LEN_08BIT, 0x0c }, + { 0x3808, CRL_REG_LEN_08BIT, 0x07 }, + { 0x3809, CRL_REG_LEN_08BIT, 0x80 }, + { 0x380a, CRL_REG_LEN_08BIT, 0x04 }, + { 0x380b, CRL_REG_LEN_08BIT, 0x40 }, + { 0x380e, CRL_REG_LEN_08BIT, 0x05 }, + { 0x380f, 
CRL_REG_LEN_08BIT, 0x40 }, + { 0x3810, CRL_REG_LEN_08BIT, 0x00 }, + { 0x3813, CRL_REG_LEN_08BIT, 0x04 }, + { 0x381c, CRL_REG_LEN_08BIT, 0x08 }, + { 0x3820, CRL_REG_LEN_08BIT, 0x00 }, + { 0x3822, CRL_REG_LEN_08BIT, 0x14 }, + { 0x3832, CRL_REG_LEN_08BIT, 0x00 }, + { 0x3833, CRL_REG_LEN_08BIT, 0x01 }, + { 0x3834, CRL_REG_LEN_08BIT, 0x00 }, + { 0x383d, CRL_REG_LEN_08BIT, 0x20 }, + { 0x384c, CRL_REG_LEN_08BIT, 0x02 }, + { 0x384d, CRL_REG_LEN_08BIT, 0x14 }, + { 0x384e, CRL_REG_LEN_08BIT, 0x00 }, + { 0x384f, CRL_REG_LEN_08BIT, 0x40 }, + { 0x3850, CRL_REG_LEN_08BIT, 0x01 }, + { 0x3851, CRL_REG_LEN_08BIT, 0x02 }, + { 0x3852, CRL_REG_LEN_08BIT, 0x01 }, + { 0x3853, CRL_REG_LEN_08BIT, 0x00 }, + { 0x3854, CRL_REG_LEN_08BIT, 0x00 }, + { 0x3855, CRL_REG_LEN_08BIT, 0x05 }, + { 0x3856, CRL_REG_LEN_08BIT, 0x05 }, + { 0x3857, CRL_REG_LEN_08BIT, 0x33 }, + { 0x3858, CRL_REG_LEN_08BIT, 0x7c }, + { 0x3859, CRL_REG_LEN_08BIT, 0x00 }, + { 0x385a, CRL_REG_LEN_08BIT, 0x03 }, + { 0x385b, CRL_REG_LEN_08BIT, 0x05 }, + { 0x385c, CRL_REG_LEN_08BIT, 0x32 }, + { 0x385f, CRL_REG_LEN_08BIT, 0x00 }, + { 0x3860, CRL_REG_LEN_08BIT, 0x10 }, + { 0x3861, CRL_REG_LEN_08BIT, 0x00 }, + { 0x3862, CRL_REG_LEN_08BIT, 0x40 }, + { 0x3863, CRL_REG_LEN_08BIT, 0x00 }, + { 0x3864, CRL_REG_LEN_08BIT, 0x40 }, + { 0x3865, CRL_REG_LEN_08BIT, 0x00 }, + { 0x3866, CRL_REG_LEN_08BIT, 0x40 }, + { 0x3b40, CRL_REG_LEN_08BIT, 0x3e }, + { 0x3b41, CRL_REG_LEN_08BIT, 0x00 }, + { 0x3b42, CRL_REG_LEN_08BIT, 0x02 }, + { 0x3b43, CRL_REG_LEN_08BIT, 0x00 }, + { 0x3b44, CRL_REG_LEN_08BIT, 0x00 }, + { 0x3b45, CRL_REG_LEN_08BIT, 0x20 }, + { 0x3b46, CRL_REG_LEN_08BIT, 0x00 }, + { 0x3b47, CRL_REG_LEN_08BIT, 0x20 }, + { 0x3b84, CRL_REG_LEN_08BIT, 0x36 }, + { 0x3b85, CRL_REG_LEN_08BIT, 0x00 }, + { 0x3b86, CRL_REG_LEN_08BIT, 0x00 }, + { 0x3b87, CRL_REG_LEN_08BIT, 0x04 }, + { 0x3b88, CRL_REG_LEN_08BIT, 0x00 }, + { 0x3b89, CRL_REG_LEN_08BIT, 0x04 }, + { 0x3b8a, CRL_REG_LEN_08BIT, 0x00 }, + { 0x3b8b, CRL_REG_LEN_08BIT, 0x0a }, + { 0x3b8e, 
CRL_REG_LEN_08BIT, 0x03 }, + { 0x3b8f, CRL_REG_LEN_08BIT, 0xe8 }, + { 0x3d85, CRL_REG_LEN_08BIT, 0x0b }, + { 0x3d8c, CRL_REG_LEN_08BIT, 0x70 }, + { 0x3d8d, CRL_REG_LEN_08BIT, 0x26 }, + { 0x3d97, CRL_REG_LEN_08BIT, 0x70 }, + { 0x3d98, CRL_REG_LEN_08BIT, 0x24 }, + { 0x3d99, CRL_REG_LEN_08BIT, 0x70 }, + { 0x3d9a, CRL_REG_LEN_08BIT, 0x6d }, + { 0x3d9b, CRL_REG_LEN_08BIT, 0x70 }, + { 0x3d9c, CRL_REG_LEN_08BIT, 0x6e }, + { 0x3d9d, CRL_REG_LEN_08BIT, 0x73 }, + { 0x3d9e, CRL_REG_LEN_08BIT, 0xff }, + { 0x3e07, CRL_REG_LEN_08BIT, 0x40 }, + { 0x3f00, CRL_REG_LEN_08BIT, 0x04 }, + { 0x4000, CRL_REG_LEN_08BIT, 0xf8 }, + { 0x4001, CRL_REG_LEN_08BIT, 0xeb }, + { 0x4004, CRL_REG_LEN_08BIT, 0x00 }, + { 0x4005, CRL_REG_LEN_08BIT, 0x40 }, + { 0x4008, CRL_REG_LEN_08BIT, 0x02 }, + { 0x4009, CRL_REG_LEN_08BIT, 0x0d }, + { 0x400a, CRL_REG_LEN_08BIT, 0x08 }, + { 0x400b, CRL_REG_LEN_08BIT, 0x00 }, + { 0x400f, CRL_REG_LEN_08BIT, 0x80 }, + { 0x4010, CRL_REG_LEN_08BIT, 0x10 }, + { 0x4011, CRL_REG_LEN_08BIT, 0xff }, + { 0x4016, CRL_REG_LEN_08BIT, 0x00 }, + { 0x4017, CRL_REG_LEN_08BIT, 0x10 }, + { 0x4018, CRL_REG_LEN_08BIT, 0x18 }, + { 0x401a, CRL_REG_LEN_08BIT, 0x58 }, + { 0x4028, CRL_REG_LEN_08BIT, 0x4f }, + { 0x402e, CRL_REG_LEN_08BIT, 0x00 }, + { 0x402f, CRL_REG_LEN_08BIT, 0x40 }, + { 0x4030, CRL_REG_LEN_08BIT, 0x00 }, + { 0x4031, CRL_REG_LEN_08BIT, 0x40 }, + { 0x4032, CRL_REG_LEN_08BIT, 0x9e }, + { 0x4033, CRL_REG_LEN_08BIT, 0x80 }, + { 0x4308, CRL_REG_LEN_08BIT, 0x00 }, + { 0x4501, CRL_REG_LEN_08BIT, 0x18 }, + { 0x4502, CRL_REG_LEN_08BIT, 0x00 }, + { 0x4507, CRL_REG_LEN_08BIT, 0x07 }, + { 0x4580, CRL_REG_LEN_08BIT, 0xf8 }, + { 0x4581, CRL_REG_LEN_08BIT, 0xc7 }, + { 0x4582, CRL_REG_LEN_08BIT, 0x07 }, + { 0x4602, CRL_REG_LEN_08BIT, 0x00 }, + { 0x4603, CRL_REG_LEN_08BIT, 0x01 }, + { 0x460a, CRL_REG_LEN_08BIT, 0x36 }, + { 0x460c, CRL_REG_LEN_08BIT, 0x60 }, + { 0x4700, CRL_REG_LEN_08BIT, 0x2a }, + { 0x470a, CRL_REG_LEN_08BIT, 0x08 }, + { 0x470b, CRL_REG_LEN_08BIT, 0x88 }, + { 0x4800, 
CRL_REG_LEN_08BIT, 0x04 }, + { 0x480e, CRL_REG_LEN_08BIT, 0x04 }, + { 0x4813, CRL_REG_LEN_08BIT, 0xd2 }, + { 0x4815, CRL_REG_LEN_08BIT, 0x2b }, + { 0x4837, CRL_REG_LEN_08BIT, 0x28 }, + { 0x484a, CRL_REG_LEN_08BIT, 0x3f }, + { 0x484b, CRL_REG_LEN_08BIT, 0x67 }, + { 0x4850, CRL_REG_LEN_08BIT, 0x40 }, + { 0x4861, CRL_REG_LEN_08BIT, 0xa0 }, + { 0x4886, CRL_REG_LEN_08BIT, 0x00 }, + { 0x4900, CRL_REG_LEN_08BIT, 0x08 }, + { 0x4903, CRL_REG_LEN_08BIT, 0x80 }, + { 0x4f00, CRL_REG_LEN_08BIT, 0xff }, + { 0x4f01, CRL_REG_LEN_08BIT, 0xff }, + { 0x4f05, CRL_REG_LEN_08BIT, 0x01 }, + { 0x5180, CRL_REG_LEN_08BIT, 0x04 }, + { 0x5181, CRL_REG_LEN_08BIT, 0x00 }, + { 0x5182, CRL_REG_LEN_08BIT, 0x04 }, + { 0x5183, CRL_REG_LEN_08BIT, 0x00 }, + { 0x5184, CRL_REG_LEN_08BIT, 0x04 }, + { 0x5185, CRL_REG_LEN_08BIT, 0x00 }, + { 0x5186, CRL_REG_LEN_08BIT, 0x04 }, + { 0x5187, CRL_REG_LEN_08BIT, 0x00 }, + { 0x51a0, CRL_REG_LEN_08BIT, 0x04 }, + { 0x51a1, CRL_REG_LEN_08BIT, 0x00 }, + { 0x51a2, CRL_REG_LEN_08BIT, 0x04 }, + { 0x51a3, CRL_REG_LEN_08BIT, 0x00 }, + { 0x51a4, CRL_REG_LEN_08BIT, 0x04 }, + { 0x51a5, CRL_REG_LEN_08BIT, 0x00 }, + { 0x51a6, CRL_REG_LEN_08BIT, 0x04 }, + { 0x51a7, CRL_REG_LEN_08BIT, 0x00 }, + { 0x51c0, CRL_REG_LEN_08BIT, 0x04 }, + { 0x51c1, CRL_REG_LEN_08BIT, 0x00 }, + { 0x51c2, CRL_REG_LEN_08BIT, 0x04 }, + { 0x51c3, CRL_REG_LEN_08BIT, 0x00 }, + { 0x51c4, CRL_REG_LEN_08BIT, 0x04 }, + { 0x51c5, CRL_REG_LEN_08BIT, 0x00 }, + { 0x51c6, CRL_REG_LEN_08BIT, 0x04 }, + { 0x51c7, CRL_REG_LEN_08BIT, 0x00 }, + { 0x5380, CRL_REG_LEN_08BIT, 0x19 }, + { 0x5381, CRL_REG_LEN_08BIT, 0x94 }, + { 0x5382, CRL_REG_LEN_08BIT, 0x2e }, + { 0x53a0, CRL_REG_LEN_08BIT, 0x41 }, + { 0x53a2, CRL_REG_LEN_08BIT, 0x04 }, + { 0x53a3, CRL_REG_LEN_08BIT, 0x00 }, + { 0x53a4, CRL_REG_LEN_08BIT, 0x04 }, + { 0x53a5, CRL_REG_LEN_08BIT, 0x00 }, + { 0x53a7, CRL_REG_LEN_08BIT, 0x00 }, + { 0x5400, CRL_REG_LEN_08BIT, 0x19 }, + { 0x5401, CRL_REG_LEN_08BIT, 0x94 }, + { 0x5402, CRL_REG_LEN_08BIT, 0x2e }, + { 0x5420, 
CRL_REG_LEN_08BIT, 0x41 }, + { 0x5422, CRL_REG_LEN_08BIT, 0x04 }, + { 0x5423, CRL_REG_LEN_08BIT, 0x00 }, + { 0x5424, CRL_REG_LEN_08BIT, 0x04 }, + { 0x5425, CRL_REG_LEN_08BIT, 0x00 }, + { 0x5427, CRL_REG_LEN_08BIT, 0x00 }, + { 0x5480, CRL_REG_LEN_08BIT, 0x19 }, + { 0x5481, CRL_REG_LEN_08BIT, 0x94 }, + { 0x5482, CRL_REG_LEN_08BIT, 0x2e }, + { 0x54a0, CRL_REG_LEN_08BIT, 0x41 }, + { 0x54a2, CRL_REG_LEN_08BIT, 0x04 }, + { 0x54a3, CRL_REG_LEN_08BIT, 0x00 }, + { 0x54a4, CRL_REG_LEN_08BIT, 0x04 }, + { 0x54a5, CRL_REG_LEN_08BIT, 0x00 }, + { 0x54a7, CRL_REG_LEN_08BIT, 0x00 }, + { 0x5800, CRL_REG_LEN_08BIT, 0x31 }, + { 0x5801, CRL_REG_LEN_08BIT, 0x03 }, + { 0x5804, CRL_REG_LEN_08BIT, 0x00 }, + { 0x5805, CRL_REG_LEN_08BIT, 0x40 }, + { 0x5806, CRL_REG_LEN_08BIT, 0x01 }, + { 0x5807, CRL_REG_LEN_08BIT, 0x00 }, + { 0x580e, CRL_REG_LEN_08BIT, 0x10 }, + { 0x5812, CRL_REG_LEN_08BIT, 0x34 }, + { 0x5000, CRL_REG_LEN_08BIT, 0x89 }, + { 0x5001, CRL_REG_LEN_08BIT, 0x42 }, + { 0x5002, CRL_REG_LEN_08BIT, 0x19 }, + { 0x5003, CRL_REG_LEN_08BIT, 0x16 }, + { 0x503e, CRL_REG_LEN_08BIT, 0x00 }, + { 0x503f, CRL_REG_LEN_08BIT, 0x00 }, + { 0x5602, CRL_REG_LEN_08BIT, 0x02 }, + { 0x5603, CRL_REG_LEN_08BIT, 0x58 }, + { 0x5604, CRL_REG_LEN_08BIT, 0x03 }, + { 0x5605, CRL_REG_LEN_08BIT, 0x20 }, + { 0x5606, CRL_REG_LEN_08BIT, 0x02 }, + { 0x5607, CRL_REG_LEN_08BIT, 0x58 }, + { 0x5608, CRL_REG_LEN_08BIT, 0x03 }, + { 0x5609, CRL_REG_LEN_08BIT, 0x20 }, + { 0x560a, CRL_REG_LEN_08BIT, 0x02 }, + { 0x560b, CRL_REG_LEN_08BIT, 0x58 }, + { 0x560c, CRL_REG_LEN_08BIT, 0x03 }, + { 0x560d, CRL_REG_LEN_08BIT, 0x20 }, + { 0x560e, CRL_REG_LEN_08BIT, 0x02 }, + { 0x560f, CRL_REG_LEN_08BIT, 0x58 }, + { 0x5610, CRL_REG_LEN_08BIT, 0x03 }, + { 0x5611, CRL_REG_LEN_08BIT, 0x20 }, + { 0x5612, CRL_REG_LEN_08BIT, 0x02 }, + { 0x5613, CRL_REG_LEN_08BIT, 0x58 }, + { 0x5614, CRL_REG_LEN_08BIT, 0x03 }, + { 0x5615, CRL_REG_LEN_08BIT, 0x20 }, + { 0x5616, CRL_REG_LEN_08BIT, 0x02 }, + { 0x5617, CRL_REG_LEN_08BIT, 0x58 }, + { 0x5618, 
CRL_REG_LEN_08BIT, 0x03 }, + { 0x5619, CRL_REG_LEN_08BIT, 0x20 }, + { 0x5642, CRL_REG_LEN_08BIT, 0x02 }, + { 0x5643, CRL_REG_LEN_08BIT, 0x58 }, + { 0x5644, CRL_REG_LEN_08BIT, 0x03 }, + { 0x5645, CRL_REG_LEN_08BIT, 0x20 }, + { 0x5646, CRL_REG_LEN_08BIT, 0x02 }, + { 0x5647, CRL_REG_LEN_08BIT, 0x58 }, + { 0x5648, CRL_REG_LEN_08BIT, 0x03 }, + { 0x5649, CRL_REG_LEN_08BIT, 0x20 }, + { 0x564a, CRL_REG_LEN_08BIT, 0x02 }, + { 0x564b, CRL_REG_LEN_08BIT, 0x58 }, + { 0x564c, CRL_REG_LEN_08BIT, 0x03 }, + { 0x564d, CRL_REG_LEN_08BIT, 0x20 }, + { 0x564e, CRL_REG_LEN_08BIT, 0x02 }, + { 0x564f, CRL_REG_LEN_08BIT, 0x58 }, + { 0x5650, CRL_REG_LEN_08BIT, 0x03 }, + { 0x5651, CRL_REG_LEN_08BIT, 0x20 }, + { 0x5652, CRL_REG_LEN_08BIT, 0x02 }, + { 0x5653, CRL_REG_LEN_08BIT, 0x58 }, + { 0x5654, CRL_REG_LEN_08BIT, 0x03 }, + { 0x5655, CRL_REG_LEN_08BIT, 0x20 }, + { 0x5656, CRL_REG_LEN_08BIT, 0x02 }, + { 0x5657, CRL_REG_LEN_08BIT, 0x58 }, + { 0x5658, CRL_REG_LEN_08BIT, 0x03 }, + { 0x5659, CRL_REG_LEN_08BIT, 0x20 }, + { 0x5682, CRL_REG_LEN_08BIT, 0x02 }, + { 0x5683, CRL_REG_LEN_08BIT, 0x58 }, + { 0x5684, CRL_REG_LEN_08BIT, 0x03 }, + { 0x5685, CRL_REG_LEN_08BIT, 0x20 }, + { 0x5686, CRL_REG_LEN_08BIT, 0x02 }, + { 0x5687, CRL_REG_LEN_08BIT, 0x58 }, + { 0x5688, CRL_REG_LEN_08BIT, 0x03 }, + { 0x5689, CRL_REG_LEN_08BIT, 0x20 }, + { 0x568a, CRL_REG_LEN_08BIT, 0x02 }, + { 0x568b, CRL_REG_LEN_08BIT, 0x58 }, + { 0x568c, CRL_REG_LEN_08BIT, 0x03 }, + { 0x568d, CRL_REG_LEN_08BIT, 0x20 }, + { 0x568e, CRL_REG_LEN_08BIT, 0x02 }, + { 0x568f, CRL_REG_LEN_08BIT, 0x58 }, + { 0x5690, CRL_REG_LEN_08BIT, 0x03 }, + { 0x5691, CRL_REG_LEN_08BIT, 0x20 }, + { 0x5692, CRL_REG_LEN_08BIT, 0x02 }, + { 0x5693, CRL_REG_LEN_08BIT, 0x58 }, + { 0x5694, CRL_REG_LEN_08BIT, 0x03 }, + { 0x5695, CRL_REG_LEN_08BIT, 0x20 }, + { 0x5696, CRL_REG_LEN_08BIT, 0x02 }, + { 0x5697, CRL_REG_LEN_08BIT, 0x58 }, + { 0x5698, CRL_REG_LEN_08BIT, 0x03 }, + { 0x5699, CRL_REG_LEN_08BIT, 0x20 }, + { 0x5709, CRL_REG_LEN_08BIT, 0x0e }, + { 0x5749, 
CRL_REG_LEN_08BIT, 0x0e }, + { 0x5789, CRL_REG_LEN_08BIT, 0x0e }, + { 0x5200, CRL_REG_LEN_08BIT, 0x70 }, + { 0x5201, CRL_REG_LEN_08BIT, 0x70 }, + { 0x5202, CRL_REG_LEN_08BIT, 0x73 }, + { 0x5203, CRL_REG_LEN_08BIT, 0xff }, + { 0x5205, CRL_REG_LEN_08BIT, 0x6c }, + { 0x5285, CRL_REG_LEN_08BIT, 0x6c }, + { 0x5305, CRL_REG_LEN_08BIT, 0x6c }, + { 0x5082, CRL_REG_LEN_08BIT, 0xb0 }, + { 0x50c2, CRL_REG_LEN_08BIT, 0xb0 }, + { 0x5102, CRL_REG_LEN_08BIT, 0xb0 }, + { 0x380e, CRL_REG_LEN_08BIT, 0x05 }, + { 0x380f, CRL_REG_LEN_08BIT, 0x34 }, + { 0x380c, CRL_REG_LEN_08BIT, 0x08 }, + { 0x380d, CRL_REG_LEN_08BIT, 0x78 }, + { 0x384c, CRL_REG_LEN_08BIT, 0x02 }, + { 0x384d, CRL_REG_LEN_08BIT, 0x14 }, + { 0x460a, CRL_REG_LEN_08BIT, 0x0e }, + { 0x0100, CRL_REG_LEN_08BIT, 0x01 }, +}; + +static struct crl_register_write_rep ox03a10_1920_1088_12DCG_12VS[] = { + { 0x4d09, CRL_REG_LEN_08BIT, 0x5f }, + { 0x0104, CRL_REG_LEN_08BIT, 0x04 }, + { 0x0303, CRL_REG_LEN_08BIT, 0x02 }, + { 0x0305, CRL_REG_LEN_08BIT, 0x36 }, + { 0x0307, CRL_REG_LEN_08BIT, 0x00 }, + { 0x0316, CRL_REG_LEN_08BIT, 0x00 }, + { 0x0317, CRL_REG_LEN_08BIT, 0x12 }, + { 0x0323, CRL_REG_LEN_08BIT, 0x02 }, + { 0x0325, CRL_REG_LEN_08BIT, 0x6c }, + { 0x0326, CRL_REG_LEN_08BIT, 0x00 }, + { 0x032b, CRL_REG_LEN_08BIT, 0x00 }, + { 0x0400, CRL_REG_LEN_08BIT, 0xe7 }, + { 0x0401, CRL_REG_LEN_08BIT, 0xff }, + { 0x0404, CRL_REG_LEN_08BIT, 0x2b }, + { 0x0405, CRL_REG_LEN_08BIT, 0x32 }, + { 0x0406, CRL_REG_LEN_08BIT, 0x33 }, + { 0x0407, CRL_REG_LEN_08BIT, 0x8f }, + { 0x0408, CRL_REG_LEN_08BIT, 0x0c }, + { 0x0410, CRL_REG_LEN_08BIT, 0xe7 }, + { 0x0411, CRL_REG_LEN_08BIT, 0xff }, + { 0x0414, CRL_REG_LEN_08BIT, 0x2b }, + { 0x0415, CRL_REG_LEN_08BIT, 0x32 }, + { 0x0416, CRL_REG_LEN_08BIT, 0x33 }, + { 0x0417, CRL_REG_LEN_08BIT, 0x8f }, + { 0x0418, CRL_REG_LEN_08BIT, 0x0c }, + { 0x3002, CRL_REG_LEN_08BIT, 0x03 }, + { 0x3012, CRL_REG_LEN_08BIT, 0x41 }, + { 0x3016, CRL_REG_LEN_08BIT, 0xf0 }, + { 0x3017, CRL_REG_LEN_08BIT, 0xf0 }, + { 0x3018, 
CRL_REG_LEN_08BIT, 0xf0 }, + { 0x3019, CRL_REG_LEN_08BIT, 0xf0 }, + { 0x301a, CRL_REG_LEN_08BIT, 0xf0 }, + { 0x301b, CRL_REG_LEN_08BIT, 0xb4 }, + { 0x301e, CRL_REG_LEN_08BIT, 0xb8 }, + { 0x301f, CRL_REG_LEN_08BIT, 0xe1 }, + { 0x3022, CRL_REG_LEN_08BIT, 0xf8 }, + { 0x3023, CRL_REG_LEN_08BIT, 0xf0 }, + { 0x3024, CRL_REG_LEN_08BIT, 0xf0 }, + { 0x3028, CRL_REG_LEN_08BIT, 0xf0 }, + { 0x3029, CRL_REG_LEN_08BIT, 0x80 }, + { 0x3706, CRL_REG_LEN_08BIT, 0x39 }, + { 0x370a, CRL_REG_LEN_08BIT, 0x00 }, + { 0x370b, CRL_REG_LEN_08BIT, 0xa3 }, + { 0x3712, CRL_REG_LEN_08BIT, 0x12 }, + { 0x3713, CRL_REG_LEN_08BIT, 0x00 }, + { 0x3716, CRL_REG_LEN_08BIT, 0x04 }, + { 0x371d, CRL_REG_LEN_08BIT, 0x08 }, + { 0x3721, CRL_REG_LEN_08BIT, 0x1c }, + { 0x372c, CRL_REG_LEN_08BIT, 0x17 }, + { 0x3733, CRL_REG_LEN_08BIT, 0x41 }, + { 0x3741, CRL_REG_LEN_08BIT, 0x44 }, + { 0x3742, CRL_REG_LEN_08BIT, 0x34 }, + { 0x3746, CRL_REG_LEN_08BIT, 0x03 }, + { 0x374b, CRL_REG_LEN_08BIT, 0x03 }, + { 0x3755, CRL_REG_LEN_08BIT, 0x01 }, + { 0x376c, CRL_REG_LEN_08BIT, 0x15 }, + { 0x376d, CRL_REG_LEN_08BIT, 0x08 }, + { 0x376f, CRL_REG_LEN_08BIT, 0x08 }, + { 0x3770, CRL_REG_LEN_08BIT, 0x91 }, + { 0x3771, CRL_REG_LEN_08BIT, 0x08 }, + { 0x3774, CRL_REG_LEN_08BIT, 0x82 }, + { 0x3777, CRL_REG_LEN_08BIT, 0x00 }, + { 0x3779, CRL_REG_LEN_08BIT, 0x22 }, + { 0x377a, CRL_REG_LEN_08BIT, 0x00 }, + { 0x377b, CRL_REG_LEN_08BIT, 0x00 }, + { 0x377c, CRL_REG_LEN_08BIT, 0x48 }, + { 0x3785, CRL_REG_LEN_08BIT, 0x08 }, + { 0x3790, CRL_REG_LEN_08BIT, 0x10 }, + { 0x3793, CRL_REG_LEN_08BIT, 0x00 }, + { 0x379c, CRL_REG_LEN_08BIT, 0x01 }, + { 0x37a1, CRL_REG_LEN_08BIT, 0x80 }, + { 0x37b3, CRL_REG_LEN_08BIT, 0x0a }, + { 0x37bb, CRL_REG_LEN_08BIT, 0x08 }, + { 0x37be, CRL_REG_LEN_08BIT, 0xe0 }, + { 0x37bf, CRL_REG_LEN_08BIT, 0x00 }, + { 0x37c6, CRL_REG_LEN_08BIT, 0x48 }, + { 0x37c7, CRL_REG_LEN_08BIT, 0x38 }, + { 0x37c9, CRL_REG_LEN_08BIT, 0x00 }, + { 0x37ca, CRL_REG_LEN_08BIT, 0x08 }, + { 0x37cb, CRL_REG_LEN_08BIT, 0x00 }, + { 0x37cc, 
CRL_REG_LEN_08BIT, 0x40 }, + { 0x37d1, CRL_REG_LEN_08BIT, 0x39 }, + { 0x37d2, CRL_REG_LEN_08BIT, 0x00 }, + { 0x37d3, CRL_REG_LEN_08BIT, 0xa3 }, + { 0x37d5, CRL_REG_LEN_08BIT, 0x39 }, + { 0x37d6, CRL_REG_LEN_08BIT, 0x00 }, + { 0x37d7, CRL_REG_LEN_08BIT, 0xa3 }, + { 0x3c06, CRL_REG_LEN_08BIT, 0x29 }, + { 0x3c0b, CRL_REG_LEN_08BIT, 0xa8 }, + { 0x3c12, CRL_REG_LEN_08BIT, 0x89 }, + { 0x3c14, CRL_REG_LEN_08BIT, 0x81 }, + { 0x3c18, CRL_REG_LEN_08BIT, 0x0c }, + { 0x3c3b, CRL_REG_LEN_08BIT, 0x38 }, + { 0x3c53, CRL_REG_LEN_08BIT, 0x08 }, + { 0x3c55, CRL_REG_LEN_08BIT, 0xeb }, + { 0x3101, CRL_REG_LEN_08BIT, 0x32 }, + { 0x3192, CRL_REG_LEN_08BIT, 0x00 }, + { 0x3193, CRL_REG_LEN_08BIT, 0x00 }, + { 0x3206, CRL_REG_LEN_08BIT, 0xc8 }, + { 0x3216, CRL_REG_LEN_08BIT, 0x01 }, + { 0x3304, CRL_REG_LEN_08BIT, 0x04 }, + { 0x3400, CRL_REG_LEN_08BIT, 0x08 }, + { 0x3409, CRL_REG_LEN_08BIT, 0x02 }, + { 0x3600, CRL_REG_LEN_08BIT, 0x00 }, + { 0x3601, CRL_REG_LEN_08BIT, 0x70 }, + { 0x3602, CRL_REG_LEN_08BIT, 0x42 }, + { 0x3603, CRL_REG_LEN_08BIT, 0xe3 }, + { 0x3604, CRL_REG_LEN_08BIT, 0x93 }, + { 0x3605, CRL_REG_LEN_08BIT, 0xff }, + { 0x3606, CRL_REG_LEN_08BIT, 0x80 }, + { 0x3607, CRL_REG_LEN_08BIT, 0x4a }, + { 0x3608, CRL_REG_LEN_08BIT, 0x98 }, + { 0x3609, CRL_REG_LEN_08BIT, 0x70 }, + { 0x360a, CRL_REG_LEN_08BIT, 0xd0 }, + { 0x360b, CRL_REG_LEN_08BIT, 0x0b }, + { 0x360e, CRL_REG_LEN_08BIT, 0x88 }, + { 0x3610, CRL_REG_LEN_08BIT, 0x89 }, + { 0x3611, CRL_REG_LEN_08BIT, 0x4b }, + { 0x3612, CRL_REG_LEN_08BIT, 0x4e }, + { 0x3619, CRL_REG_LEN_08BIT, 0x00 }, + { 0x3620, CRL_REG_LEN_08BIT, 0x02 }, + { 0x3621, CRL_REG_LEN_08BIT, 0x80 }, + { 0x3626, CRL_REG_LEN_08BIT, 0x0e }, + { 0x362c, CRL_REG_LEN_08BIT, 0x0e }, + { 0x362d, CRL_REG_LEN_08BIT, 0x12 }, + { 0x362e, CRL_REG_LEN_08BIT, 0x0a }, + { 0x362f, CRL_REG_LEN_08BIT, 0x17 }, + { 0x3630, CRL_REG_LEN_08BIT, 0x2e }, + { 0x3631, CRL_REG_LEN_08BIT, 0x3f }, + { 0x3632, CRL_REG_LEN_08BIT, 0x99 }, + { 0x3633, CRL_REG_LEN_08BIT, 0x99 }, + { 0x3643, 
CRL_REG_LEN_08BIT, 0x0c }, + { 0x3644, CRL_REG_LEN_08BIT, 0x00 }, + { 0x3645, CRL_REG_LEN_08BIT, 0x0e }, + { 0x3646, CRL_REG_LEN_08BIT, 0x0f }, + { 0x3647, CRL_REG_LEN_08BIT, 0x0e }, + { 0x3648, CRL_REG_LEN_08BIT, 0x00 }, + { 0x3649, CRL_REG_LEN_08BIT, 0x11 }, + { 0x364a, CRL_REG_LEN_08BIT, 0x12 }, + { 0x364c, CRL_REG_LEN_08BIT, 0x0e }, + { 0x364d, CRL_REG_LEN_08BIT, 0x0e }, + { 0x364e, CRL_REG_LEN_08BIT, 0x12 }, + { 0x364f, CRL_REG_LEN_08BIT, 0x0e }, + { 0x3652, CRL_REG_LEN_08BIT, 0xc5 }, + { 0x3654, CRL_REG_LEN_08BIT, 0x40 }, + { 0x3656, CRL_REG_LEN_08BIT, 0xcf }, + { 0x3657, CRL_REG_LEN_08BIT, 0x88 }, + { 0x3658, CRL_REG_LEN_08BIT, 0x08 }, + { 0x365a, CRL_REG_LEN_08BIT, 0x3f }, + { 0x365b, CRL_REG_LEN_08BIT, 0x2e }, + { 0x365c, CRL_REG_LEN_08BIT, 0x17 }, + { 0x365d, CRL_REG_LEN_08BIT, 0x0a }, + { 0x3660, CRL_REG_LEN_08BIT, 0x01 }, + { 0x3661, CRL_REG_LEN_08BIT, 0x07 }, + { 0x3662, CRL_REG_LEN_08BIT, 0x00 }, + { 0x3663, CRL_REG_LEN_08BIT, 0x20 }, + { 0x3665, CRL_REG_LEN_08BIT, 0x12 }, + { 0x3666, CRL_REG_LEN_08BIT, 0x13 }, + { 0x3667, CRL_REG_LEN_08BIT, 0x14 }, + { 0x3668, CRL_REG_LEN_08BIT, 0x95 }, + { 0x3669, CRL_REG_LEN_08BIT, 0x16 }, + { 0x366f, CRL_REG_LEN_08BIT, 0xc4 }, + { 0x3671, CRL_REG_LEN_08BIT, 0x37 }, + { 0x3673, CRL_REG_LEN_08BIT, 0x6a }, + { 0x3678, CRL_REG_LEN_08BIT, 0x88 }, + { 0x3800, CRL_REG_LEN_08BIT, 0x00 }, + { 0x3801, CRL_REG_LEN_08BIT, 0x00 }, + { 0x3802, CRL_REG_LEN_08BIT, 0x00 }, + { 0x3803, CRL_REG_LEN_08BIT, 0x05 }, + { 0x3804, CRL_REG_LEN_08BIT, 0x07 }, + { 0x3805, CRL_REG_LEN_08BIT, 0x8f }, + { 0x3806, CRL_REG_LEN_08BIT, 0x05 }, + { 0x3807, CRL_REG_LEN_08BIT, 0x0c }, + { 0x3808, CRL_REG_LEN_08BIT, 0x07 }, + { 0x3809, CRL_REG_LEN_08BIT, 0x80 }, + { 0x380a, CRL_REG_LEN_08BIT, 0x04 }, + { 0x380b, CRL_REG_LEN_08BIT, 0x40 }, + { 0x380e, CRL_REG_LEN_08BIT, 0x05 }, + { 0x380f, CRL_REG_LEN_08BIT, 0x40 }, + { 0x3810, CRL_REG_LEN_08BIT, 0x00 }, + { 0x3813, CRL_REG_LEN_08BIT, 0x04 }, + { 0x381c, CRL_REG_LEN_08BIT, 0x08 }, + { 0x3820, 
CRL_REG_LEN_08BIT, 0x00 }, + { 0x3822, CRL_REG_LEN_08BIT, 0x14 }, + { 0x3832, CRL_REG_LEN_08BIT, 0x00 }, + { 0x3833, CRL_REG_LEN_08BIT, 0x01 }, + { 0x3834, CRL_REG_LEN_08BIT, 0x00 }, + { 0x383d, CRL_REG_LEN_08BIT, 0x20 }, + { 0x384c, CRL_REG_LEN_08BIT, 0x02 }, + { 0x384d, CRL_REG_LEN_08BIT, 0x14 }, + { 0x384e, CRL_REG_LEN_08BIT, 0x00 }, + { 0x384f, CRL_REG_LEN_08BIT, 0x40 }, + { 0x3850, CRL_REG_LEN_08BIT, 0x01 }, + { 0x3851, CRL_REG_LEN_08BIT, 0x02 }, + { 0x3852, CRL_REG_LEN_08BIT, 0x01 }, + { 0x3853, CRL_REG_LEN_08BIT, 0x00 }, + { 0x3854, CRL_REG_LEN_08BIT, 0x00 }, + { 0x3855, CRL_REG_LEN_08BIT, 0x05 }, + { 0x3856, CRL_REG_LEN_08BIT, 0x05 }, + { 0x3857, CRL_REG_LEN_08BIT, 0x33 }, + { 0x3858, CRL_REG_LEN_08BIT, 0x7c }, + { 0x3859, CRL_REG_LEN_08BIT, 0x00 }, + { 0x385a, CRL_REG_LEN_08BIT, 0x03 }, + { 0x385b, CRL_REG_LEN_08BIT, 0x05 }, + { 0x385c, CRL_REG_LEN_08BIT, 0x32 }, + { 0x385f, CRL_REG_LEN_08BIT, 0x00 }, + { 0x3860, CRL_REG_LEN_08BIT, 0x10 }, + { 0x3861, CRL_REG_LEN_08BIT, 0x00 }, + { 0x3862, CRL_REG_LEN_08BIT, 0x40 }, + { 0x3863, CRL_REG_LEN_08BIT, 0x00 }, + { 0x3864, CRL_REG_LEN_08BIT, 0x40 }, + { 0x3865, CRL_REG_LEN_08BIT, 0x00 }, + { 0x3866, CRL_REG_LEN_08BIT, 0x40 }, + { 0x3b40, CRL_REG_LEN_08BIT, 0x3e }, + { 0x3b41, CRL_REG_LEN_08BIT, 0x00 }, + { 0x3b42, CRL_REG_LEN_08BIT, 0x02 }, + { 0x3b43, CRL_REG_LEN_08BIT, 0x00 }, + { 0x3b44, CRL_REG_LEN_08BIT, 0x00 }, + { 0x3b45, CRL_REG_LEN_08BIT, 0x20 }, + { 0x3b46, CRL_REG_LEN_08BIT, 0x00 }, + { 0x3b47, CRL_REG_LEN_08BIT, 0x20 }, + { 0x3b84, CRL_REG_LEN_08BIT, 0x36 }, + { 0x3b85, CRL_REG_LEN_08BIT, 0x00 }, + { 0x3b86, CRL_REG_LEN_08BIT, 0x00 }, + { 0x3b87, CRL_REG_LEN_08BIT, 0x04 }, + { 0x3b88, CRL_REG_LEN_08BIT, 0x00 }, + { 0x3b89, CRL_REG_LEN_08BIT, 0x04 }, + { 0x3b8a, CRL_REG_LEN_08BIT, 0x00 }, + { 0x3b8b, CRL_REG_LEN_08BIT, 0x0a }, + { 0x3b8e, CRL_REG_LEN_08BIT, 0x03 }, + { 0x3b8f, CRL_REG_LEN_08BIT, 0xe8 }, + { 0x3d85, CRL_REG_LEN_08BIT, 0x0b }, + { 0x3d8c, CRL_REG_LEN_08BIT, 0x70 }, + { 0x3d8d, 
CRL_REG_LEN_08BIT, 0x26 }, + { 0x3d97, CRL_REG_LEN_08BIT, 0x70 }, + { 0x3d98, CRL_REG_LEN_08BIT, 0x24 }, + { 0x3d99, CRL_REG_LEN_08BIT, 0x70 }, + { 0x3d9a, CRL_REG_LEN_08BIT, 0x6d }, + { 0x3d9b, CRL_REG_LEN_08BIT, 0x70 }, + { 0x3d9c, CRL_REG_LEN_08BIT, 0x6e }, + { 0x3d9d, CRL_REG_LEN_08BIT, 0x73 }, + { 0x3d9e, CRL_REG_LEN_08BIT, 0xff }, + { 0x3e07, CRL_REG_LEN_08BIT, 0x40 }, + { 0x3f00, CRL_REG_LEN_08BIT, 0x04 }, + { 0x4000, CRL_REG_LEN_08BIT, 0xf8 }, + { 0x4001, CRL_REG_LEN_08BIT, 0xeb }, + { 0x4004, CRL_REG_LEN_08BIT, 0x00 }, + { 0x4005, CRL_REG_LEN_08BIT, 0x40 }, + { 0x4008, CRL_REG_LEN_08BIT, 0x02 }, + { 0x4009, CRL_REG_LEN_08BIT, 0x0d }, + { 0x400a, CRL_REG_LEN_08BIT, 0x08 }, + { 0x400b, CRL_REG_LEN_08BIT, 0x00 }, + { 0x400f, CRL_REG_LEN_08BIT, 0x80 }, + { 0x4010, CRL_REG_LEN_08BIT, 0x10 }, + { 0x4011, CRL_REG_LEN_08BIT, 0xff }, + { 0x4016, CRL_REG_LEN_08BIT, 0x00 }, + { 0x4017, CRL_REG_LEN_08BIT, 0x10 }, + { 0x4018, CRL_REG_LEN_08BIT, 0x18 }, + { 0x401a, CRL_REG_LEN_08BIT, 0x58 }, + { 0x4028, CRL_REG_LEN_08BIT, 0x4f }, + { 0x402e, CRL_REG_LEN_08BIT, 0x00 }, + { 0x402f, CRL_REG_LEN_08BIT, 0x40 }, + { 0x4030, CRL_REG_LEN_08BIT, 0x00 }, + { 0x4031, CRL_REG_LEN_08BIT, 0x40 }, + { 0x4032, CRL_REG_LEN_08BIT, 0x9e }, + { 0x4033, CRL_REG_LEN_08BIT, 0x80 }, + { 0x4308, CRL_REG_LEN_08BIT, 0x00 }, + { 0x4501, CRL_REG_LEN_08BIT, 0x18 }, + { 0x4502, CRL_REG_LEN_08BIT, 0x00 }, + { 0x4507, CRL_REG_LEN_08BIT, 0x07 }, + { 0x4580, CRL_REG_LEN_08BIT, 0xf8 }, + { 0x4581, CRL_REG_LEN_08BIT, 0xc7 }, + { 0x4582, CRL_REG_LEN_08BIT, 0x07 }, + { 0x4602, CRL_REG_LEN_08BIT, 0x00 }, + { 0x4603, CRL_REG_LEN_08BIT, 0x01 }, + { 0x460a, CRL_REG_LEN_08BIT, 0x36 }, + { 0x460c, CRL_REG_LEN_08BIT, 0x60 }, + { 0x4700, CRL_REG_LEN_08BIT, 0x2a }, + { 0x470a, CRL_REG_LEN_08BIT, 0x08 }, + { 0x470b, CRL_REG_LEN_08BIT, 0x88 }, + { 0x4800, CRL_REG_LEN_08BIT, 0x04 }, + { 0x480e, CRL_REG_LEN_08BIT, 0x04 }, + { 0x4813, CRL_REG_LEN_08BIT, 0xd2 }, + { 0x4815, CRL_REG_LEN_08BIT, 0x2b }, + { 0x4837, 
CRL_REG_LEN_08BIT, 0x18 }, + { 0x484a, CRL_REG_LEN_08BIT, 0x3f }, + { 0x484b, CRL_REG_LEN_08BIT, 0x67 }, + { 0x4850, CRL_REG_LEN_08BIT, 0x40 }, + { 0x4861, CRL_REG_LEN_08BIT, 0xa0 }, + { 0x4886, CRL_REG_LEN_08BIT, 0x00 }, + { 0x4900, CRL_REG_LEN_08BIT, 0x08 }, + { 0x4903, CRL_REG_LEN_08BIT, 0x80 }, + { 0x4f00, CRL_REG_LEN_08BIT, 0xff }, + { 0x4f01, CRL_REG_LEN_08BIT, 0xff }, + { 0x4f05, CRL_REG_LEN_08BIT, 0x01 }, + { 0x5180, CRL_REG_LEN_08BIT, 0x04 }, + { 0x5181, CRL_REG_LEN_08BIT, 0x00 }, + { 0x5182, CRL_REG_LEN_08BIT, 0x04 }, + { 0x5183, CRL_REG_LEN_08BIT, 0x00 }, + { 0x5184, CRL_REG_LEN_08BIT, 0x04 }, + { 0x5185, CRL_REG_LEN_08BIT, 0x00 }, + { 0x5186, CRL_REG_LEN_08BIT, 0x04 }, + { 0x5187, CRL_REG_LEN_08BIT, 0x00 }, + { 0x51a0, CRL_REG_LEN_08BIT, 0x04 }, + { 0x51a1, CRL_REG_LEN_08BIT, 0x00 }, + { 0x51a2, CRL_REG_LEN_08BIT, 0x04 }, + { 0x51a3, CRL_REG_LEN_08BIT, 0x00 }, + { 0x51a4, CRL_REG_LEN_08BIT, 0x04 }, + { 0x51a5, CRL_REG_LEN_08BIT, 0x00 }, + { 0x51a6, CRL_REG_LEN_08BIT, 0x04 }, + { 0x51a7, CRL_REG_LEN_08BIT, 0x00 }, + { 0x51c0, CRL_REG_LEN_08BIT, 0x04 }, + { 0x51c1, CRL_REG_LEN_08BIT, 0x00 }, + { 0x51c2, CRL_REG_LEN_08BIT, 0x04 }, + { 0x51c3, CRL_REG_LEN_08BIT, 0x00 }, + { 0x51c4, CRL_REG_LEN_08BIT, 0x04 }, + { 0x51c5, CRL_REG_LEN_08BIT, 0x00 }, + { 0x51c6, CRL_REG_LEN_08BIT, 0x04 }, + { 0x51c7, CRL_REG_LEN_08BIT, 0x00 }, + { 0x5380, CRL_REG_LEN_08BIT, 0x19 }, + { 0x5381, CRL_REG_LEN_08BIT, 0x94 }, + { 0x5382, CRL_REG_LEN_08BIT, 0x2e }, + { 0x53a0, CRL_REG_LEN_08BIT, 0x41 }, + { 0x53a2, CRL_REG_LEN_08BIT, 0x04 }, + { 0x53a3, CRL_REG_LEN_08BIT, 0x00 }, + { 0x53a4, CRL_REG_LEN_08BIT, 0x04 }, + { 0x53a5, CRL_REG_LEN_08BIT, 0x00 }, + { 0x53a7, CRL_REG_LEN_08BIT, 0x00 }, + { 0x5400, CRL_REG_LEN_08BIT, 0x19 }, + { 0x5401, CRL_REG_LEN_08BIT, 0x94 }, + { 0x5402, CRL_REG_LEN_08BIT, 0x2e }, + { 0x5420, CRL_REG_LEN_08BIT, 0x41 }, + { 0x5422, CRL_REG_LEN_08BIT, 0x04 }, + { 0x5423, CRL_REG_LEN_08BIT, 0x00 }, + { 0x5424, CRL_REG_LEN_08BIT, 0x04 }, + { 0x5425, 
CRL_REG_LEN_08BIT, 0x00 }, + { 0x5427, CRL_REG_LEN_08BIT, 0x00 }, + { 0x5480, CRL_REG_LEN_08BIT, 0x19 }, + { 0x5481, CRL_REG_LEN_08BIT, 0x94 }, + { 0x5482, CRL_REG_LEN_08BIT, 0x2e }, + { 0x54a0, CRL_REG_LEN_08BIT, 0x41 }, + { 0x54a2, CRL_REG_LEN_08BIT, 0x04 }, + { 0x54a3, CRL_REG_LEN_08BIT, 0x00 }, + { 0x54a4, CRL_REG_LEN_08BIT, 0x04 }, + { 0x54a5, CRL_REG_LEN_08BIT, 0x00 }, + { 0x54a7, CRL_REG_LEN_08BIT, 0x00 }, + { 0x5800, CRL_REG_LEN_08BIT, 0x31 }, + { 0x5801, CRL_REG_LEN_08BIT, 0x03 }, + { 0x5804, CRL_REG_LEN_08BIT, 0x00 }, + { 0x5805, CRL_REG_LEN_08BIT, 0x40 }, + { 0x5806, CRL_REG_LEN_08BIT, 0x01 }, + { 0x5807, CRL_REG_LEN_08BIT, 0x00 }, + { 0x580e, CRL_REG_LEN_08BIT, 0x10 }, + { 0x5812, CRL_REG_LEN_08BIT, 0x34 }, + { 0x5000, CRL_REG_LEN_08BIT, 0x89 }, + { 0x5001, CRL_REG_LEN_08BIT, 0x42 }, + { 0x5002, CRL_REG_LEN_08BIT, 0x19 }, + { 0x5003, CRL_REG_LEN_08BIT, 0x16 }, + { 0x503e, CRL_REG_LEN_08BIT, 0x00 }, + { 0x503f, CRL_REG_LEN_08BIT, 0x00 }, + { 0x5602, CRL_REG_LEN_08BIT, 0x02 }, + { 0x5603, CRL_REG_LEN_08BIT, 0x58 }, + { 0x5604, CRL_REG_LEN_08BIT, 0x03 }, + { 0x5605, CRL_REG_LEN_08BIT, 0x20 }, + { 0x5606, CRL_REG_LEN_08BIT, 0x02 }, + { 0x5607, CRL_REG_LEN_08BIT, 0x58 }, + { 0x5608, CRL_REG_LEN_08BIT, 0x03 }, + { 0x5609, CRL_REG_LEN_08BIT, 0x20 }, + { 0x560a, CRL_REG_LEN_08BIT, 0x02 }, + { 0x560b, CRL_REG_LEN_08BIT, 0x58 }, + { 0x560c, CRL_REG_LEN_08BIT, 0x03 }, + { 0x560d, CRL_REG_LEN_08BIT, 0x20 }, + { 0x560e, CRL_REG_LEN_08BIT, 0x02 }, + { 0x560f, CRL_REG_LEN_08BIT, 0x58 }, + { 0x5610, CRL_REG_LEN_08BIT, 0x03 }, + { 0x5611, CRL_REG_LEN_08BIT, 0x20 }, + { 0x5612, CRL_REG_LEN_08BIT, 0x02 }, + { 0x5613, CRL_REG_LEN_08BIT, 0x58 }, + { 0x5614, CRL_REG_LEN_08BIT, 0x03 }, + { 0x5615, CRL_REG_LEN_08BIT, 0x20 }, + { 0x5616, CRL_REG_LEN_08BIT, 0x02 }, + { 0x5617, CRL_REG_LEN_08BIT, 0x58 }, + { 0x5618, CRL_REG_LEN_08BIT, 0x03 }, + { 0x5619, CRL_REG_LEN_08BIT, 0x20 }, + { 0x5642, CRL_REG_LEN_08BIT, 0x02 }, + { 0x5643, CRL_REG_LEN_08BIT, 0x58 }, + { 0x5644, 
CRL_REG_LEN_08BIT, 0x03 }, + { 0x5645, CRL_REG_LEN_08BIT, 0x20 }, + { 0x5646, CRL_REG_LEN_08BIT, 0x02 }, + { 0x5647, CRL_REG_LEN_08BIT, 0x58 }, + { 0x5648, CRL_REG_LEN_08BIT, 0x03 }, + { 0x5649, CRL_REG_LEN_08BIT, 0x20 }, + { 0x564a, CRL_REG_LEN_08BIT, 0x02 }, + { 0x564b, CRL_REG_LEN_08BIT, 0x58 }, + { 0x564c, CRL_REG_LEN_08BIT, 0x03 }, + { 0x564d, CRL_REG_LEN_08BIT, 0x20 }, + { 0x564e, CRL_REG_LEN_08BIT, 0x02 }, + { 0x564f, CRL_REG_LEN_08BIT, 0x58 }, + { 0x5650, CRL_REG_LEN_08BIT, 0x03 }, + { 0x5651, CRL_REG_LEN_08BIT, 0x20 }, + { 0x5652, CRL_REG_LEN_08BIT, 0x02 }, + { 0x5653, CRL_REG_LEN_08BIT, 0x58 }, + { 0x5654, CRL_REG_LEN_08BIT, 0x03 }, + { 0x5655, CRL_REG_LEN_08BIT, 0x20 }, + { 0x5656, CRL_REG_LEN_08BIT, 0x02 }, + { 0x5657, CRL_REG_LEN_08BIT, 0x58 }, + { 0x5658, CRL_REG_LEN_08BIT, 0x03 }, + { 0x5659, CRL_REG_LEN_08BIT, 0x20 }, + { 0x5682, CRL_REG_LEN_08BIT, 0x02 }, + { 0x5683, CRL_REG_LEN_08BIT, 0x58 }, + { 0x5684, CRL_REG_LEN_08BIT, 0x03 }, + { 0x5685, CRL_REG_LEN_08BIT, 0x20 }, + { 0x5686, CRL_REG_LEN_08BIT, 0x02 }, + { 0x5687, CRL_REG_LEN_08BIT, 0x58 }, + { 0x5688, CRL_REG_LEN_08BIT, 0x03 }, + { 0x5689, CRL_REG_LEN_08BIT, 0x20 }, + { 0x568a, CRL_REG_LEN_08BIT, 0x02 }, + { 0x568b, CRL_REG_LEN_08BIT, 0x58 }, + { 0x568c, CRL_REG_LEN_08BIT, 0x03 }, + { 0x568d, CRL_REG_LEN_08BIT, 0x20 }, + { 0x568e, CRL_REG_LEN_08BIT, 0x02 }, + { 0x568f, CRL_REG_LEN_08BIT, 0x58 }, + { 0x5690, CRL_REG_LEN_08BIT, 0x03 }, + { 0x5691, CRL_REG_LEN_08BIT, 0x20 }, + { 0x5692, CRL_REG_LEN_08BIT, 0x02 }, + { 0x5693, CRL_REG_LEN_08BIT, 0x58 }, + { 0x5694, CRL_REG_LEN_08BIT, 0x03 }, + { 0x5695, CRL_REG_LEN_08BIT, 0x20 }, + { 0x5696, CRL_REG_LEN_08BIT, 0x02 }, + { 0x5697, CRL_REG_LEN_08BIT, 0x58 }, + { 0x5698, CRL_REG_LEN_08BIT, 0x03 }, + { 0x5699, CRL_REG_LEN_08BIT, 0x20 }, + { 0x5709, CRL_REG_LEN_08BIT, 0x0e }, + { 0x5749, CRL_REG_LEN_08BIT, 0x0e }, + { 0x5789, CRL_REG_LEN_08BIT, 0x0e }, + { 0x5200, CRL_REG_LEN_08BIT, 0x70 }, + { 0x5201, CRL_REG_LEN_08BIT, 0x70 }, + { 0x5202, 
CRL_REG_LEN_08BIT, 0x73 }, + { 0x5203, CRL_REG_LEN_08BIT, 0xff }, + { 0x5205, CRL_REG_LEN_08BIT, 0x6c }, + { 0x5285, CRL_REG_LEN_08BIT, 0x6c }, + { 0x5305, CRL_REG_LEN_08BIT, 0x6c }, + { 0x5082, CRL_REG_LEN_08BIT, 0xb0 }, + { 0x50c2, CRL_REG_LEN_08BIT, 0xb0 }, + { 0x5102, CRL_REG_LEN_08BIT, 0xb0 }, + { 0x380e, CRL_REG_LEN_08BIT, 0x05 }, + { 0x380f, CRL_REG_LEN_08BIT, 0x34 }, + { 0x380c, CRL_REG_LEN_08BIT, 0x06 }, + { 0x380d, CRL_REG_LEN_08BIT, 0xcc }, + { 0x384c, CRL_REG_LEN_08BIT, 0x03 }, + { 0x384d, CRL_REG_LEN_08BIT, 0xc0 }, + { 0x460c, CRL_REG_LEN_08BIT, 0x40 }, + { 0x0100, CRL_REG_LEN_08BIT, 0x01 }, +}; + +struct crl_ctrl_data_pair ox03a10_ctrl_data_modes[] = { + { + .ctrl_id = CRL_CID_EXPOSURE_MODE, + .data = 0, + }, + { + .ctrl_id = CRL_CID_EXPOSURE_MODE, + .data = 1, + } +}; + +struct crl_mode_rep ox03a10_modes[] = { + { + .sd_rects_items = ARRAY_SIZE(ox03a10_1920_1088_rects), + .sd_rects = ox03a10_1920_1088_rects, + .binn_hor = 1, + .binn_vert = 1, + .scale_m = 1, + .width = 1920, + .height = 1088, + .comp_items = 1, + .ctrl_data = &ox03a10_ctrl_data_modes[0], + .mode_regs_items = ARRAY_SIZE(ox03a10_1920_1088_12DCG), + .mode_regs = ox03a10_1920_1088_12DCG, + }, + { + .sd_rects_items = ARRAY_SIZE(ox03a10_1920_1088_rects), + .sd_rects = ox03a10_1920_1088_rects, + .binn_hor = 1, + .binn_vert = 1, + .scale_m = 1, + .width = 1920, + .height = 1088, + .comp_items = 1, + .ctrl_data = &ox03a10_ctrl_data_modes[1], + .mode_regs_items = ARRAY_SIZE(ox03a10_1920_1088_12DCG_12VS), + .mode_regs = ox03a10_1920_1088_12DCG_12VS, + }, +}; + +static struct crl_arithmetic_ops bits_5_0[] = { + { + .op = CRL_BITWISE_AND, + .operand.entity_val = 0x3F, + }, + { + .op = CRL_BITWISE_LSHIFT, + .operand.entity_val = 2, + } +}; + +static struct crl_arithmetic_ops bits_10_6[] = { + { + .op = CRL_BITWISE_RSHIFT, + .operand.entity_val = 6, + }, + { + .op = CRL_BITWISE_AND, + .operand.entity_val = 0x1F, + } +}; + +static struct crl_arithmetic_ops bits_13_10[] = { + { + .op = 
CRL_BITWISE_RSHIFT, + .operand.entity_val = 10, + }, + { + .op = CRL_BITWISE_AND, + .operand.entity_val = 0xF, + } +}; + +static struct crl_arithmetic_ops bits_9_2[] = { + { + .op = CRL_BITWISE_RSHIFT, + .operand.entity_val = 2, + }, + { + .op = CRL_BITWISE_AND, + .operand.entity_val = 0xFF, + } +}; + +static struct crl_arithmetic_ops bits_1_0[] = { + { + .op = CRL_BITWISE_AND, + .operand.entity_val = 0x3, + }, + { + .op = CRL_BITWISE_LSHIFT, + .operand.entity_val = 6, + } +}; + +static struct crl_arithmetic_ops bits_15_8[] = { + { + .op = CRL_BITWISE_RSHIFT, + .operand.entity_val = 8, + }, + { + .op = CRL_BITWISE_AND, + .operand.entity_val = 0xff, + } +}; + +static struct crl_arithmetic_ops bits_7_0[] = { + { + .op = CRL_BITWISE_AND, + .operand.entity_val = 0xff, + } +}; + +static struct crl_dynamic_register_access ox03a10_hcg_real_gain[] = { + { + .address = 0x3508, + .len = CRL_REG_LEN_08BIT, + .ops_items = ARRAY_SIZE(bits_10_6), + .ops = bits_10_6, + }, + { + .address = 0x3509, + .len = CRL_REG_LEN_08BIT, + .ops_items = ARRAY_SIZE(bits_5_0), + .ops = bits_5_0, + } +}; + +static struct crl_dynamic_register_access ox03a10_hcg_digital_gain[] = { + { + .address = 0x350a, + .len = CRL_REG_LEN_08BIT, + .ops_items = ARRAY_SIZE(bits_13_10), + .ops = bits_13_10, + }, + { + .address = 0x350b, + .len = CRL_REG_LEN_08BIT, + .ops_items = ARRAY_SIZE(bits_9_2), + .ops = bits_9_2, + }, + { + .address = 0x350c, + .len = CRL_REG_LEN_08BIT, + .ops_items = ARRAY_SIZE(bits_1_0), + .ops = bits_1_0, + } +}; + +static struct crl_dynamic_register_access ox03a10_lcg_real_gain[] = { + { + .address = 0x3548, + .len = CRL_REG_LEN_08BIT, + .ops_items = ARRAY_SIZE(bits_10_6), + .ops = bits_10_6, + }, + { + .address = 0x3549, + .len = CRL_REG_LEN_08BIT, + .ops_items = ARRAY_SIZE(bits_5_0), + .ops = bits_5_0, + } +}; + +static struct crl_dynamic_register_access ox03a10_lcg_digital_gain[] = { + { + .address = 0x354a, + .len = CRL_REG_LEN_08BIT, + .ops_items = ARRAY_SIZE(bits_13_10), + .ops = 
bits_13_10, + }, + { + .address = 0x354b, + .len = CRL_REG_LEN_08BIT, + .ops_items = ARRAY_SIZE(bits_9_2), + .ops = bits_9_2, + }, + { + .address = 0x354c, + .len = CRL_REG_LEN_08BIT, + .ops_items = ARRAY_SIZE(bits_1_0), + .ops = bits_1_0, + } +}; + +static struct crl_dynamic_register_access ox03a10_dcg_exposure_coarse[] = { + { + .address = 0x3501, + .len = CRL_REG_LEN_08BIT, + .ops_items = ARRAY_SIZE(bits_15_8), + .ops = bits_15_8, + }, + { + .address = 0x3502, + .len = CRL_REG_LEN_08BIT, + .ops_items = ARRAY_SIZE(bits_7_0), + .ops = bits_7_0, + } +}; + +static struct crl_dynamic_register_access ox03a10_vs_real_gain[] = { + { + .address = 0x3588, + .len = CRL_REG_LEN_08BIT, + .ops_items = ARRAY_SIZE(bits_10_6), + .ops = bits_10_6, + }, + { + .address = 0x3589, + .len = CRL_REG_LEN_08BIT, + .ops_items = ARRAY_SIZE(bits_5_0), + .ops = bits_5_0, + } +}; + +static struct crl_dynamic_register_access ox03a10_vs_digital_gain[] = { + { + .address = 0x358a, + .len = CRL_REG_LEN_08BIT, + .ops_items = ARRAY_SIZE(bits_13_10), + .ops = bits_13_10, + }, + { + .address = 0x358b, + .len = CRL_REG_LEN_08BIT, + .ops_items = ARRAY_SIZE(bits_9_2), + .ops = bits_9_2, + }, + { + .address = 0x358c, + .len = CRL_REG_LEN_08BIT, + .ops_items = ARRAY_SIZE(bits_1_0), + .ops = bits_1_0, + } +}; + +static struct crl_dynamic_register_access ox03a10_vs_exposure_coarse[] = { + { + .address = 0x3581, + .len = CRL_REG_LEN_08BIT, + .ops_items = ARRAY_SIZE(bits_15_8), + .ops = bits_15_8, + }, + { + .address = 0x3582, + .len = CRL_REG_LEN_08BIT, + .ops_items = ARRAY_SIZE(bits_7_0), + .ops = bits_7_0, + } +}; + +static struct crl_arithmetic_ops ox03a10_mirror_ops[] = { + { + .op = CRL_BITWISE_LSHIFT, + .operand.entity_val = 2, + }, + { + .op = CRL_BITWISE_OR, + .operand.entity_val = 0x20, + }, +}; + +static struct crl_dynamic_register_access ox03a10_h_flip_regs[] = { + { + .address = 0x3821, + .len = CRL_REG_LEN_08BIT | CRL_REG_READ_AND_UPDATE, + .ops_items = ARRAY_SIZE(ox03a10_mirror_ops), + .ops = 
ox03a10_mirror_ops, + .mask = 0x24, + }, + { + .address = 0x3811, + .len = CRL_REG_LEN_08BIT | CRL_REG_READ_AND_UPDATE, + .ops_items = 0, + .ops = 0, + .mask = 0x1, + }, +}; + +/* keep GRBG no change during flip, for tuning file handle GRBG only */ +static struct crl_flip_data ox03a10_flip_configurations[] = { + { + .flip = CRL_FLIP_DEFAULT_NONE, + .pixel_order = CRL_PIXEL_ORDER_GRBG, + }, + { + .flip = CRL_FLIP_HFLIP, + .pixel_order = CRL_PIXEL_ORDER_GRBG, + }, +}; + +struct crl_v4l2_ctrl ox03a10_v4l2_ctrls[] = { + { + .sd_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .op_type = CRL_V4L2_CTRL_SET_OP, + .context = SENSOR_POWERED_ON, + .name = "DCG exposure", + .ctrl_id = V4L2_CID_EXPOSURE, + .type = CRL_V4L2_CTRL_TYPE_CUSTOM, + .data.std_data.min = 1, + .data.std_data.max = 1280, + .data.std_data.step = 1, + .data.std_data.def = 0x40, + .flags = V4L2_CTRL_FLAG_UPDATE, + .impact = CRL_IMPACTS_NO_IMPACT, + .regs_items = ARRAY_SIZE(ox03a10_dcg_exposure_coarse), + .regs = ox03a10_dcg_exposure_coarse, + .v4l2_type = V4L2_CTRL_TYPE_INTEGER, + }, + { + .sd_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .op_type = CRL_V4L2_CTRL_SET_OP, + .context = SENSOR_POWERED_ON, + .name = "VS exposure", + .ctrl_id = CRL_CID_EXPOSURE_SHS1, + .type = CRL_V4L2_CTRL_TYPE_CUSTOM, + .data.std_data.min = 1, + .data.std_data.max = 1280, + .data.std_data.step = 1, + .data.std_data.def = 0x40, + .flags = V4L2_CTRL_FLAG_UPDATE, + .impact = CRL_IMPACTS_NO_IMPACT, + .regs_items = ARRAY_SIZE(ox03a10_vs_exposure_coarse), + .regs = ox03a10_vs_exposure_coarse, + .v4l2_type = V4L2_CTRL_TYPE_INTEGER, + }, + { + .sd_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .op_type = CRL_V4L2_CTRL_SET_OP, + .context = SENSOR_POWERED_ON, + .name = "HCG digital gain", + .ctrl_id = V4L2_CID_GAIN, + .type = CRL_V4L2_CTRL_TYPE_CUSTOM, + .data.std_data.min = 0x400, + .data.std_data.max = 0x3FFF, + .data.std_data.step = 1, + .data.std_data.def = 0x400, + .flags = V4L2_CTRL_FLAG_UPDATE, + .impact = CRL_IMPACTS_NO_IMPACT, + .regs_items = 
ARRAY_SIZE(ox03a10_hcg_digital_gain), + .regs = ox03a10_hcg_digital_gain, + .v4l2_type = V4L2_CTRL_TYPE_INTEGER, + }, + { + .sd_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .op_type = CRL_V4L2_CTRL_SET_OP, + .context = SENSOR_POWERED_ON, + .name = "HCG analog gain", + .ctrl_id = V4L2_CID_ANALOGUE_GAIN, + .type = CRL_V4L2_CTRL_TYPE_CUSTOM, + .data.std_data.min = 0x40, + .data.std_data.max = 0x400, + .data.std_data.step = 1, + .data.std_data.def = 0x40, + .flags = V4L2_CTRL_FLAG_UPDATE, + .impact = CRL_IMPACTS_NO_IMPACT, + .regs_items = ARRAY_SIZE(ox03a10_hcg_real_gain), + .regs = ox03a10_hcg_real_gain, + .v4l2_type = V4L2_CTRL_TYPE_INTEGER, + }, + { + .sd_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .op_type = CRL_V4L2_CTRL_SET_OP, + .context = SENSOR_POWERED_ON, + .name = "HCG digital gain", + .ctrl_id = V4L2_CID_DIGITAL_GAIN, + .type = CRL_V4L2_CTRL_TYPE_CUSTOM, + .data.std_data.min = 0x400, + .data.std_data.max = 0x3FFF, + .data.std_data.step = 1, + .data.std_data.def = 0x400, + .flags = V4L2_CTRL_FLAG_UPDATE, + .impact = CRL_IMPACTS_NO_IMPACT, + .regs_items = ARRAY_SIZE(ox03a10_hcg_digital_gain), + .regs = ox03a10_hcg_digital_gain, + .v4l2_type = V4L2_CTRL_TYPE_INTEGER, + }, + { + .sd_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .op_type = CRL_V4L2_CTRL_SET_OP, + .context = SENSOR_POWERED_ON, + .name = "LCG analog gain", + .ctrl_id = CRL_CID_ANALOG_GAIN_S, + .type = CRL_V4L2_CTRL_TYPE_CUSTOM, + .data.std_data.min = 0x40, + .data.std_data.max = 0x400, + .data.std_data.step = 1, + .data.std_data.def = 0x40, + .flags = V4L2_CTRL_FLAG_UPDATE, + .impact = CRL_IMPACTS_NO_IMPACT, + .regs_items = ARRAY_SIZE(ox03a10_lcg_real_gain), + .regs = ox03a10_lcg_real_gain, + .v4l2_type = V4L2_CTRL_TYPE_INTEGER, + }, + { + .sd_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .op_type = CRL_V4L2_CTRL_SET_OP, + .context = SENSOR_POWERED_ON, + .name = "LCG digital gain", + .ctrl_id = CRL_CID_DIGITAL_GAIN_S, + .type = CRL_V4L2_CTRL_TYPE_CUSTOM, + .data.std_data.min = 0x400, + .data.std_data.max = 0x3FFF, + 
.data.std_data.step = 1, + .data.std_data.def = 0x400, + .flags = V4L2_CTRL_FLAG_UPDATE, + .impact = CRL_IMPACTS_NO_IMPACT, + .regs_items = ARRAY_SIZE(ox03a10_lcg_digital_gain), + .regs = ox03a10_lcg_digital_gain, + .v4l2_type = V4L2_CTRL_TYPE_INTEGER, + }, + { + .sd_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .op_type = CRL_V4L2_CTRL_SET_OP, + .context = SENSOR_POWERED_ON, + .name = "LCG analog gain", + .ctrl_id = CRL_CID_ANALOG_GAIN_L, + .type = CRL_V4L2_CTRL_TYPE_CUSTOM, + .data.std_data.min = 0x40, + .data.std_data.max = 0x400, + .data.std_data.step = 1, + .data.std_data.def = 0x40, + .flags = V4L2_CTRL_FLAG_UPDATE, + .impact = CRL_IMPACTS_NO_IMPACT, + .regs_items = ARRAY_SIZE(ox03a10_lcg_real_gain), + .regs = ox03a10_lcg_real_gain, + .v4l2_type = V4L2_CTRL_TYPE_INTEGER, + }, + { + .sd_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .op_type = CRL_V4L2_CTRL_SET_OP, + .context = SENSOR_POWERED_ON, + .name = "LCG digital gain", + .ctrl_id = CRL_CID_DIGITAL_GAIN_L, + .type = CRL_V4L2_CTRL_TYPE_CUSTOM, + .data.std_data.min = 0x400, + .data.std_data.max = 0x3FFF, + .data.std_data.step = 1, + .data.std_data.def = 0x400, + .flags = V4L2_CTRL_FLAG_UPDATE, + .impact = CRL_IMPACTS_NO_IMPACT, + .regs_items = ARRAY_SIZE(ox03a10_lcg_digital_gain), + .regs = ox03a10_lcg_digital_gain, + .v4l2_type = V4L2_CTRL_TYPE_INTEGER, + }, + { + .sd_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .op_type = CRL_V4L2_CTRL_SET_OP, + .context = SENSOR_POWERED_ON, + .name = "VS analog gain", + .ctrl_id = CRL_CID_ANALOG_GAIN_VS, + .type = CRL_V4L2_CTRL_TYPE_CUSTOM, + .data.std_data.min = 0x40, + .data.std_data.max = 0x400, + .data.std_data.step = 1, + .data.std_data.def = 0x40, + .flags = V4L2_CTRL_FLAG_UPDATE, + .impact = CRL_IMPACTS_NO_IMPACT, + .regs_items = ARRAY_SIZE(ox03a10_vs_real_gain), + .regs = ox03a10_vs_real_gain, + .v4l2_type = V4L2_CTRL_TYPE_INTEGER, + }, + { + .sd_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .op_type = CRL_V4L2_CTRL_SET_OP, + .context = SENSOR_POWERED_ON, + .name = "VS digital gain", + 
.ctrl_id = CRL_CID_DIGITAL_GAIN_VS, + .type = CRL_V4L2_CTRL_TYPE_CUSTOM, + .data.std_data.min = 0x400, + .data.std_data.max = 0x3FFF, + .data.std_data.step = 1, + .data.std_data.def = 0x400, + .flags = V4L2_CTRL_FLAG_UPDATE, + .impact = CRL_IMPACTS_NO_IMPACT, + .regs_items = ARRAY_SIZE(ox03a10_vs_digital_gain), + .regs = ox03a10_vs_digital_gain, + .v4l2_type = V4L2_CTRL_TYPE_INTEGER, + }, + { + .sd_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .op_type = CRL_V4L2_CTRL_SET_OP, + .context = SENSOR_POWERED_ON, + .name = "CRL_CID_EXPOSURE_MODE", + .ctrl_id = CRL_CID_EXPOSURE_MODE, + .type = CRL_V4L2_CTRL_TYPE_CUSTOM, + .data.std_data.min = 0, + .data.std_data.max = 1, + .data.std_data.step = 1, + .data.std_data.def = 0, + .flags = V4L2_CTRL_FLAG_UPDATE, + .impact = CRL_IMPACTS_MODE_SELECTION, + .v4l2_type = V4L2_CTRL_TYPE_INTEGER, + }, + { + .sd_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .op_type = CRL_V4L2_CTRL_GET_OP, + .context = SENSOR_POWERED_ON, + .ctrl_id = V4L2_CID_PIXEL_RATE, + .name = "V4L2_CID_PIXEL_RATE_PA", + .type = CRL_V4L2_CTRL_TYPE_INTEGER, + .data.std_data.min = 0, + .data.std_data.max = INT_MAX, + .data.std_data.step = 1, + .data.std_data.def = 0, + .flags = 0, + .impact = CRL_IMPACTS_NO_IMPACT, + .regs_items = 0, + .regs = 0, + .dep_items = 0, + .dep_ctrls = 0, + }, + { + .sd_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .op_type = CRL_V4L2_CTRL_SET_OP, + .context = SENSOR_POWERED_ON, + .ctrl_id = V4L2_CID_LINE_LENGTH_PIXELS, + .name = "Line Length Pixels", + .type = CRL_V4L2_CTRL_TYPE_CUSTOM, + .data.std_data.min = 1920, + .data.std_data.max = 65535, + .data.std_data.step = 1, + .data.std_data.def = 2700, + .flags = V4L2_CTRL_FLAG_UPDATE, + .impact = CRL_IMPACTS_NO_IMPACT, + .ctrl = 0, + .regs_items = ARRAY_SIZE(ar0231at_llp_regs), + .regs = ar0231at_llp_regs, + .dep_items = 0, + .dep_ctrls = 0, + .v4l2_type = V4L2_CTRL_TYPE_INTEGER, + }, + { + .sd_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .op_type = CRL_V4L2_CTRL_SET_OP, + .context = SENSOR_POWERED_ON, + .ctrl_id = 
V4L2_CID_FRAME_LENGTH_LINES, + .name = "Frame Length Lines", + .type = CRL_V4L2_CTRL_TYPE_CUSTOM, + .data.std_data.min = 1088, + .data.std_data.max = 65535, + .data.std_data.step = 1, + .data.std_data.def = 1480, + .flags = V4L2_CTRL_FLAG_UPDATE, + .impact = CRL_IMPACTS_NO_IMPACT, + .ctrl = 0, + .regs_items = ARRAY_SIZE(ar0231at_fll_regs), + .regs = ar0231at_fll_regs, + .dep_items = 0, + .dep_ctrls = 0, + .v4l2_type = V4L2_CTRL_TYPE_INTEGER, + }, + { + .sd_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .op_type = CRL_V4L2_CTRL_SET_OP, + .context = SENSOR_POWERED_ON, + .ctrl_id = V4L2_CID_HFLIP, + .name = "V4L2_CID_HFLIP", + .type = CRL_V4L2_CTRL_TYPE_INTEGER, + .data.std_data.min = 0, + .data.std_data.max = 1, + .data.std_data.step = 1, + .data.std_data.def = 1, + .flags = 0, + .impact = CRL_IMPACTS_NO_IMPACT, + .ctrl = 0, + .regs_items = ARRAY_SIZE(ox03a10_h_flip_regs), + .regs = ox03a10_h_flip_regs, + .dep_items = 0, + .dep_ctrls = 0, + }, + { + .sd_type = CRL_SUBDEV_TYPE_BINNER, + .op_type = CRL_V4L2_CTRL_GET_OP, + .context = SENSOR_POWERED_ON, + .ctrl_id = V4L2_CID_PIXEL_RATE, + .name = "V4L2_CID_PIXEL_RATE_CSI", + .type = CRL_V4L2_CTRL_TYPE_INTEGER, + .data.std_data.min = 0, + .data.std_data.max = INT_MAX, + .data.std_data.step = 1, + .data.std_data.def = 0, + .flags = 0, + .impact = CRL_IMPACTS_NO_IMPACT, + .regs_items = 0, + .regs = 0, + .dep_items = 0, + .dep_ctrls = 0, + }, + { + .sd_type = CRL_SUBDEV_TYPE_BINNER, + .op_type = CRL_V4L2_CTRL_SET_OP, + .context = SENSOR_IDLE, + .ctrl_id = V4L2_CID_LINK_FREQ, + .name = "V4L2_CID_LINK_FREQ", + .type = CRL_V4L2_CTRL_TYPE_MENU_INT, + .data.v4l2_int_menu.def = 0, + .data.v4l2_int_menu.max = 0, + .data.v4l2_int_menu.menu = 0, + .flags = 0, + .impact = CRL_IMPACTS_NO_IMPACT, + .regs_items = 0, + .regs = 0, + .dep_items = 0, + .dep_ctrls = 0, + }, +}; + +struct crl_csi_data_fmt ox03a10_crl_csi_data_fmt[] = { + { + .code = MEDIA_BUS_FMT_SGRBG12_1X12, + .pixel_order = CRL_PIXEL_ORDER_GRBG, + .bits_per_pixel = 12, + .regs_items 
= 0, + .regs = 0, + }, +}; + +struct crl_pll_configuration ox03a10_pll_configurations[] = { + { + .input_clk = 27000000, + .op_sys_clk = 108000000, + .bitsperpixel = 12, + .pixel_rate_csi = 108000000, + .pixel_rate_pa = 108000000, /* pixel_rate = op_sys_clk*2 *csi_lanes/bitsperpixel */ + .csi_lanes = 4, + .comp_items = 0, + .ctrl_data = 0, + .pll_regs_items = 0, + .pll_regs = 0, + }, +}; + +static struct crl_register_write_rep ox03a10_streamoff_regs[] = { + { 0x0100, CRL_REG_LEN_08BIT, 0x00 } +}; + +static struct crl_arithmetic_ops ox03a10_frame_desc_width_ops[] = { + { + .op = CRL_ASSIGNMENT, + .operand.entity_type = CRL_DYNAMIC_VAL_OPERAND_TYPE_VAR_REF, + .operand.entity_val = CRL_VAR_REF_OUTPUT_WIDTH, + }, +}; + +static struct crl_arithmetic_ops ox03a10_frame_desc_height_ops[] = { + { + .op = CRL_ASSIGNMENT, + .operand.entity_type = CRL_DYNAMIC_VAL_OPERAND_TYPE_CONST, + .operand.entity_val = 1, + }, +}; + +static struct crl_frame_desc ox03a10_frame_desc[] = { + { + .flags.entity_val = 0, + .bpp.entity_type = CRL_DYNAMIC_VAL_OPERAND_TYPE_VAR_REF, + .bpp.entity_val = CRL_VAR_REF_BITSPERPIXEL, + .pixelcode.entity_val = MEDIA_BUS_FMT_FIXED, + .length.entity_val = 0, + .start_line.entity_val = 0, + .start_pixel.entity_val = 0, + .width = { + .ops_items = ARRAY_SIZE(ox03a10_frame_desc_width_ops), + .ops = ox03a10_frame_desc_width_ops, + }, + .height = { + .ops_items = ARRAY_SIZE(ox03a10_frame_desc_height_ops), + .ops = ox03a10_frame_desc_height_ops, + }, + .csi2_channel.entity_val = 0, + .csi2_data_type.entity_val = 0x12, + }, + { + .flags.entity_val = 0, + .bpp.entity_type = CRL_DYNAMIC_VAL_OPERAND_TYPE_VAR_REF, + .bpp.entity_val = CRL_VAR_REF_BITSPERPIXEL, + .pixelcode.entity_val = MEDIA_BUS_FMT_FIXED, + .length.entity_val = 0, + .start_line.entity_val = 0, + .start_pixel.entity_val = 0, + .width = { + .ops_items = ARRAY_SIZE(ox03a10_frame_desc_width_ops), + .ops = ox03a10_frame_desc_width_ops, + }, + .height = { + .ops_items = 
ARRAY_SIZE(ox03a10_frame_desc_height_ops), + .ops = ox03a10_frame_desc_height_ops, + }, + .csi2_channel.entity_val = 1, + .csi2_data_type.entity_val = 0x12, + }, +}; + +struct crl_sensor_configuration ox03a10_crl_configuration = { + .pll_config_items = ARRAY_SIZE(ox03a10_pll_configurations), + .pll_configs = ox03a10_pll_configurations, + + .id_reg_items = ARRAY_SIZE(ox03a10_sensor_detect_regset), + .id_regs = ox03a10_sensor_detect_regset, + + .subdev_items = ARRAY_SIZE(ox03a10_sensor_subdevs), + .subdevs = ox03a10_sensor_subdevs, + + .sensor_limits = &ox03a10_sensor_limits, + + .modes_items = ARRAY_SIZE(ox03a10_modes), + .modes = ox03a10_modes, + + .v4l2_ctrls_items = ARRAY_SIZE(ox03a10_v4l2_ctrls), + .v4l2_ctrl_bank = ox03a10_v4l2_ctrls, + + .csi_fmts_items = ARRAY_SIZE(ox03a10_crl_csi_data_fmt), + .csi_fmts = ox03a10_crl_csi_data_fmt, + + .flip_items = ARRAY_SIZE(ox03a10_flip_configurations), + .flip_data = ox03a10_flip_configurations, + + .streamoff_regs_items = ARRAY_SIZE(ox03a10_streamoff_regs), + .streamoff_regs = ox03a10_streamoff_regs, + + .frame_desc_entries = ARRAY_SIZE(ox03a10_frame_desc), + .frame_desc_type = CRL_V4L2_MBUS_FRAME_DESC_TYPE_CSI2, + .frame_desc = ox03a10_frame_desc, +}; + +#endif /* __CRLMODULE_OX03A10_CONFIGURATION_H_ */ diff --git a/drivers/media/i2c/crlmodule/crl_pixter_stub_configuration.h b/drivers/media/i2c/crlmodule/crl_pixter_stub_configuration.h new file mode 100644 index 0000000000000..09f03193ca225 --- /dev/null +++ b/drivers/media/i2c/crlmodule/crl_pixter_stub_configuration.h @@ -0,0 +1,1386 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (C) 2016 - 2018 Intel Corporation + * + * Author: Wang, Zaikuo + * + */ + +#ifndef __CRLMODULE_PIXTER_STUB_CONFIGURATION_H_ +#define __CRLMODULE_PIXTER_STUB_CONFIGURATION_H_ + +#include "crlmodule-sensor-ds.h" + +static struct crl_pll_configuration pixter_stub_pll_configurations[] = { + { + .input_clk = 24000000, + .op_sys_clk = 400000000, + .bitsperpixel = 8, + .pixel_rate_csi = 
800000000, + .pixel_rate_pa = 800000000, + .comp_items = 0, + .ctrl_data = 0, + .pll_regs_items = 0, + .pll_regs = NULL, + }, + { + .input_clk = 24000000, + .op_sys_clk = 400000000, + .bitsperpixel = 10, + .pixel_rate_csi = 800000000, + .pixel_rate_pa = 800000000, + .comp_items = 0, + .ctrl_data = 0, + .pll_regs_items = 0, + .pll_regs = NULL, + }, + { + .input_clk = 24000000, + .op_sys_clk = 400000000, + .bitsperpixel = 12, + .pixel_rate_csi = 800000000, + .pixel_rate_pa = 800000000, + .comp_items = 0, + .ctrl_data = 0, + .pll_regs_items = 0, + .pll_regs = NULL, + }, + { + .input_clk = 24000000, + .op_sys_clk = 400000000, + .bitsperpixel = 16, + .pixel_rate_csi = 800000000, + .pixel_rate_pa = 800000000, + .comp_items = 0, + .ctrl_data = 0, + .pll_regs_items = 0, + .pll_regs = NULL, + }, + { + .input_clk = 24000000, + .op_sys_clk = 400000000, + .bitsperpixel = 20, + .pixel_rate_csi = 800000000, + .pixel_rate_pa = 800000000, + .comp_items = 0, + .ctrl_data = 0, + .pll_regs_items = 0, + .pll_regs = NULL, + }, + { + .input_clk = 24000000, + .op_sys_clk = 400000000, + .bitsperpixel = 24, + .pixel_rate_csi = 800000000, + .pixel_rate_pa = 800000000, + .comp_items = 0, + .ctrl_data = 0, + .pll_regs_items = 0, + .pll_regs = NULL, + }, +}; + +/* resolutions for linux pss with yuv/rgb pass-through */ +static struct crl_subdev_rect_rep pixter_stub_vga_rects[] = { + { + .subdev_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .in_rect = { 0, 0, 4096, 3072 }, + .out_rect = { 0, 0, 4096, 3072 }, + }, + { + .subdev_type = CRL_SUBDEV_TYPE_SCALER, + .in_rect = { 0, 0, 4096, 3072 }, + .out_rect = { 0, 0, 640, 480 }, + }, +}; + +static struct crl_subdev_rect_rep pixter_stub_720p_rects[] = { + { + .subdev_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .in_rect = { 0, 0, 4096, 3072 }, + .out_rect = { 0, 0, 4096, 3072 }, + }, + { + .subdev_type = CRL_SUBDEV_TYPE_SCALER, + .in_rect = { 0, 0, 4096, 3072 }, + .out_rect = { 0, 0, 1280, 720 }, + }, +}; + +static struct crl_subdev_rect_rep 
pixter_stub_1080p_rects[] = { + { + .subdev_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .in_rect = { 0, 0, 4096, 3072 }, + .out_rect = { 0, 0, 4096, 3072 }, + }, + { + .subdev_type = CRL_SUBDEV_TYPE_SCALER, + .in_rect = { 0, 0, 4096, 3072 }, + .out_rect = { 0, 0, 1920, 1080 }, + }, +}; + +static struct crl_subdev_rect_rep pixter_stub_4p5_rects[] = { + { + .subdev_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .in_rect = { 0, 0, 4096, 3072 }, + .out_rect = { 0, 0, 4096, 3072 }, + }, + { + .subdev_type = CRL_SUBDEV_TYPE_SCALER, + .in_rect = { 0, 0, 4096, 3072 }, + .out_rect = { 0, 0, 2816, 1600 }, + }, +}; + +static struct crl_subdev_rect_rep pixter_stub_4k_rects[] = { + { + .subdev_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .in_rect = { 0, 0, 4096, 3072 }, + .out_rect = { 0, 0, 4096, 3072 }, + }, + { + .subdev_type = CRL_SUBDEV_TYPE_SCALER, + .in_rect = { 0, 0, 4096, 3072 }, + .out_rect = { 0, 0, 3840, 2160 }, + }, +}; + +static struct crl_subdev_rect_rep pixter_stub_480i_rects[] = { + { + .subdev_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .in_rect = { 0, 0, 4096, 3072 }, + .out_rect = { 0, 0, 4096, 3072 }, + }, + { + .subdev_type = CRL_SUBDEV_TYPE_SCALER, + .in_rect = { 0, 0, 4096, 3072 }, + .out_rect = { 0, 0, 720, 240 }, + }, +}; + +static struct crl_subdev_rect_rep pixter_stub_576i_rects[] = { + { + .subdev_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .in_rect = { 0, 0, 4096, 3072 }, + .out_rect = { 0, 0, 4096, 3072 }, + }, + { + .subdev_type = CRL_SUBDEV_TYPE_SCALER, + .in_rect = { 0, 0, 4096, 3072 }, + .out_rect = { 0, 0, 720, 288 }, + }, +}; + +static struct crl_subdev_rect_rep pixter_stub_1080i_rects[] = { + { + .subdev_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .in_rect = { 0, 0, 4096, 3072 }, + .out_rect = { 0, 0, 4096, 3072 }, + }, + { + .subdev_type = CRL_SUBDEV_TYPE_SCALER, + .in_rect = { 0, 0, 4096, 3072 }, + .out_rect = { 0, 0, 1920, 540 }, + }, +}; + +/* vga for linux pss with imx135/imx477 input simulation */ +static struct crl_subdev_rect_rep pixter_stub_vga_pad1_rects[] = { + { + 
.subdev_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .in_rect = { 0, 0, 4096, 3072 }, + .out_rect = { 0, 0, 4096, 3072 }, + }, + { + .subdev_type = CRL_SUBDEV_TYPE_SCALER, + .in_rect = { 0, 0, 4096, 3072 }, + .out_rect = { 0, 0, 672, 512 }, + }, +}; + +/* vga for linux pss with imx135/imx477 input simulation */ +static struct crl_subdev_rect_rep pixter_stub_vga_pad2_rects[] = { + { + .subdev_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .in_rect = { 0, 0, 4096, 3072 }, + .out_rect = { 0, 0, 4096, 3072 }, + }, + { + .subdev_type = CRL_SUBDEV_TYPE_SCALER, + .in_rect = { 0, 0, 4096, 3072 }, + .out_rect = { 0, 0, 768, 576 }, + }, +}; + + +/* 720p for linux pss with imx135/imx477 input simulation */ +static struct crl_subdev_rect_rep pixter_stub_720p_pad1_rects[] = { + { + .subdev_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .in_rect = { 0, 0, 4096, 3072 }, + .out_rect = { 0, 0, 4096, 3072 }, + }, + { + .subdev_type = CRL_SUBDEV_TYPE_SCALER, + .in_rect = { 0, 0, 4096, 3072 }, + .out_rect = { 0, 0, 1312, 768 }, + }, +}; + +/* 720p for linux pss with imx135/imx477 input simulation */ +static struct crl_subdev_rect_rep pixter_stub_720p_pad2_rects[] = { + { + .subdev_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .in_rect = { 0, 0, 4096, 3072 }, + .out_rect = { 0, 0, 4096, 3072 }, + }, + { + .subdev_type = CRL_SUBDEV_TYPE_SCALER, + .in_rect = { 0, 0, 4096, 3072 }, + .out_rect = { 0, 0, 1312, 984 }, + }, +}; + + +/* 1080p for linux pss with imx135 input simulation */ +static struct crl_subdev_rect_rep pixter_stub_1080p_pad1_rects[] = { + { + .subdev_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .in_rect = { 0, 0, 4096, 3072 }, + .out_rect = { 0, 0, 4096, 3072 }, + }, + { + .subdev_type = CRL_SUBDEV_TYPE_SCALER, + .in_rect = { 0, 0, 4096, 3072 }, + .out_rect = { 0, 0, 1952, 1120 }, + }, +}; + +/* 1080p for linux pss with imx477 input simulation */ +static struct crl_subdev_rect_rep pixter_stub_1080p_pad2_rects[] = { + { + .subdev_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .in_rect = { 0, 0, 4096, 3072 }, + .out_rect = 
{ 0, 0, 4096, 3072 }, + }, + { + .subdev_type = CRL_SUBDEV_TYPE_SCALER, + .in_rect = { 0, 0, 4096, 3072 }, + .out_rect = { 0, 0, 2048, 1128 }, + }, +}; + + +/* 1080p for linux pss with imx477 input simulation */ +static struct crl_subdev_rect_rep pixter_stub_1080p_pad3_rects[] = { + { + .subdev_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .in_rect = { 0, 0, 4096, 3072 }, + .out_rect = { 0, 0, 4096, 3072 }, + }, + { + .subdev_type = CRL_SUBDEV_TYPE_SCALER, + .in_rect = { 0, 0, 4096, 3072 }, + .out_rect = { 0, 0, 2048, 1536 }, + }, +}; + + +/* 2816x1600 for linux pss with imx135/imx477 input simulation */ +static struct crl_subdev_rect_rep pixter_stub_4p5_pad1_rects[] = { + { + .subdev_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .in_rect = { 0, 0, 4096, 3072 }, + .out_rect = { 0, 0, 4096, 3072 }, + }, + { + .subdev_type = CRL_SUBDEV_TYPE_SCALER, + .in_rect = { 0, 0, 4096, 3072 }, + .out_rect = { 0, 0, 2848, 1632 }, + }, +}; +/* 2816x1600 for linux pss with imx135/imx477 input simulation */ +static struct crl_subdev_rect_rep pixter_stub_4p5_pad2_rects[] = { + { + .subdev_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .in_rect = { 0, 0, 4096, 3072 }, + .out_rect = { 0, 0, 4096, 3072 }, + }, + { + .subdev_type = CRL_SUBDEV_TYPE_SCALER, + .in_rect = { 0, 0, 4096, 3072 }, + .out_rect = { 0, 0, 2848, 2136 }, + }, +}; + + +/* 4k for linux pss with imx135 input simulation */ +static struct crl_subdev_rect_rep pixter_stub_4k_pad1_rects[] = { + { + .subdev_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .in_rect = { 0, 0, 4096, 3072 }, + .out_rect = { 0, 0, 4096, 3072 }, + }, + { + .subdev_type = CRL_SUBDEV_TYPE_SCALER, + .in_rect = { 0, 0, 4096, 3072 }, + .out_rect = { 0, 0, 3872, 2208 }, + }, +}; + +/* 4k for linux pss with imx477 input simulation */ +static struct crl_subdev_rect_rep pixter_stub_4k_pad2_rects[] = { + { + .subdev_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .in_rect = { 0, 0, 4096, 3072 }, + .out_rect = { 0, 0, 4096, 3072 }, + }, + { + .subdev_type = CRL_SUBDEV_TYPE_SCALER, + .in_rect = { 0, 0, 
4096, 3072 }, + .out_rect = { 0, 0, 4064, 2288 }, + }, +}; + +/* 4k for linux pss with imx477 input simulation */ +static struct crl_subdev_rect_rep pixter_stub_4k_pad3_rects[] = { + { + .subdev_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .in_rect = { 0, 0, 4096, 3072 }, + .out_rect = { 0, 0, 4096, 3072 }, + }, + { + .subdev_type = CRL_SUBDEV_TYPE_SCALER, + .in_rect = { 0, 0, 4096, 3072 }, + .out_rect = { 0, 0, 4032, 2288 }, + }, +}; + +/* 4096x3072 for linux pss with imx135/imx477 full input simulation */ +static struct crl_subdev_rect_rep pixter_stub_full_rects[] = { + { + .subdev_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .in_rect = { 0, 0, 4096, 3072 }, + .out_rect = { 0, 0, 4096, 3072 }, + }, + { + .subdev_type = CRL_SUBDEV_TYPE_SCALER, + .in_rect = { 0, 0, 4096, 3072 }, + .out_rect = { 0, 0, 4096, 3072 }, + }, +}; + + +static struct crl_mode_rep pixter_stub_modes[] = { + { + .sd_rects_items = ARRAY_SIZE(pixter_stub_vga_rects), + .sd_rects = pixter_stub_vga_rects, + .scale_m = 1, + .width = 640, + .height = 480, + .min_llp = 6024, + .min_fll = 4096, + .comp_items = 0, + .ctrl_data = 0, + .mode_regs_items = 0, + .mode_regs = NULL, + }, + { + .sd_rects_items = ARRAY_SIZE(pixter_stub_720p_rects), + .sd_rects = pixter_stub_720p_rects, + .scale_m = 1, + .width = 1280, + .height = 720, + .min_llp = 6024, + .min_fll = 4096, + .comp_items = 0, + .ctrl_data = 0, + .mode_regs_items = 0, + .mode_regs = NULL, + }, + { + .sd_rects_items = ARRAY_SIZE(pixter_stub_1080p_rects), + .sd_rects = pixter_stub_1080p_rects, + .scale_m = 1, + .width = 1920, + .height = 1080, + .min_llp = 6024, + .min_fll = 4096, + .comp_items = 0, + .ctrl_data = 0, + .mode_regs_items = 0, + .mode_regs = NULL, + }, + { + .sd_rects_items = ARRAY_SIZE(pixter_stub_4p5_rects), + .sd_rects = pixter_stub_4p5_rects, + .scale_m = 1, + .width = 2816, + .height = 1600, + .min_llp = 6024, + .min_fll = 4096, + .comp_items = 0, + .ctrl_data = 0, + .mode_regs_items = 0, + .mode_regs = NULL, + }, + { + .sd_rects_items = 
ARRAY_SIZE(pixter_stub_4k_rects), + .sd_rects = pixter_stub_4k_rects, + .scale_m = 1, + .width = 3840, + .height = 2160, + .min_llp = 6024, + .min_fll = 4096, + .comp_items = 0, + .ctrl_data = 0, + .mode_regs_items = 0, + .mode_regs = NULL, + }, + { + .sd_rects_items = ARRAY_SIZE(pixter_stub_vga_pad1_rects), + .sd_rects = pixter_stub_vga_pad1_rects, + .scale_m = 1, + .width = 672, + .height = 512, + .min_llp = 6024, + .min_fll = 4096, + .comp_items = 0, + .ctrl_data = 0, + .mode_regs_items = 0, + .mode_regs = NULL, + }, + { + .sd_rects_items = ARRAY_SIZE(pixter_stub_vga_pad2_rects), + .sd_rects = pixter_stub_vga_pad2_rects, + .scale_m = 1, + .width = 768, + .height = 576, + .min_llp = 6024, + .min_fll = 4096, + .comp_items = 0, + .ctrl_data = 0, + .mode_regs_items = 0, + .mode_regs = NULL, + }, + { + .sd_rects_items = ARRAY_SIZE(pixter_stub_720p_pad1_rects), + .sd_rects = pixter_stub_720p_pad1_rects, + .scale_m = 1, + .width = 1312, + .height = 768, + .min_llp = 6024, + .min_fll = 4096, + .comp_items = 0, + .ctrl_data = 0, + .mode_regs_items = 0, + .mode_regs = NULL, + }, + { + .sd_rects_items = ARRAY_SIZE(pixter_stub_720p_pad2_rects), + .sd_rects = pixter_stub_720p_pad2_rects, + .scale_m = 1, + .width = 1312, + .height = 984, + .min_llp = 6024, + .min_fll = 4096, + .comp_items = 0, + .ctrl_data = 0, + .mode_regs_items = 0, + .mode_regs = NULL, + }, + { + .sd_rects_items = ARRAY_SIZE(pixter_stub_1080p_pad1_rects), + .sd_rects = pixter_stub_1080p_pad1_rects, + .scale_m = 1, + .width = 1952, + .height = 1120, + .min_llp = 6024, + .min_fll = 4096, + .comp_items = 0, + .ctrl_data = 0, + .mode_regs_items = 0, + .mode_regs = NULL, + }, + { + .sd_rects_items = ARRAY_SIZE(pixter_stub_1080p_pad2_rects), + .sd_rects = pixter_stub_1080p_pad2_rects, + .scale_m = 1, + .width = 2048, + .height = 1128, + .min_llp = 6024, + .min_fll = 4096, + .comp_items = 0, + .ctrl_data = 0, + .mode_regs_items = 0, + .mode_regs = NULL, + }, + { + .sd_rects_items = 
ARRAY_SIZE(pixter_stub_1080p_pad3_rects), + .sd_rects = pixter_stub_1080p_pad3_rects, + .scale_m = 1, + .width = 2048, + .height = 1536, + .min_llp = 6024, + .min_fll = 4096, + .comp_items = 0, + .ctrl_data = 0, + .mode_regs_items = 0, + .mode_regs = NULL, + }, + + { + .sd_rects_items = ARRAY_SIZE(pixter_stub_4p5_pad1_rects), + .sd_rects = pixter_stub_4p5_pad1_rects, + .scale_m = 1, + .width = 2848, + .height = 1632, + .min_llp = 6024, + .min_fll = 4096, + .comp_items = 0, + .ctrl_data = 0, + .mode_regs_items = 0, + .mode_regs = NULL, + }, + { + .sd_rects_items = ARRAY_SIZE(pixter_stub_4p5_pad2_rects), + .sd_rects = pixter_stub_4p5_pad2_rects, + .scale_m = 1, + .width = 2848, + .height = 2136, + .min_llp = 6024, + .min_fll = 4096, + .comp_items = 0, + .ctrl_data = 0, + .mode_regs_items = 0, + .mode_regs = NULL, + }, + { + .sd_rects_items = ARRAY_SIZE(pixter_stub_4k_pad1_rects), + .sd_rects = pixter_stub_4k_pad1_rects, + .scale_m = 1, + .width = 3872, + .height = 2208, + .min_llp = 6024, + .min_fll = 4096, + .comp_items = 0, + .ctrl_data = 0, + .mode_regs_items = 0, + .mode_regs = NULL, + }, + { + .sd_rects_items = ARRAY_SIZE(pixter_stub_4k_pad2_rects), + .sd_rects = pixter_stub_4k_pad2_rects, + .scale_m = 1, + .width = 4064, + .height = 2288, + .min_llp = 6024, + .min_fll = 4096, + .comp_items = 0, + .ctrl_data = 0, + .mode_regs_items = 0, + .mode_regs = NULL, + }, + { + .sd_rects_items = ARRAY_SIZE(pixter_stub_4k_pad3_rects), + .sd_rects = pixter_stub_4k_pad3_rects, + .scale_m = 1, + .width = 4032, + .height = 2288, + .min_llp = 6024, + .min_fll = 4096, + .comp_items = 0, + .ctrl_data = 0, + .mode_regs_items = 0, + .mode_regs = NULL, + }, + { + .sd_rects_items = ARRAY_SIZE(pixter_stub_full_rects), + .sd_rects = pixter_stub_full_rects, + .scale_m = 1, + .width = 4096, + .height = 3072, + .min_llp = 6024, + .min_fll = 4096, + .comp_items = 0, + .ctrl_data = 0, + .mode_regs_items = 0, + .mode_regs = NULL, + }, + { + .sd_rects_items = 
ARRAY_SIZE(pixter_stub_480i_rects), + .sd_rects = pixter_stub_480i_rects, + .scale_m = 1, + .width = 720, + .height = 240, + .min_llp = 6024, + .min_fll = 4096, + .comp_items = 0, + .ctrl_data = 0, + .mode_regs_items = 0, + .mode_regs = NULL, + }, + { + .sd_rects_items = ARRAY_SIZE(pixter_stub_576i_rects), + .sd_rects = pixter_stub_576i_rects, + .scale_m = 1, + .width = 720, + .height = 288, + .min_llp = 6024, + .min_fll = 4096, + .comp_items = 0, + .ctrl_data = 0, + .mode_regs_items = 0, + .mode_regs = NULL, + }, + { + .sd_rects_items = ARRAY_SIZE(pixter_stub_1080i_rects), + .sd_rects = pixter_stub_1080i_rects, + .scale_m = 1, + .width = 1920, + .height = 540, + .min_llp = 6024, + .min_fll = 4096, + .comp_items = 0, + .ctrl_data = 0, + .mode_regs_items = 0, + .mode_regs = NULL, + }, +}; + +static struct crl_sensor_subdev_config pixter_stub_sensor_subdevs[] = { + { + .subdev_type = CRL_SUBDEV_TYPE_SCALER, + .name = "pixter_stub scaler", + }, + { + .subdev_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .name = "pixter_stub pixel array", + }, +}; + +static struct crl_sensor_subdev_config pixter_stub_b_sensor_subdevs[] = { + { + .subdev_type = CRL_SUBDEV_TYPE_SCALER, + .name = "pixter_stubB scaler", + }, + { + .subdev_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .name = "pixter_stubB pixel array", + }, +}; + +static struct crl_sensor_subdev_config pixter_stub_c_sensor_subdevs[] = { + { + .subdev_type = CRL_SUBDEV_TYPE_SCALER, + .name = "pixter_stubC scaler", + }, + { + .subdev_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .name = "pixter_stubC pixel array", + }, +}; + +static struct crl_sensor_subdev_config pixter_stub_d_sensor_subdevs[] = { + { + .subdev_type = CRL_SUBDEV_TYPE_SCALER, + .name = "pixter_stubD scaler", + }, + { + .subdev_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .name = "pixter_stubD pixel array", + }, +}; + +static struct crl_sensor_subdev_config pixter_stub_e_sensor_subdevs[] = { + { + .subdev_type = CRL_SUBDEV_TYPE_SCALER, + .name = "pixter_stubE scaler", + }, + { + 
.subdev_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .name = "pixter_stubE pixel array", + }, +}; + +static struct crl_sensor_subdev_config pixter_stub_f_sensor_subdevs[] = { + { + .subdev_type = CRL_SUBDEV_TYPE_SCALER, + .name = "pixter_stubF scaler", + }, + { + .subdev_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .name = "pixter_stubF pixel array", + }, +}; + +static struct crl_sensor_subdev_config pixter_stub_g_sensor_subdevs[] = { + { + .subdev_type = CRL_SUBDEV_TYPE_SCALER, + .name = "pixter_stubG scaler", + }, + { + .subdev_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .name = "pixter_stubG pixel array", + }, +}; + +static struct crl_sensor_subdev_config pixter_stub_h_sensor_subdevs[] = { + { + .subdev_type = CRL_SUBDEV_TYPE_SCALER, + .name = "pixter_stubH scaler", + }, + { + .subdev_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .name = "pixter_stubH pixel array", + }, +}; + +static struct crl_sensor_limits pixter_stub_sensor_limits = { + .x_addr_min = 0, + .y_addr_min = 0, + .x_addr_max = 4096, + .y_addr_max = 3072, + .min_frame_length_lines = 160, + .max_frame_length_lines = 8192, + .min_line_length_pixels = 6024, + .max_line_length_pixels = 8192, + .scaler_m_min = 1, + .scaler_m_max = 1, + .scaler_n_min = 1, + .scaler_n_max = 1, + .min_even_inc = 1, + .max_even_inc = 1, + .min_odd_inc = 1, + .max_odd_inc = 1, +}; + +/* no flip for pixter stub as no real sensor HW */ +static struct crl_flip_data pixter_stub_flip_configurations[] = { + { + .flip = CRL_FLIP_DEFAULT_NONE, + .pixel_order = CRL_PIXEL_ORDER_IGNORE, + }, + { + .flip = CRL_FLIP_VFLIP, + .pixel_order = CRL_PIXEL_ORDER_IGNORE, + }, + { + .flip = CRL_FLIP_HFLIP, + .pixel_order = CRL_PIXEL_ORDER_IGNORE, + }, + { + .flip = CRL_FLIP_HFLIP_VFLIP, + .pixel_order = CRL_PIXEL_ORDER_IGNORE, + }, +}; + +static struct crl_csi_data_fmt pixter_stub_crl_csi_data_fmt[] = { + { + .code = MEDIA_BUS_FMT_SGRBG8_1X8, + .pixel_order = CRL_PIXEL_ORDER_GRBG, + .bits_per_pixel = 8, + }, + { + .code = MEDIA_BUS_FMT_SRGGB8_1X8, + .pixel_order = 
CRL_PIXEL_ORDER_RGGB, + .bits_per_pixel = 8, + }, + { + .code = MEDIA_BUS_FMT_SBGGR8_1X8, + .pixel_order = CRL_PIXEL_ORDER_BGGR, + .bits_per_pixel = 8, + }, + { + .code = MEDIA_BUS_FMT_SGBRG8_1X8, + .pixel_order = CRL_PIXEL_ORDER_GBRG, + .bits_per_pixel = 8, + }, + { + .code = MEDIA_BUS_FMT_UYVY8_1X16, + .pixel_order = CRL_PIXEL_ORDER_IGNORE, + .bits_per_pixel = 16, + }, + { + .code = MEDIA_BUS_FMT_YUYV8_1X16, + .pixel_order = CRL_PIXEL_ORDER_IGNORE, + .bits_per_pixel = 16, + }, + { + .code = MEDIA_BUS_FMT_RGB565_1X16, + .pixel_order = CRL_PIXEL_ORDER_IGNORE, + .bits_per_pixel = 16, + }, + { + .code = MEDIA_BUS_FMT_RGB888_1X24, + .pixel_order = CRL_PIXEL_ORDER_IGNORE, + .bits_per_pixel = 24, + }, + { + .code = MEDIA_BUS_FMT_YUYV10_1X20, + .pixel_order = CRL_PIXEL_ORDER_IGNORE, + .bits_per_pixel = 20, + }, + { + .code = MEDIA_BUS_FMT_SGRBG10_1X10, + .pixel_order = CRL_PIXEL_ORDER_GRBG, + .bits_per_pixel = 10, + }, + { + .code = MEDIA_BUS_FMT_SRGGB10_1X10, + .pixel_order = CRL_PIXEL_ORDER_RGGB, + .bits_per_pixel = 10, + }, + { + .code = MEDIA_BUS_FMT_SBGGR10_1X10, + .pixel_order = CRL_PIXEL_ORDER_BGGR, + .bits_per_pixel = 10, + }, + { + .code = MEDIA_BUS_FMT_SGBRG10_1X10, + .pixel_order = CRL_PIXEL_ORDER_GBRG, + .bits_per_pixel = 10, + }, + { + .code = MEDIA_BUS_FMT_SGRBG12_1X12, + .pixel_order = CRL_PIXEL_ORDER_GRBG, + .bits_per_pixel = 12, + }, + { + .code = MEDIA_BUS_FMT_SRGGB12_1X12, + .pixel_order = CRL_PIXEL_ORDER_RGGB, + .bits_per_pixel = 12, + }, + { + .code = MEDIA_BUS_FMT_SBGGR12_1X12, + .pixel_order = CRL_PIXEL_ORDER_BGGR, + .bits_per_pixel = 12, + }, + { + .code = MEDIA_BUS_FMT_SGBRG12_1X12, + .pixel_order = CRL_PIXEL_ORDER_GBRG, + .bits_per_pixel = 12, + }, +}; + +static struct crl_v4l2_ctrl pixter_stub_v4l2_ctrls[] = { + { + .sd_type = CRL_SUBDEV_TYPE_SCALER, + .op_type = CRL_V4L2_CTRL_SET_OP, + .context = SENSOR_IDLE, + .ctrl_id = V4L2_CID_LINK_FREQ, + .name = "V4L2_CID_LINK_FREQ", + .type = CRL_V4L2_CTRL_TYPE_MENU_INT, + .data.v4l2_int_menu.def = 0, + 
.data.v4l2_int_menu.max = 0, + .data.v4l2_int_menu.menu = 0, + .flags = 0, + .impact = CRL_IMPACTS_NO_IMPACT, + .ctrl = 0, + .regs_items = 0, + .regs = 0, + .dep_items = 0, + .dep_ctrls = 0, + }, + { + .sd_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .op_type = CRL_V4L2_CTRL_GET_OP, + .context = SENSOR_POWERED_ON, + .ctrl_id = V4L2_CID_PIXEL_RATE, + .name = "V4L2_CID_PIXEL_RATE_PA", + .type = CRL_V4L2_CTRL_TYPE_INTEGER, + .data.std_data.min = 0, + .data.std_data.max = 800000000, + .data.std_data.step = 1, + .data.std_data.def = 0, + .flags = 0, + .impact = CRL_IMPACTS_NO_IMPACT, + .ctrl = 0, + .regs_items = 0, + .regs = 0, + .dep_items = 0, + .dep_ctrls = 0, + }, + { + .sd_type = CRL_SUBDEV_TYPE_SCALER, + .op_type = CRL_V4L2_CTRL_GET_OP, + .context = SENSOR_POWERED_ON, + .ctrl_id = V4L2_CID_PIXEL_RATE, + .name = "V4L2_CID_PIXEL_RATE_CSI", + .type = CRL_V4L2_CTRL_TYPE_INTEGER, + .data.std_data.min = 0, + .data.std_data.max = 800000000, + .data.std_data.step = 1, + .data.std_data.def = 0, + .flags = 0, + .impact = CRL_IMPACTS_NO_IMPACT, + .ctrl = 0, + .regs_items = 0, + .regs = 0, + .dep_items = 0, + .dep_ctrls = 0, + }, + { + .sd_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .op_type = CRL_V4L2_CTRL_SET_OP, + .context = SENSOR_POWERED_ON, + .ctrl_id = V4L2_CID_ANALOGUE_GAIN, + .name = "V4L2_CID_ANALOGUE_GAIN", + .type = CRL_V4L2_CTRL_TYPE_INTEGER, + .data.std_data.min = 0, + .data.std_data.max = 4096, + .data.std_data.step = 1, + .data.std_data.def = 128, + .flags = 0, + .impact = CRL_IMPACTS_NO_IMPACT, + .ctrl = 0, + .regs_items = 0, + .regs = 0, + .dep_items = 0, + .dep_ctrls = 0, + }, + { + .sd_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .op_type = CRL_V4L2_CTRL_SET_OP, + .context = SENSOR_POWERED_ON, + .ctrl_id = V4L2_CID_EXPOSURE, + .name = "V4L2_CID_EXPOSURE", + .type = CRL_V4L2_CTRL_TYPE_INTEGER, + .data.std_data.min = 0, + .data.std_data.max = 65500, + .data.std_data.step = 1, + .data.std_data.def = 0, + .flags = 0, + .impact = CRL_IMPACTS_NO_IMPACT, + .ctrl = 0, + .regs_items = 
0, + .regs = 0, + .dep_items = 0, + .dep_ctrls = 0, + }, + { + .sd_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .op_type = CRL_V4L2_CTRL_SET_OP, + .context = SENSOR_POWERED_ON, + .ctrl_id = CRL_CID_EXPOSURE_SHS1, + .type = CRL_V4L2_CTRL_TYPE_CUSTOM, + .name = "CRL_CID_EXPOSURE_SHS1", + .data.std_data.min = 4, + .data.std_data.max = 65500, + .data.std_data.step = 1, + .data.std_data.def = 0x5500, + .flags = V4L2_CTRL_FLAG_UPDATE, + .impact = CRL_IMPACTS_NO_IMPACT, + .ctrl = 0, + .regs_items = 0, + .regs = 0, + .dep_items = 0, + .dep_ctrls = 0, + .v4l2_type = V4L2_CTRL_TYPE_INTEGER + }, + { + .sd_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .op_type = CRL_V4L2_CTRL_SET_OP, + .context = SENSOR_POWERED_ON, + .ctrl_id = CRL_CID_EXPOSURE_SHS2, + .type = CRL_V4L2_CTRL_TYPE_CUSTOM, + .name = "CRL_CID_EXPOSURE_SHS2", + .data.std_data.min = 4, + .data.std_data.max = 65500, + .data.std_data.step = 1, + .data.std_data.def = 0x500, + .flags = V4L2_CTRL_FLAG_UPDATE, + .impact = CRL_IMPACTS_NO_IMPACT, + .ctrl = 0, + .regs_items = 0, + .regs = 0, + .dep_items = 0, + .dep_ctrls = 0, + .v4l2_type = V4L2_CTRL_TYPE_INTEGER + }, + { + .sd_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .op_type = CRL_V4L2_CTRL_SET_OP, + .context = SENSOR_POWERED_ON, + .ctrl_id = CRL_CID_EXPOSURE_SHS3, + .type = CRL_V4L2_CTRL_TYPE_CUSTOM, + .name = "CRL_CID_EXPOSURE_SHS3", + .data.std_data.min = 4, + .data.std_data.max = 65500, + .data.std_data.step = 1, + .data.std_data.def = 0x1000, + .flags = V4L2_CTRL_FLAG_UPDATE, + .impact = CRL_IMPACTS_NO_IMPACT, + .ctrl = 0, + .regs_items = 0, + .regs = 0, + .dep_items = 0, + .dep_ctrls = 0, + .v4l2_type = V4L2_CTRL_TYPE_INTEGER + }, + { + .sd_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .op_type = CRL_V4L2_CTRL_SET_OP, + .context = SENSOR_POWERED_ON, + .ctrl_id = V4L2_CID_HFLIP, + .name = "V4L2_CID_HFLIP", + .type = CRL_V4L2_CTRL_TYPE_INTEGER, + .data.std_data.min = 0, + .data.std_data.max = 1, + .data.std_data.step = 1, + .data.std_data.def = 0, + .flags = 0, + .impact = 
CRL_IMPACTS_NO_IMPACT, + .ctrl = 0, + .regs_items = 0, + .regs = 0, + .dep_items = 0, + .dep_ctrls = 0, + }, + { + .sd_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .op_type = CRL_V4L2_CTRL_SET_OP, + .context = SENSOR_POWERED_ON, + .ctrl_id = V4L2_CID_VFLIP, + .name = "V4L2_CID_VFLIP", + .type = CRL_V4L2_CTRL_TYPE_INTEGER, + .data.std_data.min = 0, + .data.std_data.max = 1, + .data.std_data.step = 1, + .data.std_data.def = 0, + .flags = 0, + .impact = CRL_IMPACTS_NO_IMPACT, + .ctrl = 0, + .regs_items = 0, + .regs = 0, + .dep_items = 0, + .dep_ctrls = 0, + }, + { + .sd_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .op_type = CRL_V4L2_CTRL_SET_OP, + .context = SENSOR_POWERED_ON, + .ctrl_id = V4L2_CID_FRAME_LENGTH_LINES, + .name = "Frame length lines", + .type = CRL_V4L2_CTRL_TYPE_CUSTOM, + .data.std_data.min = 160, + .data.std_data.max = 65535, + .data.std_data.step = 1, + .data.std_data.def = 4130, + .flags = V4L2_CTRL_FLAG_UPDATE, + .impact = CRL_IMPACTS_NO_IMPACT, + .ctrl = 0, + .regs_items = 0, + .regs = 0, + .dep_items = 0, + .dep_ctrls = 0, + .v4l2_type = V4L2_CTRL_TYPE_INTEGER, + }, + { + .sd_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .op_type = CRL_V4L2_CTRL_SET_OP, + .context = SENSOR_POWERED_ON, + .ctrl_id = V4L2_CID_LINE_LENGTH_PIXELS, + .name = "Line Length Pixels", + .type = CRL_V4L2_CTRL_TYPE_CUSTOM, + .data.std_data.min = 6024, + .data.std_data.max = 65520, + .data.std_data.step = 1, + .data.std_data.def = 6024, + .flags = V4L2_CTRL_FLAG_UPDATE, + .impact = CRL_IMPACTS_NO_IMPACT, + .ctrl = 0, + .regs_items = 0, + .regs = 0, + .dep_items = 0, + .dep_ctrls = 0, + .v4l2_type = V4L2_CTRL_TYPE_INTEGER, + }, + { + .sd_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .op_type = CRL_V4L2_CTRL_SET_OP, + .context = SENSOR_POWERED_ON, + .ctrl_id = V4L2_CID_GAIN, + .name = "Digital Gain", + .type = CRL_V4L2_CTRL_TYPE_INTEGER, + .data.std_data.min = 0, + .data.std_data.max = 4095, + .data.std_data.step = 1, + .data.std_data.def = 1024, + .flags = 0, + .impact = CRL_IMPACTS_NO_IMPACT, + .ctrl = 0, 
+ .regs_items = 0, + .regs = 0, + .dep_items = 0, + .dep_ctrls = 0, + }, + { + .sd_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .op_type = CRL_V4L2_CTRL_SET_OP, + .context = SENSOR_POWERED_ON, + .ctrl_id = CRL_CID_ANALOG_GAIN_L, + .name = "CRL_CID_ANALOG_GAIN_L", + .type = CRL_V4L2_CTRL_TYPE_CUSTOM, + .data.std_data.min = 0, + .data.std_data.max = 0x978, + .data.std_data.step = 1, + .data.std_data.def = 0, + .flags = 0, + .impact = CRL_IMPACTS_NO_IMPACT, + .ctrl = 0, + .regs_items = 0, + .regs = 0, + .dep_items = 0, + .dep_ctrls = 0, + .v4l2_type = V4L2_CTRL_TYPE_INTEGER, + }, + { + .sd_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .op_type = CRL_V4L2_CTRL_SET_OP, + .context = SENSOR_POWERED_ON, + .ctrl_id = CRL_CID_ANALOG_GAIN_S, + .name = "CRL_CID_ANALOG_GAIN_S", + .type = CRL_V4L2_CTRL_TYPE_CUSTOM, + .data.std_data.min = 0, + .data.std_data.max = 0x978, + .data.std_data.step = 1, + .data.std_data.def = 0, + .flags = V4L2_CTRL_FLAG_UPDATE, + .impact = CRL_IMPACTS_NO_IMPACT, + .ctrl = 0, + .regs_items = 0, + .regs = 0, + .dep_items = 0, + .dep_ctrls = 0, + .v4l2_type = V4L2_CTRL_TYPE_INTEGER, + }, + { + .sd_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .op_type = CRL_V4L2_CTRL_SET_OP, + .context = SENSOR_POWERED_ON, + .ctrl_id = CRL_CID_ANALOG_GAIN_VS, + .name = "CRL_CID_ANALOG_GAIN_VS", + .type = CRL_V4L2_CTRL_TYPE_CUSTOM, + .data.std_data.min = 0, + .data.std_data.max = 0x978, + .data.std_data.step = 1, + .data.std_data.def = 0, + .flags = V4L2_CTRL_FLAG_UPDATE, + .impact = CRL_IMPACTS_NO_IMPACT, + .ctrl = 0, + .regs_items = 0, + .regs = 0, + .dep_items = 0, + .dep_ctrls = 0, + .v4l2_type = V4L2_CTRL_TYPE_INTEGER, + }, + { + .sd_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .op_type = CRL_V4L2_CTRL_SET_OP, + .context = SENSOR_POWERED_ON, + .ctrl_id = CRL_CID_DIGITAL_GAIN_L, + .name = "CRL_CID_DIGITAL_GAIN_L", + .type = CRL_V4L2_CTRL_TYPE_CUSTOM, + .data.std_data.min = 0, + .data.std_data.max = 0x978, + .data.std_data.step = 1, + .data.std_data.def = 64, + .flags = V4L2_CTRL_FLAG_UPDATE, + 
.impact = CRL_IMPACTS_NO_IMPACT, + .ctrl = 0, + .regs_items = 0, + .regs = 0, + .dep_items = 0, + .dep_ctrls = 0, + .v4l2_type = V4L2_CTRL_TYPE_INTEGER, + }, + { + .sd_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .op_type = CRL_V4L2_CTRL_SET_OP, + .context = SENSOR_POWERED_ON, + .ctrl_id = CRL_CID_DIGITAL_GAIN_S, + .name = "CRL_CID_DIGITAL_GAIN_S", + .type = CRL_V4L2_CTRL_TYPE_CUSTOM, + .data.std_data.min = 0, + .data.std_data.max = 0x978, + .data.std_data.step = 1, + .data.std_data.def = 64, + .flags = V4L2_CTRL_FLAG_UPDATE, + .impact = CRL_IMPACTS_NO_IMPACT, + .ctrl = 0, + .regs_items = 0, + .regs = 0, + .dep_items = 0, + .dep_ctrls = 0, + .v4l2_type = V4L2_CTRL_TYPE_INTEGER, + }, + { + .sd_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .op_type = CRL_V4L2_CTRL_SET_OP, + .context = SENSOR_POWERED_ON, + .ctrl_id = CRL_CID_DIGITAL_GAIN_VS, + .name = "CRL_CID_DIGITAL_GAIN_VS", + .type = CRL_V4L2_CTRL_TYPE_CUSTOM, + .data.std_data.min = 0, + .data.std_data.max = 0x978, + .data.std_data.step = 1, + .data.std_data.def = 64, + .flags = V4L2_CTRL_FLAG_UPDATE, + .impact = CRL_IMPACTS_NO_IMPACT, + .ctrl = 0, + .regs_items = 0, + .regs = 0, + .dep_items = 0, + .dep_ctrls = 0, + .v4l2_type = V4L2_CTRL_TYPE_INTEGER, + }, +}; + +static struct crl_arithmetic_ops pixter_stub_frame_desc_width_ops[] = { + { + .op = CRL_ASSIGNMENT, + .operand.entity_type = CRL_DYNAMIC_VAL_OPERAND_TYPE_VAR_REF, + .operand.entity_val = CRL_VAR_REF_OUTPUT_WIDTH, + }, +}; + +static struct crl_arithmetic_ops pixter_stub_frame_desc_height_ops[] = { + { + .op = CRL_ASSIGNMENT, + .operand.entity_type = CRL_DYNAMIC_VAL_OPERAND_TYPE_CONST, + .operand.entity_val = 1, + }, +}; + +static struct crl_frame_desc pixter_stub_frame_desc[] = { + { + .flags.entity_val = 0, + .bpp.entity_type = CRL_DYNAMIC_VAL_OPERAND_TYPE_VAR_REF, + .bpp.entity_val = CRL_VAR_REF_BITSPERPIXEL, + .pixelcode.entity_val = MEDIA_BUS_FMT_FIXED, + .length.entity_val = 0, + .start_line.entity_val = 0, + .start_pixel.entity_val = 0, + .width = { + .ops_items 
= ARRAY_SIZE(pixter_stub_frame_desc_width_ops), + .ops = pixter_stub_frame_desc_width_ops, + }, + .height = { + .ops_items = ARRAY_SIZE(pixter_stub_frame_desc_height_ops), + .ops = pixter_stub_frame_desc_height_ops, + }, + .csi2_channel.entity_val = 0, + .csi2_data_type.entity_val = 0x12, + }, + { + .flags.entity_val = 0, + .bpp.entity_type = CRL_DYNAMIC_VAL_OPERAND_TYPE_VAR_REF, + .bpp.entity_val = CRL_VAR_REF_BITSPERPIXEL, + .pixelcode.entity_val = MEDIA_BUS_FMT_FIXED, + .length.entity_val = 0, + .start_line.entity_val = 0, + .start_pixel.entity_val = 0, + .width = { + .ops_items = ARRAY_SIZE(pixter_stub_frame_desc_width_ops), + .ops = pixter_stub_frame_desc_width_ops, + }, + .height = { + .ops_items = ARRAY_SIZE(pixter_stub_frame_desc_height_ops), + .ops = pixter_stub_frame_desc_height_ops, + }, + .csi2_channel.entity_val = 1, + .csi2_data_type.entity_val = 0x12, + }, + { + .flags.entity_val = 0, + .bpp.entity_type = CRL_DYNAMIC_VAL_OPERAND_TYPE_VAR_REF, + .bpp.entity_val = CRL_VAR_REF_BITSPERPIXEL, + .pixelcode.entity_val = MEDIA_BUS_FMT_FIXED, + .length.entity_val = 0, + .start_line.entity_val = 0, + .start_pixel.entity_val = 0, + .width = { + .ops_items = ARRAY_SIZE(pixter_stub_frame_desc_width_ops), + .ops = pixter_stub_frame_desc_width_ops, + }, + .height = { + .ops_items = ARRAY_SIZE(pixter_stub_frame_desc_height_ops), + .ops = pixter_stub_frame_desc_height_ops, + }, + .csi2_channel.entity_val = 2, + .csi2_data_type.entity_val = 0x12, + }, + { + .flags.entity_val = 0, + .bpp.entity_type = CRL_DYNAMIC_VAL_OPERAND_TYPE_VAR_REF, + .bpp.entity_val = CRL_VAR_REF_BITSPERPIXEL, + .pixelcode.entity_val = MEDIA_BUS_FMT_FIXED, + .length.entity_val = 0, + .start_line.entity_val = 0, + .start_pixel.entity_val = 0, + .width = { + .ops_items = ARRAY_SIZE(pixter_stub_frame_desc_width_ops), + .ops = pixter_stub_frame_desc_width_ops, + }, + .height = { + .ops_items = ARRAY_SIZE(pixter_stub_frame_desc_height_ops), + .ops = pixter_stub_frame_desc_height_ops, + }, + 
.csi2_channel.entity_val = 3, + .csi2_data_type.entity_val = 0x12, + }, +}; + +#define DEFINE_PIXTER_CRL_CONFIGURATION(port) \ +static struct crl_sensor_configuration pixter_##port##_crl_configuration = { \ + .powerup_regs_items = 0, \ + .powerup_regs = NULL, \ +\ + .poweroff_regs_items = 0, \ + .poweroff_regs = NULL, \ +\ + .id_reg_items = 0, \ + .id_regs = NULL, \ +\ + .subdev_items = ARRAY_SIZE(pixter_##port##_sensor_subdevs), \ + .subdevs = pixter_##port##_sensor_subdevs, \ +\ + .sensor_limits = &pixter_stub_sensor_limits, \ +\ + .pll_config_items = ARRAY_SIZE(pixter_stub_pll_configurations), \ + .pll_configs = pixter_stub_pll_configurations, \ +\ + .modes_items = ARRAY_SIZE(pixter_stub_modes), \ + .modes = pixter_stub_modes, \ +\ + .streamon_regs_items = 0, \ + .streamon_regs = NULL, \ +\ + .streamoff_regs_items = 0, \ + .streamoff_regs = NULL, \ +\ + .v4l2_ctrls_items = ARRAY_SIZE(pixter_stub_v4l2_ctrls), \ + .v4l2_ctrl_bank = pixter_stub_v4l2_ctrls, \ +\ + .flip_items = ARRAY_SIZE(pixter_stub_flip_configurations), \ + .flip_data = pixter_stub_flip_configurations, \ +\ + .frame_desc_entries = ARRAY_SIZE(pixter_stub_frame_desc), \ + .frame_desc_type = CRL_V4L2_MBUS_FRAME_DESC_TYPE_CSI2, \ + .frame_desc = pixter_stub_frame_desc, \ +\ + .csi_fmts_items = ARRAY_SIZE(pixter_stub_crl_csi_data_fmt), \ + .csi_fmts = pixter_stub_crl_csi_data_fmt, \ +} +DEFINE_PIXTER_CRL_CONFIGURATION(stub); +DEFINE_PIXTER_CRL_CONFIGURATION(stub_b); +DEFINE_PIXTER_CRL_CONFIGURATION(stub_c); +DEFINE_PIXTER_CRL_CONFIGURATION(stub_d); +DEFINE_PIXTER_CRL_CONFIGURATION(stub_e); +DEFINE_PIXTER_CRL_CONFIGURATION(stub_f); +DEFINE_PIXTER_CRL_CONFIGURATION(stub_g); +DEFINE_PIXTER_CRL_CONFIGURATION(stub_h); + + +#endif /* __CRLMODULE_PIXTER_STUB_CONFIGURATION_H_ */ diff --git a/drivers/media/i2c/crlmodule/crlmodule-core.c b/drivers/media/i2c/crlmodule/crlmodule-core.c new file mode 100644 index 0000000000000..7d63252c607ce --- /dev/null +++ b/drivers/media/i2c/crlmodule/crlmodule-core.c @@ -0,0 
+1,3519 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (C) 2014 - 2018 Intel Corporation + * + * Author: Vinod Govindapillai + * + */ +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +#include + +#include "crlmodule.h" +#include "crlmodule-nvm.h" +#include "crlmodule-regs.h" +#include "crlmodule-msrlist.h" + +static void crlmodule_update_current_mode(struct crl_sensor *sensor); + +static int __crlmodule_get_variable_ref(struct crl_sensor *sensor, + enum crl_member_data_reference_ids ref, + u32 *val) +{ + switch (ref) { + case CRL_VAR_REF_OUTPUT_WIDTH: + *val = sensor->src->crop[CRL_PAD_SRC].width; + break; + case CRL_VAR_REF_OUTPUT_HEIGHT: + *val = sensor->src->crop[CRL_PAD_SRC].height; + break; + case CRL_VAR_REF_BITSPERPIXEL: + *val = sensor->sensor_ds->csi_fmts[ + sensor->fmt_index].bits_per_pixel; + break; + default: + return -EINVAL; + }; + + return 0; +} + +/* + * Get the data format index from the configuration definition data + */ +static int __crlmodule_get_data_fmt_index(struct crl_sensor *sensor, + u32 code) +{ + unsigned int i; + + for (i = 0; i < sensor->sensor_ds->csi_fmts_items; i++) { + if (sensor->sensor_ds->csi_fmts[i].code == code) + return i; + } + + return -EINVAL; +} + +/* + * Find the index of the v4l2 ctrl pointer from the array of v4l2 ctrls + * maintained by the CRL module based on the ctrl id. 
 */
static int __crlmodule_get_crl_ctrl_index(struct crl_sensor *sensor,
					  u32 id, unsigned int *index)
{
	unsigned int i;

	for (i = 0; i < sensor->sensor_ds->v4l2_ctrls_items; i++)
		if (sensor->v4l2_ctrl_bank[i].ctrl_id == id)
			break;

	if (i >= sensor->sensor_ds->v4l2_ctrls_items)
		return -EINVAL;

	*index = i;
	return 0;
}

/*
 * Finds the current value of a specific v4l2 ctrl based on the ctrl-id.
 *
 * For integer-menu controls the value returned through @val is the menu
 * payload (qmenu_int[ctrl->val]), not the raw menu index; all other
 * control types return ctrl->val directly.
 *
 * Returns 0 on success, -EINVAL if @id is unknown, or -ENODATA if the
 * wrapper exists but the v4l2 control has not been created yet.
 */
static int __crlmodule_get_ctrl_value(struct crl_sensor *sensor,
				      u32 id, u32 *val)
{
	struct i2c_client *client = v4l2_get_subdevdata(&sensor->src->sd);
	struct v4l2_ctrl *ctrl;
	unsigned int i;
	int ret;

	ret = __crlmodule_get_crl_ctrl_index(sensor, id, &i);
	if (ret)
		return ret;

	/* If no corresponding v4l2 ctrl created, return */
	if (!sensor->v4l2_ctrl_bank[i].ctrl) {
		dev_dbg(&client->dev,
			"%s ctrl_id: 0x%x desc: %s not ready\n", __func__, id,
			sensor->v4l2_ctrl_bank[i].name);
		return -ENODATA;
	}

	ctrl = sensor->v4l2_ctrl_bank[i].ctrl;
	switch (sensor->v4l2_ctrl_bank[i].type) {
	case CRL_V4L2_CTRL_TYPE_MENU_INT:
		/* Report the menu entry's value, not the selected index */
		*val = ctrl->qmenu_int[ctrl->val];
		break;
	case CRL_V4L2_CTRL_TYPE_INTEGER:
	default:
		*val = ctrl->val;
	}

	dev_dbg(&client->dev, "%s ctrl_id: 0x%x desc: %s val: %d\n",
		__func__, id,
		sensor->v4l2_ctrl_bank[i].name, *val);
	return 0;
}

/*
 * Finds the v4l2 ctrl pointer based on the control id, or NULL if the id
 * is unknown (or the control was never created).
 */
static struct v4l2_ctrl *__crlmodule_get_v4l2_ctrl(struct crl_sensor *sensor,
						   u32 id)
{
	unsigned int i;

	if (__crlmodule_get_crl_ctrl_index(sensor, id, &i))
		return NULL;

	return sensor->v4l2_ctrl_bank[i].ctrl;
}

/*
 * Grab (@action true) or release (@action false) every control whose
 * update context matches @ctxt.
 */
static void __crlmodule_grab_v4l2_ctrl(struct crl_sensor *sensor,
				       enum crl_v4l2ctrl_update_context ctxt,
				       bool action)
{
	struct crl_v4l2_ctrl *crl_ctrl;
	unsigned int i;

	for (i = 0; i < sensor->sensor_ds->v4l2_ctrls_items; i++) {
		crl_ctrl = &sensor->v4l2_ctrl_bank[i];

		if (crl_ctrl->context == ctxt)
			v4l2_ctrl_grab(crl_ctrl->ctrl, action);
	}
}

/*
 * Checks if the v4l2 ctrl specific data is satisfied in the mode and PLL
 * selection logic.
 *
 * Returns true only when every (ctrl_id, data) pair in @ctrl_val matches
 * the current value of the corresponding control.
 */
static bool __crlmodule_compare_ctrl_specific_data(
	struct crl_sensor *sensor,
	unsigned int items,
	struct crl_ctrl_data_pair *ctrl_val)
{
	struct i2c_client *client = v4l2_get_subdevdata(&sensor->src->sd);
	unsigned int i;
	u32 val;
	int ret;

	/* Go through all the controls associated with this config */
	for (i = 0; i < items; i++) {
		/* Get the value set for the control */
		ret = __crlmodule_get_ctrl_value(sensor, ctrl_val[i].ctrl_id,
						 &val);
		if (ret) {
			dev_err(&client->dev, "%s ctrl_id: 0x%x not found\n",
				__func__, ctrl_val[i].ctrl_id);
			return false;
		}

		/* Compare the value from the sensor definition file config */
		if (val != ctrl_val[i].data) {
			dev_dbg(&client->dev,
				"%s ctrl_id: 0x%x value not match %d != %d\n",
				__func__, ctrl_val[i].ctrl_id, val,
				ctrl_val[i].data);
			return false;
		}
	}

	dev_dbg(&client->dev, "%s success\n", __func__);
	return true;
}

/*
 * Finds the correct PLL settings index based on the parameters.
 *
 * A PLL config is selected when it matches the currently selected link
 * frequency, the platform external clock, the lane count (unless the
 * config's csi_lanes is 0, which means "don't care"), the current
 * format's bits-per-pixel, and any control-value compare items.  On a
 * match, sensor->pll_index and the pixel-rate controls are updated.
 *
 * Returns 0 on success or -EINVAL when no configuration matches.
 */
static int __crlmodule_update_pll_index(struct crl_sensor *sensor)
{
	struct i2c_client *client = v4l2_get_subdevdata(&sensor->src->sd);
	const struct crl_pll_configuration *pll_config;
	const struct crl_csi_data_fmt *fmts =
		&sensor->sensor_ds->csi_fmts[sensor->fmt_index];
	u32 link_freq;
	unsigned int i;

	link_freq = sensor->link_freq->qmenu_int[sensor->link_freq->val];

	dev_dbg(&client->dev, "%s PLL Items: %d link_freq: %d\n", __func__,
		sensor->sensor_ds->pll_config_items, link_freq);

	for (i = 0; i < sensor->sensor_ds->pll_config_items; i++) {
		pll_config = &sensor->sensor_ds->pll_configs[i];

		if (pll_config->op_sys_clk != link_freq)
			continue;

		if (pll_config->input_clk != sensor->platform_data->ext_clk)
			continue;

		/* if pll_config->csi_lanes == 0, lanes do not matter */
		if (pll_config->csi_lanes)
			if (sensor->platform_data->lanes !=
			    pll_config->csi_lanes)
				continue;

		/* PLL config must match the format's bits per pixel */
		if (fmts->bits_per_pixel != pll_config->bitsperpixel)
			continue;

		/* Check if there are any dynamic compare items */
		if (sensor->ext_ctrl_impacts_pll_selection &&
		    !__crlmodule_compare_ctrl_specific_data(sensor,
				pll_config->comp_items,
				pll_config->ctrl_data))
			continue;

		/* Found PLL index */
		dev_dbg(&client->dev, "%s Found PLL index: %d for freq: %d\n",
			__func__, i, link_freq);

		sensor->pll_index = i;

		/* Update the control values for pixelrate_pa and csi */
		__v4l2_ctrl_s_ctrl_int64(sensor->pixel_rate_pa,
					 pll_config->pixel_rate_pa);
		__v4l2_ctrl_s_ctrl_int64(sensor->pixel_rate_csi,
					 pll_config->pixel_rate_csi);
		return 0;
	}

	dev_err(&client->dev, "%s no configuration found for freq: %d\n",
		__func__, link_freq);
	return -EINVAL;
}

/*
 * Perform the action for the dependency control.
 *
 * The relation of *val to *dep_val (greater/lesser/equal) selects the
 * first matching action entry in @prov.  For DEP_CTRL-type provisions
 * the two values are swapped before and after the arithmetic, so the
 * result is effectively applied to *dep_val instead of *val.
 */
static void __crlmodule_dep_ctrl_perform_action(
	struct crl_sensor *sensor,
	struct crl_dep_ctrl_provision *prov,
	u32 *val, u32 *dep_val)
{
	enum crl_dep_ctrl_condition cond;
	unsigned int i;
	u32 temp;

	if (*val > *dep_val)
		cond = CRL_DEP_CTRL_CONDITION_GREATER;
	else if (*val < *dep_val)
		cond = CRL_DEP_CTRL_CONDITION_LESSER;
	else
		cond = CRL_DEP_CTRL_CONDITION_EQUAL;

	for (i = 0; i < prov->action_items; i++) {
		if (prov->action[i].cond == cond)
			break;
	}

	/* No handler found. Return completed */
	if (i >= prov->action_items)
		return;

	/* if this is dependency control, switch val and dep val */
	if (prov->action_type == CRL_DEP_CTRL_ACTION_TYPE_DEP_CTRL) {
		temp = *val;
		*val = *dep_val;
		*dep_val = temp;
	}

	switch (prov->action[i].action) {
	case CRL_DEP_CTRL_CONDITION_ADD:
		*val = *dep_val + prov->action[i].action_value;
		break;
	case CRL_DEP_CTRL_CONDITION_SUBTRACT:
		*val = *dep_val - prov->action[i].action_value;
		break;
	case CRL_DEP_CTRL_CONDITION_MULTIPLY:
		*val = *dep_val * prov->action[i].action_value;
		break;
	case CRL_DEP_CTRL_CONDITION_DIVIDE:
		*val = *dep_val / prov->action[i].action_value;
		break;
	}

	/* if this is dependency control, switch val and dep val back*/
	if (prov->action_type == CRL_DEP_CTRL_ACTION_TYPE_DEP_CTRL) {
		temp = *val;
		*val = *dep_val;
		*dep_val = temp;
	}

	return;
}

/*
 * Parse the dynamic entity based on the Operand type.
 *
 * Resolves @entity to a concrete value in *val: a constant, a variable
 * reference, a control value, or an 8-bit register read.  Note that
 * @entity is passed by value.
 */
static int __crlmodule_parse_dynamic_entity(struct crl_sensor *sensor,
					    struct crl_dynamic_entity entity,
					    u32 *val)
{
	switch (entity.entity_type) {
	case CRL_DYNAMIC_VAL_OPERAND_TYPE_CONST:
		*val = entity.entity_val;
		return 0;
	case CRL_DYNAMIC_VAL_OPERAND_TYPE_VAR_REF:
		return __crlmodule_get_variable_ref(sensor,
						    entity.entity_val, val);
	case CRL_DYNAMIC_VAL_OPERAND_TYPE_CTRL_VAL:
		return __crlmodule_get_ctrl_value(sensor,
						  entity.entity_val, val);
	case CRL_DYNAMIC_VAL_OPERAND_TYPE_REG_VAL: {
		struct crl_register_read_rep reg;

		/* Note: Only 8bit registers are supported. */
		reg.address = entity.entity_val;
		reg.len = CRL_REG_LEN_08BIT;
		reg.mask = 0xff;
		reg.dev_i2c_addr = CRL_I2C_ADDRESS_NO_OVERRIDE;
		return crlmodule_read_reg(sensor, reg, val);
	}
	default:
		break;
	};

	return -EINVAL;
}

/*
 * Apply the arithmetic/bitwise operation chain in @ops_arr to *val,
 * resolving each operand through __crlmodule_parse_dynamic_entity().
 *
 * NOTE(review): CRL_SUBTRACT computes the absolute difference
 * (|*val - operand|), not a plain subtraction — confirm that all sensor
 * configuration files expect this.
 */
static int __crlmodule_calc_dynamic_entity_values(
	struct crl_sensor *sensor,
	unsigned int ops_items,
	struct crl_arithmetic_ops *ops_arr,
	unsigned int *val)
{
	struct i2c_client *client = v4l2_get_subdevdata(&sensor->src->sd);
	unsigned int i;

	/* perform the bitwise operation on val one by one */
	for (i = 0; i < ops_items; i++) {
		struct crl_arithmetic_ops *ops = &ops_arr[i];
		u32 operand;
		int ret = __crlmodule_parse_dynamic_entity(sensor, ops->operand,
							   &operand);
		if (ret) {
			dev_dbg(&client->dev,
				"%s failed to parse dynamic entity: %d %d\n",
				__func__, ops->operand.entity_type,
				ops->operand.entity_val);
			return ret;
		}

		switch (ops->op) {
		case CRL_BITWISE_AND:
			*val &= operand;
			break;
		case CRL_BITWISE_OR:
			*val |= operand;
			break;
		case CRL_BITWISE_LSHIFT:
			*val <<= operand;
			break;
		case CRL_BITWISE_RSHIFT:
			*val >>= operand;
			break;
		case CRL_BITWISE_XOR:
			*val ^= operand;
			break;
		case CRL_BITWISE_COMPLEMENT:
			*val = ~(*val);
			break;
		case CRL_ADD:
			*val += operand;
			break;
		case CRL_SUBTRACT:
			/* absolute difference, see NOTE above */
			*val = *val > operand ? *val - operand : operand - *val;
			break;
		case CRL_MULTIPLY:
			*val *= operand;
			break;
		case CRL_DIV:
			*val /= operand;
			break;
		case CRL_ASSIGNMENT:
			*val = operand;
			break;
		default:
			return -EINVAL;
		}
	}

	return 0;
}

/*
 * Dynamic registers' value is not direct but depends on a reference value.
 * This kind of registers are mainly used in crlmodule's v4l2 ctrl logic.
 *
 * This is to handle cases like the below examples, where multiple registers
 * need to be modified based on the input value "val"
 * R3000 = val & 0xff and R3001 = val >> 8 & 0xff and R3002 = val >> 16 & 0xff
 * R4001 = val and R4002 = val or
 * R2800 = FLL - val and R2802 = LLP - val
 */
static int __crlmodule_parse_and_write_dynamic_reg(struct crl_sensor *sensor,
	struct crl_dynamic_register_access *reg,
	unsigned int val)
{
	int ret;

	/*
	 * Get the value associated with the dynamic entity. "val" might
	 * change after this call based on the arithmetic operations added for
	 * this group
	 */
	ret = __crlmodule_calc_dynamic_entity_values(sensor, reg->ops_items,
						     reg->ops, &val);
	if (ret)
		return ret;

	/* Now ready to write the value */
	return crlmodule_write_reg(sensor, reg->dev_i2c_addr, reg->address,
				   reg->len, reg->mask, val);
}

/*
 * Write every dynamic register group attached to @crl_ctrl, feeding each
 * group the same initial @val.  Stops and returns the first error.
 */
static int __crlmodule_update_dynamic_regs(struct crl_sensor *sensor,
					   struct crl_v4l2_ctrl *crl_ctrl,
					   unsigned int val)
{
	unsigned int i;
	int ret;

	for (i = 0; i < crl_ctrl->regs_items; i++) {
		/*
		 * Each register group must start from the initial value, not
		 * as a continuation of the previous calculations. The sensor
		 * configurations must take care of this restriction.
		 */
		ret = __crlmodule_parse_and_write_dynamic_reg(sensor,
						&crl_ctrl->regs[i], val);
		if (ret)
			return ret;
	}

	return 0;
}

/*
 * Perform the action for the dependent register lists.
 *
 * Each list carries a condition value and a relation; the first list whose
 * relation to @val matches gets its direct and dynamic registers written,
 * after which the search stops (only one list is ever applied).
 */
static int __crlmodule_handle_dependency_regs(
	struct crl_sensor *sensor,
	struct crl_v4l2_ctrl *crl_ctrl,
	unsigned int val)
{
	unsigned int i;
	int ret;

	for (i = 0; i < crl_ctrl->crl_ctrl_dep_reg_list; i++) {
		struct crl_dep_reg_list *list = &crl_ctrl->dep_regs[i];
		enum crl_dep_ctrl_condition condition;
		unsigned int j;
		u32 dep_val;

		/* Parse the condition value */
		ret = __crlmodule_parse_dynamic_entity(sensor, list->cond_value,
						       &dep_val);
		if (ret)
			return ret;

		/* Get the kind of condition for this value */
		if (val > dep_val)
			condition = CRL_DEP_CTRL_CONDITION_GREATER;
		else if (val < dep_val)
			condition = CRL_DEP_CTRL_CONDITION_LESSER;
		else
			condition = CRL_DEP_CTRL_CONDITION_EQUAL;

		/*
		 * Compare the register list specific condition and if matching
		 * write the corresponding register lists to the sensor.
		 */
		if (condition == list->reg_cond) {
			/* Handle the direct registers if any */
			if (list->no_direct_regs && list->direct_regs) {
				ret = crlmodule_write_regs(sensor,
					list->direct_regs, list->no_direct_regs);
				if (ret)
					return ret;
			}

			/* Handle the dynamic registers if any */
			for (j = 0; j < list->no_dyn_items; j++) {
				ret = __crlmodule_parse_and_write_dynamic_reg(
					sensor, &list->dyn_regs[j], val);
				if (ret)
					return ret;
			}
			break;
		}
	}

	return 0;
}

/*
 * Handles the dependency control actions. Dependency control is a control
 * whose value depends on the current control. This information is encoded in
 * the sensor configuration file.
+ */ +static int __crlmodule_handle_dependency_ctrl( + struct crl_sensor *sensor, + struct crl_v4l2_ctrl *crl_ctrl, + unsigned int *val, + enum crl_dep_ctrl_action_type type) +{ + struct i2c_client *client = v4l2_get_subdevdata(&sensor->src->sd); + struct crl_v4l2_ctrl *dep_crl_ctrl; + struct crl_dep_ctrl_provision *dep_prov; + unsigned int i, idx; + u32 dep_val; + int ret; + + dev_dbg(&client->dev, "%s ctrl_id: 0x%x dependency controls: %d\n", + __func__, crl_ctrl->ctrl_id, + crl_ctrl->dep_items); + + for (i = 0; i < crl_ctrl->dep_items; i++) { + dep_prov = &crl_ctrl->dep_ctrls[i]; + + /* If not the type, continue */ + if (dep_prov->action_type != type) + continue; + + /* Get the value from the dependency ctrl */ + ret = __crlmodule_get_ctrl_value(sensor, dep_prov->ctrl_id, + &dep_val); + if (ret) { + dev_err(&client->dev, "%s ctrl_id: 0x%x not found\n", + __func__, dep_prov->ctrl_id); + /* TODO! Shoud continue? */ + continue; + } + + /* Perform the action */ + __crlmodule_dep_ctrl_perform_action(sensor, dep_prov, val, + &dep_val); + + /* if this is dependency control, update the register */ + if (dep_prov->action_type == + CRL_DEP_CTRL_ACTION_TYPE_DEP_CTRL) { + ret = __crlmodule_get_crl_ctrl_index(sensor, + dep_prov->ctrl_id, &idx); + if (ret) + continue; + + dep_crl_ctrl = &sensor->v4l2_ctrl_bank[idx]; + + /* Update the dynamic registers for the dep control */ + ret = __crlmodule_update_dynamic_regs(sensor, + dep_crl_ctrl, dep_val); + if (ret) + dev_info(&client->dev, + "%s dynamic reg update failed for %s\n", + __func__, dep_crl_ctrl->name); + + /* Handle dependened register lists for dep control */ + ret = __crlmodule_handle_dependency_regs(sensor, + dep_crl_ctrl, dep_val); + if (ret) + dev_info(&client->dev, + "%s handle dep regs failed for %s\n", + __func__, dep_crl_ctrl->name); + } + } + + return 0; +} + +static int crlmodule_get_fmt_index(struct crl_sensor *sensor, + u8 pixel_order, u8 bpp) +{ + struct i2c_client *client = 
v4l2_get_subdevdata(&sensor->src->sd); + const struct crl_csi_data_fmt *f; + int i; + + /* + * Go through the fmt list and check if this format with matching bpp + * is supported by this module definition file + */ + for (i = 0; i < sensor->sensor_ds->csi_fmts_items; i++) { + f = &sensor->sensor_ds->csi_fmts[i]; + + if (f->pixel_order == pixel_order && f->bits_per_pixel == bpp) + return i; + } + + dev_err(&client->dev, "%s no supported format for order: %d bpp: %d\n", + __func__, pixel_order, bpp); + + return -EINVAL; +} + +static int __crlmodule_update_flip_info(struct crl_sensor *sensor, + struct crl_v4l2_ctrl *crl_ctrl, + struct v4l2_ctrl *ctrl) +{ + struct i2c_client *client = v4l2_get_subdevdata(&sensor->src->sd); + const struct crl_csi_data_fmt *fmt = + &sensor->sensor_ds->csi_fmts[sensor->fmt_index]; + u8 bpp = fmt->bits_per_pixel; + u8 flip_info = sensor->flip_info; + u8 new_order; + int i, ret; + + dev_dbg(&client->dev, "%s current flip_info: %d curr index: %d\n", + __func__, flip_info, sensor->fmt_index); + + switch (ctrl->id) { + case V4L2_CID_HFLIP: + flip_info &= CRL_FLIP_HFLIP_MASK; + flip_info |= ctrl->val > 0 ? CRL_FLIP_HFLIP : 0; + break; + case V4L2_CID_VFLIP: + flip_info &= CRL_FLIP_VFLIP_MASK; + flip_info |= ctrl->val > 0 ? CRL_FLIP_VFLIP : 0; + break; + } + + dev_dbg(&client->dev, "%s flip success new flip_info: %d\n", + __func__, flip_info); + + /* First check if the module actually supports any pixelorder changes */ + for (i = 0; i < sensor->sensor_ds->flip_items; i++) { + if (flip_info == sensor->sensor_ds->flip_data[i].flip) { + new_order = sensor->sensor_ds->flip_data[i].pixel_order; + break; + } + } + + if (i >= sensor->sensor_ds->flip_items) { + dev_err(&client->dev, "%s flip not supported %d\n", + __func__, flip_info); + return -EINVAL; + } + + /* Skip format re-selection if pixel order is unrelated to flipping. */ + if (new_order == CRL_PIXEL_ORDER_IGNORE) + return 0; + + /* + * Flip changes only pixel order. 
So check if the supported format list + * has any format with new pixel order and current bits per pixel + */ + i = crlmodule_get_fmt_index(sensor, new_order, bpp); + if (i < 0) { + dev_err(&client->dev, "%s no format found order: %d bpp: %d\n", + __func__, new_order, bpp); + return -EINVAL; + } + + ret = __crlmodule_update_dynamic_regs(sensor, crl_ctrl, ctrl->val); + if (ret) { + dev_err(&client->dev, "%s register access failed\n", __func__); + return ret; + } + + /* New format found. Update info */ + sensor->fmt_index = i; + sensor->flip_info = flip_info; + + dev_dbg(&client->dev, "%s flip success flip: %d new fmt index: %d\n", + __func__, flip_info, i); + + return 0; +} +static int __crlmodule_update_framesize(struct crl_sensor *sensor, + struct crl_v4l2_ctrl *crl_ctrl, + struct v4l2_ctrl *ctrl) +{ + struct i2c_client *client = v4l2_get_subdevdata(&sensor->src->sd); + const struct crl_mode_rep *mode = sensor->current_mode; + unsigned int val; + int ret; + + switch (ctrl->id) { + case V4L2_CID_FRAME_LENGTH_LINES: + val = max(ctrl->val, mode->min_fll); + break; + case V4L2_CID_LINE_LENGTH_PIXELS: + val = max(ctrl->val, mode->min_llp); + break; + default: + return -EINVAL; + } + + ret = __crlmodule_update_dynamic_regs(sensor, crl_ctrl, val); + if (ret) + return ret; + + ctrl->val = val; + ctrl->cur.val = val; + dev_dbg(&client->dev, "%s: set v4l2 id:0x%0x value %d\n", + __func__, ctrl->id, val); + + return 0; +} +static int __crlmodule_update_blanking(struct crl_sensor *sensor, + struct crl_v4l2_ctrl *crl_ctrl, + struct v4l2_ctrl *ctrl) +{ + unsigned int val; + + switch (ctrl->id) { + case V4L2_CID_HBLANK: + val = sensor->pixel_array->crop[CRL_PA_PAD_SRC].width + + ctrl->val; + break; + case V4L2_CID_VBLANK: + val = sensor->pixel_array->crop[CRL_PA_PAD_SRC].height + + ctrl->val; + break; + default: + return -EINVAL; + } + + return __crlmodule_update_dynamic_regs(sensor, crl_ctrl, val); +} + +static void __crlmodule_update_selection_impact_flags( + struct crl_sensor 
*sensor, + struct crl_v4l2_ctrl *crl_ctrl) +{ + if (crl_ctrl->impact & CRL_IMPACTS_PLL_SELECTION) + sensor->ext_ctrl_impacts_pll_selection = true; + + if (crl_ctrl->impact & CRL_IMPACTS_MODE_SELECTION) + sensor->ext_ctrl_impacts_mode_selection = true; +} + +static struct crl_v4l2_ctrl *__crlmodule_find_crlctrl( + struct crl_sensor *sensor, + struct v4l2_ctrl *ctrl) +{ + struct crl_v4l2_ctrl *crl_ctrl; + unsigned int i; + + for (i = 0; i < sensor->sensor_ds->v4l2_ctrls_items; i++) { + crl_ctrl = &sensor->v4l2_ctrl_bank[i]; + if (crl_ctrl->ctrl == ctrl) + return crl_ctrl; + } + + return NULL; +} + +static int crlmodule_reset_crlctrl_value(struct crl_sensor *sensor, + unsigned int new_mode) +{ + struct crl_v4l2_ctrl *crl_ctrl; + const struct crl_mode_rep *this; + unsigned int i; + + if (!sensor->v4l2_ctrl_bank) + return -EINVAL; + + this = &sensor->sensor_ds->modes[new_mode]; + + for (i = 0; i < sensor->sensor_ds->v4l2_ctrls_items; i++) { + crl_ctrl = &sensor->v4l2_ctrl_bank[i]; + + switch (crl_ctrl->ctrl_id) { + case V4L2_CID_FRAME_LENGTH_LINES: + if (crl_ctrl->ctrl) { + crl_ctrl->ctrl->val = this->min_fll; + crl_ctrl->ctrl->cur.val = this->min_fll; + } + break; + case V4L2_CID_LINE_LENGTH_PIXELS: + if (crl_ctrl->ctrl) { + crl_ctrl->ctrl->val = this->min_llp; + crl_ctrl->ctrl->cur.val = this->min_llp; + } + break; + } + } + + return 0; +} + +static int crlmodule_set_ctrl(struct v4l2_ctrl *ctrl) +{ + struct crl_sensor *sensor = container_of(ctrl->handler, + struct crl_subdev, ctrl_handler)->sensor; + struct i2c_client *client = v4l2_get_subdevdata(&sensor->src->sd); + struct crl_v4l2_ctrl *crl_ctrl = NULL; + int ret = 0; + + dev_dbg(&client->dev, "%s id:0x%0x val:%d\n", __func__, ctrl->id, + ctrl->val); + + /* + * Need to find the corresponding crlmodule wrapper for this v4l2_ctrl. + * This is needed because all the register information is associated + * with the crlmodule's wrapper v4l2ctrl. 
+ */ + crl_ctrl = __crlmodule_find_crlctrl(sensor, ctrl); + if (!crl_ctrl) { + dev_err(&client->dev, "%s ctrl :0x%x not supported\n", + __func__, ctrl->id); + return -EINVAL; + } + + dev_dbg(&client->dev, "%s id:0x%x name:%s\n", __func__, ctrl->id, + crl_ctrl->name); + + /* Then go through the mandatory controls */ + switch (ctrl->id) { + case V4L2_CID_LINK_FREQ: + /* Go through the supported list and compare the values */ + ret = __crlmodule_update_pll_index(sensor); + goto out; + }; + + /* update the selection impacts flags */ + __crlmodule_update_selection_impact_flags(sensor, crl_ctrl); + + /* + * Dependency control is a control whose value is affected by the value + * for the current control. For example, vblank can be a dependency + * control for exposure. Whenever exposure changes, the sensor can + * automatically adjust the vblank or rely on manual adjustment. In + * case of manual adjustment the sensor configuration file needs to + * specify the dependency control, the condition for an action and + * typs of action. + * + * Now check if there is any dependency controls for this. And if there + * are any we need to split the action to two. First if the current + * control needs to be changed, then do it before updating the register. 
+ * If some other control is affected, then do it after wrriting the + * current values + * + * Now check in the dependency control list, if the action type is + * "self" and update the value accordingly now + */ + __crlmodule_handle_dependency_ctrl(sensor, crl_ctrl, &ctrl->val, + CRL_DEP_CTRL_ACTION_TYPE_SELF); + + /* Handle specific controls */ + switch (ctrl->id) { + case V4L2_CID_HFLIP: + case V4L2_CID_VFLIP: + ret = __crlmodule_update_flip_info(sensor, crl_ctrl, ctrl); + goto out; + + case V4L2_CID_VBLANK: + case V4L2_CID_HBLANK: + if (sensor->blanking_ctrl_not_use) { + dev_info(&client->dev, "%s Blanking controls are \ + not used in this configuration, setting them has \ + no effect\n", __func__); + /* Disable control*/ + v4l2_ctrl_activate(ctrl, false); + + } else { + ret = __crlmodule_update_blanking(sensor, + crl_ctrl, ctrl); + } + goto out; + + case V4L2_CID_FRAME_LENGTH_LINES: + case V4L2_CID_LINE_LENGTH_PIXELS: + ret = __crlmodule_update_framesize(sensor, crl_ctrl, ctrl); + goto out; + + case CRL_CID_SENSOR_MODE: + /* + * If sensor mode is changed, some v4l2 ctrls value need + * to be reset to default value, or else the value set in + * previous mode will influence the setting in the current + * mode. Especially for llp and fll. 
+ */ + if (sensor->sensor_mode != ctrl->val) + crlmodule_reset_crlctrl_value(sensor, ctrl->val); + + sensor->sensor_mode = ctrl->val; + crlmodule_update_current_mode(sensor); + goto out; + } + + ret = __crlmodule_update_dynamic_regs(sensor, crl_ctrl, ctrl->val); + if (ret) + goto out; + + ret = __crlmodule_handle_dependency_regs(sensor, crl_ctrl, + ctrl->val); + +out: + /* + * Now check in the dependency control list, if the action type is + * "dependency control" and update the value accordingly now + */ + if (!ret && crl_ctrl) + __crlmodule_handle_dependency_ctrl(sensor, crl_ctrl, &ctrl->val, + CRL_DEP_CTRL_ACTION_TYPE_DEP_CTRL); + return ret; +} + +static int crlmodule_get_ctrl(struct v4l2_ctrl *ctrl) +{ + struct crl_sensor *sensor = container_of(ctrl->handler, + struct crl_subdev, ctrl_handler)->sensor; + struct i2c_client *client = v4l2_get_subdevdata(&sensor->src->sd); + struct crl_v4l2_ctrl *crl_ctrl; + struct crl_dynamic_register_access *reg; + + /* + * Need to find the corresponding crlmodule wrapper for this v4l2_ctrl. + * This is needed because all the register information is associated + * with the crlmodule's wrapper v4l2ctrl. + */ + crl_ctrl = __crlmodule_find_crlctrl(sensor, ctrl); + if (!crl_ctrl) { + dev_err(&client->dev, "%s ctrl :0x%x not supported\n", + __func__, ctrl->id); + return -EINVAL; + } + + dev_dbg(&client->dev, "%s id:0x%x name:%s\n", __func__, ctrl->id, + crl_ctrl->name); + + /* cannot handle if the V4L2_CTRL_FLAG_READ_ONLY flag is not set */ + if (!(ctrl->flags & V4L2_CTRL_FLAG_READ_ONLY)) { + dev_err(&client->dev, "%s Control id:0x%x is not read only\n", + __func__, ctrl->id); + return -EINVAL; + } + + /* + * Found the crl control wrapper. Use the dynamic entity information + * to calculate the value for this control. For get control, there + * could be only one item in the crl_dynamic_register_access. ctrl-> + * regs_items must be 1. Also the crl_dynamic_register_access.address + * and crl_dynamic_register_access.len are not used. 
+ * Instead the values to be found or calculated need to be encoded into + * crl_dynamic_register_access.crl_arithmetic_ops. It has possibility + * to read from registers, existing control values and simple arithmetic + * operations etc. + */ + if (crl_ctrl->regs_items > 1) + dev_warn(&client->dev, + "%s multiple dynamic entities, will skip the rest\n", + __func__); + reg = &crl_ctrl->regs[0]; + + /* Get the value associated with the dynamic entity */ + return __crlmodule_calc_dynamic_entity_values(sensor, reg->ops_items, + reg->ops, &ctrl->val); +} + +static const struct v4l2_ctrl_ops crlmodule_ctrl_ops = { + .s_ctrl = crlmodule_set_ctrl, + .g_volatile_ctrl = crlmodule_get_ctrl, +}; + +static struct v4l2_ctrl_handler *__crlmodule_get_sd_ctrl_handler( + struct crl_sensor *sensor, + enum crl_subdev_type sd_type) +{ + switch (sd_type) { + case CRL_SUBDEV_TYPE_SCALER: + case CRL_SUBDEV_TYPE_BINNER: + return &sensor->src->ctrl_handler; + + case CRL_SUBDEV_TYPE_PIXEL_ARRAY: + if (sensor->pixel_array) + return &sensor->pixel_array->ctrl_handler; + break; + }; + + return NULL; +} + +static int __crlmodule_init_link_freq_ctrl_menu( + struct crl_sensor *sensor, + struct crl_v4l2_ctrl *crl_ctrl) +{ + struct i2c_client *client = v4l2_get_subdevdata(&sensor->src->sd); + unsigned int items = 0; + unsigned int i; + + /* Cannot handle if the control type is not integer menu */ + if (crl_ctrl->type != CRL_V4L2_CTRL_TYPE_MENU_INT) + return 0; + + /* If the menu contents exist, skip filling it dynamically */ + if (crl_ctrl->data.v4l2_int_menu.menu) + return 0; + + sensor->link_freq_menu = devm_kzalloc(&client->dev, sizeof(s64) * + sensor->sensor_ds->pll_config_items, + GFP_KERNEL); + if (!sensor->link_freq_menu) + return -ENOMEM; + + for (i = 0; i < sensor->sensor_ds->pll_config_items; i++) { + bool dup = false; + unsigned int j; + + /* + * Skip the duplicate entries. 
We are using the value to match + * not the index + */ + for (j = 0; j < items && !dup; j++) + dup = (sensor->link_freq_menu[j] == + sensor->sensor_ds->pll_configs[i].op_sys_clk); + if (dup) + continue; + + sensor->link_freq_menu[items] = + sensor->sensor_ds->pll_configs[i].op_sys_clk; + items++; + } + + crl_ctrl->data.v4l2_int_menu.menu = sensor->link_freq_menu; + + /* items will not be 0 as there will be atleast one pll_config_item */ + crl_ctrl->data.v4l2_int_menu.max = items - 1; + + return 0; +} + +static int crlmodule_init_controls(struct crl_sensor *sensor) +{ + struct i2c_client *client = v4l2_get_subdevdata(&sensor->src->sd); + unsigned int pa_ctrls = 0; + unsigned int src_ctrls = 0; + struct crl_v4l2_ctrl *crl_ctrl; + struct v4l2_ctrl_handler *ctrl_handler; + struct v4l2_ctrl_config cfg = { 0 }; + unsigned int i; + int rval; + + sensor->v4l2_ctrl_bank = devm_kzalloc(&client->dev, + sizeof(struct crl_v4l2_ctrl) * + sensor->sensor_ds->v4l2_ctrls_items, + GFP_KERNEL); + if (!sensor->v4l2_ctrl_bank) + return -ENOMEM; + + /* Prepare to initialise the v4l2_ctrls from the crl wrapper */ + for (i = 0; i < sensor->sensor_ds->v4l2_ctrls_items; i++) { + /* + * First copy the v4l2_ctrls to the sensor as there could be + * more than one similar sensors in a product which could share + * the same configuration files + */ + sensor->v4l2_ctrl_bank[i] = + sensor->sensor_ds->v4l2_ctrl_bank[i]; + + crl_ctrl = &sensor->v4l2_ctrl_bank[i]; + if (crl_ctrl->sd_type == CRL_SUBDEV_TYPE_PIXEL_ARRAY) + pa_ctrls++; + + if (crl_ctrl->sd_type == CRL_SUBDEV_TYPE_SCALER || + crl_ctrl->sd_type == CRL_SUBDEV_TYPE_BINNER) + src_ctrls++; + + /* populate the v4l2_ctrl for the Link_freq dynamically */ + if (crl_ctrl->ctrl_id == V4L2_CID_LINK_FREQ && + (crl_ctrl->sd_type == CRL_SUBDEV_TYPE_SCALER || + crl_ctrl->sd_type == CRL_SUBDEV_TYPE_BINNER)) { + rval = __crlmodule_init_link_freq_ctrl_menu(sensor, + crl_ctrl); + if (rval) + return rval; + } + } + dev_dbg(&client->dev, "%s pa_ctrls: %d 
src_ctrls: %d\n", __func__, + pa_ctrls, src_ctrls); + + if (pa_ctrls) { + rval = v4l2_ctrl_handler_init( + &sensor->pixel_array->ctrl_handler, + pa_ctrls); + if (rval) + return rval; + sensor->pixel_array->ctrl_handler.lock = &sensor->mutex; + } + + if (src_ctrls) { + rval = v4l2_ctrl_handler_init( + &sensor->src->ctrl_handler, + src_ctrls); + if (rval) + return rval; + sensor->src->ctrl_handler.lock = &sensor->mutex; + } + + for (i = 0; i < sensor->sensor_ds->v4l2_ctrls_items; i++) { + crl_ctrl = &sensor->v4l2_ctrl_bank[i]; + ctrl_handler = __crlmodule_get_sd_ctrl_handler(sensor, + crl_ctrl->sd_type); + + if (!ctrl_handler) + continue; + + switch (crl_ctrl->type) { + case CRL_V4L2_CTRL_TYPE_MENU_ITEMS: + crl_ctrl->ctrl = v4l2_ctrl_new_std_menu_items( + ctrl_handler, &crlmodule_ctrl_ops, + crl_ctrl->ctrl_id, + crl_ctrl->data.v4l2_menu_items.size, + 0, 0, + crl_ctrl->data.v4l2_menu_items.menu); + break; + case CRL_V4L2_CTRL_TYPE_MENU_INT: + crl_ctrl->ctrl = v4l2_ctrl_new_int_menu(ctrl_handler, + &crlmodule_ctrl_ops, crl_ctrl->ctrl_id, + crl_ctrl->data.v4l2_int_menu.max, + crl_ctrl->data.v4l2_int_menu.def, + crl_ctrl->data.v4l2_int_menu.menu); + break; + case CRL_V4L2_CTRL_TYPE_INTEGER64: + case CRL_V4L2_CTRL_TYPE_INTEGER: + crl_ctrl->ctrl = v4l2_ctrl_new_std(ctrl_handler, + &crlmodule_ctrl_ops, crl_ctrl->ctrl_id, + crl_ctrl->data.std_data.min, + crl_ctrl->data.std_data.max, + crl_ctrl->data.std_data.step, + crl_ctrl->data.std_data.def); + break; + case CRL_V4L2_CTRL_TYPE_CUSTOM: + cfg.ops = &crlmodule_ctrl_ops; + cfg.id = crl_ctrl->ctrl_id; + cfg.name = crl_ctrl->name; + cfg.type = crl_ctrl->v4l2_type; + if ((crl_ctrl->v4l2_type == V4L2_CTRL_TYPE_INTEGER) || + (crl_ctrl->v4l2_type == + V4L2_CTRL_TYPE_INTEGER64)) { + cfg.max = crl_ctrl->data.std_data.max; + cfg.min = crl_ctrl->data.std_data.min; + cfg.step = crl_ctrl->data.std_data.step; + cfg.def = crl_ctrl->data.std_data.def; + cfg.qmenu = 0; + cfg.elem_size = 0; + } else if (crl_ctrl->v4l2_type == 
V4L2_CTRL_TYPE_MENU) { + cfg.max = crl_ctrl->data.v4l2_menu_items.size + - 1; + cfg.min = 0; + cfg.step = 0; + cfg.def = 0; + cfg.qmenu = crl_ctrl->data.v4l2_menu_items.menu; + cfg.elem_size = 0; + } else { + dev_dbg(&client->dev, + "%s Custom Control: type %d\n", + __func__, crl_ctrl->v4l2_type); + continue; + } + crl_ctrl->ctrl = v4l2_ctrl_new_custom(ctrl_handler, + &cfg, NULL); + break; + case CRL_V4L2_CTRL_TYPE_BOOLEAN: + case CRL_V4L2_CTRL_TYPE_BUTTON: + case CRL_V4L2_CTRL_TYPE_CTRL_CLASS: + default: + dev_err(&client->dev, + "%s Invalid control type\n", __func__); + continue; + break; + } + + if (!crl_ctrl->ctrl) + continue; + /* + * Blanking and framesize controls access to same register, + * Blank controls are disabled if framesize controls exists. + */ + if (crl_ctrl->ctrl_id == V4L2_CID_FRAME_LENGTH_LINES || + crl_ctrl->ctrl_id == V4L2_CID_LINE_LENGTH_PIXELS) + sensor->blanking_ctrl_not_use = 1; + + if (crl_ctrl->ctrl_id == CRL_CID_SENSOR_MODE) + sensor->direct_mode_in_use = 1; + + /* Save mandatory control references - link_freq in src sd */ + if (crl_ctrl->ctrl_id == V4L2_CID_LINK_FREQ && + (crl_ctrl->sd_type == CRL_SUBDEV_TYPE_SCALER || + crl_ctrl->sd_type == CRL_SUBDEV_TYPE_BINNER)) + sensor->link_freq = crl_ctrl->ctrl; + + /* Save mandatory control references - pixel_rate_pa PA sd */ + if (crl_ctrl->ctrl_id == V4L2_CID_PIXEL_RATE && + crl_ctrl->sd_type == CRL_SUBDEV_TYPE_PIXEL_ARRAY) + sensor->pixel_rate_pa = crl_ctrl->ctrl; + + /* Save mandatory control references - pixel_rate_csi src sd */ + if (crl_ctrl->ctrl_id == V4L2_CID_PIXEL_RATE && + (crl_ctrl->sd_type == CRL_SUBDEV_TYPE_SCALER || + crl_ctrl->sd_type == CRL_SUBDEV_TYPE_BINNER)) + sensor->pixel_rate_csi = crl_ctrl->ctrl; + + crl_ctrl->ctrl->flags |= crl_ctrl->flags; + + dev_dbg(&client->dev, + "%s idx: %d ctrl_id: 0x%x ctrl_name: %s\n ctrl: 0x%p", + __func__, i, crl_ctrl->ctrl_id, crl_ctrl->name, + crl_ctrl->ctrl); + + if (ctrl_handler->error) { + dev_err(&client->dev, + "%s controls 
initialization failed (%d)\n", + __func__, ctrl_handler->error); + rval = ctrl_handler->error; + goto error; + } + } + + sensor->pixel_array->sd.ctrl_handler = + &sensor->pixel_array->ctrl_handler; + + sensor->src->sd.ctrl_handler = &sensor->src->ctrl_handler; + + return 0; + +error: + v4l2_ctrl_handler_free(&sensor->pixel_array->ctrl_handler); + v4l2_ctrl_handler_free(&sensor->src->ctrl_handler); + + return rval; +} + +static bool __crlmodule_rect_matches(struct i2c_client *client, + const struct v4l2_rect *const rect1, + const struct v4l2_rect *const rect2) +{ + dev_dbg(&client->dev, "%s rect1 l:%d t:%d w:%d h:%d\n", __func__, + rect1->left, rect1->top, rect1->width, rect1->height); + dev_dbg(&client->dev, "%s rect2 l:%d t:%d w:%d h:%d\n", __func__, + rect2->left, rect2->top, rect2->width, rect2->height); + + return (rect1->left == rect2->left && + rect1->top == rect2->top && + rect1->width == rect2->width && + rect1->height == rect2->height); +} + +static unsigned int __crlmodule_get_mode_min_llp(struct crl_sensor *sensor) +{ + const struct crl_mode_rep *mode = sensor->current_mode; + const struct crl_sensor_limits *limits = + sensor->sensor_ds->sensor_limits; + unsigned int width = sensor->pixel_array->crop[CRL_PA_PAD_SRC].width; + unsigned int min_llp; + + if (mode->min_llp) + min_llp = mode->min_llp; /* mode specific limit */ + else if (limits->min_line_length_pixels) + min_llp = limits->min_line_length_pixels; /* sensor limit */ + else /* No restrictions */ + min_llp = width; + + return min_llp; +} + +static unsigned int __crlmodule_get_mode_max_llp(struct crl_sensor *sensor) +{ + const struct crl_mode_rep *mode = sensor->current_mode; + const struct crl_sensor_limits *limits = + sensor->sensor_ds->sensor_limits; + unsigned int max_llp; + + if (mode->max_llp) + max_llp = mode->max_llp; /* mode specific limit */ + else if (limits->min_line_length_pixels) + max_llp = limits->max_line_length_pixels; /* sensor limit */ + else /* No restrictions */ + max_llp = 
USHRT_MAX; + + return max_llp; +} + +static unsigned int __crlmodule_get_mode_min_fll(struct crl_sensor *sensor) +{ + const struct crl_mode_rep *mode = sensor->current_mode; + const struct crl_sensor_limits *limits = + sensor->sensor_ds->sensor_limits; + unsigned int height = sensor->pixel_array->crop[CRL_PA_PAD_SRC].height; + unsigned int min_fll; + + if (mode->min_fll) + min_fll = mode->min_fll; /* mode specific limit */ + else if (limits->min_frame_length_lines) + min_fll = limits->min_frame_length_lines; /* sensor limit */ + else /* No restrictions */ + min_fll = height; + + return min_fll; +} + +static unsigned int __crlmodule_get_mode_max_fll(struct crl_sensor *sensor) +{ + const struct crl_mode_rep *mode = sensor->current_mode; + const struct crl_sensor_limits *limits = + sensor->sensor_ds->sensor_limits; + unsigned int max_fll; + + if (mode->max_fll) + max_fll = mode->max_fll; /* mode specific limit */ + else if (limits->max_frame_length_lines) /* fix: FLL helper must use frame-length-lines limits, not line-length-pixels */ + max_fll = limits->max_frame_length_lines; /* sensor limit */ + else /* No restrictions */ + max_fll = USHRT_MAX; + + return max_fll; +} + +static void crlmodule_update_framesize(struct crl_sensor *sensor) +{ + struct i2c_client *client = v4l2_get_subdevdata(&sensor->src->sd); + unsigned int min_llp, max_llp, min_fll, max_fll; + struct v4l2_ctrl *llength; + struct v4l2_ctrl *flength; + + llength = __crlmodule_get_v4l2_ctrl(sensor, + V4L2_CID_LINE_LENGTH_PIXELS); + flength = __crlmodule_get_v4l2_ctrl(sensor, + V4L2_CID_FRAME_LENGTH_LINES); + + if (llength) { + min_llp = __crlmodule_get_mode_min_llp(sensor); + max_llp = __crlmodule_get_mode_max_llp(sensor); + + llength->minimum = min_llp; + llength->maximum = max_llp; + llength->default_value = llength->minimum; + dev_dbg(&client->dev, "%s llp:%d\n", __func__, llength->val); + } + + if (flength) { + min_fll = __crlmodule_get_mode_min_fll(sensor); + max_fll = __crlmodule_get_mode_max_fll(sensor); + flength->minimum = min_fll; + flength->maximum = max_fll; + 
flength->default_value = flength->minimum; + dev_dbg(&client->dev, "%s fll:%d\n", __func__, flength->val); + } +} + +static int crlmodule_update_frame_blanking(struct crl_sensor *sensor) +{ + struct i2c_client *client = v4l2_get_subdevdata(&sensor->src->sd); + unsigned int width = sensor->pixel_array->crop[CRL_PA_PAD_SRC].width; + unsigned int height = sensor->pixel_array->crop[CRL_PA_PAD_SRC].height; + unsigned int min_llp, max_llp, min_fll, max_fll; + struct v4l2_ctrl *vblank; + struct v4l2_ctrl *hblank; + + vblank = __crlmodule_get_v4l2_ctrl(sensor, V4L2_CID_VBLANK); + hblank = __crlmodule_get_v4l2_ctrl(sensor, V4L2_CID_HBLANK); + + if (hblank) { + min_llp = __crlmodule_get_mode_min_llp(sensor); + max_llp = __crlmodule_get_mode_max_llp(sensor); + + hblank->minimum = min_llp - width; + hblank->maximum = max_llp - width; + hblank->default_value = hblank->minimum; + dev_dbg(&client->dev, "%s hblank:%d\n", __func__, hblank->val); + } + + if (vblank) { + min_fll = __crlmodule_get_mode_min_fll(sensor); + max_fll = __crlmodule_get_mode_max_fll(sensor); + + vblank->minimum = min_fll - height; + vblank->maximum = max_fll - height; + vblank->default_value = vblank->minimum; + dev_dbg(&client->dev, "%s vblank:%d\n", __func__, vblank->val); + } + + return 0; +} + +static int __crlmodule_rect_index(enum crl_subdev_type type, + const struct crl_mode_rep *mode) +{ + int i; + + for (i = 0; i < mode->sd_rects_items; i++) { + if (type == mode->sd_rects[i].subdev_type) + return i; + } + + return -1; +} + +static void crlmodule_update_mode_bysel(struct crl_sensor *sensor) +{ + struct i2c_client *client = v4l2_get_subdevdata(&sensor->src->sd); + const struct crl_mode_rep *this; + unsigned int i; + int rect_index; + + dev_dbg(&client->dev, "%s look for w: %d, h: %d, in [%d] modes\n", + __func__, sensor->src->crop[CRL_PAD_SRC].width, + sensor->src->crop[CRL_PAD_SRC].height, + sensor->sensor_ds->modes_items); + + for (i = 0; i < sensor->sensor_ds->modes_items; i++) { + this = 
&sensor->sensor_ds->modes[i]; + + dev_dbg(&client->dev, "%s check mode list[%d] w: %d, h: %d\n", + __func__, i, this->width, this->height); + if (this->width != sensor->src->crop[CRL_PAD_SRC].width || + this->height != sensor->src->crop[CRL_PAD_SRC].height) + continue; + + if (sensor->pixel_array) { + dev_dbg(&client->dev, "%s Compare PA out rect\n", + __func__); + rect_index = + __crlmodule_rect_index(CRL_SUBDEV_TYPE_PIXEL_ARRAY, + this); + if (rect_index < 0) + continue; + if (!__crlmodule_rect_matches(client, + &sensor->pixel_array->crop[CRL_PA_PAD_SRC], + &this->sd_rects[rect_index].out_rect)) + continue; + } + if (sensor->binner) { + dev_dbg(&client->dev, "%s binning hor: %d vs. %d\n", + __func__, + sensor->binning_horizontal, + this->binn_hor); + if (sensor->binning_horizontal != this->binn_hor) + continue; + + dev_dbg(&client->dev, "%s binning vert: %d vs. %d\n", + __func__, + sensor->binning_vertical, + this->binn_vert); + if (sensor->binning_vertical != this->binn_vert) + continue; + + dev_dbg(&client->dev, "%s binner in rect\n", __func__); + rect_index = + __crlmodule_rect_index(CRL_SUBDEV_TYPE_BINNER, + this); + if (rect_index < 0) + continue; + if (!__crlmodule_rect_matches(client, + &sensor->binner->crop[CRL_PAD_SINK], + &this->sd_rects[rect_index].in_rect)) + continue; + + dev_dbg(&client->dev, "%s binner out rect\n", __func__); + if (!__crlmodule_rect_matches(client, + &sensor->binner->crop[CRL_PAD_SRC], + &this->sd_rects[rect_index].out_rect)) + continue; + } + + if (sensor->scaler) { + dev_dbg(&client->dev, "%s scaler scale_m %d vs. 
%d\n", + __func__, sensor->scale_m, + this->scale_m); + if (sensor->scale_m != this->scale_m) + continue; + + rect_index = + __crlmodule_rect_index(CRL_SUBDEV_TYPE_SCALER, + this); + if (rect_index < 0) + continue; + + dev_dbg(&client->dev, "%s scaler in rect\n", __func__); + if (!__crlmodule_rect_matches(client, + &sensor->scaler->crop[CRL_PAD_SINK], + &this->sd_rects[rect_index].in_rect)) + continue; + + dev_dbg(&client->dev, "%s scaler out rect\n", __func__); + if (!__crlmodule_rect_matches(client, + &sensor->scaler->crop[CRL_PAD_SRC], + &this->sd_rects[rect_index].out_rect)) + continue; + } + + /* Check if there are any dynamic compare items */ + if (sensor->ext_ctrl_impacts_mode_selection && + !__crlmodule_compare_ctrl_specific_data(sensor, + this->comp_items, + this->ctrl_data)) + continue; + + /* Found a perfect match! */ + dev_dbg(&client->dev, "%s found mode. idx: %d\n", __func__, i); + break; + } + + /* If no modes found, fall back to the fail safe mode index */ + if (i >= sensor->sensor_ds->modes_items) { + i = sensor->sensor_ds->fail_safe_mode_index; + this = &sensor->sensor_ds->modes[i]; + dev_dbg(&client->dev, + "%s no matching mode, set to default: %d\n", + __func__, i); + } + + sensor->current_mode = this; +} + +static void crlmodule_update_mode_v4l2ctrl(struct crl_sensor *sensor) +{ + struct i2c_client *client = v4l2_get_subdevdata(&sensor->src->sd); + const struct crl_mode_rep *this; + int i; + + dev_dbg(&client->dev, "%s Sensor Mode :%d\n", + __func__, sensor->sensor_mode); + /* point to selected mode */ + this = &sensor->sensor_ds->modes[sensor->sensor_mode]; + sensor->current_mode = this; + + for (i = 0; i < this->sd_rects_items; i++) { + + if (CRL_SUBDEV_TYPE_PIXEL_ARRAY == + this->sd_rects[i].subdev_type) { + sensor->pixel_array->crop[CRL_PA_PAD_SRC] = + this->sd_rects[i].out_rect; + } + + if (CRL_SUBDEV_TYPE_BINNER == + this->sd_rects[i].subdev_type) { + sensor->binner->sink_fmt = + this->sd_rects[i].in_rect; + 
sensor->binner->crop[CRL_PAD_SINK] = + this->sd_rects[i].in_rect; + sensor->binner->crop[CRL_PAD_SRC] = + this->sd_rects[i].out_rect; + sensor->binning_vertical = this->binn_vert; + sensor->binning_horizontal = this->binn_hor; + if (this->binn_vert > 1) + sensor->binner->compose = + this->sd_rects[i].out_rect; + } + + if (CRL_SUBDEV_TYPE_SCALER == + this->sd_rects[i].subdev_type) { + sensor->scaler->crop[CRL_PAD_SINK] = + this->sd_rects[i].in_rect; + sensor->scaler->crop[CRL_PAD_SRC] = + this->sd_rects[i].out_rect; + sensor->scaler->sink_fmt = + this->sd_rects[i].in_rect; + sensor->scale_m = this->scale_m; + if (this->scale_m != 1) + sensor->scaler->compose = + this->sd_rects[i].out_rect; + } + } + + /* Set source */ + sensor->src->crop[CRL_PAD_SRC].width = this->width; + sensor->src->crop[CRL_PAD_SRC].height = this->height; +} + +static void crlmodule_update_current_mode(struct crl_sensor *sensor) +{ + const struct crl_mode_rep *this; + int i; + + if (sensor->direct_mode_in_use) + crlmodule_update_mode_v4l2ctrl(sensor); + else + crlmodule_update_mode_bysel(sensor); + + /* + * We have a valid mode now. If there are any mode specific "get" + * controls defined in the configuration it could be queried by the + * user space for any mode specific information. So go through the + * mode specific v4l2_ctrls and update its value from the selected mode. 
+ */ + + this = sensor->current_mode; + + for (i = 0; i < this->comp_items; i++) { + struct crl_ctrl_data_pair *ctrl_comp = &this->ctrl_data[i]; + unsigned int idx; + + /* Get the v4l2_ctrl pointer corresponding ctrl id */ + if (__crlmodule_get_crl_ctrl_index(sensor, ctrl_comp->ctrl_id, + &idx)) + /* If not found, move to the next ctrl */ + continue; + + /* No need to update this control, if this is a set op ctrl */ + if (sensor->v4l2_ctrl_bank[idx].op_type == CRL_V4L2_CTRL_SET_OP) + continue; + + /* Update the control value */ + __v4l2_ctrl_s_ctrl(sensor->v4l2_ctrl_bank[idx].ctrl, + ctrl_comp->data); + } + + if (sensor->blanking_ctrl_not_use) + crlmodule_update_framesize(sensor); + else + crlmodule_update_frame_blanking(sensor); +} + +/* + * Function main code replicated from /drivers/media/i2c/smiapp/smiapp-core.c + * Slightly modified based on the CRL Module changes + */ +static int __crlmodule_get_format(struct v4l2_subdev *subdev, + struct v4l2_subdev_pad_config *cfg, + struct v4l2_subdev_format *fmt) +{ + struct crl_subdev *ssd = to_crlmodule_subdev(subdev); + struct crl_sensor *sensor = to_crlmodule_sensor(subdev); + struct v4l2_rect *r; + + if (fmt->which == V4L2_SUBDEV_FORMAT_TRY) { + fmt->format = *v4l2_subdev_get_try_format(subdev, cfg, + fmt->pad); + return 0; + } + + if (fmt->pad == ssd->source_pad) + r = &ssd->crop[ssd->source_pad]; + else + r = &ssd->sink_fmt; + + fmt->format.width = r->width; + fmt->format.height = r->height; + fmt->format.code = + sensor->sensor_ds->csi_fmts[sensor->fmt_index].code; + fmt->format.field = (ssd->field == V4L2_FIELD_ANY) ? 
+ V4L2_FIELD_NONE : ssd->field; + return 0; +} + +/* + * Function main code replicated from /drivers/media/i2c/smiapp/smiapp-core.c + * Slightly modified based on the CRL Module changes + */ +static int crlmodule_enum_mbus_code(struct v4l2_subdev *subdev, + struct v4l2_subdev_pad_config *cfg, + struct v4l2_subdev_mbus_code_enum *code) +{ + struct crl_sensor *sensor = to_crlmodule_sensor(subdev); + + if (code->index >= sensor->sensor_ds->csi_fmts_items) + return -EINVAL; + + code->code = sensor->sensor_ds->csi_fmts[code->index].code; + + return 0; +} + +static int crlmodule_enum_frame_size(struct v4l2_subdev *sd, + struct v4l2_subdev_pad_config *cfg, + struct v4l2_subdev_frame_size_enum *fse) +{ + struct crl_sensor *sensor = to_crlmodule_sensor(sd); + + if (fse->index >= sensor->sensor_ds->modes_items) + return -EINVAL; + + fse->min_width = sensor->sensor_ds->modes[fse->index].width; + fse->max_width = fse->min_width; + fse->min_height = sensor->sensor_ds->modes[fse->index].height; + fse->max_height = fse->min_height; + + return 0; +} + +/* + * Function main code replicated from /drivers/media/i2c/smiapp/smiapp-core.c + * Slightly modified based on the CRL Module changes + */ +static int crlmodule_get_format(struct v4l2_subdev *subdev, + struct v4l2_subdev_pad_config *cfg, + struct v4l2_subdev_format *fmt) +{ + struct crl_sensor *sensor = to_crlmodule_sensor(subdev); + int rval; + + mutex_lock(&sensor->mutex); + rval = __crlmodule_get_format(subdev, cfg, fmt); + mutex_unlock(&sensor->mutex); + + return rval; +} + +static int __crlmodule_sel_supported(struct v4l2_subdev *subdev, + struct v4l2_subdev_selection *sel) +{ + struct crl_subdev *ssd = to_crlmodule_subdev(subdev); + struct crl_sensor *sensor = to_crlmodule_sensor(subdev); + + if (ssd == sensor->pixel_array + && sel->pad == CRL_PA_PAD_SRC) { + switch (sel->target) { + case V4L2_SEL_TGT_NATIVE_SIZE: + case V4L2_SEL_TGT_CROP: + case V4L2_SEL_TGT_CROP_BOUNDS: + return 0; + } + } + if (ssd == sensor->binner) { + 
switch (sel->target) { + case V4L2_SEL_TGT_COMPOSE: + case V4L2_SEL_TGT_COMPOSE_BOUNDS: + if (sel->pad == CRL_PAD_SINK) + return 0; + } + } + if (ssd == sensor->scaler) { + switch (sel->target) { + case V4L2_SEL_TGT_CROP: + case V4L2_SEL_TGT_CROP_BOUNDS: + if (sel->pad == CRL_PAD_SRC) + return 0; + break; + case V4L2_SEL_TGT_COMPOSE: + case V4L2_SEL_TGT_COMPOSE_BOUNDS: + if (sel->pad == CRL_PAD_SINK) + return 0; + } + } + return -EINVAL; +} + +/* + * Function main code replicated from /drivers/media/i2c/smiapp/smiapp-core.c + * Slightly modified based on the CRL Module changes + */ +static void crlmodule_get_crop_compose(struct v4l2_subdev *subdev, + struct v4l2_subdev_pad_config *cfg, + struct v4l2_rect **crops, + struct v4l2_rect **comps, int which) +{ + struct crl_subdev *ssd = to_crlmodule_subdev(subdev); + unsigned int i; + + /* Currently we support only 2 pads */ + BUG_ON(subdev->entity.num_pads > CRL_PADS); + + if (which == V4L2_SUBDEV_FORMAT_ACTIVE) { + if (crops) + for (i = 0; i < subdev->entity.num_pads; i++) + crops[i] = &ssd->crop[i]; + if (comps) + *comps = &ssd->compose; + } else { + if (crops) { + for (i = 0; i < subdev->entity.num_pads; i++) { + crops[i] = v4l2_subdev_get_try_crop(subdev, + cfg, i); + BUG_ON(!crops[i]); + } + } + if (comps) { + *comps = v4l2_subdev_get_try_compose(subdev, cfg, + CRL_PAD_SINK); + BUG_ON(!*comps); + } + } +} + +/* + * Function main code replicated from /drivers/media/i2c/smiapp/smiapp-core.c + * Slightly modified based on the CRL Module changes + */ +static int crlmodule_get_selection(struct v4l2_subdev *subdev, + struct v4l2_subdev_pad_config *cfg, + struct v4l2_subdev_selection *sel) +{ + struct crl_subdev *ssd = to_crlmodule_subdev(subdev); + struct crl_sensor *sensor = to_crlmodule_sensor(subdev); + struct v4l2_rect *comp, *crops[CRL_PADS]; + struct v4l2_rect sink_fmt; + int ret; + + ret = __crlmodule_sel_supported(subdev, sel); + if (ret) + return ret; + + crlmodule_get_crop_compose(subdev, cfg, crops, &comp, 
sel->which); + + if (sel->which == V4L2_SUBDEV_FORMAT_ACTIVE) { + sink_fmt = ssd->sink_fmt; + } else { + struct v4l2_mbus_framefmt *fmt = + v4l2_subdev_get_try_format(subdev, cfg, ssd->sink_pad); + sink_fmt.left = 0; + sink_fmt.top = 0; + sink_fmt.width = fmt->width; + sink_fmt.height = fmt->height; + } + + switch (sel->target) { + case V4L2_SEL_TGT_CROP_BOUNDS: + case V4L2_SEL_TGT_NATIVE_SIZE: + if (ssd == sensor->pixel_array) { + sel->r.left = sel->r.top = 0; + sel->r.width = + sensor->sensor_ds->sensor_limits->x_addr_max; + sel->r.height = + sensor->sensor_ds->sensor_limits->y_addr_max; + } else if (sel->pad == ssd->sink_pad) { + sel->r = sink_fmt; + } else { + sel->r = *comp; + } + break; + case V4L2_SEL_TGT_CROP: + case V4L2_SEL_TGT_COMPOSE_BOUNDS: + sel->r = *crops[sel->pad]; + break; + case V4L2_SEL_TGT_COMPOSE: + sel->r = *comp; + break; + } + return 0; +} + +/* + * Function main code replicated from /drivers/media/i2c/smiapp/smiapp-core.c + * Slightly modified based on the CRL Module changes + */ +static void crlmodule_propagate(struct v4l2_subdev *subdev, + struct v4l2_subdev_pad_config *cfg, int which, + int target) +{ + struct crl_sensor *sensor = to_crlmodule_sensor(subdev); + struct crl_subdev *ssd = to_crlmodule_subdev(subdev); + struct v4l2_rect *comp, *crops[CRL_PADS]; + + crlmodule_get_crop_compose(subdev, cfg, crops, &comp, which); + + switch (target) { + case V4L2_SEL_TGT_CROP: + comp->width = crops[CRL_PAD_SINK]->width; + comp->height = crops[CRL_PAD_SINK]->height; + if (which == V4L2_SUBDEV_FORMAT_ACTIVE) { + if (ssd == sensor->scaler) { + sensor->scale_m = 1; + } else if (ssd == sensor->binner) { + sensor->binning_horizontal = 1; + sensor->binning_vertical = 1; + } + } + /* Fall through */ + case V4L2_SEL_TGT_COMPOSE: + *crops[CRL_PAD_SRC] = *comp; + break; + default: + BUG(); + } +} + +/* + * Function main code replicated from /drivers/media/i2c/smiapp/smiapp-core.c + * Slightly modified based on the CRL Module changes + */ +static int 
crlmodule_set_compose(struct v4l2_subdev *subdev, + struct v4l2_subdev_pad_config *cfg, + struct v4l2_subdev_selection *sel) +{ + struct crl_sensor *sensor = to_crlmodule_sensor(subdev); + struct crl_subdev *ssd = to_crlmodule_subdev(subdev); + struct v4l2_rect *comp, *crops[CRL_PADS]; + + crlmodule_get_crop_compose(subdev, cfg, crops, &comp, sel->which); + + sel->r.top = 0; + sel->r.left = 0; + + if (ssd == sensor->binner) { + sensor->binning_horizontal = crops[CRL_PAD_SINK]->width / + sel->r.width; + sensor->binning_vertical = crops[CRL_PAD_SINK]->height / + sel->r.height; + } else { + sensor->scale_m = crops[CRL_PAD_SINK]->width * + sensor->sensor_ds->sensor_limits->scaler_m_min / + sel->r.width; + } + + *comp = sel->r; + + crlmodule_propagate(subdev, cfg, sel->which, + V4L2_SEL_TGT_COMPOSE); + + if (sel->which == V4L2_SUBDEV_FORMAT_ACTIVE) + crlmodule_update_current_mode(sensor); + + return 0; +} + +/* + * Function main code replicated from /drivers/media/i2c/smiapp/smiapp-core.c + * Slightly modified based on the CRL Module changes + */ +static int crlmodule_set_crop(struct v4l2_subdev *subdev, + struct v4l2_subdev_pad_config *cfg, + struct v4l2_subdev_selection *sel) +{ + struct crl_subdev *ssd = to_crlmodule_subdev(subdev); + struct crl_sensor *sensor = to_crlmodule_sensor(subdev); + struct v4l2_rect *src_size, *crops[CRL_PADS]; + struct v4l2_rect _r; + + crlmodule_get_crop_compose(subdev, cfg, crops, NULL, sel->which); + + if (sel->which == V4L2_SUBDEV_FORMAT_ACTIVE) { + if (sel->pad == ssd->sink_pad) + src_size = &ssd->sink_fmt; + else + src_size = &ssd->compose; + } else { + if (sel->pad == ssd->sink_pad) { + _r.left = 0; + _r.top = 0; + _r.width = v4l2_subdev_get_try_format(subdev, + cfg, sel->pad) + ->width; + _r.height = v4l2_subdev_get_try_format(subdev, + cfg, sel->pad) + ->height; + src_size = &_r; + } else { + src_size = + v4l2_subdev_get_try_compose(subdev, cfg, + ssd->sink_pad); + } + } + + if (ssd == sensor->src && sel->pad == CRL_PAD_SRC) { + 
sel->r.left = 0; + sel->r.top = 0; + } + + sel->r.width = min(sel->r.width, src_size->width); + sel->r.height = min(sel->r.height, src_size->height); + + sel->r.left = min_t(s32, sel->r.left, src_size->width - sel->r.width); + sel->r.top = min_t(s32, sel->r.top, src_size->height - sel->r.height); + + *crops[sel->pad] = sel->r; + + if (ssd != sensor->pixel_array && sel->pad == CRL_PAD_SINK) + crlmodule_propagate(subdev, cfg, sel->which, + V4L2_SEL_TGT_CROP); + + /* TODO! Should we short list supported mode? */ + + return 0; +} + +/* + * Function main code replicated from /drivers/media/i2c/smiapp/smiapp-core.c + * Modified based on the CRL Module changes + */ +static int crlmodule_set_format(struct v4l2_subdev *subdev, + struct v4l2_subdev_pad_config *cfg, + struct v4l2_subdev_format *fmt) +{ + struct crl_sensor *sensor = to_crlmodule_sensor(subdev); + struct crl_subdev *ssd = to_crlmodule_subdev(subdev); + struct i2c_client *client = v4l2_get_subdevdata(&sensor->src->sd); + struct v4l2_rect *crops[CRL_PADS]; + + dev_dbg(&client->dev, "%s sd_name: %s pad: %d w: %d, h: %d code: 0x%x", + __func__, ssd->sd.name, fmt->pad, + fmt->format.width, fmt->format.height, + fmt->format.code); + + mutex_lock(&sensor->mutex); + + /* Currently we only support ALTERNATE interlaced mode. */ + if (fmt->format.field != V4L2_FIELD_ALTERNATE) + fmt->format.field = V4L2_FIELD_NONE; + ssd->field = fmt->format.field; + + if (fmt->pad == ssd->source_pad) { + u32 code = fmt->format.code; + int rval = __crlmodule_get_format(subdev, cfg, fmt); + + if (!rval && subdev == &sensor->src->sd) { + /* Check if this code is supported, if yes get index */ + int idx = __crlmodule_get_data_fmt_index(sensor, code); + + if (idx < 0) { + dev_err(&client->dev, "%s invalid format\n", + __func__); + mutex_unlock(&sensor->mutex); + return -EINVAL; + } + + sensor->fmt_index = idx; + /* TODO! validate PLL? 
*/ + } + mutex_unlock(&sensor->mutex); + return rval; + } + + fmt->format.width = + clamp_t(uint32_t, fmt->format.width, + sensor->sensor_ds->sensor_limits->x_addr_min, + sensor->sensor_ds->sensor_limits->x_addr_max); + fmt->format.height = + clamp_t(uint32_t, fmt->format.height, + sensor->sensor_ds->sensor_limits->y_addr_min, + sensor->sensor_ds->sensor_limits->y_addr_max); + + crlmodule_get_crop_compose(subdev, cfg, crops, NULL, fmt->which); + + crops[ssd->sink_pad]->left = 0; + crops[ssd->sink_pad]->top = 0; + crops[ssd->sink_pad]->width = fmt->format.width; + crops[ssd->sink_pad]->height = fmt->format.height; + if (fmt->which == V4L2_SUBDEV_FORMAT_ACTIVE) + ssd->sink_fmt = *crops[ssd->sink_pad]; + + crlmodule_propagate(subdev, cfg, fmt->which, + V4L2_SEL_TGT_CROP); + + crlmodule_update_current_mode(sensor); + + mutex_unlock(&sensor->mutex); + + return 0; +} + +/* + * Function main code replicated from /drivers/media/i2c/smiapp/smiapp-core.c + * Slightly modified based on the CRL Module changes + */ +static int crlmodule_set_selection(struct v4l2_subdev *subdev, + struct v4l2_subdev_pad_config *cfg, + struct v4l2_subdev_selection *sel) +{ + struct crl_subdev *ssd = to_crlmodule_subdev(subdev); + struct crl_sensor *sensor = to_crlmodule_sensor(subdev); + struct i2c_client *client = v4l2_get_subdevdata(&sensor->src->sd); + int ret; + + dev_dbg(&client->dev, "%s sd_name: %s sel w: %d, h: %d target: %d", + __func__, ssd->sd.name, sel->r.width, + sel->r.height, sel->target); + + ret = __crlmodule_sel_supported(subdev, sel); + if (ret) { + dev_dbg(&client->dev, + "%s sd_name: %s w: %d, h: %d target: %d not supported", + __func__, ssd->sd.name, sel->r.width, + sel->r.height, sel->target); + return ret; + } + + mutex_lock(&sensor->mutex); + + sel->r.width = max_t(unsigned int, + sensor->sensor_ds->sensor_limits->x_addr_min, + sel->r.width); + sel->r.height = max_t(unsigned int, + sensor->sensor_ds->sensor_limits->y_addr_min, + sel->r.height); + switch (sel->target) { + 
case V4L2_SEL_TGT_CROP: + ret = crlmodule_set_crop(subdev, cfg, sel); + break; + case V4L2_SEL_TGT_COMPOSE: + ret = crlmodule_set_compose(subdev, cfg, sel); + break; + default: + ret = -EINVAL; + } + + crlmodule_update_current_mode(sensor); + + mutex_unlock(&sensor->mutex); + return ret; +} + +static int crlmodule_get_skip_frames(struct v4l2_subdev *subdev, u32 *frames) +{ + /* TODO Handle this */ + return 0; +} + +static int crlmodule_start_streaming(struct crl_sensor *sensor) +{ + struct i2c_client *client = v4l2_get_subdevdata(&sensor->src->sd); + const struct crl_pll_configuration *pll; + const struct crl_csi_data_fmt *fmt; + int rval; + + dev_dbg(&client->dev, "%s start streaming pll_idx: %d fmt_idx: %d\n", + __func__, sensor->pll_index, + sensor->fmt_index); + + pll = &sensor->sensor_ds->pll_configs[sensor->pll_index]; + fmt = &sensor->sensor_ds->csi_fmts[sensor->fmt_index]; + + crlmodule_update_current_mode(sensor); + + rval = crlmodule_write_regs(sensor, fmt->regs, fmt->regs_items); + if (rval) { + dev_err(&client->dev, "%s failed to set format\n", __func__); + return rval; + } + + rval = crlmodule_write_regs(sensor, pll->pll_regs, pll->pll_regs_items); + if (rval) { + dev_err(&client->dev, "%s failed to set plls\n", __func__); + return rval; + } + + /* Write mode list */ + rval = crlmodule_write_regs(sensor, + sensor->current_mode->mode_regs, + sensor->current_mode->mode_regs_items); + if (rval) { + dev_err(&client->dev, "%s failed to set mode\n", __func__); + return rval; + } + + /* Write stream on list */ + rval = crlmodule_write_regs(sensor, + sensor->sensor_ds->streamon_regs, + sensor->sensor_ds->streamon_regs_items); + if (rval) { + dev_err(&client->dev, "%s failed to set stream\n", __func__); + return rval; + } + + return 0; +} + +static int crlmodule_stop_streaming(struct crl_sensor *sensor) +{ + return crlmodule_write_regs(sensor, + sensor->sensor_ds->streamoff_regs, + sensor->sensor_ds->streamoff_regs_items); +} + +static int 
crlmodule_set_stream(struct v4l2_subdev *subdev, int enable) +{ + struct crl_sensor *sensor = to_crlmodule_sensor(subdev); + struct i2c_client *client = v4l2_get_subdevdata(&sensor->src->sd); + int rval = 0; + + mutex_lock(&sensor->mutex); + + if (sensor->streaming == enable) + goto out; + + if (enable) { + + if (sensor->msr_list) { + rval = crlmodule_apply_msrlist(client, + sensor->msr_list); + if (rval) + dev_warn(&client->dev, "msrlist write error %d\n", + rval); + } + rval = crlmodule_start_streaming(sensor); + if (!rval) + sensor->streaming = 1; + } else { + rval = crlmodule_stop_streaming(sensor); + sensor->streaming = 0; + } + +out: + mutex_unlock(&sensor->mutex); + + /* SENSOR_IDLE control cannot be set when streaming*/ + __crlmodule_grab_v4l2_ctrl(sensor, SENSOR_IDLE, enable); + + /* SENSOR_STREAMING controls cannot be set when not streaming */ + __crlmodule_grab_v4l2_ctrl(sensor, SENSOR_STREAMING, !enable); + + /* SENSOR_POWERED_ON controls does not matter about streaming. */ + __crlmodule_grab_v4l2_ctrl(sensor, SENSOR_POWERED_ON, false); + + return rval; +} + +static int crlmodule_identify_module(struct v4l2_subdev *subdev) +{ + struct crl_sensor *sensor = to_crlmodule_sensor(subdev); + struct i2c_client *client = v4l2_get_subdevdata(&sensor->src->sd); + unsigned int size = 0, pos; + char *id_string; + const char *expect_id; + int i, ret; + u32 val; + + for (i = 0; i < sensor->sensor_ds->id_reg_items; i++) + size += sensor->sensor_ds->id_regs[i].width + 1; + + /* TODO! If no ID! return success? 
*/ + if (!size) + return 0; + + expect_id = sensor->platform_data->id_string; + /* Create string variabel to append module ID */ + id_string = kzalloc(size, GFP_KERNEL); + if (!id_string) + return -ENOMEM; + *id_string = '\0'; + + /* Go through each regs in the list and append to id_string */ + for (i = 0; i < sensor->sensor_ds->id_reg_items; i++) { + ret = crlmodule_read_reg(sensor, + sensor->sensor_ds->id_regs[i].reg, + &val); + if (ret) + goto out; + + if (i) + pos += snprintf(id_string + pos, size - pos, " 0x%x", val); + else + pos = snprintf(id_string, size, "0x%x", val); + if (pos >= size) + break; + } + + /* Check here if this module in the supported list + * Ideally the module manufacturer and id should be in platform + * data or ACPI and here the driver should read the value from the + * register and check if this matches to any in the supported + * platform data + */ + if (expect_id && + (strnlen(id_string, size) != strnlen(expect_id, size + 1) || + strncmp(id_string, expect_id, size))) { + dev_err(&client->dev, + "Sensor detection failed: expect \"%s\" actual \"%s\"", + expect_id, id_string); + ret = -ENODEV; + } + +out: + dev_dbg(&client->dev, "%s module: %s expected id: %s\n", + __func__, id_string, + (expect_id) ? expect_id : "not specified"); + kfree(id_string); + if (ret) + dev_err(&client->dev, "sensor detection failed\n"); + return ret; +} + +static int crlmodule_get_frame_desc(struct v4l2_subdev *subdev, + unsigned int pad, + struct v4l2_mbus_frame_desc *desc) +{ + struct crl_sensor *sensor = to_crlmodule_sensor(subdev); + struct crl_frame_desc *crl_desc = sensor->sensor_ds->frame_desc; + unsigned int i; + + desc->num_entries = sensor->sensor_ds->frame_desc_entries; + if (desc->num_entries) + desc->type = sensor->sensor_ds->frame_desc_type; + + /* + * By any chance the sensor configuration has more than the maximum + * supported, clip the number of entries to the MAX supported. 
+ */ + if (desc->num_entries > V4L2_FRAME_DESC_ENTRY_MAX) + desc->num_entries = V4L2_FRAME_DESC_ENTRY_MAX; + + for (i = 0; i < desc->num_entries; i++) { + int ret; + u32 val; + + ret = __crlmodule_parse_dynamic_entity(sensor, + crl_desc[i].flags, &val); + if (ret) + return ret; + desc->entry[i].flags = (u16)val; + + ret = __crlmodule_parse_dynamic_entity(sensor, crl_desc[i].bpp, + &val); + if (ret) + return ret; + desc->entry[i].bpp = (u8)val; + + ret = __crlmodule_parse_dynamic_entity( + sensor, crl_desc[i].pixelcode, &val); + if (ret) + return ret; + desc->entry[i].pixelcode = val; + + if (desc->entry[i].flags & V4L2_MBUS_FRAME_DESC_FL_BLOB) { + ret = __crlmodule_parse_dynamic_entity( + sensor, crl_desc[i].length, &val); + if (ret) + return ret; + desc->entry[i].length = val; + } else { + ret = __crlmodule_parse_dynamic_entity( + sensor, crl_desc[i].start_line, &val); + if (ret) + return ret; + desc->entry[i].two_dim.start_line = + (u16)val; + + ret = __crlmodule_parse_dynamic_entity( + sensor, crl_desc[i].start_pixel, &val); + if (ret) + return ret; + desc->entry[i].two_dim.start_pixel = + (u16)val; + + ret = __crlmodule_calc_dynamic_entity_values( + sensor, crl_desc[i].height.ops_items, + crl_desc[i].height.ops, &val); + if (ret) + return ret; + desc->entry[i].two_dim.height = (u16)val; + + ret = __crlmodule_calc_dynamic_entity_values( + sensor, crl_desc[i].width.ops_items, + crl_desc[i].width.ops, &val); + if (ret) + return ret; + desc->entry[i].two_dim.width = (u16)val; + } + + if (desc->type == CRL_V4L2_MBUS_FRAME_DESC_TYPE_CSI2) { + ret = __crlmodule_parse_dynamic_entity( + sensor, crl_desc[i].csi2_channel, &val); + if (ret) + return ret; + desc->entry[i].bus.csi2.channel = (u8)val; + + ret = __crlmodule_parse_dynamic_entity( + sensor, crl_desc[i].csi2_data_type, &val); + if (ret) + return ret; + desc->entry[i].bus.csi2.data_type = (u8)val; + } + } + + return 0; +} + + +static int crlmodule_get_routing(struct v4l2_subdev *subdev, + struct 
v4l2_subdev_routing *route) +{ + struct crl_sensor *sensor = to_crlmodule_sensor(subdev); + struct crl_subdev *ssd = to_crlmodule_subdev(subdev); + int i; + + if (!route) + return -EINVAL; + + if (ssd != sensor->src || + sensor->sensor_ds->frame_desc_entries <= 1) + return -ENOIOCTLCMD; + + for (i = 0; i < min(sensor->sensor_ds->frame_desc_entries, + route->num_routes); i++) { + route->routes[i].sink_pad = CRL_PAD_SINK; + route->routes[i].sink_stream = 0; + route->routes[i].source_pad = CRL_PAD_SRC; + route->routes[i].source_stream = i; + route->routes[i].flags = sensor->src->route_flags[i]; + } + + route->num_routes = i; + return 0; +} + +static int crlmodule_set_routing(struct v4l2_subdev *subdev, + struct v4l2_subdev_routing *route) +{ + struct crl_sensor *sensor = to_crlmodule_sensor(subdev); + struct crl_subdev *ssd = to_crlmodule_subdev(subdev); + const unsigned int stream_nr = sensor->sensor_ds->frame_desc_entries; + struct v4l2_subdev_route *t; + int i, ret = 0; + + if (!route) + return -EINVAL; + + if (ssd != sensor->src || + sensor->sensor_ds->frame_desc_entries <= 1) + return -ENOIOCTLCMD; + + for (i = 0; i < min(stream_nr, route->num_routes); ++i) { + t = &route->routes[i]; + + if (t->source_stream > stream_nr - 1) + continue; + + if (t->source_pad != CRL_PAD_SRC || + t->sink_pad != CRL_PAD_SINK) + continue; + + if (sensor->src->route_flags[t->source_stream] & + V4L2_SUBDEV_ROUTE_FL_IMMUTABLE) + continue; + + if (t->flags & V4L2_SUBDEV_ROUTE_FL_ACTIVE) + sensor->src->route_flags[t->source_stream] |= + V4L2_SUBDEV_ROUTE_FL_ACTIVE; + else if (!(t->flags & V4L2_SUBDEV_ROUTE_FL_ACTIVE)) + sensor->src->route_flags[t->source_stream] &= + (~V4L2_SUBDEV_ROUTE_FL_ACTIVE); + } + + return ret; +} + +/* + * This function executes the initialisation routines after the power on + * is successfully completed. 
Following operations are done + * + * Initiases registers after sensor power up - if any such list is configured + * V4l2 Ctrl handler framework intialisation + */ +static int crlmodule_run_poweron_init(struct crl_sensor *sensor) +{ + struct i2c_client *client = v4l2_get_subdevdata(&sensor->src->sd); + int rval; + + dev_dbg(&client->dev, "%s set power up registers: %d\n", + __func__, sensor->sensor_ds->powerup_regs_items); + + /* Write the power up registers */ + rval = crlmodule_write_regs(sensor, sensor->sensor_ds->powerup_regs, + sensor->sensor_ds->powerup_regs_items); + if (rval) { + dev_err(&client->dev, "%s failed to set powerup registers\n", + __func__); + return rval; + } + + /* Are we still initialising...? If yes, return here. */ + if (!sensor->pixel_array) + return 0; + + dev_dbg(&client->dev, "%s init v4l2 controls", __func__); + + rval = v4l2_ctrl_handler_setup( + &sensor->pixel_array->ctrl_handler); + if (rval) { + dev_err(&client->dev, "%s PA v4l2_ctrl_handler failed\n", + __func__); + return rval; + } + + rval = v4l2_ctrl_handler_setup(&sensor->src->ctrl_handler); + if (rval) + dev_err(&client->dev, "%s SRC v4l2_ctrl_handler failed\n", + __func__); + + /* SENSOR_IDLE control can be set only when not streaming*/ + __crlmodule_grab_v4l2_ctrl(sensor, SENSOR_IDLE, false); + + /* SENSOR_STREAMING controls can be set only when streaming */ + __crlmodule_grab_v4l2_ctrl(sensor, SENSOR_STREAMING, true); + + /* SENSOR_POWERED_ON controls can be set after power on */ + __crlmodule_grab_v4l2_ctrl(sensor, SENSOR_POWERED_ON, false); + + mutex_lock(&sensor->mutex); + crlmodule_update_current_mode(sensor); + mutex_unlock(&sensor->mutex); + + return rval; +} + +static int custom_gpio_request(struct crl_sensor *sensor) +{ + struct i2c_client *client = v4l2_get_subdevdata(&sensor->src->sd); + int i; + + for (i = 0; i < CRL_MAX_CUSTOM_GPIO_AMOUNT; i++) { + if (sensor->platform_data->custom_gpio[i].name[0] == '\0') + break; + if (devm_gpio_request_one( + &client->dev, + 
sensor->platform_data->custom_gpio[i].number, 0, + sensor->platform_data->custom_gpio[i].name) != 0) { + dev_err(&client->dev, + "unable to acquire %s %d\n", + sensor->platform_data->custom_gpio[i].name, + sensor->platform_data->custom_gpio[i].number); + return -ENODEV; + } + } + return 0; +} + +static void custom_gpio_ctrl(struct crl_sensor *sensor, bool set) +{ + int i; + unsigned int val; + + for (i = 0; i < CRL_MAX_CUSTOM_GPIO_AMOUNT; i++) { + if (sensor->platform_data->custom_gpio[i].name[0] == '\0') + break; + if (set) + val = sensor->platform_data->custom_gpio[i].val; + else + val = sensor->platform_data->custom_gpio[i].undo_val; + + gpio_set_value( + sensor->platform_data->custom_gpio[i].number, val); + } +} + +/* + * This function handles sensor power up routine failure because of any failed + * step in the routine. The index "rev_idx" is the index of the last power + * sequence entity successfully completed. This function executes the power + * sequence entities in reverse order with undo values. 
+ */ +static void crlmodule_undo_poweron_entities( + struct crl_sensor *sensor, + int rev_idx) +{ + struct i2c_client *client = v4l2_get_subdevdata(&sensor->src->sd); + struct crl_power_seq_entity *entity; + int idx; + + for (idx = rev_idx; idx >= 0; idx--) { + entity = &sensor->pwr_entity[idx]; + dev_dbg(&client->dev, "%s power type %d index %d\n", + __func__, entity->type, idx); + + switch (entity->type) { + case CRL_POWER_ETY_GPIO_FROM_PDATA: + gpio_set_value(sensor->platform_data->xshutdown, + entity->undo_val); + break; + case CRL_POWER_ETY_GPIO_FROM_PDATA_BY_NUMBER: + custom_gpio_ctrl(sensor, false); + break; + case CRL_POWER_ETY_GPIO_CUSTOM: + if (entity->gpiod_priv) { + if (gpiod_cansleep(entity->gpiod_priv)) + gpiod_set_raw_value_cansleep( + entity->gpiod_priv, + entity->undo_val); + else + gpiod_set_raw_value(entity->gpiod_priv, + entity->undo_val); + } else { + gpio_set_value(entity->ent_number, + entity->undo_val); + } + break; + case CRL_POWER_ETY_REGULATOR_FRAMEWORK: + regulator_disable(entity->regulator_priv); + break; + case CRL_POWER_ETY_CLK_FRAMEWORK: + clk_disable_unprepare(sensor->xclk); + break; + default: + dev_err(&client->dev, "%s Invalid power type\n", + __func__); + break; + } + + if (entity->delay) + usleep_range(entity->delay, entity->delay + 10); + } +} + +static int __crlmodule_powerup_sequence(struct crl_sensor *sensor) +{ + struct i2c_client *client = v4l2_get_subdevdata(&sensor->src->sd); + struct crl_power_seq_entity *entity; + unsigned idx; + int rval; + + for (idx = 0; idx < sensor->sensor_ds->power_items; idx++) { + entity = &sensor->pwr_entity[idx]; + dev_dbg(&client->dev, "%s power type %d index %d\n", + __func__, entity->type, idx); + + switch (entity->type) { + case CRL_POWER_ETY_GPIO_FROM_PDATA: + gpio_set_value(sensor->platform_data->xshutdown, + entity->val); + break; + case CRL_POWER_ETY_GPIO_FROM_PDATA_BY_NUMBER: + custom_gpio_ctrl(sensor, true); + break; + case CRL_POWER_ETY_GPIO_CUSTOM: + if (entity->gpiod_priv) { + 
if (gpiod_cansleep(entity->gpiod_priv)) + gpiod_set_raw_value_cansleep( + entity->gpiod_priv, + entity->val); + else + gpiod_set_raw_value(entity->gpiod_priv, + entity->val); + } else { + gpio_set_value(entity->ent_number, entity->val); + } + break; + case CRL_POWER_ETY_REGULATOR_FRAMEWORK: + rval = regulator_enable(entity->regulator_priv); + if (rval) { + dev_err(&client->dev, + "Failed to enable regulator: %d\n", + rval); + devm_regulator_put(entity->regulator_priv); + entity->regulator_priv = NULL; + goto error; + } + break; + case CRL_POWER_ETY_CLK_FRAMEWORK: + rval = clk_set_rate(sensor->xclk, + sensor->platform_data->ext_clk); + if (rval < 0) { + dev_err(&client->dev, + "unable to set clock freq to %u\n", + sensor->platform_data->ext_clk); + goto error; + } + if (clk_get_rate(sensor->xclk) != + sensor->platform_data->ext_clk) + dev_warn(&client->dev, + "warning: unable to set \ + accurate clock freq %u\n", + sensor->platform_data->ext_clk); + rval = clk_prepare_enable(sensor->xclk); + if (rval) { + dev_err(&client->dev, "Failed to enable \ + clock: %d\n", rval); + goto error; + } + break; + default: + dev_err(&client->dev, "Invalid power type\n"); + rval = -ENODEV; + goto error; + } + + if (entity->delay) + usleep_range(entity->delay, entity->delay + 10); + } + + return 0; +error: + dev_err(&client->dev, "Error:Power sequece failed\n"); + if (idx > 0) + crlmodule_undo_poweron_entities(sensor, idx-1); + return rval; +} + +static int crlmodule_set_power(struct v4l2_subdev *subdev, int on) +{ + struct crl_sensor *sensor = to_crlmodule_sensor(subdev); + struct i2c_client *client = v4l2_get_subdevdata(&sensor->src->sd); + int ret = 0; + + if (on) { + ret = pm_runtime_get_sync(&client->dev); + if (ret < 0) { + pm_runtime_put(&client->dev); + return ret; + } + } + + mutex_lock(&sensor->power_mutex); + if (on && !sensor->power_count) { + usleep_range(2000, 3000); + ret = crlmodule_run_poweron_init(sensor); + if (ret < 0) { + pm_runtime_put(&client->dev); + goto out; 
+ } + } + + /* Update the power count. */ + sensor->power_count += on ? 1 : -1; + WARN_ON(sensor->power_count < 0); + +out: + mutex_unlock(&sensor->power_mutex); + + if (!on) + pm_runtime_put(&client->dev); + + return ret; +} + +static const struct v4l2_subdev_ops crlmodule_ops; +static const struct v4l2_subdev_internal_ops crlmodule_internal_ops; +static const struct media_entity_operations crlmodule_entity_ops; + +/* + * Function main code replicated from /drivers/media/i2c/smiapp/smiapp-core.c + * Modified based on the CRL Module changes + */ +static int crlmodule_init_subdevs(struct v4l2_subdev *subdev) +{ + struct crl_sensor *sensor = to_crlmodule_sensor(subdev); + struct crlmodule_platform_data *platform_data = sensor->platform_data; + struct i2c_client *client = v4l2_get_subdevdata(&sensor->src->sd); + struct crl_subdev *prev_sd = NULL; + int i = 0, j; + struct crl_subdev *sd; + int rval = 0; + + dev_dbg(&client->dev, "%s\n", __func__); + + /* + * The scaler, binner and PA order matters. Sensor configuration file + * must maintain this order. PA sub dev is a must and binner and + * scaler can be omitted based on the sensor. But if scaler is present + * it must be the first sd. + */ + if (sensor->sensor_ds->subdevs[i].subdev_type + == CRL_SUBDEV_TYPE_SCALER) { + sensor->scaler = &sensor->ssds[sensor->ssds_used]; + sensor->ssds_used++; + i++; + } + + if (sensor->sensor_ds->subdevs[i].subdev_type + == CRL_SUBDEV_TYPE_BINNER) { + sensor->binner = &sensor->ssds[sensor->ssds_used]; + sensor->ssds_used++; + i++; + } + + if (sensor->sensor_ds->subdevs[i].subdev_type + == CRL_SUBDEV_TYPE_PIXEL_ARRAY) { + sensor->pixel_array = &sensor->ssds[sensor->ssds_used]; + sensor->ssds_used++; + i++; + } + + /* CRL MediaCTL IF driver can't handle if none of these sd's present! 
*/ + if (!sensor->ssds_used) { + dev_err(&client->dev, "%s no subdevs present\n", __func__); + return -ENODEV; + } + + if (!sensor->sensor_ds->pll_config_items) { + dev_err(&client->dev, "%s no pll configurations\n", __func__); + return -ENODEV; + } + + /* TODO validate rest of the settings from the sensor definition file */ + + dev_dbg(&client->dev, "%s subdevs: %d\n", __func__, i); + + for (i = 0; i < sensor->ssds_used; i++) { + bool has_substreams = false; + + sd = &sensor->ssds[i]; + + if (sd != sensor->src) + v4l2_subdev_init(&sd->sd, &crlmodule_ops); + else if (sensor->sensor_ds->frame_desc_entries > 1) + has_substreams = true; + + sd->sensor = sensor; + + if (sd == sensor->pixel_array) { + sd->npads = 1; + } else { + sd->npads = 2; + sd->source_pad = 1; + } + + if (platform_data->suffix) + snprintf(sd->sd.name, + sizeof(sd->sd.name), "%s %c", + sensor->sensor_ds->subdevs[i].name, + platform_data->suffix); + else + snprintf(sd->sd.name, + sizeof(sd->sd.name), "%s %d-%4.4x", + sensor->sensor_ds->subdevs[i].name, + i2c_adapter_id(client->adapter), + client->addr); + + + sd->sink_fmt.width = + sensor->sensor_ds->sensor_limits->x_addr_max; + sd->sink_fmt.height = + sensor->sensor_ds->sensor_limits->y_addr_max; + sd->compose.width = sd->sink_fmt.width; + sd->compose.height = sd->sink_fmt.height; + sd->crop[sd->source_pad] = sd->compose; + sd->pads[sd->source_pad].flags = MEDIA_PAD_FL_SOURCE | + (has_substreams ? 
MEDIA_PAD_FL_MULTIPLEX : 0); + if (sd != sensor->pixel_array) { + sd->crop[sd->sink_pad] = sd->compose; + sd->pads[sd->sink_pad].flags = MEDIA_PAD_FL_SINK; + } + + if (has_substreams) { + sd->route_flags = devm_kzalloc(&client->dev, + sizeof(unsigned int) * + sensor->sensor_ds->frame_desc_entries, + GFP_KERNEL); + if (!sd->route_flags) + return -ENOMEM; + for (j = 0; j < sensor->sensor_ds->frame_desc_entries; + j++) + sd->route_flags[j] = + V4L2_SUBDEV_ROUTE_FL_SOURCE; + sd->route_flags[0] |= + V4L2_SUBDEV_ROUTE_FL_ACTIVE | + V4L2_SUBDEV_ROUTE_FL_IMMUTABLE; + } + + sd->sd.entity.ops = &crlmodule_entity_ops; + + if (prev_sd == NULL) { + prev_sd = sd; + continue; + } + + sd->sd.flags |= V4L2_SUBDEV_FL_HAS_DEVNODE; + sd->sd.internal_ops = &crlmodule_internal_ops; + sd->sd.owner = THIS_MODULE; + v4l2_set_subdevdata(&sd->sd, client); + +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 5, 0) + rval = media_entity_init(&sd->sd.entity, sd->npads, + sd->pads, 0); +#else + rval = media_entity_pads_init(&sd->sd.entity, sd->npads, + sd->pads); +#endif + if (rval) { + dev_err(&client->dev, + "media_entity_init failed\n"); + return rval; + } + + rval = v4l2_device_register_subdev(sensor->src->sd.v4l2_dev, + &sd->sd); + if (rval) { + dev_err(&client->dev, + "v4l2_device_register_subdev failed\n"); + return rval; + } + +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 5, 0) + rval = media_entity_create_link(&sd->sd.entity, +#else + rval = media_create_pad_link(&sd->sd.entity, +#endif + sd->source_pad, + &prev_sd->sd.entity, + prev_sd->sink_pad, + MEDIA_LNK_FL_ENABLED | + MEDIA_LNK_FL_IMMUTABLE); + if (rval) { + dev_err(&client->dev, + "media_entity_create_link failed\n"); + return rval; + } + + prev_sd = sd; + } + + return rval; +} + +static int __init_power_resources(struct v4l2_subdev *subdev) +{ + struct crl_sensor *sensor = to_crlmodule_sensor(subdev); + struct i2c_client *client = v4l2_get_subdevdata(&sensor->src->sd); + struct crl_power_seq_entity *entity; + unsigned idx; + + 
sensor->pwr_entity = devm_kzalloc(&client->dev, + sizeof(struct crl_power_seq_entity) * + sensor->sensor_ds->power_items, GFP_KERNEL); + + if (!sensor->pwr_entity) + return -ENOMEM; + + for (idx = 0; idx < sensor->sensor_ds->power_items; idx++) + sensor->pwr_entity[idx] = + sensor->sensor_ds->power_entities[idx]; + + dev_dbg(&client->dev, "%s\n", __func__); + + for (idx = 0; idx < sensor->sensor_ds->power_items; idx++) { + int rval; + + entity = &sensor->pwr_entity[idx]; + + switch (entity->type) { + case CRL_POWER_ETY_GPIO_FROM_PDATA: + if (devm_gpio_request_one(&client->dev, + sensor->platform_data->xshutdown, 0, + "CRL xshutdown") != 0) { + dev_err(&client->dev, + "unable to acquire xshutdown %d\n", + sensor->platform_data->xshutdown); + return -ENODEV; + } + break; + case CRL_POWER_ETY_GPIO_FROM_PDATA_BY_NUMBER: + rval = custom_gpio_request(sensor); + if (rval < 0) + return rval; + break; + case CRL_POWER_ETY_GPIO_CUSTOM: + if (entity->ent_name[0]) { + entity->gpiod_priv = gpiod_get(NULL, + entity->ent_name, GPIOD_OUT_LOW); + if (IS_ERR(entity->gpiod_priv)) { + dev_err(&client->dev, + "unable to acquire custom gpio %s\n", + entity->ent_name); + entity->gpiod_priv = NULL; + return -ENODEV; + } + } else { + if (devm_gpio_request_one(&client->dev, + entity->ent_number, 0, + "CRL Custom") != 0) { + dev_err(&client->dev, + "unable to acquire custom gpio %d\n", + entity->ent_number); + return -ENODEV; + } + } + break; + case CRL_POWER_ETY_REGULATOR_FRAMEWORK: + entity->regulator_priv = devm_regulator_get( + &client->dev, entity->ent_name); + if (IS_ERR(entity->regulator_priv)) { + dev_err(&client->dev, + "Failed to get regulator: %s\n", + entity->ent_name); + entity->regulator_priv = NULL; + return -ENODEV; + } + rval = regulator_set_voltage(entity->regulator_priv, + entity->val, + entity->val); + /* Not all regulator supports voltage change */ + if (rval < 0) + dev_info(&client->dev, + "Failed to set voltage %s %d\n", + entity->ent_name, entity->val); + break; + 
case CRL_POWER_ETY_CLK_FRAMEWORK: + sensor->xclk = devm_clk_get(&client->dev, + entity->ent_name[0] ? entity->ent_name : NULL); + if (IS_ERR(sensor->xclk)) { + dev_err(&client->dev, + "Cannot get sensor clk\n"); + return -ENODEV; + } + break; + default: + dev_err(&client->dev, "Invalid Power item\n"); + return -ENODEV; + } + } + + return 0; +} + +static int crl_request_gpio_irq(struct crl_sensor *sensor) +{ + int rval; + struct i2c_client *client = v4l2_get_subdevdata(&sensor->src->sd); + int irq_pin = sensor->platform_data->crl_irq_pin; + + if (!gpio_is_valid(irq_pin)) { + dev_err(&client->dev, "%s: GPIO pin %d is invalid!\n", + __func__, irq_pin); + return -ENODEV; + } + dev_dbg(&client->dev, + "%s: IRQ GPIO %d is valid.\n", __func__, irq_pin); + + rval = devm_gpio_request(&client->dev, irq_pin, + sensor->platform_data->irq_pin_name); + if (rval) { + dev_err(&client->dev, + "%s:IRQ GPIO pin request failed!\n", __func__); + return rval; + } + + gpio_direction_input(irq_pin); + sensor->irq = gpio_to_irq(irq_pin); + rval = devm_request_threaded_irq(&client->dev, sensor->irq, + sensor->sensor_ds->crl_irq_fn, + sensor->sensor_ds->crl_threaded_irq_fn, + sensor->platform_data->irq_pin_flags, + sensor->platform_data->irq_pin_name, + sensor); + + dev_dbg(&client->dev, "%s: GPIO register GPIO IRQ result: %d\n", + __func__, rval); + + return rval; +} + +static int crlmodule_registered(struct v4l2_subdev *subdev) +{ + struct crl_sensor *sensor = to_crlmodule_sensor(subdev); + struct i2c_client *client = v4l2_get_subdevdata(&sensor->src->sd); + + int rval; + + rval = __init_power_resources(subdev); + if (rval) + return -ENODEV; + + pm_runtime_enable(&client->dev); + + /* Power up the sensor */ + if (pm_runtime_get_sync(&client->dev) < 0) { + rval = -ENODEV; + goto out; + } + + /* init GPIO IRQ */ + if (sensor->sensor_ds->irq_in_use == true) { + rval = crl_request_gpio_irq(sensor); + if (rval) { + rval = -ENODEV; + goto out; + } + } + + /* one time init */ + rval = 
crlmodule_write_regs(sensor, + sensor->sensor_ds->onetime_init_regs, + sensor->sensor_ds->onetime_init_regs_items); + if (rval) { + dev_err(&client->dev, "%s failed to set powerup registers\n", + __func__); + rval = -ENODEV; + goto out; + } + + /* sensor specific init */ + if (sensor->sensor_ds->sensor_init) { + rval = sensor->sensor_ds->sensor_init(client); + + if (rval) { + dev_err(&client->dev, + "%s failed to run sensor specific init\n", + __func__); + rval = -ENODEV; + goto out; + } + } + /* Identify the module */ + rval = crlmodule_identify_module(subdev); + if (rval) { + rval = -ENODEV; + goto out; + } + + rval = crlmodule_init_subdevs(subdev); + if (rval) + goto out; + + sensor->binning_horizontal = 1; + sensor->binning_vertical = 1; + sensor->scale_m = 1; + sensor->flip_info = CRL_FLIP_DEFAULT_NONE; + sensor->ext_ctrl_impacts_pll_selection = false; + sensor->ext_ctrl_impacts_mode_selection = false; +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 5, 0) + sensor->pixel_array->sd.entity.type = MEDIA_ENT_T_V4L2_SUBDEV_SENSOR; +#else + sensor->pixel_array->sd.entity.function = MEDIA_ENT_F_CAM_SENSOR; +#endif + + rval = crlmodule_init_controls(sensor); + if (rval) + goto out; + + mutex_lock(&sensor->mutex); + crlmodule_update_current_mode(sensor); + mutex_unlock(&sensor->mutex); + rval = crlmodule_nvm_init(sensor); + +out: + dev_dbg(&client->dev, "%s rval: %d\n", __func__, rval); + /* crlmodule_power_off(sensor); */ + pm_runtime_put(&client->dev); + + return rval; +} + +/* + * Function main code replicated from /drivers/media/i2c/smiapp/smiapp-core.c + * Slightly modified based on the CRL Module changes + */ +static int crlmodule_open(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh) +{ + struct crl_subdev *ssd = to_crlmodule_subdev(sd); + struct crl_sensor *sensor = ssd->sensor; + struct i2c_client *client = v4l2_get_subdevdata(&sensor->src->sd); + u32 mbus_code = MEDIA_BUS_FMT_SRGGB10_1X10; + unsigned int i; + int rval; + + dev_dbg(&client->dev, "%s\n", 
__func__); + + mutex_lock(&sensor->mutex); + + for (i = 0; i < ssd->npads; i++) { + struct v4l2_mbus_framefmt *try_fmt = + v4l2_subdev_get_try_format(sd, fh->pad, i); + struct v4l2_rect *try_crop = v4l2_subdev_get_try_crop(sd, + fh->pad, i); + struct v4l2_rect *try_comp; + + try_fmt->width = sensor->sensor_ds->sensor_limits->x_addr_max; + try_fmt->height = sensor->sensor_ds->sensor_limits->y_addr_max; + try_fmt->code = mbus_code; + + try_crop->top = 0; + try_crop->left = 0; + try_crop->width = try_fmt->width; + try_crop->height = try_fmt->height; + + if (ssd != sensor->pixel_array) + continue; + + try_comp = v4l2_subdev_get_try_compose(sd, fh->pad, i); + *try_comp = *try_crop; + } + + mutex_unlock(&sensor->mutex); + + + rval = pm_runtime_get_sync(&client->dev); + if (rval < 0) + pm_runtime_put(&client->dev); + return rval; +} + +static int crlmodule_close(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh) +{ + struct i2c_client *client = v4l2_get_subdevdata(sd); + + pm_runtime_put(&client->dev); + + return 0; +} + +static int crlmodule_get_registers(struct v4l2_subdev *sd, struct crl_registers_info *info) +{ + struct crl_subdev *ssd = to_crlmodule_subdev(sd); + struct crl_sensor *sensor = ssd->sensor; + struct i2c_client *client = v4l2_get_subdevdata(&sensor->src->sd); + struct crl_register_read_rep reg; + int i; + int ret = 0; + + if (info->number > REGS_BUF_SIZE) { + dev_err(&client->dev, "error: max register's numbers than %d\n", REGS_BUF_SIZE); + return -1; + } + + for (i = 0; i < info->number; i++) { + reg.address = info->start_address + i; + reg.dev_i2c_addr = CRL_I2C_ADDRESS_NO_OVERRIDE; + reg.len = info->len; + reg.mask = 0xff; + ret = crlmodule_read_reg(sensor, reg, &info->regs[i]); + if (ret < 0) + return ret; + } + + return ret; +} + +static int crlmodule_set_registers(struct v4l2_subdev *sd, struct crl_registers_info *info) +{ + struct crl_subdev *ssd = to_crlmodule_subdev(sd); + struct crl_sensor *sensor = ssd->sensor; + struct i2c_client *client = 
v4l2_get_subdevdata(&sensor->src->sd); + int i; + int ret = 0; + + if (info->number > REGS_BUF_SIZE) { + dev_err(&client->dev, "error: max register's numbers than %d\n", REGS_BUF_SIZE); + return -1; + } + + for (i = 0; i < info->number; i++) { + ret = crlmodule_write_reg(sensor, CRL_I2C_ADDRESS_NO_OVERRIDE, + info->start_address + i, info->len, 0xff, info->regs[i]); + if (ret < 0) + return ret; + } + + return ret; +} + +static long crlmodule_ioctl(struct v4l2_subdev *sd, unsigned int cmd, void *arg) +{ + int ret; + + switch (cmd) { + case CRL_G_REGISTERS: + ret = crlmodule_get_registers(sd, arg); + break; + case CRL_S_REGISTERS: + ret = crlmodule_set_registers(sd, arg); + break; + default: + ret = -1; + break; + }; + + return ret; +} + +static const struct v4l2_subdev_video_ops crlmodule_video_ops = { + .s_stream = crlmodule_set_stream, +}; + +static const struct v4l2_subdev_core_ops crlmodule_core_ops = { + .s_power = crlmodule_set_power, + .ioctl = crlmodule_ioctl, +}; + +static const struct v4l2_subdev_pad_ops crlmodule_pad_ops = { + .enum_mbus_code = crlmodule_enum_mbus_code, + .get_fmt = crlmodule_get_format, + .set_fmt = crlmodule_set_format, + .get_selection = crlmodule_get_selection, + .set_selection = crlmodule_set_selection, + .enum_frame_size = crlmodule_enum_frame_size, + .get_frame_desc = crlmodule_get_frame_desc, + .get_routing = crlmodule_get_routing, + .set_routing = crlmodule_set_routing, +}; + +static const struct v4l2_subdev_sensor_ops crlmodule_sensor_ops = { + .g_skip_frames = crlmodule_get_skip_frames, +}; + +static const struct v4l2_subdev_ops crlmodule_ops = { + .core = &crlmodule_core_ops, + .video = &crlmodule_video_ops, + .pad = &crlmodule_pad_ops, + .sensor = &crlmodule_sensor_ops, +}; + +static const struct media_entity_operations crlmodule_entity_ops = { + .link_validate = v4l2_subdev_link_validate, +}; + +static const struct v4l2_subdev_internal_ops crlmodule_internal_src_ops = { + .registered = crlmodule_registered, + .open = 
crlmodule_open, + .close = crlmodule_close, +}; + +static const struct v4l2_subdev_internal_ops crlmodule_internal_ops = { + .open = crlmodule_open, + .close = crlmodule_close, +}; + +#ifdef CONFIG_PM + +static int crlmodule_runtime_suspend(struct device *dev) +{ + struct i2c_client *client = to_i2c_client(dev); + struct v4l2_subdev *sd = i2c_get_clientdata(client); + struct crl_sensor *sensor = to_crlmodule_sensor(sd); + + crlmodule_undo_poweron_entities(sensor, + sensor->sensor_ds->power_items - 1); + return 0; +} + +static int crlmodule_runtime_resume(struct device *dev) +{ + struct i2c_client *client = to_i2c_client(dev); + struct v4l2_subdev *sd = i2c_get_clientdata(client); + struct crl_sensor *sensor = to_crlmodule_sensor(sd); + + return __crlmodule_powerup_sequence(sensor); +} + +static int crlmodule_suspend(struct device *dev) +{ + struct i2c_client *client = to_i2c_client(dev); + struct v4l2_subdev *sd = i2c_get_clientdata(client); + struct crl_subdev *ssd = to_crlmodule_subdev(sd); + struct crl_sensor *sensor = ssd->sensor; + + if (sensor->streaming) + crlmodule_stop_streaming(sensor); + + if (sensor->power_count > 0) + crlmodule_undo_poweron_entities(sensor, + sensor->sensor_ds->power_items - 1); + return 0; +} + +static int crlmodule_resume(struct device *dev) +{ + struct i2c_client *client = to_i2c_client(dev); + struct v4l2_subdev *sd = i2c_get_clientdata(client); + struct crl_subdev *ssd = to_crlmodule_subdev(sd); + struct crl_sensor *sensor = ssd->sensor; + int rval = 0; + + if (sensor->power_count > 0) { + rval = __crlmodule_powerup_sequence(sensor); + if (!rval) + rval = crlmodule_run_poweron_init(sensor); + } + + if (!rval && sensor->streaming) + rval = crlmodule_start_streaming(sensor); + + return rval; +} +#else +#define crlmodule_runtime_suspend NULL +#define crlmodule_runtime_resume NULL +#define crlmodule_suspend NULL +#define crlmodule_resume NULL +#endif /* CONFIG_PM */ + +static int crlmodule_probe(struct i2c_client *client, + const 
struct i2c_device_id *devid) +{ + struct crl_sensor *sensor; + int ret; + + if (client->dev.platform_data == NULL) + return -ENODEV; + + /* TODO! Create the sensor based on the interface */ + sensor = devm_kzalloc(&client->dev, sizeof(*sensor), GFP_KERNEL); + if (sensor == NULL) + return -ENOMEM; + + sensor->platform_data = client->dev.platform_data; + mutex_init(&sensor->mutex); + mutex_init(&sensor->power_mutex); + + ret = crlmodule_populate_ds(sensor, &client->dev); + if (ret) + return -ENODEV; + + sensor->src = &sensor->ssds[sensor->ssds_used]; + + v4l2_i2c_subdev_init(&sensor->src->sd, client, &crlmodule_ops); + sensor->src->sd.internal_ops = &crlmodule_internal_src_ops; + sensor->src->sd.flags |= V4L2_SUBDEV_FL_HAS_DEVNODE; + if (sensor->sensor_ds->frame_desc_entries > 1) + sensor->src->sd.flags |= V4L2_SUBDEV_FL_HAS_SUBSTREAMS; + + sensor->src->sensor = sensor; + + sensor->src->pads[0].flags = MEDIA_PAD_FL_SOURCE; + if (sensor->sensor_ds->frame_desc_entries > 1) + sensor->src->sd.flags |= MEDIA_PAD_FL_MULTIPLEX; + +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 5, 0) + ret = media_entity_init(&sensor->src->sd.entity, 2, + sensor->src->pads, 0); +#else + ret = media_entity_pads_init(&sensor->src->sd.entity, 2, + sensor->src->pads); +#endif + if (ret < 0) + goto cleanup; + ret = v4l2_async_register_subdev(&sensor->src->sd); + if (ret < 0) + goto cleanup; + + /* Load IQ tuning registers from drvb file*/ + if (sensor->sensor_ds->msr_file_name) { + ret = crlmodule_load_msrlist(client, + sensor->sensor_ds->msr_file_name, + &sensor->msr_list); + if (ret) + dev_warn(&client->dev, + "msrlist loading failed. 
Ignore, move on\n"); + } else { + /* sensor will still continue streaming */ + dev_warn(&client->dev, "No msrlists associated with sensor\n"); + } + + return 0; + +cleanup: + media_entity_cleanup(&sensor->src->sd.entity); + crlmodule_release_ds(sensor); + return ret; +} + +static void crlmodule_free_controls(struct crl_sensor *sensor) +{ + unsigned int i; + + for (i = 0; i < sensor->ssds_used; i++) + v4l2_ctrl_handler_free(&sensor->ssds[i].ctrl_handler); +} + +static int crlmodule_remove(struct i2c_client *client) +{ + struct v4l2_subdev *subdev = i2c_get_clientdata(client); + struct crl_sensor *sensor = to_crlmodule_sensor(subdev); + unsigned int i; + + if (sensor->sensor_ds->sensor_cleanup) + sensor->sensor_ds->sensor_cleanup(client); + + v4l2_async_unregister_subdev(&sensor->src->sd); + for (i = 0; i < sensor->ssds_used; i++) { + v4l2_device_unregister_subdev(&sensor->ssds[i].sd); + media_entity_cleanup(&sensor->ssds[i].sd.entity); + } + + for (i = 0; i < sensor->sensor_ds->power_items; i++) { + struct crl_power_seq_entity *entity = + &sensor->pwr_entity[i]; + + if (entity->type == CRL_POWER_ETY_GPIO_CUSTOM && + entity->gpiod_priv) + gpiod_put(entity->gpiod_priv); + } + + crlmodule_nvm_deinit(sensor); + crlmodule_release_ds(sensor); + crlmodule_free_controls(sensor); + crlmodule_release_msrlist(&sensor->msr_list); + + pm_runtime_disable(&client->dev); + + return 0; +} + + +static const struct i2c_device_id crlmodule_id_table[] = { + { CRLMODULE_NAME, 0 }, + { }, +}; +MODULE_DEVICE_TABLE(i2c, crlmodule_id_table); + +static const struct dev_pm_ops crlmodule_pm_ops = { + .runtime_suspend = crlmodule_runtime_suspend, + .runtime_resume = crlmodule_runtime_resume, + .suspend = crlmodule_suspend, + .resume = crlmodule_resume, +}; + +static struct i2c_driver crlmodule_i2c_driver = { + .driver = { + .name = CRLMODULE_NAME, + .pm = &crlmodule_pm_ops, + }, + .probe = crlmodule_probe, + .remove = crlmodule_remove, + .id_table = crlmodule_id_table, +}; + 
+module_i2c_driver(crlmodule_i2c_driver); + +MODULE_AUTHOR("Vinod Govindapillai "); +MODULE_AUTHOR("Jouni Ukkonen "); +MODULE_AUTHOR("Tommi Franttila "); +MODULE_DESCRIPTION("Generic driver for common register list based \ + camera sensor modules"); +MODULE_LICENSE("GPL"); diff --git a/drivers/media/i2c/crlmodule/crlmodule-data.c b/drivers/media/i2c/crlmodule/crlmodule-data.c new file mode 100644 index 0000000000000..2c07ca06a83ac --- /dev/null +++ b/drivers/media/i2c/crlmodule/crlmodule-data.c @@ -0,0 +1,127 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (C) 2014 - 2018 Intel Corporation + * + * Author: Vinod Govindapillai + * + */ + +#include "crlmodule.h" +#include "crl_imx132_configuration.h" +#include "crl_imx214_configuration.h" +#include "crl_imx135_configuration.h" +#include "crl_imx230_configuration.h" +#include "crl_imx318_configuration.h" +#include "crl_ov8858_configuration.h" +#include "crl_ov13860_configuration.h" +#include "crl_adv7481_cvbs_configuration.h" +#include "crl_adv7481_hdmi_configuration.h" +#include "crl_adv7481_eval_configuration.h" +#include "crl_imx185_configuration.h" +#include "crl_ov10635_configuration.h" +#include "crl_ar0231at_configuration.h" +#include "crl_ov10640_configuration.h" +#include "crl_imx477_master_configuration.h" +#include "crl_imx477_slave_configuration.h" +#include "crl_imx274_configuration.h" +#include "crl_ov5670_configuration.h" +#include "crl_imx290_configuration.h" +#include "crl_pixter_stub_configuration.h" +#include "crl_ov2740_configuration.h" +#include "crl_ov9281_configuration.h" +#include "crl_magna_configuration.h" +#include "crl_ar023z_configuration.h" +#include "crl_ov2775_configuration.h" +#include "crl_ox03a10_configuration.h" +#include "crl_ov495_configuration.h" + +static const struct crlmodule_sensors supported_sensors[] = { + { "i2c-SONY214A:00", "imx214", &imx214_crl_configuration }, + { "IMX214", "imx214", &imx214_crl_configuration }, + { "i2c-SONY132A:00", "imx132", 
&imx132_crl_configuration }, + { "i2c-INT3471:00", "imx135", &imx135_crl_configuration }, + { "i2c-SONY230A:00", "imx230", &imx230_crl_configuration }, + { "i2c-INT3477:00", "ov8858", &ov8858_crl_configuration }, + { "i2c-OV5670AA:00", "ov5670", &ov5670_crl_configuration }, + { "IMX185", "imx185", &imx185_crl_configuration }, + { "IMX477-MASTER", "imx477", &imx477_master_crl_configuration }, + { "IMX477-SLAVE-1", "imx477", &imx477_slave_crl_configuration }, + { "OV13860", "ov13860", &ov13860_crl_configuration }, + { "OV9281", "ov9281", &ov9281_crl_configuration }, + { "ADV7481 CVBS", "adv7481_cvbs", &adv7481_cvbs_crl_configuration }, + { "ADV7481 HDMI", "adv7481_hdmi", &adv7481_hdmi_crl_configuration }, + { "ADV7481_EVAL", "adv7481_eval", &adv7481_eval_crl_configuration }, + { "ADV7481B_EVAL", "adv7481b_eval", &adv7481b_eval_crl_configuration }, + { "SONY318A", "imx318", &imx318_crl_configuration }, + { "OV10635", "ov10635", &ov10635_crl_configuration }, + { "AR0231AT", "ar0231at", &ar0231at_crl_configuration }, + { "OV10640", "ov10640", &ov10640_crl_configuration }, + { "IMX274", "imx274", &imx274_crl_configuration }, + { "OV5670", "ov5670", &ov5670_crl_configuration }, + { "IMX290", "imx290", &imx290_crl_configuration}, + { "PIXTER_STUB", "pixter_stub", &pixter_stub_crl_configuration}, + { "PIXTER_STUB_B", "pixter_stub_b", &pixter_stub_b_crl_configuration}, + { "PIXTER_STUB_C", "pixter_stub_c", &pixter_stub_c_crl_configuration}, + { "PIXTER_STUB_D", "pixter_stub_d", &pixter_stub_d_crl_configuration}, + { "PIXTER_STUB_E", "pixter_stub_e", &pixter_stub_e_crl_configuration}, + { "PIXTER_STUB_F", "pixter_stub_f", &pixter_stub_f_crl_configuration}, + { "PIXTER_STUB_G", "pixter_stub_g", &pixter_stub_g_crl_configuration}, + { "PIXTER_STUB_H", "pixter_stub_h", &pixter_stub_h_crl_configuration}, + { "INT3474", "ov2740", &ov2740_crl_configuration }, + { "MAGNA", "magna", &magna_crl_configuration }, + { "AR023Z", "ar023z", &ar023z_crl_configuration }, + { "OV2775", 
"ov2775", &ov2775_crl_configuration }, + { "OX03A10", "ox03a10", &ox03a10_crl_configuration }, + { "OV495", "ov495", &ov495_crl_configuration}, +}; + +/* + * Function to populate the CRL data structure from the sensor configuration + * definition file + */ +int crlmodule_populate_ds(struct crl_sensor *sensor, struct device *dev) +{ + unsigned int i; + + for (i = 0; i < ARRAY_SIZE(supported_sensors); i++) { + /* Check the ACPI supported modules */ + if (!strcmp(dev_name(dev), supported_sensors[i].pname)) { + sensor->sensor_ds = supported_sensors[i].ds; + dev_info(dev, "%s %s selected\n", + __func__, supported_sensors[i].name); + return 0; + }; + + /* Check the non ACPI modules */ + if (!strcmp(sensor->platform_data->module_name, + supported_sensors[i].pname)) { + sensor->sensor_ds = supported_sensors[i].ds; + dev_info(dev, "%s %s selected\n", + __func__, supported_sensors[i].name); + return 0; + }; + } + + dev_err(dev, "%s No suitable configuration found for %s\n", + __func__, dev_name(dev)); + return -EINVAL; +} + +/* + * Function validate the contents CRL data structure to check if all the + * required fields are filled and are according to the limits. + */ +int crlmodule_validate_ds(struct crl_sensor *sensor) +{ + /* TODO! Revisit this. */ + return 0; +} + +/* Function to free all resources allocated for the CRL data structure */ +void crlmodule_release_ds(struct crl_sensor *sensor) +{ + /* + * TODO! Revisit this. + * Place for cleaning all the resources used for the generation + * of CRL data structure. 
+ */ +} diff --git a/drivers/media/i2c/crlmodule/crlmodule-msrlist.c b/drivers/media/i2c/crlmodule/crlmodule-msrlist.c new file mode 100644 index 0000000000000..a15b76b921d27 --- /dev/null +++ b/drivers/media/i2c/crlmodule/crlmodule-msrlist.c @@ -0,0 +1,158 @@ +// SPDX-License-Identifier: GPL-2.0 +// Copyright (C) 2016 - 2018 Intel Corporation + +#include +#include +#include "crlmodule-msrlist.h" +#include "crlmodule.h" + +/* + * + * DRVB file is part of the old structure of tagged + * binary container, which is used as such in crlmodule. + * Changes needs to be done in cameralibs to remove the + * tagged structure and convert to untagged drvb format. + * Below are the tagged binary data container structure + * definitions. Most of it is copied from libmsrlisthelper.c + * and some changes done for crlmodule. + * + */ + +static int crlmodule_write_msrlist(struct i2c_client *client, u8 *bufptr, + unsigned int size) +{ + /* + * + * The configuration data contains any number of sequences where + * the first byte (that is, u8) that marks the number of bytes + * in the sequence to follow, is indeed followed by the indicated + * number of bytes of actual data to be written to sensor. + * By convention, the first two bytes of actual data should be + * understood as an address in the sensor address space (hibyte + * followed by lobyte) where the remaining data in the sequence + * will be written. 
+ * + */ + + u8 *ptr = bufptr; + int ret; + + while (ptr < bufptr + size) { + struct i2c_msg msg = { + .addr = client->addr, + .flags = 0, + }; + + msg.len = *ptr++; + msg.buf = ptr; + ptr += msg.len; + + if (ptr > bufptr + size) + return -EINVAL; + + ret = i2c_transfer(client->adapter, &msg, 1); + if (ret < 0) { + dev_err(&client->dev, "i2c write error: %d", ret); + return ret; + } + } + return 0; +} + +static int crlmodule_parse_msrlist(struct i2c_client *client, u8 *buffer, + unsigned int size) +{ + u8 *endptr8 = buffer + size; + int ret; + unsigned int dataset = 0; + struct tbd_data_record_header *header = + (struct tbd_data_record_header *)buffer; + + do { + + if ((u8 *)header + sizeof(*header) > endptr8) + return -EINVAL; + + if ((u8 *)header + header->data_offset + + header->data_size > endptr8) + return -EINVAL; + + dataset++; + + if (header->data_size && (header->flags & 1)) { + + ret = crlmodule_write_msrlist(client, + buffer + header->data_offset, + header->data_size); + if (ret) + return ret; + } + header = (struct tbd_data_record_header *)(buffer + + header->next_offset); + } while (header->next_offset); + + return 0; +} + + +int crlmodule_apply_msrlist(struct i2c_client *client, + const struct firmware *fw) +{ + struct tbd_header *header; + struct tbd_record_header *record; + + header = (struct tbd_header *)fw->data; + record = (struct tbd_record_header *)(header + 1); + + if (record->size && record->class_id != TBD_CLASS_DRV_ID) + return -EINVAL; + + return crlmodule_parse_msrlist(client, (u8 *)(record + 1), + record->size); +} + + +int crlmodule_load_msrlist(struct i2c_client *client, char *name, + const struct firmware **fw) +{ + + struct tbd_header *header; + struct tbd_record_header *record; + int ret = -ENOENT; + + ret = request_firmware(fw, name, &client->dev); + if (ret) { + dev_err(&client->dev, + "Error %d while requesting firmware %s\n", + ret, name); + return ret; + } + header = (struct tbd_header *)(*fw)->data; + + if (sizeof(*header) > 
(*fw)->size) + goto out; + + /* Check that we have drvb block. */ + if (memcmp(&header->tag, "DRVB", 4)) + goto out; + + if (header->size != (*fw)->size) + goto out; + + if (sizeof(*header) + sizeof(*record) > (*fw)->size) + goto out; + + + return 0; + +out: + crlmodule_release_msrlist(fw); + return ret; +} + + +void crlmodule_release_msrlist(const struct firmware **fw) +{ + release_firmware(*fw); + *fw = NULL; +} diff --git a/drivers/media/i2c/crlmodule/crlmodule-msrlist.h b/drivers/media/i2c/crlmodule/crlmodule-msrlist.h new file mode 100644 index 0000000000000..013469bfec1a0 --- /dev/null +++ b/drivers/media/i2c/crlmodule/crlmodule-msrlist.h @@ -0,0 +1,51 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (C) 2016 - 2018 Intel Corporation */ + +#ifndef __CRLMODULE_MSRLIST_H__ +#define __CRLMODULE_MSRLIST_H__ + +#define TBD_CLASS_DRV_ID 2 + +struct i2c_client; +struct firmware; + +struct tbd_header { + /* Tag identifier, also checks endianness */ + u32 tag; + /* Container size including this header */ + u32 size; + /* Version, format 0xYYMMDDVV */ + u32 version; + /* Revision, format 0xYYMMDDVV */ + u32 revision; + /* Configuration flag bits set */ + u32 config_bits; + /* Global checksum, header included */ + u32 checksum; +} __packed; + +struct tbd_record_header { + /* Size of record including header */ + u32 size; + /* tbd_format_t enumeration values used */ + u8 format_id; + /* Packing method; 0 = no packing */ + u8 packing_key; + /* tbd_class_t enumeration values used */ + u16 class_id; +} __packed; + +struct tbd_data_record_header { + u16 next_offset; + u16 flags; + u16 data_offset; + u16 data_size; +} __packed; + +int crlmodule_load_msrlist(struct i2c_client *client, char *name, + const struct firmware **fw); +int crlmodule_apply_msrlist(struct i2c_client *client, + const struct firmware *fw); +void crlmodule_release_msrlist(const struct firmware **fw); + +#endif /* ifndef __CRLMODULE_MSRLIST_H__ */ diff --git 
a/drivers/media/i2c/crlmodule/crlmodule-nvm.c b/drivers/media/i2c/crlmodule/crlmodule-nvm.c new file mode 100644 index 0000000000000..50d9848047392 --- /dev/null +++ b/drivers/media/i2c/crlmodule/crlmodule-nvm.c @@ -0,0 +1,141 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (C) 2016 - 2018 Intel Corporation + * + * Author: Tommi Franttila + * + */ + +#include +#include "crlmodule.h" +#include "crlmodule-nvm.h" +#include "crlmodule-regs.h" + +static ssize_t crlmodule_sysfs_nvm_read(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct v4l2_subdev *subdev = i2c_get_clientdata(to_i2c_client(dev)); + struct crl_sensor *sensor = to_crlmodule_sensor(subdev); + + memcpy(buf, sensor->nvm_data, min_t(unsigned long, PAGE_SIZE, + sensor->nvm_size)); + return sensor->nvm_size; +} + +DEVICE_ATTR(nvm, S_IRUGO, crlmodule_sysfs_nvm_read, NULL); + +static unsigned int crlmodule_get_nvm_size(struct crl_sensor *sensor) +{ + + struct i2c_client *client = v4l2_get_subdevdata(&sensor->src->sd); + unsigned int i, size = 0; + + for (i = 0; i < sensor->sensor_ds->crl_nvm_info.nvm_blobs_items; i++) + size += sensor->sensor_ds->crl_nvm_info.nvm_config[i].size; + + if (size > PAGE_SIZE) { + dev_err(&client->dev, "nvm size too big\n"); + size = 0; + } + return size; +} + +static int crlmodule_get_nvm_data(struct crl_sensor *sensor) +{ + struct i2c_client *client = v4l2_get_subdevdata(&sensor->src->sd); + int i; + int rval = 0; + + u8 *nvm_data = sensor->nvm_data; + + if (sensor->sensor_ds->crl_nvm_info.nvm_preop_regs_items) { + dev_dbg(&client->dev, + "%s perform pre-operations\n", __func__); + + rval = crlmodule_write_regs( + sensor, + sensor->sensor_ds->crl_nvm_info.nvm_preop_regs, + sensor->sensor_ds->crl_nvm_info.nvm_preop_regs_items); + if (rval) { + dev_err(&client->dev, + "failed to perform nvm pre-operations\n"); + return rval; + } + } + + for (i = 0; i < sensor->sensor_ds->crl_nvm_info.nvm_blobs_items; i++) { + + dev_dbg(&client->dev, + "%s read blob %d 
dev_addr: 0x%x start_addr: 0x%x size: %d", + __func__, i, + sensor->sensor_ds->crl_nvm_info.nvm_config->dev_addr, + sensor->sensor_ds->crl_nvm_info.nvm_config->start_addr, + sensor->sensor_ds->crl_nvm_info.nvm_config->size); + + crlmodule_block_read(sensor, + sensor->sensor_ds->crl_nvm_info.nvm_config->dev_addr, + sensor->sensor_ds->crl_nvm_info.nvm_config->start_addr, + sensor->sensor_ds->crl_nvm_info.nvm_flags + & CRL_NVM_ADDR_MODE_MASK, + sensor->sensor_ds->crl_nvm_info.nvm_config->size, + nvm_data); + + nvm_data += sensor->sensor_ds->crl_nvm_info.nvm_config->size; + sensor->sensor_ds->crl_nvm_info.nvm_config++; + } + + if (sensor->sensor_ds->crl_nvm_info.nvm_postop_regs_items) { + dev_dbg(&client->dev, "%s perform post-operations\n", + __func__); + rval = crlmodule_write_regs( + sensor, + sensor->sensor_ds->crl_nvm_info.nvm_postop_regs, + sensor->sensor_ds->crl_nvm_info.nvm_postop_regs_items); + if (rval) { + dev_err(&client->dev, + "failed to perform nvm post-operations\n"); + return rval; + } + } + return rval; +} + +int crlmodule_nvm_init(struct crl_sensor *sensor) +{ + struct i2c_client *client = v4l2_get_subdevdata(&sensor->src->sd); + unsigned int size = crlmodule_get_nvm_size(sensor); + int rval; + + if (size) { + sensor->nvm_data = devm_kzalloc(&client->dev, size, GFP_KERNEL); + if (sensor->nvm_data == NULL) { + dev_err(&client->dev, "nvm buf allocation failed\n"); + return -ENOMEM; + } + sensor->nvm_size = size; + + rval = crlmodule_get_nvm_data(sensor); + if (rval) + goto err; + if (device_create_file(&client->dev, &dev_attr_nvm) != 0) { + dev_err(&client->dev, "sysfs nvm entry failed\n"); + rval = -EBUSY; + goto err; + } + } + + return 0; +err: + sensor->nvm_size = 0; + return rval; +} + +void crlmodule_nvm_deinit(struct crl_sensor *sensor) +{ + struct i2c_client *client = v4l2_get_subdevdata(&sensor->src->sd); + + if (sensor->nvm_size) { + device_remove_file(&client->dev, &dev_attr_nvm); + sensor->nvm_size = 0; + } +} diff --git 
a/drivers/media/i2c/crlmodule/crlmodule-nvm.h b/drivers/media/i2c/crlmodule/crlmodule-nvm.h new file mode 100644 index 0000000000000..42d462d321cc2 --- /dev/null +++ b/drivers/media/i2c/crlmodule/crlmodule-nvm.h @@ -0,0 +1,23 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (C) 2015 - 2018 Intel Corporation + * + * Author: Tommi Franttila + * + */ + +#ifndef __CRLMODULE_NVM_H_ +#define __CRLMODULE_NVM_H_ + +#include "crlmodule.h" + +#define CRL_NVM_ADDR_MODE_8BIT 0x00000001 +#define CRL_NVM_ADDR_MODE_16BIT 0x00000002 + +#define CRL_NVM_ADDR_MODE_MASK (CRL_NVM_ADDR_MODE_8BIT | \ + CRL_NVM_ADDR_MODE_16BIT) + + +int crlmodule_nvm_init(struct crl_sensor *sensor); +void crlmodule_nvm_deinit(struct crl_sensor *sensor); + +#endif /* __CRLMODULE_NVM_H_ */ diff --git a/drivers/media/i2c/crlmodule/crlmodule-regs.c b/drivers/media/i2c/crlmodule/crlmodule-regs.c new file mode 100644 index 0000000000000..e4b8c8aa36e81 --- /dev/null +++ b/drivers/media/i2c/crlmodule/crlmodule-regs.c @@ -0,0 +1,341 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (C) 2017 - 2018 Intel Corporation + * + * Author: Vinod Govindapillai + * + */ + +#include +#include + +#include "crlmodule.h" +#include "crlmodule-nvm.h" +#include "crlmodule-regs.h" + +static DEFINE_MUTEX(crl_i2c_mutex); + +static bool reg_verify; +module_param(reg_verify, bool, 0444); +MODULE_PARM_DESC(reg_verify, "enable/disable registers write value and read value checking"); + +static int crlmodule_i2c_read(struct crl_sensor *sensor, u16 dev_i2c_addr, + u16 reg, u8 len, u32 *val) +{ + struct i2c_client *client = v4l2_get_subdevdata(&sensor->src->sd); + struct i2c_msg msg[2]; + unsigned char data[4]; + int r; + + dev_dbg(&client->dev, "%s reg, len: [0x%04x, %d]", __func__, reg, len); + + if (len != CRL_REG_LEN_08BIT && len != CRL_REG_LEN_16BIT && + len != CRL_REG_LEN_24BIT && len != CRL_REG_LEN_32BIT) + return -EINVAL; + + if (dev_i2c_addr == CRL_I2C_ADDRESS_NO_OVERRIDE) + msg[0].addr = client->addr; + else + 
msg[0].addr = dev_i2c_addr; + + msg[1].addr = msg[0].addr; + + msg[0].flags = 0; + msg[0].buf = data; + + if (sensor->sensor_ds->addr_len == CRL_ADDR_7BIT) { + /* change address to 7bit format */ + msg[0].addr = msg[0].addr >> 1; + msg[1].addr = msg[1].addr >> 1; + } + if ((sensor->sensor_ds->addr_len == CRL_ADDR_7BIT) || + (sensor->sensor_ds->addr_len == CRL_ADDR_8BIT)) { + data[0] = (u8) (reg & 0xff); + msg[0].len = 1; + } else { + /* high byte goes out first */ + data[0] = (u8) (reg >> 8); + data[1] = (u8) (reg & 0xff); + msg[0].len = 2; + } + + msg[1].flags = I2C_M_RD; + msg[1].buf = data; + msg[1].len = len; + + r = i2c_transfer(client->adapter, msg, 2); + + if (r < 0) + goto err; + + *val = 0; + /* high byte comes first */ + switch (len) { + case CRL_REG_LEN_32BIT: + *val = (data[0] << 24) + (data[1] << 16) + (data[2] << 8) + + data[3]; + break; + case CRL_REG_LEN_24BIT: + *val = (data[0] << 16) + (data[1] << 8) + data[2]; + break; + case CRL_REG_LEN_16BIT: + *val = (data[0] << 8) + data[1]; + break; + case CRL_REG_LEN_08BIT: + *val = data[0]; + break; + } + + return 0; + +err: + dev_err(&client->dev, "read from offset 0x%x error %d\n", reg, r); + + return r; +} + +static int crlmodule_i2c_write(struct crl_sensor *sensor, u16 dev_i2c_addr, + u16 reg, u8 len, u32 val) +{ + struct i2c_client *client = v4l2_get_subdevdata(&sensor->src->sd); + struct i2c_msg msg; + unsigned char data[6]; + unsigned int retries; + int r; + int ret; + u32 rval; + unsigned char *data_offset; + + if (len != CRL_REG_LEN_08BIT && len != CRL_REG_LEN_16BIT && + len != CRL_REG_LEN_24BIT && len != CRL_REG_LEN_32BIT) + return -EINVAL; + + if (dev_i2c_addr == CRL_I2C_ADDRESS_NO_OVERRIDE) + msg.addr = client->addr; + else + msg.addr = dev_i2c_addr; + + msg.flags = 0; /* Write */ + msg.buf = data; + + if (sensor->sensor_ds->addr_len == CRL_ADDR_7BIT) + msg.addr = msg.addr >> 1; + + if ((sensor->sensor_ds->addr_len == CRL_ADDR_7BIT) || + (sensor->sensor_ds->addr_len == CRL_ADDR_8BIT)) { + 
data[0] = (u8) (reg & 0xff); + msg.len = 1 + len; + data_offset = &data[1]; + } else { + /* high byte goes out first */ + data[0] = (u8) (reg >> 8); + data[1] = (u8) (reg & 0xff); + msg.len = 2 + len; + data_offset = &data[2]; + } + + dev_dbg(&client->dev, "%s len reg, val: [%d, 0x%04x, 0x%04x]", + __func__, len, reg, val); + + switch (len) { + case CRL_REG_LEN_08BIT: + val = val & 0xFF; + data_offset[0] = val; + break; + case CRL_REG_LEN_16BIT: + val = val & 0xFFFF; + data_offset[0] = val >> 8; + data_offset[1] = val; + break; + case CRL_REG_LEN_24BIT: + val = val & 0xFFFFFF; + data_offset[0] = val >> 16; + data_offset[1] = val >> 8; + data_offset[2] = val; + break; + case CRL_REG_LEN_32BIT: + data_offset[0] = val >> 24; + data_offset[1] = val >> 16; + data_offset[2] = val >> 8; + data_offset[3] = val; + break; + } + + for (retries = 0; retries < 5; retries++) { + /* + * Due to unknown reason sensor stops responding. This + * loop is a temporaty solution until the root cause + * is found. + */ + r = i2c_transfer(client->adapter, &msg, 1); + if (r == 1) { + if (retries) + dev_err(&client->dev, + "sensor i2c stall encountered. 
retries: %d\n", + retries); + + if (reg_verify) { + ret = crlmodule_i2c_read(sensor, dev_i2c_addr, reg, len, &rval); + if (ret < 0) + dev_err(&client->dev, "i2c read error\n"); + else if (rval != val) { + dev_warn(&client->dev, + "reg:0x%x write val(0x%x), read val(0x%x)", + reg, val, rval); + } + } + return 0; + } + + usleep_range(2000, 2000); + } + + dev_err(&client->dev, + "wrote 0x%x to offset 0x%x error %d\n", val, reg, r); + + return r; +} + +int crlmodule_read_reg(struct crl_sensor *sensor, + const struct crl_register_read_rep reg, u32 *val) +{ + return crlmodule_i2c_read(sensor, reg.dev_i2c_addr, reg.address, + reg.len, val); +} + +int crlmodule_write_reg(struct crl_sensor *sensor, u16 dev_i2c_addr, u16 reg, + u8 len, u32 mask, u32 val) +{ + struct i2c_client *client = v4l2_get_subdevdata(&sensor->src->sd); + int ret; + u32 val2; + + /* + * Sensor setting sequence may need some delay. + * delay value is specified by reg.val field + */ + if (len == CRL_REG_LEN_DELAY) { + msleep(val); + return 0; + } + + /* + * If the same register is being used for two settings, updating + * one value should not overwrite the other one. Such registers + * must be marked as CRL_REG_READ_AND_UPDATE. 
For such registers + * first read the register and update it + */ + + if (len & CRL_REG_READ_AND_UPDATE) { + u32 tmp; + /* Some rare cases 2 different devices can + * make i2c accesses to same physical i2c address, + * those read modify writes must be protected by static + * mutex + */ + if (sensor->sensor_ds->i2c_mutex_in_use) + mutex_lock(&crl_i2c_mutex); + + ret = crlmodule_i2c_read(sensor, dev_i2c_addr, reg, + len & CRL_REG_LEN_READ_MASK, &val2); + if (ret) { + if (sensor->sensor_ds->i2c_mutex_in_use) + mutex_unlock(&crl_i2c_mutex); + return ret; + } + + tmp = val2 & ~mask; + tmp |= val & mask; + val = tmp; + } + + ret = crlmodule_i2c_write(sensor, dev_i2c_addr, reg, + len & CRL_REG_LEN_READ_MASK, val); + + if ((sensor->sensor_ds->i2c_mutex_in_use) + && (len & CRL_REG_READ_AND_UPDATE)) + mutex_unlock(&crl_i2c_mutex); + + if (ret < 0) { + dev_err(&client->dev, + "error %d writing reg 0x%4.4x, val 0x%2.2x", + ret, reg, val); + return ret; + } + + return 0; +} + +int crlmodule_write_regs(struct crl_sensor *sensor, + const struct crl_register_write_rep *regs, int len) +{ + unsigned int i; + int ret; + + for (i = 0; i < len; i++) { + ret = crlmodule_write_reg(sensor, + regs[i].dev_i2c_addr, + regs[i].address, + regs[i].len, + regs[i].mask, + regs[i].val); + if (ret < 0) + return ret; + }; + + return 0; +} + +int crlmodule_block_read(struct crl_sensor *sensor, u16 dev_i2c_addr, u16 addr, + u8 addr_mode, u16 len, u8 *buf) +{ + struct i2c_client *client = v4l2_get_subdevdata(&sensor->src->sd); + struct i2c_msg msg[2]; + u8 data[2]; + u16 offset = 0; + int r; + + memset(msg, 0, sizeof(msg)); + + if (dev_i2c_addr == CRL_I2C_ADDRESS_NO_OVERRIDE) { + msg[0].addr = client->addr; + msg[1].addr = client->addr; + } else { + msg[0].addr = dev_i2c_addr; + msg[1].addr = dev_i2c_addr; + } + + if (addr_mode & CRL_NVM_ADDR_MODE_8BIT) + msg[0].len = 1; + else if (addr_mode & CRL_NVM_ADDR_MODE_16BIT) + msg[0].len = 2; + else + return -EINVAL; + + msg[0].flags = 0; + msg[1].flags = 
I2C_M_RD; + + while (offset < len) { + if (addr_mode & CRL_NVM_ADDR_MODE_8BIT) { + data[0] = addr & 0xff; + } else { + data[0] = (addr >> 8) & 0xff; + data[1] = addr & 0xff; + } + + msg[0].buf = data; + msg[1].len = min(CRLMODULE_I2C_BLOCK_SIZE, len - offset); + msg[1].buf = &buf[offset]; + r = i2c_transfer(client->adapter, msg, ARRAY_SIZE(msg)); + if (r != ARRAY_SIZE(msg)) { + if (r >= 0) + r = -EIO; + goto err; + } + addr += msg[1].len; + offset += msg[1].len; + } + return 0; +err: + dev_err(&client->dev, "read from offset 0x%x error %d\n", offset, r); + return r; +} diff --git a/drivers/media/i2c/crlmodule/crlmodule-regs.h b/drivers/media/i2c/crlmodule/crlmodule-regs.h new file mode 100644 index 0000000000000..6d84486e1ae1a --- /dev/null +++ b/drivers/media/i2c/crlmodule/crlmodule-regs.h @@ -0,0 +1,26 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (C) 2014 - 2018 Intel Corporation + * + * Author: Vinod Govindapillai + * + */ + +#ifndef __CRLMODULE_REGS_H_ +#define __CRLMODULE_REGS_H_ + +struct crl_sensor; +struct crl_register_read_rep; +struct crl_register_write_rep; + +#define CRLMODULE_I2C_BLOCK_SIZE 0x20 + +int crlmodule_read_reg(struct crl_sensor *sensor, + const struct crl_register_read_rep reg, u32 *val); +int crlmodule_write_regs(struct crl_sensor *sensor, + const struct crl_register_write_rep *regs, int len); +int crlmodule_write_reg(struct crl_sensor *sensor, u16 dev_i2c_addr, u16 reg, + u8 len, u32 mask, u32 val); +int crlmodule_block_read(struct crl_sensor *sensor, u16 dev_i2c_addr, u16 addr, + u8 addr_mode, u16 len, u8 *buf); + +#endif /* __CRLMODULE_REGS_H_ */ diff --git a/drivers/media/i2c/crlmodule/crlmodule-sensor-ds.h b/drivers/media/i2c/crlmodule/crlmodule-sensor-ds.h new file mode 100644 index 0000000000000..ff03185b1025c --- /dev/null +++ b/drivers/media/i2c/crlmodule/crlmodule-sensor-ds.h @@ -0,0 +1,622 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (C) 2014 - 2018 Intel Corporation + * + * Author: Vinod Govindapillai + * 
+ */ + +#ifndef __CRLMODULE_SENSOR_DS_H_ +#define __CRLMODULE_SENSOR_DS_H_ + +#include +#include "crlmodule.h" + +#define CRL_REG_LEN_08BIT 1 +#define CRL_REG_LEN_16BIT 2 +#define CRL_REG_LEN_24BIT 3 +#define CRL_REG_LEN_32BIT 4 + +#define CRL_REG_READ_AND_UPDATE (1 << 3) +#define CRL_REG_LEN_READ_MASK 0x07 +#define CRL_REG_LEN_DELAY 0x10 + +#define CRL_FLIP_DEFAULT_NONE 0 +#define CRL_FLIP_HFLIP 1 +#define CRL_FLIP_VFLIP 2 +#define CRL_FLIP_HFLIP_VFLIP 3 + +#define CRL_FLIP_HFLIP_MASK 0xfe +#define CRL_FLIP_VFLIP_MASK 0xfd + +#define CRL_PIXEL_ORDER_GRBG 0 +#define CRL_PIXEL_ORDER_RGGB 1 +#define CRL_PIXEL_ORDER_BGGR 2 +#define CRL_PIXEL_ORDER_GBRG 3 +#define CRL_PIXEL_ORDER_IGNORE 255 + +/* Flag to notify configuration selction imact from V4l2 Ctrls */ +#define CRL_IMPACTS_NO_IMPACT 0 +#define CRL_IMPACTS_PLL_SELECTION (1 << 1) +#define CRL_IMPACTS_MODE_SELECTION (1 << 2) + +/* + * In crl_dynamic_entity::entity_type is denoted by bits 6 and 7 + * 0 -> crl_dynamic_entity:entity_value is a constant + * 1 -> crl_dynamic_entity:entity_value is a referene to variable + * 2 -> crl_dynamic_entity:entity_value is a v4l2_ctrl value + * 3 -> crl_dynamic_entity:entity_value is a 8 bit register address + */ +enum crl_dynamic_entity_type { + CRL_DYNAMIC_VAL_OPERAND_TYPE_CONST = 0, + CRL_DYNAMIC_VAL_OPERAND_TYPE_VAR_REF, + CRL_DYNAMIC_VAL_OPERAND_TYPE_CTRL_VAL, + CRL_DYNAMIC_VAL_OPERAND_TYPE_REG_VAL, /* Only 8bit registers */ +}; + +/* + * For some combo device which has some devices inside itself with different + * i2c address, adding flag to specify whether current device needs i2c + * address override. + * For back-compatibility, making flag equals 0. So existing sensor configure + * doesn't need to be modified. 
+ */ +#define CRL_I2C_ADDRESS_NO_OVERRIDE 0 + +struct crl_sensor; +struct i2c_client; + +enum crl_subdev_type { + CRL_SUBDEV_TYPE_SCALER, + CRL_SUBDEV_TYPE_BINNER, + CRL_SUBDEV_TYPE_PIXEL_ARRAY, +}; + +enum crl_v4l2ctrl_op_type { + CRL_V4L2_CTRL_SET_OP, + CRL_V4L2_CTRL_GET_OP, +}; + +enum crl_v4l2ctrl_update_context { + SENSOR_IDLE, /* Powered on. But not streamind */ + SENSOR_STREAMING, /* Sensor streaming */ + SENSOR_POWERED_ON, /* streaming or idle */ +}; + +enum crl_operators { + CRL_BITWISE_AND = 0, + CRL_BITWISE_OR, + CRL_BITWISE_LSHIFT, + CRL_BITWISE_RSHIFT, + CRL_BITWISE_XOR, + CRL_BITWISE_COMPLEMENT, + CRL_ADD, + CRL_SUBTRACT, + CRL_MULTIPLY, + CRL_DIV, + CRL_ASSIGNMENT, +}; + +/* Replicated from videodev2.h */ +enum crl_v4l2_ctrl_type { + CRL_V4L2_CTRL_TYPE_INTEGER = 1, + CRL_V4L2_CTRL_TYPE_BOOLEAN, + CRL_V4L2_CTRL_TYPE_MENU_INT, + CRL_V4L2_CTRL_TYPE_MENU_ITEMS, + CRL_V4L2_CTRL_TYPE_BUTTON, + CRL_V4L2_CTRL_TYPE_INTEGER64, + CRL_V4L2_CTRL_TYPE_CTRL_CLASS, + CRL_V4L2_CTRL_TYPE_CUSTOM, +}; + +enum crl_addr_len { + CRL_ADDR_16BIT = 0, + CRL_ADDR_8BIT, + CRL_ADDR_7BIT, +}; + +enum crl_operands { + CRL_CONSTANT = 0, + CRL_VARIABLE, + CRL_CONTROL, +}; + +/* References to the CRL driver member variables */ +enum crl_member_data_reference_ids { + CRL_VAR_REF_OUTPUT_WIDTH = 1, + CRL_VAR_REF_OUTPUT_HEIGHT, + CRL_VAR_REF_PA_CROP_WIDTH, + CRL_VAR_REF_PA_CROP_HEIGHT, + CRL_VAR_REF_FRAME_TIMING_WIDTH, + CRL_VAR_REF_FRAME_TIMING_HEIGHT, + CRL_VAR_REF_BINNER_WIDTH, + CRL_VAR_REF_BINNER_HEIGHT, + CRL_VAR_REF_H_BINN_FACTOR, + CRL_VAR_REF_V_BINN_FACTOR, + CRL_VAR_REF_SCALE_FACTOR, + CRL_VAR_REF_BITSPERPIXEL, + CRL_VAR_REF_PIXELRATE_PA, + CRL_VAR_REF_PIXELRATE_CSI, + CRL_VAR_REF_PIXELRATE_LINK_FREQ, +}; + +enum crl_frame_desc_type { + CRL_V4L2_MBUS_FRAME_DESC_TYPE_PLATFORM, + CRL_V4L2_MBUS_FRAME_DESC_TYPE_PARALLEL, + CRL_V4L2_MBUS_FRAME_DESC_TYPE_CCP2, + CRL_V4L2_MBUS_FRAME_DESC_TYPE_CSI2, +}; + +enum crl_pwr_ent_type { + CRL_POWER_ETY_GPIO_FROM_PDATA = 1, + 
CRL_POWER_ETY_GPIO_FROM_PDATA_BY_NUMBER, + CRL_POWER_ETY_GPIO_CUSTOM, + CRL_POWER_ETY_REGULATOR_FRAMEWORK, + CRL_POWER_ETY_CLK_FRAMEWORK, +}; + +struct crl_dynamic_entity { + enum crl_dynamic_entity_type entity_type; + u32 entity_val; +}; + +struct crl_arithmetic_ops { + enum crl_operators op; + struct crl_dynamic_entity operand; +}; + +struct crl_dynamic_calculated_entity { + u8 ops_items; + struct crl_arithmetic_ops *ops; +}; + +struct crl_register_write_rep { + u16 address; + u8 len; + u32 val; + u16 dev_i2c_addr; + u32 mask; +}; + +struct crl_register_read_rep { + u16 address; + u8 len; + u32 mask; + u16 dev_i2c_addr; +}; + +/* + * crl_dynamic_register_access is used mainly in the v4l2_ctrl context. + * This is intended to provide some generic arithmetic operations on the values + * to be written to a control's register or on the values read from a register. + * These arithmetic operations are controlled using struct crl_arithmetic_ops. + * + * One important information is that this structure behave differently for the + * set controls and volatile get controls. + * + * For the set control operation, the usage of the members are straight forward. + * The set control can result into multiple register write operations. Hence + * there can be more than one crl_dynamic_register_access entries associated + * with a control which results into separate register writes. + * + * But for the volatile get control operation, where a v4l2 control is used + * to query read only information from the sensor, there could be only one + * crl_dynamic_register_access entry. Because the result of a get control is + * a single value. crl_dynamic_register_access.address, len and mask values are + * not used in volatile get control context. 
Instead all the needed information + * must be encoded into member -> ops (struct crl_arithmetic_ops) + */ +struct crl_dynamic_register_access { + u16 address; + u8 len; + u32 mask; + u8 ops_items; + struct crl_arithmetic_ops *ops; + u16 dev_i2c_addr; +}; + +struct crl_sensor_detect_config { + struct crl_register_read_rep reg; /* Register to read */ + unsigned int width; /* width of the value in chars*/ +}; + +struct crl_sensor_subdev_config { + enum crl_subdev_type subdev_type; + char name[32]; +}; + +/* + * The ctrl id value pair which should be compared when selecting a + * configuration. This gives flexibility to provide any data through set ctrl + * and provide selection mechanism for a particular configuration + */ +struct crl_ctrl_data_pair { + u32 ctrl_id; + u32 data; +}; + +enum crl_dep_ctrl_action_type { + CRL_DEP_CTRL_ACTION_TYPE_SELF = 0, + CRL_DEP_CTRL_ACTION_TYPE_DEP_CTRL, +}; + +enum crl_dep_ctrl_condition { + CRL_DEP_CTRL_CONDITION_GREATER = 0, + CRL_DEP_CTRL_CONDITION_LESSER, + CRL_DEP_CTRL_CONDITION_EQUAL, +}; + +enum crl_dep_ctrl_action { + CRL_DEP_CTRL_CONDITION_ADD = 0, + CRL_DEP_CTRL_CONDITION_SUBTRACT, + CRL_DEP_CTRL_CONDITION_MULTIPLY, + CRL_DEP_CTRL_CONDITION_DIVIDE, +}; + +struct crl_dep_ctrl_cond_action { + enum crl_dep_ctrl_condition cond; + u32 cond_value; + enum crl_dep_ctrl_action action; + u32 action_value; +}; + +/* Dependency control provision */ +struct crl_dep_ctrl_provision { + u32 ctrl_id; + enum crl_dep_ctrl_action_type action_type; + unsigned int action_items; + struct crl_dep_ctrl_cond_action *action; +}; + +/* + * Multiple set of register lists can be written to + * the sensor configuration based on the control's value + * struct crl_dep_reg_list introduces a provision for this + * purpose. + * + * struct crl_dep_reg_list *dep_regs; + * + * In dep_regs, a "condition" and "value" is added which is + * compared with ctrl->val and the register list that is to + * be written to the sensor. 
+ * + * Example: For a v4l2_ctrl, if we need to set + * reg_list A when ctrl->val > 60 + * reg_list B when ctrl->val < 60 + * and reg_list C when ctrl->val == 60 + * + * So dep_regs block should be like this in the sensor + * specific configuration file: + * + * dep_regs = { + * { + * reg_condition = CRL_DEP_CTRL_CONDITION_GREATER, + * cond_value = { CRL_DYNAMIC_VAL_OPERAND_TYPE_CONST, 60 }, + * no_direct_regs = sizeof(X) + * direct_regs = X + * no_dyn_items = sizeof(A) + * dyn_regs = A + * }, + * { + * reg_condition = CRL_DEP_CTRL_CONDITION_LESSER, + * cond_value = { CRL_DYNAMIC_VAL_OPERAND_TYPE_CONST, 60 }, + * no_direct_regs = 0 + * direct_regs = 0 + * no_dyn_items = sizeof(B) + * dyn_regs = B + * }, + * { + * reg_condition = CRL_DEP_CTRL_CONDITION_EQUAL, + * cond_value = { CRL_DYNAMIC_VAL_OPERAND_TYPE_CONST, 60 }, + * no_direct_regs = sizeof(Z) + * direct_regs = Z + * no_dyn_items = size(C) + * dyn_regs = C + * }, + * } + * cond_value is defined as dynamic entity, which can be a constant, + * another control value or a reference to the pre-defined set of variables + * or a register value. + * + * CRL driver will execute the above dep_regs in the same order + * as it is written. care must be taken for eample in the cases + * like, ctrl->val > 60, reg_list A. and if ctrl_val > 80, + * reg_list D etc. 
+ */ + +struct crl_dep_reg_list { + enum crl_dep_ctrl_condition reg_cond; + struct crl_dynamic_entity cond_value; + unsigned int no_direct_regs; + struct crl_register_write_rep *direct_regs; + unsigned int no_dyn_items; + struct crl_dynamic_register_access *dyn_regs; +}; + +struct crl_sensor_limits { + unsigned int x_addr_max; + unsigned int y_addr_max; + unsigned int x_addr_min; + unsigned int y_addr_min; + unsigned int min_frame_length_lines; + unsigned int max_frame_length_lines; + unsigned int min_line_length_pixels; + unsigned int max_line_length_pixels; + u8 scaler_m_min; + u8 scaler_m_max; + u8 scaler_n_min; + u8 scaler_n_max; + u8 min_even_inc; + u8 max_even_inc; + u8 min_odd_inc; + u8 max_odd_inc; +}; + +struct crl_v4l2_ctrl_data_std { + s64 min; + s64 max; + u64 step; + s64 def; +}; + +struct crl_v4l2_ctrl_data_menu_items { + const char *const *menu; + unsigned int size; +}; + +struct crl_v4l2_ctrl_data_std_menu { + const int64_t *std_menu; + unsigned int size; +}; + +struct crl_v4l2_ctrl_data_int_menu { + const s64 *menu; + s64 max; + s64 def; +}; + +union crl_v4l2_ctrl_data_types { + struct crl_v4l2_ctrl_data_std std_data; + struct crl_v4l2_ctrl_data_menu_items v4l2_menu_items; + struct crl_v4l2_ctrl_data_std_menu v4l2_std_menu; + struct crl_v4l2_ctrl_data_int_menu v4l2_int_menu; +}; + +/* + * Please note a difference in the usage of "regs" member in case of a + * volatile get control for read only purpose. Please check the + * "struct crl_dynamic_register_access" declaration comments for more details. + * + * Read only controls must have "flags" V4L2_CTRL_FLAG_READ_ONLY set. 
+ */ +struct crl_v4l2_ctrl { + enum crl_subdev_type sd_type; + enum crl_v4l2ctrl_op_type op_type; + enum crl_v4l2ctrl_update_context context; + char name[32]; + u32 ctrl_id; + enum crl_v4l2_ctrl_type type; + union crl_v4l2_ctrl_data_types data; + unsigned long flags; + u32 impact; /* If this control impact any config selection */ + struct v4l2_ctrl *ctrl; + unsigned int regs_items; + struct crl_dynamic_register_access *regs; + unsigned int dep_items; + struct crl_dep_ctrl_provision *dep_ctrls; + enum v4l2_ctrl_type v4l2_type; + unsigned int crl_ctrl_dep_reg_list; /* contains no. of dep_regs */ + struct crl_dep_reg_list *dep_regs; +}; + +struct crl_pll_configuration { + s64 input_clk; + s64 op_sys_clk; + u8 bitsperpixel; + u32 pixel_rate_csi; + u32 pixel_rate_pa; + u8 csi_lanes; + unsigned int comp_items; + struct crl_ctrl_data_pair *ctrl_data; + unsigned int pll_regs_items; + const struct crl_register_write_rep *pll_regs; +}; + +struct crl_subdev_rect_rep { + enum crl_subdev_type subdev_type; + struct v4l2_rect in_rect; + struct v4l2_rect out_rect; +}; + +struct crl_mode_rep { + unsigned int sd_rects_items; + const struct crl_subdev_rect_rep *sd_rects; + u8 binn_hor; + u8 binn_vert; + u8 scale_m; + s32 width; + s32 height; + unsigned int comp_items; + struct crl_ctrl_data_pair *ctrl_data; + unsigned int mode_regs_items; + const struct crl_register_write_rep *mode_regs; + + /* + * Minimum and maximum value for line length pixels and frame length + * lines are added for modes. This facilitates easy handling of + * modes which binning skipping and affects the calculation of + * vblank and hblank values. + * + * The blank values are limited based on the following logic + * + * If mode specific limits are available + * vblank = clamp(min_llp - PA_width, max_llp - PA_width) + * hblank = clamp(min_fll - PA_Height, max_fll - PA_Height + * + * If mode specific blanking limits are not available, then the sensor + * limits will be used in the same manner. 
+ * + * If sensor mode limits are not available, then the values will be + * written directly to the associated control registers. + */ + s32 min_llp; /* minimum/maximum value for line length pixels */ + s32 max_llp; + s32 min_fll; + s32 max_fll; /* minimum/maximum value for frame length lines */ +}; + +struct crl_csi_data_fmt { + u32 code; + u8 pixel_order; + u8 bits_per_pixel; + unsigned int regs_items; + const struct crl_register_write_rep *regs; +}; + +struct crl_flip_data { + u8 flip; + u8 pixel_order; +}; + +struct crl_power_seq_entity { + enum crl_pwr_ent_type type; + char ent_name[12]; + int ent_number; + u16 address; + unsigned int val; + unsigned int undo_val; /* Undo value if any previous step failed */ + unsigned int delay; /* delay in micro seconds */ + struct regulator *regulator_priv; /* R/W */ + struct gpio_desc *gpiod_priv; +}; + +struct crl_nvm_blob { + u8 dev_addr; + u16 start_addr; + u16 size; +}; + +struct crl_nvm { + unsigned int nvm_preop_regs_items; + const struct crl_register_write_rep *nvm_preop_regs; + + unsigned int nvm_postop_regs_items; + const struct crl_register_write_rep *nvm_postop_regs; + + unsigned int nvm_blobs_items; + struct crl_nvm_blob *nvm_config; + u32 nvm_flags; +}; + +/* Representation for v4l2_mbus_frame_desc_entry */ +struct crl_frame_desc { + struct crl_dynamic_entity flags; + struct crl_dynamic_entity bpp; + struct crl_dynamic_entity pixelcode; + struct crl_dynamic_entity start_line; + struct crl_dynamic_entity start_pixel; + struct crl_dynamic_calculated_entity width; + struct crl_dynamic_calculated_entity height; + struct crl_dynamic_entity length; + struct crl_dynamic_entity csi2_channel; + struct crl_dynamic_entity csi2_data_type; +}; + +typedef int (*sensor_specific_init)(struct i2c_client *); +typedef int (*sensor_specific_cleanup)(struct i2c_client *); + +struct crl_sensor_configuration { + + const struct crl_clock_entity *clock_entity; + + const unsigned int power_items; + const struct crl_power_seq_entity 
*power_entities; + const unsigned int power_delay; /* in micro seconds */ + + const unsigned int onetime_init_regs_items; + const struct crl_register_write_rep *onetime_init_regs; + + const unsigned int powerup_regs_items; + const struct crl_register_write_rep *powerup_regs; + + const unsigned int poweroff_regs_items; + const struct crl_register_write_rep *poweroff_regs; + + const unsigned int id_reg_items; + const struct crl_sensor_detect_config *id_regs; + + const unsigned int subdev_items; + const struct crl_sensor_subdev_config *subdevs; + + const struct crl_sensor_limits *sensor_limits; + + const unsigned int pll_config_items; + const struct crl_pll_configuration *pll_configs; + + const unsigned int modes_items; + const struct crl_mode_rep *modes; + /* + * Fail safe mode should be the largest resolution available in the + * mode list. If none of the mode parameters are matched, the driver + * will select this mode for streaming. + */ + const unsigned int fail_safe_mode_index; + + const unsigned int streamon_regs_items; + const struct crl_register_write_rep *streamon_regs; + + const unsigned int streamoff_regs_items; + const struct crl_register_write_rep *streamoff_regs; + + const unsigned int v4l2_ctrls_items; + const struct crl_v4l2_ctrl *v4l2_ctrl_bank; + + const unsigned int csi_fmts_items; + const struct crl_csi_data_fmt *csi_fmts; + + const unsigned int flip_items; + const struct crl_flip_data *flip_data; + + struct crl_nvm crl_nvm_info; + + enum crl_addr_len addr_len; + + unsigned int frame_desc_entries; + enum crl_frame_desc_type frame_desc_type; + struct crl_frame_desc *frame_desc; + char *msr_file_name; + + sensor_specific_init sensor_init; + sensor_specific_cleanup sensor_cleanup; + /* + * Irq handlers for threaded irq. These are needed if driver need to + * handle gpio interrupt. crl_threaded_irq_fn is then mandatory. Irq + * pin configuration is in platform data. 
+ */ + irqreturn_t (*crl_irq_fn)(int irq, void *sensor_struct); + irqreturn_t (*crl_threaded_irq_fn)(int irq, void *sensor_struct); + const bool irq_in_use; + const bool i2c_mutex_in_use; +}; + +struct crlmodule_sensors { + char *pname; + char *name; + struct crl_sensor_configuration *ds; +}; + +/* + * Function to populate the CRL data structure from the sensor configuration + * definition file + */ +int crlmodule_populate_ds(struct crl_sensor *sensor, struct device *dev); + +/* + * Function validate the contents CRL data structure to check if all the + * required fields are filled and are according to the limits. + */ +int crlmodule_validate_ds(struct crl_sensor *sensor); + +/* Function to free all resources allocated for the CRL data structure */ +void crlmodule_release_ds(struct crl_sensor *sensor); + +#endif /* __CRLMODULE_SENSOR_DS_H_ */ diff --git a/drivers/media/i2c/crlmodule/crlmodule.h b/drivers/media/i2c/crlmodule/crlmodule.h new file mode 100644 index 0000000000000..e68e82fd06344 --- /dev/null +++ b/drivers/media/i2c/crlmodule/crlmodule.h @@ -0,0 +1,125 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (C) 2014 - 2018 Intel Corporation + * + * Author: Vinod Govindapillai + * + */ + +#ifndef __CRLMODULE_PRIV_H_ +#define __CRLMODULE_PRIV_H_ + +#include +#include +#include +#include +#include "../../../../include/media/crlmodule.h" +#include +#include +#include "../../../../include/uapi/linux/crlmodule.h" +#include "crlmodule-sensor-ds.h" + +#define CRL_SUBDEVS 3 + +#define CRL_PA_PAD_SRC 0 +#define CRL_PAD_SINK 0 +#define CRL_PAD_SRC 1 +#define CRL_PADS 2 + +struct crl_subdev { + struct v4l2_subdev sd; + struct media_pad pads[2]; + struct v4l2_rect sink_fmt; + struct v4l2_rect crop[2]; + struct v4l2_rect compose; /* compose on sink */ + unsigned short sink_pad; + unsigned short source_pad; + int npads; + struct crl_sensor *sensor; + struct v4l2_ctrl_handler ctrl_handler; + unsigned int field; + unsigned int *route_flags; +}; + +struct crl_sensor { + 
/* + * "mutex" is used to serialise access to all fields here + * except v4l2_ctrls at the end of the struct. "mutex" is also + * used to serialise access to file handle specific + * information. The exception to this rule is the power_mutex + * below. + */ + struct mutex mutex; + /* + * power mutex became necessity because of the v4l2_ctrl_handler_setup + * is being called from power on function which needs to be serialised + * but v4l2_ctrl_handler setup uses "mutex" so it cannot be used. + */ + struct mutex power_mutex; + + struct crl_subdev ssds[CRL_SUBDEVS]; + u32 ssds_used; + struct crl_subdev *src; + struct crl_subdev *binner; + struct crl_subdev *scaler; + struct crl_subdev *pixel_array; + + struct crlmodule_platform_data *platform_data; + + u8 binning_horizontal; + u8 binning_vertical; + + u8 sensor_mode; + u8 scale_m; + u8 fmt_index; + u8 flip_info; + u8 pll_index; + + + int power_count; + + bool streaming; + + struct crl_sensor_configuration *sensor_ds; + struct crl_v4l2_ctrl *v4l2_ctrl_bank; + + /* These are mandatory controls. So good to have reference to these */ + struct v4l2_ctrl *pixel_rate_pa; + struct v4l2_ctrl *link_freq; + struct v4l2_ctrl *pixel_rate_csi; + + s64 *link_freq_menu; + + /* If extra v4l2 contrl has an impact on PLL selection */ + bool ext_ctrl_impacts_pll_selection; + bool ext_ctrl_impacts_mode_selection; + bool blanking_ctrl_not_use; + bool direct_mode_in_use; + const struct crl_mode_rep *current_mode; + + struct clk *xclk; + struct crl_power_seq_entity *pwr_entity; + unsigned int irq; + + u8 *nvm_data; + u16 nvm_size; + + /* Pointer to binary file which contains + * tunable IQ parameters like NR, DPC, BLC + * Not all MSR's are moved to the binary + * at the moment. + */ + const struct firmware *msr_list; + /* + * Pointer to store sensor specific data structure, that + * can be used for example in interrupt specific code. 
+ */ + void *sensor_specific_data; +}; + +#define to_crlmodule_subdev(_sd) \ + container_of(_sd, struct crl_subdev, sd) + +#define to_crlmodule_sensor(_sd) \ + (to_crlmodule_subdev(_sd)->sensor) + +#endif /* __CRLMODULE_PRIV_H_ */ diff --git a/drivers/media/i2c/ici/Kconfig b/drivers/media/i2c/ici/Kconfig new file mode 100644 index 0000000000000..dd967ac80d8f9 --- /dev/null +++ b/drivers/media/i2c/ici/Kconfig @@ -0,0 +1,19 @@ +if VIDEO_INTEL_ICI + +menu "Deserializers for ICI" + +config VIDEO_TI964_ICI + tristate "TI964 driver support" + depends on I2C && VIDEO_INTEL_ICI + ---help--- + This is a driver for TI964 camera for ICI. + +config VIDEO_MAX9286_ICI + tristate "MAX9286 driver support" + depends on I2C && VIDEO_INTEL_ICI + ---help--- + This is a driver for MAX9286 camera for ICI. + +endmenu + +endif diff --git a/drivers/media/i2c/ici/Makefile b/drivers/media/i2c/ici/Makefile new file mode 100644 index 0000000000000..4450c5effb940 --- /dev/null +++ b/drivers/media/i2c/ici/Makefile @@ -0,0 +1,3 @@ +# SPDX-License-Identifier: GPL-2.0 +obj-$(CONFIG_VIDEO_TI964_ICI) += ti964_ici.o +obj-$(CONFIG_VIDEO_MAX9286_ICI) += max9286_ici.o diff --git a/drivers/media/i2c/ici/max9286_ici.c b/drivers/media/i2c/ici/max9286_ici.c new file mode 100644 index 0000000000000..3400cc11946f7 --- /dev/null +++ b/drivers/media/i2c/ici/max9286_ici.c @@ -0,0 +1,1105 @@ +/* SPDX-LIcense_Identifier: GPL-2.0 */ +/* Copyright (C) 2018 Intel Corporation */ + +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +#include "../max9286-reg-settings.h" + +struct max9286_subdev { + struct ici_ext_subdev *sd; + unsigned short rx_port; + unsigned short fsin_gpio; + unsigned short phy_i2c_addr; + unsigned short alias_i2c_addr; + char sd_name[ICI_MAX_NODE_NAME]; +}; + +struct max9286 { + struct ici_ext_subdev ici_sd; + struct ici_ext_subdev_register reg; + struct max9286_pdata *pdata; + struct crlmodule_lite_platform_data subdev_pdata[NR_OF_MAX_SINK_PADS]; + 
unsigned char sensor_present; + unsigned int total_sensor_num; + unsigned int nsources; + unsigned int nsinks; + unsigned int npads; + unsigned int nstreams; + const char *name; + struct max9286_subdev sub_devs[NR_OF_MAX_SINK_PADS]; + struct ici_framefmt *ffmts[NR_OF_MAX_PADS]; + + struct rect *crop; + struct rect *compose; + struct { + unsigned int *stream_id; + } *stream; /* stream enable/disable status, indexed by pad */ + struct { + unsigned int sink; + unsigned int source; + int flags; + } *route; /* pad level info, indexed by stream */ + + struct regmap *regmap8; + struct mutex max_mutex; + int (*create_link)( + struct ici_isys_node *src, + u16 src_pad, + struct ici_isys_node *sink, + u16 sink_pad, + u32 flags); +}; + +#define to_max_9286(_sd) container_of(_sd, struct max9286, ici_sd) +#define to_ici_ext_subdev(_node) container_of(_node, struct ici_ext_subdev, node) + +/* + * Order matters. + * + * 1. Bits-per-pixel, descending. + * 2. Bits-per-pixel compressed, descending. + * 3. Pixel order, same as in pixel_order_str. Formats for all four pixel + * orders must be defined. 
+ */
+static const struct max9286_csi_data_format max_csi_data_formats[] = {
+	{ ICI_FORMAT_YUYV, 16, 16, PIXEL_ORDER_GBRG, 0x1e },
+	{ ICI_FORMAT_UYVY, 16, 16, PIXEL_ORDER_GBRG, 0x1e },
+	{ ICI_FORMAT_SGRBG12, 12, 12, PIXEL_ORDER_GRBG, 0x2c },
+	{ ICI_FORMAT_SRGGB12, 12, 12, PIXEL_ORDER_RGGB, 0x2c },
+	{ ICI_FORMAT_SBGGR12, 12, 12, PIXEL_ORDER_BGGR, 0x2c },
+	{ ICI_FORMAT_SGBRG12, 12, 12, PIXEL_ORDER_GBRG, 0x2c },
+	{ ICI_FORMAT_SGRBG10, 10, 10, PIXEL_ORDER_GRBG, 0x2b },
+	{ ICI_FORMAT_SRGGB10, 10, 10, PIXEL_ORDER_RGGB, 0x2b },
+	{ ICI_FORMAT_SBGGR10, 10, 10, PIXEL_ORDER_BGGR, 0x2b },
+	{ ICI_FORMAT_SGBRG10, 10, 10, PIXEL_ORDER_GBRG, 0x2b },
+	{ ICI_FORMAT_SGRBG8, 8, 8, PIXEL_ORDER_GRBG, 0x2a },
+	{ ICI_FORMAT_SRGGB8, 8, 8, PIXEL_ORDER_RGGB, 0x2a },
+	{ ICI_FORMAT_SBGGR8, 8, 8, PIXEL_ORDER_BGGR, 0x2a },
+	{ ICI_FORMAT_SGBRG8, 8, 8, PIXEL_ORDER_GBRG, 0x2a },
+};
+
+static struct regmap_config max9286_reg_config8 = {
+	.reg_bits = 8,
+	.val_bits = 8,
+};
+
+/*
+ * Serializer register write.
+ *
+ * Temporarily retargets the shared I2C client at the MAX96705 serializer
+ * (S_ADDR_MAX96705 + @offset), retries the write up to 10 times with a
+ * 5-6 ms back-off between attempts, then restores the client to the
+ * deserializer address before returning.
+ *
+ * Returns 0 on success, -EREMOTEIO if every attempt failed.
+ */
+static int max96705_write_register(struct max9286 *max,
+	unsigned int offset, u8 reg, u8 val)
+{
+	int ret;
+	int retry, timeout = 10;
+	struct i2c_client *client = max->ici_sd.client;
+
+	client->addr = S_ADDR_MAX96705 + offset;
+	for (retry = 0; retry < timeout; retry++) {
+		ret = i2c_smbus_write_byte_data(client, reg, val);
+		/*
+		 * Check the bus transfer result ("ret"), not the register
+		 * payload: "val" is an unsigned byte and can never be
+		 * negative, so the previous "val < 0" test disabled the
+		 * retry loop entirely and made the function report success
+		 * even when every write failed.
+		 */
+		if (ret < 0)
+			usleep_range(5000, 6000);
+		else
+			break;
+	}
+
+	client->addr = DS_ADDR_MAX9286;
+	if (retry >= timeout) {
+		pr_err("%s:write reg failed: reg=%2x\n", __func__, reg);
+		return -EREMOTEIO;
+	}
+
+	return 0;
+}
+
+/* Serializer register read */
+static int
+max96705_read_register(struct max9286 *max, unsigned int i, u8 reg)
+{
+	int val;
+	int retry, timeout = 10;
+	struct i2c_client *client = max->ici_sd.client;
+
+	client->addr = S_ADDR_MAX96705 + i;
+	for (retry = 0; retry < timeout; retry++) {
+		val = i2c_smbus_read_byte_data(client, reg);
+		if (val >= 0)
+			break;
+		usleep_range(5000, 6000);
+	}
+
+	client->addr = DS_ADDR_MAX9286;
+	if
(retry >= timeout) { + pr_err("%s:read reg failed: reg=%2x\n", __func__, reg); + return -EREMOTEIO; + } + + return val; +} + +/* Validate csi_data_format */ +static const struct max9286_csi_data_format * +max9286_validate_csi_data_format(u32 code) +{ + unsigned int i; + + for (i = 0; i < ARRAY_SIZE(max_csi_data_formats); i++) { + if (max_csi_data_formats[i].code == code) + return &max_csi_data_formats[i]; + } + + return &max_csi_data_formats[0]; +} + +/* Initialize image sensors and set stream on registers */ +static int max9286_set_stream( + struct ici_isys_node *node, + void *ip, + int enable) +{ + struct ici_ext_subdev *subsubdev = node->sd; + struct ici_ext_subdev *subdev = i2c_get_clientdata(subsubdev->client); + struct max9286 *max = to_max_9286(subdev); + + int i, rval, j; + unsigned int val; + u8 slval = 0xE0; + u8 dtval = 0xF7; + const struct max9286_register_write *max9286_byte_order_settings; + + pr_info("MAX9286 set stream. enable = %d\n", enable); + /* Disable I2C ACK */ + rval = regmap_write(max->regmap8, DS_I2CLOCACK, 0xB6); + if (rval) { + pr_err("Failed to disable I2C ACK!\n"); + return rval; + } + for (i = 0; i < NR_OF_MAX_SINK_PADS; i++) { + if (((0x01 << (i)) & max->sensor_present) == 0) + continue; + + if (strncmp(node->name, max->sub_devs[i].sd_name, ICI_MAX_NODE_NAME)) + continue; + + if (enable) { + /* + * Enable CSI-2 lanes D0, D1, D2, D3 + * Enable CSI-2 DBL (Double Input Mode) + * Enable GMSL DBL for RAWx2 + * Enable RAW10/RAW12 data type + */ + u8 bpp; + const struct max9286_csi_data_format *csi_format = + max9286_validate_csi_data_format(max->ffmts[i]->pixelformat); + + bpp = csi_format->compressed; + + if (bpp == 10) { + dtval = 0xF6; + max9286_byte_order_settings = + &max9286_byte_order_settings_10bit[0]; + } else if (bpp == 12) { + dtval = 0xF7; + max9286_byte_order_settings = + &max9286_byte_order_settings_12bit[0]; + } else { + pr_err("Only support RAW10/12, current bpp is %d!\n", bpp); + return -EINVAL; + } + + rval = 
regmap_write(max->regmap8, DS_CSI_DBL_DT, dtval); + if (rval) { + pr_err("Failed to set data type!\n"); + return rval; + } + + for (j = 0; j < bpp * 2; j++) { + rval = max96705_write_register(max, + S_ADDR_MAX96705_BROADCAST - S_ADDR_MAX96705, + (max9286_byte_order_settings + j)->reg, + (max9286_byte_order_settings + j)->val); + if (rval) { + pr_err("Failed to set max9286 byte order\n"); + return rval; + } + } + usleep_range(2000, 3000); + } + + /* Enable link */ + slval |= (0x0F & (1 << i)); + rval = regmap_write(max->regmap8, DS_LINK_ENABLE, slval); + if (rval) { + pr_err("Failed to enable GMSL links!\n"); + return rval; + } + + rval = regmap_write(max->regmap8, DS_ATUO_MASK_LINK, 0x30); + if (rval) { + pr_err("Failed to write 0x69\n"); + return rval; + } + } +#if 0 + /* Enable I2C ACK */ + rval = regmap_write(max->regmap8, DS_I2CLOCACK, 0x36); + if (rval) { + pr_err("Failed to enable I2C ACK!\n"); + return rval; + } +#endif + /* Check if valid PCLK is available for the links */ + for (i = 1; i <= NR_OF_MAX_SINK_PADS; i++) { + if (((0x01 << (i - 1)) & max->sensor_present) == 0) + continue; + + val = max96705_read_register(max, i, S_INPUT_STATUS); + if ((val != -EREMOTEIO) && (val & 0x01)) + pr_info("Valid PCLK detected for link %d\n", i); + else if (val != -EREMOTEIO) + pr_info("Failed to read PCLK reg for link %d\n", i); + } + + /* Set preemphasis settings for all serializers (set to 3.3dB)*/ + max96705_write_register(max, S_ADDR_MAX96705_BROADCAST - + S_ADDR_MAX96705, S_CMLLVL_PREEMP, 0xAA); + usleep_range(5000, 6000); + + /* Set VSYNC Delay */ + max96705_write_register(max, S_ADDR_MAX96705_BROADCAST - + S_ADDR_MAX96705, S_SYNC_GEN_CONFIG, 0x21); + usleep_range(5000, 6000); + + max96705_write_register(max, S_ADDR_MAX96705_BROADCAST - + S_ADDR_MAX96705, S_VS_DLY_2, 0x06); + usleep_range(5000, 6000); + + max96705_write_register(max, S_ADDR_MAX96705_BROADCAST - + S_ADDR_MAX96705, S_VS_DLY_1, 0xD8); + usleep_range(5000, 6000); + + max96705_write_register(max, 
S_ADDR_MAX96705_BROADCAST - + S_ADDR_MAX96705, S_VS_H_2, 0x26); + usleep_range(5000, 6000); + + max96705_write_register(max, S_ADDR_MAX96705_BROADCAST - + S_ADDR_MAX96705, S_VS_H_1, 0x00); + usleep_range(5000, 6000); + + max96705_write_register(max, S_ADDR_MAX96705_BROADCAST - + S_ADDR_MAX96705, S_VS_H_0, 0x00); + usleep_range(5000, 6000); + + max96705_write_register(max, S_ADDR_MAX96705_BROADCAST - + S_ADDR_MAX96705, S_DBL_ALIGN_TO, 0xC4); + usleep_range(5000, 6000); + + /* Enable link equalizers */ + rval = regmap_write(max->regmap8, DS_ENEQ, 0x0F); + if (rval) { + pr_err("Failed to automatically detect serial data rate!\n"); + return rval; + } + usleep_range(5000, 6000); + rval = regmap_write(max->regmap8, DS_HS_VS, 0x91); + + /* Enable serial links and desable configuration */ + max96705_write_register(max, S_ADDR_MAX96705_BROADCAST - + S_ADDR_MAX96705, S_MAIN_CTL, 0x83); + /* Wait for more than 2 Frames time from each sensor */ + usleep_range(100000, 101000); + + /* + * Poll frame synchronization bit of deserializer + * All the cameras should work in SYNC mode + * MAX9286 sends a pulse to each camera, then each camera sends out + * one frame. 
The VSYNC for each camera should appear in almost same + * time for the deserializer to lock FSYNC + */ + rval = regmap_read(max->regmap8, DS_FSYNC_LOCKED, &val); + if (rval) { + pr_info("Frame SYNC not locked!\n"); + return rval; + } else if (val & (0x01 << 6)) + pr_info("Deserializer Frame SYNC locked\n"); + + /* + * Enable/set bit[7] of DS_CSI_VC_CTL register for VC operation + * Set VC according to the link number + * Enable CSI-2 output + */ + if (!enable) { + rval = regmap_write(max->regmap8, DS_CSI_VC_CTL, 0x93); + if (rval) { + pr_err("Failed to disable CSI output!\n"); + return rval; + } + } else { + rval = regmap_write(max->regmap8, DS_CSI_VC_CTL, 0x9B); + if (rval) { + pr_err("Failed to enable CSI output!\n"); + return rval; + } + } + + return 0; +} + +/* callback for VIDIOC_SUBDEV_G_FMT ioctl handler code */ +static int max9286_get_format(struct ici_isys_node *node, + struct ici_pad_framefmt *fmt) +{ + struct ici_ext_subdev *subsubdev = node->sd; + struct ici_ext_subdev *subdev = i2c_get_clientdata(subsubdev->client); + struct max9286 *max = to_max_9286(subdev); + int i; + + + mutex_lock(&max->max_mutex); + + for (i = 0; i < NR_OF_MAX_SINK_PADS; i++) { + + if (strncmp(node->name, max->sub_devs[i].sd_name, ICI_MAX_NODE_NAME)) + continue; + + + fmt->ffmt.width = max->ffmts[i]->width; + fmt->ffmt.height = max->ffmts[i]->height; + fmt->ffmt.pixelformat = max->ffmts[i]->pixelformat; + fmt->ffmt.field = max->ffmts[i]->field; + fmt->ffmt.colorspace = max->ffmts[i]->colorspace; + fmt->ffmt.flags = max->ffmts[i]->flags; + + mutex_unlock(&max->max_mutex); + + pr_info("framefmt: width: %d, height: %d, code: 0x%x.\n", + fmt->ffmt.width, fmt->ffmt.height, fmt->ffmt.pixelformat); + + return 0; + } + + mutex_unlock(&max->max_mutex); + + pr_err("max9286_get_format: unknown node name \n"); + + return -1; +} + +/* Enumerate media bus formats available at a given sub-device pad */ +static int max9286_enum_mbus_code(struct ici_isys_node *node, + struct 
ici_pad_supported_format_desc *psfd) +{ +// struct ici_ext_subdev *subsubdev = node->sd; +// struct ici_ext_subdev *subdev = i2c_get_clientdata(subsubdev->client); + + pr_err(" TBD !\n"); + + return 0; +} + +static int max9286_get_param(struct ici_ext_sd_param *param) +{ + + if (param->id == ICI_EXT_SD_PARAM_ID_LINK_FREQ) { + param->val = 87750000; + } + + return 0; +} + +static int max9286_set_param(struct ici_ext_sd_param *param) +{ + return 0; +} + +static int max9286_get_menu_item(struct ici_ext_sd_param *param, u32 idx) +{ + return 0; +} + +static int max9286_set_power(struct ici_isys_node *node, int on) +{ + return 0; +} + +/* callback for VIDIOC_SUBDEV_S_FMT ioctl handler code */ +static int max9286_set_format(struct ici_isys_node *node, + struct ici_pad_framefmt *fmt) +{ + struct ici_ext_subdev *subsubdev = node->sd; + struct ici_ext_subdev *subdev = i2c_get_clientdata(subsubdev->client); + struct max9286 *max = to_max_9286(subdev); + const struct max9286_csi_data_format *csi_format; + int i; + + csi_format = max9286_validate_csi_data_format(fmt->ffmt.colorspace); + + mutex_lock(&max->max_mutex); + + for (i = 0; i < NR_OF_MAX_SINK_PADS; i++) { + + if (strncmp(node->name, max->sub_devs[i].sd_name, ICI_MAX_NODE_NAME)) + continue; + + + max->ffmts[i]->width = fmt->ffmt.width; + max->ffmts[i]->height = fmt->ffmt.height; + max->ffmts[i]->pixelformat = fmt->ffmt.pixelformat; + max->ffmts[i]->field = fmt->ffmt.field; + max->ffmts[i]->colorspace = fmt->ffmt.colorspace; + max->ffmts[i]->flags = fmt->ffmt.flags; + + mutex_unlock(&max->max_mutex); + + pr_info("framefmt: width: %d, height: %d, code: 0x%x.\n", + fmt->ffmt.width, fmt->ffmt.height, fmt->ffmt.pixelformat); + + return 0; + } + + mutex_unlock(&max->max_mutex); + + pr_err("max9286_set_format: unknown node name\n"); + + return 0; +} + +static int max9286_set_selection(struct ici_isys_node *node, struct ici_pad_selection *ps) +{ +// TODO place holder + pr_err(" TBD!!! 
\n"); + return 0; +} + +static int max9286_get_selection(struct ici_isys_node *node, struct ici_pad_selection *ps) +{ +// TODO place holder + pr_err(" TBD!!! \n"); + return 0; +} + +static int init_ext_sd(struct i2c_client *client, struct max9286_subdev *max_sd, int idx) +{ + struct max9286 *max; + int rval; + char name[ICI_MAX_NODE_NAME]; + struct ici_ext_subdev *subdev; + + max = to_max_9286(subdev); + subdev = i2c_get_clientdata(client); + + snprintf(name, sizeof(name), "MAX9286 %d", idx); + + strncpy(max->sub_devs[idx].sd_name, name, sizeof(name)); + + max_sd->sd->client = client; + max_sd->sd->num_pads = 2; + max_sd->sd->pads[0].pad_id = 0; + max_sd->sd->pads[0].flags = ICI_PAD_FLAGS_SINK; + max_sd->sd->pads[1].pad_id = 1; + max_sd->sd->pads[1].flags = ICI_PAD_FLAGS_SOURCE; +// TODO +// sd->src_pad = ssd->source_pad; +// below fnctions invoked by csi2 fe code + max_sd->sd->set_param = max9286_set_param; // meant to execute CTRL-IDs/CIDs + max_sd->sd->get_param = max9286_get_param; // meant to execute CTRLIDs/CIDs + max_sd->sd->get_menu_item = max9286_get_menu_item; // get LINK FREQ + if (max->reg.setup_node) { + rval = max->reg.setup_node(max->reg.ipu_data, + max_sd->sd, name); + if (rval) + return rval; + } else { + pr_err("node not registered\n"); + } + +// below invoked by stream code + max_sd->sd->node.node_set_power = max9286_set_power; + max_sd->sd->node.node_set_streaming = max9286_set_stream; +// below invoked by pipeline-device code + max_sd->sd->node.node_get_pad_supported_format = + max9286_enum_mbus_code; //needs modification + max_sd->sd->node.node_set_pad_ffmt = max9286_set_format; + max_sd->sd->node.node_get_pad_ffmt = max9286_get_format; + max_sd->sd->node.node_set_pad_sel = max9286_set_selection; + max_sd->sd->node.node_get_pad_sel = max9286_get_selection; + + + return 0; +} + +static int create_link(struct ici_isys_node *src_node, + u16 srcpad, + struct ici_isys_node *sink_node, + u16 sinkpad, + u32 flag) +{ + struct ici_ext_subdev *sd, *ssd; 
+ struct max9286 *max; + struct max9286_subdev *subdev; + int i, ret; + if (!src_node || !sink_node) + return -EINVAL; + + sd = to_ici_ext_subdev(src_node); + if (!sd) + return -EINVAL; + + max = to_max_9286(sd); + if (!max) + return -EINVAL; + + for (i = 0; i < NR_OF_MAX_SINK_PADS; i++) { + subdev = &max->sub_devs[i]; + if (!subdev) + continue; + ssd = subdev->sd; + ret = max->create_link(&ssd->node, + 1, + sink_node, + sinkpad, + 0); + if (ret) + return ret; + } + return 0; +} + +/* + * called when this subdev is registered. + */ +static int max9286_registered(struct ici_ext_subdev_register *reg) +{ + struct ici_ext_subdev *subdev = reg->sd; + struct i2c_client *client = subdev->client; + struct ici_isys *isys = reg->ipu_data; + struct max9286 *max = to_max_9286(subdev); + struct max9286_subdev *sd, *prev_sd = NULL; + int i, k, rval, num, nsinks; + + + num = max->pdata->subdev_num; + nsinks = max->nsinks; + + max->reg = *reg; + max->create_link = reg->create_link; + + subdev->get_param = max9286_get_param; + subdev->set_param = max9286_set_param; + subdev->get_menu_item = max9286_get_menu_item; + + for (i = 0, k = 0; (i < num) && (k < nsinks); i++, k++) { + struct i2c_client *client2; + struct ici_ext_subdev *sensor_sd; + struct ici_ext_subdev_register sd_register = {0}; + struct max9286_subdev_i2c_info *info = + &max->pdata->subdev_info[i]; + struct crlmodule_lite_platform_data *pdata = + (struct crlmodule_lite_platform_data *) + info->board_info.platform_data; + + if (i >= nsinks) + break; + + /* Spin the sensor subdev name suffix */ +// pdata->suffix = info->suffix; + + memcpy(&max->subdev_pdata[i], pdata, sizeof(*pdata)); + + max->subdev_pdata[i].suffix = info->suffix; + info->board_info.platform_data = &max->subdev_pdata[i]; + + request_module(I2C_MODULE_PREFIX "%s", info->board_info.type); + + client2 = i2c_new_device(client->adapter, &info->board_info); + + if (client2 == NULL || client2->dev.driver == NULL) { + pr_err("@%s, No new i2c device\n", 
__func__); + continue; + } + + /* Get the clientdata set by the sensor driver */ + sensor_sd = i2c_get_clientdata(client2); + if (!sensor_sd) + pr_err("@%s, Failed to get client data\n", __func__); + + sd_register.ipu_data = isys; + sd_register.sd = sensor_sd; + sd_register.setup_node = reg->setup_node; + sd_register.create_link = reg->create_link; + rval = sensor_sd->do_register(&sd_register); + if (rval) { + pr_err("@%s, Failed to register external subdev\n", __func__); + continue; + } + + + + max->sub_devs[k].sd = devm_kzalloc(&client->dev, sizeof(struct ici_ext_subdev), GFP_KERNEL); + if (!max->sub_devs[k].sd) { + pr_err("can't create MAX9286 subdev %d\n", i); + continue; + } +// max->sub_devs[k].rx_port = info->rx_port; +// max->sub_devs[k].phy_i2c_addr = info->phy_i2c_addr; + max->sub_devs[k].alias_i2c_addr = info->board_info.addr; + + sd = &max->sub_devs[k]; + rval = init_ext_sd(max->ici_sd.client, sd, k); + if (rval) + return rval; + + rval = sd_register.create_link(&sensor_sd->node, + sensor_sd->src_pad, + &sd->sd->node, 0, 0); + if (rval) { + pr_err("@%s, error creating link\n", __func__); + return rval; + } + + prev_sd = sd; + } + + /* Replace existing create_link address with MAX9286 create_link implementation + to create link between MAX9286 node and CSI2 node */ + reg->create_link = create_link; + + return 0; +} + +static void max9286_unregistered(struct ici_ext_subdev *subdev) +{ + pr_debug("%s DO NOTHING ?? 
\n", __func__); +} + +static const s64 max9286_op_sys_clock[] = { 87750000, }; +/* Registers MAX9286 sub-devices (Image sensors) */ +static int max9286_register_subdev(struct max9286 *max, struct i2c_client *client) +{ + int i; + + max->ici_sd.client = client; + max->ici_sd.do_register = max9286_registered; + max->ici_sd.do_unregister = max9286_unregistered; + + i2c_set_clientdata(client, &max->ici_sd); + + for (i = 0; i < NR_OF_MAX_SINK_PADS; i++) { + max->ffmts[i]->width = 1920; + max->ffmts[i]->height = 1088; + max->ffmts[i]->pixelformat = ICI_FORMAT_SGRBG10; + max->ffmts[i]->field = ICI_FIELD_NONE; + snprintf(max->sub_devs[i].sd_name, sizeof(max->sub_devs[i].sd_name), + "MAX9286 %d", i); + } + return 0; +} + +/* + * Get the output link order + * By default: + * bits[7:6] 11: Link 3 is 4th in the CSI-2 output order + * bits[5:4] 10: Link 2 is 3rd in the CSI-2 output order + * bits[3:2] 01: Link 1 is 2nd in the CSI-2 output order + * bits[1:0] 00: Link 0 is 1st in the CSI-2 output order + */ +static u8 get_output_link_order(struct max9286 *max) +{ + u8 val = 0xE4, i; + u8 order_config[14][3] = { + {1, 8, 0x27}, + {1, 4, 0xC6}, + {1, 2, 0xE1}, + {1, 1, 0xE4}, + {2, 0xC, 0x4E}, + {2, 0xA, 0x72}, + {2, 0x9, 0x78}, + {2, 0x6, 0xD2}, + {2, 0x5, 0xD8}, + {2, 0x3, 0xE4}, + {3, 0xE, 0x93}, + {3, 0xD, 0x9C}, + {3, 0xB, 0xB4}, + {3, 0x7, 0xE4}, + }; + + if (max->total_sensor_num < 4) { + for (i = 0; i < 14; i++) { + if ((max->total_sensor_num == order_config[i][0]) + && (max->sensor_present == order_config[i][1])) + return order_config[i][2]; + } + } + + /* sensor_num = 4 will return 0xE4 */ + return val; +} + +/* MAX9286 initial setup and Reverse channel setup */ +static int max9286_init(struct max9286 *max, struct i2c_client *client) +{ + int i, rval; + unsigned int val, lval; + u8 mval, slval, tmval; + + usleep_range(10000, 11000); + + rval = regmap_read(max->regmap8, DS_MAX9286_DEVID, &val); + if (rval) { + pr_err("Failed to read device ID of MAX9286!\n"); + return 
rval; + } + pr_info("MAX9286 device ID: 0x%X\n", val); + + rval = regmap_write(max->regmap8, DS_CSI_VC_CTL, 0x93); + if (rval) { + pr_err("Failed to disable CSI output!\n"); + return rval; + } + /* All the links are working in Legacy reverse control-channel mode */ + /* Enable Custom Reverse Channel and First Pulse Length */ + rval = regmap_write(max->regmap8, DS_ENCRC_FPL, 0x4F); + if (rval) { + pr_err("Failed to disable PRBS test!\n"); + return rval; + } + /* + * 2ms of delay is required after any analog change to reverse control + * channel for bus timeout and I2C state machine to settle from any + * glitches + */ + usleep_range(2000, 3000); + /* First pulse length rise time changed from 300ns to 200ns */ + rval = regmap_write(max->regmap8, DS_FPL_RT, 0x1E); + if (rval) { + pr_err("Failed to disable PRBS test!\n"); + return rval; + } + usleep_range(2000, 3000); + + /* Enable configuration links */ + max96705_write_register(max, 0, S_MAIN_CTL, 0x43); + usleep_range(5000, 6000); + + /* + * Enable high threshold for reverse channel input buffer + * This increases immunity to power supply noise when the + * coaxial link is used for power as well as signal + */ + max96705_write_register(max, 0, S_RSVD_8, 0x01); + /* Enable change of reverse control parameters */ + + max96705_write_register(max, 0, S_RSVD_97, 0x5F); + + /* Wait 2ms after any change to reverse control channel */ + usleep_range(2000, 3000); + + /* Increase reverse amplitude from 100mV to 170mV to compensate for + * higher threshold + */ + rval = regmap_write(max->regmap8, DS_FPL_RT, 0x19); + if (rval) { + pr_err("Failed to disable PRBS test!\n"); + return rval; + } + usleep_range(2000, 3000); + + /* + * Enable CSI-2 lanes D0, D1, D2, D3 + * Enable CSI-2 DBL (Double Input Mode) + * Enable GMSL DBL for RAWx2 + * Enable RAW12 data type by default + */ + rval = regmap_write(max->regmap8, DS_CSI_DBL_DT, 0xF7); //RAW12 + if (rval) { + pr_err("Failed to set data type!\n"); + return rval; + } + 
usleep_range(2000, 3000); + + /* Enable Frame sync Auto-mode for row/column reset on frame sync + * sensors + */ + rval = regmap_write(max->regmap8, DS_FSYNCMODE, 0x00); + if (rval) { + pr_err("Failed to set frame sync mode!\n"); + return rval; + } + usleep_range(2000, 3000); + rval = regmap_write(max->regmap8, DS_OVERLAP_WIN_LOW, 0x00); + rval = regmap_write(max->regmap8, DS_OVERLAP_WIN_HIGH, 0x00); + + rval = regmap_write(max->regmap8, DS_FSYNC_PERIOD_LOW, 0x55); + rval = regmap_write(max->regmap8, DS_FSYNC_PERIOD_MIDDLE, 0xc2); + rval = regmap_write(max->regmap8, DS_FSYNC_PERIOD_HIGH, 0x2C); + + rval = regmap_write(max->regmap8, DS_HIGHIMM, 0x06); + + /* + * Enable DBL + * Edge select: Rising Edge + * Enable HS/VS encoding + */ + max96705_write_register(max, 0, S_CONFIG, 0xD4); + usleep_range(2000, 3000); + + for (i = 0; i < ARRAY_SIZE(max9286_byte_order_settings_12bit); i++) { + rval = max96705_write_register(max, 0, + max9286_byte_order_settings_12bit[i].reg, + max9286_byte_order_settings_12bit[i].val); + if (rval) { + pr_err("Failed to set max9286 byte order\n"); + return rval; + } + } + + /* Detect video links */ + rval = regmap_read(max->regmap8, DS_CONFIGL_VIDEOL_DET, &lval); + if (rval) { + pr_err("Failed to read register 0x49!\n"); + return rval; + } + + /* + * Check on which links the sensors are connected + * And also check total number of sensors connected to the deserializer + */ + max->sensor_present = ((lval >> 4) & 0xF) | (lval & 0xF); + + for (i = 0; i < NR_OF_MAX_STREAMS; i++) { + if (max->sensor_present & (0x1 << i)) { + pr_info("Sensor present on deserializer link %d\n", i); + max->total_sensor_num += 1; + } + } + + pr_info("total sensor present = %d", max->total_sensor_num); + pr_info("sensor present on links = %d", max->sensor_present); + + if (!max->total_sensor_num) { + pr_err("No sensors connected!\n"); + } else { + pr_info("Total number of sensors connected = %d\n", + max->total_sensor_num); + } + + slval = get_output_link_order(max); + 
+ /* Set link output order */ + rval = regmap_write(max->regmap8, DS_LINK_OUTORD, slval); + if (rval) { + pr_err("Failed to set Link output order!\n"); + return rval; + } + + slval = 0xE0 | max->sensor_present; + + mval = 0; + tmval = 0; + /* + * Setup each serializer individually and their respective I2C slave + * address changed to a unique value by enabling one reverse channel + * at a time via deserializer's DS_FWDCCEN_REVCCEN control register. + * Also create broadcast slave address for MAX96705 serializer. + * After this stage, i2cdetect on I2C-ADAPTER should display the + * below devices + * 10: Sensor address + * 11, 12, 13, 14: Sensors alias addresses + * 41, 42, 43, 44: Serializers alias addresses + * 45: Serializer's broadcast address + * 48: Deserializer's address + */ + + for (i = 1; i <= NR_OF_MAX_SINK_PADS; i++) { + /* Setup the link when the sensor is connected to the link */ + if (((0x1 << (i - 1)) & max->sensor_present) == 0) + continue; + + /* Enable only one reverse channel at a time */ + mval = (0x11 << (i - 1)); + tmval |= (0x11 << (i - 1)); + rval = regmap_write(max->regmap8, DS_FWDCCEN_REVCCEN, mval); + if (rval) { + pr_err("Failed to enable channel for %d!\n", i); + return rval; + } + /* Wait 2ms after enabling reverse channel */ + usleep_range(2000, 3000); + + /* Change Serializer slave address */ + max96705_write_register(max, 0, S_SERADDR, + (S_ADDR_MAX96705 + i) << 1); + /* Unique link 'i' image sensor slave address */ + max96705_write_register(max, i, S_I2C_SOURCE_IS, + (ADDR_AR0231AT_SENSOR + i) << 1); + /* Link 'i' image sensor slave address */ + max96705_write_register(max, i, S_I2C_DST_IS, + ADDR_AR0231AT_SENSOR << 1); + /* Serializer broadcast address */ + max96705_write_register(max, i, S_I2C_SOURCE_SER, + S_ADDR_MAX96705_BROADCAST << 1); + /* Link 'i' serializer address */ + max96705_write_register(max, i, S_I2C_DST_SER, + (S_ADDR_MAX96705 + i) << 1); + } + + /* Enable I2c reverse channels */ + rval = regmap_write(max->regmap8, 
DS_FWDCCEN_REVCCEN, tmval);
+	if (rval) {
+		/*
+		 * 'i' is stale here (one past the last sink pad after the
+		 * loop), so do not report it as a channel number.
+		 */
+		pr_err("Failed to enable reverse channels!\n");
+		return rval;
+	}
+	usleep_range(2000, 3000);
+
+	return 0;
+}
+
+/* Unbind the MAX9286 device driver from the I2C client */
+static int max9286_remove(struct i2c_client *client)
+{
+	struct ici_ext_subdev *subdev = i2c_get_clientdata(client);
+	struct max9286 *max = to_max_9286(subdev);
+	int i;
+
+	mutex_destroy(&max->max_mutex);
+
+	for (i = 0; i < NR_OF_MAX_SINK_PADS; i++) {
+		max->sub_devs[i].sd = NULL;
+	}
+
+	return 0;
+}
+
+/* Called by I2C probe */
+static int max9286_probe(struct i2c_client *client,
+			const struct i2c_device_id *devid)
+{
+	struct max9286 *max;
+	int i = 0;
+	int rval = 0;
+
+	if (client->dev.platform_data == NULL)
+		return -ENODEV;
+
+	/* Probe entry is not an error condition; log it at debug level. */
+	dev_dbg(&client->dev, "MAX9286 probe!\n");
+	max = devm_kzalloc(&client->dev, sizeof(*max), GFP_KERNEL);
+	if (!max)
+		return -ENOMEM;
+
+	max->pdata = client->dev.platform_data;
+
+	max->nsources = NR_OF_MAX_SOURCE_PADS;
+	max->nsinks = NR_OF_MAX_SINK_PADS;
+	max->npads = NR_OF_MAX_PADS;
+	max->nstreams = NR_OF_MAX_STREAMS;
+
+	max->crop = devm_kcalloc(&client->dev, max->npads,
+			sizeof(struct ici_rect), GFP_KERNEL);
+	max->compose = devm_kcalloc(&client->dev, max->npads,
+			sizeof(struct ici_rect), GFP_KERNEL);
+	max->route = devm_kcalloc(&client->dev, max->nstreams,
+			sizeof(*max->route), GFP_KERNEL);
+	max->stream = devm_kcalloc(&client->dev, max->npads,
+			sizeof(*max->stream), GFP_KERNEL);
+
+	if (!max->crop || !max->compose || !max->route || !max->stream)
+		return -ENOMEM;
+
+	for (i = 0; i < max->npads; i++) {
+		max->ffmts[i] =
+			devm_kcalloc(&client->dev, max->nstreams,
+				sizeof(struct ici_framefmt), GFP_KERNEL);
+		if (!max->ffmts[i])
+			return -ENOMEM;
+
+		/* sizeof(*ptr) keeps the allocation tied to the element type. */
+		max->stream[i].stream_id =
+			devm_kcalloc(&client->dev, max->nsinks,
+				sizeof(*max->stream[i].stream_id), GFP_KERNEL);
+		if (!max->stream[i].stream_id)
+			return -ENOMEM;
+	}
+
+	for (i = 0; i < max->nstreams; i++) {
+		max->route[i].sink = i;
+		max->route[i].source =
MAX_PAD_SOURCE; + max->route[i].flags = 0; + } + + for (i = 0; i < max->nsinks; i++) { + max->stream[i].stream_id[0] = i; + max->stream[MAX_PAD_SOURCE].stream_id[i] = i; + } + + max->regmap8 = devm_regmap_init_i2c(client, &max9286_reg_config8); + if (IS_ERR(max->regmap8)) { + dev_err(&client->dev, "Failed to init regmap8!\n"); + return -EIO; + } + + mutex_init(&max->max_mutex); + + rval = max9286_register_subdev(max, client); + if (rval) { + dev_err(&client->dev, + "Failed to register MAX9286 subdevice!\n"); + goto error_mutex_destroy; + } + + rval = max9286_init(max, client); + if (rval) { + dev_err(&client->dev, "Failed to initialise MAX9286!\n"); + goto error_media_entity; + } + + return 0; + +error_media_entity: +error_mutex_destroy: + mutex_destroy(&max->max_mutex); + + return rval; +} + +#ifdef CONFIG_PM +static int max9286_resume(struct device *dev) +{ + struct i2c_client *client = to_i2c_client(dev); + struct ici_ext_subdev *subdev = i2c_get_clientdata(client); + struct max9286 *max = to_max_9286(subdev); + + return max9286_init(max, client); +} +#else +#define max9286_resume NULL +#endif /* CONFIG_PM */ + +static const struct i2c_device_id max9286_id_table[] = { + { MAX9286_NAME, 0 }, + {}, +}; + +static const struct dev_pm_ops max9286_pm_ops = { + .resume = max9286_resume, +}; + +static struct i2c_driver max9286_i2c_driver = { + .driver = { + .name = MAX9286_NAME, + .pm = &max9286_pm_ops, + }, + .probe = max9286_probe, + .remove = max9286_remove, + .id_table = max9286_id_table, +}; + +module_i2c_driver(max9286_i2c_driver); + +MODULE_AUTHOR("Karthik Gopalakrishnan "); +MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("Maxim96705 serializer and Maxim9286 deserializer driver"); diff --git a/drivers/media/i2c/ici/ti964_ici.c b/drivers/media/i2c/ici/ti964_ici.c new file mode 100644 index 0000000000000..ec9129556e8e4 --- /dev/null +++ b/drivers/media/i2c/ici/ti964_ici.c @@ -0,0 +1,1130 @@ +// SPDX-License-Identifier: GPL-2.0 +// Copyright (C) 2016 - 2018 Intel 
Corporation + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "../ti964-reg.h" + +struct ti964_subdev { + struct ici_ext_subdev *sd; + unsigned short rx_port; + unsigned short fsin_gpio; + unsigned short phy_i2c_addr; + unsigned short alias_i2c_addr; + char sd_name[ICI_MAX_NODE_NAME]; +}; + +struct ti964 { + struct ici_ext_subdev sd; + struct ici_ext_subdev_register reg; + struct ti964_pdata *pdata; + struct ti964_subdev sub_devs[NR_OF_TI964_SINK_PADS]; + struct crlmodule_lite_platform_data subdev_pdata[NR_OF_TI964_SINK_PADS]; + const char *name; + + struct mutex mutex; + + struct regmap *regmap8; + struct regmap *regmap16; + + struct ici_pad_framefmt *ffmts[NR_OF_TI964_SINK_PADS]; + struct ici_rect *crop; + struct ici_rect *compose; + + struct { + unsigned int *stream_id; + } *stream; /* stream enable/disable status, indexed by pad */ + struct { + unsigned int sink; + unsigned int source; + int flags; + } *route; /* pad level info, indexed by stream */ + + unsigned int nsinks; + unsigned int nsources; + unsigned int nstreams; + unsigned int npads; + + struct gpio_chip gc; + int (*create_link)( + struct ici_isys_node *src, + u16 src_pad, + struct ici_isys_node *sink, + u16 sink_pad, + u32 flags); +}; + +static int init_ext_sd(struct i2c_client *client, struct ti964_subdev *sd, int idx); +static int ti964_find_subdev_index(struct ti964 *va, struct ici_ext_subdev *sd); +static int create_link(struct ici_isys_node *src_node, u16 srcpad, + struct ici_isys_node *sink_node, u16 sinkpad, u32 flag); +static int ti964_get_param(struct ici_ext_sd_param *param); +static int ti964_get_menu_item(struct ici_ext_sd_param *param, u32 idx); +static int ti964_set_param(struct ici_ext_sd_param *param); + +#define to_ti964(_sd) container_of(_sd, struct ti964, sd) +#define to_ici_ext_subdev(_node) container_of(_node, struct ici_ext_subdev, node) +#define TI964_SRC_PAD 1 + +static const s64 ti964_op_sys_clock[] = 
{400000000, 800000000}; +static const u8 ti964_op_sys_clock_reg_val[] = { + TI964_MIPI_800MBPS, + TI964_MIPI_1600MBPS +}; + + +static const struct ti964_csi_data_format va_csi_data_formats[] = { + { ICI_FORMAT_YUYV, 16, 16, PIXEL_ORDER_GBRG, 0x1e }, + { ICI_FORMAT_UYVY, 16, 16, PIXEL_ORDER_GBRG, 0x1e }, + { ICI_FORMAT_SGRBG12, 12, 12, PIXEL_ORDER_GRBG, 0x2c }, + { ICI_FORMAT_SRGGB12, 12, 12, PIXEL_ORDER_RGGB, 0x2c }, + { ICI_FORMAT_SBGGR12, 12, 12, PIXEL_ORDER_BGGR, 0x2c }, + { ICI_FORMAT_SGBRG12, 12, 12, PIXEL_ORDER_GBRG, 0x2c }, + { ICI_FORMAT_SGRBG10, 10, 10, PIXEL_ORDER_GRBG, 0x2b }, + { ICI_FORMAT_SRGGB10, 10, 10, PIXEL_ORDER_RGGB, 0x2b }, + { ICI_FORMAT_SBGGR10, 10, 10, PIXEL_ORDER_BGGR, 0x2b }, + { ICI_FORMAT_SGBRG10, 10, 10, PIXEL_ORDER_GBRG, 0x2b }, + { ICI_FORMAT_SGRBG8, 8, 8, PIXEL_ORDER_GRBG, 0x2a }, + { ICI_FORMAT_SRGGB8, 8, 8, PIXEL_ORDER_RGGB, 0x2a }, + { ICI_FORMAT_SBGGR8, 8, 8, PIXEL_ORDER_BGGR, 0x2a }, + { ICI_FORMAT_SGBRG8, 8, 8, PIXEL_ORDER_GBRG, 0x2a }, +}; + + +static struct regmap_config ti964_reg_config8 = { + .reg_bits = 8, + .val_bits = 8, +}; + +static struct regmap_config ti964_reg_config16 = { + .reg_bits = 16, + .val_bits = 8, + .reg_format_endian = REGMAP_ENDIAN_BIG, +}; + +static int ti964_reg_set_bit(struct ti964 *va, unsigned char reg, + unsigned char bit, unsigned char val) +{ + int ret; + unsigned int reg_val; + + ret = regmap_read(va->regmap8, reg, ®_val); + if (ret) + return ret; + if (val) + reg_val |= 1 << bit; + else + reg_val &= ~(1 << bit); + + return regmap_write(va->regmap8, reg, reg_val); +} + +static int ti964_map_phy_i2c_addr(struct ti964 *va, unsigned short rx_port, + unsigned short addr) +{ + int rval; + + rval = regmap_write(va->regmap8, TI964_RX_PORT_SEL, + (rx_port << 4) + (1 << rx_port)); + if (rval) + return rval; + + return regmap_write(va->regmap8, TI964_SLAVE_ID0, addr); +} + +static int ti964_map_alias_i2c_addr(struct ti964 *va, unsigned short rx_port, + unsigned short addr) +{ + int rval; + + rval = 
regmap_write(va->regmap8, TI964_RX_PORT_SEL,
+		(rx_port << 4) + (1 << rx_port));
+	if (rval)
+		return rval;
+
+	return regmap_write(va->regmap8, TI964_SLAVE_ALIAS_ID0, addr);
+}
+
+/*
+ * Route the frame-sync signal to one of the two back-channel GPIOs of
+ * the given RX port.  Silently skips (with a warning) when FSIN is not
+ * enabled in TI964_FS_CTL.
+ */
+static int ti964_fsin_gpio_init(struct ti964 *va, unsigned short rx_port,
+				unsigned short fsin_gpio)
+{
+	int rval;
+	unsigned int reg_val;	/* regmap_read() takes unsigned int * */
+
+	rval = regmap_read(va->regmap8, TI964_FS_CTL, &reg_val);
+	if (rval) {
+		pr_err("Failed to read gpio status.\n");
+		return rval;
+	}
+
+	/*
+	 * Operator-precedence fix: "!reg_val & TI964_FSIN_ENABLE" negated
+	 * reg_val before masking, so the check misfired for any non-zero
+	 * register value.  The intent is to test the FSIN enable bit.
+	 */
+	if (!(reg_val & TI964_FSIN_ENABLE)) {
+		pr_warn("FSIN not enabled, skip config FSIN GPIO.\n");
+		return 0;
+	}
+
+	rval = regmap_write(va->regmap8, TI964_RX_PORT_SEL,
+		(rx_port << 4) + (1 << rx_port));
+	if (rval)
+		return rval;
+
+	rval = regmap_read(va->regmap8, TI964_BC_GPIO_CTL0, &reg_val);
+	if (rval) {
+		pr_err("Failed to read gpio status.\n");
+		return rval;
+	}
+
+	if (fsin_gpio == 0) {
+		reg_val &= ~TI964_GPIO0_MASK;
+		reg_val |= TI964_GPIO0_FSIN;
+	} else {
+		reg_val &= ~TI964_GPIO1_MASK;
+		reg_val |= TI964_GPIO1_FSIN;
+	}
+
+	rval = regmap_write(va->regmap8, TI964_BC_GPIO_CTL0, reg_val);
+	if (rval)
+		pr_err("Failed to set gpio.\n");
+
+	return rval;
+}
+
+/*
+ * Function main code replicated from /drivers/media/i2c/smiapp/smiapp-core.c
+ * Slightly modified based on the CRL Module changes
+ */
+static int ti964_enum_mbus_code(struct ici_isys_node *node, struct ici_pad_supported_format_desc *psfd)
+{
+
+	psfd->color_format = 0x1e; // for ICI_FORMAT_UYVY
+	// sensor->sensor_ds->csi_fmts[psfd->idx].code;
+	psfd->min_width = TI964_MIN_WIDTH; //sensor->sensor_ds->sensor_limits->x_addr_min;
+	psfd->max_width = TI964_MAX_WIDTH; //sensor->sensor_ds->sensor_limits->x_addr_max;
+	psfd->min_height = TI964_MIN_HEIGHT; //sensor->sensor_ds->sensor_limits->y_addr_min;
+	psfd->max_height = TI964_MAX_HEIGHT; //sensor->sensor_ds->sensor_limits->y_addr_max;
+	return 0;
+}
+
+static const struct ti964_csi_data_format
+	*ti964_validate_csi_data_format(u32 code)
+{
+	unsigned int i;
+
+	for (i = 0; i < ARRAY_SIZE(va_csi_data_formats);
i++) { + if (va_csi_data_formats[i].code == code) + return &va_csi_data_formats[i]; + } + + return &va_csi_data_formats[0]; +} + +static int __ti964_set_format(struct ici_ext_subdev *subdev, struct ici_pad_framefmt *pff) +{ + struct i2c_client *client = subdev->client; + struct ici_ext_subdev *sd = i2c_get_clientdata(client); + struct ti964 *va = to_ti964(sd); + int index = ti964_find_subdev_index(va, subdev); + + + va->ffmts[index]->ffmt.width = pff->ffmt.width; + va->ffmts[index]->ffmt.height = pff->ffmt.height; + va->ffmts[index]->ffmt.pixelformat = pff->ffmt.pixelformat; + va->ffmts[index]->ffmt.field = pff->ffmt.field; + + return 0; +} + +static int __ti964_get_format(struct ici_ext_subdev *subdev, struct ici_pad_framefmt *pff) +{ + struct i2c_client *client = subdev->client; + struct ici_ext_subdev *sd = i2c_get_clientdata(client); + struct ti964 *va = to_ti964(sd); + int index = ti964_find_subdev_index(va, subdev); + +// TODO hardocded same format for all pads of TI964 + pff->ffmt.width = va->ffmts[index]->ffmt.width; + pff->ffmt.height = va->ffmts[index]->ffmt.height; + pff->ffmt.pixelformat = va->ffmts[index]->ffmt.pixelformat; +// sensor->sensor_ds->csi_fmts[sensor->fmt_index].code; + pff->ffmt.field = + ((va->ffmts[index]->ffmt.field == ICI_FIELD_ANY) ? 
+ ICI_FIELD_NONE : va->ffmts[index]->ffmt.field); + return 0; + +} + +static int ti964_set_selection(struct ici_isys_node *node, struct ici_pad_selection *ps) +{ +// TODO place holder + return 0; +} + +static int ti964_get_selection(struct ici_isys_node *node, struct ici_pad_selection *ps) +{ +// TODO place holder + return 0; +} + +static int ti964_get_format(struct ici_isys_node *node, struct ici_pad_framefmt *pff) +{ + struct ici_ext_subdev *subsubdev = node->sd; + struct ici_ext_subdev *subdev = i2c_get_clientdata(subsubdev->client); + struct ti964 *va = to_ti964(subdev); + + mutex_lock(&va->mutex); + __ti964_get_format(subsubdev, pff); + mutex_unlock(&va->mutex); + +// pr_debug(subdev->dev, "subdev_format: which: %s, pad: %d, stream: %d.\n", +// fmt->which == V4L2_SUBDEV_FORMAT_ACTIVE ? +// "V4L2_SUBDEV_FORMAT_ACTIVE" : "V4L2_SUBDEV_FORMAT_TRY", +// fmt->pad, fmt->stream); + +// pr_debug("framefmt: width: %d, height: %d, code: 0x%x.\n", +// fmt->format.width, fmt->format.height, fmt->format.code); + + return 0; +} + +static int ti964_set_format(struct ici_isys_node *node, struct ici_pad_framefmt *pff) +{ + struct ici_ext_subdev *subsubdev = node->sd; + struct ici_ext_subdev *subdev = i2c_get_clientdata(subsubdev->client); + struct ti964 *va = to_ti964(subdev); + + mutex_lock(&va->mutex); + __ti964_set_format(subsubdev, pff); + + mutex_unlock(&va->mutex); + + pr_debug("framefmt: width: %d, height: %d, code: 0x%x.\n", + pff->ffmt.width, pff->ffmt.height, pff->ffmt.pixelformat); + + return 0; +} +/* Initialize sensor connected to TI964 */ +static int ti964_init_ext_subdev(struct ti964_subdev_info *info, + struct ici_ext_subdev_register *reg, + struct i2c_client *client, + struct ici_ext_subdev_register *sd_register) +{ + struct i2c_client *client2; + struct ici_ext_subdev *sensor_sd; + int rval = 0; + + request_module(I2C_MODULE_PREFIX "%s", info->board_info.type); + + client2 = i2c_new_device(client->adapter, &info->board_info); + + if (client2 == NULL || 
client2->dev.driver == NULL) { + pr_err("%s, No new i2c device\n", __func__); + return -ENODEV; + } + + /* Get the clientdata set by the sensor driver */ + sensor_sd = i2c_get_clientdata(client2); + if (!sensor_sd) { + pr_err("%s, Failed to get client data\n", __func__); + return -EINVAL; + } + + sd_register->ipu_data = reg->ipu_data; + sd_register->sd = sensor_sd; + sd_register->setup_node = reg->setup_node; + sd_register->create_link = reg->create_link; + + rval = sensor_sd->do_register(sd_register); + + return rval; +} + +static int ti964_registered(struct ici_ext_subdev_register *reg) +{ + struct ici_ext_subdev *subdev = reg->sd; + struct ti964 *va = to_ti964(subdev); + struct ti964_subdev *sd, *prev_sd = NULL; + struct i2c_client *client = subdev->client; + struct ici_ext_subdev_register sd_register = {0}; + int i, k, rval; + + if (!reg->sd || !reg->setup_node || !reg->create_link) { + pr_err("ti964_registered error\n"); + return -EINVAL; + } + + va->reg = *reg; + va->create_link = reg->create_link; + + /* ti964->subdev represents the ti964 itself and + ti964->sub_devs represents every port/vc */ + subdev->get_param = ti964_get_param; + subdev->set_param = ti964_set_param; + subdev->get_menu_item = ti964_get_menu_item; + + for (i = 0, k = 0; i < va->pdata->subdev_num; i++) { + struct ti964_subdev_info *info = + &va->pdata->subdev_info[i]; + struct crlmodule_lite_platform_data *pdata = + (struct crlmodule_lite_platform_data *) + info->board_info.platform_data; + + if (k >= va->nsinks) + break; + + /* + * The sensors should not share the same pdata structure. + * Clone the pdata for each sensor. + */ + memcpy(&va->subdev_pdata[k], pdata, sizeof(*pdata)); + if (va->subdev_pdata[k].xshutdown != 0 && + va->subdev_pdata[k].xshutdown != 1) { + pr_err("xshutdown(%d) must be 0 or 1 to connect.\n", + va->subdev_pdata[k].xshutdown); + return -EINVAL; + } + + /* If 0 is xshutdown, then 1 would be FSIN, vice versa. 
*/ + va->sub_devs[k].fsin_gpio = 1 - va->subdev_pdata[k].xshutdown; + + /* Spin sensor subdev suffix name */ + va->subdev_pdata[k].suffix = info->suffix; + + /* + * Change the gpio value to have xshutdown + * and rx port included, so in gpio_set those + * can be caculated from it. + */ + va->subdev_pdata[k].xshutdown += va->gc.base + + info->rx_port * NR_OF_GPIOS_PER_PORT; + info->board_info.platform_data = &va->subdev_pdata[k]; + + if (!info->phy_i2c_addr || !info->board_info.addr) { + pr_err("No physical i2c address and alias i2c address found\n"); + return -EINVAL; + } + + /* Map PHY I2C address. */ + rval = ti964_map_phy_i2c_addr(va, info->rx_port, + info->phy_i2c_addr); + if (rval) + return rval; + + /* Map 7bit ALIAS I2C address. */ + rval = ti964_map_alias_i2c_addr(va, info->rx_port, + info->board_info.addr << 1); + if (rval) + return rval; + + /* Initialize sensor connected to TI964 */ + rval = ti964_init_ext_subdev(info, reg, client, + &sd_register); + if (rval) { + pr_err("%s, Failed to register external subdev\n", __func__); + continue; + } + + /* Config FSIN GPIO */ + rval = ti964_fsin_gpio_init(va, info->rx_port, + va->sub_devs[k].fsin_gpio); + if (rval) + return rval; + + /* Allocate ici_ext_subdev for each TI964 port */ + va->sub_devs[k].sd = devm_kzalloc(&client->dev, sizeof(struct ici_ext_subdev), GFP_KERNEL); + if (!va->sub_devs[k].sd) { + pr_err("can't create new i2c subdev %d-%04x\n", + info->i2c_adapter_id, + info->board_info.addr); + continue; + } + va->sub_devs[k].rx_port = info->rx_port; + va->sub_devs[k].phy_i2c_addr = info->phy_i2c_addr; + va->sub_devs[k].alias_i2c_addr = info->board_info.addr; + memcpy(va->sub_devs[k].sd_name, + va->subdev_pdata[k].module_name, + min(sizeof(va->sub_devs[k].sd_name) - 1, + sizeof(va->subdev_pdata[k].module_name) - 1)); + + sd = &va->sub_devs[k]; + rval = init_ext_sd(va->sd.client, sd, k); + if (rval) + return rval; + + rval = sd_register.create_link(&sd_register.sd->node, + sd_register.sd->src_pad, + 
&sd->sd->node, 0, 0); + if (rval) { + pr_err("%s, error creating link\n", __func__); + return rval; + } + if (prev_sd == NULL) { + prev_sd = sd; + k++; + continue; + } + prev_sd = sd; + + k++; + } + /* Replace existing create_link address with TI964 create_link implementation + to create link between TI964 node and CSI2 node */ + reg->create_link = create_link; + return 0; +} + + +static int create_link(struct ici_isys_node *src_node, + u16 srcpad, + struct ici_isys_node *sink_node, + u16 sinkpad, + u32 flag) +{ + struct ici_ext_subdev *sd, *ssd; + struct ti964 *va; + struct ti964_subdev *subdev; + int i, ret; + if (!src_node || !sink_node) + return -EINVAL; + + sd = to_ici_ext_subdev(src_node); + if (!sd) + return -EINVAL; + + va = to_ti964(sd); + if (!va) + return -EINVAL; + + for (i = 0; i < NR_OF_TI964_SINK_PADS; i++) { + subdev = &va->sub_devs[i]; + if (!subdev) + continue; + ssd = subdev->sd; + ret = va->create_link(&ssd->node, + TI964_SRC_PAD, + sink_node, + sinkpad, + 0); + if (ret) + return ret; + } + return 0; +} + +static void ti964_unregistered(struct ici_ext_subdev *subdev) +{ + pr_debug("%s\n", __func__); +} + +static int ti964_set_param(struct ici_ext_sd_param *param) +{ + return 0; +} + +static int ti964_set_power(struct ici_isys_node *node, int on) +{ + struct ici_ext_subdev *subsubdev = node->sd; + struct ti964 *va; + int ret; + u8 val; + struct ici_ext_subdev *subdev = i2c_get_clientdata(subsubdev->client); + if (!subdev) + return -EINVAL; + + va = to_ti964(subdev); + + if (!va) + return -EINVAL; + + pr_debug("%s %d\n", __func__, on); + ret = regmap_write(va->regmap8, TI964_RESET, + (on) ? TI964_POWER_ON : TI964_POWER_OFF); + if (ret || !on) + return ret; + + /* Configure MIPI clock bsaed on control value. 
*/ + ret = regmap_write(va->regmap8, TI964_CSI_PLL_CTL, + ti964_op_sys_clock_reg_val[0]); +// ti964_op_sys_clock_reg_val[ +// v4l2_ctrl_g_ctrl(va->link_freq)]); + if (ret) + return ret; + val = TI964_CSI_ENABLE; +// val |= TI964_CSI_CONTS_CLOCK; + /* Enable skew calculation when 1.6Gbps output is enabled. */ +// TODO pegging to 0.8 Gbps for now +// if (v4l2_ctrl_g_ctrl(va->link_freq)) +// val |= TI964_CSI_SKEWCAL; + return regmap_write(va->regmap8, TI964_CSI_CTL, val); +} + +#ifdef TEST_PATTERN +static int ti964_tp_set_stream(struct ici_ext_subdev *subdev, int enable) +{ + struct ti964 *va = to_ti964(subdev); + int i, rval; + + for (i = 0; i < ARRAY_SIZE(ti964_tp_settings); i++) { + rval = regmap_write(va->regmap8, + ti964_tp_settings[i].reg, + ti964_tp_settings[i].val); + if (rval) { + pr_err("Register write error.\n"); + return rval; + } + } + + rval = regmap_write(va->regmap8, TI964_IND_ACC_DATA, enable); + if (rval) { + pr_err("Register write error.\n"); + return rval; + } + + return 0; +} +#endif + +static int ti964_rx_port_config(struct ti964 *va, int sink, int rx_port) +{ + int rval; + u8 bpp; + int port_cfg2_val; + int vc_mode_reg_index; + int vc_mode_reg_val; + int mipi_dt_type; + int high_fv_flags = va->subdev_pdata[sink].high_framevalid_flags; + + /* Select RX port. */ + rval = regmap_write(va->regmap8, TI964_RX_PORT_SEL, + (rx_port << 4) + (1 << rx_port)); + if (rval) { + pr_err("Failed to select RX port.\n"); + return rval; + } + + /* Set RX port mode. */ + bpp = ti964_validate_csi_data_format( + va->ffmts[0]->ffmt.pixelformat)->width; + rval = regmap_write(va->regmap8, TI964_PORT_CONFIG, + (bpp == 12) ? + TI964_FPD3_RAW12_75MHz : TI964_FPD3_RAW10_100MHz); + if (rval) { + pr_err("Failed to set port config.\n"); + return rval; + } + + mipi_dt_type = ti964_validate_csi_data_format( + va->ffmts[0]->ffmt.pixelformat)->mipi_dt_code; + /* + * RAW8 and YUV422 need to enable RAW10 bit mode. + * RAW12 need to set the RAW10_8bit to reserved. 
+ */ + switch (bpp) { + case 8: + case 16: + port_cfg2_val = TI964_RAW10_8BIT & (~high_fv_flags); + vc_mode_reg_index = TI964_RAW10_ID; + break; + case 12: + port_cfg2_val = TI964_RAW12; + vc_mode_reg_index = TI964_RAW12_ID; + break; + default: + port_cfg2_val = TI964_RAW10_NORMAL & (~high_fv_flags); + vc_mode_reg_index = TI964_RAW10_ID; + break; + } + + vc_mode_reg_val = mipi_dt_type | sink << 6; +#if 0 + rval = regmap_write(va->regmap8, vc_mode_reg_index, vc_mode_reg_val); + if (rval) { + pr_err("Failed to set virtual channel & data type.\n"); + return rval; + } +#endif + + rval = regmap_write(va->regmap8, TI964_PORT_CONFIG2, port_cfg2_val); + if (rval) { + pr_err("Failed to set port config2.\n"); + return rval; + } + + return 0; +} + +static int ti964_map_subdevs_addr(struct ti964 *va) +{ + unsigned short rx_port, phy_i2c_addr, alias_i2c_addr; + int i, rval; + + for (i = 0; i < NR_OF_TI964_SINK_PADS; i++) { + rx_port = va->sub_devs[i].rx_port; + phy_i2c_addr = va->sub_devs[i].phy_i2c_addr; + alias_i2c_addr = va->sub_devs[i].alias_i2c_addr; + + if (!phy_i2c_addr || !alias_i2c_addr) + continue; + + rval = ti964_map_phy_i2c_addr(va, rx_port, phy_i2c_addr); + if (rval) + return rval; + + /* set 7bit alias i2c addr */ + rval = ti964_map_alias_i2c_addr(va, rx_port, + alias_i2c_addr << 1); + if (rval) + return rval; + } + + return 0; +} + +static int ti964_find_subdev_index(struct ti964 *va, struct ici_ext_subdev *sd) +{ + int i; + + for (i = 0; i < NR_OF_TI964_SINK_PADS; i++) { + if (va->sub_devs[i].sd == sd) + return i; + } + + WARN_ON(1); + + return -EINVAL; +} + +static int ti964_set_stream(struct ici_isys_node *node, void *ip, int enable) +{ + struct ti964 *va; + struct ici_ext_subdev *subsubdev = node->sd; + struct ici_ext_subdev *subdev = i2c_get_clientdata(subsubdev->client); + int i, j, rval; + unsigned int rx_port; + DECLARE_BITMAP(rx_port_enabled, 32); + + if (!subdev) + return -EINVAL; + + va = to_ti964(subdev); + + if (!va) + return -EINVAL; + + 
pr_debug("TI964 set stream, enable %d\n", enable); +#ifdef TEST_PATTERN + return ti964_tp_set_stream(subsubdev, enable); +#endif + + bitmap_zero(rx_port_enabled, 32); + for (i = 0; i < NR_OF_TI964_SINK_PADS; i++) { + j = ti964_find_subdev_index(va, subsubdev); + if (j < 0) + return -EINVAL; + rx_port = va->sub_devs[j].rx_port; + + rval = ti964_rx_port_config(va, i, rx_port); + if (rval < 0) + return rval; + + bitmap_set(rx_port_enabled, rx_port, 1); + /* RX port fordward */ + rval = ti964_reg_set_bit(va, TI964_FWD_CTL1, + rx_port + 4, !enable); + if (rval) { + pr_err("Failed to forward RX port%d. enable %d\n", + i, enable); + return rval; + } + + } + + return 0; +} + +static int ti964_get_param(struct ici_ext_sd_param *param) +{ +// TODO this is hard-coded for now + + param->val = 400000000; +// or param->val = 800000000; + return 0; +} + +static int ti964_get_menu_item(struct ici_ext_sd_param *param, u32 idx) +{ + return 0; +} + +static int init_ext_sd(struct i2c_client *client, struct ti964_subdev *ti_sd, int idx) +{ + struct ti964 *va; + char name[ICI_MAX_NODE_NAME]; + int rval; + struct ici_ext_subdev *subdev = i2c_get_clientdata(client);; + + if (!subdev) + return -EINVAL; + + va = to_ti964(subdev); + + if (!va) + return -EINVAL; + + if (va->pdata->suffix) { + snprintf(name, + sizeof(name), "TI964 %c %d", + va->pdata->suffix, idx); + } else + snprintf(name, + sizeof(name), "TI964 %4.4x %d", + i2c_adapter_id(client->adapter), idx); + + ti_sd->sd->client = client; + ti_sd->sd->num_pads = 2; + //ti_sd->sd->pads[0].pad_id = 0; + //ti_sd->sd->pads[0].flags = ICI_PAD_FLAGS_SINK; + //ti_sd->sd->pads[1].pad_id = 1; + //ti_sd->sd->pads[1].flags = ICI_PAD_FLAGS_SOURCE; + // TODO + //sd->src_pad = ssd->source_pad; + // below fnctions invoked by csi2 fe code + ti_sd->sd->set_param = ti964_set_param; // meant to execute CTRL-IDs/CIDs + ti_sd->sd->get_param = ti964_get_param; // meant to execute CTRLIDs/CIDs + ti_sd->sd->get_menu_item = ti964_get_menu_item; // get LINK FREQ 
+ if (va->reg.setup_node) { + rval = va->reg.setup_node(va->reg.ipu_data, + ti_sd->sd, name); + if (rval) + return rval; + } else + pr_err("node not registered\n"); + +// below invoked by stream code + ti_sd->sd->node.node_set_power = ti964_set_power; + ti_sd->sd->node.node_set_streaming = ti964_set_stream; +// below invoked by pipeline-device code + ti_sd->sd->node.node_get_pad_supported_format = + ti964_enum_mbus_code; //needs modification + ti_sd->sd->node.node_set_pad_ffmt = ti964_set_format; + ti_sd->sd->node.node_get_pad_ffmt = ti964_get_format; + ti_sd->sd->node.node_set_pad_sel = ti964_set_selection; + ti_sd->sd->node.node_get_pad_sel = ti964_get_selection; + return 0; +} + +static int ti964_register_subdev(struct i2c_client *client, struct ti964 *va) +{ + int rval = 0; + int i = 0; + + va->sd.client = client; + va->sd.do_register = ti964_registered; + va->sd.do_unregister = ti964_unregistered; + i2c_set_clientdata(client, &va->sd); + for (i = 0; i < NR_OF_TI964_SINK_PADS; i++) { + va->ffmts[i]->ffmt.width = 1280; + va->ffmts[i]->ffmt.height = 720; + va->ffmts[i]->ffmt.pixelformat = ICI_FORMAT_UYVY; + } + return rval; +} + +static int ti964_init(struct ti964 *va) +{ + unsigned int reset_gpio = va->pdata->reset_gpio; + int i, rval; + unsigned int val; + + gpio_set_value(reset_gpio, 1); + usleep_range(2000, 3000); + pr_debug("Setting reset gpio %d to 1.\n", reset_gpio); + + rval = regmap_read(va->regmap8, TI964_DEVID, &val); + if (rval) { + pr_err("Failed to read device ID of TI964!\n"); + return rval; + } + pr_info("TI964 device ID: 0x%X\n", val); + + for (i = 0; i < ARRAY_SIZE(ti964_init_settings); i++) { + rval = regmap_write(va->regmap8, + ti964_init_settings[i].reg, + ti964_init_settings[i].val); + if (rval) + return rval; + } + + rval = ti964_map_subdevs_addr(va); + if (rval) + return rval; + + for (i = 0; i < NR_OF_TI964_SINK_PADS; i++) { + rval = ti964_fsin_gpio_init(va, va->sub_devs[i].rx_port, + va->sub_devs[i].fsin_gpio); + if (rval) + return rval; 
+ } + + return 0; +} + +static void ti964_gpio_set(struct gpio_chip *chip, unsigned gpio, int value) +{ +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 4, 0) + struct i2c_client *client = to_i2c_client(chip->dev); +#else + struct i2c_client *client = to_i2c_client(chip->parent); +#endif + struct ici_ext_subdev *subdev = i2c_get_clientdata(client); + struct ti964 *va = to_ti964(subdev); + unsigned int reg_val; + int rx_port, gpio_port; + int ret; + + if (gpio >= NR_OF_TI964_GPIOS) + return; + + rx_port = gpio / NR_OF_GPIOS_PER_PORT; + gpio_port = gpio % NR_OF_GPIOS_PER_PORT; + + ret = regmap_write(va->regmap8, TI964_RX_PORT_SEL, + (rx_port << 4) + (1 << rx_port)); + if (ret) { + pr_debug("Failed to select RX port.\n"); + return; + } + ret = regmap_read(va->regmap8, TI964_BC_GPIO_CTL0, ®_val); + if (ret) { + pr_debug("Failed to read gpio status.\n"); + return; + } + + if (gpio_port == 0) { + reg_val &= ~TI964_GPIO0_MASK; + reg_val |= value ? TI964_GPIO0_HIGH : TI964_GPIO0_LOW; + } else { + reg_val &= ~TI964_GPIO1_MASK; + reg_val |= value ? 
TI964_GPIO1_HIGH : TI964_GPIO1_LOW; + } + + ret = regmap_write(va->regmap8, TI964_BC_GPIO_CTL0, reg_val); + if (ret) + pr_debug("Failed to set gpio.\n"); +} + +static int ti964_gpio_direction_output(struct gpio_chip *chip, + unsigned gpio, int level) +{ + return 0; +} + +static int ti964_probe(struct i2c_client *client, + const struct i2c_device_id *devid) +{ + struct ti964 *va; + int i, rval = 0; + + if (client->dev.platform_data == NULL) + return -ENODEV; + + va = devm_kzalloc(&client->dev, sizeof(*va), GFP_KERNEL); + if (!va) + return -ENOMEM; + + va->pdata = client->dev.platform_data; + + va->nsources = NR_OF_TI964_SOURCE_PADS; + va->nsinks = NR_OF_TI964_SINK_PADS; + va->npads = NR_OF_TI964_PADS; + va->nstreams = NR_OF_TI964_STREAMS; + + va->crop = devm_kcalloc(&client->dev, va->npads, + sizeof(struct ici_rect), GFP_KERNEL); + + va->compose = devm_kcalloc(&client->dev, va->npads, + sizeof(struct ici_rect), GFP_KERNEL); + + va->route = devm_kcalloc(&client->dev, va->nstreams, + sizeof(*va->route), GFP_KERNEL); + + va->stream = devm_kcalloc(&client->dev, va->npads, + sizeof(*va->stream), GFP_KERNEL); + + if (!va->crop || !va->compose || !va->route || !va->stream) + return -ENOMEM; + + for (i = 0; i < va->npads; i++) { + va->ffmts[i] = devm_kcalloc(&client->dev, va->nstreams, + sizeof(struct ici_pad_framefmt), + GFP_KERNEL); + if (!va->ffmts[i]) + return -ENOMEM; + + va->stream[i].stream_id = + devm_kcalloc(&client->dev, va->nsinks, + sizeof(*va->stream[i].stream_id), GFP_KERNEL); + if (!va->stream[i].stream_id) + return -ENOMEM; + } + + for (i = 0; i < va->nstreams; i++) { + va->route[i].sink = i; + va->route[i].source = TI964_PAD_SOURCE; + va->route[i].flags = 0; + } + + for (i = 0; i < va->nsinks; i++) { + va->stream[i].stream_id[0] = i; + va->stream[TI964_PAD_SOURCE].stream_id[i] = i; + } + + va->regmap8 = devm_regmap_init_i2c(client, + &ti964_reg_config8); + if (IS_ERR(va->regmap8)) { + pr_err("Failed to init regmap8!\n"); + return -EIO; + } + + va->regmap16 
= devm_regmap_init_i2c(client, + &ti964_reg_config16); + if (IS_ERR(va->regmap16)) { + pr_err("Failed to init regmap16!\n"); + return -EIO; + } + + mutex_init(&va->mutex); + rval = ti964_register_subdev(client, va); + if (rval) { + pr_err("Failed to register va subdevice!\n"); + return rval; + } + + if (devm_gpio_request_one(&client->dev, va->pdata->reset_gpio, 0, + "ti964 reset") != 0) { + pr_err("Unable to acquire gpio %d\n", + va->pdata->reset_gpio); + return -ENODEV; + } + + rval = ti964_init(va); + if (rval) { + pr_err("Failed to init TI964!\n"); + return rval; + } + + /* + * TI964 has several back channel GPIOs. + * We export GPIO0 and GPIO1 to control reset or fsin. + */ +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 4, 0) + va->gc.dev = &client->dev; +#else + va->gc.parent = &client->dev; +#endif + va->gc.owner = THIS_MODULE; + va->gc.label = "TI964 GPIO"; + va->gc.ngpio = NR_OF_TI964_GPIOS; + va->gc.base = -1; + va->gc.set = ti964_gpio_set; + va->gc.direction_output = ti964_gpio_direction_output; + rval = gpiochip_add(&va->gc); + if (rval) { + pr_err("Failed to add gpio chip!\n"); + return -EIO; + } + + return 0; +} + +static int ti964_remove(struct i2c_client *client) +{ + struct ti964 *va; + struct i2c_client *sub_client; + int i; + struct ici_ext_subdev *subdev = i2c_get_clientdata(client);; + + if (!subdev) + return -EINVAL; + + va = to_ti964(subdev); + + if (!va) + return 0; + + mutex_destroy(&va->mutex); + + for (i = 0; i < NR_OF_TI964_SINK_PADS; i++) { + + if (va->sub_devs[i].sd) { + sub_client = va->sub_devs[i].sd->client; + i2c_unregister_device(sub_client); + } + va->sub_devs[i].sd = NULL; + } + + gpiochip_remove(&va->gc); + + return 0; +} + +#ifdef CONFIG_PM +static int ti964_suspend(struct device *dev) +{ + return 0; +} + +static int ti964_resume(struct device *dev) +{ + struct ti964 *va; + struct i2c_client *client = to_i2c_client(dev); + struct ici_ext_subdev *subdev = i2c_get_clientdata(client);; + + if (!subdev) + return -EINVAL; + + va = 
to_ti964(subdev); + + if (!va) + return -EINVAL; + + return ti964_init(va); +} +#else +#define ti964_suspend NULL +#define ti964_resume NULL +#endif /* CONFIG_PM */ + +static const struct i2c_device_id ti964_id_table[] = { + { TI964_NAME, 0 }, + { }, +}; +MODULE_DEVICE_TABLE(i2c, ti964_id_table); + +static const struct dev_pm_ops ti964_pm_ops = { + .suspend = ti964_suspend, + .resume = ti964_resume, +}; + +static struct i2c_driver ti964_i2c_driver = { + .driver = { + .name = TI964_NAME, + .pm = &ti964_pm_ops, + }, + .probe = ti964_probe, + .remove = ti964_remove, + .id_table = ti964_id_table, +}; +module_i2c_driver(ti964_i2c_driver); + +MODULE_AUTHOR("Karthik Gopalakrishnan "); +MODULE_LICENSE("Dual BSD/GPL"); +MODULE_DESCRIPTION("TI964 CSI2-Aggregator driver for RTOS reference"); + diff --git a/drivers/media/i2c/imx274.c b/drivers/media/i2c/imx274.c index f8c70f1a34feb..8cc3bdb7f608c 100644 --- a/drivers/media/i2c/imx274.c +++ b/drivers/media/i2c/imx274.c @@ -636,16 +636,19 @@ static int imx274_write_table(struct stimx274 *priv, const struct reg_8 table[]) static inline int imx274_read_reg(struct stimx274 *priv, u16 addr, u8 *val) { + unsigned int uint_val; int err; - err = regmap_read(priv->regmap, addr, (unsigned int *)val); + err = regmap_read(priv->regmap, addr, &uint_val); if (err) dev_err(&priv->client->dev, "%s : i2c read failed, addr = %x\n", __func__, addr); else dev_dbg(&priv->client->dev, "%s : addr 0x%x, val=0x%x\n", __func__, - addr, *val); + addr, uint_val); + + *val = uint_val; return err; } diff --git a/drivers/media/i2c/max9286-reg-settings.h b/drivers/media/i2c/max9286-reg-settings.h new file mode 100644 index 0000000000000..f03e6fc1260ab --- /dev/null +++ b/drivers/media/i2c/max9286-reg-settings.h @@ -0,0 +1,113 @@ +/* SPDX-LIcense_Identifier: GPL-2.0 */ +/* Copyright (C) 2018 Intel Corporation */ + +#ifndef MAX9286_REG_H +#define MAX9286_REG_H + +#include + +#define DS_ADDR_MAX9286 0x48 +#define S_ADDR_MAX96705 0x40 +#define 
S_ADDR_MAX96705_BROADCAST (S_ADDR_MAX96705 + NR_OF_MAX_STREAMS + 1) + +#define ADDR_AR0231AT_SENSOR 0x10 + +/* Deserializer: MAX9286 registers */ +#define DS_LINK_ENABLE 0x00 +#define DS_FSYNCMODE 0x01 +#define DS_FSYNC_PERIOD_LOW 0x06 +#define DS_FSYNC_PERIOD_MIDDLE 0x07 +#define DS_FSYNC_PERIOD_HIGH 0x08 +#define DS_FWDCCEN_REVCCEN 0x0A +#define DS_LINK_OUTORD 0x0B +#define DS_HS_VS 0x0C +#define DS_CSI_DBL_DT 0x12 +#define DS_CSI_VC_CTL 0x15 +#define DS_ENEQ 0x1B +#define DS_HIGHIMM 0x1C +#define DS_MAX9286_DEVID 0x1E +#define DS_FSYNC_LOCKED 0x31 +#define DS_I2CLOCACK 0x34 +#define DS_FPL_RT 0x3B +#define DS_ENCRC_FPL 0x3F +#define DS_CONFIGL_VIDEOL_DET 0x49 +#define DS_OVERLAP_WIN_LOW 0x63 +#define DS_OVERLAP_WIN_HIGH 0x64 +#define DS_ATUO_MASK_LINK 0x69 + +/* Serializer: MAX96705 registers */ +#define S_SERADDR 0x00 +#define S_MAIN_CTL 0x04 +#define S_CMLLVL_PREEMP 0x06 +#define S_CONFIG 0x07 +#define S_RSVD_8 0x08 +#define S_I2C_SOURCE_IS 0x09 +#define S_I2C_DST_IS 0x0A +#define S_I2C_SOURCE_SER 0x0B +#define S_I2C_DST_SER 0x0C +#define S_INPUT_STATUS 0x15 +#define S_SYNC_GEN_CONFIG 0x43 +#define S_VS_DLY_2 0x44 +#define S_VS_DLY_1 0x45 +#define S_VS_H_2 0x47 +#define S_VS_H_1 0x48 +#define S_VS_H_0 0x49 +#define S_DBL_ALIGN_TO 0x67 +#define S_RSVD_97 0x97 + +struct max9286_register_write { + u8 reg; + u8 val; +}; + +static const struct max9286_register_write max9286_byte_order_settings_12bit[] = { + {0x20, 0x0B}, + {0x21, 0x0A}, + {0x22, 0x09}, + {0x23, 0x08}, + {0x24, 0x07}, + {0x25, 0x06}, + {0x26, 0x05}, + {0x27, 0x04}, + {0x28, 0x03}, + {0x29, 0x02}, + {0x2A, 0x01}, + {0x2B, 0x00}, + {0x30, 0x1B}, + {0x31, 0x1A}, + {0x32, 0x19}, + {0x33, 0x18}, + {0x34, 0x17}, + {0x35, 0x16}, + {0x36, 0x15}, + {0x37, 0x14}, + {0x38, 0x13}, + {0x39, 0x12}, + {0x3A, 0x11}, + {0x3B, 0x10}, +}; + +static const struct max9286_register_write max9286_byte_order_settings_10bit[] = { + {0x20, 0x09}, + {0x21, 0x08}, + {0x22, 0x07}, + {0x23, 0x06}, + {0x24, 0x05}, + {0x25, 0x04}, 
+ {0x26, 0x03}, + {0x27, 0x02}, + {0x28, 0x01}, + {0x29, 0x00}, + {0x30, 0x19}, + {0x31, 0x18}, + {0x32, 0x17}, + {0x33, 0x16}, + {0x34, 0x15}, + {0x35, 0x14}, + {0x36, 0x13}, + {0x37, 0x12}, + {0x38, 0x11}, + {0x39, 0x10}, +}; + +#endif diff --git a/drivers/media/i2c/max9286.c b/drivers/media/i2c/max9286.c new file mode 100644 index 0000000000000..2a973ff6529bc --- /dev/null +++ b/drivers/media/i2c/max9286.c @@ -0,0 +1,1254 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (C) 2018 Intel Corporation */ + +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include + +#include "max9286-reg-settings.h" + +struct max9286 { + struct v4l2_subdev v4l2_sd; + struct max9286_pdata *pdata; + struct media_pad pad[NR_OF_MAX_PADS]; + unsigned char sensor_present; + unsigned int total_sensor_num; + unsigned int nsources; + unsigned int nsinks; + unsigned int npads; + unsigned int nstreams; + const char *name; + struct v4l2_ctrl_handler ctrl_handler; + struct v4l2_subdev *sub_devs[NR_OF_MAX_SINK_PADS]; + struct v4l2_mbus_framefmt *ffmts[NR_OF_MAX_PADS]; + struct rect *crop; + struct rect *compose; + struct { + unsigned int *stream_id; + } *stream; /* stream enable/disable status, indexed by pad */ + struct { + unsigned int sink; + unsigned int source; + int flags; + } *route; /* pad level info, indexed by stream */ + + struct regmap *regmap8; + struct mutex max_mutex; + struct v4l2_ctrl *link_freq; + struct v4l2_ctrl *test_pattern; +}; + +#define to_max_9286(_sd) container_of(_sd, struct max9286, v4l2_sd) + +/* + * Order matters. + * + * 1. Bits-per-pixel, descending. + * 2. Bits-per-pixel compressed, descending. + * 3. Pixel order, same as in pixel_order_str. Formats for all four pixel + * orders must be defined. 
+ */ +static const struct max9286_csi_data_format max_csi_data_formats[] = { + { MEDIA_BUS_FMT_YUYV8_1X16, 16, 16, PIXEL_ORDER_GBRG, 0x1e }, + { MEDIA_BUS_FMT_UYVY8_1X16, 16, 16, PIXEL_ORDER_GBRG, 0x1e }, + { MEDIA_BUS_FMT_SGRBG12_1X12, 12, 12, PIXEL_ORDER_GRBG, 0x2c }, + { MEDIA_BUS_FMT_SRGGB12_1X12, 12, 12, PIXEL_ORDER_RGGB, 0x2c }, + { MEDIA_BUS_FMT_SBGGR12_1X12, 12, 12, PIXEL_ORDER_BGGR, 0x2c }, + { MEDIA_BUS_FMT_SGBRG12_1X12, 12, 12, PIXEL_ORDER_GBRG, 0x2c }, + { MEDIA_BUS_FMT_SGRBG10_1X10, 10, 10, PIXEL_ORDER_GRBG, 0x2b }, + { MEDIA_BUS_FMT_SRGGB10_1X10, 10, 10, PIXEL_ORDER_RGGB, 0x2b }, + { MEDIA_BUS_FMT_SBGGR10_1X10, 10, 10, PIXEL_ORDER_BGGR, 0x2b }, + { MEDIA_BUS_FMT_SGBRG10_1X10, 10, 10, PIXEL_ORDER_GBRG, 0x2b }, + { MEDIA_BUS_FMT_SGRBG8_1X8, 8, 8, PIXEL_ORDER_GRBG, 0x2a }, + { MEDIA_BUS_FMT_SRGGB8_1X8, 8, 8, PIXEL_ORDER_RGGB, 0x2a }, + { MEDIA_BUS_FMT_SBGGR8_1X8, 8, 8, PIXEL_ORDER_BGGR, 0x2a }, + { MEDIA_BUS_FMT_SGBRG8_1X8, 8, 8, PIXEL_ORDER_GBRG, 0x2a }, +}; + +static const uint32_t max9286_supported_codes_pad[] = { + MEDIA_BUS_FMT_YUYV8_1X16, + MEDIA_BUS_FMT_UYVY8_1X16, + MEDIA_BUS_FMT_SGRBG12_1X12, + MEDIA_BUS_FMT_SRGGB12_1X12, + MEDIA_BUS_FMT_SBGGR12_1X12, + MEDIA_BUS_FMT_SGBRG12_1X12, + MEDIA_BUS_FMT_SGRBG10_1X10, + MEDIA_BUS_FMT_SRGGB10_1X10, + MEDIA_BUS_FMT_SBGGR10_1X10, + MEDIA_BUS_FMT_SGBRG10_1X10, + MEDIA_BUS_FMT_SGRBG8_1X8, + MEDIA_BUS_FMT_SRGGB8_1X8, + MEDIA_BUS_FMT_SBGGR8_1X8, + MEDIA_BUS_FMT_SGBRG8_1X8, + 0, +}; + +static const uint32_t *max9286_supported_codes[] = { + max9286_supported_codes_pad, +}; + +static struct regmap_config max9286_reg_config8 = { + .reg_bits = 8, + .val_bits = 8, +}; + +/* Serializer register write */ +static int max96705_write_register(struct max9286 *max, + unsigned int offset, u8 reg, u8 val) +{ + int ret; + int retry, timeout = 10; + struct i2c_client *client = v4l2_get_subdevdata(&max->v4l2_sd); + + client->addr = S_ADDR_MAX96705 + offset; + for (retry = 0; retry < timeout; retry++) { + ret = 
i2c_smbus_write_byte_data(client, reg, val); + if (ret < 0) + usleep_range(5000, 6000); + else + break; + } + + client->addr = DS_ADDR_MAX9286; + if (retry >= timeout) { + dev_err(max->v4l2_sd.dev, + "%s:write reg failed: reg=%2x\n", __func__, reg); + return -EREMOTEIO; + } + + return 0; +} + +/* Serializer register read */ +static int +max96705_read_register(struct max9286 *max, unsigned int i, u8 reg) +{ + int val; + int retry, timeout = 10; + struct i2c_client *client = v4l2_get_subdevdata(&max->v4l2_sd); + + client->addr = S_ADDR_MAX96705 + i; + for (retry = 0; retry < timeout; retry++) { + val = i2c_smbus_read_byte_data(client, reg); + if (val >= 0) + break; + usleep_range(5000, 6000); + } + + client->addr = DS_ADDR_MAX9286; + if (retry >= timeout) { + dev_err(max->v4l2_sd.dev, + "%s:read reg failed: reg=%2x\n", __func__, reg); + return -EREMOTEIO; + } + + return val; +} + +/* Validate csi_data_format */ +static const struct max9286_csi_data_format * +max9286_validate_csi_data_format(u32 code) +{ + unsigned int i; + + for (i = 0; i < ARRAY_SIZE(max_csi_data_formats); i++) { + if (max_csi_data_formats[i].code == code) + return &max_csi_data_formats[i]; + } + + return &max_csi_data_formats[0]; +} + +/* Initialize image sensors and set stream on registers */ +static int max9286_set_stream(struct v4l2_subdev *subdev, int enable) +{ + struct max9286 *max = to_max_9286(subdev); + struct media_pad *remote_pad; + struct v4l2_subdev *sd; + int i, rval, j; + unsigned int val; + u8 slval = 0xE0; + u8 dtval = 0xF7; + const struct max9286_register_write *max9286_byte_order_settings; + + dev_dbg(max->v4l2_sd.dev, "MAX9286 set stream. 
enable = %d\n", enable); + + /* Disable I2C ACK */ + rval = regmap_write(max->regmap8, DS_I2CLOCACK, 0xB6); + if (rval) { + dev_err(max->v4l2_sd.dev, "Failed to disable I2C ACK!\n"); + return rval; + } + + for (i = 0; i < NR_OF_MAX_SINK_PADS; i++) { + if (((0x01 << (i)) & max->sensor_present) == 0) + continue; + + /* Find the pad at the remote end of the link */ + remote_pad = media_entity_remote_pad(&max->pad[i]); + + if (!remote_pad) + continue; + + if (enable) { + /* + * Enable CSI-2 lanes D0, D1, D2, D3 + * Enable CSI-2 DBL (Double Input Mode) + * Enable GMSL DBL for RAWx2 + * Enable RAW10/RAW12 data type + */ + u8 bpp = max9286_validate_csi_data_format( + max->ffmts[i][0].code)->width; + + if (bpp == 10) { + dtval = 0xF6; + max9286_byte_order_settings = + &max9286_byte_order_settings_10bit[0]; + } else if (bpp == 12) { + dtval = 0xF7; + max9286_byte_order_settings = + &max9286_byte_order_settings_12bit[0]; + } else { + dev_err(max->v4l2_sd.dev, + "Only support RAW10/12, current bpp is %d!\n", bpp); + return -EINVAL; + } + + rval = regmap_write(max->regmap8, DS_CSI_DBL_DT, dtval); + if (rval) { + dev_err(max->v4l2_sd.dev, "Failed to set data type!\n"); + return rval; + } + + for (j = 0; j < bpp * 2; j++) { + rval = max96705_write_register(max, + S_ADDR_MAX96705_BROADCAST - S_ADDR_MAX96705, + (max9286_byte_order_settings + j)->reg, + (max9286_byte_order_settings + j)->val); + if (rval) { + dev_err(max->v4l2_sd.dev, + "Failed to set max9286 byte order\n"); + return rval; + } + } + usleep_range(2000, 3000); + } + + /* Enable link */ + slval |= (0x0F & (1 << i)); + rval = regmap_write(max->regmap8, DS_LINK_ENABLE, slval); + if (rval) { + dev_err(max->v4l2_sd.dev, + "Failed to enable GMSL links!\n"); + return rval; + } + + rval = regmap_write(max->regmap8, DS_ATUO_MASK_LINK, 0x30); + if (rval) { + dev_err(max->v4l2_sd.dev, "Failed to write 0x69\n"); + return rval; + } + /* Calls sensor set stream */ + sd = media_entity_to_v4l2_subdev(remote_pad->entity); + rval = 
v4l2_subdev_call(sd, video, s_stream, enable); + if (rval) { + dev_err(max->v4l2_sd.dev, + "Failed to set stream for %s. enable = %d\n", + sd->name, enable); + return rval; + } + } + + /* Enable I2C ACK */ + rval = regmap_write(max->regmap8, DS_I2CLOCACK, 0x36); + if (rval) { + dev_err(max->v4l2_sd.dev, "Failed to enable I2C ACK!\n"); + return rval; + } + + /* Check if valid PCLK is available for the links */ + for (i = 1; i <= NR_OF_MAX_SINK_PADS; i++) { + if (((0x01 << (i - 1)) & max->sensor_present) == 0) + continue; + + val = max96705_read_register(max, i, S_INPUT_STATUS); + if ((val != -EREMOTEIO) && (val & 0x01)) + dev_info(max->v4l2_sd.dev, + "Valid PCLK detected for link %d\n", i); + else if (val != -EREMOTEIO) + dev_info(max->v4l2_sd.dev, + "Failed to read PCLK reg for link %d\n", i); + } + + /* Set preemphasis settings for all serializers (set to 3.3dB)*/ + max96705_write_register(max, S_ADDR_MAX96705_BROADCAST - + S_ADDR_MAX96705, S_CMLLVL_PREEMP, 0xAA); + usleep_range(5000, 6000); + + /* Set VSYNC Delay */ + max96705_write_register(max, S_ADDR_MAX96705_BROADCAST - + S_ADDR_MAX96705, S_SYNC_GEN_CONFIG, 0x21); + usleep_range(5000, 6000); + + max96705_write_register(max, S_ADDR_MAX96705_BROADCAST - + S_ADDR_MAX96705, S_VS_DLY_2, 0x06); + usleep_range(5000, 6000); + + max96705_write_register(max, S_ADDR_MAX96705_BROADCAST - + S_ADDR_MAX96705, S_VS_DLY_1, 0xD8); + usleep_range(5000, 6000); + + max96705_write_register(max, S_ADDR_MAX96705_BROADCAST - + S_ADDR_MAX96705, S_VS_H_2, 0x26); + usleep_range(5000, 6000); + + max96705_write_register(max, S_ADDR_MAX96705_BROADCAST - + S_ADDR_MAX96705, S_VS_H_1, 0x00); + usleep_range(5000, 6000); + + max96705_write_register(max, S_ADDR_MAX96705_BROADCAST - + S_ADDR_MAX96705, S_VS_H_0, 0x00); + usleep_range(5000, 6000); + + max96705_write_register(max, S_ADDR_MAX96705_BROADCAST - + S_ADDR_MAX96705, S_DBL_ALIGN_TO, 0xC4); + usleep_range(5000, 6000); + + /* Enable link equalizers */ + rval = regmap_write(max->regmap8, 
DS_ENEQ, 0x0F); + if (rval) { + dev_err(max->v4l2_sd.dev, + "Failed to automatically detect serial data rate!\n"); + return rval; + } + usleep_range(5000, 6000); + rval = regmap_write(max->regmap8, DS_HS_VS, 0x91); + + /* Enable serial links and desable configuration */ + max96705_write_register(max, S_ADDR_MAX96705_BROADCAST - + S_ADDR_MAX96705, S_MAIN_CTL, 0x83); + /* Wait for more than 2 Frames time from each sensor */ + usleep_range(100000, 101000); + + /* + * Poll frame synchronization bit of deserializer + * All the cameras should work in SYNC mode + * MAX9286 sends a pulse to each camera, then each camera sends out + * one frame. The VSYNC for each camera should appear in almost same + * time for the deserializer to lock FSYNC + */ + rval = regmap_read(max->regmap8, DS_FSYNC_LOCKED, &val); + if (rval) { + dev_info(max->v4l2_sd.dev, "Frame SYNC not locked!\n"); + return rval; + } else if (val & (0x01 << 6)) + dev_info(max->v4l2_sd.dev, "Deserializer Frame SYNC locked\n"); + + /* + * Enable/set bit[7] of DS_CSI_VC_CTL register for VC operation + * Set VC according to the link number + * Enable CSI-2 output + */ + if (!enable) { + rval = regmap_write(max->regmap8, DS_CSI_VC_CTL, 0x93); + if (rval) { + dev_err(max->v4l2_sd.dev, + "Failed to disable CSI output!\n"); + return rval; + } + } else { + rval = regmap_write(max->regmap8, DS_CSI_VC_CTL, 0x9B); + if (rval) { + dev_err(max->v4l2_sd.dev, + "Failed to enable CSI output!\n"); + return rval; + } + } + + return 0; +} + +/* Get the media bus format */ +static struct v4l2_mbus_framefmt * +__max9286_get_ffmt(struct v4l2_subdev *subdev, + struct v4l2_subdev_pad_config *cfg, + unsigned int pad, unsigned int which, + unsigned int stream) +{ + struct max9286 *max = to_max_9286(subdev); + + if (which == V4L2_SUBDEV_FORMAT_TRY) + return v4l2_subdev_get_try_format(subdev, cfg, pad); + else + return &max->ffmts[pad][stream]; +} + +/* callback for VIDIOC_SUBDEV_G_FMT ioctl handler code */ +static int 
max9286_get_format(struct v4l2_subdev *subdev, + struct v4l2_subdev_pad_config *cfg, + struct v4l2_subdev_format *fmt) +{ + struct max9286 *max = to_max_9286(subdev); + + if (fmt->stream > max->nstreams) + return -EINVAL; + + mutex_lock(&max->max_mutex); + fmt->format = *__max9286_get_ffmt(subdev, cfg, fmt->pad, fmt->which, + fmt->stream); + mutex_unlock(&max->max_mutex); + + dev_dbg(subdev->dev, "subdev_format: which: %s, pad: %d, stream: %d.\n", + fmt->which == V4L2_SUBDEV_FORMAT_ACTIVE ? + "V4L2_SUBDEV_FORMAT_ACTIVE" : "V4L2_SUBDEV_FORMAT_TRY", + fmt->pad, fmt->stream); + + dev_dbg(subdev->dev, "framefmt: width: %d, height: %d, code: 0x%x.\n", + fmt->format.width, fmt->format.height, fmt->format.code); + + return 0; +} + +/* callback for VIDIOC_SUBDEV_S_FMT ioctl handler code */ +static int max9286_set_format(struct v4l2_subdev *subdev, + struct v4l2_subdev_pad_config *cfg, + struct v4l2_subdev_format *fmt) +{ + struct max9286 *max = to_max_9286(subdev); + const struct max9286_csi_data_format *csi_format; + struct v4l2_mbus_framefmt *ffmt; + + if (fmt->stream > max->nstreams) + return -EINVAL; + + csi_format = max9286_validate_csi_data_format(fmt->format.code); + + mutex_lock(&max->max_mutex); + ffmt = __max9286_get_ffmt(subdev, cfg, fmt->pad, fmt->which, + fmt->stream); + if (fmt->which == V4L2_SUBDEV_FORMAT_ACTIVE) { + ffmt->width = fmt->format.width; + ffmt->height = fmt->format.height; + ffmt->code = csi_format->code; + } + + fmt->format = *ffmt; + mutex_unlock(&max->max_mutex); + + dev_dbg(subdev->dev, "framefmt: width: %d, height: %d, code: 0x%x.\n", + ffmt->width, ffmt->height, ffmt->code); + + return 0; +} + +/* get the current low level media bus frame parameters */ +static int max9286_get_frame_desc(struct v4l2_subdev *sd, + unsigned int pad, struct v4l2_mbus_frame_desc *desc) +{ + struct max9286 *max = to_max_9286(sd); + struct v4l2_mbus_frame_desc_entry *entry = desc->entry; + u8 vc = 0; + int i; + + desc->type = V4L2_MBUS_FRAME_DESC_TYPE_CSI2; + + 
for (i = 0; i < min_t(int, max->nstreams, desc->num_entries); i++) { + struct v4l2_mbus_framefmt *ffmt = + &max->ffmts[i][MAX_PAD_SOURCE]; + const struct max9286_csi_data_format *csi_format = + max9286_validate_csi_data_format(ffmt->code); + + entry->two_dim.width = ffmt->width; + entry->two_dim.height = ffmt->height; + entry->pixelcode = ffmt->code; + entry->bus.csi2.channel = vc++; + entry->bpp = csi_format->compressed; + entry++; + } + + return 0; +} + +/* Enumerate media bus formats available at a given sub-device pad */ +static int max9286_enum_mbus_code(struct v4l2_subdev *sd, + struct v4l2_subdev_pad_config *cfg, + struct v4l2_subdev_mbus_code_enum *code) +{ + struct max9286 *max = to_max_9286(sd); + const uint32_t *supported_code = max9286_supported_codes[code->pad]; + bool next_stream = false; + int i; + + if (code->stream & V4L2_SUBDEV_FLAG_NEXT_STREAM) { + next_stream = true; + code->stream &= ~V4L2_SUBDEV_FLAG_NEXT_STREAM; + } + + if (code->stream > max->nstreams) + return -EINVAL; + + if (next_stream) { + if (!(max->pad[code->pad].flags & MEDIA_PAD_FL_MULTIPLEX)) + return -EINVAL; + + if (code->stream < max->nstreams - 1) { + code->stream++; + return 0; + } else + return -EINVAL; + } + + for (i = 0; supported_code[i]; i++) { + if (i == code->index) { + code->code = supported_code[i]; + return 0; + } + } + + return -EINVAL; +} + +/* Configure Media Controller routing */ +static int max9286_set_routing(struct v4l2_subdev *sd, + struct v4l2_subdev_routing *route) +{ + struct max9286 *max = to_max_9286(sd); + int i, j, ret = 0; + + for (i = 0; i < min(route->num_routes, max->nstreams); ++i) { + struct v4l2_subdev_route *t = &route->routes[i]; + unsigned int sink = t->sink_pad; + unsigned int source = t->source_pad; + + if (t->sink_stream > max->nstreams - 1 || + t->source_stream > max->nstreams - 1) + continue; + + if (t->source_pad != MAX_PAD_SOURCE) + continue; + + for (j = 0; j < max->nstreams; j++) { + if (sink == max->route[j].sink && + source == 
max->route[j].source) + break; + } + + if (j == max->nstreams) + continue; + + max->stream[sink].stream_id[0] = t->sink_stream; + max->stream[source].stream_id[sink] = t->source_stream; + + if (t->flags & V4L2_SUBDEV_ROUTE_FL_ACTIVE) + max->route[j].flags |= V4L2_SUBDEV_ROUTE_FL_ACTIVE; + else if (!(t->flags & V4L2_SUBDEV_ROUTE_FL_ACTIVE)) + max->route[j].flags &= (~V4L2_SUBDEV_ROUTE_FL_ACTIVE); + } + + return ret; +} + +/* Configure Media Controller routing */ +static int max9286_get_routing(struct v4l2_subdev *sd, + struct v4l2_subdev_routing *route) +{ + struct max9286 *max = to_max_9286(sd); + int i; + + for (i = 0; i < min(max->nstreams, route->num_routes); ++i) { + unsigned int sink = max->route[i].sink; + unsigned int source = max->route[i].source; + + route->routes[i].sink_pad = sink; + route->routes[i].sink_stream = + max->stream[sink].stream_id[0]; + route->routes[i].source_pad = source; + route->routes[i].source_stream = + max->stream[source].stream_id[sink]; + route->routes[i].flags = max->route[i].flags; + } + + route->num_routes = i; + + return 0; +} + +/* called when the subdev device node is opened by an application */ +static int max9286_open(struct v4l2_subdev *subdev, + struct v4l2_subdev_fh *fh) +{ + struct v4l2_mbus_framefmt *try_fmt = + v4l2_subdev_get_try_format(subdev, fh->pad, 0); + + struct v4l2_subdev_format fmt = { + .which = V4L2_SUBDEV_FORMAT_TRY, + .pad = MAX_PAD_SOURCE, + .format = { + .width = MAX9286_MAX_WIDTH, + .height = MAX9286_MAX_HEIGHT, + + .code = MEDIA_BUS_FMT_SGRBG12_1X12, + }, + .stream = 0, + }; + + *try_fmt = fmt.format; + + return 0; +} + +/* + * called when this subdev is registered. When called the v4l2_dev field is + * set to the correct v4l2_device. 
+ */ +static int max9286_registered(struct v4l2_subdev *subdev) +{ + struct max9286 *max = to_max_9286(subdev); + struct i2c_client *client = v4l2_get_subdevdata(subdev); + int i, j, k, l, rval, num, nsinks; + + num = max->pdata->subdev_num; + nsinks = max->nsinks; + for (i = 0, k = 0; (i < num) && (k < nsinks); i++, k++) { + struct max9286_subdev_i2c_info *info = + &max->pdata->subdev_info[i]; + struct crlmodule_platform_data *pdata = + (struct crlmodule_platform_data *) + info->board_info.platform_data; + + /* Spin the sensor subdev name suffix */ + pdata->suffix = info->suffix; + + /* aggre and subdves share the same i2c bus */ + max->sub_devs[k] = v4l2_i2c_new_subdev_board( + max->v4l2_sd.v4l2_dev, client->adapter, + &info->board_info, 0); + if (!max->sub_devs[k]) { + dev_err(max->v4l2_sd.dev, + "can't create new i2c subdev %d-%04x\n", + info->i2c_adapter_id, + info->board_info.addr); + continue; + } + + for (j = 0; j < max->sub_devs[k]->entity.num_pads; j++) { + if (max->sub_devs[k]->entity.pads[j].flags & + MEDIA_PAD_FL_SOURCE) + break; + } + + if (j == max->sub_devs[k]->entity.num_pads) { + dev_warn(max->v4l2_sd.dev, + "no source pad in subdev %d-%04x\n", + info->i2c_adapter_id, + info->board_info.addr); + return -ENOENT; + } + + for (l = 0; l < max->nsinks; l++) { + rval = media_create_pad_link( + &max->sub_devs[k]->entity, j, + &max->v4l2_sd.entity, l, 0); + if (rval) { + dev_err(max->v4l2_sd.dev, + "can't create link to %d-%04x\n", + info->i2c_adapter_id, + info->board_info.addr); + return -EINVAL; + } + } + } + + return 0; +} + +static int max9286_set_power(struct v4l2_subdev *subdev, int on) +{ + return 0; +} + +static const struct v4l2_subdev_core_ops max9286_core_subdev_ops = { + .s_power = max9286_set_power, +}; + +static bool max9286_sd_has_route(struct media_entity *entity, + unsigned int pad0, unsigned int pad1, int *stream) +{ + struct max9286 *va = to_max_9286(media_entity_to_v4l2_subdev(entity)); + + if (stream == NULL || *stream >= 
va->nstreams) + return false; + + if ((va->route[*stream].flags & V4L2_SUBDEV_ROUTE_FL_ACTIVE) && + ((va->route[*stream].source == pad0 && + va->route[*stream].sink == pad1) || + (va->route[*stream].source == pad1 && + va->route[*stream].sink == pad0))) + return true; + + return false; +} + +static const struct v4l2_subdev_video_ops max9286_sd_video_ops = { + .s_stream = max9286_set_stream, +}; + +static const struct media_entity_operations max9286_sd_entity_ops = { + .has_route = max9286_sd_has_route, +}; + +static const struct v4l2_subdev_pad_ops max9286_sd_pad_ops = { + .get_fmt = max9286_get_format, + .set_fmt = max9286_set_format, + .get_frame_desc = max9286_get_frame_desc, + .enum_mbus_code = max9286_enum_mbus_code, + .set_routing = max9286_set_routing, + .get_routing = max9286_get_routing, +}; + +static struct v4l2_subdev_ops max9286_sd_ops = { + .core = &max9286_core_subdev_ops, + .video = &max9286_sd_video_ops, + .pad = &max9286_sd_pad_ops, +}; + +static struct v4l2_subdev_internal_ops max9286_sd_internal_ops = { + .open = max9286_open, + .registered = max9286_registered, +}; + +static int max9286_s_ctrl(struct v4l2_ctrl *ctrl) +{ + return 0; +} + +static const struct v4l2_ctrl_ops max9286_ctrl_ops = { + .s_ctrl = max9286_s_ctrl, +}; + +static const s64 max9286_op_sys_clock[] = { 87750000, }; +static const struct v4l2_ctrl_config max9286_controls[] = { + { + .ops = &max9286_ctrl_ops, + .id = V4L2_CID_LINK_FREQ, + .name = "V4L2_CID_LINK_FREQ", + .type = V4L2_CTRL_TYPE_INTEGER_MENU, + .max = ARRAY_SIZE(max9286_op_sys_clock) - 1, + .min = 0, + .step = 0, + .def = 0, + .qmenu_int = max9286_op_sys_clock, + }, + { + .ops = &max9286_ctrl_ops, + .id = V4L2_CID_TEST_PATTERN, + .name = "V4L2_CID_TEST_PATTERN", + .type = V4L2_CTRL_TYPE_INTEGER, + .max = 1, + .min = 0, + .step = 1, + .def = 0, + }, +}; + +/* Registers MAX9286 sub-devices (Image sensors) */ +static int max9286_register_subdev(struct max9286 *max) +{ + int i, rval; + struct i2c_client *client = 
v4l2_get_subdevdata(&max->v4l2_sd); + + /* subdevice driver initializes v4l2 subdev */ + v4l2_subdev_init(&max->v4l2_sd, &max9286_sd_ops); + snprintf(max->v4l2_sd.name, sizeof(max->v4l2_sd.name), + "MAX9286 %c", max->pdata->suffix); + + max->v4l2_sd.flags |= V4L2_SUBDEV_FL_HAS_DEVNODE | + V4L2_SUBDEV_FL_HAS_SUBSTREAMS; + + max->v4l2_sd.internal_ops = &max9286_sd_internal_ops; + max->v4l2_sd.entity.ops = &max9286_sd_entity_ops; + v4l2_set_subdevdata(&max->v4l2_sd, client); + + v4l2_ctrl_handler_init(&max->ctrl_handler, + ARRAY_SIZE(max9286_controls)); + + if (max->ctrl_handler.error) { + dev_err(max->v4l2_sd.dev, + "Failed to init max9286 controls. ERR: %d!\n", + max->ctrl_handler.error); + return max->ctrl_handler.error; + } + + max->v4l2_sd.ctrl_handler = &max->ctrl_handler; + + for (i = 0; i < ARRAY_SIZE(max9286_controls); i++) { + const struct v4l2_ctrl_config *cfg = + &max9286_controls[i]; + struct v4l2_ctrl *ctrl; + + ctrl = v4l2_ctrl_new_custom(&max->ctrl_handler, cfg, NULL); + if (!ctrl) { + dev_err(max->v4l2_sd.dev, + "Failed to create ctrl %s!\n", cfg->name); + rval = max->ctrl_handler.error; + goto failed_out; + } + } + + max->link_freq = v4l2_ctrl_find(&max->ctrl_handler, V4L2_CID_LINK_FREQ); + max->test_pattern = v4l2_ctrl_find(&max->ctrl_handler, + V4L2_CID_TEST_PATTERN); + + for (i = 0; i < max->nsinks; i++) + max->pad[i].flags = MEDIA_PAD_FL_SINK; + max->pad[MAX_PAD_SOURCE].flags = + MEDIA_PAD_FL_SOURCE | MEDIA_PAD_FL_MULTIPLEX; + rval = media_entity_pads_init(&max->v4l2_sd.entity, + NR_OF_MAX_PADS, max->pad); + if (rval) { + dev_err(max->v4l2_sd.dev, + "Failed to init media entity for max9286!\n"); + goto failed_out; + } + + return 0; +failed_out: + media_entity_cleanup(&max->v4l2_sd.entity); + v4l2_ctrl_handler_free(&max->ctrl_handler); + return rval; +} + +/* + * Get the output link order + * By default: + * bits[7:6] 11: Link 3 is 4th in the CSI-2 output order + * bits[5:4] 10: Link 2 is 3rd in the CSI-2 output order + * bits[3:2] 01: Link 1 is 
2nd in the CSI-2 output order + * bits[1:0] 00: Link 0 is 1st in the CSI-2 output order + */ +u8 get_output_link_order(struct max9286 *max) +{ + u8 val = 0xE4, i; + u8 order_config[14][3] = { + {1, 8, 0x27}, + {1, 4, 0xC6}, + {1, 2, 0xE1}, + {1, 1, 0xE4}, + {2, 0xC, 0x4E}, + {2, 0xA, 0x72}, + {2, 0x9, 0x78}, + {2, 0x6, 0xD2}, + {2, 0x5, 0xD8}, + {2, 0x3, 0xE4}, + {3, 0xE, 0x93}, + {3, 0xD, 0x9C}, + {3, 0xB, 0xB4}, + {3, 0x7, 0xE4}, + }; + + if (max->total_sensor_num < 4) { + for (i = 0; i < 14; i++) { + if ((max->total_sensor_num == order_config[i][0]) + && (max->sensor_present == order_config[i][1])) + return order_config[i][2]; + } + } + + /* sensor_num = 4 will return 0xE4 */ + return val; +} + +/* MAX9286 initial setup and Reverse channel setup */ +static int max9286_init(struct max9286 *max, struct i2c_client *client) +{ + int i, rval; + unsigned int val, lval; + u8 mval, slval, tmval; + + usleep_range(10000, 11000); + + rval = regmap_read(max->regmap8, DS_MAX9286_DEVID, &val); + if (rval) { + dev_err(max->v4l2_sd.dev, + "Failed to read device ID of MAX9286!\n"); + return rval; + } + dev_info(max->v4l2_sd.dev, "MAX9286 device ID: 0x%X\n", val); + + rval = regmap_write(max->regmap8, DS_CSI_VC_CTL, 0x93); + if (rval) { + dev_err(max->v4l2_sd.dev, "Failed to disable CSI output!\n"); + return rval; + } + /* All the links are working in Legacy reverse control-channel mode */ + /* Enable Custom Reverse Channel and First Pulse Length */ + rval = regmap_write(max->regmap8, DS_ENCRC_FPL, 0x4F); + if (rval) { + dev_err(max->v4l2_sd.dev, "Failed to disable PRBS test!\n"); + return rval; + } + /* + * 2ms of delay is required after any analog change to reverse control + * channel for bus timeout and I2C state machine to settle from any + * glitches + */ + usleep_range(2000, 3000); + /* First pulse length rise time changed from 300ns to 200ns */ + rval = regmap_write(max->regmap8, DS_FPL_RT, 0x1E); + if (rval) { + dev_err(max->v4l2_sd.dev, "Failed to disable PRBS test!\n"); 
+ return rval; + } + usleep_range(2000, 3000); + + /* Enable configuration links */ + max96705_write_register(max, 0, S_MAIN_CTL, 0x43); + usleep_range(5000, 6000); + + /* + * Enable high threshold for reverse channel input buffer + * This increases immunity to power supply noise when the + * coaxial link is used for power as well as signal + */ + max96705_write_register(max, 0, S_RSVD_8, 0x01); + /* Enable change of reverse control parameters */ + + max96705_write_register(max, 0, S_RSVD_97, 0x5F); + + /* Wait 2ms after any change to reverse control channel */ + usleep_range(2000, 3000); + + /* Increase reverse amplitude from 100mV to 170mV to compensate for + * higher threshold + */ + rval = regmap_write(max->regmap8, DS_FPL_RT, 0x19); + if (rval) { + dev_err(max->v4l2_sd.dev, "Failed to disable PRBS test!\n"); + return rval; + } + usleep_range(2000, 3000); + + /* + * Enable CSI-2 lanes D0, D1, D2, D3 + * Enable CSI-2 DBL (Double Input Mode) + * Enable GMSL DBL for RAWx2 + * Enable RAW12 data type by default + */ + rval = regmap_write(max->regmap8, DS_CSI_DBL_DT, 0xF7); //RAW12 + if (rval) { + dev_err(max->v4l2_sd.dev, "Failed to set data type!\n"); + return rval; + } + usleep_range(2000, 3000); + + /* Enable Frame sync Auto-mode for row/column reset on frame sync + * sensors + */ + rval = regmap_write(max->regmap8, DS_FSYNCMODE, 0x00); + if (rval) { + dev_err(max->v4l2_sd.dev, "Failed to set frame sync mode!\n"); + return rval; + } + usleep_range(2000, 3000); + rval = regmap_write(max->regmap8, DS_OVERLAP_WIN_LOW, 0x00); + rval = regmap_write(max->regmap8, DS_OVERLAP_WIN_HIGH, 0x00); + + rval = regmap_write(max->regmap8, DS_FSYNC_PERIOD_LOW, 0x55); + rval = regmap_write(max->regmap8, DS_FSYNC_PERIOD_MIDDLE, 0xc2); + rval = regmap_write(max->regmap8, DS_FSYNC_PERIOD_HIGH, 0x2C); + + rval = regmap_write(max->regmap8, DS_HIGHIMM, 0x06); + + /* + * Enable DBL + * Edge select: Rising Edge + * Enable HS/VS encoding + */ + max96705_write_register(max, 0, S_CONFIG, 
0xD4); + usleep_range(2000, 3000); + + for (i = 0; i < ARRAY_SIZE(max9286_byte_order_settings_12bit); i++) { + rval = max96705_write_register(max, 0, + max9286_byte_order_settings_12bit[i].reg, + max9286_byte_order_settings_12bit[i].val); + if (rval) { + dev_err(max->v4l2_sd.dev, + "Failed to set max9286 byte order\n"); + return rval; + } + } + + /* Detect video links */ + rval = regmap_read(max->regmap8, DS_CONFIGL_VIDEOL_DET, &lval); + if (rval) { + dev_err(max->v4l2_sd.dev, "Failed to read register 0x49!\n"); + return rval; + } + + /* + * Check on which links the sensors are connected + * And also check total number of sensors connected to the deserializer + */ + max->sensor_present = ((lval >> 4) & 0xF) | (lval & 0xF); + + for (i = 0; i < NR_OF_MAX_STREAMS; i++) { + if (max->sensor_present & (0x1 << i)) { + dev_info(max->v4l2_sd.dev, + "Sensor present on deserializer link %d\n", i); + max->total_sensor_num += 1; + } + } + + dev_info(max->v4l2_sd.dev, + "total sensor present = %d", max->total_sensor_num); + dev_info(max->v4l2_sd.dev, + "sensor present on links = %d", max->sensor_present); + + if (!max->total_sensor_num) { + dev_err(max->v4l2_sd.dev, "No sensors connected!\n"); + } else { + dev_info(max->v4l2_sd.dev, + "Total number of sensors connected = %d\n", + max->total_sensor_num); + } + + slval = get_output_link_order(max); + + /* Set link output order */ + rval = regmap_write(max->regmap8, DS_LINK_OUTORD, slval); + if (rval) { + dev_err(max->v4l2_sd.dev, + "Failed to set Link output order!\n"); + return rval; + } + + slval = 0xE0 | max->sensor_present; + + mval = 0; + tmval = 0; + /* + * Setup each serializer individually and their respective I2C slave + * address changed to a unique value by enabling one reverse channel + * at a time via deserializer's DS_FWDCCEN_REVCCEN control register. + * Also create broadcast slave address for MAX96705 serializer. 
+ * After this stage, i2cdetect on I2C-ADAPTER should display the + * below devices + * 10: Sensor address + * 11, 12, 13, 14: Sensors alias addresses + * 41, 42, 43, 44: Serializers alias addresses + * 45: Serializer's broadcast address + * 48: Deserializer's address + */ + + for (i = 1; i <= NR_OF_MAX_SINK_PADS; i++) { + /* Setup the link when the sensor is connected to the link */ + if (((0x1 << (i - 1)) & max->sensor_present) == 0) + continue; + + /* Enable only one reverse channel at a time */ + mval = (0x11 << (i - 1)); + tmval |= (0x11 << (i - 1)); + rval = regmap_write(max->regmap8, DS_FWDCCEN_REVCCEN, mval); + if (rval) { + dev_err(max->v4l2_sd.dev, + "Failed to enable channel for %d!\n", i); + return rval; + } + /* Wait 2ms after enabling reverse channel */ + usleep_range(2000, 3000); + + /* Change Serializer slave address */ + max96705_write_register(max, 0, S_SERADDR, + (S_ADDR_MAX96705 + i) << 1); + /* Unique link 'i' image sensor slave address */ + max96705_write_register(max, i, S_I2C_SOURCE_IS, + (ADDR_AR0231AT_SENSOR + i) << 1); + /* Link 'i' image sensor slave address */ + max96705_write_register(max, i, S_I2C_DST_IS, + ADDR_AR0231AT_SENSOR << 1); + /* Serializer broadcast address */ + max96705_write_register(max, i, S_I2C_SOURCE_SER, + S_ADDR_MAX96705_BROADCAST << 1); + /* Link 'i' serializer address */ + max96705_write_register(max, i, S_I2C_DST_SER, + (S_ADDR_MAX96705 + i) << 1); + } + + /* Enable I2c reverse channels */ + rval = regmap_write(max->regmap8, DS_FWDCCEN_REVCCEN, tmval); + if (rval) { + dev_err(max->v4l2_sd.dev, + "Failed to enable channel for %d!\n", i); + return rval; + } + usleep_range(2000, 3000); + + return 0; +} + +/* Unbind the MAX9286 device driver from the I2C client */ +static int max9286_remove(struct i2c_client *client) +{ + struct v4l2_subdev *subdev = i2c_get_clientdata(client); + struct max9286 *max = to_max_9286(subdev); + int i; + + mutex_destroy(&max->max_mutex); + v4l2_ctrl_handler_free(&max->ctrl_handler); + 
v4l2_device_unregister_subdev(&max->v4l2_sd); + media_entity_cleanup(&max->v4l2_sd.entity); + + for (i = 0; i < NR_OF_MAX_SINK_PADS; i++) { + if (max->sub_devs[i]) { + struct i2c_client *sub_client = + v4l2_get_subdevdata(max->sub_devs[i]); + + i2c_unregister_device(sub_client); + } + max->sub_devs[i] = NULL; + } + + return 0; +} + +/* Called by I2C probe */ +static int max9286_probe(struct i2c_client *client, + const struct i2c_device_id *devid) +{ + struct max9286 *max; + int i = 0; + int rval = 0; + + if (client->dev.platform_data == NULL) + return -ENODEV; + + max = devm_kzalloc(&client->dev, sizeof(*max), GFP_KERNEL); + if (!max) + return -ENOMEM; + + max->pdata = client->dev.platform_data; + + max->nsources = NR_OF_MAX_SOURCE_PADS; + max->nsinks = NR_OF_MAX_SINK_PADS; + max->npads = NR_OF_MAX_PADS; + max->nstreams = NR_OF_MAX_STREAMS; + + max->crop = devm_kcalloc(&client->dev, max->npads, + sizeof(struct v4l2_rect), GFP_KERNEL); + max->compose = devm_kcalloc(&client->dev, max->npads, + sizeof(struct v4l2_rect), GFP_KERNEL); + max->route = devm_kcalloc(&client->dev, max->nstreams, + sizeof(*max->route), GFP_KERNEL); + max->stream = devm_kcalloc(&client->dev, max->npads, + sizeof(*max->stream), GFP_KERNEL); + + if (!max->crop || !max->compose || !max->route || !max->stream) + return -ENOMEM; + + for (i = 0; i < max->npads; i++) { + max->ffmts[i] = + devm_kcalloc(&client->dev, max->nstreams, + sizeof(struct v4l2_mbus_framefmt), GFP_KERNEL); + if (!max->ffmts[i]) + return -ENOMEM; + + max->stream[i].stream_id = + devm_kcalloc(&client->dev, max->nsinks, + sizeof(int), GFP_KERNEL); + if (!max->stream[i].stream_id) + return -ENOMEM; + } + + for (i = 0; i < max->nstreams; i++) { + max->route[i].sink = i; + max->route[i].source = MAX_PAD_SOURCE; + max->route[i].flags = 0; + } + + for (i = 0; i < max->nsinks; i++) { + max->stream[i].stream_id[0] = i; + max->stream[MAX_PAD_SOURCE].stream_id[i] = i; + } + + max->regmap8 = devm_regmap_init_i2c(client, 
&max9286_reg_config8); + if (IS_ERR(max->regmap8)) { + dev_err(&client->dev, "Failed to init regmap8!\n"); + return -EIO; + } + + mutex_init(&max->max_mutex); + + v4l2_i2c_subdev_init(&max->v4l2_sd, client, &max9286_sd_ops); + + rval = max9286_register_subdev(max); + if (rval) { + dev_err(&client->dev, + "Failed to register MAX9286 subdevice!\n"); + goto error_mutex_destroy; + } + + rval = max9286_init(max, client); + if (rval) { + dev_err(&client->dev, "Failed to initialise MAX9286!\n"); + goto error_media_entity; + } + + return 0; + +error_media_entity: + media_entity_cleanup(&max->v4l2_sd.entity); + v4l2_ctrl_handler_free(&max->ctrl_handler); +error_mutex_destroy: + mutex_destroy(&max->max_mutex); + + return rval; +} + +#ifdef CONFIG_PM +static int max9286_resume(struct device *dev) +{ + struct i2c_client *client = to_i2c_client(dev); + struct v4l2_subdev *subdev = i2c_get_clientdata(client); + struct max9286 *max = to_max_9286(subdev); + + return max9286_init(max, client); +} +#else +#define max9286_resume NULL +#endif /* CONFIG_PM */ + +static const struct i2c_device_id max9286_id_table[] = { + { MAX9286_NAME, 0 }, + {}, +}; + +static const struct dev_pm_ops max9286_pm_ops = { + .resume = max9286_resume, +}; + +static struct i2c_driver max9286_i2c_driver = { + .driver = { + .name = MAX9286_NAME, + .pm = &max9286_pm_ops, + }, + .probe = max9286_probe, + .remove = max9286_remove, + .id_table = max9286_id_table, +}; + +module_i2c_driver(max9286_i2c_driver); + +MODULE_AUTHOR("Kiran Kumar "); +MODULE_AUTHOR("Kun Jiang "); +MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("Maxim96705 serializer and Maxim9286 deserializer driver"); diff --git a/drivers/media/i2c/ov5640.c b/drivers/media/i2c/ov5640.c index 071f4bc240ca7..8e7a2a59cd321 100644 --- a/drivers/media/i2c/ov5640.c +++ b/drivers/media/i2c/ov5640.c @@ -223,8 +223,10 @@ struct ov5640_dev { int power_count; struct v4l2_mbus_framefmt fmt; + bool pending_fmt_change; const struct ov5640_mode_info *current_mode; + const 
struct ov5640_mode_info *last_mode; enum ov5640_frame_rate current_fr; struct v4l2_fract frame_interval; @@ -255,7 +257,7 @@ static inline struct v4l2_subdev *ctrl_to_sd(struct v4l2_ctrl *ctrl) * should be identified and removed to speed register load time * over i2c. */ - +/* YUV422 UYVY VGA@30fps */ static const struct reg_value ov5640_init_setting_30fps_VGA[] = { {0x3103, 0x11, 0, 0}, {0x3008, 0x82, 0, 5}, {0x3008, 0x42, 0, 0}, {0x3103, 0x03, 0, 0}, {0x3017, 0x00, 0, 0}, {0x3018, 0x00, 0, 0}, @@ -286,10 +288,10 @@ static const struct reg_value ov5640_init_setting_30fps_VGA[] = { {0x3a0d, 0x04, 0, 0}, {0x3a14, 0x03, 0, 0}, {0x3a15, 0xd8, 0, 0}, {0x4001, 0x02, 0, 0}, {0x4004, 0x02, 0, 0}, {0x3000, 0x00, 0, 0}, {0x3002, 0x1c, 0, 0}, {0x3004, 0xff, 0, 0}, {0x3006, 0xc3, 0, 0}, - {0x300e, 0x45, 0, 0}, {0x302e, 0x08, 0, 0}, {0x4300, 0x3f, 0, 0}, + {0x302e, 0x08, 0, 0}, {0x4300, 0x3f, 0, 0}, {0x501f, 0x00, 0, 0}, {0x4713, 0x03, 0, 0}, {0x4407, 0x04, 0, 0}, {0x440e, 0x00, 0, 0}, {0x460b, 0x35, 0, 0}, {0x460c, 0x22, 0, 0}, - {0x4837, 0x0a, 0, 0}, {0x4800, 0x04, 0, 0}, {0x3824, 0x02, 0, 0}, + {0x4837, 0x0a, 0, 0}, {0x3824, 0x02, 0, 0}, {0x5000, 0xa7, 0, 0}, {0x5001, 0xa3, 0, 0}, {0x5180, 0xff, 0, 0}, {0x5181, 0xf2, 0, 0}, {0x5182, 0x00, 0, 0}, {0x5183, 0x14, 0, 0}, {0x5184, 0x25, 0, 0}, {0x5185, 0x24, 0, 0}, {0x5186, 0x09, 0, 0}, @@ -908,6 +910,26 @@ static int ov5640_mod_reg(struct ov5640_dev *sensor, u16 reg, } /* download ov5640 settings to sensor through i2c */ +static int ov5640_set_timings(struct ov5640_dev *sensor, + const struct ov5640_mode_info *mode) +{ + int ret; + + ret = ov5640_write_reg16(sensor, OV5640_REG_TIMING_DVPHO, mode->hact); + if (ret < 0) + return ret; + + ret = ov5640_write_reg16(sensor, OV5640_REG_TIMING_DVPVO, mode->vact); + if (ret < 0) + return ret; + + ret = ov5640_write_reg16(sensor, OV5640_REG_TIMING_HTS, mode->htot); + if (ret < 0) + return ret; + + return ov5640_write_reg16(sensor, OV5640_REG_TIMING_VTS, mode->vtot); +} + static int 
ov5640_load_regs(struct ov5640_dev *sensor, const struct ov5640_mode_info *mode) { @@ -935,7 +957,13 @@ static int ov5640_load_regs(struct ov5640_dev *sensor, usleep_range(1000 * delay_ms, 1000 * delay_ms + 100); } - return ret; + return ov5640_set_timings(sensor, mode); +} + +static int ov5640_set_autoexposure(struct ov5640_dev *sensor, bool on) +{ + return ov5640_mod_reg(sensor, OV5640_REG_AEC_PK_MANUAL, + BIT(0), on ? 0 : BIT(0)); } /* read exposure, in number of line periods */ @@ -994,6 +1022,18 @@ static int ov5640_get_gain(struct ov5640_dev *sensor) return gain & 0x3ff; } +static int ov5640_set_gain(struct ov5640_dev *sensor, int gain) +{ + return ov5640_write_reg16(sensor, OV5640_REG_AEC_PK_REAL_GAIN, + (u16)gain & 0x3ff); +} + +static int ov5640_set_autogain(struct ov5640_dev *sensor, bool on) +{ + return ov5640_mod_reg(sensor, OV5640_REG_AEC_PK_MANUAL, + BIT(1), on ? 0 : BIT(1)); +} + static int ov5640_set_stream_dvp(struct ov5640_dev *sensor, bool on) { int ret; @@ -1102,12 +1142,25 @@ static int ov5640_set_stream_mipi(struct ov5640_dev *sensor, bool on) { int ret; - ret = ov5640_mod_reg(sensor, OV5640_REG_MIPI_CTRL00, BIT(5), - on ? 0 : BIT(5)); - if (ret) - return ret; - ret = ov5640_write_reg(sensor, OV5640_REG_PAD_OUTPUT00, - on ? 0x00 : 0x70); + /* + * Enable/disable the MIPI interface + * + * 0x300e = on ? 0x45 : 0x40 + * + * FIXME: the sensor manual (version 2.03) reports + * [7:5] = 000 : 1 data lane mode + * [7:5] = 001 : 2 data lanes mode + * But this settings do not work, while the following ones + * have been validated for 2 data lanes mode. + * + * [7:5] = 010 : 2 data lanes mode + * [4] = 0 : Power up MIPI HS Tx + * [3] = 0 : Power up MIPI LS Rx + * [2] = 1/0 : MIPI interface enable/disable + * [1:0] = 01/00: FIXME: 'debug' + */ + ret = ov5640_write_reg(sensor, OV5640_REG_IO_MIPI_CTRL00, + on ? 
0x45 : 0x40); if (ret) return ret; @@ -1331,7 +1384,7 @@ static int ov5640_set_ae_target(struct ov5640_dev *sensor, int target) return ov5640_write_reg(sensor, OV5640_REG_AEC_CTRL1F, fast_low); } -static int ov5640_binning_on(struct ov5640_dev *sensor) +static int ov5640_get_binning(struct ov5640_dev *sensor) { u8 temp; int ret; @@ -1339,8 +1392,8 @@ static int ov5640_binning_on(struct ov5640_dev *sensor) ret = ov5640_read_reg(sensor, OV5640_REG_TIMING_TC_REG21, &temp); if (ret) return ret; - temp &= 0xfe; - return temp ? 1 : 0; + + return temp & BIT(0); } static int ov5640_set_binning(struct ov5640_dev *sensor, bool enable) @@ -1385,30 +1438,6 @@ static int ov5640_set_virtual_channel(struct ov5640_dev *sensor) return ov5640_write_reg(sensor, OV5640_REG_DEBUG_MODE, temp); } -static int ov5640_set_timings(struct ov5640_dev *sensor, - const struct ov5640_mode_info *mode) -{ - int ret; - - ret = ov5640_write_reg16(sensor, OV5640_REG_TIMING_DVPHO, mode->hact); - if (ret < 0) - return ret; - - ret = ov5640_write_reg16(sensor, OV5640_REG_TIMING_DVPVO, mode->vact); - if (ret < 0) - return ret; - - ret = ov5640_write_reg16(sensor, OV5640_REG_TIMING_HTS, mode->htot); - if (ret < 0) - return ret; - - ret = ov5640_write_reg16(sensor, OV5640_REG_TIMING_VTS, mode->vtot); - if (ret < 0) - return ret; - - return 0; -} - static const struct ov5640_mode_info * ov5640_find_mode(struct ov5640_dev *sensor, enum ov5640_frame_rate fr, int width, int height, bool nearest) @@ -1450,7 +1479,7 @@ static int ov5640_set_mode_exposure_calc(struct ov5640_dev *sensor, if (ret < 0) return ret; prev_shutter = ret; - ret = ov5640_binning_on(sensor); + ret = ov5640_get_binning(sensor); if (ret < 0) return ret; if (ret && mode->id != OV5640_MODE_720P_1280_720 && @@ -1571,7 +1600,7 @@ static int ov5640_set_mode_exposure_calc(struct ov5640_dev *sensor, } /* set capture gain */ - ret = __v4l2_ctrl_s_ctrl(sensor->ctrls.gain, cap_gain16); + ret = ov5640_set_gain(sensor, cap_gain16); if (ret) return ret; 
@@ -1584,7 +1613,7 @@ static int ov5640_set_mode_exposure_calc(struct ov5640_dev *sensor, } /* set exposure */ - return __v4l2_ctrl_s_ctrl(sensor->ctrls.exposure, cap_shutter); + return ov5640_set_exposure(sensor, cap_shutter); } /* @@ -1592,53 +1621,45 @@ static int ov5640_set_mode_exposure_calc(struct ov5640_dev *sensor, * change mode directly */ static int ov5640_set_mode_direct(struct ov5640_dev *sensor, - const struct ov5640_mode_info *mode, - s32 exposure) + const struct ov5640_mode_info *mode) { - int ret; - if (!mode->reg_data) return -EINVAL; /* Write capture setting */ - ret = ov5640_load_regs(sensor, mode); - if (ret < 0) - return ret; - - /* turn auto gain/exposure back on for direct mode */ - ret = __v4l2_ctrl_s_ctrl(sensor->ctrls.auto_gain, 1); - if (ret) - return ret; - - return __v4l2_ctrl_s_ctrl(sensor->ctrls.auto_exp, exposure); + return ov5640_load_regs(sensor, mode); } -static int ov5640_set_mode(struct ov5640_dev *sensor, - const struct ov5640_mode_info *orig_mode) +static int ov5640_set_mode(struct ov5640_dev *sensor) { const struct ov5640_mode_info *mode = sensor->current_mode; + const struct ov5640_mode_info *orig_mode = sensor->last_mode; enum ov5640_downsize_mode dn_mode, orig_dn_mode; - s32 exposure; + bool auto_gain = sensor->ctrls.auto_gain->val == 1; + bool auto_exp = sensor->ctrls.auto_exp->val == V4L2_EXPOSURE_AUTO; int ret; dn_mode = mode->dn_mode; orig_dn_mode = orig_mode->dn_mode; /* auto gain and exposure must be turned off when changing modes */ - ret = __v4l2_ctrl_s_ctrl(sensor->ctrls.auto_gain, 0); - if (ret) - return ret; + if (auto_gain) { + ret = ov5640_set_autogain(sensor, false); + if (ret) + return ret; + } - exposure = sensor->ctrls.auto_exp->val; - ret = ov5640_set_exposure(sensor, V4L2_EXPOSURE_MANUAL); - if (ret) - return ret; + if (auto_exp) { + ret = ov5640_set_autoexposure(sensor, false); + if (ret) + goto restore_auto_gain; + } if ((dn_mode == SUBSAMPLING && orig_dn_mode == SCALING) || (dn_mode == SCALING && 
orig_dn_mode == SUBSAMPLING)) { /* * change between subsampling and scaling - * go through exposure calucation + * go through exposure calculation */ ret = ov5640_set_mode_exposure_calc(sensor, mode); } else { @@ -1646,15 +1667,16 @@ static int ov5640_set_mode(struct ov5640_dev *sensor, * change inside subsampling or scaling * download firmware directly */ - ret = ov5640_set_mode_direct(sensor, mode, exposure); + ret = ov5640_set_mode_direct(sensor, mode); } - if (ret < 0) - return ret; + goto restore_auto_exp_gain; - ret = ov5640_set_timings(sensor, mode); - if (ret < 0) - return ret; + /* restore auto gain and exposure */ + if (auto_gain) + ov5640_set_autogain(sensor, true); + if (auto_exp) + ov5640_set_autoexposure(sensor, true); ret = ov5640_set_binning(sensor, dn_mode != SCALING); if (ret < 0) @@ -1673,8 +1695,18 @@ static int ov5640_set_mode(struct ov5640_dev *sensor, return ret; sensor->pending_mode_change = false; + sensor->last_mode = mode; return 0; + +restore_auto_exp_gain: + if (auto_exp) + ov5640_set_autoexposure(sensor, true); +restore_auto_gain: + if (auto_gain) + ov5640_set_autogain(sensor, true); + + return ret; } static int ov5640_set_framefmt(struct ov5640_dev *sensor, @@ -1689,6 +1721,7 @@ static int ov5640_restore_mode(struct ov5640_dev *sensor) ret = ov5640_load_regs(sensor, &ov5640_mode_init_data); if (ret < 0) return ret; + sensor->last_mode = &ov5640_mode_init_data; ret = ov5640_mod_reg(sensor, OV5640_REG_SYS_ROOT_DIVIDER, 0x3f, (ilog2(OV5640_SCLK2X_ROOT_DIVIDER_DEFAULT) << 2) | @@ -1697,7 +1730,7 @@ static int ov5640_restore_mode(struct ov5640_dev *sensor) return ret; /* now restore the last capture mode */ - ret = ov5640_set_mode(sensor, &ov5640_mode_init_data); + ret = ov5640_set_mode(sensor); if (ret < 0) return ret; @@ -1786,23 +1819,69 @@ static int ov5640_set_power(struct ov5640_dev *sensor, bool on) if (ret) goto power_off; + /* We're done here for DVP bus, while CSI-2 needs setup. 
*/ + if (sensor->ep.bus_type != V4L2_MBUS_CSI2) + return 0; + + /* + * Power up MIPI HS Tx and LS Rx; 2 data lanes mode + * + * 0x300e = 0x40 + * [7:5] = 010 : 2 data lanes mode (see FIXME note in + * "ov5640_set_stream_mipi()") + * [4] = 0 : Power up MIPI HS Tx + * [3] = 0 : Power up MIPI LS Rx + * [2] = 0 : MIPI interface disabled + */ + ret = ov5640_write_reg(sensor, + OV5640_REG_IO_MIPI_CTRL00, 0x40); + if (ret) + goto power_off; + + /* + * Gate clock and set LP11 in 'no packets mode' (idle) + * + * 0x4800 = 0x24 + * [5] = 1 : Gate clock when 'no packets' + * [2] = 1 : MIPI bus in LP11 when 'no packets' + */ + ret = ov5640_write_reg(sensor, + OV5640_REG_MIPI_CTRL00, 0x24); + if (ret) + goto power_off; + + /* + * Set data lanes and clock in LP11 when 'sleeping' + * + * 0x3019 = 0x70 + * [6] = 1 : MIPI data lane 2 in LP11 when 'sleeping' + * [5] = 1 : MIPI data lane 1 in LP11 when 'sleeping' + * [4] = 1 : MIPI clock lane in LP11 when 'sleeping' + */ + ret = ov5640_write_reg(sensor, + OV5640_REG_PAD_OUTPUT00, 0x70); + if (ret) + goto power_off; + + /* Give lanes some time to coax into LP11 state. */ + usleep_range(500, 1000); + + } else { if (sensor->ep.bus_type == V4L2_MBUS_CSI2) { - /* - * start streaming briefly followed by stream off in - * order to coax the clock lane into LP-11 state. - */ - ret = ov5640_set_stream_mipi(sensor, true); - if (ret) - goto power_off; - usleep_range(1000, 2000); - ret = ov5640_set_stream_mipi(sensor, false); - if (ret) - goto power_off; + /* Reset MIPI bus settings to their default values. 
*/ + ov5640_write_reg(sensor, + OV5640_REG_IO_MIPI_CTRL00, 0x58); + ov5640_write_reg(sensor, + OV5640_REG_MIPI_CTRL00, 0x04); + ov5640_write_reg(sensor, + OV5640_REG_PAD_OUTPUT00, 0x00); } - return 0; + ov5640_set_power_off(sensor); } + return 0; + power_off: ov5640_set_power_off(sensor); return ret; @@ -1941,6 +2020,7 @@ static int ov5640_set_fmt(struct v4l2_subdev *sd, struct ov5640_dev *sensor = to_ov5640_dev(sd); const struct ov5640_mode_info *new_mode; struct v4l2_mbus_framefmt *mbus_fmt = &format->format; + struct v4l2_mbus_framefmt *fmt; int ret; if (format->pad != 0) @@ -1958,19 +2038,20 @@ static int ov5640_set_fmt(struct v4l2_subdev *sd, if (ret) goto out; - if (format->which == V4L2_SUBDEV_FORMAT_TRY) { - struct v4l2_mbus_framefmt *fmt = - v4l2_subdev_get_try_format(sd, cfg, 0); + if (format->which == V4L2_SUBDEV_FORMAT_TRY) + fmt = v4l2_subdev_get_try_format(sd, cfg, 0); + else + fmt = &sensor->fmt; - *fmt = *mbus_fmt; - goto out; - } + *fmt = *mbus_fmt; if (new_mode != sensor->current_mode) { sensor->current_mode = new_mode; - sensor->fmt = *mbus_fmt; sensor->pending_mode_change = true; } + if (mbus_fmt->code != sensor->fmt.code) + sensor->pending_fmt_change = true; + out: mutex_unlock(&sensor->lock); return ret; @@ -2137,20 +2218,20 @@ static int ov5640_set_ctrl_white_balance(struct ov5640_dev *sensor, int awb) return ret; } -static int ov5640_set_ctrl_exposure(struct ov5640_dev *sensor, int exp) +static int ov5640_set_ctrl_exposure(struct ov5640_dev *sensor, + enum v4l2_exposure_auto_type auto_exposure) { struct ov5640_ctrls *ctrls = &sensor->ctrls; - bool auto_exposure = (exp == V4L2_EXPOSURE_AUTO); + bool auto_exp = (auto_exposure == V4L2_EXPOSURE_AUTO); int ret = 0; if (ctrls->auto_exp->is_new) { - ret = ov5640_mod_reg(sensor, OV5640_REG_AEC_PK_MANUAL, - BIT(0), auto_exposure ? 
0 : BIT(0)); + ret = ov5640_set_autoexposure(sensor, auto_exp); if (ret) return ret; } - if (!auto_exposure && ctrls->exposure->is_new) { + if (!auto_exp && ctrls->exposure->is_new) { u16 max_exp; ret = ov5640_read_reg16(sensor, OV5640_REG_AEC_PK_VTS, @@ -2170,25 +2251,19 @@ static int ov5640_set_ctrl_exposure(struct ov5640_dev *sensor, int exp) return ret; } -static int ov5640_set_ctrl_gain(struct ov5640_dev *sensor, int auto_gain) +static int ov5640_set_ctrl_gain(struct ov5640_dev *sensor, bool auto_gain) { struct ov5640_ctrls *ctrls = &sensor->ctrls; int ret = 0; if (ctrls->auto_gain->is_new) { - ret = ov5640_mod_reg(sensor, OV5640_REG_AEC_PK_MANUAL, - BIT(1), - ctrls->auto_gain->val ? 0 : BIT(1)); + ret = ov5640_set_autogain(sensor, auto_gain); if (ret) return ret; } - if (!auto_gain && ctrls->gain->is_new) { - u16 gain = (u16)ctrls->gain->val; - - ret = ov5640_write_reg16(sensor, OV5640_REG_AEC_PK_REAL_GAIN, - gain & 0x3ff); - } + if (!auto_gain && ctrls->gain->is_new) + ret = ov5640_set_gain(sensor, ctrls->gain->val); return ret; } @@ -2261,16 +2336,12 @@ static int ov5640_g_volatile_ctrl(struct v4l2_ctrl *ctrl) switch (ctrl->id) { case V4L2_CID_AUTOGAIN: - if (!ctrl->val) - return 0; val = ov5640_get_gain(sensor); if (val < 0) return val; sensor->ctrls.gain->val = val; break; case V4L2_CID_EXPOSURE_AUTO: - if (ctrl->val == V4L2_EXPOSURE_MANUAL) - return 0; val = ov5640_get_exposure(sensor); if (val < 0) return val; @@ -2541,13 +2612,16 @@ static int ov5640_s_stream(struct v4l2_subdev *sd, int enable) if (sensor->streaming == !enable) { if (enable && sensor->pending_mode_change) { - ret = ov5640_set_mode(sensor, sensor->current_mode); + ret = ov5640_set_mode(sensor); if (ret) goto out; + } + if (enable && sensor->pending_fmt_change) { ret = ov5640_set_framefmt(sensor, &sensor->fmt); if (ret) goto out; + sensor->pending_fmt_change = false; } if (sensor->ep.bus_type == V4L2_MBUS_CSI2) @@ -2642,9 +2716,14 @@ static int ov5640_probe(struct i2c_client *client, 
return -ENOMEM; sensor->i2c_client = client; + + /* + * default init sequence initialize sensor to + * YUV422 UYVY VGA@30fps + */ fmt = &sensor->fmt; - fmt->code = ov5640_formats[0].code; - fmt->colorspace = ov5640_formats[0].colorspace; + fmt->code = MEDIA_BUS_FMT_UYVY8_2X8; + fmt->colorspace = V4L2_COLORSPACE_SRGB; fmt->ycbcr_enc = V4L2_MAP_YCBCR_ENC_DEFAULT(fmt->colorspace); fmt->quantization = V4L2_QUANTIZATION_FULL_RANGE; fmt->xfer_func = V4L2_MAP_XFER_FUNC_DEFAULT(fmt->colorspace); @@ -2656,7 +2735,7 @@ static int ov5640_probe(struct i2c_client *client, sensor->current_fr = OV5640_30_FPS; sensor->current_mode = &ov5640_mode_data[OV5640_30_FPS][OV5640_MODE_VGA_640_480]; - sensor->pending_mode_change = true; + sensor->last_mode = sensor->current_mode; sensor->ae_target = 52; diff --git a/drivers/media/i2c/ov7670.c b/drivers/media/i2c/ov7670.c index 31bf577b0bd30..64d1402882c82 100644 --- a/drivers/media/i2c/ov7670.c +++ b/drivers/media/i2c/ov7670.c @@ -1808,17 +1808,24 @@ static int ov7670_probe(struct i2c_client *client, info->pclk_hb_disable = true; } - info->clk = devm_clk_get(&client->dev, "xclk"); - if (IS_ERR(info->clk)) - return PTR_ERR(info->clk); - ret = clk_prepare_enable(info->clk); - if (ret) - return ret; + info->clk = devm_clk_get(&client->dev, "xclk"); /* optional */ + if (IS_ERR(info->clk)) { + ret = PTR_ERR(info->clk); + if (ret == -ENOENT) + info->clk = NULL; + else + return ret; + } + if (info->clk) { + ret = clk_prepare_enable(info->clk); + if (ret) + return ret; - info->clock_speed = clk_get_rate(info->clk) / 1000000; - if (info->clock_speed < 10 || info->clock_speed > 48) { - ret = -EINVAL; - goto clk_disable; + info->clock_speed = clk_get_rate(info->clk) / 1000000; + if (info->clock_speed < 10 || info->clock_speed > 48) { + ret = -EINVAL; + goto clk_disable; + } } ret = ov7670_init_gpio(client, info); diff --git a/drivers/media/i2c/tc358743.c b/drivers/media/i2c/tc358743.c index 44c41933415ab..ff25ea9aca48e 100644 --- 
a/drivers/media/i2c/tc358743.c +++ b/drivers/media/i2c/tc358743.c @@ -1243,9 +1243,9 @@ static int tc358743_log_status(struct v4l2_subdev *sd) u8 vi_status3 = i2c_rd8(sd, VI_STATUS3); const int deep_color_mode[4] = { 8, 10, 12, 16 }; static const char * const input_color_space[] = { - "RGB", "YCbCr 601", "Adobe RGB", "YCbCr 709", "NA (4)", + "RGB", "YCbCr 601", "opRGB", "YCbCr 709", "NA (4)", "xvYCC 601", "NA(6)", "xvYCC 709", "NA(8)", "sYCC601", - "NA(10)", "NA(11)", "NA(12)", "Adobe YCC 601"}; + "NA(10)", "NA(11)", "NA(12)", "opYCC 601"}; v4l2_info(sd, "-----Chip status-----\n"); v4l2_info(sd, "Chip ID: 0x%02x\n", diff --git a/drivers/media/i2c/ti960-reg.h b/drivers/media/i2c/ti960-reg.h new file mode 100644 index 0000000000000..0f34651aaa03c --- /dev/null +++ b/drivers/media/i2c/ti960-reg.h @@ -0,0 +1,233 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (C) 2018 Intel Corporation */ + +#ifndef TI960_REG_H +#define TI960_REG_H + +struct ti960_register_write { + u8 reg; + u8 val; +}; + +struct ti960_register_devid { + u8 reg; + u8 val_expected; +}; + +static const struct ti960_register_write ti960_frame_sync_settings[2][5] = { + { + {0x18, 0x00}, /* Disable frame sync. */ + {0x19, 0x00}, + {0x1a, 0x02}, + {0x1b, 0x0a}, + {0x1c, 0xd3}, + }, + { + {0x19, 0x01}, /* Frame sync high time.*/ + {0x1a, 0x15}, + {0x1b, 0x09}, /* Frame sync low time. */ + {0x1c, 0xC3}, + {0x18, 0x01}, /* Enable frame sync. 
and use high/low mode */ + } +}; + +static const struct ti960_register_write ti960_gpio_settings[] = { + {0x10, 0x81}, + {0x11, 0x85}, + {0x12, 0x89}, + {0x13, 0x8d}, +}; + +static const struct ti960_register_write ti960_init_settings[] = { + {0x0c, 0x0f}, /* RX_PORT_CTL */ + {0x1f, 0x06}, /* CSI_PLL_CTL */ + {0x4c, 0x01}, /* FPD3_PORT_SEL */ + {0x58, 0x5e}, /* BCC_CONFIG */ + {0x5c, 0xb0}, /* SER_ALIAS_ID */ + {0x5d, 0x6c}, /* SlaveID[0] */ + {0x65, 0x60}, /* SlaveAlias[0] */ + {0x6d, 0x7c}, /* PORT_CONFIG */ + {0x7c, 0x01}, /* PORT_CONFIG2 */ + {0x70, 0x2b}, /* RAW10_ID */ + {0x71, 0x2c}, /* RAW12_ID */ + {0x72, 0xe4}, /* CSI_VC_MAP */ + {0x4c, 0x12}, /* FPD3_PORT_SEL */ + {0x58, 0x5e}, + {0x5c, 0xb2}, + {0x5d, 0x6c}, + {0x65, 0x62}, + {0x6d, 0x7c}, + {0x7c, 0x01}, + {0x70, 0x2b}, + {0x71, 0x2c}, + {0x72, 0xee}, /* CSI_VC_MAP */ + {0x4c, 0x24}, /* FPD3_PORT_SEL */ + {0x58, 0x5e}, + {0x5c, 0xb4}, + {0x5d, 0x6c}, + {0x65, 0x64}, + {0x6d, 0x7c}, + {0x7c, 0x01}, + {0x70, 0x2b}, + {0x71, 0x2c}, + {0x72, 0xe4}, + {0x4c, 0x38}, /* FPD3_PORT_SEL */ + {0x58, 0x5e}, + {0x5c, 0xb6}, + {0x5d, 0x6c}, + {0x65, 0x66}, + {0x6d, 0x7c}, + {0x7c, 0x01}, + {0x70, 0x2b}, + {0x71, 0x2c}, + {0x72, 0xe4}, +}; + +static const struct ti960_register_write ti953_init_settings[] = { + {0x4c, 0x01}, + {0xb0, 0x04}, + {0xb1, 0x03}, + {0xb2, 0x25}, + {0xb1, 0x13}, + {0xb2, 0x25}, + {0xb0, 0x04}, + {0xb1, 0x04}, + {0xb2, 0x30}, + {0xb1, 0x14}, + {0xb2, 0x30}, + {0xb0, 0x04}, + {0xb1, 0x06}, + {0xb2, 0x40}, + {0x42, 0x01}, /* SLAVE_ID_ALIAS_1 */ + {0x41, 0x93}, /* SLAVE_ID_ALIAS_0 */ + {0x4c, 0x12}, + {0xb0, 0x08}, + {0xb1, 0x03}, + {0xb2, 0x25}, + {0xb1, 0x13}, + {0xb2, 0x25}, + {0xb0, 0x08}, + {0xb1, 0x04}, + {0xb2, 0x30}, + {0xb1, 0x14}, + {0xb2, 0x30}, + {0xb0, 0x08}, + {0xb1, 0x06}, + {0xb2, 0x40}, + {0x42, 0x01}, + {0x41, 0x93}, + {0x4c, 0x24}, + {0xb0, 0x0c}, + {0xb1, 0x03}, + {0xb2, 0x25}, + {0xb1, 0x13}, + {0xb2, 0x25}, + {0xb0, 0x0c}, + {0xb1, 0x04}, + {0xb2, 0x30}, + {0xb1, 0x14}, + 
{0xb2, 0x30}, + {0xb0, 0x0c}, + {0xb1, 0x06}, + {0xb2, 0x40}, + {0x42, 0x01}, + {0x41, 0x93}, + {0x4c, 0x38}, + {0xb0, 0x10}, + {0xb1, 0x03}, + {0xb2, 0x25}, + {0xb1, 0x13}, + {0xb2, 0x25}, + {0xb0, 0x10}, + {0xb1, 0x04}, + {0xb2, 0x30}, + {0xb1, 0x14}, + {0xb2, 0x30}, + {0xb0, 0x10}, + {0xb1, 0x06}, + {0xb2, 0x40}, + {0x42, 0x01}, + {0x41, 0x93}, +}; + +static const struct ti960_register_write ti960_init_settings_2[] = { + {0xb0, 0x14}, + {0xb1, 0x03}, + {0xb2, 0x04}, + {0xb1, 0x04}, + {0xb2, 0x04}, +}; + +static const struct ti960_register_write ti960_init_settings_3[] = { + {0x4c, 0x01}, + {0x32, 0x01}, + {0x33, 0x03}, + {0x32, 0x12}, + {0x33, 0x03}, + {0x20, 0x00}, + {0x21, 0x03}, +}; + +static const struct ti960_register_write ti953_init_settings_2[] = { + {0x06, 0x41}, + {0x07, 0x28}, + {0x0e, 0xf0}, +}; + +static const struct ti960_register_devid ti953_FPD3_RX_ID[] = { + {0xf0, 0x5f}, + {0xf1, 0x55}, + {0xf2, 0x42}, + {0xf3, 0x39}, + {0xf4, 0x35}, + {0xf5, 0x33}, +}; + +/* register definition */ +#define TI960_DEVID 0x0 +#define TI960_RESET 0x1 +#define TI960_CSI_PLL_CTL 0x1f +#define TI960_FS_CTL 0x18 +#define TI960_FWD_CTL1 0x20 +#define TI960_RX_PORT_SEL 0x4c +#define TI960_SER_ALIAS_ID 0x5c +#define TI960_SLAVE_ID0 0x5d +#define TI960_SLAVE_ALIAS_ID0 0x65 +#define TI960_PORT_CONFIG 0x6d +#define TI960_BC_GPIO_CTL0 0x6e +#define TI960_BC_GPIO_CTL1 0x6f +#define TI960_RAW10_ID 0x70 +#define TI960_RAW12_ID 0x71 +#define TI960_CSI_VC_MAP 0x72 +#define TI960_PORT_CONFIG2 0x7c +#define TI960_CSI_CTL 0x33 + +/* register value definition */ +#define TI960_POWER_ON 0x1 +#define TI960_POWER_OFF 0x20 +#define TI960_FPD3_RAW10_100MHz 0x7f +#define TI960_FPD3_RAW12_50MHz 0x7d +#define TI960_FPD3_RAW12_75MHz 0x7e +#define TI960_FPD3_CSI 0x7c +#define TI960_RAW12 0x41 +#define TI960_RAW10_NORMAL 0x1 +#define TI960_RAW10_8BIT 0x81 +#define TI960_GPIO0_HIGH 0x09 +#define TI960_GPIO0_LOW 0x08 +#define TI960_GPIO1_HIGH 0x90 +#define TI960_GPIO1_LOW 0x80 +#define 
TI960_GPIO0_FSIN 0x0a +#define TI960_GPIO1_FSIN 0xa0 +#define TI960_GPIO0_MASK 0x0f +#define TI960_GPIO1_MASK 0xf0 +#define TI960_GPIO2_FSIN 0x0a +#define TI960_GPIO3_FSIN 0xa0 +#define TI960_GPIO2_MASK 0x0f +#define TI960_GPIO3_MASK 0xf0 +#define TI960_MIPI_800MBPS 0x2 +#define TI960_MIPI_1600MBPS 0x0 +#define TI960_CSI_ENABLE 0x1 +#define TI960_CSI_CONTS_CLOCK 0x2 +#define TI960_CSI_SKEWCAL 0x40 +#define TI960_FSIN_ENABLE 0x1 +#endif diff --git a/drivers/media/i2c/ti960.c b/drivers/media/i2c/ti960.c new file mode 100644 index 0000000000000..381351baff4ad --- /dev/null +++ b/drivers/media/i2c/ti960.c @@ -0,0 +1,1699 @@ +// SPDX-License-Identifier: GPL-2.0 +// Copyright (C) 2018 Intel Corporation + +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include + +#include "ti960-reg.h" + +struct ti960_subdev { + struct v4l2_subdev *sd; + unsigned short rx_port; + unsigned short fsin_gpio; + unsigned short phy_i2c_addr; + unsigned short alias_i2c_addr; + unsigned short ser_i2c_addr; + char sd_name[16]; +}; + +struct ti960 { + struct v4l2_subdev sd; + struct media_pad pad[NR_OF_TI960_PADS]; + struct v4l2_ctrl_handler ctrl_handler; + struct ti960_pdata *pdata; + struct ti960_subdev sub_devs[NR_OF_TI960_SINK_PADS]; + struct crlmodule_platform_data subdev_pdata[NR_OF_TI960_SINK_PADS]; + const char *name; + + struct mutex mutex; + + struct regmap *regmap8; + struct regmap *regmap16; + + struct v4l2_mbus_framefmt *ffmts[NR_OF_TI960_PADS]; + struct rect *crop; + struct rect *compose; + + struct v4l2_subdev_route *ti960_route; + + unsigned int nsinks; + unsigned int nsources; + unsigned int nstreams; + unsigned int npads; + + struct gpio_chip gc; + + struct v4l2_ctrl *link_freq; + struct v4l2_ctrl *test_pattern; +}; + +#define to_ti960(_sd) container_of(_sd, struct ti960, sd) + +static const s64 ti960_op_sys_clock[] = {400000000, 800000000}; +static const u8 ti960_op_sys_clock_reg_val[] = { + 
TI960_MIPI_800MBPS, + TI960_MIPI_1600MBPS +}; + +/* + * Order matters. + * + * 1. Bits-per-pixel, descending. + * 2. Bits-per-pixel compressed, descending. + * 3. Pixel order, same as in pixel_order_str. Formats for all four pixel + * orders must be defined. + */ +static const struct ti960_csi_data_format va_csi_data_formats[] = { + { MEDIA_BUS_FMT_YUYV8_1X16, 16, 16, PIXEL_ORDER_GBRG, 0x1e }, + { MEDIA_BUS_FMT_UYVY8_1X16, 16, 16, PIXEL_ORDER_GBRG, 0x1e }, + { MEDIA_BUS_FMT_SGRBG16_1X16, 16, 16, PIXEL_ORDER_GRBG, 0x2e }, + { MEDIA_BUS_FMT_SRGGB16_1X16, 16, 16, PIXEL_ORDER_RGGB, 0x2e }, + { MEDIA_BUS_FMT_SBGGR16_1X16, 16, 16, PIXEL_ORDER_BGGR, 0x2e }, + { MEDIA_BUS_FMT_SGBRG16_1X16, 16, 16, PIXEL_ORDER_GBRG, 0x2e }, + { MEDIA_BUS_FMT_SGRBG12_1X12, 12, 12, PIXEL_ORDER_GRBG, 0x2c }, + { MEDIA_BUS_FMT_SRGGB12_1X12, 12, 12, PIXEL_ORDER_RGGB, 0x2c }, + { MEDIA_BUS_FMT_SBGGR12_1X12, 12, 12, PIXEL_ORDER_BGGR, 0x2c }, + { MEDIA_BUS_FMT_SGBRG12_1X12, 12, 12, PIXEL_ORDER_GBRG, 0x2c }, + { MEDIA_BUS_FMT_SGRBG10_1X10, 10, 10, PIXEL_ORDER_GRBG, 0x2b }, + { MEDIA_BUS_FMT_SRGGB10_1X10, 10, 10, PIXEL_ORDER_RGGB, 0x2b }, + { MEDIA_BUS_FMT_SBGGR10_1X10, 10, 10, PIXEL_ORDER_BGGR, 0x2b }, + { MEDIA_BUS_FMT_SGBRG10_1X10, 10, 10, PIXEL_ORDER_GBRG, 0x2b }, +}; + +static const uint32_t ti960_supported_codes_pad[] = { + MEDIA_BUS_FMT_YUYV8_1X16, + MEDIA_BUS_FMT_UYVY8_1X16, + MEDIA_BUS_FMT_SBGGR16_1X16, + MEDIA_BUS_FMT_SGBRG16_1X16, + MEDIA_BUS_FMT_SGRBG16_1X16, + MEDIA_BUS_FMT_SRGGB16_1X16, + MEDIA_BUS_FMT_SBGGR12_1X12, + MEDIA_BUS_FMT_SGBRG12_1X12, + MEDIA_BUS_FMT_SGRBG12_1X12, + MEDIA_BUS_FMT_SRGGB12_1X12, + MEDIA_BUS_FMT_SBGGR10_1X10, + MEDIA_BUS_FMT_SGBRG10_1X10, + MEDIA_BUS_FMT_SGRBG10_1X10, + MEDIA_BUS_FMT_SRGGB10_1X10, + 0, +}; + +static const uint32_t *ti960_supported_codes[] = { + ti960_supported_codes_pad, +}; + +static struct regmap_config ti960_reg_config8 = { + .reg_bits = 8, + .val_bits = 8, +}; + +static struct regmap_config ti960_reg_config16 = { + .reg_bits = 16, + 
.val_bits = 8, + .reg_format_endian = REGMAP_ENDIAN_BIG, +}; + +static int ti953_reg_write(struct ti960 *va, unsigned short rx_port, + unsigned short ser_alias, unsigned char reg, unsigned char val) +{ + int ret; + int retry, timeout = 10; + struct i2c_client *client = v4l2_get_subdevdata(&va->sd); + + dev_dbg(va->sd.dev, "%s port %d, ser_alias %x, reg %x, val %x", + __func__, rx_port, ser_alias, reg, val); + client->addr = ser_alias; + for (retry = 0; retry < timeout; retry++) { + ret = i2c_smbus_write_byte_data(client, reg, val); + if (ret < 0) { + dev_err(va->sd.dev, "ti953 reg write ret=%x", ret); + usleep_range(5000, 6000); + } else + break; + } + + client->addr = TI960_I2C_ADDRESS; + if (retry >= timeout) { + dev_err(va->sd.dev, + "%s:write reg failed: port=%2x, addr=%2x, reg=%2x\n", + __func__, rx_port, ser_alias, reg); + return -EREMOTEIO; + } + + return 0; +} + +static int ti953_reg_read(struct ti960 *va, unsigned short rx_port, + unsigned short ser_alias, unsigned char reg, unsigned char *val) +{ + int retry, timeout = 10; + struct i2c_client *client = v4l2_get_subdevdata(&va->sd); + + client->addr = ser_alias; + for (retry = 0; retry < timeout; retry++) { + *val = i2c_smbus_read_byte_data(client, reg); + if (*val < 0) + usleep_range(5000, 6000); + else + break; + } + + client->addr = TI960_I2C_ADDRESS; + if (retry >= timeout) { + dev_err(va->sd.dev, + "%s:read reg failed: port=%2x, addr=%2x, reg=%2x\n", + __func__, rx_port, ser_alias, reg); + return -EREMOTEIO; + } + + return 0; +} + +static bool ti953_detect(struct ti960 *va, unsigned short rx_port, unsigned short ser_alias) +{ + bool ret = false; + int i; + int rval; + unsigned char val; + + for (i = 0; i < ARRAY_SIZE(ti953_FPD3_RX_ID); i++) { + rval = ti953_reg_read(va, rx_port, ser_alias, + ti953_FPD3_RX_ID[i].reg, &val); + if (rval) { + dev_err(va->sd.dev, "port %d, ti953 write timeout %d\n", rx_port, rval); + break; + } + if (val != ti953_FPD3_RX_ID[i].val_expected) + break; + } + + if (i == 
ARRAY_SIZE(ti953_FPD3_RX_ID)) + ret = true; + + return ret; +} + +static int ti960_reg_read(struct ti960 *va, unsigned char reg, unsigned int *val) +{ + int ret, retry, timeout = 10; + + for (retry = 0; retry < timeout; retry++) { + ret = regmap_read(va->regmap8, reg, val); + if (ret < 0) { + dev_err(va->sd.dev, "960 reg read ret=%x", ret); + usleep_range(5000, 6000); + } else { + break; + } + } + + if (retry >= timeout) { + dev_err(va->sd.dev, + "%s:devid read failed: reg=%2x, ret=%d\n", + __func__, reg, ret); + return -EREMOTEIO; + } + + return 0; +} + +static int ti960_reg_set_bit(struct ti960 *va, unsigned char reg, + unsigned char bit, unsigned char val) +{ + int ret; + unsigned int reg_val; + + ret = regmap_read(va->regmap8, reg, ®_val); + if (ret) + return ret; + if (val) + reg_val |= 1 << bit; + else + reg_val &= ~(1 << bit); + + return regmap_write(va->regmap8, reg, reg_val); +} + +static int ti960_map_phy_i2c_addr(struct ti960 *va, unsigned short rx_port, + unsigned short addr) +{ + int rval; + + rval = regmap_write(va->regmap8, TI960_RX_PORT_SEL, + (rx_port << 4) + (1 << rx_port)); + if (rval) + return rval; + + return regmap_write(va->regmap8, TI960_SLAVE_ID0, addr); +} + +static int ti960_map_alias_i2c_addr(struct ti960 *va, unsigned short rx_port, + unsigned short addr) +{ + int rval; + + rval = regmap_write(va->regmap8, TI960_RX_PORT_SEL, + (rx_port << 4) + (1 << rx_port)); + if (rval) + return rval; + + return regmap_write(va->regmap8, TI960_SLAVE_ALIAS_ID0, addr); +} + +static int ti960_map_ser_alias_addr(struct ti960 *va, unsigned short rx_port, + unsigned short ser_alias) +{ + int rval; + + dev_dbg(va->sd.dev, "%s port %d, ser_alias %x\n", __func__, rx_port, ser_alias); + rval = regmap_write(va->regmap8, TI960_RX_PORT_SEL, + (rx_port << 4) + (1 << rx_port)); + if (rval) + return rval; + + return regmap_write(va->regmap8, TI960_SER_ALIAS_ID, ser_alias); +} + +static int ti960_fsin_gpio_init(struct ti960 *va, unsigned short rx_port, + unsigned 
short fsin_gpio) +{ + int rval; + int reg_val; + + dev_dbg(va->sd.dev, "%s\n", __func__); + rval = regmap_read(va->regmap8, TI960_FS_CTL, ®_val); + if (rval) { + dev_dbg(va->sd.dev, "Failed to read gpio status.\n"); + return rval; + } + + if (!reg_val & TI960_FSIN_ENABLE) { + dev_dbg(va->sd.dev, "FSIN not enabled, skip config FSIN GPIO.\n"); + return 0; + } + + rval = regmap_write(va->regmap8, TI960_RX_PORT_SEL, + (rx_port << 4) + (1 << rx_port)); + if (rval) + return rval; + + switch (fsin_gpio) { + case 0: + case 1: + rval = regmap_read(va->regmap8, TI960_BC_GPIO_CTL0, ®_val); + if (rval) { + dev_dbg(va->sd.dev, "Failed to read gpio status.\n"); + return rval; + } + + if (fsin_gpio == 0) { + reg_val &= ~TI960_GPIO0_MASK; + reg_val |= TI960_GPIO0_FSIN; + } else { + reg_val &= ~TI960_GPIO1_MASK; + reg_val |= TI960_GPIO1_FSIN; + } + + rval = regmap_write(va->regmap8, TI960_BC_GPIO_CTL0, reg_val); + if (rval) + dev_dbg(va->sd.dev, "Failed to set gpio.\n"); + break; + case 2: + case 3: + rval = regmap_read(va->regmap8, TI960_BC_GPIO_CTL1, ®_val); + if (rval) { + dev_dbg(va->sd.dev, "Failed to read gpio status.\n"); + return rval; + } + + if (fsin_gpio == 2) { + reg_val &= ~TI960_GPIO2_MASK; + reg_val |= TI960_GPIO2_FSIN; + } else { + reg_val &= ~TI960_GPIO3_MASK; + reg_val |= TI960_GPIO3_FSIN; + } + + rval = regmap_write(va->regmap8, TI960_BC_GPIO_CTL1, reg_val); + if (rval) + dev_dbg(va->sd.dev, "Failed to set gpio.\n"); + break; + } + + return rval; +} + +static int ti960_get_routing(struct v4l2_subdev *sd, + struct v4l2_subdev_routing *route) +{ + struct ti960 *va = to_ti960(sd); + int i, j; + + /* active routing first */ + j = 0; + for (i = 0; i < va->nstreams; ++i) { + if (j >= route->num_routes) + break; + if (!(va->ti960_route[i].flags & V4L2_SUBDEV_ROUTE_FL_ACTIVE)) + continue; + route->routes[j].sink_pad = va->ti960_route[i].sink_pad; + route->routes[j].sink_stream = va->ti960_route[i].sink_stream; + route->routes[j].source_pad = 
va->ti960_route[i].source_pad; + route->routes[j].source_stream = va->ti960_route[i].source_stream; + route->routes[j].flags = va->ti960_route[i].flags; + j++; + } + + for (i = 0; i < va->nstreams; ++i) { + if (j >= route->num_routes) + break; + if (va->ti960_route[i].flags & V4L2_SUBDEV_ROUTE_FL_ACTIVE) + continue; + route->routes[j].sink_pad = va->ti960_route[i].sink_pad; + route->routes[j].sink_stream = va->ti960_route[i].sink_stream; + route->routes[j].source_pad = va->ti960_route[i].source_pad; + route->routes[j].source_stream = va->ti960_route[i].source_stream; + route->routes[j].flags = va->ti960_route[i].flags; + j++; + } + + route->num_routes = i; + + return 0; +} + +static int ti960_set_routing(struct v4l2_subdev *sd, + struct v4l2_subdev_routing *route) +{ + struct ti960 *va = to_ti960(sd); + int i, j, ret = 0; + + for (i = 0; i < min(route->num_routes, va->nstreams); ++i) { + struct v4l2_subdev_route *t = &route->routes[i]; + + if (t->sink_stream > va->nstreams - 1 || + t->source_stream > va->nstreams - 1) + continue; + + if (t->source_pad != TI960_PAD_SOURCE) + continue; + + for (j = 0; j < va->nstreams; j++) { + if (t->sink_pad == va->ti960_route[j].sink_pad && + t->source_pad == va->ti960_route[j].source_pad && + t->sink_stream == va->ti960_route[j].sink_stream && + t->source_stream == va->ti960_route[j].source_stream) + break; + } + + if (j == va->nstreams) + continue; + + if (t->flags & V4L2_SUBDEV_ROUTE_FL_ACTIVE) + va->ti960_route[j].flags |= + V4L2_SUBDEV_ROUTE_FL_ACTIVE; + else if (!(t->flags & V4L2_SUBDEV_ROUTE_FL_ACTIVE)) + va->ti960_route[j].flags &= + (~V4L2_SUBDEV_ROUTE_FL_ACTIVE); + } + + return ret; +} + +static int ti960_enum_mbus_code(struct v4l2_subdev *sd, + struct v4l2_subdev_pad_config *cfg, + struct v4l2_subdev_mbus_code_enum *code) +{ + struct ti960 *va = to_ti960(sd); + const uint32_t *supported_code = + ti960_supported_codes[code->pad]; + bool next_stream = false; + int i; + + if (code->stream & V4L2_SUBDEV_FLAG_NEXT_STREAM) { 
+ next_stream = true; + code->stream &= ~V4L2_SUBDEV_FLAG_NEXT_STREAM; + } + + if (code->stream > va->nstreams) + return -EINVAL; + + if (next_stream) { + if (!(va->pad[code->pad].flags & MEDIA_PAD_FL_MULTIPLEX)) + return -EINVAL; + if (code->stream < va->nstreams - 1) { + code->stream++; + return 0; + } else { + return -EINVAL; + } + } + + for (i = 0; supported_code[i]; i++) { + if (i == code->index) { + code->code = supported_code[i]; + return 0; + } + } + + return -EINVAL; +} + +static const struct ti960_csi_data_format + *ti960_validate_csi_data_format(u32 code) +{ + unsigned int i; + + for (i = 0; i < ARRAY_SIZE(va_csi_data_formats); i++) { + if (va_csi_data_formats[i].code == code) + return &va_csi_data_formats[i]; + } + + return &va_csi_data_formats[0]; +} + +static int ti960_get_frame_desc(struct v4l2_subdev *sd, + unsigned int pad, struct v4l2_mbus_frame_desc *desc) +{ + struct ti960 *va = to_ti960(sd); + struct v4l2_mbus_frame_desc_entry *entry = desc->entry; + u8 vc = 0; + int i; + + desc->type = V4L2_MBUS_FRAME_DESC_TYPE_CSI2; + desc->num_entries = min_t(int, va->nstreams, V4L2_FRAME_DESC_ENTRY_MAX); + + for (i = 0; i < desc->num_entries; i++) { + struct v4l2_mbus_framefmt *ffmt = + &va->ffmts[TI960_PAD_SOURCE][i]; + const struct ti960_csi_data_format *csi_format = + ti960_validate_csi_data_format(ffmt->code); + + entry->two_dim.width = ffmt->width; + entry->two_dim.height = ffmt->height; + entry->pixelcode = ffmt->code; + entry->bus.csi2.channel = vc++; + entry->bpp = csi_format->compressed; + entry++; + } + + return 0; +} + +static struct v4l2_mbus_framefmt * +__ti960_get_ffmt(struct v4l2_subdev *subdev, + struct v4l2_subdev_pad_config *cfg, + unsigned int pad, unsigned int which, + unsigned int stream) +{ + struct ti960 *va = to_ti960(subdev); + + if (which == V4L2_SUBDEV_FORMAT_TRY) + return v4l2_subdev_get_try_format(subdev, cfg, pad); + else + return &va->ffmts[pad][stream]; +} + +static int ti960_get_format(struct v4l2_subdev *subdev, + struct 
v4l2_subdev_pad_config *cfg, + struct v4l2_subdev_format *fmt) +{ + struct ti960 *va = to_ti960(subdev); + + if (fmt->stream > va->nstreams) + return -EINVAL; + + mutex_lock(&va->mutex); + fmt->format = *__ti960_get_ffmt(subdev, cfg, fmt->pad, + fmt->which, fmt->stream); + mutex_unlock(&va->mutex); + + dev_dbg(subdev->dev, "subdev_format: which: %s, pad: %d, stream: %d.\n", + fmt->which == V4L2_SUBDEV_FORMAT_ACTIVE ? + "V4L2_SUBDEV_FORMAT_ACTIVE" : "V4L2_SUBDEV_FORMAT_TRY", + fmt->pad, fmt->stream); + + dev_dbg(subdev->dev, "framefmt: width: %d, height: %d, code: 0x%x.\n", + fmt->format.width, fmt->format.height, fmt->format.code); + + return 0; +} + +static int ti960_set_format(struct v4l2_subdev *subdev, + struct v4l2_subdev_pad_config *cfg, + struct v4l2_subdev_format *fmt) +{ + struct ti960 *va = to_ti960(subdev); + const struct ti960_csi_data_format *csi_format; + struct v4l2_mbus_framefmt *ffmt; + + if (fmt->stream > va->nstreams) + return -EINVAL; + + csi_format = ti960_validate_csi_data_format( + fmt->format.code); + + mutex_lock(&va->mutex); + ffmt = __ti960_get_ffmt(subdev, cfg, fmt->pad, fmt->which, + fmt->stream); + + if (fmt->which == V4L2_SUBDEV_FORMAT_ACTIVE) { + ffmt->width = fmt->format.width; + ffmt->height = fmt->format.height; + ffmt->code = csi_format->code; + } + fmt->format = *ffmt; + mutex_unlock(&va->mutex); + + dev_dbg(subdev->dev, "framefmt: width: %d, height: %d, code: 0x%x.\n", + ffmt->width, ffmt->height, ffmt->code); + + return 0; +} + +static int ti960_open(struct v4l2_subdev *subdev, + struct v4l2_subdev_fh *fh) +{ + struct v4l2_mbus_framefmt *try_fmt = + v4l2_subdev_get_try_format(subdev, fh->pad, 0); + + struct v4l2_subdev_format fmt = { + .which = V4L2_SUBDEV_FORMAT_TRY, + .pad = TI960_PAD_SOURCE, + .format = { + .width = TI960_MAX_WIDTH, + .height = TI960_MAX_HEIGHT, + .code = MEDIA_BUS_FMT_SBGGR12_1X12, + }, + .stream = 0, + }; + + *try_fmt = fmt.format; + + return 0; +} + +static int ti960_map_subdevs_addr(struct ti960 *va) +{ 
+ unsigned short rx_port, phy_i2c_addr, alias_i2c_addr; + int i, rval; + + for (i = 0; i < NR_OF_TI960_SINK_PADS; i++) { + rx_port = va->sub_devs[i].rx_port; + phy_i2c_addr = va->sub_devs[i].phy_i2c_addr; + alias_i2c_addr = va->sub_devs[i].alias_i2c_addr; + + if (!phy_i2c_addr || !alias_i2c_addr) + continue; + + rval = ti960_map_phy_i2c_addr(va, rx_port, phy_i2c_addr); + if (rval) + return rval; + + /* set 7bit alias i2c addr */ + rval = ti960_map_alias_i2c_addr(va, rx_port, + alias_i2c_addr << 1); + if (rval) + return rval; + } + + return 0; +} + +static int ti960_registered(struct v4l2_subdev *subdev) +{ + struct ti960 *va = to_ti960(subdev); + struct i2c_client *client = v4l2_get_subdevdata(subdev); + int i, j, k, l, rval; + + for (i = 0, k = 0; i < va->pdata->subdev_num; i++) { + struct ti960_subdev_info *info = + &va->pdata->subdev_info[i]; + struct crlmodule_platform_data *pdata = + (struct crlmodule_platform_data *) + info->board_info.platform_data; + + if (k >= va->nsinks) + break; + + rval = ti960_map_ser_alias_addr(va, info->rx_port, + info->ser_alias << 1); + if (rval) + return rval; + + + if (!ti953_detect(va, info->rx_port, info->ser_alias)) + continue; + + /* + * The sensors should not share the same pdata structure. + * Clone the pdata for each sensor. + */ + memcpy(&va->subdev_pdata[k], pdata, sizeof(*pdata)); + + va->sub_devs[k].fsin_gpio = va->subdev_pdata[k].fsin; + + /* Spin sensor subdev suffix name */ + va->subdev_pdata[k].suffix = info->suffix; + + /* + * Change the gpio value to have xshutdown + * and rx port included, so in gpio_set those + * can be caculated from it. + */ + va->subdev_pdata[k].xshutdown += va->gc.base + + info->rx_port * NR_OF_GPIOS_PER_PORT; + info->board_info.platform_data = &va->subdev_pdata[k]; + + if (!info->phy_i2c_addr || !info->board_info.addr) { + dev_err(va->sd.dev, "can't find the physical and alias addr.\n"); + return -EINVAL; + } + + /* Map PHY I2C address. 
*/ + rval = ti960_map_phy_i2c_addr(va, info->rx_port, + info->phy_i2c_addr); + if (rval) + return rval; + + /* Map 7bit ALIAS I2C address. */ + rval = ti960_map_alias_i2c_addr(va, info->rx_port, + info->board_info.addr << 1); + if (rval) + return rval; + + va->sub_devs[k].sd = v4l2_i2c_new_subdev_board( + va->sd.v4l2_dev, client->adapter, + &info->board_info, 0); + if (!va->sub_devs[k].sd) { + dev_err(va->sd.dev, + "can't create new i2c subdev %c\n", + info->suffix); + continue; + } + va->sub_devs[k].rx_port = info->rx_port; + va->sub_devs[k].phy_i2c_addr = info->phy_i2c_addr; + va->sub_devs[k].alias_i2c_addr = info->board_info.addr; + va->sub_devs[k].ser_i2c_addr = info->ser_alias; + memcpy(va->sub_devs[k].sd_name, + va->subdev_pdata[k].module_name, + min(sizeof(va->sub_devs[k].sd_name) - 1, + sizeof(va->subdev_pdata[k].module_name) - 1)); + + for (j = 0; j < va->sub_devs[k].sd->entity.num_pads; j++) { + if (va->sub_devs[k].sd->entity.pads[j].flags & + MEDIA_PAD_FL_SOURCE) + break; + } + + if (j == va->sub_devs[k].sd->entity.num_pads) { + dev_warn(va->sd.dev, + "no source pad in subdev %c\n", + info->suffix); + return -ENOENT; + } + + for (l = 0; l < va->nsinks; l++) { +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 5, 0) + rval = media_entity_create_link( +#else + rval = media_create_pad_link( +#endif + &va->sub_devs[k].sd->entity, j, + &va->sd.entity, l, 0); + if (rval) { + dev_err(va->sd.dev, + "can't create link to %c\n", + info->suffix); + return -EINVAL; + } + } + k++; + } + rval = ti960_map_subdevs_addr(va); + if (rval) + return rval; + + return 0; +} + +static int ti960_set_power(struct v4l2_subdev *subdev, int on) +{ + struct ti960 *va = to_ti960(subdev); + int ret; + u8 val; + + ret = regmap_write(va->regmap8, TI960_RESET, + (on) ? TI960_POWER_ON : TI960_POWER_OFF); + if (ret || !on) + return ret; + + /* Configure MIPI clock bsaed on control value. 
*/ + ret = regmap_write(va->regmap8, TI960_CSI_PLL_CTL, + ti960_op_sys_clock_reg_val[ + v4l2_ctrl_g_ctrl(va->link_freq)]); + if (ret) + return ret; + val = TI960_CSI_ENABLE; + val |= TI960_CSI_CONTS_CLOCK; + /* Enable skew calculation when 1.6Gbps output is enabled. */ + if (v4l2_ctrl_g_ctrl(va->link_freq)) + val |= TI960_CSI_SKEWCAL; + return regmap_write(va->regmap8, TI960_CSI_CTL, val); +} + +static bool ti960_broadcast_mode(struct v4l2_subdev *subdev) +{ + struct ti960 *va = to_ti960(subdev); + struct v4l2_subdev_format fmt = { 0 }; + struct v4l2_subdev *sd; + char *sd_name = NULL; + bool first = true; + unsigned int h = 0, w = 0, code = 0; + bool single_stream = true; + int i, rval; + + for (i = 0; i < NR_OF_TI960_SINK_PADS; i++) { + struct media_pad *remote_pad = + media_entity_remote_pad(&va->pad[i]); + + if (!remote_pad) + continue; + + sd = media_entity_to_v4l2_subdev(remote_pad->entity); + fmt.which = V4L2_SUBDEV_FORMAT_ACTIVE; + fmt.pad = remote_pad->index; + fmt.stream = 0; + + rval = v4l2_subdev_call(sd, pad, get_fmt, NULL, &fmt); + if (rval) + return false; + + if (first) { + sd_name = va->sub_devs[i].sd_name; + h = fmt.format.height; + w = fmt.format.width; + code = fmt.format.code; + first = false; + } else { + if (strncmp(sd_name, va->sub_devs[i].sd_name, 16)) + return false; + + if (h != fmt.format.height || w != fmt.format.width + || code != fmt.format.code) + return false; + + single_stream = false; + } + } + + if (single_stream) + return false; + + return true; +} + +static int ti960_rx_port_config(struct ti960 *va, int sink, int rx_port) +{ + int rval; + int i; + unsigned int csi_vc_map; + + /* Select RX port. 
*/ + rval = regmap_write(va->regmap8, TI960_RX_PORT_SEL, + (rx_port << 4) + (1 << rx_port)); + if (rval) { + dev_err(va->sd.dev, "Failed to select RX port.\n"); + return rval; + } + + rval = regmap_write(va->regmap8, TI960_PORT_CONFIG, + TI960_FPD3_CSI); + if (rval) { + dev_err(va->sd.dev, "Failed to set port config.\n"); + return rval; + } + + /* + * CSI VC MAPPING. + */ + rval = regmap_read(va->regmap8, TI960_CSI_VC_MAP, &csi_vc_map); + if (rval < 0) { + dev_err(va->sd.dev, "960 reg read ret=%x", rval); + return rval; + } + for (i = 0; i < va->nstreams; ++i) { + if (!(va->ti960_route[i].flags & V4L2_SUBDEV_ROUTE_FL_ACTIVE)) + continue; + if (rx_port != va->ti960_route[i].sink_pad) + continue; + csi_vc_map &= ~(0x3 << (va->ti960_route[i].sink_stream & 0x3) * 2); + csi_vc_map |= (va->ti960_route[i].source_stream & 0x3) + << (va->ti960_route[i].sink_stream & 0x3) * 2; + } + dev_dbg(va->sd.dev, "%s port %d, csi_vc_map %x", + __func__, rx_port, csi_vc_map); + rval = regmap_write(va->regmap8, TI960_CSI_VC_MAP, + csi_vc_map); + if (rval) { + dev_err(va->sd.dev, "Failed to set port config.\n"); + return rval; + } + return 0; +} + +static int ti960_find_subdev_index(struct ti960 *va, struct v4l2_subdev *sd) +{ + int i; + + for (i = 0; i < NR_OF_TI960_SINK_PADS; i++) { + if (va->sub_devs[i].sd == sd) + return i; + } + + WARN_ON(1); + + return -EINVAL; +} + +static int ti960_set_frame_sync(struct ti960 *va, int enable) +{ + int i, rval; + int index = !!enable; + + for (i = 0; i < ARRAY_SIZE(ti960_frame_sync_settings[index]); i++) { + rval = regmap_write(va->regmap8, + ti960_frame_sync_settings[index][i].reg, + ti960_frame_sync_settings[index][i].val); + if (rval) { + dev_err(va->sd.dev, "Failed to %s frame sync\n", + enable ? 
"enable" : "disable"); + return rval; + } + } + + return 0; +} + +static int ti960_set_stream(struct v4l2_subdev *subdev, int enable) +{ + struct ti960 *va = to_ti960(subdev); + struct v4l2_subdev *sd; + int i, j, rval; + bool broadcast; + unsigned short rx_port; + unsigned short ser_alias; + int sd_idx = -1; + DECLARE_BITMAP(rx_port_enabled, 32); + + dev_dbg(va->sd.dev, "TI960 set stream, enable %d\n", enable); + + broadcast = ti960_broadcast_mode(subdev); + if (enable) + dev_info(va->sd.dev, "TI960 in %s mode", + broadcast ? "broadcast" : "non broadcast"); + + bitmap_zero(rx_port_enabled, 32); + for (i = 0; i < NR_OF_TI960_SINK_PADS; i++) { + struct media_pad *remote_pad = + media_entity_remote_pad(&va->pad[i]); + + if (!remote_pad) + continue; + + /* Find ti960 subdev */ + sd = media_entity_to_v4l2_subdev(remote_pad->entity); + j = ti960_find_subdev_index(va, sd); + if (j < 0) + return -EINVAL; + rx_port = va->sub_devs[j].rx_port; + ser_alias = va->sub_devs[j].ser_i2c_addr; + rval = ti960_rx_port_config(va, i, rx_port); + if (rval < 0) + return rval; + + bitmap_set(rx_port_enabled, rx_port, 1); + + if (broadcast && sd_idx == -1) { + sd_idx = j; + } else if (broadcast) { + rval = ti960_map_alias_i2c_addr(va, rx_port, + va->sub_devs[sd_idx].alias_i2c_addr << 1); + if (rval < 0) + return rval; + } else { + /* Stream on/off sensor */ + dev_err(va->sd.dev, + "set stream for %s, enable %d\n", + sd->name, enable); + rval = v4l2_subdev_call(sd, video, s_stream, enable); + if (rval) { + dev_err(va->sd.dev, + "Failed to set stream for %s, enable %d\n", + sd->name, enable); + return rval; + } + + /* RX port fordward */ + rval = ti960_reg_set_bit(va, TI960_FWD_CTL1, + rx_port + 4, !enable); + if (rval) { + dev_err(va->sd.dev, + "Failed to forward RX port%d. enable %d\n", + i, enable); + return rval; + } + /* + * FIXME: workaround for ov495 block issue. + * reset Ser TI953, to avoid ov495 block, + * only do reset for ov495, then it won't break other sensors. 
+ */ + if (memcmp(va->sub_devs[j].sd_name, "OV495", strlen("OV495")) == 0) { + ti953_reg_write(va, rx_port, ser_alias, 0x0e, 0xf0); + msleep(50); + ti953_reg_write(va, rx_port, ser_alias, 0x0d, 00); + msleep(50); + ti953_reg_write(va, rx_port, ser_alias, 0x0d, 0x1); + } + + } + } + + if (broadcast) { + if (sd_idx < 0) { + dev_err(va->sd.dev, "No sensor connected!\n"); + return -ENODEV; + } + sd = va->sub_devs[sd_idx].sd; + rval = v4l2_subdev_call(sd, video, s_stream, enable); + if (rval) { + dev_err(va->sd.dev, + "Failed to set stream for %s. enable %d\n", + sd->name, enable); + return rval; + } + + rval = ti960_set_frame_sync(va, enable); + if (rval) { + dev_err(va->sd.dev, + "Failed to set frame sync.\n"); + return rval; + } + + for (i = 0; i < NR_OF_TI960_SINK_PADS; i++) { + if (enable && test_bit(i, rx_port_enabled)) { + rval = ti960_fsin_gpio_init(va, + va->sub_devs[i].rx_port, + va->sub_devs[i].fsin_gpio); + if (rval) { + dev_err(va->sd.dev, + "Failed to enable frame sync gpio init.\n"); + return rval; + } + /* + * FIXME: workaround for ov495 block issue. + * reset Ser TI953, to avoid ov495 block, + * only do reset for ov495, then it won't break other sensors. + */ + if (memcmp(va->sub_devs[i].sd_name, "OV495", strlen("OV495")) == 0) { + rx_port = va->sub_devs[i].rx_port; + ser_alias = va->sub_devs[i].ser_i2c_addr; + ti953_reg_write(va, rx_port, ser_alias, 0x0e, 0xf0); + msleep(50); + ti953_reg_write(va, rx_port, ser_alias, 0x0d, 00); + msleep(50); + ti953_reg_write(va, rx_port, ser_alias, 0x0d, 0x1); + } + } + } + + for (i = 0; i < NR_OF_TI960_SINK_PADS; i++) { + if (!test_bit(i, rx_port_enabled)) + continue; + + /* RX port fordward */ + rval = ti960_reg_set_bit(va, TI960_FWD_CTL1, + i + 4, !enable); + if (rval) { + dev_err(va->sd.dev, + "Failed to forward RX port%d. enable %d\n", + i, enable); + return rval; + } + } + + /* + * Restore each subdev i2c address as we may + * touch it later. 
+ */ + rval = ti960_map_subdevs_addr(va); + if (rval) + return rval; + } + + return 0; +} + +static struct v4l2_subdev_internal_ops ti960_sd_internal_ops = { + .open = ti960_open, + .registered = ti960_registered, +}; + +static bool ti960_sd_has_route(struct media_entity *entity, + unsigned int pad0, unsigned int pad1, int *stream) +{ + struct ti960 *va = to_ti960(media_entity_to_v4l2_subdev(entity)); + int i; + + if (va == NULL || stream == NULL || + *stream >= va->nstreams || *stream < 0) + return false; + + for (i = 0; i < va->nstreams; ++i) { + if ((va->ti960_route[*stream].flags & V4L2_SUBDEV_ROUTE_FL_ACTIVE) && + ((va->ti960_route[*stream].source_pad == pad0 && + va->ti960_route[*stream].sink_pad == pad1) || + (va->ti960_route[*stream].source_pad == pad1 && + va->ti960_route[*stream].sink_pad == pad0))) + return true; + } + + return false; +} + +static const struct media_entity_operations ti960_sd_entity_ops = { + .has_route = ti960_sd_has_route, +}; + +static const struct v4l2_subdev_video_ops ti960_sd_video_ops = { + .s_stream = ti960_set_stream, +}; + +static const struct v4l2_subdev_core_ops ti960_core_subdev_ops = { + .s_power = ti960_set_power, +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 8, 0) + .g_ctrl = v4l2_subdev_g_ctrl, + .s_ctrl = v4l2_subdev_s_ctrl, + .g_ext_ctrls = v4l2_subdev_g_ext_ctrls, + .s_ext_ctrls = v4l2_subdev_s_ext_ctrls, + .try_ext_ctrls = v4l2_subdev_try_ext_ctrls, + .queryctrl = v4l2_subdev_queryctrl, +#endif +}; + +static int ti960_s_ctrl(struct v4l2_ctrl *ctrl) +{ + return 0; +} + +static const struct v4l2_ctrl_ops ti960_ctrl_ops = { + .s_ctrl = ti960_s_ctrl, +}; + +static const struct v4l2_ctrl_config ti960_controls[] = { + { + .ops = &ti960_ctrl_ops, + .id = V4L2_CID_LINK_FREQ, + .name = "V4L2_CID_LINK_FREQ", + .type = V4L2_CTRL_TYPE_INTEGER_MENU, + .max = ARRAY_SIZE(ti960_op_sys_clock) - 1, + .min = 0, + .step = 0, + .def = 1, + .qmenu_int = ti960_op_sys_clock, + }, + { + .ops = &ti960_ctrl_ops, + .id = V4L2_CID_TEST_PATTERN, + 
.name = "V4L2_CID_TEST_PATTERN", + .type = V4L2_CTRL_TYPE_INTEGER, + .max = 1, + .min = 0, + .step = 1, + .def = 0, + }, +}; + +static const struct v4l2_subdev_pad_ops ti960_sd_pad_ops = { + .get_fmt = ti960_get_format, + .set_fmt = ti960_set_format, + .get_frame_desc = ti960_get_frame_desc, + .enum_mbus_code = ti960_enum_mbus_code, + .set_routing = ti960_set_routing, + .get_routing = ti960_get_routing, +}; + +static struct v4l2_subdev_ops ti960_sd_ops = { + .core = &ti960_core_subdev_ops, + .video = &ti960_sd_video_ops, + .pad = &ti960_sd_pad_ops, +}; + +static int ti960_register_subdev(struct ti960 *va) +{ + int i, rval; + struct i2c_client *client = v4l2_get_subdevdata(&va->sd); + + v4l2_subdev_init(&va->sd, &ti960_sd_ops); + snprintf(va->sd.name, sizeof(va->sd.name), "TI960 %c", + va->pdata->suffix); + + va->sd.flags |= V4L2_SUBDEV_FL_HAS_DEVNODE | + V4L2_SUBDEV_FL_HAS_SUBSTREAMS; + + va->sd.internal_ops = &ti960_sd_internal_ops; + va->sd.entity.ops = &ti960_sd_entity_ops; + + v4l2_set_subdevdata(&va->sd, client); + + v4l2_ctrl_handler_init(&va->ctrl_handler, + ARRAY_SIZE(ti960_controls)); + + if (va->ctrl_handler.error) { + dev_err(va->sd.dev, + "Failed to init ti960 controls. 
ERR: %d!\n", + va->ctrl_handler.error); + return va->ctrl_handler.error; + } + + va->sd.ctrl_handler = &va->ctrl_handler; + + for (i = 0; i < ARRAY_SIZE(ti960_controls); i++) { + const struct v4l2_ctrl_config *cfg = + &ti960_controls[i]; + struct v4l2_ctrl *ctrl; + + ctrl = v4l2_ctrl_new_custom(&va->ctrl_handler, cfg, NULL); + if (!ctrl) { + dev_err(va->sd.dev, + "Failed to create ctrl %s!\n", cfg->name); + rval = va->ctrl_handler.error; + goto failed_out; + } + } + + va->link_freq = v4l2_ctrl_find(&va->ctrl_handler, V4L2_CID_LINK_FREQ); + va->test_pattern = v4l2_ctrl_find(&va->ctrl_handler, + V4L2_CID_TEST_PATTERN); + + for (i = 0; i < va->nsinks; i++) + va->pad[i].flags = MEDIA_PAD_FL_SINK; + va->pad[TI960_PAD_SOURCE].flags = + MEDIA_PAD_FL_SOURCE | MEDIA_PAD_FL_MULTIPLEX; +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 5, 0) + rval = media_entity_init(&va->sd.entity, NR_OF_TI960_PADS, va->pad, 0); +#else + rval = media_entity_pads_init(&va->sd.entity, + NR_OF_TI960_PADS, va->pad); +#endif + if (rval) { + dev_err(va->sd.dev, + "Failed to init media entity for ti960!\n"); + goto failed_out; + } + + return 0; + +failed_out: + v4l2_ctrl_handler_free(&va->ctrl_handler); + return rval; +} + +struct slave_register_devid { + u16 reg; + u8 val_expected; +}; + +#define OV495_I2C_PHY_ADDR 0x48 +#define OV495_I2C_ALIAS_ADDR 0x30 + +static const struct slave_register_devid ov495_devid[] = { + {0x3000, 0x51}, + {0x3001, 0x49}, + {0x3002, 0x56}, + {0x3003, 0x4f}, +}; + +/* + * read sensor id reg of 16 bit addr, and 8 bit val + */ +static int slave_id_read(struct i2c_client *client, u8 i2c_addr, + u16 reg, u8 *val) +{ + struct i2c_msg msg[2]; + unsigned char data[2]; + int rval; + + /* override i2c_addr */ + msg[0].addr = i2c_addr; + msg[0].flags = 0; + data[0] = (u8) (reg >> 8); + data[1] = (u8) (reg & 0xff); + msg[0].buf = data; + msg[0].len = 2; + + msg[1].addr = i2c_addr; + msg[1].flags = I2C_M_RD; + msg[1].buf = data; + msg[1].len = 1; + + rval = i2c_transfer(client->adapter, 
msg, 2); + + if (rval < 0) + return rval; + + *val = data[0]; + + return 0; +} + +static bool slave_detect(struct ti960 *va, u8 i2c_addr, + const struct slave_register_devid *slave_devid, u8 len) +{ + struct i2c_client *client = v4l2_get_subdevdata(&va->sd); + int i; + int rval; + unsigned char val; + + for (i = 0; i < len; i++) { + rval = slave_id_read(client, i2c_addr, + slave_devid[i].reg, &val); + if (rval) { + dev_err(va->sd.dev, "slave id read fail %d\n", rval); + break; + } + if (val != slave_devid[i].val_expected) + break; + } + + if (i == len) + return true; + + return false; +} + +static int ti960_init(struct ti960 *va) +{ + unsigned int reset_gpio = va->pdata->reset_gpio; + int i, rval; + unsigned int val; + int m; + int rx_port = 0; + int ser_alias = 0; + bool ov495_detected; + + gpio_set_value(reset_gpio, 1); + usleep_range(2000, 3000); + dev_err(va->sd.dev, "Setting reset gpio %d to 1.\n", reset_gpio); + + rval = ti960_reg_read(va, TI960_DEVID, &val); + if (rval) { + dev_err(va->sd.dev, "Failed to read device ID of TI960!\n"); + return rval; + } + dev_info(va->sd.dev, "TI960 device ID: 0x%X\n", val); + + for (i = 0; i < ARRAY_SIZE(ti960_gpio_settings); i++) { + rval = regmap_write(va->regmap8, + ti960_gpio_settings[i].reg, + ti960_gpio_settings[i].val); + if (rval) { + dev_err(va->sd.dev, + "Failed to write TI960 gpio setting, reg %2x, val %2x\n", + ti960_gpio_settings[i].reg, ti960_gpio_settings[i].val); + return rval; + } + } + usleep_range(10000, 11000); + + /* + * fixed value of sensor phy, ser_alias, port config for ti960 each port, + * not yet known sensor platform data here. 
+ */ + ser_alias = 0x58; + for (i = 0; i < ARRAY_SIZE(ti960_init_settings); i++) { + rval = regmap_write(va->regmap8, + ti960_init_settings[i].reg, + ti960_init_settings[i].val); + if (rval) { + dev_err(va->sd.dev, + "Failed to write TI960 init setting, reg %2x, val %2x\n", + ti960_init_settings[i].reg, ti960_init_settings[i].val); + return rval; + } + } + + /* wait for ti953 ready */ + msleep(200); + + for (i = 0; i < NR_OF_TI960_SINK_PADS; i++) { + unsigned short rx_port, phy_i2c_addr, alias_i2c_addr; + + rx_port = i; + phy_i2c_addr = OV495_I2C_PHY_ADDR; + alias_i2c_addr = OV495_I2C_ALIAS_ADDR; + + rval = ti960_map_phy_i2c_addr(va, rx_port, phy_i2c_addr); + if (rval) + return rval; + + rval = ti960_map_alias_i2c_addr(va, rx_port, + alias_i2c_addr << 1); + if (rval) + return rval; + + ov495_detected = slave_detect(va, alias_i2c_addr, + ov495_devid, ARRAY_SIZE(ov495_devid)); + + /* unmap to clear i2c addr space */ + rval = ti960_map_phy_i2c_addr(va, rx_port, 0); + if (rval) + return rval; + + rval = ti960_map_alias_i2c_addr(va, rx_port, 0); + if (rval) + return rval; + + if (ov495_detected) { + dev_info(va->sd.dev, "ov495 detected on port %d\n", rx_port); + break; + } + } + + for (i = 0; i < ARRAY_SIZE(ti953_init_settings); i++) { + if (ov495_detected) + break; + rval = ti953_reg_write(va, rx_port, ser_alias, + ti953_init_settings[i].reg, + ti953_init_settings[i].val); + if (rval) { + dev_err(va->sd.dev, "port %d, ti953 write timeout %d\n", 0, rval); + break; + } + } + + for (m = 0; m < ARRAY_SIZE(ti960_init_settings_2); m++) { + rval = regmap_write(va->regmap8, + ti960_init_settings_2[m].reg, + ti960_init_settings_2[m].val); + if (rval) { + dev_err(va->sd.dev, + "Failed to write TI960 init setting 2, reg %2x, val %2x\n", + ti960_init_settings_2[m].reg, ti960_init_settings_2[m].val); + break; + } + } + + rval = regmap_write(va->regmap8, TI960_RX_PORT_SEL, + (rx_port << 4) + (1 << rx_port)); + if (rval) + return rval; + for (m = 1; m < 
ARRAY_SIZE(ti960_init_settings_3); m++) { + rval = regmap_write(va->regmap8, + ti960_init_settings_3[m].reg, + ti960_init_settings_3[m].val); + if (rval) { + dev_err(va->sd.dev, + "Failed to write TI960 init setting 2, reg %2x, val %2x\n", + ti960_init_settings_3[m].reg, ti960_init_settings_3[m].val); + break; + } + } + + for (i = 0; i < ARRAY_SIZE(ti953_init_settings_2); i++) { + if (ov495_detected) + break; + rval = ti953_reg_write(va, rx_port, ser_alias, + ti953_init_settings_2[i].reg, + ti953_init_settings_2[i].val); + if (rval) { + dev_err(va->sd.dev, "port %d, ti953 write timeout %d\n", 0, rval); + break; + } + } + + /* reset and power for ti953 */ + if (!ov495_detected) { + ti953_reg_write(va, 0, ser_alias, 0x0d, 00); + msleep(50); + ti953_reg_write(va, 0, ser_alias, 0x0d, 0x3); + } + + rval = ti960_map_subdevs_addr(va); + if (rval) + return rval; + + return 0; +} + +static void ti960_gpio_set(struct gpio_chip *chip, unsigned int gpio, int value) +{ +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 4, 0) + struct i2c_client *client = to_i2c_client(chip->dev); +#else + struct i2c_client *client = to_i2c_client(chip->parent); +#endif + struct v4l2_subdev *subdev = i2c_get_clientdata(client); + struct ti960 *va = to_ti960(subdev); + unsigned int reg_val; + int rx_port, gpio_port; + int ret; + + if (gpio >= NR_OF_TI960_GPIOS) + return; + + rx_port = gpio / NR_OF_GPIOS_PER_PORT; + gpio_port = gpio % NR_OF_GPIOS_PER_PORT; + + ret = regmap_write(va->regmap8, TI960_RX_PORT_SEL, + (rx_port << 4) + (1 << rx_port)); + if (ret) { + dev_dbg(&client->dev, "Failed to select RX port.\n"); + return; + } + ret = regmap_read(va->regmap8, TI960_BC_GPIO_CTL0, ®_val); + if (ret) { + dev_dbg(&client->dev, "Failed to read gpio status.\n"); + return; + } + + if (gpio_port == 0) { + reg_val &= ~TI960_GPIO0_MASK; + reg_val |= value ? TI960_GPIO0_HIGH : TI960_GPIO0_LOW; + } else { + reg_val &= ~TI960_GPIO1_MASK; + reg_val |= value ? 
TI960_GPIO1_HIGH : TI960_GPIO1_LOW; + } + + ret = regmap_write(va->regmap8, TI960_BC_GPIO_CTL0, reg_val); + if (ret) + dev_dbg(&client->dev, "Failed to set gpio.\n"); +} + +static int ti960_gpio_direction_output(struct gpio_chip *chip, + unsigned int gpio, int level) +{ + return 0; +} + +static int ti960_probe(struct i2c_client *client, + const struct i2c_device_id *devid) +{ + struct ti960 *va; + int i, j, k, l, rval = 0; + + if (client->dev.platform_data == NULL) + return -ENODEV; + + va = devm_kzalloc(&client->dev, sizeof(*va), GFP_KERNEL); + if (!va) + return -ENOMEM; + + va->pdata = client->dev.platform_data; + + va->nsources = NR_OF_TI960_SOURCE_PADS; + va->nsinks = NR_OF_TI960_SINK_PADS; + va->npads = NR_OF_TI960_PADS; + va->nstreams = NR_OF_TI960_STREAMS; + + va->crop = devm_kcalloc(&client->dev, va->npads, + sizeof(struct v4l2_rect), GFP_KERNEL); + + va->compose = devm_kcalloc(&client->dev, va->npads, + sizeof(struct v4l2_rect), GFP_KERNEL); + + if (!va->crop || !va->compose) + return -ENOMEM; + + for (i = 0; i < va->npads; i++) { + va->ffmts[i] = devm_kcalloc(&client->dev, va->nstreams, + sizeof(struct v4l2_mbus_framefmt), + GFP_KERNEL); + if (!va->ffmts[i]) + return -ENOMEM; + } + + va->ti960_route = devm_kcalloc(&client->dev, NR_OF_TI960_STREAMS, + sizeof(struct v4l2_subdev_routing), GFP_KERNEL); + + if (!va->ti960_route) + return -ENOMEM; + + /* routing for virtual channel supports */ + l = 0; + for (i = 0; i < NR_OF_TI960_SINK_PADS; i++) + for (j = 0; j < NR_OF_TI960_VCS_PER_SINK_PAD; j++) + for (k = 0; k < NR_OF_TI960_VCS_SOURCE_PAD; k++) { + va->ti960_route[l].sink_pad = i; + va->ti960_route[l].sink_stream = j; + va->ti960_route[l].source_pad = TI960_PAD_SOURCE; + va->ti960_route[l].source_stream = k; + va->ti960_route[l].flags = MEDIA_PAD_FL_MULTIPLEX; + l++; + } + + va->regmap8 = devm_regmap_init_i2c(client, + &ti960_reg_config8); + if (IS_ERR(va->regmap8)) { + dev_err(&client->dev, "Failed to init regmap8!\n"); + return -EIO; + } + + va->regmap16 
= devm_regmap_init_i2c(client, + &ti960_reg_config16); + if (IS_ERR(va->regmap16)) { + dev_err(&client->dev, "Failed to init regmap16!\n"); + return -EIO; + } + + mutex_init(&va->mutex); + v4l2_i2c_subdev_init(&va->sd, client, &ti960_sd_ops); + rval = ti960_register_subdev(va); + if (rval) { + dev_err(&client->dev, "Failed to register va subdevice!\n"); + return rval; + } + + if (devm_gpio_request_one(va->sd.dev, va->pdata->reset_gpio, 0, + "ti960 reset") != 0) { + dev_err(va->sd.dev, "Unable to acquire gpio %d\n", + va->pdata->reset_gpio); + return -ENODEV; + } + + rval = ti960_init(va); + if (rval) { + dev_err(&client->dev, "Failed to init TI960!\n"); + return rval; + } + + /* + * TI960 has several back channel GPIOs. + * We export GPIO0 and GPIO1 to control reset or fsin. + */ +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 4, 0) + va->gc.dev = &client->dev; +#else + va->gc.parent = &client->dev; +#endif + va->gc.owner = THIS_MODULE; + va->gc.label = "TI960 GPIO"; + va->gc.ngpio = NR_OF_TI960_GPIOS; + va->gc.base = -1; + va->gc.set = ti960_gpio_set; + va->gc.direction_output = ti960_gpio_direction_output; + rval = gpiochip_add(&va->gc); + if (rval) { + dev_err(&client->dev, "Failed to add gpio chip!\n"); + return -EIO; + } + + return 0; +} + +static int ti960_remove(struct i2c_client *client) +{ + struct v4l2_subdev *subdev = i2c_get_clientdata(client); + struct ti960 *va = to_ti960(subdev); + int i; + + if (!va) + return 0; + + mutex_destroy(&va->mutex); + v4l2_ctrl_handler_free(&va->ctrl_handler); + v4l2_device_unregister_subdev(&va->sd); + media_entity_cleanup(&va->sd.entity); + + for (i = 0; i < NR_OF_TI960_SINK_PADS; i++) { + if (va->sub_devs[i].sd) { + struct i2c_client *sub_client = + v4l2_get_subdevdata(va->sub_devs[i].sd); + + i2c_unregister_device(sub_client); + } + va->sub_devs[i].sd = NULL; + } + + gpiochip_remove(&va->gc); + + return 0; +} + +#ifdef CONFIG_PM +static int ti960_suspend(struct device *dev) +{ + return 0; +} + +static int 
ti960_resume(struct device *dev) +{ + struct i2c_client *client = to_i2c_client(dev); + struct v4l2_subdev *subdev = i2c_get_clientdata(client); + struct ti960 *va = to_ti960(subdev); + + return ti960_init(va); +} +#else +#define ti960_suspend NULL +#define ti960_resume NULL +#endif /* CONFIG_PM */ + +static const struct i2c_device_id ti960_id_table[] = { + { TI960_NAME, 0 }, + { }, +}; +MODULE_DEVICE_TABLE(i2c, ti960_id_table); + +static const struct dev_pm_ops ti960_pm_ops = { + .suspend = ti960_suspend, + .resume = ti960_resume, +}; + +static struct i2c_driver ti960_i2c_driver = { + .driver = { + .name = TI960_NAME, + .pm = &ti960_pm_ops, + }, + .probe = ti960_probe, + .remove = ti960_remove, + .id_table = ti960_id_table, +}; +module_i2c_driver(ti960_i2c_driver); + +MODULE_AUTHOR("Chen Meng J "); +MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("TI960 CSI2-Aggregator driver"); diff --git a/drivers/media/i2c/ti964-reg.h b/drivers/media/i2c/ti964-reg.h new file mode 100644 index 0000000000000..e916c41b74a15 --- /dev/null +++ b/drivers/media/i2c/ti964-reg.h @@ -0,0 +1,128 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (C) 2016 - 2018 Intel Corporation */ + +#ifndef TI964_REG_H +#define TI964_REG_H + +struct ti964_register_write { + u8 reg; + u8 val; +}; + +static const struct ti964_register_write ti964_frame_sync_settings[2][5] = { + { + {0x18, 0x00}, /* Disable frame sync. */ + {0x19, 0x00}, + {0x1a, 0x02}, + {0x1b, 0x0a}, + {0x1c, 0xd3}, + }, + { + {0x19, 0x01}, /* Frame sync high time.*/ + {0x1a, 0x15}, + {0x1b, 0x09}, /* Frame sync low time. */ + {0x1c, 0xC3}, + {0x18, 0x01}, /* Enable frame sync. and use high/low mode */ + } +}; + +static const struct ti964_register_write ti964_init_settings[] = { + {0x8, 0x1c}, + {0xa, 0x79}, + {0xb, 0x79}, + {0xd, 0xb9}, + {0x10, 0x91}, + {0x11, 0x85}, + {0x12, 0x89}, + {0x13, 0xc1}, + {0x17, 0xe1}, + {0x18, 0x0}, /* Disable frame sync. */ + {0x19, 0x0}, /* Frame sync high time. 
*/ + {0x1a, 0x2}, + {0x1b, 0xa}, /* Frame sync low time. */ + {0x1c, 0xd3}, + {0x21, 0x43}, /* Enable best effort mode. */ + {0xb0, 0x10}, + {0xb1, 0x14}, + {0xb2, 0x1f}, + {0xb3, 0x8}, + {0x32, 0x1}, /* Select CSI port 0 */ + {0x4c, 0x1}, /* Select RX port 0 */ + {0x58, 0x58}, + {0x5c, 0x18}, /* TI913 alias addr 0xc */ + {0x6d, 0x7f}, + {0x70, 0x1e}, /* YUV422_8 */ + {0x7c, 0x81}, /* Use RAW10 8bit mode */ + {0xd2, 0x84}, + {0x4c, 0x12}, /* Select RX port 1 */ + {0x58, 0x58}, + {0x5c, 0x1a}, /* TI913 alias addr 0xd */ + {0x6d, 0x7f}, + {0x70, 0x5e}, /* YUV422_8 */ + {0x7c, 0x81}, /* Use RAW10 8bit mode */ + {0xd2, 0x84}, + {0x4c, 0x24}, /* Select RX port 2*/ + {0x58, 0x58}, + {0x5c, 0x1c}, /* TI913 alias addr 0xe */ + {0x6d, 0x7f}, + {0x70, 0x9e}, /* YUV422_8 */ + {0x7c, 0x81}, /* Use RAW10 8bit mode */ + {0xd2, 0x84}, + {0x4c, 0x38}, /* Select RX port3 */ + {0x58, 0x58}, + {0x5c, 0x1e}, /* TI913 alias addr 0xf */ + {0x6d, 0x7f}, + {0x70, 0xde}, /* YUV422_8 */ + {0x7c, 0x81}, /* Use RAW10 8bit mode */ + {0xd2, 0x84}, + {0xbc, 0x00}, +}; + +static const struct ti964_register_write ti964_tp_settings[] = { + {0xb0, 0x0}, + {0xb1, 0x02}, + {0xb2, 0xb3}, + {0xb1, 0x01}, +}; +/*register definition */ +#define TI964_DEVID 0x0 +#define TI964_RESET 0x1 +#define TI964_CSI_PLL_CTL 0x1f +#define TI964_FS_CTL 0x18 +#define TI964_FWD_CTL1 0x20 +#define TI964_RX_PORT_SEL 0x4c +#define TI964_SLAVE_ID0 0x5d +#define TI964_SLAVE_ALIAS_ID0 0x65 +#define TI964_PORT_CONFIG 0x6d +#define TI964_BC_GPIO_CTL0 0x6e +#define TI964_RAW10_ID 0x70 +#define TI964_RAW12_ID 0x71 +#define TI964_PORT_CONFIG2 0x7c + +#define TI964_IND_ACC_DATA 0xb2 +#define TI964_CSI_CTL 0x33 + +/* register value definition */ +#define TI964_POWER_ON 0x1 +#define TI964_POWER_OFF 0x20 +#define TI964_FPD3_RAW10_100MHz 0x7f +#define TI964_FPD3_RAW12_50MHz 0x7d +#define TI964_FPD3_RAW12_75MHz 0x7e +#define TI964_RAW12 0x41 +#define TI964_RAW10_NORMAL 0x1 +#define TI964_RAW10_8BIT 0x81 +#define TI964_GPIO0_HIGH 0x09 
+#define TI964_GPIO0_LOW 0x08 +#define TI964_GPIO1_HIGH 0x90 +#define TI964_GPIO1_LOW 0x80 +#define TI964_GPIO0_FSIN 0x0a +#define TI964_GPIO1_FSIN 0xa0 +#define TI964_GPIO0_MASK 0x0f +#define TI964_GPIO1_MASK 0xf0 +#define TI964_MIPI_800MBPS 0x2 +#define TI964_MIPI_1600MBPS 0x0 +#define TI964_CSI_ENABLE 0x1 +#define TI964_CSI_CONTS_CLOCK 0x2 +#define TI964_CSI_SKEWCAL 0x40 +#define TI964_FSIN_ENABLE 0x1 +#endif diff --git a/drivers/media/i2c/ti964.c b/drivers/media/i2c/ti964.c new file mode 100644 index 0000000000000..606eef257ca62 --- /dev/null +++ b/drivers/media/i2c/ti964.c @@ -0,0 +1,1368 @@ +// SPDX-License-Identifier: GPL-2.0 +// Copyright (C) 2016 - 2018 Intel Corporation + +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include + +#include "ti964-reg.h" + +struct ti964_subdev { + struct v4l2_subdev *sd; + unsigned short rx_port; + unsigned short fsin_gpio; + unsigned short phy_i2c_addr; + unsigned short alias_i2c_addr; + char sd_name[16]; +}; + +struct ti964 { + struct v4l2_subdev sd; + struct media_pad pad[NR_OF_TI964_PADS]; + struct v4l2_ctrl_handler ctrl_handler; + struct ti964_pdata *pdata; + struct ti964_subdev sub_devs[NR_OF_TI964_SINK_PADS]; + struct crlmodule_platform_data subdev_pdata[NR_OF_TI964_SINK_PADS]; + const char *name; + + struct mutex mutex; + + struct regmap *regmap8; + struct regmap *regmap16; + + struct v4l2_mbus_framefmt *ffmts[NR_OF_TI964_PADS]; + struct rect *crop; + struct rect *compose; + + struct { + unsigned int *stream_id; + } *stream; /* stream enable/disable status, indexed by pad */ + struct { + unsigned int sink; + unsigned int source; + int flags; + } *route; /* pad level info, indexed by stream */ + + unsigned int nsinks; + unsigned int nsources; + unsigned int nstreams; + unsigned int npads; + + struct gpio_chip gc; + + struct v4l2_ctrl *link_freq; + struct v4l2_ctrl *test_pattern; +}; + +#define to_ti964(_sd) container_of(_sd, struct 
ti964, sd) + +static const s64 ti964_op_sys_clock[] = {400000000, 800000000}; +static const u8 ti964_op_sys_clock_reg_val[] = { + TI964_MIPI_800MBPS, + TI964_MIPI_1600MBPS +}; + +/* + * Order matters. + * + * 1. Bits-per-pixel, descending. + * 2. Bits-per-pixel compressed, descending. + * 3. Pixel order, same as in pixel_order_str. Formats for all four pixel + * orders must be defined. + */ +static const struct ti964_csi_data_format va_csi_data_formats[] = { + { MEDIA_BUS_FMT_YUYV8_1X16, 16, 16, PIXEL_ORDER_GBRG, 0x1e }, + { MEDIA_BUS_FMT_UYVY8_1X16, 16, 16, PIXEL_ORDER_GBRG, 0x1e }, + { MEDIA_BUS_FMT_SGRBG12_1X12, 12, 12, PIXEL_ORDER_GRBG, 0x2c }, + { MEDIA_BUS_FMT_SRGGB12_1X12, 12, 12, PIXEL_ORDER_RGGB, 0x2c }, + { MEDIA_BUS_FMT_SBGGR12_1X12, 12, 12, PIXEL_ORDER_BGGR, 0x2c }, + { MEDIA_BUS_FMT_SGBRG12_1X12, 12, 12, PIXEL_ORDER_GBRG, 0x2c }, + { MEDIA_BUS_FMT_SGRBG10_1X10, 10, 10, PIXEL_ORDER_GRBG, 0x2b }, + { MEDIA_BUS_FMT_SRGGB10_1X10, 10, 10, PIXEL_ORDER_RGGB, 0x2b }, + { MEDIA_BUS_FMT_SBGGR10_1X10, 10, 10, PIXEL_ORDER_BGGR, 0x2b }, + { MEDIA_BUS_FMT_SGBRG10_1X10, 10, 10, PIXEL_ORDER_GBRG, 0x2b }, + { MEDIA_BUS_FMT_SGRBG8_1X8, 8, 8, PIXEL_ORDER_GRBG, 0x2a }, + { MEDIA_BUS_FMT_SRGGB8_1X8, 8, 8, PIXEL_ORDER_RGGB, 0x2a }, + { MEDIA_BUS_FMT_SBGGR8_1X8, 8, 8, PIXEL_ORDER_BGGR, 0x2a }, + { MEDIA_BUS_FMT_SGBRG8_1X8, 8, 8, PIXEL_ORDER_GBRG, 0x2a }, +}; + +static const uint32_t ti964_supported_codes_pad[] = { + MEDIA_BUS_FMT_YUYV8_1X16, + MEDIA_BUS_FMT_UYVY8_1X16, + MEDIA_BUS_FMT_SBGGR12_1X12, + MEDIA_BUS_FMT_SGBRG12_1X12, + MEDIA_BUS_FMT_SGRBG12_1X12, + MEDIA_BUS_FMT_SRGGB12_1X12, + MEDIA_BUS_FMT_SBGGR10_1X10, + MEDIA_BUS_FMT_SGBRG10_1X10, + MEDIA_BUS_FMT_SGRBG10_1X10, + MEDIA_BUS_FMT_SRGGB10_1X10, + MEDIA_BUS_FMT_SBGGR8_1X8, + MEDIA_BUS_FMT_SGBRG8_1X8, + MEDIA_BUS_FMT_SGRBG8_1X8, + MEDIA_BUS_FMT_SRGGB8_1X8, + 0, +}; + +static const uint32_t *ti964_supported_codes[] = { + ti964_supported_codes_pad, +}; + +static struct regmap_config ti964_reg_config8 = { + .reg_bits = 
8, + .val_bits = 8, +}; + +static struct regmap_config ti964_reg_config16 = { + .reg_bits = 16, + .val_bits = 8, + .reg_format_endian = REGMAP_ENDIAN_BIG, +}; + +static int ti964_reg_set_bit(struct ti964 *va, unsigned char reg, + unsigned char bit, unsigned char val) +{ + int ret; + unsigned int reg_val; + + ret = regmap_read(va->regmap8, reg, ®_val); + if (ret) + return ret; + if (val) + reg_val |= 1 << bit; + else + reg_val &= ~(1 << bit); + + return regmap_write(va->regmap8, reg, reg_val); +} + +static int ti964_map_phy_i2c_addr(struct ti964 *va, unsigned short rx_port, + unsigned short addr) +{ + int rval; + + rval = regmap_write(va->regmap8, TI964_RX_PORT_SEL, + (rx_port << 4) + (1 << rx_port)); + if (rval) + return rval; + + return regmap_write(va->regmap8, TI964_SLAVE_ID0, addr); +} + +static int ti964_map_alias_i2c_addr(struct ti964 *va, unsigned short rx_port, + unsigned short addr) +{ + int rval; + + rval = regmap_write(va->regmap8, TI964_RX_PORT_SEL, + (rx_port << 4) + (1 << rx_port)); + if (rval) + return rval; + + return regmap_write(va->regmap8, TI964_SLAVE_ALIAS_ID0, addr); +} + +static int ti964_fsin_gpio_init(struct ti964 *va, unsigned short rx_port, + unsigned short fsin_gpio) +{ + int rval; + int reg_val; + + rval = regmap_read(va->regmap8, TI964_FS_CTL, ®_val); + if (rval) { + dev_dbg(va->sd.dev, "Failed to read gpio status.\n"); + return rval; + } + + if (!reg_val & TI964_FSIN_ENABLE) { + dev_dbg(va->sd.dev, "FSIN not enabled, skip config FSIN GPIO.\n"); + return 0; + } + + rval = regmap_write(va->regmap8, TI964_RX_PORT_SEL, + (rx_port << 4) + (1 << rx_port)); + if (rval) + return rval; + + rval = regmap_read(va->regmap8, TI964_BC_GPIO_CTL0, ®_val); + if (rval) { + dev_dbg(va->sd.dev, "Failed to read gpio status.\n"); + return rval; + } + + if (fsin_gpio == 0) { + reg_val &= ~TI964_GPIO0_MASK; + reg_val |= TI964_GPIO0_FSIN; + } else { + reg_val &= ~TI964_GPIO1_MASK; + reg_val |= TI964_GPIO1_FSIN; + } + + rval = regmap_write(va->regmap8, 
TI964_BC_GPIO_CTL0, reg_val); + if (rval) + dev_dbg(va->sd.dev, "Failed to set gpio.\n"); + + return rval; +} + +static int ti964_get_routing(struct v4l2_subdev *sd, + struct v4l2_subdev_routing *route) +{ + struct ti964 *va = to_ti964(sd); + int i; + + for (i = 0; i < min(va->nstreams, route->num_routes); ++i) { + unsigned int sink = va->route[i].sink; + unsigned int source = va->route[i].source; + + route->routes[i].sink_pad = sink; + route->routes[i].sink_stream = + va->stream[sink].stream_id[0]; + route->routes[i].source_pad = source; + route->routes[i].source_stream = + va->stream[source].stream_id[sink]; + route->routes[i].flags = va->route[i].flags; + } + + route->num_routes = i; + + return 0; +} + +static int ti964_set_routing(struct v4l2_subdev *sd, + struct v4l2_subdev_routing *route) +{ + struct ti964 *va = to_ti964(sd); + int i, j, ret = 0; + + for (i = 0; i < min(route->num_routes, va->nstreams); ++i) { + struct v4l2_subdev_route *t = &route->routes[i]; + unsigned int sink = t->sink_pad; + unsigned int source = t->source_pad; + + if (t->sink_stream > va->nstreams - 1 || + t->source_stream > va->nstreams - 1) + continue; + + if (t->source_pad != TI964_PAD_SOURCE) + continue; + + for (j = 0; j < va->nstreams; j++) { + if (sink == va->route[j].sink && + source == va->route[j].source) + break; + } + + if (j == va->nstreams) + continue; + + va->stream[sink].stream_id[0] = t->sink_stream; + va->stream[source].stream_id[sink] = t->source_stream; + + if (t->flags & V4L2_SUBDEV_ROUTE_FL_ACTIVE) + va->route[j].flags |= + V4L2_SUBDEV_ROUTE_FL_ACTIVE; + else if (!(t->flags & V4L2_SUBDEV_ROUTE_FL_ACTIVE)) + va->route[j].flags &= + (~V4L2_SUBDEV_ROUTE_FL_ACTIVE); + } + + return ret; +} + +static int ti964_enum_mbus_code(struct v4l2_subdev *sd, + struct v4l2_subdev_pad_config *cfg, + struct v4l2_subdev_mbus_code_enum *code) +{ + struct ti964 *va = to_ti964(sd); + const uint32_t *supported_code = + ti964_supported_codes[code->pad]; + bool next_stream = false; + int i; 
+ + if (code->stream & V4L2_SUBDEV_FLAG_NEXT_STREAM) { + next_stream = true; + code->stream &= ~V4L2_SUBDEV_FLAG_NEXT_STREAM; + } + + if (code->stream > va->nstreams) + return -EINVAL; + + if (next_stream) { + if (!(va->pad[code->pad].flags & MEDIA_PAD_FL_MULTIPLEX)) + return -EINVAL; + if (code->stream < va->nstreams - 1) { + code->stream++; + return 0; + } else { + return -EINVAL; + } + } + + for (i = 0; supported_code[i]; i++) { + if (i == code->index) { + code->code = supported_code[i]; + return 0; + } + } + + return -EINVAL; +} + +static const struct ti964_csi_data_format + *ti964_validate_csi_data_format(u32 code) +{ + unsigned int i; + + for (i = 0; i < ARRAY_SIZE(va_csi_data_formats); i++) { + if (va_csi_data_formats[i].code == code) + return &va_csi_data_formats[i]; + } + + return &va_csi_data_formats[0]; +} + +static int ti964_get_frame_desc(struct v4l2_subdev *sd, + unsigned int pad, struct v4l2_mbus_frame_desc *desc) +{ + struct ti964 *va = to_ti964(sd); + struct v4l2_mbus_frame_desc_entry *entry = desc->entry; + u8 vc = 0; + int i; + + desc->type = V4L2_MBUS_FRAME_DESC_TYPE_CSI2; + desc->num_entries = min_t(int, va->nstreams, V4L2_FRAME_DESC_ENTRY_MAX); + + for (i = 0; i < desc->num_entries; i++) { + struct v4l2_mbus_framefmt *ffmt = + &va->ffmts[TI964_PAD_SOURCE][i]; + const struct ti964_csi_data_format *csi_format = + ti964_validate_csi_data_format(ffmt->code); + + entry->two_dim.width = ffmt->width; + entry->two_dim.height = ffmt->height; + entry->pixelcode = ffmt->code; + entry->bus.csi2.channel = vc++; + entry->bpp = csi_format->compressed; + entry++; + } + + return 0; +} + +static struct v4l2_mbus_framefmt * +__ti964_get_ffmt(struct v4l2_subdev *subdev, + struct v4l2_subdev_pad_config *cfg, + unsigned int pad, unsigned int which, + unsigned int stream) +{ + struct ti964 *va = to_ti964(subdev); + + if (which == V4L2_SUBDEV_FORMAT_TRY) + return v4l2_subdev_get_try_format(subdev, cfg, pad); + else + return &va->ffmts[pad][stream]; +} + +static int 
ti964_get_format(struct v4l2_subdev *subdev, + struct v4l2_subdev_pad_config *cfg, + struct v4l2_subdev_format *fmt) +{ + struct ti964 *va = to_ti964(subdev); + + if (fmt->stream > va->nstreams) + return -EINVAL; + + mutex_lock(&va->mutex); + fmt->format = *__ti964_get_ffmt(subdev, cfg, fmt->pad, + fmt->which, fmt->stream); + mutex_unlock(&va->mutex); + + dev_dbg(subdev->dev, "subdev_format: which: %s, pad: %d, stream: %d.\n", + fmt->which == V4L2_SUBDEV_FORMAT_ACTIVE ? + "V4L2_SUBDEV_FORMAT_ACTIVE" : "V4L2_SUBDEV_FORMAT_TRY", + fmt->pad, fmt->stream); + + dev_dbg(subdev->dev, "framefmt: width: %d, height: %d, code: 0x%x.\n", + fmt->format.width, fmt->format.height, fmt->format.code); + + return 0; +} + +static int ti964_set_format(struct v4l2_subdev *subdev, + struct v4l2_subdev_pad_config *cfg, + struct v4l2_subdev_format *fmt) +{ + struct ti964 *va = to_ti964(subdev); + const struct ti964_csi_data_format *csi_format; + struct v4l2_mbus_framefmt *ffmt; + + if (fmt->stream > va->nstreams) + return -EINVAL; + + csi_format = ti964_validate_csi_data_format( + fmt->format.code); + + mutex_lock(&va->mutex); + ffmt = __ti964_get_ffmt(subdev, cfg, fmt->pad, fmt->which, + fmt->stream); + + if (fmt->which == V4L2_SUBDEV_FORMAT_ACTIVE) { + ffmt->width = fmt->format.width; + ffmt->height = fmt->format.height; + ffmt->code = csi_format->code; + } + fmt->format = *ffmt; + mutex_unlock(&va->mutex); + + dev_dbg(subdev->dev, "framefmt: width: %d, height: %d, code: 0x%x.\n", + ffmt->width, ffmt->height, ffmt->code); + + return 0; +} + +static int ti964_open(struct v4l2_subdev *subdev, + struct v4l2_subdev_fh *fh) +{ + struct v4l2_mbus_framefmt *try_fmt = + v4l2_subdev_get_try_format(subdev, fh->pad, 0); + + struct v4l2_subdev_format fmt = { + .which = V4L2_SUBDEV_FORMAT_TRY, + .pad = TI964_PAD_SOURCE, + .format = { + .width = TI964_MAX_WIDTH, + .height = TI964_MAX_HEIGHT, + .code = MEDIA_BUS_FMT_YUYV8_1X16, + }, + .stream = 0, + }; + + *try_fmt = fmt.format; + + return 0; +} + 
+static int ti964_registered(struct v4l2_subdev *subdev) +{ + struct ti964 *va = to_ti964(subdev); + struct i2c_client *client = v4l2_get_subdevdata(subdev); + int i, j, k, l, rval; + + for (i = 0, k = 0; i < va->pdata->subdev_num; i++) { + struct ti964_subdev_info *info = + &va->pdata->subdev_info[i]; + struct crlmodule_platform_data *pdata = + (struct crlmodule_platform_data *) + info->board_info.platform_data; + + if (k >= va->nsinks) + break; + + /* + * The sensors should not share the same pdata structure. + * Clone the pdata for each sensor. + */ + memcpy(&va->subdev_pdata[k], pdata, sizeof(*pdata)); + if (va->subdev_pdata[k].xshutdown != 0 && + va->subdev_pdata[k].xshutdown != 1) { + dev_err(va->sd.dev, "xshutdown(%d) must be 0 or 1 to connect.\n", + va->subdev_pdata[k].xshutdown); + return -EINVAL; + } + + /* If 0 is xshutdown, then 1 would be FSIN, vice versa. */ + va->sub_devs[k].fsin_gpio = 1 - va->subdev_pdata[k].xshutdown; + + /* Spin sensor subdev suffix name */ + va->subdev_pdata[k].suffix = info->suffix; + + /* + * Change the gpio value to have xshutdown + * and rx port included, so in gpio_set those + * can be caculated from it. + */ + va->subdev_pdata[k].xshutdown += va->gc.base + + info->rx_port * NR_OF_GPIOS_PER_PORT; + info->board_info.platform_data = &va->subdev_pdata[k]; + + if (!info->phy_i2c_addr || !info->board_info.addr) { + dev_err(va->sd.dev, "can't find the physical and alias addr.\n"); + return -EINVAL; + } + + /* Map PHY I2C address. */ + rval = ti964_map_phy_i2c_addr(va, info->rx_port, + info->phy_i2c_addr); + if (rval) + return rval; + + /* Map 7bit ALIAS I2C address. 
*/ + rval = ti964_map_alias_i2c_addr(va, info->rx_port, + info->board_info.addr << 1); + if (rval) + return rval; + + /* aggre and subdves share the same i2c bus */ + va->sub_devs[k].sd = v4l2_i2c_new_subdev_board( + va->sd.v4l2_dev, client->adapter, + &info->board_info, 0); + if (!va->sub_devs[k].sd) { + dev_err(va->sd.dev, + "can't create new i2c subdev %d-%04x\n", + info->i2c_adapter_id, + info->board_info.addr); + continue; + } + va->sub_devs[k].rx_port = info->rx_port; + va->sub_devs[k].phy_i2c_addr = info->phy_i2c_addr; + va->sub_devs[k].alias_i2c_addr = info->board_info.addr; + memcpy(va->sub_devs[k].sd_name, + va->subdev_pdata[k].module_name, + min(sizeof(va->sub_devs[k].sd_name) - 1, + sizeof(va->subdev_pdata[k].module_name) - 1)); + + for (j = 0; j < va->sub_devs[k].sd->entity.num_pads; j++) { + if (va->sub_devs[k].sd->entity.pads[j].flags & + MEDIA_PAD_FL_SOURCE) + break; + } + + if (j == va->sub_devs[k].sd->entity.num_pads) { + dev_warn(va->sd.dev, + "no source pad in subdev %d-%04x\n", + info->i2c_adapter_id, + info->board_info.addr); + return -ENOENT; + } + + for (l = 0; l < va->nsinks; l++) { +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 5, 0) + rval = media_entity_create_link( +#else + rval = media_create_pad_link( +#endif + &va->sub_devs[k].sd->entity, j, + &va->sd.entity, l, 0); + if (rval) { + dev_err(va->sd.dev, + "can't create link to %d-%04x\n", + info->i2c_adapter_id, + info->board_info.addr); + return -EINVAL; + } + } + k++; + } + + return 0; +} + +static int ti964_set_power(struct v4l2_subdev *subdev, int on) +{ + struct ti964 *va = to_ti964(subdev); + int ret; + u8 val; + + ret = regmap_write(va->regmap8, TI964_RESET, + (on) ? TI964_POWER_ON : TI964_POWER_OFF); + if (ret || !on) + return ret; + + /* Configure MIPI clock bsaed on control value. 
*/ + ret = regmap_write(va->regmap8, TI964_CSI_PLL_CTL, + ti964_op_sys_clock_reg_val[ + v4l2_ctrl_g_ctrl(va->link_freq)]); + if (ret) + return ret; + val = TI964_CSI_ENABLE; + val |= TI964_CSI_CONTS_CLOCK; + /* Enable skew calculation when 1.6Gbps output is enabled. */ + if (v4l2_ctrl_g_ctrl(va->link_freq)) + val |= TI964_CSI_SKEWCAL; + return regmap_write(va->regmap8, TI964_CSI_CTL, val); +} + +static bool ti964_broadcast_mode(struct v4l2_subdev *subdev) +{ + struct ti964 *va = to_ti964(subdev); + struct v4l2_subdev_format fmt = { 0 }; + struct v4l2_subdev *sd; + char *sd_name = NULL; + bool first = true; + unsigned int h = 0, w = 0, code = 0; + bool single_stream = true; + int i, rval; + + for (i = 0; i < NR_OF_TI964_SINK_PADS; i++) { + struct media_pad *remote_pad = + media_entity_remote_pad(&va->pad[i]); + + if (!remote_pad) + continue; + + sd = media_entity_to_v4l2_subdev(remote_pad->entity); + fmt.which = V4L2_SUBDEV_FORMAT_ACTIVE; + fmt.pad = remote_pad->index; + fmt.stream = 0; + + rval = v4l2_subdev_call(sd, pad, get_fmt, NULL, &fmt); + if (rval) + return false; + + if (first) { + sd_name = va->sub_devs[i].sd_name; + h = fmt.format.height; + w = fmt.format.width; + code = fmt.format.code; + first = false; + } else { + if (strncmp(sd_name, va->sub_devs[i].sd_name, 16)) + return false; + + if (h != fmt.format.height || w != fmt.format.width + || code != fmt.format.code) + return false; + + single_stream = false; + } + } + + if (single_stream) + return false; + + return true; +} + +static int ti964_tp_set_stream(struct v4l2_subdev *subdev, int enable) +{ + struct ti964 *va = to_ti964(subdev); + int i, rval; + + dev_dbg(va->sd.dev, "TI964 starts to stream test pattern.\n"); + for (i = 0; i < ARRAY_SIZE(ti964_tp_settings); i++) { + rval = regmap_write(va->regmap8, + ti964_tp_settings[i].reg, + ti964_tp_settings[i].val); + if (rval) { + dev_err(va->sd.dev, "Register write error.\n"); + return rval; + } + } + + rval = regmap_write(va->regmap8, TI964_IND_ACC_DATA, 
enable); + if (rval) { + dev_err(va->sd.dev, "Register write error.\n"); + return rval; + } + + return 0; +} + +static int ti964_rx_port_config(struct ti964 *va, int sink, int rx_port) +{ + int rval; + u8 bpp; + int port_cfg2_val; + int vc_mode_reg_index; + int vc_mode_reg_val; + int mipi_dt_type; + int high_fv_flags = va->subdev_pdata[sink].high_framevalid_flags; + + /* Select RX port. */ + rval = regmap_write(va->regmap8, TI964_RX_PORT_SEL, + (rx_port << 4) + (1 << rx_port)); + if (rval) { + dev_err(va->sd.dev, "Failed to select RX port.\n"); + return rval; + } + + /* Set RX port mode. */ + bpp = ti964_validate_csi_data_format( + va->ffmts[sink][0].code)->width; + rval = regmap_write(va->regmap8, TI964_PORT_CONFIG, + (bpp == 12) ? + TI964_FPD3_RAW12_75MHz : TI964_FPD3_RAW10_100MHz); + if (rval) { + dev_err(va->sd.dev, "Failed to set port config.\n"); + return rval; + } + + mipi_dt_type = ti964_validate_csi_data_format( + va->ffmts[sink][0].code)->mipi_dt_code; + /* + * RAW8 and YUV422 need to enable RAW10 bit mode. + * RAW12 need to set the RAW10_8bit to reserved. 
+ */ + switch (bpp) { + case 8: + case 16: + port_cfg2_val = TI964_RAW10_8BIT & (~high_fv_flags); + vc_mode_reg_index = TI964_RAW10_ID; + break; + case 12: + port_cfg2_val = TI964_RAW12; + vc_mode_reg_index = TI964_RAW12_ID; + break; + default: + port_cfg2_val = TI964_RAW10_NORMAL & (~high_fv_flags); + vc_mode_reg_index = TI964_RAW10_ID; + break; + } + + vc_mode_reg_val = mipi_dt_type | sink << 6; + rval = regmap_write(va->regmap8, vc_mode_reg_index, vc_mode_reg_val); + if (rval) { + dev_err(va->sd.dev, "Failed to set virtual channel & data type.\n"); + return rval; + } + + rval = regmap_write(va->regmap8, TI964_PORT_CONFIG2, port_cfg2_val); + if (rval) { + dev_err(va->sd.dev, "Failed to set port config2.\n"); + return rval; + } + + return 0; +} + +static int ti964_map_subdevs_addr(struct ti964 *va) +{ + unsigned short rx_port, phy_i2c_addr, alias_i2c_addr; + int i, rval; + + for (i = 0; i < NR_OF_TI964_SINK_PADS; i++) { + rx_port = va->sub_devs[i].rx_port; + phy_i2c_addr = va->sub_devs[i].phy_i2c_addr; + alias_i2c_addr = va->sub_devs[i].alias_i2c_addr; + + if (!phy_i2c_addr || !alias_i2c_addr) + continue; + + rval = ti964_map_phy_i2c_addr(va, rx_port, phy_i2c_addr); + if (rval) + return rval; + + /* set 7bit alias i2c addr */ + rval = ti964_map_alias_i2c_addr(va, rx_port, + alias_i2c_addr << 1); + if (rval) + return rval; + } + + return 0; +} + +static int ti964_find_subdev_index(struct ti964 *va, struct v4l2_subdev *sd) +{ + int i; + + for (i = 0; i < NR_OF_TI964_SINK_PADS; i++) { + if (va->sub_devs[i].sd == sd) + return i; + } + + WARN_ON(1); + + return -EINVAL; +} + +static int ti964_set_frame_sync(struct ti964 *va, int enable) +{ + int i, rval; + int index = !!enable; + + for (i = 0; i < ARRAY_SIZE(ti964_frame_sync_settings[index]); i++) { + rval = regmap_write(va->regmap8, + ti964_frame_sync_settings[index][i].reg, + ti964_frame_sync_settings[index][i].val); + if (rval) { + dev_err(va->sd.dev, "Failed to %s frame sync\n", + enable ? 
"enable" : "disable"); + return rval; + } + } + + return 0; +} + +static int ti964_set_stream(struct v4l2_subdev *subdev, int enable) +{ + struct ti964 *va = to_ti964(subdev); + struct v4l2_subdev *sd; + int i, j, rval; + bool broadcast; + unsigned int rx_port; + int sd_idx = -1; + DECLARE_BITMAP(rx_port_enabled, 32); + + dev_dbg(va->sd.dev, "TI964 set stream, enable %d\n", enable); + + if (v4l2_ctrl_g_ctrl(va->test_pattern)) + return ti964_tp_set_stream(subdev, enable); + + broadcast = ti964_broadcast_mode(subdev); + if (enable) + dev_info(va->sd.dev, "TI964 in %s mode", + broadcast ? "broadcast" : "non broadcast"); + + bitmap_zero(rx_port_enabled, 32); + for (i = 0; i < NR_OF_TI964_SINK_PADS; i++) { + struct media_pad *remote_pad = + media_entity_remote_pad(&va->pad[i]); + + if (!remote_pad) + continue; + + /* Find ti964 subdev */ + sd = media_entity_to_v4l2_subdev(remote_pad->entity); + j = ti964_find_subdev_index(va, sd); + if (j < 0) + return -EINVAL; + rx_port = va->sub_devs[j].rx_port; + rval = ti964_rx_port_config(va, i, rx_port); + if (rval < 0) + return rval; + + bitmap_set(rx_port_enabled, rx_port, 1); + + if (broadcast && sd_idx == -1) { + sd_idx = j; + } else if (broadcast) { + rval = ti964_map_alias_i2c_addr(va, rx_port, + va->sub_devs[sd_idx].alias_i2c_addr << 1); + if (rval < 0) + return rval; + } else { + /* Stream on/off sensor */ + rval = v4l2_subdev_call(sd, video, s_stream, enable); + if (rval) { + dev_err(va->sd.dev, + "Failed to set stream for %s, enable %d\n", + sd->name, enable); + return rval; + } + + /* RX port fordward */ + rval = ti964_reg_set_bit(va, TI964_FWD_CTL1, + rx_port + 4, !enable); + if (rval) { + dev_err(va->sd.dev, + "Failed to forward RX port%d. 
enable %d\n", + i, enable); + return rval; + } + + } + } + + if (broadcast) { + if (sd_idx < 0) { + dev_err(va->sd.dev, "No sensor connected!\n"); + return -ENODEV; + } + sd = va->sub_devs[sd_idx].sd; + rval = v4l2_subdev_call(sd, video, s_stream, enable); + if (rval) { + dev_err(va->sd.dev, + "Failed to set stream for %s. enable %d\n", + sd->name, enable); + return rval; + } + + rval = ti964_set_frame_sync(va, enable); + if (rval) { + dev_err(va->sd.dev, + "Failed to set frame sync.\n"); + return rval; + } + + for (i = 0; i < NR_OF_TI964_SINK_PADS; i++) { + if (enable && test_bit(i, rx_port_enabled)) { + rval = ti964_fsin_gpio_init(va, + va->sub_devs[i].rx_port, + va->sub_devs[i].fsin_gpio); + if (rval) { + dev_err(va->sd.dev, + "Failed to enable frame sync gpio init.\n"); + return rval; + } + } + } + + for (i = 0; i < NR_OF_TI964_SINK_PADS; i++) { + if (!test_bit(i, rx_port_enabled)) + continue; + + /* RX port fordward */ + rval = ti964_reg_set_bit(va, TI964_FWD_CTL1, + i + 4, !enable); + if (rval) { + dev_err(va->sd.dev, + "Failed to forward RX port%d. enable %d\n", + i, enable); + return rval; + } + } + + /* + * Restore each subdev i2c address as we may + * touch it later. 
+ */ + rval = ti964_map_subdevs_addr(va); + if (rval) + return rval; + } + + return 0; +} + +static struct v4l2_subdev_internal_ops ti964_sd_internal_ops = { + .open = ti964_open, + .registered = ti964_registered, +}; + +static bool ti964_sd_has_route(struct media_entity *entity, + unsigned int pad0, unsigned int pad1, int *stream) +{ + struct ti964 *va = to_ti964(media_entity_to_v4l2_subdev(entity)); + + if (va == NULL || stream == NULL || + *stream >= va->nstreams || *stream < 0) + return false; + + if ((va->route[*stream].flags & V4L2_SUBDEV_ROUTE_FL_ACTIVE) && + ((va->route[*stream].source == pad0 && + va->route[*stream].sink == pad1) || + (va->route[*stream].source == pad1 && + va->route[*stream].sink == pad0))) + return true; + + return false; +} + +static const struct media_entity_operations ti964_sd_entity_ops = { + .has_route = ti964_sd_has_route, +}; + +static const struct v4l2_subdev_video_ops ti964_sd_video_ops = { + .s_stream = ti964_set_stream, +}; + +static const struct v4l2_subdev_core_ops ti964_core_subdev_ops = { + .s_power = ti964_set_power, +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 8, 0) + .g_ctrl = v4l2_subdev_g_ctrl, + .s_ctrl = v4l2_subdev_s_ctrl, + .g_ext_ctrls = v4l2_subdev_g_ext_ctrls, + .s_ext_ctrls = v4l2_subdev_s_ext_ctrls, + .try_ext_ctrls = v4l2_subdev_try_ext_ctrls, + .queryctrl = v4l2_subdev_queryctrl, +#endif +}; + +static int ti964_s_ctrl(struct v4l2_ctrl *ctrl) +{ + return 0; +} + +static const struct v4l2_ctrl_ops ti964_ctrl_ops = { + .s_ctrl = ti964_s_ctrl, +}; + +static const struct v4l2_ctrl_config ti964_controls[] = { + { + .ops = &ti964_ctrl_ops, + .id = V4L2_CID_LINK_FREQ, + .name = "V4L2_CID_LINK_FREQ", + .type = V4L2_CTRL_TYPE_INTEGER_MENU, + .max = ARRAY_SIZE(ti964_op_sys_clock) - 1, + .min = 0, + .step = 0, + .def = 0, + .qmenu_int = ti964_op_sys_clock, + }, + { + .ops = &ti964_ctrl_ops, + .id = V4L2_CID_TEST_PATTERN, + .name = "V4L2_CID_TEST_PATTERN", + .type = V4L2_CTRL_TYPE_INTEGER, + .max = 1, + .min = 0, + .step 
= 1, + .def = 0, + }, +}; + +static const struct v4l2_subdev_pad_ops ti964_sd_pad_ops = { + .get_fmt = ti964_get_format, + .set_fmt = ti964_set_format, + .get_frame_desc = ti964_get_frame_desc, + .enum_mbus_code = ti964_enum_mbus_code, + .set_routing = ti964_set_routing, + .get_routing = ti964_get_routing, +}; + +static struct v4l2_subdev_ops ti964_sd_ops = { + .core = &ti964_core_subdev_ops, + .video = &ti964_sd_video_ops, + .pad = &ti964_sd_pad_ops, +}; + +static int ti964_register_subdev(struct ti964 *va) +{ + int i, rval; + struct i2c_client *client = v4l2_get_subdevdata(&va->sd); + + v4l2_subdev_init(&va->sd, &ti964_sd_ops); + snprintf(va->sd.name, sizeof(va->sd.name), "TI964 %c", + va->pdata->suffix); + + va->sd.flags |= V4L2_SUBDEV_FL_HAS_DEVNODE | + V4L2_SUBDEV_FL_HAS_SUBSTREAMS; + + va->sd.internal_ops = &ti964_sd_internal_ops; + va->sd.entity.ops = &ti964_sd_entity_ops; + + v4l2_set_subdevdata(&va->sd, client); + + v4l2_ctrl_handler_init(&va->ctrl_handler, + ARRAY_SIZE(ti964_controls)); + + if (va->ctrl_handler.error) { + dev_err(va->sd.dev, + "Failed to init ti964 controls. 
ERR: %d!\n", + va->ctrl_handler.error); + return va->ctrl_handler.error; + } + + va->sd.ctrl_handler = &va->ctrl_handler; + + for (i = 0; i < ARRAY_SIZE(ti964_controls); i++) { + const struct v4l2_ctrl_config *cfg = + &ti964_controls[i]; + struct v4l2_ctrl *ctrl; + + ctrl = v4l2_ctrl_new_custom(&va->ctrl_handler, cfg, NULL); + if (!ctrl) { + dev_err(va->sd.dev, + "Failed to create ctrl %s!\n", cfg->name); + rval = va->ctrl_handler.error; + goto failed_out; + } + } + + va->link_freq = v4l2_ctrl_find(&va->ctrl_handler, V4L2_CID_LINK_FREQ); + va->test_pattern = v4l2_ctrl_find(&va->ctrl_handler, + V4L2_CID_TEST_PATTERN); + + for (i = 0; i < va->nsinks; i++) + va->pad[i].flags = MEDIA_PAD_FL_SINK; + va->pad[TI964_PAD_SOURCE].flags = + MEDIA_PAD_FL_SOURCE | MEDIA_PAD_FL_MULTIPLEX; +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 5, 0) + rval = media_entity_init(&va->sd.entity, NR_OF_TI964_PADS, va->pad, 0); +#else + rval = media_entity_pads_init(&va->sd.entity, + NR_OF_TI964_PADS, va->pad); +#endif + if (rval) { + dev_err(va->sd.dev, + "Failed to init media entity for ti964!\n"); + goto failed_out; + } + + return 0; + +failed_out: + v4l2_ctrl_handler_free(&va->ctrl_handler); + return rval; +} + +static int ti964_init(struct ti964 *va) +{ + unsigned int reset_gpio = va->pdata->reset_gpio; + int i, rval; + unsigned int val; + + gpio_set_value(reset_gpio, 1); + usleep_range(2000, 3000); + dev_dbg(va->sd.dev, "Setting reset gpio %d to 1.\n", reset_gpio); + + rval = regmap_read(va->regmap8, TI964_DEVID, &val); + if (rval) { + dev_err(va->sd.dev, "Failed to read device ID of TI964!\n"); + return rval; + } + dev_info(va->sd.dev, "TI964 device ID: 0x%X\n", val); + + for (i = 0; i < ARRAY_SIZE(ti964_init_settings); i++) { + rval = regmap_write(va->regmap8, + ti964_init_settings[i].reg, + ti964_init_settings[i].val); + if (rval) + return rval; + } + + rval = ti964_map_subdevs_addr(va); + if (rval) + return rval; + + return 0; +} + +static void ti964_gpio_set(struct gpio_chip *chip, 
unsigned gpio, int value) +{ +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 4, 0) + struct i2c_client *client = to_i2c_client(chip->dev); +#else + struct i2c_client *client = to_i2c_client(chip->parent); +#endif + struct v4l2_subdev *subdev = i2c_get_clientdata(client); + struct ti964 *va = to_ti964(subdev); + unsigned int reg_val; + int rx_port, gpio_port; + int ret; + + if (gpio >= NR_OF_TI964_GPIOS) + return; + + rx_port = gpio / NR_OF_GPIOS_PER_PORT; + gpio_port = gpio % NR_OF_GPIOS_PER_PORT; + + ret = regmap_write(va->regmap8, TI964_RX_PORT_SEL, + (rx_port << 4) + (1 << rx_port)); + if (ret) { + dev_dbg(&client->dev, "Failed to select RX port.\n"); + return; + } + ret = regmap_read(va->regmap8, TI964_BC_GPIO_CTL0, &reg_val); + if (ret) { + dev_dbg(&client->dev, "Failed to read gpio status.\n"); + return; + } + + if (gpio_port == 0) { + reg_val &= ~TI964_GPIO0_MASK; + reg_val |= value ? TI964_GPIO0_HIGH : TI964_GPIO0_LOW; + } else { + reg_val &= ~TI964_GPIO1_MASK; + reg_val |= value ? TI964_GPIO1_HIGH : TI964_GPIO1_LOW; + } + + ret = regmap_write(va->regmap8, TI964_BC_GPIO_CTL0, reg_val); + if (ret) + dev_dbg(&client->dev, "Failed to set gpio.\n"); +} + +static int ti964_gpio_direction_output(struct gpio_chip *chip, + unsigned gpio, int level) +{ + return 0; +} + +static int ti964_probe(struct i2c_client *client, + const struct i2c_device_id *devid) +{ + struct ti964 *va; + int i, rval = 0; + + if (client->dev.platform_data == NULL) + return -ENODEV; + + va = devm_kzalloc(&client->dev, sizeof(*va), GFP_KERNEL); + if (!va) + return -ENOMEM; + + va->pdata = client->dev.platform_data; + + va->nsources = NR_OF_TI964_SOURCE_PADS; + va->nsinks = NR_OF_TI964_SINK_PADS; + va->npads = NR_OF_TI964_PADS; + va->nstreams = NR_OF_TI964_STREAMS; + + va->crop = devm_kcalloc(&client->dev, va->npads, + sizeof(struct v4l2_rect), GFP_KERNEL); + + va->compose = devm_kcalloc(&client->dev, va->npads, + sizeof(struct v4l2_rect), GFP_KERNEL); + + va->route = devm_kcalloc(&client->dev, 
va->nstreams, + sizeof(*va->route), GFP_KERNEL); + + va->stream = devm_kcalloc(&client->dev, va->npads, + sizeof(*va->stream), GFP_KERNEL); + + if (!va->crop || !va->compose || !va->route || !va->stream) + return -ENOMEM; + + for (i = 0; i < va->npads; i++) { + va->ffmts[i] = devm_kcalloc(&client->dev, va->nstreams, + sizeof(struct v4l2_mbus_framefmt), + GFP_KERNEL); + if (!va->ffmts[i]) + return -ENOMEM; + + va->stream[i].stream_id = + devm_kcalloc(&client->dev, va->nsinks, + sizeof(*va->stream[i].stream_id), GFP_KERNEL); + if (!va->stream[i].stream_id) + return -ENOMEM; + } + + for (i = 0; i < va->nstreams; i++) { + va->route[i].sink = i; + va->route[i].source = TI964_PAD_SOURCE; + va->route[i].flags = 0; + } + + for (i = 0; i < va->nsinks; i++) { + va->stream[i].stream_id[0] = i; + va->stream[TI964_PAD_SOURCE].stream_id[i] = i; + } + + va->regmap8 = devm_regmap_init_i2c(client, + &ti964_reg_config8); + if (IS_ERR(va->regmap8)) { + dev_err(&client->dev, "Failed to init regmap8!\n"); + return -EIO; + } + + va->regmap16 = devm_regmap_init_i2c(client, + &ti964_reg_config16); + if (IS_ERR(va->regmap16)) { + dev_err(&client->dev, "Failed to init regmap16!\n"); + return -EIO; + } + + mutex_init(&va->mutex); + v4l2_i2c_subdev_init(&va->sd, client, &ti964_sd_ops); + rval = ti964_register_subdev(va); + if (rval) { + dev_err(&client->dev, "Failed to register va subdevice!\n"); + return rval; + } + + if (devm_gpio_request_one(va->sd.dev, va->pdata->reset_gpio, 0, + "ti964 reset") != 0) { + dev_err(va->sd.dev, "Unable to acquire gpio %d\n", + va->pdata->reset_gpio); + return -ENODEV; + } + + rval = ti964_init(va); + if (rval) { + dev_err(&client->dev, "Failed to init TI964!\n"); + return rval; + } + + /* + * TI964 has several back channel GPIOs. + * We export GPIO0 and GPIO1 to control reset or fsin. 
+ */ +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 4, 0) + va->gc.dev = &client->dev; +#else + va->gc.parent = &client->dev; +#endif + va->gc.owner = THIS_MODULE; + va->gc.label = "TI964 GPIO"; + va->gc.ngpio = NR_OF_TI964_GPIOS; + va->gc.base = -1; + va->gc.set = ti964_gpio_set; + va->gc.direction_output = ti964_gpio_direction_output; + rval = gpiochip_add(&va->gc); + if (rval) { + dev_err(&client->dev, "Failed to add gpio chip!\n"); + return -EIO; + } + + return 0; +} + +static int ti964_remove(struct i2c_client *client) +{ + struct v4l2_subdev *subdev = i2c_get_clientdata(client); + struct ti964 *va = to_ti964(subdev); + int i; + + if (!va) + return 0; + + mutex_destroy(&va->mutex); + v4l2_ctrl_handler_free(&va->ctrl_handler); + v4l2_device_unregister_subdev(&va->sd); + media_entity_cleanup(&va->sd.entity); + + for (i = 0; i < NR_OF_TI964_SINK_PADS; i++) { + if (va->sub_devs[i].sd) { + struct i2c_client *sub_client = + v4l2_get_subdevdata(va->sub_devs[i].sd); + + i2c_unregister_device(sub_client); + } + va->sub_devs[i].sd = NULL; + } + + gpiochip_remove(&va->gc); + + return 0; +} + +#ifdef CONFIG_PM +static int ti964_suspend(struct device *dev) +{ + return 0; +} + +static int ti964_resume(struct device *dev) +{ + struct i2c_client *client = to_i2c_client(dev); + struct v4l2_subdev *subdev = i2c_get_clientdata(client); + struct ti964 *va = to_ti964(subdev); + + return ti964_init(va); +} +#else +#define ti964_suspend NULL +#define ti964_resume NULL +#endif /* CONFIG_PM */ + +static const struct i2c_device_id ti964_id_table[] = { + { TI964_NAME, 0 }, + { }, +}; +MODULE_DEVICE_TABLE(i2c, ti964_id_table); + +static const struct dev_pm_ops ti964_pm_ops = { + .suspend = ti964_suspend, + .resume = ti964_resume, +}; + +static struct i2c_driver ti964_i2c_driver = { + .driver = { + .name = TI964_NAME, + .pm = &ti964_pm_ops, + }, + .probe = ti964_probe, + .remove = ti964_remove, + .id_table = ti964_id_table, +}; +module_i2c_driver(ti964_i2c_driver); + +MODULE_AUTHOR("Tianshu 
Qiu "); +MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("TI964 CSI2-Aggregator driver"); diff --git a/drivers/media/i2c/tvp5150.c b/drivers/media/i2c/tvp5150.c index 76e6bed5a1da2..8b450fc53202f 100644 --- a/drivers/media/i2c/tvp5150.c +++ b/drivers/media/i2c/tvp5150.c @@ -901,9 +901,6 @@ static int tvp5150_set_selection(struct v4l2_subdev *sd, /* tvp5150 has some special limits */ rect.left = clamp(rect.left, 0, TVP5150_MAX_CROP_LEFT); - rect.width = clamp_t(unsigned int, rect.width, - TVP5150_H_MAX - TVP5150_MAX_CROP_LEFT - rect.left, - TVP5150_H_MAX - rect.left); rect.top = clamp(rect.top, 0, TVP5150_MAX_CROP_TOP); /* Calculate height based on current standard */ @@ -917,9 +914,16 @@ static int tvp5150_set_selection(struct v4l2_subdev *sd, else hmax = TVP5150_V_MAX_OTHERS; - rect.height = clamp_t(unsigned int, rect.height, + /* + * alignments: + * - width = 2 due to UYVY colorspace + * - height, image = no special alignment + */ + v4l_bound_align_image(&rect.width, + TVP5150_H_MAX - TVP5150_MAX_CROP_LEFT - rect.left, + TVP5150_H_MAX - rect.left, 1, &rect.height, hmax - TVP5150_MAX_CROP_TOP - rect.top, - hmax - rect.top); + hmax - rect.top, 0, 0); tvp5150_write(sd, TVP5150_VERT_BLANKING_START, rect.top); tvp5150_write(sd, TVP5150_VERT_BLANKING_STOP, @@ -1534,7 +1538,7 @@ static int tvp5150_probe(struct i2c_client *c, 27000000, 1, 27000000); v4l2_ctrl_new_std_menu_items(&core->hdl, &tvp5150_ctrl_ops, V4L2_CID_TEST_PATTERN, - ARRAY_SIZE(tvp5150_test_patterns), + ARRAY_SIZE(tvp5150_test_patterns) - 1, 0, 0, tvp5150_test_patterns); sd->ctrl_handler = &core->hdl; if (core->hdl.error) { diff --git a/drivers/media/media-device.c b/drivers/media/media-device.c index 3bae24b15eaa4..732378d0dcf07 100644 --- a/drivers/media/media-device.c +++ b/drivers/media/media-device.c @@ -26,6 +26,8 @@ #include #include #include +#include +#include #include #include @@ -43,6 +45,356 @@ #define MEDIA_ENT_T_DEVNODE_UNKNOWN (MEDIA_ENT_F_OLD_BASE | \ MEDIA_ENT_SUBTYPE_MASK) +static char 
*__request_state[] = { + "IDLE", + "QUEUED", + "DELETED", + "COMPLETED", +}; + +#define request_state(i) \ + ((i) < ARRAY_SIZE(__request_state) ? __request_state[i] : "UNKNOWN") + + +struct media_device_fh { + struct media_devnode_fh fh; + struct list_head requests; + struct { + struct list_head head; + wait_queue_head_t wait; + atomic_t sequence; + } kevents; +}; + +static inline struct media_device_fh *media_device_fh(struct file *filp) +{ + return container_of(filp->private_data, struct media_device_fh, fh); +} + +/* ----------------------------------------------------------------------------- + * Requests + */ + +/** + * media_device_request_find - Find a request based on its ID + * @mdev: The media device + * @reqid: The request ID + * + * Find and return the request associated with the given ID, or NULL if no such + * request exists. + * + * When the function returns a non-NULL request it increases its reference + * count. The caller is responsible for releasing the reference by calling + * media_device_request_put() on the request. 
+ */ +struct media_device_request * +media_device_request_find(struct media_device *mdev, u16 reqid) +{ + struct media_device_request *req; + unsigned long flags; + bool found = false; + + spin_lock_irqsave(&mdev->req_lock, flags); + list_for_each_entry(req, &mdev->requests, list) { + if (req->id == reqid) { + kref_get(&req->kref); + found = true; + break; + } + } + spin_unlock_irqrestore(&mdev->req_lock, flags); + + if (!found) { + dev_dbg(mdev->dev, + "request: can't find %u\n", reqid); + return NULL; + } + + return req; +} +EXPORT_SYMBOL_GPL(media_device_request_find); + +void media_device_request_get(struct media_device_request *req) +{ + kref_get(&req->kref); +} +EXPORT_SYMBOL_GPL(media_device_request_get); + +static void media_device_request_queue_event(struct media_device *mdev, + struct media_device_request *req, + struct media_device_fh *fh) +{ + struct media_kevent *kev = req->kev; + struct media_event *ev = &kev->ev; + + lockdep_assert_held(&mdev->req_lock); + + ev->sequence = atomic_inc_return(&fh->kevents.sequence); + ev->type = MEDIA_EVENT_TYPE_REQUEST_COMPLETE; + ev->req_complete.id = req->id; + + list_add(&kev->list, &fh->kevents.head); + req->kev = NULL; + req->state = MEDIA_DEVICE_REQUEST_STATE_COMPLETE; + wake_up(&fh->kevents.wait); +} + +static void media_device_request_release(struct kref *kref) +{ + struct media_device_request *req = + container_of(kref, struct media_device_request, kref); + struct media_device *mdev = req->mdev; + + dev_dbg(mdev->dev, "release request %u\n", req->id); + + ida_simple_remove(&mdev->req_ids, req->id); + + kfree(req->kev); + req->kev = NULL; + + mdev->ops->req_free(mdev, req); +} + +void media_device_request_put(struct media_device_request *req) +{ + kref_put(&req->kref, media_device_request_release); +} +EXPORT_SYMBOL_GPL(media_device_request_put); + +static int media_device_request_alloc(struct media_device *mdev, + struct file *filp, + struct media_request_cmd *cmd) +{ + struct media_device_fh *fh = 
media_device_fh(filp); + struct media_device_request *req; + struct media_kevent *kev; + unsigned long flags; + int id = ida_simple_get(&mdev->req_ids, 1, 0, GFP_KERNEL); + int ret; + + if (id < 0) { + dev_dbg(mdev->dev, "request: unable to obtain new id\n"); + return id; + } + + kev = kzalloc(sizeof(*kev), GFP_KERNEL); + if (!kev) { + ret = -ENOMEM; + goto out_ida_simple_remove; + } + + req = mdev->ops->req_alloc(mdev); + if (!req) { + ret = -ENOMEM; + goto out_kev_free; + } + + req->mdev = mdev; + req->id = id; + req->filp = filp; + req->state = MEDIA_DEVICE_REQUEST_STATE_IDLE; + req->kev = kev; + kref_init(&req->kref); + + spin_lock_irqsave(&mdev->req_lock, flags); + list_add_tail(&req->list, &mdev->requests); + list_add_tail(&req->fh_list, &fh->requests); + spin_unlock_irqrestore(&mdev->req_lock, flags); + + cmd->request = req->id; + + dev_dbg(mdev->dev, "request: allocated id %u\n", req->id); + + return 0; + +out_kev_free: + kfree(kev); + +out_ida_simple_remove: + ida_simple_remove(&mdev->req_ids, id); + + return ret; +} + +static int media_device_request_delete(struct media_device *mdev, + struct media_device_request *req) +{ + unsigned long flags; + + spin_lock_irqsave(&mdev->req_lock, flags); + + if (req->state != MEDIA_DEVICE_REQUEST_STATE_IDLE) { + spin_unlock_irqrestore(&mdev->req_lock, flags); + dev_dbg(mdev->dev, "request: can't delete %u, state %s\n", + req->id, request_state(req->state)); + return -EINVAL; + } + + req->state = MEDIA_DEVICE_REQUEST_STATE_DELETED; + + if (req->filp) { + /* + * If the file handle is gone by now the + * request has already been deleted from the + * two lists. 
+ */ + list_del(&req->list); + list_del(&req->fh_list); + req->filp = NULL; + } + + spin_unlock_irqrestore(&mdev->req_lock, flags); + + media_device_request_put(req); + + return 0; +} + +void media_device_request_complete(struct media_device *mdev, + struct media_device_request *req) +{ + struct file *filp; + unsigned long flags; + + spin_lock_irqsave(&mdev->req_lock, flags); + + if (req->state == MEDIA_DEVICE_REQUEST_STATE_IDLE) { + dev_dbg(mdev->dev, + "request: not completing an idle request %u\n", + req->id); + spin_unlock_irqrestore(&mdev->req_lock, flags); + return; + } + + if (WARN_ON(req->state != MEDIA_DEVICE_REQUEST_STATE_QUEUED)) { + dev_dbg(mdev->dev, "request: can't delete %u, state %s\n", + req->id, request_state(req->state)); + spin_unlock_irqrestore(&mdev->req_lock, flags); + return; + } + + req->state = MEDIA_DEVICE_REQUEST_STATE_COMPLETE; + filp = req->filp; + if (filp) { + /* + * If the file handle is still around we remove it + * from the lists here. Otherwise it has been removed + * when the file handle closed. + */ + list_del(&req->list); + list_del(&req->fh_list); + /* If the user asked for an event, let's queue one. */ + if (req->flags & MEDIA_REQ_FL_COMPLETE_EVENT) + media_device_request_queue_event( + mdev, req, media_device_fh(filp)); + req->filp = NULL; + } + + spin_unlock_irqrestore(&mdev->req_lock, flags); + + /* + * The driver holds a reference to a request if the filp + * pointer is non-NULL: the file handle associated to the + * request may have been released by now, i.e. filp is NULL. + */ + if (filp) + media_device_request_put(req); +} +EXPORT_SYMBOL_GPL(media_device_request_complete); + +static int media_device_request_queue_apply( + struct media_device *mdev, struct media_device_request *req, + u32 req_flags, int (*fn)(struct media_device *mdev, + struct media_device_request *req), bool queue) +{ + char *str = queue ? 
"queue" : "apply"; + unsigned long flags; + int rval = 0; + + if (!fn) + return -ENOSYS; + + spin_lock_irqsave(&mdev->req_lock, flags); + if (req->state != MEDIA_DEVICE_REQUEST_STATE_IDLE) { + rval = -EINVAL; + dev_dbg(mdev->dev, + "request: unable to %s %u, request in state %s\n", + str, req->id, request_state(req->state)); + } else { + req->state = MEDIA_DEVICE_REQUEST_STATE_QUEUED; + req->flags = req_flags; + } + spin_unlock_irqrestore(&mdev->req_lock, flags); + + if (rval) + return rval; + + rval = fn(mdev, req); + if (rval) { + spin_lock_irqsave(&mdev->req_lock, flags); + req->state = MEDIA_DEVICE_REQUEST_STATE_IDLE; + spin_unlock_irqrestore(&mdev->req_lock, flags); + dev_dbg(mdev->dev, + "request: can't %s %u\n", str, req->id); + } else { + dev_dbg(mdev->dev, + "request: %s %u\n", str, req->id); + } + + return rval; +} + +static long media_device_request_cmd(struct media_device *mdev, + struct file *filp, + struct media_request_cmd *cmd) +{ + struct media_device_request *req = NULL; + int ret; + + if (!mdev->ops || !mdev->ops->req_alloc || !mdev->ops->req_free) + return -ENOTTY; + + if (cmd->cmd != MEDIA_REQ_CMD_ALLOC) { + req = media_device_request_find(mdev, cmd->request); + if (!req) + return -EINVAL; + } + + switch (cmd->cmd) { + case MEDIA_REQ_CMD_ALLOC: + ret = media_device_request_alloc(mdev, filp, cmd); + break; + + case MEDIA_REQ_CMD_DELETE: + ret = media_device_request_delete(mdev, req); + break; + + case MEDIA_REQ_CMD_APPLY: + ret = media_device_request_queue_apply(mdev, req, cmd->flags, + mdev->ops->req_apply, + false); + break; + + case MEDIA_REQ_CMD_QUEUE: + ret = media_device_request_queue_apply(mdev, req, cmd->flags, + mdev->ops->req_queue, + true); + break; + + default: + ret = -EINVAL; + break; + } + + if (req) + media_device_request_put(req); + + if (ret < 0) + return ret; + + return 0; +} + /* ----------------------------------------------------------------------------- * Userspace API */ @@ -54,15 +406,58 @@ static inline void __user 
*media_get_uptr(__u64 arg) static int media_device_open(struct file *filp) { + struct media_device_fh *fh; + + fh = kzalloc(sizeof(*fh), GFP_KERNEL); + if (!fh) + return -ENOMEM; + + INIT_LIST_HEAD(&fh->requests); + INIT_LIST_HEAD(&fh->kevents.head); + init_waitqueue_head(&fh->kevents.wait); + atomic_set(&fh->kevents.sequence, -1); + filp->private_data = &fh->fh; + return 0; } static int media_device_close(struct file *filp) { + struct media_device_fh *fh = media_device_fh(filp); + struct media_device *mdev = fh->fh.devnode->media_dev; + + spin_lock_irq(&mdev->req_lock); + while (!list_empty(&fh->requests)) { + struct media_device_request *req = + list_first_entry(&fh->requests, typeof(*req), fh_list); + + list_del(&req->list); + list_del(&req->fh_list); + req->filp = NULL; + spin_unlock_irq(&mdev->req_lock); + media_device_request_put(req); + spin_lock_irq(&mdev->req_lock); + } + + while (!list_empty(&fh->kevents.head)) { + struct media_kevent *kev = + list_first_entry(&fh->kevents.head, typeof(*kev), list); + + list_del(&kev->list); + spin_unlock_irq(&mdev->req_lock); + kfree(kev); + spin_lock_irq(&mdev->req_lock); + } + spin_unlock_irq(&mdev->req_lock); + + kfree(fh); + return 0; } -static long media_device_get_info(struct media_device *dev, void *arg) +static long media_device_get_info(struct media_device *dev, + struct file *filp, + void *arg) { struct media_device_info *info = arg; @@ -102,7 +497,9 @@ static struct media_entity *find_entity(struct media_device *mdev, u32 id) return NULL; } -static long media_device_enum_entities(struct media_device *mdev, void *arg) +static long media_device_enum_entities(struct media_device *mdev, + struct file *filp, + void *arg) { struct media_entity_desc *entd = arg; struct media_entity *ent; @@ -155,7 +552,9 @@ static void media_device_kpad_to_upad(const struct media_pad *kpad, upad->flags = kpad->flags; } -static long media_device_enum_links(struct media_device *mdev, void *arg) +static long 
media_device_enum_links(struct media_device *mdev, + struct file *filp, + void *arg) { struct media_links_enum *links = arg; struct media_entity *entity; @@ -204,7 +603,9 @@ static long media_device_enum_links(struct media_device *mdev, void *arg) return 0; } -static long media_device_setup_link(struct media_device *mdev, void *arg) +static long media_device_setup_link(struct media_device *mdev, + struct file *filp, + void *arg) { struct media_link_desc *linkd = arg; struct media_link *link = NULL; @@ -377,6 +778,49 @@ static long media_device_get_topology(struct media_device *mdev, void *arg) return ret; } +static struct media_kevent *opportunistic_dqevent(struct media_device *mdev, + struct file *filp) +{ + struct media_device_fh *fh = media_device_fh(filp); + struct media_kevent *kev = NULL; + unsigned long flags; + + spin_lock_irqsave(&mdev->req_lock, flags); + if (!list_empty(&fh->kevents.head)) { + kev = list_last_entry(&fh->kevents.head, + struct media_kevent, list); + list_del(&kev->list); + } + spin_unlock_irqrestore(&mdev->req_lock, flags); + + return kev; +} + +static int media_device_dqevent(struct media_device *mdev, + struct file *filp, + struct media_event *ev) +{ + struct media_device_fh *fh = media_device_fh(filp); + struct media_kevent *kev; + + if (filp->f_flags & O_NONBLOCK) { + kev = opportunistic_dqevent(mdev, filp); + if (!kev) + return -ENODATA; + } else { + int ret = wait_event_interruptible( + fh->kevents.wait, + (kev = opportunistic_dqevent(mdev, filp))); + if (ret == -ERESTARTSYS) + return ret; + } + + *ev = kev->ev; + kfree(kev); + + return 0; +} + static long copy_arg_from_user(void *karg, void __user *uarg, unsigned int cmd) { /* All media IOCTLs are _IOWR() */ @@ -401,7 +845,8 @@ static long copy_arg_to_user(void __user *uarg, void *karg, unsigned int cmd) #define MEDIA_IOC_ARG(__cmd, func, fl, from_user, to_user) \ [_IOC_NR(MEDIA_IOC_##__cmd)] = { \ .cmd = MEDIA_IOC_##__cmd, \ - .fn = (long (*)(struct media_device *, void *))func, \ 
+ .fn = (long (*)(struct media_device *, \ + struct file *, void *))func, \ .flags = fl, \ .arg_from_user = from_user, \ .arg_to_user = to_user, \ @@ -414,7 +859,7 @@ static long copy_arg_to_user(void __user *uarg, void *karg, unsigned int cmd) struct media_ioctl_info { unsigned int cmd; unsigned short flags; - long (*fn)(struct media_device *dev, void *arg); + long (*fn)(struct media_device *dev, struct file *file, void *arg); long (*arg_from_user)(void *karg, void __user *uarg, unsigned int cmd); long (*arg_to_user)(void __user *uarg, void *karg, unsigned int cmd); }; @@ -425,6 +870,8 @@ static const struct media_ioctl_info ioctl_info[] = { MEDIA_IOC(ENUM_LINKS, media_device_enum_links, MEDIA_IOC_FL_GRAPH_MUTEX), MEDIA_IOC(SETUP_LINK, media_device_setup_link, MEDIA_IOC_FL_GRAPH_MUTEX), MEDIA_IOC(G_TOPOLOGY, media_device_get_topology, MEDIA_IOC_FL_GRAPH_MUTEX), + MEDIA_IOC(REQUEST_CMD, media_device_request_cmd, 0), + MEDIA_IOC(DQEVENT, media_device_dqevent, 0), }; static long media_device_ioctl(struct file *filp, unsigned int cmd, @@ -458,7 +905,7 @@ static long media_device_ioctl(struct file *filp, unsigned int cmd, if (info->flags & MEDIA_IOC_FL_GRAPH_MUTEX) mutex_lock(&dev->graph_mutex); - ret = info->fn(dev, karg); + ret = info->fn(dev, filp, karg); if (info->flags & MEDIA_IOC_FL_GRAPH_MUTEX) mutex_unlock(&dev->graph_mutex); @@ -473,6 +920,34 @@ static long media_device_ioctl(struct file *filp, unsigned int cmd, return ret; } +static unsigned int media_device_poll(struct file *filp, + struct poll_table_struct *wait) +{ + struct media_device_fh *fh = media_device_fh(filp); + struct media_device *mdev = fh->fh.devnode->media_dev; + unsigned int poll_events = poll_requested_events(wait); + int ret = 0; + + if (poll_events & (POLLIN | POLLOUT)) + return POLLERR; + + if (poll_events & POLLPRI) { + unsigned long flags; + bool empty; + + spin_lock_irqsave(&mdev->req_lock, flags); + empty = list_empty(&fh->kevents.head); + spin_unlock_irqrestore(&mdev->req_lock, 
flags); + + if (empty) + poll_wait(filp, &fh->kevents.wait, wait); + else + ret |= POLLPRI; + } + + return ret; +} + #ifdef CONFIG_COMPAT struct media_links_enum32 { @@ -483,7 +958,8 @@ struct media_links_enum32 { }; static long media_device_enum_links32(struct media_device *mdev, - struct media_links_enum32 __user *ulinks) + struct file *filp, + struct media_links_enum32 __user *ulinks) { struct media_links_enum links; compat_uptr_t pads_ptr, links_ptr; @@ -498,7 +974,7 @@ static long media_device_enum_links32(struct media_device *mdev, links.pads = compat_ptr(pads_ptr); links.links = compat_ptr(links_ptr); - return media_device_enum_links(mdev, &links); + return media_device_enum_links(mdev, filp, &links); } #define MEDIA_IOC_ENUM_LINKS32 _IOWR('|', 0x02, struct media_links_enum32) @@ -514,6 +990,7 @@ static long media_device_compat_ioctl(struct file *filp, unsigned int cmd, case MEDIA_IOC_ENUM_LINKS32: mutex_lock(&dev->graph_mutex); ret = media_device_enum_links32(dev, + filp, (struct media_links_enum32 __user *)arg); mutex_unlock(&dev->graph_mutex); break; @@ -530,6 +1007,7 @@ static const struct media_file_operations media_device_fops = { .owner = THIS_MODULE, .open = media_device_open, .ioctl = media_device_ioctl, + .poll = media_device_poll, #ifdef CONFIG_COMPAT .compat_ioctl = media_device_compat_ioctl, #endif /* CONFIG_COMPAT */ @@ -717,6 +1195,10 @@ int __must_check __media_device_register(struct media_device *mdev, if (!devnode) return -ENOMEM; + ida_init(&mdev->req_ids); + spin_lock_init(&mdev->req_lock); + INIT_LIST_HEAD(&mdev->requests); + /* Register the device node. 
*/ mdev->devnode = devnode; devnode->fops = &media_device_fops; @@ -739,6 +1221,7 @@ int __must_check __media_device_register(struct media_device *mdev, mdev->devnode = NULL; media_devnode_unregister_prepare(devnode); media_devnode_unregister(devnode); + ida_destroy(&mdev->req_ids); return ret; } @@ -823,6 +1306,7 @@ void media_device_unregister(struct media_device *mdev) device_remove_file(&mdev->devnode->dev, &dev_attr_model); media_devnode_unregister(mdev->devnode); + ida_destroy(&mdev->req_ids); /* devnode free is handled in media_devnode_*() */ mdev->devnode = NULL; } diff --git a/drivers/media/media-devnode.c b/drivers/media/media-devnode.c index 6b87a721dc499..86e92cb4289c5 100644 --- a/drivers/media/media-devnode.c +++ b/drivers/media/media-devnode.c @@ -149,6 +149,7 @@ static long media_compat_ioctl(struct file *filp, unsigned int cmd, /* Override for the open function */ static int media_open(struct inode *inode, struct file *filp) { + struct media_devnode_fh *fh; struct media_devnode *devnode; int ret; @@ -181,6 +182,9 @@ static int media_open(struct inode *inode, struct file *filp) } } + fh = filp->private_data; + fh->devnode = devnode; + return 0; } diff --git a/drivers/media/media-entity.c b/drivers/media/media-entity.c index 3498551e618e5..a0aeee60cb5d1 100644 --- a/drivers/media/media-entity.c +++ b/drivers/media/media-entity.c @@ -237,18 +237,38 @@ EXPORT_SYMBOL_GPL(media_entity_pads_init); * Graph traversal */ -static struct media_entity * -media_entity_other(struct media_entity *entity, struct media_link *link) +/** + * media_entity_has_route - Check if two entity pads are connected internally + * @entity: The entity + * @pad0: The first pad index + * @pad1: The second pad index + * + * This function can be used to check whether two pads of an entity are + * connected internally in the entity. + * + * The caller must hold entity->source->parent->mutex. + * + * Return: true if the pads are connected internally and false otherwise. 
+ */ +bool media_entity_has_route(struct media_entity *entity, unsigned int pad0, + unsigned int pad1) { - if (link->source->entity == entity) - return link->sink->entity; - else - return link->source->entity; + if (pad0 >= entity->num_pads || pad1 >= entity->num_pads) + return false; + + if (pad0 == pad1) + return true; + + if (!entity->ops || !entity->ops->has_route) + return true; + + return entity->ops->has_route(entity, pad0, pad1, NULL); } +EXPORT_SYMBOL_GPL(media_entity_has_route); /* push an entity to traversal stack */ static void stack_push(struct media_graph *graph, - struct media_entity *entity) + struct media_entity *entity, int pad, int stream) { if (graph->top == MEDIA_ENTITY_ENUM_MAX_DEPTH - 1) { WARN_ON(1); @@ -257,6 +277,8 @@ static void stack_push(struct media_graph *graph, graph->top++; graph->stack[graph->top].link = entity->links.next; graph->stack[graph->top].entity = entity; + graph->stack[graph->top].pad = pad; + graph->stack[graph->top].stream = stream; } static struct media_entity *stack_pop(struct media_graph *graph) @@ -271,6 +293,8 @@ static struct media_entity *stack_pop(struct media_graph *graph) #define link_top(en) ((en)->stack[(en)->top].link) #define stack_top(en) ((en)->stack[(en)->top].entity) +#define pad_top(en) ((en)->stack[(en)->top].pad) +#define stream_top(en) ((en)->stack[(en)->top].stream) /** * media_graph_walk_init - Allocate resources for graph walk @@ -308,7 +332,9 @@ void media_graph_walk_start(struct media_graph *graph, graph->top = 0; graph->stack[graph->top].entity = NULL; - stack_push(graph, entity); + stack_push(graph, entity, + entity->start ? 
entity->start->index : 0, -1); + entity->start = NULL; dev_dbg(entity->graph_obj.mdev->dev, "begin graph walk at '%s'\n", entity->name); } @@ -319,6 +345,10 @@ static void media_graph_walk_iter(struct media_graph *graph) struct media_entity *entity = stack_top(graph); struct media_link *link; struct media_entity *next; + struct media_pad *remote; + struct media_pad *local; + unsigned int from_pad = pad_top(graph); + int stream = stream_top(graph); link = list_entry(link_top(graph), typeof(*link), list); @@ -332,8 +362,31 @@ static void media_graph_walk_iter(struct media_graph *graph) return; } - /* Get the entity in the other end of the link . */ - next = media_entity_other(entity, link); + /* + * Get the local pad, the remote pad and the entity at the other + * end of the link. + */ + if (link->source->entity == entity) { + remote = link->sink; + local = link->source; + } else { + remote = link->source; + local = link->sink; + } + + next = remote->entity; + + /* + * Are the local pad and the pad we came from connected + * internally in the entity ? + */ + if (entity->ops && entity->ops->has_route) { + if (!entity->ops->has_route(entity, from_pad, + local->index, &stream)) { + link_top(graph) = link_top(graph)->next; + return; + } + } /* Has the entity already been visited? */ if (media_entity_enum_test_and_set(&graph->ent_enum, next)) { @@ -346,7 +399,7 @@ static void media_graph_walk_iter(struct media_graph *graph) /* Push the new entity to stack and start over. */ link_top(graph) = link_top(graph)->next; - stack_push(graph, next); + stack_push(graph, next, remote->index, stream); dev_dbg(entity->graph_obj.mdev->dev, "walk: pushing '%s' on stack\n", next->name); } @@ -436,17 +489,8 @@ __must_check int __media_pipeline_start(struct media_entity *entity, entity->stream_count++; - if (WARN_ON(entity->pipe && entity->pipe != pipe)) { - ret = -EBUSY; - goto error; - } - entity->pipe = pipe; - /* Already streaming --- no need to check. 
*/ - if (entity->stream_count > 1) - continue; - if (!entity->ops || !entity->ops->link_validate) continue; diff --git a/drivers/media/pci/Kconfig b/drivers/media/pci/Kconfig index 1f09123e2bf95..8e6a0108d9c13 100644 --- a/drivers/media/pci/Kconfig +++ b/drivers/media/pci/Kconfig @@ -16,6 +16,7 @@ source "drivers/media/pci/sta2x11/Kconfig" source "drivers/media/pci/tw5864/Kconfig" source "drivers/media/pci/tw68/Kconfig" source "drivers/media/pci/tw686x/Kconfig" +source "drivers/media/pci/intel/Kconfig" endif if MEDIA_ANALOG_TV_SUPPORT diff --git a/drivers/media/pci/cx23885/altera-ci.c b/drivers/media/pci/cx23885/altera-ci.c index 62bc8049b3201..198c05e83f5c0 100644 --- a/drivers/media/pci/cx23885/altera-ci.c +++ b/drivers/media/pci/cx23885/altera-ci.c @@ -665,6 +665,10 @@ static int altera_hw_filt_init(struct altera_ci_config *config, int hw_filt_nr) } temp_int = append_internal(inter); + if (!temp_int) { + ret = -ENOMEM; + goto err; + } inter->filts_used = 1; inter->dev = config->dev; inter->fpga_rw = config->fpga_rw; @@ -699,6 +703,7 @@ static int altera_hw_filt_init(struct altera_ci_config *config, int hw_filt_nr) __func__, ret); kfree(pid_filt); + kfree(inter); return ret; } @@ -733,6 +738,10 @@ int altera_ci_init(struct altera_ci_config *config, int ci_nr) } temp_int = append_internal(inter); + if (!temp_int) { + ret = -ENOMEM; + goto err; + } inter->cis_used = 1; inter->dev = config->dev; inter->fpga_rw = config->fpga_rw; @@ -801,6 +810,7 @@ int altera_ci_init(struct altera_ci_config *config, int ci_nr) ci_dbg_print("%s: Cannot initialize CI: Error %d.\n", __func__, ret); kfree(state); + kfree(inter); return ret; } diff --git a/drivers/media/pci/cx23885/cx23885-core.c b/drivers/media/pci/cx23885/cx23885-core.c index 39804d830305c..fd5c52b21436b 100644 --- a/drivers/media/pci/cx23885/cx23885-core.c +++ b/drivers/media/pci/cx23885/cx23885-core.c @@ -23,6 +23,7 @@ #include #include #include +#include #include #include #include @@ -41,6 +42,18 @@ 
MODULE_AUTHOR("Steven Toth "); MODULE_LICENSE("GPL"); MODULE_VERSION(CX23885_VERSION); +/* + * Some platforms have been found to require periodic resetting of the DMA + * engine. Ryzen and XEON platforms are known to be affected. The symptom + * encountered is "mpeg risc op code error". Only Ryzen platforms employ + * this workaround if the option equals 1. The workaround can be explicitly + * disabled for all platforms by setting to 0, the workaround can be forced + * on for any platform by setting to 2. + */ +static unsigned int dma_reset_workaround = 1; +module_param(dma_reset_workaround, int, 0644); +MODULE_PARM_DESC(dma_reset_workaround, "periodic RiSC dma engine reset; 0-force disable, 1-driver detect (default), 2-force enable"); + static unsigned int debug; module_param(debug, int, 0644); MODULE_PARM_DESC(debug, "enable debug messages"); @@ -603,8 +616,13 @@ static void cx23885_risc_disasm(struct cx23885_tsport *port, static void cx23885_clear_bridge_error(struct cx23885_dev *dev) { - uint32_t reg1_val = cx_read(TC_REQ); /* read-only */ - uint32_t reg2_val = cx_read(TC_REQ_SET); + uint32_t reg1_val, reg2_val; + + if (!dev->need_dma_reset) + return; + + reg1_val = cx_read(TC_REQ); /* read-only */ + reg2_val = cx_read(TC_REQ_SET); if (reg1_val && reg2_val) { cx_write(TC_REQ, reg1_val); @@ -2058,6 +2076,37 @@ void cx23885_gpio_enable(struct cx23885_dev *dev, u32 mask, int asoutput) /* TODO: 23-19 */ } +static struct { + int vendor, dev; +} const broken_dev_id[] = { + /* According with + * https://openbenchmarking.org/system/1703021-RI-AMDZEN08075/Ryzen%207%201800X/lspci, + * 0x1451 is PCI ID for the IOMMU found on Ryzen + */ + { PCI_VENDOR_ID_AMD, 0x1451 }, +}; + +static bool cx23885_does_need_dma_reset(void) +{ + int i; + struct pci_dev *pdev = NULL; + + if (dma_reset_workaround == 0) + return false; + else if (dma_reset_workaround == 2) + return true; + + for (i = 0; i < ARRAY_SIZE(broken_dev_id); i++) { + pdev = pci_get_device(broken_dev_id[i].vendor, + 
broken_dev_id[i].dev, NULL); + if (pdev) { + pci_dev_put(pdev); + return true; + } + } + return false; +} + static int cx23885_initdev(struct pci_dev *pci_dev, const struct pci_device_id *pci_id) { @@ -2069,6 +2118,8 @@ static int cx23885_initdev(struct pci_dev *pci_dev, if (NULL == dev) return -ENOMEM; + dev->need_dma_reset = cx23885_does_need_dma_reset(); + err = v4l2_device_register(&pci_dev->dev, &dev->v4l2_dev); if (err < 0) goto fail_free; diff --git a/drivers/media/pci/cx23885/cx23885.h b/drivers/media/pci/cx23885/cx23885.h index d54c7ee1ab218..cf965efabe666 100644 --- a/drivers/media/pci/cx23885/cx23885.h +++ b/drivers/media/pci/cx23885/cx23885.h @@ -451,6 +451,8 @@ struct cx23885_dev { /* Analog raw audio */ struct cx23885_audio_dev *audio_dev; + /* Does the system require periodic DMA resets? */ + unsigned int need_dma_reset:1; }; static inline struct cx23885_dev *to_cx23885(struct v4l2_device *v4l2_dev) diff --git a/drivers/media/pci/intel/Kconfig b/drivers/media/pci/intel/Kconfig new file mode 100644 index 0000000000000..8168fd41ffed7 --- /dev/null +++ b/drivers/media/pci/intel/Kconfig @@ -0,0 +1,92 @@ +config VIDEO_INTEL_IPU + tristate "Intel IPU driver" + depends on ACPI + select IOMMU_API + select IOMMU_IOVA + select X86_DEV_DMA_OPS if X86 + select VIDEOBUF2_DMA_CONTIG + select PHYS_ADDR_T_64BIT + select COMMON_CLK + ---help--- + Say Y here! + +choice + prompt "intel ipu generation type" + depends on VIDEO_INTEL_IPU + default VIDEO_INTEL_IPU4 + +config VIDEO_INTEL_IPU4 + bool "Compile for IPU4 driver" + ---help--- + Say Y here! + +config VIDEO_INTEL_IPU4P + bool "Compile for IPU4P driver" + ---help--- + Say Y here! 
+ +endchoice + +choice + prompt "intel ipu hardware platform type" + depends on VIDEO_INTEL_IPU + default VIDEO_INTEL_IPU_SOC + +config VIDEO_INTEL_IPU_SOC + bool "Compile for SOC" + ---help--- + Select for SOC platform + +endchoice + +config VIDEO_INTEL_IPU_FW_LIB + bool "Compile firmware library" + ---help--- + If selected, the firmware hostlib css would be compiled + +config VIDEO_INTEL_IPU_WERROR + bool "Force GCC to throw an error instead of a warning when compiling" + depends on VIDEO_INTEL_IPU + depends on EXPERT + depends on !COMPILE_TEST + default n + help + Add -Werror to the build flags for (and only for) intel ipu module. + Do not enable this unless you are writing code for the ipu module. + + Recommended for driver developers only. + + If in doubt, say "N". + +config VIDEO_INTEL_ICI + depends on VIDEO_INTEL_IPU + bool "Compile for ICI driver" + ---help--- + If selected ICI driver will be compiled + +config VIDEO_INTEL_UOS + bool "Compile driver per UOS" + ---help--- + If selected UOS driver components will be compiled + +config VIDEO_INTEL_IPU_ACRN + depends on X86_64 && VIRTIO=y + bool "Compile for virtio mediation" + +choice + prompt "Virtio driver type" + depends on VIDEO_INTEL_IPU_ACRN + default VIDEO_INTEL_IPU_VIRTIO_BE + +config VIDEO_INTEL_IPU_VIRTIO_BE + bool "Configure IPU4 as virtio backend" + depends on VBS && VIDEO_INTEL_ICI + ---help--- + Configuring IPU4 driver as virtio backend + +config VIDEO_INTEL_IPU_VIRTIO_FE + bool "Configure IPU4 as virtio frontend" + ---help--- + Configuring IPU4 driver as virtio frontend + +endchoice diff --git a/drivers/media/pci/intel/Makefile b/drivers/media/pci/intel/Makefile index 745c8b2a7819d..d46da91ce6434 100644 --- a/drivers/media/pci/intel/Makefile +++ b/drivers/media/pci/intel/Makefile @@ -1,5 +1,23 @@ +# SPDX-License-Identifier: GPL-2.0 +# Copyright (c) 2010 - 2018, Intel Corporation. 
# -# Makefile for the IPU3 cio2 and ImGU drivers +# +# Makefile for the IPU3 cio2, ImGU and IPU4 drivers # obj-y += ipu3/ + +# force check the compile warning to make sure zero warnings +# note we may have build issue when gcc upgraded. +subdir-ccflags-y := -Wall -Wextra +subdir-ccflags-y += $(call cc-disable-warning, unused-parameter) +subdir-ccflags-y += $(call cc-disable-warning, implicit-fallthrough) +subdir-ccflags-y += $(call cc-disable-warning, missing-field-initializers) +subdir-ccflags-$(CONFIG_VIDEO_INTEL_IPU_WERROR) += -Werror + +ifndef CONFIG_VIDEO_INTEL_ICI +obj-$(CONFIG_VIDEO_INTEL_IPU4) += ipu4/ +obj-$(CONFIG_VIDEO_INTEL_IPU4P) += ipu4/ +endif +obj-$(CONFIG_VIDEO_INTEL_ICI) += ici/ +obj-$(CONFIG_VIDEO_INTEL_IPU_ACRN) += virtio/ diff --git a/drivers/media/pci/intel/ici-fw-isys.h b/drivers/media/pci/intel/ici-fw-isys.h new file mode 100644 index 0000000000000..afd7537da02b2 --- /dev/null +++ b/drivers/media/pci/intel/ici-fw-isys.h @@ -0,0 +1,25 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (C) 2013 - 2018 Intel Corporation */ + +#ifndef ICI_FW_ISYS_H +#define ICI_FW_ISYS_H + +#include "ipu-fw-com.h" + +struct ici_isys; +int ici_fw_isys_init(struct ici_isys *isys, unsigned int num_streams); +int ici_fw_isys_close(struct ici_isys *isys); +int ici_fw_isys_simple_cmd(struct ici_isys *isys, + const unsigned int stream_handle, + enum ipu_fw_isys_send_type send_type); +int ici_fw_isys_complex_cmd(struct ici_isys *isys, + const unsigned int stream_handle, + void *cpu_mapped_buf, + dma_addr_t dma_mapped_buf, + size_t size, enum ipu_fw_isys_send_type send_type); +int ici_fw_isys_send_proxy_token(struct ici_isys *isys, + unsigned int req_id, + unsigned int index, + unsigned int offset, u32 value); +void ici_fw_isys_cleanup(struct ici_isys *isys); +#endif diff --git a/drivers/media/pci/intel/ici/Makefile b/drivers/media/pci/intel/ici/Makefile new file mode 100644 index 0000000000000..fb23912113fc2 --- /dev/null +++ b/drivers/media/pci/intel/ici/Makefile 
@@ -0,0 +1,74 @@ +# SPDX-License-Identifier: GPL-2.0 +# Copyright (c) 2010 - 2018, Intel Corporation. + +ifneq ($(EXTERNAL_BUILD), 1) +srcpath := $(srctree) +endif + +ifdef CONFIG_VIDEO_INTEL_ICI +ccflags-y += -DHAS_DUAL_CMD_CTX_SUPPORT=0 -DIPU_VC_SUPPORT -DIPU_HAS_ISA -DIPU_PSYS_LEGACY -DI2C_WA -Wframe-larger-than=4096 + + +# work-around to re-use ipu4-css and libintel-ipu4_ici.c together when +# compiling ICI-ISYS +$(shell cp -r $(srcpath)/$(src)/../ipu4/ipu4-css/ $(srcpath)/$(src)/) +$(shell cp -f $(srcpath)/$(src)/libintel-ipu4_ici.c $(srcpath)/$(src)/ipu4-css/libintel-ipu4.c) +$(shell cp -f $(srcpath)/$(src)/../ipu4/ipu-platform-resources.h $(srcpath)/$(src)/) + +intel-ipu4-objs +=../ipu.o \ + ../ipu-bus.o \ + ici-dma.o \ + ../ipu-buttress.o \ + ../ipu-trace.o \ + ../ipu-cpd.o \ + ../ipu-fw-com.o \ + ../ipu4/ipu4.o + +obj-$(CONFIG_VIDEO_INTEL_IPU) += intel-ipu4.o + +intel-ipu4-mmu-objs += ../ipu-mmu.o +obj-$(CONFIG_VIDEO_INTEL_IPU) += intel-ipu4-mmu.o + +ici-isys-mod-objs += \ + ici-isys.o \ + ici-isys-csi2.o \ + ici-isys-tpg.o \ + ici-isys-csi2-be.o \ + ici-isys-stream.o \ + ici-isys-frame-buf.o \ + ici-isys-subdev.o \ + ici-isys-pipeline.o \ + ici-isys-pipeline-device.o \ + ici-isys-stream-device.o +obj-$(CONFIG_VIDEO_INTEL_IPU) += ici-isys-mod.o + +intel-ipu4-psys-objs += ../ipu-psys.o \ + ../ipu-psys-virt.o \ + ../ipu4/ipu4-resources.o \ + ../ipu4/ipu4-psys.o \ + +ifndef CONFIG_VIDEO_INTEL_IPU_FW_LIB +intel-ipu4-psys-objs += ipu4-fw-resources.o \ + ../ipu-fw-psys.o +endif + +ifeq ($(CONFIG_COMPAT),y) +intel-ipu4-psys-objs += ../ipu-psys-compat32.o +endif + +obj-$(CONFIG_VIDEO_INTEL_IPU) += intel-ipu4-psys.o + +ifdef CONFIG_VIDEO_INTEL_IPU_FW_LIB +include $(srcpath)/$(src)/ipu4-css/Makefile.isyslib +include $(srcpath)/$(src)/ipu4-css/Makefile.psyslib +endif + +subdir-ccflags-y += -I$(srcpath)/$(src)/../../../../../include/ +subdir-ccflags-y += -I$(srcpath)/$(src)/../ +subdir-ccflags-y += -I$(srcpath)/$(src)/../ipu4/ +subdir-ccflags-y += -I$(srcpath)/$(src)/ 
+subdir-ccflags-y += -I$(srcpath)/$(src)/ipu4-css + +ccflags-y += -DPARAMETER_INTERFACE_V2 + +endif diff --git a/drivers/media/pci/intel/ici/ici-dma.c b/drivers/media/pci/intel/ici/ici-dma.c new file mode 100644 index 0000000000000..430ece88af69a --- /dev/null +++ b/drivers/media/pci/intel/ici/ici-dma.c @@ -0,0 +1,461 @@ +// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0) +/* + * Copyright (C) 2018 Intel Corporation + */ + +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "ipu-dma.h" +#include "ipu-mmu.h" + +static void ici_dma_clear_buffer(struct page *page, size_t size, +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 8, 0) + struct dma_attrs *attrs +#else + unsigned long attrs +#endif + ) +{ + /* + * Ensure that the allocated pages are zeroed, and that any data + * lurking in the kernel direct-mapped region is invalidated. + */ + if (PageHighMem(page)) { + for (; size > 0; page++, size -= PAGE_SIZE) { + void *ptr = kmap_atomic(page); + + memset(ptr, 0, PAGE_SIZE); +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 8, 0) + if (!dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs)) +#else + if ((attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0) +#endif + clflush_cache_range(ptr, PAGE_SIZE); + kunmap_atomic(ptr); + } + } else { + void *ptr = page_address(page); + + memset(ptr, 0, size); +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 8, 0) + if (!dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs)) +#else + if ((attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0) +#endif + clflush_cache_range(ptr, size); + } +} + + +static struct page **__intel_ipu4_dma_alloc(struct device *dev, + size_t buf_size, + gfp_t gfp, +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 8, 0) + struct dma_attrs *attrs +#else + unsigned long attrs +#endif + ) +{ + int num_pages = buf_size >> PAGE_SHIFT; + int array_size = num_pages * sizeof(struct page *); + struct page **page_list; + int i = 0; + + if (array_size <= PAGE_SIZE) + page_list = kzalloc(array_size, GFP_KERNEL); + else + 
page_list = vzalloc(array_size); + if (!page_list) + return NULL; + + gfp |= __GFP_NOWARN; + + while (num_pages) { + int j, order = __fls(num_pages); + + page_list[i] = alloc_pages(gfp, order); + while (!page_list[i] && order) + page_list[i] = alloc_pages(gfp, --order); + if (!page_list[i]) + goto error; + + if (order) { + split_page(page_list[i], order); + j = 1 << order; + while (--j) + page_list[i + j] = page_list[i] + j; + } + + ici_dma_clear_buffer(page_list[i], PAGE_SIZE << order, attrs); + i += 1 << order; + num_pages -= 1 << order; + } + + return page_list; +error: + while (i--) + if (page_list[i]) + __free_pages(page_list[i], 0); + if (array_size <= PAGE_SIZE) + kfree(page_list); + else + vfree(page_list); + return NULL; +} + +static int __intel_ipu4_dma_free(struct device *dev, struct page **page_list, + size_t buf_size, +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 8, 0) + struct dma_attrs *attrs +#else + unsigned long attrs +#endif + ) +{ + int num_pages = buf_size >> PAGE_SHIFT; + int array_size = num_pages * sizeof(struct page *); + int i; + + for (i = 0; i < num_pages; i++) { + if (page_list[i]) { + ici_dma_clear_buffer(page_list[i], PAGE_SIZE, attrs); + __free_pages(page_list[i], 0); + } + } + + if (array_size <= PAGE_SIZE) + kfree(page_list); + else + vfree(page_list); + return 0; +} + +static void intel_ipu4_dma_sync_single_for_cpu( + struct device *dev, dma_addr_t dma_handle, size_t size, + enum dma_data_direction dir) +{ + struct device *aiommu = to_ipu_bus_device(dev)->iommu; + struct ipu_mmu *mmu = dev_get_drvdata(aiommu); + + clflush_cache_range( + phys_to_virt(iommu_iova_to_phys( + mmu->dmap->domain, dma_handle)), size); +} + +static void intel_ipu4_dma_sync_sg_for_cpu( + struct device *dev, struct scatterlist *sglist, int nents, + enum dma_data_direction dir) +{ + struct device *aiommu = to_ipu_bus_device(dev)->iommu; + struct ipu_mmu *mmu = dev_get_drvdata(aiommu); + struct scatterlist *sg; + int i; + + for_each_sg(sglist, sg, nents, i) { + 
clflush_cache_range( + phys_to_virt(iommu_iova_to_phys( + mmu->dmap->domain, + sg_dma_address(sg))), + sg->length); + } +} + +static void *intel_ipu4_dma_alloc(struct device *dev, size_t size, + dma_addr_t *dma_handle, gfp_t gfp, +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 8, 0) + struct dma_attrs *attrs +#else + unsigned long attrs +#endif + ) +{ + struct device *aiommu = to_ipu_bus_device(dev)->iommu; + struct ipu_mmu *mmu = dev_get_drvdata(aiommu); + struct page **pages; + struct iova *iova; + struct vm_struct *area; + int i; + int rval; + + size = PAGE_ALIGN(size); + + iova = alloc_iova(&mmu->dmap->iovad, size >> PAGE_SHIFT, + dma_get_mask(dev) >> PAGE_SHIFT, 0); + if (!iova) + return NULL; + + pages = __intel_ipu4_dma_alloc(dev, size, gfp, attrs); + if (!pages) + goto out_free_iova; + + for (i = 0; iova->pfn_lo + i <= iova->pfn_hi; i++) { + rval = iommu_map(mmu->dmap->domain, + (iova->pfn_lo + i) << PAGE_SHIFT, + page_to_phys(pages[i]), PAGE_SIZE, 0); + if (rval) + goto out_unmap; + } + + area = __get_vm_area(size, 0, VMALLOC_START, VMALLOC_END); + if (!area) + goto out_unmap; + + area->pages = pages; + +#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 17, 0) + if (map_vm_area(area, PAGE_KERNEL, &pages)) +#else + if (map_vm_area(area, PAGE_KERNEL, pages)) +#endif + goto out_vunmap; + + *dma_handle = iova->pfn_lo << PAGE_SHIFT; + + mmu->tlb_invalidate(mmu); + + return area->addr; + +out_vunmap: + vunmap(area->addr); + +out_unmap: + for (i--; i >= 0; i--) { + iommu_unmap(mmu->dmap->domain, (iova->pfn_lo + i) << PAGE_SHIFT, + PAGE_SIZE); + } + __intel_ipu4_dma_free(dev, pages, size, attrs); +out_free_iova: + __free_iova(&mmu->dmap->iovad, iova); + + return NULL; +} + +static void intel_ipu4_dma_free(struct device *dev, size_t size, void *vaddr, + dma_addr_t dma_handle, +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 8, 0) + struct dma_attrs *attrs +#else + unsigned long attrs +#endif + ) +{ + struct device *aiommu = to_ipu_bus_device(dev)->iommu; + struct ipu_mmu *mmu = 
dev_get_drvdata(aiommu); + struct vm_struct *area = find_vm_area(vaddr); + struct page **pages; + struct iova *iova = find_iova(&mmu->dmap->iovad, + dma_handle >> PAGE_SHIFT); + + if (WARN_ON(!area)) + return; + + if (WARN_ON(!area->pages)) + return; + + BUG_ON(!iova); + + size = PAGE_ALIGN(size); + + pages = area->pages; + + vunmap(vaddr); + + iommu_unmap(mmu->dmap->domain, iova->pfn_lo << PAGE_SHIFT, + (iova->pfn_hi - iova->pfn_lo + 1) << PAGE_SHIFT); + + __intel_ipu4_dma_free(dev, pages, size, attrs); + + __free_iova(&mmu->dmap->iovad, iova); + + mmu->tlb_invalidate(mmu); +} + +static int intel_ipu4_dma_mmap(struct device *dev, struct vm_area_struct *vma, + void *addr, dma_addr_t iova, size_t size, +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 8, 0) + struct dma_attrs *attrs +#else + unsigned long attrs +#endif + ) +{ + struct vm_struct *area = find_vm_area(addr); + size_t count = PAGE_ALIGN(size) >> PAGE_SHIFT; + size_t i; + + if (!area) + return -EFAULT; + + if (vma->vm_start & ~PAGE_MASK) + return -EINVAL; + + if (size > area->size) + return -EFAULT; + + for (i = 0; i < count; i++) + vm_insert_page(vma, vma->vm_start + (i << PAGE_SHIFT), + area->pages[i]); + + return 0; +} + +static void intel_ipu4_dma_unmap_sg(struct device *dev, + struct scatterlist *sglist, + int nents, enum dma_data_direction dir, +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 8, 0) + struct dma_attrs *attrs +#else + unsigned long attrs +#endif + ) +{ + struct device *aiommu = to_ipu_bus_device(dev)->iommu; + struct ipu_mmu *mmu = dev_get_drvdata(aiommu); + struct iova *iova = find_iova(&mmu->dmap->iovad, + sg_dma_address(sglist) >> PAGE_SHIFT); + + if (!nents) + return; + + BUG_ON(!iova); + +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 8, 0) + if (!dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs)) +#else + if ((attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0) +#endif + intel_ipu4_dma_sync_sg_for_cpu(dev, sglist, nents, + DMA_BIDIRECTIONAL); + + iommu_unmap(mmu->dmap->domain, iova->pfn_lo << PAGE_SHIFT, + 
(iova->pfn_hi - iova->pfn_lo + 1) << PAGE_SHIFT); + + mmu->tlb_invalidate(mmu); + + __free_iova(&mmu->dmap->iovad, iova); +} + +static int intel_ipu4_dma_map_sg(struct device *dev, struct scatterlist *sglist, + int nents, enum dma_data_direction dir, +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 8, 0) + struct dma_attrs *attrs +#else + unsigned long attrs +#endif + ) +{ + struct device *aiommu = to_ipu_bus_device(dev)->iommu; + struct ipu_mmu *mmu = dev_get_drvdata(aiommu); + struct scatterlist *sg; + struct iova *iova; + size_t size = 0; + uint32_t iova_addr; + int i; + + for_each_sg(sglist, sg, nents, i) + size += PAGE_ALIGN(sg->length) >> PAGE_SHIFT; + + dev_dbg(dev, "dmamap: mapping sg %d entries, %zu pages\n", nents, size); + + iova = alloc_iova(&mmu->dmap->iovad, size, + dma_get_mask(dev) >> PAGE_SHIFT, 0); + if (!iova) + return 0; + + dev_dbg(dev, "dmamap: iova low pfn %lu, high pfn %lu\n", iova->pfn_lo, + iova->pfn_hi); + + iova_addr = iova->pfn_lo; + + for_each_sg(sglist, sg, nents, i) { + int rval; + + dev_dbg(dev, + "dmamap details: mapping entry %d: iova 0x%8.8x, \ + physical 0x%16.16llx\n", + i, iova_addr << PAGE_SHIFT, page_to_phys(sg_page(sg))); + rval = iommu_map(mmu->dmap->domain, iova_addr << PAGE_SHIFT, + page_to_phys(sg_page(sg)), + PAGE_ALIGN(sg->length), 0); + if (rval) + goto out_fail; + sg_dma_address(sg) = iova_addr << PAGE_SHIFT; +#ifdef CONFIG_NEED_SG_DMA_LENGTH + sg_dma_len(sg) = sg->length; +#endif /* CONFIG_NEED_SG_DMA_LENGTH */ + + iova_addr += PAGE_ALIGN(sg->length) >> PAGE_SHIFT; + } + +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 8, 0) + if (!dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs)) +#else + if ((attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0) +#endif + intel_ipu4_dma_sync_sg_for_cpu(dev, sglist, nents, + DMA_BIDIRECTIONAL); + + mmu->tlb_invalidate(mmu); + + return nents; + +out_fail: + intel_ipu4_dma_unmap_sg(dev, sglist, i, dir, attrs); + + return 0; +} + +/* +* Create scatter-list for the already allocated DMA buffer +*/ +static int 
intel_ipu4_dma_get_sgtable(struct device *dev, struct sg_table *sgt, + void *cpu_addr, dma_addr_t handle, size_t size, +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 8, 0) + struct dma_attrs *attrs +#else + unsigned long attrs +#endif + ) +{ + struct vm_struct *area = find_vm_area(cpu_addr); + int n_pages; + int ret = 0; + + if (WARN_ON(!area->pages)) + return -ENOMEM; + + n_pages = PAGE_ALIGN(size) >> PAGE_SHIFT; + + ret = sg_alloc_table_from_pages(sgt, area->pages, n_pages, 0, size, + GFP_KERNEL); + if (ret) + dev_dbg(dev, "IPU get sgt table fail\n"); + + return ret; +} + +const struct dma_map_ops ipu_dma_ops = { + .alloc = intel_ipu4_dma_alloc, + .free = intel_ipu4_dma_free, + .mmap = intel_ipu4_dma_mmap, + .map_sg = intel_ipu4_dma_map_sg, + .unmap_sg = intel_ipu4_dma_unmap_sg, + .sync_single_for_cpu = intel_ipu4_dma_sync_single_for_cpu, + .sync_single_for_device = intel_ipu4_dma_sync_single_for_cpu, + .sync_sg_for_cpu = intel_ipu4_dma_sync_sg_for_cpu, + .sync_sg_for_device = intel_ipu4_dma_sync_sg_for_cpu, + .get_sgtable = intel_ipu4_dma_get_sgtable, +}; +EXPORT_SYMBOL_GPL(ipu_dma_ops); + diff --git a/drivers/media/pci/intel/ici/ici-isys-csi2-be.c b/drivers/media/pci/intel/ici/ici-isys-csi2-be.c new file mode 100644 index 0000000000000..6841263a8d16a --- /dev/null +++ b/drivers/media/pci/intel/ici/ici-isys-csi2-be.c @@ -0,0 +1,280 @@ +// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0) +/* + * Copyright (C) 2018 Intel Corporation + */ + +#include "./ici/ici-isys.h" +#ifdef ICI_ENABLED + +#ifndef IPU4_DEBUG +#define IPU4_DEBUG 1 +#endif + +#include "./ici/ici-isys-csi2-be.h" +#include "isysapi/interface/ia_css_isysapi_fw_types.h" + +#define ici_asd_to_csi2_be(__asd) \ + container_of(__asd, struct ici_isys_csi2_be, asd) + +static const uint32_t ici_csi2_be_supported_codes_pad[] = { + ICI_FORMAT_SBGGR12, + ICI_FORMAT_SGBRG12, + ICI_FORMAT_SGRBG12, + ICI_FORMAT_SRGGB12, + ICI_FORMAT_SBGGR10, + ICI_FORMAT_SGBRG10, + ICI_FORMAT_SGRBG10, + ICI_FORMAT_SRGGB10, + 
ICI_FORMAT_SBGGR8, + ICI_FORMAT_SGBRG8, + ICI_FORMAT_SGRBG8, + ICI_FORMAT_SRGGB8, + 0, +}; + +static const uint32_t ici_csi2_be_soc_supported_codes_pad[] = { + ICI_FORMAT_RGB888, + ICI_FORMAT_RGB565, + ICI_FORMAT_UYVY, + ICI_FORMAT_YUYV, + ICI_FORMAT_SBGGR12, + ICI_FORMAT_SGBRG12, + ICI_FORMAT_SGRBG12, + ICI_FORMAT_SRGGB12, + ICI_FORMAT_SBGGR10, + ICI_FORMAT_SGBRG10, + ICI_FORMAT_SGRBG10, + ICI_FORMAT_SRGGB10, + ICI_FORMAT_SBGGR8, + ICI_FORMAT_SGBRG8, + ICI_FORMAT_SGRBG8, + ICI_FORMAT_SRGGB8, + 0, +}; + +static const uint32_t *ici_csi2_be_supported_codes[] = { + ici_csi2_be_supported_codes_pad, + ici_csi2_be_supported_codes_pad, +}; + +static const uint32_t *ici_csi2_be_soc_supported_codes[] = { + ici_csi2_be_soc_supported_codes_pad, + ici_csi2_be_soc_supported_codes_pad, +}; + +static int get_supported_code_index(uint32_t code) +{ + int i; + + for (i = 0; ici_csi2_be_supported_codes_pad[i]; i++) { + if (ici_csi2_be_supported_codes_pad[i] == code) + return i; + } + return -EINVAL; +} + +void ici_csi2_be_set_ffmt(struct ici_isys_subdev *asd, + unsigned pad, + struct ici_framefmt *ffmt) +{ + struct ici_framefmt *cur_ffmt = + __ici_isys_subdev_get_ffmt(asd, pad); + int idx=0; + if (!cur_ffmt) + return; + + ffmt->colorspace = 0; + memset(ffmt->reserved, 0, sizeof(ffmt->reserved)); + switch (pad) { + case CSI2_BE_ICI_PAD_SINK: + DEBUGK("%s: sink pad %u\n", __func__, pad); + if (ffmt->field != ICI_FIELD_ALTERNATE) + ffmt->field = ICI_FIELD_NONE; + *cur_ffmt = *ffmt; + + ici_isys_subdev_fmt_propagate(asd, pad, NULL, + ICI_ISYS_SUBDEV_PROP_TGT_SINK_FMT, + ffmt); + break; + case CSI2_BE_ICI_PAD_SOURCE: { + struct ici_framefmt *sink_ffmt = + __ici_isys_subdev_get_ffmt(asd, + CSI2_BE_ICI_PAD_SINK); + + struct ici_rect *r = + &asd->crop[CSI2_BE_ICI_PAD_SOURCE]; + + u32 code = 0; + if (sink_ffmt) + code = sink_ffmt->pixelformat; + + idx = get_supported_code_index(code); + + DEBUGK("%s: source pad %u\n", __func__, pad); + + if (asd->valid_tgts[CSI2_BE_ICI_PAD_SOURCE].crop + && 
idx >= 0) { + int crop_info = 0; + + DEBUGK("%s: setting CROP, pad %u\n", __func__, + pad); + + if (r->top & 1) + crop_info |= CSI2_BE_ICI_CROP_VER; + if (r->left & 1) + crop_info |= CSI2_BE_ICI_CROP_HOR; + code = ici_csi2_be_supported_codes_pad[(( + idx & + CSI2_BE_ICI_CROP_MASK) + ^ + crop_info) + + + (idx & + ~CSI2_BE_ICI_CROP_MASK)]; + } + + DEBUGK("%s: setting to w:%u,h:%u,pf:%u,field:%u\n", + __func__, r->width, + r->height, code, sink_ffmt->field); + cur_ffmt->width = r->width; + cur_ffmt->height = r->height; + cur_ffmt->pixelformat = code; + cur_ffmt->field = sink_ffmt->field; + *ffmt = *cur_ffmt; + break; + } + default: + BUG_ON(1); + } +} + +static int ici_csi2_be_set_stream( + struct ici_isys_node *node, + void* ip, + int state) +{ + return 0; +} + +static int ici_csi2_be_pipeline_validate( + struct node_pipeline *inp, + struct ici_isys_node *node) +{ + struct ici_isys_subdev* asd = node->sd; + struct ici_isys_csi2_be *csi2_be = + ici_asd_to_csi2_be(asd); + struct ici_isys_pipeline *ip = + ici_nodepipe_to_pipeline(inp); + + ip->csi2_be = csi2_be; + return 0; +} + +int ici_isys_csi2_be_init(struct ici_isys_csi2_be + *csi2_be, + struct ici_isys *isys, + unsigned int type) +{ + struct ici_pad_framefmt pff = { + .pad.pad_idx = CSI2_BE_ICI_PAD_SINK, + .ffmt = { + .width = 4096, + .height = 3072, + }, + }; + int rval; + char name[ICI_MAX_NODE_NAME]; + + dev_info(&isys->adev->dev, "ici_isys_csi2_be_init\n"); + + csi2_be->asd.isys = isys; + if (type == ICI_BE_RAW) { + csi2_be->as.buf_list.css_pin_type = + IA_CSS_ISYS_PIN_TYPE_RAW_NS; + snprintf(name, sizeof(name), + IPU_ISYS_ENTITY_PREFIX " CSI2 BE"); + } else if (type >= ICI_BE_SOC) { + csi2_be->as.buf_list.css_pin_type = + IA_CSS_ISYS_PIN_TYPE_RAW_SOC; + snprintf(name, sizeof(name), + IPU_ISYS_ENTITY_PREFIX " CSI2 BE SOC %u", type-1); + } else { + return -EINVAL; + } + + rval = ici_isys_subdev_init(&csi2_be->asd, + name, + NR_OF_CSI2_BE_ICI_PADS, + 0); + if (rval) { + dev_err(&isys->adev->dev, "can't init 
subdevice\n"); + goto fail_subdev; + } + + csi2_be->asd.pads[CSI2_BE_ICI_PAD_SINK].flags = ICI_PAD_FLAGS_SINK + | ICI_PAD_FLAGS_MUST_CONNECT; + csi2_be->asd.pads[CSI2_BE_ICI_PAD_SOURCE].flags = + ICI_PAD_FLAGS_SOURCE; + + if (type == ICI_BE_RAW) + csi2_be->asd.valid_tgts[CSI2_BE_ICI_PAD_SOURCE].crop = true; + else + csi2_be->asd.valid_tgts[CSI2_BE_ICI_PAD_SOURCE].crop = false; + + csi2_be->asd.set_ffmt_internal = ici_csi2_be_set_ffmt; + + if (type == ICI_BE_RAW) { + csi2_be->asd.supported_codes = ici_csi2_be_supported_codes; + csi2_be->asd.be_mode = ICI_BE_RAW; + csi2_be->asd.isl_mode = ICI_ISL_CSI2_BE; + } else { + csi2_be->asd.supported_codes = ici_csi2_be_soc_supported_codes; + csi2_be->asd.be_mode = ICI_BE_SOC; + csi2_be->asd.isl_mode = ICI_ISL_OFF; + } + + csi2_be->asd.node.node_set_pad_ffmt(&csi2_be->asd.node, &pff); + /* ipu4_isys_csi2_be2_set_sel(&csi2_be->asd.sd, NULL, &sel); */ + /* csi2_be->asd.sd.internal_ops = &csi2_be_sd_internal_ops; */ + csi2_be->asd.node.node_set_streaming = + ici_csi2_be_set_stream; + csi2_be->asd.node.node_pipeline_validate = + ici_csi2_be_pipeline_validate; + + + csi2_be->as.isys = isys; + if (type == ICI_BE_RAW) + csi2_be->as.pfmts = ici_isys_pfmts; + else + csi2_be->as.pfmts = ici_isys_pfmts_be_soc; + + csi2_be->as.try_fmt_vid_mplane = + ici_isys_video_try_fmt_vid_mplane_default; + csi2_be->as.prepare_firmware_stream_cfg = + ici_isys_prepare_firmware_stream_cfg_default; + + rval = ici_isys_stream_init(&csi2_be->as, &csi2_be->asd, + &csi2_be->asd.node, CSI2_BE_ICI_PAD_SOURCE, + ICI_PAD_FLAGS_SINK); + if (rval) { + dev_err(&isys->adev->dev, "can't init stream node\n"); + goto fail_stream; + } + return 0; + +fail_stream: + ici_isys_subdev_cleanup(&csi2_be->asd); +fail_subdev: + return rval; +} +EXPORT_SYMBOL(ici_isys_csi2_be_init); + +void ici_isys_csi2_be_cleanup(struct ici_isys_csi2_be + *csi2_be) +{ + ici_isys_subdev_cleanup(&csi2_be->asd); + ici_isys_stream_cleanup(&csi2_be->as); +} +EXPORT_SYMBOL(ici_isys_csi2_be_cleanup); + 
+#endif /* ICI_ENABLED */ diff --git a/drivers/media/pci/intel/ici/ici-isys-csi2-be.h b/drivers/media/pci/intel/ici/ici-isys-csi2-be.h new file mode 100644 index 0000000000000..428619d245201 --- /dev/null +++ b/drivers/media/pci/intel/ici/ici-isys-csi2-be.h @@ -0,0 +1,36 @@ +/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0) */ +/* + * Copyright (C) 2018 Intel Corporation + */ + +#ifndef ICI_ISYS_CSI2_BE_H +#define ICI_ISYS_CSI2_BE_H + +#include "ici-isys-subdev.h" +#include "ici-isys-stream.h" + +#define CSI2_BE_ICI_PAD_SINK 0 +#define CSI2_BE_ICI_PAD_SOURCE 1 +#define NR_OF_CSI2_BE_ICI_PADS 2 + +#define CSI2_BE_ICI_CROP_HOR (1 << 0) +#define CSI2_BE_ICI_CROP_VER (1 << 1) +#define CSI2_BE_ICI_CROP_MASK (CSI2_BE_ICI_CROP_VER | CSI2_BE_ICI_CROP_HOR) + +struct ici_isys_csi2_be_pdata; +/* + * struct ici_isys_csi2_be + */ +struct ici_isys_csi2_be { + struct ici_isys_csi2_be_pdata *pdata; + struct ici_isys_subdev asd; + struct ici_isys_stream as; +}; + +int ici_isys_csi2_be_init(struct ici_isys_csi2_be + *csi2_be, + struct ici_isys *isys, unsigned int type); +void ici_isys_csi2_be_cleanup(struct ici_isys_csi2_be + *csi2_be); + +#endif /* ICI_ISYS_CSI2_BE_H */ diff --git a/drivers/media/pci/intel/ici/ici-isys-csi2.c b/drivers/media/pci/intel/ici/ici-isys-csi2.c new file mode 100644 index 0000000000000..65416dd6d83be --- /dev/null +++ b/drivers/media/pci/intel/ici/ici-isys-csi2.c @@ -0,0 +1,532 @@ +// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0) +/* + * Copyright (C) 2018 Intel Corporation + */ + +#include "./ici/ici-isys.h" +#ifdef ICI_ENABLED +#include +#include "./ici/ici-isys-subdev.h" +#include "./ici/ici-isys-stream.h" +#include "./ici/ici-isys-csi2.h" +#include "isysapi/interface/ia_css_isysapi_fw_types.h" +#include "ipu-platform-isys-csi2-reg.h" +//#include "intel-ipu-isys-csi2-common.h" + +#define CSI2_ACCINV 8 + +#define ici_asd_to_csi2(__asd, index) \ + container_of(__asd, struct ici_isys_csi2, asd[index]) + +static const uint32_t 
ici_csi2_supported_codes_pad_sink[] = { + ICI_FORMAT_RGB888, + ICI_FORMAT_RGB565, + ICI_FORMAT_UYVY, + ICI_FORMAT_YUYV, + ICI_FORMAT_SBGGR12, + ICI_FORMAT_SGBRG12, + ICI_FORMAT_SGRBG12, + ICI_FORMAT_SRGGB12, + ICI_FORMAT_SBGGR10, + ICI_FORMAT_SGBRG10, + ICI_FORMAT_SGRBG10, + ICI_FORMAT_SRGGB10, + ICI_FORMAT_SBGGR8, + ICI_FORMAT_SGBRG8, + ICI_FORMAT_SGRBG8, + ICI_FORMAT_SRGGB8, + ICI_FORMAT_SBGGR10_DPCM8, + ICI_FORMAT_SGBRG10_DPCM8, + ICI_FORMAT_SGRBG10_DPCM8, + ICI_FORMAT_SRGGB10_DPCM8, + 0, +}; + +static const uint32_t ici_csi2_supported_codes_pad_source[] = { + ICI_FORMAT_RGB888, + ICI_FORMAT_RGB565, + ICI_FORMAT_UYVY, + ICI_FORMAT_YUYV, + ICI_FORMAT_SBGGR12, + ICI_FORMAT_SGBRG12, + ICI_FORMAT_SGRBG12, + ICI_FORMAT_SRGGB12, + ICI_FORMAT_SBGGR10, + ICI_FORMAT_SGBRG10, + ICI_FORMAT_SGRBG10, + ICI_FORMAT_SRGGB10, + ICI_FORMAT_SBGGR8, + ICI_FORMAT_SGBRG8, + ICI_FORMAT_SGRBG8, + ICI_FORMAT_SRGGB8, + 0, +}; + +static const uint32_t *ici_csi2_supported_codes[] = { + ici_csi2_supported_codes_pad_sink, + ici_csi2_supported_codes_pad_source, +}; + +void ici_csi2_set_ffmt(struct ici_isys_subdev *asd, + unsigned pad, + struct ici_framefmt *ffmt) +{ + struct ici_framefmt *cur_ffmt = + __ici_isys_subdev_get_ffmt(asd, pad); + + if (ffmt->field != ICI_FIELD_ALTERNATE) + ffmt->field = ICI_FIELD_NONE; + ffmt->colorspace = 0; + memset(ffmt->reserved, 0, sizeof(ffmt->reserved)); + + switch (pad) { + case CSI2_ICI_PAD_SINK: + if (cur_ffmt) + *cur_ffmt = *ffmt; + ici_isys_subdev_fmt_propagate(asd, pad, NULL, + ICI_ISYS_SUBDEV_PROP_TGT_SINK_FMT, + ffmt); + break; + case CSI2_ICI_PAD_SOURCE:{ + struct ici_framefmt *sink_ffmt = + __ici_isys_subdev_get_ffmt(asd, + CSI2_ICI_PAD_SINK); + if (sink_ffmt) { + *cur_ffmt = *sink_ffmt; + cur_ffmt->pixelformat = + ici_isys_subdev_code_to_uncompressed + (sink_ffmt->pixelformat); + *ffmt = *cur_ffmt; + } + break; + } + default: + BUG_ON(1); + } +} + +static void ici_isys_csi2_error(struct ici_isys_csi2 + *csi2) +{ + /* + * Strings corresponding to 
CSI-2 receiver errors are here. + * Corresponding macros are defined in the header file. + */ + static const struct ici_isys_csi2_error { + const char *error_string; + bool is_info_only; + } errors[] = { + { + "Single packet header error corrected", true}, { + "Multiple packet header errors detected", true}, { + "Payload checksum (CRC) error", true}, { + "FIFO overflow", false}, { + "Reserved short packet data type detected", true}, { + "Reserved long packet data type detected", true}, { + "Incomplete long packet detected", false}, { + "Frame sync error", false}, { + "Line sync error", false}, { + "DPHY recoverable synchronization error", true}, { + "DPHY non-recoverable synchronization error", false}, { + "Escape mode error", true}, { + "Escape mode trigger event", true}, { + "Escape mode ultra-low power state for data lane(s)", true}, + { + "Escape mode ultra-low power state exit for clock lane", + true}, { + "Inter-frame short packet discarded", true}, { + "Inter-frame long packet discarded", true},}; + u32 status = csi2->receiver_errors; + unsigned int i; + + csi2->receiver_errors = 0; + + for (i = 0; i < ARRAY_SIZE(errors); i++) { + if (status & BIT(i)) { + if (errors[i].is_info_only) + dev_dbg(&csi2->isys->adev->dev, + "csi2-%i info: %s\n", + csi2->index, errors[i].error_string); + else + dev_err_ratelimited(&csi2->isys->adev->dev, + "csi2-%i error: %s\n", + csi2->index, + errors[i].error_string); + } + } +} + +#define DIV_SHIFT 8 + +static uint32_t calc_timing(int32_t a, int32_t b, int64_t link_freq, + int32_t accinv) +{ + return accinv * a + (accinv * b * (500000000 >> DIV_SHIFT) + / (int32_t) (link_freq >> DIV_SHIFT)); +} + +int ici_isys_csi2_calc_timing(struct ici_isys_csi2 + *csi2, struct + ici_isys_csi2_timing + *timing, uint32_t accinv) +{ + int64_t link_frequency = 0; + + int idx, rval; + + struct ici_ext_subdev *sd = + (struct ici_ext_subdev*)csi2->ext_sd; + + struct ici_ext_sd_param param = { + .sd = sd, + .id = ICI_EXT_SD_PARAM_ID_LINK_FREQ, + 
.type = ICI_EXT_SD_PARAM_TYPE_INT32, + }; + + if (!sd || !sd->get_param) { + dev_err(&csi2->isys->adev->dev, + "External device not available\n"); + return -ENODEV; + } + rval = sd->get_param(¶m); + if (rval) { + dev_info(&csi2->isys->adev->dev, "can't get link frequency\n"); + return rval; + } + + idx = param.val; + param.type = ICI_EXT_SD_PARAM_TYPE_INT64; + + rval = sd->get_menu_item(¶m, idx); + if (rval) { + dev_info(&csi2->isys->adev->dev, "can't get menu item\n"); + return rval; + } + + link_frequency = param.val; + dev_dbg(&csi2->isys->adev->dev, "%s: link frequency %lld\n", __func__, + link_frequency); + + if (!link_frequency) + return -EINVAL; + + timing->ctermen = calc_timing(CSI2_CSI_RX_DLY_CNT_TERMEN_CLANE_A, + CSI2_CSI_RX_DLY_CNT_TERMEN_CLANE_B, + link_frequency, accinv); + timing->csettle = + calc_timing(CSI2_CSI_RX_DLY_CNT_SETTLE_CLANE_A, + CSI2_CSI_RX_DLY_CNT_SETTLE_CLANE_B, link_frequency, + accinv); + dev_dbg(&csi2->isys->adev->dev, "ctermen %u\n", timing->ctermen); + dev_dbg(&csi2->isys->adev->dev, "csettle %u\n", timing->csettle); + + timing->dtermen = calc_timing(CSI2_CSI_RX_DLY_CNT_TERMEN_DLANE_A, + CSI2_CSI_RX_DLY_CNT_TERMEN_DLANE_B, + link_frequency, accinv); + timing->dsettle = + calc_timing(CSI2_CSI_RX_DLY_CNT_SETTLE_DLANE_A, + CSI2_CSI_RX_DLY_CNT_SETTLE_DLANE_B, link_frequency, + accinv); + dev_dbg(&csi2->isys->adev->dev, "dtermen %u\n", timing->dtermen); + dev_dbg(&csi2->isys->adev->dev, "dsettle %u\n", timing->dsettle); + + return 0; +} + +static void ici_isys_register_errors(struct + ici_isys_csi2 + *csi2) +{ + u32 status = readl(csi2->base + CSI2_REG_CSIRX_IRQ_STATUS); + + dev_dbg(&csi2->isys->adev->dev, + "ici_isys_register_errors\n"); + writel(status, csi2->base + CSI2_REG_CSIRX_IRQ_CLEAR); + csi2->receiver_errors |= status; +} + +static void ici_isys_csi2_sof_event(struct ici_isys_csi2 + *csi2, unsigned int vc) +{ + unsigned long flags; + + spin_lock_irqsave(&csi2->isys->lock, flags); + csi2->in_frame = true; + 
spin_unlock_irqrestore(&csi2->isys->lock, flags); +} + +static void ici_isys_csi2_eof_event(struct ici_isys_csi2 + *csi2, unsigned int vc) +{ + unsigned long flags; + + spin_lock_irqsave(&csi2->isys->lock, flags); + csi2->in_frame = false; + if (csi2->wait_for_sync) + complete(&csi2->eof_completion); + spin_unlock_irqrestore(&csi2->isys->lock, flags); +} + +void ici_isys_csi2_isr(struct ici_isys_csi2 *csi2) +{ + u32 status = readl(csi2->base + CSI2_REG_CSI2PART_IRQ_STATUS); + unsigned int i; + + writel(status, csi2->base + CSI2_REG_CSI2PART_IRQ_CLEAR); + + if (status & CSI2_CSI2PART_IRQ_CSIRX) + ici_isys_register_errors(csi2); + + for (i = 0; i < NR_OF_CSI2_ICI_VC; i++) { + if ((status & CSI2_IRQ_FS_VC(i))) + ici_isys_csi2_sof_event(csi2, i); + + if ((status & CSI2_IRQ_FE_VC(i))) + ici_isys_csi2_eof_event(csi2, i); + } + +} +EXPORT_SYMBOL(ici_isys_csi2_isr); + +void ici_isys_csi2_wait_last_eof(struct ici_isys_csi2 + *csi2) +{ + unsigned long flags; + int tout; + + spin_lock_irqsave(&csi2->isys->lock, flags); + if (!csi2->in_frame) { + spin_unlock_irqrestore(&csi2->isys->lock, flags); + return; + } + + reinit_completion(&csi2->eof_completion); + csi2->wait_for_sync = true; + spin_unlock_irqrestore(&csi2->isys->lock, flags); + tout = wait_for_completion_timeout(&csi2->eof_completion, + ICI_EOF_TIMEOUT_JIFFIES); + if (!tout) { + dev_err(&csi2->isys->adev->dev, + "csi2-%d: timeout at sync to eof\n", csi2->index); + } + csi2->wait_for_sync = false; +} + +static void csi2_capture_done(struct ici_isys_pipeline *ip, + struct ia_css_isys_resp_info *info) +{ + ici_isys_frame_buf_capture_done(ip, info); + if (ip->csi2) + ici_isys_csi2_error(ip->csi2); +} + +int ici_csi2_set_stream( + struct ici_isys_node *node, + void* ip, + int state) +{ + struct ici_isys_subdev* asd = node->sd; + struct ici_isys_csi2 *csi2 = + ici_asd_to_csi2(asd, asd->index); + struct ici_isys_csi2_timing timing = { 0 }; + unsigned int i, nlanes; + int rval; + u32 csi2csirx = 0, csi2part = 0; + + 
dev_dbg(&csi2->isys->adev->dev, "csi2 s_stream %d\n", state); + + if (!state) { + ici_isys_csi2_error(csi2); + writel(0, csi2->base + CSI2_REG_CSI_RX_CONFIG); + writel(0, csi2->base + CSI2_REG_CSI_RX_ENABLE); + + /* Disable interrupts */ + writel(0, csi2->base + CSI2_REG_CSI2S2M_IRQ_MASK); + writel(0, csi2->base + CSI2_REG_CSI2S2M_IRQ_ENABLE); + writel(0, csi2->base + CSI2_REG_CSI2PART_IRQ_MASK); + writel(0, csi2->base + CSI2_REG_CSI2PART_IRQ_ENABLE); + return 0; + } + + ici_isys_stream_add_capture_done(ip, csi2_capture_done); + + nlanes = csi2->nlanes; + + rval = ici_isys_csi2_calc_timing(csi2, + &timing, + CSI2_ACCINV); + if (rval) + return rval; + + writel(timing.ctermen, + csi2->base + CSI2_REG_CSI_RX_DLY_CNT_TERMEN_CLANE); + writel(timing.csettle, + csi2->base + CSI2_REG_CSI_RX_DLY_CNT_SETTLE_CLANE); + + for (i = 0; i < nlanes; i++) { + writel(timing.dtermen, + csi2->base + CSI2_REG_CSI_RX_DLY_CNT_TERMEN_DLANE(i)); + writel(timing.dsettle, + csi2->base + CSI2_REG_CSI_RX_DLY_CNT_SETTLE_DLANE(i)); + } + writel(CSI2_CSI_RX_CONFIG_DISABLE_BYTE_CLK_GATING | + CSI2_CSI_RX_CONFIG_RELEASE_LP11, + csi2->base + CSI2_REG_CSI_RX_CONFIG); + + writel(nlanes, csi2->base + CSI2_REG_CSI_RX_NOF_ENABLED_LANES); + + writel(CSI2_CSI_RX_ENABLE_ENABLE, csi2->base + CSI2_REG_CSI_RX_ENABLE); + + /* SOF enabled from CSI2PART register in B0 */ + for (i = 0; i < NR_OF_CSI2_ICI_VC; i++) + csi2part |= CSI2_IRQ_FS_VC(i) | CSI2_IRQ_FE_VC(i); + + /* Enable csi2 receiver error interrupts */ + csi2csirx = BIT(CSI2_CSIRX_NUM_ERRORS) - 1; + writel(csi2csirx, csi2->base + CSI2_REG_CSIRX_IRQ_EDGE); + writel(0, csi2->base + CSI2_REG_CSIRX_IRQ_LEVEL_NOT_PULSE); + writel(csi2csirx, csi2->base + CSI2_REG_CSIRX_IRQ_CLEAR); + writel(csi2csirx, csi2->base + CSI2_REG_CSIRX_IRQ_MASK); + writel(csi2csirx, csi2->base + CSI2_REG_CSIRX_IRQ_ENABLE); + + /* Enable csi2 error and SOF-related irqs */ + writel(csi2part, csi2->base + CSI2_REG_CSI2PART_IRQ_EDGE); + writel(0, csi2->base + 
CSI2_REG_CSI2PART_IRQ_LEVEL_NOT_PULSE); + writel(csi2part, csi2->base + CSI2_REG_CSI2PART_IRQ_CLEAR); + writel(csi2part, csi2->base + CSI2_REG_CSI2PART_IRQ_MASK); + writel(csi2part, csi2->base + CSI2_REG_CSI2PART_IRQ_ENABLE); + + return 0; +} + +unsigned int ici_isys_csi2_get_current_field( + struct device* dev, + struct ici_isys_mipi_packet_header *ph) +{ + unsigned int field; + + /* Check if the first SOF packet is received. */ + if ((ph->dtype & ICI_ISYS_SHORT_PACKET_DTYPE_MASK) != 0) + dev_warn(dev, + "First short packet is not SOF.\n"); + field = (ph->word_count % 2) ? ICI_FIELD_TOP : + ICI_FIELD_BOTTOM; + dev_dbg(dev, + "Interlaced field ready. frame_num = %d field = %d\n", + ph->word_count, field); + + return field; +} + +static int ici_csi2_pipeline_validate( + struct node_pipeline *inp, + struct ici_isys_node *node) +{ + struct ici_isys_subdev* asd = node->sd; + struct ici_isys_csi2 *csi2 = + ici_asd_to_csi2(asd, asd->index); + struct ici_isys_pipeline *ip = + ici_nodepipe_to_pipeline(inp); + + if (ip->csi2) { + dev_err(&csi2->isys->adev->dev, + "Pipeline does not support > 1 CSI2 node\n"); + return -EINVAL; + } + node->pipe = inp; + ip->csi2 = csi2; + ip->asd_source = asd; + ip->vc = asd - csi2->asd; // index of asd element in csi2->asd array + ip->asd_source_pad_id = CSI2_ICI_PAD_SINK; + return 0; +} + +int ici_isys_csi2_init(struct ici_isys_csi2 *csi2, + struct ici_isys *isys, + void __iomem *base, unsigned int index) +{ + struct ici_pad_framefmt fmt = { + .pad.pad_idx = CSI2_ICI_PAD_SINK, + .ffmt = { + .width = 4096, + .height = 3072, + }, + }; + + int rval; + char name[ICI_MAX_NODE_NAME]; + unsigned int i; + + csi2->isys = isys; + csi2->base = base; + csi2->index = index; + + for(i=0; iasd[i].isys = isys; + rval = ici_isys_subdev_init(&csi2->asd[i], + name, + NR_OF_CSI2_ICI_PADS, + i); + if (rval) + goto fail; + + csi2->asd[i].pads[CSI2_ICI_PAD_SINK].flags = ICI_PAD_FLAGS_SINK; + csi2->asd[i].pads[CSI2_ICI_PAD_SOURCE].flags = ICI_PAD_FLAGS_SOURCE; + + 
csi2->asd[i].source = IA_CSS_ISYS_STREAM_SRC_CSI2_PORT0 + index; + csi2->asd[i].supported_codes = ici_csi2_supported_codes; + csi2->asd[i].set_ffmt_internal = ici_csi2_set_ffmt; + csi2->asd[i].node.node_set_streaming = + ici_csi2_set_stream; + csi2->asd[i].node.node_pipeline_validate = + ici_csi2_pipeline_validate; + + csi2->asd[i].node.node_set_pad_ffmt(&csi2->asd[i].node, &fmt); + + snprintf(csi2->as[i].node.name, sizeof(csi2->as[i].node.name), + IPU_ISYS_ENTITY_PREFIX " CSI-2 %u VC %u capture", index, i); + csi2->as[i].isys = isys; + csi2->as[i].try_fmt_vid_mplane = + ici_isys_video_try_fmt_vid_mplane_default; + csi2->as[i].prepare_firmware_stream_cfg = + ici_isys_prepare_firmware_stream_cfg_default; + csi2->as[i].packed = true; + csi2->as[i].buf_list.css_pin_type = IA_CSS_ISYS_PIN_TYPE_MIPI; + csi2->as[i].pfmts = ici_isys_pfmts_packed; + csi2->as[i].line_header_length = + INTEL_IPU4_ISYS_CSI2_LONG_PACKET_HEADER_SIZE; + csi2->as[i].line_footer_length = + INTEL_IPU4_ISYS_CSI2_LONG_PACKET_FOOTER_SIZE; + init_completion(&csi2->eof_completion); + + rval = ici_isys_stream_init(&csi2->as[i], &csi2->asd[i], + &csi2->asd[i].node, CSI2_ICI_PAD_SOURCE, + ICI_PAD_FLAGS_SINK); + if (rval) { + dev_err(&isys->adev->dev, "can't init stream node\n"); + goto fail; + } + } + init_completion(&csi2->eof_completion); + + return 0; + +fail: + ici_isys_csi2_cleanup(csi2); + + return rval; +} +EXPORT_SYMBOL(ici_isys_csi2_init); + +void ici_isys_csi2_cleanup(struct ici_isys_csi2 *csi2) +{ + ici_isys_subdev_cleanup(&csi2->asd[0]); + ici_isys_stream_cleanup(&csi2->as[0]); +} +EXPORT_SYMBOL(ici_isys_csi2_cleanup); + +#endif /* ICI_ENABLED */ diff --git a/drivers/media/pci/intel/ici/ici-isys-csi2.h b/drivers/media/pci/intel/ici/ici-isys-csi2.h new file mode 100644 index 0000000000000..504e413af8ea3 --- /dev/null +++ b/drivers/media/pci/intel/ici/ici-isys-csi2.h @@ -0,0 +1,156 @@ +/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0) */ +/* + * Copyright (C) 2018 Intel Corporation + */ + 
+#ifndef ICI_ISYS_CSI2_H +#define ICI_ISYS_CSI2_H + +#include "ici-isys-frame-buf.h" +#include "ici-isys-subdev.h" +#include "ici-isys-stream.h" + +struct ici_isys_csi2_pdata; + +#define CSI2_ICI_PAD_SINK 0 +#define CSI2_ICI_PAD_SOURCE 1 +#define NR_OF_CSI2_ICI_PADS 2 +#define NR_OF_CSI2_ICI_VC 4 + +#define ICI_ISYS_SHORT_PACKET_BUFFER_NUM 32 +#define ICI_ISYS_SHORT_PACKET_WIDTH 32 +#define ICI_ISYS_SHORT_PACKET_FRAME_PACKETS 2 +#define ICI_ISYS_SHORT_PACKET_EXTRA_PACKETS 64 +#define ICI_ISYS_SHORT_PACKET_UNITSIZE 8 +#define ICI_ISYS_SHORT_PACKET_GENERAL_DT 0 +#define ICI_ISYS_SHORT_PACKET_PT 0 +#define ICI_ISYS_SHORT_PACKET_FT 0 +#define ICI_ISYS_SHORT_PACKET_DTYPE_MASK 0x3f +#define ICI_ISYS_SHORT_PACKET_STRIDE \ + (ICI_ISYS_SHORT_PACKET_WIDTH * \ + ICI_ISYS_SHORT_PACKET_UNITSIZE) +#define ICI_ISYS_SHORT_PACKET_NUM(num_lines) \ + ((num_lines) * 2 + ICI_ISYS_SHORT_PACKET_FRAME_PACKETS + \ + ICI_ISYS_SHORT_PACKET_EXTRA_PACKETS) +#define ICI_ISYS_SHORT_PACKET_PKT_LINES(num_lines) \ + DIV_ROUND_UP(ICI_ISYS_SHORT_PACKET_NUM(num_lines) * \ + ICI_ISYS_SHORT_PACKET_UNITSIZE, \ + ICI_ISYS_SHORT_PACKET_STRIDE) +#define ICI_ISYS_SHORT_PACKET_BUF_SIZE(num_lines) \ + (ICI_ISYS_SHORT_PACKET_WIDTH * \ + ICI_ISYS_SHORT_PACKET_PKT_LINES(num_lines) * \ + ICI_ISYS_SHORT_PACKET_UNITSIZE) +#define IPU_ISYS_SHORT_PACKET_TRACE_MSG_NUMBER 256 +#define IPU_ISYS_SHORT_PACKET_TRACE_MSG_SIZE 16 +#define IPU_ISYS_SHORT_PACKET_TRACE_BUFFER_SIZE \ + (IPU_ISYS_SHORT_PACKET_TRACE_MSG_NUMBER * \ + IPU_ISYS_SHORT_PACKET_TRACE_MSG_SIZE) +#define IPU_ISYS_SHORT_PACKET_TRACE_MAX_TIMESHIFT 100 +#define IPU_ISYS_SHORT_PACKET_FROM_RECEIVER 0 +#define IPU_ISYS_SHORT_PACKET_FROM_TUNIT 1 + +#define ICI_EOF_TIMEOUT 1000 +#define ICI_EOF_TIMEOUT_JIFFIES msecs_to_jiffies(ICI_EOF_TIMEOUT) + +#define IPU_ISYS_SHORT_PACKET_TRACE_MAX_TIMESHIFT 100 +#define IPU_ISYS_SHORT_PACKET_TRACE_EVENT_MASK 0x2082 +#define IPU_SKEW_CAL_LIMIT_HZ (1500000000ul / 2) + +#define CSI2_CSI_RX_DLY_CNT_TERMEN_CLANE_A 0 +#define 
CSI2_CSI_RX_DLY_CNT_TERMEN_CLANE_B 0 +#define CSI2_CSI_RX_DLY_CNT_SETTLE_CLANE_A 95 +#define CSI2_CSI_RX_DLY_CNT_SETTLE_CLANE_B -8 + +#define CSI2_CSI_RX_DLY_CNT_TERMEN_DLANE_A 0 +#define CSI2_CSI_RX_DLY_CNT_TERMEN_DLANE_B 0 +#define CSI2_CSI_RX_DLY_CNT_SETTLE_DLANE_A 85 +#define CSI2_CSI_RX_DLY_CNT_SETTLE_DLANE_B -2 + +/* + * struct ici_isys_csi2 + * + */ +struct ici_isys_csi2 { + struct ici_isys_csi2_pdata *pdata; + struct ici_isys *isys; + struct ici_isys_subdev asd[NR_OF_CSI2_ICI_VC]; + struct ici_isys_stream as[NR_OF_CSI2_ICI_VC]; + void *ext_sd; + + void __iomem *base; + u32 receiver_errors; + unsigned int nlanes; + unsigned int index; + atomic_t sof_sequence; + + bool wait_for_sync; + bool in_frame; + struct completion eof_completion; +}; + +struct ici_isys_csi2_timing { + uint32_t ctermen; + uint32_t csettle; + uint32_t dtermen; + uint32_t dsettle; +}; + +/* + * This structure defines the MIPI packet header output + * from IPU4 MIPI receiver. Due to hardware conversion, + * this structure is not the same as defined in CSI-2 spec. + */ +__packed struct ici_isys_mipi_packet_header { + uint32_t word_count : 16, + dtype : 13, + sync : 2, + stype : 1; + uint32_t sid : 4, + port_id : 4, + reserved : 23, + odd_even : 1; +}; + +/* + * This structure defines the trace message content + * for CSI2 receiver monitor messages. 
+ */ +__packed struct ici_isys_csi2_monitor_message { + uint64_t fe : 1, + fs : 1, + pe : 1, + ps : 1, + le : 1, + ls : 1, + reserved1 : 2, + sequence : 2, + reserved2 : 2, + flash_shutter : 4, + error_cause : 12, + fifo_overrun : 1, + crc_error : 2, + reserved3 : 1, + timestamp_l : 16, + port : 4, + vc : 2, + reserved4 : 2, + frame_sync : 4, + reserved5 : 4; + uint64_t reserved6 : 3, + cmd : 2, + reserved7 : 1, + monitor_id : 7, + reserved8 : 1, + timestamp_h : 50; +}; + +int ici_isys_csi2_init(struct ici_isys_csi2 *csi2, + struct ici_isys *isys, + void __iomem *base, unsigned int index); +void ici_isys_csi2_cleanup(struct ici_isys_csi2 *csi2); +void ici_isys_csi2_wait_last_eof(struct ici_isys_csi2 *csi2); +void ici_isys_csi2_isr(struct ici_isys_csi2 *csi2); +unsigned int ici_isys_csi2_get_current_field( + struct device* dev, struct ici_isys_mipi_packet_header *ph); + +#endif /* ICI_ISYS_CSI2_H */ diff --git a/drivers/media/pci/intel/ici/ici-isys-frame-buf.c b/drivers/media/pci/intel/ici/ici-isys-frame-buf.c new file mode 100644 index 0000000000000..c8451b28e8d5f --- /dev/null +++ b/drivers/media/pci/intel/ici/ici-isys-frame-buf.c @@ -0,0 +1,1055 @@ +// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0) +/* + * Copyright (C) 2018 Intel Corporation + */ + +#include +#include +#include + +#include "./ici/ici-isys.h" +#ifdef ICI_ENABLED + +#include "isysapi/interface/ia_css_isysapi_types.h" +#include "isysapi/interface/ia_css_isysapi.h" +#include "./ici/ici-isys-frame-buf.h" + +#define get_frame_entry_to_buf_wrap(get_entry) \ + container_of(get_entry, struct ici_frame_buf_wrapper,\ + get_frame_entry) + +#define put_frame_entry_to_buf_wrap(put_entry) \ + container_of(put_entry, struct ici_frame_buf_wrapper,\ + put_frame_entry) + +static struct ici_frame_buf_wrapper +*ici_frame_buf_lookup(struct ici_isys_frame_buf_list + *buf_list, + struct ici_frame_info + *user_frame_info) +{ + struct ici_frame_buf_wrapper *buf; + int i; + int mem_type = user_frame_info->mem_type; + 
unsigned long flags = 0; + + spin_lock_irqsave(&buf_list->lock, flags); + list_for_each_entry(buf, &buf_list->getbuf_list, node) { + for (i = 0; i < user_frame_info->num_planes; i++) { + struct ici_frame_plane *new_plane = + &user_frame_info->frame_planes[i]; + struct ici_frame_plane *cur_plane = + &buf->frame_info.frame_planes[i]; + if (buf->state != ICI_BUF_PREPARED && + buf->state != ICI_BUF_DONE){ + continue; + } + + switch (mem_type) { + case ICI_MEM_USERPTR: + if (new_plane->mem.userptr == + cur_plane->mem.userptr) { + spin_unlock_irqrestore(&buf_list->lock, flags); + return buf; + } + break; + case ICI_MEM_DMABUF: + if (new_plane->mem.dmafd == + cur_plane->mem.dmafd) { + spin_unlock_irqrestore(&buf_list->lock, flags); + return buf; + } + break; + } + //TODO: add multiplaner checks + } + + } + spin_unlock_irqrestore(&buf_list->lock, flags); + return NULL; +} + +static void ici_put_userpages(struct device *dev, + struct ici_kframe_plane + *kframe_plane) +{ + struct sg_table *sgt = kframe_plane->sgt; + struct scatterlist *sgl; + unsigned int i; +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 8, 0) + DEFINE_DMA_ATTRS(attrs); +#else + unsigned long attrs; +#endif + + struct mm_struct* mm = current->active_mm; + if (!mm){ + dev_err(dev, "Failed to get active mm_struct ptr from current process.\n"); + return; + } + + down_read(&mm->mmap_sem); + +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 8, 0) + dma_set_attr(DMA_ATTR_SKIP_CPU_SYNC, &attrs); + dma_unmap_sg_attrs(kframe_plane->dev, sgt->sgl, sgt->orig_nents, + DMA_FROM_DEVICE, &attrs); +#else + attrs = DMA_ATTR_SKIP_CPU_SYNC; + dma_unmap_sg_attrs(kframe_plane->dev, sgt->sgl, sgt->orig_nents, + DMA_FROM_DEVICE, attrs); +#endif + + for_each_sg(sgt->sgl, sgl, sgt->orig_nents, i) { + struct page *page = sg_page(sgl); + + unsigned int npages = PAGE_ALIGN(sgl->offset + sgl->length) + >> PAGE_SHIFT; + unsigned int page_no; + + for (page_no = 0; page_no < npages; ++page_no, ++page) { + set_page_dirty_lock(page); + 
put_page(page); + } + } + + kfree(sgt); + kframe_plane->sgt = NULL; + + up_read(&mm->mmap_sem); +} + +static void ici_put_userpages_virt(struct device *dev, + struct ici_kframe_plane + *kframe_plane) +{ + struct sg_table *sgt = kframe_plane->sgt; +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 8, 0) + DEFINE_DMA_ATTRS(attrs); +#else + unsigned long attrs; +#endif + + struct mm_struct* mm = current->active_mm; + if (!mm){ + dev_err(dev, "Failed to get active mm_struct ptr from current process.\n"); + return; + } + + down_read(&mm->mmap_sem); + +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 8, 0) + dma_set_attr(DMA_ATTR_SKIP_CPU_SYNC, &attrs); + dma_unmap_sg_attrs(kframe_plane->dev, sgt->sgl, sgt->orig_nents, + DMA_FROM_DEVICE, &attrs); +#else + attrs = DMA_ATTR_SKIP_CPU_SYNC; + dma_unmap_sg_attrs(kframe_plane->dev, sgt->sgl, sgt->orig_nents, + DMA_FROM_DEVICE, attrs); +#endif + + kfree(sgt); + kframe_plane->sgt = NULL; + + up_read(&mm->mmap_sem); +} + +static void ici_put_dma(struct device *dev, + struct ici_kframe_plane + *kframe_plane) +{ + struct sg_table *sgt = kframe_plane->sgt; + + if (WARN_ON(!kframe_plane->db_attach)) { + pr_err("trying to unpin a not attached buffer\n"); + return; + } + + if (WARN_ON(!sgt)) { + pr_err("dmabuf buffer is already unpinned\n"); + return; + } + + if (kframe_plane->kaddr) { + dma_buf_vunmap(kframe_plane->db_attach->dmabuf, + kframe_plane->kaddr); + kframe_plane->kaddr = NULL; + } + dma_buf_unmap_attachment(kframe_plane->db_attach, sgt, + DMA_BIDIRECTIONAL); + + kframe_plane->dma_addr = 0; + kframe_plane->sgt = NULL; + +} + +static int ici_map_dma(struct device *dev, + struct ici_frame_plane + *frame_plane, + struct ici_kframe_plane + *kframe_plane) +{ + + int ret = 0; + int fd = frame_plane->mem.dmafd; + + kframe_plane->dbdbuf = dma_buf_get(fd); + if (!kframe_plane->dbdbuf) { + ret = -EINVAL; + goto error; + } + + if (frame_plane->length == 0) + kframe_plane->length = kframe_plane->dbdbuf->size; + else + kframe_plane->length = 
frame_plane->length; + + kframe_plane->fd = fd; + kframe_plane->db_attach = dma_buf_attach(kframe_plane->dbdbuf, dev); + + if (IS_ERR(kframe_plane->db_attach)) { + ret = PTR_ERR(kframe_plane->db_attach); + goto error_put; + } + + kframe_plane->sgt = dma_buf_map_attachment(kframe_plane->db_attach, + DMA_BIDIRECTIONAL); + if (IS_ERR_OR_NULL(kframe_plane->sgt)) { + ret = -EINVAL; + kframe_plane->sgt = NULL; + dev_dbg(dev, "map attachment failed\n"); + goto error_detach; + } + + kframe_plane->dma_addr = sg_dma_address(kframe_plane->sgt->sgl); + kframe_plane->kaddr = dma_buf_vmap(kframe_plane->dbdbuf); + + if (!kframe_plane->kaddr) { + ret = -EINVAL; + goto error_detach; + } + + dev_dbg(dev, "MAPBUF: mapped fd %d\n", fd); + + return 0; + +error_detach: + dma_buf_detach(kframe_plane->dbdbuf, kframe_plane->db_attach); +error_put: + dma_buf_put(kframe_plane->dbdbuf); +error: + return ret; +} + +static int ici_get_userpages(struct device *dev, + struct ici_frame_plane + *frame_plane, + struct ici_kframe_plane + *kframe_plane) +{ + unsigned long start, end, addr; + int npages, array_size; + struct page **pages; + int nr = 0; + int ret = 0; + struct sg_table *sgt; + unsigned int i; +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 8, 0) + DEFINE_DMA_ATTRS(attrs); +#else + unsigned long attrs; +#endif + + addr = (unsigned long)frame_plane->mem.userptr; + start = addr & PAGE_MASK; + end = PAGE_ALIGN(addr + frame_plane->length); + npages = (end - start) >> PAGE_SHIFT; + array_size = npages * sizeof(struct page *); + + if (!npages) + return -EINVAL; + + sgt = kzalloc(sizeof(*sgt), GFP_KERNEL); + if (!sgt) + return -ENOMEM; + + if (array_size <= PAGE_SIZE) + pages = kzalloc(array_size, GFP_KERNEL); + else + pages = vzalloc(array_size); + + if (!pages) { + kfree(sgt); + return -ENOMEM; + } + + down_read(¤t->mm->mmap_sem); +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 8, 0) + nr = get_user_pages( + current, current->mm, + start, npages, 1, 0, pages, NULL); +#else + nr = 
get_user_pages(start, npages, FOLL_WRITE, pages, NULL); +#endif + if (nr < npages) + goto error_free_pages; + + ret = sg_alloc_table_from_pages(sgt, pages, npages, + addr & ~PAGE_MASK, frame_plane->length, + GFP_KERNEL); + if (ret) { + dev_err(dev, "Failed to init sgt\n"); + goto error_free_pages; + } + + + kframe_plane->dev = dev; +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 8, 0) + dma_set_attr(DMA_ATTR_SKIP_CPU_SYNC, &attrs); + sgt->nents = dma_map_sg_attrs(dev, sgt->sgl, sgt->orig_nents, + DMA_FROM_DEVICE, &attrs); +#else + attrs = DMA_ATTR_SKIP_CPU_SYNC; + sgt->nents = dma_map_sg_attrs(dev, sgt->sgl, sgt->orig_nents, + DMA_FROM_DEVICE, attrs); +#endif + + if (sgt->nents <= 0) { + dev_err(dev, "Failed to init dma_map\n"); + ret = -EIO; + goto error_dma_map; + } + kframe_plane->dma_addr = sg_dma_address(sgt->sgl); + kframe_plane->sgt = sgt; + +error_free_page_list: + if (pages) { + if (array_size <= PAGE_SIZE) + kfree(pages); + else + vfree(pages); + } + up_read(¤t->mm->mmap_sem); + return ret; + +error_dma_map: +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 8, 0) + dma_unmap_sg_attrs(dev, sgt->sgl, sgt->orig_nents, + DMA_FROM_DEVICE, &attrs); +#else + dma_unmap_sg_attrs(dev, sgt->sgl, sgt->orig_nents, + DMA_FROM_DEVICE, attrs); +#endif + +error_free_pages: + if (pages) { + for (i = 0; i < nr; i++) + put_page(pages[i]); + } + kfree(sgt); + goto error_free_page_list; +} + +static int ici_get_userpages_virt(struct device *dev, + struct ici_frame_plane + *frame_plane, + struct ici_kframe_plane + *kframe_plane, + struct page **pages) +{ + unsigned long addr; + int npages; + int ret = 0; + struct sg_table *sgt; +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 8, 0) + DEFINE_DMA_ATTRS(attrs); +#else + unsigned long attrs; +#endif + + addr = (unsigned long)frame_plane->mem.userptr; + npages = kframe_plane->npages; + + if (!npages) + return -EINVAL; + + sgt = kzalloc(sizeof(*sgt), GFP_KERNEL); + if (!sgt) + return -ENOMEM; + + ret = sg_alloc_table_from_pages(sgt, pages, npages, 
+ addr & ~PAGE_MASK, frame_plane->length, + GFP_KERNEL); + if (ret) { + dev_err(dev, "Failed to init sgt\n"); + goto error_free_pages; + } + + + kframe_plane->dev = dev; +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 8, 0) + dma_set_attr(DMA_ATTR_SKIP_CPU_SYNC, &attrs); + sgt->nents = dma_map_sg_attrs(dev, sgt->sgl, sgt->orig_nents, + DMA_FROM_DEVICE, &attrs); +#else + attrs = DMA_ATTR_SKIP_CPU_SYNC; + sgt->nents = dma_map_sg_attrs(dev, sgt->sgl, sgt->orig_nents, + DMA_FROM_DEVICE, attrs); +#endif + + if (sgt->nents <= 0) { + dev_err(dev, "Failed to init dma_map\n"); + ret = -EIO; + goto error_dma_map; + } + kframe_plane->dma_addr = sg_dma_address(sgt->sgl); + kframe_plane->sgt = sgt; + +error_free_page_list: + return ret; + +error_dma_map: +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 8, 0) + dma_unmap_sg_attrs(dev, sgt->sgl, sgt->orig_nents, + DMA_FROM_DEVICE, &attrs); +#else + dma_unmap_sg_attrs(dev, sgt->sgl, sgt->orig_nents, + DMA_FROM_DEVICE, attrs); +#endif + +error_free_pages: + kfree(sgt); + goto error_free_page_list; +} + +int ici_isys_get_buf(struct ici_isys_stream *as, + struct ici_frame_info *frame_info) +{ + int res; + unsigned i; + struct ici_frame_buf_wrapper *buf; + unsigned long flags = 0; + + struct ici_kframe_plane *kframe_plane; + struct ici_isys_frame_buf_list *buf_list = &as->buf_list; + int mem_type = frame_info->mem_type; + + if (mem_type != ICI_MEM_USERPTR && + mem_type != ICI_MEM_DMABUF) { + dev_err(&as->isys->adev->dev, "Memory type not supproted\n"); + return -EINVAL; + } + + if (!frame_info->frame_planes[0].length) { + dev_err(&as->isys->adev->dev, "User length not set\n"); + return -EINVAL; + } + buf = ici_frame_buf_lookup(buf_list, frame_info); + + if (buf) { + buf->state = ICI_BUF_PREPARED; + return 0; + } + + buf = kzalloc(sizeof(*buf), GFP_KERNEL); + if (!buf) + return -ENOMEM; + + buf->buf_id = frame_info->frame_buf_id; + buf->buf_list = buf_list; + memcpy(&buf->frame_info, frame_info, sizeof(buf->frame_info)); + + switch (mem_type) { 
+ case ICI_MEM_USERPTR: + if (!frame_info->frame_planes[0].mem.userptr) { + dev_err(&as->isys->adev->dev, + "User pointer not define\n"); + res = -EINVAL; + goto err_exit; + } + for (i = 0; i < frame_info->num_planes; i++) { + kframe_plane = &buf->kframe_info.planes[i]; + kframe_plane->mem_type = + ICI_MEM_USERPTR; + res = + ici_get_userpages( + &as->isys->adev->dev, + &frame_info-> + frame_planes[i], + kframe_plane); + if (res) + goto err_exit; + } + break; + case ICI_MEM_DMABUF: + for (i = 0; i < frame_info->num_planes; i++) { + kframe_plane = &buf->kframe_info.planes[i]; + kframe_plane->mem_type = + ICI_MEM_DMABUF; + res = ici_map_dma( + &as->isys->adev->dev, + &frame_info-> + frame_planes[i], + kframe_plane); + if (res) + goto err_exit; + } + break; + } + + spin_lock_irqsave(&buf_list->lock, flags); + buf->state = ICI_BUF_PREPARED; + list_add_tail(&buf->node, &buf_list->getbuf_list); + spin_unlock_irqrestore(&buf_list->lock, flags); + return 0; + +err_exit: + kfree(buf); + return res; +} + +int ici_isys_get_buf_virt(struct ici_isys_stream *as, + struct ici_frame_buf_wrapper *frame_buf, + struct page **pages) +{ + int res; + unsigned i; + unsigned long flags = 0; + struct ici_frame_buf_wrapper *buf; + + struct ici_kframe_plane *kframe_plane; + struct ici_isys_frame_buf_list *buf_list = &as->buf_list; + int mem_type = frame_buf->frame_info.mem_type; + + if (mem_type != ICI_MEM_USERPTR && + mem_type != ICI_MEM_DMABUF) { + dev_err(&as->isys->adev->dev, "Memory type not supproted\n"); + return -EINVAL; + } + + if (!frame_buf->frame_info.frame_planes[0].length) { + dev_err(&as->isys->adev->dev, "User length not set\n"); + return -EINVAL; + } + buf = ici_frame_buf_lookup(buf_list, &frame_buf->frame_info); + + if (buf) { + buf->state = ICI_BUF_PREPARED; + return 0; + } + + pr_debug("%s: creating new buf object\n", __func__); + pr_debug("%s: mem.userptr %lu", __func__, + frame_buf->frame_info.frame_planes[0].mem.userptr); + + buf = frame_buf; + + buf->buf_list = 
buf_list; + + switch (mem_type) { + case ICI_MEM_USERPTR: + if (!frame_buf->frame_info.frame_planes[0].mem.userptr) { + dev_err(&as->isys->adev->dev, + "User pointer not define\n"); + return -EINVAL; + } + for (i = 0; i < frame_buf->frame_info.num_planes; i++) { + kframe_plane = &buf->kframe_info.planes[i]; + kframe_plane->mem_type = + ICI_MEM_USERPTR; + res = + ici_get_userpages_virt( + &as->isys->adev->dev, + &frame_buf->frame_info.frame_planes[i], + kframe_plane, + pages); + if (res) + return res; + } + break; + case ICI_MEM_DMABUF: + break; + } + + spin_lock_irqsave(&buf_list->lock, flags); + buf->state = ICI_BUF_PREPARED; + list_add_tail(&buf->node, &buf_list->getbuf_list); + spin_unlock_irqrestore(&buf_list->lock, flags); + return 0; +} + +int ici_isys_put_buf(struct ici_isys_stream *as, + struct ici_frame_info *frame_info, + unsigned int f_flags) +{ + struct ici_frame_buf_wrapper *buf; + struct ici_frame_buf_wrapper *buf_safe; + struct ici_isys_frame_buf_list *buf_list = &as->buf_list; + unsigned long flags = 0; + int rval; + + spin_lock_irqsave(&buf_list->lock, flags); + if (list_empty(&buf_list->putbuf_list)) { + /* Wait */ + if (!(f_flags & O_NONBLOCK)) { + spin_unlock_irqrestore(&buf_list->lock, flags); + rval = wait_event_interruptible(buf_list->wait, + !list_empty(&buf_list-> + putbuf_list)); + if (rval == -ERESTARTSYS) + return rval; + spin_lock_irqsave(&buf_list->lock, flags); + } + } + + if (list_empty(&buf_list->putbuf_list)) { + spin_unlock_irqrestore(&buf_list->lock, flags); + return -ENODATA; + } + + // FIXME: This is different from ICG V4L2 implementation which uses time stamp + // to sort frames + list_for_each_entry_safe(buf, buf_safe, &buf_list->putbuf_list, node) { + if (buf->state == ICI_BUF_READY && buf->frame_info.frame_buf_id == + frame_info->frame_buf_id) { + list_del(&buf->node); + memcpy(frame_info, &buf->frame_info, sizeof(buf->frame_info)); + buf->state = ICI_BUF_DONE; + list_add_tail(&buf->node, + &buf_list->getbuf_list); + break; 
+ } + } + spin_unlock_irqrestore(&buf_list->lock, flags); + + return 0; +} + +static void frame_buf_done( + struct ici_isys_frame_buf_list *buf_list, + struct ici_frame_buf_wrapper *buf) +{ + unsigned long flags = 0; + spin_lock_irqsave(&buf_list->lock, flags); + buf->state = ICI_BUF_READY; + list_add_tail(&buf->node, &buf_list->putbuf_list); + spin_unlock_irqrestore(&buf_list->lock, flags); + wake_up_interruptible(&buf_list->wait); + pr_debug("%s: Frame data arrived! %lu", __func__, + buf->frame_info.frame_planes[0].mem.userptr); +} + +void ici_isys_frame_buf_ready(struct ici_isys_pipeline + *ip, + struct ia_css_isys_resp_info *info) +{ + struct ici_frame_buf_wrapper *buf; + struct ici_isys_stream *as = + ici_pipeline_to_stream(ip); + struct ici_isys_frame_buf_list *buf_list = &as->buf_list; + struct ici_isys *isys = as->isys; + unsigned long flags = 0; + bool found = false; + + dev_dbg(&isys->adev->dev, "buffer: received buffer %8.8x\n", + info->pin.addr); + + spin_lock_irqsave(&buf_list->lock, flags); + + list_for_each_entry_reverse(buf, &buf_list->getbuf_list, node) { + struct ici_kframe_plane* plane; + + if (buf->state != ICI_BUF_ACTIVE) + continue; + plane = &buf->kframe_info.planes[0]; + if (plane->dma_addr == info->pin.addr) { + found = true; + break; + } + } + + if (!found) { + spin_unlock_irqrestore(&buf_list->lock, flags); + dev_err(&isys->adev->dev, + "WARNING: cannot find a matching video buffer!\n"); + return; + } + + list_del(&buf->node); + spin_unlock_irqrestore(&buf_list->lock, flags); + + /* + * For interlaced buffers, the notification to user space + * is postponed to capture_done event since the field + * information is available only at that time. 
+ */ + if (ip->interlaced) { + spin_lock_irqsave(&buf_list->short_packet_queue_lock, flags); + list_add(&buf->node, &buf_list->interlacebuf_list); + spin_unlock_irqrestore(&buf_list->short_packet_queue_lock, + flags); + } else { + buf->frame_info.field = ICI_FIELD_NONE; + frame_buf_done(buf_list, buf); + } + + dev_dbg(&isys->adev->dev, "buffer: found buffer %p\n", buf); +} + +static void unmap_buf(struct ici_frame_buf_wrapper *buf) +{ + int i; + dev_dbg(&buf->buf_list->strm_dev->dev, "buf: %p\n", buf); + for (i = 0; i < buf->frame_info.num_planes; i++) { + struct ici_kframe_plane *kframe_plane = + &buf->kframe_info.planes[i]; + dev_dbg(&buf->buf_list->strm_dev->dev, "kframe_plane: %p\n", + kframe_plane); + switch (kframe_plane->mem_type) { + case ICI_MEM_USERPTR: + ici_put_userpages(kframe_plane->dev, + kframe_plane); + break; + case ICI_MEM_DMABUF: + ici_put_dma(kframe_plane->dev, + kframe_plane); + break; + default: + dev_err(&buf->buf_list->strm_dev->dev, "not supported memory type: %d\n", + kframe_plane->mem_type); + break; + } + } +} + +static void unmap_buf_virt(struct ici_frame_buf_wrapper *buf) +{ + int i; + + for (i = 0; i < buf->frame_info.num_planes; i++) { + struct ici_kframe_plane *kframe_plane = + &buf->kframe_info.planes[i]; + switch (kframe_plane->mem_type) { + case ICI_MEM_USERPTR: + ici_put_userpages_virt(kframe_plane->dev, + kframe_plane); + break; + default: + dev_err(&buf->buf_list->strm_dev->dev, "not supported memory type: %d\n", + kframe_plane->mem_type); + break; + } + } +} + +void ici_isys_frame_buf_stream_cancel(struct + ici_isys_stream + *as) +{ + struct ici_isys_frame_buf_list *buf_list = &as->buf_list; + struct ici_frame_buf_wrapper *buf; + struct ici_frame_buf_wrapper *bufsafe; + unsigned long flags = 0; + + spin_lock_irqsave(&buf_list->lock, flags); + list_for_each_entry_safe(buf, bufsafe, + &buf_list->getbuf_list, node) { + list_del(&buf->node); + spin_unlock_irqrestore(&buf_list->lock, flags); + dev_dbg(&buf_list->strm_dev->dev, 
"buf: %p\n", buf); + if (as->strm_dev.virt_dev_id < 0) + unmap_buf(buf); + else + unmap_buf_virt(buf); + spin_lock_irqsave(&buf_list->lock, flags); + } + spin_unlock_irqrestore(&buf_list->lock, flags); + + spin_lock_irqsave(&buf_list->lock, flags); + list_for_each_entry_safe(buf, bufsafe, + &buf_list->putbuf_list, node) { + list_del(&buf->node); + spin_unlock_irqrestore(&buf_list->lock, flags); + dev_dbg(&buf_list->strm_dev->dev, "buf: %p\n", buf); + if (as->strm_dev.virt_dev_id < 0) + unmap_buf(buf); + else + unmap_buf_virt(buf); + spin_lock_irqsave(&buf_list->lock, flags); + } + spin_unlock_irqrestore(&buf_list->lock, flags); + + spin_lock_irqsave(&buf_list->short_packet_queue_lock, flags); + list_for_each_entry_safe(buf, bufsafe, + &buf_list->interlacebuf_list, node) { + list_del(&buf->node); + spin_unlock_irqrestore(&buf_list->short_packet_queue_lock, flags); + dev_dbg(&buf_list->strm_dev->dev, "buf: %p\n", buf); + unmap_buf(buf); + spin_lock_irqsave(&buf_list->short_packet_queue_lock, flags); + } + spin_unlock_irqrestore(&buf_list->short_packet_queue_lock, flags); +} + +int ici_isys_frame_buf_add_next( + struct ici_isys_stream *as, + struct ia_css_isys_frame_buff_set *css_buf) +{ + struct ici_frame_buf_wrapper *buf = NULL; + struct ici_isys_frame_buf_list *buf_list = &as->buf_list; + unsigned long flags = 0; + bool found = false; + + spin_lock_irqsave(&buf_list->lock, flags); + + list_for_each_entry(buf, &buf_list->getbuf_list, node) { + if (buf->state == ICI_BUF_PREPARED){ + found = true; + break; + } + } + + if (!found) { + /* No more buffers available */ + goto cleanup_spinlock; + } + + + buf->state = ICI_BUF_ACTIVE; + spin_unlock_irqrestore(&buf_list->lock, flags); + + pr_debug("%s: add buf to FW! 
%lu", __func__, + buf->frame_info.frame_planes[0].mem.userptr); + + css_buf->send_irq_sof = 1; + css_buf->send_resp_sof = 1; + css_buf->send_irq_eof = 1; + css_buf->send_resp_eof = 1; + css_buf->send_irq_capture_ack = 1; + css_buf->send_irq_capture_done = 1; + + css_buf->output_pins[buf_list->fw_output].addr = + (uint32_t)buf->kframe_info.planes[0].dma_addr; + css_buf->output_pins[buf_list->fw_output].out_buf_id = + buf->buf_id + 1; + + if (buf_list->short_packet_bufs) { + struct ici_frame_short_buf* sb; + struct ici_isys_mipi_packet_header* ph; + struct ia_css_isys_output_pin_payload *output_pin; + spin_lock_irqsave(&buf_list->short_packet_queue_lock, flags); + if (!list_empty(&buf_list->short_packet_incoming)) { + sb = list_entry(buf_list->short_packet_incoming.next, + struct ici_frame_short_buf, node); + list_del(&sb->node); + list_add_tail(&sb->node, &buf_list->short_packet_active); + spin_unlock_irqrestore(&buf_list->short_packet_queue_lock, + flags); + + ph = (struct ici_isys_mipi_packet_header*) + sb->buffer; + ph->word_count = 0xffff; + ph->dtype = 0xff; + dma_sync_single_for_cpu(sb->dev, sb->dma_addr, sizeof(*ph), + DMA_BIDIRECTIONAL); + output_pin = &css_buf->output_pins[ + buf_list->short_packet_output_pin]; + output_pin->addr = sb->dma_addr; + output_pin->out_buf_id = sb->buf_id + 1; + } else { + spin_unlock_irqrestore(&buf_list->short_packet_queue_lock, + flags); + dev_err(&as->isys->adev->dev, + "No more short packet buffers. 
Driver bug?"); + WARN_ON(1); + } + } + return 0; + +cleanup_spinlock: + spin_unlock_irqrestore(&buf_list->lock, flags); + return -ENODATA; +} + +void ici_isys_frame_buf_capture_done( + struct ici_isys_pipeline *ip, + struct ia_css_isys_resp_info *info) +{ + if (ip->interlaced) { + struct ici_isys_stream *as = + ici_pipeline_to_stream(ip); + struct ici_isys_frame_buf_list *buf_list = + &as->buf_list; + unsigned long flags = 0; + struct ici_frame_short_buf* sb; + struct ici_frame_buf_wrapper* buf; + struct ici_frame_buf_wrapper* buf_safe; + struct list_head list; + + spin_lock_irqsave(&buf_list->short_packet_queue_lock, flags); + if(ip->short_packet_source == IPU_ISYS_SHORT_PACKET_FROM_RECEIVER) + if (!list_empty(&buf_list->short_packet_active)) { + sb = list_last_entry(&buf_list->short_packet_active, + struct ici_frame_short_buf, node); + list_move(&sb->node, &buf_list->short_packet_incoming); + } + + list_cut_position(&list, + &buf_list->interlacebuf_list, + buf_list->interlacebuf_list.prev); + spin_unlock_irqrestore(&buf_list->short_packet_queue_lock, + flags); + + list_for_each_entry_safe(buf, buf_safe, &list, node) { + buf->frame_info.field = ip->cur_field; + list_del(&buf->node); + frame_buf_done(buf_list, buf); + } + } +} + +void ici_isys_frame_short_packet_ready( + struct ici_isys_pipeline *ip, + struct ia_css_isys_resp_info *info) +{ + struct ici_isys_stream *as = + ici_pipeline_to_stream(ip); + struct ici_isys_frame_buf_list *buf_list = + &as->buf_list; + unsigned long flags = 0; + struct ici_frame_short_buf* sb; + + spin_lock_irqsave(&buf_list->short_packet_queue_lock, flags); + if (list_empty(&buf_list->short_packet_active)) { + spin_unlock_irqrestore(&buf_list->short_packet_queue_lock, + flags); + dev_err(&as->isys->adev->dev, + "active short buffer queue empty\n"); + return; + } + list_for_each_entry_reverse(sb, &buf_list->short_packet_active, + node) { + if (sb->dma_addr == info->pin.addr) { + ip->cur_field = + ici_isys_csi2_get_current_field( + 
&as->isys->adev->dev, + (struct ici_isys_mipi_packet_header*) + sb->buffer); + break; + } + } + spin_unlock_irqrestore(&buf_list->short_packet_queue_lock, flags); +} + +void ici_isys_frame_buf_short_packet_destroy( + struct ici_isys_stream* as) +{ + struct ici_isys_frame_buf_list *buf_list = + &as->buf_list; + unsigned int i; +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 8, 0) + struct dma_attrs attrs; + init_dma_attrs(&attrs); + dma_set_attr(DMA_ATTR_NON_CONSISTENT, &attrs); +#else + unsigned long attrs; + attrs = DMA_ATTR_NON_CONSISTENT; +#endif + if (!buf_list->short_packet_bufs) + return; + + for (i = 0 ; i < ICI_ISYS_SHORT_PACKET_BUFFER_NUM ; + i++) { + if (buf_list->short_packet_bufs[i].buffer) +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 8, 0) + dma_free_attrs(&as->isys->adev->dev, + buf_list->short_packet_bufs[i].length, + buf_list->short_packet_bufs[i].buffer, + buf_list->short_packet_bufs[i].dma_addr, &attrs); +#else + dma_free_attrs(&as->isys->adev->dev, + buf_list->short_packet_bufs[i].length, + buf_list->short_packet_bufs[i].buffer, + buf_list->short_packet_bufs[i].dma_addr, attrs); +#endif + } + kfree(buf_list->short_packet_bufs); + buf_list->short_packet_bufs = NULL; +} + +int ici_isys_frame_buf_short_packet_setup( + struct ici_isys_stream* as, + struct ici_stream_format* source_fmt) +{ + struct ici_isys_frame_buf_list *buf_list = + &as->buf_list; +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 8, 0) + struct dma_attrs attrs; +#else + unsigned long attrs; +#endif + unsigned int i; + size_t buf_size; + + buf_size = + ICI_ISYS_SHORT_PACKET_BUF_SIZE(source_fmt->ffmt.height); + buf_list->num_short_packet_lines = + ICI_ISYS_SHORT_PACKET_PKT_LINES(source_fmt->ffmt.height); + + INIT_LIST_HEAD(&buf_list->short_packet_incoming); + INIT_LIST_HEAD(&buf_list->short_packet_active); + +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 8, 0) + init_dma_attrs(&attrs); + dma_set_attr(DMA_ATTR_NON_CONSISTENT, &attrs); +#else + attrs = DMA_ATTR_NON_CONSISTENT; +#endif + + 
as->ip.cur_field = ICI_FIELD_TOP; + + buf_list->short_packet_bufs = kzalloc( + sizeof(struct ici_frame_short_buf) * + ICI_ISYS_SHORT_PACKET_BUFFER_NUM, GFP_KERNEL); + if (!buf_list->short_packet_bufs) + return -ENOMEM; + + for (i = 0 ; i < ICI_ISYS_SHORT_PACKET_BUFFER_NUM ; + i++) { + struct ici_frame_short_buf* sb = + &buf_list->short_packet_bufs[i]; + sb->buf_id = i; + sb->buf_list = buf_list; + sb->length = buf_size; + sb->dev = &as->isys->adev->dev; +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 8, 0) + sb->buffer = dma_alloc_attrs( + sb->dev, buf_size, &sb->dma_addr, GFP_KERNEL, &attrs); +#else + sb->buffer = dma_alloc_attrs( + sb->dev, buf_size, &sb->dma_addr, GFP_KERNEL, attrs); +#endif + if (!sb->buffer) { + ici_isys_frame_buf_short_packet_destroy(as); + return -ENOMEM; + } + list_add(&sb->node, &buf_list->short_packet_incoming); + } + return 0; +} + +int ici_isys_frame_buf_init( + struct ici_isys_frame_buf_list* buf_list) +{ + buf_list->drv_priv = NULL; + mutex_init(&buf_list->mutex); + spin_lock_init(&buf_list->lock); + spin_lock_init(&buf_list->short_packet_queue_lock); + INIT_LIST_HEAD(&buf_list->getbuf_list); + INIT_LIST_HEAD(&buf_list->putbuf_list); + INIT_LIST_HEAD(&buf_list->interlacebuf_list); + init_waitqueue_head(&buf_list->wait); + return 0; +} + +#endif /* #ICI_ENABLED */ diff --git a/drivers/media/pci/intel/ici/ici-isys-frame-buf.h b/drivers/media/pci/intel/ici/ici-isys-frame-buf.h new file mode 100644 index 0000000000000..771967ce53606 --- /dev/null +++ b/drivers/media/pci/intel/ici/ici-isys-frame-buf.h @@ -0,0 +1,139 @@ +/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0) */ +/* + * Copyright (C) 2018 Intel Corporation + */ + +#ifndef ICI_ISYS_FRAME_BUF_H +#define ICI_ISYS_FRAME_BUF_H + +#include +#include +#include + +struct ici_isys_pipeline; +struct ia_css_isys_frame_buff_set; +struct ici_stream_device; +struct ici_isys_stream; +struct ia_css_isys_resp_info; + +struct ici_kframe_plane { + struct device *dev; + unsigned int mem_type; + 
unsigned long length; + + /* For user_ptr */ + unsigned long page_offset; + + /* Common */ + dma_addr_t dma_addr; + struct sg_table *sgt; + + /* For DMA operation */ + int fd; + struct dma_buf_attachment *db_attach; + struct dma_buf *dbdbuf; + void *kaddr; + + /* For mediator */ + int npages; + u64 page_table_ref; +}; + +struct ici_kframe_info { + struct ici_kframe_plane planes[ICI_MAX_PLANES]; + int num_planes; +}; + +typedef enum frame_buf_state_ { + ICI_BUF_NOT_SET, + ICI_BUF_PREPARED, + ICI_BUF_ACTIVE, + ICI_BUF_READY, + ICI_BUF_DONE, +} frame_buf_state; + +struct ici_frame_buf_wrapper { + struct ici_kframe_info kframe_info; + struct ici_frame_info frame_info; + struct list_head node; + struct ici_isys_frame_buf_list *buf_list; + struct list_head uos_node; + struct ici_isys_frame_buf_list *uos_buf_list; + uint32_t buf_id; + frame_buf_state state; +}; + +struct ici_frame_short_buf { + void* buffer; + dma_addr_t dma_addr; + struct device* dev; + size_t length; + struct list_head node; + struct ici_isys_frame_buf_list *buf_list; + uint32_t buf_id; +}; + +struct ici_isys_frame_buf_list { + void *drv_priv; + struct mutex mutex; + struct list_head getbuf_list; + struct list_head putbuf_list; + + struct list_head interlacebuf_list; + + uint32_t css_pin_type; + unsigned int fw_output; + spinlock_t lock; + wait_queue_head_t wait; + struct ici_stream_device *strm_dev; + spinlock_t short_packet_queue_lock; + struct list_head short_packet_incoming; + struct list_head short_packet_active; + struct ici_frame_short_buf* short_packet_bufs; + uint32_t num_short_packet_lines; + uint32_t short_packet_output_pin; +}; + +int ici_isys_get_buf(struct ici_isys_stream *as, + struct ici_frame_info + *user_frame_info); + +int ici_isys_get_buf_virt(struct ici_isys_stream *as, + struct ici_frame_buf_wrapper *frame_buf, + struct page **pages); + +int ici_isys_put_buf(struct ici_isys_stream *as, + struct ici_frame_info + *user_frame_info, unsigned int f_flags); + +int 
ici_isys_frame_buf_init(struct + ici_isys_frame_buf_list + *buf_list); + +void ici_isys_frame_buf_ready( + struct ici_isys_pipeline *ip, + struct ia_css_isys_resp_info *info); + +int ici_isys_frame_buf_add_next( + struct ici_isys_stream *as, + struct ia_css_isys_frame_buff_set *css_buf); + +void ici_isys_frame_buf_stream_cancel( + struct ici_isys_stream *as); + +int ici_isys_frame_buf_short_packet_setup( + struct ici_isys_stream* as, + struct ici_stream_format* source_fmt); + +void ici_isys_frame_buf_short_packet_destroy( + struct ici_isys_stream* as); + +void ici_isys_frame_short_packet_ready( + struct ici_isys_pipeline *ip, + struct ia_css_isys_resp_info *info); + +void ici_isys_frame_buf_capture_done( + struct ici_isys_pipeline *ip, + struct ia_css_isys_resp_info *info); + +#endif /* ICI_ISYS_FRAME_BUF_H */ diff --git a/drivers/media/pci/intel/ici/ici-isys-pipeline-device.c b/drivers/media/pci/intel/ici/ici-isys-pipeline-device.c new file mode 100644 index 0000000000000..a4e4d77b2a645 --- /dev/null +++ b/drivers/media/pci/intel/ici/ici-isys-pipeline-device.c @@ -0,0 +1,493 @@ +// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0) +/* + * Copyright (C) 2018 Intel Corporation + */ + +#include "./ici/ici-isys.h" + +#ifdef ICI_ENABLED + +#include +#include +#include +#include +#include +#include +#include +#include + +#include "./ici/ici-isys-pipeline-device.h" +#include "./ici/ici-isys-pipeline.h" + +static struct class *pipeline_class; + +static struct ici_isys_node* find_node( + struct ici_isys_pipeline_device *pipe_dev, + unsigned id); + +static int pipeline_device_open(struct inode *inode, struct file *file) +{ + struct ici_isys_pipeline_device *pipe_dev = + inode_to_ici_isys_pipeline_device(inode); + int rval = 0; + + file->private_data = pipe_dev; + + get_device(&pipe_dev->dev); + + DEBUGK("pipeline_device_open\n"); + + return rval; +} + +static int pipeline_device_release(struct inode *inode, + struct file *file) +{ + struct ici_isys_pipeline_device 
*pipe_dev = + inode_to_ici_isys_pipeline_device(inode); + + put_device(&pipe_dev->dev); + + DEBUGK("pipeline_device_release\n"); + + return 0; +} + +static int pipeline_enum_links(struct file *file, void *fh, + struct ici_links_query *links_query) +{ + struct ici_isys_node *node; + struct node_pipe* pipe; + struct node_pad* pad; + struct ici_isys_pipeline_device *pipe_dev = + file->private_data; + struct ici_link_desc* link; + + node = find_node(pipe_dev, links_query->pad.node_id); + if (!node) + return -ENODEV; + if (links_query->pad.pad_idx >= node->nr_pads) + return -EINVAL; + + pad = &node->node_pad[links_query->pad.pad_idx]; + if (pad->pad_id != links_query->pad.pad_idx) + return -EINVAL; + + links_query->links_cnt = 0; + + list_for_each_entry(pipe, &node->node_pipes, list_entry) { + if (pipe->src_pad != pad && pipe->sink_pad != pad) + continue; + link = &links_query->links[links_query->links_cnt]; + link->source.node_id = pipe->src_pad->node->node_id; + link->source.pad_idx = pipe->src_pad->pad_id; + link->source.flags = pipe->src_pad->flags; + link->sink.node_id = pipe->sink_pad->node->node_id; + link->sink.pad_idx = pipe->sink_pad->pad_id; + link->sink.flags = pipe->sink_pad->flags; + link->flags = pipe->flags; + ++links_query->links_cnt; + if (WARN_ON(links_query->links_cnt >= + ICI_MAX_LINKS)) { + dev_warn(&pipe_dev->dev, + "Too many links defined. 
%d\n", + links_query->links_cnt); + break; + } + } + return 0; +} + +static int pipeline_enum_nodes(struct file *file, void *fh, + struct ici_node_desc *node_desc) +{ + struct ici_isys_pipeline_device* pipeline_dev = + file->private_data; + struct ici_isys_node *node; + struct ici_pad_desc* pad_desc; + int pad; + bool found = false; + + node_desc->node_count = 0; + list_for_each_entry(node, &pipeline_dev->nodes, node_entry) { + node_desc->node_count++; + if (node_desc->node_id != node->node_id) + continue; + + /* fill out the node data */ + found = true; + memcpy(node_desc->name, node->name, + sizeof(node_desc->name)); + node_desc->nr_pads = node->nr_pads; + for (pad=0; pad < node->nr_pads; pad++) { + pad_desc = &node_desc->node_pad[pad]; + pad_desc->pad_idx = node->node_pad[pad].pad_id; + pad_desc->node_id = node->node_id; + pad_desc->flags = node->node_pad[pad].flags; + } + } + if (node_desc->node_id == -1) + return 0; + if (!found) + return -ENODEV; + return 0; +} + +static struct ici_isys_node* find_node( + struct ici_isys_pipeline_device *pipe_dev, + unsigned id) +{ + struct ici_isys_node *ici_node; + + list_for_each_entry(ici_node, &pipe_dev->nodes, node_entry) { + if (ici_node->node_id == id) + return ici_node; + } + return NULL; +} + +static int ici_pipeline_get_supported_format(struct file *file, + void *fh, + struct ici_pad_supported_format_desc *format_desc) +{ + struct ici_isys_node *node; + struct ici_isys_pipeline_device *pipe_dev = + file->private_data; + + node = find_node(pipe_dev, format_desc->pad.node_id); + if (!node) + return -ENODEV; + + if (node->node_get_pad_supported_format) + return node->node_get_pad_supported_format(node, + format_desc); + return -ENODEV; +} + +static struct node_pipe* find_pipe( + struct ici_isys_node* src_node, + struct ici_link_desc *link) +{ + struct node_pipe *np; + + list_for_each_entry(np, &src_node->node_pipes, list_entry) { + if (np->src_pad->node->node_id == link->source.node_id + && np->src_pad->pad_id == 
link->source.pad_idx + && np->sink_pad->node->node_id == + link->sink.node_id + && np->sink_pad->pad_id == link->sink.pad_idx) + + return np; + } + + return NULL; +} + +static int ici_setup_link(struct file *file, void *fh, + struct ici_link_desc *link) +{ + int rval = 0; + struct ici_isys_pipeline_device *pipe_dev = + file->private_data; + struct ici_isys_node *src_node, *sink_node; + struct node_pipe *np; + + src_node = find_node(pipe_dev, link->source.node_id); + if (!src_node) + return -ENODEV; + + sink_node = find_node(pipe_dev, link->sink.node_id); + if (!sink_node) + return -ENODEV; + + np = find_pipe(src_node, link); + + if (np) { + np->flags = link->flags; + } else { + dev_warn(&pipe_dev->dev, "Link not found\n"); + return -ENODEV; + } + + np = find_pipe(sink_node, link); + if (np) + np->flags = link->flags | ICI_LINK_FLAG_BACKLINK; + else + dev_warn(&pipe_dev->dev, "Backlink not found\n"); + + return rval; +} + +int ici_pipeline_set_ffmt(struct file *file, void *fh, + struct ici_pad_framefmt *ffmt) +{ + struct ici_isys_pipeline_device *pipe_dev = + file->private_data; + struct ici_isys_node *node; + int rval = -ENODEV; + + node = find_node(pipe_dev, ffmt->pad.node_id); + if (!node) + return -ENODEV; + + if (node->node_set_pad_ffmt) + rval = node->node_set_pad_ffmt(node, ffmt); + + return rval; +} + +int ici_pipeline_get_ffmt(struct file *file, void *fh, + struct ici_pad_framefmt *ffmt) +{ + struct ici_isys_pipeline_device *pipe_dev = + file->private_data; + struct ici_isys_node *node; + int rval = -ENODEV; + + node = find_node(pipe_dev, ffmt->pad.node_id); + if (!node) + return -ENODEV; + + if (node->node_get_pad_ffmt) + rval = node->node_get_pad_ffmt(node, ffmt); + + return rval; +} + +static int ici_pipeline_set_sel(struct file *file, void *fh, + struct ici_pad_selection *pad_sel) +{ + struct ici_isys_node *node; + struct ici_isys_pipeline_device *pipe_dev = + file->private_data; + + node = find_node(pipe_dev, pad_sel->pad.node_id); + if (!node) + 
return -ENODEV; + + if (node->node_set_pad_sel) + return node->node_set_pad_sel(node, pad_sel); + return -ENODEV; +} + +static int ici_pipeline_get_sel(struct file *file, void *fh, + struct ici_pad_selection *pad_sel) +{ + struct ici_isys_node *node; + struct ici_isys_pipeline_device *pipe_dev = + file->private_data; + + node = find_node(pipe_dev, pad_sel->pad.node_id); + if (!node) + return -ENODEV; + + if (node->node_get_pad_sel) + return node->node_get_pad_sel(node, pad_sel); + return -ENODEV; +} + +static long ici_pipeline_ioctl_common(void __user *up, + struct file *file, unsigned int ioctl_cmd, + unsigned long ioctl_arg) +{ + union { + struct ici_node_desc node_desc; + struct ici_link_desc link; + struct ici_pad_framefmt pad_prop; + struct ici_pad_supported_format_desc + format_desc; + struct ici_links_query links_query; + struct ici_pad_selection pad_sel; + } isys_ioctl_cmd_args; + int err = 0; + struct ici_isys_pipeline_device *pipe_dev = + file->private_data; + const struct ici_pipeline_ioctl_ops *ops; + + if (_IOC_SIZE(ioctl_cmd) > sizeof(isys_ioctl_cmd_args)) + return -ENOTTY; + + if (_IOC_DIR(ioctl_cmd) & _IOC_WRITE) { + err = copy_from_user(&isys_ioctl_cmd_args, up, + _IOC_SIZE(ioctl_cmd)); + if (err) + return -EFAULT; + } + + mutex_lock(&pipe_dev->mutex); + ops = pipe_dev->pipeline_ioctl_ops; + switch(ioctl_cmd) { + case ICI_IOC_ENUM_NODES: + err = ops->pipeline_enum_nodes(file, pipe_dev, + &isys_ioctl_cmd_args.node_desc); + break; + case ICI_IOC_ENUM_LINKS: + err = ops->pipeline_enum_links(file, pipe_dev, + &isys_ioctl_cmd_args.links_query); + break; + case ICI_IOC_SETUP_PIPE: + err = ops->pipeline_setup_pipe(file, pipe_dev, + &isys_ioctl_cmd_args.link); + break; + case ICI_IOC_SET_FRAMEFMT: + err = ops->pad_set_ffmt(file, pipe_dev, + &isys_ioctl_cmd_args.pad_prop); + break; + case ICI_IOC_GET_FRAMEFMT: + err = ops->pad_get_ffmt(file, pipe_dev, + &isys_ioctl_cmd_args.pad_prop); + break; + case ICI_IOC_GET_SUPPORTED_FRAMEFMT: + err = 
ops->pad_get_supported_format(file, pipe_dev, + &isys_ioctl_cmd_args.format_desc); + break; + case ICI_IOC_SET_SELECTION: + err = ops->pad_set_sel(file, pipe_dev, + &isys_ioctl_cmd_args.pad_sel); + break; + case ICI_IOC_GET_SELECTION: + err = ops->pad_get_sel(file, pipe_dev, + &isys_ioctl_cmd_args.pad_sel); + break; + default: + err = -ENOTTY; + break; + } + + mutex_unlock(&pipe_dev->mutex); + if (err < 0) + return err; + + if (_IOC_DIR(ioctl_cmd) & _IOC_READ) { + err = copy_to_user(up, &isys_ioctl_cmd_args, + _IOC_SIZE(ioctl_cmd)); + if (err) + return -EFAULT; + } + + return 0; +} + +static long ici_pipeline_ioctl(struct file *file, + unsigned int ioctl_cmd, unsigned long ioctl_arg) +{ + long status = 0; + void __user *up = (void __user *)ioctl_arg; + status = ici_pipeline_ioctl_common(up, file, ioctl_cmd, + ioctl_arg); + + return status; +} + +static long ici_pipeline_ioctl32(struct file *file, + unsigned int ioctl_cmd, unsigned long ioctl_arg) +{ + long status = 0; + void __user *up = compat_ptr(ioctl_arg); + status = ici_pipeline_ioctl_common(up, file, ioctl_cmd, + ioctl_arg); + + return status; +} + +static const struct ici_pipeline_ioctl_ops pipeline_ioctls = +{ + .pipeline_setup_pipe = ici_setup_link, + .pipeline_enum_nodes = pipeline_enum_nodes, + .pipeline_enum_links = pipeline_enum_links, + .pad_set_ffmt = ici_pipeline_set_ffmt, + .pad_get_ffmt = ici_pipeline_get_ffmt, + .pad_get_supported_format = + ici_pipeline_get_supported_format, + .pad_set_sel = ici_pipeline_set_sel, + .pad_get_sel = ici_pipeline_get_sel, + +}; + +static const struct file_operations ici_isys_pipeline_fops = +{ + .owner = THIS_MODULE, + .open = pipeline_device_open, + .unlocked_ioctl = ici_pipeline_ioctl, +#ifdef CONFIG_COMPAT + .compat_ioctl = ici_pipeline_ioctl32, +#endif + .release = pipeline_device_release, +}; + +static void pipeline_device_main_release(struct device *sd) +{ +} + +int pipeline_device_register( + struct ici_isys_pipeline_device *pipe_dev, + struct ici_isys *isys) 
+{ + int rval = 0; + + pipeline_class = + class_create(THIS_MODULE, + ICI_PIPELINE_DEVICE_NAME); + if (IS_ERR(pipeline_class)) { + printk(KERN_WARNING "Failed to register device class %s\n", + ICI_PIPELINE_DEVICE_NAME); + return PTR_ERR(pipeline_class); + } + + pipe_dev->parent = &isys->adev->dev; + pipe_dev->minor = -1; + + cdev_init(&pipe_dev->cdev, &ici_isys_pipeline_fops); + pipe_dev->cdev.owner = ici_isys_pipeline_fops.owner; + + rval = cdev_add(&pipe_dev->cdev, + MKDEV(MAJOR_PIPELINE, MINOR_PIPELINE), 1); + if (rval) { + printk(KERN_ERR "%s: failed to add cdevice\n", __func__); + goto fail; + } + + pipe_dev->dev.class = pipeline_class; + pipe_dev->dev.devt = MKDEV(MAJOR_PIPELINE, MINOR_PIPELINE); + pipe_dev->dev.parent = pipe_dev->parent; + pipe_dev->dev.release = pipeline_device_main_release; + dev_set_name(&pipe_dev->dev, "%s", + ICI_PIPELINE_DEVICE_NAME); + rval = device_register(&pipe_dev->dev); + if (rval < 0) { + printk(KERN_ERR "%s: device_register failed\n", __func__); + goto out_cdev_del; + } + + strlcpy(pipe_dev->name, pipe_dev->dev.kobj.name, + sizeof(pipe_dev->name)); + pipe_dev->minor = MINOR_PIPELINE; + + DEBUGK("Device registered: %s\n", pipe_dev->name); + pipe_dev->pipeline_ioctl_ops = &pipeline_ioctls; + mutex_init(&pipe_dev->mutex); + INIT_LIST_HEAD(&pipe_dev->nodes); + + return 0; + +out_cdev_del: + cdev_del(&pipe_dev->cdev); + +fail: + return rval; +} +EXPORT_SYMBOL(pipeline_device_register); + +void pipeline_device_unregister( + struct ici_isys_pipeline_device* pipe_dev) +{ + DEBUGK("Pipeline device unregistering..."); + device_unregister(&pipe_dev->dev); + cdev_del(&pipe_dev->cdev); + class_destroy(pipeline_class); + mutex_destroy(&pipe_dev->mutex); +} +EXPORT_SYMBOL(pipeline_device_unregister); + + +#endif /*ICI_ENABLED*/ diff --git a/drivers/media/pci/intel/ici/ici-isys-pipeline-device.h b/drivers/media/pci/intel/ici/ici-isys-pipeline-device.h new file mode 100644 index 0000000000000..b218b4d1f10a0 --- /dev/null +++ 
b/drivers/media/pci/intel/ici/ici-isys-pipeline-device.h @@ -0,0 +1,62 @@ +/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0) */ +/* + * Copyright (C) 2018 Intel Corporation + */ + +#ifndef ICI_ISYS_PIPELINE_DEVICE_H +#define ICI_ISYS_PIPELINE_DEVICE_H + +#include +#include +#include +#include +#include + +struct ici_pipeline_ioctl_ops; +struct ici_link_desc; +struct ici_pad_supported_format_desc; + +struct ici_isys_pipeline_device { + struct cdev cdev; + struct device dev; + struct device *parent; + int minor; + char name[32]; + struct mutex mutex; + const struct file_operations *fops; + struct list_head nodes; + const struct ici_pipeline_ioctl_ops *pipeline_ioctl_ops; + unsigned next_node_id; +}; + +/* Pipeline IOCTLs */ +struct ici_pipeline_ioctl_ops { + int (*pipeline_enum_nodes)(struct file *file, void *fh, + struct ici_node_desc *node_desc); + int (*pipeline_enum_links)(struct file *file, void *fh, + struct ici_links_query *links_query); + int (*pipeline_setup_pipe)(struct file *file, void *fh, + struct ici_link_desc *link); + int (*pad_set_ffmt)(struct file *file, void *fh, + struct ici_pad_framefmt* pad_ffmt); + int (*pad_get_ffmt)(struct file *file, void *fh, + struct ici_pad_framefmt* pad_ffmt); + int (*pad_get_supported_format)(struct file *file, void *fh, + struct ici_pad_supported_format_desc *format_desc); + int (*pad_set_sel)(struct file *file, void *fh, + struct ici_pad_selection* pad_sel); + int (*pad_get_sel)(struct file *file, void *fh, + struct ici_pad_selection* pad_sel); +}; + +int pipeline_device_register( + struct ici_isys_pipeline_device *pipe_dev, + struct ici_isys *isys); +void pipeline_device_unregister(struct ici_isys_pipeline_device + *pipe_dev); + +#define inode_to_ici_isys_pipeline_device(inode) \ + container_of((inode)->i_cdev,\ + struct ici_isys_pipeline_device, cdev) + +#endif /*ICI_ISYS_PIPELINE_DEVICE_H */ diff --git a/drivers/media/pci/intel/ici/ici-isys-pipeline.c b/drivers/media/pci/intel/ici/ici-isys-pipeline.c new file 
mode 100644 index 0000000000000..563a76d0c4157 --- /dev/null +++ b/drivers/media/pci/intel/ici/ici-isys-pipeline.c @@ -0,0 +1,174 @@ +// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0) +/* + * Copyright (C) 2018 Intel Corporation + */ + +#include "./ici/ici-isys.h" + +#ifdef ICI_ENABLED + +#include "./ici/ici-isys-pipeline.h" + +int ici_isys_pipeline_node_init( + struct ici_isys *isys, + struct ici_isys_node *node, + const char* name, + unsigned num_pads, + struct node_pad *node_pads) +{ + unsigned int pad_id; + + mutex_lock(&isys->pipeline_dev.mutex); + node->parent = &isys->pipeline_dev; + snprintf(node->name, sizeof(node->name), "%s", name); + if (num_pads > ICI_MAX_PADS) { + dev_warn(&isys->adev->dev, + "Too many external pads %d\n", num_pads); + num_pads = ICI_MAX_PADS; + } + node->nr_pads = num_pads; + node->node_pad = node_pads; + node->nr_pipes = 0; + node->node_id = isys->pipeline_dev.next_node_id++; + + INIT_LIST_HEAD(&node->node_entry); + INIT_LIST_HEAD(&node->iterate_node); + INIT_LIST_HEAD(&node->node_pipes); + + for (pad_id = 0; pad_id < num_pads; pad_id++) { + node->node_pad[pad_id].node = node; + node->node_pad[pad_id].pad_id = pad_id; + } + + list_add_tail(&node->node_entry, + &node->parent->nodes); + dev_info(&isys->adev->dev, + "Setup node \"%s\" with %d pads\n", + node->name, + node->nr_pads); + mutex_unlock(&isys->pipeline_dev.mutex); + return 0; +} + +void node_pads_cleanup(struct ici_isys_node *node) +{ + struct node_pipe *tmp, *q, *np; + list_for_each_entry_safe(np, q, &node->node_pipes, list_entry) { + tmp = np; + list_del(&np->list_entry); + kfree(tmp); + } +} + +static struct node_pipe* node_pad_add_link(struct ici_isys_node *node) +{ + struct node_pipe *np; + np = kzalloc(sizeof(*np), GFP_KERNEL); + if (!np) + return NULL; + + list_add_tail(&np->list_entry, &node->node_pipes); + node->nr_pipes++; + return np; +} + +int node_pad_create_link(struct ici_isys_node *src, + u16 src_pad, struct ici_isys_node *sink, + u16 sink_pad, u32 flags 
) +{ + int rval = 0; + struct node_pipe *np; + struct node_pipe *rnp; + if(!src || !sink || !src->parent) + return -EINVAL; + + mutex_lock(&src->parent->mutex); + np = node_pad_add_link(src); + if(!np) { + rval = -ENOMEM; + goto cleanup_mutex; + } + + np->src_pad = &src->node_pad[src_pad]; + np->sink_pad = &sink->node_pad[sink_pad]; + np->flags = flags; + np->rev_pipe = NULL; + + rnp = node_pad_add_link(sink); + if(!rnp) { + rval = -ENOMEM; + goto cleanup_mutex; + } + + rnp->src_pad = &src->node_pad[src_pad]; + rnp->sink_pad = &sink->node_pad[sink_pad]; + rnp->flags = flags | ICI_LINK_FLAG_BACKLINK; + rnp->rev_pipe = np; + np->rev_pipe = rnp; + +cleanup_mutex: + mutex_unlock(&src->parent->mutex); + return rval; +} + +static int __ici_isys_pipeline_for_each_node( + ici_isys_pipeline_node_cb_fn cb_fn, + void* cb_data, + struct ici_isys_node* start_node, + struct ici_isys_pipeline *ip_active, + bool backwards) +{ + struct node_pipe *pipe; + struct ici_isys_node* node; + struct ici_isys_node* next_node = NULL; + int rval; + LIST_HEAD(node_list); + + if (!cb_fn || !start_node || !start_node->parent) + return -EINVAL; + + rval = cb_fn(cb_data, start_node, NULL); + if (rval) + return rval; + list_add_tail(&start_node->iterate_node, &node_list); + while (!list_empty(&node_list)) { + node = list_entry(node_list.next, + struct ici_isys_node, + iterate_node); + list_del(&node->iterate_node); + list_for_each_entry(pipe, &node->node_pipes, + list_entry) { + if (backwards && !(pipe->flags & ICI_LINK_FLAG_BACKLINK)) + continue; + else if (!backwards && (pipe->flags & ICI_LINK_FLAG_BACKLINK)) + continue; + if (ip_active && !(pipe->flags & ICI_LINK_FLAG_ENABLED)) + continue; + next_node = (backwards ? 
pipe->src_pad->node : + pipe->sink_pad->node); + rval = cb_fn(cb_data, next_node, pipe); + if (rval) + return rval; + list_add_tail(&next_node->iterate_node, + &node_list); + } + } + return 0; +} + +int ici_isys_pipeline_for_each_node( + ici_isys_pipeline_node_cb_fn cb_fn, + void* cb_data, + struct ici_isys_node* start_node, + struct ici_isys_pipeline *ip_active, + bool backwards) +{ + int rval = 0; + mutex_lock(&start_node->parent->mutex); + rval = __ici_isys_pipeline_for_each_node(cb_fn, + cb_data, start_node, ip_active, backwards); + mutex_unlock(&start_node->parent->mutex); + return rval; +} + +#endif /* ICI_ENABLED */ diff --git a/drivers/media/pci/intel/ici/ici-isys-pipeline.h b/drivers/media/pci/intel/ici/ici-isys-pipeline.h new file mode 100644 index 0000000000000..8004d8df0c060 --- /dev/null +++ b/drivers/media/pci/intel/ici/ici-isys-pipeline.h @@ -0,0 +1,98 @@ +/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0) */ +/* + * Copyright (C) 2018 Intel Corporation + */ + +#ifndef ICI_ISYS_PIPELINE_H +#define ICI_ISYS_PIPELINE_H + +#include +#include +#include + +#include + +#define ICI_ISYS_OUTPUT_PINS 11 +#define ICI_NUM_CAPTURE_DONE 2 +#define ICI_ISYS_MAX_PARALLEL_SOF 2 + +struct ici_isys_node; +struct ici_isys_subdev; +struct ici_isys_csi2_be; +struct ici_isys_csi2; +struct ici_isys_tpg; +struct ia_css_isys_resp_info; +struct ici_isys_pipeline; +struct ici_isys_stream; +struct node_pad; + +struct ici_sequence_info { + unsigned int sequence; + u64 timestamp; +}; + +struct ici_output_pin_data { + void (*pin_ready)(struct ici_isys_pipeline *ip, + struct ia_css_isys_resp_info *info); + struct ici_isys_frame_buf_list *buf_list; +}; + +struct ici_isys_pipeline { + struct node_pipeline pipe; + struct ici_isys_pipeline_device *pipeline_dev; + int source; /* SSI stream source */ + int stream_handle; /* stream handle for CSS API */ + unsigned int nr_output_pins; /* How many firmware pins? 
*/ + struct ici_isys_csi2_be *csi2_be; + struct ici_isys_csi2 *csi2; + struct ici_isys_subdev *asd_source; + int asd_source_pad_id; + unsigned int streaming; + struct completion stream_open_completion; + struct completion stream_close_completion; + struct completion stream_start_completion; + struct completion stream_stop_completion; + struct completion capture_ack_completion; + struct ici_isys *isys; + + void (*capture_done[ICI_NUM_CAPTURE_DONE]) + (struct ici_isys_pipeline *ip, + struct ia_css_isys_resp_info *resp); + struct ici_output_pin_data + output_pins[ICI_ISYS_OUTPUT_PINS]; + bool interlaced; + int error; + int cur_field; + unsigned int short_packet_source; + unsigned int short_packet_trace_index; + unsigned int vc; +}; + +int ici_isys_pipeline_node_init( + struct ici_isys *isys, + struct ici_isys_node *node, + const char* name, + unsigned num_pads, + struct node_pad *node_pads); + +int node_pad_create_link(struct ici_isys_node *src, + u16 src_pad, struct ici_isys_node *sink, + u16 sink_pad, u32 flags ); + +void node_pads_cleanup(struct ici_isys_node *node); + +typedef int (*ici_isys_pipeline_node_cb_fn)(void* cb_data, + struct ici_isys_node* node, + struct node_pipe* pipe); + +int ici_isys_pipeline_for_each_node( + ici_isys_pipeline_node_cb_fn cb_fn, + void* cb_data, + struct ici_isys_node* start_node, + struct ici_isys_pipeline* ip_active, + bool backwards); + +#define ici_nodepipe_to_pipeline(__np) \ + container_of(__np, struct ici_isys_pipeline, pipe) + +#endif /* ICI_ISYS_PIPELINE_H */ diff --git a/drivers/media/pci/intel/ici/ici-isys-stream-device.c b/drivers/media/pci/intel/ici/ici-isys-stream-device.c new file mode 100644 index 0000000000000..96336980db7d1 --- /dev/null +++ b/drivers/media/pci/intel/ici/ici-isys-stream-device.c @@ -0,0 +1,397 @@ +// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0) +/* + * Copyright (C) 2018 Intel Corporation + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + 
+#include "./ici/ici-isys-stream-device.h" +#include "./ici/ici-isys-pipeline-device.h" + +#define MAX_STREAM_DEVICES 64 + +static dev_t ici_stream_dev_t; +static struct class* stream_class; +static int stream_devices_registered = 0; +static int stream_device_init = 0; + +static int ici_stream_init(void); +static void ici_stream_exit(void); + +static int stream_device_open(struct inode *inode, struct file *file) +{ + struct ici_stream_device *strm_dev = inode_to_intel_ipu_stream_device(inode); + int rval = 0; + + get_device(&strm_dev->dev); + + file->private_data = strm_dev; + if (strm_dev->fops->open) + rval = strm_dev->fops->open(inode, file); + + if (rval) + put_device(&strm_dev->dev); + + return rval; +} + +static int stream_device_release(struct inode *inode, struct file *file) +{ + struct ici_stream_device *strm_dev = inode_to_intel_ipu_stream_device(inode); + int rval = 0; + + if (strm_dev->fops->release) + rval = strm_dev->fops->release(inode, file); + + put_device(&strm_dev->dev); + return rval; +} + +static unsigned int ici_fop_poll(struct file *file, struct poll_table_struct *poll) +{ + struct ici_stream_device *strm_dev = file->private_data; + unsigned int rval = POLLERR | POLLHUP; + + if (strm_dev->fops->poll) + rval = strm_dev->fops->poll(file, poll); + else + return DEFAULT_POLLMASK; + + return rval; +} + +#ifdef CONFIG_COMPAT +struct timeval32 { + __u32 tv_sec; + __u32 tv_usec; +} __attribute__((__packed__)); + +struct ici_frame_plane32 { + __u32 bytes_used; + __u32 length; + union { + compat_uptr_t userptr; + __s32 dmafd; + } mem; + __u32 data_offset; + __u32 reserved[2]; +} __attribute__((__packed__)); + +struct ici_frame_info32 { + __u32 frame_type; + __u32 field; + __u32 flag; + __u32 frame_buf_id; + struct timeval32 frame_timestamp; + __u32 frame_sequence_id; + __u32 mem_type; /* _DMA or _USER_PTR */ + struct ici_frame_plane32 frame_planes[ICI_MAX_PLANES]; /* multi-planar */ + __u32 num_planes; /* =1 single-planar > 1 multi-planar array size */ 
+ __u32 reserved[2]; +} __attribute__((__packed__)); + +#define ICI_IOC_GET_BUF32 _IOWR(MAJOR_STREAM, 3, struct ici_frame_info32) +#define ICI_IOC_PUT_BUF32 _IOWR(MAJOR_STREAM, 4, struct ici_frame_info32) + +static void copy_from_user_frame_info32(struct ici_frame_info *kp, struct ici_frame_info32 __user *up) +{ + int i; + compat_uptr_t userptr; + + get_user(kp->frame_type, &up->frame_type); + get_user(kp->field, &up->field); + get_user(kp->flag, &up->flag); + get_user(kp->frame_buf_id, &up->frame_buf_id); + get_user(kp->frame_timestamp.tv_sec, &up->frame_timestamp.tv_sec); + get_user(kp->frame_timestamp.tv_usec, &up->frame_timestamp.tv_usec); + get_user(kp->frame_sequence_id, &up->frame_sequence_id); + get_user(kp->mem_type, &up->mem_type); + get_user(kp->num_planes, &up->num_planes); + for (i=0; inum_planes; i++) { + get_user(kp->frame_planes[i].bytes_used, &up->frame_planes[i].bytes_used); + get_user(kp->frame_planes[i].length, &up->frame_planes[i].length); + if(kp->mem_type==ICI_MEM_USERPTR) { + get_user(userptr, &up->frame_planes[i].mem.userptr); + kp->frame_planes[i].mem.userptr = (unsigned long) compat_ptr(userptr); + } else if (kp->mem_type==ICI_MEM_DMABUF) { + get_user(kp->frame_planes[i].mem.dmafd, &up->frame_planes[i].mem.dmafd); + }; + get_user(kp->frame_planes[i].data_offset, &up->frame_planes[i].data_offset); + } +} + +static void copy_to_user_frame_info32(struct ici_frame_info *kp, struct ici_frame_info32 __user *up) +{ + int i; + compat_uptr_t userptr; + + put_user(kp->frame_type, &up->frame_type); + put_user(kp->field, &up->field); + put_user(kp->flag, &up->flag); + put_user(kp->frame_buf_id, &up->frame_buf_id); + put_user(kp->frame_timestamp.tv_sec, &up->frame_timestamp.tv_sec); + put_user(kp->frame_timestamp.tv_usec, &up->frame_timestamp.tv_usec); + put_user(kp->frame_sequence_id, &up->frame_sequence_id); + put_user(kp->mem_type, &up->mem_type); + put_user(kp->num_planes, &up->num_planes); + for (i=0; inum_planes; i++) { + 
put_user(kp->frame_planes[i].bytes_used, &up->frame_planes[i].bytes_used); + put_user(kp->frame_planes[i].length, &up->frame_planes[i].length); + if(kp->mem_type==ICI_MEM_USERPTR) { + userptr = (unsigned long)compat_ptr(kp->frame_planes[i].mem.userptr); + put_user(userptr, &up->frame_planes[i].mem.userptr); + } else if (kp->mem_type==ICI_MEM_DMABUF) { + get_user(kp->frame_planes[i].mem.dmafd, &up->frame_planes[i].mem.dmafd); + } + put_user(kp->frame_planes[i].data_offset, &up->frame_planes[i].data_offset); + } +} + +static long ici_stream_ioctl32(struct file *file, __u32 ioctl_cmd, + unsigned long ioctl_arg) { + union { + struct ici_frame_info frame_info; + struct ici_stream_format sf; + } isys_ioctl_cmd_args; + + int err = 0; + struct ici_stream_device *strm_dev = file->private_data; + void __user *up = compat_ptr(ioctl_arg); + + mutex_lock(strm_dev->mutex); + + switch(ioctl_cmd) { + case ICI_IOC_STREAM_ON: + err = strm_dev->ipu_ioctl_ops->ici_stream_on(file, strm_dev); + break; + case ICI_IOC_STREAM_OFF: + err = strm_dev->ipu_ioctl_ops->ici_stream_off(file, strm_dev); + break; + case ICI_IOC_GET_BUF32: + copy_from_user_frame_info32(&isys_ioctl_cmd_args.frame_info, up); + err = strm_dev->ipu_ioctl_ops->ici_get_buf(file, strm_dev, &isys_ioctl_cmd_args.frame_info); + if (err) + break; + copy_to_user_frame_info32(&isys_ioctl_cmd_args.frame_info, up); + break; + case ICI_IOC_PUT_BUF32: + copy_from_user_frame_info32(&isys_ioctl_cmd_args.frame_info, up); + err = strm_dev->ipu_ioctl_ops->ici_put_buf(file, strm_dev, &isys_ioctl_cmd_args.frame_info); + if (err) + break; + copy_to_user_frame_info32(&isys_ioctl_cmd_args.frame_info, up); + break; + case ICI_IOC_SET_FORMAT: + if (_IOC_SIZE(ioctl_cmd) > sizeof(isys_ioctl_cmd_args)) + return -ENOTTY; + + err = copy_from_user(&isys_ioctl_cmd_args, up, + _IOC_SIZE(ioctl_cmd)); + if (err) + return -EFAULT; + + err = strm_dev->ipu_ioctl_ops->ici_set_format(file, strm_dev, &isys_ioctl_cmd_args.sf); + if (err) + break; + + err = 
copy_to_user(up, &isys_ioctl_cmd_args, _IOC_SIZE(ioctl_cmd)); + if (err) { + return -EFAULT; + } + break; + default: + err = -ENOTTY; + break; + } + + mutex_unlock(strm_dev->mutex); + if (err) { + return err; + } + + return 0; +} +#endif + +static long ici_stream_ioctl(struct file *file, unsigned int ioctl_cmd, + unsigned long ioctl_arg) { + union { + struct ici_frame_info frame_info; + struct ici_stream_format sf; + } isys_ioctl_cmd_args; + int err = 0; + struct ici_stream_device *strm_dev = file->private_data; + void __user *up = (void __user *)ioctl_arg; + + bool copy = (ioctl_cmd != ICI_IOC_STREAM_ON && + ioctl_cmd != ICI_IOC_STREAM_OFF); + + if (copy) { + if (_IOC_SIZE(ioctl_cmd) > sizeof(isys_ioctl_cmd_args)) + return -ENOTTY; + + if (_IOC_DIR(ioctl_cmd) & _IOC_WRITE) { + err = copy_from_user(&isys_ioctl_cmd_args, up, + _IOC_SIZE(ioctl_cmd)); + if (err) + return -EFAULT; + } + } + + mutex_lock(strm_dev->mutex); + + switch(ioctl_cmd) { + case ICI_IOC_STREAM_ON: + err = strm_dev->ipu_ioctl_ops->ici_stream_on(file, strm_dev); + break; + case ICI_IOC_STREAM_OFF: + err = strm_dev->ipu_ioctl_ops->ici_stream_off(file, strm_dev); + break; + case ICI_IOC_GET_BUF: + err = strm_dev->ipu_ioctl_ops->ici_get_buf(file, strm_dev, &isys_ioctl_cmd_args.frame_info); + break; + case ICI_IOC_PUT_BUF: + err = strm_dev->ipu_ioctl_ops->ici_put_buf(file, strm_dev, &isys_ioctl_cmd_args.frame_info); + break; + case ICI_IOC_SET_FORMAT: + err = strm_dev->ipu_ioctl_ops->ici_set_format(file, strm_dev, &isys_ioctl_cmd_args.sf); + break; + default: + err = -ENOTTY; + break; + } + + mutex_unlock(strm_dev->mutex); + if (err) + return err; + + if (copy && _IOC_DIR(ioctl_cmd) & _IOC_READ) { + err = copy_to_user(up, &isys_ioctl_cmd_args, _IOC_SIZE(ioctl_cmd)); + if (err) + return -EFAULT; + } + + return 0; +} + +static const struct file_operations ici_stream_fops = { + .owner = THIS_MODULE, + .open = stream_device_open, /* calls strm_dev->fops->open() */ + .unlocked_ioctl = ici_stream_ioctl, /* 
calls strm_dev->ipu_ioctl_ops->() */ +#ifdef CONFIG_COMPAT + .compat_ioctl = ici_stream_ioctl32, +#endif + .release = stream_device_release, /* calls strm_dev->fops->release() */ + .poll = ici_fop_poll, /* calls strm_dev->fops->poll() */ +}; + +/* Called on device_unregister */ +static void base_device_release(struct device *sd) +{ +} + +int stream_device_register(struct ici_stream_device *strm_dev) +{ + int rval = 0; + int num; + + if (!stream_device_init) { + rval = ici_stream_init(); + if (rval) { + printk(KERN_ERR "%s: failed to init stream device\n", __func__); + return rval; + } + stream_device_init++; + } + num = stream_devices_registered; + + if (!(num < MAX_STREAM_DEVICES)) { + printk(KERN_WARNING "%s: wrong minor of stream device: %d\n", + __func__, num); + return -EINVAL; + } + strm_dev->minor = -1; + + cdev_init(&strm_dev->cdev, &ici_stream_fops); + strm_dev->cdev.owner = ici_stream_fops.owner; + + rval = cdev_add(&strm_dev->cdev, MKDEV(MAJOR(ici_stream_dev_t), num), 1); + if (rval) { + printk(KERN_WARNING "%s: failed to add cdevice\n", __func__); + return rval; + } + + strm_dev->dev.class = stream_class; + strm_dev->dev.devt = MKDEV(MAJOR(ici_stream_dev_t), num); + strm_dev->dev.parent = strm_dev->dev_parent; + dev_set_name(&strm_dev->dev, "%s%d", ICI_STREAM_DEVICE_NAME, num); + rval = device_register(&strm_dev->dev); + if (rval < 0) { + printk(KERN_WARNING "%s: device_register failed\n", __func__); + cdev_del(&strm_dev->cdev); + return rval; + } + + /* Release function will be called on device unregister, + it is needed to avoid errors */ + strm_dev->dev.release = base_device_release; + strlcpy(strm_dev->name, strm_dev->dev.kobj.name, sizeof(strm_dev->name)); + strm_dev->minor = num; + + printk(KERN_INFO "Device registered: %s\n", strm_dev->name); + stream_devices_registered++; + + return 0; +} + +void stream_device_unregister(struct ici_stream_device *strm_dev) +{ + device_unregister(&strm_dev->dev); + cdev_del(&strm_dev->cdev); + + 
stream_devices_registered--; + if (!stream_devices_registered) { + ici_stream_exit(); + stream_device_init--; + } +} + +static int ici_stream_init(void) +{ + int rval; + ici_stream_dev_t = MKDEV(MAJOR_STREAM, 0); + + rval = register_chrdev_region(ici_stream_dev_t, + MAX_STREAM_DEVICES, ICI_STREAM_DEVICE_NAME); + if (rval) { + printk(KERN_WARNING "can't register intel_ipu_ici stream chrdev region (%d)\n", rval); + return rval; + } + + stream_class = class_create(THIS_MODULE, ICI_STREAM_DEVICE_NAME); + if (IS_ERR(stream_class)) { + unregister_chrdev_region(ici_stream_dev_t, MAX_STREAM_DEVICES); + printk(KERN_WARNING "Failed to register device class %s\n", ICI_STREAM_DEVICE_NAME); + return PTR_ERR(stream_class); + } + + return 0; +} + +static void ici_stream_exit(void) +{ + class_unregister(stream_class); + //class_destroy(stream_class); + unregister_chrdev_region(ici_stream_dev_t, MAX_STREAM_DEVICES); + + printk(KERN_INFO "intel_ipu_ici stream device unregistered\n"); +} + diff --git a/drivers/media/pci/intel/ici/ici-isys-stream-device.h b/drivers/media/pci/intel/ici/ici-isys-stream-device.h new file mode 100644 index 0000000000000..5aec894509202 --- /dev/null +++ b/drivers/media/pci/intel/ici/ici-isys-stream-device.h @@ -0,0 +1,58 @@ +/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0) */ +/* + * Copyright (C) 2018 Intel Corporation + */ + +#ifndef ICI_ISYS_STREAM_DEVICE_H +#define ICI_ISYS_STREAM_DEVICE_H + +#include +#include +#include +#include +#include + +#include "ici-isys-frame-buf.h" +#include "ici-isys-pipeline.h" +#include "virtio/intel-ipu4-virtio-common.h" + +struct ici_ioctl_ops; +struct ici_frame_plane; + +struct ici_stream_device { + struct device dev; /* intel stream base dev */ + struct cdev cdev; /* character device */ + struct device *dev_parent; /* parent device ipu_bus */ + struct mutex *mutex; + const struct file_operations *fops; /* standard Linux fops */ + struct ici_isys_frame_buf_list *frame_buf_list; /* frame buffer wrapper pointer */ + 
/*
 * Per-device ioctl callbacks, dispatched by ici_stream_ioctl() and
 * ici_stream_ioctl32() with strm_dev->mutex held.  The @fh argument
 * is the struct ici_stream_device itself (passed as the dispatch
 * target, see file->private_data in the ioctl handlers).
 */
struct ici_ioctl_ops {
	/* ICI_IOC_SET_FORMAT: apply the requested stream format. */
	int (*ici_set_format) (struct file *file, void *fh,
		struct ici_stream_format *psf);
	/* ICI_IOC_STREAM_ON / ICI_IOC_STREAM_OFF. */
	int (*ici_stream_on) (struct file *file, void *fh);
	int (*ici_stream_off) (struct file *file, void *fh);
	/* ICI_IOC_GET_BUF: exchange a frame-buffer descriptor. */
	int (*ici_get_buf) (struct file *file, void *fh,
		struct ici_frame_info *fram);
	/*
	 * Virtualization-path variant; not dispatched from the ioctl
	 * handlers in this file — presumably used by the virtio
	 * mediator (see virt_priv member above).  TODO confirm caller.
	 */
	int (*ici_get_buf_virt) (struct file *file, void *fh,
		struct ici_frame_buf_wrapper *fram, struct page **pages);
	/* ICI_IOC_PUT_BUF: return a buffer descriptor. */
	int (*ici_put_buf) (struct file *file, void *fh,
		struct ici_frame_info *fram);
};
/*
 * Pixel format table for the vector-format capture path.
 *
 * NOTE(review): columns appear to be { pixelformat, bpp (padded),
 * bpp (effective depth), code, CSS frame format } — confirm against
 * the struct ici_isys_pixelformat definition in the headers.
 */
const struct ici_isys_pixelformat ici_isys_pfmts[] = {
	/* YUV vector format */
	{ ICI_FORMAT_YUYV, 24, 24, ICI_FORMAT_YUYV,
	  IA_CSS_ISYS_FRAME_FORMAT_YUV420_16 },
	/* Raw bayer vector formats. */
	{ ICI_FORMAT_SBGGR12, 16, 12, ICI_FORMAT_SBGGR12,
	  IA_CSS_ISYS_FRAME_FORMAT_RAW16 },
	{ ICI_FORMAT_SGBRG12, 16, 12, ICI_FORMAT_SGBRG12,
	  IA_CSS_ISYS_FRAME_FORMAT_RAW16 },
	{ ICI_FORMAT_SGRBG12, 16, 12, ICI_FORMAT_SGRBG12,
	  IA_CSS_ISYS_FRAME_FORMAT_RAW16 },
	{ ICI_FORMAT_SRGGB12, 16, 12, ICI_FORMAT_SRGGB12,
	  IA_CSS_ISYS_FRAME_FORMAT_RAW16 },
	{ ICI_FORMAT_SBGGR10, 16, 10, ICI_FORMAT_SBGGR10,
	  IA_CSS_ISYS_FRAME_FORMAT_RAW16 },
	{ ICI_FORMAT_SGBRG10, 16, 10, ICI_FORMAT_SGBRG10,
	  IA_CSS_ISYS_FRAME_FORMAT_RAW16 },
	{ ICI_FORMAT_SGRBG10, 16, 10, ICI_FORMAT_SGRBG10,
	  IA_CSS_ISYS_FRAME_FORMAT_RAW16 },
	{ ICI_FORMAT_SRGGB10, 16, 10, ICI_FORMAT_SRGGB10,
	  IA_CSS_ISYS_FRAME_FORMAT_RAW16 },
	{ ICI_FORMAT_SBGGR8, 16, 8, ICI_FORMAT_SBGGR8,
	  IA_CSS_ISYS_FRAME_FORMAT_RAW16 },
	{ ICI_FORMAT_SGBRG8, 16, 8, ICI_FORMAT_SGBRG8,
	  IA_CSS_ISYS_FRAME_FORMAT_RAW16 },
	{ ICI_FORMAT_SGRBG8, 16, 8, ICI_FORMAT_SGRBG8,
	  IA_CSS_ISYS_FRAME_FORMAT_RAW16 },
	{ ICI_FORMAT_SRGGB8, 16, 8, ICI_FORMAT_SRGGB8,
	  IA_CSS_ISYS_FRAME_FORMAT_RAW16 },
	/*{ V4L2_FMT_INTEL_IPU4_ISYS_META, 8, 8, MEDIA_BUS_FMT_FIXED, IA_CSS_ISYS_MIPI_DATA_TYPE_EMBEDDED },*/
	{ }	/* sentinel */
};
*/ + { ICI_FORMAT_SBGGR12, 16, 12, ICI_FORMAT_SBGGR12, IA_CSS_ISYS_FRAME_FORMAT_RAW16 }, + { ICI_FORMAT_SGBRG12, 16, 12, ICI_FORMAT_SGBRG12, IA_CSS_ISYS_FRAME_FORMAT_RAW16 }, + { ICI_FORMAT_SGRBG12, 16, 12, ICI_FORMAT_SGRBG12, IA_CSS_ISYS_FRAME_FORMAT_RAW16 }, + { ICI_FORMAT_SRGGB12, 16, 12, ICI_FORMAT_SRGGB12, IA_CSS_ISYS_FRAME_FORMAT_RAW16 }, + { ICI_FORMAT_SBGGR10, 16, 10, ICI_FORMAT_SBGGR10, IA_CSS_ISYS_FRAME_FORMAT_RAW16 }, + { ICI_FORMAT_SGBRG10, 16, 10, ICI_FORMAT_SGBRG10, IA_CSS_ISYS_FRAME_FORMAT_RAW16 }, + { ICI_FORMAT_SGRBG10, 16, 10, ICI_FORMAT_SGRBG10, IA_CSS_ISYS_FRAME_FORMAT_RAW16 }, + { ICI_FORMAT_SRGGB10, 16, 10, ICI_FORMAT_SRGGB10, IA_CSS_ISYS_FRAME_FORMAT_RAW16 }, + { ICI_FORMAT_SBGGR8, 8, 8, ICI_FORMAT_SBGGR8, IA_CSS_ISYS_FRAME_FORMAT_RAW8 }, + { ICI_FORMAT_SGBRG8, 8, 8, ICI_FORMAT_SGBRG8, IA_CSS_ISYS_FRAME_FORMAT_RAW8 }, + { ICI_FORMAT_SGRBG8, 8, 8, ICI_FORMAT_SGRBG8, IA_CSS_ISYS_FRAME_FORMAT_RAW8 }, + { ICI_FORMAT_SRGGB8, 8, 8, ICI_FORMAT_SRGGB8, IA_CSS_ISYS_FRAME_FORMAT_RAW8 }, + { } +}; + +const struct ici_isys_pixelformat ici_isys_pfmts_packed[] = { + { ICI_FORMAT_UYVY, 16, 16, ICI_FORMAT_UYVY, IA_CSS_ISYS_FRAME_FORMAT_UYVY }, + { ICI_FORMAT_YUYV, 16, 16, ICI_FORMAT_YUYV, IA_CSS_ISYS_FRAME_FORMAT_YUYV }, + { ICI_FORMAT_RGB565, 16, 16, ICI_FORMAT_RGB565, IA_CSS_ISYS_FRAME_FORMAT_RGB565 }, + { ICI_FORMAT_RGB888, 24, 24, ICI_FORMAT_RGB888, IA_CSS_ISYS_FRAME_FORMAT_RGBA888 }, + { ICI_FORMAT_SBGGR12, 12, 12, ICI_FORMAT_SBGGR12, IA_CSS_ISYS_FRAME_FORMAT_RAW12 }, + { ICI_FORMAT_SGBRG12, 12, 12, ICI_FORMAT_SGBRG12, IA_CSS_ISYS_FRAME_FORMAT_RAW12 }, + { ICI_FORMAT_SGRBG12, 12, 12, ICI_FORMAT_SGRBG12, IA_CSS_ISYS_FRAME_FORMAT_RAW12 }, + { ICI_FORMAT_SRGGB12, 12, 12, ICI_FORMAT_SRGGB12, IA_CSS_ISYS_FRAME_FORMAT_RAW12 }, + { ICI_FORMAT_SBGGR10, 10, 10, ICI_FORMAT_SBGGR10, IA_CSS_ISYS_FRAME_FORMAT_RAW10 }, + { ICI_FORMAT_SGBRG10, 10, 10, ICI_FORMAT_SGBRG10, IA_CSS_ISYS_FRAME_FORMAT_RAW10 }, + { ICI_FORMAT_SGRBG10, 10, 10, ICI_FORMAT_SGRBG10, 
IA_CSS_ISYS_FRAME_FORMAT_RAW10 }, + { ICI_FORMAT_SRGGB10, 10, 10, ICI_FORMAT_SRGGB10, IA_CSS_ISYS_FRAME_FORMAT_RAW10 }, + { ICI_FORMAT_SBGGR8, 8, 8, ICI_FORMAT_SBGGR8, IA_CSS_ISYS_FRAME_FORMAT_RAW8 }, + { ICI_FORMAT_SGBRG8, 8, 8, ICI_FORMAT_SGBRG8, IA_CSS_ISYS_FRAME_FORMAT_RAW8 }, + { ICI_FORMAT_SGRBG8, 8, 8, ICI_FORMAT_SGRBG8, IA_CSS_ISYS_FRAME_FORMAT_RAW8 }, + { ICI_FORMAT_SRGGB8, 8, 8, ICI_FORMAT_SRGGB8, IA_CSS_ISYS_FRAME_FORMAT_RAW8 }, + { } +}; + +struct pipeline_format_data { + struct ici_isys_stream *as; + struct ici_pad_framefmt pff; +}; + +struct pipeline_power_data { + struct ici_isys_stream *as; + int power; +}; + +static int pipeline_set_node_power(void* cb_data, + struct ici_isys_node* node, + struct node_pipe* pipe) +{ + struct pipeline_power_data* pwr_data = cb_data; + struct ici_isys_stream *as = pwr_data->as; + dev_dbg(&as->isys->adev->dev, + "Set ext sd \"%s\" power to %d\n", + node->name, pwr_data->power); + if (node->node_set_power) { + int ret = node->node_set_power(node, pwr_data->power); + if (ret < 0) + return ret; + } + return 0; +} + +static int pipeline_set_power(struct ici_isys_stream *as, + int state) +{ + struct pipeline_power_data pwr_data = { + .as = as, + .power = state, + }; + + return ici_isys_pipeline_for_each_node( + pipeline_set_node_power, + &pwr_data, + &as->node, + &as->ip, + true); +} + +static int intel_ipu4_isys_library_close(struct ici_isys *isys) +{ + struct device *dev = &isys->adev->dev; + int timeout = IPU_ISYS_TURNOFF_TIMEOUT; + int rval; + unsigned long flags; + /* + * Ask library to stop the isys fw. Actual close takes + * some time as the FW must stop its actions including code fetch + * to SP icache. 
+ */ + mutex_lock(&(isys)->lib_mutex); + spin_lock_irqsave(&isys->power_lock, flags); + rval = ipu_lib_call_notrace_unlocked(device_close, isys); + spin_unlock_irqrestore(&isys->power_lock, flags); + mutex_unlock(&(isys)->lib_mutex); + if (rval) + dev_err(dev, "Device close failure: %d\n", rval); + + /* release probably fails if the close failed. Let's try still */ + do { + usleep_range(IPU_ISYS_TURNOFF_DELAY_US, + 2 * IPU_ISYS_TURNOFF_DELAY_US); + rval = ipu_lib_call_notrace(device_release, isys, 0); + timeout--; + } while (rval != 0 && timeout); + + spin_lock_irqsave(&isys->power_lock, flags); + if (!rval) + isys->fwcom = NULL; /* No further actions needed */ + else + dev_err(dev, "Device release time out %d\n", rval); + spin_unlock_irqrestore(&isys->power_lock, flags); + return rval; +} + +static unsigned int get_comp_format(u32 code) +{ + unsigned int predictor = 0; /* currently hard coded */ + unsigned int udt = ici_isys_format_code_to_mipi(code); + unsigned int scheme = ici_isys_get_compression_scheme(code); + + /* if data type is not user defined return here */ + if ((udt < ICI_ISYS_MIPI_CSI2_TYPE_USER_DEF(1)) + || (udt > ICI_ISYS_MIPI_CSI2_TYPE_USER_DEF(8))) + return 0; + + /* + * For each user defined type (1..8) there is configuration bitfield for + * decompression. 
/*
 * Add an extra input/output pin pair to the firmware stream config
 * for capturing MIPI short packets (used on interlaced streams when
 * the short-packet source is the CSI receiver — see
 * start_stream_firmware()).  Completed short-packet buffers are
 * routed to ici_isys_frame_short_packet_ready() via the pipeline's
 * output_pins table.
 */
static void csi_short_packet_prepare_firmware_stream_cfg_ici(
	struct ici_isys_pipeline *ip,
	struct ia_css_isys_stream_cfg_data *cfg)
{
	struct ici_isys_stream *as =
		ici_pipeline_to_stream(ip);
	struct ici_isys_frame_buf_list *buf_list =
		&as->buf_list;
	/* Claim the next free firmware input and output pin slots. */
	int input_pin = cfg->nof_input_pins++;
	int output_pin = cfg->nof_output_pins++;
	struct ia_css_isys_input_pin_info *input_info =
		&cfg->input_pins[input_pin];
	struct ia_css_isys_output_pin_info *output_info =
		&cfg->output_pins[output_pin];

	/*
	 * Setting dt as ICI_ISYS_SHORT_PACKET_GENERAL_DT will cause
	 * MIPI receiver to receive all MIPI short packets.
	 */
	input_info->dt = ICI_ISYS_SHORT_PACKET_GENERAL_DT;
	input_info->input_res.width = ICI_ISYS_SHORT_PACKET_WIDTH;
	input_info->input_res.height = buf_list->num_short_packet_lines;

	/* Route pin completions into the short-packet buffer list. */
	ip->output_pins[output_pin].pin_ready =
		ici_isys_frame_short_packet_ready;
	ip->output_pins[output_pin].buf_list = buf_list;
	buf_list->short_packet_output_pin = output_pin;

	output_info->input_pin_id = input_pin;
	output_info->output_res.width = ICI_ISYS_SHORT_PACKET_WIDTH;
	output_info->output_res.height = buf_list->num_short_packet_lines;
	output_info->stride = ICI_ISYS_SHORT_PACKET_WIDTH *
		ICI_ISYS_SHORT_PACKET_UNITSIZE;
	output_info->pt = ICI_ISYS_SHORT_PACKET_PT;
	output_info->ft = ICI_ISYS_SHORT_PACKET_FT;
	/* Interrupt on every short-packet completion. */
	output_info->send_irq = 1;
}
__iomem *csi2_tm_base; + void __iomem *event_mask_reg; + unsigned int trace_addr; + int rval; + int i; + + if (ip->csi2->index >= IPU_ISYS_MAX_CSI2_LEGACY_PORTS) { + csi2_tm_base = isys->pdata->base + TRACE_REG_CSI2_3PH_TM_BASE; + trace_addr = TRACE_REG_CSI2_3PH_TM_TRACE_ADDRESS_VAL; + event_mask_reg = csi2_tm_base + + TRACE_REG_CSI2_3PH_TM_TRACE_DDR_EN_REG_IDX_P( + ip->csi2->index); + } else { + csi2_tm_base = isys->pdata->base + TRACE_REG_CSI2_TM_BASE; + trace_addr = TRACE_REG_CSI2_TM_TRACE_ADDRESS_VAL; + event_mask_reg = csi2_tm_base + + TRACE_REG_CSI2_TM_TRACE_DDR_EN_REG_IDX_P( + ip->csi2->index); + } + + if (!enable) { + writel(0, event_mask_reg); + writel(0, csi2_tm_base + + TRACE_REG_CSI2_TM_OVERALL_ENABLE_REG_IDX); + writel(0, tunit_base + TRACE_REG_TUN_DDR_ENABLE); + return 0; + } + + /* ring buffer base */ + writel(isys->short_packet_trace_buffer_dma_addr, + tunit_base + TRACE_REG_TUN_DRAM_BASE_ADDR); + + /* ring buffer end */ + writel(isys->short_packet_trace_buffer_dma_addr + + IPU_ISYS_SHORT_PACKET_TRACE_BUFFER_SIZE - + IPU_ISYS_SHORT_PACKET_TRACE_MSG_SIZE, + tunit_base + TRACE_REG_TUN_DRAM_END_ADDR); + + /* Infobits for ddr trace */ + writel(IPU_INFO_REQUEST_DESTINATION_PRIMARY, + tunit_base + TRACE_REG_TUN_DDR_INFO_VAL); + + /* Remove reset from trace timers */ + writel(TRACE_REG_GPREG_TRACE_TIMER_RST_OFF, + isys_base + TRACE_REG_IS_GPREG_TRACE_TIMER_RST_N); + + /* Reset CSI2 monitor */ + writel(1, csi2_tm_base + TRACE_REG_CSI2_TM_RESET_REG_IDX); + + /* Set trace address register. */ + writel(trace_addr, csi2_tm_base + + TRACE_REG_CSI2_TM_TRACE_ADDRESS_REG_IDX); + writel(TRACE_REG_CSI2_TM_TRACE_HEADER_VAL, csi2_tm_base + + TRACE_REG_CSI2_TM_TRACE_HEADER_REG_IDX); + + /* Enable DDR trace. */ + writel(1, tunit_base + TRACE_REG_TUN_DDR_ENABLE); + + /* Enable trace for CSI2 port. 
*/ +#if 0 + reg_val = readl(event_mask_reg); + reg_val |= TRACE_REG_CSI2_TM_EVENT_FS(ip->vc); + writel(reg_val, event_mask_reg); +#else + for (i = 0; i < IPU_ISYS_MAX_CSI2_LEGACY_PORTS + + IPU_ISYS_MAX_CSI2_COMBO_PORTS; i++) { + void __iomem *event_mask_reg = + (i < IPU_ISYS_MAX_CSI2_LEGACY_PORTS) ? + isys->pdata->base + TRACE_REG_CSI2_TM_BASE + + TRACE_REG_CSI2_TM_TRACE_DDR_EN_REG_IDX_P(i) : + isys->pdata->base + TRACE_REG_CSI2_3PH_TM_BASE + + TRACE_REG_CSI2_3PH_TM_TRACE_DDR_EN_REG_IDX_P(i); + + writel(IPU_ISYS_SHORT_PACKET_TRACE_EVENT_MASK, + event_mask_reg); + } +#endif + /* Enable CSI2 receiver monitor */ + writel(1, csi2_tm_base + TRACE_REG_CSI2_TM_OVERALL_ENABLE_REG_IDX); + + rval = ipu_buttress_tsc_read(isys->adev->isp, + &isys->tsc_timer_base); + if (rval) { + dev_err(&isys->adev->dev, + "Failed to read TSC timer.\n"); + return rval; + } + rval = ipu_trace_get_timer(&isys->adev->dev, + &isys->tunit_timer_base); + if (rval) { + dev_err(&isys->adev->dev, + "Failed to read Tunit timer.\n"); + return rval; + } + + return 0; +} + +static void get_stream_opened(struct ici_isys_stream *as) +{ + unsigned long flags; + + spin_lock_irqsave(&as->isys->lock, flags); + as->isys->stream_opened++; + spin_unlock_irqrestore(&as->isys->lock, flags); +} + +static void put_stream_opened(struct ici_isys_stream *as) +{ + unsigned long flags; + + spin_lock_irqsave(&as->isys->lock, flags); + as->isys->stream_opened--; + spin_unlock_irqrestore(&as->isys->lock, flags); +} + +static int get_stream_handle(struct ici_isys_stream *as) +{ + struct ici_isys_pipeline *ip = &as->ip; + unsigned int stream_handle; + unsigned long flags; + + spin_lock_irqsave(&as->isys->lock, flags); + for (stream_handle = 0; + stream_handle < INTEL_IPU4_ISYS_MAX_STREAMS; stream_handle++) + if (as->isys->ici_pipes[stream_handle] == NULL) + break; + if (stream_handle == INTEL_IPU4_ISYS_MAX_STREAMS) { + spin_unlock_irqrestore(&as->isys->lock, flags); + return -EBUSY; + } + as->isys->ici_pipes[stream_handle] = 
ip; + ip->stream_handle = stream_handle; + spin_unlock_irqrestore(&as->isys->lock, flags); + return 0; +} + +static void put_stream_handle(struct ici_isys_stream *as) +{ + struct ici_isys_pipeline *ip = &as->ip; + unsigned long flags; + + spin_lock_irqsave(&as->isys->lock, flags); + as->isys->ici_pipes[ip->stream_handle] = NULL; + ip->stream_handle = -1; + spin_unlock_irqrestore(&as->isys->lock, flags); +} + +/* Create stream and start it using the CSS library API. */ +static int start_stream_firmware(struct ici_isys_stream *as) +{ + struct ici_isys_pipeline *ip = &as->ip; + struct device *dev = &as->isys->adev->dev; + struct ia_css_isys_stream_cfg_data stream_cfg = { + .src = ip->source, + .vc = 0, + .isl_use = ICI_ISL_OFF, + .nof_input_pins = 1, + }; + struct ia_css_isys_frame_buff_set css_buf; + struct ici_pad_framefmt source_fmt = { + .pad.pad_idx = ip->asd_source_pad_id, + .ffmt = {0}, + + }; + struct ici_isys_node* be_csi2_node = NULL; + + int rval, rvalout, tout, i; + + rval = ip->asd_source->node.node_get_pad_ffmt( + &ip->asd_source->node, &source_fmt); + if (rval) + return rval; + stream_cfg.compfmt = get_comp_format(source_fmt.ffmt.pixelformat); + stream_cfg.input_pins[0].input_res.width = source_fmt.ffmt.width; + stream_cfg.input_pins[0].input_res.height = source_fmt.ffmt.height; + stream_cfg.input_pins[0].dt = + ici_isys_format_code_to_mipi(source_fmt.ffmt.pixelformat); + + /* + * Only CSI2-BE has the capability to do crop, + * so get the crop info from csi2-be. 
+ */ + stream_cfg.crop[0].right_offset = source_fmt.ffmt.width; + stream_cfg.crop[0].bottom_offset = source_fmt.ffmt.height; + if (ip->csi2_be) { + struct ici_pad_selection ps = { + .pad.pad_idx = CSI2_BE_ICI_PAD_SOURCE, + .rect = {0}, + }; + be_csi2_node = &ip->csi2_be->asd.node; + if (be_csi2_node->node_get_pad_sel) + rval = be_csi2_node->node_get_pad_sel( + be_csi2_node, &ps); + else + rval = -ENODEV; + if (!rval) { + stream_cfg.crop[0].left_offset = ps.rect.left; + stream_cfg.crop[0].top_offset = ps.rect.top; + stream_cfg.crop[0].right_offset = ps.rect.left + + ps.rect.width; + stream_cfg.crop[0].bottom_offset = ps.rect.top + + ps.rect.height; + } + } + + as->prepare_firmware_stream_cfg(as, &stream_cfg); + + if (ip->interlaced) { + if (ip->short_packet_source == + IPU_ISYS_SHORT_PACKET_FROM_RECEIVER) + csi_short_packet_prepare_firmware_stream_cfg_ici(ip, + &stream_cfg); + else + csi_short_packet_configure_tunit(ip, 1); + } + +// csslib_dump_isys_stream_cfg(dev, &stream_cfg); //TODO implement corresponding function to dump command input to FW + + ip->nr_output_pins = stream_cfg.nof_output_pins; + + rval = get_stream_handle(as); + if (rval) { + dev_dbg(dev, "Can't get stream_handle\n"); + return rval; + } + + reinit_completion(&ip->stream_open_completion); +/* SKTODO: Debug start */ + dev_dbg(dev, "SKTODO: My stream open\n"); + dev_dbg(dev, "ia_css_isys_stream_source src = %d\n", stream_cfg.src); + dev_dbg(dev, "ia_css_isys_mipi_vc vc = %d\n", stream_cfg.vc); + dev_dbg(dev, "ia_css_isys_isl_use isl_use = %d\n", stream_cfg.isl_use); + dev_dbg(dev, "compfmt = %u\n", stream_cfg.compfmt); + dev_dbg(dev, "struct ia_css_isys_isa_cfg isa_cfg"); + for ( i = 0 ; i < N_IA_CSS_ISYS_CROPPING_LOCATION ; i++ ) { + dev_dbg(dev, "crop[%d].top_offset = %d\n", i, stream_cfg.crop[i].top_offset); + dev_dbg(dev, "crop[%d].left_offset = %d\n", i, stream_cfg.crop[i].left_offset); + dev_dbg(dev, "crop[%d].bottom_offset = %d\n", i, stream_cfg.crop[i].bottom_offset); + dev_dbg(dev, 
"crop[%d].right_offset = %d\n", i, stream_cfg.crop[i].right_offset); + } + dev_dbg(dev, "send_irq_sof_discarded = %u\n", stream_cfg.send_irq_sof_discarded); + dev_dbg(dev, "send_irq_eof_discarded = %u\n", stream_cfg.send_irq_eof_discarded); + dev_dbg(dev, "send_resp_sof_discarded = %u\n", stream_cfg.send_resp_sof_discarded); + dev_dbg(dev, "send_resp_eof_discarded = %u\n", stream_cfg.send_resp_eof_discarded); + dev_dbg(dev, "nof_input_pins = %u\n", stream_cfg.nof_input_pins); + dev_dbg(dev, "nof_output_pins = %u\n", stream_cfg.nof_output_pins); + for (i = 0 ; i < stream_cfg.nof_input_pins ; i++) { + dev_dbg(dev, "input_pins[%d].input_res.width = %u\n", i, stream_cfg.input_pins[i].input_res.width); + dev_dbg(dev, "input_pins[%d].input_res.height = %u\n", i, stream_cfg.input_pins[i].input_res.height); + dev_dbg(dev, "input_pins[%d].dt = %d\n", i, stream_cfg.input_pins[i].dt); + dev_dbg(dev, "input_pins[%d].mipi_store_mode = %d\n", i, stream_cfg.input_pins[i].mipi_store_mode); + } + for (i = 0 ; i < stream_cfg.nof_output_pins ; i++) { + dev_dbg(dev, "output_pins[%d].input_pin_id = %u\n", i, stream_cfg.output_pins[i].input_pin_id); + dev_dbg(dev, "output_pins[%d].output_res.width = %u\n", i, stream_cfg.output_pins[i].output_res.width); + dev_dbg(dev, "output_pins[%d].output_res.height = %u\n", i, stream_cfg.output_pins[i].output_res.height); + dev_dbg(dev, "output_pins[%d].stride = %u\n", i, stream_cfg.output_pins[i].stride); + dev_dbg(dev, "output_pins[%d].pt = %d\n", i, stream_cfg.output_pins[i].pt); + dev_dbg(dev, "output_pins[%d].ft = %d\n", i, stream_cfg.output_pins[i].ft); + dev_dbg(dev, "output_pins[%d].watermark_in_lines = %u\n", i, stream_cfg.output_pins[i].watermark_in_lines); + dev_dbg(dev, "output_pins[%d].send_irq = %u\n", i, stream_cfg.output_pins[i].send_irq); + } + +/* SKTODO: Debug end */ + rval = ipu_lib_call(stream_open, as->isys, ip->stream_handle, &stream_cfg); + if (rval < 0) { + dev_err(dev, "can't open stream (%d)\n", rval); + goto 
out_put_stream_handle; + } + get_stream_opened(as); + + tout = wait_for_completion_timeout(&ip->stream_open_completion, + IPU_LIB_CALL_TIMEOUT_JIFFIES); + if (!tout) { + dev_err(dev, "stream open time out\n"); + rval = -ETIMEDOUT; + goto out_put_stream_opened; + } + if (ip->error) { + dev_err(dev, "stream open error: %d\n", ip->error); + rval = -EIO; + goto out_put_stream_opened; + } + dev_dbg(dev, "start stream: open complete\n"); + + rval = ici_isys_frame_buf_add_next(as, &css_buf); + if (rval) { + dev_err(dev, "no buffers for streaming (%d)\n", rval); + goto out_stream_close; + } +//TODO implement corresponding function to dump command input to FW +// csslib_dump_isys_frame_buff_set(dev, &css_buf, +// stream_cfg.nof_output_pins); + + reinit_completion(&ip->stream_start_completion); + rval = ipu_lib_call(stream_start, as->isys, ip->stream_handle, + &css_buf); + if (rval < 0) { + dev_err(dev, "can't start streaming (%d)\n", rval); + goto out_stream_close; + } + + tout = wait_for_completion_timeout(&ip->stream_start_completion, + IPU_LIB_CALL_TIMEOUT_JIFFIES); + if (!tout) { + dev_err(dev, "stream start time out\n"); + rval = -ETIMEDOUT; + goto out_stream_close; + } + if (ip->error) { + dev_err(dev, "stream start error: %d\n", ip->error); + rval = -EIO; + goto out_stream_close; + } + dev_dbg(dev, "start stream: complete\n"); + + return 0; + +out_stream_close: + reinit_completion(&ip->stream_close_completion); + + rvalout = ipu_lib_call(stream_close, as->isys, ip->stream_handle); + if (rvalout < 0) { + dev_dbg(dev, "can't close stream (%d)\n", rvalout); + } else { + tout = wait_for_completion_timeout(&ip->stream_close_completion, + IPU_LIB_CALL_TIMEOUT_JIFFIES); + if (!tout) + dev_err(dev, "stream close time out\n"); + else if (ip->error) + dev_err(dev, "stream close error: %d\n", ip->error); + else + dev_dbg(dev, "stream close complete\n"); + } + +out_put_stream_opened: + put_stream_opened(as); + +out_put_stream_handle: + put_stream_handle(as); + return rval; +} + 
+static void stop_streaming_firmware(struct ici_isys_stream *as) +{ + struct ici_isys_pipeline *ip = &as->ip; + struct device *dev = &as->isys->adev->dev; + int rval, tout; + + reinit_completion(&ip->stream_stop_completion); + rval = ipu_lib_call(stream_flush, as->isys, ip->stream_handle); + if (rval < 0) { + dev_err(dev, "can't stop stream (%d)\n", rval); + } else { + tout = wait_for_completion_timeout(&ip->stream_stop_completion, + IPU_LIB_CALL_TIMEOUT_JIFFIES); + if (!tout) + dev_err(dev, "stream stop time out\n"); + else if (ip->error) + dev_err(dev, "stream stop error: %d\n", ip->error); + else + dev_dbg(dev, "stop stream: complete\n"); + } + if (ip->interlaced && ip->short_packet_source == + IPU_ISYS_SHORT_PACKET_FROM_TUNIT) + csi_short_packet_configure_tunit(ip, 0); +} + +static void close_streaming_firmware(struct ici_isys_stream *as) +{ + struct ici_isys_pipeline *ip = &as->ip; + struct device *dev = &as->isys->adev->dev; + int rval, tout; + + reinit_completion(&ip->stream_close_completion); + rval = ipu_lib_call(stream_close, as->isys, ip->stream_handle); + if (rval < 0) { + dev_err(dev, "can't close stream (%d)\n", rval); + } else { + tout = wait_for_completion_timeout(&ip->stream_close_completion, + IPU_LIB_CALL_TIMEOUT_JIFFIES); + if (!tout) + dev_err(dev, "stream close time out\n"); + else if (ip->error) + dev_err(dev, "stream close error: %d\n", ip->error); + else + dev_dbg(dev, "close stream: complete\n"); + } + put_stream_opened(as); + put_stream_handle(as); +} + +void ici_isys_stream_add_capture_done( + struct ici_isys_pipeline* ip, + void (*capture_done)(struct ici_isys_pipeline* ip, + struct ia_css_isys_resp_info* resp)) +{ + unsigned int i; + + /* Different instances may register same function. 
Add only once */ + for (i = 0; i < ICI_NUM_CAPTURE_DONE ; i++) + if (ip->capture_done[i] == capture_done) + return; + + for (i = 0; i < ICI_NUM_CAPTURE_DONE ; i++) { + if (ip->capture_done[i] == NULL) { + ip->capture_done[i] = capture_done; + return; + } + } + /* + * Too many call backs registered. Change to INTEL_IPU4_NUM_CAPTURE_DONE + * constant probably required. + */ + BUG(); +} + +void ici_isys_prepare_firmware_stream_cfg_default( + struct ici_isys_stream *as, + struct ia_css_isys_stream_cfg_data *cfg) +{ + struct ici_isys_pipeline *ip = &as->ip; + + struct ici_isys_frame_buf_list *bl = &as->buf_list; + + struct ia_css_isys_output_pin_info *pin_info; + int pin = cfg->nof_output_pins++; + + bl->fw_output = pin; + ip->output_pins[pin].pin_ready = ici_isys_frame_buf_ready; + ip->output_pins[pin].buf_list = bl; + + pin_info = &cfg->output_pins[pin]; + pin_info->input_pin_id = 0; + pin_info->output_res.width = as->strm_format.ffmt.width; + pin_info->output_res.height = as->strm_format.ffmt.height; + pin_info->stride = as->strm_format.pfmt.plane_fmt[0].bytesperline; + pin_info->pt = bl->css_pin_type; + pin_info->ft = as->pfmt->css_pixelformat; + pin_info->send_irq = 1; + cfg->vc = ip->vc; +} + +static int pipeline_validate_node(void* cb_data, + struct ici_isys_node* src_node, + struct node_pipe* pipe) +{ + int rval; + struct ici_isys_pipeline *ip = cb_data; + + dev_dbg(&ip->pipeline_dev->dev, "Validating node %s\n", + src_node->name); + if (src_node->node_pipeline_validate) { + rval = src_node->node_pipeline_validate(&ip->pipe, + src_node); + if (rval) + return rval; + } + if (pipe) { + struct ici_isys_node* sink_node = + pipe->sink_pad->node; + struct ici_pad_framefmt src_format = { + .pad.pad_idx = pipe->src_pad->pad_id, + }; + struct ici_pad_framefmt sink_format = { + .pad.pad_idx = pipe->sink_pad->pad_id, + }; + if (src_node->node_get_pad_ffmt) { + rval = src_node->node_get_pad_ffmt(src_node, + &src_format); + if (rval) + return rval; + } + if 
(sink_node->node_get_pad_ffmt) { + rval = sink_node->node_get_pad_ffmt(sink_node, + &sink_format); + if (rval) + return rval; + } + if (src_format.ffmt.width != sink_format.ffmt.width || + src_format.ffmt.height != sink_format.ffmt.height || + src_format.ffmt.pixelformat != sink_format.ffmt.pixelformat || + src_format.ffmt.field != sink_format.ffmt.field || + src_format.ffmt.colorspace != sink_format.ffmt.colorspace) { + dev_err(&ip->pipeline_dev->dev, "Formats don't match node (%d:%d) -> node (%d:%d)\n", + src_node->node_id, src_format.pad.pad_idx, + sink_node->node_id, sink_format.pad.pad_idx); + return -EINVAL; + } + } + return 0; +} + +static int pipeline_validate( + struct ici_isys_node *node, + struct ici_isys_pipeline *ip) +{ + return ici_isys_pipeline_for_each_node( + pipeline_validate_node, + ip, + node, + ip, + true); +} + +struct set_streaming_data { + struct ici_isys_pipeline *ip; + bool external; + int state; +}; + +static int set_streaming_node(void* cb_data, + struct ici_isys_node* node, + struct node_pipe* pipe) +{ + struct set_streaming_data* data = cb_data; + if (data->external != node->external) + return 0; + + if (node->node_set_streaming) + return node->node_set_streaming(node, data->ip, + data->state); + return 0; +} + + +static int set_streaming(struct ici_isys_node *node, + struct ici_isys_pipeline *ip, + bool external, + int state) +{ + struct set_streaming_data data = { + .ip = ip, + .external = external, + .state = state + }; + return ici_isys_pipeline_for_each_node( + set_streaming_node, + &data, + node, + ip, + true); +} + +static int ici_isys_set_streaming( + struct ici_isys_stream *as, + unsigned int state) +{ + struct ici_isys_pipeline *ip = &as->ip; + int rval = 0; + + dev_dbg(&as->isys->adev->dev, "set stream (intel_stream%d): %d\n", state, + as->strm_dev.minor); + + if (!state) { + stop_streaming_firmware(as); + + /* stop external sub-device now. 
*/ + if (ip->csi2) { + ici_isys_csi2_wait_last_eof(ip->csi2); + } + + set_streaming(&as->node, ip, true, 0); + } + + rval = set_streaming(&as->node, ip, false, state); + if (rval) + goto out_stop_streaming; + + if (state) { + rval = start_stream_firmware(as); + if (rval) { + goto out_stop_streaming; + } + dev_dbg(&ip->isys->adev->dev, "set stream: source %d, stream_handle %d\n", + ip->source, ip->stream_handle); + + /* Start external sub-device now. */ + rval = set_streaming(&as->node, ip, true, state); + if (rval) + goto out_stop_streaming_firmware; + + } else { + close_streaming_firmware(as); + } + + ip->streaming = state; + return 0; + +out_stop_streaming_firmware: + stop_streaming_firmware(as); + +out_stop_streaming: + set_streaming(&as->node, ip, false, 0); + return rval; +} + +static void stream_buffers(struct ici_isys_stream *as) +{ + struct ici_isys_pipeline *ip = &as->ip; + struct ia_css_isys_frame_buff_set set = {}; + int rval; + + for (;;) { + rval = ici_isys_frame_buf_add_next(as, &set); + if (rval) { + break; + } +//TODO implement corresponding function to dump command input to FW +// csslib_dump_isys_frame_buff_set(&as->isys->adev->dev, &set, +// ip->nr_output_pins); + WARN_ON(ipu_lib_call( + stream_capture_indication, as->isys, + ip->stream_handle, &set) < 0); + } +} + +static int ici_isys_stream_on(struct file *file, void *fh) +{ + struct ici_isys_stream *as = + dev_to_stream(file->private_data); + struct ici_isys_pipeline *ip = &as->ip; + int rval, i; + + dev_dbg(&as->isys->adev->dev, + "stream_on: %u\n", as->strm_dev.minor); + + if (ip->streaming) { + dev_dbg(&as->isys->adev->dev, + "Already streaming\n"); + return 0; + } + + ip->csi2 = NULL; + ip->csi2_be = NULL; + ip->asd_source = NULL; + ip->asd_source_pad_id = 0; + rval = pipeline_validate(&as->node, ip); + if (rval) + return rval; + + if (!ip->asd_source) { + dev_err(&ip->isys->adev->dev, "set stream: Pipeline does not have a source\n"); + return -ENODEV; + } + + pipeline_set_power(as, 1); + 
+ mutex_lock(&as->isys->stream_mutex); + ip->source = ip->asd_source->source; + + for (i = 0; i < ICI_NUM_CAPTURE_DONE; i++) + ip->capture_done[i] = NULL; + + if (ip->interlaced) { + pr_err("** SKTODO: INTERLACE ENABLED **\n"); + if (ip->short_packet_source == + IPU_ISYS_SHORT_PACKET_FROM_RECEIVER) { + rval = ici_isys_frame_buf_short_packet_setup( + as, &as->strm_format); + if (rval) + goto out_requeue; + } else { + memset(ip->isys->short_packet_trace_buffer, 0, + IPU_ISYS_SHORT_PACKET_TRACE_BUFFER_SIZE); + dma_sync_single_for_device(&as->isys->adev->dev, + as->isys->short_packet_trace_buffer_dma_addr, + IPU_ISYS_SHORT_PACKET_TRACE_BUFFER_SIZE, + DMA_BIDIRECTIONAL); + ip->short_packet_trace_index = 0; + } + } + + rval = ici_isys_set_streaming(as, 1); + if (rval) + goto out_cleanup_short_packet; + + ip->streaming = 1; + + dev_dbg(&as->isys->adev->dev, "dispatching queued requests\n"); + stream_buffers(as); + dev_dbg(&as->isys->adev->dev, + "done dispatching queued requests\n"); + + mutex_unlock(&as->isys->stream_mutex); + + return 0; + +out_cleanup_short_packet: + ici_isys_frame_buf_short_packet_destroy(as); + +out_requeue: + mutex_unlock(&as->isys->stream_mutex); + ici_isys_frame_buf_stream_cancel(as); + pipeline_set_power(as, 0); + return rval; +} + +static int ici_isys_stream_off(struct file *file, void *fh) +{ + struct ici_isys_stream *as = + dev_to_stream(file->private_data); + struct ici_isys_pipeline *ip = &as->ip; + + mutex_lock(&as->isys->stream_mutex); + if (ip->streaming) + ici_isys_set_streaming(as, 0); + + ici_isys_frame_buf_short_packet_destroy(as); + mutex_unlock(&as->isys->stream_mutex); + + ici_isys_frame_buf_stream_cancel(as); + + mutex_lock(&as->isys->stream_mutex); + //streaming always should be turned off last. + //This variable prevents other streams from + //starting before we are done with cleanup. 
+ ip->streaming = 0; + mutex_unlock(&as->isys->stream_mutex); + + pipeline_set_power(as, 0); + return 0; +} + +const struct ici_isys_pixelformat +*ici_isys_get_pixelformat( + struct ici_isys_stream *as, unsigned int pixelformat) +{ + const struct ici_isys_pixelformat *pfmt; + unsigned pad; + const unsigned *supported_codes; + + pad = as->pad.pad_id; + supported_codes = as->asd->supported_codes[pad]; + + for (pfmt = as->pfmts; pfmt->bpp; pfmt++) { + unsigned int i; + + if (pfmt->code != pixelformat) + continue; + + for (i = 0; supported_codes[i]; i++) { + if (pfmt->code == supported_codes[i]) + return pfmt; + } + } + + /* Not found. Get the default, i.e. the first defined one. */ + for (pfmt = as->pfmts; pfmt->bpp; pfmt++) { + if (pfmt->code == *supported_codes) + return pfmt; + } + + BUG(); +} + +const struct ici_isys_pixelformat +*ici_isys_video_try_fmt_vid_mplane_default( + struct ici_isys_stream *as, + struct ici_stream_format *mpix) +{ + const struct ici_isys_pixelformat *pfmt = + ici_isys_get_pixelformat(as, mpix->ffmt.pixelformat); + + mpix->ffmt.pixelformat = pfmt->pixelformat; + mpix->pfmt.num_planes = 1; + + if (!as->packed) + mpix->pfmt.plane_fmt[0].bytesperline = + mpix->ffmt.width * DIV_ROUND_UP(pfmt->bpp, + BITS_PER_BYTE); + else + mpix->pfmt.plane_fmt[0].bytesperline = DIV_ROUND_UP( + as->line_header_length + as->line_footer_length + + (unsigned int)mpix->ffmt.width * pfmt->bpp, + BITS_PER_BYTE); + + mpix->pfmt.plane_fmt[0].bytesperline = + ALIGN(mpix->pfmt.plane_fmt[0].bytesperline, + as->isys->line_align); + mpix->pfmt.plane_fmt[0].bpp = pfmt->bpp; + + /* + * (height + 1) * bytesperline due to a hardware issue: the DMA unit + * is a power of two, and a line should be transferred as few units + * as possible. The result is that up to line length more data than + * the image size may be transferred to memory after the image. + * Another limition is the GDA allocation unit size. For low + * resolution it gives a bigger number. 
Use larger one to avoid + * memory corruption. + */ + mpix->pfmt.plane_fmt[0].sizeimage = + max(max(mpix->pfmt.plane_fmt[0].sizeimage, + mpix->pfmt.plane_fmt[0].bytesperline * + mpix->ffmt.height + + max(mpix->pfmt.plane_fmt[0].bytesperline, + as->isys->pdata->ipdata->isys_dma_overshoot)), + 1U); + + if (mpix->ffmt.field == ICI_FIELD_ANY) + mpix->ffmt.field = ICI_FIELD_NONE; + + return pfmt; +} + +static int ici_s_fmt_vid_cap_mplane( + struct ici_isys_stream *as, + struct ici_stream_format *f) +{ + if (as->ip.streaming) + return -EBUSY; + + as->pfmt = as->try_fmt_vid_mplane(as, f); + as->strm_format = *f; + + return 0; +} + +/** + * Returns true if device does not support real interrupts and + * polling must be used. + */ +static int ici_poll_for_events( + struct ici_isys_stream *as) +{ +// return is_intel_ipu_hw_fpga(); + return 0; +} + +static void ipu_cleanup_fw_msg_bufs(struct ici_isys *isys) +{ + struct isys_fw_msgs *fwmsg, *fwmsg0; + unsigned long flags; + + spin_lock_irqsave(&isys->listlock, flags); + list_for_each_entry_safe(fwmsg, fwmsg0, &isys->framebuflist_fw, head) + list_move(&fwmsg->head, &isys->framebuflist); + spin_unlock_irqrestore(&isys->listlock, flags); +} + +static int stream_fop_open(struct inode *inode, struct file *file) +{ + struct ici_stream_device *strm_dev = + inode_to_intel_ipu_stream_device(inode); + struct ici_isys_stream* as = dev_to_stream(strm_dev); + struct ici_isys *isys = as->isys; + struct ipu_bus_device *adev = + to_ipu_bus_device(&isys->adev->dev); + struct ipu_device *isp = adev->isp; + int rval; + dev_dbg(&isys->adev->dev, "%s: stream open (%p)\n", __func__, as); + + mutex_lock(&isys->mutex); + if (isys->reset_needed) { + mutex_unlock(&isys->mutex); + dev_warn(&isys->adev->dev, "isys power cycle required\n"); + return -EIO; + } + mutex_unlock(&isys->mutex); + + rval = ipu_buttress_authenticate(isp); + if (rval) { + dev_err(&isys->adev->dev, "FW authentication failed\n"); + return rval; + } + + rval = 
pm_runtime_get_sync(&isys->adev->dev); + if (rval < 0) { + pm_runtime_put_noidle(&isys->adev->dev); + return rval; + } + + mutex_lock(&isys->mutex); + + ipu_configure_spc(adev->isp, + &isys->pdata->ipdata->hw_variant, + IA_CSS_PKG_DIR_ISYS_INDEX, + isys->pdata->base, isys->pkg_dir, + isys->pkg_dir_dma_addr); + + + if (isys->ici_stream_opened++) { + /* Already open */ + mutex_unlock(&isys->mutex); + return 0; + } + + ipu_cleanup_fw_msg_bufs(isys); + + if (isys->fwcom) { + /* + * Something went wrong in previous shutdown. As we are now + * restarting isys we can safely delete old context. + */ + ici_fw_isys_cleanup(isys); + isys->fwcom = NULL; + } + + if (ici_poll_for_events(as)) { + static const struct sched_param param = { + .sched_priority = MAX_USER_RT_PRIO/2, + }; + + isys->isr_thread = kthread_run( + intel_ipu4_isys_isr_run_ici, as->isys, + IPU_ISYS_ENTITY_PREFIX); + + if (IS_ERR(isys->isr_thread)) { + rval = PTR_ERR(isys->isr_thread); + goto out_intel_ipu4_pipeline_pm_use; + } + + sched_setscheduler(isys->isr_thread, SCHED_FIFO, ¶m); + } + + rval = ici_fw_isys_init(as->isys, IPU_ISYS_NUM_STREAMS); + if (rval < 0) + goto out_lib_init; + + mutex_unlock(&isys->mutex); + + strm_dev->virt_dev_id = -1; + + return 0; + +out_lib_init: + if (ici_poll_for_events(as)) + kthread_stop(isys->isr_thread); + +out_intel_ipu4_pipeline_pm_use: + isys->ici_stream_opened--; + mutex_unlock(&isys->mutex); + pm_runtime_put(&isys->adev->dev); + + return rval; +} + +static int stream_fop_release(struct inode *inode, struct file *file) +{ + struct ici_stream_device *strm_dev = + inode_to_intel_ipu_stream_device(inode); + struct ici_isys_stream* as = dev_to_stream(strm_dev); + int ret = 0; + dev_dbg(&as->isys->adev->dev, "%s: stream release (%p)\n", __func__, as); + + if (as->ip.streaming) + ici_isys_stream_off(file, NULL); + else + ici_isys_frame_buf_stream_cancel(as); + + mutex_lock(&as->isys->mutex); + + if (!--as->isys->ici_stream_opened) { + if (ici_poll_for_events(as)) + 
kthread_stop(as->isys->isr_thread); + + intel_ipu4_isys_library_close(as->isys); + if (as->isys->fwcom) { + as->isys->reset_needed = true; + ret = -EIO; + } + } + + mutex_unlock(&as->isys->mutex); + + pm_runtime_put(&as->isys->adev->dev); + return ret; +} + +static unsigned int stream_fop_poll(struct file *file, + struct poll_table_struct *poll) +{ + struct ici_isys_stream *as = + dev_to_stream(file->private_data); + struct ici_isys *isys = as->isys; + unsigned int res = 0; + + dev_dbg(&isys->adev->dev, "stream_fop_poll\n"); + + poll_wait(file, &as->buf_list.wait, poll); + + if (!list_empty(&as->buf_list.putbuf_list)) + res = POLLIN; + + dev_dbg(&isys->adev->dev, "stream_fop_poll res %u\n", res); + + return res; +} + +static int ici_isys_set_format(struct file *file, void *fh, + struct ici_stream_format *sf) +{ + int rval; + struct ici_isys_stream *as = + dev_to_stream(file->private_data); + struct ici_isys *isys = as->isys; + + dev_dbg(&isys->adev->dev, "%s: ici stream set format (%p)\n \ + width: %u, height: %u, pixelformat: %u, field: %u, colorspace: %u\n", + __func__, as, + sf->ffmt.width, + sf->ffmt.height, + sf->ffmt.pixelformat, + sf->ffmt.field, + sf->ffmt.colorspace); + + if (sf->ffmt.field == ICI_FIELD_ALTERNATE) { + dev_dbg(&isys->adev->dev, "Interlaced enabled\n"); + as->ip.interlaced = true; + as->ip.short_packet_source = 1; + } else { + as->ip.interlaced = false; + } + + rval = ici_s_fmt_vid_cap_mplane(as, sf); + if (rval) { + dev_err(&isys->adev->dev, "failed to set format (vid_cap) %d\n", rval); + return rval; + } + if (sf->pfmt.num_planes != 1) { + dev_err(&isys->adev->dev, "Invalid num of planes %d\n", + sf->pfmt.num_planes); + return rval; + } + if (!sf->pfmt.plane_fmt[0].sizeimage) { + dev_err(&isys->adev->dev, "Zero image size for plane 0\n"); + return rval; + } + + return 0; +} + +static int ici_isys_getbuf(struct file *file, void *fh, + struct ici_frame_info *user_frame_info) +{ + int rval = 0; + struct ici_isys_stream *as = dev_to_stream( + 
file->private_data); + struct ici_isys *isys = as->isys; + + //DEBUGK("%s: ici stream getbuf (%p)\n", __func__, as); + rval = ici_isys_get_buf(as, user_frame_info); + if(rval) { + dev_err(&isys->adev->dev, "failed to get buffer %d\n", rval); + return rval; + } + + mutex_lock(&as->isys->stream_mutex); + if (as->ip.streaming) { + stream_buffers(as); + } + mutex_unlock(&as->isys->stream_mutex); + return 0; +} + +static int ici_isys_getbuf_virt(struct file *file, void *fh, + struct ici_frame_buf_wrapper *user_frame_buf, struct page **pages) +{ + int rval = 0; + struct ici_isys_stream *as = dev_to_stream( + file->private_data); + struct ici_isys *isys = as->isys; + + rval = ici_isys_get_buf_virt(as, user_frame_buf, pages); + if (rval) { + dev_err(&isys->adev->dev, "failed to get buffer %d\n", rval); + return rval; + } + + mutex_lock(&as->isys->stream_mutex); + if (as->ip.streaming) { + stream_buffers(as); + } + mutex_unlock(&as->isys->stream_mutex); + return 0; +} + +static int ici_isys_putbuf(struct file *file, void *fh, + struct ici_frame_info *user_frame_info) +{ + int rval = 0; + struct ici_isys_stream *as = dev_to_stream(file->private_data); + struct ici_isys *isys = as->isys; + //DEBUGK("%s: ici stream putbuf (%p)\n", __func__, as); + rval = ici_isys_put_buf(as, user_frame_info, + file->f_flags); + if(rval) { + dev_err(&isys->adev->dev, "failed to put buffer %d\n", rval); + return rval; + } + return 0; +} + +static int ici_isys_stream_get_ffmt( + struct ici_isys_node* node, + struct ici_pad_framefmt* pff) +{ + struct ici_isys_stream *as = node->sd; + if (pff->pad.pad_idx != 0) { + dev_err(&as->isys->adev->dev, + "Stream only has pad 0\n"); + return -EINVAL; + } + pff->ffmt = as->strm_format.ffmt; + return 0; +} + +static const struct ici_ioctl_ops ioctl_ops_mplane_ici = { + .ici_set_format = ici_isys_set_format, + .ici_stream_on = ici_isys_stream_on, + .ici_stream_off = ici_isys_stream_off, + .ici_get_buf = ici_isys_getbuf, + .ici_get_buf_virt = 
ici_isys_getbuf_virt, + .ici_put_buf = ici_isys_putbuf, +}; + +static const struct file_operations ipu4_isys_ici_stream_fops = { + .owner = THIS_MODULE, + .poll = stream_fop_poll, + .open = stream_fop_open, + .release = stream_fop_release, +}; + +int ici_isys_stream_init( + struct ici_isys_stream *as, + struct ici_isys_subdev *asd, + struct ici_isys_node *node, + unsigned int pad, + unsigned long pad_flags) +{ + int rval; + char name[ICI_MAX_NODE_NAME]; + + mutex_init(&as->mutex); + init_completion(&as->ip.stream_open_completion); + init_completion(&as->ip.stream_close_completion); + init_completion(&as->ip.stream_start_completion); + init_completion(&as->ip.stream_stop_completion); + init_completion(&as->ip.capture_ack_completion); + as->ip.isys = as->isys; + as->ip.pipeline_dev = &as->isys->pipeline_dev; + as->asd = asd; + + as->strm_dev.ipu_ioctl_ops = &ioctl_ops_mplane_ici; + + ici_isys_frame_buf_init(&as->buf_list); + + as->pad.flags = pad_flags | ICI_PAD_FLAGS_MUST_CONNECT; + snprintf(name, sizeof(name), + "%s Stream", asd->node.name); + rval = ici_isys_pipeline_node_init(as->isys, + &as->node, name, 1, &as->pad); + if (rval) + goto out_init_fail; + + if (__ici_isys_subdev_get_ffmt(asd, pad)) + as->strm_format.ffmt = + *__ici_isys_subdev_get_ffmt(asd, pad); + + as->node.sd = as; + as->node.pipe = &as->ip.pipe; + as->node.node_get_pad_ffmt = + ici_isys_stream_get_ffmt; + + asd->node.pipe = &as->ip.pipe; + /*asd->node.ops = &entity_ops;*/ + as->strm_dev.fops = &ipu4_isys_ici_stream_fops; + + as->strm_dev.frame_buf_list = &as->buf_list; + as->strm_dev.mutex = &as->mutex; + as->strm_dev.dev_parent = &as->isys->adev->dev; + dev_set_drvdata(&as->strm_dev.dev, as); + + mutex_lock(&as->mutex); + + rval = stream_device_register(&as->strm_dev); + if (rval) + goto out_mutex_unlock; + + if (pad_flags & ICI_PAD_FLAGS_SINK) + rval = node_pad_create_link( + node, pad, &as->node, 0, 0); + else if (pad_flags & ICI_PAD_FLAGS_SOURCE) + rval = node_pad_create_link( + &as->node, 
0, node, pad, 0); + + if (rval) { + printk(KERN_WARNING "can't create link\n"); + goto out_mutex_unlock; + } + + mutex_unlock(&as->mutex); + + return rval; + +out_mutex_unlock: + mutex_unlock(&as->mutex); + node_pads_cleanup(&as->asd->node); + //intel_ipu4_isys_framebuf_cleanup(&as->buf_list); +out_init_fail: + mutex_destroy(&as->mutex); + + return rval; +} + +void ici_isys_stream_cleanup(struct ici_isys_stream *as) +{ + list_del(&as->node.node_entry); + stream_device_unregister(&as->strm_dev); + node_pads_cleanup(&as->asd->node); + mutex_destroy(&as->mutex); +} + +#endif //ICI_ENABLED + diff --git a/drivers/media/pci/intel/ici/ici-isys-stream.h b/drivers/media/pci/intel/ici/ici-isys-stream.h new file mode 100644 index 0000000000000..457b123a65db0 --- /dev/null +++ b/drivers/media/pci/intel/ici/ici-isys-stream.h @@ -0,0 +1,84 @@ +/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0) */ +/* + * Copyright (C) 2018 Intel Corporation + */ + +#ifndef ICI_ISYS_STREAM_H +#define ICI_ISYS_STREAM_H + +#include +#include +#include + +#include "ici-isys-stream-device.h" +#include "ici-isys-frame-buf.h" +#include "ici-isys-pipeline.h" + + +struct ici_isys; +struct ia_css_isys_stream_cfg_data; +struct ici_isys_subdev; + +struct ici_isys_pixelformat { + uint32_t pixelformat; + uint32_t bpp; + uint32_t bpp_packed; + uint32_t code; + uint32_t css_pixelformat; +}; + +struct ici_isys_stream { + /* Serialise access to other fields in the struct. 
*/ + struct mutex mutex; + struct node_pad pad; + struct ici_isys_node node; + struct ici_stream_device strm_dev; + struct ici_stream_format strm_format; + const struct ici_isys_pixelformat *pfmts; + const struct ici_isys_pixelformat *pfmt; + struct ici_isys_frame_buf_list buf_list; + struct ici_isys_subdev* asd; + struct ici_isys *isys; /* its parent device */ + struct ici_isys_pipeline ip; + unsigned int streaming; + bool packed; + unsigned int line_header_length; /* bits */ + unsigned int line_footer_length; /* bits */ + const struct ici_isys_pixelformat *(*try_fmt_vid_mplane)( + struct ici_isys_stream *as, + struct ici_stream_format *mpix); + void (*prepare_firmware_stream_cfg)( + struct ici_isys_stream *as, + struct ia_css_isys_stream_cfg_data *cfg); +}; + +#define to_intel_ipu4_isys_ici_stream(__buf_list) \ + container_of(__buf_list, struct ici_isys_stream, buf_list) +#define ici_pipeline_to_stream(__ip) \ + container_of(__ip, struct ici_isys_stream, ip) + +extern const struct ici_isys_pixelformat ici_isys_pfmts[]; +extern const struct ici_isys_pixelformat ici_isys_pfmts_be_soc[]; +extern const struct ici_isys_pixelformat ici_isys_pfmts_packed[]; + +const struct ici_isys_pixelformat +*ici_isys_video_try_fmt_vid_mplane_default( + struct ici_isys_stream *as, + struct ici_stream_format *mpix); +void ici_isys_prepare_firmware_stream_cfg_default( + struct ici_isys_stream *as, + struct ia_css_isys_stream_cfg_data *cfg); + +int ici_isys_stream_init(struct ici_isys_stream *as, + struct ici_isys_subdev *asd, + struct ici_isys_node *node, + unsigned int pad, + unsigned long pad_flags); +void ici_isys_stream_cleanup(struct ici_isys_stream *as); + +void ici_isys_stream_add_capture_done( + struct ici_isys_pipeline* ip, + void (*capture_done)(struct ici_isys_pipeline* ip, + struct ia_css_isys_resp_info* resp)); + +#endif /* ICI_ISYS_STREAM_H */ diff --git a/drivers/media/pci/intel/ici/ici-isys-subdev.c b/drivers/media/pci/intel/ici/ici-isys-subdev.c new file mode 100644 
index 0000000000000..4d12a700d015d --- /dev/null +++ b/drivers/media/pci/intel/ici/ici-isys-subdev.c @@ -0,0 +1,548 @@ +// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0) +/* + * Copyright (C) 2018 Intel Corporation + */ + +#include "./ici/ici-isys.h" +#ifdef ICI_ENABLED + +#include "./ici/ici-isys-subdev.h" +#include "./ici/ici-isys-pipeline.h" + +unsigned int ici_isys_format_code_to_bpp(u32 code) +{ + switch (code) { + case ICI_FORMAT_RGB888: + return 24; + case ICI_FORMAT_RGB565: + case ICI_FORMAT_UYVY: + case ICI_FORMAT_YUYV: + return 16; + case ICI_FORMAT_SBGGR12: + case ICI_FORMAT_SGBRG12: + case ICI_FORMAT_SGRBG12: + case ICI_FORMAT_SRGGB12: + return 12; + case ICI_FORMAT_SBGGR10: + case ICI_FORMAT_SGBRG10: + case ICI_FORMAT_SGRBG10: + case ICI_FORMAT_SRGGB10: + return 10; + case ICI_FORMAT_SBGGR8: + case ICI_FORMAT_SGBRG8: + case ICI_FORMAT_SGRBG8: + case ICI_FORMAT_SRGGB8: + case ICI_FORMAT_SBGGR10_DPCM8: + case ICI_FORMAT_SGBRG10_DPCM8: + case ICI_FORMAT_SGRBG10_DPCM8: + case ICI_FORMAT_SRGGB10_DPCM8: + return 8; + default: + BUG_ON(1); + } +} + +unsigned int ici_isys_format_code_to_mipi(u32 code) +{ + switch (code) { + case ICI_FORMAT_RGB565: + return ICI_ISYS_MIPI_CSI2_TYPE_RGB565; + case ICI_FORMAT_RGB888: + return ICI_ISYS_MIPI_CSI2_TYPE_RGB888; + case ICI_FORMAT_UYVY: + case ICI_FORMAT_YUYV: + return ICI_ISYS_MIPI_CSI2_TYPE_YUV422_8; + case ICI_FORMAT_SBGGR12: + case ICI_FORMAT_SGBRG12: + case ICI_FORMAT_SGRBG12: + case ICI_FORMAT_SRGGB12: + return ICI_ISYS_MIPI_CSI2_TYPE_RAW12; + case ICI_FORMAT_SBGGR10: + case ICI_FORMAT_SGBRG10: + case ICI_FORMAT_SGRBG10: + case ICI_FORMAT_SRGGB10: + return ICI_ISYS_MIPI_CSI2_TYPE_RAW10; + case ICI_FORMAT_SBGGR8: + case ICI_FORMAT_SGBRG8: + case ICI_FORMAT_SGRBG8: + case ICI_FORMAT_SRGGB8: + return ICI_ISYS_MIPI_CSI2_TYPE_RAW8; + case ICI_FORMAT_SBGGR10_DPCM8: + case ICI_FORMAT_SGBRG10_DPCM8: + case ICI_FORMAT_SGRBG10_DPCM8: + case ICI_FORMAT_SRGGB10_DPCM8: + return ICI_ISYS_MIPI_CSI2_TYPE_USER_DEF(1); + 
default: + BUG_ON(1); + } +} + +enum ici_isys_subdev_pixelorder +ici_isys_subdev_get_pixelorder(u32 code) +{ + switch (code) { + case ICI_FORMAT_SBGGR12: + case ICI_FORMAT_SBGGR10: + case ICI_FORMAT_SBGGR8: + case ICI_FORMAT_SBGGR10_DPCM8: + return ICI_ISYS_SUBDEV_PIXELORDER_BGGR; + case ICI_FORMAT_SGBRG12: + case ICI_FORMAT_SGBRG10: + case ICI_FORMAT_SGBRG8: + case ICI_FORMAT_SGBRG10_DPCM8: + return ICI_ISYS_SUBDEV_PIXELORDER_GBRG; + case ICI_FORMAT_SGRBG12: + case ICI_FORMAT_SGRBG10: + case ICI_FORMAT_SGRBG8: + case ICI_FORMAT_SGRBG10_DPCM8: + return ICI_ISYS_SUBDEV_PIXELORDER_GRBG; + case ICI_FORMAT_SRGGB12: + case ICI_FORMAT_SRGGB10: + case ICI_FORMAT_SRGGB8: + case ICI_FORMAT_SRGGB10_DPCM8: + return ICI_ISYS_SUBDEV_PIXELORDER_RGGB; + default: + BUG_ON(1); + } +} + +unsigned int ici_isys_get_compression_scheme(u32 code) +{ + switch (code) { + case ICI_FORMAT_SBGGR10_DPCM8: + case ICI_FORMAT_SGBRG10_DPCM8: + case ICI_FORMAT_SGRBG10_DPCM8: + case ICI_FORMAT_SRGGB10_DPCM8: + return 3; + default: + return 0; + } +} + +u32 ici_isys_subdev_code_to_uncompressed(u32 sink_code) +{ + switch (sink_code) { + case ICI_FORMAT_SBGGR10_DPCM8: + return ICI_FORMAT_SBGGR10; + case ICI_FORMAT_SGBRG10_DPCM8: + return ICI_FORMAT_SGBRG10; + case ICI_FORMAT_SGRBG10_DPCM8: + return ICI_FORMAT_SGRBG10; + case ICI_FORMAT_SRGGB10_DPCM8: + return ICI_FORMAT_SRGGB10; + default: + return sink_code; + } +} + +struct ici_framefmt *__ici_isys_subdev_get_ffmt( + struct ici_isys_subdev *asd, + unsigned pad) +{ + if (pad >= asd->num_pads) + return NULL; + + return &asd->ffmt[pad]; +} + +int ici_isys_subdev_get_ffmt( + struct ici_isys_node* node, + struct ici_pad_framefmt* pff) +{ + int ret = 0; + struct ici_framefmt *format_out; + struct ici_isys_subdev *asd = node->sd; + + mutex_lock(&asd->mutex); + format_out = __ici_isys_subdev_get_ffmt(asd, + pff->pad.pad_idx); + if (format_out) + pff->ffmt = *format_out; + else + ret = -EINVAL; + mutex_unlock(&asd->mutex); + return ret; +} + +static int 
__subdev_set_ffmt(struct ici_isys_subdev *asd, + struct ici_pad_framefmt *pff) +{ + unsigned int i; + unsigned pad = pff->pad.pad_idx; + unsigned pixelformat; + BUG_ON(!mutex_is_locked(&asd->mutex)); + + if (pad >= asd->num_pads) + return -EINVAL; + + pff->ffmt.width = clamp(pff->ffmt.width, + IPU_ISYS_MIN_WIDTH, + IPU_ISYS_MAX_WIDTH); + pff->ffmt.height = clamp(pff->ffmt.height, + IPU_ISYS_MIN_HEIGHT, + IPU_ISYS_MAX_HEIGHT); + + pixelformat = asd->supported_codes[pad][0]; + for (i = 0; asd->supported_codes[pad][i]; i++) { + if (asd->supported_codes[pad][i] == + pff->ffmt.pixelformat) { + + pixelformat = asd->supported_codes[pad][i]; + break; + } + } + pff->ffmt.pixelformat = pixelformat; + asd->set_ffmt_internal(asd, pad, &pff->ffmt); + asd->ffmt[pad] = pff->ffmt; + return 0; +} + +int ici_isys_subdev_set_ffmt( + struct ici_isys_node* node, + struct ici_pad_framefmt* pff) +{ + int res; + struct ici_isys_subdev *asd = node->sd; + + mutex_lock(&asd->mutex); + res = __subdev_set_ffmt(asd, pff); + mutex_unlock(&asd->mutex); + return res; +} + +int ici_isys_subdev_get_supported_format( + struct ici_isys_node* node, + struct ici_pad_supported_format_desc* psfd) +{ + struct ici_isys_subdev *asd = node->sd; + int pad = psfd->pad.pad_idx; + int idx = psfd->idx; + int i; + int rval = 0; + + mutex_lock(&asd->mutex); + if (!asd->supported_code_counts[pad]) { + for (i = 0; asd->supported_codes[pad][i]; i++) {} + asd->supported_code_counts[pad] = i; + } + + if (idx < asd->supported_code_counts[pad]) { + psfd->color_format = asd->supported_codes[pad][idx]; + psfd->min_width = IPU_ISYS_MIN_WIDTH; + psfd->max_width = IPU_ISYS_MAX_WIDTH; + psfd->min_height = IPU_ISYS_MIN_HEIGHT; + psfd->max_height = IPU_ISYS_MAX_HEIGHT; + } else { + rval = -EINVAL; + } + + mutex_unlock(&asd->mutex); + return rval; +} + + +int intel_ipu4_isys_subdev_set_crop_rect(struct ici_isys_subdev + *asd, unsigned pad, + struct ici_rect *r) +{ + struct node_pad *np; + struct ici_rect rmax = { 0 }; + struct 
ici_rect *rcrop; + unsigned int tgt; + struct ici_framefmt *ffmt = + __ici_isys_subdev_get_ffmt(asd, pad); + + if (!ffmt) + return -EINVAL; + if (!asd->valid_tgts[pad].crop) + return -EINVAL; + np = &asd->pads[pad]; + rcrop = &asd->crop[pad]; + + if (np->flags & ICI_PAD_FLAGS_SINK) { + rmax.width = ffmt->width; + rmax.height = ffmt->height; + tgt = ICI_ISYS_SUBDEV_PROP_TGT_SINK_CROP; + } else { + /* 0 is the sink pad. */ + rmax = asd->crop[0]; + tgt = ICI_ISYS_SUBDEV_PROP_TGT_SOURCE_CROP; + } + rcrop->width = clamp(r->width, IPU_ISYS_MIN_WIDTH, rmax.width); + rcrop->height = clamp(r->height, IPU_ISYS_MIN_HEIGHT, + rmax.height); + ici_isys_subdev_fmt_propagate(asd, pad, rcrop, tgt, NULL); + return 0; +} + +int intel_ipu4_isys_subdev_set_compose_rect(struct ici_isys_subdev + *asd, unsigned pad, + struct ici_rect *r) +{ + struct node_pad *np; + struct ici_rect rmax = { 0 }; + struct ici_rect *rcompose; + unsigned int tgt; + struct ici_framefmt *ffmt = + __ici_isys_subdev_get_ffmt(asd, pad); + + if (!ffmt) + return -EINVAL; + if (!asd->valid_tgts[pad].compose) + return -EINVAL; + np = &asd->pads[pad]; + rcompose = &asd->compose[pad]; + + if (np->flags & ICI_PAD_FLAGS_SINK) { + rmax = asd->crop[pad]; + tgt = ICI_ISYS_SUBDEV_PROP_TGT_SINK_COMPOSE; + } else { + /* 0 is the sink pad. 
*/ + rmax = asd->compose[0]; + tgt = ICI_ISYS_SUBDEV_PROP_TGT_SOURCE_COMPOSE; + } + rcompose->width = clamp(r->width, IPU_ISYS_MIN_WIDTH, + rmax.width); + rcompose->height = clamp(r->height, IPU_ISYS_MIN_HEIGHT, + rmax.height); + ici_isys_subdev_fmt_propagate(asd, pad, rcompose, tgt, + NULL); + return 0; +} + +int ici_isys_subdev_set_sel( + struct ici_isys_node* node, + struct ici_pad_selection* ps) +{ + struct ici_isys_subdev *asd = node->sd; + int rval = 0; + + if (WARN_ON(ps->pad.pad_idx >= asd->num_pads)) + return -EINVAL; + + mutex_lock(&asd->mutex); + switch (ps->sel_type) + { + case ICI_EXT_SEL_TYPE_COMPOSE: + rval = intel_ipu4_isys_subdev_set_compose_rect( + asd, ps->pad.pad_idx, &ps->rect); + break; + case ICI_EXT_SEL_TYPE_CROP: + rval = intel_ipu4_isys_subdev_set_crop_rect( + asd, ps->pad.pad_idx, &ps->rect); + break; + default: + rval = -EINVAL; + } + mutex_unlock(&asd->mutex); + return rval; +} + +int ici_isys_subdev_get_sel( + struct ici_isys_node* node, + struct ici_pad_selection* ps) +{ + struct ici_isys_subdev *asd = node->sd; + int rval = 0; + + if (WARN_ON(ps->pad.pad_idx >= asd->num_pads)) + return -EINVAL; + + mutex_lock(&asd->mutex); + switch (ps->sel_type) + { + case ICI_EXT_SEL_TYPE_COMPOSE: + ps->rect = asd->compose[ps->pad.pad_idx]; + break; + case ICI_EXT_SEL_TYPE_CROP: + ps->rect = asd->crop[ps->pad.pad_idx]; + break; + default: + rval = -EINVAL; + } + mutex_unlock(&asd->mutex); + return rval; +} + +void ici_isys_subdev_fmt_propagate( + struct ici_isys_subdev *asd, + unsigned pad, + struct ici_rect *r, + enum ici_isys_subdev_prop_tgt tgt, + struct ici_framefmt *ffmt) +{ + unsigned i; + struct ici_framefmt *ffmts[asd->num_pads]; + struct ici_rect *crops[asd->num_pads]; + struct ici_rect *compose[asd->num_pads]; + + if (WARN_ON(pad >= asd->num_pads)) + return; + + for (i = 0; i < asd->num_pads; i++) { + ffmts[i] = __ici_isys_subdev_get_ffmt(asd, pad); + crops[i] = &asd->crop[i]; + compose[i] = &asd->compose[i]; + } + + switch (tgt) { + case 
ICI_ISYS_SUBDEV_PROP_TGT_SINK_FMT: + crops[pad]->left = crops[pad]->top = 0; + crops[pad]->width = ffmt->width; + crops[pad]->height = ffmt->height; + ici_isys_subdev_fmt_propagate(asd, pad, + crops[pad], tgt + 1, + ffmt); + return; + case ICI_ISYS_SUBDEV_PROP_TGT_SINK_CROP: + if (WARN_ON(asd->pads[pad].flags & ICI_PAD_FLAGS_SOURCE)) + return; + compose[pad]->left = compose[pad]->top = 0; + compose[pad]->width = r->width; + compose[pad]->height = r->height; + ici_isys_subdev_fmt_propagate(asd, pad, + compose[pad], tgt + 1, + ffmt); + return; + case ICI_ISYS_SUBDEV_PROP_TGT_SINK_COMPOSE: + if (!(asd->pads[pad].flags & ICI_PAD_FLAGS_SINK)) + return; + + for (i = 1; i < asd->num_pads; i++) { + if (!(asd->pads[i].flags & ICI_PAD_FLAGS_SOURCE)) + continue; + + compose[i]->left = compose[i]->top = 0; + compose[i]->width = r->width; + compose[i]->height = r->height; + ici_isys_subdev_fmt_propagate(asd, i, + compose[i], + tgt + 1, + ffmt); + } + return; + case ICI_ISYS_SUBDEV_PROP_TGT_SOURCE_COMPOSE: + if (WARN_ON(asd->pads[pad].flags & ICI_PAD_FLAGS_SINK)) + return; + + crops[pad]->left = crops[pad]->top = 0; + crops[pad]->width = r->width; + crops[pad]->height = r->height; + ici_isys_subdev_fmt_propagate(asd, pad, + crops[pad], tgt + 1, + ffmt); + return; + case ICI_ISYS_SUBDEV_PROP_TGT_SOURCE_CROP:{ + struct ici_framefmt fmt = { + .width = r->width, + .height = r->height, + /* + * Either use the code from sink pad + * or the current one. + */ + .pixelformat = (ffmt ? 
ffmt->pixelformat : + ffmts[pad]->pixelformat), + }; + + asd->set_ffmt_internal(asd, pad, &fmt); + return; + } + } +} + +int ici_isys_subdev_init(struct ici_isys_subdev *asd, + const char* name, + unsigned int num_pads, + unsigned int index) +{ + int res = 0; + + mutex_init(&asd->mutex); + asd->num_pads = num_pads; + asd->pads = devm_kcalloc(&asd->isys->adev->dev, num_pads, + sizeof(*asd->pads), GFP_KERNEL); + + asd->ffmt = devm_kcalloc(&asd->isys->adev->dev, num_pads, + sizeof(*asd->ffmt), GFP_KERNEL); + + asd->crop = devm_kcalloc(&asd->isys->adev->dev, num_pads, + sizeof(*asd->crop), GFP_KERNEL); + + asd->compose = devm_kcalloc(&asd->isys->adev->dev, num_pads, + sizeof(*asd->compose), GFP_KERNEL); + + asd->valid_tgts = devm_kcalloc(&asd->isys->adev->dev, num_pads, + sizeof(*asd->valid_tgts), + GFP_KERNEL); + + asd->supported_code_counts = devm_kcalloc(&asd->isys->adev->dev, + num_pads, sizeof(*asd->supported_code_counts), + GFP_KERNEL); + + if (!asd->pads || !asd->ffmt || !asd->crop || !asd->compose || + !asd->valid_tgts || !asd->supported_code_counts) { + res = -ENOMEM; + goto cleanup_allocs; + } + + asd->isl_mode = ICI_ISL_OFF; + asd->be_mode = ICI_BE_RAW; + asd->source = -1; + asd->index = index; + + asd->node.parent = &asd->isys->pipeline_dev; + asd->node.sd = asd; + asd->node.external = false; + + res = ici_isys_pipeline_node_init(asd->isys, + &asd->node, name, asd->num_pads, asd->pads); + if (res) + goto cleanup_allocs; + + asd->node.node_set_pad_ffmt = + ici_isys_subdev_set_ffmt; + asd->node.node_get_pad_ffmt = + ici_isys_subdev_get_ffmt; + asd->node.node_get_pad_supported_format = + ici_isys_subdev_get_supported_format; + asd->node.node_set_pad_sel = + ici_isys_subdev_set_sel; + asd->node.node_get_pad_sel = + ici_isys_subdev_get_sel; + + return 0; + +cleanup_allocs: + if (asd->valid_tgts) + devm_kfree(&asd->isys->adev->dev, asd->valid_tgts); + if (asd->compose) + devm_kfree(&asd->isys->adev->dev, asd->compose); + if (asd->crop) + 
devm_kfree(&asd->isys->adev->dev, asd->crop); + if (asd->ffmt) + devm_kfree(&asd->isys->adev->dev, asd->ffmt); + if (asd->pads) + devm_kfree(&asd->isys->adev->dev, asd->pads); + mutex_destroy(&asd->mutex); + return res; +} + +void ici_isys_subdev_cleanup( + struct ici_isys_subdev *asd) +{ + list_del(&asd->node.node_entry); + + if (asd->valid_tgts) + devm_kfree(&asd->isys->adev->dev, asd->valid_tgts); + if (asd->compose) + devm_kfree(&asd->isys->adev->dev, asd->compose); + if (asd->crop) + devm_kfree(&asd->isys->adev->dev, asd->crop); + if (asd->ffmt) + devm_kfree(&asd->isys->adev->dev, asd->ffmt); + if (asd->pads) + devm_kfree(&asd->isys->adev->dev, asd->pads); + mutex_destroy(&asd->mutex); +} + +#endif /*ICI_ENABLED*/ diff --git a/drivers/media/pci/intel/ici/ici-isys-subdev.h b/drivers/media/pci/intel/ici/ici-isys-subdev.h new file mode 100644 index 0000000000000..e783fa1041311 --- /dev/null +++ b/drivers/media/pci/intel/ici/ici-isys-subdev.h @@ -0,0 +1,110 @@ +/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0) */ +/* + * Copyright (C) 2018 Intel Corporation + */ + +#ifndef ICI_ISYS_SUBDEV_H +#define ICI_ISYS_SUBDEV_H + +#include +#include "ici-isys-pipeline.h" + +struct node_subdev_format; + +#define ICI_ISYS_MIPI_CSI2_TYPE_NULL 0x10 +#define ICI_ISYS_MIPI_CSI2_TYPE_BLANKING 0x11 +#define ICI_ISYS_MIPI_CSI2_TYPE_EMBEDDED8 0x12 +#define ICI_ISYS_MIPI_CSI2_TYPE_YUV422_8 0x1e +#define ICI_ISYS_MIPI_CSI2_TYPE_RGB565 0x22 +#define ICI_ISYS_MIPI_CSI2_TYPE_RGB888 0x24 +#define ICI_ISYS_MIPI_CSI2_TYPE_RAW6 0x28 +#define ICI_ISYS_MIPI_CSI2_TYPE_RAW7 0x29 +#define ICI_ISYS_MIPI_CSI2_TYPE_RAW8 0x2a +#define ICI_ISYS_MIPI_CSI2_TYPE_RAW10 0x2b +#define ICI_ISYS_MIPI_CSI2_TYPE_RAW12 0x2c +#define ICI_ISYS_MIPI_CSI2_TYPE_RAW14 0x2d +#define ICI_ISYS_MIPI_CSI2_TYPE_USER_DEF(i) (0x30 + (i)-1) /* 1-8 */ + +enum ici_be_mode { + ICI_BE_RAW = 0, + ICI_BE_SOC +}; + +enum ici_isl_mode { + ICI_ISL_OFF = 0, /* IA_CSS_ISYS_USE_NO_ISL_NO_ISA */ + ICI_ISL_CSI2_BE, /* 
IA_CSS_ISYS_USE_SINGLE_DUAL_ISL */ + ICI_ISL_ISA /* IA_CSS_ISYS_USE_SINGLE_ISA */ +}; + +enum ici_isys_subdev_pixelorder { + ICI_ISYS_SUBDEV_PIXELORDER_BGGR = 0, + ICI_ISYS_SUBDEV_PIXELORDER_GBRG, + ICI_ISYS_SUBDEV_PIXELORDER_GRBG, + ICI_ISYS_SUBDEV_PIXELORDER_RGGB, +}; + +enum ici_isys_subdev_prop_tgt { + ICI_ISYS_SUBDEV_PROP_TGT_SINK_FMT, + ICI_ISYS_SUBDEV_PROP_TGT_SINK_CROP, + ICI_ISYS_SUBDEV_PROP_TGT_SINK_COMPOSE, + ICI_ISYS_SUBDEV_PROP_TGT_SOURCE_COMPOSE, + ICI_ISYS_SUBDEV_PROP_TGT_SOURCE_CROP, +}; + +#define ICI_ISYS_SUBDEV_PROP_TGT_NR_OF \ + (INTEL_IPU4_ISYS_SUBDEV_PROP_TGT_SOURCE_CROP + 1) + +struct ici_isys_subdev { + struct ici_isys_node node; + /* Serialise access to any other field in the struct */ + struct mutex mutex; + struct ici_isys *isys; + unsigned const *const *supported_codes; + unsigned* supported_code_counts; + unsigned int num_pads; + struct node_pad *pads; + struct ici_framefmt *ffmt; + struct ici_rect *crop; + struct ici_rect *compose; + struct { + bool crop; + bool compose; + } *valid_tgts; + enum ici_isl_mode isl_mode; + enum ici_be_mode be_mode; + int source; /* SSI stream source; -1 if unset */ + unsigned int index; /* index for sd array in csi2 */ + void (*set_ffmt_internal)( + struct ici_isys_subdev *asd, + unsigned pad, + struct ici_framefmt *format); +}; + +unsigned int ici_isys_format_code_to_bpp(u32 code); +unsigned int ici_isys_format_code_to_mipi(u32 code); +enum ici_isys_subdev_pixelorder +ici_isys_subdev_get_pixelorder(u32 code); +unsigned int ici_isys_get_compression_scheme(u32 code); +u32 ici_isys_subdev_code_to_uncompressed(u32 sink_code); + +struct ici_framefmt* __ici_isys_subdev_get_ffmt( + struct ici_isys_subdev *asd, + unsigned pad); +void ici_isys_subdev_fmt_propagate( + struct ici_isys_subdev *asd, + unsigned pad, + struct ici_rect *r, + enum ici_isys_subdev_prop_tgt + tgt, + struct ici_framefmt *ffmt); + +int ici_isys_subdev_init(struct ici_isys_subdev *asd, + const char* name, + unsigned int num_pads, + unsigned 
int index); +void ici_isys_subdev_cleanup( + struct ici_isys_subdev *asd); + +#define ici_node_to_subdev(__node) \ + container_of(__node, struct ici_isys_subdev, node) +#endif /* ICI_ISYS_SUBDEV_H */ diff --git a/drivers/media/pci/intel/ici/ici-isys-tpg.c b/drivers/media/pci/intel/ici/ici-isys-tpg.c new file mode 100644 index 0000000000000..250ed01a0cd6a --- /dev/null +++ b/drivers/media/pci/intel/ici/ici-isys-tpg.c @@ -0,0 +1,214 @@ +// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0) +/* + * Copyright (C) 2018 Intel Corporation + */ + +#include "./ici/ici-isys.h" +#ifdef ICI_ENABLED +#include +#include "./ici/ici-isys-subdev.h" +#include "./ici/ici-isys-stream.h" +#include "./ici/ici-isys-tpg.h" +#include "ipu-isys-tpg.h" +#include "isysapi/interface/ia_css_isysapi_fw_types.h" + +#define MIPI_GEN_PPC 4 + +#define ici_asd_to_tpg(__asd) \ + container_of(__asd, struct ici_isys_tpg, asd) + +static const uint32_t ici_tpg_supported_codes_pad[] = { + ICI_FORMAT_SBGGR8, + ICI_FORMAT_SGBRG8, + ICI_FORMAT_SGRBG8, + ICI_FORMAT_SRGGB8, + ICI_FORMAT_SBGGR10, + ICI_FORMAT_SGBRG10, + ICI_FORMAT_SGRBG10, + ICI_FORMAT_SRGGB10, + 0, +}; + +static const uint32_t *ici_tpg_supported_codes[] = { + ici_tpg_supported_codes_pad, +}; + +static int set_stream( + struct ici_isys_node *node, + void* ip, + int enable) +{ + struct ici_isys_subdev *asd = node->sd; + struct ici_isys_tpg *tpg = + to_ici_isys_tpg(asd); + unsigned int bpp = + ici_isys_format_code_to_bpp(tpg->asd. + ffmt[TPG_PAD_SOURCE]. + pixelformat); + /* + * In B0 MIPI_GEN block is CSI2 FB. Need to enable/disable TPG selection + * register to control the TPG streaming. + */ + writel(enable ? 
1 : 0, tpg->sel); + + if (!enable) { + writel(0, tpg->base + MIPI_GEN_REG_COM_ENABLE); + return 0; + } + + writel(MIPI_GEN_COM_DTYPE_RAW(bpp), + tpg->base + MIPI_GEN_REG_COM_DTYPE); + writel(ici_isys_format_code_to_mipi + (tpg->asd.ffmt[TPG_PAD_SOURCE].pixelformat), + tpg->base + MIPI_GEN_REG_COM_VTYPE); + + writel(0, tpg->base + MIPI_GEN_REG_COM_VCHAN); + writel(DIV_ROUND_UP + (tpg->asd.ffmt[TPG_PAD_SOURCE].width * bpp, BITS_PER_BYTE), + tpg->base + MIPI_GEN_REG_COM_WCOUNT); + + writel(0, tpg->base + MIPI_GEN_REG_SYNG_NOF_FRAMES); + + writel(DIV_ROUND_UP(tpg->asd.ffmt[TPG_PAD_SOURCE].width, MIPI_GEN_PPC), + tpg->base + MIPI_GEN_REG_SYNG_NOF_PIXELS); + writel(tpg->asd.ffmt[TPG_PAD_SOURCE].height, + tpg->base + MIPI_GEN_REG_SYNG_NOF_LINES); + + writel(0, tpg->base + MIPI_GEN_REG_TPG_MODE); + + writel(-1, tpg->base + MIPI_GEN_REG_TPG_HCNT_MASK); + writel(-1, tpg->base + MIPI_GEN_REG_TPG_VCNT_MASK); + writel(-1, tpg->base + MIPI_GEN_REG_TPG_XYCNT_MASK); + writel(0, tpg->base + MIPI_GEN_REG_TPG_HCNT_DELTA); + writel(0, tpg->base + MIPI_GEN_REG_TPG_VCNT_DELTA); + + writel(2, tpg->base + MIPI_GEN_REG_COM_ENABLE); + return 0; +} + +static const char *const tpg_mode_items[] = { + "Ramp", + "Checkerboard", /* Does not work, disabled. 
*/ + "Frame Based Colour", + NULL, +}; + +void ici_tpg_set_ffmt(struct ici_isys_subdev *asd, + unsigned pad, + struct ici_framefmt *ffmt) +{ + struct ici_framefmt *cur_ffmt = + __ici_isys_subdev_get_ffmt(asd, pad); + + ffmt->field = ICI_FIELD_NONE; + ffmt->colorspace = 0; + memset(ffmt->reserved, 0, sizeof(ffmt->reserved)); + if (cur_ffmt) { + *cur_ffmt = *ffmt; + dev_dbg(&asd->isys->adev->dev, "%s: TPG ici stream set format\n" + "width: %u, height: %u, pixelformat: %u, colorspace: %u field: %u\n", + __func__, + cur_ffmt->width, + cur_ffmt->height, + cur_ffmt->pixelformat, cur_ffmt->colorspace, cur_ffmt->field); + } +} + +static int ici_tpg_pipeline_validate( + struct node_pipeline *inp, + struct ici_isys_node *node) +{ + struct ici_isys_subdev* asd = node->sd; + struct ici_isys_tpg *tpg = + ici_asd_to_tpg(asd); + struct ici_isys_pipeline *ip = + ici_nodepipe_to_pipeline(inp); + + ip->asd_source = &tpg->asd; + ip->asd_source_pad_id = TPG_PAD_SOURCE; + return 0; +} + +int ici_isys_tpg_init(struct ici_isys_tpg *tpg, + struct ici_isys *isys, + void __iomem *base, void __iomem *sel, + unsigned int index) +{ + struct ici_pad_framefmt fmt = { + .pad.pad_idx = TPG_PAD_SOURCE, + .ffmt = { + .width = 4096, + .height = 3072, + }, + }; + + int rval; + char name[ICI_MAX_NODE_NAME]; + + dev_dbg(&isys->adev->dev, "ici_isys_tpg_init\n"); + + tpg->isys = isys; + tpg->base = base; + tpg->index = index; + tpg->sel = sel; + tpg->asd.isys = isys; + + snprintf(name, sizeof(name), + IPU_ISYS_ENTITY_PREFIX " TPG %u", index); + rval = ici_isys_subdev_init(&tpg->asd, + name, NR_OF_TPG_PADS, 0); + if (rval) + goto fail; + + tpg->asd.pads[TPG_PAD_SOURCE].flags = ICI_PAD_FLAGS_SOURCE; + + tpg->asd.source = IA_CSS_ISYS_STREAM_SRC_MIPIGEN_PORT0 + index; + tpg->asd.supported_codes = ici_tpg_supported_codes; + tpg->asd.set_ffmt_internal = ici_tpg_set_ffmt; + tpg->asd.node.node_set_streaming = set_stream; + tpg->asd.node.node_pipeline_validate = + ici_tpg_pipeline_validate; + 
tpg->asd.node.node_set_pad_ffmt(&tpg->asd.node, &fmt); + tpg->as.isys = isys; + tpg->as.try_fmt_vid_mplane = + ici_isys_video_try_fmt_vid_mplane_default; + tpg->as.prepare_firmware_stream_cfg = + ici_isys_prepare_firmware_stream_cfg_default; + tpg->as.pfmts = ici_isys_pfmts_packed; + tpg->as.packed = true; + tpg->as.buf_list.css_pin_type = IA_CSS_ISYS_PIN_TYPE_MIPI; + tpg->as.line_header_length = + INTEL_IPU4_ISYS_CSI2_LONG_PACKET_HEADER_SIZE; + tpg->as.line_footer_length = + INTEL_IPU4_ISYS_CSI2_LONG_PACKET_FOOTER_SIZE; + + /*TODO:*/ + /* + * Buffer queue management call backs to be added. + */ + + rval = ici_isys_stream_init(&tpg->as, &tpg->asd, + &tpg->asd.node, TPG_PAD_SOURCE, + ICI_PAD_FLAGS_SINK); + if (rval) { + dev_err(&isys->adev->dev, "can't init stream node\n"); + goto fail; + } + + return 0; + +fail: + ici_isys_tpg_cleanup(tpg); + + return 1; +} +EXPORT_SYMBOL(ici_isys_tpg_init); + +void ici_isys_tpg_cleanup(struct ici_isys_tpg *tpg) +{ + ici_isys_subdev_cleanup(&tpg->asd); + ici_isys_stream_cleanup(&tpg->as); +} +EXPORT_SYMBOL(ici_isys_tpg_cleanup); + +#endif /*ICI_ENABLED*/ diff --git a/drivers/media/pci/intel/ici/ici-isys-tpg.h b/drivers/media/pci/intel/ici/ici-isys-tpg.h new file mode 100644 index 0000000000000..5e6eeefc9e83c --- /dev/null +++ b/drivers/media/pci/intel/ici/ici-isys-tpg.h @@ -0,0 +1,47 @@ +/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0) */ +/* + * Copyright (C) 2018 Intel Corporation + */ + +#ifndef ICI_ISYS_TPG_H +#define ICI_ISYS_TPG_H + +#include "ici-isys-frame-buf.h" +#include "ici-isys-subdev.h" +#include "ici-isys-stream.h" + +struct intel_ipu4_isys_tpg_pdata; + +#define TPG_PAD_SOURCE 0 +#define NR_OF_TPG_PADS 1 + +/* + * struct ici_isys_tpg + * + * +*/ +struct ici_isys_tpg { + struct intel_ipu4_isys_tpg_pdata *pdata; + struct ici_isys *isys; + struct ici_isys_subdev asd; + struct ici_isys_stream as; + + void __iomem *base; + void __iomem *sel; + int streaming; + u32 receiver_errors; + unsigned int nlanes; + unsigned 
int index; + atomic_t sof_sequence; +}; + +#define to_ici_isys_tpg(sd) \ + container_of(sd, struct ici_isys_tpg, asd) + +int ici_isys_tpg_init(struct ici_isys_tpg *tpg, + struct ici_isys *isys, + void __iomem *base, void __iomem *sel, + unsigned int index); +void ici_isys_tpg_cleanup(struct ici_isys_tpg *tpg); + +#endif /* ICI_ISYS_TPG_H */ diff --git a/drivers/media/pci/intel/ici/ici-isys.c b/drivers/media/pci/intel/ici/ici-isys.c new file mode 100644 index 0000000000000..500491ee038f6 --- /dev/null +++ b/drivers/media/pci/intel/ici/ici-isys.c @@ -0,0 +1,1377 @@ +// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0) +/* + * Copyright (C) 2018 Intel Corporation + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +#include "ipu.h" +#include "ipu-bus.h" +#include "ipu-cpd.h" +#include "ipu-mmu.h" +#include "ipu-dma.h" +#include "ipu-platform-isys-csi2-reg.h" +#include "ipu-trace.h" +#include "ipu-buttress.h" +#include "isysapi/interface/ia_css_isysapi.h" +#include "./ici/ici-isys.h" +#include "./ici/ici-isys-csi2.h" +#include "./ici/ici-isys-pipeline-device.h" + +#ifdef ICI_ENABLED + +#define ISYS_PM_QOS_VALUE 300 + +#define INTEL_IPU4_ISYS_OUTPUT_PINS 11 +#define INTEL_IPU4_NUM_CAPTURE_DONE 2 + +/* Trace block definitions for isys */ +struct ipu_trace_block isys_trace_blocks[] = { + { + .offset = TRACE_REG_IS_TRACE_UNIT_BASE, + .type = IPU_TRACE_BLOCK_TUN, + }, + { + .offset = TRACE_REG_IS_SP_EVQ_BASE, + .type = IPU_TRACE_BLOCK_TM, + }, + { + .offset = TRACE_REG_IS_SP_GPC_BASE, + .type = IPU_TRACE_BLOCK_GPC, + }, + { + .offset = TRACE_REG_IS_ISL_GPC_BASE, + .type = IPU_TRACE_BLOCK_GPC, + }, + { + .offset = TRACE_REG_IS_MMU_GPC_BASE, + .type = IPU_TRACE_BLOCK_GPC, + }, + { + .offset = TRACE_REG_CSI2_TM_BASE, + .type = IPU_TRACE_CSI2, + }, + { + .offset = TRACE_REG_CSI2_3PH_TM_BASE, + .type = IPU_TRACE_CSI2_3PH, + }, + { + /* Note! 
this covers all 9 blocks */ + .offset = TRACE_REG_CSI2_SIG2SIO_GR_BASE(0), + .type = IPU_TRACE_SIG2CIOS, + }, + { + /* Note! this covers all 9 blocks */ + .offset = TRACE_REG_CSI2_PH3_SIG2SIO_GR_BASE(0), + .type = IPU_TRACE_SIG2CIOS, + }, + { + .offset = TRACE_REG_IS_GPREG_TRACE_TIMER_RST_N, + .type = IPU_TRACE_TIMER_RST, + }, + { + .type = IPU_TRACE_BLOCK_END, + } +}; + + +// Latest code structure doesnt do these functions. +// let it remain to gauge the impact and then remove. +#if 0 +static int isys_determine_legacy_csi_lane_configuration(struct ici_isys *isys) +{ + const struct csi_lane_cfg { + u32 reg_value; + int port_lanes[IPU_ISYS_MAX_CSI2_LEGACY_PORTS]; + } csi_lanes_to_cfg[] = { + { 0x0, { 4, 2, 0, 0 } }, /* no sensor defaults here */ + { 0x1, { 3, 2, 0, 0 } }, + { 0x2, { 2, 2, 0, 0 } }, + { 0x3, { 1, 2, 0, 0 } }, + { 0x4, { 4, 1, 0, 0 } }, + { 0x5, { 3, 1, 0, 0 } }, + { 0x6, { 2, 1, 0, 0 } }, + { 0x7, { 1, 1, 0, 0 } }, + { 0x8, { 4, 1, 0, 1 } }, + { 0x9, { 3, 1, 0, 1 } }, + { 0xa, { 2, 1, 0, 1 } }, + { 0xb, { 1, 1, 0, 1 } }, + { 0x10, { 2, 2, 2, 0 } }, + { 0x11, { 2, 2, 1, 0 } }, + { 0x18, { 2, 1, 2, 1 } }, + { 0x19, { 1, 1, 1, 1 } }, + }; + int i, j; + + for (i = 0; i < ARRAY_SIZE(csi_lanes_to_cfg); i++) { + for (j = 0; j < IPU_ISYS_MAX_CSI2_LEGACY_PORTS; j++) { + /* Port with no sensor can be handled as don't care */ + if (!isys->ici_csi2[j].nlanes) + continue; + if (csi_lanes_to_cfg[i].port_lanes[j] != + isys->ici_csi2[j].nlanes) + break; + } + + if (j < IPU_ISYS_MAX_CSI2_LEGACY_PORTS) + continue; + + isys->legacy_port_cfg = csi_lanes_to_cfg[i].reg_value; + dev_dbg(&isys->adev->dev, "Lane configuration value 0x%x\n,", + isys->legacy_port_cfg); + return 0; + } + dev_err(&isys->adev->dev, "Non supported CSI lane configuration\n,"); + return -EINVAL; +} + +static int isys_determine_csi_combo_lane_configuration(struct ici_isys *isys) +{ + const struct csi_lane_cfg { + u32 reg_value; + int port_lanes[IPU_ISYS_MAX_CSI2_COMBO_PORTS]; + } csi_lanes_to_cfg[] = 
{ + { 0x1f, { 0, 0 } }, /* no sensor defaults here - disable all */ + { 0x10, { 4, 0 } }, + { 0x11, { 3, 0 } }, + { 0x12, { 2, 0 } }, + { 0x13, { 1, 0 } }, + { 0x14, { 3, 1 } }, + { 0x15, { 2, 1 } }, + { 0x16, { 1, 1 } }, + { 0x18, { 2, 2 } }, + { 0x19, { 1, 2 } }, + }; + int i, j; + + for (i = 0; i < ARRAY_SIZE(csi_lanes_to_cfg); i++) { + for (j = 0; j < IPU_ISYS_MAX_CSI2_COMBO_PORTS; j++) { + /* Port with no sensor can be handled as don't care */ + if (!isys->ici_csi2[j + IPU_ISYS_MAX_CSI2_LEGACY_PORTS].nlanes) + continue; + if (csi_lanes_to_cfg[i].port_lanes[j] != + isys->ici_csi2[j + IPU_ISYS_MAX_CSI2_LEGACY_PORTS].nlanes) + break; + } + + if (j < IPU_ISYS_MAX_CSI2_COMBO_PORTS) + continue; + + isys->combo_port_cfg = csi_lanes_to_cfg[i].reg_value; + dev_dbg(&isys->adev->dev, + "Combo port lane configuration value 0x%x\n", + isys->combo_port_cfg); + + return 0; + } + dev_err(&isys->adev->dev, + "Unsupported CSI2-combo lane configuration\n"); + return 0; +} + +#endif +struct isys_i2c_test { + u8 bus_nr; + u16 addr; + struct i2c_client *client; +}; + +static int isys_i2c_test(struct device *dev, void *priv) +{ + struct i2c_client *client = i2c_verify_client(dev); + struct isys_i2c_test *test = priv; + + if (!client) + return 0; + + if (i2c_adapter_id(client->adapter) != test->bus_nr + || client->addr != test->addr) + return 0; + + test->client = client; + + return 0; +} + +static struct i2c_client *isys_find_i2c_subdev(struct i2c_adapter *adapter, + struct ipu_isys_subdev_info *sd_info) +{ + struct i2c_board_info *info = &sd_info->i2c.board_info; + struct isys_i2c_test test = { + .bus_nr = i2c_adapter_id(adapter), + .addr = info->addr, + }; + int rval; + + rval = i2c_for_each_dev(&test, isys_i2c_test); + if (rval || !test.client) + return NULL; + return test.client; +} + +static struct ici_ext_subdev *register_acpi_i2c_subdev( + struct ipu_isys_subdev_info *sd_info, struct i2c_client *client) +{ + struct i2c_board_info *info = &sd_info->i2c.board_info; + struct 
ici_ext_subdev *sd; + + request_module(I2C_MODULE_PREFIX "%s", info->type); + + /* ACPI overwrite with platform data */ + client->dev.platform_data = info->platform_data; + /* Change I2C client name to one in temporary platform data */ + strlcpy(client->name, info->type, sizeof(client->name)); + + if (device_reprobe(&client->dev)) + return NULL; + + if (!client->dev.driver) + return NULL; + + if (!try_module_get(client->dev.driver->owner)) + return NULL; + + sd = i2c_get_clientdata(client); + + module_put(client->dev.driver->owner); + + return sd; +} + +static int ext_device_setup_node(void* ipu_data, + struct ici_ext_subdev *sd, + const char* name) +{ + int rval; + struct ici_isys *isys = ipu_data; + sd->node.sd = sd; + sd->node.external = true; + + rval = ici_isys_pipeline_node_init( + isys, &sd->node, name, sd->num_pads, sd->pads); + if (rval) + return rval; + sd->num_pads = sd->node.nr_pads; + return 0; +} + +static int isys_complete_ext_device_registration( + struct ici_isys *isys, + struct ici_ext_subdev *sd, + struct ipu_isys_csi2_config *csi2) +{ + int rval; + struct ici_ext_subdev_register sd_register = {0}; + unsigned int i; + + sd_register.ipu_data = isys; + sd_register.sd = sd; + sd_register.setup_node = ext_device_setup_node; + sd_register.create_link = node_pad_create_link; + rval = sd->do_register(&sd_register); + if (rval) { + dev_err(&isys->adev->dev, + "Failed to regsiter external subdev\n"); + return rval; + } + if (csi2) { + for (i = 0; i < NR_OF_CSI2_VC; i++) { + rval = sd_register.create_link(&sd->node, sd->src_pad, + &isys->ici_csi2[csi2->port].asd[i].node, + CSI2_ICI_PAD_SINK, 0); + if (rval) { + dev_warn(&isys->adev->dev, + "can't create link from external node\n"); + } + isys->ici_csi2[csi2->port].nlanes = csi2->nlanes; + isys->ici_csi2[csi2->port].ext_sd = sd; + } + } + return 0; +} + +static int isys_register_ext_subdev(struct ici_isys *isys, + struct ipu_isys_subdev_info *sd_info, + bool acpi_only) +{ + struct i2c_adapter *adapter; + 
struct ici_ext_subdev *sd; + struct i2c_client *client; + int rval; + int bus; + +#ifdef I2C_WA + bus = ipu_get_i2c_bus_id(sd_info->i2c.i2c_adapter_id); + if (bus < 0) { + dev_err(&isys->adev->dev, "Failed to find adapter!"); + return -ENOENT; + } +#else + bus = sd_info->i2c.i2c_adapter_id; +#endif + + adapter = i2c_get_adapter(bus); + if (!adapter) { + dev_warn(&isys->adev->dev, "can't find adapter\n"); + return -ENOENT; + } + + dev_info(&isys->adev->dev, + "creating new i2c subdev for %s (address %2.2x, bus %d)", + sd_info->i2c.board_info.type, sd_info->i2c.board_info.addr, + bus); + + if (sd_info->csi2) { + dev_info(&isys->adev->dev, "sensor device on CSI port: %d\n", + sd_info->csi2->port); + if (sd_info->csi2->port >= IPU_ISYS_MAX_CSI2_PORTS || + !isys->ici_csi2[sd_info->csi2->port].isys) { + dev_warn(&isys->adev->dev, "invalid csi2 port %u\n", + sd_info->csi2->port); + rval = -EINVAL; + goto skip_put_adapter; + } + } else { + dev_info(&isys->adev->dev, "non camera subdevice\n"); + } + + client = isys_find_i2c_subdev(adapter, sd_info); + + if (acpi_only) { + if (!client) { + dev_dbg(&isys->adev->dev, + "Matching ACPI device not found - postpone\n"); + rval = 0; + goto skip_put_adapter; + } + rval = 0; + goto skip_put_adapter; + if (!sd_info->acpiname) { + dev_dbg(&isys->adev->dev, + "No name in platform data\n"); + rval = 0; + goto skip_put_adapter; + } + if (strcmp(dev_name(&client->dev), sd_info->acpiname)) { + dev_dbg(&isys->adev->dev, "Names don't match: %s != %s", + dev_name(&client->dev), sd_info->acpiname); + rval = 0; + goto skip_put_adapter; + } + /* Acpi match found. 
Continue to reprobe */ + } else if (client) { + dev_dbg(&isys->adev->dev, "Device exists\n"); + rval = 0; + goto skip_put_adapter; + } + else if (sd_info->acpiname) { + dev_dbg(&isys->adev->dev, "ACPI name don't match: %s\n", + sd_info->acpiname); + rval = 0; + goto skip_put_adapter; + } + if (!client) { + dev_info(&isys->adev->dev, + "i2c device not found in ACPI table\n"); + client = i2c_new_device(adapter, + &sd_info->i2c.board_info); + sd = i2c_get_clientdata(client); + } else { + dev_info(&isys->adev->dev, "i2c device found in ACPI table\n"); + sd = register_acpi_i2c_subdev(sd_info, client); + } + + if (!sd) { + dev_warn(&isys->adev->dev, "can't create new i2c subdev\n"); + rval = -EINVAL; + goto skip_put_adapter; + } + return isys_complete_ext_device_registration(isys, sd, sd_info->csi2); + +skip_put_adapter: + i2c_put_adapter(adapter); + + return rval; +} + +static int isys_acpi_add_device(struct device *dev, void *priv, + struct ipu_isys_csi2_config *csi2, + bool reprobe) +{ + struct ici_isys *isys = priv; + struct i2c_client *client = i2c_verify_client(dev); + struct ici_ext_subdev *sd; + + if (!client) + return -ENODEV; + + if (reprobe) + if (device_reprobe(&client->dev)) + return -ENODEV; + + if (!client->dev.driver) + return -ENODEV; + + /* Lock the module so we can safely get the v4l2_subdev pointer */ + if (!try_module_get(client->dev.driver->owner)) + return -ENODEV; + + sd = i2c_get_clientdata(client); + module_put(client->dev.driver->owner); + if (!sd) { + dev_warn(&isys->adev->dev, "can't create new i2c subdev\n"); + return -ENODEV; + } + + return isys_complete_ext_device_registration(isys, sd, csi2); +} + +static void isys_register_ext_subdevs(struct ici_isys *isys) +{ + struct ipu_isys_subdev_pdata *spdata = isys->pdata->spdata; + struct ipu_isys_subdev_info **sd_info; + + if (spdata) { + /* Scan spdata first to possibly override ACPI data */ + /* ACPI created devices */ + for (sd_info = spdata->subdevs; *sd_info; sd_info++) + 
isys_register_ext_subdev(isys, *sd_info, true); + + /* Scan non-acpi devices */ + for (sd_info = spdata->subdevs; *sd_info; sd_info++) + isys_register_ext_subdev(isys, *sd_info, false); + } else { + dev_info(&isys->adev->dev, "no subdevice info provided\n"); + } + + /* Handle real ACPI stuff */ + request_module("ipu4-acpi"); + ipu_get_acpi_devices(isys, &isys->adev->dev, + isys_acpi_add_device); +} + +static void isys_unregister_subdevices(struct ici_isys *isys) +{ + const struct ipu_isys_internal_tpg_pdata *tpg = + &isys->pdata->ipdata->tpg; + const struct ipu_isys_internal_csi2_pdata *csi2 = + &isys->pdata->ipdata->csi2; + unsigned int i; + + for (i = 0; i < NR_OF_CSI2_BE_STREAMS; i++) { + ici_isys_csi2_be_cleanup(&isys->ici_csi2_be[i]); + } + + for (i = 0; i < tpg->ntpgs; i++) { + ici_isys_tpg_cleanup(&isys->ici_tpg[i]); + } + + for (i = 0; i < csi2->nports; i++) { + ici_isys_csi2_cleanup(&isys->ici_csi2[i]); + } +} + +static int isys_register_subdevices(struct ici_isys *isys) +{ + const struct ipu_isys_internal_tpg_pdata *tpg = + &isys->pdata->ipdata->tpg; + const struct ipu_isys_internal_csi2_pdata *csi2 = + &isys->pdata->ipdata->csi2; + + unsigned int i, j, k; + int rval; + + BUG_ON(csi2->nports > IPU_ISYS_MAX_CSI2_PORTS); + BUG_ON(tpg->ntpgs > 2); + + for (i = 0; i < csi2->nports; i++) { + rval = ici_isys_csi2_init( + &isys->ici_csi2[i], isys, + isys->pdata->base + csi2->offsets[i], i); + if (rval) + goto fail; + + isys->isr_csi2_bits |= + IPU_ISYS_UNISPART_IRQ_CSI2(i); + } + + for (i = 0; i < tpg->ntpgs; i++) { + rval = ici_isys_tpg_init(&isys->ici_tpg[i], isys, + isys->pdata->base + tpg->offsets[i], + isys->pdata->base + tpg->sels[i], i); + if(rval) + goto fail; + } + + for (i = 0; i < NR_OF_CSI2_BE_STREAMS; i++) { + rval = ici_isys_csi2_be_init(&isys->ici_csi2_be[i], + isys, i); + if (rval) { + goto fail; + } + } + + for (i = 0; i < csi2->nports; i++) { + for (j = 0; j < NR_OF_CSI2_VC; j++) { + rval = node_pad_create_link( + 
&isys->ici_csi2[i].asd[j].node, CSI2_ICI_PAD_SOURCE, + &isys->ici_csi2_be[ICI_BE_RAW].asd.node, + CSI2_BE_ICI_PAD_SINK, 0); + if (rval) { + dev_info(&isys->adev->dev, + "can't create link between csi2 and csi2_be\n"); + goto fail; + } + + for (k = 1; k < NR_OF_CSI2_BE_STREAMS; k++) { + rval = node_pad_create_link( + &isys->ici_csi2[i].asd[j].node, CSI2_ICI_PAD_SOURCE, + &isys->ici_csi2_be[k].asd.node, + CSI2_BE_ICI_PAD_SINK, 0); + if (rval) { + dev_info(&isys->adev->dev, + "can't create link between csi2 and csi2_be soc\n"); + goto fail; + } + } + } + } + + for (i = 0; i < tpg->ntpgs; i++) { + rval = node_pad_create_link( + &isys->ici_tpg[i].asd.node, TPG_PAD_SOURCE, + &isys->ici_csi2_be[ICI_BE_RAW].asd.node, + CSI2_BE_ICI_PAD_SINK, 0); + if (rval) { + dev_info(&isys->adev->dev, + "can't create link between tpg and csi2_be\n"); + goto fail; + } + + for (j = 1; j < NR_OF_CSI2_BE_STREAMS; j++) { + rval = node_pad_create_link( + &isys->ici_tpg[i].asd.node, TPG_PAD_SOURCE, + &isys->ici_csi2_be[j].asd.node, + CSI2_BE_ICI_PAD_SINK, 0); + if (rval) { + dev_info(&isys->adev->dev, + "can't create link between tpg and csi2_be soc\n"); + goto fail; + } + } + } + + return 0; + +fail: + isys_unregister_subdevices(isys); + return rval; +} + +static int isys_register_devices(struct ici_isys *isys) +{ + int rval; + +/* Pipeline device registration */ + DEBUGK("Pipeline device registering...\n"); + rval = pipeline_device_register(&isys->pipeline_dev, isys); + if (rval < 0) { + dev_info(&isys->pipeline_dev.dev, "can't register pipeline device\n"); + return rval; + } + dev_info(&isys->pipeline_dev.dev, "@%s\n", __func__); + rval = isys_register_subdevices(isys); + if (rval) + goto out_pipeline_device_unregister; + + isys_register_ext_subdevs(isys); + +// Latest code structure doesnt do these functions. +// let it remain to gaugae impact and then remove. 
+#if 0 + rval = isys_determine_legacy_csi_lane_configuration(isys); + if (rval) + goto out_isys_unregister_subdevices; + + rval = isys_determine_csi_combo_lane_configuration(isys); + if (rval) + goto out_isys_unregister_subdevices; + +#ifndef CONFIG_PM + ipu_buttress_csi_port_config(isys->adev->isp, + isys->legacy_port_cfg, + isys->combo_port_cfg); +#endif +#endif + return 0; + +#if 0 +out_isys_unregister_subdevices: + isys_unregister_subdevices(isys); +#endif +out_pipeline_device_unregister: + pipeline_device_unregister(&isys->pipeline_dev); + + return rval; +} + +static void isys_unregister_devices(struct ici_isys *isys) +{ + pipeline_device_unregister(&isys->pipeline_dev); + DEBUGK("Pipeline device unregistered\n"); + isys_unregister_subdevices(isys); +} + +static void isys_setup_hw(struct ici_isys *isys) +{ + void __iomem *base = isys->pdata->base; + u32 irqs; + unsigned int i; + + /* Enable irqs for all MIPI busses */ + irqs = IPU_ISYS_UNISPART_IRQ_CSI2(0) | + IPU_ISYS_UNISPART_IRQ_CSI2(1) | + IPU_ISYS_UNISPART_IRQ_CSI2(2) | + IPU_ISYS_UNISPART_IRQ_CSI2(3) | + IPU_ISYS_UNISPART_IRQ_CSI2(4) | + IPU_ISYS_UNISPART_IRQ_CSI2(5); + + irqs |= IPU_ISYS_UNISPART_IRQ_SW; + + writel(irqs, base + IPU_REG_ISYS_UNISPART_IRQ_EDGE); + writel(irqs, base + IPU_REG_ISYS_UNISPART_IRQ_LEVEL_NOT_PULSE); + writel(irqs, base + IPU_REG_ISYS_UNISPART_IRQ_CLEAR); + writel(irqs, base + IPU_REG_ISYS_UNISPART_IRQ_MASK); + writel(irqs, base + IPU_REG_ISYS_UNISPART_IRQ_ENABLE); + + writel(0, base + IPU_REG_ISYS_UNISPART_SW_IRQ_REG); + writel(0, base + IPU_REG_ISYS_UNISPART_SW_IRQ_MUX_REG); + + /* Write CDC FIFO threshold values for isys */ + for (i = 0; i < isys->pdata->ipdata->hw_variant.cdc_fifos; i++) + writel(isys->pdata->ipdata->hw_variant.cdc_fifo_threshold[i], + base + IPU_REG_ISYS_CDC_THRESHOLD(i)); +} + +#ifdef CONFIG_PM +static int isys_runtime_pm_resume(struct device *dev) +{ + struct ipu_bus_device *adev = to_ipu_bus_device(dev); + struct ipu_device *isp = adev->isp; + struct 
ici_isys *isys = ipu_bus_get_drvdata(adev); + unsigned long flags; + int ret; + + if (!isys) { + WARN(1, "%s called before probing. skipping.\n", __func__); + return 0; + } + + ipu_trace_restore(dev); + + pm_qos_update_request(&isys->pm_qos, ISYS_PM_QOS_VALUE); +#if 0 + ipu_buttress_csi_port_config(isp, + isys->legacy_port_cfg, + isys->combo_port_cfg); +#endif + ret = ipu_buttress_start_tsc_sync(isp); + if (ret) + return ret; + + spin_lock_irqsave(&isys->power_lock, flags); + isys->power = 1; + spin_unlock_irqrestore(&isys->power_lock, flags); + + isys_setup_hw(isys); + + return 0; +} + +static int isys_runtime_pm_suspend(struct device *dev) +{ + struct ipu_bus_device *adev = to_ipu_bus_device(dev); + struct ici_isys *isys = ipu_bus_get_drvdata(adev); + unsigned long flags; + + if (!isys) { + WARN(1, "%s called before probing. skipping.\n", __func__); + return 0; + } + + spin_lock_irqsave(&isys->power_lock, flags); + isys->power = 0; + spin_unlock_irqrestore(&isys->power_lock, flags); + + ipu_trace_stop(dev); + mutex_lock(&isys->mutex); + isys->reset_needed = false; + mutex_unlock(&isys->mutex); + + pm_qos_update_request(&isys->pm_qos, PM_QOS_DEFAULT_VALUE); + + return 0; +} + +static int isys_suspend(struct device *dev) +{ + struct ipu_bus_device *adev = to_ipu_bus_device(dev); + struct ici_isys *isys = ipu_bus_get_drvdata(adev); + + /* If stream is open, refuse to suspend */ + if (isys->stream_opened) + return -EBUSY; + + return 0; +} + +static int isys_resume(struct device *dev) +{ + return 0; +} + +static const struct dev_pm_ops isys_pm_ops = { + .runtime_suspend = isys_runtime_pm_suspend, + .runtime_resume = isys_runtime_pm_resume, + .suspend = isys_suspend, + .resume = isys_resume, +}; +#define ISYS_PM_OPS (&isys_pm_ops) +#else +#define ISYS_PM_OPS NULL +#endif + +static void isys_remove(struct ipu_bus_device *adev) +{ + struct ici_isys *isys = ipu_bus_get_drvdata(adev); + struct ipu_device *isp = adev->isp; +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 8, 0) + 
struct dma_attrs attrs; +#else + unsigned long attrs; +#endif + + dev_info(&adev->dev, "removed\n"); + debugfs_remove_recursive(isys->debugfsdir); + + ipu_trace_uninit(&adev->dev); + isys_unregister_devices(isys); + pm_qos_remove_request(&isys->pm_qos); + + if (!isp->secure_mode) { + ipu_cpd_free_pkg_dir(adev, isys->pkg_dir, + isys->pkg_dir_dma_addr, + isys->pkg_dir_size); + ipu_buttress_unmap_fw_image(adev, &isys->fw_sgt); + release_firmware(isys->fw); + } + + mutex_destroy(&isys->stream_mutex); + mutex_destroy(&isys->mutex); +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 8, 0) + init_dma_attrs(&attrs); + dma_set_attr(DMA_ATTR_NON_CONSISTENT, &attrs); + dma_free_attrs(&adev->dev, + IPU_ISYS_SHORT_PACKET_TRACE_BUFFER_SIZE, + isys->short_packet_trace_buffer, + isys->short_packet_trace_buffer_dma_addr, &attrs); +#else + attrs = DMA_ATTR_NON_CONSISTENT; + dma_free_attrs(&adev->dev, + IPU_ISYS_SHORT_PACKET_TRACE_BUFFER_SIZE, + isys->short_packet_trace_buffer, + isys->short_packet_trace_buffer_dma_addr, attrs); +#endif +} + +static int intel_ipu4_isys_icache_prefetch_get(void *data, u64 *val) +{ + struct ici_isys *isys = data; + + *val = isys->icache_prefetch; + return 0; +} + +static int intel_ipu4_isys_icache_prefetch_set(void *data, u64 val) +{ + struct ici_isys *isys = data; + + if (val != !!val) + return -EINVAL; + + isys->icache_prefetch = val; + + return 0; +} + +DEFINE_SIMPLE_ATTRIBUTE(isys_icache_prefetch_fops, + intel_ipu4_isys_icache_prefetch_get, + intel_ipu4_isys_icache_prefetch_set, + "%llu\n"); + +static int intel_ipu4_isys_init_debugfs(struct ici_isys *isys) +{ + struct dentry *file; + struct dentry *dir; + + dir = debugfs_create_dir("isys", isys->adev->isp->ipu_dir); + if (IS_ERR(dir)) + return -ENOMEM; + + file = debugfs_create_file("icache_prefetch", S_IRUSR | S_IWUSR, + dir, isys, + &isys_icache_prefetch_fops); + if (IS_ERR(file)) + goto err; + + isys->debugfsdir = dir; + + return 0; +err: + debugfs_remove_recursive(dir); + return -ENOMEM; +} + +static 
int alloc_fw_msg_buffers(struct ici_isys *isys, int amount) +{ + dma_addr_t dma_addr; + struct isys_fw_msgs *addr; + unsigned int i; + unsigned long flags; + + for (i = 0; i < amount; i++) { + addr = dma_alloc_attrs(&isys->adev->dev, + sizeof(struct isys_fw_msgs), + &dma_addr, GFP_KERNEL, +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 8, 0) + NULL +#else + 0 +#endif + ); + if (!addr) + break; + addr->dma_addr = dma_addr; + + spin_lock_irqsave(&isys->listlock, flags); + list_add(&addr->head, &isys->framebuflist); + spin_unlock_irqrestore(&isys->listlock, flags); + } + if (i == amount) + return 0; + spin_lock_irqsave(&isys->listlock, flags); + while (!list_empty(&isys->framebuflist)) { + addr = list_first_entry(&isys->framebuflist, + struct isys_fw_msgs, head); + list_del(&addr->head); + spin_unlock_irqrestore(&isys->listlock, flags); + dma_free_attrs(&isys->adev->dev, + sizeof(struct isys_fw_msgs), + addr, addr->dma_addr, +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 8, 0) + NULL +#else + 0 +#endif + ); + spin_lock_irqsave(&isys->listlock, flags); + } + spin_unlock_irqrestore(&isys->listlock, flags); + return -ENOMEM; +} + +static int isys_probe(struct ipu_bus_device *adev) +{ + struct ipu_mmu *mmu = dev_get_drvdata(adev->iommu); + struct ici_isys *isys; + struct ipu_device *isp = adev->isp; + const struct firmware *uninitialized_var(fw); + int rval = 0; +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 8, 0) + struct dma_attrs attrs; +#else + unsigned long attrs; +#endif + + trace_printk("B|%d|TMWK\n", current->pid); + + /* Has the domain been attached? 
*/ + if (!mmu || !isp->pkg_dir_dma_addr) { + trace_printk("E|TMWK\n"); + return -EPROBE_DEFER; + } + + isys = devm_kzalloc(&adev->dev, sizeof(*isys), GFP_KERNEL); + if (!isys) { + trace_printk("E|TMWK\n"); + return -ENOMEM; + } +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 8, 0) + init_dma_attrs(&attrs); + dma_set_attr(DMA_ATTR_NON_CONSISTENT, &attrs); + isys->short_packet_trace_buffer = dma_alloc_attrs(&adev->dev, + IPU_ISYS_SHORT_PACKET_TRACE_BUFFER_SIZE, + &isys->short_packet_trace_buffer_dma_addr, GFP_KERNEL, &attrs); +#else + attrs = DMA_ATTR_NON_CONSISTENT; + isys->short_packet_trace_buffer = dma_alloc_attrs(&adev->dev, + IPU_ISYS_SHORT_PACKET_TRACE_BUFFER_SIZE, + &isys->short_packet_trace_buffer_dma_addr, GFP_KERNEL, attrs); +#endif + if (!isys->short_packet_trace_buffer) + return -ENOMEM; + + isys->adev = adev; + isys->pdata = adev->pdata; + + INIT_LIST_HEAD(&isys->requests); + + spin_lock_init(&isys->lock); + spin_lock_init(&isys->power_lock); + isys->power = 0; + + mutex_init(&isys->mutex); + mutex_init(&isys->stream_mutex); + mutex_init(&isys->lib_mutex); + + spin_lock_init(&isys->listlock); + INIT_LIST_HEAD(&isys->framebuflist); + INIT_LIST_HEAD(&isys->framebuflist_fw); + + dev_info(&adev->dev, "isys probe %p %p\n", adev, &adev->dev); + ipu_bus_set_drvdata(adev, isys); + + isys->line_align = IPU_ISYS_2600_MEM_LINE_ALIGN; + isys->icache_prefetch = is_ipu_hw_bxtp_e0(isp); + +#ifndef CONFIG_PM + isys_setup_hw(isys); +#endif + + if (!isp->secure_mode) { + fw = isp->cpd_fw; + + rval = ipu_buttress_map_fw_image( + adev, fw, &isys->fw_sgt); + if (rval) + goto release_firmware; + + isys->pkg_dir = ipu_cpd_create_pkg_dir( + adev, isp->cpd_fw->data, + sg_dma_address(isys->fw_sgt.sgl), + &isys->pkg_dir_dma_addr, + &isys->pkg_dir_size); + if (isys->pkg_dir == NULL) { + rval = -ENOMEM; + goto remove_shared_buffer; + } + } + + /* Debug fs failure is not fatal. 
*/ + intel_ipu4_isys_init_debugfs(isys); + + ipu_trace_init(adev->isp, isys->pdata->base, &adev->dev, + isys_trace_blocks); + + pm_qos_add_request(&isys->pm_qos, PM_QOS_CPU_DMA_LATENCY, + PM_QOS_DEFAULT_VALUE); + + alloc_fw_msg_buffers(isys, 20); + + pm_runtime_allow(&adev->dev); + pm_runtime_enable(&adev->dev); + + rval = isys_register_devices(isys); + if (rval) + goto out_remove_pkg_dir_shared_buffer; + + trace_printk("E|TMWK\n"); + return 0; + +out_remove_pkg_dir_shared_buffer: + if (!isp->secure_mode) + ipu_cpd_free_pkg_dir(adev, isys->pkg_dir, + isys->pkg_dir_dma_addr, + isys->pkg_dir_size); +remove_shared_buffer: + if (!isp->secure_mode) + ipu_buttress_unmap_fw_image( + adev, &isys->fw_sgt); +release_firmware: + if (!isp->secure_mode) + release_firmware(isys->fw); + ipu_trace_uninit(&adev->dev); + + trace_printk("E|TMWK\n"); + + mutex_destroy(&isys->mutex); + mutex_destroy(&isys->stream_mutex); + mutex_destroy(&isys->lib_mutex); + +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 8, 0) + dma_free_attrs(&adev->dev, + IPU_ISYS_SHORT_PACKET_TRACE_BUFFER_SIZE, + isys->short_packet_trace_buffer, + isys->short_packet_trace_buffer_dma_addr, &attrs); +#else + dma_free_attrs(&adev->dev, + IPU_ISYS_SHORT_PACKET_TRACE_BUFFER_SIZE, + isys->short_packet_trace_buffer, + isys->short_packet_trace_buffer_dma_addr, attrs); +#endif + return rval; +} + +struct fwmsg { + int type; + char *msg; + bool valid_ts; +}; + +static const struct fwmsg fw_msg[] = { + { IA_CSS_ISYS_RESP_TYPE_STREAM_OPEN_DONE, "STREAM_OPEN_DONE", 0 }, + { IA_CSS_ISYS_RESP_TYPE_STREAM_CLOSE_ACK, "STREAM_CLOSE_ACK", 0 }, + { IA_CSS_ISYS_RESP_TYPE_STREAM_START_ACK, "STREAM_START_ACK", 0 }, + { IA_CSS_ISYS_RESP_TYPE_STREAM_START_AND_CAPTURE_ACK, + "STREAM_START_AND_CAPTURE_ACK", 0 }, + { IA_CSS_ISYS_RESP_TYPE_STREAM_STOP_ACK, "STREAM_STOP_ACK", 0 }, + { IA_CSS_ISYS_RESP_TYPE_STREAM_FLUSH_ACK, "STREAM_FLUSH_ACK", 0 }, + { IA_CSS_ISYS_RESP_TYPE_PIN_DATA_READY, "PIN_DATA_READY", 1 }, + { 
IA_CSS_ISYS_RESP_TYPE_STREAM_CAPTURE_ACK, "STREAM_CAPTURE_ACK", 0 }, + { IA_CSS_ISYS_RESP_TYPE_STREAM_START_AND_CAPTURE_DONE, + "STREAM_START_AND_CAPTURE_DONE", 1 }, + { IA_CSS_ISYS_RESP_TYPE_STREAM_CAPTURE_DONE, "STREAM_CAPTURE_DONE", 1 }, + { IA_CSS_ISYS_RESP_TYPE_FRAME_SOF, "FRAME_SOF", 1 }, + { IA_CSS_ISYS_RESP_TYPE_FRAME_EOF, "FRAME_EOF", 1 }, + { -1, "UNKNOWN MESSAGE", 0 }, +}; + +static int resp_type_to_index(int type) +{ + unsigned int i; + + for (i = 0; i < ARRAY_SIZE(fw_msg); i++) + if (fw_msg[i].type == type) + return i; + + return i - 1; +} + + +static u64 extract_time_from_short_packet_msg( + struct ici_isys_csi2_monitor_message *msg) + +{ + u64 time_h = msg->timestamp_h << 14; + u64 time_l = msg->timestamp_l; + u64 time_h_ovl = time_h & 0xc000; + u64 time_h_h = time_h & (~0xffff); + + /* Fix possible roll overs. */ + if (time_h_ovl >= (time_l & 0xc000)) + return time_h_h | time_l; + else + return (time_h_h - 0x10000) | time_l; +} +static u64 tunit_time_to_us(struct ici_isys *isys, u64 time) +{ + struct ipu_bus_device *adev = + to_ipu_bus_device(isys->adev->iommu); + u64 isys_clk = IS_FREQ_SOURCE / adev->ctrl->divisor / 1000000; + return time / isys_clk; +} + +static u64 tsc_time_to_tunit_time(struct ici_isys *isys, + u64 tsc_base, u64 tunit_base, u64 tsc_time) +{ + struct ipu_bus_device *adev = + to_ipu_bus_device(isys->adev->iommu); + u64 isys_clk = IS_FREQ_SOURCE / adev->ctrl->divisor / 100000; + u64 tsc_clk = IPU_BUTTRESS_TSC_CLK / 100000; + + return (tsc_time - tsc_base) * isys_clk / tsc_clk + tunit_base; +} + +static int isys_isr_one_ici(struct ipu_bus_device *adev) +{ + struct ici_isys *isys = ipu_bus_get_drvdata(adev); + struct ia_css_isys_resp_info resp; + struct ici_isys_pipeline *pipe; + u64 ts; + int rval; + unsigned int i; + + if (!isys->fwcom) + return 0; + + rval = ipu_lib_call_notrace_unlocked(stream_handle_response, + isys, &resp); + if (rval < 0) + return rval; + + ts = (u64)resp.timestamp[1] << 32 | resp.timestamp[0]; + + + if 
(resp.error == IA_CSS_ISYS_ERROR_STREAM_IN_SUSPENSION) + /* Suspension is kind of special case: not enough buffers */ + dev_dbg(&adev->dev, + "hostlib: error resp %02d %s, stream %u, error SUSPENSION, details %d, timestamp 0x%16.16llx, pin %d\n", + resp.type, + fw_msg[resp_type_to_index(resp.type)].msg, + resp.stream_handle, + resp.error_details, + fw_msg[resp_type_to_index(resp.type)].valid_ts ? + ts : 0, resp.pin_id); + else if (resp.error) + dev_dbg(&adev->dev, + "hostlib: error resp %02d %s, stream %u, error %d, details %d, timestamp 0x%16.16llx, pin %d\n", + resp.type, + fw_msg[resp_type_to_index(resp.type)].msg, + resp.stream_handle, + resp.error, resp.error_details, + fw_msg[resp_type_to_index(resp.type)].valid_ts ? + ts : 0, resp.pin_id); + else + dev_dbg(&adev->dev, + "hostlib: resp %02d %s, stream %u, timestamp 0x%16.16llx, pin %d\n", + resp.type, + fw_msg[resp_type_to_index(resp.type)].msg, + resp.stream_handle, + fw_msg[resp_type_to_index(resp.type)].valid_ts ? + ts : 0, resp.pin_id); + + if (resp.stream_handle >= INTEL_IPU4_ISYS_MAX_STREAMS) { + dev_err(&adev->dev, "bad stream handle %u\n", + resp.stream_handle); + return 0; + } + + pipe = isys->ici_pipes[resp.stream_handle]; + if (!pipe) { + dev_err(&adev->dev, "no pipeline for stream %u\n", + resp.stream_handle); + return 0; + } + pipe->error = resp.error; + + switch (resp.type) { + case IA_CSS_ISYS_RESP_TYPE_STREAM_OPEN_DONE: + complete(&pipe->stream_open_completion); + break; + case IA_CSS_ISYS_RESP_TYPE_STREAM_CLOSE_ACK: + complete(&pipe->stream_close_completion); + break; + case IA_CSS_ISYS_RESP_TYPE_STREAM_START_ACK: + complete(&pipe->stream_start_completion); + break; + case IA_CSS_ISYS_RESP_TYPE_STREAM_START_AND_CAPTURE_ACK: + complete(&pipe->stream_start_completion); + break; + case IA_CSS_ISYS_RESP_TYPE_STREAM_STOP_ACK: + complete(&pipe->stream_stop_completion); + break; + case IA_CSS_ISYS_RESP_TYPE_STREAM_FLUSH_ACK: + complete(&pipe->stream_stop_completion); + break; + case 
IA_CSS_ISYS_RESP_TYPE_PIN_DATA_READY: + if (resp.pin_id < IPU_ISYS_OUTPUT_PINS && + pipe->output_pins[resp.pin_id].pin_ready) + pipe->output_pins[resp.pin_id].pin_ready(pipe, &resp); + else + dev_err(&adev->dev, + "%d:No data pin ready handler for pin id %d\n", + resp.stream_handle, resp.pin_id); + break; + case IA_CSS_ISYS_RESP_TYPE_STREAM_CAPTURE_ACK: + complete(&pipe->capture_ack_completion); + break; + case IA_CSS_ISYS_RESP_TYPE_STREAM_START_AND_CAPTURE_DONE: + case IA_CSS_ISYS_RESP_TYPE_STREAM_CAPTURE_DONE: + + if(pipe->interlaced && pipe->short_packet_source == + IPU_ISYS_SHORT_PACKET_FROM_TUNIT) { + unsigned int i = pipe->short_packet_trace_index; + bool msg_matched = false; + unsigned int monitor_id; + + if(pipe->csi2->index>= IPU_ISYS_MAX_CSI2_LEGACY_PORTS) + monitor_id = TRACE_REG_CSI2_3PH_TM_MONITOR_ID; + else + monitor_id = TRACE_REG_CSI2_TM_MONITOR_ID; + + dma_sync_single_for_cpu(&isys->adev->dev, + isys->short_packet_trace_buffer_dma_addr, + IPU_ISYS_SHORT_PACKET_TRACE_BUFFER_SIZE, + DMA_BIDIRECTIONAL); + + do { + struct ici_isys_csi2_monitor_message msg = isys->short_packet_trace_buffer[i]; + u64 sof_time = tsc_time_to_tunit_time(isys, + isys->tsc_timer_base, isys->tunit_timer_base, + (u64) resp.timestamp[1] << 32 | resp.timestamp[0]); + u64 trace_time = extract_time_from_short_packet_msg(&msg); + u64 delta_time_us = tunit_time_to_us(isys, + (sof_time > trace_time) ? + sof_time - trace_time : + trace_time - sof_time); + + i = (i + 1) % IPU_ISYS_SHORT_PACKET_TRACE_MSG_NUMBER; + if (msg.cmd == TRACE_REG_CMD_TYPE_D64MTS && + msg.monitor_id == monitor_id && + msg.fs == 1 && + msg.port == pipe->csi2->index && + msg.vc == pipe->vc && + delta_time_us < IPU_ISYS_SHORT_PACKET_TRACE_MAX_TIMESHIFT) { + pipe->cur_field = (msg.sequence % 2) ? + ICI_FIELD_TOP : ICI_FIELD_BOTTOM; + pipe->short_packet_trace_index = i; + msg_matched = true; + dev_dbg(&isys->adev->dev, "Interlaced field ready. 
field = %d\n", + pipe->cur_field); + break; + } + } while (i != pipe->short_packet_trace_index); + + if (!msg_matched) + /* We have walked through the whole buffer. */ + dev_dbg(&isys->adev->dev,"No matched trace message found.\n"); + } + + for (i = 0; i < INTEL_IPU4_NUM_CAPTURE_DONE; i++) + if (pipe->capture_done[i]) + pipe->capture_done[i](pipe, &resp); + break; + case IA_CSS_ISYS_RESP_TYPE_FRAME_SOF: + break; + case IA_CSS_ISYS_RESP_TYPE_FRAME_EOF: + break; + default: + dev_err(&adev->dev, "%d:unknown response type %u\n", + resp.stream_handle, resp.type); + break; + } + + return 0; +} + +static irqreturn_t isys_isr(struct ipu_bus_device *adev) +{ + struct ici_isys *isys = ipu_bus_get_drvdata(adev); + void __iomem *base = isys->pdata->base; + u32 status; + + spin_lock(&isys->power_lock); + if (!isys->power) { + spin_unlock(&isys->power_lock); + return IRQ_NONE; + } + + status = readl(isys->pdata->base + + IPU_REG_ISYS_UNISPART_IRQ_STATUS); + do { + writel(status, isys->pdata->base + + IPU_REG_ISYS_UNISPART_IRQ_CLEAR); + + if (isys->isr_csi2_bits & status) { + unsigned int i; + + for (i = 0; i < isys->pdata->ipdata->csi2.nports; i++) { + if (status & + IPU_ISYS_UNISPART_IRQ_CSI2(i)){ + + ici_isys_csi2_isr( + &isys->ici_csi2[i]); + } + } + } + + writel(0, base + IPU_REG_ISYS_UNISPART_SW_IRQ_REG); + + /* + * Handle a single FW event per checking the CSI-2 + * receiver SOF status. This is done in order to avoid + * the case where events arrive to the event queue and + * one of them is a SOF event which then could be + * handled before the SOF interrupt. This would pose + * issues in sequence numbering which is based on SOF + * interrupts, always assumed to arrive before FW SOF + * events. 
+ */ + if (status & IPU_ISYS_UNISPART_IRQ_SW && + !isys_isr_one_ici(adev)) + status = IPU_ISYS_UNISPART_IRQ_SW; + else + status = 0; + + status |= readl(isys->pdata->base + + IPU_REG_ISYS_UNISPART_IRQ_STATUS); + } while (status & (isys->isr_csi2_bits + | IPU_ISYS_UNISPART_IRQ_SW)); + + spin_unlock(&isys->power_lock); + return IRQ_HANDLED; +} + +static void isys_isr_poll_ici(struct ipu_bus_device *adev) +{ + struct ici_isys *isys = ipu_bus_get_drvdata(adev); + + if (!isys->fwcom) { + dev_dbg(&isys->adev->dev, + "got interrupt but device not configured yet\n"); + return; + } + + while (!isys_isr_one_ici(adev)); +} + +int intel_ipu4_isys_isr_run_ici(void *ptr) +{ + struct ici_isys *isys = ptr; + + while (!kthread_should_stop()) { + usleep_range(500, 1000); + if (isys->ici_stream_opened) + isys_isr_poll_ici(isys->adev); + } + + return 0; +} + +static struct ipu_bus_driver isys_driver = { + .probe = isys_probe, + .remove = isys_remove, + .isr = isys_isr, + .wanted = IPU_ISYS_NAME, + .drv = { + .name = IPU_ISYS_NAME, + .owner = THIS_MODULE, + .pm = ISYS_PM_OPS, + }, +}; + +module_ipu_bus_driver(isys_driver); + +MODULE_AUTHOR("Scott Kennedy "); +MODULE_AUTHOR("Marcin Mozejko "); +MODULE_AUTHOR("Sakari Ailus "); +MODULE_AUTHOR("Samu Onkalo "); +MODULE_AUTHOR("Jouni Högander "); +MODULE_AUTHOR("Jouni Ukkonen "); +MODULE_LICENSE("Dual BSD/GPL"); +MODULE_DESCRIPTION("Intel intel_ipu4 ici input system driver"); + +#endif /* ICI_ENABLED */ + diff --git a/drivers/media/pci/intel/ici/ici-isys.h b/drivers/media/pci/intel/ici/ici-isys.h new file mode 100644 index 0000000000000..db46d7e6935a1 --- /dev/null +++ b/drivers/media/pci/intel/ici/ici-isys.h @@ -0,0 +1,212 @@ +/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0) */ +/* + * Copyright (C) 2018 Intel Corporation + */ + +#ifndef ICI_ISYS_H +#define ICI_ISYS_H + +#define ICI_ENABLED + +#ifdef ICI_ENABLED +#define IPU4_DEBUG + +#include +#include + +#include "ipu.h" +#include "ipu-pdata.h" +#include "ipu-fw-isys.h" +#include 
"ici-fw-isys.h" +#include "ici-isys-stream.h" +#include "ici-isys-csi2.h" +#include "ici-isys-csi2-be.h" +#include "ici-isys-pipeline-device.h" +#include "ici-isys-tpg.h" +#include "ipu-platform.h" +#include "ipu4/ipu-platform-isys.h" +#include "ipu4/ipu-platform-regs.h" + +#define IPU_ISYS_ENTITY_PREFIX "Intel IPU4" + +#define IPU_ISYS_2600_MEM_LINE_ALIGN 64 + +#define IPU_ISYS_MAX_CSI2_PORTS IPU_ISYS_MAX_CSI2_LEGACY_PORTS+IPU_ISYS_MAX_CSI2_COMBO_PORTS +/* for TPG */ +#define INTEL_IPU4_ISYS_FREQ_BXT_FPGA 25000000UL +#define INTEL_IPU4_ISYS_FREQ_BXT 533000000UL + +#define IPU_ISYS_SIZE_RECV_QUEUE 40 +#define IPU_ISYS_SIZE_SEND_QUEUE 40 +#define IPU_ISYS_NUM_RECV_QUEUE 1 + +/* + * Device close takes some time from last ack message to actual stopping + * of the SP processor. As long as the SP processor runs we can't proceed with + * clean up of resources. + */ +#define IPU_ISYS_OPEN_TIMEOUT_US 1000 +#define IPU_ISYS_OPEN_RETRY 1000 +#define IPU_ISYS_TURNOFF_DELAY_US 1000 +#define IPU_ISYS_TURNOFF_TIMEOUT 1000 +#define IPU_LIB_CALL_TIMEOUT_MS 2000 +#define IPU_LIB_CALL_TIMEOUT_JIFFIES \ + msecs_to_jiffies(IPU_LIB_CALL_TIMEOUT_MS) + +#define INTEL_IPU4_ISYS_CSI2_LONG_PACKET_HEADER_SIZE 32 +#define INTEL_IPU4_ISYS_CSI2_LONG_PACKET_FOOTER_SIZE 32 + +/* + * For B0/1: FW support max 8 streams + */ +#define INTEL_IPU4_ISYS_MAX_STREAMS 8 +#define NR_OF_CSI2_BE_STREAMS (NR_OF_CSI2_BE_SOC_STREAMS + 1) + +#define IPU_ISYS_MIN_WIDTH 1U +#define IPU_ISYS_MIN_HEIGHT 1U +#define IPU_ISYS_MAX_WIDTH 16384U +#define IPU_ISYS_MAX_HEIGHT 16384U + +struct task_struct; + +/* + * struct ici_isys + * + * @media_dev: Media device + * @v4l2_dev: V4L2 device + * @adev: ISYS ipu4 bus device + * @power: Is ISYS powered on or not? + * @isr_bits: Which bits does the ISR handle? 
+ * @power_lock: Serialise access to power (power state in general) + * @lock: serialise access to pipes + * @pipes: pipelines per stream ID + * @fwcom: fwcom library private pointer + * @line_align: line alignment in memory + * @legacy_port_cfg: lane mappings for legacy CSI-2 ports + * @combo_port_cfg: lane mappings for D/C-PHY ports + * @isr_thread: for polling for events if interrupt delivery isn't available + * @reset_needed: Isys requires d0i0->i3 transition + * @video_opened: total number of opened file handles on video nodes + * @mutex: serialise access isys video open/release related operations + * @stream_mutex: serialise stream start and stop, queueing requests + * @pdata: platform data pointer + * @csi2: CSI-2 receivers + * @tpg: test pattern generators + * @csi2_be: CSI-2 back-ends + * @isa: Input system accelerator + * @fw: ISYS firmware binary (unsecure firmware) + * @fw_sgt: fw scatterlist + * @pkg_dir: host pointer to pkg_dir + * @pkg_dir_dma_addr: I/O virtual address for pkg_dir + * @pkg_dir_size: size of pkg_dir in bytes + */ +struct ici_isys { + struct ipu_bus_device *adev; + + int power; + spinlock_t power_lock; + u32 isr_csi2_bits; + spinlock_t lock; + struct ipu_isys_pipeline *pipes[IPU_ISYS_MAX_STREAMS]; + void *fwcom; + unsigned int line_align; + u32 legacy_port_cfg; + u32 combo_port_cfg; + struct task_struct *isr_thread; + bool reset_needed; + bool icache_prefetch; + unsigned int video_opened; + unsigned int stream_opened; + struct dentry *debugfsdir; + struct mutex mutex; + struct mutex stream_mutex; + struct mutex lib_mutex; + + struct ipu_isys_pdata *pdata; + + struct ici_isys_pipeline_device pipeline_dev; + + struct ici_isys_pipeline *ici_pipes[IPU_ISYS_MAX_STREAMS]; + struct ici_isys_csi2 ici_csi2[IPU_ISYS_MAX_CSI2_PORTS]; + struct ici_isys_tpg ici_tpg[2]; // TODO map to a macro + struct ici_isys_csi2_be ici_csi2_be[NR_OF_CSI2_BE_STREAMS]; + unsigned int ici_stream_opened; + + const struct firmware *fw; + struct sg_table fw_sgt; + + 
u64 *pkg_dir; + dma_addr_t pkg_dir_dma_addr; + unsigned pkg_dir_size; + + struct list_head requests; + struct pm_qos_request pm_qos; + struct ici_isys_csi2_monitor_message *short_packet_trace_buffer; + dma_addr_t short_packet_trace_buffer_dma_addr; + u64 tsc_timer_base; + u64 tunit_timer_base; + spinlock_t listlock; /* Protect framebuflist */ + struct list_head framebuflist; + struct list_head framebuflist_fw; +}; + +int intel_ipu4_isys_isr_run_ici(void *ptr); + +struct isys_fw_msgs { + union { + u64 dummy; + struct ipu_fw_isys_frame_buff_set_abi frame; + struct ipu_fw_isys_stream_cfg_data_abi stream; + } fw_msg; + struct list_head head; + dma_addr_t dma_addr; +}; + +#define ipu_lib_call_notrace_unlocked(func, isys, ...) \ + ({ \ + int rval; \ + \ + rval = -ia_css_isys_##func((isys)->fwcom, ##__VA_ARGS__);\ + \ + rval; \ + }) + +#define ipu_lib_call_notrace(func, isys, ...) \ + ({ \ + int rval; \ + \ + mutex_lock(&(isys)->lib_mutex); \ + \ + rval = ipu_lib_call_notrace_unlocked( \ + func, isys, ##__VA_ARGS__); \ + \ + mutex_unlock(&(isys)->lib_mutex); \ + \ + rval; \ + }) + +#define ipu_lib_call(func, isys, ...) \ + ({ \ + int rval; \ + dev_dbg(&(isys)->adev->dev, "hostlib: libcall %s\n", #func);\ + rval = ipu_lib_call_notrace(func, isys, ##__VA_ARGS__);\ + \ + rval; \ + }) + +#undef DEBUGK +#ifdef IPU4_DEBUG /* Macro for printing debug infos */ +# ifdef __KERNEL__ /* for kernel space */ +# define DEBUGK(fmt, args...) printk(KERN_DEBUG "IPU4: " fmt, ## args) +# else /* for user space */ +# define DEBUGK(fmt, args...) fprintf(stderr, fmt, ## args) +# endif +#else /* no debug prints */ +# define DEBUGK(fmt, args...) +#endif + +#else /* ICI_ENABLED */ +#pragma message "IPU ICI version is DISABLED." 
+#endif /* ICI_ENABLED */ + +#endif /* ICI_ISYS_H */ diff --git a/drivers/media/pci/intel/ici/libintel-ipu4_ici.c b/drivers/media/pci/intel/ici/libintel-ipu4_ici.c new file mode 100644 index 0000000000000..bb226d70f8537 --- /dev/null +++ b/drivers/media/pci/intel/ici/libintel-ipu4_ici.c @@ -0,0 +1,405 @@ +// SPDX-License_Identifier: GPL-2.0 +// Copyright (C) 2014 - 2018 Intel Corporation + +#include +#include +#include +#include "ici/ici-isys.h" +#include "ipu-wrapper.h" +#include + +#include "ipu-platform.h" + +#define ipu_lib_call_notrace_unlocked(func, isys, ...) \ + ({ \ + int rval; \ + \ + rval = -ia_css_isys_##func((isys)->fwcom, ##__VA_ARGS__); \ + \ + rval; \ + }) + +#define ipu_lib_call_notrace(func, isys, ...) \ + ({ \ + int rval; \ + \ + mutex_lock(&(isys)->lib_mutex); \ + \ + rval = ipu_lib_call_notrace_unlocked( \ + func, isys, ##__VA_ARGS__); \ + \ + mutex_unlock(&(isys)->lib_mutex); \ + \ + rval; \ + }) + +#define ipu_lib_call(func, isys, ...) \ + ({ \ + int rval; \ + dev_dbg(&(isys)->adev->dev, "hostlib: libcall %s\n", #func); \ + rval = ipu_lib_call_notrace(func, isys, ##__VA_ARGS__); \ + \ + rval; \ + }) + +static int wrapper_init_done; + +int ici_fw_isys_close(struct ici_isys *isys) +{ + struct device *dev = &isys->adev->dev; + int timeout = IPU_ISYS_TURNOFF_TIMEOUT; + int rval; + unsigned long flags; + + /* + * Ask library to stop the isys fw. Actual close takes + * some time as the FW must stop its actions including code fetch + * to SP icache. + */ + mutex_lock(&isys->lib_mutex); + spin_lock_irqsave(&isys->power_lock, flags); + rval = ipu_lib_call_notrace_unlocked(device_close, isys); + spin_unlock_irqrestore(&isys->power_lock, flags); + mutex_unlock(&isys->lib_mutex); + if (rval) + dev_err(dev, "Device close failure: %d\n", rval); + + /* release probably fails if the close failed. 
Let's try still */ + do { + usleep_range(IPU_ISYS_TURNOFF_DELAY_US, + 2 * IPU_ISYS_TURNOFF_DELAY_US); + rval = ipu_lib_call_notrace(device_release, isys, 0); + timeout--; + } while (rval != 0 && timeout); + + /* Spin lock to wait the interrupt handler to be finished */ + spin_lock_irqsave(&isys->power_lock, flags); + if (!rval) + isys->fwcom = NULL; /* No further actions needed */ + else + dev_err(dev, "Device release time out %d\n", rval); + spin_unlock_irqrestore(&isys->power_lock, flags); + return rval; +} +EXPORT_SYMBOL_GPL(ici_fw_isys_close); + +int ici_fw_isys_init(struct ici_isys *isys, + unsigned int num_streams) +{ + int retry = IPU_ISYS_OPEN_RETRY; + unsigned int i; + + struct ia_css_isys_device_cfg_data isys_cfg = { + .driver_sys = { + .ssid = ISYS_SSID, + .mmid = ISYS_MMID, + .num_send_queues = clamp_t( + unsigned int, num_streams, 1, + IPU_ISYS_NUM_STREAMS), + .num_recv_queues = IPU_ISYS_NUM_RECV_QUEUE, + .send_queue_size = IPU_ISYS_SIZE_SEND_QUEUE, + .recv_queue_size = IPU_ISYS_SIZE_RECV_QUEUE, + .icache_prefetch = isys->icache_prefetch, + }, + }; + struct device *dev = &isys->adev->dev; + int rval; + + if (!wrapper_init_done) { + wrapper_init_done = true; + ipu_wrapper_init(ISYS_MMID, &isys->adev->dev, + isys->pdata->base); + } + + /* + * SRAM partitioning. 
Initially equal partitioning is set + * TODO: Fine tune the partitining based on the stream pixel load + */ + for (i = 0; i < min(IPU_NOF_SRAM_BLOCKS_MAX, NOF_SRAM_BLOCKS_MAX); i++) { + if (i < isys_cfg.driver_sys.num_send_queues) + isys_cfg.buffer_partition.num_gda_pages[i] = + (IPU_DEVICE_GDA_NR_PAGES * + IPU_DEVICE_GDA_VIRT_FACTOR) / + isys_cfg.driver_sys.num_send_queues; + else + isys_cfg.buffer_partition.num_gda_pages[i] = 0; + } + + rval = -ia_css_isys_device_open(&isys->fwcom, &isys_cfg); + if (rval < 0) { + dev_err(dev, "isys device open failed %d\n", rval); + return rval; + } + + do { + usleep_range(IPU_ISYS_OPEN_TIMEOUT_US, + IPU_ISYS_OPEN_TIMEOUT_US + 10); + rval = ipu_lib_call(device_open_ready, isys); + if (!rval) + break; + retry--; + } while (retry > 0); + + if (!retry && rval) { + dev_err(dev, "isys device open ready failed %d\n", rval); + ici_fw_isys_close(isys); + } + + return rval; +} +EXPORT_SYMBOL_GPL(ici_fw_isys_init); + +void ici_fw_isys_cleanup(struct ici_isys *isys) +{ + ipu_lib_call(device_release, isys, 1); + isys->fwcom = NULL; +} +EXPORT_SYMBOL_GPL(ici_fw_isys_cleanup); + +struct ipu_fw_isys_resp_info_abi *ipu_fw_isys_get_resp( + void *context, unsigned int queue, + struct ipu_fw_isys_resp_info_abi *response) +{ + struct ia_css_isys_resp_info apiresp; + int rval; + + rval = -ia_css_isys_stream_handle_response(context, &apiresp); + if (rval < 0) + return NULL; + + response->buf_id = 0; + response->type = apiresp.type; + response->timestamp[0] = apiresp.timestamp[0]; + response->timestamp[1] = apiresp.timestamp[1]; + response->stream_handle = apiresp.stream_handle; + response->error_info.error = apiresp.error; + response->error_info.error_details = apiresp.error_details; + response->pin.out_buf_id = apiresp.pin.out_buf_id; + response->pin.addr = apiresp.pin.addr; + response->pin_id = apiresp.pin_id; + response->process_group_light.param_buf_id = + apiresp.process_group_light.param_buf_id; + response->process_group_light.addr = + 
apiresp.process_group_light.addr; + response->acc_id = apiresp.acc_id; +#ifdef IPU_OTF_SUPPORT + response->frame_counter = apiresp.frame_counter; + response->written_direct = apiresp.written_direct; +#endif + + return response; +} +EXPORT_SYMBOL_GPL(ipu_fw_isys_get_resp); + +void ipu_fw_isys_put_resp(void *context, unsigned int queue) +{ + /* Nothing to do here really */ +} +EXPORT_SYMBOL_GPL(ipu_fw_isys_put_resp); + +int ici_fw_isys_simple_cmd(struct ici_isys *isys, + const unsigned int stream_handle, + enum ipu_fw_isys_send_type send_type) +{ + int rval = -1; + + switch (send_type) { + case IPU_FW_ISYS_SEND_TYPE_STREAM_START: + rval = ipu_lib_call(stream_start, isys, stream_handle, + NULL); + break; + case IPU_FW_ISYS_SEND_TYPE_STREAM_FLUSH: + rval = ipu_lib_call(stream_flush, isys, stream_handle); + break; + case IPU_FW_ISYS_SEND_TYPE_STREAM_STOP: + rval = ipu_lib_call(stream_stop, isys, stream_handle); + break; + case IPU_FW_ISYS_SEND_TYPE_STREAM_CLOSE: + rval = ipu_lib_call(stream_close, isys, stream_handle); + break; + default: + WARN_ON(1); + } + + return rval; +} +EXPORT_SYMBOL_GPL(ici_fw_isys_simple_cmd); + +static void resolution_abi_to_api(const struct ipu_fw_isys_resolution_abi *abi, + struct ia_css_isys_resolution *api) +{ + api->width = abi->width; + api->height = abi->height; +} + +static void output_pin_payload_abi_to_api( + struct ipu_fw_isys_output_pin_payload_abi *abi, + struct ia_css_isys_output_pin_payload *api) +{ + api->out_buf_id = abi->out_buf_id; + api->addr = abi->addr; +} + +static void output_pin_info_abi_to_api( + struct ipu_fw_isys_output_pin_info_abi *abi, + struct ia_css_isys_output_pin_info *api) +{ + api->input_pin_id = abi->input_pin_id; + resolution_abi_to_api(&abi->output_res, &api->output_res); + api->stride = abi->stride; + api->pt = abi->pt; + api->watermark_in_lines = abi->watermark_in_lines; + api->payload_buf_size = abi->payload_buf_size; + api->send_irq = abi->send_irq; + api->ft = abi->ft; +#ifdef IPU_OTF_SUPPORT + 
api->link_id = abi->link_id; +#endif + api->reserve_compression = abi->reserve_compression; +} + +static void param_pin_abi_to_api(struct ipu_fw_isys_param_pin_abi *abi, + struct ia_css_isys_param_pin *api) +{ + api->param_buf_id = abi->param_buf_id; + api->addr = abi->addr; +} + +static void input_pin_info_abi_to_api( + struct ipu_fw_isys_input_pin_info_abi *abi, + struct ia_css_isys_input_pin_info *api) +{ + resolution_abi_to_api(&abi->input_res, &api->input_res); + api->dt = abi->dt; + api->mipi_store_mode = abi->mipi_store_mode; + api->mapped_dt = abi->mapped_dt; +} + +static void isa_cfg_abi_to_api(const struct ipu_fw_isys_isa_cfg_abi *abi, + struct ia_css_isys_isa_cfg *api) +{ + unsigned int i; + + + for (i = 0; i < N_IA_CSS_ISYS_RESOLUTION_INFO; i++) + resolution_abi_to_api(&abi->isa_res[i], &api->isa_res[i]); + + api->blc_enabled = abi->cfg.blc; + api->lsc_enabled = abi->cfg.lsc; + api->dpc_enabled = abi->cfg.dpc; + api->downscaler_enabled = abi->cfg.downscaler; + api->awb_enabled = abi->cfg.awb; + api->af_enabled = abi->cfg.af; + api->ae_enabled = abi->cfg.ae; + api->paf_type = abi->cfg.paf; + api->send_irq_stats_ready = abi->cfg.send_irq_stats_ready; + api->send_resp_stats_ready = abi->cfg.send_irq_stats_ready; +} + +static void cropping_abi_to_api(struct ipu_fw_isys_cropping_abi *abi, + struct ia_css_isys_cropping *api) +{ + api->top_offset = abi->top_offset; + api->left_offset = abi->left_offset; + api->bottom_offset = abi->bottom_offset; + api->right_offset = abi->right_offset; +} + +static void stream_cfg_abi_to_api(struct ipu_fw_isys_stream_cfg_data_abi *abi, + struct ia_css_isys_stream_cfg_data *api) +{ + unsigned int i; + + api->src = abi->src; + api->vc = abi->vc; + api->isl_use = abi->isl_use; + api->compfmt = abi->compfmt; + isa_cfg_abi_to_api(&abi->isa_cfg, &api->isa_cfg); + for (i = 0; i < N_IA_CSS_ISYS_CROPPING_LOCATION; i++) + cropping_abi_to_api(&abi->crop[i], &api->crop[i]); + + api->send_irq_sof_discarded = abi->send_irq_sof_discarded; + 
api->send_irq_eof_discarded = abi->send_irq_eof_discarded; + api->send_resp_sof_discarded = abi->send_irq_sof_discarded; + api->send_resp_eof_discarded = abi->send_irq_eof_discarded; + api->nof_input_pins = abi->nof_input_pins; + api->nof_output_pins = abi->nof_output_pins; + for (i = 0; i < abi->nof_input_pins; i++) + input_pin_info_abi_to_api(&abi->input_pins[i], + &api->input_pins[i]); + + for (i = 0; i < abi->nof_output_pins; i++) + output_pin_info_abi_to_api(&abi->output_pins[i], + &api->output_pins[i]); +} + +static void frame_buff_set_abi_to_api( + struct ipu_fw_isys_frame_buff_set_abi *abi, + struct ia_css_isys_frame_buff_set *api) +{ + int i; + + for (i = 0; i < min(IPU_MAX_OPINS, MAX_OPINS); i++) + output_pin_payload_abi_to_api(&abi->output_pins[i], + &api->output_pins[i]); + + param_pin_abi_to_api(&abi->process_group_light, + &api->process_group_light); + + api->send_irq_sof = abi->send_irq_sof; + api->send_irq_eof = abi->send_irq_eof; +} + +int ici_fw_isys_complex_cmd(struct ici_isys *isys, + const unsigned int stream_handle, + void *cpu_mapped_buf, + dma_addr_t dma_mapped_buf, + size_t size, + enum ipu_fw_isys_send_type send_type) +{ + union { + struct ia_css_isys_stream_cfg_data stream_cfg; + struct ia_css_isys_frame_buff_set buf; + } param; + int rval = -1; + + memset(¶m, 0, sizeof(param)); + + switch (send_type) { + case IPU_FW_ISYS_SEND_TYPE_STREAM_CAPTURE: + frame_buff_set_abi_to_api(cpu_mapped_buf, ¶m.buf); + rval = ipu_lib_call(stream_capture_indication, + isys, stream_handle, ¶m.buf); + break; + case IPU_FW_ISYS_SEND_TYPE_STREAM_OPEN: + stream_cfg_abi_to_api(cpu_mapped_buf, ¶m.stream_cfg); + rval = ipu_lib_call(stream_open, isys, stream_handle, + ¶m.stream_cfg); + break; + case IPU_FW_ISYS_SEND_TYPE_STREAM_START_AND_CAPTURE: + frame_buff_set_abi_to_api(cpu_mapped_buf, ¶m.buf); + rval = ipu_lib_call(stream_start, isys, stream_handle, + ¶m.buf); + break; + default: + WARN_ON(1); + } + + return rval; +} +EXPORT_SYMBOL_GPL(ici_fw_isys_complex_cmd); 
+ +MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("Intel ipu library"); +EXPORT_SYMBOL_GPL(ia_css_isys_device_open); +EXPORT_SYMBOL_GPL(ia_css_isys_device_open_ready); +EXPORT_SYMBOL_GPL(ia_css_isys_device_close); +EXPORT_SYMBOL_GPL(ia_css_isys_device_release); +EXPORT_SYMBOL_GPL(ia_css_isys_stream_open); +EXPORT_SYMBOL_GPL(ia_css_isys_stream_close); +EXPORT_SYMBOL_GPL(ia_css_isys_stream_start); +EXPORT_SYMBOL_GPL(ia_css_isys_stream_stop); +EXPORT_SYMBOL_GPL(ia_css_isys_stream_flush); +EXPORT_SYMBOL_GPL(ia_css_isys_stream_capture_indication); +EXPORT_SYMBOL_GPL(ia_css_isys_stream_handle_response); + diff --git a/drivers/media/pci/intel/ipu-bus.c b/drivers/media/pci/intel/ipu-bus.c new file mode 100644 index 0000000000000..30e50337d3b32 --- /dev/null +++ b/drivers/media/pci/intel/ipu-bus.c @@ -0,0 +1,471 @@ +// SPDX-License-Identifier: GPL-2.0 +// Copyright (C) 2013 - 2018 Intel Corporation + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "ipu.h" +#include "ipu-platform.h" +#include "ipu-dma.h" +#include "ipu-mmu.h" + +#ifdef CONFIG_PM +static struct bus_type ipu_bus; + +static int bus_pm_suspend_child_dev(struct device *dev, void *p) +{ + struct ipu_bus_device *adev = to_ipu_bus_device(dev); + struct device *parent = (struct device *)p; + + if (!ipu_bus_get_drvdata(adev)) + return 0; /* Device not attached to any driver yet */ + + if (dev->parent != parent || adev->ctrl) + return 0; + + return pm_generic_runtime_suspend(dev); +} + +static int bus_pm_runtime_suspend(struct device *dev) +{ + struct ipu_bus_device *adev = to_ipu_bus_device(dev); + int rval; + + if (!adev->ctrl) { + dev_dbg(dev, "has no buttress control info, bailing out\n"); + return 0; + } + + rval = bus_for_each_dev(&ipu_bus, NULL, dev, bus_pm_suspend_child_dev); + if (rval) { + dev_err(dev, "failed to suspend child device\n"); + return rval; + } + + rval = pm_generic_runtime_suspend(dev); + if (rval) + return rval; + + rval = 
ipu_buttress_power(dev, adev->ctrl, false); + dev_dbg(dev, "%s: buttress power down %d\n", __func__, rval); + if (!rval) + return 0; + + dev_err(dev, "power down failed!\n"); + + /* Powering down failed, attempt to resume device now */ + rval = pm_generic_runtime_resume(dev); + if (!rval) + return -EBUSY; + + return -EIO; +} + +static int bus_pm_resume_child_dev(struct device *dev, void *p) +{ + struct ipu_bus_device *adev = to_ipu_bus_device(dev); + struct device *parent = (struct device *)p; + int r; + + if (!ipu_bus_get_drvdata(adev)) + return 0; /* Device not attached to any driver yet */ + + if (dev->parent != parent || adev->ctrl) + return 0; + + mutex_lock(&adev->resume_lock); + r = pm_generic_runtime_resume(dev); + mutex_unlock(&adev->resume_lock); + return r; +} + +static int bus_pm_runtime_resume(struct device *dev) +{ + struct ipu_bus_device *adev = to_ipu_bus_device(dev); + int rval; + + if (!adev->ctrl) { + dev_dbg(dev, "has no buttress control info, bailing out\n"); + return 0; + } + + rval = ipu_buttress_power(dev, adev->ctrl, true); + dev_dbg(dev, "%s: buttress power up %d\n", __func__, rval); + if (rval) + return rval; + + rval = pm_generic_runtime_resume(dev); + dev_dbg(dev, "%s: resume %d\n", __func__, rval); + if (rval) + goto out_err; + + /* + * It needs to be ensured that IPU child devices' resume/suspend are + * called only when the child devices' power is turned on/off by the + * parent device here. Therefore, children's suspend/resume are called + * from here, because that is the only way to guarantee it. 
+ */ + rval = bus_for_each_dev(&ipu_bus, NULL, dev, bus_pm_resume_child_dev); + if (rval) { + dev_err(dev, "failed to resume child device - reset it\n"); + + rval = pm_generic_runtime_suspend(dev); + dev_dbg(dev, "%s: suspend %d\n", __func__, rval); + + rval = ipu_buttress_power(dev, adev->ctrl, false); + dev_dbg(dev, "%s: buttress power down %d\n", __func__, rval); + if (rval) + return rval; + + usleep_range(1000, 1100); + + rval = ipu_buttress_power(dev, adev->ctrl, true); + dev_dbg(dev, "%s: buttress power up %d\n", __func__, rval); + if (rval) + return rval; + + rval = pm_generic_runtime_resume(dev); + dev_dbg(dev, "%s: re-resume %d\n", __func__, rval); + if (rval) + goto out_err; + + rval = bus_for_each_dev(&ipu_bus, NULL, dev, + bus_pm_resume_child_dev); + + if (rval) { + dev_err(dev, "resume retry failed\n"); + goto out_err; + } + } + + return 0; + +out_err: + if (adev->ctrl) + ipu_buttress_power(dev, adev->ctrl, false); + + return -EBUSY; +} + +static const struct dev_pm_ops ipu_bus_pm_ops = { + .runtime_suspend = bus_pm_runtime_suspend, + .runtime_resume = bus_pm_runtime_resume, +}; + +#define IPU_BUS_PM_OPS (&ipu_bus_pm_ops) +#else +#define IPU_BUS_PM_OPS NULL +#endif + +static int ipu_bus_match(struct device *dev, struct device_driver *drv) +{ + struct ipu_bus_driver *adrv = to_ipu_bus_driver(drv); + + dev_dbg(dev, "bus match: \"%s\" --- \"%s\"\n", dev_name(dev), + adrv->wanted); + + return !strncmp(dev_name(dev), adrv->wanted, strlen(adrv->wanted)); +} + +static struct ipu_dma_mapping *alloc_dma_mapping(struct device *dev) +{ + struct ipu_dma_mapping *dmap; + + dmap = kzalloc(sizeof(*dmap), GFP_KERNEL); + if (!dmap) + return NULL; + + dmap->domain = iommu_domain_alloc(dev->bus); + if (!dmap->domain) { + kfree(dmap); + return NULL; + } +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 0, 0) + init_iova_domain(&dmap->iovad, dma_get_mask(dev) >> PAGE_SHIFT); +#elif LINUX_VERSION_CODE < KERNEL_VERSION(4, 15, 0) + init_iova_domain(&dmap->iovad, SZ_4K, 1, + 
dma_get_mask(dev) >> PAGE_SHIFT); +#else + init_iova_domain(&dmap->iovad, SZ_4K, 1); +#endif + + kref_init(&dmap->ref); + + pr_debug("alloc mapping\n"); + + iova_cache_get(); + + return dmap; +} + +static void free_dma_mapping(void *ptr) +{ + struct ipu_mmu *mmu = ptr; + struct ipu_dma_mapping *dmap = mmu->dmap; + + iommu_domain_free(dmap->domain); + mmu->set_mapping(mmu, NULL); + iova_cache_put(); + put_iova_domain(&dmap->iovad); + kfree(dmap); +} + +static struct iommu_group *ipu_bus_get_group(struct device *dev) +{ + struct device *aiommu = to_ipu_bus_device(dev)->iommu; + struct ipu_mmu *mmu = dev_get_drvdata(aiommu); + struct iommu_group *group; + struct ipu_dma_mapping *dmap; + + if (!mmu) { + dev_err(dev, "%s: no iommu available\n", __func__); + return NULL; + } + + group = iommu_group_get(dev); + if (group) + return group; + + group = iommu_group_alloc(); + if (!group) { + dev_err(dev, "%s: can't alloc iommu group\n", __func__); + return NULL; + } + + dmap = alloc_dma_mapping(dev); + if (!dmap) { + dev_err(dev, "%s: can't alloc dma mapping\n", __func__); + iommu_group_put(group); + return NULL; + } + + iommu_group_set_iommudata(group, mmu, free_dma_mapping); + + /* + * Turn mmu on and off synchronously. Otherwise it may still be on + * at psys / isys probing phase and that may cause problems on + * development environments. 
+ */ + pm_runtime_get_sync(aiommu); + mmu->set_mapping(mmu, dmap); + pm_runtime_put_sync(aiommu); + + return group; +} + +static int ipu_bus_probe(struct device *dev) +{ + struct ipu_bus_device *adev = to_ipu_bus_device(dev); + struct ipu_bus_driver *adrv = to_ipu_bus_driver(dev->driver); + struct iommu_group *group = NULL; + int rval; + + dev_dbg(dev, "bus probe dev %s\n", dev_name(dev)); + + if (adev->iommu) { + dev_dbg(dev, "iommu %s\n", dev_name(adev->iommu)); + + group = ipu_bus_get_group(dev); + if (!group) + return -EPROBE_DEFER; + + rval = iommu_group_add_device(group, dev); + if (rval) + goto out_err; + } + + adev->adrv = adrv; + if (adrv->probe) { + rval = adrv->probe(adev); + if (!rval) { + /* + * If the device power, after probe, is enabled + * (from the parent device), its resume needs to + * be called to initialize the device properly. + */ + if (!adev->ctrl && + !pm_runtime_status_suspended(dev->parent)) { + mutex_lock(&adev->resume_lock); + pm_generic_runtime_resume(dev); + mutex_unlock(&adev->resume_lock); + } + } + } else { + rval = -ENODEV; + } + + if (rval) + goto out_err; + + return 0; + +out_err: + ipu_bus_set_drvdata(adev, NULL); + adev->adrv = NULL; + iommu_group_remove_device(dev); + iommu_group_put(group); + return rval; +} + +static int ipu_bus_remove(struct device *dev) +{ + struct ipu_bus_device *adev = to_ipu_bus_device(dev); + struct ipu_bus_driver *adrv = to_ipu_bus_driver(dev->driver); + + if (adrv->remove) + adrv->remove(adev); + + if (adev->iommu) + iommu_group_remove_device(dev); + + return 0; +} + +static struct bus_type ipu_bus = { + .name = IPU_BUS_NAME, + .match = ipu_bus_match, + .probe = ipu_bus_probe, + .remove = ipu_bus_remove, + .pm = IPU_BUS_PM_OPS, +}; + +static struct mutex ipu_bus_mutex; + +static void ipu_bus_release(struct device *dev) +{ + struct ipu_bus_device *adev = to_ipu_bus_device(dev); + + kfree(adev); +} + +struct ipu_bus_device *ipu_bus_add_device(struct pci_dev *pdev, + struct device *parent, void 
*pdata, + struct device *iommu, + struct ipu_buttress_ctrl *ctrl, + char *name, unsigned int nr) +{ + struct ipu_bus_device *adev; + struct ipu_device *isp = pci_get_drvdata(pdev); + int rval; + + adev = kzalloc(sizeof(*adev), GFP_KERNEL); + if (!adev) + return ERR_PTR(-ENOMEM); + + adev->dev.parent = parent; + adev->dev.bus = &ipu_bus; + adev->dev.release = ipu_bus_release; +#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 13, 16) + adev->dev.dma_ops = &ipu_dma_ops; +#else + adev->dev.archdata.dma_ops = &ipu_dma_ops; +#endif + adev->dma_mask = DMA_BIT_MASK(isp->secure_mode ? + IPU_MMU_ADDRESS_BITS : + IPU_MMU_ADDRESS_BITS_NON_SECURE); + adev->dev.dma_mask = &adev->dma_mask; + adev->dev.coherent_dma_mask = adev->dma_mask; + adev->iommu = iommu; + adev->ctrl = ctrl; + adev->pdata = pdata; + adev->isp = isp; + mutex_init(&adev->resume_lock); + dev_set_name(&adev->dev, "%s%d", name, nr); + + rval = device_register(&adev->dev); + if (rval) { + put_device(&adev->dev); + return ERR_PTR(rval); + } + + mutex_lock(&ipu_bus_mutex); + list_add(&adev->list, &isp->devices); + mutex_unlock(&ipu_bus_mutex); + + return adev; +} + +void ipu_bus_del_devices(struct pci_dev *pdev) +{ + struct ipu_device *isp = pci_get_drvdata(pdev); + struct ipu_bus_device *adev, *save; + + mutex_lock(&ipu_bus_mutex); + + list_for_each_entry_safe(adev, save, &isp->devices, list) { + list_del(&adev->list); + device_unregister(&adev->dev); + } + + mutex_unlock(&ipu_bus_mutex); +} + +int ipu_bus_register_driver(struct ipu_bus_driver *adrv) +{ + adrv->drv.bus = &ipu_bus; + return driver_register(&adrv->drv); +} +EXPORT_SYMBOL(ipu_bus_register_driver); + +int ipu_bus_unregister_driver(struct ipu_bus_driver *adrv) +{ + driver_unregister(&adrv->drv); + return 0; +} +EXPORT_SYMBOL(ipu_bus_unregister_driver); + +int ipu_bus_register(void) +{ + mutex_init(&ipu_bus_mutex); + return bus_register(&ipu_bus); +} +EXPORT_SYMBOL(ipu_bus_register); + +void ipu_bus_unregister(void) +{ + mutex_destroy(&ipu_bus_mutex); + 
return bus_unregister(&ipu_bus); +} +EXPORT_SYMBOL(ipu_bus_unregister); + +int ipu_bus_set_iommu(struct iommu_ops *ops) +{ + if (iommu_present(&ipu_bus)) + return 0; + + return bus_set_iommu(&ipu_bus, ops); +} +EXPORT_SYMBOL(ipu_bus_set_iommu); + +static int flr_rpm_recovery(struct device *dev, void *p) +{ + dev_dbg(dev, "FLR recovery call\n"); + /* + * We are not necessarily going through device from child to + * parent. runtime PM refuses to change state for parent if the child + * is still active. At FLR (full reset for whole IPU) that doesn't + * matter. Everything has been power gated by HW during the FLR cycle + * and we are just cleaning up SW state. Thus, ignore child during + * set_suspended. + */ + pm_suspend_ignore_children(dev, true); + pm_runtime_set_suspended(dev); + pm_suspend_ignore_children(dev, false); + + return 0; +} + +int ipu_bus_flr_recovery(void) +{ + bus_for_each_dev(&ipu_bus, NULL, NULL, flr_rpm_recovery); + return 0; +} +EXPORT_SYMBOL(ipu_bus_flr_recovery); diff --git a/drivers/media/pci/intel/ipu-bus.h b/drivers/media/pci/intel/ipu-bus.h new file mode 100644 index 0000000000000..4226b865fe994 --- /dev/null +++ b/drivers/media/pci/intel/ipu-bus.h @@ -0,0 +1,70 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (C) 2013 - 2018 Intel Corporation */ + +#ifndef IPU_BUS_H +#define IPU_BUS_H + +#include +#include +#include +#include +#include + +#define IPU_BUS_NAME IPU_NAME "-bus" + +struct ipu_buttress_ctrl; +struct ipu_subsystem_trace_config; + +struct ipu_bus_device { + struct device dev; + struct list_head list; + void *pdata; + struct ipu_bus_driver *adrv; + struct device *iommu; + struct ipu_device *isp; + struct ipu_subsystem_trace_config *trace_cfg; + struct ipu_buttress_ctrl *ctrl; + u64 dma_mask; + /* Protect runtime_resume calls on the dev */ + struct mutex resume_lock; +}; + +#define to_ipu_bus_device(_dev) container_of(_dev, struct ipu_bus_device, dev) + +struct ipu_bus_driver { + struct device_driver drv; + char wanted[20]; 
+ int (*probe)(struct ipu_bus_device *adev); + void (*remove)(struct ipu_bus_device *adev); + irqreturn_t (*isr)(struct ipu_bus_device *adev); + irqreturn_t (*isr_threaded)(struct ipu_bus_device *adev); + bool wake_isr_thread; +}; + +#define to_ipu_bus_driver(_drv) container_of(_drv, struct ipu_bus_driver, drv) + +struct ipu_bus_device *ipu_bus_add_device(struct pci_dev *pdev, + struct device *parent, void *pdata, + struct device *iommu, + struct ipu_buttress_ctrl *ctrl, + char *name, unsigned int nr); +void ipu_bus_del_devices(struct pci_dev *pdev); + +int ipu_bus_register_driver(struct ipu_bus_driver *adrv); +int ipu_bus_unregister_driver(struct ipu_bus_driver *adrv); + +int ipu_bus_register(void); +void ipu_bus_unregister(void); + +int ipu_bus_set_iommu(struct iommu_ops *ops); + +#define module_ipu_bus_driver(drv) \ + module_driver(drv, ipu_bus_register_driver, \ + ipu_bus_unregister_driver) + +#define ipu_bus_set_drvdata(adev, data) dev_set_drvdata(&(adev)->dev, data) +#define ipu_bus_get_drvdata(adev) dev_get_drvdata(&(adev)->dev) + +int ipu_bus_flr_recovery(void); + +#endif diff --git a/drivers/media/pci/intel/ipu-buttress.c b/drivers/media/pci/intel/ipu-buttress.c new file mode 100644 index 0000000000000..d19e965543c46 --- /dev/null +++ b/drivers/media/pci/intel/ipu-buttress.c @@ -0,0 +1,1834 @@ +// SPDX-License-Identifier: GPL-2.0 +// Copyright (C) 2013 - 2018 Intel Corporation + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#include "ipu.h" +#include "ipu-bus.h" +#include "ipu-buttress.h" +#include "ipu-platform-buttress-regs.h" +#include "ipu-cpd.h" +#define CREATE_TRACE_POINTS +#define IPU_PERF_REG_TRACE +#include "ipu-trace-event.h" + +#define BOOTLOADER_STATUS_OFFSET 0x8000 +#define BOOTLOADER_MAGIC_KEY 0xb00710ad + +#define ENTRY BUTTRESS_IU2CSECSR_IPC_PEER_COMP_ACTIONS_RST_PHASE1 +#define EXIT BUTTRESS_IU2CSECSR_IPC_PEER_COMP_ACTIONS_RST_PHASE2 +#define 
QUERY BUTTRESS_IU2CSECSR_IPC_PEER_QUERIED_IP_COMP_ACTIONS_RST_PHASE + +#define BUTTRESS_TSC_SYNC_RESET_TRIAL_MAX 10 + +#define BUTTRESS_CSE_BOOTLOAD_TIMEOUT 5000 +#define BUTTRESS_CSE_AUTHENTICATE_TIMEOUT 10000 +#define BUTTRESS_CSE_FWRESET_TIMEOUT 100 + +#define BUTTRESS_IPC_TX_TIMEOUT 1000 +#define BUTTRESS_IPC_RX_TIMEOUT 1000 +#define BUTTRESS_IPC_VALIDITY_TIMEOUT 1000 + +#define IPU_BUTTRESS_TSC_LIMIT 500 /* 26 us @ 19.2 MHz */ +#define IPU_BUTTRESS_TSC_RETRY 10 + +#define BUTTRESS_CSE_IPC_RESET_RETRY 4 + +#define BUTTRESS_IPC_CMD_SEND_RETRY 1 + +static const struct ipu_buttress_sensor_clk_freq sensor_clk_freqs[] = { + {6750000, BUTTRESS_SENSOR_CLK_FREQ_6P75MHZ}, + {8000000, BUTTRESS_SENSOR_CLK_FREQ_8MHZ}, + {9600000, BUTTRESS_SENSOR_CLK_FREQ_9P6MHZ}, + {12000000, BUTTRESS_SENSOR_CLK_FREQ_12MHZ}, + {13600000, BUTTRESS_SENSOR_CLK_FREQ_13P6MHZ}, + {14400000, BUTTRESS_SENSOR_CLK_FREQ_14P4MHZ}, + {15800000, BUTTRESS_SENSOR_CLK_FREQ_15P8MHZ}, + {16200000, BUTTRESS_SENSOR_CLK_FREQ_16P2MHZ}, + {17300000, BUTTRESS_SENSOR_CLK_FREQ_17P3MHZ}, + {18600000, BUTTRESS_SENSOR_CLK_FREQ_18P6MHZ}, + {19200000, BUTTRESS_SENSOR_CLK_FREQ_19P2MHZ}, + {24000000, BUTTRESS_SENSOR_CLK_FREQ_24MHZ}, + {26000000, BUTTRESS_SENSOR_CLK_FREQ_26MHZ}, + {27000000, BUTTRESS_SENSOR_CLK_FREQ_27MHZ} +}; + +static const u32 ipu_adev_irq_mask[] = { + BUTTRESS_ISR_IS_IRQ, BUTTRESS_ISR_PS_IRQ +}; + +int ipu_buttress_ipc_reset(struct ipu_device *isp, struct ipu_buttress_ipc *ipc) +{ + struct ipu_buttress *b = &isp->buttress; + unsigned long tout_jfs; + unsigned int tout = 500; + u32 val = 0, csr_in_clr; + + mutex_lock(&b->ipc_mutex); + + /* Clear-by-1 CSR (all bits), corresponding internal states. */ + val = readl(isp->base + ipc->csr_in); + writel(val, isp->base + ipc->csr_in); + + /* Set peer CSR bit IPC_PEER_COMP_ACTIONS_RST_PHASE1 */ + writel(ENTRY, isp->base + ipc->csr_out); + + /* + * Clear-by-1 all CSR bits EXCEPT following + * bits: + * A. IPC_PEER_COMP_ACTIONS_RST_PHASE1. + * B. 
IPC_PEER_COMP_ACTIONS_RST_PHASE2. + * C. Possibly custom bits, depending on + * their role. + */ + csr_in_clr = BUTTRESS_IU2CSECSR_IPC_PEER_DEASSERTED_REG_VALID_REQ | + BUTTRESS_IU2CSECSR_IPC_PEER_ACKED_REG_VALID | + BUTTRESS_IU2CSECSR_IPC_PEER_ASSERTED_REG_VALID_REQ | QUERY; + + /* + * How long we should wait here? + */ + tout_jfs = jiffies + msecs_to_jiffies(tout); + do { + val = readl(isp->base + ipc->csr_in); + dev_dbg(&isp->pdev->dev, "%s: csr_in = %x\n", __func__, val); + if (val & ENTRY) { + if (val & EXIT) { + dev_dbg(&isp->pdev->dev, + "%s:%s & %s\n", + __func__, + "IPC_PEER_COMP_ACTIONS_RST_PHASE1", + "IPC_PEER_COMP_ACTIONS_RST_PHASE2"); + /* + * 1) Clear-by-1 CSR bits + * (IPC_PEER_COMP_ACTIONS_RST_PHASE1, + * IPC_PEER_COMP_ACTIONS_RST_PHASE2). + * 2) Set peer CSR bit + * IPC_PEER_QUERIED_IP_COMP_ACTIONS_RST_PHASE. + */ + writel(ENTRY | EXIT, + isp->base + ipc->csr_in); + + writel(QUERY, isp->base + ipc->csr_out); + + tout_jfs = jiffies + msecs_to_jiffies(tout); + continue; + } else { + dev_dbg(&isp->pdev->dev, + "%s:IPC_PEER_COMP_ACTIONS_RST_PHASE1\n", + __func__); + /* + * 1) Clear-by-1 CSR bits + * (IPC_PEER_COMP_ACTIONS_RST_PHASE1, + * IPC_PEER_QUERIED_IP_COMP_ACTIONS_RST_PHASE). + * 2) Set peer CSR bit + * IPC_PEER_COMP_ACTIONS_RST_PHASE1. + */ + writel(ENTRY | QUERY, + isp->base + ipc->csr_in); + + writel(ENTRY, isp->base + ipc->csr_out); + + tout_jfs = jiffies + msecs_to_jiffies(tout); + continue; + } + } else if (val & EXIT) { + dev_dbg(&isp->pdev->dev, + "%s: IPC_PEER_COMP_ACTIONS_RST_PHASE2\n", + __func__); + /* + * Clear-by-1 CSR bit + * IPC_PEER_COMP_ACTIONS_RST_PHASE2. + * 1) Clear incoming doorbell. + * 2) Clear-by-1 all CSR bits EXCEPT following + * bits: + * A. IPC_PEER_COMP_ACTIONS_RST_PHASE1. + * B. IPC_PEER_COMP_ACTIONS_RST_PHASE2. + * C. Possibly custom bits, depending on + * their role. + * 3) Set peer CSR bit + * IPC_PEER_COMP_ACTIONS_RST_PHASE2. 
+ */ + writel(EXIT, isp->base + ipc->csr_in); + + writel(0 << BUTTRESS_IU2CSEDB0_BUSY_SHIFT, + isp->base + ipc->db0_in); + + writel(csr_in_clr, isp->base + ipc->csr_in); + + writel(EXIT, isp->base + ipc->csr_out); + + /* + * Read csr_in again to make sure if RST_PHASE2 is done. + * If csr_in is QUERY, it should be handled again. + */ + usleep_range(100, 500); + val = readl(isp->base + ipc->csr_in); + if (val & QUERY) { + dev_dbg(&isp->pdev->dev, + "%s: RST_PHASE2 retry csr_in = %x\n", + __func__, val); + continue; + } + + mutex_unlock(&b->ipc_mutex); + + return 0; + } else if (val & QUERY) { + dev_dbg(&isp->pdev->dev, + "%s: %s\n", + __func__, + "IPC_PEER_QUERIED_IP_COMP_ACTIONS_RST_PHASE"); + /* + * 1) Clear-by-1 CSR bit + * IPC_PEER_QUERIED_IP_COMP_ACTIONS_RST_PHASE. + * 2) Set peer CSR bit + * IPC_PEER_COMP_ACTIONS_RST_PHASE1 + */ + writel(QUERY, isp->base + ipc->csr_in); + + writel(ENTRY, isp->base + ipc->csr_out); + + tout_jfs = jiffies + msecs_to_jiffies(tout); + } + usleep_range(100, 500); + } while (!time_after(jiffies, tout_jfs)); + + mutex_unlock(&b->ipc_mutex); + + dev_err(&isp->pdev->dev, "Timed out while waiting for CSE!\n"); + + return -ETIMEDOUT; +} + +static void +ipu_buttress_ipc_validity_close(struct ipu_device *isp, + struct ipu_buttress_ipc *ipc) +{ + /* Set bit 5 in CSE CSR */ + writel(BUTTRESS_IU2CSECSR_IPC_PEER_DEASSERTED_REG_VALID_REQ, + isp->base + ipc->csr_out); +} + +static int +ipu_buttress_ipc_validity_open(struct ipu_device *isp, + struct ipu_buttress_ipc *ipc) +{ + unsigned long tout_jfs; + unsigned int tout = BUTTRESS_IPC_VALIDITY_TIMEOUT; + u32 val; + + /* Set bit 3 in CSE CSR */ + writel(BUTTRESS_IU2CSECSR_IPC_PEER_ASSERTED_REG_VALID_REQ, + isp->base + ipc->csr_out); + + /* + * How long we should wait here? 
+ */ + tout_jfs = jiffies + msecs_to_jiffies(tout); + do { + val = readl(isp->base + ipc->csr_in); + dev_dbg(&isp->pdev->dev, "%s: CSE/ISH2IUCSR = %x\n", + __func__, val); + + if (val & BUTTRESS_IU2CSECSR_IPC_PEER_ACKED_REG_VALID) { + dev_dbg(&isp->pdev->dev, + "%s: Validity ack received from peer\n", + __func__); + return 0; + } + usleep_range(100, 1000); + } while (!time_after(jiffies, tout_jfs)); + + dev_err(&isp->pdev->dev, "Timed out while waiting for CSE!\n"); + + ipu_buttress_ipc_validity_close(isp, ipc); + + return -ETIMEDOUT; +} + +static void ipu_buttress_ipc_recv(struct ipu_device *isp, + struct ipu_buttress_ipc *ipc, u32 *ipc_msg) +{ + if (ipc_msg) + *ipc_msg = readl(isp->base + ipc->data0_in); + writel(0, isp->base + ipc->db0_in); +} + +int +ipu_buttress_ipc_send_bulk(struct ipu_device *isp, + enum ipu_buttress_ipc_domain ipc_domain, + struct ipu_ipc_buttress_bulk_msg *msgs, u32 size) +{ + struct ipu_buttress *b = &isp->buttress; + struct ipu_buttress_ipc *ipc; + unsigned long tx_timeout_jiffies, rx_timeout_jiffies; + u32 val; + int ret; + int tout; + unsigned int i, retry = BUTTRESS_IPC_CMD_SEND_RETRY; + + ipc = ipc_domain == IPU_BUTTRESS_IPC_CSE ? 
&b->cse : &b->ish; + + mutex_lock(&b->ipc_mutex); + + ret = ipu_buttress_ipc_validity_open(isp, ipc); + if (ret) { + dev_err(&isp->pdev->dev, "IPC validity open failed\n"); + goto out; + } + + tx_timeout_jiffies = msecs_to_jiffies(BUTTRESS_IPC_TX_TIMEOUT); + rx_timeout_jiffies = msecs_to_jiffies(BUTTRESS_IPC_RX_TIMEOUT); + + for (i = 0; i < size; i++) { + reinit_completion(&ipc->send_complete); + if (msgs[i].require_resp) + reinit_completion(&ipc->recv_complete); + + dev_dbg(&isp->pdev->dev, "bulk IPC command: 0x%x\n", + msgs[i].cmd); + writel(msgs[i].cmd, isp->base + ipc->data0_out); + + val = 1 << BUTTRESS_IU2CSEDB0_BUSY_SHIFT | msgs[i].cmd_size; + + writel(val, isp->base + ipc->db0_out); + + tout = wait_for_completion_timeout(&ipc->send_complete, + tx_timeout_jiffies); + if (!tout) { + dev_err(&isp->pdev->dev, "send IPC response timeout\n"); + if (!retry--) { + ret = -ETIMEDOUT; + goto out; + } + + /* + * WORKAROUND: Sometimes CSE is not + * responding on first try, try again. + */ + writel(0, isp->base + ipc->db0_out); + i--; + continue; + } + + retry = BUTTRESS_IPC_CMD_SEND_RETRY; + + if (!msgs[i].require_resp) + continue; + + tout = wait_for_completion_timeout(&ipc->recv_complete, + rx_timeout_jiffies); + if (!tout) { + dev_err(&isp->pdev->dev, "recv IPC response timeout\n"); + ret = -ETIMEDOUT; + goto out; + } + + if (ipc->nack_mask && + (ipc->recv_data & ipc->nack_mask) == ipc->nack) { + dev_err(&isp->pdev->dev, + "IPC NACK for cmd 0x%x\n", msgs[i].cmd); + ret = -ENODEV; + goto out; + } + + if (ipc->recv_data != msgs[i].expected_resp) { + dev_err(&isp->pdev->dev, + "expected resp: 0x%x, IPC response: 0x%x ", + msgs[i].expected_resp, ipc->recv_data); + ret = -EIO; + goto out; + } + } + + dev_dbg(&isp->pdev->dev, "bulk IPC commands completed\n"); + +out: + ipu_buttress_ipc_validity_close(isp, ipc); + mutex_unlock(&b->ipc_mutex); + return ret; +} +EXPORT_SYMBOL_GPL(ipu_buttress_ipc_send_bulk); + +static int +ipu_buttress_ipc_send(struct ipu_device *isp, + enum 
ipu_buttress_ipc_domain ipc_domain, + u32 ipc_msg, u32 size) +{ + struct ipu_ipc_buttress_bulk_msg msg = { + .cmd = ipc_msg, + .cmd_size = size, + }; + + return ipu_buttress_ipc_send_bulk(isp, ipc_domain, &msg, 1); +} + +static irqreturn_t ipu_buttress_call_isr(struct ipu_bus_device *adev) +{ + irqreturn_t ret = IRQ_WAKE_THREAD; + + if (!adev || !adev->adrv) + return IRQ_NONE; + + if (adev->adrv->isr) + ret = adev->adrv->isr(adev); + + if (ret == IRQ_WAKE_THREAD && !adev->adrv->isr_threaded) + ret = IRQ_NONE; + + adev->adrv->wake_isr_thread = (ret == IRQ_WAKE_THREAD); + + return ret; +} + +irqreturn_t ipu_buttress_isr(int irq, void *isp_ptr) +{ + struct ipu_device *isp = isp_ptr; + struct ipu_bus_device *adev[] = { isp->isys, isp->psys }; + struct ipu_buttress *b = &isp->buttress; + irqreturn_t ret = IRQ_NONE; + u32 disable_irqs = 0; + u32 irq_status; +#ifdef CONFIG_VIDEO_INTEL_IPU4 + u32 reg_irq_sts = BUTTRESS_REG_ISR_ENABLED_STATUS; +#else + u32 reg_irq_sts = BUTTRESS_REG_ISR_STATUS; +#endif + unsigned int i; + + dev_dbg(&isp->pdev->dev, "isr: Buttress interrupt handler\n"); + + pm_runtime_get(&isp->pdev->dev); + + if (!pm_runtime_active(&isp->pdev->dev)) { + irq_status = readl(isp->base + reg_irq_sts); + writel(irq_status, isp->base + BUTTRESS_REG_ISR_CLEAR); + pm_runtime_put(&isp->pdev->dev); + return IRQ_HANDLED; + } + + trace_ipu_perf_reg(BUTTRESS_REG_IS_FREQ_CTL, + readl(isp->base + BUTTRESS_REG_IS_FREQ_CTL)); + trace_ipu_perf_reg(BUTTRESS_REG_PS_FREQ_CTL, + readl(isp->base + BUTTRESS_REG_PS_FREQ_CTL)); + + irq_status = readl(isp->base + reg_irq_sts); + if (!irq_status) { + pm_runtime_put(&isp->pdev->dev); + return IRQ_NONE; + } + + do { + writel(irq_status, isp->base + BUTTRESS_REG_ISR_CLEAR); + + for (i = 0; i < ARRAY_SIZE(ipu_adev_irq_mask); i++) { + if (irq_status & ipu_adev_irq_mask[i]) { + irqreturn_t r = ipu_buttress_call_isr(adev[i]); + + if (r == IRQ_WAKE_THREAD) { + ret = IRQ_WAKE_THREAD; + disable_irqs |= ipu_adev_irq_mask[i]; + } else if (ret == 
IRQ_NONE && r == IRQ_HANDLED) { + ret = IRQ_HANDLED; + } + } + } + + if (irq_status & (BUTTRESS_ISR_IPC_FROM_CSE_IS_WAITING | + BUTTRESS_ISR_IPC_FROM_ISH_IS_WAITING | + BUTTRESS_ISR_IPC_EXEC_DONE_BY_CSE | + BUTTRESS_ISR_IPC_EXEC_DONE_BY_ISH | + BUTTRESS_ISR_SAI_VIOLATION) && + ret == IRQ_NONE) + ret = IRQ_HANDLED; + + if (irq_status & BUTTRESS_ISR_IPC_FROM_CSE_IS_WAITING) { + dev_dbg(&isp->pdev->dev, + "BUTTRESS_ISR_IPC_FROM_CSE_IS_WAITING\n"); + ipu_buttress_ipc_recv(isp, &b->cse, &b->cse.recv_data); + complete(&b->cse.recv_complete); + } + + if (irq_status & BUTTRESS_ISR_IPC_FROM_ISH_IS_WAITING) { + dev_dbg(&isp->pdev->dev, + "BUTTRESS_ISR_IPC_FROM_ISH_IS_WAITING\n"); + ipu_buttress_ipc_recv(isp, &b->ish, &b->ish.recv_data); + complete(&b->ish.recv_complete); + } + + if (irq_status & BUTTRESS_ISR_IPC_EXEC_DONE_BY_CSE) { + dev_dbg(&isp->pdev->dev, + "BUTTRESS_ISR_IPC_EXEC_DONE_BY_CSE\n"); + complete(&b->cse.send_complete); + } + + if (irq_status & BUTTRESS_ISR_IPC_EXEC_DONE_BY_ISH) { + dev_dbg(&isp->pdev->dev, + "BUTTRESS_ISR_IPC_EXEC_DONE_BY_ISH\n"); + complete(&b->ish.send_complete); + } + + if (irq_status & BUTTRESS_ISR_SAI_VIOLATION) { + dev_err(&isp->pdev->dev, + "BUTTRESS_ISR_SAI_VIOLATION\n"); + WARN_ON(1); + } + + irq_status = readl(isp->base + reg_irq_sts); + } while (irq_status && !isp->flr_done); + + if (disable_irqs) + writel(BUTTRESS_IRQS & ~disable_irqs, + isp->base + BUTTRESS_REG_ISR_ENABLE); + + pm_runtime_put(&isp->pdev->dev); + + return ret; +} + +irqreturn_t ipu_buttress_isr_threaded(int irq, void *isp_ptr) +{ + struct ipu_device *isp = isp_ptr; + struct ipu_bus_device *adev[] = { isp->isys, isp->psys }; + irqreturn_t ret = IRQ_NONE; + unsigned int i; + + dev_dbg(&isp->pdev->dev, "isr: Buttress threaded interrupt handler\n"); + + for (i = 0; i < ARRAY_SIZE(ipu_adev_irq_mask); i++) { + if (adev[i] && adev[i]->adrv && + adev[i]->adrv->wake_isr_thread && + adev[i]->adrv->isr_threaded(adev[i]) == IRQ_HANDLED) + ret = IRQ_HANDLED; + } + + 
writel(BUTTRESS_IRQS, isp->base + BUTTRESS_REG_ISR_ENABLE); + + return ret; +} + +int ipu_buttress_power(struct device *dev, + struct ipu_buttress_ctrl *ctrl, bool on) +{ + struct ipu_device *isp = to_ipu_bus_device(dev)->isp; + unsigned long tout_jfs; + u32 pwr_sts, val; + int ret = 0; + + if (!ctrl) + return 0; + + /* Until FLR completion nothing is expected to work */ + if (isp->flr_done) + return 0; + + mutex_lock(&isp->buttress.power_mutex); + + if (!on) { + val = 0; + pwr_sts = ctrl->pwr_sts_off << ctrl->pwr_sts_shift; + } else { + val = 1 << BUTTRESS_FREQ_CTL_START_SHIFT + | ctrl->divisor << ctrl->divisor_shift + | ctrl->qos_floor << BUTTRESS_FREQ_CTL_QOS_FLOOR_SHIFT; + + pwr_sts = ctrl->pwr_sts_on << ctrl->pwr_sts_shift; + } + + val |= ctrl->ovrd << ctrl->ovrd_shift; + writel(val, isp->base + ctrl->freq_ctl); + + tout_jfs = jiffies + msecs_to_jiffies(BUTTRESS_POWER_TIMEOUT); + do { + usleep_range(10, 40); + val = readl(isp->base + BUTTRESS_REG_PWR_STATE); + if ((val & ctrl->pwr_sts_mask) == pwr_sts) { + dev_dbg(&isp->pdev->dev, + "Rail state successfully changed\n"); + goto out; + } + } while (!time_after(jiffies, tout_jfs)); + + dev_err(&isp->pdev->dev, + "Timeout when trying to change state of the rail 0x%x\n", val); + + ret = -ETIMEDOUT; + +out: + ctrl->started = !ret && on; + + trace_ipu_perf_reg(BUTTRESS_REG_IS_FREQ_CTL, + readl(isp->base + BUTTRESS_REG_IS_FREQ_CTL)); + trace_ipu_perf_reg(BUTTRESS_REG_PS_FREQ_CTL, + readl(isp->base + BUTTRESS_REG_PS_FREQ_CTL)); + + mutex_unlock(&isp->buttress.power_mutex); + + return ret; +} + +static bool secure_mode_enable = 1; +module_param(secure_mode_enable, bool, 0660); +MODULE_PARM_DESC(secure_mode_enable, "IPU secure mode enable"); + +void ipu_buttress_set_secure_mode(struct ipu_device *isp) +{ + u8 retry = 100; + u32 val, read; + + /* + * HACK to disable possible secure mode. 
This can be + * reverted when CSE is disabling the secure mode + */ + read = readl(isp->base + BUTTRESS_REG_SECURITY_CTL); + + if (secure_mode_enable) + val = read |= 1 << BUTTRESS_SECURITY_CTL_FW_SECURE_MODE_SHIFT; + else + val = read & ~(1 << BUTTRESS_SECURITY_CTL_FW_SECURE_MODE_SHIFT); + + if (val == read) + return; + + writel(val, isp->base + BUTTRESS_REG_SECURITY_CTL); + + /* In B0, for some registers in buttress, because of a hw bug, write + * might not succeed at first attempt. Write twice until the + * write is successful + */ + writel(val, isp->base + BUTTRESS_REG_SECURITY_CTL); + + while (retry--) { + read = readl(isp->base + BUTTRESS_REG_SECURITY_CTL); + if (read == val) + break; + + writel(val, isp->base + BUTTRESS_REG_SECURITY_CTL); + + if (retry == 0) + dev_err(&isp->pdev->dev, + "update security control register failed\n"); + } +} +EXPORT_SYMBOL_GPL(ipu_buttress_set_secure_mode); + +bool ipu_buttress_get_secure_mode(struct ipu_device *isp) +{ + u32 val; + + val = readl(isp->base + BUTTRESS_REG_SECURITY_CTL); + + return val & (1 << BUTTRESS_SECURITY_CTL_FW_SECURE_MODE_SHIFT); +} + +bool ipu_buttress_auth_done(struct ipu_device *isp) +{ + u32 val; + + if (!isp->secure_mode) + return 1; + + val = readl(isp->base + BUTTRESS_REG_SECURITY_CTL); + + return (val & BUTTRESS_SECURITY_CTL_FW_SETUP_MASK) == + BUTTRESS_SECURITY_CTL_AUTH_DONE; +} +EXPORT_SYMBOL(ipu_buttress_auth_done); + +static void ipu_buttress_set_psys_ratio(struct ipu_device *isp, + unsigned int psys_divisor, + unsigned int psys_qos_floor) +{ + struct ipu_buttress_ctrl *ctrl = isp->psys_iommu->ctrl; + + mutex_lock(&isp->buttress.power_mutex); + + if (ctrl->divisor == psys_divisor && ctrl->qos_floor == psys_qos_floor) + goto out_mutex_unlock; + + ctrl->divisor = psys_divisor; + ctrl->qos_floor = psys_qos_floor; + + if (ctrl->started) { + /* + * According to documentation driver initiates DVFS + * transition by writing wanted ratio, floor ratio and start + * bit. 
No need to stop PS first + */ + writel(1 << BUTTRESS_FREQ_CTL_START_SHIFT | + ctrl-> + qos_floor << BUTTRESS_FREQ_CTL_QOS_FLOOR_SHIFT | + psys_divisor, isp->base + BUTTRESS_REG_PS_FREQ_CTL); + } + +out_mutex_unlock: + mutex_unlock(&isp->buttress.power_mutex); +} + +static void ipu_buttress_set_psys_freq(struct ipu_device *isp, + unsigned int freq) +{ + unsigned int psys_ratio = freq / BUTTRESS_PS_FREQ_STEP; + + if (isp->buttress.psys_force_ratio) + return; + + ipu_buttress_set_psys_ratio(isp, psys_ratio, psys_ratio); +} + +void +ipu_buttress_add_psys_constraint(struct ipu_device *isp, + struct ipu_buttress_constraint *constraint) +{ + struct ipu_buttress *b = &isp->buttress; + + mutex_lock(&b->cons_mutex); + list_add(&constraint->list, &b->constraints); + + if (constraint->min_freq > b->psys_min_freq) { + isp->buttress.psys_min_freq = min(constraint->min_freq, + b->psys_fused_freqs.max_freq); + ipu_buttress_set_psys_freq(isp, b->psys_min_freq); + } + mutex_unlock(&isp->buttress.cons_mutex); +} +EXPORT_SYMBOL_GPL(ipu_buttress_add_psys_constraint); + +void +ipu_buttress_remove_psys_constraint(struct ipu_device *isp, + struct ipu_buttress_constraint *constraint) +{ + struct ipu_buttress *b = &isp->buttress; + struct ipu_buttress_constraint *c; + unsigned int min_freq = 0; + + mutex_lock(&b->cons_mutex); + list_del(&constraint->list); + + if (constraint->min_freq >= b->psys_min_freq) { + list_for_each_entry(c, &b->constraints, list) + if (c->min_freq > min_freq) + min_freq = c->min_freq; + + b->psys_min_freq = clamp(min_freq, + b->psys_fused_freqs.efficient_freq, + b->psys_fused_freqs.max_freq); + ipu_buttress_set_psys_freq(isp, b->psys_min_freq); + } + mutex_unlock(&b->cons_mutex); +} +EXPORT_SYMBOL_GPL(ipu_buttress_remove_psys_constraint); + +int ipu_buttress_reset_authentication(struct ipu_device *isp) +{ + unsigned long tout_jfs; + u32 val; + + if (!isp->secure_mode) { + dev_dbg(&isp->pdev->dev, + "Non-secure mode -> skip authentication\n"); + return 0; + } + + 
writel(1 << BUTTRESS_FW_RESET_CTL_START_SHIFT, isp->base + + BUTTRESS_REG_FW_RESET_CTL); + + tout_jfs = jiffies + msecs_to_jiffies(BUTTRESS_CSE_FWRESET_TIMEOUT); + do { + val = readl(isp->base + BUTTRESS_REG_FW_RESET_CTL); + if (val & 1 << BUTTRESS_FW_RESET_CTL_DONE_SHIFT) { + dev_info(&isp->pdev->dev, + "FW reset for authentication done!\n"); + writel(0, isp->base + BUTTRESS_REG_FW_RESET_CTL); + /* + * Leave some time for HW restore. + */ + usleep_range(100, 1000); + return 0; + } + usleep_range(100, 1000); + } while (!time_after(jiffies, tout_jfs)); + + dev_err(&isp->pdev->dev, + "Timed out while resetting authentication state!\n"); + + return -ETIMEDOUT; +} + +int ipu_buttress_map_fw_image(struct ipu_bus_device *sys, + const struct firmware *fw, struct sg_table *sgt) +{ + struct page **pages; + const void *addr; + unsigned long n_pages, i; + int rval; + + n_pages = PAGE_ALIGN(fw->size) >> PAGE_SHIFT; + + pages = kmalloc_array(n_pages, sizeof(*pages), GFP_KERNEL); + if (!pages) + return -ENOMEM; + + addr = fw->data; + for (i = 0; i < n_pages; i++) { + struct page *p = vmalloc_to_page(addr); + + if (!p) { + rval = -ENODEV; + goto out; + } + pages[i] = p; + addr += PAGE_SIZE; + } + + rval = sg_alloc_table_from_pages(sgt, pages, n_pages, 0, fw->size, + GFP_KERNEL); + if (rval) { + rval = -ENOMEM; + goto out; + } + + n_pages = dma_map_sg(&sys->dev, sgt->sgl, sgt->nents, DMA_TO_DEVICE); + if (n_pages != sgt->nents) { + rval = -ENOMEM; + sg_free_table(sgt); + goto out; + } + + dma_sync_sg_for_device(&sys->dev, sgt->sgl, sgt->nents, DMA_TO_DEVICE); + +out: + kfree(pages); + + return rval; +} +EXPORT_SYMBOL_GPL(ipu_buttress_map_fw_image); + +int ipu_buttress_unmap_fw_image(struct ipu_bus_device *sys, + struct sg_table *sgt) +{ + dma_unmap_sg(&sys->dev, sgt->sgl, sgt->nents, DMA_TO_DEVICE); + sg_free_table(sgt); + + return 0; +} +EXPORT_SYMBOL_GPL(ipu_buttress_unmap_fw_image); + +int ipu_buttress_authenticate(struct ipu_device *isp) +{ + struct ipu_psys_pdata *psys_pdata; 
+ struct ipu_buttress *b = &isp->buttress; + u32 data; + int rval; + unsigned long tout_jfs; + + if (!isp->secure_mode) { + dev_dbg(&isp->pdev->dev, + "Non-secure mode -> skip authentication\n"); + return 0; + } + + psys_pdata = isp->psys->pdata; + + mutex_lock(&b->auth_mutex); + + rval = pm_runtime_get_sync(&isp->psys_iommu->dev); + if (rval < 0) { + dev_err(&isp->pdev->dev, "Runtime PM failed (%d)\n", rval); + goto iunit_power_off; + } + + if (ipu_buttress_auth_done(isp)) { + rval = 0; + goto iunit_power_off; + } + + /* + * Write address of FIT table to FW_SOURCE register + * Let's use fw address. I.e. not using FIT table yet + */ + data = lower_32_bits(isp->pkg_dir_dma_addr); + writel(data, isp->base + BUTTRESS_REG_FW_SOURCE_BASE_LO); + + data = upper_32_bits(isp->pkg_dir_dma_addr); + writel(data, isp->base + BUTTRESS_REG_FW_SOURCE_BASE_HI); + + /* + * Write boot_load into IU2CSEDATA0 + * Write sizeof(boot_load) | 0x2 << CLIENT_ID to + * IU2CSEDB.IU2CSECMD and set IU2CSEDB.IU2CSEBUSY as + */ + dev_info(&isp->pdev->dev, "Sending BOOT_LOAD to CSE\n"); + rval = ipu_buttress_ipc_send(isp, IPU_BUTTRESS_IPC_CSE, + BUTTRESS_IU2CSEDATA0_IPC_BOOT_LOAD, 1); + if (rval) { + dev_err(&isp->pdev->dev, "CSE boot_load failed\n"); + goto iunit_power_off; + } + + tout_jfs = jiffies + msecs_to_jiffies(BUTTRESS_CSE_BOOTLOAD_TIMEOUT); + do { + data = readl(isp->base + BUTTRESS_REG_SECURITY_CTL); + data &= BUTTRESS_SECURITY_CTL_FW_SETUP_MASK; + if (data == BUTTRESS_SECURITY_CTL_FW_SETUP_DONE) { + dev_dbg(&isp->pdev->dev, "CSE boot_load done\n"); + break; + } else if (data == BUTTRESS_SECURITY_CTL_AUTH_FAILED) { + dev_err(&isp->pdev->dev, "CSE boot_load failed\n"); + rval = -EINVAL; + goto iunit_power_off; + } + usleep_range(500, 1000); + } while (!time_after(jiffies, tout_jfs)); + + if (data != BUTTRESS_SECURITY_CTL_FW_SETUP_DONE) { + dev_err(&isp->pdev->dev, "CSE boot_load timed out\n"); + rval = -ETIMEDOUT; + goto iunit_power_off; + } + + tout_jfs = jiffies + 
msecs_to_jiffies(BUTTRESS_CSE_BOOTLOAD_TIMEOUT); + do { + data = readl(psys_pdata->base + BOOTLOADER_STATUS_OFFSET); + dev_dbg(&isp->pdev->dev, "%s: BOOTLOADER_STATUS 0x%x", + __func__, data); + if (data == BOOTLOADER_MAGIC_KEY) { + dev_dbg(&isp->pdev->dev, + "%s: Expected magic number found, breaking...", + __func__); + break; + } + usleep_range(500, 1000); + } while (!time_after(jiffies, tout_jfs)); + + if (data != BOOTLOADER_MAGIC_KEY) { + dev_dbg(&isp->pdev->dev, + "%s: CSE boot_load timed out...\n", __func__); + rval = -ETIMEDOUT; + goto iunit_power_off; + } + + /* + * Write authenticate_run into IU2CSEDATA0 + * Write sizeof(boot_load) | 0x2 << CLIENT_ID to + * IU2CSEDB.IU2CSECMD and set IU2CSEDB.IU2CSEBUSY as + */ + dev_info(&isp->pdev->dev, "Sending AUTHENTICATE_RUN to CSE\n"); + rval = ipu_buttress_ipc_send(isp, IPU_BUTTRESS_IPC_CSE, + BUTTRESS_IU2CSEDATA0_IPC_AUTHENTICATE_RUN, + 1); + if (rval) { + dev_err(&isp->pdev->dev, "CSE authenticate_run failed\n"); + goto iunit_power_off; + } + + tout_jfs = jiffies; + tout_jfs += msecs_to_jiffies(BUTTRESS_CSE_AUTHENTICATE_TIMEOUT); + do { + data = readl(isp->base + BUTTRESS_REG_SECURITY_CTL); + data &= BUTTRESS_SECURITY_CTL_FW_SETUP_MASK; + if (data == BUTTRESS_SECURITY_CTL_AUTH_DONE) { + dev_dbg(&isp->pdev->dev, "CSE authenticate_run done\n"); + break; + } else if (data == BUTTRESS_SECURITY_CTL_AUTH_FAILED) { + dev_err(&isp->pdev->dev, + "CSE authenticate_run failed\n"); + rval = -EINVAL; + goto iunit_power_off; + } + usleep_range(500, 1000); + } while (!time_after(jiffies, tout_jfs)); + + if (data != BUTTRESS_SECURITY_CTL_AUTH_DONE) { + dev_err(&isp->pdev->dev, "CSE authenticate_run timed out\n"); + rval = -ETIMEDOUT; + goto iunit_power_off; + } + +iunit_power_off: + pm_runtime_put(&isp->psys_iommu->dev); + + mutex_unlock(&b->auth_mutex); + + return rval; +} +EXPORT_SYMBOL(ipu_buttress_authenticate); + +static int ipu_buttress_send_tsc_request(struct ipu_device *isp) +{ + unsigned long tout_jfs = 
msecs_to_jiffies(5); + + writel(BUTTRESS_FABRIC_CMD_START_TSC_SYNC, + isp->base + BUTTRESS_REG_FABRIC_CMD); + + tout_jfs += jiffies; + do { + u32 val; + + val = readl(isp->base + BUTTRESS_REG_PWR_STATE); + val = (val & BUTTRESS_PWR_STATE_HH_STATUS_MASK) >> + BUTTRESS_PWR_STATE_HH_STATUS_SHIFT; + + switch (val) { + case BUTTRESS_PWR_STATE_HH_STATE_DONE: + dev_dbg(&isp->pdev->dev, "Start tsc sync completed!\n"); + return 0; + case BUTTRESS_PWR_STATE_HH_STATE_ERR: + dev_err(&isp->pdev->dev, "Start tsc sync failed!\n"); + return -EINVAL; + default: + usleep_range(500, 1000); + break; + } + } while (!time_after(jiffies, tout_jfs)); + + return -ETIMEDOUT; +} + +int ipu_buttress_start_tsc_sync(struct ipu_device *isp) +{ + unsigned int i; + + for (i = 0; i < BUTTRESS_TSC_SYNC_RESET_TRIAL_MAX; i++) { + int ret; + + ret = ipu_buttress_send_tsc_request(isp); + if (ret == -ETIMEDOUT) { + u32 val; + /* set tsw soft reset */ + val = readl(isp->base + BUTTRESS_REG_TSW_CTL); + val = val | BUTTRESS_TSW_CTL_SOFT_RESET; + writel(val, isp->base + BUTTRESS_REG_TSW_CTL); + /* clear tsw soft reset */ + val = val & (~BUTTRESS_TSW_CTL_SOFT_RESET); + writel(val, isp->base + BUTTRESS_REG_TSW_CTL); + + continue; + } + return ret; + } + + dev_err(&isp->pdev->dev, "TSC sync failed(timeout).\n"); + + return -ETIMEDOUT; +} +EXPORT_SYMBOL(ipu_buttress_start_tsc_sync); + +struct clk_ipu_sensor { + struct ipu_device *isp; + struct clk_hw hw; + unsigned int id; + unsigned long rate; +}; + +#define to_clk_ipu_sensor(_hw) container_of(_hw, struct clk_ipu_sensor, hw) + +static int ipu_buttress_clk_pll_prepare(struct clk_hw *hw) +{ + struct clk_ipu_sensor *ck = to_clk_ipu_sensor(hw); + int ret; + + /* Workaround needed to get sensor clock running in some cases */ + ret = pm_runtime_get_sync(&ck->isp->isys->dev); + return ret >= 0 ? 
0 : ret; +} + +static void ipu_buttress_clk_pll_unprepare(struct clk_hw *hw) +{ + struct clk_ipu_sensor *ck = to_clk_ipu_sensor(hw); + + /* Workaround needed to get sensor clock stopped in some cases */ + pm_runtime_put(&ck->isp->isys->dev); +} + +static int ipu_buttress_clk_pll_enable(struct clk_hw *hw) +{ + struct clk_ipu_sensor *ck = to_clk_ipu_sensor(hw); + u32 val; + unsigned int i; + + /* + * Start bit behaves like master clock request towards ICLK. + * It is needed regardless of the 24 MHz or per clock out pll + * setting. + */ + val = readl(ck->isp->base + BUTTRESS_REG_SENSOR_FREQ_CTL); + val |= 1 << BUTTRESS_FREQ_CTL_START_SHIFT; + val &= ~BUTTRESS_SENSOR_FREQ_CTL_OSC_OUT_FREQ_MASK(ck->id); + for (i = 0; i < ARRAY_SIZE(sensor_clk_freqs); i++) + if (sensor_clk_freqs[i].rate == ck->rate) + break; + + if (i < ARRAY_SIZE(sensor_clk_freqs)) + val |= sensor_clk_freqs[i].val << + BUTTRESS_SENSOR_FREQ_CTL_OSC_OUT_FREQ_SHIFT(ck->id); + else + val |= BUTTRESS_SENSOR_FREQ_CTL_OSC_OUT_FREQ_DEFAULT(ck->id); + + writel(val, ck->isp->base + BUTTRESS_REG_SENSOR_FREQ_CTL); + + return 0; +} + +static void ipu_buttress_clk_pll_disable(struct clk_hw *hw) +{ + struct clk_ipu_sensor *ck = to_clk_ipu_sensor(hw); + u32 val; + int i; + + val = readl(ck->isp->base + BUTTRESS_REG_SENSOR_CLK_CTL); + for (i = 0; i < IPU_BUTTRESS_NUM_OF_SENS_CKS; i++) { + if (val & + (1 << BUTTRESS_SENSOR_CLK_CTL_OSC_CLK_OUT_EN_SHIFT(i))) + return; + } + + /* See enable control above */ + val = readl(ck->isp->base + BUTTRESS_REG_SENSOR_FREQ_CTL); + val &= ~(1 << BUTTRESS_FREQ_CTL_START_SHIFT); + writel(val, ck->isp->base + BUTTRESS_REG_SENSOR_FREQ_CTL); +} + +static int ipu_buttress_clk_enable(struct clk_hw *hw) +{ + struct clk_ipu_sensor *ck = to_clk_ipu_sensor(hw); + u32 val; + + val = readl(ck->isp->base + BUTTRESS_REG_SENSOR_CLK_CTL); + val |= 1 << BUTTRESS_SENSOR_CLK_CTL_OSC_CLK_OUT_EN_SHIFT(ck->id); + + /* Enable dynamic sensor clock */ + val |= 1 << 
BUTTRESS_SENSOR_CLK_CTL_OSC_CLK_OUT_SEL_SHIFT(ck->id); + writel(val, ck->isp->base + BUTTRESS_REG_SENSOR_CLK_CTL); + + return 0; +} + +static void ipu_buttress_clk_disable(struct clk_hw *hw) +{ + struct clk_ipu_sensor *ck = to_clk_ipu_sensor(hw); + u32 val; + + val = readl(ck->isp->base + BUTTRESS_REG_SENSOR_CLK_CTL); + val &= ~(1 << BUTTRESS_SENSOR_CLK_CTL_OSC_CLK_OUT_EN_SHIFT(ck->id)); + writel(val, ck->isp->base + BUTTRESS_REG_SENSOR_CLK_CTL); +} + +static long ipu_buttress_clk_round_rate(struct clk_hw *hw, + unsigned long rate, + unsigned long *parent_rate) +{ + unsigned long best = ULONG_MAX; + unsigned long round_rate = 0; + int i; + + for (i = 0; i < ARRAY_SIZE(sensor_clk_freqs); i++) { + long diff = sensor_clk_freqs[i].rate - rate; + + if (diff == 0) + return rate; + + diff = abs(diff); + if (diff < best) { + best = diff; + round_rate = sensor_clk_freqs[i].rate; + } + } + + return round_rate; +} + +static unsigned long +ipu_buttress_clk_recalc_rate(struct clk_hw *hw, unsigned long parent_rate) +{ + struct clk_ipu_sensor *ck = to_clk_ipu_sensor(hw); + + return ck->rate; +} + +static int ipu_buttress_clk_set_rate(struct clk_hw *hw, + unsigned long rate, + unsigned long parent_rate) +{ + struct clk_ipu_sensor *ck = to_clk_ipu_sensor(hw); + + /* + * R N P PVD PLLout + * 1 45 128 2 6.75 + * 1 40 96 2 8 + * 1 40 80 2 9.6 + * 1 15 20 4 14.4 + * 1 40 32 2 24 + * 1 65 48 1 26 + * + */ + ck->rate = rate; + + return 0; +} + +static const struct clk_ops ipu_buttress_clk_sensor_ops = { + .enable = ipu_buttress_clk_enable, + .disable = ipu_buttress_clk_disable, +}; + +static const struct clk_ops ipu_buttress_clk_sensor_ops_parent = { + .enable = ipu_buttress_clk_pll_enable, + .disable = ipu_buttress_clk_pll_disable, + .prepare = ipu_buttress_clk_pll_prepare, + .unprepare = ipu_buttress_clk_pll_unprepare, + .round_rate = ipu_buttress_clk_round_rate, + .recalc_rate = ipu_buttress_clk_recalc_rate, + .set_rate = ipu_buttress_clk_set_rate, +}; + +static struct clk_init_data 
ipu_buttress_sensor_clk_data[] = { + { + .name = "OSC_CLK_OUT0", + .ops = &ipu_buttress_clk_sensor_ops, + .parent_names = (const char *[]){"ipu_sensor_pll0"}, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + }, + { + .name = "OSC_CLK_OUT1", + .ops = &ipu_buttress_clk_sensor_ops, + .parent_names = (const char *[]){"ipu_sensor_pll1"}, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + }, + { + .name = "OSC_CLK_OUT2", + .ops = &ipu_buttress_clk_sensor_ops, + .parent_names = (const char *[]){"ipu_sensor_pll2"}, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + }, +}; + +static struct clk_init_data ipu_buttress_sensor_pll_data[] = { + { + .name = "ipu_sensor_pll0", + .ops = &ipu_buttress_clk_sensor_ops_parent, + }, + { + .name = "ipu_sensor_pll1", + .ops = &ipu_buttress_clk_sensor_ops_parent, + }, + { + .name = "ipu_sensor_pll2", + .ops = &ipu_buttress_clk_sensor_ops_parent, + }, +}; + +static void ipu_buttress_read_psys_fused_freqs(struct ipu_device *isp) +{ + struct ipu_buttress_fused_freqs *fused_freq = + &isp->buttress.psys_fused_freqs; + u32 reg_val, max_ratio, min_ratio, efficient_ratio; + + reg_val = readl(isp->base + BUTTRESS_REG_PS_FREQ_CAPABILITIES); + + min_ratio = (reg_val & + BUTTRESS_PS_FREQ_CAPABILITIES_MIN_RATIO_MASK) >> + BUTTRESS_PS_FREQ_CAPABILITIES_MIN_RATIO_SHIFT; + max_ratio = (reg_val & + BUTTRESS_PS_FREQ_CAPABILITIES_MAX_RATIO_MASK) >> + BUTTRESS_PS_FREQ_CAPABILITIES_MAX_RATIO_SHIFT; + efficient_ratio = + (reg_val & + BUTTRESS_PS_FREQ_CAPABILITIES_EFFICIENT_RATIO_MASK) >> + BUTTRESS_PS_FREQ_CAPABILITIES_EFFICIENT_RATIO_SHIFT; + + fused_freq->min_freq = min_ratio * BUTTRESS_PS_FREQ_STEP; + fused_freq->max_freq = max_ratio * BUTTRESS_PS_FREQ_STEP; + fused_freq->efficient_freq = efficient_ratio * BUTTRESS_PS_FREQ_STEP; +} + +#ifdef I2C_WA +/* + * The dev_id was hard code in platform data, as i2c bus number + * may change dynamiclly, we need to update this bus id + * accordingly. 
+ * + * @adapter_id: hardware i2c adapter id, this was fixed in platform data + * return: i2c bus id registered in system + */ +int ipu_get_i2c_bus_id(int adapter_id) +{ + struct i2c_adapter *adapter; + char name[32]; + int i = 0; + + snprintf(name, sizeof(name), "i2c_designware.%d", adapter_id); + while ((adapter = i2c_get_adapter(i)) != NULL) { + struct device *parent = adapter->dev.parent; + + if (parent && !strncmp(name, dev_name(parent), sizeof(name))) + return i; + i++; + } + + /* Not found, should never happen! */ + WARN_ON_ONCE(1); + return -1; +} +EXPORT_SYMBOL_GPL(ipu_get_i2c_bus_id); +#endif + +static int ipu_buttress_clk_init(struct ipu_device *isp) +{ + struct ipu_buttress *b = &isp->buttress; + struct ipu_isys_subdev_pdata *pdata = isp->pdev->dev.platform_data; + struct ipu_isys_clk_mapping *clkmap = pdata ? pdata->clk_map : NULL; + struct clk_init_data *clk_data_parent; + struct clk_init_data *clk_data; + int i, rval; + unsigned int num_plls; + + ipu_buttress_read_psys_fused_freqs(isp); + isp->buttress.psys_min_freq = b->psys_fused_freqs.efficient_freq; + + clk_data_parent = ipu_buttress_sensor_pll_data; + + num_plls = ARRAY_SIZE(ipu_buttress_sensor_pll_data); + + for (i = 0; i < num_plls; i++) { + struct clk_ipu_sensor *parent_clk = + devm_kzalloc(&isp->pdev->dev, + sizeof(*parent_clk), GFP_KERNEL); + + if (!parent_clk) { + rval = -ENOMEM; + goto err; + } + + parent_clk->hw.init = &clk_data_parent[i]; + parent_clk->isp = isp; + parent_clk->id = i; + + b->pll_sensor[i] = clk_register(NULL, &parent_clk->hw); + if (IS_ERR(b->pll_sensor[i])) { + rval = PTR_ERR(b->pll_sensor[i]); + goto err; + } + } + + clk_data = ipu_buttress_sensor_clk_data; + + for (i = 0; i < IPU_BUTTRESS_NUM_OF_SENS_CKS; i++) { + char buffer[16]; /* max for clk_register_clkdev */ + unsigned int parent_index = 0; + struct clk_ipu_sensor *my_clk = + devm_kzalloc(&isp->pdev->dev, sizeof(*my_clk), + GFP_KERNEL); + + if (!my_clk) { + rval = -ENOMEM; + goto err; + } + + if (i < num_plls) 
+ parent_index = i; + + my_clk->hw.init = &clk_data[i]; + + my_clk->id = i; + my_clk->isp = isp; + + b->clk_sensor[i] = clk_register(NULL, &my_clk->hw); + if (IS_ERR(b->clk_sensor[i])) { + rval = PTR_ERR(b->clk_sensor[i]); + goto err; + } + rval = clk_set_parent(b->clk_sensor[i], + b->pll_sensor[parent_index]); + if (rval) + goto err; + + /* Register generic clocks for sensor driver */ + snprintf(buffer, sizeof(buffer), "ipu_cam_clk%d", i); + rval = clk_register_clkdev(b->clk_sensor[i], buffer, NULL); + if (rval) + goto err; + } + + /* Now map sensor clocks */ + if (!clkmap) + return 0; + + while (clkmap->clkdev_data.dev_id) { +#ifdef I2C_WA + char *dev_id = kstrdup(clkmap->clkdev_data.dev_id, GFP_KERNEL); + int adapter_id = clkmap->clkdev_data.dev_id[0] - '0'; + char *addr = strpbrk(clkmap->clkdev_data.dev_id, "-"); + int bus_id = ipu_get_i2c_bus_id(adapter_id); + + snprintf(dev_id, PAGE_SIZE, "%d-%s", bus_id, addr + 1); +#endif + + /* + * Lookup table must be NULL terminated + * CLKDEV_INIT(NULL, NULL, NULL) + */ + for (i = 0; i < IPU_BUTTRESS_NUM_OF_SENS_CKS; i++) { + if (!strcmp(clkmap->platform_clock_name, + clk_data[i].name)) { + clkmap->clkdev_data.clk = b->clk_sensor[i]; +#ifdef I2C_WA + clkmap->clkdev_data.dev_id = dev_id; +#endif + clkdev_add(&clkmap->clkdev_data); + break; + } + } + clkmap++; + } + + return 0; + +err: + /* It is safe to call clk_unregister with null pointer */ + for (i = IPU_BUTTRESS_NUM_OF_SENS_CKS - 1; i >= 0; i--) + clk_unregister(b->clk_sensor[i]); + + for (i = num_plls - 1; i >= 0; i--) + clk_unregister(b->pll_sensor[i]); + + return rval; +} + +static void ipu_buttress_clk_exit(struct ipu_device *isp) +{ + struct ipu_buttress *b = &isp->buttress; + int i; + + /* It is safe to call clk_unregister with null pointer */ + for (i = 0; i < IPU_BUTTRESS_NUM_OF_SENS_CKS; i++) + clk_unregister(b->clk_sensor[i]); + + for (i = 0; i < ARRAY_SIZE(ipu_buttress_sensor_pll_data); i++) + clk_unregister(b->pll_sensor[i]); +} + +int 
ipu_buttress_tsc_read(struct ipu_device *isp, u64 *val) +{ + struct ipu_buttress *b = &isp->buttress; + u32 tsc_hi, tsc_lo_1, tsc_lo_2, tsc_lo_3, tsc_chk = 0; + unsigned long flags; + short retry = IPU_BUTTRESS_TSC_RETRY; + + do { + spin_lock_irqsave(&b->tsc_lock, flags); + tsc_hi = readl(isp->base + BUTTRESS_REG_TSC_HI); + + /* + * We are occasionally getting broken values from + * HH. Reading 3 times and doing sanity check as a WA + */ + tsc_lo_1 = readl(isp->base + BUTTRESS_REG_TSC_LO); + tsc_lo_2 = readl(isp->base + BUTTRESS_REG_TSC_LO); + tsc_lo_3 = readl(isp->base + BUTTRESS_REG_TSC_LO); + tsc_chk = readl(isp->base + BUTTRESS_REG_TSC_HI); + spin_unlock_irqrestore(&b->tsc_lock, flags); + if (tsc_chk == tsc_hi && tsc_lo_2 && + tsc_lo_2 - tsc_lo_1 <= IPU_BUTTRESS_TSC_LIMIT && + tsc_lo_3 - tsc_lo_2 <= IPU_BUTTRESS_TSC_LIMIT) { + *val = (u64)tsc_hi << 32 | tsc_lo_2; + return 0; + } + + /* + * Trace error only if limit checkings fails at least + * by two consecutive readings. + */ + if (retry < IPU_BUTTRESS_TSC_RETRY - 1 && tsc_lo_2) + dev_err(&isp->pdev->dev, + "%s = %u, %s = %u, %s = %u, %s = %u, %s = %u", + "failure: tsc_hi", tsc_hi, + "tsc_chk", tsc_chk, + "tsc_lo_1", tsc_lo_1, + "tsc_lo_2", tsc_lo_2, "tsc_lo_3", tsc_lo_3); + } while (retry--); + + if (!tsc_chk && !tsc_lo_2) + return -EIO; + + WARN_ON_ONCE(1); + + return -EINVAL; +} +EXPORT_SYMBOL_GPL(ipu_buttress_tsc_read); + +#ifdef CONFIG_DEBUG_FS + +static int ipu_buttress_reg_open(struct inode *inode, struct file *file) +{ + if (!inode->i_private) + return -EACCES; + + file->private_data = inode->i_private; + return 0; +} + +static ssize_t ipu_buttress_reg_read(struct file *file, char __user *buf, + size_t count, loff_t *ppos) +{ + struct debugfs_reg32 *reg = file->private_data; + u8 tmp[11]; + u32 val = readl((void __iomem *)reg->offset); + int len = scnprintf(tmp, sizeof(tmp), "0x%08x", val); + + return simple_read_from_buffer(buf, len, ppos, &tmp, len); +} + +static ssize_t ipu_buttress_reg_write(struct 
file *file, + const char __user *buf, + size_t count, loff_t *ppos) +{ + struct debugfs_reg32 *reg = file->private_data; + u32 val; + int rval; + + rval = kstrtou32_from_user(buf, count, 0, &val); + if (rval) + return rval; + + writel(val, (void __iomem *)reg->offset); + + return count; +} + +static struct debugfs_reg32 buttress_regs[] = { + {"IU2CSEDB0", BUTTRESS_REG_IU2CSEDB0}, + {"IU2CSEDATA0", BUTTRESS_REG_IU2CSEDATA0}, + {"CSE2IUDB0", BUTTRESS_REG_CSE2IUDB0}, + {"CSE2IUDATA0", BUTTRESS_REG_CSE2IUDATA0}, + {"CSE2IUCSR", BUTTRESS_REG_CSE2IUCSR}, + {"IU2CSECSR", BUTTRESS_REG_IU2CSECSR}, +}; + +static const struct file_operations ipu_buttress_reg_fops = { + .owner = THIS_MODULE, + .open = ipu_buttress_reg_open, + .read = ipu_buttress_reg_read, + .write = ipu_buttress_reg_write, +}; + +static int ipu_buttress_start_tsc_sync_set(void *data, u64 val) +{ + struct ipu_device *isp = data; + + return ipu_buttress_start_tsc_sync(isp); +} + +DEFINE_SIMPLE_ATTRIBUTE(ipu_buttress_start_tsc_sync_fops, NULL, + ipu_buttress_start_tsc_sync_set, "%llu\n"); + +u64 ipu_buttress_tsc_ticks_to_ns(u64 ticks) +{ + u64 ns = ticks * 10000; + /* + * TSC clock frequency is 19.2MHz, + * converting TSC tick count to ns is calculated by: + * ns = ticks * 1000 000 000 / 19.2Mhz + * = ticks * 1000 000 000 / 19200000Hz + * = ticks * 10000 / 192 ns + */ + do_div(ns, 192); + + return ns; +} +EXPORT_SYMBOL_GPL(ipu_buttress_tsc_ticks_to_ns); + +static int ipu_buttress_tsc_get(void *data, u64 *val) +{ + return ipu_buttress_tsc_read(data, val); +} +DEFINE_SIMPLE_ATTRIBUTE(ipu_buttress_tsc_fops, ipu_buttress_tsc_get, + NULL, "%llu\n"); + +static int ipu_buttress_psys_force_freq_get(void *data, u64 *val) +{ + struct ipu_device *isp = data; + + *val = isp->buttress.psys_force_ratio * BUTTRESS_PS_FREQ_STEP; + + return 0; +} + +static int ipu_buttress_psys_force_freq_set(void *data, u64 val) +{ + struct ipu_device *isp = data; + + if (val && (val < BUTTRESS_MIN_FORCE_PS_FREQ || + val > 
BUTTRESS_MAX_FORCE_PS_FREQ)) + return -EINVAL; + + do_div(val, BUTTRESS_PS_FREQ_STEP); + isp->buttress.psys_force_ratio = val; + + if (isp->buttress.psys_force_ratio) + ipu_buttress_set_psys_ratio(isp, + isp->buttress.psys_force_ratio, + isp->buttress.psys_force_ratio); + else + ipu_buttress_set_psys_freq(isp, isp->buttress.psys_min_freq); + + return 0; +} + +DEFINE_SIMPLE_ATTRIBUTE(ipu_buttress_psys_force_freq_fops, + ipu_buttress_psys_force_freq_get, + ipu_buttress_psys_force_freq_set, "%llu\n"); + +DEFINE_SIMPLE_ATTRIBUTE(ipu_buttress_psys_freq_fops, + ipu_buttress_psys_freq_get, NULL, "%llu\n"); + +DEFINE_SIMPLE_ATTRIBUTE(ipu_buttress_isys_freq_fops, + ipu_buttress_isys_freq_get, NULL, "%llu\n"); + +int ipu_buttress_debugfs_init(struct ipu_device *isp) +{ + struct debugfs_reg32 *reg = + devm_kcalloc(&isp->pdev->dev, ARRAY_SIZE(buttress_regs), + sizeof(*reg), GFP_KERNEL); + struct dentry *dir, *file; + int i; + + if (!reg) + return -ENOMEM; + + dir = debugfs_create_dir("buttress", isp->ipu_dir); + if (!dir) + return -ENOMEM; + + for (i = 0; i < ARRAY_SIZE(buttress_regs); i++, reg++) { + reg->offset = (unsigned long)isp->base + + buttress_regs[i].offset; + reg->name = buttress_regs[i].name; + file = debugfs_create_file(reg->name, 0700, + dir, reg, &ipu_buttress_reg_fops); + if (!file) + goto err; + } + + file = debugfs_create_file("start_tsc_sync", 0200, dir, isp, + &ipu_buttress_start_tsc_sync_fops); + if (!file) + goto err; + file = debugfs_create_file("tsc", 0400, dir, isp, + &ipu_buttress_tsc_fops); + if (!file) + goto err; + file = debugfs_create_file("psys_force_freq", 0700, dir, isp, + &ipu_buttress_psys_force_freq_fops); + if (!file) + goto err; + + file = debugfs_create_file("psys_freq", 0400, dir, isp, + &ipu_buttress_psys_freq_fops); + if (!file) + goto err; + + file = debugfs_create_file("isys_freq", 0400, dir, isp, + &ipu_buttress_isys_freq_fops); + if (!file) + goto err; + + return 0; +err: + debugfs_remove_recursive(dir); + return -ENOMEM; +} + 
+#endif /* CONFIG_DEBUG_FS */ + +static ssize_t +ipu_buttress_psys_fused_min_freq_get(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct ipu_device *isp = pci_get_drvdata(to_pci_dev(dev)); + + return snprintf(buf, PAGE_SIZE, "%u\n", + isp->buttress.psys_fused_freqs.min_freq); +} + +static DEVICE_ATTR(psys_fused_min_freq, 0444, + ipu_buttress_psys_fused_min_freq_get, NULL); + +static ssize_t +ipu_buttress_psys_fused_max_freq_get(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct ipu_device *isp = pci_get_drvdata(to_pci_dev(dev)); + + return snprintf(buf, PAGE_SIZE, "%u\n", + isp->buttress.psys_fused_freqs.max_freq); +} + +static DEVICE_ATTR(psys_fused_max_freq, 0444, + ipu_buttress_psys_fused_max_freq_get, NULL); + +static ssize_t +ipu_buttress_psys_fused_efficient_freq_get(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct ipu_device *isp = pci_get_drvdata(to_pci_dev(dev)); + + return snprintf(buf, PAGE_SIZE, "%u\n", + isp->buttress.psys_fused_freqs.efficient_freq); +} + +static DEVICE_ATTR(psys_fused_efficient_freq, 0444, + ipu_buttress_psys_fused_efficient_freq_get, NULL); + +int ipu_buttress_restore(struct ipu_device *isp) +{ + struct ipu_buttress *b = &isp->buttress; + + writel(BUTTRESS_IRQS, isp->base + BUTTRESS_REG_ISR_CLEAR); + writel(BUTTRESS_IRQS, isp->base + BUTTRESS_REG_ISR_ENABLE); + writel(b->wdt_cached_value, isp->base + BUTTRESS_REG_WDT); + + return 0; +} +EXPORT_SYMBOL(ipu_buttress_restore); + +int ipu_buttress_init(struct ipu_device *isp) +{ + struct ipu_buttress *b = &isp->buttress; + int rval, ipc_reset_retry = BUTTRESS_CSE_IPC_RESET_RETRY; + + mutex_init(&b->power_mutex); + mutex_init(&b->auth_mutex); + mutex_init(&b->cons_mutex); + mutex_init(&b->ipc_mutex); + spin_lock_init(&b->tsc_lock); + init_completion(&b->ish.send_complete); + init_completion(&b->cse.send_complete); + init_completion(&b->ish.recv_complete); + init_completion(&b->cse.recv_complete); + + 
b->cse.nack = BUTTRESS_CSE2IUDATA0_IPC_NACK; + b->cse.nack_mask = BUTTRESS_CSE2IUDATA0_IPC_NACK_MASK; + b->cse.csr_in = BUTTRESS_REG_CSE2IUCSR; + b->cse.csr_out = BUTTRESS_REG_IU2CSECSR; + b->cse.db0_in = BUTTRESS_REG_CSE2IUDB0; + b->cse.db0_out = BUTTRESS_REG_IU2CSEDB0; + b->cse.data0_in = BUTTRESS_REG_CSE2IUDATA0; + b->cse.data0_out = BUTTRESS_REG_IU2CSEDATA0; + + b->ish.csr_in = BUTTRESS_REG_ISH2IUCSR; + b->ish.csr_out = BUTTRESS_REG_IU2ISHCSR; + b->ish.db0_in = BUTTRESS_REG_ISH2IUDB0; + b->ish.db0_out = BUTTRESS_REG_IU2ISHDB0; + b->ish.data0_in = BUTTRESS_REG_ISH2IUDATA0; + b->ish.data0_out = BUTTRESS_REG_IU2ISHDATA0; + INIT_LIST_HEAD(&b->constraints); + + rval = ipu_buttress_clk_init(isp); + if (rval) { + dev_err(&isp->pdev->dev, "Clock init failed\n"); + goto err_mutex_destroy; + } + + ipu_buttress_set_secure_mode(isp); + isp->secure_mode = ipu_buttress_get_secure_mode(isp); + if (isp->secure_mode != secure_mode_enable) + dev_warn(&isp->pdev->dev, "Unable to set secure mode!\n"); + + dev_info(&isp->pdev->dev, "IPU in %s mode\n", + isp->secure_mode ? "secure" : "non-secure"); + + b->wdt_cached_value = readl(isp->base + BUTTRESS_REG_WDT); + writel(BUTTRESS_IRQS, isp->base + BUTTRESS_REG_ISR_CLEAR); + writel(BUTTRESS_IRQS, isp->base + BUTTRESS_REG_ISR_ENABLE); + + rval = device_create_file(&isp->pdev->dev, + &dev_attr_psys_fused_min_freq); + if (rval) { + dev_err(&isp->pdev->dev, "Create min freq file failed\n"); + goto err_clk_unregister; + } + + rval = device_create_file(&isp->pdev->dev, + &dev_attr_psys_fused_max_freq); + if (rval) { + dev_err(&isp->pdev->dev, "Create max freq file failed\n"); + goto err_remove_min_freq_file; + } + + rval = device_create_file(&isp->pdev->dev, + &dev_attr_psys_fused_efficient_freq); + if (rval) { + dev_err(&isp->pdev->dev, "Create efficient freq file failed\n"); + goto err_remove_max_freq_file; + } + + /* + * We want to retry couple of time in case CSE initialization + * is delayed for reason or another. 
+ */ + do { + rval = ipu_buttress_ipc_reset(isp, &b->cse); + if (rval) { + dev_err(&isp->pdev->dev, + "IPC reset protocol failed, retry!\n"); + } else { + dev_dbg(&isp->pdev->dev, "IPC reset completed!\n"); + return 0; + } + } while (ipc_reset_retry--); + + dev_err(&isp->pdev->dev, "IPC reset protocol failed\n"); + +err_remove_max_freq_file: + device_remove_file(&isp->pdev->dev, &dev_attr_psys_fused_max_freq); +err_remove_min_freq_file: + device_remove_file(&isp->pdev->dev, &dev_attr_psys_fused_min_freq); +err_clk_unregister: + ipu_buttress_clk_exit(isp); +err_mutex_destroy: + mutex_destroy(&b->power_mutex); + mutex_destroy(&b->auth_mutex); + mutex_destroy(&b->cons_mutex); + mutex_destroy(&b->ipc_mutex); + + return rval; +} + +void ipu_buttress_exit(struct ipu_device *isp) +{ + struct ipu_buttress *b = &isp->buttress; + + writel(0, isp->base + BUTTRESS_REG_ISR_ENABLE); + + device_remove_file(&isp->pdev->dev, + &dev_attr_psys_fused_efficient_freq); + device_remove_file(&isp->pdev->dev, &dev_attr_psys_fused_max_freq); + device_remove_file(&isp->pdev->dev, &dev_attr_psys_fused_min_freq); + + ipu_buttress_clk_exit(isp); + + mutex_destroy(&b->power_mutex); + mutex_destroy(&b->auth_mutex); + mutex_destroy(&b->cons_mutex); + mutex_destroy(&b->ipc_mutex); +} diff --git a/drivers/media/pci/intel/ipu-buttress.h b/drivers/media/pci/intel/ipu-buttress.h new file mode 100644 index 0000000000000..2c5e93af6d544 --- /dev/null +++ b/drivers/media/pci/intel/ipu-buttress.h @@ -0,0 +1,138 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (C) 2013 - 2018 Intel Corporation */ + +#ifndef IPU_BUTTRESS_H +#define IPU_BUTTRESS_H + +#include +#include +#include "ipu.h" + +#define IPU_BUTTRESS_NUM_OF_SENS_CKS 3 +#define IPU_BUTTRESS_NUM_OF_PLL_CKS 3 +#define IPU_BUTTRESS_TSC_CLK 19200000 + +#define BUTTRESS_POWER_TIMEOUT 200 + +#define BUTTRESS_PS_FREQ_STEP 25U +#define BUTTRESS_MIN_FORCE_PS_FREQ (BUTTRESS_PS_FREQ_STEP * 8) +#define BUTTRESS_MAX_FORCE_PS_FREQ (BUTTRESS_PS_FREQ_STEP * 
32) + +struct ipu_buttress_ctrl { + u32 freq_ctl, pwr_sts_shift, pwr_sts_mask, pwr_sts_on, pwr_sts_off; + union { + unsigned int divisor; + unsigned int ratio; + }; + union { + unsigned int divisor_shift; + unsigned int ratio_shift; + }; + unsigned int ovrd; + u32 ovrd_shift; + unsigned int qos_floor; + bool started; +}; + +struct ipu_buttress_fused_freqs { + unsigned int min_freq; + unsigned int max_freq; + unsigned int efficient_freq; +}; + +struct ipu_buttress_ipc { + struct completion send_complete; + struct completion recv_complete; + u32 nack; + u32 nack_mask; + u32 recv_data; + u32 csr_out; + u32 csr_in; + u32 db0_in; + u32 db0_out; + u32 data0_out; + u32 data0_in; +}; + +struct ipu_buttress { + struct mutex power_mutex, auth_mutex, cons_mutex, ipc_mutex; + spinlock_t tsc_lock; /* tsc lock */ + struct clk *clk_sensor[IPU_BUTTRESS_NUM_OF_SENS_CKS]; + struct clk *pll_sensor[IPU_BUTTRESS_NUM_OF_PLL_CKS]; + struct ipu_buttress_ipc cse; + struct ipu_buttress_ipc ish; + struct list_head constraints; + struct ipu_buttress_fused_freqs psys_fused_freqs; + unsigned int psys_min_freq; + u32 wdt_cached_value; + u8 psys_force_ratio; + bool force_suspend; + bool ps_started; +}; + +struct ipu_buttress_sensor_clk_freq { + unsigned int rate; + unsigned int val; +}; + +struct firmware; + +enum ipu_buttress_ipc_domain { + IPU_BUTTRESS_IPC_CSE, + IPU_BUTTRESS_IPC_ISH, +}; + +struct ipu_buttress_constraint { + struct list_head list; + unsigned int min_freq; +}; + +struct ipu_ipc_buttress_bulk_msg { + u32 cmd; + u32 expected_resp; + bool require_resp; + u8 cmd_size; +}; + +int ipu_buttress_ipc_reset(struct ipu_device *isp, + struct ipu_buttress_ipc *ipc); +int ipu_buttress_map_fw_image(struct ipu_bus_device *sys, + const struct firmware *fw, struct sg_table *sgt); +int ipu_buttress_unmap_fw_image(struct ipu_bus_device *sys, + struct sg_table *sgt); +int ipu_buttress_power(struct device *dev, + struct ipu_buttress_ctrl *ctrl, bool on); +void 
+ipu_buttress_add_psys_constraint(struct ipu_device *isp, + struct ipu_buttress_constraint *constraint); +void +ipu_buttress_remove_psys_constraint(struct ipu_device *isp, + struct ipu_buttress_constraint *constraint); +void ipu_buttress_set_secure_mode(struct ipu_device *isp); +bool ipu_buttress_get_secure_mode(struct ipu_device *isp); +int ipu_buttress_authenticate(struct ipu_device *isp); +int ipu_buttress_reset_authentication(struct ipu_device *isp); +bool ipu_buttress_auth_done(struct ipu_device *isp); +int ipu_buttress_start_tsc_sync(struct ipu_device *isp); +int ipu_buttress_tsc_read(struct ipu_device *isp, u64 *val); +u64 ipu_buttress_tsc_ticks_to_ns(u64 ticks); + +irqreturn_t ipu_buttress_isr(int irq, void *isp_ptr); +irqreturn_t ipu_buttress_isr_threaded(int irq, void *isp_ptr); +int ipu_buttress_debugfs_init(struct ipu_device *isp); +int ipu_buttress_init(struct ipu_device *isp); +void ipu_buttress_exit(struct ipu_device *isp); +void ipu_buttress_csi_port_config(struct ipu_device *isp, + u32 legacy, u32 combo); +int ipu_buttress_restore(struct ipu_device *isp); + +int +ipu_buttress_ipc_send_bulk(struct ipu_device *isp, + enum ipu_buttress_ipc_domain ipc_domain, + struct ipu_ipc_buttress_bulk_msg *msgs, u32 size); +int ipu_buttress_psys_freq_get(void *data, u64 *val); +int ipu_buttress_isys_freq_get(void *data, u64 *val); +#ifdef I2C_WA +int ipu_get_i2c_bus_id(int adapter_id); +#endif /* I2C_WA */ +#endif /* IPU_BUTTRESS_H */ diff --git a/drivers/media/pci/intel/ipu-cpd.c b/drivers/media/pci/intel/ipu-cpd.c new file mode 100644 index 0000000000000..dca03232aa4f1 --- /dev/null +++ b/drivers/media/pci/intel/ipu-cpd.c @@ -0,0 +1,478 @@ +// SPDX-License-Identifier: GPL-2.0 +// Copyright (C) 2015 - 2018 Intel Corporation + +#include +#include + +#include "ipu.h" +#include "ipu-cpd.h" + +#include + +/* 15 entries + header*/ +#define MAX_PKG_DIR_ENT_CNT 16 +/* 2 qword per entry/header */ +#define PKG_DIR_ENT_LEN 2 +/* PKG_DIR size in bytes */ +#define 
PKG_DIR_SIZE ((MAX_PKG_DIR_ENT_CNT) * \ + (PKG_DIR_ENT_LEN) * sizeof(u64)) +#define PKG_DIR_ID_SHIFT 48 +#define PKG_DIR_ID_MASK 0x7f +#define PKG_DIR_VERSION_SHIFT 32 +#define PKG_DIR_SIZE_MASK 0xfffff +/* _IUPKDR_ */ +#define PKG_DIR_HDR_MARK 0x5f4955504b44525f + +/* $CPD */ +#define CPD_HDR_MARK 0x44504324 + +/* Maximum size is 2K DWORDs */ +#define MAX_MANIFEST_SIZE (2 * 1024 * sizeof(u32)) + +/* Maximum size is 64k */ +#define MAX_METADATA_SIZE (64 * 1024) + +#define MAX_COMPONENT_ID 127 +#define MAX_COMPONENT_VERSION 0xffff + +#define CPD_MANIFEST_IDX 0 +#define CPD_METADATA_IDX 1 +#define CPD_MODULEDATA_IDX 2 + +#define ipu_cpd_get_entries(cpd) ((struct ipu_cpd_ent *) \ + ((struct ipu_cpd_hdr *)cpd + 1)) +#define ipu_cpd_get_entry(cpd, idx) (&ipu_cpd_get_entries(cpd)[idx]) +#define ipu_cpd_get_manifest(cpd) ipu_cpd_get_entry(cpd, CPD_MANIFEST_IDX) +#define ipu_cpd_get_metadata(cpd) ipu_cpd_get_entry(cpd, CPD_METADATA_IDX) +#define ipu_cpd_get_moduledata(cpd) ipu_cpd_get_entry(cpd, CPD_MODULEDATA_IDX) + +static bool fw_version_check = true; +module_param(fw_version_check, bool, 0444); +MODULE_PARM_DESC(fw_version_check, "enable/disable checking firmware version"); + +static const struct ipu_cpd_metadata_cmpnt * +ipu_cpd_metadata_get_cmpnt(struct ipu_device *isp, + const void *metadata, + unsigned int metadata_size, + u8 idx) +{ + const struct ipu_cpd_metadata_extn *extn; + const struct ipu_cpd_metadata_cmpnt *cmpnts; + int cmpnt_count; + + extn = metadata; + cmpnts = metadata + sizeof(*extn); + cmpnt_count = (metadata_size - sizeof(*extn)) / sizeof(*cmpnts); + + if (idx > MAX_COMPONENT_ID || idx >= cmpnt_count) { + dev_err(&isp->pdev->dev, "Component index out of range (%d)\n", + idx); + return ERR_PTR(-EINVAL); + } + + return &cmpnts[idx]; +} + +static u32 ipu_cpd_metadata_cmpnt_version(struct ipu_device *isp, + const void *metadata, + unsigned int metadata_size, u8 idx) +{ + const struct ipu_cpd_metadata_cmpnt *cmpnt = + ipu_cpd_metadata_get_cmpnt(isp, 
metadata, + metadata_size, idx); + + if (IS_ERR(cmpnt)) + return PTR_ERR(cmpnt); + + return cmpnt->ver; +} + +static int ipu_cpd_metadata_get_cmpnt_id(struct ipu_device *isp, + const void *metadata, + unsigned int metadata_size, u8 idx) +{ + const struct ipu_cpd_metadata_cmpnt *cmpnt = + ipu_cpd_metadata_get_cmpnt(isp, metadata, + metadata_size, idx); + + if (IS_ERR(cmpnt)) + return PTR_ERR(cmpnt); + + return cmpnt->id; +} + +static u32 +ipu_cpd_metadata_get_cmpnt_icache_base_offs(struct ipu_device *isp, + const void *metadata, + unsigned int metadata_size, u8 idx) +{ + const struct ipu_cpd_metadata_cmpnt *cmpnt = + ipu_cpd_metadata_get_cmpnt(isp, metadata, + metadata_size, idx); + + if (IS_ERR(cmpnt)) + return PTR_ERR(cmpnt); + + return cmpnt->icache_base_offs; +} + +static u32 +ipu_cpd_metadata_get_cmpnt_entry_point(struct ipu_device *isp, + const void *metadata, + unsigned int metadata_size, u8 idx) +{ + const struct ipu_cpd_metadata_cmpnt *cmpnt = + ipu_cpd_metadata_get_cmpnt(isp, metadata, + metadata_size, idx); + + if (IS_ERR(cmpnt)) + return PTR_ERR(cmpnt); + + return cmpnt->entry_point; +} + +static int ipu_cpd_parse_module_data(struct ipu_device *isp, + const void *module_data, + unsigned int module_data_size, + dma_addr_t dma_addr_module_data, + u64 *pkg_dir, + const void *metadata, + unsigned int metadata_size) +{ + const struct ipu_cpd_module_data_hdr *module_data_hdr; + const struct ipu_cpd_hdr *dir_hdr; + const struct ipu_cpd_ent *dir_ent; + int i; + + if (!module_data) + return -EINVAL; + + module_data_hdr = module_data; + dir_hdr = module_data + module_data_hdr->hdr_len; + dir_ent = (struct ipu_cpd_ent *)(dir_hdr + 1); + + pkg_dir[0] = PKG_DIR_HDR_MARK; + /* pkg_dir entry count = component count + pkg_dir header */ + pkg_dir[1] = dir_hdr->ent_cnt + 1; + + for (i = 0; i < dir_hdr->ent_cnt; i++, dir_ent++) { + u64 *p = &pkg_dir[PKG_DIR_ENT_LEN + i * PKG_DIR_ENT_LEN]; + int ver, id; + + *p++ = dma_addr_module_data + dir_ent->offset; + + id = 
ipu_cpd_metadata_get_cmpnt_id(isp, metadata, + metadata_size, i); + if (id < 0 || id > MAX_COMPONENT_ID) { + dev_err(&isp->pdev->dev, + "Failed to parse component id\n"); + return -EINVAL; + } + ver = ipu_cpd_metadata_cmpnt_version(isp, metadata, + metadata_size, i); + if (ver < 0 || ver > MAX_COMPONENT_VERSION) { + dev_err(&isp->pdev->dev, + "Failed to parse component version\n"); + return -EINVAL; + } + + /* + * PKG_DIR Entry (type == id) + * 63:56 55 54:48 47:32 31:24 23:0 + * Rsvd Rsvd Type Version Rsvd Size + */ + *p = dir_ent->len | (u64) id << PKG_DIR_ID_SHIFT | + (u64)ver << PKG_DIR_VERSION_SHIFT; + } + + return 0; +} + +void *ipu_cpd_create_pkg_dir(struct ipu_bus_device *adev, + const void *src, + dma_addr_t dma_addr_src, + dma_addr_t *dma_addr, unsigned int *pkg_dir_size) +{ + struct ipu_device *isp = adev->isp; + const struct ipu_cpd_ent *ent, *man_ent, *met_ent; + u64 *pkg_dir; + unsigned int man_sz, met_sz; + void *pkg_dir_pos; + int ret; + + man_ent = ipu_cpd_get_manifest(src); + man_sz = man_ent->len; + + met_ent = ipu_cpd_get_metadata(src); + met_sz = met_ent->len; + + *pkg_dir_size = PKG_DIR_SIZE + man_sz + met_sz; + pkg_dir = dma_alloc_attrs(&adev->dev, *pkg_dir_size, dma_addr, + GFP_KERNEL, +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 8, 0) + NULL +#else + 0 +#endif + ); + if (!pkg_dir) + return pkg_dir; + + /* + * pkg_dir entry/header: + * qword | 63:56 | 55 | 54:48 | 47:32 | 31:24 | 23:0 + * N Address/Offset/"_IUPKDR_" + * N + 1 | rsvd | rsvd | type | ver | rsvd | size + * + * We can ignore other fields that size in N + 1 qword as they + * are 0 anyway. Just setting size for now. 
+ */ + + ent = ipu_cpd_get_moduledata(src); + + ret = ipu_cpd_parse_module_data(isp, src + ent->offset, + ent->len, + dma_addr_src + ent->offset, + pkg_dir, + src + met_ent->offset, met_ent->len); + if (ret) { + dev_err(&isp->pdev->dev, + "Unable to parse module data section!\n"); + dma_free_attrs(&isp->psys->dev, *pkg_dir_size, pkg_dir, + *dma_addr, +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 8, 0) + NULL +#else + 0 +#endif + ); + return NULL; + } + + /* Copy manifest after pkg_dir */ + pkg_dir_pos = pkg_dir + PKG_DIR_ENT_LEN * MAX_PKG_DIR_ENT_CNT; + memcpy(pkg_dir_pos, src + man_ent->offset, man_sz); + + /* Copy metadata after manifest */ + pkg_dir_pos += man_sz; + memcpy(pkg_dir_pos, src + met_ent->offset, met_sz); + + dma_sync_single_range_for_device(&adev->dev, *dma_addr, + 0, *pkg_dir_size, DMA_TO_DEVICE); + + return pkg_dir; +} +EXPORT_SYMBOL_GPL(ipu_cpd_create_pkg_dir); + +void ipu_cpd_free_pkg_dir(struct ipu_bus_device *adev, + u64 *pkg_dir, + dma_addr_t dma_addr, unsigned int pkg_dir_size) +{ +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 8, 0) + dma_free_attrs(&adev->dev, pkg_dir_size, pkg_dir, dma_addr, NULL); +#else + dma_free_attrs(&adev->dev, pkg_dir_size, pkg_dir, dma_addr, 0); +#endif +} +EXPORT_SYMBOL_GPL(ipu_cpd_free_pkg_dir); + +u32 ipu_cpd_get_pg_icache_base(struct ipu_device *isp, + u8 idx, + const void *cpd_file, unsigned int cpd_file_size) +{ + const struct ipu_cpd_ent *metadata = ipu_cpd_get_metadata(cpd_file); + const void *metadata_addr = cpd_file + metadata->offset; + + return ipu_cpd_metadata_get_cmpnt_icache_base_offs(isp, + metadata_addr, + metadata->len, idx); +} +EXPORT_SYMBOL_GPL(ipu_cpd_get_pg_icache_base); + +u32 ipu_cpd_get_pg_entry_point(struct ipu_device *isp, + u8 idx, + const void *cpd_file, unsigned int cpd_file_size) +{ + const struct ipu_cpd_ent *metadata = ipu_cpd_get_metadata(cpd_file); + const void *metadata_addr = cpd_file + metadata->offset; + + return ipu_cpd_metadata_get_cmpnt_entry_point(isp, + metadata_addr, + 
metadata->len, idx); +} +EXPORT_SYMBOL_GPL(ipu_cpd_get_pg_entry_point); + +static int ipu_cpd_validate_cpd(struct ipu_device *isp, + const void *cpd, + unsigned long cpd_size, unsigned long data_size) +{ + const struct ipu_cpd_hdr *cpd_hdr = cpd; + struct ipu_cpd_ent *ent; + unsigned int i; + + /* Ensure cpd hdr is within moduledata */ + if (cpd_size < sizeof(*cpd_hdr)) { + dev_err(&isp->pdev->dev, "Invalid CPD moduledata size\n"); + return -EINVAL; + } + + /* Sanity check for CPD header */ + if ((cpd_size - sizeof(*cpd_hdr)) / sizeof(*ent) < cpd_hdr->ent_cnt) { + dev_err(&isp->pdev->dev, "Invalid CPD header\n"); + return -EINVAL; + } + + /* Ensure that all entries are within moduledata */ + ent = (struct ipu_cpd_ent *)(cpd_hdr + 1); + for (i = 0; i < cpd_hdr->ent_cnt; i++, ent++) { + if (data_size < ent->offset || + data_size - ent->offset < ent->len) { + dev_err(&isp->pdev->dev, "Invalid CPD entry (%d)\n", i); + return -EINVAL; + } + } + + return 0; +} + +static int ipu_cpd_validate_moduledata(struct ipu_device *isp, + const void *moduledata, + u32 moduledata_size) +{ + const struct ipu_cpd_module_data_hdr *mod_hdr = moduledata; + int rval; + + /* Ensure moduledata hdr is within moduledata */ + if (moduledata_size < sizeof(*mod_hdr) || + moduledata_size < mod_hdr->hdr_len) { + dev_err(&isp->pdev->dev, "Invalid moduledata size\n"); + return -EINVAL; + } + + if (fw_version_check && mod_hdr->fw_pkg_date != IA_CSS_FW_PKG_RELEASE) { + dev_err(&isp->pdev->dev, + "Moduledata and library version mismatch (%x != %x)\n", + mod_hdr->fw_pkg_date, IA_CSS_FW_PKG_RELEASE); + return -EINVAL; + } + + dev_warn(&isp->pdev->dev, + "Moduledata version: %x, library version: %x\n", + mod_hdr->fw_pkg_date, IA_CSS_FW_PKG_RELEASE); + + dev_info(&isp->pdev->dev, "CSS release: %x\n", IA_CSS_FW_PKG_RELEASE); + rval = ipu_cpd_validate_cpd(isp, moduledata + + mod_hdr->hdr_len, + moduledata_size - + mod_hdr->hdr_len, moduledata_size); + if (rval) { + dev_err(&isp->pdev->dev, "Invalid CPD in 
moduledata\n"); + return -EINVAL; + } + + return 0; +} + +static int ipu_cpd_validate_metadata(struct ipu_device *isp, + const void *metadata, u32 meta_size) +{ + const struct ipu_cpd_metadata_extn *extn = metadata; + + /* Sanity check for metadata size */ + if (meta_size < sizeof(*extn) || meta_size > MAX_METADATA_SIZE) { + dev_err(&isp->pdev->dev, "%s: Invalid metadata\n", __func__); + return -EINVAL; + } + + /* Validate extension and image types */ + if (extn->extn_type != IPU_CPD_METADATA_EXTN_TYPE_IUNIT || + extn->img_type != IPU_CPD_METADATA_IMAGE_TYPE_MAIN_FIRMWARE) { + dev_err(&isp->pdev->dev, + "Invalid metadata descriptor img_type (%d)\n", + extn->img_type); + return -EINVAL; + } + + /* Validate metadata size multiple of metadata components */ + if ((meta_size - sizeof(*extn)) % + sizeof(struct ipu_cpd_metadata_cmpnt)) { + dev_err(&isp->pdev->dev, "%s: Invalid metadata size\n", + __func__); + return -EINVAL; + } + + return 0; +} + +int ipu_cpd_validate_cpd_file(struct ipu_device *isp, + const void *cpd_file, unsigned long cpd_file_size) +{ + const struct ipu_cpd_hdr *hdr = cpd_file; + struct ipu_cpd_ent *ent; + int rval; + + rval = ipu_cpd_validate_cpd(isp, cpd_file, + cpd_file_size, cpd_file_size); + if (rval) { + dev_err(&isp->pdev->dev, "Invalid CPD in file\n"); + return -EINVAL; + } + + /* Check for CPD file marker */ + if (hdr->hdr_mark != CPD_HDR_MARK) { + dev_err(&isp->pdev->dev, "Invalid CPD header\n"); + return -EINVAL; + } + + /* Sanity check for manifest size */ + ent = ipu_cpd_get_manifest(cpd_file); + if (ent->len > MAX_MANIFEST_SIZE) { + dev_err(&isp->pdev->dev, "Invalid manifest size\n"); + return -EINVAL; + } + + /* Validate metadata */ + ent = ipu_cpd_get_metadata(cpd_file); + rval = ipu_cpd_validate_metadata(isp, cpd_file + ent->offset, ent->len); + if (rval) { + dev_err(&isp->pdev->dev, "Invalid metadata\n"); + return rval; + } + + /* Validate moduledata */ + ent = ipu_cpd_get_moduledata(cpd_file); + rval = 
ipu_cpd_validate_moduledata(isp, cpd_file + ent->offset, + ent->len); + if (rval) { + dev_err(&isp->pdev->dev, "Invalid moduledata\n"); + return rval; + } + + return 0; +} +EXPORT_SYMBOL_GPL(ipu_cpd_validate_cpd_file); + +unsigned int ipu_cpd_pkg_dir_get_address(const u64 *pkg_dir, int pkg_dir_idx) +{ + return pkg_dir[++pkg_dir_idx * PKG_DIR_ENT_LEN]; +} +EXPORT_SYMBOL_GPL(ipu_cpd_pkg_dir_get_address); + +unsigned int ipu_cpd_pkg_dir_get_num_entries(const u64 *pkg_dir) +{ + return pkg_dir[1]; +} +EXPORT_SYMBOL_GPL(ipu_cpd_pkg_dir_get_num_entries); + +unsigned int ipu_cpd_pkg_dir_get_size(const u64 *pkg_dir, int pkg_dir_idx) +{ + return pkg_dir[++pkg_dir_idx * PKG_DIR_ENT_LEN + 1] & PKG_DIR_SIZE_MASK; +} +EXPORT_SYMBOL_GPL(ipu_cpd_pkg_dir_get_size); + +unsigned int ipu_cpd_pkg_dir_get_type(const u64 *pkg_dir, int pkg_dir_idx) +{ + return pkg_dir[++pkg_dir_idx * PKG_DIR_ENT_LEN + 1] >> + PKG_DIR_ID_SHIFT & PKG_DIR_ID_MASK; +} +EXPORT_SYMBOL_GPL(ipu_cpd_pkg_dir_get_type); diff --git a/drivers/media/pci/intel/ipu-cpd.h b/drivers/media/pci/intel/ipu-cpd.h new file mode 100644 index 0000000000000..7033e90e135f5 --- /dev/null +++ b/drivers/media/pci/intel/ipu-cpd.h @@ -0,0 +1,108 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (C) 2015 - 2018 Intel Corporation */ + +#ifndef IPU_CPD_H +#define IPU_CPD_H + +#define IPU_CPD_SIZE_OF_FW_ARCH_VERSION 7 +#define IPU_CPD_SIZE_OF_SYSTEM_VERSION 11 +#define IPU_CPD_SIZE_OF_COMPONENT_NAME 12 + +#define IPU_CPD_METADATA_EXTN_TYPE_IUNIT 0x10 + +#define IPU_CPD_METADATA_IMAGE_TYPE_RESERVED 0 +#define IPU_CPD_METADATA_IMAGE_TYPE_BOOTLOADER 1 +#define IPU_CPD_METADATA_IMAGE_TYPE_MAIN_FIRMWARE 2 + +#define IPU_CPD_PKG_DIR_PSYS_SERVER_IDX 0 +#define IPU_CPD_PKG_DIR_ISYS_SERVER_IDX 1 + +#define IPU_CPD_PKG_DIR_CLIENT_PG_TYPE 3 + +struct __packed ipu_cpd_module_data_hdr { + u32 hdr_len; + u32 endian; + u32 fw_pkg_date; + u32 hive_sdk_date; + u32 compiler_date; + u32 target_platform_type; + u8 sys_ver[IPU_CPD_SIZE_OF_SYSTEM_VERSION]; 
+ u8 fw_arch_ver[IPU_CPD_SIZE_OF_FW_ARCH_VERSION]; + u8 rsvd[2]; +}; + +struct __packed ipu_cpd_hdr { + u32 hdr_mark; + u32 ent_cnt; + u8 hdr_ver; + u8 ent_ver; + u8 hdr_len; +#if defined(CONFIG_VIDEO_INTEL_IPU4) || defined(CONFIG_VIDEO_INTEL_IPU4P) + u8 chksm; + u32 name; +#else + u8 rsvd; + u32 sub_partition_name; + u32 chksm; +#endif +}; + +struct __packed ipu_cpd_ent { + u8 name[IPU_CPD_SIZE_OF_COMPONENT_NAME]; + u32 offset; + u32 len; + u8 rsvd[4]; +}; + +struct __packed ipu_cpd_metadata_cmpnt { + u32 id; + u32 size; + u32 ver; + u8 sha2_hash[32]; + u32 entry_point; + u32 icache_base_offs; + u8 attrs[16]; +}; + +struct __packed ipu_cpd_metadata_extn { + u32 extn_type; + u32 len; + u32 img_type; + u8 rsvd[16]; +}; + +struct __packed ipu_cpd_client_pkg_hdr { + u32 prog_list_offs; + u32 prog_list_size; + u32 prog_desc_offs; + u32 prog_desc_size; + u32 pg_manifest_offs; + u32 pg_manifest_size; + u32 prog_bin_offs; + u32 prog_bin_size; +}; + +void *ipu_cpd_create_pkg_dir(struct ipu_bus_device *adev, + const void *src, + dma_addr_t dma_addr_src, + dma_addr_t *dma_addr, unsigned int *pkg_dir_size); +void ipu_cpd_free_pkg_dir(struct ipu_bus_device *adev, + u64 *pkg_dir, + dma_addr_t dma_addr, unsigned int pkg_dir_size); +u32 ipu_cpd_get_pg_icache_base(struct ipu_device *isp, + u8 idx, + const void *cpd_file, + unsigned int cpd_file_size); +u32 ipu_cpd_get_pg_entry_point(struct ipu_device *isp, + u8 idx, + const void *cpd_file, + unsigned int cpd_file_size); +int ipu_cpd_validate_cpd_file(struct ipu_device *isp, + const void *cpd_file, + unsigned long cpd_file_size); +unsigned int ipu_cpd_pkg_dir_get_address(const u64 *pkg_dir, int pkg_dir_idx); +unsigned int ipu_cpd_pkg_dir_get_num_entries(const u64 *pkg_dir); +unsigned int ipu_cpd_pkg_dir_get_size(const u64 *pkg_dir, int pkg_dir_idx); +unsigned int ipu_cpd_pkg_dir_get_type(const u64 *pkg_dir, int pkg_dir_idx); + +#endif /* IPU_CPD_H */ diff --git a/drivers/media/pci/intel/ipu-dma.c b/drivers/media/pci/intel/ipu-dma.c 
new file mode 100644 index 0000000000000..a5a963dfcc74a --- /dev/null +++ b/drivers/media/pci/intel/ipu-dma.c @@ -0,0 +1,448 @@ +// SPDX-License-Identifier: GPL-2.0 +// Copyright (C) 2013 - 2018 Intel Corporation + +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "ipu-dma.h" +#include "ipu-mmu.h" + +/* Begin of things adapted from arch/arm/mm/dma-mapping.c */ +static void __dma_clear_buffer(struct page *page, size_t size, +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 8, 0) + struct dma_attrs *attrs +#else + unsigned long attrs +#endif + ) +{ + /* + * Ensure that the allocated pages are zeroed, and that any data + * lurking in the kernel direct-mapped region is invalidated. + */ + if (PageHighMem(page)) { + while (size > 0) { + void *ptr = kmap_atomic(page); + + memset(ptr, 0, PAGE_SIZE); +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 8, 0) + if (!dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs)) +#else + if ((attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0) +#endif + clflush_cache_range(ptr, PAGE_SIZE); + kunmap_atomic(ptr); + page++; + size -= PAGE_SIZE; + } + } else { + void *ptr = page_address(page); + + memset(ptr, 0, size); +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 8, 0) + if (!dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs)) +#else + if ((attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0) +#endif + clflush_cache_range(ptr, size); + } +} + +static struct page **__iommu_alloc_buffer(struct device *dev, size_t size, + gfp_t gfp, +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 8, 0) + struct dma_attrs *attrs +#else + unsigned long attrs +#endif + ) +{ + struct page **pages; + int count = size >> PAGE_SHIFT; + int array_size = count * sizeof(struct page *); + int i = 0; + + if (array_size <= PAGE_SIZE) + pages = kzalloc(array_size, GFP_KERNEL); + else + pages = vzalloc(array_size); + if (!pages) + return NULL; + + gfp |= __GFP_NOWARN; + + while (count) { + int j, order = __fls(count); + + pages[i] = alloc_pages(gfp, order); + while 
(!pages[i] && order) + pages[i] = alloc_pages(gfp, --order); + if (!pages[i]) + goto error; + + if (order) { + split_page(pages[i], order); + j = 1 << order; + while (--j) + pages[i + j] = pages[i] + j; + } + + __dma_clear_buffer(pages[i], PAGE_SIZE << order, attrs); + i += 1 << order; + count -= 1 << order; + } + + return pages; +error: + while (i--) + if (pages[i]) + __free_pages(pages[i], 0); + if (array_size <= PAGE_SIZE) + kfree(pages); + else + vfree(pages); + return NULL; +} + +static int __iommu_free_buffer(struct device *dev, struct page **pages, + size_t size, +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 8, 0) + struct dma_attrs *attrs +#else + unsigned long attrs +#endif + ) +{ + int count = size >> PAGE_SHIFT; + int array_size = count * sizeof(struct page *); + int i; + + for (i = 0; i < count; i++) { + if (pages[i]) { + __dma_clear_buffer(pages[i], PAGE_SIZE, attrs); + __free_pages(pages[i], 0); + } + } + + if (array_size <= PAGE_SIZE) + kfree(pages); + else + vfree(pages); + return 0; +} + +/* End of things adapted from arch/arm/mm/dma-mapping.c */ + +static void ipu_dma_sync_single_for_cpu(struct device *dev, + dma_addr_t dma_handle, + size_t size, + enum dma_data_direction dir) +{ + struct device *aiommu = to_ipu_bus_device(dev)->iommu; + struct ipu_mmu *mmu = dev_get_drvdata(aiommu); + unsigned long pa = iommu_iova_to_phys(mmu->dmap->domain, dma_handle); + + clflush_cache_range(phys_to_virt(pa), size); +} + +static void ipu_dma_sync_sg_for_cpu(struct device *dev, + struct scatterlist *sglist, + int nents, enum dma_data_direction dir) +{ + struct scatterlist *sg; + int i; + + for_each_sg(sglist, sg, nents, i) + clflush_cache_range(page_to_virt(sg_page(sg)), sg->length); +} + +static void *ipu_dma_alloc(struct device *dev, size_t size, + dma_addr_t *dma_handle, gfp_t gfp, +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 8, 0) + struct dma_attrs *attrs +#else + unsigned long attrs +#endif + ) +{ + struct device *aiommu = to_ipu_bus_device(dev)->iommu; + 
struct ipu_mmu *mmu = dev_get_drvdata(aiommu); + struct page **pages; + struct iova *iova; + struct vm_struct *area; + int i; + int rval; + + size = PAGE_ALIGN(size); + + iova = alloc_iova(&mmu->dmap->iovad, size >> PAGE_SHIFT, + dma_get_mask(dev) >> PAGE_SHIFT, 0); + if (!iova) + return NULL; + + pages = __iommu_alloc_buffer(dev, size, gfp, attrs); + if (!pages) + goto out_free_iova; + + for (i = 0; iova->pfn_lo + i <= iova->pfn_hi; i++) { + rval = iommu_map(mmu->dmap->domain, + (iova->pfn_lo + i) << PAGE_SHIFT, + page_to_phys(pages[i]), PAGE_SIZE, 0); + if (rval) + goto out_unmap; + } + + area = __get_vm_area(size, 0, VMALLOC_START, VMALLOC_END); + if (!area) + goto out_unmap; + + area->pages = pages; + + if (map_vm_area(area, PAGE_KERNEL, pages)) + goto out_vunmap; + + *dma_handle = iova->pfn_lo << PAGE_SHIFT; + + mmu->tlb_invalidate(mmu); + + return area->addr; + +out_vunmap: + vunmap(area->addr); + +out_unmap: + for (i--; i >= 0; i--) { + iommu_unmap(mmu->dmap->domain, (iova->pfn_lo + i) << PAGE_SHIFT, + PAGE_SIZE); + } + __iommu_free_buffer(dev, pages, size, attrs); + +out_free_iova: + __free_iova(&mmu->dmap->iovad, iova); + + return NULL; +} + +static void ipu_dma_free(struct device *dev, size_t size, void *vaddr, + dma_addr_t dma_handle, +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 8, 0) + struct dma_attrs *attrs +#else + unsigned long attrs +#endif + ) +{ + struct device *aiommu = to_ipu_bus_device(dev)->iommu; + struct ipu_mmu *mmu = dev_get_drvdata(aiommu); + struct vm_struct *area = find_vm_area(vaddr); + struct page **pages; + struct iova *iova = find_iova(&mmu->dmap->iovad, + dma_handle >> PAGE_SHIFT); + + if (WARN_ON(!area)) + return; + + if (WARN_ON(!area->pages)) + return; + + WARN_ON(!iova); + + size = PAGE_ALIGN(size); + + pages = area->pages; + + vunmap(vaddr); + + iommu_unmap(mmu->dmap->domain, iova->pfn_lo << PAGE_SHIFT, + (iova->pfn_hi - iova->pfn_lo + 1) << PAGE_SHIFT); + + __iommu_free_buffer(dev, pages, size, attrs); + + 
__free_iova(&mmu->dmap->iovad, iova); + + mmu->tlb_invalidate(mmu); +} + +static int ipu_dma_mmap(struct device *dev, struct vm_area_struct *vma, + void *addr, dma_addr_t iova, size_t size, +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 8, 0) + struct dma_attrs *attrs +#else + unsigned long attrs +#endif + ) +{ + struct vm_struct *area = find_vm_area(addr); + size_t count = PAGE_ALIGN(size) >> PAGE_SHIFT; + size_t i; + + if (!area) + return -EFAULT; + + if (vma->vm_start & ~PAGE_MASK) + return -EINVAL; + + if (size > area->size) + return -EFAULT; + + for (i = 0; i < count; i++) + vm_insert_page(vma, vma->vm_start + (i << PAGE_SHIFT), + area->pages[i]); + + return 0; +} + +static void ipu_dma_unmap_sg(struct device *dev, + struct scatterlist *sglist, + int nents, enum dma_data_direction dir, +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 8, 0) + struct dma_attrs *attrs +#else + unsigned long attrs +#endif + ) +{ + struct device *aiommu = to_ipu_bus_device(dev)->iommu; + struct ipu_mmu *mmu = dev_get_drvdata(aiommu); + struct iova *iova = find_iova(&mmu->dmap->iovad, + sg_dma_address(sglist) >> PAGE_SHIFT); + + if (!nents) + return; + + WARN_ON(!iova); + +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 8, 0) + if (!dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs)) +#else + if ((attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0) +#endif + ipu_dma_sync_sg_for_cpu(dev, sglist, nents, DMA_BIDIRECTIONAL); + + iommu_unmap(mmu->dmap->domain, iova->pfn_lo << PAGE_SHIFT, + (iova->pfn_hi - iova->pfn_lo + 1) << PAGE_SHIFT); + + mmu->tlb_invalidate(mmu); + + __free_iova(&mmu->dmap->iovad, iova); +} + +static int ipu_dma_map_sg(struct device *dev, struct scatterlist *sglist, + int nents, enum dma_data_direction dir, +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 8, 0) + struct dma_attrs *attrs +#else + unsigned long attrs +#endif + ) +{ + struct device *aiommu = to_ipu_bus_device(dev)->iommu; + struct ipu_mmu *mmu = dev_get_drvdata(aiommu); + struct scatterlist *sg; + struct iova *iova; + size_t size = 0; + u32 
iova_addr; + int i; + + for_each_sg(sglist, sg, nents, i) + size += PAGE_ALIGN(sg->length) >> PAGE_SHIFT; + + dev_dbg(dev, "dmamap: mapping sg %d entries, %zu pages\n", nents, size); + + iova = alloc_iova(&mmu->dmap->iovad, size, + dma_get_mask(dev) >> PAGE_SHIFT, 0); + if (!iova) + return 0; + + dev_dbg(dev, "dmamap: iova low pfn %lu, high pfn %lu\n", iova->pfn_lo, + iova->pfn_hi); + + iova_addr = iova->pfn_lo; + + for_each_sg(sglist, sg, nents, i) { + int rval; + + dev_dbg(dev, "mapping entry %d: iova 0x%8.8x,phy 0x%16.16llx\n", + i, iova_addr << PAGE_SHIFT, + (unsigned long long)page_to_phys(sg_page(sg))); + rval = iommu_map(mmu->dmap->domain, iova_addr << PAGE_SHIFT, + page_to_phys(sg_page(sg)), + PAGE_ALIGN(sg->length), 0); + if (rval) + goto out_fail; + sg_dma_address(sg) = iova_addr << PAGE_SHIFT; +#ifdef CONFIG_NEED_SG_DMA_LENGTH + sg_dma_len(sg) = sg->length; +#endif /* CONFIG_NEED_SG_DMA_LENGTH */ + + iova_addr += PAGE_ALIGN(sg->length) >> PAGE_SHIFT; + } + +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 8, 0) + if (!dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs)) +#else + if ((attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0) +#endif + ipu_dma_sync_sg_for_cpu(dev, sglist, nents, DMA_BIDIRECTIONAL); + + mmu->tlb_invalidate(mmu); + + return nents; + +out_fail: + ipu_dma_unmap_sg(dev, sglist, i, dir, attrs); + + return 0; +} + +/* + * Create scatter-list for the already allocated DMA buffer + */ +static int ipu_dma_get_sgtable(struct device *dev, struct sg_table *sgt, + void *cpu_addr, dma_addr_t handle, size_t size, +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 8, 0) + struct dma_attrs *attrs +#else + unsigned long attrs +#endif + ) +{ + struct vm_struct *area = find_vm_area(cpu_addr); + int n_pages; + int ret = 0; + + if (WARN_ON(!area->pages)) + return -ENOMEM; + + n_pages = PAGE_ALIGN(size) >> PAGE_SHIFT; + + ret = sg_alloc_table_from_pages(sgt, area->pages, n_pages, 0, size, + GFP_KERNEL); + if (ret) + dev_dbg(dev, "IPU get sgt table fail\n"); + + return ret; +} + +const 
struct dma_map_ops ipu_dma_ops = { + .alloc = ipu_dma_alloc, + .free = ipu_dma_free, + .mmap = ipu_dma_mmap, + .map_sg = ipu_dma_map_sg, + .unmap_sg = ipu_dma_unmap_sg, + .sync_single_for_cpu = ipu_dma_sync_single_for_cpu, + .sync_single_for_device = ipu_dma_sync_single_for_cpu, + .sync_sg_for_cpu = ipu_dma_sync_sg_for_cpu, + .sync_sg_for_device = ipu_dma_sync_sg_for_cpu, + .get_sgtable = ipu_dma_get_sgtable, +}; +EXPORT_SYMBOL_GPL(ipu_dma_ops); diff --git a/drivers/media/pci/intel/ipu-dma.h b/drivers/media/pci/intel/ipu-dma.h new file mode 100644 index 0000000000000..9974b69fd6fd3 --- /dev/null +++ b/drivers/media/pci/intel/ipu-dma.h @@ -0,0 +1,19 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (C) 2013 - 2018 Intel Corporation */ + +#ifndef IPU_DMA_H +#define IPU_DMA_H + +#include + +struct iommu_domain; + +struct ipu_dma_mapping { + struct iommu_domain *domain; + struct iova_domain iovad; + struct kref ref; +}; + +extern const struct dma_map_ops ipu_dma_ops; + +#endif /* IPU_DMA_H */ diff --git a/drivers/media/pci/intel/ipu-fw-com.c b/drivers/media/pci/intel/ipu-fw-com.c new file mode 100644 index 0000000000000..4ddf1116a7563 --- /dev/null +++ b/drivers/media/pci/intel/ipu-fw-com.c @@ -0,0 +1,480 @@ +// SPDX-License-Identifier: GPL-2.0 +// Copyright (C) 2013 - 2018 Intel Corporation + +#include + +#include +#include +#include +#include +#include + +#include "ipu.h" +#include "ipu-fw-com.h" +#include "ipu-bus.h" + +/* + * FWCOM layer is a shared resource between FW and driver. It consist + * of token queues to both send and receive directions. Queue is simply + * an array of structures with read and write indexes to the queue. + * There are 1...n queues to both directions. Queues locates in + * system ram and are mapped to ISP MMU so that both CPU and ISP can + * see the same buffer. Indexes are located in ISP DMEM so that FW code + * can poll those with very low latency and cost. 
CPU access to indexes is + * more costly but that happens only at message sending time and + * interrupt trigged message handling. CPU doesn't need to poll indexes. + * wr_reg / rd_reg are offsets to those dmem location. They are not + * the indexes itself. + */ + +/* Shared structure between driver and FW - do not modify */ +struct ipu_fw_sys_queue { + u64 host_address; + u32 vied_address; + u32 size; + u32 token_size; + u32 wr_reg; /* reg no in subsystem's regmem */ + u32 rd_reg; + u32 _align; +}; + +struct ipu_fw_sys_queue_res { + u64 host_address; + u32 vied_address; + u32 reg; +}; + +enum syscom_state { + /* Program load or explicit host setting should init to this */ + SYSCOM_STATE_UNINIT = 0x57A7E000, + /* SP Syscom sets this when it is ready for use */ + SYSCOM_STATE_READY = 0x57A7E001, + /* SP Syscom sets this when no more syscom accesses will happen */ + SYSCOM_STATE_INACTIVE = 0x57A7E002 +}; + +enum syscom_cmd { + /* Program load or explicit host setting should init to this */ + SYSCOM_COMMAND_UNINIT = 0x57A7F000, + /* Host Syscom requests syscom to become inactive */ + SYSCOM_COMMAND_INACTIVE = 0x57A7F001 +}; + +/* firmware config: data that sent from the host to SP via DDR */ +/* Cell copies data into a context */ + +struct ipu_fw_syscom_config { + u32 firmware_address; + + u32 num_input_queues; + u32 num_output_queues; + + /* ISP pointers to an array of ipu_fw_sys_queue structures */ + u32 input_queue; + u32 output_queue; + + /* ISYS / PSYS private data */ + u32 specific_addr; + u32 specific_size; +}; + +/* End of shared structures / data */ + +struct ipu_fw_com_context { + struct ipu_bus_device *adev; + void __iomem *dmem_addr; + int (*cell_ready)(struct ipu_bus_device *adev); + void (*cell_start)(struct ipu_bus_device *adev); + + void *dma_buffer; + dma_addr_t dma_addr; + unsigned int dma_size; + unsigned long attrs; + + unsigned int num_input_queues; + unsigned int num_output_queues; + + struct ipu_fw_sys_queue *input_queue; /* array of host to SP 
queues */ + struct ipu_fw_sys_queue *output_queue; /* array of SP to host */ + + void *config_host_addr; + void *specific_host_addr; + u64 ibuf_host_addr; + u64 obuf_host_addr; + + u32 config_vied_addr; + u32 input_queue_vied_addr; + u32 output_queue_vied_addr; + u32 specific_vied_addr; + u32 ibuf_vied_addr; + u32 obuf_vied_addr; +}; + +#define FW_COM_WR_REG 0 +#define FW_COM_RD_REG 4 + +#define REGMEM_OFFSET 0 + +enum regmem_id { + /* pass pkg_dir address to SPC in non-secure mode */ + PKG_DIR_ADDR_REG = 0, + /* pass syscom configuration to SPC */ + SYSCOM_CONFIG_REG = 1, + /* syscom state - modified by SP */ + SYSCOM_STATE_REG = 2, + /* syscom commands - modified by the host */ + SYSCOM_COMMAND_REG = 3, + /* Store interrupt status - updated by SP */ + SYSCOM_IRQ_REG = 4, + /* Store VTL0_ADDR_MASK in trusted secure regision - provided by host.*/ + SYSCOM_VTL0_ADDR_MASK = 5, + /* first syscom queue pointer register */ + SYSCOM_QPR_BASE_REG = 6 +}; + +enum message_direction { + DIR_RECV = 0, + DIR_SEND +}; + +static unsigned int num_messages(unsigned int wr, unsigned int rd, + unsigned int size) +{ + if (wr < rd) + wr += size; + return wr - rd; +} + +static unsigned int num_free(unsigned int wr, unsigned int rd, + unsigned int size) +{ + return size - num_messages(wr, rd, size); +} + +static unsigned int curr_index(void __iomem *q_dmem, + enum message_direction dir) +{ + return readl(q_dmem + + (dir == DIR_RECV ? FW_COM_RD_REG : FW_COM_WR_REG)); +} + +static unsigned int inc_index(void __iomem *q_dmem, struct ipu_fw_sys_queue *q, + enum message_direction dir) +{ + unsigned int index; + + index = curr_index(q_dmem, dir) + 1; + return index >= q->size ? 
0 : index; +} + +static unsigned int ipu_sys_queue_buf_size(unsigned int size, + unsigned int token_size) +{ + return (size + 1) * token_size; +} + +static void ipu_sys_queue_init(struct ipu_fw_sys_queue *q, unsigned int size, + unsigned int token_size, struct ipu_fw_sys_queue_res *res) +{ + unsigned int buf_size; + + q->size = size + 1; + q->token_size = token_size; + buf_size = ipu_sys_queue_buf_size(size, token_size); + + /* acquire the shared buffer space */ + q->host_address = res->host_address; + res->host_address += buf_size; + q->vied_address = res->vied_address; + res->vied_address += buf_size; + + /* acquire the shared read and writer pointers */ + q->wr_reg = res->reg; + res->reg++; + q->rd_reg = res->reg; + res->reg++; +} + +void *ipu_fw_com_prepare(struct ipu_fw_com_cfg *cfg, + struct ipu_bus_device *adev, void __iomem *base) +{ + struct ipu_fw_com_context *ctx; + struct ipu_fw_syscom_config *fw_cfg; + unsigned int i; + unsigned int sizeall, offset; + unsigned int sizeinput = 0, sizeoutput = 0; + unsigned long attrs = 0; + struct ipu_fw_sys_queue_res res; + + /* error handling */ + if (!cfg || !cfg->cell_start || !cfg->cell_ready) + return NULL; + + ctx = kzalloc(sizeof(*ctx), GFP_KERNEL); + if (!ctx) + return NULL; + ctx->dmem_addr = base + cfg->dmem_addr + REGMEM_OFFSET; + ctx->adev = adev; + ctx->cell_start = cfg->cell_start; + ctx->cell_ready = cfg->cell_ready; + + ctx->num_input_queues = cfg->num_input_queues; + ctx->num_output_queues = cfg->num_output_queues; + + /* + * Allocate DMA mapped memory. Allocate one big chunk. 
+ */ + sizeall = + /* Base cfg for FW */ + roundup(sizeof(struct ipu_fw_syscom_config), 8) + + /* Descriptions of the queues */ + cfg->num_input_queues * sizeof(struct ipu_fw_sys_queue) + + cfg->num_output_queues * sizeof(struct ipu_fw_sys_queue) + + /* FW specific information structure */ + roundup(cfg->specific_size, 8); + + for (i = 0; i < cfg->num_input_queues; i++) + sizeinput += ipu_sys_queue_buf_size(cfg->input[i].queue_size, + cfg->input[i].token_size); + + for (i = 0; i < cfg->num_output_queues; i++) + sizeoutput += ipu_sys_queue_buf_size(cfg->output[i].queue_size, + cfg->output[i].token_size); + + sizeall += sizeinput + sizeoutput; + + ctx->dma_buffer = dma_alloc_attrs(&ctx->adev->dev, sizeall, + &ctx->dma_addr, GFP_KERNEL, +#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 14, 0) + attrs); + ctx->attrs = attrs; +#else + NULL); +#endif + if (!ctx->dma_buffer) { + dev_err(&ctx->adev->dev, "failed to allocate dma memory\n"); + return NULL; + } + + ctx->dma_size = sizeall; + + /* This is the address where FW starts to parse allocations */ + ctx->config_host_addr = ctx->dma_buffer; + ctx->config_vied_addr = ctx->dma_addr; + fw_cfg = (struct ipu_fw_syscom_config *)ctx->config_host_addr; + offset = roundup(sizeof(struct ipu_fw_syscom_config), 8); + + ctx->input_queue = ctx->dma_buffer + offset; + ctx->input_queue_vied_addr = ctx->dma_addr + offset; + offset += cfg->num_input_queues * sizeof(struct ipu_fw_sys_queue); + + ctx->output_queue = ctx->dma_buffer + offset; + ctx->output_queue_vied_addr = ctx->dma_addr + offset; + offset += cfg->num_output_queues * sizeof(struct ipu_fw_sys_queue); + + ctx->specific_host_addr = ctx->dma_buffer + offset; + ctx->specific_vied_addr = ctx->dma_addr + offset; + offset += roundup(cfg->specific_size, 8); + + ctx->ibuf_host_addr = (uintptr_t)(ctx->dma_buffer + offset); + ctx->ibuf_vied_addr = ctx->dma_addr + offset; + offset += sizeinput; + + ctx->obuf_host_addr = (uintptr_t)(ctx->dma_buffer + offset); + ctx->obuf_vied_addr = 
ctx->dma_addr + offset; + offset += sizeoutput; + + /* initialize input queues */ + res.reg = SYSCOM_QPR_BASE_REG; + res.host_address = ctx->ibuf_host_addr; + res.vied_address = ctx->ibuf_vied_addr; + for (i = 0; i < cfg->num_input_queues; i++) { + ipu_sys_queue_init(ctx->input_queue + i, + cfg->input[i].queue_size, + cfg->input[i].token_size, &res); + } + + /* initialize output queues */ + res.host_address = ctx->obuf_host_addr; + res.vied_address = ctx->obuf_vied_addr; + for (i = 0; i < cfg->num_output_queues; i++) { + ipu_sys_queue_init(ctx->output_queue + i, + cfg->output[i].queue_size, + cfg->output[i].token_size, &res); + } + + /* copy firmware specific data */ + if (cfg->specific_addr && cfg->specific_size) { + memcpy((void *)ctx->specific_host_addr, + cfg->specific_addr, cfg->specific_size); + } + + fw_cfg->num_input_queues = cfg->num_input_queues; + fw_cfg->num_output_queues = cfg->num_output_queues; + fw_cfg->input_queue = ctx->input_queue_vied_addr; + fw_cfg->output_queue = ctx->output_queue_vied_addr; + fw_cfg->specific_addr = ctx->specific_vied_addr; + fw_cfg->specific_size = cfg->specific_size; + + clflush_cache_range(ctx->dma_buffer, sizeall); + + return ctx; +} +EXPORT_SYMBOL_GPL(ipu_fw_com_prepare); + +int ipu_fw_com_open(struct ipu_fw_com_context *ctx) +{ + /* Check if SP is in valid state */ + if (!ctx->cell_ready(ctx->adev)) + return -EIO; + + /* store syscom uninitialized state */ + writel(SYSCOM_STATE_UNINIT, ctx->dmem_addr + SYSCOM_STATE_REG * 4); + /* store syscom uninitialized command */ + writel(SYSCOM_COMMAND_UNINIT, + ctx->dmem_addr + SYSCOM_COMMAND_REG * 4); + /* store firmware configuration address */ + writel(ctx->config_vied_addr, + ctx->dmem_addr + SYSCOM_CONFIG_REG * 4); + + ctx->cell_start(ctx->adev); + + return 0; +} +EXPORT_SYMBOL_GPL(ipu_fw_com_open); + +int ipu_fw_com_close(struct ipu_fw_com_context *ctx) +{ + int state; + + state = readl(ctx->dmem_addr + 4 * SYSCOM_STATE_REG); + if (state != SYSCOM_STATE_READY) + return 
-EBUSY; + + /* set close request flag */ + writel(SYSCOM_COMMAND_INACTIVE, ctx->dmem_addr + + SYSCOM_COMMAND_REG * 4); + + return 0; +} +EXPORT_SYMBOL_GPL(ipu_fw_com_close); + +int ipu_fw_com_release(struct ipu_fw_com_context *ctx, unsigned int force) +{ + /* check if release is forced, an verify cell state if it is not */ + if (!force && !ctx->cell_ready(ctx->adev)) + return -EBUSY; + + dma_free_attrs(&ctx->adev->dev, ctx->dma_size, + ctx->dma_buffer, ctx->dma_addr, +#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 14, 0) + ctx->attrs); +#else + NULL); +#endif + kfree(ctx); + return 0; +} +EXPORT_SYMBOL_GPL(ipu_fw_com_release); + +int ipu_fw_com_ready(struct ipu_fw_com_context *ctx) +{ + int state; + + /* check if SP syscom is ready to open the queue */ + state = readl(ctx->dmem_addr + SYSCOM_STATE_REG * 4); + if (state != SYSCOM_STATE_READY) + return -EBUSY; /* SPC is not ready to handle messages yet */ + + return 0; +} +EXPORT_SYMBOL_GPL(ipu_fw_com_ready); + +static bool is_index_valid(struct ipu_fw_sys_queue *q, unsigned int index) +{ + if (index >= q->size) + return false; + return true; +} + +void *ipu_send_get_token(struct ipu_fw_com_context *ctx, int q_nbr) +{ + struct ipu_fw_sys_queue *q = &ctx->input_queue[q_nbr]; + void __iomem *q_dmem = ctx->dmem_addr + q->wr_reg * 4; + unsigned int wr, rd; + unsigned int packets; + unsigned int index; + + wr = readl(q_dmem + FW_COM_WR_REG); + rd = readl(q_dmem + FW_COM_RD_REG); + + /* Catch indexes in dmem */ + if (!is_index_valid(q, wr) || !is_index_valid(q, rd)) + return NULL; + + packets = num_free(wr + 1, rd, q->size); + if (packets <= 0) + return NULL; + + index = curr_index(q_dmem, DIR_SEND); + + return (void *)(unsigned long)q->host_address + (index * q->token_size); +} +EXPORT_SYMBOL_GPL(ipu_send_get_token); + +void ipu_send_put_token(struct ipu_fw_com_context *ctx, int q_nbr) +{ + struct ipu_fw_sys_queue *q = &ctx->input_queue[q_nbr]; + void __iomem *q_dmem = ctx->dmem_addr + q->wr_reg * 4; + int index = 
curr_index(q_dmem, DIR_SEND); + void *addr = (void *)(unsigned long)q->host_address + + (index * q->token_size); + + clflush_cache_range(addr, q->token_size); + + /* Increment index */ + index = inc_index(q_dmem, q, DIR_SEND); + + writel(index, q_dmem + FW_COM_WR_REG); +} +EXPORT_SYMBOL_GPL(ipu_send_put_token); + +void *ipu_recv_get_token(struct ipu_fw_com_context *ctx, int q_nbr) +{ + struct ipu_fw_sys_queue *q = &ctx->output_queue[q_nbr]; + void __iomem *q_dmem = ctx->dmem_addr + q->wr_reg * 4; + unsigned int wr, rd; + unsigned int packets; + void *addr; + + wr = readl(q_dmem + FW_COM_WR_REG); + rd = readl(q_dmem + FW_COM_RD_REG); + + /* Catch indexes in dmem? */ + if (!is_index_valid(q, wr) || !is_index_valid(q, rd)) + return NULL; + + packets = num_messages(wr, rd, q->size); + if (packets <= 0) + return NULL; + + addr = (void *)(unsigned long)q->host_address + (rd * q->token_size); + clflush_cache_range(addr, q->token_size); + + return addr; +} +EXPORT_SYMBOL_GPL(ipu_recv_get_token); + +void ipu_recv_put_token(struct ipu_fw_com_context *ctx, int q_nbr) +{ + struct ipu_fw_sys_queue *q = &ctx->output_queue[q_nbr]; + void __iomem *q_dmem = ctx->dmem_addr + q->wr_reg * 4; + unsigned int rd = inc_index(q_dmem, q, DIR_RECV); + + /* Release index */ + writel(rd, q_dmem + FW_COM_RD_REG); +} +EXPORT_SYMBOL_GPL(ipu_recv_put_token); + +MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("Intel ipu fw comm library"); diff --git a/drivers/media/pci/intel/ipu-fw-com.h b/drivers/media/pci/intel/ipu-fw-com.h new file mode 100644 index 0000000000000..de47455ea9a49 --- /dev/null +++ b/drivers/media/pci/intel/ipu-fw-com.h @@ -0,0 +1,43 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (C) 2013 - 2018 Intel Corporation */ + +#ifndef IPU_FW_COM_H +#define IPU_FW_COM_H + +struct ipu_fw_com_context; +struct ipu_bus_device; + +struct ipu_fw_syscom_queue_config { + unsigned int queue_size; /* tokens per queue */ + unsigned int token_size; /* bytes per token */ +}; + +struct 
ipu_fw_com_cfg { + unsigned int num_input_queues; + unsigned int num_output_queues; + struct ipu_fw_syscom_queue_config *input; + struct ipu_fw_syscom_queue_config *output; + + unsigned int dmem_addr; + + /* firmware-specific configuration data */ + void *specific_addr; + unsigned int specific_size; + int (*cell_ready)(struct ipu_bus_device *adev); + void (*cell_start)(struct ipu_bus_device *adev); +}; + +void *ipu_fw_com_prepare(struct ipu_fw_com_cfg *cfg, + struct ipu_bus_device *adev, void __iomem *base); + +int ipu_fw_com_open(struct ipu_fw_com_context *ctx); +int ipu_fw_com_ready(struct ipu_fw_com_context *ctx); +int ipu_fw_com_close(struct ipu_fw_com_context *ctx); +int ipu_fw_com_release(struct ipu_fw_com_context *ctx, unsigned int force); + +void *ipu_recv_get_token(struct ipu_fw_com_context *ctx, int q_nbr); +void ipu_recv_put_token(struct ipu_fw_com_context *ctx, int q_nbr); +void *ipu_send_get_token(struct ipu_fw_com_context *ctx, int q_nbr); +void ipu_send_put_token(struct ipu_fw_com_context *ctx, int q_nbr); + +#endif diff --git a/drivers/media/pci/intel/ipu-fw-isys.c b/drivers/media/pci/intel/ipu-fw-isys.c new file mode 100644 index 0000000000000..130d2ca4a438f --- /dev/null +++ b/drivers/media/pci/intel/ipu-fw-isys.c @@ -0,0 +1,218 @@ +// SPDX-License-Identifier: GPL-2.0 +// Copyright (C) 2013 - 2018 Intel Corporation + +#include + +#include +#include +#include "ipu-platform-regs.h" +#include "ipu-fw-isys.h" +#include "ipu-fw-com.h" +#include "ipu-isys.h" + +#define IPU_FW_UNSUPPORTED_DATA_TYPE 0 +static const uint32_t +extracted_bits_per_pixel_per_mipi_data_type[N_IPU_FW_ISYS_MIPI_DATA_TYPE] = { + + 64, /* [0x00] IPU_FW_ISYS_MIPI_DATA_TYPE_FRAME_START_CODE */ + 64, /* [0x01] IPU_FW_ISYS_MIPI_DATA_TYPE_FRAME_END_CODE */ + 64, /* [0x02] IPU_FW_ISYS_MIPI_DATA_TYPE_LINE_START_CODE */ + 64, /* [0x03] IPU_FW_ISYS_MIPI_DATA_TYPE_LINE_END_CODE */ + IPU_FW_UNSUPPORTED_DATA_TYPE, /* [0x04] */ + IPU_FW_UNSUPPORTED_DATA_TYPE, /* [0x05] */ + 
IPU_FW_UNSUPPORTED_DATA_TYPE, /* [0x06] */ + IPU_FW_UNSUPPORTED_DATA_TYPE, /* [0x07] */ + 64, /* [0x08] IPU_FW_ISYS_MIPI_DATA_TYPE_GENERIC_SHORT1 */ + 64, /* [0x09] IPU_FW_ISYS_MIPI_DATA_TYPE_GENERIC_SHORT2 */ + 64, /* [0x0A] IPU_FW_ISYS_MIPI_DATA_TYPE_GENERIC_SHORT3 */ + 64, /* [0x0B] IPU_FW_ISYS_MIPI_DATA_TYPE_GENERIC_SHORT4 */ + 64, /* [0x0C] IPU_FW_ISYS_MIPI_DATA_TYPE_GENERIC_SHORT5 */ + 64, /* [0x0D] IPU_FW_ISYS_MIPI_DATA_TYPE_GENERIC_SHORT6 */ + 64, /* [0x0E] IPU_FW_ISYS_MIPI_DATA_TYPE_GENERIC_SHORT7 */ + 64, /* [0x0F] IPU_FW_ISYS_MIPI_DATA_TYPE_GENERIC_SHORT8 */ + IPU_FW_UNSUPPORTED_DATA_TYPE, /* [0x10] */ + IPU_FW_UNSUPPORTED_DATA_TYPE, /* [0x11] */ + 8, /* [0x12] IPU_FW_ISYS_MIPI_DATA_TYPE_EMBEDDED */ + IPU_FW_UNSUPPORTED_DATA_TYPE, /* [0x13] */ + IPU_FW_UNSUPPORTED_DATA_TYPE, /* [0x14] */ + IPU_FW_UNSUPPORTED_DATA_TYPE, /* [0x15] */ + IPU_FW_UNSUPPORTED_DATA_TYPE, /* [0x16] */ + IPU_FW_UNSUPPORTED_DATA_TYPE, /* [0x17] */ + 12, /* [0x18] IPU_FW_ISYS_MIPI_DATA_TYPE_YUV420_8 */ + 15, /* [0x19] IPU_FW_ISYS_MIPI_DATA_TYPE_YUV420_10 */ + 12, /* [0x1A] IPU_FW_ISYS_MIPI_DATA_TYPE_YUV420_8_LEGACY */ + IPU_FW_UNSUPPORTED_DATA_TYPE, /* [0x1B] */ + 12, /* [0x1C] IPU_FW_ISYS_MIPI_DATA_TYPE_YUV420_8_SHIFT */ + 15, /* [0x1D] IPU_FW_ISYS_MIPI_DATA_TYPE_YUV420_10_SHIFT */ + 16, /* [0x1E] IPU_FW_ISYS_MIPI_DATA_TYPE_YUV422_8 */ + 20, /* [0x1F] IPU_FW_ISYS_MIPI_DATA_TYPE_YUV422_10 */ + 16, /* [0x20] IPU_FW_ISYS_MIPI_DATA_TYPE_RGB_444 */ + 16, /* [0x21] IPU_FW_ISYS_MIPI_DATA_TYPE_RGB_555 */ + 16, /* [0x22] IPU_FW_ISYS_MIPI_DATA_TYPE_RGB_565 */ + 18, /* [0x23] IPU_FW_ISYS_MIPI_DATA_TYPE_RGB_666 */ + 24, /* [0x24] IPU_FW_ISYS_MIPI_DATA_TYPE_RGB_888 */ + IPU_FW_UNSUPPORTED_DATA_TYPE, /* [0x25] */ + IPU_FW_UNSUPPORTED_DATA_TYPE, /* [0x26] */ + IPU_FW_UNSUPPORTED_DATA_TYPE, /* [0x27] */ + 6, /* [0x28] IPU_FW_ISYS_MIPI_DATA_TYPE_RAW_6 */ + 7, /* [0x29] IPU_FW_ISYS_MIPI_DATA_TYPE_RAW_7 */ + 8, /* [0x2A] IPU_FW_ISYS_MIPI_DATA_TYPE_RAW_8 */ + 10, /* [0x2B] 
IPU_FW_ISYS_MIPI_DATA_TYPE_RAW_10 */ + 12, /* [0x2C] IPU_FW_ISYS_MIPI_DATA_TYPE_RAW_12 */ + 14, /* [0x2D] IPU_FW_ISYS_MIPI_DATA_TYPE_RAW_14 */ + 16, /* [0x2E] IPU_FW_ISYS_MIPI_DATA_TYPE_RAW_16 */ + 8, /* [0x2F] IPU_FW_ISYS_MIPI_DATA_TYPE_BINARY_8 */ + 8, /* [0x30] IPU_FW_ISYS_MIPI_DATA_TYPE_USER_DEF1 */ + 8, /* [0x31] IPU_FW_ISYS_MIPI_DATA_TYPE_USER_DEF2 */ + 8, /* [0x32] IPU_FW_ISYS_MIPI_DATA_TYPE_USER_DEF3 */ + 8, /* [0x33] IPU_FW_ISYS_MIPI_DATA_TYPE_USER_DEF4 */ + 8, /* [0x34] IPU_FW_ISYS_MIPI_DATA_TYPE_USER_DEF5 */ + 8, /* [0x35] IPU_FW_ISYS_MIPI_DATA_TYPE_USER_DEF6 */ + 8, /* [0x36] IPU_FW_ISYS_MIPI_DATA_TYPE_USER_DEF7 */ + 8, /* [0x37] IPU_FW_ISYS_MIPI_DATA_TYPE_USER_DEF8 */ + IPU_FW_UNSUPPORTED_DATA_TYPE, /* [0x38] */ + IPU_FW_UNSUPPORTED_DATA_TYPE, /* [0x39] */ + IPU_FW_UNSUPPORTED_DATA_TYPE, /* [0x3A] */ + IPU_FW_UNSUPPORTED_DATA_TYPE, /* [0x3B] */ + IPU_FW_UNSUPPORTED_DATA_TYPE, /* [0x3C] */ + IPU_FW_UNSUPPORTED_DATA_TYPE, /* [0x3D] */ + IPU_FW_UNSUPPORTED_DATA_TYPE, /* [0x3E] */ + IPU_FW_UNSUPPORTED_DATA_TYPE /* [0x3F] */ +}; + + +void ipu_fw_isys_set_params(struct ipu_fw_isys_stream_cfg_data_abi *stream_cfg) +{ + unsigned int i; + unsigned int idx; + + for (i = 0; i < stream_cfg->nof_input_pins; i++) { + idx = stream_cfg->input_pins[i].dt; + stream_cfg->input_pins[i].bits_per_pix = + extracted_bits_per_pixel_per_mipi_data_type[idx]; + stream_cfg->input_pins[i].mapped_dt = + N_IPU_FW_ISYS_MIPI_DATA_TYPE; + } +} + +void +ipu_fw_isys_dump_stream_cfg(struct device *dev, + struct ipu_fw_isys_stream_cfg_data_abi *stream_cfg) +{ + unsigned int i; + + dev_dbg(dev, "---------------------------\n"); + dev_dbg(dev, "IPU_FW_ISYS_STREAM_CFG_DATA\n"); + dev_dbg(dev, "---------------------------\n"); + + dev_dbg(dev, "Source %d\n", stream_cfg->src); + dev_dbg(dev, "VC %d\n", stream_cfg->vc); + dev_dbg(dev, "Nof input pins %d\n", stream_cfg->nof_input_pins); + dev_dbg(dev, "Nof output pins %d\n", stream_cfg->nof_output_pins); + + for (i = 0; i < 
stream_cfg->nof_input_pins; i++) { + dev_dbg(dev, "Input pin %d\n", i); + dev_dbg(dev, "Mipi data type 0x%0x\n", + stream_cfg->input_pins[i].dt); + dev_dbg(dev, "Mipi store mode %d\n", + stream_cfg->input_pins[i].mipi_store_mode); + dev_dbg(dev, "Bits per pixel %d\n", + stream_cfg->input_pins[i].bits_per_pix); + dev_dbg(dev, "Mapped data type 0x%0x\n", + stream_cfg->input_pins[i].mapped_dt); + dev_dbg(dev, "Input res width %d\n", + stream_cfg->input_pins[i].input_res.width); + dev_dbg(dev, "Input res height %d\n", + stream_cfg->input_pins[i].input_res.height); + } + + for (i = 0; i < N_IPU_FW_ISYS_CROPPING_LOCATION; i++) { + dev_dbg(dev, "Crop info %d\n", i); + dev_dbg(dev, "Crop.top_offset %d\n", + stream_cfg->crop[i].top_offset); + dev_dbg(dev, "Crop.left_offset %d\n", + stream_cfg->crop[i].left_offset); + dev_dbg(dev, "Crop.bottom_offset %d\n", + stream_cfg->crop[i].bottom_offset); + dev_dbg(dev, "Crop.right_offset %d\n", + stream_cfg->crop[i].right_offset); + dev_dbg(dev, "----------------\n"); + } + + for (i = 0; i < stream_cfg->nof_output_pins; i++) { + dev_dbg(dev, "Output pin %d\n", i); + dev_dbg(dev, "Output input pin id %d\n", + stream_cfg->output_pins[i].input_pin_id); + dev_dbg(dev, "Output res width %d\n", + stream_cfg->output_pins[i].output_res.width); + dev_dbg(dev, "Output res height %d\n", + stream_cfg->output_pins[i].output_res.height); + dev_dbg(dev, "Stride %d\n", stream_cfg->output_pins[i].stride); + dev_dbg(dev, "Pin type %d\n", stream_cfg->output_pins[i].pt); + dev_dbg(dev, "Ft %d\n", stream_cfg->output_pins[i].ft); + dev_dbg(dev, "Watermar in lines %d\n", + stream_cfg->output_pins[i].watermark_in_lines); + dev_dbg(dev, "Send irq %d\n", + stream_cfg->output_pins[i].send_irq); + dev_dbg(dev, "Reserve compression %d\n", + stream_cfg->output_pins[i].reserve_compression); + dev_dbg(dev, "snoopable %d\n", + stream_cfg->output_pins[i].snoopable); + dev_dbg(dev, "sensor type %d\n", + stream_cfg->output_pins[i].sensor_type); + dev_dbg(dev, 
"----------------\n"); + } + + dev_dbg(dev, "Isl_use %d\n", stream_cfg->isl_use); + switch (stream_cfg->isl_use) { + case IPU_FW_ISYS_USE_SINGLE_ISA: + dev_dbg(dev, "ISA cfg:\n"); + dev_dbg(dev, "blc_enabled %d\n", stream_cfg->isa_cfg.cfg.blc); + dev_dbg(dev, "lsc_enabled %d\n", stream_cfg->isa_cfg.cfg.lsc); + dev_dbg(dev, "dpc_enabled %d\n", stream_cfg->isa_cfg.cfg.dpc); + dev_dbg(dev, "downscaler_enabled %d\n", + stream_cfg->isa_cfg.cfg.downscaler); + dev_dbg(dev, "awb_enabled %d\n", stream_cfg->isa_cfg.cfg.awb); + dev_dbg(dev, "af_enabled %d\n", stream_cfg->isa_cfg.cfg.af); + dev_dbg(dev, "ae_enabled %d\n", stream_cfg->isa_cfg.cfg.ae); + break; + case IPU_FW_ISYS_USE_SINGLE_DUAL_ISL: + case IPU_FW_ISYS_USE_NO_ISL_NO_ISA: + default: + break; + } +} + +void ipu_fw_isys_dump_frame_buff_set(struct device *dev, + struct ipu_fw_isys_frame_buff_set_abi *buf, + unsigned int outputs) +{ + unsigned int i; + + dev_dbg(dev, "--------------------------\n"); + dev_dbg(dev, "IPU_FW_ISYS_FRAME_BUFF_SET\n"); + dev_dbg(dev, "--------------------------\n"); + + for (i = 0; i < outputs; i++) { + dev_dbg(dev, "Output pin %d\n", i); + dev_dbg(dev, "out_buf_id %llu\n", + buf->output_pins[i].out_buf_id); + dev_dbg(dev, "addr 0x%x\n", buf->output_pins[i].addr); + dev_dbg(dev, "compress %u\n", buf->output_pins[i].compress); + + dev_dbg(dev, "----------------\n"); + } + + dev_dbg(dev, "process_group_light.addr 0x%x\n", + buf->process_group_light.addr); + dev_dbg(dev, "process_group_light.param_buf_id %llu\n", + buf->process_group_light.param_buf_id); + dev_dbg(dev, "send_irq_sof 0x%x\n", buf->send_irq_sof); + dev_dbg(dev, "send_irq_eof 0x%x\n", buf->send_irq_eof); + dev_dbg(dev, "send_resp_sof 0x%x\n", buf->send_resp_sof); + dev_dbg(dev, "send_resp_eof 0x%x\n", buf->send_resp_eof); +#if defined(CONFIG_VIDEO_INTEL_IPU4) || defined(CONFIG_VIDEO_INTEL_IPU4P) + dev_dbg(dev, "send_irq_capture_ack 0x%x\n", buf->send_irq_capture_ack); + dev_dbg(dev, "send_irq_capture_done 0x%x\n", 
buf->send_irq_capture_done); +#endif +} diff --git a/drivers/media/pci/intel/ipu-fw-isys.h b/drivers/media/pci/intel/ipu-fw-isys.h new file mode 100644 index 0000000000000..2853e1e1c9d96 --- /dev/null +++ b/drivers/media/pci/intel/ipu-fw-isys.h @@ -0,0 +1,885 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (C) 2013 - 2018 Intel Corporation */ + +#ifndef IPU_FW_ISYS_H +#define IPU_FW_ISYS_H + +#include "ipu-fw-com.h" + +/* Max number of Input/Output Pins */ +#define IPU_MAX_IPINS 4 + +/* worst case is ISA use where a single input pin produces: + * Mipi output, NS Pixel Output, and Scaled Pixel Output. + * This is how the 2 is calculated + */ +#define IPU_MAX_OPINS ((IPU_MAX_IPINS) + 2) + +/* Max number of supported virtual streams */ +#if defined(CONFIG_VIDEO_INTEL_IPU4) || defined(CONFIG_VIDEO_INTEL_IPU4P) +#define IPU_STREAM_ID_MAX 8 +#else +#define IPU_STREAM_ID_MAX 16 +#endif + +/* Aligned with the approach of having one dedicated per stream */ +#define IPU_N_MAX_MSG_SEND_QUEUES (IPU_STREAM_ID_MAX) +/* Single return queue for all streams/commands type */ +#define IPU_N_MAX_MSG_RECV_QUEUES 1 +/* Single device queue for high priority commands (bypass in-order queue) */ +#define IPU_N_MAX_DEV_SEND_QUEUES 1 +/* Single dedicated send queue for proxy interface */ +#define IPU_N_MAX_PROXY_SEND_QUEUES 1 +/* Single dedicated recv queue for proxy interface */ +#define IPU_N_MAX_PROXY_RECV_QUEUES 1 +/* Send queues layout */ +#define IPU_BASE_PROXY_SEND_QUEUES 0 +#define IPU_BASE_DEV_SEND_QUEUES \ + (IPU_BASE_PROXY_SEND_QUEUES + IPU_N_MAX_PROXY_SEND_QUEUES) +#define IPU_BASE_MSG_SEND_QUEUES \ + (IPU_BASE_DEV_SEND_QUEUES + IPU_N_MAX_DEV_SEND_QUEUES) +#define IPU_N_MAX_SEND_QUEUES \ + (IPU_BASE_MSG_SEND_QUEUES + IPU_N_MAX_MSG_SEND_QUEUES) +/* Recv queues layout */ +#define IPU_BASE_PROXY_RECV_QUEUES 0 +#define IPU_BASE_MSG_RECV_QUEUES \ + (IPU_BASE_PROXY_RECV_QUEUES + IPU_N_MAX_PROXY_RECV_QUEUES) +#define IPU_N_MAX_RECV_QUEUES \ + (IPU_BASE_MSG_RECV_QUEUES + 
IPU_N_MAX_MSG_RECV_QUEUES) + +/* Consider 1 slot per stream since driver is not expected to pipeline + * device commands for the same stream + */ +#define IPU_DEV_SEND_QUEUE_SIZE (IPU_STREAM_ID_MAX) + +/* Max number of supported SRAM buffer partitions. + * It refers to the size of stream partitions. + * These partitions are further subpartitioned internally + * by the FW, but by declaring statically the stream + * partitions we solve the buffer fragmentation issue + */ +#define IPU_NOF_SRAM_BLOCKS_MAX (IPU_STREAM_ID_MAX) + +/* Max number of supported input pins routed in ISL */ +#define IPU_MAX_IPINS_IN_ISL 2 + +/* Max number of planes for frame formats supported by the FW */ +#define IPU_PIN_PLANES_MAX 4 + +/** + * enum ipu_fw_isys_resp_type + */ +enum ipu_fw_isys_resp_type { + IPU_FW_ISYS_RESP_TYPE_STREAM_OPEN_DONE = 0, + IPU_FW_ISYS_RESP_TYPE_STREAM_START_ACK, + IPU_FW_ISYS_RESP_TYPE_STREAM_START_AND_CAPTURE_ACK, + IPU_FW_ISYS_RESP_TYPE_STREAM_CAPTURE_ACK, + IPU_FW_ISYS_RESP_TYPE_STREAM_STOP_ACK, + IPU_FW_ISYS_RESP_TYPE_STREAM_FLUSH_ACK, + IPU_FW_ISYS_RESP_TYPE_STREAM_CLOSE_ACK, + IPU_FW_ISYS_RESP_TYPE_PIN_DATA_READY, + IPU_FW_ISYS_RESP_TYPE_PIN_DATA_WATERMARK, + IPU_FW_ISYS_RESP_TYPE_FRAME_SOF, + IPU_FW_ISYS_RESP_TYPE_FRAME_EOF, + IPU_FW_ISYS_RESP_TYPE_STREAM_START_AND_CAPTURE_DONE, + IPU_FW_ISYS_RESP_TYPE_STREAM_CAPTURE_DONE, + IPU_FW_ISYS_RESP_TYPE_PIN_DATA_SKIPPED, + IPU_FW_ISYS_RESP_TYPE_STREAM_CAPTURE_SKIPPED, + IPU_FW_ISYS_RESP_TYPE_FRAME_SOF_DISCARDED, + IPU_FW_ISYS_RESP_TYPE_FRAME_EOF_DISCARDED, + IPU_FW_ISYS_RESP_TYPE_STATS_DATA_READY, + N_IPU_FW_ISYS_RESP_TYPE +}; + +/** + * enum ipu_fw_isys_send_type + */ +enum ipu_fw_isys_send_type { + IPU_FW_ISYS_SEND_TYPE_STREAM_OPEN = 0, + IPU_FW_ISYS_SEND_TYPE_STREAM_START, + IPU_FW_ISYS_SEND_TYPE_STREAM_START_AND_CAPTURE, + IPU_FW_ISYS_SEND_TYPE_STREAM_CAPTURE, + IPU_FW_ISYS_SEND_TYPE_STREAM_STOP, + IPU_FW_ISYS_SEND_TYPE_STREAM_FLUSH, + IPU_FW_ISYS_SEND_TYPE_STREAM_CLOSE, + N_IPU_FW_ISYS_SEND_TYPE +}; + +/** + 
* enum ipu_fw_isys_queue_type + */ +enum ipu_fw_isys_queue_type { + IPU_FW_ISYS_QUEUE_TYPE_PROXY = 0, + IPU_FW_ISYS_QUEUE_TYPE_DEV, + IPU_FW_ISYS_QUEUE_TYPE_MSG, + N_IPU_FW_ISYS_QUEUE_TYPE +}; + +/** + * enum ipu_fw_isys_stream_source: Specifies a source for a stream + */ +enum ipu_fw_isys_stream_source { + IPU_FW_ISYS_STREAM_SRC_PORT_0 = 0, + IPU_FW_ISYS_STREAM_SRC_PORT_1, + IPU_FW_ISYS_STREAM_SRC_PORT_2, + IPU_FW_ISYS_STREAM_SRC_PORT_3, + IPU_FW_ISYS_STREAM_SRC_PORT_4, + IPU_FW_ISYS_STREAM_SRC_PORT_5, + IPU_FW_ISYS_STREAM_SRC_PORT_6, + IPU_FW_ISYS_STREAM_SRC_PORT_7, + IPU_FW_ISYS_STREAM_SRC_PORT_8, + IPU_FW_ISYS_STREAM_SRC_PORT_9, + IPU_FW_ISYS_STREAM_SRC_PORT_10, + IPU_FW_ISYS_STREAM_SRC_PORT_11, + IPU_FW_ISYS_STREAM_SRC_PORT_12, + IPU_FW_ISYS_STREAM_SRC_PORT_13, + IPU_FW_ISYS_STREAM_SRC_PORT_14, + IPU_FW_ISYS_STREAM_SRC_PORT_15, + IPU_FW_ISYS_STREAM_SRC_MIPIGEN_0, + IPU_FW_ISYS_STREAM_SRC_MIPIGEN_1, + IPU_FW_ISYS_STREAM_SRC_MIPIGEN_2, + IPU_FW_ISYS_STREAM_SRC_MIPIGEN_3, + IPU_FW_ISYS_STREAM_SRC_MIPIGEN_4, + IPU_FW_ISYS_STREAM_SRC_MIPIGEN_5, + IPU_FW_ISYS_STREAM_SRC_MIPIGEN_6, + IPU_FW_ISYS_STREAM_SRC_MIPIGEN_7, + IPU_FW_ISYS_STREAM_SRC_MIPIGEN_8, + IPU_FW_ISYS_STREAM_SRC_MIPIGEN_9, + N_IPU_FW_ISYS_STREAM_SRC +}; + +#if !defined(CONFIG_VIDEO_INTEL_IPU4) && !defined(CONFIG_VIDEO_INTEL_IPU4P) +enum ipu_fw_isys_sensor_type { + /* non-snoopable to PSYS */ + IPU_FW_ISYS_VC1_SENSOR_DATA = 0, + /* non-snoopable for PDAF */ + IPU_FW_ISYS_VC1_SENSOR_PDAF, + /* snoopable to CPU */ + IPU_FW_ISYS_VC0_SENSOR_METADATA, + /* snoopable to CPU */ + IPU_FW_ISYS_VC0_SENSOR_DATA, + N_IPU_FW_ISYS_SENSOR_TYPE +}; + +enum ipu_fw_isys_sensor_info { + /* VC1 */ + IPU_FW_ISYS_SENSOR_DATA_1 = 1, + IPU_FW_ISYS_SENSOR_DATA_2 = 2, + IPU_FW_ISYS_SENSOR_DATA_3 = 3, + IPU_FW_ISYS_SENSOR_DATA_4 = 4, + IPU_FW_ISYS_SENSOR_DATA_5 = 5, + IPU_FW_ISYS_SENSOR_DATA_6 = 6, + IPU_FW_ISYS_SENSOR_DATA_7 = 7, + IPU_FW_ISYS_SENSOR_DATA_8 = 8, + IPU_FW_ISYS_SENSOR_DATA_9 = 9, + IPU_FW_ISYS_SENSOR_DATA_10 = 10, 
+ IPU_FW_ISYS_SENSOR_PDAF_1 = 11, + IPU_FW_ISYS_SENSOR_PDAF_2 = 12, + /* VC0 */ + IPU_FW_ISYS_SENSOR_METADATA = 13, + IPU_FW_ISYS_SENSOR_DATA_11 = 14, + IPU_FW_ISYS_SENSOR_DATA_12 = 15, + IPU_FW_ISYS_SENSOR_DATA_13 = 16, + IPU_FW_ISYS_SENSOR_DATA_14 = 17, + IPU_FW_ISYS_SENSOR_DATA_15 = 18, + IPU_FW_ISYS_SENSOR_DATA_16 = 19, + N_IPU_FW_ISYS_SENSOR_INFO, + IPU_FW_ISYS_VC1_SENSOR_DATA_START = IPU_FW_ISYS_SENSOR_DATA_1, + IPU_FW_ISYS_VC1_SENSOR_DATA_END = IPU_FW_ISYS_SENSOR_DATA_10, + IPU_FW_ISYS_VC0_SENSOR_DATA_START = IPU_FW_ISYS_SENSOR_DATA_11, + IPU_FW_ISYS_VC0_SENSOR_DATA_END = IPU_FW_ISYS_SENSOR_DATA_16, + IPU_FW_ISYS_VC1_SENSOR_PDAF_START = IPU_FW_ISYS_SENSOR_PDAF_1, + IPU_FW_ISYS_VC1_SENSOR_PDAF_END = IPU_FW_ISYS_SENSOR_PDAF_2, +}; +#endif + +#define IPU_FW_ISYS_STREAM_SRC_CSI2_PORT0 IPU_FW_ISYS_STREAM_SRC_PORT_0 +#define IPU_FW_ISYS_STREAM_SRC_CSI2_PORT1 IPU_FW_ISYS_STREAM_SRC_PORT_1 +#define IPU_FW_ISYS_STREAM_SRC_CSI2_PORT2 IPU_FW_ISYS_STREAM_SRC_PORT_2 +#define IPU_FW_ISYS_STREAM_SRC_CSI2_PORT3 IPU_FW_ISYS_STREAM_SRC_PORT_3 + +#define IPU_FW_ISYS_STREAM_SRC_CSI2_3PH_PORTA IPU_FW_ISYS_STREAM_SRC_PORT_4 +#define IPU_FW_ISYS_STREAM_SRC_CSI2_3PH_PORTB IPU_FW_ISYS_STREAM_SRC_PORT_5 +#define IPU_FW_ISYS_STREAM_SRC_CSI2_3PH_CPHY_PORT0 IPU_FW_ISYS_STREAM_SRC_PORT_6 +#define IPU_FW_ISYS_STREAM_SRC_CSI2_3PH_CPHY_PORT1 IPU_FW_ISYS_STREAM_SRC_PORT_7 +#define IPU_FW_ISYS_STREAM_SRC_CSI2_3PH_CPHY_PORT2 IPU_FW_ISYS_STREAM_SRC_PORT_8 +#define IPU_FW_ISYS_STREAM_SRC_CSI2_3PH_CPHY_PORT3 IPU_FW_ISYS_STREAM_SRC_PORT_9 + +#define IPU_FW_ISYS_STREAM_SRC_MIPIGEN_PORT0 IPU_FW_ISYS_STREAM_SRC_MIPIGEN_0 +#define IPU_FW_ISYS_STREAM_SRC_MIPIGEN_PORT1 IPU_FW_ISYS_STREAM_SRC_MIPIGEN_1 + +/** + * enum ipu_fw_isys_mipi_vc: MIPI csi2 spec + * supports up to 4 virtual per physical channel + */ +enum ipu_fw_isys_mipi_vc { + IPU_FW_ISYS_MIPI_VC_0 = 0, + IPU_FW_ISYS_MIPI_VC_1, + IPU_FW_ISYS_MIPI_VC_2, + IPU_FW_ISYS_MIPI_VC_3, + N_IPU_FW_ISYS_MIPI_VC +}; + +/** + * Supported Pixel Frame 
formats. Expandable if needed + */ +enum ipu_fw_isys_frame_format_type { + IPU_FW_ISYS_FRAME_FORMAT_NV11 = 0, /* 12 bit YUV 411, Y, UV plane */ + IPU_FW_ISYS_FRAME_FORMAT_NV12, /* 12 bit YUV 420, Y, UV plane */ + IPU_FW_ISYS_FRAME_FORMAT_NV12_16, /* 16 bit YUV 420, Y, UV plane */ + IPU_FW_ISYS_FRAME_FORMAT_NV12_TILEY, /* 12 bit YUV 420, + * Intel proprietary tiled format, + * TileY + */ + IPU_FW_ISYS_FRAME_FORMAT_NV16, /* 16 bit YUV 422, Y, UV plane */ + IPU_FW_ISYS_FRAME_FORMAT_NV21, /* 12 bit YUV 420, Y, VU plane */ + IPU_FW_ISYS_FRAME_FORMAT_NV61, /* 16 bit YUV 422, Y, VU plane */ + IPU_FW_ISYS_FRAME_FORMAT_YV12, /* 12 bit YUV 420, Y, V, U plane */ + IPU_FW_ISYS_FRAME_FORMAT_YV16, /* 16 bit YUV 422, Y, V, U plane */ + IPU_FW_ISYS_FRAME_FORMAT_YUV420, /* 12 bit YUV 420, Y, U, V plane */ + IPU_FW_ISYS_FRAME_FORMAT_YUV420_10, /* yuv420, 10 bits per subpixel */ + IPU_FW_ISYS_FRAME_FORMAT_YUV420_12, /* yuv420, 12 bits per subpixel */ + IPU_FW_ISYS_FRAME_FORMAT_YUV420_14, /* yuv420, 14 bits per subpixel */ + IPU_FW_ISYS_FRAME_FORMAT_YUV420_16, /* yuv420, 16 bits per subpixel */ + IPU_FW_ISYS_FRAME_FORMAT_YUV422, /* 16 bit YUV 422, Y, U, V plane */ + IPU_FW_ISYS_FRAME_FORMAT_YUV422_16, /* yuv422, 16 bits per subpixel */ + IPU_FW_ISYS_FRAME_FORMAT_UYVY, /* 16 bit YUV 422, UYVY interleaved */ + IPU_FW_ISYS_FRAME_FORMAT_YUYV, /* 16 bit YUV 422, YUYV interleaved */ + IPU_FW_ISYS_FRAME_FORMAT_YUV444, /* 24 bit YUV 444, Y, U, V plane */ + IPU_FW_ISYS_FRAME_FORMAT_YUV_LINE, /* Internal format, 2 y lines + * followed by a uvinterleaved line + */ + IPU_FW_ISYS_FRAME_FORMAT_RAW8, /* RAW8, 1 plane */ + IPU_FW_ISYS_FRAME_FORMAT_RAW10, /* RAW10, 1 plane */ + IPU_FW_ISYS_FRAME_FORMAT_RAW12, /* RAW12, 1 plane */ + IPU_FW_ISYS_FRAME_FORMAT_RAW14, /* RAW14, 1 plane */ + IPU_FW_ISYS_FRAME_FORMAT_RAW16, /* RAW16, 1 plane */ + IPU_FW_ISYS_FRAME_FORMAT_RGB565, /* 16 bit RGB, 1 plane. 
Each 3 sub + * pixels are packed into one 16 bit + * value, 5 bits for R, 6 bits + * for G and 5 bits for B. + */ + + IPU_FW_ISYS_FRAME_FORMAT_PLANAR_RGB888, /* 24 bit RGB, 3 planes */ + IPU_FW_ISYS_FRAME_FORMAT_RGBA888, /* 32 bit RGBA, 1 plane, + * A=Alpha (alpha is unused) + */ + IPU_FW_ISYS_FRAME_FORMAT_QPLANE6, /* Internal, for advanced ISP */ + IPU_FW_ISYS_FRAME_FORMAT_BINARY_8, /* byte stream, used for jpeg. */ + N_IPU_FW_ISYS_FRAME_FORMAT +}; + +/* Temporary for driver compatibility */ +#define IPU_FW_ISYS_FRAME_FORMAT_RAW (IPU_FW_ISYS_FRAME_FORMAT_RAW16) + +/** + * Supported MIPI data type. Keep in sync array in ipu_fw_isys_private.c + */ +enum ipu_fw_isys_mipi_data_type { + /** SYNCHRONIZATION SHORT PACKET DATA TYPES */ + IPU_FW_ISYS_MIPI_DATA_TYPE_FRAME_START_CODE = 0x00, + IPU_FW_ISYS_MIPI_DATA_TYPE_FRAME_END_CODE = 0x01, + IPU_FW_ISYS_MIPI_DATA_TYPE_LINE_START_CODE = 0x02, /* Optional */ + IPU_FW_ISYS_MIPI_DATA_TYPE_LINE_END_CODE = 0x03, /* Optional */ + /** Reserved 0x04-0x07 */ + IPU_FW_ISYS_MIPI_DATA_TYPE_RESERVED_0x04 = 0x04, + IPU_FW_ISYS_MIPI_DATA_TYPE_RESERVED_0x05 = 0x05, + IPU_FW_ISYS_MIPI_DATA_TYPE_RESERVED_0x06 = 0x06, + IPU_FW_ISYS_MIPI_DATA_TYPE_RESERVED_0x07 = 0x07, + /** GENERIC SHORT PACKET DATA TYPES */ + /** They are used to keep the timing information for + * the opening/closing of shutters, + * triggering of flashes and etc. 
+ */ + /* Generic Short Packet Codes 1 - 8 */ + IPU_FW_ISYS_MIPI_DATA_TYPE_GENERIC_SHORT1 = 0x08, + IPU_FW_ISYS_MIPI_DATA_TYPE_GENERIC_SHORT2 = 0x09, + IPU_FW_ISYS_MIPI_DATA_TYPE_GENERIC_SHORT3 = 0x0A, + IPU_FW_ISYS_MIPI_DATA_TYPE_GENERIC_SHORT4 = 0x0B, + IPU_FW_ISYS_MIPI_DATA_TYPE_GENERIC_SHORT5 = 0x0C, + IPU_FW_ISYS_MIPI_DATA_TYPE_GENERIC_SHORT6 = 0x0D, + IPU_FW_ISYS_MIPI_DATA_TYPE_GENERIC_SHORT7 = 0x0E, + IPU_FW_ISYS_MIPI_DATA_TYPE_GENERIC_SHORT8 = 0x0F, + /** GENERIC LONG PACKET DATA TYPES */ + IPU_FW_ISYS_MIPI_DATA_TYPE_NULL = 0x10, + IPU_FW_ISYS_MIPI_DATA_TYPE_BLANKING_DATA = 0x11, + /* Embedded 8-bit non Image Data */ + IPU_FW_ISYS_MIPI_DATA_TYPE_EMBEDDED = 0x12, + /** Reserved 0x13-0x17 */ + IPU_FW_ISYS_MIPI_DATA_TYPE_RESERVED_0x13 = 0x13, + IPU_FW_ISYS_MIPI_DATA_TYPE_RESERVED_0x14 = 0x14, + IPU_FW_ISYS_MIPI_DATA_TYPE_RESERVED_0x15 = 0x15, + IPU_FW_ISYS_MIPI_DATA_TYPE_RESERVED_0x16 = 0x16, + IPU_FW_ISYS_MIPI_DATA_TYPE_RESERVED_0x17 = 0x17, + /** YUV DATA TYPES */ + /* 8 bits per subpixel */ + IPU_FW_ISYS_MIPI_DATA_TYPE_YUV420_8 = 0x18, + /* 10 bits per subpixel */ + IPU_FW_ISYS_MIPI_DATA_TYPE_YUV420_10 = 0x19, + /* 8 bits per subpixel */ + IPU_FW_ISYS_MIPI_DATA_TYPE_YUV420_8_LEGACY = 0x1A, + /** Reserved 0x1B */ + IPU_FW_ISYS_MIPI_DATA_TYPE_RESERVED_0x1B = 0x1B, + /* YUV420 8-bit Chroma Shifted Pixel Sampling) */ + IPU_FW_ISYS_MIPI_DATA_TYPE_YUV420_8_SHIFT = 0x1C, + /* YUV420 8-bit (Chroma Shifted Pixel Sampling) */ + IPU_FW_ISYS_MIPI_DATA_TYPE_YUV420_10_SHIFT = 0x1D, + /* UYVY..UVYV, 8 bits per subpixel */ + IPU_FW_ISYS_MIPI_DATA_TYPE_YUV422_8 = 0x1E, + /* UYVY..UVYV, 10 bits per subpixel */ + IPU_FW_ISYS_MIPI_DATA_TYPE_YUV422_10 = 0x1F, + /** RGB DATA TYPES */ + /* BGR..BGR, 4 bits per subpixel */ + IPU_FW_ISYS_MIPI_DATA_TYPE_RGB_444 = 0x20, + /* BGR..BGR, 5 bits per subpixel */ + IPU_FW_ISYS_MIPI_DATA_TYPE_RGB_555 = 0x21, + /* BGR..BGR, 5 bits B and R, 6 bits G */ + IPU_FW_ISYS_MIPI_DATA_TYPE_RGB_565 = 0x22, + /* BGR..BGR, 6 bits per subpixel */ + 
IPU_FW_ISYS_MIPI_DATA_TYPE_RGB_666 = 0x23, + /* BGR..BGR, 8 bits per subpixel */ + IPU_FW_ISYS_MIPI_DATA_TYPE_RGB_888 = 0x24, + /** Reserved 0x25-0x27 */ + IPU_FW_ISYS_MIPI_DATA_TYPE_RESERVED_0x25 = 0x25, + IPU_FW_ISYS_MIPI_DATA_TYPE_RESERVED_0x26 = 0x26, + IPU_FW_ISYS_MIPI_DATA_TYPE_RESERVED_0x27 = 0x27, + /** RAW DATA TYPES */ + /* RAW data, 6 - 14 bits per pixel */ + IPU_FW_ISYS_MIPI_DATA_TYPE_RAW_6 = 0x28, + IPU_FW_ISYS_MIPI_DATA_TYPE_RAW_7 = 0x29, + IPU_FW_ISYS_MIPI_DATA_TYPE_RAW_8 = 0x2A, + IPU_FW_ISYS_MIPI_DATA_TYPE_RAW_10 = 0x2B, + IPU_FW_ISYS_MIPI_DATA_TYPE_RAW_12 = 0x2C, + IPU_FW_ISYS_MIPI_DATA_TYPE_RAW_14 = 0x2D, + /** Reserved 0x2E-2F are used with assigned meaning */ + /* RAW data, 16 bits per pixel, not specified in CSI-MIPI standard */ + IPU_FW_ISYS_MIPI_DATA_TYPE_RAW_16 = 0x2E, + /* Binary byte stream, which is target at JPEG, + * not specified in CSI-MIPI standard + */ + IPU_FW_ISYS_MIPI_DATA_TYPE_BINARY_8 = 0x2F, + + /** USER DEFINED 8-BIT DATA TYPES */ + /** For example, the data transmitter (e.g. the SoC sensor) + * can keep the JPEG data as + * the User Defined Data Type 4 and the MPEG data as the + * User Defined Data Type 7. 
+ */ + IPU_FW_ISYS_MIPI_DATA_TYPE_USER_DEF1 = 0x30, + IPU_FW_ISYS_MIPI_DATA_TYPE_USER_DEF2 = 0x31, + IPU_FW_ISYS_MIPI_DATA_TYPE_USER_DEF3 = 0x32, + IPU_FW_ISYS_MIPI_DATA_TYPE_USER_DEF4 = 0x33, + IPU_FW_ISYS_MIPI_DATA_TYPE_USER_DEF5 = 0x34, + IPU_FW_ISYS_MIPI_DATA_TYPE_USER_DEF6 = 0x35, + IPU_FW_ISYS_MIPI_DATA_TYPE_USER_DEF7 = 0x36, + IPU_FW_ISYS_MIPI_DATA_TYPE_USER_DEF8 = 0x37, + /** Reserved 0x38-0x3F */ + IPU_FW_ISYS_MIPI_DATA_TYPE_RESERVED_0x38 = 0x38, + IPU_FW_ISYS_MIPI_DATA_TYPE_RESERVED_0x39 = 0x39, + IPU_FW_ISYS_MIPI_DATA_TYPE_RESERVED_0x3A = 0x3A, + IPU_FW_ISYS_MIPI_DATA_TYPE_RESERVED_0x3B = 0x3B, + IPU_FW_ISYS_MIPI_DATA_TYPE_RESERVED_0x3C = 0x3C, + IPU_FW_ISYS_MIPI_DATA_TYPE_RESERVED_0x3D = 0x3D, + IPU_FW_ISYS_MIPI_DATA_TYPE_RESERVED_0x3E = 0x3E, + IPU_FW_ISYS_MIPI_DATA_TYPE_RESERVED_0x3F = 0x3F, + + /* Keep always last and max value */ + N_IPU_FW_ISYS_MIPI_DATA_TYPE = 0x40 +}; + +/** enum ipu_fw_isys_pin_type: output pin buffer types. + * Buffers can be queued and de-queued to hand them over between IA and ISYS + */ +enum ipu_fw_isys_pin_type { + /* Captured as MIPI packets */ + IPU_FW_ISYS_PIN_TYPE_MIPI = 0, + /* Captured through the ISApf (with/without ISA) + * and the non-scaled output path + */ + IPU_FW_ISYS_PIN_TYPE_RAW_NS, + /* Captured through the ISApf + ISA and the scaled output path */ + IPU_FW_ISYS_PIN_TYPE_RAW_S, + /* Captured through the SoC path */ + IPU_FW_ISYS_PIN_TYPE_RAW_SOC, + /* Reserved for future use, maybe short packets */ + IPU_FW_ISYS_PIN_TYPE_METADATA_0, + /* Reserved for future use */ + IPU_FW_ISYS_PIN_TYPE_METADATA_1, + /* Legacy (non-PIV2), used for the AWB stats */ + IPU_FW_ISYS_PIN_TYPE_AWB_STATS, + /* Legacy (non-PIV2), used for the AF stats */ + IPU_FW_ISYS_PIN_TYPE_AF_STATS, + /* Legacy (non-PIV2), used for the AE stats */ + IPU_FW_ISYS_PIN_TYPE_HIST_STATS, + /* Used for the PAF FF */ + IPU_FW_ISYS_PIN_TYPE_PAF_FF, +#if !defined(CONFIG_VIDEO_INTEL_IPU4) && !defined(CONFIG_VIDEO_INTEL_IPU4P) + /* Captured through the SoC 
path + * (2D mode where odd and even lines are handled separately) + */ + IPU_FW_ISYS_PIN_TYPE_RAW_DUAL_SOC, +#endif + /* Keep always last and max value */ + N_IPU_FW_ISYS_PIN_TYPE +}; + +/** + * enum ipu_fw_isys_isl_use + * Describes the ISL/ISA use + */ +enum ipu_fw_isys_isl_use { + IPU_FW_ISYS_USE_NO_ISL_NO_ISA = 0, + IPU_FW_ISYS_USE_SINGLE_DUAL_ISL, + IPU_FW_ISYS_USE_SINGLE_ISA, + N_IPU_FW_ISYS_USE +}; + +/** + * enum ipu_fw_isys_mipi_store_mode. Describes if long MIPI packets reach + * MIPI SRAM with the long packet header or + * if not, then only option is to capture it with pin type MIPI. + */ +enum ipu_fw_isys_mipi_store_mode { + IPU_FW_ISYS_MIPI_STORE_MODE_NORMAL = 0, + IPU_FW_ISYS_MIPI_STORE_MODE_DISCARD_LONG_HEADER, + N_IPU_FW_ISYS_MIPI_STORE_MODE +}; + +/** + * enum ipu_fw_isys_type_paf. Describes the Type of PAF enabled + */ +enum ipu_fw_isys_type_paf { + /* PAF data not present */ + IPU_FW_ISYS_TYPE_NO_PAF = 0, + /* Type 2 sensor types, PAF coming separately from Image Frame */ + /* PAF data in interleaved format(RLRL or LRLR) */ + IPU_FW_ISYS_TYPE_INTERLEAVED_PAF, + /* PAF data in non-interleaved format(LL/RR or RR/LL) */ + IPU_FW_ISYS_TYPE_NON_INTERLEAVED_PAF, + /* Type 3 sensor types , PAF data embedded in Image Frame */ + /* Frame Embedded PAF in interleaved format(RLRL or LRLR) */ + IPU_FW_ISYS_TYPE_FRAME_EMB_INTERLEAVED_PAF, + /* Frame Embedded PAF non-interleaved format(LL/RR or RR/LL) */ + IPU_FW_ISYS_TYPE_FRAME_EMB_NON_INTERLEAVED_PAF, + N_IPU_FW_ISYS_TYPE_PAF +}; + +/** + * enum ipu_fw_isys_cropping_location. 
Enumerates the cropping locations in ISYS + */ +enum ipu_fw_isys_cropping_location { + /* Cropping executed in ISAPF (mainly), + * ISAPF preproc (odd column) and MIPI STR2MMIO (odd row) + */ + IPU_FW_ISYS_CROPPING_LOCATION_PRE_ISA = 0, + /* Reserved for legacy mode which will never be implemented */ + IPU_FW_ISYS_CROPPING_LOCATION_RESERVED_1, + /* Cropping executed in StreamPifConv in the ISA output for + * RAW_NS pin + */ + IPU_FW_ISYS_CROPPING_LOCATION_POST_ISA_NONSCALED, + /* Cropping executed in StreamScaledPifConv + * in the ISA output for RAW_S pin + */ + IPU_FW_ISYS_CROPPING_LOCATION_POST_ISA_SCALED, + N_IPU_FW_ISYS_CROPPING_LOCATION +}; + +/** + * enum ipu_fw_isys_resolution_info. Describes the resolution, + * required to setup the various ISA GP registers. + */ +enum ipu_fw_isys_resolution_info { + /* Scaled ISA output resolution before + * the StreamScaledPifConv cropping + */ + IPU_FW_ISYS_RESOLUTION_INFO_POST_ISA_NONSCALED = 0, + /* Non-Scaled ISA output resolution before the StreamPifConv cropping */ + IPU_FW_ISYS_RESOLUTION_INFO_POST_ISA_SCALED, + N_IPU_FW_ISYS_RESOLUTION_INFO +}; + +/** + * enum ipu_fw_isys_error. 
Describes the error type detected by the FW + */ +enum ipu_fw_isys_error { + IPU_FW_ISYS_ERROR_NONE = 0, /* No details */ + IPU_FW_ISYS_ERROR_FW_INTERNAL_CONSISTENCY, /* enum */ + IPU_FW_ISYS_ERROR_HW_CONSISTENCY, /* enum */ + IPU_FW_ISYS_ERROR_DRIVER_INVALID_COMMAND_SEQUENCE, /* enum */ + IPU_FW_ISYS_ERROR_DRIVER_INVALID_DEVICE_CONFIGURATION, /* enum */ + IPU_FW_ISYS_ERROR_DRIVER_INVALID_STREAM_CONFIGURATION, /* enum */ + IPU_FW_ISYS_ERROR_DRIVER_INVALID_FRAME_CONFIGURATION, /* enum */ + IPU_FW_ISYS_ERROR_INSUFFICIENT_RESOURCES, /* enum */ + IPU_FW_ISYS_ERROR_HW_REPORTED_STR2MMIO, /* HW code */ + IPU_FW_ISYS_ERROR_HW_REPORTED_SIG2CIO, /* HW code */ + IPU_FW_ISYS_ERROR_SENSOR_FW_SYNC, /* enum */ + IPU_FW_ISYS_ERROR_STREAM_IN_SUSPENSION, /* FW code */ + IPU_FW_ISYS_ERROR_RESPONSE_QUEUE_FULL, /* FW code */ + N_IPU_FW_ISYS_ERROR +}; + +/** + * enum ipu_fw_proxy_error. Describes the error type for + * the proxy detected by the FW + */ +enum ipu_fw_proxy_error { + IPU_FW_PROXY_ERROR_NONE = 0, + IPU_FW_PROXY_ERROR_INVALID_WRITE_REGION, + IPU_FW_PROXY_ERROR_INVALID_WRITE_OFFSET, + N_IPU_FW_PROXY_ERROR +}; + +struct ipu_isys; + +/** + * struct ipu_fw_isys_buffer_partition_abi - buffer partition information + * @num_gda_pages: Number of virtual gda pages available for each virtual stream + */ +struct ipu_fw_isys_buffer_partition_abi { + u32 num_gda_pages[IPU_STREAM_ID_MAX]; +}; + +/** + * struct ipu_fw_isys_fw_config - contains the parts from + * ia_css_isys_device_cfg_data we need to transfer to the cell + */ +struct ipu_fw_isys_fw_config { + struct ipu_fw_isys_buffer_partition_abi buffer_partition; + u32 num_send_queues[N_IPU_FW_ISYS_QUEUE_TYPE]; + u32 num_recv_queues[N_IPU_FW_ISYS_QUEUE_TYPE]; +}; + +/** + * struct ipu_fw_isys_resolution_abi: Generic resolution structure. 
+ * @Width + * @Height + */ +struct ipu_fw_isys_resolution_abi { + u32 width; + u32 height; +}; + +/** + * struct ipu_fw_isys_output_pin_payload_abi + * @out_buf_id: Points to output pin buffer - buffer identifier + * @addr: Points to output pin buffer - CSS Virtual Address + * @compress: Request frame compression (1), or not (0) + */ +struct ipu_fw_isys_output_pin_payload_abi { + u64 out_buf_id; + u32 addr; + u32 compress; +}; + +/** + * struct ipu_fw_isys_output_pin_info_abi + * @output_res: output pin resolution + * @stride: output stride in Bytes (not valid for statistics) + * @watermark_in_lines: pin watermark level in lines + * @payload_buf_size: minimum size in Bytes of all buffers that will be + * supplied for capture on this pin + * @send_irq: assert if pin event should trigger irq + * @pt: pin type -real format "enum ipu_fw_isys_pin_type" + * @ft: frame format type -real format "enum ipu_fw_isys_frame_format_type" + * @input_pin_id: related input pin id + * @reserve_compression: reserve compression resources for pin + */ +struct ipu_fw_isys_output_pin_info_abi { + struct ipu_fw_isys_resolution_abi output_res; + u32 stride; + u32 watermark_in_lines; + u32 payload_buf_size; + u8 send_irq; + u8 input_pin_id; + u8 pt; + u8 ft; + u8 reserved; + u8 reserve_compression; + u8 snoopable; + u32 sensor_type; +}; + +/** + * struct ipu_fw_isys_param_pin_abi + * @param_buf_id: Points to param port buffer - buffer identifier + * @addr: Points to param pin buffer - CSS Virtual Address + */ +struct ipu_fw_isys_param_pin_abi { + u64 param_buf_id; + u32 addr; +}; + +/** + * struct ipu_fw_isys_input_pin_info_abi + * @input_res: input resolution + * @dt: mipi data type ((enum ipu_fw_isys_mipi_data_type) + * @mipi_store_mode: defines if legacy long packet header will be stored or + * discarded if discarded, output pin pin type for this + * input pin can only be MIPI + * (enum ipu_fw_isys_mipi_store_mode) + * @bits_per_pix: native bits per pixel + * @mapped_dt: actual data type 
from sensor +#if !defined(CONFIG_VIDEO_INTEL_IPU4) && !defined(CONFIG_VIDEO_INTEL_IPU4P) + * @crop_first_and_last_lines Control whether to crop the + * first and last line of the + * input image. Crop done by HW + * device. +#endif + */ +struct ipu_fw_isys_input_pin_info_abi { + struct ipu_fw_isys_resolution_abi input_res; + u8 dt; + u8 mipi_store_mode; + u8 bits_per_pix; + u8 mapped_dt; +#if !defined(CONFIG_VIDEO_INTEL_IPU4) && !defined(CONFIG_VIDEO_INTEL_IPU4P) + u8 crop_first_and_last_lines; +#endif +}; + +/** + * struct ipu_fw_isys_isa_cfg_abi. Describes the ISA cfg + */ +struct ipu_fw_isys_isa_cfg_abi { + struct ipu_fw_isys_resolution_abi + isa_res[N_IPU_FW_ISYS_RESOLUTION_INFO]; + struct { + unsigned int blc:1; + unsigned int lsc:1; + unsigned int dpc:1; + unsigned int downscaler:1; + unsigned int awb:1; + unsigned int af:1; + unsigned int ae:1; + unsigned int paf:8; + unsigned int send_irq_stats_ready:1; + unsigned int send_resp_stats_ready:1; + } cfg; +}; + +/** + * struct ipu_fw_isys_cropping_abi - cropping coordinates + */ +struct ipu_fw_isys_cropping_abi { + s32 top_offset; + s32 left_offset; + s32 bottom_offset; + s32 right_offset; +}; + +/** + * struct ipu_fw_isys_stream_cfg_data_abi + * ISYS stream configuration data structure + * @isa_cfg: details about what ACCs are active if ISA is used + * @crop: defines cropping resolution for the + * maximum number of input pins which can be cropped, + * it is directly mapped to the HW devices + * @input_pins: input pin descriptors + * @output_pins: output pin descriptors + * @compfmt: de-compression setting for User Defined Data + * @nof_input_pins: number of input pins + * @nof_output_pins: number of output pins + * @send_irq_sof_discarded: send irq on discarded frame sof response + * - if '1' it will override the send_resp_sof_discarded + * and send the response + * - if '0' the send_resp_sof_discarded will determine + * whether to send the response + * @send_irq_eof_discarded: send irq on discarded frame eof 
response + * - if '1' it will override the send_resp_eof_discarded + * and send the response + * - if '0' the send_resp_eof_discarded will determine + * whether to send the response + * @send_resp_sof_discarded: send response for discarded frame sof detected, + * used only when send_irq_sof_discarded is '0' + * @send_resp_eof_discarded: send response for discarded frame eof detected, + * used only when send_irq_eof_discarded is '0' + * @src: Stream source index e.g. MIPI_generator_0, CSI2-rx_1 + * @vc: MIPI Virtual Channel (up to 4 virtual per physical channel) + * @isl_use: indicates whether stream requires ISL and how + */ +struct ipu_fw_isys_stream_cfg_data_abi { + struct ipu_fw_isys_isa_cfg_abi isa_cfg; + struct ipu_fw_isys_cropping_abi crop[N_IPU_FW_ISYS_CROPPING_LOCATION]; + struct ipu_fw_isys_input_pin_info_abi input_pins[IPU_MAX_IPINS]; + struct ipu_fw_isys_output_pin_info_abi output_pins[IPU_MAX_OPINS]; + u32 compfmt; + u8 nof_input_pins; + u8 nof_output_pins; + u8 send_irq_sof_discarded; + u8 send_irq_eof_discarded; + u8 send_resp_sof_discarded; + u8 send_resp_eof_discarded; + u8 src; + u8 vc; + u8 isl_use; +}; + +/** + * struct ipu_fw_isys_frame_buff_set - frame buffer set + * @output_pins: output pin addresses + * @process_group_light: process_group_light buffer address + * @send_irq_sof: send irq on frame sof response + * - if '1' it will override the send_resp_sof and + * send the response + * - if '0' the send_resp_sof will determine whether to + * send the response + * @send_irq_eof: send irq on frame eof response + * - if '1' it will override the send_resp_eof and + * send the response + * - if '0' the send_resp_eof will determine whether to + * send the response + * @send_resp_sof: send response for frame sof detected, + * used only when send_irq_sof is '0' + * @send_resp_eof: send response for frame eof detected, + * used only when send_irq_eof is '0' + */ +struct ipu_fw_isys_frame_buff_set_abi { + struct ipu_fw_isys_output_pin_payload_abi 
output_pins[IPU_MAX_OPINS]; + struct ipu_fw_isys_param_pin_abi process_group_light; + u8 send_irq_sof; + u8 send_irq_eof; +#if defined(CONFIG_VIDEO_INTEL_IPU4) || defined(CONFIG_VIDEO_INTEL_IPU4P) + u8 send_irq_capture_ack; + u8 send_irq_capture_done; +#endif + u8 send_resp_sof; + u8 send_resp_eof; + u8 reserved; +}; + +/** + * struct ipu_fw_isys_error_info_abi + * @error: error code if something went wrong + * @error_details: depending on error code, it may contain additional error info + */ +struct ipu_fw_isys_error_info_abi { + enum ipu_fw_isys_error error; + u32 error_details; +}; + +/** + * struct ipu_fw_isys_resp_info_comm + * @pin: this var is only valid for pin event related responses, + * contains pin addresses + * @process_group_light: this var is valid for stats ready related responses, + * contains process group addresses + * @error_info: error information from the FW + * @timestamp: Time information for event if available + * @stream_handle: stream id the response corresponds to + * @type: response type (enum ipu_fw_isys_resp_type) + * @pin_id: pin id that the pin payload corresponds to + * @acc_id: this var is valid for stats ready related responses, + * contains accelerator id that finished producing + * all related statistics + */ +struct ipu_fw_isys_resp_info_abi { + u64 buf_id; + struct ipu_fw_isys_output_pin_payload_abi pin; + struct ipu_fw_isys_param_pin_abi process_group_light; + struct ipu_fw_isys_error_info_abi error_info; + u32 timestamp[2]; + u8 stream_handle; + u8 type; + u8 pin_id; + u8 acc_id; + u16 reserved; +}; + +/** + * struct ipu_fw_isys_proxy_error_info_comm + * @proxy_error: error code if something went wrong + * @proxy_error_details: depending on error code, it may contain additional + * error info + */ +struct ipu_fw_isys_proxy_error_info_abi { + enum ipu_fw_proxy_error error; + u32 error_details; +}; + +struct ipu_fw_isys_proxy_resp_info_abi { + u32 request_id; + struct ipu_fw_isys_proxy_error_info_abi error_info; +}; + +/** + 
* struct ipu_fw_proxy_write_queue_token + * @request_id: update id for the specific proxy write request + * @region_index: Region id for the proxy write request + * @offset: Offset of the write request according to the base address + * of the region + * @value: Value that is requested to be written with the proxy write request + */ +struct ipu_fw_proxy_write_queue_token { + u32 request_id; + u32 region_index; + u32 offset; + u32 value; +}; + +/* From here on type defines not coming from the ISYSAPI interface */ + +/** + * struct ipu_fw_resp_queue_token + */ +struct ipu_fw_resp_queue_token { + struct ipu_fw_isys_resp_info_abi resp_info; +}; + +/** + * struct ipu_fw_send_queue_token + */ +struct ipu_fw_send_queue_token { + u64 buf_handle; + u32 payload; + u16 send_type; + u16 stream_id; +}; + +/** + * struct ipu_fw_proxy_resp_queue_token + */ +struct ipu_fw_proxy_resp_queue_token { + struct ipu_fw_isys_proxy_resp_info_abi proxy_resp_info; +}; + +/** + * struct ipu_fw_proxy_send_queue_token + */ +struct ipu_fw_proxy_send_queue_token { + u32 request_id; + u32 region_index; + u32 offset; + u32 value; +}; + +void ipu_fw_isys_set_params(struct ipu_fw_isys_stream_cfg_data_abi *stream_cfg); + +void ipu_fw_isys_dump_stream_cfg(struct device *dev, + struct ipu_fw_isys_stream_cfg_data_abi + *stream_cfg); +void ipu_fw_isys_dump_frame_buff_set(struct device *dev, + struct ipu_fw_isys_frame_buff_set_abi *buf, + unsigned int outputs); +int ipu_fw_isys_init(struct ipu_isys *isys, unsigned int num_streams); +int ipu_fw_isys_close(struct ipu_isys *isys); +int ipu_fw_isys_simple_cmd(struct ipu_isys *isys, + const unsigned int stream_handle, + enum ipu_fw_isys_send_type send_type); +int ipu_fw_isys_complex_cmd(struct ipu_isys *isys, + const unsigned int stream_handle, + void *cpu_mapped_buf, + dma_addr_t dma_mapped_buf, + size_t size, enum ipu_fw_isys_send_type send_type); +int ipu_fw_isys_send_proxy_token(struct ipu_isys *isys, + unsigned int req_id, + unsigned int index, + unsigned 
int offset, u32 value); +void ipu_fw_isys_cleanup(struct ipu_isys *isys); +struct ipu_fw_isys_resp_info_abi *ipu_fw_isys_get_resp(void *context, + unsigned int queue, + struct + ipu_fw_isys_resp_info_abi + *response); +void ipu_fw_isys_put_resp(void *context, unsigned int queue); +#endif diff --git a/drivers/media/pci/intel/ipu-fw-psys.c b/drivers/media/pci/intel/ipu-fw-psys.c new file mode 100644 index 0000000000000..9ffe8cae1d884 --- /dev/null +++ b/drivers/media/pci/intel/ipu-fw-psys.c @@ -0,0 +1,314 @@ +// SPDX-License-Identifier: GPL-2.0 +// Copyright (C) 2016 - 2018 Intel Corporation + +#include + +#include + +#include "ipu-fw-com.h" +#include "ipu-fw-psys.h" +#include "ipu-psys.h" + +int ipu_fw_psys_pg_start(struct ipu_psys_kcmd *kcmd) +{ + kcmd->kpg->pg->state = IPU_FW_PSYS_PROCESS_GROUP_STARTED; + return 0; +} + +int ipu_fw_psys_pg_load_cycles(struct ipu_psys_kcmd *kcmd) +{ + return 0; +} + +int ipu_fw_psys_pg_init_cycles(struct ipu_psys_kcmd *kcmd) +{ + return 0; +} + +int ipu_fw_psys_pg_processing_cycles(struct ipu_psys_kcmd *kcmd) +{ + return 0; +} + + + +int ipu_fw_psys_pg_disown(struct ipu_psys_kcmd *kcmd) +{ + struct ipu_fw_psys_cmd *psys_cmd; + int ret = 0; + + psys_cmd = ipu_send_get_token(kcmd->fh->psys->fwcom, 0); + if (!psys_cmd) { + dev_err(&kcmd->fh->psys->adev->dev, "failed to get token!\n"); + kcmd->pg_user = NULL; + ret = -ENODATA; + goto out; + } + psys_cmd->command = IPU_FW_PSYS_PROCESS_GROUP_CMD_START; + psys_cmd->msg = 0; + psys_cmd->context_handle = kcmd->kpg->pg->ipu_virtual_address; + ipu_send_put_token(kcmd->fh->psys->fwcom, 0); + +out: + return ret; +} + + +int ipu_fw_psys_pg_abort(struct ipu_psys_kcmd *kcmd) +{ + struct ipu_fw_psys_cmd *psys_cmd; + int ret = 0; + + psys_cmd = ipu_send_get_token(kcmd->fh->psys->fwcom, 0); + if (!psys_cmd) { + dev_err(&kcmd->fh->psys->adev->dev, "failed to get token!\n"); + kcmd->pg_user = NULL; + ret = -ENODATA; + goto out; + } + psys_cmd->command = IPU_FW_PSYS_PROCESS_GROUP_CMD_STOP; + 
psys_cmd->msg = 0; + psys_cmd->context_handle = kcmd->kpg->pg->ipu_virtual_address; + ipu_send_put_token(kcmd->fh->psys->fwcom, 0); + +out: + return ret; +} + +int ipu_fw_psys_pg_submit(struct ipu_psys_kcmd *kcmd) +{ + kcmd->kpg->pg->state = IPU_FW_PSYS_PROCESS_GROUP_BLOCKED; + return 0; +} + +int ipu_fw_psys_rcv_event(struct ipu_psys *psys, + struct ipu_fw_psys_event *event) +{ + void *rcv; + + rcv = ipu_recv_get_token(psys->fwcom, 0); + if (!rcv) + return 0; + + memcpy(event, rcv, sizeof(*event)); + ipu_recv_put_token(psys->fwcom, 0); + return 1; +} + +int ipu_fw_psys_terminal_set(struct ipu_fw_psys_terminal *terminal, + int terminal_idx, + struct ipu_psys_kcmd *kcmd, + u32 buffer, unsigned int size) +{ + u32 type; + u32 buffer_state; + + type = terminal->terminal_type; + + switch (type) { + case IPU_FW_PSYS_TERMINAL_TYPE_PARAM_CACHED_IN: + case IPU_FW_PSYS_TERMINAL_TYPE_PARAM_CACHED_OUT: + case IPU_FW_PSYS_TERMINAL_TYPE_PARAM_SPATIAL_IN: + case IPU_FW_PSYS_TERMINAL_TYPE_PARAM_SPATIAL_OUT: + case IPU_FW_PSYS_TERMINAL_TYPE_PARAM_SLICED_IN: + case IPU_FW_PSYS_TERMINAL_TYPE_PARAM_SLICED_OUT: + case IPU_FW_PSYS_TERMINAL_TYPE_PROGRAM: + buffer_state = IPU_FW_PSYS_BUFFER_UNDEFINED; + break; + case IPU_FW_PSYS_TERMINAL_TYPE_PARAM_STREAM: + case IPU_FW_PSYS_TERMINAL_TYPE_DATA_IN: + case IPU_FW_PSYS_TERMINAL_TYPE_STATE_IN: + buffer_state = IPU_FW_PSYS_BUFFER_FULL; + break; + case IPU_FW_PSYS_TERMINAL_TYPE_DATA_OUT: + case IPU_FW_PSYS_TERMINAL_TYPE_STATE_OUT: + buffer_state = IPU_FW_PSYS_BUFFER_EMPTY; + break; + default: + dev_err(&kcmd->fh->psys->adev->dev, + "unknown terminal type: 0x%x\n", type); + return -EAGAIN; + } + + if (type == IPU_FW_PSYS_TERMINAL_TYPE_DATA_IN || + type == IPU_FW_PSYS_TERMINAL_TYPE_DATA_OUT) { + struct ipu_fw_psys_data_terminal *dterminal = + (struct ipu_fw_psys_data_terminal *)terminal; + dterminal->connection_type = IPU_FW_PSYS_CONNECTION_MEMORY; + dterminal->frame.data_bytes = size; + if (!ipu_fw_psys_pg_get_protocol(kcmd)) + 
dterminal->frame.data = buffer; + else + dterminal->frame.data_index = terminal_idx; + dterminal->frame.buffer_state = buffer_state; + } else { + struct ipu_fw_psys_param_terminal *pterminal = + (struct ipu_fw_psys_param_terminal *)terminal; + if (!ipu_fw_psys_pg_get_protocol(kcmd)) + pterminal->param_payload.buffer = buffer; + else + pterminal->param_payload.terminal_index = terminal_idx; + } + return 0; +} + +static int process_get_cell(struct ipu_fw_psys_process *process, int index) +{ + int cell; + +#if defined(CONFIG_VIDEO_INTEL_IPU4) || defined(CONFIG_VIDEO_INTEL_IPU4P) + cell = process->cell_id; +#else + cell = process->cells[index]; +#endif + return cell; +} + +static u32 process_get_cells_bitmap(struct ipu_fw_psys_process *process) +{ + unsigned int i; + int cell_id; + u32 bitmap = 0; + + for (i = 0; i < IPU_FW_PSYS_PROCESS_MAX_CELLS; i++) { + cell_id = process_get_cell(process, i); + if (cell_id != IPU_FW_PSYS_N_CELL_ID) + bitmap |= (1 << cell_id); + } + return bitmap; +} + +void ipu_fw_psys_pg_dump(struct ipu_psys *psys, + struct ipu_psys_kcmd *kcmd, const char *note) +{ + struct ipu_fw_psys_process_group *pg = kcmd->kpg->pg; + u32 pgid = pg->ID; + u8 processes = pg->process_count; + u16 *process_offset_table = (u16 *)((char *)pg + pg->processes_offset); + unsigned int p, chn, mem, mem_id; + int cell; + + dev_dbg(&psys->adev->dev, "%s %s pgid %i has %i processes:\n", + __func__, note, pgid, processes); + + for (p = 0; p < processes; p++) { + struct ipu_fw_psys_process *process = + (struct ipu_fw_psys_process *) + ((char *)pg + process_offset_table[p]); + cell = process_get_cell(process, 0); + dev_dbg(&psys->adev->dev, "\t process %i size=%u", + p, process->size); + dev_dbg(&psys->adev->dev, + "\t cell %i cell_bitmap=0x%x kernel_bitmap 0x%llx", + cell, process_get_cells_bitmap(process), + (u64) process->kernel_bitmap[1] << 32 | + (u64) process->kernel_bitmap[0]); + for (mem = 0; mem < IPU_FW_PSYS_N_DATA_MEM_TYPE_ID; mem++) { + mem_id = 
process->ext_mem_id[mem]; + if (mem_id != IPU_FW_PSYS_N_MEM_ID) + dev_dbg(&psys->adev->dev, + "\t mem type %u id %d offset=0x%x", + mem, mem_id, + process->ext_mem_offset[mem]); + } + for (chn = 0; chn < IPU_FW_PSYS_N_DEV_CHN_ID; chn++) { + if (process->dev_chn_offset[chn] != (u16)(-1)) + dev_dbg(&psys->adev->dev, + "\t dev_chn[%u]=0x%x\n", + chn, process->dev_chn_offset[chn]); + } + } +} + +int ipu_fw_psys_pg_get_id(struct ipu_psys_kcmd *kcmd) +{ + return kcmd->kpg->pg->ID; +} + +int ipu_fw_psys_pg_get_terminal_count(struct ipu_psys_kcmd *kcmd) +{ + return kcmd->kpg->pg->terminal_count; +} + +int ipu_fw_psys_pg_get_size(struct ipu_psys_kcmd *kcmd) +{ + return kcmd->kpg->pg->size; +} + +int ipu_fw_psys_pg_set_ipu_vaddress(struct ipu_psys_kcmd *kcmd, + dma_addr_t vaddress) +{ + kcmd->kpg->pg->ipu_virtual_address = vaddress; + return 0; +} + +struct ipu_fw_psys_terminal *ipu_fw_psys_pg_get_terminal(struct ipu_psys_kcmd + *kcmd, int index) +{ + struct ipu_fw_psys_terminal *terminal; + u16 *terminal_offset_table; + + terminal_offset_table = + (uint16_t *)((char *)kcmd->kpg->pg + + kcmd->kpg->pg->terminals_offset); + terminal = (struct ipu_fw_psys_terminal *) + ((char *)kcmd->kpg->pg + terminal_offset_table[index]); + return terminal; +} + +void ipu_fw_psys_pg_set_token(struct ipu_psys_kcmd *kcmd, u64 token) +{ + kcmd->kpg->pg->token = (u64)token; +} + +u64 ipu_fw_psys_pg_get_token(struct ipu_psys_kcmd *kcmd) +{ + return kcmd->kpg->pg->token; +} + +int ipu_fw_psys_pg_get_protocol(struct ipu_psys_kcmd *kcmd) +{ + return kcmd->kpg->pg->protocol_version; +} + + +int ipu_fw_psys_open(struct ipu_psys *psys) +{ + int retry = IPU_PSYS_OPEN_RETRY, retval; + + retval = ipu_fw_com_open(psys->fwcom); + if (retval) { + dev_err(&psys->adev->dev, "fw com open failed.\n"); + return retval; + } + + do { + usleep_range(IPU_PSYS_OPEN_TIMEOUT_US, + IPU_PSYS_OPEN_TIMEOUT_US + 10); + retval = ipu_fw_com_ready(psys->fwcom); + if (!retval) { + dev_dbg(&psys->adev->dev, "psys port open 
ready!\n"); + break; + } + } while (retry-- > 0); + + if (!retry && retval) { + dev_err(&psys->adev->dev, "psys port open ready failed %d\n", + retval); + ipu_fw_com_close(psys->fwcom); + return retval; + } + return 0; +} + +int ipu_fw_psys_close(struct ipu_psys *psys) +{ + int retval; + + retval = ipu_fw_com_close(psys->fwcom); + if (retval) { + dev_err(&psys->adev->dev, "fw com close failed.\n"); + return retval; + } + return retval; +} diff --git a/drivers/media/pci/intel/ipu-fw-psys.h b/drivers/media/pci/intel/ipu-fw-psys.h new file mode 100644 index 0000000000000..1738f6cec768a --- /dev/null +++ b/drivers/media/pci/intel/ipu-fw-psys.h @@ -0,0 +1,350 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (C) 2016 - 2018 Intel Corporation */ + +#ifndef IPU_FW_PSYS_H +#define IPU_FW_PSYS_H + +#include "ipu-platform-resources.h" + +/* ia_css_psys_device.c */ +#define IPU_FW_PSYS_CMD_QUEUE_SIZE 0x20 +#define IPU_FW_PSYS_EVENT_QUEUE_SIZE 0x40 + +/* ia_css_psys_transport.h */ +#define IPU_FW_PSYS_CMD_BITS 64 +#define IPU_FW_PSYS_EVENT_BITS 128 + +/* ia_css_psys_transport.h */ +enum { + IPU_FW_PSYS_EVENT_TYPE_SUCCESS = 0, + IPU_FW_PSYS_EVENT_TYPE_UNKNOWN_ERROR = 1, + IPU_FW_PSYS_EVENT_TYPE_RET_REM_OBJ_NOT_FOUND = 2, + IPU_FW_PSYS_EVENT_TYPE_RET_REM_OBJ_TOO_BIG = 3, + IPU_FW_PSYS_EVENT_TYPE_RET_REM_OBJ_DDR_TRANS_ERR = 4, + IPU_FW_PSYS_EVENT_TYPE_RET_REM_OBJ_NULL_PKG_DIR_ADDR = 5, + IPU_FW_PSYS_EVENT_TYPE_PROC_GRP_LOAD_FRAME_ERR = 6, + IPU_FW_PSYS_EVENT_TYPE_PROC_GRP_LOAD_FRAGMENT_ERR = 7, + IPU_FW_PSYS_EVENT_TYPE_PROC_GRP_PROCESS_COUNT_ZERO = 8, + IPU_FW_PSYS_EVENT_TYPE_PROC_GRP_PROCESS_INIT_ERR = 9, + IPU_FW_PSYS_EVENT_TYPE_PROC_GRP_ABORT = 10, + IPU_FW_PSYS_EVENT_TYPE_PROC_GRP_NULL = 11, + IPU_FW_PSYS_EVENT_TYPE_PROC_GRP_VALIDATION_ERR = 12 +}; + +enum { + IPU_FW_PSYS_EVENT_QUEUE_MAIN_ID, + IPU_FW_PSYS_N_PSYS_EVENT_QUEUE_ID +}; + +enum { + IPU_FW_PSYS_PROCESS_GROUP_ERROR = 0, + IPU_FW_PSYS_PROCESS_GROUP_CREATED, + IPU_FW_PSYS_PROCESS_GROUP_READY, + 
IPU_FW_PSYS_PROCESS_GROUP_BLOCKED, + IPU_FW_PSYS_PROCESS_GROUP_STARTED, + IPU_FW_PSYS_PROCESS_GROUP_RUNNING, + IPU_FW_PSYS_PROCESS_GROUP_STALLED, + IPU_FW_PSYS_PROCESS_GROUP_STOPPED, + IPU_FW_PSYS_N_PROCESS_GROUP_STATES +}; + +enum { + IPU_FW_PSYS_CONNECTION_MEMORY = 0, + IPU_FW_PSYS_CONNECTION_MEMORY_STREAM, + IPU_FW_PSYS_CONNECTION_STREAM, + IPU_FW_PSYS_N_CONNECTION_TYPES +}; + +enum { + IPU_FW_PSYS_BUFFER_NULL = 0, + IPU_FW_PSYS_BUFFER_UNDEFINED, + IPU_FW_PSYS_BUFFER_EMPTY, + IPU_FW_PSYS_BUFFER_NONEMPTY, + IPU_FW_PSYS_BUFFER_FULL, + IPU_FW_PSYS_N_BUFFER_STATES +}; + +enum { + IPU_FW_PSYS_TERMINAL_TYPE_DATA_IN = 0, + IPU_FW_PSYS_TERMINAL_TYPE_DATA_OUT, + IPU_FW_PSYS_TERMINAL_TYPE_PARAM_STREAM, + IPU_FW_PSYS_TERMINAL_TYPE_PARAM_CACHED_IN, + IPU_FW_PSYS_TERMINAL_TYPE_PARAM_CACHED_OUT, + IPU_FW_PSYS_TERMINAL_TYPE_PARAM_SPATIAL_IN, + IPU_FW_PSYS_TERMINAL_TYPE_PARAM_SPATIAL_OUT, + IPU_FW_PSYS_TERMINAL_TYPE_PARAM_SLICED_IN, + IPU_FW_PSYS_TERMINAL_TYPE_PARAM_SLICED_OUT, + IPU_FW_PSYS_TERMINAL_TYPE_STATE_IN, + IPU_FW_PSYS_TERMINAL_TYPE_STATE_OUT, + IPU_FW_PSYS_TERMINAL_TYPE_PROGRAM, + IPU_FW_PSYS_TERMINAL_TYPE_PROGRAM_CONTROL_INIT, + IPU_FW_PSYS_N_TERMINAL_TYPES +}; + +enum { + IPU_FW_PSYS_COL_DIMENSION = 0, + IPU_FW_PSYS_ROW_DIMENSION = 1, + IPU_FW_PSYS_N_DATA_DIMENSION = 2 +}; + +enum { + IPU_FW_PSYS_PROCESS_GROUP_CMD_NOP = 0, + IPU_FW_PSYS_PROCESS_GROUP_CMD_SUBMIT, + IPU_FW_PSYS_PROCESS_GROUP_CMD_ATTACH, + IPU_FW_PSYS_PROCESS_GROUP_CMD_DETACH, + IPU_FW_PSYS_PROCESS_GROUP_CMD_START, + IPU_FW_PSYS_PROCESS_GROUP_CMD_DISOWN, + IPU_FW_PSYS_PROCESS_GROUP_CMD_RUN, + IPU_FW_PSYS_PROCESS_GROUP_CMD_STOP, + IPU_FW_PSYS_PROCESS_GROUP_CMD_SUSPEND, + IPU_FW_PSYS_PROCESS_GROUP_CMD_RESUME, + IPU_FW_PSYS_PROCESS_GROUP_CMD_ABORT, + IPU_FW_PSYS_PROCESS_GROUP_CMD_RESET, + IPU_FW_PSYS_N_PROCESS_GROUP_CMDS +}; + +enum { + IPU_FW_PSYS_PROCESS_GROUP_PROTOCOL_LEGACY = 0, + IPU_FW_PSYS_PROCESS_GROUP_N_PROTOCOLS +}; + +/* ia_css_psys_process_group_cmd_impl.h */ +struct __packed 
ipu_fw_psys_process_group { + u64 token; + u64 private_token; + u32 routing_bitmap[IPU_FW_PSYS_RBM_NOF_ELEMS]; + u32 size; + u32 pg_load_start_ts; + u32 pg_load_cycles; + u32 pg_init_cycles; + u32 pg_processing_cycles; + u32 ID; + u32 state; + u32 ipu_virtual_address; + u32 resource_bitmap; + u16 fragment_count; + u16 fragment_state; + u16 fragment_limit; + u16 processes_offset; + u16 terminals_offset; + u8 process_count; + u8 terminal_count; + u8 subgraph_count; + u8 protocol_version; + u8 base_queue_id; + u8 num_queues; + u8 padding[IPU_FW_PSYS_N_PADDING_UINT8_IN_PROCESS_GROUP_STRUCT]; +}; + +/* ia_css_psys_init.h */ +struct ipu_fw_psys_srv_init { + void *host_ddr_pkg_dir; + u32 ddr_pkg_dir_address; + u32 pkg_dir_size; + + u32 icache_prefetch_sp; + u32 icache_prefetch_isp; +}; + +/* ia_css_psys_transport.h */ +struct __packed ipu_fw_psys_cmd { + u16 command; + u16 msg; + u32 context_handle; +}; + +struct __packed ipu_fw_psys_event { + u16 status; + u16 command; + u32 context_handle; + u64 token; +}; + +/* ia_css_terminal_base_types.h */ +struct ipu_fw_psys_terminal { + u32 terminal_type; + s16 parent_offset; + u16 size; + u16 tm_index; + u8 ID; + u8 padding[IPU_FW_PSYS_N_PADDING_UINT8_IN_TERMINAL_STRUCT]; +}; + +/* ia_css_terminal_types.h */ +struct ipu_fw_psys_param_payload { + u64 host_buffer; + u32 buffer; + u32 terminal_index; +}; + +/* ia_css_program_group_param_types.h */ +struct ipu_fw_psys_param_terminal { + struct ipu_fw_psys_terminal base; + struct ipu_fw_psys_param_payload param_payload; + u16 param_section_desc_offset; + u8 padding[IPU_FW_PSYS_N_PADDING_UINT8_IN_PARAM_TERMINAL_STRUCT]; +}; + +struct ipu_fw_psys_frame { + u32 buffer_state; + u32 access_type; + u32 pointer_state; + u32 access_scope; + u32 data; + u32 data_index; + u32 data_bytes; + u8 padding[IPU_FW_PSYS_N_PADDING_UINT8_IN_FRAME_STRUCT]; +}; + +/* ia_css_program_group_data.h */ +struct ipu_fw_psys_frame_descriptor { + u32 frame_format_type; + u32 plane_count; + u32 
plane_offsets[IPU_FW_PSYS_N_FRAME_PLANES]; + u32 stride[1]; + u16 dimension[2]; + u16 size; + u8 bpp; + u8 bpe; + u8 is_compressed; + u8 padding[IPU_FW_PSYS_N_PADDING_UINT8_IN_FRAME_DESC_STRUCT]; +}; + +struct ipu_fw_psys_stream { + u64 dummy; +}; + +/* ia_css_psys_terminal_private_types.h */ +struct ipu_fw_psys_data_terminal { + struct ipu_fw_psys_terminal base; + struct ipu_fw_psys_frame_descriptor frame_descriptor; + struct ipu_fw_psys_frame frame; + struct ipu_fw_psys_stream stream; + u32 reserved; + u32 connection_type; + u16 fragment_descriptor_offset; + u8 kernel_id; + u8 subgraph_id; + u8 padding[IPU_FW_PSYS_N_PADDING_UINT8_IN_DATA_TERMINAL_STRUCT]; +}; + +/* ia_css_psys_buffer_set.h */ +struct ipu_fw_psys_buffer_set { + u64 token; + u32 kernel_enable_bitmap[IPU_FW_PSYS_KERNEL_BITMAP_NOF_ELEMS]; + u32 ipu_virtual_address; + u32 process_group_handle; + u16 terminal_count; + u8 frame_counter; + u8 padding[IPU_FW_PSYS_N_PADDING_UINT8_IN_BUFFER_SET_STRUCT]; +}; + +struct ipu_fw_psys_program_group_manifest { + u32 kernel_bitmap[IPU_FW_PSYS_KERNEL_BITMAP_NOF_ELEMS]; + u32 ID; + u16 program_manifest_offset; + u16 terminal_manifest_offset; + u16 private_data_offset; + u16 rbm_manifest_offset; + u16 size; + u8 alignment; + u8 kernel_count; + u8 program_count; + u8 terminal_count; + u8 subgraph_count; + u8 reserved[5]; +}; + +struct ipu_fw_generic_program_manifest { + u16 *dev_chn_size; + u16 *dev_chn_offset; + u16 *ext_mem_size; + u16 *ext_mem_offset; + u8 cell_id; + u8 cells[IPU_FW_PSYS_PROCESS_MAX_CELLS]; + u8 cell_type_id; + u8 *is_dfm_relocatable; + u32 *dfm_port_bitmap; + u32 *dfm_active_port_bitmap; +}; + +struct ipu_fw_generic_process { + u16 ext_mem_id; + u16 ext_mem_offset; + u16 dev_chn_offset; + u16 cell_id; + u16 dfm_port_bitmap; + u16 dfm_active_port_bitmap; +}; + +struct ipu_fw_resource_definitions { + u32 num_cells; + u32 num_cells_type; +#if defined(CONFIG_VIDEO_INTEL_IPU4) || defined(CONFIG_VIDEO_INTEL_IPU4P) + const u32 *cells; +#else + const u8 
*cells; +#endif + u32 num_dev_channels; + const u16 *dev_channels; + + u32 num_ext_mem_types; + u32 num_ext_mem_ids; + const u16 *ext_mem_ids; + + u32 num_dfm_ids; + const u16 *dfms; + + u32 cell_mem_row; +#if defined(CONFIG_VIDEO_INTEL_IPU4) || defined(CONFIG_VIDEO_INTEL_IPU4P) + const enum ipu_mem_id *cell_mem; +#else + const u8 *cell_mem; +#endif + struct ipu_fw_generic_process process; +}; + +struct ipu_psys_kcmd; +struct ipu_psys; +int ipu_fw_psys_pg_start(struct ipu_psys_kcmd *kcmd); +int ipu_fw_psys_pg_disown(struct ipu_psys_kcmd *kcmd); +int ipu_fw_psys_pg_abort(struct ipu_psys_kcmd *kcmd); +int ipu_fw_psys_pg_submit(struct ipu_psys_kcmd *kcmd); +int ipu_fw_psys_pg_load_cycles(struct ipu_psys_kcmd *kcmd); +int ipu_fw_psys_pg_init_cycles(struct ipu_psys_kcmd *kcmd); +int ipu_fw_psys_pg_processing_cycles(struct ipu_psys_kcmd *kcmd); +int ipu_fw_psys_rcv_event(struct ipu_psys *psys, + struct ipu_fw_psys_event *event); +int ipu_fw_psys_terminal_set(struct ipu_fw_psys_terminal *terminal, + int terminal_idx, + struct ipu_psys_kcmd *kcmd, + u32 buffer, unsigned int size); +void ipu_fw_psys_pg_dump(struct ipu_psys *psys, + struct ipu_psys_kcmd *kcmd, const char *note); +int ipu_fw_psys_pg_get_id(struct ipu_psys_kcmd *kcmd); +int ipu_fw_psys_pg_get_terminal_count(struct ipu_psys_kcmd *kcmd); +int ipu_fw_psys_pg_get_size(struct ipu_psys_kcmd *kcmd); +int ipu_fw_psys_pg_set_ipu_vaddress(struct ipu_psys_kcmd *kcmd, + dma_addr_t vaddress); +struct ipu_fw_psys_terminal *ipu_fw_psys_pg_get_terminal(struct ipu_psys_kcmd + *kcmd, int index); +void ipu_fw_psys_pg_set_token(struct ipu_psys_kcmd *kcmd, u64 token); +u64 ipu_fw_psys_pg_get_token(struct ipu_psys_kcmd *kcmd); +int ipu_fw_psys_pg_get_protocol(struct ipu_psys_kcmd *kcmd); +int ipu_fw_psys_open(struct ipu_psys *psys); +int ipu_fw_psys_close(struct ipu_psys *psys); + +/* common resource interface for both abi and api mode */ +int ipu_fw_psys_set_process_cell_id(struct ipu_fw_psys_process *ptr, u8 index, + u8 value); 
+u8 ipu_fw_psys_get_process_cell_id(struct ipu_fw_psys_process *ptr, u8 index); +int ipu_fw_psys_clear_process_cell(struct ipu_fw_psys_process *ptr); +int ipu_fw_psys_set_process_dev_chn_offset(struct ipu_fw_psys_process *ptr, + u16 offset, u16 value); +int ipu_fw_psys_set_process_ext_mem(struct ipu_fw_psys_process *ptr, + u16 type_id, u16 mem_id, u16 offset); +int ipu_fw_psys_get_program_manifest_by_process( + struct ipu_fw_generic_program_manifest *gen_pm, + const struct ipu_fw_psys_program_group_manifest *pg_manifest, + struct ipu_fw_psys_process *process); +#endif /* IPU_FW_PSYS_H */ diff --git a/drivers/media/pci/intel/ipu-isys-csi2-be-soc.c b/drivers/media/pci/intel/ipu-isys-csi2-be-soc.c new file mode 100644 index 0000000000000..4faaa96d4724c --- /dev/null +++ b/drivers/media/pci/intel/ipu-isys-csi2-be-soc.c @@ -0,0 +1,364 @@ +// SPDX-License-Identifier: GPL-2.0 +// Copyright (C) 2014 - 2018 Intel Corporation + +#include +#include + +#include +#include +#include + +#include "ipu.h" +#include "ipu-bus.h" +#include "ipu-isys.h" +#include "ipu-isys-csi2-be.h" +#include "ipu-isys-subdev.h" +#include "ipu-isys-video.h" + +/* + * Raw bayer format pixel order MUST BE MAINTAINED in groups of four codes. + * Otherwise pixel order calculation below WILL BREAK! 
+ */ +static const u32 csi2_be_soc_supported_codes_pad[] = { + MEDIA_BUS_FMT_Y10_1X10, + MEDIA_BUS_FMT_RGB565_1X16, + MEDIA_BUS_FMT_RGB888_1X24, + /* YUV420 planar */ + MEDIA_BUS_FMT_UYVY8_2X8, + MEDIA_BUS_FMT_UYVY8_1X16, + MEDIA_BUS_FMT_YUYV8_1X16, + MEDIA_BUS_FMT_SBGGR14_1X14, + MEDIA_BUS_FMT_SGBRG14_1X14, + MEDIA_BUS_FMT_SGRBG14_1X14, + MEDIA_BUS_FMT_SRGGB14_1X14, + MEDIA_BUS_FMT_SBGGR12_1X12, + MEDIA_BUS_FMT_SGBRG12_1X12, + MEDIA_BUS_FMT_SGRBG12_1X12, + MEDIA_BUS_FMT_SRGGB12_1X12, + MEDIA_BUS_FMT_SBGGR10_1X10, + MEDIA_BUS_FMT_SGBRG10_1X10, + MEDIA_BUS_FMT_SGRBG10_1X10, + MEDIA_BUS_FMT_SRGGB10_1X10, + MEDIA_BUS_FMT_SBGGR8_1X8, + MEDIA_BUS_FMT_SGBRG8_1X8, + MEDIA_BUS_FMT_SGRBG8_1X8, + MEDIA_BUS_FMT_SRGGB8_1X8, + 0, +}; + +static const u32 *csi2_be_soc_supported_codes[NR_OF_CSI2_BE_SOC_PADS]; + +static struct v4l2_subdev_internal_ops csi2_be_soc_sd_internal_ops = { + .open = ipu_isys_subdev_open, + .close = ipu_isys_subdev_close, +}; + +static const struct v4l2_subdev_core_ops csi2_be_soc_sd_core_ops = { +}; + +static int set_stream(struct v4l2_subdev *sd, int enable) +{ + return 0; +} + +static const struct v4l2_subdev_video_ops csi2_be_soc_sd_video_ops = { + .s_stream = set_stream, +}; + +static int +__subdev_link_validate(struct v4l2_subdev *sd, struct media_link *link, + struct v4l2_subdev_format *source_fmt, + struct v4l2_subdev_format *sink_fmt) +{ + struct ipu_isys_pipeline *ip = container_of(sd->entity.pipe, + struct ipu_isys_pipeline, + pipe); + + ip->csi2_be_soc = to_ipu_isys_csi2_be_soc(sd); + return ipu_isys_subdev_link_validate(sd, link, source_fmt, sink_fmt); +} + +static int +ipu_isys_csi2_be_soc_set_sel(struct v4l2_subdev *sd, + struct v4l2_subdev_pad_config *cfg, + struct v4l2_subdev_selection *sel) +{ + struct ipu_isys_subdev *asd = to_ipu_isys_subdev(sd); + struct media_pad *pad = &asd->sd.entity.pads[sel->pad]; + + if (sel->target == V4L2_SEL_TGT_CROP && + pad->flags & MEDIA_PAD_FL_SOURCE && + asd->valid_tgts[sel->pad].crop) { + struct
v4l2_rect *r; + unsigned int sink_pad = 0; + int i; + + for (i = 0; i < asd->nstreams; i++) { + if (!(asd->route[i].flags & + V4L2_SUBDEV_ROUTE_FL_ACTIVE)) + continue; + if (asd->route[i].source == sel->pad) { + sink_pad = asd->route[i].sink; + break; + } + } + + if (i == asd->nstreams) { + dev_dbg(&asd->isys->adev->dev, "No sink pad routed.\n"); + return -EINVAL; + } + r = __ipu_isys_get_selection(sd, cfg, sel->target, + sink_pad, sel->which); + + /* Cropping is not supported by SoC BE. + * Only horizontal padding is allowed. + */ + sel->r.top = r->top; + sel->r.left = r->left; + sel->r.width = clamp(sel->r.width, r->width, + IPU_ISYS_MAX_WIDTH); + sel->r.height = r->height; + + *__ipu_isys_get_selection(sd, cfg, sel->target, sel->pad, + sel->which) = sel->r; + ipu_isys_subdev_fmt_propagate(sd, cfg, NULL, &sel->r, + IPU_ISYS_SUBDEV_PROP_TGT_SOURCE_CROP, + sel->pad, sel->which); + return 0; + } + return -EINVAL; +} + +static const struct v4l2_subdev_pad_ops csi2_be_soc_sd_pad_ops = { + .link_validate = __subdev_link_validate, + .get_fmt = ipu_isys_subdev_get_ffmt, + .set_fmt = ipu_isys_subdev_set_ffmt, + .get_selection = ipu_isys_subdev_get_sel, + .set_selection = ipu_isys_csi2_be_soc_set_sel, + .enum_mbus_code = ipu_isys_subdev_enum_mbus_code, + .set_routing = ipu_isys_subdev_set_routing, + .get_routing = ipu_isys_subdev_get_routing, +}; + +static struct v4l2_subdev_ops csi2_be_soc_sd_ops = { + .core = &csi2_be_soc_sd_core_ops, + .video = &csi2_be_soc_sd_video_ops, + .pad = &csi2_be_soc_sd_pad_ops, +}; + +static struct media_entity_operations csi2_be_soc_entity_ops = { + .link_validate = v4l2_subdev_link_validate, + .has_route = ipu_isys_subdev_has_route, +}; + +static void csi2_be_soc_set_ffmt(struct v4l2_subdev *sd, +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 1, 0) + struct v4l2_subdev_fh *cfg, +#else + struct v4l2_subdev_pad_config *cfg, +#endif + struct v4l2_subdev_format *fmt) +{ + struct v4l2_mbus_framefmt *ffmt = + __ipu_isys_get_ffmt(sd, cfg, fmt->pad, + 
fmt->stream, + fmt->which); + +#if !defined(CONFIG_VIDEO_INTEL_IPU4) && !defined(CONFIG_VIDEO_INTEL_IPU4P) + struct ipu_isys_csi2_be_soc *csi2_be_soc = + to_ipu_isys_csi2_be_soc(sd); +#endif + + if (sd->entity.pads[fmt->pad].flags & MEDIA_PAD_FL_SINK) { + if (fmt->format.field != V4L2_FIELD_ALTERNATE) + fmt->format.field = V4L2_FIELD_NONE; + *ffmt = fmt->format; + + ipu_isys_subdev_fmt_propagate(sd, cfg, &fmt->format, + NULL, + IPU_ISYS_SUBDEV_PROP_TGT_SINK_FMT, + fmt->pad, fmt->which); + } else if (sd->entity.pads[fmt->pad].flags & MEDIA_PAD_FL_SOURCE) { + struct v4l2_mbus_framefmt *sink_ffmt; + struct v4l2_rect *r; + struct ipu_isys_subdev *asd = to_ipu_isys_subdev(sd); + unsigned int sink_pad = 0; + int i; + + for (i = 0; i < asd->nsinks; i++) + if (media_entity_has_route(&sd->entity, fmt->pad, i)) + break; + if (i != asd->nsinks) + sink_pad = i; + sink_ffmt = __ipu_isys_get_ffmt(sd, cfg, sink_pad, + fmt->stream, + fmt->which); + r = __ipu_isys_get_selection(sd, cfg, V4L2_SEL_TGT_CROP, + fmt->pad, fmt->which); + + ffmt->width = r->width; + ffmt->height = r->height; + ffmt->code = sink_ffmt->code; + ffmt->field = sink_ffmt->field; + +#if !defined(CONFIG_VIDEO_INTEL_IPU4) && !defined(CONFIG_VIDEO_INTEL_IPU4P) + /* + * For new IPU special case, format changing in BE-SOC, + * from YUV422 to I420, which is used to adapt multiple + * YUV sensors and provide I420 to BB for partial processing. + * Use original source pad format from user space. 
+ * And change pin type to RAW_DUAL_SOC for this special case + */ + if (fmt->format.code == MEDIA_BUS_FMT_UYVY8_2X8 && + (sink_ffmt->code == MEDIA_BUS_FMT_YUYV8_1X16 || + sink_ffmt->code == MEDIA_BUS_FMT_UYVY8_1X16)) { + ffmt->code = fmt->format.code; + + for (i = 0; i < NR_OF_CSI2_BE_SOC_SOURCE_PADS; i++) + csi2_be_soc->av[i].aq.css_pin_type = + IPU_FW_ISYS_PIN_TYPE_RAW_DUAL_SOC; + } +#endif + } +} + +void ipu_isys_csi2_be_soc_cleanup(struct ipu_isys_csi2_be_soc *csi2_be_soc) +{ + int i; + + v4l2_device_unregister_subdev(&csi2_be_soc->asd.sd); + ipu_isys_subdev_cleanup(&csi2_be_soc->asd); + for (i = 0; i < NR_OF_CSI2_BE_SOC_STREAMS; i++) + ipu_isys_video_cleanup(&csi2_be_soc->av[i]); +} + +int ipu_isys_csi2_be_soc_init(struct ipu_isys_csi2_be_soc *csi2_be_soc, + struct ipu_isys *isys) +{ + struct v4l2_subdev_format fmt = { + .which = V4L2_SUBDEV_FORMAT_ACTIVE, + .pad = CSI2_BE_SOC_PAD_SINK(0), + .format = { + .width = 4096, + .height = 3072, + }, + }; + int rval, i; + + csi2_be_soc->asd.sd.entity.ops = &csi2_be_soc_entity_ops; + csi2_be_soc->asd.isys = isys; + + rval = ipu_isys_subdev_init(&csi2_be_soc->asd, + &csi2_be_soc_sd_ops, 0, + NR_OF_CSI2_BE_SOC_PADS, + NR_OF_CSI2_BE_SOC_STREAMS, + NR_OF_CSI2_BE_SOC_SOURCE_PADS, + NR_OF_CSI2_BE_SOC_SINK_PADS, 0); + if (rval) + goto fail; + + for (i = CSI2_BE_SOC_PAD_SINK(0); i < NR_OF_CSI2_BE_SOC_SINK_PADS; i++) + csi2_be_soc->asd.pad[i].flags = MEDIA_PAD_FL_SINK; + + for (i = CSI2_BE_SOC_PAD_SOURCE(0); + i < NR_OF_CSI2_BE_SOC_SOURCE_PADS + CSI2_BE_SOC_PAD_SOURCE(0); + i++) { + csi2_be_soc->asd.pad[i].flags = MEDIA_PAD_FL_SOURCE; + csi2_be_soc->asd.valid_tgts[i].crop = true; + } + + for (i = 0; i < NR_OF_CSI2_BE_SOC_PADS; i++) + csi2_be_soc_supported_codes[i] = + csi2_be_soc_supported_codes_pad; + csi2_be_soc->asd.supported_codes = csi2_be_soc_supported_codes; + csi2_be_soc->asd.be_mode = IPU_BE_SOC; + csi2_be_soc->asd.isl_mode = IPU_ISL_OFF; + csi2_be_soc->asd.set_ffmt = csi2_be_soc_set_ffmt; + + for (i = 
CSI2_BE_SOC_PAD_SINK(0); i < NR_OF_CSI2_BE_SOC_SINK_PADS; + i++) { + fmt.pad = CSI2_BE_SOC_PAD_SINK(i); + ipu_isys_subdev_set_ffmt(&csi2_be_soc->asd.sd, NULL, &fmt); + } + + ipu_isys_subdev_set_ffmt(&csi2_be_soc->asd.sd, NULL, &fmt); + csi2_be_soc->asd.sd.internal_ops = &csi2_be_soc_sd_internal_ops; + + snprintf(csi2_be_soc->asd.sd.name, sizeof(csi2_be_soc->asd.sd.name), + IPU_ISYS_ENTITY_PREFIX " CSI2 BE SOC"); + + v4l2_set_subdevdata(&csi2_be_soc->asd.sd, &csi2_be_soc->asd); + + mutex_lock(&csi2_be_soc->asd.mutex); + rval = v4l2_device_register_subdev(&isys->v4l2_dev, + &csi2_be_soc->asd.sd); + if (rval) { + dev_info(&isys->adev->dev, "can't register v4l2 subdev\n"); + goto fail; + } + + /* create default route information */ + for (i = 0; i < NR_OF_CSI2_BE_SOC_STREAMS; i++) { + csi2_be_soc->asd.route[i].sink = CSI2_BE_SOC_PAD_SINK(i); + csi2_be_soc->asd.route[i].source = CSI2_BE_SOC_PAD_SOURCE(i); + csi2_be_soc->asd.route[i].flags = 0; + } + + for (i = 0; i < NR_OF_CSI2_BE_SOC_SOURCE_PADS; i++) { + csi2_be_soc->asd.stream[CSI2_BE_SOC_PAD_SINK(i)].stream_id[0] + = 0; + csi2_be_soc->asd.stream[CSI2_BE_SOC_PAD_SOURCE(i)].stream_id[0] + = 0; + } + for (i = 0; i < NR_OF_CSI2_BE_SOC_STREAMS; i++) { + csi2_be_soc->asd.route[i].flags = V4L2_SUBDEV_ROUTE_FL_ACTIVE; + bitmap_set(csi2_be_soc->asd.stream[CSI2_BE_SOC_PAD_SINK(i)]. + streams_stat, 0, 1); + bitmap_set(csi2_be_soc->asd.stream[CSI2_BE_SOC_PAD_SOURCE(i)]. 
+ streams_stat, 0, 1); + } + csi2_be_soc->asd.route[0].flags |= V4L2_SUBDEV_ROUTE_FL_IMMUTABLE; + mutex_unlock(&csi2_be_soc->asd.mutex); + for (i = 0; i < NR_OF_CSI2_BE_SOC_SOURCE_PADS; i++) { + snprintf(csi2_be_soc->av[i].vdev.name, + sizeof(csi2_be_soc->av[i].vdev.name), + IPU_ISYS_ENTITY_PREFIX " BE SOC capture %d", i); + /* + * Pin type could be overwritten for YUV422 to I420 case, at + * set_format phase + */ + csi2_be_soc->av[i].aq.css_pin_type = + IPU_FW_ISYS_PIN_TYPE_RAW_SOC; + csi2_be_soc->av[i].isys = isys; + csi2_be_soc->av[i].pfmts = ipu_isys_pfmts_be_soc; + + csi2_be_soc->av[i].try_fmt_vid_mplane = + ipu_isys_video_try_fmt_vid_mplane_default; + csi2_be_soc->av[i].prepare_firmware_stream_cfg = + ipu_isys_prepare_firmware_stream_cfg_default; + csi2_be_soc->av[i].aq.buf_prepare = ipu_isys_buf_prepare; + csi2_be_soc->av[i].aq.fill_frame_buff_set_pin = + ipu_isys_buffer_list_to_ipu_fw_isys_frame_buff_set_pin; + csi2_be_soc->av[i].aq.link_fmt_validate = + ipu_isys_link_fmt_validate; + csi2_be_soc->av[i].aq.vbq.buf_struct_size = + sizeof(struct ipu_isys_video_buffer); + + rval = ipu_isys_video_init(&csi2_be_soc->av[i], + &csi2_be_soc->asd.sd.entity, + CSI2_BE_SOC_PAD_SOURCE(i), + MEDIA_PAD_FL_SINK, + MEDIA_LNK_FL_DYNAMIC); + if (rval) { + dev_info(&isys->adev->dev, "can't init video node\n"); + goto fail; + } + } + + return 0; + +fail: + ipu_isys_csi2_be_soc_cleanup(csi2_be_soc); + + return rval; +} diff --git a/drivers/media/pci/intel/ipu-isys-csi2-be.c b/drivers/media/pci/intel/ipu-isys-csi2-be.c new file mode 100644 index 0000000000000..028c2debc3d83 --- /dev/null +++ b/drivers/media/pci/intel/ipu-isys-csi2-be.c @@ -0,0 +1,306 @@ +// SPDX-License-Identifier: GPL-2.0 +// Copyright (C) 2014 - 2018 Intel Corporation + +#include +#include + +#include +#include +#include + +#include "ipu.h" +#include "ipu-bus.h" +#include "ipu-isys.h" +#include "ipu-isys-csi2-be.h" +#include "ipu-isys-subdev.h" +#include "ipu-isys-video.h" + +/* + * Raw bayer format pixel order 
MUST BE MAINTAINED in groups of four codes. + * Otherwise pixel order calculation below WILL BREAK! + */ +static const u32 csi2_be_supported_codes_pad[] = { + MEDIA_BUS_FMT_SBGGR14_1X14, + MEDIA_BUS_FMT_SGBRG14_1X14, + MEDIA_BUS_FMT_SGRBG14_1X14, + MEDIA_BUS_FMT_SRGGB14_1X14, + MEDIA_BUS_FMT_SBGGR12_1X12, + MEDIA_BUS_FMT_SGBRG12_1X12, + MEDIA_BUS_FMT_SGRBG12_1X12, + MEDIA_BUS_FMT_SRGGB12_1X12, + MEDIA_BUS_FMT_SBGGR10_1X10, + MEDIA_BUS_FMT_SGBRG10_1X10, + MEDIA_BUS_FMT_SGRBG10_1X10, + MEDIA_BUS_FMT_SRGGB10_1X10, + MEDIA_BUS_FMT_SBGGR8_1X8, + MEDIA_BUS_FMT_SGBRG8_1X8, + MEDIA_BUS_FMT_SGRBG8_1X8, + MEDIA_BUS_FMT_SRGGB8_1X8, + 0, +}; + +static const u32 *csi2_be_supported_codes[] = { + csi2_be_supported_codes_pad, + csi2_be_supported_codes_pad, +}; + +static struct v4l2_subdev_internal_ops csi2_be_sd_internal_ops = { + .open = ipu_isys_subdev_open, + .close = ipu_isys_subdev_close, +}; + +static const struct v4l2_subdev_core_ops csi2_be_sd_core_ops = { +}; + +static int set_stream(struct v4l2_subdev *sd, int enable) +{ + return 0; +} + +static const struct v4l2_subdev_video_ops csi2_be_sd_video_ops = { + .s_stream = set_stream, +}; + +static int __subdev_link_validate(struct v4l2_subdev *sd, + struct media_link *link, + struct v4l2_subdev_format *source_fmt, + struct v4l2_subdev_format *sink_fmt) +{ + struct ipu_isys_pipeline *ip = container_of(sd->entity.pipe, + struct ipu_isys_pipeline, + pipe); + + ip->csi2_be = to_ipu_isys_csi2_be(sd); + return ipu_isys_subdev_link_validate(sd, link, source_fmt, sink_fmt); +} + +static int get_supported_code_index(u32 code) +{ + int i; + + for (i = 0; csi2_be_supported_codes_pad[i]; i++) { + if (csi2_be_supported_codes_pad[i] == code) + return i; + } + return -EINVAL; +} + +static int ipu_isys_csi2_be_set_sel(struct v4l2_subdev *sd, + struct v4l2_subdev_pad_config *cfg, + struct v4l2_subdev_selection *sel) +{ + struct ipu_isys_subdev *asd = to_ipu_isys_subdev(sd); + struct media_pad *pad = &asd->sd.entity.pads[sel->pad]; + + if 
(sel->target == V4L2_SEL_TGT_CROP && + pad->flags & MEDIA_PAD_FL_SOURCE && + asd->valid_tgts[CSI2_BE_PAD_SOURCE].crop) { + struct v4l2_mbus_framefmt *ffmt = + __ipu_isys_get_ffmt(sd, cfg, sel->pad, 0, sel->which); + struct v4l2_rect *r = __ipu_isys_get_selection + (sd, cfg, sel->target, CSI2_BE_PAD_SINK, sel->which); + + if (get_supported_code_index(ffmt->code) < 0) { + /* Non-bayer formats can't be single line cropped */ + sel->r.left &= ~1; + sel->r.top &= ~1; + + /* Non-bayer formats can't be padded at all */ + sel->r.width = clamp(sel->r.width, + IPU_ISYS_MIN_WIDTH, r->width); + } else { + sel->r.width = clamp(sel->r.width, + IPU_ISYS_MIN_WIDTH, + IPU_ISYS_MAX_WIDTH); + } + + /* + * ISAPF can pad only horizontally, height is + * restricted by sink pad resolution. + */ + sel->r.height = clamp(sel->r.height, IPU_ISYS_MIN_HEIGHT, + r->height); + *__ipu_isys_get_selection(sd, cfg, sel->target, sel->pad, + sel->which) = sel->r; + ipu_isys_subdev_fmt_propagate + (sd, cfg, NULL, &sel->r, + IPU_ISYS_SUBDEV_PROP_TGT_SOURCE_CROP, + sel->pad, sel->which); + return 0; + } + return ipu_isys_subdev_set_sel(sd, cfg, sel); +} + +static const struct v4l2_subdev_pad_ops csi2_be_sd_pad_ops = { + .link_validate = __subdev_link_validate, + .get_fmt = ipu_isys_subdev_get_ffmt, + .set_fmt = ipu_isys_subdev_set_ffmt, + .get_selection = ipu_isys_subdev_get_sel, + .set_selection = ipu_isys_csi2_be_set_sel, + .enum_mbus_code = ipu_isys_subdev_enum_mbus_code, +}; + +static struct v4l2_subdev_ops csi2_be_sd_ops = { + .core = &csi2_be_sd_core_ops, + .video = &csi2_be_sd_video_ops, + .pad = &csi2_be_sd_pad_ops, +}; + +static struct media_entity_operations csi2_be_entity_ops = { + .link_validate = v4l2_subdev_link_validate, +}; + +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 1, 0) +static void csi2_be_set_ffmt(struct v4l2_subdev *sd, + struct v4l2_subdev_fh *cfg, + struct v4l2_subdev_format *fmt) +#else +static void csi2_be_set_ffmt(struct v4l2_subdev *sd, + struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_format *fmt) +#endif +{ + struct ipu_isys_csi2 *csi2 = to_ipu_isys_csi2(sd); + struct v4l2_mbus_framefmt *ffmt = + __ipu_isys_get_ffmt(sd, cfg, fmt->pad, fmt->stream, + fmt->which); + + switch (fmt->pad) { + case CSI2_BE_PAD_SINK: + if (fmt->format.field != V4L2_FIELD_ALTERNATE) + fmt->format.field = V4L2_FIELD_NONE; + *ffmt = fmt->format; + + ipu_isys_subdev_fmt_propagate + (sd, cfg, &fmt->format, NULL, + IPU_ISYS_SUBDEV_PROP_TGT_SINK_FMT, fmt->pad, fmt->which); + return; + case CSI2_BE_PAD_SOURCE: { + struct v4l2_mbus_framefmt *sink_ffmt = + __ipu_isys_get_ffmt(sd, cfg, CSI2_BE_PAD_SINK, + fmt->stream, fmt->which); + struct v4l2_rect *r = + __ipu_isys_get_selection(sd, cfg, V4L2_SEL_TGT_CROP, + CSI2_BE_PAD_SOURCE, + fmt->which); + struct ipu_isys_subdev *asd = to_ipu_isys_subdev(sd); + u32 code = sink_ffmt->code; + int idx = get_supported_code_index(code); + + if (asd->valid_tgts[CSI2_BE_PAD_SOURCE].crop && idx >= 0) { + int crop_info = 0; + + if (r->top & 1) + crop_info |= CSI2_BE_CROP_VER; + if (r->left & 1) + crop_info |= CSI2_BE_CROP_HOR; + code = csi2_be_supported_codes_pad + [((idx & CSI2_BE_CROP_MASK) ^ crop_info) + + (idx & ~CSI2_BE_CROP_MASK)]; + } + ffmt->width = r->width; + ffmt->height = r->height; + ffmt->code = code; + ffmt->field = sink_ffmt->field; + return; + } + default: + dev_err(&csi2->isys->adev->dev, "Unknown pad type\n"); + WARN_ON(1); + } +} + +void ipu_isys_csi2_be_cleanup(struct ipu_isys_csi2_be *csi2_be) +{ + v4l2_device_unregister_subdev(&csi2_be->asd.sd); + ipu_isys_subdev_cleanup(&csi2_be->asd); + ipu_isys_video_cleanup(&csi2_be->av); +} + +int ipu_isys_csi2_be_init(struct ipu_isys_csi2_be *csi2_be, + struct ipu_isys *isys) +{ + struct v4l2_subdev_format fmt = { + .which = V4L2_SUBDEV_FORMAT_ACTIVE, + .pad = CSI2_BE_PAD_SINK, + .format = { + .width = 4096, + .height = 3072, + }, + }; + struct v4l2_subdev_selection sel = { + .which = V4L2_SUBDEV_FORMAT_ACTIVE, + .pad = CSI2_BE_PAD_SOURCE, + .target = 
V4L2_SEL_TGT_CROP, + .r = { + .width = fmt.format.width, + .height = fmt.format.height, + }, + }; + int rval; + + csi2_be->asd.sd.entity.ops = &csi2_be_entity_ops; + csi2_be->asd.isys = isys; + + rval = ipu_isys_subdev_init(&csi2_be->asd, &csi2_be_sd_ops, 0, + NR_OF_CSI2_BE_PADS, + NR_OF_CSI2_BE_STREAMS, + NR_OF_CSI2_BE_SOURCE_PADS, + NR_OF_CSI2_BE_SINK_PADS, 0); + if (rval) + goto fail; + + csi2_be->asd.pad[CSI2_BE_PAD_SINK].flags = MEDIA_PAD_FL_SINK + | MEDIA_PAD_FL_MUST_CONNECT; + csi2_be->asd.pad[CSI2_BE_PAD_SOURCE].flags = MEDIA_PAD_FL_SOURCE; + csi2_be->asd.valid_tgts[CSI2_BE_PAD_SOURCE].crop = true; + csi2_be->asd.set_ffmt = csi2_be_set_ffmt; + + BUILD_BUG_ON(ARRAY_SIZE(csi2_be_supported_codes) != NR_OF_CSI2_BE_PADS); + csi2_be->asd.supported_codes = csi2_be_supported_codes; + csi2_be->asd.be_mode = IPU_BE_RAW; + csi2_be->asd.isl_mode = IPU_ISL_CSI2_BE; + + ipu_isys_subdev_set_ffmt(&csi2_be->asd.sd, NULL, &fmt); + ipu_isys_csi2_be_set_sel(&csi2_be->asd.sd, NULL, &sel); + + csi2_be->asd.sd.internal_ops = &csi2_be_sd_internal_ops; + snprintf(csi2_be->asd.sd.name, sizeof(csi2_be->asd.sd.name), + IPU_ISYS_ENTITY_PREFIX " CSI2 BE"); + snprintf(csi2_be->av.vdev.name, sizeof(csi2_be->av.vdev.name), + IPU_ISYS_ENTITY_PREFIX " CSI2 BE capture"); + csi2_be->av.aq.css_pin_type = IPU_FW_ISYS_PIN_TYPE_RAW_NS; + v4l2_set_subdevdata(&csi2_be->asd.sd, &csi2_be->asd); + rval = v4l2_device_register_subdev(&isys->v4l2_dev, &csi2_be->asd.sd); + if (rval) { + dev_info(&isys->adev->dev, "can't register v4l2 subdev\n"); + goto fail; + } + + csi2_be->av.isys = isys; + csi2_be->av.pfmts = ipu_isys_pfmts; + csi2_be->av.try_fmt_vid_mplane = + ipu_isys_video_try_fmt_vid_mplane_default; + csi2_be->av.prepare_firmware_stream_cfg = + ipu_isys_prepare_firmware_stream_cfg_default; + csi2_be->av.aq.buf_prepare = ipu_isys_buf_prepare; + csi2_be->av.aq.fill_frame_buff_set_pin = + ipu_isys_buffer_list_to_ipu_fw_isys_frame_buff_set_pin; + csi2_be->av.aq.link_fmt_validate = 
ipu_isys_link_fmt_validate; + csi2_be->av.aq.vbq.buf_struct_size = + sizeof(struct ipu_isys_video_buffer); + + rval = ipu_isys_video_init(&csi2_be->av, &csi2_be->asd.sd.entity, + CSI2_BE_PAD_SOURCE, MEDIA_PAD_FL_SINK, 0); + if (rval) { + dev_info(&isys->adev->dev, "can't init video node\n"); + goto fail; + } + + return 0; + +fail: + ipu_isys_csi2_be_cleanup(csi2_be); + + return rval; +} diff --git a/drivers/media/pci/intel/ipu-isys-csi2-be.h b/drivers/media/pci/intel/ipu-isys-csi2-be.h new file mode 100644 index 0000000000000..70a17833a9c4e --- /dev/null +++ b/drivers/media/pci/intel/ipu-isys-csi2-be.h @@ -0,0 +1,74 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (C) 2014 - 2018 Intel Corporation */ + +#ifndef IPU_ISYS_CSI2_BE_H +#define IPU_ISYS_CSI2_BE_H + +#include +#include + +#include "ipu-isys-queue.h" +#include "ipu-isys-subdev.h" +#include "ipu-isys-video.h" +#include "ipu-platform-isys.h" + +struct ipu_isys_csi2_be_pdata; +struct ipu_isys; + +#define CSI2_BE_PAD_SINK 0 +#define CSI2_BE_PAD_SOURCE 1 + +#define NR_OF_CSI2_BE_PADS 2 +#define NR_OF_CSI2_BE_SOURCE_PADS 1 +#define NR_OF_CSI2_BE_SINK_PADS 1 + +#define NR_OF_CSI2_BE_STREAMS 1 +#define NR_OF_CSI2_BE_SOC_SOURCE_PADS NR_OF_CSI2_BE_SOC_STREAMS +#define NR_OF_CSI2_BE_SOC_SINK_PADS NR_OF_CSI2_BE_SOC_STREAMS +#define CSI2_BE_SOC_PAD_SINK(n) \ + ({ typeof(n) __n = (n); \ + (__n) >= NR_OF_CSI2_BE_SOC_SINK_PADS ? \ + (NR_OF_CSI2_BE_SOC_SINK_PADS) : (__n); }) +#define CSI2_BE_SOC_PAD_SOURCE(n) \ + ({ typeof(n) __n = (n); \ + (__n) >= NR_OF_CSI2_BE_SOC_SOURCE_PADS ? 
\ + (NR_OF_CSI2_BE_SOC_PADS - 1) : \ + ((__n) + NR_OF_CSI2_BE_SOC_SINK_PADS); }) +#define NR_OF_CSI2_BE_SOC_PADS \ + (NR_OF_CSI2_BE_SOC_SOURCE_PADS + NR_OF_CSI2_BE_SOC_SINK_PADS) + +#define CSI2_BE_CROP_HOR BIT(0) +#define CSI2_BE_CROP_VER BIT(1) +#define CSI2_BE_CROP_MASK (CSI2_BE_CROP_VER | CSI2_BE_CROP_HOR) + +/* + * struct ipu_isys_csi2_be + */ +struct ipu_isys_csi2_be { + struct ipu_isys_csi2_be_pdata *pdata; + struct ipu_isys_subdev asd; + struct ipu_isys_video av; +}; + +struct ipu_isys_csi2_be_soc { + struct ipu_isys_csi2_be_pdata *pdata; + struct ipu_isys_subdev asd; + struct ipu_isys_video av[NR_OF_CSI2_BE_SOC_SOURCE_PADS]; +}; + +#define to_ipu_isys_csi2_be(sd) \ + container_of(to_ipu_isys_subdev(sd), \ + struct ipu_isys_csi2_be, asd) + +#define to_ipu_isys_csi2_be_soc(sd) \ + container_of(to_ipu_isys_subdev(sd), \ + struct ipu_isys_csi2_be_soc, asd) + +int ipu_isys_csi2_be_init(struct ipu_isys_csi2_be *csi2_be, + struct ipu_isys *isys); +int ipu_isys_csi2_be_soc_init( + struct ipu_isys_csi2_be_soc *csi2_be_soc, struct ipu_isys *isys); +void ipu_isys_csi2_be_cleanup(struct ipu_isys_csi2_be *csi2_be); +void ipu_isys_csi2_be_soc_cleanup(struct ipu_isys_csi2_be_soc *csi2_be); + +#endif /* IPU_ISYS_CSI2_BE_H */ diff --git a/drivers/media/pci/intel/ipu-isys-csi2.c b/drivers/media/pci/intel/ipu-isys-csi2.c new file mode 100644 index 0000000000000..0f3e122439c07 --- /dev/null +++ b/drivers/media/pci/intel/ipu-isys-csi2.c @@ -0,0 +1,945 @@ +// SPDX-License-Identifier: GPL-2.0 +// Copyright (C) 2013 - 2018 Intel Corporation + +#include +#include + +#include +#include +#include +#include + +#include "ipu.h" +#include "ipu-bus.h" +#include "ipu-buttress.h" +#include "ipu-isys.h" +#include "ipu-isys-subdev.h" +#include "ipu-isys-video.h" +#include "ipu-platform-regs.h" + +#define CREATE_TRACE_POINTS +#define IPU_SOF_SEQID_TRACE +#define IPU_EOF_SEQID_TRACE +#include "ipu-trace-event.h" + +static const u32 csi2_supported_codes_pad_sink[] = { + MEDIA_BUS_FMT_Y10_1X10, 
+ MEDIA_BUS_FMT_RGB565_1X16, + MEDIA_BUS_FMT_RGB888_1X24, + MEDIA_BUS_FMT_UYVY8_1X16, + MEDIA_BUS_FMT_YUYV8_1X16, + MEDIA_BUS_FMT_YUYV10_1X20, + MEDIA_BUS_FMT_SBGGR10_1X10, + MEDIA_BUS_FMT_SGBRG10_1X10, + MEDIA_BUS_FMT_SGRBG10_1X10, + MEDIA_BUS_FMT_SRGGB10_1X10, + MEDIA_BUS_FMT_SBGGR10_DPCM8_1X8, + MEDIA_BUS_FMT_SGBRG10_DPCM8_1X8, + MEDIA_BUS_FMT_SGRBG10_DPCM8_1X8, + MEDIA_BUS_FMT_SRGGB10_DPCM8_1X8, + MEDIA_BUS_FMT_SBGGR12_1X12, + MEDIA_BUS_FMT_SGBRG12_1X12, + MEDIA_BUS_FMT_SGRBG12_1X12, + MEDIA_BUS_FMT_SRGGB12_1X12, + MEDIA_BUS_FMT_SBGGR14_1X14, + MEDIA_BUS_FMT_SGBRG14_1X14, + MEDIA_BUS_FMT_SGRBG14_1X14, + MEDIA_BUS_FMT_SRGGB14_1X14, + MEDIA_BUS_FMT_SBGGR8_1X8, + MEDIA_BUS_FMT_SGBRG8_1X8, + MEDIA_BUS_FMT_SGRBG8_1X8, + MEDIA_BUS_FMT_SRGGB8_1X8, + 0, +}; + +static const u32 csi2_supported_codes_pad_source[] = { + MEDIA_BUS_FMT_Y10_1X10, + MEDIA_BUS_FMT_RGB565_1X16, + MEDIA_BUS_FMT_RGB888_1X24, + MEDIA_BUS_FMT_UYVY8_1X16, + MEDIA_BUS_FMT_YUYV8_1X16, + MEDIA_BUS_FMT_YUYV10_1X20, + MEDIA_BUS_FMT_SBGGR10_1X10, + MEDIA_BUS_FMT_SGBRG10_1X10, + MEDIA_BUS_FMT_SGRBG10_1X10, + MEDIA_BUS_FMT_SRGGB10_1X10, + MEDIA_BUS_FMT_SBGGR12_1X12, + MEDIA_BUS_FMT_SGBRG12_1X12, + MEDIA_BUS_FMT_SGRBG12_1X12, + MEDIA_BUS_FMT_SRGGB12_1X12, + MEDIA_BUS_FMT_SBGGR14_1X14, + MEDIA_BUS_FMT_SGBRG14_1X14, + MEDIA_BUS_FMT_SGRBG14_1X14, + MEDIA_BUS_FMT_SRGGB14_1X14, + MEDIA_BUS_FMT_SBGGR8_1X8, + MEDIA_BUS_FMT_SGBRG8_1X8, + MEDIA_BUS_FMT_SGRBG8_1X8, + MEDIA_BUS_FMT_SRGGB8_1X8, + 0, +}; + +static const u32 csi2_supported_codes_pad_meta[] = { + MEDIA_BUS_FMT_FIXED, + 0, +}; + +static const u32 *csi2_supported_codes[NR_OF_CSI2_PADS]; + +static struct v4l2_subdev_internal_ops csi2_sd_internal_ops = { + .open = ipu_isys_subdev_open, + .close = ipu_isys_subdev_close, +}; + +int ipu_isys_csi2_get_link_freq(struct ipu_isys_csi2 *csi2, __s64 *link_freq) +{ + struct ipu_isys_pipeline *pipe = container_of(csi2->asd.sd.entity.pipe, + struct ipu_isys_pipeline, + pipe); + struct v4l2_subdev *ext_sd = + 
media_entity_to_v4l2_subdev(pipe->external->entity); + struct v4l2_ext_control c = {.id = V4L2_CID_LINK_FREQ, }; + struct v4l2_ext_controls cs = {.count = 1, + .controls = &c, + }; + struct v4l2_querymenu qm = {.id = c.id, }; + int rval; + + if (!ext_sd) { + WARN_ON(1); + return -ENODEV; + } +#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0) + rval = v4l2_g_ext_ctrls(ext_sd->ctrl_handler, + ext_sd->v4l2_dev->mdev, + &cs); +#else + rval = v4l2_g_ext_ctrls(ext_sd->ctrl_handler, &cs); +#endif + if (rval) { + dev_info(&csi2->isys->adev->dev, "can't get link frequency\n"); + return rval; + } + + qm.index = c.value; + + rval = v4l2_querymenu(ext_sd->ctrl_handler, &qm); + if (rval) { + dev_info(&csi2->isys->adev->dev, "can't get menu item\n"); + return rval; + } + + dev_dbg(&csi2->isys->adev->dev, "%s: link frequency %lld\n", __func__, + qm.value); + + if (!qm.value) + return -EINVAL; + *link_freq = qm.value; + return 0; +} + +static int ipu_get_frame_desc_entry_by_dt(struct v4l2_subdev *sd, + struct v4l2_mbus_frame_desc_entry + *entry, u8 data_type) +{ + struct v4l2_mbus_frame_desc desc = { + .num_entries = V4L2_FRAME_DESC_ENTRY_MAX, + }; + int rval, i; + + rval = v4l2_subdev_call(sd, pad, get_frame_desc, 0, &desc); + if (rval) + return rval; + + for (i = 0; i < desc.num_entries; i++) { + if (desc.entry[i].bus.csi2.data_type != data_type) + continue; + *entry = desc.entry[i]; + return 0; + } + + return -EINVAL; +} + +static void csi2_meta_prepare_firmware_stream_cfg_default( + struct ipu_isys_video *av, + struct ipu_fw_isys_stream_cfg_data_abi *cfg) +{ + struct ipu_isys_pipeline *ip = + to_ipu_isys_pipeline(av->vdev.entity.pipe); + struct ipu_isys_queue *aq = &av->aq; + struct ipu_fw_isys_output_pin_info_abi *pin_info; + struct v4l2_mbus_frame_desc_entry entry; + int pin = cfg->nof_output_pins++; + int inpin = cfg->nof_input_pins++; + int rval; + + aq->fw_output = pin; + ip->output_pins[pin].pin_ready = ipu_isys_queue_buf_ready; + ip->output_pins[pin].aq = aq; + + 
pin_info = &cfg->output_pins[pin]; + pin_info->input_pin_id = inpin; + pin_info->output_res.width = av->mpix.width; + pin_info->output_res.height = av->mpix.height; + pin_info->stride = av->mpix.plane_fmt[0].bytesperline; + pin_info->pt = aq->css_pin_type; + pin_info->ft = av->pfmt->css_pixelformat; + pin_info->send_irq = 1; + + rval = + ipu_get_frame_desc_entry_by_dt(media_entity_to_v4l2_subdev + (ip->external->entity), &entry, + IPU_ISYS_MIPI_CSI2_TYPE_EMBEDDED8); + if (!rval) { + cfg->input_pins[inpin].dt = IPU_ISYS_MIPI_CSI2_TYPE_EMBEDDED8; + cfg->input_pins[inpin].input_res.width = + entry.two_dim.width * entry.bpp / BITS_PER_BYTE; + cfg->input_pins[inpin].input_res.height = + entry.two_dim.height; + } +} + +static int subscribe_event(struct v4l2_subdev *sd, struct v4l2_fh *fh, + struct v4l2_event_subscription *sub) +{ + struct ipu_isys_csi2 *csi2 = to_ipu_isys_csi2(sd); + + dev_dbg(&csi2->isys->adev->dev, "subscribe event(type %u id %u)\n", + sub->type, sub->id); + + switch (sub->type) { + case V4L2_EVENT_FRAME_SYNC: + return v4l2_event_subscribe(fh, sub, 10, NULL); + case V4L2_EVENT_CTRL: + return v4l2_ctrl_subscribe_event(fh, sub); + default: + return -EINVAL; + } +} + +static const struct v4l2_subdev_core_ops csi2_sd_core_ops = { + .subscribe_event = subscribe_event, + .unsubscribe_event = v4l2_event_subdev_unsubscribe, +}; + +static struct ipu_isys_pixelformat csi2_meta_pfmts[] = { + {V4L2_FMT_IPU_ISYS_META, 8, 8, 0, MEDIA_BUS_FMT_FIXED, 0}, + {}, +}; + +/* + * The input system CSI2+ receiver has several + * parameters affecting the receiver timings. These depend + * on the MIPI bus frequency F in Hz (sensor transmitter rate) + * as follows: + * register value = (A/1e9 + B * UI) / COUNT_ACC + * where + * UI = 1 / (2 * F) in seconds + * COUNT_ACC = counter accuracy in seconds + * For IPU4, COUNT_ACC = 0.125 ns + * + * A and B are coefficients from the table below, + * depending whether the register minimum or maximum value is + * calculated. 
+ * Minimum Maximum + * Clock lane A B A B + * reg_rx_csi_dly_cnt_termen_clane 0 0 38 0 + * reg_rx_csi_dly_cnt_settle_clane 95 -8 300 -16 + * Data lanes + * reg_rx_csi_dly_cnt_termen_dlane0 0 0 35 4 + * reg_rx_csi_dly_cnt_settle_dlane0 85 -2 145 -6 + * reg_rx_csi_dly_cnt_termen_dlane1 0 0 35 4 + * reg_rx_csi_dly_cnt_settle_dlane1 85 -2 145 -6 + * reg_rx_csi_dly_cnt_termen_dlane2 0 0 35 4 + * reg_rx_csi_dly_cnt_settle_dlane2 85 -2 145 -6 + * reg_rx_csi_dly_cnt_termen_dlane3 0 0 35 4 + * reg_rx_csi_dly_cnt_settle_dlane3 85 -2 145 -6 + * + * We use the minimum values of both A and B. + */ + +#define DIV_SHIFT 8 + +static uint32_t calc_timing(s32 a, int32_t b, int64_t link_freq, int32_t accinv) +{ + return accinv * a + (accinv * b * (500000000 >> DIV_SHIFT) + / (int32_t)(link_freq >> DIV_SHIFT)); +} + +static int +ipu_isys_csi2_calc_timing(struct ipu_isys_csi2 *csi2, + struct ipu_isys_csi2_timing *timing, uint32_t accinv) +{ + __s64 link_freq; + int rval; + + rval = ipu_isys_csi2_get_link_freq(csi2, &link_freq); + if (rval) + return rval; + + timing->ctermen = calc_timing(CSI2_CSI_RX_DLY_CNT_TERMEN_CLANE_A, + CSI2_CSI_RX_DLY_CNT_TERMEN_CLANE_B, + link_freq, accinv); + timing->csettle = calc_timing(CSI2_CSI_RX_DLY_CNT_SETTLE_CLANE_A, + CSI2_CSI_RX_DLY_CNT_SETTLE_CLANE_B, + link_freq, accinv); + dev_dbg(&csi2->isys->adev->dev, "ctermen %u\n", timing->ctermen); + dev_dbg(&csi2->isys->adev->dev, "csettle %u\n", timing->csettle); + + timing->dtermen = calc_timing(CSI2_CSI_RX_DLY_CNT_TERMEN_DLANE_A, + CSI2_CSI_RX_DLY_CNT_TERMEN_DLANE_B, + link_freq, accinv); + timing->dsettle = calc_timing(CSI2_CSI_RX_DLY_CNT_SETTLE_DLANE_A, + CSI2_CSI_RX_DLY_CNT_SETTLE_DLANE_B, + link_freq, accinv); + dev_dbg(&csi2->isys->adev->dev, "dtermen %u\n", timing->dtermen); + dev_dbg(&csi2->isys->adev->dev, "dsettle %u\n", timing->dsettle); + + return 0; +} + +#define CSI2_ACCINV 8 + +static int set_stream(struct v4l2_subdev *sd, int enable) +{ + struct ipu_isys_csi2 *csi2 = to_ipu_isys_csi2(sd); + 
struct ipu_isys_pipeline *ip = container_of(sd->entity.pipe, + struct ipu_isys_pipeline, + pipe); + struct ipu_isys_csi2_config *cfg; + struct v4l2_subdev *ext_sd; + struct v4l2_control c = {.id = V4L2_CID_MIPI_LANES, }; + struct ipu_isys_csi2_timing timing; + unsigned int nlanes; + int rval; + + dev_dbg(&csi2->isys->adev->dev, "csi2 s_stream %d\n", enable); + + if (!ip->external->entity) { + WARN_ON(1); + return -ENODEV; + } + ext_sd = media_entity_to_v4l2_subdev(ip->external->entity); + cfg = v4l2_get_subdev_hostdata(ext_sd); + + if (!enable) { + csi2->stream_count--; + if (csi2->stream_count) + return 0; + + ipu_isys_csi2_set_stream(sd, timing, 0, enable); + return 0; + } + + ip->has_sof = true; + + if (csi2->stream_count) { + csi2->stream_count++; + return 0; + } + + rval = v4l2_g_ctrl(ext_sd->ctrl_handler, &c); + if (!rval && c.value > 0 && cfg->nlanes > c.value) { + nlanes = c.value; + dev_dbg(&csi2->isys->adev->dev, "lane nr %d.\n", nlanes); + } else { + nlanes = cfg->nlanes; + } + + rval = ipu_isys_csi2_calc_timing(csi2, &timing, CSI2_ACCINV); + if (rval) + return rval; + + ipu_isys_csi2_set_stream(sd, timing, nlanes, enable); + csi2->stream_count++; + + return 0; +} + +static void csi2_capture_done(struct ipu_isys_pipeline *ip, + struct ipu_fw_isys_resp_info_abi *info) +{ + if (ip->interlaced && ip->isys->short_packet_source == + IPU_ISYS_SHORT_PACKET_FROM_RECEIVER) { + struct ipu_isys_buffer *ib; + unsigned long flags; + + spin_lock_irqsave(&ip->short_packet_queue_lock, flags); + if (!list_empty(&ip->short_packet_active)) { + ib = list_last_entry(&ip->short_packet_active, + struct ipu_isys_buffer, head); + list_move(&ib->head, &ip->short_packet_incoming); + } + spin_unlock_irqrestore(&ip->short_packet_queue_lock, flags); + } + if (ip->csi2) { + ipu_isys_csi2_error(ip->csi2); + } +} + +static int csi2_link_validate(struct media_link *link) +{ + struct ipu_isys_csi2 *csi2; + struct ipu_isys_pipeline *ip; + struct v4l2_subdev_route r[IPU_ISYS_MAX_STREAMS]; + 
struct v4l2_subdev_routing routing = { + .routes = r, + .num_routes = IPU_ISYS_MAX_STREAMS, + }; + unsigned int active = 0; + int i; + int rval; + + if (!link->sink->entity || + !link->sink->entity->pipe || !link->source->entity) + return -EINVAL; + csi2 = + to_ipu_isys_csi2(media_entity_to_v4l2_subdev(link->sink->entity)); + ip = to_ipu_isys_pipeline(link->sink->entity->pipe); + csi2->receiver_errors = 0; + ip->csi2 = csi2; + ipu_isys_video_add_capture_done(to_ipu_isys_pipeline + (link->sink->entity->pipe), + csi2_capture_done); + + rval = v4l2_subdev_link_validate(link); + if (rval) + return rval; + + if (!v4l2_ctrl_g_ctrl(csi2->store_csi2_header)) { + for (i = 0; i < NR_OF_CSI2_SOURCE_PADS; i++) { + struct media_pad *remote_pad = + media_entity_remote_pad(&csi2->asd. + pad[CSI2_PAD_SOURCE(i)]); + + if (remote_pad && + is_media_entity_v4l2_subdev(remote_pad->entity)) { + dev_err(&csi2->isys->adev->dev, + "CSI2 BE requires CSI2 headers.\n"); + return -EINVAL; + } + } + } + + rval = + v4l2_subdev_call(media_entity_to_v4l2_subdev(link->source->entity), + pad, get_routing, &routing); + + if (rval) { + csi2->remote_streams = 1; + return 0; + } + + for (i = 0; i < routing.num_routes; i++) { + if (routing.routes[i].flags & V4L2_SUBDEV_ROUTE_FL_ACTIVE) + active++; + } + + if (active != + bitmap_weight(csi2->asd.stream[link->sink->index].streams_stat, 32)) + return -EINVAL; + + csi2->remote_streams = active; + + return 0; +} + +static bool csi2_has_route(struct media_entity *entity, unsigned int pad0, + unsigned int pad1, int *stream) +{ + if (pad0 == CSI2_PAD_META || pad1 == CSI2_PAD_META) + return true; + return ipu_isys_subdev_has_route(entity, pad0, pad1, stream); +} + +static const struct v4l2_subdev_video_ops csi2_sd_video_ops = { + .s_stream = set_stream, +}; + +static int get_metadata_fmt(struct v4l2_subdev *sd, + struct v4l2_subdev_pad_config *cfg, + struct v4l2_subdev_format *fmt) +{ + struct media_pad *pad = + 
media_entity_remote_pad(&sd->entity.pads[CSI2_PAD_SINK]); + struct v4l2_mbus_frame_desc_entry entry; + int rval; + + if (!pad) + return -EINVAL; + + rval = + ipu_get_frame_desc_entry_by_dt(media_entity_to_v4l2_subdev + (pad->entity), &entry, + IPU_ISYS_MIPI_CSI2_TYPE_EMBEDDED8); + + if (!rval) { + fmt->format.width = + entry.two_dim.width * entry.bpp / BITS_PER_BYTE; + fmt->format.height = entry.two_dim.height; + fmt->format.code = entry.pixelcode; + fmt->format.field = V4L2_FIELD_NONE; + } + return rval; +} + +static int ipu_isys_csi2_get_fmt(struct v4l2_subdev *sd, + struct v4l2_subdev_pad_config *cfg, + struct v4l2_subdev_format *fmt) +{ + if (fmt->pad == CSI2_PAD_META) + return get_metadata_fmt(sd, cfg, fmt); + return ipu_isys_subdev_get_ffmt(sd, cfg, fmt); +} + +static int ipu_isys_csi2_set_fmt(struct v4l2_subdev *sd, + struct v4l2_subdev_pad_config *cfg, + struct v4l2_subdev_format *fmt) +{ + if (fmt->pad == CSI2_PAD_META) + return get_metadata_fmt(sd, cfg, fmt); + return ipu_isys_subdev_set_ffmt(sd, cfg, fmt); +} + +static int __subdev_link_validate(struct v4l2_subdev *sd, + struct media_link *link, + struct v4l2_subdev_format *source_fmt, + struct v4l2_subdev_format *sink_fmt) +{ + struct ipu_isys_pipeline *ip = container_of(sd->entity.pipe, + struct ipu_isys_pipeline, + pipe); + + if (source_fmt->format.field == V4L2_FIELD_ALTERNATE) + ip->interlaced = true; + + return ipu_isys_subdev_link_validate(sd, link, source_fmt, sink_fmt); +} + +static const struct v4l2_subdev_pad_ops csi2_sd_pad_ops = { + .link_validate = __subdev_link_validate, + .get_fmt = ipu_isys_csi2_get_fmt, + .set_fmt = ipu_isys_csi2_set_fmt, + .enum_mbus_code = ipu_isys_subdev_enum_mbus_code, + .set_routing = ipu_isys_subdev_set_routing, + .get_routing = ipu_isys_subdev_get_routing, +}; + +static struct v4l2_subdev_ops csi2_sd_ops = { + .core = &csi2_sd_core_ops, + .video = &csi2_sd_video_ops, + .pad = &csi2_sd_pad_ops, +}; + +static struct media_entity_operations csi2_entity_ops = { + 
.link_validate = csi2_link_validate, + .has_route = csi2_has_route, +}; + +static void csi2_set_ffmt(struct v4l2_subdev *sd, + struct v4l2_subdev_pad_config *cfg, + struct v4l2_subdev_format *fmt) +{ + struct v4l2_mbus_framefmt *ffmt = + __ipu_isys_get_ffmt(sd, cfg, fmt->pad, + fmt->stream, + fmt->which); + + if (fmt->format.field != V4L2_FIELD_ALTERNATE) + fmt->format.field = V4L2_FIELD_NONE; + + if (fmt->pad == CSI2_PAD_SINK) { + *ffmt = fmt->format; + if (fmt->stream) + return; + ipu_isys_subdev_fmt_propagate( + sd, cfg, &fmt->format, NULL, + IPU_ISYS_SUBDEV_PROP_TGT_SINK_FMT, + fmt->pad, fmt->which); + return; + } + + if (fmt->pad == CSI2_PAD_META) { + struct v4l2_mbus_framefmt *ffmt = + __ipu_isys_get_ffmt(sd, cfg, fmt->pad, + fmt->stream, + fmt->which); + struct media_pad *pad = media_entity_remote_pad( + &sd->entity.pads[CSI2_PAD_SINK]); + struct v4l2_mbus_frame_desc_entry entry; + int rval; + + if (!pad) { + ffmt->width = 0; + ffmt->height = 0; + ffmt->code = 0; + return; + } + + rval = ipu_get_frame_desc_entry_by_dt( + media_entity_to_v4l2_subdev(pad->entity), + &entry, + IPU_ISYS_MIPI_CSI2_TYPE_EMBEDDED8); + + if (!rval) { + ffmt->width = entry.two_dim.width * entry.bpp + / BITS_PER_BYTE; + ffmt->height = entry.two_dim.height; + ffmt->code = entry.pixelcode; + ffmt->field = V4L2_FIELD_NONE; + } + + return; + } + if (sd->entity.pads[fmt->pad].flags & MEDIA_PAD_FL_SOURCE) { + ffmt->width = fmt->format.width; + ffmt->height = fmt->format.height; + ffmt->field = fmt->format.field; + ffmt->code = + ipu_isys_subdev_code_to_uncompressed(fmt->format.code); + return; + } + + WARN_ON(1); +} + +static const struct ipu_isys_pixelformat * +csi2_try_fmt(struct ipu_isys_video *av, + struct v4l2_pix_format_mplane *mpix) +{ +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 5, 0) + struct v4l2_subdev *sd = + media_entity_to_v4l2_subdev(av->vdev.entity.links[0].source-> + entity); +#else + struct media_link *link = list_first_entry(&av->vdev.entity.links, + struct media_link, 
list); + struct v4l2_subdev *sd = + media_entity_to_v4l2_subdev(link->source->entity); +#endif + struct ipu_isys_csi2 *csi2; + + if (!sd) + return NULL; + + csi2 = to_ipu_isys_csi2(sd); + + return ipu_isys_video_try_fmt_vid_mplane(av, mpix, + v4l2_ctrl_g_ctrl(csi2->store_csi2_header)); +} + +void ipu_isys_csi2_cleanup(struct ipu_isys_csi2 *csi2) +{ + int i; + + if (!csi2->isys) + return; + + v4l2_device_unregister_subdev(&csi2->asd.sd); + ipu_isys_subdev_cleanup(&csi2->asd); + for (i = 0; i < NR_OF_CSI2_SOURCE_PADS; i++) + ipu_isys_video_cleanup(&csi2->av[i]); + ipu_isys_video_cleanup(&csi2->av_meta); + csi2->isys = NULL; +} + +static void csi_ctrl_init(struct v4l2_subdev *sd) +{ + struct ipu_isys_csi2 *csi2 = to_ipu_isys_csi2(sd); + + static const struct v4l2_ctrl_config cfg = { + .id = V4L2_CID_IPU_STORE_CSI2_HEADER, + .name = "Store CSI-2 Headers", + .type = V4L2_CTRL_TYPE_BOOLEAN, + .min = 0, + .max = 1, + .step = 1, + .def = 1, + }; + + csi2->store_csi2_header = v4l2_ctrl_new_custom(&csi2->asd.ctrl_handler, + &cfg, NULL); +} + +int ipu_isys_csi2_init(struct ipu_isys_csi2 *csi2, + struct ipu_isys *isys, + void __iomem *base, unsigned int index) +{ + struct v4l2_subdev_format fmt = { + .which = V4L2_SUBDEV_FORMAT_ACTIVE, + .pad = CSI2_PAD_SINK, + .format = { + .width = 4096, + .height = 3072, + }, + }; + struct v4l2_subdev_format fmt_meta = { + .which = V4L2_SUBDEV_FORMAT_ACTIVE, + .pad = CSI2_PAD_META, + }; + int i, rval, src; + + csi2->isys = isys; + csi2->base = base; + csi2->index = index; + + csi2->asd.sd.entity.ops = &csi2_entity_ops; + csi2->asd.ctrl_init = csi_ctrl_init; + csi2->asd.isys = isys; + init_completion(&csi2->eof_completion); + csi2->remote_streams = 1; + csi2->stream_count = 0; + + rval = ipu_isys_subdev_init(&csi2->asd, &csi2_sd_ops, 0, + NR_OF_CSI2_PADS, + NR_OF_CSI2_STREAMS, + NR_OF_CSI2_SOURCE_PADS, + NR_OF_CSI2_SINK_PADS, + V4L2_SUBDEV_FL_HAS_SUBSTREAMS); + if (rval) + goto fail; + + csi2->asd.pad[CSI2_PAD_SINK].flags = MEDIA_PAD_FL_SINK 
+ | MEDIA_PAD_FL_MUST_CONNECT | MEDIA_PAD_FL_MULTIPLEX; + for (i = CSI2_PAD_SOURCE(0); + i < (NR_OF_CSI2_SOURCE_PADS + CSI2_PAD_SOURCE(0)); i++) + csi2->asd.pad[i].flags = MEDIA_PAD_FL_SOURCE; + + csi2->asd.pad[CSI2_PAD_META].flags = MEDIA_PAD_FL_SOURCE; + src = index; +#ifdef CONFIG_VIDEO_INTEL_IPU4P + src = index ? (index + 5) : (index + 3); +#endif + csi2->asd.source = IPU_FW_ISYS_STREAM_SRC_CSI2_PORT0 + src; + csi2_supported_codes[CSI2_PAD_SINK] = csi2_supported_codes_pad_sink; + + for (i = 0; i < NR_OF_CSI2_SOURCE_PADS; i++) + csi2_supported_codes[i + 1] = csi2_supported_codes_pad_source; + csi2_supported_codes[CSI2_PAD_META] = csi2_supported_codes_pad_meta; + csi2->asd.supported_codes = csi2_supported_codes; + csi2->asd.set_ffmt = csi2_set_ffmt; + + csi2->asd.sd.flags |= V4L2_SUBDEV_FL_HAS_EVENTS; + csi2->asd.sd.internal_ops = &csi2_sd_internal_ops; + snprintf(csi2->asd.sd.name, sizeof(csi2->asd.sd.name), + IPU_ISYS_ENTITY_PREFIX " CSI-2 %u", index); + v4l2_set_subdevdata(&csi2->asd.sd, &csi2->asd); + + mutex_lock(&csi2->asd.mutex); + rval = v4l2_device_register_subdev(&isys->v4l2_dev, &csi2->asd.sd); + if (rval) { + mutex_unlock(&csi2->asd.mutex); + dev_info(&isys->adev->dev, "can't register v4l2 subdev\n"); + goto fail; + } + + __ipu_isys_subdev_set_ffmt(&csi2->asd.sd, NULL, &fmt); + __ipu_isys_subdev_set_ffmt(&csi2->asd.sd, NULL, &fmt_meta); + + /* create default route information */ + for (i = 0; i < NR_OF_CSI2_STREAMS; i++) { + csi2->asd.route[i].sink = CSI2_PAD_SINK; + csi2->asd.route[i].source = CSI2_PAD_SOURCE(i); + csi2->asd.route[i].flags = 0; + } + + for (i = 0; i < NR_OF_CSI2_SOURCE_PADS; i++) { + csi2->asd.stream[CSI2_PAD_SINK].stream_id[i] = i; + csi2->asd.stream[CSI2_PAD_SOURCE(i)].stream_id[CSI2_PAD_SINK] + = i; + } + csi2->asd.route[0].flags = V4L2_SUBDEV_ROUTE_FL_ACTIVE | + V4L2_SUBDEV_ROUTE_FL_IMMUTABLE; + bitmap_set(csi2->asd.stream[CSI2_PAD_SINK].streams_stat, 0, 1); + bitmap_set(csi2->asd.stream[CSI2_PAD_SOURCE(0)].streams_stat, 0, 1); + 
+ mutex_unlock(&csi2->asd.mutex); + + for (i = 0; i < NR_OF_CSI2_SOURCE_PADS; i++) { + snprintf(csi2->av[i].vdev.name, sizeof(csi2->av[i].vdev.name), + IPU_ISYS_ENTITY_PREFIX " CSI-2 %u capture %d", + index, i); + csi2->av[i].isys = isys; + csi2->av[i].aq.css_pin_type = IPU_FW_ISYS_PIN_TYPE_MIPI; + csi2->av[i].pfmts = ipu_isys_pfmts_packed; + csi2->av[i].try_fmt_vid_mplane = csi2_try_fmt; + csi2->av[i].prepare_firmware_stream_cfg = + ipu_isys_prepare_firmware_stream_cfg_default; + csi2->av[i].packed = true; + csi2->av[i].line_header_length = + IPU_ISYS_CSI2_LONG_PACKET_HEADER_SIZE; + csi2->av[i].line_footer_length = + IPU_ISYS_CSI2_LONG_PACKET_FOOTER_SIZE; + csi2->av[i].aq.buf_prepare = ipu_isys_buf_prepare; + csi2->av[i].aq.fill_frame_buff_set_pin = + ipu_isys_buffer_list_to_ipu_fw_isys_frame_buff_set_pin; + csi2->av[i].aq.link_fmt_validate = ipu_isys_link_fmt_validate; + csi2->av[i].aq.vbq.buf_struct_size = + sizeof(struct ipu_isys_video_buffer); + + rval = ipu_isys_video_init(&csi2->av[i], + &csi2->asd.sd.entity, + CSI2_PAD_SOURCE(i), + MEDIA_PAD_FL_SINK, 0); + if (rval) { + dev_info(&isys->adev->dev, "can't init video node\n"); + goto fail; + } + } + + snprintf(csi2->av_meta.vdev.name, sizeof(csi2->av_meta.vdev.name), + IPU_ISYS_ENTITY_PREFIX " CSI-2 %u meta", index); + csi2->av_meta.isys = isys; + csi2->av_meta.aq.css_pin_type = IPU_FW_ISYS_PIN_TYPE_MIPI; + csi2->av_meta.pfmts = csi2_meta_pfmts; + csi2->av_meta.try_fmt_vid_mplane = csi2_try_fmt; + csi2->av_meta.prepare_firmware_stream_cfg = + csi2_meta_prepare_firmware_stream_cfg_default; + csi2->av_meta.packed = true; + csi2->av_meta.line_header_length = + IPU_ISYS_CSI2_LONG_PACKET_HEADER_SIZE; + csi2->av_meta.line_footer_length = + IPU_ISYS_CSI2_LONG_PACKET_FOOTER_SIZE; + csi2->av_meta.aq.buf_prepare = ipu_isys_buf_prepare; + csi2->av_meta.aq.fill_frame_buff_set_pin = + ipu_isys_buffer_list_to_ipu_fw_isys_frame_buff_set_pin; + csi2->av_meta.aq.link_fmt_validate = ipu_isys_link_fmt_validate; + 
csi2->av_meta.aq.vbq.buf_struct_size = + sizeof(struct ipu_isys_video_buffer); + + rval = ipu_isys_video_init(&csi2->av_meta, &csi2->asd.sd.entity, + CSI2_PAD_META, MEDIA_PAD_FL_SINK, 0); + if (rval) { + dev_info(&isys->adev->dev, "can't init metadata node\n"); + goto fail; + } + return 0; + +fail: + ipu_isys_csi2_cleanup(csi2); + + return rval; +} + +void ipu_isys_csi2_sof_event(struct ipu_isys_csi2 *csi2, unsigned int vc) +{ + struct ipu_isys_pipeline *ip = NULL; + struct v4l2_event ev = { + .type = V4L2_EVENT_FRAME_SYNC, + }; + struct video_device *vdev = csi2->asd.sd.devnode; + unsigned long flags; + unsigned int i; + + spin_lock_irqsave(&csi2->isys->lock, flags); + csi2->in_frame[vc] = true; + + for (i = 0; i < IPU_ISYS_MAX_STREAMS; i++) { + if (csi2->isys->pipes[i] && + csi2->isys->pipes[i]->vc == vc && + csi2->isys->pipes[i]->csi2 == csi2) { + ip = csi2->isys->pipes[i]; + break; + } + } + + /* Pipe already vanished */ + if (!ip) { + spin_unlock_irqrestore(&csi2->isys->lock, flags); + return; + } + + ev.u.frame_sync.frame_sequence = atomic_inc_return(&ip->sequence) - 1; + ev.id = ip->stream_id; + spin_unlock_irqrestore(&csi2->isys->lock, flags); + + trace_ipu_sof_seqid(ev.u.frame_sync.frame_sequence, csi2->index, vc); + v4l2_event_queue(vdev, &ev); + dev_dbg(&csi2->isys->adev->dev, + "sof_event::csi2-%i CPU-timestamp:%lld, sequence:%i, vc:%d, stream_id:%d\n", + csi2->index, ktime_get_ns(), ev.u.frame_sync.frame_sequence, vc, ip->stream_id); +} + +void ipu_isys_csi2_eof_event(struct ipu_isys_csi2 *csi2, unsigned int vc) +{ + struct ipu_isys_pipeline *ip = NULL; + unsigned long flags; + unsigned int i; + u32 frame_sequence; + + spin_lock_irqsave(&csi2->isys->lock, flags); + csi2->in_frame[vc] = false; + if (csi2->wait_for_sync[vc]) + complete(&csi2->eof_completion); + spin_unlock_irqrestore(&csi2->isys->lock, flags); + + for (i = 0; i < IPU_ISYS_MAX_STREAMS; i++) { + if (csi2->isys->pipes[i] && + csi2->isys->pipes[i]->vc == vc && + csi2->isys->pipes[i]->csi2 == 
csi2) { + ip = csi2->isys->pipes[i]; + break; + } + } + + if (ip) { + frame_sequence = atomic_read(&ip->sequence); + + trace_ipu_eof_seqid(frame_sequence, csi2->index, vc); + + dev_dbg(&csi2->isys->adev->dev, + "eof_event::csi2-%i sequence: %i, vc: %d, stream_id: %d\n", + csi2->index, frame_sequence, vc, ip->stream_id); + } +} + +/* Call this function only _after_ the sensor has been stopped */ +void ipu_isys_csi2_wait_last_eof(struct ipu_isys_csi2 *csi2) +{ + unsigned long flags, tout; + unsigned int i; + + for (i = 0; i < NR_OF_CSI2_VC; i++) { + spin_lock_irqsave(&csi2->isys->lock, flags); + + if (!csi2->in_frame[i]) { + spin_unlock_irqrestore(&csi2->isys->lock, flags); + continue; + } + + reinit_completion(&csi2->eof_completion); + csi2->wait_for_sync[i] = true; + spin_unlock_irqrestore(&csi2->isys->lock, flags); + tout = wait_for_completion_timeout(&csi2->eof_completion, + IPU_EOF_TIMEOUT_JIFFIES); + if (!tout) + dev_err(&csi2->isys->adev->dev, + "csi2-%d: timeout at sync to eof of vc %d\n", + csi2->index, i); + csi2->wait_for_sync[i] = false; + } +} + +struct ipu_isys_buffer *ipu_isys_csi2_get_short_packet_buffer(struct + ipu_isys_pipeline + *ip) +{ + struct ipu_isys_buffer *ib; + struct ipu_isys_private_buffer *pb; + struct ipu_isys_mipi_packet_header *ph; + + if (list_empty(&ip->short_packet_incoming)) + return NULL; + ib = list_last_entry(&ip->short_packet_incoming, + struct ipu_isys_buffer, head); + pb = ipu_isys_buffer_to_private_buffer(ib); + ph = (struct ipu_isys_mipi_packet_header *)pb->buffer; + + /* Fill the packet header with magic number. 
*/ + ph->word_count = 0xffff; + ph->dtype = 0xff; + + dma_sync_single_for_cpu(&ip->isys->adev->dev, pb->dma_addr, + sizeof(*ph), DMA_BIDIRECTIONAL); + return ib; +} diff --git a/drivers/media/pci/intel/ipu-isys-csi2.h b/drivers/media/pci/intel/ipu-isys-csi2.h new file mode 100644 index 0000000000000..d7f2df3eb805e --- /dev/null +++ b/drivers/media/pci/intel/ipu-isys-csi2.h @@ -0,0 +1,177 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (C) 2013 - 2018 Intel Corporation */ + +#ifndef IPU_ISYS_CSI2_H +#define IPU_ISYS_CSI2_H + +#include +#include + +#include "ipu-isys-queue.h" +#include "ipu-isys-subdev.h" +#include "ipu-isys-video.h" +#include "ipu-platform-isys.h" + +struct ipu_isys_csi2_timing; +struct ipu_isys_csi2_pdata; +struct ipu_isys; + +#define NR_OF_CSI2_SINK_PADS 1 +#define CSI2_PAD_SINK 0 +#define NR_OF_CSI2_STREAMS NR_OF_CSI2_VC +#define NR_OF_CSI2_SOURCE_PADS NR_OF_CSI2_STREAMS +#define CSI2_PAD_SOURCE(n) \ + ({ typeof(n) __n = (n); \ + (__n >= NR_OF_CSI2_SOURCE_PADS ? 
\ + (NR_OF_CSI2_PADS - 2) : \ + (__n + NR_OF_CSI2_SINK_PADS)); }) +#define NR_OF_CSI2_META_PADS 1 +#define NR_OF_CSI2_PADS \ + (NR_OF_CSI2_SINK_PADS + NR_OF_CSI2_SOURCE_PADS + NR_OF_CSI2_META_PADS) +#define CSI2_PAD_META (NR_OF_CSI2_PADS - 1) + +#define IPU_ISYS_SHORT_PACKET_BUFFER_NUM VIDEO_MAX_FRAME +#define IPU_ISYS_SHORT_PACKET_WIDTH 32 +#define IPU_ISYS_SHORT_PACKET_FRAME_PACKETS 2 +#define IPU_ISYS_SHORT_PACKET_EXTRA_PACKETS 64 +#define IPU_ISYS_SHORT_PACKET_UNITSIZE 8 +#define IPU_ISYS_SHORT_PACKET_GENERAL_DT 0 +#define IPU_ISYS_SHORT_PACKET_PT 0 +#define IPU_ISYS_SHORT_PACKET_FT 0 + +#define IPU_ISYS_SHORT_PACKET_STRIDE \ + (IPU_ISYS_SHORT_PACKET_WIDTH * \ + IPU_ISYS_SHORT_PACKET_UNITSIZE) +#define IPU_ISYS_SHORT_PACKET_NUM(num_lines) \ + ((num_lines) * 2 + IPU_ISYS_SHORT_PACKET_FRAME_PACKETS + \ + IPU_ISYS_SHORT_PACKET_EXTRA_PACKETS) +#define IPU_ISYS_SHORT_PACKET_PKT_LINES(num_lines) \ + DIV_ROUND_UP(IPU_ISYS_SHORT_PACKET_NUM(num_lines) * \ + IPU_ISYS_SHORT_PACKET_UNITSIZE, \ + IPU_ISYS_SHORT_PACKET_STRIDE) +#define IPU_ISYS_SHORT_PACKET_BUF_SIZE(num_lines) \ + (IPU_ISYS_SHORT_PACKET_WIDTH * \ + IPU_ISYS_SHORT_PACKET_PKT_LINES(num_lines) * \ + IPU_ISYS_SHORT_PACKET_UNITSIZE) + +#define IPU_ISYS_SHORT_PACKET_TRACE_MSG_NUMBER 256 +#define IPU_ISYS_SHORT_PACKET_TRACE_MSG_SIZE 16 +#define IPU_ISYS_SHORT_PACKET_TRACE_BUFFER_SIZE \ + (IPU_ISYS_SHORT_PACKET_TRACE_MSG_NUMBER * \ + IPU_ISYS_SHORT_PACKET_TRACE_MSG_SIZE) + +#define IPU_ISYS_SHORT_PACKET_FROM_RECEIVER 0 +#define IPU_ISYS_SHORT_PACKET_FROM_TUNIT 1 + +#define IPU_ISYS_SHORT_PACKET_TRACE_MAX_TIMESHIFT 100 +#define IPU_ISYS_SHORT_PACKET_TRACE_EVENT_MASK 0x2082 +#define IPU_SKEW_CAL_LIMIT_HZ (1500000000ul / 2) + +#define CSI2_CSI_RX_DLY_CNT_TERMEN_CLANE_A 0 +#define CSI2_CSI_RX_DLY_CNT_TERMEN_CLANE_B 0 +#define CSI2_CSI_RX_DLY_CNT_SETTLE_CLANE_A 95 +#define CSI2_CSI_RX_DLY_CNT_SETTLE_CLANE_B -8 + +#define CSI2_CSI_RX_DLY_CNT_TERMEN_DLANE_A 0 +#define CSI2_CSI_RX_DLY_CNT_TERMEN_DLANE_B 0 +#define 
CSI2_CSI_RX_DLY_CNT_SETTLE_DLANE_A 85 +#define CSI2_CSI_RX_DLY_CNT_SETTLE_DLANE_B -2 + +#define IPU_EOF_TIMEOUT 300 +#define IPU_EOF_TIMEOUT_JIFFIES msecs_to_jiffies(IPU_EOF_TIMEOUT) + +/* + * struct ipu_isys_csi2 + * + * @nlanes: number of lanes in the receiver + */ +struct ipu_isys_csi2 { + struct ipu_isys_csi2_pdata *pdata; + struct ipu_isys *isys; + struct ipu_isys_subdev asd; + struct ipu_isys_video av[NR_OF_CSI2_SOURCE_PADS]; + struct ipu_isys_video av_meta; + struct completion eof_completion; + + void __iomem *base; + u32 receiver_errors; + unsigned int nlanes; + unsigned int index; + atomic_t sof_sequence; + bool in_frame[NR_OF_CSI2_VC]; + bool wait_for_sync[NR_OF_CSI2_VC]; + + unsigned int remote_streams; + unsigned int stream_count; + + struct v4l2_ctrl *store_csi2_header; +}; + +struct ipu_isys_csi2_timing { + u32 ctermen; + u32 csettle; + u32 dtermen; + u32 dsettle; +}; + +/* + * This structure defines the MIPI packet header output + * from IPU MIPI receiver. Due to hardware conversion, + * this structure is not the same as defined in CSI-2 spec. + */ +struct ipu_isys_mipi_packet_header { + u32 word_count:16, dtype:13, sync:2, stype:1; + u32 sid:4, port_id:4, reserved:23, odd_even:1; +} __packed; + +/* + * This structure defines the trace message content + * for CSI2 receiver monitor messages. 
+ */ +struct ipu_isys_csi2_monitor_message { + u64 fe:1, + fs:1, + pe:1, + ps:1, + le:1, + ls:1, + reserved1:2, + sequence:2, + reserved2:2, + flash_shutter:4, + error_cause:12, + fifo_overrun:1, + crc_error:2, + reserved3:1, + timestamp_l:16, + port:4, vc:2, reserved4:2, frame_sync:4, reserved5:4; + u64 reserved6:3, + cmd:2, reserved7:1, monitor_id:7, reserved8:1, timestamp_h:50; +} __packed; + +#define to_ipu_isys_csi2(sd) container_of(to_ipu_isys_subdev(sd), \ + struct ipu_isys_csi2, asd) + +int ipu_isys_csi2_get_link_freq(struct ipu_isys_csi2 *csi2, __s64 *link_freq); +int ipu_isys_csi2_init(struct ipu_isys_csi2 *csi2, + struct ipu_isys *isys, + void __iomem *base, unsigned int index); +void ipu_isys_csi2_cleanup(struct ipu_isys_csi2 *csi2); +struct ipu_isys_buffer * +ipu_isys_csi2_get_short_packet_buffer(struct ipu_isys_pipeline *ip); +void ipu_isys_csi2_sof_event(struct ipu_isys_csi2 *csi2, unsigned int vc); +void ipu_isys_csi2_eof_event(struct ipu_isys_csi2 *csi2, unsigned int vc); +void ipu_isys_csi2_wait_last_eof(struct ipu_isys_csi2 *csi2); + +/* interface for platform specific */ +int ipu_isys_csi2_set_stream(struct v4l2_subdev *sd, + struct ipu_isys_csi2_timing timing, + unsigned int nlanes, int enable); +unsigned int ipu_isys_csi2_get_current_field(struct ipu_isys_pipeline *ip, + unsigned int *timestamp); +void ipu_isys_csi2_isr(struct ipu_isys_csi2 *csi2); +void ipu_isys_csi2_error(struct ipu_isys_csi2 *csi2); +bool ipu_isys_csi2_skew_cal_required(struct ipu_isys_csi2 *csi2); +int ipu_isys_csi2_set_skew_cal(struct ipu_isys_csi2 *csi2, int enable); + +#endif /* IPU_ISYS_CSI2_H */ diff --git a/drivers/media/pci/intel/ipu-isys-media.h b/drivers/media/pci/intel/ipu-isys-media.h new file mode 100644 index 0000000000000..823324ef4a16c --- /dev/null +++ b/drivers/media/pci/intel/ipu-isys-media.h @@ -0,0 +1,91 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (C) 2016 - 2018 Intel Corporation */ + +#ifndef IPU_ISYS_MEDIA_H +#define IPU_ISYS_MEDIA_H + 
+#include +#include + +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 5, 0) +#define is_media_entity_v4l2_subdev(e) \ + (media_entity_type(e) == MEDIA_ENT_T_V4L2_SUBDEV) +#define is_media_entity_v4l2_io(e) \ + (media_entity_type(e) == MEDIA_ENT_T_DEVNODE) +#define media_create_pad_link(a, b, c, d, e) \ + media_entity_create_link(a, b, c, d, e) +#define media_entity_pads_init(a, b, c) \ + media_entity_init(a, b, c, 0) +#define media_entity_id(ent) ((ent)->id) +#define media_entity_graph_walk_init(a, b) 0 +#define media_entity_graph_walk_cleanup(a) do { } while (0) + +#define IPU_COMPAT_MAX_ENTITIES MEDIA_ENTITY_ENUM_MAX_ID + +struct media_entity_enum { + unsigned long *bmap; + int idx_max; +}; + +static inline int media_entity_enum_init(struct media_entity_enum *ent_enum, + struct media_device *mdev) +{ + int idx_max = IPU_COMPAT_MAX_ENTITIES; + + ent_enum->bmap = kcalloc(DIV_ROUND_UP(idx_max, BITS_PER_LONG), + sizeof(long), GFP_KERNEL); + if (!ent_enum->bmap) + return -ENOMEM; + + bitmap_zero(ent_enum->bmap, idx_max); + + ent_enum->idx_max = idx_max; + return 0; +} + +static inline void media_entity_enum_cleanup(struct media_entity_enum *ent_enum) +{ + kfree(ent_enum->bmap); +} + +static inline void media_entity_enum_set(struct media_entity_enum *ent_enum, + struct media_entity *entity) +{ + if (media_entity_id(entity) >= ent_enum->idx_max) { + WARN_ON(1); + return; + } + __set_bit(media_entity_id(entity), ent_enum->bmap); +} + +static inline void media_entity_enum_zero(struct media_entity_enum *ent_enum) +{ + bitmap_zero(ent_enum->bmap, ent_enum->idx_max); +} + +static inline bool media_entity_enum_test(struct media_entity_enum *ent_enum, + struct media_entity *entity) +{ + if (media_entity_id(entity) >= ent_enum->idx_max) { + WARN_ON(1); + return false; + } + + return test_bit(media_entity_id(entity), ent_enum->bmap); +} +#elif LINUX_VERSION_CODE < KERNEL_VERSION(4, 14, 0) +#define media_pipeline_start(e, p) media_entity_pipeline_start(e, p) + +#define 
media_pipeline_stop(e) media_entity_pipeline_stop(e) + +#define media_graph_walk_init(g, d) media_entity_graph_walk_init(g, d) + +#define media_graph_walk_start(g, p) media_entity_graph_walk_start(g, p) + +#define media_graph_walk_next(g) media_entity_graph_walk_next(g) + +#define media_graph_walk_cleanup(g) media_entity_graph_walk_cleanup(g) +#endif + + +#endif /* IPU_ISYS_MEDIA_H */ diff --git a/drivers/media/pci/intel/ipu-isys-queue.c b/drivers/media/pci/intel/ipu-isys-queue.c new file mode 100644 index 0000000000000..a88c0d7e9ca9f --- /dev/null +++ b/drivers/media/pci/intel/ipu-isys-queue.c @@ -0,0 +1,1534 @@ +// SPDX-License-Identifier: GPL-2.0 +// Copyright (C) 2013 - 2018 Intel Corporation + +#include +#include +#include +#include + +#include +#include +#include + +#include "ipu.h" +#include "ipu-bus.h" +#include "ipu-buttress.h" +#include "ipu-isys.h" +#include "ipu-isys-csi2.h" +#include "ipu-isys-video.h" + +static bool wall_clock_ts_on; +module_param(wall_clock_ts_on, bool, 0660); +MODULE_PARM_DESC(wall_clock_ts_on, "Timestamp based on REALTIME clock"); + +static int queue_setup(struct vb2_queue *q, +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 5, 0) + const struct v4l2_format *__fmt, +#endif + unsigned int *num_buffers, unsigned int *num_planes, + unsigned int sizes[], +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 8, 0) + void *alloc_ctxs[] +#else + struct device *alloc_devs[] +#endif + ) +{ + struct ipu_isys_queue *aq = vb2_queue_to_ipu_isys_queue(q); + struct ipu_isys_video *av = ipu_isys_queue_to_video(aq); +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 4, 0) + const struct v4l2_format *fmt = __fmt; + const struct ipu_isys_pixelformat *pfmt; + struct v4l2_pix_format_mplane mpix; +#else + bool use_fmt = false; +#endif + unsigned int i; + +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 4, 0) + if (fmt) + mpix = fmt->fmt.pix_mp; + else + mpix = av->mpix; + + pfmt = av->try_fmt_vid_mplane(av, &mpix); + + *num_planes = mpix.num_planes; +#else + /* num_planes == 0: 
we're being called through VIDIOC_REQBUFS */ + if (!*num_planes) { + use_fmt = true; + *num_planes = av->mpix.num_planes; + } +#endif + + for (i = 0; i < *num_planes; i++) { +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 4, 0) + sizes[i] = mpix.plane_fmt[i].sizeimage; +#else + if (use_fmt) + sizes[i] = av->mpix.plane_fmt[i].sizeimage; +#endif +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 8, 0) + alloc_ctxs[i] = aq->ctx; +#else + alloc_devs[i] = aq->dev; +#endif + dev_dbg(&av->isys->adev->dev, + "%s: queue setup: plane %d size %u\n", + av->vdev.name, i, sizes[i]); + } + + return 0; +} + +void ipu_isys_queue_lock(struct vb2_queue *q) +{ + struct ipu_isys_queue *aq = vb2_queue_to_ipu_isys_queue(q); + struct ipu_isys_video *av = ipu_isys_queue_to_video(aq); + + dev_dbg(&av->isys->adev->dev, "%s: queue lock\n", av->vdev.name); + mutex_lock(&av->mutex); +} + +void ipu_isys_queue_unlock(struct vb2_queue *q) +{ + struct ipu_isys_queue *aq = vb2_queue_to_ipu_isys_queue(q); + struct ipu_isys_video *av = ipu_isys_queue_to_video(aq); + + dev_dbg(&av->isys->adev->dev, "%s: queue unlock\n", av->vdev.name); + mutex_unlock(&av->mutex); +} + +static int buf_init(struct vb2_buffer *vb) +{ + struct ipu_isys_queue *aq = vb2_queue_to_ipu_isys_queue(vb->vb2_queue); + struct ipu_isys_video *av = ipu_isys_queue_to_video(aq); + + dev_dbg(&av->isys->adev->dev, "buffer: %s: %s\n", av->vdev.name, + __func__); + + if (aq->buf_init) + return aq->buf_init(vb); + + return 0; +} + +int ipu_isys_buf_prepare(struct vb2_buffer *vb) +{ + struct ipu_isys_queue *aq = vb2_queue_to_ipu_isys_queue(vb->vb2_queue); + struct ipu_isys_video *av = ipu_isys_queue_to_video(aq); + + dev_dbg(&av->isys->adev->dev, + "buffer: %s: configured size %u, buffer size %lu\n", + av->vdev.name, + av->mpix.plane_fmt[0].sizeimage, vb2_plane_size(vb, 0)); + + if (av->mpix.plane_fmt[0].sizeimage > vb2_plane_size(vb, 0)) + return -EINVAL; + + vb2_set_plane_payload(vb, 0, av->mpix.plane_fmt[0].bytesperline * + av->mpix.height); +#if 
LINUX_VERSION_CODE < KERNEL_VERSION(4, 4, 0) + vb->v4l2_planes[0].data_offset = av->line_header_length / BITS_PER_BYTE; +#else + vb->planes[0].data_offset = av->line_header_length / BITS_PER_BYTE; +#endif + + return 0; +} + +static int buf_prepare(struct vb2_buffer *vb) +{ + struct ipu_isys_queue *aq = vb2_queue_to_ipu_isys_queue(vb->vb2_queue); + struct ipu_isys_video *av = ipu_isys_queue_to_video(aq); + struct ipu_isys_buffer *ib = vb2_buffer_to_ipu_isys_buffer(vb); + u32 request = +#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 4, 0) + to_vb2_v4l2_buffer(vb)->request; +#else + vb->v4l2_buf.request; +#endif + struct media_device *mdev = &av->isys->media_dev; + struct ipu_isys_request *ireq; + u32 request_state; + unsigned long flags; + int rval; + if (av->isys->adev->isp->flr_done) + return -EIO; + + if (request) { + ib->req = media_device_request_find(&av->isys->media_dev, + request); + if (!ib->req) { + dev_dbg(&av->isys->adev->dev, + "can't find request %u\n", request); + return -ENOENT; + } + } + + rval = aq->buf_prepare(vb); + if (!request) + return rval; + if (rval) + goto out_put_request; + + ireq = to_ipu_isys_request(ib->req); + + spin_lock_irqsave(&ireq->lock, flags); + spin_lock(&mdev->req_lock); + request_state = ib->req->state; + if (request_state == MEDIA_DEVICE_REQUEST_STATE_IDLE) + list_add(&ib->req_head, &ireq->buffers); + spin_unlock(&mdev->req_lock); + spin_unlock_irqrestore(&ireq->lock, flags); + if (request_state != MEDIA_DEVICE_REQUEST_STATE_IDLE) { + dev_dbg(&av->isys->adev->dev, + "%s: request %u state %u\n", __func__, ib->req->id, + request_state); + rval = -EINVAL; + } else { + dev_dbg(&av->isys->adev->dev, + "%s: request %u\n", __func__, ib->req->id); + } + + if (!rval) + return 0; + +out_put_request: + media_device_request_put(ib->req); + ib->req = NULL; + + return rval; +} + +static void buf_finish(struct vb2_buffer *vb) +{ + struct ipu_isys_queue *aq = vb2_queue_to_ipu_isys_queue(vb->vb2_queue); + struct ipu_isys_video *av = 
ipu_isys_queue_to_video(aq); + struct ipu_isys_buffer *ib = vb2_buffer_to_ipu_isys_buffer(vb); + dev_dbg(&av->isys->adev->dev, "buffer: %s: %s\n", av->vdev.name, + __func__); + + if (ib->req) { + struct ipu_isys_request *ireq = to_ipu_isys_request(ib->req); + unsigned long flags; + bool done; + + spin_lock_irqsave(&ireq->lock, flags); + list_del(&ib->req_head); + done = list_empty(&ireq->buffers); + spin_unlock_irqrestore(&ireq->lock, flags); + dev_dbg(&av->isys->adev->dev, "request %u complete %s\n", + ib->req->id, done ? "true" : "false"); + if (done) { + media_device_request_complete(&av->isys->media_dev, + ib->req); + mutex_lock(&av->isys->stream_mutex); + list_del(&ireq->head); + mutex_unlock(&av->isys->stream_mutex); + } + media_device_request_put(ib->req); + ib->req = NULL; + } +} + +static void buf_cleanup(struct vb2_buffer *vb) +{ + struct ipu_isys_queue *aq = vb2_queue_to_ipu_isys_queue(vb->vb2_queue); + struct ipu_isys_video *av = ipu_isys_queue_to_video(aq); + + dev_dbg(&av->isys->adev->dev, "buffer: %s: %s\n", av->vdev.name, + __func__); + + if (aq->buf_cleanup) + return aq->buf_cleanup(vb); +} + +/* + * Queue a buffer list back to incoming or active queues. The buffers + * are removed from the buffer list. 
+ */
+void ipu_isys_buffer_list_queue(struct ipu_isys_buffer_list *bl,
+				unsigned long op_flags,
+				enum vb2_buffer_state state)
+{
+	struct ipu_isys_buffer *ib, *ib_safe;
+	unsigned long flags;
+	bool first = true;
+
+	/* A NULL list is tolerated so callers may pass an optional list. */
+	if (!bl)
+		return;
+
+	WARN_ON(!bl->nbufs);
+	/* ACTIVE and INCOMING are mutually exclusive destinations. */
+	WARN_ON(op_flags & IPU_ISYS_BUFFER_LIST_FL_ACTIVE &&
+		op_flags & IPU_ISYS_BUFFER_LIST_FL_INCOMING);
+
+	list_for_each_entry_safe(ib, ib_safe, &bl->head, head) {
+		struct ipu_isys_video *av;
+
+		if (ib->type == IPU_ISYS_VIDEO_BUFFER) {
+			struct vb2_buffer *vb =
+			    ipu_isys_buffer_to_vb2_buffer(ib);
+			struct ipu_isys_queue *aq =
+			    vb2_queue_to_ipu_isys_queue(vb->vb2_queue);
+
+			av = ipu_isys_queue_to_video(aq);
+			/* aq->lock protects the per-queue buffer lists. */
+			spin_lock_irqsave(&aq->lock, flags);
+			list_del(&ib->head);
+			if (op_flags & IPU_ISYS_BUFFER_LIST_FL_ACTIVE)
+				list_add(&ib->head, &aq->active);
+			else if (op_flags & IPU_ISYS_BUFFER_LIST_FL_INCOMING)
+				list_add_tail(&ib->head, &aq->incoming);
+			spin_unlock_irqrestore(&aq->lock, flags);
+
+			if (op_flags & IPU_ISYS_BUFFER_LIST_FL_SET_STATE)
+				vb2_buffer_done(vb, state);
+		} else if (ib->type == IPU_ISYS_SHORT_PACKET_BUFFER) {
+			struct ipu_isys_private_buffer *pb =
+			    ipu_isys_buffer_to_private_buffer(ib);
+			struct ipu_isys_pipeline *ip = pb->ip;
+
+			av = container_of(ip, struct ipu_isys_video, ip);
+			spin_lock_irqsave(&ip->short_packet_queue_lock, flags);
+			list_del(&ib->head);
+			if (op_flags & IPU_ISYS_BUFFER_LIST_FL_ACTIVE)
+				list_add(&ib->head, &ip->short_packet_active);
+			else if (op_flags & IPU_ISYS_BUFFER_LIST_FL_INCOMING)
+				list_add(&ib->head, &ip->short_packet_incoming);
+			spin_unlock_irqrestore(&ip->short_packet_queue_lock,
+					       flags);
+		} else {
+			WARN_ON(1);
+			return;
+		}
+
+		/* Log once per list, after av has been resolved. */
+		if (first) {
+			dev_dbg(&av->isys->adev->dev,
+				"queue buffer list %p op_flags %lx, state %d, %d buffers\n",
+				bl, op_flags, state, bl->nbufs);
+			first = false;
+		}
+
+		bl->nbufs--;
+	}
+
+	WARN_ON(bl->nbufs);
+}
+
+/*
+ * flush_firmware_streamon_fail() - Flush in cases where requests may
+ * have been queued to firmware 
and the *firmware streamon fails for a + * reason or another. + */ +static void flush_firmware_streamon_fail(struct ipu_isys_pipeline *ip) +{ + struct ipu_isys_video *pipe_av = + container_of(ip, struct ipu_isys_video, ip); + struct ipu_isys_queue *aq; + unsigned long flags; + + lockdep_assert_held(&pipe_av->mutex); + + list_for_each_entry(aq, &ip->queues, node) { + struct ipu_isys_video *av = ipu_isys_queue_to_video(aq); + struct ipu_isys_buffer *ib, *ib_safe; + + spin_lock_irqsave(&aq->lock, flags); + list_for_each_entry_safe(ib, ib_safe, &aq->active, head) { + struct vb2_buffer *vb = + ipu_isys_buffer_to_vb2_buffer(ib); + + list_del(&ib->head); + if (av->streaming) { + dev_dbg(&av->isys->adev->dev, + "%s: queue buffer %u back to incoming\n", + av->vdev.name, +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 4, 0) + vb->v4l2_buf.index); +#else + vb->index); +#endif + /* Queue already streaming, return to driver. */ + list_add(&ib->head, &aq->incoming); + continue; + } + /* Queue not yet streaming, return to user. */ + dev_dbg(&av->isys->adev->dev, + "%s: return %u back to videobuf2\n", + av->vdev.name, +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 4, 0) + vb->v4l2_buf.index); +#else + vb->index); +#endif + vb2_buffer_done(ipu_isys_buffer_to_vb2_buffer(ib), + VB2_BUF_STATE_QUEUED); + } + spin_unlock_irqrestore(&aq->lock, flags); + } +} + +/* + * Attempt obtaining a buffer list from the incoming queues, a list of + * buffers that contains one entry from each video buffer queue. If + * all queues have no buffers, the buffers that were already dequeued + * are returned to their queues. 
+ */ +static int buffer_list_get(struct ipu_isys_pipeline *ip, + struct ipu_isys_buffer_list *bl) +{ + struct ipu_isys_queue *aq; + struct ipu_isys_buffer *ib; + unsigned long flags; + int ret = 0; + + bl->nbufs = 0; + INIT_LIST_HEAD(&bl->head); + + list_for_each_entry(aq, &ip->queues, node) { + struct ipu_isys_buffer *ib; + + spin_lock_irqsave(&aq->lock, flags); + if (list_empty(&aq->incoming)) { + spin_unlock_irqrestore(&aq->lock, flags); + ret = -ENODATA; + goto error; + } + + ib = list_last_entry(&aq->incoming, + struct ipu_isys_buffer, head); + if (ib->req) { + spin_unlock_irqrestore(&aq->lock, flags); + ret = -ENODATA; + goto error; + } + + dev_dbg(&ip->isys->adev->dev, "buffer: %s: buffer %u\n", + ipu_isys_queue_to_video(aq)->vdev.name, +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 4, 0) + ipu_isys_buffer_to_vb2_buffer(ib)->v4l2_buf.index +#else + ipu_isys_buffer_to_vb2_buffer(ib)->index +#endif + ); + list_del(&ib->head); + list_add(&ib->head, &bl->head); + spin_unlock_irqrestore(&aq->lock, flags); + + bl->nbufs++; + } + + list_for_each_entry(ib, &bl->head, head) { + struct vb2_buffer *vb = ipu_isys_buffer_to_vb2_buffer(ib); + + aq = vb2_queue_to_ipu_isys_queue(vb->vb2_queue); + if (aq->prepare_frame_buff_set) + aq->prepare_frame_buff_set(vb); + } + + /* Get short packet buffer. */ + if (ip->interlaced && ip->isys->short_packet_source == + IPU_ISYS_SHORT_PACKET_FROM_RECEIVER) { + spin_lock_irqsave(&ip->short_packet_queue_lock, flags); + ib = ipu_isys_csi2_get_short_packet_buffer(ip); + if (!ib) { + spin_unlock_irqrestore(&ip->short_packet_queue_lock, + flags); + ret = -ENODATA; + dev_err(&ip->isys->adev->dev, + "No more short packet buffers. 
Driver bug?");
+			WARN_ON(1);
+			goto error;
+		}
+		list_move(&ib->head, &bl->head);
+		spin_unlock_irqrestore(&ip->short_packet_queue_lock, flags);
+		bl->nbufs++;
+	}
+
+	dev_dbg(&ip->isys->adev->dev, "get buffer list %p, %u buffers\n", bl,
+		bl->nbufs);
+	return ret;
+
+error:
+	/* Return any buffers already collected to their incoming lists. */
+	if (!list_empty(&bl->head))
+		ipu_isys_buffer_list_queue(bl,
+					   IPU_ISYS_BUFFER_LIST_FL_INCOMING, 0);
+	return ret;
+}
+
+/*
+ * Fill the output pin of a firmware frame buffer set from a single
+ * vb2 buffer: DMA address of plane 0 and a buffer id.
+ */
+void ipu_isys_buffer_list_to_ipu_fw_isys_frame_buff_set_pin(
+	struct vb2_buffer *vb,
+	struct ipu_fw_isys_frame_buff_set_abi *set)
+{
+	struct ipu_isys_queue *aq = vb2_queue_to_ipu_isys_queue(vb->vb2_queue);
+
+	set->output_pins[aq->fw_output].addr =
+	    vb2_dma_contig_plane_dma_addr(vb, 0);
+	/* +1: firmware buffer ids appear to be 1-based -- TODO confirm. */
+	set->output_pins[aq->fw_output].out_buf_id =
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 4, 0)
+	    vb->v4l2_buf.index + 1;
+#else
+	    vb->index + 1;
+#endif
+}
+
+/*
+ * Convert a buffer list to a isys fw ABI framebuffer set. The
+ * buffer list is not modified.
+ */
+void ipu_isys_buffer_list_to_ipu_fw_isys_frame_buff_set(
+	struct ipu_fw_isys_frame_buff_set_abi *set,
+	struct ipu_isys_pipeline *ip,
+	struct ipu_isys_buffer_list *bl)
+{
+	struct ipu_isys_buffer *ib;
+
+	WARN_ON(!bl->nbufs);
+
+	set->send_irq_sof = 1;
+	set->send_resp_sof = 1;
+
+#if defined(CONFIG_VIDEO_INTEL_IPU4) || defined(CONFIG_VIDEO_INTEL_IPU4P)
+	set->send_irq_capture_ack = 1;
+	set->send_irq_capture_done = 1;
+	set->send_irq_eof = 1;
+	set->send_resp_eof = 1;
+#else
+	set->send_irq_eof = 0;
+	set->send_resp_eof = 0;
+#endif
+
+	list_for_each_entry(ib, &bl->head, head) {
+		if (ib->type == IPU_ISYS_VIDEO_BUFFER) {
+			struct vb2_buffer *vb =
+			    ipu_isys_buffer_to_vb2_buffer(ib);
+			struct ipu_isys_queue *aq =
+			    vb2_queue_to_ipu_isys_queue(vb->vb2_queue);
+
+			if (aq->fill_frame_buff_set_pin)
+				aq->fill_frame_buff_set_pin(vb, set);
+		} else if (ib->type == IPU_ISYS_SHORT_PACKET_BUFFER) {
+			struct ipu_isys_private_buffer *pb =
+			    ipu_isys_buffer_to_private_buffer(ib);
+			struct ipu_fw_isys_output_pin_payload_abi *output_pin 
= + &set->output_pins[ip->short_packet_output_pin]; + + output_pin->addr = pb->dma_addr; + output_pin->out_buf_id = pb->index + 1; + } else { + WARN_ON(1); + } + } +} + +static void +ipu_isys_req_dispatch(struct media_device *mdev, + struct ipu_isys_request *ireq, + struct ipu_isys_pipeline *ip, + struct ipu_fw_isys_frame_buff_set_abi *set, + dma_addr_t dma_addr); + +struct ipu_isys_request *ipu_isys_next_queued_request(struct ipu_isys_pipeline + *ip) +{ + struct ipu_isys *isys = + container_of(ip, struct ipu_isys_video, ip)->isys; + struct ipu_isys_request *ireq; + struct ipu_isys_buffer *ib; + unsigned long flags; + + lockdep_assert_held(&isys->stream_mutex); + + if (list_empty(&isys->requests)) { + dev_dbg(&isys->adev->dev, "%s: no requests found\n", __func__); + return NULL; + } + + list_for_each_entry_reverse(ireq, &isys->requests, head) { + /* Does the request belong to this pipeline? */ + bool is_ours = false; + bool is_others = false; + + dev_dbg(&isys->adev->dev, "%s: checking request %u\n", + __func__, ireq->req.id); + + spin_lock_irqsave(&ireq->lock, flags); + list_for_each_entry(ib, &ireq->buffers, req_head) { + struct vb2_buffer *vb = + ipu_isys_buffer_to_vb2_buffer(ib); + struct ipu_isys_queue *aq = + vb2_queue_to_ipu_isys_queue(vb->vb2_queue); + struct ipu_isys_video *av = ipu_isys_queue_to_video(aq); + + dev_dbg(&isys->adev->dev, "%s: buffer in vdev %s\n", + __func__, av->vdev.name); + + if (media_entity_enum_test(&ip->entity_enum, + &av->vdev.entity)) + is_ours = true; + else + is_others = true; + } + spin_unlock_irqrestore(&ireq->lock, flags); + + dev_dbg(&isys->adev->dev, "%s: is%s ours, is%s others'\n", + __func__, is_ours ? "" : "n't", is_others ? "" : "n't"); + + if (!is_ours || WARN_ON(is_others)) + continue; + + list_del_init(&ireq->head); + + return ireq; + } + + return NULL; +} + +/* Start streaming for real. The buffer list must be available. 
*/ +static int ipu_isys_stream_start(struct ipu_isys_pipeline *ip, + struct ipu_isys_buffer_list *bl, bool error) +{ + struct ipu_isys_video *pipe_av = + container_of(ip, struct ipu_isys_video, ip); + struct media_device *mdev = &pipe_av->isys->media_dev; + struct ipu_isys_buffer_list __bl; + struct ipu_isys_request *ireq; + int rval; + + mutex_lock(&pipe_av->isys->stream_mutex); + + rval = ipu_isys_video_set_streaming(pipe_av, 1, bl); + if (rval) { + mutex_unlock(&pipe_av->isys->stream_mutex); + goto out_requeue; + } + + ip->streaming = 1; + + dev_dbg(&pipe_av->isys->adev->dev, "dispatching queued requests\n"); + + while ((ireq = ipu_isys_next_queued_request(ip))) { + struct ipu_fw_isys_frame_buff_set_abi *set; + struct isys_fw_msgs *msg; + + msg = ipu_get_fw_msg_buf(ip); + if (!msg) { + /* TODO: A PROPER CLEAN UP */ + mutex_unlock(&pipe_av->isys->stream_mutex); + return -ENOMEM; + } + + set = to_frame_msg_buf(msg); + + rval = ipu_isys_req_prepare(mdev, ireq, ip, set); + if (rval) { + mutex_unlock(&pipe_av->isys->stream_mutex); + goto out_requeue; + } + + ipu_fw_isys_dump_frame_buff_set(&pipe_av->isys->adev->dev, set, + ip->nr_output_pins); + ipu_isys_req_dispatch(mdev, ireq, ip, set, to_dma_addr(msg)); + } + + dev_dbg(&pipe_av->isys->adev->dev, + "done dispatching queued requests\n"); + + mutex_unlock(&pipe_av->isys->stream_mutex); + + bl = &__bl; + + do { + struct ipu_fw_isys_frame_buff_set_abi *buf = NULL; + struct isys_fw_msgs *msg; + + rval = buffer_list_get(ip, bl); + if (rval == -EINVAL) + goto out_requeue; + else if (rval < 0) + break; + + msg = ipu_get_fw_msg_buf(ip); + if (!msg) + /* TODO: PROPER CLEANUP */ + return -ENOMEM; + + buf = to_frame_msg_buf(msg); + + ipu_isys_buffer_list_to_ipu_fw_isys_frame_buff_set(buf, ip, bl); + + ipu_fw_isys_dump_frame_buff_set(&pipe_av->isys->adev->dev, buf, + ip->nr_output_pins); + + ipu_isys_buffer_list_queue(bl, + IPU_ISYS_BUFFER_LIST_FL_ACTIVE, 0); + + rval = ipu_fw_isys_complex_cmd(pipe_av->isys, + 
ip->stream_handle, + buf, to_dma_addr(msg), + sizeof(*buf), + IPU_FW_ISYS_SEND_TYPE_STREAM_CAPTURE); + ipu_put_fw_mgs_buffer(pipe_av->isys, (uintptr_t) buf); + } while (!WARN_ON(rval)); + + return 0; + +out_requeue: + if (bl && bl->nbufs) + ipu_isys_buffer_list_queue(bl, + IPU_ISYS_BUFFER_LIST_FL_INCOMING | + (error ? + IPU_ISYS_BUFFER_LIST_FL_SET_STATE : + 0), + error ? VB2_BUF_STATE_ERROR : + VB2_BUF_STATE_QUEUED); + flush_firmware_streamon_fail(ip); + + return rval; +} + +static void __buf_queue(struct vb2_buffer *vb, bool force) +{ + struct ipu_isys_queue *aq = vb2_queue_to_ipu_isys_queue(vb->vb2_queue); + struct ipu_isys_video *av = ipu_isys_queue_to_video(aq); + struct ipu_isys_buffer *ib = vb2_buffer_to_ipu_isys_buffer(vb); + struct ipu_isys_pipeline *ip = + to_ipu_isys_pipeline(av->vdev.entity.pipe); + struct ipu_isys_buffer_list bl; + + struct ipu_fw_isys_frame_buff_set_abi *buf = NULL; + struct isys_fw_msgs *msg; + + struct ipu_isys_video *pipe_av = + container_of(ip, struct ipu_isys_video, ip); + unsigned long flags; + unsigned int i; + int rval; + + dev_dbg(&av->isys->adev->dev, "buffer: %s: buf_queue %u\n", + av->vdev.name, +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 4, 0) + vb->v4l2_buf.index +#else + vb->index +#endif + ); + + for (i = 0; i < vb->num_planes; i++) + dev_dbg(&av->isys->adev->dev, "iova: plane %u iova 0x%x\n", i, + (u32) vb2_dma_contig_plane_dma_addr(vb, i)); + + spin_lock_irqsave(&aq->lock, flags); + list_add(&ib->head, &aq->incoming); + spin_unlock_irqrestore(&aq->lock, flags); + + if (ib->req) + return; + + if (!pipe_av || !vb->vb2_queue->streaming) { + dev_dbg(&av->isys->adev->dev, + "not pipe_av set, adding to incoming\n"); + return; + } + + mutex_unlock(&av->mutex); + mutex_lock(&pipe_av->mutex); + + if (!force && ip->nr_streaming != ip->nr_queues) { + dev_dbg(&av->isys->adev->dev, + "not streaming yet, adding to incoming\n"); + goto out; + } + + /* + * We just put one buffer to the incoming list of this queue + * (above). 
Let's see whether all queues in the pipeline would + * have a buffer. + */ + rval = buffer_list_get(ip, &bl); + if (rval < 0) { + if (rval == -EINVAL) { + dev_err(&av->isys->adev->dev, + "error: should not happen\n"); + WARN_ON(1); + } else { + dev_dbg(&av->isys->adev->dev, + "not enough buffers available\n"); + } + goto out; + } + + msg = ipu_get_fw_msg_buf(ip); + if (!msg) { + rval = -ENOMEM; + goto out; + } + buf = to_frame_msg_buf(msg); + + ipu_isys_buffer_list_to_ipu_fw_isys_frame_buff_set(buf, ip, &bl); + + ipu_fw_isys_dump_frame_buff_set(&pipe_av->isys->adev->dev, buf, + ip->nr_output_pins); + + if (!ip->streaming) { + dev_dbg(&av->isys->adev->dev, + "Wow! Got a buffer to start streaming!\n"); + rval = ipu_isys_stream_start(ip, &bl, true); + if (rval) + dev_err(&av->isys->adev->dev, + "Ouch. Stream start failed.\n"); + goto out; + } + + /* + * We must queue the buffers in the buffer list to the + * appropriate video buffer queues BEFORE passing them to the + * firmware since we could get a buffer event back before we + * have queued them ourselves to the active queue. + */ + ipu_isys_buffer_list_queue(&bl, IPU_ISYS_BUFFER_LIST_FL_ACTIVE, 0); + + rval = ipu_fw_isys_complex_cmd(pipe_av->isys, + ip->stream_handle, + buf, to_dma_addr(msg), + sizeof(*buf), + IPU_FW_ISYS_SEND_TYPE_STREAM_CAPTURE); + ipu_put_fw_mgs_buffer(pipe_av->isys, (uintptr_t) buf); + /* + * FIXME: mark the buffers in the buffer list if the queue + * operation fails. 
+ */
+	if (!WARN_ON(rval < 0))
+		dev_dbg(&av->isys->adev->dev, "queued buffer\n");
+
+out:
+	/* Restore lock order: drop the pipeline lock, retake our own. */
+	mutex_unlock(&pipe_av->mutex);
+	mutex_lock(&av->mutex);
+}
+
+/* vb2 .buf_queue operation: queue a buffer without forcing streamon. */
+static void buf_queue(struct vb2_buffer *vb)
+{
+	__buf_queue(vb, false);
+}
+
+/*
+ * Validate that the format configured on the video node matches the
+ * active format on the remote sub-device pad feeding it.  Returns 0 on
+ * a match, -ENOTCONN if the pad is not connected, -EINVAL on mismatch,
+ * or the error returned by the sub-device get_fmt call.
+ */
+int ipu_isys_link_fmt_validate(struct ipu_isys_queue *aq)
+{
+	struct ipu_isys_video *av = ipu_isys_queue_to_video(aq);
+	struct v4l2_subdev_format fmt = { 0 };
+	/* Remote pad connected to the video node's first pad. */
+	struct media_pad *pad = media_entity_remote_pad(av->vdev.entity.pads);
+	struct v4l2_subdev *sd;
+	int rval;
+
+	if (!pad) {
+		dev_dbg(&av->isys->adev->dev,
+			"video node %s pad not connected\n", av->vdev.name);
+		return -ENOTCONN;
+	}
+
+	sd = media_entity_to_v4l2_subdev(pad->entity);
+
+	fmt.which = V4L2_SUBDEV_FORMAT_ACTIVE;
+	fmt.pad = pad->index;
+	fmt.stream = 0;
+	rval = v4l2_subdev_call(sd, pad, get_fmt, NULL, &fmt);
+	if (rval)
+		return rval;
+
+	/*
+	 * NOTE(review): these messages print the video node format first
+	 * and label the sub-device format "expected" -- confirm that the
+	 * argument order matches the intended wording.
+	 */
+	if (fmt.format.width != av->mpix.width ||
+	    fmt.format.height != av->mpix.height) {
+		dev_dbg(&av->isys->adev->dev,
+			"wrong width or height %ux%u (%ux%u expected)\n",
+			av->mpix.width, av->mpix.height,
+			fmt.format.width, fmt.format.height);
+		return -EINVAL;
+	}
+
+	if (fmt.format.field != av->mpix.field) {
+		dev_dbg(&av->isys->adev->dev,
+			"wrong field value 0x%8.8x (0x%8.8x expected)\n",
+			av->mpix.field, fmt.format.field);
+		return -EINVAL;
+	}
+
+	if (fmt.format.code != av->pfmt->code) {
+		dev_dbg(&av->isys->adev->dev,
+			"wrong media bus code 0x%8.8x (0x%8.8x expected)\n",
+			av->pfmt->code, fmt.format.code);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+/* Return buffers back to videobuf2. 
*/ +static void return_buffers(struct ipu_isys_queue *aq, + enum vb2_buffer_state state) +{ + struct ipu_isys_video *av = ipu_isys_queue_to_video(aq); + int reset_needed = 0; + unsigned long flags; + + spin_lock_irqsave(&aq->lock, flags); + while (!list_empty(&aq->incoming)) { + struct ipu_isys_buffer *ib = list_first_entry(&aq->incoming, + struct + ipu_isys_buffer, + head); + struct vb2_buffer *vb = ipu_isys_buffer_to_vb2_buffer(ib); + + list_del(&ib->head); + spin_unlock_irqrestore(&aq->lock, flags); + + vb2_buffer_done(vb, state); + + dev_dbg(&av->isys->adev->dev, + "%s: stop_streaming incoming %u\n", + ipu_isys_queue_to_video(vb2_queue_to_ipu_isys_queue + (vb->vb2_queue))->vdev.name, +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 4, 0) + vb->v4l2_buf.index); +#else + vb->index); +#endif + + spin_lock_irqsave(&aq->lock, flags); + } + + /* + * Something went wrong (FW crash / HW hang / not all buffers + * returned from isys) if there are still buffers queued in active + * queue. We have to clean up places a bit. 
+ */ + while (!list_empty(&aq->active)) { + struct ipu_isys_buffer *ib = list_first_entry(&aq->active, + struct + ipu_isys_buffer, + head); + struct vb2_buffer *vb = ipu_isys_buffer_to_vb2_buffer(ib); + + list_del(&ib->head); + spin_unlock_irqrestore(&aq->lock, flags); + + vb2_buffer_done(vb, state); + + dev_warn(&av->isys->adev->dev, "%s: cleaning active queue %u\n", + ipu_isys_queue_to_video(vb2_queue_to_ipu_isys_queue + (vb->vb2_queue))->vdev.name, +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 4, 0) + vb->v4l2_buf.index); +#else + vb->index); +#endif + + spin_lock_irqsave(&aq->lock, flags); + reset_needed = 1; + } + + spin_unlock_irqrestore(&aq->lock, flags); + + if (reset_needed) { + mutex_lock(&av->isys->mutex); + av->isys->reset_needed = true; + mutex_unlock(&av->isys->mutex); + } +} + +static int start_streaming(struct vb2_queue *q, unsigned int count) +{ + struct ipu_isys_queue *aq = vb2_queue_to_ipu_isys_queue(q); + struct ipu_isys_video *av = ipu_isys_queue_to_video(aq); + struct ipu_isys_video *pipe_av; + struct ipu_isys_pipeline *ip; + struct ipu_isys_buffer_list __bl, *bl = NULL; + bool first; + int rval; + + dev_dbg(&av->isys->adev->dev, + "stream: %s: width %u, height %u, css pixelformat %u\n", + av->vdev.name, av->mpix.width, av->mpix.height, + av->pfmt->css_pixelformat); + + mutex_lock(&av->isys->stream_mutex); + + first = !av->vdev.entity.pipe; + + if (first) { + rval = ipu_isys_video_prepare_streaming(av, 1); + if (rval) + goto out_return_buffers; + } + + mutex_unlock(&av->isys->stream_mutex); + + rval = aq->link_fmt_validate(aq); + if (rval) { + dev_dbg(&av->isys->adev->dev, + "%s: link format validation failed (%d)\n", + av->vdev.name, rval); + goto out_unprepare_streaming; + } + + ip = to_ipu_isys_pipeline(av->vdev.entity.pipe); + pipe_av = container_of(ip, struct ipu_isys_video, ip); + mutex_unlock(&av->mutex); + + mutex_lock(&pipe_av->mutex); + ip->nr_streaming++; + dev_dbg(&av->isys->adev->dev, "queue %u of %u\n", ip->nr_streaming, + 
ip->nr_queues); + list_add(&aq->node, &ip->queues); + if (ip->nr_streaming != ip->nr_queues) + goto out; + + if (list_empty(&av->isys->requests)) { + bl = &__bl; + rval = buffer_list_get(ip, bl); + if (rval == -EINVAL) { + goto out_stream_start; + } else if (rval < 0) { + dev_dbg(&av->isys->adev->dev, + "no request available --- postponing streamon\n"); + goto out; + } + } + + rval = ipu_isys_stream_start(ip, bl, false); + if (rval) + goto out_stream_start; + +out: + mutex_unlock(&pipe_av->mutex); + mutex_lock(&av->mutex); + + return 0; + +out_stream_start: + list_del(&aq->node); + ip->nr_streaming--; + mutex_unlock(&pipe_av->mutex); + mutex_lock(&av->mutex); + +out_unprepare_streaming: + mutex_lock(&av->isys->stream_mutex); + if (first) + ipu_isys_video_prepare_streaming(av, 0); + +out_return_buffers: + mutex_unlock(&av->isys->stream_mutex); + return_buffers(aq, VB2_BUF_STATE_QUEUED); + + return rval; +} + +static void stop_streaming(struct vb2_queue *q) +{ + struct ipu_isys_queue *aq = vb2_queue_to_ipu_isys_queue(q); + struct ipu_isys_video *av = ipu_isys_queue_to_video(aq); + struct ipu_isys_pipeline *ip = + to_ipu_isys_pipeline(av->vdev.entity.pipe); + struct ipu_isys_video *pipe_av = + container_of(ip, struct ipu_isys_video, ip); + + if (pipe_av != av) { + mutex_unlock(&av->mutex); + mutex_lock(&pipe_av->mutex); + } + + mutex_lock(&av->isys->stream_mutex); + if (ip->nr_streaming == ip->nr_queues && ip->streaming) + ipu_isys_video_set_streaming(av, 0, NULL); + if (ip->nr_streaming == 1) + ipu_isys_video_prepare_streaming(av, 0); + mutex_unlock(&av->isys->stream_mutex); + + ip->nr_streaming--; + list_del(&aq->node); + ip->streaming = 0; + + if (pipe_av != av) { + mutex_unlock(&pipe_av->mutex); + mutex_lock(&av->mutex); + } + + return_buffers(aq, VB2_BUF_STATE_ERROR); +} + +static unsigned int +get_sof_sequence_by_timestamp(struct ipu_isys_pipeline *ip, + struct ipu_fw_isys_resp_info_abi *info) +{ + struct ipu_isys *isys = + container_of(ip, struct 
ipu_isys_video, ip)->isys;
+	/* Firmware delivers the 64-bit timestamp as two 32-bit words. */
+	u64 time = (u64) info->timestamp[1] << 32 | info->timestamp[0];
+	unsigned int i;
+
+	/* Look the response timestamp up among the recorded SOF events. */
+	for (i = 0; i < IPU_ISYS_MAX_PARALLEL_SOF; i++)
+		if (time == ip->seq[i].timestamp) {
+			dev_dbg(&isys->adev->dev,
+				"sof: using sequence number %u for timestamp 0x%16.16llx\n",
+				ip->seq[i].sequence, time);
+			return ip->seq[i].sequence;
+		}
+
+	/* No match: dump the table for debugging and fall back to 0. */
+	dev_dbg(&isys->adev->dev, "SOF: looking for 0x%16.16llx\n", time);
+	for (i = 0; i < IPU_ISYS_MAX_PARALLEL_SOF; i++)
+		dev_dbg(&isys->adev->dev,
+			"SOF: sequence %u, timestamp value 0x%16.16llx\n",
+			ip->seq[i].sequence, ip->seq[i].timestamp);
+	dev_dbg(&isys->adev->dev, "SOF sequence number not found\n");
+
+	return 0;
+}
+
+/*
+ * Nanoseconds elapsed since the SOF timestamp in @info, measured
+ * against the buttress TSC.  Returns 0 if the TSC cannot be read.
+ */
+static u64 get_sof_ns_delta(struct ipu_isys_video *av,
+			    struct ipu_fw_isys_resp_info_abi *info)
+{
+	struct ipu_bus_device *adev = to_ipu_bus_device(&av->isys->adev->dev);
+	struct ipu_device *isp = adev->isp;
+	u64 delta, tsc_now;
+
+	if (!ipu_buttress_tsc_read(isp, &tsc_now))
+		delta = tsc_now -
+		    ((u64) info->timestamp[1] << 32 | info->timestamp[0]);
+	else
+		delta = 0;
+
+	return ipu_buttress_tsc_ticks_to_ns(delta);
+}
+
+/*
+ * Fill in the sequence number and timestamp of a completed buffer,
+ * based on the firmware response and any recorded SOF events.
+ */
+void
+ipu_isys_buf_calc_sequence_time(struct ipu_isys_buffer *ib,
+				struct ipu_fw_isys_resp_info_abi *info)
+{
+	struct vb2_buffer *vb = ipu_isys_buffer_to_vb2_buffer(ib);
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 4, 0)
+	struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
+#endif
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 5, 0)
+	struct timespec ts_now;
+#endif
+	struct ipu_isys_queue *aq = vb2_queue_to_ipu_isys_queue(vb->vb2_queue);
+	struct ipu_isys_video *av = ipu_isys_queue_to_video(aq);
+	struct ipu_isys_pipeline *ip =
+	    to_ipu_isys_pipeline(av->vdev.entity.pipe);
+	u64 ns;
+	u32 sequence;
+
+	if (ip->has_sof) {
+		/* Back-date the timestamp to the matching SOF event. */
+		ns = (wall_clock_ts_on) ? ktime_get_real_ns() : ktime_get_ns();
+		ns -= get_sof_ns_delta(av, info);
+		sequence = get_sof_sequence_by_timestamp(ip, info);
+	} else {
+		ns = ((wall_clock_ts_on) ? 
ktime_get_real_ns() : + ktime_get_ns()); + sequence = (atomic_inc_return(&ip->sequence) - 1) + / ip->nr_queues; + } + +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 4, 0) + vb->v4l2_buf.sequence = sequence; + ts_now = ns_to_timespec(ns); + vb->v4l2_buf.timestamp.tv_sec = ts_now.tv_sec; + vb->v4l2_buf.timestamp.tv_usec = ts_now.tv_nsec / NSEC_PER_USEC; + + dev_dbg(&av->isys->adev->dev, "buffer: %s: buffer done %u\n", + av->vdev.name, vb->v4l2_buf.index); +#elif LINUX_VERSION_CODE < KERNEL_VERSION(4, 5, 0) + vbuf->sequence = sequence; + ts_now = ns_to_timespec(ns); + vbuf->timestamp.tv_sec = ts_now.tv_sec; + vbuf->timestamp.tv_usec = ts_now.tv_nsec / NSEC_PER_USEC; + + dev_dbg(&av->isys->adev->dev, "%s: buffer done %u\n", av->vdev.name, + vb->index); +#else + vbuf->vb2_buf.timestamp = ns; + vbuf->sequence = sequence; + + dev_dbg(&av->isys->adev->dev, "buffer: %s: buffer done, CPU-timestamp:%lld, sequence:%d, vc:%d, index:%d, vbuf timestamp:%lld, endl\n", + av->vdev.name, ktime_get_ns(), sequence, ip->vc, vb->index, vbuf->vb2_buf.timestamp); +#endif +} + +void ipu_isys_queue_buf_done(struct ipu_isys_buffer *ib) +{ + struct vb2_buffer *vb = ipu_isys_buffer_to_vb2_buffer(ib); + + if (atomic_read(&ib->str2mmio_flag)) { + vb2_buffer_done(vb, VB2_BUF_STATE_ERROR); + /* + * Operation on buffer is ended with error and will be reported + * to the userspace when it is de-queued + */ + atomic_set(&ib->str2mmio_flag, 0); + } else { + vb2_buffer_done(vb, VB2_BUF_STATE_DONE); + } +} + +void ipu_isys_queue_buf_ready(struct ipu_isys_pipeline *ip, + struct ipu_fw_isys_resp_info_abi *info) +{ + struct ipu_isys *isys = + container_of(ip, struct ipu_isys_video, ip)->isys; + struct ipu_isys_queue *aq = ip->output_pins[info->pin_id].aq; + struct ipu_isys_buffer *ib; + struct vb2_buffer *vb; + unsigned long flags; + bool first = true; +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 4, 0) + struct v4l2_buffer *buf; +#else + struct vb2_v4l2_buffer *buf; +#endif + + dev_dbg(&isys->adev->dev, 
"buffer: %s: received buffer %8.8x\n", + ipu_isys_queue_to_video(aq)->vdev.name, info->pin.addr); + + spin_lock_irqsave(&aq->lock, flags); + if (list_empty(&aq->active)) { + spin_unlock_irqrestore(&aq->lock, flags); + dev_err(&isys->adev->dev, "active queue empty\n"); + return; + } + + list_for_each_entry_reverse(ib, &aq->active, head) { + dma_addr_t addr; + + vb = ipu_isys_buffer_to_vb2_buffer(ib); + addr = vb2_dma_contig_plane_dma_addr(vb, 0); + + if (info->pin.addr != addr) { + if (first) + dev_err(&isys->adev->dev, + "WARNING: buffer address %pad expected!\n", + &addr); + first = false; + continue; + } + + if (info->error_info.error == + IPU_FW_ISYS_ERROR_HW_REPORTED_STR2MMIO) { + /* + * Check for error message: + * 'IPU_FW_ISYS_ERROR_HW_REPORTED_STR2MMIO' + */ + atomic_set(&ib->str2mmio_flag, 1); + } + dev_dbg(&isys->adev->dev, "buffer: found buffer %pad\n", &addr); + +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 4, 0) + buf = &vb->v4l2_buf; +#else + buf = to_vb2_v4l2_buffer(vb); +#endif + buf->field = V4L2_FIELD_NONE; + + /* + * Use "reserved" field to pass csi2 index and vc. + * May need to change to other approach. + */ + buf->reserved &= 0xFFFFFF00; + if (ip->csi2) + buf->reserved |= ip->csi2->index << 4; + buf->reserved |= ip->vc; + + list_del(&ib->head); + spin_unlock_irqrestore(&aq->lock, flags); + + ipu_isys_buf_calc_sequence_time(ib, info); + + /* + * For interlaced buffers, the notification to user space + * is postponed to capture_done event since the field + * information is available only at that time. 
+ */
+		if (ip->interlaced) {
+			spin_lock_irqsave(&ip->short_packet_queue_lock, flags);
+			list_add(&ib->head, &ip->pending_interlaced_bufs);
+			spin_unlock_irqrestore(&ip->short_packet_queue_lock,
+					       flags);
+		} else {
+			ipu_isys_queue_buf_done(ib);
+		}
+
+		return;
+	}
+
+	/* No active buffer matched the firmware-reported address. */
+	dev_err(&isys->adev->dev,
+		"WARNING: cannot find a matching video buffer!\n");
+
+	spin_unlock_irqrestore(&aq->lock, flags);
+}
+
+/*
+ * Record the current field from a CSI-2 short packet response; used
+ * for interlaced capture.
+ */
+void
+ipu_isys_queue_short_packet_ready(struct ipu_isys_pipeline *ip,
+				  struct ipu_fw_isys_resp_info_abi *info)
+{
+	struct ipu_isys *isys =
+	    container_of(ip, struct ipu_isys_video, ip)->isys;
+	unsigned long flags;
+
+	dev_dbg(&isys->adev->dev, "receive short packet buffer %8.8x\n",
+		info->pin.addr);
+	spin_lock_irqsave(&ip->short_packet_queue_lock, flags);
+	ip->cur_field = ipu_isys_csi2_get_current_field(ip, info->timestamp);
+	spin_unlock_irqrestore(&ip->short_packet_queue_lock, flags);
+}
+
+/* Free a request allocated by ipu_isys_req_alloc(). */
+void ipu_isys_req_free(struct media_device *mdev,
+		       struct media_device_request *req)
+{
+	struct ipu_isys_request *ireq = to_ipu_isys_request(req);
+
+	kfree(ireq);
+}
+
+/*
+ * Allocate a media device request.  The media_device_request is
+ * embedded in struct ipu_isys_request and is released through
+ * ipu_isys_req_free().
+ */
+struct
+media_device_request *ipu_isys_req_alloc(struct media_device *mdev)
+{
+	struct ipu_isys_request *ireq;
+
+	ireq = kzalloc(sizeof(*ireq), GFP_KERNEL);
+	if (!ireq)
+		return NULL;
+
+	INIT_LIST_HEAD(&ireq->buffers);
+	spin_lock_init(&ireq->lock);
+	INIT_LIST_HEAD(&ireq->head);
+
+	return &ireq->req;
+}
+
+/*
+ * Prepare the buffers of a request for submission to firmware: fill
+ * the frame buffer set and move each buffer to its queue's active
+ * list.  Returns 0.
+ */
+int ipu_isys_req_prepare(struct media_device *mdev,
+			 struct ipu_isys_request *ireq,
+			 struct ipu_isys_pipeline *ip,
+			 struct ipu_fw_isys_frame_buff_set_abi *set)
+{
+	struct ipu_isys *isys =
+	    container_of(ip, struct ipu_isys_video, ip)->isys;
+	struct media_device_request *req = &ireq->req;
+	struct ipu_isys_buffer *ib;
+	unsigned long flags;
+
+	dev_dbg(&isys->adev->dev, "preparing request %u\n", req->id);
+
+	set->send_irq_sof = 1;
+	set->send_resp_sof = 1;
+	set->send_irq_eof = 1;
+	set->send_resp_eof = 1;
+#if defined(CONFIG_VIDEO_INTEL_IPU4) || defined(CONFIG_VIDEO_INTEL_IPU4P)
+ 
set->send_irq_capture_ack = 1; + set->send_irq_capture_done = 1; +#endif + + spin_lock_irqsave(&ireq->lock, flags); + + list_for_each_entry(ib, &ireq->buffers, req_head) { + struct vb2_buffer *vb = ipu_isys_buffer_to_vb2_buffer(ib); + struct ipu_isys_queue *aq = + vb2_queue_to_ipu_isys_queue(vb->vb2_queue); + + if (aq->prepare_frame_buff_set) + aq->prepare_frame_buff_set(vb); + + if (aq->fill_frame_buff_set_pin) + aq->fill_frame_buff_set_pin(vb, set); + + spin_lock(&aq->lock); + list_move(&ib->head, &aq->active); + spin_unlock(&aq->lock); + } + + spin_unlock_irqrestore(&ireq->lock, flags); + + return 0; +} + +static void +ipu_isys_req_dispatch(struct media_device *mdev, + struct ipu_isys_request *ireq, + struct ipu_isys_pipeline *ip, + struct ipu_fw_isys_frame_buff_set_abi *set, + dma_addr_t dma_addr) +{ + struct ipu_isys_video *pipe_av = + container_of(ip, struct ipu_isys_video, ip); + int rval; + + rval = ipu_fw_isys_complex_cmd(pipe_av->isys, + ip->stream_handle, + set, dma_addr, sizeof(*set), + IPU_FW_ISYS_SEND_TYPE_STREAM_CAPTURE); + ipu_put_fw_mgs_buffer(pipe_av->isys, (uintptr_t) set); + + WARN_ON(rval); +} + +int ipu_isys_req_queue(struct media_device *mdev, + struct media_device_request *req) +{ + struct ipu_isys *isys = container_of(mdev, struct ipu_isys, media_dev); + struct ipu_isys_request *ireq = to_ipu_isys_request(req); + struct ipu_isys_pipeline *ip; + struct ipu_isys_buffer *ib; + struct media_pipeline *pipe = NULL; + unsigned long flags; + bool no_pipe = false; + int rval = 0; + + spin_lock_irqsave(&ireq->lock, flags); + if (list_empty(&ireq->buffers)) { + rval = -ENODATA; + goto out_list_empty; + } + + /* Verify that all buffers are related to a single pipeline. 
*/ + list_for_each_entry(ib, &ireq->buffers, req_head) { + struct vb2_buffer *vb = ipu_isys_buffer_to_vb2_buffer(ib); + struct ipu_isys_queue *aq = + vb2_queue_to_ipu_isys_queue(vb->vb2_queue); + struct ipu_isys_video *av = ipu_isys_queue_to_video(aq); + + dev_dbg(&isys->adev->dev, "%s: device %s, id %u\n", __func__, + av->vdev.name, vb-> +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 4, 0) + v4l2_buf. +#endif + index); + if (!pipe) { + if (!av->vdev.entity.pipe) { + no_pipe = true; + continue; + } + + pipe = av->vdev.entity.pipe; + dev_dbg(&isys->adev->dev, "%s: pipe %p\n", + av->vdev.name, pipe); + continue; + } + + if (av->vdev.entity.pipe != pipe) { + dev_dbg(&isys->adev->dev, + "request %u includes buffers in multiple pipelines\n", + req->id); + rval = -EINVAL; + goto out_list_empty; + } + } + + spin_unlock_irqrestore(&ireq->lock, flags); + + mutex_lock(&isys->stream_mutex); + + ip = to_ipu_isys_pipeline(pipe); + + if (pipe && ip->streaming) { + struct isys_fw_msgs *msg; + struct ipu_fw_isys_frame_buff_set_abi *set; + + msg = ipu_get_fw_msg_buf(ip); + if (!msg) { + rval = -ENOMEM; + goto out_mutex_unlock; + } + + set = to_frame_msg_buf(msg); + + if (no_pipe) { + dev_dbg(&isys->adev->dev, + "request %u includes buffers in and outside pipelines\n", + req->id); + rval = -EINVAL; + goto out_mutex_unlock; + } + + dev_dbg(&isys->adev->dev, + "request has a pipeline, dispatching\n"); + rval = ipu_isys_req_prepare(mdev, ireq, ip, set); + if (rval) + goto out_mutex_unlock; + + ipu_fw_isys_dump_frame_buff_set(&isys->adev->dev, set, + ip->nr_output_pins); + ipu_isys_req_dispatch(mdev, ireq, ip, set, to_dma_addr(msg)); + } else { + dev_dbg(&isys->adev->dev, + "%s: adding request %u to the mdev queue\n", __func__, + req->id); + + list_add(&ireq->head, &isys->requests); + } + +out_mutex_unlock: + mutex_unlock(&isys->stream_mutex); + + return rval; + +out_list_empty: + spin_unlock_irqrestore(&ireq->lock, flags); + + return rval; +} + +struct vb2_ops ipu_isys_queue_ops = { + 
.queue_setup = queue_setup, + .wait_prepare = ipu_isys_queue_unlock, + .wait_finish = ipu_isys_queue_lock, + .buf_init = buf_init, + .buf_prepare = buf_prepare, + .buf_finish = buf_finish, + .buf_cleanup = buf_cleanup, + .start_streaming = start_streaming, + .stop_streaming = stop_streaming, + .buf_queue = buf_queue, +}; + +int ipu_isys_queue_init(struct ipu_isys_queue *aq) +{ + struct ipu_isys *isys = ipu_isys_queue_to_video(aq)->isys; + int rval; + + if (!aq->vbq.io_modes) + aq->vbq.io_modes = VB2_USERPTR | VB2_MMAP | VB2_DMABUF; + aq->vbq.drv_priv = aq; + aq->vbq.allow_requests = true; + aq->vbq.ops = &ipu_isys_queue_ops; + aq->vbq.mem_ops = &vb2_dma_contig_memops; + aq->vbq.timestamp_flags = (wall_clock_ts_on) ? + V4L2_BUF_FLAG_TIMESTAMP_UNKNOWN : V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC; + + rval = vb2_queue_init(&aq->vbq); + if (rval) + return rval; + +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 8, 0) + aq->ctx = vb2_dma_contig_init_ctx(&isys->adev->dev); + if (IS_ERR(aq->ctx)) { + vb2_queue_release(&aq->vbq); + return PTR_ERR(aq->ctx); + } +#else + aq->dev = &isys->adev->dev; + aq->vbq.dev = &isys->adev->dev; +#endif + spin_lock_init(&aq->lock); + INIT_LIST_HEAD(&aq->active); + INIT_LIST_HEAD(&aq->incoming); + + return 0; +} + +void ipu_isys_queue_cleanup(struct ipu_isys_queue *aq) +{ +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 8, 0) + if (IS_ERR_OR_NULL(aq->ctx)) + return; + + vb2_dma_contig_cleanup_ctx(aq->ctx); + aq->ctx = NULL; +#endif + vb2_queue_release(&aq->vbq); +} diff --git a/drivers/media/pci/intel/ipu-isys-queue.h b/drivers/media/pci/intel/ipu-isys-queue.h new file mode 100644 index 0000000000000..5138077ed35aa --- /dev/null +++ b/drivers/media/pci/intel/ipu-isys-queue.h @@ -0,0 +1,174 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (C) 2013 - 2018 Intel Corporation */ + +#ifndef IPU_ISYS_QUEUE_H +#define IPU_ISYS_QUEUE_H + +#include +#include + +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 4, 0) +#include +#else +#include +#endif + +#include 
"ipu-isys-media.h" + +struct ipu_isys_video; +struct ipu_isys_pipeline; +struct ipu_fw_isys_resp_info_abi; +struct ipu_fw_isys_frame_buff_set_abi; + +enum ipu_isys_buffer_type { + IPU_ISYS_VIDEO_BUFFER, + IPU_ISYS_SHORT_PACKET_BUFFER, +}; + +struct ipu_isys_queue { + struct list_head node; /* struct ipu_isys_pipeline.queues */ + struct vb2_queue vbq; +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 8, 0) + struct vb2_alloc_ctx *ctx; +#else + struct device *dev; +#endif + /* + * @lock: serialise access to queued and pre_streamon_queued + */ + spinlock_t lock; + struct list_head active; + struct list_head incoming; + u32 css_pin_type; + unsigned int fw_output; + int (*buf_init)(struct vb2_buffer *vb); + void (*buf_cleanup)(struct vb2_buffer *vb); + int (*buf_prepare)(struct vb2_buffer *vb); + void (*prepare_frame_buff_set)(struct vb2_buffer *vb); + void (*fill_frame_buff_set_pin)(struct vb2_buffer *vb, + struct ipu_fw_isys_frame_buff_set_abi * + set); + int (*link_fmt_validate)(struct ipu_isys_queue *aq); +}; + +struct ipu_isys_buffer { + struct list_head head; + enum ipu_isys_buffer_type type; + struct list_head req_head; + struct media_device_request *req; + atomic_t str2mmio_flag; +}; + +struct ipu_isys_video_buffer { +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 4, 0) + struct vb2_buffer vb; +#else + struct vb2_v4l2_buffer vb_v4l2; +#endif + struct ipu_isys_buffer ib; +}; + +struct ipu_isys_private_buffer { + struct ipu_isys_buffer ib; + struct ipu_isys_pipeline *ip; + unsigned int index; + unsigned int bytesused; + dma_addr_t dma_addr; + void *buffer; +}; + +#define IPU_ISYS_BUFFER_LIST_FL_INCOMING BIT(0) +#define IPU_ISYS_BUFFER_LIST_FL_ACTIVE BIT(1) +#define IPU_ISYS_BUFFER_LIST_FL_SET_STATE BIT(2) + +struct ipu_isys_buffer_list { + struct list_head head; + unsigned int nbufs; +}; + +#define vb2_queue_to_ipu_isys_queue(__vb2) \ + container_of(__vb2, struct ipu_isys_queue, vbq) + +#define ipu_isys_to_isys_video_buffer(__ib) \ + container_of(__ib, struct 
ipu_isys_video_buffer, ib) + +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 4, 0) +#define vb2_buffer_to_ipu_isys_video_buffer(__vb) \ + container_of(__vb, struct ipu_isys_video_buffer, vb) + +#define ipu_isys_buffer_to_vb2_buffer(__ib) \ + (&ipu_isys_to_isys_video_buffer(__ib)->vb) +#else +#define vb2_buffer_to_ipu_isys_video_buffer(__vb) \ + container_of(to_vb2_v4l2_buffer(__vb), \ + struct ipu_isys_video_buffer, vb_v4l2) + +#define ipu_isys_buffer_to_vb2_buffer(__ib) \ + (&ipu_isys_to_isys_video_buffer(__ib)->vb_v4l2.vb2_buf) +#endif + +#define vb2_buffer_to_ipu_isys_buffer(__vb) \ + (&vb2_buffer_to_ipu_isys_video_buffer(__vb)->ib) + +#define ipu_isys_buffer_to_private_buffer(__ib) \ + container_of(__ib, struct ipu_isys_private_buffer, ib) + +struct ipu_isys_request { + struct media_device_request req; + /* serialise access to buffers */ + spinlock_t lock; + struct list_head buffers; /* struct ipu_isys_buffer.head */ + bool dispatched; + /* + * struct ipu_isys.requests; + * struct ipu_isys_pipeline.struct.* + */ + struct list_head head; +}; + +#define to_ipu_isys_request(__req) \ + container_of(__req, struct ipu_isys_request, req) + +void ipu_isys_queue_lock(struct vb2_queue *q); +void ipu_isys_queue_unlock(struct vb2_queue *q); + +int ipu_isys_buf_prepare(struct vb2_buffer *vb); + +void ipu_isys_buffer_list_queue(struct ipu_isys_buffer_list *bl, + unsigned long op_flags, + enum vb2_buffer_state state); +struct ipu_isys_request *ipu_isys_next_queued_request( + struct ipu_isys_pipeline *ip); +void ipu_isys_buffer_list_to_ipu_fw_isys_frame_buff_set_pin( + struct vb2_buffer *vb, + struct ipu_fw_isys_frame_buff_set_abi *set); +void ipu_isys_buffer_list_to_ipu_fw_isys_frame_buff_set( + struct ipu_fw_isys_frame_buff_set_abi *set, + struct ipu_isys_pipeline *ip, + struct ipu_isys_buffer_list *bl); +int ipu_isys_link_fmt_validate(struct ipu_isys_queue *aq); + +void +ipu_isys_buf_calc_sequence_time(struct ipu_isys_buffer *ib, + struct ipu_fw_isys_resp_info_abi *info); +void 
ipu_isys_queue_buf_done(struct ipu_isys_buffer *ib); +void ipu_isys_queue_buf_ready(struct ipu_isys_pipeline *ip, + struct ipu_fw_isys_resp_info_abi *info); +void +ipu_isys_queue_short_packet_ready(struct ipu_isys_pipeline *ip, + struct ipu_fw_isys_resp_info_abi *inf); + +void ipu_isys_req_free(struct media_device *mdev, + struct media_device_request *req); +struct media_device_request *ipu_isys_req_alloc(struct media_device *mdev); +int ipu_isys_req_prepare(struct media_device *mdev, + struct ipu_isys_request *ireq, + struct ipu_isys_pipeline *ip, + struct ipu_fw_isys_frame_buff_set_abi *set); +int ipu_isys_req_queue(struct media_device *mdev, + struct media_device_request *req); + +int ipu_isys_queue_init(struct ipu_isys_queue *aq); +void ipu_isys_queue_cleanup(struct ipu_isys_queue *aq); + +#endif /* IPU_ISYS_QUEUE_H */ diff --git a/drivers/media/pci/intel/ipu-isys-subdev.c b/drivers/media/pci/intel/ipu-isys-subdev.c new file mode 100644 index 0000000000000..a710450143e8f --- /dev/null +++ b/drivers/media/pci/intel/ipu-isys-subdev.c @@ -0,0 +1,1050 @@ +// SPDX-License-Identifier: GPL-2.0 +// Copyright (C) 2013 - 2018 Intel Corporation + +#include +#include + +#include + +#include + +#include "ipu-isys.h" +#include "ipu-isys-video.h" +#include "ipu-isys-subdev.h" + +unsigned int ipu_isys_mbus_code_to_bpp(u32 code) +{ + switch (code) { + case MEDIA_BUS_FMT_RGB888_1X24: + return 24; + case MEDIA_BUS_FMT_YUYV10_1X20: + return 20; + case MEDIA_BUS_FMT_Y10_1X10: + case MEDIA_BUS_FMT_RGB565_1X16: + case MEDIA_BUS_FMT_UYVY8_1X16: + case MEDIA_BUS_FMT_YUYV8_1X16: + return 16; + case MEDIA_BUS_FMT_SBGGR14_1X14: + case MEDIA_BUS_FMT_SGBRG14_1X14: + case MEDIA_BUS_FMT_SGRBG14_1X14: + case MEDIA_BUS_FMT_SRGGB14_1X14: + return 14; + case MEDIA_BUS_FMT_SBGGR12_1X12: + case MEDIA_BUS_FMT_SGBRG12_1X12: + case MEDIA_BUS_FMT_SGRBG12_1X12: + case MEDIA_BUS_FMT_SRGGB12_1X12: + return 12; + case MEDIA_BUS_FMT_SBGGR10_1X10: + case MEDIA_BUS_FMT_SGBRG10_1X10: + case 
MEDIA_BUS_FMT_SGRBG10_1X10: + case MEDIA_BUS_FMT_SRGGB10_1X10: + return 10; + case MEDIA_BUS_FMT_SBGGR8_1X8: + case MEDIA_BUS_FMT_SGBRG8_1X8: + case MEDIA_BUS_FMT_SGRBG8_1X8: + case MEDIA_BUS_FMT_SRGGB8_1X8: + case MEDIA_BUS_FMT_SBGGR10_DPCM8_1X8: + case MEDIA_BUS_FMT_SGBRG10_DPCM8_1X8: + case MEDIA_BUS_FMT_SGRBG10_DPCM8_1X8: + case MEDIA_BUS_FMT_SRGGB10_DPCM8_1X8: + return 8; + default: + WARN_ON(1); + return -EINVAL; + } +} + +unsigned int ipu_isys_mbus_code_to_mipi(u32 code) +{ + switch (code) { + case MEDIA_BUS_FMT_RGB565_1X16: + return IPU_ISYS_MIPI_CSI2_TYPE_RGB565; + case MEDIA_BUS_FMT_RGB888_1X24: + return IPU_ISYS_MIPI_CSI2_TYPE_RGB888; + case MEDIA_BUS_FMT_YUYV10_1X20: + return IPU_ISYS_MIPI_CSI2_TYPE_YUV422_10; + case MEDIA_BUS_FMT_UYVY8_1X16: + case MEDIA_BUS_FMT_YUYV8_1X16: + return IPU_ISYS_MIPI_CSI2_TYPE_YUV422_8; + case MEDIA_BUS_FMT_SBGGR14_1X14: + case MEDIA_BUS_FMT_SGBRG14_1X14: + case MEDIA_BUS_FMT_SGRBG14_1X14: + case MEDIA_BUS_FMT_SRGGB14_1X14: + return IPU_ISYS_MIPI_CSI2_TYPE_RAW14; + case MEDIA_BUS_FMT_SBGGR12_1X12: + case MEDIA_BUS_FMT_SGBRG12_1X12: + case MEDIA_BUS_FMT_SGRBG12_1X12: + case MEDIA_BUS_FMT_SRGGB12_1X12: + return IPU_ISYS_MIPI_CSI2_TYPE_RAW12; + case MEDIA_BUS_FMT_Y10_1X10: + case MEDIA_BUS_FMT_SBGGR10_1X10: + case MEDIA_BUS_FMT_SGBRG10_1X10: + case MEDIA_BUS_FMT_SGRBG10_1X10: + case MEDIA_BUS_FMT_SRGGB10_1X10: + return IPU_ISYS_MIPI_CSI2_TYPE_RAW10; + case MEDIA_BUS_FMT_SBGGR8_1X8: + case MEDIA_BUS_FMT_SGBRG8_1X8: + case MEDIA_BUS_FMT_SGRBG8_1X8: + case MEDIA_BUS_FMT_SRGGB8_1X8: + return IPU_ISYS_MIPI_CSI2_TYPE_RAW8; + case MEDIA_BUS_FMT_SBGGR10_DPCM8_1X8: + case MEDIA_BUS_FMT_SGBRG10_DPCM8_1X8: + case MEDIA_BUS_FMT_SGRBG10_DPCM8_1X8: + case MEDIA_BUS_FMT_SRGGB10_DPCM8_1X8: + return IPU_ISYS_MIPI_CSI2_TYPE_USER_DEF(1); + default: + WARN_ON(1); + return -EINVAL; + } +} + +enum ipu_isys_subdev_pixelorder ipu_isys_subdev_get_pixelorder(u32 code) +{ + switch (code) { + case MEDIA_BUS_FMT_SBGGR14_1X14: + case 
MEDIA_BUS_FMT_SBGGR12_1X12: + case MEDIA_BUS_FMT_SBGGR10_1X10: + case MEDIA_BUS_FMT_SBGGR8_1X8: + case MEDIA_BUS_FMT_SBGGR10_DPCM8_1X8: + return IPU_ISYS_SUBDEV_PIXELORDER_BGGR; + case MEDIA_BUS_FMT_SGBRG14_1X14: + case MEDIA_BUS_FMT_SGBRG12_1X12: + case MEDIA_BUS_FMT_SGBRG10_1X10: + case MEDIA_BUS_FMT_SGBRG8_1X8: + case MEDIA_BUS_FMT_SGBRG10_DPCM8_1X8: + return IPU_ISYS_SUBDEV_PIXELORDER_GBRG; + case MEDIA_BUS_FMT_SGRBG14_1X14: + case MEDIA_BUS_FMT_SGRBG12_1X12: + case MEDIA_BUS_FMT_SGRBG10_1X10: + case MEDIA_BUS_FMT_SGRBG8_1X8: + case MEDIA_BUS_FMT_SGRBG10_DPCM8_1X8: + return IPU_ISYS_SUBDEV_PIXELORDER_GRBG; + case MEDIA_BUS_FMT_SRGGB14_1X14: + case MEDIA_BUS_FMT_SRGGB12_1X12: + case MEDIA_BUS_FMT_SRGGB10_1X10: + case MEDIA_BUS_FMT_SRGGB8_1X8: + case MEDIA_BUS_FMT_SRGGB10_DPCM8_1X8: + return IPU_ISYS_SUBDEV_PIXELORDER_RGGB; + default: + WARN_ON(1); + return -EINVAL; + } +} + +u32 ipu_isys_subdev_code_to_uncompressed(u32 sink_code) +{ + switch (sink_code) { + case MEDIA_BUS_FMT_SBGGR10_DPCM8_1X8: + return MEDIA_BUS_FMT_SBGGR10_1X10; + case MEDIA_BUS_FMT_SGBRG10_DPCM8_1X8: + return MEDIA_BUS_FMT_SGBRG10_1X10; + case MEDIA_BUS_FMT_SGRBG10_DPCM8_1X8: + return MEDIA_BUS_FMT_SGRBG10_1X10; + case MEDIA_BUS_FMT_SRGGB10_DPCM8_1X8: + return MEDIA_BUS_FMT_SRGGB10_1X10; + default: + return sink_code; + } +} + +struct v4l2_mbus_framefmt *__ipu_isys_get_ffmt(struct v4l2_subdev *sd, +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 1, 0) + struct v4l2_subdev_fh *cfg, +#else + struct v4l2_subdev_pad_config + *cfg, +#endif + unsigned int pad, + unsigned int stream, + unsigned int which) +{ + struct ipu_isys_subdev *asd = to_ipu_isys_subdev(sd); + + if (which == V4L2_SUBDEV_FORMAT_ACTIVE) + return &asd->ffmt[pad][stream]; + else + return v4l2_subdev_get_try_format( +#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 1, 0) + sd, +#endif + cfg, pad); +} + +struct v4l2_rect *__ipu_isys_get_selection(struct v4l2_subdev *sd, +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 1, 0) + struct v4l2_subdev_fh 
*cfg, +#else + struct v4l2_subdev_pad_config *cfg, +#endif + unsigned int target, + unsigned int pad, unsigned int which) +{ + struct ipu_isys_subdev *asd = to_ipu_isys_subdev(sd); + + if (which == V4L2_SUBDEV_FORMAT_ACTIVE) { + switch (target) { + case V4L2_SEL_TGT_CROP: + return &asd->crop[pad]; + case V4L2_SEL_TGT_COMPOSE: + return &asd->compose[pad]; + } + } else { + switch (target) { + case V4L2_SEL_TGT_CROP: + return v4l2_subdev_get_try_crop( +#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 1, 0) + sd, +#endif + cfg, pad); + case V4L2_SEL_TGT_COMPOSE: + return v4l2_subdev_get_try_compose( +#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 1, 0) + sd, +#endif + cfg, pad); + } + } + WARN_ON(1); + return NULL; +} + +static int target_valid(struct v4l2_subdev *sd, unsigned int target, + unsigned int pad) +{ + struct ipu_isys_subdev *asd = to_ipu_isys_subdev(sd); + + switch (target) { + case V4L2_SEL_TGT_CROP: + return asd->valid_tgts[pad].crop; + case V4L2_SEL_TGT_COMPOSE: + return asd->valid_tgts[pad].compose; + default: + return 0; + } +} + +int ipu_isys_subdev_fmt_propagate(struct v4l2_subdev *sd, +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 1, 0) + struct v4l2_subdev_fh *cfg, +#else + struct v4l2_subdev_pad_config *cfg, +#endif + struct v4l2_mbus_framefmt *ffmt, + struct v4l2_rect *r, + enum isys_subdev_prop_tgt tgt, + unsigned int pad, unsigned int which) +{ + struct ipu_isys_subdev *asd = to_ipu_isys_subdev(sd); + struct v4l2_mbus_framefmt **ffmts = NULL; + struct v4l2_rect **crops = NULL; + struct v4l2_rect **compose = NULL; + unsigned int i; + int rval = 0; + + if (tgt == IPU_ISYS_SUBDEV_PROP_TGT_NR_OF) + return 0; + + if (WARN_ON(pad >= sd->entity.num_pads)) + return -EINVAL; + + ffmts = kcalloc(sd->entity.num_pads, + sizeof(*ffmts), GFP_KERNEL); + if (!ffmts) { + rval = -ENOMEM; + goto out_subdev_fmt_propagate; + } + crops = kcalloc(sd->entity.num_pads, + sizeof(*crops), GFP_KERNEL); + if (!crops) { + rval = -ENOMEM; + goto out_subdev_fmt_propagate; + } + compose 
= kcalloc(sd->entity.num_pads, + sizeof(*compose), GFP_KERNEL); + if (!compose) { + rval = -ENOMEM; + goto out_subdev_fmt_propagate; + } + + for (i = 0; i < sd->entity.num_pads; i++) { + ffmts[i] = __ipu_isys_get_ffmt(sd, cfg, i, 0, which); + crops[i] = __ipu_isys_get_selection( + sd, cfg, V4L2_SEL_TGT_CROP, i, which); + compose[i] = __ipu_isys_get_selection( + sd, cfg, V4L2_SEL_TGT_COMPOSE, i, which); + } + + switch (tgt) { + case IPU_ISYS_SUBDEV_PROP_TGT_SINK_FMT: + crops[pad]->left = 0; + crops[pad]->top = 0; + crops[pad]->width = ffmt->width; + crops[pad]->height = ffmt->height; + rval = ipu_isys_subdev_fmt_propagate(sd, cfg, ffmt, crops[pad], + tgt + 1, pad, which); + goto out_subdev_fmt_propagate; + case IPU_ISYS_SUBDEV_PROP_TGT_SINK_CROP: + if (WARN_ON(sd->entity.pads[pad].flags & MEDIA_PAD_FL_SOURCE)) + goto out_subdev_fmt_propagate; + + compose[pad]->left = 0; + compose[pad]->top = 0; + compose[pad]->width = r->width; + compose[pad]->height = r->height; + rval = ipu_isys_subdev_fmt_propagate(sd, cfg, ffmt, + compose[pad], tgt + 1, + pad, which); + goto out_subdev_fmt_propagate; + case IPU_ISYS_SUBDEV_PROP_TGT_SINK_COMPOSE: + if (WARN_ON(sd->entity.pads[pad].flags & MEDIA_PAD_FL_SOURCE)) { + rval = -EINVAL; + goto out_subdev_fmt_propagate; + } + + /* 1:n and 1:1 case: only propagate to the first source pad */ + if (asd->nsinks == 1 && asd->nsources >= 1) { + compose[asd->nsinks]->left = + compose[asd->nsinks]->top = 0; + compose[asd->nsinks]->width = r->width; + compose[asd->nsinks]->height = r->height; + rval = ipu_isys_subdev_fmt_propagate(sd, cfg, ffmt, + compose[asd->nsinks], + tgt + 1, asd->nsinks, + which); + if (rval) + goto out_subdev_fmt_propagate; + /* n:n case: propagate according to route info */ + } else if (asd->nsinks == asd->nsources && asd->nsources > 1) { + for (i = asd->nsinks; i < sd->entity.num_pads; i++) + if (media_entity_has_route(&sd->entity, pad, i)) + break; + + if (i != sd->entity.num_pads) { + compose[i]->left = 0; + 
compose[i]->top = 0; + compose[i]->width = r->width; + compose[i]->height = r->height; + rval = ipu_isys_subdev_fmt_propagate(sd, cfg, ffmt, + compose[i], + tgt + 1, i, + which); + if (rval) + goto out_subdev_fmt_propagate; + } + /* n:m case: propagate to all source pad */ + } else if (asd->nsinks != asd->nsources && asd->nsources > 1 && + asd->nsources > 1) { + for (i = 1; i < sd->entity.num_pads; i++) { + if (!(sd->entity.pads[i].flags & + MEDIA_PAD_FL_SOURCE)) + continue; + + compose[i]->left = 0; + compose[i]->top = 0; + compose[i]->width = r->width; + compose[i]->height = r->height; + rval = ipu_isys_subdev_fmt_propagate(sd, cfg, + ffmt, + compose[i], + tgt + 1, i, + which); + if (rval) + goto out_subdev_fmt_propagate; + } + } + goto out_subdev_fmt_propagate; + case IPU_ISYS_SUBDEV_PROP_TGT_SOURCE_COMPOSE: + if (WARN_ON(sd->entity.pads[pad].flags & MEDIA_PAD_FL_SINK)) { + rval = -EINVAL; + goto out_subdev_fmt_propagate; + } + + crops[pad]->left = 0; + crops[pad]->top = 0; + crops[pad]->width = r->width; + crops[pad]->height = r->height; + rval = ipu_isys_subdev_fmt_propagate(sd, cfg, ffmt, + crops[pad], tgt + 1, pad, which); + goto out_subdev_fmt_propagate; + case IPU_ISYS_SUBDEV_PROP_TGT_SOURCE_CROP:{ + struct v4l2_subdev_format fmt = { + .which = which, + .pad = pad, + .format = { + .width = r->width, + .height = r->height, + /* + * Either use the code from sink pad + * or the current one. + */ + .code = + ffmt ? ffmt->code : ffmts[pad]->code, + .field = + ffmt ? 
ffmt->field : ffmts[pad]-> + field, + }, + }; + + asd->set_ffmt(sd, cfg, &fmt); + goto out_subdev_fmt_propagate; + } + } + +out_subdev_fmt_propagate: + kfree(ffmts); + kfree(crops); + kfree(compose); + return rval; +} + +int ipu_isys_subdev_set_ffmt_default(struct v4l2_subdev *sd, +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 1, 0) + struct v4l2_subdev_fh *cfg, +#else + struct v4l2_subdev_pad_config *cfg, +#endif + struct v4l2_subdev_format *fmt) +{ + struct v4l2_mbus_framefmt *ffmt = + __ipu_isys_get_ffmt(sd, cfg, fmt->pad, fmt->stream, + fmt->which); + + /* No propagation for non-zero pads. */ + if (fmt->pad) { + struct v4l2_mbus_framefmt *sink_ffmt = + __ipu_isys_get_ffmt(sd, cfg, 0, fmt->stream, + fmt->which); + + ffmt->width = sink_ffmt->width; + ffmt->height = sink_ffmt->height; + ffmt->code = sink_ffmt->code; + ffmt->field = sink_ffmt->field; + } + + ffmt->width = fmt->format.width; + ffmt->height = fmt->format.height; + ffmt->code = fmt->format.code; + ffmt->field = fmt->format.field; + + return ipu_isys_subdev_fmt_propagate(sd, cfg, &fmt->format, NULL, + IPU_ISYS_SUBDEV_PROP_TGT_SINK_FMT, + fmt->pad, fmt->which); +} + +int __ipu_isys_subdev_set_ffmt(struct v4l2_subdev *sd, +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 1, 0) + struct v4l2_subdev_fh *cfg, +#else + struct v4l2_subdev_pad_config *cfg, +#endif + struct v4l2_subdev_format *fmt) +{ + struct ipu_isys_subdev *asd = to_ipu_isys_subdev(sd); + struct v4l2_mbus_framefmt *ffmt = + __ipu_isys_get_ffmt(sd, cfg, fmt->pad, fmt->stream, + fmt->which); + u32 code = asd->supported_codes[fmt->pad][0]; + unsigned int i; + + WARN_ON(!mutex_is_locked(&asd->mutex)); + + fmt->format.width = clamp(fmt->format.width, IPU_ISYS_MIN_WIDTH, + IPU_ISYS_MAX_WIDTH); + fmt->format.height = clamp(fmt->format.height, + IPU_ISYS_MIN_HEIGHT, IPU_ISYS_MAX_HEIGHT); + + for (i = 0; asd->supported_codes[fmt->pad][i]; i++) { + if (asd->supported_codes[fmt->pad][i] == fmt->format.code) { + code = asd->supported_codes[fmt->pad][i]; + break; 
+ } + } + + fmt->format.code = code; + + asd->set_ffmt(sd, cfg, fmt); + + fmt->format = *ffmt; + + return 0; +} + +int ipu_isys_subdev_set_ffmt(struct v4l2_subdev *sd, +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 1, 0) + struct v4l2_subdev_fh *cfg, +#else + struct v4l2_subdev_pad_config *cfg, +#endif + struct v4l2_subdev_format *fmt) +{ + struct ipu_isys_subdev *asd = to_ipu_isys_subdev(sd); + int rval; + + if (fmt->stream >= asd->nstreams) + return -EINVAL; + + mutex_lock(&asd->mutex); + rval = __ipu_isys_subdev_set_ffmt(sd, cfg, fmt); + mutex_unlock(&asd->mutex); + + return rval; +} + +int ipu_isys_subdev_get_ffmt(struct v4l2_subdev *sd, +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 1, 0) + struct v4l2_subdev_fh *cfg, +#else + struct v4l2_subdev_pad_config *cfg, +#endif + struct v4l2_subdev_format *fmt) +{ + struct ipu_isys_subdev *asd = to_ipu_isys_subdev(sd); + + if (fmt->stream >= asd->nstreams) + return -EINVAL; + + mutex_lock(&asd->mutex); + fmt->format = *__ipu_isys_get_ffmt(sd, cfg, fmt->pad, + fmt->stream, + fmt->which); + mutex_unlock(&asd->mutex); + + return 0; +} + +int ipu_isys_subdev_get_frame_desc(struct v4l2_subdev *sd, + struct v4l2_mbus_frame_desc *desc) +{ + int i, rval = 0; + + for (i = 0; i < sd->entity.num_pads; i++) { + if (!(sd->entity.pads[i].flags & MEDIA_PAD_FL_SOURCE)) + continue; + + rval = v4l2_subdev_call(sd, pad, get_frame_desc, i, desc); + if (!rval) + return rval; + } + + if (i == sd->entity.num_pads) + rval = -EINVAL; + + return rval; +} + +bool ipu_isys_subdev_has_route(struct media_entity *entity, + unsigned int pad0, unsigned int pad1, int *stream) +{ + struct ipu_isys_subdev *asd; + int i; + + if (!entity) { + WARN_ON(1); + return false; + } + asd = to_ipu_isys_subdev(media_entity_to_v4l2_subdev(entity)); + + /* Two sinks are never connected together. 
*/ + if (pad0 < asd->nsinks && pad1 < asd->nsinks) + return false; + + for (i = 0; i < asd->nstreams; i++) { + if ((asd->route[i].flags & V4L2_SUBDEV_ROUTE_FL_ACTIVE) && + ((asd->route[i].sink == pad0 && + asd->route[i].source == pad1) || + (asd->route[i].sink == pad1 && + asd->route[i].source == pad0))) { + if (stream) + *stream = i; + return true; + } + } + + return false; +} + +int ipu_isys_subdev_set_routing(struct v4l2_subdev *sd, + struct v4l2_subdev_routing *route) +{ + struct ipu_isys_subdev *asd = to_ipu_isys_subdev(sd); + int i, j, ret = 0; + + WARN_ON(!mutex_is_locked(&sd->entity. +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 5, 0) + parent +#else + graph_obj.mdev +#endif + ->graph_mutex)); + + for (i = 0; i < min(route->num_routes, asd->nstreams); ++i) { + struct v4l2_subdev_route *t = &route->routes[i]; + + if (t->sink_stream > asd->nstreams - 1 || + t->source_stream > asd->nstreams - 1) + continue; + + for (j = 0; j < asd->nstreams; j++) { + if (t->sink_pad == asd->route[j].sink && + t->source_pad == asd->route[j].source) + break; + } + + if (j == asd->nstreams) + continue; + + if (asd->route[j].flags & V4L2_SUBDEV_ROUTE_FL_IMMUTABLE) + continue; + + if ((t->flags & V4L2_SUBDEV_ROUTE_FL_SOURCE) && asd->nsinks) + continue; + + if (!(t->flags & V4L2_SUBDEV_ROUTE_FL_SOURCE)) { + int source_pad = 0; + + if (sd->entity.pads[t->sink_pad].flags & + MEDIA_PAD_FL_MULTIPLEX) + source_pad = t->source_pad - asd->nsinks; + + asd->stream[t->sink_pad].stream_id[source_pad] = + t->sink_stream; + } + + if (sd->entity.pads[t->source_pad].flags & + MEDIA_PAD_FL_MULTIPLEX) + asd->stream[t->source_pad].stream_id[t->sink_pad] = + t->source_stream; + else + asd->stream[t->source_pad].stream_id[0] = + t->source_stream; + + if (t->flags & V4L2_SUBDEV_ROUTE_FL_ACTIVE) { + bitmap_set(asd->stream[t->source_pad].streams_stat, + t->source_stream, 1); + if (!(t->flags & V4L2_SUBDEV_ROUTE_FL_SOURCE)) + bitmap_set(asd->stream[t->sink_pad] + .streams_stat, t->sink_stream, 1); + 
asd->route[j].flags |= V4L2_SUBDEV_ROUTE_FL_ACTIVE; + } else if (!(t->flags & V4L2_SUBDEV_ROUTE_FL_ACTIVE)) { + bitmap_clear(asd->stream[t->source_pad].streams_stat, + t->source_stream, 1); + if (!(t->flags & V4L2_SUBDEV_ROUTE_FL_SOURCE)) + bitmap_clear(asd->stream[t->sink_pad] + .streams_stat, t->sink_stream, 1); + asd->route[j].flags &= (~V4L2_SUBDEV_ROUTE_FL_ACTIVE); + } + } + + return ret; +} + +int ipu_isys_subdev_get_routing(struct v4l2_subdev *sd, + struct v4l2_subdev_routing *route) +{ + struct ipu_isys_subdev *asd = to_ipu_isys_subdev(sd); + int i, j; + + for (i = 0, j = 0; i < min(asd->nstreams, route->num_routes); ++i) { + route->routes[j].sink_pad = asd->route[i].sink; + if (sd->entity.pads[asd->route[i].sink].flags & + MEDIA_PAD_FL_MULTIPLEX) { + int source_pad = asd->route[i].source - asd->nsinks; + + route->routes[j].sink_stream = + asd->stream[asd->route[i].sink]. + stream_id[source_pad]; + } else { + route->routes[j].sink_stream = + asd->stream[asd->route[i].sink].stream_id[0]; + } + + route->routes[j].source_pad = asd->route[i].source; + if (sd->entity.pads[asd->route[i].source].flags & + MEDIA_PAD_FL_MULTIPLEX) { + route->routes[j].source_stream = + asd->stream[asd->route[i].source].stream_id[asd-> + route + [i]. 
+ sink]; + } else { + route->routes[j].source_stream = + asd->stream[asd->route[i].source].stream_id[0]; + } + route->routes[j++].flags = asd->route[i].flags; + } + + route->num_routes = j; + + return 0; +} + +int ipu_isys_subdev_set_sel(struct v4l2_subdev *sd, +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 1, 0) + struct v4l2_subdev_fh *cfg, +#else + struct v4l2_subdev_pad_config *cfg, +#endif + struct v4l2_subdev_selection *sel) +{ + struct ipu_isys_subdev *asd = to_ipu_isys_subdev(sd); + struct media_pad *pad = &asd->sd.entity.pads[sel->pad]; + struct v4l2_rect *r, __r = { 0 }; + unsigned int tgt; + + if (!target_valid(sd, sel->target, sel->pad)) + return -EINVAL; + + switch (sel->target) { + case V4L2_SEL_TGT_CROP: + if (pad->flags & MEDIA_PAD_FL_SINK) { + struct v4l2_mbus_framefmt *ffmt = + __ipu_isys_get_ffmt(sd, cfg, sel->pad, 0, + sel->which); + + __r.width = ffmt->width; + __r.height = ffmt->height; + r = &__r; + tgt = IPU_ISYS_SUBDEV_PROP_TGT_SINK_CROP; + } else { + /* 0 is the sink pad. 
*/ + r = __ipu_isys_get_selection(sd, cfg, sel->target, 0, + sel->which); + tgt = IPU_ISYS_SUBDEV_PROP_TGT_SOURCE_CROP; + } + + break; + case V4L2_SEL_TGT_COMPOSE: + if (pad->flags & MEDIA_PAD_FL_SINK) { + r = __ipu_isys_get_selection(sd, cfg, V4L2_SEL_TGT_CROP, + sel->pad, sel->which); + tgt = IPU_ISYS_SUBDEV_PROP_TGT_SINK_COMPOSE; + } else { + r = __ipu_isys_get_selection(sd, cfg, + V4L2_SEL_TGT_COMPOSE, 0, + sel->which); + tgt = IPU_ISYS_SUBDEV_PROP_TGT_SOURCE_COMPOSE; + } + break; + default: + return -EINVAL; + } + + sel->r.width = clamp(sel->r.width, IPU_ISYS_MIN_WIDTH, r->width); + sel->r.height = clamp(sel->r.height, IPU_ISYS_MIN_HEIGHT, r->height); + *__ipu_isys_get_selection(sd, cfg, sel->target, sel->pad, + sel->which) = sel->r; + return ipu_isys_subdev_fmt_propagate(sd, cfg, NULL, &sel->r, tgt, + sel->pad, sel->which); +} + +int ipu_isys_subdev_get_sel(struct v4l2_subdev *sd, +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 1, 0) + struct v4l2_subdev_fh *cfg, +#else + struct v4l2_subdev_pad_config *cfg, +#endif + struct v4l2_subdev_selection *sel) +{ + if (!target_valid(sd, sel->target, sel->pad)) + return -EINVAL; + + sel->r = *__ipu_isys_get_selection(sd, cfg, sel->target, + sel->pad, sel->which); + + return 0; +} + +int ipu_isys_subdev_enum_mbus_code(struct v4l2_subdev *sd, +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 1, 0) + struct v4l2_subdev_fh *cfg, +#else + struct v4l2_subdev_pad_config *cfg, +#endif + struct v4l2_subdev_mbus_code_enum *code) +{ + struct ipu_isys_subdev *asd = to_ipu_isys_subdev(sd); + const u32 *supported_codes = asd->supported_codes[code->pad]; + u32 index; + bool next_stream = false; + + if (sd->entity.pads[code->pad].flags & MEDIA_PAD_FL_MULTIPLEX) { + if (code->stream & V4L2_SUBDEV_FLAG_NEXT_STREAM) { + next_stream = true; + code->stream &= ~V4L2_SUBDEV_FLAG_NEXT_STREAM; + } + + if (code->stream > asd->nstreams - 1) + return -EINVAL; + + if (next_stream && code->stream < asd->nstreams) { + code->stream++; + return 0; + } + + 
return -EINVAL; + } + + for (index = 0; supported_codes[index]; index++) { + if (index == code->index) { + code->code = supported_codes[index]; + return 0; + } + } + + return -EINVAL; +} + +#if !defined(CONFIG_VIDEO_INTEL_IPU4) && !defined(CONFIG_VIDEO_INTEL_IPU4P) +/* + * IPU private link validation + * In advanced IPU and special case, there will be format change between + * sink/source pads in ISYS. + * Format code checking is not necessary for these features. + */ +static int ipu_isys_subdev_link_validate_private( + struct v4l2_subdev *sd, + struct media_link *link, + struct v4l2_subdev_format *source_fmt, + struct v4l2_subdev_format *sink_fmt) +{ + struct ipu_isys_subdev *asd = to_ipu_isys_subdev(sd); + + /* The width and height must match. */ + if (source_fmt->format.width != sink_fmt->format.width + || source_fmt->format.height != sink_fmt->format.height) + return -EPIPE; + + /* + * The field order must match, or the sink field order must be NONE + * to support interlaced hardware connected to bridges that support + * progressive formats only. + */ + if (source_fmt->format.field != sink_fmt->format.field && + sink_fmt->format.field != V4L2_FIELD_NONE) + return -EPIPE; + + if (source_fmt->stream != sink_fmt->stream) + return -EINVAL; + + /* + * For new IPU special case, YUV format changing in BE-SOC, + * from YUV422 to I420, which is used to adapt multiple + * YUV sensors and provide I420 to BB for partial processing. + * If this entity doing format convert, ignore format check + */ + if (source_fmt->format.code != sink_fmt->format.code) { + if (source_fmt->format.code == MEDIA_BUS_FMT_UYVY8_2X8 && + (sink_fmt->format.code == MEDIA_BUS_FMT_YUYV8_1X16 || + sink_fmt->format.code == MEDIA_BUS_FMT_UYVY8_1X16)) + dev_warn(&asd->isys->adev->dev, + "YUV format change, ignore code check\n"); + else + return -EINVAL; + } + + return 0; +} +#endif + +/* + * Besides validating the link, figure out the external pad and the + * ISYS FW ABI source. 
+ */ +int ipu_isys_subdev_link_validate(struct v4l2_subdev *sd, + struct media_link *link, + struct v4l2_subdev_format *source_fmt, + struct v4l2_subdev_format *sink_fmt) +{ + struct v4l2_subdev *source_sd = + media_entity_to_v4l2_subdev(link->source->entity); + struct ipu_isys_pipeline *ip = container_of(sd->entity.pipe, + struct ipu_isys_pipeline, + pipe); + struct ipu_isys_subdev *asd = to_ipu_isys_subdev(sd); + + if (!source_sd) + return -ENODEV; + if (strncmp(source_sd->name, IPU_ISYS_ENTITY_PREFIX, + strlen(IPU_ISYS_ENTITY_PREFIX)) != 0) { + /* + * source_sd isn't ours --- sd must be the external + * sub-device. + */ + ip->external = link->source; + ip->source = to_ipu_isys_subdev(sd)->source; + dev_dbg(&asd->isys->adev->dev, "%s: using source %d\n", + sd->entity.name, ip->source); + } else if (source_sd->entity.num_pads == 1) { + /* All internal sources have a single pad. */ + ip->external = link->source; + ip->source = to_ipu_isys_subdev(source_sd)->source; + + dev_dbg(&asd->isys->adev->dev, "%s: using source %d\n", + sd->entity.name, ip->source); + } + + if (asd->isl_mode != IPU_ISL_OFF) + ip->isl_mode = asd->isl_mode; + +#if !defined(CONFIG_VIDEO_INTEL_IPU4) && !defined(CONFIG_VIDEO_INTEL_IPU4P) + return ipu_isys_subdev_link_validate_private(sd, link, source_fmt, + sink_fmt); +#else + return v4l2_subdev_link_validate_default(sd, link, source_fmt, + sink_fmt); +#endif +} + +int ipu_isys_subdev_open(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh) +{ + struct ipu_isys_subdev *asd = to_ipu_isys_subdev(sd); + unsigned int i; + + mutex_lock(&asd->mutex); + + for (i = 0; i < asd->sd.entity.num_pads; i++) { + struct v4l2_mbus_framefmt *try_fmt = + v4l2_subdev_get_try_format( +#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 1, 0) + sd, fh->pad, +#else + fh, +#endif + i); + struct v4l2_rect *try_crop = + v4l2_subdev_get_try_crop( +#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 1, 0) + sd, + fh->pad, +#else + fh, +#endif + i); + struct v4l2_rect *try_compose = + 
v4l2_subdev_get_try_compose( +#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 1, 0) + sd, + fh->pad, +#else + fh, +#endif + i); + + *try_fmt = asd->ffmt[i][0]; + *try_crop = asd->crop[i]; + *try_compose = asd->compose[i]; + } + + mutex_unlock(&asd->mutex); + + return 0; +} + +int ipu_isys_subdev_close(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh) +{ + return 0; +} + +int ipu_isys_subdev_init(struct ipu_isys_subdev *asd, + struct v4l2_subdev_ops *ops, + unsigned int nr_ctrls, + unsigned int num_pads, + unsigned int num_streams, + unsigned int num_source, + unsigned int num_sink, + unsigned int sd_flags) +{ + int i; + int rval = -EINVAL; + + mutex_init(&asd->mutex); + + v4l2_subdev_init(&asd->sd, ops); + + asd->sd.flags |= V4L2_SUBDEV_FL_HAS_DEVNODE | sd_flags; + asd->sd.owner = THIS_MODULE; + + asd->nstreams = num_streams; + asd->nsources = num_source; + asd->nsinks = num_sink; + + asd->pad = devm_kcalloc(&asd->isys->adev->dev, num_pads, + sizeof(*asd->pad), GFP_KERNEL); + + asd->ffmt = (struct v4l2_mbus_framefmt **) + devm_kcalloc(&asd->isys->adev->dev, num_pads, + sizeof(struct v4l2_mbus_framefmt *), + GFP_KERNEL); + + asd->crop = devm_kcalloc(&asd->isys->adev->dev, num_pads, + sizeof(*asd->crop), GFP_KERNEL); + + asd->compose = devm_kcalloc(&asd->isys->adev->dev, num_pads, + sizeof(*asd->compose), GFP_KERNEL); + + asd->valid_tgts = devm_kcalloc(&asd->isys->adev->dev, num_pads, + sizeof(*asd->valid_tgts), GFP_KERNEL); + asd->route = devm_kcalloc(&asd->isys->adev->dev, num_streams, + sizeof(*asd->route), GFP_KERNEL); + + asd->stream = devm_kcalloc(&asd->isys->adev->dev, num_pads, + sizeof(*asd->stream), GFP_KERNEL); + + if (!asd->pad || !asd->ffmt || !asd->crop || !asd->compose || + !asd->valid_tgts || !asd->route || !asd->stream) + return -ENOMEM; + + for (i = 0; i < num_pads; i++) { + asd->ffmt[i] = (struct v4l2_mbus_framefmt *) + devm_kcalloc(&asd->isys->adev->dev, num_streams, + sizeof(struct v4l2_mbus_framefmt), GFP_KERNEL); + if (!asd->ffmt[i]) + return 
-ENOMEM; + + asd->stream[i].stream_id = + devm_kcalloc(&asd->isys->adev->dev, num_source, + sizeof(*asd->stream[i].stream_id), GFP_KERNEL); + if (!asd->stream[i].stream_id) + return -ENOMEM; + } + + rval = media_entity_pads_init(&asd->sd.entity, num_pads, asd->pad); + if (rval) + goto out_mutex_destroy; + + if (asd->ctrl_init) { + rval = v4l2_ctrl_handler_init(&asd->ctrl_handler, nr_ctrls); + if (rval) + goto out_media_entity_cleanup; + + asd->ctrl_init(&asd->sd); + if (asd->ctrl_handler.error) { + rval = asd->ctrl_handler.error; + goto out_v4l2_ctrl_handler_free; + } + + asd->sd.ctrl_handler = &asd->ctrl_handler; + } + + asd->source = -1; + + return 0; + +out_v4l2_ctrl_handler_free: + v4l2_ctrl_handler_free(&asd->ctrl_handler); + +out_media_entity_cleanup: + media_entity_cleanup(&asd->sd.entity); + +out_mutex_destroy: + mutex_destroy(&asd->mutex); + + return rval; +} + +void ipu_isys_subdev_cleanup(struct ipu_isys_subdev *asd) +{ + media_entity_cleanup(&asd->sd.entity); + v4l2_ctrl_handler_free(&asd->ctrl_handler); + mutex_destroy(&asd->mutex); +} diff --git a/drivers/media/pci/intel/ipu-isys-subdev.h b/drivers/media/pci/intel/ipu-isys-subdev.h new file mode 100644 index 0000000000000..034b3562e859b --- /dev/null +++ b/drivers/media/pci/intel/ipu-isys-subdev.h @@ -0,0 +1,210 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (C) 2013 - 2018 Intel Corporation */ + +#ifndef IPU_ISYS_SUBDEV_H +#define IPU_ISYS_SUBDEV_H + +#include + +#include +#include +#include + +#include "ipu-isys-queue.h" + +#define IPU_ISYS_MIPI_CSI2_TYPE_NULL 0x10 +#define IPU_ISYS_MIPI_CSI2_TYPE_BLANKING 0x11 +#define IPU_ISYS_MIPI_CSI2_TYPE_EMBEDDED8 0x12 +#define IPU_ISYS_MIPI_CSI2_TYPE_YUV422_8 0x1e +#define IPU_ISYS_MIPI_CSI2_TYPE_YUV422_10 0x1f +#define IPU_ISYS_MIPI_CSI2_TYPE_RGB565 0x22 +#define IPU_ISYS_MIPI_CSI2_TYPE_RGB888 0x24 +#define IPU_ISYS_MIPI_CSI2_TYPE_RAW6 0x28 +#define IPU_ISYS_MIPI_CSI2_TYPE_RAW7 0x29 +#define IPU_ISYS_MIPI_CSI2_TYPE_RAW8 0x2a +#define 
IPU_ISYS_MIPI_CSI2_TYPE_RAW10 0x2b +#define IPU_ISYS_MIPI_CSI2_TYPE_RAW12 0x2c +#define IPU_ISYS_MIPI_CSI2_TYPE_RAW14 0x2d +/* 1-8 */ +#define IPU_ISYS_MIPI_CSI2_TYPE_USER_DEF(i) (0x30 + (i) - 1) + +#define FMT_ENTRY (struct ipu_isys_fmt_entry []) + +enum isys_subdev_prop_tgt { + IPU_ISYS_SUBDEV_PROP_TGT_SINK_FMT, + IPU_ISYS_SUBDEV_PROP_TGT_SINK_CROP, + IPU_ISYS_SUBDEV_PROP_TGT_SINK_COMPOSE, + IPU_ISYS_SUBDEV_PROP_TGT_SOURCE_COMPOSE, + IPU_ISYS_SUBDEV_PROP_TGT_SOURCE_CROP, +}; + +#define IPU_ISYS_SUBDEV_PROP_TGT_NR_OF \ + (IPU_ISYS_SUBDEV_PROP_TGT_SOURCE_CROP + 1) + +enum ipu_isl_mode { + IPU_ISL_OFF = 0, /* IPU_FW_ISYS_USE_NO_ISL_NO_ISA */ + IPU_ISL_CSI2_BE, /* IPU_FW_ISYS_USE_SINGLE_DUAL_ISL */ + IPU_ISL_ISA /* IPU_FW_ISYS_USE_SINGLE_ISA */ +}; + +enum ipu_be_mode { + IPU_BE_RAW = 0, + IPU_BE_SOC +}; + +enum ipu_isys_subdev_pixelorder { + IPU_ISYS_SUBDEV_PIXELORDER_BGGR = 0, + IPU_ISYS_SUBDEV_PIXELORDER_GBRG, + IPU_ISYS_SUBDEV_PIXELORDER_GRBG, + IPU_ISYS_SUBDEV_PIXELORDER_RGGB, +}; + +struct ipu_isys; + +struct ipu_isys_subdev { + /* Serialise access to any other field in the struct */ + struct mutex mutex; + struct v4l2_subdev sd; + struct ipu_isys *isys; + u32 const *const *supported_codes; + struct media_pad *pad; + struct v4l2_mbus_framefmt **ffmt; + struct v4l2_rect *crop; + struct v4l2_rect *compose; + struct { + unsigned int *stream_id; + DECLARE_BITMAP(streams_stat, 32); + } *stream; /* stream enable/disable status, indexed by pad */ + struct { + unsigned int sink; + unsigned int source; + int flags; + } *route; /* pad level info, indexed by stream */ + unsigned int nstreams; + unsigned int nsinks; + unsigned int nsources; + struct v4l2_ctrl_handler ctrl_handler; + void (*ctrl_init)(struct v4l2_subdev *sd); + void (*set_ffmt)(struct v4l2_subdev *sd, +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 1, 0) + struct v4l2_subdev_fh *cfg, +#else + struct v4l2_subdev_pad_config *cfg, +#endif + struct v4l2_subdev_format *fmt); + struct { + bool crop; + bool compose; + 
} *valid_tgts; + enum ipu_isl_mode isl_mode; + enum ipu_be_mode be_mode; + int source; /* SSI stream source; -1 if unset */ +}; + +#define to_ipu_isys_subdev(__sd) \ + container_of(__sd, struct ipu_isys_subdev, sd) + +struct v4l2_mbus_framefmt *__ipu_isys_get_ffmt(struct v4l2_subdev *sd, +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 1, 0) + struct v4l2_subdev_fh *cfg, +#else + struct v4l2_subdev_pad_config + *cfg, +#endif + unsigned int pad, + unsigned int stream, + unsigned int which); + +unsigned int ipu_isys_mbus_code_to_bpp(u32 code); +unsigned int ipu_isys_mbus_code_to_mipi(u32 code); +u32 ipu_isys_subdev_code_to_uncompressed(u32 sink_code); + +enum ipu_isys_subdev_pixelorder ipu_isys_subdev_get_pixelorder(u32 code); + +int ipu_isys_subdev_fmt_propagate(struct v4l2_subdev *sd, +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 1, 0) + struct v4l2_subdev_fh *cfg, +#else + struct v4l2_subdev_pad_config *cfg, +#endif + struct v4l2_mbus_framefmt *ffmt, + struct v4l2_rect *r, + enum isys_subdev_prop_tgt tgt, + unsigned int pad, unsigned int which); + +int ipu_isys_subdev_set_ffmt_default(struct v4l2_subdev *sd, +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 1, 0) + struct v4l2_subdev_fh *cfg, +#else + struct v4l2_subdev_pad_config *cfg, +#endif + struct v4l2_subdev_format *fmt); +int __ipu_isys_subdev_set_ffmt(struct v4l2_subdev *sd, +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 1, 0) + struct v4l2_subdev_fh *cfg, +#else + struct v4l2_subdev_pad_config *cfg, +#endif + struct v4l2_subdev_format *fmt); +struct v4l2_rect *__ipu_isys_get_selection(struct v4l2_subdev *sd, +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 1, 0) + struct v4l2_subdev_fh *cfg, +#else + struct v4l2_subdev_pad_config *cfg, +#endif + unsigned int target, + unsigned int pad, + unsigned int which); +int ipu_isys_subdev_set_ffmt(struct v4l2_subdev *sd, +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 1, 0) + struct v4l2_subdev_fh *cfg, +#else + struct v4l2_subdev_pad_config *cfg, +#endif + struct v4l2_subdev_format 
*fmt); +int ipu_isys_subdev_get_ffmt(struct v4l2_subdev *sd, +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 1, 0) + struct v4l2_subdev_fh *cfg, +#else + struct v4l2_subdev_pad_config *cfg, +#endif + struct v4l2_subdev_format *fmt); +int ipu_isys_subdev_get_sel(struct v4l2_subdev *sd, + struct v4l2_subdev_pad_config *cfg, + struct v4l2_subdev_selection *sel); +int ipu_isys_subdev_set_sel(struct v4l2_subdev *sd, + struct v4l2_subdev_pad_config *cfg, + struct v4l2_subdev_selection *sel); +int ipu_isys_subdev_enum_mbus_code(struct v4l2_subdev *sd, +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 1, 0) + struct v4l2_subdev_fh *cfg, +#else + struct v4l2_subdev_pad_config *cfg, +#endif + struct v4l2_subdev_mbus_code_enum + *code); +int ipu_isys_subdev_link_validate(struct v4l2_subdev *sd, + struct media_link *link, + struct v4l2_subdev_format *source_fmt, + struct v4l2_subdev_format *sink_fmt); + +int ipu_isys_subdev_open(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh); +int ipu_isys_subdev_close(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh); +int ipu_isys_subdev_init(struct ipu_isys_subdev *asd, + struct v4l2_subdev_ops *ops, + unsigned int nr_ctrls, + unsigned int num_pads, + unsigned int num_streams, + unsigned int num_source, + unsigned int num_sink, + unsigned int sd_flags); +void ipu_isys_subdev_cleanup(struct ipu_isys_subdev *asd); +int ipu_isys_subdev_get_frame_desc(struct v4l2_subdev *sd, + struct v4l2_mbus_frame_desc *desc); +int ipu_isys_subdev_set_routing(struct v4l2_subdev *sd, + struct v4l2_subdev_routing *route); +int ipu_isys_subdev_get_routing(struct v4l2_subdev *sd, + struct v4l2_subdev_routing *route); +bool ipu_isys_subdev_has_route(struct media_entity *entity, + unsigned int pad0, unsigned int pad1, int *stream); +#endif /* IPU_ISYS_SUBDEV_H */ diff --git a/drivers/media/pci/intel/ipu-isys-tpg.c b/drivers/media/pci/intel/ipu-isys-tpg.c new file mode 100644 index 0000000000000..446b445ad93f2 --- /dev/null +++ b/drivers/media/pci/intel/ipu-isys-tpg.c @@ 
-0,0 +1,355 @@ +// SPDX-License-Identifier: GPL-2.0 +// Copyright (C) 2013 - 2018 Intel Corporation + +#include +#include +#include + +#include +#include +#include + +#include "ipu.h" +#include "ipu-bus.h" +#include "ipu-isys.h" +#include "ipu-isys-subdev.h" +#include "ipu-isys-tpg.h" +#include "ipu-isys-video.h" +#include "ipu-platform-isys-csi2-reg.h" + +static const u32 tpg_supported_codes_pad[] = { + MEDIA_BUS_FMT_SBGGR8_1X8, + MEDIA_BUS_FMT_SGBRG8_1X8, + MEDIA_BUS_FMT_SGRBG8_1X8, + MEDIA_BUS_FMT_SRGGB8_1X8, + MEDIA_BUS_FMT_SBGGR10_1X10, + MEDIA_BUS_FMT_SGBRG10_1X10, + MEDIA_BUS_FMT_SGRBG10_1X10, + MEDIA_BUS_FMT_SRGGB10_1X10, + 0, +}; + +static const u32 *tpg_supported_codes[] = { + tpg_supported_codes_pad, +}; + +static struct v4l2_subdev_internal_ops tpg_sd_internal_ops = { + .open = ipu_isys_subdev_open, + .close = ipu_isys_subdev_close, +}; + +static const struct v4l2_subdev_video_ops tpg_sd_video_ops = { + .s_stream = tpg_set_stream, +}; + +static int ipu_isys_tpg_s_ctrl(struct v4l2_ctrl *ctrl) +{ + struct ipu_isys_tpg *tpg = container_of(container_of(ctrl->handler, + struct + ipu_isys_subdev, + ctrl_handler), + struct ipu_isys_tpg, asd); + + switch (ctrl->id) { + case V4L2_CID_HBLANK: + writel(ctrl->val, tpg->base + MIPI_GEN_REG_SYNG_HBLANK_CYC); + break; + case V4L2_CID_VBLANK: + writel(ctrl->val, tpg->base + MIPI_GEN_REG_SYNG_VBLANK_CYC); + break; + case V4L2_CID_LINE_LENGTH_PIXELS: + if (ctrl->val > tpg->asd.ffmt[TPG_PAD_SOURCE][0].width) + writel(ctrl->val - + tpg->asd.ffmt[TPG_PAD_SOURCE][0].width, + tpg->base + MIPI_GEN_REG_SYNG_HBLANK_CYC); + break; + case V4L2_CID_FRAME_LENGTH_LINES: + if (ctrl->val > tpg->asd.ffmt[TPG_PAD_SOURCE][0].height) + writel(ctrl->val - + tpg->asd.ffmt[TPG_PAD_SOURCE][0].height, + tpg->base + MIPI_GEN_REG_SYNG_VBLANK_CYC); + break; + case V4L2_CID_TEST_PATTERN: + writel(ctrl->val, tpg->base + MIPI_GEN_REG_TPG_MODE); + break; + } + + return 0; +} + +static const struct v4l2_ctrl_ops ipu_isys_tpg_ctrl_ops = { + .s_ctrl = 
ipu_isys_tpg_s_ctrl, +}; + +static s64 ipu_isys_tpg_rate(struct ipu_isys_tpg *tpg, unsigned int bpp) +{ + return MIPI_GEN_PPC * IPU_ISYS_FREQ; +} + +static const char *const tpg_mode_items[] = { + "Ramp", + "Checkerboard", /* Does not work, disabled. */ + "Frame Based Colour", +}; + +static struct v4l2_ctrl_config tpg_mode = { + .ops = &ipu_isys_tpg_ctrl_ops, + .id = V4L2_CID_TEST_PATTERN, + .name = "Test Pattern", + .type = V4L2_CTRL_TYPE_MENU, + .min = 0, + .max = ARRAY_SIZE(tpg_mode_items) - 1, + .def = 0, + .menu_skip_mask = 0x2, + .qmenu = tpg_mode_items, +}; + +static const struct v4l2_ctrl_config csi2_header_cfg = { + .id = V4L2_CID_IPU_STORE_CSI2_HEADER, + .name = "Store CSI-2 Headers", + .type = V4L2_CTRL_TYPE_BOOLEAN, + .min = 0, + .max = 1, + .step = 1, + .def = 1, +}; + +static void ipu_isys_tpg_init_controls(struct v4l2_subdev *sd) +{ + struct ipu_isys_tpg *tpg = to_ipu_isys_tpg(sd); + int hblank; + struct v4l2_ctrl_config cfg = { + .ops = &ipu_isys_tpg_ctrl_ops, + .type = V4L2_CTRL_TYPE_INTEGER, + .max = 65535, + .min = 8, + .step = 1, + .qmenu = NULL, + .elem_size = 0, + }; + + hblank = 1024; + + tpg->hblank = v4l2_ctrl_new_std(&tpg->asd.ctrl_handler, + &ipu_isys_tpg_ctrl_ops, + V4L2_CID_HBLANK, 8, 65535, 1, hblank); + + tpg->vblank = v4l2_ctrl_new_std(&tpg->asd.ctrl_handler, + &ipu_isys_tpg_ctrl_ops, + V4L2_CID_VBLANK, 8, 65535, 1, 1024); + + cfg.id = V4L2_CID_LINE_LENGTH_PIXELS; + cfg.name = "Line Length Pixels"; + cfg.def = 1024 + 4096; + + tpg->llp = v4l2_ctrl_new_custom(&tpg->asd.ctrl_handler, &cfg, NULL); + + cfg.id = V4L2_CID_FRAME_LENGTH_LINES; + cfg.name = "Frame Length Lines"; + cfg.def = 1024 + 3072; + tpg->fll = v4l2_ctrl_new_custom(&tpg->asd.ctrl_handler, &cfg, NULL); + + tpg->pixel_rate = v4l2_ctrl_new_std(&tpg->asd.ctrl_handler, + &ipu_isys_tpg_ctrl_ops, + V4L2_CID_PIXEL_RATE, 0, 0, 1, 0); + + if (tpg->pixel_rate) { + tpg->pixel_rate->cur.val = ipu_isys_tpg_rate(tpg, 8); + tpg->pixel_rate->flags |= V4L2_CTRL_FLAG_READ_ONLY; + } + + 
v4l2_ctrl_new_custom(&tpg->asd.ctrl_handler, &tpg_mode, NULL); + tpg->store_csi2_header = + v4l2_ctrl_new_custom(&tpg->asd.ctrl_handler, &csi2_header_cfg, NULL); +} + +static void tpg_set_ffmt(struct v4l2_subdev *sd, +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 1, 0) + struct v4l2_subdev_fh *cfg, +#else + struct v4l2_subdev_pad_config *cfg, +#endif + struct v4l2_subdev_format *fmt) +{ + fmt->format.field = V4L2_FIELD_NONE; + *__ipu_isys_get_ffmt(sd, cfg, fmt->pad, fmt->stream, + fmt->which) = fmt->format; +} + +static int ipu_isys_tpg_set_ffmt(struct v4l2_subdev *sd, +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 1, 0) + struct v4l2_subdev_fh *cfg, +#else + struct v4l2_subdev_pad_config *cfg, +#endif + struct v4l2_subdev_format *fmt) +{ + struct ipu_isys_tpg *tpg = to_ipu_isys_tpg(sd); + __u32 code = tpg->asd.ffmt[TPG_PAD_SOURCE][0].code; + unsigned int bpp = ipu_isys_mbus_code_to_bpp(code); + s64 tpg_rate = ipu_isys_tpg_rate(tpg, bpp); + int rval; + + mutex_lock(&tpg->asd.mutex); + rval = __ipu_isys_subdev_set_ffmt(sd, cfg, fmt); + mutex_unlock(&tpg->asd.mutex); + + if (rval || fmt->which != V4L2_SUBDEV_FORMAT_ACTIVE) + return rval; + + v4l2_ctrl_s_ctrl_int64(tpg->pixel_rate, tpg_rate); + + return 0; +} + +static const struct ipu_isys_pixelformat *ipu_isys_tpg_try_fmt( + struct ipu_isys_video *av, + struct v4l2_pix_format_mplane *mpix) +{ +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 5, 0) + struct media_entity entity = av->vdev.entity; + struct v4l2_subdev *sd = + media_entity_to_v4l2_subdev(entity.links[0].source->entity); +#else + struct media_link *link = list_first_entry(&av->vdev.entity.links, + struct media_link, list); + struct v4l2_subdev *sd = + media_entity_to_v4l2_subdev(link->source->entity); +#endif + struct ipu_isys_tpg *tpg; + + if (!sd) + return NULL; + + tpg = to_ipu_isys_tpg(sd); + + return ipu_isys_video_try_fmt_vid_mplane(av, mpix, + v4l2_ctrl_g_ctrl(tpg->store_csi2_header)); +} + +static const struct v4l2_subdev_pad_ops tpg_sd_pad_ops = { + 
.get_fmt = ipu_isys_subdev_get_ffmt, + .set_fmt = ipu_isys_tpg_set_ffmt, + .enum_mbus_code = ipu_isys_subdev_enum_mbus_code, +}; + +static int subscribe_event(struct v4l2_subdev *sd, struct v4l2_fh *fh, + struct v4l2_event_subscription *sub) +{ + switch (sub->type) { + case V4L2_EVENT_CTRL: + return v4l2_ctrl_subscribe_event(fh, sub); + default: + return -EINVAL; + } +}; + +/* V4L2 subdev core operations */ +static const struct v4l2_subdev_core_ops tpg_sd_core_ops = { + .subscribe_event = subscribe_event, + .unsubscribe_event = v4l2_event_subdev_unsubscribe, +}; + +static struct v4l2_subdev_ops tpg_sd_ops = { + .core = &tpg_sd_core_ops, + .video = &tpg_sd_video_ops, + .pad = &tpg_sd_pad_ops, +}; + +static struct media_entity_operations tpg_entity_ops = { + .link_validate = v4l2_subdev_link_validate, +}; + +void ipu_isys_tpg_cleanup(struct ipu_isys_tpg *tpg) +{ + v4l2_device_unregister_subdev(&tpg->asd.sd); + ipu_isys_subdev_cleanup(&tpg->asd); + ipu_isys_video_cleanup(&tpg->av); +} + +int ipu_isys_tpg_init(struct ipu_isys_tpg *tpg, + struct ipu_isys *isys, + void __iomem *base, void __iomem *sel, + unsigned int index) +{ + struct v4l2_subdev_format fmt = { + .which = V4L2_SUBDEV_FORMAT_ACTIVE, + .pad = TPG_PAD_SOURCE, + .format = { + .width = 4096, + .height = 3072, + }, + }; + int rval; + + tpg->isys = isys; + tpg->base = base; + tpg->sel = sel; + tpg->index = index; + + tpg->asd.sd.entity.ops = &tpg_entity_ops; + tpg->asd.ctrl_init = ipu_isys_tpg_init_controls; + tpg->asd.isys = isys; + + rval = ipu_isys_subdev_init(&tpg->asd, &tpg_sd_ops, 5, + NR_OF_TPG_PADS, + NR_OF_TPG_STREAMS, + NR_OF_TPG_SOURCE_PADS, + NR_OF_TPG_SINK_PADS, + V4L2_SUBDEV_FL_HAS_EVENTS); + if (rval) + return rval; + +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 5, 0) + tpg->asd.sd.entity.type = MEDIA_ENT_T_V4L2_SUBDEV_SENSOR; +#else + tpg->asd.sd.entity.function = MEDIA_ENT_F_CAM_SENSOR; +#endif + tpg->asd.pad[TPG_PAD_SOURCE].flags = MEDIA_PAD_FL_SOURCE; + + tpg->asd.source = 
IPU_FW_ISYS_STREAM_SRC_MIPIGEN_PORT0 + index; + tpg->asd.supported_codes = tpg_supported_codes; + tpg->asd.set_ffmt = tpg_set_ffmt; + ipu_isys_subdev_set_ffmt(&tpg->asd.sd, NULL, &fmt); + + tpg->asd.sd.internal_ops = &tpg_sd_internal_ops; + snprintf(tpg->asd.sd.name, sizeof(tpg->asd.sd.name), + IPU_ISYS_ENTITY_PREFIX " TPG %u", index); + v4l2_set_subdevdata(&tpg->asd.sd, &tpg->asd); + rval = v4l2_device_register_subdev(&isys->v4l2_dev, &tpg->asd.sd); + if (rval) { + dev_info(&isys->adev->dev, "can't register v4l2 subdev\n"); + goto fail; + } + + snprintf(tpg->av.vdev.name, sizeof(tpg->av.vdev.name), + IPU_ISYS_ENTITY_PREFIX " TPG %u capture", index); + tpg->av.isys = isys; + tpg->av.aq.css_pin_type = IPU_FW_ISYS_PIN_TYPE_MIPI; + tpg->av.pfmts = ipu_isys_pfmts_packed; + tpg->av.try_fmt_vid_mplane = ipu_isys_tpg_try_fmt; + tpg->av.prepare_firmware_stream_cfg = + ipu_isys_prepare_firmware_stream_cfg_default; + tpg->av.packed = true; + tpg->av.line_header_length = IPU_ISYS_CSI2_LONG_PACKET_HEADER_SIZE; + tpg->av.line_footer_length = IPU_ISYS_CSI2_LONG_PACKET_FOOTER_SIZE; + tpg->av.aq.buf_prepare = ipu_isys_buf_prepare; + tpg->av.aq.fill_frame_buff_set_pin = + ipu_isys_buffer_list_to_ipu_fw_isys_frame_buff_set_pin; + tpg->av.aq.link_fmt_validate = ipu_isys_link_fmt_validate; + tpg->av.aq.vbq.buf_struct_size = sizeof(struct ipu_isys_video_buffer); + + rval = ipu_isys_video_init(&tpg->av, &tpg->asd.sd.entity, + TPG_PAD_SOURCE, MEDIA_PAD_FL_SINK, 0); + if (rval) { + dev_info(&isys->adev->dev, "can't init video node\n"); + goto fail; + } + + return 0; + +fail: + ipu_isys_tpg_cleanup(tpg); + + return rval; +} diff --git a/drivers/media/pci/intel/ipu-isys-tpg.h b/drivers/media/pci/intel/ipu-isys-tpg.h new file mode 100644 index 0000000000000..29ce5002219ff --- /dev/null +++ b/drivers/media/pci/intel/ipu-isys-tpg.h @@ -0,0 +1,97 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (C) 2013 - 2018 Intel Corporation */ + +#ifndef IPU_ISYS_TPG_H +#define IPU_ISYS_TPG_H + 
+#include +#include +#include + +#include "ipu-isys-subdev.h" +#include "ipu-isys-video.h" +#include "ipu-isys-queue.h" + +struct ipu_isys_tpg_pdata; +struct ipu_isys; + +#define TPG_PAD_SOURCE 0 +#define NR_OF_TPG_PADS 1 +#define NR_OF_TPG_SOURCE_PADS 1 +#define NR_OF_TPG_SINK_PADS 0 +#define NR_OF_TPG_STREAMS 1 + +/* + * PPC is 4 pixels for clock for RAW8, RAW10 and RAW12. + * Source: FW validation test code. + */ +#define MIPI_GEN_PPC 4 + +#define MIPI_GEN_REG_COM_ENABLE 0x0 +#define MIPI_GEN_REG_COM_DTYPE 0x4 +/* RAW8, RAW10 or RAW12 */ +#define MIPI_GEN_COM_DTYPE_RAW(n) (((n) - 8) / 2) +#define MIPI_GEN_REG_COM_VTYPE 0x8 +#define MIPI_GEN_REG_COM_VCHAN 0xc +#define MIPI_GEN_REG_COM_WCOUNT 0x10 +#define MIPI_GEN_REG_PRBS_RSTVAL0 0x14 +#define MIPI_GEN_REG_PRBS_RSTVAL1 0x18 +#define MIPI_GEN_REG_SYNG_FREE_RUN 0x1c +#define MIPI_GEN_REG_SYNG_PAUSE 0x20 +#define MIPI_GEN_REG_SYNG_NOF_FRAMES 0x24 +#define MIPI_GEN_REG_SYNG_NOF_PIXELS 0x28 +#define MIPI_GEN_REG_SYNG_NOF_LINES 0x2c +#define MIPI_GEN_REG_SYNG_HBLANK_CYC 0x30 +#define MIPI_GEN_REG_SYNG_VBLANK_CYC 0x34 +#define MIPI_GEN_REG_SYNG_STAT_HCNT 0x38 +#define MIPI_GEN_REG_SYNG_STAT_VCNT 0x3c +#define MIPI_GEN_REG_SYNG_STAT_FCNT 0x40 +#define MIPI_GEN_REG_SYNG_STAT_DONE 0x44 +#define MIPI_GEN_REG_TPG_MODE 0x48 +#define MIPI_GEN_REG_TPG_HCNT_MASK 0x4c +#define MIPI_GEN_REG_TPG_VCNT_MASK 0x50 +#define MIPI_GEN_REG_TPG_XYCNT_MASK 0x54 +#define MIPI_GEN_REG_TPG_HCNT_DELTA 0x58 +#define MIPI_GEN_REG_TPG_VCNT_DELTA 0x5c +#define MIPI_GEN_REG_TPG_R1 0x60 +#define MIPI_GEN_REG_TPG_G1 0x64 +#define MIPI_GEN_REG_TPG_B1 0x68 +#define MIPI_GEN_REG_TPG_R2 0x6c +#define MIPI_GEN_REG_TPG_G2 0x70 +#define MIPI_GEN_REG_TPG_B2 0x74 + +/* + * struct ipu_isys_tpg + * + * @nlanes: number of lanes in the receiver + */ +struct ipu_isys_tpg { + struct ipu_isys_tpg_pdata *pdata; + struct ipu_isys *isys; + struct ipu_isys_subdev asd; + struct ipu_isys_video av; + + void __iomem *base; + void __iomem *sel; + unsigned int index; + int 
streaming; + + struct v4l2_ctrl *hblank; + struct v4l2_ctrl *vblank; + struct v4l2_ctrl *llp; + struct v4l2_ctrl *fll; + struct v4l2_ctrl *pixel_rate; + struct v4l2_ctrl *store_csi2_header; +}; + +#define to_ipu_isys_tpg(sd) \ + container_of(to_ipu_isys_subdev(sd), \ + struct ipu_isys_tpg, asd) +int ipu_isys_tpg_init(struct ipu_isys_tpg *tpg, + struct ipu_isys *isys, + void __iomem *base, void __iomem *sel, + unsigned int index); +void ipu_isys_tpg_cleanup(struct ipu_isys_tpg *tpg); +int tpg_set_stream(struct v4l2_subdev *sd, int enable); + +#endif /* IPU_ISYS_TPG_H */ diff --git a/drivers/media/pci/intel/ipu-isys-video.c b/drivers/media/pci/intel/ipu-isys-video.c new file mode 100644 index 0000000000000..d149354364e4e --- /dev/null +++ b/drivers/media/pci/intel/ipu-isys-video.c @@ -0,0 +1,1922 @@ +// SPDX-License-Identifier: GPL-2.0 +// Copyright (C) 2013 - 2018 Intel Corporation + +#include +#include +#include +#include +#include +#include +#include +#include + +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 14, 0) +#include +#else +#include +#endif + +#include +#include +#include +#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 6, 0) +#include +#endif + +#include "ipu.h" +#include "ipu-bus.h" +#include "ipu-cpd.h" +#include "ipu-isys.h" +#include "ipu-isys-video.h" +#include "ipu-platform.h" +#include "ipu-platform-regs.h" +#include "ipu-platform-buttress-regs.h" +#include "ipu-trace.h" +#include "ipu-fw-isys.h" +#include "ipu-fw-com.h" + +static unsigned int num_stream_support = IPU_ISYS_NUM_STREAMS; +module_param(num_stream_support, uint, 0660); +MODULE_PARM_DESC(num_stream_support, "IPU project support number of stream"); + +static bool use_stream_stop; +module_param(use_stream_stop, bool, 0660); +MODULE_PARM_DESC(use_stream_stop, "Use STOP command if running in CSI capture mode"); + +const struct ipu_isys_pixelformat ipu_isys_pfmts_be_soc[] = { + {V4L2_PIX_FMT_Y10, 16, 10, 0, MEDIA_BUS_FMT_Y10_1X10, + IPU_FW_ISYS_FRAME_FORMAT_RAW16}, + {V4L2_PIX_FMT_UYVY, 16, 16, 0, 
MEDIA_BUS_FMT_UYVY8_1X16, + IPU_FW_ISYS_FRAME_FORMAT_UYVY}, + {V4L2_PIX_FMT_YUYV, 16, 16, 0, MEDIA_BUS_FMT_YUYV8_1X16, + IPU_FW_ISYS_FRAME_FORMAT_YUYV}, + {V4L2_PIX_FMT_NV16, 16, 16, 8, MEDIA_BUS_FMT_UYVY8_1X16, + IPU_FW_ISYS_FRAME_FORMAT_NV16}, + {V4L2_PIX_FMT_YUV420, 12, 0, 8, MEDIA_BUS_FMT_UYVY8_2X8, + IPU_FW_ISYS_FRAME_FORMAT_YUV420}, + {V4L2_PIX_FMT_XRGB32, 32, 32, 0, MEDIA_BUS_FMT_RGB565_1X16, + IPU_FW_ISYS_FRAME_FORMAT_RGBA888}, + {V4L2_PIX_FMT_XBGR32, 32, 32, 0, MEDIA_BUS_FMT_RGB888_1X24, + IPU_FW_ISYS_FRAME_FORMAT_RGBA888}, + /* Raw bayer formats. */ + {V4L2_PIX_FMT_SBGGR14, 16, 14, 0, MEDIA_BUS_FMT_SBGGR14_1X14, + IPU_FW_ISYS_FRAME_FORMAT_RAW16}, + {V4L2_PIX_FMT_SGBRG14, 16, 14, 0, MEDIA_BUS_FMT_SGBRG14_1X14, + IPU_FW_ISYS_FRAME_FORMAT_RAW16}, + {V4L2_PIX_FMT_SGRBG14, 16, 14, 0, MEDIA_BUS_FMT_SGRBG14_1X14, + IPU_FW_ISYS_FRAME_FORMAT_RAW16}, + {V4L2_PIX_FMT_SRGGB14, 16, 14, 0, MEDIA_BUS_FMT_SRGGB14_1X14, + IPU_FW_ISYS_FRAME_FORMAT_RAW16}, + {V4L2_PIX_FMT_SBGGR12, 16, 12, 0, MEDIA_BUS_FMT_SBGGR12_1X12, + IPU_FW_ISYS_FRAME_FORMAT_RAW16}, + {V4L2_PIX_FMT_SGBRG12, 16, 12, 0, MEDIA_BUS_FMT_SGBRG12_1X12, + IPU_FW_ISYS_FRAME_FORMAT_RAW16}, + {V4L2_PIX_FMT_SGRBG12, 16, 12, 0, MEDIA_BUS_FMT_SGRBG12_1X12, + IPU_FW_ISYS_FRAME_FORMAT_RAW16}, + {V4L2_PIX_FMT_SRGGB12, 16, 12, 0, MEDIA_BUS_FMT_SRGGB12_1X12, + IPU_FW_ISYS_FRAME_FORMAT_RAW16}, + {V4L2_PIX_FMT_SBGGR10, 16, 10, 0, MEDIA_BUS_FMT_SBGGR10_1X10, + IPU_FW_ISYS_FRAME_FORMAT_RAW16}, + {V4L2_PIX_FMT_SGBRG10, 16, 10, 0, MEDIA_BUS_FMT_SGBRG10_1X10, + IPU_FW_ISYS_FRAME_FORMAT_RAW16}, + {V4L2_PIX_FMT_SGRBG10, 16, 10, 0, MEDIA_BUS_FMT_SGRBG10_1X10, + IPU_FW_ISYS_FRAME_FORMAT_RAW16}, + {V4L2_PIX_FMT_SRGGB10, 16, 10, 0, MEDIA_BUS_FMT_SRGGB10_1X10, + IPU_FW_ISYS_FRAME_FORMAT_RAW16}, + {V4L2_PIX_FMT_SBGGR8, 8, 8, 0, MEDIA_BUS_FMT_SBGGR8_1X8, + IPU_FW_ISYS_FRAME_FORMAT_RAW8}, + {V4L2_PIX_FMT_SGBRG8, 8, 8, 0, MEDIA_BUS_FMT_SGBRG8_1X8, + IPU_FW_ISYS_FRAME_FORMAT_RAW8}, + {V4L2_PIX_FMT_SGRBG8, 8, 8, 0, MEDIA_BUS_FMT_SGRBG8_1X8, 
+ IPU_FW_ISYS_FRAME_FORMAT_RAW8}, + {V4L2_PIX_FMT_SRGGB8, 8, 8, 0, MEDIA_BUS_FMT_SRGGB8_1X8, + IPU_FW_ISYS_FRAME_FORMAT_RAW8}, + {} +}; + +const struct ipu_isys_pixelformat ipu_isys_pfmts_packed[] = { + {V4L2_PIX_FMT_Y10, 10, 10, 0, MEDIA_BUS_FMT_Y10_1X10, + IPU_FW_ISYS_FRAME_FORMAT_RAW10}, + {V4L2_PIX_FMT_Y210, 20, 20, 0, MEDIA_BUS_FMT_YUYV10_1X20, + IPU_FW_ISYS_FRAME_FORMAT_YUYV}, + {V4L2_PIX_FMT_UYVY, 16, 16, 0, MEDIA_BUS_FMT_UYVY8_1X16, + IPU_FW_ISYS_FRAME_FORMAT_UYVY}, + {V4L2_PIX_FMT_YUYV, 16, 16, 0, MEDIA_BUS_FMT_YUYV8_1X16, + IPU_FW_ISYS_FRAME_FORMAT_YUYV}, + {V4L2_PIX_FMT_RGB565, 16, 16, 0, MEDIA_BUS_FMT_RGB565_1X16, + IPU_FW_ISYS_FRAME_FORMAT_RGB565}, + {V4L2_PIX_FMT_BGR24, 24, 24, 0, MEDIA_BUS_FMT_RGB888_1X24, + IPU_FW_ISYS_FRAME_FORMAT_RGBA888}, +#ifndef V4L2_PIX_FMT_SBGGR12P + {V4L2_PIX_FMT_SBGGR12, 12, 12, 0, MEDIA_BUS_FMT_SBGGR12_1X12, + IPU_FW_ISYS_FRAME_FORMAT_RAW12}, + {V4L2_PIX_FMT_SGBRG12, 12, 12, 0, MEDIA_BUS_FMT_SGBRG12_1X12, + IPU_FW_ISYS_FRAME_FORMAT_RAW12}, + {V4L2_PIX_FMT_SGRBG12, 12, 12, 0, MEDIA_BUS_FMT_SGRBG12_1X12, + IPU_FW_ISYS_FRAME_FORMAT_RAW12}, + {V4L2_PIX_FMT_SRGGB12, 12, 12, 0, MEDIA_BUS_FMT_SRGGB12_1X12, + IPU_FW_ISYS_FRAME_FORMAT_RAW12}, + {V4L2_PIX_FMT_SBGGR14, 14, 14, 0, MEDIA_BUS_FMT_SBGGR14_1X14, + IPU_FW_ISYS_FRAME_FORMAT_RAW14}, + {V4L2_PIX_FMT_SGBRG14, 14, 14, 0, MEDIA_BUS_FMT_SGBRG14_1X14, + IPU_FW_ISYS_FRAME_FORMAT_RAW14}, + {V4L2_PIX_FMT_SGRBG14, 14, 14, 0, MEDIA_BUS_FMT_SGRBG14_1X14, + IPU_FW_ISYS_FRAME_FORMAT_RAW14}, + {V4L2_PIX_FMT_SRGGB14, 14, 14, 0, MEDIA_BUS_FMT_SRGGB14_1X14, + IPU_FW_ISYS_FRAME_FORMAT_RAW14}, +#else /* V4L2_PIX_FMT_SBGGR12P */ + {V4L2_PIX_FMT_SBGGR12P, 12, 12, 0, MEDIA_BUS_FMT_SBGGR12_1X12, + IPU_FW_ISYS_FRAME_FORMAT_RAW12}, + {V4L2_PIX_FMT_SGBRG12P, 12, 12, 0, MEDIA_BUS_FMT_SGBRG12_1X12, + IPU_FW_ISYS_FRAME_FORMAT_RAW12}, + {V4L2_PIX_FMT_SGRBG12P, 12, 12, 0, MEDIA_BUS_FMT_SGRBG12_1X12, + IPU_FW_ISYS_FRAME_FORMAT_RAW12}, + {V4L2_PIX_FMT_SRGGB12P, 12, 12, 0, MEDIA_BUS_FMT_SRGGB12_1X12, + 
IPU_FW_ISYS_FRAME_FORMAT_RAW12}, + {V4L2_PIX_FMT_SBGGR14P, 14, 14, 0, MEDIA_BUS_FMT_SBGGR14_1X14, + IPU_FW_ISYS_FRAME_FORMAT_RAW14}, + {V4L2_PIX_FMT_SGBRG14P, 14, 14, 0, MEDIA_BUS_FMT_SGBRG14_1X14, + IPU_FW_ISYS_FRAME_FORMAT_RAW14}, + {V4L2_PIX_FMT_SGRBG14P, 14, 14, 0, MEDIA_BUS_FMT_SGRBG14_1X14, + IPU_FW_ISYS_FRAME_FORMAT_RAW14}, + {V4L2_PIX_FMT_SRGGB14P, 14, 14, 0, MEDIA_BUS_FMT_SRGGB14_1X14, + IPU_FW_ISYS_FRAME_FORMAT_RAW14}, +#endif /* V4L2_PIX_FMT_SBGGR12P */ + {V4L2_PIX_FMT_SBGGR10P, 10, 10, 0, MEDIA_BUS_FMT_SBGGR10_1X10, + IPU_FW_ISYS_FRAME_FORMAT_RAW10}, + {V4L2_PIX_FMT_SGBRG10P, 10, 10, 0, MEDIA_BUS_FMT_SGBRG10_1X10, + IPU_FW_ISYS_FRAME_FORMAT_RAW10}, + {V4L2_PIX_FMT_SGRBG10P, 10, 10, 0, MEDIA_BUS_FMT_SGRBG10_1X10, + IPU_FW_ISYS_FRAME_FORMAT_RAW10}, + {V4L2_PIX_FMT_SRGGB10P, 10, 10, 0, MEDIA_BUS_FMT_SRGGB10_1X10, + IPU_FW_ISYS_FRAME_FORMAT_RAW10}, + {V4L2_PIX_FMT_SBGGR8, 8, 8, 0, MEDIA_BUS_FMT_SBGGR8_1X8, + IPU_FW_ISYS_FRAME_FORMAT_RAW8}, + {V4L2_PIX_FMT_SGBRG8, 8, 8, 0, MEDIA_BUS_FMT_SGBRG8_1X8, + IPU_FW_ISYS_FRAME_FORMAT_RAW8}, + {V4L2_PIX_FMT_SGRBG8, 8, 8, 0, MEDIA_BUS_FMT_SGRBG8_1X8, + IPU_FW_ISYS_FRAME_FORMAT_RAW8}, + {V4L2_PIX_FMT_SRGGB8, 8, 8, 0, MEDIA_BUS_FMT_SRGGB8_1X8, + IPU_FW_ISYS_FRAME_FORMAT_RAW8}, + {} +}; + +static int video_open(struct file *file) +{ + struct ipu_isys_video *av = video_drvdata(file); + struct ipu_isys *isys = av->isys; + struct ipu_bus_device *adev = to_ipu_bus_device(&isys->adev->dev); + struct ipu_device *isp = adev->isp; + int rval; + + mutex_lock(&isys->mutex); + + if (isys->reset_needed || isp->flr_done) { + mutex_unlock(&isys->mutex); + dev_warn(&isys->adev->dev, "isys power cycle required\n"); + return -EIO; + } + mutex_unlock(&isys->mutex); + + rval = ipu_buttress_authenticate(isp); + if (rval) { + dev_err(&isys->adev->dev, "FW authentication failed\n"); + return rval; + } + + rval = pm_runtime_get_sync(&isys->adev->dev); + if (rval < 0) { + pm_runtime_put_noidle(&isys->adev->dev); + return rval; + } + + rval = 
v4l2_fh_open(file); + if (rval) + goto out_power_down; + +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 6, 0) + rval = ipu_pipeline_pm_use(&av->vdev.entity, 1); +#else + rval = v4l2_pipeline_pm_use(&av->vdev.entity, 1); +#endif + if (rval) + goto out_v4l2_fh_release; + + mutex_lock(&isys->mutex); + + if (isys->video_opened++) { + /* Already open */ + mutex_unlock(&isys->mutex); + return 0; + } + + ipu_configure_spc(adev->isp, + &isys->pdata->ipdata->hw_variant, + IPU_CPD_PKG_DIR_ISYS_SERVER_IDX, + isys->pdata->base, isys->pkg_dir, + isys->pkg_dir_dma_addr); + + /* + * Buffers could have been left to wrong queue at last closure. + * Move them now back to empty buffer queue. + */ + ipu_cleanup_fw_msg_bufs(isys); + + if (isys->fwcom) { + /* + * Something went wrong in previous shutdown. As we are now + * restarting isys we can safely delete old context. + */ + dev_err(&isys->adev->dev, "Clearing old context\n"); + ipu_fw_isys_cleanup(isys); + } + + + rval = ipu_fw_isys_init(av->isys, num_stream_support); + if (rval < 0) + goto out_lib_init; + + mutex_unlock(&isys->mutex); + + return 0; + +out_lib_init: + isys->video_opened--; + mutex_unlock(&isys->mutex); +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 6, 0) + ipu_pipeline_pm_use(&av->vdev.entity, 0); +#else + v4l2_pipeline_pm_use(&av->vdev.entity, 0); +#endif + +out_v4l2_fh_release: + v4l2_fh_release(file); +out_power_down: + pm_runtime_put(&isys->adev->dev); + + return rval; +} + +static int video_release(struct file *file) +{ + struct ipu_isys_video *av = video_drvdata(file); + int ret = 0; + + vb2_fop_release(file); + + mutex_lock(&av->isys->mutex); + + if (!--av->isys->video_opened) { + ipu_fw_isys_close(av->isys); + if (av->isys->fwcom) { + av->isys->reset_needed = true; + ret = -EIO; + } + } + + mutex_unlock(&av->isys->mutex); + +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 6, 0) + ipu_pipeline_pm_use(&av->vdev.entity, 0); +#else + v4l2_pipeline_pm_use(&av->vdev.entity, 0); +#endif + + if (av->isys->reset_needed) + 
pm_runtime_put_sync(&av->isys->adev->dev); + else + pm_runtime_put(&av->isys->adev->dev); + + return ret; +} + +#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 5, 0) +static struct media_pad *other_pad(struct media_pad *pad) +{ + struct media_link *link; + + list_for_each_entry(link, &pad->entity->links, list) { + if ((link->flags & MEDIA_LNK_FL_LINK_TYPE) + != MEDIA_LNK_FL_DATA_LINK) + continue; + + return link->source == pad ? link->sink : link->source; + } + + WARN_ON(1); + return NULL; +} +#endif + +const struct ipu_isys_pixelformat *ipu_isys_get_pixelformat( + struct ipu_isys_video *av, + u32 pixelformat) +{ +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 5, 0) + struct media_pad *pad = + av->vdev.entity.pads[0].flags & MEDIA_PAD_FL_SOURCE ? + av->vdev.entity.links[0].sink : av->vdev.entity.links[0].source; +#else + struct media_pad *pad = other_pad(&av->vdev.entity.pads[0]); +#endif + struct v4l2_subdev *sd; + const u32 *supported_codes; + const struct ipu_isys_pixelformat *pfmt; + + if (!pad || !pad->entity) { + WARN_ON(1); + return NULL; + } + + sd = media_entity_to_v4l2_subdev(pad->entity); + supported_codes = to_ipu_isys_subdev(sd)->supported_codes[pad->index]; + + for (pfmt = av->pfmts; pfmt->bpp; pfmt++) { + unsigned int i; + + if (pfmt->pixelformat != pixelformat) + continue; + + for (i = 0; supported_codes[i]; i++) { + if (pfmt->code == supported_codes[i]) + return pfmt; + } + } + + /* Not found. Get the default, i.e. the first defined one. 
*/ + for (pfmt = av->pfmts; pfmt->bpp; pfmt++) { + if (pfmt->code == *supported_codes) + return pfmt; + } + + WARN_ON(1); + return NULL; +} + +int ipu_isys_vidioc_querycap(struct file *file, void *fh, + struct v4l2_capability *cap) +{ + struct ipu_isys_video *av = video_drvdata(file); + + strlcpy(cap->driver, IPU_ISYS_NAME, sizeof(cap->driver)); + strlcpy(cap->card, av->isys->media_dev.model, sizeof(cap->card)); + snprintf(cap->bus_info, sizeof(cap->bus_info), "PCI:%s", + av->isys->media_dev.bus_info); + + cap->capabilities = V4L2_CAP_VIDEO_CAPTURE + | V4L2_CAP_VIDEO_CAPTURE_MPLANE + | V4L2_CAP_VIDEO_OUTPUT_MPLANE | V4L2_CAP_STREAMING + | V4L2_CAP_DEVICE_CAPS; + + cap->device_caps = V4L2_CAP_STREAMING; + + switch (av->aq.vbq.type) { + case V4L2_BUF_TYPE_VIDEO_CAPTURE: + cap->device_caps |= V4L2_CAP_VIDEO_CAPTURE; + break; + case V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE: + cap->device_caps |= V4L2_CAP_VIDEO_CAPTURE_MPLANE; + break; + case V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE: + cap->device_caps |= V4L2_CAP_VIDEO_OUTPUT_MPLANE; + break; + default: + WARN_ON(1); + } + + return 0; +} + +int ipu_isys_vidioc_enum_fmt(struct file *file, void *fh, + struct v4l2_fmtdesc *f) +{ + struct ipu_isys_video *av = video_drvdata(file); +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 5, 0) + struct media_pad *pad = + av->vdev.entity.pads[0].flags & MEDIA_PAD_FL_SOURCE ? + av->vdev.entity.links[0].sink : av->vdev.entity.links[0].source; +#else + struct media_pad *pad = other_pad(&av->vdev.entity.pads[0]); +#endif + struct v4l2_subdev *sd; + const u32 *supported_codes; + const struct ipu_isys_pixelformat *pfmt; + u32 index; + + if (!pad || !pad->entity) + return -EINVAL; + sd = media_entity_to_v4l2_subdev(pad->entity); + supported_codes = to_ipu_isys_subdev(sd)->supported_codes[pad->index]; + + /* Walk the 0-terminated array for the f->index-th code. 
*/ + for (index = f->index; *supported_codes && index; + index--, supported_codes++) { + }; + + if (!*supported_codes) + return -EINVAL; + + f->flags = 0; + + /* Code found */ + for (pfmt = av->pfmts; pfmt->bpp; pfmt++) + if (pfmt->code == *supported_codes) + break; + + if (!pfmt->bpp) { + dev_warn(&av->isys->adev->dev, + "Format not found in mapping table."); + return -EINVAL; + } + + f->pixelformat = pfmt->pixelformat; + + return 0; +} + +static int vidioc_g_fmt_vid_cap_mplane(struct file *file, void *fh, + struct v4l2_format *fmt) +{ + struct ipu_isys_video *av = video_drvdata(file); + + fmt->fmt.pix_mp = av->mpix; + + return 0; +} + +const struct ipu_isys_pixelformat * +ipu_isys_video_try_fmt_vid_mplane_default(struct ipu_isys_video *av, + struct v4l2_pix_format_mplane *mpix) +{ + return ipu_isys_video_try_fmt_vid_mplane(av, mpix, 0); +} + +const struct ipu_isys_pixelformat *ipu_isys_video_try_fmt_vid_mplane( + struct ipu_isys_video *av, + struct v4l2_pix_format_mplane *mpix, + int store_csi2_header) +{ + const struct ipu_isys_pixelformat *pfmt = + ipu_isys_get_pixelformat(av, mpix->pixelformat); + + if (!pfmt) + return NULL; + mpix->pixelformat = pfmt->pixelformat; + mpix->num_planes = 1; + + mpix->width = clamp(mpix->width, IPU_ISYS_MIN_WIDTH, + IPU_ISYS_MAX_WIDTH); + mpix->height = clamp(mpix->height, IPU_ISYS_MIN_HEIGHT, + IPU_ISYS_MAX_HEIGHT); + + if (!av->packed) + mpix->plane_fmt[0].bytesperline = + mpix->width * DIV_ROUND_UP(pfmt->bpp_planar ? 
+ pfmt->bpp_planar : pfmt->bpp, + BITS_PER_BYTE); + else if (store_csi2_header) + mpix->plane_fmt[0].bytesperline = + DIV_ROUND_UP(av->line_header_length + + av->line_footer_length + + (unsigned int)mpix->width * pfmt->bpp, + BITS_PER_BYTE); + else + mpix->plane_fmt[0].bytesperline = + DIV_ROUND_UP((unsigned int)mpix->width * pfmt->bpp, + BITS_PER_BYTE); + + mpix->plane_fmt[0].bytesperline = ALIGN(mpix->plane_fmt[0].bytesperline, + av->isys->line_align); + if (pfmt->bpp_planar) + mpix->plane_fmt[0].bytesperline = + mpix->plane_fmt[0].bytesperline * + pfmt->bpp / pfmt->bpp_planar; + /* + * (height + 1) * bytesperline due to a hardware issue: the DMA unit + * is a power of two, and a line should be transferred as few units + * as possible. The result is that up to line length more data than + * the image size may be transferred to memory after the image. + * Another limition is the GDA allocation unit size. For low + * resolution it gives a bigger number. Use larger one to avoid + * memory corruption. 
+ */ + mpix->plane_fmt[0].sizeimage = + max(max(mpix->plane_fmt[0].sizeimage, + mpix->plane_fmt[0].bytesperline * mpix->height + + max(mpix->plane_fmt[0].bytesperline, + av->isys->pdata->ipdata->isys_dma_overshoot)), 1U); + + memset(mpix->plane_fmt[0].reserved, 0, + sizeof(mpix->plane_fmt[0].reserved)); + + if (mpix->field == V4L2_FIELD_ANY) + mpix->field = V4L2_FIELD_NONE; + /* Use defaults */ + mpix->colorspace = V4L2_COLORSPACE_RAW; + mpix->ycbcr_enc = V4L2_YCBCR_ENC_DEFAULT; + mpix->quantization = V4L2_QUANTIZATION_DEFAULT; + mpix->xfer_func = V4L2_XFER_FUNC_DEFAULT; + + return pfmt; +} + +static int vidioc_s_fmt_vid_cap_mplane(struct file *file, void *fh, + struct v4l2_format *f) +{ + struct ipu_isys_video *av = video_drvdata(file); + + if (av->aq.vbq.streaming) + return -EBUSY; + + av->pfmt = av->try_fmt_vid_mplane(av, &f->fmt.pix_mp); + av->mpix = f->fmt.pix_mp; + + return 0; +} + +static int vidioc_try_fmt_vid_cap_mplane(struct file *file, void *fh, + struct v4l2_format *f) +{ + struct ipu_isys_video *av = video_drvdata(file); + + av->try_fmt_vid_mplane(av, &f->fmt.pix_mp); + + return 0; +} + +static void fmt_sp_to_mp(struct v4l2_pix_format_mplane *mpix, + struct v4l2_pix_format *pix) +{ + mpix->width = pix->width; + mpix->height = pix->height; + mpix->pixelformat = pix->pixelformat; + mpix->field = pix->field; + mpix->num_planes = 1; + mpix->plane_fmt[0].bytesperline = pix->bytesperline; + mpix->plane_fmt[0].sizeimage = pix->sizeimage; + mpix->flags = pix->flags; +} + +static void fmt_mp_to_sp(struct v4l2_pix_format *pix, + struct v4l2_pix_format_mplane *mpix) +{ + pix->width = mpix->width; + pix->height = mpix->height; + pix->pixelformat = mpix->pixelformat; + pix->field = mpix->field; + WARN_ON(mpix->num_planes != 1); + pix->bytesperline = mpix->plane_fmt[0].bytesperline; + pix->sizeimage = mpix->plane_fmt[0].sizeimage; + pix->flags = mpix->flags; + pix->colorspace = mpix->colorspace; + pix->ycbcr_enc = mpix->ycbcr_enc; + pix->quantization = 
mpix->quantization; + pix->xfer_func = mpix->xfer_func; +} + +static int vidioc_g_fmt_vid_cap(struct file *file, void *fh, + struct v4l2_format *f) +{ + struct ipu_isys_video *av = video_drvdata(file); + + fmt_mp_to_sp(&f->fmt.pix, &av->mpix); + + return 0; +} + +static int vidioc_s_fmt_vid_cap(struct file *file, void *fh, + struct v4l2_format *f) +{ + struct ipu_isys_video *av = video_drvdata(file); + struct v4l2_pix_format_mplane mpix = { 0 }; + + if (av->aq.vbq.streaming) + return -EBUSY; + + fmt_sp_to_mp(&mpix, &f->fmt.pix); + + av->pfmt = av->try_fmt_vid_mplane(av, &mpix); + av->mpix = mpix; + + fmt_mp_to_sp(&f->fmt.pix, &mpix); + + return 0; +} + +static int vidioc_try_fmt_vid_cap(struct file *file, void *fh, + struct v4l2_format *f) +{ + struct ipu_isys_video *av = video_drvdata(file); + struct v4l2_pix_format_mplane mpix = { 0 }; + + fmt_sp_to_mp(&mpix, &f->fmt.pix); + + av->try_fmt_vid_mplane(av, &mpix); + + fmt_mp_to_sp(&f->fmt.pix, &mpix); + + return 0; +} + +static long ipu_isys_vidioc_private(struct file *file, void *fh, + bool valid_prio, unsigned int cmd, + void *arg) +{ + struct ipu_isys_video *av = video_drvdata(file); + int ret = 0; + + switch (cmd) { + case VIDIOC_IPU_GET_DRIVER_VERSION: + *(u32 *)arg = IPU_DRIVER_VERSION; + break; + + default: + dev_dbg(&av->isys->adev->dev, "unsupported private ioctl %x\n", + cmd); + } + + return ret; +} + +static int vidioc_enum_input(struct file *file, void *fh, + struct v4l2_input *input) +{ + if (input->index > 0) + return -EINVAL; + strlcpy(input->name, "camera", sizeof(input->name)); + input->type = V4L2_INPUT_TYPE_CAMERA; + + return 0; +} + +static int vidioc_g_input(struct file *file, void *fh, unsigned int *input) +{ + *input = 0; + + return 0; +} + +static int vidioc_s_input(struct file *file, void *fh, unsigned int input) +{ + return input == 0 ? 0 : -EINVAL; +} + +/* + * Return true if an entity directly connected to an Iunit entity is + * an image source for the ISP. 
This can be any external directly + * connected entity or any of the test pattern generators in the + * Iunit. + */ +static bool is_external(struct ipu_isys_video *av, struct media_entity *entity) +{ + struct v4l2_subdev *sd; + unsigned int i; + + /* All video nodes are ours. */ + if (!is_media_entity_v4l2_subdev(entity)) + return false; + + sd = media_entity_to_v4l2_subdev(entity); + if (strncmp(sd->name, IPU_ISYS_ENTITY_PREFIX, + strlen(IPU_ISYS_ENTITY_PREFIX)) != 0) + return true; + + for (i = 0; i < av->isys->pdata->ipdata->tpg.ntpgs && + av->isys->tpg[i].isys; i++) + if (entity == &av->isys->tpg[i].asd.sd.entity) + return true; + + return false; +} + +static int link_validate(struct media_link *link) +{ + struct ipu_isys_video *av = + container_of(link->sink, struct ipu_isys_video, pad); + /* All sub-devices connected to a video node are ours. */ + struct ipu_isys_pipeline *ip = + to_ipu_isys_pipeline(av->vdev.entity.pipe); + struct v4l2_subdev_route r[IPU_ISYS_MAX_STREAMS]; + struct v4l2_subdev_routing routing = { + .routes = r, + .num_routes = IPU_ISYS_MAX_STREAMS, + }; + int i, rval, active = 0; + struct v4l2_subdev *sd; + + if (!link->source->entity) + return -EINVAL; + sd = media_entity_to_v4l2_subdev(link->source->entity); + if (is_external(av, link->source->entity)) { + ip->external = media_entity_remote_pad(av->vdev.entity.pads); + ip->source = to_ipu_isys_subdev(sd)->source; + } + + rval = v4l2_subdev_call(sd, pad, get_routing, &routing); + if (rval) + goto err_subdev; + + for (i = 0; i < routing.num_routes; i++) { + if (!(routing.routes[i].flags & V4L2_SUBDEV_ROUTE_FL_ACTIVE)) + continue; + + if (routing.routes[i].source_pad == link->source->index) + ip->stream_id = routing.routes[i].sink_stream; + + active++; + } + + if (ip->external) { + struct v4l2_mbus_frame_desc desc = { + .num_entries = V4L2_FRAME_DESC_ENTRY_MAX, + }; + + sd = media_entity_to_v4l2_subdev(ip->external->entity); + rval = ipu_isys_subdev_get_frame_desc(sd, &desc); + if (!rval && 
ip->stream_id < desc.num_entries) + ip->vc = desc.entry[ip->stream_id].bus.csi2.channel; + } + +err_subdev: + ip->nr_queues++; + + return 0; +} + +static void get_stream_opened(struct ipu_isys_video *av) +{ + unsigned long flags; + + spin_lock_irqsave(&av->isys->lock, flags); + av->isys->stream_opened++; + spin_unlock_irqrestore(&av->isys->lock, flags); +} + +static void put_stream_opened(struct ipu_isys_video *av) +{ + unsigned long flags; + + spin_lock_irqsave(&av->isys->lock, flags); + av->isys->stream_opened--; + spin_unlock_irqrestore(&av->isys->lock, flags); +} + +static int get_stream_handle(struct ipu_isys_video *av) +{ + struct ipu_isys_pipeline *ip = + to_ipu_isys_pipeline(av->vdev.entity.pipe); + unsigned int stream_handle; + unsigned long flags; + + spin_lock_irqsave(&av->isys->lock, flags); + for (stream_handle = 0; + stream_handle < IPU_ISYS_MAX_STREAMS; stream_handle++) + if (!av->isys->pipes[stream_handle]) + break; + if (stream_handle == IPU_ISYS_MAX_STREAMS) { + spin_unlock_irqrestore(&av->isys->lock, flags); + return -EBUSY; + } + av->isys->pipes[stream_handle] = ip; + ip->stream_handle = stream_handle; + spin_unlock_irqrestore(&av->isys->lock, flags); + return 0; +} + +static void put_stream_handle(struct ipu_isys_video *av) +{ + struct ipu_isys_pipeline *ip = + to_ipu_isys_pipeline(av->vdev.entity.pipe); + unsigned long flags; + + spin_lock_irqsave(&av->isys->lock, flags); + av->isys->pipes[ip->stream_handle] = NULL; + ip->stream_handle = -1; + spin_unlock_irqrestore(&av->isys->lock, flags); +} + +static int get_external_facing_format(struct ipu_isys_pipeline *ip, + struct v4l2_subdev_format *format) +{ + struct ipu_isys_video *av = container_of(ip, struct ipu_isys_video, ip); + struct v4l2_subdev *sd; + struct media_pad *external_facing; + + if (!ip->external->entity) { + WARN_ON(1); + return -ENODEV; + } + sd = media_entity_to_v4l2_subdev(ip->external->entity); + external_facing = (strncmp(sd->name, IPU_ISYS_ENTITY_PREFIX, + 
strlen(IPU_ISYS_ENTITY_PREFIX)) == 0) ? + ip->external : media_entity_remote_pad(ip->external); + if (WARN_ON(!external_facing)) { + dev_warn(&av->isys->adev->dev, + "no external facing pad --- driver bug?\n"); + return -EINVAL; + } + + format->which = V4L2_SUBDEV_FORMAT_ACTIVE; + format->pad = 0; + format->stream = ip->stream_id; + sd = media_entity_to_v4l2_subdev(external_facing->entity); + + return v4l2_subdev_call(sd, pad, get_fmt, NULL, format); +} + +static void short_packet_queue_destroy(struct ipu_isys_pipeline *ip) +{ + struct ipu_isys_video *av = container_of(ip, struct ipu_isys_video, ip); +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 8, 0) + struct dma_attrs attrs; +#else + unsigned long attrs; +#endif + unsigned int i; + +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 8, 0) + init_dma_attrs(&attrs); + dma_set_attr(DMA_ATTR_NON_CONSISTENT, &attrs); +#else + attrs = DMA_ATTR_NON_CONSISTENT; +#endif + if (!ip->short_packet_bufs) + return; + for (i = 0; i < IPU_ISYS_SHORT_PACKET_BUFFER_NUM; i++) { + if (ip->short_packet_bufs[i].buffer) + dma_free_attrs(&av->isys->adev->dev, + ip->short_packet_buffer_size, + ip->short_packet_bufs[i].buffer, + ip->short_packet_bufs[i].dma_addr, +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 8, 0) + &attrs +#else + attrs +#endif + ); + } + kfree(ip->short_packet_bufs); + ip->short_packet_bufs = NULL; +} + +static int short_packet_queue_setup(struct ipu_isys_pipeline *ip) +{ + struct ipu_isys_video *av = container_of(ip, struct ipu_isys_video, ip); + struct v4l2_subdev_format source_fmt = { 0 }; +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 8, 0) + struct dma_attrs attrs; +#else + unsigned long attrs; +#endif + unsigned int i; + int rval; + size_t buf_size; + + INIT_LIST_HEAD(&ip->pending_interlaced_bufs); + ip->cur_field = V4L2_FIELD_TOP; + + if (ip->isys->short_packet_source == IPU_ISYS_SHORT_PACKET_FROM_TUNIT) { + ip->short_packet_trace_index = 0; + return 0; + } + + rval = get_external_facing_format(ip, &source_fmt); + if (rval) + 
return rval; + buf_size = IPU_ISYS_SHORT_PACKET_BUF_SIZE(source_fmt.format.height); + ip->short_packet_buffer_size = buf_size; + ip->num_short_packet_lines = + IPU_ISYS_SHORT_PACKET_PKT_LINES(source_fmt.format.height); + + /* Initialize short packet queue. */ + INIT_LIST_HEAD(&ip->short_packet_incoming); + INIT_LIST_HEAD(&ip->short_packet_active); +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 8, 0) + init_dma_attrs(&attrs); + dma_set_attr(DMA_ATTR_NON_CONSISTENT, &attrs); +#else + attrs = DMA_ATTR_NON_CONSISTENT; +#endif + + ip->short_packet_bufs = + kzalloc(sizeof(struct ipu_isys_private_buffer) * + IPU_ISYS_SHORT_PACKET_BUFFER_NUM, GFP_KERNEL); + if (!ip->short_packet_bufs) + return -ENOMEM; + + for (i = 0; i < IPU_ISYS_SHORT_PACKET_BUFFER_NUM; i++) { + struct ipu_isys_private_buffer *buf = &ip->short_packet_bufs[i]; + + buf->index = (unsigned int)i; + buf->ip = ip; + buf->ib.type = IPU_ISYS_SHORT_PACKET_BUFFER; + buf->bytesused = buf_size; + buf->buffer = dma_alloc_attrs(&av->isys->adev->dev, buf_size, + &buf->dma_addr, GFP_KERNEL, +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 8, 0) + &attrs +#else + attrs +#endif + ); + if (!buf->buffer) { + short_packet_queue_destroy(ip); + return -ENOMEM; + } + list_add(&buf->ib.head, &ip->short_packet_incoming); + } + + return 0; +} + +static void csi_short_packet_prepare_firmware_stream_cfg( + struct ipu_isys_pipeline *ip, + struct ipu_fw_isys_stream_cfg_data_abi *cfg) +{ + int input_pin = cfg->nof_input_pins++; + int output_pin = cfg->nof_output_pins++; + struct ipu_fw_isys_input_pin_info_abi *input_info = + &cfg->input_pins[input_pin]; + struct ipu_fw_isys_output_pin_info_abi *output_info = + &cfg->output_pins[output_pin]; + + /* + * Setting dt as IPU_ISYS_SHORT_PACKET_GENERAL_DT will cause + * MIPI receiver to receive all MIPI short packets. 
+ */ + input_info->dt = IPU_ISYS_SHORT_PACKET_GENERAL_DT; + input_info->input_res.width = IPU_ISYS_SHORT_PACKET_WIDTH; + input_info->input_res.height = ip->num_short_packet_lines; + + ip->output_pins[output_pin].pin_ready = + ipu_isys_queue_short_packet_ready; + ip->output_pins[output_pin].aq = NULL; + ip->short_packet_output_pin = output_pin; + + output_info->input_pin_id = input_pin; + output_info->output_res.width = IPU_ISYS_SHORT_PACKET_WIDTH; + output_info->output_res.height = ip->num_short_packet_lines; + output_info->stride = IPU_ISYS_SHORT_PACKET_WIDTH * + IPU_ISYS_SHORT_PACKET_UNITSIZE; + output_info->pt = IPU_ISYS_SHORT_PACKET_PT; + output_info->ft = IPU_ISYS_SHORT_PACKET_FT; + output_info->send_irq = 1; +#if !defined(CONFIG_VIDEO_INTEL_IPU4) && !defined(CONFIG_VIDEO_INTEL_IPU4P) + output_info->snoopable = true; + output_info->sensor_type = IPU_FW_ISYS_SENSOR_METADATA; +#endif +} + +void ipu_isys_prepare_firmware_stream_cfg_default( + struct ipu_isys_video *av, + struct ipu_fw_isys_stream_cfg_data_abi *cfg) +{ + struct ipu_isys_pipeline *ip = + to_ipu_isys_pipeline(av->vdev.entity.pipe); + struct ipu_isys_queue *aq = &av->aq; + struct ipu_fw_isys_output_pin_info_abi *pin_info; +#if !defined(CONFIG_VIDEO_INTEL_IPU4) && !defined(CONFIG_VIDEO_INTEL_IPU4P) + struct ipu_isys *isys = av->isys; + unsigned int type_index; +#endif + int pin = cfg->nof_output_pins++; + + aq->fw_output = pin; + ip->output_pins[pin].pin_ready = ipu_isys_queue_buf_ready; + ip->output_pins[pin].aq = aq; + + pin_info = &cfg->output_pins[pin]; + pin_info->input_pin_id = 0; + pin_info->output_res.width = av->mpix.width; + pin_info->output_res.height = av->mpix.height; + + if (!av->pfmt->bpp_planar) + pin_info->stride = av->mpix.plane_fmt[0].bytesperline; + else + pin_info->stride = ALIGN(DIV_ROUND_UP(av->mpix.width * + av->pfmt->bpp_planar, + BITS_PER_BYTE), + av->isys->line_align); + + pin_info->pt = aq->css_pin_type; + pin_info->ft = av->pfmt->css_pixelformat; + pin_info->send_irq = 1; 
+ cfg->vc = ip->vc; + +#if !defined(CONFIG_VIDEO_INTEL_IPU4) && !defined(CONFIG_VIDEO_INTEL_IPU4P) + switch (pin_info->pt) { + /* non-snoopable sensor data to PSYS */ + case IPU_FW_ISYS_PIN_TYPE_RAW_DUAL_SOC: + case IPU_FW_ISYS_PIN_TYPE_RAW_NS: + case IPU_FW_ISYS_PIN_TYPE_RAW_S: + type_index = IPU_FW_ISYS_VC1_SENSOR_DATA; + pin_info->sensor_type = isys->sensor_types[type_index]++; + pin_info->snoopable = false; + + if (isys->sensor_types[type_index] > + IPU_FW_ISYS_VC1_SENSOR_DATA_END) + isys->sensor_types[type_index] = + IPU_FW_ISYS_VC1_SENSOR_DATA_START; + + break; + /* non-snoopable PDAF data */ + case IPU_FW_ISYS_PIN_TYPE_PAF_FF: + type_index = IPU_FW_ISYS_VC1_SENSOR_PDAF; + pin_info->sensor_type = isys->sensor_types[type_index]++; + pin_info->snoopable = false; + + if (isys->sensor_types[type_index] > + IPU_FW_ISYS_VC1_SENSOR_PDAF_END) + isys->sensor_types[type_index] = + IPU_FW_ISYS_VC1_SENSOR_PDAF_START; + + break; + /* snoopable META/Stats data to CPU */ + case IPU_FW_ISYS_PIN_TYPE_METADATA_0: + case IPU_FW_ISYS_PIN_TYPE_METADATA_1: + case IPU_FW_ISYS_PIN_TYPE_AWB_STATS: + case IPU_FW_ISYS_PIN_TYPE_AF_STATS: + case IPU_FW_ISYS_PIN_TYPE_HIST_STATS: + pin_info->sensor_type = IPU_FW_ISYS_SENSOR_METADATA; + pin_info->snoopable = true; + break; + /* snoopable sensor data to CPU */ + case IPU_FW_ISYS_PIN_TYPE_MIPI: + case IPU_FW_ISYS_PIN_TYPE_RAW_SOC: + type_index = IPU_FW_ISYS_VC0_SENSOR_DATA; + pin_info->sensor_type = isys->sensor_types[type_index]++; + pin_info->snoopable = true; + + if (isys->sensor_types[type_index] > + IPU_FW_ISYS_VC0_SENSOR_DATA_END) + isys->sensor_types[type_index] = + IPU_FW_ISYS_VC0_SENSOR_DATA_START; + + break; + default: + dev_err(&av->isys->adev->dev, + "Unknown pin type, use metadata type as default\n"); + + pin_info->sensor_type = IPU_FW_ISYS_SENSOR_METADATA; + pin_info->snoopable = true; + } +#endif +} + +static unsigned int ipu_isys_get_compression_scheme(u32 code) +{ + switch (code) { + case MEDIA_BUS_FMT_SBGGR10_DPCM8_1X8: + 
case MEDIA_BUS_FMT_SGBRG10_DPCM8_1X8: + case MEDIA_BUS_FMT_SGRBG10_DPCM8_1X8: + case MEDIA_BUS_FMT_SRGGB10_DPCM8_1X8: + return 3; + default: + return 0; + } +} + +static unsigned int get_comp_format(u32 code) +{ + unsigned int predictor = 0; /* currently hard coded */ + unsigned int udt = ipu_isys_mbus_code_to_mipi(code); + unsigned int scheme = ipu_isys_get_compression_scheme(code); + + /* if data type is not user defined return here */ + if (udt < IPU_ISYS_MIPI_CSI2_TYPE_USER_DEF(1) || + udt > IPU_ISYS_MIPI_CSI2_TYPE_USER_DEF(8)) + return 0; + + /* + * For each user defined type (1..8) there is configuration bitfield for + * decompression. + * + * | bit 3 | bits 2:0 | + * | predictor | scheme | + * compression schemes: + * 000 = no compression + * 001 = 10 - 6 - 10 + * 010 = 10 - 7 - 10 + * 011 = 10 - 8 - 10 + * 100 = 12 - 6 - 12 + * 101 = 12 - 7 - 12 + * 110 = 12 - 8 - 12 + */ + + return ((predictor << 3) | scheme) << + ((udt - IPU_ISYS_MIPI_CSI2_TYPE_USER_DEF(1)) * 4); +} + +/* Create stream and start it using the CSS FW ABI. 
*/ +static int start_stream_firmware(struct ipu_isys_video *av, + struct ipu_isys_buffer_list *bl) +{ + struct ipu_isys_pipeline *ip = + to_ipu_isys_pipeline(av->vdev.entity.pipe); + struct device *dev = &av->isys->adev->dev; + struct v4l2_subdev_selection sel_fmt = { + .which = V4L2_SUBDEV_FORMAT_ACTIVE, + .target = V4L2_SEL_TGT_CROP, + .pad = CSI2_BE_PAD_SOURCE, + }; + struct ipu_fw_isys_stream_cfg_data_abi *stream_cfg; + struct isys_fw_msgs *msg = NULL; + struct ipu_fw_isys_frame_buff_set_abi *buf = NULL; + struct ipu_isys_queue *aq; + struct ipu_isys_video *isl_av = NULL; + struct ipu_isys_request *ireq = NULL; + struct v4l2_subdev_format source_fmt = { 0 }; + struct v4l2_subdev *be_sd = NULL; + struct media_pad *source_pad = media_entity_remote_pad(&av->pad); + int rval, rvalout, tout; + + rval = get_external_facing_format(ip, &source_fmt); + if (rval) + return rval; + + msg = ipu_get_fw_msg_buf(ip); + if (!msg) + return -ENOMEM; + + stream_cfg = to_stream_cfg_msg_buf(msg); + stream_cfg->compfmt = get_comp_format(source_fmt.format.code); + stream_cfg->input_pins[0].input_res.width = source_fmt.format.width; + stream_cfg->input_pins[0].input_res.height = source_fmt.format.height; + stream_cfg->input_pins[0].dt = + ipu_isys_mbus_code_to_mipi(source_fmt.format.code); + stream_cfg->input_pins[0].mapped_dt = N_IPU_FW_ISYS_MIPI_DATA_TYPE; + + if (ip->csi2 && !v4l2_ctrl_g_ctrl(ip->csi2->store_csi2_header)) + stream_cfg->input_pins[0].mipi_store_mode = + IPU_FW_ISYS_MIPI_STORE_MODE_DISCARD_LONG_HEADER; + else if (ip->tpg && !v4l2_ctrl_g_ctrl(ip->tpg->store_csi2_header)) + stream_cfg->input_pins[0].mipi_store_mode = + IPU_FW_ISYS_MIPI_STORE_MODE_DISCARD_LONG_HEADER; + + stream_cfg->src = ip->source; + stream_cfg->vc = 0; + stream_cfg->isl_use = ip->isl_mode; + stream_cfg->nof_input_pins = 1; + + /* + * Only CSI2-BE and SOC BE has the capability to do crop, + * so get the crop info from csi2-be or csi2-be-soc. 
+ */ + if (ip->csi2_be) { + be_sd = &ip->csi2_be->asd.sd; + } else if (ip->csi2_be_soc) { + be_sd = &ip->csi2_be_soc->asd.sd; + if (source_pad) + sel_fmt.pad = source_pad->index; + } + if (be_sd && + !v4l2_subdev_call(be_sd, pad, get_selection, NULL, &sel_fmt)) { + stream_cfg->crop[0].left_offset = sel_fmt.r.left; + stream_cfg->crop[0].top_offset = sel_fmt.r.top; + stream_cfg->crop[0].right_offset = sel_fmt.r.left + + sel_fmt.r.width; + stream_cfg->crop[0].bottom_offset = sel_fmt.r.top + + sel_fmt.r.height; + + } else { + stream_cfg->crop[0].right_offset = source_fmt.format.width; + stream_cfg->crop[0].bottom_offset = source_fmt.format.height; + } + + /* + * If the CSI-2 backend's video node is part of the pipeline + * it must be arranged first in the output pin list. This is + * the most probably a firmware requirement. + */ + if (ip->isl_mode == IPU_ISL_CSI2_BE) + isl_av = &ip->csi2_be->av; + else if (ip->isl_mode == IPU_ISL_ISA) + isl_av = &av->isys->isa.av; + + if (isl_av) { + struct ipu_isys_queue *safe; + + list_for_each_entry_safe(aq, safe, &ip->queues, node) { + struct ipu_isys_video *av = ipu_isys_queue_to_video(aq); + + if (av != isl_av) + continue; + + list_del(&aq->node); + list_add(&aq->node, &ip->queues); + break; + } + } + + list_for_each_entry(aq, &ip->queues, node) { + struct ipu_isys_video *__av = ipu_isys_queue_to_video(aq); + + __av->prepare_firmware_stream_cfg(__av, stream_cfg); + } + + if (ip->interlaced && ip->isys->short_packet_source == + IPU_ISYS_SHORT_PACKET_FROM_RECEIVER) + csi_short_packet_prepare_firmware_stream_cfg(ip, stream_cfg); + + ipu_fw_isys_dump_stream_cfg(dev, stream_cfg); + + ip->nr_output_pins = stream_cfg->nof_output_pins; + + rval = get_stream_handle(av); + if (rval) { + dev_dbg(dev, "Can't get stream_handle\n"); + return rval; + } + + reinit_completion(&ip->stream_open_completion); + + ipu_fw_isys_set_params(stream_cfg); + + rval = ipu_fw_isys_complex_cmd(av->isys, + ip->stream_handle, + stream_cfg, + to_dma_addr(msg), + 
sizeof(*stream_cfg), + IPU_FW_ISYS_SEND_TYPE_STREAM_OPEN); + ipu_put_fw_mgs_buffer(av->isys, (uintptr_t) stream_cfg); + + if (rval < 0) { + dev_err(dev, "can't open stream (%d)\n", rval); + goto out_put_stream_handle; + } + + get_stream_opened(av); + + tout = wait_for_completion_timeout(&ip->stream_open_completion, + IPU_LIB_CALL_TIMEOUT_JIFFIES); + if (!tout) { + dev_err(dev, "stream open time out\n"); + rval = -ETIMEDOUT; + goto out_put_stream_opened; + } + if (ip->error) { + dev_err(dev, "stream open error: %d\n", ip->error); + rval = -EIO; + goto out_put_stream_opened; + } + dev_dbg(dev, "start stream: open complete\n"); + + ireq = ipu_isys_next_queued_request(ip); + + if (bl || ireq) { + msg = ipu_get_fw_msg_buf(ip); + if (!msg) { + rval = -ENOMEM; + goto out_put_stream_opened; + } + buf = to_frame_msg_buf(msg); + } + + if (bl) { + ipu_isys_buffer_list_to_ipu_fw_isys_frame_buff_set(buf, ip, bl); + ipu_isys_buffer_list_queue(bl, + IPU_ISYS_BUFFER_LIST_FL_ACTIVE, 0); + } else if (ireq) { + rval = ipu_isys_req_prepare(&av->isys->media_dev, + ireq, ip, buf); + if (rval) + goto out_put_stream_opened; + } + + reinit_completion(&ip->stream_start_completion); + + if (bl || ireq) { + ipu_fw_isys_dump_frame_buff_set(dev, buf, + stream_cfg->nof_output_pins); + rval = ipu_fw_isys_complex_cmd(av->isys, + ip->stream_handle, + buf, to_dma_addr(msg), + sizeof(*buf), + IPU_FW_ISYS_SEND_TYPE_STREAM_START_AND_CAPTURE); + ipu_put_fw_mgs_buffer(av->isys, (uintptr_t) buf); + } else { + rval = ipu_fw_isys_simple_cmd(av->isys, + ip->stream_handle, + IPU_FW_ISYS_SEND_TYPE_STREAM_START); + } + + if (rval < 0) { + dev_err(dev, "can't start streaming (%d)\n", rval); + goto out_stream_close; + } + + tout = wait_for_completion_timeout(&ip->stream_start_completion, + IPU_LIB_CALL_TIMEOUT_JIFFIES); + if (!tout) { + dev_err(dev, "stream start time out\n"); + rval = -ETIMEDOUT; + goto out_stream_close; + } + if (ip->error) { + dev_err(dev, "stream start error: %d\n", ip->error); + rval = -EIO; 
+ goto out_stream_close; + } + dev_dbg(dev, "start stream: complete\n"); + + return 0; + +out_stream_close: + reinit_completion(&ip->stream_close_completion); + + rvalout = ipu_fw_isys_simple_cmd(av->isys, + ip->stream_handle, + IPU_FW_ISYS_SEND_TYPE_STREAM_CLOSE); + if (rvalout < 0) { + dev_dbg(dev, "can't close stream (%d)\n", rvalout); + goto out_put_stream_opened; + } + + tout = wait_for_completion_timeout(&ip->stream_close_completion, + IPU_LIB_CALL_TIMEOUT_JIFFIES); + if (!tout) + dev_err(dev, "stream close time out\n"); + else if (ip->error) + dev_err(dev, "stream close error: %d\n", ip->error); + else + dev_dbg(dev, "stream close complete\n"); + +out_put_stream_opened: + put_stream_opened(av); + +out_put_stream_handle: + put_stream_handle(av); + return rval; +} + +static void stop_streaming_firmware(struct ipu_isys_video *av) +{ + struct ipu_isys_pipeline *ip = + to_ipu_isys_pipeline(av->vdev.entity.pipe); + struct device *dev = &av->isys->adev->dev; + int rval, tout; + enum ipu_fw_isys_send_type send_type = + IPU_FW_ISYS_SEND_TYPE_STREAM_FLUSH; + + reinit_completion(&ip->stream_stop_completion); + + /* Use STOP command if running in CSI capture mode */ + if (use_stream_stop) + send_type = IPU_FW_ISYS_SEND_TYPE_STREAM_STOP; + + rval = ipu_fw_isys_simple_cmd(av->isys, ip->stream_handle, + send_type); + + if (rval < 0) { + dev_err(dev, "can't stop stream (%d)\n", rval); + return; + } + + tout = wait_for_completion_timeout(&ip->stream_stop_completion, + IPU_LIB_CALL_TIMEOUT_JIFFIES); + if (!tout) + dev_err(dev, "stream stop time out\n"); + else if (ip->error) + dev_err(dev, "stream stop error: %d\n", ip->error); + else + dev_dbg(dev, "stop stream: complete\n"); +} + +static void close_streaming_firmware(struct ipu_isys_video *av) +{ + struct ipu_isys_pipeline *ip = + to_ipu_isys_pipeline(av->vdev.entity.pipe); + struct device *dev = &av->isys->adev->dev; + int rval, tout; + + reinit_completion(&ip->stream_close_completion); + + rval = 
ipu_fw_isys_simple_cmd(av->isys, ip->stream_handle, + IPU_FW_ISYS_SEND_TYPE_STREAM_CLOSE); + if (rval < 0) { + dev_err(dev, "can't close stream (%d)\n", rval); + return; + } + + tout = wait_for_completion_timeout(&ip->stream_close_completion, + IPU_LIB_CALL_TIMEOUT_JIFFIES); + if (!tout) + dev_err(dev, "stream close time out\n"); + else if (ip->error) + dev_err(dev, "stream close error: %d\n", ip->error); + else + dev_dbg(dev, "close stream: complete\n"); + + put_stream_opened(av); + put_stream_handle(av); +} + +void +ipu_isys_video_add_capture_done(struct ipu_isys_pipeline *ip, + void (*capture_done) + (struct ipu_isys_pipeline *ip, + struct ipu_fw_isys_resp_info_abi *resp)) +{ + unsigned int i; + + /* Different instances may register same function. Add only once */ + for (i = 0; i < IPU_NUM_CAPTURE_DONE; i++) + if (ip->capture_done[i] == capture_done) + return; + + for (i = 0; i < IPU_NUM_CAPTURE_DONE; i++) { + if (!ip->capture_done[i]) { + ip->capture_done[i] = capture_done; + return; + } + } + /* + * Too many call backs registered. Change to IPU_NUM_CAPTURE_DONE + * constant probably required. 
+ */ + WARN_ON(1); +} + +int ipu_isys_video_prepare_streaming(struct ipu_isys_video *av, + unsigned int state) +{ + struct ipu_isys *isys = av->isys; + struct device *dev = &isys->adev->dev; + struct ipu_isys_pipeline *ip; +#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 14, 0) + struct media_graph graph; +#else + struct media_entity_graph graph; +#endif + struct media_entity *entity; + struct media_device *mdev = &av->isys->media_dev; + int rval; + unsigned int i; + + dev_dbg(dev, "prepare stream: %d\n", state); + + if (!state) { + ip = to_ipu_isys_pipeline(av->vdev.entity.pipe); + + if (ip->interlaced && isys->short_packet_source == + IPU_ISYS_SHORT_PACKET_FROM_RECEIVER) + short_packet_queue_destroy(ip); + media_pipeline_stop(&av->vdev.entity); + media_entity_enum_cleanup(&ip->entity_enum); + return 0; + } + + ip = &av->ip; + + WARN_ON(ip->nr_streaming); + ip->has_sof = false; + ip->nr_queues = 0; + ip->external = NULL; + atomic_set(&ip->sequence, 0); + ip->isl_mode = IPU_ISL_OFF; + + for (i = 0; i < IPU_NUM_CAPTURE_DONE; i++) + ip->capture_done[i] = NULL; + ip->csi2_be = NULL; + ip->csi2_be_soc = NULL; + ip->csi2 = NULL; + ip->tpg = NULL; + ip->seq_index = 0; + memset(ip->seq, 0, sizeof(ip->seq)); + + WARN_ON(!list_empty(&ip->queues)); + ip->interlaced = false; + + rval = media_entity_enum_init(&ip->entity_enum, mdev); + if (rval) + return rval; + + rval = media_pipeline_start(&av->vdev.entity, &ip->pipe); + if (rval < 0) { + dev_dbg(dev, "pipeline start failed\n"); + goto out_enum_cleanup; + } + + if (!ip->external) { + dev_err(dev, "no external entity set! Driver bug?\n"); + rval = -EINVAL; + goto out_pipeline_stop; + } + + rval = media_graph_walk_init(&graph, mdev); + if (rval) + goto out_pipeline_stop; + + /* Gather all entities in the graph. 
*/ + mutex_lock(&mdev->graph_mutex); + media_graph_walk_start(&graph, &av->vdev.entity); + while ((entity = media_graph_walk_next(&graph))) + media_entity_enum_set(&ip->entity_enum, entity); + + mutex_unlock(&mdev->graph_mutex); + + media_graph_walk_cleanup(&graph); + + if (ip->interlaced) { + rval = short_packet_queue_setup(ip); + if (rval) { + dev_err(&isys->adev->dev, + "Failed to setup short packet buffer.\n"); + goto out_pipeline_stop; + } + } + + dev_dbg(dev, "prepare stream: external entity %s\n", + ip->external->entity->name); + + return 0; + +out_pipeline_stop: + media_pipeline_stop(&av->vdev.entity); + +out_enum_cleanup: + media_entity_enum_cleanup(&ip->entity_enum); + + return rval; +} + +static int perform_skew_cal(struct ipu_isys_pipeline *ip) +{ + struct v4l2_subdev *ext_sd = + media_entity_to_v4l2_subdev(ip->external->entity); + int rval; + + if (!ext_sd) { + WARN_ON(1); + return -ENODEV; + } + ipu_isys_csi2_set_skew_cal(ip->csi2, true); + + rval = v4l2_subdev_call(ext_sd, video, s_stream, true); + if (rval) + goto turn_off_skew_cal; + + /* TODO: do we have a better way available than waiting for a while ? */ + msleep(50); + + rval = v4l2_subdev_call(ext_sd, video, s_stream, false); + +turn_off_skew_cal: + ipu_isys_csi2_set_skew_cal(ip->csi2, false); + + /* TODO: do we have a better way available than waiting for a while ? 
*/ + msleep(50); + + return rval; +} + +int ipu_isys_video_set_streaming(struct ipu_isys_video *av, + unsigned int state, + struct ipu_isys_buffer_list *bl) +{ + struct device *dev = &av->isys->adev->dev; +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 5, 0) + struct media_device *mdev = av->vdev.entity.parent; + struct media_entity_graph graph; +#else + struct media_device *mdev = av->vdev.entity.graph_obj.mdev; +#endif + struct media_entity_enum entities; + + struct media_entity *entity, *entity2; + struct ipu_isys_pipeline *ip = + to_ipu_isys_pipeline(av->vdev.entity.pipe); + struct v4l2_subdev *sd, *esd; + int rval = 0; + + dev_dbg(dev, "set stream: %d\n", state); + + if (!ip->external->entity) { + WARN_ON(1); + return -ENODEV; + } + esd = media_entity_to_v4l2_subdev(ip->external->entity); + + if (state) { + rval = media_graph_walk_init(&ip->graph, mdev); + if (rval) + return rval; + rval = media_entity_enum_init(&entities, mdev); + if (rval) + goto out_media_entity_graph_init; + } + + if (!state) { + stop_streaming_firmware(av); + + /* stop external sub-device now. */ + dev_err(dev, "s_stream %s (ext)\n", ip->external->entity->name); + + if (ip->csi2) { + if (ip->csi2->stream_count == 1) { + v4l2_subdev_call(esd, video, s_stream, state); +#if defined(CONFIG_VIDEO_INTEL_IPU4) || defined(CONFIG_VIDEO_INTEL_IPU4P) + ipu_isys_csi2_wait_last_eof(ip->csi2); +#endif + } + } else { + v4l2_subdev_call(esd, video, s_stream, state); + } + } + + mutex_lock(&mdev->graph_mutex); + + media_graph_walk_start(& +#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 5, 0) + ip-> +#endif + graph, + &av->vdev.entity); + + while ((entity = media_graph_walk_next(& +#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 5, 0) + ip-> +#endif + graph))) { + sd = media_entity_to_v4l2_subdev(entity); + + dev_dbg(dev, "set stream: entity %s\n", entity->name); + + /* Non-subdev nodes can be safely ignored here. 
 */
		if (!is_media_entity_v4l2_subdev(entity))
			continue;

		/* Don't start truly external devices quite yet. */
		if (strncmp(sd->name, IPU_ISYS_ENTITY_PREFIX,
			    strlen(IPU_ISYS_ENTITY_PREFIX)) != 0 ||
		    ip->external->entity == entity)
			continue;

		dev_dbg(dev, "s_stream %s\n", entity->name);
		rval = v4l2_subdev_call(sd, video, s_stream, state);
		/* When stopping, ignore errors and keep walking the graph. */
		if (!state)
			continue;
		if (rval && rval != -ENOIOCTLCMD) {
			mutex_unlock(&mdev->graph_mutex);
			goto out_media_entity_stop_streaming;
		}

		/* Remember each started entity for the error unwind path. */
		media_entity_enum_set(&entities, entity);
	}

	mutex_unlock(&mdev->graph_mutex);

	/*
	 * The graph walk above handled only the IPU-internal subdevs; now
	 * deal with the firmware stream and the external (sensor) subdev.
	 */
	if (state) {
		if (ipu_isys_csi2_skew_cal_required(ip->csi2) &&
		    ip->csi2->remote_streams == ip->csi2->stream_count)
			perform_skew_cal(ip);

		rval = start_stream_firmware(av, bl);
		if (rval)
			goto out_media_entity_stop_streaming;

		dev_dbg(dev, "set stream: source %d, stream_handle %d\n",
			ip->source, ip->stream_handle);

		/* Start external sub-device now. */
		dev_dbg(dev, "set stream: s_stream %s (ext)\n",
			ip->external->entity->name);

		/*
		 * Start the sensor only once all remote CSI-2 streams are
		 * accounted for (or when there is no CSI-2 receiver at all).
		 */
		if (ip->csi2 &&
		    ip->csi2->remote_streams == ip->csi2->stream_count)
			rval = v4l2_subdev_call(esd, video, s_stream, state);
		else if (!ip->csi2)
			rval = v4l2_subdev_call(esd, video, s_stream, state);
		if (rval)
			goto out_media_entity_stop_streaming_firmware;
	} else {
		close_streaming_firmware(av);
		av->ip.stream_id = 0;
		av->ip.vc = 0;
	}

	if (state)
		media_entity_enum_cleanup(&entities);
	else
		media_graph_walk_cleanup(&ip->graph);
	av->streaming = state;

	return 0;

out_media_entity_stop_streaming_firmware:
	stop_streaming_firmware(av);

out_media_entity_stop_streaming:
	/* Unwind: stop every subdev started before the failure point. */
	mutex_lock(&mdev->graph_mutex);

	media_graph_walk_start(&
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 5, 0)
			       ip->
#endif
			       graph,
			       &av->vdev.entity);

	while (state && (entity2 = media_graph_walk_next(&
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 5, 0)
							 ip->
#endif
							 graph)) &&
	       entity2 != entity) {
		sd = media_entity_to_v4l2_subdev(entity2);

		if (!media_entity_enum_test(&entities, entity2))
			continue;

		v4l2_subdev_call(sd, video, s_stream, 0);
	}

	mutex_unlock(&mdev->graph_mutex);

	media_entity_enum_cleanup(&entities);

out_media_entity_graph_init:
	media_graph_walk_cleanup(&ip->graph);

	return rval;
}

#ifdef CONFIG_COMPAT
/* 32-bit compat ioctl: forward everything to the native handler. */
static long ipu_isys_compat_ioctl(struct file *file, unsigned int cmd,
				  unsigned long arg)
{
	long ret = -ENOIOCTLCMD;
	void __user *up = compat_ptr(arg);

	/*
	 * At present there are no private ioctls that need special
	 * compat handling.
	 */
	if (file->f_op->unlocked_ioctl)
		ret = file->f_op->unlocked_ioctl(file, cmd, (unsigned long)up);

	return ret;
}
#endif

/* ioctl ops for single-plane capture video nodes */
static const struct v4l2_ioctl_ops ioctl_ops_splane = {
	.vidioc_querycap = ipu_isys_vidioc_querycap,
	.vidioc_enum_fmt_vid_cap = ipu_isys_vidioc_enum_fmt,
	.vidioc_g_fmt_vid_cap = vidioc_g_fmt_vid_cap,
	.vidioc_s_fmt_vid_cap = vidioc_s_fmt_vid_cap,
	.vidioc_try_fmt_vid_cap = vidioc_try_fmt_vid_cap,
	.vidioc_reqbufs = vb2_ioctl_reqbufs,
	.vidioc_create_bufs = vb2_ioctl_create_bufs,
	.vidioc_prepare_buf = vb2_ioctl_prepare_buf,
	.vidioc_querybuf = vb2_ioctl_querybuf,
	.vidioc_qbuf = vb2_ioctl_qbuf,
	.vidioc_dqbuf = vb2_ioctl_dqbuf,
	.vidioc_streamon = vb2_ioctl_streamon,
	.vidioc_streamoff = vb2_ioctl_streamoff,
	.vidioc_expbuf = vb2_ioctl_expbuf,
	.vidioc_default = ipu_isys_vidioc_private,
	.vidioc_enum_input = vidioc_enum_input,
	.vidioc_g_input = vidioc_g_input,
	.vidioc_s_input = vidioc_s_input,
};

/* ioctl ops for multi-plane capture video nodes */
static const struct v4l2_ioctl_ops ioctl_ops_mplane = {
	.vidioc_querycap = ipu_isys_vidioc_querycap,
	.vidioc_enum_fmt_vid_cap_mplane = ipu_isys_vidioc_enum_fmt,
	.vidioc_g_fmt_vid_cap_mplane = vidioc_g_fmt_vid_cap_mplane,
	.vidioc_s_fmt_vid_cap_mplane = vidioc_s_fmt_vid_cap_mplane,
	.vidioc_try_fmt_vid_cap_mplane = vidioc_try_fmt_vid_cap_mplane,
	.vidioc_reqbufs = vb2_ioctl_reqbufs,
	.vidioc_create_bufs = vb2_ioctl_create_bufs,
	.vidioc_prepare_buf = vb2_ioctl_prepare_buf,
	.vidioc_querybuf = vb2_ioctl_querybuf,
	.vidioc_qbuf = vb2_ioctl_qbuf,
	.vidioc_dqbuf = vb2_ioctl_dqbuf,
	.vidioc_streamon = vb2_ioctl_streamon,
	.vidioc_streamoff = vb2_ioctl_streamoff,
	.vidioc_expbuf = vb2_ioctl_expbuf,
	.vidioc_default = ipu_isys_vidioc_private,
	.vidioc_enum_input = vidioc_enum_input,
	.vidioc_g_input = vidioc_g_input,
	.vidioc_s_input = vidioc_s_input,
};

/* media entity operations shared by all ISYS video nodes */
static const struct media_entity_operations entity_ops = {
	.link_validate = link_validate,
};

/* file operations shared by all ISYS video device nodes */
static const struct v4l2_file_operations isys_fops = {
	.owner = THIS_MODULE,
	.poll = vb2_fop_poll,
	.unlocked_ioctl = video_ioctl2,
#ifdef CONFIG_COMPAT
	.compat_ioctl32 = ipu_isys_compat_ioctl,
#endif
	.mmap = vb2_fop_mmap,
	.open = video_open,
	.release = video_release,
};

/*
 * Do everything that's needed to initialise things related to video
 * buffer queue, video node, and the related media entity. The caller
 * is expected to assign isys field and set the name of the video
 * device.
 */
int ipu_isys_video_init(struct ipu_isys_video *av,
			struct media_entity *entity,
			unsigned int pad, unsigned long pad_flags,
			unsigned int flags)
{
	const struct v4l2_ioctl_ops *ioctl_ops = NULL;
	int rval;

	mutex_init(&av->mutex);
	init_completion(&av->ip.stream_open_completion);
	init_completion(&av->ip.stream_close_completion);
	init_completion(&av->ip.stream_start_completion);
	init_completion(&av->ip.stream_stop_completion);
	init_completion(&av->ip.capture_ack_completion);
	INIT_LIST_HEAD(&av->ip.queues);
	spin_lock_init(&av->ip.short_packet_queue_lock);
	av->ip.isys = av->isys;
	av->ip.stream_id = 0;
	av->ip.vc = 0;

	if (pad_flags & MEDIA_PAD_FL_SINK) {
		/* data_offset is available only for multi-plane buffers */
		if (av->line_header_length) {
			av->aq.vbq.type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
			ioctl_ops = &ioctl_ops_mplane;
		} else {
			av->aq.vbq.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
			ioctl_ops = &ioctl_ops_splane;
		}
		av->vdev.vfl_dir = VFL_DIR_RX;
	} else {
		/* Source pad: this node transmits data (output device). */
		av->aq.vbq.type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE;
		av->vdev.vfl_dir = VFL_DIR_TX;
	}
	rval = ipu_isys_queue_init(&av->aq);
	if (rval)
		goto out_mutex_destroy;

	av->pad.flags = pad_flags | MEDIA_PAD_FL_MUST_CONNECT;
	rval = media_entity_pads_init(&av->vdev.entity, 1, &av->pad);
	if (rval)
		goto out_ipu_isys_queue_cleanup;

	av->vdev.entity.ops = &entity_ops;
	av->vdev.release = video_device_release_empty;
	av->vdev.fops = &isys_fops;
	av->vdev.v4l2_dev = &av->isys->v4l2_dev;
	/* Respect ioctl_ops a caller may already have installed. */
	if (!av->vdev.ioctl_ops)
		av->vdev.ioctl_ops = ioctl_ops;
	av->vdev.queue = &av->aq.vbq;
	av->vdev.lock = &av->mutex;
	set_bit(V4L2_FL_USES_V4L2_FH, &av->vdev.flags);
	video_set_drvdata(&av->vdev, av);

	mutex_lock(&av->mutex);

	rval = video_register_device(&av->vdev, VFL_TYPE_GRABBER, -1);
	if (rval)
		goto out_media_entity_cleanup;

	/* Link the video node to the given entity pad, direction-aware. */
	if (pad_flags & MEDIA_PAD_FL_SINK)
		rval = media_create_pad_link(entity, pad,
					     &av->vdev.entity, 0, flags);
	else
		rval =
media_create_pad_link(&av->vdev.entity, 0, entity, + pad, flags); + if (rval) { + dev_info(&av->isys->adev->dev, "can't create link\n"); + goto out_media_entity_cleanup; + } + + av->pfmt = av->try_fmt_vid_mplane(av, &av->mpix); + + mutex_unlock(&av->mutex); + + return rval; + +out_media_entity_cleanup: + video_unregister_device(&av->vdev); + mutex_unlock(&av->mutex); + media_entity_cleanup(&av->vdev.entity); + +out_ipu_isys_queue_cleanup: + ipu_isys_queue_cleanup(&av->aq); + +out_mutex_destroy: + mutex_destroy(&av->mutex); + + return rval; +} + +void ipu_isys_video_cleanup(struct ipu_isys_video *av) +{ + video_unregister_device(&av->vdev); + media_entity_cleanup(&av->vdev.entity); + mutex_destroy(&av->mutex); + ipu_isys_queue_cleanup(&av->aq); +} diff --git a/drivers/media/pci/intel/ipu-isys-video.h b/drivers/media/pci/intel/ipu-isys-video.h new file mode 100644 index 0000000000000..c1375f70a897e --- /dev/null +++ b/drivers/media/pci/intel/ipu-isys-video.h @@ -0,0 +1,169 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (C) 2013 - 2018 Intel Corporation */ + +#ifndef IPU_ISYS_VIDEO_H +#define IPU_ISYS_VIDEO_H + +#include +#include +#include +#include +#include +#include + +#include "ipu-isys-queue.h" + +#define IPU_ISYS_OUTPUT_PINS 11 +#define IPU_NUM_CAPTURE_DONE 2 +#define IPU_ISYS_MAX_PARALLEL_SOF 2 + +struct ipu_isys; +struct ipu_isys_csi2_be_soc; +struct ipu_fw_isys_stream_cfg_data_abi; + +struct ipu_isys_pixelformat { + u32 pixelformat; + u32 bpp; + u32 bpp_packed; + u32 bpp_planar; + u32 code; + u32 css_pixelformat; +}; + +struct sequence_info { + unsigned int sequence; + u64 timestamp; +}; + +struct output_pin_data { + void (*pin_ready)(struct ipu_isys_pipeline *ip, + struct ipu_fw_isys_resp_info_abi *info); + struct ipu_isys_queue *aq; +}; + +struct ipu_isys_pipeline { + struct media_pipeline pipe; + struct media_pad *external; + atomic_t sequence; + unsigned int seq_index; + struct sequence_info seq[IPU_ISYS_MAX_PARALLEL_SOF]; + int source; /* SSI 
stream source */ + int stream_handle; /* stream handle for CSS API */ + unsigned int nr_output_pins; /* How many firmware pins? */ + enum ipu_isl_mode isl_mode; + struct ipu_isys_csi2_be *csi2_be; + struct ipu_isys_csi2_be_soc *csi2_be_soc; + struct ipu_isys_csi2 *csi2; + struct ipu_isys_tpg *tpg; + /* + * Number of capture queues, write access serialised using struct + * ipu_isys.stream_mutex + */ + int nr_queues; + int nr_streaming; /* Number of capture queues streaming */ + int streaming; /* Has streaming been really started? */ + struct list_head queues; + struct completion stream_open_completion; + struct completion stream_close_completion; + struct completion stream_start_completion; + struct completion stream_stop_completion; + struct completion capture_ack_completion; + struct ipu_isys *isys; + + void (*capture_done[IPU_NUM_CAPTURE_DONE]) + (struct ipu_isys_pipeline *ip, + struct ipu_fw_isys_resp_info_abi *resp); + struct output_pin_data output_pins[IPU_ISYS_OUTPUT_PINS]; + bool has_sof; + bool interlaced; + int error; + struct ipu_isys_private_buffer *short_packet_bufs; + size_t short_packet_buffer_size; + unsigned int num_short_packet_lines; + unsigned int short_packet_output_pin; + unsigned int cur_field; + struct list_head short_packet_incoming; + struct list_head short_packet_active; + /* Serialize access to short packet active and incoming lists */ + spinlock_t short_packet_queue_lock; + struct list_head pending_interlaced_bufs; + unsigned int short_packet_trace_index; + unsigned int vc; + unsigned int stream_id; +#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 5, 0) +#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 14, 0) + struct media_graph graph; +#else + struct media_entity_graph graph; +#endif +#endif + struct media_entity_enum entity_enum; +}; + +#define to_ipu_isys_pipeline(__pipe) \ + container_of((__pipe), struct ipu_isys_pipeline, pipe) + +struct ipu_isys_video { + /* Serialise access to other fields in the struct. 
*/ + struct mutex mutex; + struct media_pad pad; + struct video_device vdev; + struct v4l2_pix_format_mplane mpix; + const struct ipu_isys_pixelformat *pfmts; + const struct ipu_isys_pixelformat *pfmt; + struct ipu_isys_queue aq; + struct ipu_isys *isys; + struct ipu_isys_pipeline ip; + unsigned int streaming; + bool packed; + unsigned int line_header_length; /* bits */ + unsigned int line_footer_length; /* bits */ + const struct ipu_isys_pixelformat *(*try_fmt_vid_mplane)( + struct ipu_isys_video *av, + struct v4l2_pix_format_mplane *mpix); + void (*prepare_firmware_stream_cfg)(struct ipu_isys_video *av, + struct ipu_fw_isys_stream_cfg_data_abi *cfg); +}; + +#define ipu_isys_queue_to_video(__aq) \ + container_of(__aq, struct ipu_isys_video, aq) + +extern const struct ipu_isys_pixelformat ipu_isys_pfmts[]; +extern const struct ipu_isys_pixelformat ipu_isys_pfmts_be_soc[]; +extern const struct ipu_isys_pixelformat ipu_isys_pfmts_packed[]; + +const struct ipu_isys_pixelformat * +ipu_isys_get_pixelformat(struct ipu_isys_video *av, u32 pixelformat); + +int ipu_isys_vidioc_querycap(struct file *file, void *fh, + struct v4l2_capability *cap); + +int ipu_isys_vidioc_enum_fmt(struct file *file, void *fh, + struct v4l2_fmtdesc *f); + +const struct ipu_isys_pixelformat * +ipu_isys_video_try_fmt_vid_mplane_default(struct ipu_isys_video *av, + struct v4l2_pix_format_mplane *mpix); + +const struct ipu_isys_pixelformat * +ipu_isys_video_try_fmt_vid_mplane(struct ipu_isys_video *av, + struct v4l2_pix_format_mplane *mpix, + int store_csi2_header); + +void ipu_isys_prepare_firmware_stream_cfg_default( + struct ipu_isys_video *av, + struct ipu_fw_isys_stream_cfg_data_abi *cfg); +int ipu_isys_video_prepare_streaming(struct ipu_isys_video *av, + unsigned int state); +int ipu_isys_video_set_streaming(struct ipu_isys_video *av, unsigned int state, + struct ipu_isys_buffer_list *bl); +int ipu_isys_video_init(struct ipu_isys_video *av, struct media_entity *source, + unsigned int 
source_pad, unsigned long pad_flags, + unsigned int flags); +void ipu_isys_video_cleanup(struct ipu_isys_video *av); +void ipu_isys_video_add_capture_done(struct ipu_isys_pipeline *ip, + void (*capture_done) + (struct ipu_isys_pipeline *ip, + struct ipu_fw_isys_resp_info_abi *resp)); + +#endif /* IPU_ISYS_VIDEO_H */ diff --git a/drivers/media/pci/intel/ipu-isys.c b/drivers/media/pci/intel/ipu-isys.c new file mode 100644 index 0000000000000..aa8dec0108729 --- /dev/null +++ b/drivers/media/pci/intel/ipu-isys.c @@ -0,0 +1,1466 @@ +// SPDX-License-Identifier: GPL-2.0 +// Copyright (C) 2013 - 2018 Intel Corporation + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 6, 0) +#include +#endif +#include + +#include "ipu.h" +#include "ipu-bus.h" +#include "ipu-cpd.h" +#include "ipu-mmu.h" +#include "ipu-dma.h" +#include "ipu-isys.h" +#include "ipu-isys-csi2.h" +#include "ipu-isys-tpg.h" +#include "ipu-isys-video.h" +#include "ipu-platform-regs.h" +#include "ipu-buttress.h" +#include "ipu-platform.h" +#include "ipu-platform-buttress-regs.h" + +#define ISYS_PM_QOS_VALUE 300 + +/* + * The param was passed from module to indicate if port + * could be optimized. + */ +static bool csi2_port_optimized = true; +module_param(csi2_port_optimized, bool, 0660); +MODULE_PARM_DESC(csi2_port_optimized, "IPU CSI2 port optimization"); + +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 6, 0) +/* + * BEGIN adapted code from drivers/media/platform/omap3isp/isp.c. + * FIXME: This (in terms of functionality if not code) should be most + * likely generalised in the framework, and use made optional for + * drivers. + */ +/* + * ipu_pipeline_pm_use_count - Count the number of users of a pipeline + * @entity: The entity + * + * Return the total number of users of all video device nodes in the pipeline. 
+ */ +static int ipu_pipeline_pm_use_count(struct media_pad *pad) +{ + struct media_entity_graph graph; + struct media_entity *entity = pad->entity; + int use = 0; + +#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 5, 0) + media_graph_walk_init(&graph, entity->graph_obj.mdev); +#endif + media_graph_walk_start(&graph, pad); + + while ((entity = media_graph_walk_next(&graph))) { + if (is_media_entity_v4l2_io(entity)) + use += entity->use_count; + } + +#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 5, 0) + media_graph_walk_cleanup(&graph); +#endif + return use; +} + +/* + * ipu_pipeline_pm_power_one - Apply power change to an entity + * @entity: The entity + * @change: Use count change + * + * Change the entity use count by @change. If the entity is a subdev update its + * power state by calling the core::s_power operation when the use count goes + * from 0 to != 0 or from != 0 to 0. + * + * Return 0 on success or a negative error code on failure. + */ +static int ipu_pipeline_pm_power_one(struct media_entity *entity, int change) +{ + struct v4l2_subdev *subdev; + int ret; + + subdev = is_media_entity_v4l2_subdev(entity) + ? media_entity_to_v4l2_subdev(entity) : NULL; + + if (entity->use_count == 0 && change > 0 && subdev) { + ret = v4l2_subdev_call(subdev, core, s_power, 1); + if (ret < 0 && ret != -ENOIOCTLCMD) + return ret; + } + + entity->use_count += change; + WARN_ON(entity->use_count < 0); + + if (entity->use_count == 0 && change < 0 && subdev) + v4l2_subdev_call(subdev, core, s_power, 0); + + return 0; +} + +/* + * ipu_get_linked_pad - Find internally connected pad for a given pad + * @entity: The entity + * @pad: Initial pad + * + * Return index of the linked pad. 
+ */ +static int ipu_get_linked_pad(struct media_entity *entity, + struct media_pad *pad) +{ + int i; + + for (i = 0; i < entity->num_pads; i++) { + struct media_pad *opposite_pad = &entity->pads[i]; + + if (opposite_pad == pad) + continue; + + if (media_entity_has_route(entity, pad->index, + opposite_pad->index)) + return opposite_pad->index; + } + + return 0; +} + +/* + * ipu_pipeline_pm_power - Apply power change to all entities + * in a pipeline + * @entity: The entity + * @change: Use count change + * @from_pad: Starting pad + * + * Walk the pipeline to update the use count and the power state of + * all non-node + * entities. + * + * Return 0 on success or a negative error code on failure. + */ +static int ipu_pipeline_pm_power(struct media_entity *entity, + int change, int from_pad) +{ + struct media_entity_graph graph; + struct media_entity *first = entity; + int ret = 0; + + if (!change) + return 0; + +#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 5, 0) + media_graph_walk_init(&graph, entity->graph_obj.mdev); +#endif + media_graph_walk_start(&graph, &entity->pads[from_pad]); + + while (!ret && (entity = media_graph_walk_next(&graph))) + if (!is_media_entity_v4l2_io(entity)) + ret = ipu_pipeline_pm_power_one(entity, change); + +#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 5, 0) + media_graph_walk_cleanup(&graph); +#endif + if (!ret) + return 0; + +#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 5, 0) + media_graph_walk_init(&graph, entity->graph_obj.mdev); +#endif + media_graph_walk_start(&graph, &first->pads[from_pad]); + + while ((first = media_graph_walk_next(&graph)) && + first != entity) + if (!is_media_entity_v4l2_io(first)) + ipu_pipeline_pm_power_one(first, -change); + +#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 5, 0) + media_graph_walk_cleanup(&graph); +#endif + return ret; +} + +/* + * ipu_pipeline_pm_use - Update the use count of an entity + * @entity: The entity + * @use: Use (1) or stop using (0) the entity + * + * Update the use count of all 
entities in the pipeline and power entities + * on or off accordingly. + * + * Return 0 on success or a negative error code on failure. Powering entities + * off is assumed to never fail. No failure can occur when the use parameter is + * set to 0. + */ +int ipu_pipeline_pm_use(struct media_entity *entity, int use) +{ + int change = use ? 1 : -1; + int ret; + + mutex_lock(&entity-> +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 5, 0) + parent +#else + graph_obj.mdev +#endif + ->graph_mutex); + + /* Apply use count to node. */ + entity->use_count += change; + WARN_ON(entity->use_count < 0); + + /* Apply power change to connected non-nodes. */ + ret = ipu_pipeline_pm_power(entity, change, 0); + if (ret < 0) + entity->use_count -= change; + + mutex_unlock(&entity-> +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 5, 0) + parent +#else + graph_obj.mdev +#endif + ->graph_mutex); + + return ret; +} + +/* + * ipu_pipeline_link_notify - Link management notification callback + * @link: The link + * @flags: New link flags that will be applied + * @notification: The link's state change notification type + * (MEDIA_DEV_NOTIFY_*) + * + * React to link management on powered pipelines by updating the use count of + * all entities in the source and sink sides of the link. Entities are powered + * on or off accordingly. + * + * Return 0 on success or a negative error code on failure. Powering entities + * off is assumed to never fail. This function will not fail for disconnection + * events. + */ +static int ipu_pipeline_link_notify(struct media_link *link, u32 flags, + unsigned int notification) +{ + struct media_entity *source = link->source->entity; + struct media_entity *sink = link->sink->entity; + int source_use = ipu_pipeline_pm_use_count(link->source); + int sink_use = ipu_pipeline_pm_use_count(link->sink); + int ret; + + if (notification == MEDIA_DEV_NOTIFY_POST_LINK_CH && + !(flags & MEDIA_LNK_FL_ENABLED)) { + /* Powering off entities is assumed to never fail. 
*/ + ipu_pipeline_pm_power(source, -sink_use, 0); + ipu_pipeline_pm_power(sink, -source_use, 0); + return 0; + } + + if (notification == MEDIA_DEV_NOTIFY_PRE_LINK_CH && + (flags & MEDIA_LNK_FL_ENABLED)) { + int from_pad = ipu_get_linked_pad(source, link->source); + + ret = ipu_pipeline_pm_power(source, sink_use, from_pad); + if (ret < 0) + return ret; + + ret = ipu_pipeline_pm_power(sink, source_use, 0); + if (ret < 0) + ipu_pipeline_pm_power(source, -sink_use, 0); + + return ret; + } + + return 0; +} + +/* END adapted code from drivers/media/platform/omap3isp/isp.c */ +#endif /* < v4.6 */ + +struct isys_i2c_test { + u8 bus_nr; + u16 addr; + struct i2c_client *client; +}; + +static int isys_i2c_test(struct device *dev, void *priv) +{ + struct i2c_client *client = i2c_verify_client(dev); + struct isys_i2c_test *test = priv; + + if (!client) + return 0; + + if (i2c_adapter_id(client->adapter) != test->bus_nr || + client->addr != test->addr) + return 0; + + test->client = client; + + return 0; +} + +static struct +i2c_client *isys_find_i2c_subdev(struct i2c_adapter *adapter, + struct ipu_isys_subdev_info *sd_info) +{ + struct i2c_board_info *info = &sd_info->i2c.board_info; + struct isys_i2c_test test = { + .bus_nr = i2c_adapter_id(adapter), + .addr = info->addr, + }; + int rval; + + rval = i2c_for_each_dev(&test, isys_i2c_test); + if (rval || !test.client) + return NULL; + return test.client; +} + +static int +isys_complete_ext_device_registration(struct ipu_isys *isys, + struct v4l2_subdev *sd, + struct ipu_isys_csi2_config *csi2) +{ + unsigned int i; + int rval; + + v4l2_set_subdev_hostdata(sd, csi2); + + for (i = 0; i < sd->entity.num_pads; i++) { + if (sd->entity.pads[i].flags & MEDIA_PAD_FL_SOURCE) + break; + } + + if (i == sd->entity.num_pads) { + dev_warn(&isys->adev->dev, + "no source pad in external entity\n"); + rval = -ENOENT; + goto skip_unregister_subdev; + } + + rval = media_create_pad_link(&sd->entity, i, + &isys->csi2[csi2->port].asd.sd.entity, + 0, 
0); + if (rval) { + dev_warn(&isys->adev->dev, "can't create link\n"); + goto skip_unregister_subdev; + } + + isys->csi2[csi2->port].nlanes = csi2->nlanes; + return 0; + +skip_unregister_subdev: + v4l2_device_unregister_subdev(sd); + return rval; +} + +static int isys_register_ext_subdev(struct ipu_isys *isys, + struct ipu_isys_subdev_info *sd_info) +{ + struct i2c_adapter *adapter; + struct v4l2_subdev *sd; + struct i2c_client *client; + int rval; + int bus; + +#ifdef I2C_WA + bus = ipu_get_i2c_bus_id(sd_info->i2c.i2c_adapter_id); + if (bus < 0) { + dev_err(&isys->adev->dev, "Failed to find adapter!"); + return -ENOENT; + } +#else + bus = sd_info->i2c.i2c_adapter_id; +#endif + adapter = i2c_get_adapter(bus); + if (!adapter) { + dev_warn(&isys->adev->dev, "can't find adapter\n"); + return -ENOENT; + } + + dev_info(&isys->adev->dev, + "creating new i2c subdev for %s (address %2.2x, bus %d)", + sd_info->i2c.board_info.type, sd_info->i2c.board_info.addr, + bus); + + if (sd_info->csi2) { + dev_info(&isys->adev->dev, "sensor device on CSI port: %d\n", + sd_info->csi2->port); + if (sd_info->csi2->port >= isys->pdata->ipdata->csi2.nports || + !isys->csi2[sd_info->csi2->port].isys) { + dev_warn(&isys->adev->dev, "invalid csi2 port %u\n", + sd_info->csi2->port); + rval = -EINVAL; + goto skip_put_adapter; + } + } else { + dev_info(&isys->adev->dev, "non camera subdevice\n"); + } + + client = isys_find_i2c_subdev(adapter, sd_info); + if (client) { + dev_dbg(&isys->adev->dev, "Device exists\n"); + rval = 0; + goto skip_put_adapter; + } + + sd = v4l2_i2c_new_subdev_board(&isys->v4l2_dev, adapter, + &sd_info->i2c.board_info, NULL); + if (!sd) { + dev_warn(&isys->adev->dev, "can't create new i2c subdev\n"); + rval = -EINVAL; + goto skip_put_adapter; + } + + if (!sd_info->csi2) + return 0; + + return isys_complete_ext_device_registration(isys, sd, sd_info->csi2); + +skip_put_adapter: + i2c_put_adapter(adapter); + + return rval; +} + +static void isys_register_ext_subdevs(struct 
ipu_isys *isys) +{ + struct ipu_isys_subdev_pdata *spdata = isys->pdata->spdata; + struct ipu_isys_subdev_info **sd_info; + + if (!spdata) { + dev_info(&isys->adev->dev, "no subdevice info provided\n"); + return; + } + for (sd_info = spdata->subdevs; *sd_info; sd_info++) + isys_register_ext_subdev(isys, *sd_info); +} + +static void isys_unregister_subdevices(struct ipu_isys *isys) +{ + const struct ipu_isys_internal_tpg_pdata *tpg = + &isys->pdata->ipdata->tpg; + const struct ipu_isys_internal_csi2_pdata *csi2 = + &isys->pdata->ipdata->csi2; + unsigned int i; + + ipu_isys_csi2_be_cleanup(&isys->csi2_be); + ipu_isys_csi2_be_soc_cleanup(&isys->csi2_be_soc); + + ipu_isys_isa_cleanup(&isys->isa); + + for (i = 0; i < tpg->ntpgs; i++) + ipu_isys_tpg_cleanup(&isys->tpg[i]); + + for (i = 0; i < csi2->nports; i++) + ipu_isys_csi2_cleanup(&isys->csi2[i]); +} + +static int isys_register_subdevices(struct ipu_isys *isys) +{ + const struct ipu_isys_internal_tpg_pdata *tpg = + &isys->pdata->ipdata->tpg; + const struct ipu_isys_internal_csi2_pdata *csi2 = + &isys->pdata->ipdata->csi2; + struct ipu_isys_subdev_pdata *spdata = isys->pdata->spdata; + struct ipu_isys_subdev_info **sd_info; + DECLARE_BITMAP(csi2_enable, 32); + unsigned int i, j, k; + int rval; + + /* + * Here is somewhat a workaround, let each platform decide + * if csi2 port can be optimized, which means only registered + * port from pdata would be enabled. 
+ */ + if (csi2_port_optimized && spdata) { + bitmap_zero(csi2_enable, 32); + for (sd_info = spdata->subdevs; *sd_info; sd_info++) { + if ((*sd_info)->csi2) { + i = (*sd_info)->csi2->port; + if (i >= csi2->nports) { + dev_warn(&isys->adev->dev, + "invalid csi2 port %u\n", i); + continue; + } + bitmap_set(csi2_enable, i, 1); + } + } + } else { + bitmap_fill(csi2_enable, 32); + } + + isys->csi2 = devm_kcalloc(&isys->adev->dev, csi2->nports, + sizeof(*isys->csi2), GFP_KERNEL); + if (!isys->csi2) { + rval = -ENOMEM; + goto fail; + } + + for (i = 0; i < csi2->nports; i++) { + if (!test_bit(i, csi2_enable)) + continue; + + rval = ipu_isys_csi2_init(&isys->csi2[i], isys, + isys->pdata->base + + csi2->offsets[i], i); + if (rval) + goto fail; + + isys->isr_csi2_bits |= IPU_ISYS_UNISPART_IRQ_CSI2(i); + } + + isys->tpg = devm_kcalloc(&isys->adev->dev, tpg->ntpgs, + sizeof(*isys->tpg), GFP_KERNEL); + if (!isys->tpg) { + rval = -ENOMEM; + goto fail; + } + + for (i = 0; i < tpg->ntpgs; i++) { + rval = ipu_isys_tpg_init(&isys->tpg[i], isys, + isys->pdata->base + + tpg->offsets[i], + tpg->sels ? 
(isys->pdata->base + + tpg->sels[i]) : NULL, i); + if (rval) + goto fail; + } + + rval = ipu_isys_csi2_be_soc_init(&isys->csi2_be_soc, isys); + if (rval) { + dev_info(&isys->adev->dev, + "can't register soc csi2 be device\n"); + goto fail; + } + + rval = ipu_isys_csi2_be_init(&isys->csi2_be, isys); + if (rval) { + dev_info(&isys->adev->dev, + "can't register raw csi2 be device\n"); + goto fail; + } + rval = ipu_isys_isa_init(&isys->isa, isys, NULL); + if (rval) { + dev_info(&isys->adev->dev, "can't register isa device\n"); + goto fail; + } + + for (i = 0; i < csi2->nports; i++) { + if (!test_bit(i, csi2_enable)) + continue; + + for (j = CSI2_PAD_SOURCE(0); + j < (NR_OF_CSI2_SOURCE_PADS + CSI2_PAD_SOURCE(0)); j++) { + rval = + media_create_pad_link(&isys->csi2[i].asd.sd.entity, + j, + &isys->csi2_be.asd.sd.entity, + CSI2_BE_PAD_SINK, 0); + if (rval) { + dev_info(&isys->adev->dev, + "can't create link csi2 <=> csi2_be\n"); + goto fail; + } + + for (k = CSI2_BE_SOC_PAD_SINK(0); + k < NR_OF_CSI2_BE_SOC_SINK_PADS; k++) { + rval = + media_create_pad_link(&isys->csi2[i].asd.sd. + entity, j, + &isys->csi2_be_soc. + asd.sd.entity, k, + MEDIA_LNK_FL_DYNAMIC); + if (rval) { + dev_info(&isys->adev->dev, + "can't create link csi2->be_soc\n"); + goto fail; + } + } + } + } + + for (i = 0; i < tpg->ntpgs; i++) { + rval = media_create_pad_link(&isys->tpg[i].asd.sd.entity, + TPG_PAD_SOURCE, + &isys->csi2_be.asd.sd.entity, + CSI2_BE_PAD_SINK, 0); + if (rval) { + dev_info(&isys->adev->dev, + "can't create link between tpg and csi2_be\n"); + goto fail; + } + + for (k = CSI2_BE_SOC_PAD_SINK(0); + k < NR_OF_CSI2_BE_SOC_SINK_PADS; k++) { + rval = + media_create_pad_link(&isys->tpg[i].asd.sd.entity, + TPG_PAD_SOURCE, + &isys->csi2_be_soc.asd.sd. 
+ entity, k, + MEDIA_LNK_FL_DYNAMIC); + if (rval) { + dev_info(&isys->adev->dev, + "can't create link tpg->be_soc\n"); + goto fail; + } + } + } + + rval = media_create_pad_link(&isys->csi2_be.asd.sd.entity, + CSI2_BE_PAD_SOURCE, + &isys->isa.asd.sd.entity, ISA_PAD_SINK, 0); + if (rval) { + dev_info(&isys->adev->dev, + "can't create link between CSI2 raw be and ISA\n"); + goto fail; + } + return 0; + +fail: + isys_unregister_subdevices(isys); + return rval; +} + +static struct media_device_ops isys_mdev_ops = { +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 6, 0) + .link_notify = ipu_pipeline_link_notify, +#else + .link_notify = v4l2_pipeline_link_notify, +#endif + .req_alloc = ipu_isys_req_alloc, + .req_free = ipu_isys_req_free, + .req_queue = ipu_isys_req_queue, +}; + +static int isys_register_devices(struct ipu_isys *isys) +{ + int rval; + + isys->media_dev.dev = &isys->adev->dev; +#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 9, 12) + isys->media_dev.ops = &isys_mdev_ops; +#elif LINUX_VERSION_CODE < KERNEL_VERSION(4, 6, 0) + isys->media_dev.link_notify = ipu_pipeline_link_notify; +#else + isys->media_dev.link_notify = v4l2_pipeline_link_notify; +#endif + strlcpy(isys->media_dev.model, + IPU_MEDIA_DEV_MODEL_NAME, sizeof(isys->media_dev.model)); +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 14, 0) + isys->media_dev.driver_version = LINUX_VERSION_CODE; +#endif + snprintf(isys->media_dev.bus_info, sizeof(isys->media_dev.bus_info), + "pci:%s", dev_name(isys->adev->dev.parent->parent)); + strlcpy(isys->v4l2_dev.name, isys->media_dev.model, + sizeof(isys->v4l2_dev.name)); + +#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 5, 0) + media_device_init(&isys->media_dev); +#endif + + rval = media_device_register(&isys->media_dev); + if (rval < 0) { + dev_info(&isys->adev->dev, "can't register media device\n"); + goto out_media_device_unregister; + } + + isys->v4l2_dev.mdev = &isys->media_dev; + + rval = v4l2_device_register(&isys->adev->dev, &isys->v4l2_dev); + if (rval < 0) { + 
dev_info(&isys->adev->dev, "can't register v4l2 device\n"); + goto out_media_device_unregister; + } + + rval = isys_register_subdevices(isys); + if (rval) + goto out_v4l2_device_unregister; + + isys_register_ext_subdevs(isys); + + rval = v4l2_device_register_subdev_nodes(&isys->v4l2_dev); + if (rval) + goto out_isys_unregister_subdevices; + + return 0; + +out_isys_unregister_subdevices: + isys_unregister_subdevices(isys); + +out_v4l2_device_unregister: + v4l2_device_unregister(&isys->v4l2_dev); + +out_media_device_unregister: + media_device_unregister(&isys->media_dev); +#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 5, 0) + media_device_cleanup(&isys->media_dev); +#endif + + return rval; +} + +static void isys_unregister_devices(struct ipu_isys *isys) +{ + isys_unregister_subdevices(isys); + v4l2_device_unregister(&isys->v4l2_dev); + media_device_unregister(&isys->media_dev); +#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 5, 0) + media_device_cleanup(&isys->media_dev); +#endif +} + +#ifdef CONFIG_PM +static int isys_runtime_pm_resume(struct device *dev) +{ + struct ipu_bus_device *adev = to_ipu_bus_device(dev); + struct ipu_device *isp = adev->isp; + struct ipu_isys *isys = ipu_bus_get_drvdata(adev); + unsigned long flags; + int ret; + + if (!isys) { + WARN(1, "%s called before probing. 
skipping.\n", __func__); + return 0; + } + + ipu_trace_restore(dev); + + pm_qos_update_request(&isys->pm_qos, ISYS_PM_QOS_VALUE); + + ret = ipu_buttress_start_tsc_sync(isp); + if (ret) + return ret; + + spin_lock_irqsave(&isys->power_lock, flags); + isys->power = 1; + spin_unlock_irqrestore(&isys->power_lock, flags); + + if (isys->short_packet_source == IPU_ISYS_SHORT_PACKET_FROM_TUNIT) { + mutex_lock(&isys->short_packet_tracing_mutex); + isys->short_packet_tracing_count = 0; + mutex_unlock(&isys->short_packet_tracing_mutex); + } + isys_setup_hw(isys); + + return 0; +} + +static int isys_runtime_pm_suspend(struct device *dev) +{ + struct ipu_bus_device *adev = to_ipu_bus_device(dev); + struct ipu_isys *isys = ipu_bus_get_drvdata(adev); + unsigned long flags; + + if (!isys) { + WARN(1, "%s called before probing. skipping.\n", __func__); + return 0; + } + + spin_lock_irqsave(&isys->power_lock, flags); + isys->power = 0; + spin_unlock_irqrestore(&isys->power_lock, flags); + + ipu_trace_stop(dev); + mutex_lock(&isys->mutex); + isys->reset_needed = false; + mutex_unlock(&isys->mutex); + + pm_qos_update_request(&isys->pm_qos, PM_QOS_DEFAULT_VALUE); + + return 0; +} + +static int isys_suspend(struct device *dev) +{ + struct ipu_bus_device *adev = to_ipu_bus_device(dev); + struct ipu_isys *isys = ipu_bus_get_drvdata(adev); + + /* If stream is open, refuse to suspend */ + if (isys->stream_opened) + return -EBUSY; + + return 0; +} + +static int isys_resume(struct device *dev) +{ + return 0; +} + +static const struct dev_pm_ops isys_pm_ops = { + .runtime_suspend = isys_runtime_pm_suspend, + .runtime_resume = isys_runtime_pm_resume, + .suspend = isys_suspend, + .resume = isys_resume, +}; + +#define ISYS_PM_OPS (&isys_pm_ops) +#else +#define ISYS_PM_OPS NULL +#endif + +static void isys_remove(struct ipu_bus_device *adev) +{ + struct ipu_isys *isys = ipu_bus_get_drvdata(adev); + struct ipu_device *isp = adev->isp; + struct isys_fw_msgs *fwmsg, *safe; + + dev_info(&adev->dev, 
"removed\n"); + if (isp->ipu_dir) + debugfs_remove_recursive(isys->debugfsdir); + + list_for_each_entry_safe(fwmsg, safe, &isys->framebuflist, head) { + dma_free_attrs(&adev->dev, sizeof(struct isys_fw_msgs), + fwmsg, fwmsg->dma_addr, +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 8, 0) + NULL +#else + 0 +#endif + ); + } + + list_for_each_entry_safe(fwmsg, safe, &isys->framebuflist_fw, head) { + dma_free_attrs(&adev->dev, sizeof(struct isys_fw_msgs), + fwmsg, fwmsg->dma_addr, +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 8, 0) + NULL +#else + 0 +#endif + ); + } + + ipu_trace_uninit(&adev->dev); + isys_unregister_devices(isys); + pm_qos_remove_request(&isys->pm_qos); + + if (!isp->secure_mode) { + ipu_cpd_free_pkg_dir(adev, isys->pkg_dir, + isys->pkg_dir_dma_addr, + isys->pkg_dir_size); + ipu_buttress_unmap_fw_image(adev, &isys->fw_sgt); + release_firmware(isys->fw); + } + + mutex_destroy(&isys->stream_mutex); + mutex_destroy(&isys->mutex); + + if (isys->short_packet_source == IPU_ISYS_SHORT_PACKET_FROM_TUNIT) { + u32 trace_size = IPU_ISYS_SHORT_PACKET_TRACE_BUFFER_SIZE; +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 8, 0) + struct dma_attrs attrs; + + init_dma_attrs(&attrs); + dma_set_attr(DMA_ATTR_NON_CONSISTENT, &attrs); + dma_free_attrs(&adev->dev, trace_size, + isys->short_packet_trace_buffer, + isys->short_packet_trace_buffer_dma_addr, + &attrs); +#else + unsigned long attrs; + + attrs = DMA_ATTR_NON_CONSISTENT; + dma_free_attrs(&adev->dev, trace_size, + isys->short_packet_trace_buffer, + isys->short_packet_trace_buffer_dma_addr, attrs); +#endif + } +} + +static int ipu_isys_icache_prefetch_get(void *data, u64 *val) +{ + struct ipu_isys *isys = data; + + *val = isys->icache_prefetch; + return 0; +} + +static int ipu_isys_icache_prefetch_set(void *data, u64 val) +{ + struct ipu_isys *isys = data; + + if (val != !!val) + return -EINVAL; + + isys->icache_prefetch = val; + + return 0; +} + +DEFINE_SIMPLE_ATTRIBUTE(isys_icache_prefetch_fops, + ipu_isys_icache_prefetch_get, 
+ ipu_isys_icache_prefetch_set, "%llu\n"); + +static int ipu_isys_init_debugfs(struct ipu_isys *isys) +{ + struct dentry *file; + struct dentry *dir; + + dir = debugfs_create_dir("isys", isys->adev->isp->ipu_dir); + if (IS_ERR(dir)) + return -ENOMEM; + + file = debugfs_create_file("icache_prefetch", 0600, + dir, isys, &isys_icache_prefetch_fops); + if (IS_ERR(file)) + goto err; + + isys->debugfsdir = dir; + + + return 0; +err: + debugfs_remove_recursive(dir); + return -ENOMEM; +} + +static int alloc_fw_msg_buffers(struct ipu_isys *isys, int amount) +{ + dma_addr_t dma_addr; + struct isys_fw_msgs *addr; + unsigned int i; + unsigned long flags; + + for (i = 0; i < amount; i++) { + addr = dma_alloc_attrs(&isys->adev->dev, + sizeof(struct isys_fw_msgs), + &dma_addr, GFP_KERNEL, +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 8, 0) + NULL +#else + 0 +#endif + ); + if (!addr) + break; + addr->dma_addr = dma_addr; + + spin_lock_irqsave(&isys->listlock, flags); + list_add(&addr->head, &isys->framebuflist); + spin_unlock_irqrestore(&isys->listlock, flags); + } + if (i == amount) + return 0; + spin_lock_irqsave(&isys->listlock, flags); + while (!list_empty(&isys->framebuflist)) { + addr = list_first_entry(&isys->framebuflist, + struct isys_fw_msgs, head); + list_del(&addr->head); + spin_unlock_irqrestore(&isys->listlock, flags); + dma_free_attrs(&isys->adev->dev, + sizeof(struct isys_fw_msgs), + addr, addr->dma_addr, +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 8, 0) + NULL +#else + 0 +#endif + ); + spin_lock_irqsave(&isys->listlock, flags); + } + spin_unlock_irqrestore(&isys->listlock, flags); + return -ENOMEM; +} + +struct isys_fw_msgs *ipu_get_fw_msg_buf(struct ipu_isys_pipeline *ip) +{ + struct ipu_isys_video *pipe_av = + container_of(ip, struct ipu_isys_video, ip); + struct ipu_isys *isys; + struct isys_fw_msgs *msg; + unsigned long flags; + + isys = pipe_av->isys; + + spin_lock_irqsave(&isys->listlock, flags); + if (list_empty(&isys->framebuflist)) { + 
spin_unlock_irqrestore(&isys->listlock, flags); + dev_dbg(&isys->adev->dev, "Frame list empty - Allocate more"); + + alloc_fw_msg_buffers(isys, 5); + + spin_lock_irqsave(&isys->listlock, flags); + if (list_empty(&isys->framebuflist)) { + dev_err(&isys->adev->dev, "Frame list empty"); + spin_unlock_irqrestore(&isys->listlock, flags); + return NULL; + } + } + msg = list_last_entry(&isys->framebuflist, struct isys_fw_msgs, head); + list_move(&msg->head, &isys->framebuflist_fw); + spin_unlock_irqrestore(&isys->listlock, flags); + memset(&msg->fw_msg, 0, sizeof(msg->fw_msg)); + + return msg; +} + +void ipu_cleanup_fw_msg_bufs(struct ipu_isys *isys) +{ + struct isys_fw_msgs *fwmsg, *fwmsg0; + unsigned long flags; + + spin_lock_irqsave(&isys->listlock, flags); + list_for_each_entry_safe(fwmsg, fwmsg0, &isys->framebuflist_fw, head) + list_move(&fwmsg->head, &isys->framebuflist); + spin_unlock_irqrestore(&isys->listlock, flags); +} + +void ipu_put_fw_mgs_buffer(struct ipu_isys *isys, u64 data) +{ + struct isys_fw_msgs *msg; + u64 *ptr = (u64 *)(unsigned long)data; + + if (!ptr) + return; + + spin_lock(&isys->listlock); + msg = container_of(ptr, struct isys_fw_msgs, fw_msg.dummy); + list_move(&msg->head, &isys->framebuflist); + spin_unlock(&isys->listlock); +} +EXPORT_SYMBOL_GPL(ipu_put_fw_mgs_buffer); + +static int isys_probe(struct ipu_bus_device *adev) +{ + struct ipu_mmu *mmu = dev_get_drvdata(adev->iommu); + struct ipu_isys *isys; + struct ipu_device *isp = adev->isp; +#if defined(CONFIG_VIDEO_INTEL_IPU4) || defined(CONFIG_VIDEO_INTEL_IPU4P) + const u32 trace_size = IPU_ISYS_SHORT_PACKET_TRACE_BUFFER_SIZE; + dma_addr_t *trace_dma_addr; +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 8, 0) + struct dma_attrs attrs; +#else + unsigned long attrs; +#endif +#endif + const struct firmware *uninitialized_var(fw); + int rval = 0; + + trace_printk("B|%d|TMWK\n", current->pid); + + /* Has the domain been attached? 
*/ + if (!mmu || !isp->pkg_dir_dma_addr) { + trace_printk("E|TMWK\n"); + return -EPROBE_DEFER; + } + + isys = devm_kzalloc(&adev->dev, sizeof(*isys), GFP_KERNEL); + if (!isys) + return -ENOMEM; + + /* By default, short packet is captured from T-Unit. */ +#if defined(CONFIG_VIDEO_INTEL_IPU4) || defined(CONFIG_VIDEO_INTEL_IPU4P) + isys->short_packet_source = IPU_ISYS_SHORT_PACKET_FROM_TUNIT; + trace_dma_addr = &isys->short_packet_trace_buffer_dma_addr; + mutex_init(&isys->short_packet_tracing_mutex); +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 8, 0) + init_dma_attrs(&attrs); + dma_set_attr(DMA_ATTR_NON_CONSISTENT, &attrs); + isys->short_packet_trace_buffer = + dma_alloc_attrs(&adev->dev, trace_size, trace_dma_addr, + GFP_KERNEL, &attrs); +#else + attrs = DMA_ATTR_NON_CONSISTENT; + isys->short_packet_trace_buffer = + dma_alloc_attrs(&adev->dev, trace_size, trace_dma_addr, + GFP_KERNEL, attrs); +#endif + if (!isys->short_packet_trace_buffer) + return -ENOMEM; +#else + isys->short_packet_source = IPU_ISYS_SHORT_PACKET_FROM_RECEIVER; +#endif + isys->adev = adev; + isys->pdata = adev->pdata; + + INIT_LIST_HEAD(&isys->requests); + + spin_lock_init(&isys->lock); + spin_lock_init(&isys->power_lock); + isys->power = 0; + + mutex_init(&isys->mutex); + mutex_init(&isys->stream_mutex); + mutex_init(&isys->lib_mutex); + + spin_lock_init(&isys->listlock); + INIT_LIST_HEAD(&isys->framebuflist); + INIT_LIST_HEAD(&isys->framebuflist_fw); + + dev_info(&adev->dev, "isys probe %p %p\n", adev, &adev->dev); + ipu_bus_set_drvdata(adev, isys); + + isys->line_align = IPU_ISYS_2600_MEM_LINE_ALIGN; +#ifdef CONFIG_VIDEO_INTEL_IPU4 + isys->icache_prefetch = is_ipu_hw_bxtp_e0(isp); +#else + isys->icache_prefetch = 0; +#endif + +#ifndef CONFIG_PM + isys_setup_hw(isys); +#endif + + if (!isp->secure_mode) { + fw = isp->cpd_fw; + rval = ipu_buttress_map_fw_image(adev, fw, &isys->fw_sgt); + if (rval) + goto release_firmware; + + isys->pkg_dir = ipu_cpd_create_pkg_dir(adev, isp->cpd_fw->data, + 
sg_dma_address(isys-> + fw_sgt. + sgl), + &isys->pkg_dir_dma_addr, + &isys->pkg_dir_size); + if (!isys->pkg_dir) { + rval = -ENOMEM; + goto remove_shared_buffer; + } + } + + /* Debug fs failure is not fatal. */ + ipu_isys_init_debugfs(isys); + + ipu_trace_init(adev->isp, isys->pdata->base, &adev->dev, + isys_trace_blocks); + + pm_qos_add_request(&isys->pm_qos, PM_QOS_CPU_DMA_LATENCY, + PM_QOS_DEFAULT_VALUE); + alloc_fw_msg_buffers(isys, 20); + + pm_runtime_allow(&adev->dev); + pm_runtime_enable(&adev->dev); + + rval = isys_register_devices(isys); + if (rval) + goto out_remove_pkg_dir_shared_buffer; + + trace_printk("E|TMWK\n"); + return 0; + +out_remove_pkg_dir_shared_buffer: + if (!isp->secure_mode) + ipu_cpd_free_pkg_dir(adev, isys->pkg_dir, + isys->pkg_dir_dma_addr, + isys->pkg_dir_size); +remove_shared_buffer: + if (!isp->secure_mode) + ipu_buttress_unmap_fw_image(adev, &isys->fw_sgt); +release_firmware: + if (!isp->secure_mode) + release_firmware(isys->fw); + ipu_trace_uninit(&adev->dev); + + trace_printk("E|TMWK\n"); + + mutex_destroy(&isys->mutex); + mutex_destroy(&isys->stream_mutex); + + if (isys->short_packet_source == IPU_ISYS_SHORT_PACKET_FROM_TUNIT) { + mutex_destroy(&isys->short_packet_tracing_mutex); +#if defined(CONFIG_VIDEO_INTEL_IPU4) || defined(CONFIG_VIDEO_INTEL_IPU4P) + dma_free_attrs(&adev->dev, trace_size, + isys->short_packet_trace_buffer, + isys->short_packet_trace_buffer_dma_addr, +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 8, 0) + &attrs); +#else + attrs); +#endif +#endif + } + + return rval; +} + +struct fwmsg { + int type; + char *msg; + bool valid_ts; +}; + +static const struct fwmsg fw_msg[] = { + {IPU_FW_ISYS_RESP_TYPE_STREAM_OPEN_DONE, "STREAM_OPEN_DONE", 0}, + {IPU_FW_ISYS_RESP_TYPE_STREAM_CLOSE_ACK, "STREAM_CLOSE_ACK", 0}, + {IPU_FW_ISYS_RESP_TYPE_STREAM_START_ACK, "STREAM_START_ACK", 0}, + {IPU_FW_ISYS_RESP_TYPE_STREAM_START_AND_CAPTURE_ACK, + "STREAM_START_AND_CAPTURE_ACK", 0}, + {IPU_FW_ISYS_RESP_TYPE_STREAM_STOP_ACK, 
"STREAM_STOP_ACK", 0}, + {IPU_FW_ISYS_RESP_TYPE_STREAM_FLUSH_ACK, "STREAM_FLUSH_ACK", 0}, + {IPU_FW_ISYS_RESP_TYPE_PIN_DATA_READY, "PIN_DATA_READY", 1}, + {IPU_FW_ISYS_RESP_TYPE_STREAM_CAPTURE_ACK, "STREAM_CAPTURE_ACK", 0}, + {IPU_FW_ISYS_RESP_TYPE_STREAM_START_AND_CAPTURE_DONE, + "STREAM_START_AND_CAPTURE_DONE", 1}, + {IPU_FW_ISYS_RESP_TYPE_STREAM_CAPTURE_DONE, "STREAM_CAPTURE_DONE", 1}, + {IPU_FW_ISYS_RESP_TYPE_FRAME_SOF, "FRAME_SOF", 1}, + {IPU_FW_ISYS_RESP_TYPE_FRAME_EOF, "FRAME_EOF", 1}, + {IPU_FW_ISYS_RESP_TYPE_STATS_DATA_READY, "STATS_READY", 1}, + {-1, "UNKNOWN MESSAGE", 0}, +}; + +static int resp_type_to_index(int type) +{ + unsigned int i; + + for (i = 0; i < ARRAY_SIZE(fw_msg); i++) + if (fw_msg[i].type == type) + return i; + + return i - 1; +} + +int isys_isr_one(struct ipu_bus_device *adev) +{ + struct ipu_isys *isys = ipu_bus_get_drvdata(adev); + struct ipu_fw_isys_resp_info_abi resp_data; + struct ipu_fw_isys_resp_info_abi *resp; + struct ipu_isys_pipeline *pipe; + u64 ts; + unsigned int i; + + if (!isys->fwcom) + return 0; + + resp = ipu_fw_isys_get_resp(isys->fwcom, IPU_BASE_MSG_RECV_QUEUES, + &resp_data); + if (!resp) + return 1; + + ts = (u64) resp->timestamp[1] << 32 | resp->timestamp[0]; + + if (resp->error_info.error == IPU_FW_ISYS_ERROR_STREAM_IN_SUSPENSION) + /* Suspension is kind of special case: not enough buffers */ + dev_dbg(&adev->dev, + "hostlib: error resp %02d %s, stream %u, error SUSPENSION, details %d, timestamp 0x%16.16llx, pin %d\n", + resp->type, + fw_msg[resp_type_to_index(resp->type)].msg, + resp->stream_handle, + resp->error_info.error_details, + fw_msg[resp_type_to_index(resp->type)].valid_ts ? 
+ ts : 0, resp->pin_id); + else if (resp->error_info.error) + dev_dbg(&adev->dev, + "hostlib: error resp %02d %s, stream %u, error %d, details %d, timestamp 0x%16.16llx, pin %d\n", + resp->type, + fw_msg[resp_type_to_index(resp->type)].msg, + resp->stream_handle, + resp->error_info.error, resp->error_info.error_details, + fw_msg[resp_type_to_index(resp->type)].valid_ts ? + ts : 0, resp->pin_id); + else + dev_dbg(&adev->dev, + "hostlib: resp %02d %s, stream %u, timestamp 0x%16.16llx, pin %d\n", + resp->type, + fw_msg[resp_type_to_index(resp->type)].msg, + resp->stream_handle, + fw_msg[resp_type_to_index(resp->type)].valid_ts ? + ts : 0, resp->pin_id); + + if (resp->stream_handle >= IPU_ISYS_MAX_STREAMS) { + dev_err(&adev->dev, "bad stream handle %u\n", + resp->stream_handle); + goto leave; + } + + pipe = isys->pipes[resp->stream_handle]; + if (!pipe) { + dev_err(&adev->dev, "no pipeline for stream %u\n", + resp->stream_handle); + goto leave; + } + pipe->error = resp->error_info.error; + + switch (resp->type) { + case IPU_FW_ISYS_RESP_TYPE_STREAM_OPEN_DONE: + ipu_put_fw_mgs_buffer(ipu_bus_get_drvdata(adev), resp->buf_id); + complete(&pipe->stream_open_completion); + break; + case IPU_FW_ISYS_RESP_TYPE_STREAM_CLOSE_ACK: + complete(&pipe->stream_close_completion); + break; + case IPU_FW_ISYS_RESP_TYPE_STREAM_START_ACK: + complete(&pipe->stream_start_completion); + break; + case IPU_FW_ISYS_RESP_TYPE_STREAM_START_AND_CAPTURE_ACK: + ipu_put_fw_mgs_buffer(ipu_bus_get_drvdata(adev), resp->buf_id); + complete(&pipe->stream_start_completion); + break; + case IPU_FW_ISYS_RESP_TYPE_STREAM_STOP_ACK: + complete(&pipe->stream_stop_completion); + break; + case IPU_FW_ISYS_RESP_TYPE_STREAM_FLUSH_ACK: + complete(&pipe->stream_stop_completion); + break; + case IPU_FW_ISYS_RESP_TYPE_PIN_DATA_READY: + if (resp->pin_id < IPU_ISYS_OUTPUT_PINS && + pipe->output_pins[resp->pin_id].pin_ready) + pipe->output_pins[resp->pin_id].pin_ready(pipe, resp); + else + dev_err(&adev->dev, + "%d:No data 
pin ready handler for pin id %d\n", + resp->stream_handle, resp->pin_id); + break; + case IPU_FW_ISYS_RESP_TYPE_STREAM_CAPTURE_ACK: + ipu_put_fw_mgs_buffer(ipu_bus_get_drvdata(adev), resp->buf_id); + complete(&pipe->capture_ack_completion); + break; + case IPU_FW_ISYS_RESP_TYPE_STREAM_START_AND_CAPTURE_DONE: + case IPU_FW_ISYS_RESP_TYPE_STREAM_CAPTURE_DONE: + if (pipe->interlaced) { + struct ipu_isys_buffer *ib, *ib_safe; + struct list_head list; + unsigned long flags; + + if (pipe->isys->short_packet_source == + IPU_ISYS_SHORT_PACKET_FROM_TUNIT) + pipe->cur_field = + ipu_isys_csi2_get_current_field(pipe, + resp-> + timestamp); + /* + * Move the pending buffers to a local temp list. + * Then we do not need to handle the lock during + * the loop. + */ + spin_lock_irqsave(&pipe->short_packet_queue_lock, + flags); + list_cut_position(&list, + &pipe->pending_interlaced_bufs, + pipe->pending_interlaced_bufs.prev); + spin_unlock_irqrestore(&pipe->short_packet_queue_lock, + flags); + + list_for_each_entry_safe(ib, ib_safe, &list, head) { + struct vb2_buffer *vb; + + vb = ipu_isys_buffer_to_vb2_buffer(ib); +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 4, 0) + vb->v4l2_buf.field = pipe->cur_field; +#else + to_vb2_v4l2_buffer(vb)->field = pipe->cur_field; +#endif + list_del(&ib->head); + + ipu_isys_queue_buf_done(ib); + } + } + for (i = 0; i < IPU_NUM_CAPTURE_DONE; i++) + if (pipe->capture_done[i]) + pipe->capture_done[i] (pipe, resp); + + break; + case IPU_FW_ISYS_RESP_TYPE_FRAME_SOF: + pipe->seq[pipe->seq_index].sequence = + atomic_read(&pipe->sequence) - 1; + pipe->seq[pipe->seq_index].timestamp = ts; + dev_dbg(&adev->dev, + "sof: handle %d: (index %u), timestamp 0x%16.16llx\n", + resp->stream_handle, + pipe->seq[pipe->seq_index].sequence, ts); + pipe->seq_index = (pipe->seq_index + 1) + % IPU_ISYS_MAX_PARALLEL_SOF; + break; + case IPU_FW_ISYS_RESP_TYPE_FRAME_EOF: + + + dev_dbg(&adev->dev, + "eof: handle %d: (index %u), timestamp 0x%16.16llx\n", + resp->stream_handle, + 
pipe->seq[pipe->seq_index].sequence, ts); + break; + case IPU_FW_ISYS_RESP_TYPE_STATS_DATA_READY: + break; + default: + dev_err(&adev->dev, "%d:unknown response type %u\n", + resp->stream_handle, resp->type); + break; + } + +leave: + ipu_fw_isys_put_resp(isys->fwcom, IPU_BASE_MSG_RECV_QUEUES); + return 0; +} + +static void isys_isr_poll(struct ipu_bus_device *adev) +{ + struct ipu_isys *isys = ipu_bus_get_drvdata(adev); + + if (!isys->fwcom) { + dev_dbg(&isys->adev->dev, + "got interrupt but device not configured yet\n"); + return; + } + + mutex_lock(&isys->mutex); + isys_isr(adev); + mutex_unlock(&isys->mutex); +} + +int ipu_isys_isr_run(void *ptr) +{ + struct ipu_isys *isys = ptr; + + while (!kthread_should_stop()) { + usleep_range(500, 1000); + if (isys->stream_opened) + isys_isr_poll(isys->adev); + } + + return 0; +} + +static struct ipu_bus_driver isys_driver = { + .probe = isys_probe, + .remove = isys_remove, + .isr = isys_isr, + .wanted = IPU_ISYS_NAME, + .drv = { + .name = IPU_ISYS_NAME, + .owner = THIS_MODULE, + .pm = ISYS_PM_OPS, + }, +}; + +module_ipu_bus_driver(isys_driver); + +static const struct pci_device_id ipu_pci_tbl[] = { + {PCI_DEVICE(PCI_VENDOR_ID_INTEL, IPU_PCI_ID)}, + {0,} +}; +MODULE_DEVICE_TABLE(pci, ipu_pci_tbl); + +MODULE_AUTHOR("Sakari Ailus "); +MODULE_AUTHOR("Samu Onkalo "); +MODULE_AUTHOR("Jouni Högander "); +MODULE_AUTHOR("Jouni Ukkonen "); +MODULE_AUTHOR("Jianxu Zheng "); +MODULE_AUTHOR("Tianshu Qiu "); +MODULE_AUTHOR("Renwei Wu "); +MODULE_AUTHOR("Bingbu Cao "); +MODULE_AUTHOR("Yunliang Ding "); +MODULE_AUTHOR("Zaikuo Wang "); +MODULE_AUTHOR("Leifu Zhao "); +MODULE_AUTHOR("Xia Wu "); +MODULE_AUTHOR("Kun Jiang "); +MODULE_AUTHOR("Yu Xia "); +MODULE_AUTHOR("Jerry Hu "); +MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("Intel ipu input system driver"); diff --git a/drivers/media/pci/intel/ipu-isys.h b/drivers/media/pci/intel/ipu-isys.h new file mode 100644 index 0000000000000..847961062c9fa --- /dev/null +++ 
b/drivers/media/pci/intel/ipu-isys.h @@ -0,0 +1,178 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (C) 2013 - 2018 Intel Corporation */ + +#ifndef IPU_ISYS_H +#define IPU_ISYS_H + +#include +#include + +#include +#include + +#include + +#include "ipu.h" +#include "ipu-isys-media.h" +#include "ipu-isys-csi2.h" +#include "ipu-isys-csi2-be.h" +#include "ipu-isys-tpg.h" +#include "ipu-isys-video.h" +#include "ipu-pdata.h" +#include "ipu-fw-isys.h" +#include "ipu-platform-isys.h" + +#define IPU_ISYS_2600_MEM_LINE_ALIGN 64 + +/* for TPG */ +#define IPU_ISYS_FREQ 533000000UL + +/* + * Current message queue configuration. These must be big enough + * so that they never gets full. Queues are located in system memory + */ +#define IPU_ISYS_SIZE_RECV_QUEUE 40 +#define IPU_ISYS_SIZE_SEND_QUEUE 40 +#define IPU_ISYS_SIZE_PROXY_RECV_QUEUE 5 +#define IPU_ISYS_SIZE_PROXY_SEND_QUEUE 5 +#define IPU_ISYS_NUM_RECV_QUEUE 1 + +/* + * Device close takes some time from last ack message to actual stopping + * of the SP processor. As long as the SP processor runs we can't proceed with + * clean up of resources. + */ +#define IPU_ISYS_OPEN_TIMEOUT_US 1000 +#define IPU_ISYS_OPEN_RETRY 1000 +#define IPU_ISYS_TURNOFF_DELAY_US 1000 +#define IPU_ISYS_TURNOFF_TIMEOUT 1000 +#define IPU_LIB_CALL_TIMEOUT_JIFFIES \ + msecs_to_jiffies(IPU_LIB_CALL_TIMEOUT_MS) + +#define IPU_ISYS_CSI2_LONG_PACKET_HEADER_SIZE 32 +#define IPU_ISYS_CSI2_LONG_PACKET_FOOTER_SIZE 32 + +#define IPU_ISYS_MIN_WIDTH 1U +#define IPU_ISYS_MIN_HEIGHT 1U +#define IPU_ISYS_MAX_WIDTH 16384U +#define IPU_ISYS_MAX_HEIGHT 16384U + +struct task_struct; + +/* + * struct ipu_isys + * + * @media_dev: Media device + * @v4l2_dev: V4L2 device + * @adev: ISYS bus device + * @power: Is ISYS powered on or not? + * @isr_bits: Which bits does the ISR handle? 
+ * @power_lock: Serialise access to power (power state in general) + * @csi2_rx_ctrl_cached: cached shared value between all CSI2 receivers + * @lock: serialise access to pipes + * @pipes: pipelines per stream ID + * @fwcom: fw communication layer private pointer + * or optional external library private pointer + * @line_align: line alignment in memory + * @reset_needed: Isys requires d0i0->i3 transition + * @video_opened: total number of opened file handles on video nodes + * @mutex: serialise access isys video open/release related operations + * @stream_mutex: serialise stream start and stop, queueing requests + * @lib_mutex: optional external library mutex + * @pdata: platform data pointer + * @csi2: CSI-2 receivers + * @tpg: test pattern generators + * @csi2_be: CSI-2 back-ends + * @isa: Input system accelerator + * @fw: ISYS firmware binary (unsecure firmware) + * @fw_sgt: fw scatterlist + * @pkg_dir: host pointer to pkg_dir + * @pkg_dir_dma_addr: I/O virtual address for pkg_dir + * @pkg_dir_size: size of pkg_dir in bytes + * @short_packet_source: select short packet capture mode + */ +struct ipu_isys { + struct media_device media_dev; + struct v4l2_device v4l2_dev; + struct ipu_bus_device *adev; + + int power; + spinlock_t power_lock; /* Serialise access to power */ + u32 isr_csi2_bits; + u32 csi2_rx_ctrl_cached; + spinlock_t lock; /* Serialise access to pipes */ + struct ipu_isys_pipeline *pipes[IPU_ISYS_MAX_STREAMS]; + void *fwcom; + unsigned int line_align; + bool reset_needed; + bool icache_prefetch; + bool csi2_cse_ipc_not_supported; + unsigned int video_opened; + unsigned int stream_opened; +#if !defined(CONFIG_VIDEO_INTEL_IPU4) && !defined(CONFIG_VIDEO_INTEL_IPU4P) + unsigned int sensor_types[N_IPU_FW_ISYS_SENSOR_TYPE]; +#endif + + struct dentry *debugfsdir; + struct mutex mutex; /* Serialise isys video open/release related */ + struct mutex stream_mutex; /* Stream start, stop, queueing reqs */ + struct mutex lib_mutex; /* Serialise optional external 
library mutex */ + + struct ipu_isys_pdata *pdata; + + struct ipu_isys_csi2 *csi2; + struct ipu_isys_tpg *tpg; + struct ipu_isys_isa isa; + struct ipu_isys_csi2_be csi2_be; + struct ipu_isys_csi2_be_soc csi2_be_soc; + + const struct firmware *fw; + struct sg_table fw_sgt; + + u64 *pkg_dir; + dma_addr_t pkg_dir_dma_addr; + unsigned int pkg_dir_size; + + struct list_head requests; + struct pm_qos_request pm_qos; + unsigned int short_packet_source; + struct ipu_isys_csi2_monitor_message *short_packet_trace_buffer; + dma_addr_t short_packet_trace_buffer_dma_addr; + unsigned int short_packet_tracing_count; + struct mutex short_packet_tracing_mutex; /* For tracing count */ + u64 tsc_timer_base; + u64 tunit_timer_base; + spinlock_t listlock; /* Protect framebuflist */ + struct list_head framebuflist; + struct list_head framebuflist_fw; +}; + +struct isys_fw_msgs { + union { + u64 dummy; + struct ipu_fw_isys_frame_buff_set_abi frame; + struct ipu_fw_isys_stream_cfg_data_abi stream; + } fw_msg; + struct list_head head; + dma_addr_t dma_addr; +}; + +#define to_frame_msg_buf(a) (&(a)->fw_msg.frame) +#define to_stream_cfg_msg_buf(a) (&(a)->fw_msg.stream) +#define to_dma_addr(a) ((a)->dma_addr) + +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 6, 0) +int ipu_pipeline_pm_use(struct media_entity *entity, int use); +#endif +struct isys_fw_msgs *ipu_get_fw_msg_buf(struct ipu_isys_pipeline *ip); +void ipu_put_fw_mgs_buffer(struct ipu_isys *isys, u64 data); +void ipu_cleanup_fw_msg_bufs(struct ipu_isys *isys); + +extern const struct v4l2_ioctl_ops ipu_isys_ioctl_ops; + +void isys_setup_hw(struct ipu_isys *isys); +int isys_isr_one(struct ipu_bus_device *adev); +int ipu_isys_isr_run(void *ptr); +irqreturn_t isys_isr(struct ipu_bus_device *adev); + +#endif /* IPU_ISYS_H */ diff --git a/drivers/media/pci/intel/ipu-mmu.c b/drivers/media/pci/intel/ipu-mmu.c new file mode 100644 index 0000000000000..7718732fe5e6b --- /dev/null +++ b/drivers/media/pci/intel/ipu-mmu.c @@ -0,0 +1,876 @@ +// 
SPDX-License-Identifier: GPL-2.0 +// Copyright (C) 2013 - 2018 Intel Corporation + +#include + +#include +#include +#include +#include +#include +#include + +#include "ipu.h" +#include "ipu-platform.h" +#include "ipu-bus.h" +#include "ipu-dma.h" +#include "ipu-mmu.h" +#include "ipu-platform-regs.h" + +#define ISP_PAGE_SHIFT 12 +#define ISP_PAGE_SIZE BIT(ISP_PAGE_SHIFT) +#define ISP_PAGE_MASK (~(ISP_PAGE_SIZE - 1)) + +#define ISP_L1PT_SHIFT 22 +#define ISP_L1PT_MASK (~((1U << ISP_L1PT_SHIFT) - 1)) + +#define ISP_L2PT_SHIFT 12 +#define ISP_L2PT_MASK (~(ISP_L1PT_MASK | (~(ISP_PAGE_MASK)))) + +#define ISP_L1PT_PTES 1024 +#define ISP_L2PT_PTES 1024 + +#define ISP_PADDR_SHIFT 12 + +#define REG_TLB_INVALIDATE 0x0000 + +#define MMU0_TLB_INVALIDATE 1 + +#define MMU1_TLB_INVALIDATE 0xffff + +#define REG_L1_PHYS 0x0004 /* 27-bit pfn */ +#define REG_INFO 0x0008 + +/* The range of stream ID i in L1 cache is from 0 to 15 */ +#define MMUV2_REG_L1_STREAMID(i) (0x0c + ((i) * 4)) + +/* The range of stream ID i in L2 cache is from 0 to 15 */ +#define MMUV2_REG_L2_STREAMID(i) (0x4c + ((i) * 4)) + +/* ZLW Enable for each stream in L1 MMU AT where i : 0..15 */ +#define MMUV2_AT_REG_L1_ZLW_EN_SID(i) (0x100 + ((i) * 0x20)) + +/* ZLW 1D mode Enable for each stream in L1 MMU AT where i : 0..15 */ +#define MMUV2_AT_REG_L1_ZLW_1DMODE_SID(i) (0x100 + ((i) * 0x20) + 0x0004) + +/* Set ZLW insertion N pages ahead per stream 1D where i : 0..15 */ +#define MMUV2_AT_REG_L1_ZLW_INS_N_AHEAD_SID(i) (0x100 + ((i) * 0x20) + 0x0008) + +/* ZLW 2D mode Enable for each stream in L1 MMU AT where i : 0..15 */ +#define MMUV2_AT_REG_L1_ZLW_2DMODE_SID(i) (0x100 + ((i) * 0x20) + 0x0010) + +/* ZLW Insertion for each stream in L1 MMU AT where i : 0..15 */ +#define MMUV2_AT_REG_L1_ZLW_INSERTION(i) (0x100 + ((i) * 0x20) + 0x000c) + +#define MMUV2_AT_REG_L1_FW_ZLW_FIFO (0x100 + \ + (IPU_MMU_MAX_TLB_L1_STREAMS * 0x20) + 0x003c) + +/* FW ZLW has prioty - needed for ZLW invalidations */ +#define 
MMUV2_AT_REG_L1_FW_ZLW_PRIO (0x100 + \ + (IPU_MMU_MAX_TLB_L1_STREAMS * 0x20)) + +#define TBL_PHYS_ADDR(a) ((phys_addr_t)(a) << ISP_PADDR_SHIFT) +#define TBL_VIRT_ADDR(a) phys_to_virt(TBL_PHYS_ADDR(a)) + +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 1, 0) +#define to_ipu_mmu_domain(dom) ((dom)->priv) +#else +#define to_ipu_mmu_domain(dom) \ + container_of(dom, struct ipu_mmu_domain, domain) +#endif + +static void zlw_invalidate(struct ipu_mmu *mmu, struct ipu_mmu_hw *mmu_hw) +{ + unsigned int retry = 0; + unsigned int i, j; + int ret; + + for (i = 0; i < mmu_hw->nr_l1streams; i++) { + /* We need to invalidate only the zlw enabled stream IDs */ + if (mmu_hw->l1_zlw_en[i]) { + /* + * Maximum 16 blocks per L1 stream + * Write trash buffer iova offset to the FW_ZLW + * register. This will trigger pre-fetching of next 16 + * pages from the page table. So we need to increment + * iova address by 16 * 4K to trigger the next 16 pages. + * Once this loop is completed, the L1 cache will be + * filled with trash buffer translation. + * + * TODO: Instead of maximum 16 blocks, use the allocated + * block size + */ + for (j = 0; j < mmu_hw->l1_block_sz[i]; j++) + writel(mmu->iova_addr_trash + + j * MMUV2_TRASH_L1_BLOCK_OFFSET, + mmu_hw->base + + MMUV2_AT_REG_L1_ZLW_INSERTION(i)); + + /* + * Now we need to fill the L2 cache entry. L2 cache + * entries will be automatically updated, based on the + * L1 entry. The above loop for L1 will update only one + * of the two entries in L2 as the L1 is under 4MB + * range. To force the other entry in L2 to update, we + * just need to trigger another pre-fetch which is + * outside the above 4MB range. + */ + writel(mmu->iova_addr_trash + + MMUV2_TRASH_L2_BLOCK_OFFSET, + mmu_hw->base + + MMUV2_AT_REG_L1_ZLW_INSERTION(0)); + } + } + + /* + * Wait until AT is ready. FIFO read should return 2 when AT is ready. + * Retry value of 1000 is just by guess work to avoid the forever loop. 
+ */ + do { + if (retry > 1000) { + dev_err(mmu->dev, "zlw invalidation failed\n"); + return; + } + ret = readl(mmu_hw->base + MMUV2_AT_REG_L1_FW_ZLW_FIFO); + retry++; + } while (ret != 2); +} + +static void tlb_invalidate(struct ipu_mmu *mmu) +{ + unsigned int i; + unsigned long flags; + + spin_lock_irqsave(&mmu->ready_lock, flags); + if (!mmu->ready) { + spin_unlock_irqrestore(&mmu->ready_lock, flags); + return; + } + + for (i = 0; i < mmu->nr_mmus; i++) { + u32 inv; + + /* + * To avoid the HW bug induced dead lock in some of the IPU4 + * MMUs on successive invalidate calls, we need to first do a + * read to the page table base before writing the invalidate + * register. MMUs which need to implement this WA, will have + * the insert_read_before_invalidate flag set as true. + * Disregard the return value of the read. + */ + if (mmu->mmu_hw[i].insert_read_before_invalidate) + readl(mmu->mmu_hw[i].base + REG_L1_PHYS); + + /* Normal invalidate or zlw invalidate */ + if (mmu->mmu_hw[i].zlw_invalidate) { + /* trash buffer must be mapped by now, just in case! 
*/ + WARN_ON(!mmu->iova_addr_trash); + + zlw_invalidate(mmu, &mmu->mmu_hw[i]); + } else { + if (mmu->mmu_hw[i].nr_l1streams == 32) + inv = 0xffffffff; + else if (mmu->mmu_hw[i].nr_l1streams == 0) + inv = MMU0_TLB_INVALIDATE; + else + inv = MMU1_TLB_INVALIDATE; + writel(inv, mmu->mmu_hw[i].base + + REG_TLB_INVALIDATE); + } + } + spin_unlock_irqrestore(&mmu->ready_lock, flags); +} + +#ifdef DEBUG +static void page_table_dump(struct ipu_mmu_domain *adom) +{ + u32 l1_idx; + + pr_debug("begin IOMMU page table dump\n"); + + for (l1_idx = 0; l1_idx < ISP_L1PT_PTES; l1_idx++) { + u32 l2_idx; + u32 iova = (phys_addr_t) l1_idx << ISP_L1PT_SHIFT; + + if (adom->pgtbl[l1_idx] == adom->dummy_l2_tbl) + continue; + pr_debug("l1 entry %u; iovas 0x%8.8x--0x%8.8x, at %p\n", + l1_idx, iova, iova + ISP_PAGE_SIZE, + (void *)TBL_PHYS_ADDR(adom->pgtbl[l1_idx])); + + for (l2_idx = 0; l2_idx < ISP_L2PT_PTES; l2_idx++) { + u32 *l2_pt = TBL_VIRT_ADDR(adom->pgtbl[l1_idx]); + u32 iova2 = iova + (l2_idx << ISP_L2PT_SHIFT); + + if (l2_pt[l2_idx] == adom->dummy_page) + continue; + + pr_debug("\tl2 entry %u; iova 0x%8.8x, phys %p\n", + l2_idx, iova2, + (void *)TBL_PHYS_ADDR(l2_pt[l2_idx])); + } + } + + pr_debug("end IOMMU page table dump\n"); +} +#endif /* DEBUG */ + +static u32 *alloc_page_table(struct ipu_mmu_domain *adom, bool l1) +{ + u32 *pt = (u32 *) __get_free_page(GFP_KERNEL | GFP_DMA32); + int i; + + if (!pt) + return NULL; + + pr_debug("__get_free_page() == %p\n", pt); + + for (i = 0; i < ISP_L1PT_PTES; i++) + pt[i] = l1 ? 
adom->dummy_l2_tbl : adom->dummy_page; + + return pt; +} + +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 1, 0) +static int ipu_mmu_domain_init(struct iommu_domain *domain) +{ + struct ipu_mmu_domain *adom; + void *ptr; + + adom = kzalloc(sizeof(*adom), GFP_KERNEL); + if (!adom) + return -ENOMEM; + + domain->priv = adom; + adom->domain = domain; + + ptr = (void *)__get_free_page(GFP_KERNEL | GFP_DMA32); + if (!ptr) + goto err; + + adom->dummy_page = virt_to_phys(ptr) >> ISP_PAGE_SHIFT; + + ptr = alloc_page_table(adom, false); + if (!ptr) + goto err; + + adom->dummy_l2_tbl = virt_to_phys(ptr) >> ISP_PAGE_SHIFT; + + /* + * We always map the L1 page table (a single page as well as + * the L2 page tables). + */ + adom->pgtbl = alloc_page_table(adom, true); + if (!adom->pgtbl) + goto err; + + spin_lock_init(&adom->lock); + + pr_debug("domain initialised\n"); + pr_debug("ops %p\n", domain->ops); + + return 0; + +err: + free_page((unsigned long)TBL_VIRT_ADDR(adom->dummy_page)); + free_page((unsigned long)TBL_VIRT_ADDR(adom->dummy_l2_tbl)); + kfree(adom); + + return -ENOMEM; +} +#else +static struct iommu_domain *ipu_mmu_domain_alloc(unsigned int type) +{ + struct ipu_mmu_domain *adom; + void *ptr; + + if (type != IOMMU_DOMAIN_UNMANAGED) + return NULL; + + adom = kzalloc(sizeof(*adom), GFP_KERNEL); + if (!adom) + return NULL; + + adom->domain.geometry.aperture_start = 0; + adom->domain.geometry.aperture_end = DMA_BIT_MASK(IPU_MMU_ADDRESS_BITS); + adom->domain.geometry.force_aperture = true; + + ptr = (void *)__get_free_page(GFP_KERNEL | GFP_DMA32); + if (!ptr) + goto err_mem; + + adom->dummy_page = virt_to_phys(ptr) >> ISP_PAGE_SHIFT; + + ptr = alloc_page_table(adom, false); + if (!ptr) + goto err; + + adom->dummy_l2_tbl = virt_to_phys(ptr) >> ISP_PAGE_SHIFT; + + /* + * We always map the L1 page table (a single page as well as + * the L2 page tables). 
+ */ + adom->pgtbl = alloc_page_table(adom, true); + if (!adom->pgtbl) + goto err; + + spin_lock_init(&adom->lock); + + pr_debug("domain initialised\n"); + pr_debug("ops %p\n", adom->domain.ops); + + return &adom->domain; + +err: + free_page((unsigned long)TBL_VIRT_ADDR(adom->dummy_page)); + free_page((unsigned long)TBL_VIRT_ADDR(adom->dummy_l2_tbl)); +err_mem: + kfree(adom); + + return NULL; +} +#endif + +static void ipu_mmu_domain_destroy(struct iommu_domain *domain) +{ + struct ipu_mmu_domain *adom = to_ipu_mmu_domain(domain); + struct iova *iova; + u32 l1_idx; + + if (adom->iova_addr_trash) { + iova = find_iova(&adom->dmap->iovad, adom->iova_addr_trash >> + PAGE_SHIFT); + /* unmap and free the corresponding trash buffer iova */ + iommu_unmap(domain, iova->pfn_lo << PAGE_SHIFT, + (iova->pfn_hi - iova->pfn_lo + 1) << PAGE_SHIFT); + __free_iova(&adom->dmap->iovad, iova); + + /* + * Set iova_addr_trash in mmu to 0, so that on next HW init + * this will be mapped again. + */ + adom->iova_addr_trash = 0; + } + + for (l1_idx = 0; l1_idx < ISP_L1PT_PTES; l1_idx++) + if (adom->pgtbl[l1_idx] != adom->dummy_l2_tbl) + free_page((unsigned long) + TBL_VIRT_ADDR(adom->pgtbl[l1_idx])); + + free_page((unsigned long)TBL_VIRT_ADDR(adom->dummy_page)); + free_page((unsigned long)TBL_VIRT_ADDR(adom->dummy_l2_tbl)); + free_page((unsigned long)adom->pgtbl); + kfree(adom); +} + +static int ipu_mmu_attach_dev(struct iommu_domain *domain, struct device *dev) +{ + struct ipu_mmu_domain *adom = to_ipu_mmu_domain(domain); + + spin_lock(&adom->lock); + + adom->users++; + + dev_dbg(dev, "domain attached\n"); + + spin_unlock(&adom->lock); + + return 0; +} + +static void ipu_mmu_detach_dev(struct iommu_domain *domain, struct device *dev) +{ + struct ipu_mmu_domain *adom = to_ipu_mmu_domain(domain); + + spin_lock(&adom->lock); + + adom->users--; + dev_dbg(dev, "domain detached\n"); + + spin_unlock(&adom->lock); +} + +static int l2_map(struct iommu_domain *domain, unsigned long iova, + 
phys_addr_t paddr, size_t size) +{ + struct ipu_mmu_domain *adom = to_ipu_mmu_domain(domain); + u32 l1_idx = iova >> ISP_L1PT_SHIFT; + u32 l1_entry = adom->pgtbl[l1_idx]; + u32 *l2_pt; + u32 iova_start = iova; + unsigned int l2_idx; + unsigned long flags; + + pr_debug("mapping l2 page table for l1 index %u (iova %8.8x)\n", + l1_idx, (u32) iova); + + if (l1_entry == adom->dummy_l2_tbl) { + u32 *l2_virt = alloc_page_table(adom, false); + + if (!l2_virt) + return -ENOMEM; + + l1_entry = virt_to_phys(l2_virt) >> ISP_PADDR_SHIFT; + pr_debug("allocated page for l1_idx %u\n", l1_idx); + + spin_lock_irqsave(&adom->lock, flags); + if (adom->pgtbl[l1_idx] == adom->dummy_l2_tbl) { + adom->pgtbl[l1_idx] = l1_entry; +#ifdef CONFIG_X86 + clflush_cache_range(&adom->pgtbl[l1_idx], + sizeof(adom->pgtbl[l1_idx])); +#endif /* CONFIG_X86 */ + } else { + spin_unlock_irqrestore(&adom->lock, flags); + free_page((unsigned long)TBL_VIRT_ADDR(l1_entry)); + spin_lock_irqsave(&adom->lock, flags); + } + } else { + spin_lock_irqsave(&adom->lock, flags); + } + + l2_pt = TBL_VIRT_ADDR(adom->pgtbl[l1_idx]); + + pr_debug("l2_pt at %p\n", l2_pt); + + paddr = ALIGN(paddr, ISP_PAGE_SIZE); + + l2_idx = (iova_start & ISP_L2PT_MASK) >> ISP_L2PT_SHIFT; + + pr_debug("l2_idx %u, phys 0x%8.8x\n", l2_idx, l2_pt[l2_idx]); + if (l2_pt[l2_idx] != adom->dummy_page) { + spin_unlock_irqrestore(&adom->lock, flags); + return -EBUSY; + } + + l2_pt[l2_idx] = paddr >> ISP_PADDR_SHIFT; + + spin_unlock_irqrestore(&adom->lock, flags); + +#ifdef CONFIG_X86 + clflush_cache_range(&l2_pt[l2_idx], sizeof(l2_pt[l2_idx])); +#endif /* CONFIG_X86 */ + + pr_debug("l2 index %u mapped as 0x%8.8x\n", l2_idx, l2_pt[l2_idx]); + + return 0; +} + +static int ipu_mmu_map(struct iommu_domain *domain, unsigned long iova, + phys_addr_t paddr, size_t size, int prot) +{ + u32 iova_start = round_down(iova, ISP_PAGE_SIZE); + u32 iova_end = ALIGN(iova + size, ISP_PAGE_SIZE); + + pr_debug + ("mapping iova 0x%8.8x--0x%8.8x, size %zu at paddr 
0x%10.10llx\n", + iova_start, iova_end, size, paddr); + + return l2_map(domain, iova_start, paddr, size); +} + +static size_t l2_unmap(struct iommu_domain *domain, unsigned long iova, + phys_addr_t dummy, size_t size) +{ + struct ipu_mmu_domain *adom = to_ipu_mmu_domain(domain); + u32 l1_idx = iova >> ISP_L1PT_SHIFT; + u32 *l2_pt = TBL_VIRT_ADDR(adom->pgtbl[l1_idx]); + u32 iova_start = iova; + unsigned int l2_idx; + size_t unmapped = 0; + + pr_debug("unmapping l2 page table for l1 index %u (iova 0x%8.8lx)\n", + l1_idx, iova); + + if (adom->pgtbl[l1_idx] == adom->dummy_l2_tbl) + return -EINVAL; + + pr_debug("l2_pt at %p\n", l2_pt); + + for (l2_idx = (iova_start & ISP_L2PT_MASK) >> ISP_L2PT_SHIFT; + (iova_start & ISP_L1PT_MASK) + (l2_idx << ISP_PAGE_SHIFT) + < iova_start + size && l2_idx < ISP_L2PT_PTES; l2_idx++) { + unsigned long flags; + + pr_debug("l2 index %u unmapped, was 0x%10.10llx\n", + l2_idx, TBL_PHYS_ADDR(l2_pt[l2_idx])); + spin_lock_irqsave(&adom->lock, flags); + l2_pt[l2_idx] = adom->dummy_page; + spin_unlock_irqrestore(&adom->lock, flags); +#ifdef CONFIG_X86 + clflush_cache_range(&l2_pt[l2_idx], sizeof(l2_pt[l2_idx])); +#endif /* CONFIG_X86 */ + unmapped++; + } + + return unmapped << ISP_PAGE_SHIFT; +} + +static size_t ipu_mmu_unmap(struct iommu_domain *domain, + unsigned long iova, size_t size) +{ + return l2_unmap(domain, iova, 0, size); +} + +static phys_addr_t ipu_mmu_iova_to_phys(struct iommu_domain *domain, + dma_addr_t iova) +{ + struct ipu_mmu_domain *adom = to_ipu_mmu_domain(domain); + u32 *l2_pt = TBL_VIRT_ADDR(adom->pgtbl[iova >> ISP_L1PT_SHIFT]); + + return (phys_addr_t) l2_pt[(iova & ISP_L2PT_MASK) >> ISP_L2PT_SHIFT] + << ISP_PAGE_SHIFT; +} + +static int allocate_trash_buffer(struct ipu_bus_device *adev) +{ + struct ipu_mmu *mmu = ipu_bus_get_drvdata(adev); + unsigned int n_pages = PAGE_ALIGN(IPU_MMUV2_TRASH_RANGE) >> PAGE_SHIFT; + struct iova *iova; + u32 iova_addr; + unsigned int i; + int ret; + + /* Allocate 8MB in iova range */ + iova 
= alloc_iova(&mmu->dmap->iovad, n_pages, + dma_get_mask(mmu->dev) >> PAGE_SHIFT, 0); + if (!iova) { + dev_err(&adev->dev, "cannot allocate iova range for trash\n"); + return -ENOMEM; + } + + /* + * Map the 8MB iova address range to the same physical trash page + * mmu->trash_page which is already reserved at the probe + */ + iova_addr = iova->pfn_lo; + for (i = 0; i < n_pages; i++) { + ret = iommu_map(mmu->dmap->domain, iova_addr << PAGE_SHIFT, + page_to_phys(mmu->trash_page), PAGE_SIZE, 0); + if (ret) { + dev_err(&adev->dev, + "mapping trash buffer range failed\n"); + goto out_unmap; + } + + iova_addr++; + } + + /* save the address for the ZLW invalidation */ + mmu->iova_addr_trash = iova->pfn_lo << PAGE_SHIFT; + dev_info(&adev->dev, "iova trash buffer for MMUID: %d is %u\n", + mmu->mmid, (unsigned int)mmu->iova_addr_trash); + return 0; + +out_unmap: + iommu_unmap(mmu->dmap->domain, iova->pfn_lo << PAGE_SHIFT, + (iova->pfn_hi - iova->pfn_lo + 1) << PAGE_SHIFT); + __free_iova(&mmu->dmap->iovad, iova); + return ret; +} + +static int ipu_mmu_hw_init(struct device *dev) +{ + struct ipu_bus_device *adev = to_ipu_bus_device(dev); + struct ipu_mmu *mmu = ipu_bus_get_drvdata(adev); + struct ipu_mmu_domain *adom; + unsigned int i; + unsigned long flags; + + dev_dbg(dev, "mmu hw init\n"); + /* + * FIXME: following fix for null pointer check is not a complete one. + * if mmu is not powered cycled before being used, the page table + * address will still not be set into HW. + */ + if (!mmu->dmap) { + dev_warn(dev, "mmu is not ready yet. 
skipping.\n"); + return 0; + } + adom = to_ipu_mmu_domain(mmu->dmap->domain); + + adom->dmap = mmu->dmap; + + /* Initialise the each MMU HW block */ + for (i = 0; i < mmu->nr_mmus; i++) { + struct ipu_mmu_hw *mmu_hw = &mmu->mmu_hw[i]; +#if defined(CONFIG_VIDEO_INTEL_IPU4) || defined(CONFIG_VIDEO_INTEL_IPU4P) + bool zlw_invalidate = false; +#endif + unsigned int j; + u16 block_addr; + + /* Write page table address per MMU */ + writel((phys_addr_t) virt_to_phys(adom->pgtbl) + >> ISP_PADDR_SHIFT, + mmu->mmu_hw[i].base + REG_L1_PHYS); + + /* Set info bits per MMU */ + writel(mmu->mmu_hw[i].info_bits, + mmu->mmu_hw[i].base + REG_INFO); + + /* Configure MMU TLB stream configuration for L1 */ + for (j = 0, block_addr = 0; j < mmu_hw->nr_l1streams; + block_addr += mmu->mmu_hw[i].l1_block_sz[j], j++) { + if (block_addr > IPU_MAX_LI_BLOCK_ADDR) { + dev_err(dev, "invalid L1 configuration\n"); + return -EINVAL; + } + + /* Write block start address for each streams */ + writel(block_addr, mmu_hw->base + + mmu_hw->l1_stream_id_reg_offset + 4 * j); + +#if defined(CONFIG_VIDEO_INTEL_IPU4) || defined(CONFIG_VIDEO_INTEL_IPU4P) + /* Enable ZLW for streams based on the init table */ + writel(mmu->mmu_hw[i].l1_zlw_en[j], + mmu_hw->base + + MMUV2_AT_REG_L1_ZLW_EN_SID(j)); + + /* To track if zlw is enabled in any streams */ + zlw_invalidate |= mmu->mmu_hw[i].l1_zlw_en[j]; + + /* Enable ZLW 1D mode for streams from the init table */ + writel(mmu->mmu_hw[i].l1_zlw_1d_mode[j], + mmu_hw->base + + MMUV2_AT_REG_L1_ZLW_1DMODE_SID(j)); + + /* Set when the ZLW insertion will happen */ + writel(mmu->mmu_hw[i].l1_ins_zlw_ahead_pages[j], + mmu_hw->base + + MMUV2_AT_REG_L1_ZLW_INS_N_AHEAD_SID(j)); + + /* Set if ZLW 2D mode active for each streams */ + writel(mmu->mmu_hw[i].l1_zlw_2d_mode[j], + mmu_hw->base + + MMUV2_AT_REG_L1_ZLW_2DMODE_SID(j)); +#endif + } + +#if defined(CONFIG_VIDEO_INTEL_IPU4) || defined(CONFIG_VIDEO_INTEL_IPU4P) + /* + * If ZLW invalidate is enabled even for one stream in a MMU1, 
+ * we need to set the FW ZLW operations have higher priority + * on that MMU1 + */ + if (zlw_invalidate) + writel(1, mmu_hw->base + + MMUV2_AT_REG_L1_FW_ZLW_PRIO); +#endif + /* Configure MMU TLB stream configuration for L2 */ + for (j = 0, block_addr = 0; j < mmu_hw->nr_l2streams; + block_addr += mmu->mmu_hw[i].l2_block_sz[j], j++) { + if (block_addr > IPU_MAX_L2_BLOCK_ADDR) { + dev_err(dev, "invalid L2 configuration\n"); + return -EINVAL; + } + + writel(block_addr, mmu_hw->base + + mmu_hw->l2_stream_id_reg_offset + 4 * j); + } + } + + /* Allocate trash buffer, if not allocated. Only once per MMU */ + if (!mmu->iova_addr_trash) { + int ret; + + ret = allocate_trash_buffer(adev); + if (ret) { + dev_err(dev, "trash buffer allocation failed\n"); + return ret; + } + + /* + * Update the domain pointer to trash buffer to release it on + * domain destroy + */ + adom->iova_addr_trash = mmu->iova_addr_trash; + } + + spin_lock_irqsave(&mmu->ready_lock, flags); + mmu->ready = true; + spin_unlock_irqrestore(&mmu->ready_lock, flags); + + return 0; +} + +static void set_mapping(struct ipu_mmu *mmu, struct ipu_dma_mapping *dmap) +{ + mmu->dmap = dmap; + + if (!dmap) + return; + + pm_runtime_get_sync(mmu->dev); + ipu_mmu_hw_init(mmu->dev); + pm_runtime_put(mmu->dev); +} + +static int ipu_mmu_add_device(struct device *dev) +{ + struct device *aiommu = to_ipu_bus_device(dev)->iommu; + struct ipu_dma_mapping *dmap; + int rval; + + if (!aiommu || !dev->iommu_group) + return 0; + + dmap = iommu_group_get_iommudata(dev->iommu_group); + if (!dmap) + return 0; + + pr_debug("attach dev %s\n", dev_name(dev)); + + rval = iommu_attach_device(dmap->domain, dev); + if (rval) + return rval; + + kref_get(&dmap->ref); + + return 0; +} + +static struct iommu_ops ipu_iommu_ops = { +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 1, 0) + .domain_init = ipu_mmu_domain_init, + .domain_destroy = ipu_mmu_domain_destroy, +#else + .domain_alloc = ipu_mmu_domain_alloc, + .domain_free = ipu_mmu_domain_destroy, 
+#endif + .attach_dev = ipu_mmu_attach_dev, + .detach_dev = ipu_mmu_detach_dev, + .map = ipu_mmu_map, + .unmap = ipu_mmu_unmap, + .iova_to_phys = ipu_mmu_iova_to_phys, + .add_device = ipu_mmu_add_device, + .pgsize_bitmap = SZ_4K, +}; + +static int ipu_mmu_probe(struct ipu_bus_device *adev) +{ + struct ipu_mmu_pdata *pdata; + struct ipu_mmu *mmu; + int rval; + + mmu = devm_kzalloc(&adev->dev, sizeof(*mmu), GFP_KERNEL); + if (!mmu) + return -ENOMEM; + + dev_dbg(&adev->dev, "mmu probe %p %p\n", adev, &adev->dev); + ipu_bus_set_drvdata(adev, mmu); + + rval = ipu_bus_set_iommu(&ipu_iommu_ops); + if (rval) + return rval; + + pdata = adev->pdata; + + mmu->mmid = pdata->mmid; + + mmu->mmu_hw = pdata->mmu_hw; + mmu->nr_mmus = pdata->nr_mmus; + mmu->tlb_invalidate = tlb_invalidate; + mmu->set_mapping = set_mapping; + mmu->dev = &adev->dev; + mmu->ready = false; + spin_lock_init(&mmu->ready_lock); + + /* + * Allocate 1 page of physical memory for the trash buffer + * + * TODO! Could be further optimized by allocating only one page per ipu + * instance instead of per mmu + */ + mmu->trash_page = alloc_page(GFP_KERNEL); + if (!mmu->trash_page) { + dev_err(&adev->dev, "insufficient memory for trash buffer\n"); + return -ENOMEM; + } + dev_info(&adev->dev, "MMU: %d, allocated page for trash: 0x%p\n", + mmu->mmid, mmu->trash_page); + + pm_runtime_allow(&adev->dev); + pm_runtime_enable(&adev->dev); + + /* + * FIXME: We can't unload this --- bus_set_iommu() will + * register a notifier which must stay until the devices are + * gone. + */ + __module_get(THIS_MODULE); + + return 0; +} + +/* + * Leave iommu ops as they were --- this means we must be called as + * the very last. 
+ */ +static void ipu_mmu_remove(struct ipu_bus_device *adev) +{ + struct ipu_mmu *mmu = ipu_bus_get_drvdata(adev); + + __free_page(mmu->trash_page); + dev_dbg(&adev->dev, "removed\n"); +} + +static irqreturn_t ipu_mmu_isr(struct ipu_bus_device *adev) +{ + dev_info(&adev->dev, "Yeah!\n"); + return IRQ_NONE; +} + +#ifdef CONFIG_PM +static int ipu_mmu_suspend(struct device *dev) +{ + struct ipu_bus_device *adev = to_ipu_bus_device(dev); + struct ipu_mmu *mmu = ipu_bus_get_drvdata(adev); + unsigned long flags; + + spin_lock_irqsave(&mmu->ready_lock, flags); + mmu->ready = false; + spin_unlock_irqrestore(&mmu->ready_lock, flags); + + return 0; +} + +static const struct dev_pm_ops ipu_mmu_pm_ops = { + .resume = ipu_mmu_hw_init, + .suspend = ipu_mmu_suspend, + .runtime_resume = ipu_mmu_hw_init, + .runtime_suspend = ipu_mmu_suspend, +}; + +#define IPU_MMU_PM_OPS (&ipu_mmu_pm_ops) + +#else /* !CONFIG_PM */ + +#define IPU_MMU_PM_OPS NULL + +#endif /* !CONFIG_PM */ + +static struct ipu_bus_driver ipu_mmu_driver = { + .probe = ipu_mmu_probe, + .remove = ipu_mmu_remove, + .isr = ipu_mmu_isr, + .wanted = IPU_MMU_NAME, + .drv = { + .name = IPU_MMU_NAME, + .owner = THIS_MODULE, + .pm = IPU_MMU_PM_OPS, + }, +}; +module_ipu_bus_driver(ipu_mmu_driver); + +static const struct pci_device_id ipu_pci_tbl[] = { + {PCI_DEVICE(PCI_VENDOR_ID_INTEL, IPU_PCI_ID)}, + {0,} +}; +MODULE_DEVICE_TABLE(pci, ipu_pci_tbl); + +MODULE_AUTHOR("Sakari Ailus "); +MODULE_AUTHOR("Samu Onkalo "); +MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("Intel ipu mmu driver"); diff --git a/drivers/media/pci/intel/ipu-mmu.h b/drivers/media/pci/intel/ipu-mmu.h new file mode 100644 index 0000000000000..0e8863a2f024e --- /dev/null +++ b/drivers/media/pci/intel/ipu-mmu.h @@ -0,0 +1,62 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (C) 2013 - 2018 Intel Corporation */ + +#ifndef IPU_MMU_H +#define IPU_MMU_H + +#include + +#include "ipu.h" +#include "ipu-pdata.h" + +#define ISYS_MMID 1 +#define PSYS_MMID 0 + +/* + * 
@pgtbl: virtual address of the l1 page table (one page) + */ +struct ipu_mmu_domain { + u32 __iomem *pgtbl; +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 1, 0) + struct iommu_domain *domain; +#else + struct iommu_domain domain; +#endif + spinlock_t lock; /* Serialize access to users */ + unsigned int users; + struct ipu_dma_mapping *dmap; + u32 dummy_l2_tbl; + u32 dummy_page; + + /* Reference to the trash address to unmap on domain destroy */ + dma_addr_t iova_addr_trash; +}; + +/* + * @pgtbl: physical address of the l1 page table + */ +struct ipu_mmu { + struct list_head node; + unsigned int users; + + struct ipu_mmu_hw *mmu_hw; + unsigned int nr_mmus; + int mmid; + + phys_addr_t pgtbl; + struct device *dev; + + struct ipu_dma_mapping *dmap; + + struct page *trash_page; + dma_addr_t iova_addr_trash; + + bool ready; + spinlock_t ready_lock; /* Serialize access to bool ready */ + + void (*tlb_invalidate)(struct ipu_mmu *mmu); + void (*set_mapping)(struct ipu_mmu *mmu, + struct ipu_dma_mapping *dmap); +}; + +#endif diff --git a/drivers/media/pci/intel/ipu-pdata.h b/drivers/media/pci/intel/ipu-pdata.h new file mode 100644 index 0000000000000..66f111266f055 --- /dev/null +++ b/drivers/media/pci/intel/ipu-pdata.h @@ -0,0 +1,283 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (C) 2013 - 2018 Intel Corporation */ + +#ifndef IPU_PDATA_H +#define IPU_PDATA_H + +#define IPU_MMU_NAME IPU_NAME "-mmu" +#define IPU_ISYS_CSI2_NAME IPU_NAME "-csi2" +#define IPU_ISYS_NAME IPU_NAME "-isys" +#define IPU_PSYS_NAME IPU_NAME "-psys" +#define IPU_BUTTRESS_NAME IPU_NAME "-buttress" + +#define IPU_MMU_MAX_DEVICES 4 +#define IPU_MMU_ADDRESS_BITS 32 +/* The firmware is accessible within the first 2 GiB only in non-secure mode. 
*/ +#define IPU_MMU_ADDRESS_BITS_NON_SECURE 31 + +#if defined(CONFIG_VIDEO_INTEL_IPU4) || defined(CONFIG_VIDEO_INTEL_IPU4P) +#define IPU_MMU_MAX_TLB_L1_STREAMS 16 +#define IPU_MMU_MAX_TLB_L2_STREAMS 16 +#define IPU_MAX_LI_BLOCK_ADDR 64 +#define IPU_MAX_L2_BLOCK_ADDR 32 +#else +#define IPU_MMU_MAX_TLB_L1_STREAMS 32 +#define IPU_MMU_MAX_TLB_L2_STREAMS 32 +#define IPU_MAX_LI_BLOCK_ADDR 128 +#define IPU_MAX_L2_BLOCK_ADDR 64 +#endif + +#define IPU_ISYS_MAX_CSI2_LEGACY_PORTS 4 +#define IPU_ISYS_MAX_CSI2_COMBO_PORTS 2 + +#define IPU_MAX_FRAME_COUNTER 0xff + +/* + * To maximize the IOSF utlization, IPU need to send requests in bursts. + * At the DMA interface with the buttress, there are CDC FIFOs with burst + * collection capability. CDC FIFO burst collectors have a configurable + * threshold and is configured based on the outcome of performance measurements. + * + * isys has 3 ports with IOSF interface for VC0, VC1 and VC2 + * psys has 4 ports with IOSF interface for VC0, VC1w, VC1r and VC2 + * + * Threshold values are pre-defined and are arrived at after performance + * evaluations on a type of IPU4 + */ +#define IPU_MAX_VC_IOSF_PORTS 4 + +/* + * IPU must configure correct arbitration mechanism related to the IOSF VC + * requests. There are two options per VC0 and VC1 - > 0 means rearbitrate on + * stall and 1 means stall until the request is completed. + */ +#define IPU_BTRS_ARB_MODE_TYPE_REARB 0 +#define IPU_BTRS_ARB_MODE_TYPE_STALL 1 + +/* Currently chosen arbitration mechanism for VC0 */ +#define IPU_BTRS_ARB_STALL_MODE_VC0 \ + IPU_BTRS_ARB_MODE_TYPE_REARB + +/* Currently chosen arbitration mechanism for VC1 */ +#define IPU_BTRS_ARB_STALL_MODE_VC1 \ + IPU_BTRS_ARB_MODE_TYPE_REARB + +struct ipu_isys_subdev_pdata; + +/* + * MMU Invalidation HW bug workaround by ZLW mechanism + * + * IPU4 MMUV2 has a bug in the invalidation mechanism which might result in + * wrong translation or replication of the translation. This will cause data + * corruption. 
So we cannot directly use the MMU V2 invalidation registers + * to invalidate the MMU. Instead, whenever an invalidate is called, we need to + * clear the TLB by evicting all the valid translations by filling it with trash + * buffer (which is guaranteed not to be used by any other processes). ZLW is + * used to fill the L1 and L2 caches with the trash buffer translations. ZLW + * or Zero length write, is pre-fetch mechanism to pre-fetch the pages in + * advance to the L1 and L2 caches without triggering any memory operations. + * + * In MMU V2, L1 -> 16 streams and 64 blocks, maximum 16 blocks per stream + * One L1 block has 16 entries, hence points to 16 * 4K pages + * L2 -> 16 streams and 32 blocks. 2 blocks per streams + * One L2 block maps to 1024 L1 entries, hence points to 4MB address range + * 2 blocks per L2 stream means, 1 stream points to 8MB range + * + * As we need to clear the caches and 8MB being the biggest cache size, we need + * to have trash buffer which points to 8MB address range. As these trash + * buffers are not used for any memory transactions, we need only the least + * amount of physical memory. So we reserve 8MB IOVA address range but only + * one page is reserved from physical memory. Each of this 8MB IOVA address + * range is then mapped to the same physical memory page. + */ +/* One L2 entry maps 1024 L1 entries and one L1 entry per page */ +#define IPU_MMUV2_L2_RANGE (1024 * PAGE_SIZE) +/* Max L2 blocks per stream */ +#define IPU_MMUV2_MAX_L2_BLOCKS 2 +/* Max L1 blocks per stream */ +#define IPU_MMUV2_MAX_L1_BLOCKS 16 +#define IPU_MMUV2_TRASH_RANGE (IPU_MMUV2_L2_RANGE * \ + IPU_MMUV2_MAX_L2_BLOCKS) +/* Entries per L1 block */ +#define MMUV2_ENTRIES_PER_L1_BLOCK 16 +#define MMUV2_TRASH_L1_BLOCK_OFFSET (MMUV2_ENTRIES_PER_L1_BLOCK * \ + PAGE_SIZE) +#define MMUV2_TRASH_L2_BLOCK_OFFSET IPU_MMUV2_L2_RANGE + +/* + * In some of the IPU4 MMUs, there is provision to configure L1 and L2 page + * table caches. 
Both these L1 and L2 caches are divided into multiple sections + * called streams. There is maximum 16 streams for both caches. Each of these + * sections are subdivided into multiple blocks. When nr_l1streams = 0 and + * nr_l2streams = 0, means the MMU is of type MMU_V1 and do not support + * L1/L2 page table caches. + * + * L1 stream per block sizes are configurable and varies per usecase. + * L2 has constant block sizes - 2 blocks per stream. + * + * MMU1 support pre-fetching of the pages to have less cache lookup misses. To + * enable the pre-fetching, MMU1 AT (Address Translator) device registers + * need to be configured. + * + * There are four types of memory accesses which requires ZLW configuration. + * ZLW(Zero Length Write) is a mechanism to enable VT-d pre-fetching on IOMMU. + * + * 1. Sequential Access or 1D mode + * Set ZLW_EN -> 1 + * set ZLW_PAGE_CROSS_1D -> 1 + * Set ZLW_N to "N" pages so that ZLW will be inserte N pages ahead where + * N is pre-defined and hardcoded in the platform data + * Set ZLW_2D -> 0 + * + * 2. ZLW 2D mode + * Set ZLW_EN -> 1 + * set ZLW_PAGE_CROSS_1D -> 1, + * Set ZLW_N -> 0 + * Set ZLW_2D -> 1 + * + * 3. ZLW Enable (no 1D or 2D mode) + * Set ZLW_EN -> 1 + * set ZLW_PAGE_CROSS_1D -> 0, + * Set ZLW_N -> 0 + * Set ZLW_2D -> 0 + * + * 4. ZLW disable + * Set ZLW_EN -> 0 + * set ZLW_PAGE_CROSS_1D -> 0, + * Set ZLW_N -> 0 + * Set ZLW_2D -> 0 + * + * To configure the ZLW for the above memory access, four registers are + * available. Hence to track these four settings, we have the following entries + * in the struct ipu_mmu_hw. Each of these entries are per stream and + * available only for the L1 streams. + * + * a. l1_zlw_en -> To track zlw enabled per stream (ZLW_EN) + * b. l1_zlw_1d_mode -> Track 1D mode per stream. ZLW inserted at page boundary + * c. l1_ins_zlw_ahead_pages -> to track how advance the ZLW need to be inserted + * Insert ZLW request N pages ahead address. + * d. 
l1_zlw_2d_mode -> To track 2D mode per stream (ZLW_2D) + * + * + * Currently L1/L2 streams, blocks, AT ZLW configurations etc. are pre-defined + * as per the usecase specific calculations. Any change to this pre-defined + * table has to happen in sync with IPU4 FW. + */ +struct ipu_mmu_hw { + union { + unsigned long offset; + void __iomem *base; + }; + unsigned int info_bits; + u8 nr_l1streams; + /* + * L1 has variable blocks per stream - total of 64 blocks and maximum of + * 16 blocks per stream. Configurable by using the block start address + * per stream. Block start address is calculated from the block size + */ + u8 l1_block_sz[IPU_MMU_MAX_TLB_L1_STREAMS]; + /* Is ZLW is enabled in each stream */ + bool l1_zlw_en[IPU_MMU_MAX_TLB_L1_STREAMS]; + bool l1_zlw_1d_mode[IPU_MMU_MAX_TLB_L1_STREAMS]; + u8 l1_ins_zlw_ahead_pages[IPU_MMU_MAX_TLB_L1_STREAMS]; + bool l1_zlw_2d_mode[IPU_MMU_MAX_TLB_L1_STREAMS]; + + u32 l1_stream_id_reg_offset; + u32 l2_stream_id_reg_offset; + + u8 nr_l2streams; + /* + * L2 has fixed 2 blocks per stream. 
Block address is calculated + * from the block size + */ + u8 l2_block_sz[IPU_MMU_MAX_TLB_L2_STREAMS]; + /* flag to track if WA is needed for successive invalidate HW bug */ + bool insert_read_before_invalidate; + /* flag to track if zlw based mmu invalidation is needed */ + bool zlw_invalidate; +}; + +struct ipu_mmu_pdata { + unsigned int nr_mmus; + struct ipu_mmu_hw mmu_hw[IPU_MMU_MAX_DEVICES]; + int mmid; +}; + +struct ipu_isys_csi2_pdata { + void __iomem *base; +}; + +#define IPU_EV_AUTO 0xff + +struct ipu_combo_receiver_params { + u8 crc_val; + u8 drc_val; + u8 drc_val_combined; + u8 ctle_val; +}; + +struct ipu_receiver_electrical_params { + u64 min_freq; + u64 max_freq; + unsigned short device; /* PCI DEVICE ID */ + u8 revision; /* PCI REVISION */ + /* base settings at first receiver power on */ + u8 rcomp_val_combo; + u8 rcomp_val_legacy; + + /* Combo per receiver settings */ + struct ipu_combo_receiver_params ports[2]; +}; + +struct ipu_isys_internal_csi2_pdata { + unsigned int nports; + unsigned int *offsets; + struct ipu_receiver_electrical_params *evparams; + u32 evsetmask0; + u32 evsetmask1; + unsigned char *evlanecombine; +}; + +struct ipu_isys_internal_tpg_pdata { + unsigned int ntpgs; + unsigned int *offsets; + unsigned int *sels; +}; + +/* + * One place to handle all the IPU HW variations + */ +struct ipu_hw_variants { + unsigned long offset; + unsigned int nr_mmus; + struct ipu_mmu_hw mmu_hw[IPU_MMU_MAX_DEVICES]; + u8 cdc_fifos; + u8 cdc_fifo_threshold[IPU_MAX_VC_IOSF_PORTS]; + u32 dmem_offset; + u32 spc_offset; /* SPC offset from psys base */ +}; + +struct ipu_isys_internal_pdata { + struct ipu_isys_internal_csi2_pdata csi2; + struct ipu_isys_internal_tpg_pdata tpg; + struct ipu_hw_variants hw_variant; + u32 num_parallel_streams; + u32 isys_dma_overshoot; +}; + +struct ipu_isys_pdata { + void __iomem *base; + const struct ipu_isys_internal_pdata *ipdata; + struct ipu_isys_subdev_pdata *spdata; +}; + +struct ipu_psys_internal_pdata { + struct 
ipu_hw_variants hw_variant; +}; + +struct ipu_psys_pdata { + void __iomem *base; + const struct ipu_psys_internal_pdata *ipdata; +}; + +#endif diff --git a/drivers/media/pci/intel/ipu-psys-compat32.c b/drivers/media/pci/intel/ipu-psys-compat32.c new file mode 100644 index 0000000000000..0b9bb3ec28aca --- /dev/null +++ b/drivers/media/pci/intel/ipu-psys-compat32.c @@ -0,0 +1,226 @@ +// SPDX-License-Identifier: GPL-2.0 +// Copyright (C) 2013 - 2018 Intel Corporation + +#include +#include +#include + +#include + +#include "ipu-psys.h" + +static long native_ioctl(struct file *file, unsigned int cmd, unsigned long arg) +{ + long ret = -ENOTTY; + + if (file->f_op->unlocked_ioctl) + ret = file->f_op->unlocked_ioctl(file, cmd, arg); + + return ret; +} + +struct ipu_psys_buffer32 { + u64 len; + union { + int fd; + compat_uptr_t userptr; + u64 reserved; + } base; + u32 data_offset; + u32 bytes_used; + u32 flags; + u32 reserved[2]; +} __packed; + +struct ipu_psys_command32 { + u64 issue_id; + u64 user_token; + u32 priority; + compat_uptr_t pg_manifest; + compat_uptr_t buffers; + int pg; + u32 pg_manifest_size; + u32 bufcount; + u32 min_psys_freq; + u32 frame_counter; + u32 reserved[2]; +} __packed; + +struct ipu_psys_manifest32 { + u32 index; + u32 size; + compat_uptr_t manifest; + u32 reserved[5]; +} __packed; + +static int +get_ipu_psys_command32(struct ipu_psys_command *kp, + struct ipu_psys_command32 __user *up) +{ + compat_uptr_t pgm, bufs; + + if (!access_ok(VERIFY_READ, up, + sizeof(struct ipu_psys_command32)) || + get_user(kp->issue_id, &up->issue_id) || + get_user(kp->user_token, &up->user_token) || + get_user(kp->priority, &up->priority) || + get_user(pgm, &up->pg_manifest) || + get_user(bufs, &up->buffers) || + get_user(kp->pg, &up->pg) || + get_user(kp->pg_manifest_size, &up->pg_manifest_size) || + get_user(kp->bufcount, &up->bufcount) || + get_user(kp->min_psys_freq, &up->min_psys_freq) + || get_user(kp->frame_counter, &up->frame_counter) + ) + return -EFAULT; + 
+ kp->pg_manifest = compat_ptr(pgm); + kp->buffers = compat_ptr(bufs); + + return 0; +} + +static int +get_ipu_psys_buffer32(struct ipu_psys_buffer *kp, + struct ipu_psys_buffer32 __user *up) +{ + compat_uptr_t ptr; + + if (!access_ok(VERIFY_READ, up, + sizeof(struct ipu_psys_buffer32)) || + get_user(kp->len, &up->len) || + get_user(ptr, &up->base.userptr) || + get_user(kp->data_offset, &up->data_offset) || + get_user(kp->bytes_used, &up->bytes_used) || + get_user(kp->flags, &up->flags)) + return -EFAULT; + + kp->base.userptr = compat_ptr(ptr); + + return 0; +} + +static int +put_ipu_psys_buffer32(struct ipu_psys_buffer *kp, + struct ipu_psys_buffer32 __user *up) +{ + if (!access_ok(VERIFY_WRITE, up, + sizeof(struct ipu_psys_buffer32)) || + put_user(kp->len, &up->len) || + put_user(kp->base.fd, &up->base.fd) || + put_user(kp->data_offset, &up->data_offset) || + put_user(kp->bytes_used, &up->bytes_used) || + put_user(kp->flags, &up->flags)) + return -EFAULT; + + return 0; +} + +static int +get_ipu_psys_manifest32(struct ipu_psys_manifest *kp, + struct ipu_psys_manifest32 __user *up) +{ + compat_uptr_t ptr; + + if (!access_ok(VERIFY_READ, up, + sizeof(struct ipu_psys_manifest32)) || + get_user(kp->index, &up->index) || + get_user(kp->size, &up->size) || get_user(ptr, &up->manifest)) + return -EFAULT; + + kp->manifest = compat_ptr(ptr); + + return 0; +} + +static int +put_ipu_psys_manifest32(struct ipu_psys_manifest *kp, + struct ipu_psys_manifest32 __user *up) +{ + compat_uptr_t ptr = (u32)((unsigned long)kp->manifest); + + if (!access_ok(VERIFY_WRITE, up, + sizeof(struct ipu_psys_manifest32)) || + put_user(kp->index, &up->index) || + put_user(kp->size, &up->size) || put_user(ptr, &up->manifest)) + return -EFAULT; + + return 0; +} + +#define IPU_IOC_GETBUF32 _IOWR('A', 4, struct ipu_psys_buffer32) +#define IPU_IOC_PUTBUF32 _IOWR('A', 5, struct ipu_psys_buffer32) +#define IPU_IOC_QCMD32 _IOWR('A', 6, struct ipu_psys_command32) +#define IPU_IOC_CMD_CANCEL32 _IOWR('A', 
8, struct ipu_psys_command32) +#define IPU_IOC_GET_MANIFEST32 _IOWR('A', 9, struct ipu_psys_manifest32) + +long ipu_psys_compat_ioctl32(struct file *file, unsigned int cmd, + unsigned long arg) +{ + union { + struct ipu_psys_buffer buf; + struct ipu_psys_command cmd; + struct ipu_psys_event ev; + struct ipu_psys_manifest m; + } karg; + int compatible_arg = 1; + int err = 0; + void __user *up = compat_ptr(arg); + + switch (cmd) { + case IPU_IOC_GETBUF32: + cmd = IPU_IOC_GETBUF; + break; + case IPU_IOC_PUTBUF32: + cmd = IPU_IOC_PUTBUF; + break; + case IPU_IOC_QCMD32: + cmd = IPU_IOC_QCMD; + break; + case IPU_IOC_GET_MANIFEST32: + cmd = IPU_IOC_GET_MANIFEST; + break; + } + + switch (cmd) { + case IPU_IOC_GETBUF: + case IPU_IOC_PUTBUF: + err = get_ipu_psys_buffer32(&karg.buf, up); + compatible_arg = 0; + break; + case IPU_IOC_QCMD: + err = get_ipu_psys_command32(&karg.cmd, up); + compatible_arg = 0; + break; + case IPU_IOC_GET_MANIFEST: + err = get_ipu_psys_manifest32(&karg.m, up); + compatible_arg = 0; + break; + } + if (err) + return err; + + if (compatible_arg) { + err = native_ioctl(file, cmd, (unsigned long)up); + } else { + mm_segment_t old_fs = get_fs(); + + set_fs(KERNEL_DS); + err = native_ioctl(file, cmd, (unsigned long)&karg); + set_fs(old_fs); + } + + if (err) + return err; + + switch (cmd) { + case IPU_IOC_GETBUF: + err = put_ipu_psys_buffer32(&karg.buf, up); + break; + case IPU_IOC_GET_MANIFEST: + err = put_ipu_psys_manifest32(&karg.m, up); + break; + } + return err; +} +EXPORT_SYMBOL_GPL(ipu_psys_compat_ioctl32); diff --git a/drivers/media/pci/intel/ipu-psys-virt.c b/drivers/media/pci/intel/ipu-psys-virt.c new file mode 100644 index 0000000000000..76b616b9383a3 --- /dev/null +++ b/drivers/media/pci/intel/ipu-psys-virt.c @@ -0,0 +1,812 @@ +// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0) +/* + * Copyright (C) 2018 Intel Corporation + */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include 
+#include +#include +#include +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 14, 0) +#include +#else +#include +#endif +#include +#include +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 8, 0) +#include +#else +#include +#endif + +#include + +#include "ipu.h" +#include "ipu-bus.h" +#include "ipu-platform.h" +#include "ipu-buttress.h" +#include "ipu-cpd.h" +#include "ipu-fw-psys.h" +#include "ipu-platform-regs.h" +#include "ipu-fw-isys.h" +#include "ipu-fw-com.h" +#include "ipu-psys.h" + +#include +#include "virtio/intel-ipu4-virtio-common.h" +#include "virtio/intel-ipu4-virtio-common-psys.h" +#include "virtio/intel-ipu4-virtio-be.h" +#include "ipu-psys-virt.h" + +extern struct dma_buf_ops ipu_dma_buf_ops; + +#define POLL_WAIT 500 //500ms + +int virt_ipu_psys_get_manifest(struct ipu_psys_fh *fh, + struct ipu4_virtio_req_info *req_info) +{ + struct ipu_psys *psys = fh->psys; + struct ipu_device *isp = psys->adev->isp; + struct ipu_cpd_client_pkg_hdr *client_pkg; + u32 entries; + void *host_fw_data; + dma_addr_t dma_fw_data; + u32 client_pkg_offset; + struct ipu_psys_manifest_wrap *manifest_wrap; + struct ipu_psys_manifest *manifest; + void *manifest_data; + int status = 0; + + manifest_wrap = map_guest_phys(req_info->domid, + req_info->request->payload, + sizeof(struct ipu_psys_manifest_wrap)); + if (manifest_wrap == NULL) { + pr_err("%s: failed to get payload", __func__); + return -EFAULT; + } + + manifest = map_guest_phys(req_info->domid, + manifest_wrap->psys_manifest, + sizeof(struct ipu_psys_manifest)); + if (manifest == NULL) { + pr_err("%s: failed to get ipu_psys_manifest", __func__); + status = -EFAULT; + goto exit_payload; + } + + manifest_data = map_guest_phys( + req_info->domid, + manifest_wrap->manifest_data, + PAGE_SIZE + ); + if (manifest_data == NULL) { + pr_err("%s: failed to get manifest_data", __func__); + status = -EFAULT; + goto exit_psys_manifest; + } + + host_fw_data = (void *)isp->cpd_fw->data; + dma_fw_data = sg_dma_address(psys->fw_sgt.sgl); + + 
entries = ipu_cpd_pkg_dir_get_num_entries(psys->pkg_dir); + if (!manifest || manifest->index > entries - 1) { + dev_err(&psys->adev->dev, "invalid argument\n"); + status = -EINVAL; + goto exit_manifest_data; + } + + if (!ipu_cpd_pkg_dir_get_size(psys->pkg_dir, manifest->index) || + ipu_cpd_pkg_dir_get_type(psys->pkg_dir, manifest->index) < + IPU_CPD_PKG_DIR_CLIENT_PG_TYPE) { + dev_dbg(&psys->adev->dev, "invalid pkg dir entry\n"); + status = -ENOENT; + goto exit_manifest_data; + } + + client_pkg_offset = ipu_cpd_pkg_dir_get_address(psys->pkg_dir, + manifest->index); + client_pkg_offset -= dma_fw_data; + + client_pkg = host_fw_data + client_pkg_offset; + manifest->size = client_pkg->pg_manifest_size; + + if (manifest->size > PAGE_SIZE) { + pr_err("%s: manifest size is more than 1 page %d", + __func__, + manifest->size); + status = -EFAULT; + goto exit_manifest_data; + } + + memcpy(manifest_data, + (uint8_t *) client_pkg + client_pkg->pg_manifest_offs, + manifest->size); + +exit_manifest_data: + unmap_guest_phys(req_info->domid, + manifest_wrap->manifest_data); + +exit_psys_manifest: + unmap_guest_phys(req_info->domid, + manifest_wrap->psys_manifest); + +exit_payload: + unmap_guest_phys(req_info->domid, + req_info->request->payload); + + return status; +} + +int virt_ipu_psys_map_buf(struct ipu_psys_fh *fh, + struct ipu4_virtio_req_info *req_info) +{ + return -1; +} + +int virt_ipu_psys_unmap_buf(struct ipu_psys_fh *fh, + struct ipu4_virtio_req_info *req_info) +{ + int fd; + + fd = req_info->request->op[0]; + + return ipu_psys_unmapbuf(fd, fh); +} + +#if LINUX_VERSION_CODE <= KERNEL_VERSION(4, 14, 2) +static void ipu_psys_watchdog(unsigned long data) +{ + struct ipu_psys_kcmd *kcmd = (struct ipu_psys_kcmd *)data; +#else +static void ipu_psys_watchdog(struct timer_list *t) +{ + struct ipu_psys_kcmd *kcmd = from_timer(kcmd, t, watchdog); +#endif + struct ipu_psys *psys = kcmd->fh->psys; + + queue_work(IPU_PSYS_WORK_QUEUE, &psys->watchdog_work); +} + +static int 
ipu_psys_config_legacy_pg(struct ipu_psys_kcmd *kcmd) +{ + struct ipu_psys *psys = kcmd->fh->psys; + unsigned int i; + int ret; + + ret = ipu_fw_psys_pg_set_ipu_vaddress(kcmd, kcmd->kpg->pg_dma_addr); + if (ret) { + ret = -EIO; + goto error; + } + + for (i = 0; i < kcmd->nbuffers; i++) { + struct ipu_fw_psys_terminal *terminal; + u32 buffer; + + terminal = ipu_fw_psys_pg_get_terminal(kcmd, i); + if (!terminal) + continue; + + buffer = (u32) kcmd->kbufs[i]->dma_addr + + kcmd->buffers[i].data_offset; + + ret = ipu_fw_psys_terminal_set(terminal, i, kcmd, + buffer, kcmd->kbufs[i]->len); + if (ret == -EAGAIN) + continue; + + if (ret) { + dev_err(&psys->adev->dev, "Unable to set terminal\n"); + goto error; + } + } + + ipu_fw_psys_pg_set_token(kcmd, (uintptr_t) kcmd); + + ret = ipu_fw_psys_pg_submit(kcmd); + if (ret) { + dev_err(&psys->adev->dev, "failed to submit kcmd!\n"); + goto error; + } + + return 0; + +error: + dev_err(&psys->adev->dev, "failed to config legacy pg\n"); + return ret; +} + +static struct ipu_psys_kcmd *virt_ipu_psys_copy_cmd( + struct ipu_psys_command *cmd, + struct ipu_psys_buffer *buffers, + void *pg_manifest, + struct ipu_psys_fh *fh) +{ + struct ipu_psys *psys = fh->psys; + struct ipu_psys_kcmd *kcmd; + struct ipu_psys_kbuffer *kpgbuf; + unsigned int i; + int prevfd = 0; + + if (cmd->bufcount > IPU_MAX_PSYS_CMD_BUFFERS) + return NULL; + + if (!cmd->pg_manifest_size || + cmd->pg_manifest_size > KMALLOC_MAX_CACHE_SIZE) + return NULL; + + kcmd = kzalloc(sizeof(*kcmd), GFP_KERNEL); + if (!kcmd) + return NULL; + + kcmd->state = KCMD_STATE_NEW; + kcmd->fh = fh; + INIT_LIST_HEAD(&kcmd->list); + INIT_LIST_HEAD(&kcmd->started_list); + + mutex_lock(&fh->mutex); + kpgbuf = ipu_psys_lookup_kbuffer(fh, cmd->pg); + mutex_unlock(&fh->mutex); + if (!kpgbuf || !kpgbuf->sgt) { + pr_err("%s: failed ipu_psys_lookup_kbuffer", __func__); + goto error; + } + + kcmd->pg_user = kpgbuf->kaddr; + kcmd->kpg = __get_pg_buf(psys, kpgbuf->len); + if (!kcmd->kpg) { + 
pr_err("%s: failed __get_pg_buf", __func__); + goto error; + } + + memcpy(kcmd->kpg->pg, kcmd->pg_user, kcmd->kpg->pg_size); + kcmd->pg_manifest = kzalloc(cmd->pg_manifest_size, GFP_KERNEL); + if (!kcmd->pg_manifest) { + pr_err("%s: failed kzalloc pg_manifest", __func__); + goto error; + } + + memcpy(kcmd->pg_manifest, pg_manifest, + cmd->pg_manifest_size); + + kcmd->pg_manifest_size = cmd->pg_manifest_size; + + kcmd->user_token = cmd->user_token; + kcmd->issue_id = cmd->issue_id; + kcmd->priority = cmd->priority; + if (kcmd->priority >= IPU_PSYS_CMD_PRIORITY_NUM) { + pr_err("%s: failed priority", __func__); + goto error; + } + + kcmd->nbuffers = ipu_fw_psys_pg_get_terminal_count(kcmd); + kcmd->buffers = kcalloc(kcmd->nbuffers, sizeof(*kcmd->buffers), + GFP_KERNEL); + if (!kcmd->buffers) { + pr_err("%s, failed kcalloc buffers", __func__); + goto error; + } + + kcmd->kbufs = kcalloc(kcmd->nbuffers, sizeof(kcmd->kbufs[0]), + GFP_KERNEL); + if (!kcmd->kbufs) { + pr_err("%s: failed kcalloc kbufs", __func__); + goto error; + } + + if (!cmd->bufcount || kcmd->nbuffers > cmd->bufcount) { + pr_err("%s: failed bufcount", __func__); + goto error; + } + + memcpy(kcmd->buffers, buffers, + kcmd->nbuffers * sizeof(*kcmd->buffers)); + + for (i = 0; i < kcmd->nbuffers; i++) { + struct ipu_fw_psys_terminal *terminal; + + terminal = ipu_fw_psys_pg_get_terminal(kcmd, i); + if (!terminal) + continue; + + + mutex_lock(&fh->mutex); + kcmd->kbufs[i] = ipu_psys_lookup_kbuffer(fh, + kcmd->buffers[i].base.fd); + mutex_unlock(&fh->mutex); + if (!kcmd->kbufs[i]) { + pr_err("%s: NULL kcmd->kbufs[i]", __func__); + goto error; + } + if (!kcmd->kbufs[i] || !kcmd->kbufs[i]->sgt || + kcmd->kbufs[i]->len < kcmd->buffers[i].bytes_used) + goto error; + if ((kcmd->kbufs[i]->flags & + IPU_BUFFER_FLAG_NO_FLUSH) || + (kcmd->buffers[i].flags & + IPU_BUFFER_FLAG_NO_FLUSH) || + prevfd == kcmd->buffers[i].base.fd) + continue; + + prevfd = kcmd->buffers[i].base.fd; + dma_sync_sg_for_device(&psys->adev->dev, + 
kcmd->kbufs[i]->sgt->sgl, + kcmd->kbufs[i]->sgt->orig_nents, + DMA_BIDIRECTIONAL); + } + + return kcmd; + +error: + ipu_psys_kcmd_free(kcmd); + + dev_dbg(&psys->adev->dev, "failed to copy cmd\n"); + + return NULL; +} + +static int virt_ipu_psys_kcmd_new(struct ipu_psys_command *cmd, + struct ipu_psys_buffer *buffers, + void *pg_manifest, + struct ipu_psys_fh *fh) +{ + struct ipu_psys *psys = fh->psys; + struct ipu_psys_kcmd *kcmd; + size_t pg_size; + int ret = 0; + + if (psys->adev->isp->flr_done) + return -EIO; + + kcmd = virt_ipu_psys_copy_cmd(cmd, buffers, pg_manifest, fh); + if(!kcmd) + return -EINVAL; + +#if LINUX_VERSION_CODE <= KERNEL_VERSION(4, 14, 2) + init_timer(&kcmd->watchdog); + kcmd->watchdog.data = (unsigned long)kcmd; + kcmd->watchdog.function = &ipu_psys_watchdog; +#else + timer_setup(&kcmd->watchdog, ipu_psys_watchdog, 0); +#endif + + if (cmd->min_psys_freq) { + kcmd->constraint.min_freq = cmd->min_psys_freq; + ipu_buttress_add_psys_constraint(psys->adev->isp, + &kcmd->constraint); + } + + pg_size = ipu_fw_psys_pg_get_size(kcmd); + if (pg_size > kcmd->kpg->pg_size) { + dev_dbg(&psys->adev->dev, "pg size mismatch %zu %zu\n", + pg_size, kcmd->kpg->pg_size); + ret = -EINVAL; + goto error; + } + + ret = ipu_psys_config_legacy_pg(kcmd); + if (ret) + goto error; + + mutex_lock(&fh->mutex); + list_add_tail(&kcmd->list, &fh->sched.kcmds[cmd->priority]); + if (!fh->sched.new_kcmd_tail[cmd->priority] && + kcmd->state == KCMD_STATE_NEW) { + fh->sched.new_kcmd_tail[cmd->priority] = kcmd; + /* Kick command scheduler thread */ + atomic_set(&psys->wakeup_sched_thread_count, 1); + wake_up_interruptible(&psys->sched_cmd_wq); + } + mutex_unlock(&fh->mutex); + + dev_dbg(&psys->adev->dev, + "IOC_QCMD: user_token:%llx issue_id:0x%llx pri:%d\n", + cmd->user_token, cmd->issue_id, cmd->priority); + + return 0; + +error: + ipu_psys_kcmd_free(kcmd); + + return ret; +} + + +int virt_ipu_psys_qcmd(struct ipu_psys_fh *fh, + struct ipu4_virtio_req_info *req_info) +{ + struct 
ipu_psys *psys = fh->psys; + struct ipu_psys_command_wrap *cmd_wrap; + struct ipu_psys_command *cmd; + void *pg_manifest; + struct ipu_psys_buffer *buffers; + int ret = 0; + + if (psys->adev->isp->flr_done) + return -EIO; + + cmd_wrap = map_guest_phys(req_info->domid, + req_info->request->payload, + sizeof(struct ipu_psys_command_wrap)); + + if (cmd_wrap == NULL) { + pr_err("%s: failed to get payload", __func__); + return -EFAULT; + } + + cmd = map_guest_phys(req_info->domid, + cmd_wrap->psys_command, + sizeof(struct ipu_psys_command)); + + if (cmd == NULL) { + pr_err("%s: failed to get ipu_psys_command", __func__); + ret = -EFAULT; + goto exit_payload; + } + + pg_manifest = map_guest_phys(req_info->domid, + cmd_wrap->psys_manifest, + cmd->pg_manifest_size); + + if (pg_manifest == NULL) { + pr_err("%s: failed to get pg_manifest", __func__); + ret = -EFAULT; + goto exit_psys_command; + } + + buffers = map_guest_phys(req_info->domid, + cmd_wrap->psys_buffer, + sizeof(struct ipu_psys_buffer)); + + if (buffers == NULL) { + pr_err("%s: failed to get ipu_psys_buffers", __func__); + ret = -EFAULT; + goto exit_psys_manifest; + } + + ret = virt_ipu_psys_kcmd_new(cmd, buffers, pg_manifest, fh); + + unmap_guest_phys(req_info->domid, + cmd_wrap->psys_buffer); + +exit_psys_manifest: + unmap_guest_phys(req_info->domid, + cmd_wrap->psys_manifest); + +exit_psys_command: + unmap_guest_phys(req_info->domid, + cmd_wrap->psys_command); + +exit_payload: + unmap_guest_phys(req_info->domid, + req_info->request->payload); + + return ret; +} + +int virt_ipu_psys_dqevent(struct ipu_psys_fh *fh, + struct ipu4_virtio_req_info *req_info, + unsigned int f_flags) +{ + struct ipu_psys_event *event; + struct ipu_psys_kcmd *kcmd = NULL; + int status = 0, time_remain = -1; + DEFINE_WAIT_FUNC(wait, woken_wake_function); + + pr_debug("%s: IOC_DQEVENT", __func__); + + event = map_guest_phys(req_info->domid, + req_info->request->payload, + sizeof(struct ipu_psys_event)); + if (event == NULL) { + 
pr_err("%s: failed to get payload", __func__); + return -EFAULT; + } + + add_wait_queue(&fh->wait, &wait); + while (1) { + if (ipu_get_completed_kcmd(fh) || + time_remain == 0) + break; + time_remain = + wait_woken(&wait, TASK_INTERRUPTIBLE, POLL_WAIT); + } + remove_wait_queue(&fh->wait, &wait); + + if ((time_remain == 0) || (time_remain == -ERESTARTSYS)) { + pr_err("%s: poll timeout or unexpected wake up %d", + __func__, time_remain); + req_info->request->func_ret = 0; + goto error_exit; + } + + mutex_lock(&fh->mutex); + if (!kcmd) { + kcmd = __ipu_get_completed_kcmd(fh); + if (!kcmd) { + mutex_unlock(&fh->mutex); + return -ENODATA; + } + } + + *event = kcmd->ev; + ipu_psys_kcmd_free(kcmd); + mutex_unlock(&fh->mutex); + + req_info->request->func_ret = POLLIN; + +error_exit: + unmap_guest_phys(req_info->domid, + req_info->request->payload); + + return status; +} + +int virt_ipu_psys_poll(struct ipu_psys_fh *fh, + struct ipu4_virtio_req_info *req_info) +{ + struct ipu_psys *psys = fh->psys; + long time_remain = -1; + DEFINE_WAIT_FUNC(wait, woken_wake_function); + + dev_dbg(&psys->adev->dev, "ipu psys poll\n"); + + add_wait_queue(&fh->wait, &wait); + while (1) { + if (ipu_get_completed_kcmd(fh) || + time_remain == 0) + break; + time_remain = + wait_woken(&wait, TASK_INTERRUPTIBLE, POLL_WAIT); + } + remove_wait_queue(&fh->wait, &wait); + + if (time_remain) + req_info->request->func_ret = POLLIN; + else + req_info->request->func_ret = 0; + + dev_dbg(&psys->adev->dev, "ipu psys poll res %u\n", + req_info->request->func_ret); + + return 0; +} + +int __map_buf(struct ipu_psys_fh *fh, + struct ipu_psys_buffer_wrap *buf_wrap, + struct ipu_psys_kbuffer *kbuf, + int domid, int fd) +{ + struct ipu_psys *psys = fh->psys; + struct dma_buf *dbuf; + int ret = -1, i, array_size; + struct ipu_dma_buf_attach *ipu_attach; + struct page **data_pages = NULL; + u64 *page_table = NULL; + void *pageaddr; + + mutex_lock(&fh->mutex); + kbuf->dbuf = dma_buf_get(fd); + if (IS_ERR(kbuf->dbuf)) 
{ + goto error_get; + } + + if (kbuf->len == 0) + kbuf->len = kbuf->dbuf->size; + + kbuf->fd = fd; + + kbuf->db_attach = dma_buf_attach(kbuf->dbuf, &psys->adev->dev); + if (IS_ERR(kbuf->db_attach)) { + ret = PTR_ERR(kbuf->db_attach); + goto error_put; + } + + array_size = buf_wrap->map.npages * sizeof(struct page *); + if (array_size <= PAGE_SIZE) + data_pages = kzalloc(array_size, GFP_KERNEL); + else + data_pages = vzalloc(array_size); + if (data_pages == NULL) { + pr_err("%s: Failed alloc data page set", __func__); + goto error_detach; + } + + pr_debug("%s: Total number of pages:%lu", + __func__, buf_wrap->map.npages); + + page_table = map_guest_phys(domid, + buf_wrap->map.page_table_ref, + sizeof(u64) * buf_wrap->map.npages); + + if (page_table == NULL) { + pr_err("%s: Failed to map page table", __func__); + kfree(data_pages); + goto error_detach; + } else { + pr_debug("%s: first page %lld", + __func__, page_table[0]); + for (i = 0; i < buf_wrap->map.npages; i++) { + pageaddr = map_guest_phys(domid, + page_table[i], PAGE_SIZE); + if (pageaddr == NULL) { + pr_err("%s: Cannot map pages from UOS", __func__); + kfree(data_pages); + goto error_page_table_ref; + } + data_pages[i] = virt_to_page(pageaddr); + } + } + + ipu_attach = kbuf->db_attach->priv; + ipu_attach->npages = buf_wrap->map.npages; + ipu_attach->pages = data_pages; + ipu_attach->vma_is_io = buf_wrap->map.vma_is_io; + + kbuf->sgt = dma_buf_map_attachment(kbuf->db_attach, DMA_BIDIRECTIONAL); + if (IS_ERR_OR_NULL(kbuf->sgt)) { + ret = -EINVAL; + kbuf->sgt = NULL; + dev_dbg(&psys->adev->dev, "map attachment failed\n"); + kfree(data_pages); + goto error_page_table; + } + + kbuf->dma_addr = sg_dma_address(kbuf->sgt->sgl); + + kbuf->kaddr = dma_buf_vmap(kbuf->dbuf); + if (!kbuf->kaddr) { + kfree(data_pages); + ret = -EINVAL; + goto error_unmap; + } + + kbuf->valid = true; + + for (i = 0; i < buf_wrap->map.npages; i++) + unmap_guest_phys(domid, page_table[i]); + + unmap_guest_phys(domid, + 
buf_wrap->map.page_table_ref); + + mutex_unlock(&fh->mutex); + + return 0; + +error_unmap: + dma_buf_unmap_attachment(kbuf->db_attach, kbuf->sgt, DMA_BIDIRECTIONAL); +error_page_table: + for (i = 0; i < buf_wrap->map.npages; i++) + unmap_guest_phys(domid, page_table[i]); +error_page_table_ref: + unmap_guest_phys(domid, + buf_wrap->map.page_table_ref); +error_detach: + dma_buf_detach(kbuf->dbuf, kbuf->db_attach); + kbuf->db_attach = NULL; +error_put: + dbuf = kbuf->dbuf; + dma_buf_put(dbuf); +error_get: + mutex_unlock(&fh->mutex); + + return ret; +} + +int virt_ipu_psys_get_buf(struct ipu_psys_fh *fh, + struct ipu4_virtio_req_info *req_info) +{ + struct dma_buf *dbuf; + int ret = 0; + struct ipu_psys_buffer_wrap *buf_wrap; + struct ipu_psys_buffer *buf; + struct ipu_psys_kbuffer *kbuf; + struct ipu_psys *psys = fh->psys; + + buf_wrap = map_guest_phys(req_info->domid, + req_info->request->payload, + sizeof(struct ipu_psys_buffer_wrap)); + if (buf_wrap == NULL) { + pr_err("%s: failed to get payload", __func__); + return -EFAULT; + } + + buf = map_guest_phys(req_info->domid, + buf_wrap->psys_buf, + sizeof(struct ipu_psys_buffer)); + if (buf == NULL) { + pr_err("%s: failed to get ipu_psys_buffer", __func__); + ret = -EFAULT; + goto exit_payload; + } + +#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 1, 0) + DEFINE_DMA_BUF_EXPORT_INFO(exp_info); +#endif + + if (!buf->base.userptr) { + dev_err(&psys->adev->dev, "Buffer allocation not supported\n"); + ret = -EINVAL; + goto exit_psys_buf; + } + + kbuf = kzalloc(sizeof(*kbuf), GFP_KERNEL); + if (!kbuf) { + ret = -ENOMEM; + goto exit_psys_buf; + } + + kbuf->len = buf->len; + kbuf->userptr = buf->base.userptr; + kbuf->flags = buf->flags; + +#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 1, 0) + exp_info.ops = &ipu_dma_buf_ops; + exp_info.size = kbuf->len; + exp_info.flags = O_RDWR; + exp_info.priv = kbuf; + + dbuf = dma_buf_export(&exp_info); +#else + dbuf = dma_buf_export(kbuf, &ipu_dma_buf_ops, kbuf->len, 0); +#endif + if 
(IS_ERR(dbuf)) { + kfree(kbuf); + ret = PTR_ERR(dbuf); + goto exit_psys_buf; + } + + ret = dma_buf_fd(dbuf, 0); + if (ret < 0) { + kfree(kbuf); + goto exit_psys_buf; + } + + dev_dbg(&psys->adev->dev, "IOC_GETBUF: userptr %p", buf->base.userptr); + + kbuf->fd = ret; + buf->base.fd = ret; + kbuf->flags = buf->flags &= ~IPU_BUFFER_FLAG_USERPTR; + kbuf->flags = buf->flags |= IPU_BUFFER_FLAG_DMA_HANDLE; + + ret = __map_buf(fh, buf_wrap, kbuf, req_info->domid, kbuf->fd); + if (ret < 0) { + kfree(kbuf); + goto exit_psys_buf; + } + + mutex_lock(&fh->mutex); + list_add_tail(&kbuf->list, &fh->bufmap); + mutex_unlock(&fh->mutex); + + dev_dbg(&psys->adev->dev, "to %d\n", buf->base.fd); + +exit_psys_buf: + unmap_guest_phys(req_info->domid, + buf_wrap->psys_buf); +exit_payload: + unmap_guest_phys(req_info->domid, + req_info->request->payload); + + return ret; +} + +struct psys_fops_virt psys_vfops = { + .get_manifest = virt_ipu_psys_get_manifest, + .map_buf = virt_ipu_psys_map_buf, + .unmap_buf = virt_ipu_psys_unmap_buf, + .qcmd = virt_ipu_psys_qcmd, + .dqevent = virt_ipu_psys_dqevent, + .get_buf = virt_ipu_psys_get_buf, + .poll = virt_ipu_psys_poll, +}; diff --git a/drivers/media/pci/intel/ipu-psys-virt.h b/drivers/media/pci/intel/ipu-psys-virt.h new file mode 100644 index 0000000000000..6289207dd4bde --- /dev/null +++ b/drivers/media/pci/intel/ipu-psys-virt.h @@ -0,0 +1,38 @@ +/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0) */ +/* + * Copyright (C) 2018 Intel Corporation + */ +#ifndef IPU_PSYS_VIRT_H +#define IPU_PSYS_VIRT_H + +#include "virtio/intel-ipu4-virtio-be-request-queue.h" + +struct ipu_psys_fh; + +struct psys_fops_virt { + int (*get_manifest)(struct ipu_psys_fh *fh, + struct ipu4_virtio_req_info *req_info); + int (*map_buf)(struct ipu_psys_fh *fh, + struct ipu4_virtio_req_info *req_info); + int (*unmap_buf)(struct ipu_psys_fh *fh, + struct ipu4_virtio_req_info *req_info); + int (*qcmd)(struct ipu_psys_fh *fh, + struct ipu4_virtio_req_info *req_info); + int 
(*dqevent)(struct ipu_psys_fh *fh, + struct ipu4_virtio_req_info *req_info, + unsigned int f_flags); + int (*get_buf)(struct ipu_psys_fh *fh, + struct ipu4_virtio_req_info *req_info); + int (*poll)(struct ipu_psys_fh *fh, + struct ipu4_virtio_req_info *req_info); +}; + +//Function define in ipu-psys.c +long ipu_psys_unmapbuf(int fd, struct ipu_psys_fh *fh); +//Function define in ipu4-psys.c +void ipu_psys_kcmd_free(struct ipu_psys_kcmd *kcmd); +struct ipu_psys_kcmd *__ipu_get_completed_kcmd(struct ipu_psys_fh *fh); + +extern struct psys_fops_virt psys_vfops; + +#endif \ No newline at end of file diff --git a/drivers/media/pci/intel/ipu-psys.c b/drivers/media/pci/intel/ipu-psys.c new file mode 100644 index 0000000000000..345285caaa992 --- /dev/null +++ b/drivers/media/pci/intel/ipu-psys.c @@ -0,0 +1,1669 @@ +// SPDX-License-Identifier: GPL-2.0 +// Copyright (C) 2013 - 2018 Intel Corporation + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#if defined(CONFIG_VIDEO_INTEL_IPU_ACRN) && defined(CONFIG_VIDEO_INTEL_IPU_VIRTIO_BE) +#include +#endif +#include +#include +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 14, 0) +#include +#else +#include +#endif +#include +#include +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 8, 0) +#include +#else +#include +#endif + +#include + +#include "ipu.h" +#include "ipu-bus.h" +#include "ipu-platform.h" +#include "ipu-buttress.h" +#include "ipu-cpd.h" +#include "ipu-fw-psys.h" +#include "ipu-psys.h" +#include "ipu-platform-psys.h" +#include "ipu-platform-regs.h" +#include "ipu-fw-isys.h" +#include "ipu-fw-com.h" + +static bool async_fw_init; +module_param(async_fw_init, bool, 0664); +MODULE_PARM_DESC(async_fw_init, "Enable asynchronous firmware initialization"); + +#define IPU_PSYS_NUM_DEVICES 4 +#define IPU_PSYS_AUTOSUSPEND_DELAY 2000 + +#ifdef CONFIG_PM +static int psys_runtime_pm_resume(struct device *dev); +static int psys_runtime_pm_suspend(struct device *dev); 
+#else +#define pm_runtime_dont_use_autosuspend(d) +#define pm_runtime_use_autosuspend(d) +#define pm_runtime_set_autosuspend_delay(d, f) 0 +#define pm_runtime_get_sync(d) 0 +#define pm_runtime_put(d) 0 +#define pm_runtime_put_sync(d) 0 +#define pm_runtime_put_noidle(d) 0 +#define pm_runtime_put_autosuspend(d) 0 +#endif + +static dev_t ipu_psys_dev_t; +static DECLARE_BITMAP(ipu_psys_devices, IPU_PSYS_NUM_DEVICES); +static DEFINE_MUTEX(ipu_psys_mutex); + +static struct fw_init_task { + struct delayed_work work; + struct ipu_psys *psys; +} fw_init_task; + +static void ipu_psys_remove(struct ipu_bus_device *adev); + +static struct bus_type ipu_psys_bus = { + .name = IPU_PSYS_NAME, +}; + +struct ipu_psys_pg *__get_pg_buf(struct ipu_psys *psys, size_t pg_size) +{ + struct ipu_psys_pg *kpg; + unsigned long flags; + + spin_lock_irqsave(&psys->pgs_lock, flags); + list_for_each_entry(kpg, &psys->pgs, list) { + if (!kpg->pg_size && kpg->size >= pg_size) { + kpg->pg_size = pg_size; + spin_unlock_irqrestore(&psys->pgs_lock, flags); + return kpg; + } + } + spin_unlock_irqrestore(&psys->pgs_lock, flags); + /* no big enough buffer available, allocate new one */ + kpg = kzalloc(sizeof(*kpg), GFP_KERNEL); + if (!kpg) + return NULL; + + kpg->pg = dma_alloc_attrs(&psys->adev->dev, pg_size, + &kpg->pg_dma_addr, GFP_KERNEL, + DMA_ATTR_NON_CONSISTENT); + if (!kpg->pg) { + kfree(kpg); + return NULL; + } + + kpg->pg_size = pg_size; + kpg->size = pg_size; + spin_lock_irqsave(&psys->pgs_lock, flags); + list_add(&kpg->list, &psys->pgs); + spin_unlock_irqrestore(&psys->pgs_lock, flags); + + return kpg; +} + +struct ipu_psys_kbuffer *ipu_psys_lookup_kbuffer(struct ipu_psys_fh *fh, int fd) +{ + struct ipu_psys_kbuffer *kbuffer; + + list_for_each_entry(kbuffer, &fh->bufmap, list) { + if (kbuffer->fd == fd) + return kbuffer; + } + + return NULL; +} + +struct ipu_psys_kbuffer * +ipu_psys_lookup_kbuffer_by_kaddr(struct ipu_psys_fh *fh, void *kaddr) +{ + struct ipu_psys_kbuffer *kbuffer; + + 
list_for_each_entry(kbuffer, &fh->bufmap, list) { + if (kbuffer->kaddr == kaddr) + return kbuffer; + } + + return NULL; +} + +static int ipu_psys_get_userpages(struct ipu_dma_buf_attach *attach) +{ + struct vm_area_struct *vma; + unsigned long start, end; + int npages, array_size; + struct page **pages; + struct sg_table *sgt; + int nr = 0; + int ret = -ENOMEM; + + start = (unsigned long)attach->userptr; + end = PAGE_ALIGN(start + attach->len); + npages = (end - (start & PAGE_MASK)) >> PAGE_SHIFT; + array_size = npages * sizeof(struct page *); + + sgt = kzalloc(sizeof(*sgt), GFP_KERNEL); + if (!sgt) + return -ENOMEM; + + if (attach->npages != 0) { + pages = attach->pages; + npages = attach->npages; + attach->vma_is_io = 1; + goto skip_pages; + } + + if (array_size <= PAGE_SIZE) + pages = kzalloc(array_size, GFP_KERNEL); + else + pages = vzalloc(array_size); + if (!pages) + goto free_sgt; + + down_read(¤t->mm->mmap_sem); + vma = find_vma(current->mm, start); + if (!vma) { + ret = -EFAULT; + goto error_up_read; + } + + if (vma->vm_end < start + attach->len) { + dev_err(attach->dev, + "vma at %lu is too small for %llu bytes\n", + start, attach->len); + ret = -EFAULT; + goto error_up_read; + } + + /* + * For buffers from Gralloc, VM_PFNMAP is expected, + * but VM_IO is set. Possibly bug in Gralloc. 
+ */ + attach->vma_is_io = vma->vm_flags & (VM_IO | VM_PFNMAP); + + if (attach->vma_is_io) { + unsigned long io_start = start; + + for (nr = 0; nr < npages; nr++, io_start += PAGE_SIZE) { + unsigned long pfn; + + ret = follow_pfn(vma, io_start, &pfn); + if (ret) + goto error_up_read; + pages[nr] = pfn_to_page(pfn); + } + } else { + nr = get_user_pages( +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 6, 0) + current, current->mm, +#endif + start & PAGE_MASK, npages, +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 9, 0) + 1, 0, +#else + FOLL_WRITE, +#endif + pages, NULL); + if (nr < npages) + goto error_up_read; + } + up_read(¤t->mm->mmap_sem); + + attach->pages = pages; + attach->npages = npages; + +skip_pages: + ret = sg_alloc_table_from_pages(sgt, pages, npages, + start & ~PAGE_MASK, attach->len, + GFP_KERNEL); + if (ret < 0) + goto error; + + attach->sgt = sgt; + + return 0; + +error_up_read: + up_read(¤t->mm->mmap_sem); +error: + if (!attach->vma_is_io) + while (nr > 0) + put_page(pages[--nr]); + + if (array_size <= PAGE_SIZE) + kfree(pages); + else + vfree(pages); +free_sgt: + kfree(sgt); + + dev_err(attach->dev, "failed to get userpages:%d\n", ret); + + return ret; +} + +static void ipu_psys_put_userpages(struct ipu_dma_buf_attach *attach) +{ + if (!attach || !attach->userptr || !attach->sgt) + return; + + if (!attach->vma_is_io) { + int i = attach->npages; + + while (--i >= 0) { + set_page_dirty_lock(attach->pages[i]); + put_page(attach->pages[i]); + } + } + + if (is_vmalloc_addr(attach->pages)) + vfree(attach->pages); + else + kfree(attach->pages); + + sg_free_table(attach->sgt); + kfree(attach->sgt); + attach->sgt = NULL; +} +#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 19, 0) +static int ipu_dma_buf_attach(struct dma_buf *dbuf, + struct dma_buf_attachment *attach) +#else +static int ipu_dma_buf_attach(struct dma_buf *dbuf, struct device *dev, + struct dma_buf_attachment *attach) +#endif +{ + struct ipu_psys_kbuffer *kbuf = dbuf->priv; + struct ipu_dma_buf_attach 
*ipu_attach; + + ipu_attach = kzalloc(sizeof(*ipu_attach), GFP_KERNEL); + if (!ipu_attach) + return -ENOMEM; + +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 19, 0) + ipu_attach->dev = dev; +#endif + ipu_attach->len = kbuf->len; + ipu_attach->userptr = kbuf->userptr; + + attach->priv = ipu_attach; + return 0; +} + +static void ipu_dma_buf_detach(struct dma_buf *dbuf, + struct dma_buf_attachment *attach) +{ + struct ipu_dma_buf_attach *ipu_attach = attach->priv; + + kfree(ipu_attach); + attach->priv = NULL; +} + +static struct sg_table *ipu_dma_buf_map(struct dma_buf_attachment *attach, + enum dma_data_direction dir) +{ + struct ipu_dma_buf_attach *ipu_attach = attach->priv; +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 8, 0) + DEFINE_DMA_ATTRS(attrs); +#else + unsigned long attrs; +#endif + int ret; + + ret = ipu_psys_get_userpages(ipu_attach); + if (ret) + return NULL; + +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 8, 0) + dma_set_attr(DMA_ATTR_SKIP_CPU_SYNC, &attrs); + ret = dma_map_sg_attrs(attach->dev, ipu_attach->sgt->sgl, + ipu_attach->sgt->orig_nents, dir, &attrs); +#else + attrs = DMA_ATTR_SKIP_CPU_SYNC; + ret = dma_map_sg_attrs(attach->dev, ipu_attach->sgt->sgl, + ipu_attach->sgt->orig_nents, dir, attrs); +#endif + if (ret < ipu_attach->sgt->orig_nents) { + ipu_psys_put_userpages(ipu_attach); + dev_dbg(attach->dev, "buf map failed\n"); + + return ERR_PTR(-EIO); + } + + /* + * Initial cache flush to avoid writing dirty pages for buffers which + * are later marked as IPU_BUFFER_FLAG_NO_FLUSH. 
+ */ + dma_sync_sg_for_device(attach->dev, ipu_attach->sgt->sgl, + ipu_attach->sgt->orig_nents, DMA_BIDIRECTIONAL); + + return ipu_attach->sgt; +} + +static void ipu_dma_buf_unmap(struct dma_buf_attachment *attach, + struct sg_table *sg, enum dma_data_direction dir) +{ + struct ipu_dma_buf_attach *ipu_attach = attach->priv; + + dma_unmap_sg(attach->dev, sg->sgl, sg->orig_nents, dir); + ipu_psys_put_userpages(ipu_attach); +} + +static int ipu_dma_buf_mmap(struct dma_buf *dbuf, struct vm_area_struct *vma) +{ + return -ENOTTY; +} + +static void *ipu_dma_buf_kmap(struct dma_buf *dbuf, unsigned long pgnum) +{ + return NULL; +} + +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 19, 0) +static void *ipu_dma_buf_kmap_atomic(struct dma_buf *dbuf, unsigned long pgnum) +{ + return NULL; +} +#endif + +static void ipu_dma_buf_release(struct dma_buf *buf) +{ + struct ipu_psys_kbuffer *kbuf = buf->priv; + + if (!kbuf) + return; + + if (kbuf->db_attach) { + dev_dbg(kbuf->db_attach->dev, + "releasing buffer %d\n", kbuf->fd); + ipu_psys_put_userpages(kbuf->db_attach->priv); + } + kfree(kbuf); +} + +static int ipu_dma_buf_begin_cpu_access(struct dma_buf *dma_buf, +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 6, 0) + size_t start, size_t len, +#endif + enum dma_data_direction dir) +{ + return -ENOTTY; +} + +static void *ipu_dma_buf_vmap(struct dma_buf *dmabuf) +{ + struct dma_buf_attachment *attach; + struct ipu_dma_buf_attach *ipu_attach; + + if (list_empty(&dmabuf->attachments)) + return NULL; + + attach = list_last_entry(&dmabuf->attachments, + struct dma_buf_attachment, node); + ipu_attach = attach->priv; + + if (!ipu_attach || !ipu_attach->pages || !ipu_attach->npages) + return NULL; + + return vm_map_ram(ipu_attach->pages, + ipu_attach->npages, 0, PAGE_KERNEL); +} + +static void ipu_dma_buf_vunmap(struct dma_buf *dmabuf, void *vaddr) +{ + struct dma_buf_attachment *attach; + struct ipu_dma_buf_attach *ipu_attach; + + if (WARN_ON(list_empty(&dmabuf->attachments))) + return; + + attach = 
list_last_entry(&dmabuf->attachments, + struct dma_buf_attachment, node); + ipu_attach = attach->priv; + + if (WARN_ON(!ipu_attach || !ipu_attach->pages || !ipu_attach->npages)) + return; + + vm_unmap_ram(vaddr, ipu_attach->npages); +} + +struct dma_buf_ops ipu_dma_buf_ops = { + .attach = ipu_dma_buf_attach, + .detach = ipu_dma_buf_detach, + .map_dma_buf = ipu_dma_buf_map, + .unmap_dma_buf = ipu_dma_buf_unmap, + .release = ipu_dma_buf_release, + .begin_cpu_access = ipu_dma_buf_begin_cpu_access, +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 14, 0) + .kmap = ipu_dma_buf_kmap, + .kmap_atomic = ipu_dma_buf_kmap_atomic, +#else + .map = ipu_dma_buf_kmap, +#endif +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 19, 0) + .map_atomic = ipu_dma_buf_kmap_atomic, +#endif + .mmap = ipu_dma_buf_mmap, + .vmap = ipu_dma_buf_vmap, + .vunmap = ipu_dma_buf_vunmap, +}; + +static int ipu_psys_open(struct inode *inode, struct file *file) +{ + struct ipu_psys *psys = inode_to_ipu_psys(inode); + struct ipu_device *isp = psys->adev->isp; + struct ipu_psys_fh *fh; + int rval; + + if (isp->flr_done) + return -EIO; + + rval = ipu_buttress_authenticate(isp); + if (rval) { + dev_err(&psys->adev->dev, "FW authentication failed\n"); + return rval; + } + + fh = kzalloc(sizeof(*fh), GFP_KERNEL); + if (!fh) + return -ENOMEM; + + fh->psys = psys; + +#if defined(CONFIG_VIDEO_INTEL_IPU_ACRN) && defined(CONFIG_VIDEO_INTEL_IPU_VIRTIO_BE) + fh->vfops = &psys_vfops; +#endif + + file->private_data = fh; + + mutex_init(&fh->mutex); + INIT_LIST_HEAD(&fh->bufmap); + init_waitqueue_head(&fh->wait); + + rval = ipu_psys_fh_init(fh); + if (rval) + goto open_failed; + + mutex_lock(&psys->mutex); + list_add_tail(&fh->list, &psys->fhs); + mutex_unlock(&psys->mutex); + + return 0; + +open_failed: + mutex_destroy(&fh->mutex); + kfree(fh); + return rval; +} + +static int ipu_psys_release(struct inode *inode, struct file *file) +{ + struct ipu_psys *psys = inode_to_ipu_psys(inode); + struct ipu_psys_fh *fh = file->private_data; 
+ struct ipu_psys_kbuffer *kbuf, *kbuf0; +#if defined(CONFIG_VIDEO_INTEL_IPU_ACRN) && defined(CONFIG_VIDEO_INTEL_IPU_VIRTIO_BE) + struct ipu_dma_buf_attach *ipu_attach; +#endif + + mutex_lock(&fh->mutex); + /* clean up buffers */ + if (!list_empty(&fh->bufmap)) { + list_for_each_entry_safe(kbuf, kbuf0, &fh->bufmap, list) { + list_del(&kbuf->list); + /* Unmap and release buffers */ + if (kbuf->dbuf && kbuf->db_attach) { + struct dma_buf *dbuf; + kbuf->valid = false; +#if defined(CONFIG_VIDEO_INTEL_IPU_ACRN) && defined(CONFIG_VIDEO_INTEL_IPU_VIRTIO_BE) + ipu_attach = kbuf->db_attach->priv; + if (ipu_attach->vma_is_io) + ksys_close(kbuf->fd); +#endif + dma_buf_vunmap(kbuf->dbuf, kbuf->kaddr); + dma_buf_unmap_attachment(kbuf->db_attach, + kbuf->sgt, + DMA_BIDIRECTIONAL); + dma_buf_detach(kbuf->dbuf, kbuf->db_attach); + dbuf = kbuf->dbuf; + kbuf->dbuf = NULL; + kbuf->db_attach = NULL; + dma_buf_put(dbuf); + } else { + if (kbuf->db_attach) + ipu_psys_put_userpages( + kbuf->db_attach->priv); + kfree(kbuf); + } + } + } + mutex_unlock(&fh->mutex); + + mutex_lock(&psys->mutex); + list_del(&fh->list); + + mutex_unlock(&psys->mutex); + + ipu_psys_fh_deinit(fh); + mutex_destroy(&fh->mutex); + kfree(fh); + + return 0; +} + +static int ipu_psys_getbuf(struct ipu_psys_buffer *buf, struct ipu_psys_fh *fh) +{ + struct ipu_psys_kbuffer *kbuf; + struct ipu_psys *psys = fh->psys; + +#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 1, 0) + DEFINE_DMA_BUF_EXPORT_INFO(exp_info); +#endif + struct dma_buf *dbuf; + int ret; + + if (!buf->base.userptr) { + dev_err(&psys->adev->dev, "Buffer allocation not supported\n"); + return -EINVAL; + } + + kbuf = kzalloc(sizeof(*kbuf), GFP_KERNEL); + if (!kbuf) + return -ENOMEM; + + kbuf->len = buf->len; + kbuf->userptr = buf->base.userptr; + kbuf->flags = buf->flags; + +#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 1, 0) + exp_info.ops = &ipu_dma_buf_ops; + exp_info.size = kbuf->len; + exp_info.flags = O_RDWR; + exp_info.priv = kbuf; + + dbuf = 
dma_buf_export(&exp_info); +#else + dbuf = dma_buf_export(kbuf, &ipu_dma_buf_ops, kbuf->len, 0); +#endif + if (IS_ERR(dbuf)) { + kfree(kbuf); + return PTR_ERR(dbuf); + } + + ret = dma_buf_fd(dbuf, 0); + if (ret < 0) { + kfree(kbuf); + return ret; + } + + dev_dbg(&psys->adev->dev, "IOC_GETBUF: userptr %p", buf->base.userptr); + + kbuf->fd = ret; + buf->base.fd = ret; + kbuf->flags = buf->flags &= ~IPU_BUFFER_FLAG_USERPTR; + kbuf->flags = buf->flags |= IPU_BUFFER_FLAG_DMA_HANDLE; + + mutex_lock(&fh->mutex); + list_add_tail(&kbuf->list, &fh->bufmap); + mutex_unlock(&fh->mutex); + + dev_dbg(&psys->adev->dev, "to %d\n", buf->base.fd); + + return 0; +} + +static int ipu_psys_putbuf(struct ipu_psys_buffer *buf, struct ipu_psys_fh *fh) +{ + return 0; +} + +static long ipu_psys_mapbuf(int fd, struct ipu_psys_fh *fh) +{ + struct ipu_psys *psys = fh->psys; + struct ipu_psys_kbuffer *kbuf; + struct dma_buf *dbuf; + int ret; + + mutex_lock(&fh->mutex); + kbuf = ipu_psys_lookup_kbuffer(fh, fd); + + if (!kbuf) { + /* This fd isn't generated by ipu_psys_getbuf, it + * is a new fd. Create a new kbuf item for this fd, and + * add this kbuf to bufmap list. 
+ */ + kbuf = kzalloc(sizeof(*kbuf), GFP_KERNEL); + if (!kbuf) { + mutex_unlock(&fh->mutex); + return -ENOMEM; + } + + list_add_tail(&kbuf->list, &fh->bufmap); + } + + if (kbuf->sgt) { + dev_dbg(&psys->adev->dev, "has been mapped!\n"); + goto mapbuf_end; + } + + kbuf->dbuf = dma_buf_get(fd); + if (IS_ERR(kbuf->dbuf)) { + if (!kbuf->userptr) { + list_del(&kbuf->list); + kfree(kbuf); + } + mutex_unlock(&fh->mutex); + return -EINVAL; + } + + if (kbuf->len == 0) + kbuf->len = kbuf->dbuf->size; + + kbuf->fd = fd; + + kbuf->db_attach = dma_buf_attach(kbuf->dbuf, &psys->adev->dev); + if (IS_ERR(kbuf->db_attach)) { + ret = PTR_ERR(kbuf->db_attach); + goto error_put; + } + + kbuf->sgt = dma_buf_map_attachment(kbuf->db_attach, DMA_BIDIRECTIONAL); + if (IS_ERR_OR_NULL(kbuf->sgt)) { + ret = -EINVAL; + kbuf->sgt = NULL; + dev_dbg(&psys->adev->dev, "map attachment failed\n"); + goto error_detach; + } + + kbuf->dma_addr = sg_dma_address(kbuf->sgt->sgl); + + kbuf->kaddr = dma_buf_vmap(kbuf->dbuf); + if (!kbuf->kaddr) { + ret = -EINVAL; + goto error_unmap; + } + +mapbuf_end: + + kbuf->valid = true; + + mutex_unlock(&fh->mutex); + + dev_dbg(&psys->adev->dev, "IOC_MAPBUF: mapped fd %d\n", fd); + + return 0; + +error_unmap: + dma_buf_unmap_attachment(kbuf->db_attach, kbuf->sgt, DMA_BIDIRECTIONAL); +error_detach: + dma_buf_detach(kbuf->dbuf, kbuf->db_attach); + kbuf->db_attach = NULL; +error_put: + list_del(&kbuf->list); + dbuf = kbuf->dbuf; + + if (!kbuf->userptr) + kfree(kbuf); + + mutex_unlock(&fh->mutex); + dma_buf_put(dbuf); + + return ret; +} + +long ipu_psys_unmapbuf(int fd, struct ipu_psys_fh *fh) +{ + struct ipu_psys_kbuffer *kbuf; + struct ipu_psys *psys = fh->psys; + struct dma_buf *dmabuf; + + mutex_lock(&fh->mutex); + kbuf = ipu_psys_lookup_kbuffer(fh, fd); + if (!kbuf) { + dev_dbg(&psys->adev->dev, "buffer %d not found\n", fd); + mutex_unlock(&fh->mutex); + return -EINVAL; + } + + /* From now on it is not safe to use this kbuffer */ + kbuf->valid = false; + + 
dma_buf_vunmap(kbuf->dbuf, kbuf->kaddr); + dma_buf_unmap_attachment(kbuf->db_attach, kbuf->sgt, DMA_BIDIRECTIONAL); + + dma_buf_detach(kbuf->dbuf, kbuf->db_attach); + + dmabuf = kbuf->dbuf; + + kbuf->db_attach = NULL; + kbuf->dbuf = NULL; + + list_del(&kbuf->list); + + if (!kbuf->userptr) + kfree(kbuf); + + mutex_unlock(&fh->mutex); + dma_buf_put(dmabuf); + + dev_dbg(&psys->adev->dev, "IOC_UNMAPBUF: fd %d\n", fd); + + return 0; +} + +static unsigned int ipu_psys_poll(struct file *file, + struct poll_table_struct *wait) +{ + struct ipu_psys_fh *fh = file->private_data; + struct ipu_psys *psys = fh->psys; + unsigned int res = 0; + + dev_dbg(&psys->adev->dev, "ipu psys poll\n"); + + poll_wait(file, &fh->wait, wait); + + if (ipu_get_completed_kcmd(fh)) + res = POLLIN; + + dev_dbg(&psys->adev->dev, "ipu psys poll res %u\n", res); + + return res; +} + +static long ipu_get_manifest(struct ipu_psys_manifest *manifest, + struct ipu_psys_fh *fh) +{ + struct ipu_psys *psys = fh->psys; + struct ipu_device *isp = psys->adev->isp; + struct ipu_cpd_client_pkg_hdr *client_pkg; + u32 entries; + void *host_fw_data; + dma_addr_t dma_fw_data; + u32 client_pkg_offset; + + host_fw_data = (void *)isp->cpd_fw->data; + dma_fw_data = sg_dma_address(psys->fw_sgt.sgl); + + entries = ipu_cpd_pkg_dir_get_num_entries(psys->pkg_dir); + if (!manifest || manifest->index > entries - 1) { + dev_err(&psys->adev->dev, "invalid argument\n"); + return -EINVAL; + } + + if (!ipu_cpd_pkg_dir_get_size(psys->pkg_dir, manifest->index) || + ipu_cpd_pkg_dir_get_type(psys->pkg_dir, manifest->index) < + IPU_CPD_PKG_DIR_CLIENT_PG_TYPE) { + dev_dbg(&psys->adev->dev, "invalid pkg dir entry\n"); + return -ENOENT; + } + + client_pkg_offset = ipu_cpd_pkg_dir_get_address(psys->pkg_dir, + manifest->index); + client_pkg_offset -= dma_fw_data; + + client_pkg = host_fw_data + client_pkg_offset; + manifest->size = client_pkg->pg_manifest_size; + + if (!manifest->manifest) + return 0; + + if (copy_to_user(manifest->manifest, + 
(uint8_t *) client_pkg + client_pkg->pg_manifest_offs, + manifest->size)) { + return -EFAULT; + } + + return 0; +} + +static long ipu_psys_ioctl(struct file *file, unsigned int cmd, + unsigned long arg) +{ + union { + struct ipu_psys_buffer buf; + struct ipu_psys_command cmd; + struct ipu_psys_event ev; + struct ipu_psys_capability caps; + struct ipu_psys_manifest m; + } karg; + struct ipu_psys_fh *fh = file->private_data; + int err = 0; + void __user *up = (void __user *)arg; + bool copy = (cmd != IPU_IOC_MAPBUF && cmd != IPU_IOC_UNMAPBUF); + + if (copy) { + if (_IOC_SIZE(cmd) > sizeof(karg)) + return -ENOTTY; + + if (_IOC_DIR(cmd) & _IOC_WRITE) { + err = copy_from_user(&karg, up, _IOC_SIZE(cmd)); + if (err) + return -EFAULT; + } + } + + switch (cmd) { + case IPU_IOC_MAPBUF: + err = ipu_psys_mapbuf(arg, fh); + break; + case IPU_IOC_UNMAPBUF: + err = ipu_psys_unmapbuf(arg, fh); + break; + case IPU_IOC_QUERYCAP: + karg.caps = fh->psys->caps; + break; + case IPU_IOC_GETBUF: + err = ipu_psys_getbuf(&karg.buf, fh); + break; + case IPU_IOC_PUTBUF: + err = ipu_psys_putbuf(&karg.buf, fh); + break; + case IPU_IOC_QCMD: + err = ipu_psys_kcmd_new(&karg.cmd, fh); + break; + case IPU_IOC_DQEVENT: + err = ipu_ioctl_dqevent(&karg.ev, fh, file->f_flags); + break; + case IPU_IOC_GET_MANIFEST: + err = ipu_get_manifest(&karg.m, fh); + break; + default: + err = -ENOTTY; + break; + } + + if (err) + return err; + + if (copy && _IOC_DIR(cmd) & _IOC_READ) + if (copy_to_user(up, &karg, _IOC_SIZE(cmd))) + return -EFAULT; + + return 0; +} + +static const struct file_operations ipu_psys_fops = { + .open = ipu_psys_open, + .release = ipu_psys_release, + .unlocked_ioctl = ipu_psys_ioctl, +#ifdef CONFIG_COMPAT + .compat_ioctl = ipu_psys_compat_ioctl32, +#endif + .poll = ipu_psys_poll, + .owner = THIS_MODULE, +}; + +static void ipu_psys_dev_release(struct device *dev) +{ +} + +#ifdef CONFIG_PM +static int psys_runtime_pm_resume(struct device *dev) +{ + struct ipu_bus_device *adev = 
to_ipu_bus_device(dev); + struct ipu_psys *psys = ipu_bus_get_drvdata(adev); + unsigned long flags; + int retval; + + if (!psys) { + WARN(1, "%s called before probing. skipping.\n", __func__); + return 0; + } + /* + * In runtime autosuspend mode, if the psys is in power on state, no + * need to resume again. + */ + spin_lock_irqsave(&psys->power_lock, flags); + if (psys->power) { + spin_unlock_irqrestore(&psys->power_lock, flags); + return 0; + } + spin_unlock_irqrestore(&psys->power_lock, flags); + + if (async_fw_init && !psys->fwcom) { + dev_err(dev, + "%s: asynchronous firmware init not finished, skipping\n", + __func__); + return 0; + } + + if (!ipu_buttress_auth_done(adev->isp)) { + dev_err(dev, "%s: not yet authenticated, skipping\n", __func__); + return 0; + } + + ipu_psys_setup_hw(psys); + + ipu_trace_restore(&psys->adev->dev); + + ipu_configure_spc(adev->isp, + &psys->pdata->ipdata->hw_variant, + IPU_CPD_PKG_DIR_PSYS_SERVER_IDX, + psys->pdata->base, psys->pkg_dir, + psys->pkg_dir_dma_addr); + + retval = ipu_fw_psys_open(psys); + if (retval) { + dev_err(&psys->adev->dev, "Failed to open abi.\n"); + return retval; + } + + spin_lock_irqsave(&psys->power_lock, flags); + psys->power = 1; + spin_unlock_irqrestore(&psys->power_lock, flags); + + return 0; +} + +static int psys_runtime_pm_suspend(struct device *dev) +{ + struct ipu_bus_device *adev = to_ipu_bus_device(dev); + struct ipu_psys *psys = ipu_bus_get_drvdata(adev); + unsigned long flags; + int rval; + + if (!psys) { + WARN(1, "%s called before probing. skipping.\n", __func__); + return 0; + } + + if (!psys->power) + return 0; + + spin_lock_irqsave(&psys->power_lock, flags); + psys->power = 0; + spin_unlock_irqrestore(&psys->power_lock, flags); + + /* + * We can trace failure but better to not return an error. + * At suspend we are progressing towards psys power gated state. + * Any hang / failure inside psys will be forgotten soon. 
+ */ + rval = ipu_fw_psys_close(psys); + if (rval) + dev_err(dev, "Device close failure: %d\n", rval); + + return 0; +} + +static const struct dev_pm_ops psys_pm_ops = { + .runtime_suspend = psys_runtime_pm_suspend, + .runtime_resume = psys_runtime_pm_resume, +}; + +#define PSYS_PM_OPS (&psys_pm_ops) +#else +#define PSYS_PM_OPS NULL +#endif + +static int cpd_fw_reload(struct ipu_device *isp) +{ + struct ipu_psys *psys = ipu_bus_get_drvdata(isp->psys); + int rval; + + if (!isp->secure_mode) { + dev_warn(&isp->pdev->dev, + "CPD firmware reload was only supported for secure mode.\n"); + return -EINVAL; + } + + if (isp->cpd_fw) { + ipu_cpd_free_pkg_dir(isp->psys, psys->pkg_dir, + psys->pkg_dir_dma_addr, + psys->pkg_dir_size); + + ipu_buttress_unmap_fw_image(isp->psys, &psys->fw_sgt); + release_firmware(isp->cpd_fw); + isp->cpd_fw = NULL; + dev_info(&isp->pdev->dev, "Old FW removed\n"); + } + + rval = request_cpd_fw(&isp->cpd_fw, isp->cpd_fw_name, + &isp->pdev->dev); + if (rval) { + dev_err(&isp->pdev->dev, "Requesting firmware(%s) failed\n", + IPU_CPD_FIRMWARE_NAME); + return rval; + } + + rval = ipu_cpd_validate_cpd_file(isp, isp->cpd_fw->data, + isp->cpd_fw->size); + if (rval) { + dev_err(&isp->pdev->dev, "Failed to validate cpd file\n"); + goto out_release_firmware; + } + + rval = ipu_buttress_map_fw_image(isp->psys, isp->cpd_fw, &psys->fw_sgt); + if (rval) + goto out_release_firmware; + + psys->pkg_dir = ipu_cpd_create_pkg_dir(isp->psys, + isp->cpd_fw->data, + sg_dma_address(psys->fw_sgt.sgl), + &psys->pkg_dir_dma_addr, + &psys->pkg_dir_size); + + if (!psys->pkg_dir) { + rval = -EINVAL; + goto out_unmap_fw_image; + } + + isp->pkg_dir = psys->pkg_dir; + isp->pkg_dir_dma_addr = psys->pkg_dir_dma_addr; + isp->pkg_dir_size = psys->pkg_dir_size; + + if (!isp->secure_mode) + return 0; + + rval = ipu_fw_authenticate(isp, 1); + if (rval) + goto out_free_pkg_dir; + + return 0; + +out_free_pkg_dir: + ipu_cpd_free_pkg_dir(isp->psys, psys->pkg_dir, + psys->pkg_dir_dma_addr, 
psys->pkg_dir_size); +out_unmap_fw_image: + ipu_buttress_unmap_fw_image(isp->psys, &psys->fw_sgt); +out_release_firmware: + release_firmware(isp->cpd_fw); + isp->cpd_fw = NULL; + + return rval; +} + +static int ipu_psys_icache_prefetch_sp_get(void *data, u64 *val) +{ + struct ipu_psys *psys = data; + + *val = psys->icache_prefetch_sp; + return 0; +} + +static int ipu_psys_icache_prefetch_sp_set(void *data, u64 val) +{ + struct ipu_psys *psys = data; + + if (val != !!val) + return -EINVAL; + + psys->icache_prefetch_sp = val; + + return 0; +} + +DEFINE_SIMPLE_ATTRIBUTE(psys_icache_prefetch_sp_fops, + ipu_psys_icache_prefetch_sp_get, + ipu_psys_icache_prefetch_sp_set, "%llu\n"); + +static int ipu_psys_icache_prefetch_isp_get(void *data, u64 *val) +{ + struct ipu_psys *psys = data; + + *val = psys->icache_prefetch_isp; + return 0; +} + +static int ipu_psys_icache_prefetch_isp_set(void *data, u64 val) +{ + struct ipu_psys *psys = data; + + if (val != !!val) + return -EINVAL; + + psys->icache_prefetch_isp = val; + + return 0; +} + +DEFINE_SIMPLE_ATTRIBUTE(psys_icache_prefetch_isp_fops, + ipu_psys_icache_prefetch_isp_get, + ipu_psys_icache_prefetch_isp_set, "%llu\n"); + +static int ipu_psys_init_debugfs(struct ipu_psys *psys) +{ + struct dentry *file; + struct dentry *dir; + + dir = debugfs_create_dir("psys", psys->adev->isp->ipu_dir); + if (IS_ERR(dir)) + return -ENOMEM; + + file = debugfs_create_file("icache_prefetch_sp", 0600, + dir, psys, &psys_icache_prefetch_sp_fops); + if (IS_ERR(file)) + goto err; + + file = debugfs_create_file("icache_prefetch_isp", 0600, + dir, psys, &psys_icache_prefetch_isp_fops); + if (IS_ERR(file)) + goto err; + + psys->debugfsdir = dir; + + + return 0; +err: + debugfs_remove_recursive(dir); + return -ENOMEM; +} + +static int ipu_psys_sched_cmd(void *ptr) +{ + struct ipu_psys *psys = ptr; + size_t pending = 0; + + while (1) { + wait_event_interruptible(psys->sched_cmd_wq, + (kthread_should_stop() || (pending = + 
atomic_read(&psys->wakeup_sched_thread_count)))); + + if (kthread_should_stop()) + break; + + if (pending == 0) + continue; + + mutex_lock(&psys->mutex); + atomic_set(&psys->wakeup_sched_thread_count, 0); + ipu_psys_run_next(psys); + mutex_unlock(&psys->mutex); + } + + return 0; +} + +static void start_sp(struct ipu_bus_device *adev) +{ + struct ipu_psys *psys = ipu_bus_get_drvdata(adev); + void __iomem *spc_regs_base = psys->pdata->base + + psys->pdata->ipdata->hw_variant.spc_offset; + u32 val = 0; + + val |= IPU_ISYS_SPC_STATUS_START | + IPU_ISYS_SPC_STATUS_RUN | + IPU_ISYS_SPC_STATUS_CTRL_ICACHE_INVALIDATE; + val |= psys->icache_prefetch_sp ? + IPU_ISYS_SPC_STATUS_ICACHE_PREFETCH : 0; + writel(val, spc_regs_base + IPU_ISYS_REG_SPC_STATUS_CTRL); +} + +static int query_sp(struct ipu_bus_device *adev) +{ + struct ipu_psys *psys = ipu_bus_get_drvdata(adev); + void __iomem *spc_regs_base = psys->pdata->base + + psys->pdata->ipdata->hw_variant.spc_offset; + u32 val = readl(spc_regs_base + IPU_ISYS_REG_SPC_STATUS_CTRL); + + /* return true when READY == 1, START == 0 */ + val &= IPU_ISYS_SPC_STATUS_READY | IPU_ISYS_SPC_STATUS_START; + + return val == IPU_ISYS_SPC_STATUS_READY; +} + +static int ipu_psys_fw_init(struct ipu_psys *psys) +{ + struct ipu_fw_syscom_queue_config + fw_psys_cmd_queue_cfg[IPU_FW_PSYS_N_PSYS_CMD_QUEUE_ID]; + struct ipu_fw_syscom_queue_config fw_psys_event_queue_cfg[] = { + { + IPU_FW_PSYS_EVENT_QUEUE_SIZE, + sizeof(struct ipu_fw_psys_event) + } + }; + struct ipu_fw_psys_srv_init server_init = { + .ddr_pkg_dir_address = 0, + .host_ddr_pkg_dir = NULL, + .pkg_dir_size = 0, + .icache_prefetch_sp = psys->icache_prefetch_sp, + .icache_prefetch_isp = psys->icache_prefetch_isp, + }; + struct ipu_fw_com_cfg fwcom = { + .num_input_queues = IPU_FW_PSYS_N_PSYS_CMD_QUEUE_ID, + .num_output_queues = IPU_FW_PSYS_N_PSYS_EVENT_QUEUE_ID, + .output = fw_psys_event_queue_cfg, + .specific_addr = &server_init, + .specific_size = sizeof(server_init), + .cell_start = 
start_sp, + .cell_ready = query_sp, + }; + int rval, i; + + for (i = 0; i < IPU_FW_PSYS_N_PSYS_CMD_QUEUE_ID; i++) { + fw_psys_cmd_queue_cfg[i].queue_size = + IPU_FW_PSYS_CMD_QUEUE_SIZE; + fw_psys_cmd_queue_cfg[i].token_size = + sizeof(struct ipu_fw_psys_cmd); + } + + fwcom.input = fw_psys_cmd_queue_cfg; + + fwcom.dmem_addr = psys->pdata->ipdata->hw_variant.dmem_offset; + + rval = ipu_buttress_authenticate(psys->adev->isp); + if (rval) { + dev_err(&psys->adev->dev, "FW authentication failed(%d)\n", + rval); + return rval; + } + + psys->fwcom = ipu_fw_com_prepare(&fwcom, psys->adev, psys->pdata->base); + if (!psys->fwcom) { + dev_err(&psys->adev->dev, "psys fw com prepare failed\n"); + return -EIO; + } + + return 0; +} + +static void run_fw_init_work(struct work_struct *work) +{ + struct fw_init_task *task = (struct fw_init_task *)work; + struct ipu_psys *psys = task->psys; + int rval; + + rval = ipu_psys_fw_init(psys); + + if (rval) { + dev_err(&psys->adev->dev, "FW init failed(%d)\n", rval); + ipu_psys_remove(psys->adev); + } else { + dev_info(&psys->adev->dev, "FW init done\n"); + } +} + +static int ipu_psys_probe(struct ipu_bus_device *adev) +{ + struct ipu_mmu *mmu = dev_get_drvdata(adev->iommu); + struct ipu_device *isp = adev->isp; + struct ipu_psys_pg *kpg, *kpg0; + struct ipu_psys *psys; + const struct firmware *fw; + unsigned int minor; + int i, rval = -E2BIG; + + trace_printk("B|%d|TMWK\n", current->pid); + + /* Has the domain been attached? 
*/ + if (!mmu) { + trace_printk("E|TMWK\n"); + return -EPROBE_DEFER; + } + + mutex_lock(&ipu_psys_mutex); + + minor = find_next_zero_bit(ipu_psys_devices, IPU_PSYS_NUM_DEVICES, 0); + if (minor == IPU_PSYS_NUM_DEVICES) { + dev_err(&adev->dev, "too many devices\n"); + goto out_unlock; + } + + psys = devm_kzalloc(&adev->dev, sizeof(*psys), GFP_KERNEL); + if (!psys) { + rval = -ENOMEM; + goto out_unlock; + } + + psys->adev = adev; + psys->pdata = adev->pdata; +#ifdef CONFIG_VIDEO_INTEL_IPU4 + psys->icache_prefetch_sp = is_ipu_hw_bxtp_e0(isp); +#else + psys->icache_prefetch_sp = 0; +#endif + + ipu_trace_init(adev->isp, psys->pdata->base, &adev->dev, + psys_trace_blocks); + + cdev_init(&psys->cdev, &ipu_psys_fops); + psys->cdev.owner = ipu_psys_fops.owner; + + rval = cdev_add(&psys->cdev, MKDEV(MAJOR(ipu_psys_dev_t), minor), 1); + if (rval) { + dev_err(&adev->dev, "cdev_add failed (%d)\n", rval); + goto out_unlock; + } + + set_bit(minor, ipu_psys_devices); + + spin_lock_init(&psys->power_lock); + spin_lock_init(&psys->pgs_lock); + psys->power = 0; + psys->timeout = IPU_PSYS_CMD_TIMEOUT_MS; + + mutex_init(&psys->mutex); + INIT_LIST_HEAD(&psys->fhs); + INIT_LIST_HEAD(&psys->pgs); + INIT_LIST_HEAD(&psys->started_kcmds_list); + INIT_WORK(&psys->watchdog_work, ipu_psys_watchdog_work); + + init_waitqueue_head(&psys->sched_cmd_wq); + atomic_set(&psys->wakeup_sched_thread_count, 0); + /* + * Create a thread to schedule commands sent to IPU firmware. + * The thread reduces the coupling between the command scheduler + * and queueing commands from the user to driver. 
+ */ + psys->sched_cmd_thread = kthread_run(ipu_psys_sched_cmd, psys, + "psys_sched_cmd"); + + if (IS_ERR(psys->sched_cmd_thread)) { + psys->sched_cmd_thread = NULL; + mutex_destroy(&psys->mutex); + goto out_unlock; + } + + ipu_bus_set_drvdata(adev, psys); + + rval = ipu_psys_resource_pool_init(&psys->resource_pool_started); + if (rval < 0) { + dev_err(&psys->dev, + "unable to alloc process group resources\n"); + goto out_mutex_destroy; + } + + rval = ipu_psys_resource_pool_init(&psys->resource_pool_running); + if (rval < 0) { + dev_err(&psys->dev, + "unable to alloc process group resources\n"); + goto out_resources_started_free; + } + + fw = adev->isp->cpd_fw; + + rval = ipu_buttress_map_fw_image(adev, fw, &psys->fw_sgt); + if (rval) + goto out_resources_running_free; + + psys->pkg_dir = ipu_cpd_create_pkg_dir(adev, fw->data, + sg_dma_address(psys->fw_sgt.sgl), + &psys->pkg_dir_dma_addr, + &psys->pkg_dir_size); + if (!psys->pkg_dir) { + rval = -ENOMEM; + goto out_unmap_fw_image; + } + + /* allocate and map memory for process groups */ + for (i = 0; i < IPU_PSYS_PG_POOL_SIZE; i++) { + kpg = kzalloc(sizeof(*kpg), GFP_KERNEL); + if (!kpg) + goto out_free_pgs; + kpg->pg = dma_alloc_attrs(&adev->dev, + IPU_PSYS_PG_MAX_SIZE, + &kpg->pg_dma_addr, + GFP_KERNEL, DMA_ATTR_NON_CONSISTENT); + if (!kpg->pg) { + kfree(kpg); + goto out_free_pgs; + } + kpg->size = IPU_PSYS_PG_MAX_SIZE; + list_add(&kpg->list, &psys->pgs); + } + + isp->pkg_dir = psys->pkg_dir; + isp->pkg_dir_dma_addr = psys->pkg_dir_dma_addr; + isp->pkg_dir_size = psys->pkg_dir_size; + + psys->caps.pg_count = ipu_cpd_pkg_dir_get_num_entries(psys->pkg_dir); + + dev_info(&adev->dev, "pkg_dir entry count:%d\n", psys->caps.pg_count); + if (async_fw_init) { + INIT_DELAYED_WORK((struct delayed_work *)&fw_init_task, + run_fw_init_work); + fw_init_task.psys = psys; + schedule_delayed_work((struct delayed_work *)&fw_init_task, 0); + } else { + rval = ipu_psys_fw_init(psys); + } + + if (rval) { + dev_err(&adev->dev, "FW init 
failed(%d)\n", rval); + goto out_free_pgs; + } + + psys->dev.parent = &adev->dev; + psys->dev.bus = &ipu_psys_bus; + psys->dev.devt = MKDEV(MAJOR(ipu_psys_dev_t), minor); + psys->dev.release = ipu_psys_dev_release; + dev_set_name(&psys->dev, "ipu-psys%d", minor); + rval = device_register(&psys->dev); + if (rval < 0) { + dev_err(&psys->dev, "psys device_register failed\n"); + goto out_release_fw_com; + } + + /* Add the hw stepping information to caps */ + strlcpy(psys->caps.dev_model, IPU_MEDIA_DEV_MODEL_NAME, + sizeof(psys->caps.dev_model)); + + pm_runtime_allow(&adev->dev); + pm_runtime_enable(&adev->dev); + + pm_runtime_set_autosuspend_delay(&psys->adev->dev, + IPU_PSYS_AUTOSUSPEND_DELAY); + pm_runtime_use_autosuspend(&psys->adev->dev); + pm_runtime_mark_last_busy(&psys->adev->dev); + + mutex_unlock(&ipu_psys_mutex); + + /* Debug fs failure is not fatal. */ + ipu_psys_init_debugfs(psys); + + adev->isp->cpd_fw_reload = &cpd_fw_reload; + + dev_info(&adev->dev, "psys probe minor: %d\n", minor); + + trace_printk("E|TMWK\n"); + return 0; + +out_release_fw_com: + ipu_fw_com_release(psys->fwcom, 1); +out_free_pgs: + list_for_each_entry_safe(kpg, kpg0, &psys->pgs, list) { + dma_free_attrs(&adev->dev, kpg->size, kpg->pg, + kpg->pg_dma_addr, DMA_ATTR_NON_CONSISTENT); + kfree(kpg); + } + + if (!isp->secure_mode) + ipu_cpd_free_pkg_dir(adev, psys->pkg_dir, + psys->pkg_dir_dma_addr, + psys->pkg_dir_size); +out_unmap_fw_image: + ipu_buttress_unmap_fw_image(adev, &psys->fw_sgt); +out_resources_running_free: + ipu_psys_resource_pool_cleanup(&psys->resource_pool_running); +out_resources_started_free: + ipu_psys_resource_pool_cleanup(&psys->resource_pool_started); +out_mutex_destroy: + mutex_destroy(&psys->mutex); + cdev_del(&psys->cdev); + if (psys->sched_cmd_thread) { + kthread_stop(psys->sched_cmd_thread); + psys->sched_cmd_thread = NULL; + } +out_unlock: + /* Safe to call even if the init is not called */ + ipu_trace_uninit(&adev->dev); + mutex_unlock(&ipu_psys_mutex); + + 
trace_printk("E|TMWK\n"); + return rval; +} + +static void ipu_psys_remove(struct ipu_bus_device *adev) +{ + struct ipu_device *isp = adev->isp; + struct ipu_psys *psys = ipu_bus_get_drvdata(adev); + struct ipu_psys_pg *kpg, *kpg0; + + if (isp->ipu_dir) + debugfs_remove_recursive(psys->debugfsdir); + + flush_workqueue(IPU_PSYS_WORK_QUEUE); + + if (psys->sched_cmd_thread) { + kthread_stop(psys->sched_cmd_thread); + psys->sched_cmd_thread = NULL; + } + + pm_runtime_dont_use_autosuspend(&psys->adev->dev); + + mutex_lock(&ipu_psys_mutex); + + list_for_each_entry_safe(kpg, kpg0, &psys->pgs, list) { + dma_free_attrs(&adev->dev, kpg->size, kpg->pg, + kpg->pg_dma_addr, DMA_ATTR_NON_CONSISTENT); + kfree(kpg); + } + + if (psys->fwcom && ipu_fw_com_release(psys->fwcom, 1)) + dev_err(&adev->dev, "fw com release failed.\n"); + + isp->pkg_dir = NULL; + isp->pkg_dir_dma_addr = 0; + isp->pkg_dir_size = 0; + + ipu_cpd_free_pkg_dir(adev, psys->pkg_dir, + psys->pkg_dir_dma_addr, psys->pkg_dir_size); + + ipu_buttress_unmap_fw_image(adev, &psys->fw_sgt); + + kfree(psys->server_init); + kfree(psys->syscom_config); + + ipu_trace_uninit(&adev->dev); + + ipu_psys_resource_pool_cleanup(&psys->resource_pool_started); + ipu_psys_resource_pool_cleanup(&psys->resource_pool_running); + + device_unregister(&psys->dev); + + clear_bit(MINOR(psys->cdev.dev), ipu_psys_devices); + cdev_del(&psys->cdev); + + mutex_unlock(&ipu_psys_mutex); + + mutex_destroy(&psys->mutex); + + dev_info(&adev->dev, "removed\n"); +} + +static irqreturn_t psys_isr_threaded(struct ipu_bus_device *adev) +{ + struct ipu_psys *psys = ipu_bus_get_drvdata(adev); + void __iomem *base = psys->pdata->base; + u32 status; + int r; + + mutex_lock(&psys->mutex); +#ifdef CONFIG_PM + if (!READ_ONCE(psys->power)) { + mutex_unlock(&psys->mutex); + return IRQ_NONE; + } + + r = pm_runtime_get_sync(&psys->adev->dev); + if (r < 0) { + pm_runtime_put(&psys->adev->dev); + mutex_unlock(&psys->mutex); + return IRQ_NONE; + } +#endif + + status = 
readl(base + IPU_REG_PSYS_GPDEV_IRQ_STATUS); + writel(status, base + IPU_REG_PSYS_GPDEV_IRQ_CLEAR); + + if (status & IPU_PSYS_GPDEV_IRQ_FWIRQ(IPU_PSYS_GPDEV_FWIRQ0)) { + writel(0, base + IPU_REG_PSYS_GPDEV_FWIRQ(0)); + ipu_psys_handle_events(psys); + } + + pm_runtime_mark_last_busy(&psys->adev->dev); + pm_runtime_put_autosuspend(&psys->adev->dev); + mutex_unlock(&psys->mutex); + + return status ? IRQ_HANDLED : IRQ_NONE; +} + + +static struct ipu_bus_driver ipu_psys_driver = { + .probe = ipu_psys_probe, + .remove = ipu_psys_remove, + .isr_threaded = psys_isr_threaded, + .wanted = IPU_PSYS_NAME, + .drv = { + .name = IPU_PSYS_NAME, + .owner = THIS_MODULE, + .pm = PSYS_PM_OPS, + .probe_type = PROBE_PREFER_ASYNCHRONOUS, + }, +}; + +static int __init ipu_psys_init(void) +{ + int rval = alloc_chrdev_region(&ipu_psys_dev_t, 0, + IPU_PSYS_NUM_DEVICES, IPU_PSYS_NAME); + if (rval) { + pr_err("can't alloc psys chrdev region (%d)\n", rval); + return rval; + } + + rval = bus_register(&ipu_psys_bus); + if (rval) { + pr_warn("can't register psys bus (%d)\n", rval); + goto out_bus_register; + } + + ipu_bus_register_driver(&ipu_psys_driver); + + return rval; + +out_bus_register: + unregister_chrdev_region(ipu_psys_dev_t, IPU_PSYS_NUM_DEVICES); + + return rval; +} + +static void __exit ipu_psys_exit(void) +{ + ipu_bus_unregister_driver(&ipu_psys_driver); + bus_unregister(&ipu_psys_bus); + unregister_chrdev_region(ipu_psys_dev_t, IPU_PSYS_NUM_DEVICES); +} + +static const struct pci_device_id ipu_pci_tbl[] = { + {PCI_DEVICE(PCI_VENDOR_ID_INTEL, IPU_PCI_ID)}, + {0,} +}; +MODULE_DEVICE_TABLE(pci, ipu_pci_tbl); + +module_init(ipu_psys_init); +module_exit(ipu_psys_exit); + +MODULE_AUTHOR("Antti Laakso "); +MODULE_AUTHOR("Bin Han "); +MODULE_AUTHOR("Renwei Wu "); +MODULE_AUTHOR("Jianxu Zheng "); +MODULE_AUTHOR("Xia Wu "); +MODULE_AUTHOR("Bingbu Cao "); +MODULE_AUTHOR("Zaikuo Wang "); +MODULE_AUTHOR("Yunliang Ding "); +MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("Intel ipu processing system 
driver"); diff --git a/drivers/media/pci/intel/ipu-psys.h b/drivers/media/pci/intel/ipu-psys.h new file mode 100644 index 0000000000000..09780c811db47 --- /dev/null +++ b/drivers/media/pci/intel/ipu-psys.h @@ -0,0 +1,210 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (C) 2013 - 2018 Intel Corporation */ + +#ifndef IPU_PSYS_H +#define IPU_PSYS_H + +#include +#include + +#include "ipu.h" +#include "ipu-pdata.h" +#include "ipu-fw-psys.h" +#if defined(CONFIG_VIDEO_INTEL_IPU_ACRN) && defined(CONFIG_VIDEO_INTEL_IPU_VIRTIO_BE) +#include "ipu-psys-virt.h" +#endif +#include "ipu-platform-psys.h" + +#define IPU_PSYS_PG_POOL_SIZE 16 +#define IPU_PSYS_PG_MAX_SIZE 2048 +#define IPU_MAX_PSYS_CMD_BUFFERS 32 +#define IPU_PSYS_EVENT_CMD_COMPLETE IPU_FW_PSYS_EVENT_TYPE_SUCCESS +#define IPU_PSYS_EVENT_FRAGMENT_COMPLETE IPU_FW_PSYS_EVENT_TYPE_SUCCESS +#define IPU_PSYS_CLOSE_TIMEOUT_US 50 +#define IPU_PSYS_CLOSE_TIMEOUT (100000 / IPU_PSYS_CLOSE_TIMEOUT_US) +#define IPU_PSYS_WORK_QUEUE system_power_efficient_wq +#define IPU_MAX_RESOURCES 128 + +/* Opaque structure. Do not access fields. */ +struct ipu_resource { + u32 id; + int elements; /* Number of elements available to allocation */ + unsigned long *bitmap; /* Allocation bitmap, a bit for each element */ +}; + +enum ipu_resource_type { + IPU_RESOURCE_DEV_CHN = 0, + IPU_RESOURCE_EXT_MEM, + IPU_RESOURCE_DFM +}; + +/* Allocation of resource(s) */ +/* Opaque structure. Do not access fields. */ +struct ipu_resource_alloc { + enum ipu_resource_type type; + struct ipu_resource *resource; + int elements; + int pos; +}; + +/* + * This struct represents all of the currently allocated + * resources from IPU model. It is used also for allocating + * resources for the next set of PGs to be run on IPU + * (ie. those PGs which are not yet being run and which don't + * yet reserve real IPU resources). 
+ */ +#define IPU_PSYS_RESOURCE_OVERALLOC 2 /* Some room for ABI / ext lib delta */ +struct ipu_psys_resource_pool { + u32 cells; /* Bitmask of cells allocated */ + struct ipu_resource dev_channels[IPU_FW_PSYS_N_DEV_CHN_ID + + IPU_PSYS_RESOURCE_OVERALLOC]; + struct ipu_resource ext_memory[IPU_FW_PSYS_N_MEM_ID + + IPU_PSYS_RESOURCE_OVERALLOC]; + struct ipu_resource dfms[IPU_FW_PSYS_N_DEV_DFM_ID + + IPU_PSYS_RESOURCE_OVERALLOC]; +}; + +/* + * This struct keeps book of the resources allocated for a specific PG. + * It is used for freeing up resources from struct ipu_psys_resources + * when the PG is released from IPU4 (or model of IPU4). + */ +struct ipu_psys_resource_alloc { + u32 cells; /* Bitmask of cells needed */ + struct ipu_resource_alloc + resource_alloc[IPU_MAX_RESOURCES]; + int resources; +}; + +struct task_struct; +struct ipu_psys { + struct ipu_psys_capability caps; + struct cdev cdev; + struct device dev; + + struct mutex mutex; /* Psys various */ + int power; + bool icache_prefetch_sp; + bool icache_prefetch_isp; + spinlock_t power_lock; /* Serialize access to power */ + spinlock_t pgs_lock; /* Protect pgs list access */ + struct list_head fhs; + struct list_head pgs; + struct list_head started_kcmds_list; + struct ipu_psys_pdata *pdata; + struct ipu_bus_device *adev; + struct ia_css_syscom_context *dev_ctx; + struct ia_css_syscom_config *syscom_config; + struct ia_css_psys_server_init *server_init; + struct task_struct *sched_cmd_thread; + struct work_struct watchdog_work; + wait_queue_head_t sched_cmd_wq; + atomic_t wakeup_sched_thread_count; + struct dentry *debugfsdir; + + /* Resources needed to be managed for process groups */ + struct ipu_psys_resource_pool resource_pool_running; + struct ipu_psys_resource_pool resource_pool_started; + + const struct firmware *fw; + struct sg_table fw_sgt; + u64 *pkg_dir; + dma_addr_t pkg_dir_dma_addr; + unsigned int pkg_dir_size; + unsigned long timeout; + + int active_kcmds, started_kcmds; + void *fwcom; +}; + 
+struct ipu_psys_fh { +#if defined(CONFIG_VIDEO_INTEL_IPU_ACRN) && defined(CONFIG_VIDEO_INTEL_IPU_VIRTIO_BE) + const struct psys_fops_virt *vfops; +#endif + struct ipu_psys *psys; + struct mutex mutex; /* Protects bufmap & kcmds fields */ + struct list_head list; + struct list_head bufmap; + wait_queue_head_t wait; + struct ipu_psys_scheduler sched; +}; + +struct ipu_psys_pg { + struct ipu_fw_psys_process_group *pg; + size_t size; + size_t pg_size; + dma_addr_t pg_dma_addr; + struct list_head list; + struct ipu_psys_resource_alloc resource_alloc; +}; + +struct ipu_psys_kcmd { + struct ipu_psys_fh *fh; + struct list_head list; + struct list_head started_list; + enum ipu_psys_cmd_state state; + void *pg_manifest; + size_t pg_manifest_size; + struct ipu_psys_kbuffer **kbufs; + struct ipu_psys_buffer *buffers; + size_t nbuffers; + struct ipu_fw_psys_process_group *pg_user; + struct ipu_psys_pg *kpg; + u64 user_token; + u64 issue_id; + u32 priority; + struct ipu_buttress_constraint constraint; + struct ipu_psys_event ev; + struct timer_list watchdog; +}; + +struct ipu_dma_buf_attach { + struct device *dev; + u64 len; + void *userptr; + struct sg_table *sgt; + bool vma_is_io; + struct page **pages; + size_t npages; +}; + +struct ipu_psys_kbuffer { + u64 len; + void *userptr; + u32 flags; + int fd; + void *kaddr; + struct list_head list; + dma_addr_t dma_addr; + struct sg_table *sgt; + struct dma_buf_attachment *db_attach; + struct dma_buf *dbuf; + bool valid; /* True when buffer is usable */ +}; + +#define inode_to_ipu_psys(inode) \ + container_of((inode)->i_cdev, struct ipu_psys, cdev) + +#ifdef CONFIG_COMPAT +long ipu_psys_compat_ioctl32(struct file *file, unsigned int cmd, + unsigned long arg); +#endif + +void ipu_psys_setup_hw(struct ipu_psys *psys); +void ipu_psys_handle_events(struct ipu_psys *psys); +int ipu_psys_kcmd_new(struct ipu_psys_command *cmd, struct ipu_psys_fh *fh); +void ipu_psys_run_next(struct ipu_psys *psys); +void ipu_psys_watchdog_work(struct 
work_struct *work); +struct ipu_psys_pg *__get_pg_buf(struct ipu_psys *psys, size_t pg_size); +struct ipu_psys_kbuffer * +ipu_psys_lookup_kbuffer(struct ipu_psys_fh *fh, int fd); +struct ipu_psys_kbuffer * +ipu_psys_lookup_kbuffer_by_kaddr(struct ipu_psys_fh *fh, void *kaddr); +int ipu_psys_resource_pool_init(struct ipu_psys_resource_pool *pool); +void ipu_psys_resource_pool_cleanup(struct ipu_psys_resource_pool *pool); +struct ipu_psys_kcmd *ipu_get_completed_kcmd(struct ipu_psys_fh *fh); +long ipu_ioctl_dqevent(struct ipu_psys_event *event, + struct ipu_psys_fh *fh, unsigned int f_flags); + +#endif /* IPU_PSYS_H */ diff --git a/drivers/media/pci/intel/ipu-trace-event.h b/drivers/media/pci/intel/ipu-trace-event.h new file mode 100644 index 0000000000000..fe61ac27a5804 --- /dev/null +++ b/drivers/media/pci/intel/ipu-trace-event.h @@ -0,0 +1,101 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (C) 2015 - 2018 Intel Corporation */ + +#undef TRACE_SYSTEM +#define TRACE_SYSTEM ipu + +#if !defined(IPU_TRACE_EVENT_H) || defined(TRACE_HEADER_MULTI_READ) +#define IPU_EVENT_H + +#include + +#ifdef IPU_SOF_SEQID_TRACE +TRACE_EVENT(ipu_sof_seqid, + TP_PROTO(unsigned int seqid, unsigned int csiport, + unsigned int csivc), + TP_ARGS(seqid, csiport, csivc), + TP_STRUCT__entry(__field(unsigned int, seqid) + __field(unsigned int, csiport) + __field(unsigned int, csivc) + ), + TP_fast_assign(__entry->seqid = seqid; + __entry->csiport = csiport; + __entry->csivc = csivc;), + TP_printk("seqid<%u>,csiport<%u>,csivc<%u>", __entry->seqid, + __entry->csiport, __entry->csivc) + ); +#endif + +#ifdef IPU_EOF_SEQID_TRACE +TRACE_EVENT(ipu_eof_seqid, + TP_PROTO(unsigned int seqid, unsigned int csiport, + unsigned int csivc), + TP_ARGS(seqid, csiport, csivc), + TP_STRUCT__entry(__field(unsigned int, seqid) + __field(unsigned int, csiport) + __field(unsigned int, csivc) + ), + TP_fast_assign(__entry->seqid = seqid; + __entry->csiport = csiport; + __entry->csivc = csivc;), + 
TP_printk("seqid<%u>,csiport<%u>,csivc<%u>", __entry->seqid, + __entry->csiport, __entry->csivc) + ); +#endif + +#ifdef IPU_PERF_REG_TRACE +TRACE_EVENT(ipu_perf_reg, + TP_PROTO(unsigned int addr, unsigned int val), + TP_ARGS(addr, val), TP_STRUCT__entry(__field(unsigned int, addr) + __field(unsigned int, val) + ), + TP_fast_assign(__entry->addr = addr; + __entry->val = val;), + TP_printk("addr=%u,val=%u", __entry->addr, __entry->val) + ); +#endif + +#ifdef IPU_PG_KCMD_TRACE +TRACE_EVENT(ipu_pg_kcmd, + TP_PROTO(const char *func, unsigned int id, + unsigned long long issue_id, unsigned int pri, + unsigned int pg_id, unsigned int load_cycles, + unsigned int init_cycles, + unsigned int processing_cycles), + TP_ARGS(func, id, issue_id, pri, pg_id, load_cycles, init_cycles, + processing_cycles), + TP_STRUCT__entry(__field(const char *, func) + __field(unsigned int, id) + __field(unsigned long long, issue_id) + __field(unsigned int, pri) + __field(unsigned int, pg_id) + __field(unsigned int, load_cycles) + __field(unsigned int, init_cycles) + __field(unsigned int, processing_cycles) + ), + TP_fast_assign(__entry->func = func; + __entry->id = id; + __entry->issue_id = issue_id; + __entry->pri = pri; + __entry->pg_id = pg_id; + __entry->load_cycles = load_cycles; + __entry->init_cycles = init_cycles; + __entry->processing_cycles = processing_cycles;), + TP_printk + ("pg-kcmd: func=%s,id=%u,issue_id=0x%llx,pri=%u,pg_id=%d," + "load_cycles=%u,init_cycles=%u," + "processing_cycles=%u", + __entry->func, __entry->id, __entry->issue_id, __entry->pri, + __entry->pg_id, __entry->load_cycles, __entry->init_cycles, + __entry->processing_cycles) + ); + +#endif +#endif + +#undef TRACE_INCLUDE_PATH +#undef TRACE_INCLUDE_FILE +#define TRACE_INCLUDE_PATH . 
+#define TRACE_INCLUDE_FILE ipu-trace-event +/* This part must be outside protection */ +#include diff --git a/drivers/media/pci/intel/ipu-trace.c b/drivers/media/pci/intel/ipu-trace.c new file mode 100644 index 0000000000000..5e0795d786494 --- /dev/null +++ b/drivers/media/pci/intel/ipu-trace.c @@ -0,0 +1,915 @@ +// SPDX-License-Identifier: GPL-2.0 +// Copyright (C) 2014 - 2018 Intel Corporation + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "ipu.h" +#include "ipu-platform-regs.h" +#include "ipu-trace.h" + +/* Input data processing states */ +enum config_file_parse_states { + STATE_FILL = 0, + STATE_COMMENT, + STATE_COMPLETE, +}; + +struct trace_register_range { + u32 start; + u32 end; +}; + +static u16 trace_unit_template[] = TRACE_REG_CREATE_TUN_REGISTER_LIST; +static u16 trace_monitor_template[] = TRACE_REG_CREATE_TM_REGISTER_LIST; +static u16 trace_gpc_template[] = TRACE_REG_CREATE_GPC_REGISTER_LIST; + +static struct trace_register_range trace_csi2_range_template[] = { + { + .start = TRACE_REG_CSI2_TM_RESET_REG_IDX, + .end = TRACE_REG_CSI2_TM_IRQ_ENABLE_REG_IDn(7) + }, + { + .start = TRACE_REG_END_MARK, + .end = TRACE_REG_END_MARK + } +}; + +static struct trace_register_range trace_csi2_3ph_range_template[] = { + { + .start = TRACE_REG_CSI2_3PH_TM_RESET_REG_IDX, + .end = TRACE_REG_CSI2_3PH_TM_IRQ_ENABLE_REG_IDn(7) + }, + { + .start = TRACE_REG_END_MARK, + .end = TRACE_REG_END_MARK + } +}; + +static struct trace_register_range trace_sig2cio_range_template[] = { + { + .start = TRACE_REG_SIG2CIO_ADDRESS, + .end = (TRACE_REG_SIG2CIO_STATUS + 8 * TRACE_REG_SIG2CIO_SIZE_OF) + }, + { + .start = TRACE_REG_END_MARK, + .end = TRACE_REG_END_MARK + } +}; + +#define LINE_MAX_LEN 128 +#define MEMORY_RING_BUFFER_SIZE (SZ_1M * 10) +#define TRACE_MESSAGE_SIZE 16 +/* + * It looks that the trace unit sometimes writes outside the given buffer. 
+ * To avoid memory corruption one extra page is reserved at the end + * of the buffer. Read also the extra area since it may contain valid data. + */ +#define MEMORY_RING_BUFFER_GUARD PAGE_SIZE +#define MEMORY_RING_BUFFER_OVERREAD MEMORY_RING_BUFFER_GUARD +#define MAX_TRACE_REGISTERS 200 +#define TRACE_CONF_DUMP_BUFFER_SIZE (MAX_TRACE_REGISTERS * 2 * 32) + +#define IPU_TRACE_TIME_RETRY 5 + +struct config_value { + u32 reg; + u32 value; +}; + +struct ipu_trace_buffer { + dma_addr_t dma_handle; + void *memory_buffer; +}; + +struct ipu_subsystem_trace_config { + u32 offset; + void __iomem *base; + struct ipu_trace_buffer memory; /* ring buffer */ + struct device *dev; + struct ipu_trace_block *blocks; + unsigned int fill_level; /* Nbr of regs in config table below */ + bool running; + /* Cached register values */ + struct config_value config[MAX_TRACE_REGISTERS]; +}; + +/* + * State of the input data processing is kept in this structure. + * Only one user is supported at time. + */ +struct buf_state { + char line_buffer[LINE_MAX_LEN]; + enum config_file_parse_states state; + int offset; /* Offset to line_buffer */ +}; + +struct ipu_trace { + struct mutex lock; + bool open; + char *conf_dump_buffer; + int size_conf_dump; + struct buf_state buffer_state; + + struct ipu_subsystem_trace_config isys; + struct ipu_subsystem_trace_config psys; +}; + +int ipu_trace_get_timer(struct device *dev, u64 *timer) +{ + struct ipu_bus_device *adev = to_ipu_bus_device(dev); + struct ipu_subsystem_trace_config *sys = adev->trace_cfg; + struct ipu_trace_block *blocks; + void __iomem *addr = NULL; + uint32_t time_hi1, time_hi2, time_lo, retry; + + if (!sys) + return -ENODEV; + /* Find trace unit base address */ + blocks = sys->blocks; + while (blocks->type != IPU_TRACE_BLOCK_END) { + if (blocks->type == IPU_TRACE_BLOCK_TUN) { + addr = sys->base + blocks->offset; + break; + } + blocks++; + } + if (!addr) + return -ENODEV; + + for (retry = 0; retry < IPU_TRACE_TIME_RETRY; retry++) { + 
time_hi1 = readl(addr + TRACE_REG_TUN_LOCAL_TIMER1); + time_lo = readl(addr + TRACE_REG_TUN_LOCAL_TIMER0); + time_hi2 = readl(addr + TRACE_REG_TUN_LOCAL_TIMER1); + *timer = (((u64) time_hi1) << 32) | time_lo; + if (time_hi1 == time_hi2) + return 0; + } + + return -EINVAL; +} +EXPORT_SYMBOL_GPL(ipu_trace_get_timer); + +static void __ipu_trace_restore(struct device *dev) +{ + struct ipu_bus_device *adev = to_ipu_bus_device(dev); + struct ipu_device *isp = adev->isp; + struct ipu_trace *trace = isp->trace; + struct config_value *config; + struct ipu_subsystem_trace_config *sys = adev->trace_cfg; + struct ipu_trace_block *blocks; + uint32_t mapped_trace_buffer; + void __iomem *addr = NULL; + int i; + + if (trace->open) { + dev_info(dev, "Trace control file open. Skipping update\n"); + return; + } + + if (!sys) + return; + + /* leave if no trace configuration for this subsystem */ + if (sys->fill_level == 0) + return; + + /* Find trace unit base address */ + blocks = sys->blocks; + while (blocks->type != IPU_TRACE_BLOCK_END) { + if (blocks->type == IPU_TRACE_BLOCK_TUN) { + addr = sys->base + blocks->offset; + break; + } + blocks++; + } + if (!addr) + return; + + if (!sys->memory.memory_buffer) { + sys->memory.memory_buffer = + dma_alloc_attrs(dev, MEMORY_RING_BUFFER_SIZE + + MEMORY_RING_BUFFER_GUARD, + &sys->memory.dma_handle, + GFP_KERNEL, DMA_ATTR_NON_CONSISTENT); + } + + if (!sys->memory.memory_buffer) { + dev_err(dev, "No memory for tracing. 
Trace unit disabled\n"); + return; + } + + config = sys->config; + mapped_trace_buffer = sys->memory.dma_handle; + + /* ring buffer base */ + writel(mapped_trace_buffer, addr + TRACE_REG_TUN_DRAM_BASE_ADDR); + + /* ring buffer end */ + writel(mapped_trace_buffer + MEMORY_RING_BUFFER_SIZE - + TRACE_MESSAGE_SIZE, addr + TRACE_REG_TUN_DRAM_END_ADDR); + + /* Infobits for ddr trace */ + writel(IPU_INFO_REQUEST_DESTINATION_PRIMARY, + addr + TRACE_REG_TUN_DDR_INFO_VAL); + + /* Find trace timer reset address */ + addr = NULL; + blocks = sys->blocks; + while (blocks->type != IPU_TRACE_BLOCK_END) { + if (blocks->type == IPU_TRACE_TIMER_RST) { + addr = sys->base + blocks->offset; + break; + } + blocks++; + } + if (!addr) { + dev_err(dev, "No trace reset addr\n"); + return; + } + + /* Remove reset from trace timers */ + writel(TRACE_REG_GPREG_TRACE_TIMER_RST_OFF, addr); + + /* Register config received from userspace */ + for (i = 0; i < sys->fill_level; i++) { + dev_dbg(dev, + "Trace restore: reg 0x%08x, value 0x%08x\n", + config[i].reg, config[i].value); + writel(config[i].value, isp->base + config[i].reg); + } + + sys->running = true; +} + +void ipu_trace_restore(struct device *dev) +{ + struct ipu_trace *trace = to_ipu_bus_device(dev)->isp->trace; + + if (!trace) + return; + + mutex_lock(&trace->lock); + __ipu_trace_restore(dev); + mutex_unlock(&trace->lock); +} +EXPORT_SYMBOL_GPL(ipu_trace_restore); + +static void __ipu_trace_stop(struct device *dev) +{ + struct ipu_subsystem_trace_config *sys = + to_ipu_bus_device(dev)->trace_cfg; + struct ipu_trace_block *blocks; + + if (!sys) + return; + + if (!sys->running) + return; + sys->running = false; + + /* Turn off all the gpc blocks */ + blocks = sys->blocks; + while (blocks->type != IPU_TRACE_BLOCK_END) { + if (blocks->type == IPU_TRACE_BLOCK_GPC) { + writel(0, sys->base + blocks->offset + + TRACE_REG_GPC_OVERALL_ENABLE); + } + blocks++; + } + + /* Turn off all the trace monitors */ + blocks = sys->blocks; + while 
(blocks->type != IPU_TRACE_BLOCK_END) { + if (blocks->type == IPU_TRACE_BLOCK_TM) { + writel(0, sys->base + blocks->offset + + TRACE_REG_TM_TRACE_ENABLE_NPK); + + writel(0, sys->base + blocks->offset + + TRACE_REG_TM_TRACE_ENABLE_DDR); + } + blocks++; + } + + /* Turn off trace units */ + blocks = sys->blocks; + while (blocks->type != IPU_TRACE_BLOCK_END) { + if (blocks->type == IPU_TRACE_BLOCK_TUN) { + writel(0, sys->base + blocks->offset + + TRACE_REG_TUN_DDR_ENABLE); + writel(0, sys->base + blocks->offset + + TRACE_REG_TUN_NPK_ENABLE); + } + blocks++; + } +} + +void ipu_trace_stop(struct device *dev) +{ + struct ipu_trace *trace = to_ipu_bus_device(dev)->isp->trace; + + if (!trace) + return; + + mutex_lock(&trace->lock); + __ipu_trace_stop(dev); + mutex_unlock(&trace->lock); +} +EXPORT_SYMBOL_GPL(ipu_trace_stop); + +static int validate_register(u32 base, u32 reg, u16 *template) +{ + int i = 0; + + while (template[i] != TRACE_REG_END_MARK) { + if (template[i] + base != reg) { + i++; + continue; + } + /* This is a valid register */ + return 0; + } + return -EINVAL; +} + +static int validate_register_range(u32 base, u32 reg, + struct trace_register_range *template) +{ + unsigned int i = 0; + + if (!IS_ALIGNED(reg, sizeof(u32))) + return -EINVAL; + + while (template[i].start != TRACE_REG_END_MARK) { + if ((reg < template[i].start + base) || + (reg > template[i].end + base)) { + i++; + continue; + } + /* This is a valid register */ + return 0; + } + return -EINVAL; +} + +static int update_register_cache(struct ipu_device *isp, u32 reg, u32 value) +{ + struct ipu_trace *dctrl = isp->trace; + const struct ipu_trace_block *blocks; + struct ipu_subsystem_trace_config *sys; + struct device *dev; + u32 base = 0; + u16 *template = NULL; + struct trace_register_range *template_range = NULL; + int i, range; + int rval = -EINVAL; + + if (dctrl->isys.offset == dctrl->psys.offset) { + /* For the IPU with uniform address space */ + if (reg >= IPU_ISYS_OFFSET && + reg < 
IPU_ISYS_OFFSET + TRACE_REG_MAX_ISYS_OFFSET) + sys = &dctrl->isys; + else if (reg >= IPU_PSYS_OFFSET && + reg < IPU_PSYS_OFFSET + TRACE_REG_MAX_PSYS_OFFSET) + sys = &dctrl->psys; + else + goto error; + } else { + if (dctrl->isys.offset && + reg >= dctrl->isys.offset && + reg < dctrl->isys.offset + TRACE_REG_MAX_ISYS_OFFSET) + sys = &dctrl->isys; + else if (dctrl->psys.offset && + reg >= dctrl->psys.offset && + reg < dctrl->psys.offset + TRACE_REG_MAX_PSYS_OFFSET) + sys = &dctrl->psys; + else + goto error; + } + + blocks = sys->blocks; + dev = sys->dev; + + /* Check registers block by block */ + i = 0; + while (blocks[i].type != IPU_TRACE_BLOCK_END) { + base = blocks[i].offset + sys->offset; + if ((reg >= base && reg < base + TRACE_REG_MAX_BLOCK_SIZE)) + break; + i++; + } + + range = 0; + switch (blocks[i].type) { + case IPU_TRACE_BLOCK_TUN: + template = trace_unit_template; + break; + case IPU_TRACE_BLOCK_TM: + template = trace_monitor_template; + break; + case IPU_TRACE_BLOCK_GPC: + template = trace_gpc_template; + break; + case IPU_TRACE_CSI2: + range = 1; + template_range = trace_csi2_range_template; + break; + case IPU_TRACE_CSI2_3PH: + range = 1; + template_range = trace_csi2_3ph_range_template; + break; + case IPU_TRACE_SIG2CIOS: + range = 1; + template_range = trace_sig2cio_range_template; + break; + default: + goto error; + } + + if (range) + rval = validate_register_range(base, reg, template_range); + else + rval = validate_register(base, reg, template); + + if (rval) + goto error; + + if (sys->fill_level < MAX_TRACE_REGISTERS) { + dev_dbg(dev, + "Trace reg addr 0x%08x value 0x%08x\n", reg, value); + sys->config[sys->fill_level].reg = reg; + sys->config[sys->fill_level].value = value; + sys->fill_level++; + } else { + rval = -ENOMEM; + goto error; + } + return 0; +error: + dev_info(&isp->pdev->dev, + "Trace register address 0x%08x ignored as invalid register\n", + reg); + return rval; +} + +/* + * We don't know how much data is received this time. 
Process given data + * character by character. + * Fill the line buffer until either + * 1) new line is got -> go to decode + * or + * 2) line_buffer is full -> ignore rest of line and then try to decode + * or + * 3) Comment mark is found -> ignore rest of the line and then try to decode + * the data which was received before the comment mark + * + * Decode phase tries to find "reg = value" pairs and validates those + */ +static int process_buffer(struct ipu_device *isp, + char *buffer, int size, struct buf_state *state) +{ + int i, ret; + int curr_state = state->state; + u32 reg, value; + + for (i = 0; i < size; i++) { + /* + * Comment mark in any position turns on comment mode + * until end of line + */ + if (curr_state != STATE_COMMENT && buffer[i] == '#') { + state->line_buffer[state->offset] = '\0'; + curr_state = STATE_COMMENT; + continue; + } + + switch (curr_state) { + case STATE_COMMENT: + /* Only new line can break this mode */ + if (buffer[i] == '\n') + curr_state = STATE_COMPLETE; + break; + case STATE_FILL: + state->line_buffer[state->offset] = buffer[i]; + state->offset++; + + if (state->offset >= sizeof(state->line_buffer) - 1) { + /* Line buffer full - ignore rest */ + state->line_buffer[state->offset] = '\0'; + curr_state = STATE_COMMENT; + break; + } + + if (buffer[i] == '\n') { + state->line_buffer[state->offset] = '\0'; + curr_state = STATE_COMPLETE; + } + break; + default: + state->offset = 0; + state->line_buffer[state->offset] = '\0'; + curr_state = STATE_COMMENT; + } + + if (curr_state == STATE_COMPLETE) { + ret = sscanf(state->line_buffer, "%x = %x", + ®, &value); + if (ret == 2) + update_register_cache(isp, reg, value); + + state->offset = 0; + curr_state = STATE_FILL; + } + } + state->state = curr_state; + return 0; +} + +static void traceconf_dump(struct ipu_device *isp) +{ + struct ipu_subsystem_trace_config *sys[2] = { + &isp->trace->isys, + &isp->trace->psys + }; + int i, j, rem_size; + char *out; + + isp->trace->size_conf_dump = 0; 
+ out = isp->trace->conf_dump_buffer; + rem_size = TRACE_CONF_DUMP_BUFFER_SIZE; + + for (j = 0; j < ARRAY_SIZE(sys); j++) { + for (i = 0; i < sys[j]->fill_level && rem_size > 0; i++) { + int bytes_print; + int n = snprintf(out, rem_size, "0x%08x = 0x%08x\n", + sys[j]->config[i].reg, + sys[j]->config[i].value); + + bytes_print = min(n, rem_size - 1); + rem_size -= bytes_print; + out += bytes_print; + } + } + isp->trace->size_conf_dump = out - isp->trace->conf_dump_buffer; +} + +static void clear_trace_buffer(struct ipu_subsystem_trace_config *sys) +{ + if (!sys->memory.memory_buffer) + return; + + memset(sys->memory.memory_buffer, 0, MEMORY_RING_BUFFER_SIZE + + MEMORY_RING_BUFFER_OVERREAD); + + dma_sync_single_for_device(sys->dev, + sys->memory.dma_handle, + MEMORY_RING_BUFFER_SIZE + + MEMORY_RING_BUFFER_GUARD, DMA_FROM_DEVICE); +} + +static int traceconf_open(struct inode *inode, struct file *file) +{ + int ret; + struct ipu_device *isp; + + if (!inode->i_private) + return -EACCES; + + isp = inode->i_private; + + ret = mutex_trylock(&isp->trace->lock); + if (!ret) + return -EBUSY; + + if (isp->trace->open) { + mutex_unlock(&isp->trace->lock); + return -EBUSY; + } + + file->private_data = isp; + isp->trace->open = 1; + if (file->f_mode & FMODE_WRITE) { + /* TBD: Allocate temp buffer for processing. 
+ * Push validated buffer to active config + */ + + /* Forget old config if opened for write */ + isp->trace->isys.fill_level = 0; + isp->trace->psys.fill_level = 0; + } + + if (file->f_mode & FMODE_READ) { + isp->trace->conf_dump_buffer = + vzalloc(TRACE_CONF_DUMP_BUFFER_SIZE); + if (!isp->trace->conf_dump_buffer) { + isp->trace->open = 0; + mutex_unlock(&isp->trace->lock); + return -ENOMEM; + } + traceconf_dump(isp); + } + mutex_unlock(&isp->trace->lock); + return 0; +} + +static ssize_t traceconf_read(struct file *file, char __user *buf, + size_t len, loff_t *ppos) +{ + struct ipu_device *isp = file->private_data; + + return simple_read_from_buffer(buf, len, ppos, + isp->trace->conf_dump_buffer, + isp->trace->size_conf_dump); +} + +static ssize_t traceconf_write(struct file *file, const char __user *buf, + size_t len, loff_t *ppos) +{ + struct ipu_device *isp = file->private_data; + char buffer[64]; + ssize_t bytes, count; + loff_t pos = *ppos; + + if (*ppos < 0) + return -EINVAL; + + count = min(len, sizeof(buffer)); + bytes = copy_from_user(buffer, buf, count); + if (bytes == count) + return -EFAULT; + + count -= bytes; + mutex_lock(&isp->trace->lock); + process_buffer(isp, buffer, count, &isp->trace->buffer_state); + mutex_unlock(&isp->trace->lock); + *ppos = pos + count; + + return count; +} + +static int traceconf_release(struct inode *inode, struct file *file) +{ + struct ipu_device *isp = file->private_data; + struct device *psys_dev = isp->psys ? &isp->psys->dev : NULL; + struct device *isys_dev = isp->isys ? &isp->isys->dev : NULL; + int pm_rval = -EINVAL; + + /* + * Turn devices on outside trace->lock mutex. PM transition may + * cause call to function which tries to take the same lock. + * Also do this before trace->open is set back to 0 to avoid + * double restore (one here and one in pm transition). We can't + * rely purely on the restore done by pm call backs since trace + * configuration can occur in any phase compared to other activity. 
+ */ + + if (file->f_mode & FMODE_WRITE) { + if (isys_dev) + pm_rval = pm_runtime_get_sync(isys_dev); + + if (pm_rval >= 0) { + /* ISYS ok or missing */ + if (psys_dev) + pm_rval = pm_runtime_get_sync(psys_dev); + + if (pm_rval < 0) { + pm_runtime_put_noidle(psys_dev); + if (isys_dev) + pm_runtime_put(isys_dev); + } + } else { + pm_runtime_put_noidle(&isp->isys->dev); + } + } + + mutex_lock(&isp->trace->lock); + isp->trace->open = 0; + vfree(isp->trace->conf_dump_buffer); + isp->trace->conf_dump_buffer = NULL; + + if (pm_rval >= 0) { + /* Update new cfg to HW */ + if (isys_dev) { + __ipu_trace_stop(isys_dev); + clear_trace_buffer(isp->isys->trace_cfg); + __ipu_trace_restore(isys_dev); + } + + if (psys_dev) { + __ipu_trace_stop(psys_dev); + clear_trace_buffer(isp->psys->trace_cfg); + __ipu_trace_restore(psys_dev); + } + } + + mutex_unlock(&isp->trace->lock); + + if (pm_rval >= 0) { + /* Again - this must be done with trace->lock not taken */ + if (psys_dev) + pm_runtime_put(psys_dev); + if (isys_dev) + pm_runtime_put(isys_dev); + } + return 0; +} + +static const struct file_operations ipu_traceconf_fops = { + .owner = THIS_MODULE, + .open = traceconf_open, + .release = traceconf_release, + .read = traceconf_read, + .write = traceconf_write, + .llseek = no_llseek, +}; + +static int gettrace_open(struct inode *inode, struct file *file) +{ + struct ipu_subsystem_trace_config *sys = inode->i_private; + + if (!sys) + return -EACCES; + + if (!sys->memory.memory_buffer) + return -EACCES; + + dma_sync_single_for_cpu(sys->dev, + sys->memory.dma_handle, + MEMORY_RING_BUFFER_SIZE + + MEMORY_RING_BUFFER_GUARD, DMA_FROM_DEVICE); + + file->private_data = sys; + return 0; +}; + +static ssize_t gettrace_read(struct file *file, char __user *buf, + size_t len, loff_t *ppos) +{ + struct ipu_subsystem_trace_config *sys = file->private_data; + + return simple_read_from_buffer(buf, len, ppos, + sys->memory.memory_buffer, + MEMORY_RING_BUFFER_SIZE + + MEMORY_RING_BUFFER_OVERREAD); +} + 
+static ssize_t gettrace_write(struct file *file, const char __user *buf, + size_t len, loff_t *ppos) +{ + struct ipu_subsystem_trace_config *sys = file->private_data; + const char str[] = "clear"; + char buffer[sizeof(str)] = { 0 }; + ssize_t ret; + + ret = simple_write_to_buffer(buffer, sizeof(buffer), ppos, buf, len); + if (ret < 0) + return ret; + + if (ret < sizeof(str) - 1) + return -EINVAL; + + if (!strncmp(str, buffer, sizeof(str) - 1)) { + clear_trace_buffer(sys); + return len; + } + + return -EINVAL; +} + +static int gettrace_release(struct inode *inode, struct file *file) +{ + return 0; +} + +static const struct file_operations ipu_gettrace_fops = { + .owner = THIS_MODULE, + .open = gettrace_open, + .release = gettrace_release, + .read = gettrace_read, + .write = gettrace_write, + .llseek = no_llseek, +}; + +int ipu_trace_init(struct ipu_device *isp, void __iomem *base, + struct device *dev, struct ipu_trace_block *blocks) +{ + struct ipu_bus_device *adev = to_ipu_bus_device(dev); + struct ipu_trace *trace = isp->trace; + struct ipu_subsystem_trace_config *sys; + int ret = 0; + + if (!isp->trace) + return 0; + + mutex_lock(&isp->trace->lock); + + if (dev == &isp->isys->dev) { + sys = &trace->isys; + } else if (dev == &isp->psys->dev) { + sys = &trace->psys; + } else { + ret = -EINVAL; + goto leave; + } + + adev->trace_cfg = sys; + sys->dev = dev; + sys->offset = base - isp->base; /* sub system offset */ + sys->base = base; + sys->blocks = blocks; + +leave: + mutex_unlock(&isp->trace->lock); + + return ret; +} +EXPORT_SYMBOL_GPL(ipu_trace_init); + +void ipu_trace_uninit(struct device *dev) +{ + struct ipu_bus_device *adev = to_ipu_bus_device(dev); + struct ipu_device *isp = adev->isp; + struct ipu_trace *trace = isp->trace; + struct ipu_subsystem_trace_config *sys = adev->trace_cfg; + + if (!trace || !sys) + return; + + mutex_lock(&trace->lock); + + if (sys->memory.memory_buffer) + dma_free_attrs(sys->dev, + MEMORY_RING_BUFFER_SIZE + + 
MEMORY_RING_BUFFER_GUARD, + sys->memory.memory_buffer, + sys->memory.dma_handle, DMA_ATTR_NON_CONSISTENT); + + sys->dev = NULL; + sys->memory.memory_buffer = NULL; + + mutex_unlock(&trace->lock); +} +EXPORT_SYMBOL_GPL(ipu_trace_uninit); + +int ipu_trace_debugfs_add(struct ipu_device *isp, struct dentry *dir) +{ + struct dentry *files[3]; + int i = 0; + + files[i] = debugfs_create_file("traceconf", 0644, + dir, isp, &ipu_traceconf_fops); + if (!files[i]) + return -ENOMEM; + i++; + + files[i] = debugfs_create_file("getisystrace", 0444, + dir, + &isp->trace->isys, &ipu_gettrace_fops); + + if (!files[i]) + goto error; + i++; + + files[i] = debugfs_create_file("getpsystrace", 0444, + dir, + &isp->trace->psys, &ipu_gettrace_fops); + if (!files[i]) + goto error; + + return 0; + +error: + for (; i > 0; i--) + debugfs_remove(files[i - 1]); + return -ENOMEM; +} + +int ipu_trace_add(struct ipu_device *isp) +{ + isp->trace = devm_kzalloc(&isp->pdev->dev, + sizeof(struct ipu_trace), GFP_KERNEL); + if (!isp->trace) + return -ENOMEM; + + mutex_init(&isp->trace->lock); + + return 0; +} + +void ipu_trace_release(struct ipu_device *isp) +{ + if (!isp->trace) + return; + mutex_destroy(&isp->trace->lock); +} + +MODULE_AUTHOR("Samu Onkalo "); +MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("Intel ipu trace support"); diff --git a/drivers/media/pci/intel/ipu-trace.h b/drivers/media/pci/intel/ipu-trace.h new file mode 100644 index 0000000000000..9167c0400273f --- /dev/null +++ b/drivers/media/pci/intel/ipu-trace.h @@ -0,0 +1,312 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (C) 2014 - 2018 Intel Corporation */ + +#ifndef IPU_TRACE_H +#define IPU_TRACE_H +#include + +#define TRACE_REG_MAX_BLOCK_SIZE 0x0fff + +#define TRACE_REG_END_MARK 0xffff + +#define TRACE_REG_CMD_TYPE_D64 0x0 +#define TRACE_REG_CMD_TYPE_D64M 0x1 +#define TRACE_REG_CMD_TYPE_D64TS 0x2 +#define TRACE_REG_CMD_TYPE_D64MTS 0x3 + +/* Trace unit register offsets */ +#define TRACE_REG_TUN_DDR_ENABLE 0x000 +#define 
TRACE_REG_TUN_NPK_ENABLE 0x004 +#define TRACE_REG_TUN_DDR_INFO_VAL 0x008 +#define TRACE_REG_TUN_NPK_ADDR 0x00C +#define TRACE_REG_TUN_DRAM_BASE_ADDR 0x010 +#define TRACE_REG_TUN_DRAM_END_ADDR 0x014 +#define TRACE_REG_TUN_LOCAL_TIMER0 0x018 +#define TRACE_REG_TUN_LOCAL_TIMER1 0x01C +#define TRACE_REG_TUN_WR_PTR 0x020 +#define TRACE_REG_TUN_RD_PTR 0x024 + +#define TRACE_REG_CREATE_TUN_REGISTER_LIST { \ + TRACE_REG_TUN_DDR_ENABLE, \ + TRACE_REG_TUN_NPK_ENABLE, \ + TRACE_REG_TUN_DDR_INFO_VAL, \ + TRACE_REG_TUN_NPK_ADDR, \ + TRACE_REG_END_MARK \ +} +/* + * Following registers are left out on purpose: + * TUN_LOCAL_TIMER0, TUN_LOCAL_TIMER1, TUN_DRAM_BASE_ADDR + * TUN_DRAM_END_ADDR, TUN_WR_PTR, TUN_RD_PTR + */ + +/* Trace monitor register offsets */ +#define TRACE_REG_TM_TRACE_ADDR_A 0x0900 +#define TRACE_REG_TM_TRACE_ADDR_B 0x0904 +#define TRACE_REG_TM_TRACE_ADDR_C 0x0908 +#define TRACE_REG_TM_TRACE_ADDR_D 0x090c +#define TRACE_REG_TM_TRACE_ENABLE_NPK 0x0910 +#define TRACE_REG_TM_TRACE_ENABLE_DDR 0x0914 +#define TRACE_REG_TM_TRACE_PER_PC 0x0918 +#define TRACE_REG_TM_TRACE_PER_BRANCH 0x091c +#define TRACE_REG_TM_TRACE_HEADER 0x0920 +#define TRACE_REG_TM_TRACE_CFG 0x0924 +#define TRACE_REG_TM_TRACE_LOST_PACKETS 0x0928 +#define TRACE_REG_TM_TRACE_LP_CLEAR 0x092c +#define TRACE_REG_TM_TRACE_LMRUN_MASK 0x0930 +#define TRACE_REG_TM_TRACE_LMRUN_PC_LOW 0x0934 +#define TRACE_REG_TM_TRACE_LMRUN_PC_HIGH 0x0938 +#define TRACE_REG_TM_TRACE_MMIO_SEL 0x093c +#define TRACE_REG_TM_TRACE_MMIO_WP0_LOW 0x0940 +#define TRACE_REG_TM_TRACE_MMIO_WP1_LOW 0x0944 +#define TRACE_REG_TM_TRACE_MMIO_WP2_LOW 0x0948 +#define TRACE_REG_TM_TRACE_MMIO_WP3_LOW 0x094c +#define TRACE_REG_TM_TRACE_MMIO_WP0_HIGH 0x0950 +#define TRACE_REG_TM_TRACE_MMIO_WP1_HIGH 0x0954 +#define TRACE_REG_TM_TRACE_MMIO_WP2_HIGH 0x0958 +#define TRACE_REG_TM_TRACE_MMIO_WP3_HIGH 0x095c +#define TRACE_REG_TM_FWTRACE_FIRST 0x0A00 +#define TRACE_REG_TM_FWTRACE_MIDDLE 0x0A04 +#define TRACE_REG_TM_FWTRACE_LAST 0x0A08 + +#define 
TRACE_REG_CREATE_TM_REGISTER_LIST { \ + TRACE_REG_TM_TRACE_ADDR_A, \ + TRACE_REG_TM_TRACE_ADDR_B, \ + TRACE_REG_TM_TRACE_ADDR_C, \ + TRACE_REG_TM_TRACE_ADDR_D, \ + TRACE_REG_TM_TRACE_ENABLE_NPK, \ + TRACE_REG_TM_TRACE_ENABLE_DDR, \ + TRACE_REG_TM_TRACE_PER_PC, \ + TRACE_REG_TM_TRACE_PER_BRANCH, \ + TRACE_REG_TM_TRACE_HEADER, \ + TRACE_REG_TM_TRACE_CFG, \ + TRACE_REG_TM_TRACE_LOST_PACKETS, \ + TRACE_REG_TM_TRACE_LP_CLEAR, \ + TRACE_REG_TM_TRACE_LMRUN_MASK, \ + TRACE_REG_TM_TRACE_LMRUN_PC_LOW, \ + TRACE_REG_TM_TRACE_LMRUN_PC_HIGH, \ + TRACE_REG_TM_TRACE_MMIO_SEL, \ + TRACE_REG_TM_TRACE_MMIO_WP0_LOW, \ + TRACE_REG_TM_TRACE_MMIO_WP1_LOW, \ + TRACE_REG_TM_TRACE_MMIO_WP2_LOW, \ + TRACE_REG_TM_TRACE_MMIO_WP3_LOW, \ + TRACE_REG_TM_TRACE_MMIO_WP0_HIGH, \ + TRACE_REG_TM_TRACE_MMIO_WP1_HIGH, \ + TRACE_REG_TM_TRACE_MMIO_WP2_HIGH, \ + TRACE_REG_TM_TRACE_MMIO_WP3_HIGH, \ + TRACE_REG_END_MARK \ +} + +/* + * Following exists only in (I)SP address space: + * TM_FWTRACE_FIRST, TM_FWTRACE_MIDDLE, TM_FWTRACE_LAST + */ + +#define TRACE_REG_GPC_RESET 0x000 +#define TRACE_REG_GPC_OVERALL_ENABLE 0x004 +#define TRACE_REG_GPC_TRACE_HEADER 0x008 +#define TRACE_REG_GPC_TRACE_ADDRESS 0x00C +#define TRACE_REG_GPC_TRACE_NPK_EN 0x010 +#define TRACE_REG_GPC_TRACE_DDR_EN 0x014 +#define TRACE_REG_GPC_TRACE_LPKT_CLEAR 0x018 +#define TRACE_REG_GPC_TRACE_LPKT 0x01C + +#define TRACE_REG_GPC_ENABLE_ID0 0x020 +#define TRACE_REG_GPC_ENABLE_ID1 0x024 +#define TRACE_REG_GPC_ENABLE_ID2 0x028 +#define TRACE_REG_GPC_ENABLE_ID3 0x02c + +#define TRACE_REG_GPC_VALUE_ID0 0x030 +#define TRACE_REG_GPC_VALUE_ID1 0x034 +#define TRACE_REG_GPC_VALUE_ID2 0x038 +#define TRACE_REG_GPC_VALUE_ID3 0x03c + +#define TRACE_REG_GPC_CNT_INPUT_SELECT_ID0 0x040 +#define TRACE_REG_GPC_CNT_INPUT_SELECT_ID1 0x044 +#define TRACE_REG_GPC_CNT_INPUT_SELECT_ID2 0x048 +#define TRACE_REG_GPC_CNT_INPUT_SELECT_ID3 0x04c + +#define TRACE_REG_GPC_CNT_START_SELECT_ID0 0x050 +#define TRACE_REG_GPC_CNT_START_SELECT_ID1 0x054 +#define 
TRACE_REG_GPC_CNT_START_SELECT_ID2 0x058 +#define TRACE_REG_GPC_CNT_START_SELECT_ID3 0x05c + +#define TRACE_REG_GPC_CNT_STOP_SELECT_ID0 0x060 +#define TRACE_REG_GPC_CNT_STOP_SELECT_ID1 0x064 +#define TRACE_REG_GPC_CNT_STOP_SELECT_ID2 0x068 +#define TRACE_REG_GPC_CNT_STOP_SELECT_ID3 0x06c + +#define TRACE_REG_GPC_CNT_MSG_SELECT_ID0 0x070 +#define TRACE_REG_GPC_CNT_MSG_SELECT_ID1 0x074 +#define TRACE_REG_GPC_CNT_MSG_SELECT_ID2 0x078 +#define TRACE_REG_GPC_CNT_MSG_SELECT_ID3 0x07c + +#define TRACE_REG_GPC_CNT_MSG_PLOAD_SELECT_ID0 0x080 +#define TRACE_REG_GPC_CNT_MSG_PLOAD_SELECT_ID1 0x084 +#define TRACE_REG_GPC_CNT_MSG_PLOAD_SELECT_ID2 0x088 +#define TRACE_REG_GPC_CNT_MSG_PLOAD_SELECT_ID3 0x08c + +#define TRACE_REG_GPC_IRQ_TRIGGER_VALUE_ID0 0x090 +#define TRACE_REG_GPC_IRQ_TRIGGER_VALUE_ID1 0x094 +#define TRACE_REG_GPC_IRQ_TRIGGER_VALUE_ID2 0x098 +#define TRACE_REG_GPC_IRQ_TRIGGER_VALUE_ID3 0x09c + +#define TRACE_REG_GPC_IRQ_TIMER_SELECT_ID0 0x0a0 +#define TRACE_REG_GPC_IRQ_TIMER_SELECT_ID1 0x0a4 +#define TRACE_REG_GPC_IRQ_TIMER_SELECT_ID2 0x0a8 +#define TRACE_REG_GPC_IRQ_TIMER_SELECT_ID3 0x0ac + +#define TRACE_REG_GPC_IRQ_ENABLE_ID0 0x0b0 +#define TRACE_REG_GPC_IRQ_ENABLE_ID1 0x0b4 +#define TRACE_REG_GPC_IRQ_ENABLE_ID2 0x0b8 +#define TRACE_REG_GPC_IRQ_ENABLE_ID3 0x0bc + +#define TRACE_REG_CREATE_GPC_REGISTER_LIST { \ + TRACE_REG_GPC_RESET, \ + TRACE_REG_GPC_OVERALL_ENABLE, \ + TRACE_REG_GPC_TRACE_HEADER, \ + TRACE_REG_GPC_TRACE_ADDRESS, \ + TRACE_REG_GPC_TRACE_NPK_EN, \ + TRACE_REG_GPC_TRACE_DDR_EN, \ + TRACE_REG_GPC_TRACE_LPKT_CLEAR, \ + TRACE_REG_GPC_TRACE_LPKT, \ + TRACE_REG_GPC_ENABLE_ID0, \ + TRACE_REG_GPC_ENABLE_ID1, \ + TRACE_REG_GPC_ENABLE_ID2, \ + TRACE_REG_GPC_ENABLE_ID3, \ + TRACE_REG_GPC_VALUE_ID0, \ + TRACE_REG_GPC_VALUE_ID1, \ + TRACE_REG_GPC_VALUE_ID2, \ + TRACE_REG_GPC_VALUE_ID3, \ + TRACE_REG_GPC_CNT_INPUT_SELECT_ID0, \ + TRACE_REG_GPC_CNT_INPUT_SELECT_ID1, \ + TRACE_REG_GPC_CNT_INPUT_SELECT_ID2, \ + TRACE_REG_GPC_CNT_INPUT_SELECT_ID3, \ + 
TRACE_REG_GPC_CNT_START_SELECT_ID0, \ + TRACE_REG_GPC_CNT_START_SELECT_ID1, \ + TRACE_REG_GPC_CNT_START_SELECT_ID2, \ + TRACE_REG_GPC_CNT_START_SELECT_ID3, \ + TRACE_REG_GPC_CNT_STOP_SELECT_ID0, \ + TRACE_REG_GPC_CNT_STOP_SELECT_ID1, \ + TRACE_REG_GPC_CNT_STOP_SELECT_ID2, \ + TRACE_REG_GPC_CNT_STOP_SELECT_ID3, \ + TRACE_REG_GPC_CNT_MSG_SELECT_ID0, \ + TRACE_REG_GPC_CNT_MSG_SELECT_ID1, \ + TRACE_REG_GPC_CNT_MSG_SELECT_ID2, \ + TRACE_REG_GPC_CNT_MSG_SELECT_ID3, \ + TRACE_REG_GPC_CNT_MSG_PLOAD_SELECT_ID0, \ + TRACE_REG_GPC_CNT_MSG_PLOAD_SELECT_ID1, \ + TRACE_REG_GPC_CNT_MSG_PLOAD_SELECT_ID2, \ + TRACE_REG_GPC_CNT_MSG_PLOAD_SELECT_ID3, \ + TRACE_REG_GPC_IRQ_TRIGGER_VALUE_ID0, \ + TRACE_REG_GPC_IRQ_TRIGGER_VALUE_ID1, \ + TRACE_REG_GPC_IRQ_TRIGGER_VALUE_ID2, \ + TRACE_REG_GPC_IRQ_TRIGGER_VALUE_ID3, \ + TRACE_REG_GPC_IRQ_TIMER_SELECT_ID0, \ + TRACE_REG_GPC_IRQ_TIMER_SELECT_ID1, \ + TRACE_REG_GPC_IRQ_TIMER_SELECT_ID2, \ + TRACE_REG_GPC_IRQ_TIMER_SELECT_ID3, \ + TRACE_REG_GPC_IRQ_ENABLE_ID0, \ + TRACE_REG_GPC_IRQ_ENABLE_ID1, \ + TRACE_REG_GPC_IRQ_ENABLE_ID2, \ + TRACE_REG_GPC_IRQ_ENABLE_ID3, \ + TRACE_REG_END_MARK \ +} + +/* CSI2 legacy receiver trace registers */ +#define TRACE_REG_CSI2_TM_RESET_REG_IDX 0x0000 +#define TRACE_REG_CSI2_TM_OVERALL_ENABLE_REG_IDX 0x0004 +#define TRACE_REG_CSI2_TM_TRACE_HEADER_REG_IDX 0x0008 +#define TRACE_REG_CSI2_TM_TRACE_ADDRESS_REG_IDX 0x000c +#define TRACE_REG_CSI2_TM_TRACE_HEADER_VAL 0xf +#define TRACE_REG_CSI2_TM_TRACE_ADDRESS_VAL 0x100218 +#define TRACE_REG_CSI2_TM_MONITOR_ID 0x8 + +/* 0 <= n <= 3 */ +#define TRACE_REG_CSI2_TM_TRACE_NPK_EN_REG_IDX_P(n) (0x0010 + (n) * 4) +#define TRACE_REG_CSI2_TM_TRACE_DDR_EN_REG_IDX_P(n) (0x0020 + (n) * 4) +#define TRACE_CSI2_TM_EVENT_FE(vc) (BIT(0) << (vc * 6)) +#define TRACE_CSI2_TM_EVENT_FS(vc) (BIT(1) << (vc * 6)) +#define TRACE_CSI2_TM_EVENT_PE(vc) (BIT(2) << (vc * 6)) +#define TRACE_CSI2_TM_EVENT_PS(vc) (BIT(3) << (vc * 6)) +#define TRACE_CSI2_TM_EVENT_LE(vc) (BIT(4) << (vc * 6)) +#define 
TRACE_CSI2_TM_EVENT_LS(vc) (BIT(5) << (vc * 6)) + +#define TRACE_REG_CSI2_TM_TRACE_LPKT_CLEAR_REG_IDX 0x0030 +#define TRACE_REG_CSI2_TM_TRACE_LPKT_REG_IDX 0x0034 + +/* 0 <= n <= 7 */ +#define TRACE_REG_CSI2_TM_ENABLE_REG_IDn(n) (0x0038 + (n) * 4) +#define TRACE_REG_CSI2_TM_VALUE_REG_IDn(n) (0x0058 + (n) * 4) +#define TRACE_REG_CSI2_TM_CNT_INPUT_SELECT_REG_IDn(n) (0x0078 + (n) * 4) +#define TRACE_REG_CSI2_TM_CNT_START_SELECT_REG_IDn(n) (0x0098 + (n) * 4) +#define TRACE_REG_CSI2_TM_CNT_STOP_SELECT_REG_IDn(n) (0x00b8 + (n) * 4) +#define TRACE_REG_CSI2_TM_IRQ_TRIGGER_VALUE_REG_IDn(n) (0x00d8 + (n) * 4) +#define TRACE_REG_CSI2_TM_IRQ_TIMER_SELECT_REG_IDn(n) (0x00f8 + (n) * 4) +#define TRACE_REG_CSI2_TM_IRQ_ENABLE_REG_IDn(n) (0x0118 + (n) * 4) + +/* CSI2_3PH combo receiver trace registers */ +#define TRACE_REG_CSI2_3PH_TM_RESET_REG_IDX 0x0000 +#define TRACE_REG_CSI2_3PH_TM_OVERALL_ENABLE_REG_IDX 0x0004 +#define TRACE_REG_CSI2_3PH_TM_TRACE_HEADER_REG_IDX 0x0008 +#define TRACE_REG_CSI2_3PH_TM_TRACE_ADDRESS_REG_IDX 0x000c +#define TRACE_REG_CSI2_3PH_TM_TRACE_ADDRESS_VAL 0x100258 +#define TRACE_REG_CSI2_3PH_TM_MONITOR_ID 0x9 + +/* 0 <= n <= 5 */ +#define TRACE_REG_CSI2_3PH_TM_TRACE_NPK_EN_REG_IDX_P(n) (0x0010 + (n) * 4) +#define TRACE_REG_CSI2_3PH_TM_TRACE_DDR_EN_REG_IDX_P(n) (0x0028 + (n) * 4) + +#define TRACE_REG_CSI2_3PH_TM_TRACE_LPKT_CLEAR_REG_IDX 0x0040 +#define TRACE_REG_CSI2_3PH_TM_TRACE_LPKT_REG_IDX 0x0044 + +/* 0 <= n <= 7 */ +#define TRACE_REG_CSI2_3PH_TM_ENABLE_REG_IDn(n) (0x0048 + (n) * 4) +#define TRACE_REG_CSI2_3PH_TM_VALUE_REG_IDn(n) (0x0068 + (n) * 4) +#define TRACE_REG_CSI2_3PH_TM_CNT_INPUT_SELECT_REG_IDn(n) (0x0088 + (n) * 4) +#define TRACE_REG_CSI2_3PH_TM_CNT_START_SELECT_REG_IDn(n) (0x00a8 + (n) * 4) +#define TRACE_REG_CSI2_3PH_TM_CNT_STOP_SELECT_REG_IDn(n) (0x00c8 + (n) * 4) +#define TRACE_REG_CSI2_3PH_TM_IRQ_TRIGGER_VALUE_REG_IDn(n) (0x00e8 + (n) * 4) +#define TRACE_REG_CSI2_3PH_TM_IRQ_TIMER_SELECT_REG_IDn(n) (0x0108 + (n) * 4) +#define 
TRACE_REG_CSI2_3PH_TM_IRQ_ENABLE_REG_IDn(n) (0x0128 + (n) * 4) + +/* SIG2CIO trace monitors */ +#define TRACE_REG_SIG2CIO_ADDRESS 0x0000 +#define TRACE_REG_SIG2CIO_WDATA 0x0004 +#define TRACE_REG_SIG2CIO_MASK 0x0008 +#define TRACE_REG_SIG2CIO_GROUP_CFG 0x000c +#define TRACE_REG_SIG2CIO_STICKY 0x0010 +#define TRACE_REG_SIG2CIO_RST_STICKY 0x0014 +#define TRACE_REG_SIG2CIO_MANUAL_RST_STICKY 0x0018 +#define TRACE_REG_SIG2CIO_STATUS 0x001c +/* Size of one SIG2CIO block */ +#define TRACE_REG_SIG2CIO_SIZE_OF 0x0020 + +struct ipu_trace; +struct ipu_subsystem_trace_config; + +enum ipu_trace_block_type { + IPU_TRACE_BLOCK_TUN = 0, /* Trace unit */ + IPU_TRACE_BLOCK_TM, /* Trace monitor */ + IPU_TRACE_BLOCK_GPC, /* General purpose control */ + IPU_TRACE_CSI2, /* CSI2 legacy receiver */ + IPU_TRACE_CSI2_3PH, /* CSI2 combo receiver */ + IPU_TRACE_SIG2CIOS, + IPU_TRACE_TIMER_RST, /* Trace reset control timer */ + IPU_TRACE_BLOCK_END /* End of list */ +}; + +struct ipu_trace_block { + u32 offset; /* Offset to block inside subsystem */ + enum ipu_trace_block_type type; +}; + +int ipu_trace_add(struct ipu_device *isp); +int ipu_trace_debugfs_add(struct ipu_device *isp, struct dentry *dir); +void ipu_trace_release(struct ipu_device *isp); +int ipu_trace_init(struct ipu_device *isp, void __iomem *base, + struct device *dev, struct ipu_trace_block *blocks); +void ipu_trace_restore(struct device *dev); +void ipu_trace_uninit(struct device *dev); +void ipu_trace_stop(struct device *dev); +int ipu_trace_get_timer(struct device *dev, u64 *timer); +#endif diff --git a/drivers/media/pci/intel/ipu-wrapper.c b/drivers/media/pci/intel/ipu-wrapper.c new file mode 100644 index 0000000000000..47fa06202fdd7 --- /dev/null +++ b/drivers/media/pci/intel/ipu-wrapper.c @@ -0,0 +1,515 @@ +// SPDX-License-Identifier: GPL-2.0 +// Copyright (C) 2013 - 2018 Intel Corporation + +#include +#include + +#include +#include +#include +#include +#include + +#include "ipu-bus.h" +#include "ipu-dma.h" +#include 
"ipu-mmu.h" +#include "ipu-wrapper.h" +#include "vied_subsystem_access.h" +#include "vied_subsystem_access_initialization.h" +#include "shared_memory_map.h" +#include "shared_memory_access.h" + +struct wrapper_base { + void __iomem *sys_base; + const struct dma_map_ops *ops; + /* Protect shared memory buffers */ + spinlock_t lock; + struct list_head buffers; + u32 css_map_done; + struct device *dev; +}; + +static struct wrapper_base isys; +static struct wrapper_base psys; + +struct my_css_memory_buffer_item { + struct list_head list; + dma_addr_t iova; + unsigned long *addr; + size_t bytes; +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 8, 0) + struct dma_attrs attrs; +#else + unsigned long attrs; +#endif +}; + +static struct wrapper_base *get_mem_sub_system(int mmid) +{ + if (mmid == ISYS_MMID) + return &isys; + + if (mmid == PSYS_MMID) + return &psys; + + WARN(1, "Invalid mem subsystem"); + return NULL; +} + +static struct wrapper_base *get_sub_system(int ssid) +{ + if (ssid == ISYS_SSID) + return &isys; + + if (ssid == PSYS_SSID) + return &psys; + WARN(1, "Invalid subsystem"); + return NULL; +} + +/* + * Subsystem access functions to access IUNIT MMIO space + */ +static void *host_addr(int ssid, u32 addr) +{ + if (ssid == ISYS_SSID) + return isys.sys_base + addr; + else if (ssid == PSYS_SSID) + return psys.sys_base + addr; + /* + * Calling WARN_ON is a bit brutal but better to capture wrong register + * accesses immediately. We have no way to return an error here. 
+ */ + WARN_ON(1); + + return NULL; +} + +void vied_subsystem_store_32(unsigned int ssid, u32 addr, u32 data) +{ + writel(data, host_addr(ssid, addr)); +} + +void vied_subsystem_store_16(unsigned int ssid, u32 addr, u16 data) +{ + writew(data, host_addr(ssid, addr)); +} + +void vied_subsystem_store_8(unsigned int ssid, u32 addr, u8 data) +{ + writeb(data, host_addr(ssid, addr)); +} + +void vied_subsystem_store(unsigned int ssid, + u32 addr, const void *data, unsigned int size) +{ + void *dst = host_addr(ssid, addr); + + dev_dbg(get_sub_system(ssid)->dev, "access: %s 0x%x size: %d\n", + __func__, addr, size); + + for (; size >= sizeof(u32); size -= sizeof(u32), + dst += sizeof(u32), data += sizeof(u32)) { + writel(*(u32 *) data, dst); + } + if (size >= sizeof(u16)) { + writew(*(u16 *) data, dst); + size -= sizeof(u16), dst += sizeof(u16), data += sizeof(u16); + } + if (size) + writeb(*(u8 *) data, dst); +} + +u32 vied_subsystem_load_32(unsigned int ssid, u32 addr) +{ + return readl(host_addr(ssid, addr)); +} + +u16 vied_subsystem_load_16(unsigned int ssid, u32 addr) +{ + return readw(host_addr(ssid, addr)); +} + +u8 vied_subsystem_load_8(unsigned int ssid, u32 addr) +{ + return readb(host_addr(ssid, addr)); +} + +void vied_subsystem_load(unsigned int ssid, u32 addr, + void *data, unsigned int size) +{ + void *src = host_addr(ssid, addr); + + dev_dbg(get_sub_system(ssid)->dev, "access: %s 0x%x size: %d\n", + __func__, addr, size); + + for (; size >= sizeof(u32); size -= sizeof(u32), + src += sizeof(u32), data += sizeof(u32)) + *(u32 *) data = readl(src); + if (size >= sizeof(u16)) { + *(u16 *) data = readw(src); + size -= sizeof(u16), src += sizeof(u16), data += sizeof(u16); + } + if (size) + *(u8 *) data = readb(src); +} + +/* + * Initialize base address for subsystem + */ +void vied_subsystem_access_initialize(unsigned int system) +{ +} + +/* + * Shared memory access codes written by Dash Biswait, + * copied from FPGA environment + */ + +/** + * \brief Initialize 
the shared memory interface administration on the host. + * \param mmid: id of ddr memory + * \param host_ddr_addr: physical address of memory as seen from host + * \param memory_size: size of ddr memory in bytes + * \param ps: size of page in bytes (for instance 4096) + */ +int shared_memory_allocation_initialize(unsigned int mmid, u64 host_ddr_addr, + size_t memory_size, size_t ps) +{ + return 0; +} + +/** + * \brief De-initialize the shared memory interface administration on the host. + * + */ +void shared_memory_allocation_uninitialize(unsigned int mmid) +{ +} + +/** + * \brief Initialize the shared memory interface administration on the host. + * \param ssid: id of subsystem + * \param mmid: id of ddr memory + * \param mmu_ps: size of page in bits + * \param mmu_pnrs: page numbers + * \param ddr_addr: base address + * \param inv_tlb: invalidate tbl + * \param sbt: set l1 base address + */ +int shared_memory_map_initialize(unsigned int ssid, unsigned int mmid, + size_t mmu_ps, size_t mmu_pnrs, u64 ddr_addr, + shared_memory_invalidate_mmu_tlb inv_tlb, + shared_memory_set_page_table_base_address sbt) +{ + return 0; +} + +/** + * \brief De-initialize the shared memory interface administration on the host. + */ +void shared_memory_map_uninitialize(unsigned int ssid, unsigned int mmid) +{ +} + +static u8 alloc_cookie; + +/** + * \brief Allocate (DDR) shared memory space and return a host virtual address. 
+ * \Returns NULL when insufficient memory available + */ +u64 shared_memory_alloc(unsigned int mmid, size_t bytes) +{ + struct wrapper_base *mine = get_mem_sub_system(mmid); + struct my_css_memory_buffer_item *buf; + unsigned long flags; + + dev_dbg(mine->dev, "%s: in, size: %zu\n", __func__, bytes); + + if (!bytes) + return (unsigned long)&alloc_cookie; + + might_sleep(); + + buf = kzalloc(sizeof(*buf), GFP_KERNEL); + if (!buf) + return 0; + + /*alloc using ipu dma driver */ + buf->bytes = PAGE_ALIGN(bytes); + + buf->addr = dma_alloc_attrs(mine->dev, buf->bytes, &buf->iova, + GFP_KERNEL, +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 8, 0) + NULL +#else + 0 +#endif + ); + if (!buf->addr) { + kfree(buf); + return 0; + } + + spin_lock_irqsave(&mine->lock, flags); + list_add(&buf->list, &mine->buffers); + spin_unlock_irqrestore(&mine->lock, flags); + + return (unsigned long)buf->addr; +} + +/** + * \brief Free (DDR) shared memory space. + */ +void shared_memory_free(unsigned int mmid, u64 addr) +{ + struct wrapper_base *mine = get_mem_sub_system(mmid); + struct my_css_memory_buffer_item *buf = NULL; + unsigned long flags; + + if ((void *)(unsigned long)addr == &alloc_cookie) + return; + + might_sleep(); + + dev_dbg(mine->dev, "looking for iova %8.8llx\n", addr); + + spin_lock_irqsave(&mine->lock, flags); + list_for_each_entry(buf, &mine->buffers, list) { + dev_dbg(mine->dev, "buffer addr %8.8lx\n", (long)buf->addr); + if ((long)buf->addr != addr) + continue; + + dev_dbg(mine->dev, "found it!\n"); + list_del(&buf->list); + spin_unlock_irqrestore(&mine->lock, flags); + dma_free_attrs(mine->dev, buf->bytes, buf->addr, buf->iova, +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 8, 0) + &buf->attrs +#else + buf->attrs +#endif + ); + kfree(buf); + return; + } + dev_warn(mine->dev, "Can't find mem object %8.8llx\n", addr); + spin_unlock_irqrestore(&mine->lock, flags); +} + +/** + * \brief Convert a host virtual address to a CSS virtual address and + * \update the MMU. 
+ */ +u32 shared_memory_map(unsigned int ssid, unsigned int mmid, u64 addr) +{ + struct wrapper_base *mine = get_mem_sub_system(mmid); + struct my_css_memory_buffer_item *buf = NULL; + unsigned long flags; + + if ((void *)(unsigned long)addr == &alloc_cookie) + return 0; + + spin_lock_irqsave(&mine->lock, flags); + list_for_each_entry(buf, &mine->buffers, list) { + dev_dbg(mine->dev, "%s %8.8lx\n", __func__, (long)buf->addr); + if ((long)buf->addr != addr) + continue; + + dev_dbg(mine->dev, "mapped!!\n"); + spin_unlock_irqrestore(&mine->lock, flags); + return buf->iova; + } + dev_err(mine->dev, "Can't find mapped object %8.8llx\n", addr); + spin_unlock_irqrestore(&mine->lock, flags); + return 0; +} + +/** + * \brief Free a CSS virtual address and update the MMU. + */ +void shared_memory_unmap(unsigned int ssid, unsigned int mmid, u32 addr) +{ +} + +/** + * \brief Store a byte into (DDR) shared memory space using a host + * \virtual address + */ +void shared_memory_store_8(unsigned int mmid, u64 addr, u8 data) +{ + dev_dbg(get_mem_sub_system(mmid)->dev, + "access: %s: Enter addr = 0x%llx data = 0x%x\n", + __func__, addr, data); + + *((u8 *)(unsigned long) addr) = data; + /*Invalidate the cache lines to flush the content to ddr. */ + clflush_cache_range((void *)(unsigned long)addr, sizeof(u8)); +} + +/** + * \brief Store a 16-bit word into (DDR) shared memory space using a host + * \virtual address + */ +void shared_memory_store_16(unsigned int mmid, u64 addr, u16 data) +{ + dev_dbg(get_mem_sub_system(mmid)->dev, + "access: %s: Enter addr = 0x%llx data = 0x%x\n", + __func__, addr, data); + + *((u16 *)(unsigned long) addr) = data; + /*Invalidate the cache lines to flush the content to ddr. 
*/ + clflush_cache_range((void *)(unsigned long) addr, sizeof(u16)); +} + +/** + * \brief Store a 32-bit word into (DDR) shared memory space using a host + * \virtual address + */ +void shared_memory_store_32(unsigned int mmid, u64 addr, u32 data) +{ + dev_dbg(get_mem_sub_system(mmid)->dev, + "access: %s: Enter addr = 0x%llx data = 0x%x\n", + __func__, addr, data); + + *((u32 *)(unsigned long) addr) = data; + /* Invalidate the cache lines to flush the content to ddr. */ + clflush_cache_range((void *)(unsigned long) addr, sizeof(u32)); +} + +/** + * \brief Store a number of bytes into (DDR) shared memory space using a host + * \virtual address + */ +void shared_memory_store(unsigned int mmid, u64 addr, const void *data, + size_t bytes) +{ + dev_dbg(get_mem_sub_system(mmid)->dev, + "access: %s: Enter addr = 0x%lx bytes = 0x%zx\n", __func__, + (unsigned long)addr, bytes); + + if (!data) { + dev_err(get_mem_sub_system(mmid)->dev, + "%s: data ptr is null\n", __func__); + } else { + const u8 *pdata = data; + u8 *paddr = (u8 *)(unsigned long)addr; + size_t i = 0; + + for (; i < bytes; ++i) + *paddr++ = *pdata++; + + /* Invalidate the cache lines to flush the content to ddr. 
*/ + clflush_cache_range((void *)(unsigned long) addr, bytes); + } +} + +/** + * \brief Set a number of bytes of (DDR) shared memory space to 0 using a host + * \virtual address + */ +void shared_memory_zero(unsigned int mmid, u64 addr, size_t bytes) +{ + dev_dbg(get_mem_sub_system(mmid)->dev, + "access: %s: Enter addr = 0x%llx data = 0x%zx\n", + __func__, (unsigned long long)addr, bytes); + + memset((void *)(unsigned long)addr, 0, bytes); + clflush_cache_range((void *)(unsigned long)addr, bytes); +} + +/** + * \brief Load a byte from (DDR) shared memory space using a host + * \virtual address + */ +u8 shared_memory_load_8(unsigned int mmid, u64 addr) +{ + u8 data = 0; + + dev_dbg(get_mem_sub_system(mmid)->dev, + "access: %s: Enter addr = 0x%llx\n", __func__, addr); + + /* Invalidate the cache lines to flush the content to ddr. */ + clflush_cache_range((void *)(unsigned long)addr, sizeof(u8)); + data = *(u8 *)(unsigned long) addr; + return data; +} + +/** + * \brief Load a 16-bit word from (DDR) shared memory space using a host + * \virtual address + */ +u16 shared_memory_load_16(unsigned int mmid, u64 addr) +{ + u16 data = 0; + + dev_dbg(get_mem_sub_system(mmid)->dev, + "access: %s: Enter addr = 0x%llx\n", __func__, addr); + + /* Invalidate the cache lines to flush the content to ddr. */ + clflush_cache_range((void *)(unsigned long)addr, sizeof(u16)); + data = *(u16 *)(unsigned long)addr; + return data; +} + +/** + * \brief Load a 32-bit word from (DDR) shared memory space using a host + * \virtual address + */ +u32 shared_memory_load_32(unsigned int mmid, u64 addr) +{ + u32 data = 0; + + dev_dbg(get_mem_sub_system(mmid)->dev, + "access: %s: Enter addr = 0x%llx\n", __func__, addr); + + /* Invalidate the cache lines to flush the content to ddr. 
*/ + clflush_cache_range((void *)(unsigned long)addr, sizeof(u32)); + data = *(u32 *)(unsigned long)addr; + return data; +} + +/** + * \brief Load a number of bytes from (DDR) shared memory space using a host + * \virtual address + */ +void shared_memory_load(unsigned int mmid, u64 addr, void *data, size_t bytes) +{ + dev_dbg(get_mem_sub_system(mmid)->dev, + "access: %s: Enter addr = 0x%lx bytes = 0x%zx\n", __func__, + (unsigned long)addr, bytes); + + if (!data) { + dev_err(get_mem_sub_system(mmid)->dev, + "%s: data ptr is null\n", __func__); + + } else { + u8 *pdata = data; + u8 *paddr = (u8 *)(unsigned long)addr; + size_t i = 0; + + /* Invalidate the cache lines to flush the content to ddr. */ + clflush_cache_range((void *)(unsigned long)addr, bytes); + for (; i < bytes; ++i) + *pdata++ = *paddr++; + } +} + +static int init_wrapper(struct wrapper_base *sys) +{ + INIT_LIST_HEAD(&sys->buffers); + spin_lock_init(&sys->lock); + return 0; +} + +/* + * Wrapper driver set base address for library use + */ +void ipu_wrapper_init(int mmid, struct device *dev, void __iomem *base) +{ + struct wrapper_base *sys = get_mem_sub_system(mmid); + + init_wrapper(sys); + sys->dev = dev; + sys->sys_base = base; +} diff --git a/drivers/media/pci/intel/ipu-wrapper.h b/drivers/media/pci/intel/ipu-wrapper.h new file mode 100644 index 0000000000000..52ca2d1593cd2 --- /dev/null +++ b/drivers/media/pci/intel/ipu-wrapper.h @@ -0,0 +1,16 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (C) 2013 - 2018 Intel Corporation */ + +#ifndef IPU_WRAPPER_H +#define IPU_WRAPPER_H + +#define ISYS_SSID 1 +#define PSYS_SSID 0 + +#define ISYS_MMID 1 +#define PSYS_MMID 0 +struct device; + +void ipu_wrapper_init(int mmid, struct device *dev, void __iomem *base); + +#endif /* IPU_WRAPPER_H */ diff --git a/drivers/media/pci/intel/ipu.c b/drivers/media/pci/intel/ipu.c new file mode 100644 index 0000000000000..04293f4117481 --- /dev/null +++ b/drivers/media/pci/intel/ipu.c @@ -0,0 +1,768 @@ +// 
SPDX-License-Identifier: GPL-2.0 +// Copyright (C) 2013 - 2018 Intel Corporation + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "ipu.h" +#include "ipu-buttress.h" +#include "ipu-platform.h" +#include "ipu-platform-buttress-regs.h" +#include "ipu-cpd.h" +#include "ipu-pdata.h" +#include "ipu-bus.h" +#include "ipu-mmu.h" +#include "ipu-platform-regs.h" +#include "ipu-platform-isys-csi2-reg.h" +#include "ipu-trace.h" + +#define IPU_PCI_BAR 0 + +static struct ipu_bus_device *ipu_mmu_init(struct pci_dev *pdev, + struct device *parent, + struct ipu_buttress_ctrl *ctrl, + void __iomem *base, + const struct ipu_hw_variants *hw, + unsigned int nr, int mmid) +{ + struct ipu_mmu_pdata *pdata = + devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL); + unsigned int i; + + if (!pdata) + return ERR_PTR(-ENOMEM); + + if (hw->nr_mmus > IPU_MMU_MAX_DEVICES) + return ERR_PTR(-EINVAL); + + for (i = 0; i < hw->nr_mmus; i++) { + struct ipu_mmu_hw *pdata_mmu = &pdata->mmu_hw[i]; + const struct ipu_mmu_hw *src_mmu = &hw->mmu_hw[i]; + + if (src_mmu->nr_l1streams > IPU_MMU_MAX_TLB_L1_STREAMS || + src_mmu->nr_l2streams > IPU_MMU_MAX_TLB_L2_STREAMS) + return ERR_PTR(-EINVAL); + + *pdata_mmu = *src_mmu; + pdata_mmu->base = base + src_mmu->offset; + } + + pdata->nr_mmus = hw->nr_mmus; + pdata->mmid = mmid; + + return ipu_bus_add_device(pdev, parent, pdata, NULL, ctrl, + IPU_MMU_NAME, nr); +} + +static struct ipu_bus_device *ipu_isys_init(struct pci_dev *pdev, + struct device *parent, + struct device *iommu, + void __iomem *base, + const struct ipu_isys_internal_pdata + *ipdata, + struct ipu_isys_subdev_pdata + *spdata, unsigned int nr) +{ + struct ipu_isys_pdata *pdata = + devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL); + + if (!pdata) + return ERR_PTR(-ENOMEM); + + pdata->base = base; + pdata->ipdata = ipdata; + pdata->spdata = spdata; + + return ipu_bus_add_device(pdev, parent, pdata, iommu, NULL, + 
IPU_ISYS_NAME, nr); +} + +static struct ipu_bus_device *ipu_psys_init(struct pci_dev *pdev, + struct device *parent, + struct device *iommu, + void __iomem *base, + const struct ipu_psys_internal_pdata + *ipdata, unsigned int nr) +{ + struct ipu_psys_pdata *pdata = + devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL); + + if (!pdata) + return ERR_PTR(-ENOMEM); + + pdata->base = base; + pdata->ipdata = ipdata; + return ipu_bus_add_device(pdev, parent, pdata, iommu, NULL, + IPU_PSYS_NAME, nr); +} + +int ipu_fw_authenticate(void *data, u64 val) +{ + struct ipu_device *isp = data; + int ret; + + if (!isp->secure_mode) + return -EINVAL; + + ret = ipu_buttress_reset_authentication(isp); + if (ret) { + dev_err(&isp->pdev->dev, "Failed to reset authentication!\n"); + return ret; + } + + return ipu_buttress_authenticate(isp); +} +EXPORT_SYMBOL(ipu_fw_authenticate); +DEFINE_SIMPLE_ATTRIBUTE(authenticate_fops, NULL, ipu_fw_authenticate, "%llu\n"); + +#ifdef CONFIG_DEBUG_FS +static int resume_ipu_bus_device(struct ipu_bus_device *adev) +{ + struct device *dev = &adev->dev; + const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL; + + if (!pm || !pm->resume) + return -EIO; + + return pm->resume(dev); +} + +static int suspend_ipu_bus_device(struct ipu_bus_device *adev) +{ + struct device *dev = &adev->dev; + const struct dev_pm_ops *pm = dev->driver ? 
dev->driver->pm : NULL; + + if (!pm || !pm->suspend) + return -EIO; + + return pm->suspend(dev); +} + +static int force_suspend_get(void *data, u64 *val) +{ + struct ipu_device *isp = data; + struct ipu_buttress *b = &isp->buttress; + + *val = b->force_suspend; + return 0; +} + +static int force_suspend_set(void *data, u64 val) +{ + struct ipu_device *isp = data; + struct ipu_buttress *b = &isp->buttress; + int ret = 0; + + if (val == b->force_suspend) + return 0; + + if (val) { + b->force_suspend = 1; + ret = suspend_ipu_bus_device(isp->psys_iommu); + if (ret) { + dev_err(&isp->pdev->dev, "Failed to suspend psys\n"); + return ret; + } + ret = suspend_ipu_bus_device(isp->isys_iommu); + if (ret) { + dev_err(&isp->pdev->dev, "Failed to suspend isys\n"); + return ret; + } + ret = pci_set_power_state(isp->pdev, PCI_D3hot); + if (ret) { + dev_err(&isp->pdev->dev, + "Failed to suspend IUnit PCI device\n"); + return ret; + } + } else { + ret = pci_set_power_state(isp->pdev, PCI_D0); + if (ret) { + dev_err(&isp->pdev->dev, + "Failed to suspend IUnit PCI device\n"); + return ret; + } + ret = resume_ipu_bus_device(isp->isys_iommu); + if (ret) { + dev_err(&isp->pdev->dev, "Failed to resume isys\n"); + return ret; + } + ret = resume_ipu_bus_device(isp->psys_iommu); + if (ret) { + dev_err(&isp->pdev->dev, "Failed to resume psys\n"); + return ret; + } + b->force_suspend = 0; + } + + return 0; +} + +DEFINE_SIMPLE_ATTRIBUTE(force_suspend_fops, force_suspend_get, + force_suspend_set, "%llu\n"); +/* + * The sysfs interface for reloading cpd fw is there only for debug purpose, + * and it must not be used when either isys or psys is in use. 
+ */ +static int cpd_fw_reload(void *data, u64 val) +{ + struct ipu_device *isp = data; + int rval = -EINVAL; + + if (isp->cpd_fw_reload) + rval = isp->cpd_fw_reload(isp); + if (!rval && isp->isys_fw_reload) + rval = isp->isys_fw_reload(isp); + + return rval; +} + +DEFINE_SIMPLE_ATTRIBUTE(cpd_fw_fops, NULL, cpd_fw_reload, "%llu\n"); + +#endif /* CONFIG_DEBUG_FS */ + +static int ipu_init_debugfs(struct ipu_device *isp) +{ +#ifdef CONFIG_DEBUG_FS + struct dentry *file; + struct dentry *dir; + + dir = debugfs_create_dir(pci_name(isp->pdev), NULL); + if (!dir) + return -ENOMEM; + + file = debugfs_create_file("force_suspend", 0700, dir, isp, + &force_suspend_fops); + if (!file) + goto err; + file = debugfs_create_file("authenticate", 0700, dir, isp, + &authenticate_fops); + if (!file) + goto err; + + file = debugfs_create_file("cpd_fw_reload", 0700, dir, isp, + &cpd_fw_fops); + if (!file) + goto err; + + if (ipu_trace_debugfs_add(isp, dir)) + goto err; + + isp->ipu_dir = dir; + + if (ipu_buttress_debugfs_init(isp)) + goto err; + + return 0; +err: + debugfs_remove_recursive(dir); + return -ENOMEM; +#else + return 0; +#endif /* CONFIG_DEBUG_FS */ +} + +static void ipu_remove_debugfs(struct ipu_device *isp) +{ + /* + * Since isys and psys debugfs dir will be created under ipu root dir, + * mark its dentry to NULL to avoid duplicate removal. 
+ */ + debugfs_remove_recursive(isp->ipu_dir); + isp->ipu_dir = NULL; +} + +static int ipu_pci_config_setup(struct pci_dev *dev) +{ + u16 pci_command; + int rval = pci_enable_msi(dev); + + if (rval) { + dev_err(&dev->dev, "Failed to enable msi (%d)\n", rval); + return rval; + } + + pci_read_config_word(dev, PCI_COMMAND, &pci_command); + pci_command |= PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER | + PCI_COMMAND_INTX_DISABLE; + pci_write_config_word(dev, PCI_COMMAND, pci_command); + + return 0; +} + +static void ipu_configure_vc_mechanism(struct ipu_device *isp) +{ + u32 val = readl(isp->base + BUTTRESS_REG_BTRS_CTRL); + + if (IPU_BTRS_ARB_STALL_MODE_VC0 == IPU_BTRS_ARB_MODE_TYPE_STALL) + val |= BUTTRESS_REG_BTRS_CTRL_STALL_MODE_VC0; + else + val &= ~BUTTRESS_REG_BTRS_CTRL_STALL_MODE_VC0; + + if (IPU_BTRS_ARB_STALL_MODE_VC1 == IPU_BTRS_ARB_MODE_TYPE_STALL) + val |= BUTTRESS_REG_BTRS_CTRL_STALL_MODE_VC1; + else + val &= ~BUTTRESS_REG_BTRS_CTRL_STALL_MODE_VC1; + + writel(val, isp->base + BUTTRESS_REG_BTRS_CTRL); +} + +int request_cpd_fw(const struct firmware **firmware_p, const char *name, + struct device *device) +{ + const struct firmware *fw; + struct firmware *tmp; + int ret; + + ret = request_firmware(&fw, name, device); + if (ret) + return ret; + + if (is_vmalloc_addr(fw->data)) { + *firmware_p = fw; + } else { + tmp = (struct firmware *)kzalloc(sizeof(struct firmware), GFP_KERNEL); + if (!tmp) + return -ENOMEM; + tmp->size = fw->size; + tmp->data = vmalloc(fw->size); + if (!tmp->data) { + kfree(tmp); + return -ENOMEM; + } + memcpy((void *)tmp->data, fw->data, fw->size); + *firmware_p = tmp; + release_firmware(fw); + } + + return 0; +} +EXPORT_SYMBOL(request_cpd_fw); + +static int ipu_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) +{ + struct ipu_device *isp; + phys_addr_t phys; + void __iomem *const *iomap; + void __iomem *isys_base = NULL; + void __iomem *psys_base = NULL; + struct ipu_buttress_ctrl *isys_ctrl, *psys_ctrl; + unsigned int dma_mask = 
IPU_DMA_MASK; + int rval; + + trace_printk("B|%d|TMWK\n", current->pid); + + isp = devm_kzalloc(&pdev->dev, sizeof(*isp), GFP_KERNEL); + if (!isp) + return -ENOMEM; + + dev_set_name(&pdev->dev, "intel-ipu"); + isp->pdev = pdev; + INIT_LIST_HEAD(&isp->devices); + + rval = pcim_enable_device(pdev); + if (rval) { + dev_err(&pdev->dev, "Failed to enable CI ISP device (%d)\n", + rval); + trace_printk("E|TMWK\n"); + return rval; + } + + dev_info(&pdev->dev, "Device 0x%x (rev: 0x%x)\n", + pdev->device, pdev->revision); + + phys = pci_resource_start(pdev, IPU_PCI_BAR); + + rval = pcim_iomap_regions(pdev, + 1 << IPU_PCI_BAR, + pci_name(pdev)); + if (rval) { + dev_err(&pdev->dev, "Failed to I/O memory remapping (%d)\n", + rval); + trace_printk("E|TMWK\n"); + return rval; + } + dev_info(&pdev->dev, "physical base address 0x%llx\n", phys); + + iomap = pcim_iomap_table(pdev); + if (!iomap) { + dev_err(&pdev->dev, "Failed to iomap table (%d)\n", rval); + trace_printk("E|TMWK\n"); + return -ENODEV; + } + + isp->base = iomap[IPU_PCI_BAR]; + dev_info(&pdev->dev, "mapped as: 0x%p\n", isp->base); + + pci_set_drvdata(pdev, isp); + pci_set_master(pdev); + + isp->cpd_fw_name = IPU_CPD_FIRMWARE_NAME; + + isys_base = isp->base + isys_ipdata.hw_variant.offset; + psys_base = isp->base + psys_ipdata.hw_variant.offset; + + rval = pci_set_dma_mask(pdev, DMA_BIT_MASK(dma_mask)); + if (!rval) + rval = pci_set_consistent_dma_mask(pdev, + DMA_BIT_MASK(dma_mask)); + if (rval) { + dev_err(&pdev->dev, "Failed to set DMA mask (%d)\n", rval); + trace_printk("E|TMWK\n"); + return rval; + } + + rval = ipu_pci_config_setup(pdev); + if (rval) { + trace_printk("E|TMWK\n"); + return rval; + } + + rval = devm_request_threaded_irq(&pdev->dev, pdev->irq, + ipu_buttress_isr, + ipu_buttress_isr_threaded, + IRQF_SHARED, IPU_NAME, isp); + if (rval) { + dev_err(&pdev->dev, "Requesting irq failed(%d)\n", rval); + trace_printk("E|TMWK\n"); + return rval; + } + + rval = ipu_buttress_init(isp); + if (rval) { + 
trace_printk("E|TMWK\n"); + return rval; + } + + dev_info(&pdev->dev, "cpd file name: %s\n", isp->cpd_fw_name); + + rval = request_cpd_fw(&isp->cpd_fw, isp->cpd_fw_name, &pdev->dev); + if (rval) { + dev_err(&isp->pdev->dev, "Requesting signed firmware failed\n"); + trace_printk("E|TMWK\n"); + return rval; + } + + rval = ipu_cpd_validate_cpd_file(isp, isp->cpd_fw->data, + isp->cpd_fw->size); + if (rval) { + dev_err(&isp->pdev->dev, "Failed to validate cpd\n"); + goto out_ipu_bus_del_devices; + } + + rval = ipu_trace_add(isp); + if (rval) + dev_err(&pdev->dev, "Trace support not available\n"); + + /* + * NOTE Device hierarchy below is important to ensure proper + * runtime suspend and resume order. + * Also registration order is important to ensure proper + * suspend and resume order during system + * suspend. Registration order is as follows: + * isys_iommu->isys->psys_iommu->psys + */ + isys_ctrl = devm_kzalloc(&pdev->dev, sizeof(*isys_ctrl), GFP_KERNEL); + if (!isys_ctrl) { + rval = -ENOMEM; + goto out_ipu_bus_del_devices; + } + + /* Init buttress control with default values based on the HW */ + memcpy(isys_ctrl, &isys_buttress_ctrl, sizeof(*isys_ctrl)); + + isp->isys_iommu = ipu_mmu_init(pdev, &pdev->dev, isys_ctrl, + isys_base, + &isys_ipdata.hw_variant, 0, ISYS_MMID); + rval = PTR_ERR(isp->isys_iommu); + if (IS_ERR(isp->isys_iommu)) { + dev_err(&pdev->dev, "can't create isys iommu device\n"); + rval = -ENOMEM; + goto out_ipu_bus_del_devices; + } + + isp->isys = ipu_isys_init(pdev, &isp->isys_iommu->dev, + &isp->isys_iommu->dev, isys_base, + &isys_ipdata, pdev->dev.platform_data, 0); + rval = PTR_ERR(isp->isys); + if (IS_ERR(isp->isys)) + goto out_ipu_bus_del_devices; + + psys_ctrl = devm_kzalloc(&pdev->dev, sizeof(*psys_ctrl), GFP_KERNEL); + if (!psys_ctrl) { + rval = -ENOMEM; + goto out_ipu_bus_del_devices; + } + + /* Init buttress control with default values based on the HW */ + memcpy(psys_ctrl, &psys_buttress_ctrl, sizeof(*psys_ctrl)); + + isp->psys_iommu = 
ipu_mmu_init(pdev, + isp->isys_iommu ? + &isp->isys_iommu->dev : + &pdev->dev, psys_ctrl, psys_base, + &psys_ipdata.hw_variant, 1, PSYS_MMID); + rval = PTR_ERR(isp->psys_iommu); + if (IS_ERR(isp->psys_iommu)) { + dev_err(&pdev->dev, "can't create psys iommu device\n"); + goto out_ipu_bus_del_devices; + } + + isp->psys = ipu_psys_init(pdev, &isp->psys_iommu->dev, + &isp->psys_iommu->dev, psys_base, + &psys_ipdata, 0); + rval = PTR_ERR(isp->psys); + if (IS_ERR(isp->psys)) + goto out_ipu_bus_del_devices; + + rval = ipu_init_debugfs(isp); + if (rval) { + dev_err(&pdev->dev, "Failed to initialize debugfs"); + goto out_ipu_bus_del_devices; + } + + /* Configure the arbitration mechanisms for VC requests */ + ipu_configure_vc_mechanism(isp); + + pm_runtime_put_noidle(&pdev->dev); + pm_runtime_allow(&pdev->dev); + + dev_info(&pdev->dev, "IPU driver verion %d.%d\n", IPU_MAJOR_VERSION, + IPU_MINOR_VERSION); + + trace_printk("E|TMWK\n"); + return 0; + +out_ipu_bus_del_devices: + ipu_bus_del_devices(pdev); + ipu_buttress_exit(isp); + release_firmware(isp->cpd_fw); + + trace_printk("E|TMWK\n"); + return rval; +} + +static void ipu_pci_remove(struct pci_dev *pdev) +{ + struct ipu_device *isp = pci_get_drvdata(pdev); + + ipu_remove_debugfs(isp); + ipu_trace_release(isp); + + ipu_bus_del_devices(pdev); + + pm_runtime_forbid(&pdev->dev); + pm_runtime_get_noresume(&pdev->dev); + + pci_release_regions(pdev); + pci_disable_device(pdev); + + ipu_buttress_exit(isp); + + release_firmware(isp->cpd_fw); +} + +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 13, 0) +static void ipu_pci_reset_notify(struct pci_dev *pdev, bool prepare) +{ + struct ipu_device *isp = pci_get_drvdata(pdev); + + if (prepare) { + dev_err(&pdev->dev, "FLR prepare\n"); + pm_runtime_forbid(&isp->pdev->dev); + isp->flr_done = true; + return; + } + + ipu_buttress_restore(isp); + if (isp->secure_mode) + ipu_buttress_reset_authentication(isp); + + ipu_bus_flr_recovery(); + isp->ipc_reinit = true; + 
pm_runtime_allow(&isp->pdev->dev); + + dev_err(&pdev->dev, "FLR completed\n"); +} +#else +static void ipu_pci_reset_prepare(struct pci_dev *pdev) +{ + struct ipu_device *isp = pci_get_drvdata(pdev); + + dev_warn(&pdev->dev, "FLR prepare\n"); + pm_runtime_forbid(&isp->pdev->dev); + isp->flr_done = true; +} + +static void ipu_pci_reset_done(struct pci_dev *pdev) +{ + struct ipu_device *isp = pci_get_drvdata(pdev); + + ipu_buttress_restore(isp); + if (isp->secure_mode) + ipu_buttress_reset_authentication(isp); + + ipu_bus_flr_recovery(); + isp->ipc_reinit = true; + pm_runtime_allow(&isp->pdev->dev); + + dev_warn(&pdev->dev, "FLR completed\n"); +} +#endif + +#ifdef CONFIG_PM + +/* + * PCI base driver code requires driver to provide these to enable + * PCI device level PM state transitions (D0<->D3) + */ +static int ipu_suspend(struct device *dev) +{ + struct pci_dev *pdev = to_pci_dev(dev); + struct ipu_device *isp = pci_get_drvdata(pdev); + + isp->flr_done = false; + + return 0; +} + +static int ipu_resume(struct device *dev) +{ + struct pci_dev *pdev = to_pci_dev(dev); + struct ipu_device *isp = pci_get_drvdata(pdev); + struct ipu_buttress *b = &isp->buttress; + int rval; + + /* Configure the arbitration mechanisms for VC requests */ + ipu_configure_vc_mechanism(isp); + + ipu_buttress_set_secure_mode(isp); + isp->secure_mode = ipu_buttress_get_secure_mode(isp); + dev_info(dev, "IPU in %s mode\n", + isp->secure_mode ? 
"secure" : "non-secure"); + + ipu_buttress_restore(isp); + + rval = ipu_buttress_ipc_reset(isp, &b->cse); + if (rval) + dev_err(&isp->pdev->dev, "IPC reset protocol failed!\n"); + + return 0; +} + +static int ipu_runtime_resume(struct device *dev) +{ + struct pci_dev *pdev = to_pci_dev(dev); + struct ipu_device *isp = pci_get_drvdata(pdev); + int rval; + + ipu_configure_vc_mechanism(isp); + ipu_buttress_restore(isp); + + if (isp->ipc_reinit) { + struct ipu_buttress *b = &isp->buttress; + + isp->ipc_reinit = false; + rval = ipu_buttress_ipc_reset(isp, &b->cse); + if (rval) + dev_err(&isp->pdev->dev, + "IPC reset protocol failed!\n"); + } + + return 0; +} + +static const struct dev_pm_ops ipu_pm_ops = { + SET_SYSTEM_SLEEP_PM_OPS(&ipu_suspend, &ipu_resume) + SET_RUNTIME_PM_OPS(&ipu_suspend, /* Same as in suspend flow */ + &ipu_runtime_resume, + NULL) +}; + +#define IPU_PM (&ipu_pm_ops) +#else +#define IPU_PM NULL +#endif + +static const struct pci_device_id ipu_pci_tbl[] = { + {PCI_DEVICE(PCI_VENDOR_ID_INTEL, IPU_PCI_ID)}, + {0,} +}; +MODULE_DEVICE_TABLE(pci, ipu_pci_tbl); + +static const struct pci_error_handlers pci_err_handlers = { +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 13, 0) + .reset_notify = ipu_pci_reset_notify, +#else + .reset_prepare = ipu_pci_reset_prepare, + .reset_done = ipu_pci_reset_done, +#endif +}; + +static struct pci_driver ipu_pci_driver = { + .name = IPU_NAME, + .id_table = ipu_pci_tbl, + .probe = ipu_pci_probe, + .remove = ipu_pci_remove, + .driver = { + .pm = IPU_PM, + }, + .err_handler = &pci_err_handlers, +}; + +static int __init ipu_init(void) +{ + int rval = ipu_bus_register(); + + if (rval) { + pr_warn("can't register ipu bus (%d)\n", rval); + return rval; + } + + rval = pci_register_driver(&ipu_pci_driver); + if (rval) { + pr_warn("can't register pci driver (%d)\n", rval); + goto out_pci_register_driver; + } + + return 0; + +out_pci_register_driver: + ipu_bus_unregister(); + + return rval; +} + +static void __exit ipu_exit(void) +{ + 
pci_unregister_driver(&ipu_pci_driver); + ipu_bus_unregister(); +} + +module_init(ipu_init); +module_exit(ipu_exit); + +MODULE_AUTHOR("Sakari Ailus "); +MODULE_AUTHOR("Jouni Högander "); +MODULE_AUTHOR("Antti Laakso "); +MODULE_AUTHOR("Samu Onkalo "); +MODULE_AUTHOR("Jianxu Zheng "); +MODULE_AUTHOR("Tianshu Qiu "); +MODULE_AUTHOR("Renwei Wu "); +MODULE_AUTHOR("Bingbu Cao "); +MODULE_AUTHOR("Yunliang Ding "); +MODULE_AUTHOR("Zaikuo Wang "); +MODULE_AUTHOR("Leifu Zhao "); +MODULE_AUTHOR("Xia Wu "); +MODULE_AUTHOR("Kun Jiang "); +MODULE_AUTHOR("Intel"); +MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("Intel ipu pci driver"); diff --git a/drivers/media/pci/intel/ipu.h b/drivers/media/pci/intel/ipu.h new file mode 100644 index 0000000000000..96e9a2144133b --- /dev/null +++ b/drivers/media/pci/intel/ipu.h @@ -0,0 +1,105 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (C) 2013 - 2018 Intel Corporation */ + +#ifndef IPU_H +#define IPU_H + +#include +#include +#include +#include + +#include "ipu-pdata.h" +#include "ipu-bus.h" +#include "ipu-buttress.h" +#include "ipu-trace.h" + +#if defined(CONFIG_VIDEO_INTEL_IPU4) +#define IPU_PCI_ID 0x5a88 +#elif defined(CONFIG_VIDEO_INTEL_IPU4P) +#define IPU_PCI_ID 0x8a19 +#endif + +/* + * IPU version definitions to reflect the IPU driver changes. + * Both ISYS and PSYS share the same version. + */ +#define IPU_MAJOR_VERSION 1 +#define IPU_MINOR_VERSION 0 +#define IPU_DRIVER_VERSION (IPU_MAJOR_VERSION << 16 | IPU_MINOR_VERSION) + +/* processing system frequency: 25Mhz x ratio, Legal values [8,32] */ +#define PS_FREQ_CTL_DEFAULT_RATIO 0x12 + +/* input system frequency: 1600Mhz / divisor. Legal values [2,8] */ +#define IS_FREQ_SOURCE 1600000000 +#define IS_FREQ_CTL_DIVISOR 0x4 + +/* + * ISYS DMA can overshoot. For higher resolutions over allocation is one line + * but it must be at minimum 1024 bytes. Value could be different in + * different versions / generations thus provide it via platform data. 
+ */ +#define IPU_ISYS_OVERALLOC_MIN 1024 + +/* + * Physical pages in GDA 128 * 1K pages. + */ +#define IPU_DEVICE_GDA_NR_PAGES 128 + +/* + * Virtualization factor to calculate the available virtual pages. + */ +#if defined(CONFIG_VIDEO_INTEL_IPU4) +#define IPU_DEVICE_GDA_VIRT_FACTOR 8 +#elif defined(CONFIG_VIDEO_INTEL_IPU4P) +#define IPU_DEVICE_GDA_VIRT_FACTOR 32 +#else +#define IPU_DEVICE_GDA_VIRT_FACTOR 8 +#endif + +struct pci_dev; +struct list_head; +struct firmware; + +#define NR_OF_MMU_RESOURCES 2 + +struct ipu_device { + struct pci_dev *pdev; + struct list_head devices; + struct ipu_bus_device *isys_iommu, *isys; + struct ipu_bus_device *psys_iommu, *psys; + struct ipu_buttress buttress; + + const struct firmware *cpd_fw; + const char *cpd_fw_name; + u64 *pkg_dir; + dma_addr_t pkg_dir_dma_addr; + unsigned int pkg_dir_size; + + void __iomem *base; + void __iomem *base2; + struct dentry *ipu_dir; + struct ipu_trace *trace; + bool flr_done; + bool ipc_reinit; + bool secure_mode; + + int (*isys_fw_reload)(struct ipu_device *isp); + int (*cpd_fw_reload)(struct ipu_device *isp); +}; + +#define IPU_DMA_MASK 39 +#define IPU_LIB_CALL_TIMEOUT_MS 2000 +#define IPU_PSYS_CMD_TIMEOUT_MS 2000 +#define IPU_PSYS_OPEN_TIMEOUT_US 50 +#define IPU_PSYS_OPEN_RETRY (10000 / IPU_PSYS_OPEN_TIMEOUT_US) + +int ipu_fw_authenticate(void *data, u64 val); +void ipu_configure_spc(struct ipu_device *isp, + const struct ipu_hw_variants *hw_variant, + int pkg_dir_idx, void __iomem *base, u64 *pkg_dir, + dma_addr_t pkg_dir_dma_addr); +int request_cpd_fw(const struct firmware **firmware_p, const char *name, + struct device *device); +#endif /* IPU_H */ diff --git a/drivers/media/pci/intel/ipu3/ipu3-cio2.c b/drivers/media/pci/intel/ipu3/ipu3-cio2.c index 29027159eced8..ca1a4d8e972ec 100644 --- a/drivers/media/pci/intel/ipu3/ipu3-cio2.c +++ b/drivers/media/pci/intel/ipu3/ipu3-cio2.c @@ -1846,12 +1846,12 @@ static void cio2_pci_remove(struct pci_dev *pci_dev) struct cio2_device *cio2 = 
pci_get_drvdata(pci_dev); unsigned int i; + media_device_unregister(&cio2->media_dev); cio2_notifier_exit(cio2); - cio2_fbpt_exit_dummy(cio2); for (i = 0; i < CIO2_QUEUES; i++) cio2_queue_exit(cio2, &cio2->queue[i]); + cio2_fbpt_exit_dummy(cio2); v4l2_device_unregister(&cio2->v4l2_dev); - media_device_unregister(&cio2->media_dev); media_device_cleanup(&cio2->media_dev); mutex_destroy(&cio2->lock); } diff --git a/drivers/media/pci/intel/ipu4/Makefile b/drivers/media/pci/intel/ipu4/Makefile new file mode 100644 index 0000000000000..dae16f35e7f26 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/Makefile @@ -0,0 +1,131 @@ +# SPDX-License-Identifier: GPL-2.0 +# Copyright (c) 2010 - 2018, Intel Corporation. + +ifneq ($(EXTERNAL_BUILD), 1) +srcpath := $(srctree) +endif + +ifdef CONFIG_VIDEO_INTEL_IPU4 +ccflags-y += -DHAS_DUAL_CMD_CTX_SUPPORT=0 -DIPU_VC_SUPPORT -DIPU_HAS_ISA -DIPU_PSYS_LEGACY +ccflags-y += -DIPU_META_DATA_SUPPORT -DI2C_WA + +intel-ipu4-objs += ../ipu.o \ + ../ipu-bus.o \ + ../ipu-dma.o \ + ../ipu-buttress.o \ + ../ipu-trace.o \ + ../ipu-cpd.o \ + ../ipu-fw-com.o \ + ipu4.o + +obj-$(CONFIG_VIDEO_INTEL_IPU) += intel-ipu4.o + +intel-ipu4-mmu-objs += ../ipu-mmu.o +obj-$(CONFIG_VIDEO_INTEL_IPU) += intel-ipu4-mmu.o + +intel-ipu4-isys-objs += ../ipu-isys.o \ + ../ipu-isys-csi2.o \ + ipu4-isys.o \ + ipu4-isys-csi2.o \ + ../ipu-isys-csi2-be-soc.o \ + ../ipu-isys-csi2-be.o \ + ../ipu-fw-isys.o \ + ipu4-isys-isa.o \ + ../ipu-isys-video.o \ + ../ipu-isys-queue.o \ + ../ipu-isys-subdev.o \ + ../ipu-isys-tpg.o + +obj-$(CONFIG_VIDEO_INTEL_IPU) += intel-ipu4-isys.o + +intel-ipu4-psys-objs += ../ipu-psys.o \ + ipu4-psys.o \ + ipu4-resources.o \ + +ifndef CONFIG_VIDEO_INTEL_IPU_FW_LIB +intel-ipu4-psys-objs += ipu4-fw-resources.o \ + ../ipu-fw-psys.o +endif + +ifeq ($(CONFIG_COMPAT),y) +intel-ipu4-psys-objs += ../ipu-psys-compat32.o +endif + +obj-$(CONFIG_VIDEO_INTEL_IPU) += intel-ipu4-psys.o + +ifdef CONFIG_VIDEO_INTEL_IPU_FW_LIB +include 
$(srcpath)/$(src)/ipu4-css/Makefile.isyslib +include $(srcpath)/$(src)/ipu4-css/Makefile.psyslib +endif + +ccflags-y += -I$(srcpath)/$(src)/../../../../../include/ +ccflags-y += -I$(srcpath)/$(src)/../ +ccflags-y += -I$(srcpath)/$(src)/ +ifdef CONFIG_VIDEO_INTEL_IPU_FW_LIB +ccflags-y += -I$(srcpath)/$(src)/ipu4-css +endif + +ccflags-y += -DPARAMETER_INTERFACE_V2 +endif + +ifdef CONFIG_VIDEO_INTEL_IPU4P +ccflags-y += -DHAS_DUAL_CMD_CTX_SUPPORT=0 -DIPU_VC_SUPPORT -DIPU_PSYS_LEGACY -DIPU_HAS_ISA +ccflags-y += -DIPU_META_DATA_SUPPORT + +intel-ipu4p-objs += ../ipu.o \ + ../ipu-bus.o \ + ../ipu-dma.o \ + ../ipu-buttress.o \ + ../ipu-trace.o \ + ../ipu-cpd.o \ + ../ipu-fw-com.o \ + ipu4.o + +obj-$(CONFIG_VIDEO_INTEL_IPU) += intel-ipu4p.o + +intel-ipu4p-mmu-objs += ../ipu-mmu.o +obj-$(CONFIG_VIDEO_INTEL_IPU) += intel-ipu4p-mmu.o + +intel-ipu4p-isys-objs += ../ipu-isys.o \ + ../ipu-isys-csi2.o \ + ipu4-isys.o \ + ipu4p-isys-csi2.o \ + ../ipu-isys-csi2-be-soc.o \ + ../ipu-isys-csi2-be.o \ + ../ipu-fw-isys.o \ + ipu4-isys-isa.o \ + ../ipu-isys-video.o \ + ../ipu-isys-queue.o \ + ../ipu-isys-subdev.o \ + ../ipu-isys-tpg.o +obj-$(CONFIG_VIDEO_INTEL_IPU) += intel-ipu4p-isys.o + +intel-ipu4p-psys-objs += ../ipu-psys.o \ + ipu4-psys.o \ + ipu4-resources.o \ + +ifndef CONFIG_VIDEO_INTEL_IPU_FW_LIB +intel-ipu4p-psys-objs += ipu4-fw-resources.o \ + ../ipu-fw-psys.o +endif + +ifeq ($(CONFIG_COMPAT),y) +intel-ipu4p-psys-objs += ../ipu-psys-compat32.o +endif + +obj-$(CONFIG_VIDEO_INTEL_IPU) += intel-ipu4p-psys.o + +ifdef CONFIG_VIDEO_INTEL_IPU_FW_LIB +include $(srcpath)/$(src)/ipu4p-css/Makefile.isyslib +include $(srcpath)/$(src)/ipu4p-css/Makefile.psyslib +endif + +ccflags-y += -I$(srcpath)/$(src)/../../../../../include/ +ccflags-y += -I$(srcpath)/$(src)/../ +ccflags-y += -I$(srcpath)/$(src)/ +ifdef CONFIG_VIDEO_INTEL_IPU_FW_LIB +ccflags-y += -I$(srcpath)/$(src)/ipu4p-css +endif + +ccflags-y += -DPARAMETER_INTERFACE_V2 +endif diff --git 
a/drivers/media/pci/intel/ipu4/ipu-platform-buttress-regs.h b/drivers/media/pci/intel/ipu4/ipu-platform-buttress-regs.h new file mode 100644 index 0000000000000..ffd770c881987 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu-platform-buttress-regs.h @@ -0,0 +1,282 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (C) 2014 - 2018 Intel Corporation */ + +#ifndef IPU_PLATFORM_BUTTRESS_REGS_H +#define IPU_PLATFORM_BUTTRESS_REGS_H + +#ifdef CONFIG_VIDEO_INTEL_IPU4P +#define BUTTRESS_PWR_STATE_IS_PWR_FSM_SHIFT 20 +#define BUTTRESS_PWR_STATE_IS_PWR_FSM_MASK (0x1f << 20) +#define BUTTRESS_PWR_STATE_IS_PWR_FSM_IDLE 0x0 +#define BUTTRESS_PWR_STATE_IS_PWR_FSM_IS_RDY 0xc + +#define BUTTRESS_PWR_STATE_PS_PWR_FSM_SHIFT 25 +#define BUTTRESS_PWR_STATE_PS_PWR_FSM_MASK (0x1f << 25) +#define BUTTRESS_PWR_STATE_PS_PWR_FSM_IDLE 0x0 +#define BUTTRESS_PWR_STATE_PS_PWR_FSM_PS_PWR_UP 0x10 + +#define BUTTRESS_REG_CSI_BSCAN_EXCLUDE 0x100d8 +#define CPHY0_DLL_OVRD_OFFSET 0x10100 +#define CPHY0_RX_CONTROL1_OFFSET 0x10110 +#define DPHY0_DLL_OVRD_OFFSET 0x1014c +#define DPHY0_RX_CNTRL_OFFSET 0x10158 +#define BB0_AFE_CONFIG_OFFSET 0x10174 + +#define BUTTRESS_REG_IS_FREQ_CTL_RATIO_SHIFT 1 +#define BUTTRESS_REG_PS_FREQ_CTL_OVRD_SHIFT 7 +#define BUTTRESS_REG_PS_FREQ_CTL_RATIO_SHIFT 8 + +#define BUTTRESS_REG_CPHYX_DLL_OVRD(x) \ + (CPHY0_DLL_OVRD_OFFSET + (x >> 1) * 0x100) +#define BUTTRESS_REG_CPHYX_RX_CONTROL1(x) \ + (CPHY0_RX_CONTROL1_OFFSET + (x >> 1) * 0x100) +#define BUTTRESS_REG_DPHYX_DLL_OVRD(x) \ + (DPHY0_DLL_OVRD_OFFSET + (x >> 1) * 0x100) +#define BUTTRESS_REG_DPHYX_RX_CNTRL(x) \ + (DPHY0_RX_CNTRL_OFFSET + (x >> 1) * 0x100) +#define BUTTRESS_REG_BBX_AFE_CONFIG(x) \ + (BB0_AFE_CONFIG_OFFSET + (x >> 1) * 0x100) +#endif /* CONFIG_VIDEO_INTEL_IPU4P */ + +#ifdef CONFIG_VIDEO_INTEL_IPU4 +#define BUTTRESS_PWR_STATE_IS_PWR_FSM_SHIFT 20 +#define BUTTRESS_PWR_STATE_IS_PWR_FSM_MASK (0xf << 20) +#define BUTTRESS_PWR_STATE_IS_PWR_FSM_IDLE 0x0 +#define BUTTRESS_PWR_STATE_IS_PWR_FSM_IS_RDY 0xa 
+ +#define BUTTRESS_PWR_STATE_PS_PWR_FSM_SHIFT 24 +#define BUTTRESS_PWR_STATE_PS_PWR_FSM_MASK (0x1f << 24) +#define BUTTRESS_PWR_STATE_PS_PWR_FSM_IDLE 0x0 +#define BUTTRESS_PWR_STATE_PS_PWR_FSM_PS_PWR_UP 0xf +#endif /* CONFIG_VIDEO_INTEL_IPU4 */ + +#define BUTTRESS_REG_WDT 0x8 +#define BUTTRESS_REG_BTRS_CTRL 0xc +#define BUTTRESS_REG_BTRS_CTRL_STALL_MODE_VC0 BIT(0) +#define BUTTRESS_REG_BTRS_CTRL_STALL_MODE_VC1 BIT(1) + +#define BUTTRESS_REG_FW_RESET_CTL 0x30 +#define BUTTRESS_FW_RESET_CTL_START_SHIFT 0 +#define BUTTRESS_FW_RESET_CTL_DONE_SHIFT 1 + +#define BUTTRESS_REG_IS_FREQ_CTL 0x34 +#define BUTTRESS_IS_FREQ_CTL_DIVISOR_MASK 0xf + +#define BUTTRESS_REG_PS_FREQ_CTL 0x38 +#define BUTTRESS_PS_FREQ_CTL_RATIO_MASK 0xff + +#define BUTTRESS_FREQ_CTL_START_SHIFT 31 +#define BUTTRESS_FREQ_CTL_QOS_FLOOR_SHIFT 8 +#define BUTTRESS_FREQ_CTL_QOS_FLOOR_MASK (0xff << 8) + +#define BUTTRESS_REG_PWR_STATE 0x5c +#define BUTTRESS_PWR_STATE_IS_PWR_SHIFT 4 +#define BUTTRESS_PWR_STATE_IS_PWR_MASK (0x7 << 4) + +#define BUTTRESS_PWR_STATE_PS_PWR_SHIFT 8 +#define BUTTRESS_PWR_STATE_PS_PWR_MASK (0x7 << 8) + +#define BUTTRESS_PWR_STATE_RESET 0x0 +#define BUTTRESS_PWR_STATE_PWR_ON_DONE 0x1 +#define BUTTRESS_PWR_STATE_PWR_RDY 0x3 +#define BUTTRESS_PWR_STATE_PWR_IDLE 0x4 + +#define BUTTRESS_PWR_STATE_HH_STATUS_SHIFT 12 +#define BUTTRESS_PWR_STATE_HH_STATUS_MASK (0x3 << 12) + +enum { + BUTTRESS_PWR_STATE_HH_STATE_IDLE, + BUTTRESS_PWR_STATE_HH_STATE_IN_PRGS, + BUTTRESS_PWR_STATE_HH_STATE_DONE, + BUTTRESS_PWR_STATE_HH_STATE_ERR, +}; + +#define BUTTRESS_PWR_STATE_IS_PWR_FSM_WAIT_4_PLL_CMP 0x1 +#define BUTTRESS_PWR_STATE_IS_PWR_FSM_WAIT_4_CLKACK 0x2 +#define BUTTRESS_PWR_STATE_IS_PWR_FSM_WAIT_4_PG_ACK 0x3 +#define BUTTRESS_PWR_STATE_IS_PWR_FSM_RST_ASSRT_CYCLES 0x4 +#define BUTTRESS_PWR_STATE_IS_PWR_FSM_STOP_CLK_CYCLES1 0x5 +#define BUTTRESS_PWR_STATE_IS_PWR_FSM_STOP_CLK_CYCLES2 0x6 +#define BUTTRESS_PWR_STATE_IS_PWR_FSM_RST_DEASSRT_CYCLES 0x7 +#define 
BUTTRESS_PWR_STATE_IS_PWR_FSM_WAIT_4_FUSE_WR_CMP 0x8 +#define BUTTRESS_PWR_STATE_IS_PWR_FSM_BRK_POINT 0x9 +#define BUTTRESS_PWR_STATE_IS_PWR_FSM_HALT_HALTED 0xb +#define BUTTRESS_PWR_STATE_IS_PWR_FSM_RST_DURATION_CNT3 0xc +#define BUTTRESS_PWR_STATE_IS_PWR_FSM_WAIT_4_CLKACK_PD 0xd +#define BUTTRESS_PWR_STATE_IS_PWR_FSM_PD_BRK_POINT 0xe +#define BUTTRESS_PWR_STATE_IS_PWR_FSM_WAIT_4_PD_PG_ACK0 0xf +#define BUTTRESS_PWR_STATE_PS_PWR_FSM_WAIT_PU_PLL_IP_RDY 0x1 +#define BUTTRESS_PWR_STATE_PS_PWR_FSM_WAIT_RO_PRE_CNT_EXH 0x2 +#define BUTTRESS_PWR_STATE_PS_PWR_FSM_WAIT_PU_VGI_PWRGOOD 0x3 +#define BUTTRESS_PWR_STATE_PS_PWR_FSM_WAIT_RO_POST_CNT_EXH 0x4 +#define BUTTRESS_PWR_STATE_PS_PWR_FSM_WR_PLL_RATIO 0x5 +#define BUTTRESS_PWR_STATE_PS_PWR_FSM_WAIT_PU_PLL_CMP 0x6 +#define BUTTRESS_PWR_STATE_PS_PWR_FSM_WAIT_PU_CLKACK 0x7 +#define BUTTRESS_PWR_STATE_PS_PWR_FSM_RST_ASSRT_CYCLES 0x8 +#define BUTTRESS_PWR_STATE_PS_PWR_FSM_STOP_CLK_CYCLES1 0x9 +#define BUTTRESS_PWR_STATE_PS_PWR_FSM_STOP_CLK_CYCLES2 0xa +#define BUTTRESS_PWR_STATE_PS_PWR_FSM_RST_DEASSRT_CYCLES 0xb +#define BUTTRESS_PWR_STATE_PS_PWR_FSM_PU_BRK_PNT 0xc +#define BUTTRESS_PWR_STATE_PS_PWR_FSM_WAIT_FUSE_ACCPT 0xd +#define BUTTRESS_PWR_STATE_PS_PWR_FSM_WAIT_4_HALTED 0x10 +#define BUTTRESS_PWR_STATE_PS_PWR_FSM_RESET_CNT3 0x11 +#define BUTTRESS_PWR_STATE_PS_PWR_FSM_WAIT_PD_CLKACK 0x12 +#define BUTTRESS_PWR_STATE_PS_PWR_FSM_WAIT_PD_OFF_IND 0x13 +#define BUTTRESS_PWR_STATE_PS_PWR_FSM_WAIT_DVFS_PH4 0x14 +#define BUTTRESS_PWR_STATE_PS_PWR_FSM_WAIT_DVFS_PLL_CMP 0x15 +#define BUTTRESS_PWR_STATE_PS_PWR_FSM_WAIT_DVFS_CLKACK 0x16 + +#define BUTTRESS_REG_SECURITY_CTL 0x300 + +#define BUTTRESS_SECURITY_CTL_FW_SECURE_MODE_SHIFT 16 +#define BUTTRESS_SECURITY_CTL_FW_SETUP_SHIFT 0 +#define BUTTRESS_SECURITY_CTL_FW_SETUP_MASK 0x1f + +#define BUTTRESS_SECURITY_CTL_FW_SETUP_DONE 0x1 +#define BUTTRESS_SECURITY_CTL_AUTH_DONE 0x2 +#define BUTTRESS_SECURITY_CTL_AUTH_FAILED 0x8 + +#define BUTTRESS_REG_SENSOR_FREQ_CTL 0x16c + +#define 
BUTTRESS_SENSOR_FREQ_CTL_OSC_OUT_FREQ_DEFAULT(i) \ + (0x1b << ((i) * 10)) +#define BUTTRESS_SENSOR_FREQ_CTL_OSC_OUT_FREQ_SHIFT(i) ((i) * 10) +#define BUTTRESS_SENSOR_FREQ_CTL_OSC_OUT_FREQ_MASK(i) \ + (0x1ff << ((i) * 10)) + +#define BUTTRESS_SENSOR_CLK_FREQ_6P75MHZ 0x176 +#define BUTTRESS_SENSOR_CLK_FREQ_8MHZ 0x164 +#define BUTTRESS_SENSOR_CLK_FREQ_9P6MHZ 0x2 +#define BUTTRESS_SENSOR_CLK_FREQ_12MHZ 0x1b2 +#define BUTTRESS_SENSOR_CLK_FREQ_13P6MHZ 0x1ac +#define BUTTRESS_SENSOR_CLK_FREQ_14P4MHZ 0x1cc +#define BUTTRESS_SENSOR_CLK_FREQ_15P8MHZ 0x1a6 +#define BUTTRESS_SENSOR_CLK_FREQ_16P2MHZ 0xca +#define BUTTRESS_SENSOR_CLK_FREQ_17P3MHZ 0x12e +#define BUTTRESS_SENSOR_CLK_FREQ_18P6MHZ 0x1c0 +#define BUTTRESS_SENSOR_CLK_FREQ_19P2MHZ 0x0 +#define BUTTRESS_SENSOR_CLK_FREQ_24MHZ 0xb2 +#define BUTTRESS_SENSOR_CLK_FREQ_26MHZ 0xae +#define BUTTRESS_SENSOR_CLK_FREQ_27MHZ 0x196 + +#define BUTTRESS_SENSOR_FREQ_CTL_LJPLL_FB_RATIO_MASK 0xff +#define BUTTRESS_SENSOR_FREQ_CTL_SEL_MIPICLK_A_SHIFT 8 +#define BUTTRESS_SENSOR_FREQ_CTL_SEL_MIPICLK_A_MASK (0x2 << 8) +#define BUTTRESS_SENSOR_FREQ_CTL_SEL_MIPICLK_C_SHIFT 10 +#define BUTTRESS_SENSOR_FREQ_CTL_SEL_MIPICLK_C_MASK (0x2 << 10) +#define BUTTRESS_SENSOR_FREQ_CTL_LJPLL_FORCE_OFF_SHIFT 12 +#define BUTTRESS_SENSOR_FREQ_CTL_LJPLL_REF_RATIO_SHIFT 14 +#define BUTTRESS_SENSOR_FREQ_CTL_LJPLL_REF_RATIO_MASK (0x2 << 14) +#define BUTTRESS_SENSOR_FREQ_CTL_LJPLL_PVD_RATIO_SHIFT 16 +#define BUTTRESS_SENSOR_FREQ_CTL_LJPLL_PVD_RATIO_MASK (0x2 << 16) +#define BUTTRESS_SENSOR_FREQ_CTL_LJPLL_OUTPUT_RATIO_SHIFT 18 +#define BUTTRESS_SENSOR_FREQ_CTL_LJPLL_OUTPUT_RATIO_MASK (0x2 << 18) +#define BUTTRESS_SENSOR_FREQ_CTL_START_SHIFT 31 + +#define BUTTRESS_REG_SENSOR_CLK_CTL 0x170 + +/* 0 <= i <= 2 */ +#define BUTTRESS_SENSOR_CLK_CTL_OSC_CLK_OUT_EN_SHIFT(i) ((i) * 2) +#define BUTTRESS_SENSOR_CLK_CTL_OSC_CLK_OUT_SEL_SHIFT(i) ((i) * 2 + 1) + +#define BUTTRESS_REG_FW_SOURCE_BASE_LO 0x78 +#define BUTTRESS_REG_FW_SOURCE_BASE_HI 0x7C +#define 
BUTTRESS_REG_FW_SOURCE_SIZE 0x80 + +#define BUTTRESS_REG_ISR_STATUS 0x90 +#define BUTTRESS_REG_ISR_ENABLED_STATUS 0x94 +#define BUTTRESS_REG_ISR_ENABLE 0x98 +#define BUTTRESS_REG_ISR_CLEAR 0x9C + +#define BUTTRESS_ISR_IS_IRQ BIT(0) +#define BUTTRESS_ISR_PS_IRQ BIT(1) +#define BUTTRESS_ISR_IPC_EXEC_DONE_BY_CSE BIT(2) +#define BUTTRESS_ISR_IPC_EXEC_DONE_BY_ISH BIT(3) +#define BUTTRESS_ISR_IPC_FROM_CSE_IS_WAITING BIT(4) +#define BUTTRESS_ISR_IPC_FROM_ISH_IS_WAITING BIT(5) +#define BUTTRESS_ISR_CSE_CSR_SET BIT(6) +#define BUTTRESS_ISR_ISH_CSR_SET BIT(7) +#define BUTTRESS_ISR_SPURIOUS_CMP BIT(8) +#define BUTTRESS_ISR_WATCHDOG_EXPIRED BIT(9) +#define BUTTRESS_ISR_PUNIT_2_IUNIT_IRQ BIT(10) +#define BUTTRESS_ISR_SAI_VIOLATION BIT(11) +#define BUTTRESS_ISR_HW_ASSERTION BIT(12) + +#define BUTTRESS_REG_IU2CSEDB0 0x100 + +#define BUTTRESS_IU2CSEDB0_BUSY_SHIFT 31 +#define BUTTRESS_IU2CSEDB0_SHORT_FORMAT_SHIFT 27 +#define BUTTRESS_IU2CSEDB0_CLIENT_ID_SHIFT 10 +#define BUTTRESS_IU2CSEDB0_IPC_CLIENT_ID_VAL 2 + +#define BUTTRESS_REG_IU2CSEDATA0 0x104 + +#define BUTTRESS_IU2CSEDATA0_IPC_BOOT_LOAD 1 +#define BUTTRESS_IU2CSEDATA0_IPC_AUTHENTICATE_RUN 2 +#define BUTTRESS_IU2CSEDATA0_IPC_AUTHENTICATE_REPLACE 3 +#define BUTTRESS_IU2CSEDATA0_IPC_UPDATE_SECURE_TOUCH 16 + +#define BUTTRESS_REG_IU2CSECSR 0x108 + +#define BUTTRESS_IU2CSECSR_IPC_PEER_COMP_ACTIONS_RST_PHASE1 BIT(0) +#define BUTTRESS_IU2CSECSR_IPC_PEER_COMP_ACTIONS_RST_PHASE2 BIT(1) +#define BUTTRESS_IU2CSECSR_IPC_PEER_QUERIED_IP_COMP_ACTIONS_RST_PHASE BIT(2) +#define BUTTRESS_IU2CSECSR_IPC_PEER_ASSERTED_REG_VALID_REQ BIT(3) +#define BUTTRESS_IU2CSECSR_IPC_PEER_ACKED_REG_VALID BIT(4) +#define BUTTRESS_IU2CSECSR_IPC_PEER_DEASSERTED_REG_VALID_REQ BIT(5) + +#define BUTTRESS_REG_CSE2IUDB0 0x304 +#define BUTTRESS_REG_CSE2IUCSR 0x30C +#define BUTTRESS_REG_CSE2IUDATA0 0x308 + +/* 0x20 == NACK, 0xf == unknown command */ +#define BUTTRESS_CSE2IUDATA0_IPC_NACK 0xf20 +#define BUTTRESS_CSE2IUDATA0_IPC_NACK_MASK 0xffff + +#define 
BUTTRESS_REG_ISH2IUCSR 0x50 +#define BUTTRESS_REG_ISH2IUDB0 0x54 +#define BUTTRESS_REG_ISH2IUDATA0 0x58 + +#define BUTTRESS_REG_IU2ISHDB0 0x10C +#define BUTTRESS_REG_IU2ISHDATA0 0x110 +#define BUTTRESS_REG_IU2ISHDATA1 0x114 +#define BUTTRESS_REG_IU2ISHCSR 0x118 + +#define BUTTRESS_REG_ISH_START_DETECT 0x198 +#define BUTTRESS_REG_ISH_START_DETECT_MASK 0x19C + +#define BUTTRESS_REG_FABRIC_CMD 0x88 + +#define BUTTRESS_FABRIC_CMD_START_TSC_SYNC BIT(0) +#define BUTTRESS_FABRIC_CMD_IS_DRAIN BIT(4) + +#define BUTTRESS_REG_TSW_CTL 0x120 +#define BUTTRESS_TSW_CTL_SOFT_RESET BIT(8) + +#define BUTTRESS_REG_TSC_LO 0x164 +#define BUTTRESS_REG_TSC_HI 0x168 + +#define BUTTRESS_REG_CSI2_PORT_CONFIG_AB 0x200 +#define BUTTRESS_CSI2_PORT_CONFIG_AB_MUX_MASK 0x1f +#define BUTTRESS_CSI2_PORT_CONFIG_AB_COMBO_SHIFT_B0 16 + +#define BUTTRESS_REG_PS_FREQ_CAPABILITIES 0xf7498 + +#define BUTTRESS_PS_FREQ_CAPABILITIES_LAST_RESOLVED_RATIO_SHIFT 24 +#define BUTTRESS_PS_FREQ_CAPABILITIES_LAST_RESOLVED_RATIO_MASK (0xff << 24) +#define BUTTRESS_PS_FREQ_CAPABILITIES_MAX_RATIO_SHIFT 16 +#define BUTTRESS_PS_FREQ_CAPABILITIES_MAX_RATIO_MASK (0xff << 16) +#define BUTTRESS_PS_FREQ_CAPABILITIES_EFFICIENT_RATIO_SHIFT 8 +#define BUTTRESS_PS_FREQ_CAPABILITIES_EFFICIENT_RATIO_MASK (0xff << 8) +#define BUTTRESS_PS_FREQ_CAPABILITIES_MIN_RATIO_SHIFT 0 +#define BUTTRESS_PS_FREQ_CAPABILITIES_MIN_RATIO_MASK (0xff) + +#define BUTTRESS_IRQS (BUTTRESS_ISR_SAI_VIOLATION | \ + BUTTRESS_ISR_IPC_FROM_CSE_IS_WAITING | \ + BUTTRESS_ISR_IPC_FROM_ISH_IS_WAITING | \ + BUTTRESS_ISR_IPC_EXEC_DONE_BY_CSE | \ + BUTTRESS_ISR_IPC_EXEC_DONE_BY_ISH | \ + BUTTRESS_ISR_IS_IRQ | \ + BUTTRESS_ISR_PS_IRQ) + +#endif /* IPU_PLATFORM_BUTTRESS_REGS_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu-platform-isys-csi2-reg.h b/drivers/media/pci/intel/ipu4/ipu-platform-isys-csi2-reg.h new file mode 100644 index 0000000000000..efdf287e38f61 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu-platform-isys-csi2-reg.h @@ -0,0 +1,222 @@ +/* 
SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (C) 2013 - 2018 Intel Corporation */ + +#ifndef IPU_PLATFORM_ISYS_CSI2_REG_H +#define IPU_PLATFORM_ISYS_CSI2_REG_H + +#ifdef CONFIG_VIDEO_INTEL_IPU4P +/* CSI RX CPHY regs */ +#define CSI2_REG_CSI_RX_CPHY_NOF_ENABLED_LANES 0x04 +#define CSI2_REG_CSI_RX_CPHY_HBP_TESTMODE 0x08 +#define CSI2_REG_CSI_RX_CPHY_PH_CRC_CFG 0x0C +#define CSI2_REG_CSI_RX_CPHY_ERR_HANDLING 0x10 +#define CSI2_REG_CSI_RX_CPHY_PORTCFG_CTL 0x14 +#define CSI2_REG_CSI_RX_CPHY_PORTCFG_TIMEOUT_CNTR 0x18 +#define CSI2_REG_CSI_RX_CPHY_SYNC_CNTR_SEL 0x1C +#define CSI2_REG_CSI_RX_CPHY_STATS 0x20 + +#define CSI2_REG_CSI2PART_IRQ_EDGE 0xB00 +#define CSI2_REG_CSI2PART_IRQ_MASK 0xB04 +#define CSI2_REG_CSI2PART_IRQ_STATUS 0xB08 +#define CSI2_REG_CSI2PART_IRQ_CLEAR 0xB0c +#define CSI2_REG_CSI2PART_IRQ_ENABLE 0xB10 +#define CSI2_REG_CSI2PART_IRQ_LEVEL_NOT_PULSE 0xB14 +#define CSI2_CSI2PART_IRQ_CSIRX 0x10000 +#define CSI2_CSI2PART_IRQ_CSI2S2M 0x20000 + +#define CSI2_REG_CSIRX_IRQ_EDGE 0xC00 +#define CSI2_REG_CSIRX_IRQ_MASK 0xC04 +#define CSI2_REG_CSIRX_IRQ_STATUS 0xC08 +#define CSI2_REG_CSIRX_IRQ_CLEAR 0xC0c +#define CSI2_REG_CSIRX_IRQ_ENABLE 0xC10 +#define CSI2_REG_CSIRX_IRQ_LEVEL_NOT_PULSE 0xC14 +#define CSI2_CSIRX_HEADER_SINGLE_ERROR_CORRECTED BIT(0) +#define CSI2_CSIRX_HEADER_MULTIPLE_ERRORS_CORRECTED BIT(1) +#define CSI2_CSIRX_PAYLOAD_CRC_ERROR BIT(2) +#define CSI2_CSIRX_FIFO_OVERFLOW BIT(3) +#define CSI2_CSIRX_RESERVED_SHORT_PACKET_DATA_TYPE BIT(4) +#define CSI2_CSIRX_RESERVED_LONG_PACKET_DATA_TYPE BIT(5) +#define CSI2_CSIRX_INCOMPLETE_LONG_PACKET BIT(6) +#define CSI2_CSIRX_FRAME_SYNC_ERROR BIT(7) +#define CSI2_CSIRX_LINE_SYNC_ERROR BIT(8) +#define CSI2_CSIRX_DPHY_RECOVERABLE_SYNC_ERROR BIT(9) +#define CSI2_CSIRX_DPHY_NONRECOVERABLE_SYNC_ERROR BIT(10) +#define CSI2_CSIRX_ESCAPE_MODE_ERROR BIT(11) +#define CSI2_CSIRX_ESCAPE_MODE_TRIGGER_EVENT BIT(12) +#define CSI2_CSIRX_ESCAPE_MODE_ULTRALOW_POWER_DATA BIT(13) +#define 
CSI2_CSIRX_ESCAPE_MODE_ULTRALOW_POWER_EXIT_CLK BIT(14) +#define CSI2_CSIRX_INTER_FRAME_SHORT_PACKET_DISCARDED BIT(15) +#define CSI2_CSIRX_INTER_FRAME_LONG_PACKET_DISCARDED BIT(16) +#define CSI2_CSIRX_NUM_ERRORS 17 + +#define CSI2_REG_CSI2S2M_IRQ_EDGE 0xD00 +#define CSI2_REG_CSI2S2M_IRQ_MASK 0xD04 +#define CSI2_REG_CSI2S2M_IRQ_STATUS 0xD08 +#define CSI2_REG_CSI2S2M_IRQ_CLEAR 0xD0c +#define CSI2_REG_CSI2S2M_IRQ_ENABLE 0xD10 +#define CSI2_REG_CSI2S2M_IRQ_LEVEL_NOT_PULSE 0xD14 + +#ifdef IPU_VC_SUPPORT +#define CSI2_IRQ_FS_VC(chn) (0x10000 << ((chn) * 4)) +#define CSI2_IRQ_FE_VC(chn) (0x20000 << ((chn) * 4)) +#define CSI2_IRQ_LS_VC(chn) (0x40000 << ((chn) * 4)) +#define CSI2_IRQ_LE_VC(chn) (0x80000 << ((chn) * 4)) +#else +#define CSI2_IRQ_FS_VC 0x10000 +#define CSI2_IRQ_FE_VC 0x20000 +#define CSI2_IRQ_LS_VC 0x40000 +#define CSI2_IRQ_LE_VC 0x80000 +#endif /* IPU_VC_SUPPORT */ +#define CSI2_REG_CL0_IBUFCTL_EN_FLUSH_FOR_IDRAIN 0x6002c +#define CSI2_REG_CL1_IBUFCTL_EN_FLUSH_FOR_IDRAIN 0x6802c +#define IPU_REG_ISYS_IBUFCTL_EN_FLUSH_FOR_IDRAIN 0xb602c +#endif /* CONFIG_VIDEO_INTEL_IPU4P */ + +#ifdef CONFIG_VIDEO_INTEL_IPU4 +/* IRQ-related registers specific to each of the four CSI receivers */ +#define CSI2_REG_CSI2PART_IRQ_EDGE 0x400 +#define CSI2_REG_CSI2PART_IRQ_MASK 0x404 +#define CSI2_REG_CSI2PART_IRQ_STATUS 0x408 +#define CSI2_REG_CSI2PART_IRQ_CLEAR 0x40c +#define CSI2_REG_CSI2PART_IRQ_ENABLE 0x410 +#define CSI2_REG_CSI2PART_IRQ_LEVEL_NOT_PULSE 0x414 +#define CSI2_CSI2PART_IRQ_CSIRX 0x10000 +#define CSI2_CSI2PART_IRQ_CSI2S2M 0x20000 + +#define CSI2_REG_CSIRX_IRQ_EDGE 0x500 +#define CSI2_REG_CSIRX_IRQ_MASK 0x504 +#define CSI2_REG_CSIRX_IRQ_STATUS 0x508 +#define CSI2_REG_CSIRX_IRQ_CLEAR 0x50c +#define CSI2_REG_CSIRX_IRQ_ENABLE 0x510 +#define CSI2_REG_CSIRX_IRQ_LEVEL_NOT_PULSE 0x514 +#define CSI2_CSIRX_HEADER_SINGLE_ERROR_CORRECTED BIT(0) +#define CSI2_CSIRX_HEADER_MULTIPLE_ERRORS_CORRECTED BIT(1) +#define CSI2_CSIRX_PAYLOAD_CRC_ERROR BIT(2) +#define 
CSI2_CSIRX_FIFO_OVERFLOW BIT(3) +#define CSI2_CSIRX_RESERVED_SHORT_PACKET_DATA_TYPE BIT(4) +#define CSI2_CSIRX_RESERVED_LONG_PACKET_DATA_TYPE BIT(5) +#define CSI2_CSIRX_INCOMPLETE_LONG_PACKET BIT(6) +#define CSI2_CSIRX_FRAME_SYNC_ERROR BIT(7) +#define CSI2_CSIRX_LINE_SYNC_ERROR BIT(8) +#define CSI2_CSIRX_DPHY_RECOVERABLE_SYNC_ERROR BIT(9) +#define CSI2_CSIRX_DPHY_NONRECOVERABLE_SYNC_ERROR BIT(10) +#define CSI2_CSIRX_ESCAPE_MODE_ERROR BIT(11) +#define CSI2_CSIRX_ESCAPE_MODE_TRIGGER_EVENT BIT(12) +#define CSI2_CSIRX_ESCAPE_MODE_ULTRALOW_POWER_DATA BIT(13) +#define CSI2_CSIRX_ESCAPE_MODE_ULTRALOW_POWER_EXIT_CLK BIT(14) +#define CSI2_CSIRX_INTER_FRAME_SHORT_PACKET_DISCARDED BIT(15) +#define CSI2_CSIRX_INTER_FRAME_LONG_PACKET_DISCARDED BIT(16) +#define CSI2_CSIRX_NUM_ERRORS 17 + +#define CSI2_REG_CSI2S2M_IRQ_EDGE 0x600 +#define CSI2_REG_CSI2S2M_IRQ_MASK 0x604 +#define CSI2_REG_CSI2S2M_IRQ_STATUS 0x608 +#define CSI2_REG_CSI2S2M_IRQ_CLEAR 0x60c +#define CSI2_REG_CSI2S2M_IRQ_ENABLE 0x610 +#define CSI2_REG_CSI2S2M_IRQ_LEVEL_NOT_PULSE 0x614 + +#ifdef IPU_VC_SUPPORT +#define CSI2_IRQ_FS_VC(chn) (1 << ((chn) * 4)) +#define CSI2_IRQ_FE_VC(chn) (2 << ((chn) * 4)) +#define CSI2_IRQ_LS_VC(chn) (4 << ((chn) * 4)) +#define CSI2_IRQ_LE_VC(chn) (8 << ((chn) * 4)) +#else +#define CSI2_IRQ_FS_VC 1 +#define CSI2_IRQ_FE_VC 2 +#define CSI2_IRQ_LS_VC 4 +#define CSI2_IRQ_LE_VC 8 +#endif /* IPU_VC_SUPPORT */ +#endif /* CONFIG_VIDEO_INTEL_IPU4 */ + +#define CSI2_REG_CSI_RX_ENABLE 0x00 +#define CSI2_CSI_RX_ENABLE_ENABLE 0x01 +/* Enabled lanes - 1 */ +#define CSI2_REG_CSI_RX_NOF_ENABLED_LANES 0x04 +#define CSI2_REG_CSI_RX_CONFIG 0x08 +#define CSI2_CSI_RX_CONFIG_RELEASE_LP11 0x1 +#define CSI2_CSI_RX_CONFIG_DISABLE_BYTE_CLK_GATING 0x2 +#define CSI2_CSI_RX_CONFIG_SKEWCAL_ENABLE 0x4 +#define CSI2_REG_CSI_RX_HBP_TESTMODE_ENABLE 0x0c +#define CSI2_REG_CSI_RX_ERROR_HANDLING 0x10 +#define CSI2_REG_CSI_RX_SYNC_COUNTER_SEL 0x14 +#define CSI2_RX_SYNC_COUNTER_INTERNAL 0 +#define 
CSI2_RX_SYNC_COUNTER_EXTERNAL 3 +#define CSI2_REG_CSI_RX_SP_IF_CONFIG 0x18 +#define CSI2_REG_CSI_RX_LP_IF_CONFIG 0x1C +#define CSI2_REG_CSI_RX_STATUS 0x20 +#define CSI2_CSI_RX_STATUS_BUSY 0x01 +#define CSI2_REG_CSI_RX_STATUS_DLANE_HS 0x24 +#define CSI2_REG_CSI_RX_STATUS_DLANE_LP 0x28 +#define CSI2_REG_CSI_RX_DLY_CNT_TERMEN_CLANE 0x2c +#define CSI2_REG_CSI_RX_DLY_CNT_SETTLE_CLANE 0x30 +/* 0..3 */ +#define CSI2_REG_CSI_RX_DLY_CNT_TERMEN_DLANE(n) (0x34 + (n) * 8) +#define CSI2_REG_CSI_RX_DLY_CNT_SETTLE_DLANE(n) (0x38 + (n) * 8) + +/* General purpose registers, offset to gpreg base */ +#define CSI2_REG_CSI_GPREG_SOFT_RESET 0 +#define CSI2_REG_CSI_GPREG_SOFT_RESET_SLV 0x4 +#define CSI2_REG_CSI_GPREG_HPLL_FREQ 0x8 +#define CSI2_REG_CSI_GPREG_ISCLK_RATIO 0xc +#define CSI2_REG_CSI_GPREG_HPLL_FREQ_ISCLK_RATIO_OVERRIDE 0x10 +#define CSI2_REG_CSI_GPREG_CR_PORT_CONFIG 0x14 +#define CSI2_REG_CSI_GPREG_RCOMP_TIMER_DISABLE 0x18 +#define CSI2_REG_CSI_GPREG_RCOMP_TIMER_VALUE 0x1c + +/* + * Following is the list of relevant registers and + * their offset within the legacy PHY endpoint. Accessible only via + * sideband bus. + * Register naming is a bit misleading. DPHY / CPHY / LANE0 / LANE1 + * all are required for DPHY configurations. + * Registers are accessible only via sideband bus. 
+ */ + +/* Legacy receiver block */ +#define CSI2_SB_CSI_RCOMP_CONTROL_LEGACY 0xb8 +#define CSI2_SB_CSI_RCOMP_CONTROL_LEGACY_OVR_ENABLE_PORT4_SHIFT 9 +#define CSI2_SB_CSI_RCOMP_CONTROL_LEGACY_OVR_ENABLE_PORT3_SHIFT 8 +#define CSI2_SB_CSI_RCOMP_CONTROL_LEGACY_OVR_ENABLE_PORT2_SHIFT 7 +#define CSI2_SB_CSI_RCOMP_CONTROL_LEGACY_OVR_ENABLE_PORT1_SHIFT 6 +#define CSI2_SB_CSI_RCOMP_CONTROL_LEGACY_OVR_CODE_SHIFT 1 +#define CSI2_SB_CSI_RCOMP_CONTROL_LEGACY_OVR_ENABLE_SHIFT 0 + +/* Combo receiver block */ +#define CSI2_SB_CSI_RCOMP_CONTROL_COMBO 0x08 +#define CSI2_SB_CSI_RCOMP_UPDATE_MODE_SHIFT 15 +#define CSI2_SB_CSI_RCOMP_OVR_ENABLE_SHIFT 6 +#define CSI2_SB_CSI_RCOMP_OVR_CODE_SHIFT 1 + +#define CSI2_SB_CPHY0_DLL_OVRD 0x18 +#define CSI2_SB_CPHY0_DLL_OVRD_CRCDC_FSM_DLANE0_SHIFT 1 +#define CSI2_SB_CPHY0_DLL_OVRD_LDEN_CRCDC_FSM_DLANE0 BIT(0) +#define CSI2_SB_CPHY2_DLL_OVRD 0x60 +#define CSI2_SB_CPHY2_DLL_OVRD_CRCDC_FSM_DLANE1_SHIFT 1 +#define CSI2_SB_CPHY2_DLL_OVRD_LDEN_CRCDC_FSM_DLANE1 BIT(0) + +#define CSI2_SB_CPHY0_RX_CONTROL1 0x28 +#define CSI2_SB_CPHY0_RX_CONTROL1_EQ_LANE0_SHIFT 27 +#define CSI2_SB_CPHY2_RX_CONTROL1 0x68 +#define CSI2_SB_CPHY2_RX_CONTROL1_EQ_LANE1_SHIFT 27 + +#define CSI2_SB_DPHY0_DLL_OVRD 0xA4 +#define CSI2_SB_DPHY0_DLL_OVRD_LDEN_DRC_FSM_SHIFT 0 +#define CSI2_SB_DPHY0_DLL_OVRD_DRC_FSM_OVRD_SHIFT 1 +#define CSI2_SB_DPHY1_DLL_OVRD 0xD0 +#define CSI2_SB_DPHY1_DLL_OVRD_LDEN_DRC_FSM_SHIFT 0 +#define CSI2_SB_DPHY1_DLL_OVRD_DRC_FSM_OVRD_SHIFT 1 + +#define CSI2_SB_DPHY0_RX_CNTRL 0xB0 +#define CSI2_SB_DPHY0_RX_CNTRL_SKEWCAL_CR_SEL_DLANE3_SHIFT 28 +#define CSI2_SB_DPHY0_RX_CNTRL_SKEWCAL_CR_SEL_DLANE2_SHIFT 26 +#define CSI2_SB_DPHY0_RX_CNTRL_SKEWCAL_CR_SEL_DLANE1_SHIFT 24 +#define CSI2_SB_DPHY0_RX_CNTRL_SKEWCAL_CR_SEL_DLANE0_SHIFT 22 +#define CSI2_SB_DPHY0_RX_CNTRL_SKEWCAL_CR_SEL_DLANE23_MASK \ + ((1 << CSI2_SB_DPHY0_RX_CNTRL_SKEWCAL_CR_SEL_DLANE3_SHIFT) | \ + (1 << CSI2_SB_DPHY0_RX_CNTRL_SKEWCAL_CR_SEL_DLANE2_SHIFT)) + +#define 
CSI2_SB_DPHY0_RX_CNTRL_SKEWCAL_CR_SEL_DLANE01_MASK \ + ((1 << CSI2_SB_DPHY0_RX_CNTRL_SKEWCAL_CR_SEL_DLANE1_SHIFT) | \ + (1 << CSI2_SB_DPHY0_RX_CNTRL_SKEWCAL_CR_SEL_DLANE0_SHIFT)) + +#endif /* IPU_PLATFORM_ISYS_CSI2_REG_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu-platform-isys.h b/drivers/media/pci/intel/ipu4/ipu-platform-isys.h new file mode 100644 index 0000000000000..dfd3799972e6b --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu-platform-isys.h @@ -0,0 +1,21 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (C) 2018 Intel Corporation */ + +#ifndef IPU_PLATFORM_ISYS_H +#define IPU_PLATFORM_ISYS_H + +#include "ipu4-isys-isa.h" + +#define IPU_ISYS_ENTITY_PREFIX "Intel IPU4" + +/* + * FW support max 8 streams + */ +#define IPU_ISYS_MAX_STREAMS 8 +#ifdef IPU_VC_SUPPORT + +#define NR_OF_CSI2_BE_SOC_STREAMS 8 +#define NR_OF_CSI2_VC 4 +#endif + +#endif diff --git a/drivers/media/pci/intel/ipu4/ipu-platform-psys.h b/drivers/media/pci/intel/ipu4/ipu-platform-psys.h new file mode 100644 index 0000000000000..7826727f377aa --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu-platform-psys.h @@ -0,0 +1,30 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (C) 2018 Intel Corporation */ + +#ifndef IPU_PLATFORM_PSYS_H +#define IPU_PLATFORM_PSYS_H + +#include + +struct ipu_psys_fh; +struct ipu_psys_kcmd; + +struct ipu_psys_scheduler { + struct list_head kcmds[IPU_PSYS_CMD_PRIORITY_NUM]; + struct ipu_psys_kcmd + *new_kcmd_tail[IPU_PSYS_CMD_PRIORITY_NUM]; +}; + +enum ipu_psys_cmd_state { + KCMD_STATE_NEW, + KCMD_STATE_START_PREPARED, + KCMD_STATE_STARTED, + KCMD_STATE_RUN_PREPARED, + KCMD_STATE_RUNNING, + KCMD_STATE_COMPLETE +}; + +int ipu_psys_fh_init(struct ipu_psys_fh *fh); +int ipu_psys_fh_deinit(struct ipu_psys_fh *fh); + +#endif /* IPU_PLATFORM_PSYS_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu-platform-regs.h b/drivers/media/pci/intel/ipu4/ipu-platform-regs.h new file mode 100644 index 0000000000000..e54b2b55afbf8 --- /dev/null +++ 
b/drivers/media/pci/intel/ipu4/ipu-platform-regs.h @@ -0,0 +1,263 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (C) 2014 - 2018 Intel Corporation */ + +#ifndef IPU_PLATFORM_REGS_H +#define IPU_PLATFORM_REGS_H + +#ifdef CONFIG_VIDEO_INTEL_IPU4P +#define IPU_ISYS_IOMMU0_OFFSET 0x000e0000 +#define IPU_ISYS_IOMMU1_OFFSET 0x000e0100 + +#define IPU_ISYS_OFFSET 0x00100000 +#define IPU_PSYS_OFFSET 0x00400000 + +#define IPU_PSYS_IOMMU0_OFFSET 0x000b0000 +#define IPU_PSYS_IOMMU1_OFFSET 0x000b0100 +#define IPU_PSYS_IOMMU1R_OFFSET 0x000b0600 + +/* the offset from IOMMU base register */ +#define IPU_MMU_L1_STREAM_ID_REG_OFFSET 0x0c +#define IPU_MMU_L2_STREAM_ID_REG_OFFSET 0x4c + +#define IPU_TPG0_ADDR_OFFSET 0x66c00 +#define IPU_TPG1_ADDR_OFFSET 0x6ec00 +#define IPU_CSI2BE_ADDR_OFFSET 0xba000 + +#define IPU_PSYS_MMU0_CTRL_OFFSET 0x08 + +#define IPU_GPOFFSET 0x66800 +#define IPU_COMBO_GPOFFSET 0x6e800 + +#define IPU_GPREG_MIPI_PKT_GEN0_SEL 0x1c +#define IPU_GPREG_MIPI_PKT_GEN1_SEL 0x1c + +#define IPU_REG_ISYS_ISA_ACC_IRQ_CTRL_BASE 0xb0c00 +#define IPU_REG_ISYS_A_IRQ_CTRL_BASE 0xbe200 +#define IPU_REG_ISYS_SIP0_IRQ_CTRL_BASE 0x66d00 +#define IPU_REG_ISYS_SIP1_IRQ_CTRL_BASE 0x6ed00 +#define IPU_REG_ISYS_SIP0_IRQ_CTRL_STATUS 0x66d08 +#define IPU_REG_ISYS_SIP1_IRQ_CTRL_STATUS 0x6ed08 +#define IPU_REG_ISYS_SIP0_IRQ_CTRL_CLEAR 0x66d0c +#define IPU_REG_ISYS_SIP1_IRQ_CTRL_CLEAR 0x6ed0c +#define IPU_REG_ISYS_CSI_IRQ_CTRL_BASE(p) \ + ({ typeof(p) __p = (p); \ + __p > 0 ? (0x6cb00 + 0x800 * (__p - 1)) : (0x66300); }) +#define IPU_REG_ISYS_CSI_IRQ_CTRL0_BASE(p) \ + ({ typeof(p) __p = (p); \ + __p > 0 ? 
(0x6cc00 + 0x800 * (__p - 1)) : (0x66400); }) +#define IPU_ISYS_CSI2_A_IRQ_MASK GENMASK(0, 0) +#define IPU_ISYS_CSI2_B_IRQ_MASK GENMASK(1, 1) +#define IPU_ISYS_CSI2_C_IRQ_MASK GENMASK(2, 2) +#define IPU_ISYS_CSI2_D_IRQ_MASK GENMASK(3, 3) + +/* IRQ-related registers relative to ISYS_OFFSET */ +#define IPU_REG_ISYS_UNISPART_IRQ_EDGE 0x7c000 +#define IPU_REG_ISYS_UNISPART_IRQ_MASK 0x7c004 +#define IPU_REG_ISYS_UNISPART_IRQ_STATUS 0x7c008 +#define IPU_REG_ISYS_UNISPART_IRQ_CLEAR 0x7c00c +#define IPU_REG_ISYS_UNISPART_IRQ_ENABLE 0x7c010 +#define IPU_REG_ISYS_UNISPART_IRQ_LEVEL_NOT_PULSE 0x7c014 +#define IPU_REG_ISYS_UNISPART_SW_IRQ_REG 0x7c414 +#define IPU_REG_ISYS_UNISPART_SW_IRQ_MUX_REG 0x7c418 +#define IPU_ISYS_UNISPART_IRQ_SW BIT(22) +#endif + +#ifdef CONFIG_VIDEO_INTEL_IPU4 +#define IPU_ISYS_IOMMU0_OFFSET 0x000e0000 +#define IPU_ISYS_IOMMU1_OFFSET 0x000e0100 + +#define IPU_ISYS_OFFSET 0x00100000 +#define IPU_PSYS_OFFSET 0x00400000 + +#define IPU_PSYS_IOMMU0_OFFSET 0x000b0000 +#define IPU_PSYS_IOMMU1_OFFSET 0x000b0100 +#define IPU_PSYS_IOMMU1R_OFFSET 0x000b0600 + +/* the offset from IOMMU base register */ +#define IPU_MMU_L1_STREAM_ID_REG_OFFSET 0x0c +#define IPU_MMU_L2_STREAM_ID_REG_OFFSET 0x4c + +#define IPU_TPG0_ADDR_OFFSET 0x64800 +#define IPU_TPG1_ADDR_OFFSET 0x6f400 +#define IPU_CSI2BE_ADDR_OFFSET 0xba000 + +#define IPU_PSYS_MMU0_CTRL_OFFSET 0x08 + +#define IPU_GPOFFSET 0x67800 +#define IPU_COMBO_GPOFFSET 0x6f000 + +#define IPU_GPREG_MIPI_PKT_GEN0_SEL 0x24 +#define IPU_GPREG_MIPI_PKT_GEN1_SEL 0x1c + +/* IRQ-related registers relative to ISYS_OFFSET */ +#define IPU_REG_ISYS_UNISPART_IRQ_EDGE 0x7c000 +#define IPU_REG_ISYS_UNISPART_IRQ_MASK 0x7c004 +#define IPU_REG_ISYS_UNISPART_IRQ_STATUS 0x7c008 +#define IPU_REG_ISYS_UNISPART_IRQ_CLEAR 0x7c00c +#define IPU_REG_ISYS_UNISPART_IRQ_ENABLE 0x7c010 +#define IPU_REG_ISYS_UNISPART_IRQ_LEVEL_NOT_PULSE 0x7c014 +#define IPU_REG_ISYS_UNISPART_SW_IRQ_REG 0x7c414 +#define IPU_REG_ISYS_UNISPART_SW_IRQ_MUX_REG 0x7c418 +#define 
IPU_ISYS_UNISPART_IRQ_SW BIT(30) +#endif /* CONFIG_VIDEO_INTEL_IPU4 */ + +#define IPU_ISYS_SPC_OFFSET 0x000000 +#define IPU_PSYS_SPC_OFFSET 0x000000 +#define IPU_ISYS_DMEM_OFFSET 0x008000 +#define IPU_PSYS_DMEM_OFFSET 0x008000 + +/* PKG DIR OFFSET in IMR in secure mode */ +#define IPU_PKG_DIR_IMR_OFFSET 0x40 + +/* PCI config registers */ +#define IPU_REG_PCI_PCIECAPHDR_PCIECAP 0x70 +#define IPU_REG_PCI_DEVICECAP 0x74 +#define IPU_REG_PCI_DEVICECTL_DEVICESTS 0x78 +#define IPU_REG_PCI_MSI_CAPID 0xac +#define IPU_REG_PCI_MSI_ADDRESS_LO 0xb0 +#define IPU_REG_PCI_MSI_ADDRESS_HI 0xb4 +#define IPU_REG_PCI_MSI_DATA 0xb8 +#define IPU_REG_PCI_PMCAP 0xd0 +#define IPU_REG_PCI_PMCS 0xd4 +#define IPU_REG_PCI_MANUFACTURING_ID 0xf8 +#define IPU_REG_PCI_IUNIT_ACCESS_CTRL_VIOL 0xfc + +/* ISYS registers */ +/* Isys DMA CIO info register */ +#define IPU_REG_ISYS_INFO_CIO_DMA0(a) (0x81810 + (a) * 0x40) +#define IPU_REG_ISYS_INFO_CIO_DMA1(a) (0x93010 + (a) * 0x40) +#define IPU_REG_ISYS_INFO_CIO_DMA_IS(a) (0xb0610 + (a) * 0x40) +#define IPU_ISYS_NUM_OF_DMA0_CHANNELS 16 +#define IPU_ISYS_NUM_OF_DMA1_CHANNELS 32 +#define IPU_ISYS_NUM_OF_IS_CHANNELS 4 +/*Isys Info register offsets*/ +#define IPU_REG_ISYS_INFO_SEG_0_CONFIG_ICACHE_MASTER 0x14 +#define IPU_REG_ISYS_INFO_SEG_CMEM_MASTER(a) (0x2C + (a * 12)) +#define IPU_REG_ISYS_INFO_SEG_XMEM_MASTER(a) (0x5C + (a * 12)) + +/* CDC Burst collector thresholds for isys - 3 FIFOs i = 0..2 */ +#define IPU_REG_ISYS_CDC_THRESHOLD(i) (0x7c400 + ((i) * 4)) + +/*Iunit Info bits*/ +#define IPU_REG_PSYS_INFO_SEG_CMEM_MASTER(a) (0x2C + ((a) * 12)) +#define IPU_REG_PSYS_INFO_SEG_XMEM_MASTER(a) (0x5C + ((a) * 12)) +#define IPU_REG_PSYS_INFO_SEG_DATA_MASTER(a) (0x8C + ((a) * 12)) + +#define IPU_ISYS_REG_SPC_STATUS_CTRL 0x0 + +#define IPU_ISYS_SPC_STATUS_START BIT(1) +#define IPU_ISYS_SPC_STATUS_RUN BIT(3) +#define IPU_ISYS_SPC_STATUS_READY BIT(5) +#define IPU_ISYS_SPC_STATUS_CTRL_ICACHE_INVALIDATE BIT(12) +#define IPU_ISYS_SPC_STATUS_ICACHE_PREFETCH BIT(13) + 
+#define IPU_PSYS_REG_SPC_STATUS_CTRL 0x0 + +#define IPU_PSYS_SPC_STATUS_START BIT(1) +#define IPU_PSYS_SPC_STATUS_RUN BIT(3) +#define IPU_PSYS_SPC_STATUS_READY BIT(5) +#define IPU_PSYS_SPC_STATUS_CTRL_ICACHE_INVALIDATE BIT(12) +#define IPU_PSYS_SPC_STATUS_ICACHE_PREFETCH BIT(13) + +#define IPU_PSYS_REG_SPC_START_PC 0x4 +#define IPU_PSYS_REG_SPC_ICACHE_BASE 0x10 +#define IPU_PSYS_REG_SPP0_STATUS_CTRL 0x20000 +#define IPU_PSYS_REG_SPP1_STATUS_CTRL 0x30000 +#define IPU_PSYS_REG_SPF_STATUS_CTRL 0x40000 +#define IPU_PSYS_REG_ISP0_STATUS_CTRL 0x1C0000 +#define IPU_PSYS_REG_ISP1_STATUS_CTRL 0x240000 +#define IPU_PSYS_REG_ISP2_STATUS_CTRL 0x2C0000 +#define IPU_PSYS_REG_ISP3_STATUS_CTRL 0x340000 +#define IPU_REG_PSYS_INFO_SEG_0_CONFIG_ICACHE_MASTER 0x14 + +/* VC0 */ +#define IPU_INFO_ENABLE_SNOOP BIT(0) +#define IPU_INFO_IMR_DESTINED BIT(1) +#define IPU_INFO_REQUEST_DESTINATION_BUT_REGS 0 +#define IPU_INFO_REQUEST_DESTINATION_PRIMARY BIT(4) +#define IPU_INFO_REQUEST_DESTINATION_P2P (BIT(4) | BIT(5)) +/* VC1 */ +#define IPU_INFO_DEADLINE_PTR BIT(1) +#define IPU_INFO_ZLW BIT(2) +#define IPU_INFO_STREAM_ID_SET(a) ((a & 0xF) << 4) +#define IPU_INFO_ADDRESS_SWIZZ BIT(8) + +/* Trace unit related register definitions */ +#define TRACE_REG_MAX_ISYS_OFFSET 0x0fffff +#define TRACE_REG_MAX_PSYS_OFFSET 0xffffff +/* ISYS trace registers - offsets to isys base address */ +/* Trace unit base offset */ +#define TRACE_REG_IS_TRACE_UNIT_BASE 0x07d000 +/* Trace monitors */ +#define TRACE_REG_IS_SP_EVQ_BASE 0x001000 +/* GPC blocks */ +#define TRACE_REG_IS_SP_GPC_BASE 0x000800 +#define TRACE_REG_IS_ISL_GPC_BASE 0x0bd400 +#define TRACE_REG_IS_MMU_GPC_BASE 0x0e0B00 +/* CSI2 receivers */ +#define TRACE_REG_CSI2_TM_BASE 0x067a00 +#define TRACE_REG_CSI2_3PH_TM_BASE 0x06f200 +/* Trace timers */ +#define TRACE_REG_PS_GPREG_TRACE_TIMER_RST_N 0x060614 +#define TRACE_REG_IS_GPREG_TRACE_TIMER_RST_N 0x07c410 +#define TRACE_REG_GPREG_TRACE_TIMER_RST_OFF BIT(0) +/* SIG2CIO */ +/* 0 < n <= 8 */ +#define 
TRACE_REG_CSI2_SIG2SIO_GR_BASE(n) (0x067c00 + (n) * 0x20) +#define TRACE_REG_CSI2_SIG2SIO_GR_NUM 9 +/* 0 < n <= 8 */ +#define TRACE_REG_CSI2_PH3_SIG2SIO_GR_BASE(n) (0x06f600 + (n) * 0x20) +#define TRACE_REG_CSI2_PH3_SIG2SIO_GR_NUM 9 +/* PSYS trace registers - offsets to isys base address */ +/* Trace unit base offset */ +#define TRACE_REG_PS_TRACE_UNIT_BASE 0x3e0000 +/* Trace monitors */ +#define TRACE_REG_PS_SPC_EVQ_BASE 0x001000 +#define TRACE_REG_PS_SPP0_EVQ_BASE 0x021000 +#define TRACE_REG_PS_SPP1_EVQ_BASE 0x031000 +#define TRACE_REG_PS_SPF_EVQ_BASE 0x041000 +#define TRACE_REG_PS_ISP0_EVQ_BASE 0x1c1000 +#define TRACE_REG_PS_ISP1_EVQ_BASE 0x241000 +#define TRACE_REG_PS_ISP2_EVQ_BASE 0x2c1000 +#define TRACE_REG_PS_ISP3_EVQ_BASE 0x341000 +/* GPC blocks */ +#define TRACE_REG_PS_SPC_GPC_BASE 0x000800 +#define TRACE_REG_PS_SPP0_GPC_BASE 0x020800 +#define TRACE_REG_PS_SPP1_GPC_BASE 0x030800 +#define TRACE_REG_PS_SPF_GPC_BASE 0x040800 +#define TRACE_REG_PS_MMU_GPC_BASE 0x0b0b00 +#define TRACE_REG_PS_ISL_GPC_BASE 0x0fe800 +#define TRACE_REG_PS_ISP0_GPC_BASE 0x1c0800 +#define TRACE_REG_PS_ISP1_GPC_BASE 0x240800 +#define TRACE_REG_PS_ISP2_GPC_BASE 0x2c0800 +#define TRACE_REG_PS_ISP3_GPC_BASE 0x340800 + +/* common macros on each platform */ +#ifdef CONFIG_VIDEO_INTEL_IPU4 +#define IPU_ISYS_UNISPART_IRQ_CSI2(port) \ + ({ typeof(port) __port = (port); \ + __port < IPU_ISYS_MAX_CSI2_LEGACY_PORTS ? \ + ((0x8) << __port) : \ + (0x800 << (__port - IPU_ISYS_MAX_CSI2_LEGACY_PORTS)); }) +#define IPU_PSYS_GPDEV_IRQ_FWIRQ(n) (BIT(17) << (n)) +#endif +#ifdef CONFIG_VIDEO_INTEL_IPU4P +#define IPU_ISYS_UNISPART_IRQ_CSI2(port) \ + ((port) > 0 ? 
0x10 : 0x8) +/* bit 20 for fw irqreg0 */ +#define IPU_PSYS_GPDEV_IRQ_FWIRQ(n) (BIT(20) << (n)) +#endif +/* IRQ-related registers in PSYS, relative to IPU_xx_PSYS_OFFSET */ +#define IPU_REG_PSYS_GPDEV_IRQ_EDGE 0x60200 +#define IPU_REG_PSYS_GPDEV_IRQ_MASK 0x60204 +#define IPU_REG_PSYS_GPDEV_IRQ_STATUS 0x60208 +#define IPU_REG_PSYS_GPDEV_IRQ_CLEAR 0x6020c +#define IPU_REG_PSYS_GPDEV_IRQ_ENABLE 0x60210 +#define IPU_REG_PSYS_GPDEV_IRQ_LEVEL_NOT_PULSE 0x60214 +/* There are 8 FW interrupts, n = 0..7 */ +#define IPU_PSYS_GPDEV_FWIRQ0 0 +#define IPU_REG_PSYS_GPDEV_FWIRQ(n) (4 * (n) + 0x60100) +/* CDC Burst collector thresholds for psys - 4 FIFOs i= 0..3 */ +#define IPU_REG_PSYS_CDC_THRESHOLD(i) (0x60600 + ((i) * 4)) + +#endif /* IPU_PLATFORM_REGS_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu-platform-resources.h b/drivers/media/pci/intel/ipu4/ipu-platform-resources.h new file mode 100644 index 0000000000000..59b2cd46c9f1e --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu-platform-resources.h @@ -0,0 +1,224 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (C) 2016 - 2018 Intel Corporation */ + +#ifndef IPU_PLATFORM_RESOURCES_H +#define IPU_PLATFORM_RESOURCES_H + +#include + +/* ia_css_psys_program_group_private.h */ +/* ia_css_psys_process_group_cmd_impl.h */ +#ifdef CONFIG_VIDEO_INTEL_IPU4P +#define IPU_FW_PSYS_N_PADDING_UINT8_IN_PROCESS_STRUCT 2 +#define IPU_FW_PSYS_N_PADDING_UINT8_IN_PROGRAM_MANIFEST 0 +#else +#define IPU_FW_PSYS_N_PADDING_UINT8_IN_PROCESS_STRUCT 4 +#define IPU_FW_PSYS_N_PADDING_UINT8_IN_PROGRAM_MANIFEST 4 +#endif +#define IPU_FW_PSYS_N_PADDING_UINT8_IN_PROCESS_GROUP_STRUCT 4 + +/* ia_css_terminal_base_types.h */ +#define IPU_FW_PSYS_N_PADDING_UINT8_IN_TERMINAL_STRUCT 5 + +/* ia_css_terminal_types.h */ +#define IPU_FW_PSYS_N_PADDING_UINT8_IN_PARAM_TERMINAL_STRUCT 6 + +/* ia_css_psys_terminal.c */ +#define IPU_FW_PSYS_N_PADDING_UINT8_IN_DATA_TERMINAL_STRUCT 4 + +/* ia_css_program_group_data.h */ +#define
IPU_FW_PSYS_N_PADDING_UINT8_IN_FRAME_DESC_STRUCT 3 +#define IPU_FW_PSYS_N_FRAME_PLANES 6 +#define IPU_FW_PSYS_N_PADDING_UINT8_IN_FRAME_STRUCT 4 + +/* ia_css_psys_buffer_set.h */ +#define IPU_FW_PSYS_N_PADDING_UINT8_IN_BUFFER_SET_STRUCT 5 + +enum { + IPU_FW_PSYS_CMD_QUEUE_COMMAND_ID, + IPU_FW_PSYS_CMD_QUEUE_DEVICE_ID, + IPU_FW_PSYS_CMD_QUEUE_PPG0_COMMAND_ID, + IPU_FW_PSYS_CMD_QUEUE_PPG1_COMMAND_ID, + IPU_FW_PSYS_N_PSYS_CMD_QUEUE_ID +}; + +enum { + IPU_FW_PSYS_GMEM_TYPE_ID = 0, + IPU_FW_PSYS_DMEM_TYPE_ID, + IPU_FW_PSYS_VMEM_TYPE_ID, + IPU_FW_PSYS_BAMEM_TYPE_ID, + IPU_FW_PSYS_PMEM_TYPE_ID, + IPU_FW_PSYS_N_MEM_TYPE_ID +}; + +enum ipu_mem_id { + IPU_FW_PSYS_VMEM0_ID = 0, + IPU_FW_PSYS_VMEM1_ID, + IPU_FW_PSYS_VMEM2_ID, + IPU_FW_PSYS_VMEM3_ID, + IPU_FW_PSYS_VMEM4_ID, + IPU_FW_PSYS_BAMEM0_ID, + IPU_FW_PSYS_BAMEM1_ID, + IPU_FW_PSYS_BAMEM2_ID, + IPU_FW_PSYS_BAMEM3_ID, + IPU_FW_PSYS_DMEM0_ID, + IPU_FW_PSYS_DMEM1_ID, + IPU_FW_PSYS_DMEM2_ID, + IPU_FW_PSYS_DMEM3_ID, + IPU_FW_PSYS_DMEM4_ID, + IPU_FW_PSYS_DMEM5_ID, + IPU_FW_PSYS_DMEM6_ID, + IPU_FW_PSYS_DMEM7_ID, + IPU_FW_PSYS_PMEM0_ID, + IPU_FW_PSYS_PMEM1_ID, + IPU_FW_PSYS_PMEM2_ID, + IPU_FW_PSYS_PMEM3_ID, + IPU_FW_PSYS_N_MEM_ID +}; + +enum { + IPU_FW_PSYS_DEV_CHN_DMA_EXT0_ID = 0, + IPU_FW_PSYS_DEV_CHN_GDC_ID, + IPU_FW_PSYS_DEV_CHN_DMA_EXT1_READ_ID, + IPU_FW_PSYS_DEV_CHN_DMA_EXT1_WRITE_ID, + IPU_FW_PSYS_DEV_CHN_DMA_INTERNAL_ID, + IPU_FW_PSYS_DEV_CHN_DMA_IPFD_ID, + IPU_FW_PSYS_DEV_CHN_DMA_ISA_ID, + IPU_FW_PSYS_DEV_CHN_DMA_FW_ID, +#ifdef CONFIG_VIDEO_INTEL_IPU4P + IPU_FW_PSYS_DEV_CHN_DMA_CMPRS_ID, +#endif + IPU_FW_PSYS_N_DEV_CHN_ID +}; + +enum { + IPU_FW_PSYS_SP_CTRL_TYPE_ID = 0, + IPU_FW_PSYS_SP_SERVER_TYPE_ID, + IPU_FW_PSYS_VP_TYPE_ID, + IPU_FW_PSYS_ACC_PSA_TYPE_ID, + IPU_FW_PSYS_ACC_ISA_TYPE_ID, + IPU_FW_PSYS_ACC_OSA_TYPE_ID, + IPU_FW_PSYS_GDC_TYPE_ID, + IPU_FW_PSYS_N_CELL_TYPE_ID +}; + +enum { + IPU_FW_PSYS_SP0_ID = 0, + IPU_FW_PSYS_SP1_ID, + IPU_FW_PSYS_SP2_ID, + IPU_FW_PSYS_VP0_ID, + IPU_FW_PSYS_VP1_ID, + IPU_FW_PSYS_VP2_ID, + 
IPU_FW_PSYS_VP3_ID, + IPU_FW_PSYS_ACC0_ID, + IPU_FW_PSYS_ACC1_ID, + IPU_FW_PSYS_ACC2_ID, + IPU_FW_PSYS_ACC3_ID, + IPU_FW_PSYS_ACC4_ID, + IPU_FW_PSYS_ACC5_ID, + IPU_FW_PSYS_ACC6_ID, + IPU_FW_PSYS_ACC7_ID, + IPU_FW_PSYS_GDC0_ID, + IPU_FW_PSYS_GDC1_ID, + IPU_FW_PSYS_N_CELL_ID +}; + +#define IPU_FW_PSYS_N_DEV_DFM_ID 0 +#define IPU_FW_PSYS_N_DATA_MEM_TYPE_ID (IPU_FW_PSYS_N_MEM_TYPE_ID - 1) +#define IPU_FW_PSYS_PROCESS_MAX_CELLS 1 +#define IPU_FW_PSYS_KERNEL_BITMAP_NOF_ELEMS 2 +#define IPU_FW_PSYS_RBM_NOF_ELEMS 2 + +#define IPU_FW_PSYS_DEV_CHN_DMA_EXT0_MAX_SIZE 30 +#define IPU_FW_PSYS_DEV_CHN_GDC_MAX_SIZE 4 +#define IPU_FW_PSYS_DEV_CHN_DMA_EXT1_READ_MAX_SIZE 30 +#define IPU_FW_PSYS_DEV_CHN_DMA_EXT1_WRITE_MAX_SIZE 20 +#define IPU_FW_PSYS_DEV_CHN_DMA_INTERNAL_MAX_SIZE 2 +#define IPU_FW_PSYS_DEV_CHN_DMA_IPFD_MAX_SIZE 5 +#define IPU_FW_PSYS_DEV_CHN_DMA_ISA_MAX_SIZE 2 +#define IPU_FW_PSYS_DEV_CHN_DMA_FW_MAX_SIZE 1 +#define IPU_FW_PSYS_DEV_CHN_DMA_CMPRS_MAX_SIZE 6 + +#define IPU_FW_PSYS_VMEM0_MAX_SIZE 0x0800 +#define IPU_FW_PSYS_VMEM1_MAX_SIZE 0x0800 +#define IPU_FW_PSYS_VMEM2_MAX_SIZE 0x0800 +#define IPU_FW_PSYS_VMEM3_MAX_SIZE 0x0800 +#define IPU_FW_PSYS_VMEM4_MAX_SIZE 0x0800 +#define IPU_FW_PSYS_BAMEM0_MAX_SIZE 0x0400 +#define IPU_FW_PSYS_BAMEM1_MAX_SIZE 0x0400 +#define IPU_FW_PSYS_BAMEM2_MAX_SIZE 0x0400 +#define IPU_FW_PSYS_BAMEM3_MAX_SIZE 0x0400 +#define IPU_FW_PSYS_DMEM0_MAX_SIZE 0x4000 +#define IPU_FW_PSYS_DMEM1_MAX_SIZE 0x1000 +#define IPU_FW_PSYS_DMEM2_MAX_SIZE 0x1000 +#define IPU_FW_PSYS_DMEM3_MAX_SIZE 0x1000 +#define IPU_FW_PSYS_DMEM4_MAX_SIZE 0x1000 +#define IPU_FW_PSYS_DMEM5_MAX_SIZE 0x1000 +#define IPU_FW_PSYS_DMEM6_MAX_SIZE 0x1000 +#define IPU_FW_PSYS_DMEM7_MAX_SIZE 0x1000 +#define IPU_FW_PSYS_PMEM0_MAX_SIZE 0x0500 +#define IPU_FW_PSYS_PMEM1_MAX_SIZE 0x0500 +#define IPU_FW_PSYS_PMEM2_MAX_SIZE 0x0500 +#define IPU_FW_PSYS_PMEM3_MAX_SIZE 0x0500 + +struct ipu_fw_psys_program_manifest { + u32 kernel_bitmap[IPU_FW_PSYS_KERNEL_BITMAP_NOF_ELEMS]; + u32 ID; + u32 
program_type; + s32 parent_offset; + u32 program_dependency_offset; + u32 terminal_dependency_offset; + u16 size; + u16 int_mem_size[IPU_FW_PSYS_N_MEM_TYPE_ID]; + u16 ext_mem_size[IPU_FW_PSYS_N_DATA_MEM_TYPE_ID]; + u16 ext_mem_offset[IPU_FW_PSYS_N_DATA_MEM_TYPE_ID]; + u16 dev_chn_size[IPU_FW_PSYS_N_DEV_CHN_ID]; + u16 dev_chn_offset[IPU_FW_PSYS_N_DEV_CHN_ID]; + u8 cell_id; + u8 cell_type_id; + u8 program_dependency_count; + u8 terminal_dependency_count; +#ifndef CONFIG_VIDEO_INTEL_IPU4P + u8 reserved[IPU_FW_PSYS_N_PADDING_UINT8_IN_PROGRAM_MANIFEST]; +#endif +}; + +struct ipu_fw_psys_process { + u32 kernel_bitmap[IPU_FW_PSYS_KERNEL_BITMAP_NOF_ELEMS]; + u32 size; + u32 ID; + u32 program_idx; + u32 state; + s16 parent_offset; + u16 cell_dependencies_offset; + u16 terminal_dependencies_offset; + u16 int_mem_offset[IPU_FW_PSYS_N_MEM_TYPE_ID]; + u16 ext_mem_offset[IPU_FW_PSYS_N_DATA_MEM_TYPE_ID]; + u16 dev_chn_offset[IPU_FW_PSYS_N_DEV_CHN_ID]; + u8 cell_id; + u8 int_mem_id[IPU_FW_PSYS_N_MEM_TYPE_ID]; + u8 ext_mem_id[IPU_FW_PSYS_N_DATA_MEM_TYPE_ID]; + u8 cell_dependency_count; + u8 terminal_dependency_count; + u8 padding[IPU_FW_PSYS_N_PADDING_UINT8_IN_PROCESS_STRUCT]; +}; + +struct ipu_psys_resource_alloc; +struct ipu_fw_psys_process_group; +struct ipu_psys_resource_pool; +int ipu_psys_allocate_resources(const struct device *dev, + struct ipu_fw_psys_process_group *pg, + void *pg_manifest, + struct ipu_psys_resource_alloc *alloc, + struct ipu_psys_resource_pool *pool); +int ipu_psys_move_resources(const struct device *dev, + struct ipu_psys_resource_alloc *alloc, + struct ipu_psys_resource_pool *source_pool, + struct ipu_psys_resource_pool *target_pool); + +void ipu_psys_free_resources(struct ipu_psys_resource_alloc *alloc, + struct ipu_psys_resource_pool *pool); + +extern const struct ipu_fw_resource_definitions *res_defs; + +#endif /* IPU_PLATFORM_RESOURCES_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu-platform.h b/drivers/media/pci/intel/ipu4/ipu-platform.h new 
file mode 100644 index 0000000000000..32956125d5e7d --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu-platform.h @@ -0,0 +1,51 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (C) 2013 - 2018 Intel Corporation */ + +#ifndef IPU_PLATFORM_H +#define IPU_PLATFORM_H + +#define IPU_NAME "intel-ipu4" +#define IPU_ISYS_NUM_STREAMS 8 /* Max 8 */ + +#ifdef CONFIG_VIDEO_INTEL_IPU4 +#define IPU_CPD_FIRMWARE_NAME "ipu4_cpd_b0.bin" +#else +#define IPU_CPD_FIRMWARE_NAME "ipu4p_cpd.bin" +#endif + +/* + * The following definitions are encoded to the media_device's model field so + * that the software components which uses IPU driver can get the hw stepping + * information. + */ +#ifdef CONFIG_VIDEO_INTEL_IPU4 +#define IPU_MEDIA_DEV_MODEL_NAME "ipu4/Broxton B" +#else +#define IPU_MEDIA_DEV_MODEL_NAME "ipu4p" +#endif + +#ifdef CONFIG_VIDEO_INTEL_IPU4 + +#define IPU_HW_BXT_P_B1_REV 0xa +#define IPU_HW_BXT_P_D0_REV 0xb +#define IPU_HW_BXT_P_E0_REV 0xc + +/* BXTP E0 has icache bug fixed */ +#define is_ipu_hw_bxtp_e0(isp) \ + ({ typeof(isp) __isp = (isp); \ + (__isp->pdev->device == IPU_PCI_ID && \ + __isp->pdev->revision == IPU_HW_BXT_P_E0_REV); }) +#endif + +/* declarations, definitions in ipu4.c */ +extern const struct ipu_isys_internal_pdata isys_ipdata; +extern const struct ipu_psys_internal_pdata psys_ipdata; +extern const struct ipu_buttress_ctrl isys_buttress_ctrl; +extern const struct ipu_buttress_ctrl psys_buttress_ctrl; + +/* definitions in ipu4-isys.c */ +extern struct ipu_trace_block isys_trace_blocks[]; +/* definitions in ipu4-psys.c */ +extern struct ipu_trace_block psys_trace_blocks[]; + +#endif diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/Makefile.ipu4isys_inc b/drivers/media/pci/intel/ipu4/ipu4-css/Makefile.ipu4isys_inc new file mode 100644 index 0000000000000..48a4edea420fc --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/Makefile.ipu4isys_inc @@ -0,0 +1,26 @@ +IPU_ISYSLIB_INC = \ + -I$(IPU_ISYSLIB_ROOT)/buffer/interface \ + 
-I$(IPU_ISYSLIB_ROOT)/cell/interface \ + -I$(IPU_ISYSLIB_ROOT)/cell/src \ + -I$(IPU_ISYSLIB_ROOT)/device_access/interface \ + -I$(IPU_ISYSLIB_ROOT)/device_access/src \ + -I$(IPU_ISYSLIB_ROOT)/devices \ + -I$(IPU_ISYSLIB_ROOT)/devices/interface \ + -I$(IPU_ISYSLIB_ROOT)/devices/isys/bxtB0 \ + -I$(IPU_ISYSLIB_ROOT)/devices/src \ + -I$(IPU_ISYSLIB_ROOT)/fw_abi_common_types \ + -I$(IPU_ISYSLIB_ROOT)/fw_abi_common_types/cpu \ + -I$(IPU_ISYSLIB_ROOT)/isysapi/interface \ + -I$(IPU_ISYSLIB_ROOT)/pkg_dir/interface \ + -I$(IPU_ISYSLIB_ROOT)/pkg_dir/src \ + -I$(IPU_ISYSLIB_ROOT)/port/interface \ + -I$(IPU_ISYSLIB_ROOT)/reg_dump/src/isys/bxtB0_gen_reg_dump \ + -I$(IPU_ISYSLIB_ROOT)/regmem/interface \ + -I$(IPU_ISYSLIB_ROOT)/regmem/src \ + -I$(IPU_ISYSLIB_ROOT)/support \ + -I$(IPU_ISYSLIB_ROOT)/syscom/interface \ + -I$(IPU_ISYSLIB_ROOT)/syscom/src \ + -I$(IPU_ISYSLIB_ROOT)/trace/interface \ + -I$(IPU_ISYSLIB_ROOT)/utils/system_defs/ \ + -I$(IPU_ISYSLIB_ROOT)/vied \ + -I$(IPU_ISYSLIB_ROOT)/vied/vied/ \ No newline at end of file diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/Makefile.ipu4isys_src b/drivers/media/pci/intel/ipu4/ipu4-css/Makefile.ipu4isys_src new file mode 100644 index 0000000000000..c20760bdb5f1d --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/Makefile.ipu4isys_src @@ -0,0 +1,19 @@ +IPU_ISYSLIB_SRC = \ + $(IPU_ISYSLIB_ROOT_REL)/isysapi/src/ia_css_isys_private.o \ + $(IPU_ISYSLIB_ROOT_REL)/isysapi/src/ia_css_isys_public.o \ + $(IPU_ISYSLIB_ROOT_REL)/isysapi/src/ia_css_isys_public_trace.o + +ifeq ($(CONFIG_VIDEO_INTEL_IPU), m) +IPU_ISYSLIB_SRC += \ + $(IPU_ISYSLIB_ROOT_REL)/buffer/src/cpu/buffer_access.o \ + $(IPU_ISYSLIB_ROOT_REL)/buffer/src/cpu/ia_css_buffer.o \ + $(IPU_ISYSLIB_ROOT_REL)/buffer/src/cpu/ia_css_input_buffer.o \ + $(IPU_ISYSLIB_ROOT_REL)/buffer/src/cpu/ia_css_output_buffer.o \ + $(IPU_ISYSLIB_ROOT_REL)/buffer/src/cpu/ia_css_shared_buffer.o \ + $(IPU_ISYSLIB_ROOT_REL)/pkg_dir/src/ia_css_pkg_dir.o \ + 
$(IPU_ISYSLIB_ROOT_REL)/port/src/queue.o \ + $(IPU_ISYSLIB_ROOT_REL)/port/src/recv_port.o \ + $(IPU_ISYSLIB_ROOT_REL)/port/src/send_port.o \ + $(IPU_ISYSLIB_ROOT_REL)/reg_dump/src/reg_dump_generic_bridge.o \ + $(IPU_ISYSLIB_ROOT_REL)/syscom/src/ia_css_syscom.o +endif diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/Makefile.ipu4psys_inc b/drivers/media/pci/intel/ipu4/ipu4-css/Makefile.ipu4psys_inc new file mode 100644 index 0000000000000..abc61475e9887 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/Makefile.ipu4psys_inc @@ -0,0 +1,52 @@ +IPU_PSYSLIB_INC = \ + -I$(IPU_PSYSLIB_ROOT)/buffer/interface \ + -I$(IPU_PSYSLIB_ROOT)/cell/interface \ + -I$(IPU_PSYSLIB_ROOT)/cell/src \ + -I$(IPU_PSYSLIB_ROOT)/client_pkg/interface \ + -I$(IPU_PSYSLIB_ROOT)/client_pkg/src \ + -I$(IPU_PSYSLIB_ROOT)/cpd/ \ + -I$(IPU_PSYSLIB_ROOT)/cpd/cpd_component/interface \ + -I$(IPU_PSYSLIB_ROOT)/cpd/cpd_metadata/interface \ + -I$(IPU_PSYSLIB_ROOT)/device_access/interface \ + -I$(IPU_PSYSLIB_ROOT)/device_access/src \ + -I$(IPU_PSYSLIB_ROOT)/devices \ + -I$(IPU_PSYSLIB_ROOT)/devices/interface \ + -I$(IPU_PSYSLIB_ROOT)/devices/psys/bxtB0 \ + -I$(IPU_PSYSLIB_ROOT)/devices/src \ + -I$(IPU_PSYSLIB_ROOT)/fw_abi_common_types \ + -I$(IPU_PSYSLIB_ROOT)/fw_abi_common_types/cpu \ + -I$(IPU_PSYSLIB_ROOT)/pkg_dir/interface \ + -I$(IPU_PSYSLIB_ROOT)/pkg_dir/src \ + -I$(IPU_PSYSLIB_ROOT)/port/interface \ + -I$(IPU_PSYSLIB_ROOT)/psys_private_pg/interface \ + -I$(IPU_PSYSLIB_ROOT)/psys_server/interface \ + -I$(IPU_PSYSLIB_ROOT)/psysapi/data/interface \ + -I$(IPU_PSYSLIB_ROOT)/psysapi/data/src \ + -I$(IPU_PSYSLIB_ROOT)/psysapi/device/interface \ + -I$(IPU_PSYSLIB_ROOT)/psysapi/device/interface/bxtB0 \ + -I$(IPU_PSYSLIB_ROOT)/psysapi/dynamic/interface \ + -I$(IPU_PSYSLIB_ROOT)/psysapi/dynamic/src \ + -I$(IPU_PSYSLIB_ROOT)/psysapi/interface \ + -I$(IPU_PSYSLIB_ROOT)/psysapi/kernel/interface \ + -I$(IPU_PSYSLIB_ROOT)/psysapi/param/interface \ + -I$(IPU_PSYSLIB_ROOT)/psysapi/param/src \ + 
-I$(IPU_PSYSLIB_ROOT)/psysapi/psys_server_manifest/bxtB0 \ + -I$(IPU_PSYSLIB_ROOT)/psysapi/resource_model/bxtB0 \ + -I$(IPU_PSYSLIB_ROOT)/psysapi/sim/interface \ + -I$(IPU_PSYSLIB_ROOT)/psysapi/sim/src \ + -I$(IPU_PSYSLIB_ROOT)/psysapi/static/interface \ + -I$(IPU_PSYSLIB_ROOT)/psysapi/static/src \ + -I$(IPU_PSYSLIB_ROOT)/reg_dump/src/psys/bxtB0_gen_reg_dump \ + -I$(IPU_PSYSLIB_ROOT)/regmem/interface \ + -I$(IPU_PSYSLIB_ROOT)/regmem/src \ + -I$(IPU_PSYSLIB_ROOT)/routing_bitmap/interface \ + -I$(IPU_PSYSLIB_ROOT)/routing_bitmap/src \ + -I$(IPU_PSYSLIB_ROOT)/support \ + -I$(IPU_PSYSLIB_ROOT)/syscom/interface \ + -I$(IPU_PSYSLIB_ROOT)/syscom/src \ + -I$(IPU_PSYSLIB_ROOT)/trace/interface \ + -I$(IPU_PSYSLIB_ROOT)/vied \ + -I$(IPU_PSYSLIB_ROOT)/vied/vied/ \ + -I$(IPU_PSYSLIB_ROOT)/vied_nci_acb/interface \ + -I$(IPU_PSYSLIB_ROOT)/vied_parameters/interface \ + -I$(IPU_PSYSLIB_ROOT)/vied_parameters/src \ No newline at end of file diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/Makefile.ipu4psys_src b/drivers/media/pci/intel/ipu4/ipu4-css/Makefile.ipu4psys_src new file mode 100644 index 0000000000000..8344bf569e13e --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/Makefile.ipu4psys_src @@ -0,0 +1,32 @@ +IPU_PSYSLIB_SRC = \ + $(IPU_PSYSLIB_ROOT_REL)/buffer/src/cpu/buffer_access.o \ + $(IPU_PSYSLIB_ROOT_REL)/buffer/src/cpu/ia_css_buffer.o \ + $(IPU_PSYSLIB_ROOT_REL)/buffer/src/cpu/ia_css_input_buffer.o \ + $(IPU_PSYSLIB_ROOT_REL)/buffer/src/cpu/ia_css_output_buffer.o \ + $(IPU_PSYSLIB_ROOT_REL)/buffer/src/cpu/ia_css_shared_buffer.o \ + $(IPU_PSYSLIB_ROOT_REL)/client_pkg/src/ia_css_client_pkg.o \ + $(IPU_PSYSLIB_ROOT_REL)/pkg_dir/src/ia_css_pkg_dir.o \ + $(IPU_PSYSLIB_ROOT_REL)/port/src/queue.o \ + $(IPU_PSYSLIB_ROOT_REL)/port/src/recv_port.o \ + $(IPU_PSYSLIB_ROOT_REL)/port/src/send_port.o \ + $(IPU_PSYSLIB_ROOT_REL)/psys_server/src/bxt_spctrl_process_group_cmd_impl.o \ + $(IPU_PSYSLIB_ROOT_REL)/psysapi/data/src/ia_css_program_group_data.o \ + 
$(IPU_PSYSLIB_ROOT_REL)/psysapi/device/src/ia_css_psys_device.o \ + $(IPU_PSYSLIB_ROOT_REL)/psysapi/dynamic/src/ia_css_psys_buffer_set.o \ + $(IPU_PSYSLIB_ROOT_REL)/psysapi/dynamic/src/ia_css_psys_process.o \ + $(IPU_PSYSLIB_ROOT_REL)/psysapi/dynamic/src/ia_css_psys_process_group.o \ + $(IPU_PSYSLIB_ROOT_REL)/psysapi/dynamic/src/ia_css_psys_terminal.o \ + $(IPU_PSYSLIB_ROOT_REL)/psysapi/kernel/src/ia_css_kernel_bitmap.o \ + $(IPU_PSYSLIB_ROOT_REL)/psysapi/param/src/ia_css_program_group_param.o \ + $(IPU_PSYSLIB_ROOT_REL)/psysapi/psys_server_manifest/bxtB0/ia_css_psys_server_manifest.o \ + $(IPU_PSYSLIB_ROOT_REL)/psysapi/resource_model/bxtB0/vied_nci_psys_resource_model.o \ + $(IPU_PSYSLIB_ROOT_REL)/psysapi/sim/src/vied_nci_psys_system.o \ + $(IPU_PSYSLIB_ROOT_REL)/psysapi/static/src/ia_css_psys_program_group_manifest.o \ + $(IPU_PSYSLIB_ROOT_REL)/psysapi/static/src/ia_css_psys_program_manifest.o \ + $(IPU_PSYSLIB_ROOT_REL)/psysapi/static/src/ia_css_psys_terminal_manifest.o \ + $(IPU_PSYSLIB_ROOT_REL)/reg_dump/src/reg_dump_generic_bridge.o \ + $(IPU_PSYSLIB_ROOT_REL)/routing_bitmap/src/ia_css_rbm.o \ + $(IPU_PSYSLIB_ROOT_REL)/routing_bitmap/src/ia_css_rbm_manifest.o \ + $(IPU_PSYSLIB_ROOT_REL)/syscom/src/ia_css_syscom.o \ + $(IPU_PSYSLIB_ROOT_REL)/vied_parameters/src/ia_css_terminal.o \ + $(IPU_PSYSLIB_ROOT_REL)/vied_parameters/src/ia_css_terminal_manifest.o \ No newline at end of file diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/Makefile.isyslib b/drivers/media/pci/intel/ipu4/ipu4-css/Makefile.isyslib new file mode 100644 index 0000000000000..33a3df8969194 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/Makefile.isyslib @@ -0,0 +1,42 @@ +ifneq ($(EXTERNAL_BUILD), 1) +srcpath := $(srctree) +endif + +PROGRAMS = isys_fw +SYSTEM = input_system_system +IPU_ISYSLIB_ROOT_REL = ipu4-css/lib2600 +IPU_ISYSLIB_ROOT = $(srcpath)/$(src)/$(IPU_ISYSLIB_ROOT_REL) + +include $(srcpath)/$(src)/ipu4-css/Makefile.ipu4isys_inc +include 
$(srcpath)/$(src)/ipu4-css/Makefile.ipu4isys_src + +intel-ipu4-isys-csslib-objs := \ + ipu4-css/libintel-ipu4.o \ + $(IPU_ISYSLIB_SRC) + +ifeq ($(CONFIG_VIDEO_INTEL_IPU), m) +intel-ipu4-isys-csslib-objs += ipu4-css/ipu-wrapper.o +endif +obj-$(CONFIG_VIDEO_INTEL_IPU) += intel-ipu4-isys-csslib.o + +INCLUDES := -I$(srcpath)/$(src)/$(IPU_ISYSLIB_ROOT_REL) \ + -I$(srcpath)/$(src) \ + $(IPU_ISYSLIB_INC) + +DEFINES:= -D__HOST__ -D__KERNEL__ -DISYS_FPGA -DPSYS_FPGA + +DEFINES += -DSSID=1 +DEFINES += -DMMID=1 +DEFINES += -DPROGNAME=isys_fw +DEFINES += -DPROGMAP=\"isys_fw.map.h\" +DEFINES += -DSUBSYSTEM_INCLUDE=\ +DEFINES += -DCELL=input_system_unis_logic_sp_control_tile_sp +DEFINES += -DSPMAIN=isys_fw +DEFINES += -DRUN_INTEGRATION +DEFINES += -DDEBUG_SP_NCI +DEFINES += -DCFG_VIED_SUBSYSTEM_ACCESS_LIB_IMPL=1 +DEFINES += -DHRT_ON_VIED_SUBSYSTEM_ACCESS=0 +DEFINES += -DHRT_USE_VIR_ADDRS +DEFINES += -DHRT_HW + +ccflags-y += $(INCLUDES) $(DEFINES) -fno-common diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/Makefile.psyslib b/drivers/media/pci/intel/ipu4/ipu4-css/Makefile.psyslib new file mode 100644 index 0000000000000..c93852bd09a1d --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/Makefile.psyslib @@ -0,0 +1,14 @@ +ifneq ($(EXTERNAL_BUILD), 1) +srcpath := $(srctree) +endif + +# note: this file only defines INCLUDES paths for psyslib +include $(srcpath)/$(src)/ipu4-css/Makefile.ipu4psys_inc + +IPU_PSYSLIB_ROOT = $(srcpath)/$(src)/ipu4-css/lib2600psys/lib +HOST_DEFINES += -DPSYS_SERVER_ON_SPC +HOST_DEFINES += -DCFG_VIED_SUBSYSTEM_ACCESS_LIB_IMPL=1 + +ccflags-y += $(IPU_PSYSLIB_INC) $(HOST_DEFINES) + +obj-$(CONFIG_VIDEO_INTEL_IPU) += ipu4-css/lib2600psys/ diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/ia_css_fw_pkg_release.h b/drivers/media/pci/intel/ipu4/ipu4-css/ia_css_fw_pkg_release.h new file mode 100644 index 0000000000000..408726c817146 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/ia_css_fw_pkg_release.h @@ -0,0 +1,14 @@ +/* +* Support for Intel 
Camera Imaging ISP subsystem. +* Copyright (c) 2010 - 2018, Intel Corporation. +* +* This program is free software; you can redistribute it and/or modify it +* under the terms and conditions of the GNU General Public License, +* version 2, as published by the Free Software Foundation. +* +* This program is distributed in the hope it will be useful, but WITHOUT +* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for +* more details. +*/ +#define IA_CSS_FW_PKG_RELEASE 0x20181222 diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/ipu-wrapper.c b/drivers/media/pci/intel/ipu4/ipu4-css/ipu-wrapper.c new file mode 120000 index 0000000000000..3167dda06f067 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/ipu-wrapper.c @@ -0,0 +1 @@ +../../ipu-wrapper.c \ No newline at end of file diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/buffer/buffer.mk b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/buffer/buffer.mk new file mode 100644 index 0000000000000..c00a1133b440f --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/buffer/buffer.mk @@ -0,0 +1,43 @@ +# # # +# Support for Intel Camera Imaging ISP subsystem. +# Copyright (c) 2010 - 2018, Intel Corporation. +# +# This program is free software; you can redistribute it and/or modify it +# under the terms and conditions of the GNU General Public License, +# version 2, as published by the Free Software Foundation. +# +# This program is distributed in the hope it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +# FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU General Public License for +# more details +# +# +# MODULE is BUFFER + +ifdef _H_BUFFER_MK +$(error ERROR: buffer.mk included multiple times, please check makefile) +else +_H_BUFFER_MK=1 +endif + +BUFFER_DIR=$${MODULES_DIR}/buffer + +BUFFER_INTERFACE=$(BUFFER_DIR)/interface +BUFFER_SOURCES_CPU=$(BUFFER_DIR)/src/cpu +BUFFER_SOURCES_CSS=$(BUFFER_DIR)/src/css + +BUFFER_HOST_FILES += $(BUFFER_SOURCES_CPU)/ia_css_buffer.c +BUFFER_HOST_FILES += $(BUFFER_SOURCES_CPU)/ia_css_output_buffer.c +BUFFER_HOST_FILES += $(BUFFER_SOURCES_CPU)/ia_css_input_buffer.c +BUFFER_HOST_FILES += $(BUFFER_SOURCES_CPU)/ia_css_shared_buffer.c +BUFFER_HOST_FILES += $(BUFFER_SOURCES_CPU)/buffer_access.c +BUFFER_HOST_CPPFLAGS += -I$(BUFFER_INTERFACE) +BUFFER_HOST_CPPFLAGS += -I$${MODULES_DIR}/support + +BUFFER_FW_FILES += $(BUFFER_SOURCES_CSS)/ia_css_input_buffer.c +BUFFER_FW_FILES += $(BUFFER_SOURCES_CSS)/ia_css_output_buffer.c +BUFFER_FW_FILES += $(BUFFER_SOURCES_CSS)/ia_css_shared_buffer.c +BUFFER_FW_FILES += $(BUFFER_SOURCES_CSS)/buffer_access.c + +BUFFER_FW_CPPFLAGS += -I$(BUFFER_INTERFACE) +BUFFER_FW_CPPFLAGS += -I$${MODULES_DIR}/support diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/buffer/interface/buffer_access.h b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/buffer/interface/buffer_access.h new file mode 100644 index 0000000000000..e5fe647742c9f --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/buffer/interface/buffer_access.h @@ -0,0 +1,36 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU General Public License for + * more details. +*/ + +#ifndef __BUFFER_ACCESS_H +#define __BUFFER_ACCESS_H + +#include "buffer_type.h" +/* #def to keep consistent the buffer load interfaces for host and css */ +#define IDM 0 + +void +buffer_load( + buffer_address address, + void *data, + unsigned int size, + unsigned int mm_id); + +void +buffer_store( + buffer_address address, + const void *data, + unsigned int size, + unsigned int mm_id); + +#endif /* __BUFFER_ACCESS_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/buffer/interface/buffer_type.h b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/buffer/interface/buffer_type.h new file mode 100644 index 0000000000000..de51f23941582 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/buffer/interface/buffer_type.h @@ -0,0 +1,29 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+*/ + +#ifndef __BUFFER_TYPE_H +#define __BUFFER_TYPE_H + +/* portable access to buffers in DDR */ + +#ifdef __VIED_CELL +typedef unsigned int buffer_address; +#else +/* workaround needed because shared_memory_access.h uses size_t */ +#include "type_support.h" +#include "vied/shared_memory_access.h" +typedef host_virtual_address_t buffer_address; +#endif + +#endif /* __BUFFER_TYPE_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/buffer/interface/ia_css_buffer_address.h b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/buffer/interface/ia_css_buffer_address.h new file mode 100644 index 0000000000000..137bfb1fda166 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/buffer/interface/ia_css_buffer_address.h @@ -0,0 +1,24 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+*/ + +#ifndef __IA_CSS_BUFFER_ADDRESS_H +#define __IA_CSS_BUFFER_ADDRESS_H + +#include "type_support.h" + +typedef uint32_t ia_css_buffer_address; /* CSS virtual address */ + +#define ia_css_buffer_address_null ((ia_css_buffer_address)0) + +#endif /* __IA_CSS_BUFFER_ADDRESS_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/buffer/interface/ia_css_input_buffer.h b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/buffer/interface/ia_css_input_buffer.h new file mode 100644 index 0000000000000..4e92e35b61843 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/buffer/interface/ia_css_input_buffer.h @@ -0,0 +1,51 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ + +#ifndef __IA_CSS_INPUT_BUFFER_H +#define __IA_CSS_INPUT_BUFFER_H + + +/* Input Buffers */ + +/* A CSS input buffer is a buffer in DDR that can be written by the CPU, + * and that can be read by CSS hardware, after the buffer has been handed over. + * Examples: command buffer, input frame buffer, parameter buffer + * An input buffer must be mapped into the CPU address space before it can be + * written by the CPU. + * After mapping, writing, and unmapping, the buffer can be handed over to the + * firmware. An input buffer is handed over to the CSS by mapping it to the + * CSS address space (by the CPU), and by passing the resulting CSS (virtual) + * address of the input buffer to the CSS hardware. 
+ * The firmware can read from an input buffer as soon as it has received the + * CSS virtual address. + * The firmware should not write into an input buffer. + * The firmware hands over the input buffer (back to the CPU) by sending the + * buffer handle via a response. The host should unmap the buffer, + * before reusing it. + * The firmware should not read from the input buffer after returning the + * buffer handle to the CPU. + * + * A buffer may be pre-mapped to the CPU and/or to the CSS upon allocation, + * depending on the allocator's preference. In case of pre-mapped buffers, + * the map and unmap functions will only manage read and write access. + */ + +#include "ia_css_buffer_address.h" + +typedef struct ia_css_buffer_s *ia_css_input_buffer; /* input buffer handle */ +typedef void *ia_css_input_buffer_cpu_address; /* CPU virtual address */ +/* CSS virtual address */ +typedef ia_css_buffer_address ia_css_input_buffer_css_address; + +#endif /* __IA_CSS_INPUT_BUFFER_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/buffer/interface/ia_css_input_buffer_cpu.h b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/buffer/interface/ia_css_input_buffer_cpu.h new file mode 100644 index 0000000000000..d3d01353ce431 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/buffer/interface/ia_css_input_buffer_cpu.h @@ -0,0 +1,49 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+*/ + +#ifndef __IA_CSS_INPUT_BUFFER_CPU_H +#define __IA_CSS_INPUT_BUFFER_CPU_H + +#include "vied/shared_memory_map.h" +#include "ia_css_input_buffer.h" + +ia_css_input_buffer +ia_css_input_buffer_alloc( + vied_subsystem_t sid, + vied_memory_t mid, + unsigned int size); + +void +ia_css_input_buffer_free( + vied_subsystem_t sid, + vied_memory_t mid, + ia_css_input_buffer b); + +ia_css_input_buffer_cpu_address +ia_css_input_buffer_cpu_map(ia_css_input_buffer b); + +ia_css_input_buffer_cpu_address +ia_css_input_buffer_cpu_unmap(ia_css_input_buffer b); + +ia_css_input_buffer_css_address +ia_css_input_buffer_css_map(vied_memory_t mid, ia_css_input_buffer b); + +ia_css_input_buffer_css_address +ia_css_input_buffer_css_map_no_invalidate(vied_memory_t mid, ia_css_input_buffer b); + +ia_css_input_buffer_css_address +ia_css_input_buffer_css_unmap(ia_css_input_buffer b); + + +#endif /* __IA_CSS_INPUT_BUFFER_CPU_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/buffer/interface/ia_css_output_buffer.h b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/buffer/interface/ia_css_output_buffer.h new file mode 100644 index 0000000000000..2c310ea92c6af --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/buffer/interface/ia_css_output_buffer.h @@ -0,0 +1,30 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+*/ + +#ifndef __IA_CSS_OUTPUT_BUFFER_H +#define __IA_CSS_OUTPUT_BUFFER_H + +/* Output Buffers */ +/* A CSS output buffer is a buffer in DDR that can be written by CSS hardware + * and that can be read by the host, after the buffer has been handed over. + * Examples: output frame buffer + */ + +#include "ia_css_buffer_address.h" + +typedef struct ia_css_buffer_s *ia_css_output_buffer; +typedef void *ia_css_output_buffer_cpu_address; +typedef ia_css_buffer_address ia_css_output_buffer_css_address; + +#endif /* __IA_CSS_OUTPUT_BUFFER_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/buffer/interface/ia_css_output_buffer_cpu.h b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/buffer/interface/ia_css_output_buffer_cpu.h new file mode 100644 index 0000000000000..0299fc3b7eb66 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/buffer/interface/ia_css_output_buffer_cpu.h @@ -0,0 +1,48 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+*/ + +#ifndef __IA_CSS_OUTPUT_BUFFER_CPU_H +#define __IA_CSS_OUTPUT_BUFFER_CPU_H + +#include "vied/shared_memory_map.h" +#include "ia_css_output_buffer.h" + +ia_css_output_buffer +ia_css_output_buffer_alloc( + vied_subsystem_t sid, + vied_memory_t mid, + unsigned int size); + +void +ia_css_output_buffer_free( + vied_subsystem_t sid, + vied_memory_t mid, + ia_css_output_buffer b); + +ia_css_output_buffer_css_address +ia_css_output_buffer_css_map(ia_css_output_buffer b); + +ia_css_output_buffer_css_address +ia_css_output_buffer_css_unmap(ia_css_output_buffer b); + +ia_css_output_buffer_cpu_address +ia_css_output_buffer_cpu_map(vied_memory_t mid, ia_css_output_buffer b); +ia_css_output_buffer_cpu_address +ia_css_output_buffer_cpu_map_no_invalidate(vied_memory_t mid, ia_css_output_buffer b); + +ia_css_output_buffer_cpu_address +ia_css_output_buffer_cpu_unmap(ia_css_output_buffer b); + + +#endif /* __IA_CSS_OUTPUT_BUFFER_CPU_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/buffer/interface/ia_css_return_token.h b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/buffer/interface/ia_css_return_token.h new file mode 100644 index 0000000000000..440161d2f32b3 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/buffer/interface/ia_css_return_token.h @@ -0,0 +1,54 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+*/ + +#ifndef __IA_CSS_RETURN_TOKEN_H +#define __IA_CSS_RETURN_TOKEN_H + +#include "storage_class.h" +#include "assert_support.h" /* For CT_ASSERT */ + +/* ia_css_return_token: data item of exactly 8 bytes (64 bits) + * which can be used to pass a return token back to the host +*/ +typedef unsigned long long ia_css_return_token; + +STORAGE_CLASS_INLINE void +ia_css_return_token_copy(ia_css_return_token *to, + const ia_css_return_token *from) +{ + /* copy a return token on VIED processor */ + int *dst = (int *)to; + int *src = (int *)from; + + dst[0] = src[0]; + dst[1] = src[1]; +} + +STORAGE_CLASS_INLINE void +ia_css_return_token_zero(ia_css_return_token *to) +{ + /* zero return token on VIED processor */ + int *dst = (int *)to; + + dst[0] = 0; + dst[1] = 0; +} + +STORAGE_CLASS_INLINE void _check_return_token_size(void) +{ + CT_ASSERT(sizeof(int) == 4); + CT_ASSERT(sizeof(ia_css_return_token) == 8); +} + +#endif /* __IA_CSS_RETURN_TOKEN_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/buffer/interface/ia_css_shared_buffer.h b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/buffer/interface/ia_css_shared_buffer.h new file mode 100644 index 0000000000000..558ec679f98a0 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/buffer/interface/ia_css_shared_buffer.h @@ -0,0 +1,32 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+*/ + +#ifndef __IA_CSS_SHARED_BUFFER_H +#define __IA_CSS_SHARED_BUFFER_H + +/* Shared Buffers */ +/* A CSS shared buffer is a buffer in DDR that can be read and written by the + * CPU and CSS. + * Both the CPU and CSS can have the buffer mapped simultaneously. + * Access rights are not managed by this interface, this could be done by means + * of the read and write pointer of a queue, for example. + */ + +#include "ia_css_buffer_address.h" + +typedef struct ia_css_buffer_s *ia_css_shared_buffer; +typedef void *ia_css_shared_buffer_cpu_address; +typedef ia_css_buffer_address ia_css_shared_buffer_css_address; + +#endif /* __IA_CSS_SHARED_BUFFER_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/buffer/interface/ia_css_shared_buffer_cpu.h b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/buffer/interface/ia_css_shared_buffer_cpu.h new file mode 100644 index 0000000000000..ff62914f99dc3 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/buffer/interface/ia_css_shared_buffer_cpu.h @@ -0,0 +1,51 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+*/ + +#ifndef __IA_CSS_SHARED_BUFFER_CPU_H +#define __IA_CSS_SHARED_BUFFER_CPU_H + +#include "vied/shared_memory_map.h" +#include "ia_css_shared_buffer.h" + +ia_css_shared_buffer +ia_css_shared_buffer_alloc( + vied_subsystem_t sid, + vied_memory_t mid, + unsigned int size); + +void +ia_css_shared_buffer_free( + vied_subsystem_t sid, + vied_memory_t mid, + ia_css_shared_buffer b); + +ia_css_shared_buffer_cpu_address +ia_css_shared_buffer_cpu_map(ia_css_shared_buffer b); + +ia_css_shared_buffer_cpu_address +ia_css_shared_buffer_cpu_unmap(ia_css_shared_buffer b); + +ia_css_shared_buffer_css_address +ia_css_shared_buffer_css_map(ia_css_shared_buffer b); + +ia_css_shared_buffer_css_address +ia_css_shared_buffer_css_unmap(ia_css_shared_buffer b); + +ia_css_shared_buffer +ia_css_shared_buffer_css_update(vied_memory_t mid, ia_css_shared_buffer b); + +ia_css_shared_buffer +ia_css_shared_buffer_cpu_update(vied_memory_t mid, ia_css_shared_buffer b); + +#endif /* __IA_CSS_SHARED_BUFFER_CPU_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/buffer/src/cpu/buffer_access.c b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/buffer/src/cpu/buffer_access.c new file mode 100644 index 0000000000000..f0c617fe501a0 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/buffer/src/cpu/buffer_access.c @@ -0,0 +1,39 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+*/ + +/* implementation of buffer access from the CPU */ +/* using shared_memory interface */ + +#include "buffer_access.h" +#include "vied/shared_memory_access.h" + +void +buffer_load( + buffer_address address, + void *data, + unsigned int bytes, + unsigned int mm_id) +{ + shared_memory_load(mm_id, address, data, bytes); +} + +void +buffer_store( + buffer_address address, + const void *data, + unsigned int bytes, + unsigned int mm_id) +{ + shared_memory_store(mm_id, address, data, bytes); +} diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/buffer/src/cpu/ia_css_buffer.c b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/buffer/src/cpu/ia_css_buffer.c new file mode 100644 index 0000000000000..146d4109de440 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/buffer/src/cpu/ia_css_buffer.c @@ -0,0 +1,51 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+*/ + +/* provided interface */ +#include "ia_css_buffer.h" + +/* used interfaces */ +#include "vied/shared_memory_access.h" +#include "vied/shared_memory_map.h" +#include "cpu_mem_support.h" + +ia_css_buffer_t +ia_css_buffer_alloc(vied_subsystem_t sid, vied_memory_t mid, unsigned int size) +{ + ia_css_buffer_t b; + + b = ia_css_cpu_mem_alloc(sizeof(*b)); + if (b == NULL) + return NULL; + + b->mem = shared_memory_alloc(mid, size); + + if (b->mem == 0) { + ia_css_cpu_mem_free(b); + return NULL; + } + + b->css_address = shared_memory_map(sid, mid, b->mem); + b->size = size; + return b; +} + + +void +ia_css_buffer_free(vied_subsystem_t sid, vied_memory_t mid, ia_css_buffer_t b) +{ + shared_memory_unmap(sid, mid, b->css_address); + shared_memory_free(mid, b->mem); + ia_css_cpu_mem_free(b); +} diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/buffer/src/cpu/ia_css_buffer.h b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/buffer/src/cpu/ia_css_buffer.h new file mode 100644 index 0000000000000..0f99a06e9a89b --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/buffer/src/cpu/ia_css_buffer.h @@ -0,0 +1,58 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+*/ + +#ifndef __IA_CSS_BUFFER_H +#define __IA_CSS_BUFFER_H + +/* workaround: needed because uses size_t */ +#include "type_support.h" +#include "vied/shared_memory_map.h" + +typedef enum { + buffer_unmapped, /* buffer is not accessible by cpu, nor css */ + buffer_write, /* output buffer: css has write access */ + /* input buffer: cpu has write access */ + buffer_read, /* input buffer: css has read access */ + /* output buffer: cpu has read access */ + buffer_cpu, /* shared buffer: cpu has read/write access */ + buffer_css /* shared buffer: css has read/write access */ +} buffer_state; + +struct ia_css_buffer_s { + /* number of bytes allocated */ + unsigned int size; + /* allocated virtual memory object */ + host_virtual_address_t mem; + /* virtual address to be used on css/firmware */ + vied_virtual_address_t css_address; + /* virtual address to be used on cpu/host */ + void *cpu_address; + buffer_state state; +}; + +typedef struct ia_css_buffer_s *ia_css_buffer_t; + +ia_css_buffer_t +ia_css_buffer_alloc( + vied_subsystem_t sid, + vied_memory_t mid, + unsigned int size); + +void +ia_css_buffer_free( + vied_subsystem_t sid, + vied_memory_t mid, + ia_css_buffer_t b); + +#endif /* __IA_CSS_BUFFER_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/buffer/src/cpu/ia_css_input_buffer.c b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/buffer/src/cpu/ia_css_input_buffer.c new file mode 100644 index 0000000000000..2a128795d03e2 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/buffer/src/cpu/ia_css_input_buffer.c @@ -0,0 +1,184 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. 
+ * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ + + +#include "ia_css_input_buffer_cpu.h" +#include "ia_css_buffer.h" +#include "vied/shared_memory_access.h" +#include "vied/shared_memory_map.h" +#include "cpu_mem_support.h" + + +ia_css_input_buffer +ia_css_input_buffer_alloc( + vied_subsystem_t sid, + vied_memory_t mid, + unsigned int size) +{ + ia_css_input_buffer b; + + /* allocate buffer container */ + b = ia_css_cpu_mem_alloc(sizeof(*b)); + if (b == NULL) + return NULL; + + b->mem = shared_memory_alloc(mid, size); + if (b->mem == 0) { + ia_css_cpu_mem_free(b); + return NULL; + } + +#ifndef HRT_HW + /* initialize the buffer to avoid warnings when copying */ + shared_memory_zero(mid, b->mem, size); + + /* in simulation, we need to allocate a shadow host buffer */ + b->cpu_address = ia_css_cpu_mem_alloc_page_aligned(size); + if (b->cpu_address == NULL) { + shared_memory_free(mid, b->mem); + ia_css_cpu_mem_free(b); + return NULL; + } +#else + /* on hw / real platform we can use the pointer from + * shared memory alloc + */ + b->cpu_address = (void *)HOST_ADDRESS(b->mem); +#endif + + b->css_address = shared_memory_map(sid, mid, b->mem); + + b->size = size; + b->state = buffer_unmapped; + + return b; +} + + +void +ia_css_input_buffer_free( + vied_subsystem_t sid, + vied_memory_t mid, + ia_css_input_buffer b) +{ + if (b == NULL) + return; + if (b->state != buffer_unmapped) + return; + +#ifndef HRT_HW + /* only free if we actually allocated it separately */ + ia_css_cpu_mem_free(b->cpu_address); +#endif + shared_memory_unmap(sid, mid, b->css_address); + shared_memory_free(mid, b->mem); + ia_css_cpu_mem_free(b); +} + + +ia_css_input_buffer_cpu_address +ia_css_input_buffer_cpu_map(ia_css_input_buffer b) +{ + if (b == NULL) + return NULL; + if (b->state != 
buffer_unmapped) + return NULL; + + /* map input buffer to CPU address space, acquire write access */ + b->state = buffer_write; + + /* return pre-mapped buffer */ + return b->cpu_address; +} + + +ia_css_input_buffer_cpu_address +ia_css_input_buffer_cpu_unmap(ia_css_input_buffer b) +{ + if (b == NULL) + return NULL; + if (b->state != buffer_write) + return NULL; + + /* unmap input buffer from CPU address space, release write access */ + b->state = buffer_unmapped; + + /* return pre-mapped buffer */ + return b->cpu_address; +} + + +ia_css_input_buffer_css_address +ia_css_input_buffer_css_map(vied_memory_t mid, ia_css_input_buffer b) +{ + if (b == NULL) + return 0; + if (b->state != buffer_unmapped) + return 0; + + /* map input buffer to CSS address space, acquire read access */ + b->state = buffer_read; + + /* now flush the cache */ + ia_css_cpu_mem_cache_flush(b->cpu_address, b->size); +#ifndef HRT_HW + /* only copy in case of simulation, otherwise it should just work */ + /* copy data from CPU address space to CSS address space */ + shared_memory_store(mid, b->mem, b->cpu_address, b->size); +#else + (void)mid; +#endif + + return (ia_css_input_buffer_css_address)b->css_address; +} + + +ia_css_input_buffer_css_address +ia_css_input_buffer_css_map_no_invalidate(vied_memory_t mid, ia_css_input_buffer b) +{ + if (b == NULL) + return 0; + if (b->state != buffer_unmapped) + return 0; + + /* map input buffer to CSS address space, acquire read access */ + b->state = buffer_read; + +#ifndef HRT_HW + /* only copy in case of simulation, otherwise it should just work */ + /* copy data from CPU address space to CSS address space */ + shared_memory_store(mid, b->mem, b->cpu_address, b->size); +#else + (void)mid; +#endif + + return (ia_css_input_buffer_css_address)b->css_address; +} + + +ia_css_input_buffer_css_address +ia_css_input_buffer_css_unmap(ia_css_input_buffer b) +{ + if (b == NULL) + return 0; + if (b->state != buffer_read) + return 0; + + /* unmap input buffer from CSS 
address space, release read access */ + b->state = buffer_unmapped; + + /* input buffer only, no need to invalidate cache */ + + return (ia_css_input_buffer_css_address)b->css_address; +} diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/buffer/src/cpu/ia_css_output_buffer.c b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/buffer/src/cpu/ia_css_output_buffer.c new file mode 100644 index 0000000000000..30bc8d52a5a9e --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/buffer/src/cpu/ia_css_output_buffer.c @@ -0,0 +1,181 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+*/ + + +#include "ia_css_output_buffer_cpu.h" +#include "ia_css_buffer.h" +#include "vied/shared_memory_access.h" +#include "vied/shared_memory_map.h" +#include "cpu_mem_support.h" + + +ia_css_output_buffer +ia_css_output_buffer_alloc( + vied_subsystem_t sid, + vied_memory_t mid, + unsigned int size) +{ + ia_css_output_buffer b; + + /* allocate buffer container */ + b = ia_css_cpu_mem_alloc(sizeof(*b)); + if (b == NULL) + return NULL; + + b->mem = shared_memory_alloc(mid, size); + if (b->mem == 0) { + ia_css_cpu_mem_free(b); + return NULL; + } + +#ifndef HRT_HW + /* initialize the buffer to avoid warnings when copying */ + shared_memory_zero(mid, b->mem, size); + + /* in simulation, we need to allocate a shadow host buffer */ + b->cpu_address = ia_css_cpu_mem_alloc_page_aligned(size); + if (b->cpu_address == NULL) { + shared_memory_free(mid, b->mem); + ia_css_cpu_mem_free(b); + return NULL; + } +#else + /* on hw / real platform we can use the pointer from + * shared memory alloc + */ + b->cpu_address = (void *)HOST_ADDRESS(b->mem); +#endif + + b->css_address = shared_memory_map(sid, mid, b->mem); + + b->size = size; + b->state = buffer_unmapped; + + return b; +} + + +void +ia_css_output_buffer_free( + vied_subsystem_t sid, + vied_memory_t mid, + ia_css_output_buffer b) +{ + if (b == NULL) + return; + if (b->state != buffer_unmapped) + return; + +#ifndef HRT_HW + /* only free if we actually allocated it separately */ + ia_css_cpu_mem_free(b->cpu_address); +#endif + shared_memory_unmap(sid, mid, b->css_address); + shared_memory_free(mid, b->mem); + ia_css_cpu_mem_free(b); +} + + +ia_css_output_buffer_css_address +ia_css_output_buffer_css_map(ia_css_output_buffer b) +{ + if (b == NULL) + return 0; + if (b->state != buffer_unmapped) + return 0; + + /* map output buffer to CSS address space, acquire write access */ + b->state = buffer_write; + + return (ia_css_output_buffer_css_address)b->css_address; +} + + +ia_css_output_buffer_css_address 
+ia_css_output_buffer_css_unmap(ia_css_output_buffer b) +{ + if (b == NULL) + return 0; + if (b->state != buffer_write) + return 0; + + /* unmap output buffer from CSS address space, release write access */ + b->state = buffer_unmapped; + + return (ia_css_output_buffer_css_address)b->css_address; +} + + +ia_css_output_buffer_cpu_address +ia_css_output_buffer_cpu_map(vied_memory_t mid, ia_css_output_buffer b) +{ + if (b == NULL) + return NULL; + if (b->state != buffer_unmapped) + return NULL; + + /* map output buffer to CPU address space, acquire read access */ + b->state = buffer_read; + +#ifndef HRT_HW + /* only in simulation */ + /* copy data from CSS address space to CPU address space */ + shared_memory_load(mid, b->mem, b->cpu_address, b->size); +#else + (void)mid; +#endif + /* now invalidate the cache */ + ia_css_cpu_mem_cache_invalidate(b->cpu_address, b->size); + + return b->cpu_address; +} + + +ia_css_output_buffer_cpu_address +ia_css_output_buffer_cpu_map_no_invalidate(vied_memory_t mid, ia_css_output_buffer b) +{ + if (b == NULL) + return NULL; + if (b->state != buffer_unmapped) + return NULL; + + /* map output buffer to CPU address space, acquire read access */ + b->state = buffer_read; + +#ifndef HRT_HW + /* only in simulation */ + /* copy data from CSS address space to CPU address space */ + shared_memory_load(mid, b->mem, b->cpu_address, b->size); +#else + (void)mid; +#endif + + return b->cpu_address; +} + +ia_css_output_buffer_cpu_address +ia_css_output_buffer_cpu_unmap(ia_css_output_buffer b) +{ + if (b == NULL) + return NULL; + if (b->state != buffer_read) + return NULL; + + /* unmap output buffer from CPU address space, release read access */ + b->state = buffer_unmapped; + + /* output only, no need to flush cache */ + + return b->cpu_address; +} diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/buffer/src/cpu/ia_css_shared_buffer.c b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/buffer/src/cpu/ia_css_shared_buffer.c new file mode 100644 
index 0000000000000..92b7110644fe3 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/buffer/src/cpu/ia_css_shared_buffer.c @@ -0,0 +1,187 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ + + +#include "ia_css_shared_buffer_cpu.h" +#include "ia_css_buffer.h" +#include "vied/shared_memory_access.h" +#include "vied/shared_memory_map.h" +#include "cpu_mem_support.h" + + +ia_css_shared_buffer +ia_css_shared_buffer_alloc( + vied_subsystem_t sid, + vied_memory_t mid, + unsigned int size) +{ + ia_css_shared_buffer b; + + /* allocate buffer container */ + b = ia_css_cpu_mem_alloc(sizeof(*b)); + if (b == NULL) + return NULL; + + b->mem = shared_memory_alloc(mid, size); + if (b->mem == 0) { + ia_css_cpu_mem_free(b); + return NULL; + } + +#ifndef HRT_HW + /* initialize the buffer to avoid warnings when copying */ + shared_memory_zero(mid, b->mem, size); + + /* in simulation, we need to allocate a shadow host buffer */ + b->cpu_address = ia_css_cpu_mem_alloc_page_aligned(size); + if (b->cpu_address == NULL) { + shared_memory_free(mid, b->mem); + ia_css_cpu_mem_free(b); + return NULL; + } +#else + /* on hw / real platform we can use the pointer from + * shared memory alloc + */ + b->cpu_address = (void *)HOST_ADDRESS(b->mem); +#endif + + b->css_address = shared_memory_map(sid, mid, b->mem); + + b->size = size; + b->state = buffer_unmapped; + + return b; +} + + +void +ia_css_shared_buffer_free( + vied_subsystem_t sid, + vied_memory_t mid, + 
ia_css_shared_buffer b) +{ + if (b == NULL) + return; + if (b->state != buffer_unmapped) + return; + +#ifndef HRT_HW + /* only free if we actually allocated it separately */ + ia_css_cpu_mem_free(b->cpu_address); +#endif + shared_memory_unmap(sid, mid, b->css_address); + shared_memory_free(mid, b->mem); + ia_css_cpu_mem_free(b); +} + + +ia_css_shared_buffer_cpu_address +ia_css_shared_buffer_cpu_map(ia_css_shared_buffer b) +{ + if (b == NULL) + return NULL; + if (b->state != buffer_unmapped) + return NULL; + + /* map shared buffer to CPU address space */ + b->state = buffer_cpu; + + return b->cpu_address; +} + + +ia_css_shared_buffer_cpu_address +ia_css_shared_buffer_cpu_unmap(ia_css_shared_buffer b) +{ + if (b == NULL) + return NULL; + if (b->state != buffer_cpu) + return NULL; + + /* unmap shared buffer from CPU address space */ + b->state = buffer_unmapped; + + return b->cpu_address; +} + + +ia_css_shared_buffer_css_address +ia_css_shared_buffer_css_map(ia_css_shared_buffer b) +{ + if (b == NULL) + return 0; + if (b->state != buffer_unmapped) + return 0; + + /* map shared buffer to CSS address space */ + b->state = buffer_css; + + return (ia_css_shared_buffer_css_address)b->css_address; +} + + +ia_css_shared_buffer_css_address +ia_css_shared_buffer_css_unmap(ia_css_shared_buffer b) +{ + if (b == NULL) + return 0; + if (b->state != buffer_css) + return 0; + + /* unmap shared buffer from CSS address space */ + b->state = buffer_unmapped; + + return (ia_css_shared_buffer_css_address)b->css_address; +} + + +ia_css_shared_buffer +ia_css_shared_buffer_css_update(vied_memory_t mid, ia_css_shared_buffer b) +{ + if (b == NULL) + return NULL; + + /* flush the buffer to CSS after it was modified by the CPU */ + /* flush cache to ddr */ + ia_css_cpu_mem_cache_flush(b->cpu_address, b->size); +#ifndef HRT_HW + /* copy data from CPU address space to CSS address space */ + shared_memory_store(mid, b->mem, b->cpu_address, b->size); +#else + (void)mid; +#endif + + return b; +} + + 
+ia_css_shared_buffer +ia_css_shared_buffer_cpu_update(vied_memory_t mid, ia_css_shared_buffer b) +{ + if (b == NULL) + return NULL; + + /* flush the buffer to the CPU after it has been modified by CSS */ +#ifndef HRT_HW + /* copy data from CSS address space to CPU address space */ + shared_memory_load(mid, b->mem, b->cpu_address, b->size); +#else + (void)mid; +#endif + /* invalidate cache so the CPU re-reads the data from ddr */ + ia_css_cpu_mem_cache_invalidate(b->cpu_address, b->size); + + return b; +} diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/cell/cell.mk b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/cell/cell.mk new file mode 100644 index 0000000000000..fa5e650226017 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/cell/cell.mk @@ -0,0 +1,43 @@ +# # # +# Support for Intel Camera Imaging ISP subsystem. +# Copyright (c) 2010 - 2018, Intel Corporation. +# +# This program is free software; you can redistribute it and/or modify it +# under the terms and conditions of the GNU General Public License, +# version 2, as published by the Free Software Foundation. +# +# This program is distributed in the hope it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +# FITNESS FOR A PARTICULAR PURPOSE. 
 See the GNU General Public License for +# more details +# +# +ifndef _CELL_MK_ +_CELL_MK_ = 1 + + +CELL_DIR=$${MODULES_DIR}/cell +CELL_INTERFACE=$(CELL_DIR)/interface +CELL_SOURCES=$(CELL_DIR)/src + +CELL_HOST_FILES = +CELL_FW_FILES = + +CELL_HOST_CPPFLAGS = \ + -I$(CELL_INTERFACE) \ + -I$(CELL_SOURCES) + +CELL_FW_CPPFLAGS = \ + -I$(CELL_INTERFACE) \ + -I$(CELL_SOURCES) + +ifeq (0,1) +# Disabled until it is decided to go this way or not +include $(MODULES_DIR)/device_access/device_access.mk +CELL_HOST_FILES += $(DEVICE_ACCESS_HOST_FILES) +CELL_FW_FILES += $(DEVICE_ACCESS_FW_FILES) +CELL_HOST_CPPFLAGS += $(DEVICE_ACCESS_HOST_CPPFLAGS) +CELL_FW_CPPFLAGS += $(DEVICE_ACCESS_FW_CPPFLAGS) +endif + +endif diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/cell/interface/ia_css_cell.h b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/cell/interface/ia_css_cell.h new file mode 100644 index 0000000000000..3fac3c791b6e6 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/cell/interface/ia_css_cell.h @@ -0,0 +1,112 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+*/ + +#ifndef __IA_CSS_CELL_H +#define __IA_CSS_CELL_H + +#include "storage_class.h" +#include "type_support.h" + +STORAGE_CLASS_INLINE unsigned int +ia_css_cell_get_stat_ctrl(unsigned int ssid, unsigned int cell_id); + +STORAGE_CLASS_INLINE void +ia_css_cell_set_stat_ctrl(unsigned int ssid, unsigned int cell_id, + unsigned int value); + +STORAGE_CLASS_INLINE void +ia_css_cell_set_start_pc(unsigned int ssid, unsigned int cell_id, + unsigned int pc); + +STORAGE_CLASS_INLINE void +ia_css_cell_set_icache_base_address(unsigned int ssid, unsigned int cell_id, + unsigned int value); + +#if 0 /* To be implemented after completing cell device properties */ +STORAGE_CLASS_INLINE void +ia_css_cell_set_icache_info_bits(unsigned int ssid, unsigned int cell_id, + unsigned int value); + +STORAGE_CLASS_INLINE unsigned int +ia_css_cell_get_debug_pc(unsigned int ssid, unsigned int cell_id); + +STORAGE_CLASS_INLINE unsigned int +ia_css_cell_get_stall_bits(unsigned int ssid, unsigned int cell_id); +#endif + +/* configure master ports */ + +STORAGE_CLASS_INLINE void +ia_css_cell_set_master_base_address(unsigned int ssid, unsigned int cell_id, + unsigned int master, unsigned int value); + +STORAGE_CLASS_INLINE void +ia_css_cell_set_master_segment_base_address(unsigned int ssid, + unsigned int cell_id, + unsigned int master, unsigned int segment, unsigned int value); + +STORAGE_CLASS_INLINE void +ia_css_cell_set_master_info_bits(unsigned int ssid, unsigned int cell_id, + unsigned int master, unsigned int value); + +STORAGE_CLASS_INLINE void +ia_css_cell_set_master_segment_info_bits(unsigned int ssid, + unsigned int cell_id, + unsigned int master, unsigned int segment, unsigned int value); + +STORAGE_CLASS_INLINE void +ia_css_cell_set_master_info_override_bits(unsigned int ssid, unsigned int cell, + unsigned int master, unsigned int value); + +STORAGE_CLASS_INLINE void +ia_css_cell_set_master_segment_info_override_bits(unsigned int ssid, + unsigned int cell, + unsigned int master, 
unsigned int segment, unsigned int value); + +/* Access memories */ + +STORAGE_CLASS_INLINE void +ia_css_cell_mem_store_32(unsigned int ssid, unsigned int cell_id, + unsigned int mem_id, unsigned int addr, unsigned int value); + +STORAGE_CLASS_INLINE unsigned int +ia_css_cell_mem_load_32(unsigned int ssid, unsigned int cell_id, + unsigned int mem_id, unsigned int addr); + +/***********************************************************************/ + +STORAGE_CLASS_INLINE unsigned int +ia_css_cell_is_ready(unsigned int ssid, unsigned int cell_id); + +STORAGE_CLASS_INLINE void +ia_css_cell_set_start_bit(unsigned int ssid, unsigned int cell_id); + +STORAGE_CLASS_INLINE void +ia_css_cell_set_run_bit(unsigned int ssid, unsigned int cell_id, + unsigned int value); + +STORAGE_CLASS_INLINE void +ia_css_cell_start(unsigned int ssid, unsigned int cell_id); + +STORAGE_CLASS_INLINE void +ia_css_cell_start_prefetch(unsigned int ssid, unsigned int cell_id, + bool prefetch); + +STORAGE_CLASS_INLINE void +ia_css_cell_wait(unsigned int ssid, unsigned int cell_id); + +/* include inline implementation */ +#include "ia_css_cell_impl.h" + +#endif /* __IA_CSS_CELL_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/cell/src/ia_css_cell_impl.h b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/cell/src/ia_css_cell_impl.h new file mode 100644 index 0000000000000..60b2e234da1a0 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/cell/src/ia_css_cell_impl.h @@ -0,0 +1,272 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. 
+ * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ + +#ifndef __IA_CSS_CELL_IMPL_H +#define __IA_CSS_CELL_IMPL_H + +#include "ia_css_cell.h" + +#include "ia_css_cmem.h" +#include "ipu_device_cell_properties.h" +#include "storage_class.h" +#include "assert_support.h" +#include "platform_support.h" +#include "misc_support.h" + +STORAGE_CLASS_INLINE unsigned int +ia_css_cell_regs_addr(unsigned int cell_id) +{ + /* mem_id 0 is for registers */ + return ipu_device_cell_memory_address(cell_id, 0); +} + +STORAGE_CLASS_INLINE unsigned int +ia_css_cell_dmem_addr(unsigned int cell_id) +{ + /* mem_id 1 is for DMEM */ + return ipu_device_cell_memory_address(cell_id, 1); +} + +STORAGE_CLASS_INLINE void +ia_css_cell_mem_store_32(unsigned int ssid, unsigned int cell_id, + unsigned int mem_id, unsigned int addr, unsigned int value) +{ + ia_css_cmem_store_32( + ssid, ipu_device_cell_memory_address( + cell_id, mem_id) + addr, value); +} + +STORAGE_CLASS_INLINE unsigned int +ia_css_cell_mem_load_32(unsigned int ssid, unsigned int cell_id, + unsigned int mem_id, unsigned int addr) +{ + return ia_css_cmem_load_32( + ssid, ipu_device_cell_memory_address(cell_id, mem_id) + addr); +} + +STORAGE_CLASS_INLINE unsigned int +ia_css_cell_get_stat_ctrl(unsigned int ssid, unsigned int cell_id) +{ + return ia_css_cmem_load_32( + ssid, ia_css_cell_regs_addr(cell_id) + + IPU_DEVICE_CELL_STAT_CTRL_REG_ADDRESS); +} + +STORAGE_CLASS_INLINE void +ia_css_cell_set_stat_ctrl(unsigned int ssid, unsigned int cell_id, + unsigned int value) +{ + ia_css_cmem_store_32( + ssid, ia_css_cell_regs_addr(cell_id) + + IPU_DEVICE_CELL_STAT_CTRL_REG_ADDRESS, value); +} + +STORAGE_CLASS_INLINE unsigned int +ia_css_cell_is_ready(unsigned int ssid, unsigned int cell_id) +{ + unsigned int reg; + + reg = 
ia_css_cell_get_stat_ctrl(ssid, cell_id); + /* READY must be 1, START must be 0 */ + return (reg & (1 << IPU_DEVICE_CELL_STAT_CTRL_READY_BIT)) && + ((~reg) & (1 << IPU_DEVICE_CELL_STAT_CTRL_START_BIT)); +} + +STORAGE_CLASS_INLINE void +ia_css_cell_set_start_pc(unsigned int ssid, unsigned int cell_id, + unsigned int pc) +{ + /* set start PC */ + ia_css_cmem_store_32( + ssid, ia_css_cell_regs_addr(cell_id) + + IPU_DEVICE_CELL_START_PC_REG_ADDRESS, pc); +} + +STORAGE_CLASS_INLINE void +ia_css_cell_set_start_bit(unsigned int ssid, unsigned int cell_id) +{ + unsigned int reg; + + reg = 1 << IPU_DEVICE_CELL_STAT_CTRL_START_BIT; + ia_css_cell_set_stat_ctrl(ssid, cell_id, reg); +} + +STORAGE_CLASS_INLINE void +ia_css_cell_set_run_bit(unsigned int ssid, unsigned int cell_id, + unsigned int value) +{ + unsigned int reg; + + reg = value << IPU_DEVICE_CELL_STAT_CTRL_RUN_BIT; + ia_css_cell_set_stat_ctrl(ssid, cell_id, reg); +} + +STORAGE_CLASS_INLINE void +ia_css_cell_start(unsigned int ssid, unsigned int cell_id) +{ + ia_css_cell_start_prefetch(ssid, cell_id, 0); +} + +STORAGE_CLASS_INLINE void +ia_css_cell_start_prefetch(unsigned int ssid, unsigned int cell_id, + bool prefetch) +{ + unsigned int reg = 0; + + /* Set run bit and start bit */ + reg |= (1 << IPU_DEVICE_CELL_STAT_CTRL_START_BIT); + reg |= (1 << IPU_DEVICE_CELL_STAT_CTRL_RUN_BIT); + /* Invalidate the icache */ + reg |= (1 << IPU_DEVICE_CELL_STAT_CTRL_INVALIDATE_ICACHE_BIT); + /* Optionally enable prefetching */ + reg |= ((prefetch == 1) ? 
+ (1 << IPU_DEVICE_CELL_STAT_CTRL_ICACHE_ENABLE_PREFETCH_BIT) : + 0); + + /* store into register */ + ia_css_cell_set_stat_ctrl(ssid, cell_id, reg); +} + +STORAGE_CLASS_INLINE void +ia_css_cell_wait(unsigned int ssid, unsigned int cell_id) +{ + do { + ia_css_sleep(); + } while (!ia_css_cell_is_ready(ssid, cell_id)); +} + +STORAGE_CLASS_INLINE void +ia_css_cell_set_icache_base_address(unsigned int ssid, unsigned int cell_id, + unsigned int value) +{ + ia_css_cmem_store_32( + ssid, ia_css_cell_regs_addr(cell_id) + + IPU_DEVICE_CELL_ICACHE_BASE_REG_ADDRESS, value); +} + +/* master port configuration */ + + +STORAGE_CLASS_INLINE void +ia_css_cell_set_master_segment_info_bits(unsigned int ssid, unsigned int cell, + unsigned int master, unsigned int segment, unsigned int value) +{ + unsigned int addr; + + assert(cell < ipu_device_cell_num_devices()); + assert(master < ipu_device_cell_num_masters(cell)); + assert(segment < ipu_device_cell_master_num_segments(cell, master)); + + addr = ipu_device_cell_memory_address(cell, 0); + addr += ipu_device_cell_master_info_reg(cell, master); + addr += segment * ipu_device_cell_master_stride(cell, master); + ia_css_cmem_store_32(ssid, addr, value); +} + +STORAGE_CLASS_INLINE void +ia_css_cell_set_master_segment_info_override_bits(unsigned int ssid, + unsigned int cell, + unsigned int master, unsigned int segment, unsigned int value) +{ + unsigned int addr; + + assert(cell < ipu_device_cell_num_devices()); + assert(master < ipu_device_cell_num_masters(cell)); + assert(segment < ipu_device_cell_master_num_segments(cell, master)); + + addr = ipu_device_cell_memory_address(cell, 0); + addr += ipu_device_cell_master_info_override_reg(cell, master); + addr += segment * ipu_device_cell_master_stride(cell, master); + ia_css_cmem_store_32(ssid, addr, value); +} + +STORAGE_CLASS_INLINE void +ia_css_cell_set_master_segment_base_address(unsigned int ssid, + unsigned int cell, + unsigned int master, unsigned int segment, unsigned int value) + +{ 
+ unsigned int addr; + + assert(cell < ipu_device_cell_num_devices()); + assert(master < ipu_device_cell_num_masters(cell)); + assert(segment < ipu_device_cell_master_num_segments(cell, master)); + + addr = ipu_device_cell_memory_address(cell, 0); + addr += ipu_device_cell_master_base_reg(cell, master); + addr += segment * ipu_device_cell_master_stride(cell, master); + ia_css_cmem_store_32(ssid, addr, value); +} + +STORAGE_CLASS_INLINE void +ia_css_cell_set_master_info_bits(unsigned int ssid, unsigned int cell, + unsigned int master, unsigned int value) +{ + unsigned int addr, s, stride, num_segments; + + assert(cell < ipu_device_cell_num_devices()); + assert(master < ipu_device_cell_num_masters(cell)); + + addr = ipu_device_cell_memory_address(cell, 0); + addr += ipu_device_cell_master_info_reg(cell, master); + stride = ipu_device_cell_master_stride(cell, master); + num_segments = ipu_device_cell_master_num_segments(cell, master); + for (s = 0; s < num_segments; s++) { + ia_css_cmem_store_32(ssid, addr, value); + addr += stride; + } +} + +STORAGE_CLASS_INLINE void +ia_css_cell_set_master_info_override_bits(unsigned int ssid, unsigned int cell, + unsigned int master, unsigned int value) +{ + unsigned int addr, s, stride, num_segments; + + assert(cell < ipu_device_cell_num_devices()); + assert(master < ipu_device_cell_num_masters(cell)); + + addr = ipu_device_cell_memory_address(cell, 0); + addr += ipu_device_cell_master_info_override_reg(cell, master); + stride = ipu_device_cell_master_stride(cell, master); + num_segments = ipu_device_cell_master_num_segments(cell, master); + for (s = 0; s < num_segments; s++) { + ia_css_cmem_store_32(ssid, addr, value); + addr += stride; + } +} + +STORAGE_CLASS_INLINE void +ia_css_cell_set_master_base_address(unsigned int ssid, unsigned int cell, + unsigned int master, unsigned int value) +{ + unsigned int addr, s, stride, num_segments, segment_size; + + assert(cell < ipu_device_cell_num_devices()); + assert(master < 
ipu_device_cell_num_masters(cell)); + + addr = ipu_device_cell_memory_address(cell, 0); + addr += ipu_device_cell_master_base_reg(cell, master); + stride = ipu_device_cell_master_stride(cell, master); + num_segments = ipu_device_cell_master_num_segments(cell, master); + segment_size = ipu_device_cell_master_segment_size(cell, master); + + for (s = 0; s < num_segments; s++) { + ia_css_cmem_store_32(ssid, addr, value); + addr += stride; + value += segment_size; + } +} + +#endif /* __IA_CSS_CELL_IMPL_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/config/isys/subsystem_bxtB0.mk b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/config/isys/subsystem_bxtB0.mk new file mode 100644 index 0000000000000..da142032349f1 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/config/isys/subsystem_bxtB0.mk @@ -0,0 +1,60 @@ +# # # +# Support for Intel Camera Imaging ISP subsystem. +# Copyright (c) 2010 - 2018, Intel Corporation. +# +# This program is free software; you can redistribute it and/or modify it +# under the terms and conditions of the GNU General Public License, +# version 2, as published by the Free Software Foundation. +# +# This program is distributed in the hope it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for +# more details +# + +############################################################################ +# This file is used to specify versions and properties of ISYS firmware +# components. Please note that these are subsystem specific. System specific +# properties should go to system_$IPU_SYSVER.mk. Also the device versions +# should be defined under "devices" or should be taken from the SDK. 
+############################################################################ + +############################################################################ +# FIRMWARE RELATED VARIABLES +############################################################################ + +# Activate loading params and storing stats DDR<->REGs with DMA +ISYS_USE_ISA_DMA = 1 +# Used in ISA module +ISYS_ISL_DPC_DPC_V2 = 0 + +# Specification for Isys server's fixed globals' locations +REGMEM_OFFSET = 0 # Starting from 0 +REGMEM_SIZE = 34 +REGMEM_WORD_BYTES = 4 +FW_LOAD_NO_OF_REQUEST_OFFSET = 136 # Taken from REGMEM_OFFSET + REGMEM_SIZE_BYTES +FW_LOAD_NO_OF_REQUEST_SIZE_BYTES = 4 + +# Workarounds: + +# This WA is not to pipeline store frame commands for SID processors that control a Str2Vec (ISA output) +WA_HSD1304553438 = 1 + +# Larger than specified frames that complete mid-line +WA_HSD1209062354 = 1 + +# WA to disable clock gating for the devices in the CSI receivers needed for using the mipi_pkt_gen device +WA_HSD1805168877 = 0 + +# Support IBUF soft-reset at stream start +SOFT_RESET_IBUF_STREAM_START_SUPPORT = 1 + +############################################################################ +# TESTING RELATED VARIABLES +############################################################################ + +# TODO: This define should be entirely removed. +# Used in mipi_capture +ISYS_DISABLE_VERIFY_RECEIVED_SOF_EOF = 0 + +ISYS_ACCESS_BLOCKER_VERSION = v1 diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/config/system_bxtB0.mk b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/config/system_bxtB0.mk new file mode 100644 index 0000000000000..24d079b405167 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/config/system_bxtB0.mk @@ -0,0 +1,88 @@ +# # # +# Support for Intel Camera Imaging ISP subsystem. +# Copyright (c) 2010 - 2018, Intel Corporation. 
+# +# This program is free software; you can redistribute it and/or modify it +# under the terms and conditions of the GNU General Public License, +# version 2, as published by the Free Software Foundation. +# +# This program is distributed in the hope it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for +# more details +# + +LOGICAL_FW_INPUT_SYSTEM = input_system_system +LOGICAL_FW_PROCESSING_SYSTEM = processing_system_system +LOGICAL_FW_IPU_SYSTEM = css_broxton_system +LOGICAL_FW_ISP_SYSTEM = isp2601_default_system +SP_CONTROL_CELL = sp2601_control +SP_PROXY_CELL = sp2601_proxy +SP_FP_CELL = sp2601_fp +ISP_CELL = isp2601 +# The non-capital define isp2601 is used in the sdk, in order to distinguish +# between different isp versions the ISP_CELL_IDENTIFIER define is added. +ISP_CELL_IDENTIFIER = ISP2601 +HAS_IPFD = 1 +HAS_S2M_IN_ISYS_ISL_NONSOC_PATH = 0 +HAS_S2V_IN_ISYS_ISL_NONSOC_PATH = 1 +# ISL-IS non-SoC path has ISA without PAF and DPC-Pext support for IPU4-B0 +HAS_ISA_IN_ISYS_ISL = 1 +HAS_PAF_IN_ISYS_ISL = 0 +HAS_DPC_PEXT_IN_ISYS_ISL = 0 +HAS_PMA_IF = 0 + +HAS_MIPIBE_IN_PSYS_ISL = 1 + +HAS_VPLESS_SUPPORT = 0 + +DLI_SYSTEM = hive_isp_css_2600_system +RESOURCE_MANAGER_VERSION = v1 +MEM_RESOURCE_VALIDATION_ERROR = 0 +OFS_SCALER_1_4K_TILEY_422_SUPPORT= 1 +PROGDESC_ACC_SYMBOLS_VERSION = v1 +DEVPROXY_INTERFACE_VERSION = v1 +FW_ABI_IPU_TYPES_VERSION = v1 + +HAS_ONLINE_MODE_SUPPORT_IN_ISYS_PSYS = 0 + +MMU_INTERFACE_VERSION = v1 +DEVICE_ACCESS_VERSION = v2 +PSYS_SERVER_VERSION = v2 +PSYS_SERVER_LOADER_VERSION = v1 +PSYS_HW_VERSION = BXT_B0_HW + +# Enable FW_DMA for loading firmware +PSYS_SERVER_ENABLE_FW_LOAD_DMA = 1 + +NCI_SPA_VERSION = v1 +MANIFEST_TOOL_VERSION = v2 +PSYS_CON_MGR_TOOL_VERSION = v1 +# TODO: Should be removed after performance issues OTF are solved +PSYS_PROC_MGR_VERSION = v1 +IPU_RESOURCES_VERSION = v1 + 
+HAS_ACC_CLUSTER_PAF_PAL = 0 +HAS_ACC_CLUSTER_PEXT_PAL = 0 +HAS_ACC_CLUSTER_GBL_PAL = 1 + +# TODO use version naming scheme "v#" to decouple +# IPU_SYSVER from version. +PARAMBINTOOL_ISA_INIT_VERSION = bxtB0 + +# Select EQC2EQ version +# Version 1: uniform address space, equal EQ addresses regardless of EQC device +# Version 2: multiple addresses per EQ, depending on location of EQC device +EQC2EQ_VERSION = v1 + +# Select DMA instance for fw_load +FW_LOAD_DMA_INSTANCE = NCI_DMA_FW + +HAS_DMA_FW = 1 + +HAS_SIS = 0 +HAS_IDS = 1 + +PSYS_SERVER_ENABLE_TPROXY = 1 +PSYS_SERVER_ENABLE_DEVPROXY = 1 +NCI_OFS_VERSION = v1 diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/device_access/device_access.mk b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/device_access/device_access.mk new file mode 100644 index 0000000000000..1629d9af803b6 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/device_access/device_access.mk @@ -0,0 +1,40 @@ +# # # +# Support for Intel Camera Imaging ISP subsystem. +# Copyright (c) 2010 - 2018, Intel Corporation. +# +# This program is free software; you can redistribute it and/or modify it +# under the terms and conditions of the GNU General Public License, +# version 2, as published by the Free Software Foundation. +# +# This program is distributed in the hope it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +# FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU General Public License for +# more details +# +# + +ifndef _DEVICE_ACCESS_MK_ +_DEVICE_ACCESS_MK_ = 1 + +# DEVICE_ACCESS_VERSION= +include $(MODULES_DIR)/config/system_$(IPU_SYSVER).mk + +DEVICE_ACCESS_DIR=$${MODULES_DIR}/device_access +DEVICE_ACCESS_INTERFACE=$(DEVICE_ACCESS_DIR)/interface +DEVICE_ACCESS_SOURCES=$(DEVICE_ACCESS_DIR)/src + +DEVICE_ACCESS_HOST_FILES = + +DEVICE_ACCESS_FW_FILES = + +DEVICE_ACCESS_HOST_CPPFLAGS = \ + -I$(DEVICE_ACCESS_INTERFACE) \ + -I$(DEVICE_ACCESS_SOURCES) + +DEVICE_ACCESS_FW_CPPFLAGS = \ + -I$(DEVICE_ACCESS_INTERFACE) \ + -I$(DEVICE_ACCESS_SOURCES) + +DEVICE_ACCESS_FW_CPPFLAGS += \ + -I$(DEVICE_ACCESS_SOURCES)/$(DEVICE_ACCESS_VERSION) +endif diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/device_access/interface/ia_css_cmem.h b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/device_access/interface/ia_css_cmem.h new file mode 100644 index 0000000000000..3dc47c29fcab7 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/device_access/interface/ia_css_cmem.h @@ -0,0 +1,58 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+*/ + +#ifndef __IA_CSS_CMEM_H +#define __IA_CSS_CMEM_H + +#include "type_support.h" +#include "storage_class.h" + +#ifdef __VIED_CELL +typedef unsigned int ia_css_cmem_address_t; +#else +#include +typedef vied_subsystem_address_t ia_css_cmem_address_t; +#endif + +STORAGE_CLASS_INLINE uint32_t +ia_css_cmem_load_32(unsigned int ssid, ia_css_cmem_address_t address); + +STORAGE_CLASS_INLINE void +ia_css_cmem_store_32(unsigned int ssid, ia_css_cmem_address_t address, + uint32_t value); + +STORAGE_CLASS_INLINE void +ia_css_cmem_load(unsigned int ssid, ia_css_cmem_address_t address, void *data, + unsigned int size); + +STORAGE_CLASS_INLINE void +ia_css_cmem_store(unsigned int ssid, ia_css_cmem_address_t address, + const void *data, unsigned int size); + +STORAGE_CLASS_INLINE void +ia_css_cmem_zero(unsigned int ssid, ia_css_cmem_address_t address, + unsigned int size); + +STORAGE_CLASS_INLINE ia_css_cmem_address_t +ia_css_cmem_get_cmem_addr_from_dmem(unsigned int base_addr, void *p); + +/* Include inline implementation */ + +#ifdef __VIED_CELL +#include "ia_css_cmem_cell.h" +#else +#include "ia_css_cmem_host.h" +#endif + +#endif /* __IA_CSS_CMEM_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/device_access/interface/ia_css_xmem.h b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/device_access/interface/ia_css_xmem.h new file mode 100644 index 0000000000000..de2b94d8af541 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/device_access/interface/ia_css_xmem.h @@ -0,0 +1,65 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. 
+ * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ + +#ifndef __IA_CSS_XMEM_H +#define __IA_CSS_XMEM_H + +#include "type_support.h" +#include "storage_class.h" + +#ifdef __VIED_CELL +typedef unsigned int ia_css_xmem_address_t; +#else +#include +typedef host_virtual_address_t ia_css_xmem_address_t; +#endif + +STORAGE_CLASS_INLINE uint8_t +ia_css_xmem_load_8(unsigned int mmid, ia_css_xmem_address_t address); + +STORAGE_CLASS_INLINE uint16_t +ia_css_xmem_load_16(unsigned int mmid, ia_css_xmem_address_t address); + +STORAGE_CLASS_INLINE uint32_t +ia_css_xmem_load_32(unsigned int mmid, ia_css_xmem_address_t address); + +STORAGE_CLASS_INLINE void +ia_css_xmem_load(unsigned int mmid, ia_css_xmem_address_t address, void *data, + unsigned int size); + +STORAGE_CLASS_INLINE void +ia_css_xmem_store_8(unsigned int mmid, ia_css_xmem_address_t address, + uint8_t value); + +STORAGE_CLASS_INLINE void +ia_css_xmem_store_16(unsigned int mmid, ia_css_xmem_address_t address, + uint16_t value); + +STORAGE_CLASS_INLINE void +ia_css_xmem_store_32(unsigned int mmid, ia_css_xmem_address_t address, + uint32_t value); + +STORAGE_CLASS_INLINE void +ia_css_xmem_store(unsigned int mmid, ia_css_xmem_address_t address, + const void *data, unsigned int bytes); + +/* Include inline implementation */ + +#ifdef __VIED_CELL +#include "ia_css_xmem_cell.h" +#else +#include "ia_css_xmem_host.h" +#endif + +#endif /* __IA_CSS_XMEM_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/device_access/interface/ia_css_xmem_cmem.h b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/device_access/interface/ia_css_xmem_cmem.h new file mode 100644 index 0000000000000..57aab3323c739 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/device_access/interface/ia_css_xmem_cmem.h @@ -0,0 +1,35 @@ +/** 
+* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ + +#ifndef __IA_CSS_XMEM_CMEM_H +#define __IA_CSS_XMEM_CMEM_H + +#include "ia_css_cmem.h" +#include "ia_css_xmem.h" + +/* Copy data from xmem to cmem, e.g., from a program in DDR to a cell's DMEM */ +/* This may also be implemented using DMA */ + +STORAGE_CLASS_INLINE void +ia_css_xmem_to_cmem_copy( + unsigned int mmid, + unsigned int ssid, + ia_css_xmem_address_t src, + ia_css_cmem_address_t dst, + unsigned int size); + +/* include inline implementation */ +#include "ia_css_xmem_cmem_impl.h" + +#endif /* __IA_CSS_XMEM_CMEM_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/device_access/src/ia_css_cmem_host.h b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/device_access/src/ia_css_cmem_host.h new file mode 100644 index 0000000000000..22799e67214c1 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/device_access/src/ia_css_cmem_host.h @@ -0,0 +1,121 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. 
 See the GNU General Public License for + * more details. +*/ + +#ifndef __IA_CSS_CMEM_HOST_H +#define __IA_CSS_CMEM_HOST_H + +/* This file is an inline implementation for the interface ia_css_cmem.h + * and should only be included there. */ + +#include "assert_support.h" +#include "misc_support.h" + +STORAGE_CLASS_INLINE uint32_t +ia_css_cmem_load_32(unsigned int ssid, ia_css_cmem_address_t address) +{ + /* Address has to be word aligned */ + assert(0 == address % 4); + return vied_subsystem_load_32(ssid, address); +} + +STORAGE_CLASS_INLINE uint32_t +ia_css_cond_cmem_load_32(bool cond, unsigned int ssid, + ia_css_cmem_address_t address) +{ + /* Address has to be word aligned */ + assert(0 == address % 4); + if (cond) + return vied_subsystem_load_32(ssid, address); + else + return 0; +} + +STORAGE_CLASS_INLINE void +ia_css_cmem_store_32(unsigned int ssid, ia_css_cmem_address_t address, + uint32_t data) +{ + /* Address has to be word aligned */ + assert(0 == address % 4); + vied_subsystem_store_32(ssid, address, data); +} + +STORAGE_CLASS_INLINE void +ia_css_cond_cmem_store_32(bool cond, unsigned int ssid, + ia_css_cmem_address_t address, uint32_t data) +{ + /* Address has to be word aligned */ + assert(0 == address % 4); + if (cond) + vied_subsystem_store_32(ssid, address, data); +} + +STORAGE_CLASS_INLINE void +ia_css_cmem_load(unsigned int ssid, ia_css_cmem_address_t address, void *data, + unsigned int size) +{ + uint32_t *data32 = (uint32_t *)data; + uint32_t end = address + size; + + assert(size % 4 == 0); + assert(address % 4 == 0); + assert((uintptr_t)data % 4 == 0); + + while (address != end) { + *data32 = ia_css_cmem_load_32(ssid, address); + address += 4; + data32 += 1; + } +} + +STORAGE_CLASS_INLINE void +ia_css_cmem_store(unsigned int ssid, ia_css_cmem_address_t address, + const void *data, unsigned int size) +{ + const uint32_t *data32 = (const uint32_t *)data; + uint32_t end = address + size; + + assert(size % 4 == 0); + assert(address % 4 == 0); + assert((uintptr_t)data % 
4 == 0); + + while (address != end) { + ia_css_cmem_store_32(ssid, address, *data32); + address += 4; + data32 += 1; + } +} + +STORAGE_CLASS_INLINE void +ia_css_cmem_zero(unsigned int ssid, ia_css_cmem_address_t address, + unsigned int size) +{ + uint32_t end = address + size; + + assert(size % 4 == 0); + assert(address % 4 == 0); + + while (address != end) { + ia_css_cmem_store_32(ssid, address, 0); + address += 4; + } +} + +STORAGE_CLASS_INLINE ia_css_cmem_address_t +ia_css_cmem_get_cmem_addr_from_dmem(unsigned int base_addr, void *p) +{ + NOT_USED(base_addr); + return (ia_css_cmem_address_t)(uintptr_t)p; +} + +#endif /* __IA_CSS_CMEM_HOST_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/device_access/src/ia_css_xmem_cmem_impl.h b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/device_access/src/ia_css_xmem_cmem_impl.h new file mode 100644 index 0000000000000..adc178b75059a --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/device_access/src/ia_css_xmem_cmem_impl.h @@ -0,0 +1,79 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+*/ + +#ifndef __IA_CSS_XMEM_CMEM_IMPL_H +#define __IA_CSS_XMEM_CMEM_IMPL_H + +#include "ia_css_xmem_cmem.h" + +#include "ia_css_cmem.h" +#include "ia_css_xmem.h" + +/* Copy data from xmem to cmem, e.g., from a program in DDR to a cell's DMEM */ +/* This may also be implemented using DMA */ + +STORAGE_CLASS_INLINE void +ia_css_xmem_to_cmem_copy( + unsigned int mmid, + unsigned int ssid, + ia_css_xmem_address_t src, + ia_css_cmem_address_t dst, + unsigned int size) +{ + /* copy from ddr to subsystem, e.g., cell dmem */ + ia_css_cmem_address_t end = dst + size; + + assert(size % 4 == 0); + assert((uintptr_t) dst % 4 == 0); + assert((uintptr_t) src % 4 == 0); + + while (dst != end) { + uint32_t data; + + data = ia_css_xmem_load_32(mmid, src); + ia_css_cmem_store_32(ssid, dst, data); + dst += 4; + src += 4; + } +} + +/* Copy data from cmem to xmem */ + +STORAGE_CLASS_INLINE void +ia_css_cmem_to_xmem_copy( + unsigned int mmid, + unsigned int ssid, + ia_css_cmem_address_t src, + ia_css_xmem_address_t dst, + unsigned int size) +{ + /* copy from ddr to subsystem, e.g., cell dmem */ + ia_css_xmem_address_t end = dst + size; + + assert(size % 4 == 0); + assert((uintptr_t) dst % 4 == 0); + assert((uintptr_t) src % 4 == 0); + + while (dst != end) { + uint32_t data; + + data = ia_css_cmem_load_32(mmid, src); + ia_css_xmem_store_32(ssid, dst, data); + dst += 4; + src += 4; + } +} + + +#endif /* __IA_CSS_XMEM_CMEM_IMPL_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/device_access/src/ia_css_xmem_host.h b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/device_access/src/ia_css_xmem_host.h new file mode 100644 index 0000000000000..d94991fc11143 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/device_access/src/ia_css_xmem_host.h @@ -0,0 +1,84 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. 
+ * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ + +#ifndef __IA_CSS_XMEM_HOST_H +#define __IA_CSS_XMEM_HOST_H + +#include "ia_css_xmem.h" +#include +#include "assert_support.h" +#include + +STORAGE_CLASS_INLINE uint8_t +ia_css_xmem_load_8(unsigned int mmid, ia_css_xmem_address_t address) +{ + return shared_memory_load_8(mmid, address); +} + +STORAGE_CLASS_INLINE uint16_t +ia_css_xmem_load_16(unsigned int mmid, ia_css_xmem_address_t address) +{ + /* Address has to be half-word aligned */ + assert(0 == (uintptr_t) address % 2); + return shared_memory_load_16(mmid, address); +} + +STORAGE_CLASS_INLINE uint32_t +ia_css_xmem_load_32(unsigned int mmid, ia_css_xmem_address_t address) +{ + /* Address has to be word aligned */ + assert(0 == (uintptr_t) address % 4); + return shared_memory_load_32(mmid, address); +} + +STORAGE_CLASS_INLINE void +ia_css_xmem_load(unsigned int mmid, ia_css_xmem_address_t address, void *data, + unsigned int size) +{ + shared_memory_load(mmid, address, data, size); +} + +STORAGE_CLASS_INLINE void +ia_css_xmem_store_8(unsigned int mmid, ia_css_xmem_address_t address, + uint8_t value) +{ + shared_memory_store_8(mmid, address, value); +} + +STORAGE_CLASS_INLINE void +ia_css_xmem_store_16(unsigned int mmid, ia_css_xmem_address_t address, + uint16_t value) +{ + /* Address has to be half-word aligned */ + assert(0 == (uintptr_t) address % 2); + shared_memory_store_16(mmid, address, value); +} + +STORAGE_CLASS_INLINE void +ia_css_xmem_store_32(unsigned int mmid, ia_css_xmem_address_t address, + uint32_t value) +{ + /* Address has to be word 
aligned */ + assert(0 == (uintptr_t) address % 4); + shared_memory_store_32(mmid, address, value); +} + +STORAGE_CLASS_INLINE void +ia_css_xmem_store(unsigned int mmid, ia_css_xmem_address_t address, + const void *data, unsigned int bytes) +{ + shared_memory_store(mmid, address, data, bytes); +} + +#endif /* __IA_CSS_XMEM_HOST_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/devices/interface/bxtB0/ipu_device_buttress_properties_struct.h b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/devices/interface/bxtB0/ipu_device_buttress_properties_struct.h new file mode 100644 index 0000000000000..5102f6e44d2f6 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/devices/interface/bxtB0/ipu_device_buttress_properties_struct.h @@ -0,0 +1,68 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+*/ + +#ifndef __IPU_DEVICE_BUTTRESS_PROPERTIES_STRUCT_H +#define __IPU_DEVICE_BUTTRESS_PROPERTIES_STRUCT_H + +/* Destination values for master port 0 and bitfield "request_dest" */ +enum cio_M0_btrs_dest { + DEST_IS_BUT_REGS = 0, + DEST_IS_DDR, + RESERVED, + DEST_IS_SUBSYSTEM, + N_BTRS_DEST +}; + +/* Bit-field positions for M0 info bits */ +enum ia_css_info_bits_m0_pos { + IA_CSS_INFO_BITS_M0_SNOOPABLE_POS = 0, + IA_CSS_INFO_BITS_M0_IMR_DESTINED_POS = 1, + IA_CSS_INFO_BITS_M0_REQUEST_DEST_POS = 4 +}; + +#define IA_CSS_INFO_BITS_M0_DDR \ + (DEST_IS_DDR << IA_CSS_INFO_BITS_M0_REQUEST_DEST_POS) +#define IA_CSS_INFO_BITS_M0_SNOOPABLE (1 << IA_CSS_INFO_BITS_M0_SNOOPABLE_POS) + +/* Info bits as expected by the buttress */ +/* Deprecated because bit fields are not portable */ + +/* For master port 0*/ +union cio_M0_t { + struct { + unsigned int snoopable : 1; + unsigned int imr_destined : 1; + unsigned int spare0 : 2; + unsigned int request_dest : 2; + unsigned int spare1 : 26; + } as_bitfield; + unsigned int as_word; +}; + +/* For master port 1*/ +union cio_M1_t { + struct { + unsigned int spare0 : 1; + unsigned int deadline_pointer : 1; + unsigned int reserved : 1; + unsigned int zlw : 1; + unsigned int stream_id : 4; + unsigned int address_swizzling : 1; + unsigned int spare1 : 23; + } as_bitfield; + unsigned int as_word; +}; + + +#endif /* __IPU_DEVICE_BUTTRESS_PROPERTIES_STRUCT_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/devices/interface/ipu_device_cell_properties.h b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/devices/interface/ipu_device_cell_properties.h new file mode 100644 index 0000000000000..e6e1e9dcbe80c --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/devices/interface/ipu_device_cell_properties.h @@ -0,0 +1,76 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. 
+ * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ + +#ifndef __IPU_DEVICE_CELL_PROPERTIES_H +#define __IPU_DEVICE_CELL_PROPERTIES_H + +#include "storage_class.h" +#include "ipu_device_cell_type_properties.h" + +STORAGE_CLASS_INLINE unsigned int +ipu_device_cell_num_devices(void); + +STORAGE_CLASS_INLINE unsigned int +ipu_device_cell_num_memories(const unsigned int cell_id); + +STORAGE_CLASS_INLINE unsigned int +ipu_device_cell_memory_size(const unsigned int cell_id, + const unsigned int mem_id); + +STORAGE_CLASS_INLINE unsigned int +ipu_device_cell_memory_address(const unsigned int cell_id, + const unsigned int mem_id); + +STORAGE_CLASS_INLINE unsigned int +ipu_device_cell_databus_memory_address(const unsigned int cell_id, + const unsigned int mem_id); + +STORAGE_CLASS_INLINE unsigned int +ipu_device_cell_num_masters(const unsigned int cell_id); + +STORAGE_CLASS_INLINE unsigned int +ipu_device_cell_master_segment_bits(const unsigned int cell_id, + const unsigned int master_id); + +STORAGE_CLASS_INLINE unsigned int +ipu_device_cell_master_num_segments(const unsigned int cell_id, + const unsigned int master_id); + +STORAGE_CLASS_INLINE unsigned int +ipu_device_cell_master_segment_size(const unsigned int cell_id, + const unsigned int master_id); + +STORAGE_CLASS_INLINE unsigned int +ipu_device_cell_master_stride(const unsigned int cell_id, + const unsigned int master_id); + +STORAGE_CLASS_INLINE unsigned int +ipu_device_cell_master_base_reg(const unsigned int cell_id, + const unsigned int master_id); + +STORAGE_CLASS_INLINE unsigned int 
+ipu_device_cell_master_info_reg(const unsigned int cell_id, + const unsigned int master_id); + +STORAGE_CLASS_INLINE unsigned int +ipu_device_cell_icache_align(unsigned int cell_id); + +#ifdef C_RUN +STORAGE_CLASS_INLINE int +ipu_device_cell_id_crun(int cell_id); +#endif + +#include "ipu_device_cell_properties_func.h" + +#endif /* __IPU_DEVICE_CELL_PROPERTIES_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/devices/interface/ipu_device_cell_properties_func.h b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/devices/interface/ipu_device_cell_properties_func.h new file mode 100644 index 0000000000000..481b0504a2378 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/devices/interface/ipu_device_cell_properties_func.h @@ -0,0 +1,164 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+*/ + +#ifndef __IPU_DEVICE_CELL_PROPERTIES_FUNC_H +#define __IPU_DEVICE_CELL_PROPERTIES_FUNC_H + +/* define properties for all cells uses in ISYS */ + +#include "ipu_device_cell_properties_impl.h" +#include "ipu_device_cell_devices.h" +#include "assert_support.h" +#include "storage_class.h" + +enum {IA_CSS_CELL_MASTER_ADDRESS_WIDTH = 32}; + +STORAGE_CLASS_INLINE unsigned int +ipu_device_cell_num_devices(void) +{ + return NUM_CELLS; +} + +STORAGE_CLASS_INLINE unsigned int +ipu_device_cell_num_memories(const unsigned int cell_id) +{ + assert(cell_id < NUM_CELLS); + return ipu_device_cell_properties[cell_id].type_properties->count-> + num_memories; +} + +STORAGE_CLASS_INLINE unsigned int +ipu_device_cell_memory_size(const unsigned int cell_id, + const unsigned int mem_id) +{ + assert(cell_id < NUM_CELLS); + assert(mem_id < ipu_device_cell_num_memories(cell_id)); + return ipu_device_cell_properties[cell_id].type_properties-> + mem_size[mem_id]; +} + +STORAGE_CLASS_INLINE unsigned int +ipu_device_cell_memory_address(const unsigned int cell_id, + const unsigned int mem_id) +{ + assert(cell_id < NUM_CELLS); + assert(mem_id < ipu_device_cell_num_memories(cell_id)); + return ipu_device_cell_properties[cell_id].mem_address[mem_id]; +} + +STORAGE_CLASS_INLINE unsigned int +ipu_device_cell_databus_memory_address(const unsigned int cell_id, + const unsigned int mem_id) +{ + assert(cell_id < NUM_CELLS); + assert(mem_id < ipu_device_cell_num_memories(cell_id)); + assert(mem_id != 0); + return ipu_device_cell_properties[cell_id].mem_databus_address[mem_id]; +} + +STORAGE_CLASS_INLINE unsigned int +ipu_device_cell_num_masters(const unsigned int cell_id) +{ + assert(cell_id < NUM_CELLS); + return ipu_device_cell_properties[cell_id].type_properties->count-> + num_master_ports; +} + +STORAGE_CLASS_INLINE unsigned int +ipu_device_cell_master_segment_bits(const unsigned int cell_id, + const unsigned int master_id) +{ + assert(cell_id < NUM_CELLS); + assert(master_id < 
ipu_device_cell_num_masters(cell_id)); + return ipu_device_cell_properties[cell_id].type_properties-> + master[master_id].segment_bits; +} + +STORAGE_CLASS_INLINE unsigned int +ipu_device_cell_master_num_segments(const unsigned int cell_id, + const unsigned int master_id) +{ + return 1u << ipu_device_cell_master_segment_bits(cell_id, master_id); +} + +STORAGE_CLASS_INLINE unsigned int +ipu_device_cell_master_segment_size(const unsigned int cell_id, + const unsigned int master_id) +{ + return 1u << (IA_CSS_CELL_MASTER_ADDRESS_WIDTH - + ipu_device_cell_master_segment_bits(cell_id, master_id)); +} + +STORAGE_CLASS_INLINE unsigned int +ipu_device_cell_master_stride(const unsigned int cell_id, + const unsigned int master_id) +{ + assert(cell_id < NUM_CELLS); + assert(master_id < ipu_device_cell_num_masters(cell_id)); + return + ipu_device_cell_properties[cell_id].type_properties-> + master[master_id].stride; +} + +STORAGE_CLASS_INLINE unsigned int +ipu_device_cell_master_base_reg(const unsigned int cell_id, + const unsigned int master_id) +{ + assert(cell_id < NUM_CELLS); + assert(master_id < ipu_device_cell_num_masters(cell_id)); + return + ipu_device_cell_properties[cell_id].type_properties-> + master[master_id].base_address_register; +} + +STORAGE_CLASS_INLINE unsigned int +ipu_device_cell_master_info_reg(const unsigned int cell_id, + const unsigned int master_id) +{ + assert(cell_id < NUM_CELLS); + assert(master_id < ipu_device_cell_num_masters(cell_id)); + return + ipu_device_cell_properties[cell_id].type_properties-> + master[master_id].info_bits_register; +} + +STORAGE_CLASS_INLINE unsigned int +ipu_device_cell_master_info_override_reg(const unsigned int cell_id, + const unsigned int master_id) +{ + assert(cell_id < NUM_CELLS); + assert(master_id < ipu_device_cell_num_masters(cell_id)); + return + ipu_device_cell_properties[cell_id].type_properties-> + master[master_id].info_override_bits_register; +} + +STORAGE_CLASS_INLINE unsigned int 
+ipu_device_cell_icache_align(unsigned int cell_id) +{ + assert(cell_id < NUM_CELLS); + return ipu_device_cell_properties[cell_id].type_properties->count-> + icache_align; +} + +#ifdef C_RUN +STORAGE_CLASS_INLINE int +ipu_device_cell_id_crun(int cell_id) +{ + assert(cell_id < NUM_CELLS); + return ipu_device_map_cell_id_to_crun_proc_id[cell_id]; +} +#endif + +#endif /* __IPU_DEVICE_CELL_PROPERTIES_FUNC_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/devices/interface/ipu_device_cell_properties_struct.h b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/devices/interface/ipu_device_cell_properties_struct.h new file mode 100644 index 0000000000000..63397dc0b7fe6 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/devices/interface/ipu_device_cell_properties_struct.h @@ -0,0 +1,51 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+*/ + +#ifndef __IPU_DEVICE_CELL_PROPERTIES_STRUCT_H +#define __IPU_DEVICE_CELL_PROPERTIES_STRUCT_H + +/* definitions for all cell types */ + +struct ipu_device_cell_count_s { + unsigned int num_memories; + unsigned int num_master_ports; + unsigned int num_stall_bits; + unsigned int icache_align; +}; + +struct ipu_device_cell_master_properties_s { + unsigned int segment_bits; + unsigned int stride; /* offset to register of next segment */ + unsigned int base_address_register; /* address of first base address + register */ + unsigned int info_bits_register; + unsigned int info_override_bits_register; +}; + +struct ipu_device_cell_type_properties_s { + const struct ipu_device_cell_count_s *count; + const struct ipu_device_cell_master_properties_s *master; + const unsigned int *reg_offset; /* offsets of registers, some depend + on cell type */ + const unsigned int *mem_size; +}; + +struct ipu_device_cell_properties_s { + const struct ipu_device_cell_type_properties_s *type_properties; + const unsigned int *mem_address; + const unsigned int *mem_databus_address; + /* const cell_master_port_properties_s* master_port_properties; */ +}; + +#endif /* __IPU_DEVICE_CELL_PROPERTIES_STRUCT_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/devices/interface/ipu_device_cell_type_properties.h b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/devices/interface/ipu_device_cell_type_properties.h new file mode 100644 index 0000000000000..72caed3eef0c9 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/devices/interface/ipu_device_cell_type_properties.h @@ -0,0 +1,69 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. 
+ * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ + +#ifndef __IPU_DEVICE_CELL_TYPE_PROPERTIES_H +#define __IPU_DEVICE_CELL_TYPE_PROPERTIES_H + +#define IPU_DEVICE_INVALID_MEM_ADDRESS 0xFFFFFFFF + +enum ipu_device_cell_stat_ctrl_bit { + IPU_DEVICE_CELL_STAT_CTRL_RESET_BIT = 0, + IPU_DEVICE_CELL_STAT_CTRL_START_BIT = 1, + IPU_DEVICE_CELL_STAT_CTRL_RUN_BIT = 3, + IPU_DEVICE_CELL_STAT_CTRL_READY_BIT = 5, + IPU_DEVICE_CELL_STAT_CTRL_SLEEP_BIT = 6, + IPU_DEVICE_CELL_STAT_CTRL_STALL_BIT = 7, + IPU_DEVICE_CELL_STAT_CTRL_CLEAR_IRQ_MASK_FLAG_BIT = 8, + IPU_DEVICE_CELL_STAT_CTRL_BROKEN_IRQ_MASK_FLAG_BIT = 9, + IPU_DEVICE_CELL_STAT_CTRL_READY_IRQ_MASK_FLAG_BIT = 10, + IPU_DEVICE_CELL_STAT_CTRL_SLEEP_IRQ_MASK_FLAG_BIT = 11, + IPU_DEVICE_CELL_STAT_CTRL_INVALIDATE_ICACHE_BIT = 12, + IPU_DEVICE_CELL_STAT_CTRL_ICACHE_ENABLE_PREFETCH_BIT = 13 +}; + +enum ipu_device_cell_reg_addr { + IPU_DEVICE_CELL_STAT_CTRL_REG_ADDRESS = 0x0, + IPU_DEVICE_CELL_START_PC_REG_ADDRESS = 0x4, + IPU_DEVICE_CELL_ICACHE_BASE_REG_ADDRESS = 0x10, + IPU_DEVICE_CELL_ICACHE_INFO_BITS_REG_ADDRESS = 0x14 +}; + +enum ipu_device_cell_reg { + IPU_DEVICE_CELL_STAT_CTRL_REG, + IPU_DEVICE_CELL_START_PC_REG, + IPU_DEVICE_CELL_ICACHE_BASE_REG, + IPU_DEVICE_CELL_DEBUG_PC_REG, + IPU_DEVICE_CELL_STALL_REG, + IPU_DEVICE_CELL_NUM_REGS +}; + +enum ipu_device_cell_mem { + IPU_DEVICE_CELL_REGS, /* memory id of registers */ + IPU_DEVICE_CELL_PMEM, /* memory id of pmem */ + IPU_DEVICE_CELL_DMEM, /* memory id of dmem */ + IPU_DEVICE_CELL_BAMEM, /* memory id of bamem */ + IPU_DEVICE_CELL_VMEM /* memory id of vmem */ +}; +#define IPU_DEVICE_CELL_NUM_MEMORIES (IPU_DEVICE_CELL_VMEM + 1) + +enum ipu_device_cell_master { + IPU_DEVICE_CELL_MASTER_ICACHE, /* master port id of icache */ + IPU_DEVICE_CELL_MASTER_QMEM, + 
IPU_DEVICE_CELL_MASTER_CMEM, + IPU_DEVICE_CELL_MASTER_XMEM, + IPU_DEVICE_CELL_MASTER_XVMEM +}; +#define IPU_DEVICE_CELL_MASTER_NUM_MASTERS (IPU_DEVICE_CELL_MASTER_XVMEM + 1) + +#endif /* __IPU_DEVICE_CELL_TYPE_PROPERTIES_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/devices/isys/bxtB0/ipu_device_cell_devices.h b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/devices/isys/bxtB0/ipu_device_cell_devices.h new file mode 100644 index 0000000000000..bd672104db3bd --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/devices/isys/bxtB0/ipu_device_cell_devices.h @@ -0,0 +1,27 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ + +#ifndef __IPU_DEVICE_CELL_DEVICES_H +#define __IPU_DEVICE_CELL_DEVICES_H + +/* define cell instances in ISYS */ + +#define SPC0_CELL input_system_unis_logic_sp_control_tile_sp + +enum ipu_device_isys_cell_id { + SPC0, + NUM_CELLS +}; + +#endif /* __IPU_DEVICE_CELL_DEVICES_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/devices/isys/bxtB0/ipu_device_cell_properties_defs.h b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/devices/isys/bxtB0/ipu_device_cell_properties_defs.h new file mode 100644 index 0000000000000..1b4df534a665a --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/devices/isys/bxtB0/ipu_device_cell_properties_defs.h @@ -0,0 +1,22 @@ +/* +* Support for Intel Camera Imaging ISP subsystem. +* Copyright (c) 2010 - 2018, Intel Corporation. 
+* +* This program is free software; you can redistribute it and/or modify it +* under the terms and conditions of the GNU General Public License, +* version 2, as published by the Free Software Foundation. +* +* This program is distributed in the hope it will be useful, but WITHOUT +* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for +* more details. +*/ +/* Generated file - please do not edit. */ + +#ifndef _IPU_DEVICE_CELL_PROPERTIES_DEFS_H_ +#define _IPU_DEVICE_CELL_PROPERTIES_DEFS_H_ +#define SPC0_REGS_CBUS_ADDRESS 0x0 +#define SPC0_DMEM_CBUS_ADDRESS 0x8000 +#define SPC0_DMEM_DBUS_ADDRESS 0x8000 +#define SPC0_DMEM_DMA_M0_ADDRESS 0x210000 +#endif /* _IPU_DEVICE_CELL_PROPERTIES_DEFS_H_ */ diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/devices/isys/bxtB0/ipu_device_cell_properties_impl.h b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/devices/isys/bxtB0/ipu_device_cell_properties_impl.h new file mode 100644 index 0000000000000..5f8ab1ac928f3 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/devices/isys/bxtB0/ipu_device_cell_properties_impl.h @@ -0,0 +1,57 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+*/ + +#ifndef __IPU_DEVICE_CELL_PROPERTIES_IMPL_H +#define __IPU_DEVICE_CELL_PROPERTIES_IMPL_H + +/* define properties for all cells uses in ISYS */ + +#include "ipu_device_sp2600_control_properties_impl.h" +#include "ipu_device_cell_properties_defs.h" +#include "ipu_device_cell_devices.h" +#include "ipu_device_cell_type_properties.h"/* IPU_DEVICE_INVALID_MEM_ADDRESS */ + +static const unsigned int +ipu_device_spc0_mem_address[IPU_DEVICE_SP2600_CONTROL_NUM_MEMORIES] = { + SPC0_REGS_CBUS_ADDRESS, + IPU_DEVICE_INVALID_MEM_ADDRESS, /* no pmem */ + SPC0_DMEM_CBUS_ADDRESS +}; + +static const unsigned int +ipu_device_spc0_databus_mem_address[IPU_DEVICE_SP2600_CONTROL_NUM_MEMORIES] = { + IPU_DEVICE_INVALID_MEM_ADDRESS, /* regs not accessible from DBUS */ + IPU_DEVICE_INVALID_MEM_ADDRESS, /* no pmem */ + SPC0_DMEM_DBUS_ADDRESS +}; + +static const struct +ipu_device_cell_properties_s ipu_device_cell_properties[NUM_CELLS] = { + { + &ipu_device_sp2600_control_properties, + ipu_device_spc0_mem_address, + ipu_device_spc0_databus_mem_address + } +}; + +#ifdef C_RUN + +/* Mapping between hrt_hive_processors enum and cell_id's used in FW */ +static const int ipu_device_map_cell_id_to_crun_proc_id[NUM_CELLS] = { + 0 /* SPC0 */ +}; + +#endif + +#endif /* __IPU_DEVICE_CELL_PROPERTIES_IMPL_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/devices/src/ipu_device_sp2600_control_properties_impl.h b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/devices/src/ipu_device_sp2600_control_properties_impl.h new file mode 100644 index 0000000000000..430295cd9d949 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/devices/src/ipu_device_sp2600_control_properties_impl.h @@ -0,0 +1,136 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. 
+ * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ + +#ifndef __IPU_DEVICE_SP2600_CONTROL_PROPERTIES_IMPL_H +#define __IPU_DEVICE_SP2600_CONTROL_PROPERTIES_IMPL_H + +/* sp2600_control definition */ + +#include "ipu_device_cell_properties_struct.h" + +enum ipu_device_sp2600_control_registers { + /* control registers */ + IPU_DEVICE_SP2600_CONTROL_STAT_CTRL = 0x0, + IPU_DEVICE_SP2600_CONTROL_START_PC = 0x4, + + /* master port registers */ + IPU_DEVICE_SP2600_CONTROL_ICACHE_BASE = 0x10, + IPU_DEVICE_SP2600_CONTROL_ICACHE_INFO = 0x14, + IPU_DEVICE_SP2600_CONTROL_ICACHE_INFO_OVERRIDE = 0x18, + + IPU_DEVICE_SP2600_CONTROL_QMEM_BASE = 0x1C, + + IPU_DEVICE_SP2600_CONTROL_CMEM_BASE = 0x28, + IPU_DEVICE_SP2600_CONTROL_CMEM_INFO = 0x2C, + IPU_DEVICE_SP2600_CONTROL_CMEM_INFO_OVERRIDE = 0x30, + + IPU_DEVICE_SP2600_CONTROL_XMEM_BASE = 0x58, + IPU_DEVICE_SP2600_CONTROL_XMEM_INFO = 0x5C, + IPU_DEVICE_SP2600_CONTROL_XMEM_INFO_OVERRIDE = 0x60, + + /* debug registers */ + IPU_DEVICE_SP2600_CONTROL_DEBUG_PC = 0x9C, + IPU_DEVICE_SP2600_CONTROL_STALL = 0xA0 +}; + +enum ipu_device_sp2600_control_mems { + IPU_DEVICE_SP2600_CONTROL_REGS, + IPU_DEVICE_SP2600_CONTROL_PMEM, + IPU_DEVICE_SP2600_CONTROL_DMEM, + IPU_DEVICE_SP2600_CONTROL_NUM_MEMORIES +}; + +static const unsigned int +ipu_device_sp2600_control_mem_size[IPU_DEVICE_SP2600_CONTROL_NUM_MEMORIES] = { + 0x000AC, + 0x00000, + 0x10000 +}; + +enum ipu_device_sp2600_control_masters { + IPU_DEVICE_SP2600_CONTROL_ICACHE, + IPU_DEVICE_SP2600_CONTROL_QMEM, + IPU_DEVICE_SP2600_CONTROL_CMEM, + IPU_DEVICE_SP2600_CONTROL_XMEM, + 
IPU_DEVICE_SP2600_CONTROL_NUM_MASTERS +}; + +static const struct ipu_device_cell_master_properties_s +ipu_device_sp2600_control_masters[IPU_DEVICE_SP2600_CONTROL_NUM_MASTERS] = { + { + 0, + 0xC, + IPU_DEVICE_SP2600_CONTROL_ICACHE_BASE, + IPU_DEVICE_SP2600_CONTROL_ICACHE_INFO, + IPU_DEVICE_SP2600_CONTROL_ICACHE_INFO_OVERRIDE + }, + { + 0, + 0xC, + IPU_DEVICE_SP2600_CONTROL_QMEM_BASE, + 0xFFFFFFFF, + 0xFFFFFFFF + }, + { + 2, + 0xC, + IPU_DEVICE_SP2600_CONTROL_CMEM_BASE, + IPU_DEVICE_SP2600_CONTROL_CMEM_INFO, + IPU_DEVICE_SP2600_CONTROL_CMEM_INFO_OVERRIDE + }, + { + 2, + 0xC, + IPU_DEVICE_SP2600_CONTROL_XMEM_BASE, + IPU_DEVICE_SP2600_CONTROL_XMEM_INFO, + IPU_DEVICE_SP2600_CONTROL_XMEM_INFO_OVERRIDE + } +}; + +enum ipu_device_sp2600_control_stall_bits { + IPU_DEVICE_SP2600_CONTROL_STALL_ICACHE, + IPU_DEVICE_SP2600_CONTROL_STALL_DMEM, + IPU_DEVICE_SP2600_CONTROL_STALL_QMEM, + IPU_DEVICE_SP2600_CONTROL_STALL_CMEM, + IPU_DEVICE_SP2600_CONTROL_STALL_XMEM, + IPU_DEVICE_SP2600_CONTROL_NUM_STALL_BITS +}; + +/* 32 bits per instruction */ +#define IPU_DEVICE_SP2600_CONTROL_ICACHE_WORD_SIZE 4 +/* 32 instructions per burst */ +#define IPU_DEVICE_SP2600_CONTROL_ICACHE_BURST_SIZE 32 + +static const struct ipu_device_cell_count_s ipu_device_sp2600_control_count = { + IPU_DEVICE_SP2600_CONTROL_NUM_MEMORIES, + IPU_DEVICE_SP2600_CONTROL_NUM_MASTERS, + IPU_DEVICE_SP2600_CONTROL_NUM_STALL_BITS, + IPU_DEVICE_SP2600_CONTROL_ICACHE_WORD_SIZE * + IPU_DEVICE_SP2600_CONTROL_ICACHE_BURST_SIZE +}; + +static const unsigned int +ipu_device_sp2600_control_reg_offset[/* CELL_NUM_REGS */] = { + 0x0, 0x4, 0x10, 0x9C, 0xA0 +}; + +static const struct ipu_device_cell_type_properties_s +ipu_device_sp2600_control_properties = { + &ipu_device_sp2600_control_count, + ipu_device_sp2600_control_masters, + ipu_device_sp2600_control_reg_offset, + ipu_device_sp2600_control_mem_size +}; + +#endif /* __IPU_DEVICE_SP2600_CONTROL_PROPERTIES_IMPL_H */ diff --git 
a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/fw_abi_common_types/cpu/fw_abi_cpu_types.mk b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/fw_abi_common_types/cpu/fw_abi_cpu_types.mk new file mode 100644 index 0000000000000..b1ffbf7ea21ff --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/fw_abi_common_types/cpu/fw_abi_cpu_types.mk @@ -0,0 +1,24 @@ +# # # +# Support for Intel Camera Imaging ISP subsystem. +# Copyright (c) 2010 - 2018, Intel Corporation. +# +# This program is free software; you can redistribute it and/or modify it +# under the terms and conditions of the GNU General Public License, +# version 2, as published by the Free Software Foundation. +# +# This program is distributed in the hope it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for +# more details +# + +# MODULE is FW ABI COMMON TYPES + +FW_ABI_COMMON_TYPES_DIRS = -I$${MODULES_DIR}/fw_abi_common_types +FW_ABI_COMMON_TYPES_DIRS += -I$${MODULES_DIR}/fw_abi_common_types/cpu + +FW_ABI_COMMON_TYPES_HOST_FILES = +FW_ABI_COMMON_TYPES_HOST_CPPFLAGS = $(FW_ABI_COMMON_TYPES_DIRS) + +FW_ABI_COMMON_TYPES_FW_FILES = +FW_ABI_COMMON_TYPES_FW_CPPFLAGS = $(FW_ABI_COMMON_TYPES_DIRS) diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/fw_abi_common_types/cpu/ia_css_terminal_base_types.h b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/fw_abi_common_types/cpu/ia_css_terminal_base_types.h new file mode 100644 index 0000000000000..21cc3f43f485e --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/fw_abi_common_types/cpu/ia_css_terminal_base_types.h @@ -0,0 +1,42 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. 
+ * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ + +#ifndef __IA_CSS_TERMINAL_BASE_TYPES_H +#define __IA_CSS_TERMINAL_BASE_TYPES_H + + +#include "type_support.h" +#include "ia_css_terminal_defs.h" + +#define N_UINT16_IN_TERMINAL_STRUCT 3 +#define N_PADDING_UINT8_IN_TERMINAL_STRUCT 5 + +#define SIZE_OF_TERMINAL_STRUCT_BITS \ + (IA_CSS_TERMINAL_TYPE_BITS \ + + IA_CSS_TERMINAL_ID_BITS \ + + N_UINT16_IN_TERMINAL_STRUCT * IA_CSS_UINT16_T_BITS \ + + N_PADDING_UINT8_IN_TERMINAL_STRUCT * IA_CSS_UINT8_T_BITS) + +/* ==================== Base Terminal - START ==================== */ +struct ia_css_terminal_s { /**< Base terminal */ + ia_css_terminal_type_t terminal_type; /**< Type ia_css_terminal_type_t */ + int16_t parent_offset; /**< Offset to the process group */ + uint16_t size; /**< Size of this whole terminal layout-structure */ + uint16_t tm_index; /**< Index of the terminal manifest object */ + ia_css_terminal_ID_t ID; /**< Absolute referal ID for this terminal, valid ID's != 0 */ + uint8_t padding[N_PADDING_UINT8_IN_TERMINAL_STRUCT]; +}; +/* ==================== Base Terminal - END ==================== */ + +#endif /* __IA_CSS_TERMINAL_BASE_TYPES_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/fw_abi_common_types/cpu/ia_css_terminal_manifest_base_types.h b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/fw_abi_common_types/cpu/ia_css_terminal_manifest_base_types.h new file mode 100644 index 0000000000000..056e1b6d5d4bd --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/fw_abi_common_types/cpu/ia_css_terminal_manifest_base_types.h @@ 
-0,0 +1,42 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ + +#ifndef __IA_CSS_TERMINAL_MANIFEST_BASE_TYPES_H +#define __IA_CSS_TERMINAL_MANIFEST_BASE_TYPES_H + +#include "ia_css_terminal_defs.h" + +#define N_PADDING_UINT8_IN_TERMINAL_MAN_STRUCT 5 +#define SIZE_OF_TERMINAL_MANIFEST_STRUCT_IN_BITS \ + (IA_CSS_UINT16_T_BITS \ + + IA_CSS_TERMINAL_ID_BITS \ + + IA_CSS_TERMINAL_TYPE_BITS \ + + IA_CSS_UINT32_T_BITS \ + + (N_PADDING_UINT8_IN_TERMINAL_MAN_STRUCT*IA_CSS_UINT8_T_BITS)) + +/* ==================== Base Terminal Manifest - START ==================== */ +struct ia_css_terminal_manifest_s { + ia_css_terminal_type_t terminal_type; /**< Type ia_css_terminal_type_t */ + int16_t parent_offset; /**< Offset to the program group manifest */ + uint16_t size; /**< Size of this whole terminal-manifest layout-structure */ + ia_css_terminal_ID_t ID; + uint8_t padding[N_PADDING_UINT8_IN_TERMINAL_MAN_STRUCT]; +}; + +typedef struct ia_css_terminal_manifest_s + ia_css_terminal_manifest_t; + +/* ==================== Base Terminal Manifest - END ==================== */ + +#endif /* __IA_CSS_TERMINAL_MANIFEST_BASE_TYPES_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/fw_abi_common_types/ia_css_base_types.h b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/fw_abi_common_types/ia_css_base_types.h new file mode 100644 index 0000000000000..3b80a17a6ad38 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/fw_abi_common_types/ia_css_base_types.h 
@@ -0,0 +1,38 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ + +#ifndef __IA_CSS_BASE_TYPES_H +#define __IA_CSS_BASE_TYPES_H + +#include "type_support.h" + +#define VIED_VADDRESS_BITS 32 +typedef uint32_t vied_vaddress_t; + +#define DEVICE_DESCRIPTOR_ID_BITS 32 +typedef struct { + uint8_t device_id; + uint8_t instance_id; + uint8_t channel_id; + uint8_t section_id; +} device_descriptor_fields_t; + +typedef union { + device_descriptor_fields_t fields; + uint32_t data; +} device_descriptor_id_t; + +typedef uint16_t ia_css_process_id_t; + +#endif /* __IA_CSS_BASE_TYPES_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/fw_abi_common_types/ia_css_terminal_defs.h b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/fw_abi_common_types/ia_css_terminal_defs.h new file mode 100644 index 0000000000000..dbf1cf93756ff --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/fw_abi_common_types/ia_css_terminal_defs.h @@ -0,0 +1,105 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU General Public License for + * more details. +*/ + +#ifndef __IA_CSS_TERMINAL_DEFS_H +#define __IA_CSS_TERMINAL_DEFS_H + + +#include "type_support.h" + +#define IA_CSS_TERMINAL_ID_BITS 8 +typedef uint8_t ia_css_terminal_ID_t; +#define IA_CSS_TERMINAL_INVALID_ID ((ia_css_terminal_ID_t)(-1)) + +/* + * Terminal Base Type + */ +typedef enum ia_css_terminal_type { + /**< Data input */ + IA_CSS_TERMINAL_TYPE_DATA_IN = 0, + /**< Data output */ + IA_CSS_TERMINAL_TYPE_DATA_OUT, + /**< Type 6 parameter input */ + IA_CSS_TERMINAL_TYPE_PARAM_STREAM, + /**< Type 1-5 parameter input */ + IA_CSS_TERMINAL_TYPE_PARAM_CACHED_IN, + /**< Type 1-5 parameter output */ + IA_CSS_TERMINAL_TYPE_PARAM_CACHED_OUT, + /**< Represent the new type of terminal for the + * "spatial dependent parameters", when params go in + */ + IA_CSS_TERMINAL_TYPE_PARAM_SPATIAL_IN, + /**< Represent the new type of terminal for the + * "spatial dependent parameters", when params go out + */ + IA_CSS_TERMINAL_TYPE_PARAM_SPATIAL_OUT, + /**< Represent the new type of terminal for the + * explicit slicing, when params go in + */ + IA_CSS_TERMINAL_TYPE_PARAM_SLICED_IN, + /**< Represent the new type of terminal for the + * explicit slicing, when params go out + */ + IA_CSS_TERMINAL_TYPE_PARAM_SLICED_OUT, + /**< State (private data) input */ + IA_CSS_TERMINAL_TYPE_STATE_IN, + /**< State (private data) output */ + IA_CSS_TERMINAL_TYPE_STATE_OUT, + IA_CSS_TERMINAL_TYPE_PROGRAM, + IA_CSS_TERMINAL_TYPE_PROGRAM_CONTROL_INIT, + IA_CSS_N_TERMINAL_TYPES +} ia_css_terminal_type_t; + +#define IA_CSS_TERMINAL_TYPE_BITS 32 + +/* Temporary redirection needed to facilitate merging with the drivers + in a backwards compatible manner */ +#define IA_CSS_TERMINAL_TYPE_PARAM_CACHED IA_CSS_TERMINAL_TYPE_PARAM_CACHED_IN + +/* + * Dimensions of the data objects. Note that a C-style + * data order is assumed. Data stored by row. + */ +typedef enum ia_css_dimension { + /**< The number of columns, i.e. 
the size of the row */ + IA_CSS_COL_DIMENSION = 0, + /**< The number of rows, i.e. the size of the column */ + IA_CSS_ROW_DIMENSION = 1, + IA_CSS_N_DATA_DIMENSION = 2 +} ia_css_dimension_t; + +#define IA_CSS_N_COMMAND_COUNT (4) + +#ifndef PIPE_GENERATION +/* Don't include these complex enum structures in Genpipe, it can't handle and it does not need them */ +/* + * enum ia_css_isys_link_id. Lists the link IDs used by the FW for On The Fly feature + */ +typedef enum ia_css_isys_link_id { + IA_CSS_ISYS_LINK_OFFLINE = 0, + IA_CSS_ISYS_LINK_MAIN_OUTPUT = 1, + IA_CSS_ISYS_LINK_PDAF_OUTPUT = 2 +} ia_css_isys_link_id_t; +#define N_IA_CSS_ISYS_LINK_ID (IA_CSS_ISYS_LINK_PDAF_OUTPUT + 1) + +/* + * enum ia_css_data_barrier_link_id. Lists the link IDs used by the FW for data barrier feature + */ +typedef enum ia_css_data_barrier_link_id { + IA_CSS_DATA_BARRIER_LINK_MEMORY = N_IA_CSS_ISYS_LINK_ID, + N_IA_CSS_DATA_BARRIER_LINK_ID +} ia_css_data_barrier_link_id_t; + +#endif /* #ifndef PIPE_GENERATION */ +#endif /* __IA_CSS_TERMINAL_DEFS_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/isysapi/interface/ia_css_isys_fw_bridged_types.h b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/isysapi/interface/ia_css_isys_fw_bridged_types.h new file mode 100644 index 0000000000000..5e47fe7026bd7 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/isysapi/interface/ia_css_isys_fw_bridged_types.h @@ -0,0 +1,402 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU General Public License for + * more details. +*/ + +#ifndef __IA_CSS_ISYS_FW_BRIDGED_TYPES_H +#define __IA_CSS_ISYS_FW_BRIDGED_TYPES_H + +#include "platform_support.h" + +#include "ia_css_isysapi_fw_types.h" + +/** + * struct ia_css_isys_buffer_partition_comm - buffer partition information + * @num_gda_pages: Number of virtual gda pages available for each + * virtual stream + */ +struct ia_css_isys_buffer_partition_comm { + aligned_uint32(unsigned int, num_gda_pages[STREAM_ID_MAX]); +}; + +/** + * struct ia_css_isys_fw_config - contains the parts from + * ia_css_isys_device_cfg_data + * we need to transfer to the cell + * @num_send_queues: Number of send queues per queue + * type(N_IA_CSS_ISYS_QUEUE_TYPE) + * @num_recv_queues: Number of receive queues per queue + * type(N_IA_CSS_ISYS_QUEUE_TYPE) + */ +struct ia_css_isys_fw_config { + aligned_struct(struct ia_css_isys_buffer_partition_comm, + buffer_partition); + aligned_uint32(unsigned int, + num_send_queues[N_IA_CSS_ISYS_QUEUE_TYPE]); + aligned_uint32(unsigned int, + num_recv_queues[N_IA_CSS_ISYS_QUEUE_TYPE]); +}; + +/** + * struct ia_css_isys_resolution_comm: Generic resolution structure. 
+ * @Width + * @Height + */ +struct ia_css_isys_resolution_comm { + aligned_uint32(unsigned int, width); + aligned_uint32(unsigned int, height); +}; + +/** + * struct ia_css_isys_output_pin_payload_comm + * @out_buf_id: Points to output pin buffer - buffer identifier + * @addr: Points to output pin buffer - CSS Virtual Address + * @compress: Request frame compression (1), or not (0) + * This must be the same as ia_css_isys_output_pin_info_comm::reserve_compression + */ +struct ia_css_isys_output_pin_payload_comm { + aligned_uint64(ia_css_return_token, out_buf_id); + aligned_uint32(ia_css_output_buffer_css_address, addr); + aligned_uint32(unsigned int, compress); +}; + +/** + * struct ia_css_isys_output_pin_info_comm + * @input_pin_id: input pin id/index which is source of + * the data for this output pin + * @output_res: output pin resolution + * @stride: output stride in Bytes (not valid for statistics) + * @watermark_in_lines: pin watermark level in lines + * @payload_buf_size: Size in Bytes of all buffers that will be supplied for capture + * on this pin (i.e. addressed by ia_css_isys_output_pin_payload::addr) + * @send_irq: assert if pin event should trigger irq + * @pt: pin type + * @ft: frame format type + * @link_id: identifies PPG to connect to, link_id = 0 implies offline + * while link_id > 0 implies buffer_chasing or online mode + * can be entered. + * @reserve_compression: Reserve compression resources for pin. 
+ */ +struct ia_css_isys_output_pin_info_comm { + aligned_struct(struct ia_css_isys_resolution_comm, output_res); + aligned_uint32(unsigned int, stride); + aligned_uint32(unsigned int, watermark_in_lines); + aligned_uint32(unsigned int, payload_buf_size); + aligned_uint8(unsigned int, send_irq); + aligned_uint8(unsigned int, input_pin_id); + aligned_uint8(enum ia_css_isys_pin_type, pt); + aligned_uint8(enum ia_css_isys_frame_format_type, ft); + aligned_uint8(enum ia_css_isys_link_id, link_id); + aligned_uint8(unsigned int, reserve_compression); +}; + +/** + * struct ia_css_isys_param_pin_comm + * @param_buf_id: Points to param port buffer - buffer identifier + * @addr: Points to param pin buffer - CSS Virtual Address + */ +struct ia_css_isys_param_pin_comm { + aligned_uint64(ia_css_return_token, param_buf_id); + aligned_uint32(ia_css_input_buffer_css_address, addr); +}; + +/** + * struct ia_css_isys_input_pin_info_comm + * @input_res: input resolution + * @dt: mipi data type + * @mipi_store_mode: defines if legacy long packet header will be stored or + * hdiscarded if discarded, output pin pin type for this + * input pin can only be MIPI + * @bits_per_pix: native bits per pixel + * @dt_rename: mapped_dt + */ +struct ia_css_isys_input_pin_info_comm { + aligned_struct(struct ia_css_isys_resolution_comm, input_res); + aligned_uint8(enum ia_css_isys_mipi_data_type, dt); + aligned_uint8(enum ia_css_isys_mipi_store_mode, mipi_store_mode); + aligned_uint8(unsigned int, bits_per_pix); + aligned_uint8(unsigned int, mapped_dt); +}; + +/** + * ISA configuration fields, definition and macros + */ +#define ISA_CFG_FIELD_BLC_EN_LEN 1 +#define ISA_CFG_FIELD_BLC_EN_SHIFT 0 + +#define ISA_CFG_FIELD_LSC_EN_LEN 1 +#define ISA_CFG_FIELD_LSC_EN_SHIFT 1 + +#define ISA_CFG_FIELD_DPC_EN_LEN 1 +#define ISA_CFG_FIELD_DPC_EN_SHIFT 2 + +#define ISA_CFG_FIELD_DOWNSCALER_EN_LEN 1 +#define ISA_CFG_FIELD_DOWNSCALER_EN_SHIFT 3 + +#define ISA_CFG_FIELD_AWB_EN_LEN 1 +#define 
ISA_CFG_FIELD_AWB_EN_SHIFT 4 + +#define ISA_CFG_FIELD_AF_EN_LEN 1 +#define ISA_CFG_FIELD_AF_EN_SHIFT 5 + +#define ISA_CFG_FIELD_AE_EN_LEN 1 +#define ISA_CFG_FIELD_AE_EN_SHIFT 6 + +#define ISA_CFG_FIELD_PAF_TYPE_LEN 8 +#define ISA_CFG_FIELD_PAF_TYPE_SHIFT 7 + +#define ISA_CFG_FIELD_SEND_IRQ_STATS_READY_LEN 1 +#define ISA_CFG_FIELD_SEND_IRQ_STATS_READY_SHIFT 15 + +#define ISA_CFG_FIELD_SEND_RESP_STATS_READY_LEN 1 +#define ISA_CFG_FIELD_SEND_RESP_STATS_READY_SHIFT 16 + +/* Helper macros */ +#define ISA_CFG_GET_MASK_FROM_LEN(len) ((1 << (len)) - 1) +#define ISA_CFG_GET_MASK_FROM_TAG(tag) \ + (ISA_CFG_GET_MASK_FROM_LEN(ISA_CFG_FIELD_##tag##_LEN)) +#define ISA_CFG_GET_SHIFT_FROM_TAG(tag) \ + (ISA_CFG_FIELD_##tag##_SHIFT) +/* Get/Set macros */ +#define ISA_CFG_FIELD_GET(tag, word) \ + ( \ + ((word) >> (ISA_CFG_GET_SHIFT_FROM_TAG(tag))) &\ + ISA_CFG_GET_MASK_FROM_TAG(tag) \ + ) +#define ISA_CFG_FIELD_SET(tag, word, value) \ + word |= ( \ + ((value) & ISA_CFG_GET_MASK_FROM_TAG(tag)) << \ + ISA_CFG_GET_SHIFT_FROM_TAG(tag) \ + ) + +/** + * struct ia_css_isys_isa_cfg_comm. 
Describes the ISA cfg + */ +struct ia_css_isys_isa_cfg_comm { + aligned_struct(struct ia_css_isys_resolution_comm, + isa_res[N_IA_CSS_ISYS_RESOLUTION_INFO]); + aligned_uint32(/* multi-field packing */, cfg_fields); +}; + + /** + * struct ia_css_isys_cropping_comm - cropping coordinates + */ +struct ia_css_isys_cropping_comm { + aligned_int32(int, top_offset); + aligned_int32(int, left_offset); + aligned_int32(int, bottom_offset); + aligned_int32(int, right_offset); +}; + + /** + * struct ia_css_isys_stream_cfg_data_comm + * ISYS stream configuration data structure + * @isa_cfg: details about what ACCs are active if ISA is used + * @crop: defines cropping resolution for the + * maximum number of input pins which can be cropped, + * it is directly mapped to the HW devices + * @input_pins: input pin descriptors + * @output_pins: output pin descriptors + * @compfmt: de-compression setting for User Defined Data + * @nof_input_pins: number of input pins + * @nof_output_pins: number of output pins + * @send_irq_sof_discarded: send irq on discarded frame sof response + * - if '1' it will override the send_resp_sof_discarded and send + * the response + * - if '0' the send_resp_sof_discarded will determine whether to + * send the response + * @send_irq_eof_discarded: send irq on discarded frame eof response + * - if '1' it will override the send_resp_eof_discarded and send + * the response + * - if '0' the send_resp_eof_discarded will determine whether to + * send the response + * @send_resp_sof_discarded: send response for discarded frame sof detected, + * used only when send_irq_sof_discarded is '0' + * @send_resp_eof_discarded: send response for discarded frame eof detected, + * used only when send_irq_eof_discarded is '0' + * @src: Stream source index e.g. 
MIPI_generator_0, CSI2-rx_1 + * @vc: MIPI Virtual Channel (up to 4 virtual per physical channel) + * @isl_use: indicates whether stream requires ISL and how + */ +struct ia_css_isys_stream_cfg_data_comm { + aligned_struct(struct ia_css_isys_isa_cfg_comm, isa_cfg); + aligned_struct(struct ia_css_isys_cropping_comm, + crop[N_IA_CSS_ISYS_CROPPING_LOCATION]); + aligned_struct(struct ia_css_isys_input_pin_info_comm, + input_pins[MAX_IPINS]); + aligned_struct(struct ia_css_isys_output_pin_info_comm, + output_pins[MAX_OPINS]); + aligned_uint32(unsigned int, compfmt); + aligned_uint8(unsigned int, nof_input_pins); + aligned_uint8(unsigned int, nof_output_pins); + aligned_uint8(unsigned int, send_irq_sof_discarded); + aligned_uint8(unsigned int, send_irq_eof_discarded); + aligned_uint8(unsigned int, send_resp_sof_discarded); + aligned_uint8(unsigned int, send_resp_eof_discarded); + aligned_uint8(enum ia_css_isys_stream_source, src); + aligned_uint8(enum ia_css_isys_mipi_vc, vc); + aligned_uint8(enum ia_css_isys_isl_use, isl_use); +}; + +/** + * struct ia_css_isys_frame_buff_set - frame buffer set + * @output_pins: output pin addresses + * @process_group_light: process_group_light buffer address + * @send_irq_sof: send irq on frame sof response + * - if '1' it will override the send_resp_sof and send the + * response + * - if '0' the send_resp_sof will determine whether to send the + * response + * @send_irq_eof: send irq on frame eof response + * - if '1' it will override the send_resp_eof and send the + * response + * - if '0' the send_resp_eof will determine whether to send the + * response + * @send_resp_sof: send response for frame sof detected, used only when + * send_irq_sof is '0' + * @send_resp_eof: send response for frame eof detected, used only when + * send_irq_eof is '0' + * @frame_counter: frame number associated with this buffer set. 
+ */ +struct ia_css_isys_frame_buff_set_comm { + aligned_struct(struct ia_css_isys_output_pin_payload_comm, + output_pins[MAX_OPINS]); + aligned_struct(struct ia_css_isys_param_pin_comm, process_group_light); + aligned_uint8(unsigned int, send_irq_sof); + aligned_uint8(unsigned int, send_irq_eof); + aligned_uint8(unsigned int, send_irq_capture_ack); + aligned_uint8(unsigned int, send_irq_capture_done); + aligned_uint8(unsigned int, send_resp_sof); + aligned_uint8(unsigned int, send_resp_eof); + aligned_uint8(unsigned int, frame_counter); +}; + +/** + * struct ia_css_isys_error_info_comm + * @error: error code if something went wrong + * @error_details: depending on error code, it may contain additional + * error info + */ +struct ia_css_isys_error_info_comm { + aligned_enum(enum ia_css_isys_error, error); + aligned_uint32(unsigned int, error_details); +}; + +/** + * struct ia_css_isys_resp_info_comm + * @pin: this var is only valid for pin event related responses, + * contains pin addresses + * @process_group_light: this var is valid for stats ready related responses, + * contains process group addresses + * @error_info: error information from the FW + * @timestamp: Time information for event if available + * @stream_handle: stream id the response corresponds to + * @type: response type + * @pin_id: pin id that the pin payload corresponds to + * @acc_id: this var is valid for stats ready related responses, + * contains accelerator id that finished producing + * all related statistics + * @frame_counter: valid for STREAM_START_AND_CAPTURE_DONE, + * STREAM_CAPTURE_DONE and STREAM_CAPTURE_DISCARDED, + * @written_direct: indicates if frame was written direct (online mode) or not. 
+ * + */ + +struct ia_css_isys_resp_info_comm { + aligned_uint64(ia_css_return_token, buf_id); /* Used internally only */ + aligned_struct(struct ia_css_isys_output_pin_payload_comm, pin); + aligned_struct(struct ia_css_isys_param_pin_comm, process_group_light); + aligned_struct(struct ia_css_isys_error_info_comm, error_info); + aligned_uint32(unsigned int, timestamp[2]); + aligned_uint8(unsigned int, stream_handle); + aligned_uint8(enum ia_css_isys_resp_type, type); + aligned_uint8(unsigned int, pin_id); + aligned_uint8(unsigned int, acc_id); + aligned_uint8(unsigned int, frame_counter); + aligned_uint8(unsigned int, written_direct); +}; + +/** + * struct ia_css_isys_proxy_error_info_comm + * @proxy_error: error code if something went wrong + * @proxy_error_details: depending on error code, it may contain additional + * error info + */ +struct ia_css_isys_proxy_error_info_comm { + aligned_enum(enum ia_css_proxy_error, error); + aligned_uint32(unsigned int, error_details); +}; + +/** + * struct ia_css_isys_proxy_resp_info_comm + * @request_id: Unique identifier for the write request + * (in case multiple write requests are issued for same register) + * @error_info: details in struct definition + */ +struct ia_css_isys_proxy_resp_info_comm { + aligned_uint32(uint32_t, request_id); + aligned_struct(struct ia_css_isys_proxy_error_info_comm, error_info); +}; + +/** + * struct ia_css_proxy_write_queue_token + * @request_id: update id for the specific proxy write request + * @region_index: Region id for the proxy write request + * @offset: Offset of the write request according to the base address of the + * region + * @value: Value that is requested to be written with the proxy write request + */ +struct ia_css_proxy_write_queue_token { + aligned_uint32(uint32_t, request_id); + aligned_uint32(uint32_t, region_index); + aligned_uint32(uint32_t, offset); + aligned_uint32(uint32_t, value); +}; + +/* From here on type defines not coming from the ISYSAPI interface */ + +/** + 
* struct resp_queue_token + */ +struct resp_queue_token { + aligned_struct(struct ia_css_isys_resp_info_comm, resp_info); +}; + +/** + * struct send_queue_token + */ +struct send_queue_token { + aligned_uint64(ia_css_return_token, buf_handle); + aligned_uint32(ia_css_input_buffer_css_address, payload); + aligned_uint16(enum ia_css_isys_send_type, send_type); + aligned_uint16(unsigned int, stream_id); +}; + +/** + * struct proxy_resp_queue_token + */ +struct proxy_resp_queue_token { + aligned_struct(struct ia_css_isys_proxy_resp_info_comm, + proxy_resp_info); +}; + +/** + * struct proxy_send_queue_token + */ +struct proxy_send_queue_token { + aligned_uint32(uint32_t, request_id); + aligned_uint32(uint32_t, region_index); + aligned_uint32(uint32_t, offset); + aligned_uint32(uint32_t, value); +}; + +#endif /* __IA_CSS_ISYS_FW_BRIDGED_TYPES_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/isysapi/interface/ia_css_isysapi.h b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/isysapi/interface/ia_css_isysapi.h new file mode 100644 index 0000000000000..abbc8b8d26ed8 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/isysapi/interface/ia_css_isysapi.h @@ -0,0 +1,321 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+*/ + +#ifndef __IA_CSS_ISYSAPI_H +#define __IA_CSS_ISYSAPI_H + + +/* The following is needed for the function arguments */ +#include "ia_css_isysapi_types.h" + +/* To define the HANDLE */ +#include "type_support.h" + + +/** + * ia_css_isys_device_open() - configure ISYS device + * @ context : device handle output parameter + * @config: device configuration data struct ptr as input parameter, + * read only by css fw until function return + * Ownership, ISYS will only access read my_device during fct call + * Prepares and Sends to PG server (SP) the syscom and isys context + * Executes the host level 0 and 1 boot sequence and starts the PG server (SP) + * All streams must be stopped when calling ia_css_isys_device_open() + * + * Return: int type error code (errno.h) + */ +#if HAS_DUAL_CMD_CTX_SUPPORT +extern int ia_css_isys_context_create( + HANDLE * context, + const struct ia_css_isys_device_cfg_data *config +); +extern int ia_css_isys_context_store_dmem( + const HANDLE *context, + const struct ia_css_isys_device_cfg_data *config +); +extern bool ia_css_isys_ab_spc_ready( + HANDLE *context +); +extern int ia_css_isys_device_open( + const struct ia_css_isys_device_cfg_data *config +); +#else +extern int ia_css_isys_device_open( + HANDLE * context, + const struct ia_css_isys_device_cfg_data *config +); +#endif + +/** + * ia_css_isys_device_open_ready() - Complete ISYS device configuration + * @ context : device handle output parameter + * read only by css fw until function return + * Requires the boot failure to be completed before it can return + * successfully (includes syscom and isys context) + * Initialise Host/ISYS messaging queues + * Must be called multiple times until it succeeds or it is determined by + * the driver that the boot sequence has failed. 
+ * All streams must be stopped when calling ia_css_isys_device_open() + * + * Return: int type error code (errno.h) + */ +extern int ia_css_isys_device_open_ready( + HANDLE context +); + + /** + * ia_css_isys_stream_open() - open and configure a virtual stream + * @ stream_handle: stream handle + * @ stream_cfg: stream configuration data struct pointer, which is + * "read only" by ISYS until function return + * ownership, ISYS will only read access stream_cfg during fct call + * Pre-conditions: + * Any Isys/Ssys interface changes must call ia_css_isys_stream_open() + * Post-condition: + * On successful call, ISYS hardware resource (IBFctrl, ISL, DMAs) + * are acquired and ISYS server is able to handle stream specific commands + * Return: int type error code (errno.h) + */ +extern int ia_css_isys_stream_open( + HANDLE context, + const unsigned int stream_handle, + const struct ia_css_isys_stream_cfg_data *stream_cfg +); + +/** + * ia_css_isys_stream_close() - close virtual stream + * @ stream_handle: stream identifier + * release ISYS resources by freeing up stream HW resources + * output pin buffers ownership is returned to the driver + * Return: int type error code (errno.h) + */ +extern int ia_css_isys_stream_close( + HANDLE context, + const unsigned int stream_handle +); + +/** + * ia_css_isys_stream_start() - starts handling a mipi virtual stream + * @ stream_handle: stream identifier + * @next_frame: + * if next_frame != NULL: apply next_frame + * settings asynchronously and start stream + * This mode ensures that the first frame is captured + * and thus a minimal start up latency + * (preconditions: sensor streaming must be switched off) + * + * if next_frame == NULL: sensor can be in a streaming state, + * all capture indicates commands will be + * processed synchronously (e.g. 
on mipi SOF events) + * + * To be called once ia_css_isys_stream_open() successfully called + * On success, the stream's HW resources are in active state + * + * Object ownership: During this function call, + * next_frame struct must be read but not modified by the ISYS, + * and in addition the driver is not allowed to modify it + * on function exit next_frame ownership is returned to + * the driver and is no longer accessed by ISYS + * next_frame contains a collection of + * ia_css_isys_output_pin * and ia_css_isys_input_pin * + * which point to the frame's "output/input pin info & data buffers", + * + * Upon the ia_css_isys_stream_start() call, + * ia_css_isys_output_pin* or ia_css_isys_input_pin* + * will now be owned by the ISYS + * these ptr will enable runtime/dynamic ISYS configuration and also + * to store and write captured payload data + * at the address specified in ia_css_isys_output_pin_payload + * These ptrs should no longer be accessed by any other + * code until (ia_css_isys_output_pin) gets handed + * back to the driver via the response mechanism + * ia_css_isys_stream_handle_response() + * the driver is responsible for providing valid + * ia_css_isys_output_pin* or ia_css_isys_input_pin* + * Pointers set to NULL will simply not be used by the ISYS + * + * Return: int type error code (errno.h) + */ +extern int ia_css_isys_stream_start( + HANDLE context, + const unsigned int stream_handle, + const struct ia_css_isys_frame_buff_set *next_frame +); + +/** + * ia_css_isys_stream_stop() - Stops a mipi virtual stream + * @ stream_handle: stream identifier + * stop both accepting new commands and processing + * submitted capture indication commands + * Support for Secure Touch + * Precondition: stream must be started + * Return: int type error code (errno.h) + */ +extern int ia_css_isys_stream_stop( + HANDLE context, + const unsigned int stream_handle +); + +/** + * ia_css_isys_stream_flush() - stops a mipi virtual stream but + * completes processing cmd 
backlog + * @ stream_handle: stream identifier + * stop accepting commands, but process + * the already submitted capture indicates + * Precondition: stream must be started + * Return: int type error code (errno.h) + */ +extern int ia_css_isys_stream_flush( + HANDLE context, + const unsigned int stream_handle +); + +/** + * ia_css_isys_stream_capture_indication() + * captures "next frame" on stream_handle + * @ stream_handle: stream identifier + * @ next_frame: frame pin payloads are provided atomically + * purpose: stream capture new frame command, Successfull calls will + * result in frame output pins being captured + * + * To be called once ia_css_isys_stream_start() is successly called + * On success, the stream's HW resources are in active state + * + * Object ownership: During this function call, + * next_frame struct must be read but not modified by the ISYS, + * and in addition the driver is not allowed to modify it + * on function exit next_frame ownership is returned to + * the driver and is no longer accesses by iSYS + * next_frame contains a collection of + * ia_css_isys_output_pin * and ia_css_isys_input_pin * + * which point to the frame's "output/input pin info & data buffers", + * + * Upon the ia_css_isys_stream_capture_indication() call, + * ia_css_isys_output_pin* or ia_css_isys_input_pin* + * will now be owned by the ISYS + * these ptr will enable runtime/dynamic ISYS configuration and also + * to store and write captured payload data + * at the address specified in ia_css_isys_output_pin_payload + * These ptrs should no longer be accessed by any other + * code until (ia_css_isys_output_pin) gets handed + * back to the driver via the response mechanism + * ia_css_isys_stream_handle_response() + * the driver is responsible for providing valid + * ia_css_isys_output_pin* or ia_css_isys_output_pin* + * Pointers set to NULL will simply not be used by the ISYS, and this + * refers specifically the following cases: + * - output pins from SOC path if 
the same datatype is also passed into ISAPF + * path or it has active MIPI output (not NULL) + * - full resolution pin from ISA (but not when bypassing ISA) + * - scaled pin from ISA (bypassing ISA for scaled pin is impossible) + * - output pins from MIPI path but only when the same datatype is also + * either forwarded to the ISAPF path based on the stream configuration + * (it is ok if the second output pin of this datatype is also skipped) + * or it has an active SOC output (not NULL) + * + * Return: int type error code (errno.h) + */ +extern int ia_css_isys_stream_capture_indication( + HANDLE context, + const unsigned int stream_handle, + const struct ia_css_isys_frame_buff_set *next_frame +); + +/** + * ia_css_isys_stream_handle_response() - handle ISYS responses + * @received_response: provides response info from the + * "next response element" from ISYS server + * received_response will be written to during the fct call and + * can be read by the drv once fct is returned + * + * purpose: Allows the client to handle received ISYS responses + * Upon an IRQ event, the driver will call ia_css_isys_stream_handle_response() + * until the queue is emptied + * Responses returning IA_CSS_ISYS_RESP_TYPE_PIN_DATA_READY to the driver will + * hand back ia_css_isys_output_pin ownership to the drv + * ISYS FW will not write/read access ia_css_isys_output_pin + * once it belongs to the driver + * Pre-conditions: ISYS client must have sent a CMDs to ISYS srv + * Return: int type error code (errno.h) + */ +extern int ia_css_isys_stream_handle_response( + HANDLE context, + struct ia_css_isys_resp_info *received_response +); + +/** + * ia_css_isys_device_close() - close ISYS device + * @context : device handle output parameter + * Purpose: Request for the cell to close + * All streams must be stopped when calling ia_css_isys_device_close() + * + * Return: int type error code (errno.h) + */ +#if HAS_DUAL_CMD_CTX_SUPPORT +extern int ia_css_isys_context_destroy( + HANDLE context 
+); +extern void ia_css_isys_device_close( + void +); +#else +extern int ia_css_isys_device_close( + HANDLE context +); +#endif + +/** + * ia_css_isys_device_release() - release ISYS device + * @context : device handle output parameter + * @force: forces release or verifies the state before releasing + * Purpose: Free context forcibly or not + * Must be called after ia_css_isys_device_close() + * + * Return: int type error code (errno.h) + */ +extern int ia_css_isys_device_release( + HANDLE context, + unsigned int force +); + +/** + * ia_css_isys_proxy_write_req() - issue a isys proxy write request + * @context : device handle output parameter + * Purpose: Issues a write request for the regions that are exposed + * by proxy interface + * Can be called any time between ia_css_isys_device_open + * ia_css_isys_device_close + * + * Return: int type error code (errno.h) + */ +extern int ia_css_isys_proxy_write_req( + HANDLE context, + const struct ia_css_proxy_write_req_val *write_req_val +); + +/** + * ia_css_isys_proxy_handle_write_response() + * - Handles isys proxy write request responses + * @context : device handle output parameter + * Purpose: Handling the responses that are created by FW upon the completion + * proxy interface write request + * + * Return: int type error code (errno.h) + */ +extern int ia_css_isys_proxy_handle_write_response( + HANDLE context, + struct ia_css_proxy_write_req_resp *received_response +); + +#endif /* __IA_CSS_ISYSAPI_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/isysapi/interface/ia_css_isysapi_fw_types.h b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/isysapi/interface/ia_css_isysapi_fw_types.h new file mode 100644 index 0000000000000..938f726d1cfb8 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/isysapi/interface/ia_css_isysapi_fw_types.h @@ -0,0 +1,512 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. 
+ * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ + +#ifndef __IA_CSS_ISYSAPI_FW_TYPES_H +#define __IA_CSS_ISYSAPI_FW_TYPES_H + + +/* Max number of Input/Output Pins */ +#define MAX_IPINS (4) +/* worst case is ISA use where a single input pin produces: +* Mipi output, NS Pixel Output, and Scaled Pixel Output. +* This is how the 2 is calculated +*/ +#define MAX_OPINS ((MAX_IPINS) + 2) + +/* Max number of supported virtual streams */ +#define STREAM_ID_MAX (8) + +/* Aligned with the approach of having one dedicated per stream */ +#define N_MAX_MSG_SEND_QUEUES (STREAM_ID_MAX) +/* Single return queue for all streams/commands type */ +#define N_MAX_MSG_RECV_QUEUES (1) +/* Single device queue for high priority commands (bypass in-order queue) */ +#define N_MAX_DEV_SEND_QUEUES (1) +/* Single dedicated send queue for proxy interface */ +#define N_MAX_PROXY_SEND_QUEUES (1) +/* Single dedicated recv queue for proxy interface */ +#define N_MAX_PROXY_RECV_QUEUES (1) +/* Send queues layout */ +#define BASE_PROXY_SEND_QUEUES (0) +#define BASE_DEV_SEND_QUEUES (BASE_PROXY_SEND_QUEUES + N_MAX_PROXY_SEND_QUEUES) +#define BASE_MSG_SEND_QUEUES (BASE_DEV_SEND_QUEUES + N_MAX_DEV_SEND_QUEUES) +#define N_MAX_SEND_QUEUES (BASE_MSG_SEND_QUEUES + N_MAX_MSG_SEND_QUEUES) +/* Recv queues layout */ +#define BASE_PROXY_RECV_QUEUES (0) +#define BASE_MSG_RECV_QUEUES (BASE_PROXY_RECV_QUEUES + N_MAX_PROXY_RECV_QUEUES) +#define N_MAX_RECV_QUEUES (BASE_MSG_RECV_QUEUES + N_MAX_MSG_RECV_QUEUES) + +#define MAX_QUEUE_SIZE (256) +#define MIN_QUEUE_SIZE (1) + +/* Consider 1 slot per stream since driver 
is not expected to pipeline + * device commands for the same stream */ +#define DEV_SEND_QUEUE_SIZE (STREAM_ID_MAX) + +/* Max number of supported SRAM buffer partitions */ +/* It refers to the size of stream partitions */ +/* These partitions are further subpartitioned internally */ +/* by the FW, but by declaring statically the stream */ +/* partitions we solve the buffer fragmentation issue */ +#define NOF_SRAM_BLOCKS_MAX (STREAM_ID_MAX) + +/* Max number of supported input pins routed in ISL */ +#define MAX_IPINS_IN_ISL (2) + +/* Max number of planes for frame formats supported by the FW */ +#define PIN_PLANES_MAX (4) + +/** + * enum ia_css_isys_resp_type + */ +enum ia_css_isys_resp_type { + IA_CSS_ISYS_RESP_TYPE_STREAM_OPEN_DONE = 0, + IA_CSS_ISYS_RESP_TYPE_STREAM_START_ACK, + IA_CSS_ISYS_RESP_TYPE_STREAM_START_AND_CAPTURE_ACK, + IA_CSS_ISYS_RESP_TYPE_STREAM_CAPTURE_ACK, + IA_CSS_ISYS_RESP_TYPE_STREAM_STOP_ACK, + IA_CSS_ISYS_RESP_TYPE_STREAM_FLUSH_ACK, + IA_CSS_ISYS_RESP_TYPE_STREAM_CLOSE_ACK, + IA_CSS_ISYS_RESP_TYPE_PIN_DATA_READY, + IA_CSS_ISYS_RESP_TYPE_PIN_DATA_WATERMARK, + IA_CSS_ISYS_RESP_TYPE_FRAME_SOF, + IA_CSS_ISYS_RESP_TYPE_FRAME_EOF, + IA_CSS_ISYS_RESP_TYPE_STREAM_START_AND_CAPTURE_DONE, + IA_CSS_ISYS_RESP_TYPE_STREAM_CAPTURE_DONE, + IA_CSS_ISYS_RESP_TYPE_PIN_DATA_SKIPPED, + IA_CSS_ISYS_RESP_TYPE_STREAM_CAPTURE_SKIPPED, + IA_CSS_ISYS_RESP_TYPE_FRAME_SOF_DISCARDED, + IA_CSS_ISYS_RESP_TYPE_FRAME_EOF_DISCARDED, + IA_CSS_ISYS_RESP_TYPE_STATS_DATA_READY, + N_IA_CSS_ISYS_RESP_TYPE +}; + +/** + * enum ia_css_isys_send_type + */ +enum ia_css_isys_send_type { + IA_CSS_ISYS_SEND_TYPE_STREAM_OPEN = 0, + IA_CSS_ISYS_SEND_TYPE_STREAM_START, + IA_CSS_ISYS_SEND_TYPE_STREAM_START_AND_CAPTURE, + IA_CSS_ISYS_SEND_TYPE_STREAM_CAPTURE, + IA_CSS_ISYS_SEND_TYPE_STREAM_STOP, + IA_CSS_ISYS_SEND_TYPE_STREAM_FLUSH, + IA_CSS_ISYS_SEND_TYPE_STREAM_CLOSE, + N_IA_CSS_ISYS_SEND_TYPE +}; + +/** + * enum ia_css_isys_queue_type + */ +enum ia_css_isys_queue_type { + 
IA_CSS_ISYS_QUEUE_TYPE_PROXY = 0, + IA_CSS_ISYS_QUEUE_TYPE_DEV, + IA_CSS_ISYS_QUEUE_TYPE_MSG, + N_IA_CSS_ISYS_QUEUE_TYPE +}; + +/** + * enum ia_css_isys_stream_source: Specifies a source for a stream + */ +enum ia_css_isys_stream_source { + IA_CSS_ISYS_STREAM_SRC_PORT_0 = 0, + IA_CSS_ISYS_STREAM_SRC_PORT_1, + IA_CSS_ISYS_STREAM_SRC_PORT_2, + IA_CSS_ISYS_STREAM_SRC_PORT_3, + IA_CSS_ISYS_STREAM_SRC_PORT_4, + IA_CSS_ISYS_STREAM_SRC_PORT_5, + IA_CSS_ISYS_STREAM_SRC_PORT_6, + IA_CSS_ISYS_STREAM_SRC_PORT_7, + IA_CSS_ISYS_STREAM_SRC_PORT_8, + IA_CSS_ISYS_STREAM_SRC_PORT_9, + IA_CSS_ISYS_STREAM_SRC_PORT_10, + IA_CSS_ISYS_STREAM_SRC_PORT_11, + IA_CSS_ISYS_STREAM_SRC_PORT_12, + IA_CSS_ISYS_STREAM_SRC_PORT_13, + IA_CSS_ISYS_STREAM_SRC_PORT_14, + IA_CSS_ISYS_STREAM_SRC_PORT_15, + IA_CSS_ISYS_STREAM_SRC_MIPIGEN_0, + IA_CSS_ISYS_STREAM_SRC_MIPIGEN_1, + IA_CSS_ISYS_STREAM_SRC_MIPIGEN_2, + IA_CSS_ISYS_STREAM_SRC_MIPIGEN_3, + IA_CSS_ISYS_STREAM_SRC_MIPIGEN_4, + IA_CSS_ISYS_STREAM_SRC_MIPIGEN_5, + IA_CSS_ISYS_STREAM_SRC_MIPIGEN_6, + IA_CSS_ISYS_STREAM_SRC_MIPIGEN_7, + IA_CSS_ISYS_STREAM_SRC_MIPIGEN_8, + IA_CSS_ISYS_STREAM_SRC_MIPIGEN_9, + N_IA_CSS_ISYS_STREAM_SRC +}; + +#define IA_CSS_ISYS_STREAM_SRC_CSI2_PORT0 IA_CSS_ISYS_STREAM_SRC_PORT_0 +#define IA_CSS_ISYS_STREAM_SRC_CSI2_PORT1 IA_CSS_ISYS_STREAM_SRC_PORT_1 +#define IA_CSS_ISYS_STREAM_SRC_CSI2_PORT2 IA_CSS_ISYS_STREAM_SRC_PORT_2 +#define IA_CSS_ISYS_STREAM_SRC_CSI2_PORT3 IA_CSS_ISYS_STREAM_SRC_PORT_3 + +#define IA_CSS_ISYS_STREAM_SRC_CSI2_3PH_PORTA IA_CSS_ISYS_STREAM_SRC_PORT_4 +#define IA_CSS_ISYS_STREAM_SRC_CSI2_3PH_PORTB IA_CSS_ISYS_STREAM_SRC_PORT_5 +#define IA_CSS_ISYS_STREAM_SRC_CSI2_3PH_CPHY_PORT0 IA_CSS_ISYS_STREAM_SRC_PORT_6 +#define IA_CSS_ISYS_STREAM_SRC_CSI2_3PH_CPHY_PORT1 IA_CSS_ISYS_STREAM_SRC_PORT_7 +#define IA_CSS_ISYS_STREAM_SRC_CSI2_3PH_CPHY_PORT2 IA_CSS_ISYS_STREAM_SRC_PORT_8 +#define IA_CSS_ISYS_STREAM_SRC_CSI2_3PH_CPHY_PORT3 IA_CSS_ISYS_STREAM_SRC_PORT_9 + +#define IA_CSS_ISYS_STREAM_SRC_MIPIGEN_PORT0 
IA_CSS_ISYS_STREAM_SRC_MIPIGEN_0 +#define IA_CSS_ISYS_STREAM_SRC_MIPIGEN_PORT1 IA_CSS_ISYS_STREAM_SRC_MIPIGEN_1 + +/** + * enum ia_css_isys_mipi_vc: MIPI csi2 spec + * supports upto 4 virtual per physical channel + */ +enum ia_css_isys_mipi_vc { + IA_CSS_ISYS_MIPI_VC_0 = 0, + IA_CSS_ISYS_MIPI_VC_1, + IA_CSS_ISYS_MIPI_VC_2, + IA_CSS_ISYS_MIPI_VC_3, + N_IA_CSS_ISYS_MIPI_VC +}; + +/** + * Supported Pixel Frame formats. Expandable if needed + */ +enum ia_css_isys_frame_format_type { + IA_CSS_ISYS_FRAME_FORMAT_NV11 = 0,/* 12 bit YUV 411, Y, UV plane */ + IA_CSS_ISYS_FRAME_FORMAT_NV12,/* 12 bit YUV 420, Y, UV plane */ + IA_CSS_ISYS_FRAME_FORMAT_NV12_16,/* 16 bit YUV 420, Y, UV plane */ + IA_CSS_ISYS_FRAME_FORMAT_NV12_TILEY,/* 12 bit YUV 420, Intel + proprietary tiled format, + TileY + */ + IA_CSS_ISYS_FRAME_FORMAT_NV16,/* 16 bit YUV 422, Y, UV plane */ + IA_CSS_ISYS_FRAME_FORMAT_NV21,/* 12 bit YUV 420, Y, VU plane */ + IA_CSS_ISYS_FRAME_FORMAT_NV61,/* 16 bit YUV 422, Y, VU plane */ + IA_CSS_ISYS_FRAME_FORMAT_YV12,/* 12 bit YUV 420, Y, V, U plane */ + IA_CSS_ISYS_FRAME_FORMAT_YV16,/* 16 bit YUV 422, Y, V, U plane */ + IA_CSS_ISYS_FRAME_FORMAT_YUV420,/* 12 bit YUV 420, Y, U, V plane */ + IA_CSS_ISYS_FRAME_FORMAT_YUV420_10,/* yuv420, 10 bits per subpixel */ + IA_CSS_ISYS_FRAME_FORMAT_YUV420_12,/* yuv420, 12 bits per subpixel */ + IA_CSS_ISYS_FRAME_FORMAT_YUV420_14,/* yuv420, 14 bits per subpixel */ + IA_CSS_ISYS_FRAME_FORMAT_YUV420_16,/* yuv420, 16 bits per subpixel */ + IA_CSS_ISYS_FRAME_FORMAT_YUV422,/* 16 bit YUV 422, Y, U, V plane */ + IA_CSS_ISYS_FRAME_FORMAT_YUV422_16,/* yuv422, 16 bits per subpixel */ + IA_CSS_ISYS_FRAME_FORMAT_UYVY,/* 16 bit YUV 422, UYVY interleaved */ + IA_CSS_ISYS_FRAME_FORMAT_YUYV,/* 16 bit YUV 422, YUYV interleaved */ + IA_CSS_ISYS_FRAME_FORMAT_YUV444,/* 24 bit YUV 444, Y, U, V plane */ + IA_CSS_ISYS_FRAME_FORMAT_YUV_LINE,/* Internal format, 2 y lines + followed by a uvinterleaved line + */ + IA_CSS_ISYS_FRAME_FORMAT_RAW8, /* RAW8, 1 plane */ + 
IA_CSS_ISYS_FRAME_FORMAT_RAW10, /* RAW10, 1 plane */ + IA_CSS_ISYS_FRAME_FORMAT_RAW12, /* RAW12, 1 plane */ + IA_CSS_ISYS_FRAME_FORMAT_RAW14, /* RAW14, 1 plane */ + IA_CSS_ISYS_FRAME_FORMAT_RAW16, /* RAW16, 1 plane */ + IA_CSS_ISYS_FRAME_FORMAT_RGB565,/* 16 bit RGB, 1 plane. Each 3 sub + pixels are packed into one 16 bit + value, 5 bits for R, 6 bits for G + and 5 bits for B. + */ + IA_CSS_ISYS_FRAME_FORMAT_PLANAR_RGB888, /* 24 bit RGB, 3 planes */ + IA_CSS_ISYS_FRAME_FORMAT_RGBA888,/* 32 bit RGBA, 1 plane, + A=Alpha (alpha is unused) + */ + IA_CSS_ISYS_FRAME_FORMAT_QPLANE6,/* Internal, for advanced ISP */ + IA_CSS_ISYS_FRAME_FORMAT_BINARY_8,/* byte stream, used for jpeg. */ + N_IA_CSS_ISYS_FRAME_FORMAT +}; +/* Temporary for driver compatibility */ +#define IA_CSS_ISYS_FRAME_FORMAT_RAW (IA_CSS_ISYS_FRAME_FORMAT_RAW16) + + +/** + * Supported MIPI data type. Keep in sync array in ia_css_isys_private.c + */ +enum ia_css_isys_mipi_data_type { + /** SYNCHRONIZATION SHORT PACKET DATA TYPES */ + IA_CSS_ISYS_MIPI_DATA_TYPE_FRAME_START_CODE = 0x00, + IA_CSS_ISYS_MIPI_DATA_TYPE_FRAME_END_CODE = 0x01, + IA_CSS_ISYS_MIPI_DATA_TYPE_LINE_START_CODE = 0x02, /* Optional */ + IA_CSS_ISYS_MIPI_DATA_TYPE_LINE_END_CODE = 0x03, /* Optional */ + /** Reserved 0x04-0x07 */ + IA_CSS_ISYS_MIPI_DATA_TYPE_RESERVED_0x04 = 0x04, + IA_CSS_ISYS_MIPI_DATA_TYPE_RESERVED_0x05 = 0x05, + IA_CSS_ISYS_MIPI_DATA_TYPE_RESERVED_0x06 = 0x06, + IA_CSS_ISYS_MIPI_DATA_TYPE_RESERVED_0x07 = 0x07, + /** GENERIC SHORT PACKET DATA TYPES */ + /** They are used to keep the timing information for the + * opening/closing of shutters, triggering of flashes and etc. 
+ */ + /* Generic Short Packet Code 1 */ + IA_CSS_ISYS_MIPI_DATA_TYPE_GENERIC_SHORT1 = 0x08, + /* Generic Short Packet Code 2 */ + IA_CSS_ISYS_MIPI_DATA_TYPE_GENERIC_SHORT2 = 0x09, + /* Generic Short Packet Code 3 */ + IA_CSS_ISYS_MIPI_DATA_TYPE_GENERIC_SHORT3 = 0x0A, + /* Generic Short Packet Code 4 */ + IA_CSS_ISYS_MIPI_DATA_TYPE_GENERIC_SHORT4 = 0x0B, + /* Generic Short Packet Code 5 */ + IA_CSS_ISYS_MIPI_DATA_TYPE_GENERIC_SHORT5 = 0x0C, + /* Generic Short Packet Code 6 */ + IA_CSS_ISYS_MIPI_DATA_TYPE_GENERIC_SHORT6 = 0x0D, + /* Generic Short Packet Code 7 */ + IA_CSS_ISYS_MIPI_DATA_TYPE_GENERIC_SHORT7 = 0x0E, + /* Generic Short Packet Code 8 */ + IA_CSS_ISYS_MIPI_DATA_TYPE_GENERIC_SHORT8 = 0x0F, + /** GENERIC LONG PACKET DATA TYPES */ + IA_CSS_ISYS_MIPI_DATA_TYPE_NULL = 0x10, + IA_CSS_ISYS_MIPI_DATA_TYPE_BLANKING_DATA = 0x11, + /* Embedded 8-bit non Image Data */ + IA_CSS_ISYS_MIPI_DATA_TYPE_EMBEDDED = 0x12, + /** Reserved 0x13-0x17 */ + IA_CSS_ISYS_MIPI_DATA_TYPE_RESERVED_0x13 = 0x13, + IA_CSS_ISYS_MIPI_DATA_TYPE_RESERVED_0x14 = 0x14, + IA_CSS_ISYS_MIPI_DATA_TYPE_RESERVED_0x15 = 0x15, + IA_CSS_ISYS_MIPI_DATA_TYPE_RESERVED_0x16 = 0x16, + IA_CSS_ISYS_MIPI_DATA_TYPE_RESERVED_0x17 = 0x17, + /** YUV DATA TYPES */ + /* 8 bits per subpixel */ + IA_CSS_ISYS_MIPI_DATA_TYPE_YUV420_8 = 0x18, + /* 10 bits per subpixel */ + IA_CSS_ISYS_MIPI_DATA_TYPE_YUV420_10 = 0x19, + /* 8 bits per subpixel */ + IA_CSS_ISYS_MIPI_DATA_TYPE_YUV420_8_LEGACY = 0x1A, + /** Reserved 0x1B */ + IA_CSS_ISYS_MIPI_DATA_TYPE_RESERVED_0x1B = 0x1B, + /* YUV420 8-bit (Chroma Shifted Pixel Sampling) */ + IA_CSS_ISYS_MIPI_DATA_TYPE_YUV420_8_SHIFT = 0x1C, + /* YUV420 10-bit (Chroma Shifted Pixel Sampling) */ + IA_CSS_ISYS_MIPI_DATA_TYPE_YUV420_10_SHIFT = 0x1D, + /* UYVY..UVYV, 8 bits per subpixel */ + IA_CSS_ISYS_MIPI_DATA_TYPE_YUV422_8 = 0x1E, + /* UYVY..UVYV, 10 bits per subpixel */ + IA_CSS_ISYS_MIPI_DATA_TYPE_YUV422_10 = 0x1F, + /** RGB DATA TYPES */ + IA_CSS_ISYS_MIPI_DATA_TYPE_RGB_444 = 0x20, + /* 
BGR..BGR, 5 bits per subpixel */ + IA_CSS_ISYS_MIPI_DATA_TYPE_RGB_555 = 0x21, + /* BGR..BGR, 5 bits B and R, 6 bits G */ + IA_CSS_ISYS_MIPI_DATA_TYPE_RGB_565 = 0x22, + /* BGR..BGR, 6 bits per subpixel */ + IA_CSS_ISYS_MIPI_DATA_TYPE_RGB_666 = 0x23, + /* BGR..BGR, 8 bits per subpixel */ + IA_CSS_ISYS_MIPI_DATA_TYPE_RGB_888 = 0x24, + /** Reserved 0x25-0x27 */ + IA_CSS_ISYS_MIPI_DATA_TYPE_RESERVED_0x25 = 0x25, + IA_CSS_ISYS_MIPI_DATA_TYPE_RESERVED_0x26 = 0x26, + IA_CSS_ISYS_MIPI_DATA_TYPE_RESERVED_0x27 = 0x27, + /** RAW DATA TYPES */ + /* RAW data, 6 bits per pixel */ + IA_CSS_ISYS_MIPI_DATA_TYPE_RAW_6 = 0x28, + /* RAW data, 7 bits per pixel */ + IA_CSS_ISYS_MIPI_DATA_TYPE_RAW_7 = 0x29, + /* RAW data, 8 bits per pixel */ + IA_CSS_ISYS_MIPI_DATA_TYPE_RAW_8 = 0x2A, + /* RAW data, 10 bits per pixel */ + IA_CSS_ISYS_MIPI_DATA_TYPE_RAW_10 = 0x2B, + /* RAW data, 12 bits per pixel */ + IA_CSS_ISYS_MIPI_DATA_TYPE_RAW_12 = 0x2C, + /* RAW data, 14 bits per pixel */ + IA_CSS_ISYS_MIPI_DATA_TYPE_RAW_14 = 0x2D, + /** Reserved 0x2E-2F are used with assigned meaning */ + /* RAW data, 16 bits per pixel, not specified in CSI-MIPI standard */ + IA_CSS_ISYS_MIPI_DATA_TYPE_RAW_16 = 0x2E, + /* Binary byte stream, which is target at JPEG, not specified in + * CSI-MIPI standard + */ + IA_CSS_ISYS_MIPI_DATA_TYPE_BINARY_8 = 0x2F, + /** USER DEFINED 8-BIT DATA TYPES */ + /** For example, the data transmitter (e.g. the SoC sensor) can keep + * the JPEG data as the User Defined Data Type 4 and the MPEG data as + * the User Defined Data Type 7. 
+ */ + /* User defined 8-bit data type 1 */ + IA_CSS_ISYS_MIPI_DATA_TYPE_USER_DEF1 = 0x30, + /* User defined 8-bit data type 2 */ + IA_CSS_ISYS_MIPI_DATA_TYPE_USER_DEF2 = 0x31, + /* User defined 8-bit data type 3 */ + IA_CSS_ISYS_MIPI_DATA_TYPE_USER_DEF3 = 0x32, + /* User defined 8-bit data type 4 */ + IA_CSS_ISYS_MIPI_DATA_TYPE_USER_DEF4 = 0x33, + /* User defined 8-bit data type 5 */ + IA_CSS_ISYS_MIPI_DATA_TYPE_USER_DEF5 = 0x34, + /* User defined 8-bit data type 6 */ + IA_CSS_ISYS_MIPI_DATA_TYPE_USER_DEF6 = 0x35, + /* User defined 8-bit data type 7 */ + IA_CSS_ISYS_MIPI_DATA_TYPE_USER_DEF7 = 0x36, + /* User defined 8-bit data type 8 */ + IA_CSS_ISYS_MIPI_DATA_TYPE_USER_DEF8 = 0x37, + /** Reserved 0x38-0x3F */ + IA_CSS_ISYS_MIPI_DATA_TYPE_RESERVED_0x38 = 0x38, + IA_CSS_ISYS_MIPI_DATA_TYPE_RESERVED_0x39 = 0x39, + IA_CSS_ISYS_MIPI_DATA_TYPE_RESERVED_0x3A = 0x3A, + IA_CSS_ISYS_MIPI_DATA_TYPE_RESERVED_0x3B = 0x3B, + IA_CSS_ISYS_MIPI_DATA_TYPE_RESERVED_0x3C = 0x3C, + IA_CSS_ISYS_MIPI_DATA_TYPE_RESERVED_0x3D = 0x3D, + IA_CSS_ISYS_MIPI_DATA_TYPE_RESERVED_0x3E = 0x3E, + IA_CSS_ISYS_MIPI_DATA_TYPE_RESERVED_0x3F = 0x3F, + + /* Keep always last and max value */ + N_IA_CSS_ISYS_MIPI_DATA_TYPE = 0x40 +}; + +/** enum ia_css_isys_pin_type: output pin buffer types. 
+ * Buffers can be queued and de-queued to hand them over between IA and ISYS + */ +enum ia_css_isys_pin_type { + /* Captured as MIPI packets */ + IA_CSS_ISYS_PIN_TYPE_MIPI = 0, + /* Captured through the ISApf (with/without ISA) + * and the non-scaled output path + */ + IA_CSS_ISYS_PIN_TYPE_RAW_NS, + /* Captured through the ISApf + ISA and the scaled output path */ + IA_CSS_ISYS_PIN_TYPE_RAW_S, + /* Captured through the SoC path */ + IA_CSS_ISYS_PIN_TYPE_RAW_SOC, + /* Reserved for future use, maybe short packets */ + IA_CSS_ISYS_PIN_TYPE_METADATA_0, + /* Reserved for future use */ + IA_CSS_ISYS_PIN_TYPE_METADATA_1, + /* Legacy (non-PIV2), used for the AWB stats */ + IA_CSS_ISYS_PIN_TYPE_AWB_STATS, + /* Legacy (non-PIV2), used for the AF stats */ + IA_CSS_ISYS_PIN_TYPE_AF_STATS, + /* Legacy (non-PIV2), used for the AE stats */ + IA_CSS_ISYS_PIN_TYPE_HIST_STATS, + /* Used for the PAF FF*/ + IA_CSS_ISYS_PIN_TYPE_PAF_FF, + /* Keep always last and max value */ + N_IA_CSS_ISYS_PIN_TYPE +}; + +/** + * enum ia_css_isys_isl_use. Describes the ISL/ISA use + * (ISAPF path in after BXT A0) + */ +enum ia_css_isys_isl_use { + IA_CSS_ISYS_USE_NO_ISL_NO_ISA = 0, + IA_CSS_ISYS_USE_SINGLE_DUAL_ISL, + IA_CSS_ISYS_USE_SINGLE_ISA, + N_IA_CSS_ISYS_USE +}; + +/** + * enum ia_css_isys_mipi_store_mode. Describes if long MIPI packets reach MIPI + * SRAM with the long packet header or not. + * if not, then only option is to capture it with pin type MIPI. + */ +enum ia_css_isys_mipi_store_mode { + IA_CSS_ISYS_MIPI_STORE_MODE_NORMAL = 0, + IA_CSS_ISYS_MIPI_STORE_MODE_DISCARD_LONG_HEADER, + N_IA_CSS_ISYS_MIPI_STORE_MODE +}; + +/** + * enum ia_css_isys_mipi_dt_rename_mode. Describes if long MIPI packets have + * DT with some other DT format. + */ +enum ia_css_isys_mipi_dt_rename_mode { + IA_CSS_ISYS_MIPI_DT_NO_RENAME = 0, + IA_CSS_ISYS_MIPI_DT_RENAMED_MODE, + N_IA_CSS_ISYS_MIPI_DT_MODE +}; + +/** + * enum ia_css_isys_type_paf. 
Describes the Type of PAF enabled + * (PAF path in after cnlB0) + */ +enum ia_css_isys_type_paf { + /* PAF data not present */ + IA_CSS_ISYS_TYPE_NO_PAF = 0, + /* Type 2 sensor types, PAF coming separately from Image Frame */ + /* PAF data in interleaved format(RLRL or LRLR)*/ + IA_CSS_ISYS_TYPE_INTERLEAVED_PAF, + /* PAF data in non-interleaved format(LL/RR or RR/LL) */ + IA_CSS_ISYS_TYPE_NON_INTERLEAVED_PAF, + /* Type 3 sensor types , PAF data embedded in Image Frame*/ + /* Frame Embedded PAF in interleaved format(RLRL or LRLR)*/ + IA_CSS_ISYS_TYPE_FRAME_EMB_INTERLEAVED_PAF, + /* Frame Embedded PAF non-interleaved format(LL/RR or RR/LL)*/ + IA_CSS_ISYS_TYPE_FRAME_EMB_NON_INTERLEAVED_PAF, + N_IA_CSS_ISYS_TYPE_PAF +}; + +/** + * enum ia_css_isys_cropping_location. Enumerates the cropping locations + * in ISYS + */ +enum ia_css_isys_cropping_location { + /* Cropping executed in ISAPF (mainly), ISAPF preproc (odd column) and + * MIPI STR2MMIO (odd row) + */ + IA_CSS_ISYS_CROPPING_LOCATION_PRE_ISA = 0, + /* BXT A0 legacy mode which will never be implemented */ + IA_CSS_ISYS_CROPPING_LOCATION_RESERVED_1, + /* Cropping executed in StreamPifConv in the ISA output for + * RAW_NS pin + */ + IA_CSS_ISYS_CROPPING_LOCATION_POST_ISA_NONSCALED, + /* Cropping executed in StreamScaledPifConv in the ISA output for + * RAW_S pin + */ + IA_CSS_ISYS_CROPPING_LOCATION_POST_ISA_SCALED, + N_IA_CSS_ISYS_CROPPING_LOCATION +}; + +/** + * enum ia_css_isys_resolution_info. Describes the resolution, required to + * setup the various ISA GP registers. + */ +enum ia_css_isys_resolution_info { + /* Non-Scaled ISA output resolution before the + * StreamPifConv cropping + */ + IA_CSS_ISYS_RESOLUTION_INFO_POST_ISA_NONSCALED = 0, + /* Scaled ISA output resolution before the + * StreamScaledPifConv cropping + */ + IA_CSS_ISYS_RESOLUTION_INFO_POST_ISA_SCALED, + N_IA_CSS_ISYS_RESOLUTION_INFO +}; + +/** + * enum ia_css_isys_error. 
Describes the error type detected by the FW + */ +enum ia_css_isys_error { + IA_CSS_ISYS_ERROR_NONE = 0, /* No details */ + IA_CSS_ISYS_ERROR_FW_INTERNAL_CONSISTENCY, /* enum */ + IA_CSS_ISYS_ERROR_HW_CONSISTENCY, /* enum */ + IA_CSS_ISYS_ERROR_DRIVER_INVALID_COMMAND_SEQUENCE, /* enum */ + IA_CSS_ISYS_ERROR_DRIVER_INVALID_DEVICE_CONFIGURATION, /* enum */ + IA_CSS_ISYS_ERROR_DRIVER_INVALID_STREAM_CONFIGURATION, /* enum */ + IA_CSS_ISYS_ERROR_DRIVER_INVALID_FRAME_CONFIGURATION, /* enum */ + IA_CSS_ISYS_ERROR_INSUFFICIENT_RESOURCES, /* enum */ + IA_CSS_ISYS_ERROR_HW_REPORTED_STR2MMIO, /* HW code */ + IA_CSS_ISYS_ERROR_HW_REPORTED_SIG2CIO, /* HW code */ + IA_CSS_ISYS_ERROR_SENSOR_FW_SYNC, /* enum */ + IA_CSS_ISYS_ERROR_STREAM_IN_SUSPENSION, /* FW code */ + IA_CSS_ISYS_ERROR_RESPONSE_QUEUE_FULL, /* FW code */ + N_IA_CSS_ISYS_ERROR +}; + +/** + * enum ia_css_proxy_error. Describes the error type for the proxy detected by + * the FW + */ +enum ia_css_proxy_error { + IA_CSS_PROXY_ERROR_NONE = 0, + IA_CSS_PROXY_ERROR_INVALID_WRITE_REGION, + IA_CSS_PROXY_ERROR_INVALID_WRITE_OFFSET, + N_IA_CSS_PROXY_ERROR +}; + +#endif /* __IA_CSS_ISYSAPI_FW_TYPES_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/isysapi/interface/ia_css_isysapi_fw_version.h b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/isysapi/interface/ia_css_isysapi_fw_version.h new file mode 100644 index 0000000000000..bc056157cedb6 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/isysapi/interface/ia_css_isysapi_fw_version.h @@ -0,0 +1,21 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. 
+ * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ + +#ifndef __IA_CSS_ISYSAPI_FW_VERSION_H +#define __IA_CSS_ISYSAPI_FW_VERSION_H + +/* ISYSAPI FW VERSION is taken from Makefile for FW tests */ +#define BXT_FW_RELEASE_VERSION ISYS_FIRMWARE_VERSION + +#endif /* __IA_CSS_ISYSAPI_FW_VERSION_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/isysapi/interface/ia_css_isysapi_proxy_region_defs.h b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/isysapi/interface/ia_css_isysapi_proxy_region_defs.h new file mode 100644 index 0000000000000..c002b33bdfaf0 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/isysapi/interface/ia_css_isysapi_proxy_region_defs.h @@ -0,0 +1,113 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ + +#ifndef __IA_CSS_ISYSAPI_PROXY_REGION_DEFS_H +#define __IA_CSS_ISYSAPI_PROXY_REGION_DEFS_H + +#include "ia_css_isysapi_proxy_region_types.h" + +/* + * Definitions for IPU4_B0_PROXY_INT + */ + +#if defined(IPU4_B0_PROXY_INT) + +/** + * enum ipu4_b0_ia_css_proxy_write_region. 
Provides the list of regions for ipu4B0 that + * can be accessed (for writing purpose) through the proxy interface + */ +enum ipu4_b0_ia_css_proxy_write_region { + IPU4_B0_IA_CSS_PROXY_WRITE_REGION_STR2MMIO_MIPI_0_ERROR_FILL_RATE = 0, + IPU4_B0_IA_CSS_PROXY_WRITE_REGION_STR2MMIO_MIPI_1_ERROR_FILL_RATE, + IPU4_B0_IA_CSS_PROXY_WRITE_REGION_STR2MMIO_MIPI_2_ERROR_FILL_RATE, + IPU4_B0_IA_CSS_PROXY_WRITE_REGION_STR2MMIO_MIPI_3_ERROR_FILL_RATE, + IPU4_B0_IA_CSS_PROXY_WRITE_REGION_STR2MMIO_MIPI_4_ERROR_FILL_RATE, + IPU4_B0_IA_CSS_PROXY_WRITE_REGION_STR2MMIO_MIPI_5_ERROR_FILL_RATE, + IPU4_B0_IA_CSS_PROXY_WRITE_REGION_STR2MMIO_MIPI_6_ERROR_FILL_RATE, + IPU4_B0_IA_CSS_PROXY_WRITE_REGION_STR2MMIO_MIPI_7_ERROR_FILL_RATE, + IPU4_B0_IA_CSS_PROXY_WRITE_REGION_STR2MMIO_MIPI_8_ERROR_FILL_RATE, + IPU4_B0_IA_CSS_PROXY_WRITE_REGION_STR2MMIO_MIPI_9_ERROR_FILL_RATE, + IPU4_B0_IA_CSS_PROXY_WRITE_REGION_GDA_IRQ_URGENT_THRESHOLD, + IPU4_B0_IA_CSS_PROXY_WRITE_REGION_GDA_IRQ_CRITICAL_THRESHOLD, + N_IPU4_B0_IA_CSS_PROXY_WRITE_REGION +}; + +struct ia_css_proxy_write_region_description ipu4_b0_reg_write_desc[N_IPU4_B0_IA_CSS_PROXY_WRITE_REGION] = { + /* base_addr, offset */ + {0x64128, /*input_system_csi2_logic_s2m_a_stream2mmio_err_mode_dc_ctrl_reg_id*/ 4}, /*IPU4_B0_IA_CSS_PROXY_WRITE_REGION_STR2MMIO_MIPI_0_ERROR_FILL_RATE*/ + {0x65128, /*input_system_csi2_logic_s2m_b_stream2mmio_err_mode_dc_ctrl_reg_id*/ 4}, /*IPU4_B0_IA_CSS_PROXY_WRITE_REGION_STR2MMIO_MIPI_1_ERROR_FILL_RATE*/ + {0x66128, /*input_system_csi2_logic_s2m_c_stream2mmio_err_mode_dc_ctrl_reg_id*/ 4}, /*IPU4_B0_IA_CSS_PROXY_WRITE_REGION_STR2MMIO_MIPI_2_ERROR_FILL_RATE*/ + {0x67128, /*input_system_csi2_logic_s2m_d_stream2mmio_err_mode_dc_ctrl_reg_id*/ 4}, /*IPU4_B0_IA_CSS_PROXY_WRITE_REGION_STR2MMIO_MIPI_3_ERROR_FILL_RATE*/ + {0x6C128, /*input_system_csi2_3ph_logic_s2m_a_stream2mmio_err_mode_dc_ctrl_reg_id*/ 4}, /*IPU4_B0_IA_CSS_PROXY_WRITE_REGION_STR2MMIO_MIPI_4_ERROR_FILL_RATE*/ + {0x6C928, 
/*input_system_csi2_3ph_logic_s2m_b_stream2mmio_err_mode_dc_ctrl_reg_id*/ 4}, /*IPU4_B0_IA_CSS_PROXY_WRITE_REGION_STR2MMIO_MIPI_5_ERROR_FILL_RATE*/ + {0x6D128, /*input_system_csi2_3ph_logic_s2m_0_stream2mmio_err_mode_dc_ctrl_reg_id*/ 4}, /*IPU4_B0_IA_CSS_PROXY_WRITE_REGION_STR2MMIO_MIPI_6_ERROR_FILL_RATE*/ + {0x6D928, /*input_system_csi2_3ph_logic_s2m_1_stream2mmio_err_mode_dc_ctrl_reg_id*/ 4}, /*IPU4_B0_IA_CSS_PROXY_WRITE_REGION_STR2MMIO_MIPI_7_ERROR_FILL_RATE*/ + {0x6E128, /*input_system_csi2_3ph_logic_s2m_2_stream2mmio_err_mode_dc_ctrl_reg_id*/ 4}, /*IPU4_B0_IA_CSS_PROXY_WRITE_REGION_STR2MMIO_MIPI_8_ERROR_FILL_RATE*/ + {0x6E928, /*input_system_csi2_3ph_logic_s2m_3_stream2mmio_err_mode_dc_ctrl_reg_id*/ 4}, /*IPU4_B0_IA_CSS_PROXY_WRITE_REGION_STR2MMIO_MIPI_9_ERROR_FILL_RATE*/ + {0x7800C, /*input_system_unis_logic_gda_irq_urgent_threshold*/ 4}, /*IPU4_B0_IA_CSS_PROXY_WRITE_REGION_GDA_IRQ_URGENT_THRESHOLD*/ + {0x78010, /*input_system_unis_logic_gda_irq_critical_threshold*/ 4} /*IPU4_B0_IA_CSS_PROXY_WRITE_REGION_GDA_IRQ_CRITICAL_THRESHOLD*/ +}; + +#endif /*defined(IPU4_B0_PROXY_INT)*/ + +/* + * Definitions for IPU4P_A0_PROXY_INT + */ + +#if defined(IPU4P_A0_PROXY_INT) + +/** + * enum ipu4p_a0_ia_css_proxy_write_region. Provides the list of regions for ipu4pA0 that + * can be accessed (for writing purpose) through the proxy interface + */ +enum ipu4p_a0_ia_css_proxy_write_region { + N_IPU4P_A0_IA_CSS_PROXY_WRITE_REGION +}; + +#define IPU4P_A0_NO_PROXY_WRITE_REGION_AVAILABLE + +#ifndef IPU4P_A0_NO_PROXY_WRITE_REGION_AVAILABLE +struct ia_css_proxy_write_region_description ipu4p_a0_reg_write_desc[N_IPU4P_A0_IA_CSS_PROXY_WRITE_REGION] = { +} +#endif /*IPU4P_A0_NO_PROXY_WRITE_REGION_AVAILABLE*/ + +#endif /*defined(IPU4P_A0_PROXY_INT)*/ + +/* + * Definitions for IPU4P_B0_PROXY_INT + */ + +#if defined(IPU4P_B0_PROXY_INT) + +/** + * enum ipu4p_b0_ia_css_proxy_write_region. 
Provides the list of regions for ipu4pB0 that + * can be accessed (for writing purpose) through the proxy interface + */ +enum ipu4p_b0_ia_css_proxy_write_region { + IPU4P_B0_IA_CSS_PROXY_WRITE_REGION_GDA_IWAKE_THRESHOLD = 0, + IPU4P_B0_IA_CSS_PROXY_WRITE_REGION_GDA_ENABLE_IWAKE, + N_IPU4P_B0_IA_CSS_PROXY_WRITE_REGION +}; + +struct ia_css_proxy_write_region_description ipu4p_b0_reg_write_desc[N_IPU4P_B0_IA_CSS_PROXY_WRITE_REGION] = { + /* base_addr, max_offset */ + /*input_system_unis_logic_gda_iwake_threshold*/ + {0x78014, 4}, /*IPU4P_B0_IA_CSS_PROXY_WRITE_REGION_GDA_IWAKE_THRESHOLD*/ + /*input_system_unis_logic_gda_enable_iwake*/ + {0x7801C, 4} /*IPU4P_B0_IA_CSS_PROXY_WRITE_REGION_GDA_ENABLE_IWAKE*/ +}; + +#endif /*defined(IPU4P_B0_PROXY_INT)*/ + +#endif /* __IA_CSS_ISYSAPI_PROXY_REGION_DEFS_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/isysapi/interface/ia_css_isysapi_proxy_region_types.h b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/isysapi/interface/ia_css_isysapi_proxy_region_types.h new file mode 100644 index 0000000000000..045f089e5a4c8 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/isysapi/interface/ia_css_isysapi_proxy_region_types.h @@ -0,0 +1,24 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+*/ + +#ifndef __IA_CSS_ISYSAPI_PROXY_REGION_TYPES_H +#define __IA_CSS_ISYSAPI_PROXY_REGION_TYPES_H + + +struct ia_css_proxy_write_region_description { + uint32_t base_addr; + uint32_t offset; +}; + +#endif /* __IA_CSS_ISYSAPI_PROXY_REGION_TYPES_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/isysapi/interface/ia_css_isysapi_types.h b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/isysapi/interface/ia_css_isysapi_types.h new file mode 100644 index 0000000000000..481a7dc7b4813 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/isysapi/interface/ia_css_isysapi_types.h @@ -0,0 +1,349 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+*/ + +#ifndef __IA_CSS_ISYSAPI_TYPES_H +#define __IA_CSS_ISYSAPI_TYPES_H + +#include "ia_css_isysapi_fw_types.h" +#include "type_support.h" + +#include "ia_css_return_token.h" +#include "ia_css_output_buffer.h" +#include "ia_css_input_buffer.h" +#include "ia_css_terminal_defs.h" + +/** + * struct ia_css_isys_buffer_partition - buffer partition information + * @num_gda_pages: Number of virtual gda pages available for each virtual stream + */ +struct ia_css_isys_buffer_partition { + unsigned int num_gda_pages[STREAM_ID_MAX]; +}; + +/** + * This should contain the driver specified info for sys + */ +struct ia_css_driver_sys_config { + unsigned int ssid; + unsigned int mmid; + unsigned int num_send_queues; /* # of MSG send queues */ + unsigned int num_recv_queues; /* # of MSG recv queues */ + unsigned int send_queue_size; /* max # tokens per queue */ + unsigned int recv_queue_size; /* max # tokens per queue */ + + unsigned int icache_prefetch; /* enable prefetching for SPC */ +}; + +/** + * This should contain the driver specified info for proxy write queues + */ +struct ia_css_driver_proxy_config { + /* max # tokens per PROXY send/recv queue. + * Proxy queues are used for write access purpose + */ + unsigned int proxy_write_queue_size; +}; + + /** + * struct ia_css_isys_device_cfg_data - ISYS device configuration data + * @driver_sys + * @buffer_partition: Information required for the virtual SRAM + * space partition of the streams. + * @driver_proxy + * @secure: Driver needs to set 'secure' to indicate the intention + * when invoking ia_css_isys_context_create() in + * HAS_DUAL_CMD_CTX_SUPPORT case. If 'true', it's for + * secure case. 
+ */ +struct ia_css_isys_device_cfg_data { + struct ia_css_driver_sys_config driver_sys; + struct ia_css_isys_buffer_partition buffer_partition; + struct ia_css_driver_proxy_config driver_proxy; + bool secure; + unsigned vtl0_addr_mask; /* only applicable in 'secure' case */ +}; + +/** + * struct ia_css_isys_resolution: Generic resolution structure. + * @Width + * @Height + */ +struct ia_css_isys_resolution { + unsigned int width; + unsigned int height; +}; + +/** + * struct ia_css_isys_output_pin_payload + * @out_buf_id: Points to output pin buffer - buffer identifier + * @addr: Points to output pin buffer - CSS Virtual Address + * @compressed: Request frame compression (1), or not (0) + * This must be the same as ia_css_isys_output_pin_info::reserve_compression + */ +struct ia_css_isys_output_pin_payload { + ia_css_return_token out_buf_id; + ia_css_output_buffer_css_address addr; + unsigned int compress; +}; + +/** + * struct ia_css_isys_output_pin_info + * @input_pin_id: input pin id/index which is source of + * the data for this output pin + * @output_res: output pin resolution + * @stride: output stride in Bytes (not valid for statistics) + * @pt: pin type + * @ft: frame format type + * @watermark_in_lines: pin watermark level in lines + * @send_irq: assert if pin event should trigger irq + * @link_id: identifies PPG to connect to, link_id = 0 implies offline + * while link_id > 0 implies buffer_chasing or online mode + * can be entered. + * @reserve_compression: Reserve compression resources for pin. + * @payload_buf_size: Minimum size in Bytes of all buffers that will be supplied for capture + * on this pin (i.e. 
addressed by ia_css_isys_output_pin_payload::addr) + */ +struct ia_css_isys_output_pin_info { + unsigned int input_pin_id; + struct ia_css_isys_resolution output_res; + unsigned int stride; + enum ia_css_isys_pin_type pt; + enum ia_css_isys_frame_format_type ft; + unsigned int watermark_in_lines; + unsigned int send_irq; + enum ia_css_isys_link_id link_id; + unsigned int reserve_compression; + unsigned int payload_buf_size; +}; + +/** + * struct ia_css_isys_param_pin + * @param_buf_id: Points to param buffer - buffer identifier + * @addr: Points to param buffer - CSS Virtual Address + */ +struct ia_css_isys_param_pin { + ia_css_return_token param_buf_id; + ia_css_input_buffer_css_address addr; +}; + +/** + * struct ia_css_isys_input_pin_info + * @input_res: input resolution + * @dt: mipi data type + * @mipi_store_mode: defines if legacy long packet header will be stored or + * discarded if discarded, output pin pin type for this + * input pin can only be MIPI + * @dt_rename_mode: defines if MIPI data is encapsulated in some other + * data type + * @mapped_dt: Encapsulating in mipi data type(what sensor sends) + */ +struct ia_css_isys_input_pin_info { + struct ia_css_isys_resolution input_res; + enum ia_css_isys_mipi_data_type dt; + enum ia_css_isys_mipi_store_mode mipi_store_mode; + enum ia_css_isys_mipi_dt_rename_mode dt_rename_mode; + enum ia_css_isys_mipi_data_type mapped_dt; +}; + +/** + * struct ia_css_isys_isa_cfg. 
Describes the ISA cfg + */ +struct ia_css_isys_isa_cfg { + /* Following sets resolution information neeed by the IS GP registers, + * For index IA_CSS_ISYS_RESOLUTION_INFO_POST_ISA_NONSCALED, + * it is needed when there is RAW_NS pin + * For index IA_CSS_ISYS_RESOLUTION_INFO_POST_ISA_SCALED, + * it is needed when there is RAW_S pin + */ + struct ia_css_isys_resolution isa_res[N_IA_CSS_ISYS_RESOLUTION_INFO]; + /* acc id 0, set if process required */ + unsigned int blc_enabled; + /* acc id 1, set if process required */ + unsigned int lsc_enabled; + /* acc id 2, set if process required */ + unsigned int dpc_enabled; + /* acc id 3, set if process required */ + unsigned int downscaler_enabled; + /* acc id 4, set if process required */ + unsigned int awb_enabled; + /* acc id 5, set if process required */ + unsigned int af_enabled; + /* acc id 6, set if process required */ + unsigned int ae_enabled; + /* acc id 7, disabled, or type of paf enabled*/ + enum ia_css_isys_type_paf paf_type; + /* Send irq for any statistics buffers which got completed */ + unsigned int send_irq_stats_ready; + /* Send response for any statistics buffers which got completed */ + unsigned int send_resp_stats_ready; +}; + +/** + * struct ia_css_isys_cropping - cropping coordinates + * Left/Top offsets are INCLUDED + * Right/Bottom offsets are EXCLUDED + * Horizontal: [left_offset,right_offset) + * Vertical: [top_offset,bottom_offset) + * Padding is supported + */ +struct ia_css_isys_cropping { + int top_offset; + int left_offset; + int bottom_offset; + int right_offset; +}; + + /** + * struct ia_css_isys_stream_cfg_data + * ISYS stream configuration data structure + * @src: Stream source index e.g. 
MIPI_generator_0, CSI2-rx_1 + * @vc: MIPI Virtual Channel (up to 4 virtual per physical channel) + * @isl_use: indicates whether stream requires ISL and how + * @compfmt: de-compression setting for User Defined Data + * @isa_cfg: details about what ACCs are active if ISA is used + * @crop: defines cropping resolution for the + * maximum number of input pins which can be cropped, + * it is directly mapped to the HW devices + * @send_irq_sof_discarded: send irq on discarded frame sof response + * - if '1' it will override the send_resp_sof_discarded and send + * the response + * - if '0' the send_resp_sof_discarded will determine whether to + * send the response + * @send_irq_eof_discarded: send irq on discarded frame eof response + * - if '1' it will override the send_resp_eof_discarded and send + * the response + * - if '0' the send_resp_eof_discarded will determine whether to + * send the response + * @send_resp_sof_discarded: send response for discarded frame sof detected, + * used only when send_irq_sof_discarded is '0' + * @send_resp_eof_discarded: send response for discarded frame eof detected, + * used only when send_irq_eof_discarded is '0' + * @the rest: input/output pin descriptors + */ +struct ia_css_isys_stream_cfg_data { + enum ia_css_isys_stream_source src; + enum ia_css_isys_mipi_vc vc; + enum ia_css_isys_isl_use isl_use; + unsigned int compfmt; + struct ia_css_isys_isa_cfg isa_cfg; + struct ia_css_isys_cropping crop[N_IA_CSS_ISYS_CROPPING_LOCATION]; + unsigned int send_irq_sof_discarded; + unsigned int send_irq_eof_discarded; + unsigned int send_resp_sof_discarded; + unsigned int send_resp_eof_discarded; + unsigned int nof_input_pins; + unsigned int nof_output_pins; + struct ia_css_isys_input_pin_info input_pins[MAX_IPINS]; + struct ia_css_isys_output_pin_info output_pins[MAX_OPINS]; +}; + +/** + * struct ia_css_isys_frame_buff_set - frame buffer set + * @output_pins: output pin addresses + * @process_group_light: process_group_light buffer address + 
* @send_irq_sof: send irq on frame sof response + * - if '1' it will override the send_resp_sof and send + * the response + * - if '0' the send_resp_sof will determine whether to send + * the response + * @send_irq_eof: send irq on frame eof response + * - if '1' it will override the send_resp_eof and send + * the response + * - if '0' the send_resp_eof will determine whether to send + * the response + * @send_resp_sof: send response for frame sof detected, + * used only when send_irq_sof is '0' + * @send_resp_eof: send response for frame eof detected, + * used only when send_irq_eof is '0' + * @frame_counter: frame number associated with this buffer set. + */ +struct ia_css_isys_frame_buff_set { + struct ia_css_isys_output_pin_payload output_pins[MAX_OPINS]; + struct ia_css_isys_param_pin process_group_light; + unsigned int send_irq_sof; + unsigned int send_irq_eof; + unsigned int send_irq_capture_ack; + unsigned int send_irq_capture_done; + unsigned int send_resp_sof; + unsigned int send_resp_eof; + uint8_t frame_counter; +}; + +/** + * struct ia_css_isys_resp_info + * @type: response type + * @stream_handle: stream id the response corresponds to + * @timestamp: Time information for event if available + * @error: error code if something went wrong + * @error_details: depending on error code, it may contain additional + * error info + * @pin: this var is valid for pin event related responses, + * contains pin addresses + * @pin_id: this var is valid for pin event related responses, + * contains pin id that the pin payload corresponds to + * @process_group_light: this var is valid for stats ready related responses, + * contains process group addresses + * @acc_id: this var is valid for stats ready related responses, + * contains accelerator id that finished producing + * all related statistics + * @frame_counter: valid for STREAM_START_AND_CAPTURE_DONE, + * STREAM_CAPTURE_DONE and STREAM_CAPTURE_DISCARDED + * @written_direct: indicates if frame was written direct 
(online mode) or to DDR. + */ +struct ia_css_isys_resp_info { + enum ia_css_isys_resp_type type; + unsigned int stream_handle; + unsigned int timestamp[2]; + enum ia_css_isys_error error; + unsigned int error_details; + struct ia_css_isys_output_pin_payload pin; + unsigned int pin_id; + struct ia_css_isys_param_pin process_group_light; + unsigned int acc_id; + uint8_t frame_counter; + uint8_t written_direct; +}; + +/** + * struct ia_css_proxy_write_req_val + * @request_id: Unique identifier for the write request + * (in case multiple write requests are issued for same register) + * @region_index: region id for the write request + * @offset: Offset to the specific register within the region + * @value: Value to be written to register + */ +struct ia_css_proxy_write_req_val { + uint32_t request_id; + uint32_t region_index; + uint32_t offset; + uint32_t value; +}; + +/** + * struct ia_css_proxy_write_req_resp + * @request_id: Unique identifier for the write request + * (in case multiple write requests are issued for same register) + * @error: error code if something went wrong + * @error_details: error detail includes either offset or region index + * information which caused proxy request to be rejected + * (invalid access request) + */ +struct ia_css_proxy_write_req_resp { + uint32_t request_id; + enum ia_css_proxy_error error; + uint32_t error_details; +}; + + +#endif /* __IA_CSS_ISYSAPI_TYPES_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/isysapi/isysapi.mk b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/isysapi/isysapi.mk new file mode 100644 index 0000000000000..0d06298f9acb0 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/isysapi/isysapi.mk @@ -0,0 +1,77 @@ +# # # +# Support for Intel Camera Imaging ISP subsystem. +# Copyright (c) 2010 - 2018, Intel Corporation. 
+# +# This program is free software; you can redistribute it and/or modify it +# under the terms and conditions of the GNU General Public License, +# version 2, as published by the Free Software Foundation. +# +# This program is distributed in the hope it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for +# more details +# +# +# MODULE is ISYSAPI + +include $(MODULES_DIR)/config/isys/subsystem_$(IPU_SYSVER).mk + +ISYSAPI_DIR=$${MODULES_DIR}/isysapi + +ISYSAPI_INTERFACE=$(ISYSAPI_DIR)/interface +ISYSAPI_SOURCES=$(ISYSAPI_DIR)/src +ISYSAPI_EXTINCLUDE=$${MODULES_DIR}/support +ISYSAPI_EXTINTERFACE=$${MODULES_DIR}/syscom/interface + +ISYSAPI_HOST_FILES += $(ISYSAPI_SOURCES)/ia_css_isys_public.c + +ISYSAPI_HOST_FILES += $(ISYSAPI_SOURCES)/ia_css_isys_private.c + +# ISYSAPI Trace Log Level = ISYSAPI_TRACE_LOG_LEVEL_NORMAL +# Other options are [ISYSAPI_TRACE_LOG_LEVEL_OFF, ISYSAPI_TRACE_LOG_LEVEL_DEBUG] +ifndef ISYSAPI_TRACE_CONFIG_HOST + ISYSAPI_TRACE_CONFIG_HOST=ISYSAPI_TRACE_LOG_LEVEL_NORMAL +endif +ifndef ISYSAPI_TRACE_CONFIG_FW + ISYSAPI_TRACE_CONFIG_FW=ISYSAPI_TRACE_LOG_LEVEL_NORMAL +endif + +ISYSAPI_HOST_CPPFLAGS += -DISYSAPI_TRACE_CONFIG=$(ISYSAPI_TRACE_CONFIG_HOST) +ISYSAPI_FW_CPPFLAGS += -DISYSAPI_TRACE_CONFIG=$(ISYSAPI_TRACE_CONFIG_FW) + +ISYSAPI_HOST_FILES += $(ISYSAPI_SOURCES)/ia_css_isys_public_trace.c + +ISYSAPI_HOST_CPPFLAGS += -I$(ISYSAPI_INTERFACE) +ISYSAPI_HOST_CPPFLAGS += -I$(ISYSAPI_EXTINCLUDE) +ISYSAPI_HOST_CPPFLAGS += -I$(ISYSAPI_EXTINTERFACE) +ISYSAPI_HOST_CPPFLAGS += -I$(HIVESDK)/systems/ipu_system/dai/include +ISYSAPI_HOST_CPPFLAGS += -I$(HIVESDK)/systems/ipu_system/dai/include/default_system +ISYSAPI_HOST_CPPFLAGS += -I$(HIVESDK)/include/ipu/dai +ISYSAPI_HOST_CPPFLAGS += -I$(HIVESDK)/include/ipu + +ISYSAPI_FW_FILES += $(ISYSAPI_SOURCES)/isys_fw.c +ISYSAPI_FW_FILES += $(ISYSAPI_SOURCES)/isys_fw_utils.c + 
+ISYSAPI_FW_CPPFLAGS += -I$(ISYSAPI_INTERFACE) +ISYSAPI_FW_CPPFLAGS += -I$(ISYSAPI_SOURCES)/$(IPU_SYSVER) +ISYSAPI_FW_CPPFLAGS += -I$(ISYSAPI_EXTINCLUDE) +ISYSAPI_FW_CPPFLAGS += -I$(ISYSAPI_EXTINTERFACE) +ISYSAPI_FW_CPPFLAGS += -I$(HIVESDK)/systems/ipu_system/dai/include +ISYSAPI_FW_CPPFLAGS += -I$(HIVESDK)/systems/ipu_system/dai/include/default_system +ISYSAPI_FW_CPPFLAGS += -I$(HIVESDK)/include/ipu/dai +ISYSAPI_FW_CPPFLAGS += -I$(HIVESDK)/include/ipu + +ISYSAPI_FW_CPPFLAGS += -DWA_HSD1805168877=$(WA_HSD1805168877) + +ISYSAPI_HOST_CPPFLAGS += -DREGMEM_OFFSET=$(REGMEM_OFFSET) + +ifeq ($(ISYS_HAS_DUAL_CMD_CTX_SUPPORT), 1) +ISYSAPI_HOST_CPPFLAGS += -DHAS_DUAL_CMD_CTX_SUPPORT=$(ISYS_HAS_DUAL_CMD_CTX_SUPPORT) +ISYSAPI_FW_CPPFLAGS += -DHAS_DUAL_CMD_CTX_SUPPORT=$(ISYS_HAS_DUAL_CMD_CTX_SUPPORT) +endif + +ifdef AB_CONFIG_ARRAY_SIZE +ISYSAPI_FW_CPPFLAGS += -DAB_CONFIG_ARRAY_SIZE=$(AB_CONFIG_ARRAY_SIZE) +else +ISYSAPI_FW_CPPFLAGS += -DAB_CONFIG_ARRAY_SIZE=1 +endif diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/isysapi/src/ia_css_isys_private.c b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/isysapi/src/ia_css_isys_private.c new file mode 100644 index 0000000000000..8297a1ff2d1be --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/isysapi/src/ia_css_isys_private.c @@ -0,0 +1,980 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+*/ + +#include "ia_css_isys_private.h" +/* The following is needed for the contained data types */ +#include "ia_css_isys_fw_bridged_types.h" +#include "ia_css_isysapi_types.h" +#include "ia_css_syscom_config.h" +/* + * The following header file is needed for the + * stddef.h (NULL), + * limits.h (CHAR_BIT definition). + */ +#include "type_support.h" +#include "error_support.h" +#include "ia_css_isysapi_trace.h" +#include "misc_support.h" +#include "cpu_mem_support.h" +#include "storage_class.h" + +#include "ia_css_shared_buffer_cpu.h" + +/* + * defines how many stream cfg host may sent concurrently + * before receiving the stream ack + */ +#define STREAM_CFG_BUFS_PER_MSG_QUEUE (1) +#define NEXT_FRAME_BUFS_PER_MSG_QUEUE \ + (ctx->send_queue_size[IA_CSS_ISYS_QUEUE_TYPE_MSG] + 4 + 1) +/* + * There is an edge case that host has filled the full queue + * with capture requests (ctx->send_queue_size), + * SP reads and HW-queues all of them (4), + * while in the meantime host continues queueing capture requests + * without checking for responses which SP will have sent with each HW-queue + * capture request (if it does then the 4 is much more improbable to appear, + * but still not impossible). 
+ * After this, host tries to queue an extra capture request + * even though there is no space in the msg queue because msg queue + * is checked at a later point, so +1 is needed + */ + +/* + * A DT is supported assuming when the MIPI packets + * have the same size even when even/odd lines are different, + * and the size is the average per line + */ +#define IA_CSS_UNSUPPORTED_DATA_TYPE (0) +static const uint32_t +ia_css_isys_extracted_bits_per_pixel_per_mipi_data_type[ + N_IA_CSS_ISYS_MIPI_DATA_TYPE] = { + /* + * Remove Prefix "IA_CSS_ISYS_MIPI_DATA_TYPE_" in comments + * to align with Checkpatch 80 characters requirements + * For detailed comments of each field, please refer to + * definition of enum ia_css_isys_mipi_data_type{} in + * isysapi/interface/ia_css_isysapi_fw_types.h + */ + 64, /* [0x00] FRAME_START_CODE */ + 64, /* [0x01] FRAME_END_CODE */ + 64, /* [0x02] LINE_START_CODE Optional */ + 64, /* [0x03] LINE_END_CODE Optional */ + IA_CSS_UNSUPPORTED_DATA_TYPE, /* [0x04] RESERVED_0x04 */ + IA_CSS_UNSUPPORTED_DATA_TYPE, /* [0x05] RESERVED_0x05 */ + IA_CSS_UNSUPPORTED_DATA_TYPE, /* [0x06] RESERVED_0x06 */ + IA_CSS_UNSUPPORTED_DATA_TYPE, /* [0x07] RESERVED_0x07 */ + 64, /* [0x08] GENERIC_SHORT1 */ + 64, /* [0x09] GENERIC_SHORT2 */ + 64, /* [0x0A] GENERIC_SHORT3 */ + 64, /* [0x0B] GENERIC_SHORT4 */ + 64, /* [0x0C] GENERIC_SHORT5 */ + 64, /* [0x0D] GENERIC_SHORT6 */ + 64, /* [0x0E] GENERIC_SHORT7 */ + 64, /* [0x0F] GENERIC_SHORT8 */ + IA_CSS_UNSUPPORTED_DATA_TYPE, /* [0x10] NULL To be ignored */ + IA_CSS_UNSUPPORTED_DATA_TYPE, /* [0x11] BLANKING_DATA To be ignored */ + 8, /* [0x12] EMBEDDED non Image Data */ + IA_CSS_UNSUPPORTED_DATA_TYPE, /* [0x13] RESERVED_0x13 */ + IA_CSS_UNSUPPORTED_DATA_TYPE, /* [0x14] RESERVED_0x14 */ + IA_CSS_UNSUPPORTED_DATA_TYPE, /* [0x15] RESERVED_0x15 */ + IA_CSS_UNSUPPORTED_DATA_TYPE, /* [0x16] RESERVED_0x16 */ + IA_CSS_UNSUPPORTED_DATA_TYPE, /* [0x17] RESERVED_0x17 */ + 12, /* [0x18] YUV420_8 */ + 15, /* [0x19] YUV420_10 */ + 12, 
/* [0x1A] YUV420_8_LEGACY */ + IA_CSS_UNSUPPORTED_DATA_TYPE, /* [0x1B] RESERVED_0x1B */ + 12, /* [0x1C] YUV420_8_SHIFT */ + 15, /* [0x1D] YUV420_10_SHIFT */ + 16, /* [0x1E] YUV422_8 */ + 20, /* [0x1F] YUV422_10 */ + 16, /* [0x20] RGB_444 */ + 16, /* [0x21] RGB_555 */ + 16, /* [0x22] RGB_565 */ + 18, /* [0x23] RGB_666 */ + 24, /* [0x24] RGB_888 */ + IA_CSS_UNSUPPORTED_DATA_TYPE, /* [0x25] RESERVED_0x25 */ + IA_CSS_UNSUPPORTED_DATA_TYPE, /* [0x26] RESERVED_0x26 */ + IA_CSS_UNSUPPORTED_DATA_TYPE, /* [0x27] RESERVED_0x27 */ + 6, /* [0x28] RAW_6 */ + 7, /* [0x29] RAW_7 */ + 8, /* [0x2A] RAW_8 */ + 10, /* [0x2B] RAW_10 */ + 12, /* [0x2C] RAW_12 */ + 14, /* [0x2D] RAW_14 */ + 16, /* [0x2E] RAW_16 */ + 8, /* [0x2F] BINARY_8 */ + 8, /* [0x30] USER_DEF1 */ + 8, /* [0x31] USER_DEF2 */ + 8, /* [0x32] USER_DEF3 */ + 8, /* [0x33] USER_DEF4 */ + 8, /* [0x34] USER_DEF5 */ + 8, /* [0x35] USER_DEF6 */ + 8, /* [0x36] USER_DEF7 */ + 8, /* [0x37] USER_DEF8 */ + IA_CSS_UNSUPPORTED_DATA_TYPE, /* [0x38] RESERVED_0x38 */ + IA_CSS_UNSUPPORTED_DATA_TYPE, /* [0x39] RESERVED_0x39 */ + IA_CSS_UNSUPPORTED_DATA_TYPE, /* [0x3A] RESERVED_0x3A */ + IA_CSS_UNSUPPORTED_DATA_TYPE, /* [0x3B] RESERVED_0x3B */ + IA_CSS_UNSUPPORTED_DATA_TYPE, /* [0x3C] RESERVED_0x3C */ + IA_CSS_UNSUPPORTED_DATA_TYPE, /* [0x3D] RESERVED_0x3D */ + IA_CSS_UNSUPPORTED_DATA_TYPE, /* [0x3E] RESERVED_0x3E */ + IA_CSS_UNSUPPORTED_DATA_TYPE /* [0x3F] RESERVED_0x3F */ +}; + +STORAGE_CLASS_INLINE int get_stream_cfg_buff_slot( + struct ia_css_isys_context *ctx, + int stream_handle, + int stream_cfg_buff_counter) +{ + NOT_USED(ctx); + return (stream_handle * STREAM_CFG_BUFS_PER_MSG_QUEUE) + + stream_cfg_buff_counter; +} + +STORAGE_CLASS_INLINE int get_next_frame_buff_slot( + struct ia_css_isys_context *ctx, + int stream_handle, + int next_frame_buff_counter) +{ + NOT_USED(ctx); + return (stream_handle * NEXT_FRAME_BUFS_PER_MSG_QUEUE) + + next_frame_buff_counter; +} + +STORAGE_CLASS_INLINE void free_comm_buff_shared_mem( + struct 
ia_css_isys_context *ctx, + int stream_handle, + int stream_cfg_buff_counter, + int next_frame_buff_counter) +{ + int buff_slot; + + /* Initialiser is the current value of stream_handle */ + for (; stream_handle >= 0; stream_handle--) { + /* + * Initialiser is the current value of stream_cfg_buff_counter + */ + for (; stream_cfg_buff_counter >= 0; + stream_cfg_buff_counter--) { + buff_slot = get_stream_cfg_buff_slot( + ctx, stream_handle, stream_cfg_buff_counter); + ia_css_shared_buffer_free( + ctx->ssid, ctx->mmid, + ctx->isys_comm_buffer_queue. + pstream_cfg_buff_id[buff_slot]); + } + /* Set for the next iteration */ + stream_cfg_buff_counter = STREAM_CFG_BUFS_PER_MSG_QUEUE - 1; + /* + * Initialiser is the current value of next_frame_buff_counter + */ + for (; next_frame_buff_counter >= 0; + next_frame_buff_counter--) { + buff_slot = get_next_frame_buff_slot( + ctx, stream_handle, next_frame_buff_counter); + ia_css_shared_buffer_free( + ctx->ssid, ctx->mmid, + ctx->isys_comm_buffer_queue. 
+ pnext_frame_buff_id[buff_slot]); + } + next_frame_buff_counter = NEXT_FRAME_BUFS_PER_MSG_QUEUE - 1; + } +} + +/* + * ia_css_isys_constr_comm_buff_queue() + */ +int ia_css_isys_constr_comm_buff_queue( + struct ia_css_isys_context *ctx) +{ + int stream_handle; + int stream_cfg_buff_counter; + int next_frame_buff_counter; + int buff_slot; + + verifret(ctx, EFAULT); /* Host Consistency */ + + ctx->isys_comm_buffer_queue.pstream_cfg_buff_id = + (ia_css_shared_buffer *) + ia_css_cpu_mem_alloc(ctx-> + num_send_queues[IA_CSS_ISYS_QUEUE_TYPE_MSG] * + STREAM_CFG_BUFS_PER_MSG_QUEUE * + sizeof(ia_css_shared_buffer)); + verifret(ctx->isys_comm_buffer_queue.pstream_cfg_buff_id != NULL, + EFAULT); + + ctx->isys_comm_buffer_queue.pnext_frame_buff_id = + (ia_css_shared_buffer *) + ia_css_cpu_mem_alloc(ctx-> + num_send_queues[IA_CSS_ISYS_QUEUE_TYPE_MSG] * + NEXT_FRAME_BUFS_PER_MSG_QUEUE * + sizeof(ia_css_shared_buffer)); + if (ctx->isys_comm_buffer_queue.pnext_frame_buff_id == NULL) { + ia_css_cpu_mem_free( + ctx->isys_comm_buffer_queue.pstream_cfg_buff_id); + verifret(0, EFAULT); /* return EFAULT; equivalent */ + } + + for (stream_handle = 0; stream_handle < + (int)ctx->num_send_queues[IA_CSS_ISYS_QUEUE_TYPE_MSG]; + stream_handle++) { + /* Initialisation needs to happen here for both loops */ + stream_cfg_buff_counter = 0; + next_frame_buff_counter = 0; + + for (; stream_cfg_buff_counter < STREAM_CFG_BUFS_PER_MSG_QUEUE; + stream_cfg_buff_counter++) { + buff_slot = get_stream_cfg_buff_slot( + ctx, stream_handle, stream_cfg_buff_counter); + ctx->isys_comm_buffer_queue. + pstream_cfg_buff_id[buff_slot] = + ia_css_shared_buffer_alloc( + ctx->ssid, ctx->mmid, + sizeof(struct + ia_css_isys_stream_cfg_data_comm)); + if (ctx->isys_comm_buffer_queue.pstream_cfg_buff_id[ + buff_slot] == 0) { + goto SHARED_BUFF_ALLOC_FAILURE; + } + } + ctx->isys_comm_buffer_queue. + stream_cfg_queue_head[stream_handle] = 0; + ctx->isys_comm_buffer_queue. 
+ stream_cfg_queue_tail[stream_handle] = 0; + for (; next_frame_buff_counter < + (int)NEXT_FRAME_BUFS_PER_MSG_QUEUE; + next_frame_buff_counter++) { + buff_slot = get_next_frame_buff_slot( + ctx, stream_handle, + next_frame_buff_counter); + ctx->isys_comm_buffer_queue. + pnext_frame_buff_id[buff_slot] = + ia_css_shared_buffer_alloc( + ctx->ssid, ctx->mmid, + sizeof(struct + ia_css_isys_frame_buff_set_comm)); + if (ctx->isys_comm_buffer_queue. + pnext_frame_buff_id[buff_slot] == 0) { + goto SHARED_BUFF_ALLOC_FAILURE; + } + } + ctx->isys_comm_buffer_queue. + next_frame_queue_head[stream_handle] = 0; + ctx->isys_comm_buffer_queue. + next_frame_queue_tail[stream_handle] = 0; + } + + return 0; + +SHARED_BUFF_ALLOC_FAILURE: + /* stream_handle has correct value for calling the free function */ + /* prepare stream_cfg_buff_counter for calling the free function */ + stream_cfg_buff_counter--; + /* prepare next_frame_buff_counter for calling the free function */ + next_frame_buff_counter--; + free_comm_buff_shared_mem( + ctx, + stream_handle, + stream_cfg_buff_counter, + next_frame_buff_counter); + + verifret(0, EFAULT); /* return EFAULT; equivalent */ +} + +/* + * ia_css_isys_force_unmap_comm_buff_queue() + */ +int ia_css_isys_force_unmap_comm_buff_queue( + struct ia_css_isys_context *ctx) +{ + int stream_handle; + int buff_slot; + + verifret(ctx, EFAULT); /* Host Consistency */ + + IA_CSS_TRACE_0(ISYSAPI, WARNING, + "ia_css_isys_force_unmap_comm_buff_queue() called\n"); + for (stream_handle = 0; stream_handle < + (int)ctx->num_send_queues[IA_CSS_ISYS_QUEUE_TYPE_MSG]; + stream_handle++) { + /* Host-FW Consistency */ + verifret((ctx->isys_comm_buffer_queue. + stream_cfg_queue_head[stream_handle] - + ctx->isys_comm_buffer_queue. + stream_cfg_queue_tail[stream_handle]) <= + STREAM_CFG_BUFS_PER_MSG_QUEUE, EPROTO); + for (; ctx->isys_comm_buffer_queue. + stream_cfg_queue_tail[stream_handle] < + ctx->isys_comm_buffer_queue. 
+ stream_cfg_queue_head[stream_handle]; + ctx->isys_comm_buffer_queue. + stream_cfg_queue_tail[stream_handle]++) { + IA_CSS_TRACE_1(ISYSAPI, WARNING, + "CSS forced unmapping stream_cfg %d\n", + ctx->isys_comm_buffer_queue. + stream_cfg_queue_tail[stream_handle]); + buff_slot = get_stream_cfg_buff_slot( + ctx, stream_handle, + ctx->isys_comm_buffer_queue. + stream_cfg_queue_tail[stream_handle] % + STREAM_CFG_BUFS_PER_MSG_QUEUE); + ia_css_shared_buffer_css_unmap( + ctx->isys_comm_buffer_queue. + pstream_cfg_buff_id[buff_slot]); + } + /* Host-FW Consistency */ + verifret((ctx->isys_comm_buffer_queue. + next_frame_queue_head[stream_handle] - + ctx->isys_comm_buffer_queue. + next_frame_queue_tail[stream_handle]) <= + NEXT_FRAME_BUFS_PER_MSG_QUEUE, EPROTO); + for (; ctx->isys_comm_buffer_queue. + next_frame_queue_tail[stream_handle] < + ctx->isys_comm_buffer_queue. + next_frame_queue_head[stream_handle]; + ctx->isys_comm_buffer_queue. + next_frame_queue_tail[stream_handle]++) { + IA_CSS_TRACE_1(ISYSAPI, WARNING, + "CSS forced unmapping next_frame %d\n", + ctx->isys_comm_buffer_queue. + next_frame_queue_tail[stream_handle]); + buff_slot = get_next_frame_buff_slot( + ctx, stream_handle, + ctx->isys_comm_buffer_queue. + next_frame_queue_tail[stream_handle] % + NEXT_FRAME_BUFS_PER_MSG_QUEUE); + ia_css_shared_buffer_css_unmap( + ctx->isys_comm_buffer_queue. 
+ pnext_frame_buff_id[buff_slot]); + } + } + + return 0; +} + +/* + * ia_css_isys_destr_comm_buff_queue() + */ +int ia_css_isys_destr_comm_buff_queue( + struct ia_css_isys_context *ctx) +{ + verifret(ctx, EFAULT); /* Host Consistency */ + + free_comm_buff_shared_mem( + ctx, + ctx->num_send_queues[IA_CSS_ISYS_QUEUE_TYPE_MSG] - 1, + STREAM_CFG_BUFS_PER_MSG_QUEUE - 1, + NEXT_FRAME_BUFS_PER_MSG_QUEUE - 1); + + ia_css_cpu_mem_free(ctx->isys_comm_buffer_queue.pnext_frame_buff_id); + ia_css_cpu_mem_free(ctx->isys_comm_buffer_queue.pstream_cfg_buff_id); + + return 0; +} + +STORAGE_CLASS_INLINE void resolution_host_to_css( + const struct ia_css_isys_resolution *resolution_host, + struct ia_css_isys_resolution_comm *resolution_css) +{ + resolution_css->width = resolution_host->width; + resolution_css->height = resolution_host->height; +} + +STORAGE_CLASS_INLINE void output_pin_payload_host_to_css( + const struct ia_css_isys_output_pin_payload *output_pin_payload_host, + struct ia_css_isys_output_pin_payload_comm *output_pin_payload_css) +{ + output_pin_payload_css->out_buf_id = + output_pin_payload_host->out_buf_id; + output_pin_payload_css->addr = output_pin_payload_host->addr; +#ifdef ENABLE_DEC400 + output_pin_payload_css->compress = output_pin_payload_host->compress; +#else + output_pin_payload_css->compress = 0; +#endif /* ENABLE_DEC400 */ +} + +STORAGE_CLASS_INLINE void output_pin_info_host_to_css( + const struct ia_css_isys_output_pin_info *output_pin_info_host, + struct ia_css_isys_output_pin_info_comm *output_pin_info_css) +{ + output_pin_info_css->input_pin_id = output_pin_info_host->input_pin_id; + resolution_host_to_css( + &output_pin_info_host->output_res, + &output_pin_info_css->output_res); + output_pin_info_css->stride = output_pin_info_host->stride; + output_pin_info_css->pt = output_pin_info_host->pt; + output_pin_info_css->watermark_in_lines = + output_pin_info_host->watermark_in_lines; + output_pin_info_css->send_irq = output_pin_info_host->send_irq; + 
output_pin_info_css->ft = output_pin_info_host->ft; + output_pin_info_css->link_id = output_pin_info_host->link_id; +#ifdef ENABLE_DEC400 + output_pin_info_css->reserve_compression = output_pin_info_host->reserve_compression; + output_pin_info_css->payload_buf_size = output_pin_info_host->payload_buf_size; +#else + output_pin_info_css->reserve_compression = 0; + /* Though payload_buf_size was added for compression, set sane value for + * payload_buf_size, just in case... + */ + output_pin_info_css->payload_buf_size = + output_pin_info_host->stride * output_pin_info_host->output_res.height; +#endif /* ENABLE_DEC400 */ +} + +STORAGE_CLASS_INLINE void param_pin_host_to_css( + const struct ia_css_isys_param_pin *param_pin_host, + struct ia_css_isys_param_pin_comm *param_pin_css) +{ + param_pin_css->param_buf_id = param_pin_host->param_buf_id; + param_pin_css->addr = param_pin_host->addr; +} + +STORAGE_CLASS_INLINE void input_pin_info_host_to_css( + const struct ia_css_isys_input_pin_info *input_pin_info_host, + struct ia_css_isys_input_pin_info_comm *input_pin_info_css) +{ + resolution_host_to_css( + &input_pin_info_host->input_res, + &input_pin_info_css->input_res); + if (input_pin_info_host->dt >= N_IA_CSS_ISYS_MIPI_DATA_TYPE) { + IA_CSS_TRACE_0(ISYSAPI, ERROR, + "input_pin_info_host->dt out of range\n"); + return; + } + if (input_pin_info_host->dt_rename_mode >= N_IA_CSS_ISYS_MIPI_DT_MODE) { + IA_CSS_TRACE_0(ISYSAPI, ERROR, + "input_pin_info_host->dt_rename_mode out of range\n"); + return; + } + /* Mapped DT check if data type renaming is being used*/ + if (input_pin_info_host->dt_rename_mode == IA_CSS_ISYS_MIPI_DT_RENAMED_MODE && + input_pin_info_host->mapped_dt >= N_IA_CSS_ISYS_MIPI_DATA_TYPE) { + IA_CSS_TRACE_0(ISYSAPI, ERROR, + "input_pin_info_host->mapped_dt out of range\n"); + return; + } + input_pin_info_css->dt = input_pin_info_host->dt; + input_pin_info_css->mipi_store_mode = + input_pin_info_host->mipi_store_mode; + input_pin_info_css->bits_per_pix = + 
ia_css_isys_extracted_bits_per_pixel_per_mipi_data_type[ + input_pin_info_host->dt]; + if (input_pin_info_host->dt_rename_mode == IA_CSS_ISYS_MIPI_DT_RENAMED_MODE) { + input_pin_info_css->mapped_dt = input_pin_info_host->mapped_dt; + } + else { + input_pin_info_css->mapped_dt = N_IA_CSS_ISYS_MIPI_DATA_TYPE; + } +} + +STORAGE_CLASS_INLINE void isa_cfg_host_to_css( + const struct ia_css_isys_isa_cfg *isa_cfg_host, + struct ia_css_isys_isa_cfg_comm *isa_cfg_css) +{ + unsigned int i; + + for (i = 0; i < N_IA_CSS_ISYS_RESOLUTION_INFO; i++) { + resolution_host_to_css(&isa_cfg_host->isa_res[i], + &isa_cfg_css->isa_res[i]); + } + isa_cfg_css->cfg_fields = 0; + ISA_CFG_FIELD_SET(BLC_EN, isa_cfg_css->cfg_fields, + isa_cfg_host->blc_enabled ? 1 : 0); + ISA_CFG_FIELD_SET(LSC_EN, isa_cfg_css->cfg_fields, + isa_cfg_host->lsc_enabled ? 1 : 0); + ISA_CFG_FIELD_SET(DPC_EN, isa_cfg_css->cfg_fields, + isa_cfg_host->dpc_enabled ? 1 : 0); + ISA_CFG_FIELD_SET(DOWNSCALER_EN, isa_cfg_css->cfg_fields, + isa_cfg_host->downscaler_enabled ? 1 : 0); + ISA_CFG_FIELD_SET(AWB_EN, isa_cfg_css->cfg_fields, + isa_cfg_host->awb_enabled ? 1 : 0); + ISA_CFG_FIELD_SET(AF_EN, isa_cfg_css->cfg_fields, + isa_cfg_host->af_enabled ? 1 : 0); + ISA_CFG_FIELD_SET(AE_EN, isa_cfg_css->cfg_fields, + isa_cfg_host->ae_enabled ? 1 : 0); + ISA_CFG_FIELD_SET(PAF_TYPE, isa_cfg_css->cfg_fields, + isa_cfg_host->paf_type); + ISA_CFG_FIELD_SET(SEND_IRQ_STATS_READY, isa_cfg_css->cfg_fields, + isa_cfg_host->send_irq_stats_ready ? 1 : 0); + ISA_CFG_FIELD_SET(SEND_RESP_STATS_READY, isa_cfg_css->cfg_fields, + (isa_cfg_host->send_irq_stats_ready || + isa_cfg_host->send_resp_stats_ready) ? 
1 : 0); +} + +STORAGE_CLASS_INLINE void cropping_host_to_css( + const struct ia_css_isys_cropping *cropping_host, + struct ia_css_isys_cropping_comm *cropping_css) +{ + cropping_css->top_offset = cropping_host->top_offset; + cropping_css->left_offset = cropping_host->left_offset; + cropping_css->bottom_offset = cropping_host->bottom_offset; + cropping_css->right_offset = cropping_host->right_offset; + +} + +STORAGE_CLASS_INLINE int stream_cfg_data_host_to_css( + const struct ia_css_isys_stream_cfg_data *stream_cfg_data_host, + struct ia_css_isys_stream_cfg_data_comm *stream_cfg_data_css) +{ + unsigned int i; + + stream_cfg_data_css->src = stream_cfg_data_host->src; + stream_cfg_data_css->vc = stream_cfg_data_host->vc; + stream_cfg_data_css->isl_use = stream_cfg_data_host->isl_use; + stream_cfg_data_css->compfmt = stream_cfg_data_host->compfmt; + stream_cfg_data_css->isa_cfg.cfg_fields = 0; + + switch (stream_cfg_data_host->isl_use) { + case IA_CSS_ISYS_USE_SINGLE_ISA: + isa_cfg_host_to_css(&stream_cfg_data_host->isa_cfg, + &stream_cfg_data_css->isa_cfg); + /* deliberate fall-through */ + case IA_CSS_ISYS_USE_SINGLE_DUAL_ISL: + for (i = 0; i < N_IA_CSS_ISYS_CROPPING_LOCATION; i++) { + cropping_host_to_css(&stream_cfg_data_host->crop[i], + &stream_cfg_data_css->crop[i]); + } + break; + case IA_CSS_ISYS_USE_NO_ISL_NO_ISA: + break; + default: + break; + } + + stream_cfg_data_css->send_irq_sof_discarded = + stream_cfg_data_host->send_irq_sof_discarded ? 1 : 0; + stream_cfg_data_css->send_irq_eof_discarded = + stream_cfg_data_host->send_irq_eof_discarded ? 1 : 0; + stream_cfg_data_css->send_resp_sof_discarded = + stream_cfg_data_host->send_irq_sof_discarded ? + 1 : stream_cfg_data_host->send_resp_sof_discarded; + stream_cfg_data_css->send_resp_eof_discarded = + stream_cfg_data_host->send_irq_eof_discarded ? 
+ 1 : stream_cfg_data_host->send_resp_eof_discarded; + stream_cfg_data_css->nof_input_pins = + stream_cfg_data_host->nof_input_pins; + stream_cfg_data_css->nof_output_pins = + stream_cfg_data_host->nof_output_pins; + for (i = 0; i < stream_cfg_data_host->nof_input_pins; i++) { + input_pin_info_host_to_css( + &stream_cfg_data_host->input_pins[i], + &stream_cfg_data_css->input_pins[i]); + verifret(stream_cfg_data_css->input_pins[i].bits_per_pix, + EINVAL); + } + for (i = 0; i < stream_cfg_data_host->nof_output_pins; i++) { + output_pin_info_host_to_css( + &stream_cfg_data_host->output_pins[i], + &stream_cfg_data_css->output_pins[i]); + } + return 0; +} + +STORAGE_CLASS_INLINE void frame_buff_set_host_to_css( + const struct ia_css_isys_frame_buff_set *frame_buff_set_host, + struct ia_css_isys_frame_buff_set_comm *frame_buff_set_css) +{ + int i; + + for (i = 0; i < MAX_OPINS; i++) { + output_pin_payload_host_to_css( + &frame_buff_set_host->output_pins[i], + &frame_buff_set_css->output_pins[i]); + } + + param_pin_host_to_css(&frame_buff_set_host->process_group_light, + &frame_buff_set_css->process_group_light); + frame_buff_set_css->send_irq_sof = + frame_buff_set_host->send_irq_sof ? 1 : 0; + frame_buff_set_css->send_irq_eof = + frame_buff_set_host->send_irq_eof ? 1 : 0; + frame_buff_set_css->send_irq_capture_done = + (uint8_t)frame_buff_set_host->send_irq_capture_done; + frame_buff_set_css->send_irq_capture_ack = + frame_buff_set_host->send_irq_capture_ack ? 1 : 0; + frame_buff_set_css->send_resp_sof = + frame_buff_set_host->send_irq_sof ? + 1 : frame_buff_set_host->send_resp_sof; + frame_buff_set_css->send_resp_eof = + frame_buff_set_host->send_irq_eof ? 
+ 1 : frame_buff_set_host->send_resp_eof; + frame_buff_set_css->frame_counter = + frame_buff_set_host->frame_counter; +} + +STORAGE_CLASS_INLINE void buffer_partition_host_to_css( + const struct ia_css_isys_buffer_partition *buffer_partition_host, + struct ia_css_isys_buffer_partition_comm *buffer_partition_css) +{ + int i; + + for (i = 0; i < STREAM_ID_MAX; i++) { + buffer_partition_css->num_gda_pages[i] = + buffer_partition_host->num_gda_pages[i]; + } +} + +STORAGE_CLASS_INLINE void output_pin_payload_css_to_host( + const struct ia_css_isys_output_pin_payload_comm * + output_pin_payload_css, + struct ia_css_isys_output_pin_payload *output_pin_payload_host) +{ + output_pin_payload_host->out_buf_id = + output_pin_payload_css->out_buf_id; + output_pin_payload_host->addr = output_pin_payload_css->addr; +#ifdef ENABLE_DEC400 + output_pin_payload_host->compress = output_pin_payload_css->compress; +#else + output_pin_payload_host->compress = 0; +#endif /* ENABLE_DEC400 */ +} + +STORAGE_CLASS_INLINE void param_pin_css_to_host( + const struct ia_css_isys_param_pin_comm *param_pin_css, + struct ia_css_isys_param_pin *param_pin_host) +{ + param_pin_host->param_buf_id = param_pin_css->param_buf_id; + param_pin_host->addr = param_pin_css->addr; + +} + +STORAGE_CLASS_INLINE void resp_info_css_to_host( + const struct ia_css_isys_resp_info_comm *resp_info_css, + struct ia_css_isys_resp_info *resp_info_host) +{ + resp_info_host->type = resp_info_css->type; + resp_info_host->timestamp[0] = resp_info_css->timestamp[0]; + resp_info_host->timestamp[1] = resp_info_css->timestamp[1]; + resp_info_host->stream_handle = resp_info_css->stream_handle; + resp_info_host->error = resp_info_css->error_info.error; + resp_info_host->error_details = + resp_info_css->error_info.error_details; + output_pin_payload_css_to_host( + &resp_info_css->pin, &resp_info_host->pin); + resp_info_host->pin_id = resp_info_css->pin_id; + param_pin_css_to_host(&resp_info_css->process_group_light, + 
&resp_info_host->process_group_light); + resp_info_host->acc_id = resp_info_css->acc_id; + resp_info_host->frame_counter = resp_info_css->frame_counter; + resp_info_host->written_direct = resp_info_css->written_direct; +} + +/* + * ia_css_isys_constr_fw_stream_cfg() + */ +int ia_css_isys_constr_fw_stream_cfg( + struct ia_css_isys_context *ctx, + const unsigned int stream_handle, + ia_css_shared_buffer_css_address *pstream_cfg_fw, + ia_css_shared_buffer *pbuf_stream_cfg_id, + const struct ia_css_isys_stream_cfg_data *stream_cfg) +{ + ia_css_shared_buffer_cpu_address stream_cfg_cpu_addr; + ia_css_shared_buffer_css_address stream_cfg_css_addr; + int buff_slot; + int retval = 0; + unsigned int wrap_compensation; + const unsigned int wrap_condition = 0xFFFFFFFF; + + verifret(ctx, EFAULT); /* Host Consistency */ + verifret(pstream_cfg_fw, EFAULT); /* Host Consistency */ + verifret(pbuf_stream_cfg_id, EFAULT); /* Host Consistency */ + verifret(stream_cfg, EFAULT); /* Host Consistency */ + + /* Host-FW Consistency */ + verifret((ctx->isys_comm_buffer_queue. + stream_cfg_queue_head[stream_handle] - + ctx->isys_comm_buffer_queue. + stream_cfg_queue_tail[stream_handle]) < + STREAM_CFG_BUFS_PER_MSG_QUEUE, EPROTO); + buff_slot = get_stream_cfg_buff_slot(ctx, stream_handle, + ctx->isys_comm_buffer_queue. 
+ stream_cfg_queue_head[stream_handle] % + STREAM_CFG_BUFS_PER_MSG_QUEUE); + *pbuf_stream_cfg_id = + ctx->isys_comm_buffer_queue.pstream_cfg_buff_id[buff_slot]; + /* Host-FW Consistency */ + verifret(*pbuf_stream_cfg_id, EADDRNOTAVAIL); + + stream_cfg_cpu_addr = + ia_css_shared_buffer_cpu_map(*pbuf_stream_cfg_id); + /* Host-FW Consistency */ + verifret(stream_cfg_cpu_addr, EADDRINUSE); + + retval = stream_cfg_data_host_to_css(stream_cfg, stream_cfg_cpu_addr); + if (retval) + return retval; + + stream_cfg_cpu_addr = + ia_css_shared_buffer_cpu_unmap(*pbuf_stream_cfg_id); + /* Host Consistency */ + verifret(stream_cfg_cpu_addr, EADDRINUSE); + + stream_cfg_css_addr = + ia_css_shared_buffer_css_map(*pbuf_stream_cfg_id); + /* Host Consistency */ + verifret(stream_cfg_css_addr, EADDRINUSE); + + ia_css_shared_buffer_css_update(ctx->mmid, *pbuf_stream_cfg_id); + + *pstream_cfg_fw = stream_cfg_css_addr; + + /* + * cover head wrap around extreme case, + * in which case force tail to wrap around too + * while maintaining diff and modulo + */ + if (ctx->isys_comm_buffer_queue.stream_cfg_queue_head[stream_handle] == + wrap_condition) { + /* Value to be added to both head and tail */ + wrap_compensation = + /* + * Distance of wrap_condition to 0, + * will need to be added for wrapping around head to 0 + */ + (0 - wrap_condition) + + /* + * To force tail to also wrap around, + * since it has to happen concurrently + */ + STREAM_CFG_BUFS_PER_MSG_QUEUE + + /* To preserve the same modulo, + * since the previous will result in head modulo 0 + */ + (wrap_condition % STREAM_CFG_BUFS_PER_MSG_QUEUE); + ctx->isys_comm_buffer_queue. + stream_cfg_queue_head[stream_handle] += + wrap_compensation; + ctx->isys_comm_buffer_queue. 
+ stream_cfg_queue_tail[stream_handle] += + wrap_compensation; + } + ctx->isys_comm_buffer_queue.stream_cfg_queue_head[stream_handle]++; + + return 0; +} + +/* + * ia_css_isys_constr_fw_next_frame() + */ +int ia_css_isys_constr_fw_next_frame( + struct ia_css_isys_context *ctx, + const unsigned int stream_handle, + ia_css_shared_buffer_css_address *pnext_frame_fw, + ia_css_shared_buffer *pbuf_next_frame_id, + const struct ia_css_isys_frame_buff_set *next_frame) +{ + ia_css_shared_buffer_cpu_address next_frame_cpu_addr; + ia_css_shared_buffer_css_address next_frame_css_addr; + int buff_slot; + unsigned int wrap_compensation; + const unsigned int wrap_condition = 0xFFFFFFFF; + + verifret(ctx, EFAULT); /* Host Consistency */ + verifret(pnext_frame_fw, EFAULT); /* Host Consistency */ + verifret(next_frame, EFAULT); /* Host Consistency */ + verifret(pbuf_next_frame_id, EFAULT); /* Host Consistency */ + + /* For some reason responses are not dequeued in time */ + verifret((ctx->isys_comm_buffer_queue. + next_frame_queue_head[stream_handle] - + ctx->isys_comm_buffer_queue. + next_frame_queue_tail[stream_handle]) < + NEXT_FRAME_BUFS_PER_MSG_QUEUE, EPERM); + buff_slot = get_next_frame_buff_slot(ctx, stream_handle, + ctx->isys_comm_buffer_queue. 
+ next_frame_queue_head[stream_handle] % + NEXT_FRAME_BUFS_PER_MSG_QUEUE); + *pbuf_next_frame_id = + ctx->isys_comm_buffer_queue.pnext_frame_buff_id[buff_slot]; + /* Host-FW Consistency */ + verifret(*pbuf_next_frame_id, EADDRNOTAVAIL); + + /* map it in cpu */ + next_frame_cpu_addr = + ia_css_shared_buffer_cpu_map(*pbuf_next_frame_id); + /* Host-FW Consistency */ + verifret(next_frame_cpu_addr, EADDRINUSE); + + frame_buff_set_host_to_css(next_frame, next_frame_cpu_addr); + + /* unmap the buffer from cpu */ + next_frame_cpu_addr = + ia_css_shared_buffer_cpu_unmap(*pbuf_next_frame_id); + /* Host Consistency */ + verifret(next_frame_cpu_addr, EADDRINUSE); + + /* map it to css */ + next_frame_css_addr = + ia_css_shared_buffer_css_map(*pbuf_next_frame_id); + /* Host Consistency */ + verifret(next_frame_css_addr, EADDRINUSE); + + ia_css_shared_buffer_css_update(ctx->mmid, *pbuf_next_frame_id); + + *pnext_frame_fw = next_frame_css_addr; + + /* + * cover head wrap around extreme case, + * in which case force tail to wrap around too + * while maintaining diff and modulo + */ + if (ctx->isys_comm_buffer_queue.next_frame_queue_head[stream_handle] == + wrap_condition) { + /* Value to be added to both head and tail */ + wrap_compensation = + /* + * Distance of wrap_condition to 0, + * will need to be added for wrapping around head to 0 + */ + (0 - wrap_condition) + + /* + * To force tail to also wrap around, + * since it has to happen concurrently + */ + NEXT_FRAME_BUFS_PER_MSG_QUEUE + + /* + * To preserve the same modulo, + * since the previous will result in head modulo 0 + */ + (wrap_condition % NEXT_FRAME_BUFS_PER_MSG_QUEUE); + ctx->isys_comm_buffer_queue. + next_frame_queue_head[stream_handle] += + wrap_compensation; + ctx->isys_comm_buffer_queue. 
+ next_frame_queue_tail[stream_handle] += + wrap_compensation; + } + ctx->isys_comm_buffer_queue.next_frame_queue_head[stream_handle]++; + + return 0; +} + +/* + * ia_css_isys_extract_fw_response() + */ +int ia_css_isys_extract_fw_response( + struct ia_css_isys_context *ctx, + const struct resp_queue_token *token, + struct ia_css_isys_resp_info *received_response) +{ + int buff_slot; + unsigned int css_address; + + verifret(ctx, EFAULT); /* Host Consistency */ + verifret(token, EFAULT); /* Host Consistency */ + verifret(received_response, EFAULT); /* Host Consistency */ + + resp_info_css_to_host(&(token->resp_info), received_response); + + switch (token->resp_info.type) { + case IA_CSS_ISYS_RESP_TYPE_STREAM_OPEN_DONE: + /* Host-FW Consistency */ + verifret((ctx->isys_comm_buffer_queue. + stream_cfg_queue_head[token->resp_info.stream_handle] - + ctx->isys_comm_buffer_queue.stream_cfg_queue_tail[ + token->resp_info.stream_handle]) > 0, EPROTO); + buff_slot = get_stream_cfg_buff_slot(ctx, + token->resp_info.stream_handle, + ctx->isys_comm_buffer_queue. + stream_cfg_queue_tail[ + token->resp_info.stream_handle] % + STREAM_CFG_BUFS_PER_MSG_QUEUE); + verifret((ia_css_shared_buffer)HOST_ADDRESS( + token->resp_info.buf_id) == + ctx->isys_comm_buffer_queue. + pstream_cfg_buff_id[buff_slot], EIO); + ctx->isys_comm_buffer_queue.stream_cfg_queue_tail[ + token->resp_info.stream_handle]++; + css_address = ia_css_shared_buffer_css_unmap( + (ia_css_shared_buffer) + HOST_ADDRESS(token->resp_info.buf_id)); + verifret(css_address, EADDRINUSE); + break; + case IA_CSS_ISYS_RESP_TYPE_STREAM_START_AND_CAPTURE_ACK: + case IA_CSS_ISYS_RESP_TYPE_STREAM_CAPTURE_ACK: + /* Host-FW Consistency */ + verifret((ctx->isys_comm_buffer_queue. 
+ next_frame_queue_head[token->resp_info.stream_handle] - + ctx->isys_comm_buffer_queue.next_frame_queue_tail[ + token->resp_info.stream_handle]) > 0, EPROTO); + buff_slot = get_next_frame_buff_slot(ctx, + token->resp_info.stream_handle, + ctx->isys_comm_buffer_queue. + next_frame_queue_tail[ + token->resp_info.stream_handle] % + NEXT_FRAME_BUFS_PER_MSG_QUEUE); + verifret((ia_css_shared_buffer)HOST_ADDRESS( + token->resp_info.buf_id) == + ctx->isys_comm_buffer_queue. + pnext_frame_buff_id[buff_slot], EIO); + ctx->isys_comm_buffer_queue.next_frame_queue_tail[ + token->resp_info.stream_handle]++; + css_address = ia_css_shared_buffer_css_unmap( + (ia_css_shared_buffer) + HOST_ADDRESS(token->resp_info.buf_id)); + verifret(css_address, EADDRINUSE); + break; + default: + break; + } + + return 0; +} + +/* + * ia_css_isys_extract_proxy_response() + */ +int ia_css_isys_extract_proxy_response( + const struct proxy_resp_queue_token *token, + struct ia_css_proxy_write_req_resp *preceived_response) +{ + verifret(token, EFAULT); /* Host Consistency */ + verifret(preceived_response, EFAULT); /* Host Consistency */ + + preceived_response->request_id = token->proxy_resp_info.request_id; + preceived_response->error = token->proxy_resp_info.error_info.error; + preceived_response->error_details = + token->proxy_resp_info.error_info.error_details; + + return 0; +} + +/* + * ia_css_isys_prepare_param() + */ +int ia_css_isys_prepare_param( + struct ia_css_isys_fw_config *isys_fw_cfg, + const struct ia_css_isys_buffer_partition *buf_partition, + const unsigned int num_send_queues[], + const unsigned int num_recv_queues[]) +{ + unsigned int i; + + verifret(isys_fw_cfg, EFAULT); /* Host Consistency */ + verifret(buf_partition, EFAULT); /* Host Consistency */ + verifret(num_send_queues, EFAULT); /* Host Consistency */ + verifret(num_recv_queues, EFAULT); /* Host Consistency */ + + buffer_partition_host_to_css(buf_partition, + &isys_fw_cfg->buffer_partition); + for (i = 0; i < 
N_IA_CSS_ISYS_QUEUE_TYPE; i++) { + isys_fw_cfg->num_send_queues[i] = num_send_queues[i]; + isys_fw_cfg->num_recv_queues[i] = num_recv_queues[i]; + } + + return 0; +} diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/isysapi/src/ia_css_isys_private.h b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/isysapi/src/ia_css_isys_private.h new file mode 100644 index 0000000000000..d53fa53c9a818 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/isysapi/src/ia_css_isys_private.h @@ -0,0 +1,156 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+*/ + +#ifndef __IA_CSS_ISYS_PRIVATE_H +#define __IA_CSS_ISYS_PRIVATE_H + + +#include "type_support.h" +/* Needed for the structure member ia_css_sys_context * sys */ +#include "ia_css_syscom.h" +/* Needed for the definitions of STREAM_ID_MAX */ +#include "ia_css_isysapi.h" +/* The following is needed for the function arguments */ +#include "ia_css_isys_fw_bridged_types.h" + +#include "ia_css_shared_buffer.h" + + +/* Set for the respective error handling */ +#define VERIFY_DEVSTATE 1 + +#if (VERIFY_DEVSTATE != 0) +/** + * enum device_state + */ +enum device_state { + IA_CSS_ISYS_DEVICE_STATE_IDLE = 0, + IA_CSS_ISYS_DEVICE_STATE_CONFIGURED = 1, + IA_CSS_ISYS_DEVICE_STATE_READY = 2 +}; +#endif /* VERIFY_DEVSTATE */ + +/** + * enum stream_state + */ +enum stream_state { + IA_CSS_ISYS_STREAM_STATE_IDLE = 0, + IA_CSS_ISYS_STREAM_STATE_OPENED = 1, + IA_CSS_ISYS_STREAM_STATE_STARTED = 2 +}; + + +/** + * struct ia_css_isys_comm_buffer_queue + */ +struct ia_css_isys_comm_buffer_queue { + ia_css_shared_buffer *pstream_cfg_buff_id; + unsigned int stream_cfg_queue_head[STREAM_ID_MAX]; + unsigned int stream_cfg_queue_tail[STREAM_ID_MAX]; + ia_css_shared_buffer *pnext_frame_buff_id; + unsigned int next_frame_queue_head[STREAM_ID_MAX]; + unsigned int next_frame_queue_tail[STREAM_ID_MAX]; +}; + + +/** + * struct ia_css_isys_context + */ +struct ia_css_isys_context { + struct ia_css_syscom_context *sys; + /* add here any isys specific members that need + to be passed into the isys api functions as input */ + unsigned int ssid; + unsigned int mmid; + unsigned int num_send_queues[N_IA_CSS_ISYS_QUEUE_TYPE]; + unsigned int num_recv_queues[N_IA_CSS_ISYS_QUEUE_TYPE]; + unsigned int send_queue_size[N_IA_CSS_ISYS_QUEUE_TYPE]; + struct ia_css_isys_comm_buffer_queue isys_comm_buffer_queue; + unsigned int stream_nof_output_pins[STREAM_ID_MAX]; +#if (VERIFY_DEVSTATE != 0) + enum device_state dev_state; +#endif /* VERIFY_DEVSTATE */ + enum stream_state stream_state_array[STREAM_ID_MAX]; + /* If 
true, this context is created based on secure config */ + bool secure; +}; + + +/** + * ia_css_isys_constr_comm_buff_queue() + */ +extern int ia_css_isys_constr_comm_buff_queue( + struct ia_css_isys_context *ctx +); + +/** + * ia_css_isys_force_unmap_comm_buff_queue() + */ +extern int ia_css_isys_force_unmap_comm_buff_queue( + struct ia_css_isys_context *ctx +); + +/** + * ia_css_isys_destr_comm_buff_queue() + */ +extern int ia_css_isys_destr_comm_buff_queue( + struct ia_css_isys_context *ctx +); + +/** + * ia_css_isys_constr_fw_stream_cfg() + */ +extern int ia_css_isys_constr_fw_stream_cfg( + struct ia_css_isys_context *ctx, + const unsigned int stream_handle, + ia_css_shared_buffer_css_address *pstream_cfg_fw, + ia_css_shared_buffer *pbuf_stream_cfg_id, + const struct ia_css_isys_stream_cfg_data *stream_cfg +); + +/** + * ia_css_isys_constr_fw_next_frame() + */ +extern int ia_css_isys_constr_fw_next_frame( + struct ia_css_isys_context *ctx, + const unsigned int stream_handle, + ia_css_shared_buffer_css_address *pnext_frame_fw, + ia_css_shared_buffer *pbuf_next_frame_id, + const struct ia_css_isys_frame_buff_set *next_frame +); + +/** + * ia_css_isys_extract_fw_response() + */ +extern int ia_css_isys_extract_fw_response( + struct ia_css_isys_context *ctx, + const struct resp_queue_token *token, + struct ia_css_isys_resp_info *received_response +); +extern int ia_css_isys_extract_proxy_response( + const struct proxy_resp_queue_token *token, + struct ia_css_proxy_write_req_resp *received_response +); + +/** + * ia_css_isys_prepare_param() + */ +extern int ia_css_isys_prepare_param( + struct ia_css_isys_fw_config *isys_fw_cfg, + const struct ia_css_isys_buffer_partition *buf_partition, + const unsigned int num_send_queues[], + const unsigned int num_recv_queues[] +); + +#endif /* __IA_CSS_ISYS_PRIVATE_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/isysapi/src/ia_css_isys_public.c 
b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/isysapi/src/ia_css_isys_public.c new file mode 100644 index 0000000000000..478d49f51cdd8 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/isysapi/src/ia_css_isys_public.c @@ -0,0 +1,1283 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ + +/* TODO: REMOVE --> START IF EXTERNALLY INCLUDED/DEFINED */ +/* These are temporary, the correct numbers need to be inserted/linked */ +/* Until this happens, the following definitions stay here */ +#define INPUT_MIN_WIDTH 1 +#define INPUT_MAX_WIDTH 16384 +#define INPUT_MIN_HEIGHT 1 +#define INPUT_MAX_HEIGHT 16384 +#define OUTPUT_MIN_WIDTH 1 +#define OUTPUT_MAX_WIDTH 16384 +#define OUTPUT_MIN_HEIGHT 1 +#define OUTPUT_MAX_HEIGHT 16384 +/* REMOVE --> END IF EXTERNALLY INCLUDED/DEFINED */ + + +/* The FW bridged types are included through the following */ +#include "ia_css_isysapi.h" +/* The following provides the isys-sys context */ +#include "ia_css_isys_private.h" +/* The following provides the sys layer functions */ +#include "ia_css_syscom.h" + +#include "ia_css_cell.h" +#include "ipu_device_cell_properties.h" + +/* The following provides the tracing functions */ +#include "ia_css_isysapi_trace.h" +#include "ia_css_isys_public_trace.h" + +#include "ia_css_shared_buffer_cpu.h" +/* The following is needed for the + * stddef.h (NULL), + * limits.h (CHAR_BIT definition). 
+ */ +#include "type_support.h" +#include "error_support.h" +#include "cpu_mem_support.h" +#include "math_support.h" +#include "misc_support.h" +#include "system_const.h" + +static int isys_context_create( + HANDLE * context, + const struct ia_css_isys_device_cfg_data *config); +static int isys_start_server( + const struct ia_css_isys_device_cfg_data *config); + +static int isys_context_create( + HANDLE * context, + const struct ia_css_isys_device_cfg_data *config) +{ + int retval; + unsigned int stream_handle; + struct ia_css_isys_context *ctx; + struct ia_css_syscom_config sys; + /* Needs to be updated in case new type of queues are introduced */ + struct ia_css_syscom_queue_config input_queue_cfg[N_MAX_SEND_QUEUES]; + /* Needs to be updated in case new type of queues are introduced */ + struct ia_css_syscom_queue_config output_queue_cfg[N_MAX_RECV_QUEUES]; + struct ia_css_isys_fw_config isys_fw_cfg; + unsigned int proxy_write_queue_size; + unsigned int ssid; + unsigned int mmid; + unsigned int i; + + /* Printing "ENTRY isys_context_create" + * if tracing level = VERBOSE. + */ + IA_CSS_TRACE_0(ISYSAPI, VERBOSE, "ENTRY isys_context_create\n"); + + verifret(config != NULL, EFAULT); + + /* Printing configuration information if tracing level = VERBOSE. 
*/ +#if ISYSAPI_TRACE_CONFIG == ISYSAPI_TRACE_LOG_LEVEL_DEBUG + print_device_config_data(config); +#endif /* ISYSAPI_TRACE_CONFIG == ISYSAPI_TRACE_LOG_LEVEL_DEBUG */ + + /* Runtime check for # of send and recv MSG queues */ + verifret(config->driver_sys.num_send_queues <= + N_MAX_MSG_SEND_QUEUES/*=STREAM_ID_MAX*/, EINVAL); + verifret(config->driver_sys.num_recv_queues <= + N_MAX_MSG_RECV_QUEUES, EINVAL); + + /* Runtime check for send and recv MSG queue sizes */ + verifret(config->driver_sys.send_queue_size <= MAX_QUEUE_SIZE, EINVAL); + verifret(config->driver_sys.recv_queue_size <= MAX_QUEUE_SIZE, EINVAL); + + /* TODO: return an error in case MAX_QUEUE_SIZE is exceeded + * (Similar to runtime check on MSG queue sizes) + */ + proxy_write_queue_size = uclip( + config->driver_proxy.proxy_write_queue_size, + MIN_QUEUE_SIZE, + MAX_QUEUE_SIZE); + + ctx = (struct ia_css_isys_context *) + ia_css_cpu_mem_alloc(sizeof(struct ia_css_isys_context)); + verifret(ctx != NULL, EFAULT); + *context = (HANDLE)ctx; + + /* Copy to the sys config the driver_sys config, + * and add the internal info (token sizes) + */ + ssid = config->driver_sys.ssid; + mmid = config->driver_sys.mmid; + sys.ssid = ssid; + sys.mmid = mmid; + + ctx->secure = config->secure; + /* Following operations need to be aligned with + * "enum ia_css_isys_queue_type" list (list of queue types) + */ + ctx->num_send_queues[IA_CSS_ISYS_QUEUE_TYPE_PROXY] = + N_MAX_PROXY_SEND_QUEUES; + ctx->num_send_queues[IA_CSS_ISYS_QUEUE_TYPE_DEV] = + N_MAX_DEV_SEND_QUEUES; + ctx->num_send_queues[IA_CSS_ISYS_QUEUE_TYPE_MSG] = + config->driver_sys.num_send_queues; + ctx->num_recv_queues[IA_CSS_ISYS_QUEUE_TYPE_PROXY] = + N_MAX_PROXY_RECV_QUEUES; + ctx->num_recv_queues[IA_CSS_ISYS_QUEUE_TYPE_DEV] = + 0; /* Common msg/dev return queue */ + ctx->num_recv_queues[IA_CSS_ISYS_QUEUE_TYPE_MSG] = + config->driver_sys.num_recv_queues; + + sys.num_input_queues = + ctx->num_send_queues[IA_CSS_ISYS_QUEUE_TYPE_PROXY] + + 
ctx->num_send_queues[IA_CSS_ISYS_QUEUE_TYPE_DEV] + + ctx->num_send_queues[IA_CSS_ISYS_QUEUE_TYPE_MSG]; + sys.num_output_queues = + ctx->num_recv_queues[IA_CSS_ISYS_QUEUE_TYPE_PROXY] + + ctx->num_recv_queues[IA_CSS_ISYS_QUEUE_TYPE_DEV] + + ctx->num_recv_queues[IA_CSS_ISYS_QUEUE_TYPE_MSG]; + + sys.input = input_queue_cfg; + for (i = 0; + i < ctx->num_send_queues[IA_CSS_ISYS_QUEUE_TYPE_PROXY]; + i++) { + input_queue_cfg[BASE_PROXY_SEND_QUEUES + i].queue_size = + proxy_write_queue_size; + input_queue_cfg[BASE_PROXY_SEND_QUEUES + i].token_size = + sizeof(struct proxy_send_queue_token); + } + for (i = 0; + i < ctx->num_send_queues[IA_CSS_ISYS_QUEUE_TYPE_DEV]; + i++) { + input_queue_cfg[BASE_DEV_SEND_QUEUES + i].queue_size = + DEV_SEND_QUEUE_SIZE; + input_queue_cfg[BASE_DEV_SEND_QUEUES + i].token_size = + sizeof(struct send_queue_token); + } + for (i = 0; + i < ctx->num_send_queues[IA_CSS_ISYS_QUEUE_TYPE_MSG]; + i++) { + input_queue_cfg[BASE_MSG_SEND_QUEUES + i].queue_size = + config->driver_sys.send_queue_size; + input_queue_cfg[BASE_MSG_SEND_QUEUES + i].token_size = + sizeof(struct send_queue_token); + } + + ctx->send_queue_size[IA_CSS_ISYS_QUEUE_TYPE_PROXY] = + proxy_write_queue_size; + ctx->send_queue_size[IA_CSS_ISYS_QUEUE_TYPE_DEV] = + DEV_SEND_QUEUE_SIZE; + ctx->send_queue_size[IA_CSS_ISYS_QUEUE_TYPE_MSG] = + config->driver_sys.send_queue_size; + + sys.output = output_queue_cfg; + for (i = 0; + i < ctx->num_recv_queues[IA_CSS_ISYS_QUEUE_TYPE_PROXY]; + i++) { + output_queue_cfg[BASE_PROXY_RECV_QUEUES + i].queue_size = + proxy_write_queue_size; + output_queue_cfg[BASE_PROXY_RECV_QUEUES + i].token_size = + sizeof(struct proxy_resp_queue_token); + } + /* There is no recv DEV queue */ + for (i = 0; + i < ctx->num_recv_queues[IA_CSS_ISYS_QUEUE_TYPE_MSG]; + i++) { + output_queue_cfg[BASE_MSG_RECV_QUEUES + i].queue_size = + config->driver_sys.recv_queue_size; + output_queue_cfg[BASE_MSG_RECV_QUEUES + i].token_size = + sizeof(struct resp_queue_token); + } + + sys.regs_addr 
= ipu_device_cell_memory_address(SPC0, + IPU_DEVICE_SP2600_CONTROL_REGS); + sys.dmem_addr = ipu_device_cell_memory_address(SPC0, + IPU_DEVICE_SP2600_CONTROL_DMEM); + +#if HAS_DUAL_CMD_CTX_SUPPORT + sys.dmem_addr += config->secure ? REGMEM_SECURE_OFFSET : REGMEM_OFFSET; +#endif + + /* Prepare the param */ + ia_css_isys_prepare_param( + &isys_fw_cfg, + &config->buffer_partition, + ctx->num_send_queues, + ctx->num_recv_queues); + + /* parameter struct to be passed to fw */ + sys.specific_addr = &isys_fw_cfg; + /* parameters size */ + sys.specific_size = sizeof(isys_fw_cfg); + sys.secure = config->secure; + if (config->secure) { + sys.vtl0_addr_mask = config->vtl0_addr_mask; + } + + IA_CSS_TRACE_0(ISYSAPI, VERBOSE, + "isys_context_create || call ia_css_syscom_open()\n"); + /* The allocation of the queues will take place within this call and + * info will be stored in sys_context output + */ + ctx->sys = ia_css_syscom_open(&sys, NULL); + if (!ctx->sys) { + ia_css_cpu_mem_free(ctx); + return -EFAULT; + } + + /* Update the context with the id's */ + ctx->ssid = ssid; + ctx->mmid = mmid; + + for (stream_handle = 0; stream_handle < STREAM_ID_MAX; + stream_handle++) { + ctx->stream_state_array[stream_handle] = + IA_CSS_ISYS_STREAM_STATE_IDLE; + } + + retval = ia_css_isys_constr_comm_buff_queue(ctx); + if (retval) { + ia_css_syscom_close(ctx->sys); + ia_css_syscom_release(ctx->sys, 1); + ia_css_cpu_mem_free(ctx); + return retval; + } + +#if (VERIFY_DEVSTATE != 0) + ctx->dev_state = IA_CSS_ISYS_DEVICE_STATE_CONFIGURED; +#endif /* VERIFY_DEVSTATE */ + + /* Printing device configuration and device handle context information + * if tracing level = VERBOSE. + */ +#if ISYSAPI_TRACE_CONFIG == ISYSAPI_TRACE_LOG_LEVEL_DEBUG + print_handle_context(ctx); +#endif /* ISYSAPI_TRACE_CONFIG == ISYSAPI_TRACE_LOG_LEVEL_DEBUG */ + + /* Printing "LEAVE isys_context_create" message + * if tracing level = VERBOSE. 
+ */ + IA_CSS_TRACE_0(ISYSAPI, VERBOSE, "LEAVE isys_context_create\n"); + return 0; +} + +static int isys_start_server( + const struct ia_css_isys_device_cfg_data *config) +{ + verifret(config != NULL, EFAULT); + IA_CSS_TRACE_0(ISYSAPI, VERBOSE, + "isys_start_server || start SPC\n"); + /* The firmware is loaded and syscom is ready, start the SPC */ + ia_css_cell_start_prefetch(config->driver_sys.ssid, SPC0, + config->driver_sys.icache_prefetch); + IA_CSS_TRACE_1(ISYSAPI, VERBOSE, "SPC prefetch: %d\n", + config->driver_sys.icache_prefetch); + return 0; +} + +/** + * ia_css_isys_device_open() - open and configure ISYS device + */ +#if HAS_DUAL_CMD_CTX_SUPPORT +int ia_css_isys_context_create( + HANDLE * context, + const struct ia_css_isys_device_cfg_data *config) +{ + return isys_context_create(context, config); +} + +/* push context information to DMEM for FW to access */ +int ia_css_isys_context_store_dmem( + const HANDLE * context, + const struct ia_css_isys_device_cfg_data *config) +{ + struct ia_css_isys_context *ctx = (struct ia_css_isys_context *) *context; + + return ia_css_syscom_store_dmem(ctx->sys, config->driver_sys.ssid, config->vtl0_addr_mask); +} + +bool ia_css_isys_ab_spc_ready( + HANDLE * context) +{ + struct ia_css_isys_context *ctx = (struct ia_css_isys_context *) *context; + + return ia_css_syscom_is_ab_spc_ready(ctx->sys); +} + +int ia_css_isys_device_open( + const struct ia_css_isys_device_cfg_data *config) +{ + return isys_start_server(config); +} +#else +int ia_css_isys_device_open( + HANDLE * context, + const struct ia_css_isys_device_cfg_data *config) +{ + int retval; + + retval = isys_context_create(context, config); + if (retval) { + IA_CSS_TRACE_1(ISYSAPI, ERROR, "ia_css_isys_device_open() failed (retval %d)\n", retval); + return retval; + } + + isys_start_server(config); + return 0; +} +#endif + +/** + * ia_css_isys_device_open_ready() - open and configure ISYS device + */ +int ia_css_isys_device_open_ready(HANDLE context) +{ + struct 
ia_css_isys_context *ctx = (struct ia_css_isys_context *)context; + unsigned int i; + int retval; + + /* Printing "ENTRY IA_CSS_ISYS_DEVICE_OPEN" + * if tracing level = VERBOSE. + */ + IA_CSS_TRACE_0(ISYSAPI, VERBOSE, "ENTRY IA_CSS_ISYS_DEVICE_OPEN\n"); + + verifret(ctx, EFAULT); + + /* Printing device handle context information + * if tracing level = VERBOSE. + */ +#if ISYSAPI_TRACE_CONFIG == ISYSAPI_TRACE_LOG_LEVEL_DEBUG + print_handle_context(ctx); +#endif /* ISYSAPI_TRACE_CONFIG == ISYSAPI_TRACE_LOG_LEVEL_DEBUG */ + +#if (VERIFY_DEVSTATE != 0) + verifret(ctx->dev_state == IA_CSS_ISYS_DEVICE_STATE_CONFIGURED, EPERM); +#endif /* VERIFY_DEVSTATE */ + + /* Open the ports for all the non-MSG send queues (PROXY + DEV) */ + for (i = 0; + i < ctx->num_send_queues[IA_CSS_ISYS_QUEUE_TYPE_PROXY] + + ctx->num_send_queues[IA_CSS_ISYS_QUEUE_TYPE_DEV]; + i++) { + retval = ia_css_syscom_send_port_open(ctx->sys, i); + verifret(retval != FW_ERROR_BUSY, EBUSY); + verifret(retval != FW_ERROR_BAD_ADDRESS, EFAULT); + verifret(retval == 0, EINVAL); + } + + /* Open the ports for all the recv queues (PROXY + MSG) */ + for (i = 0; + i < (ctx->num_recv_queues[IA_CSS_ISYS_QUEUE_TYPE_PROXY] + + ctx->num_recv_queues[IA_CSS_ISYS_QUEUE_TYPE_MSG]); + i++) { + retval = ia_css_syscom_recv_port_open(ctx->sys, i); + verifret(retval != FW_ERROR_BUSY, EBUSY); + verifret(retval != FW_ERROR_BAD_ADDRESS, EFAULT); + verifret(retval == 0, EINVAL); + } + +#if (VERIFY_DEVSTATE != 0) + ctx->dev_state = IA_CSS_ISYS_DEVICE_STATE_READY; +#endif /* VERIFY_DEVSTATE */ + + /* Printing "LEAVE IA_CSS_ISYS_DEVICE_OPEN_READY" message + * if tracing level = VERBOSE. 
+ */ + IA_CSS_TRACE_0(ISYSAPI, VERBOSE, + "LEAVE IA_CSS_ISYS_DEVICE_OPEN_READY\n"); + return 0; +} + + + /** + * ia_css_isys_stream_open() - open and configure a virtual stream + */ +int ia_css_isys_stream_open( + HANDLE context, + const unsigned int stream_handle, + const struct ia_css_isys_stream_cfg_data *stream_cfg) +{ + struct ia_css_isys_context *ctx = (struct ia_css_isys_context *)context; + unsigned int i; + int retval = 0; + int packets; + struct send_queue_token token; + ia_css_shared_buffer_css_address stream_cfg_fw = 0; + ia_css_shared_buffer buf_stream_cfg_id = (ia_css_shared_buffer)NULL; + /* Printing "ENTRY IA_CSS_ISYS_STREAM_OPEN" message + * if tracing level = VERBOSE. + */ + IA_CSS_TRACE_0(ISYSAPI, VERBOSE, "ENTRY IA_CSS_ISYS_STREAM_OPEN\n"); + + verifret(ctx, EFAULT); + + /* Printing stream configuration and device handle context information + * if tracing level = VERBOSE. + */ +#if ISYSAPI_TRACE_CONFIG == ISYSAPI_TRACE_LOG_LEVEL_DEBUG + print_handle_context(ctx); + print_stream_config_data(stream_cfg); +#endif /* ISYSAPI_TRACE_CONFIG == ISYSAPI_TRACE_LOG_LEVEL_DEBUG */ + +#if (VERIFY_DEVSTATE != 0) + verifret(ctx->dev_state == IA_CSS_ISYS_DEVICE_STATE_READY, EPERM); +#endif /* VERIFY_DEVSTATE */ + + verifret(stream_handle < STREAM_ID_MAX, EINVAL); + verifret(stream_handle < + ctx->num_send_queues[IA_CSS_ISYS_QUEUE_TYPE_MSG], EINVAL); + + verifret(ctx->stream_state_array[stream_handle] == + IA_CSS_ISYS_STREAM_STATE_IDLE, EPERM); + + verifret(stream_cfg != NULL, EFAULT); + verifret(stream_cfg->src < N_IA_CSS_ISYS_STREAM_SRC, EINVAL); + verifret(stream_cfg->vc < N_IA_CSS_ISYS_MIPI_VC, EINVAL); + verifret(stream_cfg->isl_use < N_IA_CSS_ISYS_USE, EINVAL); + if (stream_cfg->isl_use != IA_CSS_ISYS_USE_NO_ISL_NO_ISA) { + verifret(stream_cfg-> + crop[IA_CSS_ISYS_CROPPING_LOCATION_PRE_ISA].bottom_offset >= + stream_cfg-> + crop[IA_CSS_ISYS_CROPPING_LOCATION_PRE_ISA].top_offset + + OUTPUT_MIN_HEIGHT, EINVAL); + + verifret(stream_cfg-> + 
crop[IA_CSS_ISYS_CROPPING_LOCATION_PRE_ISA].bottom_offset <= + stream_cfg-> + crop[IA_CSS_ISYS_CROPPING_LOCATION_PRE_ISA].top_offset + + OUTPUT_MAX_HEIGHT, EINVAL); + + verifret(stream_cfg-> + crop[IA_CSS_ISYS_CROPPING_LOCATION_PRE_ISA].right_offset >= + stream_cfg-> + crop[IA_CSS_ISYS_CROPPING_LOCATION_PRE_ISA].left_offset + + OUTPUT_MIN_WIDTH, EINVAL); + + verifret(stream_cfg-> + crop[IA_CSS_ISYS_CROPPING_LOCATION_PRE_ISA].right_offset <= + stream_cfg-> + crop[IA_CSS_ISYS_CROPPING_LOCATION_PRE_ISA].left_offset + + OUTPUT_MAX_WIDTH, EINVAL); + } + verifret(stream_cfg->nof_input_pins <= MAX_IPINS, EINVAL); + verifret(stream_cfg->nof_output_pins <= MAX_OPINS, EINVAL); + for (i = 0; i < stream_cfg->nof_input_pins; i++) { + /* Verify input pin */ + verifret( + stream_cfg->input_pins[i].input_res.width >= + INPUT_MIN_WIDTH && + stream_cfg->input_pins[i].input_res.width <= + INPUT_MAX_WIDTH && + stream_cfg->input_pins[i].input_res.height >= + INPUT_MIN_HEIGHT && + stream_cfg->input_pins[i].input_res.height <= + INPUT_MAX_HEIGHT, EINVAL); + verifret(stream_cfg->input_pins[i].dt < + N_IA_CSS_ISYS_MIPI_DATA_TYPE, EINVAL); +/* #ifdef To be removed when driver inits the value */ +#ifdef DRIVER_INIT_MIPI_STORE_MODE + verifret(stream_cfg->input_pins[i].mipi_store_mode < + N_IA_CSS_ISYS_MIPI_STORE_MODE, EINVAL); +#endif /* DRIVER_INIT_MIPI_STORE_MODE */ + } + for (i = 0; i < stream_cfg->nof_output_pins; i++) { + /* Verify output pin */ + verifret(stream_cfg->output_pins[i].input_pin_id < + stream_cfg->nof_input_pins, EINVAL); + verifret(stream_cfg->output_pins[i].pt < + N_IA_CSS_ISYS_PIN_TYPE, EINVAL); + verifret(stream_cfg->output_pins[i].ft < + N_IA_CSS_ISYS_FRAME_FORMAT, EINVAL); + /* Verify that the stride is aligned to 64 bytes: HW spec */ + verifret(stream_cfg->output_pins[i].stride%(XMEM_WIDTH/8) == + 0, EINVAL); + verifret((stream_cfg->output_pins[i].output_res.width >= + OUTPUT_MIN_WIDTH) && + (stream_cfg->output_pins[i].output_res.width <= + OUTPUT_MAX_WIDTH) && + 
(stream_cfg->output_pins[i].output_res.height >= + OUTPUT_MIN_HEIGHT) && + (stream_cfg->output_pins[i].output_res.height <= + OUTPUT_MAX_HEIGHT), EINVAL); + verifret((stream_cfg->output_pins[i].pt == + IA_CSS_ISYS_PIN_TYPE_MIPI) || + (stream_cfg-> + input_pins[stream_cfg->output_pins[i].input_pin_id].mipi_store_mode != + IA_CSS_ISYS_MIPI_STORE_MODE_DISCARD_LONG_HEADER), EINVAL); + if (stream_cfg->isl_use == IA_CSS_ISYS_USE_SINGLE_ISA) { + switch (stream_cfg->output_pins[i].pt) { + case IA_CSS_ISYS_PIN_TYPE_RAW_NS: + /* Ensure the PIFCONV cropped resolution + * matches the RAW_NS output pin resolution + */ + verifret(stream_cfg-> + crop[IA_CSS_ISYS_CROPPING_LOCATION_POST_ISA_NONSCALED].bottom_offset == + stream_cfg-> + crop[IA_CSS_ISYS_CROPPING_LOCATION_POST_ISA_NONSCALED].top_offset + + (int)stream_cfg->output_pins[i].output_res.height, EINVAL); + verifret(stream_cfg-> + crop[IA_CSS_ISYS_CROPPING_LOCATION_POST_ISA_NONSCALED].right_offset == + stream_cfg-> + crop[IA_CSS_ISYS_CROPPING_LOCATION_POST_ISA_NONSCALED].left_offset + + (int)stream_cfg->output_pins[i].output_res.width, EINVAL); + /* Ensure the ISAPF cropped resolution matches + * the Non-scaled ISA output resolution before + * the PIFCONV cropping, since nothing can + * modify the resolution in that part of + * the pipe + */ + verifret(stream_cfg-> + crop[IA_CSS_ISYS_CROPPING_LOCATION_PRE_ISA].bottom_offset == + stream_cfg->crop[IA_CSS_ISYS_CROPPING_LOCATION_PRE_ISA].top_offset + + (int)stream_cfg-> + isa_cfg.isa_res[IA_CSS_ISYS_RESOLUTION_INFO_POST_ISA_NONSCALED].height, + EINVAL); + verifret(stream_cfg-> + crop[IA_CSS_ISYS_CROPPING_LOCATION_PRE_ISA].right_offset == + stream_cfg->crop[IA_CSS_ISYS_CROPPING_LOCATION_PRE_ISA].left_offset + + (int)stream_cfg-> + isa_cfg.isa_res[IA_CSS_ISYS_RESOLUTION_INFO_POST_ISA_NONSCALED].width, + EINVAL); + /* Ensure the Non-scaled ISA output resolution + * before the PIFCONV cropping bounds the + * RAW_NS pin output resolution since padding + * is not supported + */ + 
verifret(stream_cfg-> +isa_cfg.isa_res[IA_CSS_ISYS_RESOLUTION_INFO_POST_ISA_NONSCALED].height >= +stream_cfg->output_pins[i].output_res.height, EINVAL); + verifret(stream_cfg-> +isa_cfg.isa_res[IA_CSS_ISYS_RESOLUTION_INFO_POST_ISA_NONSCALED].width >= +stream_cfg->output_pins[i].output_res.width, EINVAL); + break; + case IA_CSS_ISYS_PIN_TYPE_RAW_S: + /* Ensure the ScaledPIFCONV cropped resolution + * matches the RAW_S output pin resolution + */ + verifret(stream_cfg-> + crop[IA_CSS_ISYS_CROPPING_LOCATION_POST_ISA_SCALED].bottom_offset == + stream_cfg-> + crop[IA_CSS_ISYS_CROPPING_LOCATION_POST_ISA_SCALED].top_offset + + (int)stream_cfg->output_pins[i].output_res.height, EINVAL); + verifret(stream_cfg-> + crop[IA_CSS_ISYS_CROPPING_LOCATION_POST_ISA_SCALED].right_offset == + stream_cfg-> + crop[IA_CSS_ISYS_CROPPING_LOCATION_POST_ISA_SCALED].left_offset + + (int)stream_cfg->output_pins[i].output_res.width, EINVAL); + /* Ensure the ISAPF cropped resolution bounds + * the Scaled ISA output resolution before the + * ScaledPIFCONV cropping, since only IDS can + * modify the resolution, and this only to + * make it smaller + */ + verifret(stream_cfg-> + crop[IA_CSS_ISYS_CROPPING_LOCATION_PRE_ISA].bottom_offset >= + stream_cfg->crop[IA_CSS_ISYS_CROPPING_LOCATION_PRE_ISA].top_offset + + (int)stream_cfg-> + isa_cfg.isa_res[IA_CSS_ISYS_RESOLUTION_INFO_POST_ISA_SCALED].height, + EINVAL); + verifret(stream_cfg-> + crop[IA_CSS_ISYS_CROPPING_LOCATION_PRE_ISA].right_offset >= + stream_cfg->crop[IA_CSS_ISYS_CROPPING_LOCATION_PRE_ISA].left_offset + + (int)stream_cfg-> + isa_cfg.isa_res[IA_CSS_ISYS_RESOLUTION_INFO_POST_ISA_SCALED].width, + EINVAL); + /* Ensure the Scaled ISA output resolution + * before the ScaledPIFCONV cropping bounds + * the RAW_S pin output resolution since + * padding is not supported + */ + verifret(stream_cfg-> + isa_cfg.isa_res[IA_CSS_ISYS_RESOLUTION_INFO_POST_ISA_SCALED].height >= + stream_cfg->output_pins[i].output_res.height, EINVAL); + verifret(stream_cfg-> 
+ isa_cfg.isa_res[IA_CSS_ISYS_RESOLUTION_INFO_POST_ISA_SCALED].width >= + stream_cfg->output_pins[i].output_res.width, EINVAL); + break; + default: + break; + } + } + } + + /* open 1 send queue/stream and a single receive queue + * if not existing + */ + retval = ia_css_syscom_send_port_open(ctx->sys, + (BASE_MSG_SEND_QUEUES + stream_handle)); + verifret(retval != FW_ERROR_BUSY, EBUSY); + verifret(retval != FW_ERROR_BAD_ADDRESS, EFAULT); + verifret(retval == 0, EINVAL); + + packets = ia_css_syscom_send_port_available(ctx->sys, + (BASE_MSG_SEND_QUEUES + stream_handle)); + verifret(packets != FW_ERROR_BAD_ADDRESS, EFAULT); + verifret(packets >= 0, EINVAL); + verifret(packets > 0, EPERM); + token.send_type = IA_CSS_ISYS_SEND_TYPE_STREAM_OPEN; + retval = ia_css_isys_constr_fw_stream_cfg(ctx, stream_handle, + &stream_cfg_fw, &buf_stream_cfg_id, stream_cfg); + verifret(retval == 0, retval); + token.payload = stream_cfg_fw; + token.buf_handle = HOST_ADDRESS(buf_stream_cfg_id); + retval = ia_css_syscom_send_port_transfer(ctx->sys, + (BASE_MSG_SEND_QUEUES + stream_handle), &token); + verifret(retval != FW_ERROR_BAD_ADDRESS, EFAULT); + verifret(retval >= 0, EINVAL); + + ctx->stream_nof_output_pins[stream_handle] = + stream_cfg->nof_output_pins; + ctx->stream_state_array[stream_handle] = + IA_CSS_ISYS_STREAM_STATE_OPENED; + + /* Printing "LEAVE IA_CSS_ISYS_STREAM_OPEN" message + * if tracing level = VERBOSE. + */ + IA_CSS_TRACE_0(ISYSAPI, VERBOSE, "LEAVE IA_CSS_ISYS_STREAM_OPEN\n"); + + return 0; +} + + +/** + * ia_css_isys_stream_close() - close virtual stream + */ +int ia_css_isys_stream_close( + HANDLE context, + const unsigned int stream_handle) +{ + struct ia_css_isys_context *ctx = (struct ia_css_isys_context *)context; + int retval = 0; + int packets; + struct send_queue_token token; + + /* Printing "LEAVE IA_CSS_ISYS_STREAM_CLOSE" message + * if tracing level = VERBOSE. 
+ */ + IA_CSS_TRACE_0(ISYSAPI, VERBOSE, "ENTRY IA_CSS_ISYS_STREAM_CLOSE\n"); + + verifret(ctx, EFAULT); + + /* Printing device handle context information + * if tracing level = VERBOSE. + */ +#if ISYSAPI_TRACE_CONFIG == ISYSAPI_TRACE_LOG_LEVEL_DEBUG + print_handle_context(ctx); +#endif /* ISYSAPI_TRACE_CONFIG == ISYSAPI_TRACE_LOG_LEVEL_DEBUG */ + +#if (VERIFY_DEVSTATE != 0) + verifret(ctx->dev_state == IA_CSS_ISYS_DEVICE_STATE_READY, EPERM); +#endif /* VERIFY_DEVSTATE */ + + verifret(stream_handle < STREAM_ID_MAX, EINVAL); + verifret(stream_handle < + ctx->num_send_queues[IA_CSS_ISYS_QUEUE_TYPE_MSG], EINVAL); + + verifret(ctx->stream_state_array[stream_handle] == + IA_CSS_ISYS_STREAM_STATE_OPENED, EPERM); + + packets = ia_css_syscom_send_port_available(ctx->sys, + (BASE_MSG_SEND_QUEUES + stream_handle)); + verifret(packets != FW_ERROR_BAD_ADDRESS, EFAULT); + verifret(packets >= 0, EINVAL); + verifret(packets > 0, EPERM); + token.send_type = IA_CSS_ISYS_SEND_TYPE_STREAM_CLOSE; + token.stream_id = stream_handle; + token.payload = 0; + token.buf_handle = 0; + retval = ia_css_syscom_send_port_transfer(ctx->sys, + (BASE_MSG_SEND_QUEUES + stream_handle), &token); + verifret(retval != FW_ERROR_BAD_ADDRESS, EFAULT); + verifret(retval >= 0, EINVAL); + + /* close 1 send queue/stream and the single receive queue + * if none is using it + */ + retval = ia_css_syscom_send_port_close(ctx->sys, + (BASE_MSG_SEND_QUEUES + stream_handle)); + verifret(retval != FW_ERROR_BAD_ADDRESS, EFAULT); + verifret(retval == 0, EINVAL); + + ctx->stream_state_array[stream_handle] = IA_CSS_ISYS_STREAM_STATE_IDLE; + /* Printing "LEAVE IA_CSS_ISYS_STREAM_CLOSE" message + * if tracing level = VERBOSE. 
+ */ + IA_CSS_TRACE_0(ISYSAPI, VERBOSE, "LEAVE IA_CSS_ISYS_STREAM_CLOSE\n"); + + return 0; +} + + +/** + * ia_css_isys_stream_start() - starts handling a mipi virtual stream + */ +int ia_css_isys_stream_start( + HANDLE context, + const unsigned int stream_handle, + const struct ia_css_isys_frame_buff_set *next_frame) +{ + struct ia_css_isys_context *ctx = (struct ia_css_isys_context *)context; + int retval = 0; + int packets; + struct send_queue_token token; + ia_css_shared_buffer_css_address next_frame_fw = 0; + ia_css_shared_buffer buf_next_frame_id = (ia_css_shared_buffer)NULL; + + /* Printing "ENTRY IA_CSS_ISYS_STREAM_START" message + * if tracing level = VERBOSE. + */ + IA_CSS_TRACE_0(ISYSAPI, VERBOSE, "ENTRY IA_CSS_ISYS_STREAM_START\n"); + + verifret(ctx, EFAULT); + + /* Printing frame configuration and device handle context information + * if tracing level = VERBOSE. + */ +#if ISYSAPI_TRACE_CONFIG == ISYSAPI_TRACE_LOG_LEVEL_DEBUG + print_handle_context(ctx); + print_isys_frame_buff_set(next_frame, + ctx->stream_nof_output_pins[stream_handle]); +#endif /* ISYSAPI_TRACE_CONFIG == ISYSAPI_TRACE_LOG_LEVEL_DEBUG */ + +#if (VERIFY_DEVSTATE != 0) + verifret(ctx->dev_state == IA_CSS_ISYS_DEVICE_STATE_READY, EPERM); +#endif /* VERIFY_DEVSTATE */ + + verifret(stream_handle < STREAM_ID_MAX, EINVAL); + verifret(stream_handle < + ctx->num_send_queues[IA_CSS_ISYS_QUEUE_TYPE_MSG], EINVAL); + + verifret(ctx->stream_state_array[stream_handle] == + IA_CSS_ISYS_STREAM_STATE_OPENED, EPERM); + + packets = ia_css_syscom_send_port_available(ctx->sys, + (BASE_MSG_SEND_QUEUES + stream_handle)); + verifret(packets != FW_ERROR_BAD_ADDRESS, EFAULT); + verifret(packets >= 0, EINVAL); + verifret(packets > 0, EPERM); + if (next_frame != NULL) { + token.send_type = + IA_CSS_ISYS_SEND_TYPE_STREAM_START_AND_CAPTURE; + retval = ia_css_isys_constr_fw_next_frame(ctx, stream_handle, + &next_frame_fw, &buf_next_frame_id, next_frame); + verifret(retval == 0, retval); + token.payload = 
next_frame_fw; + token.buf_handle = HOST_ADDRESS(buf_next_frame_id); + } else { + token.send_type = IA_CSS_ISYS_SEND_TYPE_STREAM_START; + token.payload = 0; + token.buf_handle = 0; + } + retval = ia_css_syscom_send_port_transfer(ctx->sys, + (BASE_MSG_SEND_QUEUES + stream_handle), &token); + verifret(retval != FW_ERROR_BAD_ADDRESS, EFAULT); + verifret(retval >= 0, EINVAL); + + ctx->stream_state_array[stream_handle] = + IA_CSS_ISYS_STREAM_STATE_STARTED; + /* Printing "LEAVE IA_CSS_ISYS_STREAM_START" message + * if tracing level = VERBOSE. + */ + IA_CSS_TRACE_0(ISYSAPI, VERBOSE, "LEAVE IA_CSS_ISYS_STREAM_START\n"); + + return 0; +} + + +/** + * ia_css_isys_stream_stop() - Stops a mipi virtual stream + */ +int ia_css_isys_stream_stop( + HANDLE context, + const unsigned int stream_handle) +{ + struct ia_css_isys_context *ctx = (struct ia_css_isys_context *)context; + int retval = 0; + int packets; + struct send_queue_token token; + + /* Printing "ENTRY IA_CSS_ISYS_STREAM_STOP" message + * if tracing level = VERBOSE. + */ + IA_CSS_TRACE_0(ISYSAPI, VERBOSE, "ENTRY IA_CSS_ISYS_STREAM_STOP\n"); + + verifret(ctx, EFAULT); + + /* Printing device handle context information + * if tracing level = VERBOSE. 
+ */ +#if ISYSAPI_TRACE_CONFIG == ISYSAPI_TRACE_LOG_LEVEL_DEBUG + print_handle_context(ctx); +#endif /* ISYSAPI_TRACE_CONFIG == ISYSAPI_TRACE_LOG_LEVEL_DEBUG */ + +#if (VERIFY_DEVSTATE != 0) + verifret(ctx->dev_state == IA_CSS_ISYS_DEVICE_STATE_READY, EPERM); +#endif /* VERIFY_DEVSTATE */ + + verifret(stream_handle < STREAM_ID_MAX, EINVAL); + verifret(stream_handle < + ctx->num_send_queues[IA_CSS_ISYS_QUEUE_TYPE_MSG], EINVAL); + + verifret(ctx->stream_state_array[stream_handle] == + IA_CSS_ISYS_STREAM_STATE_STARTED, EPERM); + + packets = ia_css_syscom_send_port_available(ctx->sys, + (BASE_DEV_SEND_QUEUES)); + verifret(packets != FW_ERROR_BAD_ADDRESS, EFAULT); + verifret(packets >= 0, EINVAL); + verifret(packets > 0, EPERM); + token.send_type = IA_CSS_ISYS_SEND_TYPE_STREAM_STOP; + token.stream_id = stream_handle; + token.payload = 0; + token.buf_handle = 0; + retval = ia_css_syscom_send_port_transfer(ctx->sys, + (BASE_DEV_SEND_QUEUES), &token); + verifret(retval != FW_ERROR_BAD_ADDRESS, EFAULT); + verifret(retval >= 0, EINVAL); + + ctx->stream_state_array[stream_handle] = + IA_CSS_ISYS_STREAM_STATE_OPENED; + + /* Printing "LEAVE IA_CSS_ISYS_STREAM_STOP" message + * if tracing level = VERBOSE. + */ + IA_CSS_TRACE_0(ISYSAPI, VERBOSE, "LEAVE IA_CSS_ISYS_STREAM_STOP\n"); + + return 0; +} + + +/** + * ia_css_isys_stream_flush() - stops a mipi virtual stream but + * completes processing cmd backlog + */ +int ia_css_isys_stream_flush( + HANDLE context, + const unsigned int stream_handle) +{ + struct ia_css_isys_context *ctx = (struct ia_css_isys_context *)context; + int retval = 0; + int packets; + struct send_queue_token token; + + /* Printing "ENTRY IA_CSS_ISYS_STREAM_FLUSH" message + * if tracing level = VERBOSE. + */ + IA_CSS_TRACE_0(ISYSAPI, VERBOSE, "ENTRY IA_CSS_ISYS_STREAM_FLUSH\n"); + + verifret(ctx, EFAULT); + + /* Printing device handle context information + * if tracing level = VERBOSE. 
+ */ +#if ISYSAPI_TRACE_CONFIG == ISYSAPI_TRACE_LOG_LEVEL_DEBUG + print_handle_context(ctx); +#endif /* ISYSAPI_TRACE_CONFIG == ISYSAPI_TRACE_LOG_LEVEL_DEBUG */ + +#if (VERIFY_DEVSTATE != 0) + verifret(ctx->dev_state == IA_CSS_ISYS_DEVICE_STATE_READY, EPERM); +#endif /* VERIFY_DEVSTATE */ + + verifret(stream_handle < STREAM_ID_MAX, EINVAL); + verifret(stream_handle < + ctx->num_send_queues[IA_CSS_ISYS_QUEUE_TYPE_MSG], EINVAL); + + verifret(ctx->stream_state_array[stream_handle] == + IA_CSS_ISYS_STREAM_STATE_STARTED, EPERM); + + packets = ia_css_syscom_send_port_available(ctx->sys, + (BASE_MSG_SEND_QUEUES + stream_handle)); + verifret(packets != FW_ERROR_BAD_ADDRESS, EFAULT); + verifret(packets >= 0, EINVAL); + verifret(packets > 0, EPERM); + token.send_type = IA_CSS_ISYS_SEND_TYPE_STREAM_FLUSH; + token.payload = 0; + token.buf_handle = 0; + retval = ia_css_syscom_send_port_transfer(ctx->sys, + (BASE_MSG_SEND_QUEUES + stream_handle), &token); + verifret(retval != FW_ERROR_BAD_ADDRESS, EFAULT); + verifret(retval >= 0, EINVAL); + + ctx->stream_state_array[stream_handle] = + IA_CSS_ISYS_STREAM_STATE_OPENED; + + /* Printing "LEAVE IA_CSS_ISYS_STREAM_FLUSH" message + * if tracing level = VERBOSE. + */ + IA_CSS_TRACE_0(ISYSAPI, VERBOSE, "LEAVE IA_CSS_ISYS_STREAM_FLUSH\n"); + + return 0; +} + + +/** + * ia_css_isys_stream_capture_indication() + * - captures "next frame" on stream_handle + */ +int ia_css_isys_stream_capture_indication( + HANDLE context, + const unsigned int stream_handle, + const struct ia_css_isys_frame_buff_set *next_frame) +{ + struct ia_css_isys_context *ctx = (struct ia_css_isys_context *)context; + int retval = 0; + int packets; + struct send_queue_token token; + ia_css_shared_buffer_css_address next_frame_fw = 0; + ia_css_shared_buffer buf_next_frame_id = (ia_css_shared_buffer)NULL; + + /* Printing "ENTRY IA_CSS_ISYS_STREAM_CAPTURE_INDICATION" message + * if tracing level = VERBOSE. 
+ */ + IA_CSS_TRACE_0(ISYSAPI, VERBOSE, + "ENTRY IA_CSS_ISYS_STREAM_CAPTURE_INDICATION\n"); + + verifret(ctx, EFAULT); + + /* Printing frame configuration and device handle context information + *if tracing level = VERBOSE. + */ +#if ISYSAPI_TRACE_CONFIG == ISYSAPI_TRACE_LOG_LEVEL_DEBUG + print_handle_context(ctx); + print_isys_frame_buff_set(next_frame, + ctx->stream_nof_output_pins[stream_handle]); +#endif /* ISYSAPI_TRACE_CONFIG == ISYSAPI_TRACE_LOG_LEVEL_DEBUG */ + +#if (VERIFY_DEVSTATE != 0) + verifret(ctx->dev_state == IA_CSS_ISYS_DEVICE_STATE_READY, EPERM); +#endif /* VERIFY_DEVSTATE */ + + verifret(stream_handle < STREAM_ID_MAX, EINVAL); + verifret(stream_handle < + ctx->num_send_queues[IA_CSS_ISYS_QUEUE_TYPE_MSG], EINVAL); + verifret(ctx->stream_state_array[stream_handle] == + IA_CSS_ISYS_STREAM_STATE_STARTED, EPERM); + verifret(next_frame != NULL, EFAULT); + + packets = ia_css_syscom_send_port_available(ctx->sys, + (BASE_MSG_SEND_QUEUES + stream_handle)); + verifret(packets != FW_ERROR_BAD_ADDRESS, EFAULT); + verifret(packets >= 0, EINVAL); + verifret(packets > 0, EPERM); + { + token.send_type = IA_CSS_ISYS_SEND_TYPE_STREAM_CAPTURE; + retval = ia_css_isys_constr_fw_next_frame(ctx, stream_handle, + &next_frame_fw, &buf_next_frame_id, next_frame); + verifret(retval == 0, retval); + token.payload = next_frame_fw; + token.buf_handle = HOST_ADDRESS(buf_next_frame_id); + } + retval = ia_css_syscom_send_port_transfer(ctx->sys, + (BASE_MSG_SEND_QUEUES + stream_handle), &token); + verifret(retval != FW_ERROR_BAD_ADDRESS, EFAULT); + verifret(retval >= 0, EINVAL); + + /* Printing "LEAVE IA_CSS_ISYS_STREAM_CAPTURE_INDICATION" message + * if tracing level = VERBOSE. 
+ */ + IA_CSS_TRACE_0(ISYSAPI, VERBOSE, + "LEAVE IA_CSS_ISYS_STREAM_CAPTURE_INDICATION\n"); + + return 0; +} + + +/** + * ia_css_isys_stream_handle_response() - handle ISYS responses + */ +int ia_css_isys_stream_handle_response( + HANDLE context, + struct ia_css_isys_resp_info *received_response) +{ + struct ia_css_isys_context *ctx = (struct ia_css_isys_context *)context; + int retval = 0; + int packets; + struct resp_queue_token token; + + /* Printing "ENTRY IA_CSS_ISYS_STREAM_HANDLE_RESPONSE" message + * if tracing level = VERBOSE. + */ + IA_CSS_TRACE_0(ISYSAPI, VERBOSE, + "ENTRY IA_CSS_ISYS_STREAM_HANDLE_RESPONSE\n"); + + verifret(ctx, EFAULT); + + /* Printing device handle context information + * if tracing level = VERBOSE. + */ +#if ISYSAPI_TRACE_CONFIG == ISYSAPI_TRACE_LOG_LEVEL_DEBUG + print_handle_context(ctx); +#endif /* ISYSAPI_TRACE_CONFIG == ISYSAPI_TRACE_LOG_LEVEL_DEBUG */ + +#if (VERIFY_DEVSTATE != 0) + verifret(ctx->dev_state == IA_CSS_ISYS_DEVICE_STATE_READY, EPERM); +#endif /* VERIFY_DEVSTATE */ + + verifret(received_response != NULL, EFAULT); + + packets = ia_css_syscom_recv_port_available( + ctx->sys, BASE_MSG_RECV_QUEUES); + verifret(packets != FW_ERROR_BAD_ADDRESS, EFAULT); + verifret(packets >= 0, EINVAL); + verifret(packets > 0, EPERM); + + retval = ia_css_syscom_recv_port_transfer( + ctx->sys, BASE_MSG_RECV_QUEUES, &token); + verifret(retval != FW_ERROR_BAD_ADDRESS, EFAULT); + verifret(retval >= 0, EINVAL); + retval = ia_css_isys_extract_fw_response( + ctx, &token, received_response); + verifret(retval == 0, retval); + + /* Printing received response information + * if tracing level = VERBOSE. 
+ */ +#if ISYSAPI_TRACE_CONFIG == ISYSAPI_TRACE_LOG_LEVEL_DEBUG + print_isys_resp_info(received_response); +#endif /* ISYSAPI_TRACE_CONFIG == ISYSAPI_TRACE_LOG_LEVEL_DEBUG */ + + verifret(received_response->type < N_IA_CSS_ISYS_RESP_TYPE, EINVAL); + verifret(received_response->stream_handle < STREAM_ID_MAX, EINVAL); + + if (received_response->type == IA_CSS_ISYS_RESP_TYPE_PIN_DATA_READY || + received_response->type == IA_CSS_ISYS_RESP_TYPE_PIN_DATA_WATERMARK || + received_response->type == IA_CSS_ISYS_RESP_TYPE_PIN_DATA_SKIPPED) { + verifret(received_response->pin.addr != 0, EFAULT); + verifret(received_response->pin.out_buf_id != 0, EFAULT); + verifret(received_response->pin_id < + ctx->stream_nof_output_pins[received_response->stream_handle], + EINVAL); + } + + /* Printing "LEAVE IA_CSS_ISYS_STREAM_HANDLE_RESPONSE" message + * if tracing level = VERBOSE. + */ + IA_CSS_TRACE_0(ISYSAPI, VERBOSE, + "LEAVE IA_CSS_ISYS_STREAM_HANDLE_RESPONSE\n"); + + return 0; +} + + +/** + * ia_css_isys_device_close() - close ISYS device + */ +static int isys_context_destroy(HANDLE context) +{ + struct ia_css_isys_context *ctx = (struct ia_css_isys_context *)context; + unsigned int stream_handle; + unsigned int queue_id; + unsigned int nof_recv_queues; + int retval = 0; + + /* Printing "ENTRY IA_CSS_ISYS_DEVICE_CLOSE" message + * if tracing level = VERBOSE. + */ + IA_CSS_TRACE_0(ISYSAPI, VERBOSE, "ENTRY isys_context_destroy\n"); + + verifret(ctx, EFAULT); + + /* Printing device handle context information + * if tracing level = VERBOSE. 
+ */ +#if ISYSAPI_TRACE_CONFIG == ISYSAPI_TRACE_LOG_LEVEL_DEBUG + print_handle_context(ctx); +#endif /* ISYSAPI_TRACE_CONFIG == ISYSAPI_TRACE_LOG_LEVEL_DEBUG */ + +#if (VERIFY_DEVSTATE != 0) + verifret(ctx->dev_state == IA_CSS_ISYS_DEVICE_STATE_READY, EPERM); +#endif /* VERIFY_DEVSTATE */ + + nof_recv_queues = ctx->num_recv_queues[IA_CSS_ISYS_QUEUE_TYPE_MSG] + + ctx->num_recv_queues[IA_CSS_ISYS_QUEUE_TYPE_PROXY]; + /* Close the ports for all the recv queues (MSG and PROXY) */ + for (queue_id = 0; queue_id < nof_recv_queues; queue_id++) { + retval = ia_css_syscom_recv_port_close( + ctx->sys, queue_id); + verifret(retval != FW_ERROR_BAD_ADDRESS, EFAULT); + verifret(retval == 0, EINVAL); + } + + /* Close the ports for PROXY send queue(s) */ + for (queue_id = 0; + queue_id < ctx->num_send_queues[IA_CSS_ISYS_QUEUE_TYPE_PROXY] + + ctx->num_send_queues[IA_CSS_ISYS_QUEUE_TYPE_DEV]; + queue_id++) { + retval = ia_css_syscom_send_port_close( + ctx->sys, queue_id); + verifret(retval != FW_ERROR_BAD_ADDRESS, EFAULT); + verifret(retval == 0, EINVAL); + } + + for (stream_handle = 0; stream_handle < STREAM_ID_MAX; + stream_handle++) { + verifret(ctx->stream_state_array[stream_handle] == + IA_CSS_ISYS_STREAM_STATE_IDLE, EPERM); + } + + retval = ia_css_syscom_close(ctx->sys); + verifret(retval == 0, EBUSY); + +#if (VERIFY_DEVSTATE != 0) + ctx->dev_state = IA_CSS_ISYS_DEVICE_STATE_CONFIGURED; +#endif /* VERIFY_DEVSTATE */ + + /* Printing "LEAVE IA_CSS_ISYS_DEVICE_CLOSE" message + * if tracing level = VERBOSE. 
+ */ + IA_CSS_TRACE_0(ISYSAPI, VERBOSE, "LEAVE isys_context_destroy\n"); + + return 0; +} +/** + * ia_css_isys_device_close() - close ISYS device + */ +#if HAS_DUAL_CMD_CTX_SUPPORT +int ia_css_isys_context_destroy(HANDLE context) +{ + return isys_context_destroy(context); +} + +void ia_css_isys_device_close(void) +{ + /* Created for legacy, nothing to perform here */ +} + +#else +int ia_css_isys_device_close(HANDLE context) +{ + return isys_context_destroy(context); +} +#endif + +/** + * ia_css_isys_device_release() - release ISYS device + */ +int ia_css_isys_device_release(HANDLE context, unsigned int force) +{ + struct ia_css_isys_context *ctx = (struct ia_css_isys_context *)context; + int retval = 0; + + /* Printing "ENTRY IA_CSS_ISYS_DEVICE_RELEASE" message + * if tracing level = VERBOSE. + */ + IA_CSS_TRACE_0(ISYSAPI, VERBOSE, "ENTRY IA_CSS_ISYS_DEVICE_RELEASE\n"); + + verifret(ctx, EFAULT); + + /* Printing device handle context information + * if tracing level = VERBOSE. + */ +#if ISYSAPI_TRACE_CONFIG == ISYSAPI_TRACE_LOG_LEVEL_DEBUG + print_handle_context(ctx); +#endif /* ISYSAPI_TRACE_CONFIG == ISYSAPI_TRACE_LOG_LEVEL_DEBUG */ + +#if (VERIFY_DEVSTATE != 0) + verifret(ctx->dev_state == IA_CSS_ISYS_DEVICE_STATE_CONFIGURED, EPERM); +#endif /* VERIFY_DEVSTATE */ + + retval = ia_css_syscom_release(ctx->sys, force); + verifret(retval == 0, EBUSY); + + /* If ia_css_isys_device_release called with force==1, this should + * happen after timeout, so no active transfers + * If ia_css_isys_device_release called with force==0, this should + * happen after SP has gone idle, so no active transfers + */ + ia_css_isys_force_unmap_comm_buff_queue(ctx); + ia_css_isys_destr_comm_buff_queue(ctx); + +#if (VERIFY_DEVSTATE != 0) + ctx->dev_state = IA_CSS_ISYS_DEVICE_STATE_IDLE; +#endif /* VERIFY_DEVSTATE */ + + ia_css_cpu_mem_free(ctx); + + /* Printing "LEAVE IA_CSS_ISYS_DEVICE_RELEASE" message + * if tracing level = VERBOSE. 
+ */ + IA_CSS_TRACE_0(ISYSAPI, VERBOSE, "LEAVE IA_CSS_ISYS_DEVICE_RELEASE\n"); + + return 0; +} + +/** + * ia_css_isys_proxy_write_req() - send ISYS proxy write requests + */ +int ia_css_isys_proxy_write_req( + HANDLE context, + const struct ia_css_proxy_write_req_val *write_req_val) +{ + + struct ia_css_isys_context *ctx = (struct ia_css_isys_context *)context; + struct proxy_send_queue_token token; + int packets; + int retval = 0; + + IA_CSS_TRACE_0(ISYSAPI, VERBOSE, "ENTRY IA_CSS_ISYS_PROXY_WRITE_REQ\n"); + verifret(ctx, EFAULT); + verifret(write_req_val != NULL, EFAULT); + + packets = ia_css_syscom_send_port_available(ctx->sys, 0); + verifret(packets != FW_ERROR_BAD_ADDRESS, EFAULT); + verifret(packets >= 0, EINVAL); + verifret(packets > 0, EPERM); + + token.request_id = write_req_val->request_id; + token.region_index = write_req_val->region_index; + token.offset = write_req_val->offset; + token.value = write_req_val->value; + + retval = ia_css_syscom_send_port_transfer(ctx->sys, 0, &token); + verifret(retval != FW_ERROR_BAD_ADDRESS, EFAULT); + verifret(retval >= 0, EINVAL); + + IA_CSS_TRACE_0(ISYSAPI, VERBOSE, "LEAVE IA_CSS_ISYS_PROXY_WRITE_REQ\n"); + + return 0; +} + +/** + * ia_css_isys_proxy_handle_write_response() - handle ISYS proxy responses + */ +int ia_css_isys_proxy_handle_write_response( + HANDLE context, + struct ia_css_proxy_write_req_resp *received_response) +{ + + struct ia_css_isys_context *ctx = (struct ia_css_isys_context *)context; + struct proxy_resp_queue_token token; + int retval = 0; + int packets; + + IA_CSS_TRACE_0(ISYSAPI, VERBOSE, + "ENTRY IA_CSS_ISYS_PROXY_HANDLE_WRITE_RESPONSE\n"); + verifret(ctx, EFAULT); + verifret(received_response != NULL, EFAULT); + + packets = ia_css_syscom_recv_port_available(ctx->sys, 0); + verifret(packets != FW_ERROR_BAD_ADDRESS, EFAULT); + verifret(packets >= 0, EINVAL); + verifret(packets > 0, EPERM); + + retval = ia_css_syscom_recv_port_transfer(ctx->sys, 0, &token); + verifret(retval != 
FW_ERROR_BAD_ADDRESS, EFAULT); + verifret(retval >= 0, EINVAL); + + + retval = ia_css_isys_extract_proxy_response(&token, received_response); + verifret(retval == 0, retval); + + IA_CSS_TRACE_0(ISYSAPI, VERBOSE, + "LEAVE IA_CSS_ISYS_PROXY_HANDLE_WRITE_RESPONSE\n"); + + return 0; +} diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/isysapi/src/ia_css_isys_public_trace.c b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/isysapi/src/ia_css_isys_public_trace.c new file mode 100644 index 0000000000000..d6500a0cb6056 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/isysapi/src/ia_css_isys_public_trace.c @@ -0,0 +1,379 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+*/ + +#include "ia_css_isysapi_trace.h" +#include "ia_css_isys_public_trace.h" +#include "ia_css_isysapi_types.h" +#include "ia_css_isysapi.h" +#include "ia_css_isys_private.h" +#include "error_support.h" +#include "ia_css_syscom.h" + +/** + * print_handle_context - formatted print function for + * struct ia_css_isys_context *ctx variable + */ +int print_handle_context(struct ia_css_isys_context *ctx) +{ + unsigned int i; + + verifret(ctx != NULL, EFAULT); + /* Print ctx->(ssid, mmid, dev_state) */ + IA_CSS_TRACE_0(ISYSAPI, VERBOSE, "Print ia_css_isys_context *ctx\n" + "-------------------------------------------------------\n"); + IA_CSS_TRACE_3(ISYSAPI, VERBOSE, + "\tia_css_isys_context->ssid = %d\n" + "\t\t\tia_css_isys_context->mmid = %d\n" + "\t\t\tia_css_isys_context->device_state = %d\n" + , ctx->ssid + , ctx->mmid + , ctx->dev_state); + /* Print ctx->(stream_state_array, stream_nof_output_pins) */ + for (i = 0; i < STREAM_ID_MAX; i++) { + IA_CSS_TRACE_4(ISYSAPI, VERBOSE, + "\tia_css_isys_context->stream_state[i = %d] = %d\n" + "\t\t\tia_css_isys_context->stream_nof_output_pins[i = %d] = %d\n" + , i + , ctx->stream_state_array[i] + , i + , ctx->stream_nof_output_pins[i]); + } + /* Print ctx->ia_css_syscom_context */ + IA_CSS_TRACE_1(ISYSAPI, VERBOSE, + "\tia_css_isys_context->ia_css_syscom_context = %p\n" + , (struct ia_css_syscom_context *)(ctx->sys)); + IA_CSS_TRACE_0(ISYSAPI, VERBOSE, + "-------------------------------------------------------\n"); + return 0; +} + +/** + * print_device_config_data - formatted print function for + * struct ia_css_isys_device_cfg_data *config variable + */ +int print_device_config_data(const struct ia_css_isys_device_cfg_data *config) +{ + verifret(config != NULL, EFAULT); + IA_CSS_TRACE_0(ISYSAPI, + VERBOSE, + "Print ia_css_isys_device_cfg_data *config\n" + "-------------------------------------------------------\n"); + IA_CSS_TRACE_7(ISYSAPI, + VERBOSE, + "\tia_css_isys_device_cfg_data->driver_sys.ssid = %d\n" + 
"\t\t\tia_css_isys_device_cfg_data->driver_sys.mmid = %d\n" + "\t\t\tia_css_isys_device_cfg_data->driver_sys.num_send_queues = %d\n" + "\t\t\tia_css_isys_device_cfg_data->driver_sys.num_recv_queues = %d\n" + "\t\t\tia_css_isys_device_cfg_data->driver_sys.send_queue_size = %d\n" + "\t\t\tia_css_isys_device_cfg_data->driver_sys.recv_queue_size = %d\n" + "\t\t\tia_css_isys_device_cfg_data->driver_proxy.proxy_write_queue_size = %d\n", + config->driver_sys.ssid, + config->driver_sys.mmid, + config->driver_sys.num_send_queues, + config->driver_sys.num_recv_queues, + config->driver_sys.send_queue_size, + config->driver_sys.recv_queue_size, + config->driver_proxy.proxy_write_queue_size); + IA_CSS_TRACE_0(ISYSAPI, VERBOSE, + "-------------------------------------------------------\n"); + return 0; +} + +/** + * print_stream_config_data - formatted print function for + * ia_css_isys_stream_cfg_data stream_cfg variable + */ +int print_stream_config_data( + const struct ia_css_isys_stream_cfg_data *stream_cfg) +{ + unsigned int i; + + verifret(stream_cfg != NULL, EFAULT); + IA_CSS_TRACE_0(ISYSAPI, VERBOSE, + "Print ia_css_isys_stream_cfg_data stream_cfg\n" + "-------------------------------------------------------\n"); + IA_CSS_TRACE_5(ISYSAPI, VERBOSE, + "\tia_css_isys_stream_cfg_data->ia_css_isys_isl_use = %d\n" + "\t\t\tia_css_isys_stream_cfg_data->ia_css_isys_stream_source = %d\n" + "\t\t\tia_css_isys_stream_cfg_data->ia_css_isys_mipi_vc = %d\n" + "\t\t\tia_css_isys_stream_cfg_data->nof_input_pins = %d\n" + "\t\t\tia_css_isys_stream_cfg_data->nof_output_pins = %d\n" + , stream_cfg->isl_use + , stream_cfg->src + , stream_cfg->vc + , stream_cfg->nof_input_pins + , stream_cfg->nof_output_pins); + IA_CSS_TRACE_4(ISYSAPI, VERBOSE, + "\tia_css_isys_stream_cfg_data->send_irq_sof_discarded = %d\n" + "\t\t\tia_css_isys_stream_cfg_data->send_irq_eof_discarded = %d\n" + "\t\t\tia_css_isys_stream_cfg_data->send_resp_sof_discarded = %d\n" + 
"\t\t\tia_css_isys_stream_cfg_data->send_resp_eof_discarded = %d\n" + , stream_cfg->send_irq_sof_discarded + , stream_cfg->send_irq_eof_discarded + , stream_cfg->send_resp_sof_discarded + , stream_cfg->send_resp_eof_discarded); + for (i = 0; i < stream_cfg->nof_input_pins; i++) { + IA_CSS_TRACE_6(ISYSAPI, VERBOSE, + "\tia_css_isys_stream_cfg_data->ia_css_isys_input_pin_info[i = %d].ia_css_isys_mipi_data_type = %d\n" + "\t\t\tia_css_isys_stream_cfg_data->ia_css_isys_input_pin_info[i = %d].ia_css_isys_resolution.width = %d\n" + "\t\t\tia_css_isys_stream_cfg_data->ia_css_isys_input_pin_info[i = %d].ia_css_isys_resolution.height = %d\n" + , i + , stream_cfg->input_pins[i].dt + , i + , stream_cfg->input_pins[i].input_res.width + , i + , stream_cfg->input_pins[i].input_res.height); + IA_CSS_TRACE_2(ISYSAPI, VERBOSE, + "\tia_css_isys_stream_cfg_data->ia_css_isys_input_pin_info[i = %d].ia_css_isys_mipi_store_mode = %d\n" + , i + , stream_cfg->input_pins[i].mipi_store_mode); + } + for (i = 0; i < N_IA_CSS_ISYS_CROPPING_LOCATION; i++) { + IA_CSS_TRACE_4(ISYSAPI, VERBOSE, + "\tia_css_isys_stream_cfg_data->ia_css_isys_cropping[i = %d].top_offset = %d\n" + "\t\t\tia_css_isys_stream_cfg_data->ia_css_isys_cropping[i = %d].left_offset = %d\n" + , i + , stream_cfg->crop[i].top_offset + , i + , stream_cfg->crop[i].left_offset); + IA_CSS_TRACE_4(ISYSAPI, VERBOSE, + "\tia_css_isys_stream_cfg_data->ia_css_isys_cropping[i = %d].bottom_offset = %d\n" + "\t\t\tia_css_isys_stream_cfg_data->ia_css_isys_cropping[i = %d].right_offset = %d\n" + , i + , stream_cfg->crop[i].bottom_offset + , i + , stream_cfg->crop[i].right_offset); + } + for (i = 0; i < stream_cfg->nof_output_pins; i++) { + IA_CSS_TRACE_6(ISYSAPI, VERBOSE, + "\tia_css_isys_stream_cfg_data->ia_css_isys_output_pin_info[i = %d].ia_css_isys_pin_type = %d\n" + "\t\t\tia_css_isys_stream_cfg_data->ia_css_isys_output_pin_info[i = %d].ia_css_isys_frame_format_type = %d\n" + 
"\t\t\tia_css_isys_stream_cfg_data->ia_css_isys_output_pin_info[i = %d].input_pin_id = %d\n" + , i + , stream_cfg->output_pins[i].pt + , i + , stream_cfg->output_pins[i].ft + , i + , stream_cfg->output_pins[i].input_pin_id); + IA_CSS_TRACE_6(ISYSAPI, VERBOSE, + "\tia_css_isys_stream_cfg_data->ia_css_isys_output_pin_info[i = %d].watermark_in_lines = %d\n" + "\t\t\tia_css_isys_stream_cfg_data->ia_css_isys_output_pin_info[i = %d].send_irq = %d\n" + "\t\t\tia_css_isys_stream_cfg_data->ia_css_isys_output_pin_info[i = %d].stride = %d\n" + , i + , stream_cfg->output_pins[i].watermark_in_lines + , i + , stream_cfg->output_pins[i].send_irq + , i + , stream_cfg->output_pins[i].stride); + IA_CSS_TRACE_4(ISYSAPI, VERBOSE, + "\tia_css_isys_stream_cfg_data->ia_css_isys_output_pin_info[i = %d].ia_css_isys_resolution.width = %d\n" + "\t\t\tia_css_isys_stream_cfg_data->ia_css_isys_output_pin_info[i = %d].ia_css_isys_resolution.height = %d\n" + , i + , stream_cfg->output_pins[i].output_res.width + , i + , stream_cfg->output_pins[i].output_res.height); + } + for (i = 0; i < N_IA_CSS_ISYS_RESOLUTION_INFO; i++) { + IA_CSS_TRACE_4(ISYSAPI, VERBOSE, + "\tia_css_isys_stream_cfg_data->ia_css_isys_isa_cfg.ia_css_isys_resolution[i = %d].width = %d\n" + "\t\t\tia_css_isys_stream_cfg_data->ia_css_isys_isa_cfg.ia_css_isys_resolution[i = %d].height = %d\n" + , i + , stream_cfg->isa_cfg.isa_res[i].width + , i + , stream_cfg->isa_cfg.isa_res[i].height); + } + IA_CSS_TRACE_7(ISYSAPI, VERBOSE, + "\tia_css_isys_stream_cfg_data->ia_css_isys_isa_cfg.blc_enabled = %d\n" + "\t\t\tia_css_isys_stream_cfg_data->ia_css_isys_isa_cfg.lsc_enabled = %d\n" + "\t\t\tia_css_isys_stream_cfg_data->ia_css_isys_isa_cfg.dpc_enabled = %d\n" + "\t\t\tia_css_isys_stream_cfg_data->ia_css_isys_isa_cfg.downscaler_enabled = %d\n" + "\t\t\tia_css_isys_stream_cfg_data->ia_css_isys_isa_cfg.awb_enabled = %d\n" + "\t\t\tia_css_isys_stream_cfg_data->ia_css_isys_isa_cfg.af_enabled = %d\n" + 
"\t\t\tia_css_isys_stream_cfg_data->ia_css_isys_isa_cfg.ae_enabled = %d\n" + , stream_cfg->isa_cfg.blc_enabled + , stream_cfg->isa_cfg.lsc_enabled + , stream_cfg->isa_cfg.dpc_enabled + , stream_cfg->isa_cfg.downscaler_enabled + , stream_cfg->isa_cfg.awb_enabled + , stream_cfg->isa_cfg.af_enabled + , stream_cfg->isa_cfg.ae_enabled); + + IA_CSS_TRACE_1(ISYSAPI, VERBOSE, + "\t\t\tia_css_isys_stream_cfg_data->ia_css_isys_isa_cfg.paf_type = %d\n" + , stream_cfg->isa_cfg.paf_type); + + IA_CSS_TRACE_0(ISYSAPI, VERBOSE, + "-------------------------------------------------------\n"); + return 0; +} + +/** + * print_isys_frame_buff_set - formatted print function for + * struct ia_css_isys_frame_buff_set *next_frame variable + */ +int print_isys_frame_buff_set( + const struct ia_css_isys_frame_buff_set *next_frame, + const unsigned int nof_output_pins) +{ + unsigned int i; + + verifret(next_frame != NULL, EFAULT); + + IA_CSS_TRACE_0(ISYSAPI, VERBOSE, + "Print ia_css_isys_frame_buff_set *next_frame\n" + "-------------------------------------------------------\n"); + for (i = 0; i < nof_output_pins; i++) { + IA_CSS_TRACE_4(ISYSAPI, VERBOSE, + "\tia_css_isys_frame_buff_set->ia_css_isys_output_pin_payload[i = %d].ia_css_return_token = %016lxu\n" + "\t\t\tia_css_isys_frame_buff_set->ia_css_isys_output_pin_payload[i = %d].ia_css_input_buffer_css_address = %08xu\n" + , i + , (unsigned long int) + next_frame->output_pins[i].out_buf_id + , i + , next_frame->output_pins[i].addr); + } + IA_CSS_TRACE_2(ISYSAPI, VERBOSE, + "\tia_css_isys_frame_buff_set->process_group_light.ia_css_return_token = %016lxu\n" + "\t\t\tia_css_isys_frame_buff_set->process_group_light.ia_css_input_buffer_css_address = %08xu\n" + , (unsigned long int) + next_frame->process_group_light.param_buf_id + , next_frame->process_group_light.addr); + IA_CSS_TRACE_4(ISYSAPI, VERBOSE, + "\tia_css_isys_frame_buff_set->send_irq_sof = %d\n" + "\t\t\tia_css_isys_frame_buff_set->send_irq_eof = %d\n" + 
"\t\t\tia_css_isys_frame_buff_set->send_resp_sof = %d\n" + "\t\t\tia_css_isys_frame_buff_set->send_resp_eof = %d\n" + , (int) next_frame->send_irq_sof + , (int) next_frame->send_irq_eof + , (int) next_frame->send_resp_sof + , (int) next_frame->send_resp_eof); + IA_CSS_TRACE_0(ISYSAPI, VERBOSE, + "-------------------------------------------------------\n"); + return 0; +} + +/** + * print_isys_resp_info - formatted print function for + * struct ia_css_isys_resp_info *received_response variable + */ +int print_isys_resp_info(struct ia_css_isys_resp_info *received_response) +{ + verifret(received_response != NULL, EFAULT); + + IA_CSS_TRACE_0(ISYSAPI, VERBOSE, "ISYS_RESPONSE_INFO\n" + "-------------------------------------------------------\n"); + switch (received_response->type) { + case IA_CSS_ISYS_RESP_TYPE_STREAM_OPEN_DONE: + IA_CSS_TRACE_0(ISYSAPI, VERBOSE, + "\tia_css_isys_resp_info.ia_css_isys_resp_type = IA_CSS_ISYS_RESP_TYPE_STREAM_OPEN_DONE\n"); + break; + case IA_CSS_ISYS_RESP_TYPE_STREAM_START_ACK: + IA_CSS_TRACE_0(ISYSAPI, VERBOSE, + "\tia_css_isys_resp_info.ia_css_isys_resp_type = IA_CSS_ISYS_RESP_TYPE_STREAM_START_ACK\n"); + break; + case IA_CSS_ISYS_RESP_TYPE_STREAM_START_AND_CAPTURE_ACK: + IA_CSS_TRACE_0(ISYSAPI, VERBOSE, + "\tia_css_isys_resp_info.ia_css_isys_resp_type = IA_CSS_ISYS_RESP_TYPE_STREAM_START_AND_CAPTURE_ACK\n"); + break; + case IA_CSS_ISYS_RESP_TYPE_STREAM_CAPTURE_ACK: + IA_CSS_TRACE_0(ISYSAPI, VERBOSE, + "\tia_css_isys_resp_info.ia_css_isys_resp_type = IA_CSS_ISYS_RESP_TYPE_STREAM_CAPTURE_ACK\n"); + break; + case IA_CSS_ISYS_RESP_TYPE_STREAM_STOP_ACK: + IA_CSS_TRACE_0(ISYSAPI, VERBOSE, + "\tia_css_isys_resp_info.ia_css_isys_resp_type = IA_CSS_ISYS_RESP_TYPE_STREAM_STOP_ACK\n"); + break; + case IA_CSS_ISYS_RESP_TYPE_STREAM_FLUSH_ACK: + IA_CSS_TRACE_0(ISYSAPI, VERBOSE, + "\tia_css_isys_resp_info.ia_css_isys_resp_type = IA_CSS_ISYS_RESP_TYPE_STREAM_FLUSH_ACK\n"); + break; + case IA_CSS_ISYS_RESP_TYPE_STREAM_CLOSE_ACK: + 
IA_CSS_TRACE_0(ISYSAPI, VERBOSE, + "\tia_css_isys_resp_info.ia_css_isys_resp_type = IA_CSS_ISYS_RESP_TYPE_STREAM_CLOSE_ACK\n"); + break; + case IA_CSS_ISYS_RESP_TYPE_PIN_DATA_READY: + IA_CSS_TRACE_0(ISYSAPI, VERBOSE, + "\tia_css_isys_resp_info.ia_css_isys_resp_type = IA_CSS_ISYS_RESP_TYPE_PIN_DATA_READY\n"); + break; + case IA_CSS_ISYS_RESP_TYPE_PIN_DATA_WATERMARK: + IA_CSS_TRACE_0(ISYSAPI, VERBOSE, + "\tia_css_isys_resp_info.ia_css_isys_resp_type = IA_CSS_ISYS_RESP_TYPE_PIN_DATA_WATERMARK\n"); + break; + case IA_CSS_ISYS_RESP_TYPE_FRAME_SOF: + IA_CSS_TRACE_0(ISYSAPI, VERBOSE, + "\tia_css_isys_resp_info.ia_css_isys_resp_type = IA_CSS_ISYS_RESP_TYPE_FRAME_SOF\n"); + break; + case IA_CSS_ISYS_RESP_TYPE_FRAME_EOF: + IA_CSS_TRACE_0(ISYSAPI, VERBOSE, + "\tia_css_isys_resp_info.ia_css_isys_resp_type = IA_CSS_ISYS_RESP_TYPE_FRAME_EOF\n"); + break; + case IA_CSS_ISYS_RESP_TYPE_STREAM_START_AND_CAPTURE_DONE: + IA_CSS_TRACE_0(ISYSAPI, VERBOSE, + "\tia_css_isys_resp_info.ia_css_isys_resp_type = IA_CSS_ISYS_RESP_TYPE_STREAM_START_AND_CAPTURE_DONE\n"); + break; + case IA_CSS_ISYS_RESP_TYPE_STREAM_CAPTURE_DONE: + IA_CSS_TRACE_0(ISYSAPI, VERBOSE, + "\tia_css_isys_resp_info.ia_css_isys_resp_type = IA_CSS_ISYS_RESP_TYPE_STREAM_CAPTURE_DONE\n"); + break; + case IA_CSS_ISYS_RESP_TYPE_PIN_DATA_SKIPPED: + IA_CSS_TRACE_0(ISYSAPI, VERBOSE, + "\tia_css_isys_resp_info.ia_css_isys_resp_type = IA_CSS_ISYS_RESP_TYPE_PIN_DATA_SKIPPED\n"); + break; + case IA_CSS_ISYS_RESP_TYPE_STREAM_CAPTURE_SKIPPED: + IA_CSS_TRACE_0(ISYSAPI, VERBOSE, + "\tia_css_isys_resp_info.ia_css_isys_resp_type = IA_CSS_ISYS_RESP_TYPE_STREAM_CAPTURE_SKIPPED\n"); + break; + case IA_CSS_ISYS_RESP_TYPE_FRAME_SOF_DISCARDED: + IA_CSS_TRACE_0(ISYSAPI, VERBOSE, + "\tia_css_isys_resp_info.ia_css_isys_resp_type = IA_CSS_ISYS_RESP_TYPE_FRAME_SOF_DISCARDED\n"); + break; + case IA_CSS_ISYS_RESP_TYPE_FRAME_EOF_DISCARDED: + IA_CSS_TRACE_0(ISYSAPI, VERBOSE, + "\tia_css_isys_resp_info.ia_css_isys_resp_type = 
IA_CSS_ISYS_RESP_TYPE_FRAME_EOF_DISCARDED\n"); + break; + default: + IA_CSS_TRACE_0(ISYSAPI, ERROR, + "\tia_css_isys_resp_info.ia_css_isys_resp_type = INVALID\n"); + break; + } + + IA_CSS_TRACE_4(ISYSAPI, VERBOSE, + "\tia_css_isys_resp_info.type = %d\n" + "\t\t\tia_css_isys_resp_info.stream_handle = %d\n" + "\t\t\tia_css_isys_resp_info.time_stamp[0] = %d\n" + "\t\t\tia_css_isys_resp_info.time_stamp[1] = %d\n", + received_response->type, + received_response->stream_handle, + received_response->timestamp[0], + received_response->timestamp[1]); + IA_CSS_TRACE_7(ISYSAPI, VERBOSE, + "\tia_css_isys_resp_info.error = %d\n" + "\t\t\tia_css_isys_resp_info.error_details = %d\n" + "\t\t\tia_css_isys_resp_info.pin.out_buf_id = %016llxu\n" + "\t\t\tia_css_isys_resp_info.pin.addr = %016llxu\n" + "\t\t\tia_css_isys_resp_info.pin_id = %d\n" + "\t\t\tia_css_isys_resp_info.frame_counter = %d\n," + "\t\t\tia_css_isys_resp_info.written_direct = %d\n", + received_response->error, + received_response->error_details, + (unsigned long long)received_response->pin.out_buf_id, + (unsigned long long)received_response->pin.addr, + received_response->pin_id, + received_response->frame_counter, + received_response->written_direct); + IA_CSS_TRACE_0(ISYSAPI, VERBOSE, + "------------------------------------------------------\n"); + + return 0; +} diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/isysapi/src/ia_css_isys_public_trace.h b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/isysapi/src/ia_css_isys_public_trace.h new file mode 100644 index 0000000000000..5b6508058fd6e --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/isysapi/src/ia_css_isys_public_trace.h @@ -0,0 +1,55 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. 
+ * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ + +#ifndef __IA_CSS_ISYS_PUBLIC_TRACE_H +#define __IA_CSS_ISYS_PUBLIC_TRACE_H + +#include "ia_css_isysapi_trace.h" + +#include "ia_css_isysapi_types.h" + +#include "ia_css_isysapi.h" + +#include "ia_css_isys_private.h" +/** + * print_handle_context - formatted print function for + * struct ia_css_isys_context *ctx variable + */ +int print_handle_context(struct ia_css_isys_context *ctx); + +/** + * print_device_config_data - formatted print function for + * struct ia_css_isys_device_cfg_data *config variable + */ +int print_device_config_data(const struct ia_css_isys_device_cfg_data *config); +/** + * print_stream_config_data - formatted print function for + * ia_css_isys_stream_cfg_data stream_cfg variable + */ +int print_stream_config_data( + const struct ia_css_isys_stream_cfg_data *stream_cfg); +/** + * print_isys_frame_buff_set - formatted print function for + * struct ia_css_isys_frame_buff_set *next_frame variable + */ +int print_isys_frame_buff_set( + const struct ia_css_isys_frame_buff_set *next_frame, + const unsigned int nof_output_pins); +/** + * print_isys_resp_info - formatted print function for + * struct ia_css_isys_resp_info *received_response variable + */ +int print_isys_resp_info(struct ia_css_isys_resp_info *received_response); + +#endif /* __IA_CSS_ISYS_PUBLIC_TRACE_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/isysapi/src/ia_css_isysapi_trace.h b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/isysapi/src/ia_css_isysapi_trace.h new file mode 100644 index 
0000000000000..c6b944f245b11 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/isysapi/src/ia_css_isysapi_trace.h @@ -0,0 +1,79 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ + +#ifndef __IA_CSS_ISYSAPI_TRACE_H +#define __IA_CSS_ISYSAPI_TRACE_H + +#include "ia_css_trace.h" + +#define ISYSAPI_TRACE_LOG_LEVEL_OFF 0 +#define ISYSAPI_TRACE_LOG_LEVEL_NORMAL 1 +#define ISYSAPI_TRACE_LOG_LEVEL_DEBUG 2 + +/* ISYSAPI and all the submodules in ISYSAPI will have + * the default tracing level set to this level + */ +#define ISYSAPI_TRACE_CONFIG_DEFAULT ISYSAPI_TRACE_LOG_LEVEL_NORMAL + +/* In case ISYSAPI_TRACE_CONFIG is not defined, set it to default level */ +#if !defined(ISYSAPI_TRACE_CONFIG) + #define ISYSAPI_TRACE_CONFIG ISYSAPI_TRACE_CONFIG_DEFAULT +#endif + +/* ISYSAPI Module tracing backend is mapped to + * TUNIT tracing for target platforms + */ +#ifdef IA_CSS_TRACE_PLATFORM_CELL + #ifndef HRT_CSIM + #define ISYSAPI_TRACE_METHOD IA_CSS_TRACE_METHOD_TRACE + #else + #define ISYSAPI_TRACE_METHOD IA_CSS_TRACE_METHOD_NATIVE + #endif +#else + #define ISYSAPI_TRACE_METHOD IA_CSS_TRACE_METHOD_NATIVE +#endif + +#if (defined(ISYSAPI_TRACE_CONFIG)) + /* TRACE_OFF */ + #if ISYSAPI_TRACE_CONFIG == ISYSAPI_TRACE_LOG_LEVEL_OFF + #define ISYSAPI_TRACE_LEVEL_ASSERT IA_CSS_TRACE_LEVEL_DISABLED + #define ISYSAPI_TRACE_LEVEL_ERROR IA_CSS_TRACE_LEVEL_DISABLED + #define ISYSAPI_TRACE_LEVEL_WARNING IA_CSS_TRACE_LEVEL_DISABLED + #define ISYSAPI_TRACE_LEVEL_INFO 
IA_CSS_TRACE_LEVEL_DISABLED + #define ISYSAPI_TRACE_LEVEL_DEBUG IA_CSS_TRACE_LEVEL_DISABLED + #define ISYSAPI_TRACE_LEVEL_VERBOSE IA_CSS_TRACE_LEVEL_DISABLED + /* TRACE_NORMAL */ + #elif ISYSAPI_TRACE_CONFIG == ISYSAPI_TRACE_LOG_LEVEL_NORMAL + #define ISYSAPI_TRACE_LEVEL_ASSERT IA_CSS_TRACE_LEVEL_ENABLED + #define ISYSAPI_TRACE_LEVEL_ERROR IA_CSS_TRACE_LEVEL_ENABLED + #define ISYSAPI_TRACE_LEVEL_WARNING IA_CSS_TRACE_LEVEL_ENABLED + #define ISYSAPI_TRACE_LEVEL_INFO IA_CSS_TRACE_LEVEL_ENABLED + #define ISYSAPI_TRACE_LEVEL_DEBUG IA_CSS_TRACE_LEVEL_DISABLED + #define ISYSAPI_TRACE_LEVEL_VERBOSE IA_CSS_TRACE_LEVEL_DISABLED + /* TRACE_DEBUG */ + #elif ISYSAPI_TRACE_CONFIG == ISYSAPI_TRACE_LOG_LEVEL_DEBUG + #define ISYSAPI_TRACE_LEVEL_ASSERT IA_CSS_TRACE_LEVEL_ENABLED + #define ISYSAPI_TRACE_LEVEL_ERROR IA_CSS_TRACE_LEVEL_ENABLED + #define ISYSAPI_TRACE_LEVEL_WARNING IA_CSS_TRACE_LEVEL_ENABLED + #define ISYSAPI_TRACE_LEVEL_INFO IA_CSS_TRACE_LEVEL_ENABLED + #define ISYSAPI_TRACE_LEVEL_DEBUG IA_CSS_TRACE_LEVEL_ENABLED + #define ISYSAPI_TRACE_LEVEL_VERBOSE IA_CSS_TRACE_LEVEL_ENABLED + #else + #error "No ISYSAPI_TRACE_CONFIG Tracing level defined" + #endif +#else + #error "ISYSAPI_TRACE_CONFIG not defined" +#endif + +#endif /* __IA_CSS_ISYSAPI_TRACE_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/pkg_dir/interface/ia_css_pkg_dir.h b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/pkg_dir/interface/ia_css_pkg_dir.h new file mode 100644 index 0000000000000..a284d74bb4a67 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/pkg_dir/interface/ia_css_pkg_dir.h @@ -0,0 +1,99 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. 
+ * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ + +#ifndef __IA_CSS_PKG_DIR_H +#define __IA_CSS_PKG_DIR_H + +#include "ia_css_pkg_dir_storage_class.h" +#include "ia_css_pkg_dir_types.h" +#include "type_support.h" + +IA_CSS_PKG_DIR_STORAGE_CLASS_H +const ia_css_pkg_dir_entry_t *ia_css_pkg_dir_get_entry( + const ia_css_pkg_dir_t *pkg_dir, + uint32_t index +); + +/* User is expected to call the verify function manually, + * other functions do not call it internally + */ +IA_CSS_PKG_DIR_STORAGE_CLASS_H +int ia_css_pkg_dir_verify_header( + const ia_css_pkg_dir_entry_t *pkg_dir_header +); + +IA_CSS_PKG_DIR_STORAGE_CLASS_H +uint32_t ia_css_pkg_dir_get_num_entries( + const ia_css_pkg_dir_entry_t *pkg_dir_header +); + +IA_CSS_PKG_DIR_STORAGE_CLASS_H +uint32_t ia_css_pkg_dir_get_size_in_bytes( + const ia_css_pkg_dir_entry_t *pkg_dir_header +); + +IA_CSS_PKG_DIR_STORAGE_CLASS_H +enum ia_css_pkg_dir_version ia_css_pkg_dir_get_version( + const ia_css_pkg_dir_entry_t *pkg_dir_header +); + +IA_CSS_PKG_DIR_STORAGE_CLASS_H +uint16_t ia_css_pkg_dir_set_version( + ia_css_pkg_dir_entry_t *pkg_dir_header, + enum ia_css_pkg_dir_version version +); + + +IA_CSS_PKG_DIR_STORAGE_CLASS_H +uint32_t ia_css_pkg_dir_entry_get_address_lo( + const ia_css_pkg_dir_entry_t *entry +); + +IA_CSS_PKG_DIR_STORAGE_CLASS_H +uint32_t ia_css_pkg_dir_entry_get_address_hi( + const ia_css_pkg_dir_entry_t *entry +); + +IA_CSS_PKG_DIR_STORAGE_CLASS_H +uint32_t ia_css_pkg_dir_entry_get_size( + const ia_css_pkg_dir_entry_t *entry +); + +IA_CSS_PKG_DIR_STORAGE_CLASS_H +uint16_t ia_css_pkg_dir_entry_get_version( + const ia_css_pkg_dir_entry_t *entry +); + +IA_CSS_PKG_DIR_STORAGE_CLASS_H +uint8_t ia_css_pkg_dir_entry_get_type( + const ia_css_pkg_dir_entry_t *entry +); + +/* Get the address of the specified entry in 
the PKG_DIR + * Note: This function expects the complete PKG_DIR in the same memory space + * and the entries contains offsets and not addresses. + */ +IA_CSS_PKG_DIR_STORAGE_CLASS_H +void *ia_css_pkg_dir_get_entry_address( + const ia_css_pkg_dir_t *pkg_dir, + uint32_t index +); + +#ifdef __IA_CSS_PKG_DIR_INLINE__ + +#include "ia_css_pkg_dir_impl.h" + +#endif + +#endif /* __IA_CSS_PKG_DIR_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/pkg_dir/interface/ia_css_pkg_dir_iunit.h b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/pkg_dir/interface/ia_css_pkg_dir_iunit.h new file mode 100644 index 0000000000000..ad194b0389eb7 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/pkg_dir/interface/ia_css_pkg_dir_iunit.h @@ -0,0 +1,46 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+*/ + +#ifndef __IA_CSS_PKG_DIR_IUNIT_H +#define __IA_CSS_PKG_DIR_IUNIT_H + +/* In bootflow, pkg_dir only supports up to 16 entries in pkg_dir + * pkg_dir_header + Psys_server pg + Isys_server pg + 13 Client pg + */ + +enum { + IA_CSS_PKG_DIR_SIZE = 16, + IA_CSS_PKG_DIR_ENTRIES = IA_CSS_PKG_DIR_SIZE - 1 +}; + +#define IUNIT_MAX_CLIENT_PKG_ENTRIES 13 + +/* Example assignment of unique identifiers for the FW components + * This should match the identifiers in the manifest + */ +enum ia_css_pkg_dir_entry_type { + IA_CSS_PKG_DIR_HEADER = 0, + IA_CSS_PKG_DIR_PSYS_SERVER_PG, + IA_CSS_PKG_DIR_ISYS_SERVER_PG, + IA_CSS_PKG_DIR_CLIENT_PG +}; + +/* Fixed entries in the package directory */ +enum ia_css_pkg_dir_index { + IA_CSS_PKG_DIR_PSYS_INDEX = 0, + IA_CSS_PKG_DIR_ISYS_INDEX = 1, + IA_CSS_PKG_DIR_CLIENT_0 = 2 +}; + +#endif /* __IA_CSS_PKG_DIR_IUNIT_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/pkg_dir/interface/ia_css_pkg_dir_storage_class.h b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/pkg_dir/interface/ia_css_pkg_dir_storage_class.h new file mode 100644 index 0000000000000..cb64172151f92 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/pkg_dir/interface/ia_css_pkg_dir_storage_class.h @@ -0,0 +1,29 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+*/ + +#ifndef __IA_CSS_PKG_DIR_STORAGE_CLASS_H +#define __IA_CSS_PKG_DIR_STORAGE_CLASS_H + + +#include "storage_class.h" + +#ifndef __IA_CSS_PKG_DIR_INLINE__ +#define IA_CSS_PKG_DIR_STORAGE_CLASS_H STORAGE_CLASS_EXTERN +#define IA_CSS_PKG_DIR_STORAGE_CLASS_C +#else +#define IA_CSS_PKG_DIR_STORAGE_CLASS_H STORAGE_CLASS_INLINE +#define IA_CSS_PKG_DIR_STORAGE_CLASS_C STORAGE_CLASS_INLINE +#endif + +#endif /* __IA_CSS_PKG_DIR_STORAGE_CLASS_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/pkg_dir/interface/ia_css_pkg_dir_types.h b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/pkg_dir/interface/ia_css_pkg_dir_types.h new file mode 100644 index 0000000000000..b024b3da2f9e6 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/pkg_dir/interface/ia_css_pkg_dir_types.h @@ -0,0 +1,41 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+*/ + +#ifndef __IA_CSS_PKG_DIR_TYPES_H +#define __IA_CSS_PKG_DIR_TYPES_H + +#include "type_support.h" + +struct ia_css_pkg_dir_entry { + uint32_t address[2]; + uint32_t size; + uint16_t version; + uint8_t type; + uint8_t unused; +}; + +typedef void ia_css_pkg_dir_t; +typedef struct ia_css_pkg_dir_entry ia_css_pkg_dir_entry_t; + +/* The version field of the pkg_dir header defines + * if entries contain offsets or pointers + */ +/* This is temporary, until all pkg_dirs use pointers */ +enum ia_css_pkg_dir_version { + IA_CSS_PKG_DIR_POINTER, + IA_CSS_PKG_DIR_OFFSET +}; + + +#endif /* __IA_CSS_PKG_DIR_TYPES_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/pkg_dir/pkg_dir.mk b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/pkg_dir/pkg_dir.mk new file mode 100644 index 0000000000000..32c8a68f3653c --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/pkg_dir/pkg_dir.mk @@ -0,0 +1,29 @@ +# # # +# Support for Intel Camera Imaging ISP subsystem. +# Copyright (c) 2010 - 2018, Intel Corporation. +# +# This program is free software; you can redistribute it and/or modify it +# under the terms and conditions of the GNU General Public License, +# version 2, as published by the Free Software Foundation. +# +# This program is distributed in the hope it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +# FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU General Public License for +# more details +# +# +# MODULE is PKG DIR + +PKG_DIR_DIR = $${MODULES_DIR}/pkg_dir +PKG_DIR_INTERFACE = $(PKG_DIR_DIR)/interface +PKG_DIR_SOURCES = $(PKG_DIR_DIR)/src + +PKG_DIR_FILES = $(PKG_DIR_DIR)/src/ia_css_pkg_dir.c +PKG_DIR_CPPFLAGS = -I$(PKG_DIR_INTERFACE) +PKG_DIR_CPPFLAGS += -I$(PKG_DIR_SOURCES) +PKG_DIR_CPPFLAGS += -I$${MODULES_DIR}/../isp/kernels/io_ls/common +PKG_DIR_CPPFLAGS += -I$${MODULES_DIR}/fw_abi_common_types/ipu +PKG_DIR_CPPFLAGS += -I$${MODULES_DIR}/fw_abi_common_types/ipu/$(FW_ABI_IPU_TYPES_VERSION) + +PKG_DIR_CREATE_FILES = $(PKG_DIR_DIR)/src/ia_css_pkg_dir_create.c +PKG_DIR_UPDATE_FILES = $(PKG_DIR_DIR)/src/ia_css_pkg_dir_update.c diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/pkg_dir/src/ia_css_pkg_dir.c b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/pkg_dir/src/ia_css_pkg_dir.c new file mode 100644 index 0000000000000..348b56833e060 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/pkg_dir/src/ia_css_pkg_dir.c @@ -0,0 +1,27 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+*/ + +#ifdef __IA_CSS_PKG_DIR_INLINE__ + +#include "storage_class.h" + +STORAGE_CLASS_INLINE int __ia_css_pkg_dir_avoid_warning_on_empty_file(void) +{ + return 0; +} + +#else +#include "ia_css_pkg_dir_impl.h" + +#endif diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/pkg_dir/src/ia_css_pkg_dir_impl.h b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/pkg_dir/src/ia_css_pkg_dir_impl.h new file mode 100644 index 0000000000000..d5067d21398f9 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/pkg_dir/src/ia_css_pkg_dir_impl.h @@ -0,0 +1,201 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+*/ + +#ifndef __IA_CSS_PKG_DIR_IMPL_H +#define __IA_CSS_PKG_DIR_IMPL_H + +#include "ia_css_pkg_dir.h" +#include "ia_css_pkg_dir_int.h" +#include "error_support.h" +#include "type_support.h" +#include "assert_support.h" + +IA_CSS_PKG_DIR_STORAGE_CLASS_C +const ia_css_pkg_dir_entry_t *ia_css_pkg_dir_get_entry( + const ia_css_pkg_dir_t *pkg_dir, + uint32_t index) +{ + DECLARE_ERRVAL + struct ia_css_pkg_dir_entry *pkg_dir_header = NULL; + + verifexitval(pkg_dir != NULL, EFAULT); + + pkg_dir_header = (struct ia_css_pkg_dir_entry *)pkg_dir; + + /* First entry of the structure is the header, skip that */ + index++; + verifexitval(index < pkg_dir_header->size, EFAULT); + +EXIT: + if (haserror(EFAULT)) { + return NULL; + } + return &(pkg_dir_header[index]); +} + +IA_CSS_PKG_DIR_STORAGE_CLASS_C +int ia_css_pkg_dir_verify_header(const ia_css_pkg_dir_entry_t *pkg_dir_header) +{ + DECLARE_ERRVAL + verifexitval(pkg_dir_header != NULL, EFAULT); + +EXIT: + if (haserror(EFAULT)) { + return -1; + } + return ((pkg_dir_header->address[0] == PKG_DIR_MAGIC_VAL_0) + && (pkg_dir_header->address[1] == PKG_DIR_MAGIC_VAL_1)) ? 
+ 0 : -1; +} + +IA_CSS_PKG_DIR_STORAGE_CLASS_C +uint32_t ia_css_pkg_dir_get_num_entries( + const ia_css_pkg_dir_entry_t *pkg_dir_header) +{ + DECLARE_ERRVAL + uint32_t size = 0; + + verifexitval(pkg_dir_header != NULL, EFAULT); + size = pkg_dir_header->size; + verifexitval(size > 0, EFAULT); +EXIT: + if (haserror(EFAULT)) { + return 0; + } + return size - 1; +} + +IA_CSS_PKG_DIR_STORAGE_CLASS_C +enum ia_css_pkg_dir_version +ia_css_pkg_dir_get_version(const ia_css_pkg_dir_entry_t *pkg_dir_header) +{ + assert(pkg_dir_header != NULL); + return pkg_dir_header->version; +} + +IA_CSS_PKG_DIR_STORAGE_CLASS_C +uint16_t ia_css_pkg_dir_set_version(ia_css_pkg_dir_entry_t *pkg_dir_header, + enum ia_css_pkg_dir_version version) +{ + DECLARE_ERRVAL + + verifexitval(pkg_dir_header != NULL, EFAULT); +EXIT: + if (haserror(EFAULT)) { + return 1; + } + pkg_dir_header->version = version; + return 0; +} + +IA_CSS_PKG_DIR_STORAGE_CLASS_C +uint32_t ia_css_pkg_dir_get_size_in_bytes( + const ia_css_pkg_dir_entry_t *pkg_dir_header) +{ + DECLARE_ERRVAL + + verifexitval(pkg_dir_header != NULL, EFAULT); +EXIT: + if (haserror(EFAULT)) { + return 0; + } + return sizeof(struct ia_css_pkg_dir_entry) * pkg_dir_header->size; +} + +IA_CSS_PKG_DIR_STORAGE_CLASS_C +uint32_t ia_css_pkg_dir_entry_get_address_lo( + const ia_css_pkg_dir_entry_t *entry) +{ + DECLARE_ERRVAL + + verifexitval(entry != NULL, EFAULT); +EXIT: + if (haserror(EFAULT)) { + return 0; + } + return entry->address[0]; +} + +IA_CSS_PKG_DIR_STORAGE_CLASS_C +uint32_t ia_css_pkg_dir_entry_get_address_hi( + const ia_css_pkg_dir_entry_t *entry) +{ + DECLARE_ERRVAL + + verifexitval(entry != NULL, EFAULT); +EXIT: + if (haserror(EFAULT)) { + return 0; + } + return entry->address[1]; +} + +IA_CSS_PKG_DIR_STORAGE_CLASS_C +uint32_t ia_css_pkg_dir_entry_get_size(const ia_css_pkg_dir_entry_t *entry) +{ + DECLARE_ERRVAL + + verifexitval(entry != NULL, EFAULT); +EXIT: + if (haserror(EFAULT)) { + return 0; + } + return entry->size; +} + 
+IA_CSS_PKG_DIR_STORAGE_CLASS_C +uint16_t ia_css_pkg_dir_entry_get_version(const ia_css_pkg_dir_entry_t *entry) +{ + DECLARE_ERRVAL + + verifexitval(entry != NULL, EFAULT); +EXIT: + if (haserror(EFAULT)) { + return 0; + } + return entry->version; +} + +IA_CSS_PKG_DIR_STORAGE_CLASS_C +uint8_t ia_css_pkg_dir_entry_get_type(const ia_css_pkg_dir_entry_t *entry) +{ + DECLARE_ERRVAL + + verifexitval(entry != NULL, EFAULT); +EXIT: + if (haserror(EFAULT)) { + return 0; + } + return entry->type; +} + + +IA_CSS_PKG_DIR_STORAGE_CLASS_C +void *ia_css_pkg_dir_get_entry_address(const ia_css_pkg_dir_t *pkg_dir, + uint32_t index) +{ + void *entry_blob = NULL; + const ia_css_pkg_dir_entry_t *pkg_dir_entry = + ia_css_pkg_dir_get_entry(pkg_dir, index-1); + + if ((pkg_dir_entry != NULL) && + (ia_css_pkg_dir_entry_get_size(pkg_dir_entry) > 0)) { + assert(ia_css_pkg_dir_entry_get_address_hi(pkg_dir_entry) == 0); + entry_blob = (void *)((char *)pkg_dir + + ia_css_pkg_dir_entry_get_address_lo(pkg_dir_entry)); + } + return entry_blob; +} + +#endif /* __IA_CSS_PKG_DIR_IMPL_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/pkg_dir/src/ia_css_pkg_dir_int.h b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/pkg_dir/src/ia_css_pkg_dir_int.h new file mode 100644 index 0000000000000..203505fbee54e --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/pkg_dir/src/ia_css_pkg_dir_int.h @@ -0,0 +1,49 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+*/ + +#ifndef __IA_CSS_PKG_DIR_INT_H +#define __IA_CSS_PKG_DIR_INT_H + +/* + * Package Dir structure as specified in CSE FAS + * + * PKG DIR Header + * Qword 63:56 55 54:48 47:32 31:24 23:0 + * 0 "_IUPKDR_" + * 1 Rsvd Rsvd Type Version Rsvd Size + * + * Version: Version of the Structure + * Size: Size of the entire table (including header) in 16 byte chunks + * Type: Must be 0 for header + * + * Figure 13: PKG DIR Header + * + * + * PKG DIR Entry + * Qword 63:56 55 54:48 47:32 31:24 23:0 + * N Address/Offset + * N+1 Rsvd Rsvd Type Version Rsvd Size + * + * Version: Version # of the Component + * Size: Size of the component in bytes + * Type: Component Identifier + */ + +#define PKG_DIR_SIZE_BITS 24 +#define PKG_DIR_TYPE_BITS 7 + +#define PKG_DIR_MAGIC_VAL_1 (('_' << 24) | ('I' << 16) | ('U' << 8) | 'P') +#define PKG_DIR_MAGIC_VAL_0 (('K' << 24) | ('D' << 16) | ('R' << 8) | '_') + +#endif /* __IA_CSS_PKG_DIR_INT_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/port/interface/port_env_struct.h b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/port/interface/port_env_struct.h new file mode 100644 index 0000000000000..4d39a4739a8b0 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/port/interface/port_env_struct.h @@ -0,0 +1,24 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+*/ + +#ifndef __PORT_ENV_STRUCT_H +#define __PORT_ENV_STRUCT_H + +struct port_env { + unsigned int mmid; + unsigned int ssid; + unsigned int mem_addr; +}; + +#endif /* __PORT_ENV_STRUCT_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/port/interface/queue.h b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/port/interface/queue.h new file mode 100644 index 0000000000000..b233ab3baf014 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/port/interface/queue.h @@ -0,0 +1,40 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ + +#ifndef __QUEUE_H +#define __QUEUE_H + +#include "queue_struct.h" +#include "port_env_struct.h" + +/* + * SYS queues are created by the host + * SYS queues cannot be accessed through the queue interface + * To send data into a queue a send_port must be opened. + * To receive data from a queue, a recv_port must be opened. + */ + +/* return required buffer size for queue */ +unsigned int +sys_queue_buf_size(unsigned int size, unsigned int token_size); + +/* + * initialize a queue that can hold at least 'size' tokens of + * 'token_size' bytes. 
+ */ +void +sys_queue_init(struct sys_queue *q, unsigned int size, + unsigned int token_size, struct sys_queue_res *res); + +#endif /* __QUEUE_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/port/interface/queue_struct.h b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/port/interface/queue_struct.h new file mode 100644 index 0000000000000..ef48fcfded2b6 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/port/interface/queue_struct.h @@ -0,0 +1,47 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+*/ + +#ifndef __QUEUE_STRUCT_H +#define __QUEUE_STRUCT_H + +/* queue description, shared between sender and receiver */ + +#include "type_support.h" + +#ifdef __VIED_CELL +typedef struct {uint32_t v[2]; } host_buffer_address_t; +#else +typedef uint64_t host_buffer_address_t; +#endif + +typedef uint32_t vied_buffer_address_t; + + +struct sys_queue { + host_buffer_address_t host_address; + vied_buffer_address_t vied_address; + unsigned int size; + unsigned int token_size; + unsigned int wr_reg; /* reg no in subsystem's regmem */ + unsigned int rd_reg; + unsigned int _align; +}; + +struct sys_queue_res { + host_buffer_address_t host_address; + vied_buffer_address_t vied_address; + unsigned int reg; +}; + +#endif /* __QUEUE_STRUCT_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/port/interface/recv_port.h b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/port/interface/recv_port.h new file mode 100644 index 0000000000000..cce253b266687 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/port/interface/recv_port.h @@ -0,0 +1,34 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+*/ + +#ifndef __RECV_PORT_H +#define __RECV_PORT_H + + +struct recv_port; +struct sys_queue; +struct port_env; + +void +recv_port_open(struct recv_port *p, const struct sys_queue *q, + const struct port_env *env); + +unsigned int +recv_port_available(const struct recv_port *p); + +unsigned int +recv_port_transfer(const struct recv_port *p, void *data); + + +#endif /* __RECV_PORT_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/port/interface/recv_port_struct.h b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/port/interface/recv_port_struct.h new file mode 100644 index 0000000000000..52ec563b13cf5 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/port/interface/recv_port_struct.h @@ -0,0 +1,32 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+*/ + +#ifndef __RECV_PORT_STRUCT_H +#define __RECV_PORT_STRUCT_H + +#include "buffer_type.h" + +struct recv_port { + buffer_address buffer; /* address of buffer in DDR */ + unsigned int size; + unsigned int token_size; + unsigned int wr_reg; /* index of write pointer located in regmem */ + unsigned int rd_reg; /* index of read pointer located in regmem */ + + unsigned int mmid; + unsigned int ssid; + unsigned int mem_addr; /* address of memory containing regmem */ +}; + +#endif /* __RECV_PORT_STRUCT_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/port/interface/send_port.h b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/port/interface/send_port.h new file mode 100644 index 0000000000000..04a160f3f0199 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/port/interface/send_port.h @@ -0,0 +1,52 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ + +#ifndef __SEND_PORT_H +#define __SEND_PORT_H + + +/* + * A send port can be used to send tokens into a queue. + * The interface can be used on any type of processor (host, SP, ...) + */ + +struct send_port; +struct sys_queue; +struct port_env; + +/* + * Open a send port on a queue. After the port is opened, tokens can be sent + */ +void +send_port_open(struct send_port *p, const struct sys_queue *q, + const struct port_env *env); + +/* + * Determine how many tokens can be sent + */ +unsigned int +send_port_available(const struct send_port *p); + +/* + * Send a token via a send port. 
The function returns the number of + * tokens that have been sent: + * 1: the token was accepted + * 0: the token was not accepted (full queue) + * The size of a token is determined at initialization. + */ +unsigned int +send_port_transfer(const struct send_port *p, const void *data); + + +#endif /* __SEND_PORT_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/port/interface/send_port_struct.h b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/port/interface/send_port_struct.h new file mode 100644 index 0000000000000..f834c62bc3db6 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/port/interface/send_port_struct.h @@ -0,0 +1,32 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ + +#ifndef __SEND_PORT_STRUCT_H +#define __SEND_PORT_STRUCT_H + +#include "buffer_type.h" + +struct send_port { + buffer_address buffer; + unsigned int size; + unsigned int token_size; + unsigned int wr_reg; /* index of write pointer in regmem */ + unsigned int rd_reg; /* index of read pointer in regmem */ + + unsigned int mmid; + unsigned int ssid; + unsigned int mem_addr; +}; + +#endif /* __SEND_PORT_STRUCT_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/port/port.mk b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/port/port.mk new file mode 100644 index 0000000000000..b3801247802e9 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/port/port.mk @@ -0,0 +1,31 @@ +# # # +# Support for Intel Camera Imaging ISP subsystem. 
+# Copyright (c) 2010 - 2018, Intel Corporation. +# +# This program is free software; you can redistribute it and/or modify it +# under the terms and conditions of the GNU General Public License, +# version 2, as published by the Free Software Foundation. +# +# This program is distributed in the hope it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for +# more details +# +# +# MODULE is PORT + +PORT_DIR=$${MODULES_DIR}/port + +PORT_INTERFACE=$(PORT_DIR)/interface +PORT_SOURCES1=$(PORT_DIR)/src + +PORT_HOST_FILES += $(PORT_SOURCES1)/send_port.c +PORT_HOST_FILES += $(PORT_SOURCES1)/recv_port.c +PORT_HOST_FILES += $(PORT_SOURCES1)/queue.c + +PORT_HOST_CPPFLAGS += -I$(PORT_INTERFACE) + +PORT_FW_FILES += $(PORT_SOURCES1)/send_port.c +PORT_FW_FILES += $(PORT_SOURCES1)/recv_port.c + +PORT_FW_CPPFLAGS += -I$(PORT_INTERFACE) diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/port/src/queue.c b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/port/src/queue.c new file mode 100644 index 0000000000000..eeec99dfe2d0d --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/port/src/queue.c @@ -0,0 +1,47 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+*/ + +#include "queue.h" + +#include "regmem_access.h" +#include "port_env_struct.h" + +unsigned int sys_queue_buf_size(unsigned int size, unsigned int token_size) +{ + return (size + 1) * token_size; +} + +void +sys_queue_init(struct sys_queue *q, unsigned int size, unsigned int token_size, + struct sys_queue_res *res) +{ + unsigned int buf_size; + + q->size = size + 1; + q->token_size = token_size; + buf_size = sys_queue_buf_size(size, token_size); + + /* acquire the shared buffer space */ + q->host_address = res->host_address; + res->host_address += buf_size; + q->vied_address = res->vied_address; + res->vied_address += buf_size; + + /* acquire the shared read and writer pointers */ + q->wr_reg = res->reg; + res->reg++; + q->rd_reg = res->reg; + res->reg++; + +} diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/port/src/recv_port.c b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/port/src/recv_port.c new file mode 100644 index 0000000000000..31b36e9ceafbb --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/port/src/recv_port.c @@ -0,0 +1,95 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+*/ + +#include "recv_port.h" +#include "port_env_struct.h" /* for port_env */ +#include "queue_struct.h" /* for sys_queue */ +#include "recv_port_struct.h" /* for recv_port */ +#include "buffer_access.h" /* for buffer_load, buffer_address */ +#include "regmem_access.h" /* for regmem_load_32, regmem_store_32 */ +#include "storage_class.h" /* for STORAGE_CLASS_INLINE */ +#include "math_support.h" /* for OP_std_modadd */ +#include "type_support.h" /* for HOST_ADDRESS */ + +#ifndef __VIED_CELL +#include "cpu_mem_support.h" /* for ia_css_cpu_mem_cache_invalidate */ +#endif + +void +recv_port_open(struct recv_port *p, const struct sys_queue *q, + const struct port_env *env) +{ + p->mmid = env->mmid; + p->ssid = env->ssid; + p->mem_addr = env->mem_addr; + + p->size = q->size; + p->token_size = q->token_size; + p->wr_reg = q->wr_reg; + p->rd_reg = q->rd_reg; + +#ifdef __VIED_CELL + p->buffer = q->vied_address; +#else + p->buffer = q->host_address; +#endif +} + +STORAGE_CLASS_INLINE unsigned int +recv_port_index(const struct recv_port *p, unsigned int i) +{ + unsigned int rd = regmem_load_32(p->mem_addr, p->rd_reg, p->ssid); + + return OP_std_modadd(rd, i, p->size); +} + +unsigned int +recv_port_available(const struct recv_port *p) +{ + int wr = (int)regmem_load_32(p->mem_addr, p->wr_reg, p->ssid); + int rd = (int)regmem_load_32(p->mem_addr, p->rd_reg, p->ssid); + + return OP_std_modadd(wr, -rd, p->size); +} + +STORAGE_CLASS_INLINE void +recv_port_copy(const struct recv_port *p, unsigned int i, void *data) +{ + unsigned int rd = recv_port_index(p, i); + unsigned int token_size = p->token_size; + buffer_address addr = p->buffer + (rd * token_size); +#ifndef __VIED_CELL + ia_css_cpu_mem_cache_invalidate((void *)HOST_ADDRESS(p->buffer), + token_size*p->size); +#endif + buffer_load(addr, data, token_size, p->mmid); +} + +STORAGE_CLASS_INLINE void +recv_port_release(const struct recv_port *p, unsigned int i) +{ + unsigned int rd = recv_port_index(p, i); + + 
regmem_store_32(p->mem_addr, p->rd_reg, rd, p->ssid); +} + +unsigned int +recv_port_transfer(const struct recv_port *p, void *data) +{ + if (!recv_port_available(p)) + return 0; + recv_port_copy(p, 0, data); + recv_port_release(p, 1); + return 1; +} diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/port/src/send_port.c b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/port/src/send_port.c new file mode 100644 index 0000000000000..8d1fba08c5d58 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/port/src/send_port.c @@ -0,0 +1,94 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+*/ + +#include "send_port.h" +#include "queue_struct.h" /* for sys_queue */ +#include "send_port_struct.h" /* for send_port */ +#include "port_env_struct.h" /* for port_env */ +#include "regmem_access.h" /* for regmem_load_32, regmem_store_32 */ +#include "buffer_access.h" /* for buffer_store, buffer_address */ +#include "storage_class.h" /* for STORAGE_CLASS_INLINE */ +#include "math_support.h" /* for OP_std_modadd */ +#include "type_support.h" /* for HOST_ADDRESS */ + +#ifndef __VIED_CELL +#include "cpu_mem_support.h" /* for ia_css_cpu_mem_cache_flush */ +#endif + +void +send_port_open(struct send_port *p, const struct sys_queue *q, + const struct port_env *env) +{ + p->mmid = env->mmid; + p->ssid = env->ssid; + p->mem_addr = env->mem_addr; + + p->size = q->size; + p->token_size = q->token_size; + p->wr_reg = q->wr_reg; + p->rd_reg = q->rd_reg; +#ifdef __VIED_CELL + p->buffer = q->vied_address; +#else + p->buffer = q->host_address; +#endif +} + +STORAGE_CLASS_INLINE unsigned int +send_port_index(const struct send_port *p, unsigned int i) +{ + unsigned int wr = regmem_load_32(p->mem_addr, p->wr_reg, p->ssid); + + return OP_std_modadd(wr, i, p->size); +} + +unsigned int +send_port_available(const struct send_port *p) +{ + int rd = (int)regmem_load_32(p->mem_addr, p->rd_reg, p->ssid); + int wr = (int)regmem_load_32(p->mem_addr, p->wr_reg, p->ssid); + + return OP_std_modadd(rd, -(wr+1), p->size); +} + +STORAGE_CLASS_INLINE void +send_port_copy(const struct send_port *p, unsigned int i, const void *data) +{ + unsigned int wr = send_port_index(p, i); + unsigned int token_size = p->token_size; + buffer_address addr = p->buffer + (wr * token_size); + + buffer_store(addr, data, token_size, p->mmid); +#ifndef __VIED_CELL + ia_css_cpu_mem_cache_flush((void *)HOST_ADDRESS(addr), token_size); +#endif +} + +STORAGE_CLASS_INLINE void +send_port_release(const struct send_port *p, unsigned int i) +{ + unsigned int wr = send_port_index(p, i); + + regmem_store_32(p->mem_addr, 
p->wr_reg, wr, p->ssid); +} + +unsigned int +send_port_transfer(const struct send_port *p, const void *data) +{ + if (!send_port_available(p)) + return 0; + send_port_copy(p, 0, data); + send_port_release(p, 1); + return 1; +} diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/reg_dump/src/isys/bxtB0_gen_reg_dump/ia_css_debug_dump.c b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/reg_dump/src/isys/bxtB0_gen_reg_dump/ia_css_debug_dump.c new file mode 100644 index 0000000000000..c51d65c8cb647 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/reg_dump/src/isys/bxtB0_gen_reg_dump/ia_css_debug_dump.c @@ -0,0 +1,15 @@ +/* +* Support for Intel Camera Imaging ISP subsystem. +* Copyright (c) 2010 - 2018, Intel Corporation. +* +* This program is free software; you can redistribute it and/or modify it +* under the terms and conditions of the GNU General Public License, +* version 2, as published by the Free Software Foundation. +* +* This program is distributed in the hope it will be useful, but WITHOUT +* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for +* more details. +*/ +#include "ia_css_debug_dump.h" + void ia_css_debug_dump(void) {} \ No newline at end of file diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/reg_dump/src/isys/bxtB0_gen_reg_dump/ia_css_debug_dump.h b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/reg_dump/src/isys/bxtB0_gen_reg_dump/ia_css_debug_dump.h new file mode 100644 index 0000000000000..5dd23ddbd180b --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/reg_dump/src/isys/bxtB0_gen_reg_dump/ia_css_debug_dump.h @@ -0,0 +1,17 @@ +/* +* Support for Intel Camera Imaging ISP subsystem. +* Copyright (c) 2010 - 2018, Intel Corporation. 
+* +* This program is free software; you can redistribute it and/or modify it +* under the terms and conditions of the GNU General Public License, +* version 2, as published by the Free Software Foundation. +* +* This program is distributed in the hope it will be useful, but WITHOUT +* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for +* more details. +*/ +#ifndef __IA_CSS_DEBUG_DUMP_H_ + #define __IA_CSS_DEBUG_DUMP_H_ + void ia_css_debug_dump(void); + #endif /* __IA_CSS_DEBUG_DUMP_H_ */ \ No newline at end of file diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/reg_dump/src/reg_dump_generic_bridge.c b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/reg_dump/src/reg_dump_generic_bridge.c new file mode 100644 index 0000000000000..9b9161ae78cf2 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/reg_dump/src/reg_dump_generic_bridge.c @@ -0,0 +1,39 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+*/ + +#include +#include "ia_css_trace.h" +#ifdef USE_LOGICAL_SSIDS +/* + Logical names can be used to define the SSID + In order to resolve these names the following include file should be provided + and the define above should be enabled +*/ +#include +#endif + +#define REG_DUMP_TRACE_METHOD IA_CSS_TRACE_METHOD_NATIVE +#define REG_DUMP_TRACE_LEVEL_VERBOSE IA_CSS_TRACE_LEVEL_ENABLED + +/* SSID value is defined in test makefiles as either isys0 or psys0 */ +#define REG_DUMP_READ_REGISTER(addr) vied_subsystem_load_32(SSID, addr) + +#define REG_DUMP_PRINT_0(...) \ +EXPAND_VA_ARGS(IA_CSS_TRACE_0(REG_DUMP, VERBOSE, __VA_ARGS__)) +#define REG_DUMP_PRINT_1(...) \ +EXPAND_VA_ARGS(IA_CSS_TRACE_1(REG_DUMP, VERBOSE, __VA_ARGS__)) +#define EXPAND_VA_ARGS(x) x + +/* Including generated source code for reg_dump */ +#include "ia_css_debug_dump.c" diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/regmem/interface/regmem_access.h b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/regmem/interface/regmem_access.h new file mode 100644 index 0000000000000..d4576af936f6d --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/regmem/interface/regmem_access.h @@ -0,0 +1,67 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+*/ + +#ifndef __REGMEM_ACCESS_H +#define __REGMEM_ACCESS_H + +#include "storage_class.h" + +enum regmem_id { + /* pass pkg_dir address to SPC in non-secure mode */ + PKG_DIR_ADDR_REG = 0, + /* pass syscom configuration to SPC */ + SYSCOM_CONFIG_REG = 1, + /* syscom state - modified by SP */ + SYSCOM_STATE_REG = 2, + /* syscom commands - modified by the host */ + SYSCOM_COMMAND_REG = 3, + /* Store interrupt status - updated by SP */ + SYSCOM_IRQ_REG = 4, + /* Store VTL0_ADDR_MASK in trusted secure region - provided by host.*/ + SYSCOM_VTL0_ADDR_MASK = 5, +#if HAS_DUAL_CMD_CTX_SUPPORT + /* Initialized if trustlet exists - updated by host */ + TRUSTLET_STATUS = 6, + /* identify if SPC access blocker programming is completed - updated by SP */ + AB_SPC_STATUS = 7, + /* first syscom queue pointer register */ + SYSCOM_QPR_BASE_REG = 8 +#else + /* first syscom queue pointer register */ + SYSCOM_QPR_BASE_REG = 6 +#endif +}; + +#if HAS_DUAL_CMD_CTX_SUPPORT +/* Bit 0: for untrusted non-secure DRV driver on VTL0 + * Bit 1: for trusted secure TEE driver on VTL1 + */ +#define SYSCOM_IRQ_VTL0_MASK 0x1 +#define SYSCOM_IRQ_VTL1_MASK 0x2 +#endif + +STORAGE_CLASS_INLINE unsigned int +regmem_load_32(unsigned int mem_address, unsigned int reg, unsigned int ssid); + +STORAGE_CLASS_INLINE void +regmem_store_32(unsigned int mem_address, unsigned int reg, unsigned int value, + unsigned int ssid); + +#ifdef __VIED_CELL +#include "regmem_access_cell.h" +#else +#include "regmem_access_host.h" +#endif + +#endif /* __REGMEM_ACCESS_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/regmem/regmem.mk b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/regmem/regmem.mk new file mode 100644 index 0000000000000..24ebc1c325d8e --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/regmem/regmem.mk @@ -0,0 +1,32 @@ +# # # +# Support for Intel Camera Imaging ISP subsystem. +# Copyright (c) 2010 - 2018, Intel Corporation. 
+# +# This program is free software; you can redistribute it and/or modify it +# under the terms and conditions of the GNU General Public License, +# version 2, as published by the Free Software Foundation. +# +# This program is distributed in the hope it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for +# more details +# +# +ifndef REGMEM_MK +REGMEM_MK=1 + +# MODULE is REGMEM + +REGMEM_DIR=$${MODULES_DIR}/regmem + +REGMEM_INTERFACE=$(REGMEM_DIR)/interface +REGMEM_SOURCES=$(REGMEM_DIR)/src + +REGMEM_HOST_FILES = +REGMEM_FW_FILES = $(REGMEM_SOURCES)/regmem.c + +REGMEM_CPPFLAGS = -I$(REGMEM_INTERFACE) -I$(REGMEM_SOURCES) +REGMEM_HOST_CPPFLAGS = $(REGMEM_CPPFLAGS) +REGMEM_FW_CPPFLAGS = $(REGMEM_CPPFLAGS) + +endif diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/regmem/src/regmem_access_host.h b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/regmem/src/regmem_access_host.h new file mode 100644 index 0000000000000..8878d7074fabb --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/regmem/src/regmem_access_host.h @@ -0,0 +1,41 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+*/ + +#ifndef __REGMEM_ACCESS_HOST_H +#define __REGMEM_ACCESS_HOST_H + +#include "regmem_access.h" /* implemented interface */ + +#include "storage_class.h" +#include "regmem_const.h" +#include +#include "ia_css_cmem.h" + +STORAGE_CLASS_INLINE unsigned int +regmem_load_32(unsigned int mem_addr, unsigned int reg, unsigned int ssid) +{ + /* No need to add REGMEM_OFFSET, it is already included in mem_addr. */ + return ia_css_cmem_load_32(ssid, mem_addr + (REGMEM_WORD_BYTES*reg)); +} + +STORAGE_CLASS_INLINE void +regmem_store_32(unsigned int mem_addr, unsigned int reg, + unsigned int value, unsigned int ssid) +{ + /* No need to add REGMEM_OFFSET, it is already included in mem_addr. */ + ia_css_cmem_store_32(ssid, mem_addr + (REGMEM_WORD_BYTES*reg), + value); +} + +#endif /* __REGMEM_ACCESS_HOST_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/regmem/src/regmem_const.h b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/regmem/src/regmem_const.h new file mode 100644 index 0000000000000..ac7e3a98a434f --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/regmem/src/regmem_const.h @@ -0,0 +1,28 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+*/ + +#ifndef __REGMEM_CONST_H +#define __REGMEM_CONST_H + +#ifndef REGMEM_SIZE +#define REGMEM_SIZE (16) +#endif /* REGMEM_SIZE */ +#ifndef REGMEM_OFFSET +#define REGMEM_OFFSET (0) +#endif /* REGMEM_OFFSET */ +#ifndef REGMEM_WORD_BYTES +#define REGMEM_WORD_BYTES (4) +#endif + +#endif /* __REGMEM_CONST_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/support/assert_support.h b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/support/assert_support.h new file mode 100644 index 0000000000000..f904a494b53c9 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/support/assert_support.h @@ -0,0 +1,197 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ + +#ifndef __ASSERT_SUPPORT_H +#define __ASSERT_SUPPORT_H + +/* This file provides support for run-time assertions + * and compile-time assertions. + * + * Run-time assertions are provided via the following syntax: + * assert(condition) + * Run-time assertions are disabled using the NDEBUG flag. + * + * Compile time assertions are provided via the following syntax: + * COMPILATION_ERROR_IF(condition); + * A compile-time assertion will fail to compile if the condition is false. + * The condition must be constant, such that it can be evaluated + * at compile time. + * + * OP___assert is deprecated. 
+ */ + +#define IA_CSS_ASSERT(expr) assert(expr) + +#ifdef __KLOCWORK__ +/* Klocwork does not see that assert will lead to abortion + * as there is no good way to tell this to KW and the code + * should not depend on assert to function (actually the assert + * could be disabled in a release build) it was decided to + * disable the assert for KW scans (by defining NDEBUG) + */ +#define NDEBUG +#endif /* __KLOCWORK__ */ + +/** + * The following macro can help to test the size of a struct at compile + * time rather than at run-time. It does not work for all compilers; see + * below. + * + * Depending on the value of 'condition', the following macro is expanded to: + * - condition==true: + * an expression containing an array declaration with negative size, + * usually resulting in a compilation error + * - condition==false: + * (void) 1; // C statement with no effect + * + * example: + * COMPILATION_ERROR_IF( sizeof(struct host_sp_queues) != + * SIZE_OF_HOST_SP_QUEUES_STRUCT); + * + * verify that the macro indeed triggers a compilation error with your compiler: + * COMPILATION_ERROR_IF( sizeof(struct host_sp_queues) != + * (sizeof(struct host_sp_queues)+1) ); + * + * Not all compilers will trigger an error with this macro; + * use a search engine to search for BUILD_BUG_ON to find other methods. 
+ */ +#define COMPILATION_ERROR_IF(condition) \ +((void)sizeof(char[1 - 2*!!(condition)])) + +/* Compile time assertion */ +#ifndef CT_ASSERT +#define CT_ASSERT(cnd) ((void)sizeof(char[(cnd)?1 : -1])) +#endif /* CT_ASSERT */ + +#ifdef NDEBUG + +#define assert(cnd) ((void)0) + +#else + +#include "storage_class.h" + +#if defined(_MSC_VER) +#ifdef _KERNEL_MODE +/* Windows kernel mode compilation */ +#include +#define assert(cnd) ASSERT(cnd) +#else +/* Windows usermode compilation */ +#include +#endif + +#elif defined(__HIVECC) + +/* + * target: assert disabled + * sched: assert enabled only when SCHED_DEBUG is defined + * unsched: assert enabled + */ +#if defined(HRT_HW) +#define assert(cnd) ((void)0) +#elif defined(HRT_SCHED) && !defined(DEBUG_SCHED) +#define assert(cnd) ((void)0) +#elif defined(PIPE_GENERATION) +#define assert(cnd) ((void)0) +#else +#include +#define assert(cnd) OP___csim_assert(cnd) +#endif + +#elif defined(__KERNEL__) +#include + +#ifndef KERNEL_ASSERT_TO_BUG +#ifndef KERNEL_ASSERT_TO_BUG_ON +#ifndef KERNEL_ASSERT_TO_WARN_ON +#ifndef KERNEL_ASSERT_TO_WARN_ON_INF_LOOP +#ifndef KERNEL_ASSERT_UNDEFINED +/* Default */ +#define KERNEL_ASSERT_TO_BUG +#endif /*KERNEL_ASSERT_UNDEFINED*/ +#endif /*KERNEL_ASSERT_TO_WARN_ON_INF_LOOP*/ +#endif /*KERNEL_ASSERT_TO_WARN_ON*/ +#endif /*KERNEL_ASSERT_TO_BUG_ON*/ +#endif /*KERNEL_ASSERT_TO_BUG*/ + +#ifdef KERNEL_ASSERT_TO_BUG +/* TODO: it would be cleaner to use this: + * #define assert(cnd) BUG_ON(cnd) + * but that causes many compiler warnings (==errors) under Android + * because it seems that the BUG_ON() macro is not seen as a check by + * gcc like the BUG() macro is. 
*/ +#define assert(cnd) \ + do { \ + if (!(cnd)) { \ + BUG(); \ + } \ + } while (0) +#endif /*KERNEL_ASSERT_TO_BUG*/ + +#ifdef KERNEL_ASSERT_TO_BUG_ON +#define assert(cnd) BUG_ON(!(cnd)) +#endif /*KERNEL_ASSERT_TO_BUG_ON*/ + +#ifdef KERNEL_ASSERT_TO_WARN_ON +#define assert(cnd) WARN_ON(!(cnd)) +#endif /*KERNEL_ASSERT_TO_WARN_ON*/ + +#ifdef KERNEL_ASSERT_TO_WARN_ON_INF_LOOP +#define assert(cnd) \ + do { \ + int not_cnd = !(cnd); \ + WARN_ON(not_cnd); \ + if (not_cnd) { \ + for (;;) { \ + } \ + } \ + } while (0) +#endif /*KERNEL_ASSERT_TO_WARN_ON_INF_LOOP*/ + +#ifdef KERNEL_ASSERT_UNDEFINED +#include KERNEL_ASSERT_DEFINITION_FILESTRING +#endif /*KERNEL_ASSERT_UNDEFINED*/ + +#elif defined(__FIST__) || defined(__GNUC__) + +#include "assert.h" + +#else /* default is for unknown environments */ +#define assert(cnd) ((void)0) +#endif + +#endif /* NDEBUG */ + +#ifndef PIPE_GENERATION +/* Deprecated OP___assert, this is still used in ~1000 places + * in the code. This will be removed over time. + * The implementation for the pipe generation tool is in see support.isp.h */ +#define OP___assert(cnd) assert(cnd) + +#ifdef C_RUN +#define compile_time_assert(cond) OP___assert(cond) +#else +#include "storage_class.h" +extern void _compile_time_assert(void); +STORAGE_CLASS_INLINE void compile_time_assert(unsigned cond) +{ + /* Call undefined function if cond is false */ + if (!cond) + _compile_time_assert(); +} +#endif +#endif /* PIPE_GENERATION */ + +#endif /* __ASSERT_SUPPORT_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/support/cpu_mem_support.h b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/support/cpu_mem_support.h new file mode 100644 index 0000000000000..fa349cac4b24a --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/support/cpu_mem_support.h @@ -0,0 +1,233 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. 
+ * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ + +#ifndef __CPU_MEM_SUPPORT_H +#define __CPU_MEM_SUPPORT_H + +#include "storage_class.h" +#include "assert_support.h" +#include "type_support.h" + +STORAGE_CLASS_INLINE void* +ia_css_cpu_mem_copy(void *dst, const void *src, unsigned int size) +{ + /* memcpy cannot be used in Windows (function is not allowed), + * and the safer function memcpy_s is not available on other platforms. + * Because usage of ia_css_cpu_mem_copy is minimal, we implement it here in an easy, + * but sub-optimal way. + */ + unsigned int i; + + assert(dst != NULL && src != NULL); + + if (!(dst != NULL && src != NULL)) { + return NULL; + } + for (i = 0; i < size; i++) { + ((char *)dst)[i] = ((char *)src)[i]; + } + return dst; +} + +#if defined(__KERNEL__) + +#include +#include +#include +#include + +/* TODO: remove, workaround for issue in hrt file ibuf_ctrl_2600_config.c + * error checking code added to SDK that uses calls to exit function + */ +#define exit(a) return + +STORAGE_CLASS_INLINE void* +ia_css_cpu_mem_alloc(unsigned int size) +{ + return kmalloc(size, GFP_KERNEL); +} + +STORAGE_CLASS_INLINE void* +ia_css_cpu_mem_alloc_page_aligned(unsigned int size) +{ + return ia_css_cpu_mem_alloc(size); /* todo: align to page size */ +} + +STORAGE_CLASS_INLINE void +ia_css_cpu_mem_protect(void *ptr, unsigned int size, int prot) +{ + /* nothing here yet */ +} + +STORAGE_CLASS_INLINE void* +ia_css_cpu_mem_set_zero(void *dst, unsigned int size) +{ + return memset(dst, 0, size); /* available in kernel in linux/string.h */ +} + 
+STORAGE_CLASS_INLINE void +ia_css_cpu_mem_free(void *ptr) +{ + kfree(ptr); +} + +STORAGE_CLASS_INLINE void +ia_css_cpu_mem_cache_flush(void *ptr, unsigned int size) +{ + /* parameter check here */ + if (ptr == NULL) + return; + + clflush_cache_range(ptr, size); +} + +STORAGE_CLASS_INLINE void +ia_css_cpu_mem_cache_invalidate(void *ptr, unsigned int size) +{ + /* for now same as flush */ + ia_css_cpu_mem_cache_flush(ptr, size); +} + +#elif defined(_MSC_VER) + +#include +#include +#include + +extern void *hrt_malloc(size_t bytes, int zero_mem); +extern void *hrt_free(void *ptr); +extern void hrt_mem_cache_flush(void *ptr, unsigned int size); +extern void hrt_mem_cache_invalidate(void *ptr, unsigned int size); + +#define malloc(a) hrt_malloc(a, 1) +#define free(a) hrt_free(a) + +#define CSS_PAGE_SIZE (1<<12) + +STORAGE_CLASS_INLINE void* +ia_css_cpu_mem_alloc(unsigned int size) +{ + return malloc(size); +} + +STORAGE_CLASS_INLINE void* +ia_css_cpu_mem_alloc_page_aligned(unsigned int size) +{ + unsigned int buffer_size = size; + + /* Currently hrt_malloc calls Windows ExAllocatePoolWithTag() routine + * to request system memory. 
If the number of bytes is equal or bigger + * than the page size, then the returned address is page aligned, + * but if it's smaller it's not necessarily page-aligned We agreed + * with Windows team that we allocate a full page + * if it's less than page size + */ + if (buffer_size < CSS_PAGE_SIZE) + buffer_size = CSS_PAGE_SIZE; + + return ia_css_cpu_mem_alloc(buffer_size); +} + +STORAGE_CLASS_INLINE void* +ia_css_cpu_mem_set_zero(void *dst, unsigned int size) +{ + return memset(dst, 0, size); +} + +STORAGE_CLASS_INLINE void +ia_css_cpu_mem_free(void *ptr) +{ + free(ptr); +} + +STORAGE_CLASS_INLINE void +ia_css_cpu_mem_cache_flush(void *ptr, unsigned int size) +{ +#ifdef _KERNEL_MODE + hrt_mem_cache_flush(ptr, size); +#else + (void)ptr; + (void)size; +#endif +} + +STORAGE_CLASS_INLINE void +ia_css_cpu_mem_cache_invalidate(void *ptr, unsigned int size) +{ +#ifdef _KERNEL_MODE + hrt_mem_cache_invalidate(ptr, size); +#else + (void)ptr; + (void)size; +#endif +} + +#else + +#include +#include +#include +/* Needed for the MPROTECT */ +#include +#include +#include +#include + + +STORAGE_CLASS_INLINE void* +ia_css_cpu_mem_alloc(unsigned int size) +{ + return malloc(size); +} + +STORAGE_CLASS_INLINE void* +ia_css_cpu_mem_alloc_page_aligned(unsigned int size) +{ + int pagesize; + + pagesize = sysconf(_SC_PAGE_SIZE); + return memalign(pagesize, size); +} + +STORAGE_CLASS_INLINE void* +ia_css_cpu_mem_set_zero(void *dst, unsigned int size) +{ + return memset(dst, 0, size); +} + +STORAGE_CLASS_INLINE void +ia_css_cpu_mem_free(void *ptr) +{ + free(ptr); +} + +STORAGE_CLASS_INLINE void +ia_css_cpu_mem_cache_flush(void *ptr, unsigned int size) +{ + /* not needed in simulation */ + (void)ptr; + (void)size; +} + +STORAGE_CLASS_INLINE void +ia_css_cpu_mem_cache_invalidate(void *ptr, unsigned int size) +{ + /* not needed in simulation */ + (void)ptr; + (void)size; +} + +#endif + +#endif /* __CPU_MEM_SUPPORT_H */ diff --git 
a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/support/error_support.h b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/support/error_support.h new file mode 100644 index 0000000000000..9fe1f65125e6c --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/support/error_support.h @@ -0,0 +1,110 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ + +#ifndef __ERROR_SUPPORT_H +#define __ERROR_SUPPORT_H + +#if defined(__KERNEL__) +#include +#else +#include +#endif +#include + +/* OS-independent definition of IA_CSS errno values */ +/* #define IA_CSS_EINVAL 1 */ +/* #define IA_CSS_EFAULT 2 */ + +#ifdef __HIVECC +#define ERR_EMBEDDED 1 +#else +#define ERR_EMBEDDED 0 +#endif + +#if ERR_EMBEDDED +#define DECLARE_ERRVAL +#else +#define DECLARE_ERRVAL \ + int _errval = 0; +#endif + +/* Use "owl" in while to prevent compiler warnings in Windows */ +#define ALWAYS_FALSE ((void)0, 0) + +#define verifret(cond, error_type) \ +do { \ + if (!(cond)) { \ + return error_type; \ + } \ +} while (ALWAYS_FALSE) + +#define verifjmp(cond, error_tag) \ +do { \ + if (!(cond)) { \ + goto error_tag; \ + } \ +} while (ALWAYS_FALSE) + +#define verifexit(cond) \ +do { \ + if (!(cond)) { \ + goto EXIT; \ + } \ +} while (ALWAYS_FALSE) + +#if ERR_EMBEDDED +#define verifexitval(cond, error_tag) \ +do { \ + assert(cond); \ +} while (ALWAYS_FALSE) +#else +#define verifexitval(cond, error_tag) \ +do { \ + if (!(cond)) { \ + _errval = (error_tag); \ + goto EXIT; \ + } \ +} while 
(ALWAYS_FALSE) +#endif + +#if ERR_EMBEDDED +#define haserror(error_tag) (0) +#else +#define haserror(error_tag) \ + (_errval == (error_tag)) +#endif + +#if ERR_EMBEDDED +#define noerror() (1) +#else +#define noerror() \ + (_errval == 0) +#endif + +#define verifjmpexit(cond) \ +do { \ + if (!(cond)) { \ + goto EXIT; \ + } \ +} while (ALWAYS_FALSE) + +#define verifjmpexitsetretval(cond, retval) \ +do { \ + if (!(cond)) { \ + retval = -1; \ + goto EXIT; \ + } \ +} while (ALWAYS_FALSE) + +#endif /* __ERROR_SUPPORT_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/support/math_support.h b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/support/math_support.h new file mode 100644 index 0000000000000..633f86f1a1b09 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/support/math_support.h @@ -0,0 +1,314 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+*/ + +#ifndef __MATH_SUPPORT_H +#define __MATH_SUPPORT_H + +#include "storage_class.h" /* for STORAGE_CLASS_INLINE */ +#include "type_support.h" +#include "assert_support.h" + +/* in case we have min/max/MIN/MAX macro's undefine them */ +#ifdef min +#undef min +#endif +#ifdef max +#undef max +#endif +#ifdef MIN /* also defined in include/hrt/numeric.h from SDK */ +#undef MIN +#endif +#ifdef MAX +#undef MAX +#endif + +#ifndef UINT16_MAX +#define UINT16_MAX (0xffffUL) +#endif + +#ifndef UINT32_MAX +#define UINT32_MAX (0xffffffffUL) +#endif + +#define IS_ODD(a) ((a) & 0x1) +#define IS_EVEN(a) (!IS_ODD(a)) +#define IS_POWER2(a) (!((a)&((a)-1))) +#define IS_MASK_BITS_SET(a, b) ((a & b) != 0) + +/*To Find next power of 2 number from x */ +#define bit2(x) ((x) | ((x) >> 1)) +#define bit4(x) (bit2(x) | (bit2(x) >> 2)) +#define bit8(x) (bit4(x) | (bit4(x) >> 4)) +#define bit16(x) (bit8(x) | (bit8(x) >> 8)) +#define bit32(x) (bit16(x) | (bit16(x) >> 16)) +#define NEXT_POWER_OF_2(x) (bit32(x-1) + 1) + +/* force a value to a lower even value */ +#define EVEN_FLOOR(x) ((x) & ~1UL) + +/* A => B */ +#define IMPLIES(a, b) (!(a) || (b)) + +/* The ORIG_BITS th bit is the sign bit */ +/* Sign extends a ORIG_BITS bits long signed number to a 64-bit signed number */ +/* By type casting it can relimited to any valid type-size + * (32-bit signed or 16-bit or 8-bit) + */ +/* By masking it can be transformed to any arbitrary bit size */ +#define SIGN_EXTEND(VAL, ORIG_BITS) \ +((~(((VAL)&(1ULL<<((ORIG_BITS)-1)))-1))|(VAL)) + +#define EXTRACT_BIT(a, b) ((a >> b) & 1) + +/* for preprocessor and array sizing use MIN and MAX + otherwise use min and max */ +#define MAX(a, b) (((a) > (b)) ? (a) : (b)) +#define MIN(a, b) (((a) < (b)) ? (a) : (b)) +#define CLIP(a, b, c) MIN((MAX((a), (b))), (c)) +/* Integer round-down division of a with b */ +#define FLOOR_DIV(a, b) ((b) ? 
((a) / (b)) : 0) +/* Align a to the lower multiple of b */ +#define FLOOR_MUL(a, b) (FLOOR_DIV(a, b) * (b)) +/* Integer round-up division of a with b */ +#define CEIL_DIV(a, b) ((b) ? (((a) + (b) - 1) / (b)) : 0) +/* Align a to the upper multiple of b */ +#define CEIL_MUL(a, b) (CEIL_DIV(a, b) * (b)) +/* Align a to the upper multiple of b - fast implementation + * for cases when b=pow(2,n) + */ +#define CEIL_MUL2(a, b) (((a) + (b) - 1) & ~((b) - 1)) +/* integer round-up division of a with pow(2,b) */ +#define CEIL_SHIFT(a, b) (((a) + (1UL << (b)) - 1) >> (b)) +/* Align a to the upper multiple of pow(2,b) */ +#define CEIL_SHIFT_MUL(a, b) (CEIL_SHIFT(a, b) << (b)) +/* Absolute difference of a and b */ +#define ABS_DIF(a, b) (((a) > (b)) ? ((a) - (b)) : ((b) - (a))) +#define ABS(a) ABS_DIF(a, 0) +/* Square of x */ +#define SQR(x) ((x)*(x)) +/* Integer round-half-down division of a and b */ +#define ROUND_HALF_DOWN_DIV(a, b) ((b) ? ((a) + (b / 2) - 1) / (b) : 0) +/* Align a to the round-half-down multiple of b */ +#define ROUND_HALF_DOWN_MUL(a, b) (ROUND_HALF_DOWN_DIV(a, b) * (b)) + +#define MAX3(a, b, c) MAX((a), MAX((b), (c))) +#define MIN3(a, b, c) MIN((a), MIN((b), (c))) +#define MAX4(a, b, c, d) MAX((MAX((a), (b))), (MAX((c), (d)))) +#define MIN4(a, b, c, d) MIN((MIN((a), (b))), (MIN((c), (d)))) + +/* min and max should not be macros as they will evaluate their arguments twice. + if you really need a macro (e.g. for CPP or for initializing an array) + use MIN() and MAX(), otherwise use min() and max() */ + +#ifndef ARRAY_SIZE +#define ARRAY_SIZE(a) ((sizeof(a) / sizeof(*(a)))) +#endif + +#ifndef BYTES +#define BYTES(bit) (((bit)+7)/8) +#endif + +#if !defined(PIPE_GENERATION) +STORAGE_CLASS_INLINE unsigned int max_value_bits(unsigned int bits) +{ + return (bits == 0) ? 
0 : ((2 * ((1 << ((bits) - 1)) - 1)) + 1); +} +STORAGE_CLASS_INLINE unsigned int max_value_bytes(unsigned int bytes) +{ + return max_value_bits(IA_CSS_UINT8_T_BITS * bytes); +} +STORAGE_CLASS_INLINE int max(int a, int b) +{ + return MAX(a, b); +} + +STORAGE_CLASS_INLINE int min(int a, int b) +{ + return MIN(a, b); +} + +STORAGE_CLASS_INLINE int clip(int a, int b, int c) +{ + return min(max(a, b), c); +} + +STORAGE_CLASS_INLINE unsigned int umax(unsigned int a, unsigned int b) +{ + return MAX(a, b); +} + +STORAGE_CLASS_INLINE unsigned int umin(unsigned int a, unsigned int b) +{ + return MIN(a, b); +} + +STORAGE_CLASS_INLINE unsigned int uclip(unsigned int a, unsigned int b, + unsigned int c) +{ + return umin(umax(a, b), c); +} + +STORAGE_CLASS_INLINE unsigned int ceil_div(unsigned int a, unsigned int b) +{ + return CEIL_DIV(a, b); +} + +STORAGE_CLASS_INLINE unsigned int ceil_mul(unsigned int a, unsigned int b) +{ + return CEIL_MUL(a, b); +} + +STORAGE_CLASS_INLINE unsigned int ceil_mul2(unsigned int a, unsigned int b) +{ + return CEIL_MUL2(a, b); +} + +STORAGE_CLASS_INLINE unsigned int ceil_shift(unsigned int a, unsigned int b) +{ + return CEIL_SHIFT(a, b); +} + +STORAGE_CLASS_INLINE unsigned int ceil_shift_mul(unsigned int a, unsigned int b) +{ + return CEIL_SHIFT_MUL(a, b); +} + +STORAGE_CLASS_INLINE int abs_dif(int a, int b) +{ + return ABS_DIF(a, b); +} + +STORAGE_CLASS_INLINE unsigned int uabs_dif(unsigned int a, unsigned int b) +{ + return ABS_DIF(a, b); +} + +STORAGE_CLASS_INLINE unsigned int round_half_down_div(unsigned int a, + unsigned int b) +{ + return ROUND_HALF_DOWN_DIV(a, b); +} + +STORAGE_CLASS_INLINE unsigned int round_half_down_mul(unsigned int a, + unsigned int b) +{ + return ROUND_HALF_DOWN_MUL(a, b); +} + +STORAGE_CLASS_INLINE unsigned int ceil_pow2(uint32_t a) +{ + unsigned int retval = 0; + + if (IS_POWER2(a)) { + retval = (unsigned int)a; + } else { + unsigned int v = a; + + v |= v>>1; + v |= v>>2; + v |= v>>4; + v |= v>>8; + v |= v>>16; + 
retval = (unsigned int)(v+1); + } + return retval; +} + +STORAGE_CLASS_INLINE unsigned int floor_log2(uint32_t a) +{ + static const uint8_t de_bruijn[] = { + 0, 9, 1, 10, 13, 21, 2, 29, 11, 14, 16, 18, 22, 25, 3, 30, + 8, 12, 20, 28, 15, 17, 24, 7, 19, 27, 23, 6, 26, 5, 4, 31 + }; + uint32_t v = a; + + v |= v>>1; + v |= v>>2; + v |= v>>4; + v |= v>>8; + v |= v>>16; + return (unsigned int)de_bruijn[(v*0x07C4ACDDU)>>27]; +} + +/* Divide by small power of two */ +STORAGE_CLASS_INLINE unsigned int +udiv2_small_i(uint32_t a, uint32_t b) +{ + assert(b <= 2); + return a >> (b-1); +} + +/* optimized divide for small results + * a will be divided by b + * outbits is the number of bits needed for the result + * the smaller the cheaper the function will be. + * if the result doesn't fit in the number of output bits + * the result is incorrect and the function will assert + */ +STORAGE_CLASS_INLINE unsigned int +udiv_medium(uint32_t a, uint32_t b, unsigned outbits) +{ + int bit; + unsigned res = 0; + unsigned mask; + +#ifdef VOLCANO +#pragma ipu unroll +#endif + for (bit = outbits-1 ; bit >= 0; bit--) { + mask = 1<= (b<= c ? a+b-c : a+b); +} + +/* + * For SP and ISP, SDK provides the definition of OP_asp_slor. 
+ * We need it only for host + */ +STORAGE_CLASS_INLINE unsigned int OP_asp_slor(int a, int b, int c) +{ + return ((a << c) | b); +} +#else +#include "hive/customops.h" +#endif /* !defined(__VIED_CELL) */ + +#endif /* !defined(PIPE_GENERATION) */ + +#if !defined(__KERNEL__) +#define clamp(a, min_val, max_val) MIN(MAX((a), (min_val)), (max_val)) +#endif /* !defined(__KERNEL__) */ + +#endif /* __MATH_SUPPORT_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/support/misc_support.h b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/support/misc_support.h new file mode 100644 index 0000000000000..a2c2729e946d2 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/support/misc_support.h @@ -0,0 +1,76 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+*/ + +#ifndef __MISC_SUPPORT_H +#define __MISC_SUPPORT_H + +/* suppress compiler warnings on unused variables */ +#ifndef NOT_USED +#define NOT_USED(a) ((void)(a)) +#endif + +/* Calculate the total bytes for pow(2) byte alignment */ +#define tot_bytes_for_pow2_align(pow2, cur_bytes) \ + ((cur_bytes + (pow2 - 1)) & ~(pow2 - 1)) + +/* Display the macro value given a string */ +#define _STR(x) #x +#define STR(x) _STR(x) + +/* Concatenate */ +#ifndef CAT /* also defined in */ +#define _CAT(a, b) a ## b +#define CAT(a, b) _CAT(a, b) +#endif + +#define _CAT3(a, b, c) a ## b ## c +#define CAT3(a, b, c) _CAT3(a, b, c) + +/* NO_HOIST, NO_CSE, NO_ALIAS attributes must be ignored for host code */ +#ifndef __HIVECC +#ifndef NO_HOIST +#define NO_HOIST +#endif +#ifndef NO_CSE +#define NO_CSE +#endif +#ifndef NO_ALIAS +#define NO_ALIAS +#endif +#endif + +enum hive_method_id { + HIVE_METHOD_ID_CRUN, + HIVE_METHOD_ID_UNSCHED, + HIVE_METHOD_ID_SCHED, + HIVE_METHOD_ID_TARGET +}; + +/* Derive METHOD */ +#if defined(C_RUN) + #define HIVE_METHOD "crun" + #define HIVE_METHOD_ID HIVE_METHOD_ID_CRUN +#elif defined(HRT_UNSCHED) + #define HIVE_METHOD "unsched" + #define HIVE_METHOD_ID HIVE_METHOD_ID_UNSCHED +#elif defined(HRT_SCHED) + #define HIVE_METHOD "sched" + #define HIVE_METHOD_ID HIVE_METHOD_ID_SCHED +#else + #define HIVE_METHOD "target" + #define HIVE_METHOD_ID HIVE_METHOD_ID_TARGET + #define HRT_TARGET 1 +#endif + +#endif /* __MISC_SUPPORT_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/support/platform_support.h b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/support/platform_support.h new file mode 100644 index 0000000000000..1752efc7b4df8 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/support/platform_support.h @@ -0,0 +1,146 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. 
+ * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ + +#ifndef __PLATFORM_SUPPORT_H +#define __PLATFORM_SUPPORT_H + +#include "storage_class.h" + +#define MSEC_IN_SEC 1000 +#define NSEC_IN_MSEC 1000000 + +#if defined(_MSC_VER) +#include + +#define IA_CSS_EXTERN +#define SYNC_WITH(x) +#define CSS_ALIGN(d, a) _declspec(align(a)) d + +STORAGE_CLASS_INLINE void ia_css_sleep(void) +{ + /* Placeholder for driver team*/ +} + +STORAGE_CLASS_INLINE void ia_css_sleep_msec(long unsigned int delay_time_ms) +{ + /* Placeholder for driver team*/ + (void)delay_time_ms; +} + +#elif defined(__HIVECC) +#include +#include + +#define IA_CSS_EXTERN extern +#define CSS_ALIGN(d, a) d __attribute__((aligned(a))) +STORAGE_CLASS_INLINE void ia_css_sleep(void) +{ + OP___schedule(); +} + +#elif defined(__KERNEL__) +#include +#include + +#define IA_CSS_EXTERN +#define CSS_ALIGN(d, a) d __aligned(a) + +STORAGE_CLASS_INLINE void ia_css_sleep(void) +{ + usleep_range(1, 50); +} + +#elif defined(__GNUC__) +#include + +#define IA_CSS_EXTERN +#define CSS_ALIGN(d, a) d __attribute__((aligned(a))) + +/* Define some __HIVECC specific macros to nothing to allow host code compilation */ +#ifndef NO_ALIAS +#define NO_ALIAS +#endif + +#ifndef SYNC_WITH +#define SYNC_WITH(x) +#endif + +#if defined(HRT_CSIM) + #include "hrt/host.h" /* Using hrt_sleep from hrt/host.h */ + STORAGE_CLASS_INLINE void ia_css_sleep(void) + { + /* For the SDK still using hrt_sleep */ + hrt_sleep(); + } + STORAGE_CLASS_INLINE void ia_css_sleep_msec(long unsigned int delay_time_ms) + { + /* For the SDK still using hrt_sleep */ + 
long unsigned int i = 0; + for (i = 0; i < delay_time_ms; i++) { + hrt_sleep(); + } + } +#else + #include + STORAGE_CLASS_INLINE void ia_css_sleep(void) + { + struct timespec delay_time; + + delay_time.tv_sec = 0; + delay_time.tv_nsec = 10; + nanosleep(&delay_time, NULL); + } + STORAGE_CLASS_INLINE void ia_css_sleep_msec(long unsigned int delay_time_ms) + { + struct timespec delay_time; + + if (delay_time_ms >= MSEC_IN_SEC) { + delay_time.tv_sec = delay_time_ms / MSEC_IN_SEC; + delay_time.tv_nsec = (delay_time_ms % MSEC_IN_SEC) * NSEC_IN_MSEC; + } else { + delay_time.tv_sec = 0; + delay_time.tv_nsec = delay_time_ms * NSEC_IN_MSEC; + } + nanosleep(&delay_time, NULL); + } +#endif + +#else +#include +#endif + +/*needed for the include in stdint.h for various environments */ +#include "type_support.h" +#include "storage_class.h" + +#define MAX_ALIGNMENT 8 +#define aligned_uint8(type, obj) CSS_ALIGN(uint8_t obj, 1) +#define aligned_int8(type, obj) CSS_ALIGN(int8_t obj, 1) +#define aligned_uint16(type, obj) CSS_ALIGN(uint16_t obj, 2) +#define aligned_int16(type, obj) CSS_ALIGN(int16_t obj, 2) +#define aligned_uint32(type, obj) CSS_ALIGN(uint32_t obj, 4) +#define aligned_int32(type, obj) CSS_ALIGN(int32_t obj, 4) + +/* needed as long as hivecc does not define the type (u)int64_t */ +#if defined(__HIVECC) +#define aligned_uint64(type, obj) CSS_ALIGN(unsigned long long obj, 8) +#define aligned_int64(type, obj) CSS_ALIGN(signed long long obj, 8) +#else +#define aligned_uint64(type, obj) CSS_ALIGN(uint64_t obj, 8) +#define aligned_int64(type, obj) CSS_ALIGN(int64_t obj, 8) +#endif +#define aligned_enum(enum_type, obj) CSS_ALIGN(uint32_t obj, 4) +#define aligned_struct(struct_type, obj) struct_type obj + +#endif /* __PLATFORM_SUPPORT_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/support/print_support.h b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/support/print_support.h new file mode 100644 index 0000000000000..0b614f7ef12d8 --- /dev/null +++ 
b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/support/print_support.h @@ -0,0 +1,90 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ + +#ifndef __PRINT_SUPPORT_H +#define __PRINT_SUPPORT_H + +#if defined(_MSC_VER) +#ifdef _KERNEL_MODE + +/* TODO: Windows driver team to provide tracing mechanism for kernel mode + * e.g. DbgPrint and DbgPrintEx + */ +extern void FwTracePrintPWARN(const char *fmt, ...); +extern void FwTracePrintPRINT(const char *fmt, ...); +extern void FwTracePrintPERROR(const char *fmt, ...); +extern void FwTracePrintPDEBUG(const char *fmt, ...); + +#define PWARN(format, ...) FwTracePrintPWARN(format, __VA_ARGS__) +#define PRINT(format, ...) FwTracePrintPRINT(format, __VA_ARGS__) +#define PERROR(format, ...) FwTracePrintPERROR(format, __VA_ARGS__) +#define PDEBUG(format, ...) FwTracePrintPDEBUG(format, __VA_ARGS__) + +#else +/* Windows usermode compilation */ +#include + +/* To change the defines below, communicate with Windows team first + * to ensure they will not get flooded with prints + */ +/* This is temporary workaround to avoid flooding userspace + * Windows driver with prints + */ + +#define PWARN(format, ...) +#define PRINT(format, ...) +#define PERROR(format, ...) printf("error: " format, __VA_ARGS__) +#define PDEBUG(format, ...) 
+ +#endif /* _KERNEL_MODE */ + +#elif defined(__HIVECC) +#include +/* To be revised + +#define PWARN(format) +#define PRINT(format) OP___printstring(format) +#define PERROR(variable) OP___dump(9999, arguments) +#define PDEBUG(variable) OP___dump(__LINE__, arguments) + +*/ + +#define PRINTSTRING(str) OP___printstring(str) + +#elif defined(__KERNEL__) +#include +#include + + +#define PWARN(format, arguments...) pr_debug(format, ##arguments) +#define PRINT(format, arguments...) pr_debug(format, ##arguments) +#define PERROR(format, arguments...) pr_debug(format, ##arguments) +#define PDEBUG(format, arguments...) pr_debug(format, ##arguments) + +#else +#include + +#define PRINT_HELPER(prefix, format, ...) printf(prefix format "%s", __VA_ARGS__) + +/* The trailing "" allows the edge case of printing single string */ +#define PWARN(...) PRINT_HELPER("warning: ", __VA_ARGS__, "") +#define PRINT(...) PRINT_HELPER("", __VA_ARGS__, "") +#define PERROR(...) PRINT_HELPER("error: ", __VA_ARGS__, "") +#define PDEBUG(...) PRINT_HELPER("debug: ", __VA_ARGS__, "") + +#define PRINTSTRING(str) PRINT(str) + +#endif + +#endif /* __PRINT_SUPPORT_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/support/storage_class.h b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/support/storage_class.h new file mode 100644 index 0000000000000..af19b4026220a --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/support/storage_class.h @@ -0,0 +1,51 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU General Public License for + * more details. +*/ + +#ifndef __STORAGE_CLASS_H +#define __STORAGE_CLASS_H + +#define STORAGE_CLASS_EXTERN \ +extern + +#if defined(_MSC_VER) +#define STORAGE_CLASS_INLINE \ +static __inline +#elif defined(__HIVECC) +#define STORAGE_CLASS_INLINE \ +static inline +#else +#define STORAGE_CLASS_INLINE \ +static inline +#endif + +/* Register struct */ +#ifndef __register +#if defined(__HIVECC) && !defined(PIPE_GENERATION) +#define __register register +#else +#define __register +#endif +#endif + +/* Memory attribute */ +#ifndef MEM +#ifdef PIPE_GENERATION +#elif defined(__HIVECC) +#include +#else +#define MEM(any_mem) +#endif +#endif + +#endif /* __STORAGE_CLASS_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/support/type_support.h b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/support/type_support.h new file mode 100644 index 0000000000000..a86da0e78941c --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/support/type_support.h @@ -0,0 +1,80 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ + +#ifndef __TYPE_SUPPORT_H +#define __TYPE_SUPPORT_H + +/* Per the DLI spec, types are in "type_support.h" and + * "platform_support.h" is for unclassified/to be refactored + * platform specific definitions. 
+ */ +#define IA_CSS_UINT8_T_BITS 8 +#define IA_CSS_UINT16_T_BITS 16 +#define IA_CSS_UINT32_T_BITS 32 +#define IA_CSS_INT32_T_BITS 32 +#define IA_CSS_UINT64_T_BITS 64 + + +#if defined(_MSC_VER) +#include +#include +#include +#include +#if defined(_M_X64) +#define HOST_ADDRESS(x) (unsigned long long)(x) +#else +#define HOST_ADDRESS(x) (unsigned long)(x) +#endif + +#elif defined(PARAM_GENERATION) +/* Nothing */ +#elif defined(__HIVECC) +#include +#include +#include +#include +#define HOST_ADDRESS(x) (unsigned long)(x) + +typedef long long int64_t; +typedef unsigned long long uint64_t; + +#elif defined(__KERNEL__) +#include +#include + +#define CHAR_BIT (8) +#define HOST_ADDRESS(x) (unsigned long)(x) + +#elif defined(__GNUC__) +#include +#include +#include +#include +#define HOST_ADDRESS(x) (unsigned long)(x) + +#else /* default is for the FIST environment */ +#include +#include +#include +#include +#define HOST_ADDRESS(x) (unsigned long)(x) + +#endif + +#if !defined(PIPE_GENERATION) && !defined(IO_GENERATION) +/* genpipe cannot handle the void* syntax */ +typedef void *HANDLE; +#endif + +#endif /* __TYPE_SUPPORT_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/syscom/interface/ia_css_syscom.h b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/syscom/interface/ia_css_syscom.h new file mode 100644 index 0000000000000..5426d6d18e0bd --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/syscom/interface/ia_css_syscom.h @@ -0,0 +1,247 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU General Public License for + * more details. +*/ + +#ifndef __IA_CSS_SYSCOM_H +#define __IA_CSS_SYSCOM_H + + +/* + * The CSS Subsystem Communication Interface - Host side + * + * It provides subsystem initialzation, send ports and receive ports + * The PSYS and ISYS interfaces are implemented on top of this interface. + */ + +#include "ia_css_syscom_config.h" + +#define FW_ERROR_INVALID_PARAMETER (-1) +#define FW_ERROR_BAD_ADDRESS (-2) +#define FW_ERROR_BUSY (-3) +#define FW_ERROR_NO_MEMORY (-4) + +struct ia_css_syscom_context; + +/** + * ia_css_syscom_size() - provide syscom external buffer requirements + * @config: pointer to the configuration data (read) + * @size: pointer to the buffer size (write) + * + * Purpose: + * - Provide external buffer requirements + * - To be used for external buffer allocation + * + */ +extern void +ia_css_syscom_size( + const struct ia_css_syscom_config *cfg, + struct ia_css_syscom_size *size +); + +/** + * ia_css_syscom_open() - initialize a subsystem context + * @config: pointer to the configuration data (read) + * @buf: pointer to externally allocated buffers (read) + * @returns: struct ia_css_syscom_context* on success, 0 otherwise. + * + * Purpose: + * - initialize host side data structures + * - boot the subsystem? + * + */ +extern struct ia_css_syscom_context* +ia_css_syscom_open( + struct ia_css_syscom_config *config, + struct ia_css_syscom_buf *buf +); + +/** + * ia_css_syscom_close() - signal close to cell + * @context: pointer to the subsystem context + * @returns: 0 on success, -2 (FW_ERROR_BUSY) if SPC is not ready yet. + * + * Purpose: + * Request from the Cell to terminate + */ +extern int +ia_css_syscom_close( + struct ia_css_syscom_context *context +); + +/** + * ia_css_syscom_release() - free context + * @context: pointer to the subsystem context + * @force: flag which specifies whether cell + * state will be checked before freeing the + * context. 
+ * @returns: 0 on success, -2 (FW_ERROR_BUSY) if cell + * is busy and call was not forced. + * + * Purpose: + * 2 modes, with first (force==true) immediately + * free context, and second (force==false) verifying + * that the cell state is ok and freeing context if so, + * returning error otherwise. + */ +extern int +ia_css_syscom_release( + struct ia_css_syscom_context *context, + unsigned int force +); + +/** + * Open a port for sending tokens to the subsystem + * @context: pointer to the subsystem context + * @port: send port index + * @returns: 0 on success, -1 (FW_ERROR_INVALID_PARAMETER) otherwise. + */ +extern int +ia_css_syscom_send_port_open( + struct ia_css_syscom_context *context, + unsigned int port +); + +/** + * Closes a port for sending tokens to the subsystem + * @context: pointer to the subsystem context + * @port: send port index + * @returns: 0 on success, -1 (FW_ERROR_INVALID_PARAMETER) otherwise. + */ +extern int +ia_css_syscom_send_port_close( + struct ia_css_syscom_context *context, + unsigned int port +); + +/** + * Get the number of tokens that can be sent to a port without error. + * @context: pointer to the subsystem context + * @port: send port index + * @returns: number of available tokens on success, + * -1 (FW_ERROR_INVALID_PARAMETER) otherwise. + */ +extern int +ia_css_syscom_send_port_available( + struct ia_css_syscom_context *context, + unsigned int port +); + +/** + * Send a token to the subsystem port + * The token size is determined during initialization + * @context: pointer to the subsystem context + * @port: send port index + * @token: pointer to the token value that is transferred to the subsystem + * @returns: number of tokens sent on success, + * -1 (FW_ERROR_INVALID_PARAMETER) otherwise. 
+ */ +extern int +ia_css_syscom_send_port_transfer( + struct ia_css_syscom_context *context, + unsigned int port, + const void *token +); + +/** + * Open a port for receiving tokens to the subsystem + * @context: pointer to the subsystem context + * @port: receive port index + * @returns: 0 on success, -1 (FW_ERROR_INVALID_PARAMETER) otherwise. + */ +extern int +ia_css_syscom_recv_port_open( + struct ia_css_syscom_context *context, + unsigned int port +); + +/** + * Closes a port for receiving tokens to the subsystem + * Returns 0 on success, otherwise negative value of error code + * @context: pointer to the subsystem context + * @port: receive port index + * @returns: 0 on success, -1 (FW_ERROR_INVALID_PARAMETER) otherwise. + */ +extern int +ia_css_syscom_recv_port_close( + struct ia_css_syscom_context *context, + unsigned int port +); + +/** + * Get the number of tokens that can be received from a port without errors. + * @context: pointer to the subsystem context + * @port: receive port index + * @returns: number of available tokens on success, + * -1 (FW_ERROR_INVALID_PARAMETER) otherwise. + */ +extern int +ia_css_syscom_recv_port_available( + struct ia_css_syscom_context *context, + unsigned int port +); + +/** + * Receive a token from the subsystem port + * The token size is determined during initialization + * @context: pointer to the subsystem context + * @port: receive port index + * @token (output): pointer to (space for) the token to be received + * @returns: number of tokens received on success, + * -1 (FW_ERROR_INVALID_PARAMETER) otherwise. 
+ */ +extern int +ia_css_syscom_recv_port_transfer( + struct ia_css_syscom_context *context, + unsigned int port, + void *token +); + +#if HAS_DUAL_CMD_CTX_SUPPORT +/** + * ia_css_syscom_store_dmem() - store subsystem context information in DMEM + * @context: pointer to the subsystem context + * @ssid: subsystem id + * @vtl0_addr_mask: VTL0 address mask; only applicable when the passed in context is secure + * @returns: 0 on success, -1 (FW_ERROR_INVALID_PARAMETER) otherwise. + */ +extern int +ia_css_syscom_store_dmem( + struct ia_css_syscom_context *context, + unsigned int ssid, + unsigned int vtl0_addr_mask +); + +/** + * ia_css_syscom_set_trustlet_status() - store truslet configuration setting + * @context: pointer to the subsystem context + * @trustlet_exist: 1 if trustlet exists + */ +extern void +ia_css_syscom_set_trustlet_status( + unsigned int dmem_addr, + unsigned int ssid, + bool trustlet_exist +); + +/** + * ia_css_syscom_is_ab_spc_ready() - check if SPC access blocker programming is completed + * @context: pointer to the subsystem context + * @returns: 1 when status is ready. 0 otherwise + */ +bool +ia_css_syscom_is_ab_spc_ready( + struct ia_css_syscom_context *ctx +); +#endif /* HAS_DUAL_CMD_CTX_SUPPORT */ + +#endif /* __IA_CSS_SYSCOM_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/syscom/interface/ia_css_syscom_config.h b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/syscom/interface/ia_css_syscom_config.h new file mode 100644 index 0000000000000..2f5eb309df94e --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/syscom/interface/ia_css_syscom_config.h @@ -0,0 +1,97 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. 
+ * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ + +#ifndef __IA_CSS_SYSCOM_CONFIG_H +#define __IA_CSS_SYSCOM_CONFIG_H + +#include +#include + +/* syscom size struct, output of ia_css_syscom_size, + * input for (external) allocation + */ +struct ia_css_syscom_size { + /* Size of host buffer */ + unsigned int cpu; + /* Size of shared config buffer (host to cell) */ + unsigned int shm; + /* Size of shared input queue buffers (host to cell) */ + unsigned int ibuf; + /* Size of shared output queue buffers (cell to host) */ + unsigned int obuf; +}; + +/* syscom buffer struct, output of (external) allocation, + * input for ia_css_syscom_open + */ +struct ia_css_syscom_buf { + char *cpu; /* host buffer */ + + /* shared memory buffer host address */ + host_virtual_address_t shm_host; + /* shared memory buffer cell address */ + vied_virtual_address_t shm_cell; + + /* input queue shared buffer host address */ + host_virtual_address_t ibuf_host; + /* input queue shared buffer cell address */ + vied_virtual_address_t ibuf_cell; + + /* output queue shared buffer host address */ + host_virtual_address_t obuf_host; + /* output queue shared buffer cell address */ + vied_virtual_address_t obuf_cell; +}; + +struct ia_css_syscom_queue_config { + unsigned int queue_size; /* tokens per queue */ + unsigned int token_size; /* bytes per token */ +}; + +/** + * Parameter struct for ia_css_syscom_open + */ +struct ia_css_syscom_config { + /* This member in no longer used in syscom. 
+ It is kept to not break any driver builds, and will be removed when + all assignments have been removed from driver code */ + /* address of firmware in DDR/IMR */ + unsigned long long host_firmware_address; + + /* address of firmware in DDR, seen from SPC */ + unsigned int vied_firmware_address; + + unsigned int ssid; + unsigned int mmid; + + unsigned int num_input_queues; + unsigned int num_output_queues; + struct ia_css_syscom_queue_config *input; + struct ia_css_syscom_queue_config *output; + + unsigned int regs_addr; + unsigned int dmem_addr; + + /* firmware-specific configuration data */ + void *specific_addr; + unsigned int specific_size; + + /* if true; secure syscom in VTIO Case + * if false, non-secure syscom + */ + bool secure; + unsigned int vtl0_addr_mask; /* only applicable in 'secure' case */ +}; + +#endif /* __IA_CSS_SYSCOM_CONFIG_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/syscom/interface/ia_css_syscom_trace.h b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/syscom/interface/ia_css_syscom_trace.h new file mode 100644 index 0000000000000..2c32693c2a82e --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/syscom/interface/ia_css_syscom_trace.h @@ -0,0 +1,51 @@ +/* + * Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+ */ + +#ifndef __IA_CSS_SYSCOM_TRACE_H +#define __IA_CSS_SYSCOM_TRACE_H + +#include "ia_css_trace.h" + +#define SYSCOM_TRACE_LEVEL_DEFAULT 1 +#define SYSCOM_TRACE_LEVEL_DEBUG 2 + +/* Set to default level if no level is defined */ +#ifndef SYSCOM_TRACE_LEVEL +#define SYSCOM_TRACE_LEVEL SYSCOM_TRACE_LEVEL_DEFAULT +#endif /* SYSCOM_TRACE_LEVEL */ + +/* SYSCOM Module tracing backend is mapped to TUNIT tracing for target platforms */ +#ifdef __HIVECC +# ifndef HRT_CSIM +# define SYSCOM_TRACE_METHOD IA_CSS_TRACE_METHOD_TRACE +# else +# define SYSCOM_TRACE_METHOD IA_CSS_TRACE_METHOD_NATIVE +# endif +#else +# define SYSCOM_TRACE_METHOD IA_CSS_TRACE_METHOD_NATIVE +#endif + +#define SYSCOM_TRACE_LEVEL_INFO IA_CSS_TRACE_LEVEL_ENABLED +#define SYSCOM_TRACE_LEVEL_WARNING IA_CSS_TRACE_LEVEL_ENABLED +#define SYSCOM_TRACE_LEVEL_ERROR IA_CSS_TRACE_LEVEL_ENABLED + +#if (SYSCOM_TRACE_LEVEL == SYSCOM_TRACE_LEVEL_DEFAULT) +# define SYSCOM_TRACE_LEVEL_VERBOSE IA_CSS_TRACE_LEVEL_DISABLED +#elif (SYSCOM_TRACE_LEVEL == SYSCOM_TRACE_LEVEL_DEBUG) +# define SYSCOM_TRACE_LEVEL_VERBOSE IA_CSS_TRACE_LEVEL_ENABLED +#else +# error "Connection manager trace level not defined!" +#endif /* SYSCOM_TRACE_LEVEL */ + +#endif /* __IA_CSS_SYSCOM_TRACE_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/syscom/src/ia_css_syscom.c b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/syscom/src/ia_css_syscom.c new file mode 100644 index 0000000000000..cdf9df0531ff0 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/syscom/src/ia_css_syscom.c @@ -0,0 +1,650 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. 
+ * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ + +#include "ia_css_syscom.h" + +#include "ia_css_syscom_context.h" +#include "ia_css_syscom_config_fw.h" +#include "ia_css_syscom_trace.h" + +#include "queue.h" +#include "send_port.h" +#include "recv_port.h" +#include "regmem_access.h" + +#include "error_support.h" +#include "cpu_mem_support.h" + +#include "queue_struct.h" +#include "send_port_struct.h" +#include "recv_port_struct.h" + +#include "type_support.h" +#include +#include +#include "platform_support.h" + +#include "ia_css_cell.h" + +/* struct of internal buffer sizes */ +struct ia_css_syscom_size_intern { + unsigned int context; + unsigned int input_queue; + unsigned int output_queue; + unsigned int input_port; + unsigned int output_port; + + unsigned int fw_config; + unsigned int specific; + + unsigned int input_buffer; + unsigned int output_buffer; +}; + +/* Allocate buffers internally, when no buffers are provided */ +static int +ia_css_syscom_alloc( + unsigned int ssid, + unsigned int mmid, + const struct ia_css_syscom_size *size, + struct ia_css_syscom_buf *buf) +{ + /* zero the buffer to set all pointers to zero */ + memset(buf, 0, sizeof(*buf)); + + /* allocate cpu_mem */ + buf->cpu = (char *)ia_css_cpu_mem_alloc(size->cpu); + if (!buf->cpu) + goto EXIT7; + + /* allocate and map shared config buffer */ + buf->shm_host = shared_memory_alloc(mmid, size->shm); + if (!buf->shm_host) + goto EXIT6; + buf->shm_cell = shared_memory_map(ssid, mmid, buf->shm_host); + if (!buf->shm_cell) + goto EXIT5; + + /* allocate and map input queue buffer */ + buf->ibuf_host = shared_memory_alloc(mmid, size->ibuf); + if (!buf->ibuf_host) + goto EXIT4; + buf->ibuf_cell = shared_memory_map(ssid, mmid, buf->ibuf_host); + if (!buf->ibuf_cell) + goto EXIT3; + + /* allocate and 
map output queue buffer */ + buf->obuf_host = shared_memory_alloc(mmid, size->obuf); + if (!buf->obuf_host) + goto EXIT2; + buf->obuf_cell = shared_memory_map(ssid, mmid, buf->obuf_host); + if (!buf->obuf_cell) + goto EXIT1; + + return 0; + +EXIT1: shared_memory_free(mmid, buf->obuf_host); +EXIT2: shared_memory_unmap(ssid, mmid, buf->ibuf_cell); +EXIT3: shared_memory_free(mmid, buf->ibuf_host); +EXIT4: shared_memory_unmap(ssid, mmid, buf->shm_cell); +EXIT5: shared_memory_free(mmid, buf->shm_host); +EXIT6: ia_css_cpu_mem_free(buf->cpu); +EXIT7: return FW_ERROR_NO_MEMORY; +} + +static void +ia_css_syscom_size_intern( + const struct ia_css_syscom_config *cfg, + struct ia_css_syscom_size_intern *size) +{ + /* convert syscom config into syscom internal size struct */ + + unsigned int i; + + size->context = sizeof(struct ia_css_syscom_context); + size->input_queue = cfg->num_input_queues * sizeof(struct sys_queue); + size->output_queue = cfg->num_output_queues * sizeof(struct sys_queue); + size->input_port = cfg->num_input_queues * sizeof(struct send_port); + size->output_port = cfg->num_output_queues * sizeof(struct recv_port); + + size->fw_config = sizeof(struct ia_css_syscom_config_fw); + size->specific = cfg->specific_size; + + /* accumulate input queue buffer sizes */ + size->input_buffer = 0; + for (i = 0; i < cfg->num_input_queues; i++) { + size->input_buffer += + sys_queue_buf_size(cfg->input[i].queue_size, + cfg->input[i].token_size); + } + + /* accumulate outut queue buffer sizes */ + size->output_buffer = 0; + for (i = 0; i < cfg->num_output_queues; i++) { + size->output_buffer += + sys_queue_buf_size(cfg->output[i].queue_size, + cfg->output[i].token_size); + } +} + +static void +ia_css_syscom_size_extern( + const struct ia_css_syscom_size_intern *i, + struct ia_css_syscom_size *e) +{ + /* convert syscom internal size struct into external size struct */ + + e->cpu = i->context + i->input_queue + i->output_queue + + i->input_port + i->output_port; + e->shm = 
i->fw_config + i->input_queue + i->output_queue + i->specific; + e->ibuf = i->input_buffer; + e->obuf = i->output_buffer; +} + +/* Function that provides buffer sizes to be allocated */ +void +ia_css_syscom_size( + const struct ia_css_syscom_config *cfg, + struct ia_css_syscom_size *size) +{ + struct ia_css_syscom_size_intern i; + + ia_css_syscom_size_intern(cfg, &i); + ia_css_syscom_size_extern(&i, size); +} + +static struct ia_css_syscom_context* +ia_css_syscom_assign_buf( + const struct ia_css_syscom_size_intern *i, + const struct ia_css_syscom_buf *buf) +{ + struct ia_css_syscom_context *ctx; + char *cpu_mem_buf; + host_virtual_address_t shm_buf_host; + vied_virtual_address_t shm_buf_cell; + + /* host context */ + cpu_mem_buf = buf->cpu; + + ctx = (struct ia_css_syscom_context *)cpu_mem_buf; + ia_css_cpu_mem_set_zero(ctx, i->context); + cpu_mem_buf += i->context; + + ctx->input_queue = (struct sys_queue *) cpu_mem_buf; + cpu_mem_buf += i->input_queue; + + ctx->output_queue = (struct sys_queue *) cpu_mem_buf; + cpu_mem_buf += i->output_queue; + + ctx->send_port = (struct send_port *) cpu_mem_buf; + cpu_mem_buf += i->input_port; + + ctx->recv_port = (struct recv_port *) cpu_mem_buf; + + + /* cell config */ + shm_buf_host = buf->shm_host; + shm_buf_cell = buf->shm_cell; + + ctx->config_host_addr = shm_buf_host; + shm_buf_host += i->fw_config; + ctx->config_vied_addr = shm_buf_cell; + shm_buf_cell += i->fw_config; + + ctx->input_queue_host_addr = shm_buf_host; + shm_buf_host += i->input_queue; + ctx->input_queue_vied_addr = shm_buf_cell; + shm_buf_cell += i->input_queue; + + ctx->output_queue_host_addr = shm_buf_host; + shm_buf_host += i->output_queue; + ctx->output_queue_vied_addr = shm_buf_cell; + shm_buf_cell += i->output_queue; + + ctx->specific_host_addr = shm_buf_host; + ctx->specific_vied_addr = shm_buf_cell; + + ctx->ibuf_host_addr = buf->ibuf_host; + ctx->ibuf_vied_addr = buf->ibuf_cell; + + ctx->obuf_host_addr = buf->obuf_host; + ctx->obuf_vied_addr = 
buf->obuf_cell; + + return ctx; +} + +struct ia_css_syscom_context* +ia_css_syscom_open( + struct ia_css_syscom_config *cfg, + struct ia_css_syscom_buf *buf_extern +) +{ + struct ia_css_syscom_size_intern size_intern; + struct ia_css_syscom_size size; + struct ia_css_syscom_buf buf_intern; + struct ia_css_syscom_buf *buf; + struct ia_css_syscom_context *ctx; + struct ia_css_syscom_config_fw fw_cfg; + unsigned int i; + struct sys_queue_res res; + + IA_CSS_TRACE_0(SYSCOM, INFO, "Entered: ia_css_syscom_open\n"); + + /* error handling */ + if (cfg == NULL) + return NULL; + + IA_CSS_TRACE_1(SYSCOM, INFO, "ia_css_syscom_open (secure %d) start\n", cfg->secure); + + /* check members of cfg: TBD */ + + /* + * Check if SP is in valid state, have to wait if not ready. + * In some platform (Such as VP), it will need more time to wait due to system performance; + * If return NULL without wait for SPC0 ready, Driver load FW will failed + */ + ia_css_cell_wait(cfg->ssid, SPC0); + + ia_css_syscom_size_intern(cfg, &size_intern); + ia_css_syscom_size_extern(&size_intern, &size); + + if (buf_extern) { + /* use externally allocated buffers */ + buf = buf_extern; + } else { + /* use internally allocated buffers */ + buf = &buf_intern; + if (ia_css_syscom_alloc(cfg->ssid, cfg->mmid, &size, buf) != 0) + return NULL; + } + + /* assign buffer pointers */ + ctx = ia_css_syscom_assign_buf(&size_intern, buf); + /* only need to free internally allocated buffers */ + ctx->free_buf = !buf_extern; + + ctx->cell_regs_addr = cfg->regs_addr; + /* regmem is at cell_dmem_addr + REGMEM_OFFSET */ + ctx->cell_dmem_addr = cfg->dmem_addr; + + ctx->num_input_queues = cfg->num_input_queues; + ctx->num_output_queues = cfg->num_output_queues; + + ctx->env.mmid = cfg->mmid; + ctx->env.ssid = cfg->ssid; + ctx->env.mem_addr = cfg->dmem_addr; + + ctx->regmem_idx = SYSCOM_QPR_BASE_REG; + + /* initialize input queues */ + res.reg = SYSCOM_QPR_BASE_REG; + res.host_address = ctx->ibuf_host_addr; + res.vied_address = 
ctx->ibuf_vied_addr; + for (i = 0; i < cfg->num_input_queues; i++) { + sys_queue_init(ctx->input_queue + i, + cfg->input[i].queue_size, + cfg->input[i].token_size, &res); + } + + /* initialize output queues */ + res.host_address = ctx->obuf_host_addr; + res.vied_address = ctx->obuf_vied_addr; + for (i = 0; i < cfg->num_output_queues; i++) { + sys_queue_init(ctx->output_queue + i, + cfg->output[i].queue_size, + cfg->output[i].token_size, &res); + } + + /* fill shared queue structs */ + shared_memory_store(cfg->mmid, ctx->input_queue_host_addr, + ctx->input_queue, + cfg->num_input_queues * sizeof(struct sys_queue)); + ia_css_cpu_mem_cache_flush( + (void *)HOST_ADDRESS(ctx->input_queue_host_addr), + cfg->num_input_queues * sizeof(struct sys_queue)); + shared_memory_store(cfg->mmid, ctx->output_queue_host_addr, + ctx->output_queue, + cfg->num_output_queues * sizeof(struct sys_queue)); + ia_css_cpu_mem_cache_flush( + (void *)HOST_ADDRESS(ctx->output_queue_host_addr), + cfg->num_output_queues * sizeof(struct sys_queue)); + + /* Zero the queue buffers. Is this really needed? 
*/ + shared_memory_zero(cfg->mmid, buf->ibuf_host, size.ibuf); + ia_css_cpu_mem_cache_flush((void *)HOST_ADDRESS(buf->ibuf_host), + size.ibuf); + shared_memory_zero(cfg->mmid, buf->obuf_host, size.obuf); + ia_css_cpu_mem_cache_flush((void *)HOST_ADDRESS(buf->obuf_host), + size.obuf); + + /* copy firmware specific data */ + if (cfg->specific_addr && cfg->specific_size) { + shared_memory_store(cfg->mmid, ctx->specific_host_addr, + cfg->specific_addr, cfg->specific_size); + ia_css_cpu_mem_cache_flush( + (void *)HOST_ADDRESS(ctx->specific_host_addr), + cfg->specific_size); + } + + fw_cfg.num_input_queues = cfg->num_input_queues; + fw_cfg.num_output_queues = cfg->num_output_queues; + fw_cfg.input_queue = ctx->input_queue_vied_addr; + fw_cfg.output_queue = ctx->output_queue_vied_addr; + fw_cfg.specific_addr = ctx->specific_vied_addr; + fw_cfg.specific_size = cfg->specific_size; + + shared_memory_store(cfg->mmid, ctx->config_host_addr, + &fw_cfg, sizeof(struct ia_css_syscom_config_fw)); + ia_css_cpu_mem_cache_flush((void *)HOST_ADDRESS(ctx->config_host_addr), + sizeof(struct ia_css_syscom_config_fw)); + +#if !HAS_DUAL_CMD_CTX_SUPPORT + /* store syscom uninitialized state */ + IA_CSS_TRACE_3(SYSCOM, INFO, "ia_css_syscom_open store STATE_REG (%#x) @ dmem_addr %#x ssid %d\n", + SYSCOM_STATE_UNINIT, ctx->cell_dmem_addr, cfg->ssid); + regmem_store_32(ctx->cell_dmem_addr, SYSCOM_STATE_REG, + SYSCOM_STATE_UNINIT, cfg->ssid); + /* store syscom uninitialized command */ + IA_CSS_TRACE_3(SYSCOM, INFO, "ia_css_syscom_open store COMMAND_REG (%#x) @ dmem_addr %#x ssid %d\n", + SYSCOM_COMMAND_UNINIT, ctx->cell_dmem_addr, cfg->ssid); + regmem_store_32(ctx->cell_dmem_addr, SYSCOM_COMMAND_REG, + SYSCOM_COMMAND_UNINIT, cfg->ssid); + /* store firmware configuration address */ + IA_CSS_TRACE_3(SYSCOM, INFO, "ia_css_syscom_open store CONFIG_REG (%#x) @ dmem_addr %#x ssid %d\n", + ctx->config_vied_addr, ctx->cell_dmem_addr, cfg->ssid); + regmem_store_32(ctx->cell_dmem_addr, SYSCOM_CONFIG_REG, + 
ctx->config_vied_addr, cfg->ssid); +#endif + + /* Indicate if ctx is created for secure stream purpose */ + ctx->secure = cfg->secure; + + IA_CSS_TRACE_1(SYSCOM, INFO, "ia_css_syscom_open (secure %d) completed\n", cfg->secure); + return ctx; +} + + +int +ia_css_syscom_close( + struct ia_css_syscom_context *ctx +) { + int state; + + state = regmem_load_32(ctx->cell_dmem_addr, SYSCOM_STATE_REG, + ctx->env.ssid); + if (state != SYSCOM_STATE_READY) { + /* SPC is not ready to handle close request yet */ + return FW_ERROR_BUSY; + } + + /* set close request flag */ + regmem_store_32(ctx->cell_dmem_addr, SYSCOM_COMMAND_REG, + SYSCOM_COMMAND_INACTIVE, ctx->env.ssid); + + return 0; +} + +static void +ia_css_syscom_free(struct ia_css_syscom_context *ctx) +{ + shared_memory_unmap(ctx->env.ssid, ctx->env.mmid, ctx->ibuf_vied_addr); + shared_memory_free(ctx->env.mmid, ctx->ibuf_host_addr); + shared_memory_unmap(ctx->env.ssid, ctx->env.mmid, ctx->obuf_vied_addr); + shared_memory_free(ctx->env.mmid, ctx->obuf_host_addr); + shared_memory_unmap(ctx->env.ssid, ctx->env.mmid, + ctx->config_vied_addr); + shared_memory_free(ctx->env.mmid, ctx->config_host_addr); + ia_css_cpu_mem_free(ctx); +} + +int +ia_css_syscom_release( + struct ia_css_syscom_context *ctx, + unsigned int force +) { + /* check if release is forced, an verify cell state if it is not */ + if (!force) { + if (!ia_css_cell_is_ready(ctx->env.ssid, SPC0)) + return FW_ERROR_BUSY; + } + + /* Reset the regmem idx */ + ctx->regmem_idx = 0; + + if (ctx->free_buf) + ia_css_syscom_free(ctx); + + return 0; +} + +int ia_css_syscom_send_port_open( + struct ia_css_syscom_context *ctx, + unsigned int port +) +{ + int state; + + /* check parameters */ + verifret(ctx != NULL, FW_ERROR_BAD_ADDRESS); + verifret(port < ctx->num_input_queues, FW_ERROR_INVALID_PARAMETER); + + /* check if SP syscom is ready to open the queue */ + state = regmem_load_32(ctx->cell_dmem_addr, SYSCOM_STATE_REG, + ctx->env.ssid); + if (state != SYSCOM_STATE_READY) 
{ + /* SPC is not ready to handle messages yet */ + return FW_ERROR_BUSY; + } + + /* initialize the port */ + send_port_open(ctx->send_port + port, + ctx->input_queue + port, &(ctx->env)); + + return 0; +} + +int ia_css_syscom_send_port_close( + struct ia_css_syscom_context *ctx, + unsigned int port +) +{ + /* check parameters */ + verifret(ctx != NULL, FW_ERROR_BAD_ADDRESS); + verifret(port < ctx->num_input_queues, FW_ERROR_INVALID_PARAMETER); + + return 0; +} + +int ia_css_syscom_send_port_available( + struct ia_css_syscom_context *ctx, + unsigned int port +) +{ + /* check params */ + verifret(ctx != NULL, FW_ERROR_BAD_ADDRESS); + verifret(port < ctx->num_input_queues, FW_ERROR_INVALID_PARAMETER); + + return send_port_available(ctx->send_port + port); +} + +int ia_css_syscom_send_port_transfer( + struct ia_css_syscom_context *ctx, + unsigned int port, + const void *token +) +{ + /* check params */ + verifret(ctx != NULL, FW_ERROR_BAD_ADDRESS); + verifret(port < ctx->num_input_queues, FW_ERROR_INVALID_PARAMETER); + + return send_port_transfer(ctx->send_port + port, token); +} + +int ia_css_syscom_recv_port_open( + struct ia_css_syscom_context *ctx, + unsigned int port +) +{ + int state; + + /* check parameters */ + verifret(ctx != NULL, FW_ERROR_BAD_ADDRESS); + verifret(port < ctx->num_output_queues, FW_ERROR_INVALID_PARAMETER); + + /* check if SP syscom is ready to open the queue */ + state = regmem_load_32(ctx->cell_dmem_addr, + SYSCOM_STATE_REG, ctx->env.ssid); + if (state != SYSCOM_STATE_READY) { + /* SPC is not ready to handle messages yet */ + return FW_ERROR_BUSY; + } + + /* initialize the port */ + recv_port_open(ctx->recv_port + port, + ctx->output_queue + port, &(ctx->env)); + + return 0; +} + +int ia_css_syscom_recv_port_close( + struct ia_css_syscom_context *ctx, + unsigned int port +) +{ + /* check parameters */ + verifret(ctx != NULL, FW_ERROR_BAD_ADDRESS); + verifret(port < ctx->num_output_queues, FW_ERROR_INVALID_PARAMETER); + + return 0; +} + +/* 
+ * Get the number of responses in the response queue + */ +int +ia_css_syscom_recv_port_available( + struct ia_css_syscom_context *ctx, + unsigned int port +) +{ + /* check params */ + verifret(ctx != NULL, FW_ERROR_BAD_ADDRESS); + verifret(port < ctx->num_output_queues, FW_ERROR_INVALID_PARAMETER); + + return recv_port_available(ctx->recv_port + port); +} + + +/* + * Dequeue the head of the response queue + * returns an error when the response queue is empty + */ +int +ia_css_syscom_recv_port_transfer( + struct ia_css_syscom_context *ctx, + unsigned int port, + void *token +) +{ + /* check params */ + verifret(ctx != NULL, FW_ERROR_BAD_ADDRESS); + verifret(port < ctx->num_output_queues, FW_ERROR_INVALID_PARAMETER); + + return recv_port_transfer(ctx->recv_port + port, token); +} + +#if HAS_DUAL_CMD_CTX_SUPPORT +/* + * store subsystem context information in DMEM + */ +int +ia_css_syscom_store_dmem( + struct ia_css_syscom_context *ctx, + unsigned int ssid, + unsigned int vtl0_addr_mask +) +{ + unsigned int read_back; + + NOT_USED(vtl0_addr_mask); + NOT_USED(read_back); + + if (ctx->secure) { + /* store VTL0 address mask in 'secure' context */ + IA_CSS_TRACE_3(SYSCOM, INFO, "ia_css_syscom_store_dmem VTL0_ADDR_MASK (%#x) @ dmem_addr %#x ssid %d\n", + vtl0_addr_mask, ctx->cell_dmem_addr, ssid); + regmem_store_32(ctx->cell_dmem_addr, SYSCOM_VTL0_ADDR_MASK, vtl0_addr_mask, ssid); + } + /* store firmware configuration address */ + IA_CSS_TRACE_3(SYSCOM, INFO, "ia_css_syscom_store_dmem CONFIG_REG (%#x) @ dmem_addr %#x ssid %d\n", + ctx->config_vied_addr, ctx->cell_dmem_addr, ssid); + regmem_store_32(ctx->cell_dmem_addr, SYSCOM_CONFIG_REG, + ctx->config_vied_addr, ssid); + /* store syscom uninitialized state */ + IA_CSS_TRACE_3(SYSCOM, INFO, "ia_css_syscom_store_dmem STATE_REG (%#x) @ dmem_addr %#x ssid %d\n", + SYSCOM_STATE_UNINIT, ctx->cell_dmem_addr, ssid); + regmem_store_32(ctx->cell_dmem_addr, SYSCOM_STATE_REG, + SYSCOM_STATE_UNINIT, ssid); + /* store syscom 
uninitialized command */ + IA_CSS_TRACE_3(SYSCOM, INFO, "ia_css_syscom_store_dmem COMMAND_REG (%#x) @ dmem_addr %#x ssid %d\n", + SYSCOM_COMMAND_UNINIT, ctx->cell_dmem_addr, ssid); + regmem_store_32(ctx->cell_dmem_addr, SYSCOM_COMMAND_REG, + SYSCOM_COMMAND_UNINIT, ssid); + + return 0; +} + +/* + * store trustlet configuration status setting + */ +void +ia_css_syscom_set_trustlet_status( + unsigned int dmem_addr, + unsigned int ssid, + bool trustlet_exist +) +{ + unsigned int value; + + value = trustlet_exist ? TRUSTLET_EXIST : TRUSTLET_NOT_EXIST; + IA_CSS_TRACE_3(SYSCOM, INFO, + "ia_css_syscom_set_trustlet_status TRUSTLET_STATUS (%#x) @ dmem_addr %#x ssid %d\n", + value, dmem_addr, ssid); + regmem_store_32(dmem_addr, TRUSTLET_STATUS, value, ssid); +} + +/* + * check if SPC access blocker programming is completed + */ +bool +ia_css_syscom_is_ab_spc_ready( + struct ia_css_syscom_context *ctx +) +{ + unsigned int value; + + /* We expect the call from non-secure context only */ + if (ctx->secure) { + IA_CSS_TRACE_0(SYSCOM, ERROR, "ia_css_syscom_is_spc_ab_ready - Please call from non-secure context\n"); + return false; + } + + value = regmem_load_32(ctx->cell_dmem_addr, AB_SPC_STATUS, ctx->env.ssid); + IA_CSS_TRACE_3(SYSCOM, INFO, + "ia_css_syscom_is_spc_ab_ready AB_SPC_STATUS @ dmem_addr %#x ssid %d - value %#x\n", + ctx->cell_dmem_addr, ctx->env.ssid, value); + + return (value == AB_SPC_READY); +} +#endif /* HAS_DUAL_CMD_CTX_SUPPORT */ diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/syscom/src/ia_css_syscom_config_fw.h b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/syscom/src/ia_css_syscom_config_fw.h new file mode 100644 index 0000000000000..0cacd5a34934d --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/syscom/src/ia_css_syscom_config_fw.h @@ -0,0 +1,69 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. 
+ * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ + +#ifndef __IA_CSS_SYSCOM_CONFIG_FW_H +#define __IA_CSS_SYSCOM_CONFIG_FW_H + +#include "type_support.h" + +enum { + /* Program load or explicit host setting should init to this */ + SYSCOM_STATE_UNINIT = 0x57A7E000, + /* SP Syscom sets this when it is ready for use */ + SYSCOM_STATE_READY = 0x57A7E001, + /* SP Syscom sets this when no more syscom accesses will happen */ + SYSCOM_STATE_INACTIVE = 0x57A7E002 +}; + +enum { + /* Program load or explicit host setting should init to this */ + SYSCOM_COMMAND_UNINIT = 0x57A7F000, + /* Host Syscom requests syscom to become inactive */ + SYSCOM_COMMAND_INACTIVE = 0x57A7F001 +}; + +#if HAS_DUAL_CMD_CTX_SUPPORT +enum { + /* Program load or explicit host setting should init to this */ + TRUSTLET_UNINIT = 0x57A8E000, + /* Host Syscom informs SP that Trustlet exists */ + TRUSTLET_EXIST = 0x57A8E001, + /* Host Syscom informs SP that Trustlet does not exist */ + TRUSTLET_NOT_EXIST = 0x57A8E002 +}; + +enum { + /* Program load or explicit setting initialized by SP */ + AB_SPC_NOT_READY = 0x57A8F000, + /* SP informs host that SPC access programming is completed */ + AB_SPC_READY = 0x57A8F001 +}; +#endif + +/* firmware config: data that sent from the host to SP via DDR */ +/* Cell copies data into a context */ + +struct ia_css_syscom_config_fw { + unsigned int firmware_address; + + unsigned int num_input_queues; + unsigned int num_output_queues; + unsigned int input_queue; /* hmm_ptr / struct queue* */ + unsigned int output_queue; /* hmm_ptr / struct queue* */ + + unsigned int 
specific_addr; /* vied virtual address */ + unsigned int specific_size; +}; + +#endif /* __IA_CSS_SYSCOM_CONFIG_FW_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/syscom/src/ia_css_syscom_context.h b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/syscom/src/ia_css_syscom_context.h new file mode 100644 index 0000000000000..ecf22f6b7ac53 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/syscom/src/ia_css_syscom_context.h @@ -0,0 +1,65 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+*/ + +#ifndef __IA_CSS_SYSCOM_CONTEXT_H +#define __IA_CSS_SYSCOM_CONTEXT_H + +#include + +#include "port_env_struct.h" +#include + +/* host context */ +struct ia_css_syscom_context { + vied_virtual_address_t cell_firmware_addr; + unsigned int cell_regs_addr; + unsigned int cell_dmem_addr; + + struct port_env env; + + unsigned int num_input_queues; + unsigned int num_output_queues; + + /* array of input queues (from host to SP) */ + struct sys_queue *input_queue; + /* array of output queues (from SP to host) */ + struct sys_queue *output_queue; + + struct send_port *send_port; + struct recv_port *recv_port; + + unsigned int regmem_idx; + unsigned int free_buf; + + host_virtual_address_t config_host_addr; + host_virtual_address_t input_queue_host_addr; + host_virtual_address_t output_queue_host_addr; + host_virtual_address_t specific_host_addr; + host_virtual_address_t ibuf_host_addr; + host_virtual_address_t obuf_host_addr; + + vied_virtual_address_t config_vied_addr; + vied_virtual_address_t input_queue_vied_addr; + vied_virtual_address_t output_queue_vied_addr; + vied_virtual_address_t specific_vied_addr; + vied_virtual_address_t ibuf_vied_addr; + vied_virtual_address_t obuf_vied_addr; + + /* if true; secure syscom object as in VTIO Case + * if false, non-secure syscom + */ + bool secure; +}; + +#endif /* __IA_CSS_SYSCOM_CONTEXT_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/syscom/syscom.mk b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/syscom/syscom.mk new file mode 100644 index 0000000000000..8d36b8928af55 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/syscom/syscom.mk @@ -0,0 +1,42 @@ +# # # +# Support for Intel Camera Imaging ISP subsystem. +# Copyright (c) 2010 - 2018, Intel Corporation. +# +# This program is free software; you can redistribute it and/or modify it +# under the terms and conditions of the GNU General Public License, +# version 2, as published by the Free Software Foundation. 
+# +# This program is distributed in the hope it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for +# more details +# +# +# MODULE is SYSCOM + +SYSCOM_DIR=$${MODULES_DIR}/syscom + +SYSCOM_INTERFACE=$(SYSCOM_DIR)/interface +SYSCOM_SOURCES1=$(SYSCOM_DIR)/src + +SYSCOM_HOST_FILES += $(SYSCOM_SOURCES1)/ia_css_syscom.c + +SYSCOM_HOST_CPPFLAGS += -I$(SYSCOM_INTERFACE) +SYSCOM_HOST_CPPFLAGS += -I$(SYSCOM_SOURCES1) +SYSCOM_HOST_CPPFLAGS += -I$${MODULES_DIR}/devices +ifdef REGMEM_SECURE_OFFSET +SYSCOM_HOST_CPPFLAGS += -DREGMEM_SECURE_OFFSET=$(REGMEM_SECURE_OFFSET) +else +SYSCOM_HOST_CPPFLAGS += -DREGMEM_SECURE_OFFSET=0 +endif + +SYSCOM_FW_FILES += $(SYSCOM_SOURCES1)/ia_css_syscom_fw.c + +SYSCOM_FW_CPPFLAGS += -I$(SYSCOM_INTERFACE) +SYSCOM_FW_CPPFLAGS += -I$(SYSCOM_SOURCES1) +SYSCOM_FW_CPPFLAGS += -DREGMEM_OFFSET=$(REGMEM_OFFSET) +ifdef REGMEM_SECURE_OFFSET +SYSCOM_FW_CPPFLAGS += -DREGMEM_SECURE_OFFSET=$(REGMEM_SECURE_OFFSET) +else +SYSCOM_FW_CPPFLAGS += -DREGMEM_SECURE_OFFSET=0 +endif diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/trace/interface/ia_css_trace.h b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/trace/interface/ia_css_trace.h new file mode 100644 index 0000000000000..b85b1810f1070 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/trace/interface/ia_css_trace.h @@ -0,0 +1,883 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU General Public License for + * more details. +*/ + +/*! \file */ + +#ifndef __IA_CSS_TRACE_H +#define __IA_CSS_TRACE_H + +/* +** Configurations +*/ + +/** + * STEP 1: Define {Module Name}_TRACE_METHOD to one of the following. + * Where: + * {Module Name} is the name of the targeted module. + * + * Example: + * #define NCI_DMA_TRACE_METHOD IA_CSS_TRACE_METHOD_NATIVE + */ + +/**< Use whatever method of tracing that best suits the platform + * this code is compiled for. + */ +#define IA_CSS_TRACE_METHOD_NATIVE 1 +/**< Use the Tracing NCI. */ +#define IA_CSS_TRACE_METHOD_TRACE 2 + +/** + * STEP 2: Define {Module Name}_TRACE_LEVEL_{Level} to one of the following. + * Where: + * {Module Name} is the name of the targeted module. + * {Level}, in decreasing order of severity, is one of the + * following values: + * {ASSERT, ERROR, WARNING, INFO, DEBUG, VERBOSE}. + * + * Example: + * #define NCI_DMA_TRACE_LEVEL_ASSERT IA_CSS_TRACE_LEVEL_DISABLED + * #define NCI_DMA_TRACE_LEVEL_ERROR IA_CSS_TRACE_LEVEL_ENABLED + */ +/**< Disables the corresponding trace level. */ +#define IA_CSS_TRACE_LEVEL_DISABLED 0 +/**< Enables the corresponding trace level. */ +#define IA_CSS_TRACE_LEVEL_ENABLED 1 + +/* + * Used in macro definition with do-while loop + * for removing checkpatch warnings + */ +#define IA_CSS_TRACE_FILE_DUMMY_DEFINE + +/** + * STEP 3: Define IA_CSS_TRACE_PRINT_FILE_LINE to have file name and + * line printed with every log message. + * + * Example: + * #define IA_CSS_TRACE_PRINT_FILE_LINE + */ + +/* +** Interface +*/ + +/* +** Static +*/ + +/** + * Logs a message with zero arguments if the targeted severity level is enabled + * at compile-time. + * @param module The targeted module. + * @param severity The severity level of the trace message. In decreasing order: + * {ASSERT, ERROR, WARNING, INFO, DEBUG, VERBOSE}. + * @param format The message to be traced. 
+ */ +#define IA_CSS_TRACE_0(module, severity, format) \ + IA_CSS_TRACE_IMPL(module, 0, severity, format) + +/** + * Logs a message with one argument if the targeted severity level is enabled + * at compile-time. + * @see IA_CSS_TRACE_0 + */ +#define IA_CSS_TRACE_1(module, severity, format, a1) \ + IA_CSS_TRACE_IMPL(module, 1, severity, format, a1) + +/** + * Logs a message with two arguments if the targeted severity level is enabled + * at compile-time. + * @see IA_CSS_TRACE_0 + */ +#define IA_CSS_TRACE_2(module, severity, format, a1, a2) \ + IA_CSS_TRACE_IMPL(module, 2, severity, format, a1, a2) + +/** + * Logs a message with three arguments if the targeted severity level + * is enabled at compile-time. + * @see IA_CSS_TRACE_0 + */ +#define IA_CSS_TRACE_3(module, severity, format, a1, a2, a3) \ + IA_CSS_TRACE_IMPL(module, 3, severity, format, a1, a2, a3) + +/** + * Logs a message with four arguments if the targeted severity level is enabled + * at compile-time. + * @see IA_CSS_TRACE_0 + */ +#define IA_CSS_TRACE_4(module, severity, format, a1, a2, a3, a4) \ + IA_CSS_TRACE_IMPL(module, 4, severity, format, a1, a2, a3, a4) + +/** + * Logs a message with five arguments if the targeted severity level is enabled + * at compile-time. + * @see IA_CSS_TRACE_0 + */ +#define IA_CSS_TRACE_5(module, severity, format, a1, a2, a3, a4, a5) \ + IA_CSS_TRACE_IMPL(module, 5, severity, format, a1, a2, a3, a4, a5) + +/** + * Logs a message with six arguments if the targeted severity level is enabled + * at compile-time. + * @see IA_CSS_TRACE_0 + */ +#define IA_CSS_TRACE_6(module, severity, format, a1, a2, a3, a4, a5, a6) \ + IA_CSS_TRACE_IMPL(module, 6, severity, format, a1, a2, a3, a4, a5, a6) + +/** + * Logs a message with seven arguments if the targeted severity level + * is enabled at compile-time. 
+ * @see IA_CSS_TRACE_0 + */ +#define IA_CSS_TRACE_7(module, severity, format, a1, a2, a3, a4, a5, a6, a7) \ + IA_CSS_TRACE_IMPL(module, 7, severity, format, \ + a1, a2, a3, a4, a5, a6, a7) + +/* +** Dynamic +*/ + +/** +* Declares, but does not define, dynamic tracing functions and variables +* for module \p module. For each module, place an instance of this macro +* in the compilation unit in which you want to use dynamic tracing facility +* so as to inform the compiler of the declaration of the available functions. +* An invocation of this function does not enable any of the available tracing +* levels. Do not place a semicolon after a call to this macro. +* @see IA_CSS_TRACE_DYNAMIC_DEFINE +*/ +#define IA_CSS_TRACE_DYNAMIC_DECLARE(module) \ + IA_CSS_TRACE_DYNAMIC_DECLARE_IMPL(module) +/** +* Declares the configuration function for the dynamic api separately, if one +* wants to use it. +*/ +#define IA_CSS_TRACE_DYNAMIC_DECLARE_CONFIG_FUNC(module) \ + IA_CSS_TRACE_DYNAMIC_DECLARE_CONFIG_FUNC_IMPL(module) + +/** +* Defines dynamic tracing functions and variables for module \p module. +* For each module, place an instance of this macro in one, and only one, +* of your SOURCE files so as to allow the linker to resolve the related symbols. +* An invocation of this macro does not enable any of the available tracing +* levels. Do not place a semicolon after a call to this macro. +* @see IA_CSS_TRACE_DYNAMIC_DECLARE +*/ +#define IA_CSS_TRACE_DYNAMIC_DEFINE(module) \ + IA_CSS_TRACE_DYNAMIC_DEFINE_IMPL(module) +/** +* Defines the configuration function for the dynamic api separately, if one +* wants to use it. +*/ +#define IA_CSS_TRACE_DYNAMIC_DEFINE_CONFIG_FUNC(module) \ + IA_CSS_TRACE_DYNAMIC_DEFINE_CONFIG_FUNC_IMPL(module) + +/** + * Logs a message with zero arguments if the targeted severity level is enabled + * both at compile-time, and run-time. + * @param module The targeted module. + * @param severity The severity level of the trace message. 
In decreasing order: + * {ASSERT, ERROR, WARNING, INFO, DEBUG, VERBOSE}. + * @param format The message to be traced. + */ +#define IA_CSS_TRACE_DYNAMIC_0(module, severity, format) \ + IA_CSS_TRACE_DYNAMIC_IMPL(module, 0, severity, format) + +/** + * Logs a message with one argument if the targeted severity level is enabled + * both at compile-time, and run-time. + * @see IA_CSS_TRACE_DYNAMIC_0 + */ +#define IA_CSS_TRACE_DYNAMIC_1(module, severity, format, a1) \ + IA_CSS_TRACE_DYNAMIC_IMPL(module, 1, severity, format, a1) + +/** + * Logs a message with two arguments if the targeted severity level is enabled + * both at compile-time, and run-time. + * @see IA_CSS_TRACE_DYNAMIC_0 + */ +#define IA_CSS_TRACE_DYNAMIC_2(module, severity, format, a1, a2) \ + IA_CSS_TRACE_DYNAMIC_IMPL(module, 2, severity, format, a1, a2) + +/** + * Logs a message with three arguments if the targeted severity level + * is enabled both at compile-time, and run-time. + * @see IA_CSS_TRACE_DYNAMIC_0 + */ +#define IA_CSS_TRACE_DYNAMIC_3(module, severity, format, a1, a2, a3) \ + IA_CSS_TRACE_DYNAMIC_IMPL(module, 3, severity, format, a1, a2, a3) + +/** + * Logs a message with four arguments if the targeted severity level is enabled + * both at compile-time, and run-time. + * @see IA_CSS_TRACE_DYNAMIC_0 + */ +#define IA_CSS_TRACE_DYNAMIC_4(module, severity, format, a1, a2, a3, a4) \ + IA_CSS_TRACE_DYNAMIC_IMPL(module, 4, severity, format, a1, a2, a3, a4) + +/** + * Logs a message with five arguments if the targeted severity level is enabled + * both at compile-time, and run-time. + * @see IA_CSS_TRACE_DYNAMIC_0 + */ +#define IA_CSS_TRACE_DYNAMIC_5(module, severity, format, a1, a2, a3, a4, a5) \ + IA_CSS_TRACE_DYNAMIC_IMPL(module, 5, severity, format, \ + a1, a2, a3, a4, a5) + +/** + * Logs a message with six arguments if the targeted severity level is enabled + * both at compile-time, and run-time. 
+ * @see IA_CSS_TRACE_DYNAMIC_0 + */ +#define IA_CSS_TRACE_DYNAMIC_6(module, severity, format, \ + a1, a2, a3, a4, a5, a6) \ + IA_CSS_TRACE_DYNAMIC_IMPL(module, 6, severity, format, \ + a1, a2, a3, a4, a5, a6) + +/** + * Logs a message with seven arguments if the targeted severity level + * is enabled both at compile-time, and run-time. + * @see IA_CSS_TRACE_DYNAMIC_0 + */ +#define IA_CSS_TRACE_DYNAMIC_7(module, severity, format, \ + a1, a2, a3, a4, a5, a6, a7) \ + IA_CSS_TRACE_DYNAMIC_IMPL(module, 7, severity, format, \ + a1, a2, a3, a4, a5, a6, a7) + +/* +** Implementation +*/ + +/* CAT */ +#define IA_CSS_TRACE_CAT_IMPL(a, b) a ## b +#define IA_CSS_TRACE_CAT(a, b) IA_CSS_TRACE_CAT_IMPL(a, b) + +/* Bridge */ +#if defined(__HIVECC) || defined(__GNUC__) +#define IA_CSS_TRACE_IMPL(module, argument_count, severity, arguments ...) \ + IA_CSS_TRACE_CAT( \ + IA_CSS_TRACE_CAT( \ + IA_CSS_TRACE_CAT( \ + IA_CSS_TRACE_CAT( \ + IA_CSS_TRACE_CAT( \ + IA_CSS_TRACE_, \ + argument_count \ + ), \ + _ \ + ), \ + IA_CSS_TRACE_CAT( \ + module, \ + _TRACE_METHOD \ + ) \ + ), \ + _ \ + ), \ + IA_CSS_TRACE_CAT( \ + IA_CSS_TRACE_CAT( \ + module, \ + _TRACE_LEVEL_ \ + ), \ + severity \ + ) \ + ( \ + IA_CSS_TRACE_CAT( \ + IA_CSS_TRACE_CAT( \ + IA_CSS_TRACE_CAT( \ + IA_CSS_TRACE_SEVERITY_, \ + severity \ + ), \ + _ \ + ), \ + IA_CSS_TRACE_CAT( \ + module, \ + _TRACE_METHOD \ + ) \ + ), \ + #module, \ + ## arguments \ + ) \ + ) + +/* Bridge */ +#define IA_CSS_TRACE_DYNAMIC_IMPL(module, argument_count, severity, \ + arguments ...) \ + do { \ + if (IA_CSS_TRACE_CAT(IA_CSS_TRACE_CAT(module, _trace_level_), \ + severity)) { \ + IA_CSS_TRACE_IMPL(module, argument_count, severity, \ + ## arguments); \ + } \ + } while (0) +#elif defined(_MSC_VER) +#define IA_CSS_TRACE_IMPL(module, argument_count, severity, ...) 
\ + IA_CSS_TRACE_CAT( \ + IA_CSS_TRACE_CAT( \ + IA_CSS_TRACE_CAT( \ + IA_CSS_TRACE_CAT( \ + IA_CSS_TRACE_CAT( \ + IA_CSS_TRACE_, \ + argument_count \ + ), \ + _ \ + ), \ + IA_CSS_TRACE_CAT( \ + module, \ + _TRACE_METHOD \ + ) \ + ), \ + _ \ + ), \ + IA_CSS_TRACE_CAT( \ + IA_CSS_TRACE_CAT( \ + module, \ + _TRACE_LEVEL_ \ + ), \ + severity \ + ) \ + ( \ + IA_CSS_TRACE_CAT( \ + IA_CSS_TRACE_CAT( \ + IA_CSS_TRACE_CAT( \ + IA_CSS_TRACE_SEVERITY_, \ + severity \ + ), \ + _ \ + ), \ + IA_CSS_TRACE_CAT( \ + module, \ + _TRACE_METHOD \ + ) \ + ), \ + #module, \ + __VA_ARGS__ \ + ) \ + ) + +/* Bridge */ +#define IA_CSS_TRACE_DYNAMIC_IMPL(module, argument_count, severity, ...) \ + do { \ + if (IA_CSS_TRACE_CAT(IA_CSS_TRACE_CAT(module, _trace_level_), \ + severity)) { \ + IA_CSS_TRACE_IMPL(module, argument_count, severity, \ + __VA_ARGS__); \ + } \ + } while (0) +#endif + +/* +** Native Backend +*/ + +#if defined(__HIVECC) + #define IA_CSS_TRACE_PLATFORM_CELL +#elif defined(__GNUC__) + #define IA_CSS_TRACE_PLATFORM_HOST + + #define IA_CSS_TRACE_NATIVE(severity, module, format, arguments ...) \ + do { \ + IA_CSS_TRACE_FILE_PRINT_COMMAND; \ + PRINT(IA_CSS_TRACE_FORMAT_AUG_NATIVE(severity, module, \ + format), ## arguments); \ + } while (0) + /* TODO: In case Host Side tracing is needed to be mapped to the + * Tunit, the following "IA_CSS_TRACE_TRACE" needs to be modified from + * PRINT to vied_nci_tunit_print function calls + */ + #define IA_CSS_TRACE_TRACE(severity, module, format, arguments ...) \ + do { \ + IA_CSS_TRACE_FILE_PRINT_COMMAND; \ + PRINT(IA_CSS_TRACE_FORMAT_AUG_TRACE(severity, module, \ + format), ## arguments); \ + } while (0) + +#elif defined(_MSC_VER) + #define IA_CSS_TRACE_PLATFORM_HOST + + #define IA_CSS_TRACE_NATIVE(severity, module, format, ...) 
\ + do { \ + IA_CSS_TRACE_FILE_PRINT_COMMAND; \ + PRINT(IA_CSS_TRACE_FORMAT_AUG_NATIVE(severity, \ + module, format), __VA_ARGS__); \ + } while (0) + /* TODO: In case Host Side tracing is needed to be mapped to the + * Tunit, the following "IA_CSS_TRACE_TRACE" needs to be modified from + * PRINT to vied_nci_tunit_print function calls + */ + #define IA_CSS_TRACE_TRACE(severity, module, format, ...) \ + do { \ + IA_CSS_TRACE_FILE_PRINT_COMMAND; \ + PRINT(IA_CSS_TRACE_FORMAT_AUG_TRACE(severity, \ + module, format), __VA_ARGS__); \ + } while (0) +#else + #error Unsupported platform! +#endif /* Platform */ + +#if defined(IA_CSS_TRACE_PLATFORM_CELL) + #include /* VOLATILE */ + + #ifdef IA_CSS_TRACE_PRINT_FILE_LINE + #define IA_CSS_TRACE_FILE_PRINT_COMMAND \ + do { \ + OP___printstring(__FILE__":") VOLATILE; \ + OP___printdec(__LINE__) VOLATILE; \ + OP___printstring("\n") VOLATILE; \ + } while (0) + #else + #define IA_CSS_TRACE_FILE_PRINT_COMMAND + #endif + + #define IA_CSS_TRACE_MODULE_SEVERITY_PRINT(module, severity) \ + do { \ + IA_CSS_TRACE_FILE_DUMMY_DEFINE; \ + OP___printstring("["module"]:["severity"]:") \ + VOLATILE; \ + } while (0) + + #define IA_CSS_TRACE_MSG_NATIVE(severity, module, format) \ + do { \ + IA_CSS_TRACE_FILE_PRINT_COMMAND; \ + OP___printstring("["module"]:["severity"]: "format) \ + VOLATILE; \ + } while (0) + + #define IA_CSS_TRACE_ARG_NATIVE(module, severity, i, value) \ + do { \ + IA_CSS_TRACE_MODULE_SEVERITY_PRINT(module, severity); \ + OP___dump(i, value) VOLATILE; \ + } while (0) + + #define IA_CSS_TRACE_NATIVE_0(severity, module, format) \ + IA_CSS_TRACE_MSG_NATIVE(severity, module, format) + + #define IA_CSS_TRACE_NATIVE_1(severity, module, format, a1) \ + do { \ + IA_CSS_TRACE_MSG_NATIVE(severity, module, format); \ + IA_CSS_TRACE_ARG_NATIVE(module, severity, 1, a1); \ + } while (0) + + #define IA_CSS_TRACE_NATIVE_2(severity, module, format, a1, a2) \ + do { \ + IA_CSS_TRACE_MSG_NATIVE(severity, module, format); \ + 
IA_CSS_TRACE_ARG_NATIVE(module, severity, 1, a1); \ + IA_CSS_TRACE_ARG_NATIVE(module, severity, 2, a2); \ + } while (0) + + #define IA_CSS_TRACE_NATIVE_3(severity, module, format, a1, a2, a3) \ + do { \ + IA_CSS_TRACE_MSG_NATIVE(severity, module, format); \ + IA_CSS_TRACE_ARG_NATIVE(module, severity, 1, a1); \ + IA_CSS_TRACE_ARG_NATIVE(module, severity, 2, a2); \ + IA_CSS_TRACE_ARG_NATIVE(module, severity, 3, a3); \ + } while (0) + + #define IA_CSS_TRACE_NATIVE_4(severity, module, format, \ + a1, a2, a3, a4) \ + do { \ + IA_CSS_TRACE_MSG_NATIVE(severity, module, format); \ + IA_CSS_TRACE_ARG_NATIVE(module, severity, 1, a1); \ + IA_CSS_TRACE_ARG_NATIVE(module, severity, 2, a2); \ + IA_CSS_TRACE_ARG_NATIVE(module, severity, 3, a3); \ + IA_CSS_TRACE_ARG_NATIVE(module, severity, 4, a4); \ + } while (0) + + #define IA_CSS_TRACE_NATIVE_5(severity, module, format, \ + a1, a2, a3, a4, a5) \ + do { \ + IA_CSS_TRACE_MSG_NATIVE(severity, module, format); \ + IA_CSS_TRACE_ARG_NATIVE(module, severity, 1, a1); \ + IA_CSS_TRACE_ARG_NATIVE(module, severity, 2, a2); \ + IA_CSS_TRACE_ARG_NATIVE(module, severity, 3, a3); \ + IA_CSS_TRACE_ARG_NATIVE(module, severity, 4, a4); \ + IA_CSS_TRACE_ARG_NATIVE(module, severity, 5, a5); \ + } while (0) + + #define IA_CSS_TRACE_NATIVE_6(severity, module, format, \ + a1, a2, a3, a4, a5, a6) \ + do { \ + IA_CSS_TRACE_MSG_NATIVE(severity, module, format); \ + IA_CSS_TRACE_ARG_NATIVE(module, severity, 1, a1); \ + IA_CSS_TRACE_ARG_NATIVE(module, severity, 2, a2); \ + IA_CSS_TRACE_ARG_NATIVE(module, severity, 3, a3); \ + IA_CSS_TRACE_ARG_NATIVE(module, severity, 4, a4); \ + IA_CSS_TRACE_ARG_NATIVE(module, severity, 5, a5); \ + IA_CSS_TRACE_ARG_NATIVE(module, severity, 6, a6); \ + } while (0) + + #define IA_CSS_TRACE_NATIVE_7(severity, module, format, \ + a1, a2, a3, a4, a5, a6, a7) \ + do { \ + IA_CSS_TRACE_MSG_NATIVE(severity, module, format); \ + IA_CSS_TRACE_ARG_NATIVE(module, severity, 1, a1); \ + IA_CSS_TRACE_ARG_NATIVE(module, severity, 2, a2); 
\ + IA_CSS_TRACE_ARG_NATIVE(module, severity, 3, a3); \ + IA_CSS_TRACE_ARG_NATIVE(module, severity, 4, a4); \ + IA_CSS_TRACE_ARG_NATIVE(module, severity, 5, a5); \ + IA_CSS_TRACE_ARG_NATIVE(module, severity, 6, a6); \ + IA_CSS_TRACE_ARG_NATIVE(module, severity, 7, a7); \ + } while (0) + /* + ** Tracing Backend + */ +#if !defined(HRT_CSIM) && !defined(NO_TUNIT) + #include "vied_nci_tunit.h" +#endif + #define IA_CSS_TRACE_AUG_FORMAT_TRACE(format, module) \ + "[" module "]" format " : PID = %x : Timestamp = %d : PC = %x" + + #define IA_CSS_TRACE_TRACE_0(severity, module, format) \ + vied_nci_tunit_print(IA_CSS_TRACE_AUG_FORMAT_TRACE(format, \ + module), \ + severity) + + #define IA_CSS_TRACE_TRACE_1(severity, module, format, a1) \ + vied_nci_tunit_print1i(IA_CSS_TRACE_AUG_FORMAT_TRACE(format, \ + module), \ + severity, a1) + + #define IA_CSS_TRACE_TRACE_2(severity, module, format, a1, a2) \ + vied_nci_tunit_print2i(IA_CSS_TRACE_AUG_FORMAT_TRACE(format, \ + module), \ + severity, a1, a2) + + #define IA_CSS_TRACE_TRACE_3(severity, module, format, a1, a2, a3) \ + vied_nci_tunit_print3i(IA_CSS_TRACE_AUG_FORMAT_TRACE(format, \ + module), \ + severity, a1, a2, a3) + + #define IA_CSS_TRACE_TRACE_4(severity, module, format, a1, a2, a3, a4) \ + vied_nci_tunit_print4i(IA_CSS_TRACE_AUG_FORMAT_TRACE(format, \ + module), \ + severity, a1, a2, a3, a4) + + #define IA_CSS_TRACE_TRACE_5(severity, module, format, \ + a1, a2, a3, a4, a5) \ + vied_nci_tunit_print5i(IA_CSS_TRACE_AUG_FORMAT_TRACE(format, \ + module), \ + severity, a1, a2, a3, a4, a5) + + #define IA_CSS_TRACE_TRACE_6(severity, module, format, \ + a1, a2, a3, a4, a5, a6) \ + vied_nci_tunit_print6i(IA_CSS_TRACE_AUG_FORMAT_TRACE(format, \ + module), \ + severity, a1, a2, a3, a4, a5, a6) + + #define IA_CSS_TRACE_TRACE_7(severity, module, format, \ + a1, a2, a3, a4, a5, a6, a7) \ + vied_nci_tunit_print7i(IA_CSS_TRACE_AUG_FORMAT_TRACE(format, \ + module), \ + severity, a1, a2, a3, a4, a5, a6, a7) + +#elif 
defined(IA_CSS_TRACE_PLATFORM_HOST) + #include "print_support.h" + + #ifdef IA_CSS_TRACE_PRINT_FILE_LINE + #define IA_CSS_TRACE_FILE_PRINT_COMMAND \ + PRINT("%s:%d:\n", __FILE__, __LINE__) + #else + #define IA_CSS_TRACE_FILE_PRINT_COMMAND + #endif + + #define IA_CSS_TRACE_FORMAT_AUG_NATIVE(severity, module, format) \ + "[" module "]:[" severity "]: " format + + #define IA_CSS_TRACE_NATIVE_0(severity, module, format) \ + IA_CSS_TRACE_NATIVE(severity, module, format) + + #define IA_CSS_TRACE_NATIVE_1(severity, module, format, a1) \ + IA_CSS_TRACE_NATIVE(severity, module, format, a1) + + #define IA_CSS_TRACE_NATIVE_2(severity, module, format, a1, a2) \ + IA_CSS_TRACE_NATIVE(severity, module, format, a1, a2) + + #define IA_CSS_TRACE_NATIVE_3(severity, module, format, a1, a2, a3) \ + IA_CSS_TRACE_NATIVE(severity, module, format, a1, a2, a3) + + #define IA_CSS_TRACE_NATIVE_4(severity, module, format, \ + a1, a2, a3, a4) \ + IA_CSS_TRACE_NATIVE(severity, module, format, a1, a2, a3, a4) + + #define IA_CSS_TRACE_NATIVE_5(severity, module, format, \ + a1, a2, a3, a4, a5) \ + IA_CSS_TRACE_NATIVE(severity, module, format, \ + a1, a2, a3, a4, a5) + + #define IA_CSS_TRACE_NATIVE_6(severity, module, format, \ + a1, a2, a3, a4, a5, a6) \ + IA_CSS_TRACE_NATIVE(severity, module, format, \ + a1, a2, a3, a4, a5, a6) + + #define IA_CSS_TRACE_NATIVE_7(severity, module, format, \ + a1, a2, a3, a4, a5, a6, a7) \ + IA_CSS_TRACE_NATIVE(severity, module, format, \ + a1, a2, a3, a4, a5, a6, a7) + + #define IA_CSS_TRACE_FORMAT_AUG_TRACE(severity, module, format) \ + "["module"]:["severity"]: "format + + #define IA_CSS_TRACE_TRACE_0(severity, module, format) \ + IA_CSS_TRACE_TRACE(severity, module, format) + + #define IA_CSS_TRACE_TRACE_1(severity, module, format, a1) \ + IA_CSS_TRACE_TRACE(severity, module, format, a1) + + #define IA_CSS_TRACE_TRACE_2(severity, module, format, a1, a2) \ + IA_CSS_TRACE_TRACE(severity, module, format, a1, a2) + + #define IA_CSS_TRACE_TRACE_3(severity, module, 
format, a1, a2, a3) \ + IA_CSS_TRACE_TRACE(severity, module, format, a1, a2, a3) + + #define IA_CSS_TRACE_TRACE_4(severity, module, format, \ + a1, a2, a3, a4) \ + IA_CSS_TRACE_TRACE(severity, module, format, a1, a2, a3, a4) + + #define IA_CSS_TRACE_TRACE_5(severity, module, format, \ + a1, a2, a3, a4, a5) \ + IA_CSS_TRACE_TRACE(severity, module, format, \ + a1, a2, a3, a4, a5) + + #define IA_CSS_TRACE_TRACE_6(severity, module, format, \ + a1, a2, a3, a4, a5, a6) \ + IA_CSS_TRACE_TRACE(severity, module, format, \ + a1, a2, a3, a4, a5, a6) + + #define IA_CSS_TRACE_TRACE_7(severity, module, format, \ + a1, a2, a3, a4, a5, a6, a7) \ + IA_CSS_TRACE_TRACE(severity, module, format, \ + a1, a2, a3, a4, a5, a6, a7) +#endif + +/* Disabled */ +/* Legend: IA_CSS_TRACE_{Argument Count}_{Backend ID}_{Enabled} */ +#define IA_CSS_TRACE_0_1_0(severity, module, format) +#define IA_CSS_TRACE_1_1_0(severity, module, format, arg1) +#define IA_CSS_TRACE_2_1_0(severity, module, format, arg1, arg2) +#define IA_CSS_TRACE_3_1_0(severity, module, format, arg1, arg2, arg3) +#define IA_CSS_TRACE_4_1_0(severity, module, format, arg1, arg2, arg3, arg4) +#define IA_CSS_TRACE_5_1_0(severity, module, format, arg1, arg2, arg3, arg4, \ + arg5) +#define IA_CSS_TRACE_6_1_0(severity, module, format, arg1, arg2, arg3, arg4, \ + arg5, arg6) +#define IA_CSS_TRACE_7_1_0(severity, module, format, arg1, arg2, arg3, arg4, \ + arg5, arg6, arg7) + +/* Enabled */ +/* Legend: IA_CSS_TRACE_{Argument Count}_{Backend ID}_{Enabled} */ +#define IA_CSS_TRACE_0_1_1 IA_CSS_TRACE_NATIVE_0 +#define IA_CSS_TRACE_1_1_1 IA_CSS_TRACE_NATIVE_1 +#define IA_CSS_TRACE_2_1_1 IA_CSS_TRACE_NATIVE_2 +#define IA_CSS_TRACE_3_1_1 IA_CSS_TRACE_NATIVE_3 +#define IA_CSS_TRACE_4_1_1 IA_CSS_TRACE_NATIVE_4 +#define IA_CSS_TRACE_5_1_1 IA_CSS_TRACE_NATIVE_5 +#define IA_CSS_TRACE_6_1_1 IA_CSS_TRACE_NATIVE_6 +#define IA_CSS_TRACE_7_1_1 IA_CSS_TRACE_NATIVE_7 + +/* Enabled */ +/* Legend: IA_CSS_TRACE_SEVERITY_{Severity Level}_{Backend ID} */ 
+#define IA_CSS_TRACE_SEVERITY_ASSERT_1 "Assert" +#define IA_CSS_TRACE_SEVERITY_ERROR_1 "Error" +#define IA_CSS_TRACE_SEVERITY_WARNING_1 "Warning" +#define IA_CSS_TRACE_SEVERITY_INFO_1 "Info" +#define IA_CSS_TRACE_SEVERITY_DEBUG_1 "Debug" +#define IA_CSS_TRACE_SEVERITY_VERBOSE_1 "Verbose" + +/* Disabled */ +/* Legend: IA_CSS_TRACE_{Argument Count}_{Backend ID}_{Enabled} */ +#define IA_CSS_TRACE_0_2_0(severity, module, format) +#define IA_CSS_TRACE_1_2_0(severity, module, format, arg1) +#define IA_CSS_TRACE_2_2_0(severity, module, format, arg1, arg2) +#define IA_CSS_TRACE_3_2_0(severity, module, format, arg1, arg2, arg3) +#define IA_CSS_TRACE_4_2_0(severity, module, format, arg1, arg2, arg3, arg4) +#define IA_CSS_TRACE_5_2_0(severity, module, format, arg1, arg2, arg3, arg4, \ + arg5) +#define IA_CSS_TRACE_6_2_0(severity, module, format, arg1, arg2, arg3, arg4, \ + arg5, arg6) +#define IA_CSS_TRACE_7_2_0(severity, module, format, arg1, arg2, arg3, arg4, \ + arg5, arg6, arg7) + +/* Enabled */ +/* Legend: IA_CSS_TRACE_{Argument Count}_{Backend ID}_{Enabled} */ +#define IA_CSS_TRACE_0_2_1 IA_CSS_TRACE_TRACE_0 +#define IA_CSS_TRACE_1_2_1 IA_CSS_TRACE_TRACE_1 +#define IA_CSS_TRACE_2_2_1 IA_CSS_TRACE_TRACE_2 +#define IA_CSS_TRACE_3_2_1 IA_CSS_TRACE_TRACE_3 +#define IA_CSS_TRACE_4_2_1 IA_CSS_TRACE_TRACE_4 +#define IA_CSS_TRACE_5_2_1 IA_CSS_TRACE_TRACE_5 +#define IA_CSS_TRACE_6_2_1 IA_CSS_TRACE_TRACE_6 +#define IA_CSS_TRACE_7_2_1 IA_CSS_TRACE_TRACE_7 + +/* Enabled */ +/* Legend: IA_CSS_TRACE_SEVERITY_{Severity Level}_{Backend ID} */ +#define IA_CSS_TRACE_SEVERITY_ASSERT_2 VIED_NCI_TUNIT_MSG_SEVERITY_FATAL +#define IA_CSS_TRACE_SEVERITY_ERROR_2 VIED_NCI_TUNIT_MSG_SEVERITY_ERROR +#define IA_CSS_TRACE_SEVERITY_WARNING_2 VIED_NCI_TUNIT_MSG_SEVERITY_WARNING +#define IA_CSS_TRACE_SEVERITY_INFO_2 VIED_NCI_TUNIT_MSG_SEVERITY_NORMAL +#define IA_CSS_TRACE_SEVERITY_DEBUG_2 VIED_NCI_TUNIT_MSG_SEVERITY_USER1 +#define IA_CSS_TRACE_SEVERITY_VERBOSE_2 VIED_NCI_TUNIT_MSG_SEVERITY_USER2 + +/* 
+** Dynamicism +*/ + +#define IA_CSS_TRACE_DYNAMIC_DECLARE_IMPL(module) \ + do { \ + void IA_CSS_TRACE_CAT(module, _trace_assert_enable)(void); \ + void IA_CSS_TRACE_CAT(module, _trace_assert_disable)(void); \ + void IA_CSS_TRACE_CAT(module, _trace_error_enable)(void); \ + void IA_CSS_TRACE_CAT(module, _trace_error_disable)(void); \ + void IA_CSS_TRACE_CAT(module, _trace_warning_enable)(void); \ + void IA_CSS_TRACE_CAT(module, _trace_warning_disable)(void); \ + void IA_CSS_TRACE_CAT(module, _trace_info_enable)(void); \ + void IA_CSS_TRACE_CAT(module, _trace_info_disable)(void); \ + void IA_CSS_TRACE_CAT(module, _trace_debug_enable)(void); \ + void IA_CSS_TRACE_CAT(module, _trace_debug_disable)(void); \ + void IA_CSS_TRACE_CAT(module, _trace_verbose_enable)(void); \ + void IA_CSS_TRACE_CAT(module, _trace_verbose_disable)(void); \ + } while (0) + +#define IA_CSS_TRACE_DYNAMIC_DECLARE_CONFIG_FUNC_IMPL(module) \ + do { \ + IA_CSS_TRACE_FILE_DUMMY_DEFINE; \ + void IA_CSS_TRACE_CAT(module, _trace_configure)\ + (int argc, const char *const *argv); \ + } while (0) + +#include "platform_support.h" +#include "type_support.h" + +#define IA_CSS_TRACE_DYNAMIC_DEFINE_IMPL(module) \ + static uint8_t IA_CSS_TRACE_CAT(module, _trace_level_assert); \ + static uint8_t IA_CSS_TRACE_CAT(module, _trace_level_error); \ + static uint8_t IA_CSS_TRACE_CAT(module, _trace_level_warning); \ + static uint8_t IA_CSS_TRACE_CAT(module, _trace_level_info); \ + static uint8_t IA_CSS_TRACE_CAT(module, _trace_level_debug); \ + static uint8_t IA_CSS_TRACE_CAT(module, _trace_level_verbose); \ + \ + void IA_CSS_TRACE_CAT(module, _trace_assert_enable)(void) \ + { \ + IA_CSS_TRACE_CAT(module, _trace_level_assert) = 1; \ + } \ + \ + void IA_CSS_TRACE_CAT(module, _trace_assert_disable)(void) \ + { \ + IA_CSS_TRACE_CAT(module, _trace_level_assert) = 0; \ + } \ + \ + void IA_CSS_TRACE_CAT(module, _trace_error_enable)(void) \ + { \ + IA_CSS_TRACE_CAT(module, _trace_level_error) = 1; \ + } \ + \ + void 
IA_CSS_TRACE_CAT(module, _trace_error_disable)(void) \ + { \ + IA_CSS_TRACE_CAT(module, _trace_level_error) = 0; \ + } \ + \ + void IA_CSS_TRACE_CAT(module, _trace_warning_enable)(void) \ + { \ + IA_CSS_TRACE_CAT(module, _trace_level_warning) = 1; \ + } \ + \ + void IA_CSS_TRACE_CAT(module, _trace_warning_disable)(void) \ + { \ + IA_CSS_TRACE_CAT(module, _trace_level_warning) = 0; \ + } \ + \ + void IA_CSS_TRACE_CAT(module, _trace_info_enable)(void) \ + { \ + IA_CSS_TRACE_CAT(module, _trace_level_info) = 1; \ + } \ + \ + void IA_CSS_TRACE_CAT(module, _trace_info_disable)(void) \ + { \ + IA_CSS_TRACE_CAT(module, _trace_level_info) = 0; \ + } \ + \ + void IA_CSS_TRACE_CAT(module, _trace_debug_enable)(void) \ + { \ + IA_CSS_TRACE_CAT(module, _trace_level_debug) = 1; \ + } \ + \ + void IA_CSS_TRACE_CAT(module, _trace_debug_disable)(void) \ + { \ + IA_CSS_TRACE_CAT(module, _trace_level_debug) = 0; \ + } \ + \ + void IA_CSS_TRACE_CAT(module, _trace_verbose_enable)(void) \ + { \ + IA_CSS_TRACE_CAT(module, _trace_level_verbose) = 1; \ + } \ + \ + void IA_CSS_TRACE_CAT(module, _trace_verbose_disable)(void) \ + { \ + IA_CSS_TRACE_CAT(module, _trace_level_verbose) = 0; \ + } + +#define IA_CSS_TRACE_DYNAMIC_DEFINE_CONFIG_FUNC_IMPL(module) \ +void IA_CSS_TRACE_CAT(module, _trace_configure)(const int argc, \ + const char *const *const argv) \ +{ \ + int i = 1; \ + const char *levels = 0; \ + \ + while (i < argc) { \ + if (!strcmp(argv[i], "-" #module "_trace")) { \ + ++i; \ + \ + if (i < argc) { \ + levels = argv[i]; \ + \ + while (*levels) { \ + switch (*levels++) { \ + case 'a': \ + IA_CSS_TRACE_CAT \ + (module, _trace_assert_enable)(); \ + break; \ + \ + case 'e': \ + IA_CSS_TRACE_CAT \ + (module, _trace_error_enable)(); \ + break; \ + \ + case 'w': \ + IA_CSS_TRACE_CAT \ + (module, _trace_warning_enable)(); \ + break; \ + \ + case 'i': \ + IA_CSS_TRACE_CAT \ + (module, _trace_info_enable)(); \ + break; \ + \ + case 'd': \ + IA_CSS_TRACE_CAT \ + (module, 
_trace_debug_enable)(); \ + break; \ + \ + case 'v': \ + IA_CSS_TRACE_CAT \ + (module, _trace_verbose_enable)(); \ + break; \ + \ + default: \ + } \ + } \ + } \ + } \ + \ + ++i; \ + } \ +} + +#endif /* __IA_CSS_TRACE_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/trace/trace.mk b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/trace/trace.mk new file mode 100644 index 0000000000000..b232880b882bd --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/trace/trace.mk @@ -0,0 +1,40 @@ +# # # +# Support for Intel Camera Imaging ISP subsystem. +# Copyright (c) 2010 - 2018, Intel Corporation. +# +# This program is free software; you can redistribute it and/or modify it +# under the terms and conditions of the GNU General Public License, +# version 2, as published by the Free Software Foundation. +# +# This program is distributed in the hope it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +# FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU General Public License for +# more details +# +# +# MODULE Trace + +# Dependencies +IA_CSS_TRACE_SUPPORT = $${MODULES_DIR}/support + +# API +IA_CSS_TRACE = $${MODULES_DIR}/trace +IA_CSS_TRACE_INTERFACE = $(IA_CSS_TRACE)/interface + +# +# Host +# + +# Host CPP Flags +IA_CSS_TRACE_HOST_CPPFLAGS += -I$(IA_CSS_TRACE_SUPPORT) +IA_CSS_TRACE_HOST_CPPFLAGS += -I$(IA_CSS_TRACE_INTERFACE) +IA_CSS_TRACE_HOST_CPPFLAGS += -I$(IA_CSS_TRACE)/trace_modules + +# +# Firmware +# + +# Firmware CPP Flags +IA_CSS_TRACE_FW_CPPFLAGS += -I$(IA_CSS_TRACE_SUPPORT) +IA_CSS_TRACE_FW_CPPFLAGS += -I$(IA_CSS_TRACE_INTERFACE) +IA_CSS_TRACE_FW_CPPFLAGS += -I$(IA_CSS_TRACE)/trace_modules diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/utils/system_defs/system_const.h b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/utils/system_defs/system_const.h new file mode 100644 index 0000000000000..161f28fced973 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/utils/system_defs/system_const.h @@ -0,0 +1,26 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+*/ + +#ifndef __SYSTEM_CONST_H +#define __SYSTEM_CONST_H + +/* The values included in this file should have been + * taken from system/device properties which + * are not currently available in SDK + */ + +#define XMEM_WIDTH (512) +#define MG_PPC (4) + +#endif /* __SYSTEM_CONST_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/vied/vied/shared_memory_access.h b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/vied/vied/shared_memory_access.h new file mode 100755 index 0000000000000..1e81bad9f4eec --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/vied/vied/shared_memory_access.h @@ -0,0 +1,139 @@ +/* +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ +#ifndef _SHARED_MEMORY_ACCESS_H +#define _SHARED_MEMORY_ACCESS_H + +#include +#include +#include + +typedef enum { + sm_esuccess, + sm_enomem, + sm_ezeroalloc, + sm_ebadvaddr, + sm_einternalerror, + sm_ecorruption, + sm_enocontiguousmem, + sm_enolocmem, + sm_emultiplefree, +} shared_memory_error; + +/** + * \brief Virtual address of (DDR) shared memory space as seen from the VIED subsystem + */ +typedef uint32_t vied_virtual_address_t; + +/** + * \brief Virtual address of (DDR) shared memory space as seen from the host + */ +typedef unsigned long long host_virtual_address_t; + +/** + * \brief List of physical addresses of (DDR) shared memory space. This is used to represent a list of physical pages. 
+ */ +typedef struct shared_memory_physical_page_list_s *shared_memory_physical_page_list; +typedef struct shared_memory_physical_page_list_s +{ + shared_memory_physical_page_list next; + vied_physical_address_t address; +}shared_memory_physical_page_list_s; + + +/** + * \brief Initialize the shared memory interface administration on the host. + * \param idm: id of ddr memory + * \param host_ddr_addr: physical address of memory as seen from host + * \param memory_size: size of ddr memory in bytes + * \param ps: size of page in bytes (for instance 4096) + */ +int shared_memory_allocation_initialize(vied_memory_t idm, vied_physical_address_t host_ddr_addr, size_t memory_size, size_t ps); + +/** + * \brief De-initialize the shared memory interface administration on the host. + * + */ +void shared_memory_allocation_uninitialize(vied_memory_t idm); + +/** + * \brief Allocate (DDR) shared memory space and return a host virtual address. Returns NULL when insufficient memory available + */ +host_virtual_address_t shared_memory_alloc(vied_memory_t idm, size_t bytes); + +/** + * \brief Free (DDR) shared memory space. +*/ +void shared_memory_free(vied_memory_t idm, host_virtual_address_t addr); + +/** + * \brief Translate a virtual host.address to a physical address. +*/ +vied_physical_address_t shared_memory_virtual_host_to_physical_address (vied_memory_t idm, host_virtual_address_t addr); + +/** + * \brief Return the allocated physical pages for a virtual host.address. +*/ +shared_memory_physical_page_list shared_memory_virtual_host_to_physical_pages (vied_memory_t idm, host_virtual_address_t addr); + +/** + * \brief Destroy a shared_memory_physical_page_list. 
+*/ +void shared_memory_physical_pages_list_destroy (shared_memory_physical_page_list ppl); + +/** + * \brief Store a byte into (DDR) shared memory space using a host virtual address + */ +void shared_memory_store_8 (vied_memory_t idm, host_virtual_address_t addr, uint8_t data); + +/** + * \brief Store a 16-bit word into (DDR) shared memory space using a host virtual address + */ +void shared_memory_store_16(vied_memory_t idm, host_virtual_address_t addr, uint16_t data); + +/** + * \brief Store a 32-bit word into (DDR) shared memory space using a host virtual address + */ +void shared_memory_store_32(vied_memory_t idm, host_virtual_address_t addr, uint32_t data); + +/** + * \brief Store a number of bytes into (DDR) shared memory space using a host virtual address + */ +void shared_memory_store(vied_memory_t idm, host_virtual_address_t addr, const void *data, size_t bytes); + +/** + * \brief Set a number of bytes of (DDR) shared memory space to 0 using a host virtual address + */ +void shared_memory_zero(vied_memory_t idm, host_virtual_address_t addr, size_t bytes); + +/** + * \brief Load a byte from (DDR) shared memory space using a host virtual address + */ +uint8_t shared_memory_load_8 (vied_memory_t idm, host_virtual_address_t addr); + +/** + * \brief Load a 16-bit word from (DDR) shared memory space using a host virtual address + */ +uint16_t shared_memory_load_16(vied_memory_t idm, host_virtual_address_t addr); + +/** + * \brief Load a 32-bit word from (DDR) shared memory space using a host virtual address + */ +uint32_t shared_memory_load_32(vied_memory_t idm, host_virtual_address_t addr); + +/** + * \brief Load a number of bytes from (DDR) shared memory space using a host virtual address + */ +void shared_memory_load(vied_memory_t idm, host_virtual_address_t addr, void *data, size_t bytes); + +#endif /* _SHARED_MEMORY_ACCESS_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/vied/vied/shared_memory_map.h 
b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/vied/vied/shared_memory_map.h new file mode 100755 index 0000000000000..1bbedcf9e7fd8 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/vied/vied/shared_memory_map.h @@ -0,0 +1,53 @@ +/* +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ +#ifndef _SHARED_MEMORY_MAP_H +#define _SHARED_MEMORY_MAP_H + +#include +#include +#include + +typedef void (*shared_memory_invalidate_mmu_tlb)(void); +typedef void (*shared_memory_set_page_table_base_address)(vied_physical_address_t); + +typedef void (*shared_memory_invalidate_mmu_tlb_ssid)(vied_subsystem_t id); +typedef void (*shared_memory_set_page_table_base_address_ssid)(vied_subsystem_t id, vied_physical_address_t); + +/** + * \brief Initialize the CSS virtual address system and MMU. The subsystem id will NOT be taken into account. +*/ +int shared_memory_map_initialize(vied_subsystem_t id, vied_memory_t idm, size_t mmu_ps, size_t mmu_pnrs, vied_physical_address_t ddr_addr, shared_memory_invalidate_mmu_tlb inv_tlb, shared_memory_set_page_table_base_address sbt); + +/** + * \brief Initialize the CSS virtual address system and MMU. The subsystem id will be taken into account. 
+*/ +int shared_memory_map_initialize_ssid(vied_subsystem_t id, vied_memory_t idm, size_t mmu_ps, size_t mmu_pnrs, vied_physical_address_t ddr_addr, shared_memory_invalidate_mmu_tlb_ssid inv_tlb, shared_memory_set_page_table_base_address_ssid sbt); + +/** + * \brief De-initialize the CSS virtual address system and MMU. +*/ +void shared_memory_map_uninitialize(vied_subsystem_t id, vied_memory_t idm); + +/** + * \brief Convert a host virtual address to a CSS virtual address and update the MMU. +*/ +vied_virtual_address_t shared_memory_map(vied_subsystem_t id, vied_memory_t idm, host_virtual_address_t addr); + +/** + * \brief Free a CSS virtual address and update the MMU. +*/ +void shared_memory_unmap(vied_subsystem_t id, vied_memory_t idm, vied_virtual_address_t addr); + + +#endif /* _SHARED_MEMORY_MAP_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/vied/vied/vied_config.h b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/vied/vied/vied_config.h new file mode 100755 index 0000000000000..912f016ead241 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/vied/vied/vied_config.h @@ -0,0 +1,33 @@ +/* +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+*/ +#ifndef _HRT_VIED_CONFIG_H +#define _HRT_VIED_CONFIG_H + +/* Defines from the compiler: + * HRT_HOST - this is code running on the host + * HRT_CELL - this is code running on a cell + */ +#ifdef HRT_HOST +# define CFG_VIED_SUBSYSTEM_ACCESS_LIB_IMPL 1 +# undef CFG_VIED_SUBSYSTEM_ACCESS_INLINE_IMPL + +#elif defined (HRT_CELL) +# undef CFG_VIED_SUBSYSTEM_ACCESS_LIB_IMPL +# define CFG_VIED_SUBSYSTEM_ACCESS_INLINE_IMPL 1 + +#else /* !HRT_CELL */ +/* Allow neither HRT_HOST nor HRT_CELL for testing purposes */ +#endif /* !HRT_CELL */ + +#endif /* _HRT_VIED_CONFIG_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/vied/vied/vied_memory_access_types.h b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/vied/vied/vied_memory_access_types.h new file mode 100755 index 0000000000000..0b44492789e37 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/vied/vied/vied_memory_access_types.h @@ -0,0 +1,36 @@ +/* +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ +#ifndef _HRT_VIED_MEMORY_ACCESS_TYPES_H +#define _HRT_VIED_MEMORY_ACCESS_TYPES_H + +/** Types for the VIED memory access interface */ + +#include "vied_types.h" + +/** + * \brief An identifier for a system memory. + * + * This identifier must be a compile-time constant. It is used in + * access to system memory. 
+ */ +typedef unsigned int vied_memory_t; + +#ifndef __HIVECC +/** + * \brief The type for a physical address + */ +typedef unsigned long long vied_physical_address_t; +#endif + +#endif /* _HRT_VIED_MEMORY_ACCESS_TYPES_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/vied/vied/vied_subsystem_access.h b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/vied/vied/vied_subsystem_access.h new file mode 100755 index 0000000000000..674f5fb5b0f99 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/vied/vied/vied_subsystem_access.h @@ -0,0 +1,70 @@ +/* +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+*/ +#ifndef _HRT_VIED_SUBSYSTEM_ACCESS_H +#define _HRT_VIED_SUBSYSTEM_ACCESS_H + +#include +#include "vied_config.h" +#include "vied_subsystem_access_types.h" + +#if !defined(CFG_VIED_SUBSYSTEM_ACCESS_INLINE_IMPL) && \ + !defined(CFG_VIED_SUBSYSTEM_ACCESS_LIB_IMPL) +#error Implementation selection macro for vied subsystem access not defined +#endif + +#if defined(CFG_VIED_SUBSYSTEM_ACCESS_INLINE_IMPL) +#ifndef __HIVECC +#error "Inline implementation of subsystem access not supported for host" +#endif +#define _VIED_SUBSYSTEM_ACCESS_INLINE static __inline +#include "vied_subsystem_access_impl.h" +#else +#define _VIED_SUBSYSTEM_ACCESS_INLINE +#endif + +_VIED_SUBSYSTEM_ACCESS_INLINE +void vied_subsystem_store_8 (vied_subsystem_t dev, + vied_subsystem_address_t addr, uint8_t data); + +_VIED_SUBSYSTEM_ACCESS_INLINE +void vied_subsystem_store_16(vied_subsystem_t dev, + vied_subsystem_address_t addr, uint16_t data); + +_VIED_SUBSYSTEM_ACCESS_INLINE +void vied_subsystem_store_32(vied_subsystem_t dev, + vied_subsystem_address_t addr, uint32_t data); + +_VIED_SUBSYSTEM_ACCESS_INLINE +void vied_subsystem_store(vied_subsystem_t dev, + vied_subsystem_address_t addr, + const void *data, unsigned int size); + +_VIED_SUBSYSTEM_ACCESS_INLINE +uint8_t vied_subsystem_load_8 (vied_subsystem_t dev, + vied_subsystem_address_t addr); + +_VIED_SUBSYSTEM_ACCESS_INLINE +uint16_t vied_subsystem_load_16(vied_subsystem_t dev, + vied_subsystem_address_t addr); + +_VIED_SUBSYSTEM_ACCESS_INLINE +uint32_t vied_subsystem_load_32(vied_subsystem_t dev, + vied_subsystem_address_t addr); + +_VIED_SUBSYSTEM_ACCESS_INLINE +void vied_subsystem_load(vied_subsystem_t dev, + vied_subsystem_address_t addr, + void *data, unsigned int size); + +#endif /* _HRT_VIED_SUBSYSTEM_ACCESS_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/vied/vied/vied_subsystem_access_initialization.h b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/vied/vied/vied_subsystem_access_initialization.h new file mode 100755 
index 0000000000000..81f4d08d5ae0e --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/vied/vied/vied_subsystem_access_initialization.h @@ -0,0 +1,44 @@ +/* +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ +#ifndef _HRT_VIED_SUBSYSTEM_ACCESS_INITIALIZE_H +#define _HRT_VIED_SUBSYSTEM_ACCESS_INITIALIZE_H + +#include "vied_subsystem_access_types.h" + +/** @brief Initialises the access of a subsystem. + * @param[in] system The subsystem for which the access has to be initialised. + * + * vied_subsystem_access_initialize initilalises the access a subsystem. + * It sets the base address of the subsystem. This base address is extracted from the hsd file. + * + */ +void +vied_subsystem_access_initialize(vied_subsystem_t system); + + +/** @brief Initialises the access of multiple subsystems. + * @param[in] nr _subsystems The number of subsystems for which the access has to be initialised. + * @param[in] dev_base_addresses A pointer to an array of base addresses of subsystems. + * The size of this array must be "nr_subsystems". + * This array must be available during the accesses of the subsystem. + * + * vied_subsystems_access_initialize initilalises the access to multiple subsystems. + * It sets the base addresses of the subsystems that are provided by the array dev_base_addresses. 
+ * + */ +void +vied_subsystems_access_initialize( unsigned int nr_subsystems + , const vied_subsystem_base_address_t *base_addresses); + +#endif /* _HRT_VIED_SUBSYSTEM_ACCESS_INITIALIZE_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/vied/vied/vied_subsystem_access_types.h b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/vied/vied/vied_subsystem_access_types.h new file mode 100755 index 0000000000000..75fef6c4ddba2 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/vied/vied/vied_subsystem_access_types.h @@ -0,0 +1,34 @@ +/* +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ +#ifndef _HRT_VIED_SUBSYSTEM_ACCESS_TYPES_H +#define _HRT_VIED_SUBSYSTEM_ACCESS_TYPES_H + +/** Types for the VIED subsystem access interface */ +#include + +/** \brief An identifier for a VIED subsystem. + * + * This identifier must be a compile-time constant. It is used in + * access to a VIED subsystem. 
+ */ +typedef unsigned int vied_subsystem_t; + + +/** \brief An address within a VIED subsystem */ +typedef uint32_t vied_subsystem_address_t; + +/** \brief A base address of a VIED subsystem seen from the host */ +typedef unsigned long long vied_subsystem_base_address_t; + +#endif /* _HRT_VIED_SUBSYSTEM_ACCESS_TYPES_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/vied/vied/vied_types.h b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/vied/vied/vied_types.h new file mode 100755 index 0000000000000..0acfdbb00cfa3 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/vied/vied/vied_types.h @@ -0,0 +1,45 @@ +/* +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ +#ifndef _HRT_VIED_TYPES_H +#define _HRT_VIED_TYPES_H + +/** Types shared by VIED interfaces */ + +#include + +/** \brief An address within a VIED subsystem + * + * This will eventually replace teh vied_memory_address_t and vied_subsystem_address_t + */ +typedef uint32_t vied_address_t; + +/** \brief Memory address type + * + * A memory address is an offset within a memory. + */ +typedef uint32_t vied_memory_address_t; + +/** \brief Master port id */ +typedef int vied_master_port_id_t; + +/** + * \brief Require the existence of a certain type + * + * This macro can be used in interface header files to ensure that + * an implementation define type with a specified name exists. 
+ */ +#define _VIED_REQUIRE_TYPE(T) enum { _VIED_SIZEOF_##T = sizeof(T) } + + +#endif /* _HRT_VIED_TYPES_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/Makefile b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/Makefile new file mode 100644 index 0000000000000..d15887320a3d8 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/Makefile @@ -0,0 +1,49 @@ +# +# Copyright (c) 2010 - 2018, Intel Corporation. +# +# This program is free software; you can redistribute it and/or modify it +# under the terms and conditions of the GNU General Public License, +# version 2, as published by the Free Software Foundation. +# +# This program is distributed in the hope it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for +# more details. +# + +ifneq ($(EXTERNAL_BUILD), 1) +srcpath := $(srctree) +endif + +include $(srcpath)/$(src)/../Makefile.ipu4psys_src +include $(srcpath)/$(src)/../Makefile.ipu4psys_inc + +SSID = 0 +MMID = 0 + +IPU_PSYSLIB_ROOT_REL = lib +IPU_PSYSLIB_ROOT = $(srcpath)/$(src)/$(IPU_PSYSLIB_ROOT_REL) + +ccflags-y += -I$(srcpath)/$(src)/../../../ +ccflags-y += -I$(srcpath)/$(src)/../../ +ccflags-y += -DHAS_DUAL_CMD_CTX_SUPPORT=0 -DHAS_LATE_BINDING_SUPPORT=0 -DIPU_PSYS_LEGACY + +IPU_PSYSLIB_SRC += libcsspsys2600.o + +#CFLAGS = -W -Wall -Wstrict-prototypes -Wmissing-prototypes -O2 -fomit-frame-pointer -Wno-unused-variable +HOST_DEFINES += -DSSID=$(SSID) +HOST_DEFINES += -DMMID=$(MMID) +HOST_DEFINES += -DHRT_ON_VIED_SUBSYSTEM_ACCESS=$(SSID) +HOST_DEFINES += -DCFG_VIED_SUBSYSTEM_ACCESS_LIB_IMPL +HOST_DEFINES += -DHRT_USE_VIR_ADDRS +HOST_DEFINES += -DHRT_HW +HOST_DEFINES += -DVIED_NCI_TUNIT_PSYS +HOST_DEFINES += -DFIRMWARE_RELEASE_VERSION +HOST_DEFINES += -DPSYS_SERVER_ON_SPC +HOST_DEFINES += -DAPI_SPLIT_START_STATE_UPDATE + +intel-ipu4-psys-csslib-objs := ../../../ipu-wrapper.o \ + $(IPU_PSYSLIB_SRC) + 
+obj-$(CONFIG_VIDEO_INTEL_IPU) += intel-ipu4-psys-csslib.o +ccflags-y += $(IPU_PSYSLIB_INC) $(HOST_DEFINES) -fno-common -v diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/DSS_V2_program_group/ia_css_fw_pkg_release.h b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/DSS_V2_program_group/ia_css_fw_pkg_release.h new file mode 100644 index 0000000000000..408726c817146 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/DSS_V2_program_group/ia_css_fw_pkg_release.h @@ -0,0 +1,14 @@ +/* +* Support for Intel Camera Imaging ISP subsystem. +* Copyright (c) 2010 - 2018, Intel Corporation. +* +* This program is free software; you can redistribute it and/or modify it +* under the terms and conditions of the GNU General Public License, +* version 2, as published by the Free Software Foundation. +* +* This program is distributed in the hope it will be useful, but WITHOUT +* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for +* more details. +*/ +#define IA_CSS_FW_PKG_RELEASE 0x20181222 diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/buffer/buffer.mk b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/buffer/buffer.mk new file mode 100644 index 0000000000000..c00a1133b440f --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/buffer/buffer.mk @@ -0,0 +1,43 @@ +# # # +# Support for Intel Camera Imaging ISP subsystem. +# Copyright (c) 2010 - 2018, Intel Corporation. +# +# This program is free software; you can redistribute it and/or modify it +# under the terms and conditions of the GNU General Public License, +# version 2, as published by the Free Software Foundation. +# +# This program is distributed in the hope it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +# FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU General Public License for +# more details +# +# +# MODULE is BUFFER + +ifdef _H_BUFFER_MK +$(error ERROR: buffer.mk included multiple times, please check makefile) +else +_H_BUFFER_MK=1 +endif + +BUFFER_DIR=$${MODULES_DIR}/buffer + +BUFFER_INTERFACE=$(BUFFER_DIR)/interface +BUFFER_SOURCES_CPU=$(BUFFER_DIR)/src/cpu +BUFFER_SOURCES_CSS=$(BUFFER_DIR)/src/css + +BUFFER_HOST_FILES += $(BUFFER_SOURCES_CPU)/ia_css_buffer.c +BUFFER_HOST_FILES += $(BUFFER_SOURCES_CPU)/ia_css_output_buffer.c +BUFFER_HOST_FILES += $(BUFFER_SOURCES_CPU)/ia_css_input_buffer.c +BUFFER_HOST_FILES += $(BUFFER_SOURCES_CPU)/ia_css_shared_buffer.c +BUFFER_HOST_FILES += $(BUFFER_SOURCES_CPU)/buffer_access.c +BUFFER_HOST_CPPFLAGS += -I$(BUFFER_INTERFACE) +BUFFER_HOST_CPPFLAGS += -I$${MODULES_DIR}/support + +BUFFER_FW_FILES += $(BUFFER_SOURCES_CSS)/ia_css_input_buffer.c +BUFFER_FW_FILES += $(BUFFER_SOURCES_CSS)/ia_css_output_buffer.c +BUFFER_FW_FILES += $(BUFFER_SOURCES_CSS)/ia_css_shared_buffer.c +BUFFER_FW_FILES += $(BUFFER_SOURCES_CSS)/buffer_access.c + +BUFFER_FW_CPPFLAGS += -I$(BUFFER_INTERFACE) +BUFFER_FW_CPPFLAGS += -I$${MODULES_DIR}/support diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/buffer/interface/buffer_access.h b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/buffer/interface/buffer_access.h new file mode 100644 index 0000000000000..e5fe647742c9f --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/buffer/interface/buffer_access.h @@ -0,0 +1,36 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. 
+ * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ + +#ifndef __BUFFER_ACCESS_H +#define __BUFFER_ACCESS_H + +#include "buffer_type.h" +/* #def to keep consistent the buffer load interfaces for host and css */ +#define IDM 0 + +void +buffer_load( + buffer_address address, + void *data, + unsigned int size, + unsigned int mm_id); + +void +buffer_store( + buffer_address address, + const void *data, + unsigned int size, + unsigned int mm_id); + +#endif /* __BUFFER_ACCESS_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/buffer/interface/buffer_type.h b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/buffer/interface/buffer_type.h new file mode 100644 index 0000000000000..de51f23941582 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/buffer/interface/buffer_type.h @@ -0,0 +1,29 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+*/ + +#ifndef __BUFFER_TYPE_H +#define __BUFFER_TYPE_H + +/* portable access to buffers in DDR */ + +#ifdef __VIED_CELL +typedef unsigned int buffer_address; +#else +/* workaround needed because shared_memory_access.h uses size_t */ +#include "type_support.h" +#include "vied/shared_memory_access.h" +typedef host_virtual_address_t buffer_address; +#endif + +#endif /* __BUFFER_TYPE_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/buffer/interface/ia_css_buffer_address.h b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/buffer/interface/ia_css_buffer_address.h new file mode 100644 index 0000000000000..137bfb1fda166 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/buffer/interface/ia_css_buffer_address.h @@ -0,0 +1,24 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+*/ + +#ifndef __IA_CSS_BUFFER_ADDRESS_H +#define __IA_CSS_BUFFER_ADDRESS_H + +#include "type_support.h" + +typedef uint32_t ia_css_buffer_address; /* CSS virtual address */ + +#define ia_css_buffer_address_null ((ia_css_buffer_address)0) + +#endif /* __IA_CSS_BUFFER_ADDRESS_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/buffer/interface/ia_css_input_buffer.h b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/buffer/interface/ia_css_input_buffer.h new file mode 100644 index 0000000000000..4e92e35b61843 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/buffer/interface/ia_css_input_buffer.h @@ -0,0 +1,51 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ + +#ifndef __IA_CSS_INPUT_BUFFER_H +#define __IA_CSS_INPUT_BUFFER_H + + +/* Input Buffers */ + +/* A CSS input buffer is a buffer in DDR that can be written by the CPU, + * and that can be read by CSS hardware, after the buffer has been handed over. + * Examples: command buffer, input frame buffer, parameter buffer + * An input buffer must be mapped into the CPU address space before it can be + * written by the CPU. + * After mapping, writing, and unmapping, the buffer can be handed over to the + * firmware. An input buffer is handed over to the CSS by mapping it to the + * CSS address space (by the CPU), and by passing the resulting CSS (virtual) + * address of the input buffer to the CSS hardware.
+ * The firmware can read from an input buffer as soon as it has received the + * CSS virtual address. + * The firmware should not write into an input buffer. + * The firmware hands over the input buffer (back to the CPU) by sending the + * buffer handle via a response. The host should unmap the buffer, + * before reusing it. + * The firmware should not read from the input buffer after returning the + * buffer handle to the CPU. + * + * A buffer may be pre-mapped to the CPU and/or to the CSS upon allocation, + * depending on the allocator's preference. In case of pre-mapped buffers, + * the map and unmap functions will only manage read and write access. + */ + +#include "ia_css_buffer_address.h" + +typedef struct ia_css_buffer_s *ia_css_input_buffer; /* input buffer handle */ +typedef void *ia_css_input_buffer_cpu_address; /* CPU virtual address */ +/* CSS virtual address */ +typedef ia_css_buffer_address ia_css_input_buffer_css_address; + +#endif /* __IA_CSS_INPUT_BUFFER_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/buffer/interface/ia_css_input_buffer_cpu.h b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/buffer/interface/ia_css_input_buffer_cpu.h new file mode 100644 index 0000000000000..d3d01353ce431 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/buffer/interface/ia_css_input_buffer_cpu.h @@ -0,0 +1,49 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details.
+*/ + +#ifndef __IA_CSS_INPUT_BUFFER_CPU_H +#define __IA_CSS_INPUT_BUFFER_CPU_H + +#include "vied/shared_memory_map.h" +#include "ia_css_input_buffer.h" + +ia_css_input_buffer +ia_css_input_buffer_alloc( + vied_subsystem_t sid, + vied_memory_t mid, + unsigned int size); + +void +ia_css_input_buffer_free( + vied_subsystem_t sid, + vied_memory_t mid, + ia_css_input_buffer b); + +ia_css_input_buffer_cpu_address +ia_css_input_buffer_cpu_map(ia_css_input_buffer b); + +ia_css_input_buffer_cpu_address +ia_css_input_buffer_cpu_unmap(ia_css_input_buffer b); + +ia_css_input_buffer_css_address +ia_css_input_buffer_css_map(vied_memory_t mid, ia_css_input_buffer b); + +ia_css_input_buffer_css_address +ia_css_input_buffer_css_map_no_invalidate(vied_memory_t mid, ia_css_input_buffer b); + +ia_css_input_buffer_css_address +ia_css_input_buffer_css_unmap(ia_css_input_buffer b); + + +#endif /* __IA_CSS_INPUT_BUFFER_CPU_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/buffer/interface/ia_css_output_buffer.h b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/buffer/interface/ia_css_output_buffer.h new file mode 100644 index 0000000000000..2c310ea92c6af --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/buffer/interface/ia_css_output_buffer.h @@ -0,0 +1,30 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+*/ + +#ifndef __IA_CSS_OUTPUT_BUFFER_H +#define __IA_CSS_OUTPUT_BUFFER_H + +/* Output Buffers */ +/* A CSS output buffer is a buffer in DDR that can be written by CSS hardware + * and that can be read by the host, after the buffer has been handed over. + * Examples: output frame buffer + */ + +#include "ia_css_buffer_address.h" + +typedef struct ia_css_buffer_s *ia_css_output_buffer; +typedef void *ia_css_output_buffer_cpu_address; +typedef ia_css_buffer_address ia_css_output_buffer_css_address; + +#endif /* __IA_CSS_OUTPUT_BUFFER_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/buffer/interface/ia_css_output_buffer_cpu.h b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/buffer/interface/ia_css_output_buffer_cpu.h new file mode 100644 index 0000000000000..0299fc3b7eb66 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/buffer/interface/ia_css_output_buffer_cpu.h @@ -0,0 +1,48 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details.
+*/ + +#ifndef __IA_CSS_OUTPUT_BUFFER_CPU_H +#define __IA_CSS_OUTPUT_BUFFER_CPU_H + +#include "vied/shared_memory_map.h" +#include "ia_css_output_buffer.h" + +ia_css_output_buffer +ia_css_output_buffer_alloc( + vied_subsystem_t sid, + vied_memory_t mid, + unsigned int size); + +void +ia_css_output_buffer_free( + vied_subsystem_t sid, + vied_memory_t mid, + ia_css_output_buffer b); + +ia_css_output_buffer_css_address +ia_css_output_buffer_css_map(ia_css_output_buffer b); + +ia_css_output_buffer_css_address +ia_css_output_buffer_css_unmap(ia_css_output_buffer b); + +ia_css_output_buffer_cpu_address +ia_css_output_buffer_cpu_map(vied_memory_t mid, ia_css_output_buffer b); +ia_css_output_buffer_cpu_address +ia_css_output_buffer_cpu_map_no_invalidate(vied_memory_t mid, ia_css_output_buffer b); + +ia_css_output_buffer_cpu_address +ia_css_output_buffer_cpu_unmap(ia_css_output_buffer b); + + +#endif /* __IA_CSS_OUTPUT_BUFFER_CPU_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/buffer/interface/ia_css_shared_buffer.h b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/buffer/interface/ia_css_shared_buffer.h new file mode 100644 index 0000000000000..558ec679f98a0 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/buffer/interface/ia_css_shared_buffer.h @@ -0,0 +1,32 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+*/ + +#ifndef __IA_CSS_SHARED_BUFFER_H +#define __IA_CSS_SHARED_BUFFER_H + +/* Shared Buffers */ +/* A CSS shared buffer is a buffer in DDR that can be read and written by the + * CPU and CSS. + * Both the CPU and CSS can have the buffer mapped simultaneously. + * Access rights are not managed by this interface; this could be done by means + * of the read and write pointer of a queue, for example. + */ + +#include "ia_css_buffer_address.h" + +typedef struct ia_css_buffer_s *ia_css_shared_buffer; +typedef void *ia_css_shared_buffer_cpu_address; +typedef ia_css_buffer_address ia_css_shared_buffer_css_address; + +#endif /* __IA_CSS_SHARED_BUFFER_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/buffer/interface/ia_css_shared_buffer_cpu.h b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/buffer/interface/ia_css_shared_buffer_cpu.h new file mode 100644 index 0000000000000..ff62914f99dc3 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/buffer/interface/ia_css_shared_buffer_cpu.h @@ -0,0 +1,51 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details.
+*/ + +#ifndef __IA_CSS_SHARED_BUFFER_CPU_H +#define __IA_CSS_SHARED_BUFFER_CPU_H + +#include "vied/shared_memory_map.h" +#include "ia_css_shared_buffer.h" + +ia_css_shared_buffer +ia_css_shared_buffer_alloc( + vied_subsystem_t sid, + vied_memory_t mid, + unsigned int size); + +void +ia_css_shared_buffer_free( + vied_subsystem_t sid, + vied_memory_t mid, + ia_css_shared_buffer b); + +ia_css_shared_buffer_cpu_address +ia_css_shared_buffer_cpu_map(ia_css_shared_buffer b); + +ia_css_shared_buffer_cpu_address +ia_css_shared_buffer_cpu_unmap(ia_css_shared_buffer b); + +ia_css_shared_buffer_css_address +ia_css_shared_buffer_css_map(ia_css_shared_buffer b); + +ia_css_shared_buffer_css_address +ia_css_shared_buffer_css_unmap(ia_css_shared_buffer b); + +ia_css_shared_buffer +ia_css_shared_buffer_css_update(vied_memory_t mid, ia_css_shared_buffer b); + +ia_css_shared_buffer +ia_css_shared_buffer_cpu_update(vied_memory_t mid, ia_css_shared_buffer b); + +#endif /* __IA_CSS_SHARED_BUFFER_CPU_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/buffer/src/cpu/buffer_access.c b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/buffer/src/cpu/buffer_access.c new file mode 100644 index 0000000000000..f0c617fe501a0 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/buffer/src/cpu/buffer_access.c @@ -0,0 +1,39 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+*/ + +/* implementation of buffer access from the CPU */ +/* using shared_memory interface */ + +#include "buffer_access.h" +#include "vied/shared_memory_access.h" + +void +buffer_load( + buffer_address address, + void *data, + unsigned int bytes, + unsigned int mm_id) +{ + shared_memory_load(mm_id, address, data, bytes); +} + +void +buffer_store( + buffer_address address, + const void *data, + unsigned int bytes, + unsigned int mm_id) +{ + shared_memory_store(mm_id, address, data, bytes); +} diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/buffer/src/cpu/ia_css_buffer.c b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/buffer/src/cpu/ia_css_buffer.c new file mode 100644 index 0000000000000..146d4109de440 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/buffer/src/cpu/ia_css_buffer.c @@ -0,0 +1,51 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+*/ + +/* provided interface */ +#include "ia_css_buffer.h" + +/* used interfaces */ +#include "vied/shared_memory_access.h" +#include "vied/shared_memory_map.h" +#include "cpu_mem_support.h" + +ia_css_buffer_t +ia_css_buffer_alloc(vied_subsystem_t sid, vied_memory_t mid, unsigned int size) +{ + ia_css_buffer_t b; + + b = ia_css_cpu_mem_alloc(sizeof(*b)); + if (b == NULL) + return NULL; + + b->mem = shared_memory_alloc(mid, size); + + if (b->mem == 0) { + ia_css_cpu_mem_free(b); + return NULL; + } + + b->css_address = shared_memory_map(sid, mid, b->mem); + b->size = size; + return b; +} + + +void +ia_css_buffer_free(vied_subsystem_t sid, vied_memory_t mid, ia_css_buffer_t b) +{ + shared_memory_unmap(sid, mid, b->css_address); + shared_memory_free(mid, b->mem); + ia_css_cpu_mem_free(b); +} diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/buffer/src/cpu/ia_css_buffer.h b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/buffer/src/cpu/ia_css_buffer.h new file mode 100644 index 0000000000000..0f99a06e9a89b --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/buffer/src/cpu/ia_css_buffer.h @@ -0,0 +1,58 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+*/ + +#ifndef __IA_CSS_BUFFER_H +#define __IA_CSS_BUFFER_H + +/* workaround: needed because uses size_t */ +#include "type_support.h" +#include "vied/shared_memory_map.h" + +typedef enum { + buffer_unmapped, /* buffer is not accessible by cpu, nor css */ + buffer_write, /* output buffer: css has write access */ + /* input buffer: cpu has write access */ + buffer_read, /* input buffer: css has read access */ + /* output buffer: cpu has read access */ + buffer_cpu, /* shared buffer: cpu has read/write access */ + buffer_css /* shared buffer: css has read/write access */ +} buffer_state; + +struct ia_css_buffer_s { + /* number of bytes allocated */ + unsigned int size; + /* allocated virtual memory object */ + host_virtual_address_t mem; + /* virtual address to be used on css/firmware */ + vied_virtual_address_t css_address; + /* virtual address to be used on cpu/host */ + void *cpu_address; + buffer_state state; +}; + +typedef struct ia_css_buffer_s *ia_css_buffer_t; + +ia_css_buffer_t +ia_css_buffer_alloc( + vied_subsystem_t sid, + vied_memory_t mid, + unsigned int size); + +void +ia_css_buffer_free( + vied_subsystem_t sid, + vied_memory_t mid, + ia_css_buffer_t b); + +#endif /* __IA_CSS_BUFFER_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/buffer/src/cpu/ia_css_input_buffer.c b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/buffer/src/cpu/ia_css_input_buffer.c new file mode 100644 index 0000000000000..2a128795d03e2 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/buffer/src/cpu/ia_css_input_buffer.c @@ -0,0 +1,184 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation.
+ * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ + + +#include "ia_css_input_buffer_cpu.h" +#include "ia_css_buffer.h" +#include "vied/shared_memory_access.h" +#include "vied/shared_memory_map.h" +#include "cpu_mem_support.h" + + +ia_css_input_buffer +ia_css_input_buffer_alloc( + vied_subsystem_t sid, + vied_memory_t mid, + unsigned int size) +{ + ia_css_input_buffer b; + + /* allocate buffer container */ + b = ia_css_cpu_mem_alloc(sizeof(*b)); + if (b == NULL) + return NULL; + + b->mem = shared_memory_alloc(mid, size); + if (b->mem == 0) { + ia_css_cpu_mem_free(b); + return NULL; + } + +#ifndef HRT_HW + /* initialize the buffer to avoid warnings when copying */ + shared_memory_zero(mid, b->mem, size); + + /* in simulation, we need to allocate a shadow host buffer */ + b->cpu_address = ia_css_cpu_mem_alloc_page_aligned(size); + if (b->cpu_address == NULL) { + shared_memory_free(mid, b->mem); + ia_css_cpu_mem_free(b); + return NULL; + } +#else + /* on hw / real platform we can use the pointer from + * shared memory alloc + */ + b->cpu_address = (void *)HOST_ADDRESS(b->mem); +#endif + + b->css_address = shared_memory_map(sid, mid, b->mem); + + b->size = size; + b->state = buffer_unmapped; + + return b; +} + + +void +ia_css_input_buffer_free( + vied_subsystem_t sid, + vied_memory_t mid, + ia_css_input_buffer b) +{ + if (b == NULL) + return; + if (b->state != buffer_unmapped) + return; + +#ifndef HRT_HW + /* only free if we actually allocated it separately */ + ia_css_cpu_mem_free(b->cpu_address); +#endif + shared_memory_unmap(sid, mid, b->css_address); + shared_memory_free(mid, b->mem); + ia_css_cpu_mem_free(b); +} + + +ia_css_input_buffer_cpu_address +ia_css_input_buffer_cpu_map(ia_css_input_buffer b) +{ + if (b == NULL) + return NULL; + if (b->state != 
buffer_unmapped) + return NULL; + + /* map input buffer to CPU address space, acquire write access */ + b->state = buffer_write; + + /* return pre-mapped buffer */ + return b->cpu_address; +} + + +ia_css_input_buffer_cpu_address +ia_css_input_buffer_cpu_unmap(ia_css_input_buffer b) +{ + if (b == NULL) + return NULL; + if (b->state != buffer_write) + return NULL; + + /* unmap input buffer from CPU address space, release write access */ + b->state = buffer_unmapped; + + /* return pre-mapped buffer */ + return b->cpu_address; +} + + +ia_css_input_buffer_css_address +ia_css_input_buffer_css_map(vied_memory_t mid, ia_css_input_buffer b) +{ + if (b == NULL) + return 0; + if (b->state != buffer_unmapped) + return 0; + + /* map input buffer to CSS address space, acquire read access */ + b->state = buffer_read; + + /* now flush the cache */ + ia_css_cpu_mem_cache_flush(b->cpu_address, b->size); +#ifndef HRT_HW + /* only copy in case of simulation, otherwise it should just work */ + /* copy data from CPU address space to CSS address space */ + shared_memory_store(mid, b->mem, b->cpu_address, b->size); +#else + (void)mid; +#endif + + return (ia_css_input_buffer_css_address)b->css_address; +} + + +ia_css_input_buffer_css_address +ia_css_input_buffer_css_map_no_invalidate(vied_memory_t mid, ia_css_input_buffer b) +{ + if (b == NULL) + return 0; + if (b->state != buffer_unmapped) + return 0; + + /* map input buffer to CSS address space, acquire read access */ + b->state = buffer_read; + +#ifndef HRT_HW + /* only copy in case of simulation, otherwise it should just work */ + /* copy data from CPU address space to CSS address space */ + shared_memory_store(mid, b->mem, b->cpu_address, b->size); +#else + (void)mid; +#endif + + return (ia_css_input_buffer_css_address)b->css_address; +} + + +ia_css_input_buffer_css_address +ia_css_input_buffer_css_unmap(ia_css_input_buffer b) +{ + if (b == NULL) + return 0; + if (b->state != buffer_read) + return 0; + + /* unmap input buffer from CSS 
address space, release read access */ + b->state = buffer_unmapped; + + /* input buffer only, no need to invalidate cache */ + + return (ia_css_input_buffer_css_address)b->css_address; +} diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/buffer/src/cpu/ia_css_output_buffer.c b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/buffer/src/cpu/ia_css_output_buffer.c new file mode 100644 index 0000000000000..30bc8d52a5a9e --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/buffer/src/cpu/ia_css_output_buffer.c @@ -0,0 +1,181 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+*/ + + +#include "ia_css_output_buffer_cpu.h" +#include "ia_css_buffer.h" +#include "vied/shared_memory_access.h" +#include "vied/shared_memory_map.h" +#include "cpu_mem_support.h" + + +ia_css_output_buffer +ia_css_output_buffer_alloc( + vied_subsystem_t sid, + vied_memory_t mid, + unsigned int size) +{ + ia_css_output_buffer b; + + /* allocate buffer container */ + b = ia_css_cpu_mem_alloc(sizeof(*b)); + if (b == NULL) + return NULL; + + b->mem = shared_memory_alloc(mid, size); + if (b->mem == 0) { + ia_css_cpu_mem_free(b); + return NULL; + } + +#ifndef HRT_HW + /* initialize the buffer to avoid warnings when copying */ + shared_memory_zero(mid, b->mem, size); + + /* in simulation, we need to allocate a shadow host buffer */ + b->cpu_address = ia_css_cpu_mem_alloc_page_aligned(size); + if (b->cpu_address == NULL) { + shared_memory_free(mid, b->mem); + ia_css_cpu_mem_free(b); + return NULL; + } +#else + /* on hw / real platform we can use the pointer from + * shared memory alloc + */ + b->cpu_address = (void *)HOST_ADDRESS(b->mem); +#endif + + b->css_address = shared_memory_map(sid, mid, b->mem); + + b->size = size; + b->state = buffer_unmapped; + + return b; +} + + +void +ia_css_output_buffer_free( + vied_subsystem_t sid, + vied_memory_t mid, + ia_css_output_buffer b) +{ + if (b == NULL) + return; + if (b->state != buffer_unmapped) + return; + +#ifndef HRT_HW + /* only free if we actually allocated it separately */ + ia_css_cpu_mem_free(b->cpu_address); +#endif + shared_memory_unmap(sid, mid, b->css_address); + shared_memory_free(mid, b->mem); + ia_css_cpu_mem_free(b); +} + + +ia_css_output_buffer_css_address +ia_css_output_buffer_css_map(ia_css_output_buffer b) +{ + if (b == NULL) + return 0; + if (b->state != buffer_unmapped) + return 0; + + /* map output buffer to CSS address space, acquire write access */ + b->state = buffer_write; + + return (ia_css_output_buffer_css_address)b->css_address; +} + + +ia_css_output_buffer_css_address 
+ia_css_output_buffer_css_unmap(ia_css_output_buffer b) +{ + if (b == NULL) + return 0; + if (b->state != buffer_write) + return 0; + + /* unmap output buffer from CSS address space, release write access */ + b->state = buffer_unmapped; + + return (ia_css_output_buffer_css_address)b->css_address; +} + + +ia_css_output_buffer_cpu_address +ia_css_output_buffer_cpu_map(vied_memory_t mid, ia_css_output_buffer b) +{ + if (b == NULL) + return NULL; + if (b->state != buffer_unmapped) + return NULL; + + /* map output buffer to CPU address space, acquire read access */ + b->state = buffer_read; + +#ifndef HRT_HW + /* only in simulation */ + /* copy data from CSS address space to CPU address space */ + shared_memory_load(mid, b->mem, b->cpu_address, b->size); +#else + (void)mid; +#endif + /* now invalidate the cache */ + ia_css_cpu_mem_cache_invalidate(b->cpu_address, b->size); + + return b->cpu_address; +} + + +ia_css_output_buffer_cpu_address +ia_css_output_buffer_cpu_map_no_invalidate(vied_memory_t mid, ia_css_output_buffer b) +{ + if (b == NULL) + return NULL; + if (b->state != buffer_unmapped) + return NULL; + + /* map output buffer to CPU address space, acquire read access */ + b->state = buffer_read; + +#ifndef HRT_HW + /* only in simulation */ + /* copy data from CSS address space to CPU address space */ + shared_memory_load(mid, b->mem, b->cpu_address, b->size); +#else + (void)mid; +#endif + + return b->cpu_address; +} + +ia_css_output_buffer_cpu_address +ia_css_output_buffer_cpu_unmap(ia_css_output_buffer b) +{ + if (b == NULL) + return NULL; + if (b->state != buffer_read) + return NULL; + + /* unmap output buffer from CPU address space, release read access */ + b->state = buffer_unmapped; + + /* output only, no need to flush cache */ + + return b->cpu_address; +} diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/buffer/src/cpu/ia_css_shared_buffer.c b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/buffer/src/cpu/ia_css_shared_buffer.c new 
file mode 100644 index 0000000000000..92b7110644fe3 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/buffer/src/cpu/ia_css_shared_buffer.c @@ -0,0 +1,187 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ + + +#include "ia_css_shared_buffer_cpu.h" +#include "ia_css_buffer.h" +#include "vied/shared_memory_access.h" +#include "vied/shared_memory_map.h" +#include "cpu_mem_support.h" + + +ia_css_shared_buffer +ia_css_shared_buffer_alloc( + vied_subsystem_t sid, + vied_memory_t mid, + unsigned int size) +{ + ia_css_shared_buffer b; + + /* allocate buffer container */ + b = ia_css_cpu_mem_alloc(sizeof(*b)); + if (b == NULL) + return NULL; + + b->mem = shared_memory_alloc(mid, size); + if (b->mem == 0) { + ia_css_cpu_mem_free(b); + return NULL; + } + +#ifndef HRT_HW + /* initialize the buffer to avoid warnings when copying */ + shared_memory_zero(mid, b->mem, size); + + /* in simulation, we need to allocate a shadow host buffer */ + b->cpu_address = ia_css_cpu_mem_alloc_page_aligned(size); + if (b->cpu_address == NULL) { + shared_memory_free(mid, b->mem); + ia_css_cpu_mem_free(b); + return NULL; + } +#else + /* on hw / real platform we can use the pointer from + * shared memory alloc + */ + b->cpu_address = (void *)HOST_ADDRESS(b->mem); +#endif + + b->css_address = shared_memory_map(sid, mid, b->mem); + + b->size = size; + b->state = buffer_unmapped; + + return b; +} + + +void +ia_css_shared_buffer_free( + vied_subsystem_t sid, + 
vied_memory_t mid, + ia_css_shared_buffer b) +{ + if (b == NULL) + return; + if (b->state != buffer_unmapped) + return; + +#ifndef HRT_HW + /* only free if we actually allocated it separately */ + ia_css_cpu_mem_free(b->cpu_address); +#endif + shared_memory_unmap(sid, mid, b->css_address); + shared_memory_free(mid, b->mem); + ia_css_cpu_mem_free(b); +} + + +ia_css_shared_buffer_cpu_address +ia_css_shared_buffer_cpu_map(ia_css_shared_buffer b) +{ + if (b == NULL) + return NULL; + if (b->state != buffer_unmapped) + return NULL; + + /* map shared buffer to CPU address space */ + b->state = buffer_cpu; + + return b->cpu_address; +} + + +ia_css_shared_buffer_cpu_address +ia_css_shared_buffer_cpu_unmap(ia_css_shared_buffer b) +{ + if (b == NULL) + return NULL; + if (b->state != buffer_cpu) + return NULL; + + /* unmap shared buffer from CPU address space */ + b->state = buffer_unmapped; + + return b->cpu_address; +} + + +ia_css_shared_buffer_css_address +ia_css_shared_buffer_css_map(ia_css_shared_buffer b) +{ + if (b == NULL) + return 0; + if (b->state != buffer_unmapped) + return 0; + + /* map shared buffer to CSS address space */ + b->state = buffer_css; + + return (ia_css_shared_buffer_css_address)b->css_address; +} + + +ia_css_shared_buffer_css_address +ia_css_shared_buffer_css_unmap(ia_css_shared_buffer b) +{ + if (b == NULL) + return 0; + if (b->state != buffer_css) + return 0; + + /* unmap shared buffer from CSS address space */ + b->state = buffer_unmapped; + + return (ia_css_shared_buffer_css_address)b->css_address; +} + + +ia_css_shared_buffer +ia_css_shared_buffer_css_update(vied_memory_t mid, ia_css_shared_buffer b) +{ + if (b == NULL) + return NULL; + + /* flush the buffer to CSS after it was modified by the CPU */ + /* flush cache to ddr */ + ia_css_cpu_mem_cache_flush(b->cpu_address, b->size); +#ifndef HRT_HW + /* copy data from CPU address space to CSS address space */ + shared_memory_store(mid, b->mem, b->cpu_address, b->size); +#else + (void)mid; +#endif 
+ + return b; +} + + +ia_css_shared_buffer +ia_css_shared_buffer_cpu_update(vied_memory_t mid, ia_css_shared_buffer b) +{ + if (b == NULL) + return NULL; + + /* flush the buffer to the CPU after it has been modified by CSS */ +#ifndef HRT_HW + /* copy data from CSS address space to CPU address space */ + shared_memory_load(mid, b->mem, b->cpu_address, b->size); +#else + (void)mid; +#endif + /* flush cache to ddr */ + ia_css_cpu_mem_cache_invalidate(b->cpu_address, b->size); + + return b; +} diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/cell/cell.mk b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/cell/cell.mk new file mode 100644 index 0000000000000..fa5e650226017 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/cell/cell.mk @@ -0,0 +1,43 @@ +# # # +# Support for Intel Camera Imaging ISP subsystem. +# Copyright (c) 2010 - 2018, Intel Corporation. +# +# This program is free software; you can redistribute it and/or modify it +# under the terms and conditions of the GNU General Public License, +# version 2, as published by the Free Software Foundation. +# +# This program is distributed in the hope it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +# FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU General Public License for +# more details +# +# +ifndef _CELL_MK_ +_CELL_MK_ = 1 + + +CELL_DIR=$${MODULES_DIR}/cell +CELL_INTERFACE=$(CELL_DIR)/interface +CELL_SOURCES=$(CELL_DIR)/src + +CELL_HOST_FILES = +CELL_FW_FILES = + +CELL_HOST_CPPFLAGS = \ + -I$(CELL_INTERFACE) \ + -I$(CELL_SOURCES) + +CELL_FW_CPPFLAGS = \ + -I$(CELL_INTERFACE) \ + -I$(CELL_SOURCES) + +ifdef 0 +# Disabled until it is decided to go this way or not +include $(MODULES_DIR)/device_access/device_access.mk +CELL_HOST_FILES += $(DEVICE_ACCESS_HOST_FILES) +CELL_FW_FILES += $(DEVICE_ACCESS_FW_FILES) +CELL_HOST_CPPFLAGS += $(DEVICE_ACCESS_HOST_CPPFLAGS) +CELL_FW_CPPFLAGS += $(DEVICE_ACCESS_FW_CPPFLAGS) +endif + +endif diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/cell/interface/ia_css_cell.h b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/cell/interface/ia_css_cell.h new file mode 100644 index 0000000000000..3fac3c791b6e6 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/cell/interface/ia_css_cell.h @@ -0,0 +1,112 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+*/ + +#ifndef __IA_CSS_CELL_H +#define __IA_CSS_CELL_H + +#include "storage_class.h" +#include "type_support.h" + +STORAGE_CLASS_INLINE unsigned int +ia_css_cell_get_stat_ctrl(unsigned int ssid, unsigned int cell_id); + +STORAGE_CLASS_INLINE void +ia_css_cell_set_stat_ctrl(unsigned int ssid, unsigned int cell_id, + unsigned int value); + +STORAGE_CLASS_INLINE void +ia_css_cell_set_start_pc(unsigned int ssid, unsigned int cell_id, + unsigned int pc); + +STORAGE_CLASS_INLINE void +ia_css_cell_set_icache_base_address(unsigned int ssid, unsigned int cell_id, + unsigned int value); + +#if 0 /* To be implemented after completing cell device properties */ +STORAGE_CLASS_INLINE void +ia_css_cell_set_icache_info_bits(unsigned int ssid, unsigned int cell_id, + unsigned int value); + +STORAGE_CLASS_INLINE unsigned int +ia_css_cell_get_debug_pc(unsigned int ssid, unsigned int cell_id); + +STORAGE_CLASS_INLINE unsigned int +ia_css_cell_get_stall_bits(unsigned int ssid, unsigned int cell_id); +#endif + +/* configure master ports */ + +STORAGE_CLASS_INLINE void +ia_css_cell_set_master_base_address(unsigned int ssid, unsigned int cell_id, + unsigned int master, unsigned int value); + +STORAGE_CLASS_INLINE void +ia_css_cell_set_master_segment_base_address(unsigned int ssid, + unsigned int cell_id, + unsigned int master, unsigned int segment, unsigned int value); + +STORAGE_CLASS_INLINE void +ia_css_cell_set_master_info_bits(unsigned int ssid, unsigned int cell_id, + unsigned int master, unsigned int value); + +STORAGE_CLASS_INLINE void +ia_css_cell_set_master_segment_info_bits(unsigned int ssid, + unsigned int cell_id, + unsigned int master, unsigned int segment, unsigned int value); + +STORAGE_CLASS_INLINE void +ia_css_cell_set_master_info_override_bits(unsigned int ssid, unsigned int cell, + unsigned int master, unsigned int value); + +STORAGE_CLASS_INLINE void +ia_css_cell_set_master_segment_info_override_bits(unsigned int ssid, + unsigned int cell, + unsigned int master, 
unsigned int segment, unsigned int value); + +/* Access memories */ + +STORAGE_CLASS_INLINE void +ia_css_cell_mem_store_32(unsigned int ssid, unsigned int cell_id, + unsigned int mem_id, unsigned int addr, unsigned int value); + +STORAGE_CLASS_INLINE unsigned int +ia_css_cell_mem_load_32(unsigned int ssid, unsigned int cell_id, + unsigned int mem_id, unsigned int addr); + +/***********************************************************************/ + +STORAGE_CLASS_INLINE unsigned int +ia_css_cell_is_ready(unsigned int ssid, unsigned int cell_id); + +STORAGE_CLASS_INLINE void +ia_css_cell_set_start_bit(unsigned int ssid, unsigned int cell_id); + +STORAGE_CLASS_INLINE void +ia_css_cell_set_run_bit(unsigned int ssid, unsigned int cell_id, + unsigned int value); + +STORAGE_CLASS_INLINE void +ia_css_cell_start(unsigned int ssid, unsigned int cell_id); + +STORAGE_CLASS_INLINE void +ia_css_cell_start_prefetch(unsigned int ssid, unsigned int cell_id, + bool prefetch); + +STORAGE_CLASS_INLINE void +ia_css_cell_wait(unsigned int ssid, unsigned int cell_id); + +/* include inline implementation */ +#include "ia_css_cell_impl.h" + +#endif /* __IA_CSS_CELL_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/cell/src/ia_css_cell_impl.h b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/cell/src/ia_css_cell_impl.h new file mode 100644 index 0000000000000..60b2e234da1a0 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/cell/src/ia_css_cell_impl.h @@ -0,0 +1,272 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. 
+ * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ + +#ifndef __IA_CSS_CELL_IMPL_H +#define __IA_CSS_CELL_IMPL_H + +#include "ia_css_cell.h" + +#include "ia_css_cmem.h" +#include "ipu_device_cell_properties.h" +#include "storage_class.h" +#include "assert_support.h" +#include "platform_support.h" +#include "misc_support.h" + +STORAGE_CLASS_INLINE unsigned int +ia_css_cell_regs_addr(unsigned int cell_id) +{ + /* mem_id 0 is for registers */ + return ipu_device_cell_memory_address(cell_id, 0); +} + +STORAGE_CLASS_INLINE unsigned int +ia_css_cell_dmem_addr(unsigned int cell_id) +{ + /* mem_id 1 is for DMEM */ + return ipu_device_cell_memory_address(cell_id, 1); +} + +STORAGE_CLASS_INLINE void +ia_css_cell_mem_store_32(unsigned int ssid, unsigned int cell_id, + unsigned int mem_id, unsigned int addr, unsigned int value) +{ + ia_css_cmem_store_32( + ssid, ipu_device_cell_memory_address( + cell_id, mem_id) + addr, value); +} + +STORAGE_CLASS_INLINE unsigned int +ia_css_cell_mem_load_32(unsigned int ssid, unsigned int cell_id, + unsigned int mem_id, unsigned int addr) +{ + return ia_css_cmem_load_32( + ssid, ipu_device_cell_memory_address(cell_id, mem_id) + addr); +} + +STORAGE_CLASS_INLINE unsigned int +ia_css_cell_get_stat_ctrl(unsigned int ssid, unsigned int cell_id) +{ + return ia_css_cmem_load_32( + ssid, ia_css_cell_regs_addr(cell_id) + + IPU_DEVICE_CELL_STAT_CTRL_REG_ADDRESS); +} + +STORAGE_CLASS_INLINE void +ia_css_cell_set_stat_ctrl(unsigned int ssid, unsigned int cell_id, + unsigned int value) +{ + ia_css_cmem_store_32( + ssid, ia_css_cell_regs_addr(cell_id) + + IPU_DEVICE_CELL_STAT_CTRL_REG_ADDRESS, value); +} + +STORAGE_CLASS_INLINE unsigned int +ia_css_cell_is_ready(unsigned int ssid, unsigned int cell_id) +{ + unsigned int reg; + + reg = 
ia_css_cell_get_stat_ctrl(ssid, cell_id); + /* READY must be 1, START must be 0 */ + return (reg & (1 << IPU_DEVICE_CELL_STAT_CTRL_READY_BIT)) && + ((~reg) & (1 << IPU_DEVICE_CELL_STAT_CTRL_START_BIT)); +} + +STORAGE_CLASS_INLINE void +ia_css_cell_set_start_pc(unsigned int ssid, unsigned int cell_id, + unsigned int pc) +{ + /* set start PC */ + ia_css_cmem_store_32( + ssid, ia_css_cell_regs_addr(cell_id) + + IPU_DEVICE_CELL_START_PC_REG_ADDRESS, pc); +} + +STORAGE_CLASS_INLINE void +ia_css_cell_set_start_bit(unsigned int ssid, unsigned int cell_id) +{ + unsigned int reg; + + reg = 1 << IPU_DEVICE_CELL_STAT_CTRL_START_BIT; + ia_css_cell_set_stat_ctrl(ssid, cell_id, reg); +} + +STORAGE_CLASS_INLINE void +ia_css_cell_set_run_bit(unsigned int ssid, unsigned int cell_id, + unsigned int value) +{ + unsigned int reg; + + reg = value << IPU_DEVICE_CELL_STAT_CTRL_RUN_BIT; + ia_css_cell_set_stat_ctrl(ssid, cell_id, reg); +} + +STORAGE_CLASS_INLINE void +ia_css_cell_start(unsigned int ssid, unsigned int cell_id) +{ + ia_css_cell_start_prefetch(ssid, cell_id, 0); +} + +STORAGE_CLASS_INLINE void +ia_css_cell_start_prefetch(unsigned int ssid, unsigned int cell_id, + bool prefetch) +{ + unsigned int reg = 0; + + /* Set run bit and start bit */ + reg |= (1 << IPU_DEVICE_CELL_STAT_CTRL_START_BIT); + reg |= (1 << IPU_DEVICE_CELL_STAT_CTRL_RUN_BIT); + /* Invalidate the icache */ + reg |= (1 << IPU_DEVICE_CELL_STAT_CTRL_INVALIDATE_ICACHE_BIT); + /* Optionally enable prefetching */ + reg |= ((prefetch == 1) ? 
+ (1 << IPU_DEVICE_CELL_STAT_CTRL_ICACHE_ENABLE_PREFETCH_BIT) : + 0); + + /* store into register */ + ia_css_cell_set_stat_ctrl(ssid, cell_id, reg); +} + +STORAGE_CLASS_INLINE void +ia_css_cell_wait(unsigned int ssid, unsigned int cell_id) +{ + do { + ia_css_sleep(); + } while (!ia_css_cell_is_ready(ssid, cell_id)); +}; + +STORAGE_CLASS_INLINE void +ia_css_cell_set_icache_base_address(unsigned int ssid, unsigned int cell_id, + unsigned int value) +{ + ia_css_cmem_store_32( + ssid, ia_css_cell_regs_addr(cell_id) + + IPU_DEVICE_CELL_ICACHE_BASE_REG_ADDRESS, value); +} + +/* master port configuration */ + + +STORAGE_CLASS_INLINE void +ia_css_cell_set_master_segment_info_bits(unsigned int ssid, unsigned int cell, + unsigned int master, unsigned int segment, unsigned int value) +{ + unsigned int addr; + + assert(cell < ipu_device_cell_num_devices()); + assert(master < ipu_device_cell_num_masters(cell)); + assert(segment < ipu_device_cell_master_num_segments(cell, master)); + + addr = ipu_device_cell_memory_address(cell, 0); + addr += ipu_device_cell_master_info_reg(cell, master); + addr += segment * ipu_device_cell_master_stride(cell, master); + ia_css_cmem_store_32(ssid, addr, value); +} + +STORAGE_CLASS_INLINE void +ia_css_cell_set_master_segment_info_override_bits(unsigned int ssid, + unsigned int cell, + unsigned int master, unsigned int segment, unsigned int value) +{ + unsigned int addr; + + assert(cell < ipu_device_cell_num_devices()); + assert(master < ipu_device_cell_num_masters(cell)); + assert(segment < ipu_device_cell_master_num_segments(cell, master)); + + addr = ipu_device_cell_memory_address(cell, 0); + addr += ipu_device_cell_master_info_override_reg(cell, master); + addr += segment * ipu_device_cell_master_stride(cell, master); + ia_css_cmem_store_32(ssid, addr, value); +} + +STORAGE_CLASS_INLINE void +ia_css_cell_set_master_segment_base_address(unsigned int ssid, + unsigned int cell, + unsigned int master, unsigned int segment, unsigned int value) + +{ 
+ unsigned int addr; + + assert(cell < ipu_device_cell_num_devices()); + assert(master < ipu_device_cell_num_masters(cell)); + assert(segment < ipu_device_cell_master_num_segments(cell, master)); + + addr = ipu_device_cell_memory_address(cell, 0); + addr += ipu_device_cell_master_base_reg(cell, master); + addr += segment * ipu_device_cell_master_stride(cell, master); + ia_css_cmem_store_32(ssid, addr, value); +} + +STORAGE_CLASS_INLINE void +ia_css_cell_set_master_info_bits(unsigned int ssid, unsigned int cell, + unsigned int master, unsigned int value) +{ + unsigned int addr, s, stride, num_segments; + + assert(cell < ipu_device_cell_num_devices()); + assert(master < ipu_device_cell_num_masters(cell)); + + addr = ipu_device_cell_memory_address(cell, 0); + addr += ipu_device_cell_master_info_reg(cell, master); + stride = ipu_device_cell_master_stride(cell, master); + num_segments = ipu_device_cell_master_num_segments(cell, master); + for (s = 0; s < num_segments; s++) { + ia_css_cmem_store_32(ssid, addr, value); + addr += stride; + } +} + +STORAGE_CLASS_INLINE void +ia_css_cell_set_master_info_override_bits(unsigned int ssid, unsigned int cell, + unsigned int master, unsigned int value) +{ + unsigned int addr, s, stride, num_segments; + + assert(cell < ipu_device_cell_num_devices()); + assert(master < ipu_device_cell_num_masters(cell)); + + addr = ipu_device_cell_memory_address(cell, 0); + addr += ipu_device_cell_master_info_override_reg(cell, master); + stride = ipu_device_cell_master_stride(cell, master); + num_segments = ipu_device_cell_master_num_segments(cell, master); + for (s = 0; s < num_segments; s++) { + ia_css_cmem_store_32(ssid, addr, value); + addr += stride; + } +} + +STORAGE_CLASS_INLINE void +ia_css_cell_set_master_base_address(unsigned int ssid, unsigned int cell, + unsigned int master, unsigned int value) +{ + unsigned int addr, s, stride, num_segments, segment_size; + + assert(cell < ipu_device_cell_num_devices()); + assert(master < 
ipu_device_cell_num_masters(cell)); + + addr = ipu_device_cell_memory_address(cell, 0); + addr += ipu_device_cell_master_base_reg(cell, master); + stride = ipu_device_cell_master_stride(cell, master); + num_segments = ipu_device_cell_master_num_segments(cell, master); + segment_size = ipu_device_cell_master_segment_size(cell, master); + + for (s = 0; s < num_segments; s++) { + ia_css_cmem_store_32(ssid, addr, value); + addr += stride; + value += segment_size; + } +} + +#endif /* __IA_CSS_CELL_IMPL_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/client_pkg/interface/ia_css_client_pkg.h b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/client_pkg/interface/ia_css_client_pkg.h new file mode 100644 index 0000000000000..e8b0a48b27e33 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/client_pkg/interface/ia_css_client_pkg.h @@ -0,0 +1,60 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+*/ + +#ifndef __IA_CSS_CLIENT_PKG_H +#define __IA_CSS_CLIENT_PKG_H + +#include "type_support.h" +#include "ia_css_client_pkg_storage_class.h" +/* for ia_css_client_pkg_header_s (ptr only), ia_css_client_pkg_t */ +#include "ia_css_client_pkg_types.h" + +IA_CSS_CLIENT_PKG_STORAGE_CLASS_H +int ia_css_client_pkg_get_pg_manifest_offset_size( + const struct ia_css_client_pkg_header_s *client_pkg_header, + uint32_t *offset, + uint32_t *size); + +IA_CSS_CLIENT_PKG_STORAGE_CLASS_H +int ia_css_client_pkg_get_prog_list_offset_size( + const struct ia_css_client_pkg_header_s *client_pkg_header, + uint32_t *offset, + uint32_t *size); + +IA_CSS_CLIENT_PKG_STORAGE_CLASS_H +int ia_css_client_pkg_get_prog_desc_offset_size( + const struct ia_css_client_pkg_header_s *client_pkg_header, + uint32_t *offset, + uint32_t *size); + +IA_CSS_CLIENT_PKG_STORAGE_CLASS_H +int ia_css_client_pkg_get_prog_bin_entry_offset_size( + const ia_css_client_pkg_t *client_pkg, + uint32_t program_id, + uint32_t *offset, + uint32_t *size); + +IA_CSS_CLIENT_PKG_STORAGE_CLASS_H +int ia_css_client_pkg_get_indexed_prog_desc_entry_offset_size( + const ia_css_client_pkg_t *client_pkg, + uint32_t program_id, + uint32_t program_index, + uint32_t *offset, + uint32_t *size); + +#ifdef __INLINE_CLIENT_PKG__ +#include "ia_css_client_pkg_impl.h" +#endif + +#endif /* __IA_CSS_CLIENT_PKG_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/client_pkg/interface/ia_css_client_pkg_storage_class.h b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/client_pkg/interface/ia_css_client_pkg_storage_class.h new file mode 100644 index 0000000000000..98af98d5d824d --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/client_pkg/interface/ia_css_client_pkg_storage_class.h @@ -0,0 +1,28 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. 
+ * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ + +#ifndef __IA_CSS_CLIENT_PKG_STORAGE_CLASS_H +#define __IA_CSS_CLIENT_PKG_STORAGE_CLASS_H + +#include "storage_class.h" + +#ifndef __INLINE_CLIENT_PKG__ +#define IA_CSS_CLIENT_PKG_STORAGE_CLASS_H STORAGE_CLASS_EXTERN +#define IA_CSS_CLIENT_PKG_STORAGE_CLASS_C +#else +#define IA_CSS_CLIENT_PKG_STORAGE_CLASS_H STORAGE_CLASS_INLINE +#define IA_CSS_CLIENT_PKG_STORAGE_CLASS_C STORAGE_CLASS_INLINE +#endif + +#endif /* __IA_CSS_CLIENT_PKG_STORAGE_CLASS_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/client_pkg/interface/ia_css_client_pkg_types.h b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/client_pkg/interface/ia_css_client_pkg_types.h new file mode 100644 index 0000000000000..ff5bf01358f1a --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/client_pkg/interface/ia_css_client_pkg_types.h @@ -0,0 +1,44 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+*/ + +#ifndef __IA_CSS_CLIENT_PKG_TYPES_H +#define __IA_CSS_CLIENT_PKG_TYPES_H + +#include "type_support.h" + +typedef void ia_css_client_pkg_t; + +struct ia_css_client_pkg_header_s { + uint32_t prog_list_offset; + uint32_t prog_list_size; + uint32_t prog_desc_offset; + uint32_t prog_desc_size; + uint32_t pg_manifest_offset; + uint32_t pg_manifest_size; + uint32_t prog_bin_offset; + uint32_t prog_bin_size; +}; + +struct ia_css_client_pkg_prog_s { + uint32_t prog_id; + uint32_t prog_offset; + uint32_t prog_size; +}; + +struct ia_css_client_pkg_prog_list_s { + uint32_t prog_desc_count; + uint32_t prog_bin_count; +}; + +#endif /* __IA_CSS_CLIENT_PKG_TYPES_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/client_pkg/src/ia_css_client_pkg.c b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/client_pkg/src/ia_css_client_pkg.c new file mode 100644 index 0000000000000..0b2fd86d09f36 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/client_pkg/src/ia_css_client_pkg.c @@ -0,0 +1,20 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+*/ + +#ifdef __INLINE_CLIENT_PKG__ +#include "storage_class.h" +STORAGE_CLASS_INLINE int __ia_css_client_pkg_avoid_warning_on_empty_file(void) { return 0; } +#else /* __INLINE_CLIENT_PKG__ */ +#include "ia_css_client_pkg_impl.h" +#endif /* __INLINE_CLIENT_PKG__ */ diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/client_pkg/src/ia_css_client_pkg_impl.h b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/client_pkg/src/ia_css_client_pkg_impl.h new file mode 100644 index 0000000000000..b79e5de02b893 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/client_pkg/src/ia_css_client_pkg_impl.h @@ -0,0 +1,161 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+*/ + +#ifndef __IA_CSS_CLIENT_PKG_IMPL_H +#define __IA_CSS_CLIENT_PKG_IMPL_H + +#include "ia_css_client_pkg.h" +#include "ia_css_client_pkg_types.h" +#include "error_support.h" + +IA_CSS_CLIENT_PKG_STORAGE_CLASS_C +int ia_css_client_pkg_get_pg_manifest_offset_size( + const struct ia_css_client_pkg_header_s *client_pkg_header, + uint32_t *offset, + uint32_t *size) +{ + int ret_val = -1; + + verifjmpexit(NULL != client_pkg_header); + verifjmpexit(NULL != offset); + verifjmpexit(NULL != size); + + *(offset) = client_pkg_header->pg_manifest_offset; + *(size) = client_pkg_header->pg_manifest_size; + ret_val = 0; +EXIT: + return ret_val; +} + +IA_CSS_CLIENT_PKG_STORAGE_CLASS_C +int ia_css_client_pkg_get_prog_list_offset_size( + const struct ia_css_client_pkg_header_s *client_pkg_header, + uint32_t *offset, + uint32_t *size) +{ + int ret_val = -1; + + verifjmpexit(NULL != client_pkg_header); + verifjmpexit(NULL != offset); + verifjmpexit(NULL != size); + + *(offset) = client_pkg_header->prog_list_offset; + *(size) = client_pkg_header->prog_list_size; + ret_val = 0; +EXIT: + return ret_val; +} + +IA_CSS_CLIENT_PKG_STORAGE_CLASS_C +int ia_css_client_pkg_get_prog_desc_offset_size( + const struct ia_css_client_pkg_header_s *client_pkg_header, + uint32_t *offset, + uint32_t *size) +{ + int ret_val = -1; + + verifjmpexit(NULL != client_pkg_header); + verifjmpexit(NULL != offset); + verifjmpexit(NULL != size); + + *(offset) = client_pkg_header->prog_desc_offset; + *(size) = client_pkg_header->prog_desc_size; + ret_val = 0; +EXIT: + return ret_val; +} + +IA_CSS_CLIENT_PKG_STORAGE_CLASS_C +int ia_css_client_pkg_get_prog_bin_entry_offset_size( + const ia_css_client_pkg_t *client_pkg, + uint32_t program_id, + uint32_t *offset, + uint32_t *size) +{ + uint8_t i; + int ret_val = -1; + struct ia_css_client_pkg_header_s *client_pkg_header = NULL; + const struct ia_css_client_pkg_prog_list_s *pkg_prog_list = NULL; + const struct ia_css_client_pkg_prog_s *pkg_prog_bin_entry = NULL; + + 
verifjmpexit(NULL != client_pkg); + verifjmpexit(NULL != offset); + verifjmpexit(NULL != size); + + client_pkg_header = + (struct ia_css_client_pkg_header_s *)((uint8_t *)client_pkg); + pkg_prog_list = + (struct ia_css_client_pkg_prog_list_s *)((uint8_t *)client_pkg + + client_pkg_header->prog_list_offset); + pkg_prog_bin_entry = + (struct ia_css_client_pkg_prog_s *)((uint8_t *)pkg_prog_list + + sizeof(struct ia_css_client_pkg_prog_list_s)); + pkg_prog_bin_entry += pkg_prog_list->prog_desc_count; + + for (i = 0; i < pkg_prog_list->prog_bin_count; i++) { + if (program_id == pkg_prog_bin_entry->prog_id) { + *(offset) = pkg_prog_bin_entry->prog_offset; + *(size) = pkg_prog_bin_entry->prog_size; + ret_val = 0; + break; + } else if (0 == pkg_prog_bin_entry->prog_size) { + /* We can have a variable number of program descriptors. + * The first non-valid one will have size set to 0 + */ + break; + } + pkg_prog_bin_entry++; + } +EXIT: + return ret_val; +} + +IA_CSS_CLIENT_PKG_STORAGE_CLASS_C +int ia_css_client_pkg_get_indexed_prog_desc_entry_offset_size( + const ia_css_client_pkg_t *client_pkg, + uint32_t program_id, + uint32_t program_index, + uint32_t *offset, + uint32_t *size) +{ + int ret_val = -1; + struct ia_css_client_pkg_header_s *client_pkg_header = NULL; + const struct ia_css_client_pkg_prog_list_s *pkg_prog_list = NULL; + const struct ia_css_client_pkg_prog_s *pkg_prog_desc_entry = NULL; + + verifjmpexit(NULL != client_pkg); + verifjmpexit(NULL != offset); + verifjmpexit(NULL != size); + + client_pkg_header = + (struct ia_css_client_pkg_header_s *)((uint8_t *)client_pkg); + pkg_prog_list = + (struct ia_css_client_pkg_prog_list_s *)((uint8_t *)client_pkg + + client_pkg_header->prog_list_offset); + pkg_prog_desc_entry = + (struct ia_css_client_pkg_prog_s *)((uint8_t *)pkg_prog_list + + sizeof(struct ia_css_client_pkg_prog_list_s)); + + verifjmpexit(program_index < pkg_prog_list->prog_desc_count); + verifjmpexit(program_id == 
pkg_prog_desc_entry[program_index].prog_id); + verifjmpexit(pkg_prog_desc_entry[program_index].prog_size > 0); + *(offset) = pkg_prog_desc_entry[program_index].prog_offset; + *(size) = pkg_prog_desc_entry[program_index].prog_size; + ret_val = 0; + +EXIT: + return ret_val; +} + +#endif /* __IA_CSS_CLIENT_PKG_IMPL_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/config/psys/subsystem_bxtB0.mk b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/config/psys/subsystem_bxtB0.mk new file mode 100644 index 0000000000000..2f60853f00894 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/config/psys/subsystem_bxtB0.mk @@ -0,0 +1,109 @@ +# # # +# Support for Intel Camera Imaging ISP subsystem. +# Copyright (c) 2010 - 2018, Intel Corporation. +# +# This program is free software; you can redistribute it and/or modify it +# under the terms and conditions of the GNU General Public License, +# version 2, as published by the Free Software Foundation. +# +# This program is distributed in the hope it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for +# more details +# + +############################################################################ +# This file is used to specify versions and properties of PSYS firmware +# components. Please note that these are subsystem specific. System specific +# properties should go to system_$IPU_SYSVER.mk. Also the device versions +# should be defined under "devices" or should be taken from the SDK. 
+############################################################################ + +# Activate loading params and storing stats DDR<->REGs with DMA +PSYS_USE_ISA_DMA = 1 + +# Used in ISA module +PSYS_ISL_DPC_DPC_V2 = 0 + +# Assume OFS will be running concurrently with IPF, and prioritize according to rates of services on devproxy +CONCURRENT_OFS_IPF_PRIORITY_OPTIMIZATION_ENABLED = 1 + +# Use the DMA for terminal loading in Psys server +PSYS_SERVER_ENABLE_TERMINAL_LOAD_DMA = 1 + +HAS_GMEM = 1 +# use DMA NCI for OFS Service to reduce load in tproxy +DMA_NCI_IN_OFS_SERVICE = 1 + +# See HSD 1805169230 +HAS_FWDMA_ALIGNMENT_ISSUE_SIGHTING = 1 + +HAS_SPC = 1 +HAS_SPP0 = 1 +HAS_SPP1 = 1 +HAS_ISP0 = 1 +HAS_ISP1 = 1 +HAS_ISP2 = 1 +HAS_ISP3 = 1 + +# Specification for Psys server's fixed globals' locations +REGMEM_OFFSET = 0 # Starting from 0 +REGMEM_SIZE = 18 +REGMEM_WORD_BYTES = 4 +REGMEM_SIZE_BYTES = 72 +GPC_ISP_PERF_DATA_OFFSET = 72 # Taken from REGMEM_OFFSET + REGMEM_SIZE_BYTES +GPC_ISP_PERF_DATA_SIZE_BYTES = 80 +FW_LOAD_NO_OF_REQUEST_OFFSET = 152 # Taken from GPC_ISP_PERF_DATA_OFFSET + GPC_ISP_PERF_DATA_SIZE_BYTES +FW_LOAD_NO_OF_REQUEST_SIZE_BYTES = 4 +DISPATCHER_SCRATCH_SPACE_OFFSET = 156 # Taken from FW_LOAD_NO_OF_REQUEST_OFFSET + FW_LOAD_NO_OF_REQUEST_SIZE_BYTES + +# TODO use version naming scheme "v#" to decouple +# IPU_SYSVER from version. +PSYS_SERVER_MANIFEST_VERSION = bxtB0 +PSYS_RESOURCE_MODEL_VERSION = bxtB0 +PSYS_ACCESS_BLOCKER_VERSION = v1 + +# Disable support for PPG protocol to save codesize +PSYS_HAS_PPG_SUPPORT = 0 +# Disable support for late binding +PSYS_HAS_LATE_BINDING_SUPPORT = 0 + +# Specify PSYS server context spaces for caching context from DDR +PSYS_SERVER_NOF_CACHES = 4 +PSYS_SERVER_MAX_NUM_PROC_GRP = $(PSYS_SERVER_NOF_CACHES) +PSYS_SERVER_MAX_NUM_EXEC_PROC_GRP = 8 # Max PG's running, 4 running on Cores, 4 being updated on the host upon executing. 
+PSYS_SERVER_MAX_PROC_GRP_SIZE = 4052 +PSYS_SERVER_MAX_MANIFEST_SIZE = 3732 +PSYS_SERVER_MAX_CLIENT_PKG_SIZE = 2420 +PSYS_SERVER_MAX_BUFFER_SET_SIZE = 0 +PSYS_SERVER_MAX_NUMBER_OF_TERMINAL_SECTIONS = 88 +PSYS_SERVER_MAX_NUMBER_OF_TERMINAL_STORE_SECTIONS = 1 +# The caching scheme for this subsystem suits the method of queueing ahead separate PGs for frames in an interleaved +# fashion. As such there should be as many caches to support to heaviest two concurrent PGs, times two. This results +# in the following distribution of caches: two large ones for the maximum sized PG, two smaller ones for the +# second-largest sized PG. +PSYS_SERVER_CACHE_0_PROC_GRP_SIZE = $(PSYS_SERVER_MAX_PROC_GRP_SIZE) +PSYS_SERVER_CACHE_0_MANIFEST_SIZE = $(PSYS_SERVER_MAX_MANIFEST_SIZE) +PSYS_SERVER_CACHE_0_CLIENT_PKG_SIZE = $(PSYS_SERVER_MAX_CLIENT_PKG_SIZE) +PSYS_SERVER_CACHE_0_BUFFER_SET_SIZE = $(PSYS_SERVER_MAX_BUFFER_SET_SIZE) +PSYS_SERVER_CACHE_0_NUMBER_OF_TERMINAL_SECTIONS = $(PSYS_SERVER_MAX_NUMBER_OF_TERMINAL_SECTIONS) +PSYS_SERVER_CACHE_0_NUMBER_OF_TERMINAL_STORE_SECTIONS = $(PSYS_SERVER_MAX_NUMBER_OF_TERMINAL_STORE_SECTIONS) +PSYS_SERVER_CACHE_1_PROC_GRP_SIZE = $(PSYS_SERVER_CACHE_0_PROC_GRP_SIZE) +PSYS_SERVER_CACHE_1_MANIFEST_SIZE = $(PSYS_SERVER_CACHE_0_MANIFEST_SIZE) +PSYS_SERVER_CACHE_1_CLIENT_PKG_SIZE = $(PSYS_SERVER_CACHE_0_CLIENT_PKG_SIZE) +PSYS_SERVER_CACHE_1_BUFFER_SET_SIZE = $(PSYS_SERVER_CACHE_0_BUFFER_SET_SIZE) +PSYS_SERVER_CACHE_1_NUMBER_OF_TERMINAL_SECTIONS = $(PSYS_SERVER_CACHE_0_NUMBER_OF_TERMINAL_SECTIONS) +PSYS_SERVER_CACHE_1_NUMBER_OF_TERMINAL_STORE_SECTIONS = $(PSYS_SERVER_MAX_NUMBER_OF_TERMINAL_STORE_SECTIONS) +PSYS_SERVER_CACHE_2_PROC_GRP_SIZE = 1800 +PSYS_SERVER_CACHE_2_MANIFEST_SIZE = 2344 +PSYS_SERVER_CACHE_2_CLIENT_PKG_SIZE = 1240 +PSYS_SERVER_CACHE_2_BUFFER_SET_SIZE = 0 +PSYS_SERVER_CACHE_2_NUMBER_OF_TERMINAL_SECTIONS = 45 +PSYS_SERVER_CACHE_2_NUMBER_OF_TERMINAL_STORE_SECTIONS = $(PSYS_SERVER_MAX_NUMBER_OF_TERMINAL_STORE_SECTIONS) + 
+PSYS_SERVER_CACHE_3_PROC_GRP_SIZE = $(PSYS_SERVER_CACHE_2_PROC_GRP_SIZE) +PSYS_SERVER_CACHE_3_MANIFEST_SIZE = $(PSYS_SERVER_CACHE_2_MANIFEST_SIZE) +PSYS_SERVER_CACHE_3_CLIENT_PKG_SIZE = $(PSYS_SERVER_CACHE_2_CLIENT_PKG_SIZE) +PSYS_SERVER_CACHE_3_BUFFER_SET_SIZE = $(PSYS_SERVER_CACHE_2_BUFFER_SET_SIZE) +PSYS_SERVER_CACHE_3_NUMBER_OF_TERMINAL_SECTIONS = $(PSYS_SERVER_CACHE_2_NUMBER_OF_TERMINAL_SECTIONS) +PSYS_SERVER_CACHE_3_NUMBER_OF_TERMINAL_STORE_SECTIONS = $(PSYS_SERVER_MAX_NUMBER_OF_TERMINAL_STORE_SECTIONS) diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/config/system_bxtB0.mk b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/config/system_bxtB0.mk new file mode 100644 index 0000000000000..24d079b405167 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/config/system_bxtB0.mk @@ -0,0 +1,88 @@ +# # # +# Support for Intel Camera Imaging ISP subsystem. +# Copyright (c) 2010 - 2018, Intel Corporation. +# +# This program is free software; you can redistribute it and/or modify it +# under the terms and conditions of the GNU General Public License, +# version 2, as published by the Free Software Foundation. +# +# This program is distributed in the hope it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for +# more details +# + +LOGICAL_FW_INPUT_SYSTEM = input_system_system +LOGICAL_FW_PROCESSING_SYSTEM = processing_system_system +LOGICAL_FW_IPU_SYSTEM = css_broxton_system +LOGICAL_FW_ISP_SYSTEM = isp2601_default_system +SP_CONTROL_CELL = sp2601_control +SP_PROXY_CELL = sp2601_proxy +SP_FP_CELL = sp2601_fp +ISP_CELL = isp2601 +# The non-capital define isp2601 is used in the sdk, in order to distinguish +# between different isp versions the ISP_CELL_IDENTIFIER define is added. 
+ISP_CELL_IDENTIFIER = ISP2601 +HAS_IPFD = 1 +HAS_S2M_IN_ISYS_ISL_NONSOC_PATH = 0 +HAS_S2V_IN_ISYS_ISL_NONSOC_PATH = 1 +# ISL-IS non-SoC path has ISA without PAF and DPC-Pext support for IPU4-B0 +HAS_ISA_IN_ISYS_ISL = 1 +HAS_PAF_IN_ISYS_ISL = 0 +HAS_DPC_PEXT_IN_ISYS_ISL = 0 +HAS_PMA_IF = 0 + +HAS_MIPIBE_IN_PSYS_ISL = 1 + +HAS_VPLESS_SUPPORT = 0 + +DLI_SYSTEM = hive_isp_css_2600_system +RESOURCE_MANAGER_VERSION = v1 +MEM_RESOURCE_VALIDATION_ERROR = 0 +OFS_SCALER_1_4K_TILEY_422_SUPPORT= 1 +PROGDESC_ACC_SYMBOLS_VERSION = v1 +DEVPROXY_INTERFACE_VERSION = v1 +FW_ABI_IPU_TYPES_VERSION = v1 + +HAS_ONLINE_MODE_SUPPORT_IN_ISYS_PSYS = 0 + +MMU_INTERFACE_VERSION = v1 +DEVICE_ACCESS_VERSION = v2 +PSYS_SERVER_VERSION = v2 +PSYS_SERVER_LOADER_VERSION = v1 +PSYS_HW_VERSION = BXT_B0_HW + +# Enable FW_DMA for loading firmware +PSYS_SERVER_ENABLE_FW_LOAD_DMA = 1 + +NCI_SPA_VERSION = v1 +MANIFEST_TOOL_VERSION = v2 +PSYS_CON_MGR_TOOL_VERSION = v1 +# TODO: Should be removed after performance issues OTF are solved +PSYS_PROC_MGR_VERSION = v1 +IPU_RESOURCES_VERSION = v1 + +HAS_ACC_CLUSTER_PAF_PAL = 0 +HAS_ACC_CLUSTER_PEXT_PAL = 0 +HAS_ACC_CLUSTER_GBL_PAL = 1 + +# TODO use version naming scheme "v#" to decouple +# IPU_SYSVER from version. 
+PARAMBINTOOL_ISA_INIT_VERSION = bxtB0 + +# Select EQC2EQ version +# Version 1: uniform address space, equal EQ addresses regardless of EQC device +# Version 2: multiple addresses per EQ, depending on location of EQC device +EQC2EQ_VERSION = v1 + +# Select DMA instance for fw_load +FW_LOAD_DMA_INSTANCE = NCI_DMA_FW + +HAS_DMA_FW = 1 + +HAS_SIS = 0 +HAS_IDS = 1 + +PSYS_SERVER_ENABLE_TPROXY = 1 +PSYS_SERVER_ENABLE_DEVPROXY = 1 +NCI_OFS_VERSION = v1 diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/cpd/cpd_component/cpd_component.mk b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/cpd/cpd_component/cpd_component.mk new file mode 100644 index 0000000000000..8ecc3e42e55d3 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/cpd/cpd_component/cpd_component.mk @@ -0,0 +1,28 @@ +## +# # # +# Support for Intel Camera Imaging ISP subsystem. +# Copyright (c) 2010 - 2018, Intel Corporation. +# +# This program is free software; you can redistribute it and/or modify it +# under the terms and conditions of the GNU General Public License, +# version 2, as published by the Free Software Foundation. +# +# This program is distributed in the hope it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +# FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU General Public License for +# more details +# +## + +# MODULE is cpd/cpd_component + +CPD_DIR = $${MODULES_DIR}/cpd +CPD_COMPONENT_DIR = $${MODULES_DIR}/cpd/cpd_component +CPD_COMPONENT_INTERFACE = $(CPD_COMPONENT_DIR)/interface +CPD_COMPONENT_SOURCES = $(CPD_COMPONENT_DIR)/src + +CPD_COMPONENT_FILES = $(CPD_COMPONENT_SOURCES)/ia_css_cpd_component_create.c +CPD_COMPONENT_FILES += $(CPD_COMPONENT_SOURCES)/ia_css_cpd_component.c +CPD_COMPONENT_CPPFLAGS = -I$(CPD_COMPONENT_INTERFACE) +CPD_COMPONENT_CPPFLAGS += -I$(CPD_COMPONENT_SOURCES) +CPD_COMPONENT_CPPFLAGS += -I$(CPD_DIR) diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/cpd/cpd_component/interface/ia_css_cpd_component_types.h b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/cpd/cpd_component/interface/ia_css_cpd_component_types.h new file mode 100644 index 0000000000000..7ad3070b2fd72 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/cpd/cpd_component/interface/ia_css_cpd_component_types.h @@ -0,0 +1,90 @@ +/* + * Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+ */ + +#ifndef __IA_CSS_CPD_COMPONENT_TYPES_H +#define __IA_CSS_CPD_COMPONENT_TYPES_H + +/** @file + * This file contains datastructure related to generation of CPD file + */ + +#include "type_support.h" + +#define SIZE_OF_FW_ARCH_VERSION 7 +#define SIZE_OF_SYSTEM_VERSION 11 +#define SIZE_OF_COMPONENT_NAME 12 + +enum ia_css_cpd_component_endianness { + IA_CSSCPD_COMP_ENDIAN_RSVD, + IA_CSS_CPD_COMP_LITTLE_ENDIAN, + IA_CSS_CPD_COMP_BIG_ENDIAN +}; + +/** Module Data (components) Header + * Following data structure has been created using FAS section 5.25 + * Open : Should we add padding at the end of module directory + * (the component must be 512 aligned) + */ +typedef struct { + uint32_t header_size; + /**< Specifies endianness of the binary data */ + unsigned int endianness; + /**< fw_pkg_date is current date stored in 'binary decimal' + * representation e.g. 538248729 (0x20150619) + */ + uint32_t fw_pkg_date; + /**< hive_sdk_date is date of HIVE_SDK stored in + * 'binary decimal' representation + */ + uint32_t hive_sdk_date; + /**< compiler_date is date of ptools stored in + * 'binary decimal' representation + */ + uint32_t compiler_date; + /**< UNSCHED / SCHED / TARGET / CRUN */ + unsigned int target_platform_type; + /**< specifies the system version stored as string + * e.g. BXTB0_IPU4'\0' + */ + uint8_t system_version[SIZE_OF_SYSTEM_VERSION]; + /**< specifies fw architecture version e.g. 
for BXT CSS3.0'\0' */ + uint8_t fw_arch_version[SIZE_OF_FW_ARCH_VERSION]; + uint8_t rsvd[2]; +} ia_css_header_component_t; + +/** Module Data Directory = Directory Header + Directory Entry (0..n) + * Following two Data Structure has been taken from CSE Storage FAS (CPD desgin) + * Module Data Directory Header + */ +typedef struct { + uint32_t header_marker; + uint32_t number_of_entries; + uint8_t header_version; + uint8_t entry_version; + uint8_t header_length; /**< 0x10 (16) Fixed for this version*/ + uint8_t checksum; + uint32_t partition_name; +} ia_css_directory_header_component_t; + +/** Module Date Directory Entry + */ +typedef struct { + /**< character string describing the component name */ + uint8_t entry_name[SIZE_OF_COMPONENT_NAME]; + uint32_t offset; + uint32_t length; + uint32_t rsvd; /**< Must be 0 */ +} ia_css_directory_entry_component_t; + +#endif /* __IA_CSS_CPD_COMPONENT_TYPES_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/cpd/cpd_metadata/cpd_metadata.mk b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/cpd/cpd_metadata/cpd_metadata.mk new file mode 100644 index 0000000000000..ac78815dfbd8c --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/cpd/cpd_metadata/cpd_metadata.mk @@ -0,0 +1,29 @@ +## +# # # +# Support for Intel Camera Imaging ISP subsystem. +# Copyright (c) 2010 - 2018, Intel Corporation. +# +# This program is free software; you can redistribute it and/or modify it +# under the terms and conditions of the GNU General Public License, +# version 2, as published by the Free Software Foundation. +# +# This program is distributed in the hope it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +# FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU General Public License for +# more details +# +## + + +# MODULE is CPD UTL (Metadata File Extension) + +CPD_DIR = $${MODULES_DIR}/cpd/ +CPD_METADATA_DIR = $${MODULES_DIR}/cpd/cpd_metadata +CPD_METADATA_INTERFACE = $(CPD_METADATA_DIR)/interface +CPD_METADATA_SOURCES = $(CPD_METADATA_DIR)/src + +CPD_METADATA_FILES = $(CPD_METADATA_SOURCES)/ia_css_cpd_metadata_create.c +CPD_METADATA_FILES += $(CPD_METADATA_SOURCES)/ia_css_cpd_metadata.c +CPD_METADATA_CPPFLAGS = -I$(CPD_METADATA_INTERFACE) \ + -I$(CPD_METADATA_SOURCES) \ + -I$(CPD_DIR) diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/cpd/cpd_metadata/interface/ia_css_cpd_metadata_types.h b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/cpd/cpd_metadata/interface/ia_css_cpd_metadata_types.h new file mode 100644 index 0000000000000..a88c6aede08c5 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/cpd/cpd_metadata/interface/ia_css_cpd_metadata_types.h @@ -0,0 +1,111 @@ +/* + * Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+ */ + +#ifndef __IA_CSS_CPD_METADATA_TYPES_H +#define __IA_CSS_CPD_METADATA_TYPES_H + +/** @file + * This file contains data structures related to generation of + * metadata file extension + */ +#include + +/* As per v0.2 manifest document + * Header = Extension Type (4) + Extension Length (4) + + * iUnit Image Type (4) + Reserved (16) + */ +#define IPU_METADATA_HEADER_RSVD_SIZE 16 +#define IPU_METADATA_HEADER_FIELDS_SIZE 12 +#define IPU_METADATA_HEADER_SIZE \ + (IPU_METADATA_HEADER_FIELDS_SIZE + IPU_METADATA_HEADER_RSVD_SIZE) + +/* iUnit metadata extension tpye value */ +#define IPU_METADATA_EXTENSION_TYPE 16 + +/* Unique id for level 0 bootloader component */ +#define IA_CSS_IUNIT_BTLDR_ID 0 +/* Unique id for psys server program group component */ +#define IA_CSS_IUNIT_PSYS_SERVER_ID 1 +/* Unique id for isys server program group component */ +#define IA_CSS_IUNIT_ISYS_SERVER_ID 2 +/* Initial Identifier for client program group component */ +#define IA_CSS_IUNIT_CLIENT_ID 3 + +/* Use this to parse date from release version from the iUnit component + * e.g. 20150701 + */ +#define IA_CSS_IUNIT_COMP_DATE_SIZE 8 +/* offset of release version in program group binary + * e.g. 
release_version = "scci_gerrit_20150716_2117" + * In cpd file we only use date/version for the component + */ +#define IA_CSS_IUNIT_DATE_OFFSET 12 + +#define IPU_METADATA_HASH_KEY_SIZE 32 +#define IPU_METADATA_ATTRIBUTE_SIZE 16 +#define IA_CSE_METADATA_COMPONENT_ID_MAX 127 + +typedef enum { + IA_CSS_CPD_METADATA_IMAGE_TYPE_RESERVED, + IA_CSS_CPD_METADATA_IMAGE_TYPE_BOOTLOADER, + IA_CSS_CPD_METADATA_IMAGE_TYPE_MAIN_FIRMWARE +} ia_css_cpd_metadata_image_type_t; + +typedef enum { + IA_CSS_CPD_MAIN_FW_TYPE_RESERVED, + IA_CSS_CPD_MAIN_FW_TYPE_PSYS_SERVER, + IA_CSS_CPD_MAIN_FW_TYPE_ISYS_SERVER, + IA_CSS_CPD_MAIN_FW_TYPE_CLIENT +} ia_css_cpd_iunit_main_fw_type_t; + +/** Data structure for component specific information + * Following data structure has been taken from CSE Manifest v0.2 + */ +typedef struct { + /**< Component ID - unique for each component */ + uint32_t id; + /**< Size of the components */ + uint32_t size; + /**< Version/date of when the components is being generated/created */ + uint32_t version; + /**< SHA 256 Hash Key for component */ + uint8_t sha2_hash[IPU_METADATA_HASH_KEY_SIZE]; + /**< component sp entry point + * - Only valid for btldr/psys/isys server component + */ + uint32_t entry_point; + /**< component icache base address + * - Only valid for btldr/psys/isys server component + */ + uint32_t icache_base_offset; + /**< Resevred - must be 0 */ + uint8_t attributes[IPU_METADATA_ATTRIBUTE_SIZE]; +} ia_css_cpd_metadata_component_t; + +/** Data structure for Metadata File Extension Header + */ +typedef struct { + /**< Specifies the binary image type + * - could be bootloader or main firmware + */ + ia_css_cpd_metadata_image_type_t image_type; + /**< Number of components available in metadata file extension + * (For btldr always 1) + */ + uint32_t component_count; + /**< Component specific information */ + ia_css_cpd_metadata_component_t *components; +} ia_css_cpd_metadata_desc_t; + +#endif /* __IA_CSS_CPD_METADATA_TYPES_H */ diff --git 
a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/device_access/device_access.mk b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/device_access/device_access.mk new file mode 100644 index 0000000000000..1629d9af803b6 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/device_access/device_access.mk @@ -0,0 +1,40 @@ +# # # +# Support for Intel Camera Imaging ISP subsystem. +# Copyright (c) 2010 - 2018, Intel Corporation. +# +# This program is free software; you can redistribute it and/or modify it +# under the terms and conditions of the GNU General Public License, +# version 2, as published by the Free Software Foundation. +# +# This program is distributed in the hope it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for +# more details +# +# + +ifndef _DEVICE_ACCESS_MK_ +_DEVICE_ACCESS_MK_ = 1 + +# DEVICE_ACCESS_VERSION= +include $(MODULES_DIR)/config/system_$(IPU_SYSVER).mk + +DEVICE_ACCESS_DIR=$${MODULES_DIR}/device_access +DEVICE_ACCESS_INTERFACE=$(DEVICE_ACCESS_DIR)/interface +DEVICE_ACCESS_SOURCES=$(DEVICE_ACCESS_DIR)/src + +DEVICE_ACCESS_HOST_FILES = + +DEVICE_ACCESS_FW_FILES = + +DEVICE_ACCESS_HOST_CPPFLAGS = \ + -I$(DEVICE_ACCESS_INTERFACE) \ + -I$(DEVICE_ACCESS_SOURCES) + +DEVICE_ACCESS_FW_CPPFLAGS = \ + -I$(DEVICE_ACCESS_INTERFACE) \ + -I$(DEVICE_ACCESS_SOURCES) + +DEVICE_ACCESS_FW_CPPFLAGS += \ + -I$(DEVICE_ACCESS_SOURCES)/$(DEVICE_ACCESS_VERSION) +endif diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/device_access/interface/ia_css_cmem.h b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/device_access/interface/ia_css_cmem.h new file mode 100644 index 0000000000000..3dc47c29fcab7 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/device_access/interface/ia_css_cmem.h @@ -0,0 +1,58 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. 
+ * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ + +#ifndef __IA_CSS_CMEM_H +#define __IA_CSS_CMEM_H + +#include "type_support.h" +#include "storage_class.h" + +#ifdef __VIED_CELL +typedef unsigned int ia_css_cmem_address_t; +#else +#include +typedef vied_subsystem_address_t ia_css_cmem_address_t; +#endif + +STORAGE_CLASS_INLINE uint32_t +ia_css_cmem_load_32(unsigned int ssid, ia_css_cmem_address_t address); + +STORAGE_CLASS_INLINE void +ia_css_cmem_store_32(unsigned int ssid, ia_css_cmem_address_t address, + uint32_t value); + +STORAGE_CLASS_INLINE void +ia_css_cmem_load(unsigned int ssid, ia_css_cmem_address_t address, void *data, + unsigned int size); + +STORAGE_CLASS_INLINE void +ia_css_cmem_store(unsigned int ssid, ia_css_cmem_address_t address, + const void *data, unsigned int size); + +STORAGE_CLASS_INLINE void +ia_css_cmem_zero(unsigned int ssid, ia_css_cmem_address_t address, + unsigned int size); + +STORAGE_CLASS_INLINE ia_css_cmem_address_t +ia_css_cmem_get_cmem_addr_from_dmem(unsigned int base_addr, void *p); + +/* Include inline implementation */ + +#ifdef __VIED_CELL +#include "ia_css_cmem_cell.h" +#else +#include "ia_css_cmem_host.h" +#endif + +#endif /* __IA_CSS_CMEM_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/device_access/interface/ia_css_xmem.h b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/device_access/interface/ia_css_xmem.h new file mode 100644 index 0000000000000..de2b94d8af541 --- /dev/null +++ 
b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/device_access/interface/ia_css_xmem.h @@ -0,0 +1,65 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ + +#ifndef __IA_CSS_XMEM_H +#define __IA_CSS_XMEM_H + +#include "type_support.h" +#include "storage_class.h" + +#ifdef __VIED_CELL +typedef unsigned int ia_css_xmem_address_t; +#else +#include +typedef host_virtual_address_t ia_css_xmem_address_t; +#endif + +STORAGE_CLASS_INLINE uint8_t +ia_css_xmem_load_8(unsigned int mmid, ia_css_xmem_address_t address); + +STORAGE_CLASS_INLINE uint16_t +ia_css_xmem_load_16(unsigned int mmid, ia_css_xmem_address_t address); + +STORAGE_CLASS_INLINE uint32_t +ia_css_xmem_load_32(unsigned int mmid, ia_css_xmem_address_t address); + +STORAGE_CLASS_INLINE void +ia_css_xmem_load(unsigned int mmid, ia_css_xmem_address_t address, void *data, + unsigned int size); + +STORAGE_CLASS_INLINE void +ia_css_xmem_store_8(unsigned int mmid, ia_css_xmem_address_t address, + uint8_t value); + +STORAGE_CLASS_INLINE void +ia_css_xmem_store_16(unsigned int mmid, ia_css_xmem_address_t address, + uint16_t value); + +STORAGE_CLASS_INLINE void +ia_css_xmem_store_32(unsigned int mmid, ia_css_xmem_address_t address, + uint32_t value); + +STORAGE_CLASS_INLINE void +ia_css_xmem_store(unsigned int mmid, ia_css_xmem_address_t address, + const void *data, unsigned int bytes); + +/* Include inline implementation */ + +#ifdef __VIED_CELL +#include "ia_css_xmem_cell.h" +#else +#include 
"ia_css_xmem_host.h" +#endif + +#endif /* __IA_CSS_XMEM_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/device_access/interface/ia_css_xmem_cmem.h b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/device_access/interface/ia_css_xmem_cmem.h new file mode 100644 index 0000000000000..57aab3323c739 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/device_access/interface/ia_css_xmem_cmem.h @@ -0,0 +1,35 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+*/ + +#ifndef __IA_CSS_XMEM_CMEM_H +#define __IA_CSS_XMEM_CMEM_H + +#include "ia_css_cmem.h" +#include "ia_css_xmem.h" + +/* Copy data from xmem to cmem, e.g., from a program in DDR to a cell's DMEM */ +/* This may also be implemented using DMA */ + +STORAGE_CLASS_INLINE void +ia_css_xmem_to_cmem_copy( + unsigned int mmid, + unsigned int ssid, + ia_css_xmem_address_t src, + ia_css_cmem_address_t dst, + unsigned int size); + +/* include inline implementation */ +#include "ia_css_xmem_cmem_impl.h" + +#endif /* __IA_CSS_XMEM_CMEM_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/device_access/src/ia_css_cmem_host.h b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/device_access/src/ia_css_cmem_host.h new file mode 100644 index 0000000000000..22799e67214c1 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/device_access/src/ia_css_cmem_host.h @@ -0,0 +1,121 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ + +#ifndef __IA_CSS_CMEM_HOST_H +#define __IA_CSS_CMEM_HOST_H + +/* This file is an inline implementation for the interface ia_css_cmem.h + * and should only be included there. 
*/ + +#include "assert_support.h" +#include "misc_support.h" + +STORAGE_CLASS_INLINE uint32_t +ia_css_cmem_load_32(unsigned int ssid, ia_css_cmem_address_t address) +{ + /* Address has to be word aligned */ + assert(0 == address % 4); + return vied_subsystem_load_32(ssid, address); +} + +STORAGE_CLASS_INLINE uint32_t +ia_css_cond_cmem_load_32(bool cond, unsigned int ssid, + ia_css_cmem_address_t address) +{ + /* Address has to be word aligned */ + assert(0 == address % 4); + if (cond) + return vied_subsystem_load_32(ssid, address); + else + return 0; +} + +STORAGE_CLASS_INLINE void +ia_css_cmem_store_32(unsigned int ssid, ia_css_cmem_address_t address, + uint32_t data) +{ + /* Address has to be word aligned */ + assert(0 == address % 4); + vied_subsystem_store_32(ssid, address, data); +} + +STORAGE_CLASS_INLINE void +ia_css_cond_cmem_store_32(bool cond, unsigned int ssid, + ia_css_cmem_address_t address, uint32_t data) +{ + /* Address has to be word aligned */ + assert(0 == address % 4); + if (cond) + vied_subsystem_store_32(ssid, address, data); +} + +STORAGE_CLASS_INLINE void +ia_css_cmem_load(unsigned int ssid, ia_css_cmem_address_t address, void *data, + unsigned int size) +{ + uint32_t *data32 = (uint32_t *)data; + uint32_t end = address + size; + + assert(size % 4 == 0); + assert(address % 4 == 0); + assert((long)data % 4 == 0); + + while (address != end) { + *data32 = ia_css_cmem_load_32(ssid, address); + address += 4; + data32 += 1; + } +} + +STORAGE_CLASS_INLINE void +ia_css_cmem_store(unsigned int ssid, ia_css_cmem_address_t address, + const void *data, unsigned int size) +{ + uint32_t *data32 = (uint32_t *)data; + uint32_t end = address + size; + + assert(size % 4 == 0); + assert(address % 4 == 0); + assert((long)data % 4 == 0); + + while (address != end) { + ia_css_cmem_store_32(ssid, address, *data32); + address += 4; + data32 += 1; + } +} + +STORAGE_CLASS_INLINE void +ia_css_cmem_zero(unsigned int ssid, ia_css_cmem_address_t address, + unsigned int 
size) +{ + uint32_t end = address + size; + + assert(size % 4 == 0); + assert(address % 4 == 0); + + while (address != end) { + ia_css_cmem_store_32(ssid, address, 0); + address += 4; + } +} + +STORAGE_CLASS_INLINE ia_css_cmem_address_t +ia_css_cmem_get_cmem_addr_from_dmem(unsigned int base_addr, void *p) +{ + NOT_USED(base_addr); + return (ia_css_cmem_address_t)(uintptr_t)p; +} + +#endif /* __IA_CSS_CMEM_HOST_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/device_access/src/ia_css_xmem_cmem_impl.h b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/device_access/src/ia_css_xmem_cmem_impl.h new file mode 100644 index 0000000000000..adc178b75059a --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/device_access/src/ia_css_xmem_cmem_impl.h @@ -0,0 +1,79 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+*/ + +#ifndef __IA_CSS_XMEM_CMEM_IMPL_H +#define __IA_CSS_XMEM_CMEM_IMPL_H + +#include "ia_css_xmem_cmem.h" + +#include "ia_css_cmem.h" +#include "ia_css_xmem.h" + +/* Copy data from xmem to cmem, e.g., from a program in DDR to a cell's DMEM */ +/* This may also be implemented using DMA */ + +STORAGE_CLASS_INLINE void +ia_css_xmem_to_cmem_copy( + unsigned int mmid, + unsigned int ssid, + ia_css_xmem_address_t src, + ia_css_cmem_address_t dst, + unsigned int size) +{ + /* copy from ddr to subsystem, e.g., cell dmem */ + ia_css_cmem_address_t end = dst + size; + + assert(size % 4 == 0); + assert((uintptr_t) dst % 4 == 0); + assert((uintptr_t) src % 4 == 0); + + while (dst != end) { + uint32_t data; + + data = ia_css_xmem_load_32(mmid, src); + ia_css_cmem_store_32(ssid, dst, data); + dst += 4; + src += 4; + } +} + +/* Copy data from cmem to xmem */ + +STORAGE_CLASS_INLINE void +ia_css_cmem_to_xmem_copy( + unsigned int mmid, + unsigned int ssid, + ia_css_cmem_address_t src, + ia_css_xmem_address_t dst, + unsigned int size) +{ + /* copy from ddr to subsystem, e.g., cell dmem */ + ia_css_xmem_address_t end = dst + size; + + assert(size % 4 == 0); + assert((uintptr_t) dst % 4 == 0); + assert((uintptr_t) src % 4 == 0); + + while (dst != end) { + uint32_t data; + + data = ia_css_cmem_load_32(mmid, src); + ia_css_xmem_store_32(ssid, dst, data); + dst += 4; + src += 4; + } +} + + +#endif /* __IA_CSS_XMEM_CMEM_IMPL_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/device_access/src/ia_css_xmem_host.h b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/device_access/src/ia_css_xmem_host.h new file mode 100644 index 0000000000000..d94991fc11143 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/device_access/src/ia_css_xmem_host.h @@ -0,0 +1,84 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. 
+ * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ + +#ifndef __IA_CSS_XMEM_HOST_H +#define __IA_CSS_XMEM_HOST_H + +#include "ia_css_xmem.h" +#include +#include "assert_support.h" +#include + +STORAGE_CLASS_INLINE uint8_t +ia_css_xmem_load_8(unsigned int mmid, ia_css_xmem_address_t address) +{ + return shared_memory_load_8(mmid, address); +} + +STORAGE_CLASS_INLINE uint16_t +ia_css_xmem_load_16(unsigned int mmid, ia_css_xmem_address_t address) +{ + /* Address has to be half-word aligned */ + assert(0 == (uintptr_t) address % 2); + return shared_memory_load_16(mmid, address); +} + +STORAGE_CLASS_INLINE uint32_t +ia_css_xmem_load_32(unsigned int mmid, ia_css_xmem_address_t address) +{ + /* Address has to be word aligned */ + assert(0 == (uintptr_t) address % 4); + return shared_memory_load_32(mmid, address); +} + +STORAGE_CLASS_INLINE void +ia_css_xmem_load(unsigned int mmid, ia_css_xmem_address_t address, void *data, + unsigned int size) +{ + shared_memory_load(mmid, address, data, size); +} + +STORAGE_CLASS_INLINE void +ia_css_xmem_store_8(unsigned int mmid, ia_css_xmem_address_t address, + uint8_t value) +{ + shared_memory_store_8(mmid, address, value); +} + +STORAGE_CLASS_INLINE void +ia_css_xmem_store_16(unsigned int mmid, ia_css_xmem_address_t address, + uint16_t value) +{ + /* Address has to be half-word aligned */ + assert(0 == (uintptr_t) address % 2); + shared_memory_store_16(mmid, address, value); +} + +STORAGE_CLASS_INLINE void +ia_css_xmem_store_32(unsigned int mmid, ia_css_xmem_address_t address, + uint32_t value) +{ + /* Address has to be word 
aligned */ + assert(0 == (uintptr_t) address % 4); + shared_memory_store_32(mmid, address, value); +} + +STORAGE_CLASS_INLINE void +ia_css_xmem_store(unsigned int mmid, ia_css_xmem_address_t address, + const void *data, unsigned int bytes) +{ + shared_memory_store(mmid, address, data, bytes); +} + +#endif /* __IA_CSS_XMEM_HOST_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/devices/interface/bxtB0/ipu_device_buttress_properties_struct.h b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/devices/interface/bxtB0/ipu_device_buttress_properties_struct.h new file mode 100644 index 0000000000000..5102f6e44d2f6 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/devices/interface/bxtB0/ipu_device_buttress_properties_struct.h @@ -0,0 +1,68 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+*/ + +#ifndef __IPU_DEVICE_BUTTRESS_PROPERTIES_STRUCT_H +#define __IPU_DEVICE_BUTTRESS_PROPERTIES_STRUCT_H + +/* Destination values for master port 0 and bitfield "request_dest" */ +enum cio_M0_btrs_dest { + DEST_IS_BUT_REGS = 0, + DEST_IS_DDR, + RESERVED, + DEST_IS_SUBSYSTEM, + N_BTRS_DEST +}; + +/* Bit-field positions for M0 info bits */ +enum ia_css_info_bits_m0_pos { + IA_CSS_INFO_BITS_M0_SNOOPABLE_POS = 0, + IA_CSS_INFO_BITS_M0_IMR_DESTINED_POS = 1, + IA_CSS_INFO_BITS_M0_REQUEST_DEST_POS = 4 +}; + +#define IA_CSS_INFO_BITS_M0_DDR \ + (DEST_IS_DDR << IA_CSS_INFO_BITS_M0_REQUEST_DEST_POS) +#define IA_CSS_INFO_BITS_M0_SNOOPABLE (1 << IA_CSS_INFO_BITS_M0_SNOOPABLE_POS) + +/* Info bits as expected by the buttress */ +/* Deprecated because bit fields are not portable */ + +/* For master port 0*/ +union cio_M0_t { + struct { + unsigned int snoopable : 1; + unsigned int imr_destined : 1; + unsigned int spare0 : 2; + unsigned int request_dest : 2; + unsigned int spare1 : 26; + } as_bitfield; + unsigned int as_word; +}; + +/* For master port 1*/ +union cio_M1_t { + struct { + unsigned int spare0 : 1; + unsigned int deadline_pointer : 1; + unsigned int reserved : 1; + unsigned int zlw : 1; + unsigned int stream_id : 4; + unsigned int address_swizzling : 1; + unsigned int spare1 : 23; + } as_bitfield; + unsigned int as_word; +}; + + +#endif /* __IPU_DEVICE_BUTTRESS_PROPERTIES_STRUCT_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/devices/interface/ipu_device_cell_properties.h b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/devices/interface/ipu_device_cell_properties.h new file mode 100644 index 0000000000000..e6e1e9dcbe80c --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/devices/interface/ipu_device_cell_properties.h @@ -0,0 +1,76 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. 
+ * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ + +#ifndef __IPU_DEVICE_CELL_PROPERTIES_H +#define __IPU_DEVICE_CELL_PROPERTIES_H + +#include "storage_class.h" +#include "ipu_device_cell_type_properties.h" + +STORAGE_CLASS_INLINE unsigned int +ipu_device_cell_num_devices(void); + +STORAGE_CLASS_INLINE unsigned int +ipu_device_cell_num_memories(const unsigned int cell_id); + +STORAGE_CLASS_INLINE unsigned int +ipu_device_cell_memory_size(const unsigned int cell_id, + const unsigned int mem_id); + +STORAGE_CLASS_INLINE unsigned int +ipu_device_cell_memory_address(const unsigned int cell_id, + const unsigned int mem_id); + +STORAGE_CLASS_INLINE unsigned int +ipu_device_cell_databus_memory_address(const unsigned int cell_id, + const unsigned int mem_id); + +STORAGE_CLASS_INLINE unsigned int +ipu_device_cell_num_masters(const unsigned int cell_id); + +STORAGE_CLASS_INLINE unsigned int +ipu_device_cell_master_segment_bits(const unsigned int cell_id, + const unsigned int master_id); + +STORAGE_CLASS_INLINE unsigned int +ipu_device_cell_master_num_segments(const unsigned int cell_id, + const unsigned int master_id); + +STORAGE_CLASS_INLINE unsigned int +ipu_device_cell_master_segment_size(const unsigned int cell_id, + const unsigned int master_id); + +STORAGE_CLASS_INLINE unsigned int +ipu_device_cell_master_stride(const unsigned int cell_id, + const unsigned int master_id); + +STORAGE_CLASS_INLINE unsigned int +ipu_device_cell_master_base_reg(const unsigned int cell_id, + const unsigned int master_id); + +STORAGE_CLASS_INLINE unsigned int 
+ipu_device_cell_master_info_reg(const unsigned int cell_id, + const unsigned int master_id); + +STORAGE_CLASS_INLINE unsigned int +ipu_device_cell_icache_align(unsigned int cell_id); + +#ifdef C_RUN +STORAGE_CLASS_INLINE int +ipu_device_cell_id_crun(int cell_id); +#endif + +#include "ipu_device_cell_properties_func.h" + +#endif /* __IPU_DEVICE_CELL_PROPERTIES_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/devices/interface/ipu_device_cell_properties_func.h b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/devices/interface/ipu_device_cell_properties_func.h new file mode 100644 index 0000000000000..481b0504a2378 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/devices/interface/ipu_device_cell_properties_func.h @@ -0,0 +1,164 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+*/ + +#ifndef __IPU_DEVICE_CELL_PROPERTIES_FUNC_H +#define __IPU_DEVICE_CELL_PROPERTIES_FUNC_H + +/* define properties for all cells uses in ISYS */ + +#include "ipu_device_cell_properties_impl.h" +#include "ipu_device_cell_devices.h" +#include "assert_support.h" +#include "storage_class.h" + +enum {IA_CSS_CELL_MASTER_ADDRESS_WIDTH = 32}; + +STORAGE_CLASS_INLINE unsigned int +ipu_device_cell_num_devices(void) +{ + return NUM_CELLS; +} + +STORAGE_CLASS_INLINE unsigned int +ipu_device_cell_num_memories(const unsigned int cell_id) +{ + assert(cell_id < NUM_CELLS); + return ipu_device_cell_properties[cell_id].type_properties->count-> + num_memories; +} + +STORAGE_CLASS_INLINE unsigned int +ipu_device_cell_memory_size(const unsigned int cell_id, + const unsigned int mem_id) +{ + assert(cell_id < NUM_CELLS); + assert(mem_id < ipu_device_cell_num_memories(cell_id)); + return ipu_device_cell_properties[cell_id].type_properties-> + mem_size[mem_id]; +} + +STORAGE_CLASS_INLINE unsigned int +ipu_device_cell_memory_address(const unsigned int cell_id, + const unsigned int mem_id) +{ + assert(cell_id < NUM_CELLS); + assert(mem_id < ipu_device_cell_num_memories(cell_id)); + return ipu_device_cell_properties[cell_id].mem_address[mem_id]; +} + +STORAGE_CLASS_INLINE unsigned int +ipu_device_cell_databus_memory_address(const unsigned int cell_id, + const unsigned int mem_id) +{ + assert(cell_id < NUM_CELLS); + assert(mem_id < ipu_device_cell_num_memories(cell_id)); + assert(mem_id != 0); + return ipu_device_cell_properties[cell_id].mem_databus_address[mem_id]; +} + +STORAGE_CLASS_INLINE unsigned int +ipu_device_cell_num_masters(const unsigned int cell_id) +{ + assert(cell_id < NUM_CELLS); + return ipu_device_cell_properties[cell_id].type_properties->count-> + num_master_ports; +} + +STORAGE_CLASS_INLINE unsigned int +ipu_device_cell_master_segment_bits(const unsigned int cell_id, + const unsigned int master_id) +{ + assert(cell_id < NUM_CELLS); + assert(master_id < 
ipu_device_cell_num_masters(cell_id)); + return ipu_device_cell_properties[cell_id].type_properties-> + master[master_id].segment_bits; +} + +STORAGE_CLASS_INLINE unsigned int +ipu_device_cell_master_num_segments(const unsigned int cell_id, + const unsigned int master_id) +{ + return 1u << ipu_device_cell_master_segment_bits(cell_id, master_id); +} + +STORAGE_CLASS_INLINE unsigned int +ipu_device_cell_master_segment_size(const unsigned int cell_id, + const unsigned int master_id) +{ + return 1u << (IA_CSS_CELL_MASTER_ADDRESS_WIDTH - + ipu_device_cell_master_segment_bits(cell_id, master_id)); +} + +STORAGE_CLASS_INLINE unsigned int +ipu_device_cell_master_stride(const unsigned int cell_id, + const unsigned int master_id) +{ + assert(cell_id < NUM_CELLS); + assert(master_id < ipu_device_cell_num_masters(cell_id)); + return + ipu_device_cell_properties[cell_id].type_properties-> + master[master_id].stride; +} + +STORAGE_CLASS_INLINE unsigned int +ipu_device_cell_master_base_reg(const unsigned int cell_id, + const unsigned int master_id) +{ + assert(cell_id < NUM_CELLS); + assert(master_id < ipu_device_cell_num_masters(cell_id)); + return + ipu_device_cell_properties[cell_id].type_properties-> + master[master_id].base_address_register; +} + +STORAGE_CLASS_INLINE unsigned int +ipu_device_cell_master_info_reg(const unsigned int cell_id, + const unsigned int master_id) +{ + assert(cell_id < NUM_CELLS); + assert(master_id < ipu_device_cell_num_masters(cell_id)); + return + ipu_device_cell_properties[cell_id].type_properties-> + master[master_id].info_bits_register; +} + +STORAGE_CLASS_INLINE unsigned int +ipu_device_cell_master_info_override_reg(const unsigned int cell_id, + const unsigned int master_id) +{ + assert(cell_id < NUM_CELLS); + assert(master_id < ipu_device_cell_num_masters(cell_id)); + return + ipu_device_cell_properties[cell_id].type_properties-> + master[master_id].info_override_bits_register; +} + +STORAGE_CLASS_INLINE unsigned int 
+ipu_device_cell_icache_align(unsigned int cell_id) +{ + assert(cell_id < NUM_CELLS); + return ipu_device_cell_properties[cell_id].type_properties->count-> + icache_align; +} + +#ifdef C_RUN +STORAGE_CLASS_INLINE int +ipu_device_cell_id_crun(int cell_id) +{ + assert(cell_id < NUM_CELLS); + return ipu_device_map_cell_id_to_crun_proc_id[cell_id]; +} +#endif + +#endif /* __IPU_DEVICE_CELL_PROPERTIES_FUNC_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/devices/interface/ipu_device_cell_properties_struct.h b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/devices/interface/ipu_device_cell_properties_struct.h new file mode 100644 index 0000000000000..63397dc0b7fe6 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/devices/interface/ipu_device_cell_properties_struct.h @@ -0,0 +1,51 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+*/ + +#ifndef __IPU_DEVICE_CELL_PROPERTIES_STRUCT_H +#define __IPU_DEVICE_CELL_PROPERTIES_STRUCT_H + +/* definitions for all cell types */ + +struct ipu_device_cell_count_s { + unsigned int num_memories; + unsigned int num_master_ports; + unsigned int num_stall_bits; + unsigned int icache_align; +}; + +struct ipu_device_cell_master_properties_s { + unsigned int segment_bits; + unsigned int stride; /* offset to register of next segment */ + unsigned int base_address_register; /* address of first base address + register */ + unsigned int info_bits_register; + unsigned int info_override_bits_register; +}; + +struct ipu_device_cell_type_properties_s { + const struct ipu_device_cell_count_s *count; + const struct ipu_device_cell_master_properties_s *master; + const unsigned int *reg_offset; /* offsets of registers, some depend + on cell type */ + const unsigned int *mem_size; +}; + +struct ipu_device_cell_properties_s { + const struct ipu_device_cell_type_properties_s *type_properties; + const unsigned int *mem_address; + const unsigned int *mem_databus_address; + /* const cell_master_port_properties_s* master_port_properties; */ +}; + +#endif /* __IPU_DEVICE_CELL_PROPERTIES_STRUCT_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/devices/interface/ipu_device_cell_type_properties.h b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/devices/interface/ipu_device_cell_type_properties.h new file mode 100644 index 0000000000000..72caed3eef0c9 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/devices/interface/ipu_device_cell_type_properties.h @@ -0,0 +1,69 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. 
+ * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ + +#ifndef __IPU_DEVICE_CELL_TYPE_PROPERTIES_H +#define __IPU_DEVICE_CELL_TYPE_PROPERTIES_H + +#define IPU_DEVICE_INVALID_MEM_ADDRESS 0xFFFFFFFF + +enum ipu_device_cell_stat_ctrl_bit { + IPU_DEVICE_CELL_STAT_CTRL_RESET_BIT = 0, + IPU_DEVICE_CELL_STAT_CTRL_START_BIT = 1, + IPU_DEVICE_CELL_STAT_CTRL_RUN_BIT = 3, + IPU_DEVICE_CELL_STAT_CTRL_READY_BIT = 5, + IPU_DEVICE_CELL_STAT_CTRL_SLEEP_BIT = 6, + IPU_DEVICE_CELL_STAT_CTRL_STALL_BIT = 7, + IPU_DEVICE_CELL_STAT_CTRL_CLEAR_IRQ_MASK_FLAG_BIT = 8, + IPU_DEVICE_CELL_STAT_CTRL_BROKEN_IRQ_MASK_FLAG_BIT = 9, + IPU_DEVICE_CELL_STAT_CTRL_READY_IRQ_MASK_FLAG_BIT = 10, + IPU_DEVICE_CELL_STAT_CTRL_SLEEP_IRQ_MASK_FLAG_BIT = 11, + IPU_DEVICE_CELL_STAT_CTRL_INVALIDATE_ICACHE_BIT = 12, + IPU_DEVICE_CELL_STAT_CTRL_ICACHE_ENABLE_PREFETCH_BIT = 13 +}; + +enum ipu_device_cell_reg_addr { + IPU_DEVICE_CELL_STAT_CTRL_REG_ADDRESS = 0x0, + IPU_DEVICE_CELL_START_PC_REG_ADDRESS = 0x4, + IPU_DEVICE_CELL_ICACHE_BASE_REG_ADDRESS = 0x10, + IPU_DEVICE_CELL_ICACHE_INFO_BITS_REG_ADDRESS = 0x14 +}; + +enum ipu_device_cell_reg { + IPU_DEVICE_CELL_STAT_CTRL_REG, + IPU_DEVICE_CELL_START_PC_REG, + IPU_DEVICE_CELL_ICACHE_BASE_REG, + IPU_DEVICE_CELL_DEBUG_PC_REG, + IPU_DEVICE_CELL_STALL_REG, + IPU_DEVICE_CELL_NUM_REGS +}; + +enum ipu_device_cell_mem { + IPU_DEVICE_CELL_REGS, /* memory id of registers */ + IPU_DEVICE_CELL_PMEM, /* memory id of pmem */ + IPU_DEVICE_CELL_DMEM, /* memory id of dmem */ + IPU_DEVICE_CELL_BAMEM, /* memory id of bamem */ + IPU_DEVICE_CELL_VMEM /* memory id of vmem */ +}; +#define IPU_DEVICE_CELL_NUM_MEMORIES (IPU_DEVICE_CELL_VMEM + 1) + +enum ipu_device_cell_master { + IPU_DEVICE_CELL_MASTER_ICACHE, /* master port id of icache */ + IPU_DEVICE_CELL_MASTER_QMEM, + 
IPU_DEVICE_CELL_MASTER_CMEM, + IPU_DEVICE_CELL_MASTER_XMEM, + IPU_DEVICE_CELL_MASTER_XVMEM +}; +#define IPU_DEVICE_CELL_MASTER_NUM_MASTERS (IPU_DEVICE_CELL_MASTER_XVMEM + 1) + +#endif /* __IPU_DEVICE_CELL_TYPE_PROPERTIES_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/devices/interface/ipu_device_gp_properties.h b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/devices/interface/ipu_device_gp_properties.h new file mode 100644 index 0000000000000..fd0c5a586c949 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/devices/interface/ipu_device_gp_properties.h @@ -0,0 +1,26 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+*/ + +#ifndef __IPU_DEVICE_GP_PROPERTIES_H +#define __IPU_DEVICE_GP_PROPERTIES_H + +#include "storage_class.h" +#include "ipu_device_gp_properties_types.h" + +STORAGE_CLASS_INLINE unsigned int +ipu_device_gp_mux_addr(const unsigned int device_id, const unsigned int mux_id); + +#include "ipu_device_gp_properties_func.h" + +#endif /* __IPU_DEVICE_GP_PROPERTIES_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/devices/interface/ipu_device_gp_properties_types.h b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/devices/interface/ipu_device_gp_properties_types.h new file mode 100644 index 0000000000000..3032273696eab --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/devices/interface/ipu_device_gp_properties_types.h @@ -0,0 +1,103 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+*/ + +#ifndef __IPU_DEVICE_GP_PROPERTIES_TYPES_H +#define __IPU_DEVICE_GP_PROPERTIES_TYPES_H + +enum ipu_device_gp_isa_value { + /* ISA_MUX_SEL options */ + IPU_DEVICE_GP_ISA_MUX_SEL_ICA = 0, /* Enable output after FF ICA */ + IPU_DEVICE_GP_ISA_MUX_SEL_LSC = 1, /* Enable output after FF LSC */ + IPU_DEVICE_GP_ISA_MUX_SEL_DPC = 2, /* Enable output after FF DPC */ + /* ICA stream block options */ + /* UNBLOCK signal received from ICA */ + IPU_DEVICE_GP_ISA_ICA_UNBLOCK = 0, + /* BLOCK signal received from ICA */ + IPU_DEVICE_GP_ISA_ICA_BLOCK = 1, + /* LSC stream block options */ + /* UNBLOCK signal received from LSC */ + IPU_DEVICE_GP_ISA_LSC_UNBLOCK = 0, + /* BLOCK signal received from LSC */ + IPU_DEVICE_GP_ISA_LSC_BLOCK = 1, + /* DPC stream block options */ + /* UNBLOCK signal received from DPC */ + IPU_DEVICE_GP_ISA_DPC_UNBLOCK = 0, + /* BLOCK signal received from DPC */ + IPU_DEVICE_GP_ISA_DPC_BLOCK = 1, + /* Defines needed only for bxtB0 */ + /* ISA_AWB_MUX_SEL options */ + /* Input Correction input */ + IPU_DEVICE_GP_ISA_AWB_MUX_SEL_ICA = 0, + /* DPC input */ + IPU_DEVICE_GP_ISA_AWB_MUX_SEL_DPC = 1, + /* ISA_AWB_MUX_INPUT_CORR_PORT_BLK options */ + /* UNBLOCK Input Correction input */ + IPU_DEVICE_GP_ISA_AWB_MUX_ICA_UNBLOCK = 0, + /* BLOCK Input Correction input */ + IPU_DEVICE_GP_ISA_AWB_MUX_ICA_BLOCK = 1, + /* ISA_AWB_MUX_DPC_PORT_BLK options */ + /* UNBLOCK DPC input */ + IPU_DEVICE_GP_ISA_AWB_MUX_DPC_UNBLOCK = 0, + /* BLOCK DPC input */ + IPU_DEVICE_GP_ISA_AWB_MUX_DPC_BLOCK = 1, + + /* PAF STRM options */ + /* Disable streaming to PAF FF */ + IPU_DEVICE_GP_ISA_PAF_DISABLE_STREAM = 0, + /* Enable stream0 to PAF FF */ + IPU_DEVICE_GP_ISA_PAF_ENABLE_STREAM0 = 1, + /* Enable stream1 to PAF FF */ + IPU_DEVICE_GP_ISA_PAF_ENABLE_STREAM1 = 2, + /* PAF SRC SEL options */ + /* External channel input */ + IPU_DEVICE_GP_ISA_PAF_SRC_SEL0 = 0, + /* DPC extracted input */ + IPU_DEVICE_GP_ISA_PAF_SRC_SEL1 = 1, + /* PAF_GDDPC_BLK options */ + IPU_DEVICE_GP_ISA_PAF_GDDPC_PORT_BLK0 = 0, + 
IPU_DEVICE_GP_ISA_PAF_GDDPC_PORT_BLK1 = 1, + /* PAF ISA STR_PORT options */ + IPU_DEVICE_GP_ISA_PAF_STR_PORT0 = 0, + IPU_DEVICE_GP_ISA_PAF_STR_PORT1 = 1, + + /* sis port block options */ + IPU_DEVICE_GP_ISA_SIS_PORT_UNBLOCK = 0, + IPU_DEVICE_GP_ISA_SIS_PORT_BLOCK = 1, + IPU_DEVICE_GP_ISA_CONF_INVALID = 0xFF +}; + +enum ipu_device_gp_psa_value { + /* Defines needed for bxtB0 */ + /* PSA_STILLS_MODE_MUX */ + IPU_DEVICE_GP_PSA_MUX_POST_RYNR_ROUTE_WO_DM = 0, + IPU_DEVICE_GP_PSA_MUX_POST_RYNR_ROUTE_W_DM = 1, + /* PSA_ACM_DEMUX */ + IPU_DEVICE_GP_PSA_DEMUX_PRE_ACM_ROUTE_TO_ACM = 0, + IPU_DEVICE_GP_PSA_DEMUX_PRE_ACM_ROUTE_TO_S2V = 1, + /* PSA_S2V_RGB_F_MUX */ + IPU_DEVICE_GP_PSA_MUX_PRE_S2V_RGB_F_FROM_ACM = 0, + IPU_DEVICE_GP_PSA_MUX_PRE_S2V_RGB_F_FROM_DM_OR_SPLITTER = 1, + /* PSA_V2S_RGB_4_DEMUX */ + IPU_DEVICE_GP_PSA_DEMUX_POST_V2S_RGB_4_TO_GTM = 0, + IPU_DEVICE_GP_PSA_DEMUX_POST_V2S_RGB_4_TO_ACM = 1, +}; + +enum ipu_device_gp_isl_value { + /* choose and route pixel stream to CSI BE */ + IPU_DEVICE_GP_ISL_CSI_BE_IN_USE = 0, + /* choose and route pixel stream bypass CSI BE */ + IPU_DEVICE_GP_ISL_CSI_BE_BYPASS +}; + +#endif /* __IPU_DEVICE_GP_PROPERTIES_TYPES_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/devices/psys/bxtB0/ipu_device_acb_devices.h b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/devices/psys/bxtB0/ipu_device_acb_devices.h new file mode 100644 index 0000000000000..d9472a5d33cad --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/devices/psys/bxtB0/ipu_device_acb_devices.h @@ -0,0 +1,43 @@ +/* + * Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. 
+ * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + */ + +#ifndef __IPU_DEVICE_ACB_DEVICES_H +#define __IPU_DEVICE_ACB_DEVICES_H + +enum ipu_device_acb_id { + /* PSA accelerators */ + IPU_DEVICE_ACB_WBA_ID = 0, + IPU_DEVICE_ACB_RYNR_ID, + IPU_DEVICE_ACB_DEMOSAIC_ID, + IPU_DEVICE_ACB_ACM_ID, + IPU_DEVICE_ACB_GTC_ID, + IPU_DEVICE_ACB_YUV1_ID, + IPU_DEVICE_ACB_DVS_ID, + IPU_DEVICE_ACB_LACE_ID, + /* ISA accelerators */ + IPU_DEVICE_ACB_ICA_ID, + IPU_DEVICE_ACB_LSC_ID, + IPU_DEVICE_ACB_DPC_ID, + IPU_DEVICE_ACB_IDS_ID, + IPU_DEVICE_ACB_AWB_ID, + IPU_DEVICE_ACB_AF_ID, + IPU_DEVICE_ACB_AE_ID, + IPU_DEVICE_ACB_NUM_ACB +}; + +#define IPU_DEVICE_ACB_NUM_PSA_ACB (IPU_DEVICE_ACB_LACE_ID + 1) +#define IPU_DEVICE_ACB_NUM_ISA_ACB \ + (IPU_DEVICE_ACB_NUM_ACB - IPU_DEVICE_ACB_NUM_PSA_ACB) + +#endif /* __IPU_DEVICE_ACB_DEVICES_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/devices/psys/bxtB0/ipu_device_cell_devices.h b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/devices/psys/bxtB0/ipu_device_cell_devices.h new file mode 100644 index 0000000000000..7a57967cb6eb7 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/devices/psys/bxtB0/ipu_device_cell_devices.h @@ -0,0 +1,38 @@ +/* + * Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+ */ + +#ifndef __IPU_DEVICE_CELL_DEVICES_H +#define __IPU_DEVICE_CELL_DEVICES_H + +#define SPC0_CELL processing_system_sp_cluster_sp_cluster_logic_spc_tile_sp +#define SPP0_CELL processing_system_sp_cluster_sp_cluster_logic_spp_tile0_sp +#define SPP1_CELL processing_system_sp_cluster_sp_cluster_logic_spp_tile1_sp +#define ISP0_CELL processing_system_isp_tile0_logic_isp +#define ISP1_CELL processing_system_isp_tile1_logic_isp +#define ISP2_CELL processing_system_isp_tile2_logic_isp +#define ISP3_CELL processing_system_isp_tile3_logic_isp + +enum ipu_device_psys_cell_id { + SPC0, + SPP0, + SPP1, + ISP0, + ISP1, + ISP2, + ISP3, + NUM_CELLS +}; +#define NUM_ISP_CELLS 4 + +#endif /* __IPU_DEVICE_CELL_DEVICES_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/devices/psys/bxtB0/ipu_device_cell_properties_defs.h b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/devices/psys/bxtB0/ipu_device_cell_properties_defs.h new file mode 100644 index 0000000000000..2b80e2822a906 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/devices/psys/bxtB0/ipu_device_cell_properties_defs.h @@ -0,0 +1,65 @@ +/* +* Support for Intel Camera Imaging ISP subsystem. +* Copyright (c) 2010 - 2018, Intel Corporation. +* +* This program is free software; you can redistribute it and/or modify it +* under the terms and conditions of the GNU General Public License, +* version 2, as published by the Free Software Foundation. +* +* This program is distributed in the hope it will be useful, but WITHOUT +* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for +* more details. +*/ +/* Generated file - please do not edit. 
*/ + +#ifndef _IPU_DEVICE_CELL_PROPERTIES_DEFS_H_ +#define _IPU_DEVICE_CELL_PROPERTIES_DEFS_H_ +#define SPC0_REGS_CBUS_ADDRESS 0x00000000 +#define SPC0_DMEM_CBUS_ADDRESS 0x00008000 +#define SPC0_DMEM_DBUS_ADDRESS 0x02000000 +#define SPC0_DMEM_DMA_M0_ADDRESS SPC0_DMEM_DBUS_ADDRESS +#define SPC0_DMEM_INT_DMA_M0_ADDRESS SPC0_DMEM_DBUS_ADDRESS +#define SPP0_REGS_CBUS_ADDRESS 0x00020000 +#define SPP0_DMEM_CBUS_ADDRESS 0x00028000 +#define SPP0_DMEM_DBUS_ADDRESS 0x02020000 +#define SPP1_REGS_CBUS_ADDRESS 0x00030000 +#define SPP1_DMEM_CBUS_ADDRESS 0x00038000 +#define SPP1_DMEM_DBUS_ADDRESS 0x02030000 +#define ISP0_REGS_CBUS_ADDRESS 0x001C0000 +#define ISP0_PMEM_CBUS_ADDRESS 0x001D0000 +#define ISP0_DMEM_CBUS_ADDRESS 0x001F0000 +#define ISP0_BAMEM_CBUS_ADDRESS 0x00200000 +#define ISP0_VMEM_CBUS_ADDRESS 0x00220000 +#define ISP1_REGS_CBUS_ADDRESS 0x00240000 +#define ISP1_PMEM_CBUS_ADDRESS 0x00250000 +#define ISP1_DMEM_CBUS_ADDRESS 0x00270000 +#define ISP1_BAMEM_CBUS_ADDRESS 0x00280000 +#define ISP1_VMEM_CBUS_ADDRESS 0x002A0000 +#define ISP2_REGS_CBUS_ADDRESS 0x002C0000 +#define ISP2_PMEM_CBUS_ADDRESS 0x002D0000 +#define ISP2_DMEM_CBUS_ADDRESS 0x002F0000 +#define ISP2_BAMEM_CBUS_ADDRESS 0x00300000 +#define ISP2_VMEM_CBUS_ADDRESS 0x00320000 +#define ISP3_REGS_CBUS_ADDRESS 0x00340000 +#define ISP3_PMEM_CBUS_ADDRESS 0x00350000 +#define ISP3_DMEM_CBUS_ADDRESS 0x00370000 +#define ISP3_BAMEM_CBUS_ADDRESS 0x00380000 +#define ISP3_VMEM_CBUS_ADDRESS 0x003A0000 +#define ISP0_PMEM_DBUS_ADDRESS 0x08000000 +#define ISP0_DMEM_DBUS_ADDRESS 0x08400000 +#define ISP0_BAMEM_DBUS_ADDRESS 0x09000000 +#define ISP0_VMEM_DBUS_ADDRESS 0x08800000 +#define ISP1_PMEM_DBUS_ADDRESS 0x0A000000 +#define ISP1_DMEM_DBUS_ADDRESS 0x0A400000 +#define ISP1_BAMEM_DBUS_ADDRESS 0x0B000000 +#define ISP1_VMEM_DBUS_ADDRESS 0x0A800000 +#define ISP2_PMEM_DBUS_ADDRESS 0x0C000000 +#define ISP2_DMEM_DBUS_ADDRESS 0x0C400000 +#define ISP2_BAMEM_DBUS_ADDRESS 0x0D000000 +#define ISP2_VMEM_DBUS_ADDRESS 0x0C800000 +#define 
ISP3_PMEM_DBUS_ADDRESS 0x0E000000 +#define ISP3_DMEM_DBUS_ADDRESS 0x0E400000 +#define ISP3_BAMEM_DBUS_ADDRESS 0x0F000000 +#define ISP3_VMEM_DBUS_ADDRESS 0x0E800000 +#endif /* _IPU_DEVICE_CELL_PROPERTIES_DEFS_H_ */ diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/devices/psys/bxtB0/ipu_device_cell_properties_impl.h b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/devices/psys/bxtB0/ipu_device_cell_properties_impl.h new file mode 100644 index 0000000000000..10c28983eeb6f --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/devices/psys/bxtB0/ipu_device_cell_properties_impl.h @@ -0,0 +1,193 @@ +/* + * Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+ */ + +#ifndef __IPU_DEVICE_CELL_PROPERTIES_IMPL_H +#define __IPU_DEVICE_CELL_PROPERTIES_IMPL_H + +#include "ipu_device_sp2600_control_properties_impl.h" +#include "ipu_device_sp2600_proxy_properties_impl.h" +#include "ipu_device_isp2600_properties_impl.h" +#include "ipu_device_cell_properties_defs.h" +#include "ipu_device_cell_devices.h" +#include "ipu_device_cell_type_properties.h"/* IPU_DEVICE_INVALID_MEM_ADDRESS */ + +static const unsigned int +ipu_device_spc0_mem_address[IPU_DEVICE_SP2600_CONTROL_NUM_MEMORIES] = { + SPC0_REGS_CBUS_ADDRESS, + IPU_DEVICE_INVALID_MEM_ADDRESS, /* no pmem */ + SPC0_DMEM_CBUS_ADDRESS +}; + +static const unsigned int +ipu_device_spp0_mem_address[IPU_DEVICE_SP2600_PROXY_NUM_MEMORIES] = { + SPP0_REGS_CBUS_ADDRESS, + IPU_DEVICE_INVALID_MEM_ADDRESS, /* no pmem */ + SPP0_DMEM_CBUS_ADDRESS +}; + +static const unsigned int +ipu_device_spp1_mem_address[IPU_DEVICE_SP2600_PROXY_NUM_MEMORIES] = { + SPP1_REGS_CBUS_ADDRESS, + IPU_DEVICE_INVALID_MEM_ADDRESS, /* no pmem */ + SPP1_DMEM_CBUS_ADDRESS +}; + +static const unsigned int +ipu_device_isp0_mem_address[IPU_DEVICE_ISP2600_NUM_MEMORIES] = { + ISP0_REGS_CBUS_ADDRESS, /* reg addr */ + ISP0_PMEM_CBUS_ADDRESS, /* pmem addr */ + ISP0_DMEM_CBUS_ADDRESS, /* dmem addr */ + ISP0_BAMEM_CBUS_ADDRESS,/* bamem addr */ + ISP0_VMEM_CBUS_ADDRESS /* vmem addr */ +}; + +static const unsigned int +ipu_device_isp1_mem_address[IPU_DEVICE_ISP2600_NUM_MEMORIES] = { + ISP1_REGS_CBUS_ADDRESS, /* reg addr */ + ISP1_PMEM_CBUS_ADDRESS, /* pmem addr */ + ISP1_DMEM_CBUS_ADDRESS, /* dmem addr */ + ISP1_BAMEM_CBUS_ADDRESS,/* bamem addr */ + ISP1_VMEM_CBUS_ADDRESS /* vmem addr */ +}; + +static const unsigned int +ipu_device_isp2_mem_address[IPU_DEVICE_ISP2600_NUM_MEMORIES] = { + ISP2_REGS_CBUS_ADDRESS, /* reg addr */ + ISP2_PMEM_CBUS_ADDRESS, /* pmem addr */ + ISP2_DMEM_CBUS_ADDRESS, /* dmem addr */ + ISP2_BAMEM_CBUS_ADDRESS,/* bamem addr */ + ISP2_VMEM_CBUS_ADDRESS /* vmem addr */ +}; + +static const unsigned int 
+ipu_device_isp3_mem_address[IPU_DEVICE_ISP2600_NUM_MEMORIES] = { + ISP3_REGS_CBUS_ADDRESS, /* reg addr */ + ISP3_PMEM_CBUS_ADDRESS, /* pmem addr */ + ISP3_DMEM_CBUS_ADDRESS, /* dmem addr */ + ISP3_BAMEM_CBUS_ADDRESS,/* bamem addr */ + ISP3_VMEM_CBUS_ADDRESS /* vmem addr */ +}; + +static const unsigned int +ipu_device_spc0_mem_databus_address[IPU_DEVICE_SP2600_CONTROL_NUM_MEMORIES] = { + IPU_DEVICE_INVALID_MEM_ADDRESS, /* no reg addr */ + IPU_DEVICE_INVALID_MEM_ADDRESS, /* no pmem */ + SPC0_DMEM_DBUS_ADDRESS +}; + +static const unsigned int +ipu_device_spp0_mem_databus_address[IPU_DEVICE_SP2600_PROXY_NUM_MEMORIES] = { + IPU_DEVICE_INVALID_MEM_ADDRESS, /* no reg addr */ + IPU_DEVICE_INVALID_MEM_ADDRESS, /* no pmem */ + SPP0_DMEM_DBUS_ADDRESS +}; + +static const unsigned int +ipu_device_spp1_mem_databus_address[IPU_DEVICE_SP2600_PROXY_NUM_MEMORIES] = { + IPU_DEVICE_INVALID_MEM_ADDRESS, /* no reg addr */ + IPU_DEVICE_INVALID_MEM_ADDRESS, /* no pmem */ + SPP1_DMEM_DBUS_ADDRESS +}; + +static const unsigned int +ipu_device_isp0_mem_databus_address[IPU_DEVICE_ISP2600_NUM_MEMORIES] = { + IPU_DEVICE_INVALID_MEM_ADDRESS, /* no reg addr */ + ISP0_PMEM_DBUS_ADDRESS, /* pmem databus addr */ + ISP0_DMEM_DBUS_ADDRESS, /* dmem databus addr */ + ISP0_BAMEM_DBUS_ADDRESS, /* bamem databus addr */ + ISP0_VMEM_DBUS_ADDRESS /* vmem databus addr */ +}; + +static const unsigned int +ipu_device_isp1_mem_databus_address[IPU_DEVICE_ISP2600_NUM_MEMORIES] = { + IPU_DEVICE_INVALID_MEM_ADDRESS, /* no reg addr */ + ISP1_PMEM_DBUS_ADDRESS, /* pmem databus addr */ + ISP1_DMEM_DBUS_ADDRESS, /* dmem databus addr */ + ISP1_BAMEM_DBUS_ADDRESS, /* bamem databus addr */ + ISP1_VMEM_DBUS_ADDRESS /* vmem databus addr */ +}; + +static const unsigned int +ipu_device_isp2_mem_databus_address[IPU_DEVICE_ISP2600_NUM_MEMORIES] = { + IPU_DEVICE_INVALID_MEM_ADDRESS, /* no reg addr */ + ISP2_PMEM_DBUS_ADDRESS, /* pmem databus addr */ + ISP2_DMEM_DBUS_ADDRESS, /* dmem databus addr */ + ISP2_BAMEM_DBUS_ADDRESS, /* 
bamem databus addr */ + ISP2_VMEM_DBUS_ADDRESS /* vmem databus addr */ +}; + +static const unsigned int +ipu_device_isp3_mem_databus_address[IPU_DEVICE_ISP2600_NUM_MEMORIES] = { + IPU_DEVICE_INVALID_MEM_ADDRESS, /* no reg addr */ + ISP3_PMEM_DBUS_ADDRESS, /* pmem databus addr */ + ISP3_DMEM_DBUS_ADDRESS, /* dmem databus addr */ + ISP3_BAMEM_DBUS_ADDRESS, /* bamem databus addr */ + ISP3_VMEM_DBUS_ADDRESS /* vmem databus addr */ +}; + +static const struct ipu_device_cell_properties_s +ipu_device_cell_properties[NUM_CELLS] = { + { + &ipu_device_sp2600_control_properties, + ipu_device_spc0_mem_address, + ipu_device_spc0_mem_databus_address + }, + { + &ipu_device_sp2600_proxy_properties, + ipu_device_spp0_mem_address, + ipu_device_spp0_mem_databus_address + }, + { + &ipu_device_sp2600_proxy_properties, + ipu_device_spp1_mem_address, + ipu_device_spp1_mem_databus_address + }, + { + &ipu_device_isp2600_properties, + ipu_device_isp0_mem_address, + ipu_device_isp0_mem_databus_address + }, + { + &ipu_device_isp2600_properties, + ipu_device_isp1_mem_address, + ipu_device_isp1_mem_databus_address + }, + { + &ipu_device_isp2600_properties, + ipu_device_isp2_mem_address, + ipu_device_isp2_mem_databus_address + }, + { + &ipu_device_isp2600_properties, + ipu_device_isp3_mem_address, + ipu_device_isp3_mem_databus_address + } +}; + +#ifdef C_RUN + +/* Mapping between hrt_hive_processors enum and cell_id's used in FW */ +static const int ipu_device_map_cell_id_to_crun_proc_id[NUM_CELLS] = { + 4, /* SPC0 */ + 5, /* SPP0 */ + 6, /* SPP1 */ + 0, /* ISP0 */ + 1, /* ISP1 */ + 2, /* ISP2 */ + 3 /* ISP3 */ +}; + +#endif + +#endif /* __IPU_DEVICE_CELL_PROPERTIES_IMPL_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/devices/psys/bxtB0/ipu_device_ff_devices.h b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/devices/psys/bxtB0/ipu_device_ff_devices.h new file mode 100644 index 0000000000000..3af7ba63a3644 --- /dev/null +++ 
b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/devices/psys/bxtB0/ipu_device_ff_devices.h @@ -0,0 +1,55 @@ +/* + * Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + */ + +#ifndef __IPU_DEVICE_FF_DEVICES_H +#define __IPU_DEVICE_FF_DEVICES_H + +enum ipu_device_ff_id { + /* PSA fixed functions */ + IPU_DEVICE_FF_WBA_WBA = 0, + IPU_DEVICE_FF_RYNR_SPLITTER, + IPU_DEVICE_FF_RYNR_COLLECTOR, + IPU_DEVICE_FF_RYNR_BNLM, + IPU_DEVICE_FF_RYNR_VCUD, + IPU_DEVICE_FF_DEMOSAIC_DEMOSAIC, + IPU_DEVICE_FF_ACM_CCM, + IPU_DEVICE_FF_ACM_ACM, + IPU_DEVICE_FF_GTC_CSC_CDS, + IPU_DEVICE_FF_GTC_GTM, + IPU_DEVICE_FF_YUV1_SPLITTER, + IPU_DEVICE_FF_YUV1_IEFD, + IPU_DEVICE_FF_YUV1_YDS, + IPU_DEVICE_FF_YUV1_TCC, + IPU_DEVICE_FF_DVS_YBIN, + IPU_DEVICE_FF_DVS_DVS, + IPU_DEVICE_FF_LACE_LACE, + /* ISA fixed functions */ + IPU_DEVICE_FF_ICA_INL, + IPU_DEVICE_FF_ICA_GBL, + IPU_DEVICE_FF_ICA_PCLN, + IPU_DEVICE_FF_LSC_LSC, + IPU_DEVICE_FF_DPC_DPC, + IPU_DEVICE_FF_IDS_SCALER, + IPU_DEVICE_FF_AWB_AWRG, + IPU_DEVICE_FF_AF_AF, + IPU_DEVICE_FF_AE_WGHT_HIST, + IPU_DEVICE_FF_AE_CCM, + IPU_DEVICE_FF_NUM_FF +}; + +#define IPU_DEVICE_FF_NUM_PSA_FF (IPU_DEVICE_FF_LACE_LACE + 1) +#define IPU_DEVICE_FF_NUM_ISA_FF \ + (IPU_DEVICE_FF_NUM_FF - IPU_DEVICE_FF_NUM_PSA_FF) + +#endif /* __IPU_DEVICE_FF_DEVICES_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/devices/psys/bxtB0/ipu_device_gp_devices.h 
b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/devices/psys/bxtB0/ipu_device_gp_devices.h new file mode 100644 index 0000000000000..f6afd6003324a --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/devices/psys/bxtB0/ipu_device_gp_devices.h @@ -0,0 +1,67 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ + +#ifndef __IPU_DEVICE_GP_DEVICES_H +#define __IPU_DEVICE_GP_DEVICES_H +#include "math_support.h" +#include "type_support.h" + +enum ipu_device_gp_id { + IPU_DEVICE_GP_PSA = 0, /* PSA */ + IPU_DEVICE_GP_ISA_STATIC, /* ISA Static */ + IPU_DEVICE_GP_ISA_RUNTIME, /* ISA Runtime */ + IPU_DEVICE_GP_ISL, /* ISL */ + IPU_DEVICE_GP_NUM_GP +}; + +enum ipu_device_gp_psa_mux_id { + /* Post RYNR/CCN: 0-To ACM (Video), 1-To Demosaic (Stills) */ + IPU_DEVICE_GP_PSA_STILLS_MODE_MUX = 0, + /* Post Vec2Str 4: 0-To GTC, 1-To ACM */ + IPU_DEVICE_GP_PSA_V2S_RGB_4_DEMUX, + /* Post DM and pre ACM: 0-CCM/ACM, 1-DM Component Splitter */ + IPU_DEVICE_GP_PSA_S2V_RGB_F_MUX, + /* Pre ACM/CCM: 0-To CCM/ACM, 1-To str2vec id_f */ + IPU_DEVICE_GP_PSA_ACM_DEMUX, + IPU_DEVICE_GP_PSA_MUX_NUM_MUX +}; + +enum ipu_device_gp_isa_static_mux_id { + IPU_DEVICE_GP_ISA_STATIC_MUX_SEL = 0, + IPU_DEVICE_GP_ISA_STATIC_PORTA_BLK, + IPU_DEVICE_GP_ISA_STATIC_PORTB_BLK, + IPU_DEVICE_GP_ISA_STATIC_PORTC_BLK, + IPU_DEVICE_GP_ISA_STATIC_AWB_MUX_SEL, + IPU_DEVICE_GP_ISA_STATIC_AWB_MUX_INPUT_CORR_PORT_BLK, + IPU_DEVICE_GP_ISA_STATIC_AWB_MUX_DPC_PORT_BLK, + 
IPU_DEVICE_GP_ISA_STATIC_MUX_NUM_MUX +}; + +enum ipu_device_gp_isa_runtime_mux_id { + IPU_DEVICE_GP_ISA_RUNTIME_FRAME_SIZE = 0, + IPU_DEVICE_GP_ISA_RUNTIME_SCALED_FRAME_SIZE, + IPU_DEVICE_GP_ISA_RUNTIME_MUX_NUM_MUX +}; + +enum ipu_device_gp_isl_mux_id { + IPU_DEVICE_GP_ISL_MIPI_BE_MUX = 0, + IPU_DEVICE_GP_ISL_MUX_NUM_MUX +}; + +#define IPU_DEVICE_GP_MAX_NUM MAX4((uint32_t)IPU_DEVICE_GP_PSA_MUX_NUM_MUX, \ + (uint32_t)IPU_DEVICE_GP_ISA_STATIC_MUX_NUM_MUX, \ + (uint32_t)IPU_DEVICE_GP_ISA_RUNTIME_MUX_NUM_MUX, \ + (uint32_t)IPU_DEVICE_GP_ISL_MUX_NUM_MUX) + +#endif /* __IPU_DEVICE_GP_DEVICES_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/devices/src/ipu_device_isp2600_properties_impl.h b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/devices/src/ipu_device_isp2600_properties_impl.h new file mode 100644 index 0000000000000..de733be679986 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/devices/src/ipu_device_isp2600_properties_impl.h @@ -0,0 +1,151 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+*/ + +#ifndef __IPU_DEVICE_ISP2600_PROPERTIES_IMPL_H +#define __IPU_DEVICE_ISP2600_PROPERTIES_IMPL_H + +/* isp2600 definition */ + +#include "ipu_device_cell_properties_struct.h" + +enum ipu_device_isp2600_registers { + /* control registers */ + IPU_DEVICE_ISP2600_STAT_CTRL = 0x0, + IPU_DEVICE_ISP2600_START_PC = 0x4, + + /* master port registers */ + IPU_DEVICE_ISP2600_ICACHE_BASE = 0x10, + IPU_DEVICE_ISP2600_ICACHE_INFO = 0x14, + IPU_DEVICE_ISP2600_ICACHE_INFO_OVERRIDE = 0x18, + + IPU_DEVICE_ISP2600_QMEM_BASE = 0x1C, + + IPU_DEVICE_ISP2600_CMEM_BASE = 0x28, + + IPU_DEVICE_ISP2600_XMEM_BASE = 0x88, + IPU_DEVICE_ISP2600_XMEM_INFO = 0x8C, + IPU_DEVICE_ISP2600_XMEM_INFO_OVERRIDE = 0x90, + + IPU_DEVICE_ISP2600_XVMEM_BASE = 0xB8, + + /* debug registers */ + IPU_DEVICE_ISP2600_DEBUG_PC = 0x130, + IPU_DEVICE_ISP2600_STALL = 0x134 +}; + + +enum ipu_device_isp2600_memories { + IPU_DEVICE_ISP2600_REGS, + IPU_DEVICE_ISP2600_PMEM, + IPU_DEVICE_ISP2600_DMEM, + IPU_DEVICE_ISP2600_BAMEM, + IPU_DEVICE_ISP2600_VMEM, + IPU_DEVICE_ISP2600_NUM_MEMORIES +}; + +static const unsigned int +ipu_device_isp2600_mem_size[IPU_DEVICE_ISP2600_NUM_MEMORIES] = { + 0x00140, + 0x14000, + 0x04000, + 0x20000, + 0x20000 +}; + + +enum ipu_device_isp2600_masters { + IPU_DEVICE_ISP2600_ICACHE, + IPU_DEVICE_ISP2600_QMEM, + IPU_DEVICE_ISP2600_CMEM, + IPU_DEVICE_ISP2600_XMEM, + IPU_DEVICE_ISP2600_XVMEM, + IPU_DEVICE_ISP2600_NUM_MASTERS +}; + +static const struct ipu_device_cell_master_properties_s +ipu_device_isp2600_masters[IPU_DEVICE_ISP2600_NUM_MASTERS] = { + { + 0, + 0xC, + IPU_DEVICE_ISP2600_ICACHE_BASE, + IPU_DEVICE_ISP2600_ICACHE_INFO, + IPU_DEVICE_ISP2600_ICACHE_INFO_OVERRIDE + }, + { + 0, + 0xC, + IPU_DEVICE_ISP2600_QMEM_BASE, + 0xFFFFFFFF, + 0xFFFFFFFF + }, + { + 3, + 0xC, + IPU_DEVICE_ISP2600_CMEM_BASE, + 0xFFFFFFFF, + 0xFFFFFFFF + }, + { + 2, + 0xC, + IPU_DEVICE_ISP2600_XMEM_BASE, + IPU_DEVICE_ISP2600_XMEM_INFO, + IPU_DEVICE_ISP2600_XMEM_INFO_OVERRIDE + }, + { + 3, + 0xC, + 
IPU_DEVICE_ISP2600_XVMEM_BASE, + 0xFFFFFFFF, + 0xFFFFFFFF + } +}; + +enum ipu_device_isp2600_stall_bits { + IPU_DEVICE_ISP2600_STALL_ICACHE0, + IPU_DEVICE_ISP2600_STALL_ICACHE1, + IPU_DEVICE_ISP2600_STALL_DMEM, + IPU_DEVICE_ISP2600_STALL_QMEM, + IPU_DEVICE_ISP2600_STALL_CMEM, + IPU_DEVICE_ISP2600_STALL_XMEM, + IPU_DEVICE_ISP2600_STALL_BAMEM, + IPU_DEVICE_ISP2600_STALL_VMEM, + IPU_DEVICE_ISP2600_STALL_XVMEM, + IPU_DEVICE_ISP2600_NUM_STALL_BITS +}; + +#define IPU_DEVICE_ISP2600_ICACHE_WORD_SIZE 64 /* 512 bits per instruction */ +#define IPU_DEVICE_ISP2600_ICACHE_BURST_SIZE 8 /* 8 instructions per burst */ + +static const struct ipu_device_cell_count_s ipu_device_isp2600_count = { + IPU_DEVICE_ISP2600_NUM_MEMORIES, + IPU_DEVICE_ISP2600_NUM_MASTERS, + IPU_DEVICE_ISP2600_NUM_STALL_BITS, + IPU_DEVICE_ISP2600_ICACHE_WORD_SIZE * + IPU_DEVICE_ISP2600_ICACHE_BURST_SIZE +}; + +static const unsigned int ipu_device_isp2600_reg_offset[/* CELL_NUM_REGS */] = { + 0x0, 0x4, 0x10, 0x130, 0x134 +}; + +static const struct ipu_device_cell_type_properties_s +ipu_device_isp2600_properties = { + &ipu_device_isp2600_count, + ipu_device_isp2600_masters, + ipu_device_isp2600_reg_offset, + ipu_device_isp2600_mem_size +}; + +#endif /* __IPU_DEVICE_ISP2600_PROPERTIES_IMPL_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/devices/src/ipu_device_sp2600_control_properties_impl.h b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/devices/src/ipu_device_sp2600_control_properties_impl.h new file mode 100644 index 0000000000000..430295cd9d949 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/devices/src/ipu_device_sp2600_control_properties_impl.h @@ -0,0 +1,136 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. 
+ * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ + +#ifndef __IPU_DEVICE_SP2600_CONTROL_PROPERTIES_IMPL_H +#define __IPU_DEVICE_SP2600_CONTROL_PROPERTIES_IMPL_H + +/* sp2600_control definition */ + +#include "ipu_device_cell_properties_struct.h" + +enum ipu_device_sp2600_control_registers { + /* control registers */ + IPU_DEVICE_SP2600_CONTROL_STAT_CTRL = 0x0, + IPU_DEVICE_SP2600_CONTROL_START_PC = 0x4, + + /* master port registers */ + IPU_DEVICE_SP2600_CONTROL_ICACHE_BASE = 0x10, + IPU_DEVICE_SP2600_CONTROL_ICACHE_INFO = 0x14, + IPU_DEVICE_SP2600_CONTROL_ICACHE_INFO_OVERRIDE = 0x18, + + IPU_DEVICE_SP2600_CONTROL_QMEM_BASE = 0x1C, + + IPU_DEVICE_SP2600_CONTROL_CMEM_BASE = 0x28, + IPU_DEVICE_SP2600_CONTROL_CMEM_INFO = 0x2C, + IPU_DEVICE_SP2600_CONTROL_CMEM_INFO_OVERRIDE = 0x30, + + IPU_DEVICE_SP2600_CONTROL_XMEM_BASE = 0x58, + IPU_DEVICE_SP2600_CONTROL_XMEM_INFO = 0x5C, + IPU_DEVICE_SP2600_CONTROL_XMEM_INFO_OVERRIDE = 0x60, + + /* debug registers */ + IPU_DEVICE_SP2600_CONTROL_DEBUG_PC = 0x9C, + IPU_DEVICE_SP2600_CONTROL_STALL = 0xA0 +}; + +enum ipu_device_sp2600_control_mems { + IPU_DEVICE_SP2600_CONTROL_REGS, + IPU_DEVICE_SP2600_CONTROL_PMEM, + IPU_DEVICE_SP2600_CONTROL_DMEM, + IPU_DEVICE_SP2600_CONTROL_NUM_MEMORIES +}; + +static const unsigned int +ipu_device_sp2600_control_mem_size[IPU_DEVICE_SP2600_CONTROL_NUM_MEMORIES] = { + 0x000AC, + 0x00000, + 0x10000 +}; + +enum ipu_device_sp2600_control_masters { + IPU_DEVICE_SP2600_CONTROL_ICACHE, + IPU_DEVICE_SP2600_CONTROL_QMEM, + IPU_DEVICE_SP2600_CONTROL_CMEM, + IPU_DEVICE_SP2600_CONTROL_XMEM, + 
IPU_DEVICE_SP2600_CONTROL_NUM_MASTERS +}; + +static const struct ipu_device_cell_master_properties_s +ipu_device_sp2600_control_masters[IPU_DEVICE_SP2600_CONTROL_NUM_MASTERS] = { + { + 0, + 0xC, + IPU_DEVICE_SP2600_CONTROL_ICACHE_BASE, + IPU_DEVICE_SP2600_CONTROL_ICACHE_INFO, + IPU_DEVICE_SP2600_CONTROL_ICACHE_INFO_OVERRIDE + }, + { + 0, + 0xC, + IPU_DEVICE_SP2600_CONTROL_QMEM_BASE, + 0xFFFFFFFF, + 0xFFFFFFFF + }, + { + 2, + 0xC, + IPU_DEVICE_SP2600_CONTROL_CMEM_BASE, + IPU_DEVICE_SP2600_CONTROL_CMEM_INFO, + IPU_DEVICE_SP2600_CONTROL_CMEM_INFO_OVERRIDE + }, + { + 2, + 0xC, + IPU_DEVICE_SP2600_CONTROL_XMEM_BASE, + IPU_DEVICE_SP2600_CONTROL_XMEM_INFO, + IPU_DEVICE_SP2600_CONTROL_XMEM_INFO_OVERRIDE + } +}; + +enum ipu_device_sp2600_control_stall_bits { + IPU_DEVICE_SP2600_CONTROL_STALL_ICACHE, + IPU_DEVICE_SP2600_CONTROL_STALL_DMEM, + IPU_DEVICE_SP2600_CONTROL_STALL_QMEM, + IPU_DEVICE_SP2600_CONTROL_STALL_CMEM, + IPU_DEVICE_SP2600_CONTROL_STALL_XMEM, + IPU_DEVICE_SP2600_CONTROL_NUM_STALL_BITS +}; + +/* 32 bits per instruction */ +#define IPU_DEVICE_SP2600_CONTROL_ICACHE_WORD_SIZE 4 +/* 32 instructions per burst */ +#define IPU_DEVICE_SP2600_CONTROL_ICACHE_BURST_SIZE 32 + +static const struct ipu_device_cell_count_s ipu_device_sp2600_control_count = { + IPU_DEVICE_SP2600_CONTROL_NUM_MEMORIES, + IPU_DEVICE_SP2600_CONTROL_NUM_MASTERS, + IPU_DEVICE_SP2600_CONTROL_NUM_STALL_BITS, + IPU_DEVICE_SP2600_CONTROL_ICACHE_WORD_SIZE * + IPU_DEVICE_SP2600_CONTROL_ICACHE_BURST_SIZE +}; + +static const unsigned int +ipu_device_sp2600_control_reg_offset[/* CELL_NUM_REGS */] = { + 0x0, 0x4, 0x10, 0x9C, 0xA0 +}; + +static const struct ipu_device_cell_type_properties_s +ipu_device_sp2600_control_properties = { + &ipu_device_sp2600_control_count, + ipu_device_sp2600_control_masters, + ipu_device_sp2600_control_reg_offset, + ipu_device_sp2600_control_mem_size +}; + +#endif /* __IPU_DEVICE_SP2600_CONTROL_PROPERTIES_IMPL_H */ diff --git 
a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/devices/src/ipu_device_sp2600_fp_properties_impl.h b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/devices/src/ipu_device_sp2600_fp_properties_impl.h new file mode 100644 index 0000000000000..b3f120f9fea86 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/devices/src/ipu_device_sp2600_fp_properties_impl.h @@ -0,0 +1,140 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ + +#ifndef __IPU_DEVICE_SP2600_FP_PROPERTIES_IMPL_H +#define __IPU_DEVICE_SP2600_FP_PROPERTIES_IMPL_H + +/* sp2600_fp definition */ + +#include "ipu_device_cell_properties_struct.h" + +enum ipu_device_sp2600_fp_registers { + /* control registers */ + IPU_DEVICE_SP2600_FP_STAT_CTRL = 0x0, + IPU_DEVICE_SP2600_FP_START_PC = 0x4, + + /* master port registers */ + IPU_DEVICE_SP2600_FP_ICACHE_BASE = 0x10, + IPU_DEVICE_SP2600_FP_ICACHE_INFO = 0x14, + IPU_DEVICE_SP2600_FP_ICACHE_INFO_OVERRIDE = 0x18, + + IPU_DEVICE_SP2600_FP_QMEM_BASE = 0x1C, + + IPU_DEVICE_SP2600_FP_CMEM_BASE = 0x28, + IPU_DEVICE_SP2600_FP_CMEM_INFO = 0x2C, + IPU_DEVICE_SP2600_FP_CMEM_INFO_OVERRIDE = 0x30, + + IPU_DEVICE_SP2600_FP_XMEM_BASE = 0x88, + IPU_DEVICE_SP2600_FP_XMEM_INFO = 0x8C, + IPU_DEVICE_SP2600_FP_XMEM_INFO_OVERRIDE = 0x90, + + /* debug registers */ + IPU_DEVICE_SP2600_FP_DEBUG_PC = 0xCC, + IPU_DEVICE_SP2600_FP_STALL = 0xD0 +}; + + +enum ipu_device_sp2600_fp_memories { + IPU_DEVICE_SP2600_FP_REGS, + IPU_DEVICE_SP2600_FP_PMEM, + 
IPU_DEVICE_SP2600_FP_DMEM, + IPU_DEVICE_SP2600_FP_DMEM1, + IPU_DEVICE_SP2600_FP_NUM_MEMORIES +}; + +static const unsigned int +ipu_device_sp2600_fp_mem_size[IPU_DEVICE_SP2600_FP_NUM_MEMORIES] = { + 0x000DC, + 0x00000, + 0x10000, + 0x08000 +}; + +enum ipu_device_sp2600_fp_masters { + IPU_DEVICE_SP2600_FP_ICACHE, + IPU_DEVICE_SP2600_FP_QMEM, + IPU_DEVICE_SP2600_FP_CMEM, + IPU_DEVICE_SP2600_FP_XMEM, + IPU_DEVICE_SP2600_FP_NUM_MASTERS +}; + +static const struct ipu_device_cell_master_properties_s +ipu_device_sp2600_fp_masters[IPU_DEVICE_SP2600_FP_NUM_MASTERS] = { + { + 0, + 0xC, + IPU_DEVICE_SP2600_FP_ICACHE_BASE, + IPU_DEVICE_SP2600_FP_ICACHE_INFO, + IPU_DEVICE_SP2600_FP_ICACHE_INFO_OVERRIDE + }, + { + 0, + 0xC, + IPU_DEVICE_SP2600_FP_QMEM_BASE, + 0xFFFFFFFF, + 0xFFFFFFFF + }, + { + 3, + 0xC, + IPU_DEVICE_SP2600_FP_CMEM_BASE, + IPU_DEVICE_SP2600_FP_CMEM_INFO, + IPU_DEVICE_SP2600_FP_CMEM_INFO_OVERRIDE + }, + { + 2, + 0xC, + IPU_DEVICE_SP2600_FP_XMEM_BASE, + IPU_DEVICE_SP2600_FP_XMEM_INFO, + IPU_DEVICE_SP2600_FP_XMEM_INFO_OVERRIDE + } +}; + +enum ipu_device_sp2600_fp_stall_bits { + IPU_DEVICE_SP2600_FP_STALL_ICACHE, + IPU_DEVICE_SP2600_FP_STALL_DMEM, + IPU_DEVICE_SP2600_FP_STALL_QMEM, + IPU_DEVICE_SP2600_FP_STALL_CMEM, + IPU_DEVICE_SP2600_FP_STALL_XMEM, + IPU_DEVICE_SP2600_FP_STALL_DMEM1, + IPU_DEVICE_SP2600_FP_NUM_STALL_BITS +}; + +/* 32 bits per instruction */ +#define IPU_DEVICE_SP2600_FP_ICACHE_WORD_SIZE 4 +/* 32 instructions per burst */ +#define IPU_DEVICE_SP2600_FP_ICACHE_BURST_SIZE 32 + +static const struct ipu_device_cell_count_s ipu_device_sp2600_fp_count = { + IPU_DEVICE_SP2600_FP_NUM_MEMORIES, + IPU_DEVICE_SP2600_FP_NUM_MASTERS, + IPU_DEVICE_SP2600_FP_NUM_STALL_BITS, + IPU_DEVICE_SP2600_FP_ICACHE_WORD_SIZE * + IPU_DEVICE_SP2600_FP_ICACHE_BURST_SIZE +}; + +static const unsigned int +ipu_device_sp2600_fp_reg_offset[/* CELL_NUM_REGS */] = { + 0x0, 0x4, 0x10, 0xCC, 0xD0 +}; + +static const struct ipu_device_cell_type_properties_s +ipu_device_sp2600_fp_properties 
= { + &ipu_device_sp2600_fp_count, + ipu_device_sp2600_fp_masters, + ipu_device_sp2600_fp_reg_offset, + ipu_device_sp2600_fp_mem_size +}; + +#endif /* __IPU_DEVICE_SP2600_FP_PROPERTIES_IMPL_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/devices/src/ipu_device_sp2600_proxy_properties_impl.h b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/devices/src/ipu_device_sp2600_proxy_properties_impl.h new file mode 100644 index 0000000000000..6fdcd7faea9b8 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/devices/src/ipu_device_sp2600_proxy_properties_impl.h @@ -0,0 +1,138 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ + +#ifndef __IPU_DEVICE_SP2600_PROXY_PROPERTIES_IMPL_H +#define __IPU_DEVICE_SP2600_PROXY_PROPERTIES_IMPL_H + +/* sp2600_proxy definition */ + +#include "ipu_device_cell_properties_struct.h" + +enum ipu_device_sp2600_proxy_registers { + /* control registers */ + IPU_DEVICE_SP2600_PROXY_STAT_CTRL = 0x0, + IPU_DEVICE_SP2600_PROXY_START_PC = 0x4, + + /* THESE ADDRESSES NEED TO BE CHECKED !!!! 
*/ + /* master port registers */ + IPU_DEVICE_SP2600_PROXY_ICACHE_BASE = 0x10, + IPU_DEVICE_SP2600_PROXY_ICACHE_INFO = 0x14, + IPU_DEVICE_SP2600_PROXY_ICACHE_INFO_OVERRIDE = 0x18, + + IPU_DEVICE_SP2600_PROXY_QMEM_BASE = 0x1C, + + IPU_DEVICE_SP2600_PROXY_CMEM_BASE = 0x28, + IPU_DEVICE_SP2600_PROXY_CMEM_INFO = 0x2C, + IPU_DEVICE_SP2600_PROXY_CMEM_INFO_OVERRIDE = 0x30, + + IPU_DEVICE_SP2600_PROXY_XMEM_BASE = 0x58, + IPU_DEVICE_SP2600_PROXY_XMEM_INFO = 0x5C, + IPU_DEVICE_SP2600_PROXY_XMEM_INFO_OVERRIDE = 0x60, + + /* debug registers */ + IPU_DEVICE_SP2600_PROXY_DEBUG_PC = 0x9C, + IPU_DEVICE_SP2600_PROXY_STALL = 0xA0 +}; + + +enum ipu_device_sp2600_proxy_memories { + IPU_DEVICE_SP2600_PROXY_REGS, + IPU_DEVICE_SP2600_PROXY_PMEM, + IPU_DEVICE_SP2600_PROXY_DMEM, + IPU_DEVICE_SP2600_PROXY_NUM_MEMORIES +}; + +static const unsigned int +ipu_device_sp2600_proxy_mem_size[IPU_DEVICE_SP2600_PROXY_NUM_MEMORIES] = { + 0x00AC, + 0x0000, + 0x4000 +}; + +enum ipu_device_sp2600_proxy_masters { + IPU_DEVICE_SP2600_PROXY_ICACHE, + IPU_DEVICE_SP2600_PROXY_QMEM, + IPU_DEVICE_SP2600_PROXY_CMEM, + IPU_DEVICE_SP2600_PROXY_XMEM, + IPU_DEVICE_SP2600_PROXY_NUM_MASTERS +}; + +static const struct ipu_device_cell_master_properties_s +ipu_device_sp2600_proxy_masters[IPU_DEVICE_SP2600_PROXY_NUM_MASTERS] = { + { + 0, + 0xC, + IPU_DEVICE_SP2600_PROXY_ICACHE_BASE, + IPU_DEVICE_SP2600_PROXY_ICACHE_INFO, + IPU_DEVICE_SP2600_PROXY_ICACHE_INFO_OVERRIDE + }, + { + 0, + 0xC, + IPU_DEVICE_SP2600_PROXY_QMEM_BASE, + 0xFFFFFFFF, + 0xFFFFFFFF + }, + { + 2, + 0xC, + IPU_DEVICE_SP2600_PROXY_CMEM_BASE, + IPU_DEVICE_SP2600_PROXY_CMEM_INFO, + IPU_DEVICE_SP2600_PROXY_CMEM_INFO_OVERRIDE + }, + { + 2, + 0xC, + IPU_DEVICE_SP2600_PROXY_XMEM_BASE, + IPU_DEVICE_SP2600_PROXY_XMEM_INFO, + IPU_DEVICE_SP2600_PROXY_XMEM_INFO_OVERRIDE + } +}; + +enum ipu_device_sp2600_proxy_stall_bits { + IPU_DEVICE_SP2600_PROXY_STALL_ICACHE, + IPU_DEVICE_SP2600_PROXY_STALL_DMEM, + IPU_DEVICE_SP2600_PROXY_STALL_QMEM, + 
IPU_DEVICE_SP2600_PROXY_STALL_CMEM, + IPU_DEVICE_SP2600_PROXY_STALL_XMEM, + IPU_DEVICE_SP2600_PROXY_NUM_STALL_BITS +}; + +/* 32 bits per instruction */ +#define IPU_DEVICE_SP2600_PROXY_ICACHE_WORD_SIZE 4 +/* 32 instructions per burst */ +#define IPU_DEVICE_SP2600_PROXY_ICACHE_BURST_SIZE 32 + +static const struct ipu_device_cell_count_s ipu_device_sp2600_proxy_count = { + IPU_DEVICE_SP2600_PROXY_NUM_MEMORIES, + IPU_DEVICE_SP2600_PROXY_NUM_MASTERS, + IPU_DEVICE_SP2600_PROXY_NUM_STALL_BITS, + IPU_DEVICE_SP2600_PROXY_ICACHE_WORD_SIZE * + IPU_DEVICE_SP2600_PROXY_ICACHE_BURST_SIZE +}; + +static const unsigned int +ipu_device_sp2600_proxy_reg_offset[/* CELL_NUM_REGS */] = { + 0x0, 0x4, 0x10, 0x9C, 0xA0 +}; + +static const struct ipu_device_cell_type_properties_s +ipu_device_sp2600_proxy_properties = { + &ipu_device_sp2600_proxy_count, + ipu_device_sp2600_proxy_masters, + ipu_device_sp2600_proxy_reg_offset, + ipu_device_sp2600_proxy_mem_size +}; + +#endif /* __IPU_DEVICE_SP2600_PROXY_PROPERTIES_IMPL_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/fw_abi_common_types/cpu/fw_abi_cpu_types.mk b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/fw_abi_common_types/cpu/fw_abi_cpu_types.mk new file mode 100644 index 0000000000000..b1ffbf7ea21ff --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/fw_abi_common_types/cpu/fw_abi_cpu_types.mk @@ -0,0 +1,24 @@ +# # # +# Support for Intel Camera Imaging ISP subsystem. +# Copyright (c) 2010 - 2018, Intel Corporation. +# +# This program is free software; you can redistribute it and/or modify it +# under the terms and conditions of the GNU General Public License, +# version 2, as published by the Free Software Foundation. +# +# This program is distributed in the hope it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +# FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU General Public License for +# more details +# + +# MODULE is FW ABI COMMON TYPES + +FW_ABI_COMMON_TYPES_DIRS = -I$${MODULES_DIR}/fw_abi_common_types +FW_ABI_COMMON_TYPES_DIRS += -I$${MODULES_DIR}/fw_abi_common_types/cpu + +FW_ABI_COMMON_TYPES_HOST_FILES = +FW_ABI_COMMON_TYPES_HOST_CPPFLAGS = $(FW_ABI_COMMON_TYPES_DIRS) + +FW_ABI_COMMON_TYPES_FW_FILES = +FW_ABI_COMMON_TYPES_FW_CPPFLAGS = $(FW_ABI_COMMON_TYPES_DIRS) diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/fw_abi_common_types/cpu/ia_css_terminal_base_types.h b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/fw_abi_common_types/cpu/ia_css_terminal_base_types.h new file mode 100644 index 0000000000000..21cc3f43f485e --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/fw_abi_common_types/cpu/ia_css_terminal_base_types.h @@ -0,0 +1,42 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+*/ + +#ifndef __IA_CSS_TERMINAL_BASE_TYPES_H +#define __IA_CSS_TERMINAL_BASE_TYPES_H + + +#include "type_support.h" +#include "ia_css_terminal_defs.h" + +#define N_UINT16_IN_TERMINAL_STRUCT 3 +#define N_PADDING_UINT8_IN_TERMINAL_STRUCT 5 + +#define SIZE_OF_TERMINAL_STRUCT_BITS \ + (IA_CSS_TERMINAL_TYPE_BITS \ + + IA_CSS_TERMINAL_ID_BITS \ + + N_UINT16_IN_TERMINAL_STRUCT * IA_CSS_UINT16_T_BITS \ + + N_PADDING_UINT8_IN_TERMINAL_STRUCT * IA_CSS_UINT8_T_BITS) + +/* ==================== Base Terminal - START ==================== */ +struct ia_css_terminal_s { /**< Base terminal */ + ia_css_terminal_type_t terminal_type; /**< Type ia_css_terminal_type_t */ + int16_t parent_offset; /**< Offset to the process group */ + uint16_t size; /**< Size of this whole terminal layout-structure */ + uint16_t tm_index; /**< Index of the terminal manifest object */ + ia_css_terminal_ID_t ID; /**< Absolute referal ID for this terminal, valid ID's != 0 */ + uint8_t padding[N_PADDING_UINT8_IN_TERMINAL_STRUCT]; +}; +/* ==================== Base Terminal - END ==================== */ + +#endif /* __IA_CSS_TERMINAL_BASE_TYPES_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/fw_abi_common_types/cpu/ia_css_terminal_manifest_base_types.h b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/fw_abi_common_types/cpu/ia_css_terminal_manifest_base_types.h new file mode 100644 index 0000000000000..056e1b6d5d4bd --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/fw_abi_common_types/cpu/ia_css_terminal_manifest_base_types.h @@ -0,0 +1,42 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. 
+ * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ + +#ifndef __IA_CSS_TERMINAL_MANIFEST_BASE_TYPES_H +#define __IA_CSS_TERMINAL_MANIFEST_BASE_TYPES_H + +#include "ia_css_terminal_defs.h" + +#define N_PADDING_UINT8_IN_TERMINAL_MAN_STRUCT 5 +#define SIZE_OF_TERMINAL_MANIFEST_STRUCT_IN_BITS \ + (IA_CSS_UINT16_T_BITS \ + + IA_CSS_TERMINAL_ID_BITS \ + + IA_CSS_TERMINAL_TYPE_BITS \ + + IA_CSS_UINT32_T_BITS \ + + (N_PADDING_UINT8_IN_TERMINAL_MAN_STRUCT*IA_CSS_UINT8_T_BITS)) + +/* ==================== Base Terminal Manifest - START ==================== */ +struct ia_css_terminal_manifest_s { + ia_css_terminal_type_t terminal_type; /**< Type ia_css_terminal_type_t */ + int16_t parent_offset; /**< Offset to the program group manifest */ + uint16_t size; /**< Size of this whole terminal-manifest layout-structure */ + ia_css_terminal_ID_t ID; + uint8_t padding[N_PADDING_UINT8_IN_TERMINAL_MAN_STRUCT]; +}; + +typedef struct ia_css_terminal_manifest_s + ia_css_terminal_manifest_t; + +/* ==================== Base Terminal Manifest - END ==================== */ + +#endif /* __IA_CSS_TERMINAL_MANIFEST_BASE_TYPES_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/fw_abi_common_types/ia_css_base_types.h b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/fw_abi_common_types/ia_css_base_types.h new file mode 100644 index 0000000000000..3b80a17a6ad38 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/fw_abi_common_types/ia_css_base_types.h @@ -0,0 +1,38 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. 
+ * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ + +#ifndef __IA_CSS_BASE_TYPES_H +#define __IA_CSS_BASE_TYPES_H + +#include "type_support.h" + +#define VIED_VADDRESS_BITS 32 +typedef uint32_t vied_vaddress_t; + +#define DEVICE_DESCRIPTOR_ID_BITS 32 +typedef struct { + uint8_t device_id; + uint8_t instance_id; + uint8_t channel_id; + uint8_t section_id; +} device_descriptor_fields_t; + +typedef union { + device_descriptor_fields_t fields; + uint32_t data; +} device_descriptor_id_t; + +typedef uint16_t ia_css_process_id_t; + +#endif /* __IA_CSS_BASE_TYPES_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/fw_abi_common_types/ia_css_terminal_defs.h b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/fw_abi_common_types/ia_css_terminal_defs.h new file mode 100644 index 0000000000000..dbf1cf93756ff --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/fw_abi_common_types/ia_css_terminal_defs.h @@ -0,0 +1,105 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+*/ + +#ifndef __IA_CSS_TERMINAL_DEFS_H +#define __IA_CSS_TERMINAL_DEFS_H + + +#include "type_support.h" + +#define IA_CSS_TERMINAL_ID_BITS 8 +typedef uint8_t ia_css_terminal_ID_t; +#define IA_CSS_TERMINAL_INVALID_ID ((ia_css_terminal_ID_t)(-1)) + +/* + * Terminal Base Type + */ +typedef enum ia_css_terminal_type { + /**< Data input */ + IA_CSS_TERMINAL_TYPE_DATA_IN = 0, + /**< Data output */ + IA_CSS_TERMINAL_TYPE_DATA_OUT, + /**< Type 6 parameter input */ + IA_CSS_TERMINAL_TYPE_PARAM_STREAM, + /**< Type 1-5 parameter input */ + IA_CSS_TERMINAL_TYPE_PARAM_CACHED_IN, + /**< Type 1-5 parameter output */ + IA_CSS_TERMINAL_TYPE_PARAM_CACHED_OUT, + /**< Represent the new type of terminal for the + * "spatial dependent parameters", when params go in + */ + IA_CSS_TERMINAL_TYPE_PARAM_SPATIAL_IN, + /**< Represent the new type of terminal for the + * "spatial dependent parameters", when params go out + */ + IA_CSS_TERMINAL_TYPE_PARAM_SPATIAL_OUT, + /**< Represent the new type of terminal for the + * explicit slicing, when params go in + */ + IA_CSS_TERMINAL_TYPE_PARAM_SLICED_IN, + /**< Represent the new type of terminal for the + * explicit slicing, when params go out + */ + IA_CSS_TERMINAL_TYPE_PARAM_SLICED_OUT, + /**< State (private data) input */ + IA_CSS_TERMINAL_TYPE_STATE_IN, + /**< State (private data) output */ + IA_CSS_TERMINAL_TYPE_STATE_OUT, + IA_CSS_TERMINAL_TYPE_PROGRAM, + IA_CSS_TERMINAL_TYPE_PROGRAM_CONTROL_INIT, + IA_CSS_N_TERMINAL_TYPES +} ia_css_terminal_type_t; + +#define IA_CSS_TERMINAL_TYPE_BITS 32 + +/* Temporary redirection needed to facilicate merging with the drivers + in a backwards compatible manner */ +#define IA_CSS_TERMINAL_TYPE_PARAM_CACHED IA_CSS_TERMINAL_TYPE_PARAM_CACHED_IN + +/* + * Dimensions of the data objects. Note that a C-style + * data order is assumed. Data stored by row. + */ +typedef enum ia_css_dimension { + /**< The number of columns, i.e. the size of the row */ + IA_CSS_COL_DIMENSION = 0, + /**< The number of rows, i.e. 
the size of the column */ + IA_CSS_ROW_DIMENSION = 1, + IA_CSS_N_DATA_DIMENSION = 2 +} ia_css_dimension_t; + +#define IA_CSS_N_COMMAND_COUNT (4) + +#ifndef PIPE_GENERATION +/* Don't include these complex enum structures in Genpipe, it can't handle and it does not need them */ +/* + * enum ia_css_isys_link_id. Lists the link IDs used by the FW for On The Fly feature + */ +typedef enum ia_css_isys_link_id { + IA_CSS_ISYS_LINK_OFFLINE = 0, + IA_CSS_ISYS_LINK_MAIN_OUTPUT = 1, + IA_CSS_ISYS_LINK_PDAF_OUTPUT = 2 +} ia_css_isys_link_id_t; +#define N_IA_CSS_ISYS_LINK_ID (IA_CSS_ISYS_LINK_PDAF_OUTPUT + 1) + +/* + * enum ia_css_data_barrier_link_id. Lists the link IDs used by the FW for data barrier feature + */ +typedef enum ia_css_data_barrier_link_id { + IA_CSS_DATA_BARRIER_LINK_MEMORY = N_IA_CSS_ISYS_LINK_ID, + N_IA_CSS_DATA_BARRIER_LINK_ID +} ia_css_data_barrier_link_id_t; + +#endif /* #ifndef PIPE_GENERATION */ +#endif /* __IA_CSS_TERMINAL_DEFS_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/pkg_dir/interface/ia_css_pkg_dir.h b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/pkg_dir/interface/ia_css_pkg_dir.h new file mode 100644 index 0000000000000..a284d74bb4a67 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/pkg_dir/interface/ia_css_pkg_dir.h @@ -0,0 +1,99 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+*/ + +#ifndef __IA_CSS_PKG_DIR_H +#define __IA_CSS_PKG_DIR_H + +#include "ia_css_pkg_dir_storage_class.h" +#include "ia_css_pkg_dir_types.h" +#include "type_support.h" + +IA_CSS_PKG_DIR_STORAGE_CLASS_H +const ia_css_pkg_dir_entry_t *ia_css_pkg_dir_get_entry( + const ia_css_pkg_dir_t *pkg_dir, + uint32_t index +); + +/* User is expected to call the verify function manually, + * other functions do not call it internally + */ +IA_CSS_PKG_DIR_STORAGE_CLASS_H +int ia_css_pkg_dir_verify_header( + const ia_css_pkg_dir_entry_t *pkg_dir_header +); + +IA_CSS_PKG_DIR_STORAGE_CLASS_H +uint32_t ia_css_pkg_dir_get_num_entries( + const ia_css_pkg_dir_entry_t *pkg_dir_header +); + +IA_CSS_PKG_DIR_STORAGE_CLASS_H +uint32_t ia_css_pkg_dir_get_size_in_bytes( + const ia_css_pkg_dir_entry_t *pkg_dir_header +); + +IA_CSS_PKG_DIR_STORAGE_CLASS_H +enum ia_css_pkg_dir_version ia_css_pkg_dir_get_version( + const ia_css_pkg_dir_entry_t *pkg_dir_header +); + +IA_CSS_PKG_DIR_STORAGE_CLASS_H +uint16_t ia_css_pkg_dir_set_version( + ia_css_pkg_dir_entry_t *pkg_dir_header, + enum ia_css_pkg_dir_version version +); + + +IA_CSS_PKG_DIR_STORAGE_CLASS_H +uint32_t ia_css_pkg_dir_entry_get_address_lo( + const ia_css_pkg_dir_entry_t *entry +); + +IA_CSS_PKG_DIR_STORAGE_CLASS_H +uint32_t ia_css_pkg_dir_entry_get_address_hi( + const ia_css_pkg_dir_entry_t *entry +); + +IA_CSS_PKG_DIR_STORAGE_CLASS_H +uint32_t ia_css_pkg_dir_entry_get_size( + const ia_css_pkg_dir_entry_t *entry +); + +IA_CSS_PKG_DIR_STORAGE_CLASS_H +uint16_t ia_css_pkg_dir_entry_get_version( + const ia_css_pkg_dir_entry_t *entry +); + +IA_CSS_PKG_DIR_STORAGE_CLASS_H +uint8_t ia_css_pkg_dir_entry_get_type( + const ia_css_pkg_dir_entry_t *entry +); + +/* Get the address of the specified entry in the PKG_DIR + * Note: This function expects the complete PKG_DIR in the same memory space + * and the entries contains offsets and not addresses. 
+ */ +IA_CSS_PKG_DIR_STORAGE_CLASS_H +void *ia_css_pkg_dir_get_entry_address( + const ia_css_pkg_dir_t *pkg_dir, + uint32_t index +); + +#ifdef __IA_CSS_PKG_DIR_INLINE__ + +#include "ia_css_pkg_dir_impl.h" + +#endif + +#endif /* __IA_CSS_PKG_DIR_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/pkg_dir/interface/ia_css_pkg_dir_iunit.h b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/pkg_dir/interface/ia_css_pkg_dir_iunit.h new file mode 100644 index 0000000000000..ad194b0389eb7 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/pkg_dir/interface/ia_css_pkg_dir_iunit.h @@ -0,0 +1,46 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+*/ + +#ifndef __IA_CSS_PKG_DIR_IUNIT_H +#define __IA_CSS_PKG_DIR_IUNIT_H + +/* In bootflow, pkg_dir only supports up to 16 entries in pkg_dir + * pkg_dir_header + Psys_server pg + Isys_server pg + 13 Client pg + */ + +enum { + IA_CSS_PKG_DIR_SIZE = 16, + IA_CSS_PKG_DIR_ENTRIES = IA_CSS_PKG_DIR_SIZE - 1 +}; + +#define IUNIT_MAX_CLIENT_PKG_ENTRIES 13 + +/* Example assignment of unique identifiers for the FW components + * This should match the identifiers in the manifest + */ +enum ia_css_pkg_dir_entry_type { + IA_CSS_PKG_DIR_HEADER = 0, + IA_CSS_PKG_DIR_PSYS_SERVER_PG, + IA_CSS_PKG_DIR_ISYS_SERVER_PG, + IA_CSS_PKG_DIR_CLIENT_PG +}; + +/* Fixed entries in the package directory */ +enum ia_css_pkg_dir_index { + IA_CSS_PKG_DIR_PSYS_INDEX = 0, + IA_CSS_PKG_DIR_ISYS_INDEX = 1, + IA_CSS_PKG_DIR_CLIENT_0 = 2 +}; + +#endif /* __IA_CSS_PKG_DIR_IUNIT_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/pkg_dir/interface/ia_css_pkg_dir_storage_class.h b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/pkg_dir/interface/ia_css_pkg_dir_storage_class.h new file mode 100644 index 0000000000000..cb64172151f92 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/pkg_dir/interface/ia_css_pkg_dir_storage_class.h @@ -0,0 +1,29 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+*/ + +#ifndef __IA_CSS_PKG_DIR_STORAGE_CLASS_H +#define __IA_CSS_PKG_DIR_STORAGE_CLASS_H + + +#include "storage_class.h" + +#ifndef __IA_CSS_PKG_DIR_INLINE__ +#define IA_CSS_PKG_DIR_STORAGE_CLASS_H STORAGE_CLASS_EXTERN +#define IA_CSS_PKG_DIR_STORAGE_CLASS_C +#else +#define IA_CSS_PKG_DIR_STORAGE_CLASS_H STORAGE_CLASS_INLINE +#define IA_CSS_PKG_DIR_STORAGE_CLASS_C STORAGE_CLASS_INLINE +#endif + +#endif /* __IA_CSS_PKG_DIR_STORAGE_CLASS_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/pkg_dir/interface/ia_css_pkg_dir_types.h b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/pkg_dir/interface/ia_css_pkg_dir_types.h new file mode 100644 index 0000000000000..b024b3da2f9e6 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/pkg_dir/interface/ia_css_pkg_dir_types.h @@ -0,0 +1,41 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+*/ + +#ifndef __IA_CSS_PKG_DIR_TYPES_H +#define __IA_CSS_PKG_DIR_TYPES_H + +#include "type_support.h" + +struct ia_css_pkg_dir_entry { + uint32_t address[2]; + uint32_t size; + uint16_t version; + uint8_t type; + uint8_t unused; +}; + +typedef void ia_css_pkg_dir_t; +typedef struct ia_css_pkg_dir_entry ia_css_pkg_dir_entry_t; + +/* The version field of the pkg_dir header defines + * if entries contain offsets or pointers + */ +/* This is temporary, until all pkg_dirs use pointers */ +enum ia_css_pkg_dir_version { + IA_CSS_PKG_DIR_POINTER, + IA_CSS_PKG_DIR_OFFSET +}; + + +#endif /* __IA_CSS_PKG_DIR_TYPES_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/pkg_dir/pkg_dir.mk b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/pkg_dir/pkg_dir.mk new file mode 100644 index 0000000000000..32c8a68f3653c --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/pkg_dir/pkg_dir.mk @@ -0,0 +1,29 @@ +# # # +# Support for Intel Camera Imaging ISP subsystem. +# Copyright (c) 2010 - 2018, Intel Corporation. +# +# This program is free software; you can redistribute it and/or modify it +# under the terms and conditions of the GNU General Public License, +# version 2, as published by the Free Software Foundation. +# +# This program is distributed in the hope it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +# FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU General Public License for +# more details. +# +# +# MODULE is PKG DIR + +PKG_DIR_DIR = $${MODULES_DIR}/pkg_dir +PKG_DIR_INTERFACE = $(PKG_DIR_DIR)/interface +PKG_DIR_SOURCES = $(PKG_DIR_DIR)/src + +PKG_DIR_FILES = $(PKG_DIR_DIR)/src/ia_css_pkg_dir.c +PKG_DIR_CPPFLAGS = -I$(PKG_DIR_INTERFACE) +PKG_DIR_CPPFLAGS += -I$(PKG_DIR_SOURCES) +PKG_DIR_CPPFLAGS += -I$${MODULES_DIR}/../isp/kernels/io_ls/common +PKG_DIR_CPPFLAGS += -I$${MODULES_DIR}/fw_abi_common_types/ipu +PKG_DIR_CPPFLAGS += -I$${MODULES_DIR}/fw_abi_common_types/ipu/$(FW_ABI_IPU_TYPES_VERSION) + +PKG_DIR_CREATE_FILES = $(PKG_DIR_DIR)/src/ia_css_pkg_dir_create.c +PKG_DIR_UPDATE_FILES = $(PKG_DIR_DIR)/src/ia_css_pkg_dir_update.c diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/pkg_dir/src/ia_css_pkg_dir.c b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/pkg_dir/src/ia_css_pkg_dir.c new file mode 100644 index 0000000000000..348b56833e060 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/pkg_dir/src/ia_css_pkg_dir.c @@ -0,0 +1,27 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+*/ + +#ifdef __IA_CSS_PKG_DIR_INLINE__ + +#include "storage_class.h" + +STORAGE_CLASS_INLINE int __ia_css_pkg_dir_avoid_warning_on_empty_file(void) +{ + return 0; +} + +#else +#include "ia_css_pkg_dir_impl.h" + +#endif diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/pkg_dir/src/ia_css_pkg_dir_impl.h b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/pkg_dir/src/ia_css_pkg_dir_impl.h new file mode 100644 index 0000000000000..d5067d21398f9 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/pkg_dir/src/ia_css_pkg_dir_impl.h @@ -0,0 +1,201 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+*/ + +#ifndef __IA_CSS_PKG_DIR_IMPL_H +#define __IA_CSS_PKG_DIR_IMPL_H + +#include "ia_css_pkg_dir.h" +#include "ia_css_pkg_dir_int.h" +#include "error_support.h" +#include "type_support.h" +#include "assert_support.h" + +IA_CSS_PKG_DIR_STORAGE_CLASS_C +const ia_css_pkg_dir_entry_t *ia_css_pkg_dir_get_entry( + const ia_css_pkg_dir_t *pkg_dir, + uint32_t index) +{ + DECLARE_ERRVAL + struct ia_css_pkg_dir_entry *pkg_dir_header = NULL; + + verifexitval(pkg_dir != NULL, EFAULT); + + pkg_dir_header = (struct ia_css_pkg_dir_entry *)pkg_dir; + + /* First entry of the structure is the header, skip that */ + index++; + verifexitval(index < pkg_dir_header->size, EFAULT); + +EXIT: + if (haserror(EFAULT)) { + return NULL; + } + return &(pkg_dir_header[index]); +} + +IA_CSS_PKG_DIR_STORAGE_CLASS_C +int ia_css_pkg_dir_verify_header(const ia_css_pkg_dir_entry_t *pkg_dir_header) +{ + DECLARE_ERRVAL + verifexitval(pkg_dir_header != NULL, EFAULT); + +EXIT: + if (haserror(EFAULT)) { + return -1; + } + return ((pkg_dir_header->address[0] == PKG_DIR_MAGIC_VAL_0) + && (pkg_dir_header->address[1] == PKG_DIR_MAGIC_VAL_1)) ? 
+ 0 : -1; +} + +IA_CSS_PKG_DIR_STORAGE_CLASS_C +uint32_t ia_css_pkg_dir_get_num_entries( + const ia_css_pkg_dir_entry_t *pkg_dir_header) +{ + DECLARE_ERRVAL + uint32_t size = 0; + + verifexitval(pkg_dir_header != NULL, EFAULT); + size = pkg_dir_header->size; + verifexitval(size > 0, EFAULT); +EXIT: + if (haserror(EFAULT)) { + return 0; + } + return size - 1; +} + +IA_CSS_PKG_DIR_STORAGE_CLASS_C +enum ia_css_pkg_dir_version +ia_css_pkg_dir_get_version(const ia_css_pkg_dir_entry_t *pkg_dir_header) +{ + assert(pkg_dir_header != NULL); + return pkg_dir_header->version; +} + +IA_CSS_PKG_DIR_STORAGE_CLASS_C +uint16_t ia_css_pkg_dir_set_version(ia_css_pkg_dir_entry_t *pkg_dir_header, + enum ia_css_pkg_dir_version version) +{ + DECLARE_ERRVAL + + verifexitval(pkg_dir_header != NULL, EFAULT); +EXIT: + if (haserror(EFAULT)) { + return 1; + } + pkg_dir_header->version = version; + return 0; +} + +IA_CSS_PKG_DIR_STORAGE_CLASS_C +uint32_t ia_css_pkg_dir_get_size_in_bytes( + const ia_css_pkg_dir_entry_t *pkg_dir_header) +{ + DECLARE_ERRVAL + + verifexitval(pkg_dir_header != NULL, EFAULT); +EXIT: + if (haserror(EFAULT)) { + return 0; + } + return sizeof(struct ia_css_pkg_dir_entry) * pkg_dir_header->size; +} + +IA_CSS_PKG_DIR_STORAGE_CLASS_C +uint32_t ia_css_pkg_dir_entry_get_address_lo( + const ia_css_pkg_dir_entry_t *entry) +{ + DECLARE_ERRVAL + + verifexitval(entry != NULL, EFAULT); +EXIT: + if (haserror(EFAULT)) { + return 0; + } + return entry->address[0]; +} + +IA_CSS_PKG_DIR_STORAGE_CLASS_C +uint32_t ia_css_pkg_dir_entry_get_address_hi( + const ia_css_pkg_dir_entry_t *entry) +{ + DECLARE_ERRVAL + + verifexitval(entry != NULL, EFAULT); +EXIT: + if (haserror(EFAULT)) { + return 0; + } + return entry->address[1]; +} + +IA_CSS_PKG_DIR_STORAGE_CLASS_C +uint32_t ia_css_pkg_dir_entry_get_size(const ia_css_pkg_dir_entry_t *entry) +{ + DECLARE_ERRVAL + + verifexitval(entry != NULL, EFAULT); +EXIT: + if (haserror(EFAULT)) { + return 0; + } + return entry->size; +} + 
+IA_CSS_PKG_DIR_STORAGE_CLASS_C +uint16_t ia_css_pkg_dir_entry_get_version(const ia_css_pkg_dir_entry_t *entry) +{ + DECLARE_ERRVAL + + verifexitval(entry != NULL, EFAULT); +EXIT: + if (haserror(EFAULT)) { + return 0; + } + return entry->version; +} + +IA_CSS_PKG_DIR_STORAGE_CLASS_C +uint8_t ia_css_pkg_dir_entry_get_type(const ia_css_pkg_dir_entry_t *entry) +{ + DECLARE_ERRVAL + + verifexitval(entry != NULL, EFAULT); +EXIT: + if (haserror(EFAULT)) { + return 0; + } + return entry->type; +} + + +IA_CSS_PKG_DIR_STORAGE_CLASS_C +void *ia_css_pkg_dir_get_entry_address(const ia_css_pkg_dir_t *pkg_dir, + uint32_t index) +{ + void *entry_blob = NULL; + const ia_css_pkg_dir_entry_t *pkg_dir_entry = + ia_css_pkg_dir_get_entry(pkg_dir, index-1); + + if ((pkg_dir_entry != NULL) && + (ia_css_pkg_dir_entry_get_size(pkg_dir_entry) > 0)) { + assert(ia_css_pkg_dir_entry_get_address_hi(pkg_dir_entry) == 0); + entry_blob = (void *)((char *)pkg_dir + + ia_css_pkg_dir_entry_get_address_lo(pkg_dir_entry)); + } + return entry_blob; +} + +#endif /* __IA_CSS_PKG_DIR_IMPL_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/pkg_dir/src/ia_css_pkg_dir_int.h b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/pkg_dir/src/ia_css_pkg_dir_int.h new file mode 100644 index 0000000000000..203505fbee54e --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/pkg_dir/src/ia_css_pkg_dir_int.h @@ -0,0 +1,49 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU General Public License for + * more details. +*/ + +#ifndef __IA_CSS_PKG_DIR_INT_H +#define __IA_CSS_PKG_DIR_INT_H + +/* + * Package Dir structure as specified in CSE FAS + * + * PKG DIR Header + * Qword 63:56 55 54:48 47:32 31:24 23:0 + * 0 "_IUPKDR_" + * 1 Rsvd Rsvd Type Version Rsvd Size + * + * Version: Version of the Structure + * Size: Size of the entire table (including header) in 16 byte chunks + * Type: Must be 0 for header + * + * Figure 13: PKG DIR Header + * + * + * PKG DIR Entry + * Qword 63:56 55 54:48 47:32 31:24 23:0 + * N Address/Offset + * N+1 Rsvd Rsvd Type Version Rsvd Size + * + * Version: Version # of the Component + * Size: Size of the component in bytes + * Type: Component Identifier + */ + +#define PKG_DIR_SIZE_BITS 24 +#define PKG_DIR_TYPE_BITS 7 + +#define PKG_DIR_MAGIC_VAL_1 (('_' << 24) | ('I' << 16) | ('U' << 8) | 'P') +#define PKG_DIR_MAGIC_VAL_0 (('K' << 24) | ('D' << 16) | ('R' << 8) | '_') + +#endif /* __IA_CSS_PKG_DIR_INT_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/port/interface/port_env_struct.h b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/port/interface/port_env_struct.h new file mode 100644 index 0000000000000..4d39a4739a8b0 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/port/interface/port_env_struct.h @@ -0,0 +1,24 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+*/ + +#ifndef __PORT_ENV_STRUCT_H +#define __PORT_ENV_STRUCT_H + +struct port_env { + unsigned int mmid; + unsigned int ssid; + unsigned int mem_addr; +}; + +#endif /* __PORT_ENV_STRUCT_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/port/interface/queue.h b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/port/interface/queue.h new file mode 100644 index 0000000000000..b233ab3baf014 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/port/interface/queue.h @@ -0,0 +1,40 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ + +#ifndef __QUEUE_H +#define __QUEUE_H + +#include "queue_struct.h" +#include "port_env_struct.h" + +/* + * SYS queues are created by the host + * SYS queues cannot be accessed through the queue interface + * To send data into a queue a send_port must be opened. + * To receive data from a queue, a recv_port must be opened. + */ + +/* return required buffer size for queue */ +unsigned int +sys_queue_buf_size(unsigned int size, unsigned int token_size); + +/* + * initialize a queue that can hold at least 'size' tokens of + * 'token_size' bytes. 
+ */ +void +sys_queue_init(struct sys_queue *q, unsigned int size, + unsigned int token_size, struct sys_queue_res *res); + +#endif /* __QUEUE_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/port/interface/queue_struct.h b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/port/interface/queue_struct.h new file mode 100644 index 0000000000000..ef48fcfded2b6 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/port/interface/queue_struct.h @@ -0,0 +1,47 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+*/ + +#ifndef __QUEUE_STRUCT_H +#define __QUEUE_STRUCT_H + +/* queue description, shared between sender and receiver */ + +#include "type_support.h" + +#ifdef __VIED_CELL +typedef struct {uint32_t v[2]; } host_buffer_address_t; +#else +typedef uint64_t host_buffer_address_t; +#endif + +typedef uint32_t vied_buffer_address_t; + + +struct sys_queue { + host_buffer_address_t host_address; + vied_buffer_address_t vied_address; + unsigned int size; + unsigned int token_size; + unsigned int wr_reg; /* reg no in subsystem's regmem */ + unsigned int rd_reg; + unsigned int _align; +}; + +struct sys_queue_res { + host_buffer_address_t host_address; + vied_buffer_address_t vied_address; + unsigned int reg; +}; + +#endif /* __QUEUE_STRUCT_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/port/interface/recv_port.h b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/port/interface/recv_port.h new file mode 100644 index 0000000000000..cce253b266687 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/port/interface/recv_port.h @@ -0,0 +1,34 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+*/ + +#ifndef __RECV_PORT_H +#define __RECV_PORT_H + + +struct recv_port; +struct sys_queue; +struct port_env; + +void +recv_port_open(struct recv_port *p, const struct sys_queue *q, + const struct port_env *env); + +unsigned int +recv_port_available(const struct recv_port *p); + +unsigned int +recv_port_transfer(const struct recv_port *p, void *data); + + +#endif /* __RECV_PORT_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/port/interface/recv_port_struct.h b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/port/interface/recv_port_struct.h new file mode 100644 index 0000000000000..52ec563b13cf5 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/port/interface/recv_port_struct.h @@ -0,0 +1,32 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+*/ + +#ifndef __RECV_PORT_STRUCT_H +#define __RECV_PORT_STRUCT_H + +#include "buffer_type.h" + +struct recv_port { + buffer_address buffer; /* address of buffer in DDR */ + unsigned int size; + unsigned int token_size; + unsigned int wr_reg; /* index of write pointer located in regmem */ + unsigned int rd_reg; /* index read pointer located in regmem */ + + unsigned int mmid; + unsigned int ssid; + unsigned int mem_addr; /* address of memory containing regmem */ +}; + +#endif /* __RECV_PORT_STRUCT_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/port/interface/send_port.h b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/port/interface/send_port.h new file mode 100644 index 0000000000000..04a160f3f0199 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/port/interface/send_port.h @@ -0,0 +1,52 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ + +#ifndef __SEND_PORT_H +#define __SEND_PORT_H + + +/* + * A send port can be used to send tokens into a queue. + * The interface can be used on any type of processor (host, SP, ...) + */ + +struct send_port; +struct sys_queue; +struct port_env; + +/* + * Open a send port on a queue. 
After the port is opened, tokens can be sent + */ +void +send_port_open(struct send_port *p, const struct sys_queue *q, + const struct port_env *env); + +/* + * Determine how many tokens can be sent + */ +unsigned int +send_port_available(const struct send_port *p); + +/* + * Send a token via a send port. The function returns the number of + * tokens that have been sent: + * 1: the token was accepted + * 0: the token was not accepted (full queue) + * The size of a token is determined at initialization. + */ +unsigned int +send_port_transfer(const struct send_port *p, const void *data); + + +#endif /* __SEND_PORT_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/port/interface/send_port_struct.h b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/port/interface/send_port_struct.h new file mode 100644 index 0000000000000..f834c62bc3db6 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/port/interface/send_port_struct.h @@ -0,0 +1,32 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+*/ + +#ifndef __SEND_PORT_STRUCT_H +#define __SEND_PORT_STRUCT_H + +#include "buffer_type.h" + +struct send_port { + buffer_address buffer; + unsigned int size; + unsigned int token_size; + unsigned int wr_reg; /* index of write pointer in regmem */ + unsigned int rd_reg; /* index of read pointer in regmem */ + + unsigned int mmid; + unsigned int ssid; + unsigned int mem_addr; +}; + +#endif /* __SEND_PORT_STRUCT_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/port/port.mk b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/port/port.mk new file mode 100644 index 0000000000000..b3801247802e9 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/port/port.mk @@ -0,0 +1,31 @@ +# # # +# Support for Intel Camera Imaging ISP subsystem. +# Copyright (c) 2010 - 2018, Intel Corporation. +# +# This program is free software; you can redistribute it and/or modify it +# under the terms and conditions of the GNU General Public License, +# version 2, as published by the Free Software Foundation. +# +# This program is distributed in the hope it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +# FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU General Public License for +# more details. +# +# +# MODULE is PORT + +PORT_DIR=$${MODULES_DIR}/port + +PORT_INTERFACE=$(PORT_DIR)/interface +PORT_SOURCES1=$(PORT_DIR)/src + +PORT_HOST_FILES += $(PORT_SOURCES1)/send_port.c +PORT_HOST_FILES += $(PORT_SOURCES1)/recv_port.c +PORT_HOST_FILES += $(PORT_SOURCES1)/queue.c + +PORT_HOST_CPPFLAGS += -I$(PORT_INTERFACE) + +PORT_FW_FILES += $(PORT_SOURCES1)/send_port.c +PORT_FW_FILES += $(PORT_SOURCES1)/recv_port.c + +PORT_FW_CPPFLAGS += -I$(PORT_INTERFACE) diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/port/src/queue.c b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/port/src/queue.c new file mode 100644 index 0000000000000..eeec99dfe2d0d --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/port/src/queue.c @@ -0,0 +1,47 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+*/ + +#include "queue.h" + +#include "regmem_access.h" +#include "port_env_struct.h" + +unsigned int sys_queue_buf_size(unsigned int size, unsigned int token_size) +{ + return (size + 1) * token_size; +} + +void +sys_queue_init(struct sys_queue *q, unsigned int size, unsigned int token_size, + struct sys_queue_res *res) +{ + unsigned int buf_size; + + q->size = size + 1; + q->token_size = token_size; + buf_size = sys_queue_buf_size(size, token_size); + + /* acquire the shared buffer space */ + q->host_address = res->host_address; + res->host_address += buf_size; + q->vied_address = res->vied_address; + res->vied_address += buf_size; + + /* acquire the shared read and writer pointers */ + q->wr_reg = res->reg; + res->reg++; + q->rd_reg = res->reg; + res->reg++; + +} diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/port/src/recv_port.c b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/port/src/recv_port.c new file mode 100644 index 0000000000000..31b36e9ceafbb --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/port/src/recv_port.c @@ -0,0 +1,95 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+*/ + +#include "recv_port.h" +#include "port_env_struct.h" /* for port_env */ +#include "queue_struct.h" /* for sys_queue */ +#include "recv_port_struct.h" /* for recv_port */ +#include "buffer_access.h" /* for buffer_load, buffer_address */ +#include "regmem_access.h" /* for regmem_load_32, regmem_store_32 */ +#include "storage_class.h" /* for STORAGE_CLASS_INLINE */ +#include "math_support.h" /* for OP_std_modadd */ +#include "type_support.h" /* for HOST_ADDRESS */ + +#ifndef __VIED_CELL +#include "cpu_mem_support.h" /* for ia_css_cpu_mem_cache_invalidate */ +#endif + +void +recv_port_open(struct recv_port *p, const struct sys_queue *q, + const struct port_env *env) +{ + p->mmid = env->mmid; + p->ssid = env->ssid; + p->mem_addr = env->mem_addr; + + p->size = q->size; + p->token_size = q->token_size; + p->wr_reg = q->wr_reg; + p->rd_reg = q->rd_reg; + +#ifdef __VIED_CELL + p->buffer = q->vied_address; +#else + p->buffer = q->host_address; +#endif +} + +STORAGE_CLASS_INLINE unsigned int +recv_port_index(const struct recv_port *p, unsigned int i) +{ + unsigned int rd = regmem_load_32(p->mem_addr, p->rd_reg, p->ssid); + + return OP_std_modadd(rd, i, p->size); +} + +unsigned int +recv_port_available(const struct recv_port *p) +{ + int wr = (int)regmem_load_32(p->mem_addr, p->wr_reg, p->ssid); + int rd = (int)regmem_load_32(p->mem_addr, p->rd_reg, p->ssid); + + return OP_std_modadd(wr, -rd, p->size); +} + +STORAGE_CLASS_INLINE void +recv_port_copy(const struct recv_port *p, unsigned int i, void *data) +{ + unsigned int rd = recv_port_index(p, i); + unsigned int token_size = p->token_size; + buffer_address addr = p->buffer + (rd * token_size); +#ifndef __VIED_CELL + ia_css_cpu_mem_cache_invalidate((void *)HOST_ADDRESS(p->buffer), + token_size*p->size); +#endif + buffer_load(addr, data, token_size, p->mmid); +} + +STORAGE_CLASS_INLINE void +recv_port_release(const struct recv_port *p, unsigned int i) +{ + unsigned int rd = recv_port_index(p, i); + + 
regmem_store_32(p->mem_addr, p->rd_reg, rd, p->ssid); +} + +unsigned int +recv_port_transfer(const struct recv_port *p, void *data) +{ + if (!recv_port_available(p)) + return 0; + recv_port_copy(p, 0, data); + recv_port_release(p, 1); + return 1; +} diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/port/src/send_port.c b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/port/src/send_port.c new file mode 100644 index 0000000000000..8d1fba08c5d58 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/port/src/send_port.c @@ -0,0 +1,94 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+*/ + +#include "send_port.h" +#include "queue_struct.h" /* for sys_queue */ +#include "send_port_struct.h" /* for send_port */ +#include "port_env_struct.h" /* for port_env */ +#include "regmem_access.h" /* for regmem_load_32, regmem_store_32 */ +#include "buffer_access.h" /* for buffer_store, buffer_address */ +#include "storage_class.h" /* for STORAGE_CLASS_INLINE */ +#include "math_support.h" /* for OP_std_modadd */ +#include "type_support.h" /* for HOST_ADDRESS */ + +#ifndef __VIED_CELL +#include "cpu_mem_support.h" /* for ia_css_cpu_mem_cache_flush */ +#endif + +void +send_port_open(struct send_port *p, const struct sys_queue *q, + const struct port_env *env) +{ + p->mmid = env->mmid; + p->ssid = env->ssid; + p->mem_addr = env->mem_addr; + + p->size = q->size; + p->token_size = q->token_size; + p->wr_reg = q->wr_reg; + p->rd_reg = q->rd_reg; +#ifdef __VIED_CELL + p->buffer = q->vied_address; +#else + p->buffer = q->host_address; +#endif +} + +STORAGE_CLASS_INLINE unsigned int +send_port_index(const struct send_port *p, unsigned int i) +{ + unsigned int wr = regmem_load_32(p->mem_addr, p->wr_reg, p->ssid); + + return OP_std_modadd(wr, i, p->size); +} + +unsigned int +send_port_available(const struct send_port *p) +{ + int rd = (int)regmem_load_32(p->mem_addr, p->rd_reg, p->ssid); + int wr = (int)regmem_load_32(p->mem_addr, p->wr_reg, p->ssid); + + return OP_std_modadd(rd, -(wr+1), p->size); +} + +STORAGE_CLASS_INLINE void +send_port_copy(const struct send_port *p, unsigned int i, const void *data) +{ + unsigned int wr = send_port_index(p, i); + unsigned int token_size = p->token_size; + buffer_address addr = p->buffer + (wr * token_size); + + buffer_store(addr, data, token_size, p->mmid); +#ifndef __VIED_CELL + ia_css_cpu_mem_cache_flush((void *)HOST_ADDRESS(addr), token_size); +#endif +} + +STORAGE_CLASS_INLINE void +send_port_release(const struct send_port *p, unsigned int i) +{ + unsigned int wr = send_port_index(p, i); + + regmem_store_32(p->mem_addr, 
p->wr_reg, wr, p->ssid); +} + +unsigned int +send_port_transfer(const struct send_port *p, const void *data) +{ + if (!send_port_available(p)) + return 0; + send_port_copy(p, 0, data); + send_port_release(p, 1); + return 1; +} diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/psys_private_pg/interface/ia_css_psys_private_pg_data.h b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/psys_private_pg/interface/ia_css_psys_private_pg_data.h new file mode 100644 index 0000000000000..6b2387352ae36 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/psys_private_pg/interface/ia_css_psys_private_pg_data.h @@ -0,0 +1,43 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+*/ + +#ifndef __IA_CSS_PSYS_PRIVATE_PG_DATA_H +#define __IA_CSS_PSYS_PRIVATE_PG_DATA_H + +#include "ipu_device_acb_devices.h" +#include "ipu_device_gp_devices.h" +#include "type_support.h" +#include "vied_nci_acb_route_type.h" + +#define PRIV_CONF_INVALID 0xFF + +struct ia_css_psys_pg_buffer_information_s { + unsigned int buffer_base_addr; + unsigned int bpe; + unsigned int buffer_width; + unsigned int buffer_height; + unsigned int num_of_buffers; + unsigned int dfm_port_addr; +}; + +typedef struct ia_css_psys_pg_buffer_information_s ia_css_psys_pg_buffer_information_t; + +struct ia_css_psys_private_pg_data { + nci_acb_route_t acb_route[IPU_DEVICE_ACB_NUM_ACB]; + uint8_t psa_mux_conf[IPU_DEVICE_GP_PSA_MUX_NUM_MUX]; + uint8_t isa_mux_conf[IPU_DEVICE_GP_ISA_STATIC_MUX_NUM_MUX]; + ia_css_psys_pg_buffer_information_t input_buffer_info; +}; + +#endif /* __IA_CSS_PSYS_PRIVATE_PG_DATA_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/psys_server/interface/ia_css_bxt_spctrl_trace.h b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/psys_server/interface/ia_css_bxt_spctrl_trace.h new file mode 100644 index 0000000000000..eee1d6ab0a496 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/psys_server/interface/ia_css_bxt_spctrl_trace.h @@ -0,0 +1,107 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+*/ + +#ifndef __IA_CSS_BXT_SPCTRL_TRACE_H +#define __IA_CSS_BXT_SPCTRL_TRACE_H + +#include "ia_css_trace.h" + +/* Not using 0 to identify wrong configuration being passed from + * the .mk file outside. + * Log levels not in the range below will cause a + * "No BXT_SPCTRL_TRACE_CONFIG Tracing level defined" + */ +#define BXT_SPCTRL_TRACE_LOG_LEVEL_OFF 1 +#define BXT_SPCTRL_TRACE_LOG_LEVEL_NORMAL 2 +#define BXT_SPCTRL_TRACE_LOG_LEVEL_DEBUG 3 + +/* BXT_SPCTRL and all the submodules in BXT_SPCTRL will have the + * default tracing level set to the BXT_SPCTRL_TRACE_CONFIG level. + * If not defined in the psysapi.mk fill it will be set by + * default to no trace (BXT_SPCTRL_TRACE_LOG_LEVEL_NORMAL) + */ +#define BXT_SPCTRL_TRACE_CONFIG_DEFAULT BXT_SPCTRL_TRACE_LOG_LEVEL_NORMAL + +#if !defined(BXT_SPCTRL_TRACE_CONFIG) +# define BXT_SPCTRL_TRACE_CONFIG BXT_SPCTRL_TRACE_CONFIG_DEFAULT +#endif + +/* BXT_SPCTRL Module tracing backend is mapped to TUNIT tracing for + * target platforms + */ +#ifdef __HIVECC +# ifndef HRT_CSIM +# define BXT_SPCTRL_TRACE_METHOD IA_CSS_TRACE_METHOD_TRACE +# else +# define BXT_SPCTRL_TRACE_METHOD IA_CSS_TRACE_METHOD_NATIVE +# endif +#else +# define BXT_SPCTRL_TRACE_METHOD IA_CSS_TRACE_METHOD_NATIVE +#endif + +#if (defined(BXT_SPCTRL_TRACE_CONFIG)) + /* Module specific trace setting */ +# if BXT_SPCTRL_TRACE_CONFIG == BXT_SPCTRL_TRACE_LOG_LEVEL_OFF + /* BXT_SPCTRL_TRACE_LOG_LEVEL_OFF */ +# define BXT_SPCTRL_TRACE_LEVEL_ASSERT \ + IA_CSS_TRACE_LEVEL_DISABLED +# define BXT_SPCTRL_TRACE_LEVEL_ERROR \ + IA_CSS_TRACE_LEVEL_DISABLED +# define BXT_SPCTRL_TRACE_LEVEL_WARNING \ + IA_CSS_TRACE_LEVEL_DISABLED +# define BXT_SPCTRL_TRACE_LEVEL_INFO \ + IA_CSS_TRACE_LEVEL_DISABLED +# define BXT_SPCTRL_TRACE_LEVEL_DEBUG \ + IA_CSS_TRACE_LEVEL_DISABLED +# define BXT_SPCTRL_TRACE_LEVEL_VERBOSE \ + IA_CSS_TRACE_LEVEL_DISABLED +# elif BXT_SPCTRL_TRACE_CONFIG == BXT_SPCTRL_TRACE_LOG_LEVEL_NORMAL + /* BXT_SPCTRL_TRACE_LOG_LEVEL_NORMAL */ +# define 
BXT_SPCTRL_TRACE_LEVEL_ASSERT \ + IA_CSS_TRACE_LEVEL_DISABLED +# define BXT_SPCTRL_TRACE_LEVEL_ERROR \ + IA_CSS_TRACE_LEVEL_ENABLED +# define BXT_SPCTRL_TRACE_LEVEL_WARNING \ + IA_CSS_TRACE_LEVEL_DISABLED +# define BXT_SPCTRL_TRACE_LEVEL_INFO \ + IA_CSS_TRACE_LEVEL_ENABLED +# define BXT_SPCTRL_TRACE_LEVEL_DEBUG \ + IA_CSS_TRACE_LEVEL_DISABLED +# define BXT_SPCTRL_TRACE_LEVEL_VERBOSE \ + IA_CSS_TRACE_LEVEL_DISABLED +# elif BXT_SPCTRL_TRACE_CONFIG == BXT_SPCTRL_TRACE_LOG_LEVEL_DEBUG + /* BXT_SPCTRL_TRACE_LOG_LEVEL_DEBUG */ +# define BXT_SPCTRL_TRACE_LEVEL_ASSERT \ + IA_CSS_TRACE_LEVEL_ENABLED +# define BXT_SPCTRL_TRACE_LEVEL_ERROR \ + IA_CSS_TRACE_LEVEL_ENABLED +# define BXT_SPCTRL_TRACE_LEVEL_WARNING \ + IA_CSS_TRACE_LEVEL_ENABLED +# define BXT_SPCTRL_TRACE_LEVEL_INFO \ + IA_CSS_TRACE_LEVEL_ENABLED +# define BXT_SPCTRL_TRACE_LEVEL_DEBUG \ + IA_CSS_TRACE_LEVEL_ENABLED +# define BXT_SPCTRL_TRACE_LEVEL_VERBOSE \ + IA_CSS_TRACE_LEVEL_ENABLED +# else +# error "No BXT_SPCTRL_TRACE_CONFIG Tracing level defined" +# endif +#else +# error "BXT_SPCTRL_TRACE_CONFIG not defined" +#endif + +/* Overriding submodules in BXT_SPCTRL with a specific tracing level */ +/* #define BXT_SPCTRL_DYNAMIC_TRACING_OVERRIDE TRACE_LOG_LEVEL_VERBOSE */ + +#endif /* __IA_CSS_BXT_SPCTRL_TRACE_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/psys_server/psys_server.mk b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/psys_server/psys_server.mk new file mode 100644 index 0000000000000..c4462c9847935 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/psys_server/psys_server.mk @@ -0,0 +1,81 @@ +# # # +# Support for Intel Camera Imaging ISP subsystem. +# Copyright (c) 2010 - 2018, Intel Corporation. +# +# This program is free software; you can redistribute it and/or modify it +# under the terms and conditions of the GNU General Public License, +# version 2, as published by the Free Software Foundation. 
+#
+# This program is distributed in the hope it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+# more details
+#
+#
+# MODULE is PSYS_SERVER
+
+include $(MODULES_DIR)/config/system_$(IPU_SYSVER).mk
+include $(MODULES_DIR)/config/$(SUBSYSTEM)/subsystem_$(IPU_SYSVER).mk
+
+PSYS_SERVER_DIR=${MODULES_DIR}/psys_server
+
+# The watchdog should never be merged enabled
+PSYS_SERVER_WATCHDOG_ENABLE ?= 0
+
+PSYS_SERVER_INTERFACE=$(PSYS_SERVER_DIR)/interface
+PSYS_SERVER_SOURCES=$(PSYS_SERVER_DIR)/src
+
+# PSYS API implementation files. Consider a new module for those to avoid
+# having them together with firmware.
+# Note: use make expansion $(MODULES_DIR); "$$" would escape to a literal
+# "$" and the path would never be resolved by make.
+PSYS_SERVER_HOST_FILES += $(MODULES_DIR)/psysapi/device/src/ia_css_psys_device.c
+PSYS_SERVER_HOST_FILES += $(PSYS_SERVER_SOURCES)/bxt_spctrl_process_group_cmd_impl.c
+
+PSYS_SERVER_HOST_CPPFLAGS += -I$(PSYS_SERVER_INTERFACE)
+
+PSYS_SERVER_HOST_CPPFLAGS += -DSSID=$(SSID)
+PSYS_SERVER_HOST_CPPFLAGS += -DMMID=$(MMID)
+
+
+PSYS_SERVER_FW_FILES += $(PSYS_SERVER_SOURCES)/psys_cmd_queue_fw.c
+PSYS_SERVER_FW_FILES += $(PSYS_SERVER_SOURCES)/psys_event_queue_fw.c
+PSYS_SERVER_FW_FILES += $(PSYS_SERVER_SOURCES)/psys_init_fw.c
+PSYS_SERVER_FW_FILES += $(PSYS_SERVER_SOURCES)/psys_process_group_fw.c
+
+# Files that server modules need to use
+PSYS_SERVER_SUPPORT_FILES = $(PSYS_SERVER_SOURCES)/dev_access_conv/$(IPU_SYSVER)/ia_css_psys_server_dev_access_type_conv.c
+PSYS_SERVER_SUPPORT_FILES += $(PSYS_SERVER_SOURCES)/ia_css_psys_server_config.c
+
+# Include those to build the release firmware. Otherwise replace by test code.
+PSYS_SERVER_RELEASE_FW_FILES = $(PSYS_SERVER_SOURCES)/psys_server.c +PSYS_SERVER_RELEASE_FW_FILES += $(PSYS_SERVER_SOURCES)/ia_css_psys_proxy.c +PSYS_SERVER_RELEASE_FW_FILES += $(PSYS_SERVER_SOURCES)/ia_css_psys_server_dev_access.c +PSYS_SERVER_RELEASE_FW_FILES += $(PSYS_SERVER_SOURCES)/ia_css_psys_server_terminal_load.c +PSYS_SERVER_RELEASE_FW_FILES += $(PSYS_SERVER_SOURCES)/ia_css_psys_server_remote_obj_access.c +PSYS_SERVER_RELEASE_FW_FILES += $(PSYS_SERVER_SOURCES)/ia_css_psys_server_dma_access.c +ifeq ($(HAS_DEC400), 1) +PSYS_SERVER_RELEASE_FW_FILES += $(PSYS_SERVER_SOURCES)/ia_css_psys_server_dec400_access.c +endif +PSYS_SERVER_RELEASE_FW_FILES += $(PSYS_SERVER_SUPPORT_FILES) + +PSYS_SERVER_FW_CPPFLAGS += -I$(PSYS_SERVER_INTERFACE) +PSYS_SERVER_FW_CPPFLAGS += -I$(PSYS_SERVER_SOURCES) +PSYS_SERVER_FW_CPPFLAGS += -I$(PSYS_SERVER_SOURCES)/$(IPU_SYSVER) +PSYS_SERVER_FW_CPPFLAGS += -I$(PSYS_SERVER_SOURCES)/$(PSYS_SERVER_VERSION) +PSYS_SERVER_FW_CPPFLAGS += -I$(PSYS_SERVER_SOURCES)/loader/$(PSYS_SERVER_LOADER_VERSION) +PSYS_SERVER_FW_CPPFLAGS += -I$(PSYS_SERVER_SOURCES)/access_blocker/$(PSYS_ACCESS_BLOCKER_VERSION) +PSYS_SERVER_FW_CPPFLAGS += -I$(PSYS_SERVER_SOURCES)/access_blocker/src + +PSYS_SERVER_FW_CPPFLAGS += -DSSID=$(SSID) +PSYS_SERVER_FW_CPPFLAGS += -DMMID=$(MMID) +PSYS_SERVER_FW_CPPFLAGS += -DHAS_DPCM=$(if $(HAS_DPCM),1,0) + +# PSYS server watchdog for debugging +ifeq ($(PSYS_SERVER_WATCHDOG_ENABLE), 1) + PSYS_SERVER_FW_FILES += $(PSYS_SERVER_SOURCES)/ia_css_psys_server_watchdog.c + PSYS_SERVER_FW_CPPFLAGS += -DPSYS_SERVER_WATCHDOG_DEBUG +endif + +PSYS_SERVER_FW_CPPFLAGS += -D$(PSYS_HW_VERSION) + +PSYS_SERVER_FW_CPPFLAGS += -DENABLE_TPROXY=$(PSYS_SERVER_ENABLE_TPROXY) +PSYS_SERVER_FW_CPPFLAGS += -DENABLE_DEVPROXY=$(PSYS_SERVER_ENABLE_DEVPROXY) diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/psys_server/src/bxt_spctrl_process_group_cmd_impl.c 
b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/psys_server/src/bxt_spctrl_process_group_cmd_impl.c new file mode 100644 index 0000000000000..6f8aea782464a --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/psys_server/src/bxt_spctrl_process_group_cmd_impl.c @@ -0,0 +1,332 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ + +#include "ia_css_psys_device.h" +#include "ia_css_psys_process_group_cmd_impl.h" +#include "ia_css_psysapi.h" +#include "ia_css_psys_terminal.h" +#include "ia_css_psys_process.h" +#include "ia_css_psys_process.psys.h" +#include "ia_css_psys_process_group.h" +#include "ia_css_psys_process_group.psys.h" +#include "ia_css_psys_program_group_manifest.h" +#include "type_support.h" +#include "error_support.h" +#include "misc_support.h" +#include "cpu_mem_support.h" +#include "ia_css_bxt_spctrl_trace.h" + +#if HAS_DUAL_CMD_CTX_SUPPORT +#define MAX_CLIENT_PGS 8 /* same as test_params.h */ +struct ia_css_process_group_context { + ia_css_process_group_t *pg; + bool secure; +}; +struct ia_css_process_group_context pg_contexts[MAX_CLIENT_PGS]; +static unsigned int num_of_pgs; + +STORAGE_CLASS_INLINE +struct ia_css_syscom_context *ia_css_process_group_get_context(ia_css_process_group_t *process_group) +{ + unsigned int i; + bool secure = false; + + IA_CSS_TRACE_0(BXT_SPCTRL, INFO, + "ia_css_process_group_get_context(): enter:\n"); + + for (i = 0; i < num_of_pgs; i++) { + if (pg_contexts[i].pg == process_group) { + 
secure = pg_contexts[i].secure; + break; + } + } + + IA_CSS_TRACE_1(BXT_SPCTRL, INFO, + "ia_css_process_group_get_context(): secure %d\n", secure); + return secure ? psys_syscom_secure : psys_syscom; +} + +int ia_css_process_group_store(ia_css_process_group_t *process_group, bool secure) +{ + IA_CSS_TRACE_2(BXT_SPCTRL, INFO, + "ia_css_process_group_store(): pg instance %d secure %d\n", num_of_pgs, secure); + + pg_contexts[num_of_pgs].pg = process_group; + pg_contexts[num_of_pgs].secure = secure; + num_of_pgs++; + return 0; +} +#else /* HAS_DUAL_CMD_CTX_SUPPORT */ +STORAGE_CLASS_INLINE +struct ia_css_syscom_context *ia_css_process_group_get_context(ia_css_process_group_t *process_group) +{ + NOT_USED(process_group); + + return psys_syscom; +} + +int ia_css_process_group_store(ia_css_process_group_t *process_group, bool secure) +{ + NOT_USED(process_group); + NOT_USED(secure); + + return 0; +} +#endif /* HAS_DUAL_CMD_CTX_SUPPORT */ + +int ia_css_process_group_on_create( + ia_css_process_group_t *process_group, + const ia_css_program_group_manifest_t *program_group_manifest, + const ia_css_program_group_param_t *program_group_param) +{ + NOT_USED(process_group); + NOT_USED(program_group_manifest); + NOT_USED(program_group_param); + + IA_CSS_TRACE_0(BXT_SPCTRL, INFO, + "ia_css_process_group_on_create(): enter:\n"); + + return 0; +} + +int ia_css_process_group_on_destroy( + ia_css_process_group_t *process_group) +{ + NOT_USED(process_group); + + IA_CSS_TRACE_0(BXT_SPCTRL, INFO, + "ia_css_process_group_on_destroy(): enter:\n"); + + return 0; +} + +int ia_css_process_group_exec_cmd( + ia_css_process_group_t *process_group, + const ia_css_process_group_cmd_t cmd) +{ + int retval = -1; + ia_css_process_group_state_t state; + struct ia_css_psys_cmd_s psys_cmd; + bool cmd_queue_full; + unsigned int queue_id; + + IA_CSS_TRACE_0(BXT_SPCTRL, INFO, + "ia_css_process_group_exec_cmd(): enter:\n"); + + verifexit(process_group != NULL); + + state = 
ia_css_process_group_get_state(process_group); + + verifexit(state != IA_CSS_PROCESS_GROUP_ERROR); + verifexit(state < IA_CSS_N_PROCESS_GROUP_STATES); + + switch (cmd) { + case IA_CSS_PROCESS_GROUP_CMD_SUBMIT: + + IA_CSS_TRACE_0(BXT_SPCTRL, INFO, + "ia_css_process_group_exec_cmd(): IA_CSS_PROCESS_GROUP_CMD_SUBMIT:\n"); + verifexit(state == IA_CSS_PROCESS_GROUP_READY); + + /* External resource availability checks */ + verifexit(ia_css_can_process_group_submit(process_group)); + + process_group->state = IA_CSS_PROCESS_GROUP_BLOCKED; + break; + case IA_CSS_PROCESS_GROUP_CMD_START: + + IA_CSS_TRACE_0(BXT_SPCTRL, INFO, + "ia_css_process_group_exec_cmd(): IA_CSS_PROCESS_GROUP_CMD_START:\n"); + verifexit(state == IA_CSS_PROCESS_GROUP_BLOCKED); + + /* External resource state checks */ + verifexit(ia_css_can_process_group_start(process_group)); + + process_group->state = IA_CSS_PROCESS_GROUP_STARTED; + break; + case IA_CSS_PROCESS_GROUP_CMD_DISOWN: + + IA_CSS_TRACE_0(BXT_SPCTRL, INFO, + "ia_css_process_group_exec_cmd(): IA_CSS_PROCESS_GROUP_CMD_DISOWN:\n"); + verifexit(state == IA_CSS_PROCESS_GROUP_STARTED); + + cmd_queue_full = ia_css_is_psys_cmd_queue_full(ia_css_process_group_get_context(process_group), + IA_CSS_PSYS_CMD_QUEUE_COMMAND_ID); + retval = EBUSY; + verifexit(cmd_queue_full == false); + + psys_cmd.command = IA_CSS_PROCESS_GROUP_CMD_START; + psys_cmd.msg = 0; + psys_cmd.context_handle = process_group->ipu_virtual_address; + + verifexit(ia_css_process_group_print(process_group, NULL) == 0); + + retval = ia_css_psys_cmd_queue_send(ia_css_process_group_get_context(process_group), + IA_CSS_PSYS_CMD_QUEUE_COMMAND_ID, &psys_cmd); + verifexit(retval > 0); + break; + case IA_CSS_PROCESS_GROUP_CMD_STOP: + + IA_CSS_TRACE_0(BXT_SPCTRL, INFO, + "ia_css_process_group_exec_cmd(): IA_CSS_PROCESS_GROUP_CMD_STOP:\n"); + + cmd_queue_full = ia_css_is_psys_cmd_queue_full(ia_css_process_group_get_context(process_group), + IA_CSS_PSYS_CMD_QUEUE_COMMAND_ID); + retval = EBUSY; + 
verifexit(cmd_queue_full == false); + + psys_cmd.command = IA_CSS_PROCESS_GROUP_CMD_STOP; + psys_cmd.msg = 0; + psys_cmd.context_handle = process_group->ipu_virtual_address; + + queue_id = ia_css_process_group_get_base_queue_id(process_group); + verifexit(queue_id < IA_CSS_N_PSYS_CMD_QUEUE_ID); + + retval = ia_css_psys_cmd_queue_send(ia_css_process_group_get_context(process_group), + queue_id, &psys_cmd); + verifexit(retval > 0); + break; + case IA_CSS_PROCESS_GROUP_CMD_ABORT: + + IA_CSS_TRACE_0(BXT_SPCTRL, INFO, + "ia_css_process_group_exec_cmd(): IA_CSS_PROCESS_GROUP_CMD_ABORT:\n"); + + /* Once the flushing of shared buffers is fixed this verifexit + * should be changed to be state = IA_CSS_PROCESS_GROUP_STARTED + */ + verifexit(state == IA_CSS_PROCESS_GROUP_BLOCKED); + + cmd_queue_full = ia_css_is_psys_cmd_queue_full(ia_css_process_group_get_context(process_group), + IA_CSS_PSYS_CMD_QUEUE_COMMAND_ID); + retval = EBUSY; + verifexit(cmd_queue_full == false); + + psys_cmd.command = IA_CSS_PROCESS_GROUP_CMD_ABORT; + psys_cmd.msg = 0; + psys_cmd.context_handle = process_group->ipu_virtual_address; + + retval = ia_css_psys_cmd_queue_send(ia_css_process_group_get_context(process_group), + IA_CSS_PSYS_CMD_QUEUE_DEVICE_ID, &psys_cmd); + verifexit(retval > 0); + break; + default: + verifexit(false); + break; + } + + retval = 0; +EXIT: + if (0 != retval) { + IA_CSS_TRACE_1(BXT_SPCTRL, ERROR, + "ia_css_process_group_exec_cmd failed (%i)\n", retval); + } + return retval; +} + +STORAGE_CLASS_INLINE int enqueue_buffer_set_cmd( + ia_css_process_group_t *process_group, + ia_css_buffer_set_t *buffer_set, + unsigned int queue_offset, + uint16_t command + ) +{ + int retval = -1; + struct ia_css_psys_cmd_s psys_cmd; + bool cmd_queue_full; + unsigned int queue_id; + + verifexit(ia_css_process_group_get_state(process_group) + == IA_CSS_PROCESS_GROUP_STARTED); + + verifexit(queue_offset < + ia_css_process_group_get_num_queues(process_group)); + + queue_id = + 
ia_css_process_group_get_base_queue_id(process_group) + + queue_offset; + verifexit(queue_id < IA_CSS_N_PSYS_CMD_QUEUE_ID); + + cmd_queue_full = ia_css_is_psys_cmd_queue_full(ia_css_process_group_get_context(process_group), queue_id); + retval = EBUSY; + verifexit(cmd_queue_full == false); + + psys_cmd.command = command; + psys_cmd.msg = 0; + psys_cmd.context_handle = + ia_css_buffer_set_get_ipu_address(buffer_set); + + retval = ia_css_psys_cmd_queue_send(ia_css_process_group_get_context(process_group), queue_id, &psys_cmd); + verifexit(retval > 0); + + retval = 0; + +EXIT: + if (0 != retval) { + IA_CSS_TRACE_1(BXT_SPCTRL, ERROR, + "enqueue_buffer_set failed (%i)\n", retval); + } + return retval; +} + +int ia_css_enqueue_buffer_set( + ia_css_process_group_t *process_group, + ia_css_buffer_set_t *buffer_set, + unsigned int queue_offset) +{ + int retval = -1; + + IA_CSS_TRACE_0(BXT_SPCTRL, INFO, + "ia_css_enqueue_buffer_set():\n"); + retval = enqueue_buffer_set_cmd( + process_group, + buffer_set, + queue_offset, + IA_CSS_PROCESS_GROUP_CMD_RUN); + + if (0 != retval) { + IA_CSS_TRACE_1(BXT_SPCTRL, ERROR, + "ia_css_enqueue_buffer_set failed (%i)\n", retval); + } + return retval; +} + +int ia_css_enqueue_param_buffer_set( + ia_css_process_group_t *process_group, + ia_css_buffer_set_t *param_buffer_set) +{ +#if (HAS_LATE_BINDING_SUPPORT == 1) + int retval = -1; + + IA_CSS_TRACE_0(BXT_SPCTRL, INFO, + "ia_css_enqueue_param_buffer_set():\n"); + + retval = enqueue_buffer_set_cmd( + process_group, + param_buffer_set, + IA_CSS_PSYS_LATE_BINDING_QUEUE_OFFSET, + IA_CSS_PROCESS_GROUP_CMD_SUBMIT); + + if (0 != retval) { + IA_CSS_TRACE_1(BXT_SPCTRL, ERROR, + "ia_css_enqueue_param_buffer_set failed (%i)\n", retval); + } +#else + int retval = -1; + + NOT_USED(process_group); + NOT_USED(param_buffer_set); + IA_CSS_TRACE_0(BXT_SPCTRL, ERROR, + "ia_css_enqueue_param_buffer_set failed, no late binding supported\n"); +#endif /* (HAS_LATE_BINDING_SUPPORT == 1) */ + return retval; +} diff 
--git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/psysapi/data/interface/ia_css_program_group_data.h b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/psysapi/data/interface/ia_css_program_group_data.h new file mode 100644 index 0000000000000..6ccca1d9b69e1 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/psysapi/data/interface/ia_css_program_group_data.h @@ -0,0 +1,418 @@ +/* +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ + +#ifndef __IA_CSS_PROGRAM_GROUP_DATA_H +#define __IA_CSS_PROGRAM_GROUP_DATA_H + +#include "ia_css_psys_data_storage_class.h" + +/*! \file */ + +/** @file ia_css_program_group_data.h + * + * Define the data objects that are passed to the process groups + * i.e. 
frames and matrices with their sub-structures + * + * The data objects are separate from the process group terminal, + * although they are stored by value rather than by reference and + * make the process group terminal dependendent on its definition + * + * This frame definition overloads the current CSS frame definition + * they are the same object, just a slightly different implementation + */ + +#include /* vied_vaddress_t */ + +#include +#include "ia_css_program_group_data_defs.h" /* ia_css_frame_format_type */ + +#include "ia_css_terminal_defs.h" + +/* + * Frame buffer state used for sequencing + * (see FAS 5.5.3) + * + * The buffer can be in DDR or a handle to a stream + */ +typedef enum ia_css_buffer_state { + IA_CSS_BUFFER_NULL = 0, + IA_CSS_BUFFER_UNDEFINED, + IA_CSS_BUFFER_EMPTY, + IA_CSS_BUFFER_NONEMPTY, + IA_CSS_BUFFER_FULL, + IA_CSS_N_BUFFER_STATES +} ia_css_buffer_state_t; + +#define IA_CSS_BUFFER_STATE_IN_BITS 32 + +/* + * Pointer state used to signal MMU invalidation + */ +typedef enum ia_css_pointer_state { + IA_CSS_POINTER_INVALID = 0, + IA_CSS_POINTER_VALID, + IA_CSS_N_POINTER_STATES +} ia_css_pointer_state_t; + +#define IA_CSS_POINTER_STATE_IN_BITS 32 + +/* + * Access direction needed to select the access port + */ +typedef enum ia_css_access_type { + IA_CSS_ACCESS_LOCKED = 0, + IA_CSS_ACCESS_READ, + IA_CSS_ACCESS_WRITE, + IA_CSS_ACCESS_MODIFY, + IA_CSS_N_ACCESS_TYPES +} ia_css_access_type_t; + +#define IA_CSS_ACCESS_TYPE_IN_BITS 32 + +/* + * Access attribute needed to select the access port + * - public : snooped + * - private: non-snooped + * Naming is a bit awkward, lack of inspiration + */ +typedef enum ia_css_access_scope { + IA_CSS_ACCESS_PRIVATE = 0, + IA_CSS_ACCESS_PUBLIC, + IA_CSS_N_ACCESS_SCOPES +} ia_css_access_scopes_t; + +#define IA_CSS_ACCESS_SCOPES_IN_BITS 32 + +#define IA_CSS_N_FRAME_PLANES 6 + +#define IA_CSS_FRAME_FORMAT_BITMAP_BITS 64 +typedef uint64_t ia_css_frame_format_bitmap_t; + +typedef struct 
ia_css_param_frame_descriptor_s ia_css_param_frame_descriptor_t; +typedef struct ia_css_param_frame_s ia_css_param_frame_t; + +typedef struct ia_css_frame_descriptor_s ia_css_frame_descriptor_t; +typedef struct ia_css_frame_s ia_css_frame_t; +typedef struct ia_css_fragment_descriptor_s ia_css_fragment_descriptor_t; + +typedef struct ia_css_stream_s ia_css_stream_t; + + +#define N_UINT64_IN_STREAM_STRUCT 1 + +#define IA_CSS_STREAM_STRUCT_BITS \ + (N_UINT64_IN_STREAM_STRUCT * 64) + +struct ia_css_stream_s { + uint64_t dummy; +}; + +struct ia_css_param_frame_descriptor_s { + uint16_t size; /**< Size of the descriptor */ + uint32_t buffer_count; /**< Number of parameter buffers */ +}; + +struct ia_css_param_frame_s { + /*< Base virtual addresses to parameters in subsystem virtual + * memory space + */ + vied_vaddress_t *data; +}; + +#define N_UINT32_IN_FRAME_DESC_STRUCT \ + (1 + IA_CSS_N_FRAME_PLANES + (IA_CSS_N_DATA_DIMENSION - 1)) +#define N_UINT16_IN_FRAME_DESC_STRUCT (1 + IA_CSS_N_DATA_DIMENSION) +#define N_UINT8_IN_FRAME_DESC_STRUCT 3 +#define N_PADDING_UINT8_IN_FRAME_DESC_STRUCT 3 + +#define IA_CSS_FRAME_DESCRIPTOR_STRUCT_BITS \ + (IA_CSS_FRAME_FORMAT_TYPE_BITS \ + + (N_UINT32_IN_FRAME_DESC_STRUCT * 32) \ + + (N_UINT16_IN_FRAME_DESC_STRUCT * 16) \ + + (N_UINT8_IN_FRAME_DESC_STRUCT * 8) \ + + (N_PADDING_UINT8_IN_FRAME_DESC_STRUCT * 8)) + +/* + * Structure defining the frame (size and access) properties for + * inbuild types only. 
+ * + * The inbuild types like FourCC, MIPI and CSS private types are supported + * by FW all other types are custom types which interpretation must be encoded + * on the buffer itself or known by the source and sink + */ +struct ia_css_frame_descriptor_s { + /**< Indicates if this is a generic type or inbuild with + * variable size descriptor + */ + ia_css_frame_format_type_t frame_format_type; + /**< Number of data planes (pointers) */ + uint32_t plane_count; + /**< Plane offsets accounting for fragments */ + uint32_t plane_offsets[IA_CSS_N_FRAME_PLANES]; + /**< Physical size aspects */ + uint32_t stride[IA_CSS_N_DATA_DIMENSION - 1]; + /**< Logical dimensions */ + uint16_t dimension[IA_CSS_N_DATA_DIMENSION]; + /**< Size of this descriptor */ + uint16_t size; + /**< Bits per pixel */ + uint8_t bpp; + /**< Bits per element */ + uint8_t bpe; + /**< 1 if terminal uses compressed datatype, 0 otherwise */ + uint8_t is_compressed; + /**< Padding for 64bit alignment */ + uint8_t padding[N_PADDING_UINT8_IN_FRAME_DESC_STRUCT]; +}; + +#define N_UINT32_IN_FRAME_STRUCT 2 +#define N_PADDING_UINT8_IN_FRAME_STRUCT 4 + +#define IA_CSS_FRAME_STRUCT_BITS \ + (IA_CSS_BUFFER_STATE_IN_BITS \ + + IA_CSS_ACCESS_TYPE_IN_BITS \ + + IA_CSS_POINTER_STATE_IN_BITS \ + + IA_CSS_ACCESS_SCOPES_IN_BITS \ + + VIED_VADDRESS_BITS \ + + (N_UINT32_IN_FRAME_STRUCT * 32) \ + + (N_PADDING_UINT8_IN_FRAME_STRUCT * 8)) + + +/* + * Main frame structure holding the main store and auxilary access properties + * the "pointer_state" and "access_scope" should be encoded on the + * "vied_vaddress_t" type + */ +struct ia_css_frame_s { + /**< State of the frame for purpose of sequencing */ + ia_css_buffer_state_t buffer_state; + /**< Access direction, may change when buffer state changes */ + ia_css_access_type_t access_type; + /**< State of the pointer for purpose of embedded MMU coherency */ + ia_css_pointer_state_t pointer_state; + /**< Access to the pointer for purpose of host cache coherency */ + 
ia_css_access_scopes_t access_scope;
+ /**< Base virtual address to data in subsystem virtual memory space */
+ vied_vaddress_t data;
+ /**< Offset to buffer address within external buffer set structure */
+ uint32_t data_index;
+ /**< Total allocation size in bytes */
+ uint32_t data_bytes;
+ /**< Padding for 64bit alignment */
+ uint8_t padding[N_PADDING_UINT8_IN_FRAME_STRUCT];
+};
+
+#define N_UINT16_IN_FRAGMENT_DESC_STRUCT (3 * IA_CSS_N_DATA_DIMENSION)
+#define N_PADDING_UINT8_IN_FRAGMENT_DESC_STRUCT 4
+
+/* Bit size of ia_css_fragment_descriptor_s: the struct has
+ * N_UINT16_IN_FRAGMENT_DESC_STRUCT uint16 members (dimension, index and
+ * offset arrays, 3 * IA_CSS_N_DATA_DIMENSION in total) plus padding, so
+ * the fragment constant must be used here, not the frame one.
+ */
+#define IA_CSS_FRAGMENT_DESCRIPTOR_STRUCT_BITS \
+ ((N_UINT16_IN_FRAGMENT_DESC_STRUCT * 16) \
+ + (N_PADDING_UINT8_IN_FRAGMENT_DESC_STRUCT * 8))
+
+/*
+ * Structure defining the fragment (size and access) properties.
+ *
+ * All cropping and padding effects are described by the difference between
+ * the frame size and its location and the fragment size(s) and location(s)
+ */
+struct ia_css_fragment_descriptor_s {
+ /**< Logical dimensions of the fragment */
+ uint16_t dimension[IA_CSS_N_DATA_DIMENSION];
+ /**< Logical location of the fragment in the frame */
+ uint16_t index[IA_CSS_N_DATA_DIMENSION];
+ /**< Fractional start (phase) of the fragment in the access unit */
+ uint16_t offset[IA_CSS_N_DATA_DIMENSION];
+ /**< Padding for 64bit alignment */
+ uint8_t padding[N_PADDING_UINT8_IN_FRAGMENT_DESC_STRUCT];
+};
+
+
+/*! Print the frame object to file/stream
+
+ @param frame[in] frame object
+ @param fid[out] file/stream handle
+
+ @return < 0 on error
+ */
+IA_CSS_PSYS_DATA_STORAGE_CLASS_H
+int ia_css_frame_print(
+ const ia_css_frame_t *frame, void *fid);
+
+/*! Get the data buffer handle from the frame object
+
+@param frame[in] frame object
+
+@return buffer pointer, VIED_NULL on error
+*/
+IA_CSS_PSYS_DATA_STORAGE_CLASS_H
+const vied_vaddress_t *ia_css_frame_get_buffer_host_virtual_address(
+ const ia_css_frame_t *frame);
+
+/*!
Get the data buffer handle from the frame object + + @param frame[in] frame object + + @return buffer pointer, VIED_NULL on error + */ +IA_CSS_PSYS_DATA_STORAGE_CLASS_H +vied_vaddress_t ia_css_frame_get_buffer(const ia_css_frame_t *frame); + +/*! Set the data buffer handle on the frame object + + @param frame[in] frame object + @param buffer[in] buffer pointer + + @return < 0 on error + */ +IA_CSS_PSYS_DATA_STORAGE_CLASS_H +int ia_css_frame_set_buffer( + ia_css_frame_t *frame, vied_vaddress_t buffer); + +/*! Get the data buffer index in the frame object + + @param frame[in] frame object + + @return data buffer index on success, -1 on error + */ +IA_CSS_PSYS_DATA_STORAGE_CLASS_H +int ia_css_frame_get_data_index( + const ia_css_frame_t *frame); + +/*! Set the data buffer index in the frame object + + @param frame[in] frame object + @param data_index[in] data buffer index + + @return < 0 on error + */ +IA_CSS_PSYS_DATA_STORAGE_CLASS_H +int ia_css_frame_set_data_index( + ia_css_frame_t *frame, + unsigned int data_index); + +/*! Set the data buffer size on the frame object + + @param frame[in] frame object + @param size[in] number of data bytes + + @return < 0 on error + */ +IA_CSS_PSYS_DATA_STORAGE_CLASS_H +int ia_css_frame_set_data_bytes( + ia_css_frame_t *frame, unsigned size); + +/*! Get the data buffer state from the frame object + + @param frame[in] frame object + + @return buffer state, limit value on error + */ +IA_CSS_PSYS_DATA_STORAGE_CLASS_H +ia_css_buffer_state_t ia_css_frame_get_buffer_state( + const ia_css_frame_t *frame); + +/*! Set the data buffer state of the frame object + + @param frame[in] frame object + @param buffer_state[in] buffer state + + @return < 0 on error + */ +IA_CSS_PSYS_DATA_STORAGE_CLASS_H +int ia_css_frame_set_buffer_state(ia_css_frame_t *frame, + const ia_css_buffer_state_t buffer_state); + +/*! 
Get the data pointer state from the frame object + + @param frame[in] frame object + + @return pointer state, limit value on error + */ +IA_CSS_PSYS_DATA_STORAGE_CLASS_H +ia_css_pointer_state_t ia_css_frame_get_pointer_state( + const ia_css_frame_t *frame); + +/*! Set the data pointer state of the frame object + + @param frame[in] frame object + @param pointer_state[in] pointer state + + @return < 0 on error + */ +IA_CSS_PSYS_DATA_STORAGE_CLASS_H +int ia_css_frame_set_pointer_state(ia_css_frame_t *frame, + const ia_css_pointer_state_t pointer_state); + +/*! Print the frame descriptor object to file/stream + + @param frame_descriptor[in] frame descriptor object + @param fid[out] file/stream handle + + @return < 0 on error + */ +IA_CSS_PSYS_DATA_STORAGE_CLASS_H +int ia_css_frame_descriptor_print( + const ia_css_frame_descriptor_t *frame_descriptor, void *fid); + +/*! Print the fragment descriptor object to file/stream + + @param fragment_descriptor[in] fragment descriptor object + @param fid[out] file/stream handle + + @return < 0 on error + */ +IA_CSS_PSYS_DATA_STORAGE_CLASS_H +int ia_css_fragment_descriptor_print( + const ia_css_fragment_descriptor_t *fragment_descriptor, void *fid); + +/*! Compute the bitmap for the frame format type + + @param frame_format_type[in] frame format type + + @return 0 on error + */ +IA_CSS_PSYS_DATA_STORAGE_CLASS_H +ia_css_frame_format_bitmap_t ia_css_frame_format_bit_mask( + const ia_css_frame_format_type_t frame_format_type); + +/*! clear frame format bitmap + + @return cleared bitmap + */ +IA_CSS_PSYS_DATA_STORAGE_CLASS_H +ia_css_frame_format_bitmap_t ia_css_frame_format_bitmap_clear(void); + + +/*! Compute the size of storage required for the data descriptor object + * on a terminal + *@param plane_count[in] The number of data planes in the buffer + */ +IA_CSS_PSYS_DATA_STORAGE_CLASS_H +size_t ia_css_sizeof_frame_descriptor( + const uint8_t plane_count); +/*! 
Compute the size of storage required for the kernel parameter descriptor + * object on a terminal + + @param section_count[in] The number of parameter sections in the buffer + + @return 0 on error + */ +extern size_t ia_css_sizeof_kernel_param_descriptor( + const uint16_t section_count); + +#ifdef __IA_CSS_PSYS_DATA_INLINE__ +#include "ia_css_program_group_data_impl.h" +#endif /* __IA_CSS_PSYS_DATA_INLINE__ */ + +#endif /* __IA_CSS_PROGRAM_GROUP_DATA_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/psysapi/data/interface/ia_css_program_group_data_defs.h b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/psysapi/data/interface/ia_css_program_group_data_defs.h new file mode 100644 index 0000000000000..3f177a19b98b4 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/psysapi/data/interface/ia_css_program_group_data_defs.h @@ -0,0 +1,196 @@ +/* +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+*/ + +#ifndef __IA_CSS_PROGRAM_GROUP_DATA_DEFS_H +#define __IA_CSS_PROGRAM_GROUP_DATA_DEFS_H + + +/* + * Pre-defined frame format + * + * Those formats have built-in support for traffic + * and access functions + * + * Note that the formats are for terminals, so there + * is no distinction between input and output formats + * - Custom formats with or without descriptor + * - 4CC formats such as YUV variants + * - MIPI (line) formats as produced by CSI receivers + * - MIPI (sensor) formats such as Bayer or RGBC + * - CSS internal formats (private types) + * - CSS parameters (type 1 - 6) + */ +#define IA_CSS_FRAME_FORMAT_TYPE_BITS 32 +typedef enum ia_css_frame_format_type { + IA_CSS_DATA_CUSTOM_NO_DESCRIPTOR = 0, + IA_CSS_DATA_CUSTOM, + + /* 12 bit YUV 411, Y, UV 2-plane (8 bit per element) */ + IA_CSS_DATA_FORMAT_NV11, + /* bpp bit YUV 420, Y, U, V 3-plane (bpp/1.5 bpe) */ + IA_CSS_DATA_FORMAT_YUV420, + /* 12 bit YUV 420, Y, V, U 3-plane (8 bit per element) */ + IA_CSS_DATA_FORMAT_YV12, + /* 12 bit YUV 420, Y, UV 2-plane (8 bit per element) */ + IA_CSS_DATA_FORMAT_NV12, + /* 16 bit YUV 420, Y, UV 2-plane (8 bit per element) */ + IA_CSS_DATA_FORMAT_NV12_16, + /* 12 bit YUV 420, Intel proprietary tiled format, TileY */ + IA_CSS_DATA_FORMAT_NV12_TILEY, + /* 12 bit YUV 420, Y, VU 2-plane (8 bit per element) */ + IA_CSS_DATA_FORMAT_NV21, + /* bpp bit YUV 422, Y, U, V 3-plane (bpp/2 bpe) */ + IA_CSS_DATA_FORMAT_YUV422, + /* 16 bit YUV 422, Y, V, U 3-plane (8 bit per element) */ + IA_CSS_DATA_FORMAT_YV16, + /* 16 bit YUV 422, Y, UV 2-plane (8 bit per element) */ + IA_CSS_DATA_FORMAT_NV16, + /* 16 bit YUV 422, Y, VU 2-plane (8 bit per element) */ + IA_CSS_DATA_FORMAT_NV61, + /* 16 bit YUV 422, UYVY 1-plane interleaved (8 bit per element) */ + IA_CSS_DATA_FORMAT_UYVY, + /* 16 bit YUV 422, YUYV 1-plane interleaved (8 bit per element) */ + IA_CSS_DATA_FORMAT_YUYV, + /* bpp bit YUV 444, Y, U, V 3-plane (bpp/3 bpe) */ + IA_CSS_DATA_FORMAT_YUV444, + /* 8 bit monochrome plane */ + 
IA_CSS_DATA_FORMAT_Y800, + + /* 5-6-5 bit packed (1-plane) RGB (16bpp, ~5 bpe) */ + IA_CSS_DATA_FORMAT_RGB565, + /* 24 bit RGB, 3 planes (8 bit per element) */ + IA_CSS_DATA_FORMAT_RGB888, + /* 32 bit RGB-Alpha, 1 plane (8 bit per element) */ + IA_CSS_DATA_FORMAT_RGBA888, + + /* bpp bit raw, [[Gr, R];[B, Gb]] 1-plane (bpp == bpe) */ + IA_CSS_DATA_FORMAT_BAYER_GRBG, + /* bpp bit raw, [[R, Gr];[Gb, B]] 1-plane (bpp == bpe) */ + IA_CSS_DATA_FORMAT_BAYER_RGGB, + /* bpp bit raw, [[B, Gb];[Gr, R]] 1-plane (bpp == bpe) */ + IA_CSS_DATA_FORMAT_BAYER_BGGR, + /* bpp bit raw, [[Gb, B];[R, Gr]] 1-plane (bpp == bpe) */ + IA_CSS_DATA_FORMAT_BAYER_GBRG, + + /* bpp bit (NV12) YUV 420, Y, UV 2-plane derived 3-line, + * 2-Y, 1-UV (bpp/1.5 bpe): M420 format + */ + IA_CSS_DATA_FORMAT_YUV420_LINE, + /* Deprecated RAW, 1 plane */ + IA_CSS_DATA_FORMAT_RAW, + /* Deprecated RAW, 1 plane, packed */ + IA_CSS_DATA_FORMAT_RAW_PACKED, + /* Internal, for advanced ISP */ + IA_CSS_DATA_FORMAT_QPLANE6, + /* 1D byte stream, used for jpeg 1-plane */ + IA_CSS_DATA_FORMAT_BINARY_8, + /* Deprecated MIPI frame, 1D byte stream 1 plane */ + IA_CSS_DATA_FORMAT_MIPI, + /* 12 bit [[YY];[UYVY]] 1-plane interleaved 2-line + * (8 bit per element) + */ + IA_CSS_DATA_FORMAT_MIPI_YUV420_8, + /* 15 bit [[YY];[UYVY]] 1-plane interleaved 2-line + * (10 bit per element) + */ + IA_CSS_DATA_FORMAT_MIPI_YUV420_10, + /* 12 bit [[UY];[VY]] 1-plane interleaved 2-line (8 bit per element) */ + IA_CSS_DATA_FORMAT_MIPI_LEGACY_YUV420_8, + + /* Type 1-5 parameter, not fragmentable */ + IA_CSS_DATA_GENERIC_PARAMETER, + /* Video stabilisation Type 6 parameter, fragmentable */ + IA_CSS_DATA_DVS_PARAMETER, + /* Video stabilisation Type 6 parameter, coordinates */ + IA_CSS_DATA_DVS_COORDINATES, + /* Dead Pixel correction Type 6 parameter, fragmentable */ + IA_CSS_DATA_DPC_PARAMETER, + /* Lens Shading Correction Type 6 parameter, fragmentable */ + IA_CSS_DATA_LSC_PARAMETER, + /* 3A statistics output HI. 
*/ + IA_CSS_DATA_S3A_STATISTICS_HI, + /* 3A statistics output LO. */ + IA_CSS_DATA_S3A_STATISTICS_LO, + /* histogram output */ + IA_CSS_DATA_S3A_HISTOGRAM, + /* GammaStar grid */ + IA_CSS_DATA_GAMMASTAR_GRID, + + /* Gr R B Gb Gr R B Gb in PIXELS (also called isys interleaved) */ + IA_CSS_DATA_FORMAT_BAYER_LINE_INTERLEAVED, + /* Gr R B Gb Gr R B Gb in VECTORS (VCC IMAGE, ISP NWAY dependent) */ + IA_CSS_DATA_FORMAT_BAYER_VECTORIZED, + /* Gr R Gr R ... | B Gb B Gb .. in VECTORS (ISP NWAY dependent) */ + IA_CSS_DATA_FORMAT_BAYER_GRBG_VECTORIZED, + + /* 16 bit YUV 420, Y even plane, Y uneven plane, + * UV plane vector interleaved + */ + IA_CSS_DATA_FORMAT_YUV420_VECTORIZED, + /* 16 bit YUV 420, YYUVYY vector interleaved */ + IA_CSS_DATA_FORMAT_YYUVYY_VECTORIZED, + + /* 12 bit YUV 420, Intel proprietary tiled format, TileYf */ + IA_CSS_DATA_FORMAT_NV12_TILEYF, + + /*Y samples appear first in the memory. All Y samples are array of WORDs; + * even number of lines ; + * Surface stride can be larger than the width of Y plane. + * This array is followed immediately by chroma array. + * Chroma array is an array of WORDs, with interleaved U/V samples. + * If the interleaved U/V plane is addressed as an array of DWORDs, + * the least significant word contains U sample. The stride of the + * interleaved U/V plane is equal to Y plane. 10 bit data. + */ + IA_CSS_DATA_FORMAT_P010, + + /* MSB aligned version of P010*/ + IA_CSS_DATA_FORMAT_P010_MSB, + + /* P016/P012 Y samples appear first in the memory. + * All Y samples are array of WORDs; + * even number of lines ; + * Surface stride can be larger than the width of Y plane. + * This array is followed immediately by chroma array. + * Chroma array is an array of WORDs, with interleaved U/V samples. + * If the interleaved U/V plane is addressed as an array of DWORDs, + * the least significant word contains U sample. The stride of the + * interleaved U/V plane is equal to Y plane. 12 bit data. 
+ */ + IA_CSS_DATA_FORMAT_P016, + + /* MSB aligned version of P016*/ + IA_CSS_DATA_FORMAT_P016_MSB, + + /* TILEYYf representation of P010*/ + IA_CSS_DATA_FORMAT_P010_TILEYF, + + /* TILEYYf representation of P010 MSB aligned*/ + IA_CSS_DATA_FORMAT_P010_MSB_TILEYF, + + /* TILEYYf representation of P016*/ + IA_CSS_DATA_FORMAT_P016_TILEYF, + + /* TILEYYf representation of P016 MSB aligned*/ + IA_CSS_DATA_FORMAT_P016_MSB_TILEYF, + + /* consists of L and R PDAF pixel pairs. + * L and R can be interleaved or not. 1-plane (bpp == bpe) */ + IA_CSS_DATA_FORMAT_PAF, + + IA_CSS_N_FRAME_FORMAT_TYPES +} ia_css_frame_format_type_t; + + +#endif /* __IA_CSS_PROGRAM_GROUP_DATA_DEFS_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/psysapi/data/interface/ia_css_psys_data_storage_class.h b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/psysapi/data/interface/ia_css_psys_data_storage_class.h new file mode 100644 index 0000000000000..6a4e3a28e5336 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/psysapi/data/interface/ia_css_psys_data_storage_class.h @@ -0,0 +1,28 @@ +/* +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+*/ + +#ifndef __IA_CSS_PSYS_DATA_STORAGE_CLASS_H +#define __IA_CSS_PSYS_DATA_STORAGE_CLASS_H + +#include "storage_class.h" + +#ifndef __IA_CSS_PSYS_DATA_INLINE__ +#define IA_CSS_PSYS_DATA_STORAGE_CLASS_H STORAGE_CLASS_EXTERN +#define IA_CSS_PSYS_DATA_STORAGE_CLASS_C +#else +#define IA_CSS_PSYS_DATA_STORAGE_CLASS_H STORAGE_CLASS_INLINE +#define IA_CSS_PSYS_DATA_STORAGE_CLASS_C STORAGE_CLASS_INLINE +#endif + +#endif /* __IA_CSS_PSYS_DATA_STORAGE_CLASS_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/psysapi/data/interface/ia_css_psys_data_trace.h b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/psysapi/data/interface/ia_css_psys_data_trace.h new file mode 100644 index 0000000000000..49afed9ce9dfc --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/psysapi/data/interface/ia_css_psys_data_trace.h @@ -0,0 +1,102 @@ +/* +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+*/ + +#ifndef __IA_CSS_PSYS_DATA_TRACE_H +#define __IA_CSS_PSYS_DATA_TRACE_H + +#include "ia_css_psysapi_trace.h" + +#define PSYS_DATA_TRACE_LEVEL_CONFIG_DEFAULT PSYSAPI_TRACE_LOG_LEVEL_OFF + +/* Default sub-module tracing config */ +#if (!defined(PSYSAPI_DATA_TRACING_OVERRIDE)) + #define PSYS_DATA_TRACE_LEVEL_CONFIG PSYS_DATA_TRACE_LEVEL_CONFIG_DEFAULT +#endif + +/* Module/sub-module specific trace setting will be used if + * the trace level is not specified from the module or + PSYSAPI_DATA_TRACING_OVERRIDE is defined + */ +#if (defined(PSYSAPI_DATA_TRACING_OVERRIDE)) + /* Module/sub-module specific trace setting */ + #if PSYSAPI_DATA_TRACING_OVERRIDE == PSYSAPI_TRACE_LOG_LEVEL_OFF + /* PSYSAPI_TRACE_LOG_LEVEL_OFF */ + #define PSYSAPI_DATA_TRACE_METHOD \ + IA_CSS_TRACE_METHOD_NATIVE + #define PSYSAPI_DATA_TRACE_LEVEL_ASSERT \ + IA_CSS_TRACE_LEVEL_DISABLED + #define PSYSAPI_DATA_TRACE_LEVEL_ERROR \ + IA_CSS_TRACE_LEVEL_DISABLED + #define PSYSAPI_DATA_TRACE_LEVEL_WARNING \ + IA_CSS_TRACE_LEVEL_DISABLED + #define PSYSAPI_DATA_TRACE_LEVEL_INFO \ + IA_CSS_TRACE_LEVEL_DISABLED + #define PSYSAPI_DATA_TRACE_LEVEL_DEBUG \ + IA_CSS_TRACE_LEVEL_DISABLED + #define PSYSAPI_DATA_TRACE_LEVEL_VERBOSE \ + IA_CSS_TRACE_LEVEL_DISABLED + #elif PSYSAPI_DATA_TRACING_OVERRIDE == PSYSAPI_TRACE_LOG_LEVEL_NORMAL + /* PSYSAPI_TRACE_LOG_LEVEL_NORMAL */ + #define PSYSAPI_DATA_TRACE_METHOD \ + IA_CSS_TRACE_METHOD_NATIVE + #define PSYSAPI_DATA_TRACE_LEVEL_ASSERT \ + IA_CSS_TRACE_LEVEL_DISABLED + #define PSYSAPI_DATA_TRACE_LEVEL_ERROR \ + IA_CSS_TRACE_LEVEL_ENABLED + #define PSYSAPI_DATA_TRACE_LEVEL_WARNING \ + IA_CSS_TRACE_LEVEL_DISABLED + #define PSYSAPI_DATA_TRACE_LEVEL_INFO \ + IA_CSS_TRACE_LEVEL_ENABLED + #define PSYSAPI_DATA_TRACE_LEVEL_DEBUG \ + IA_CSS_TRACE_LEVEL_DISABLED + #define PSYSAPI_DATA_TRACE_LEVEL_VERBOSE \ + IA_CSS_TRACE_LEVEL_DISABLED + #elif PSYSAPI_DATA_TRACING_OVERRIDE == PSYSAPI_TRACE_LOG_LEVEL_DEBUG + /* PSYSAPI_TRACE_LOG_LEVEL_DEBUG */ + #define 
PSYSAPI_DATA_TRACE_METHOD \ + IA_CSS_TRACE_METHOD_NATIVE + #define PSYSAPI_DATA_TRACE_LEVEL_ASSERT \ + IA_CSS_TRACE_LEVEL_ENABLED + #define PSYSAPI_DATA_TRACE_LEVEL_ERROR \ + IA_CSS_TRACE_LEVEL_ENABLED + #define PSYSAPI_DATA_TRACE_LEVEL_WARNING \ + IA_CSS_TRACE_LEVEL_ENABLED + #define PSYSAPI_DATA_TRACE_LEVEL_INFO \ + IA_CSS_TRACE_LEVEL_ENABLED + #define PSYSAPI_DATA_TRACE_LEVEL_DEBUG \ + IA_CSS_TRACE_LEVEL_ENABLED + #define PSYSAPI_DATA_TRACE_LEVEL_VERBOSE \ + IA_CSS_TRACE_LEVEL_ENABLED + #else + #error "No PSYSAPI_DATA Tracing level defined" + #endif +#else + /* Inherit Module trace setting */ + #define PSYSAPI_DATA_TRACE_METHOD \ + PSYSAPI_TRACE_METHOD + #define PSYSAPI_DATA_TRACE_LEVEL_ASSERT \ + PSYSAPI_TRACE_LEVEL_ASSERT + #define PSYSAPI_DATA_TRACE_LEVEL_ERROR \ + PSYSAPI_TRACE_LEVEL_ERROR + #define PSYSAPI_DATA_TRACE_LEVEL_WARNING \ + PSYSAPI_TRACE_LEVEL_WARNING + #define PSYSAPI_DATA_TRACE_LEVEL_INFO \ + PSYSAPI_TRACE_LEVEL_INFO + #define PSYSAPI_DATA_TRACE_LEVEL_DEBUG \ + PSYSAPI_TRACE_LEVEL_DEBUG + #define PSYSAPI_DATA_TRACE_LEVEL_VERBOSE \ + PSYSAPI_TRACE_LEVEL_VERBOSE +#endif + +#endif /* __IA_CSS_PSYSAPI_DATA_TRACE_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/psysapi/data/src/ia_css_program_group_data.c b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/psysapi/data/src/ia_css_program_group_data.c new file mode 100644 index 0000000000000..edf3e55e6c399 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/psysapi/data/src/ia_css_program_group_data.c @@ -0,0 +1,26 @@ +/* +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. 
+ * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ + +#include "ia_css_psys_data_storage_class.h" + +/* + * Functions to possibly inline + */ + +#ifdef __IA_CSS_PSYS_DATA_INLINE__ +STORAGE_CLASS_INLINE int +__ia_css_program_group_data_avoid_warning_on_empty_file(void) { return 0; } +#else /* __IA_CSS_PSYS_DATA_INLINE__ */ +#include "ia_css_program_group_data_impl.h" +#endif /* __IA_CSS_PSYS_DATA_INLINE__ */ diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/psysapi/data/src/ia_css_program_group_data_impl.h b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/psysapi/data/src/ia_css_program_group_data_impl.h new file mode 100644 index 0000000000000..f08a057e4480e --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/psysapi/data/src/ia_css_program_group_data_impl.h @@ -0,0 +1,455 @@ +/* +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+*/ + +#ifndef __IA_CSS_PROGRAM_GROUP_DATA_IMPL_H +#define __IA_CSS_PROGRAM_GROUP_DATA_IMPL_H + +#include "ia_css_program_group_data.h" +#include "ia_css_psys_data_trace.h" +#include "ia_css_terminal_defs.h" +#include /* for verifexit */ +#include /* for COMPILATION_ERROR_IF */ +#include /* for NOT_USED */ + +IA_CSS_PSYS_DATA_STORAGE_CLASS_C +int ia_css_frame_print( + const ia_css_frame_t *frame, void *fid) +{ + int retval = -1; + + NOT_USED(fid); + + IA_CSS_TRACE_0(PSYSAPI_DATA, INFO, "ia_css_frame_print(): enter:\n"); + + verifexit(frame != NULL); + + IA_CSS_TRACE_1(PSYSAPI_DATA, INFO, + "\tbuffer = %d\n", ia_css_frame_get_buffer(frame)); + IA_CSS_TRACE_1(PSYSAPI_DATA, INFO, + "\tbuffer_state = %d\n", ia_css_frame_get_buffer_state(frame)); + /* IA_CSS_TRACE_1(PSYSAPI_DATA, INFO, "\tbuffer_state = %s\n", + * ia_css_buffer_state_string(ia_css_frame_get_buffer_state(frame))); + */ + IA_CSS_TRACE_1(PSYSAPI_DATA, INFO, + "\tpointer_state = %d\n", ia_css_frame_get_pointer_state(frame)); + /* IA_CSS_TRACE_1(PSYSAPI_DATA, INFO, "\tpointer_state = %s\n", + * ia_css_pointer_state_string(ia_css_frame_get_pointer_state(frame))); + */ + IA_CSS_TRACE_1(PSYSAPI_DATA, INFO, + "\tdata_bytes = %d\n", frame->data_bytes); + + retval = 0; +EXIT: + if (retval != 0) { + IA_CSS_TRACE_1(PSYSAPI_DATA, ERROR, + "ia_css_frame_print failed (%i)\n", retval); + } + return retval; +} + +IA_CSS_PSYS_DATA_STORAGE_CLASS_C +const vied_vaddress_t *ia_css_frame_get_buffer_host_virtual_address( + const ia_css_frame_t *frame) { + + IA_CSS_TRACE_0(PSYSAPI_DATA, VERBOSE, + "ia_css_frame_get_buffer_host_virtual_address(): enter:\n"); + + verifexit(frame != NULL); + return &(frame->data); + +EXIT: + if (NULL == frame) { + IA_CSS_TRACE_0(PSYSAPI_DATA, WARNING, + "ia_css_frame_get_buffer_host_virtual_address invalid argument\n"); + } + return NULL; +} + +IA_CSS_PSYS_DATA_STORAGE_CLASS_C +vied_vaddress_t ia_css_frame_get_buffer( + const ia_css_frame_t *frame) +{ + vied_vaddress_t buffer = VIED_NULL; + + 
IA_CSS_TRACE_0(PSYSAPI_DATA, VERBOSE, + "ia_css_frame_get_buffer(): enter:\n"); + + verifexit(frame != NULL); + buffer = frame->data; + +EXIT: + if (NULL == frame) { + IA_CSS_TRACE_0(PSYSAPI_DATA, WARNING, + "ia_css_frame_get_buffer invalid argument\n"); + } + return buffer; +} + +IA_CSS_PSYS_DATA_STORAGE_CLASS_C +int ia_css_frame_set_buffer( + ia_css_frame_t *frame, + vied_vaddress_t buffer) +{ + int retval = -1; + + IA_CSS_TRACE_0(PSYSAPI_DATA, VERBOSE, + "ia_css_frame_set_buffer(): enter:\n"); + + verifexit(frame != NULL); + frame->data = buffer; + + retval = 0; +EXIT: + if (retval != 0) { + IA_CSS_TRACE_1(PSYSAPI_DATA, ERROR, + "ia_css_frame_set_buffer failed (%i)\n", retval); + } + return retval; +} + +IA_CSS_PSYS_DATA_STORAGE_CLASS_C +int ia_css_frame_get_data_index( + const ia_css_frame_t *frame) +{ + int data_index = -1; + + IA_CSS_TRACE_0(PSYSAPI_DATA, VERBOSE, + "ia_css_frame_get_data_index(): enter:\n"); + + verifexit(frame != NULL); + + data_index = frame->data_index; + +EXIT: + if (NULL == frame) { + IA_CSS_TRACE_0(PSYSAPI_DATA, WARNING, + "ia_css_frame_get_data_index invalid argument\n"); + } + return data_index; +} + +IA_CSS_PSYS_DATA_STORAGE_CLASS_C +int ia_css_frame_set_data_index( + ia_css_frame_t *frame, + unsigned int data_index) +{ + int retval = -1; + + IA_CSS_TRACE_0(PSYSAPI_DATA, VERBOSE, + "ia_css_frame_set_data_index(): enter:\n"); + + verifexit(frame != NULL); + + frame->data_index = data_index; + + retval = 0; +EXIT: + if (retval != 0) { + IA_CSS_TRACE_1(PSYSAPI_DATA, ERROR, + "ia_css_frame_set_data_index failed (%i)\n", + retval); + } + return retval; +} + +IA_CSS_PSYS_DATA_STORAGE_CLASS_C +int ia_css_frame_set_data_bytes( + ia_css_frame_t *frame, + unsigned int size) +{ + int retval = -1; + + IA_CSS_TRACE_0(PSYSAPI_DATA, VERBOSE, + "ia_css_frame_set_data_bytes(): enter:\n"); + + verifexit(frame != NULL); + frame->data_bytes = size; + + retval = 0; +EXIT: + if (retval != 0) { + IA_CSS_TRACE_1(PSYSAPI_DATA, ERROR, + 
"ia_css_frame_set_data_bytes failed (%i)\n", retval); + } + return retval; +} + +IA_CSS_PSYS_DATA_STORAGE_CLASS_C +ia_css_buffer_state_t ia_css_frame_get_buffer_state( + const ia_css_frame_t *frame) +{ + ia_css_buffer_state_t buffer_state = IA_CSS_N_BUFFER_STATES; + + IA_CSS_TRACE_0(PSYSAPI_DATA, VERBOSE, + "ia_css_frame_get_buffer_state(): enter:\n"); + + verifexit(frame != NULL); + buffer_state = frame->buffer_state; + +EXIT: + if (NULL == frame) { + IA_CSS_TRACE_0(PSYSAPI_DATA, WARNING, + "ia_css_frame_get_buffer_state invalid argument\n"); + } + return buffer_state; +} + +IA_CSS_PSYS_DATA_STORAGE_CLASS_C +int ia_css_frame_set_buffer_state( + ia_css_frame_t *frame, + const ia_css_buffer_state_t buffer_state) +{ + int retval = -1; + + IA_CSS_TRACE_0(PSYSAPI_DATA, VERBOSE, + "ia_css_frame_set_buffer_state(): enter:\n"); + + verifexit(frame != NULL); + frame->buffer_state = buffer_state; + + retval = 0; +EXIT: + if (retval != 0) { + IA_CSS_TRACE_1(PSYSAPI_DATA, ERROR, + "ia_css_frame_set_buffer_state failed (%i)\n", retval); + } + return retval; +} + +IA_CSS_PSYS_DATA_STORAGE_CLASS_C +ia_css_pointer_state_t ia_css_frame_get_pointer_state( + const ia_css_frame_t *frame) +{ + ia_css_pointer_state_t pointer_state = IA_CSS_N_POINTER_STATES; + + IA_CSS_TRACE_0(PSYSAPI_DATA, VERBOSE, + "ia_css_frame_get_pointer_state(): enter:\n"); + + verifexit(frame != NULL); + pointer_state = frame->pointer_state; + +EXIT: + if (NULL == frame) { + IA_CSS_TRACE_0(PSYSAPI_DATA, WARNING, + "ia_css_frame_get_pointer_state invalid argument\n"); + } + return pointer_state; +} + +IA_CSS_PSYS_DATA_STORAGE_CLASS_C +int ia_css_frame_set_pointer_state( + ia_css_frame_t *frame, + const ia_css_pointer_state_t pointer_state) +{ + int retval = -1; + + IA_CSS_TRACE_0(PSYSAPI_DATA, VERBOSE, + "ia_css_frame_set_pointer_state(): enter:\n"); + + verifexit(frame != NULL); + frame->pointer_state = pointer_state; + + retval = 0; +EXIT: + if (retval != 0) { + IA_CSS_TRACE_1(PSYSAPI_DATA, ERROR, + 
"ia_css_frame_set_pointer_state failed (%i)\n", retval); + } + return retval; +} + +IA_CSS_PSYS_DATA_STORAGE_CLASS_C +int ia_css_frame_descriptor_print( + const ia_css_frame_descriptor_t *frame_descriptor, + void *fid) +{ + int retval = -1; + int i; + uint8_t frame_plane_count; + + NOT_USED(fid); + + IA_CSS_TRACE_0(PSYSAPI_DATA, INFO, + "ia_css_frame_descriptor_print(): enter:\n"); + + COMPILATION_ERROR_IF(IA_CSS_N_DATA_DIMENSION <= 0); + + verifexit(frame_descriptor != NULL); + + IA_CSS_TRACE_0(PSYSAPI_DATA, INFO, + "ia_css_frame_descriptor_print(): enter:\n"); + + IA_CSS_TRACE_1(PSYSAPI_DATA, INFO, + "\tframe_format_type = %d\n", + frame_descriptor->frame_format_type); + /* IA_CSS_TRACE_1(PSYSAPI_DATA, INFO, "\tframe_format_type = %s\n", + * ia_css_frame_format_string(frame_descriptor->frame_format_type)); + */ + + IA_CSS_TRACE_1(PSYSAPI_DATA, INFO, + "\tbpp = %d\n", frame_descriptor->bpp); + IA_CSS_TRACE_1(PSYSAPI_DATA, INFO, + "\tbpe = %d\n", frame_descriptor->bpe); + IA_CSS_TRACE_1(PSYSAPI_DATA, INFO, + "\tis_compressed = %d\n", frame_descriptor->is_compressed); + + frame_plane_count = IA_CSS_N_FRAME_PLANES; + /* frame_plane_count = + * ia_css_frame_plane_count(frame_descriptor->frame_format_type); + */ + + verifexit(frame_plane_count > 0); + + IA_CSS_TRACE_1(PSYSAPI_DATA, INFO, + "\tplane_offsets[%d]: [\n", frame_plane_count); + for (i = 0; i < (int)frame_plane_count - 1; i++) { + IA_CSS_TRACE_1(PSYSAPI_DATA, INFO, + "\t%4d,\n", frame_descriptor->plane_offsets[i]); + } + IA_CSS_TRACE_1(PSYSAPI_DATA, INFO, + "\t%4d ]\n", frame_descriptor->plane_offsets[i]); + + IA_CSS_TRACE_1(PSYSAPI_DATA, INFO, + "\tdimension[%d] = {\n", IA_CSS_N_DATA_DIMENSION); + for (i = 0; i < (int)IA_CSS_N_DATA_DIMENSION - 1; i++) { + IA_CSS_TRACE_1(PSYSAPI_DATA, INFO, + "\t%4d,\n", frame_descriptor->dimension[i]); + } + IA_CSS_TRACE_1(PSYSAPI_DATA, INFO, + "\t%4d }\n", frame_descriptor->dimension[i]); + + COMPILATION_ERROR_IF(0 > (IA_CSS_N_DATA_DIMENSION - 2)); + 
IA_CSS_TRACE_1(PSYSAPI_DATA, INFO, + "\tstride[%d] = {\n", IA_CSS_N_DATA_DIMENSION - 1); + i = 0; + if (IA_CSS_N_DATA_DIMENSION > 2) { + for (i = 0; i < (int)IA_CSS_N_DATA_DIMENSION - 2; i++) { + IA_CSS_TRACE_1(PSYSAPI_DATA, INFO, + "\t%4d,\n", frame_descriptor->stride[i]); + } + } + IA_CSS_TRACE_1(PSYSAPI_DATA, INFO, + "\t%4d }\n", frame_descriptor->stride[i]); + + retval = 0; +EXIT: + if (retval != 0) { + IA_CSS_TRACE_1(PSYSAPI_DATA, ERROR, + "ia_css_frame_descriptor_print failed (%i)\n", retval); + } + return retval; +} + +IA_CSS_PSYS_DATA_STORAGE_CLASS_C +int ia_css_fragment_descriptor_print( + const ia_css_fragment_descriptor_t *fragment_descriptor, + void *fid) +{ + int retval = -1; + int i; + + NOT_USED(fid); + + IA_CSS_TRACE_0(PSYSAPI_DATA, INFO, + "ia_css_fragment_descriptor_print(): enter:\n"); + + verifexit(fragment_descriptor != NULL); + + IA_CSS_TRACE_1(PSYSAPI_DATA, INFO, + "dimension[%d] = {\n", IA_CSS_N_DATA_DIMENSION); + for (i = 0; i < (int)IA_CSS_N_DATA_DIMENSION - 1; i++) { + IA_CSS_TRACE_1(PSYSAPI_DATA, INFO, + "\t%4d,\n", fragment_descriptor->dimension[i]); + } + IA_CSS_TRACE_1(PSYSAPI_DATA, INFO, + "\t%4d }\n", fragment_descriptor->dimension[i]); + + IA_CSS_TRACE_1(PSYSAPI_DATA, INFO, + "index[%d] = {\n", IA_CSS_N_DATA_DIMENSION); + for (i = 0; i < (int)IA_CSS_N_DATA_DIMENSION - 1; i++) { + IA_CSS_TRACE_1(PSYSAPI_DATA, INFO, + "\t%4d,\n", fragment_descriptor->index[i]); + } + IA_CSS_TRACE_1(PSYSAPI_DATA, INFO, + "\t%4d }\n", fragment_descriptor->index[i]); + + IA_CSS_TRACE_1(PSYSAPI_DATA, INFO, + "offset[%d] = {\n", IA_CSS_N_DATA_DIMENSION); + for (i = 0; i < (int)IA_CSS_N_DATA_DIMENSION - 1; i++) { + IA_CSS_TRACE_1(PSYSAPI_DATA, INFO, + "\t%4d,\n", fragment_descriptor->offset[i]); + } + IA_CSS_TRACE_1(PSYSAPI_DATA, INFO, "\t%4d }\n", + fragment_descriptor->offset[i]); + + retval = 0; +EXIT: + if (retval != 0) { + IA_CSS_TRACE_1(PSYSAPI_DATA, ERROR, + "ia_css_fragment_descriptor_print failed (%i)\n", retval); + } + return retval; +} + 
+IA_CSS_PSYS_DATA_STORAGE_CLASS_C +ia_css_frame_format_bitmap_t ia_css_frame_format_bit_mask( + const ia_css_frame_format_type_t frame_format_type) +{ + ia_css_frame_format_bitmap_t bit_mask = 0; + + IA_CSS_TRACE_0(PSYSAPI_DATA, VERBOSE, + "ia_css_frame_format_bit_mask(): enter:\n"); + + if ((frame_format_type < IA_CSS_N_FRAME_FORMAT_TYPES) && + (frame_format_type < IA_CSS_FRAME_FORMAT_BITMAP_BITS)) { + bit_mask = (ia_css_frame_format_bitmap_t)1 << frame_format_type; + } else { + IA_CSS_TRACE_0(PSYSAPI_DATA, WARNING, + "ia_css_frame_format_bit_mask invalid argument\n"); + } + + return bit_mask; +} + +IA_CSS_PSYS_DATA_STORAGE_CLASS_C +ia_css_frame_format_bitmap_t ia_css_frame_format_bitmap_clear(void) +{ + IA_CSS_TRACE_0(PSYSAPI_DATA, VERBOSE, + "ia_css_frame_format_bitmap_clear(): enter:\n"); + + return 0; +} + +IA_CSS_PSYS_DATA_STORAGE_CLASS_C +size_t ia_css_sizeof_frame_descriptor( + const uint8_t plane_count) +{ + size_t size = 0; + + IA_CSS_TRACE_0(PSYSAPI_DATA, VERBOSE, + "ia_css_sizeof_frame_descriptor(): enter:\n"); + + verifexit(plane_count > 0); + size += sizeof(ia_css_frame_descriptor_t); + size += plane_count * sizeof(uint32_t); + +EXIT: + if (0 == plane_count) { + IA_CSS_TRACE_0(PSYSAPI_DATA, WARNING, + "ia_css_sizeof_frame_descriptor invalid argument\n"); + } + return size; +} + +#endif /* __IA_CSS_PROGRAM_GROUP_DATA_IMPL_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/psysapi/device/interface/bxtB0/ia_css_psys_transport_dep.h b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/psysapi/device/interface/bxtB0/ia_css_psys_transport_dep.h new file mode 100644 index 0000000000000..7bb145c1b183b --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/psysapi/device/interface/bxtB0/ia_css_psys_transport_dep.h @@ -0,0 +1,35 @@ +/* +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. 
+ * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ + +#ifndef __IA_CSS_PSYS_TRANSPORT_DEP_H +#define __IA_CSS_PSYS_TRANSPORT_DEP_H + +/* + * The ID's of the Psys specific queues. + */ +typedef enum ia_css_psys_cmd_queues { + /**< The in-order queue for scheduled process groups */ + IA_CSS_PSYS_CMD_QUEUE_COMMAND_ID = 0, + /**< The in-order queue for commands changing psys or + * process group state + */ + IA_CSS_PSYS_CMD_QUEUE_DEVICE_ID, + /**< An in-order queue for dedicated PPG commands */ + IA_CSS_PSYS_CMD_QUEUE_PPG0_COMMAND_ID, + /**< An in-order queue for dedicated PPG commands */ + IA_CSS_PSYS_CMD_QUEUE_PPG1_COMMAND_ID, + IA_CSS_N_PSYS_CMD_QUEUE_ID +} ia_css_psys_cmd_queue_ID_t; + +#endif /* __IA_CSS_PSYS_TRANSPORT_DEP_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/psysapi/device/interface/ia_css_psys_device.h b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/psysapi/device/interface/ia_css_psys_device.h new file mode 100644 index 0000000000000..dc8fa531b11e3 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/psysapi/device/interface/ia_css_psys_device.h @@ -0,0 +1,516 @@ +/* +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. 
+ * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ + +#ifndef __IA_CSS_PSYS_DEVICE_H +#define __IA_CSS_PSYS_DEVICE_H + +#include "ia_css_psys_init.h" +#include "ia_css_psys_transport.h" + +/*! \file */ + +/** @file ia_css_psys_device.h + * + * Define the interface to open the psys specific communication layer + * instance + */ + +#include /* vied_vaddress_t */ + +#include +#include + +#include +#include + +#define IA_CSS_PSYS_STATE_READY_PATTERN (0xF7F7F7F7) +#define IA_CSS_PSYS_STATE_RUNNING_PATTERN (0xE6E6E6E6) +#define IA_CSS_PSYS_STATE_STARTING_PATTERN (0xD5D5D5D5) +#define IA_CSS_PSYS_STATE_STARTED_PATTERN (0xC4C4C4C4) +#define IA_CSS_PSYS_STATE_INITIALIZING_PATTERN (0xB3B3B3B3) +#define IA_CSS_PSYS_STATE_INITIALIZED_PATTERN (0xA0A0A0A0) + +/* + * Defines the state of psys: + * - IA_CSS_PSYS_STATE_UNKNOWN = psys status is unknown (or not recognized) + * - IA_CSS_PSYS_STATE_INITIALING = some of the psys components are + * not initialized yet + * - IA_CSS_PSYS_STATE_INITIALIZED = psys components are initialized + * - IA_CSS_PSYS_STATE_STARTING = some of the psys components are initialized + * but not started yet + * - IA_CSS_PSYS_STATE_STARTED = psys components are started + * - IA_CSS_PSYS_STATE_RUNNING = some of the psys components are started + * but not ready yet + * - IA_CSS_PSYS_STATE_READY = psys is ready + * The state of psys can be obtained calling ia_css_psys_check_state() +*/ +typedef enum ia_css_psys_state { + IA_CSS_PSYS_STATE_UNKNOWN = 0, /**< psys state is unknown */ + /*< some of the psys components are not initialized yet*/ + IA_CSS_PSYS_STATE_INITIALIZING = IA_CSS_PSYS_STATE_INITIALIZING_PATTERN, + /**< psys components are initialized */ + IA_CSS_PSYS_STATE_INITIALIZED = IA_CSS_PSYS_STATE_INITIALIZED_PATTERN, + /**< some of the psys components are 
not started yet */ + IA_CSS_PSYS_STATE_STARTING = IA_CSS_PSYS_STATE_STARTING_PATTERN, + /**< psys components are started */ + IA_CSS_PSYS_STATE_STARTED = IA_CSS_PSYS_STATE_STARTED_PATTERN, + /**< some of the psys components are not ready yet */ + IA_CSS_PSYS_STATE_RUNNING = IA_CSS_PSYS_STATE_RUNNING_PATTERN, + /**< psys is ready */ + IA_CSS_PSYS_STATE_READY = IA_CSS_PSYS_STATE_READY_PATTERN, +} ia_css_psys_state_t; + +extern struct ia_css_syscom_context *psys_syscom; +#if HAS_DUAL_CMD_CTX_SUPPORT +extern struct ia_css_syscom_context *psys_syscom_secure; +#endif + +/*! Print the syscom creation descriptor to file/stream + + @param config[in] Psys syscom descriptor + @param fid[out] file/stream handle + + @return < 0 on error +*/ +extern int ia_css_psys_config_print( + const struct ia_css_syscom_config *config, void *fid); + +/*! Print the Psys syscom object to file/stream + + @param context[in] Psys syscom object + @param fid[out] file/stream handle + + @return < 0 on error + */ +extern int ia_css_psys_print( + const struct ia_css_syscom_context *context, void *fid); + +/*! Create the syscom creation descriptor + + @return NULL on error + */ +extern struct ia_css_syscom_config *ia_css_psys_specify(void); + +#if HAS_DUAL_CMD_CTX_SUPPORT +/*! Create the syscom creation descriptor for secure stream + + @param vtl0_addr_mask[in] VTL0 address mask that will be stored in 'secure' ctx + @return NULL on error + */ +extern struct ia_css_syscom_config *ia_css_psys_specify_secure(unsigned int vtl0_addr_mask); +#endif + +/*! Compute the size of storage required for allocating the Psys syscom object + + @param config[in] Psys syscom descriptor + + @return 0 on error + */ +extern size_t ia_css_sizeof_psys( + struct ia_css_syscom_config *config); + +#if HAS_DUAL_CMD_CTX_SUPPORT +/*! Open (and map the storage for) the Psys syscom object + This is the same as ia_css_psys_open() excluding server start. 
+ Target for VTIO usage where multiple syscom objects need to be + created first before this API is invoked. + + @param buffer[in] storage buffers for the syscom object + in the kernel virtual memory space and + its Psys mapped version + @param config[in] Psys syscom descriptor + @return NULL on error + */ + +extern struct ia_css_syscom_context *ia_css_psys_context_create( + const struct ia_css_psys_buffer_s *buffer, + struct ia_css_syscom_config *config); + +/*! Store the parameters of the Psys syscom object in DMEM, so + they can be communicated with FW. This step needs to be invoked + after SPC starts in ia_css_psys_open(), so SPC DMEM access blocker + programming already takes effective. + + @param context[in] Psys syscom object + @param config[in] Psys syscom descriptor + @return 0 if successful + */ +extern int ia_css_psys_context_store_dmem( + struct ia_css_syscom_context *context, + struct ia_css_syscom_config *config); + +/*! Start PSYS Server. Psys syscom object must have been created already. + Target for VTIO usage where multiple syscom objects need to be + created first before this API is invoked. + @param config[in] Psys syscom descriptor + + @return true if psys open started successfully + */ +extern int ia_css_psys_open( + struct ia_css_syscom_config *config); +#else +/*! Open (and map the storage for) the Psys syscom object + + @param buffer[in] storage buffers for the syscom object + in the kernel virtual memory space and + its Psys mapped version + @param config[in] Psys syscom descriptor + + Precondition(1): The buffer must be large enough to hold the syscom object. + Its size must be computed with the function "ia_css_sizeof_psys()". + The buffer must be created in the kernel memory space. + + Precondition(2): If buffer == NULL, the storage allocations and mapping + is performed in this function. Config must hold the handle to the Psys + virtual memory space + + Postcondition: The context is initialised in the provided/created buffer. 
+ The syscom context pointer is the kernel space handle to the syscom object
+
+ @return NULL on error
+ */
+extern struct ia_css_syscom_context *ia_css_psys_open(
+ const struct ia_css_psys_buffer_s *buffer,
+ struct ia_css_syscom_config *config);
+#endif /* HAS_DUAL_CMD_CTX_SUPPORT */
+
+/*! completes the psys open procedure. Must be called multiple times
+ until it succeeds or driver determines the boot sequence has failed.
+
+ @param context[in] Psys syscom object
+
+ @return false if psys open has not completed successfully
+ */
+extern bool ia_css_psys_open_is_ready(
+ struct ia_css_syscom_context *context);
+
+#if HAS_DUAL_CMD_CTX_SUPPORT
+/*! Request close of a PSYS context
+ * The functionality is the same as ia_css_psys_close() which closes PSYS syscom object.
+ * Counterpart of ia_css_psys_context_create()
+ * @param context[in]: Psys context
+ * @return NULL if close is successful, context otherwise
+ */
+extern struct ia_css_syscom_context *ia_css_psys_context_destroy(
+ struct ia_css_syscom_context *context);
+
+/*! Request close of a PSYS device for VTIO case
+ * @param None
+ * @return 0 if successful
+ */
+extern int ia_css_psys_close(void);
+#else
+/*! Request close of a PSYS context
+ * @param context[in]: Psys context
+ * @return NULL if close is successful, context otherwise
+ */
+extern struct ia_css_syscom_context *ia_css_psys_close(
+ struct ia_css_syscom_context *context);
+#endif /* HAS_DUAL_CMD_CTX_SUPPORT */
+
+/*! Unmap and free the storage of the PSYS context
+ * @param context[in] Psys context
+ * @param force[in] Force release even if device is busy
+ * @return 0 if release is successful
+ * EINVAL if context is invalid
+ * EBUSY if device is not yet idle, and force==0
+ */
+extern int ia_css_psys_release(
+ struct ia_css_syscom_context *context,
+ bool force);
+
+/*!
Checks the state of the Psys syscom object + + @param context[in] Psys syscom object + + @return State of the syscom object + */ +extern ia_css_psys_state_t ia_css_psys_check_state( + struct ia_css_syscom_context *context); + +/*!Indicate if the designated cmd queue in the Psys syscom object is full + + @param context[in] Psys syscom object + @param id[in] Psys syscom cmd queue ID + + @return false if the cmd queue is not full or on error + */ + +extern bool ia_css_is_psys_cmd_queue_full( + struct ia_css_syscom_context *context, + ia_css_psys_cmd_queue_ID_t id); + +/*!Indicate if the designated cmd queue in the Psys syscom object is notfull + + @param context[in] Psys syscom object + @param id[in] Psys syscom cmd queue ID + + @return false if the cmd queue is full on error + */ +extern bool ia_css_is_psys_cmd_queue_not_full( + struct ia_css_syscom_context *context, + ia_css_psys_cmd_queue_ID_t id); + +/*!Indicate if the designated cmd queue in the Psys syscom object holds N space + + @param context[in] Psys syscom object + @param id[in] Psys syscom cmd queue ID + @param N[in] Number of messages + + @return false if the cmd queue space is unavailable or on error + */ +extern bool ia_css_has_psys_cmd_queue_N_space( + struct ia_css_syscom_context *context, + ia_css_psys_cmd_queue_ID_t id, + const unsigned int N); + +/*!Return the free space count in the designated cmd queue in the + * Psys syscom object + + @param context[in] Psys syscom object + @param id[in] Psys syscom cmd queue ID + + @return the space, < 0 on error + */ +extern int ia_css_psys_cmd_queue_get_available_space( + struct ia_css_syscom_context *context, + ia_css_psys_cmd_queue_ID_t id); + +/*!Indicate if there are any messages pending in the Psys syscom + * object event queues + + @param context[in] Psys syscom object + + @return false if there are no messages or on error + */ +extern bool ia_css_any_psys_event_queue_not_empty( + struct ia_css_syscom_context *context); + +/*!Indicate if the designated 
event queue in the Psys syscom object is empty + + @param context[in] Psys syscom object + @param id[in] Psys syscom event queue ID + + @return false if the event queue is not empty or on error + */ +extern bool ia_css_is_psys_event_queue_empty( + struct ia_css_syscom_context *context, + ia_css_psys_event_queue_ID_t id); + +/*!Indicate if the designated event queue in the Psys syscom object is not empty + + @param context[in] Psys syscom object + @param id[in] Psys syscom event queue ID + + @return false if the receive queue is empty or on error + */ +extern bool ia_css_is_psys_event_queue_not_empty( + struct ia_css_syscom_context *context, + ia_css_psys_event_queue_ID_t id); + +/*!Indicate if the designated event queue + * in the Psys syscom object holds N items + + @param context[in] Psys syscom object + @param id[in] Psys syscom event queue ID + @param N[in] Number of messages + + @return false if the event queue has insufficient messages + available or on error +*/ +extern bool ia_css_has_psys_event_queue_N_msgs( + struct ia_css_syscom_context *context, + ia_css_psys_event_queue_ID_t id, + const unsigned int N); + +/*!Return the message count in the designated event queue in the + * Psys syscom object + + @param context[in] Psys syscom object + @param id[in] Psys syscom event queue ID + + @return the messages, < 0 on error + */ +extern int ia_css_psys_event_queue_get_available_msgs( + struct ia_css_syscom_context *context, + ia_css_psys_event_queue_ID_t id); + +/*! 
Send (pass by value) a command on a queue in the Psys syscom object + + @param context[in] Psys syscom object + @param id[in] Psys syscom cmd queue ID +@param cmd_msg_buffer[in] pointer to the command message buffer + +Precondition: The command message buffer must be large enough + to hold the command + +Postcondition: Either 0 or 1 commands have been sent + +Note: The message size is fixed and determined on creation + + @return the number of sent commands (1), <= 0 on error + */ +extern int ia_css_psys_cmd_queue_send( + struct ia_css_syscom_context *context, + ia_css_psys_cmd_queue_ID_t id, + const void *cmd_msg_buffer); + +/*! Send (pass by value) N commands on a queue in the Psys syscom object + + @param context[in] Psys syscom object + @param id[in] Psys syscom cmd queue ID + @param cmd_msg_buffer[in] Pointer to the command message buffer +@param N[in] Number of commands + +Precondition: The command message buffer must be large enough + to hold the commands + +Postcondition: Either 0 or up to and including N commands have been sent + + Note: The message size is fixed and determined on creation + + @return the number of sent commands, <= 0 on error + */ +extern int ia_css_psys_cmd_queue_send_N( + struct ia_css_syscom_context *context, + ia_css_psys_cmd_queue_ID_t id, + const void *cmd_msg_buffer, + const unsigned int N); + +/*! 
Receive (pass by value) an event from an event queue in the + * Psys syscom object + + @param context[in] Psys syscom object + @param id[in] Psys syscom event queue ID + @param event_msg_buffer[out] pointer to the event message buffer + + Precondition: The event message buffer must be large enough to hold the event + + Postcondition: Either 0 or 1 events have been received + + Note: The event size is fixed and determined on creation + + @return the number of received events (1), <= 0 on error + */ +extern int ia_css_psys_event_queue_receive( + struct ia_css_syscom_context *context, + ia_css_psys_event_queue_ID_t id, + void *event_msg_buffer); + +/*! Receive (pass by value) N events from an event queue in the + * Psys syscom object + + @param context[in] Psys syscom object + @param id[in] Psys syscom event queue ID + @param event_msg_buffer[out] pointer to the event message buffer + @param N[in] Number of events + + Precondition: The event buffer must be large enough to hold the events + + Postcondition: Either 0 or up to and including N events have been received + + Note: The message size is fixed and determined on creation + + @return the number of received event messages, <= 0 on error + */ +extern int ia_css_psys_event_queue_receive_N( + struct ia_css_syscom_context *context, + ia_css_psys_event_queue_ID_t id, + void *event_msg_buffer, + const unsigned int N); + + +/* + * Access functions to query the object stats + */ + + +/*!Return the size of the Psys syscom object + + @param context[in] Psys syscom object + + @return 0 on error + */ +extern size_t ia_css_psys_get_size( + const struct ia_css_syscom_context *context); + +/*!Return the number of cmd queues in the Psys syscom object + + @param context[in] Psys syscom object + + @return 0 on error + */ +extern unsigned int ia_css_psys_get_cmd_queue_count( + const struct ia_css_syscom_context *context); + +/*!Return the number of event queues in the Psys syscom object + + @param context[in] Psys syscom object + + 
@return 0 on error + */ +extern unsigned int ia_css_psys_get_event_queue_count( + const struct ia_css_syscom_context *context); + +/*!Return the size of the indicated Psys command queue + + @param context[in] Psys syscom object + @param id[in] Psys syscom cmd queue ID + + Note: The queue size is expressed in the number of fields + + @return 0 on error + */ +extern size_t ia_css_psys_get_cmd_queue_size( + const struct ia_css_syscom_context *context, + ia_css_psys_cmd_queue_ID_t id); + +/*!Return the size of the indicated Psys event queue + + @param context[in] Psys syscom object + @param id[in] Psys syscom event queue ID + + Note: The queue size is expressed in the number of fields + + @return 0 on error + */ +extern size_t ia_css_psys_get_event_queue_size( + const struct ia_css_syscom_context *context, + ia_css_psys_event_queue_ID_t id); + +/*!Return the command message size of the indicated Psys command queue + + @param context[in] Psys syscom object + + Note: The message size is expressed in uint8_t + + @return 0 on error + */ +extern size_t ia_css_psys_get_cmd_msg_size( + const struct ia_css_syscom_context *context, + ia_css_psys_cmd_queue_ID_t id); + +/*!Return the event message size of the indicated Psys event queue + + @param context[in] Psys syscom object + + Note: The message size is expressed in uint8_t + + @return 0 on error + */ +extern size_t ia_css_psys_get_event_msg_size( + const struct ia_css_syscom_context *context, + ia_css_psys_event_queue_ID_t id); + +#endif /* __IA_CSS_PSYS_DEVICE_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/psysapi/device/interface/ia_css_psys_device_trace.h b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/psysapi/device/interface/ia_css_psys_device_trace.h new file mode 100644 index 0000000000000..8e5899bc66dba --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/psysapi/device/interface/ia_css_psys_device_trace.h @@ -0,0 +1,103 @@ +/* +* Support for Intel Camera Imaging 
ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ + +#ifndef __IA_CSS_PSYS_DEVICE_TRACE_H +#define __IA_CSS_PSYS_DEVICE_TRACE_H + +#include "ia_css_psysapi_trace.h" + +#define PSYS_DEVICE_TRACE_LEVEL_CONFIG_DEFAULT PSYSAPI_TRACE_LOG_LEVEL_OFF + +/* Default sub-module tracing config */ +#if (!defined(PSYSAPI_DEVICE_TRACING_OVERRIDE)) + #define PSYS_DEVICE_TRACE_LEVEL_CONFIG \ + PSYS_DEVICE_TRACE_LEVEL_CONFIG_DEFAULT +#endif + +/* Module/sub-module specific trace setting will be used if + * the trace level is not specified from the module or + PSYSAPI_DEVICE_TRACING_OVERRIDE is defined + */ +#if (defined(PSYSAPI_DEVICE_TRACING_OVERRIDE)) + /* Module/sub-module specific trace setting */ + #if PSYSAPI_DEVICE_TRACING_OVERRIDE == PSYSAPI_TRACE_LOG_LEVEL_OFF + /* PSYSAPI_TRACE_LOG_LEVEL_OFF */ + #define PSYSAPI_DEVICE_TRACE_METHOD \ + IA_CSS_TRACE_METHOD_NATIVE + #define PSYSAPI_DEVICE_TRACE_LEVEL_ASSERT \ + IA_CSS_TRACE_LEVEL_DISABLED + #define PSYSAPI_DEVICE_TRACE_LEVEL_ERROR \ + IA_CSS_TRACE_LEVEL_DISABLED + #define PSYSAPI_DEVICE_TRACE_LEVEL_WARNING \ + IA_CSS_TRACE_LEVEL_DISABLED + #define PSYSAPI_DEVICE_TRACE_LEVEL_INFO \ + IA_CSS_TRACE_LEVEL_DISABLED + #define PSYSAPI_DEVICE_TRACE_LEVEL_DEBUG \ + IA_CSS_TRACE_LEVEL_DISABLED + #define PSYSAPI_DEVICE_TRACE_LEVEL_VERBOSE \ + IA_CSS_TRACE_LEVEL_DISABLED + #elif PSYSAPI_DEVICE_TRACING_OVERRIDE == PSYSAPI_TRACE_LOG_LEVEL_NORMAL + /* PSYSAPI_TRACE_LOG_LEVEL_NORMAL */ + #define PSYSAPI_DEVICE_TRACE_METHOD \ + IA_CSS_TRACE_METHOD_NATIVE + #define 
PSYSAPI_DEVICE_TRACE_LEVEL_ASSERT \
+ IA_CSS_TRACE_LEVEL_DISABLED
+ #define PSYSAPI_DEVICE_TRACE_LEVEL_ERROR \
+ IA_CSS_TRACE_LEVEL_ENABLED
+ #define PSYSAPI_DEVICE_TRACE_LEVEL_WARNING \
+ IA_CSS_TRACE_LEVEL_DISABLED
+ #define PSYSAPI_DEVICE_TRACE_LEVEL_INFO \
+ IA_CSS_TRACE_LEVEL_ENABLED
+ #define PSYSAPI_DEVICE_TRACE_LEVEL_DEBUG \
+ IA_CSS_TRACE_LEVEL_DISABLED
+ #define PSYSAPI_DEVICE_TRACE_LEVEL_VERBOSE \
+ IA_CSS_TRACE_LEVEL_DISABLED
+ #elif PSYSAPI_DEVICE_TRACING_OVERRIDE == PSYSAPI_TRACE_LOG_LEVEL_DEBUG
+ /* PSYSAPI_TRACE_LOG_LEVEL_DEBUG */
+ #define PSYSAPI_DEVICE_TRACE_METHOD \
+ IA_CSS_TRACE_METHOD_NATIVE
+ #define PSYSAPI_DEVICE_TRACE_LEVEL_ASSERT \
+ IA_CSS_TRACE_LEVEL_ENABLED
+ #define PSYSAPI_DEVICE_TRACE_LEVEL_ERROR \
+ IA_CSS_TRACE_LEVEL_ENABLED
+ #define PSYSAPI_DEVICE_TRACE_LEVEL_WARNING \
+ IA_CSS_TRACE_LEVEL_ENABLED
+ #define PSYSAPI_DEVICE_TRACE_LEVEL_INFO \
+ IA_CSS_TRACE_LEVEL_ENABLED
+ #define PSYSAPI_DEVICE_TRACE_LEVEL_DEBUG \
+ IA_CSS_TRACE_LEVEL_ENABLED
+ #define PSYSAPI_DEVICE_TRACE_LEVEL_VERBOSE \
+ IA_CSS_TRACE_LEVEL_ENABLED
+ #else
+ #error "No PSYSAPI_DATA Tracing level defined"
+ #endif
+#else
+ /* Inherit Module trace setting */
+ #define PSYSAPI_DEVICE_TRACE_METHOD \
+ PSYSAPI_TRACE_METHOD
+ #define PSYSAPI_DEVICE_TRACE_LEVEL_ASSERT \
+ PSYSAPI_TRACE_LEVEL_ASSERT
+ #define PSYSAPI_DEVICE_TRACE_LEVEL_ERROR \
+ PSYSAPI_TRACE_LEVEL_ERROR
+ #define PSYSAPI_DEVICE_TRACE_LEVEL_WARNING \
+ PSYSAPI_TRACE_LEVEL_WARNING
+ #define PSYSAPI_DEVICE_TRACE_LEVEL_INFO \
+ PSYSAPI_TRACE_LEVEL_INFO
+ #define PSYSAPI_DEVICE_TRACE_LEVEL_DEBUG \
+ PSYSAPI_TRACE_LEVEL_DEBUG
+ #define PSYSAPI_DEVICE_TRACE_LEVEL_VERBOSE \
+ PSYSAPI_TRACE_LEVEL_VERBOSE
+#endif
+
+#endif /* __IA_CSS_PSYS_DEVICE_TRACE_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/psysapi/device/interface/ia_css_psys_init.h b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/psysapi/device/interface/ia_css_psys_init.h new file mode 100644 index
0000000000000..1120b357632cf --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/psysapi/device/interface/ia_css_psys_init.h @@ -0,0 +1,37 @@
+/*
+* Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2010 - 2018, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+*/
+
+#ifndef __IA_CSS_PSYS_INIT_H
+#define __IA_CSS_PSYS_INIT_H
+
+#include /* vied_vaddress_t */
+
+/* Init parameters passed to the fw on device open (non secure mode) */
+typedef struct ia_css_psys_server_init {
+ /* These members are used in PSS only and will be removed */
+ /* Shared memory host address of pkg dir */
+ unsigned long long host_ddr_pkg_dir;
+ /* Address of pkg_dir structure in DDR */
+ vied_vaddress_t ddr_pkg_dir_address;
+ /* Size of Package dir in DDR */
+ uint32_t pkg_dir_size;
+
+ /* Prefetch configuration */
+ /* enable prefetching on SPC, SPP0 and SPP1 */
+ uint32_t icache_prefetch_sp;
+ /* enable prefetching on ISP0..N */
+ uint32_t icache_prefetch_isp;
+} ia_css_psys_server_init_t;
+
+#endif /* __IA_CSS_PSYS_INIT_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/psysapi/device/interface/ia_css_psys_transport.h b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/psysapi/device/interface/ia_css_psys_transport.h new file mode 100644 index 0000000000000..e0d1e935c2211 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/psysapi/device/interface/ia_css_psys_transport.h @@ -0,0 +1,92 @@
+/*
+* Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ + +#ifndef __IA_CSS_PSYS_TRANSPORT_H +#define __IA_CSS_PSYS_TRANSPORT_H + +#include /* ia_css_psys_cmd_queues */ +#include /* vied_vaddress_t */ + +#include + +typedef enum ia_css_psys_event_queues { + /**< The in-order queue for event returns */ + IA_CSS_PSYS_EVENT_QUEUE_MAIN_ID, + IA_CSS_N_PSYS_EVENT_QUEUE_ID +} ia_css_psys_event_queue_ID_t; + +typedef enum ia_css_psys_event_types { + /**< No error to report. */ + IA_CSS_PSYS_EVENT_TYPE_SUCCESS = 0, + /**< Unknown unhandled error */ + IA_CSS_PSYS_EVENT_TYPE_UNKNOWN_ERROR = 1, + /* Retrieving remote object: */ + /**< Object ID not found */ + IA_CSS_PSYS_EVENT_TYPE_RET_REM_OBJ_NOT_FOUND = 2, + /**< Objects too big, or size is zero. 
*/ + IA_CSS_PSYS_EVENT_TYPE_RET_REM_OBJ_TOO_BIG = 3, + /**< Failed to load whole process group from tproxy/dma */ + IA_CSS_PSYS_EVENT_TYPE_RET_REM_OBJ_DDR_TRANS_ERR = 4, + /**< The proper package could not be found */ + IA_CSS_PSYS_EVENT_TYPE_RET_REM_OBJ_NULL_PKG_DIR_ADDR = 5, + /* Process group: */ + /**< Failed to run, error while loading frame */ + IA_CSS_PSYS_EVENT_TYPE_PROC_GRP_LOAD_FRAME_ERR = 6, + /**< Failed to run, error while loading fragment */ + IA_CSS_PSYS_EVENT_TYPE_PROC_GRP_LOAD_FRAGMENT_ERR = 7, + /**< The process count of the process group is zero */ + IA_CSS_PSYS_EVENT_TYPE_PROC_GRP_PROCESS_COUNT_ZERO = 8, + /**< Process(es) initialization */ + IA_CSS_PSYS_EVENT_TYPE_PROC_GRP_PROCESS_INIT_ERR = 9, + /**< Aborted (after host request) */ + IA_CSS_PSYS_EVENT_TYPE_PROC_GRP_ABORT = 10, + /**< NULL pointer in the process group */ + IA_CSS_PSYS_EVENT_TYPE_PROC_GRP_NULL = 11, + /**< Process group validation failed */ + IA_CSS_PSYS_EVENT_TYPE_PROC_GRP_VALIDATION_ERR = 12 +} ia_css_psys_event_type_t; + +#define IA_CSS_PSYS_CMD_BITS 64 +struct ia_css_psys_cmd_s { + /**< The command issued to the process group */ + uint16_t command; + /**< Message field of the command */ + uint16_t msg; + /**< The context reference (process group/buffer set/...) */ + uint32_t context_handle; +}; + +#define IA_CSS_PSYS_EVENT_BITS 128 +struct ia_css_psys_event_s { + /**< The (return) status of the command issued to + * the process group this event refers to + */ + uint16_t status; + /**< The command issued to the process group this event refers to */ + uint16_t command; + /**< The context reference (process group/buffer set/...) 
*/ + uint32_t context_handle; + /**< This token (size) must match the token registered + * in a process group + */ + uint64_t token; +}; + +struct ia_css_psys_buffer_s { + /**< The in-order queue for scheduled process groups */ + void *host_buffer; + vied_vaddress_t *isp_buffer; +}; + +#endif /* __IA_CSS_PSYS_TRANSPORT_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/psysapi/device/src/ia_css_psys_device.c b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/psysapi/device/src/ia_css_psys_device.c new file mode 100644 index 0000000000000..106fe0a0da859 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/psysapi/device/src/ia_css_psys_device.c @@ -0,0 +1,853 @@ +/* +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+*/ + + +#include "ia_css_psys_device.h" +#include "ia_css_psys_device_trace.h" +#include "ia_css_psys_init.h" +#include "regmem_access.h" + +#include +#include +#include + +#include "ia_css_cell.h" + +#define IA_CSS_PSYS_CMD_QUEUE_SIZE 0x20 +#define IA_CSS_PSYS_EVENT_QUEUE_SIZE 0x40 + +static struct ia_css_syscom_queue_config ia_css_psys_cmd_queue_cfg[IA_CSS_N_PSYS_CMD_QUEUE_ID]; + +static struct ia_css_syscom_queue_config + ia_css_psys_event_queue_cfg[IA_CSS_N_PSYS_EVENT_QUEUE_ID] = { + {IA_CSS_PSYS_EVENT_QUEUE_SIZE, IA_CSS_PSYS_EVENT_BITS/8}, +}; + +static struct ia_css_syscom_config psys_syscom_config; +struct ia_css_syscom_context *psys_syscom; +#if HAS_DUAL_CMD_CTX_SUPPORT +static struct ia_css_syscom_config psys_syscom_config_secure; +struct ia_css_syscom_context *psys_syscom_secure; +#endif +static bool external_alloc = true; + +int ia_css_psys_config_print( + const struct ia_css_syscom_config *config, + void *fh) +{ + int retval = -1; + + NOT_USED(fh); + + IA_CSS_TRACE_0(PSYSAPI_DEVICE, INFO, "ia_css_frame_print(): enter:\n"); + + verifexit(config != NULL); + + retval = 0; +EXIT: + if (retval != 0) { + IA_CSS_TRACE_1(PSYSAPI_DEVICE, ERROR, + "ia_css_frame_print failed (%i)\n", retval); + } + return retval; +} + +int ia_css_psys_print( + const struct ia_css_syscom_context *context, + void *fh) +{ + int retval = -1; + + NOT_USED(fh); + + IA_CSS_TRACE_0(PSYSAPI_DEVICE, INFO, "ia_css_psys_print(): enter:\n"); + + verifexit(context != NULL); + + retval = 0; +EXIT: + if (retval != 0) { + IA_CSS_TRACE_1(PSYSAPI_DEVICE, ERROR, + "ia_css_psys_print failed (%i)\n", retval); + } + return retval; +} + +static void set_syscom_config(struct ia_css_syscom_config *config) +{ + int i; + config->num_input_queues = IA_CSS_N_PSYS_CMD_QUEUE_ID; + config->num_output_queues = IA_CSS_N_PSYS_EVENT_QUEUE_ID; + /* The number of queues are different for different platforms + * so the array is initialized here + */ + for (i = 0; i < IA_CSS_N_PSYS_CMD_QUEUE_ID; i++) { + 
ia_css_psys_cmd_queue_cfg[i].queue_size = IA_CSS_PSYS_CMD_QUEUE_SIZE; + ia_css_psys_cmd_queue_cfg[i].token_size = IA_CSS_PSYS_CMD_BITS/8; + } + config->input = ia_css_psys_cmd_queue_cfg; + config->output = ia_css_psys_event_queue_cfg; + config->vtl0_addr_mask = 0; +} + +struct ia_css_syscom_config *ia_css_psys_specify(void) +{ + struct ia_css_syscom_config *config = &psys_syscom_config; + + IA_CSS_TRACE_0(PSYSAPI_DEVICE, INFO, "ia_css_psys_specify(): enter:\n"); + set_syscom_config(config); + config->secure = false; + + return config; +} + +#if HAS_DUAL_CMD_CTX_SUPPORT +struct ia_css_syscom_config *ia_css_psys_specify_secure(unsigned int vtl0_addr_mask) +{ + struct ia_css_syscom_config *config = &psys_syscom_config_secure; + + IA_CSS_TRACE_1(PSYSAPI_DEVICE, INFO, "ia_css_psys_specify_secure(mask %#x): enter:\n", vtl0_addr_mask); + set_syscom_config(config); + config->secure = true; + config->vtl0_addr_mask = vtl0_addr_mask; + return config; +} +#endif + +size_t ia_css_sizeof_psys( + struct ia_css_syscom_config *config) +{ + size_t size = 0; + + IA_CSS_TRACE_0(PSYSAPI_DEVICE, VERBOSE, + "ia_css_sizeof_psys(): enter:\n"); + + NOT_USED(config); + + return size; +} + +/* Internal function to create syscom_context */ +static struct ia_css_syscom_context *psys_context_create( + const struct ia_css_psys_buffer_s *buffer, + struct ia_css_syscom_config *config) +{ + struct ia_css_syscom_context *context; + + IA_CSS_TRACE_0(PSYSAPI_DEVICE, INFO, "psys_context_create(): enter:\n"); + + if (config == NULL) + goto EXIT; + + if (buffer == NULL) { + /* Allocate locally */ + external_alloc = false; + } + + /* + * Here we would like to pass separately the sub-system ID + * and optionally the user pointer to be mapped, depending on + * where this open is called, and which virtual memory handles + * we see here. 
+ */ + /* context = ia_css_syscom_open(get_virtual_memory_handle(vied_psys_ID), + * buffer, config); + */ + context = ia_css_syscom_open(config, NULL); + if (context == NULL) + goto EXIT; + + return context; + +EXIT: + IA_CSS_TRACE_0(PSYSAPI_DEVICE, ERROR, "psys_context_create failed\n"); + return NULL; +} + +#if HAS_DUAL_CMD_CTX_SUPPORT +struct ia_css_syscom_context *ia_css_psys_context_create( + const struct ia_css_psys_buffer_s *buffer, + struct ia_css_syscom_config *config) +{ + return psys_context_create(buffer, config); +} + +/* push context information to DMEM for FW to access */ +int ia_css_psys_context_store_dmem( + struct ia_css_syscom_context *context, + struct ia_css_syscom_config *config) +{ + return ia_css_syscom_store_dmem(context, config->ssid, config->vtl0_addr_mask); +} +#endif + +/* Internal function to start psys server */ +static int psys_start_server( + struct ia_css_syscom_config *config) +{ + ia_css_psys_server_init_t *server_config; + + IA_CSS_TRACE_0(PSYSAPI_DEVICE, INFO, "psys_start_server(): enter:\n"); + + /* Configure SPC icache prefetching and start SPC */ + server_config = (ia_css_psys_server_init_t *)config->specific_addr; + IA_CSS_TRACE_1(PSYSAPI_DEVICE, INFO, "SPC prefetch: %d\n", + server_config->icache_prefetch_sp); + ia_css_cell_start_prefetch(config->ssid, SPC0, + server_config->icache_prefetch_sp); + return 0; +} + +#if HAS_DUAL_CMD_CTX_SUPPORT +int ia_css_psys_open( + struct ia_css_syscom_config *config) +{ + IA_CSS_TRACE_0(PSYSAPI_DEVICE, INFO, "ia_css_psys_open(): enter:\n"); + return psys_start_server(config); +} +#else +struct ia_css_syscom_context *ia_css_psys_open( + const struct ia_css_psys_buffer_s *buffer, + struct ia_css_syscom_config *config) +{ + struct ia_css_syscom_context *context; + + IA_CSS_TRACE_0(PSYSAPI_DEVICE, INFO, "ia_css_psys_open(): enter:\n"); + + context = psys_context_create(buffer, config); + + /* Configure SPC icache prefetching and start SPC */ + psys_start_server(config); + + return context; 
+} +#endif /* HAS_DUAL_CMD_CTX_SUPPORT */ + +bool ia_css_psys_open_is_ready( + struct ia_css_syscom_context *context) +{ + int retval = -1; + bool ready = 0; + unsigned int i; + int syscom_retval; + + verifexit(context != NULL); + + for (i = 0; i < IA_CSS_N_PSYS_CMD_QUEUE_ID; i++) { + syscom_retval = ia_css_syscom_send_port_open(context, i); + if (syscom_retval != 0) { + if (syscom_retval == FW_ERROR_BUSY) { + /* Do not print error */ + retval = 0; + } + /* Not ready yet */ + goto EXIT; + } + } + + for (i = 0; i < IA_CSS_N_PSYS_EVENT_QUEUE_ID; i++) { + syscom_retval = ia_css_syscom_recv_port_open(context, i); + if (syscom_retval != 0) { + if (syscom_retval == FW_ERROR_BUSY) { + /* Do not print error */ + retval = 0; + } + /* Not ready yet */ + goto EXIT; + } + } + + IA_CSS_TRACE_0(PSYSAPI_DEVICE, INFO, + "ia_css_psys_open_is_ready(): complete:\n"); + + /* If this point reached, do not print error */ + retval = 0; + /* If this point reached, ready */ + ready = 1; +EXIT: + if (retval != 0) { + IA_CSS_TRACE_0(PSYSAPI_DEVICE, ERROR, + "ia_css_psys_open_is_ready failed\n"); + } + return ready; +} + +/* Internal function to close syscom_context */ +static struct ia_css_syscom_context *psys_context_destroy( + struct ia_css_syscom_context *context) +{ + /* Success: return NULL, Error: return context pointer value + * Intention is to change return type to int (errno), + * see commented values. 
+ */ + + unsigned int i; + + IA_CSS_TRACE_0(PSYSAPI_DEVICE, INFO, "psys_context_destroy(): enter:\n"); + + /* NULL pointer check disabled, since there is no proper return value */ + + for (i = 0; i < IA_CSS_N_PSYS_CMD_QUEUE_ID; i++) { + if (ia_css_syscom_send_port_close(context, i) != 0) + return context; /* EINVAL */ + } + + for (i = 0; i < IA_CSS_N_PSYS_EVENT_QUEUE_ID; i++) { + if (ia_css_syscom_recv_port_close(context, i) != 0) + return context; /* EINVAL */ + } + + /* request device close */ + if (ia_css_syscom_close(context) != 0) + return context; /* EBUSY */ + + IA_CSS_TRACE_0(PSYSAPI_DEVICE, INFO, + "psys_context_destroy(): leave: OK\n"); + return NULL; +} + +#if HAS_DUAL_CMD_CTX_SUPPORT +struct ia_css_syscom_context *ia_css_psys_context_destroy( + struct ia_css_syscom_context *context) +{ + return psys_context_destroy(context); +} + +int ia_css_psys_close() +{ + /* Intentionally left blank for now since syscom objects should have + * been destroyed already by prior ia_css_psys_context_destroy() calls. 
+ */ + return 0; +} +#else +struct ia_css_syscom_context *ia_css_psys_close( + struct ia_css_syscom_context *context) +{ + return psys_context_destroy(context); +} +#endif /* HAS_DUAL_CMD_CTX_SUPPORT */ + +int ia_css_psys_release( + struct ia_css_syscom_context *context, + bool force) +{ + if (context == NULL) + return -EFAULT; + + /* try to free resources */ + if (ia_css_syscom_release(context, force) != 0) + return -EBUSY; + + IA_CSS_TRACE_0(PSYSAPI_DEVICE, INFO, + "ia_css_psys_release(): leave: OK\n"); + return 0; +} + +ia_css_psys_state_t ia_css_psys_check_state( + struct ia_css_syscom_context *context) +{ + IA_CSS_TRACE_0(PSYSAPI_DEVICE, VERBOSE, + "ia_css_psys_check_state(): enter:\n"); + + NOT_USED(context); + + /* For the time being, return the READY state to be used by SPC test */ + return IA_CSS_PSYS_STATE_READY; +} + +bool ia_css_is_psys_cmd_queue_full( + struct ia_css_syscom_context *context, + ia_css_psys_cmd_queue_ID_t id) +{ + bool is_full = false; + int num_tokens; + int retval = -1; + + IA_CSS_TRACE_0(PSYSAPI_DEVICE, VERBOSE, + "ia_css_is_psys_cmd_queue_full(): enter:\n"); + verifexit(context != NULL); + + num_tokens = ia_css_syscom_send_port_available(context, + (unsigned int)id); + verifexit(num_tokens >= 0); + + is_full = (num_tokens == 0); + retval = 0; +EXIT: + if (retval != 0) { + is_full = true; + IA_CSS_TRACE_0(PSYSAPI_DEVICE, ERROR, + "ia_css_is_psys_cmd_queue_full failed\n"); + } + return is_full; +} + +bool ia_css_is_psys_cmd_queue_not_full( + struct ia_css_syscom_context *context, + ia_css_psys_cmd_queue_ID_t id) +{ + bool is_not_full = false; + int num_tokens; + int retval = -1; + + IA_CSS_TRACE_0(PSYSAPI_DEVICE, VERBOSE, + "ia_css_is_psys_cmd_queue_not_full(): enter:\n"); + verifexit(context != NULL); + + num_tokens = ia_css_syscom_send_port_available(context, + (unsigned int)id); + verifexit(num_tokens >= 0); + + is_not_full = (num_tokens != 0); + retval = 0; +EXIT: + if (retval != 0) { + IA_CSS_TRACE_0(PSYSAPI_DEVICE, ERROR, + 
"ia_css_is_psys_cmd_queue_not_full failed\n"); + } + return is_not_full; +} + +bool ia_css_has_psys_cmd_queue_N_space( + struct ia_css_syscom_context *context, + ia_css_psys_cmd_queue_ID_t id, + const unsigned int N) +{ + bool has_N_space = false; + int num_tokens; + int retval = -1; + + IA_CSS_TRACE_0(PSYSAPI_DEVICE, VERBOSE, + "ia_css_has_psys_cmd_queue_N_space(): enter:\n"); + verifexit(context != NULL); + + num_tokens = ia_css_syscom_send_port_available(context, + (unsigned int)id); + verifexit(num_tokens >= 0); + + has_N_space = ((unsigned int)num_tokens >= N); +EXIT: + if (retval != 0) { + IA_CSS_TRACE_0(PSYSAPI_DEVICE, ERROR, + "ia_css_has_psys_cmd_queue_N_space failed\n"); + } + return has_N_space; +} + +int ia_css_psys_cmd_queue_get_available_space( + struct ia_css_syscom_context *context, + ia_css_psys_cmd_queue_ID_t id) +{ + int N_space = -1; + int num_tokens; + + IA_CSS_TRACE_0(PSYSAPI_DEVICE, VERBOSE, + "ia_css_psys_cmd_queue_get_available_space(): enter:\n"); + verifexit(context != NULL); + + num_tokens = ia_css_syscom_send_port_available(context, + (unsigned int)id); + verifexit(num_tokens >= 0); + + N_space = (int)(num_tokens); +EXIT: + if (N_space < 0) { + IA_CSS_TRACE_0(PSYSAPI_DEVICE, ERROR, + "ia_css_psys_cmd_queue_get_available_space failed\n"); + } + return N_space; +} + +bool ia_css_any_psys_event_queue_not_empty( + struct ia_css_syscom_context *context) +{ + ia_css_psys_event_queue_ID_t i; + bool any_msg = false; + + IA_CSS_TRACE_0(PSYSAPI_DEVICE, VERBOSE, + "ia_css_any_psys_event_queue_not_empty(): enter:\n"); + verifexit(context != NULL); + + for (i = (ia_css_psys_event_queue_ID_t)0; + i < IA_CSS_N_PSYS_EVENT_QUEUE_ID; i++) { + any_msg = + any_msg || ia_css_is_psys_event_queue_not_empty(context, i); + } + +EXIT: + return any_msg; +} + +bool ia_css_is_psys_event_queue_empty( + struct ia_css_syscom_context *context, + ia_css_psys_event_queue_ID_t id) +{ + bool is_empty = false; + int num_tokens; + int retval = -1; + + 
IA_CSS_TRACE_0(PSYSAPI_DEVICE, VERBOSE, + "ia_css_is_psys_event_queue_empty(): enter:\n"); + verifexit(context != NULL); + + num_tokens = ia_css_syscom_recv_port_available(context, (unsigned int)id); + verifexit(num_tokens >= 0); + + is_empty = (num_tokens == 0); + retval = 0; +EXIT: + if (retval != 0) { + is_empty = true; + IA_CSS_TRACE_0(PSYSAPI_DEVICE, ERROR, + "ia_css_is_psys_event_queue_empty failed\n"); + } + return is_empty; +} + +bool ia_css_is_psys_event_queue_not_empty( + struct ia_css_syscom_context *context, + ia_css_psys_event_queue_ID_t id) +{ + bool is_not_empty = false; + int num_tokens; + int retval = -1; + + IA_CSS_TRACE_0(PSYSAPI_DEVICE, VERBOSE, + "ia_css_is_psys_event_queue_not_empty(): enter:\n"); + verifexit(context != NULL); + + num_tokens = ia_css_syscom_recv_port_available(context, + (unsigned int)id); + verifexit(num_tokens >= 0); + + is_not_empty = (num_tokens != 0); + retval = 0; +EXIT: + if (retval != 0) { + IA_CSS_TRACE_0(PSYSAPI_DEVICE, ERROR, + "ia_css_is_psys_event_queue_not_empty failed\n"); + } + return is_not_empty; +} + +bool ia_css_has_psys_event_queue_N_msgs( + struct ia_css_syscom_context *context, + ia_css_psys_event_queue_ID_t id, + const unsigned int N) +{ + bool has_N_msgs = false; + int num_tokens; + int retval = -1; + + IA_CSS_TRACE_0(PSYSAPI_DEVICE, VERBOSE, + "ia_css_has_psys_event_queue_N_msgs(): enter:\n"); + verifexit(context != NULL); + + num_tokens = ia_css_syscom_recv_port_available(context, + (unsigned int)id); + verifexit(num_tokens >= 0); + + has_N_msgs = ((unsigned int)num_tokens >= N); + retval = 0; +EXIT: + if (retval != 0) { + IA_CSS_TRACE_0(PSYSAPI_DEVICE, ERROR, + "ia_css_has_psys_event_queue_N_msgs failed\n"); + } + return has_N_msgs; +} + +int ia_css_psys_event_queue_get_available_msgs( + struct ia_css_syscom_context *context, + ia_css_psys_event_queue_ID_t id) +{ + int N_msgs = -1; + int num_tokens; + + IA_CSS_TRACE_0(PSYSAPI_DEVICE, VERBOSE, + "ia_css_psys_event_queue_get_available_msgs(): 
enter:\n"); + verifexit(context != NULL); + + num_tokens = ia_css_syscom_recv_port_available(context, + (unsigned int)id); + verifexit(num_tokens >= 0); + + N_msgs = (int)(num_tokens); +EXIT: + if (N_msgs < 0) { + IA_CSS_TRACE_0(PSYSAPI_DEVICE, ERROR, + "ia_css_psys_event_queue_get_available_msgs failed\n"); + } + return N_msgs; +} + +int ia_css_psys_cmd_queue_send( + struct ia_css_syscom_context *context, + ia_css_psys_cmd_queue_ID_t id, + const void *cmd_msg_buffer) +{ + int count = 0; + + IA_CSS_TRACE_0(PSYSAPI_DEVICE, VERBOSE, + "ia_css_psys_cmd_queue_send(): enter:\n"); + verifexit(context != NULL); + + verifexit(context != NULL); + /* The ~full check fails on receive queues */ + verifexit(ia_css_is_psys_cmd_queue_not_full(context, id)); + verifexit(cmd_msg_buffer != NULL); + + verifexit(ia_css_syscom_send_port_transfer(context, (unsigned int)id, + cmd_msg_buffer) >= 0); + + count = 1; +EXIT: + if (count == 0) { + IA_CSS_TRACE_0(PSYSAPI_DEVICE, ERROR, + "ia_css_psys_cmd_queue_send failed\n"); + } + return count; +} + +int ia_css_psys_cmd_queue_send_N( + struct ia_css_syscom_context *context, + ia_css_psys_cmd_queue_ID_t id, + const void *cmd_msg_buffer, + const unsigned int N) +{ + struct ia_css_psys_cmd_s *cmd_msg_buffer_loc = + (struct ia_css_psys_cmd_s *)cmd_msg_buffer; + int count = 0; + + IA_CSS_TRACE_0(PSYSAPI_DEVICE, VERBOSE, + "ia_css_psys_cmd_queue_send_N(): enter:\n"); + verifexit(context != NULL); + + for (count = 0; count < (int)N; count++) { + int count_loc = ia_css_psys_cmd_queue_send(context, id, + (void *)(&cmd_msg_buffer_loc[count])); + + verifexit(count_loc == 1); + } + +EXIT: + if ((unsigned int) count < N) { + IA_CSS_TRACE_0(PSYSAPI_DEVICE, ERROR, + "ia_css_psys_cmd_queue_send_N failed\n"); + } + return count; +} + +int ia_css_psys_event_queue_receive( + struct ia_css_syscom_context *context, + ia_css_psys_event_queue_ID_t id, + void *event_msg_buffer) +{ + int count = 0; + + IA_CSS_TRACE_0(PSYSAPI_DEVICE, VERBOSE, + 
"ia_css_psys_event_queue_receive(): enter:\n"); + + verifexit(context != NULL); + /* The ~empty check fails on send queues */ + verifexit(ia_css_is_psys_event_queue_not_empty(context, id)); + verifexit(event_msg_buffer != NULL); + + verifexit(ia_css_syscom_recv_port_transfer(context, (unsigned int)id, + event_msg_buffer) >= 0); + + count = 1; +EXIT: + if (count == 0) { + IA_CSS_TRACE_0(PSYSAPI_DEVICE, ERROR, + "ia_css_psys_event_queue_receive failed\n"); + } + return count; +} + +int ia_css_psys_event_queue_receive_N( + struct ia_css_syscom_context *context, + ia_css_psys_event_queue_ID_t id, + void *event_msg_buffer, + const unsigned int N) +{ + struct ia_css_psys_event_s *event_msg_buffer_loc; + int count; + + IA_CSS_TRACE_0(PSYSAPI_DEVICE, VERBOSE, + "ia_css_psys_event_queue_receive_N(): enter:\n"); + + event_msg_buffer_loc = (struct ia_css_psys_event_s *)event_msg_buffer; + + for (count = 0; count < (int)N; count++) { + int count_loc = ia_css_psys_event_queue_receive(context, id, + (void *)(&event_msg_buffer_loc[count])); + + verifexit(count_loc == 1); + } + +EXIT: + if ((unsigned int) count < N) { + IA_CSS_TRACE_0(PSYSAPI_DEVICE, ERROR, + "ia_css_psys_event_queue_receive_N failed\n"); + } + return count; +} + +size_t ia_css_psys_get_size( + const struct ia_css_syscom_context *context) +{ + size_t size = 0; + + IA_CSS_TRACE_0(PSYSAPI_DEVICE, VERBOSE, + "ia_css_psys_get_size(): enter:\n"); + + verifexit(context != NULL); + /* How can I query the context ? */ +EXIT: + if (size == 0) { + IA_CSS_TRACE_0(PSYSAPI_DEVICE, ERROR, + "ia_css_psys_get_size failed\n"); + } + return size; +} + +unsigned int ia_css_psys_get_cmd_queue_count( + const struct ia_css_syscom_context *context) +{ + unsigned int count = 0; + int retval = -1; + + IA_CSS_TRACE_0(PSYSAPI_DEVICE, VERBOSE, + "ia_css_psys_get_cmd_queue_count(): enter:\n"); + + verifexit(context != NULL); + /* How can I query the context ? 
*/ + NOT_USED(context); + count = (unsigned int)IA_CSS_N_PSYS_CMD_QUEUE_ID; + retval = 0; +EXIT: + if (retval != 0) { + IA_CSS_TRACE_0(PSYSAPI_DEVICE, ERROR, + "ia_css_psys_get_cmd_queue_count failed\n"); + } + return count; +} + +unsigned int ia_css_psys_get_event_queue_count( + const struct ia_css_syscom_context *context) +{ + unsigned int count = 0; + int retval = -1; + + IA_CSS_TRACE_0(PSYSAPI_DEVICE, VERBOSE, + "ia_css_psys_get_event_queue_count(): enter:\n"); + + verifexit(context != NULL); + /* How can I query the context ? */ + NOT_USED(context); + count = (unsigned int)IA_CSS_N_PSYS_EVENT_QUEUE_ID; + retval = 0; +EXIT: + if (retval != 0) { + IA_CSS_TRACE_0(PSYSAPI_DEVICE, ERROR, + "ia_css_psys_get_event_queue_count failed\n"); + } + return count; +} + +size_t ia_css_psys_get_cmd_queue_size( + const struct ia_css_syscom_context *context, + ia_css_psys_cmd_queue_ID_t id) +{ + size_t queue_size = 0; + + IA_CSS_TRACE_0(PSYSAPI_DEVICE, VERBOSE, + "ia_css_psys_get_cmd_queue_size(): enter:\n"); + + verifexit(context != NULL); + /* How can I query the context ? */ + NOT_USED(context); + queue_size = ia_css_psys_cmd_queue_cfg[id].queue_size; +EXIT: + if (queue_size == 0) { + IA_CSS_TRACE_0(PSYSAPI_DEVICE, ERROR, + "ia_css_psys_get_cmd_queue_size failed\n"); + } + return queue_size; +} + +size_t ia_css_psys_get_event_queue_size( + const struct ia_css_syscom_context *context, + ia_css_psys_event_queue_ID_t id) +{ + size_t queue_size = 0; + + IA_CSS_TRACE_0(PSYSAPI_DEVICE, VERBOSE, + "ia_css_psys_get_event_queue_size(): enter:\n"); + + verifexit(context != NULL); + /* How can I query the context ? 
*/ + NOT_USED(context); + queue_size = ia_css_psys_event_queue_cfg[id].queue_size; +EXIT: + if (queue_size == 0) { + IA_CSS_TRACE_0(PSYSAPI_DEVICE, ERROR, + "ia_css_psys_get_event_queue_size failed\n"); + } + return queue_size; +} + +size_t ia_css_psys_get_cmd_msg_size( + const struct ia_css_syscom_context *context, + ia_css_psys_cmd_queue_ID_t id) +{ + size_t msg_size = 0; + + IA_CSS_TRACE_0(PSYSAPI_DEVICE, VERBOSE, + "ia_css_psys_get_cmd_msg_size(): enter:\n"); + + verifexit(context != NULL); + /* How can I query the context ? */ + NOT_USED(context); + msg_size = ia_css_psys_cmd_queue_cfg[id].token_size; +EXIT: + if (msg_size == 0) { + IA_CSS_TRACE_0(PSYSAPI_DEVICE, ERROR, + "ia_css_psys_get_cmd_msg_size failed\n"); + } + return msg_size; +} + +size_t ia_css_psys_get_event_msg_size( + const struct ia_css_syscom_context *context, + ia_css_psys_event_queue_ID_t id) +{ + size_t msg_size = 0; + + IA_CSS_TRACE_0(PSYSAPI_DEVICE, VERBOSE, + "ia_css_psys_get_event_msg_size(): enter:\n"); + + verifexit(context != NULL); + /* How can I query the context ? */ + NOT_USED(context); + msg_size = ia_css_psys_event_queue_cfg[id].token_size; +EXIT: + if (msg_size == 0) { + IA_CSS_TRACE_0(PSYSAPI_DEVICE, ERROR, + "ia_css_psys_get_cmd_msg_size failed\n"); + } + return msg_size; +} diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/psysapi/dynamic/interface/ia_css_psys_buffer_set.h b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/psysapi/dynamic/interface/ia_css_psys_buffer_set.h new file mode 100644 index 0000000000000..392b4359353f4 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/psysapi/dynamic/interface/ia_css_psys_buffer_set.h @@ -0,0 +1,174 @@ +/* + * Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. 
+ * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + */ + +#ifndef __IA_CSS_PSYS_BUFFER_SET_H +#define __IA_CSS_PSYS_BUFFER_SET_H + +#include "ia_css_base_types.h" +#include "ia_css_psys_dynamic_storage_class.h" +#include "ia_css_psys_process_types.h" +#include "ia_css_terminal_types.h" + +#define N_UINT64_IN_BUFFER_SET_STRUCT 1 +#define N_UINT16_IN_BUFFER_SET_STRUCT 1 +#define N_UINT8_IN_BUFFER_SET_STRUCT 1 +#define N_PADDING_UINT8_IN_BUFFER_SET_STRUCT 5 +#define SIZE_OF_BUFFER_SET \ + (N_UINT64_IN_BUFFER_SET_STRUCT * IA_CSS_UINT64_T_BITS \ + + VIED_VADDRESS_BITS \ + + VIED_VADDRESS_BITS \ + + N_UINT16_IN_BUFFER_SET_STRUCT * IA_CSS_UINT16_T_BITS \ + + N_UINT8_IN_BUFFER_SET_STRUCT * IA_CSS_UINT8_T_BITS \ + + N_PADDING_UINT8_IN_BUFFER_SET_STRUCT * IA_CSS_UINT8_T_BITS) + +typedef struct ia_css_buffer_set_s ia_css_buffer_set_t; + +struct ia_css_buffer_set_s { + /* Token for user context reference */ + uint64_t token; + /* IPU virtual address of this buffer set */ + vied_vaddress_t ipu_virtual_address; + /* IPU virtual address of the process group corresponding to this buffer set */ + vied_vaddress_t process_group_handle; + /* Number of terminal buffer addresses in this structure */ + uint16_t terminal_count; + /* Frame id to associate with this buffer set */ + uint8_t frame_counter; + /* Padding for 64bit alignment */ + uint8_t padding[N_PADDING_UINT8_IN_BUFFER_SET_STRUCT]; +}; + + +/*! 
Construct a buffer set object at specified location + + @param buffer_set_mem[in] memory location to create buffer set object + @param process_group[in] process group corresponding to this buffer set + @param frame_counter[in] frame number for this buffer set object + + @return pointer to buffer set object on success, NULL on error + */ +ia_css_buffer_set_t *ia_css_buffer_set_create( + void *buffer_set_mem, + const ia_css_process_group_t *process_group, + const unsigned int frame_counter); + +/*! Compute size (in bytes) required for full buffer set object + + @param process_group[in] process group corresponding to this buffer set + + @return size in bytes of buffer set object on success, 0 on error + */ +size_t ia_css_sizeof_buffer_set( + const ia_css_process_group_t *process_group); + +/*! Set a buffer address in a buffer set object + + @param buffer_set[in] buffer set object to set buffer in + @param terminal_index[in] terminal index to use as a reference between + buffer and terminal + @param buffer[in] buffer address to store + + @return 0 on success, -1 on error + */ +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_H +int ia_css_buffer_set_set_buffer( + ia_css_buffer_set_t *buffer_set, + const unsigned int terminal_index, + const vied_vaddress_t buffer); + +/*! Get virtual buffer address from a buffer set object and terminal object by + resolving the index used + + @param buffer_set[in] buffer set object to get buffer from + @param terminal[in] terminal object to get buffer of + + @return virtual buffer address on success, VIED_NULL on error + */ +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_H +vied_vaddress_t ia_css_buffer_set_get_buffer( + const ia_css_buffer_set_t *buffer_set, + const ia_css_terminal_t *terminal); + +/*! 
Set ipu virtual address of a buffer set object within the buffer set object + + @param buffer_set[in] buffer set object to set ipu address in + @param ipu_vaddress[in] ipu virtual address of the buffer set object + + @return 0 on success, -1 on error + */ +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_H +int ia_css_buffer_set_set_ipu_address( + ia_css_buffer_set_t *buffer_set, + const vied_vaddress_t ipu_vaddress); + +/*! Get ipu virtual address from a buffer set object + + @param buffer_set[in] buffer set object to get ipu address from + + @return virtual buffer set address on success, VIED_NULL on error + */ +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_H +vied_vaddress_t ia_css_buffer_set_get_ipu_address( + const ia_css_buffer_set_t *buffer_set); + +/*! Set process group handle in a buffer set object + + @param buffer_set[in] buffer set object to set handle in + @param process_group_handle[in] process group handle of the buffer set + object + + @return 0 on success, -1 on error + */ +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_H +int ia_css_buffer_set_set_process_group_handle( + ia_css_buffer_set_t *buffer_set, + const vied_vaddress_t process_group_handle); + +/*! Get process group handle from a buffer set object + + @param buffer_set[in] buffer set object to get handle from + + @return virtual process group address on success, VIED_NULL on error + */ +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_H +vied_vaddress_t ia_css_buffer_set_get_process_group_handle( + const ia_css_buffer_set_t *buffer_set); + +/*! Set token of a buffer set object within the buffer set object + + @param buffer_set[in] buffer set object to set ipu address in + @param token[in] token of the buffer set object + + @return 0 on success, -1 on error + */ +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_H +int ia_css_buffer_set_set_token( + ia_css_buffer_set_t *buffer_set, + const uint64_t token); + +/*! 
Get token from a buffer set object + + @param buffer_set[in] buffer set object to get token from + + @return token on success, NULL on error + */ +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_H +uint64_t ia_css_buffer_set_get_token( + const ia_css_buffer_set_t *buffer_set); + +#ifdef __IA_CSS_PSYS_DYNAMIC_INLINE__ +#include "ia_css_psys_buffer_set_impl.h" +#endif /* __IA_CSS_PSYS_DYNAMIC_INLINE__ */ + +#endif /* __IA_CSS_PSYS_BUFFER_SET_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/psysapi/dynamic/interface/ia_css_psys_dynamic_storage_class.h b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/psysapi/dynamic/interface/ia_css_psys_dynamic_storage_class.h new file mode 100644 index 0000000000000..9a1e3a7a12949 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/psysapi/dynamic/interface/ia_css_psys_dynamic_storage_class.h @@ -0,0 +1,28 @@ +/* +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+*/ + +#ifndef __IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_H +#define __IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_H + +#include "storage_class.h" + +#ifndef __IA_CSS_PSYS_DYNAMIC_INLINE__ +#define IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_H STORAGE_CLASS_EXTERN +#define IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_C +#else +#define IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_H STORAGE_CLASS_INLINE +#define IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_C STORAGE_CLASS_INLINE +#endif + +#endif /* __IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/psysapi/dynamic/interface/ia_css_psys_dynamic_trace.h b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/psysapi/dynamic/interface/ia_css_psys_dynamic_trace.h new file mode 100644 index 0000000000000..e8a979dfce0bf --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/psysapi/dynamic/interface/ia_css_psys_dynamic_trace.h @@ -0,0 +1,103 @@ +/* +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+*/ + +#ifndef __IA_CSS_PSYS_DYNAMIC_TRACE_H +#define __IA_CSS_PSYS_DYNAMIC_TRACE_H + +#include "ia_css_psysapi_trace.h" + +#define PSYS_DYNAMIC_TRACE_LEVEL_CONFIG_DEFAULT PSYSAPI_TRACE_LOG_LEVEL_OFF + +/* Default sub-module tracing config */ +#if (!defined(PSYSAPI_DYNAMIC_TRACING_OVERRIDE)) + #define PSYS_DYNAMIC_TRACE_LEVEL_CONFIG \ + PSYS_DYNAMIC_TRACE_LEVEL_CONFIG_DEFAULT +#endif + +/* Module/sub-module specific trace setting will be used if + * the trace level is not specified from the module or + PSYSAPI_DYNAMIC_TRACING_OVERRIDE is defined + */ +#if (defined(PSYSAPI_DYNAMIC_TRACING_OVERRIDE)) + /* Module/sub-module specific trace setting */ + #if PSYSAPI_DYNAMIC_TRACING_OVERRIDE == PSYSAPI_TRACE_LOG_LEVEL_OFF + /* PSYSAPI_TRACE_LOG_LEVEL_OFF */ + #define PSYSAPI_DYNAMIC_TRACE_METHOD \ + IA_CSS_TRACE_METHOD_NATIVE + #define PSYSAPI_DYNAMIC_TRACE_LEVEL_ASSERT \ + IA_CSS_TRACE_LEVEL_DISABLED + #define PSYSAPI_DYNAMIC_TRACE_LEVEL_ERROR \ + IA_CSS_TRACE_LEVEL_DISABLED + #define PSYSAPI_DYNAMIC_TRACE_LEVEL_WARNING \ + IA_CSS_TRACE_LEVEL_DISABLED + #define PSYSAPI_DYNAMIC_TRACE_LEVEL_INFO \ + IA_CSS_TRACE_LEVEL_DISABLED + #define PSYSAPI_DYNAMIC_TRACE_LEVEL_DEBUG \ + IA_CSS_TRACE_LEVEL_DISABLED + #define PSYSAPI_DYNAMIC_TRACE_LEVEL_VERBOSE \ + IA_CSS_TRACE_LEVEL_DISABLED + #elif PSYSAPI_DYNAMIC_TRACING_OVERRIDE == PSYSAPI_TRACE_LOG_LEVEL_NORMAL + /* PSYSAPI_TRACE_LOG_LEVEL_NORMAL */ + #define PSYSAPI_DYNAMIC_TRACE_METHOD \ + IA_CSS_TRACE_METHOD_NATIVE + #define PSYSAPI_DYNAMIC_TRACE_LEVEL_ASSERT \ + IA_CSS_TRACE_LEVEL_DISABLED + #define PSYSAPI_DYNAMIC_TRACE_LEVEL_ERROR \ + IA_CSS_TRACE_LEVEL_ENABLED + #define PSYSAPI_DYNAMIC_TRACE_LEVEL_WARNING \ + IA_CSS_TRACE_LEVEL_DISABLED + #define PSYSAPI_DYNAMIC_TRACE_LEVEL_INFO \ + IA_CSS_TRACE_LEVEL_ENABLED + #define PSYSAPI_DYNAMIC_TRACE_LEVEL_DEBUG \ + IA_CSS_TRACE_LEVEL_DISABLED + #define PSYSAPI_DYNAMIC_TRACE_LEVEL_VERBOSE \ + IA_CSS_TRACE_LEVEL_DISABLED + #elif PSYSAPI_DYNAMIC_TRACING_OVERRIDE == 
PSYSAPI_TRACE_LOG_LEVEL_DEBUG + /* PSYSAPI_TRACE_LOG_LEVEL_DEBUG */ + #define PSYSAPI_DYNAMIC_TRACE_METHOD \ + IA_CSS_TRACE_METHOD_NATIVE + #define PSYSAPI_DYNAMIC_TRACE_LEVEL_ASSERT \ + IA_CSS_TRACE_LEVEL_ENABLED + #define PSYSAPI_DYNAMIC_TRACE_LEVEL_ERROR \ + IA_CSS_TRACE_LEVEL_ENABLED + #define PSYSAPI_DYNAMIC_TRACE_LEVEL_WARNING \ + IA_CSS_TRACE_LEVEL_ENABLED + #define PSYSAPI_DYNAMIC_TRACE_LEVEL_INFO \ + IA_CSS_TRACE_LEVEL_ENABLED + #define PSYSAPI_DYNAMIC_TRACE_LEVEL_DEBUG \ + IA_CSS_TRACE_LEVEL_ENABLED + #define PSYSAPI_DYNAMIC_TRACE_LEVEL_VERBOSE \ + IA_CSS_TRACE_LEVEL_ENABLED + #else + #error "No PSYSAPI_DATA Tracing level defined" + #endif +#else + /* Inherit Module trace setting */ + #define PSYSAPI_DYNAMIC_TRACE_METHOD \ + PSYSAPI_TRACE_METHOD + #define PSYSAPI_DYNAMIC_TRACE_LEVEL_ASSERT \ + PSYSAPI_TRACE_LEVEL_ASSERT + #define PSYSAPI_DYNAMIC_TRACE_LEVEL_ERROR \ + PSYSAPI_TRACE_LEVEL_ERROR + #define PSYSAPI_DYNAMIC_TRACE_LEVEL_WARNING \ + PSYSAPI_TRACE_LEVEL_WARNING + #define PSYSAPI_DYNAMIC_TRACE_LEVEL_INFO \ + PSYSAPI_TRACE_LEVEL_INFO + #define PSYSAPI_DYNAMIC_TRACE_LEVEL_DEBUG \ + PSYSAPI_TRACE_LEVEL_DEBUG + #define PSYSAPI_DYNAMIC_TRACE_LEVEL_VERBOSE \ + PSYSAPI_TRACE_LEVEL_VERBOSE +#endif + +#endif /* __IA_CSS_PSYSAPI_DYNAMIC_TRACE_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/psysapi/dynamic/interface/ia_css_psys_process.h b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/psysapi/dynamic/interface/ia_css_psys_process.h new file mode 100644 index 0000000000000..f4ef80f742135 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/psysapi/dynamic/interface/ia_css_psys_process.h @@ -0,0 +1,396 @@ +/* +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. 
+ * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ + +#ifndef __IA_CSS_PSYS_PROCESS_H +#define __IA_CSS_PSYS_PROCESS_H + +/*! \file */ + +/** @file ia_css_psys_process.h + * + * Define the methods on the process object that are not part of + * a single interface + */ + +#include +#include + +#include + +#include /* uint8_t */ + +/* + * Creation + */ +#include + +/* + * Internal resources + */ +#include + +/* + * Process manager + */ +#include + +/* + * Command processor + */ + +/*! Execute a command locally or send it to be processed remotely + + @param process[in] process object + @param cmd[in] command + + @return < 0 on invalid argument(s) or process state + */ +extern int ia_css_process_cmd( + ia_css_process_t *process, + const ia_css_process_cmd_t cmd); + +/*! Get the internal memory offset of the process object + + @param process[in] process object + @param mem_id[in] memory id + + @return internal memory offset, + IA_CSS_PROCESS_INVALID_OFFSET on invalid argument(s) +*/ +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_H +vied_nci_resource_size_t ia_css_process_get_int_mem_offset( + const ia_css_process_t *process, + const vied_nci_mem_type_ID_t mem_id); + + +/*! Get the external memory offset of the process object + + @param process[in] process object + @param mem_id[in] memory id + + @return external memory offset, + IA_CSS_PROCESS_INVALID_OFFSET on invalid argument(s) +*/ +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_H +vied_nci_resource_size_t ia_css_process_get_ext_mem_offset( + const ia_css_process_t *process, + const vied_nci_mem_type_ID_t mem_type_id); + + +/*! 
Get the stored size of the process object + + @param process[in] process object + + @return size, 0 on invalid argument + */ +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_H +size_t ia_css_process_get_size(const ia_css_process_t *process); + +/*! Get the (pointer to) the process group parent of the process object + + @param process[in] process object + + @return the pointer to the parent, NULL on invalid argument + */ +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_H +ia_css_process_group_t *ia_css_process_get_parent( + const ia_css_process_t *process); + +/*! Set the (pointer to) the process group parent of the process object + + @param process[in] process object + @param parent[in] (pointer to the) process group parent object + + @return < 0 on invalid argument(s) + */ +extern int ia_css_process_set_parent( + ia_css_process_t *process, + ia_css_process_group_t *parent); + +/*! Get the unique ID of program used by the process object + + @param process[in] process object + + @return ID, 0 on invalid argument + */ +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_H +ia_css_program_ID_t ia_css_process_get_program_ID( + const ia_css_process_t *process); + +/*! Get the state of the process object + + @param process[in] process object + + @return state, limit value (IA_CSS_N_PROCESS_STATES) on invalid argument + */ +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_H +ia_css_process_state_t ia_css_process_get_state( + const ia_css_process_t *process); + +/*! Set the state of the process object + + @param process[in] process object + @param state[in] state of the process + + @return < 0 on invalid argument + */ +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_H +int ia_css_process_set_state( + ia_css_process_t *process, + ia_css_process_state_t state); + +/*! 
Get the assigned cell of the the process object + + @param process[in] process object + + @return cell ID, limit value (VIED_NCI_N_CELL_ID) on invalid argument + */ +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_H +vied_nci_cell_ID_t ia_css_process_get_cell( + const ia_css_process_t *process); + +/*! Get the number of cells the process object depends on + + @param process[in] process object + + @return number of cells + */ +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_H +uint8_t ia_css_process_get_cell_dependency_count( + const ia_css_process_t *process); + +/*! Get the number of terminals the process object depends on + + @param process[in] process object + + @return number of terminals + */ +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_H +uint8_t ia_css_process_get_terminal_dependency_count( + const ia_css_process_t *process); + +/*! Set n-th cell dependency of a process object + + @param process[in] Process object + @param dep_index[in] dep index + @param id[in] dep id + + @return < 0 on invalid process argument + */ +extern int ia_css_process_set_cell_dependency( + const ia_css_process_t *process, + const unsigned int dep_index, + const vied_nci_resource_id_t id); + +/*! Get n-th cell dependency of a process object + + @param process[in] Process object + @param cell_num[in] n-th cell + + @return n-th cell dependency, + IA_CSS_PROCESS_INVALID_DEPENDENCY on invalid argument(s) +*/ +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_H +vied_nci_resource_id_t ia_css_process_get_cell_dependency( + const ia_css_process_t *process, + const unsigned int cell_num); + +/*! Set n-th terminal dependency of a process object + + @param process[in] Process object + @param dep_index[in] dep index + @param id[in] dep id + + @return < 0 on on invalid argument(s) + */ +extern int ia_css_process_set_terminal_dependency( + const ia_css_process_t *process, + const unsigned int dep_index, + const vied_nci_resource_id_t id); + +/*! 
Get n-th terminal dependency of a process object + + @param process[in] Process object + @param terminal_num[in] n-th cell + + @return n-th terminal dependency, + IA_CSS_PROCESS_INVALID_DEPENDENCY on invalid argument(s) +*/ +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_H +uint8_t ia_css_process_get_terminal_dependency( + const ia_css_process_t *process, + const unsigned int terminal_num); + +/*! Get the kernel bitmap of the the process object + + @param process[in] process object + + @return process kernel bitmap + */ +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_H +ia_css_kernel_bitmap_t ia_css_process_get_kernel_bitmap( + const ia_css_process_t *process); + +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_H +vied_nci_resource_bitmap_t* ia_css_process_get_dfm_port_bitmap_ptr( + ia_css_process_t *process); + +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_H +vied_nci_resource_bitmap_t* ia_css_process_get_dfm_active_port_bitmap_ptr( + ia_css_process_t *process); + + +/*! Get the cells bitmap of the the process object + + @param process[in] process object + + @return process cells bitmap + */ +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_H +vied_nci_resource_bitmap_t ia_css_process_get_cells_bitmap( + const ia_css_process_t *process); + +/*! Sets the dfm device resource allocation bitmap of + * the process object + + @param process[in] process object + @param dfm_dev_id[in] dfm device id + @param bitmap[in] resource bitmap + + @return < 0 on invalid argument(s) or process state + */ +int ia_css_process_set_dfm_port_bitmap( + ia_css_process_t *process, + const vied_nci_dev_dfm_id_t dfm_dev_id, + const vied_nci_resource_bitmap_t bitmap); + + +/*! 
Sets the active dfm ports bitmap of + * the process object + + @param process[in] process object + @param dfm_dev_id[in] dfm device id + @param bitmap[in] active ports bitmap + + @return < 0 on invalid argument(s) or process state + */ +int ia_css_process_set_dfm_active_port_bitmap( + ia_css_process_t *process, + const vied_nci_dev_dfm_id_t dfm_dev_id, + const vied_nci_resource_bitmap_t bitmap); + +/*! Get the dfm port bitmap of the the process object + + @param process[in] process object + @param dfm_res_id dfm resource id + + @return bitmap of all DFM ports used by process, corresponding to the input dfm resource id + */ +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_H +vied_nci_resource_bitmap_t ia_css_process_get_dfm_port_bitmap( + const ia_css_process_t *process, + vied_nci_dev_dfm_id_t dfm_res_id); + +/*! Get the dfm active port bitmap of the the process object + + @param process[in] process object + @param dfm_res_id[in] dfm resource id + + @return bitmap of all active DFM ports used by the process, corresponding to the input + dfm resource id + */ +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_H +vied_nci_resource_bitmap_t ia_css_process_get_dfm_active_port_bitmap( + const ia_css_process_t *process, + vied_nci_dev_dfm_id_t dfm_res_id); + + +/*! Sets the cells bitmap of + * the process object + + @param process[in] process object + @param bitmap[in] bitmap + + @return < 0 on invalid argument(s) or process state + */ +int ia_css_process_set_cells_bitmap( + ia_css_process_t *process, + const vied_nci_resource_bitmap_t bitmap); + +/*! Get the device channel id-n resource allocation offset of the process object + + @param process[in] process object + @param dev_chn_id[in] channel id + + @return resource offset, IA_CSS_PROCESS_INVALID_OFFSET on invalid argument(s) + */ +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_H +vied_nci_resource_size_t ia_css_process_get_dev_chn( + const ia_css_process_t *process, + const vied_nci_dev_chn_ID_t dev_chn_id); + +/*! 
Get the ext mem type-n resource id of the process object + + @param process[in] process object + @param mem_type[in] mem type + + @return resource offset, IA_CSS_PROCESS_INVALID_OFFSET on invalid argument(s) + */ +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_H +vied_nci_mem_ID_t ia_css_process_get_ext_mem_id( + const ia_css_process_t *process, + const vied_nci_mem_type_ID_t mem_type); + + +/*! Sets the device channel id-n resource allocation offset of + * the process object + + @param process[in] process object + @param dev_chn_id[in] channel id + @param offset[in] resource offset + + @return < 0 on invalid argument(s) or process state + */ +int ia_css_process_set_dev_chn( + ia_css_process_t *process, + const vied_nci_dev_chn_ID_t dev_chn_id, + const vied_nci_resource_size_t offset); + +/*! Boolean test if the process object type is valid + + @param process[in] process object + @param p_manifest[in] program manifest + + @return true if the process object is correct, false on error + */ +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_H +bool ia_css_is_process_valid( + const ia_css_process_t *process, + const ia_css_program_manifest_t *p_manifest); + +/*! 
Gets the program_idx from the process object + + @param process[in] process object + + @return program index + */ +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_H +uint32_t ia_css_process_get_program_idx( + const ia_css_process_t *process); + +#ifdef __IA_CSS_PSYS_DYNAMIC_INLINE__ +#include "ia_css_psys_process_impl.h" +#endif /* __IA_CSS_PSYS_DYNAMIC_INLINE__ */ + +#endif /* __IA_CSS_PSYS_PROCESS_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/psysapi/dynamic/interface/ia_css_psys_process.hsys.kernel.h b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/psysapi/dynamic/interface/ia_css_psys_process.hsys.kernel.h new file mode 100644 index 0000000000000..cab7965604146 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/psysapi/dynamic/interface/ia_css_psys_process.hsys.kernel.h @@ -0,0 +1,144 @@ +/* +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ + +#ifndef __IA_CSS_PSYS_PROCESS_HSYS_KERNEL_H +#define __IA_CSS_PSYS_PROCESS_HSYS_KERNEL_H + +/*! \file */ + +/** @file ia_css_psys_process.hsys.kernel.h + * + * Define the methods on the process object: Hsys kernel interface + */ + +#include + +#include + +/* + * Internal resources + */ + +/*! Clear all resource (offset) specifications + + @param process[in] process object + + @return < 0 on error + */ +extern int ia_css_process_clear_all(ia_css_process_t *process); + +/*! 
Set the cell ID resource specification + + @param process[in] process object + @param cell_id[in] cell ID + + @return < 0 on error + */ +extern int ia_css_process_set_cell( + ia_css_process_t *process, + const vied_nci_cell_ID_t cell_id); + +/*! Clear cell ID resource specification + + @param process[in] process object + + @return < 0 on error + */ +extern int ia_css_process_clear_cell(ia_css_process_t *process); + +/*! Set the memory resource (offset) specification for a memory + that belongs to the cell that is assigned to the process + + @param process[in] process object + @param mem_type_id[in] mem type ID + @param offset[in] offset + + Precondition: The cell ID must be set + + @return < 0 on error + */ +extern int ia_css_process_set_int_mem( + ia_css_process_t *process, + const vied_nci_mem_type_ID_t mem_type_id, + const vied_nci_resource_size_t offset); + +/*! Clear the memory resource (offset) specification for a memory + type that belongs to the cell that is assigned to the process + + @param process[in] process object + @param mem_id[in] mem ID + + Precondition: The cell ID must be set + + @return < 0 on error + */ +extern int ia_css_process_clear_int_mem( + ia_css_process_t *process, + const vied_nci_mem_type_ID_t mem_type_id); + +/*! Set the memory resource (offset) specification for a memory + that does not belong to the cell that is assigned to the process + + @param process[in] process object + @param mem_type_id[in] mem type ID + @param offset[in] offset + + Precondition: The cell ID must be set + + @return < 0 on error + */ +extern int ia_css_process_set_ext_mem( + ia_css_process_t *process, + const vied_nci_mem_ID_t mem_id, + const vied_nci_resource_size_t offset); + +/*! 
Clear the memory resource (offset) specification for a memory + type that does not belong to the cell that is assigned to the process + + @param process[in] process object + @param mem_id[in] mem ID + + Precondition: The cell ID must be set + + @return < 0 on error + */ +extern int ia_css_process_clear_ext_mem( + ia_css_process_t *process, + const vied_nci_mem_type_ID_t mem_type_id); + +/*! Set a device channel resource (offset) specification + + @param process[in] process object + @param dev_chn_id[in] device channel ID + @param offset[in] offset + + @return < 0 on error + */ +extern int ia_css_process_set_dev_chn( + ia_css_process_t *process, + const vied_nci_dev_chn_ID_t dev_chn_id, + const vied_nci_resource_size_t offset); + +/*! Clear a device channel resource (offset) specification + + @param process[in] process object + @param dev_chn_id[in] device channel ID + + @return < 0 on error + */ +extern int ia_css_process_clear_dev_chn( + ia_css_process_t *process, + const vied_nci_dev_chn_ID_t dev_chn_id); + +#endif /* __IA_CSS_PSYS_PROCESS_HSYS_KERNEL_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/psysapi/dynamic/interface/ia_css_psys_process.hsys.user.h b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/psysapi/dynamic/interface/ia_css_psys_process.hsys.user.h new file mode 100644 index 0000000000000..015a60b0e1afb --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/psysapi/dynamic/interface/ia_css_psys_process.hsys.user.h @@ -0,0 +1,85 @@ +/* +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. 
+ * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ + +#ifndef __IA_CSS_PSYS_PROCESS_HSYS_USER_H +#define __IA_CSS_PSYS_PROCESS_HSYS_USER_H + +/*! \file */ + +/** @file ia_css_psys_process.hsys.user.h + * + * Define the methods on the process object: Hsys user interface + */ + +#include /* ia_css_program_param_t */ + +#include +#include + +#include /* uint8_t */ + +/* + * Creation + */ + +/*! Compute the size of storage required for allocating the process object + + @param manifest[in] program manifest + @param param[in] program parameters + + @return 0 on error + */ +extern size_t ia_css_sizeof_process( + const ia_css_program_manifest_t *manifest, + const ia_css_program_param_t *param); + +/*! Create the process object + + @param raw_mem[in] pre allocated memory + @param manifest[in] program manifest + @param param[in] program parameters + + @return NULL on error + */ +extern ia_css_process_t *ia_css_process_create( + void *raw_mem, + const ia_css_program_manifest_t *manifest, + const ia_css_program_param_t *param, + const uint32_t program_idx); + +/*! Destroy (the storage of) the process object + + @param process[in] process object + + @return NULL + */ +extern ia_css_process_t *ia_css_process_destroy( + ia_css_process_t *process); + +/* + * Access functions + */ + +/*! 
Print the process object to file/stream + + @param process[in] process object + @param fid[out] file/stream handle + + @return < 0 on error + */ +extern int ia_css_process_print( + const ia_css_process_t *process, + void *fid); + +#endif /* __IA_CSS_PSYS_PROCESS_HSYS_USER_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/psysapi/dynamic/interface/ia_css_psys_process.psys.h b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/psysapi/dynamic/interface/ia_css_psys_process.psys.h new file mode 100644 index 0000000000000..ba1db574a4388 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/psysapi/dynamic/interface/ia_css_psys_process.psys.h @@ -0,0 +1,53 @@ +/* +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ + +#ifndef __IA_CSS_PSYS_PROCESS_PSYS_H +#define __IA_CSS_PSYS_PROCESS_PSYS_H + +/*! \file */ + +/** @file ia_css_psys_process.psys.h + * + * Define the methods on the process object: Psys embedded interface + */ + +#include + +/* + * Process manager + */ + +/*! Acquire the resources specified in process object + + @param process[in] process object + + Postcondition: This is a try process; if any of the + resources is not available, all successfully acquired + ones will be released and the function will return an + error + + @return < 0 on error + */ +extern int ia_css_process_acquire(ia_css_process_t *process); + +/*! 
Release the resources specificed in process object + + @param process[in] process object + + @return < 0 on error + */ +extern int ia_css_process_release(ia_css_process_t *process); + + +#endif /* __IA_CSS_PSYS_PROCESS_PSYS_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/psysapi/dynamic/interface/ia_css_psys_process_group.h b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/psysapi/dynamic/interface/ia_css_psys_process_group.h new file mode 100644 index 0000000000000..c0f6901adeb01 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/psysapi/dynamic/interface/ia_css_psys_process_group.h @@ -0,0 +1,366 @@ +/* +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ + +#ifndef __IA_CSS_PSYS_PROCESS_GROUP_H +#define __IA_CSS_PSYS_PROCESS_GROUP_H + +/*! \file */ + +/** @file ia_css_psys_process_group.h + * + * Define the methods on the process object that are not part of + * a single interface + */ +#include "ia_css_rbm.h" + +#include +#include + +#include /* uint8_t */ + +/* + * Creation + */ +#include + +/* + * Registration of user contexts / callback info + * External resources + * Sequencing resources + */ +#include + +/* + * Dispatcher + */ +#include + +/* + * Access to sub-structure handles / fields + */ + +#include "ia_css_terminal.h" + +/*! 
Get the number of fragments on the process group + + @param process_group[in] process group object + + Note: Future change is to have a fragment count per + independent subgraph + + @return the fragment count, 0 on error + */ +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_H +uint16_t ia_css_process_group_get_fragment_count( + const ia_css_process_group_t *process_group); + + +/*! Get the fragment state on the process group + + @param process_group[in] process group object + @param fragment_state[in] current fragment of processing + + @return -1 on error + */ +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_H +int ia_css_process_group_get_fragment_state( + const ia_css_process_group_t *process_group, + uint16_t *fragment_state); + +/*! Set the fragment state on the process group + + @param process_group[in] process group object + @param fragment_state[in] current fragment of processing + + @return -1 on error + */ +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_H +int ia_css_process_group_set_fragment_state( + ia_css_process_group_t *process_group, + uint16_t fragment_state); + +/*! Get the number of processes on the process group + + @param process_group[in] process group object + + @return the process count, 0 on error + */ +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_H +uint8_t ia_css_process_group_get_process_count( + const ia_css_process_group_t *process_group); + +/*! Get the number of terminals on the process group + + @param process_group[in] process group object + + Note: Future change is to have a terminal count per + independent subgraph + + @return the terminal count, 0 on error + */ +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_H +uint8_t ia_css_process_group_get_terminal_count( + const ia_css_process_group_t *process_group); + +/*! Get the PG load start timestamp + + @param process_group[in] process group object + + @return PG load start timestamp, 0 on error + */ +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_H +uint32_t ia_css_process_group_get_pg_load_start_ts( + const ia_css_process_group_t *process_group); + +/*! 
Get the PG load time in cycles + + @param process_group[in] process group object + + @return PG load time in cycles, 0 on error + */ +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_H +uint32_t ia_css_process_group_get_pg_load_cycles( + const ia_css_process_group_t *process_group); + +/*! Get the PG init time in cycles + + @param process_group[in] process group object + + @return PG init time in cycles, 0 on error + */ +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_H +uint32_t ia_css_process_group_get_pg_init_cycles( + const ia_css_process_group_t *process_group); + +/*! Get the PG processing time in cycles + + @param process_group[in] process group object + + @return PG processing time in cycles, 0 on error + */ +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_H +uint32_t ia_css_process_group_get_pg_processing_cycles( + const ia_css_process_group_t *process_group); + +/*! Get the (pointer to) the terminal of the process group object + + @param process_group[in] process group object + @param terminal_type[in] terminal type of terminal + + @return the pointer to the terminal, NULL on error + */ + +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_H +ia_css_terminal_t *ia_css_process_group_get_terminal_from_type( + const ia_css_process_group_t *process_group, + const ia_css_terminal_type_t terminal_type); + +/*! Get the (pointer to) the terminal of the process group object + * for terminals which have only a single instance + * (cached in, cached out, program, program_ctrl_init) + + @param process_group[in] process group object + @param terminal_type[in] terminal type of terminal + + @return the pointer to the terminal, NULL on error + */ +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_H +const ia_css_terminal_t *ia_css_process_group_get_single_instance_terminal( + const ia_css_process_group_t *process_group, + ia_css_terminal_type_t term_type); + +/*! 
Get the (pointer to) the indexed terminal of the process group object + + @param process_group[in] process group object + @param terminal_index[in] index of the terminal + + @return the pointer to the terminal, NULL on error + */ +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_H +ia_css_terminal_t *ia_css_process_group_get_terminal( + const ia_css_process_group_t *process_group, + const unsigned int terminal_index); + +/*! Get the (pointer to) the indexed process of the process group object + + @param process_group[in] process group object + @param process_index[in] index of the process + + @return the pointer to the process, NULL on error + */ +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_H +ia_css_process_t *ia_css_process_group_get_process( + const ia_css_process_group_t *process_group, + const unsigned int process_index); + +/*! Get the stored size of the process group object + + @param process_group[in] process group object + + @return size, 0 on error + */ +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_H +size_t ia_css_process_group_get_size( + const ia_css_process_group_t *process_group); + +/*! Get the state of the process group object + + @param process_group[in] process group object + + @return state, limit value on error + */ +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_H +ia_css_process_group_state_t ia_css_process_group_get_state( + const ia_css_process_group_t *process_group); + +/*! Get the unique ID of program group used by the process group object + + @param process_group[in] process group object + + @return ID, 0 on error + */ +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_H +ia_css_program_group_ID_t ia_css_process_group_get_program_group_ID( + const ia_css_process_group_t *process_group); + +/*! Get the resource bitmap of the process group + + @param process_group[in] process group object + + @return the resource bitmap + */ +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_H +vied_nci_resource_bitmap_t ia_css_process_group_get_resource_bitmap( + const ia_css_process_group_t *process_group); + +/*! 
Set the resource bitmap of the process group + + @param process_group[in] process group object + @param resource_bitmap[in] the resource bitmap + + @return < 0 on error + */ +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_H +int ia_css_process_group_set_resource_bitmap( + ia_css_process_group_t *process_group, + const vied_nci_resource_bitmap_t resource_bitmap); + +/*! Get the routing bitmap of the process group + + @param process_group[in] process group object + + @return routing bitmap (pointer) + */ +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_H +const ia_css_rbm_t *ia_css_process_group_get_routing_bitmap( + const ia_css_process_group_t *process_group); + +/*! Set the routing bitmap of the process group + + @param process_group[in] process group object + @param rbm[in] routing bitmap + + @return < 0 on error + */ +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_H +int ia_css_process_group_set_routing_bitmap( + ia_css_process_group_t *process_group, + const ia_css_rbm_t rbm); + +/*! Get IPU virtual address of process group + + @param process_group[in] process group object + @param ipu_vaddress[in/out] process group ipu virtual address + + @return -1 on error + */ +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_H +int ia_css_process_group_get_ipu_vaddress( + const ia_css_process_group_t *process_group, + vied_vaddress_t *ipu_vaddress); + +/*! Set IPU virtual address of process group + + @param process_group[in] process group object + @param ipu_vaddress[in] process group ipu address + + @return -1 on error + */ +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_H +int ia_css_process_group_set_ipu_vaddress( + ia_css_process_group_t *process_group, + vied_vaddress_t ipu_vaddress); + +/*! Get protocol version used by a process group + + @param process_group[in] process group object + + @return invalid protocol version on error + */ +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_H +uint8_t ia_css_process_group_get_protocol_version( + const ia_css_process_group_t *process_group); + +/*! 
Get base queue id used by a process group + + @param process_group[in] process group object + + @return -1 on error + */ +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_H +uint8_t ia_css_process_group_get_base_queue_id( + ia_css_process_group_t *process_group); + +/*! Set base queue id used by a process group + + @param process_group[in] process group object + @param queue_id[in] process group queue id + + @return invalid queue id on error + */ +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_H +int ia_css_process_group_set_base_queue_id( + ia_css_process_group_t *process_group, + uint8_t queue_id); + +/*! Get number of queues used by a process group + + @param process_group[in] process group object + + @return invalid number of queues (0) on error + */ +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_H +uint8_t ia_css_process_group_get_num_queues( + ia_css_process_group_t *process_group); + +/*! Set number of queues used by a process group + + @param process_group[in] process group object + @param num_queues[in] process group number of queues + + @return -1 on error + */ +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_H +int ia_css_process_group_set_num_queues( + ia_css_process_group_t *process_group, + uint8_t num_queues); + +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_H +bool ia_css_process_group_has_vp(const ia_css_process_group_t *process_group); + +#ifdef __IA_CSS_PSYS_DYNAMIC_INLINE__ +#include "ia_css_psys_process_group_impl.h" +#endif /* __IA_CSS_PSYS_DYNAMIC_INLINE__ */ + +#endif /* __IA_CSS_PSYS_PROCESS_GROUP_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/psysapi/dynamic/interface/ia_css_psys_process_group.hsys.kernel.h b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/psysapi/dynamic/interface/ia_css_psys_process_group.hsys.kernel.h new file mode 100644 index 0000000000000..93cce2555de9f --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/psysapi/dynamic/interface/ia_css_psys_process_group.hsys.kernel.h @@ -0,0 +1,324 @@ +/* +* Support for Intel Camera Imaging ISP 
subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ + +#ifndef __IA_CSS_PSYS_PROCESS_GROUP_HSYS_KERNEL_H +#define __IA_CSS_PSYS_PROCESS_GROUP_HSYS_KERNEL_H + +/*! \file */ + +/** @file ia_css_psys_process_group.hsys.kernel.h + * + * Define the methods on the process group object: Hsys kernel interface + */ + +#include + +#include +#include + +#include /* uint8_t */ + +/* + * Registration of user contexts / callback info + */ + +/*! Get the user (callback) token as registered in the process group + + @param process_group[in] process group object + + @return 0 on error + */ +extern uint64_t ia_css_process_group_get_token( + ia_css_process_group_t *process_group); + +/*! Set (register) a user (callback) token in the process group + + @param process_group[in] process group object + @param token[in] user token + + Note: The token value shall be non-zero. This token is + returned in each return message related to the process + group the token is registered with. + + @return < 0 on error + */ +extern int ia_css_process_group_set_token( + ia_css_process_group_t *process_group, + const uint64_t token); + +/* + * Passing of a (fragment) watermark + */ + +/*! Get the fragment progress limit of the process group + + @param process_group[in] process group object + + @return 0 on error + */ +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_H +uint16_t ia_css_process_group_get_fragment_limit( + const ia_css_process_group_t *process_group); + +/*! 
Set the new fragment progress limit of the process group + + @param process_group[in] process group object + @param fragment_limit[in] New limit value + + Note: The limit value must be less or equal to the fragment + count value. The process group will not make progress beyond + the limit value. The limit value can be modified asynchronously + If the limit value is reached before an update happens, the + process group will suspend and will not automatically resume. + + The limit is monotonically increasing. The default value is + equal to the fragment count + + @return < 0 on error + */ +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_H +int ia_css_process_group_set_fragment_limit( + ia_css_process_group_t *process_group, + const uint16_t fragment_limit); + +/*! Clear the fragment progress limit of the process group + + @param process_group[in] process group object + + Note: This function sets the fragment limit to zero. + + @return < 0 on error + */ +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_H +int ia_css_process_group_clear_fragment_limit( + ia_css_process_group_t *process_group); + +/* + * Commands + */ + +/*! Perform the start command on the process group + + @param process_group[in] process group object + + Note: Start is an action of the l-Scheduler it makes the + process group eligible for execution + + Precondition: The external resources that are attached to + the process group must be in the correct state, i.e. input + buffers are not-empty and output buffers not-full + + @return < 0 on error + */ +extern int ia_css_process_group_start( + ia_css_process_group_t *process_group); + +/*! Perform the suspend command on the process group + + @param process_group[in] process group object + + Note: Suspend indicates that the process group execution + is halted at the next fragment boundary. 
The process group + will not automatically resume + + Precondition: The process group must be running + + @return < 0 on error + */ +extern int ia_css_process_group_suspend( + ia_css_process_group_t *process_group); + +/*! Perform the resume command on the process group + + @param process_group[in] process group object + + Note: Resume indicates that the process group is again + eligible for execution + + Precondition: The process group must be started + + @return < 0 on error + */ +extern int ia_css_process_group_resume( + ia_css_process_group_t *process_group); + +/*! Perform the reset command on the process group + + @param process_group[in] process group object + + Note: Return the process group to the started state + + Precondition: The process group must be running or stopped + + @return < 0 on error + */ +extern int ia_css_process_group_reset( + ia_css_process_group_t *process_group); + +/*! Perform the abort command on the process group + + @param process_group[in] process group object + + Note: Force the process group to the stopped state + + Precondition: The process group must be running or started + + @return < 0 on error + */ +extern int ia_css_process_group_abort( + ia_css_process_group_t *process_group); + +/*! Release ownership of the process group + + @param process_group[in] process group object + + Note: Release notifies PSYS and hands over ownership of the + process group from SW to FW + + Precondition: The process group must be in the started state + + @return < 0 on error + */ +extern int ia_css_process_group_disown( + ia_css_process_group_t *process_group); + +/* + * External resources + */ + +/*! 
Set (register) a data buffer to the indexed terminal in the process group + + @param process_group[in] process group object + @param buffer[in] buffer handle + @param buffer_state[in] state of the buffer + @param terminal_index[in] index of the terminal + + Note: The buffer handle shall not be VIED_NULL, the buffer + state can be undefined; BUFFER_UNDEFINED + + Note: The buffer can be in memory or streaming over memory + + @return < 0 on error + */ +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_H +int ia_css_process_group_attach_buffer( + ia_css_process_group_t *process_group, + vied_vaddress_t buffer, + const ia_css_buffer_state_t buffer_state, + const unsigned int terminal_index); + +/*! Get (unregister) the data buffer on the indexed terminal of + * the process group + + @param process_group[in] process group object + @param terminal_index[in] index of the terminal + + Precondition: The process group must be stopped + + Postcondition: The buffer handle shall be reset to VIED_NULL, the buffer + state to BUFFER_NULL + + @return VIED_NULL on error + */ +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_H +vied_vaddress_t ia_css_process_group_detach_buffer( + ia_css_process_group_t *process_group, + const unsigned int terminal_index); + +/*! Set (register) a data buffer to the indexed terminal in the process group + + @param process_group[in] process group object + @param stream[in] stream handle + @param buffer_state[in] state of the buffer + @param terminal_index[in] index of the terminal + + Note: The stream handle shall not be zero, the buffer + state can be undefined; BUFFER_UNDEFINED + + Note: The stream is used exclusive to a buffer; the latter can be in memory + or streaming over memory + + @return < 0 on error + */ +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_H +int ia_css_process_group_attach_stream( + ia_css_process_group_t *process_group, + uint32_t stream, + const ia_css_buffer_state_t buffer_state, + const unsigned int terminal_index); + +/*! 
Get (unregister) the stream handle on the indexed terminal of + * the process group + + @param process_group[in] process group object + @param terminal_index[in] index of the terminal + + Precondition: The process group must be stopped + + Postcondition: The stream handle shall be reset to zero, the buffer + state to BUFFER_NULL + + @return 0 on error + */ +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_H +uint32_t ia_css_process_group_detach_stream( + ia_css_process_group_t *process_group, + const unsigned int terminal_index); + +/* + * Sequencing resources + */ + +/*! Set a(n artificial) blocking resource (barrier) in + * the process group resource map + + @param process_group[in] process group object + @param barrier_index[in] index of the barrier + + Note: The barriers have to be set to force sequence between started + process groups + + @return < 0 on error + */ +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_H +int ia_css_process_group_set_barrier( + ia_css_process_group_t *process_group, + const vied_nci_barrier_ID_t barrier_index); + +/*! Clear a previously set blocking resource (barrier) in + * the process group resource map + + @param process_group[in] process group object + @param barrier_index[in] index of the barrier + + Precondition: The barriers must have been set + + @return < 0 on error + */ +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_H +int ia_css_process_group_clear_barrier( + ia_css_process_group_t *process_group, + const vied_nci_barrier_ID_t barrier_index); + +/*! 
Boolean test if the process group preconditions for start are satisfied + + @param process_group[in] process group object + + @return true if the process group can be started + */ +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_H +bool ia_css_can_process_group_start( + const ia_css_process_group_t *process_group); + +#endif /* __IA_CSS_PSYS_PROCESS_GROUP_HSYS_KERNEL_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/psysapi/dynamic/interface/ia_css_psys_process_group.hsys.user.h b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/psysapi/dynamic/interface/ia_css_psys_process_group.hsys.user.h new file mode 100644 index 0000000000000..dfbcc8815c1ef --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/psysapi/dynamic/interface/ia_css_psys_process_group.hsys.user.h @@ -0,0 +1,199 @@ +/* +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ + +#ifndef __IA_CSS_PSYS_PROCESS_GROUP_HSYS_USER_H +#define __IA_CSS_PSYS_PROCESS_GROUP_HSYS_USER_H + +/*! \file */ + +/** @file ia_css_psys_process_group.hsys.user.h + * + * Define the methods on the process group object: Hsys user interface + */ + +#include /* ia_css_program_group_param_t */ + +#include +#include +#include + +#include "ia_css_psys_dynamic_storage_class.h" + +#include /* uint8_t */ + +/* + * Creation + */ + +/*! 
Compute the size of storage required for allocating the process group object + + @param manifest[in] program group manifest + @param param[in] program group parameters + + @return 0 on error + */ +extern size_t ia_css_sizeof_process_group( + const ia_css_program_group_manifest_t *manifest, + const ia_css_program_group_param_t *param); + +/*! Create (the storage for) the process group object + + @param process_grp_mem[in/out] raw memory for process group + @param manifest[in] program group manifest + @param param[in] program group parameters + + @return NULL on error + */ +extern ia_css_process_group_t *ia_css_process_group_create( + void *process_grp_mem, + const ia_css_program_group_manifest_t *manifest, + const ia_css_program_group_param_t *param); + +/*! Destroy (the storage of) the process group object + + @param process_group[in] process group object + + @return NULL + */ +extern ia_css_process_group_t *ia_css_process_group_destroy( + ia_css_process_group_t *process_group); + +/*! Print the process group object to file/stream + + @param process_group[in] process group object + @param fid[out] file/stream handle + + @return < 0 on error + */ +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_H +int ia_css_process_group_print( + const ia_css_process_group_t *process_group, + void *fid); + +/* + * Commands + */ + +/*! Perform the submit command on the process group + + @param process_group[in] process group object + + Note: Submit is an action of the h-Scheduler it makes the + process group eligible for the l-Scheduler + + Precondition: The external resources must be attached to + the process group + + @return < 0 on error + */ +extern int ia_css_process_group_submit( + ia_css_process_group_t *process_group); + +/*! 
Boolean test if the process group object type is valid + + @param process_group[in] process group object + @param manifest[in] program group manifest + @param param[in] program group parameters + + @return true if the process group is correct, false on error + */ +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_H +bool ia_css_is_process_group_valid( + const ia_css_process_group_t *process_group, + const ia_css_program_group_manifest_t *manifest, + const ia_css_program_group_param_t *param); + +/*! Boolean test if the process group preconditions for submit are satisfied + + @param process_group[in] process group object + + @return true if the process group can be submitted + */ +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_H +bool ia_css_can_process_group_submit( + const ia_css_process_group_t *process_group); + +/*! Boolean test if the preconditions on process group and buffer set are + satisfied for enqueuing buffer set + + @param process_group[in] process group object + @param buffer_set[in] buffer set object + + @return true if the buffer set can be enqueued + */ +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_H +bool ia_css_can_enqueue_buffer_set( + const ia_css_process_group_t *process_group, + const ia_css_buffer_set_t *buffer_set); + +/*! Compute the cyclecount required for executing the process group object + + @param manifest[in] program group manifest + @param param[in] program group parameters + + @return 0 on error + */ +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_H +uint32_t ia_css_process_group_compute_cycle_count( + const ia_css_program_group_manifest_t *manifest, + const ia_css_program_group_param_t *param); + +/*! Compute the number of processes required for + * executing the process group object + + @param manifest[in] program group manifest + @param param[in] program group parameters + + @return 0 on error + */ +extern uint8_t ia_css_process_group_compute_process_count( + const ia_css_program_group_manifest_t *manifest, + const ia_css_program_group_param_t *param); + +/*! 
Compute the number of terminals required for + * executing the process group object + + @param manifest[in] program group manifest + @param param[in] program group parameters + + @return 0 on error + */ +extern uint8_t ia_css_process_group_compute_terminal_count( + const ia_css_program_group_manifest_t *manifest, + const ia_css_program_group_param_t *param); + +/*! Get private token as registered in the process group by the implementation + + @param process_group[in] process group object + + @return 0 on error + */ +extern uint64_t ia_css_process_group_get_private_token( + ia_css_process_group_t *process_group); + +/*! Set private token in the process group as needed by the implementation + + @param process_group[in] process group object + @param token[in] user token + + Note: The token value shall be non-zero. This token is private + to the implementation. This is in addition to the user token + + @return < 0 on error, 0 on success + */ +extern int ia_css_process_group_set_private_token( + ia_css_process_group_t *process_group, + const uint64_t token); + +#endif /* __IA_CSS_PSYS_PROCESS_GROUP_HSYS_USER_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/psysapi/dynamic/interface/ia_css_psys_process_group.psys.h b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/psysapi/dynamic/interface/ia_css_psys_process_group.psys.h new file mode 100644 index 0000000000000..6ceccfc2f9bc3 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/psysapi/dynamic/interface/ia_css_psys_process_group.psys.h @@ -0,0 +1,60 @@ +/* +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. 
+ * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ + +#ifndef __IA_CSS_PSYS_PROCESS_GROUP_PSYS_H +#define __IA_CSS_PSYS_PROCESS_GROUP_PSYS_H + +/*! \file */ + +/** @file ia_css_psys_process_group.psys.h + * + * Define the methods on the process group object: Psys embedded interface + */ + +#include + +/* + * Dispatcher + */ + +/*! Perform the run command on the process group + + @param process_group[in] process group object + + Note: Run indicates that the process group will execute + + Precondition: The process group must be started or + suspended and the processes have acquired the necessary + internal resources + + @return < 0 on error + */ +extern int ia_css_process_group_run( + ia_css_process_group_t *process_group); + +/*! Perform the stop command on the process group + + @param process_group[in] process group object + + Note: Stop indicates that the process group has completed execution + + Postcondition: The external resources can now be detached + + @return < 0 on error + */ +extern int ia_css_process_group_stop( + ia_css_process_group_t *process_group); + + +#endif /* __IA_CSS_PSYS_PROCESS_GROUP_PSYS_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/psysapi/dynamic/interface/ia_css_psys_process_group_cmd_impl.h b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/psysapi/dynamic/interface/ia_css_psys_process_group_cmd_impl.h new file mode 100644 index 0000000000000..530f93ef6ce03 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/psysapi/dynamic/interface/ia_css_psys_process_group_cmd_impl.h @@ -0,0 +1,178 @@ +/* +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. 
+ * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ + +#ifndef __IA_CSS_PSYS_PROCESS_GROUP_CMD_IMPL_H +#define __IA_CSS_PSYS_PROCESS_GROUP_CMD_IMPL_H + +#include "type_support.h" +#include "ia_css_psys_process_group.h" +#include "ia_css_rbm_manifest_types.h" + +#define N_UINT64_IN_PROCESS_GROUP_STRUCT 2 +#define N_UINT32_IN_PROCESS_GROUP_STRUCT 5 +#define N_UINT16_IN_PROCESS_GROUP_STRUCT 5 +#define N_UINT8_IN_PROCESS_GROUP_STRUCT 7 +#define N_PADDING_UINT8_IN_PROCESS_GROUP_STRUCT 3 + +#define SIZE_OF_PROCESS_GROUP_STRUCT_BITS \ + (IA_CSS_RBM_BITS \ + + N_UINT64_IN_PROCESS_GROUP_STRUCT * IA_CSS_UINT64_T_BITS \ + + N_UINT32_IN_PROCESS_GROUP_STRUCT * IA_CSS_UINT32_T_BITS \ + + IA_CSS_PROGRAM_GROUP_ID_BITS \ + + IA_CSS_PROCESS_GROUP_STATE_BITS \ + + VIED_VADDRESS_BITS \ + + VIED_NCI_RESOURCE_BITMAP_BITS \ + + N_UINT16_IN_PROCESS_GROUP_STRUCT * IA_CSS_UINT16_T_BITS \ + + N_UINT8_IN_PROCESS_GROUP_STRUCT * IA_CSS_UINT8_T_BITS \ + + N_PADDING_UINT8_IN_PROCESS_GROUP_STRUCT * IA_CSS_UINT8_T_BITS) + +struct ia_css_process_group_s { + /**< User (callback) token / user context reference, + * zero is an error value + */ + uint64_t token; + /**< private token / context reference, zero is an error value */ + uint64_t private_token; + /**< PG routing bitmap used to set connection between programs >*/ + ia_css_rbm_t routing_bitmap; + /**< Size of this structure */ + uint32_t size; + /**< The timestamp when PG load starts */ + uint32_t pg_load_start_ts; + /**< PG load time in cycles */ + uint32_t pg_load_cycles; + /**< PG init time in cycles */ + uint32_t pg_init_cycles; + /**< PG 
processing time in cycles */ + uint32_t pg_processing_cycles; + /**< Referral ID to program group FW */ + ia_css_program_group_ID_t ID; + /**< State of the process group FSM */ + ia_css_process_group_state_t state; + /**< Virtual address of process group in IPU */ + vied_vaddress_t ipu_virtual_address; + /**< Bitmap of the compute resources used by the process group */ + vied_nci_resource_bitmap_t resource_bitmap; + /**< Number of fragments offered on each terminal */ + uint16_t fragment_count; + /**< Current fragment of processing */ + uint16_t fragment_state; + /**< Watermark to control fragment processing */ + uint16_t fragment_limit; + /**< Array[process_count] of process addresses in this process group */ + uint16_t processes_offset; + /**< Array[terminal_count] of terminal addresses on this process group */ + uint16_t terminals_offset; + /**< Parameter dependent number of processes in this process group */ + uint8_t process_count; + /**< Parameter dependent number of terminals on this process group */ + uint8_t terminal_count; + /**< Parameter dependent number of independent subgraphs in + * this process group + */ + uint8_t subgraph_count; + /**< Process group protocol version */ + uint8_t protocol_version; + /**< Dedicated base queue id used for enqueueing payload buffer sets */ + uint8_t base_queue_id; + /**< Number of dedicated queues used */ + uint8_t num_queues; + /**< Mask the send_pg_done IRQ */ + uint8_t mask_irq; + /**< Padding for 64bit alignment */ + uint8_t padding[N_PADDING_UINT8_IN_PROCESS_GROUP_STRUCT]; +}; + +/*! Callback after process group is created. Implementations can provide + * suitable actions needed when process group is created. 
+ + @param process_group[in] process group object + @param program_group_manifest[in] program group manifest + @param program_group_param[in] program group parameters + + @return 0 on success and non-zero on failure + */ +extern int ia_css_process_group_on_create( + ia_css_process_group_t *process_group, + const ia_css_program_group_manifest_t *program_group_manifest, + const ia_css_program_group_param_t *program_group_param); + +/*! Callback before process group is about to be destroyed. Any implementation + * specific cleanups can be done here. + + @param process_group[in] process group object + + @return 0 on success and non-zero on failure + */ +extern int ia_css_process_group_on_destroy( + ia_css_process_group_t *process_group); + +/* + * Command processor + */ + +/*! Execute a command locally or send it to be processed remotely + + @param process_group[in] process group object + @param cmd[in] command + + @return < 0 on error + */ +extern int ia_css_process_group_exec_cmd( + ia_css_process_group_t *process_group, + const ia_css_process_group_cmd_t cmd); + + +/*! Enqueue a buffer set corresponding to a persistent program group by + * sending a command to subsystem. + + @param process_group[in] process group object + @param buffer_set[in] buffer set + @param queue_offset[in] offset to be used from the queue id + specified in the process group object + (0 for first buffer set for frame, 1 + for late binding) + + @return < 0 on error + */ +extern int ia_css_enqueue_buffer_set( + ia_css_process_group_t *process_group, + ia_css_buffer_set_t *buffer_set, + unsigned int queue_offset); + +/*! Enqueue a parameter buffer set corresponding to a persistent program + * group by sending a command to subsystem. + + @param process_group[in] process group object + @param buffer_set[in] parameter buffer set + + @return < 0 on error + */ +extern int ia_css_enqueue_param_buffer_set( + ia_css_process_group_t *process_group, + ia_css_buffer_set_t *buffer_set); + +/*! 
Need to store the 'secure' mode for each PG for FW test app only + * + * @param process_group[in] process group object + * @param secure[in] secure mode flag + * + * @return < 0 on error + */ +extern int ia_css_process_group_store( + ia_css_process_group_t *process_group, + bool secure); + + +#endif /* __IA_CSS_PSYS_PROCESS_GROUP_CMD_IMPL_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/psysapi/dynamic/interface/ia_css_psys_process_types.h b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/psysapi/dynamic/interface/ia_css_psys_process_types.h new file mode 100644 index 0000000000000..4fb064dc00df6 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/psysapi/dynamic/interface/ia_css_psys_process_types.h @@ -0,0 +1,95 @@ +/* +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ + +#ifndef __IA_CSS_PSYS_PROCESS_TYPES_H +#define __IA_CSS_PSYS_PROCESS_TYPES_H + +/*! 
\file */ + +/** @file ia_css_psys_process_types.h + * + * The types belonging to the terminal/process/process group dynamic module + */ + +#include +#include + +#include + +#define IA_CSS_PROCESS_INVALID_PROGRAM_IDX ((uint32_t)-1) + +/* private */ +typedef enum ia_css_process_group_cmd { + IA_CSS_PROCESS_GROUP_CMD_NOP = 0, + IA_CSS_PROCESS_GROUP_CMD_SUBMIT, + IA_CSS_PROCESS_GROUP_CMD_ATTACH, + IA_CSS_PROCESS_GROUP_CMD_DETACH, + IA_CSS_PROCESS_GROUP_CMD_START, + IA_CSS_PROCESS_GROUP_CMD_DISOWN, + IA_CSS_PROCESS_GROUP_CMD_RUN, + IA_CSS_PROCESS_GROUP_CMD_STOP, + IA_CSS_PROCESS_GROUP_CMD_SUSPEND, + IA_CSS_PROCESS_GROUP_CMD_RESUME, + IA_CSS_PROCESS_GROUP_CMD_ABORT, + IA_CSS_PROCESS_GROUP_CMD_RESET, + IA_CSS_N_PROCESS_GROUP_CMDS +} ia_css_process_group_cmd_t; + +/* private */ +#define IA_CSS_PROCESS_GROUP_STATE_BITS 32 +typedef enum ia_css_process_group_state { + IA_CSS_PROCESS_GROUP_ERROR = 0, + IA_CSS_PROCESS_GROUP_CREATED, + IA_CSS_PROCESS_GROUP_READY, + IA_CSS_PROCESS_GROUP_BLOCKED, + IA_CSS_PROCESS_GROUP_STARTED, + IA_CSS_PROCESS_GROUP_RUNNING, + IA_CSS_PROCESS_GROUP_STALLED, + IA_CSS_PROCESS_GROUP_STOPPED, + IA_CSS_N_PROCESS_GROUP_STATES +} ia_css_process_group_state_t; + +/* private */ +typedef enum ia_css_process_cmd { + IA_CSS_PROCESS_CMD_NOP = 0, + IA_CSS_PROCESS_CMD_ACQUIRE, + IA_CSS_PROCESS_CMD_RELEASE, + IA_CSS_PROCESS_CMD_START, + IA_CSS_PROCESS_CMD_LOAD, + IA_CSS_PROCESS_CMD_STOP, + IA_CSS_PROCESS_CMD_SUSPEND, + IA_CSS_PROCESS_CMD_RESUME, + IA_CSS_N_PROCESS_CMDS +} ia_css_process_cmd_t; + +/* private */ +#define IA_CSS_PROCESS_STATE_BITS 32 +typedef enum ia_css_process_state { + IA_CSS_PROCESS_ERROR = 0, + IA_CSS_PROCESS_CREATED, + IA_CSS_PROCESS_READY, + IA_CSS_PROCESS_STARTED, + IA_CSS_PROCESS_RUNNING, + IA_CSS_PROCESS_STOPPED, + IA_CSS_PROCESS_SUSPENDED, + IA_CSS_N_PROCESS_STATES +} ia_css_process_state_t; + +/* public */ +typedef struct ia_css_process_group_s ia_css_process_group_t; +typedef struct ia_css_process_s ia_css_process_t; + +typedef struct 
ia_css_data_terminal_s ia_css_data_terminal_t; + +#endif /* __IA_CSS_PSYS_PROCESS_TYPES_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/psysapi/dynamic/interface/ia_css_psys_terminal.h b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/psysapi/dynamic/interface/ia_css_psys_terminal.h new file mode 100644 index 0000000000000..abf398299d166 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/psysapi/dynamic/interface/ia_css_psys_terminal.h @@ -0,0 +1,316 @@ +/* +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ + +#ifndef __IA_CSS_PSYS_TERMINAL_H +#define __IA_CSS_PSYS_TERMINAL_H + +/*! \file */ + +/** @file ia_css_psys_terminal.h + * + * Define the methods on the terminal object that are not part of + * a single interface + */ + +#include /* ia_css_frame_t */ +#include /* ia_css_program_group_param_t */ + +#include +#include + +#include /* bool */ +#include /* FILE */ +#include "ia_css_psys_dynamic_storage_class.h" +#include "ia_css_terminal.h" +#include "ia_css_terminal_manifest_base_types.h" + +/* + * Creation + */ +#include + +/*! Boolean test if the terminal object type is input + + @param terminal[in] terminal object + + @return true if the terminal is input, false otherwise or on error + */ +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_H +bool ia_css_is_terminal_input( + const ia_css_terminal_t *terminal); + +/*! 
Get the stored size of the terminal object + + @param terminal[in] terminal object + + @return size, 0 on error + */ +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_H +size_t ia_css_terminal_get_size( + const ia_css_terminal_t *terminal); + +/*! Get the type of the terminal object + + @param terminal[in] terminal object + + @return the type of the terminal, limit value on error + */ +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_H +ia_css_terminal_type_t ia_css_terminal_get_type( + const ia_css_terminal_t *terminal); + +/*! Set the type of the terminal object + + @param terminal[in] terminal object + @param terminal_type[in] type of the terminal + + @return < 0 on error + */ +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_H +int ia_css_terminal_set_type( + ia_css_terminal_t *terminal, + const ia_css_terminal_type_t terminal_type); + +/*! Get the index of the terminal manifest object + + @param terminal[in] terminal object + + @return the index of the terminal manifest object, limit value on error + */ +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_H +uint16_t ia_css_terminal_get_terminal_manifest_index( + const ia_css_terminal_t *terminal); + +/*! Set the index of the terminal manifest object + + @param terminal[in] terminal object + @param tm_index[in] terminal manifest index + + @return < 0 on error + */ +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_H +int ia_css_terminal_set_terminal_manifest_index( + ia_css_terminal_t *terminal, + const uint16_t tm_index); + +/*! Get id of the terminal object + + @param terminal[in] terminal object + + @return id of terminal + */ +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_H +ia_css_terminal_ID_t ia_css_terminal_get_ID( + const ia_css_terminal_t *terminal); + +/*! Get kernel id of the data terminal object + + @param dterminal[in] data terminal object + + @return kernel id of terminal + */ +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_H +uint8_t ia_css_data_terminal_get_kernel_id( + const ia_css_data_terminal_t *dterminal); + +/*! 
Get the connection type from the terminal object + + @param terminal[in] terminal object + + @return buffer type, limit value on error + */ +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_H +ia_css_connection_type_t ia_css_data_terminal_get_connection_type( + const ia_css_data_terminal_t *dterminal); + +/*! Set the connection type of the terminal object + + @param terminal[in] terminal object + @param connection_type[in] connection type + + @return < 0 on error + */ +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_H +int ia_css_data_terminal_set_connection_type( + ia_css_data_terminal_t *dterminal, + const ia_css_connection_type_t connection_type); + +/*! Get link id of the data terminal object + + @param dterminal[in] data terminal object + + @return link id of terminal + */ +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_H +uint8_t ia_css_data_terminal_get_link_id( + const ia_css_data_terminal_t *dterminal); + + +/*! Set link id of the terminal object + + @param terminal[in] data terminal object + @param link_id[in] synchronization link id + + @return < 0 on error + */ +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_H +int ia_css_data_terminal_set_link_id( + ia_css_data_terminal_t *dterminal, + const uint8_t link_id); + +/*! Get the (pointer to) the process group parent of the terminal object + + @param terminal[in] terminal object + + @return the pointer to the parent, NULL on error + */ +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_H +ia_css_process_group_t *ia_css_terminal_get_parent( + const ia_css_terminal_t *terminal); + +/*! Set the (pointer to) the process group parent of the terminal object + + @param terminal[in] terminal object + @param parent[in] (pointer to the) process group parent object + + @return < 0 on error + */ +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_H +int ia_css_terminal_set_parent( + ia_css_terminal_t *terminal, + ia_css_process_group_t *parent); + +/*! 
Boolean test if the terminal object type is valid + + @param terminal[in] process terminal object + @param terminal_manifest[in] program terminal manifest + + @return true if the process terminal object is correct, false on error + */ +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_H +bool ia_css_is_terminal_valid( + const ia_css_terminal_t *terminal, + const ia_css_terminal_manifest_t *terminal_manifest); + +/* ================= Program Control Init Terminal - START ================= */ + +/*! + * Gets the program init terminal descriptor size + * @param manifest[in] program control init terminal manifest + * @return size, error if < 0. + */ +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_H +unsigned int +ia_css_program_control_init_terminal_get_descriptor_size( + const ia_css_program_control_init_terminal_manifest_t *manifest); + +/*! + * Initialize program control init terminal + * @param nof_fragments[in] Number of fragments + * @param terminal[in] program control init terminal + * @param manifest[in] program control init terminal manifest + * @return < 0 on error + */ +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_H +int +ia_css_program_control_init_terminal_init( + ia_css_program_control_init_terminal_t *terminal, + const ia_css_program_control_init_terminal_manifest_t *manifest); + +/*! + * Get a program desc for a program control init terminal + * @param terminal[in] program control init terminal + * @param manifest[in] program control init terminal manifest + * @return < 0 on error + */ +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_H +ia_css_program_control_init_program_desc_t * +ia_css_program_control_init_terminal_get_program_desc( + const ia_css_program_control_init_terminal_t *prog_ctrl_init_terminal, + const unsigned int program_index +); + +/*! 
+ + * Pretty prints the program control init terminal + * @param terminal[in] program control init terminal + */ +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_H +void ia_css_program_control_init_terminal_print( + const ia_css_program_control_init_terminal_t *terminal); + +/*! + * Gets a load section desc for a program desc + * of a program control init terminal + * @param program_desc[in] program control init terminal program desc + * @param load_section_index[in] section index + */ +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_H +ia_css_program_control_init_load_section_desc_t * +ia_css_program_control_init_terminal_get_load_section_desc( + const ia_css_program_control_init_program_desc_t *program_desc, + const unsigned int load_section_index +); + +/*! + * Gets process_id from program desc + * of a program control init terminal + * @param program_desc[in] program control init terminal program desc + */ +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_C +ia_css_process_id_t ia_css_program_control_init_terminal_get_process_id( + const ia_css_program_control_init_program_desc_t *program_desc); + +/*! + * Set control info of program desc + * of a program control init terminal + * @param program_desc[in] program control init terminal program desc + * @param process_id unique process id used to identify the process + * among all active process + * @param num_done_events number of events required to close the process + */ +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_H +void ia_css_program_control_init_terminal_set_control_info( + ia_css_program_control_init_program_desc_t *program_desc, + ia_css_process_id_t process_id, + uint8_t num_done_events); + +/*! + * Gets num_done_events value from program desc + * of a program control init terminal + * @param program_desc[in] program control init terminal program desc + */ +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_H +uint8_t ia_css_program_control_init_terminal_get_num_done_events( + const ia_css_program_control_init_program_desc_t *program_desc); + +/*! 
+ * Gets a connect section desc for a program desc + * of a program control init terminal + * @param program_desc[in] program control init terminal program desc + * @param connect_section_index[in] section index + */ +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_H +ia_css_program_control_init_connect_section_desc_t * +ia_css_program_control_init_terminal_get_connect_section_desc( + const ia_css_program_control_init_program_desc_t *program_desc, + const unsigned int connect_section_index +); + +/* ================= Program Control Init Terminal - END ================= */ + +#ifdef __IA_CSS_PSYS_DYNAMIC_INLINE__ +#include "ia_css_psys_terminal_impl.h" +#endif /* __IA_CSS_PSYS_DYNAMIC_INLINE__ */ + +#endif /* __IA_CSS_PSYS_TERMINAL_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/psysapi/dynamic/interface/ia_css_psys_terminal.hsys.user.h b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/psysapi/dynamic/interface/ia_css_psys_terminal.hsys.user.h new file mode 100644 index 0000000000000..b8aa08c19754a --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/psysapi/dynamic/interface/ia_css_psys_terminal.hsys.user.h @@ -0,0 +1,255 @@ +/* +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ + +#ifndef __IA_CSS_PSYS_TERMINAL_HSYS_USER_H +#define __IA_CSS_PSYS_TERMINAL_HSYS_USER_H + +/*! 
\file */ + +/** @file ia_css_psys_terminal.hsys.user.h + * + * Define the methods on the terminal object: Hsys user interface + */ + +#include /* ia_css_frame_t */ +#include /* ia_css_program_group_param_t */ + +#include +#include + +#include /* bool */ +#include "ia_css_psys_dynamic_storage_class.h" +#include "ia_css_terminal.h" +#include "ia_css_terminal_manifest.h" +#include "ia_css_kernel_bitmap.h" + +/* + * Creation + */ + +/* + * This source file is created with the intention of sharing and + * compiled for host and firmware. Since there is no native 64bit + * data type support for firmware this wouldn't compile for SP + * tile. The part of the file that is not compilable are marked + * with the following __VIED_CELL marker and this comment. Once we + * come up with a solution to address this issue this will be + * removed. + */ +#if !defined(__VIED_CELL) +/*! Compute the size of storage required for allocating the terminal object + + @param manifest[in] terminal manifest + @param param[in] program group parameters + + @return 0 on error + */ +extern size_t ia_css_sizeof_terminal( + const ia_css_terminal_manifest_t *manifest, + const ia_css_program_group_param_t *param); + +/*! Create the terminal object + + @param raw_mem[in] pre allocated memory + @param manifest[in] terminal manifest + @param terminal_param[in] terminal parameter + @param enable_bitmap program group enable bitmap + + @return NULL on error + */ +extern ia_css_terminal_t *ia_css_terminal_create( + void *raw_mem, + const ia_css_terminal_manifest_t *manifest, + const ia_css_terminal_param_t *terminal_param, + ia_css_kernel_bitmap_t enable_bitmap); + +/*! Destroy (the storage of) the process object + + @param terminal[in] terminal object + + @return NULL + */ +extern ia_css_terminal_t *ia_css_terminal_destroy( + ia_css_terminal_t *terminal); +#endif /* !defined(__VIED_CELL) */ + +/*! 
Print the terminal object to file/stream + + @param terminal[in] terminal object + @param fid[out] file/stream handle + + @return < 0 on error + */ +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_H +int ia_css_terminal_print( + const ia_css_terminal_t *terminal, + void *fid); + +/*! Get the (pointer to) the frame object in the terminal object + + @param terminal[in] terminal object + + @return the pointer to the frame, NULL on error + */ +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_H +ia_css_frame_t *ia_css_data_terminal_get_frame( + const ia_css_data_terminal_t *terminal); + +/*! Get the (pointer to) the frame descriptor object in the terminal object + + @param terminal[in] terminal object + + @return the pointer to the frame descriptor, NULL on error + */ +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_H +ia_css_frame_descriptor_t *ia_css_data_terminal_get_frame_descriptor( + const ia_css_data_terminal_t *dterminal); + +/*! Get the (pointer to) the fragment descriptor object in the terminal object + + @param terminal[in] terminal object + +@return the pointer to the fragment descriptor, NULL on error +*/ +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_H +ia_css_fragment_descriptor_t + *ia_css_data_terminal_get_fragment_descriptor( + const ia_css_data_terminal_t *dterminal, + const unsigned int fragment_index); + +/*! Get the number of fragments on the terminal + + @param terminal[in] terminal object + + @return the fragment count, 0 on error + */ +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_H +uint16_t ia_css_data_terminal_get_fragment_count( + const ia_css_data_terminal_t *dterminal); + +/*! Get the number of section on the (param)terminal + @param manifest[in] terminal manifest + @param terminal_param[in] terminal parameter + + @return the section count, 0 on error + */ +extern uint16_t ia_css_param_terminal_compute_section_count( + const ia_css_terminal_manifest_t *manifest, + const ia_css_program_group_param_t *param); + +/*! 
Get the number of planes on the (data)terminal + @param manifest[in] terminal manifest + @param terminal_param[in] terminal parameter + + @return the plane count, 1(default) on error + */ +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_H +uint8_t ia_css_data_terminal_compute_plane_count( + const ia_css_terminal_manifest_t *manifest, + const ia_css_program_group_param_t *param); + +/*! check if given terminal is parameter terminal. + + @param terminal[in] (base)terminal object + + @return true on success, false on error + */ +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_H +bool ia_css_is_terminal_parameter_terminal( + const ia_css_terminal_t *terminal); + +/*! check if given terminal is program terminal. + + @program terminal[in] (base)terminal object + + @return true on success, false on error + */ +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_H +bool ia_css_is_terminal_program_terminal( + const ia_css_terminal_t *terminal); + +/*! check if given terminal is program control init terminal. + + @program control init terminal[in] (base)terminal object + + @return true on success, false on error + */ +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_H +bool ia_css_is_terminal_program_control_init_terminal( + const ia_css_terminal_t *terminal); + +/*! check if given terminal is spatial parameter terminal. + + @spatial terminal[in] (base)terminal object + + @return true on success, false on error + */ +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_H +bool ia_css_is_terminal_spatial_parameter_terminal( + const ia_css_terminal_t *terminal); + +/*! check if given terminal is data terminal. + + @param terminal[in] (base)terminal object + + @return true on success, false on error + */ +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_H +bool ia_css_is_terminal_data_terminal( + const ia_css_terminal_t *terminal); + +/*! obtain buffer out of terminal(both data & param terminals can call this) + + @param terminal[in] (base)terminal object of either data or param terminal. 
+ + @return vied address of buffer stored in terminal + */ +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_H +vied_vaddress_t ia_css_terminal_get_buffer( + const ia_css_terminal_t *terminal); + +/*!store a buffer in the terminal. + + @param terminal[in] (base)terminal object of either data or param terminal. + @param buffer[in] buffer in vied (hrt address) space. + + @return 0 on success + */ +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_H +int ia_css_terminal_set_buffer(ia_css_terminal_t *terminal, + vied_vaddress_t buffer); + +/*! Obtain terminal buffer index out of terminal object + + @param terminal[in] (base)terminal object of either data or param terminal. + + @return terminal buffer index stored in terminal object on success, -1 on error + */ +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_C +int ia_css_terminal_get_terminal_index( + const ia_css_terminal_t *terminal); + +/*! Store a terminal buffer index in the terminal object + + @param terminal[in] (base)terminal object of either data or param terminal. + @param terminal_index[in] terminal buffer index + + @return 0 on success + */ +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_C +int ia_css_terminal_set_terminal_index( + ia_css_terminal_t *terminal, + unsigned int terminal_index); + +#endif /* __IA_CSS_PSYS_TERMINAL_HSYS_USER_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/psysapi/dynamic/src/ia_css_psys_buffer_set.c b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/psysapi/dynamic/src/ia_css_psys_buffer_set.c new file mode 100644 index 0000000000000..82d53831f9a98 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/psysapi/dynamic/src/ia_css_psys_buffer_set.c @@ -0,0 +1,111 @@ +/* + * Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. 
+ * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + */ + +#include "assert_support.h" +#include "ia_css_psys_dynamic_trace.h" +#include "ia_css_psys_buffer_set.h" +#include "ia_css_psys_process_group.h" + +/* + * Functions to possibly inline + */ +#ifndef __IA_CSS_PSYS_DYNAMIC_INLINE__ +#include "ia_css_psys_buffer_set_impl.h" +#endif /* __IA_CSS_PSYS_DYNAMIC_INLINE__ */ + +STORAGE_CLASS_INLINE void __buffer_set_dummy_check_alignment(void) +{ + COMPILATION_ERROR_IF(SIZE_OF_BUFFER_SET != + CHAR_BIT * sizeof(ia_css_buffer_set_t)); + + COMPILATION_ERROR_IF(0 != + sizeof(ia_css_buffer_set_t) % sizeof(uint64_t)); +} + +/* + * Functions not to inline + */ + +/* The below functions are not to be compiled for firmware */ +#if !defined(__HIVECC) + +ia_css_buffer_set_t *ia_css_buffer_set_create( + void *buffer_set_mem, + const ia_css_process_group_t *process_group, + const unsigned int frame_counter) +{ + ia_css_buffer_set_t *buffer_set = NULL; + unsigned int i; + int ret = -1; + + verifexit(buffer_set_mem != NULL); + verifexit(process_group != NULL); + + buffer_set = (ia_css_buffer_set_t *)buffer_set_mem; + + /* + * Set base struct members + */ + buffer_set->ipu_virtual_address = VIED_NULL; + ia_css_process_group_get_ipu_vaddress(process_group, + &buffer_set->process_group_handle); + buffer_set->frame_counter = frame_counter; + buffer_set->terminal_count = + ia_css_process_group_get_terminal_count(process_group); + + /* + * Initialize adjacent buffer addresses + */ + for (i = 0; i < buffer_set->terminal_count; i++) { + vied_vaddress_t *buffer = + (vied_vaddress_t *)( + (char *)buffer_set + + sizeof(ia_css_buffer_set_t) + + sizeof(vied_vaddress_t) * i); + + *buffer = VIED_NULL; + } + ret = 0; + +EXIT: + if (ret != 0) { + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, ERROR, + 
"ia_css_buffer_set_create failed\n"); + } + return buffer_set; +} + +size_t ia_css_sizeof_buffer_set( + const ia_css_process_group_t *process_group) +{ + size_t size = 0; + + verifexit(process_group != NULL); + + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, VERBOSE, + "ia_css_sizeof_buffer_set(): enter:\n"); + + size = sizeof(ia_css_buffer_set_t) + + ia_css_process_group_get_terminal_count(process_group) * + sizeof(vied_vaddress_t); + +EXIT: + if (size == 0) { + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, ERROR, + "ia_css_sizeof_buffer_set failed\n"); + } + return size; +} + +#endif diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/psysapi/dynamic/src/ia_css_psys_buffer_set_impl.h b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/psysapi/dynamic/src/ia_css_psys_buffer_set_impl.h new file mode 100644 index 0000000000000..0399d76f33315 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/psysapi/dynamic/src/ia_css_psys_buffer_set_impl.h @@ -0,0 +1,241 @@ +/* + * Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+ */ + +#ifndef __IA_CSS_PSYS_BUFFER_SET_IMPL_H +#define __IA_CSS_PSYS_BUFFER_SET_IMPL_H + +#include "error_support.h" +#include "ia_css_psys_dynamic_trace.h" +#include "vied_nci_psys_system_global.h" +#include "ia_css_psys_terminal.hsys.user.h" + +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_C +int ia_css_buffer_set_set_buffer( + ia_css_buffer_set_t *buffer_set, + const unsigned int terminal_index, + const vied_vaddress_t buffer) +{ + DECLARE_ERRVAL + vied_vaddress_t *buffer_ptr; + int ret = -1; + + verifexitval(buffer_set != NULL, EFAULT); + verifexitval(terminal_index < buffer_set->terminal_count, EFAULT); + + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, VERBOSE, + "ia_css_buffer_set_set_buffer(): enter:\n"); + + /* + * Set address in buffer set object + */ + buffer_ptr = + (vied_vaddress_t *)( + (char *)buffer_set + + sizeof(ia_css_buffer_set_t) + + terminal_index * sizeof(vied_vaddress_t)); + *buffer_ptr = buffer; + + ret = 0; + +EXIT: + if (haserror(EFAULT)) { + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, ERROR, + "ia_css_buffer_set_set_buffer: invalid argument\n"); + } + return ret; +} + +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_C +vied_vaddress_t ia_css_buffer_set_get_buffer( + const ia_css_buffer_set_t *buffer_set, + const ia_css_terminal_t *terminal) +{ + DECLARE_ERRVAL + vied_vaddress_t buffer = VIED_NULL; + vied_vaddress_t *buffer_ptr; + int terminal_index; + + verifexitval(buffer_set != NULL, EFAULT); + verifexitval(terminal != NULL, EFAULT); + + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, VERBOSE, + "ia_css_buffer_set_get_buffer(): enter:\n"); + + /* + * Retrieve terminal index from terminal object + */ + terminal_index = ia_css_terminal_get_terminal_index(terminal); + verifexitval(terminal_index >= 0, EFAULT); + verifexitval(terminal_index < buffer_set->terminal_count, EFAULT); + + /* + * Retrieve address from buffer set object + */ + buffer_ptr = + (vied_vaddress_t *)( + (char *)buffer_set + + sizeof(ia_css_buffer_set_t) + + terminal_index * sizeof(vied_vaddress_t)); + buffer = *buffer_ptr; + +EXIT: + 
if (haserror(EFAULT)) { + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, ERROR, + "ia_css_buffer_set_get_buffer: invalid argument\n"); + } + return buffer; +} + +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_C +int ia_css_buffer_set_set_ipu_address( + ia_css_buffer_set_t *buffer_set, + const vied_vaddress_t ipu_vaddress) +{ + DECLARE_ERRVAL + int ret = -1; + + verifexitval(buffer_set != NULL, EFAULT); + + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, VERBOSE, + "ia_css_buffer_set_set_ipu_address(): enter:\n"); + + buffer_set->ipu_virtual_address = ipu_vaddress; + + ret = 0; + +EXIT: + if (haserror(EFAULT)) { + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, ERROR, + "ia_css_buffer_set_set_ipu_address invalid argument\n"); + } + return ret; +} + +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_C +vied_vaddress_t ia_css_buffer_set_get_ipu_address( + const ia_css_buffer_set_t *buffer_set) +{ + DECLARE_ERRVAL + vied_vaddress_t ipu_virtual_address = VIED_NULL; + + verifexitval(buffer_set != NULL, EFAULT); + + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, VERBOSE, + "ia_css_buffer_set_get_ipu_address(): enter:\n"); + + ipu_virtual_address = buffer_set->ipu_virtual_address; + +EXIT: + if (haserror(EFAULT)) { + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, ERROR, + "ia_css_buffer_set_get_ipu_address: invalid argument\n"); + } + return ipu_virtual_address; +} + +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_C +int ia_css_buffer_set_set_process_group_handle( + ia_css_buffer_set_t *buffer_set, + const vied_vaddress_t process_group_handle) +{ + DECLARE_ERRVAL + int ret = -1; + + verifexitval(buffer_set != NULL, EFAULT); + + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, VERBOSE, + "ia_css_buffer_set_set_process_group_context(): enter:\n"); + + buffer_set->process_group_handle = process_group_handle; + + ret = 0; + +EXIT: + if (haserror(EFAULT)) { + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, ERROR, + "ia_css_buffer_set_set_process_group_context invalid argument\n"); + } + return ret; +} + +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_C +vied_vaddress_t ia_css_buffer_set_get_process_group_handle( + const ia_css_buffer_set_t 
*buffer_set) +{ + DECLARE_ERRVAL + vied_vaddress_t process_group_handle = VIED_NULL; + + verifexitval(buffer_set != NULL, EFAULT); + + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, VERBOSE, + "ia_css_buffer_set_get_process_group_handle(): enter:\n"); + + process_group_handle = buffer_set->process_group_handle; + +EXIT: + if (haserror(EFAULT)) { + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, ERROR, + "ia_css_buffer_set_get_process_group_handle: invalid argument\n"); + } + return process_group_handle; +} + +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_C +int ia_css_buffer_set_set_token( + ia_css_buffer_set_t *buffer_set, + const uint64_t token) +{ + DECLARE_ERRVAL + int ret = -1; + + verifexitval(buffer_set != NULL, EFAULT); + + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, VERBOSE, + "ia_css_buffer_set_set_token(): enter:\n"); + + buffer_set->token = token; + + ret = 0; + +EXIT: + if (haserror(EFAULT)) { + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, ERROR, + "ia_css_buffer_set_set_token invalid argument\n"); + } + return ret; +} + +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_C +uint64_t ia_css_buffer_set_get_token( + const ia_css_buffer_set_t *buffer_set) +{ + DECLARE_ERRVAL + uint64_t token = 0; + + verifexitval(buffer_set != NULL, EFAULT); + + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, VERBOSE, + "ia_css_buffer_set_get_token(): enter:\n"); + + token = buffer_set->token; + +EXIT: + if (haserror(EFAULT)) { + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, ERROR, + "ia_css_buffer_set_get_token: invalid argument\n"); + } + return token; +} + +#endif /* __IA_CSS_PSYS_BUFFER_SET_IMPL_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/psysapi/dynamic/src/ia_css_psys_process.c b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/psysapi/dynamic/src/ia_css_psys_process.c new file mode 100644 index 0000000000000..cca0fa73fb374 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/psysapi/dynamic/src/ia_css_psys_process.c @@ -0,0 +1,1147 @@ +/* +* Support for Intel Camera Imaging ISP subsystem. 
+ * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ + +#include "ia_css_psys_process.h" +#include "ia_css_psys_dynamic_storage_class.h" +#include "ia_css_psys_process_private_types.h" +#include /* for NOT_USED */ + +/* + * Functions to possibly inline + */ + +#ifndef __IA_CSS_PSYS_DYNAMIC_INLINE__ +#include "ia_css_psys_process_impl.h" +#endif /* __IA_CSS_PSYS_DYNAMIC_INLINE__ */ + +/* + * Functions not to inline + */ + +/* This source file is created with the intention of sharing and + * compiled for host and firmware. Since there is no native 64bit + * data type support for firmware this wouldn't compile for SP + * tile. The part of the file that is not compilable are marked + * with the following __HIVECC marker and this comment. Once we + * come up with a solution to address this issue this will be + * removed. 
+ */ +#if !defined(__HIVECC) +size_t ia_css_sizeof_process( + const ia_css_program_manifest_t *manifest, + const ia_css_program_param_t *param) +{ + size_t size = 0, tmp_size; + + uint8_t program_dependency_count; + uint8_t terminal_dependency_count; + + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, VERBOSE, + "ia_css_sizeof_process(): enter:\n"); + + COMPILATION_ERROR_IF( + SIZE_OF_PROCESS_STRUCT_BITS != + (CHAR_BIT * sizeof(ia_css_process_t))); + + COMPILATION_ERROR_IF(0 != sizeof(ia_css_process_t)%sizeof(uint64_t)); + + verifexit(manifest != NULL); + verifexit(param != NULL); + + size += sizeof(ia_css_process_t); + + program_dependency_count = + ia_css_program_manifest_get_program_dependency_count(manifest); + terminal_dependency_count = + ia_css_program_manifest_get_terminal_dependency_count(manifest); + + tmp_size = program_dependency_count*sizeof(vied_nci_resource_id_t); + size += tot_bytes_for_pow2_align(sizeof(uint64_t), tmp_size); + tmp_size = terminal_dependency_count*sizeof(uint8_t); + size += tot_bytes_for_pow2_align(sizeof(uint64_t), tmp_size); + +EXIT: + if (NULL == manifest || NULL == param) { + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, WARNING, + "ia_css_sizeof_process invalid argument\n"); + } + return size; +} + +ia_css_process_t *ia_css_process_create( + void *raw_mem, + const ia_css_program_manifest_t *manifest, + const ia_css_program_param_t *param, + const uint32_t program_idx) +{ + size_t tmp_size; + int retval = -1; + ia_css_process_t *process = NULL; + char *process_raw_ptr = (char *) raw_mem; + + /* size_t size = ia_css_sizeof_process(manifest, param); */ + uint8_t program_dependency_count; + uint8_t terminal_dependency_count; + + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, INFO, + "ia_css_process_create(): enter:\n"); + + verifexit(manifest != NULL); + verifexit(param != NULL); + verifexit(process_raw_ptr != NULL); + + process = (ia_css_process_t *) process_raw_ptr; + verifexit(process != NULL); + + process->kernel_bitmap = + 
ia_css_program_manifest_get_kernel_bitmap(manifest); + process->state = IA_CSS_PROCESS_CREATED; + + program_dependency_count = + ia_css_program_manifest_get_program_dependency_count(manifest); + terminal_dependency_count = + ia_css_program_manifest_get_terminal_dependency_count(manifest); + + /* A process requires at least one input or output */ + verifexit((program_dependency_count + + terminal_dependency_count) != 0); + + process_raw_ptr += sizeof(ia_css_process_t); + if (program_dependency_count != 0) { + process->cell_dependencies_offset = + (uint16_t) (process_raw_ptr - (char *)process); + tmp_size = + program_dependency_count * sizeof(vied_nci_resource_id_t); + process_raw_ptr += + tot_bytes_for_pow2_align(sizeof(uint64_t), tmp_size); + } else { + process->cell_dependencies_offset = 0; + } + + if (terminal_dependency_count != 0) { + process->terminal_dependencies_offset = + (uint16_t) (process_raw_ptr - (char *)process); + } + + process->size = (uint32_t)ia_css_sizeof_process(manifest, param); + + process->ID = ia_css_program_manifest_get_program_ID(manifest); + verifexit(process->ID != 0); + process->program_idx = program_idx; + + process->cell_dependency_count = program_dependency_count; + process->terminal_dependency_count = terminal_dependency_count; + + process->parent_offset = 0; + + verifexit(ia_css_process_clear_all(process) == 0); + + process->state = IA_CSS_PROCESS_READY; + retval = 0; + + IA_CSS_TRACE_2(PSYSAPI_DYNAMIC, INFO, + "ia_css_process_create(): Created successfully process %p ID 0x%x\n", + process, process->ID); + +EXIT: + if (NULL == manifest || NULL == param) { + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, WARNING, + "ia_css_process_create invalid argument\n"); + } + if (retval != 0) { + IA_CSS_TRACE_1(PSYSAPI_DYNAMIC, ERROR, + "ia_css_process_create failed (%i)\n", retval); + process = ia_css_process_destroy(process); + } + return process; +} + +ia_css_process_t *ia_css_process_destroy( + ia_css_process_t *process) +{ + + return process; +} +#endif 
+ +int ia_css_process_set_cell( + ia_css_process_t *process, + const vied_nci_cell_ID_t cell_id) +{ + int retval = -1; + vied_nci_resource_bitmap_t bit_mask; + vied_nci_resource_bitmap_t resource_bitmap; + ia_css_process_group_t *parent; + ia_css_process_group_state_t parent_state; + ia_css_process_state_t state; + + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, VERBOSE, + "ia_css_process_set_cell(): enter:\n"); + + verifexit(process != NULL); + + parent = ia_css_process_get_parent(process); + + verifexit(parent != NULL); + + parent_state = ia_css_process_group_get_state(parent); + state = ia_css_process_get_state(process); + +/* Some programs are mapped on a fixed cell, + * when the process group is created + */ + verifexit(((parent_state == IA_CSS_PROCESS_GROUP_BLOCKED) || + (parent_state == IA_CSS_PROCESS_GROUP_STARTED) || + (parent_state == IA_CSS_PROCESS_GROUP_CREATED) || + /* If the process group has already been created, but no VP cell + * has been assigned to this process (i.e. not fixed in + * manifest), then we need to set the cell of this process + * while its parent state is READY (the ready state is set at + * the end of ia_css_process_group_create) + */ + (parent_state == IA_CSS_PROCESS_GROUP_READY))); + verifexit(state == IA_CSS_PROCESS_READY); + +/* Some programs are mapped on a fixed cell, thus check is not secure, + * but it will detect a preset, the process manager will do the secure check + */ + verifexit(ia_css_process_get_cell(process) == + VIED_NCI_N_CELL_ID); + + bit_mask = vied_nci_cell_bit_mask(cell_id); + resource_bitmap = ia_css_process_group_get_resource_bitmap(parent); + + verifexit(bit_mask != 0); + verifexit(vied_nci_is_bitmap_clear(bit_mask, resource_bitmap)); + + ia_css_process_cells_clear(process); + ia_css_process_cells_set_cell(process, 0, cell_id); + + resource_bitmap = vied_nci_bitmap_set(resource_bitmap, bit_mask); + + retval = ia_css_process_group_set_resource_bitmap( + parent, resource_bitmap); +EXIT: + if (NULL == process) { + 
IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, WARNING, + "ia_css_process_set_cell invalid argument process\n"); + } + if (retval != 0) { + IA_CSS_TRACE_1(PSYSAPI_DYNAMIC, ERROR, + "ia_css_process_set_cell failed (%i)\n", retval); + } + return retval; +} + +int ia_css_process_clear_cell( + ia_css_process_t *process) +{ + int retval = -1; + vied_nci_cell_ID_t cell_id; + ia_css_process_group_t *parent; + vied_nci_resource_bitmap_t resource_bitmap; + vied_nci_resource_bitmap_t bit_mask; + ia_css_process_group_state_t parent_state; + ia_css_process_state_t state; + + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, VERBOSE, + "ia_css_process_clear_cell(): enter:\n"); + verifexit(process != NULL); + + cell_id = ia_css_process_get_cell(process); + parent = ia_css_process_get_parent(process); + + verifexit(parent != NULL); + + parent_state = ia_css_process_group_get_state(parent); + state = ia_css_process_get_state(process); + + verifexit(((parent_state == IA_CSS_PROCESS_GROUP_BLOCKED) + || (parent_state == IA_CSS_PROCESS_GROUP_STARTED))); + verifexit(state == IA_CSS_PROCESS_READY); + + bit_mask = vied_nci_cell_bit_mask(cell_id); + resource_bitmap = ia_css_process_group_get_resource_bitmap(parent); + + verifexit(bit_mask != 0); + verifexit(vied_nci_is_bitmap_set(bit_mask, resource_bitmap)); + + ia_css_process_cells_clear(process); + + resource_bitmap = vied_nci_bitmap_clear(resource_bitmap, bit_mask); + + retval = ia_css_process_group_set_resource_bitmap( + parent, resource_bitmap); +EXIT: + if (NULL == process) { + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, WARNING, + "ia_css_process_clear_cell invalid argument process\n"); + } + if (retval != 0) { + IA_CSS_TRACE_1(PSYSAPI_DYNAMIC, ERROR, + "ia_css_process_clear_cell failed (%i)\n", retval); + } + return retval; +} + +int ia_css_process_set_int_mem( + ia_css_process_t *process, + const vied_nci_mem_type_ID_t mem_type_id, + const vied_nci_resource_size_t offset) +{ + int retval = -1; + ia_css_process_group_t *parent; + vied_nci_cell_ID_t cell_id; + 
ia_css_process_group_state_t parent_state; + ia_css_process_state_t state; + + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, VERBOSE, + "ia_css_process_set_int_mem(): enter:\n"); + + verifexit(process != NULL); + verifexit(mem_type_id < VIED_NCI_N_MEM_TYPE_ID); + + parent = ia_css_process_get_parent(process); + cell_id = ia_css_process_get_cell(process); + + parent_state = ia_css_process_group_get_state(parent); + state = ia_css_process_get_state(process); + + /* TODO : separate process group start and run from + * process_group_exec_cmd() + */ + verifexit(((parent_state == IA_CSS_PROCESS_GROUP_BLOCKED) || + (parent_state == IA_CSS_PROCESS_GROUP_STARTED) || + (parent_state == IA_CSS_PROCESS_GROUP_RUNNING))); + verifexit(state == IA_CSS_PROCESS_READY); + + if (vied_nci_is_cell_mem_of_type(cell_id, mem_type_id, mem_type_id)) { + vied_nci_mem_ID_t mem_id = + vied_nci_cell_get_mem(cell_id, mem_type_id); + + process->int_mem_id[mem_type_id] = mem_id; + process->int_mem_offset[mem_type_id] = offset; + retval = 0; + } +EXIT: + if (retval != 0) { + IA_CSS_TRACE_1(PSYSAPI_DYNAMIC, ERROR, + "ia_css_process_set_int_mem failed (%i)\n", retval); + } + return retval; +} + +int ia_css_process_clear_int_mem( + ia_css_process_t *process, + const vied_nci_mem_type_ID_t mem_type_id) +{ + int retval = -1; + uint16_t mem_index; + ia_css_process_group_t *parent; + vied_nci_cell_ID_t cell_id; + ia_css_process_group_state_t parent_state; + ia_css_process_state_t state; + + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, VERBOSE, + "ia_css_process_clear_int_mem(): enter:\n"); + + verifexit(process != NULL); + verifexit(mem_type_id < VIED_NCI_N_MEM_TYPE_ID); + + parent = ia_css_process_get_parent(process); + cell_id = ia_css_process_get_cell(process); + + /* We should have a check on NULL != parent but it parent is NULL + * ia_css_process_group_get_state will return + * IA_CSS_N_PROCESS_GROUP_STATES so it will be filtered anyway later. 
+ */ + + /* verifexit(parent != NULL); */ + + parent_state = ia_css_process_group_get_state(parent); + state = ia_css_process_get_state(process); + + verifexit(((parent_state == IA_CSS_PROCESS_GROUP_BLOCKED) + || (parent_state == IA_CSS_PROCESS_GROUP_STARTED))); + verifexit(state == IA_CSS_PROCESS_READY); + +/* We could just clear the field, but lets check the state for + * consistency first + */ + for (mem_index = 0; mem_index < (int)VIED_NCI_N_MEM_TYPE_ID; + mem_index++) { + if (vied_nci_is_cell_mem_of_type( + cell_id, mem_index, mem_type_id)) { + vied_nci_mem_ID_t mem_id = + vied_nci_cell_get_mem(cell_id, mem_index); + int mem_of_type; + + mem_of_type = + vied_nci_is_mem_of_type(mem_id, mem_type_id); + + assert(mem_of_type); + assert((process->int_mem_id[mem_type_id] == mem_id) || + (process->int_mem_id[mem_type_id] == + VIED_NCI_N_MEM_ID)); + process->int_mem_id[mem_type_id] = VIED_NCI_N_MEM_ID; + process->int_mem_offset[mem_type_id] = + IA_CSS_PROCESS_INVALID_OFFSET; + retval = 0; + } + } + +EXIT: + if (NULL == process || mem_type_id >= VIED_NCI_N_MEM_TYPE_ID) { + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, WARNING, + "ia_css_process_clear_int_mem invalid argument\n"); + } + if (retval != 0) { + IA_CSS_TRACE_1(PSYSAPI_DYNAMIC, ERROR, + "ia_css_process_clear_int_mem failed (%i)\n", retval); + } +return retval; +} + +int ia_css_process_set_ext_mem( + ia_css_process_t *process, + const vied_nci_mem_ID_t mem_id, + const vied_nci_resource_size_t offset) +{ + int retval = -1; + ia_css_process_group_t *parent; + vied_nci_cell_ID_t cell_id; + ia_css_process_group_state_t parent_state; + ia_css_process_state_t state; + vied_nci_mem_type_ID_t mem_type_id; + + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, VERBOSE, + "ia_css_process_set_ext_mem(): enter:\n"); + + verifexit(process != NULL); + + parent = ia_css_process_get_parent(process); + cell_id = ia_css_process_get_cell(process); + + /* We should have a check on NULL != parent but it parent is NULL + * ia_css_process_group_get_state will 
return + * IA_CSS_N_PROCESS_GROUP_STATES so it will be filtered anyway later. + */ + + /* verifexit(parent != NULL); */ + + parent_state = ia_css_process_group_get_state(parent); + state = ia_css_process_get_state(process); + + /* TODO : separate process group start and run from + * process_group_exec_cmd() + */ + verifexit(((parent_state == IA_CSS_PROCESS_GROUP_BLOCKED) || + (parent_state == IA_CSS_PROCESS_GROUP_STARTED) || + (parent_state == IA_CSS_PROCESS_GROUP_RUNNING))); + verifexit(state == IA_CSS_PROCESS_READY); + + /* Check that the memory actually exists, "vied_nci_has_cell_mem_of_id()" + * will return false on error + */ + + mem_type_id = vied_nci_mem_get_type(mem_id); + if (((!vied_nci_has_cell_mem_of_id(cell_id, mem_id) && + (mem_type_id != VIED_NCI_PMEM_TYPE_ID)) + || vied_nci_mem_is_ext_type(mem_type_id)) && + (mem_id < VIED_NCI_N_MEM_ID)) { + + verifexit(mem_type_id < VIED_NCI_N_DATA_MEM_TYPE_ID); + process->ext_mem_id[mem_type_id] = mem_id; + process->ext_mem_offset[mem_type_id] = offset; + retval = 0; + } + +EXIT: + if (NULL == process) { + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, WARNING, + "ia_css_process_set_ext_mem invalid argument process\n"); + } + if (retval != 0) { + IA_CSS_TRACE_1(PSYSAPI_DYNAMIC, ERROR, + "ia_css_process_set_ext_mem failed (%i)\n", retval); + } + return retval; +} + +int ia_css_process_clear_ext_mem( + ia_css_process_t *process, + const vied_nci_mem_type_ID_t mem_type_id) +{ + int retval = -1; + ia_css_process_group_t *parent; + ia_css_process_group_state_t parent_state; + ia_css_process_state_t state; + + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, VERBOSE, + "ia_css_process_clear_ext_mem(): enter:\n"); + + verifexit(process != NULL); + verifexit(mem_type_id < VIED_NCI_N_DATA_MEM_TYPE_ID); + + parent = ia_css_process_get_parent(process); + state = ia_css_process_get_state(process); + + verifexit(parent != NULL); + verifexit(state == IA_CSS_PROCESS_READY); + + parent_state = ia_css_process_group_get_state(parent); + + verifexit(((parent_state 
== IA_CSS_PROCESS_GROUP_BLOCKED) || + (parent_state == IA_CSS_PROCESS_GROUP_STARTED))); + + process->ext_mem_id[mem_type_id] = VIED_NCI_N_MEM_ID; + process->ext_mem_offset[mem_type_id] = IA_CSS_PROCESS_INVALID_OFFSET; + + retval = 0; +EXIT: + if (NULL == process || mem_type_id >= VIED_NCI_N_DATA_MEM_TYPE_ID) { + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, WARNING, + "ia_css_process_clear_ext_mem invalid argument\n"); + } + if (retval != 0) { + IA_CSS_TRACE_1(PSYSAPI_DYNAMIC, ERROR, + "ia_css_process_clear_ext_mem failed (%i)\n", retval); + } + return retval; +} + +int ia_css_process_set_cells_bitmap( + ia_css_process_t *process, + const vied_nci_resource_bitmap_t bitmap) +{ + int retval = -1; + ia_css_process_group_t *parent; + ia_css_process_group_state_t parent_state; + ia_css_process_state_t state; + int array_index = 0; + int bit_index; + + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, VERBOSE, + "ia_css_process_set_cells_bitmap(): enter:\n"); + + verifexit(process != NULL); + parent = ia_css_process_get_parent(process); + state = ia_css_process_get_state(process); + + parent_state = ia_css_process_group_get_state(parent); + + verifexit(((parent_state == IA_CSS_PROCESS_GROUP_BLOCKED) || + (parent_state == IA_CSS_PROCESS_GROUP_STARTED) || + (parent_state == IA_CSS_PROCESS_GROUP_CREATED) || + (parent_state == IA_CSS_PROCESS_GROUP_READY))); + verifexit(state == IA_CSS_PROCESS_READY); + + for (bit_index = 0; bit_index < VIED_NCI_N_CELL_ID; bit_index++) { + if (vied_nci_is_bit_set_in_bitmap(bitmap, bit_index)) { + verifexit(array_index < IA_CSS_PROCESS_MAX_CELLS); + ia_css_process_cells_set_cell(process, + array_index, (vied_nci_cell_ID_t)bit_index); + array_index++; + } + } + for (; array_index < IA_CSS_PROCESS_MAX_CELLS; array_index++) { + ia_css_process_cells_set_cell(process, + array_index, VIED_NCI_N_CELL_ID); + } + + retval = 0; +EXIT: + if (NULL == process) { + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, WARNING, + "ia_css_process_set_cells_bitmap invalid argument\n"); + } + if (retval != 0) { + 
IA_CSS_TRACE_1(PSYSAPI_DYNAMIC, ERROR, + "ia_css_process_set_cells_bitmap failed (%i)\n", retval); + } + return retval; +} + +int ia_css_process_set_dev_chn( + ia_css_process_t *process, + const vied_nci_dev_chn_ID_t dev_chn_id, + const vied_nci_resource_size_t offset) +{ + int retval = -1; + ia_css_process_group_t *parent; + ia_css_process_group_state_t parent_state; + ia_css_process_state_t state; + + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, VERBOSE, + "ia_css_process_set_dev_chn(): enter:\n"); + + verifexit(process != NULL); + verifexit(dev_chn_id <= VIED_NCI_N_DEV_CHN_ID); + + parent = ia_css_process_get_parent(process); + state = ia_css_process_get_state(process); + + parent_state = ia_css_process_group_get_state(parent); + + /* TODO : separate process group start and run from + * process_group_exec_cmd() + */ + verifexit(((parent_state == IA_CSS_PROCESS_GROUP_BLOCKED) || + (parent_state == IA_CSS_PROCESS_GROUP_STARTED) || + (parent_state == IA_CSS_PROCESS_GROUP_RUNNING))); + verifexit(state == IA_CSS_PROCESS_READY); + + process->dev_chn_offset[dev_chn_id] = offset; + + retval = 0; +EXIT: + if (NULL == process || dev_chn_id >= VIED_NCI_N_DEV_CHN_ID) { + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, WARNING, + "ia_css_process_set_dev_chn invalid argument\n"); + } + if (retval != 0) { + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, ERROR, + "ia_css_process_set_dev_chn invalid argument\n"); + } + return retval; +} + +int ia_css_process_set_dfm_port_bitmap( + ia_css_process_t *process, + const vied_nci_dev_dfm_id_t dfm_dev_id, + const vied_nci_resource_bitmap_t bitmap) +{ + int retval = -1; + ia_css_process_group_t *parent; + ia_css_process_group_state_t parent_state; + ia_css_process_state_t state; + + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, VERBOSE, + "ia_css_process_set_dfm_port(): enter:\n"); + + verifexit(process != NULL); + + parent = ia_css_process_get_parent(process); + state = ia_css_process_get_state(process); + + parent_state = ia_css_process_group_get_state(parent); + + /* TODO : separate process 
group start and run from + * process_group_exec_cmd() + */ + verifexit(((parent_state == IA_CSS_PROCESS_GROUP_BLOCKED) || + (parent_state == IA_CSS_PROCESS_GROUP_STARTED) || + (parent_state == IA_CSS_PROCESS_GROUP_RUNNING))); + verifexit(state == IA_CSS_PROCESS_READY); + +#if (VIED_NCI_N_DEV_DFM_ID > 0) + verifexit(dfm_dev_id <= VIED_NCI_N_DEV_DFM_ID); + process->dfm_port_bitmap[dfm_dev_id] = bitmap; +#else + (void)bitmap; + (void)dfm_dev_id; +#endif + retval = 0; +EXIT: + if (retval != 0) { + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, ERROR, + "ia_css_process_set_dfm_port invalid argument\n"); + } + return retval; +} + +int ia_css_process_set_dfm_active_port_bitmap( + ia_css_process_t *process, + const vied_nci_dev_dfm_id_t dfm_dev_id, + const vied_nci_resource_bitmap_t bitmap) +{ + int retval = -1; + ia_css_process_group_t *parent; + ia_css_process_group_state_t parent_state; + ia_css_process_state_t state; + + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, VERBOSE, + "ia_css_process_set_dfm_active_port_bitmap(): enter:\n"); + + verifexit(process != NULL); + + parent = ia_css_process_get_parent(process); + state = ia_css_process_get_state(process); + + parent_state = ia_css_process_group_get_state(parent); + + /* TODO : separate process group start and run from + * process_group_exec_cmd() + */ + verifexit(((parent_state == IA_CSS_PROCESS_GROUP_BLOCKED) || + (parent_state == IA_CSS_PROCESS_GROUP_STARTED) || + (parent_state == IA_CSS_PROCESS_GROUP_RUNNING))); + verifexit(state == IA_CSS_PROCESS_READY); +#if (VIED_NCI_N_DEV_DFM_ID > 0) + verifexit(dfm_dev_id <= VIED_NCI_N_DEV_DFM_ID); + process->dfm_active_port_bitmap[dfm_dev_id] = bitmap; +#else + (void)bitmap; + (void)dfm_dev_id; +#endif + retval = 0; +EXIT: + if (retval != 0) { + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, WARNING, + "ia_css_process_set_dfm_active_port_bitmap invalid argument\n"); + } + return retval; +} + +int ia_css_process_clear_dev_chn( + ia_css_process_t *process, + const vied_nci_dev_chn_ID_t dev_chn_id) +{ + int retval = -1; 
+	ia_css_process_group_t *parent;
+	ia_css_process_group_state_t parent_state;
+	ia_css_process_state_t state;
+
+	IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, VERBOSE,
+		"ia_css_process_clear_dev_chn(): enter:\n");
+
+	verifexit(process != NULL);
+
+	parent = ia_css_process_get_parent(process);
+
+	/* We should have a check on NULL != parent but if parent is NULL
+	 * ia_css_process_group_get_state will return
+	 * IA_CSS_N_PROCESS_GROUP_STATES so it will be filtered anyway later.
+	 */
+
+	/* verifexit(parent != NULL); */
+
+	parent_state = ia_css_process_group_get_state(parent);
+	state = ia_css_process_get_state(process);
+
+	verifexit(((parent_state == IA_CSS_PROCESS_GROUP_BLOCKED)
+		|| (parent_state == IA_CSS_PROCESS_GROUP_STARTED)));
+	verifexit(state == IA_CSS_PROCESS_READY);
+
+	verifexit(dev_chn_id <= VIED_NCI_N_DEV_CHN_ID);
+
+	process->dev_chn_offset[dev_chn_id] = IA_CSS_PROCESS_INVALID_OFFSET;
+
+	retval = 0;
+EXIT:
+	if (NULL == process) {
+		IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, WARNING,
+			"ia_css_process_clear_dev_chn invalid argument process\n");
+	}
+	if (retval != 0) {
+		IA_CSS_TRACE_1(PSYSAPI_DYNAMIC, ERROR,
+			"ia_css_process_clear_dev_chn failed (%i)\n", retval);
+	}
+	return retval;
+}
+
+int ia_css_process_clear_all(
+	ia_css_process_t *process)
+{
+	int retval = -1;
+	ia_css_process_group_t *parent;
+	ia_css_process_group_state_t parent_state;
+	ia_css_process_state_t state;
+	int mem_index;
+	int dev_chn_index;
+
+	IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, VERBOSE,
+		"ia_css_process_clear_all(): enter:\n");
+
+	verifexit(process != NULL);
+
+	parent = ia_css_process_get_parent(process);
+	state = ia_css_process_get_state(process);
+
+	/* We should have a check on NULL != parent but if parent is NULL
+	 * ia_css_process_group_get_state will return
+	 * IA_CSS_N_PROCESS_GROUP_STATES so it will be filtered anyway later.
+	 */
+
+	/* verifexit(parent != NULL); */
+
+	parent_state = ia_css_process_group_get_state(parent);
+
+/* Resource clear can only be called in excluded states contrary to set */
+/* NOTE(review): with '||' the second disjunct below is redundant — any
+ * parent_state equal to IA_CSS_N_PROCESS_GROUP_STATES already satisfies
+ * '!= IA_CSS_PROCESS_GROUP_RUNNING'. '&&' with '!=' was probably intended
+ * to also reject the invalid-state sentinel; confirm before changing.
+ */
+	verifexit((parent_state != IA_CSS_PROCESS_GROUP_RUNNING) ||
+		(parent_state == IA_CSS_N_PROCESS_GROUP_STATES));
+	verifexit((state == IA_CSS_PROCESS_CREATED) ||
+		(state == IA_CSS_PROCESS_READY));
+
+	for (dev_chn_index = 0; dev_chn_index < VIED_NCI_N_DEV_CHN_ID;
+		dev_chn_index++) {
+		process->dev_chn_offset[dev_chn_index] =
+			IA_CSS_PROCESS_INVALID_OFFSET;
+	}
+/* No difference whether a cell_id has been set or not, clear all */
+	for (mem_index = 0; mem_index < VIED_NCI_N_DATA_MEM_TYPE_ID;
+		mem_index++) {
+		process->ext_mem_id[mem_index] = VIED_NCI_N_MEM_ID;
+		process->ext_mem_offset[mem_index] =
+			IA_CSS_PROCESS_INVALID_OFFSET;
+	}
+	for (mem_index = 0; mem_index < VIED_NCI_N_MEM_TYPE_ID; mem_index++) {
+		process->int_mem_id[mem_index] = VIED_NCI_N_MEM_ID;
+		process->int_mem_offset[mem_index] =
+			IA_CSS_PROCESS_INVALID_OFFSET;
+	}
+
+	ia_css_process_cells_clear(process);
+
+	retval = 0;
+EXIT:
+	if (NULL == process) {
+		IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, WARNING,
+			"ia_css_process_clear_all invalid argument process\n");
+	}
+	if (retval != 0) {
+		IA_CSS_TRACE_1(PSYSAPI_DYNAMIC, ERROR,
+			"ia_css_process_clear_all failed (%i)\n", retval);
+	}
+	return retval;
+}
+
+int ia_css_process_acquire(
+	ia_css_process_t *process)
+{
+	int retval = -1;
+
+	IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, VERBOSE,
+		"ia_css_process_acquire(): enter:\n");
+
+	verifexit(process != NULL);
+
+	retval = 0;
+EXIT:
+	if (NULL == process) {
+		IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, WARNING,
+			"ia_css_process_acquire invalid argument process\n");
+	}
+	if (retval != 0) {
+		IA_CSS_TRACE_1(PSYSAPI_DYNAMIC, ERROR,
+			"ia_css_process_acquire failed (%i)\n", retval);
+	}
+	return retval;
+}
+
+int ia_css_process_release(
+	ia_css_process_t *process)
+{
+	int retval = -1;
+
+	IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, INFO,
+
"ia_css_process_release(): enter:\n");
+
+	verifexit(process != NULL);
+
+	retval = 0;
+EXIT:
+	if (NULL == process) {
+		/* NOTE(review): this WARNING string names "ia_css_process_t"
+		 * where every sibling function names itself; it presumably
+		 * should read "ia_css_process_release" — confirm before
+		 * changing the runtime string.
+		 */
+		IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, WARNING,
+			"ia_css_process_t invalid argument process\n");
+	}
+	if (retval != 0) {
+		IA_CSS_TRACE_1(PSYSAPI_DYNAMIC, ERROR,
+			"ia_css_process_release failed (%i)\n", retval);
+	}
+	return retval;
+}
+
+int ia_css_process_print(const ia_css_process_t *process, void *fid)
+{
+	int retval = -1;
+	int i, dev_chn_index;
+	uint16_t mem_index;
+	uint8_t cell_dependency_count, terminal_dependency_count;
+	vied_nci_cell_ID_t cell_id = ia_css_process_get_cell(process);
+	NOT_USED(fid);
+
+	IA_CSS_TRACE_1(PSYSAPI_DYNAMIC, INFO,
+		"ia_css_process_print(process %p): enter:\n", process);
+
+	verifexit(process != NULL);
+
+	IA_CSS_TRACE_6(PSYSAPI_DYNAMIC, INFO,
+		"\tprocess %p, sizeof %d, programID %d, state %d, parent %p, cell %d\n",
+		process,
+		(int)ia_css_process_get_size(process),
+		(int)ia_css_process_get_program_ID(process),
+		(int)ia_css_process_get_state(process),
+		(void *)ia_css_process_get_parent(process),
+		(int)ia_css_process_get_cell(process));
+
+	for (mem_index = 0; mem_index < (int)VIED_NCI_N_MEM_TYPE_ID;
+		mem_index++) {
+		vied_nci_mem_ID_t mem_id =
+			(vied_nci_mem_ID_t)(process->int_mem_id[mem_index]);
+		if (cell_id == VIED_NCI_N_CELL_ID) {
+			verifexit(mem_id == VIED_NCI_N_MEM_ID);
+			continue;
+		}
+		verifexit(((mem_id == vied_nci_cell_get_mem(cell_id, mem_index))
+			|| (mem_id == VIED_NCI_N_MEM_ID)));
+
+		IA_CSS_TRACE_4(PSYSAPI_DYNAMIC, INFO,
+			"\tinternal index %d, type %d, id %d offset 0x%x\n",
+			mem_index,
+			(int)vied_nci_cell_get_mem_type(cell_id, mem_index),
+			(int)mem_id,
+			process->int_mem_offset[mem_index]);
+	}
+
+	for (mem_index = 0; mem_index < (int)VIED_NCI_N_DATA_MEM_TYPE_ID;
+		mem_index++) {
+		vied_nci_mem_ID_t mem_id =
+			(vied_nci_mem_ID_t)(process->ext_mem_id[mem_index]);
+		/* TODO: in case of a cells_bitmap = [],
+		 * vied_nci_cell_get_mem_type will return a wrong result.
+ */ + IA_CSS_TRACE_4(PSYSAPI_DYNAMIC, INFO, + "\texternal index %d, type %d, id %d offset 0x%x\n", + mem_index, + (int)vied_nci_cell_get_mem_type(cell_id, mem_index), + (int)mem_id, + process->ext_mem_offset[mem_index]); + NOT_USED(mem_id); + } + for (dev_chn_index = 0; dev_chn_index < (int)VIED_NCI_N_DEV_CHN_ID; + dev_chn_index++) { + IA_CSS_TRACE_3(PSYSAPI_DYNAMIC, INFO, + "\tdevice channel index %d, type %d, offset 0x%x\n", + dev_chn_index, + (int)dev_chn_index, + process->dev_chn_offset[dev_chn_index]); + } +#if HAS_DFM + for (dev_chn_index = 0; dev_chn_index < (int)VIED_NCI_N_DEV_DFM_ID; + dev_chn_index++) { + IA_CSS_TRACE_4(PSYSAPI_DYNAMIC, INFO, + "\tdfm device index %d, type %d, bitmap 0x%x active_ports_bitmap 0x%x\n", + dev_chn_index, dev_chn_index, + process->dfm_port_bitmap[dev_chn_index], + process->dfm_active_port_bitmap[dev_chn_index]); + } +#endif + + for (i = 0; i < IA_CSS_PROCESS_MAX_CELLS; i++) { + IA_CSS_TRACE_2(PSYSAPI_DYNAMIC, INFO, + "\tcells[%d] = 0x%x\n", + i, ia_css_process_cells_get_cell(process, i)); + } + + cell_dependency_count = + ia_css_process_get_cell_dependency_count(process); + if (cell_dependency_count == 0) { + IA_CSS_TRACE_1(PSYSAPI_DYNAMIC, INFO, + "\tcell_dependencies[%d] {};\n", cell_dependency_count); + } else { + vied_nci_resource_id_t cell_dependency; + + IA_CSS_TRACE_1(PSYSAPI_DYNAMIC, INFO, + "\tcell_dependencies[%d] {", cell_dependency_count); + for (i = 0; i < (int)cell_dependency_count - 1; i++) { + cell_dependency = + ia_css_process_get_cell_dependency(process, i); + IA_CSS_TRACE_1(PSYSAPI_DYNAMIC, INFO, + "%4d, ", cell_dependency); + } + cell_dependency = + ia_css_process_get_cell_dependency(process, i); + IA_CSS_TRACE_1(PSYSAPI_DYNAMIC, INFO, + "%4d}\n", cell_dependency); + (void)cell_dependency; + } + + terminal_dependency_count = + ia_css_process_get_terminal_dependency_count(process); + if (terminal_dependency_count == 0) { + IA_CSS_TRACE_1(PSYSAPI_DYNAMIC, INFO, + "\tterminal_dependencies[%d] {};\n", + 
terminal_dependency_count); + } else { + uint8_t terminal_dependency; + + terminal_dependency_count = + ia_css_process_get_terminal_dependency_count(process); + IA_CSS_TRACE_1(PSYSAPI_DYNAMIC, INFO, + "\tterminal_dependencies[%d] {", + terminal_dependency_count); + for (i = 0; i < (int)terminal_dependency_count - 1; i++) { + terminal_dependency = + ia_css_process_get_terminal_dependency(process, i); + IA_CSS_TRACE_1(PSYSAPI_DYNAMIC, INFO, + "%4d, ", terminal_dependency); + } + terminal_dependency = + ia_css_process_get_terminal_dependency(process, i); + IA_CSS_TRACE_1(PSYSAPI_DYNAMIC, INFO, + "%4d}\n", terminal_dependency); + (void)terminal_dependency; + } + + retval = 0; +EXIT: + if (NULL == process) { + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, WARNING, + "ia_css_process_print invalid argument process\n"); + } + if (retval != 0) { + IA_CSS_TRACE_1(PSYSAPI_DYNAMIC, ERROR, + "ia_css_process_print failed (%i)\n", retval); + } + return retval; +} + +int ia_css_process_set_parent( + ia_css_process_t *process, + ia_css_process_group_t *parent) +{ + int retval = -1; + + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, VERBOSE, + "ia_css_process_set_parent(): enter:\n"); + + verifexit(process != NULL); + verifexit(parent != NULL); + + process->parent_offset = (uint16_t) ((char *)parent - (char *)process); + retval = 0; +EXIT: + if (NULL == process || NULL == parent) { + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, WARNING, + "ia_css_process_set_parent invalid argument\n"); + } + if (retval != 0) { + IA_CSS_TRACE_1(PSYSAPI_DYNAMIC, ERROR, + "ia_css_process_set_parent failed (%i)\n", retval); + } + return retval; +} + +int ia_css_process_set_cell_dependency( + const ia_css_process_t *process, + const unsigned int dep_index, + const vied_nci_resource_id_t id) +{ + int retval = -1; + uint8_t *process_dep_ptr; + + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, VERBOSE, + "ia_css_process_set_cell_dependency(): enter:\n"); + verifexit(process != NULL); + + process_dep_ptr = + (uint8_t *)process + process->cell_dependencies_offset + 
+ dep_index*sizeof(vied_nci_resource_id_t); + + + *process_dep_ptr = id; + retval = 0; +EXIT: + return retval; +} + +int ia_css_process_set_terminal_dependency( + const ia_css_process_t *process, + const unsigned int dep_index, + const vied_nci_resource_id_t id) +{ + int retval = -1; + uint8_t *terminal_dep_ptr; + + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, VERBOSE, + "ia_css_process_set_terminal_dependency(): enter:\n"); + verifexit(process != NULL); + verifexit(ia_css_process_get_terminal_dependency_count(process) > dep_index); + + terminal_dep_ptr = + (uint8_t *)process + process->terminal_dependencies_offset + + dep_index*sizeof(uint8_t); + + *terminal_dep_ptr = id; + retval = 0; +EXIT: + return retval; +} + +int ia_css_process_cmd( + ia_css_process_t *process, + const ia_css_process_cmd_t cmd) +{ + int retval = -1; + ia_css_process_state_t state; + + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, INFO, "ia_css_process_cmd(): enter:\n"); + + verifexit(process != NULL); + + state = ia_css_process_get_state(process); + + verifexit(state != IA_CSS_PROCESS_ERROR); + verifexit(state < IA_CSS_N_PROCESS_STATES); + + switch (cmd) { + case IA_CSS_PROCESS_CMD_NOP: + break; + case IA_CSS_PROCESS_CMD_ACQUIRE: + verifexit(state == IA_CSS_PROCESS_READY); + break; + case IA_CSS_PROCESS_CMD_RELEASE: + verifexit(state == IA_CSS_PROCESS_READY); + break; + case IA_CSS_PROCESS_CMD_START: + verifexit((state == IA_CSS_PROCESS_READY) + || (state == IA_CSS_PROCESS_STOPPED)); + process->state = IA_CSS_PROCESS_STARTED; + break; + case IA_CSS_PROCESS_CMD_LOAD: + verifexit(state == IA_CSS_PROCESS_STARTED); + process->state = IA_CSS_PROCESS_RUNNING; + break; + case IA_CSS_PROCESS_CMD_STOP: + verifexit((state == IA_CSS_PROCESS_RUNNING) + || (state == IA_CSS_PROCESS_SUSPENDED)); + process->state = IA_CSS_PROCESS_STOPPED; + break; + case IA_CSS_PROCESS_CMD_SUSPEND: + verifexit(state == IA_CSS_PROCESS_RUNNING); + process->state = IA_CSS_PROCESS_SUSPENDED; + break; + case IA_CSS_PROCESS_CMD_RESUME: + verifexit(state == 
IA_CSS_PROCESS_SUSPENDED); + process->state = IA_CSS_PROCESS_RUNNING; + break; + case IA_CSS_N_PROCESS_CMDS: /* Fall through */ + default: + IA_CSS_TRACE_1(PSYSAPI_DYNAMIC, ERROR, + "ia_css_process_cmd invalid cmd (0x%x)\n", cmd); + goto EXIT; + } + retval = 0; +EXIT: + if (NULL == process) { + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, WARNING, + "ia_css_process_cmd invalid argument process\n"); + } + if (retval != 0) { + IA_CSS_TRACE_1(PSYSAPI_DYNAMIC, ERROR, + "ia_css_process_cmd failed (%i)\n", retval); + } + return retval; +} diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/psysapi/dynamic/src/ia_css_psys_process_group.c b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/psysapi/dynamic/src/ia_css_psys_process_group.c new file mode 100644 index 0000000000000..46bb828041534 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/psysapi/dynamic/src/ia_css_psys_process_group.c @@ -0,0 +1,886 @@ +/* +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+*/
+
+#include "ia_css_psys_process_group.h"
+#include "ia_css_psys_dynamic_storage_class.h"
+
+/*
+ * Functions to possibly inline
+ */
+
+#ifndef __IA_CSS_PSYS_DYNAMIC_INLINE__
+#include "ia_css_psys_process_group_impl.h"
+#endif /* __IA_CSS_PSYS_DYNAMIC_INLINE__ */
+
+/*
+ * Functions not to inline
+ */
+
+/* This header is needed for cpu memset to 0
+* and process groups are not created in SP
+*/
+#if !defined(__VIED_CELL)
+#include "cpu_mem_support.h"
+#endif
+
+/* This source file is created with the intention of sharing and
+* compiled for host and firmware. Since there is no native 64bit
+* data type support for firmware this wouldn't compile for SP
+* tile. The parts of the file that are not compilable are marked
+* with the following __VIED_CELL marker and this comment. Once we
+* come up with a solution to address this issue this will be
+* removed.
+*/
+#if !defined(__VIED_CELL)
+static bool ia_css_process_group_is_program_enabled(
+	const ia_css_program_manifest_t *program_manifest,
+	ia_css_kernel_bitmap_t enable_bitmap)
+{
+	ia_css_kernel_bitmap_t program_bitmap =
+		ia_css_program_manifest_get_kernel_bitmap(program_manifest);
+	ia_css_program_type_t program_type =
+		ia_css_program_manifest_get_type(program_manifest);
+	ia_css_kernel_bitmap_t program_enable_bitmap;
+
+	if (!ia_css_is_kernel_bitmap_intersection_empty(enable_bitmap,
+				program_bitmap)) {
+
+		if (program_type == IA_CSS_PROGRAM_TYPE_EXCLUSIVE_SUB ||
+			program_type == IA_CSS_PROGRAM_TYPE_EXCLUSIVE_SUPER ||
+			program_type == IA_CSS_PROGRAM_TYPE_VIRTUAL_SUB) {
+			/*
+			 * EXCLUSIVE_SUB programs are subsets of
+			 * EXCLUSIVE_SUPER so the bits of the enable_bitmap
+			 * that refer to those are those of their
+			 * EXCLUSIVE_SUPER program (on which they depend) and
+			 * not the subset that their own program_bitmap has
+			 */
+			if (program_type ==
+				IA_CSS_PROGRAM_TYPE_EXCLUSIVE_SUB ||
+				program_type ==
+				IA_CSS_PROGRAM_TYPE_VIRTUAL_SUB) {
+				ia_css_kernel_bitmap_t super_program_bitmap;
+
+				const
ia_css_program_group_manifest_t * + prog_group_manifest = + ia_css_program_manifest_get_parent(program_manifest); + uint8_t super_prog_idx = + ia_css_program_manifest_get_program_dependency( + program_manifest, 0); + const ia_css_program_manifest_t * + super_program_manifest = + ia_css_program_group_manifest_get_prgrm_mnfst( + prog_group_manifest, super_prog_idx); + + verifexit(super_program_manifest != NULL); + if (((program_type == + IA_CSS_PROGRAM_TYPE_EXCLUSIVE_SUB) && + (ia_css_program_manifest_get_type( + super_program_manifest) != + IA_CSS_PROGRAM_TYPE_EXCLUSIVE_SUPER)) + || ((program_type == + IA_CSS_PROGRAM_TYPE_VIRTUAL_SUB) && + (ia_css_program_manifest_get_type( + super_program_manifest) != + IA_CSS_PROGRAM_TYPE_VIRTUAL_SUPER))) { + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, ERROR, + "ia_css_process_group_is_program_enabled(): Error\n"); + verifexit(0); + } + + super_program_bitmap = + ia_css_program_manifest_get_kernel_bitmap( + super_program_manifest); + program_enable_bitmap = + ia_css_kernel_bitmap_intersection( + enable_bitmap, + super_program_bitmap); + } else { + program_enable_bitmap = + ia_css_kernel_bitmap_intersection( + enable_bitmap, program_bitmap); + } + + if (ia_css_is_kernel_bitmap_equal( + program_enable_bitmap, program_bitmap)) { + return true; + } + } else if (program_type == IA_CSS_PROGRAM_TYPE_VIRTUAL_SUPER) { + /* + * Virtual super programs are not selectable + * only the virtual sub programs + */ + return false; + } else { + return true; + } + } + +EXIT: + return false; +} + +static bool ia_css_process_group_is_terminal_enabled( + const ia_css_terminal_manifest_t *terminal_manifest, + ia_css_kernel_bitmap_t enable_bitmap) +{ + ia_css_terminal_type_t terminal_type; + + verifjmpexit(NULL != terminal_manifest); + terminal_type = ia_css_terminal_manifest_get_type(terminal_manifest); + + if (ia_css_is_terminal_manifest_data_terminal(terminal_manifest)) { + ia_css_data_terminal_manifest_t *data_term_manifest = + (ia_css_data_terminal_manifest_t 
*)terminal_manifest; + ia_css_kernel_bitmap_t term_bitmap = + ia_css_data_terminal_manifest_get_kernel_bitmap( + data_term_manifest); + /* + * Terminals depend on a kernel, + * if the kernel is present the program it contains and + * the terminal the program depends on are active + */ + if (!ia_css_is_kernel_bitmap_intersection_empty( + enable_bitmap, term_bitmap)) { + return true; + } + } else if (ia_css_is_terminal_manifest_spatial_parameter_terminal( + terminal_manifest)) { + ia_css_kernel_bitmap_t term_kernel_bitmap = ia_css_kernel_bitmap_clear(); + ia_css_spatial_param_terminal_manifest_t *spatial_term_man = + (ia_css_spatial_param_terminal_manifest_t *) + terminal_manifest; + + term_kernel_bitmap = + ia_css_kernel_bitmap_set( + term_kernel_bitmap, + spatial_term_man->kernel_id); + if (!ia_css_is_kernel_bitmap_intersection_empty( + enable_bitmap, term_kernel_bitmap)) { + return true; + } + + } else if (ia_css_is_terminal_manifest_parameter_terminal( + terminal_manifest) && terminal_type == + IA_CSS_TERMINAL_TYPE_PARAM_CACHED_IN) { + return true; + + } else if (ia_css_is_terminal_manifest_parameter_terminal( + terminal_manifest) && terminal_type == + IA_CSS_TERMINAL_TYPE_PARAM_CACHED_OUT) { + /* + * For parameter out terminals, we disable the terminals + * if ALL the corresponding kernels are disabled, + * for parameter in terminals we cannot do this; + * even if kernels are disabled, it may be required that + * (HW) parameters must be supplied via the parameter + * in terminal (e.g. bypass bits). 
+ */ + ia_css_kernel_bitmap_t term_kernel_bitmap = ia_css_kernel_bitmap_clear(); + ia_css_param_terminal_manifest_t *param_term_man = + (ia_css_param_terminal_manifest_t *)terminal_manifest; + ia_css_param_manifest_section_desc_t *section_desc; + unsigned int section = 0; + + for (section = 0; section < param_term_man-> + param_manifest_section_desc_count; section++) { + section_desc = + ia_css_param_terminal_manifest_get_prm_sct_desc( + param_term_man, section); + verifjmpexit(section_desc != NULL); + term_kernel_bitmap = ia_css_kernel_bitmap_set( + term_kernel_bitmap, + section_desc->kernel_id); + } + + if (!ia_css_is_kernel_bitmap_intersection_empty( + enable_bitmap, term_kernel_bitmap)) { + return true; + } + } else if (ia_css_is_terminal_manifest_program_terminal( + terminal_manifest)) { + return true; + } else if (ia_css_is_terminal_manifest_program_control_init_terminal( + terminal_manifest)) { + return true; + } +EXIT: + return false; +} + +size_t ia_css_sizeof_process_group( + const ia_css_program_group_manifest_t *manifest, + const ia_css_program_group_param_t *param) +{ + size_t size = 0, tmp_size; + int i, error_val = -1; + uint8_t process_count, process_num; + uint8_t terminal_count; + ia_css_kernel_bitmap_t enable_bitmap; + + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, VERBOSE, + "ia_css_sizeof_process_group(): enter:\n"); + + verifexit(manifest != NULL); + verifexit(param != NULL); + + COMPILATION_ERROR_IF( + SIZE_OF_PROCESS_GROUP_STRUCT_BITS != + (CHAR_BIT * sizeof(ia_css_process_group_t))); + + COMPILATION_ERROR_IF(0 != + sizeof(ia_css_process_group_t) % sizeof(uint64_t)); + + process_count = + ia_css_process_group_compute_process_count(manifest, param); + terminal_count = + ia_css_process_group_compute_terminal_count(manifest, param); + + verifexit(process_count != 0); + verifexit(terminal_count != 0); + + size += sizeof(ia_css_process_group_t); + + tmp_size = process_count * sizeof(uint16_t); + size += tot_bytes_for_pow2_align(sizeof(uint64_t), tmp_size); + 
+ tmp_size = terminal_count * sizeof(uint16_t); + size += tot_bytes_for_pow2_align(sizeof(uint64_t), tmp_size); + + enable_bitmap = + ia_css_program_group_param_get_kernel_enable_bitmap(param); + process_num = 0; + for (i = 0; i < (int)ia_css_program_group_manifest_get_program_count( + manifest); i++) { + ia_css_program_manifest_t *program_manifest = + ia_css_program_group_manifest_get_prgrm_mnfst(manifest, i); + ia_css_program_param_t *program_param = + ia_css_program_group_param_get_program_param(param, i); + + if (ia_css_process_group_is_program_enabled( + program_manifest, enable_bitmap)) { + verifexit(process_num < process_count); + size += ia_css_sizeof_process( + program_manifest, program_param); + process_num++; + } + } + + verifexit(process_num == process_count); + + for (i = 0; i < (int)ia_css_program_group_manifest_get_terminal_count( + manifest); i++) { + ia_css_terminal_manifest_t *terminal_manifest = + ia_css_program_group_manifest_get_term_mnfst( + manifest, i); + + if (ia_css_process_group_is_terminal_enabled( + terminal_manifest, enable_bitmap)) { + size += ia_css_sizeof_terminal( + terminal_manifest, param); + } + } + + error_val = 0; + +EXIT: + if (NULL == manifest || NULL == param) { + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, WARNING, + "ia_css_sizeof_process_group invalid argument\n"); + } + if (error_val != 0) { + IA_CSS_TRACE_1(PSYSAPI_DYNAMIC, ERROR, + "ia_css_sizeof_process_group ERROR(%d)\n", error_val); + } + return size; +} + +ia_css_process_group_t *ia_css_process_group_create( + void *process_grp_mem, + const ia_css_program_group_manifest_t *manifest, + const ia_css_program_group_param_t *param) +{ + size_t size = ia_css_sizeof_process_group(manifest, param); + int retval = -1; + int ret; + int i; + ia_css_process_group_t *process_group = NULL; + uint8_t process_count, process_num; + uint8_t terminal_count, terminal_num; + uint16_t fragment_count; + char *process_grp_raw_ptr; + uint16_t *process_tab_ptr, *terminal_tab_ptr; + 
ia_css_kernel_bitmap_t enable_bitmap; + uint8_t manifest_terminal_count; + + IA_CSS_TRACE_3(PSYSAPI_DYNAMIC, INFO, + "ia_css_process_group_create(process_grp_mem %p, manifest %p, group_param %p): enter:\n", + process_grp_mem, manifest, param); + + verifexit(process_grp_mem != NULL); + verifexit(manifest != NULL); + verifexit(param != NULL); + verifexit(ia_css_is_program_group_manifest_valid(manifest)); + + process_group = (ia_css_process_group_t *)process_grp_mem; + ia_css_cpu_mem_set_zero(process_group, size); + process_grp_raw_ptr = (char *) process_group; + + process_group->state = IA_CSS_PROCESS_GROUP_CREATED; + + process_group->protocol_version = + ia_css_program_group_param_get_protocol_version(param); + + fragment_count = ia_css_program_group_param_get_fragment_count(param); + process_count = + ia_css_process_group_compute_process_count(manifest, param); + terminal_count = + ia_css_process_group_compute_terminal_count(manifest, param); + enable_bitmap = + ia_css_program_group_param_get_kernel_enable_bitmap(param); + + process_group->fragment_count = fragment_count; + process_group->process_count = process_count; + process_group->terminal_count = terminal_count; + + process_grp_raw_ptr += sizeof(ia_css_process_group_t); + process_tab_ptr = (uint16_t *) process_grp_raw_ptr; + process_group->processes_offset = + (uint16_t)(process_grp_raw_ptr - (char *)process_group); + + process_grp_raw_ptr += tot_bytes_for_pow2_align( + sizeof(uint64_t), process_count * sizeof(uint16_t)); + terminal_tab_ptr = (uint16_t *) process_grp_raw_ptr; + process_group->terminals_offset = + (uint16_t)(process_grp_raw_ptr - (char *)process_group); + + /* Move raw pointer to the first process */ + process_grp_raw_ptr += tot_bytes_for_pow2_align( + sizeof(uint64_t), terminal_count * sizeof(uint16_t)); + + /* Set default */ + verifexit(ia_css_process_group_set_fragment_limit( + process_group, fragment_count) == 0); + + /* Set process group terminal dependency list */ + /* This list is used 
during creating the process dependency list */ + manifest_terminal_count = + ia_css_program_group_manifest_get_terminal_count(manifest); + + terminal_num = 0; + for (i = 0; i < (int)manifest_terminal_count; i++) { + ia_css_terminal_manifest_t *t_manifest = + ia_css_program_group_manifest_get_term_mnfst( + manifest, i); + + verifexit(NULL != t_manifest); + if (ia_css_process_group_is_terminal_enabled( + t_manifest, enable_bitmap)) { + ia_css_terminal_t *terminal = NULL; + ia_css_terminal_param_t *terminal_param = + ia_css_program_group_param_get_terminal_param( + param, i); + + verifexit(NULL != terminal_param); + terminal_tab_ptr[terminal_num] = + (uint16_t)(process_grp_raw_ptr - + (char *)process_group); + terminal = ia_css_terminal_create( + process_grp_raw_ptr, t_manifest, + terminal_param, enable_bitmap); + verifexit(terminal != NULL); + verifexit((ia_css_terminal_set_parent( + terminal, process_group) == 0)); + verifexit((ia_css_terminal_set_terminal_manifest_index( + terminal, i) == 0)); + IA_CSS_TRACE_1(PSYSAPI_DYNAMIC, INFO, + "ia_css_process_group_create: terminal_manifest_index %d\n", + i); + + process_grp_raw_ptr += ia_css_terminal_get_size( + terminal); + terminal_num++; + } + } + verifexit(terminal_num == terminal_count); + + process_num = 0; + for (i = 0; i < (int)ia_css_program_group_manifest_get_program_count( + manifest); i++) { + ia_css_process_t *process = NULL; + ia_css_program_manifest_t *program_manifest = + ia_css_program_group_manifest_get_prgrm_mnfst( + manifest, i); + ia_css_program_param_t *program_param = + ia_css_program_group_param_get_program_param(param, i); + unsigned int prog_dep_index, proc_dep_index; + unsigned int term_dep_index, term_index; + + if (ia_css_process_group_is_program_enabled( + program_manifest, enable_bitmap)) { + + verifexit(process_num < process_count); + + process_tab_ptr[process_num] = + (uint16_t)(process_grp_raw_ptr - + (char *)process_group); + process = ia_css_process_create( + process_grp_raw_ptr, + 
program_manifest, + program_param, + i); + verifexit(process != NULL); + + ia_css_process_set_parent(process, process_group); + if (ia_css_has_program_manifest_fixed_cell( + program_manifest)) { + vied_nci_cell_ID_t cell_id = + ia_css_program_manifest_get_cell_ID( + program_manifest); + + IA_CSS_TRACE_1(PSYSAPI_DYNAMIC, INFO, + "ia_css_process_group_create: cell_id %d\n", + cell_id); + ia_css_process_set_cell(process, cell_id); + } + + process_grp_raw_ptr += ia_css_process_get_size( + process); + /* + * Set process dependencies of process derived + * from program manifest + */ + for (prog_dep_index = 0; prog_dep_index < + ia_css_program_manifest_get_program_dependency_count( + program_manifest); prog_dep_index++) { + uint8_t dep_prog_idx = + ia_css_program_manifest_get_program_dependency( + program_manifest, prog_dep_index); + const ia_css_program_manifest_t * + dep_prg_manifest = + ia_css_program_group_manifest_get_prgrm_mnfst( + manifest, dep_prog_idx); + ia_css_program_ID_t id = + ia_css_program_manifest_get_program_ID( + dep_prg_manifest); + + verifexit(id != 0); + for (proc_dep_index = 0; + proc_dep_index < process_num; + proc_dep_index++) { + ia_css_process_t *dep_process = + ia_css_process_group_get_process( + process_group, + proc_dep_index); + + ia_css_process_set_cell_dependency( + process, + prog_dep_index, 0); + + if (ia_css_process_get_program_ID( + dep_process) == id) { + ia_css_process_set_cell_dependency( + process, + prog_dep_index, + proc_dep_index); + break; + } + } + } + process_num++; + + /* + * Set terminal dependencies of process derived + * from program manifest + */ + for (term_dep_index = 0; term_dep_index < + ia_css_program_manifest_get_terminal_dependency_count( + program_manifest); term_dep_index++) { + uint8_t pm_term_index = + ia_css_program_manifest_get_terminal_dependency + (program_manifest, term_dep_index); + + verifexit(pm_term_index < manifest_terminal_count); + IA_CSS_TRACE_2(PSYSAPI_DYNAMIC, INFO, + 
"ia_css_process_group_create(): term_dep_index: %d, pm_term_index: %d\n", + term_dep_index, pm_term_index); + for (term_index = 0; + term_index < terminal_count; + term_index++) { + ia_css_terminal_t *terminal = + ia_css_process_group_get_terminal( + process_group, + term_index); + + if (ia_css_terminal_get_terminal_manifest_index + (terminal) == pm_term_index) { + ia_css_process_set_terminal_dependency( + process, + term_dep_index, + term_index); + IA_CSS_TRACE_3(PSYSAPI_DYNAMIC, INFO, + "ia_css_process_group_create() set_terminal_dependency(process: %d, dep_idx: %d, term_idx: %d)\n", + i, term_dep_index, term_index); + + break; + } + } + } + } + } + verifexit(process_num == process_count); + + process_group->size = + (uint32_t)ia_css_sizeof_process_group(manifest, param); + process_group->ID = + ia_css_program_group_manifest_get_program_group_ID(manifest); + + /* Initialize performance measurement fields to zero */ + process_group->pg_load_start_ts = 0; + process_group->pg_load_cycles = 0; + process_group->pg_init_cycles = 0; + process_group->pg_processing_cycles = 0; + + verifexit(process_group->ID != 0); + + ret = ia_css_process_group_on_create(process_group, manifest, param); + verifexit(ret == 0); + + process_group->state = IA_CSS_PROCESS_GROUP_READY; + retval = 0; + + IA_CSS_TRACE_1(PSYSAPI_DYNAMIC, INFO, + "ia_css_process_group_create(): Created successfully process group ID 0x%x\n", + process_group->ID); + +EXIT: + if (NULL == process_grp_mem || NULL == manifest || NULL == param) { + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, WARNING, + "ia_css_process_group_create invalid argument\n"); + } + if (retval != 0) { + IA_CSS_TRACE_1(PSYSAPI_DYNAMIC, ERROR, + "ia_css_process_group_create failed (%i)\n", retval); + process_group = ia_css_process_group_destroy(process_group); + } + return process_group; +} + +ia_css_process_group_t *ia_css_process_group_destroy( + ia_css_process_group_t *process_group) +{ + if (process_group != NULL) { + 
ia_css_process_group_on_destroy(process_group); + process_group = NULL; + } else { + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, WARNING, + "ia_css_process_group_destroy invalid argument\n"); + } + return process_group; +} + +int ia_css_process_group_submit( + ia_css_process_group_t *process_group) +{ + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, INFO, + "ia_css_process_group_submit(): enter:\n"); + + return ia_css_process_group_exec_cmd(process_group, + IA_CSS_PROCESS_GROUP_CMD_SUBMIT); +} + +int ia_css_process_group_start( + ia_css_process_group_t *process_group) +{ + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, INFO, + "ia_css_process_group_start(): enter:\n"); + + return ia_css_process_group_exec_cmd(process_group, + IA_CSS_PROCESS_GROUP_CMD_START); +} + +int ia_css_process_group_stop( + ia_css_process_group_t *process_group) +{ + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, INFO, + "ia_css_process_group_stop(): enter:\n"); + + return ia_css_process_group_exec_cmd(process_group, + IA_CSS_PROCESS_GROUP_CMD_STOP); +} + +int ia_css_process_group_run( + ia_css_process_group_t *process_group) +{ + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, INFO, + "ia_css_process_group_run(): enter:\n"); + + return ia_css_process_group_exec_cmd(process_group, + IA_CSS_PROCESS_GROUP_CMD_RUN); +} + +int ia_css_process_group_suspend( + ia_css_process_group_t *process_group) +{ + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, INFO, + "ia_css_process_group_suspend(): enter:\n"); + + return ia_css_process_group_exec_cmd(process_group, + IA_CSS_PROCESS_GROUP_CMD_SUSPEND); +} + +int ia_css_process_group_resume( + ia_css_process_group_t *process_group) +{ + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, INFO, + "ia_css_process_group_resume(): enter:\n"); + + return ia_css_process_group_exec_cmd(process_group, + IA_CSS_PROCESS_GROUP_CMD_RESUME); +} + +int ia_css_process_group_reset( + ia_css_process_group_t *process_group) +{ + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, INFO, + "ia_css_process_group_reset(): enter:\n"); + + return ia_css_process_group_exec_cmd(process_group, + 
IA_CSS_PROCESS_GROUP_CMD_RESET); +} + +int ia_css_process_group_abort( + ia_css_process_group_t *process_group) +{ + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, INFO, + "ia_css_process_group_abort(): enter:\n"); + + return ia_css_process_group_exec_cmd(process_group, + IA_CSS_PROCESS_GROUP_CMD_ABORT); +} + +int ia_css_process_group_disown( + ia_css_process_group_t *process_group) +{ + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, INFO, + "ia_css_process_group_disown(): enter:\n"); + + return ia_css_process_group_exec_cmd(process_group, + IA_CSS_PROCESS_GROUP_CMD_DISOWN); +} + +extern uint64_t ia_css_process_group_get_token( + ia_css_process_group_t *process_group) +{ + uint64_t token = 0; + + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, VERBOSE, + "ia_css_process_group_get_token(): enter:\n"); + + verifexit(process_group != NULL); + + token = process_group->token; + +EXIT: + if (NULL == process_group) { + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, WARNING, + "ia_css_process_group_get_token invalid argument\n"); + } + return token; +} + +int ia_css_process_group_set_token( + ia_css_process_group_t *process_group, + const uint64_t token) +{ + int retval = -1; + + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, VERBOSE, + "ia_css_process_group_set_token(): enter:\n"); + + verifexit(process_group != NULL); + verifexit(token != 0); + + process_group->token = token; + + retval = 0; +EXIT: + if (NULL == process_group || 0 == token) { + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, WARNING, + "ia_css_process_group_set_token invalid argument\n"); + } + if (retval != 0) { + IA_CSS_TRACE_1(PSYSAPI_DYNAMIC, ERROR, + "ia_css_process_group_set_token failed (%i)\n", + retval); + } + return retval; +} + +extern uint64_t ia_css_process_group_get_private_token( + ia_css_process_group_t *process_group) +{ + uint64_t token = 0; + + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, VERBOSE, + "ia_css_process_group_get_private_token(): enter:\n"); + + verifexit(process_group != NULL); + + token = process_group->private_token; + +EXIT: + if (NULL == process_group) { + 
IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, WARNING, + "ia_css_process_group_get_private_token invalid argument\n"); + } + return token; +} + +int ia_css_process_group_set_private_token( + ia_css_process_group_t *process_group, + const uint64_t token) +{ + int retval = -1; + + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, VERBOSE, + "ia_css_process_group_set_private_token(): enter:\n"); + + verifexit(process_group != NULL); + verifexit(token != 0); + + process_group->private_token = token; + + retval = 0; +EXIT: + if (NULL == process_group || 0 == token) { + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, WARNING, + "ia_css_process_group_set_private_token invalid argument\n"); + } + if (retval != 0) { + IA_CSS_TRACE_1(PSYSAPI_DYNAMIC, ERROR, + "ia_css_process_group_set_private_token failed (%i)\n", + retval); + } + return retval; +} + +uint8_t ia_css_process_group_compute_process_count( + const ia_css_program_group_manifest_t *manifest, + const ia_css_program_group_param_t *param) +{ + uint8_t process_count = 0; + ia_css_kernel_bitmap_t total_bitmap; + ia_css_kernel_bitmap_t enable_bitmap; + int i; + + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, VERBOSE, + "ia_css_process_group_compute_process_count(): enter:\n"); + + verifexit(manifest != NULL); + verifexit(param != NULL); + + total_bitmap = + ia_css_program_group_manifest_get_kernel_bitmap(manifest); + enable_bitmap = + ia_css_program_group_param_get_kernel_enable_bitmap(param); + + verifexit(ia_css_is_program_group_manifest_valid(manifest)); + verifexit(ia_css_is_kernel_bitmap_subset(total_bitmap, enable_bitmap)); + verifexit(!ia_css_is_kernel_bitmap_empty(enable_bitmap)); + + for (i = 0; i < + (int)ia_css_program_group_manifest_get_program_count(manifest); + i++) { + ia_css_program_manifest_t *program_manifest = + ia_css_program_group_manifest_get_prgrm_mnfst( + manifest, i); + ia_css_kernel_bitmap_t program_bitmap = + ia_css_program_manifest_get_kernel_bitmap( + program_manifest); + /* + * Programs can be orthogonal, + * a mutually exclusive subset, + * or a 
concurrent subset + */ + if (!ia_css_is_kernel_bitmap_intersection_empty(enable_bitmap, + program_bitmap)) { + ia_css_program_type_t program_type = + ia_css_program_manifest_get_type( + program_manifest); + /* + * An exclusive subnode < exclusive supernode, + * so simply don't count it + */ + if (program_type != + IA_CSS_PROGRAM_TYPE_EXCLUSIVE_SUB && + program_type != + IA_CSS_PROGRAM_TYPE_VIRTUAL_SUB) { + process_count++; + } + } + } + +EXIT: + if (NULL == manifest || NULL == param) { + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, WARNING, + "ia_css_process_group_compute_process_count invalid argument\n"); + } + return process_count; +} + +uint8_t ia_css_process_group_compute_terminal_count( + const ia_css_program_group_manifest_t *manifest, + const ia_css_program_group_param_t *param) +{ + uint8_t terminal_count = 0; + ia_css_kernel_bitmap_t total_bitmap, enable_bitmap; + int i; + + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, VERBOSE, + "ia_css_process_group_compute_terminal_count(): enter:\n"); + + verifexit(manifest != NULL); + verifexit(param != NULL); + + total_bitmap = + ia_css_program_group_manifest_get_kernel_bitmap(manifest); + enable_bitmap = + ia_css_program_group_param_get_kernel_enable_bitmap(param); + + verifexit(ia_css_is_program_group_manifest_valid(manifest)); + verifexit(ia_css_is_kernel_bitmap_subset(total_bitmap, enable_bitmap)); + verifexit(!ia_css_is_kernel_bitmap_empty(enable_bitmap)); + + for (i = 0; i < + (int)ia_css_program_group_manifest_get_terminal_count( + manifest); i++) { + ia_css_terminal_manifest_t *tmanifest = + ia_css_program_group_manifest_get_term_mnfst( + manifest, i); + + if (ia_css_process_group_is_terminal_enabled( + tmanifest, enable_bitmap)) { + terminal_count++; + } + } + +EXIT: + if (NULL == manifest || NULL == param) { + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, WARNING, + "ia_css_process_group_compute_terminal_count invalid argument\n"); + } + return terminal_count; +} +#endif /* !defined(__VIED_CELL) */ diff --git 
a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/psysapi/dynamic/src/ia_css_psys_process_group_impl.h b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/psysapi/dynamic/src/ia_css_psys_process_group_impl.h new file mode 100644 index 0000000000000..f99602dc3c9e2 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/psysapi/dynamic/src/ia_css_psys_process_group_impl.h @@ -0,0 +1,1538 @@ +/* +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ + +#ifndef __IA_CSS_PSYS_PROCESS_GROUP_IMPL_H +#define __IA_CSS_PSYS_PROCESS_GROUP_IMPL_H + +#include +#include +#include "ia_css_psys_process_group_cmd_impl.h" +#include +#include +#include +#include +#include +#include +#include "ia_css_terminal_manifest_types.h" + +#include "ia_css_rbm.h" + +#include /* ia_css_kernel_bitmap_t */ + +#include +#include +#include "ia_css_rbm_manifest_types.h" +#include +#include +#include + +#include "ia_css_psys_dynamic_trace.h" + +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_C +uint16_t ia_css_process_group_get_fragment_limit( + const ia_css_process_group_t *process_group) +{ + DECLARE_ERRVAL + uint16_t fragment_limit = 0; + + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, VERBOSE, + "ia_css_process_group_get_fragment_limit(): enter:\n"); + + verifexitval(process_group != NULL, EFAULT); + + fragment_limit = process_group->fragment_limit; + +EXIT: + if (haserror(EFAULT)) { + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, ERROR, + "ia_css_process_group_get_fragment_limit invalid argument\n"); + } + 
return fragment_limit; +} + +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_C +int ia_css_process_group_set_fragment_limit( + ia_css_process_group_t *process_group, + const uint16_t fragment_limit) +{ + DECLARE_ERRVAL + int retval = -1; + uint16_t fragment_state; + + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, VERBOSE, + "ia_css_process_group_set_fragment_limit(): enter:\n"); + + verifexitval(process_group != NULL, EFAULT); + + retval = ia_css_process_group_get_fragment_state(process_group, + &fragment_state); + + verifexitval(retval == 0, EINVAL); + verifexitval(fragment_limit > fragment_state, EINVAL); + verifexitval(fragment_limit <= ia_css_process_group_get_fragment_count( + process_group), EINVAL); + + process_group->fragment_limit = fragment_limit; + + retval = 0; +EXIT: + if (haserror(EFAULT)) { + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, ERROR, + "ia_css_process_group_get_fragment_limit invalid argument process_group\n"); + } + if (!noerror()) { + IA_CSS_TRACE_1(PSYSAPI_DYNAMIC, ERROR, + "ia_css_process_group_get_fragment_limit failed (%i)\n", + retval); + } + return retval; +} + +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_C +int ia_css_process_group_clear_fragment_limit( + ia_css_process_group_t *process_group) +{ + DECLARE_ERRVAL + int retval = -1; + + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, VERBOSE, + "ia_css_process_group_clear_fragment_limit(): enter:\n"); + + verifexitval(process_group != NULL, EFAULT); + process_group->fragment_limit = 0; + + retval = 0; +EXIT: + if (haserror(EFAULT)) { + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, ERROR, + "ia_css_process_group_clear_fragment_limit invalid argument process_group\n"); + } + if (!noerror()) { + IA_CSS_TRACE_1(PSYSAPI_DYNAMIC, ERROR, + "ia_css_process_group_clear_fragment_limit failed (%i)\n", + retval); + } + return retval; +} + +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_C +int ia_css_process_group_attach_buffer( + ia_css_process_group_t *process_group, + vied_vaddress_t buffer, + const ia_css_buffer_state_t buffer_state, + const unsigned int terminal_index) +{ + 
DECLARE_ERRVAL + int retval = -1; + ia_css_terminal_t *terminal = NULL; + + NOT_USED(buffer_state); + + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, INFO, + "ia_css_process_group_attach_buffer(): enter:\n"); + + verifexitval(process_group != NULL, EFAULT); + + terminal = ia_css_process_group_get_terminal( + process_group, terminal_index); + + verifexitval(terminal != NULL, EINVAL); + verifexitval(ia_css_process_group_get_state(process_group) == + IA_CSS_PROCESS_GROUP_READY, EINVAL); + verifexitval(process_group->protocol_version == + IA_CSS_PROCESS_GROUP_PROTOCOL_LEGACY || + process_group->protocol_version == + IA_CSS_PROCESS_GROUP_PROTOCOL_PPG, EINVAL); + + if (process_group->protocol_version == + IA_CSS_PROCESS_GROUP_PROTOCOL_LEGACY) { + /* + * Legacy flow: + * Terminal address is part of the process group structure + */ + retval = ia_css_terminal_set_buffer( + terminal, buffer); + } else if (process_group->protocol_version == + IA_CSS_PROCESS_GROUP_PROTOCOL_PPG) { + /* + * PPG flow: + * Terminal address is part of external buffer set structure + */ + retval = ia_css_terminal_set_terminal_index( + terminal, terminal_index); + } + verifexitval(retval == 0, EFAULT); + + IA_CSS_TRACE_2(PSYSAPI_DYNAMIC, INFO, + "\tTerminal %p has buffer 0x%x\n", terminal, buffer); + + if (ia_css_is_terminal_data_terminal(terminal) == true) { + ia_css_frame_t *frame = + ia_css_data_terminal_get_frame( + (ia_css_data_terminal_t *)terminal); + verifexitval(frame != NULL, EINVAL); + + retval = ia_css_frame_set_buffer_state(frame, buffer_state); + verifexitval(retval == 0, EINVAL); + } + + retval = 0; + +EXIT: + if (haserror(EFAULT)) { + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, ERROR, + "ia_css_process_group_attach_buffer invalid argument process_group\n"); + } + if (!noerror()) { + IA_CSS_TRACE_1(PSYSAPI_DYNAMIC, ERROR, + "ia_css_process_group_attach_buffer failed (%i)\n", + retval); + } + return retval; +} + +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_C +vied_vaddress_t ia_css_process_group_detach_buffer( + 
ia_css_process_group_t *process_group, + const unsigned int terminal_index) +{ + DECLARE_ERRVAL + int retval = -1; + vied_vaddress_t buffer = VIED_NULL; + + ia_css_terminal_t *terminal = NULL; + ia_css_process_group_state_t state; + + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, INFO, + "ia_css_process_group_detach_buffer(): enter:\n"); + + verifexitval(process_group != NULL, EFAULT); + + terminal = + ia_css_process_group_get_terminal( + process_group, terminal_index); + state = ia_css_process_group_get_state(process_group); + + verifexitval(terminal != NULL, EINVAL); + verifexitval(state == IA_CSS_PROCESS_GROUP_READY, EINVAL); + + buffer = ia_css_terminal_get_buffer(terminal); + + if (ia_css_is_terminal_data_terminal(terminal) == true) { + ia_css_frame_t *frame = + ia_css_data_terminal_get_frame( + (ia_css_data_terminal_t *)terminal); + verifexitval(frame != NULL, EINVAL); + + retval = ia_css_frame_set_buffer_state(frame, IA_CSS_BUFFER_NULL); + verifexitval(retval == 0, EINVAL); + } + ia_css_terminal_set_buffer(terminal, VIED_NULL); + + retval = 0; +EXIT: + /* + * buffer pointer will appear on output, + * regardless of subsequent fails to avoid memory leaks + */ + if (haserror(EFAULT)) { + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, ERROR, + "ia_css_process_group_detach_buffer invalid argument process_group\n"); + } + if (!noerror()) { + IA_CSS_TRACE_1(PSYSAPI_DYNAMIC, ERROR, + "ia_css_process_group_detach_buffer failed (%i)\n", + retval); + } + return buffer; +} + +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_C +int ia_css_process_group_attach_stream( + ia_css_process_group_t *process_group, + uint32_t stream, + const ia_css_buffer_state_t buffer_state, + const unsigned int terminal_index) +{ + int retval = -1; + + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, INFO, + "ia_css_process_group_attach_stream(): enter:\n"); + + NOT_USED(process_group); + NOT_USED(stream); + NOT_USED(buffer_state); + NOT_USED(terminal_index); + + if (retval != 0) { + IA_CSS_TRACE_1(PSYSAPI_DYNAMIC, ERROR, + 
"ia_css_process_group_attach_stream failed (%i)\n", + retval); + } + return retval; +} + +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_C +uint32_t ia_css_process_group_detach_stream( + ia_css_process_group_t *process_group, + const unsigned int terminal_index) +{ + int retval = -1; + uint32_t stream = 0; + + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, INFO, + "ia_css_process_group_detach_stream(): enter:\n"); + + NOT_USED(process_group); + NOT_USED(terminal_index); + + if (retval != 0) { + IA_CSS_TRACE_1(PSYSAPI_DYNAMIC, ERROR, + "ia_css_process_group_detach_stream failed (%i)\n", + retval); + } + return stream; +} + +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_C +int ia_css_process_group_set_barrier( + ia_css_process_group_t *process_group, + const vied_nci_barrier_ID_t barrier_index) +{ + DECLARE_ERRVAL + int retval = -1; + vied_nci_resource_bitmap_t bit_mask; + vied_nci_resource_bitmap_t resource_bitmap; + + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, VERBOSE, + "ia_css_process_group_set_barrier(): enter:\n"); + + verifexitval(process_group != NULL, EFAULT); + + resource_bitmap = + ia_css_process_group_get_resource_bitmap(process_group); + + bit_mask = vied_nci_barrier_bit_mask(barrier_index); + + verifexitval(bit_mask != 0, EINVAL); + verifexitval(vied_nci_is_bitmap_clear(bit_mask, resource_bitmap), EINVAL); + + resource_bitmap = vied_nci_bitmap_set(resource_bitmap, bit_mask); + + retval = + ia_css_process_group_set_resource_bitmap( + process_group, resource_bitmap); +EXIT: + if (haserror(EFAULT)) { + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, ERROR, + "ia_css_process_group_set_barrier invalid argument process_group\n"); + } + if (!noerror()) { + IA_CSS_TRACE_1(PSYSAPI_DYNAMIC, ERROR, + "ia_css_process_group_set_barrier failed (%i)\n", + retval); + } + return retval; +} + +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_C +int ia_css_process_group_clear_barrier( + ia_css_process_group_t *process_group, + const vied_nci_barrier_ID_t barrier_index) +{ + DECLARE_ERRVAL + int retval = -1; + vied_nci_resource_bitmap_t bit_mask, 
resource_bitmap; + + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, VERBOSE, + "ia_css_process_group_clear_barrier(): enter:\n"); + + verifexitval(process_group != NULL, EFAULT); + + resource_bitmap = + ia_css_process_group_get_resource_bitmap(process_group); + + bit_mask = vied_nci_barrier_bit_mask(barrier_index); + + verifexitval(bit_mask != 0, EINVAL); + verifexitval(vied_nci_is_bitmap_set(bit_mask, resource_bitmap), EINVAL); + + resource_bitmap = vied_nci_bitmap_clear(resource_bitmap, bit_mask); + + retval = + ia_css_process_group_set_resource_bitmap( + process_group, resource_bitmap); +EXIT: + if (haserror(EFAULT)) { + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, ERROR, + "ia_css_process_group_clear_barrier invalid argument process_group\n"); + } + if (!noerror()) { + IA_CSS_TRACE_1(PSYSAPI_DYNAMIC, ERROR, + "ia_css_process_group_clear_barrier failed (%i)\n", + retval); + } + return retval; +} + +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_C +int ia_css_process_group_print( + const ia_css_process_group_t *process_group, + void *fid) +{ + DECLARE_ERRVAL + int retval = -1; + int i; + + uint8_t process_count; + uint8_t terminal_count; + vied_vaddress_t ipu_vaddress = VIED_NULL; + ia_css_rbm_t routing_bitmap; + + NOT_USED(fid); + + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, VERBOSE, + "ia_css_process_group_print(): enter:\n"); + + verifexitval(process_group != NULL, EFAULT); + retval = ia_css_process_group_get_ipu_vaddress(process_group, &ipu_vaddress); + verifexitval(retval == 0, EINVAL); + + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, INFO, + "=============== Process group print start ===============\n"); + IA_CSS_TRACE_1(PSYSAPI_DYNAMIC, INFO, + "\tprocess_group cpu address = %p\n", process_group); + IA_CSS_TRACE_1(PSYSAPI_DYNAMIC, INFO, + "\tipu_virtual_address = %#x\n", ipu_vaddress); + IA_CSS_TRACE_1(PSYSAPI_DYNAMIC, INFO, + "\tsizeof(process_group) = %d\n", + (int)ia_css_process_group_get_size(process_group)); + IA_CSS_TRACE_1(PSYSAPI_DYNAMIC, INFO, + "\tfragment_count = %d\n", + 
(int)ia_css_process_group_get_fragment_count(process_group)); + + routing_bitmap = *ia_css_process_group_get_routing_bitmap(process_group); + for (i = 0; i < (int)IA_CSS_RBM_NOF_ELEMS; i++) { + IA_CSS_TRACE_2(PSYSAPI_DYNAMIC, INFO, + "\trouting_bitmap[index = %d] = 0x%X\n", + i, (int)routing_bitmap.data[i]); + } + + IA_CSS_TRACE_1(PSYSAPI_DYNAMIC, INFO, + "\tprogram_group(process_group) = %d\n", + (int)ia_css_process_group_get_program_group_ID(process_group)); + process_count = ia_css_process_group_get_process_count(process_group); + terminal_count = + ia_css_process_group_get_terminal_count(process_group); + + IA_CSS_TRACE_1(PSYSAPI_DYNAMIC, INFO, + "\t%d processes\n", (int)process_count); + for (i = 0; i < (int)process_count; i++) { + ia_css_process_t *process = + ia_css_process_group_get_process(process_group, i); + + retval = ia_css_process_print(process, fid); + verifjmpexit(retval == 0); + } + IA_CSS_TRACE_1(PSYSAPI_DYNAMIC, INFO, + "\t%d terminals\n", (int)terminal_count); + for (i = 0; i < (int)terminal_count; i++) { + ia_css_terminal_t *terminal = + ia_css_process_group_get_terminal(process_group, i); + + retval = ia_css_terminal_print(terminal, fid); + verifjmpexit(retval == 0); + } + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, INFO, + "=============== Process group print end ===============\n"); + retval = 0; +EXIT: + if (haserror(EFAULT)) { + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, ERROR, + "ia_css_process_group_print invalid argument\n"); + } + return retval; +} + +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_C +bool ia_css_is_process_group_valid( + const ia_css_process_group_t *process_group, + const ia_css_program_group_manifest_t *pg_manifest, + const ia_css_program_group_param_t *param) +{ + DECLARE_ERRVAL + bool invalid_flag = false; + uint8_t proc_idx; + uint8_t prog_idx; + uint8_t proc_term_idx; + uint8_t process_count; + uint8_t program_count; + uint8_t terminal_count; + uint8_t man_terminal_count; + + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, VERBOSE, + 
"ia_css_is_process_group_valid(): enter:\n"); + + verifexitval(process_group != NULL, EFAULT); + verifexitval(pg_manifest != NULL, EFAULT); + NOT_USED(param); + + process_count = process_group->process_count; + terminal_count = process_group->terminal_count; + program_count = + ia_css_program_group_manifest_get_program_count(pg_manifest); + man_terminal_count = + ia_css_program_group_manifest_get_terminal_count(pg_manifest); + + /* Validate process group */ + invalid_flag = invalid_flag || + !(program_count >= process_count) || + !(man_terminal_count >= terminal_count) || + !(process_group->size > process_group->processes_offset) || + !(process_group->size > process_group->terminals_offset); + + /* Validate processes */ + for (proc_idx = 0; proc_idx < process_count; proc_idx++) { + const ia_css_process_t *process; + ia_css_program_ID_t prog_id; + bool no_match_found = true; + + process = ia_css_process_group_get_process( + process_group, proc_idx); + verifexitval(NULL != process, EFAULT); + prog_id = ia_css_process_get_program_ID(process); + for (prog_idx = 0; prog_idx < program_count; prog_idx++) { + ia_css_program_manifest_t *p_manifest = NULL; + + p_manifest = + ia_css_program_group_manifest_get_prgrm_mnfst( + pg_manifest, prog_idx); + if (prog_id == + ia_css_program_manifest_get_program_ID( + p_manifest)) { + invalid_flag = invalid_flag || + !ia_css_is_process_valid( + process, p_manifest); + no_match_found = false; + break; + } + } + invalid_flag = invalid_flag || no_match_found; + } + + /* Validate terminals */ + for (proc_term_idx = 0; proc_term_idx < terminal_count; + proc_term_idx++) { + int man_term_idx; + const ia_css_terminal_t *terminal; + const ia_css_terminal_manifest_t *terminal_manifest; + + terminal = + ia_css_process_group_get_terminal( + process_group, proc_term_idx); + verifexitval(NULL != terminal, EFAULT); + man_term_idx = + ia_css_terminal_get_terminal_manifest_index(terminal); + terminal_manifest = + 
ia_css_program_group_manifest_get_term_mnfst( + pg_manifest, man_term_idx); + invalid_flag = invalid_flag || + !ia_css_is_terminal_valid(terminal, terminal_manifest); + } + +EXIT: + if (haserror(EFAULT)) { + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, ERROR, + "ia_css_is_process_group_valid() invalid argument\n"); + return false; + } else { + return (!invalid_flag); + } +} + +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_C +bool ia_css_can_process_group_submit( + const ia_css_process_group_t *process_group) +{ + DECLARE_ERRVAL + int i; + bool can_submit = false; + int retval = -1; + uint8_t terminal_count = + ia_css_process_group_get_terminal_count(process_group); + + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, INFO, + "ia_css_can_process_group_submit(): enter:\n"); + + verifexitval(process_group != NULL, EFAULT); + + for (i = 0; i < (int)terminal_count; i++) { + ia_css_terminal_t *terminal = + ia_css_process_group_get_terminal(process_group, i); + vied_vaddress_t buffer; + ia_css_buffer_state_t buffer_state; + + verifexitval(terminal != NULL, EINVAL); + + if (process_group->protocol_version == + IA_CSS_PROCESS_GROUP_PROTOCOL_LEGACY) { + /* + * For legacy pg flow, buffer addresses are contained inside + * the process group structure, so these need to be validated + * on process group submission. 
+ */ + buffer = ia_css_terminal_get_buffer(terminal); + IA_CSS_TRACE_3(PSYSAPI_DYNAMIC, INFO, + "\tH: Terminal number(%d) is %p having buffer 0x%x\n", + i, terminal, buffer); + } + + /* buffer_state is applicable only for data terminals*/ + if (ia_css_is_terminal_data_terminal(terminal) == true) { + ia_css_frame_t *frame = + ia_css_data_terminal_get_frame( + (ia_css_data_terminal_t *)terminal); + + verifexitval(frame != NULL, EINVAL); + buffer_state = ia_css_frame_get_buffer_state(frame); + if ((buffer_state == IA_CSS_BUFFER_NULL) || + (buffer_state == IA_CSS_N_BUFFER_STATES)) { + break; + } + } else if ( + (ia_css_is_terminal_parameter_terminal(terminal) + != true) && + (ia_css_is_terminal_program_terminal(terminal) + != true) && + (ia_css_is_terminal_program_control_init_terminal(terminal) + != true) && + (ia_css_is_terminal_spatial_parameter_terminal( + terminal) != true)) { + /* neither data nor parameter terminal, so error.*/ + break; + } + + } + /* Only true if no check failed */ + can_submit = (i == terminal_count); + + retval = 0; +EXIT: + if (haserror(EFAULT)) { + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, ERROR, + "ia_css_can_process_group_submit invalid argument process_group\n"); + } + if (!noerror()) { + IA_CSS_TRACE_1(PSYSAPI_DYNAMIC, ERROR, + "ia_css_can_process_group_submit failed (%i)\n", + retval); + } + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, INFO, + "ia_css_can_process_group_submit(): leave:\n"); + return can_submit; +} + +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_C +bool ia_css_can_enqueue_buffer_set( + const ia_css_process_group_t *process_group, + const ia_css_buffer_set_t *buffer_set) +{ + DECLARE_ERRVAL + int i; + bool can_enqueue = false; + int retval = -1; + uint8_t terminal_count; + + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, INFO, + "ia_css_can_enqueue_buffer_set(): enter:\n"); + + verifexitval(process_group != NULL, EFAULT); + verifexitval(buffer_set != NULL, EFAULT); + + terminal_count = + ia_css_process_group_get_terminal_count(process_group); + + /* + * For ppg flow, 
buffer addresses are contained in the + * external buffer set structure, so these need to be + * validated before enqueueing. + */ + verifexitval(process_group->protocol_version == + IA_CSS_PROCESS_GROUP_PROTOCOL_PPG, EFAULT); + + for (i = 0; i < (int)terminal_count; i++) { + ia_css_terminal_t *terminal = + ia_css_process_group_get_terminal(process_group, i); + vied_vaddress_t buffer; + ia_css_buffer_state_t buffer_state; + + verifexitval(terminal != NULL, EINVAL); + + buffer = ia_css_buffer_set_get_buffer(buffer_set, terminal); + IA_CSS_TRACE_3(PSYSAPI_DYNAMIC, INFO, + "\tH: Terminal number(%d) is %p having buffer 0x%x\n", + i, terminal, buffer); + + /* buffer_state is applicable only for data terminals*/ + if (ia_css_is_terminal_data_terminal(terminal) == true) { + ia_css_frame_t *frame = + ia_css_data_terminal_get_frame( + (ia_css_data_terminal_t *)terminal); + + verifexitval(frame != NULL, EINVAL); + buffer_state = ia_css_frame_get_buffer_state(frame); + if ((buffer_state == IA_CSS_BUFFER_NULL) || + (buffer_state == IA_CSS_N_BUFFER_STATES)) { + break; + } + } else if ( + (ia_css_is_terminal_parameter_terminal(terminal) + != true) && + (ia_css_is_terminal_program_terminal(terminal) + != true) && + (ia_css_is_terminal_program_control_init_terminal(terminal) + != true) && + (ia_css_is_terminal_spatial_parameter_terminal( + terminal) != true)) { + /* neither data nor parameter terminal, so error.*/ + break; + } + } + /* Only true if no check failed */ + can_enqueue = (i == terminal_count); + + retval = 0; +EXIT: + if (haserror(EFAULT)) { + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, ERROR, + "ia_css_can_enqueue_buffer_set invalid argument\n"); + } + if (!noerror()) { + IA_CSS_TRACE_1(PSYSAPI_DYNAMIC, ERROR, + "ia_css_can_enqueue_buffer_set failed (%i)\n", + retval); + } + return can_enqueue; +} + +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_C +bool ia_css_can_process_group_start( + const ia_css_process_group_t *process_group) +{ + DECLARE_ERRVAL + int i; + bool can_start = false; + int 
retval = -1; + uint8_t terminal_count; + + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, INFO, + "ia_css_can_process_group_start(): enter:\n"); + + verifexitval(process_group != NULL, EFAULT); + + terminal_count = + ia_css_process_group_get_terminal_count(process_group); + for (i = 0; i < (int)terminal_count; i++) { + ia_css_terminal_t *terminal = + ia_css_process_group_get_terminal(process_group, i); + ia_css_buffer_state_t buffer_state; + bool ok = false; + + verifexitval(terminal != NULL, EINVAL); + if (ia_css_is_terminal_data_terminal(terminal) == true) { + /* + * buffer_state is applicable only for data terminals + */ + ia_css_frame_t *frame = + ia_css_data_terminal_get_frame( + (ia_css_data_terminal_t *)terminal); + bool is_input = ia_css_is_terminal_input(terminal); + /* + * check for NULL here. + * then invoke next 2 statements + */ + verifexitval(frame != NULL, EINVAL); + IA_CSS_TRACE_5(PSYSAPI_DYNAMIC, VERBOSE, + "\tTerminal %d: buffer_state %u, access_type %u, data_bytes %u, data %u\n", + i, frame->buffer_state, frame->access_type, + frame->data_bytes, frame->data); + buffer_state = ia_css_frame_get_buffer_state(frame); + + ok = ((is_input && + (buffer_state == IA_CSS_BUFFER_FULL)) || + (!is_input && (buffer_state == + IA_CSS_BUFFER_EMPTY))); + + } else if (ia_css_is_terminal_parameter_terminal(terminal) == + true) { + /* + * FIXME: + * is there any pre-requisite for param_terminal? 
+ */ + ok = true; + } else if (ia_css_is_terminal_program_terminal(terminal) == + true) { + ok = true; + } else if (ia_css_is_terminal_program_control_init_terminal(terminal) == + true) { + ok = true; + } else if (ia_css_is_terminal_spatial_parameter_terminal( + terminal) == true) { + ok = true; + } else { + /* neither data nor parameter terminal, so error.*/ + break; + } + + if (!ok) + break; + } + /* Only true if no check failed */ + can_start = (i == terminal_count); + + retval = 0; +EXIT: + if (haserror(EFAULT)) { + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, ERROR, + "ia_css_can_process_group_submit invalid argument process_group\n"); + } + if (!noerror()) { + IA_CSS_TRACE_1(PSYSAPI_DYNAMIC, ERROR, + "ia_css_can_process_group_start failed (%i)\n", + retval); + } + return can_start; +} + +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_C +size_t ia_css_process_group_get_size( + const ia_css_process_group_t *process_group) +{ + DECLARE_ERRVAL + size_t size = 0; + + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, VERBOSE, + "ia_css_process_group_get_size(): enter:\n"); + + verifexitval(process_group != NULL, EFAULT); + + size = process_group->size; + +EXIT: + if (haserror(EFAULT)) { + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, ERROR, + "ia_css_process_group_get_size invalid argument\n"); + } + return size; +} + +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_C +ia_css_process_group_state_t ia_css_process_group_get_state( + const ia_css_process_group_t *process_group) +{ + DECLARE_ERRVAL + ia_css_process_group_state_t state = IA_CSS_N_PROCESS_GROUP_STATES; + + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, VERBOSE, + "ia_css_process_group_get_state(): enter:\n"); + + verifexitval(process_group != NULL, EFAULT); + + state = process_group->state; + +EXIT: + if (haserror(EFAULT)) { + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, ERROR, + "ia_css_process_group_get_state invalid argument\n"); + } + return state; +} + +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_C +const ia_css_rbm_t *ia_css_process_group_get_routing_bitmap( + const ia_css_process_group_t *process_group) 
+{ + DECLARE_ERRVAL + const ia_css_rbm_t *rbm = NULL; + + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, VERBOSE, + "ia_css_process_group_get_routing_bitmap(): enter:\n"); + + verifexitval(process_group != NULL, EFAULT); + + rbm = &(process_group->routing_bitmap); +EXIT: + if (haserror(EFAULT)) { + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, ERROR, + "ia_css_process_group_get_routing_bitmap invalid argument\n"); + } + return rbm; +} + +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_C +uint16_t ia_css_process_group_get_fragment_count( + const ia_css_process_group_t *process_group) +{ + DECLARE_ERRVAL + uint16_t fragment_count = 0; + + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, VERBOSE, + "ia_css_process_group_get_fragment_count(): enter:\n"); + + verifexitval(process_group != NULL, EFAULT); + + fragment_count = process_group->fragment_count; + +EXIT: + if (haserror(EFAULT)) { + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, ERROR, + "ia_css_process_group_get_fragment_count invalid argument\n"); + } + return fragment_count; +} + +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_C +uint8_t ia_css_process_group_get_process_count( + const ia_css_process_group_t *process_group) +{ + DECLARE_ERRVAL + uint8_t process_count = 0; + + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, VERBOSE, + "ia_css_process_group_get_process_count(): enter:\n"); + + verifexitval(process_group != NULL, EFAULT); + + process_count = process_group->process_count; + +EXIT: + if (haserror(EFAULT)) { + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, ERROR, + "ia_css_process_group_get_process_count invalid argument\n"); + } + return process_count; +} + +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_C +uint8_t ia_css_process_group_get_terminal_count( + const ia_css_process_group_t *process_group) +{ + DECLARE_ERRVAL + uint8_t terminal_count = 0; + + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, VERBOSE, + "ia_css_process_group_get_terminal_count(): enter:\n"); + + verifexitval(process_group != NULL, EFAULT); + + terminal_count = process_group->terminal_count; + +EXIT: + if (haserror(EFAULT)) { + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, ERROR, + 
"ia_css_process_group_get_terminal_count invalid argument\n"); + } + return terminal_count; +} + +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_C +uint32_t ia_css_process_group_get_pg_load_start_ts( + const ia_css_process_group_t *process_group) +{ + DECLARE_ERRVAL + uint32_t pg_load_start_ts = 0; + + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, VERBOSE, + "ia_css_process_group_get_pg_load_start_ts(): enter:\n"); + + verifexitval(process_group != NULL, EFAULT); + + pg_load_start_ts = process_group->pg_load_start_ts; + +EXIT: + if (haserror(EFAULT)) { + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, ERROR, + "ia_css_process_group_get_pg_load_start_ts invalid argument\n"); + } + return pg_load_start_ts; +} + +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_C +uint32_t ia_css_process_group_get_pg_load_cycles( + const ia_css_process_group_t *process_group) +{ + DECLARE_ERRVAL + uint32_t pg_load_cycles = 0; + + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, VERBOSE, + "ia_css_process_group_get_pg_load_cycles(): enter:\n"); + + verifexitval(process_group != NULL, EFAULT); + + pg_load_cycles = process_group->pg_load_cycles; + +EXIT: + if (haserror(EFAULT)) { + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, ERROR, + "ia_css_process_group_get_pg_load_cycles invalid argument\n"); + } + return pg_load_cycles; +} + +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_C +uint32_t ia_css_process_group_get_pg_init_cycles( + const ia_css_process_group_t *process_group) +{ + DECLARE_ERRVAL + uint32_t pg_init_cycles = 0; + + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, VERBOSE, + "ia_css_process_group_get_pg_init_cycles(): enter:\n"); + + verifexitval(process_group != NULL, EFAULT); + + pg_init_cycles = process_group->pg_init_cycles; + +EXIT: + if (haserror(EFAULT)) { + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, ERROR, + "ia_css_process_group_get_pg_init_cycles invalid argument\n"); + } + return pg_init_cycles; +} + +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_C +uint32_t ia_css_process_group_get_pg_processing_cycles( + const ia_css_process_group_t *process_group) +{ + DECLARE_ERRVAL + uint32_t pg_processing_cycles 
= 0; + + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, VERBOSE, + "ia_css_process_group_get_pg_processing_cycles(): enter:\n"); + + verifexitval(process_group != NULL, EFAULT); + + pg_processing_cycles = process_group->pg_processing_cycles; + +EXIT: + if (haserror(EFAULT)) { + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, ERROR, + "ia_css_process_group_get_pg_processing_cycles invalid argument\n"); + } + return pg_processing_cycles; +} + +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_C +ia_css_terminal_t *ia_css_process_group_get_terminal_from_type( + const ia_css_process_group_t *process_group, + const ia_css_terminal_type_t terminal_type) +{ + unsigned int proc_cnt; + ia_css_terminal_t *terminal = NULL; + + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, VERBOSE, + "ia_css_process_group_get_terminal_from_type(): enter:\n"); + + for (proc_cnt = 0; proc_cnt < (unsigned int)ia_css_process_group_get_terminal_count(process_group); proc_cnt++) { + terminal = ia_css_process_group_get_terminal(process_group, proc_cnt); + if (terminal == NULL) { + IA_CSS_TRACE_1(PSYSAPI_DYNAMIC, ERROR, + "ia_css_process_group_get_terminal_from_type() Failed to get terminal %d", proc_cnt); + goto EXIT; + } + if (ia_css_terminal_get_type(terminal) == terminal_type) { + return terminal; + } + terminal = NULL; /* If not the expected type, return NULL */ + } +EXIT: + return terminal; +} + +/* Returns the terminal or NULL if it was not found + For some of those maybe valid to not exist at all in the process group */ +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_C +const ia_css_terminal_t *ia_css_process_group_get_single_instance_terminal( + const ia_css_process_group_t *process_group, + ia_css_terminal_type_t term_type) +{ + int i, term_count; + + assert(process_group != NULL); + + /* Those below have at most one instance per process group */ + assert(term_type == IA_CSS_TERMINAL_TYPE_PARAM_CACHED_IN || + term_type == IA_CSS_TERMINAL_TYPE_PARAM_CACHED_OUT || + term_type == IA_CSS_TERMINAL_TYPE_PROGRAM || + term_type == 
IA_CSS_TERMINAL_TYPE_PROGRAM_CONTROL_INIT); + + term_count = ia_css_process_group_get_terminal_count(process_group); + + for (i = 0; i < term_count; i++) { + const ia_css_terminal_t *terminal = ia_css_process_group_get_terminal(process_group, i); + + if (ia_css_terminal_get_type(terminal) == term_type) { + /* Only one parameter terminal per process group */ + return terminal; + } + } + + return NULL; +} + +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_C +ia_css_terminal_t *ia_css_process_group_get_terminal( + const ia_css_process_group_t *process_grp, + const unsigned int terminal_num) +{ + DECLARE_ERRVAL + ia_css_terminal_t *terminal_ptr = NULL; + uint16_t *terminal_offset_table; + + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, VERBOSE, + "ia_css_process_group_get_terminal(): enter:\n"); + + verifexitval(process_grp != NULL, EFAULT); + verifexitval(terminal_num < process_grp->terminal_count, EINVAL); + + terminal_offset_table = + (uint16_t *)((char *)process_grp + + process_grp->terminals_offset); + terminal_ptr = + (ia_css_terminal_t *)((char *)process_grp + + terminal_offset_table[terminal_num]); + + verifexitval(terminal_ptr != NULL, EFAULT); + +EXIT: + if (haserror(EFAULT)) { + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, ERROR, + "ia_css_process_group_get_terminal invalid argument\n"); + } + return terminal_ptr; +} + +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_C +ia_css_process_t *ia_css_process_group_get_process( + const ia_css_process_group_t *process_grp, + const unsigned int process_num) +{ + DECLARE_ERRVAL + ia_css_process_t *process_ptr = NULL; + uint16_t *process_offset_table; + + verifexitval(process_grp != NULL, EFAULT); + verifexitval(process_num < process_grp->process_count, EINVAL); + + process_offset_table = + (uint16_t *)((char *)process_grp + + process_grp->processes_offset); + process_ptr = + (ia_css_process_t *)((char *)process_grp + + process_offset_table[process_num]); + + verifexitval(process_ptr != NULL, EFAULT); + +EXIT: + if (haserror(EFAULT)) { + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, 
ERROR, + "ia_css_process_group_get_process invalid argument\n"); + } + return process_ptr; +} + +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_C +ia_css_program_group_ID_t ia_css_process_group_get_program_group_ID( + const ia_css_process_group_t *process_group) +{ + DECLARE_ERRVAL + ia_css_program_group_ID_t id = 0; + + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, VERBOSE, + "ia_css_process_group_get_program_group_ID(): enter:\n"); + + verifexitval(process_group != NULL, EFAULT); + + id = process_group->ID; + +EXIT: + if (haserror(EFAULT)) { + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, ERROR, + "ia_css_process_group_get_program_group_ID invalid argument\n"); + } + return id; +} + +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_C +vied_nci_resource_bitmap_t ia_css_process_group_get_resource_bitmap( + const ia_css_process_group_t *process_group) +{ + DECLARE_ERRVAL + vied_nci_resource_bitmap_t resource_bitmap = 0; + + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, VERBOSE, + "ia_css_process_group_get_resource_bitmap(): enter:\n"); + + verifexitval(process_group != NULL, EFAULT); + + resource_bitmap = process_group->resource_bitmap; + +EXIT: + if (haserror(EFAULT)) { + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, ERROR, + "ia_css_process_group_get_resource_bitmap invalid argument\n"); + } + return resource_bitmap; +} + +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_C +int ia_css_process_group_set_resource_bitmap( + ia_css_process_group_t *process_group, + const vied_nci_resource_bitmap_t resource_bitmap) +{ + DECLARE_ERRVAL + int retval = -1; + + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, VERBOSE, + "ia_css_process_group_set_resource_bitmap(): enter:\n"); + + verifexitval(process_group != NULL, EFAULT); + + process_group->resource_bitmap = resource_bitmap; + + retval = 0; +EXIT: + if (haserror(EFAULT)) { + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, ERROR, + "ia_css_process_group_set_resource_bitmap invalid argument process_group\n"); + } + if (!noerror()) { + IA_CSS_TRACE_1(PSYSAPI_DYNAMIC, ERROR, + "ia_css_process_group_set_resource_bitmap failed (%i)\n", + retval); + } + 
return retval; +} + +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_C +int ia_css_process_group_set_routing_bitmap( + ia_css_process_group_t *process_group, + const ia_css_rbm_t rbm) +{ + DECLARE_ERRVAL + int retval = -1; + + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, VERBOSE, + "ia_css_process_group_set_routing_bitmap(): enter:\n"); + + verifexitval(process_group != NULL, EFAULT); + process_group->routing_bitmap = rbm; + retval = 0; +EXIT: + if (haserror(EFAULT)) { + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, ERROR, + "ia_css_process_group_set_routing_bitmap invalid argument process_group\n"); + } + if (!noerror()) { + IA_CSS_TRACE_1(PSYSAPI_DYNAMIC, ERROR, + "ia_css_process_group_set_routing_bitmap failed (%i)\n", + retval); + } + return retval; +} + +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_C +uint32_t ia_css_process_group_compute_cycle_count( + const ia_css_program_group_manifest_t *manifest, + const ia_css_program_group_param_t *param) +{ + DECLARE_ERRVAL + uint32_t cycle_count = 0; + + NOT_USED(manifest); + NOT_USED(param); + + verifexitval(manifest != NULL, EFAULT); + verifexitval(param != NULL, EFAULT); + + cycle_count = 1; + +EXIT: + if (haserror(EFAULT)) { + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, ERROR, + "ia_css_process_group_compute_cycle_count invalid argument\n"); + } + return cycle_count; +} + +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_C +int ia_css_process_group_set_fragment_state( + ia_css_process_group_t *process_group, + uint16_t fragment_state) +{ + DECLARE_ERRVAL + int retval = -1; + + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, INFO, + "ia_css_process_group_set_fragment_state(): enter:\n"); + + verifexitval(process_group != NULL, EFAULT); + verifexitval(fragment_state <= ia_css_process_group_get_fragment_count( + process_group), EINVAL); + + process_group->fragment_state = fragment_state; + retval = 0; +EXIT: + if (haserror(EFAULT)) { + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, ERROR, + "ia_css_process_group_set_fragment_state invalid argument process_group\n"); + } + if (!noerror()) { + IA_CSS_TRACE_1(PSYSAPI_DYNAMIC, 
ERROR, + "ia_css_process_group_set_fragment_state failed (%i)\n", + retval); + } + return retval; +} + +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_C +int ia_css_process_group_get_fragment_state( + const ia_css_process_group_t *process_group, + uint16_t *fragment_state) +{ + DECLARE_ERRVAL + int retval = -1; + + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, VERBOSE, + "ia_css_process_group_get_fragment_state(): enter:\n"); + + verifexitval(process_group != NULL, EFAULT); + verifexitval(fragment_state != NULL, EFAULT); + + *fragment_state = process_group->fragment_state; + retval = 0; + +EXIT: + if (haserror(EFAULT)) { + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, ERROR, + "ia_css_process_group_get_fragment_state invalid argument\n"); + } + if (!noerror()) { + IA_CSS_TRACE_1(PSYSAPI_DYNAMIC, ERROR, + "ia_css_process_group_get_fragment_state failed (%i)\n", + retval); + } + return retval; +} + +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_C +int ia_css_process_group_get_ipu_vaddress( + const ia_css_process_group_t *process_group, + vied_vaddress_t *ipu_vaddress) +{ + DECLARE_ERRVAL + int retval = -1; + + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, VERBOSE, + "ia_css_process_group_get_ipu_vaddress(): enter:\n"); + + verifexitval(process_group != NULL, EFAULT); + verifexitval(ipu_vaddress != NULL, EFAULT); + + *ipu_vaddress = process_group->ipu_virtual_address; + retval = 0; + +EXIT: + if (haserror(EFAULT)) { + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, ERROR, + "ia_css_process_group_get_ipu_vaddress invalid argument\n"); + } + if (!noerror()) { + IA_CSS_TRACE_1(PSYSAPI_DYNAMIC, ERROR, + "ia_css_process_group_get_ipu_vaddress failed (%i)\n", + retval); + } + return retval; +} + +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_C +int ia_css_process_group_set_ipu_vaddress( + ia_css_process_group_t *process_group, + vied_vaddress_t ipu_vaddress) +{ + DECLARE_ERRVAL + int retval = -1; + + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, VERBOSE, + "ia_css_process_group_set_ipu_vaddress(): enter:\n"); + + verifexitval(process_group != NULL, EFAULT); + + 
process_group->ipu_virtual_address = ipu_vaddress; + retval = 0; + +EXIT: + if (haserror(EFAULT)) { + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, ERROR, + "ia_css_process_group_set_ipu_vaddress invalid argument\n"); + } + if (!noerror()) { + IA_CSS_TRACE_1(PSYSAPI_DYNAMIC, ERROR, + "ia_css_process_group_set_ipu_vaddress failed (%i)\n", + retval); + } + return retval; +} + +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_C +uint8_t ia_css_process_group_get_protocol_version( + const ia_css_process_group_t *process_group) +{ + DECLARE_ERRVAL + uint8_t protocol_version = IA_CSS_PROCESS_GROUP_N_PROTOCOLS; + + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, VERBOSE, + "ia_css_process_group_get_protocol_version(): enter:\n"); + + verifexitval(process_group != NULL, EFAULT); + + protocol_version = process_group->protocol_version; + +EXIT: + if (haserror(EFAULT)) { + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, ERROR, + "ia_css_process_group_get_protocol_version invalid argument\n"); + } + return protocol_version; +} + +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_C +uint8_t ia_css_process_group_get_base_queue_id( + ia_css_process_group_t *process_group) +{ + DECLARE_ERRVAL + uint8_t queue_id = IA_CSS_N_PSYS_CMD_QUEUE_ID; + + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, VERBOSE, + "ia_css_process_group_get_base_queue_id(): enter:\n"); + + verifexitval(process_group != NULL, EFAULT); + + queue_id = process_group->base_queue_id; + +EXIT: + if (haserror(EFAULT)) { + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, ERROR, + "ia_css_process_group_get_base_queue_id invalid argument\n"); + } + return queue_id; +} + +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_C +int ia_css_process_group_set_base_queue_id( + ia_css_process_group_t *process_group, + uint8_t queue_id) +{ + DECLARE_ERRVAL + int retval = -1; + + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, VERBOSE, + "ia_css_process_group_set_base_queue_id(): enter:\n"); + + verifexitval(process_group != NULL, EFAULT); + + process_group->base_queue_id = queue_id; + retval = 0; + +EXIT: + if (haserror(EFAULT)) { + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, ERROR, 
+ "ia_css_process_group_set_base_queue_id invalid argument\n"); + } + return retval; +} + +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_C +uint8_t ia_css_process_group_get_num_queues( + ia_css_process_group_t *process_group) +{ + DECLARE_ERRVAL + uint8_t num_queues = 0; + + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, VERBOSE, + "ia_css_process_group_get_num_queues(): enter:\n"); + + verifexitval(process_group != NULL, EFAULT); + + num_queues = process_group->num_queues; + +EXIT: + if (haserror(EFAULT)) { + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, ERROR, + "ia_css_process_group_get_num_queues invalid argument\n"); + } + return num_queues; +} + +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_C +int ia_css_process_group_set_num_queues( + ia_css_process_group_t *process_group, + uint8_t num_queues) +{ + DECLARE_ERRVAL + int retval = -1; + + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, VERBOSE, + "ia_css_process_group_set_num_queues(): enter:\n"); + + verifexitval(process_group != NULL, EFAULT); + + process_group->num_queues = num_queues; + retval = 0; + +EXIT: + if (haserror(EFAULT)) { + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, ERROR, + "ia_css_process_group_set_num_queues invalid argument\n"); + } + return retval; +} + + +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_C +bool ia_css_process_group_has_vp(const ia_css_process_group_t *process_group) +{ + bool has_vp = false; + uint32_t i; + + uint8_t process_count = ia_css_process_group_get_process_count(process_group); + + for (i = 0; i < process_count; i++) { + ia_css_process_t *process; + vied_nci_cell_ID_t cell_id; + + process = ia_css_process_group_get_process(process_group, i); + cell_id = ia_css_process_get_cell(process); + + if (VIED_NCI_VP_TYPE_ID == vied_nci_cell_get_type(cell_id)) { + has_vp = true; + break; + } + } + + return has_vp; +} + +#endif /* __IA_CSS_PSYS_PROCESS_GROUP_IMPL_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/psysapi/dynamic/src/ia_css_psys_process_impl.h 
b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/psysapi/dynamic/src/ia_css_psys_process_impl.h new file mode 100644 index 0000000000000..5d0303012700b --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/psysapi/dynamic/src/ia_css_psys_process_impl.h @@ -0,0 +1,637 @@ +/* +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ + +#ifndef __IA_CSS_PSYS_PROCESS_IMPL_H +#define __IA_CSS_PSYS_PROCESS_IMPL_H + +#include + +#include +#include + +#include +#include +#include + +#include + +#include "ia_css_psys_dynamic_trace.h" +#include "ia_css_psys_process_private_types.h" + +/** Function only to be used in ia_css_psys_process_impl.h and ia_css_psys_process.h */ +STORAGE_CLASS_INLINE vied_nci_cell_ID_t ia_css_process_cells_get_cell(const ia_css_process_t *process, int index) +{ + assert(index < IA_CSS_PROCESS_MAX_CELLS); + if (index >= IA_CSS_PROCESS_MAX_CELLS) { + return VIED_NCI_N_CELL_ID; + } +#if IA_CSS_PROCESS_MAX_CELLS == 1 + return process->cell_id; +#else + return process->cells[index]; +#endif +} + +/** Function only to be used in ia_css_psys_process_impl.h and ia_css_psys_process.h */ +STORAGE_CLASS_INLINE void ia_css_process_cells_set_cell(ia_css_process_t *process, int index, vied_nci_cell_ID_t cell_id) +{ + assert(index < IA_CSS_PROCESS_MAX_CELLS); + if (index >= IA_CSS_PROCESS_MAX_CELLS) { + return; + } +#if IA_CSS_PROCESS_MAX_CELLS == 1 + process->cell_id = cell_id; +#else + process->cells[index] = cell_id; +#endif +} + +/** 
Function only to be used in ia_css_psys_process_impl.h and ia_css_psys_process */ +STORAGE_CLASS_INLINE void ia_css_process_cells_clear(ia_css_process_t *process) +{ + int i; + for (i = 0; i < IA_CSS_PROCESS_MAX_CELLS; i++) { + ia_css_process_cells_set_cell(process, i, VIED_NCI_N_CELL_ID); + } +} + +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_C +vied_nci_cell_ID_t ia_css_process_get_cell( + const ia_css_process_t *process) +{ + DECLARE_ERRVAL + vied_nci_cell_ID_t cell_id = VIED_NCI_N_CELL_ID; + int i = 0; + + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, VERBOSE, + "ia_css_process_get_cell(): enter:\n"); + + verifexitval(process != NULL, EFAULT); + +#if IA_CSS_PROCESS_MAX_CELLS > 1 + for (i = 1; i < IA_CSS_PROCESS_MAX_CELLS; i++) { + assert(VIED_NCI_N_CELL_ID == ia_css_process_cells_get_cell(process, i)); +#ifdef __HIVECC +#pragma hivecc unroll +#endif + } +#else + (void)i; +#endif + cell_id = ia_css_process_cells_get_cell(process, 0); + +EXIT: + if (haserror(EFAULT)) { + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, ERROR, + "ia_css_process_get_cell invalid argument\n"); + } + return cell_id; +} + +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_C +vied_nci_mem_ID_t ia_css_process_get_ext_mem_id( + const ia_css_process_t *process, + const vied_nci_mem_type_ID_t mem_type) +{ + DECLARE_ERRVAL + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, VERBOSE, + "ia_css_process_get_ext_mem(): enter:\n"); + + verifexitval(process != NULL && mem_type < VIED_NCI_N_DATA_MEM_TYPE_ID, EFAULT); + +EXIT: + if (!noerror()) { + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, ERROR, + "ia_css_process_get_ext_mem invalid argument\n"); + return IA_CSS_PROCESS_INVALID_OFFSET; + } + return process->ext_mem_id[mem_type]; +} + + +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_C +uint32_t ia_css_process_get_program_idx( + const ia_css_process_t *process) +{ + DECLARE_ERRVAL + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, VERBOSE, + "ia_css_process_get_program_idx(): enter:\n"); + + verifexitval(process != NULL, EFAULT); + +EXIT: + if (haserror(EFAULT)) { + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, ERROR, + 
"ia_css_process_get_program_idx invalid argument\n"); + return IA_CSS_PROCESS_INVALID_PROGRAM_IDX; + } + return process->program_idx; +} + + +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_C +vied_nci_resource_size_t ia_css_process_get_dev_chn( + const ia_css_process_t *process, + const vied_nci_dev_chn_ID_t dev_chn_id) +{ + DECLARE_ERRVAL + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, VERBOSE, + "ia_css_process_get_dev_chn(): enter:\n"); + + verifexitval(process != NULL && dev_chn_id < VIED_NCI_N_DEV_CHN_ID, EFAULT); + +EXIT: + if (!noerror()) { + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, ERROR, + "ia_css_process_get_dev_chn(): invalid arguments\n"); + return IA_CSS_PROCESS_INVALID_OFFSET; + } + return process->dev_chn_offset[dev_chn_id]; +} + +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_C +vied_nci_resource_size_t ia_css_process_get_int_mem_offset( + const ia_css_process_t *process, + const vied_nci_mem_type_ID_t mem_id) +{ + DECLARE_ERRVAL + vied_nci_resource_size_t int_mem_offset = IA_CSS_PROCESS_INVALID_OFFSET; + + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, VERBOSE, + "ia_css_process_get_int_mem_offset(): enter:\n"); + + verifexitval(process != NULL && mem_id < VIED_NCI_N_MEM_TYPE_ID, EFAULT); + +EXIT: + if (noerror()) { + int_mem_offset = process->int_mem_offset[mem_id]; + } else { + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, ERROR, + "ia_css_process_get_int_mem_offset invalid argument\n"); + } + + return int_mem_offset; +} + +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_C +vied_nci_resource_size_t ia_css_process_get_ext_mem_offset( + const ia_css_process_t *process, + const vied_nci_mem_type_ID_t mem_type_id) +{ + DECLARE_ERRVAL + vied_nci_resource_size_t ext_mem_offset = IA_CSS_PROCESS_INVALID_OFFSET; + + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, VERBOSE, + "ia_css_process_get_ext_mem_offset(): enter:\n"); + + verifexitval(process != NULL && mem_type_id < VIED_NCI_N_DATA_MEM_TYPE_ID, EFAULT); + +EXIT: + if (noerror()) { + ext_mem_offset = process->ext_mem_offset[mem_type_id]; + } else { + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, ERROR, + 
"ia_css_process_get_ext_mem_offset invalid argument\n"); + } + + return ext_mem_offset; +} + +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_C +size_t ia_css_process_get_size( + const ia_css_process_t *process) +{ + DECLARE_ERRVAL + size_t size = 0; + + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, VERBOSE, + "ia_css_process_get_size(): enter:\n"); + + verifexitval(process != NULL, EFAULT); + +EXIT: + if (noerror()) { + size = process->size; + } else { + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, ERROR, + "ia_css_process_get_size invalid argument\n"); + } + + return size; +} + +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_C +ia_css_process_state_t ia_css_process_get_state( + const ia_css_process_t *process) +{ + DECLARE_ERRVAL + ia_css_process_state_t state = IA_CSS_N_PROCESS_STATES; + + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, VERBOSE, + "ia_css_process_get_state(): enter:\n"); + + verifexitval(process != NULL, EFAULT); + +EXIT: + if (noerror()) { + state = process->state; + } else { + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, ERROR, + "ia_css_process_get_state invalid argument\n"); + } + + return state; +} + +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_C +int ia_css_process_set_state( + ia_css_process_t *process, + ia_css_process_state_t state) +{ + DECLARE_ERRVAL + int retval = -1; + + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, VERBOSE, + "ia_css_process_set_state(): enter:\n"); + + verifexitval(process != NULL, EFAULT); + + process->state = state; + retval = 0; +EXIT: + if (haserror(EFAULT)) { + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, ERROR, + "ia_css_process_set_state invalid argument\n"); + } + + return retval; +} + +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_C +uint8_t ia_css_process_get_cell_dependency_count( + const ia_css_process_t *process) +{ + DECLARE_ERRVAL + uint8_t cell_dependency_count = 0; + + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, VERBOSE, + "ia_css_process_get_cell_dependency_count(): enter:\n"); + + verifexitval(process != NULL, EFAULT); + cell_dependency_count = process->cell_dependency_count; + +EXIT: + if (haserror(EFAULT)) { + 
IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, ERROR, + "ia_css_process_get_cell_dependency_count invalid argument\n"); + } + return cell_dependency_count; +} + +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_C +uint8_t ia_css_process_get_terminal_dependency_count( + const ia_css_process_t *process) +{ + DECLARE_ERRVAL + uint8_t terminal_dependency_count = 0; + + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, VERBOSE, + "ia_css_process_get_terminal_dependency_count(): enter:\n"); + + verifexitval(process != NULL, EFAULT); + terminal_dependency_count = process->terminal_dependency_count; + +EXIT: + if (haserror(EFAULT)) { + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, ERROR, + "ia_css_process_get_terminal_dependency_count invalid argument process\n"); + } + return terminal_dependency_count; +} + +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_C +ia_css_process_group_t *ia_css_process_get_parent( + const ia_css_process_t *process) +{ + DECLARE_ERRVAL + ia_css_process_group_t *parent = NULL; + + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, VERBOSE, + "ia_css_process_get_parent(): enter:\n"); + + verifexitval(process != NULL, EFAULT); + + parent = + (ia_css_process_group_t *) ((char *)process + process->parent_offset); + +EXIT: + if (haserror(EFAULT)) { + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, ERROR, + "ia_css_process_get_parent invalid argument process\n"); + } + return parent; +} + +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_C +ia_css_program_ID_t ia_css_process_get_program_ID( + const ia_css_process_t *process) +{ + DECLARE_ERRVAL + ia_css_program_ID_t id = 0; + + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, VERBOSE, + "ia_css_process_get_program_ID(): enter:\n"); + + verifexitval(process != NULL, EFAULT); + + id = process->ID; + +EXIT: + if (haserror(EFAULT)) { + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, ERROR, + "ia_css_process_get_program_ID invalid argument process\n"); + } + return id; +} + +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_C +vied_nci_resource_id_t ia_css_process_get_cell_dependency( + const ia_css_process_t *process, + const unsigned int cell_num) +{ + DECLARE_ERRVAL + 
vied_nci_resource_id_t cell_dependency = + IA_CSS_PROCESS_INVALID_DEPENDENCY; + vied_nci_resource_id_t *cell_dep_ptr = NULL; + + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, VERBOSE, + "ia_css_process_get_cell_dependency(): enter:\n"); + + verifexitval(process != NULL, EFAULT); + verifexitval(cell_num < process->cell_dependency_count, EFAULT); + + cell_dep_ptr = + (vied_nci_resource_id_t *) + ((char *)process + process->cell_dependencies_offset); + cell_dependency = *(cell_dep_ptr + cell_num); +EXIT: + if (haserror(EFAULT)) { + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, ERROR, + "ia_css_process_get_cell_dependency invalid argument\n"); + } + return cell_dependency; +} + +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_C +uint8_t ia_css_process_get_terminal_dependency( + const ia_css_process_t *process, + const unsigned int terminal_num) +{ + DECLARE_ERRVAL + uint8_t *ter_dep_ptr = NULL; + uint8_t ter_dep = IA_CSS_PROCESS_INVALID_DEPENDENCY; + + verifexitval(process != NULL, EFAULT); + verifexitval(terminal_num < process->terminal_dependency_count, EFAULT); + + ter_dep_ptr = (uint8_t *) ((char *)process + + process->terminal_dependencies_offset); + + ter_dep = *(ter_dep_ptr + terminal_num); + +EXIT: + if (haserror(EFAULT)) { + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, ERROR, + "ia_css_process_get_terminal_dependency invalid argument\n"); + } + return ter_dep; +} + +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_C +ia_css_kernel_bitmap_t ia_css_process_get_kernel_bitmap( + const ia_css_process_t *process) +{ + DECLARE_ERRVAL + ia_css_kernel_bitmap_t bitmap = ia_css_kernel_bitmap_clear(); + + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, VERBOSE, + "ia_css_process_get_kernel_bitmap(): enter:\n"); + + verifexitval(process != NULL, EFAULT); + + bitmap = process->kernel_bitmap; + +EXIT: + if (haserror(EFAULT)) { + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, ERROR, + "ia_css_process_get_kernel_bitmap invalid argument process\n"); + } + return bitmap; +} + +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_C +vied_nci_resource_bitmap_t ia_css_process_get_cells_bitmap( + 
const ia_css_process_t *process) +{ + DECLARE_ERRVAL + vied_nci_resource_bitmap_t bitmap = 0; + vied_nci_cell_ID_t cell_id; + int i = 0; + + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, VERBOSE, + "ia_css_process_get_cells_bitmap(): enter:\n"); + + verifexitval(process != NULL, EFAULT); + + for (i = 0; i < IA_CSS_PROCESS_MAX_CELLS; i++) { + cell_id = ia_css_process_cells_get_cell(process, i); + if (VIED_NCI_N_CELL_ID != cell_id) { + bitmap |= (1 << cell_id); + } +#ifdef __HIVECC +#pragma hivecc unroll +#endif + } + +EXIT: + if (haserror(EFAULT)) { + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, ERROR, + "ia_css_process_get_cells_bitmap invalid argument process\n"); + } + + return bitmap; +} + +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_C +vied_nci_resource_bitmap_t* ia_css_process_get_dfm_port_bitmap_ptr( + ia_css_process_t *process) +{ + DECLARE_ERRVAL + vied_nci_resource_bitmap_t *p_bitmap = NULL; + + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, VERBOSE, + "ia_css_process_get_dfm_port_bitmap(): enter:\n"); + + verifexitval(process != NULL, EFAULT); +#if (VIED_NCI_N_DEV_DFM_ID > 0) + p_bitmap = &process->dfm_port_bitmap[0]; +#else + p_bitmap = NULL; +#endif +EXIT: + if (haserror(EFAULT)) { + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, ERROR, + "ia_css_process_get_dfm_port_bitmap invalid argument process\n"); + } + + return p_bitmap; +} + +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_C +vied_nci_resource_bitmap_t* ia_css_process_get_dfm_active_port_bitmap_ptr( + ia_css_process_t *process) +{ + DECLARE_ERRVAL + vied_nci_resource_bitmap_t *p_bitmap = NULL; + + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, VERBOSE, + "ia_css_process_get_dfm_active_port_bitmap(): enter:\n"); + + verifexitval(process != NULL, EFAULT); +#if (VIED_NCI_N_DEV_DFM_ID > 0) + p_bitmap = &process->dfm_active_port_bitmap[0]; +#else + p_bitmap = NULL; +#endif +EXIT: + if (haserror(EFAULT)) { + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, ERROR, + "ia_css_process_get_dfm_active_port_bitmap invalid argument process\n"); + } + + return p_bitmap; +} + +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_C 
+vied_nci_resource_bitmap_t ia_css_process_get_dfm_port_bitmap( + const ia_css_process_t *process, + vied_nci_dev_dfm_id_t dfm_res_id) +{ + DECLARE_ERRVAL + vied_nci_resource_bitmap_t bitmap = 0; + + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, VERBOSE, + "ia_css_process_get_dfm_port_bitmap(): enter:\n"); + + verifexitval(process != NULL, EFAULT); +#if (VIED_NCI_N_DEV_DFM_ID > 0) + verifexitval(dfm_res_id < VIED_NCI_N_DEV_DFM_ID, EFAULT); + bitmap = process->dfm_port_bitmap[dfm_res_id]; +#else + bitmap = 0; + (void)dfm_res_id; +#endif +EXIT: + if (haserror(EFAULT)) { + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, ERROR, + "ia_css_process_get_dfm_port_bitmap invalid argument process\n"); + } + + return bitmap; +} + +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_C +vied_nci_resource_bitmap_t ia_css_process_get_dfm_active_port_bitmap( + const ia_css_process_t *process, + vied_nci_dev_dfm_id_t dfm_res_id) +{ + DECLARE_ERRVAL + vied_nci_resource_bitmap_t bitmap = 0; + + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, VERBOSE, + "ia_css_process_get_dfm_active_port_bitmap(): enter:\n"); + + verifexitval(process != NULL, EFAULT); +#if (VIED_NCI_N_DEV_DFM_ID > 0) + verifexitval(dfm_res_id < VIED_NCI_N_DEV_DFM_ID, EFAULT); + bitmap = process->dfm_active_port_bitmap[dfm_res_id]; +#else + bitmap = 0; + (void)dfm_res_id; +#endif +EXIT: + if (haserror(EFAULT)) { + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, ERROR, + "ia_css_process_get_dfm_active_port_bitmap invalid argument process\n"); + } + return bitmap; +} + +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_C +bool ia_css_is_process_valid( + const ia_css_process_t *process, + const ia_css_program_manifest_t *p_manifest) +{ + DECLARE_ERRVAL + bool invalid_flag = false; + ia_css_program_ID_t prog_id; + ia_css_kernel_bitmap_t prog_kernel_bitmap; + + verifexitval(NULL != process, EFAULT); + verifexitval(NULL != p_manifest, EFAULT); + + prog_id = ia_css_process_get_program_ID(process); + verifjmpexit(prog_id == + ia_css_program_manifest_get_program_ID(p_manifest)); + + prog_kernel_bitmap = + 
ia_css_program_manifest_get_kernel_bitmap(p_manifest); + + invalid_flag = (process->size <= process->cell_dependencies_offset) || + (process->size <= process->terminal_dependencies_offset) || + !ia_css_is_kernel_bitmap_subset(prog_kernel_bitmap, + process->kernel_bitmap); + + if (ia_css_has_program_manifest_fixed_cell(p_manifest)) { + vied_nci_cell_ID_t cell_id; + + cell_id = ia_css_program_manifest_get_cell_ID(p_manifest); + invalid_flag = invalid_flag || + (cell_id != (vied_nci_cell_ID_t)(ia_css_process_get_cell(process))); + } + invalid_flag = invalid_flag || + ((process->cell_dependency_count + + process->terminal_dependency_count) == 0) || + (process->cell_dependency_count != + ia_css_program_manifest_get_program_dependency_count(p_manifest)) || + (process->terminal_dependency_count != + ia_css_program_manifest_get_terminal_dependency_count(p_manifest)); + + /* TODO: to be removed once all PGs pass validation */ + if (invalid_flag == true) { + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, INFO, + "ia_css_is_process_valid(): false\n"); + } + +EXIT: + if (haserror(EFAULT)) { + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, ERROR, + "ia_css_is_process_valid() invalid argument\n"); + return false; + } else { + return (!invalid_flag); + } +} + +#endif /* __IA_CSS_PSYS_PROCESS_IMPL_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/psysapi/dynamic/src/ia_css_psys_process_private_types.h b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/psysapi/dynamic/src/ia_css_psys_process_private_types.h new file mode 100644 index 0000000000000..ae0affde97187 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/psysapi/dynamic/src/ia_css_psys_process_private_types.h @@ -0,0 +1,87 @@ +/* +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. 
+ * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ + +#ifndef __IA_CSS_PSYS_PROCESS_PRIVATE_TYPES_H +#define __IA_CSS_PSYS_PROCESS_PRIVATE_TYPES_H + +#include "ia_css_psys_process_types.h" +#include "vied_nci_psys_resource_model.h" + +#define N_UINT32_IN_PROCESS_STRUCT 2 +#define N_UINT16_IN_PROCESS_STRUCT 3 +#define N_UINT8_IN_PROCESS_STRUCT 2 + +#define SIZE_OF_PROCESS_STRUCT_BITS \ + (IA_CSS_KERNEL_BITMAP_BITS \ + + (N_UINT32_IN_PROCESS_STRUCT * 32) \ + + IA_CSS_PROGRAM_ID_BITS \ + + (VIED_NCI_RESOURCE_BITMAP_BITS * VIED_NCI_N_DEV_DFM_ID) \ + + (VIED_NCI_RESOURCE_BITMAP_BITS * VIED_NCI_N_DEV_DFM_ID) \ + + IA_CSS_PROCESS_STATE_BITS \ + + (N_UINT16_IN_PROCESS_STRUCT * 16) \ + + (VIED_NCI_N_MEM_TYPE_ID * VIED_NCI_RESOURCE_SIZE_BITS) \ + + (VIED_NCI_N_DATA_MEM_TYPE_ID * VIED_NCI_RESOURCE_SIZE_BITS) \ + + (VIED_NCI_N_DEV_CHN_ID * VIED_NCI_RESOURCE_SIZE_BITS) \ + + (IA_CSS_PROCESS_MAX_CELLS * VIED_NCI_RESOURCE_ID_BITS) \ + + (VIED_NCI_N_MEM_TYPE_ID * VIED_NCI_RESOURCE_ID_BITS) \ + + (VIED_NCI_N_DATA_MEM_TYPE_ID * VIED_NCI_RESOURCE_ID_BITS) \ + + (N_UINT8_IN_PROCESS_STRUCT * 8) \ + + (N_PADDING_UINT8_IN_PROCESS_STRUCT * 8)) + +struct ia_css_process_s { + /**< Indicate which kernels lead to this process being used */ + ia_css_kernel_bitmap_t kernel_bitmap; + uint32_t size; /**< Size of this structure */ + ia_css_program_ID_t ID; /**< Referal ID to a specific program FW */ + uint32_t program_idx; /**< Program Index into the PG manifest */ +#if (VIED_NCI_N_DEV_DFM_ID > 0) + /**< DFM port allocated to this process */ + vied_nci_resource_bitmap_t 
dfm_port_bitmap[VIED_NCI_N_DEV_DFM_ID]; + /**< Active DFM ports which need a kick */ + vied_nci_resource_bitmap_t dfm_active_port_bitmap[VIED_NCI_N_DEV_DFM_ID]; +#endif + /**< State of the process FSM dependent on the parent FSM */ + ia_css_process_state_t state; + int16_t parent_offset; /**< Reference to the process group */ + /**< Array[dependency_count] of ID's of the cells that provide input */ + uint16_t cell_dependencies_offset; + /**< Array[terminal_dependency_count] of indices of connected terminals */ + uint16_t terminal_dependencies_offset; + /**< (internal) Memory allocation offset given to this process */ + vied_nci_resource_size_t int_mem_offset[VIED_NCI_N_MEM_TYPE_ID]; + /**< (external) Memory allocation offset given to this process */ + vied_nci_resource_size_t ext_mem_offset[VIED_NCI_N_DATA_MEM_TYPE_ID]; + /**< Device channel allocation offset given to this process */ + vied_nci_resource_size_t dev_chn_offset[VIED_NCI_N_DEV_CHN_ID]; + /**< Cells (VP, ACB) allocated for the process*/ +#if IA_CSS_PROCESS_MAX_CELLS == 1 + vied_nci_resource_id_t cell_id; +#else + vied_nci_resource_id_t cells[IA_CSS_PROCESS_MAX_CELLS]; +#endif /* IA_CSS_PROCESS_MAX_CELLS == 1 */ + /**< (internal) Memory ID; This is redundant, derived from cell_id */ + vied_nci_resource_id_t int_mem_id[VIED_NCI_N_MEM_TYPE_ID]; + /**< (external) Memory ID */ + vied_nci_resource_id_t ext_mem_id[VIED_NCI_N_DATA_MEM_TYPE_ID]; + /**< Number of processes (mapped on cells) this process depends on */ + uint8_t cell_dependency_count; + /**< Number of terminals this process depends on */ + uint8_t terminal_dependency_count; + /**< Padding bytes for 64bit alignment*/ +#if (N_PADDING_UINT8_IN_PROCESS_STRUCT > 0) + uint8_t padding[N_PADDING_UINT8_IN_PROCESS_STRUCT]; +#endif /*(N_PADDING_UINT8_IN_PROCESS_STRUCT > 0)*/ +}; + +#endif /* __IA_CSS_PSYS_PROCESS_PRIVATE_TYPES_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/psysapi/dynamic/src/ia_css_psys_terminal.c 
b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/psysapi/dynamic/src/ia_css_psys_terminal.c new file mode 100644 index 0000000000000..ea406f2292739 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/psysapi/dynamic/src/ia_css_psys_terminal.c @@ -0,0 +1,604 @@ +/* +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ + +#include "ia_css_psys_dynamic_storage_class.h" +#include "ia_css_psys_terminal_private_types.h" +#include "ia_css_terminal_types.h" + +/* + * Functions to possibly inline + */ + +#ifndef __IA_CSS_PSYS_DYNAMIC_INLINE__ +#include "ia_css_psys_terminal_impl.h" +#endif /* __IA_CSS_PSYS_DYNAMIC_INLINE__ */ + +STORAGE_CLASS_INLINE void __terminal_dummy_check_alignment(void) +{ + COMPILATION_ERROR_IF( + SIZE_OF_PARAM_TERMINAL_STRUCT_BITS != + (CHAR_BIT * sizeof(ia_css_param_terminal_t))); + + COMPILATION_ERROR_IF(0 != + sizeof(ia_css_param_terminal_t) % sizeof(uint64_t)); + + COMPILATION_ERROR_IF( + SIZE_OF_PARAM_SEC_STRUCT_BITS != + (CHAR_BIT * sizeof(ia_css_param_section_desc_t))); + + COMPILATION_ERROR_IF(0 != + sizeof(ia_css_param_section_desc_t) % sizeof(uint64_t)); + + COMPILATION_ERROR_IF( + SIZE_OF_SPATIAL_PARAM_TERM_STRUCT_BITS != + (CHAR_BIT * sizeof(ia_css_spatial_param_terminal_t))); + + COMPILATION_ERROR_IF(0 != + sizeof(ia_css_spatial_param_terminal_t) % sizeof(uint64_t)); + + COMPILATION_ERROR_IF( + SIZE_OF_FRAME_GRID_PARAM_SEC_STRUCT_BITS != + (CHAR_BIT * sizeof( + ia_css_frame_grid_param_section_desc_t))); + + 
COMPILATION_ERROR_IF(0 != sizeof( + ia_css_frame_grid_param_section_desc_t) % sizeof(uint64_t)); + + COMPILATION_ERROR_IF( + SIZE_OF_FRAG_GRID_STRUCT_BITS != + (CHAR_BIT * sizeof(ia_css_fragment_grid_desc_t))); + + COMPILATION_ERROR_IF(0 != + sizeof(ia_css_fragment_grid_desc_t) % sizeof(uint64_t)); + + COMPILATION_ERROR_IF( + SIZE_OF_SLICED_PARAM_TERM_STRUCT_BITS != + (CHAR_BIT * sizeof(ia_css_sliced_param_terminal_t))); + + COMPILATION_ERROR_IF(0 != + sizeof(ia_css_sliced_param_terminal_t)%sizeof(uint64_t)); + + COMPILATION_ERROR_IF( + SIZE_OF_FRAGMENT_SLICE_DESC_STRUCT_BITS != + (CHAR_BIT * sizeof(ia_css_fragment_slice_desc_t))); + + COMPILATION_ERROR_IF(0 != + sizeof(ia_css_fragment_slice_desc_t)%sizeof(uint64_t)); + + COMPILATION_ERROR_IF( + SIZE_OF_SLICE_PARAM_SECTION_DESC_STRUCT_BITS != + (CHAR_BIT * sizeof( + ia_css_slice_param_section_desc_t))); + + COMPILATION_ERROR_IF(0 != + sizeof(ia_css_slice_param_section_desc_t)%sizeof(uint64_t)); + + COMPILATION_ERROR_IF( + SIZE_OF_PROG_TERM_STRUCT_BITS != + (CHAR_BIT * sizeof(ia_css_program_terminal_t))); + + COMPILATION_ERROR_IF(0 != + sizeof(ia_css_program_terminal_t)%sizeof(uint64_t)); + + COMPILATION_ERROR_IF( + SIZE_OF_FRAG_SEQ_INFO_STRUCT_BITS != + (CHAR_BIT * sizeof( + ia_css_kernel_fragment_sequencer_info_desc_t))); + + COMPILATION_ERROR_IF(0 != + sizeof(ia_css_kernel_fragment_sequencer_info_desc_t) % + sizeof(uint64_t)); + + COMPILATION_ERROR_IF( + SIZE_OF_FRAG_SEQ_COMMANDS_STRUCT_BITS != + (CHAR_BIT * sizeof( + ia_css_kernel_fragment_sequencer_command_desc_t))); + + COMPILATION_ERROR_IF(0 != + sizeof(ia_css_kernel_fragment_sequencer_command_desc_t) % + sizeof(uint64_t)); + + COMPILATION_ERROR_IF( + SIZE_OF_FRAG_PARAM_SEC_STRUCT_BITS != + (CHAR_BIT * sizeof(ia_css_fragment_param_section_desc_t))); + + COMPILATION_ERROR_IF(0 != + sizeof(ia_css_fragment_param_section_desc_t)%sizeof(uint64_t)); + + COMPILATION_ERROR_IF( + SIZE_OF_PROG_CONTROL_INIT_LOAD_SECTION_DESC_STRUCT_BITS != + (CHAR_BIT * + 
sizeof(ia_css_program_control_init_load_section_desc_t))); + + COMPILATION_ERROR_IF(0 != + sizeof(ia_css_program_control_init_load_section_desc_t) % + sizeof(uint64_t)); + + COMPILATION_ERROR_IF( + SIZE_OF_PROG_CONTROL_INIT_CONNECT_SECTION_DESC_STRUCT_BITS != + (CHAR_BIT * + sizeof(ia_css_program_control_init_connect_section_desc_t))); + + COMPILATION_ERROR_IF(0 != + sizeof(ia_css_program_control_init_connect_section_desc_t) % + sizeof(uint64_t)); + + COMPILATION_ERROR_IF( + SIZE_OF_PROGRAM_DESC_CONTROL_INFO_STRUCT_BITS != + (CHAR_BIT * + sizeof(struct ia_css_program_desc_control_info_s))); + + COMPILATION_ERROR_IF( + SIZE_OF_PROG_CONTROL_INIT_PROG_DESC_STRUCT_BITS != + (CHAR_BIT * + sizeof(ia_css_program_control_init_program_desc_t))); + + COMPILATION_ERROR_IF(0 != + sizeof(ia_css_program_control_init_program_desc_t) % + sizeof(uint64_t)); + + COMPILATION_ERROR_IF( + SIZE_OF_PROG_CONTROL_INIT_TERM_STRUCT_BITS != + (CHAR_BIT * sizeof(ia_css_program_control_init_terminal_t))); + + COMPILATION_ERROR_IF(0 != + sizeof(ia_css_program_control_init_terminal_t) % + sizeof(uint64_t)); +} + +/* + * Functions not to inline + */ + +/* + * This source file is created with the intention of sharing and + * compiled for host and firmware. Since there is no native 64bit + * data type support for firmware this wouldn't compile for SP + * tile. The part of the file that is not compilable are marked + * with the following __VIED_CELL marker and this comment. Once we + * come up with a solution to address this issue this will be + * removed. 
+ */ +#if !defined(__VIED_CELL) +size_t ia_css_sizeof_terminal( + const ia_css_terminal_manifest_t *manifest, + const ia_css_program_group_param_t *param) +{ + size_t size = 0; + uint16_t fragment_count = + ia_css_program_group_param_get_fragment_count(param); + + COMPILATION_ERROR_IF( + SIZE_OF_DATA_TERMINAL_STRUCT_BITS != + (CHAR_BIT * sizeof(ia_css_data_terminal_t))); + + COMPILATION_ERROR_IF( + 0 != sizeof(ia_css_data_terminal_t)%sizeof(uint64_t)); + + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, VERBOSE, + "ia_css_sizeof_terminal(): enter:\n"); + + verifexit(manifest != NULL); + verifexit(param != NULL); + + if (ia_css_is_terminal_manifest_parameter_terminal(manifest)) { + const ia_css_param_terminal_manifest_t *param_term_man = + (const ia_css_param_terminal_manifest_t *)manifest; + if (ia_css_terminal_manifest_get_type(manifest) == + IA_CSS_TERMINAL_TYPE_PARAM_CACHED_IN) { + size = ia_css_param_in_terminal_get_descriptor_size( + param_term_man->param_manifest_section_desc_count); + } else if (ia_css_terminal_manifest_get_type(manifest) == + IA_CSS_TERMINAL_TYPE_PARAM_CACHED_OUT) { + size = ia_css_param_out_terminal_get_descriptor_size( + param_term_man->param_manifest_section_desc_count, + fragment_count); + } else { + assert(NULL == "Invalid parameter terminal type"); + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, ERROR, + "ia_css_sizeof_terminal(): Invalid parameter terminal type:\n"); + verifjmpexit(0); + } + } else if (ia_css_is_terminal_manifest_data_terminal(manifest)) { + size += sizeof(ia_css_data_terminal_t); + size += fragment_count * sizeof(ia_css_fragment_descriptor_t); + } else if (ia_css_is_terminal_manifest_program_terminal(manifest)) { + ia_css_program_terminal_manifest_t *prog_term_man = + (ia_css_program_terminal_manifest_t *)manifest; + + size = ia_css_program_terminal_get_descriptor_size( + fragment_count, + prog_term_man-> + fragment_param_manifest_section_desc_count, + prog_term_man-> + kernel_fragment_sequencer_info_manifest_info_count, + (fragment_count * 
prog_term_man-> + max_kernel_fragment_sequencer_command_desc)); + } else if (ia_css_is_terminal_manifest_spatial_parameter_terminal( + manifest)) { + ia_css_spatial_param_terminal_manifest_t *spatial_param_term = + (ia_css_spatial_param_terminal_manifest_t *)manifest; + size = ia_css_spatial_param_terminal_get_descriptor_size( + spatial_param_term-> + frame_grid_param_manifest_section_desc_count, + fragment_count); + } else if (ia_css_is_terminal_manifest_program_control_init_terminal( + manifest)) { + ia_css_program_control_init_terminal_manifest_t *progctrlinit_term_man = + (ia_css_program_control_init_terminal_manifest_t *)manifest; + + size = ia_css_program_control_init_terminal_get_descriptor_size( + progctrlinit_term_man); + } +EXIT: + if (NULL == manifest || NULL == param) { + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, WARNING, + "ia_css_sizeof_terminal invalid argument\n"); + } + return size; +} + +ia_css_terminal_t *ia_css_terminal_create( + void *raw_mem, + const ia_css_terminal_manifest_t *manifest, + const ia_css_terminal_param_t *terminal_param, + ia_css_kernel_bitmap_t enable_bitmap) +{ + char *terminal_raw_ptr; + ia_css_terminal_t *terminal = NULL; + uint16_t fragment_count; + int i, j; + int retval = -1; + ia_css_program_group_param_t *param; + + IA_CSS_TRACE_2(PSYSAPI_DYNAMIC, INFO, + "ia_css_terminal_create(manifest %p, terminal_param %p): enter:\n", + manifest, terminal_param); + + param = ia_css_terminal_param_get_parent(terminal_param); + fragment_count = ia_css_program_group_param_get_fragment_count(param); + + verifexit(manifest != NULL); + verifexit(param != NULL); + + terminal_raw_ptr = (char *) raw_mem; + + terminal = (ia_css_terminal_t *) terminal_raw_ptr; + verifexit(terminal != NULL); + + terminal->size = (uint16_t)ia_css_sizeof_terminal(manifest, param); + verifexit(ia_css_terminal_set_type( + terminal, ia_css_terminal_manifest_get_type(manifest)) == 0); + + terminal->ID = ia_css_terminal_manifest_get_ID(manifest); + + 
verifexit(ia_css_terminal_set_buffer(terminal, + VIED_NULL) == 0); + + if (ia_css_is_terminal_manifest_data_terminal(manifest) == true) { + ia_css_data_terminal_t *dterminal = + (ia_css_data_terminal_t *)terminal; + ia_css_frame_t *frame = + ia_css_data_terminal_get_frame(dterminal); + ia_css_kernel_bitmap_t intersection = + ia_css_kernel_bitmap_intersection(enable_bitmap, + ia_css_data_terminal_manifest_get_kernel_bitmap( + (const ia_css_data_terminal_manifest_t *)manifest)); + + verifexit(frame != NULL); + verifexit(ia_css_frame_set_buffer_state( + frame, IA_CSS_BUFFER_NULL) == 0); + verifexit(ia_css_is_kernel_bitmap_onehot(intersection) == + true); + + terminal_raw_ptr += sizeof(ia_css_data_terminal_t); + dterminal->fragment_descriptor_offset = + (uint16_t) (terminal_raw_ptr - (char *)terminal); + + dterminal->kernel_id = 0; + while (!ia_css_is_kernel_bitmap_empty(intersection)) { + intersection = ia_css_kernel_bitmap_shift( + intersection); + dterminal->kernel_id++; + } + assert(dterminal->kernel_id > 0); + dterminal->kernel_id -= 1; + + /* some terminal and fragment initialization */ + dterminal->frame_descriptor.frame_format_type = + terminal_param->frame_format_type; + for (i = 0; i < IA_CSS_N_DATA_DIMENSION; i++) { + dterminal->frame_descriptor.dimension[i] = + terminal_param->dimensions[i]; + } + dterminal->frame_descriptor.stride[IA_CSS_COL_DIMENSION] = + terminal_param->stride; + dterminal->frame_descriptor.bpp = terminal_param->bpp; + dterminal->frame_descriptor.bpe = terminal_param->bpe; + switch (dterminal->frame_descriptor.frame_format_type) { + case IA_CSS_DATA_FORMAT_UYVY: + case IA_CSS_DATA_FORMAT_YUYV: + case IA_CSS_DATA_FORMAT_Y800: + case IA_CSS_DATA_FORMAT_RGB565: + case IA_CSS_DATA_FORMAT_RGBA888: + case IA_CSS_DATA_FORMAT_BAYER_GRBG: + case IA_CSS_DATA_FORMAT_BAYER_RGGB: + case IA_CSS_DATA_FORMAT_BAYER_BGGR: + case IA_CSS_DATA_FORMAT_BAYER_GBRG: + case IA_CSS_DATA_FORMAT_RAW: + case IA_CSS_DATA_FORMAT_RAW_PACKED: + case 
IA_CSS_DATA_FORMAT_YYUVYY_VECTORIZED: + case IA_CSS_DATA_FORMAT_PAF: + dterminal->frame_descriptor.plane_count = 1; + dterminal->frame_descriptor.plane_offsets[0] = 0; + break; + case IA_CSS_DATA_FORMAT_NV12: + case IA_CSS_DATA_FORMAT_NV21: + case IA_CSS_DATA_FORMAT_NV16: + case IA_CSS_DATA_FORMAT_NV61: + dterminal->frame_descriptor.plane_count = 2; + dterminal->frame_descriptor.plane_offsets[0] = 0; + dterminal->frame_descriptor.plane_offsets[1] = + dterminal->frame_descriptor.plane_offsets[0] + + dterminal->frame_descriptor.stride[IA_CSS_COL_DIMENSION] * + dterminal->frame_descriptor.dimension[IA_CSS_ROW_DIMENSION]; + break; + case IA_CSS_DATA_FORMAT_YUV444: + case IA_CSS_DATA_FORMAT_RGB888: + case IA_CSS_DATA_FORMAT_YUV420_VECTORIZED: + dterminal->frame_descriptor.plane_count = 3; + dterminal->frame_descriptor.plane_offsets[0] = 0; + dterminal->frame_descriptor.plane_offsets[1] = + dterminal->frame_descriptor.plane_offsets[0] + + dterminal->frame_descriptor.stride[IA_CSS_COL_DIMENSION] * + dterminal->frame_descriptor.dimension[IA_CSS_ROW_DIMENSION]; + dterminal->frame_descriptor.plane_offsets[2] = + dterminal->frame_descriptor.plane_offsets[1] + + dterminal->frame_descriptor.stride[IA_CSS_COL_DIMENSION] * + dterminal->frame_descriptor.dimension[IA_CSS_ROW_DIMENSION]; + break; + case IA_CSS_DATA_FORMAT_YUV420: + dterminal->frame_descriptor.plane_count = 3; + dterminal->frame_descriptor.plane_offsets[0] = 0; + dterminal->frame_descriptor.plane_offsets[1] = + dterminal->frame_descriptor.plane_offsets[0] + + dterminal->frame_descriptor.stride[IA_CSS_COL_DIMENSION] * + dterminal->frame_descriptor.dimension[IA_CSS_ROW_DIMENSION]; + dterminal->frame_descriptor.plane_offsets[2] = + dterminal->frame_descriptor.plane_offsets[1] + + dterminal->frame_descriptor.stride[IA_CSS_COL_DIMENSION]/2 * + dterminal->frame_descriptor.dimension[IA_CSS_ROW_DIMENSION]/2; + break; + default: + /* Unset, resulting in potential terminal connect issues */ + 
dterminal->frame_descriptor.plane_count = 1; + dterminal->frame_descriptor.plane_offsets[0] = 0; + break; + } + /* + * Initial solution for single fragment initialization + * TODO: + * where to get the fragment description params from??? + */ + if (fragment_count > 0) { + ia_css_fragment_descriptor_t *fragment_descriptor = + (ia_css_fragment_descriptor_t *) + terminal_raw_ptr; + + fragment_descriptor->index[IA_CSS_COL_DIMENSION] = + terminal_param->index[IA_CSS_COL_DIMENSION]; + fragment_descriptor->index[IA_CSS_ROW_DIMENSION] = + terminal_param->index[IA_CSS_ROW_DIMENSION]; + fragment_descriptor->offset[0] = + terminal_param->offset; + for (i = 0; i < IA_CSS_N_DATA_DIMENSION; i++) { + fragment_descriptor->dimension[i] = + terminal_param->fragment_dimensions[i]; + } + } + /* end fragment stuff */ + } else if (ia_css_is_terminal_manifest_parameter_terminal(manifest) == + true) { + ia_css_param_terminal_t *pterminal = + (ia_css_param_terminal_t *)terminal; + uint16_t section_count = + ((const ia_css_param_terminal_manifest_t *)manifest)-> + param_manifest_section_desc_count; + size_t curr_offset = 0; + + pterminal->param_section_desc_offset = + sizeof(ia_css_param_terminal_t); + + for (i = 0; i < section_count; i++) { + ia_css_param_section_desc_t *section = + ia_css_param_in_terminal_get_param_section_desc( + pterminal, i); + const ia_css_param_manifest_section_desc_t * + man_section = + ia_css_param_terminal_manifest_get_prm_sct_desc( + (const ia_css_param_terminal_manifest_t *)manifest, i); + + verifjmpexit(man_section != NULL); + verifjmpexit(section != NULL); + + section->mem_size = man_section->max_mem_size; + section->mem_offset = curr_offset; + curr_offset += man_section->max_mem_size; + } + } else if (ia_css_is_terminal_manifest_program_terminal(manifest) == + true && + ia_css_terminal_manifest_get_type(manifest) == + IA_CSS_TERMINAL_TYPE_PROGRAM) { /* for program terminal */ + ia_css_program_terminal_t *prog_terminal = + (ia_css_program_terminal_t 
*)terminal; + const ia_css_program_terminal_manifest_t *prog_terminal_man = + (const ia_css_program_terminal_manifest_t *)manifest; + ia_css_kernel_fragment_sequencer_info_desc_t + *sequencer_info_desc_base = NULL; + uint16_t section_count = prog_terminal_man-> + fragment_param_manifest_section_desc_count; + uint16_t manifest_info_count = + prog_terminal_man-> + kernel_fragment_sequencer_info_manifest_info_count; + /* information needs to come from user or manifest once + * the size sizeof function is updated. + */ + uint16_t nof_command_objs = 0; + size_t curr_offset = 0; + + prog_terminal->kernel_fragment_sequencer_info_desc_offset = + sizeof(ia_css_program_terminal_t); + prog_terminal->fragment_param_section_desc_offset = + prog_terminal-> + kernel_fragment_sequencer_info_desc_offset + + (fragment_count * manifest_info_count * + sizeof(ia_css_kernel_fragment_sequencer_info_desc_t)) + + (nof_command_objs * + sizeof( + ia_css_kernel_fragment_sequencer_command_desc_t)); + + NOT_USED(sequencer_info_desc_base); + for (i = 0; i < fragment_count; i++) { + for (j = 0; j < section_count; j++) { + ia_css_fragment_param_section_desc_t *section = + ia_css_program_terminal_get_frgmnt_prm_sct_desc( + prog_terminal, i, j, section_count); + const ia_css_fragment_param_manifest_section_desc_t * + man_section = +ia_css_program_terminal_manifest_get_frgmnt_prm_sct_desc + (prog_terminal_man, j); + + verifjmpexit(man_section != NULL); + verifjmpexit(section != NULL); + + section->mem_size = man_section->max_mem_size; + section->mem_offset = curr_offset; + curr_offset += man_section->max_mem_size; + } + + sequencer_info_desc_base = + ia_css_program_terminal_get_kernel_frgmnt_seq_info_desc( + prog_terminal, i, 0, + manifest_info_count); + + /* + * This offset cannot be initialized properly + * since the number of commands in every sequencer + * is not known at this point + */ + /*for (j = 0; j < manifest_info_count; j++) { + sequencer_info_desc_base[j]. 
+ command_desc_offset = + prog_terminal-> + kernel_fragment_sequencer_info_desc_offset + + (manifest_info_count * + sizeof( + ia_css_kernel_fragment_sequencer_info_desc_t) + + (nof_command_objs * + sizeof( + ia_css_kernel_fragment_sequencer_command_desc_t + )); + }*/ + } + } else if (ia_css_is_terminal_manifest_spatial_parameter_terminal( + manifest) == true) { + ia_css_spatial_param_terminal_t *spatial_param_terminal = + (ia_css_spatial_param_terminal_t *)terminal; + ia_css_spatial_param_terminal_manifest_t * + spatia_param_terminal_man = + (ia_css_spatial_param_terminal_manifest_t *)manifest; + + /* Initialize the spatial terminal structure */ + spatial_param_terminal->fragment_grid_desc_offset = + sizeof(ia_css_spatial_param_terminal_t); + spatial_param_terminal->frame_grid_param_section_desc_offset = + spatial_param_terminal->fragment_grid_desc_offset + + (fragment_count * sizeof(ia_css_fragment_grid_desc_t)); + spatial_param_terminal->kernel_id = + spatia_param_terminal_man->kernel_id; + } else if (ia_css_is_terminal_manifest_sliced_terminal(manifest) == + true) { + ia_css_sliced_param_terminal_t *sliced_param_terminal = + (ia_css_sliced_param_terminal_t *)terminal; + ia_css_sliced_param_terminal_manifest_t + *sliced_param_terminal_man = + (ia_css_sliced_param_terminal_manifest_t *)manifest; + + /* Initialize the sliced terminal structure */ + sliced_param_terminal->fragment_slice_desc_offset = + sizeof(ia_css_sliced_param_terminal_t); + sliced_param_terminal->kernel_id = + sliced_param_terminal_man->kernel_id; + } else if (ia_css_is_terminal_manifest_program_control_init_terminal( + manifest) == true) { + verifjmpexit(ia_css_program_control_init_terminal_init( + (ia_css_program_control_init_terminal_t *) + terminal, + (const ia_css_program_control_init_terminal_manifest_t *) + manifest) == 0); + } else { + IA_CSS_TRACE_1(PSYSAPI_DYNAMIC, ERROR, + "ia_css_terminal_create failed, not a data or param terminal. 
Returning (%i)\n", + EFAULT); + goto EXIT; + } + + IA_CSS_TRACE_1(PSYSAPI_DYNAMIC, INFO, + "ia_css_terminal_create(): Created successfully terminal %p\n", + terminal); + + retval = 0; +EXIT: + if (NULL == manifest) { + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, WARNING, + "ia_css_terminal_create invalid argument\n"); + } + if (retval != 0) { + IA_CSS_TRACE_1(PSYSAPI_DYNAMIC, ERROR, + "ia_css_terminal_create failed (%i)\n", retval); + terminal = ia_css_terminal_destroy(terminal); + } + return terminal; +} + +ia_css_terminal_t *ia_css_terminal_destroy( + ia_css_terminal_t *terminal) +{ + IA_CSS_TRACE_1(PSYSAPI_DYNAMIC, INFO, + "ia_css_terminal_destroy(terminal %p): enter:\n", terminal); + return terminal; +} + +uint16_t ia_css_param_terminal_compute_section_count( + const ia_css_terminal_manifest_t *manifest, + const ia_css_program_group_param_t *param) /* Delete 2nd argument*/ +{ + uint16_t section_count = 0; + + NOT_USED(param); + + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, VERBOSE, + "ia_css_param_terminal_compute_section_count(): enter:\n"); + + verifexit(manifest != NULL); + section_count = ((const ia_css_param_terminal_manifest_t *)manifest)-> + param_manifest_section_desc_count; +EXIT: + if (NULL == manifest || NULL == param) { + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, WARNING, + "ia_css_param_terminal_compute_section_count: invalid argument\n"); + } + return section_count; +} +#endif /* !defined(__VIED_CELL) */ diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/psysapi/dynamic/src/ia_css_psys_terminal_impl.h b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/psysapi/dynamic/src/ia_css_psys_terminal_impl.h new file mode 100644 index 0000000000000..36fb0f1d469a1 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/psysapi/dynamic/src/ia_css_psys_terminal_impl.h @@ -0,0 +1,1868 @@ +/* +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. 
+ * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ + +#ifndef __IA_CSS_PSYS_TERMINAL_IMPL_H +#define __IA_CSS_PSYS_TERMINAL_IMPL_H + +#include + +#include +#include + +#include +#include + +#include + + +#include +#include /* for verifexit, verifjmpexit */ +#include /* for COMPILATION_ERROR_IF */ +#include /* for NOT_USED */ +#include "ia_css_psys_terminal_private_types.h" +#include "ia_css_terminal_manifest_types.h" +#include "ia_css_psys_dynamic_trace.h" +#include "ia_css_psys_manifest_types.h" +#include "ia_css_psys_program_group_private.h" +#include "ia_css_terminal_types.h" + +STORAGE_CLASS_INLINE int ia_css_data_terminal_print(const ia_css_terminal_t *terminal, + void *fid) { + + DECLARE_ERRVAL + int retval = -1; + int i; + ia_css_data_terminal_t *dterminal = (ia_css_data_terminal_t *)terminal; + uint16_t fragment_count = + ia_css_data_terminal_get_fragment_count(dterminal); + verifexitval(fragment_count != 0, EINVAL); + + retval = ia_css_frame_descriptor_print( + ia_css_data_terminal_get_frame_descriptor(dterminal), + fid); + verifexitval(retval == 0, EINVAL); + + retval = ia_css_frame_print( + ia_css_data_terminal_get_frame(dterminal), fid); + verifexitval(retval == 0, EINVAL); + + for (i = 0; i < (int)fragment_count; i++) { + retval = ia_css_fragment_descriptor_print( + ia_css_data_terminal_get_fragment_descriptor( + dterminal, i), fid); + verifexitval(retval == 0, EINVAL); + } + + retval = 0; +EXIT: + if (!noerror()) { + IA_CSS_TRACE_1(PSYSAPI_DYNAMIC, ERROR, + "ia_css_terminal_print failed (%i)\n", retval); + } + return retval; +} + 
/**
 * ia_css_terminal_print() - Dump a terminal's header fields to the trace log.
 * @terminal: terminal to print; must not be NULL.
 * @fid: opaque file/trace identifier forwarded to the data-terminal printer.
 *
 * Prints size, type and parent pointer of @terminal, then dispatches to the
 * type-specific print routine for data and program-control-init terminals.
 * Other terminal types print only the common header.
 *
 * Return: 0 on success, -1 on error (NULL @terminal).
 */
IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_C
int ia_css_terminal_print(
	const ia_css_terminal_t *terminal,
	void *fid)
{
	DECLARE_ERRVAL
	int retval = -1;
	/* NOTE(review): called before the NULL check below; relies on
	 * ia_css_terminal_get_type() doing its own NULL check and returning
	 * an error value in that case -- confirm that contract holds. */
	ia_css_terminal_type_t term_type = ia_css_terminal_get_type(terminal);

	IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, INFO,
		"ia_css_terminal_print(): enter:\n");

	verifexitval(terminal != NULL, EFAULT);

	IA_CSS_TRACE_4(PSYSAPI_DYNAMIC, INFO,
		"\tTerminal %p sizeof %d, typeof %d, parent %p\n",
		terminal,
		(int)ia_css_terminal_get_size(terminal),
		(int)ia_css_terminal_get_type(terminal),
		(void *)ia_css_terminal_get_parent(terminal));

	switch (term_type) {
	case IA_CSS_TERMINAL_TYPE_PROGRAM_CONTROL_INIT:
		ia_css_program_control_init_terminal_print(
			(ia_css_program_control_init_terminal_t *)terminal);
		break;
	case IA_CSS_TERMINAL_TYPE_DATA_IN:
	case IA_CSS_TERMINAL_TYPE_DATA_OUT:
		ia_css_data_terminal_print(terminal, fid);
		break;
	default:
		/* other terminal prints are currently not supported */
		break;
	}

	retval = 0;
EXIT:
	if (haserror(EFAULT)) {
		IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, ERROR,
			"ia_css_terminal_print invalid argument terminal\n");
	}
	if (!noerror()) {
		IA_CSS_TRACE_1(PSYSAPI_DYNAMIC, ERROR,
			"ia_css_terminal_print failed (%i)\n", retval);
	}
	return retval;
}

/**
 * ia_css_is_terminal_input() - Check whether a terminal carries input data.
 * @terminal: terminal to classify; must not be NULL.
 *
 * Return: true for the *_IN, param-stream, program and program-control-init
 * terminal types; false for *_OUT types, for unknown types, and on error
 * (NULL @terminal).
 */
IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_C
bool ia_css_is_terminal_input(
	const ia_css_terminal_t *terminal)
{
	DECLARE_ERRVAL
	bool is_input = false;
	ia_css_terminal_type_t terminal_type;

	IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, VERBOSE,
		"ia_css_is_terminal_input(): enter:\n");

	verifexitval(terminal != NULL, EFAULT);

	terminal_type = ia_css_terminal_get_type(terminal);

	switch (terminal_type) {
	case IA_CSS_TERMINAL_TYPE_DATA_IN:	/* Fall through */
	case IA_CSS_TERMINAL_TYPE_STATE_IN:	/* Fall through */
	case IA_CSS_TERMINAL_TYPE_PARAM_STREAM:	/* Fall through */
	case IA_CSS_TERMINAL_TYPE_PARAM_CACHED_IN:
	case IA_CSS_TERMINAL_TYPE_PARAM_SPATIAL_IN:
	case IA_CSS_TERMINAL_TYPE_PARAM_SLICED_IN:
	case IA_CSS_TERMINAL_TYPE_PROGRAM:
	case IA_CSS_TERMINAL_TYPE_PROGRAM_CONTROL_INIT:
		is_input = true;
		break;
	case IA_CSS_TERMINAL_TYPE_DATA_OUT:	/* Fall through */
	case IA_CSS_TERMINAL_TYPE_STATE_OUT:
	case IA_CSS_TERMINAL_TYPE_PARAM_CACHED_OUT:
	case IA_CSS_TERMINAL_TYPE_PARAM_SLICED_OUT:
	case IA_CSS_TERMINAL_TYPE_PARAM_SPATIAL_OUT:
		is_input = false;
		break;
	default:
		IA_CSS_TRACE_1(PSYSAPI_DYNAMIC, ERROR,
			"ia_css_is_terminal_input: Unknown terminal type (%d)\n",
			terminal_type);
		goto EXIT;
	}

EXIT:
	if (haserror(EFAULT)) {
		IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, ERROR,
			"ia_css_is_terminal_input invalid argument\n");
	}
	return is_input;
}

/**
 * ia_css_terminal_get_size() - Get the stored byte size of a terminal.
 * @terminal: terminal to query; must not be NULL.
 *
 * Return: terminal->size, or 0 on error (NULL @terminal).
 */
IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_C
size_t ia_css_terminal_get_size(
	const ia_css_terminal_t *terminal)
{
	DECLARE_ERRVAL
	size_t size = 0;

	IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, VERBOSE,
		"ia_css_terminal_get_size(): enter:\n");

	verifexitval(terminal != NULL, EFAULT);

	size = terminal->size;
EXIT:
	if (haserror(EFAULT)) {
		IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, ERROR,
			"ia_css_terminal_get_size invalid argument\n");
	}
	return size;
}

/**
 * ia_css_terminal_get_type() - Get the type tag of a terminal.
 * @terminal: terminal to query; must not be NULL.
 *
 * Return: terminal->terminal_type, or the sentinel IA_CSS_N_TERMINAL_TYPES
 * on error (NULL @terminal).
 */
IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_C
ia_css_terminal_type_t ia_css_terminal_get_type(
	const ia_css_terminal_t *terminal)
{
	DECLARE_ERRVAL
	ia_css_terminal_type_t terminal_type = IA_CSS_N_TERMINAL_TYPES;

	IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, VERBOSE,
		"ia_css_terminal_get_type(): enter:\n");

	verifexitval(terminal != NULL, EFAULT);

	terminal_type = terminal->terminal_type;
EXIT:
	if (haserror(EFAULT)) {
		IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, ERROR,
			"ia_css_terminal_get_type invalid argument\n");
	}
	return terminal_type;
}

/**
 * ia_css_terminal_set_type() - Set the type tag of a terminal.
 * @terminal: terminal to modify; must not be NULL.
 * @terminal_type: new type value; stored without range validation.
 *
 * Return: 0 on success, -1 on error (NULL @terminal).
 */
IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_C
int ia_css_terminal_set_type(
	ia_css_terminal_t *terminal,
	const ia_css_terminal_type_t terminal_type)
{
	DECLARE_ERRVAL
	int retval = -1;

	IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, VERBOSE,
		"ia_css_terminal_set_type(): enter:\n");

	verifexitval(terminal != NULL, EFAULT);

	terminal->terminal_type = terminal_type;

	retval = 0;
EXIT:
	if (haserror(EFAULT)) {
		IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, ERROR,
			"ia_css_terminal_set_type invalid argument terminal\n");
	}
	if (!noerror()) {
		IA_CSS_TRACE_1(PSYSAPI_DYNAMIC, ERROR,
			"ia_css_terminal_set_type failed (%i)\n", retval);
	}
	return retval;
}

/**
 * ia_css_terminal_get_terminal_manifest_index() - Get manifest index.
 * @terminal: terminal to query; must not be NULL.
 *
 * Return: terminal->tm_index, or the sentinel 0xffff on error
 * (NULL @terminal).
 */
IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_C
uint16_t ia_css_terminal_get_terminal_manifest_index(
	const ia_css_terminal_t *terminal)
{
	DECLARE_ERRVAL
	uint16_t terminal_manifest_index;

	/* 0xffff doubles as the "invalid" sentinel returned on error */
	terminal_manifest_index = 0xffff;
	IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, VERBOSE,
		"ia_css_terminal_get_terminal_manifest_index(): enter:\n");

	verifexitval(terminal != NULL, EFAULT);

	terminal_manifest_index = terminal->tm_index;
EXIT:
	if (haserror(EFAULT)) {
		IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, ERROR,
			"ia_css_terminal_get_terminal_manifest_index: invalid argument\n");
	}
	return terminal_manifest_index;
}

/**
 * ia_css_terminal_set_terminal_manifest_index() - Set manifest index.
 * @terminal: terminal to modify; must not be NULL.
 * @terminal_manifest_index: new index; stored without range validation.
 *
 * Return: 0 on success, -1 on error (NULL @terminal).
 */
IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_C
int ia_css_terminal_set_terminal_manifest_index(
	ia_css_terminal_t *terminal,
	const uint16_t terminal_manifest_index)
{
	DECLARE_ERRVAL
	int retval = -1;

	IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, VERBOSE,
		"ia_css_terminal_set_terminal_manifest_index(): enter:\n");

	verifexitval(terminal != NULL, EFAULT);
	terminal->tm_index = terminal_manifest_index;

	retval = 0;
EXIT:
	if (haserror(EFAULT)) {
		IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, ERROR,
			"ia_css_terminal_set_terminal_manifest_index: invalid argument terminal\n");
	}
	if (!noerror()) {
		IA_CSS_TRACE_1(PSYSAPI_DYNAMIC, ERROR,
			"ia_css_terminal_set_terminal_manifest_index: failed (%i)\n",
			retval);
	}
	return retval;
}

/**
 * ia_css_terminal_get_ID() - Get the terminal ID.
 * @terminal: terminal to query; must not be NULL.
 *
 * Return: terminal->ID, or 0 on error (NULL @terminal).
 */
IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_C
ia_css_terminal_ID_t ia_css_terminal_get_ID(
	const ia_css_terminal_t *terminal)
{
	DECLARE_ERRVAL
	ia_css_terminal_ID_t retval = IA_CSS_TERMINAL_INVALID_ID;

	IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, VERBOSE,
		"ia_css_terminal_get_ID(): enter:\n");

	verifexitval(terminal != NULL,
		EFAULT);

	retval = terminal->ID;
EXIT:
	if (haserror(EFAULT)) {
		IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, ERROR,
			"ia_css_terminal_get_ID invalid argument\n");
		retval = 0;
	}
	return retval;
}

/**
 * ia_css_data_terminal_get_kernel_id() - Get the kernel ID of a data terminal.
 * @dterminal: data terminal to query; must not be NULL.
 *
 * Return: dterminal->kernel_id, or 0 on error (NULL @dterminal).
 */
IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_C
uint8_t ia_css_data_terminal_get_kernel_id(
	const ia_css_data_terminal_t *dterminal)
{
	DECLARE_ERRVAL
	/* -1 wraps to 0xff in uint8_t; on error it is overwritten to 0 below */
	uint8_t retval = -1;

	IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, VERBOSE,
		"ia_css_data_terminal_get_kernel_id(): enter:\n");

	verifexitval(dterminal != NULL, EFAULT);

	retval = dterminal->kernel_id;
EXIT:
	if (haserror(EFAULT)) {
		IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, ERROR,
			"ia_css_data_terminal_get_kernel_id: invalid argument\n");
		retval = 0;
	}
	return retval;
}

/**
 * ia_css_data_terminal_get_connection_type() - Get the connection type.
 * @dterminal: data terminal to query; must not be NULL.
 *
 * Return: dterminal->connection_type, or the sentinel
 * IA_CSS_N_CONNECTION_TYPES on error (NULL @dterminal).
 */
IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_C
ia_css_connection_type_t ia_css_data_terminal_get_connection_type(
	const ia_css_data_terminal_t *dterminal)
{
	DECLARE_ERRVAL
	ia_css_connection_type_t connection_type = IA_CSS_N_CONNECTION_TYPES;

	IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, VERBOSE,
		"ia_css_data_terminal_get_connection_type(): enter:\n");

	verifexitval(dterminal != NULL, EFAULT);

	connection_type = dterminal->connection_type;
EXIT:
	if (haserror(EFAULT)) {
		IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, ERROR,
			"ia_css_data_terminal_get_connection_type: invalid argument\n");
	}
	return connection_type;
}

/**
 * ia_css_data_terminal_get_link_id() - Get the link ID of a data terminal.
 * @dterminal: data terminal to query; must not be NULL.
 *
 * Return: dterminal->link_id, or 0 on error (NULL @dterminal).
 */
IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_C
uint8_t ia_css_data_terminal_get_link_id(
	const ia_css_data_terminal_t *dterminal)
{
	DECLARE_ERRVAL
	uint8_t link_id = 0;

	IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, VERBOSE,
		"ia_css_data_terminal_get_link_id(): enter:\n");

	verifexitval(dterminal != NULL, EFAULT);

	link_id = dterminal->link_id;
EXIT:
	if (haserror(EFAULT)) {
		IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, ERROR,
			"ia_css_data_terminal_get_link_id: invalid argument\n");
	}
	return link_id;
}

/**
 * ia_css_data_terminal_set_link_id() - Set the link ID of a data terminal.
 * @dterminal: data terminal to modify; must not be NULL.
 * @link_id: new link ID; stored without range validation.
 *
 * Return: 0 on success, -1 on error (NULL @dterminal).
 */
IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_C
int ia_css_data_terminal_set_link_id(
	ia_css_data_terminal_t *dterminal,
	const uint8_t link_id)
{
	DECLARE_ERRVAL
	int retval = -1;

	IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, VERBOSE,
		"ia_css_data_terminal_set_link_id(): enter:\n");

	verifexitval(dterminal != NULL, EFAULT);
	dterminal->link_id = link_id;

	retval = 0;
EXIT:
	if (haserror(EFAULT)) {
		IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, ERROR,
			"ia_css_data_terminal_set_link_id: invalid argument terminal\n");
	}
	if (!noerror()) {
		IA_CSS_TRACE_1(PSYSAPI_DYNAMIC, ERROR,
			"ia_css_data_terminal_set_link_id: failed (%i)\n",
			retval);
	}
	return retval;
}

/**
 * ia_css_data_terminal_set_connection_type() - Set the connection type.
 * @dterminal: data terminal to modify; must not be NULL.
 * @connection_type: new connection type; stored without range validation.
 *
 * Return: 0 on success, -1 on error (NULL @dterminal).
 */
IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_C
int ia_css_data_terminal_set_connection_type(
	ia_css_data_terminal_t *dterminal,
	const ia_css_connection_type_t connection_type)
{
	DECLARE_ERRVAL
	int retval = -1;

	IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, VERBOSE,
		"ia_css_data_terminal_set_connection_type(): enter:\n");

	verifexitval(dterminal != NULL, EFAULT);

	dterminal->connection_type = connection_type;

	retval = 0;
EXIT:
	if (haserror(EFAULT)) {
		IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, ERROR,
			"ia_css_data_terminal_set_connection_type: invalid argument dterminal\n");
	}
	if (!noerror()) {
		IA_CSS_TRACE_1(PSYSAPI_DYNAMIC, ERROR,
			"ia_css_data_terminal_set_connection_type failed (%i)\n",
			retval);
	}
	return retval;
}

/**
 * ia_css_terminal_get_parent() - Resolve a terminal's parent process group.
 * @terminal: terminal to query; must not be NULL.
 *
 * The parent is stored as a byte offset relative to the terminal itself
 * (see ia_css_terminal_set_parent()), so it survives relocation of the
 * containing blob.
 *
 * Return: pointer to the parent process group, or NULL on error.
 */
IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_C
ia_css_process_group_t *ia_css_terminal_get_parent(
	const ia_css_terminal_t *terminal)
{
	DECLARE_ERRVAL
	ia_css_process_group_t *parent = NULL;

	IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, VERBOSE,
		"ia_css_terminal_get_parent(): enter:\n");

	verifexitval(terminal != NULL, EFAULT);

	parent = (ia_css_process_group_t *) ((char *)terminal +
						terminal->parent_offset);

EXIT:
	if (haserror(EFAULT)) {
		IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, ERROR,
			"ia_css_terminal_get_parent invalid argument\n");
	}
	return parent;
}

/**
 * ia_css_terminal_set_parent() - Record a terminal's parent process group.
 * @terminal: terminal to modify; must not be NULL.
 * @parent: parent process group; must not be NULL and must lie within
 *          uint16_t byte range after @terminal (offset is truncated).
 *
 * Return: 0 on success, -1 on error (NULL argument).
 */
IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_C
int ia_css_terminal_set_parent(
	ia_css_terminal_t *terminal,
	ia_css_process_group_t *parent)
{
	DECLARE_ERRVAL
	int retval = -1;

IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, VERBOSE, + "ia_css_terminal_set_parent(): enter:\n"); + + verifexitval(terminal != NULL, EFAULT); + verifexitval(parent != NULL, EFAULT); + + terminal->parent_offset = (uint16_t) ((char *)parent - + (char *)terminal); + + retval = 0; +EXIT: + if (haserror(EFAULT)) { + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, ERROR, + "ia_css_terminal_set_parent invalid argument\n"); + } + if (!noerror()) { + IA_CSS_TRACE_1(PSYSAPI_DYNAMIC, ERROR, + "ia_css_terminal_set_parent failed (%i)\n", retval); + } + return retval; +} + +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_C +ia_css_frame_t *ia_css_data_terminal_get_frame( + const ia_css_data_terminal_t *dterminal) +{ + DECLARE_ERRVAL + ia_css_frame_t *frame = NULL; + + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, VERBOSE, + "ia_css_data_terminal_get_frame(): enter:\n"); + + verifexitval(dterminal != NULL, EFAULT); + + frame = (ia_css_frame_t *)(&(dterminal->frame)); +EXIT: + if (haserror(EFAULT)) { + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, ERROR, + "ia_css_data_terminal_get_frame invalid argument\n"); + } + return frame; +} + +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_C +ia_css_frame_descriptor_t *ia_css_data_terminal_get_frame_descriptor( + const ia_css_data_terminal_t *dterminal) +{ + DECLARE_ERRVAL + ia_css_frame_descriptor_t *frame_descriptor = NULL; + + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, VERBOSE, + "ia_css_data_terminal_get_frame_descriptor(): enter:\n"); + + verifexitval(dterminal != NULL, EFAULT); + + frame_descriptor = + (ia_css_frame_descriptor_t *)(&(dterminal->frame_descriptor)); +EXIT: + if (haserror(EFAULT)) { + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, ERROR, + "ia_css_data_terminal_get_frame_descriptor: invalid argument\n"); + } + return frame_descriptor; +} + +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_C +ia_css_fragment_descriptor_t *ia_css_data_terminal_get_fragment_descriptor( + const ia_css_data_terminal_t *dterminal, + const unsigned int fragment_index) +{ + DECLARE_ERRVAL + ia_css_fragment_descriptor_t *fragment_descriptor = NULL; + uint16_t 
fragment_count = 0; + + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, VERBOSE, + "ia_css_data_terminal_get_frame_descriptor(): enter:\n"); + + fragment_count = ia_css_data_terminal_get_fragment_count(dterminal); + + verifexitval(dterminal != NULL, EFAULT); + verifexitval(fragment_count != 0, EINVAL); + verifexitval(fragment_index < fragment_count, EINVAL); + + fragment_descriptor = (ia_css_fragment_descriptor_t *) + ((char *)dterminal + dterminal->fragment_descriptor_offset); + + fragment_descriptor += fragment_index; +EXIT: + if (haserror(EFAULT)) { + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, ERROR, + "ia_css_data_terminal_get_frame_descriptor: invalid argument\n"); + } + return fragment_descriptor; +} + +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_C +uint16_t ia_css_data_terminal_get_fragment_count( + const ia_css_data_terminal_t *dterminal) +{ + DECLARE_ERRVAL + ia_css_process_group_t *parent; + uint16_t fragment_count = 0; + + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, VERBOSE, + "ia_css_data_terminal_get_fragment_count(): enter:\n"); + + parent = ia_css_terminal_get_parent((ia_css_terminal_t *)dterminal); + + verifexitval(dterminal != NULL, EFAULT); + verifexitval(parent != NULL, EFAULT); + + fragment_count = ia_css_process_group_get_fragment_count(parent); +EXIT: + if (haserror(EFAULT)) { + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, ERROR, + "ia_css_data_terminal_get_fragment_count: invalid argument\n"); + } + return fragment_count; +} + +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_C +bool ia_css_is_terminal_parameter_terminal( + const ia_css_terminal_t *terminal) +{ + DECLARE_ERRVAL + ia_css_terminal_type_t terminal_type = IA_CSS_N_TERMINAL_TYPES; + + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, VERBOSE, + "ia_css_is_terminal_parameter_terminal(): enter:\n"); + + verifexitval(terminal != NULL, EFAULT); + + /* will return an error value on error */ + terminal_type = ia_css_terminal_get_type(terminal); + +EXIT: + if (haserror(EFAULT)) { + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, ERROR, + "ia_css_is_terminal_parameter_terminal: invalid 
argument\n"); + } + return (terminal_type == IA_CSS_TERMINAL_TYPE_PARAM_CACHED_IN || + terminal_type == IA_CSS_TERMINAL_TYPE_PARAM_CACHED_OUT); +} + +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_C +bool ia_css_is_terminal_data_terminal( + const ia_css_terminal_t *terminal) +{ + DECLARE_ERRVAL + ia_css_terminal_type_t terminal_type = IA_CSS_N_TERMINAL_TYPES; + + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, VERBOSE, + "ia_css_is_terminal_data_terminal(): enter:\n"); + + verifexitval(terminal != NULL, EFAULT); + + /* will return an error value on error */ + terminal_type = ia_css_terminal_get_type(terminal); + +EXIT: + if (haserror(EFAULT)) { + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, ERROR, + "ia_css_is_terminal_data_terminal invalid argument\n"); + } + return (terminal_type == IA_CSS_TERMINAL_TYPE_DATA_IN || + terminal_type == IA_CSS_TERMINAL_TYPE_DATA_OUT); +} + +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_C +bool ia_css_is_terminal_program_terminal( + const ia_css_terminal_t *terminal) +{ + DECLARE_ERRVAL + ia_css_terminal_type_t terminal_type = IA_CSS_N_TERMINAL_TYPES; + + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, VERBOSE, + "ia_css_is_terminal_program_terminal(): enter:\n"); + + verifexitval(terminal != NULL, EFAULT); + + /* will return an error value on error */ + terminal_type = ia_css_terminal_get_type(terminal); + +EXIT: + if (haserror(EFAULT)) { + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, ERROR, + "ia_css_is_terminal_program_terminal: invalid argument\n"); + } + return (terminal_type == IA_CSS_TERMINAL_TYPE_PROGRAM); +} + +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_C +bool ia_css_is_terminal_program_control_init_terminal( + const ia_css_terminal_t *terminal) +{ + DECLARE_ERRVAL + ia_css_terminal_type_t terminal_type = IA_CSS_N_TERMINAL_TYPES; + + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, VERBOSE, + "ia_css_is_terminal_program_control_init_terminal(): enter:\n"); + + verifexitval(terminal != NULL, EFAULT); + + /* will return an error value on error */ + terminal_type = ia_css_terminal_get_type(terminal); + +EXIT: + if (haserror(EFAULT)) { 
+ IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, ERROR, + "ia_css_is_terminal_program_control_init_terminal: invalid argument\n"); + } + return (terminal_type == IA_CSS_TERMINAL_TYPE_PROGRAM_CONTROL_INIT); +} + +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_C +bool ia_css_is_terminal_spatial_parameter_terminal( + const ia_css_terminal_t *terminal) +{ + DECLARE_ERRVAL + ia_css_terminal_type_t terminal_type = IA_CSS_N_TERMINAL_TYPES; + + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, VERBOSE, + "ia_css_is_terminal_spatial_parameter_terminal(): enter:\n"); + + verifexitval(terminal != NULL, EFAULT); + + /* will return an error value on error */ + terminal_type = ia_css_terminal_get_type(terminal); + +EXIT: + if (haserror(EFAULT)) { + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, ERROR, + "ia_css_is_terminal_spatial_param_terminal: invalid argument\n"); + } + return (terminal_type == IA_CSS_TERMINAL_TYPE_PARAM_SPATIAL_IN || + terminal_type == IA_CSS_TERMINAL_TYPE_PARAM_SPATIAL_OUT); +} + +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_C +uint8_t ia_css_data_terminal_compute_plane_count( + const ia_css_terminal_manifest_t *manifest, + const ia_css_program_group_param_t *param) +{ + DECLARE_ERRVAL + uint8_t plane_count = 1; + + NOT_USED(manifest); + NOT_USED(param); + + verifexitval(manifest != NULL, EFAULT); + verifexitval(param != NULL, EFAULT); + /* TODO: Implementation Missing*/ + + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, VERBOSE, + "ia_css_data_terminal_compute_plane_count(): enter:\n"); +EXIT: + if (haserror(EFAULT)) { + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, ERROR, + "ia_css_data_terminal_compute_plane_count: invalid argument\n"); + } + return plane_count; +} + +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_C +vied_vaddress_t ia_css_terminal_get_buffer( + const ia_css_terminal_t *terminal) +{ + DECLARE_ERRVAL + vied_vaddress_t buffer = VIED_NULL; + + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, VERBOSE, + "ia_css_terminal_get_buffer(): enter:\n"); + + if (ia_css_is_terminal_data_terminal(terminal)) { + ia_css_frame_t *frame = ia_css_data_terminal_get_frame( + 
(ia_css_data_terminal_t *)terminal); + + verifexitval(frame != NULL, EFAULT); + buffer = ia_css_frame_get_buffer(frame); + } else if (ia_css_is_terminal_parameter_terminal(terminal)) { + const ia_css_param_terminal_t *param_terminal = + (const ia_css_param_terminal_t *)terminal; + + buffer = param_terminal->param_payload.buffer; + } else if (ia_css_is_terminal_program_terminal(terminal)) { + const ia_css_program_terminal_t *program_terminal = + (const ia_css_program_terminal_t *)terminal; + + buffer = program_terminal->param_payload.buffer; + } else if (ia_css_is_terminal_program_control_init_terminal(terminal)) { + const ia_css_program_control_init_terminal_t *program_ctrl_init_terminal = + (const ia_css_program_control_init_terminal_t *)terminal; + + buffer = program_ctrl_init_terminal->param_payload.buffer; + } else if (ia_css_is_terminal_spatial_parameter_terminal(terminal)) { + const ia_css_spatial_param_terminal_t *spatial_terminal = + (const ia_css_spatial_param_terminal_t *)terminal; + + buffer = spatial_terminal->param_payload.buffer; + } +EXIT: + if (haserror(EFAULT)) { + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, ERROR, + "ia_css_terminal_get_buffer: invalid argument terminal\n"); + } + return buffer; +} + +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_C +int ia_css_terminal_set_buffer( + ia_css_terminal_t *terminal, + vied_vaddress_t buffer) +{ + DECLARE_ERRVAL + int retval = -1; + + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, VERBOSE, + "ia_css_terminal_set_buffer(): enter:\n"); + + if (ia_css_is_terminal_data_terminal(terminal) == true) { + /* Currently using Frames inside data terminal , + * TODO: start directly using data. 
+ */ + ia_css_data_terminal_t *dterminal = + (ia_css_data_terminal_t *)terminal; + ia_css_frame_t *frame = + ia_css_data_terminal_get_frame(dterminal); + + verifexitval(frame != NULL, EFAULT); + retval = ia_css_frame_set_buffer(frame, buffer); + verifexitval(retval == 0, EINVAL); + } else if (ia_css_is_terminal_parameter_terminal(terminal) == true) { + ia_css_param_terminal_t *pterminal = + (ia_css_param_terminal_t *)terminal; + + pterminal->param_payload.buffer = buffer; + retval = 0; + } else if (ia_css_is_terminal_program_terminal(terminal) == true) { + ia_css_program_terminal_t *pterminal = + (ia_css_program_terminal_t *)terminal; + + pterminal->param_payload.buffer = buffer; + retval = 0; + } else if (ia_css_is_terminal_program_control_init_terminal(terminal) == true) { + ia_css_program_control_init_terminal_t *pterminal = + (ia_css_program_control_init_terminal_t *)terminal; + + pterminal->param_payload.buffer = buffer; + retval = 0; + } else if (ia_css_is_terminal_spatial_parameter_terminal(terminal) == + true) { + ia_css_spatial_param_terminal_t *pterminal = + (ia_css_spatial_param_terminal_t *)terminal; + + pterminal->param_payload.buffer = buffer; + retval = 0; + } else { + return retval; + } + + retval = 0; +EXIT: + if (!noerror()) { + IA_CSS_TRACE_1(PSYSAPI_DYNAMIC, ERROR, + "ia_css_terminal_set_buffer failed (%i)\n", retval); + } + return retval; +} + +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_C +int ia_css_terminal_get_terminal_index( + const ia_css_terminal_t *terminal) +{ + DECLARE_ERRVAL + int terminal_index = -1; + + verifexitval(terminal != NULL, EFAULT); + + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, VERBOSE, + "ia_css_terminal_get_terminal_index(): enter:\n"); + + if (ia_css_is_terminal_data_terminal(terminal)) { + ia_css_frame_t *frame = ia_css_data_terminal_get_frame( + (ia_css_data_terminal_t *)terminal); + + verifexitval(frame != NULL, EFAULT); + terminal_index = ia_css_frame_get_data_index(frame); + } else { + if 
(ia_css_is_terminal_parameter_terminal(terminal)) { + const ia_css_param_terminal_t *param_terminal = + (const ia_css_param_terminal_t *)terminal; + + terminal_index = param_terminal->param_payload.terminal_index; + } else if (ia_css_is_terminal_program_terminal(terminal)) { + const ia_css_program_terminal_t *program_terminal = + (const ia_css_program_terminal_t *)terminal; + + terminal_index = program_terminal->param_payload.terminal_index; + } else if (ia_css_is_terminal_program_control_init_terminal(terminal)) { + const ia_css_program_control_init_terminal_t *program_ctrl_init_terminal = + (const ia_css_program_control_init_terminal_t *)terminal; + + terminal_index = program_ctrl_init_terminal->param_payload.terminal_index; + } else if (ia_css_is_terminal_spatial_parameter_terminal(terminal)) { + const ia_css_spatial_param_terminal_t *spatial_terminal = + (const ia_css_spatial_param_terminal_t *)terminal; + + terminal_index = spatial_terminal->param_payload.terminal_index; + } else { + verifjmpexit(0); + } + } +EXIT: + if (haserror(EFAULT)) { + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, ERROR, + "ia_css_terminal_get_terminal_index: invalid argument\n"); + } + return terminal_index; +} + +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_C +int ia_css_terminal_set_terminal_index( + ia_css_terminal_t *terminal, + unsigned int terminal_index) +{ + DECLARE_ERRVAL + int retval = -1; + + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, VERBOSE, + "ia_css_terminal_set_terminal_index(): enter:\n"); + + if (ia_css_is_terminal_data_terminal(terminal) == true) { + /* Currently using Frames inside data terminal , + * TODO: start directly using data. 
+ */ + ia_css_data_terminal_t *dterminal = + (ia_css_data_terminal_t *)terminal; + ia_css_frame_t *frame = + ia_css_data_terminal_get_frame(dterminal); + + verifexitval(frame != NULL, EFAULT); + retval = ia_css_frame_set_data_index(frame, terminal_index); + verifexitval(retval == 0, EINVAL); + } else { + if (ia_css_is_terminal_parameter_terminal(terminal) == true) { + ia_css_param_terminal_t *pterminal = + (ia_css_param_terminal_t *)terminal; + + pterminal->param_payload.terminal_index = terminal_index; + retval = 0; + } else if (ia_css_is_terminal_program_terminal(terminal) == true) { + ia_css_program_terminal_t *pterminal = + (ia_css_program_terminal_t *)terminal; + + pterminal->param_payload.terminal_index = terminal_index; + retval = 0; + } else if (ia_css_is_terminal_program_control_init_terminal(terminal) + == true) { + ia_css_program_control_init_terminal_t *pterminal = + (ia_css_program_control_init_terminal_t *)terminal; + + pterminal->param_payload.terminal_index = terminal_index; + retval = 0; + } else if (ia_css_is_terminal_spatial_parameter_terminal(terminal) == + true) { + ia_css_spatial_param_terminal_t *pterminal = + (ia_css_spatial_param_terminal_t *)terminal; + + pterminal->param_payload.terminal_index = terminal_index; + retval = 0; + } else { + return retval; + } + } + + retval = 0; +EXIT: + if (!noerror()) { + IA_CSS_TRACE_1(PSYSAPI_DYNAMIC, ERROR, + "ia_css_terminal_set_terminal_index failed (%i)\n", + retval); + } + return retval; +} + +STORAGE_CLASS_INLINE bool ia_css_is_data_terminal_valid( + const ia_css_terminal_t *terminal, + const ia_css_terminal_manifest_t *terminal_manifest, + const uint16_t nof_fragments) +{ + DECLARE_ERRVAL + bool invalid_flag = false; + + const ia_css_data_terminal_t *dterminal = + (ia_css_data_terminal_t *)terminal; + const ia_css_data_terminal_manifest_t *dt_manifest = + (ia_css_data_terminal_manifest_t *)terminal_manifest; + const ia_css_frame_descriptor_t *frame_descriptor; + ia_css_frame_format_bitmap_t 
man_frame_format_bitmap; + ia_css_frame_format_bitmap_t proc_frame_format_bitmap; + uint16_t max_value[IA_CSS_N_DATA_DIMENSION]; + uint16_t min_value[IA_CSS_N_DATA_DIMENSION]; + + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, VERBOSE, + "ia_css_is_data_terminal_valid enter\n"); + + frame_descriptor = + ia_css_data_terminal_get_frame_descriptor(dterminal); + verifexitval(frame_descriptor != NULL, EFAULT); + man_frame_format_bitmap = + ia_css_data_terminal_manifest_get_frame_format_bitmap( + dt_manifest); + proc_frame_format_bitmap = + ia_css_frame_format_bit_mask( + frame_descriptor->frame_format_type); + /* + * TODO: Replace by 'validation of frame format type'. + * Currently frame format type is not correctly set by manifest, + * waiting for HSD 1804260604 + */ + if (man_frame_format_bitmap > 0) { + if ((man_frame_format_bitmap & + proc_frame_format_bitmap) == 0) { + uint32_t *bitmap_arr = + (uint32_t *)&man_frame_format_bitmap; + + NOT_USED(bitmap_arr); + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, WARNING, + "Frame format type not defined in manifest\n"); + IA_CSS_TRACE_2(PSYSAPI_DYNAMIC, INFO, + " man bitmap_arr[]: %d,%d\n", + bitmap_arr[1], bitmap_arr[0]); + bitmap_arr = (uint32_t *)&proc_frame_format_bitmap; + IA_CSS_TRACE_2(PSYSAPI_DYNAMIC, INFO, + " proc bitmap_arr[]: %d,%d\n", + bitmap_arr[1], bitmap_arr[0]); + } + } else { + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, WARNING, + "Frame format bitmap not defined in manifest\n"); + } + ia_css_data_terminal_manifest_get_min_size(dt_manifest, min_value); + /* + * TODO: Replace by validation of Minimal frame column dimensions. + * Currently not correctly set by manifest yet, + * waiting for HSD 1804260604 + */ + if ((frame_descriptor->dimension[IA_CSS_COL_DIMENSION] < + min_value[IA_CSS_COL_DIMENSION])) { + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, WARNING, + "Minimal frame column dimensions not set correctly (by manifest)\n"); + } + /* + * TODO: Replace by validation of Minimal frame row dimensions. 
+ * Currently not correctly set by manifest yet, + * waiting for HSD 1804260604 + */ + if (frame_descriptor->dimension[IA_CSS_ROW_DIMENSION] < + min_value[IA_CSS_ROW_DIMENSION]) { + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, WARNING, + "Minimal frame row dimensions not set correctly (by manifest)\n"); + } + + ia_css_data_terminal_manifest_get_max_size(dt_manifest, max_value); + /* + * TODO: Replace by validation of Maximal frame column dimensions. + * Currently not correctly set by manifest yet, + * waiting for HSD 1804260604 + */ + if (frame_descriptor->dimension[IA_CSS_COL_DIMENSION] > + max_value[IA_CSS_COL_DIMENSION]) { + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, WARNING, + "Maximal frame column dimensions not set correctly (by manifest)\n"); + } + /* + * TODO: Replace by validation of Maximal frame row dimensions. + * Currently not correctly set by manifest yet, + * waiting for HSD 1804260604 + */ + if (frame_descriptor->dimension[IA_CSS_ROW_DIMENSION] > + max_value[IA_CSS_ROW_DIMENSION]) { + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, WARNING, + "Maximal frame row dimensions not set correctly (by manifest)\n"); + } + IA_CSS_TRACE_2(PSYSAPI_DYNAMIC, VERBOSE, "min_value: [%d,%d]\n", + min_value[IA_CSS_COL_DIMENSION], + min_value[IA_CSS_ROW_DIMENSION]); + IA_CSS_TRACE_2(PSYSAPI_DYNAMIC, VERBOSE, "max_value: [%d,%d]\n", + max_value[IA_CSS_COL_DIMENSION], + max_value[IA_CSS_ROW_DIMENSION]); + IA_CSS_TRACE_2(PSYSAPI_DYNAMIC, VERBOSE, "frame dim: [%d,%d]\n", + frame_descriptor->dimension[IA_CSS_COL_DIMENSION], + frame_descriptor->dimension[IA_CSS_ROW_DIMENSION]); + /* + * TODO: Add validation of fragment dimensions. 
+ * Currently not set by manifest yet, waiting for HSD 1804260604 + */ + NOT_USED(nof_fragments); + +EXIT: + if (haserror(EFAULT)) { + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, ERROR, + "ia_css_is_data_terminal_valid() invalid argument\n"); + return false; + } else { + return (!invalid_flag); + } +} + +STORAGE_CLASS_INLINE void ia_css_program_terminal_seq_info_print( + const ia_css_kernel_fragment_sequencer_info_manifest_desc_t + *man_seq_info_desc, + const ia_css_kernel_fragment_sequencer_info_desc_t + *term_seq_info_desc) +{ + NOT_USED(man_seq_info_desc); + NOT_USED(term_seq_info_desc); + + /* slice dimension column */ + IA_CSS_TRACE_1(PSYSAPI_DYNAMIC, VERBOSE, + "fragment_grid_slice_dimension: %d\n", + term_seq_info_desc-> + fragment_grid_slice_dimension[IA_CSS_COL_DIMENSION]); + IA_CSS_TRACE_1(PSYSAPI_DYNAMIC, VERBOSE, + "max_fragment_grid_slice_dimension: %d\n", + man_seq_info_desc-> + max_fragment_grid_slice_dimension[IA_CSS_COL_DIMENSION]); + IA_CSS_TRACE_1(PSYSAPI_DYNAMIC, VERBOSE, + "min_fragment_grid_slice_dimension: %d\n", + man_seq_info_desc-> + min_fragment_grid_slice_dimension[IA_CSS_COL_DIMENSION]); + + /* slice dimension row */ + IA_CSS_TRACE_1(PSYSAPI_DYNAMIC, VERBOSE, + "fragment_grid_slice_dimension: %d\n", + term_seq_info_desc-> + fragment_grid_slice_dimension[IA_CSS_ROW_DIMENSION]); + IA_CSS_TRACE_1(PSYSAPI_DYNAMIC, VERBOSE, + "max_fragment_grid_slice_dimension: %d\n", + man_seq_info_desc-> + max_fragment_grid_slice_dimension[IA_CSS_ROW_DIMENSION]); + IA_CSS_TRACE_1(PSYSAPI_DYNAMIC, VERBOSE, + "min_fragment_grid_slice_dimension: %d\n", + man_seq_info_desc-> + min_fragment_grid_slice_dimension[IA_CSS_ROW_DIMENSION]); + + /* slice count column */ + IA_CSS_TRACE_1(PSYSAPI_DYNAMIC, VERBOSE, + "fragment_grid_slice_count: %d\n", + term_seq_info_desc-> + fragment_grid_slice_count[IA_CSS_COL_DIMENSION]); + IA_CSS_TRACE_1(PSYSAPI_DYNAMIC, VERBOSE, + "max_fragment_grid_slice_count: %d\n", + man_seq_info_desc-> + 
max_fragment_grid_slice_count[IA_CSS_COL_DIMENSION]); + IA_CSS_TRACE_1(PSYSAPI_DYNAMIC, VERBOSE, + "min_fragment_grid_slice_count: %d\n", + man_seq_info_desc-> + min_fragment_grid_slice_count[IA_CSS_COL_DIMENSION]); + + /* slice count row */ + IA_CSS_TRACE_1(PSYSAPI_DYNAMIC, VERBOSE, + "fragment_grid_slice_count: %d\n", + term_seq_info_desc-> + fragment_grid_slice_count[IA_CSS_ROW_DIMENSION]); + IA_CSS_TRACE_1(PSYSAPI_DYNAMIC, VERBOSE, + "max_fragment_grid_slice_count: %d\n", + man_seq_info_desc-> + max_fragment_grid_slice_count[IA_CSS_ROW_DIMENSION]); + IA_CSS_TRACE_1(PSYSAPI_DYNAMIC, VERBOSE, + "min_fragment_grid_slice_count: %d\n", + man_seq_info_desc-> + min_fragment_grid_slice_count[IA_CSS_ROW_DIMENSION]); + + /* decimation factor column */ + IA_CSS_TRACE_1(PSYSAPI_DYNAMIC, VERBOSE, + "fragment_grid_point_decimation_factor: %d\n", + term_seq_info_desc-> + fragment_grid_point_decimation_factor[IA_CSS_COL_DIMENSION]); + IA_CSS_TRACE_1(PSYSAPI_DYNAMIC, VERBOSE, + "max_fragment_grid_point_decimation_factor: %d\n", + man_seq_info_desc-> + max_fragment_grid_point_decimation_factor[IA_CSS_COL_DIMENSION] + ); + IA_CSS_TRACE_1(PSYSAPI_DYNAMIC, VERBOSE, + "min_fragment_grid_point_decimation_factor: %d\n", + man_seq_info_desc-> + min_fragment_grid_point_decimation_factor[IA_CSS_COL_DIMENSION] + ); + + /* decimation factor row */ + IA_CSS_TRACE_1(PSYSAPI_DYNAMIC, VERBOSE, + "fragment_grid_point_decimation_factor: %d\n", + term_seq_info_desc-> + fragment_grid_point_decimation_factor[IA_CSS_ROW_DIMENSION]); + IA_CSS_TRACE_1(PSYSAPI_DYNAMIC, VERBOSE, + "max_fragment_grid_point_decimation_factor: %d\n", + man_seq_info_desc-> + max_fragment_grid_point_decimation_factor[ + IA_CSS_ROW_DIMENSION]); + IA_CSS_TRACE_1(PSYSAPI_DYNAMIC, VERBOSE, + "min_fragment_grid_point_decimation_factor: %d\n", + man_seq_info_desc-> + min_fragment_grid_point_decimation_factor[ + IA_CSS_ROW_DIMENSION]); + + /* index column */ + IA_CSS_TRACE_1(PSYSAPI_DYNAMIC, VERBOSE, + 
"fragment_grid_overlay_pixel_topleft_index: %d\n", + term_seq_info_desc-> + fragment_grid_overlay_pixel_topleft_index[ + IA_CSS_COL_DIMENSION]); + IA_CSS_TRACE_1(PSYSAPI_DYNAMIC, VERBOSE, + "max_fragment_grid_overlay_pixel_topleft_index: %d\n", + man_seq_info_desc-> + max_fragment_grid_overlay_pixel_topleft_index[ + IA_CSS_COL_DIMENSION]); + IA_CSS_TRACE_1(PSYSAPI_DYNAMIC, VERBOSE, + "min_fragment_grid_overlay_pixel_topleft_index: %d\n", + man_seq_info_desc-> + min_fragment_grid_overlay_pixel_topleft_index[ + IA_CSS_COL_DIMENSION]); + + /* index row */ + IA_CSS_TRACE_1(PSYSAPI_DYNAMIC, VERBOSE, + "fragment_grid_overlay_pixel_topleft_index: %d\n", + term_seq_info_desc-> + fragment_grid_overlay_pixel_topleft_index[ + IA_CSS_ROW_DIMENSION]); + IA_CSS_TRACE_1(PSYSAPI_DYNAMIC, VERBOSE, + "max_fragment_grid_overlay_pixel_topleft_index: %d\n", + man_seq_info_desc-> + max_fragment_grid_overlay_pixel_topleft_index[ + IA_CSS_ROW_DIMENSION]); + IA_CSS_TRACE_1(PSYSAPI_DYNAMIC, VERBOSE, + "min_fragment_grid_overlay_pixel_topleft_index: %d\n", + man_seq_info_desc-> + min_fragment_grid_overlay_pixel_topleft_index[ + IA_CSS_ROW_DIMENSION]); + + /* dimension column */ + IA_CSS_TRACE_1(PSYSAPI_DYNAMIC, VERBOSE, + "fragment_grid_overlay_pixel_dimension: %d\n", + term_seq_info_desc-> + fragment_grid_overlay_pixel_dimension[ + IA_CSS_COL_DIMENSION]); + IA_CSS_TRACE_1(PSYSAPI_DYNAMIC, VERBOSE, + "max_fragment_grid_overlay_pixel_dimension: %d\n", + man_seq_info_desc-> + max_fragment_grid_overlay_pixel_dimension[ + IA_CSS_COL_DIMENSION]); + IA_CSS_TRACE_1(PSYSAPI_DYNAMIC, VERBOSE, + "min_fragment_grid_overlay_pixel_dimension: %d\n", + man_seq_info_desc-> + min_fragment_grid_overlay_pixel_dimension[ + IA_CSS_COL_DIMENSION]); + + /* dimension column */ + IA_CSS_TRACE_1(PSYSAPI_DYNAMIC, VERBOSE, + "fragment_grid_overlay_pixel_dimension: %d\n", + term_seq_info_desc-> + fragment_grid_overlay_pixel_dimension[ + IA_CSS_ROW_DIMENSION]); + IA_CSS_TRACE_1(PSYSAPI_DYNAMIC, VERBOSE, + 
"max_fragment_grid_overlay_pixel_dimension: %d\n", + man_seq_info_desc-> + max_fragment_grid_overlay_pixel_dimension[ + IA_CSS_ROW_DIMENSION]); + IA_CSS_TRACE_1(PSYSAPI_DYNAMIC, VERBOSE, + "min_fragment_grid_overlay_pixel_dimension: %d\n", + man_seq_info_desc-> + min_fragment_grid_overlay_pixel_dimension[ + IA_CSS_ROW_DIMENSION]); +} + +STORAGE_CLASS_INLINE bool ia_css_is_program_terminal_valid( + const ia_css_terminal_t *terminal, + const ia_css_terminal_manifest_t *terminal_manifest, + const uint16_t nof_fragments) +{ + DECLARE_ERRVAL + bool invalid_flag = false; + uint16_t frag_idx; + + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, VERBOSE, + "ia_css_is_program_terminal_valid enter\n"); + + for (frag_idx = 0; frag_idx < nof_fragments; frag_idx++) { + uint16_t frag_seq_info_count, seq_idx; + const ia_css_program_terminal_t *prog_term; + const ia_css_program_terminal_manifest_t *prog_term_man; + + prog_term = (const ia_css_program_terminal_t *)terminal; + prog_term_man = + (const ia_css_program_terminal_manifest_t *) + terminal_manifest; + frag_seq_info_count = + prog_term_man-> + kernel_fragment_sequencer_info_manifest_info_count; + + for (seq_idx = 0; seq_idx < frag_seq_info_count; seq_idx++) { + const ia_css_kernel_fragment_sequencer_info_desc_t + *term_seq_info_desc; + const + ia_css_kernel_fragment_sequencer_info_manifest_desc_t * + man_seq_info_desc; + + term_seq_info_desc = + ia_css_program_terminal_get_kernel_frgmnt_seq_info_desc( + prog_term, frag_idx, seq_idx, + frag_seq_info_count); + verifexitval(term_seq_info_desc != NULL, EFAULT); + man_seq_info_desc = + ia_css_program_terminal_manifest_get_kernel_frgmnt_seq_info_desc + (prog_term_man, seq_idx); + verifexitval(man_seq_info_desc != NULL, EFAULT); + + ia_css_program_terminal_seq_info_print( + man_seq_info_desc, term_seq_info_desc); + /* slice dimension column */ + invalid_flag = invalid_flag || + (term_seq_info_desc-> + fragment_grid_slice_dimension[ + IA_CSS_COL_DIMENSION] > + man_seq_info_desc-> + 
max_fragment_grid_slice_dimension[ + IA_CSS_COL_DIMENSION]); + invalid_flag = invalid_flag || + (term_seq_info_desc-> + fragment_grid_slice_dimension[ + IA_CSS_COL_DIMENSION] < + man_seq_info_desc-> + min_fragment_grid_slice_dimension[ + IA_CSS_COL_DIMENSION]); + + /* slice dimension row */ + invalid_flag = invalid_flag || + (term_seq_info_desc-> + fragment_grid_slice_dimension[ + IA_CSS_ROW_DIMENSION] > + man_seq_info_desc-> + max_fragment_grid_slice_dimension[ + IA_CSS_ROW_DIMENSION]); + invalid_flag = invalid_flag || + (term_seq_info_desc-> + fragment_grid_slice_dimension[ + IA_CSS_ROW_DIMENSION] < + man_seq_info_desc-> + min_fragment_grid_slice_dimension[ + IA_CSS_ROW_DIMENSION]); + + /* slice count column */ + invalid_flag = invalid_flag || + (term_seq_info_desc-> + fragment_grid_slice_count[ + IA_CSS_COL_DIMENSION] > + man_seq_info_desc-> + max_fragment_grid_slice_count[ + IA_CSS_COL_DIMENSION]); + invalid_flag = invalid_flag || + (term_seq_info_desc-> + fragment_grid_slice_count[ + IA_CSS_COL_DIMENSION] < + man_seq_info_desc-> + min_fragment_grid_slice_count[ + IA_CSS_COL_DIMENSION]); + + /* slice count row */ + invalid_flag = invalid_flag || + (term_seq_info_desc-> + fragment_grid_slice_count[ + IA_CSS_ROW_DIMENSION] > + man_seq_info_desc-> + max_fragment_grid_slice_count[ + IA_CSS_ROW_DIMENSION]); + invalid_flag = invalid_flag || + (term_seq_info_desc-> + fragment_grid_slice_count[ + IA_CSS_ROW_DIMENSION] < + man_seq_info_desc-> + min_fragment_grid_slice_count[ + IA_CSS_ROW_DIMENSION]); + + /* decimation factor column */ + invalid_flag = invalid_flag || + (term_seq_info_desc-> + fragment_grid_point_decimation_factor[ + IA_CSS_COL_DIMENSION] > + man_seq_info_desc-> + max_fragment_grid_point_decimation_factor[ + IA_CSS_COL_DIMENSION]); + invalid_flag = invalid_flag || + (term_seq_info_desc-> + fragment_grid_point_decimation_factor[ + IA_CSS_COL_DIMENSION] < + man_seq_info_desc-> + min_fragment_grid_point_decimation_factor[ + IA_CSS_COL_DIMENSION]); + + /* 
decimation factor row */ + invalid_flag = invalid_flag || + (term_seq_info_desc-> + fragment_grid_point_decimation_factor[ + IA_CSS_ROW_DIMENSION] > + man_seq_info_desc-> + max_fragment_grid_point_decimation_factor[ + IA_CSS_ROW_DIMENSION]); + invalid_flag = invalid_flag || + (term_seq_info_desc-> + fragment_grid_point_decimation_factor[ + IA_CSS_ROW_DIMENSION] < + man_seq_info_desc-> + min_fragment_grid_point_decimation_factor[ + IA_CSS_ROW_DIMENSION]); + + /* index column */ + invalid_flag = invalid_flag || + (term_seq_info_desc-> + fragment_grid_overlay_pixel_topleft_index[ + IA_CSS_COL_DIMENSION] > + man_seq_info_desc-> + max_fragment_grid_overlay_pixel_topleft_index[ + IA_CSS_COL_DIMENSION]); + invalid_flag = invalid_flag || + (term_seq_info_desc-> + fragment_grid_overlay_pixel_topleft_index[ + IA_CSS_COL_DIMENSION] < + man_seq_info_desc-> + min_fragment_grid_overlay_pixel_topleft_index[ + IA_CSS_COL_DIMENSION]); + + /* index row */ + invalid_flag = invalid_flag || + (term_seq_info_desc-> + fragment_grid_overlay_pixel_topleft_index[ + IA_CSS_ROW_DIMENSION] > + man_seq_info_desc-> + max_fragment_grid_overlay_pixel_topleft_index[ + IA_CSS_ROW_DIMENSION]); + invalid_flag = invalid_flag || + (term_seq_info_desc-> + fragment_grid_overlay_pixel_topleft_index[ + IA_CSS_ROW_DIMENSION] < + man_seq_info_desc-> + min_fragment_grid_overlay_pixel_topleft_index[ + IA_CSS_ROW_DIMENSION]); + + /* dimension column */ + invalid_flag = invalid_flag || + (term_seq_info_desc-> + fragment_grid_overlay_pixel_dimension[ + IA_CSS_COL_DIMENSION] > + man_seq_info_desc-> + max_fragment_grid_overlay_pixel_dimension[ + IA_CSS_COL_DIMENSION]); + invalid_flag = invalid_flag || + (term_seq_info_desc-> + fragment_grid_overlay_pixel_dimension[ + IA_CSS_COL_DIMENSION] < + man_seq_info_desc-> + min_fragment_grid_overlay_pixel_dimension[ + IA_CSS_COL_DIMENSION]); + + /* dimension column */ + invalid_flag = invalid_flag || + (term_seq_info_desc-> + fragment_grid_overlay_pixel_dimension[ + 
IA_CSS_ROW_DIMENSION] > + man_seq_info_desc-> + max_fragment_grid_overlay_pixel_dimension[ + IA_CSS_ROW_DIMENSION]); + invalid_flag = invalid_flag || + (term_seq_info_desc-> + fragment_grid_overlay_pixel_dimension[ + IA_CSS_ROW_DIMENSION] < + man_seq_info_desc-> + min_fragment_grid_overlay_pixel_dimension[ + IA_CSS_ROW_DIMENSION]); + } + } + +EXIT: + if (haserror(EFAULT)) { + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, ERROR, + "ia_css_is_program_terminal_valid() invalid argument\n"); + return false; + } + if (invalid_flag == true) { + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, WARNING, + "ia_css_is_program_terminal_valid(): validation failed\n"); + /* TODO: program terminal parameters not correctly defined, + * disable validation result until issues has been solved + */ + return true; + } + return (!invalid_flag); +} + +STORAGE_CLASS_INLINE bool ia_css_is_sliced_terminal_valid( + const ia_css_terminal_t *terminal, + const ia_css_terminal_manifest_t *terminal_manifest, + const uint16_t nof_fragments) +{ + DECLARE_ERRVAL + bool invalid_flag = false; + uint16_t frag_idx; + + uint16_t slice_idx, section_idx; + + const ia_css_sliced_param_terminal_t *sliced_term = + (const ia_css_sliced_param_terminal_t *)terminal; + const ia_css_sliced_param_terminal_manifest_t *sliced_term_man = + (const ia_css_sliced_param_terminal_manifest_t *) + terminal_manifest; + + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, VERBOSE, + "ia_css_is_sliced_terminal_valid enter\n"); + + for (frag_idx = 0; frag_idx < nof_fragments; frag_idx++) { + const ia_css_fragment_slice_desc_t *fragment_slice_desc = + ia_css_sliced_param_terminal_get_fragment_slice_desc( + sliced_term, frag_idx); + + verifexitval(fragment_slice_desc != NULL, EFAULT); + + for (slice_idx = 0; + slice_idx < fragment_slice_desc->slice_count; + slice_idx++) { + for (section_idx = 0; + section_idx < + sliced_term_man->sliced_param_section_count; + section_idx++) { + const + ia_css_sliced_param_manifest_section_desc_t * + slice_man_section_desc; + const 
ia_css_slice_param_section_desc_t * + slice_section_desc; + + slice_man_section_desc = + ia_css_sliced_param_terminal_manifest_get_sliced_prm_sct_desc( + sliced_term_man, section_idx); + slice_section_desc = + ia_css_sliced_param_terminal_get_slice_param_section_desc( + sliced_term, frag_idx, + slice_idx, section_idx, + sliced_term_man-> + sliced_param_section_count); + verifexitval(slice_man_section_desc != NULL, EFAULT); + verifexitval(slice_section_desc != NULL, EFAULT); + + invalid_flag = invalid_flag || + (slice_section_desc->mem_size > + slice_man_section_desc->max_mem_size); + } + } + } + +EXIT: + if (haserror(EFAULT)) { + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, ERROR, + "ia_css_is_sliced_terminal_valid() invalid argument\n"); + return false; + } else { + return (!invalid_flag); + } + +} + +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_C +bool ia_css_is_terminal_valid( + const ia_css_terminal_t *terminal, + const ia_css_terminal_manifest_t *terminal_manifest) +{ + DECLARE_ERRVAL + bool is_valid = false; + uint16_t nof_fragments; + ia_css_terminal_type_t terminal_type = IA_CSS_TERMINAL_INVALID_ID; + + verifexitval(NULL != terminal, EFAULT); + verifexitval(NULL != terminal_manifest, EFAULT); + + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, VERBOSE, + "ia_css_is_terminal_valid enter\n"); + + nof_fragments = ia_css_data_terminal_get_fragment_count( + (const ia_css_data_terminal_t *)terminal); + terminal_type = ia_css_terminal_get_type(terminal); + + switch (terminal_type) { + case IA_CSS_TERMINAL_TYPE_DATA_IN: + case IA_CSS_TERMINAL_TYPE_DATA_OUT: + is_valid = ia_css_is_data_terminal_valid(terminal, + terminal_manifest, nof_fragments); + break; + case IA_CSS_TERMINAL_TYPE_PROGRAM: + is_valid = ia_css_is_program_terminal_valid(terminal, + terminal_manifest, nof_fragments); + break; + case IA_CSS_TERMINAL_TYPE_PARAM_CACHED_IN: + case IA_CSS_TERMINAL_TYPE_PARAM_CACHED_OUT: + case IA_CSS_TERMINAL_TYPE_PARAM_SPATIAL_IN: + case IA_CSS_TERMINAL_TYPE_PARAM_SPATIAL_OUT: + case 
IA_CSS_TERMINAL_TYPE_PROGRAM_CONTROL_INIT: + /* Nothing to be validated for cached and spatial + * parameters, return valid + */ + is_valid = true; + break; + case IA_CSS_TERMINAL_TYPE_PARAM_SLICED_IN: + case IA_CSS_TERMINAL_TYPE_PARAM_SLICED_OUT: + is_valid = ia_css_is_sliced_terminal_valid(terminal, + terminal_manifest, nof_fragments); + break; + default: + /* Terminal type unknown, return invalid */ + IA_CSS_TRACE_1(PSYSAPI_DYNAMIC, WARNING, + "ia_css_is_terminal_valid() Terminal type %x unknown\n", + (int)terminal_type); + is_valid = false; + break; + } + +EXIT: + if (haserror(EFAULT)) { + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, ERROR, + "ia_css_is_terminal_valid() invalid argument\n"); + return false; + } + /* TODO: to be removed once all PGs pass validation */ + if (is_valid == false) { + IA_CSS_TRACE_1(PSYSAPI_DYNAMIC, INFO, + "ia_css_is_terminal_valid(): type: %d validation failed\n", + terminal_type); + } + return is_valid; +} + +/* ================= Program Control Init Terminal - START ================= */ +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_C +int +ia_css_program_control_init_terminal_init( + ia_css_program_control_init_terminal_t *terminal, + const ia_css_program_control_init_terminal_manifest_t *manifest) +{ + int retval = -1; + unsigned int i; + unsigned int base_load_sec; + unsigned int base_connect_sec; + unsigned int load_index = 0; + unsigned int connect_index = 0; + unsigned int load_section_count = 0; + unsigned int connect_section_count = 0; + + ia_css_program_control_init_manifest_program_desc_t *man_progs; + + verifjmpexit(terminal != NULL); + + man_progs = + ia_css_program_control_init_terminal_manifest_get_program_desc(manifest, 0); + verifjmpexit(man_progs != NULL); + + for (i = 0; i < manifest->program_count; i++) { + load_section_count += man_progs[i].load_section_count; + connect_section_count += man_progs[i].connect_section_count; + } + + terminal->program_count = manifest->program_count; + terminal->program_section_desc_offset = + 
sizeof(ia_css_program_control_init_terminal_t); + + base_load_sec = /* base_load_sec relative to first program */ + terminal->program_count * + sizeof(ia_css_program_control_init_program_desc_t); + + base_connect_sec = base_load_sec + + load_section_count * + sizeof(ia_css_program_control_init_load_section_desc_t); + + for (i = 0; i < terminal->program_count; i++) { + ia_css_program_control_init_program_desc_t *prog; + + prog = ia_css_program_control_init_terminal_get_program_desc( + terminal, i); + verifjmpexit(prog != NULL); + + prog->load_section_count = man_progs[i].load_section_count; + prog->connect_section_count = man_progs[i].connect_section_count; + + prog->load_section_desc_offset = + base_load_sec + + load_index * + sizeof(ia_css_program_control_init_load_section_desc_t) - + i * sizeof(ia_css_program_control_init_program_desc_t); + prog->connect_section_desc_offset = + base_connect_sec + + connect_index * + sizeof(ia_css_program_control_init_connect_section_desc_t) - + i * sizeof(ia_css_program_control_init_program_desc_t); + + load_index += man_progs[i].load_section_count; + connect_index += man_progs[i].connect_section_count; + } + retval = 0; +EXIT: + return retval; +} + +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_C +unsigned int +ia_css_program_control_init_terminal_get_descriptor_size( + const ia_css_program_control_init_terminal_manifest_t *manifest) +{ + unsigned int i; + unsigned size = 0; + unsigned load_section_count = 0; + unsigned connect_section_count = 0; + ia_css_program_control_init_manifest_program_desc_t *man_progs; + verifjmpexit(manifest != NULL); + + man_progs = + ia_css_program_control_init_terminal_manifest_get_program_desc( + manifest, 0); + verifjmpexit(man_progs != NULL); + + for (i = 0; i < manifest->program_count; i++) { + load_section_count += man_progs[i].load_section_count; + connect_section_count += man_progs[i].connect_section_count; + } + + size = sizeof(ia_css_program_control_init_terminal_t) + + manifest->program_count * + 
sizeof(struct ia_css_program_control_init_program_desc_s) + + load_section_count * + sizeof(struct ia_css_program_control_init_load_section_desc_s) + + connect_section_count * + sizeof(struct ia_css_program_control_init_connect_section_desc_s); +EXIT: + return size; +} + +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_C +void ia_css_program_control_init_terminal_print( + const ia_css_program_control_init_terminal_t *terminal) +{ + unsigned int prog_idx, sec_idx; + ia_css_program_control_init_program_desc_t *prog; + ia_css_program_control_init_load_section_desc_t *load_sec; + ia_css_program_control_init_connect_section_desc_t *connect_sec; + + verifjmpexit(terminal != NULL); + + IA_CSS_TRACE_2(PSYSAPI_DYNAMIC, INFO, + "program_count: %d, payload_fragment_stride: %d\n", + terminal->program_count, + terminal->payload_fragment_stride); + + for (prog_idx = 0; prog_idx < terminal->program_count; prog_idx++) { + prog = ia_css_program_control_init_terminal_get_program_desc( + terminal, prog_idx); + verifjmpexit(prog != NULL); + + for (sec_idx = 0; sec_idx < prog->load_section_count; sec_idx++) { + load_sec = + ia_css_program_control_init_terminal_get_load_section_desc( + prog, sec_idx); + verifjmpexit(load_sec != NULL); + IA_CSS_TRACE_4(PSYSAPI_DYNAMIC, INFO, + "load_section>> device_descriptor_id: 0x%x, mem_offset: %d, " + "mem_size: %d, mode_bitmask: %x\n", + load_sec->device_descriptor_id.data, + load_sec->mem_offset, + load_sec->mem_size, + load_sec->mode_bitmask); + } + for (sec_idx = 0; sec_idx < prog->connect_section_count; sec_idx++) { + connect_sec = + ia_css_program_control_init_terminal_get_connect_section_desc( + prog, sec_idx); + verifjmpexit(connect_sec != NULL); + IA_CSS_TRACE_4(PSYSAPI_DYNAMIC, INFO, + "connect_section>> device_descriptor_id: 0x%x, " + "connect_terminal_ID: %d, connect_section_idx: %d, " + "mode_bitmask: %x\n", + connect_sec->device_descriptor_id.data, + connect_sec->connect_terminal_ID, + connect_sec->connect_section_idx, + connect_sec->mode_bitmask); 
+ } + } +EXIT: + return; +} + +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_C +ia_css_program_control_init_program_desc_t * +ia_css_program_control_init_terminal_get_program_desc( + const ia_css_program_control_init_terminal_t *prog_ctrl_init_terminal, + const unsigned int program_index) +{ + ia_css_program_control_init_program_desc_t *program_desc_base; + ia_css_program_control_init_program_desc_t *program_desc = NULL; + + verifjmpexit(prog_ctrl_init_terminal != NULL); + verifjmpexit(program_index < prog_ctrl_init_terminal->program_count); + + program_desc_base = (ia_css_program_control_init_program_desc_t *) + (((const char *)prog_ctrl_init_terminal) + + prog_ctrl_init_terminal->program_section_desc_offset); + program_desc = &(program_desc_base[program_index]); + +EXIT: + return program_desc; +} + +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_C +ia_css_process_id_t ia_css_program_control_init_terminal_get_process_id( + const ia_css_program_control_init_program_desc_t *program_desc) +{ + ia_css_process_id_t process_id = 0; + + verifjmpexit(program_desc != NULL); + + process_id = program_desc->control_info.process_id; + +EXIT: + return process_id; +} + +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_C +uint8_t ia_css_program_control_init_terminal_get_num_done_events( + const ia_css_program_control_init_program_desc_t *program_desc) +{ + uint8_t num_done_events = 0; + + verifjmpexit(program_desc != NULL); + + num_done_events = program_desc->control_info.num_done_events; + +EXIT: + return num_done_events; +} + +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_C +void ia_css_program_control_init_terminal_set_control_info( + ia_css_program_control_init_program_desc_t *program_desc, + ia_css_process_id_t process_id, + uint8_t num_done_events) +{ + verifjmpexit(program_desc != NULL); + + program_desc->control_info.process_id = process_id; + program_desc->control_info.num_done_events = num_done_events; + +EXIT: + return; +} + +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_C +ia_css_program_control_init_load_section_desc_t * 
+ia_css_program_control_init_terminal_get_load_section_desc( + const ia_css_program_control_init_program_desc_t *program_desc, + const unsigned int load_section_index) +{ + ia_css_program_control_init_load_section_desc_t *load_section_desc_base; + ia_css_program_control_init_load_section_desc_t *load_section_desc = NULL; + + verifjmpexit(program_desc != NULL); + verifjmpexit(load_section_index < program_desc->load_section_count); + + load_section_desc_base = (ia_css_program_control_init_load_section_desc_t *) + (((const char *)program_desc) + + program_desc->load_section_desc_offset); + load_section_desc = &(load_section_desc_base[load_section_index]); + +EXIT: + return load_section_desc; +} + +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_C +ia_css_program_control_init_connect_section_desc_t * +ia_css_program_control_init_terminal_get_connect_section_desc( + const ia_css_program_control_init_program_desc_t *program_desc, + const unsigned int connect_section_index) +{ + ia_css_program_control_init_connect_section_desc_t *connect_sec_desc_base; + ia_css_program_control_init_connect_section_desc_t *connect_sec_desc = NULL; + + verifjmpexit(program_desc != NULL); + verifjmpexit(connect_section_index < program_desc->connect_section_count); + + connect_sec_desc_base = + (ia_css_program_control_init_connect_section_desc_t *) + (((const char *)program_desc) + + program_desc->connect_section_desc_offset); + connect_sec_desc = &(connect_sec_desc_base[connect_section_index]); + +EXIT: + return connect_sec_desc; +} + +/* ================= Program Control Init Terminal - END ================= */ + +#endif /* __IA_CSS_PSYS_TERMINAL_IMPL_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/psysapi/dynamic/src/ia_css_psys_terminal_private_types.h b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/psysapi/dynamic/src/ia_css_psys_terminal_private_types.h new file mode 100644 index 0000000000000..68626561acb5d --- /dev/null +++ 
b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/psysapi/dynamic/src/ia_css_psys_terminal_private_types.h @@ -0,0 +1,186 @@ +/* +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ + +#ifndef __IA_CSS_PSYS_TERMINAL_PRIVATE_TYPES_H +#define __IA_CSS_PSYS_TERMINAL_PRIVATE_TYPES_H + +#include "ia_css_terminal_types.h" +#include "ia_css_program_group_data.h" +#include "ia_css_psys_manifest_types.h" + +#define N_UINT16_IN_DATA_TERMINAL_STRUCT 1 +#define N_UINT8_IN_DATA_TERMINAL_STRUCT 3 +#define N_PADDING_UINT8_IN_DATA_TERMINAL_STRUCT 3 + +/* ========================= Data terminal - START ========================= */ + +#define SIZE_OF_DATA_TERMINAL_STRUCT_BITS \ + (SIZE_OF_TERMINAL_STRUCT_BITS \ + + IA_CSS_FRAME_DESCRIPTOR_STRUCT_BITS \ + + IA_CSS_FRAME_STRUCT_BITS \ + + IA_CSS_STREAM_STRUCT_BITS \ + + IA_CSS_UINT32_T_BITS \ + + IA_CSS_CONNECTION_TYPE_BITS \ + + (N_UINT16_IN_DATA_TERMINAL_STRUCT * 16) \ + + (N_UINT8_IN_DATA_TERMINAL_STRUCT * 8) \ + + (N_PADDING_UINT8_IN_DATA_TERMINAL_STRUCT * 8)) + +/* + * The (data) terminal can be attached to a buffer or a stream. + * The stream interface is not necessarily limited to strict in-order access. + * For a stream the restriction is that contrary to a buffer it cannot be + * addressed directly, i.e. 
it behaves as a port, + * but it may support stream_pos() and/or seek() operations + */ +struct ia_css_data_terminal_s { + /**< Data terminal base */ + ia_css_terminal_t base; + /**< Properties of the data attached to the terminal */ + ia_css_frame_descriptor_t frame_descriptor; + /**< Data buffer handle attached to the terminal */ + ia_css_frame_t frame; + /**< (exclusive) Data stream handle attached to the terminal + * if the data is sourced over a device port + */ + ia_css_stream_t stream; + /**< Reserved */ + uint32_t reserved; + /**< Connection {buffer, stream, ...} */ + ia_css_connection_type_t connection_type; + /**< Array[fragment_count] (fragment_count being equal for all + * terminals in a subgraph) of fragment descriptors + */ + uint16_t fragment_descriptor_offset; + /**< Kernel id where this terminal is connected to */ + uint8_t kernel_id; + /**< Indicate to which subgraph this terminal belongs + * for common constraints + */ + uint8_t subgraph_id; + /* Link ID of the data terminal */ + uint8_t link_id; + /**< Padding for 64bit alignment */ + uint8_t padding[N_PADDING_UINT8_IN_DATA_TERMINAL_STRUCT]; +}; +/* ========================== Data terminal - END ========================== */ + +/* ================= Program Control Init Terminal - START ================= */ +#define SIZE_OF_PROG_CONTROL_INIT_LOAD_SECTION_DESC_STRUCT_BITS \ + (DEVICE_DESCRIPTOR_ID_BITS \ + + (3 * IA_CSS_UINT32_T_BITS) \ + ) +struct ia_css_program_control_init_load_section_desc_s { + /* Offset of the parameter allocation in memory */ + uint32_t mem_offset; + /* Memory allocation size needs of this parameter */ + uint32_t mem_size; + /* Device descriptor */ + device_descriptor_id_t device_descriptor_id; /* 32 bits */ + /* (Applicable to) mode bitmask */ + uint32_t mode_bitmask; +}; + +#define MODE_BITMASK_MEMORY (1u << IA_CSS_CONNECTION_MEMORY) +#define MODE_BITMASK_MEMORY_STREAM (1u << IA_CSS_CONNECTION_MEMORY_STREAM) +#define MODE_BITMASK_STREAM (1u << IA_CSS_CONNECTION_STREAM) 
+#define MODE_BITMASK_DONT_CARE (MODE_BITMASK_MEMORY | MODE_BITMASK_MEMORY_STREAM | MODE_BITMASK_STREAM) + +#define N_PADDING_UINT8_IN_PROG_CTRL_INIT_CONNECT_SECT_STRUCT (5) +#define SIZE_OF_PROG_CONTROL_INIT_CONNECT_SECTION_DESC_STRUCT_BITS \ + (DEVICE_DESCRIPTOR_ID_BITS \ + + (1 * IA_CSS_UINT32_T_BITS) \ + + (1 * IA_CSS_UINT16_T_BITS) \ + + IA_CSS_TERMINAL_ID_BITS \ + + (N_PADDING_UINT8_IN_PROG_CTRL_INIT_CONNECT_SECT_STRUCT * \ + IA_CSS_UINT8_T_BITS) \ + ) +struct ia_css_program_control_init_connect_section_desc_s { + /* Device descriptor */ + device_descriptor_id_t device_descriptor_id; /* 32 bits */ + /* (Applicable to) mode bitmask */ + uint32_t mode_bitmask; + /* Connected terminal section (plane) index */ + uint16_t connect_section_idx; + /* Absolute referral ID for the connected terminal */ + ia_css_terminal_ID_t connect_terminal_ID; + /* align to 64 */ + uint8_t padding[N_PADDING_UINT8_IN_PROG_CTRL_INIT_CONNECT_SECT_STRUCT]; +}; + +#define N_PADDING_UINT8_IN_PROG_DESC_CONTROL_INFO (1) +#define N_PADDING_UINT8_IN_PROG_CTRL_INIT_PROGRAM_DESC_STRUCT (4) +#define SIZE_OF_PROGRAM_DESC_CONTROL_INFO_STRUCT_BITS \ + (1 * IA_CSS_UINT16_T_BITS) \ + + (1 * IA_CSS_UINT8_T_BITS) \ + + (N_PADDING_UINT8_IN_PROG_DESC_CONTROL_INFO * IA_CSS_UINT8_T_BITS) + +#define SIZE_OF_PROG_CONTROL_INIT_PROG_DESC_STRUCT_BITS \ + (4 * IA_CSS_UINT16_T_BITS) \ + + (SIZE_OF_PROGRAM_DESC_CONTROL_INFO_STRUCT_BITS) \ + + (N_PADDING_UINT8_IN_PROG_CTRL_INIT_PROGRAM_DESC_STRUCT * \ + IA_CSS_UINT8_T_BITS) + +struct ia_css_program_desc_control_info_s { + /* 12-bit process identifier */ + ia_css_process_id_t process_id; + /* number of done acks required to close the process */ + uint8_t num_done_events; + uint8_t padding[N_PADDING_UINT8_IN_PROG_DESC_CONTROL_INFO]; +}; + +struct ia_css_program_control_init_program_desc_s { + /* Number of load sections in this program */ + uint16_t load_section_count; + /* Points to variable size array of + * ia_css_program_control_init_load_section_desc_s + * in 
relation to its program_desc + */ + uint16_t load_section_desc_offset; + /* Number of connect sections in this program */ + uint16_t connect_section_count; + /* Points to variable size array of + * ia_css_program_control_init_connect_section_desc_s + * in relation to its program_desc + */ + uint16_t connect_section_desc_offset; + struct ia_css_program_desc_control_info_s control_info; + /* align to 64 bits */ + uint8_t padding[N_PADDING_UINT8_IN_PROG_CTRL_INIT_PROGRAM_DESC_STRUCT]; +}; + +#define SIZE_OF_PROG_CONTROL_INIT_TERM_STRUCT_BITS \ + (SIZE_OF_TERMINAL_STRUCT_BITS \ + + IA_CSS_PARAM_PAYLOAD_STRUCT_BITS \ + + (1 * IA_CSS_UINT32_T_BITS) \ + + (2 * IA_CSS_UINT16_T_BITS) \ + ) +struct ia_css_program_control_init_terminal_s { + /* Parameter terminal base */ + ia_css_terminal_t base; + /* Parameter buffer handle attached to the terminal */ + ia_css_param_payload_t param_payload; + /* Fragment stride for the payload, used to find the base + * of the payload for a given fragment + */ + uint32_t payload_fragment_stride; + /* Points to the variable array of + * ia_css_program_control_init_program_desc_s + */ + uint16_t program_section_desc_offset; + /* Number of instantiated programs in program group (processes) */ + uint16_t program_count; +}; +/* ================= Program Control Init Terminal - END ================= */ + +#endif /* __IA_CSS_PSYS_TERMINAL_PRIVATE_TYPES_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/psysapi/interface/ia_css_psysapi.h b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/psysapi/interface/ia_css_psysapi.h new file mode 100644 index 0000000000000..4c8fd33b331ca --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/psysapi/interface/ia_css_psysapi.h @@ -0,0 +1,23 @@ +/* +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. 
+ * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ + +#ifndef __IA_CSS_PSYSAPI_H +#define __IA_CSS_PSYSAPI_H + +#include +#include +#include +#include + +#endif /* __IA_CSS_PSYSAPI_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/psysapi/interface/ia_css_psysapi_fw_version.h b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/psysapi/interface/ia_css_psysapi_fw_version.h new file mode 100644 index 0000000000000..5658a2988a08d --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/psysapi/interface/ia_css_psysapi_fw_version.h @@ -0,0 +1,33 @@ +/* + * Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+ */ + +#ifndef __IA_CSS_PSYSAPI_FW_VERSION_H +#define __IA_CSS_PSYSAPI_FW_VERSION_H + +/* PSYSAPI FW VERSION is taken from Makefile for FW tests */ +#define BXT_FW_RELEASE_VERSION PSYS_FIRMWARE_VERSION + +enum ia_css_process_group_protocol_version { + /* + * Legacy protocol + */ + IA_CSS_PROCESS_GROUP_PROTOCOL_LEGACY = 0, + /* + * Persistent process group support protocol + */ + IA_CSS_PROCESS_GROUP_PROTOCOL_PPG, + IA_CSS_PROCESS_GROUP_N_PROTOCOLS +}; + +#endif /* __IA_CSS_PSYSAPI_FW_VERSION_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/psysapi/interface/ia_css_psysapi_trace.h b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/psysapi/interface/ia_css_psysapi_trace.h new file mode 100644 index 0000000000000..e35ec24c77b36 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/psysapi/interface/ia_css_psysapi_trace.h @@ -0,0 +1,78 @@ +/* +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ + +#ifndef __IA_CSS_PSYSAPI_TRACE_H +#define __IA_CSS_PSYSAPI_TRACE_H + +#include "ia_css_trace.h" + +#define PSYSAPI_TRACE_LOG_LEVEL_OFF 0 +#define PSYSAPI_TRACE_LOG_LEVEL_NORMAL 1 +#define PSYSAPI_TRACE_LOG_LEVEL_DEBUG 2 + +/* PSYSAPI and all the submodules in PSYSAPI will have the default tracing + * level set to the PSYSAPI_TRACE_CONFIG level. 
If not defined in the + * psysapi.mk fill it will be set by default to no trace + * (PSYSAPI_TRACE_LOG_LEVEL_OFF) + */ +#define PSYSAPI_TRACE_CONFIG_DEFAULT PSYSAPI_TRACE_LOG_LEVEL_OFF + +#if !defined(PSYSAPI_TRACE_CONFIG) + #define PSYSAPI_TRACE_CONFIG PSYSAPI_TRACE_CONFIG_DEFAULT +#endif + +/* Module specific trace setting will be used if + * the trace level is not specified from the module or + PSYSAPI_TRACING_OVERRIDE is defined + */ +#if (defined(PSYSAPI_TRACE_CONFIG)) + /* Module specific trace setting */ + #if PSYSAPI_TRACE_CONFIG == PSYSAPI_TRACE_LOG_LEVEL_OFF + /* PSYSAPI_TRACE_LOG_LEVEL_OFF */ + #define PSYSAPI_TRACE_METHOD IA_CSS_TRACE_METHOD_NATIVE + #define PSYSAPI_TRACE_LEVEL_ASSERT IA_CSS_TRACE_LEVEL_DISABLED + #define PSYSAPI_TRACE_LEVEL_ERROR IA_CSS_TRACE_LEVEL_DISABLED + #define PSYSAPI_TRACE_LEVEL_WARNING IA_CSS_TRACE_LEVEL_DISABLED + #define PSYSAPI_TRACE_LEVEL_INFO IA_CSS_TRACE_LEVEL_DISABLED + #define PSYSAPI_TRACE_LEVEL_DEBUG IA_CSS_TRACE_LEVEL_DISABLED + #define PSYSAPI_TRACE_LEVEL_VERBOSE IA_CSS_TRACE_LEVEL_DISABLED + #elif PSYSAPI_TRACE_CONFIG == PSYSAPI_TRACE_LOG_LEVEL_NORMAL + /* PSYSAPI_TRACE_LOG_LEVEL_NORMAL */ + #define PSYSAPI_TRACE_METHOD IA_CSS_TRACE_METHOD_NATIVE + #define PSYSAPI_TRACE_LEVEL_ASSERT IA_CSS_TRACE_LEVEL_DISABLED + #define PSYSAPI_TRACE_LEVEL_ERROR IA_CSS_TRACE_LEVEL_ENABLED + #define PSYSAPI_TRACE_LEVEL_WARNING IA_CSS_TRACE_LEVEL_DISABLED + #define PSYSAPI_TRACE_LEVEL_INFO IA_CSS_TRACE_LEVEL_ENABLED + #define PSYSAPI_TRACE_LEVEL_DEBUG IA_CSS_TRACE_LEVEL_DISABLED + #define PSYSAPI_TRACE_LEVEL_VERBOSE IA_CSS_TRACE_LEVEL_DISABLED + #elif PSYSAPI_TRACE_CONFIG == PSYSAPI_TRACE_LOG_LEVEL_DEBUG + /* PSYSAPI_TRACE_LOG_LEVEL_DEBUG */ + #define PSYSAPI_TRACE_METHOD IA_CSS_TRACE_METHOD_NATIVE + #define PSYSAPI_TRACE_LEVEL_ASSERT IA_CSS_TRACE_LEVEL_ENABLED + #define PSYSAPI_TRACE_LEVEL_ERROR IA_CSS_TRACE_LEVEL_ENABLED + #define PSYSAPI_TRACE_LEVEL_WARNING IA_CSS_TRACE_LEVEL_ENABLED + #define PSYSAPI_TRACE_LEVEL_INFO 
IA_CSS_TRACE_LEVEL_ENABLED + #define PSYSAPI_TRACE_LEVEL_DEBUG IA_CSS_TRACE_LEVEL_ENABLED + #define PSYSAPI_TRACE_LEVEL_VERBOSE IA_CSS_TRACE_LEVEL_ENABLED + #else + #error "No PSYSAPI_TRACE_CONFIG Tracing level defined" + #endif +#else + #error "PSYSAPI_TRACE_CONFIG not defined" +#endif + +/* Overriding submodules in PSYSAPI with a specific tracing level */ +/* #define PSYSAPI_DYNAMIC_TRACING_OVERRIDE TRACE_LOG_LEVEL_VERBOSE */ + +#endif /* __IA_CSS_PSYSAPI_TRACE_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/psysapi/kernel/interface/ia_css_kernel_bitmap.h b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/psysapi/kernel/interface/ia_css_kernel_bitmap.h new file mode 100644 index 0000000000000..3fec775eb019d --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/psysapi/kernel/interface/ia_css_kernel_bitmap.h @@ -0,0 +1,223 @@ +/* +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ + +#ifndef __IA_CSS_KERNEL_BITMAP_H +#define __IA_CSS_KERNEL_BITMAP_H + +/*! 
\file */ + +/** @file ia_css_kernel_bitmap.h + * + * The types and operations to make logic decisions given kernel bitmaps + * "ia_css_kernel_bitmap_t" can be larger than native types + */ + +#include +#include "vied_nci_psys_resource_model.h" + +#define IA_CSS_KERNEL_BITMAP_BITS 64 +#define IA_CSS_KERNEL_BITMAP_ELEM_TYPE uint32_t +#define IA_CSS_KERNEL_BITMAP_ELEM_BITS \ + (sizeof(IA_CSS_KERNEL_BITMAP_ELEM_TYPE)*8) +#define IA_CSS_KERNEL_BITMAP_NOF_ELEMS \ + ((IA_CSS_KERNEL_BITMAP_BITS) / (IA_CSS_KERNEL_BITMAP_ELEM_BITS)) + +/** An element is a 32 bit unsigned integer. 64 bit integers might cause + * problems in the compiler. + */ +typedef struct { + IA_CSS_KERNEL_BITMAP_ELEM_TYPE data[IA_CSS_KERNEL_BITMAP_NOF_ELEMS]; +} ia_css_kernel_bitmap_elems_t; + +/** Users should make no assumption about the actual type of + * ia_css_kernel_bitmap_t. + * Users should use IA_CSS_KERNEL_BITMAP_DO_NOT_USE_ELEMS in + * case they erroneously assume that this type is uint64_t and they + * cannot change their implementation. + */ +#ifndef IA_CSS_KERNEL_BITMAP_DO_NOT_USE_ELEMS +typedef ia_css_kernel_bitmap_elems_t ia_css_kernel_bitmap_t; +#else +typedef uint64_t ia_css_kernel_bitmap_t; +#if IA_CSS_KERNEL_BITMAP_BITS > 64 +#error IA_CSS_KERNEL_BITMAP_BITS > 64 not supported \ + with IA_CSS_KERNEL_BITMAP_DO_NOT_USE_ELEMS +#endif +#endif + +/*! Print the bits of a kernel bitmap + + @return < 0 on error + */ +extern int ia_css_kernel_bitmap_print( + const ia_css_kernel_bitmap_t bitmap, + void *fid); + +/*! Create an empty kernel bitmap + + @return bitmap = 0 + */ +extern ia_css_kernel_bitmap_t ia_css_kernel_bitmap_clear(void); + +/*! Creates the complement of a kernel bitmap + * @param bitmap[in] kernel bitmap + * @return ~bitmap + */ +extern ia_css_kernel_bitmap_t ia_css_kernel_bitmap_complement( + const ia_css_kernel_bitmap_t bitmap); + +/*! 
Create the union of two kernel bitmaps + + @param bitmap0[in] kernel bitmap 0 + @param bitmap1[in] kernel bitmap 1 + + @return bitmap0 | bitmap1 + */ +extern ia_css_kernel_bitmap_t ia_css_kernel_bitmap_union( + const ia_css_kernel_bitmap_t bitmap0, + const ia_css_kernel_bitmap_t bitmap1); + +/*! Create the intersection of two kernel bitmaps + + @param bitmap0[in] kernel bitmap 0 + @param bitmap1[in] kernel bitmap 1 + + @return bitmap0 & bitmap1 + */ +extern ia_css_kernel_bitmap_t ia_css_kernel_bitmap_intersection( + const ia_css_kernel_bitmap_t bitmap0, + const ia_css_kernel_bitmap_t bitmap1); + +/*! Check if the kernel bitmaps is empty + + @param bitmap[in] kernel bitmap + + @return bitmap == 0 + */ +extern bool ia_css_is_kernel_bitmap_empty( + const ia_css_kernel_bitmap_t bitmap); + +/*! Check if the intersection of two kernel bitmaps is empty + + @param bitmap0[in] kernel bitmap 0 + @param bitmap1[in] kernel bitmap 1 + + @return (bitmap0 & bitmap1) == 0 + */ +extern bool ia_css_is_kernel_bitmap_intersection_empty( + const ia_css_kernel_bitmap_t bitmap0, + const ia_css_kernel_bitmap_t bitmap1); + +/*! Check if the second kernel bitmap is a subset of the first (or equal) + + @param bitmap0[in] kernel bitmap 0 + @param bitmap1[in] kernel bitmap 1 + + Note: An empty set is always a subset, this function + returns true if bitmap 1 is empty + + @return (bitmap0 & bitmap1) == bitmap1 + */ +extern bool ia_css_is_kernel_bitmap_subset( + const ia_css_kernel_bitmap_t bitmap0, + const ia_css_kernel_bitmap_t bitmap1); + +/*! Check if the kernel bitmaps are equal + + @param bitmap0[in] kernel bitmap 0 + @param bitmap1[in] kernel bitmap 1 + + @return bitmap0 == bitmap1 + */ +extern bool ia_css_is_kernel_bitmap_equal( + const ia_css_kernel_bitmap_t bitmap0, + const ia_css_kernel_bitmap_t bitmap1); + +/*! 
Right shift kernel bitmap + + @param bitmap0[in] kernel bitmap 0 + + @return bitmap0 >> 1 + */ +extern ia_css_kernel_bitmap_t ia_css_kernel_bitmap_shift( + const ia_css_kernel_bitmap_t bitmap); + +/*! Check if the kernel bitmaps contains only a single element + + @param bitmap[in] kernel bitmap + + @return weight(bitmap) == 1 + */ +extern bool ia_css_is_kernel_bitmap_onehot( + const ia_css_kernel_bitmap_t bitmap); + +/*! Checks whether a specific kernel bit is set + * @return bitmap[index] == 1 + */ +extern int ia_css_is_kernel_bitmap_set( + const ia_css_kernel_bitmap_t bitmap, + const unsigned int index); + +/*! Create the union of a kernel bitmap with a onehot bitmap + * with a bit set at index + + @return bitmap[index] |= 1 + */ +extern ia_css_kernel_bitmap_t ia_css_kernel_bitmap_set( + const ia_css_kernel_bitmap_t bitmap, + const unsigned int index); + +/*! Creates kernel bitmap using a uint64 value. + * @return bitmap with the same bits set as in value (provided that width of bitmap is sufficient). + */ +extern ia_css_kernel_bitmap_t ia_css_kernel_bitmap_create_from_uint64( + const uint64_t value); + +/*! Converts an ia_css_kernel_bitmap_t type to uint64_t. Note that if + * ia_css_kernel_bitmap_t contains more then 64 bits, only the lowest 64 bits + * are returned. + * @return uint64_t representation of value +*/ +extern uint64_t ia_css_kernel_bitmap_to_uint64( + const ia_css_kernel_bitmap_t value); + +/*! Creates a kernel bitmap with the bit at index 'index' removed. + * @return ~(1 << index) & bitmap + */ +extern ia_css_kernel_bitmap_t ia_css_kernel_bitmap_unset( + const ia_css_kernel_bitmap_t bitmap, + const unsigned int index); + +/*! Set a previously clear field of a kernel bitmap at index + + @return if bitmap[index] == 0, bitmap[index] -> 1, else 0 + */ +extern ia_css_kernel_bitmap_t ia_css_kernel_bitmap_set_unique( + const ia_css_kernel_bitmap_t bitmap, + const unsigned int index); + +/*! 
Create a onehot kernel bitmap with a bit set at index + + @return bitmap[index] = 1 + */ +extern ia_css_kernel_bitmap_t ia_css_kernel_bit_mask( + const unsigned int index); + +/*! Create a random bitmap + + @return bitmap[index] = 1 + */ +extern ia_css_kernel_bitmap_t ia_css_kernel_ran_bitmap(void); + +#endif /* __IA_CSS_KERNEL_BITMAP_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/psysapi/kernel/interface/ia_css_psys_kernel_trace.h b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/psysapi/kernel/interface/ia_css_psys_kernel_trace.h new file mode 100644 index 0000000000000..1ba29c7ab77ec --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/psysapi/kernel/interface/ia_css_psys_kernel_trace.h @@ -0,0 +1,103 @@ +/* +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+*/ + +#ifndef __IA_CSS_PSYS_KERNEL_TRACE_H +#define __IA_CSS_PSYS_KERNEL_TRACE_H + +#include "ia_css_psysapi_trace.h" + +#define PSYS_KERNEL_TRACE_LEVEL_CONFIG_DEFAULT PSYSAPI_TRACE_LOG_LEVEL_OFF + +/* Default sub-module tracing config */ +#if (!defined(PSYSAPI_KERNEL_TRACING_OVERRIDE)) + #define PSYS_KERNEL_TRACE_LEVEL_CONFIG \ + PSYS_KERNEL_TRACE_LEVEL_CONFIG_DEFAULT +#endif + +/* Module/sub-module specific trace setting will be used if + * the trace level is not specified from the module or + PSYSAPI_KERNEL_TRACING_OVERRIDE is defined + */ +#if (defined(PSYSAPI_KERNEL_TRACING_OVERRIDE)) + /* Module/sub-module specific trace setting */ + #if PSYSAPI_KERNEL_TRACING_OVERRIDE == PSYSAPI_TRACE_LOG_LEVEL_OFF + /* PSYSAPI_TRACE_LOG_LEVEL_OFF */ + #define PSYSAPI_KERNEL_TRACE_METHOD \ + IA_CSS_TRACE_METHOD_NATIVE + #define PSYSAPI_KERNEL_TRACE_LEVEL_ASSERT \ + IA_CSS_TRACE_LEVEL_DISABLED + #define PSYSAPI_KERNEL_TRACE_LEVEL_ERROR \ + IA_CSS_TRACE_LEVEL_DISABLED + #define PSYSAPI_KERNEL_TRACE_LEVEL_WARNING \ + IA_CSS_TRACE_LEVEL_DISABLED + #define PSYSAPI_KERNEL_TRACE_LEVEL_INFO \ + IA_CSS_TRACE_LEVEL_DISABLED + #define PSYSAPI_KERNEL_TRACE_LEVEL_DEBUG \ + IA_CSS_TRACE_LEVEL_DISABLED + #define PSYSAPI_KERNEL_TRACE_LEVEL_VERBOSE \ + IA_CSS_TRACE_LEVEL_DISABLED + #elif PSYSAPI_KERNEL_TRACING_OVERRIDE == PSYSAPI_TRACE_LOG_LEVEL_NORMAL + /* PSYSAPI_TRACE_LOG_LEVEL_NORMAL */ + #define PSYSAPI_KERNEL_TRACE_METHOD \ + IA_CSS_TRACE_METHOD_NATIVE + #define PSYSAPI_KERNEL_TRACE_LEVEL_ASSERT \ + IA_CSS_TRACE_LEVEL_DISABLED + #define PSYSAPI_KERNEL_TRACE_LEVEL_ERROR \ + IA_CSS_TRACE_LEVEL_ENABLED + #define PSYSAPI_KERNEL_TRACE_LEVEL_WARNING \ + IA_CSS_TRACE_LEVEL_DISABLED + #define PSYSAPI_KERNEL_TRACE_LEVEL_INFO \ + IA_CSS_TRACE_LEVEL_ENABLED + #define PSYSAPI_KERNEL_TRACE_LEVEL_DEBUG \ + IA_CSS_TRACE_LEVEL_DISABLED + #define PSYSAPI_KERNEL_TRACE_LEVEL_VERBOSE \ + IA_CSS_TRACE_LEVEL_DISABLED + #elif PSYSAPI_KERNEL_TRACING_OVERRIDE == PSYSAPI_TRACE_LOG_LEVEL_DEBUG + /* 
PSYSAPI_TRACE_LOG_LEVEL_DEBUG */ + #define PSYSAPI_KERNEL_TRACE_METHOD \ + IA_CSS_TRACE_METHOD_NATIVE + #define PSYSAPI_KERNEL_TRACE_LEVEL_ASSERT \ + IA_CSS_TRACE_LEVEL_ENABLED + #define PSYSAPI_KERNEL_TRACE_LEVEL_ERROR \ + IA_CSS_TRACE_LEVEL_ENABLED + #define PSYSAPI_KERNEL_TRACE_LEVEL_WARNING \ + IA_CSS_TRACE_LEVEL_ENABLED + #define PSYSAPI_KERNEL_TRACE_LEVEL_INFO \ + IA_CSS_TRACE_LEVEL_ENABLED + #define PSYSAPI_KERNEL_TRACE_LEVEL_DEBUG \ + IA_CSS_TRACE_LEVEL_ENABLED + #define PSYSAPI_KERNEL_TRACE_LEVEL_VERBOSE \ + IA_CSS_TRACE_LEVEL_ENABLED + #else + #error "No PSYSAPI_DATA Tracing level defined" + #endif +#else + /* Inherit Module trace setting */ + #define PSYSAPI_KERNEL_TRACE_METHOD \ + PSYSAPI_TRACE_METHOD + #define PSYSAPI_KERNEL_TRACE_LEVEL_ASSERT \ + PSYSAPI_TRACE_LEVEL_ASSERT + #define PSYSAPI_KERNEL_TRACE_LEVEL_ERROR \ + PSYSAPI_TRACE_LEVEL_ERROR + #define PSYSAPI_KERNEL_TRACE_LEVEL_WARNING \ + PSYSAPI_TRACE_LEVEL_WARNING + #define PSYSAPI_KERNEL_TRACE_LEVEL_INFO \ + PSYSAPI_TRACE_LEVEL_INFO + #define PSYSAPI_KERNEL_TRACE_LEVEL_DEBUG \ + PSYSAPI_TRACE_LEVEL_DEBUG + #define PSYSAPI_KERNEL_TRACE_LEVEL_VERBOSE \ + PSYSAPI_TRACE_LEVEL_VERBOSE +#endif + +#endif /* __IA_CSS_PSYSAPI_KERNEL_TRACE_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/psysapi/kernel/src/ia_css_kernel_bitmap.c b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/psysapi/kernel/src/ia_css_kernel_bitmap.c new file mode 100644 index 0000000000000..5fd9496bc3ccd --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/psysapi/kernel/src/ia_css_kernel_bitmap.c @@ -0,0 +1,413 @@ +/* +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. 
+ * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ + +#include +#include +#include +#include +#include "ia_css_psys_kernel_trace.h" + +static int ia_css_kernel_bitmap_compute_weight( + const ia_css_kernel_bitmap_t bitmap); + +bool ia_css_is_kernel_bitmap_intersection_empty( + const ia_css_kernel_bitmap_t bitmap0, + const ia_css_kernel_bitmap_t bitmap1) +{ + ia_css_kernel_bitmap_t intersection; + + IA_CSS_TRACE_0(PSYSAPI_KERNEL, VERBOSE, + "ia_css_is_kernel_bitmap_intersection_empty(): enter:\n"); + + intersection = ia_css_kernel_bitmap_intersection(bitmap0, bitmap1); + return ia_css_is_kernel_bitmap_empty(intersection); +} + +bool ia_css_is_kernel_bitmap_empty( + const ia_css_kernel_bitmap_t bitmap) +{ + unsigned int i; + bool is_empty = true; + IA_CSS_TRACE_0(PSYSAPI_KERNEL, VERBOSE, + "ia_css_is_kernel_bitmap_empty(): enter:\n"); +#ifndef IA_CSS_KERNEL_BITMAP_DO_NOT_USE_ELEMS + for (i = 0; i < IA_CSS_KERNEL_BITMAP_NOF_ELEMS; i++) { + is_empty &= bitmap.data[i] == 0; + } +#else + NOT_USED(i); + is_empty = (bitmap == 0); +#endif /* IA_CSS_KERNEL_BITMAP_USE_ELEMS */ + return is_empty; +} + +bool ia_css_is_kernel_bitmap_equal( + const ia_css_kernel_bitmap_t bitmap0, + const ia_css_kernel_bitmap_t bitmap1) +{ + unsigned int i; + bool is_equal = true; + IA_CSS_TRACE_0(PSYSAPI_KERNEL, VERBOSE, + "ia_css_is_kernel_bitmap_equal(): enter:\n"); +#ifndef IA_CSS_KERNEL_BITMAP_DO_NOT_USE_ELEMS + for (i = 0; i < IA_CSS_KERNEL_BITMAP_NOF_ELEMS; i++) { + is_equal = is_equal && (bitmap0.data[i] == bitmap1.data[i]); + } +#else + NOT_USED(i); + is_equal = (bitmap0 == bitmap1); +#endif /* IA_CSS_KERNEL_BITMAP_USE_ELEMS */ + return is_equal; +} + +bool ia_css_is_kernel_bitmap_onehot( + const ia_css_kernel_bitmap_t bitmap) +{ + IA_CSS_TRACE_0(PSYSAPI_KERNEL, VERBOSE, + 
"ia_css_is_kernel_bitmap_onehot(): enter:\n"); + return ia_css_kernel_bitmap_compute_weight(bitmap) == 1; +} + +bool ia_css_is_kernel_bitmap_subset( + const ia_css_kernel_bitmap_t bitmap0, + const ia_css_kernel_bitmap_t bitmap1) +{ + ia_css_kernel_bitmap_t intersection; + + IA_CSS_TRACE_0(PSYSAPI_KERNEL, VERBOSE, + "ia_css_is_kernel_bitmap_subset(): enter:\n"); + + intersection = ia_css_kernel_bitmap_intersection(bitmap0, bitmap1); + return ia_css_is_kernel_bitmap_equal(intersection, bitmap1); +} + +ia_css_kernel_bitmap_t ia_css_kernel_bitmap_clear(void) +{ + unsigned int i; + ia_css_kernel_bitmap_t bitmap; + IA_CSS_TRACE_0(PSYSAPI_KERNEL, VERBOSE, + "ia_css_kernel_bitmap_clear(): enter:\n"); +#ifndef IA_CSS_KERNEL_BITMAP_DO_NOT_USE_ELEMS + for (i = 0; i < IA_CSS_KERNEL_BITMAP_NOF_ELEMS; i++) { + bitmap.data[i] = 0; + } +#else + NOT_USED(i); + bitmap = 0; +#endif /* IA_CSS_KERNEL_BITMAP_USE_ELEMS */ + return bitmap; +} + +ia_css_kernel_bitmap_t ia_css_kernel_bitmap_complement( + const ia_css_kernel_bitmap_t bitmap) +{ + unsigned int i; + ia_css_kernel_bitmap_t result; + IA_CSS_TRACE_0(PSYSAPI_KERNEL, VERBOSE, + "ia_css_kernel_bitmap_complement(): enter:\n"); +#ifndef IA_CSS_KERNEL_BITMAP_DO_NOT_USE_ELEMS + for (i = 0; i < IA_CSS_KERNEL_BITMAP_NOF_ELEMS; i++) { + result.data[i] = ~bitmap.data[i]; + } +#else + NOT_USED(i); + result = ~bitmap; +#endif /* IA_CSS_KERNEL_BITMAP_USE_ELEMS */ + return result; +} + +ia_css_kernel_bitmap_t ia_css_kernel_bitmap_union( + const ia_css_kernel_bitmap_t bitmap0, + const ia_css_kernel_bitmap_t bitmap1) +{ + unsigned int i; + ia_css_kernel_bitmap_t result; + IA_CSS_TRACE_0(PSYSAPI_KERNEL, VERBOSE, + "ia_css_kernel_bitmap_union(): enter:\n"); +#ifndef IA_CSS_KERNEL_BITMAP_DO_NOT_USE_ELEMS + for (i = 0; i < IA_CSS_KERNEL_BITMAP_NOF_ELEMS; i++) { + result.data[i] = (bitmap0.data[i] | bitmap1.data[i]); + } +#else + NOT_USED(i); + result = (bitmap0 | bitmap1); +#endif /* IA_CSS_KERNEL_BITMAP_USE_ELEMS */ + return result; +} + 
+ia_css_kernel_bitmap_t ia_css_kernel_bitmap_intersection( + const ia_css_kernel_bitmap_t bitmap0, + const ia_css_kernel_bitmap_t bitmap1) +{ + unsigned int i; + ia_css_kernel_bitmap_t result; + IA_CSS_TRACE_0(PSYSAPI_KERNEL, VERBOSE, + "ia_css_kernel_bitmap_intersection(): enter:\n"); +#ifndef IA_CSS_KERNEL_BITMAP_DO_NOT_USE_ELEMS + for (i = 0; i < IA_CSS_KERNEL_BITMAP_NOF_ELEMS; i++) { + result.data[i] = (bitmap0.data[i] & bitmap1.data[i]); + } +#else + NOT_USED(i); + result = (bitmap0 & bitmap1); +#endif /* IA_CSS_KERNEL_BITMAP_USE_ELEMS */ + return result; +} + +ia_css_kernel_bitmap_t ia_css_kernel_bitmap_set( + const ia_css_kernel_bitmap_t bitmap, + const unsigned int index) +{ + ia_css_kernel_bitmap_t bit_mask; + + IA_CSS_TRACE_0(PSYSAPI_KERNEL, VERBOSE, + "ia_css_kernel_bitmap_set(): enter:\n"); + + bit_mask = ia_css_kernel_bit_mask(index); + return ia_css_kernel_bitmap_union(bitmap, bit_mask); +} + +ia_css_kernel_bitmap_t ia_css_kernel_bitmap_create_from_uint64( + const uint64_t value) +{ + unsigned int i; + ia_css_kernel_bitmap_t result; + + IA_CSS_TRACE_0(PSYSAPI_KERNEL, VERBOSE, + "ia_css_kernel_bitmap_create_from_uint64(): enter:\n"); + +#ifndef IA_CSS_KERNEL_BITMAP_DO_NOT_USE_ELEMS + result = ia_css_kernel_bitmap_clear(); + for (i = 0; i < IA_CSS_KERNEL_BITMAP_NOF_ELEMS; i++) { + /* masking is done implictly, the MSB bits of casting will be chopped off */ + result.data[i] = (IA_CSS_KERNEL_BITMAP_ELEM_TYPE) + (value >> (i * IA_CSS_KERNEL_BITMAP_ELEM_BITS)); + } +#if IA_CSS_KERNEL_BITMAP_BITS < 64 + if ((value >> IA_CSS_KERNEL_BITMAP_BITS) != 0) { + IA_CSS_TRACE_0(PSYSAPI_KERNEL, ERROR, + "ia_css_kernel_bitmap_create_from_uint64(): " + "kernel bitmap is not wide enough to encode value\n"); + assert(0); + } +#endif +#else + NOT_USED(i); + result = value; +#endif /* IA_CSS_KERNEL_BITMAP_DO_NOT_USE_ELEMS */ + return result; +} + +uint64_t ia_css_kernel_bitmap_to_uint64( + const ia_css_kernel_bitmap_t value) +{ + const unsigned int bits64 = sizeof(uint64_t) 
* 8; + const unsigned int nof_elems_bits64 = bits64 / IA_CSS_KERNEL_BITMAP_ELEM_BITS; + unsigned int i; + uint64_t res = 0; + + IA_CSS_TRACE_0(PSYSAPI_KERNEL, VERBOSE, + "ia_css_kernel_bitmap_to_uint64(): enter:\n"); + + assert((bits64 % IA_CSS_KERNEL_BITMAP_ELEM_BITS) == 0); + assert(nof_elems_bits64 > 0); + +#ifndef IA_CSS_KERNEL_BITMAP_DO_NOT_USE_ELEMS + for (i = 0; i < nof_elems_bits64; i++) { + res |= ((uint64_t)(value.data[i]) << (i * IA_CSS_KERNEL_BITMAP_ELEM_BITS)); + } + for (i = nof_elems_bits64; i < IA_CSS_KERNEL_BITMAP_NOF_ELEMS; i++) { + assert(value.data[i] == 0); + } + return res; +#else + (void)i; + (void)res; + (void)nof_elems_bits64; + return (uint64_t)value; +#endif /* IA_CSS_KERNEL_BITMAP_DO_NOT_USE_ELEMS */ +} + +ia_css_kernel_bitmap_t ia_css_kernel_bitmap_unset( + const ia_css_kernel_bitmap_t bitmap, + const unsigned int index) +{ + ia_css_kernel_bitmap_t result; + + IA_CSS_TRACE_0(PSYSAPI_KERNEL, VERBOSE, + "ia_css_kernel_bitmap_unset(): enter:\n"); + + result = ia_css_kernel_bit_mask(index); + result = ia_css_kernel_bitmap_complement(result); + return ia_css_kernel_bitmap_intersection(bitmap, result); +} + +ia_css_kernel_bitmap_t ia_css_kernel_bitmap_set_unique( + const ia_css_kernel_bitmap_t bitmap, + const unsigned int index) +{ + ia_css_kernel_bitmap_t ret; + ia_css_kernel_bitmap_t bit_mask; + + IA_CSS_TRACE_0(PSYSAPI_KERNEL, VERBOSE, + "ia_css_kernel_bitmap_set_unique(): enter:\n"); + + ret = ia_css_kernel_bitmap_clear(); + bit_mask = ia_css_kernel_bit_mask(index); + + if (ia_css_is_kernel_bitmap_intersection_empty(bitmap, bit_mask) + && !ia_css_is_kernel_bitmap_empty(bit_mask)) { + ret = ia_css_kernel_bitmap_union(bitmap, bit_mask); + } + return ret; +} + +ia_css_kernel_bitmap_t ia_css_kernel_bit_mask( + const unsigned int index) +{ + unsigned int elem_index; + unsigned int elem_bit_index; + ia_css_kernel_bitmap_t bit_mask = ia_css_kernel_bitmap_clear(); + + /* Assert disabled for staging, because some PGs do not satisfy this condition 
*/ + /* assert(index < IA_CSS_KERNEL_BITMAP_BITS); */ + + IA_CSS_TRACE_0(PSYSAPI_KERNEL, VERBOSE, + "ia_css_kernel_bit_mask(): enter:\n"); +#ifndef IA_CSS_KERNEL_BITMAP_DO_NOT_USE_ELEMS + if (index < IA_CSS_KERNEL_BITMAP_BITS) { + elem_index = index / IA_CSS_KERNEL_BITMAP_ELEM_BITS; + elem_bit_index = index % IA_CSS_KERNEL_BITMAP_ELEM_BITS; + assert(elem_index < IA_CSS_KERNEL_BITMAP_NOF_ELEMS); + + bit_mask.data[elem_index] = 1 << elem_bit_index; + } +#else + NOT_USED(elem_index); + NOT_USED(elem_bit_index); + if (index < IA_CSS_KERNEL_BITMAP_BITS) { + bit_mask = (ia_css_kernel_bitmap_t)1 << index; + } +#endif /* IA_CSS_KERNEL_BITMAP_USE_ELEMS */ + return bit_mask; +} + + +static int ia_css_kernel_bitmap_compute_weight( + const ia_css_kernel_bitmap_t bitmap) +{ + ia_css_kernel_bitmap_t loc_bitmap; + int weight = 0; + int i; + + IA_CSS_TRACE_0(PSYSAPI_KERNEL, VERBOSE, + "ia_css_kernel_bitmap_compute_weight(): enter:\n"); + + loc_bitmap = bitmap; + + /* In fact; do not need the iterator "i" */ + for (i = 0; (i < IA_CSS_KERNEL_BITMAP_BITS) && + !ia_css_is_kernel_bitmap_empty(loc_bitmap); i++) { + weight += ia_css_is_kernel_bitmap_set(loc_bitmap, 0); + loc_bitmap = ia_css_kernel_bitmap_shift(loc_bitmap); + } + + return weight; +} + +int ia_css_is_kernel_bitmap_set( + const ia_css_kernel_bitmap_t bitmap, + const unsigned int index) +{ + unsigned int elem_index; + unsigned int elem_bit_index; + + IA_CSS_TRACE_0(PSYSAPI_KERNEL, VERBOSE, + "ia_css_is_kernel_bitmap_set(): enter:\n"); + + /* Assert disabled for staging, because some PGs do not satisfy this condition */ + /* assert(index < IA_CSS_KERNEL_BITMAP_BITS); */ + +#ifndef IA_CSS_KERNEL_BITMAP_DO_NOT_USE_ELEMS + elem_index = index / IA_CSS_KERNEL_BITMAP_ELEM_BITS; + elem_bit_index = index % IA_CSS_KERNEL_BITMAP_ELEM_BITS; + assert(elem_index < IA_CSS_KERNEL_BITMAP_NOF_ELEMS); + return (((bitmap.data[elem_index] >> elem_bit_index) & 0x1) == 1); +#else + NOT_USED(elem_index); + NOT_USED(elem_bit_index); + return 
(((bitmap >> index) & 0x1) == 1); +#endif /* IA_CSS_KERNEL_BITMAP_DO_NOT_USE_ELEMS */ +} + +ia_css_kernel_bitmap_t ia_css_kernel_bitmap_shift( + const ia_css_kernel_bitmap_t bitmap) +{ + int i; + unsigned int lsb_current_elem = 0; + unsigned int lsb_previous_elem = 0; + ia_css_kernel_bitmap_t loc_bitmap; + + IA_CSS_TRACE_0(PSYSAPI_KERNEL, VERBOSE, + "ia_css_kernel_bitmap_shift(): enter:\n"); + + loc_bitmap = bitmap; + +#ifndef IA_CSS_KERNEL_BITMAP_DO_NOT_USE_ELEMS + for (i = IA_CSS_KERNEL_BITMAP_NOF_ELEMS - 1; i >= 0; i--) { + lsb_current_elem = bitmap.data[i] & 0x01; + loc_bitmap.data[i] >>= 1; + loc_bitmap.data[i] |= (lsb_previous_elem << (IA_CSS_KERNEL_BITMAP_ELEM_BITS - 1)); + lsb_previous_elem = lsb_current_elem; + } +#else + NOT_USED(i); + NOT_USED(lsb_current_elem); + NOT_USED(lsb_previous_elem); + loc_bitmap >>= 1; +#endif /* IA_CSS_KERNEL_BITMAP_USE_ELEMS */ + return loc_bitmap; +} + +int ia_css_kernel_bitmap_print( + const ia_css_kernel_bitmap_t bitmap, + void *fid) +{ + int retval = -1; + int bit; + unsigned int bit_index = 0; + ia_css_kernel_bitmap_t loc_bitmap; + + IA_CSS_TRACE_0(PSYSAPI_KERNEL, INFO, + "ia_css_kernel_bitmap_print(): enter:\n"); + + NOT_USED(fid); + NOT_USED(bit); + + IA_CSS_TRACE_0(PSYSAPI_KERNEL, INFO, "kernel bitmap {\n"); + + loc_bitmap = bitmap; + + for (bit_index = 0; (bit_index < IA_CSS_KERNEL_BITMAP_BITS) && + !ia_css_is_kernel_bitmap_empty(loc_bitmap); bit_index++) { + + bit = ia_css_is_kernel_bitmap_set(loc_bitmap, 0); + loc_bitmap = ia_css_kernel_bitmap_shift(loc_bitmap); + IA_CSS_TRACE_2(PSYSAPI_KERNEL, INFO, "\t%d\t = %d\n", bit_index, bit); + } + IA_CSS_TRACE_0(PSYSAPI_KERNEL, INFO, "}\n"); + + retval = 0; + return retval; +} diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/psysapi/param/interface/ia_css_program_group_param.h b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/psysapi/param/interface/ia_css_program_group_param.h new file mode 100644 index 0000000000000..485dd63e5a861 --- /dev/null 
+++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/psysapi/param/interface/ia_css_program_group_param.h @@ -0,0 +1,293 @@ +/* +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ + +#ifndef __IA_CSS_PROGRAM_GROUP_PARAM_H +#define __IA_CSS_PROGRAM_GROUP_PARAM_H + +/*! \file */ + +/** @file ia_css_program_group_param.h + * + * Define the methods on the program group parameter object that are not part + * of a single interface + */ +#include + +#include + +#include /* ia_css_kernel_bitmap_t */ + +#include + +/*! Get the stored size of the program group parameter object + + @param param[in] program group parameter object + + @return size, 0 on error + */ +extern size_t ia_css_program_group_param_get_size( + const ia_css_program_group_param_t *param); + +/*! initialize program_group_param + + @param blob[in] program group parameter object + @param program_count[in] number of terminals. + @param terminal_count[in] number of terminals. + @param fragment_count[in] number of terminals. + + @return 0 if success, else failure. + */ +extern int ia_css_program_group_param_init( + ia_css_program_group_param_t *blob, + const uint8_t program_count, + const uint8_t terminal_count, + const uint16_t fragment_count, + const enum ia_css_frame_format_type *frame_format_types); +/*! 
Get the program parameter object from a program group parameter object + + @param program_group_param[in] program group parameter object + @param i[in] program parameter index + + @return program parameter pointer, NULL on error + */ +extern ia_css_program_param_t *ia_css_program_group_param_get_program_param( + const ia_css_program_group_param_t *param, + const int i); + +/*! Get the terminal parameter object from a program group parameter object + + @param program_group_param[in] program group parameter object + @param i[in] terminal parameter index + + @return terminal parameter pointer, NULL on error + */ +extern ia_css_terminal_param_t *ia_css_program_group_param_get_terminal_param( + const ia_css_program_group_param_t *param, + const int i); + +/*! Get the fragment count from a program group parameter object + + @param program_group_param[in] program group parameter object + + @return fragment count, 0 on error + */ +extern uint16_t ia_css_program_group_param_get_fragment_count( + const ia_css_program_group_param_t *param); + +/*! Get the program count from a program group parameter object + + @param program_group_param[in] program group parameter object + + @return program count, 0 on error + */ +extern uint8_t ia_css_program_group_param_get_program_count( + const ia_css_program_group_param_t *param); + +/*! Get the terminal count from a program group parameter object + + @param program_group_param[in] program group parameter object + + @return terminal count, 0 on error + */ +extern uint8_t ia_css_program_group_param_get_terminal_count( + const ia_css_program_group_param_t *param); + +/*! Set the protocol version in a program group parameter object + + @param program_group_param[in] program group parameter object + @param protocol_version[in] protocol version + + @return nonzero on error +*/ +extern int +ia_css_program_group_param_set_protocol_version( + ia_css_program_group_param_t *param, + uint8_t protocol_version); + +/*! 
Get the protocol version from a program group parameter object + + @param program_group_param[in] program group parameter object + + @return protocol version +*/ +extern uint8_t +ia_css_program_group_param_get_protocol_version( + const ia_css_program_group_param_t *param); + +/*! Set the kernel enable bitmap from a program group parameter object + + @param param[in] program group parameter object + @param bitmap[in] kernel enable bitmap + + @return non-zero on error + */ +extern int ia_css_program_group_param_set_kernel_enable_bitmap( + ia_css_program_group_param_t *param, + const ia_css_kernel_bitmap_t bitmap); + +/*! Get the kernel enable bitmap from a program group parameter object + + @param program_group_param[in] program group parameter object + + @return kernel enable bitmap, 0 on error +*/ +extern ia_css_kernel_bitmap_t +ia_css_program_group_param_get_kernel_enable_bitmap( + const ia_css_program_group_param_t *param); + +/*! Get the stored size of the program parameter object + + @param param[in] program parameter object + + @return size, 0 on error + */ +extern size_t ia_css_program_param_get_size( + const ia_css_program_param_t *param); + +/*! Set the kernel enable bitmap from a program parameter object + + @param program_param[in] program parameter object + @param bitmap[in] kernel enable bitmap + + @return non-zero on error + */ +extern int ia_css_program_param_set_kernel_enable_bitmap( + ia_css_program_param_t *program_param, + const ia_css_kernel_bitmap_t bitmap); + +/*! Get the kernel enable bitmap from a program parameter object + + @param program_param[in] program parameter object + + Note: This function returns in fact the kernel enable of the program group + parameters + + @return kernel enable bitmap, 0 on error + */ +extern ia_css_kernel_bitmap_t ia_css_program_param_get_kernel_enable_bitmap( + const ia_css_program_param_t *param); + +/*! 
Get the stored size of the terminal parameter object + + @param param[in] terminal parameter object + + @return size, 0 on error + */ +extern size_t ia_css_terminal_param_get_size( + const ia_css_terminal_param_t *param); + +/*! Get the kernel enable bitmap from a terminal parameter object + + @param terminal_param[in] terminal parameter object + + Note: This function returns in fact the kernel enable of the program group + parameters + + @return kernel enable bitmap, 0 on error + */ +extern ia_css_kernel_bitmap_t ia_css_terminal_param_get_kernel_enable_bitmap( + const ia_css_terminal_param_t *param); + +/*! Get the parent object for this terminal param. + + @param terminal_param[in] terminal parameter object + + @return parent program group param object + */ +extern ia_css_program_group_param_t *ia_css_terminal_param_get_parent( + const ia_css_terminal_param_t *param); + +/*! Get the data format type associated with the terminal. + + @param terminal_param[in] terminal parameter object + + @return data format type (ia_css_data_format_type_t) + */ +extern ia_css_frame_format_type_t ia_css_terminal_param_get_frame_format_type( + const ia_css_terminal_param_t *terminal_param); + +/*! Set the data format type associated with the terminal. + + @param terminal_param[in] terminal parameter object + @param data_format_type[in] data format type + + @return non-zero on error. + */ +extern int ia_css_terminal_param_set_frame_format_type( + ia_css_terminal_param_t *terminal_param, + const ia_css_frame_format_type_t data_format_type); + +/*! Get bits per pixel on the frame associated with the terminal. + + @param terminal_param[in] terminal parameter object + + @return bits per pixel + */ +extern uint8_t ia_css_terminal_param_get_bpp( + const ia_css_terminal_param_t *terminal_param); + +/*! Set bits per pixel on the frame associated with the terminal. + + @param terminal_param[in] terminal parameter object + @param bpp[in] bits per pixel + + @return non-zero on error. 
+ */ +extern int ia_css_terminal_param_set_bpp( + ia_css_terminal_param_t *terminal_param, + const uint8_t bpp); + +/*! Get dimensions on the frame associated with the terminal. + + @param terminal_param[in] terminal parameter object + @param dimensions[out] dimension array + + @return non-zero on error. + */ +extern int ia_css_terminal_param_get_dimensions( + const ia_css_terminal_param_t *terminal_param, + uint16_t dimensions[IA_CSS_N_DATA_DIMENSION]); + +/*! Set dimensions on the frame associated with the terminal. + + @param terminal_param[in] terminal parameter object + @param dimensions[in] dimension array + + @return non-zero on error. + */ +extern int ia_css_terminal_param_set_dimensions( + ia_css_terminal_param_t *terminal_param, + const uint16_t dimensions[IA_CSS_N_DATA_DIMENSION]); + +/*! Get stride on the frame associated with the terminal. + + @param terminal_param[in] terminal parameter object + + @return stride of the frame to be attached. + */ +extern uint32_t ia_css_terminal_param_get_stride( + const ia_css_terminal_param_t *terminal_param); + +/*! Set stride on the frame associated with the terminal. + + @param terminal_param[in] terminal parameter object + @param stride[in] stride + + @return non-zero on error. + */ +extern int ia_css_terminal_param_set_stride( + ia_css_terminal_param_t *terminal_param, + const uint32_t stride); + +#endif /* __IA_CSS_PROGRAM_GROUP_PARAM_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/psysapi/param/interface/ia_css_program_group_param.sim.h b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/psysapi/param/interface/ia_css_program_group_param.sim.h new file mode 100644 index 0000000000000..7821f8147a1a0 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/psysapi/param/interface/ia_css_program_group_param.sim.h @@ -0,0 +1,153 @@ +/* +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. 
+ * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ + +#ifndef __IA_CSS_PROGRAM_GROUP_PARAM_SIM_H +#define __IA_CSS_PROGRAM_GROUP_PARAM_SIM_H + +/*! \file */ + +/** @file ia_css_program_group_param.sim.h + * + * Define the methods on the program group parameter object: Simulation only + */ +#include + +#include + +#include + +/* Simulation */ + +/*! Create a program group parameter object from specification + + @param specification[in] specification (index) + @param manifest[in] program group manifest + + @return NULL on error + */ +extern ia_css_program_group_param_t *ia_css_program_group_param_create( + const unsigned int specification, + const ia_css_program_group_manifest_t *manifest); + +/*! Destroy the program group parameter object + + @param program_group_param[in] program group parameter object + + @return NULL + */ +extern ia_css_program_group_param_t *ia_css_program_group_param_destroy( + ia_css_program_group_param_t *param); + +/*! Compute the size of storage required for allocating + * the program group parameter object + + @param program_count[in] Number of programs in the process group + @param terminal_count[in] Number of terminals on the process group + @param fragment_count[in] Number of fragments on the terminals of + the process group + + @return 0 on error + */ +size_t ia_css_sizeof_program_group_param( + const uint8_t program_count, + const uint8_t terminal_count, + const uint16_t fragment_count); + +/*! 
Allocate (the store of) a program group parameter object + + @param program_count[in] Number of programs in the process group + @param terminal_count[in] Number of terminals on the process group + @param fragment_count[in] Number of fragments on the terminals of + the process group + + @return program group parameter pointer, NULL on error + */ +extern ia_css_program_group_param_t *ia_css_program_group_param_alloc( + const uint8_t program_count, + const uint8_t terminal_count, + const uint16_t fragment_count); + +/*! Free (the store of) a program group parameter object + + @param program_group_param[in] program group parameter object + + @return NULL + */ +extern ia_css_program_group_param_t *ia_css_program_group_param_free( + ia_css_program_group_param_t *param); + +/*! Print the program group parameter object to file/stream + + @param param[in] program group parameter object + @param fid[out] file/stream handle + + @return < 0 on error + */ +extern int ia_css_program_group_param_print( + const ia_css_program_group_param_t *param, + void *fid); + +/*! Allocate (the store of) a program parameter object + + @return program parameter pointer, NULL on error + */ +extern ia_css_program_param_t *ia_css_program_param_alloc(void); + +/*! Free (the store of) a program parameter object + + @param param[in] program parameter object + + @return NULL + */ +extern ia_css_program_param_t *ia_css_program_param_free( + ia_css_program_param_t *param); + +/*! Print the program parameter object to file/stream + + @param param[in] program parameter object + @param fid[out] file/stream handle + + @return < 0 on error + */ +extern int ia_css_program_param_print( + const ia_css_program_param_t *param, + void *fid); + +/*! Allocate (the store of) a terminal parameter object + + @return terminal parameter pointer, NULL on error + */ +extern ia_css_terminal_param_t *ia_css_terminal_param_alloc(void); + +/*! 
Free (the store of) a terminal parameter object + + @param param[in] terminal parameter object + + @return NULL + */ +extern ia_css_terminal_param_t *ia_css_terminal_param_free( + ia_css_terminal_param_t *param); + +/*! Print the terminal parameter object to file/stream + + @param param[in] terminal parameter object + @param fid[out] file/stream handle + + @return < 0 on error + */ +extern int ia_css_terminal_param_print( + const ia_css_terminal_param_t *param, + void *fid); + +#endif /* __IA_CSS_PROGRAM_GROUP_PARAM_SIM_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/psysapi/param/interface/ia_css_program_group_param_types.h b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/psysapi/param/interface/ia_css_program_group_param_types.h new file mode 100644 index 0000000000000..34f57584a227f --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/psysapi/param/interface/ia_css_program_group_param_types.h @@ -0,0 +1,64 @@ +/* +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ + +#ifndef __IA_CSS_PROGRAM_GROUP_PARAM_TYPES_H +#define __IA_CSS_PROGRAM_GROUP_PARAM_TYPES_H + +/*! \file */ + +/** @file ia_css_program_group_param_types.h + * + * Define the parameter objects that are necessary to create the process + * groups i.e. enable parameters and parameters to set-up frame descriptors + */ + +#include +#include /* ia_css_kernel_bitmap_t */ +#include + +#include +/*! 
make this public so that driver can populate, + * size, bpp, dimensions for all terminals. + * + * Currently one API is provided to get frame_format_type. + * + * frame_format_type is set during ia_css_terminal_param_init(). + * Value for that is const and binary specific. + */ +struct ia_css_terminal_param_s { + uint32_t size; /**< Size of this structure */ + /**< Indicates if this is a generic type or inbuild + * with variable size descriptor + */ + ia_css_frame_format_type_t frame_format_type; + /**< offset to add to reach parent. This is negative value.*/ + int32_t parent_offset; + uint16_t dimensions[IA_CSS_N_DATA_DIMENSION];/**< Logical dimensions */ + /**< Mapping to the index field of the terminal descriptor */ + uint16_t index[IA_CSS_N_DATA_DIMENSION]; + /**< Logical fragment dimension, + * TODO: fragment dimensions can be different per fragment + */ + uint16_t fragment_dimensions[IA_CSS_N_DATA_DIMENSION]; + uint32_t stride;/**< Stride of a frame */ + uint16_t offset;/**< Offset in bytes to first fragment */ + uint8_t bpp; /**< Bits per pixel */ + uint8_t bpe; /**< Bits per element */ +}; + +typedef struct ia_css_program_group_param_s ia_css_program_group_param_t; +typedef struct ia_css_program_param_s ia_css_program_param_t; +typedef struct ia_css_terminal_param_s ia_css_terminal_param_t; + +#endif /* __IA_CSS_PROGRAM_GROUP_PARAM_TYPES_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/psysapi/param/interface/ia_css_psys_param_trace.h b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/psysapi/param/interface/ia_css_psys_param_trace.h new file mode 100644 index 0000000000000..f59dfbf165e4d --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/psysapi/param/interface/ia_css_psys_param_trace.h @@ -0,0 +1,102 @@ +/* +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. 
+ * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ + +#ifndef __IA_CSS_PSYS_PARAM_TRACE_H +#define __IA_CSS_PSYS_PARAM_TRACE_H + +#include "ia_css_psysapi_trace.h" + +#define PSYS_PARAM_TRACE_LEVEL_CONFIG_DEFAULT PSYSAPI_TRACE_LOG_LEVEL_OFF + +/* Default sub-module tracing config */ +#if (!defined(PSYSAPI_PARAM_TRACING_OVERRIDE)) + #define PSYS_PARAM_TRACE_LEVEL_CONFIG PSYS_PARAM_TRACE_LEVEL_CONFIG_DEFAULT +#endif + +/* Module/sub-module specific trace setting will be used if + * the trace level is not specified from the module or + PSYSAPI_PARAM_TRACING_OVERRIDE is defined + */ +#if (defined(PSYSAPI_PARAM_TRACING_OVERRIDE)) + /* Module/sub-module specific trace setting */ + #if PSYSAPI_PARAM_TRACING_OVERRIDE == PSYSAPI_TRACE_LOG_LEVEL_OFF + /* PSYSAPI_TRACE_LOG_LEVEL_OFF */ + #define PSYSAPI_PARAM_TRACE_METHOD \ + IA_CSS_TRACE_METHOD_NATIVE + #define PSYSAPI_PARAM_TRACE_LEVEL_ASSERT \ + IA_CSS_TRACE_LEVEL_DISABLED + #define PSYSAPI_PARAM_TRACE_LEVEL_ERROR \ + IA_CSS_TRACE_LEVEL_DISABLED + #define PSYSAPI_PARAM_TRACE_LEVEL_WARNING \ + IA_CSS_TRACE_LEVEL_DISABLED + #define PSYSAPI_PARAM_TRACE_LEVEL_INFO \ + IA_CSS_TRACE_LEVEL_DISABLED + #define PSYSAPI_PARAM_TRACE_LEVEL_DEBUG \ + IA_CSS_TRACE_LEVEL_DISABLED + #define PSYSAPI_PARAM_TRACE_LEVEL_VERBOSE \ + IA_CSS_TRACE_LEVEL_DISABLED + #elif PSYSAPI_PARAM_TRACING_OVERRIDE == PSYSAPI_TRACE_LOG_LEVEL_NORMAL + /* PSYSAPI_TRACE_LOG_LEVEL_NORMAL */ + #define PSYSAPI_PARAM_TRACE_METHOD \ + IA_CSS_TRACE_METHOD_NATIVE + #define PSYSAPI_PARAM_TRACE_LEVEL_ASSERT \ + IA_CSS_TRACE_LEVEL_DISABLED + #define 
PSYSAPI_PARAM_TRACE_LEVEL_ERROR \ + IA_CSS_TRACE_LEVEL_ENABLED + #define PSYSAPI_PARAM_TRACE_LEVEL_WARNING \ + IA_CSS_TRACE_LEVEL_DISABLED + #define PSYSAPI_PARAM_TRACE_LEVEL_INFO \ + IA_CSS_TRACE_LEVEL_ENABLED + #define PSYSAPI_PARAM_TRACE_LEVEL_DEBUG \ + IA_CSS_TRACE_LEVEL_DISABLED + #define PSYSAPI_PARAM_TRACE_LEVEL_VERBOSE \ + IA_CSS_TRACE_LEVEL_DISABLED + #elif PSYSAPI_PARAM_TRACING_OVERRIDE == PSYSAPI_TRACE_LOG_LEVEL_DEBUG + /* PSYSAPI_TRACE_LOG_LEVEL_DEBUG */ + #define PSYSAPI_PARAM_TRACE_METHOD \ + IA_CSS_TRACE_METHOD_NATIVE + #define PSYSAPI_PARAM_TRACE_LEVEL_ASSERT \ + IA_CSS_TRACE_LEVEL_ENABLED + #define PSYSAPI_PARAM_TRACE_LEVEL_ERROR \ + IA_CSS_TRACE_LEVEL_ENABLED + #define PSYSAPI_PARAM_TRACE_LEVEL_WARNING \ + IA_CSS_TRACE_LEVEL_ENABLED + #define PSYSAPI_PARAM_TRACE_LEVEL_INFO \ + IA_CSS_TRACE_LEVEL_ENABLED + #define PSYSAPI_PARAM_TRACE_LEVEL_DEBUG \ + IA_CSS_TRACE_LEVEL_ENABLED + #define PSYSAPI_PARAM_TRACE_LEVEL_VERBOSE \ + IA_CSS_TRACE_LEVEL_ENABLED + #else + #error "No PSYSAPI_DATA Tracing level defined" + #endif +#else + /* Inherit Module trace setting */ + #define PSYSAPI_PARAM_TRACE_METHOD \ + PSYSAPI_TRACE_METHOD + #define PSYSAPI_PARAM_TRACE_LEVEL_ASSERT \ + PSYSAPI_TRACE_LEVEL_ASSERT + #define PSYSAPI_PARAM_TRACE_LEVEL_ERROR \ + PSYSAPI_TRACE_LEVEL_ERROR + #define PSYSAPI_PARAM_TRACE_LEVEL_WARNING \ + PSYSAPI_TRACE_LEVEL_WARNING + #define PSYSAPI_PARAM_TRACE_LEVEL_INFO \ + PSYSAPI_TRACE_LEVEL_INFO + #define PSYSAPI_PARAM_TRACE_LEVEL_DEBUG \ + PSYSAPI_TRACE_LEVEL_DEBUG + #define PSYSAPI_PARAM_TRACE_LEVEL_VERBOSE \ + PSYSAPI_TRACE_LEVEL_VERBOSE +#endif + +#endif /* __IA_CSS_PSYSAPI_PARAM_TRACE_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/psysapi/param/src/ia_css_program_group_param.c b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/psysapi/param/src/ia_css_program_group_param.c new file mode 100644 index 0000000000000..067f69a4a01e2 --- /dev/null +++ 
b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/psysapi/param/src/ia_css_program_group_param.c @@ -0,0 +1,771 @@ +/* +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "ia_css_psys_param_trace.h" + +static int +ia_css_terminal_param_init(ia_css_terminal_param_t *terminal_param, + uint32_t offset, + enum ia_css_frame_format_type frame_format_type); + +static int +ia_css_program_param_init(ia_css_program_param_t *program_param, + int32_t offset); + +size_t ia_css_sizeof_program_group_param( + const uint8_t program_count, + const uint8_t terminal_count, + const uint16_t fragment_count) +{ + size_t size = 0; + + IA_CSS_TRACE_0(PSYSAPI_PARAM, VERBOSE, + "ia_css_sizeof_program_group_param(): enter:\n"); + + verifexit(program_count != 0); + verifexit(terminal_count != 0); + verifexit(fragment_count != 0); + + size += sizeof(ia_css_program_group_param_t); + size += program_count * fragment_count * sizeof(ia_css_program_param_t); + size += terminal_count * sizeof(ia_css_terminal_param_t); +EXIT: + if (0 == program_count || 0 == terminal_count || 0 == fragment_count) { + IA_CSS_TRACE_0(PSYSAPI_PARAM, WARNING, + "ia_css_sizeof_program_group_param invalid argument\n"); + } + return size; +} + +size_t ia_css_program_group_param_get_size( + const ia_css_program_group_param_t *program_group_param) +{ + size_t size = 0; + + IA_CSS_TRACE_0(PSYSAPI_PARAM, 
VERBOSE, + "ia_css_program_group_param_get_size(): enter:\n"); + + if (program_group_param != NULL) { + size = program_group_param->size; + } else { + IA_CSS_TRACE_0(PSYSAPI_PARAM, WARNING, + "ia_css_program_group_param_get_size invalid argument\n"); + } + return size; +} + +size_t ia_css_program_param_get_size( + const ia_css_program_param_t *param) +{ + size_t size = 0; + + IA_CSS_TRACE_0(PSYSAPI_PARAM, VERBOSE, + "ia_css_program_param_get_size(): enter:\n"); + + if (param != NULL) { + size = param->size; + } else { + IA_CSS_TRACE_0(PSYSAPI_PARAM, WARNING, + "ia_css_program_param_get_size invalid argument\n"); + } + return size; +} + +ia_css_program_param_t *ia_css_program_group_param_get_program_param( + const ia_css_program_group_param_t *param, + const int i) +{ + ia_css_program_param_t *program_param = NULL; + ia_css_program_param_t *program_param_base; + int program_count = 0; + + IA_CSS_TRACE_0(PSYSAPI_PARAM, VERBOSE, + "ia_css_program_group_param_get_program_param(): enter:\n"); + + verifexit(param != NULL); + + program_count = + (int)ia_css_program_group_param_get_program_count(param); + + verifexit(i < program_count); + + program_param_base = (ia_css_program_param_t *) + (((char *)param) + param->program_param_offset); + + program_param = &program_param_base[i]; + +EXIT: + if (NULL == param || i >= program_count) { + IA_CSS_TRACE_0(PSYSAPI_PARAM, WARNING, + "ia_css_program_group_param_get_program_param invalid argument\n"); + } + return program_param; +} + +size_t ia_css_terminal_param_get_size( + const ia_css_terminal_param_t *param) +{ + size_t size = 0; + + IA_CSS_TRACE_0(PSYSAPI_PARAM, VERBOSE, + "ia_css_terminal_param_get_size(): enter:\n"); + + if (param != NULL) { + size = param->size; + } else { + IA_CSS_TRACE_0(PSYSAPI_PARAM, WARNING, + "ia_css_terminal_param_get_size invalid argument\n"); + } + + return size; +} + +ia_css_terminal_param_t *ia_css_program_group_param_get_terminal_param( + const ia_css_program_group_param_t *param, + const int i) 
+{
+	ia_css_terminal_param_t *terminal_param = NULL;
+	ia_css_terminal_param_t *terminal_param_base;
+	int terminal_count = 0;	/* was "program_count"; it holds the terminal count */
+
+	IA_CSS_TRACE_0(PSYSAPI_PARAM, VERBOSE,
+		"ia_css_program_group_param_get_terminal_param(): enter:\n");
+
+	verifexit(param != NULL);
+
+	terminal_count =
+		(int)ia_css_program_group_param_get_terminal_count(param);
+
+	verifexit(i < terminal_count);
+
+	terminal_param_base = (ia_css_terminal_param_t *)
+		(((char *)param) + param->terminal_param_offset);
+	terminal_param = &terminal_param_base[i];
+EXIT:
+	if (NULL == param || i >= terminal_count) {
+		IA_CSS_TRACE_0(PSYSAPI_PARAM, WARNING,
+			"ia_css_program_group_param_get_terminal_param invalid argument\n");
+	}
+	return terminal_param;
+}
+
+uint8_t ia_css_program_group_param_get_program_count(
+	const ia_css_program_group_param_t *param)
+{
+	uint8_t program_count = 0;
+
+	IA_CSS_TRACE_0(PSYSAPI_PARAM, VERBOSE,
+		"ia_css_program_group_param_get_program_count(): enter:\n");
+
+	if (param != NULL) {
+		program_count = param->program_count;
+	} else {
+		IA_CSS_TRACE_0(PSYSAPI_PARAM, WARNING,
+			"ia_css_program_group_param_get_program_count invalid argument\n");
+	}
+	return program_count;
+}
+
+uint8_t ia_css_program_group_param_get_terminal_count(
+	const ia_css_program_group_param_t *param)
+{
+	uint8_t terminal_count = 0;
+
+	IA_CSS_TRACE_0(PSYSAPI_PARAM, VERBOSE,
+		"ia_css_program_group_param_get_terminal_count(): enter:\n");
+
+	if (param != NULL) {
+		terminal_count = param->terminal_count;
+	} else {
+		IA_CSS_TRACE_0(PSYSAPI_PARAM, WARNING,
+			"ia_css_program_group_param_get_terminal_count invalid argument\n");
+	}
+	return terminal_count;
+}
+
+uint16_t ia_css_program_group_param_get_fragment_count(
+	const ia_css_program_group_param_t *param)
+{
+	uint16_t fragment_count = 0;	/* uint16_t: a uint8_t local truncated counts > 255 */
+
+	IA_CSS_TRACE_0(PSYSAPI_PARAM, VERBOSE,
+		"ia_css_program_group_param_get_fragment_count(): enter:\n");
+
+	if (param != NULL) {
+		fragment_count = param->fragment_count;
+	} else {
+		IA_CSS_TRACE_0(PSYSAPI_PARAM, WARNING,
+			"ia_css_program_group_param_get_fragment_count invalid argument\n");
+	}
+	return fragment_count;
+}
+
+int ia_css_program_group_param_set_protocol_version(
+	ia_css_program_group_param_t *param,
+	uint8_t protocol_version)
+{
+	int retval = -1;
+
+	IA_CSS_TRACE_0(PSYSAPI_PARAM, VERBOSE,
+		"ia_css_program_group_param_set_protocol_version(): enter:\n");
+
+	if (param != NULL) {
+		param->protocol_version = protocol_version;
+		retval = 0;
+	} else {
+		IA_CSS_TRACE_1(PSYSAPI_PARAM, ERROR,
+			"ia_css_program_group_param_set_protocol_version failed (%i)\n",
+			retval);
+	}
+	return retval;
+}
+
+uint8_t ia_css_program_group_param_get_protocol_version(
+	const ia_css_program_group_param_t *param)
+{
+	uint8_t protocol_version = 0;
+
+	IA_CSS_TRACE_0(PSYSAPI_PARAM, VERBOSE,
+		"ia_css_program_group_param_get_protocol_version(): enter:\n");
+
+	if (param != NULL) {
+		protocol_version = param->protocol_version;
+	} else {
+		IA_CSS_TRACE_0(PSYSAPI_PARAM, WARNING,
+			"ia_css_program_group_param_get_protocol_version invalid argument\n");
+	}
+	return protocol_version;
+}
+
+int ia_css_program_group_param_set_kernel_enable_bitmap(
+	ia_css_program_group_param_t *param,
+	const ia_css_kernel_bitmap_t bitmap)
+{
+	int retval = -1;
+
+	IA_CSS_TRACE_0(PSYSAPI_PARAM, VERBOSE,
+		"ia_css_program_group_param_set_kernel_enable_bitmap(): enter:\n");
+
+	if (param != NULL) {
+		param->kernel_enable_bitmap = bitmap;
+		retval = 0;
+	} else {
+		IA_CSS_TRACE_1(PSYSAPI_PARAM, ERROR,
+			"ia_css_program_group_param_set_kernel_enable_bitmap failed (%i)\n",
+			retval);
+	}
+	return retval;
+}
+
+ia_css_kernel_bitmap_t ia_css_program_group_param_get_kernel_enable_bitmap(
+	const ia_css_program_group_param_t *param)
+{
+	ia_css_kernel_bitmap_t bitmap = ia_css_kernel_bitmap_clear();
+
+	IA_CSS_TRACE_0(PSYSAPI_PARAM, VERBOSE,
+		"ia_css_program_group_param_get_kernel_enable_bitmap(): enter:\n");
+
+	if (param != NULL) {
+		bitmap = param->kernel_enable_bitmap;
+	} else {
+ IA_CSS_TRACE_0(PSYSAPI_PARAM, WARNING, + "ia_css_program_group_param_get_kernel_enable_bitmap invalid argument\n"); + } + return bitmap; +} + +int ia_css_program_param_set_kernel_enable_bitmap( + ia_css_program_param_t *program_param, + const ia_css_kernel_bitmap_t bitmap) +{ + int retval = -1; + + IA_CSS_TRACE_0(PSYSAPI_PARAM, VERBOSE, + "ia_css_program_param_set_kernel_enable_bitmap(): enter:\n"); + + if (program_param != NULL) { + program_param->kernel_enable_bitmap = bitmap; + retval = 0; + } else { + IA_CSS_TRACE_1(PSYSAPI_PARAM, ERROR, + "ia_css_program_param_set_kernel_enable_bitmap failed (%i)\n", + retval); + } + return retval; +} + +ia_css_kernel_bitmap_t ia_css_program_param_get_kernel_enable_bitmap( + const ia_css_program_param_t *program_param) +{ + ia_css_kernel_bitmap_t bitmap = ia_css_kernel_bitmap_clear(); + char *base; + + IA_CSS_TRACE_0(PSYSAPI_PARAM, VERBOSE, + "ia_css_program_param_get_kernel_enable_bitmap(): enter:\n"); + + verifexit(program_param != NULL); + verifexit(program_param->parent_offset != 0); + + base = (char *)((char *)program_param + program_param->parent_offset); + bitmap = ((ia_css_program_group_param_t *)base)->kernel_enable_bitmap; +EXIT: + if (NULL == program_param || 0 == program_param->parent_offset) { + IA_CSS_TRACE_0(PSYSAPI_PARAM, WARNING, + "ia_css_program_param_get_kernel_enable_bitmap invalid argument\n"); + } + return bitmap; +} + +ia_css_kernel_bitmap_t ia_css_terminal_param_get_kernel_enable_bitmap( + const ia_css_terminal_param_t *param) +{ + ia_css_kernel_bitmap_t bitmap = ia_css_kernel_bitmap_clear(); + char *base; + + IA_CSS_TRACE_0(PSYSAPI_PARAM, VERBOSE, + "ia_css_terminal_param_get_kernel_enable_bitmap(): enter:\n"); + + verifexit(param != NULL); + verifexit(param->parent_offset != 0); + + base = (char *)((char *)param + param->parent_offset); + bitmap = ((ia_css_program_group_param_t *)base)->kernel_enable_bitmap; +EXIT: + if (NULL == param || 0 == param->parent_offset) { + IA_CSS_TRACE_0(PSYSAPI_PARAM, 
WARNING, + "ia_css_terminal_param_get_kernel_enable_bitmap invalid argument\n"); + } + return bitmap; +} + +ia_css_frame_format_type_t ia_css_terminal_param_get_frame_format_type( + const ia_css_terminal_param_t *param) +{ + ia_css_frame_format_type_t ft = IA_CSS_N_FRAME_FORMAT_TYPES; + + IA_CSS_TRACE_0(PSYSAPI_PARAM, VERBOSE, + "ia_css_terminal_param_get_frame_format_type(): enter:\n"); + + verifexit(param != NULL); + + ft = param->frame_format_type; +EXIT: + if (NULL == param) { + IA_CSS_TRACE_0(PSYSAPI_PARAM, WARNING, + "ia_css_terminal_param_get_frame_format_type invalid argument\n"); + } + return ft; +} + +int ia_css_terminal_param_set_frame_format_type( + ia_css_terminal_param_t *param, + const ia_css_frame_format_type_t data_format_type) +{ + int retval = -1; + + IA_CSS_TRACE_0(PSYSAPI_PARAM, VERBOSE, + "ia_css_terminal_param_set_frame_format_type(): enter:\n"); + + if (param != NULL) { + param->frame_format_type = data_format_type; + retval = 0; + } else { + IA_CSS_TRACE_1(PSYSAPI_PARAM, ERROR, + "ia_css_terminal_param_set_frame_format_type failed (%i)\n", + retval); + } + return retval; +} + +uint8_t ia_css_terminal_param_get_bpp( + const ia_css_terminal_param_t *param) +{ + uint8_t bpp = 0; + + IA_CSS_TRACE_0(PSYSAPI_PARAM, VERBOSE, + "ia_css_terminal_param_get_bpp(): enter:\n"); + + verifexit(param != NULL); + + bpp = param->bpp; + +EXIT: + if (NULL == param) { + IA_CSS_TRACE_0(PSYSAPI_PARAM, WARNING, + "ia_css_terminal_param_get_bpp invalid argument\n"); + } + return bpp; +} + +int ia_css_terminal_param_set_bpp( + ia_css_terminal_param_t *param, + const uint8_t bpp) +{ + int retval = -1; + + IA_CSS_TRACE_0(PSYSAPI_PARAM, VERBOSE, + "ia_css_terminal_param_set_bpp(): enter:\n"); + + if (param != NULL) { + param->bpp = bpp; + retval = 0; + } else { + IA_CSS_TRACE_1(PSYSAPI_PARAM, ERROR, + "ia_css_terminal_param_set_bpp failed (%i)\n", retval); + } + return retval; +} + +int ia_css_terminal_param_get_dimensions( + const ia_css_terminal_param_t *param, + 
uint16_t dimensions[IA_CSS_N_DATA_DIMENSION]) +{ + int retval = -1; + + IA_CSS_TRACE_0(PSYSAPI_PARAM, VERBOSE, + "ia_css_terminal_param_get_dimensions(): enter:\n"); + + if (param != NULL) { + dimensions[IA_CSS_COL_DIMENSION] = + param->dimensions[IA_CSS_COL_DIMENSION]; + dimensions[IA_CSS_ROW_DIMENSION] = + param->dimensions[IA_CSS_ROW_DIMENSION]; + retval = 0; + } else { + IA_CSS_TRACE_1(PSYSAPI_PARAM, ERROR, + "ia_css_terminal_param_get_dimensions failed (%i)\n", retval); + } + return retval; +} + +int ia_css_terminal_param_set_dimensions( + ia_css_terminal_param_t *param, + const uint16_t dimensions[IA_CSS_N_DATA_DIMENSION]) +{ + int retval = -1; + + IA_CSS_TRACE_0(PSYSAPI_PARAM, VERBOSE, + "ia_css_terminal_param_set_dimensions(): enter:\n"); + + if (param != NULL) { + param->dimensions[IA_CSS_COL_DIMENSION] = + dimensions[IA_CSS_COL_DIMENSION]; + param->dimensions[IA_CSS_ROW_DIMENSION] = + dimensions[IA_CSS_ROW_DIMENSION]; + retval = 0; + } else { + IA_CSS_TRACE_1(PSYSAPI_PARAM, ERROR, + "ia_css_terminal_param_set_dimensions failed (%i)\n", retval); + } + return retval; +} + +int ia_css_terminal_param_set_stride( + ia_css_terminal_param_t *param, + const uint32_t stride) +{ + int retval = -1; + + verifexit(param != NULL); + param->stride = stride; + retval = 0; + +EXIT: + return retval; +} + +uint32_t ia_css_terminal_param_get_stride( + const ia_css_terminal_param_t *param) +{ + uint32_t stride = 0; + + verifexit(param != NULL); + stride = param->stride; + +EXIT: + return stride; +} + + +static int ia_css_program_param_init( + ia_css_program_param_t *program_param, + int32_t offset) +{ + int retval = -1; + + COMPILATION_ERROR_IF( + SIZE_OF_PROGRAM_PARAM_STRUCT_IN_BITS != + (CHAR_BIT * sizeof(ia_css_program_param_t))); + verifexit(program_param != NULL); + + IA_CSS_TRACE_0(PSYSAPI_PARAM, INFO, + "ia_css_program_param_init(): enter:\n"); + + program_param->size = sizeof(ia_css_program_param_t); + /* parent is at negative offset from current program.*/ + 
program_param->parent_offset = -offset; + /*TODO: Kernel_bitmap setting. ?*/ + retval = 0; +EXIT: + if (retval != 0) { + IA_CSS_TRACE_1(PSYSAPI_PARAM, ERROR, + "ia_css_program_param_init failed (%i)\n", retval); + } + return retval; +} + +static int +ia_css_terminal_param_init(ia_css_terminal_param_t *terminal_param, + uint32_t offset, + enum ia_css_frame_format_type frame_format_type) +{ + int retval = -1; + + COMPILATION_ERROR_IF( + SIZE_OF_TERMINAL_PARAM_STRUCT_IN_BITS != + (CHAR_BIT * sizeof(ia_css_terminal_param_t))); + verifexit(terminal_param != NULL); + + IA_CSS_TRACE_0(PSYSAPI_PARAM, INFO, + "ia_css_terminal_param_init(): enter:\n"); + + terminal_param->size = sizeof(ia_css_terminal_param_t); + /* parent is at negative offset from current program.*/ + terminal_param->parent_offset = -((int32_t)offset); + /*TODO: Kernel_bitmap setting. ?*/ + terminal_param->frame_format_type = frame_format_type; + + retval = 0; +EXIT: + if (retval != 0) { + IA_CSS_TRACE_1(PSYSAPI_PARAM, ERROR, + "ia_css_terminal_param_init failed (%i)\n", retval); + } + return retval; +} + +ia_css_program_group_param_t * +ia_css_terminal_param_get_parent( + const ia_css_terminal_param_t *param) +{ + ia_css_program_group_param_t *parent = NULL; + char *base; + + IA_CSS_TRACE_0(PSYSAPI_PARAM, VERBOSE, + "ia_css_terminal_param_get_parent(): enter:\n"); + + verifexit(NULL != param); + + base = (char *)((char *)param + param->parent_offset); + + parent = (ia_css_program_group_param_t *)(base); +EXIT: + if (NULL == param) { + IA_CSS_TRACE_0(PSYSAPI_PARAM, WARNING, + "ia_css_terminal_param_get_parent invalid argument\n"); + } + return parent; +} + +int ia_css_program_group_param_init( + ia_css_program_group_param_t *blob, + const uint8_t program_count, + const uint8_t terminal_count, + const uint16_t fragment_count, + const enum ia_css_frame_format_type *frame_format_types) +{ + int i = 0; + char *param_base; + uint32_t offset; + int retval = -1; + + COMPILATION_ERROR_IF( + 
SIZE_OF_PROGRAM_GROUP_PARAM_STRUCT_IN_BITS != + (CHAR_BIT * sizeof(ia_css_program_group_param_t))); + + IA_CSS_TRACE_0(PSYSAPI_PARAM, INFO, + "ia_css_program_group_param_init(): enter:\n"); + + assert(blob != 0); + + verifexit(blob != NULL); + verifexit(frame_format_types != NULL); + + blob->program_count = program_count; + blob->fragment_count = fragment_count; + blob->terminal_count = terminal_count; + blob->program_param_offset = sizeof(ia_css_program_group_param_t); + blob->terminal_param_offset = blob->program_param_offset + + sizeof(ia_css_program_param_t) * program_count; + + param_base = (char *)((char *)blob + blob->program_param_offset); + offset = blob->program_param_offset; + + for (i = 0; i < program_count; i++) { + ia_css_program_param_init( + (ia_css_program_param_t *)param_base, offset); + offset += sizeof(ia_css_program_param_t); + param_base += sizeof(ia_css_program_param_t); + } + + param_base = (char *)((char *)blob + blob->terminal_param_offset); + offset = blob->terminal_param_offset; + + for (i = 0; i < terminal_count; i++) { + ia_css_terminal_param_init( + (ia_css_terminal_param_t *)param_base, + offset, + frame_format_types[i]); + + offset += sizeof(ia_css_terminal_param_t); + param_base += sizeof(ia_css_terminal_param_t); + } + + /* + * For now, set legacy flow by default. This can be removed as soon + * as all hosts/drivers explicitly set the protocol version. 
+ */ + blob->protocol_version = IA_CSS_PROCESS_GROUP_PROTOCOL_LEGACY; + + blob->size = (uint32_t)ia_css_sizeof_program_group_param(program_count, + terminal_count, + fragment_count); + retval = 0; +EXIT: + if (retval != 0) { + IA_CSS_TRACE_1(PSYSAPI_PARAM, ERROR, + "ia_css_program_group_param_init failed (%i)\n", retval); + } + return retval; +} + +int ia_css_program_group_param_print( + const ia_css_program_group_param_t *param, + void *fid) +{ + int retval = -1; + int i; + uint8_t program_count, terminal_count; + ia_css_kernel_bitmap_t bitmap; + + IA_CSS_TRACE_0(PSYSAPI_PARAM, INFO, + "ia_css_program_group_param_print(): enter:\n"); + + verifexit(param != NULL); + NOT_USED(fid); + + IA_CSS_TRACE_1(PSYSAPI_PARAM, INFO, + "sizeof(program_group_param) = %d\n", + (int)ia_css_program_group_param_get_size(param)); + + program_count = ia_css_program_group_param_get_program_count(param); + terminal_count = ia_css_program_group_param_get_terminal_count(param); + + bitmap = ia_css_program_group_param_get_kernel_enable_bitmap(param); + verifexit(ia_css_kernel_bitmap_print(bitmap, fid) == 0); + + IA_CSS_TRACE_1(PSYSAPI_PARAM, INFO, + "%d program params\n", (int)program_count); + for (i = 0; i < (int)program_count; i++) { + ia_css_program_param_t *program_param = + ia_css_program_group_param_get_program_param(param, i); + + retval = ia_css_program_param_print(program_param, fid); + verifjmpexit(retval == 0); + } + IA_CSS_TRACE_1(PSYSAPI_PARAM, INFO, "%d terminal params\n", + (int)terminal_count); + for (i = 0; i < (int)terminal_count; i++) { + ia_css_terminal_param_t *terminal_param = + ia_css_program_group_param_get_terminal_param(param, i); + + retval = ia_css_terminal_param_print(terminal_param, fid); + verifjmpexit(retval == 0); + } + + retval = 0; +EXIT: + if (retval != 0) { + IA_CSS_TRACE_1(PSYSAPI_PARAM, ERROR, + "ia_css_program_group_param_print failed (%i)\n", retval); + } + return retval; +} + +int ia_css_terminal_param_print( + const ia_css_terminal_param_t *param, 
+ void *fid) +{ + int retval = -1; + + IA_CSS_TRACE_0(PSYSAPI_PARAM, INFO, + "ia_css_terminal_param_print(): enter:\n"); + + verifexit(param != NULL); + NOT_USED(fid); + + IA_CSS_TRACE_1(PSYSAPI_PARAM, INFO, + "sizeof(terminal_param) = %d\n", + (int)ia_css_terminal_param_get_size(param)); + + IA_CSS_TRACE_1(PSYSAPI_PARAM, INFO, + "\tframe_format_type = %d\n", param->frame_format_type); + + retval = 0; +EXIT: + if (retval != 0) { + IA_CSS_TRACE_1(PSYSAPI_PARAM, ERROR, + "ia_css_terminal_param_print failed (%i)\n", retval); + } + return retval; +} + +int ia_css_program_param_print( + const ia_css_program_param_t *param, + void *fid) +{ + int retval = -1; + ia_css_kernel_bitmap_t bitmap; + + IA_CSS_TRACE_0(PSYSAPI_PARAM, INFO, + "ia_css_program_param_print(): enter:\n"); + + verifexit(param != NULL); + NOT_USED(fid); + + IA_CSS_TRACE_1(PSYSAPI_PARAM, INFO, "sizeof(program_param) = %d\n", + (int)ia_css_program_param_get_size(param)); + + bitmap = ia_css_program_param_get_kernel_enable_bitmap(param); + verifexit(ia_css_kernel_bitmap_print(bitmap, fid) == 0); + + retval = 0; +EXIT: + if (retval != 0) { + IA_CSS_TRACE_1(PSYSAPI_PARAM, ERROR, + "ia_css_program_param_print failed (%i)\n", retval); + } + return retval; +} diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/psysapi/param/src/ia_css_program_group_param_private.h b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/psysapi/param/src/ia_css_program_group_param_private.h new file mode 100644 index 0000000000000..6672737e51a14 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/psysapi/param/src/ia_css_program_group_param_private.h @@ -0,0 +1,80 @@ +/* +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. 
+ * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ + +#ifndef __IA_CSS_PROGRAM_GROUP_PARAM_PRIVATE_H +#define __IA_CSS_PROGRAM_GROUP_PARAM_PRIVATE_H + +#include +#include +#include +#include +#include +#include +#include + +#define N_PADDING_UINT8_IN_PROGRAM_GROUP_PARAM_STRUCT 7 +#define SIZE_OF_PROGRAM_GROUP_PARAM_STRUCT_IN_BITS \ + (IA_CSS_KERNEL_BITMAP_BITS \ + + (3 * IA_CSS_UINT32_T_BITS) \ + + IA_CSS_UINT16_T_BITS \ + + (3 * IA_CSS_UINT8_T_BITS) \ + + (N_PADDING_UINT8_IN_PROGRAM_GROUP_PARAM_STRUCT * IA_CSS_UINT8_T_BITS)) + +/* tentative; co-design with ISP algorithm */ +struct ia_css_program_group_param_s { + /* The enable bits for each individual kernel */ + ia_css_kernel_bitmap_t kernel_enable_bitmap; + /* Size of this structure */ + uint32_t size; + uint32_t program_param_offset; + uint32_t terminal_param_offset; + /* Number of (explicit) fragments to use in a frame */ + uint16_t fragment_count; + /* Number of active programs */ + uint8_t program_count; + /* Number of active terminals */ + uint8_t terminal_count; + /* Program group protocol version */ + uint8_t protocol_version; + uint8_t padding[N_PADDING_UINT8_IN_PROGRAM_GROUP_PARAM_STRUCT]; +}; + +#define SIZE_OF_PROGRAM_PARAM_STRUCT_IN_BITS \ + (IA_CSS_KERNEL_BITMAP_BITS \ + + IA_CSS_UINT32_T_BITS \ + + IA_CSS_INT32_T_BITS) + +/* private */ +struct ia_css_program_param_s { + /* What to use this one for ? */ + ia_css_kernel_bitmap_t kernel_enable_bitmap; + /* Size of this structure */ + uint32_t size; + /* offset to add to reach parent. 
This is negative value.*/ + int32_t parent_offset; +}; + +#define SIZE_OF_TERMINAL_PARAM_STRUCT_IN_BITS \ + (IA_CSS_UINT32_T_BITS \ + + IA_CSS_FRAME_FORMAT_TYPE_BITS \ + + IA_CSS_INT32_T_BITS \ + + (IA_CSS_UINT16_T_BITS * IA_CSS_N_DATA_DIMENSION) \ + + (IA_CSS_UINT16_T_BITS * IA_CSS_N_DATA_DIMENSION) \ + + (IA_CSS_UINT16_T_BITS * IA_CSS_N_DATA_DIMENSION) \ + + IA_CSS_INT32_T_BITS \ + + IA_CSS_UINT16_T_BITS \ + + IA_CSS_UINT8_T_BITS \ + + (IA_CSS_UINT8_T_BITS * 1)) + +#endif /* __IA_CSS_PROGRAM_GROUP_PARAM_PRIVATE_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/psysapi/psys_server_manifest/bxtB0/ia_css_psys_server_manifest.c b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/psysapi/psys_server_manifest/bxtB0/ia_css_psys_server_manifest.c new file mode 100644 index 0000000000000..a2dd8cbd1ba1d --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/psysapi/psys_server_manifest/bxtB0/ia_css_psys_server_manifest.c @@ -0,0 +1,50 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+*/ + +#include "ia_css_psys_server_manifest.h" + +/** + * Manifest of resources in use by PSYS itself + */ + +const vied_nci_resource_spec_t psys_server_manifest = { + /* internal memory */ + { /* resource id size offset*/ + {VIED_NCI_GMEM_TYPE_ID, 0, 0}, + {VIED_NCI_DMEM_TYPE_ID, VIED_NCI_DMEM0_MAX_SIZE, 0}, + {VIED_NCI_VMEM_TYPE_ID, 0, 0}, + {VIED_NCI_BAMEM_TYPE_ID, 0, 0}, + {VIED_NCI_PMEM_TYPE_ID, 0, 0} + }, + /* external memory */ + { /* resource id size offset*/ + {VIED_NCI_N_MEM_ID, 0, 0}, + {VIED_NCI_N_MEM_ID, 0, 0}, + {VIED_NCI_N_MEM_ID, 0, 0}, + {VIED_NCI_N_MEM_ID, 0, 0}, + }, + /* device channel */ + { /* resource id size offset*/ + {VIED_NCI_DEV_CHN_DMA_EXT0_ID, + PSYS_SERVER_DMA_CHANNEL_SIZE, + PSYS_SERVER_DMA_CHANNEL_OFFSET}, + {VIED_NCI_DEV_CHN_GDC_ID, 0, 0}, + {VIED_NCI_DEV_CHN_DMA_EXT1_READ_ID, 0, 0}, + {VIED_NCI_DEV_CHN_DMA_EXT1_WRITE_ID, 0, 0}, + {VIED_NCI_DEV_CHN_DMA_INTERNAL_ID, 0, 0}, + {VIED_NCI_DEV_CHN_DMA_IPFD_ID, 0, 0}, + {VIED_NCI_DEV_CHN_DMA_ISA_ID, 0, 0}, + {VIED_NCI_DEV_CHN_DMA_FW_ID, 0, 0} + } +}; diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/psysapi/psys_server_manifest/bxtB0/ia_css_psys_server_manifest.h b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/psysapi/psys_server_manifest/bxtB0/ia_css_psys_server_manifest.h new file mode 100644 index 0000000000000..b4c7fbc32d5ba --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/psysapi/psys_server_manifest/bxtB0/ia_css_psys_server_manifest.h @@ -0,0 +1,29 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. 
+ * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ + +#ifndef __IA_CSS_PSYS_SERVER_MANIFEST_H +#define __IA_CSS_PSYS_SERVER_MANIFEST_H + +#include "vied_nci_psys_resource_model.h" + +/** + * Manifest of resources in use by PSYS itself + */ + +#define PSYS_SERVER_DMA_CHANNEL_SIZE 2 +#define PSYS_SERVER_DMA_CHANNEL_OFFSET 28 + +extern const vied_nci_resource_spec_t psys_server_manifest; + +#endif /* __IA_CSS_PSYS_SERVER_MANIFEST_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/psysapi/psysapi.mk b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/psysapi/psysapi.mk new file mode 100644 index 0000000000000..e1977cbe2ca2a --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/psysapi/psysapi.mk @@ -0,0 +1,122 @@ +# # # +# Support for Intel Camera Imaging ISP subsystem. +# Copyright (c) 2010 - 2018, Intel Corporation. +# +# This program is free software; you can redistribute it and/or modify it +# under the terms and conditions of the GNU General Public License, +# version 2, as published by the Free Software Foundation. +# +# This program is distributed in the hope it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +# FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU General Public License for +# more details +# +# +# MODULE is PSYSAPI +# +ifdef _H_PSYSAPI_MK +$(error ERROR: psysapi.mk included multiple times, please check makefile) +else +_H_PSYSAPI_MK=1 +endif + +include $(MODULES_DIR)/config/psys/subsystem_$(IPU_SYSVER).mk + +PSYSAPI_DIR = $${MODULES_DIR}/psysapi + +PSYSAPI_PROCESS_HOST_FILES = $(PSYSAPI_DIR)/dynamic/src/ia_css_psys_process.c +PSYSAPI_PROCESS_HOST_FILES += $(PSYSAPI_DIR)/dynamic/src/ia_css_psys_process_group.c +PSYSAPI_PROCESS_HOST_FILES += $(PSYSAPI_DIR)/dynamic/src/ia_css_psys_buffer_set.c +PSYSAPI_PROCESS_HOST_FILES += $(PSYSAPI_DIR)/dynamic/src/ia_css_psys_terminal.c +PSYSAPI_PROCESS_HOST_FILES += $(PSYSAPI_DIR)/param/src/ia_css_program_group_param.c + +# Use PSYS_MANIFEST_HOST_FILES when only accessing manifest functions +PSYSAPI_MANIFEST_HOST_FILES += $(PSYSAPI_DIR)/static/src/ia_css_psys_program_group_manifest.c +PSYSAPI_MANIFEST_HOST_FILES += $(PSYSAPI_DIR)/static/src/ia_css_psys_program_manifest.c +PSYSAPI_MANIFEST_HOST_FILES += $(PSYSAPI_DIR)/static/src/ia_css_psys_terminal_manifest.c +PSYSAPI_MANIFEST_HOST_FILES += $(PSYSAPI_DIR)/sim/src/vied_nci_psys_system.c +PSYSAPI_MANIFEST_HOST_FILES += $(PSYSAPI_DIR)/kernel/src/ia_css_kernel_bitmap.c +PSYSAPI_MANIFEST_HOST_FILES += $(PSYSAPI_DIR)/data/src/ia_css_program_group_data.c +PSYSAPI_MANIFEST_HOST_FILES += $(PSYSAPI_DIR)/resource_model/$(PSYS_RESOURCE_MODEL_VERSION)/vied_nci_psys_resource_model.c +PSYSAPI_MANIFEST_HOST_FILES += $(PSYSAPI_DIR)/psys_server_manifest/$(PSYS_SERVER_MANIFEST_VERSION)/ia_css_psys_server_manifest.c + +# Use only kernel bitmap functionality from PSYS API +PSYSAPI_KERNEL_BITMAP_FILES += $(PSYSAPI_DIR)/kernel/src/ia_css_kernel_bitmap.c +PSYSAPI_KERNEL_BITMAP_CPPFLAGS += -I$(PSYSAPI_DIR)/kernel/interface +PSYSAPI_KERNEL_BITMAP_CPPFLAGS += -I$(PSYSAPI_DIR)/interface + +# Use PSYSAPI_HOST_FILES when program and process group are both needed +PSYSAPI_HOST_FILES = $(PSYSAPI_PROCESS_HOST_FILES) 
$(PSYSAPI_MANIFEST_HOST_FILES) + +# Use PSYSAPI_PROCESS_GROUP_HOST_FILES when program and process group are both needed but there is no +# implementation (yet) of the user customization functions defined in ia_css_psys_process_group_cmd_impl.h. +# Dummy implementations are provided in $(PSYSAPI_DIR)/sim/src/ia_css_psys_process_group_cmd_impl.c +PSYSAPI_PROCESS_GROUP_HOST_FILES = $(PSYSAPI_HOST_FILES) +PSYSAPI_PROCESS_GROUP_HOST_FILES += $(PSYSAPI_DIR)/sim/src/ia_css_psys_process_group_cmd_impl.c + +# for now disabled, implementation for now provided by psys api impl +#PSYSAPI_HOST_FILES += $(PSYSAPI_DIR)/device/src/ia_css_psys_device.c + +PSYSAPI_HOST_CPPFLAGS = -I$(PSYSAPI_DIR)/interface +PSYSAPI_HOST_CPPFLAGS += -I$(PSYSAPI_DIR)/device/interface +PSYSAPI_HOST_CPPFLAGS += -I$(PSYSAPI_DIR)/device/interface/$(IPU_SYSVER) +PSYSAPI_HOST_CPPFLAGS += -I$(PSYSAPI_DIR)/dynamic/interface +PSYSAPI_HOST_CPPFLAGS += -I$(PSYSAPI_DIR)/dynamic/src +PSYSAPI_HOST_CPPFLAGS += -I$(PSYSAPI_DIR)/data/interface +PSYSAPI_HOST_CPPFLAGS += -I$(PSYSAPI_DIR)/data/src +PSYSAPI_HOST_CPPFLAGS += -I$(PSYSAPI_DIR)/static/interface +PSYSAPI_HOST_CPPFLAGS += -I$(PSYSAPI_DIR)/static/src +PSYSAPI_HOST_CPPFLAGS += -I$(PSYSAPI_DIR)/kernel/interface +PSYSAPI_HOST_CPPFLAGS += -I$(PSYSAPI_DIR)/param/interface +PSYSAPI_HOST_CPPFLAGS += -I$(PSYSAPI_DIR)/param/src +PSYSAPI_HOST_CPPFLAGS += -I$(PSYSAPI_DIR)/sim/interface +PSYSAPI_HOST_CPPFLAGS += -I$(PSYSAPI_DIR)/sim/src +PSYSAPI_HOST_CPPFLAGS += -I$(PSYSAPI_DIR)/resource_model/$(PSYS_RESOURCE_MODEL_VERSION) +PSYSAPI_HOST_CPPFLAGS += -I$(PSYSAPI_DIR)/resource_model/$(PSYS_RESOURCE_MODEL_VERSION)/private +PSYSAPI_HOST_CPPFLAGS += -I$(PSYSAPI_DIR)/psys_server_manifest/$(PSYS_SERVER_MANIFEST_VERSION) + +PSYSAPI_FW_CPPFLAGS = $(PSYSAPI_HOST_CPPFLAGS) +PSYSAPI_FW_CPPFLAGS += -I$(PSYSAPI_DIR)/static/interface +PSYSAPI_FW_CPPFLAGS += -I$(PSYSAPI_DIR)/static/src +PSYSAPI_FW_CPPFLAGS += -I$(PSYSAPI_DIR)/resource_model/$(PSYS_RESOURCE_MODEL_VERSION) 
+PSYSAPI_FW_CPPFLAGS += -I$(PSYSAPI_DIR)/resource_model/$(PSYS_RESOURCE_MODEL_VERSION)/private +PSYSAPI_FW_CPPFLAGS += -I$(PSYSAPI_DIR)/psys_server_manifest/$(PSYS_SERVER_MANIFEST_VERSION) +PSYSAPI_SYSTEM_GLOBAL_CPPFLAGS += -I$(PSYSAPI_DIR)/sim/interface +PSYSAPI_SYSTEM_GLOBAL_CPPFLAGS += -I$(PSYSAPI_DIR)/resource_model/$(PSYS_RESOURCE_MODEL_VERSION) +PSYSAPI_SYSTEM_GLOBAL_CPPFLAGS += -I$(PSYSAPI_DIR)/resource_model/$(PSYS_RESOURCE_MODEL_VERSION)/private +PSYSAPI_SYSTEM_GLOBAL_CPPFLAGS += -I$(PSYSAPI_DIR)/psys_server_manifest/$(PSYS_SERVER_MANIFEST_VERSION) + +# Defining the trace level for the PSYSAPI +PSYSAPI_HOST_CPPFLAGS += -DPSYSAPI_TRACE_CONFIG=PSYSAPI_TRACE_LOG_LEVEL_NORMAL +# Enable/Disable 'late binding' support and it's additional queues +PSYSAPI_HOST_CPPFLAGS += -DHAS_LATE_BINDING_SUPPORT=$(PSYS_HAS_LATE_BINDING_SUPPORT) + +#Example: how to switch to a different log level for a sub-module +#PSYSAPI_HOST_CPPFLAGS += -DPSYSAPI_DYNAMIC_TRACING_OVERRIDE=PSYSAPI_TRACE_LOG_LEVEL_DEBUG + +# enable host side implementation +# TODO: better name for the flag to enable the impl... 
+PSYSAPI_HOST_CPPFLAGS += -D__X86_SIM__ + +# Files for Firmware +PSYSAPI_FW_FILES = $(PSYSAPI_DIR)/dynamic/src/ia_css_psys_process.c +PSYSAPI_FW_FILES += $(PSYSAPI_DIR)/dynamic/src/ia_css_psys_process_group.c +PSYSAPI_FW_FILES += $(PSYSAPI_DIR)/dynamic/src/ia_css_psys_terminal.c +PSYSAPI_FW_FILES += $(PSYSAPI_DIR)/dynamic/src/ia_css_psys_buffer_set.c +PSYSAPI_FW_FILES += $(PSYSAPI_DIR)/param/src/ia_css_program_group_param.c +PSYSAPI_FW_FILES += $(PSYSAPI_DIR)/data/src/ia_css_program_group_data.c +PSYSAPI_FW_FILES += $(PSYSAPI_DIR)/sim/src/vied_nci_psys_system.c +PSYSAPI_FW_FILES += $(PSYSAPI_DIR)/sim/src/ia_css_psys_sim_data.c +PSYSAPI_FW_FILES += $(PSYSAPI_DIR)/static/src/ia_css_psys_program_group_manifest.c +PSYSAPI_FW_FILES += $(PSYSAPI_DIR)/static/src/ia_css_psys_program_manifest.c +PSYSAPI_FW_FILES += $(PSYSAPI_DIR)/static/src/ia_css_psys_terminal_manifest.c +PSYSAPI_FW_FILES += $(PSYSAPI_DIR)/resource_model/$(PSYS_RESOURCE_MODEL_VERSION)/vied_nci_psys_resource_model.c +PSYSAPI_FW_FILES += $(PSYSAPI_DIR)/psys_server_manifest/$(PSYS_SERVER_MANIFEST_VERSION)/ia_css_psys_server_manifest.c +PSYSAPI_FW_FILES += $(PSYSAPI_DIR)/kernel/src/ia_css_kernel_bitmap.c + +# resource model +PSYSAPI_RESOURCE_MODEL_FILES = $(PSYSAPI_DIR)/resource_model/$(PSYS_RESOURCE_MODEL_VERSION)/vied_nci_psys_resource_model.c + +ifeq ($(PSYS_HAS_DUAL_CMD_CTX_SUPPORT), 1) +PSYSAPI_HOST_CPPFLAGS += -DHAS_DUAL_CMD_CTX_SUPPORT=$(PSYS_HAS_DUAL_CMD_CTX_SUPPORT) +endif diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/psysapi/resource_model/bxtB0/vied_nci_psys_resource_model.c b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/psysapi/resource_model/bxtB0/vied_nci_psys_resource_model.c new file mode 100644 index 0000000000000..03359e378d9b6 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/psysapi/resource_model/bxtB0/vied_nci_psys_resource_model.c @@ -0,0 +1,322 @@ +/* +* Support for Intel Camera Imaging ISP subsystem. 
+ * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ + +#include "vied_nci_psys_resource_model.h" + +/* + * Cell types by cell IDs + */ +const vied_nci_cell_type_ID_t vied_nci_cell_type[VIED_NCI_N_CELL_ID] = { + VIED_NCI_SP_CTRL_TYPE_ID, + VIED_NCI_SP_SERVER_TYPE_ID, + VIED_NCI_SP_SERVER_TYPE_ID, + VIED_NCI_VP_TYPE_ID, + VIED_NCI_VP_TYPE_ID, + VIED_NCI_VP_TYPE_ID, + VIED_NCI_VP_TYPE_ID, + VIED_NCI_ACC_ISA_TYPE_ID, + VIED_NCI_ACC_PSA_TYPE_ID, + VIED_NCI_ACC_PSA_TYPE_ID, + VIED_NCI_ACC_PSA_TYPE_ID, + VIED_NCI_ACC_PSA_TYPE_ID, + VIED_NCI_ACC_PSA_TYPE_ID, + VIED_NCI_ACC_PSA_TYPE_ID, + VIED_NCI_ACC_OSA_TYPE_ID, + VIED_NCI_GDC_TYPE_ID, + VIED_NCI_GDC_TYPE_ID +}; + +/* + * Memory types by memory IDs + */ +const vied_nci_mem_type_ID_t vied_nci_mem_type[VIED_NCI_N_MEM_ID] = { + VIED_NCI_VMEM_TYPE_ID, + VIED_NCI_VMEM_TYPE_ID, + VIED_NCI_VMEM_TYPE_ID, + VIED_NCI_VMEM_TYPE_ID, + VIED_NCI_GMEM_TYPE_ID,/* VMEM4 is GMEM according to vied_nci_cell_mem */ + VIED_NCI_BAMEM_TYPE_ID, + VIED_NCI_BAMEM_TYPE_ID, + VIED_NCI_BAMEM_TYPE_ID, + VIED_NCI_BAMEM_TYPE_ID, + VIED_NCI_DMEM_TYPE_ID, + VIED_NCI_DMEM_TYPE_ID, + VIED_NCI_DMEM_TYPE_ID, + VIED_NCI_DMEM_TYPE_ID, + VIED_NCI_DMEM_TYPE_ID, + VIED_NCI_DMEM_TYPE_ID, + VIED_NCI_DMEM_TYPE_ID, + VIED_NCI_DMEM_TYPE_ID, + VIED_NCI_PMEM_TYPE_ID, + VIED_NCI_PMEM_TYPE_ID, + VIED_NCI_PMEM_TYPE_ID, + VIED_NCI_PMEM_TYPE_ID +}; + +/* + * Cell mem count by cell type ID + */ +const uint16_t vied_nci_N_cell_mem[VIED_NCI_N_CELL_TYPE_ID] = { + VIED_NCI_N_SP_CTRL_MEM, + VIED_NCI_N_SP_SERVER_MEM, + 
VIED_NCI_N_VP_MEM, + VIED_NCI_N_ACC_PSA_MEM, + VIED_NCI_N_ACC_ISA_MEM, + VIED_NCI_N_ACC_OSA_MEM +}; + +/* + * Cell mem type by cell type ID and memory index + */ +const vied_nci_mem_type_ID_t +vied_nci_cell_mem_type[VIED_NCI_N_CELL_TYPE_ID][VIED_NCI_N_MEM_TYPE_ID] = { + { + VIED_NCI_N_MEM_TYPE_ID, + VIED_NCI_DMEM_TYPE_ID, + VIED_NCI_N_MEM_TYPE_ID, + VIED_NCI_N_MEM_TYPE_ID, + VIED_NCI_N_MEM_TYPE_ID + }, + { + VIED_NCI_N_MEM_TYPE_ID, + VIED_NCI_DMEM_TYPE_ID, + VIED_NCI_N_MEM_TYPE_ID, + VIED_NCI_N_MEM_TYPE_ID, + VIED_NCI_N_MEM_TYPE_ID + }, + { + VIED_NCI_GMEM_TYPE_ID, + VIED_NCI_DMEM_TYPE_ID, + VIED_NCI_VMEM_TYPE_ID, + VIED_NCI_BAMEM_TYPE_ID, + VIED_NCI_PMEM_TYPE_ID + }, + { + VIED_NCI_N_MEM_TYPE_ID, + VIED_NCI_N_MEM_TYPE_ID, + VIED_NCI_N_MEM_TYPE_ID, + VIED_NCI_N_MEM_TYPE_ID, + VIED_NCI_N_MEM_TYPE_ID + }, + { + VIED_NCI_N_MEM_TYPE_ID, + VIED_NCI_N_MEM_TYPE_ID, + VIED_NCI_N_MEM_TYPE_ID, + VIED_NCI_N_MEM_TYPE_ID, + VIED_NCI_N_MEM_TYPE_ID + }, + { + VIED_NCI_N_MEM_TYPE_ID, + VIED_NCI_N_MEM_TYPE_ID, + VIED_NCI_N_MEM_TYPE_ID, + VIED_NCI_N_MEM_TYPE_ID, + VIED_NCI_N_MEM_TYPE_ID + }, + { + VIED_NCI_N_MEM_TYPE_ID, + VIED_NCI_N_MEM_TYPE_ID, + VIED_NCI_N_MEM_TYPE_ID, + VIED_NCI_N_MEM_TYPE_ID, + VIED_NCI_N_MEM_TYPE_ID + } +}; + +/* + * Ext mem ID by memory index + */ +const vied_nci_mem_ID_t +vied_nci_ext_mem[VIED_NCI_N_MEM_TYPE_ID] = { + VIED_NCI_VMEM4_ID, /* VIED_NCI_GMEM_TYPE_ID */ + VIED_NCI_N_MEM_ID, + VIED_NCI_N_MEM_ID, + VIED_NCI_N_MEM_ID, + VIED_NCI_N_MEM_ID +}; + +/* + * Cell mem ID by cell ID and memory index + */ +const vied_nci_mem_ID_t +vied_nci_cell_mem[VIED_NCI_N_CELL_ID][VIED_NCI_N_MEM_TYPE_ID] = { + { + VIED_NCI_N_MEM_ID, + VIED_NCI_DMEM0_ID, + VIED_NCI_N_MEM_ID, + VIED_NCI_N_MEM_ID, + VIED_NCI_N_MEM_ID + }, + { + VIED_NCI_N_MEM_ID, + VIED_NCI_DMEM1_ID, + VIED_NCI_N_MEM_ID, + VIED_NCI_N_MEM_ID, + VIED_NCI_N_MEM_ID + }, + { + VIED_NCI_N_MEM_ID, + VIED_NCI_DMEM2_ID, + VIED_NCI_N_MEM_ID, + VIED_NCI_N_MEM_ID, + VIED_NCI_N_MEM_ID + }, + { + VIED_NCI_VMEM4_ID, + 
VIED_NCI_DMEM4_ID, + VIED_NCI_VMEM0_ID, + VIED_NCI_BAMEM0_ID, + VIED_NCI_PMEM0_ID + }, + { + VIED_NCI_VMEM4_ID, + VIED_NCI_DMEM5_ID, + VIED_NCI_VMEM1_ID, + VIED_NCI_BAMEM1_ID, + VIED_NCI_PMEM1_ID + }, + { + VIED_NCI_VMEM4_ID, + VIED_NCI_DMEM6_ID, + VIED_NCI_VMEM2_ID, + VIED_NCI_BAMEM2_ID, + VIED_NCI_PMEM2_ID + }, + { + VIED_NCI_VMEM4_ID, + VIED_NCI_DMEM7_ID, + VIED_NCI_VMEM3_ID, + VIED_NCI_BAMEM3_ID, + VIED_NCI_PMEM3_ID + }, + { + VIED_NCI_N_MEM_ID, + VIED_NCI_N_MEM_ID, + VIED_NCI_N_MEM_ID, + VIED_NCI_N_MEM_ID, + VIED_NCI_N_MEM_ID + }, + { + VIED_NCI_N_MEM_ID, + VIED_NCI_N_MEM_ID, + VIED_NCI_N_MEM_ID, + VIED_NCI_N_MEM_ID, + VIED_NCI_N_MEM_ID + }, + { + VIED_NCI_N_MEM_ID, + VIED_NCI_N_MEM_ID, + VIED_NCI_N_MEM_ID, + VIED_NCI_N_MEM_ID, + VIED_NCI_N_MEM_ID + }, + { + VIED_NCI_N_MEM_ID, + VIED_NCI_N_MEM_ID, + VIED_NCI_N_MEM_ID, + VIED_NCI_N_MEM_ID, + VIED_NCI_N_MEM_ID + }, + { + VIED_NCI_N_MEM_ID, + VIED_NCI_N_MEM_ID, + VIED_NCI_N_MEM_ID, + VIED_NCI_N_MEM_ID, + VIED_NCI_N_MEM_ID + }, + { + VIED_NCI_N_MEM_ID, + VIED_NCI_N_MEM_ID, + VIED_NCI_N_MEM_ID, + VIED_NCI_N_MEM_ID, + VIED_NCI_N_MEM_ID + }, + { + VIED_NCI_N_MEM_ID, + VIED_NCI_N_MEM_ID, + VIED_NCI_N_MEM_ID, + VIED_NCI_N_MEM_ID, + VIED_NCI_N_MEM_ID + }, + { + VIED_NCI_N_MEM_ID, + VIED_NCI_N_MEM_ID, + VIED_NCI_N_MEM_ID, + VIED_NCI_N_MEM_ID, + VIED_NCI_N_MEM_ID + }, + { + VIED_NCI_N_MEM_ID, + VIED_NCI_N_MEM_ID, + VIED_NCI_N_MEM_ID, + VIED_NCI_N_MEM_ID, + VIED_NCI_N_MEM_ID + }, + { + VIED_NCI_N_MEM_ID, + VIED_NCI_N_MEM_ID, + VIED_NCI_N_MEM_ID, + VIED_NCI_N_MEM_ID, + VIED_NCI_N_MEM_ID + } +}; + +/* + * Memory sizes by mem ID + */ +const uint16_t vied_nci_mem_size[VIED_NCI_N_MEM_ID] = { + VIED_NCI_VMEM0_MAX_SIZE, + VIED_NCI_VMEM1_MAX_SIZE, + VIED_NCI_VMEM2_MAX_SIZE, + VIED_NCI_VMEM3_MAX_SIZE, + VIED_NCI_VMEM4_MAX_SIZE, + VIED_NCI_BAMEM0_MAX_SIZE, + VIED_NCI_BAMEM1_MAX_SIZE, + VIED_NCI_BAMEM2_MAX_SIZE, + VIED_NCI_BAMEM3_MAX_SIZE, + VIED_NCI_DMEM0_MAX_SIZE, + VIED_NCI_DMEM1_MAX_SIZE, + VIED_NCI_DMEM2_MAX_SIZE, + 
VIED_NCI_DMEM3_MAX_SIZE, + VIED_NCI_DMEM4_MAX_SIZE, + VIED_NCI_DMEM5_MAX_SIZE, + VIED_NCI_DMEM6_MAX_SIZE, + VIED_NCI_DMEM7_MAX_SIZE, + VIED_NCI_PMEM0_MAX_SIZE, + VIED_NCI_PMEM1_MAX_SIZE, + VIED_NCI_PMEM2_MAX_SIZE, + VIED_NCI_PMEM3_MAX_SIZE +}; + +/* + * Memory word sizes by mem type ID + */ +const uint16_t vied_nci_mem_word_size[VIED_NCI_N_DATA_MEM_TYPE_ID] = { + VIED_NCI_GMEM_WORD_SIZE, + VIED_NCI_DMEM_WORD_SIZE, + VIED_NCI_VMEM_WORD_SIZE, + VIED_NCI_BAMEM_WORD_SIZE +}; + +/* + * Number of channels by device ID + */ +const uint16_t vied_nci_dev_chn_size[VIED_NCI_N_DEV_CHN_ID] = { + VIED_NCI_DEV_CHN_DMA_EXT0_MAX_SIZE, + VIED_NCI_DEV_CHN_GDC_MAX_SIZE, + VIED_NCI_DEV_CHN_DMA_EXT1_READ_MAX_SIZE, + VIED_NCI_DEV_CHN_DMA_EXT1_WRITE_MAX_SIZE, + VIED_NCI_DEV_CHN_DMA_INTERNAL_MAX_SIZE, + VIED_NCI_DEV_CHN_DMA_IPFD_MAX_SIZE, + VIED_NCI_DEV_CHN_DMA_ISA_MAX_SIZE, + VIED_NCI_DEV_CHN_DMA_FW_MAX_SIZE +}; diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/psysapi/resource_model/bxtB0/vied_nci_psys_resource_model.h b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/psysapi/resource_model/bxtB0/vied_nci_psys_resource_model.h new file mode 100644 index 0000000000000..1cb4e010d55d0 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/psysapi/resource_model/bxtB0/vied_nci_psys_resource_model.h @@ -0,0 +1,300 @@ +/* +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+*/ + +#ifndef __VIED_NCI_PSYS_RESOURCE_MODEL_H +#define __VIED_NCI_PSYS_RESOURCE_MODEL_H + +#include "type_support.h" +#include "storage_class.h" + +#define HAS_DFM 0 +#define NON_RELOC_RESOURCE_SUPPORT 0 +#define IA_CSS_KERNEL_BITMAP_DO_NOT_USE_ELEMS + +/* Defines for the routing bitmap in the program group manifest. + */ +#define VIED_NCI_RBM_MAX_MUX_COUNT 0 +#define VIED_NCI_RBM_MAX_VALIDATION_RULE_COUNT 0 +#define VIED_NCI_RBM_MAX_TERMINAL_DESC_COUNT 0 +#define N_PADDING_UINT8_IN_RBM_MANIFEST 2 + +/* The amount of padding bytes needed to make + * ia_css_process_s structure 64 bit aligned + */ +#define N_PADDING_UINT8_IN_PROCESS_STRUCT 4 +#define N_PADDING_UINT8_IN_PROGRAM_GROUP_MANFEST 4 + +/** + * Resource model for BXT B0 + */ + +/* + * Cell IDs + */ +typedef enum { + VIED_NCI_SP0_ID = 0, + VIED_NCI_SP1_ID, + VIED_NCI_SP2_ID, + VIED_NCI_VP0_ID, + VIED_NCI_VP1_ID, + VIED_NCI_VP2_ID, + VIED_NCI_VP3_ID, + VIED_NCI_ACC0_ID, + VIED_NCI_ACC1_ID, + VIED_NCI_ACC2_ID, + VIED_NCI_ACC3_ID, + VIED_NCI_ACC4_ID, + VIED_NCI_ACC5_ID, + VIED_NCI_ACC6_ID, + VIED_NCI_ACC7_ID, + VIED_NCI_GDC0_ID, + VIED_NCI_GDC1_ID, + VIED_NCI_N_CELL_ID +} vied_nci_cell_ID_t; + +/* + * Barrier bits (to model process group dependencies) + */ +typedef enum { + VIED_NCI_BARRIER0_ID, + VIED_NCI_BARRIER1_ID, + VIED_NCI_BARRIER2_ID, + VIED_NCI_BARRIER3_ID, + VIED_NCI_BARRIER4_ID, + VIED_NCI_BARRIER5_ID, + VIED_NCI_BARRIER6_ID, + VIED_NCI_BARRIER7_ID, + VIED_NCI_N_BARRIER_ID +} vied_nci_barrier_ID_t; + +/* + * Cell types + */ +typedef enum { + VIED_NCI_SP_CTRL_TYPE_ID = 0, + VIED_NCI_SP_SERVER_TYPE_ID, + VIED_NCI_VP_TYPE_ID, + VIED_NCI_ACC_PSA_TYPE_ID, + VIED_NCI_ACC_ISA_TYPE_ID, + VIED_NCI_ACC_OSA_TYPE_ID, + VIED_NCI_GDC_TYPE_ID, + VIED_NCI_N_CELL_TYPE_ID +} vied_nci_cell_type_ID_t; + +/* + * Memory IDs + */ +typedef enum { + VIED_NCI_VMEM0_ID = 0, + VIED_NCI_VMEM1_ID, + VIED_NCI_VMEM2_ID, + VIED_NCI_VMEM3_ID, + VIED_NCI_VMEM4_ID, + VIED_NCI_BAMEM0_ID, + VIED_NCI_BAMEM1_ID, + VIED_NCI_BAMEM2_ID, + 
VIED_NCI_BAMEM3_ID, + VIED_NCI_DMEM0_ID, + VIED_NCI_DMEM1_ID, + VIED_NCI_DMEM2_ID, + VIED_NCI_DMEM3_ID, + VIED_NCI_DMEM4_ID, + VIED_NCI_DMEM5_ID, + VIED_NCI_DMEM6_ID, + VIED_NCI_DMEM7_ID, + VIED_NCI_PMEM0_ID, + VIED_NCI_PMEM1_ID, + VIED_NCI_PMEM2_ID, + VIED_NCI_PMEM3_ID, + VIED_NCI_N_MEM_ID +} vied_nci_mem_ID_t; + +/* + * Memory types + */ +typedef enum { + VIED_NCI_GMEM_TYPE_ID = 0, + VIED_NCI_DMEM_TYPE_ID, + VIED_NCI_VMEM_TYPE_ID, + VIED_NCI_BAMEM_TYPE_ID, + VIED_NCI_PMEM_TYPE_ID, + VIED_NCI_N_MEM_TYPE_ID +} vied_nci_mem_type_ID_t; + +/* Excluding PMEM */ +#define VIED_NCI_N_DATA_MEM_TYPE_ID (VIED_NCI_N_MEM_TYPE_ID - 1) + +#define VIED_NCI_N_SP_CTRL_MEM 2 +#define VIED_NCI_N_SP_SERVER_MEM 2 +#define VIED_NCI_N_VP_MEM 4 +#define VIED_NCI_N_ACC_PSA_MEM 0 +#define VIED_NCI_N_ACC_ISA_MEM 0 +#define VIED_NCI_N_ACC_OSA_MEM 0 + +#define VIED_NCI_N_VP_CELL 4 +#define VIED_NCI_N_ACC_CELL 8 + +/* + * Device IDs + */ +typedef enum { + VIED_NCI_DEV_CHN_DMA_EXT0_ID = 0, + VIED_NCI_DEV_CHN_GDC_ID, + VIED_NCI_DEV_CHN_DMA_EXT1_READ_ID, + VIED_NCI_DEV_CHN_DMA_EXT1_WRITE_ID, + VIED_NCI_DEV_CHN_DMA_INTERNAL_ID, + VIED_NCI_DEV_CHN_DMA_IPFD_ID, + VIED_NCI_DEV_CHN_DMA_ISA_ID, + VIED_NCI_DEV_CHN_DMA_FW_ID, + VIED_NCI_N_DEV_CHN_ID +} vied_nci_dev_chn_ID_t; + +typedef enum { + DFM_IS_NOT_AVAILABLE +} vied_nci_dev_dfm_id_t; + +#define VIED_NCI_N_DEV_DFM_ID 0 + + +/* + * Memory size (previously in vied_nci_psys_system.c) + * VMEM: in words, 64 Byte per word. + * BAMEM: in words, 64 Byte per word + * DMEM: in words, 4 Byte per word. + * PMEM: in words, 64 Byte per word. 
+ */ +#define VIED_NCI_GMEM_WORD_SIZE 64 +#define VIED_NCI_DMEM_WORD_SIZE 4 +#define VIED_NCI_VMEM_WORD_SIZE 64 +#define VIED_NCI_BAMEM_WORD_SIZE 64 + +#define VIED_NCI_VMEM0_MAX_SIZE (0x0800) +#define VIED_NCI_VMEM1_MAX_SIZE (0x0800) +#define VIED_NCI_VMEM2_MAX_SIZE (0x0800) +#define VIED_NCI_VMEM3_MAX_SIZE (0x0800) +#define VIED_NCI_VMEM4_MAX_SIZE (0x0800) +#define VIED_NCI_BAMEM0_MAX_SIZE (0x0400) +#define VIED_NCI_BAMEM1_MAX_SIZE (0x0400) +#define VIED_NCI_BAMEM2_MAX_SIZE (0x0400) +#define VIED_NCI_BAMEM3_MAX_SIZE (0x0400) +#define VIED_NCI_DMEM0_MAX_SIZE (0x4000) +#define VIED_NCI_DMEM1_MAX_SIZE (0x1000) +#define VIED_NCI_DMEM2_MAX_SIZE (0x1000) +#define VIED_NCI_DMEM3_MAX_SIZE (0x1000) +#define VIED_NCI_DMEM4_MAX_SIZE (0x1000) +#define VIED_NCI_DMEM5_MAX_SIZE (0x1000) +#define VIED_NCI_DMEM6_MAX_SIZE (0x1000) +#define VIED_NCI_DMEM7_MAX_SIZE (0x1000) +#define VIED_NCI_PMEM0_MAX_SIZE (0x0500) +#define VIED_NCI_PMEM1_MAX_SIZE (0x0500) +#define VIED_NCI_PMEM2_MAX_SIZE (0x0500) +#define VIED_NCI_PMEM3_MAX_SIZE (0x0500) + +/* + * Number of channels per device + */ +#define VIED_NCI_DEV_CHN_DMA_EXT0_MAX_SIZE (30) +#define VIED_NCI_DEV_CHN_GDC_MAX_SIZE (4) +#define VIED_NCI_DEV_CHN_DMA_EXT1_READ_MAX_SIZE (30) +#define VIED_NCI_DEV_CHN_DMA_EXT1_WRITE_MAX_SIZE (20) +#define VIED_NCI_DEV_CHN_DMA_INTERNAL_MAX_SIZE (2) +#define VIED_NCI_DEV_CHN_DMA_IPFD_MAX_SIZE (5) +#define VIED_NCI_DEV_CHN_DMA_ISA_MAX_SIZE (2) +#define VIED_NCI_DEV_CHN_DMA_FW_MAX_SIZE (1) + +/* + * Storage of the resource and resource type enumerators + */ +#define VIED_NCI_RESOURCE_ID_BITS 8 +typedef uint8_t vied_nci_resource_id_t; + +#define VIED_NCI_RESOURCE_SIZE_BITS 16 +typedef uint16_t vied_nci_resource_size_t; + +#define VIED_NCI_RESOURCE_BITMAP_BITS 32 +typedef uint32_t vied_nci_resource_bitmap_t; + +#define IA_CSS_PROCESS_INVALID_DEPENDENCY ((vied_nci_resource_id_t)(-1)) +#define IA_CSS_PROCESS_INVALID_OFFSET ((vied_nci_resource_size_t)(-1)) +#define IA_CSS_PROCESS_MAX_CELLS 1 + +/* + * 
Resource specifications + * Note that the FAS uses the terminology local/remote memory. In the PSYS API, + * these are called internal/external memory. + */ + +/* resource spec for internal (local) memory */ +struct vied_nci_resource_spec_int_mem_s { + vied_nci_resource_id_t type_id; + vied_nci_resource_size_t size; + vied_nci_resource_size_t offset; +}; + +typedef struct vied_nci_resource_spec_int_mem_s + vied_nci_resource_spec_int_mem_t; + +/* resource spec for external (remote) memory */ +struct vied_nci_resource_spec_ext_mem_s { + vied_nci_resource_id_t type_id; + vied_nci_resource_size_t size; + vied_nci_resource_size_t offset; +}; + +typedef struct vied_nci_resource_spec_ext_mem_s + vied_nci_resource_spec_ext_mem_t; + +/* resource spec for device channel */ +struct vied_nci_resource_spec_dev_chn_s { + vied_nci_resource_id_t type_id; + vied_nci_resource_size_t size; + vied_nci_resource_size_t offset; +}; + +typedef struct vied_nci_resource_spec_dev_chn_s + vied_nci_resource_spec_dev_chn_t; + +/* resource spec for all contiguous resources */ +struct vied_nci_resource_spec_s { + vied_nci_resource_spec_int_mem_t int_mem[VIED_NCI_N_MEM_TYPE_ID]; + vied_nci_resource_spec_ext_mem_t ext_mem[VIED_NCI_N_DATA_MEM_TYPE_ID]; + vied_nci_resource_spec_dev_chn_t dev_chn[VIED_NCI_N_DEV_CHN_ID]; +}; + +typedef struct vied_nci_resource_spec_s vied_nci_resource_spec_t; + +#ifndef PIPE_GENERATION + +extern const vied_nci_cell_type_ID_t vied_nci_cell_type[VIED_NCI_N_CELL_ID]; +extern const vied_nci_mem_type_ID_t vied_nci_mem_type[VIED_NCI_N_MEM_ID]; +extern const uint16_t vied_nci_N_cell_mem[VIED_NCI_N_CELL_TYPE_ID]; +extern const vied_nci_mem_type_ID_t + vied_nci_cell_mem_type[VIED_NCI_N_CELL_TYPE_ID][VIED_NCI_N_MEM_TYPE_ID]; +extern const vied_nci_mem_ID_t + vied_nci_ext_mem[VIED_NCI_N_MEM_TYPE_ID]; +extern const vied_nci_mem_ID_t + vied_nci_cell_mem[VIED_NCI_N_CELL_ID][VIED_NCI_N_MEM_TYPE_ID]; +extern const uint16_t vied_nci_mem_size[VIED_NCI_N_MEM_ID]; +extern const uint16_t 
vied_nci_mem_word_size[VIED_NCI_N_DATA_MEM_TYPE_ID]; +extern const uint16_t vied_nci_dev_chn_size[VIED_NCI_N_DEV_CHN_ID]; + +STORAGE_CLASS_INLINE +uint32_t vied_nci_mem_is_ext_type(const vied_nci_mem_type_ID_t mem_type_id) +{ + return((mem_type_id == VIED_NCI_GMEM_TYPE_ID)); +} + +#endif /* PIPE_GENERATION */ + +#endif /* __VIED_NCI_PSYS_RESOURCE_MODEL_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/psysapi/sim/interface/ia_css_psys_sim_data.h b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/psysapi/sim/interface/ia_css_psys_sim_data.h new file mode 100644 index 0000000000000..5b053a27686bd --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/psysapi/sim/interface/ia_css_psys_sim_data.h @@ -0,0 +1,50 @@ +/* +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ + +#ifndef __IA_CSS_PSYS_SIM_DATA_H +#define __IA_CSS_PSYS_SIM_DATA_H + +/*! Set the seed of the random number generator + + @param seed[in] Random number generator seed + */ +extern void ia_css_psys_ran_set_seed(const unsigned int seed); + +/*! Generate a random number of a specified bit depth + + @param bit_depth[in] The number of bits of the random output + + @return out, weight(out) <= bit_depth, 0 on error + */ +extern unsigned int ia_css_psys_ran_var(const unsigned int bit_depth); + +/*!
Generate a random number of a specified range + + @param range[in] The range of the random output + + @return 0 <= out < range, 0 on error + */ +extern unsigned int ia_css_psys_ran_val(const unsigned int range); + +/*! Generate a random number in a specified interval + + @param lo[in] The lower bound of the random output range + @param hi[in] The higher bound of the random output range + + @return lo <= out < hi, 0 on error + */ +extern unsigned int ia_css_psys_ran_interval(const unsigned int lo, + const unsigned int hi); + +#endif /* __IA_CSS_PSYS_SIM_DATA_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/psysapi/sim/interface/ia_css_psys_sim_storage_class.h b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/psysapi/sim/interface/ia_css_psys_sim_storage_class.h new file mode 100644 index 0000000000000..61095257ec550 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/psysapi/sim/interface/ia_css_psys_sim_storage_class.h @@ -0,0 +1,28 @@ +/* +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+*/ + +#ifndef __IA_CSS_PSYS_SIM_STORAGE_CLASS_H +#define __IA_CSS_PSYS_SIM_STORAGE_CLASS_H + +#include "storage_class.h" + +#ifndef __IA_CSS_PSYS_SIM_INLINE__ +#define IA_CSS_PSYS_SIM_STORAGE_CLASS_H STORAGE_CLASS_EXTERN +#define IA_CSS_PSYS_SIM_STORAGE_CLASS_C +#else +#define IA_CSS_PSYS_SIM_STORAGE_CLASS_H STORAGE_CLASS_INLINE +#define IA_CSS_PSYS_SIM_STORAGE_CLASS_C STORAGE_CLASS_INLINE +#endif + +#endif /* __IA_CSS_PSYS_SIM_STORAGE_CLASS_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/psysapi/sim/interface/ia_css_psys_sim_trace.h b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/psysapi/sim/interface/ia_css_psys_sim_trace.h new file mode 100644 index 0000000000000..423ff19802707 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/psysapi/sim/interface/ia_css_psys_sim_trace.h @@ -0,0 +1,95 @@ +/* +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+*/ + +#ifndef __IA_CSS_PSYS_SIM_TRACE_H +#define __IA_CSS_PSYS_SIM_TRACE_H + +#include "ia_css_psysapi_trace.h" + +#define PSYS_SIM_TRACE_LEVEL_CONFIG_DEFAULT PSYSAPI_TRACE_LOG_LEVEL_OFF + +/* Default sub-module tracing config */ +#if (!defined(PSYSAPI_SIM_TRACING_OVERRIDE)) + #define PSYS_SIM_TRACE_LEVEL_CONFIG PSYS_SIM_TRACE_LEVEL_CONFIG_DEFAULT +#endif + +/* Module/sub-module specific trace setting will be used if + * the trace level is not specified from the module or + PSYSAPI_SIM_TRACING_OVERRIDE is defined + */ +#if (defined(PSYSAPI_SIM_TRACING_OVERRIDE)) + /* Module/sub-module specific trace setting */ + #if PSYSAPI_SIM_TRACING_OVERRIDE == PSYSAPI_TRACE_LOG_LEVEL_OFF + /* PSYSAPI_TRACE_LOG_LEVEL_OFF */ + #define PSYSAPI_SIM_TRACE_METHOD \ + IA_CSS_TRACE_METHOD_NATIVE + #define PSYSAPI_SIM_TRACE_LEVEL_ASSERT \ + IA_CSS_TRACE_LEVEL_DISABLED + #define PSYSAPI_SIM_TRACE_LEVEL_ERROR \ + IA_CSS_TRACE_LEVEL_DISABLED + #define PSYSAPI_SIM_TRACE_LEVEL_WARNING \ + IA_CSS_TRACE_LEVEL_DISABLED + #define PSYSAPI_SIM_TRACE_LEVEL_INFO \ + IA_CSS_TRACE_LEVEL_DISABLED + #define PSYSAPI_SIM_TRACE_LEVEL_DEBUG \ + IA_CSS_TRACE_LEVEL_DISABLED + #define PSYSAPI_SIM_TRACE_LEVEL_VERBOSE \ + IA_CSS_TRACE_LEVEL_DISABLED + #elif PSYSAPI_SIM_TRACING_OVERRIDE == PSYSAPI_TRACE_LOG_LEVEL_NORMAL + /* PSYSAPI_TRACE_LOG_LEVEL_NORMAL */ + #define PSYSAPI_SIM_TRACE_METHOD \ + IA_CSS_TRACE_METHOD_NATIVE + #define PSYSAPI_SIM_TRACE_LEVEL_ASSERT \ + IA_CSS_TRACE_LEVEL_DISABLED + #define PSYSAPI_SIM_TRACE_LEVEL_ERROR \ + IA_CSS_TRACE_LEVEL_ENABLED + #define PSYSAPI_SIM_TRACE_LEVEL_WARNING \ + IA_CSS_TRACE_LEVEL_DISABLED + #define PSYSAPI_SIM_TRACE_LEVEL_INFO \ + IA_CSS_TRACE_LEVEL_ENABLED + #define PSYSAPI_SIM_TRACE_LEVEL_DEBUG \ + IA_CSS_TRACE_LEVEL_DISABLED + #define PSYSAPI_SIM_TRACE_LEVEL_VERBOSE \ + IA_CSS_TRACE_LEVEL_DISABLED + #elif PSYSAPI_SIM_TRACING_OVERRIDE == PSYSAPI_TRACE_LOG_LEVEL_DEBUG + /* PSYSAPI_TRACE_LOG_LEVEL_DEBUG */ + #define PSYSAPI_SIM_TRACE_METHOD \ + 
IA_CSS_TRACE_METHOD_NATIVE + #define PSYSAPI_SIM_TRACE_LEVEL_ASSERT \ + IA_CSS_TRACE_LEVEL_ENABLED + #define PSYSAPI_SIM_TRACE_LEVEL_ERROR \ + IA_CSS_TRACE_LEVEL_ENABLED + #define PSYSAPI_SIM_TRACE_LEVEL_WARNING \ + IA_CSS_TRACE_LEVEL_ENABLED + #define PSYSAPI_SIM_TRACE_LEVEL_INFO \ + IA_CSS_TRACE_LEVEL_ENABLED + #define PSYSAPI_SIM_TRACE_LEVEL_DEBUG \ + IA_CSS_TRACE_LEVEL_ENABLED + #define PSYSAPI_SIM_TRACE_LEVEL_VERBOSE \ + IA_CSS_TRACE_LEVEL_ENABLED + #else + #error "No PSYSAPI_SIM Tracing level defined" + #endif +#else + /* Inherit Module trace setting */ + #define PSYSAPI_SIM_TRACE_METHOD PSYSAPI_TRACE_METHOD + #define PSYSAPI_SIM_TRACE_LEVEL_ASSERT PSYSAPI_TRACE_LEVEL_ASSERT + #define PSYSAPI_SIM_TRACE_LEVEL_ERROR PSYSAPI_TRACE_LEVEL_ERROR + #define PSYSAPI_SIM_TRACE_LEVEL_WARNING PSYSAPI_TRACE_LEVEL_WARNING + #define PSYSAPI_SIM_TRACE_LEVEL_INFO PSYSAPI_TRACE_LEVEL_INFO + #define PSYSAPI_SIM_TRACE_LEVEL_DEBUG PSYSAPI_TRACE_LEVEL_DEBUG + #define PSYSAPI_SIM_TRACE_LEVEL_VERBOSE PSYSAPI_TRACE_LEVEL_VERBOSE +#endif + +#endif /* __IA_CSS_PSYS_SIM_TRACE_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/psysapi/sim/interface/vied_nci_psys_system_global.h b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/psysapi/sim/interface/vied_nci_psys_system_global.h new file mode 100644 index 0000000000000..529bea763cc2a --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/psysapi/sim/interface/vied_nci_psys_system_global.h @@ -0,0 +1,180 @@ +/* +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation.
+ * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ + +#ifndef __VIED_NCI_PSYS_SYSTEM_GLOBAL_H +#define __VIED_NCI_PSYS_SYSTEM_GLOBAL_H + +#include +#include "ia_css_base_types.h" +#include "ia_css_psys_sim_storage_class.h" +#include "vied_nci_psys_resource_model.h" + +/* + * Key system types + */ +/* Subsystem internal physical address */ +#define VIED_ADDRESS_BITS 32 + +/* typedef uint32_t vied_address_t; */ + +/* Subsystem internal virtual address */ + +/* Subsystem internal data bus */ +#define VIED_DATA_BITS 32 +typedef uint32_t vied_data_t; + +#define VIED_NULL ((vied_vaddress_t)0) + +IA_CSS_PSYS_SIM_STORAGE_CLASS_H +vied_nci_resource_bitmap_t vied_nci_bit_mask( + const unsigned index); + +IA_CSS_PSYS_SIM_STORAGE_CLASS_H +vied_nci_resource_bitmap_t vied_nci_bitmap_set( + const vied_nci_resource_bitmap_t bitmap, + const vied_nci_resource_bitmap_t bit_mask); + +IA_CSS_PSYS_SIM_STORAGE_CLASS_H +vied_nci_resource_bitmap_t vied_nci_bitmap_clear( + const vied_nci_resource_bitmap_t bitmap, + const vied_nci_resource_bitmap_t bit_mask); + +IA_CSS_PSYS_SIM_STORAGE_CLASS_H +bool vied_nci_is_bitmap_empty( + const vied_nci_resource_bitmap_t bitmap); + +IA_CSS_PSYS_SIM_STORAGE_CLASS_H +bool vied_nci_is_bitmap_set( + const vied_nci_resource_bitmap_t bitmap, + const vied_nci_resource_bitmap_t bit_mask); + +IA_CSS_PSYS_SIM_STORAGE_CLASS_H +bool vied_nci_is_bit_set_in_bitmap( + const vied_nci_resource_bitmap_t bitmap, + const unsigned int index); + +IA_CSS_PSYS_SIM_STORAGE_CLASS_H +bool vied_nci_is_bitmap_clear( + const vied_nci_resource_bitmap_t bitmap, + const vied_nci_resource_bitmap_t bit_mask); + +IA_CSS_PSYS_SIM_STORAGE_CLASS_H +int vied_nci_bitmap_compute_weight( + const vied_nci_resource_bitmap_t bitmap); + +IA_CSS_PSYS_SIM_STORAGE_CLASS_H +vied_nci_resource_bitmap_t 
vied_nci_bitmap_union( + const vied_nci_resource_bitmap_t bitmap0, + const vied_nci_resource_bitmap_t bitmap1); + +IA_CSS_PSYS_SIM_STORAGE_CLASS_H +vied_nci_resource_bitmap_t vied_nci_bitmap_intersection( + const vied_nci_resource_bitmap_t bitmap0, + const vied_nci_resource_bitmap_t bitmap1); + +IA_CSS_PSYS_SIM_STORAGE_CLASS_H +vied_nci_resource_bitmap_t vied_nci_bitmap_xor( + const vied_nci_resource_bitmap_t bitmap0, + const vied_nci_resource_bitmap_t bitmap1); + +IA_CSS_PSYS_SIM_STORAGE_CLASS_H +vied_nci_resource_bitmap_t vied_nci_bitmap_set_unique( + const vied_nci_resource_bitmap_t bitmap, + const vied_nci_resource_bitmap_t bit_mask); + +IA_CSS_PSYS_SIM_STORAGE_CLASS_H +vied_nci_resource_bitmap_t vied_nci_bitfield_mask( + const unsigned int position, + const unsigned int size); + +IA_CSS_PSYS_SIM_STORAGE_CLASS_H +vied_nci_resource_bitmap_t vied_nci_bitmap_set_bitfield( +const vied_nci_resource_bitmap_t bitmap, +const unsigned int index, +const unsigned int size); + +IA_CSS_PSYS_SIM_STORAGE_CLASS_H +vied_nci_resource_bitmap_t vied_nci_bit_mask_set_unique( + const vied_nci_resource_bitmap_t bitmap, + const unsigned index); + +IA_CSS_PSYS_SIM_STORAGE_CLASS_H +vied_nci_resource_bitmap_t vied_nci_cell_bit_mask( + const vied_nci_cell_ID_t cell_id); + +IA_CSS_PSYS_SIM_STORAGE_CLASS_H +vied_nci_resource_bitmap_t vied_nci_barrier_bit_mask( + const vied_nci_barrier_ID_t barrier_id); + +IA_CSS_PSYS_SIM_STORAGE_CLASS_H +vied_nci_cell_type_ID_t vied_nci_cell_get_type( + const vied_nci_cell_ID_t cell_id); + +IA_CSS_PSYS_SIM_STORAGE_CLASS_H +vied_nci_mem_type_ID_t vied_nci_mem_get_type( + const vied_nci_mem_ID_t mem_id); + +IA_CSS_PSYS_SIM_STORAGE_CLASS_H +uint16_t vied_nci_mem_get_size( + const vied_nci_mem_ID_t mem_id); + +IA_CSS_PSYS_SIM_STORAGE_CLASS_H +uint16_t vied_nci_dev_chn_get_size( + const vied_nci_dev_chn_ID_t dev_chn_id); + +IA_CSS_PSYS_SIM_STORAGE_CLASS_H +bool vied_nci_is_cell_of_type( + const vied_nci_cell_ID_t cell_id, + const vied_nci_cell_type_ID_t 
cell_type_id); + +IA_CSS_PSYS_SIM_STORAGE_CLASS_H +bool vied_nci_is_mem_of_type( + const vied_nci_mem_ID_t mem_id, + const vied_nci_mem_type_ID_t mem_type_id); + +IA_CSS_PSYS_SIM_STORAGE_CLASS_H +bool vied_nci_is_cell_mem_of_type( + const vied_nci_cell_ID_t cell_id, + const uint16_t mem_index, + const vied_nci_mem_type_ID_t mem_type_id); + +IA_CSS_PSYS_SIM_STORAGE_CLASS_H +bool vied_nci_has_cell_mem_of_id( + const vied_nci_cell_ID_t cell_id, + const vied_nci_mem_ID_t mem_id); + +IA_CSS_PSYS_SIM_STORAGE_CLASS_H +uint16_t vied_nci_cell_get_mem_count( + const vied_nci_cell_ID_t cell_id); + +IA_CSS_PSYS_SIM_STORAGE_CLASS_H +vied_nci_mem_type_ID_t vied_nci_cell_get_mem_type( + const vied_nci_cell_ID_t cell_id, + const uint16_t mem_index); + +IA_CSS_PSYS_SIM_STORAGE_CLASS_H +vied_nci_mem_ID_t vied_nci_cell_get_mem( + const vied_nci_cell_ID_t cell_id, + const uint16_t mem_index); + +IA_CSS_PSYS_SIM_STORAGE_CLASS_H +vied_nci_mem_type_ID_t vied_nci_cell_type_get_mem_type( + const vied_nci_cell_type_ID_t cell_type_id, + const uint16_t mem_index); + +#ifdef __IA_CSS_PSYS_SIM_INLINE__ +#include "psys_system_global_impl.h" +#endif /* __IA_CSS_PSYS_SIM_INLINE__ */ + +#endif /* __VIED_NCI_PSYS_SYSTEM_GLOBAL_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/psysapi/sim/src/ia_css_psys_sim_data.c b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/psysapi/sim/src/ia_css_psys_sim_data.c new file mode 100644 index 0000000000000..6dccac8238719 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/psysapi/sim/src/ia_css_psys_sim_data.c @@ -0,0 +1,91 @@ +/* +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. 
+ * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ + + +#include + +#include "ia_css_psys_sim_trace.h" + +static unsigned int ia_css_psys_ran_seed; + +void ia_css_psys_ran_set_seed(const unsigned int seed) +{ + ia_css_psys_ran_seed = seed; + + IA_CSS_TRACE_0(PSYSAPI_SIM, VERBOSE, + "ia_css_psys_ran_set_seed(): enter:\n"); + +} + +/* Numerical Recipes LCG step; full 32-bit state is the output */ +static unsigned int ia_css_psys_ran_int(void) +{ + ia_css_psys_ran_seed = 1664525UL * ia_css_psys_ran_seed + 1013904223UL; + return ia_css_psys_ran_seed; +} + +unsigned int ia_css_psys_ran_var(const unsigned int bit_depth) +{ + unsigned int out; + unsigned int tmp; + + IA_CSS_TRACE_0(PSYSAPI_SIM, VERBOSE, "ia_css_psys_ran_var(): enter:\n"); + + tmp = ia_css_psys_ran_int(); + + if (bit_depth >= 32) + out = tmp; + else if (bit_depth == 0) + out = 0; + else + out = tmp >> (32 - bit_depth); + + return out; +} + +unsigned int ia_css_psys_ran_val(const unsigned int range) +{ + unsigned int out; + unsigned int tmp; + + IA_CSS_TRACE_0(PSYSAPI_SIM, VERBOSE, "ia_css_psys_ran_val(): enter:\n"); + + tmp = ia_css_psys_ran_int(); + + if (range > 1) + out = tmp % range; + else + out = 0; + + return out; +} + +unsigned int ia_css_psys_ran_interval(const unsigned int lo, + const unsigned int hi) +{ + unsigned int out; + unsigned int tmp; + unsigned int range = hi - lo; + + IA_CSS_TRACE_0(PSYSAPI_SIM, VERBOSE, + "ia_css_psys_ran_interval(): enter:\n"); + + tmp = ia_css_psys_ran_int(); + + if (lo < hi) + out = lo + (tmp % range); + else + out = 0; + + return out; +} diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/psysapi/sim/src/psys_system_global_impl.h b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/psysapi/sim/src/psys_system_global_impl.h new file mode 100644 index 0000000000000..ff51175548ec0
--- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/psysapi/sim/src/psys_system_global_impl.h @@ -0,0 +1,485 @@ +/* +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ + +#ifndef __PSYS_SYSTEM_GLOBAL_IMPL_H +#define __PSYS_SYSTEM_GLOBAL_IMPL_H + +#include + +#include "ia_css_psys_sim_trace.h" +#include + +/* Use vied_bits instead, however for test purposes we uses explicit type + * checking + */ +IA_CSS_PSYS_SIM_STORAGE_CLASS_C +vied_nci_resource_bitmap_t vied_nci_bit_mask( + const unsigned int index) +{ + vied_nci_resource_bitmap_t bit_mask = 0; + + IA_CSS_TRACE_0(PSYSAPI_SIM, VERBOSE, "vied_nci_bit_mask(): enter:\n"); + + if (index < VIED_NCI_RESOURCE_BITMAP_BITS) + bit_mask = (vied_nci_resource_bitmap_t)1 << index; + + return bit_mask; +} + +IA_CSS_PSYS_SIM_STORAGE_CLASS_C +vied_nci_resource_bitmap_t vied_nci_bitmap_set( + const vied_nci_resource_bitmap_t bitmap, + const vied_nci_resource_bitmap_t bit_mask) +{ + + IA_CSS_TRACE_0(PSYSAPI_SIM, VERBOSE, "vied_nci_bitmap_set(): enter:\n"); + +/* + assert(vied_nci_is_bitmap_one_hot(bit_mask)); +*/ + return bitmap | bit_mask; +} + +IA_CSS_PSYS_SIM_STORAGE_CLASS_C +vied_nci_resource_bitmap_t vied_nci_bitmap_clear( + const vied_nci_resource_bitmap_t bitmap, + const vied_nci_resource_bitmap_t bit_mask) +{ + + IA_CSS_TRACE_0(PSYSAPI_SIM, VERBOSE, + "vied_nci_bitmap_clear(): enter:\n"); + +/* + assert(vied_nci_is_bitmap_one_hot(bit_mask)); +*/ + return bitmap & (~bit_mask); +} + 
+IA_CSS_PSYS_SIM_STORAGE_CLASS_C +vied_nci_resource_bitmap_t vied_nci_bitfield_mask( + const unsigned int position, + const unsigned int size) +{ + vied_nci_resource_bitmap_t bit_mask = 0; + vied_nci_resource_bitmap_t ones = (vied_nci_resource_bitmap_t)-1; + + IA_CSS_TRACE_0(PSYSAPI_SIM, VERBOSE, + "vied_nci_bitfield_mask(): enter:\n"); + + /* Shift widths are in bits, not bytes; guard size to avoid UB shifts */ + if ((size > 0) && (size <= VIED_NCI_RESOURCE_BITMAP_BITS) && (position < VIED_NCI_RESOURCE_BITMAP_BITS)) + bit_mask = (ones >> (VIED_NCI_RESOURCE_BITMAP_BITS - size)) << position; + + return bit_mask; +} + +IA_CSS_PSYS_SIM_STORAGE_CLASS_C +vied_nci_resource_bitmap_t vied_nci_bitmap_set_bitfield( + const vied_nci_resource_bitmap_t bitmap, + const unsigned int index, + const unsigned int size) +{ + vied_nci_resource_bitmap_t ret = 0; + vied_nci_resource_bitmap_t bit_mask = 0; + + IA_CSS_TRACE_0(PSYSAPI_SIM, VERBOSE, + "vied_nci_bit_mask_set_bitfield(): enter:\n"); + + bit_mask = vied_nci_bitfield_mask(index, size); + ret = vied_nci_bitmap_set(bitmap, bit_mask); + + return ret; +} + + +IA_CSS_PSYS_SIM_STORAGE_CLASS_C +vied_nci_resource_bitmap_t vied_nci_bitmap_set_unique( + const vied_nci_resource_bitmap_t bitmap, + const vied_nci_resource_bitmap_t bit_mask) +{ + vied_nci_resource_bitmap_t ret = 0; + + IA_CSS_TRACE_0(PSYSAPI_SIM, VERBOSE, + "vied_nci_bitmap_set_unique(): enter:\n"); + + if ((bitmap & bit_mask) == 0) + ret = bitmap | bit_mask; + + return ret; +} + +IA_CSS_PSYS_SIM_STORAGE_CLASS_C +vied_nci_resource_bitmap_t vied_nci_bit_mask_set_unique( + const vied_nci_resource_bitmap_t bitmap, + const unsigned int index) +{ + vied_nci_resource_bitmap_t ret = 0; + vied_nci_resource_bitmap_t bit_mask; + + IA_CSS_TRACE_0(PSYSAPI_SIM, VERBOSE, + "vied_nci_bit_mask_set_unique(): enter:\n"); + + bit_mask = vied_nci_bit_mask(index); + + if (((bitmap & bit_mask) == 0) && (bit_mask != 0)) + ret = bitmap | bit_mask; + + return ret; +} + +IA_CSS_PSYS_SIM_STORAGE_CLASS_C +bool vied_nci_is_bitmap_empty( + const vied_nci_resource_bitmap_t bitmap) +{ + + IA_CSS_TRACE_0(PSYSAPI_SIM, VERBOSE, +
"vied_nci_is_bitmap_empty(): enter:\n"); + + return (bitmap == 0); +} + +IA_CSS_PSYS_SIM_STORAGE_CLASS_C +bool vied_nci_is_bitmap_set( + const vied_nci_resource_bitmap_t bitmap, + const vied_nci_resource_bitmap_t bit_mask) +{ + + IA_CSS_TRACE_0(PSYSAPI_SIM, VERBOSE, + "vied_nci_is_bitmap_set(): enter:\n"); + +/* + assert(vied_nci_is_bitmap_one_hot(bit_mask)); +*/ + return !vied_nci_is_bitmap_clear(bitmap, bit_mask); +} + +IA_CSS_PSYS_SIM_STORAGE_CLASS_C +bool vied_nci_is_bit_set_in_bitmap( + const vied_nci_resource_bitmap_t bitmap, + const unsigned int index) +{ + + vied_nci_resource_bitmap_t bitmask; + + IA_CSS_TRACE_0(PSYSAPI_SIM, VERBOSE, + "vied_nci_is_bit_set_in_bitmap(): enter:\n"); + bitmask = vied_nci_bit_mask(index); + return vied_nci_is_bitmap_set(bitmap, bitmask); +} + +IA_CSS_PSYS_SIM_STORAGE_CLASS_C +bool vied_nci_is_bitmap_clear( + const vied_nci_resource_bitmap_t bitmap, + const vied_nci_resource_bitmap_t bit_mask) +{ + + IA_CSS_TRACE_0(PSYSAPI_SIM, VERBOSE, + "vied_nci_is_bitmap_clear(): enter:\n"); + +/* + assert(vied_nci_is_bitmap_one_hot(bit_mask)); +*/ + return ((bitmap & bit_mask) == 0); +} + +IA_CSS_PSYS_SIM_STORAGE_CLASS_C +int vied_nci_bitmap_compute_weight( + const vied_nci_resource_bitmap_t bitmap) +{ + vied_nci_resource_bitmap_t loc_bitmap = bitmap; + int weight = 0; + int i; + + IA_CSS_TRACE_0(PSYSAPI_SIM, VERBOSE, + "vied_nci_bitmap_compute_weight(): enter:\n"); + + /* Do not need the iterator "i" */ + for (i = 0; (i < VIED_NCI_RESOURCE_BITMAP_BITS) && + (loc_bitmap != 0); i++) { + weight += loc_bitmap & 0x01; + loc_bitmap >>= 1; + } + + return weight; +} + +IA_CSS_PSYS_SIM_STORAGE_CLASS_C +vied_nci_resource_bitmap_t vied_nci_bitmap_union( + const vied_nci_resource_bitmap_t bitmap0, + const vied_nci_resource_bitmap_t bitmap1) +{ + IA_CSS_TRACE_0(PSYSAPI_SIM, VERBOSE, + "vied_nci_bitmap_union(): enter:\n"); + return (bitmap0 | bitmap1); +} + +IA_CSS_PSYS_SIM_STORAGE_CLASS_C +vied_nci_resource_bitmap_t vied_nci_bitmap_intersection( + 
const vied_nci_resource_bitmap_t bitmap0, + const vied_nci_resource_bitmap_t bitmap1) +{ + IA_CSS_TRACE_0(PSYSAPI_SIM, VERBOSE, + "ia_css_kernel_bitmap_intersection(): enter:\n"); + return (bitmap0 & bitmap1); +} + +IA_CSS_PSYS_SIM_STORAGE_CLASS_C +vied_nci_resource_bitmap_t vied_nci_bitmap_xor( + const vied_nci_resource_bitmap_t bitmap0, + const vied_nci_resource_bitmap_t bitmap1) +{ + IA_CSS_TRACE_0(PSYSAPI_SIM, VERBOSE, "vied_nci_bitmap_xor(): enter:\n"); + return (bitmap0 ^ bitmap1); +} + +IA_CSS_PSYS_SIM_STORAGE_CLASS_C +vied_nci_resource_bitmap_t vied_nci_cell_bit_mask( + const vied_nci_cell_ID_t cell_id) +{ + vied_nci_resource_bitmap_t bit_mask = 0; + + IA_CSS_TRACE_0(PSYSAPI_SIM, VERBOSE, + "vied_nci_cell_bit_mask(): enter:\n"); + + if ((cell_id < VIED_NCI_N_CELL_ID) && + (cell_id < VIED_NCI_RESOURCE_BITMAP_BITS)) { + bit_mask = (vied_nci_resource_bitmap_t)1 << cell_id; + } + return bit_mask; +} + +IA_CSS_PSYS_SIM_STORAGE_CLASS_C +vied_nci_resource_bitmap_t vied_nci_barrier_bit_mask( + const vied_nci_barrier_ID_t barrier_id) +{ + vied_nci_resource_bitmap_t bit_mask = 0; + + IA_CSS_TRACE_0(PSYSAPI_SIM, VERBOSE, + "vied_nci_barrier_bit_mask(): enter:\n"); + + if ((barrier_id < VIED_NCI_N_BARRIER_ID) && + ((barrier_id + VIED_NCI_N_CELL_ID) < VIED_NCI_RESOURCE_BITMAP_BITS)) { + bit_mask = (vied_nci_resource_bitmap_t)1 << + (barrier_id + VIED_NCI_N_CELL_ID); + } + return bit_mask; +} + +IA_CSS_PSYS_SIM_STORAGE_CLASS_C +vied_nci_cell_type_ID_t vied_nci_cell_get_type( + const vied_nci_cell_ID_t cell_id) +{ + vied_nci_cell_type_ID_t cell_type = VIED_NCI_N_CELL_TYPE_ID; + + IA_CSS_TRACE_0(PSYSAPI_SIM, VERBOSE, + "vied_nci_cell_get_type(): enter:\n"); + + if (cell_id < VIED_NCI_N_CELL_ID) { + cell_type = vied_nci_cell_type[cell_id]; + } else { + IA_CSS_TRACE_0(PSYSAPI_SIM, WARNING, + "vied_nci_cell_get_type(): invalid argument\n"); + } + + return cell_type; +} + +IA_CSS_PSYS_SIM_STORAGE_CLASS_C +vied_nci_mem_type_ID_t vied_nci_mem_get_type( + const vied_nci_mem_ID_t 
mem_id) +{ + vied_nci_mem_type_ID_t mem_type = VIED_NCI_N_MEM_TYPE_ID; + + IA_CSS_TRACE_0(PSYSAPI_SIM, VERBOSE, + "vied_nci_mem_get_type(): enter:\n"); + + if (mem_id < VIED_NCI_N_MEM_ID) { + mem_type = vied_nci_mem_type[mem_id]; + } else { + IA_CSS_TRACE_0(PSYSAPI_SIM, WARNING, + "vied_nci_mem_get_type(): invalid argument\n"); + } + + return mem_type; +} + +IA_CSS_PSYS_SIM_STORAGE_CLASS_C +uint16_t vied_nci_mem_get_size( + const vied_nci_mem_ID_t mem_id) +{ + uint16_t mem_size = 0; + + IA_CSS_TRACE_0(PSYSAPI_SIM, VERBOSE, + "vied_nci_mem_get_size(): enter:\n"); + + if (mem_id < VIED_NCI_N_MEM_ID) { + mem_size = vied_nci_mem_size[mem_id]; + } else { + IA_CSS_TRACE_0(PSYSAPI_SIM, WARNING, + "vied_nci_mem_get_size(): invalid argument\n"); + } + + return mem_size; +} + +IA_CSS_PSYS_SIM_STORAGE_CLASS_C +uint16_t vied_nci_dev_chn_get_size( + const vied_nci_dev_chn_ID_t dev_chn_id) +{ + uint16_t dev_chn_size = 0; + + IA_CSS_TRACE_0(PSYSAPI_SIM, VERBOSE, + "vied_nci_dev_chn_get_size(): enter:\n"); + + if (dev_chn_id < VIED_NCI_N_DEV_CHN_ID) { + dev_chn_size = vied_nci_dev_chn_size[dev_chn_id]; + } else { + IA_CSS_TRACE_0(PSYSAPI_SIM, WARNING, + "vied_nci_dev_chn_get_size(): invalid argument\n"); + } + + return dev_chn_size; +} + +IA_CSS_PSYS_SIM_STORAGE_CLASS_C +bool vied_nci_is_cell_of_type( + const vied_nci_cell_ID_t cell_id, + const vied_nci_cell_type_ID_t cell_type_id) +{ + IA_CSS_TRACE_0(PSYSAPI_SIM, VERBOSE, + "vied_nci_is_cell_of_type(): enter:\n"); + + return ((vied_nci_cell_get_type(cell_id) == + cell_type_id) && (cell_type_id != + VIED_NCI_N_CELL_TYPE_ID)); +} + +IA_CSS_PSYS_SIM_STORAGE_CLASS_C +bool vied_nci_is_mem_of_type( + const vied_nci_mem_ID_t mem_id, + const vied_nci_mem_type_ID_t mem_type_id) +{ + IA_CSS_TRACE_0(PSYSAPI_SIM, VERBOSE, + "vied_nci_is_mem_of_type(): enter:\n"); + + return ((vied_nci_mem_get_type(mem_id) == mem_type_id) && + (mem_type_id != VIED_NCI_N_MEM_TYPE_ID)); +} + +IA_CSS_PSYS_SIM_STORAGE_CLASS_C +bool vied_nci_is_cell_mem_of_type( + 
const vied_nci_cell_ID_t cell_id, + const uint16_t mem_index, + const vied_nci_mem_type_ID_t mem_type_id) +{ + IA_CSS_TRACE_0(PSYSAPI_SIM, VERBOSE, + "vied_nci_is_cell_mem_of_type(): enter:\n"); + + return ((vied_nci_cell_get_mem_type(cell_id, mem_index) == mem_type_id) + && (mem_type_id != VIED_NCI_N_MEM_TYPE_ID)); +} + +IA_CSS_PSYS_SIM_STORAGE_CLASS_C +bool vied_nci_has_cell_mem_of_id( + const vied_nci_cell_ID_t cell_id, + const vied_nci_mem_ID_t mem_id) +{ + uint16_t mem_index; + + IA_CSS_TRACE_0(PSYSAPI_SIM, VERBOSE, + "vied_nci_has_cell_mem_of_id(): enter:\n"); + + for (mem_index = 0; mem_index < VIED_NCI_N_MEM_TYPE_ID; mem_index++) { + if ((vied_nci_cell_get_mem(cell_id, mem_index) == mem_id) && + (mem_id != VIED_NCI_N_MEM_ID)) { + break; + } + } + + return (mem_index < VIED_NCI_N_MEM_TYPE_ID); +} + +IA_CSS_PSYS_SIM_STORAGE_CLASS_C +uint16_t vied_nci_cell_get_mem_count( + const vied_nci_cell_ID_t cell_id) +{ + uint16_t mem_count = 0; + vied_nci_cell_type_ID_t cell_type; + + IA_CSS_TRACE_0(PSYSAPI_SIM, VERBOSE, + "vied_nci_cell_get_mem_count(): enter:\n"); + + cell_type = vied_nci_cell_get_type(cell_id); + + if (cell_type < VIED_NCI_N_CELL_TYPE_ID) + mem_count = vied_nci_N_cell_mem[cell_type]; + + return mem_count; +} + +IA_CSS_PSYS_SIM_STORAGE_CLASS_C +vied_nci_mem_type_ID_t vied_nci_cell_get_mem_type( + const vied_nci_cell_ID_t cell_id, + const uint16_t mem_index) +{ + vied_nci_mem_type_ID_t mem_type = VIED_NCI_N_MEM_TYPE_ID; + + IA_CSS_TRACE_0(PSYSAPI_SIM, VERBOSE, + "vied_nci_cell_get_mem_type(): enter:\n"); + + if ((cell_id < VIED_NCI_N_CELL_ID) && + (mem_index < VIED_NCI_N_MEM_TYPE_ID)) { + mem_type = vied_nci_cell_mem_type[ + vied_nci_cell_get_type(cell_id)][mem_index]; + } + + return mem_type; +} + +IA_CSS_PSYS_SIM_STORAGE_CLASS_C +vied_nci_mem_ID_t vied_nci_cell_get_mem( + const vied_nci_cell_ID_t cell_id, + const uint16_t mem_index) +{ + vied_nci_mem_ID_t mem_id = VIED_NCI_N_MEM_ID; + + IA_CSS_TRACE_0(PSYSAPI_SIM, VERBOSE, + "vied_nci_cell_get_mem(): 
enter:\n"); + + if ((cell_id < VIED_NCI_N_CELL_ID) && + (mem_index < VIED_NCI_N_MEM_TYPE_ID)) { + mem_id = vied_nci_cell_mem[cell_id][mem_index]; + } + + return mem_id; +} + +IA_CSS_PSYS_SIM_STORAGE_CLASS_C +vied_nci_mem_type_ID_t vied_nci_cell_type_get_mem_type( + const vied_nci_cell_type_ID_t cell_type_id, + const uint16_t mem_index) +{ + vied_nci_mem_type_ID_t mem_type = VIED_NCI_N_MEM_TYPE_ID; + + IA_CSS_TRACE_0(PSYSAPI_SIM, VERBOSE, + "vied_nci_cell_type_get_mem_type(): enter:\n"); + + if ((cell_type_id < VIED_NCI_N_CELL_TYPE_ID) + && (mem_index < VIED_NCI_N_MEM_TYPE_ID)) { + mem_type = vied_nci_cell_mem_type[cell_type_id][mem_index]; + } + + return mem_type; +} + +#endif /* __PSYS_SYSTEM_GLOBAL_IMPL_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/psysapi/sim/src/vied_nci_psys_system.c b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/psysapi/sim/src/vied_nci_psys_system.c new file mode 100644 index 0000000000000..b0e0aebb6e774 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/psysapi/sim/src/vied_nci_psys_system.c @@ -0,0 +1,26 @@ +/* +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+*/ + +#include "ia_css_psys_sim_storage_class.h" + +/* + * Functions to possibly inline + */ + +#ifdef __IA_CSS_PSYS_SIM_INLINE__ +STORAGE_CLASS_INLINE int +__ia_css_psys_system_global_avoid_warning_on_empty_file(void) { return 0; } +#else /* __IA_CSS_PSYS_SIM_INLINE__ */ +#include "psys_system_global_impl.h" +#endif /* __IA_CSS_PSYS_SIM_INLINE__ */ diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/psysapi/static/interface/ia_css_psys_manifest_types.h b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/psysapi/static/interface/ia_css_psys_manifest_types.h new file mode 100644 index 0000000000000..4a2f96e9405e8 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/psysapi/static/interface/ia_css_psys_manifest_types.h @@ -0,0 +1,102 @@ +/* +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ + +#ifndef __IA_CSS_PSYS_MANIFEST_TYPES_H +#define __IA_CSS_PSYS_MANIFEST_TYPES_H + +/*! 
\file */ + +/** @file ia_css_psys_manifest_types.h + * + * The types belonging to the terminal/program/ + * program group manifest static module + */ + +#include +#include "vied_nci_psys_resource_model.h" + + +/* This value is used in the manifest to indicate that the resource + * offset field must be ignored and the resource is relocatable + */ +#define IA_CSS_PROGRAM_MANIFEST_RESOURCE_OFFSET_IS_RELOCATABLE ((vied_nci_resource_size_t)(-1)) + +/* + * Connection type defining the interface source/sink + * + * Note that the connection type does not define the + * real-time configuration of the system, i.e. it + * does not describe whether a source and sink + * program group or sub-system operate synchronously + * that is a program script property {online, offline} + * (see FAS 5.16.3) + */ +#define IA_CSS_CONNECTION_BITMAP_BITS 8 +typedef uint8_t ia_css_connection_bitmap_t; + +#define IA_CSS_CONNECTION_TYPE_BITS 32 +typedef enum ia_css_connection_type { + /**< The terminal is in DDR */ + IA_CSS_CONNECTION_MEMORY = 0, + /**< The terminal is a (watermark) queued stream over DDR */ + IA_CSS_CONNECTION_MEMORY_STREAM, + /* The terminal is a device port */ + IA_CSS_CONNECTION_STREAM, + IA_CSS_N_CONNECTION_TYPES +} ia_css_connection_type_t; + +#define IA_CSS_PROGRAM_TYPE_BITS 32 +typedef enum ia_css_program_type { + IA_CSS_PROGRAM_TYPE_SINGULAR = 0, + IA_CSS_PROGRAM_TYPE_EXCLUSIVE_SUB, + IA_CSS_PROGRAM_TYPE_EXCLUSIVE_SUPER, + IA_CSS_PROGRAM_TYPE_PARALLEL_SUB, + IA_CSS_PROGRAM_TYPE_PARALLEL_SUPER, + IA_CSS_PROGRAM_TYPE_VIRTUAL_SUB, + IA_CSS_PROGRAM_TYPE_VIRTUAL_SUPER, +/* + * Future extension; A bitmap coding starts making more sense + * + IA_CSS_PROGRAM_TYPE_EXCLUSIVE_SUB_PARALLEL_SUB, + IA_CSS_PROGRAM_TYPE_EXCLUSIVE_SUB_PARALLEL_SUPER, + IA_CSS_PROGRAM_TYPE_EXCLUSIVE_SUPER_PARALLEL_SUB, + IA_CSS_PROGRAM_TYPE_EXCLUSIVE_SUPER_PARALLEL_SUPER, + */ + IA_CSS_N_PROGRAM_TYPES +} ia_css_program_type_t; + +#define IA_CSS_PROGRAM_GROUP_ID_BITS 32 +typedef uint32_t 
ia_css_program_group_ID_t; +#define IA_CSS_PROGRAM_ID_BITS 32 +typedef uint32_t ia_css_program_ID_t; + +#define IA_CSS_PROGRAM_INVALID_ID ((uint32_t)(-1)) +#define IA_CSS_PROGRAM_GROUP_INVALID_ID ((uint32_t)(-1)) + +typedef struct ia_css_program_group_manifest_s +ia_css_program_group_manifest_t; +typedef struct ia_css_program_manifest_s +ia_css_program_manifest_t; +typedef struct ia_css_data_terminal_manifest_s +ia_css_data_terminal_manifest_t; + +/* ============ Program Control Init Terminal Manifest - START ============ */ +typedef struct ia_css_program_control_init_manifest_program_desc_s + ia_css_program_control_init_manifest_program_desc_t; + +typedef struct ia_css_program_control_init_terminal_manifest_s + ia_css_program_control_init_terminal_manifest_t; +/* ============ Program Control Init Terminal Manifest - END ============ */ + +#endif /* __IA_CSS_PSYS_MANIFEST_TYPES_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/psysapi/static/interface/ia_css_psys_program_group_manifest.h b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/psysapi/static/interface/ia_css_psys_program_group_manifest.h new file mode 100644 index 0000000000000..ee8321ea1f12b --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/psysapi/static/interface/ia_css_psys_program_group_manifest.h @@ -0,0 +1,311 @@ +/* +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+*/ + +#ifndef __IA_CSS_PSYS_PROGRAM_GROUP_MANIFEST_H +#define __IA_CSS_PSYS_PROGRAM_GROUP_MANIFEST_H + +#include "ia_css_psys_static_storage_class.h" + +/*! \file */ + +/** @file ia_css_psys_program_group_manifest.h + * + * Define the methods on the program group manifest object that are not part of + * a single interface + */ + +#include + +#include /* uint8_t */ + +#include + +#include + +#include /* ia_css_kernel_bitmap_t */ +#include "ia_css_terminal_manifest.h" +#include "ia_css_rbm_manifest_types.h" + +#define IA_CSS_PROGRAM_GROUP_INVALID_ALIGNMENT ((uint8_t)(-1)) + +/*! Get the stored size of the program group manifest object + + @param manifest[in] program group manifest object + + @return size, 0 on invalid argument + */ +IA_CSS_PSYS_STATIC_STORAGE_CLASS_H +size_t ia_css_program_group_manifest_get_size( + const ia_css_program_group_manifest_t *manifest); + +/*! Get the program group ID of the program group manifest object + + @param manifest[in] program group manifest object + + @return program group ID, IA_CSS_PROGRAM_GROUP_INVALID_ID on invalid argument +*/ +IA_CSS_PSYS_STATIC_STORAGE_CLASS_H +ia_css_program_group_ID_t +ia_css_program_group_manifest_get_program_group_ID( + const ia_css_program_group_manifest_t *manifest); + +/*! Set the program group ID of the program group manifest object + + @param manifest[in] program group manifest object + + @param program group ID + + @return 0 on success, -1 on invalid manifest argument + */ +IA_CSS_PSYS_STATIC_STORAGE_CLASS_H +int ia_css_program_group_manifest_set_program_group_ID( + ia_css_program_group_manifest_t *manifest, + ia_css_program_group_ID_t id); + +/*! 
Get the storage alignment constraint of the program group binary data + + @param manifest[in] program group manifest object + + @return alignment, IA_CSS_PROGRAM_GROUP_INVALID_ALIGNMENT on invalid manifest + argument +*/ +IA_CSS_PSYS_STATIC_STORAGE_CLASS_H +uint8_t ia_css_program_group_manifest_get_alignment( + const ia_css_program_group_manifest_t *manifest); + +/*! Set the storage alignment constraint of the program group binary data + + @param manifest[in] program group manifest object + @param alignment[in] alignment desired + + @return < 0 on invalid manifest argument + */ +IA_CSS_PSYS_STATIC_STORAGE_CLASS_H +int ia_css_program_group_manifest_set_alignment( + ia_css_program_group_manifest_t *manifest, + const uint8_t alignment); + +/*! Get the kernel enable bitmap of the program group + + @param manifest[in] program group manifest object + + @return bitmap, 0 on invalid manifest argument + */ +extern ia_css_kernel_bitmap_t +ia_css_program_group_manifest_get_kernel_bitmap( + const ia_css_program_group_manifest_t *manifest); + +/*! Set the kernel enable bitmap of the program group + + @param manifest[in] program group manifest object + @param kernel bitmap[in] kernel enable bitmap + + @return < 0 on invalid manifest argument + */ +extern int ia_css_program_group_manifest_set_kernel_bitmap( + ia_css_program_group_manifest_t *manifest, + const ia_css_kernel_bitmap_t bitmap); + +/*! Get the number of programs in the program group manifest object + + @param manifest[in] program group manifest object + + @return program count, 0 on invalid manifest argument + */ +IA_CSS_PSYS_STATIC_STORAGE_CLASS_H +uint8_t ia_css_program_group_manifest_get_program_count( + const ia_css_program_group_manifest_t *manifest); + +/*! 
Get the number of terminals in the program group manifest object + + @param manifest[in] program group manifest object + + @return terminal count, 0 on invalid manifest argument + */ +IA_CSS_PSYS_STATIC_STORAGE_CLASS_H +uint8_t ia_css_program_group_manifest_get_terminal_count( + const ia_css_program_group_manifest_t *manifest); + +/*! Get the (pointer to) private data blob in the manifest + + @param manifest[in] program group manifest object + + @return private data blob, NULL on invalid manifest argument + */ +IA_CSS_PSYS_STATIC_STORAGE_CLASS_H +void *ia_css_program_group_manifest_get_private_data( + const ia_css_program_group_manifest_t *manifest); + +/*! Get the (pointer to) routing bitmap (rbm) manifest + + @param manifest[in] program group manifest object + + @return rbm manifest, NULL on invalid manifest argument + */ +IA_CSS_PSYS_STATIC_STORAGE_CLASS_H +ia_css_rbm_manifest_t * +ia_css_program_group_manifest_get_rbm_manifest( + const ia_css_program_group_manifest_t *manifest); + +/*! Get the (pointer to) indexed program manifest in the program group manifest + * object + + @param manifest[in] program group manifest object + @param program_index[in] index of the program manifest object + + @return program manifest, NULL on invalid arguments + */ +IA_CSS_PSYS_STATIC_STORAGE_CLASS_H +ia_css_program_manifest_t * +ia_css_program_group_manifest_get_prgrm_mnfst( + const ia_css_program_group_manifest_t *manifest, + const unsigned int program_index); + +/*! Get the (pointer to) indexed terminal manifest in the program group + * manifest object + + @param manifest[in] program group manifest object + @param program_index[in] index of the terminal manifest object + + @return terminal manifest, NULL on invalid arguments + */ +IA_CSS_PSYS_STATIC_STORAGE_CLASS_H +ia_css_terminal_manifest_t * +ia_css_program_group_manifest_get_term_mnfst( + const ia_css_program_group_manifest_t *manifest, + const unsigned int terminal_index); + +/*! 
Get the (pointer to) indexed data terminal manifest in the program group + * manifest object + + @param manifest[in] program group manifest object + @param program_index[in] index of the terminal manifest object + + @return data terminal manifest, NULL on invalid arguments + */ +IA_CSS_PSYS_STATIC_STORAGE_CLASS_H +ia_css_data_terminal_manifest_t * +ia_css_program_group_manifest_get_data_terminal_manifest( + const ia_css_program_group_manifest_t *manifest, + const unsigned int terminal_index); + +/*! Get the (pointer to) indexed parameter terminal manifest in the program + * group manifest object + + @param manifest[in] program group manifest object + @param program_index[in] index of the terminal manifest object + + @return parameter terminal manifest, NULL on invalid arguments + */ +IA_CSS_PSYS_STATIC_STORAGE_CLASS_H +ia_css_param_terminal_manifest_t * +ia_css_program_group_manifest_get_param_terminal_manifest( + const ia_css_program_group_manifest_t *manifest, + const unsigned int terminal_index); + +/*! Get the (pointer to) indexed spatial param terminal manifest in the program + * group manifest object + + @param manifest[in] program group manifest object + @param program_index[in] index of the terminal manifest object + + @return spatial param terminal manifest, NULL on invalid arguments + */ +IA_CSS_PSYS_STATIC_STORAGE_CLASS_H +ia_css_spatial_param_terminal_manifest_t * +ia_css_program_group_manifest_get_spatial_param_terminal_manifest( + const ia_css_program_group_manifest_t *manifest, + const unsigned int terminal_index); + +/*! 
Get the (pointer to) indexed sliced param terminal manifest in the program
+ * group manifest object
+
+ @param	manifest[in]			program group manifest object
+ @param	program_index[in]		index of the terminal manifest object
+
+ @return sliced param terminal manifest, NULL on invalid arguments
+ */
+IA_CSS_PSYS_STATIC_STORAGE_CLASS_H
+ia_css_sliced_param_terminal_manifest_t *
+ia_css_program_group_manifest_get_sliced_param_terminal_manifest(
+	const ia_css_program_group_manifest_t *manifest,
+	const unsigned int terminal_index);
+
+/*! Get the (pointer to) indexed program terminal manifest in the program group
+ * manifest object
+
+ @param	manifest[in]			program group manifest object
+ @param	program_index[in]		index of the terminal manifest object
+
+ @return program terminal manifest, NULL on invalid arguments
+ */
+IA_CSS_PSYS_STATIC_STORAGE_CLASS_H
+ia_css_program_terminal_manifest_t *
+ia_css_program_group_manifest_get_program_terminal_manifest(
+	const ia_css_program_group_manifest_t *manifest,
+	const unsigned int terminal_index);
+
+/*! initialize program group manifest
+
+ @param	manifest[in]			program group manifest object
+ @param	program_count[in]		number of programs.
+ @param	terminal_count[in]		number of terminals.
+ @param	program_deps[in]		program dependencies for programs in pg.
+ @param	terminal_deps[in]		terminal dependencies for programs in pg.
+ @param	terminal_type[in]		array of terminal types,
+					binary specific static frame data
+ @param	cached_in_param_section_count[in]	Number of parameter terminal sections
+ @param	cached_out_param_section_count[in]	Number of parameter out terminal
+ @param	spatial_param_section_count[in]		Array[spatial_terminal_count]
+						with sections per cached out
+						terminal
+ @param	sliced_in_param_section_count[in]	Array[sliced_in_terminal_count]
+						with sections per sliced in
+						terminal
+ @param	sliced_out_param_section_count[in]	Array[sliced_out_terminal_count]
+						with sections per sliced out
+						terminal
+ @param	fragment_param_section_count[in]	Number of fragment parameter
+						sections of the program init
+						terminal,
+ @param	kernel_fragment_seq_count[in]		Number of kernel fragment
+						sequence info.
+ @param	progctrlinit_load_section_counts[in]	Number of progctrlinit load
+			sections (size of array is program_count)
+ @param	progctrlinit_connect_section_counts[in]	Number of progctrlinit connect
+			sections (size of array is program_count)
+ @return none;
+ */
+extern void ia_css_program_group_manifest_init(
+	ia_css_program_group_manifest_t *blob,
+	const uint8_t	program_count,
+	const uint8_t	terminal_count,
+	const uint8_t	*program_dependencies,
+	const uint8_t	*terminal_dependencies,
+	const ia_css_terminal_type_t *terminal_type,
+	const uint16_t	cached_in_param_section_count,
+	const uint16_t	cached_out_param_section_count,
+	const uint16_t	*spatial_param_section_count,
+	const uint16_t	fragment_param_section_count,
+	const uint16_t	*sliced_in_param_section_count,
+	const uint16_t	*sliced_out_param_section_count,
+	const uint16_t	kernel_fragment_seq_count,
+	const uint16_t	*progctrlinit_load_section_counts,
+	const uint16_t	*progctrlinit_connect_section_counts);
+
+#ifdef __IA_CSS_PSYS_STATIC_INLINE__
+#include "ia_css_psys_program_group_manifest_impl.h"
+#endif /* __IA_CSS_PSYS_STATIC_INLINE__ */
+
+#endif /* __IA_CSS_PSYS_PROGRAM_GROUP_MANIFEST_H */ diff --git
a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/psysapi/static/interface/ia_css_psys_program_group_manifest.hsys.user.h b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/psysapi/static/interface/ia_css_psys_program_group_manifest.hsys.user.h new file mode 100644 index 0000000000000..ce802ff5dd8d3 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/psysapi/static/interface/ia_css_psys_program_group_manifest.hsys.user.h @@ -0,0 +1,69 @@ +/* +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ + +#ifndef __IA_CSS_PSYS_PROGRAM_GROUP_MANIFEST_HSYS_USER_H +#define __IA_CSS_PSYS_PROGRAM_GROUP_MANIFEST_HSYS_USER_H + +/*! \file */ + +/** @file ia_css_psys_program_group_manifest.hsys.user.h + * + * Define the methods on the program group manifest object: Hsys user interface + */ + +#include + +#include /* bool */ + +/*! Print the program group manifest object to file/stream + + @param manifest[in] program group manifest object + @param fid[out] file/stream handle + + @return < 0 on error + */ +extern int ia_css_program_group_manifest_print( + const ia_css_program_group_manifest_t *manifest, + void *fid); + +/*! Read the program group manifest object from file/stream + + @param fid[in] file/stream handle + + @return NULL on error + */ +extern ia_css_program_group_manifest_t *ia_css_program_group_manifest_read( + void *fid); + +/*! 
Write the program group manifest object to file/stream + + @param manifest[in] program group manifest object + @param fid[out] file/stream handle + + @return < 0 on error + */ +extern int ia_css_program_group_manifest_write( + const ia_css_program_group_manifest_t *manifest, + void *fid); + +/*! Boolean test if the program group manifest is valid + + @param manifest[in] program group manifest + + @return true if program group manifest is correct, false on error + */ +extern bool ia_css_is_program_group_manifest_valid( + const ia_css_program_group_manifest_t *manifest); + +#endif /* __IA_CSS_PSYS_PROGRAM_GROUP_MANIFEST_HSYS_USER_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/psysapi/static/interface/ia_css_psys_program_group_manifest.sim.h b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/psysapi/static/interface/ia_css_psys_program_group_manifest.sim.h new file mode 100644 index 0000000000000..242f02108dd84 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/psysapi/static/interface/ia_css_psys_program_group_manifest.sim.h @@ -0,0 +1,127 @@ +/* +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ + +#ifndef __IA_CSS_PSYS_PROGRAM_GROUP_MANIFEST_SIM_H +#define __IA_CSS_PSYS_PROGRAM_GROUP_MANIFEST_SIM_H + +/*! 
\file */ + +/** @file ia_css_psys_program_group_manifest.sim.h + * + * Define the methods on the program group manifest object: Simulation only + */ + +#include + +#include /* uint8_t */ +#include "ia_css_terminal_defs.h" + +/*! Create a program group manifest object from specification + + @param specification[in] specification (index) + + @return NULL on error + */ +extern ia_css_program_group_manifest_t *ia_css_program_group_manifest_create( + const unsigned int specification); + +/*! Destroy the program group manifest object + + @param manifest[in] program group manifest + + @return NULL + */ +extern ia_css_program_group_manifest_t *ia_css_program_group_manifest_destroy( + ia_css_program_group_manifest_t *manifest); + +/*! Compute the size of storage required for allocating + * the program group (PG) manifest object + + @param program_count[in] Number of programs in the PG + @param terminal_count[in] Number of terminals on the PG + @param program_dependency_count[in] Array[program_count] with the PG + @param terminal_dependency_count[in] Array[program_count] with the + terminal dependencies + @param terminal_type[in] Array[terminal_count] with the + terminal type + @param cached_in_param_section_count[in] Number of parameter + in terminal sections + @param cached_out_param_section_count[in] Number of parameter + out terminal sections + @param sliced_param_section_count[in] Array[sliced_terminal_count] + with sections per + sliced in terminal + @param sliced_out_param_section_count[in] Array[sliced_terminal_count] + with sections per + sliced out terminal + @param spatial_param_section_count[in] Array[spatial_terminal_count] + with sections per + spatial terminal + @param fragment_param_section_count[in] Number of fragment parameter + sections of the + program init terminal, + @param kernel_fragment_seq_count[in] Number of + kernel_fragment_seq_count. 
+ @param progctrlinit_load_section_counts[in] Number of progctrinit load + sections (size of array is program_count) + @param progctrlinit_connect_section_counts[in] Number of progctrinit connect + sections (size of array is program_count) + @return 0 on error + */ +size_t ia_css_sizeof_program_group_manifest( + const uint8_t program_count, + const uint8_t terminal_count, + const uint8_t *program_dependency_count, + const uint8_t *terminal_dependency_count, + const ia_css_terminal_type_t *terminal_type, + const uint16_t cached_in_param_section_count, + const uint16_t cached_out_param_section_count, + const uint16_t *spatial_param_section_count, + const uint16_t fragment_param_section_count, + const uint16_t *sliced_param_section_count, + const uint16_t *sliced_out_param_section_count, + const uint16_t kernel_fragment_seq_count, + const uint16_t *progctrlinit_load_section_counts, + const uint16_t *progctrlinit_connect_section_counts); + +/*! Create (the storage for) the program group manifest object + + @param program_count[in] Number of programs in the program group + @param terminal_count[in] Number of terminals on the program group + @param program_dependency_count[in] Array[program_count] with the + program dependencies + @param terminal_dependency_count[in] Array[program_count] with the + terminal dependencies + @param terminal_type[in] Array[terminal_count] with the + terminal type + + @return NULL on error + */ +extern ia_css_program_group_manifest_t *ia_css_program_group_manifest_alloc( + const uint8_t program_count, + const uint8_t terminal_count, + const uint8_t *program_dependency_count, + const uint8_t *terminal_dependency_count, + const ia_css_terminal_type_t *terminal_type); + +/*! 
Free (the storage of) the program group manifest object + + @param manifest[in] program group manifest + + @return NULL + */ +extern ia_css_program_group_manifest_t *ia_css_program_group_manifest_free( + ia_css_program_group_manifest_t *manifest); + +#endif /* __IA_CSS_PSYS_PROGRAM_GROUP_MANIFEST_SIM_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/psysapi/static/interface/ia_css_psys_program_manifest.h b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/psysapi/static/interface/ia_css_psys_program_manifest.h new file mode 100644 index 0000000000000..b7333671ed4fc --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/psysapi/static/interface/ia_css_psys_program_manifest.h @@ -0,0 +1,488 @@ +/* +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ + +#ifndef __IA_CSS_PSYS_PROGRAM_MANIFEST_H +#define __IA_CSS_PSYS_PROGRAM_MANIFEST_H + +/*! \file */ + +/** @file ia_css_psys_program_manifest.h + * + * Define the methods on the program manifest object that are not part of a + * single interface + */ + +#include + +#include /* uint8_t */ + +#include + +#include + +#include /* ia_css_kernel_bitmap_t */ + +/* + * Resources needs + */ +#include + +#define IA_CSS_PROGRAM_INVALID_DEPENDENCY ((uint8_t)(-1)) + +/*! 
Check if the program manifest object specifies a fixed cell allocation + + @param manifest[in] program manifest object + + @return has_fixed_cell, false on invalid argument + */ +extern bool ia_css_has_program_manifest_fixed_cell( + const ia_css_program_manifest_t *manifest); + +/*! Get the stored size of the program manifest object + + @param manifest[in] program manifest object + + @return size, 0 on invalid argument + */ +extern size_t ia_css_program_manifest_get_size( + const ia_css_program_manifest_t *manifest); + +/*! Get the program ID of the program manifest object + + @param manifest[in] program manifest object + + @return program ID, IA_CSS_PROGRAM_INVALID_ID on invalid argument + */ +extern ia_css_program_ID_t ia_css_program_manifest_get_program_ID( + const ia_css_program_manifest_t *manifest); + +/*! Set the program ID of the program manifest object + + @param manifest[in] program manifest object + + @param program ID + + @return 0 on success, -1 on invalid manifest argument + */ +extern int ia_css_program_manifest_set_program_ID( + ia_css_program_manifest_t *manifest, + ia_css_program_ID_t id); + +/*! Get the (pointer to) the program group manifest parent of the program + * manifest object + + @param manifest[in] program manifest object + + @return the pointer to the parent, NULL on invalid manifest argument + */ +extern ia_css_program_group_manifest_t *ia_css_program_manifest_get_parent( + const ia_css_program_manifest_t *manifest); + +/*! Set the (pointer to) the program group manifest parent of the program + * manifest object + + @param manifest[in] program manifest object + @param program_offset[in] this program's offset from + program_group_manifest's base address. + + @return < 0 on invalid manifest argument + */ +extern int ia_css_program_manifest_set_parent_offset( + ia_css_program_manifest_t *manifest, + int32_t program_offset); + +/*! 
Get the type of the program manifest object + + @param manifest[in] program manifest object + + @return program type, limit value (IA_CSS_N_PROGRAM_TYPES) on invalid manifest + argument +*/ +extern ia_css_program_type_t ia_css_program_manifest_get_type( + const ia_css_program_manifest_t *manifest); + +/*! Set the type of the program manifest object + + @param manifest[in] program manifest object + @param program_type[in] program type + + @return < 0 on invalid manifest argument + */ +extern int ia_css_program_manifest_set_type( + ia_css_program_manifest_t *manifest, + const ia_css_program_type_t program_type); + +/*! Set the cell id of the program manifest object + + @param manifest[in] program manifest object + @param program_cell_id[in] program cell id + + @return < 0 on invalid manifest argument + */ +extern int ia_css_program_manifest_set_cell_ID( + ia_css_program_manifest_t *manifest, + const vied_nci_cell_ID_t cell_id); + +/*! Set the cell type of the program manifest object + + @param manifest[in] program manifest object + @param program_cell_type[in] program cell type + + @return < 0 on invalid manifest argument + */ +extern int ia_css_program_manifest_set_cell_type_ID( + ia_css_program_manifest_t *manifest, + const vied_nci_cell_type_ID_t cell_type_id); + +/*! Set cells bitmap for the program + + @param manifest[in] program manifest object + @param bitmap[in] bitmap + + @return 0 when not applicable and/or invalid arguments + */ +extern int ia_css_program_manifest_set_cells_bitmap( + ia_css_program_manifest_t *manifest, + const vied_nci_resource_bitmap_t bitmap); + +/*! Get cells bitmap for the program + + @param manifest[in] program manifest object + + @return 0 when not applicable and/or invalid arguments + */ +extern vied_nci_resource_bitmap_t ia_css_program_manifest_get_cells_bitmap( + const ia_css_program_manifest_t *manifest); + +/*! 
Set DFM port bitmap for the program + + @param manifest[in] program manifest object + @param dfm_type_id[in] DFM resource type ID + @param bitmap[in] bitmap + + @return 0 when not applicable and/or invalid arguments + */ +extern int ia_css_program_manifest_set_dfm_port_bitmap( + ia_css_program_manifest_t *manifest, + const vied_nci_dev_dfm_id_t dfm_type_id, + const vied_nci_resource_bitmap_t bitmap); + +/*! Get bitmap of DFM ports requested for the program + + @param manifest[in] program manifest object + @param dfm_type_id[in] DFM resource type ID + + @return DFM port bitmap + */ +extern vied_nci_resource_bitmap_t ia_css_program_manifest_get_dfm_port_bitmap( + const ia_css_program_manifest_t *manifest, + const vied_nci_dev_dfm_id_t dfm_type_id); + + +/*! Set active DFM port specification bitmap for the program + + @param manifest[in] program manifest object + @param dfm_type_id[in] DFM resource type ID + @param bitmap[in] bitmap + + @return 0 when not applicable and/or invalid arguments + */ +extern int ia_css_program_manifest_set_dfm_active_port_bitmap( + ia_css_program_manifest_t *manifest, + const vied_nci_dev_dfm_id_t dfm_type_id, + const vied_nci_resource_bitmap_t bitmap); + +/*! Get active DFM port specification bitmap for the program + + @param manifest[in] program manifest object + @param dfm_type_id[in] DFM resource type ID + + @return 0 when not applicable and/or invalid arguments + */ +extern vied_nci_resource_bitmap_t ia_css_program_manifest_get_dfm_active_port_bitmap( + const ia_css_program_manifest_t *manifest, + const vied_nci_dev_dfm_id_t dfm_type_id); + +/*! 
Set DFM device relocatability specification for the program + + @param manifest[in] program manifest object + @param dfm_type_id[in] DFM resource type ID + @param is_relocatable[in] 1 if dfm device ports are relocatable, 0 otherwise + + @return 0 when not applicable and/or invalid arguments + */ +extern int ia_css_program_manifest_set_is_dfm_relocatable( + ia_css_program_manifest_t *manifest, + const vied_nci_dev_dfm_id_t dfm_type_id, + const uint8_t is_relocatable); + +/*! Get DFM device relocatability specification for the program + + @param manifest[in] program manifest object + @param dfm_type_id[in] DFM resource type ID + + @return 1 if dfm device ports are relocatable, 0 otherwise + */ +extern uint8_t ia_css_program_manifest_get_is_dfm_relocatable( + const ia_css_program_manifest_t *manifest, + const vied_nci_dev_dfm_id_t dfm_type_id); + + +/*! Get the memory resource (size) specification for a memory + that belongs to the cell where the program will be mapped + + @param manifest[in] program manifest object + @param mem_type_id[in] mem type ID + + @return 0 when not applicable and/or invalid arguments + */ +extern vied_nci_resource_size_t ia_css_program_manifest_get_int_mem_size( + const ia_css_program_manifest_t *manifest, + const vied_nci_mem_type_ID_t mem_type_id); + +/*! Set the memory resource (size) specification for a memory + that belongs to the cell where the program will be mapped + + @param manifest[in] program manifest object + @param mem_type_id[in] mem type id + @param int_mem_size[in] internal memory size + + @return < 0 on invalid arguments + */ +extern int ia_css_program_manifest_set_int_mem_size( + ia_css_program_manifest_t *manifest, + const vied_nci_mem_type_ID_t mem_type_id, + const vied_nci_resource_size_t int_mem_size); + +/*! 
Get the memory resource (size) specification for a memory + that does not belong to the cell where the program will be mapped + + @param manifest[in] program manifest object + @param mem_type_id[in] mem type ID + + @return 0 when not applicable and/or invalid arguments + */ +extern vied_nci_resource_size_t ia_css_program_manifest_get_ext_mem_size( + const ia_css_program_manifest_t *manifest, + const vied_nci_mem_type_ID_t mem_type_id); + +/*! Set the memory resource (size) specification for a memory + that does not belong to the cell where the program will be mapped + + @param manifest[in] program manifest object + @param mem_type_id[in] mem type id + @param ext_mem_size[in] external memory size + + @return < 0 on invalid arguments + */ +extern int ia_css_program_manifest_set_ext_mem_size( + ia_css_program_manifest_t *manifest, + const vied_nci_mem_type_ID_t mem_type_id, + const vied_nci_resource_size_t ext_mem_size); + +/*! Get a device channel resource (size) specification + + @param manifest[in] program manifest object + @param dev_chn_id[in] device channel ID + + @return 0 when not applicable and/or invalid arguments + */ +extern vied_nci_resource_size_t ia_css_program_manifest_get_dev_chn_size( + const ia_css_program_manifest_t *manifest, + const vied_nci_dev_chn_ID_t dev_chn_id); + +/*! Set a device channel resource (size) specification + + @param manifest[in] program manifest object + @param dev_chn_id[in] device channel ID + @param dev_chn_size[in] device channel size + + @return < 0 on invalid arguments + */ +extern int ia_css_program_manifest_set_dev_chn_size( + ia_css_program_manifest_t *manifest, + const vied_nci_dev_chn_ID_t dev_chn_id, + const vied_nci_resource_size_t dev_chn_size); + +/*! 
Set a device channel resource (offset) specification + + @param manifest[in] program manifest object + @param dev_chn_id[in] device channel ID + @param dev_chn_offset[in] device channel offset + + @return < 0 on invalid arguments + */ +extern int ia_css_program_manifest_set_dev_chn_offset( + ia_css_program_manifest_t *manifest, + const vied_nci_dev_chn_ID_t dev_chn_id, + const vied_nci_resource_size_t dev_chn_offset); + + +/*! Set the memory resource (offset) specification for a memory + that does not belong to the cell where the program will be mapped + + @param manifest[in] program manifest object + @param mem_type_id[in] mem type id + @param ext_mem_offset[in] external memory offset + + @return < 0 on invalid arguments + */ +extern int ia_css_program_manifest_set_ext_mem_offset( + ia_css_program_manifest_t *manifest, + const vied_nci_mem_type_ID_t mem_type_id, + const vied_nci_resource_size_t ext_mem_offset); + +/*! Get a device channel resource (offset) specification + + @param manifest[in] program manifest object + @param dev_chn_id[in] device channel ID + + @return Valid fixed offset (if value is greater or equal to 0) or + IA_CSS_PROGRAM_MANIFEST_RESOURCE_OFFSET_IS_RELOCATABLE if offset + is relocatable + */ +extern vied_nci_resource_size_t ia_css_program_manifest_get_dev_chn_offset( + const ia_css_program_manifest_t *manifest, + const vied_nci_dev_chn_ID_t dev_chn_id); + +/*! Get the memory resource (offset) specification for a memory + that does not belong to the cell where the program will be mapped. + + + @param manifest[in] program manifest object + @param mem_type_id[in] mem type ID + + @return Valid fixed offset (if value is greater or equal to 0) or + IA_CSS_PROGRAM_MANIFEST_RESOURCE_OFFSET_IS_RELOCATABLE if offset + is relocatable + */ +extern vied_nci_resource_size_t ia_css_program_manifest_get_ext_mem_offset( + const ia_css_program_manifest_t *manifest, + const vied_nci_mem_type_ID_t mem_type_id); + + +/*! 
Get the kernel composition of the program manifest object + + @param manifest[in] program manifest object + + @return bitmap, 0 on invalid arguments + */ +extern ia_css_kernel_bitmap_t ia_css_program_manifest_get_kernel_bitmap( + const ia_css_program_manifest_t *manifest); + +/*! Set the kernel dependency of the program manifest object + + @param manifest[in] program manifest object + @param kernel_bitmap[in] kernel composition bitmap + + @return < 0 on invalid arguments + */ +extern int ia_css_program_manifest_set_kernel_bitmap( + ia_css_program_manifest_t *manifest, + const ia_css_kernel_bitmap_t kernel_bitmap); + +/*! Get the number of programs this programs depends on from the program group + * manifest object + + @param manifest[in] program manifest object + + @return program dependency count + */ +extern uint8_t ia_css_program_manifest_get_program_dependency_count( + const ia_css_program_manifest_t *manifest); + +/*! Get the index of the program which the programs at this index depends on + from the program manifest object + + @param manifest[in] program manifest object + + @return program dependency, + IA_CSS_PROGRAM_INVALID_DEPENDENCY on invalid arguments + */ +extern uint8_t ia_css_program_manifest_get_program_dependency( + const ia_css_program_manifest_t *manifest, + const unsigned int index); + +/*! Set the index of the program which the programs at this index depends on + in the program manifest object + + @param manifest[in] program manifest object + + @return program dependency + */ +extern int ia_css_program_manifest_set_program_dependency( + ia_css_program_manifest_t *manifest, + const uint8_t program_dependency, + const unsigned int index); + +/*! 
Get the number of terminals this program depends on from the program group + * manifest object + + @param manifest[in] program manifest object + + @return terminal dependency count + */ +extern uint8_t ia_css_program_manifest_get_terminal_dependency_count( + const ia_css_program_manifest_t *manifest); + +/*! Get the index of the terminal which the programs at this index depends on + from the program manifest object + + @param manifest[in] program manifest object + + @return terminal dependency, IA_CSS_PROGRAM_INVALID_DEPENDENCY on error + */ +uint8_t ia_css_program_manifest_get_terminal_dependency( + const ia_css_program_manifest_t *manifest, + const unsigned int index); + +/*! Set the index of the terminal which the programs at this index depends on + in the program manifest object + + @param manifest[in] program manifest object + + @return < 0 on invalid arguments + */ +extern int ia_css_program_manifest_set_terminal_dependency( + ia_css_program_manifest_t *manifest, + const uint8_t terminal_dependency, + const unsigned int index); + +/*! Check if the program manifest object specifies a subnode program + + @param manifest[in] program manifest object + + @return is_subnode, false on invalid argument + */ +extern bool ia_css_is_program_manifest_subnode_program_type( + const ia_css_program_manifest_t *manifest); + +/*! Check if the program manifest object specifies a supernode program + + @param manifest[in] program manifest object + + @return is_supernode, false on invalid argument + */ +extern bool ia_css_is_program_manifest_supernode_program_type( + const ia_css_program_manifest_t *manifest); +/*! 
Check if the program manifest object specifies a singular program + + @param manifest[in] program manifest object + + @return is_singular, false on invalid argument + */ +extern bool ia_css_is_program_manifest_singular_program_type( + const ia_css_program_manifest_t *manifest); + +#endif /* __IA_CSS_PSYS_PROGRAM_MANIFEST_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/psysapi/static/interface/ia_css_psys_program_manifest.hsys.kernel.h b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/psysapi/static/interface/ia_css_psys_program_manifest.hsys.kernel.h new file mode 100644 index 0000000000000..9d737b75a576b --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/psysapi/static/interface/ia_css_psys_program_manifest.hsys.kernel.h @@ -0,0 +1,96 @@ +/* +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ + +#ifndef __IA_CSS_PSYS_PROGRAM_MANIFEST_HSYS_KERNEL_H +#define __IA_CSS_PSYS_PROGRAM_MANIFEST_HSYS_KERNEL_H + +/*! \file */ + +/** @file ia_css_psys_program_manifest.hsys.kernel.h + * + * Define the methods on the program manifest object: Hsys kernel interface + */ + +#include + +#include + +#include /* uint8_t */ + +/* + * Resources needs + */ + +/*! Get the cell ID from the program manifest object + + @param manifest[in] program manifest object + + Note: If the cell ID is specified, the program this manifest belongs to + must be mapped on that instance. 
If the cell ID is invalid (limit value) + then the cell type ID must be specified instead + + @return cell ID, limit value if not specified + */ +extern vied_nci_cell_ID_t ia_css_program_manifest_get_cell_ID( + const ia_css_program_manifest_t *manifest); + +/*! Get the cell type ID from the program manifest object + + @param manifest[in] program manifest object + + Note: If the cell type ID is specified, the program this manifest belongs + to can be mapped on any instance of this cell type. If the cell type ID is + invalid (limit value) then a specific cell ID must be specified instead + + @return cell type ID, limit value if not specified + */ +extern vied_nci_cell_type_ID_t ia_css_program_manifest_get_cell_type_ID( + const ia_css_program_manifest_t *manifest); + +/*! Get the memory resource (size) specification for a memory + that belongs to the cell where the program will be mapped + + @param manifest[in] program manifest object + @param mem_type_id[in] mem type ID + + @return 0 when not applicable + */ +extern vied_nci_resource_size_t ia_css_program_manifest_get_int_mem_size( + const ia_css_program_manifest_t *manifest, + const vied_nci_mem_type_ID_t mem_type_id); + +/*! Get the memory resource (size) specification for a memory + that does not belong to the cell where the program will be mapped + + @param manifest[in] program manifest object + @param mem_type_id[in] mem type ID + + @return 0 when not applicable + */ +extern vied_nci_resource_size_t ia_css_program_manifest_get_ext_mem_size( + const ia_css_program_manifest_t *manifest, + const vied_nci_mem_type_ID_t mem_type_id); + +/*! 
Get a device channel resource (size) specification + + @param manifest[in] program manifest object + @param dev_chn_id[in] device channel ID + + @return 0 when not applicable + */ +extern vied_nci_resource_size_t ia_css_program_manifest_get_dev_chn_size( + const ia_css_program_manifest_t *manifest, + const vied_nci_dev_chn_ID_t dev_chn_id); + +#endif /* __IA_CSS_PSYS_PROGRAM_MANIFEST_HSYS_KERNEL_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/psysapi/static/interface/ia_css_psys_program_manifest.hsys.user.h b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/psysapi/static/interface/ia_css_psys_program_manifest.hsys.user.h new file mode 100644 index 0000000000000..087c84b7106e5 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/psysapi/static/interface/ia_css_psys_program_manifest.hsys.user.h @@ -0,0 +1,38 @@ +/* +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ + +#ifndef __IA_CSS_PSYS_PROGRAM_MANIFEST_HSYS_USER_H +#define __IA_CSS_PSYS_PROGRAM_MANIFEST_HSYS_USER_H + +/*! \file */ + +/** @file ia_css_psys_program_manifest.hsys.user.h + * + * Define the methods on the program manifest object: Hsys user interface + */ + +#include + +/*! 
Print the program manifest object to file/stream + + @param manifest[in] program manifest object + @param fid[out] file/stream handle + + @return < 0 on error + */ +extern int ia_css_program_manifest_print( + const ia_css_program_manifest_t *manifest, + void *fid); + +#endif /* __IA_CSS_PSYS_PROGRAM_MANIFEST_HSYS_USER_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/psysapi/static/interface/ia_css_psys_program_manifest.sim.h b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/psysapi/static/interface/ia_css_psys_program_manifest.sim.h new file mode 100644 index 0000000000000..0c2cef11f30eb --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/psysapi/static/interface/ia_css_psys_program_manifest.sim.h @@ -0,0 +1,61 @@ +/* +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ + +#ifndef __IA_CSS_PSYS_PROGRAM_MANIFEST_SIM_H +#define __IA_CSS_PSYS_PROGRAM_MANIFEST_SIM_H + +/*! \file */ + +/** @file ia_css_psys_program_manifest.sim.h + * + * Define the methods on the program manifest object: Simulation only + */ + +#include + +#include /* uint8_t */ + +/*! 
Compute the size of storage required for allocating + * the program manifest object + + @param program_dependency_count[in] Number of programs this one depends on + @param terminal_dependency_count[in] Number of terminals this one depends on + + @return 0 on error + */ +extern size_t ia_css_sizeof_program_manifest( + const uint8_t program_dependency_count, + const uint8_t terminal_dependency_count); + +/*! Create (the storage for) the program manifest object + + @param program_dependency_count[in] Number of programs this one depends on + @param terminal_dependency_count[in] Number of terminals this one depends on + + @return NULL on error + */ +extern ia_css_program_manifest_t *ia_css_program_manifest_alloc( + const uint8_t program_dependency_count, + const uint8_t terminal_dependency_count); + +/*! Destroy (the storage of) the program manifest object + + @param manifest[in] program manifest + + @return NULL + */ +extern ia_css_program_manifest_t *ia_css_program_manifest_free( + ia_css_program_manifest_t *manifest); + +#endif /* __IA_CSS_PSYS_PROGRAM_MANIFEST_SIM_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/psysapi/static/interface/ia_css_psys_static_storage_class.h b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/psysapi/static/interface/ia_css_psys_static_storage_class.h new file mode 100644 index 0000000000000..f3c832b5a4a33 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/psysapi/static/interface/ia_css_psys_static_storage_class.h @@ -0,0 +1,28 @@ +/* +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. 
+ * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ + +#ifndef __IA_CSS_PSYS_STATIC_STORAGE_CLASS_H +#define __IA_CSS_PSYS_STATIC_STORAGE_CLASS_H + +#include "storage_class.h" + +#ifndef __IA_CSS_PSYS_STATIC_INLINE__ +#define IA_CSS_PSYS_STATIC_STORAGE_CLASS_H STORAGE_CLASS_EXTERN +#define IA_CSS_PSYS_STATIC_STORAGE_CLASS_C +#else +#define IA_CSS_PSYS_STATIC_STORAGE_CLASS_H STORAGE_CLASS_INLINE +#define IA_CSS_PSYS_STATIC_STORAGE_CLASS_C STORAGE_CLASS_INLINE +#endif + +#endif /* __IA_CSS_PSYS_STATIC_STORAGE_CLASS_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/psysapi/static/interface/ia_css_psys_static_trace.h b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/psysapi/static/interface/ia_css_psys_static_trace.h new file mode 100644 index 0000000000000..7c5612cd09690 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/psysapi/static/interface/ia_css_psys_static_trace.h @@ -0,0 +1,103 @@ +/* +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+*/ + +#ifndef __IA_CSS_PSYS_STATIC_TRACE_H +#define __IA_CSS_PSYS_STATIC_TRACE_H + +#include "ia_css_psysapi_trace.h" + +#define PSYS_STATIC_TRACE_LEVEL_CONFIG_DEFAULT PSYSAPI_TRACE_LOG_LEVEL_OFF + +/* Default sub-module tracing config */ +#if (!defined(PSYSAPI_STATIC_TRACING_OVERRIDE)) + #define PSYS_STATIC_TRACE_LEVEL_CONFIG \ + PSYS_STATIC_TRACE_LEVEL_CONFIG_DEFAULT +#endif + +/* Module/sub-module specific trace setting will be used if + * the trace level is not specified from the module or + PSYSAPI_STATIC_TRACING_OVERRIDE is defined + */ +#if (defined(PSYSAPI_STATIC_TRACING_OVERRIDE)) + /* Module/sub-module specific trace setting */ + #if PSYSAPI_STATIC_TRACING_OVERRIDE == PSYSAPI_TRACE_LOG_LEVEL_OFF + /* PSYSAPI_TRACE_LOG_LEVEL_OFF */ + #define PSYSAPI_STATIC_TRACE_METHOD \ + IA_CSS_TRACE_METHOD_NATIVE + #define PSYSAPI_STATIC_TRACE_LEVEL_ASSERT \ + IA_CSS_TRACE_LEVEL_DISABLED + #define PSYSAPI_STATIC_TRACE_LEVEL_ERROR \ + IA_CSS_TRACE_LEVEL_DISABLED + #define PSYSAPI_STATIC_TRACE_LEVEL_WARNING \ + IA_CSS_TRACE_LEVEL_DISABLED + #define PSYSAPI_STATIC_TRACE_LEVEL_INFO \ + IA_CSS_TRACE_LEVEL_DISABLED + #define PSYSAPI_STATIC_TRACE_LEVEL_DEBUG \ + IA_CSS_TRACE_LEVEL_DISABLED + #define PSYSAPI_STATIC_TRACE_LEVEL_VERBOSE \ + IA_CSS_TRACE_LEVEL_DISABLED + #elif PSYSAPI_STATIC_TRACING_OVERRIDE == PSYSAPI_TRACE_LOG_LEVEL_NORMAL + /* PSYSAPI_TRACE_LOG_LEVEL_NORMAL */ + #define PSYSAPI_STATIC_TRACE_METHOD \ + IA_CSS_TRACE_METHOD_NATIVE + #define PSYSAPI_STATIC_TRACE_LEVEL_ASSERT \ + IA_CSS_TRACE_LEVEL_DISABLED + #define PSYSAPI_STATIC_TRACE_LEVEL_ERROR \ + IA_CSS_TRACE_LEVEL_ENABLED + #define PSYSAPI_STATIC_TRACE_LEVEL_WARNING \ + IA_CSS_TRACE_LEVEL_DISABLED + #define PSYSAPI_STATIC_TRACE_LEVEL_INFO \ + IA_CSS_TRACE_LEVEL_ENABLED + #define PSYSAPI_STATIC_TRACE_LEVEL_DEBUG \ + IA_CSS_TRACE_LEVEL_DISABLED + #define PSYSAPI_STATIC_TRACE_LEVEL_VERBOSE \ + IA_CSS_TRACE_LEVEL_DISABLED + #elif PSYSAPI_STATIC_TRACING_OVERRIDE == PSYSAPI_TRACE_LOG_LEVEL_DEBUG + /* 
PSYSAPI_TRACE_LOG_LEVEL_DEBUG */ + #define PSYSAPI_STATIC_TRACE_METHOD \ + IA_CSS_TRACE_METHOD_NATIVE + #define PSYSAPI_STATIC_TRACE_LEVEL_ASSERT \ + IA_CSS_TRACE_LEVEL_ENABLED + #define PSYSAPI_STATIC_TRACE_LEVEL_ERROR \ + IA_CSS_TRACE_LEVEL_ENABLED + #define PSYSAPI_STATIC_TRACE_LEVEL_WARNING \ + IA_CSS_TRACE_LEVEL_ENABLED + #define PSYSAPI_STATIC_TRACE_LEVEL_INFO \ + IA_CSS_TRACE_LEVEL_ENABLED + #define PSYSAPI_STATIC_TRACE_LEVEL_DEBUG \ + IA_CSS_TRACE_LEVEL_ENABLED + #define PSYSAPI_STATIC_TRACE_LEVEL_VERBOSE \ + IA_CSS_TRACE_LEVEL_ENABLED + #else + #error "No PSYSAPI_STATIC Tracing level defined" + #endif +#else + /* Inherit Module trace setting */ + #define PSYSAPI_STATIC_TRACE_METHOD \ + PSYSAPI_TRACE_METHOD + #define PSYSAPI_STATIC_TRACE_LEVEL_ASSERT \ + PSYSAPI_TRACE_LEVEL_ASSERT + #define PSYSAPI_STATIC_TRACE_LEVEL_ERROR \ + PSYSAPI_TRACE_LEVEL_ERROR + #define PSYSAPI_STATIC_TRACE_LEVEL_WARNING \ + PSYSAPI_TRACE_LEVEL_WARNING + #define PSYSAPI_STATIC_TRACE_LEVEL_INFO \ + PSYSAPI_TRACE_LEVEL_INFO + #define PSYSAPI_STATIC_TRACE_LEVEL_DEBUG \ + PSYSAPI_TRACE_LEVEL_DEBUG + #define PSYSAPI_STATIC_TRACE_LEVEL_VERBOSE \ + PSYSAPI_TRACE_LEVEL_VERBOSE +#endif + +#endif /* __IA_CSS_PSYS_STATIC_TRACE_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/psysapi/static/interface/ia_css_psys_terminal_manifest.h b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/psysapi/static/interface/ia_css_psys_terminal_manifest.h new file mode 100644 index 0000000000000..0fa62b32e1a74 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/psysapi/static/interface/ia_css_psys_terminal_manifest.h @@ -0,0 +1,423 @@ +/* +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. 
+ * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ + +#ifndef __IA_CSS_PSYS_TERMINAL_MANIFEST_H +#define __IA_CSS_PSYS_TERMINAL_MANIFEST_H + +/*! \file */ + +/** @file ia_css_psys_terminal_manifest.h + * + * Define the methods on the terminal manifest object that are not part of a + * single interface + */ + +#include + +#include + +#include + +#include /* ia_css_frame_format_bitmap_t */ +#include /* ia_css_kernel_bitmap_t */ + +#include /* size_t */ +#include "ia_css_terminal_manifest.h" +#include "ia_css_terminal_manifest_base_types.h" + + +/*! Check if the terminal manifest object specifies a spatial param terminal + * type + + @param manifest[in] terminal manifest object + + @return is_parameter_terminal, false on invalid manifest argument + */ +extern bool ia_css_is_terminal_manifest_spatial_parameter_terminal( + const ia_css_terminal_manifest_t *manifest); + +/*! Check if the terminal manifest object specifies a program terminal type + + @param manifest[in] terminal manifest object + + @return is_parameter_terminal, false on invalid manifest argument + */ +extern bool ia_css_is_terminal_manifest_program_terminal( + const ia_css_terminal_manifest_t *manifest); + + +/*! Check if the terminal manifest object specifies a program control init terminal type + * + * @param manifest[in] terminal manifest object + * + * @return is_parameter_terminal, false on invalid manifest argument + */ +extern bool ia_css_is_terminal_manifest_program_control_init_terminal( + const ia_css_terminal_manifest_t *manifest); + +/*! 
Check if the terminal manifest object specifies a (cached) parameter + * terminal type + + @param manifest[in] terminal manifest object + + @return is_parameter_terminal, false on invalid manifest argument + */ +extern bool ia_css_is_terminal_manifest_parameter_terminal( + const ia_css_terminal_manifest_t *manifest); + +/*! Check if the terminal manifest object specifies a (sliced) parameter + * terminal type + + @param manifest[in] terminal manifest object + + @return is_parameter_terminal, false on invalid manifest argument + */ +extern bool ia_css_is_terminal_manifest_sliced_terminal( + const ia_css_terminal_manifest_t *manifest); + +/*! Check if the terminal manifest object specifies a data terminal type + + @param manifest[in] terminal manifest object + + @return is_data_terminal, false on invalid manifest argument + */ +extern bool ia_css_is_terminal_manifest_data_terminal( + const ia_css_terminal_manifest_t *manifest); + +/*! Get the stored size of the terminal manifest object + + @param manifest[in] terminal manifest object + + @return size, 0 on invalid manifest argument + */ +extern size_t ia_css_terminal_manifest_get_size( + const ia_css_terminal_manifest_t *manifest); + +/*! Get the (pointer to) the program group manifest parent of the terminal + * manifest object + + @param manifest[in] terminal manifest object + + @return the pointer to the parent, NULL on invalid manifest argument + */ +extern ia_css_program_group_manifest_t *ia_css_terminal_manifest_get_parent( + const ia_css_terminal_manifest_t *manifest); + +/*! Set the (pointer to) the program group manifest parent of the terminal + * manifest object + + @param manifest[in] terminal manifest object + @param terminal_offset[in] this terminal's offset from + program_group_manifest base address. + + @return < 0 on invalid arguments + */ +extern int ia_css_terminal_manifest_set_parent_offset( + ia_css_terminal_manifest_t *manifest, + int32_t terminal_offset); + +/*! 
Get the type of the terminal manifest object + + @param manifest[in] terminal manifest object + + @return terminal type, limit value (IA_CSS_N_TERMINAL_TYPES) on invalid + manifest argument +*/ +extern ia_css_terminal_type_t ia_css_terminal_manifest_get_type( + const ia_css_terminal_manifest_t *manifest); + +/*! Set the type of the terminal manifest object + + @param manifest[in] terminal manifest object + @param terminal_type[in] terminal type + + @return < 0 on invalid manifest argument + */ +extern int ia_css_terminal_manifest_set_type( + ia_css_terminal_manifest_t *manifest, + const ia_css_terminal_type_t terminal_type); + +/*! Set the ID of the terminal manifest object + + @param manifest[in] terminal manifest object + @param ID[in] terminal ID + + @return < 0 on invalid manifest argument + */ +int ia_css_terminal_manifest_set_ID( + ia_css_terminal_manifest_t *manifest, + const ia_css_terminal_ID_t ID); + +/*! Get the type of the terminal manifest object + + @param manifest[in] terminal manifest object + + @return terminal id, IA_CSS_TERMINAL_INVALID_ID on invalid manifest argument + */ +extern ia_css_terminal_ID_t ia_css_terminal_manifest_get_ID( + const ia_css_terminal_manifest_t *manifest); + +/*! Get the supported frame types of the (data) terminal manifest object + + @param manifest[in] (data) terminal manifest object + + @return frame format bitmap, 0 on invalid manifest argument +*/ +extern ia_css_frame_format_bitmap_t + ia_css_data_terminal_manifest_get_frame_format_bitmap( + const ia_css_data_terminal_manifest_t *manifest); + +/*! Set the chosen frame type for the (data) terminal manifest object + + @param manifest[in] (data) terminal manifest object + @param bitmap[in] frame format bitmap + + @return < 0 on invalid manifest argument + */ +extern int ia_css_data_terminal_manifest_set_frame_format_bitmap( + ia_css_data_terminal_manifest_t *manifest, + ia_css_frame_format_bitmap_t bitmap); + +/*! 
Check if the (data) terminal manifest object supports compression + + @param manifest[in] (data) terminal manifest object + + @return compression_support, true if compression is supported + */ +extern bool ia_css_data_terminal_manifest_can_support_compression( + const ia_css_data_terminal_manifest_t *manifest); + +/*! Set the compression support feature of the (data) terminal manifest object + + @param manifest[in] (data) terminal manifest object + @param compression_support[in] set true to support compression + + @return < 0 on invalid manifest argument + */ +extern int ia_css_data_terminal_manifest_set_compression_support( + ia_css_data_terminal_manifest_t *manifest, + bool compression_support); + +/*! Set the supported connection types of the terminal manifest object + + @param manifest[in] (data) terminal manifest object + @param bitmap[in] connection bitmap + + @return < 0 on invalid manifest argument + */ +extern int ia_css_data_terminal_manifest_set_connection_bitmap( + ia_css_data_terminal_manifest_t *manifest, ia_css_connection_bitmap_t bitmap); + +/*! Get the connection bitmap of the (data) terminal manifest object + + @param manifest[in] (data) terminal manifest object + + @return connection bitmap, 0 on invalid manifest argument +*/ +extern ia_css_connection_bitmap_t + ia_css_data_terminal_manifest_get_connection_bitmap( + const ia_css_data_terminal_manifest_t *manifest); + +/*! Get the kernel dependency of the (data) terminal manifest object + + @param manifest[in] (data) terminal manifest object + + @return kernel bitmap, 0 on invalid manifest argument + */ +extern ia_css_kernel_bitmap_t ia_css_data_terminal_manifest_get_kernel_bitmap( + const ia_css_data_terminal_manifest_t *manifest); + +/*! 
Set the kernel dependency of the (data) terminal manifest object + + @param manifest[in] (data) terminal manifest object + @param kernel_bitmap[in] kernel dependency bitmap + + @return < 0 on invalid manifest argument + */ +extern int ia_css_data_terminal_manifest_set_kernel_bitmap( + ia_css_data_terminal_manifest_t *manifest, + const ia_css_kernel_bitmap_t kernel_bitmap); + +/*! Set the unique kernel dependency of the (data) terminal manifest object + + @param manifest[in] (data) terminal manifest object + @param index[in] kernel dependency bitmap index + + @return < 0 on invalid argument(s) + */ +extern int ia_css_data_terminal_manifest_set_kernel_bitmap_unique( + ia_css_data_terminal_manifest_t *manifest, + const unsigned int index); + +/*! Set the min size of the (data) terminal manifest object + + @param manifest[in] (data) terminal manifest object + @param min_size[in] Minimum size of the frame array + + @return < 0 on invalid manifest argument + */ +extern int ia_css_data_terminal_manifest_set_min_size( + ia_css_data_terminal_manifest_t *manifest, + const uint16_t min_size[IA_CSS_N_DATA_DIMENSION]); + +/*! Set the max size of the (data) terminal manifest object + + @param manifest[in] (data) terminal manifest object + @param max_size[in] Maximum size of the frame array + + @return < 0 on invalid manifest argument + */ +extern int ia_css_data_terminal_manifest_set_max_size( + ia_css_data_terminal_manifest_t *manifest, + const uint16_t max_size[IA_CSS_N_DATA_DIMENSION]); + +/*! Get the min size of the (data) terminal manifest object + + @param manifest[in] (data) terminal manifest object + @param min_size[in] Minimum size of the frame array + + @return < 0 on invalid manifest argument + */ +extern int ia_css_data_terminal_manifest_get_min_size( + const ia_css_data_terminal_manifest_t *manifest, + uint16_t min_size[IA_CSS_N_DATA_DIMENSION]); + +/*! 
Get the max size of the (data) terminal manifest object + + @param manifest[in] (data) terminal manifest object + @param max_size[in] Maximum size of the frame array + + @return < 0 on invalid manifest argument + */ +extern int ia_css_data_terminal_manifest_get_max_size( + const ia_css_data_terminal_manifest_t *manifest, + uint16_t max_size[IA_CSS_N_DATA_DIMENSION]); + +/*! Set the min fragment size of the (data) terminal manifest object + + @param manifest[in] (data) terminal manifest object + @param min_size[in] Minimum size of the fragment array + + @return < 0 on invalid manifest argument + */ +extern int ia_css_data_terminal_manifest_set_min_fragment_size( + ia_css_data_terminal_manifest_t *manifest, + const uint16_t min_size[IA_CSS_N_DATA_DIMENSION]); + +/*! Set the max fragment size of the (data) terminal manifest object + + @param manifest[in] (data) terminal manifest object + @param max_size[in] Maximum size of the fragment array + + @return < 0 on invalid manifest argument + */ +extern int ia_css_data_terminal_manifest_set_max_fragment_size( + ia_css_data_terminal_manifest_t *manifest, + const uint16_t max_size[IA_CSS_N_DATA_DIMENSION]); + +/*! Get the min fragment size of the (data) terminal manifest object + + @param manifest[in] (data) terminal manifest object + @param min_size[in] Minimum size of the fragment array + + @return < 0 on invalid manifest argument + */ +extern int ia_css_data_terminal_manifest_get_min_fragment_size( + const ia_css_data_terminal_manifest_t *manifest, + uint16_t min_size[IA_CSS_N_DATA_DIMENSION]); + +/*! Get the max fragment size of the (data) terminal manifest object + + @param manifest[in] (data) terminal manifest object + @param max_size[in] Maximum size of the fragment array + + @return < 0 on invalid manifest argument + */ +extern int ia_css_data_terminal_manifest_get_max_fragment_size( + const ia_css_data_terminal_manifest_t *manifest, + uint16_t max_size[IA_CSS_N_DATA_DIMENSION]); + +/*! 
+ * Get the program control init connect section count for program prog. + * @param prog[in] program control init terminal program desc + * @return number of connect section for program prog. + */ + +extern +unsigned int ia_css_program_control_init_terminal_manifest_get_connect_section_count( + const ia_css_program_control_init_manifest_program_desc_t *prog); + + +/*! + * Get the program control init load section count for program prog. + * @param prog[in] program control init terminal program desc + * @return number of load section for program prog. + */ + +extern +unsigned int ia_css_program_control_init_terminal_manifest_get_load_section_count( + const ia_css_program_control_init_manifest_program_desc_t *prog); + +/*! + * Get the program control init terminal manifest size. + * @param nof_programs[in] Number of programs. + * @param nof_load_sections[in] Array of size nof_programs, + * encoding the number of load sections. + * @param nof_connect_sections[in] Array of size nof_programs, + * encoding the number of connect sections. + * @return < 0 on invalid manifest argument + */ +extern +unsigned int ia_css_program_control_init_terminal_manifest_get_size( + const uint16_t nof_programs, + const uint16_t *nof_load_sections, + const uint16_t *nof_connect_sections); + +/*! + * Get the program control init terminal manifest program desc. + * @param terminal[in] Program control init terminal. + * @param program[in] Number of programs. + * @return program control init terminal program desc (or NULL if error). + */ +extern +ia_css_program_control_init_manifest_program_desc_t * +ia_css_program_control_init_terminal_manifest_get_program_desc( + const ia_css_program_control_init_terminal_manifest_t *terminal, + unsigned int program); + +/*! + * Initialize the program control init terminal manifest. + * @param nof_programs[in] Number of programs + * @param nof_load_sections[in] Array of size nof_programs, + * encoding the number of load sections. 
+ * @param nof_connect_sections[in] Array of size nof_programs, + * encoding the number of connect sections. + * @return < 0 on invalid manifest argument + */ +extern +int ia_css_program_control_init_terminal_manifest_init( + ia_css_program_control_init_terminal_manifest_t *terminal, + const uint16_t nof_programs, + const uint16_t *nof_load_sections, + const uint16_t *nof_connect_sections); + +/*! + * Pretty prints the program control init terminal manifest. + * @param terminal[in] Program control init terminal. + */ +extern +void ia_css_program_control_init_terminal_manifest_print( + ia_css_program_control_init_terminal_manifest_t *terminal); + +#endif /* __IA_CSS_PSYS_TERMINAL_MANIFEST_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/psysapi/static/interface/ia_css_psys_terminal_manifest.hsys.user.h b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/psysapi/static/interface/ia_css_psys_terminal_manifest.hsys.user.h new file mode 100644 index 0000000000000..1d2f06f3cbce9 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/psysapi/static/interface/ia_css_psys_terminal_manifest.hsys.user.h @@ -0,0 +1,38 @@ +/* +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ + +#ifndef __IA_CSS_PSYS_TERMINAL_MANIFEST_HSYS_USER_H +#define __IA_CSS_PSYS_TERMINAL_MANIFEST_HSYS_USER_H + +/*! 
\file */ + +/** @file ia_css_psys_terminal.hsys.user.h + * + * Define the methods on the terminal manifest object: Hsys user interface + */ + +#include + +/*! Print the terminal manifest object to file/stream + + @param manifest[in] terminal manifest object + @param fid[out] file/stream handle + + @return < 0 on error + */ +extern int ia_css_terminal_manifest_print( + const ia_css_terminal_manifest_t *manifest, + void *fid); + +#endif /* __IA_CSS_PSYS_TERMINAL_MANIFEST_HSYS_USER_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/psysapi/static/interface/ia_css_psys_terminal_manifest.sim.h b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/psysapi/static/interface/ia_css_psys_terminal_manifest.sim.h new file mode 100644 index 0000000000000..f7da810d82f19 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/psysapi/static/interface/ia_css_psys_terminal_manifest.sim.h @@ -0,0 +1,48 @@ +/* +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ + +#ifndef __IA_CSS_PSYS_TERMINAL_MANIFEST_SIM_H +#define __IA_CSS_PSYS_TERMINAL_MANIFEST_SIM_H + +/*! \file */ + +/** @file ia_css_psys_terminal_manifest.sim.h + * + * Define the methods on the terminal manifest object: Simulation only + */ + +#include /* size_t */ +#include "ia_css_terminal.h" +#include "ia_css_terminal_manifest.h" +#include "ia_css_terminal_defs.h" + +/*! 
Create (the storage for) the terminal manifest object + + @param terminal_type[in] type of the terminal manifest {parameter, data} + + @return NULL on error + */ +extern ia_css_terminal_manifest_t *ia_css_terminal_manifest_alloc( + const ia_css_terminal_type_t terminal_type); + +/*! Destroy (the storage of) the terminal manifest object + + @param manifest[in] terminal manifest + + @return NULL + */ +extern ia_css_terminal_manifest_t *ia_css_terminal_manifest_free( + ia_css_terminal_manifest_t *manifest); + +#endif /* __IA_CSS_PSYS_TERMINAL_MANIFEST_SIM_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/psysapi/static/src/ia_css_psys_program_group_manifest.c b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/psysapi/static/src/ia_css_psys_program_group_manifest.c new file mode 100644 index 0000000000000..5af4de7463104 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/psysapi/static/src/ia_css_psys_program_group_manifest.c @@ -0,0 +1,1038 @@ +/* +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+*/ + +#include "ia_css_psys_static_storage_class.h" +#include "ia_css_psys_program_group_manifest.h" +#include "ia_css_rbm_manifest.h" + +/* + * Functions to possibly inline + */ + +#ifndef __IA_CSS_PSYS_STATIC_INLINE__ +#include "ia_css_psys_program_group_manifest_impl.h" +#endif /* __IA_CSS_PSYS_STATIC_INLINE__ */ + +/* + * Functions not to inline + */ + +/* + * We need to refactor those files in order to + * build in the firmware only what is needed, + * switches are put current to workaround compilation problems + * in the firmware (for example lack of uint64_t support) + * supported in the firmware + */ +#if !defined(__HIVECC) +size_t ia_css_sizeof_program_group_manifest( + const uint8_t program_count, + const uint8_t terminal_count, + const uint8_t *program_dependency_count, + const uint8_t *terminal_dependency_count, + const ia_css_terminal_type_t *terminal_type, + const uint16_t cached_in_param_section_count, + const uint16_t cached_out_param_section_count, + const uint16_t *spatial_param_section_count, + const uint16_t fragment_param_section_count, + const uint16_t *sliced_param_section_count, + const uint16_t *sliced_out_param_section_count, + const uint16_t kernel_fragment_seq_count, + const uint16_t *progctrlinit_load_section_counts, + const uint16_t *progctrlinit_connect_section_counts) +{ + size_t size = 0; + int i = 0; + int j = 0; + int m = 0; + int n = 0; + + IA_CSS_TRACE_0(PSYSAPI_STATIC, VERBOSE, + "ia_css_sizeof_program_group_manifest(): enter:\n"); + + verifexit(program_count != 0); + verifexit(program_dependency_count != NULL); + verifexit(terminal_dependency_count != NULL); + + size += sizeof(ia_css_program_group_manifest_t); + + /* Private payload in the program group manifest */ + size += ceil_mul(sizeof(struct ia_css_psys_private_pg_data), + sizeof(uint64_t)); + /* RBM manifest in the program group manifest */ + size += ceil_mul(sizeof(ia_css_rbm_manifest_t), + sizeof(uint64_t)); + + for (i = 0; i < (int)program_count; i++) { + size += 
ia_css_sizeof_program_manifest( + program_dependency_count[i], + terminal_dependency_count[i]); + } + + for (i = 0; i < (int)terminal_count; i++) { + switch (terminal_type[i]) { + case IA_CSS_TERMINAL_TYPE_PARAM_CACHED_IN: + size += ia_css_param_terminal_manifest_get_size( + cached_in_param_section_count); + break; + case IA_CSS_TERMINAL_TYPE_PARAM_CACHED_OUT: + size += ia_css_param_terminal_manifest_get_size( + cached_out_param_section_count); + break; + case IA_CSS_TERMINAL_TYPE_PARAM_SPATIAL_IN: + case IA_CSS_TERMINAL_TYPE_PARAM_SPATIAL_OUT: + size += ia_css_spatial_param_terminal_manifest_get_size( + spatial_param_section_count[j]); + j++; + break; + case IA_CSS_TERMINAL_TYPE_PROGRAM: + size += ia_css_program_terminal_manifest_get_size( + fragment_param_section_count, + kernel_fragment_seq_count); + break; + case IA_CSS_TERMINAL_TYPE_PROGRAM_CONTROL_INIT: + size += ia_css_program_control_init_terminal_manifest_get_size( + program_count, + progctrlinit_load_section_counts, + progctrlinit_connect_section_counts); + break; + case IA_CSS_TERMINAL_TYPE_DATA_IN: + case IA_CSS_TERMINAL_TYPE_DATA_OUT: + size += sizeof(ia_css_data_terminal_manifest_t); + break; + case IA_CSS_TERMINAL_TYPE_PARAM_SLICED_IN: + size += ia_css_sliced_param_terminal_manifest_get_size( + sliced_param_section_count[m]); + m++; + break; + case IA_CSS_TERMINAL_TYPE_PARAM_SLICED_OUT: + size += ia_css_sliced_param_terminal_manifest_get_size( + sliced_out_param_section_count[n]); + n++; + break; + default: + IA_CSS_TRACE_0(PSYSAPI_STATIC, WARNING, + "ia_css_sizeof_program_group_manifest invalid argument\n"); + } + } + +EXIT: + if (0 == program_count || 0 == terminal_count || + NULL == program_dependency_count || + NULL == terminal_dependency_count) { + IA_CSS_TRACE_0(PSYSAPI_STATIC, WARNING, + "ia_css_sizeof_program_group_manifest invalid argument\n"); + } + return size; +} + +/* + * Currently, the design of XNR kernel inside the *_pregdc program group, + * does not fit the exact model as is being 
asserted on in + * ia_css_is_program_group_manifest_valid. We therefore disable some checks. + * Further investigation is needed to determine whether *_pregdc program group + * can be changed or that the model must be changed. + * #define USE_SIMPLIFIED_GRAPH_MODEL 1 allows multiple programs to be + * connected to the same terminal, and it allows a kernel be mapped over + * multiple programs. + */ +#define USE_SIMPLIFIED_GRAPH_MODEL 1 + +/* + * Model and/or check refinements + * - Parallel programs do not yet have mutual exclusive alternatives + * - The program dependencies do not need to be acyclic + * - Parallel programs need to have an equal kernel requirement + */ +bool ia_css_is_program_group_manifest_valid( + const ia_css_program_group_manifest_t *manifest) +{ + int i; + bool is_valid = false; + uint8_t terminal_count; + uint8_t program_count; + ia_css_kernel_bitmap_t total_bitmap; + ia_css_kernel_bitmap_t check_bitmap; + ia_css_kernel_bitmap_t terminal_bitmap; + /* + * Use a standard bitmap type for the minimum logic to check the DAG, + * generic functions can be used for the kernel enable bitmaps; Later + */ + vied_nci_resource_bitmap_t resource_bitmap; + int terminal_bitmap_weight; + bool has_parameter_terminal_in = false; + bool has_parameter_terminal_out = false; + bool has_program_control_init_terminal = false; + bool has_program_terminal = false; + bool has_program_terminal_sequencer_info = false; + + IA_CSS_TRACE_0(PSYSAPI_STATIC, VERBOSE, + "ia_css_is_program_group_manifest_valid(): enter:\n"); + + verifexit(manifest != NULL); + verifexit(ia_css_program_group_manifest_get_size(manifest) != 0); + verifexit(ia_css_program_group_manifest_get_alignment(manifest) != 0); + verifexit(ia_css_program_group_manifest_get_program_group_ID(manifest) != 0); + + terminal_count = + ia_css_program_group_manifest_get_terminal_count(manifest); + program_count = + ia_css_program_group_manifest_get_program_count(manifest); + total_bitmap = + 
ia_css_program_group_manifest_get_kernel_bitmap(manifest); + check_bitmap = ia_css_kernel_bitmap_clear(); + resource_bitmap = vied_nci_bit_mask(VIED_NCI_RESOURCE_BITMAP_BITS); + terminal_bitmap = ia_css_kernel_bitmap_clear(); + + verifexit(program_count != 0); + verifexit(terminal_count != 0); + verifexit(!ia_css_is_kernel_bitmap_empty(total_bitmap)); + verifexit(vied_nci_is_bitmap_empty(resource_bitmap)); + + /* Check the kernel bitmaps for terminals */ + for (i = 0; i < (int)terminal_count; i++) { + ia_css_terminal_manifest_t *terminal_manifest_i = + ia_css_program_group_manifest_get_term_mnfst( + manifest, i); + bool is_parameter_in = + (IA_CSS_TERMINAL_TYPE_PARAM_CACHED_IN == + ia_css_terminal_manifest_get_type( + terminal_manifest_i)); + bool is_parameter_out = + (IA_CSS_TERMINAL_TYPE_PARAM_CACHED_OUT == + ia_css_terminal_manifest_get_type( + terminal_manifest_i)); + bool is_data = + ia_css_is_terminal_manifest_data_terminal( + terminal_manifest_i); + bool is_program = + ia_css_is_terminal_manifest_program_terminal( + terminal_manifest_i); + bool is_spatial_param = + ia_css_is_terminal_manifest_spatial_parameter_terminal( + terminal_manifest_i); + bool is_program_control_init = + ia_css_is_terminal_manifest_program_control_init_terminal( + terminal_manifest_i); + + if (is_parameter_in) { + /* + * There can be only one cached in parameter terminal + * it serves kernels, not programs + */ + verifexit(!has_parameter_terminal_in); + has_parameter_terminal_in = is_parameter_in; + } else if (is_parameter_out) { + /* + * There can be only one cached out parameter terminal + * it serves kernels, not programs + */ + verifexit(!has_parameter_terminal_out); + has_parameter_terminal_out = is_parameter_out; + } else if (is_data) { + ia_css_data_terminal_manifest_t *dterminal_manifest_i = + (ia_css_data_terminal_manifest_t *) + terminal_manifest_i; + ia_css_kernel_bitmap_t terminal_bitmap_i = + ia_css_data_terminal_manifest_get_kernel_bitmap( + dterminal_manifest_i); + /* + 
* A terminal must depend on kernels that are a subset + * of the total, correction, it can only depend on one + * kernel + */ + verifexit(!ia_css_is_kernel_bitmap_empty( + terminal_bitmap_i)); + verifexit(ia_css_is_kernel_bitmap_subset( + total_bitmap, terminal_bitmap_i)); + verifexit(ia_css_is_kernel_bitmap_onehot( + terminal_bitmap_i)); + } else if (is_program) { + verifexit(!has_program_terminal); + verifexit(terminal_manifest_i); + has_program_terminal = is_program; + has_program_terminal_sequencer_info = + (((ia_css_program_terminal_manifest_t *) + terminal_manifest_i)-> + kernel_fragment_sequencer_info_manifest_info_count + != 0); + } else if (is_program_control_init) { + has_program_control_init_terminal = is_program_control_init; + } else { + const ia_css_spatial_param_terminal_manifest_t + *spatial_param_man = + (const ia_css_spatial_param_terminal_manifest_t *) + terminal_manifest_i; + verifexit(spatial_param_man); + verifexit(is_spatial_param); + + terminal_bitmap = + ia_css_kernel_bitmap_set(terminal_bitmap, + spatial_param_man->kernel_id); + verifexit(!ia_css_is_kernel_bitmap_empty(terminal_bitmap)); + verifexit(ia_css_is_kernel_bitmap_subset( + total_bitmap, terminal_bitmap)); + } + } + + /* Check the kernel bitmaps for programs */ + for (i = 0; i < (int)program_count; i++) { + int j; + ia_css_program_manifest_t *program_manifest_i = + ia_css_program_group_manifest_get_prgrm_mnfst( + manifest, i); + ia_css_program_type_t program_type_i = + ia_css_program_manifest_get_type(program_manifest_i); + ia_css_kernel_bitmap_t program_bitmap_i = + ia_css_program_manifest_get_kernel_bitmap( + program_manifest_i); + uint8_t program_dependency_count_i = + ia_css_program_manifest_get_program_dependency_count( + program_manifest_i); + uint8_t terminal_dependency_count_i = + ia_css_program_manifest_get_terminal_dependency_count( + program_manifest_i); + uint8_t program_dependency_i0 = + ia_css_program_manifest_get_program_dependency( + program_manifest_i, 0); + bool 
is_sub_i = + ia_css_is_program_manifest_subnode_program_type( + program_manifest_i); + bool is_exclusive_sub_i = + (program_type_i == IA_CSS_PROGRAM_TYPE_EXCLUSIVE_SUB); + bool is_virtual_sub_i = + (program_type_i == IA_CSS_PROGRAM_TYPE_VIRTUAL_SUB); + bool is_super_i = + ia_css_is_program_manifest_supernode_program_type( + program_manifest_i); + + /* + * A program must have kernels that + * are a subset of the total + */ + verifexit(!ia_css_is_kernel_bitmap_empty( + program_bitmap_i)); + verifexit(ia_css_is_kernel_bitmap_subset( + total_bitmap, program_bitmap_i)); + verifexit((program_type_i != IA_CSS_N_PROGRAM_TYPES)); + verifexit((program_dependency_count_i + terminal_dependency_count_i) != 0); + /* + * Checks for subnodes + * - Parallel subnodes cannot depend on terminals + * - Exclusive subnodes must depend on + * fewer terminals than the supernode + * - Subnodes only depend on a supernode of the same type + * - Must have a subset of the supernode's kernels + * (but not equal) + * - This tests only positive cases + * Checks for singular or supernodes + * - Cannot depend on exclusive subnodes + * - No intersection between kernels + * (too strict for multiple instances ?) + */ + if (is_sub_i) { + /* Subnode */ + ia_css_program_manifest_t *program_manifest_k = + ia_css_program_group_manifest_get_prgrm_mnfst( + manifest, program_dependency_i0); + ia_css_program_type_t program_type_k = + ia_css_program_manifest_get_type( + program_manifest_k); + ia_css_kernel_bitmap_t program_bitmap_k = + ia_css_program_manifest_get_kernel_bitmap( + program_manifest_k); + + verifexit(program_dependency_count_i == 1); + if (is_exclusive_sub_i || is_virtual_sub_i) { + verifexit(terminal_dependency_count_i <= + ia_css_program_manifest_get_terminal_dependency_count( + program_manifest_k)); + } else{ + verifexit(terminal_dependency_count_i == 0); + } + verifexit(program_type_k == + (is_exclusive_sub_i ? + IA_CSS_PROGRAM_TYPE_EXCLUSIVE_SUPER : + is_virtual_sub_i ? 
+ IA_CSS_PROGRAM_TYPE_VIRTUAL_SUPER : + IA_CSS_PROGRAM_TYPE_PARALLEL_SUPER)); + verifexit(!ia_css_is_kernel_bitmap_equal( + program_bitmap_k, program_bitmap_i)); + verifexit(ia_css_is_kernel_bitmap_subset( + program_bitmap_k, program_bitmap_i)); + } else { + /* Singular or Supernode */ + int k; + + for (k = 0; k < program_dependency_count_i; k++) { + uint8_t program_dependency_k = + ia_css_program_manifest_get_program_dependency( + program_manifest_i, k); + ia_css_program_manifest_t *program_manifest_k = + ia_css_program_group_manifest_get_prgrm_mnfst( + manifest, (int)program_dependency_k); + ia_css_program_type_t program_type_k = + ia_css_program_manifest_get_type( + program_manifest_k); + ia_css_kernel_bitmap_t program_bitmap_k = + ia_css_program_manifest_get_kernel_bitmap( + program_manifest_k); + + verifexit(program_dependency_k < + program_count); + verifexit((program_type_k != + IA_CSS_PROGRAM_TYPE_EXCLUSIVE_SUB) && + (program_type_k != + IA_CSS_PROGRAM_TYPE_VIRTUAL_SUB)); +#if USE_SIMPLIFIED_GRAPH_MODEL == 0 + verifexit(ia_css_is_kernel_bitmap_intersection_empty( + program_bitmap_i, program_bitmap_k)); +#else + (void)program_bitmap_k; +#endif + } + } + + /* Check for relations */ + for (j = 0; j < (int)program_count; j++) { + int k; + ia_css_program_manifest_t *program_manifest_j = + ia_css_program_group_manifest_get_prgrm_mnfst( + manifest, j); + ia_css_program_type_t program_type_j = + ia_css_program_manifest_get_type(program_manifest_j); + ia_css_kernel_bitmap_t program_bitmap_j = + ia_css_program_manifest_get_kernel_bitmap( + program_manifest_j); + uint8_t program_dependency_count_j = + ia_css_program_manifest_get_program_dependency_count( + program_manifest_j); + uint8_t program_dependency_j0 = + ia_css_program_manifest_get_program_dependency( + program_manifest_j, 0); + bool is_sub_j = + ia_css_is_program_manifest_subnode_program_type( + program_manifest_j); + bool is_super_j = + ia_css_is_program_manifest_supernode_program_type( + 
program_manifest_j); + bool is_virtual_sub_j = + (program_type_j == IA_CSS_PROGRAM_TYPE_VIRTUAL_SUB); + bool is_j_subset_i = + ia_css_is_kernel_bitmap_subset( + program_bitmap_i, program_bitmap_j); + bool is_i_subset_j = + ia_css_is_kernel_bitmap_subset( + program_bitmap_j, program_bitmap_i); + + /* Test below would fail for i==j */ + if (i == j) + continue; + + /* Empty sets are always subsets, but meaningless */ + verifexit(!ia_css_is_kernel_bitmap_empty( + program_bitmap_j)); + + /* + * Checks for mutual subnodes + * - Parallel subnodes must have an equal + * set of kernels + * - Exclusive and virtual subnodes must + * have an unequal set of kernels + * Checks for subnodes + * - Subnodes must have a subset of kernels + */ + if (((program_type_i == + IA_CSS_PROGRAM_TYPE_PARALLEL_SUB) && + (program_type_j == + IA_CSS_PROGRAM_TYPE_PARALLEL_SUB)) || + ((program_type_i == + IA_CSS_PROGRAM_TYPE_EXCLUSIVE_SUB) && + (program_type_j == + IA_CSS_PROGRAM_TYPE_EXCLUSIVE_SUB)) || + ((program_type_i == + IA_CSS_PROGRAM_TYPE_VIRTUAL_SUB) && + (program_type_j == + IA_CSS_PROGRAM_TYPE_VIRTUAL_SUB))) { + + verifexit(program_dependency_count_j == 1); + verifexit(program_dependency_i0 != i); + verifexit(program_dependency_j0 != i); + + if (program_dependency_i0 == + program_dependency_j0) { + verifexit(is_sub_i); + /* + * Subnodes are subsets, + * not for virtual nodes + */ + if (!is_virtual_sub_i) + verifexit( + ((is_j_subset_i || + is_i_subset_j))); + /* + * That must be equal for + * parallel subnodes, + * must be unequal for + * exclusive and virtual subnodes + */ + verifexit( + ((is_j_subset_i && is_i_subset_j) ^ + (is_exclusive_sub_i | + is_virtual_sub_i))); + + } + if (is_j_subset_i || is_i_subset_j) { + verifexit(program_dependency_i0 == + program_dependency_j0); + } + } + + if (((program_type_i == + IA_CSS_PROGRAM_TYPE_PARALLEL_SUPER) && + (program_type_j == + IA_CSS_PROGRAM_TYPE_PARALLEL_SUB)) || + ((program_type_i == + IA_CSS_PROGRAM_TYPE_EXCLUSIVE_SUPER) && + 
(program_type_j == + IA_CSS_PROGRAM_TYPE_EXCLUSIVE_SUB)) || + ((program_type_i == + IA_CSS_PROGRAM_TYPE_VIRTUAL_SUPER) && + (program_type_j == + IA_CSS_PROGRAM_TYPE_VIRTUAL_SUB))) { + + verifexit(program_dependency_count_j == 1); + verifexit(!is_i_subset_j); + + if (program_dependency_j0 == i) { + verifexit(program_dependency_i0 != + program_dependency_j0); + verifexit(is_super_i); + verifexit(is_j_subset_i); + + } + if (is_j_subset_i) { + verifexit(program_dependency_j0 == i); + } + } + + /* + * Checks for dependent nodes + * - Cannot depend on exclusive subnodes + * - No intersection between kernels + * (too strict for multiple instances ?) + * unless a subnode + */ + for (k = 0; k < (int)program_dependency_count_j; k++) { + uint8_t program_dependency_k = + ia_css_program_manifest_get_program_dependency( + program_manifest_j, k); + + verifexit((program_dependency_k < + program_count)); + if (program_dependency_k == i) { + /* program[j] depends on program[i] */ + verifexit((i != j)); + verifexit((program_type_i != + IA_CSS_PROGRAM_TYPE_EXCLUSIVE_SUB) && + (program_type_i != + IA_CSS_PROGRAM_TYPE_VIRTUAL_SUB)); + verifexit(USE_SIMPLIFIED_GRAPH_MODEL || + (ia_css_is_kernel_bitmap_intersection_empty( + program_bitmap_i, program_bitmap_j) ^ is_sub_j)); + } + } + + /* + * Checks for supernodes and subnodes + * - Detect nodes that kernel-wise are subsets, + * but not connected to the correct supernode + * - We do not (yet) detect if programs properly + * depend on all parallel nodes + */ + if (!ia_css_is_kernel_bitmap_intersection_empty( + program_bitmap_i, program_bitmap_j)) { + /* + * This test will pass if + * the program manifest is NULL, + * but that's no concern here + */ +#if USE_SIMPLIFIED_GRAPH_MODEL == 0 + verifexit(!ia_css_is_program_manifest_singular_program_type( + program_manifest_i)); + verifexit(!ia_css_is_program_manifest_singular_program_type( + program_manifest_j)); + if (!is_virtual_sub_j) + verifexit((is_j_subset_i || is_i_subset_j)); +#else + 
(void)is_virtual_sub_j; +#endif + if (is_super_i) { + verifexit(is_sub_j); + verifexit(program_dependency_j0 == i); + } + if (is_super_j) { + verifexit(is_sub_i); + verifexit(program_dependency_i0 == j); + } + } + } + check_bitmap = ia_css_kernel_bitmap_union( + check_bitmap, program_bitmap_i); + /* + * A terminal can be bound to only a single + * (of multiple concurrent) program(s), + * i.e. the one that holds the iterator to control it + * Only singular and super nodes can depend on a terminal. + * This loop accumulates all terminal + * dependencies over all programs + */ + for (j = 0; j < (int)terminal_dependency_count_i; j++) { + uint8_t terminal_dependency = + ia_css_program_manifest_get_terminal_dependency( + program_manifest_i, j); + + verifexit(terminal_dependency < terminal_count); + if ((program_type_i != + IA_CSS_PROGRAM_TYPE_EXCLUSIVE_SUB) && + (program_type_i != + IA_CSS_PROGRAM_TYPE_VIRTUAL_SUB)) { + /* If the subnode always came after the */ + /* supernode we could check for presence */ + resource_bitmap = + vied_nci_bit_mask_set_unique( + resource_bitmap, + terminal_dependency); +#if USE_SIMPLIFIED_GRAPH_MODEL == 0 + verifexit(!vied_nci_is_bitmap_empty( + resource_bitmap)); +#endif + } + } + } + verifexit(ia_css_is_kernel_bitmap_equal( + total_bitmap, check_bitmap)); + + terminal_bitmap_weight = + vied_nci_bitmap_compute_weight(resource_bitmap); + verifexit(terminal_bitmap_weight >= 0); + if (has_parameter_terminal_in || + has_parameter_terminal_out || + has_program_terminal || + has_program_control_init_terminal) { + int skip_terminal_count = 0; + + if (has_parameter_terminal_in) + skip_terminal_count++; + if (has_parameter_terminal_out) + skip_terminal_count++; + if (has_program_control_init_terminal) { + skip_terminal_count++; + } + if (has_program_terminal) + skip_terminal_count++; + if (has_program_terminal_sequencer_info) + skip_terminal_count--; +#if USE_SIMPLIFIED_GRAPH_MODEL == 0 + verifexit((terminal_bitmap_weight == + (terminal_count - 
skip_terminal_count))); +#endif + } else + verifexit((terminal_bitmap_weight == terminal_count)); + + is_valid = true; +EXIT: + if (is_valid == false) { + IA_CSS_TRACE_0(PSYSAPI_STATIC, ERROR, + "ia_css_is_program_group_manifest_valid: failed\n"); + } + return is_valid; +} + +int ia_css_program_group_manifest_set_kernel_bitmap( + ia_css_program_group_manifest_t *manifest, + const ia_css_kernel_bitmap_t bitmap) +{ + int retval = -1; + + IA_CSS_TRACE_0(PSYSAPI_STATIC, VERBOSE, + "ia_css_program_group_manifest_set_kernel_bitmap(): enter:\n"); + + if (manifest != NULL) { + manifest->kernel_bitmap = bitmap; + retval = 0; + } else { + IA_CSS_TRACE_0(PSYSAPI_STATIC, WARNING, + "ia_css_program_group_manifest_set_kernel_bitmap invalid argument\n"); + } + return retval; +} + +ia_css_kernel_bitmap_t ia_css_program_group_manifest_get_kernel_bitmap( + const ia_css_program_group_manifest_t *manifest) +{ + ia_css_kernel_bitmap_t bitmap = ia_css_kernel_bitmap_clear(); + + IA_CSS_TRACE_0(PSYSAPI_STATIC, VERBOSE, + "ia_css_program_group_manifest_get_kernel_bitmap(): enter:\n"); + + if (manifest != NULL) { + bitmap = manifest->kernel_bitmap; + } else { + IA_CSS_TRACE_0(PSYSAPI_STATIC, WARNING, + "ia_css_program_group_manifest_get_kernel_bitmap invalid argument\n"); + } + return bitmap; +} + +void ia_css_program_group_manifest_init( + ia_css_program_group_manifest_t *blob, + const uint8_t program_count, + const uint8_t terminal_count, + const uint8_t *program_dependencies, + const uint8_t *terminal_dependencies, + const ia_css_terminal_type_t *terminal_type, + const uint16_t cached_in_param_section_count, + const uint16_t cached_out_param_section_count, + const uint16_t *spatial_param_section_count, + const uint16_t fragment_param_section_count, + const uint16_t *sliced_in_param_section_count, + const uint16_t *sliced_out_param_section_count, + const uint16_t kernel_fragment_seq_count, + const uint16_t *progctrlinit_load_section_counts, + const uint16_t 
*progctrlinit_connect_section_counts) +{ + int i = 0; + int j = 0; + int m = 0; + int n = 0; + int result; + uint32_t offset = 0; + char *prg_manifest_base, *terminal_manifest_base; + size_t program_size = 0; + + /* + * assert(blob != NULL); + */ + COMPILATION_ERROR_IF( + SIZE_OF_DATA_TERMINAL_MANIFEST_STRUCT_IN_BITS != + (CHAR_BIT * sizeof(ia_css_data_terminal_manifest_t))); + COMPILATION_ERROR_IF( + SIZE_OF_PROGRAM_GROUP_MANIFEST_STRUCT_IN_BITS != + (CHAR_BIT * sizeof(ia_css_program_group_manifest_t))); + COMPILATION_ERROR_IF( + SIZE_OF_PROGRAM_MANIFEST_STRUCT_IN_BITS != + (CHAR_BIT * sizeof(ia_css_program_manifest_t))); + + IA_CSS_TRACE_0(PSYSAPI_STATIC, INFO, + "ia_css_program_group_manifest_init(): enter:\n"); + + for (i = 0; i < (int)program_count; i++) { + program_size += + ia_css_sizeof_program_manifest(program_dependencies[i], + terminal_dependencies[i]); + } + + /* A program group ID cannot be zero */ + blob->ID = 1; + blob->program_count = program_count; + blob->terminal_count = terminal_count; + blob->program_manifest_offset = sizeof(ia_css_program_group_manifest_t); + blob->terminal_manifest_offset = + (uint32_t)blob->program_manifest_offset + program_size; + + prg_manifest_base = (char *) + (((char *)blob) + blob->program_manifest_offset); + offset = blob->program_manifest_offset; + for (i = 0; i < (int)program_count; i++) { + ia_css_program_manifest_init( + (ia_css_program_manifest_t *)prg_manifest_base, + program_dependencies[i], terminal_dependencies[i]); + ia_css_program_manifest_set_parent_offset( + (ia_css_program_manifest_t *)prg_manifest_base, offset); + program_size = + ia_css_sizeof_program_manifest(program_dependencies[i], + terminal_dependencies[i]); + prg_manifest_base += program_size; + offset += (uint32_t)program_size; + } + + offset = blob->terminal_manifest_offset; + terminal_manifest_base = (char *) (((char *)blob) + offset); + for (i = 0; i < (int)terminal_count; i++) { + size_t terminal_size = 0; + ia_css_terminal_manifest_t 
*term_manifest = + (ia_css_terminal_manifest_t *)terminal_manifest_base; + + ia_css_terminal_manifest_set_parent_offset( + (ia_css_terminal_manifest_t *) + terminal_manifest_base, + offset); + switch (terminal_type[i]) { + case IA_CSS_TERMINAL_TYPE_PARAM_CACHED_IN: + result = ia_css_param_terminal_manifest_init( + (ia_css_param_terminal_manifest_t *) + term_manifest, + cached_in_param_section_count); + if (0 == result) { + terminal_size = + ia_css_param_terminal_manifest_get_size( + cached_in_param_section_count); + } else { + IA_CSS_TRACE_0(PSYSAPI_STATIC, ERROR, + "ia_css_param_terminal_manifest_init failed in cached in terminal\n"); + } + break; + case IA_CSS_TERMINAL_TYPE_PARAM_CACHED_OUT: + result = ia_css_param_terminal_manifest_init( + (ia_css_param_terminal_manifest_t *) + term_manifest, + cached_out_param_section_count); + if (0 == result) { + terminal_size = + ia_css_param_terminal_manifest_get_size( + cached_out_param_section_count); + } else { + IA_CSS_TRACE_0(PSYSAPI_STATIC, ERROR, + "ia_css_param_terminal_manifest_init failed\n"); + } + break; + case IA_CSS_TERMINAL_TYPE_PARAM_SPATIAL_IN: + case IA_CSS_TERMINAL_TYPE_PARAM_SPATIAL_OUT: + result = ia_css_spatial_param_terminal_manifest_init( + (ia_css_spatial_param_terminal_manifest_t *) + term_manifest, + spatial_param_section_count[j]); + if (0 == result) { + terminal_size = + ia_css_spatial_param_terminal_manifest_get_size( + spatial_param_section_count[j]); + j++; + } else { + IA_CSS_TRACE_0(PSYSAPI_STATIC, ERROR, + "ia_css_spatial_param_terminal_manifest_init failed in spatial terminal\n"); + } + break; + case IA_CSS_TERMINAL_TYPE_PROGRAM: + result = ia_css_program_terminal_manifest_init( + (ia_css_program_terminal_manifest_t *) + term_manifest, + fragment_param_section_count, + kernel_fragment_seq_count); + if (0 == result) { + terminal_size = + ia_css_program_terminal_manifest_get_size( + fragment_param_section_count, + kernel_fragment_seq_count); + } else { + IA_CSS_TRACE_0(PSYSAPI_STATIC, 
ERROR, + "ia_css_program_terminal_manifest_init failed in program terminal\n"); + } + break; + case IA_CSS_TERMINAL_TYPE_PROGRAM_CONTROL_INIT: + result = ia_css_program_control_init_terminal_manifest_init( + (ia_css_program_control_init_terminal_manifest_t *) + term_manifest, + program_count, + progctrlinit_load_section_counts, + progctrlinit_connect_section_counts); + if (0 == result) { + terminal_size = + ia_css_program_control_init_terminal_manifest_get_size( + program_count, + NULL, + NULL); + } else { + IA_CSS_TRACE_0(PSYSAPI_STATIC, ERROR, + "ia_css_program_control_init_terminal_manifest_init failed\n"); + } + break; + case IA_CSS_TERMINAL_TYPE_DATA_IN: + case IA_CSS_TERMINAL_TYPE_DATA_OUT: + terminal_size = sizeof(ia_css_data_terminal_manifest_t); + break; + case IA_CSS_TERMINAL_TYPE_PARAM_SLICED_IN: + result = ia_css_sliced_param_terminal_manifest_init( + (ia_css_sliced_param_terminal_manifest_t *) + term_manifest, + sliced_in_param_section_count[m]); + if (0 == result) { + terminal_size = + ia_css_sliced_param_terminal_manifest_get_size( + sliced_in_param_section_count[m]); + m++; + } else { + IA_CSS_TRACE_0(PSYSAPI_STATIC, ERROR, + "ia_css_param_terminal_manifest_init in sliced terminal failed\n"); + } + break; + case IA_CSS_TERMINAL_TYPE_PARAM_SLICED_OUT: + result = ia_css_sliced_param_terminal_manifest_init( + (ia_css_sliced_param_terminal_manifest_t *) + term_manifest, + sliced_out_param_section_count[n]); + if (0 == result) { + terminal_size = + ia_css_sliced_param_terminal_manifest_get_size( + sliced_out_param_section_count[n]); + n++; + } else { + IA_CSS_TRACE_0(PSYSAPI_STATIC, ERROR, + "ia_css_param_terminal_manifest_init in sliced out terminal failed\n"); + } + break; + default: + IA_CSS_TRACE_0(PSYSAPI_STATIC, WARNING, + "ia_css_program_group_manifest_init invalid argument\n"); + } + term_manifest->size = (uint16_t)terminal_size; + term_manifest->terminal_type = terminal_type[i]; + terminal_manifest_base += terminal_size; + offset += 
(uint32_t)terminal_size; + } + + /* Set the private program group manifest blob offset */ + blob->private_data_offset = offset; + offset += ceil_mul(sizeof(struct ia_css_psys_private_pg_data), + sizeof(uint64_t)); + + /* Set the RBM manifest blob offset */ + blob->rbm_manifest_offset = offset; + offset += ceil_mul(sizeof(ia_css_rbm_manifest_t), + sizeof(uint64_t)); + + assert(offset <= UINT16_MAX); + blob->size = (uint16_t)offset; +} + +int ia_css_program_group_manifest_print( + const ia_css_program_group_manifest_t *manifest, + void *fid) +{ + int retval = -1; + int i; + uint8_t program_count, terminal_count; + ia_css_kernel_bitmap_t bitmap; + struct ia_css_psys_private_pg_data *priv_data; + + IA_CSS_TRACE_0(PSYSAPI_STATIC, INFO, + "ia_css_program_group_manifest_print(): enter:\n"); + + NOT_USED(fid); + + verifexit(manifest != NULL); + + IA_CSS_TRACE_1(PSYSAPI_STATIC, INFO, + "sizeof(manifest) = %d\n", + (int)ia_css_program_group_manifest_get_size(manifest)); + IA_CSS_TRACE_1(PSYSAPI_STATIC, INFO, + "alignment(manifest) = %d\n", + (int)ia_css_program_group_manifest_get_alignment(manifest)); + + IA_CSS_TRACE_1(PSYSAPI_STATIC, INFO, + "program group ID = %d\n", + (int)ia_css_program_group_manifest_get_program_group_ID( + manifest)); + + program_count = + ia_css_program_group_manifest_get_program_count(manifest); + terminal_count = + ia_css_program_group_manifest_get_terminal_count(manifest); + + bitmap = ia_css_program_group_manifest_get_kernel_bitmap(manifest); + verifexit(ia_css_kernel_bitmap_print(bitmap, fid) == 0); + + IA_CSS_TRACE_1(PSYSAPI_STATIC, INFO, + "%d program manifests\n", (int)program_count); + for (i = 0; i < (int)program_count; i++) { + ia_css_program_manifest_t *program_manifest = + ia_css_program_group_manifest_get_prgrm_mnfst( + manifest, i); + + retval = ia_css_program_manifest_print(program_manifest, fid); + verifjmpexit(retval == 0); + } + IA_CSS_TRACE_1(PSYSAPI_STATIC, INFO, + "%d terminal manifests\n", (int)terminal_count); + for (i = 0; i 
< (int)terminal_count; i++) { + ia_css_terminal_manifest_t *terminal_manifest = + ia_css_program_group_manifest_get_term_mnfst( + manifest, i); + + retval = ia_css_terminal_manifest_print( + terminal_manifest, fid); + verifjmpexit(retval == 0); + } + + priv_data = + (struct ia_css_psys_private_pg_data *) + ia_css_program_group_manifest_get_private_data(manifest); + IA_CSS_TRACE_1(PSYSAPI_STATIC, INFO, + "private_data_offset %d\n", manifest->private_data_offset); + + for (i = 0; i < IPU_DEVICE_GP_PSA_MUX_NUM_MUX; i++) { + IA_CSS_TRACE_2(PSYSAPI_STATIC, INFO, + "PSA MUX id %d mux val %d\n", i, + priv_data->psa_mux_conf[i]); + + } + + for (i = 0; i < IPU_DEVICE_GP_ISA_STATIC_MUX_NUM_MUX; i++) { + IA_CSS_TRACE_2(PSYSAPI_STATIC, INFO, + "ISA MUX id %d mux val %d\n", i, + priv_data->isa_mux_conf[i]); + + } + + for (i = 0; i < IPU_DEVICE_ACB_NUM_ACB; i++) { + + if (priv_data->acb_route[i].in_select != + NCI_ACB_PORT_INVALID) { + + assert(priv_data->acb_route[i].in_select != + NCI_ACB_PORT_INVALID && + priv_data->acb_route[i].out_select != + NCI_ACB_PORT_INVALID); + + IA_CSS_TRACE_3(PSYSAPI_STATIC, INFO, + "Route Cell id %d In %d Out %d\n", i, + priv_data->acb_route[i].in_select, + priv_data->acb_route[i].out_select); + } + + } + + IA_CSS_TRACE_1(PSYSAPI_STATIC, INFO, "Input Buffer: buffer_base_addr 0x%x\n", + priv_data->input_buffer_info.buffer_base_addr); + IA_CSS_TRACE_1(PSYSAPI_STATIC, INFO, "Input Buffer: bpe = %d\n", + priv_data->input_buffer_info.bpe); + IA_CSS_TRACE_1(PSYSAPI_STATIC, INFO, "Input Buffer: buffer_width = %d\n", + priv_data->input_buffer_info.buffer_width); + IA_CSS_TRACE_1(PSYSAPI_STATIC, INFO, "Input Buffer: buffer_height = %d\n", + priv_data->input_buffer_info.buffer_height); + IA_CSS_TRACE_1(PSYSAPI_STATIC, INFO, "Input Buffer: num_of_buffers = %d\n", + priv_data->input_buffer_info.num_of_buffers); + IA_CSS_TRACE_1(PSYSAPI_STATIC, INFO, "Input Buffer: dfm_port_addr = 0x%x\n", + priv_data->input_buffer_info.dfm_port_addr); + + retval = 0; +EXIT: + 
if (retval != 0) { + IA_CSS_TRACE_1(PSYSAPI_STATIC, ERROR, + "ia_css_program_group_manifest_print failed (%i)\n", + retval); + } + return retval; +} +#endif /* !defined(__HIVECC) */ diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/psysapi/static/src/ia_css_psys_program_group_manifest_impl.h b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/psysapi/static/src/ia_css_psys_program_group_manifest_impl.h new file mode 100644 index 0000000000000..527b8cc00dd14 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/psysapi/static/src/ia_css_psys_program_group_manifest_impl.h @@ -0,0 +1,415 @@ +/* +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+*/ + +#ifndef __IA_CSS_PSYS_PROGRAM_GROUP_MANIFEST_IMPL_H +#define __IA_CSS_PSYS_PROGRAM_GROUP_MANIFEST_IMPL_H + +#include +#include +#include +#include +#include "ia_css_psys_program_group_private.h" +#include "ia_css_terminal_manifest_types.h" +#include "ia_css_psys_private_pg_data.h" +#include /* Safer bit mask functions */ +#include "ia_css_psys_static_trace.h" +#include "ia_css_rbm_manifest_types.h" +#include +#include +#include + +IA_CSS_PSYS_STATIC_STORAGE_CLASS_C +size_t ia_css_program_group_manifest_get_size( + const ia_css_program_group_manifest_t *manifest) +{ + size_t size = 0; + + IA_CSS_TRACE_0(PSYSAPI_STATIC, VERBOSE, + "ia_css_program_group_manifest_get_size(): enter:\n"); + + if (manifest != NULL) { + size = manifest->size; + } else { + IA_CSS_TRACE_0(PSYSAPI_STATIC, WARNING, + "ia_css_program_group_manifest_get_size invalid argument\n"); + } + return size; +} + +IA_CSS_PSYS_STATIC_STORAGE_CLASS_C +ia_css_program_group_ID_t +ia_css_program_group_manifest_get_program_group_ID( + const ia_css_program_group_manifest_t *manifest) +{ + ia_css_program_group_ID_t id = IA_CSS_PROGRAM_GROUP_INVALID_ID; + + IA_CSS_TRACE_0(PSYSAPI_STATIC, VERBOSE, + "ia_css_program_group_manifest_get_program_group_ID(): enter:\n"); + + if (manifest != NULL) { + id = manifest->ID; + } else { + IA_CSS_TRACE_0(PSYSAPI_STATIC, WARNING, + "ia_css_program_group_manifest_get_program_group_ID invalid argument\n"); + } + return id; +} + +IA_CSS_PSYS_STATIC_STORAGE_CLASS_C +int ia_css_program_group_manifest_set_program_group_ID( + ia_css_program_group_manifest_t *manifest, + ia_css_program_group_ID_t id) +{ + int retval = -1; + + IA_CSS_TRACE_0(PSYSAPI_STATIC, VERBOSE, + "ia_css_program_group_manifest_set_program_group_ID(): enter:\n"); + + if (manifest != NULL) { + manifest->ID = id; + retval = 0; + } else { + IA_CSS_TRACE_0(PSYSAPI_STATIC, WARNING, + "ia_css_program_group_manifest_set_program_group_ID invalid argument\n"); + } + return retval; +} + 
+IA_CSS_PSYS_STATIC_STORAGE_CLASS_C +int ia_css_program_group_manifest_set_alignment( + ia_css_program_group_manifest_t *manifest, + const uint8_t alignment) +{ + int retval = -1; + + IA_CSS_TRACE_0(PSYSAPI_STATIC, VERBOSE, + "ia_css_program_group_manifest_set_alignment(): enter:\n"); + + if (manifest != NULL) { + manifest->alignment = alignment; + retval = 0; + } else { + IA_CSS_TRACE_0(PSYSAPI_STATIC, WARNING, + "ia_css_program_group_manifest_set_alignment invalid argument\n"); + } + return retval; +} + +IA_CSS_PSYS_STATIC_STORAGE_CLASS_C +uint8_t ia_css_program_group_manifest_get_alignment( + const ia_css_program_group_manifest_t *manifest) +{ + uint8_t alignment = 0; + + IA_CSS_TRACE_0(PSYSAPI_STATIC, VERBOSE, + "ia_css_program_group_manifest_get_alignment(): enter:\n"); + + if (manifest != NULL) { + alignment = manifest->alignment; + } else { + IA_CSS_TRACE_0(PSYSAPI_STATIC, WARNING, + "ia_css_program_group_manifest_get_alignment invalid argument\n"); + } + return alignment; +} + +IA_CSS_PSYS_STATIC_STORAGE_CLASS_C +void *ia_css_program_group_manifest_get_private_data( + const ia_css_program_group_manifest_t *manifest) +{ + void *private_data = NULL; + + IA_CSS_TRACE_1(PSYSAPI_STATIC, VERBOSE, + "ia_css_program_group_manifest_get_private_data(%p): enter:\n", + manifest); + + verifexit(manifest != NULL); + + private_data = (void *)((const char *)manifest + + manifest->private_data_offset); +EXIT: + if (NULL == manifest) { + IA_CSS_TRACE_0(PSYSAPI_STATIC, WARNING, + "ia_css_program_group_manifest_get_private_data invalid argument\n"); + } + return private_data; +} + +IA_CSS_PSYS_STATIC_STORAGE_CLASS_C +ia_css_rbm_manifest_t *ia_css_program_group_manifest_get_rbm_manifest( + const ia_css_program_group_manifest_t *manifest) +{ + ia_css_rbm_manifest_t *rbm_manifest = NULL; + + IA_CSS_TRACE_1(PSYSAPI_STATIC, VERBOSE, + "ia_css_program_group_manifest_get_rbm_manifest(%p): enter:\n", + manifest); + + verifexit(manifest != NULL); + + rbm_manifest = 
(ia_css_rbm_manifest_t *)((const char *)manifest + + manifest->rbm_manifest_offset); + +EXIT: + if (NULL == manifest) { + IA_CSS_TRACE_0(PSYSAPI_STATIC, WARNING, + "ia_css_program_group_manifest_get_rbm_manifest invalid argument\n"); + } + return rbm_manifest; +} + +IA_CSS_PSYS_STATIC_STORAGE_CLASS_C +ia_css_program_manifest_t * +ia_css_program_group_manifest_get_prgrm_mnfst( + const ia_css_program_group_manifest_t *manifest, + const unsigned int program_index) +{ + ia_css_program_manifest_t *prg_manifest_base; + uint8_t *program_manifest = NULL; + uint8_t program_count; + unsigned int i; + + IA_CSS_TRACE_2(PSYSAPI_STATIC, VERBOSE, + "ia_css_program_group_manifest_get_prgrm_mnfst(%p,%d): enter:\n", + manifest, program_index); + + program_count = + ia_css_program_group_manifest_get_program_count(manifest); + + verifexit(manifest != NULL); + verifexit(program_index < program_count); + + prg_manifest_base = (ia_css_program_manifest_t *)((char *)manifest + + manifest->program_manifest_offset); + if (program_index < program_count) { + program_manifest = (uint8_t *)prg_manifest_base; + for (i = 0; i < program_index; i++) { + program_manifest += ((ia_css_program_manifest_t *) + program_manifest)->size; + } + } + +EXIT: + if (NULL == manifest || program_index >= program_count) { + IA_CSS_TRACE_0(PSYSAPI_STATIC, WARNING, + "ia_css_program_group_manifest_get_prgrm_mnfst invalid argument\n"); + } + return (ia_css_program_manifest_t *)program_manifest; +} + +IA_CSS_PSYS_STATIC_STORAGE_CLASS_C +ia_css_data_terminal_manifest_t * +ia_css_program_group_manifest_get_data_terminal_manifest( + const ia_css_program_group_manifest_t *manifest, + const unsigned int terminal_index) +{ + ia_css_data_terminal_manifest_t *data_terminal_manifest = NULL; + ia_css_terminal_manifest_t *terminal_manifest; + + IA_CSS_TRACE_2(PSYSAPI_STATIC, VERBOSE, + "ia_css_program_group_manifest_get_data_terminal_manifest(%p, %d): enter:\n", + manifest, (int)terminal_index); + + terminal_manifest = + 
ia_css_program_group_manifest_get_term_mnfst(manifest, + terminal_index); + + verifexit(ia_css_is_terminal_manifest_data_terminal(terminal_manifest)); + + data_terminal_manifest = + (ia_css_data_terminal_manifest_t *)terminal_manifest; +EXIT: + return data_terminal_manifest; +} + +IA_CSS_PSYS_STATIC_STORAGE_CLASS_C +ia_css_param_terminal_manifest_t * +ia_css_program_group_manifest_get_param_terminal_manifest( + const ia_css_program_group_manifest_t *manifest, + const unsigned int terminal_index) +{ + ia_css_param_terminal_manifest_t *param_terminal_manifest = NULL; + ia_css_terminal_manifest_t *terminal_manifest; + + IA_CSS_TRACE_2(PSYSAPI_STATIC, VERBOSE, + "ia_css_program_group_manifest_get_param_terminal_manifest(%p, %d): enter:\n", + manifest, (int)terminal_index); + + terminal_manifest = + ia_css_program_group_manifest_get_term_mnfst(manifest, + terminal_index); + + verifexit(ia_css_is_terminal_manifest_parameter_terminal( + terminal_manifest)); + param_terminal_manifest = + (ia_css_param_terminal_manifest_t *)terminal_manifest; +EXIT: + return param_terminal_manifest; +} + +IA_CSS_PSYS_STATIC_STORAGE_CLASS_C +ia_css_spatial_param_terminal_manifest_t * +ia_css_program_group_manifest_get_spatial_param_terminal_manifest( + const ia_css_program_group_manifest_t *manifest, + const unsigned int terminal_index) +{ + ia_css_spatial_param_terminal_manifest_t * + spatial_param_terminal_manifest = NULL; + ia_css_terminal_manifest_t *terminal_manifest; + + IA_CSS_TRACE_2(PSYSAPI_STATIC, VERBOSE, + "ia_css_program_group_manifest_get_spatial_param_terminal_manifest(%p, %d): enter:\n", + manifest, (int)terminal_index); + + terminal_manifest = + ia_css_program_group_manifest_get_term_mnfst(manifest, + terminal_index); + + verifexit(ia_css_is_terminal_manifest_spatial_parameter_terminal( + terminal_manifest)); + + spatial_param_terminal_manifest = + (ia_css_spatial_param_terminal_manifest_t *)terminal_manifest; +EXIT: + return spatial_param_terminal_manifest; +} + 
+IA_CSS_PSYS_STATIC_STORAGE_CLASS_C +ia_css_sliced_param_terminal_manifest_t * +ia_css_program_group_manifest_get_sliced_param_terminal_manifest( + const ia_css_program_group_manifest_t *manifest, + const unsigned int terminal_index) +{ + ia_css_sliced_param_terminal_manifest_t * + sliced_param_terminal_manifest = NULL; + ia_css_terminal_manifest_t *terminal_manifest; + + IA_CSS_TRACE_2(PSYSAPI_STATIC, VERBOSE, + "ia_css_program_group_manifest_get_sliced_param_terminal_manifest(%p, %d): enter:\n", + manifest, (int)terminal_index); + + terminal_manifest = + ia_css_program_group_manifest_get_term_mnfst(manifest, + terminal_index); + + verifexit(ia_css_is_terminal_manifest_sliced_terminal( + terminal_manifest)); + + sliced_param_terminal_manifest = + (ia_css_sliced_param_terminal_manifest_t *)terminal_manifest; +EXIT: + return sliced_param_terminal_manifest; +} + +IA_CSS_PSYS_STATIC_STORAGE_CLASS_C +ia_css_program_terminal_manifest_t * +ia_css_program_group_manifest_get_program_terminal_manifest( + const ia_css_program_group_manifest_t *manifest, + const unsigned int terminal_index) +{ + ia_css_program_terminal_manifest_t *program_terminal_manifest = NULL; + ia_css_terminal_manifest_t *terminal_manifest; + + IA_CSS_TRACE_2(PSYSAPI_STATIC, VERBOSE, + "ia_css_program_group_manifest_get_program_terminal_manifest(%p, %d): enter:\n", + manifest, (int)terminal_index); + + terminal_manifest = + ia_css_program_group_manifest_get_term_mnfst(manifest, + terminal_index); + + verifexit(ia_css_is_terminal_manifest_program_terminal( + terminal_manifest)); + + program_terminal_manifest = + (ia_css_program_terminal_manifest_t *)terminal_manifest; + EXIT: + return program_terminal_manifest; +} + +IA_CSS_PSYS_STATIC_STORAGE_CLASS_C +ia_css_terminal_manifest_t * +ia_css_program_group_manifest_get_term_mnfst( + const ia_css_program_group_manifest_t *manifest, + const unsigned int terminal_index) +{ + ia_css_terminal_manifest_t *terminal_manifest = NULL; + ia_css_terminal_manifest_t 
*terminal_manifest_base; + uint8_t terminal_count; + uint8_t i = 0; + uint32_t offset; + + IA_CSS_TRACE_2(PSYSAPI_STATIC, VERBOSE, + "ia_css_program_group_manifest_get_term_mnfst(%p,%d): enter:\n", + manifest, (int)terminal_index); + + verifexit(manifest != NULL); + + terminal_count = + ia_css_program_group_manifest_get_terminal_count(manifest); + + verifexit(terminal_index < terminal_count); + + terminal_manifest_base = + (ia_css_terminal_manifest_t *)((char *)manifest + + manifest->terminal_manifest_offset); + terminal_manifest = terminal_manifest_base; + while (i < terminal_index) { + offset = + (uint32_t)ia_css_terminal_manifest_get_size(terminal_manifest); + terminal_manifest = (ia_css_terminal_manifest_t *) + ((char *)terminal_manifest + offset); + i++; + } +EXIT: + return terminal_manifest; +} + +IA_CSS_PSYS_STATIC_STORAGE_CLASS_C +uint8_t ia_css_program_group_manifest_get_program_count( + const ia_css_program_group_manifest_t *manifest) +{ + uint8_t program_count = 0; + + IA_CSS_TRACE_0(PSYSAPI_STATIC, VERBOSE, + "ia_css_program_group_manifest_get_program_count(): enter:\n"); + + if (manifest != NULL) { + program_count = manifest->program_count; + } else { + IA_CSS_TRACE_0(PSYSAPI_STATIC, WARNING, + "ia_css_program_group_manifest_get_program_count invalid argument\n"); + } + return program_count; +} + +IA_CSS_PSYS_STATIC_STORAGE_CLASS_C +uint8_t ia_css_program_group_manifest_get_terminal_count( + const ia_css_program_group_manifest_t *manifest) +{ + uint8_t terminal_count = 0; + + IA_CSS_TRACE_0(PSYSAPI_STATIC, VERBOSE, + "ia_css_program_group_manifest_get_terminal_count(): enter:\n"); + + if (manifest != NULL) { + terminal_count = manifest->terminal_count; + } else { + IA_CSS_TRACE_0(PSYSAPI_STATIC, WARNING, + "ia_css_program_group_manifest_get_terminal_count invalid argument\n"); + } + return terminal_count; +} + +#endif /* __IA_CSS_PSYS_PROGRAM_GROUP_MANIFEST_IMPL_H */ diff --git 
a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/psysapi/static/src/ia_css_psys_program_group_private.h b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/psysapi/static/src/ia_css_psys_program_group_private.h new file mode 100644 index 0000000000000..502d59def6e90 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/psysapi/static/src/ia_css_psys_program_group_private.h @@ -0,0 +1,212 @@ +/* +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ + +#ifndef __IA_CSS_PSYS_PROGRAM_GROUP_PRIVATE_H +#define __IA_CSS_PSYS_PROGRAM_GROUP_PRIVATE_H + +#include "ia_css_psys_manifest_types.h" +#include "ia_css_terminal_manifest_types.h" +#include "ia_css_kernel_bitmap.h" +#include "ia_css_program_group_data.h" +#include "vied_nci_psys_resource_model.h" +#include "ia_css_rbm_manifest_types.h" +#include +#include +#include + +#define SIZE_OF_PROGRAM_GROUP_MANIFEST_STRUCT_IN_BITS \ + ((IA_CSS_KERNEL_BITMAP_BITS) \ + + (IA_CSS_PROGRAM_GROUP_ID_BITS) \ + + (5 * IA_CSS_UINT16_T_BITS) \ + + (5 * IA_CSS_UINT8_T_BITS) \ + + (5 * IA_CSS_UINT8_T_BITS)) + +struct ia_css_program_group_manifest_s { + /**< Indicate kernels are present in this program group */ + ia_css_kernel_bitmap_t kernel_bitmap; + /**< Referral ID to program group FW */ + ia_css_program_group_ID_t ID; + uint16_t program_manifest_offset; + uint16_t terminal_manifest_offset; + /**< Offset to private data (not part of the official API) */ + uint16_t private_data_offset; + /**< Offset to RBM 
manifest */ + uint16_t rbm_manifest_offset; + /**< Size of this structure */ + uint16_t size; + /**< Storage alignment requirement (in uint8_t) */ + uint8_t alignment; + /**< Total number of kernels in this program group */ + uint8_t kernel_count; + /**< Total number of programs in this program group */ + uint8_t program_count; + /**< Total number of terminals on this program group */ + uint8_t terminal_count; + /**< Total number of independent subgraphs in this program group */ + uint8_t subgraph_count; + /**< Padding; ensures that rbm_manifest starts on 64bit alignment */ + uint8_t reserved[5]; +}; + +#define SIZE_OF_PROGRAM_MANIFEST_STRUCT_IN_BITS \ + (IA_CSS_KERNEL_BITMAP_BITS \ + + IA_CSS_PROGRAM_ID_BITS \ + + IA_CSS_PROGRAM_TYPE_BITS \ + + (3 * IA_CSS_UINT32_T_BITS) \ + + (VIED_NCI_RESOURCE_BITMAP_BITS * VIED_NCI_N_DEV_DFM_ID) \ + + (VIED_NCI_RESOURCE_BITMAP_BITS * VIED_NCI_N_DEV_DFM_ID) \ + + IA_CSS_UINT16_T_BITS \ + + (VIED_NCI_RESOURCE_SIZE_BITS * VIED_NCI_N_MEM_TYPE_ID) \ + + (VIED_NCI_RESOURCE_SIZE_BITS * VIED_NCI_N_DATA_MEM_TYPE_ID * 2) \ + + (VIED_NCI_RESOURCE_SIZE_BITS * VIED_NCI_N_DEV_CHN_ID * 2) \ + + (IA_CSS_UINT8_T_BITS * VIED_NCI_N_DEV_DFM_ID) \ + + (IA_CSS_PROCESS_MAX_CELLS * VIED_NCI_RESOURCE_ID_BITS) \ + + (VIED_NCI_RESOURCE_ID_BITS) \ + + (2 * IA_CSS_UINT8_T_BITS) \ + + (N_PADDING_UINT8_IN_PROGRAM_GROUP_MANFEST * IA_CSS_UINT8_T_BITS)) +/* + * This structure contains only the information required for resource + * management and construction of the process group. + * The header for the program binary load is separate + */ + +struct ia_css_program_manifest_s { + /**< Indicate which kernels lead to this program being used */ + ia_css_kernel_bitmap_t kernel_bitmap; + /**< Referral ID to a specific program FW, valid ID's != 0 */ + ia_css_program_ID_t ID; + /**< Specification for exclusive or parallel programs */ + ia_css_program_type_t program_type; + /**< offset to add to reach parent. 
This is negative value.*/ + int32_t parent_offset; + uint32_t program_dependency_offset; + uint32_t terminal_dependency_offset; +#if (VIED_NCI_N_DEV_DFM_ID > 0) + /**< DFM port allocation of this program */ + vied_nci_resource_bitmap_t dfm_port_bitmap[VIED_NCI_N_DEV_DFM_ID]; + /**< Active DFM ports which need a kick + * If an empty port is configured to run in active mode, the empty + * port and the corresponding full port(s) in the stream must be kicked. + * The empty port must always be kicked after the full port. + */ + vied_nci_resource_bitmap_t dfm_active_port_bitmap[VIED_NCI_N_DEV_DFM_ID]; +#endif + /**< Size of this structure */ + uint16_t size; + /**< (internal) Memory allocation size needs of this program */ + vied_nci_resource_size_t int_mem_size[VIED_NCI_N_MEM_TYPE_ID]; + /**< (external) Memory allocation size needs of this program */ + vied_nci_resource_size_t ext_mem_size[VIED_NCI_N_DATA_MEM_TYPE_ID]; + vied_nci_resource_size_t ext_mem_offset[VIED_NCI_N_DATA_MEM_TYPE_ID]; + /**< Device channel allocation size needs of this program */ + vied_nci_resource_size_t dev_chn_size[VIED_NCI_N_DEV_CHN_ID]; + vied_nci_resource_size_t dev_chn_offset[VIED_NCI_N_DEV_CHN_ID]; +#if (VIED_NCI_N_DEV_DFM_ID > 0) + /**< DFM ports are relocatable if value is set to 1. + * The flag is per dfm port type. + * This will not be supported for now. 
+ */ + uint8_t is_dfm_relocatable[VIED_NCI_N_DEV_DFM_ID]; +#endif + /** Array of all the cells this program needs */ +#if IA_CSS_PROCESS_MAX_CELLS == 1 + vied_nci_resource_id_t cell_id; +#else + vied_nci_resource_id_t cells[IA_CSS_PROCESS_MAX_CELLS]; +#endif /* IA_CSS_PROCESS_MAX_CELLS == 1 */ + /**< (exclusive) indication of a cell type to be used by this program */ + vied_nci_resource_id_t cell_type_id; + + /**< Number of programs this program depends on */ + uint8_t program_dependency_count; + /**< Number of terminals this program depends on */ + uint8_t terminal_dependency_count; + /**< Padding bytes for 64bit alignment*/ +#if N_PADDING_UINT8_IN_PROGRAM_GROUP_MANFEST > 0 + /*hivecc does not allow an array of zero length*/ + uint8_t padding[N_PADDING_UINT8_IN_PROGRAM_GROUP_MANFEST]; +#endif +}; + +/* + *Calculation for manual size check for struct ia_css_data_terminal_manifest_s + */ +#define SIZE_OF_DATA_TERMINAL_MANIFEST_STRUCT_IN_BITS \ + (SIZE_OF_TERMINAL_MANIFEST_STRUCT_IN_BITS \ + + IA_CSS_FRAME_FORMAT_BITMAP_BITS \ + + IA_CSS_CONNECTION_BITMAP_BITS \ + + IA_CSS_KERNEL_BITMAP_BITS \ + + (4 * (IA_CSS_UINT16_T_BITS * IA_CSS_N_DATA_DIMENSION)) \ + + IA_CSS_UINT16_T_BITS \ + + IA_CSS_UINT8_T_BITS \ + + (4*IA_CSS_UINT8_T_BITS)) +/* + * Inherited data terminal class + */ +struct ia_css_data_terminal_manifest_s { + /**< Data terminal base */ + ia_css_terminal_manifest_t base; + /**< Supported (4CC / MIPI / parameter) formats */ + ia_css_frame_format_bitmap_t frame_format_bitmap; + /**< Indicate which kernels lead to this terminal being used */ + ia_css_kernel_bitmap_t kernel_bitmap; + /**< Minimum size of the frame */ + uint16_t min_size[IA_CSS_N_DATA_DIMENSION]; + /**< Maximum size of the frame */ + uint16_t max_size[IA_CSS_N_DATA_DIMENSION]; + /**< Minimum size of a fragment that the program port can accept */ + uint16_t min_fragment_size[IA_CSS_N_DATA_DIMENSION]; + /**< Maximum size of a fragment that the program port can accept */ + uint16_t 
max_fragment_size[IA_CSS_N_DATA_DIMENSION]; + /**< Indicate if this terminal is derived from a principal terminal */ + uint16_t terminal_dependency; + /**< Indicate what (streaming) interface types this terminal supports */ + ia_css_connection_bitmap_t connection_bitmap; + /**< Indicates if compression is supported on the data associated with + * this terminal. '1' indicates compression is supported, + * '0' otherwise + */ + uint8_t compression_support; + uint8_t reserved[4]; +}; + +/* ============ Program Control Init Terminal Manifest - START ============ */ +#define N_PADDING_UINT8_IN_PROGCTRLINIT_MANIFEST_PROGRAM_DESC_STRUCT 4 +struct ia_css_program_control_init_manifest_program_desc_s { + uint16_t load_section_count; + uint16_t connect_section_count; + uint8_t padding[N_PADDING_UINT8_IN_PROGCTRLINIT_MANIFEST_PROGRAM_DESC_STRUCT]; +}; + +#define N_PADDING_UINT8_IN_PROGCTRLINIT_TERMINAL_MANIFEST_STRUCT 2 +struct ia_css_program_control_init_terminal_manifest_s { + ia_css_terminal_manifest_t base; + /* Number of programs in program group */ + uint32_t program_count; + /* + * Points to array of ia_css_program_control_init_terminal_program_desc_t + * with size program_count. 
+ */ + uint16_t program_desc_offset; + /* align to 64 */ + uint8_t padding[N_PADDING_UINT8_IN_PROGCTRLINIT_TERMINAL_MANIFEST_STRUCT]; +}; +/* ============ Program Control Init Terminal Manifest - END ============ */ + +extern void ia_css_program_manifest_init( + ia_css_program_manifest_t *blob, + const uint8_t program_dependency_count, + const uint8_t terminal_dependency_count); + +#endif /* __IA_CSS_PSYS_PROGRAM_GROUP_PRIVATE_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/psysapi/static/src/ia_css_psys_program_manifest.c b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/psysapi/static/src/ia_css_psys_program_manifest.c new file mode 100644 index 0000000000000..188f9d80193e4 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/psysapi/static/src/ia_css_psys_program_manifest.c @@ -0,0 +1,1240 @@ +/* +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+*/ + + +#include +#include +/* for ia_css_kernel_bitmap_t, ia_css_kernel_bitmap_print */ +#include + +#include +#include "ia_css_psys_program_group_private.h" +#include "ia_css_psys_static_trace.h" + +#include +#include + +size_t ia_css_sizeof_program_manifest( + const uint8_t program_dependency_count, + const uint8_t terminal_dependency_count) +{ + size_t size = 0; + + IA_CSS_TRACE_0(PSYSAPI_STATIC, VERBOSE, + "ia_css_sizeof_program_manifest(): enter:\n"); + + size += sizeof(ia_css_program_manifest_t); + size += program_dependency_count * sizeof(uint8_t); + size += terminal_dependency_count * sizeof(uint8_t); + size = ceil_mul(size, sizeof(uint64_t)); + + return size; +} + +bool ia_css_has_program_manifest_fixed_cell( + const ia_css_program_manifest_t *manifest) +{ + bool has_fixed_cell = false; + + vied_nci_cell_ID_t cell_id; + vied_nci_cell_type_ID_t cell_type_id; + + IA_CSS_TRACE_0(PSYSAPI_STATIC, VERBOSE, + "ia_css_has_program_manifest_fixed_cell(): enter:\n"); + + verifexit(manifest != NULL); + + cell_id = ia_css_program_manifest_get_cell_ID(manifest); + cell_type_id = ia_css_program_manifest_get_cell_type_ID(manifest); + + has_fixed_cell = ((cell_id != VIED_NCI_N_CELL_ID) && + (cell_type_id == VIED_NCI_N_CELL_TYPE_ID)); + +EXIT: + if (NULL == manifest) { + IA_CSS_TRACE_0(PSYSAPI_STATIC, WARNING, + "ia_css_has_program_manifest_fixed_cell invalid argument\n"); + } + return has_fixed_cell; +} + +size_t ia_css_program_manifest_get_size( + const ia_css_program_manifest_t *manifest) +{ + size_t size = 0; + + IA_CSS_TRACE_0(PSYSAPI_STATIC, VERBOSE, + "ia_css_program_manifest_get_size(): enter:\n"); + + if (manifest != NULL) { + size = manifest->size; + } else { + IA_CSS_TRACE_0(PSYSAPI_STATIC, WARNING, + "ia_css_program_manifest_get_size invalid argument\n"); + } + + return size; +} + +ia_css_program_ID_t ia_css_program_manifest_get_program_ID( + const ia_css_program_manifest_t *manifest) +{ + ia_css_program_ID_t program_id = IA_CSS_PROGRAM_INVALID_ID; + + 
IA_CSS_TRACE_0(PSYSAPI_STATIC, VERBOSE, + "ia_css_program_manifest_get_program_ID(): enter:\n"); + + if (manifest != NULL) { + program_id = manifest->ID; + } else { + IA_CSS_TRACE_0(PSYSAPI_STATIC, WARNING, + "ia_css_program_manifest_get_program_ID invalid argument\n"); + } + return program_id; +} + +int ia_css_program_manifest_set_program_ID( + ia_css_program_manifest_t *manifest, + ia_css_program_ID_t id) +{ + int ret = -1; + + IA_CSS_TRACE_0(PSYSAPI_STATIC, VERBOSE, + "ia_css_program_manifest_set_program_ID(): enter:\n"); + + if (manifest != NULL) { + manifest->ID = id; + ret = 0; + } else { + IA_CSS_TRACE_1(PSYSAPI_STATIC, ERROR, + "ia_css_program_manifest_set_program_ID failed (%i)\n", ret); + } + return ret; +} + +ia_css_program_group_manifest_t *ia_css_program_manifest_get_parent( + const ia_css_program_manifest_t *manifest) +{ + ia_css_program_group_manifest_t *parent = NULL; + char *base; + + IA_CSS_TRACE_0(PSYSAPI_STATIC, VERBOSE, + "ia_css_program_manifest_get_parent(): enter:\n"); + + verifexit(manifest != NULL); + + base = (char *)((char *)manifest + manifest->parent_offset); + + parent = (ia_css_program_group_manifest_t *) (base); +EXIT: + if (NULL == manifest) { + IA_CSS_TRACE_0(PSYSAPI_STATIC, WARNING, + "ia_css_program_manifest_get_parent invalid argument\n"); + } + return parent; +} + +int ia_css_program_manifest_set_parent_offset( + ia_css_program_manifest_t *manifest, + int32_t program_offset) +{ + int retval = -1; + + IA_CSS_TRACE_0(PSYSAPI_STATIC, VERBOSE, + "ia_css_program_manifest_set_parent_offset(): enter:\n"); + + verifexit(manifest != NULL); + + /* parent is at negative offset away from current program offset*/ + manifest->parent_offset = -program_offset; + + retval = 0; +EXIT: + if (retval != 0) { + IA_CSS_TRACE_1(PSYSAPI_STATIC, ERROR, + "ia_css_program_manifest_set_parent_offset failed (%i)\n", + retval); + } + return retval; +} + +ia_css_program_type_t ia_css_program_manifest_get_type( + const ia_css_program_manifest_t *manifest) +{ 
+ ia_css_program_type_t program_type = IA_CSS_N_PROGRAM_TYPES; + + IA_CSS_TRACE_0(PSYSAPI_STATIC, VERBOSE, + "ia_css_program_manifest_get_type(): enter:\n"); + + if (manifest != NULL) { + program_type = manifest->program_type; + } else { + IA_CSS_TRACE_0(PSYSAPI_STATIC, WARNING, + "ia_css_program_manifest_get_type invalid argument\n"); + } + return program_type; +} + +int ia_css_program_manifest_set_type( + ia_css_program_manifest_t *manifest, + const ia_css_program_type_t program_type) +{ + int retval = -1; + + IA_CSS_TRACE_0(PSYSAPI_STATIC, VERBOSE, + "ia_css_program_manifest_set_type(): enter:\n"); + + if (manifest != NULL) { + manifest->program_type = program_type; + retval = 0; + } else { + IA_CSS_TRACE_1(PSYSAPI_STATIC, ERROR, + "ia_css_program_manifest_set_type failed (%i)\n", retval); + } + return retval; +} + +ia_css_kernel_bitmap_t ia_css_program_manifest_get_kernel_bitmap( + const ia_css_program_manifest_t *manifest) +{ + ia_css_kernel_bitmap_t kernel_bitmap = ia_css_kernel_bitmap_clear(); + + IA_CSS_TRACE_0(PSYSAPI_STATIC, VERBOSE, + "ia_css_program_manifest_get_kernel_bitmap(): enter:\n"); + + if (manifest != NULL) { + kernel_bitmap = manifest->kernel_bitmap; + } else { + IA_CSS_TRACE_0(PSYSAPI_STATIC, WARNING, + "ia_css_program_manifest_get_kernel_bitmap invalid argument\n"); + } + return kernel_bitmap; +} + +int ia_css_program_manifest_set_kernel_bitmap( + ia_css_program_manifest_t *manifest, + const ia_css_kernel_bitmap_t kernel_bitmap) +{ + int retval = -1; + + IA_CSS_TRACE_0(PSYSAPI_STATIC, VERBOSE, + "ia_css_program_manifest_set_kernel_bitmap(): enter:\n"); + + if (manifest != NULL) { + manifest->kernel_bitmap = kernel_bitmap; + retval = 0; + } else { + IA_CSS_TRACE_1(PSYSAPI_STATIC, ERROR, + "ia_css_program_manifest_set_kernel_bitmap failed (%i)\n", + retval); + } + return retval; +} + +vied_nci_cell_ID_t ia_css_program_manifest_get_cell_ID( + const ia_css_program_manifest_t *manifest) +{ + vied_nci_cell_ID_t cell_id = VIED_NCI_N_CELL_ID; +#if 
IA_CSS_PROCESS_MAX_CELLS > 1 + int i = 0; +#endif /* IA_CSS_PROCESS_MAX_CELLS > 1 */ + IA_CSS_TRACE_0(PSYSAPI_STATIC, VERBOSE, + "ia_css_program_manifest_get_cell_ID(): enter:\n"); + + verifexit(manifest != NULL); + +#if IA_CSS_PROCESS_MAX_CELLS == 1 + cell_id = manifest->cell_id; +#else + for (i = 1; i < IA_CSS_PROCESS_MAX_CELLS; i++) { + assert(VIED_NCI_N_CELL_ID == manifest->cells[i]); +#ifdef __HIVECC +#pragma hivecc unroll +#endif + } + cell_id = manifest->cells[0]; +#endif /* IA_CSS_PROCESS_MAX_CELLS == 1 */ +EXIT: + if (NULL == manifest) { + IA_CSS_TRACE_0(PSYSAPI_STATIC, WARNING, + "ia_css_program_manifest_get_cell_ID invalid argument\n"); + } + return cell_id; +} + +int ia_css_program_manifest_set_cell_ID( + ia_css_program_manifest_t *manifest, + const vied_nci_cell_ID_t cell_id) +{ + int retval = -1; +#if IA_CSS_PROCESS_MAX_CELLS > 1 + int i = 0; +#endif /* IA_CSS_PROCESS_MAX_CELLS > 1 */ + IA_CSS_TRACE_0(PSYSAPI_STATIC, VERBOSE, + "ia_css_program_manifest_set_cell_ID(): enter:\n"); + if (manifest != NULL) { +#if IA_CSS_PROCESS_MAX_CELLS == 1 + manifest->cell_id = cell_id; +#else + manifest->cells[0] = cell_id; + for (i = 1; i < IA_CSS_PROCESS_MAX_CELLS; i++) { + manifest->cells[i] = VIED_NCI_N_CELL_ID; + } +#endif /* IA_CSS_PROCESS_MAX_CELLS == 1 */ + retval = 0; + } else { + IA_CSS_TRACE_1(PSYSAPI_STATIC, ERROR, + "ia_css_program_manifest_set_cell_ID failed (%i)\n", retval); + } + return retval; +} + +vied_nci_cell_type_ID_t ia_css_program_manifest_get_cell_type_ID( + const ia_css_program_manifest_t *manifest) +{ + vied_nci_cell_type_ID_t cell_type_id = VIED_NCI_N_CELL_TYPE_ID; + + IA_CSS_TRACE_0(PSYSAPI_STATIC, VERBOSE, + "ia_css_program_manifest_get_cell_type_ID(): enter:\n"); + + verifexit(manifest != NULL); + + cell_type_id = (vied_nci_cell_type_ID_t)(manifest->cell_type_id); +EXIT: + if (NULL == manifest) { + IA_CSS_TRACE_0(PSYSAPI_STATIC, WARNING, + "ia_css_program_manifest_get_cell_type_ID invalid argument\n"); + } + return cell_type_id; +} + 
+int ia_css_program_manifest_set_cell_type_ID( + ia_css_program_manifest_t *manifest, + const vied_nci_cell_type_ID_t cell_type_id) +{ + int retval = -1; + + IA_CSS_TRACE_0(PSYSAPI_STATIC, VERBOSE, + "ia_css_program_manifest_set_cell_type_ID(): enter:\n"); + if (manifest != NULL) { + manifest->cell_type_id = cell_type_id; + retval = 0; + } else { + IA_CSS_TRACE_1(PSYSAPI_STATIC, ERROR, + "ia_css_program_manifest_set_cell_type_ID failed (%i)\n", + retval); + } + return retval; +} + +vied_nci_resource_size_t ia_css_program_manifest_get_int_mem_size( + const ia_css_program_manifest_t *manifest, + const vied_nci_mem_type_ID_t mem_type_id) +{ + vied_nci_resource_size_t int_mem_size = 0; + vied_nci_cell_type_ID_t cell_type_id; + int mem_index; + + IA_CSS_TRACE_0(PSYSAPI_STATIC, VERBOSE, + "ia_css_program_manifest_get_int_mem_size(): enter:\n"); + + verifexit(manifest != NULL); + verifexit(mem_type_id < VIED_NCI_N_MEM_TYPE_ID); + + if (ia_css_has_program_manifest_fixed_cell(manifest)) { + vied_nci_cell_ID_t cell_id = + ia_css_program_manifest_get_cell_ID(manifest); + + cell_type_id = vied_nci_cell_get_type(cell_id); + } else { + cell_type_id = + ia_css_program_manifest_get_cell_type_ID(manifest); + } + + /* loop over vied_nci_cell_mem_type to verify mem_type_id for a + * specific cell_type_id + */ + for (mem_index = 0; mem_index < VIED_NCI_N_MEM_TYPE_ID; mem_index++) { + if ((int)mem_type_id == + (int)vied_nci_cell_type_get_mem_type( + cell_type_id, mem_index)) { + int_mem_size = manifest->int_mem_size[mem_index]; + } + } + +EXIT: + if (NULL == manifest || mem_type_id >= VIED_NCI_N_MEM_TYPE_ID) { + IA_CSS_TRACE_0(PSYSAPI_STATIC, WARNING, + "ia_css_program_manifest_get_int_mem_size invalid argument\n"); + } + return int_mem_size; +} + +int ia_css_program_manifest_set_cells_bitmap( + ia_css_program_manifest_t *manifest, + const vied_nci_resource_bitmap_t bitmap) +{ + int retval = -1; + int array_index = 0; + int bit_index; + + IA_CSS_TRACE_0(PSYSAPI_STATIC, VERBOSE, + 
"ia_css_program_manifest_set_cells_bitmap(): enter:\n"); + + if (manifest != NULL) { + for (bit_index = 0; bit_index < VIED_NCI_N_CELL_ID; bit_index++) { + if (vied_nci_is_bit_set_in_bitmap(bitmap, bit_index)) { + verifexit(array_index < IA_CSS_PROCESS_MAX_CELLS); +#if IA_CSS_PROCESS_MAX_CELLS == 1 + manifest->cell_id = (vied_nci_cell_ID_t)bit_index; +#else + manifest->cells[array_index] = (vied_nci_cell_ID_t)bit_index; +#endif /* IA_CSS_PROCESS_MAX_CELLS == 1 */ + array_index++; + } + } + for (; array_index < IA_CSS_PROCESS_MAX_CELLS; array_index++) { +#if IA_CSS_PROCESS_MAX_CELLS == 1 + manifest->cell_id = VIED_NCI_N_CELL_ID; +#else + manifest->cells[array_index] = VIED_NCI_N_CELL_ID; +#endif /* IA_CSS_PROCESS_MAX_CELLS */ + } + retval = 0; + } else { + IA_CSS_TRACE_0(PSYSAPI_STATIC, WARNING, + "ia_css_program_manifest_set_cells_bitmap invalid argument\n"); + } +EXIT: + return retval; +} + +vied_nci_resource_bitmap_t ia_css_program_manifest_get_cells_bitmap( + const ia_css_program_manifest_t *manifest) +{ + vied_nci_resource_bitmap_t bitmap = 0; +#if IA_CSS_PROCESS_MAX_CELLS > 1 + int i = 0; +#endif /* IA_CSS_PROCESS_MAX_CELLS > 1 */ + IA_CSS_TRACE_0(PSYSAPI_STATIC, VERBOSE, + "ia_css_program_manifest_get_cells_bitmap(): enter:\n"); + + verifexit(manifest != NULL); + +#if IA_CSS_PROCESS_MAX_CELLS == 1 + bitmap = (1 << manifest->cell_id); +#else + for (i = 0; i < IA_CSS_PROCESS_MAX_CELLS; i++) { + if (VIED_NCI_N_CELL_ID != manifest->cells[i]) { + bitmap |= (1 << manifest->cells[i]); + } +#ifdef __HIVECC +#pragma hivecc unroll +#endif + } +#endif /* IA_CSS_PROCESS_MAX_CELLS == 1 */ +EXIT: + if (NULL == manifest) { + IA_CSS_TRACE_0(PSYSAPI_STATIC, WARNING, + "ia_css_program_manifest_get_cells_bitmap invalid argument\n"); + } + return bitmap; +} + +int ia_css_program_manifest_set_dfm_port_bitmap( + ia_css_program_manifest_t *manifest, + const vied_nci_dev_dfm_id_t dfm_type_id, + const vied_nci_resource_bitmap_t bitmap) +{ + int retval = -1; + + 
IA_CSS_TRACE_0(PSYSAPI_STATIC, VERBOSE, + "ia_css_program_manifest_set_dfm_port_bitmap(): enter:\n"); + + verifexit(manifest != NULL); +#if (VIED_NCI_N_DEV_DFM_ID > 0) + verifexit(dfm_type_id < VIED_NCI_N_DEV_DFM_ID); + manifest->dfm_port_bitmap[dfm_type_id] = bitmap; +#else + (void)bitmap; + (void)dfm_type_id; +#endif + retval = 0; + +EXIT: + if (retval != 0) { + IA_CSS_TRACE_0(PSYSAPI_STATIC, WARNING, + "ia_css_program_manifest_set_dfm_port_bitmap invalid argument\n"); + } + return retval; +} + +int ia_css_program_manifest_set_dfm_active_port_bitmap( + ia_css_program_manifest_t *manifest, + const vied_nci_dev_dfm_id_t dfm_type_id, + const vied_nci_resource_bitmap_t bitmap) +{ + int retval = -1; + + IA_CSS_TRACE_0(PSYSAPI_STATIC, VERBOSE, + "ia_css_program_manifest_set_dfm_active_port_bitmap(): enter:\n"); + + verifexit(manifest != NULL); +#if (VIED_NCI_N_DEV_DFM_ID > 0) + verifexit(dfm_type_id < VIED_NCI_N_DEV_DFM_ID); + manifest->dfm_active_port_bitmap[dfm_type_id] = bitmap; +#else + (void)bitmap; + (void)dfm_type_id; +#endif + retval = 0; + +EXIT: + if (retval != 0) { + IA_CSS_TRACE_0(PSYSAPI_STATIC, WARNING, + "ia_css_program_manifest_set_dfm_active_port_bitmap invalid argument\n"); + } + return retval; +} + +int ia_css_program_manifest_set_is_dfm_relocatable( + ia_css_program_manifest_t *manifest, + const vied_nci_dev_dfm_id_t dfm_type_id, + const uint8_t is_relocatable) +{ + int retval = -1; + + IA_CSS_TRACE_0(PSYSAPI_STATIC, VERBOSE, + "ia_css_program_manifest_set_is_dfm_relocatable(): enter:\n"); + + verifexit(manifest != NULL); +#if (VIED_NCI_N_DEV_DFM_ID > 0) + verifexit(dfm_type_id < VIED_NCI_N_DEV_DFM_ID); + manifest->is_dfm_relocatable[dfm_type_id] = is_relocatable; +#else + (void)is_relocatable; + (void)dfm_type_id; +#endif + retval = 0; + + EXIT: + if (retval != 0) { + IA_CSS_TRACE_0(PSYSAPI_STATIC, WARNING, + "ia_css_program_manifest_set_is_dfm_relocatable invalid argument\n"); + } + + return retval; +} + +uint8_t 
ia_css_program_manifest_get_is_dfm_relocatable( + const ia_css_program_manifest_t *manifest, + const vied_nci_dev_dfm_id_t dfm_type_id) +{ + uint8_t ret = 0; + + IA_CSS_TRACE_0(PSYSAPI_STATIC, VERBOSE, + "ia_css_program_manifest_get_is_dfm_relocatable(): enter:\n"); + + verifexit(manifest != NULL); +#if (VIED_NCI_N_DEV_DFM_ID > 0) + verifexit(dfm_type_id < VIED_NCI_N_DEV_DFM_ID); + ret = manifest->is_dfm_relocatable[dfm_type_id]; +#else + ret = 0; + (void)dfm_type_id; +#endif +EXIT: + if (NULL == manifest) { + IA_CSS_TRACE_0(PSYSAPI_STATIC, WARNING, + "ia_css_program_manifest_get_is_dfm_relocatable invalid argument\n"); + } + return ret; +} + +vied_nci_resource_bitmap_t ia_css_program_manifest_get_dfm_port_bitmap( + const ia_css_program_manifest_t *manifest, + const vied_nci_dev_dfm_id_t dfm_type_id) +{ + vied_nci_resource_bitmap_t bitmap = 0; + + IA_CSS_TRACE_0(PSYSAPI_STATIC, VERBOSE, + "ia_css_program_manifest_get_dfm_port_bitmap(): enter:\n"); + + verifexit(manifest != NULL); +#if (VIED_NCI_N_DEV_DFM_ID > 0) + verifexit(dfm_type_id < VIED_NCI_N_DEV_DFM_ID); + bitmap = manifest->dfm_port_bitmap[dfm_type_id]; +#else + bitmap = 0; + (void)dfm_type_id; +#endif +EXIT: + if (NULL == manifest) { + IA_CSS_TRACE_0(PSYSAPI_STATIC, WARNING, + "ia_css_program_manifest_get_dfm_port_bitmap invalid argument\n"); + } + return bitmap; +} + +vied_nci_resource_bitmap_t ia_css_program_manifest_get_dfm_active_port_bitmap( + const ia_css_program_manifest_t *manifest, + const vied_nci_dev_dfm_id_t dfm_type_id) +{ + vied_nci_resource_bitmap_t bitmap = 0; + + IA_CSS_TRACE_0(PSYSAPI_STATIC, VERBOSE, + "ia_css_program_manifest_get_dfm_active_port_bitmap(): enter:\n"); + + verifexit(manifest != NULL); +#if (VIED_NCI_N_DEV_DFM_ID > 0) + verifexit(dfm_type_id < VIED_NCI_N_DEV_DFM_ID); + bitmap = manifest->dfm_active_port_bitmap[dfm_type_id]; +#else + bitmap = 0; + (void)dfm_type_id; +#endif +EXIT: + if (NULL == manifest) { + IA_CSS_TRACE_0(PSYSAPI_STATIC, WARNING, + 
"ia_css_program_manifest_get_dfm_active_port_bitmap invalid argument\n"); + } + return bitmap; +} + +int ia_css_program_manifest_set_int_mem_size( + ia_css_program_manifest_t *manifest, + const vied_nci_mem_type_ID_t mem_type_id, + const vied_nci_resource_size_t int_mem_size) +{ + int retval = -1; + vied_nci_cell_type_ID_t cell_type_id; + int mem_index; + + IA_CSS_TRACE_0(PSYSAPI_STATIC, VERBOSE, + "ia_css_program_manifest_set_int_mem_size(): enter:\n"); + + if (ia_css_has_program_manifest_fixed_cell(manifest)) { + vied_nci_cell_ID_t cell_id = + ia_css_program_manifest_get_cell_ID(manifest); + + cell_type_id = vied_nci_cell_get_type(cell_id); + } else { + cell_type_id = + ia_css_program_manifest_get_cell_type_ID(manifest); + } + + if (manifest != NULL && mem_type_id < VIED_NCI_N_MEM_TYPE_ID) { + /* loop over vied_nci_cell_mem_type to verify mem_type_id for + * a specific cell_type_id + */ + for (mem_index = 0; mem_index < VIED_NCI_N_MEM_TYPE_ID; + mem_index++) { + if ((int)mem_type_id == + (int)vied_nci_cell_type_get_mem_type( + cell_type_id, mem_index)) { + manifest->int_mem_size[mem_index] = + int_mem_size; + retval = 0; + } + } + } + if (retval != 0) { + IA_CSS_TRACE_2(PSYSAPI_STATIC, ERROR, + "ia_css_program_manifest_set_int_mem_size cell_type_id %d has no mem_type_id %d\n", + (int)cell_type_id, (int)mem_type_id); + } + + return retval; +} + +vied_nci_resource_size_t ia_css_program_manifest_get_ext_mem_size( + const ia_css_program_manifest_t *manifest, + const vied_nci_mem_type_ID_t mem_type_id) +{ + vied_nci_resource_size_t ext_mem_size = 0; + + IA_CSS_TRACE_0(PSYSAPI_STATIC, VERBOSE, + "ia_css_program_manifest_get_ext_mem_size(): enter:\n"); + + verifexit(manifest != NULL); + verifexit(mem_type_id < VIED_NCI_N_DATA_MEM_TYPE_ID); + + ext_mem_size = manifest->ext_mem_size[mem_type_id]; +EXIT: + if (NULL == manifest || mem_type_id >= VIED_NCI_N_DATA_MEM_TYPE_ID) { + IA_CSS_TRACE_0(PSYSAPI_STATIC, WARNING, + "ia_css_program_manifest_get_ext_mem_size invalid 
argument\n"); + } + return ext_mem_size; +} + +vied_nci_resource_size_t ia_css_program_manifest_get_ext_mem_offset( + const ia_css_program_manifest_t *manifest, + const vied_nci_mem_type_ID_t mem_type_id) +{ + vied_nci_resource_size_t ext_mem_offset = 0; + + IA_CSS_TRACE_0(PSYSAPI_STATIC, VERBOSE, + "ia_css_program_manifest_get_ext_mem_offset(): enter:\n"); + + verifexit(manifest != NULL); + verifexit(mem_type_id < VIED_NCI_N_DATA_MEM_TYPE_ID); + + ext_mem_offset = manifest->ext_mem_offset[mem_type_id]; +EXIT: + if (NULL == manifest || mem_type_id >= VIED_NCI_N_DATA_MEM_TYPE_ID) { + IA_CSS_TRACE_0(PSYSAPI_STATIC, WARNING, + "ia_css_program_manifest_get_ext_mem_offset invalid argument\n"); + } + return ext_mem_offset; +} + +int ia_css_program_manifest_set_ext_mem_size( + ia_css_program_manifest_t *manifest, + const vied_nci_mem_type_ID_t mem_type_id, + const vied_nci_resource_size_t ext_mem_size) +{ + int retval = -1; + + IA_CSS_TRACE_0(PSYSAPI_STATIC, VERBOSE, + "ia_css_program_manifest_set_ext_mem_size(): enter:\n"); + + if (manifest != NULL && mem_type_id < VIED_NCI_N_DATA_MEM_TYPE_ID) { + manifest->ext_mem_size[mem_type_id] = ext_mem_size; + retval = 0; + } else { + IA_CSS_TRACE_0(PSYSAPI_STATIC, WARNING, + "ia_css_program_manifest_set_ext_mem_size invalid argument\n"); + } + + return retval; +} + +int ia_css_program_manifest_set_ext_mem_offset( + ia_css_program_manifest_t *manifest, + const vied_nci_mem_type_ID_t mem_type_id, + const vied_nci_resource_size_t ext_mem_offset) +{ + int retval = -1; + + IA_CSS_TRACE_0(PSYSAPI_STATIC, VERBOSE, + "ia_css_program_manifest_set_ext_mem_offset(): enter:\n"); + + if (manifest != NULL && mem_type_id < VIED_NCI_N_DATA_MEM_TYPE_ID) { + manifest->ext_mem_offset[mem_type_id] = ext_mem_offset; + retval = 0; + } else { + IA_CSS_TRACE_0(PSYSAPI_STATIC, WARNING, + "ia_css_program_manifest_set_ext_mem_offset invalid argument\n"); + } + + return retval; +} + +vied_nci_resource_size_t ia_css_program_manifest_get_dev_chn_size( + const 
ia_css_program_manifest_t *manifest, + const vied_nci_dev_chn_ID_t dev_chn_id) +{ + vied_nci_resource_size_t dev_chn_size = 0; + + IA_CSS_TRACE_0(PSYSAPI_STATIC, VERBOSE, + "ia_css_program_manifest_get_dev_chn_size(): enter:\n"); + + verifexit(manifest != NULL); + verifexit(dev_chn_id < VIED_NCI_N_DEV_CHN_ID); + + dev_chn_size = manifest->dev_chn_size[dev_chn_id]; +EXIT: + if (NULL == manifest || dev_chn_id >= VIED_NCI_N_DEV_CHN_ID) { + IA_CSS_TRACE_0(PSYSAPI_STATIC, WARNING, + "ia_css_program_manifest_get_dev_chn_size invalid argument\n"); + } + return dev_chn_size; +} + +vied_nci_resource_size_t ia_css_program_manifest_get_dev_chn_offset( + const ia_css_program_manifest_t *manifest, + const vied_nci_dev_chn_ID_t dev_chn_id) +{ + vied_nci_resource_size_t dev_chn_offset = 0; + + IA_CSS_TRACE_0(PSYSAPI_STATIC, VERBOSE, + "ia_css_program_manifest_get_dev_chn_offset(): enter:\n"); + + verifexit(manifest != NULL); + verifexit(dev_chn_id < VIED_NCI_N_DEV_CHN_ID); + + dev_chn_offset = manifest->dev_chn_offset[dev_chn_id]; +EXIT: + if (NULL == manifest || dev_chn_id >= VIED_NCI_N_DEV_CHN_ID) { + IA_CSS_TRACE_0(PSYSAPI_STATIC, WARNING, + "ia_css_program_manifest_get_dev_chn_offset invalid argument\n"); + } + return dev_chn_offset; +} + +int ia_css_program_manifest_set_dev_chn_size( + ia_css_program_manifest_t *manifest, + const vied_nci_dev_chn_ID_t dev_chn_id, + const vied_nci_resource_size_t dev_chn_size) +{ + int retval = -1; + + IA_CSS_TRACE_0(PSYSAPI_STATIC, VERBOSE, + "ia_css_program_manifest_set_dev_chn_size(): enter:\n"); + + if (manifest != NULL && dev_chn_id < VIED_NCI_N_DEV_CHN_ID) { + manifest->dev_chn_size[dev_chn_id] = dev_chn_size; + retval = 0; + } else { + IA_CSS_TRACE_0(PSYSAPI_STATIC, WARNING, + "ia_css_program_manifest_set_dev_chn_size invalid argument\n"); + } + + return retval; +} + +int ia_css_program_manifest_set_dev_chn_offset( + ia_css_program_manifest_t *manifest, + const vied_nci_dev_chn_ID_t dev_chn_id, + const vied_nci_resource_size_t 
dev_chn_offset) +{ + int retval = -1; + + IA_CSS_TRACE_0(PSYSAPI_STATIC, VERBOSE, + "ia_css_program_manifest_set_dev_chn_offset(): enter:\n"); + + if (manifest != NULL && dev_chn_id < VIED_NCI_N_DEV_CHN_ID) { + manifest->dev_chn_offset[dev_chn_id] = dev_chn_offset; + retval = 0; + } else { + IA_CSS_TRACE_0(PSYSAPI_STATIC, WARNING, + "ia_css_program_manifest_set_dev_chn_offset invalid argument\n"); + } + + return retval; +} + +uint8_t ia_css_program_manifest_get_program_dependency_count( + const ia_css_program_manifest_t *manifest) +{ + uint8_t program_dependency_count = 0; + + IA_CSS_TRACE_0(PSYSAPI_STATIC, VERBOSE, + "ia_css_program_manifest_get_program_dependency_count(): enter:\n"); + + if (manifest != NULL) { + program_dependency_count = manifest->program_dependency_count; + } else { + IA_CSS_TRACE_0(PSYSAPI_STATIC, WARNING, + "ia_css_program_manifest_get_program_dependency_count invalid argument\n"); + } + return program_dependency_count; +} + +uint8_t ia_css_program_manifest_get_program_dependency( + const ia_css_program_manifest_t *manifest, + const unsigned int index) +{ + uint8_t program_dependency = IA_CSS_PROGRAM_INVALID_DEPENDENCY; + uint8_t *program_dep_ptr; + uint8_t program_dependency_count; + + IA_CSS_TRACE_0(PSYSAPI_STATIC, VERBOSE, + "ia_css_program_manifest_get_program_dependency(): enter:\n"); + + program_dependency_count = + ia_css_program_manifest_get_program_dependency_count(manifest); + + if (index < program_dependency_count) { + program_dep_ptr = + (uint8_t *)((uint8_t *)manifest + + manifest->program_dependency_offset + + index * sizeof(uint8_t)); + program_dependency = *program_dep_ptr; + } else { + IA_CSS_TRACE_0(PSYSAPI_STATIC, WARNING, + "ia_css_program_manifest_get_program_dependency invalid argument\n"); + } + return program_dependency; +} + +int ia_css_program_manifest_set_program_dependency( + ia_css_program_manifest_t *manifest, + const uint8_t program_dependency, + const unsigned int index) +{ + int retval = -1; + uint8_t 
*program_dep_ptr; + uint8_t program_dependency_count; + uint8_t program_count; + + IA_CSS_TRACE_0(PSYSAPI_STATIC, VERBOSE, + "ia_css_program_manifest_set_program_dependency(): enter:\n"); + + program_dependency_count = + ia_css_program_manifest_get_program_dependency_count(manifest); + program_count = + ia_css_program_group_manifest_get_program_count( + ia_css_program_manifest_get_parent(manifest)); + + if ((index < program_dependency_count) && + (program_dependency < program_count)) { + program_dep_ptr = (uint8_t *)((uint8_t *)manifest + + manifest->program_dependency_offset + + index*sizeof(uint8_t)); + *program_dep_ptr = program_dependency; + retval = 0; + } + + if (retval != 0) { + IA_CSS_TRACE_3(PSYSAPI_STATIC, ERROR, + "ia_css_program_manifest_set_program_dependency(m, %d, %d) failed (%i)\n", + program_dependency, index, retval); + } + return retval; +} + +uint8_t ia_css_program_manifest_get_terminal_dependency_count( + const ia_css_program_manifest_t *manifest) +{ + uint8_t terminal_dependency_count = 0; + + IA_CSS_TRACE_0(PSYSAPI_STATIC, VERBOSE, + "ia_css_program_manifest_get_terminal_dependency_count(): enter:\n"); + + if (manifest != NULL) { + terminal_dependency_count = manifest->terminal_dependency_count; + } else { + IA_CSS_TRACE_0(PSYSAPI_STATIC, WARNING, + "ia_css_program_manifest_get_terminal_dependency_count invalid argument\n"); + } + return terminal_dependency_count; +} + +uint8_t ia_css_program_manifest_get_terminal_dependency( + const ia_css_program_manifest_t *manifest, + const unsigned int index) +{ + uint8_t terminal_dependency = IA_CSS_PROGRAM_INVALID_DEPENDENCY; + uint8_t *terminal_dep_ptr; + uint8_t terminal_dependency_count = + ia_css_program_manifest_get_terminal_dependency_count(manifest); + + IA_CSS_TRACE_0(PSYSAPI_STATIC, VERBOSE, + "ia_css_program_manifest_get_terminal_dependency(): enter:\n"); + + if (index < terminal_dependency_count) { + terminal_dep_ptr = (uint8_t *)((uint8_t *)manifest + + manifest->terminal_dependency_offset 
+ index); + terminal_dependency = *terminal_dep_ptr; + } else { + IA_CSS_TRACE_0(PSYSAPI_STATIC, WARNING, + "ia_css_program_manifest_get_terminal_dependency invalid argument\n"); + } + return terminal_dependency; +} + +int ia_css_program_manifest_set_terminal_dependency( + ia_css_program_manifest_t *manifest, + const uint8_t terminal_dependency, + const unsigned int index) +{ + int retval = -1; + uint8_t *terminal_dep_ptr; + uint8_t terminal_dependency_count = + ia_css_program_manifest_get_terminal_dependency_count(manifest); + uint8_t terminal_count = + ia_css_program_group_manifest_get_terminal_count( + ia_css_program_manifest_get_parent(manifest)); + + IA_CSS_TRACE_0(PSYSAPI_STATIC, VERBOSE, + "ia_css_program_manifest_set_terminal_dependency(): enter:\n"); + + if ((index < terminal_dependency_count) && + (terminal_dependency < terminal_count)) { + terminal_dep_ptr = (uint8_t *)((uint8_t *)manifest + + manifest->terminal_dependency_offset + index); + *terminal_dep_ptr = terminal_dependency; + retval = 0; + } + + if (retval != 0) { + IA_CSS_TRACE_1(PSYSAPI_STATIC, ERROR, + "ia_css_program_manifest_set_terminal_dependency failed (%i)\n", + retval); + } + return retval; +} + +bool ia_css_is_program_manifest_subnode_program_type( + const ia_css_program_manifest_t *manifest) +{ + ia_css_program_type_t program_type; + + IA_CSS_TRACE_0(PSYSAPI_STATIC, VERBOSE, + "ia_css_is_program_manifest_subnode_program_type(): enter:\n"); + + program_type = ia_css_program_manifest_get_type(manifest); +/* The error return is the limit value, so no need to check on the manifest + * pointer + */ + return (program_type == IA_CSS_PROGRAM_TYPE_PARALLEL_SUB) || + (program_type == IA_CSS_PROGRAM_TYPE_EXCLUSIVE_SUB) || + (program_type == IA_CSS_PROGRAM_TYPE_VIRTUAL_SUB); +} + +bool ia_css_is_program_manifest_supernode_program_type( + const ia_css_program_manifest_t *manifest) +{ + ia_css_program_type_t program_type; + + IA_CSS_TRACE_0(PSYSAPI_STATIC, VERBOSE, + 
"ia_css_is_program_manifest_supernode_program_type(): enter:\n"); + + program_type = ia_css_program_manifest_get_type(manifest); + +/* The error return is the limit value, so no need to check on the manifest + * pointer + */ + return (program_type == IA_CSS_PROGRAM_TYPE_PARALLEL_SUPER) || + (program_type == IA_CSS_PROGRAM_TYPE_EXCLUSIVE_SUPER) || + (program_type == IA_CSS_PROGRAM_TYPE_VIRTUAL_SUPER); +} + +bool ia_css_is_program_manifest_singular_program_type( + const ia_css_program_manifest_t *manifest) +{ + ia_css_program_type_t program_type; + + IA_CSS_TRACE_0(PSYSAPI_STATIC, VERBOSE, + "ia_css_is_program_manifest_singular_program_type(): enter:\n"); + + program_type = ia_css_program_manifest_get_type(manifest); + +/* The error return is the limit value, so no need to check on the manifest + * pointer + */ + return (program_type == IA_CSS_PROGRAM_TYPE_SINGULAR); +} + +void ia_css_program_manifest_init( + ia_css_program_manifest_t *blob, + const uint8_t program_dependency_count, + const uint8_t terminal_dependency_count) +{ + IA_CSS_TRACE_0(PSYSAPI_STATIC, INFO, + "ia_css_program_manifest_init(): enter:\n"); + + /*TODO: add assert*/ + if (!blob) + return; + + blob->ID = 1; + blob->program_dependency_count = program_dependency_count; + blob->terminal_dependency_count = terminal_dependency_count; + blob->program_dependency_offset = sizeof(ia_css_program_manifest_t); + blob->terminal_dependency_offset = blob->program_dependency_offset + + sizeof(uint8_t) * program_dependency_count; + blob->size = + (uint16_t)ia_css_sizeof_program_manifest( + program_dependency_count, + terminal_dependency_count); +} + +/* We need to refactor those files in order to build in the firmware only + what is needed, switches are put current to workaround compilation problems + in the firmware (for example lack of uint64_t support) + supported in the firmware + */ +#if !defined(__HIVECC) + +#if defined(_MSC_VER) +/* WA for a visual studio compiler bug, refer to + 
developercommunity.visualstudio.com/content/problem/209359/ice-with-fpfast-in-156-and-msvc-daily-1413263051-p.html +*/ +#pragma optimize("", off) +#endif + +int ia_css_program_manifest_print( + const ia_css_program_manifest_t *manifest, + void *fid) +{ + int retval = -1; + int i, mem_index, dev_chn_index; + + vied_nci_cell_type_ID_t cell_type_id; + uint8_t program_dependency_count; + uint8_t terminal_dependency_count; + ia_css_kernel_bitmap_t bitmap; + + IA_CSS_TRACE_0(PSYSAPI_STATIC, INFO, + "ia_css_program_manifest_print(): enter:\n"); + + verifexit(manifest != NULL); + NOT_USED(fid); + + IA_CSS_TRACE_1(PSYSAPI_STATIC, INFO, "sizeof(manifest) = %d\n", + (int)ia_css_program_manifest_get_size(manifest)); + IA_CSS_TRACE_1(PSYSAPI_STATIC, INFO, "program ID = %d\n", + (int)ia_css_program_manifest_get_program_ID(manifest)); + + bitmap = ia_css_program_manifest_get_kernel_bitmap(manifest); + verifexit(ia_css_kernel_bitmap_print(bitmap, fid) == 0); + + if (ia_css_has_program_manifest_fixed_cell(manifest)) { + vied_nci_cell_ID_t cell_id = + ia_css_program_manifest_get_cell_ID(manifest); + + cell_type_id = vied_nci_cell_get_type(cell_id); + IA_CSS_TRACE_1(PSYSAPI_STATIC, INFO, "cell(program) = %d\n", + (int)cell_id); + } else { + cell_type_id = + ia_css_program_manifest_get_cell_type_ID(manifest); + } + IA_CSS_TRACE_1(PSYSAPI_STATIC, INFO, "cell type(program) = %d\n", + (int)cell_type_id); + + for (mem_index = 0; mem_index < (int)VIED_NCI_N_MEM_TYPE_ID; + mem_index++) { + IA_CSS_TRACE_1(PSYSAPI_STATIC, INFO, + "\ttype(internal mem) type = %d\n", + (int)vied_nci_cell_type_get_mem_type(cell_type_id, mem_index)); + IA_CSS_TRACE_1(PSYSAPI_STATIC, INFO, + "\ttype(internal mem) size = %d\n", + manifest->int_mem_size[mem_index]); + } + + for (mem_index = 0; mem_index < (int)VIED_NCI_N_DATA_MEM_TYPE_ID; + mem_index++) { + IA_CSS_TRACE_1(PSYSAPI_STATIC, INFO, + "\ttype(external mem) type = %d\n", + (int)(vied_nci_mem_type_ID_t)mem_index); + IA_CSS_TRACE_1(PSYSAPI_STATIC, INFO, + 
"\ttype(external mem) size = %d\n", + manifest->ext_mem_size[mem_index]); + IA_CSS_TRACE_1(PSYSAPI_STATIC, INFO, + "\ttype(external mem) offset = %d\n", + manifest->ext_mem_offset[mem_index]); + } + + for (dev_chn_index = 0; dev_chn_index < (int)VIED_NCI_N_DEV_CHN_ID; + dev_chn_index++) { + IA_CSS_TRACE_1(PSYSAPI_STATIC, INFO, + "\ttype(device channel) type = %d\n", + (int)dev_chn_index); + IA_CSS_TRACE_1(PSYSAPI_STATIC, INFO, + "\ttype(device channel) size = %d\n", + manifest->dev_chn_size[dev_chn_index]); + IA_CSS_TRACE_1(PSYSAPI_STATIC, INFO, + "\ttype(device channel) offset = %d\n", + manifest->dev_chn_offset[dev_chn_index]); + } +#if HAS_DFM + for (dev_chn_index = 0; dev_chn_index < (int)VIED_NCI_N_DEV_DFM_ID; + dev_chn_index++) { + IA_CSS_TRACE_1(PSYSAPI_STATIC, INFO, + "\ttype(dfm port) type = %d\n", + (int)dev_chn_index); + IA_CSS_TRACE_1(PSYSAPI_STATIC, INFO, + "\ttype(dfm port) port_bitmap = %d\n", + manifest->dfm_port_bitmap[dev_chn_index]); + IA_CSS_TRACE_1(PSYSAPI_STATIC, INFO, + "\ttype(dfm port) active_port_bitmap = %d\n", + manifest->dfm_active_port_bitmap[dev_chn_index]); + + IA_CSS_TRACE_1(PSYSAPI_STATIC, INFO, + "\ttype(dfm port) is_dfm_relocatable = %d\n", + manifest->is_dfm_relocatable[dev_chn_index]); + } +#endif + +#if IA_CSS_PROCESS_MAX_CELLS == 1 + IA_CSS_TRACE_1(PSYSAPI_STATIC, INFO, + "\ttype(cells) bitmap = %d\n", + manifest->cell_id); +#else + for (i = 0; i < IA_CSS_PROCESS_MAX_CELLS; i++) { + IA_CSS_TRACE_1(PSYSAPI_STATIC, INFO, + "\ttype(cells) bitmap = %d\n", + manifest->cells[i]); + } +#endif /* IA_CSS_PROCESS_MAX_CELLS == 1 */ + program_dependency_count = + ia_css_program_manifest_get_program_dependency_count(manifest); + if (program_dependency_count == 0) { + IA_CSS_TRACE_1(PSYSAPI_STATIC, INFO, + "program_dependencies[%d] {};\n", + program_dependency_count); + } else { + uint8_t prog_dep; + + IA_CSS_TRACE_1(PSYSAPI_STATIC, INFO, + "program_dependencies[%d] {\n", + program_dependency_count); + for (i = 0; i < 
(int)program_dependency_count - 1; i++) { + prog_dep = + ia_css_program_manifest_get_program_dependency( + manifest, i); + IA_CSS_TRACE_1(PSYSAPI_STATIC, INFO, + "\t %4d,\n", prog_dep); + } + prog_dep = + ia_css_program_manifest_get_program_dependency(manifest, i); + IA_CSS_TRACE_1(PSYSAPI_STATIC, INFO, "\t %4d }\n", prog_dep); + (void)prog_dep; + } + + terminal_dependency_count = + ia_css_program_manifest_get_terminal_dependency_count(manifest); + if (terminal_dependency_count == 0) { + IA_CSS_TRACE_1(PSYSAPI_STATIC, INFO, + "terminal_dependencies[%d] {};\n", + terminal_dependency_count); + } else { + uint8_t term_dep; + + IA_CSS_TRACE_1(PSYSAPI_STATIC, INFO, + "terminal_dependencies[%d] {\n", + terminal_dependency_count); + for (i = 0; i < (int)terminal_dependency_count - 1; i++) { + term_dep = + ia_css_program_manifest_get_terminal_dependency( + manifest, i); + IA_CSS_TRACE_1(PSYSAPI_STATIC, INFO, + "\t %4d,\n", term_dep); + } + term_dep = + ia_css_program_manifest_get_terminal_dependency(manifest, i); + IA_CSS_TRACE_1(PSYSAPI_STATIC, INFO, "\t %4d }\n", term_dep); + (void)term_dep; + } + (void)cell_type_id; + + retval = 0; +EXIT: + if (retval != 0) { + IA_CSS_TRACE_1(PSYSAPI_STATIC, ERROR, + "ia_css_program_manifest_print failed (%i)\n", retval); + } + return retval; +} + +#if defined(_MSC_VER) +/* WA for a visual studio compiler bug */ +#pragma optimize("", off) +#endif + +#endif diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/psysapi/static/src/ia_css_psys_terminal_manifest.c b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/psysapi/static/src/ia_css_psys_terminal_manifest.c new file mode 100644 index 0000000000000..c890b8a71f2c0 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/psysapi/static/src/ia_css_psys_terminal_manifest.c @@ -0,0 +1,1137 @@ +/* +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. 
+ * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ + + +#include + +/* Data object types on the terminals */ +#include +/* for ia_css_kernel_bitmap_t, ia_css_kernel_bitmap_clear, ia_css_... */ +#include + +#include "ia_css_psys_program_group_private.h" +#include "ia_css_terminal_manifest.h" +#include "ia_css_terminal_manifest_types.h" + +#include +#include +#include +#include "ia_css_psys_static_trace.h" + +/* We need to refactor those files in order to build in the firmware only + what is needed, switches are put current to workaround compilation problems + in the firmware (for example lack of uint64_t support) + supported in the firmware + */ +#if !defined(__HIVECC) +static const char *terminal_type_strings[IA_CSS_N_TERMINAL_TYPES + 1] = { + "IA_CSS_TERMINAL_TYPE_DATA_IN", + "IA_CSS_TERMINAL_TYPE_DATA_OUT", + "IA_CSS_TERMINAL_TYPE_PARAM_STREAM", + /**< Type 1-5 parameter input */ + "IA_CSS_TERMINAL_TYPE_PARAM_CACHED_IN", + /**< Type 1-5 parameter output */ + "IA_CSS_TERMINAL_TYPE_PARAM_CACHED_OUT", + /**< Represent the new type of terminal for + * the "spatial dependent parameters", when params go in + */ + "IA_CSS_TERMINAL_TYPE_PARAM_SPATIAL_IN", + /**< Represent the new type of terminal for + * the "spatial dependent parameters", when params go out + */ + "IA_CSS_TERMINAL_TYPE_PARAM_SPATIAL_OUT", + /**< Represent the new type of terminal for + * the explicit slicing, when params go in + */ + "IA_CSS_TERMINAL_TYPE_PARAM_SLICED_IN", + /**< Represent the new type of terminal for + * the explicit slicing, when params go out + */ + 
"IA_CSS_TERMINAL_TYPE_PARAM_SLICED_OUT", + /**< State (private data) input */ + "IA_CSS_TERMINAL_TYPE_STATE_IN", + /**< State (private data) output */ + "IA_CSS_TERMINAL_TYPE_STATE_OUT", + "IA_CSS_TERMINAL_TYPE_PROGRAM", + "IA_CSS_TERMINAL_TYPR_PROGRAM_CONTROL_INIT", + "UNDEFINED_TERMINAL_TYPE"}; + +#endif + +bool ia_css_is_terminal_manifest_spatial_parameter_terminal( + const ia_css_terminal_manifest_t *manifest) +{ + ia_css_terminal_type_t terminal_type; + + IA_CSS_TRACE_0(PSYSAPI_STATIC, VERBOSE, + "ia_css_is_terminal_manifest_parameter_terminal(): enter:\n"); + + terminal_type = ia_css_terminal_manifest_get_type(manifest); + + return ((terminal_type == IA_CSS_TERMINAL_TYPE_PARAM_SPATIAL_IN) || + (terminal_type == IA_CSS_TERMINAL_TYPE_PARAM_SPATIAL_OUT)); +} + +bool ia_css_is_terminal_manifest_program_terminal( + const ia_css_terminal_manifest_t *manifest) +{ + ia_css_terminal_type_t terminal_type; + + IA_CSS_TRACE_0(PSYSAPI_STATIC, VERBOSE, + "ia_css_is_terminal_manifest_parameter_terminal(): enter:\n"); + + terminal_type = ia_css_terminal_manifest_get_type(manifest); + + return (terminal_type == IA_CSS_TERMINAL_TYPE_PROGRAM); +} + +bool ia_css_is_terminal_manifest_program_control_init_terminal( + const ia_css_terminal_manifest_t *manifest) +{ + ia_css_terminal_type_t terminal_type; + + IA_CSS_TRACE_0(PSYSAPI_STATIC, VERBOSE, + "ia_css_is_terminal_manifest_program_control_init_terminal(): enter:\n"); + + terminal_type = ia_css_terminal_manifest_get_type(manifest); + + return (terminal_type == IA_CSS_TERMINAL_TYPE_PROGRAM_CONTROL_INIT); +} + + +bool ia_css_is_terminal_manifest_parameter_terminal( + const ia_css_terminal_manifest_t *manifest) +{ + /* will return an error value on error */ + ia_css_terminal_type_t terminal_type; + + IA_CSS_TRACE_0(PSYSAPI_STATIC, VERBOSE, + "ia_css_is_terminal_manifest_parameter_terminal(): enter:\n"); + + terminal_type = ia_css_terminal_manifest_get_type(manifest); + + return (terminal_type == IA_CSS_TERMINAL_TYPE_PARAM_CACHED_IN 
|| + terminal_type == IA_CSS_TERMINAL_TYPE_PARAM_CACHED_OUT); +} + +bool ia_css_is_terminal_manifest_data_terminal( + const ia_css_terminal_manifest_t *manifest) +{ + /* will return an error value on error */ + ia_css_terminal_type_t terminal_type; + + IA_CSS_TRACE_0(PSYSAPI_STATIC, VERBOSE, + "ia_css_is_terminal_manifest_data_terminal(): enter:\n"); + + terminal_type = ia_css_terminal_manifest_get_type(manifest); + + return ((terminal_type == IA_CSS_TERMINAL_TYPE_DATA_IN) || + (terminal_type == IA_CSS_TERMINAL_TYPE_DATA_OUT)); +} + +bool ia_css_is_terminal_manifest_sliced_terminal( + const ia_css_terminal_manifest_t *manifest) +{ + ia_css_terminal_type_t terminal_type; + + IA_CSS_TRACE_0(PSYSAPI_STATIC, VERBOSE, + "ia_css_is_terminal_manifest_sliced_terminal(): enter:\n"); + + terminal_type = ia_css_terminal_manifest_get_type(manifest); + + return ((terminal_type == IA_CSS_TERMINAL_TYPE_PARAM_SLICED_IN) || + (terminal_type == IA_CSS_TERMINAL_TYPE_PARAM_SLICED_OUT)); +} + +size_t ia_css_terminal_manifest_get_size( + const ia_css_terminal_manifest_t *manifest) +{ + size_t size = 0; + + IA_CSS_TRACE_0(PSYSAPI_STATIC, VERBOSE, + "ia_css_terminal_manifest_get_size(): enter:\n"); + + if (manifest != NULL) { + size = manifest->size; + } else { + IA_CSS_TRACE_0(PSYSAPI_STATIC, WARNING, + "ia_css_terminal_manifest_get_size: invalid argument\n"); + } + return size; +} + +ia_css_terminal_type_t ia_css_terminal_manifest_get_type( + const ia_css_terminal_manifest_t *manifest) +{ + ia_css_terminal_type_t terminal_type = IA_CSS_N_TERMINAL_TYPES; + + IA_CSS_TRACE_0(PSYSAPI_STATIC, VERBOSE, + "ia_css_terminal_manifest_get_type(): enter:\n"); + + if (manifest != NULL) { + terminal_type = manifest->terminal_type; + } else { + IA_CSS_TRACE_0(PSYSAPI_STATIC, WARNING, + "ia_css_terminal_manifest_get_type: invalid argument\n"); + } + return terminal_type; +} + +int ia_css_terminal_manifest_set_type( + ia_css_terminal_manifest_t *manifest, + const ia_css_terminal_type_t terminal_type) +{ 
+ int retval = -1; + + IA_CSS_TRACE_0(PSYSAPI_STATIC, VERBOSE, + "ia_css_terminal_manifest_set_type(): enter:\n"); + + if (manifest != NULL) { + manifest->terminal_type = terminal_type; + retval = 0; + } else { + IA_CSS_TRACE_1(PSYSAPI_STATIC, ERROR, + "ia_css_terminal_manifest_set_type failed (%i)\n", + retval); + } + return retval; +} + +int ia_css_terminal_manifest_set_ID( + ia_css_terminal_manifest_t *manifest, + const ia_css_terminal_ID_t ID) +{ + int retval = -1; + + IA_CSS_TRACE_0(PSYSAPI_STATIC, VERBOSE, + "ia_css_terminal_manifest_set_ID(): enter:\n"); + + if (manifest != NULL) { + manifest->ID = ID; + retval = 0; + } else { + IA_CSS_TRACE_1(PSYSAPI_STATIC, ERROR, + "ia_css_terminal_manifest_set_ID failed (%i)\n", + retval); + } + return retval; +} + +ia_css_terminal_ID_t ia_css_terminal_manifest_get_ID( + const ia_css_terminal_manifest_t *manifest) +{ + ia_css_terminal_ID_t retval; + + IA_CSS_TRACE_0(PSYSAPI_STATIC, VERBOSE, + "ia_css_terminal_manifest_get_ID(): enter:\n"); + + if (manifest != NULL) { + retval = manifest->ID; + } else { + IA_CSS_TRACE_0(PSYSAPI_STATIC, ERROR, + "ia_css_terminal_manifest_get_ID failed\n"); + retval = IA_CSS_TERMINAL_INVALID_ID; + } + return retval; +} + +ia_css_program_group_manifest_t *ia_css_terminal_manifest_get_parent( + const ia_css_terminal_manifest_t *manifest) +{ + ia_css_program_group_manifest_t *parent = NULL; + char *base; + + IA_CSS_TRACE_0(PSYSAPI_STATIC, VERBOSE, + "ia_css_terminal_manifest_get_parent(): enter:\n"); + + verifexit(manifest != NULL); + + base = (char *)((char *)manifest + manifest->parent_offset); + + parent = (ia_css_program_group_manifest_t *)(base); +EXIT: + return parent; +} + +int ia_css_terminal_manifest_set_parent_offset( + ia_css_terminal_manifest_t *manifest, + int32_t terminal_offset) +{ + int retval = -1; + + IA_CSS_TRACE_0(PSYSAPI_STATIC, VERBOSE, + "ia_css_terminal_manifest_set_parent_offset(): enter:\n"); + + verifexit(manifest != NULL); + + /* parent is at negative offset away 
from current terminal offset*/ + manifest->parent_offset = -terminal_offset; + + retval = 0; +EXIT: + if (retval != 0) { + IA_CSS_TRACE_1(PSYSAPI_STATIC, ERROR, + "ia_css_terminal_manifest_set_parent_offset failed (%i)\n", + retval); + } + return retval; +} + +ia_css_frame_format_bitmap_t +ia_css_data_terminal_manifest_get_frame_format_bitmap( + const ia_css_data_terminal_manifest_t *manifest) +{ + ia_css_frame_format_bitmap_t bitmap = 0; + + IA_CSS_TRACE_0(PSYSAPI_STATIC, VERBOSE, + "ia_css_data_terminal_manifest_get_frame_format_bitmap(): enter:\n"); + + if (manifest != NULL) { + bitmap = manifest->frame_format_bitmap; + } else { + IA_CSS_TRACE_0(PSYSAPI_STATIC, WARNING, + "ia_css_data_terminal_manifest_get_frame_format_bitmap invalid argument\n"); + } + return bitmap; +} + +int ia_css_data_terminal_manifest_set_frame_format_bitmap( + ia_css_data_terminal_manifest_t *manifest, + ia_css_frame_format_bitmap_t bitmap) +{ + int ret = -1; + + IA_CSS_TRACE_0(PSYSAPI_STATIC, VERBOSE, + "ia_css_data_terminal_manifest_set_frame_format_bitmap(): enter:\n"); + + if (manifest != NULL) { + manifest->frame_format_bitmap = bitmap; + ret = 0; + } else { + IA_CSS_TRACE_1(PSYSAPI_STATIC, ERROR, + "ia_css_data_terminal_manifest_set_frame_format_bitmap failed (%i)\n", + ret); + } + + return ret; +} + +bool ia_css_data_terminal_manifest_can_support_compression( + const ia_css_data_terminal_manifest_t *manifest) +{ + bool compression_support = false; + + IA_CSS_TRACE_0(PSYSAPI_STATIC, VERBOSE, + "ia_css_data_terminal_manifest_get_compression_support(): enter:\n"); + + if (manifest != NULL) { + /* compression_support is used boolean encoded in uint8_t. 
+ * So we only need to check + * if this is non-zero + */ + compression_support = (manifest->compression_support != 0); + } else { + IA_CSS_TRACE_0(PSYSAPI_STATIC, ERROR, + "ia_css_data_terminal_manifest_can_support_compression invalid argument\n"); + } + + return compression_support; +} + +int ia_css_data_terminal_manifest_set_compression_support( + ia_css_data_terminal_manifest_t *manifest, + bool compression_support) +{ + int ret = -1; + + IA_CSS_TRACE_0(PSYSAPI_STATIC, VERBOSE, + "ia_css_data_terminal_manifest_set_compression_support(): enter:\n"); + + if (manifest != NULL) { + manifest->compression_support = + (compression_support == true) ? 1 : 0; + ret = 0; + } else { + IA_CSS_TRACE_1(PSYSAPI_STATIC, ERROR, + "ia_css_data_terminal_manifest_set_compression_support failed (%i)\n", + ret); + } + + return ret; +} + +ia_css_connection_bitmap_t ia_css_data_terminal_manifest_get_connection_bitmap( + const ia_css_data_terminal_manifest_t *manifest) +{ + ia_css_connection_bitmap_t connection_bitmap = 0; + + IA_CSS_TRACE_0(PSYSAPI_STATIC, VERBOSE, + "ia_css_data_terminal_manifest_get_connection_bitmap(): enter:\n"); + + if (manifest != NULL) { + connection_bitmap = manifest->connection_bitmap; + } else { + IA_CSS_TRACE_0(PSYSAPI_STATIC, WARNING, + "ia_css_data_terminal_manifest_get_connection_bitmap invalid argument\n"); + } + return connection_bitmap; +} + +int ia_css_data_terminal_manifest_set_connection_bitmap( + ia_css_data_terminal_manifest_t *manifest, ia_css_connection_bitmap_t bitmap) +{ + int ret = -1; + + IA_CSS_TRACE_0(PSYSAPI_STATIC, VERBOSE, + "ia_css_data_terminal_manifest_set_connection_bitmap(): enter:\n"); + + if (manifest != NULL) { + assert(bitmap != 0); /* zero means there is no connection, this is invalid. 
*/ + assert((bitmap >> IA_CSS_N_CONNECTION_TYPES) == 0); + + manifest->connection_bitmap = bitmap; + ret = 0; + } else { + IA_CSS_TRACE_0(PSYSAPI_STATIC, WARNING, + "ia_css_data_terminal_manifest_set_connection_bitmap invalid argument\n"); + } + return ret; +} + +/* We need to refactor those files in order to build in the firmware only + what is needed, switches are put current to workaround compilation problems + in the firmware (for example lack of uint64_t support) + supported in the firmware + */ +#if !defined(__HIVECC) +ia_css_kernel_bitmap_t ia_css_data_terminal_manifest_get_kernel_bitmap( + const ia_css_data_terminal_manifest_t *manifest) +{ + ia_css_kernel_bitmap_t kernel_bitmap = ia_css_kernel_bitmap_clear(); + + IA_CSS_TRACE_0(PSYSAPI_STATIC, VERBOSE, + "ia_css_data_terminal_manifest_get_kernel_bitmap(): enter:\n"); + + if (manifest != NULL) { + kernel_bitmap = manifest->kernel_bitmap; + } else { + IA_CSS_TRACE_0(PSYSAPI_STATIC, WARNING, + "ia_css_data_terminal_manifest_get_kernel_bitmap: invalid argument\n"); + } + return kernel_bitmap; +} + +int ia_css_data_terminal_manifest_set_kernel_bitmap( + ia_css_data_terminal_manifest_t *manifest, + const ia_css_kernel_bitmap_t kernel_bitmap) +{ + int retval = -1; + + IA_CSS_TRACE_0(PSYSAPI_STATIC, VERBOSE, + "ia_css_data_terminal_manifest_set_kernel_bitmap(): enter:\n"); + + if (manifest != NULL) { + manifest->kernel_bitmap = kernel_bitmap; + retval = 0; + } else { + IA_CSS_TRACE_1(PSYSAPI_STATIC, ERROR, + "ia_css_data_terminal_manifest_set_kernel_bitmap: failed (%i)\n", + retval); + } + + return retval; +} + +int ia_css_data_terminal_manifest_set_kernel_bitmap_unique( + ia_css_data_terminal_manifest_t *manifest, + const unsigned int index) +{ + int retval = -1; + + IA_CSS_TRACE_0(PSYSAPI_STATIC, VERBOSE, + "ia_css_data_terminal_manifest_set_kernel_bitmap_unique(): enter:\n"); + + if (manifest != NULL) { + ia_css_kernel_bitmap_t kernel_bitmap = + ia_css_kernel_bitmap_clear(); + + kernel_bitmap = 
ia_css_kernel_bitmap_set(kernel_bitmap, index); + verifexit(!ia_css_is_kernel_bitmap_empty(kernel_bitmap)); + verifexit(ia_css_data_terminal_manifest_set_kernel_bitmap( + manifest, kernel_bitmap) == 0); + retval = 0; + } + +EXIT: + if (retval != 0) { + IA_CSS_TRACE_1(PSYSAPI_STATIC, ERROR, + "ia_css_data_terminal_manifest_set_kernel_bitmap_unique failed (%i)\n", + retval); + } + return retval; +} +#endif + +int ia_css_data_terminal_manifest_set_min_size( + ia_css_data_terminal_manifest_t *manifest, + const uint16_t min_size[IA_CSS_N_DATA_DIMENSION]) +{ + int retval = -1; + + IA_CSS_TRACE_0(PSYSAPI_STATIC, VERBOSE, + "ia_css_data_terminal_manifest_set_min_size(): enter:\n"); + + verifexit(manifest != NULL); + + manifest->min_size[IA_CSS_COL_DIMENSION] = + min_size[IA_CSS_COL_DIMENSION]; + manifest->min_size[IA_CSS_ROW_DIMENSION] = + min_size[IA_CSS_ROW_DIMENSION]; + retval = 0; + +EXIT: + if (NULL == manifest) { + IA_CSS_TRACE_0(PSYSAPI_STATIC, ERROR, + "ia_css_data_terminal_manifest_set_min_size: invalid argument\n"); + } + return retval; +} + +int ia_css_data_terminal_manifest_set_max_size( + ia_css_data_terminal_manifest_t *manifest, + const uint16_t max_size[IA_CSS_N_DATA_DIMENSION]) +{ + int retval = -1; + + IA_CSS_TRACE_0(PSYSAPI_STATIC, VERBOSE, + "ia_css_data_terminal_manifest_set_max_size(): enter:\n"); + + verifexit(manifest != NULL); + + manifest->max_size[IA_CSS_COL_DIMENSION] = + max_size[IA_CSS_COL_DIMENSION]; + manifest->max_size[IA_CSS_ROW_DIMENSION] = + max_size[IA_CSS_ROW_DIMENSION]; + retval = 0; + +EXIT: + if (NULL == manifest) { + IA_CSS_TRACE_0(PSYSAPI_STATIC, ERROR, + "ia_css_data_terminal_manifest_set_max_size: invalid argument\n"); + } + return retval; +} + +int ia_css_data_terminal_manifest_get_min_size( + const ia_css_data_terminal_manifest_t *manifest, + uint16_t min_size[IA_CSS_N_DATA_DIMENSION]) +{ + int retval = -1; + + IA_CSS_TRACE_0(PSYSAPI_STATIC, VERBOSE, + "ia_css_data_terminal_manifest_get_min_size(): enter:\n"); + + 
verifexit(manifest != NULL); + + min_size[IA_CSS_COL_DIMENSION] = + manifest->min_size[IA_CSS_COL_DIMENSION]; + min_size[IA_CSS_ROW_DIMENSION] = + manifest->min_size[IA_CSS_ROW_DIMENSION]; + retval = 0; + +EXIT: + if (NULL == manifest) { + IA_CSS_TRACE_0(PSYSAPI_STATIC, ERROR, + "ia_css_data_terminal_manifest_get_min_size: invalid argument\n"); + } + return retval; +} + +int ia_css_data_terminal_manifest_get_max_size( + const ia_css_data_terminal_manifest_t *manifest, + uint16_t max_size[IA_CSS_N_DATA_DIMENSION]) +{ + int retval = -1; + + IA_CSS_TRACE_0(PSYSAPI_STATIC, VERBOSE, + "ia_css_data_terminal_manifest_get_max_size(): enter:\n"); + + verifexit(manifest != NULL); + + max_size[IA_CSS_COL_DIMENSION] = + manifest->max_size[IA_CSS_COL_DIMENSION]; + max_size[IA_CSS_ROW_DIMENSION] = + manifest->max_size[IA_CSS_ROW_DIMENSION]; + retval = 0; + +EXIT: + if (NULL == manifest) { + IA_CSS_TRACE_0(PSYSAPI_STATIC, ERROR, + "ia_css_data_terminal_manifest_get_max_size: invalid argument\n"); + } + return retval; +} + +int ia_css_data_terminal_manifest_set_min_fragment_size( + ia_css_data_terminal_manifest_t *manifest, + const uint16_t min_size[IA_CSS_N_DATA_DIMENSION]) +{ + int retval = -1; + + IA_CSS_TRACE_0(PSYSAPI_STATIC, VERBOSE, + "ia_css_data_terminal_manifest_set_min_fragment_size(): enter:\n"); + + verifexit(manifest != NULL); + + manifest->min_fragment_size[IA_CSS_COL_DIMENSION] = + min_size[IA_CSS_COL_DIMENSION]; + manifest->min_fragment_size[IA_CSS_ROW_DIMENSION] = + min_size[IA_CSS_ROW_DIMENSION]; + retval = 0; + +EXIT: + if (NULL == manifest) { + IA_CSS_TRACE_0(PSYSAPI_STATIC, ERROR, + "ia_css_data_terminal_manifest_set_min_fragment_size invalid argument\n"); + } + return retval; +} + +int ia_css_data_terminal_manifest_set_max_fragment_size( + ia_css_data_terminal_manifest_t *manifest, + const uint16_t max_size[IA_CSS_N_DATA_DIMENSION]) +{ + int retval = -1; + + IA_CSS_TRACE_0(PSYSAPI_STATIC, VERBOSE, + "ia_css_data_terminal_manifest_set_max_fragment_size(): 
enter:\n"); + + verifexit(manifest != NULL); + + manifest->max_fragment_size[IA_CSS_COL_DIMENSION] = + max_size[IA_CSS_COL_DIMENSION]; + manifest->max_fragment_size[IA_CSS_ROW_DIMENSION] = + max_size[IA_CSS_ROW_DIMENSION]; + retval = 0; + +EXIT: + if (NULL == manifest) { + IA_CSS_TRACE_0(PSYSAPI_STATIC, ERROR, + "ia_css_data_terminal_manifest_set_max_fragment_size invalid argument\n"); + } + return retval; +} + +int ia_css_data_terminal_manifest_get_min_fragment_size( + const ia_css_data_terminal_manifest_t *manifest, + uint16_t min_size[IA_CSS_N_DATA_DIMENSION]) +{ + int retval = -1; + + IA_CSS_TRACE_0(PSYSAPI_STATIC, VERBOSE, + "ia_css_data_terminal_manifest_get_min_fragment_size(): enter:\n"); + + verifexit(manifest != NULL); + + min_size[IA_CSS_COL_DIMENSION] = + manifest->min_fragment_size[IA_CSS_COL_DIMENSION]; + min_size[IA_CSS_ROW_DIMENSION] = + manifest->min_fragment_size[IA_CSS_ROW_DIMENSION]; + retval = 0; + +EXIT: + if (NULL == manifest) { + IA_CSS_TRACE_0(PSYSAPI_STATIC, ERROR, + "ia_css_data_terminal_manifest_get_min_fragment_size invalid argument\n"); + } + return retval; +} + +int ia_css_data_terminal_manifest_get_max_fragment_size( + const ia_css_data_terminal_manifest_t *manifest, + uint16_t max_size[IA_CSS_N_DATA_DIMENSION]) +{ + int retval = -1; + + IA_CSS_TRACE_0(PSYSAPI_STATIC, VERBOSE, + "ia_css_data_terminal_manifest_get_max_fragment_size(): enter:\n"); + + verifexit(manifest != NULL); + + max_size[IA_CSS_COL_DIMENSION] = + manifest->max_fragment_size[IA_CSS_COL_DIMENSION]; + max_size[IA_CSS_ROW_DIMENSION] = + manifest->max_fragment_size[IA_CSS_ROW_DIMENSION]; + retval = 0; + +EXIT: + if (NULL == manifest) { + IA_CSS_TRACE_0(PSYSAPI_STATIC, ERROR, + "ia_css_data_terminal_manifest_get_max_fragment_size invalid argument\n"); + } + return retval; +} + +/* We need to refactor those files in order to build in the firmware only + what is needed, switches are put current to workaround compilation problems + in the firmware (for example lack of 
uint64_t support) + supported in the firmware + */ +#if !defined(__HIVECC) + +#define PRINT_DIMENSION(name, var) IA_CSS_TRACE_3(PSYSAPI_STATIC, \ + INFO, "%s:\t%d %d\n", \ + (name), \ + (var)[IA_CSS_COL_DIMENSION], \ + (var)[IA_CSS_ROW_DIMENSION]) + +int ia_css_terminal_manifest_print( + const ia_css_terminal_manifest_t *manifest, + void *fid) +{ + int retval = -1; + ia_css_terminal_type_t terminal_type = + ia_css_terminal_manifest_get_type(manifest); + + IA_CSS_TRACE_0(PSYSAPI_STATIC, INFO, + "ia_css_terminal_manifest_print(): enter:\n"); + + verifexit(manifest != NULL); + NOT_USED(fid); + + IA_CSS_TRACE_1(PSYSAPI_STATIC, INFO, "sizeof(manifest) = %d\n", + (int)ia_css_terminal_manifest_get_size(manifest)); + + PRINT("typeof(manifest) = %s\n", terminal_type_strings[terminal_type]); + + if (terminal_type == IA_CSS_TERMINAL_TYPE_PARAM_CACHED_IN || + terminal_type == IA_CSS_TERMINAL_TYPE_PARAM_CACHED_OUT) { + ia_css_param_terminal_manifest_t *pterminal_manifest = + (ia_css_param_terminal_manifest_t *)manifest; + uint16_t section_count = + pterminal_manifest->param_manifest_section_desc_count; + int i; + + IA_CSS_TRACE_1(PSYSAPI_STATIC, INFO, + "sections(manifest) = %d\n", (int)section_count); + for (i = 0; i < section_count; i++) { + const ia_css_param_manifest_section_desc_t *manifest = + ia_css_param_terminal_manifest_get_prm_sct_desc( + pterminal_manifest, i); + verifjmpexit(manifest != NULL); + IA_CSS_TRACE_1(PSYSAPI_STATIC, INFO, + "kernel_id = %d\n", (int)manifest->kernel_id); + IA_CSS_TRACE_1(PSYSAPI_STATIC, INFO, + "mem_type_id = %d\n", + (int)manifest->mem_type_id); + IA_CSS_TRACE_1(PSYSAPI_STATIC, INFO, + "max_mem_size = %d\n", + (int)manifest->max_mem_size); + IA_CSS_TRACE_1(PSYSAPI_STATIC, INFO, + "region_id = %d\n", + (int)manifest->region_id); + } + } else if (terminal_type == IA_CSS_TERMINAL_TYPE_PARAM_SLICED_IN || + terminal_type == IA_CSS_TERMINAL_TYPE_PARAM_SLICED_OUT) { + ia_css_sliced_param_terminal_manifest_t + *sliced_terminal_manifest = + 
(ia_css_sliced_param_terminal_manifest_t *)manifest; + uint32_t kernel_id; + uint16_t section_count; + uint16_t section_idx; + + kernel_id = sliced_terminal_manifest->kernel_id; + section_count = + sliced_terminal_manifest->sliced_param_section_count; + + NOT_USED(kernel_id); + + IA_CSS_TRACE_1(PSYSAPI_STATIC, INFO, + "kernel_id = %d\n", (int)kernel_id); + IA_CSS_TRACE_1(PSYSAPI_STATIC, INFO, + "section_count = %d\n", (int)section_count); + + for (section_idx = 0; section_idx < section_count; + section_idx++) { + ia_css_sliced_param_manifest_section_desc_t + *sliced_param_manifest_section_desc; + + IA_CSS_TRACE_1(PSYSAPI_STATIC, INFO, + "section %d\n", (int)section_idx); + sliced_param_manifest_section_desc = + ia_css_sliced_param_terminal_manifest_get_sliced_prm_sct_desc( + sliced_terminal_manifest, section_idx); + verifjmpexit(sliced_param_manifest_section_desc != + NULL); + IA_CSS_TRACE_1(PSYSAPI_STATIC, INFO, + "mem_type_id = %d\n", + (int)sliced_param_manifest_section_desc->mem_type_id); + IA_CSS_TRACE_1(PSYSAPI_STATIC, INFO, + "region_id = %d\n", + (int)sliced_param_manifest_section_desc->region_id); + IA_CSS_TRACE_1(PSYSAPI_STATIC, INFO, + "max_mem_size = %d\n", + (int)sliced_param_manifest_section_desc->max_mem_size); + } + } else if (terminal_type == IA_CSS_TERMINAL_TYPE_PROGRAM) { + ia_css_program_terminal_manifest_t *program_terminal_manifest = + (ia_css_program_terminal_manifest_t *)manifest; + uint32_t sequencer_info_kernel_id; + uint16_t max_kernel_fragment_sequencer_command_desc; + uint16_t kernel_fragment_sequencer_info_manifest_info_count; + uint16_t seq_info_idx; + + sequencer_info_kernel_id = + program_terminal_manifest->sequencer_info_kernel_id; + max_kernel_fragment_sequencer_command_desc = + program_terminal_manifest-> + max_kernel_fragment_sequencer_command_desc; + kernel_fragment_sequencer_info_manifest_info_count = + program_terminal_manifest-> + kernel_fragment_sequencer_info_manifest_info_count; + + NOT_USED(sequencer_info_kernel_id); + 
NOT_USED(max_kernel_fragment_sequencer_command_desc); + + IA_CSS_TRACE_1(PSYSAPI_STATIC, INFO, + "sequencer_info_kernel_id = %d\n", + (int)sequencer_info_kernel_id); + IA_CSS_TRACE_1(PSYSAPI_STATIC, INFO, + "max_kernel_fragment_sequencer_command_desc = %d\n", + (int)max_kernel_fragment_sequencer_command_desc); + IA_CSS_TRACE_1(PSYSAPI_STATIC, INFO, + "kernel_fragment_sequencer_info_manifest_info_count = %d\n", + (int) + kernel_fragment_sequencer_info_manifest_info_count); + + for (seq_info_idx = 0; seq_info_idx < + kernel_fragment_sequencer_info_manifest_info_count; + seq_info_idx++) { + ia_css_kernel_fragment_sequencer_info_manifest_desc_t + *sequencer_info_manifest_desc; + + IA_CSS_TRACE_1(PSYSAPI_STATIC, INFO, + "sequencer info %d\n", (int)seq_info_idx); + sequencer_info_manifest_desc = + ia_css_program_terminal_manifest_get_kernel_frgmnt_seq_info_desc + (program_terminal_manifest, seq_info_idx); + verifjmpexit(sequencer_info_manifest_desc != NULL); + IA_CSS_TRACE_2(PSYSAPI_STATIC, INFO, + "min_fragment_grid_slice_dimension[] = {%d, %d}\n", + (int)sequencer_info_manifest_desc-> + min_fragment_grid_slice_dimension[ + IA_CSS_COL_DIMENSION], + (int)sequencer_info_manifest_desc-> + min_fragment_grid_slice_dimension[ + IA_CSS_ROW_DIMENSION]); + IA_CSS_TRACE_2(PSYSAPI_STATIC, INFO, + "max_fragment_grid_slice_dimension[] = {%d, %d}\n", + (int)sequencer_info_manifest_desc-> + max_fragment_grid_slice_dimension[ + IA_CSS_COL_DIMENSION], + (int)sequencer_info_manifest_desc-> + max_fragment_grid_slice_dimension[ + IA_CSS_ROW_DIMENSION]); + IA_CSS_TRACE_2(PSYSAPI_STATIC, INFO, + "min_fragment_grid_slice_count[] = {%d, %d}\n", + (int)sequencer_info_manifest_desc-> + min_fragment_grid_slice_count[ + IA_CSS_COL_DIMENSION], + (int)sequencer_info_manifest_desc-> + min_fragment_grid_slice_count[ + IA_CSS_ROW_DIMENSION]); + IA_CSS_TRACE_2(PSYSAPI_STATIC, INFO, + "max_fragment_grid_slice_count[] = {%d, %d}\n", + (int)sequencer_info_manifest_desc-> + max_fragment_grid_slice_count[ + 
IA_CSS_COL_DIMENSION], + (int)sequencer_info_manifest_desc-> + max_fragment_grid_slice_count[ + IA_CSS_ROW_DIMENSION]); + IA_CSS_TRACE_2(PSYSAPI_STATIC, INFO, + "min_fragment_grid_point_decimation_factor[] = {%d, %d}\n", + (int)sequencer_info_manifest_desc-> + min_fragment_grid_point_decimation_factor[ + IA_CSS_COL_DIMENSION], + (int)sequencer_info_manifest_desc-> + min_fragment_grid_point_decimation_factor[ + IA_CSS_ROW_DIMENSION]); + IA_CSS_TRACE_2(PSYSAPI_STATIC, INFO, + "max_fragment_grid_point_decimation_factor[] = {%d, %d}\n", + (int)sequencer_info_manifest_desc-> + max_fragment_grid_point_decimation_factor[ + IA_CSS_COL_DIMENSION], + (int)sequencer_info_manifest_desc-> + max_fragment_grid_point_decimation_factor[ + IA_CSS_ROW_DIMENSION]); + IA_CSS_TRACE_2(PSYSAPI_STATIC, INFO, + "min_fragment_grid_overlay_on_pixel_topleft_index[] = {%d, %d}\n", + (int)sequencer_info_manifest_desc-> + min_fragment_grid_overlay_pixel_topleft_index[ + IA_CSS_COL_DIMENSION], + (int)sequencer_info_manifest_desc-> + min_fragment_grid_overlay_pixel_topleft_index[ + IA_CSS_ROW_DIMENSION]); + IA_CSS_TRACE_2(PSYSAPI_STATIC, INFO, + "max_fragment_grid_overlay_on_pixel_topleft_index[] = {%d, %d}\n", + (int)sequencer_info_manifest_desc-> + max_fragment_grid_overlay_pixel_topleft_index[ + IA_CSS_COL_DIMENSION], + (int)sequencer_info_manifest_desc-> + max_fragment_grid_overlay_pixel_topleft_index[ + IA_CSS_ROW_DIMENSION]); + IA_CSS_TRACE_2(PSYSAPI_STATIC, INFO, + "min_fragment_grid_overlay_on_pixel_dimension[] = {%d, %d}\n", + (int)sequencer_info_manifest_desc-> + min_fragment_grid_overlay_pixel_dimension[ + IA_CSS_COL_DIMENSION], + (int)sequencer_info_manifest_desc-> + min_fragment_grid_overlay_pixel_dimension[ + IA_CSS_ROW_DIMENSION]); + IA_CSS_TRACE_2(PSYSAPI_STATIC, INFO, + "max_fragment_grid_overlay_on_pixel_dimension[] = {%d, %d}\n", + (int)sequencer_info_manifest_desc-> + max_fragment_grid_overlay_pixel_dimension[ + IA_CSS_COL_DIMENSION], + (int)sequencer_info_manifest_desc-> + 
max_fragment_grid_overlay_pixel_dimension[ + IA_CSS_ROW_DIMENSION]); + } + } else if (terminal_type == IA_CSS_TERMINAL_TYPE_PROGRAM_CONTROL_INIT) { + ia_css_program_control_init_terminal_manifest_t *progctrlinit_man = + (ia_css_program_control_init_terminal_manifest_t *)manifest; + ia_css_program_control_init_terminal_manifest_print(progctrlinit_man); + } else if (terminal_type == IA_CSS_TERMINAL_TYPE_DATA_IN || + terminal_type == IA_CSS_TERMINAL_TYPE_DATA_OUT) { + + ia_css_data_terminal_manifest_t *dterminal_manifest = + (ia_css_data_terminal_manifest_t *)manifest; + int i; + + NOT_USED(dterminal_manifest); + + verifexit(ia_css_kernel_bitmap_print( + ia_css_data_terminal_manifest_get_kernel_bitmap( + dterminal_manifest), fid) == 0); + IA_CSS_TRACE_1(PSYSAPI_STATIC, INFO, + "formats(manifest) = %04x\n", + (int)ia_css_data_terminal_manifest_get_frame_format_bitmap( + dterminal_manifest)); + IA_CSS_TRACE_1(PSYSAPI_STATIC, INFO, + "connection(manifest) = %04x\n", + (int)ia_css_data_terminal_manifest_get_connection_bitmap( + dterminal_manifest)); + IA_CSS_TRACE_1(PSYSAPI_STATIC, INFO, + "dependent(manifest) = %d\n", + (int)dterminal_manifest->terminal_dependency); + + IA_CSS_TRACE_1(PSYSAPI_STATIC, INFO, + "\tmin_size[%d] = {\n", + IA_CSS_N_DATA_DIMENSION); + for (i = 0; i < (int)IA_CSS_N_DATA_DIMENSION - 1; i++) { + IA_CSS_TRACE_1(PSYSAPI_STATIC, INFO, + "\t\t%4d,\n", dterminal_manifest->min_size[i]); + } + IA_CSS_TRACE_1(PSYSAPI_STATIC, INFO, + "\t\t%4d }\n", dterminal_manifest->min_size[i]); + + IA_CSS_TRACE_1(PSYSAPI_STATIC, INFO, + "\tmax_size[%d] = {\n", IA_CSS_N_DATA_DIMENSION); + for (i = 0; i < (int)IA_CSS_N_DATA_DIMENSION - 1; i++) { + IA_CSS_TRACE_1(PSYSAPI_STATIC, INFO, + "\t\t%4d,\n", dterminal_manifest->max_size[i]); + } + IA_CSS_TRACE_1(PSYSAPI_STATIC, INFO, + "\t\t%4d }\n", dterminal_manifest->max_size[i]); + + IA_CSS_TRACE_1(PSYSAPI_STATIC, INFO, + "\tmin_fragment_size[%d] = {\n", + IA_CSS_N_DATA_DIMENSION); + for (i = 0; i < 
(int)IA_CSS_N_DATA_DIMENSION - 1; i++) { + IA_CSS_TRACE_1(PSYSAPI_STATIC, INFO, + "\t\t%4d,\n", + dterminal_manifest->min_fragment_size[i]); + } + IA_CSS_TRACE_1(PSYSAPI_STATIC, INFO, + "\t\t%4d }\n", + dterminal_manifest->min_fragment_size[i]); + + IA_CSS_TRACE_1(PSYSAPI_STATIC, INFO, + "\tmax_fragment_size[%d] = {\n", + IA_CSS_N_DATA_DIMENSION); + for (i = 0; i < (int)IA_CSS_N_DATA_DIMENSION - 1; i++) { + IA_CSS_TRACE_1(PSYSAPI_STATIC, INFO, + "\t\t%4d,\n", + dterminal_manifest->max_fragment_size[i]); + } + IA_CSS_TRACE_1(PSYSAPI_STATIC, INFO, + "\t\t%4d }\n", + dterminal_manifest->max_fragment_size[i]); + + } else if (terminal_type == IA_CSS_TERMINAL_TYPE_PARAM_SPATIAL_IN || + terminal_type == IA_CSS_TERMINAL_TYPE_PARAM_SPATIAL_OUT) { + + ia_css_spatial_param_terminal_manifest_t *stm = + (ia_css_spatial_param_terminal_manifest_t *)manifest; + ia_css_frame_grid_param_manifest_section_desc_t *sec; + int sec_count = + stm->frame_grid_param_manifest_section_desc_count; + ia_css_fragment_grid_manifest_desc_t *fragd = + &stm->common_fragment_grid_desc; + ia_css_frame_grid_manifest_desc_t *framed = + &stm->frame_grid_desc; + int sec_index; + + IA_CSS_TRACE_1(PSYSAPI_STATIC, INFO, "kernel_id:\t\t%d\n", + stm->kernel_id); + IA_CSS_TRACE_1(PSYSAPI_STATIC, INFO, "compute_units_p_elem:\t%d\n", + stm->compute_units_p_elem); + + PRINT_DIMENSION("min_fragment_grid_dimension", + fragd->min_fragment_grid_dimension); + PRINT_DIMENSION("max_fragment_grid_dimension", + fragd->max_fragment_grid_dimension); + PRINT_DIMENSION("min_frame_grid_dimension", + framed->min_frame_grid_dimension); + PRINT_DIMENSION("max_frame_grid_dimension", + framed->max_frame_grid_dimension); + + NOT_USED(framed); + NOT_USED(fragd); + + for (sec_index = 0; sec_index < sec_count; sec_index++) { + sec = ia_css_spatial_param_terminal_manifest_get_frm_grid_prm_sct_desc( + stm, sec_index); + verifjmpexit(sec != NULL); + + IA_CSS_TRACE_0(PSYSAPI_STATIC, INFO, "--------------------------\n"); + 
IA_CSS_TRACE_1(PSYSAPI_STATIC, INFO, "\tmem_type_id:\t%d\n", + sec->mem_type_id); + IA_CSS_TRACE_1(PSYSAPI_STATIC, INFO, "\tregion_id:\t%d\n", + sec->region_id); + IA_CSS_TRACE_1(PSYSAPI_STATIC, INFO, "\telem_size:\t%d\n", + sec->elem_size); + IA_CSS_TRACE_1(PSYSAPI_STATIC, INFO, "\tmax_mem_size:\t%d\n", + sec->max_mem_size); + } + } else if (terminal_type < IA_CSS_N_TERMINAL_TYPES) { + IA_CSS_TRACE_0(PSYSAPI_STATIC, WARNING, + "terminal type can not be pretty printed, not supported\n"); + } + + retval = 0; +EXIT: + if (retval != 0) { + IA_CSS_TRACE_1(PSYSAPI_STATIC, ERROR, + "ia_css_terminal_manifest_print failed (%i)\n", + retval); + } + return retval; +} + +/* Program control init Terminal */ +unsigned int ia_css_program_control_init_terminal_manifest_get_connect_section_count( + const ia_css_program_control_init_manifest_program_desc_t *prog) +{ + assert(prog); + return prog->connect_section_count; +} + + +unsigned int ia_css_program_control_init_terminal_manifest_get_load_section_count( + const ia_css_program_control_init_manifest_program_desc_t *prog) +{ + assert(prog); + return prog->load_section_count; +} + +unsigned int ia_css_program_control_init_terminal_manifest_get_size( + const uint16_t nof_programs, + const uint16_t *nof_load_sections, + const uint16_t *nof_connect_sections) +{ + (void)nof_load_sections; /* might be needed in future */ + (void)nof_connect_sections; /* might be needed in future */ + + return sizeof(ia_css_program_control_init_terminal_manifest_t) + + nof_programs * + sizeof(ia_css_program_control_init_manifest_program_desc_t); +} + +ia_css_program_control_init_manifest_program_desc_t * +ia_css_program_control_init_terminal_manifest_get_program_desc( + const ia_css_program_control_init_terminal_manifest_t *terminal, + unsigned int program) +{ + ia_css_program_control_init_manifest_program_desc_t *progs; + + assert(terminal != NULL); + assert(program < terminal->program_count); + + progs = 
(ia_css_program_control_init_manifest_program_desc_t *) + ((const char *)terminal + terminal->program_desc_offset); + + return &progs[program]; +} + +int ia_css_program_control_init_terminal_manifest_init( + ia_css_program_control_init_terminal_manifest_t *terminal, + const uint16_t nof_programs, + const uint16_t *nof_load_sections, + const uint16_t *nof_connect_sections) +{ + unsigned int i; + ia_css_program_control_init_manifest_program_desc_t *progs; + + if (terminal == NULL) { + return -EFAULT; + } + + terminal->program_count = nof_programs; + terminal->program_desc_offset = + sizeof(ia_css_program_control_init_terminal_manifest_t); + + progs = ia_css_program_control_init_terminal_manifest_get_program_desc( + terminal, 0); + + for (i = 0; i < nof_programs; i++) { + progs[i].load_section_count = nof_load_sections[i]; + progs[i].connect_section_count = nof_connect_sections[i]; + } + return 0; +} + +void ia_css_program_control_init_terminal_manifest_print( + ia_css_program_control_init_terminal_manifest_t *terminal) +{ + unsigned int i; + + ia_css_program_control_init_manifest_program_desc_t *progs; + + progs = ia_css_program_control_init_terminal_manifest_get_program_desc( + terminal, 0); + + assert(progs); + (void)progs; + + for (i = 0; i < terminal->program_count; i++) { + IA_CSS_TRACE_3(PSYSAPI_STATIC, INFO, + "program index: %d, load sec: %d, connect sec: %d\n", + i, + progs[i].load_section_count, + progs[i].connect_section_count); + } +} + +#endif diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/reg_dump/src/psys/bxtB0_gen_reg_dump/ia_css_debug_dump.c b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/reg_dump/src/psys/bxtB0_gen_reg_dump/ia_css_debug_dump.c new file mode 100644 index 0000000000000..c51d65c8cb647 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/reg_dump/src/psys/bxtB0_gen_reg_dump/ia_css_debug_dump.c @@ -0,0 +1,15 @@ +/* +* Support for Intel Camera Imaging ISP subsystem. 
+* Copyright (c) 2010 - 2018, Intel Corporation. +* +* This program is free software; you can redistribute it and/or modify it +* under the terms and conditions of the GNU General Public License, +* version 2, as published by the Free Software Foundation. +* +* This program is distributed in the hope it will be useful, but WITHOUT +* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for +* more details. +*/ +#include "ia_css_debug_dump.h" + void ia_css_debug_dump(void) {} \ No newline at end of file diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/reg_dump/src/psys/bxtB0_gen_reg_dump/ia_css_debug_dump.h b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/reg_dump/src/psys/bxtB0_gen_reg_dump/ia_css_debug_dump.h new file mode 100644 index 0000000000000..5dd23ddbd180b --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/reg_dump/src/psys/bxtB0_gen_reg_dump/ia_css_debug_dump.h @@ -0,0 +1,17 @@ +/* +* Support for Intel Camera Imaging ISP subsystem. +* Copyright (c) 2010 - 2018, Intel Corporation. +* +* This program is free software; you can redistribute it and/or modify it +* under the terms and conditions of the GNU General Public License, +* version 2, as published by the Free Software Foundation. +* +* This program is distributed in the hope it will be useful, but WITHOUT +* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for +* more details. 
+*/ +#ifndef __IA_CSS_DEBUG_DUMP_H_ + #define __IA_CSS_DEBUG_DUMP_H_ + void ia_css_debug_dump(void); + #endif /* __IA_CSS_DEBUG_DUMP_H_ */ \ No newline at end of file diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/reg_dump/src/reg_dump_generic_bridge.c b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/reg_dump/src/reg_dump_generic_bridge.c new file mode 100644 index 0000000000000..9b9161ae78cf2 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/reg_dump/src/reg_dump_generic_bridge.c @@ -0,0 +1,39 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ + +#include +#include "ia_css_trace.h" +#ifdef USE_LOGICAL_SSIDS +/* + Logical names can be used to define the SSID + In order to resolve these names the following include file should be provided + and the define above should be enabled +*/ +#include +#endif + +#define REG_DUMP_TRACE_METHOD IA_CSS_TRACE_METHOD_NATIVE +#define REG_DUMP_TRACE_LEVEL_VERBOSE IA_CSS_TRACE_LEVEL_ENABLED + +/* SSID value is defined in test makefiles as either isys0 or psys0 */ +#define REG_DUMP_READ_REGISTER(addr) vied_subsystem_load_32(SSID, addr) + +#define REG_DUMP_PRINT_0(...) \ +EXPAND_VA_ARGS(IA_CSS_TRACE_0(REG_DUMP, VERBOSE, __VA_ARGS__)) +#define REG_DUMP_PRINT_1(...) 
\ +EXPAND_VA_ARGS(IA_CSS_TRACE_1(REG_DUMP, VERBOSE, __VA_ARGS__)) +#define EXPAND_VA_ARGS(x) x + +/* Including generated source code for reg_dump */ +#include "ia_css_debug_dump.c" diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/regmem/interface/regmem_access.h b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/regmem/interface/regmem_access.h new file mode 100644 index 0000000000000..d4576af936f6d --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/regmem/interface/regmem_access.h @@ -0,0 +1,67 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+*/ + +#ifndef __REGMEM_ACCESS_H +#define __REGMEM_ACCESS_H + +#include "storage_class.h" + +enum regmem_id { + /* pass pkg_dir address to SPC in non-secure mode */ + PKG_DIR_ADDR_REG = 0, + /* pass syscom configuration to SPC */ + SYSCOM_CONFIG_REG = 1, + /* syscom state - modified by SP */ + SYSCOM_STATE_REG = 2, + /* syscom commands - modified by the host */ + SYSCOM_COMMAND_REG = 3, + /* Store interrupt status - updated by SP */ + SYSCOM_IRQ_REG = 4, + /* Store VTL0_ADDR_MASK in trusted secure regision - provided by host.*/ + SYSCOM_VTL0_ADDR_MASK = 5, +#if HAS_DUAL_CMD_CTX_SUPPORT + /* Initialized if trustlet exists - updated by host */ + TRUSTLET_STATUS = 6, + /* identify if SPC access blocker programming is completed - updated by SP */ + AB_SPC_STATUS = 7, + /* first syscom queue pointer register */ + SYSCOM_QPR_BASE_REG = 8 +#else + /* first syscom queue pointer register */ + SYSCOM_QPR_BASE_REG = 6 +#endif +}; + +#if HAS_DUAL_CMD_CTX_SUPPORT +/* Bit 0: for untrusted non-secure DRV driver on VTL0 + * Bit 1: for trusted secure TEE driver on VTL1 + */ +#define SYSCOM_IRQ_VTL0_MASK 0x1 +#define SYSCOM_IRQ_VTL1_MASK 0x2 +#endif + +STORAGE_CLASS_INLINE unsigned int +regmem_load_32(unsigned int mem_address, unsigned int reg, unsigned int ssid); + +STORAGE_CLASS_INLINE void +regmem_store_32(unsigned int mem_address, unsigned int reg, unsigned int value, + unsigned int ssid); + +#ifdef __VIED_CELL +#include "regmem_access_cell.h" +#else +#include "regmem_access_host.h" +#endif + +#endif /* __REGMEM_ACCESS_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/regmem/regmem.mk b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/regmem/regmem.mk new file mode 100644 index 0000000000000..24ebc1c325d8e --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/regmem/regmem.mk @@ -0,0 +1,32 @@ +# # # +# Support for Intel Camera Imaging ISP subsystem. +# Copyright (c) 2010 - 2018, Intel Corporation. 
+# +# This program is free software; you can redistribute it and/or modify it +# under the terms and conditions of the GNU General Public License, +# version 2, as published by the Free Software Foundation. +# +# This program is distributed in the hope it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for +# more details +# +# +ifndef REGMEM_MK +REGMEM_MK=1 + +# MODULE is REGMEM + +REGMEM_DIR=$${MODULES_DIR}/regmem + +REGMEM_INTERFACE=$(REGMEM_DIR)/interface +REGMEM_SOURCES=$(REGMEM_DIR)/src + +REGMEM_HOST_FILES = +REGMEM_FW_FILES = $(REGMEM_SOURCES)/regmem.c + +REGMEM_CPPFLAGS = -I$(REGMEM_INTERFACE) -I$(REGMEM_SOURCES) +REGMEM_HOST_CPPFLAGS = $(REGMEM_CPPFLAGS) +REGMEM_FW_CPPFLAGS = $(REGMEM_CPPFLAGS) + +endif diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/regmem/src/regmem_access_host.h b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/regmem/src/regmem_access_host.h new file mode 100644 index 0000000000000..8878d7074fabb --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/regmem/src/regmem_access_host.h @@ -0,0 +1,41 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+*/ + +#ifndef __REGMEM_ACCESS_HOST_H +#define __REGMEM_ACCESS_HOST_H + +#include "regmem_access.h" /* implemented interface */ + +#include "storage_class.h" +#include "regmem_const.h" +#include +#include "ia_css_cmem.h" + +STORAGE_CLASS_INLINE unsigned int +regmem_load_32(unsigned int mem_addr, unsigned int reg, unsigned int ssid) +{ + /* No need to add REGMEM_OFFSET, it is already included in mem_addr. */ + return ia_css_cmem_load_32(ssid, mem_addr + (REGMEM_WORD_BYTES*reg)); +} + +STORAGE_CLASS_INLINE void +regmem_store_32(unsigned int mem_addr, unsigned int reg, + unsigned int value, unsigned int ssid) +{ + /* No need to add REGMEM_OFFSET, it is already included in mem_addr. */ + ia_css_cmem_store_32(ssid, mem_addr + (REGMEM_WORD_BYTES*reg), + value); +} + +#endif /* __REGMEM_ACCESS_HOST_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/regmem/src/regmem_const.h b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/regmem/src/regmem_const.h new file mode 100644 index 0000000000000..ac7e3a98a434f --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/regmem/src/regmem_const.h @@ -0,0 +1,28 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+*/ + +#ifndef __REGMEM_CONST_H +#define __REGMEM_CONST_H + +#ifndef REGMEM_SIZE +#define REGMEM_SIZE (16) +#endif /* REGMEM_SIZE */ +#ifndef REGMEM_OFFSET +#define REGMEM_OFFSET (0) +#endif /* REGMEM_OFFSET */ +#ifndef REGMEM_WORD_BYTES +#define REGMEM_WORD_BYTES (4) +#endif + +#endif /* __REGMEM_CONST_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/routing_bitmap/interface/ia_css_rbm.h b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/routing_bitmap/interface/ia_css_rbm.h new file mode 100644 index 0000000000000..4a04a98903264 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/routing_bitmap/interface/ia_css_rbm.h @@ -0,0 +1,173 @@ +/* +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ + +#ifndef __IA_CSS_RBM_H +#define __IA_CSS_RBM_H + +#include "ia_css_rbm_storage_class.h" +#include + +#define IA_CSS_RBM_BITS 64 +/** An element is a 32 bit unsigned integer. 64 bit integers might cause + * problems in the compiler. + */ +#define IA_CSS_RBM_ELEM_TYPE uint32_t +#define IA_CSS_RBM_ELEM_BITS \ + (sizeof(IA_CSS_RBM_ELEM_TYPE)*8) +#define IA_CSS_RBM_NOF_ELEMS \ + ((IA_CSS_RBM_BITS) / (IA_CSS_RBM_ELEM_BITS)) + +/** Users should make no assumption about the actual type of + * ia_css_rbm_t. 
+ */ +typedef struct { + IA_CSS_RBM_ELEM_TYPE data[IA_CSS_RBM_NOF_ELEMS]; +} ia_css_rbm_elems_t; +typedef ia_css_rbm_elems_t ia_css_rbm_t; + +/** Print the bits of a routing bitmap + * @return < 0 on error + */ +IA_CSS_RBM_STORAGE_CLASS_H +int ia_css_rbm_print( + const ia_css_rbm_t bitmap, + void *fid); + +/** Create an empty routing bitmap + * @return bitmap = 0 + */ +IA_CSS_RBM_STORAGE_CLASS_H +ia_css_rbm_t ia_css_rbm_clear(void); + +/** Creates the complement of a routing bitmap + * @param bitmap[in] routing bitmap + * @return ~bitmap + */ +IA_CSS_RBM_STORAGE_CLASS_H +ia_css_rbm_t ia_css_rbm_complement( + const ia_css_rbm_t bitmap); + +/** Create the union of two routing bitmaps + * @param bitmap0[in] routing bitmap 0 + * @param bitmap1[in] routing bitmap 1 + * @return bitmap0 | bitmap1 + */ +IA_CSS_RBM_STORAGE_CLASS_H +ia_css_rbm_t ia_css_rbm_union( + const ia_css_rbm_t bitmap0, + const ia_css_rbm_t bitmap1); + +/** Create the intersection of two routing bitmaps + * @param bitmap0[in] routing bitmap 0 + * @param bitmap1[in] routing bitmap 1 + * @return bitmap0 & bitmap1 + */ +IA_CSS_RBM_STORAGE_CLASS_H +ia_css_rbm_t ia_css_rbm_intersection( + const ia_css_rbm_t bitmap0, + const ia_css_rbm_t bitmap1); + +/** Check if the routing bitmap is empty + * @param bitmap[in] routing bitmap + * @return bitmap == 0 + */ +IA_CSS_RBM_STORAGE_CLASS_H +bool ia_css_is_rbm_empty( + const ia_css_rbm_t bitmap); + +/** Check if the intersection of two routing bitmaps is empty + * @param bitmap0[in] routing bitmap 0 + * @param bitmap1[in] routing bitmap 1 + * @return (bitmap0 & bitmap1) == 0 + */ +IA_CSS_RBM_STORAGE_CLASS_H +bool ia_css_is_rbm_intersection_empty( + const ia_css_rbm_t bitmap0, + const ia_css_rbm_t bitmap1); + +/** Check if the second routing bitmap is a subset of the first (or equal) + * @param bitmap0[in] routing bitmap 0 + * @param bitmap1[in] routing bitmap 1 + * Note: An empty set is always a subset, this function + * returns true if bitmap 1 is empty + * @return
(bitmap0 & bitmap1) == bitmap1 + */ +IA_CSS_RBM_STORAGE_CLASS_H +bool ia_css_is_rbm_subset( + const ia_css_rbm_t bitmap0, + const ia_css_rbm_t bitmap1); + +/** Check if the routing bitmaps are equal + * @param bitmap0[in] routing bitmap 0 + * @param bitmap1[in] routing bitmap 1 + * @return bitmap0 == bitmap1 + */ +IA_CSS_RBM_STORAGE_CLASS_H +bool ia_css_is_rbm_equal( + const ia_css_rbm_t bitmap0, + const ia_css_rbm_t bitmap1); + +/** Checks whether a specific kernel bit is set + * @return bitmap[index] == 1 + */ +IA_CSS_RBM_STORAGE_CLASS_H +int ia_css_is_rbm_set( + const ia_css_rbm_t bitmap, + const unsigned int index); + +/** Create the union of a routing bitmap with a onehot bitmap + * with a bit set at index + * @return bitmap[index] |= 1 +*/ +IA_CSS_RBM_STORAGE_CLASS_H +ia_css_rbm_t ia_css_rbm_set( + const ia_css_rbm_t bitmap, + const unsigned int index); + +/** Creates routing bitmap using a uint64 value. + * @return bitmap with the same bits set as in value (provided that width of bitmap is sufficient). + */ +IA_CSS_RBM_STORAGE_CLASS_H +ia_css_rbm_t ia_css_rbm_create_from_uint64( + const uint64_t value); + +/** Converts an ia_css_rbm_t type to uint64_t. Note that if + * ia_css_rbm_t contains more then 64 bits, only the lowest 64 bits + * are returned. + * @return uint64_t representation of value + */ +IA_CSS_RBM_STORAGE_CLASS_H +uint64_t ia_css_rbm_to_uint64( + const ia_css_rbm_t value); + +/** Creates a routing bitmap with the bit at index 'index' removed. 
+ * @return ~(1 << index) & bitmap + */ +IA_CSS_RBM_STORAGE_CLASS_H +ia_css_rbm_t ia_css_rbm_unset( + const ia_css_rbm_t bitmap, + const unsigned int index); + +/** Create a onehot routing bitmap with a bit set at index + * @return bitmap[index] = 1 + */ +IA_CSS_RBM_STORAGE_CLASS_H +ia_css_rbm_t ia_css_rbm_bit_mask( + const unsigned int index); + +#ifdef __IA_CSS_RBM_INLINE__ +#include "ia_css_rbm_impl.h" +#endif /* __IA_CSS_RBM_INLINE__ */ + +#endif /* __IA_CSS_RBM_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/routing_bitmap/interface/ia_css_rbm_manifest.h b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/routing_bitmap/interface/ia_css_rbm_manifest.h new file mode 100644 index 0000000000000..f497a7de90a93 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/routing_bitmap/interface/ia_css_rbm_manifest.h @@ -0,0 +1,133 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ + +#ifndef __IA_CSS_RBM_MANIFEST_H +#define __IA_CSS_RBM_MANIFEST_H + +#include "type_support.h" +#include "ia_css_rbm_manifest_types.h" + +/** Returns the descriptor size of the RBM manifest. + */ +IA_CSS_RBM_MANIFEST_STORAGE_CLASS_H +unsigned int +ia_css_rbm_manifest_get_size(void); + +/** Initializes the RBM manifest. + * @param rbm[in] Routing bitmap. + */ +IA_CSS_RBM_MANIFEST_STORAGE_CLASS_H +void +ia_css_rbm_manifest_init(struct ia_css_rbm_manifest_s *rbm); + +/** Returns a pointer to the array of mux descriptors. 
+ * @param manifest[in] Routing bitmap manifest. + * @return NULL on error + */ +IA_CSS_RBM_MANIFEST_STORAGE_CLASS_H +ia_css_rbm_mux_desc_t * +ia_css_rbm_manifest_get_muxes(const ia_css_rbm_manifest_t *manifest); + +/** Returns the size of mux descriptors array. + * @param manifest[in] Routing bitmap manifest. + * @return size + */ +IA_CSS_RBM_MANIFEST_STORAGE_CLASS_H +unsigned int +ia_css_rbm_manifest_get_mux_count(const ia_css_rbm_manifest_t *manifest); + +/** Returns a pointer to the array of validation descriptors. + * @param manifest[in] Routing bitmap manifest. + * @return NULL on error + */ +IA_CSS_RBM_MANIFEST_STORAGE_CLASS_H +ia_css_rbm_validation_rule_t * +ia_css_rbm_manifest_get_validation_rules(const ia_css_rbm_manifest_t *manifest); + +/** Returns the size of the validation descriptor array. + * @param manifest[in] Routing bitmap manifest. + * @return size + */ +IA_CSS_RBM_MANIFEST_STORAGE_CLASS_H +unsigned int +ia_css_rbm_manifest_get_validation_rule_count(const ia_css_rbm_manifest_t *manifest); + +/** Returns a pointer to the array of terminal routing descriptors. + * @param manifest[in] Routing bitmap manifest. + * @return NULL on error + */ +IA_CSS_RBM_MANIFEST_STORAGE_CLASS_H +ia_css_rbm_terminal_routing_desc_t * +ia_css_rbm_manifest_get_terminal_routing_desc(const ia_css_rbm_manifest_t *manifest); + +/** \brief Returns the size of the terminal routing descriptor array. + * Note: pretty printing differs from on host and on IPU. + * @param manifest[in] Routing bitmap manifest. + * @return size + */ +IA_CSS_RBM_MANIFEST_STORAGE_CLASS_H +unsigned int +ia_css_rbm_manifest_get_terminal_routing_desc_count(const ia_css_rbm_manifest_t *manifest); + +/** Pretty prints the routing bitmap manifest. + * @param manifest[in] Routing bitmap manifest. + */ +void +ia_css_rbm_manifest_print(const ia_css_rbm_manifest_t *manifest); + +/** \brief Pretty prints a RBM (routing bitmap). + * Note: pretty printing differs from on host and on IPU. 
+ * @param rbm[in] Routing bitmap. + * @param mux[in] List of mux descriptors corresponding to rbm. + * @param mux_desc_count[in] Number of muxes in list mux. + */ +void +ia_css_rbm_pretty_print( + const ia_css_rbm_t *rbm, + const ia_css_rbm_mux_desc_t *mux, + unsigned int mux_desc_count); + +/** \brief check for the validity of a routing bitmap. + * @param manifest[in] Routing bitmap manifest. + * @param rbm[in] Routing bitmap + * @return true on match. + */ +bool +ia_css_rbm_manifest_check_rbm_validity( + const ia_css_rbm_manifest_t *manifest, + const ia_css_rbm_t *rbm); + +/** \brief sets, using manifest info, the value of a mux in the routing bitmap. + * @param rbm[in] Routing bitmap. + * @param mux[in] List of mux descriptors corresponding to rbm. + * @param mux_count[in] Number of muxes in list mux. + * @param gp_dev_id[in] ID of sub system (PSA/ISA) where the mux is located. + * @param mux_id[in] ID of mux to set configuration for. + * @param value[in] Value of the mux. + * @return routing bitmap. + */ +ia_css_rbm_t +ia_css_rbm_set_mux( + ia_css_rbm_t rbm, + ia_css_rbm_mux_desc_t *mux, + unsigned int mux_count, + unsigned int gp_dev_id, + unsigned int mux_id, + unsigned int value); + +#ifdef __IA_CSS_RBM_MANIFEST_INLINE__ +#include "ia_css_rbm_manifest_impl.h" +#endif /* __IA_CSS_RBM_MANIFEST_INLINE__ */ + +#endif /* __IA_CSS_RBM_MANIFEST_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/routing_bitmap/interface/ia_css_rbm_manifest_types.h b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/routing_bitmap/interface/ia_css_rbm_manifest_types.h new file mode 100644 index 0000000000000..ade20446b9f64 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/routing_bitmap/interface/ia_css_rbm_manifest_types.h @@ -0,0 +1,95 @@ +/* +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. 
+ * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ + +#ifndef __IA_CSS_RBM_MANIFEST_TYPES_H +#define __IA_CSS_RBM_MANIFEST_TYPES_H + +#include "ia_css_rbm.h" +#include "vied_nci_psys_resource_model.h" + +#ifndef VIED_NCI_RBM_MAX_MUX_COUNT +#error Please define VIED_NCI_RBM_MAX_MUX_COUNT +#endif +#ifndef VIED_NCI_RBM_MAX_VALIDATION_RULE_COUNT +#error Please define VIED_NCI_RBM_MAX_VALIDATION_RULE_COUNT +#endif +#ifndef VIED_NCI_RBM_MAX_TERMINAL_DESC_COUNT +#error Please define VIED_NCI_RBM_MAX_TERMINAL_DESC_COUNT +#endif +#ifndef N_PADDING_UINT8_IN_RBM_MANIFEST +#error Please define N_PADDING_UINT8_IN_RBM_MANIFEST +#endif + +#define SIZE_OF_RBM_MUX_DESC_S ( \ + (4 * IA_CSS_UINT8_T_BITS)) + +typedef struct ia_css_rbm_mux_desc_s { + uint8_t gp_dev_id; + uint8_t mux_id; + uint8_t offset; + uint8_t size_bits; +} ia_css_rbm_mux_desc_t; + +#define SIZE_OF_RBM_VALIDATION_RULE_DESC_S ( \ + (2 * IA_CSS_RBM_BITS) \ + + (1 * IA_CSS_UINT32_T_BITS)) + +typedef struct ia_css_rbm_validation_rule_s { + ia_css_rbm_t match; /* RBM is an array of 32 bit elements */ + ia_css_rbm_t mask; + uint32_t expected_value; +} ia_css_rbm_validation_rule_t; + +#define SIZE_OF_RBM_TERMINAL_ROUTING_DESC_S ( \ + (4 * IA_CSS_UINT8_T_BITS)) + +typedef struct ia_css_rbm_terminal_routing_desc_s { + uint8_t terminal_id; + uint8_t connection_state; + uint8_t mux_id; + uint8_t state; +} ia_css_rbm_terminal_routing_desc_t; + +#define SIZE_OF_RBM_MANIFEST_S ( \ + (VIED_NCI_RBM_MAX_MUX_COUNT * SIZE_OF_RBM_MUX_DESC_S) \ + + (VIED_NCI_RBM_MAX_VALIDATION_RULE_COUNT * SIZE_OF_RBM_VALIDATION_RULE_DESC_S) \ + + 
(VIED_NCI_RBM_MAX_TERMINAL_DESC_COUNT * SIZE_OF_RBM_TERMINAL_ROUTING_DESC_S) \ + + (3 * IA_CSS_UINT16_T_BITS) \ + + (N_PADDING_UINT8_IN_RBM_MANIFEST * IA_CSS_UINT8_T_BITS)) + +typedef struct ia_css_rbm_manifest_s { +#if VIED_NCI_RBM_MAX_VALIDATION_RULE_COUNT > 0 + ia_css_rbm_validation_rule_t + validation_rules[VIED_NCI_RBM_MAX_VALIDATION_RULE_COUNT]; +#endif + uint16_t mux_desc_count; + uint16_t validation_rule_count; + uint16_t terminal_routing_desc_count; + +#if VIED_NCI_RBM_MAX_MUX_COUNT > 0 + ia_css_rbm_mux_desc_t + mux_desc[VIED_NCI_RBM_MAX_MUX_COUNT]; +#endif + +#if VIED_NCI_RBM_MAX_TERMINAL_DESC_COUNT > 0 + ia_css_rbm_terminal_routing_desc_t + terminal_routing_desc[VIED_NCI_RBM_MAX_TERMINAL_DESC_COUNT]; +#endif + +#if N_PADDING_UINT8_IN_RBM_MANIFEST > 0 + uint8_t padding[N_PADDING_UINT8_IN_RBM_MANIFEST]; +#endif +} ia_css_rbm_manifest_t; + +#endif /* __IA_CSS_RBM_MANIFEST_TYPES_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/routing_bitmap/interface/ia_css_rbm_storage_class.h b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/routing_bitmap/interface/ia_css_rbm_storage_class.h new file mode 100644 index 0000000000000..9548e9a9fabbc --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/routing_bitmap/interface/ia_css_rbm_storage_class.h @@ -0,0 +1,36 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+*/ + +#ifndef __IA_CSS_RBM_STORAGE_CLASS_H +#define __IA_CSS_RBM_STORAGE_CLASS_H + +#include "storage_class.h" + +#ifndef __IA_CSS_RBM_INLINE__ +#define IA_CSS_RBM_STORAGE_CLASS_H STORAGE_CLASS_EXTERN +#define IA_CSS_RBM_STORAGE_CLASS_C +#else +#define IA_CSS_RBM_STORAGE_CLASS_H STORAGE_CLASS_INLINE +#define IA_CSS_RBM_STORAGE_CLASS_C STORAGE_CLASS_INLINE +#endif + +#ifndef __IA_CSS_RBM_MANIFEST_INLINE__ +#define IA_CSS_RBM_MANIFEST_STORAGE_CLASS_H STORAGE_CLASS_EXTERN +#define IA_CSS_RBM_MANIFEST_STORAGE_CLASS_C +#else +#define IA_CSS_RBM_MANIFEST_STORAGE_CLASS_H STORAGE_CLASS_INLINE +#define IA_CSS_RBM_MANIFEST_STORAGE_CLASS_C STORAGE_CLASS_INLINE +#endif + +#endif /* __IA_CSS_RBM_STORAGE_CLASS_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/routing_bitmap/interface/ia_css_rbm_trace.h b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/routing_bitmap/interface/ia_css_rbm_trace.h new file mode 100644 index 0000000000000..dd060323da5c2 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/routing_bitmap/interface/ia_css_rbm_trace.h @@ -0,0 +1,77 @@ +/* +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ + +#ifndef __IA_CSS_RBM_TRACE_H +#define __IA_CSS_RBM_TRACE_H + +#include "ia_css_trace.h" + +/* Not using 0 to identify wrong configuration being passed from the .mk file outside. 
+* Log levels not in the range below will cause a "No RBM_TRACE_CONFIG Tracing level defined" +*/ +#define RBM_TRACE_LOG_LEVEL_OFF 1 +#define RBM_TRACE_LOG_LEVEL_NORMAL 2 +#define RBM_TRACE_LOG_LEVEL_DEBUG 3 + +#define RBM_TRACE_CONFIG_DEFAULT RBM_TRACE_LOG_LEVEL_NORMAL + +#if !defined(RBM_TRACE_CONFIG) +# define RBM_TRACE_CONFIG RBM_TRACE_CONFIG_DEFAULT +#endif + +/* IPU_RESOURCE Module tracing backend is mapped to TUNIT tracing for target platforms */ +#ifdef __HIVECC +# ifndef HRT_CSIM +# define RBM_TRACE_METHOD IA_CSS_TRACE_METHOD_TRACE +# else +# define RBM_TRACE_METHOD IA_CSS_TRACE_METHOD_NATIVE +# endif +#else +# define RBM_TRACE_METHOD IA_CSS_TRACE_METHOD_NATIVE +#endif + +#if (defined(RBM_TRACE_CONFIG)) +/* Module specific trace setting */ +# if RBM_TRACE_CONFIG == RBM_TRACE_LOG_LEVEL_OFF +/* RBM_TRACE_LOG_LEVEL_OFF */ +# define RBM_TRACE_LEVEL_ASSERT IA_CSS_TRACE_LEVEL_DISABLED +# define RBM_TRACE_LEVEL_ERROR IA_CSS_TRACE_LEVEL_DISABLED +# define RBM_TRACE_LEVEL_WARNING IA_CSS_TRACE_LEVEL_DISABLED +# define RBM_TRACE_LEVEL_INFO IA_CSS_TRACE_LEVEL_DISABLED +# define RBM_TRACE_LEVEL_DEBUG IA_CSS_TRACE_LEVEL_DISABLED +# define RBM_TRACE_LEVEL_VERBOSE IA_CSS_TRACE_LEVEL_DISABLED +# elif RBM_TRACE_CONFIG == RBM_TRACE_LOG_LEVEL_NORMAL +/* RBM_TRACE_LOG_LEVEL_NORMAL */ +# define RBM_TRACE_LEVEL_ASSERT IA_CSS_TRACE_LEVEL_DISABLED +# define RBM_TRACE_LEVEL_ERROR IA_CSS_TRACE_LEVEL_ENABLED +# define RBM_TRACE_LEVEL_WARNING IA_CSS_TRACE_LEVEL_DISABLED +# define RBM_TRACE_LEVEL_INFO IA_CSS_TRACE_LEVEL_ENABLED +# define RBM_TRACE_LEVEL_DEBUG IA_CSS_TRACE_LEVEL_DISABLED +# define RBM_TRACE_LEVEL_VERBOSE IA_CSS_TRACE_LEVEL_DISABLED +# elif RBM_TRACE_CONFIG == RBM_TRACE_LOG_LEVEL_DEBUG +/* RBM_TRACE_LOG_LEVEL_DEBUG */ +# define RBM_TRACE_LEVEL_ASSERT IA_CSS_TRACE_LEVEL_ENABLED +# define RBM_TRACE_LEVEL_ERROR IA_CSS_TRACE_LEVEL_ENABLED +# define RBM_TRACE_LEVEL_WARNING IA_CSS_TRACE_LEVEL_ENABLED +# define RBM_TRACE_LEVEL_INFO IA_CSS_TRACE_LEVEL_ENABLED +# define 
RBM_TRACE_LEVEL_DEBUG IA_CSS_TRACE_LEVEL_ENABLED +# define RBM_TRACE_LEVEL_VERBOSE IA_CSS_TRACE_LEVEL_ENABLED +# else +# error "No RBM_TRACE_CONFIG Tracing level defined" +# endif +#else +# error "RBM_TRACE_CONFIG not defined" +#endif + +#endif /* __IA_CSS_RBM_TRACE_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/routing_bitmap/routing_bitmap.mk b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/routing_bitmap/routing_bitmap.mk new file mode 100644 index 0000000000000..f4251f9740fde --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/routing_bitmap/routing_bitmap.mk @@ -0,0 +1,39 @@ +# # # +# Support for Intel Camera Imaging ISP subsystem. +# Copyright (c) 2010 - 2018, Intel Corporation. +# +# This program is free software; you can redistribute it and/or modify it +# under the terms and conditions of the GNU General Public License, +# version 2, as published by the Free Software Foundation. +# +# This program is distributed in the hope it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +# FITNESS FOR A PARTICULAR PURPOSE.
See the GNU General Public License for +# more details +# +# + +ifdef _H_ROUTING_BITMAP_MK +$(error ERROR: routing_bitmap.mk included multiple times, please check makefile) +else +_H_ROUTING_BITMAP_MK=1 +endif + +ROUTING_BITMAP_FILES += $(ROUTING_BITMAP_DIR)/src/ia_css_rbm_manifest.c + +ROUTING_BITMAP_DIR = $(MODULES_DIR)/routing_bitmap +ROUTING_BITMAP_INTERFACE = $(ROUTING_BITMAP_DIR)/interface +ROUTING_BITMAP_SOURCES = $(ROUTING_BITMAP_DIR)/src + +ROUTING_BITMAP_CPPFLAGS = -I$(ROUTING_BITMAP_INTERFACE) +ROUTING_BITMAP_CPPFLAGS += -I$(ROUTING_BITMAP_SOURCES) + +ifeq ($(ROUTING_BITMAP_INLINE),1) +ROUTING_BITMAP_CPPFLAGS += -D__IA_CSS_RBM_INLINE__ +else +ROUTING_BITMAP_FILES += $(ROUTING_BITMAP_DIR)/src/ia_css_rbm.c +endif + +ifeq ($(ROUTING_BITMAP_MANIFEST_INLINE),1) +ROUTING_BITMAP_CPPFLAGS += -D__IA_CSS_RBM_MANIFEST_INLINE__ +endif diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/routing_bitmap/src/ia_css_rbm.c b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/routing_bitmap/src/ia_css_rbm.c new file mode 100644 index 0000000000000..bc5bf14efbd77 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/routing_bitmap/src/ia_css_rbm.c @@ -0,0 +1,17 @@ +/* +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+*/ + +#ifndef __IA_CSS_RBM_INLINE__ +#include "ia_css_rbm_impl.h" +#endif /* __IA_CSS_RBM_INLINE__ */ diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/routing_bitmap/src/ia_css_rbm_impl.h b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/routing_bitmap/src/ia_css_rbm_impl.h new file mode 100644 index 0000000000000..c8cd78d416a17 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/routing_bitmap/src/ia_css_rbm_impl.h @@ -0,0 +1,338 @@ +/* +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+*/ + +#include "ia_css_rbm.h" +#include "type_support.h" +#include "misc_support.h" +#include "assert_support.h" +#include "math_support.h" +#include "ia_css_rbm_trace.h" + +STORAGE_CLASS_INLINE int ia_css_rbm_compute_weight( + const ia_css_rbm_t bitmap); + +STORAGE_CLASS_INLINE ia_css_rbm_t ia_css_rbm_shift( + const ia_css_rbm_t bitmap); + +IA_CSS_RBM_STORAGE_CLASS_C +bool ia_css_is_rbm_intersection_empty( + const ia_css_rbm_t bitmap0, + const ia_css_rbm_t bitmap1) +{ + ia_css_rbm_t intersection; + + IA_CSS_TRACE_0(RBM, VERBOSE, + "ia_css_is_rbm_intersection_empty(): enter:\n"); + + intersection = ia_css_rbm_intersection(bitmap0, bitmap1); + return ia_css_is_rbm_empty(intersection); +} + +IA_CSS_RBM_STORAGE_CLASS_C +bool ia_css_is_rbm_empty( + const ia_css_rbm_t bitmap) +{ + unsigned int i; + bool is_empty = true; + + IA_CSS_TRACE_0(RBM, VERBOSE, + "ia_css_is_rbm_empty(): enter:\n"); + for (i = 0; i < IA_CSS_RBM_NOF_ELEMS; i++) { + is_empty &= bitmap.data[i] == 0; + } + return is_empty; +} + +IA_CSS_RBM_STORAGE_CLASS_C +bool ia_css_is_rbm_equal( + const ia_css_rbm_t bitmap0, + const ia_css_rbm_t bitmap1) +{ + unsigned int i; + bool is_equal = true; + + IA_CSS_TRACE_0(RBM, VERBOSE, + "ia_css_is_rbm_equal(): enter:\n"); + for (i = 0; i < IA_CSS_RBM_NOF_ELEMS; i++) { + is_equal = is_equal && (bitmap0.data[i] == bitmap1.data[i]); + } + return is_equal; +} + +IA_CSS_RBM_STORAGE_CLASS_C +bool ia_css_is_rbm_subset( + const ia_css_rbm_t bitmap0, + const ia_css_rbm_t bitmap1) +{ + ia_css_rbm_t intersection; + + IA_CSS_TRACE_0(RBM, VERBOSE, + "ia_css_is_rbm_subset(): enter:\n"); + + intersection = ia_css_rbm_intersection(bitmap0, bitmap1); + return ia_css_is_rbm_equal(intersection, bitmap1); +} + +IA_CSS_RBM_STORAGE_CLASS_C +ia_css_rbm_t ia_css_rbm_clear(void) +{ + unsigned int i; + ia_css_rbm_t bitmap; + + IA_CSS_TRACE_0(RBM, VERBOSE, + "ia_css_rbm_clear(): enter:\n"); + for (i = 0; i < IA_CSS_RBM_NOF_ELEMS; i++) { + bitmap.data[i] = 0; + } + return bitmap; +} + 
+IA_CSS_RBM_STORAGE_CLASS_C +ia_css_rbm_t ia_css_rbm_complement( + const ia_css_rbm_t bitmap) +{ + unsigned int i; + ia_css_rbm_t result; + + IA_CSS_TRACE_0(RBM, VERBOSE, + "ia_css_rbm_complement(): enter:\n"); + for (i = 0; i < IA_CSS_RBM_NOF_ELEMS; i++) { + result.data[i] = ~bitmap.data[i]; + } + return result; +} + +IA_CSS_RBM_STORAGE_CLASS_C +ia_css_rbm_t ia_css_rbm_union( + const ia_css_rbm_t bitmap0, + const ia_css_rbm_t bitmap1) +{ + unsigned int i; + ia_css_rbm_t result; + + IA_CSS_TRACE_0(RBM, VERBOSE, + "ia_css_rbm_union(): enter:\n"); + for (i = 0; i < IA_CSS_RBM_NOF_ELEMS; i++) { + result.data[i] = (bitmap0.data[i] | bitmap1.data[i]); + } + return result; +} + +IA_CSS_RBM_STORAGE_CLASS_C +ia_css_rbm_t ia_css_rbm_intersection( + const ia_css_rbm_t bitmap0, + const ia_css_rbm_t bitmap1) +{ + unsigned int i; + ia_css_rbm_t result; + + IA_CSS_TRACE_0(RBM, VERBOSE, + "ia_css_rbm_intersection(): enter:\n"); + for (i = 0; i < IA_CSS_RBM_NOF_ELEMS; i++) { + result.data[i] = (bitmap0.data[i] & bitmap1.data[i]); + } + return result; +} + +IA_CSS_RBM_STORAGE_CLASS_C +ia_css_rbm_t ia_css_rbm_set( + const ia_css_rbm_t bitmap, + const unsigned int index) +{ + ia_css_rbm_t bit_mask; + + IA_CSS_TRACE_0(RBM, VERBOSE, + "ia_css_rbm_set(): enter:\n"); + + bit_mask = ia_css_rbm_bit_mask(index); + return ia_css_rbm_union(bitmap, bit_mask); +} + +IA_CSS_RBM_STORAGE_CLASS_C +ia_css_rbm_t ia_css_rbm_create_from_uint64( + const uint64_t value) +{ + unsigned int i; + ia_css_rbm_t result; + + IA_CSS_TRACE_0(RBM, VERBOSE, + "ia_css_rbm_create_from_uint64(): enter:\n"); + + result = ia_css_rbm_clear(); + for (i = 0; i < IA_CSS_RBM_NOF_ELEMS; i++) { + /* masking is done implictly, the MSB bits of casting will be chopped off */ + result.data[i] = (IA_CSS_RBM_ELEM_TYPE) + (value >> (i * IA_CSS_RBM_ELEM_BITS)); + } + return result; +} + +IA_CSS_RBM_STORAGE_CLASS_C +uint64_t ia_css_rbm_to_uint64( + const ia_css_rbm_t value) +{ + const unsigned int bits64 = sizeof(uint64_t) * 8; + const 
unsigned int nof_elems_bits64 = bits64 / IA_CSS_RBM_ELEM_BITS; + unsigned int i; + uint64_t res = 0; + + IA_CSS_TRACE_0(RBM, VERBOSE, + "ia_css_rbm_to_uint64(): enter:\n"); + + assert((bits64 % IA_CSS_RBM_ELEM_BITS) == 0); + assert(nof_elems_bits64 > 0); + + for (i = 0; i < MIN(IA_CSS_RBM_NOF_ELEMS, nof_elems_bits64); i++) { + res |= ((uint64_t)(value.data[i]) << (i * IA_CSS_RBM_ELEM_BITS)); + } + for (i = nof_elems_bits64; i < IA_CSS_RBM_NOF_ELEMS; i++) { + assert(value.data[i] == 0); + } + return res; +} + +IA_CSS_RBM_STORAGE_CLASS_C +ia_css_rbm_t ia_css_rbm_unset( + const ia_css_rbm_t bitmap, + const unsigned int index) +{ + ia_css_rbm_t result; + + IA_CSS_TRACE_0(RBM, VERBOSE, + "ia_css_rbm_unset(): enter:\n"); + + result = ia_css_rbm_bit_mask(index); + result = ia_css_rbm_complement(result); + return ia_css_rbm_intersection(bitmap, result); +} + +IA_CSS_RBM_STORAGE_CLASS_C +ia_css_rbm_t ia_css_rbm_bit_mask( + const unsigned int index) +{ + unsigned int elem_index; + unsigned int elem_bit_index; + ia_css_rbm_t bit_mask = ia_css_rbm_clear(); + + assert(index < IA_CSS_RBM_BITS); + + IA_CSS_TRACE_0(RBM, VERBOSE, + "ia_css_rbm_bit_mask(): enter:\n"); + if (index < IA_CSS_RBM_BITS) { + elem_index = index / IA_CSS_RBM_ELEM_BITS; + elem_bit_index = index % IA_CSS_RBM_ELEM_BITS; + assert(elem_index < IA_CSS_RBM_NOF_ELEMS); + + bit_mask.data[elem_index] = 1 << elem_bit_index; + } + return bit_mask; +} + +STORAGE_CLASS_INLINE +int ia_css_rbm_compute_weight( + const ia_css_rbm_t bitmap) +{ + ia_css_rbm_t loc_bitmap; + int weight = 0; + int i; + + IA_CSS_TRACE_0(RBM, VERBOSE, + "ia_css_rbm_compute_weight(): enter:\n"); + + loc_bitmap = bitmap; + + /* In fact; do not need the iterator "i" */ + for (i = 0; (i < IA_CSS_RBM_BITS) && + !ia_css_is_rbm_empty(loc_bitmap); i++) { + weight += ia_css_is_rbm_set(loc_bitmap, 0); + loc_bitmap = ia_css_rbm_shift(loc_bitmap); + } + + return weight; +} + +IA_CSS_RBM_STORAGE_CLASS_C +int ia_css_is_rbm_set( + const ia_css_rbm_t bitmap, + 
const unsigned int index) +{ + unsigned int elem_index; + unsigned int elem_bit_index; + + IA_CSS_TRACE_0(RBM, VERBOSE, + "ia_css_is_rbm_set(): enter:\n"); + + assert(index < IA_CSS_RBM_BITS); + + elem_index = index / IA_CSS_RBM_ELEM_BITS; + elem_bit_index = index % IA_CSS_RBM_ELEM_BITS; + assert(elem_index < IA_CSS_RBM_NOF_ELEMS); + return (((bitmap.data[elem_index] >> elem_bit_index) & 0x1) == 1); +} + +STORAGE_CLASS_INLINE +ia_css_rbm_t ia_css_rbm_shift( + const ia_css_rbm_t bitmap) +{ + int i; + unsigned int lsb_current_elem = 0; + unsigned int lsb_previous_elem = 0; + ia_css_rbm_t loc_bitmap; + + IA_CSS_TRACE_0(RBM, VERBOSE, + "ia_css_rbm_shift(): enter:\n"); + + loc_bitmap = bitmap; + + for (i = IA_CSS_RBM_NOF_ELEMS - 1; i >= 0; i--) { + lsb_current_elem = bitmap.data[i] & 0x01; + loc_bitmap.data[i] >>= 1; + loc_bitmap.data[i] |= (lsb_previous_elem << (IA_CSS_RBM_ELEM_BITS - 1)); + lsb_previous_elem = lsb_current_elem; + } + return loc_bitmap; +} + +IA_CSS_RBM_STORAGE_CLASS_C +int ia_css_rbm_print( + const ia_css_rbm_t bitmap, + void *fid) +{ + int retval = -1; + int bit; + unsigned int bit_index = 0; + ia_css_rbm_t loc_bitmap; + + IA_CSS_TRACE_0(RBM, INFO, + "ia_css_rbm_print(): enter:\n"); + + NOT_USED(fid); + NOT_USED(bit); + + IA_CSS_TRACE_0(RBM, INFO, "kernel bitmap {\n"); + + loc_bitmap = bitmap; + + for (bit_index = 0; (bit_index < IA_CSS_RBM_BITS) && + !ia_css_is_rbm_empty(loc_bitmap); bit_index++) { + + bit = ia_css_is_rbm_set(loc_bitmap, 0); + loc_bitmap = ia_css_rbm_shift(loc_bitmap); + IA_CSS_TRACE_2(RBM, INFO, "\t%d\t = %d\n", bit_index, bit); + } + IA_CSS_TRACE_0(RBM, INFO, "}\n"); + + retval = 0; + return retval; +} diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/routing_bitmap/src/ia_css_rbm_manifest.c b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/routing_bitmap/src/ia_css_rbm_manifest.c new file mode 100644 index 0000000000000..ef3beb8760b62 --- /dev/null +++ 
b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/routing_bitmap/src/ia_css_rbm_manifest.c @@ -0,0 +1,224 @@ +/* +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ + +#include "ia_css_rbm_manifest.h" +#include "ia_css_rbm.h" +#include "type_support.h" +#include "misc_support.h" +#include "assert_support.h" +#include "math_support.h" +#include "ia_css_rbm_trace.h" + +#ifndef __IA_CSS_RBM_MANIFEST_INLINE__ +#include "ia_css_rbm_manifest_impl.h" +#endif /* __IA_CSS_RBM_MANIFEST_INLINE__ */ + +STORAGE_CLASS_INLINE void +ia_css_rbm_print_with_header( + const ia_css_rbm_t *rbm, + const ia_css_rbm_mux_desc_t *mux, + unsigned int mux_desc_count, + bool print_header) +{ +#ifdef __HIVECC + ia_css_rbm_print(*rbm, NULL); + (void)print_header; + (void)mux_desc_count; + (void)mux; +#else + int i, j; + + assert(mux != NULL); + assert(rbm != NULL); + if (mux == NULL || rbm == NULL) + return; + + if (print_header) { + for (i = mux_desc_count - 1; i >= 0; i--) { + PRINT("%*d|", mux[i].size_bits, mux[i].mux_id); + } + PRINT("\n"); + } + for (i = mux_desc_count - 1; i >= 0; i--) { + for (j = mux[i].size_bits - 1; j >= 0; j--) { + PRINT("%d", ia_css_is_rbm_set(*rbm, j + mux[i].offset)); + } + PRINT("|"); + } +#endif +} + +STORAGE_CLASS_INLINE void +ia_css_rbm_validation_rule_print( + ia_css_rbm_validation_rule_t *rule, + ia_css_rbm_mux_desc_t *mux_desc, + unsigned int mux_desc_count, + bool print_header) +{ + ia_css_rbm_print_with_header(&rule->match, mux_desc, mux_desc_count, 
print_header); +#ifdef __HIVECC + IA_CSS_TRACE_0(RBM, INFO, "Mask\n"); +#else + PRINT("\t"); +#endif + ia_css_rbm_print_with_header(&rule->mask, mux_desc, mux_desc_count, false); +#ifdef __HIVECC + IA_CSS_TRACE_1(RBM, INFO, "Rule expected_value: %d\n", rule->expected_value); +#else + PRINT("\t%d\n", rule->expected_value); +#endif +} + +void +ia_css_rbm_pretty_print( + const ia_css_rbm_t *rbm, + const ia_css_rbm_mux_desc_t *mux, + unsigned int mux_desc_count) +{ + ia_css_rbm_print_with_header(rbm, mux, mux_desc_count, false); +#ifndef __HIVECC + PRINT("\n"); +#endif +} + +void +ia_css_rbm_manifest_print( + const ia_css_rbm_manifest_t *manifest) +{ + int retval = -1; + unsigned int i; + bool print_header = true; + ia_css_rbm_mux_desc_t *muxes; + ia_css_rbm_validation_rule_t *validation_rule; + ia_css_rbm_terminal_routing_desc_t *terminal_routing_desc; + + verifjmpexit(manifest != NULL); + muxes = ia_css_rbm_manifest_get_muxes(manifest); + verifjmpexit(muxes != NULL || manifest->mux_desc_count == 0); + + for (i = 0; i < manifest->mux_desc_count; i++) { + IA_CSS_TRACE_4(RBM, INFO, "id: %d.%d offstet: %d size_bits: %d\n", + muxes[i].gp_dev_id, + muxes[i].mux_id, + muxes[i].offset, + muxes[i].size_bits); + } +#if VIED_NCI_RBM_MAX_VALIDATION_RULE_COUNT != 0 + validation_rule = ia_css_rbm_manifest_get_validation_rules(manifest); + verifjmpexit(validation_rule != NULL || manifest->validation_rule_count == 0); + + for (i = 0; i < manifest->validation_rule_count; i++) { + ia_css_rbm_validation_rule_print(&validation_rule[i], muxes, manifest->mux_desc_count, print_header); + print_header = false; + } +#else + (void) validation_rule; + (void) print_header; +#endif + terminal_routing_desc = ia_css_rbm_manifest_get_terminal_routing_desc(manifest); + verifjmpexit(terminal_routing_desc != NULL || manifest->terminal_routing_desc_count == 0); + for (i = 0; i < manifest->terminal_routing_desc_count; i++) { + IA_CSS_TRACE_4(RBM, INFO, "terminal_id: %d connection_state: %d mux_id: %d 
state: %d\n", + terminal_routing_desc[i].terminal_id, + terminal_routing_desc[i].connection_state, + terminal_routing_desc[i].mux_id, + terminal_routing_desc[i].state); + } + + retval = 0; +EXIT: + if (retval != 0) { + IA_CSS_TRACE_0(RBM, ERROR, "ia_css_rbm_manifest_print failed\n"); + } +} + +bool +ia_css_rbm_manifest_check_rbm_validity( + const ia_css_rbm_manifest_t *manifest, + const ia_css_rbm_t *rbm) +{ + unsigned int i; + ia_css_rbm_t res; + ia_css_rbm_t final_rbm = ia_css_rbm_clear(); + ia_css_rbm_validation_rule_t *rules; + bool matches_rules; + + verifjmpexit(manifest != NULL); + verifjmpexit(rbm != NULL); + + if (ia_css_is_rbm_empty(*rbm)) { + IA_CSS_TRACE_0(RBM, ERROR, "ia_css_rbm_manifest_check_rbm_validity failes: RBM is empty.\n"); + return false; + } + +#if VIED_NCI_RBM_MAX_VALIDATION_RULE_COUNT != 0 + rules = ia_css_rbm_manifest_get_validation_rules(manifest); + verifjmpexit(rules != NULL || manifest->validation_rule_count == 0); + + for (i = 0; i < manifest->validation_rule_count; i++) { + res = ia_css_rbm_intersection(*rbm, rules[i].mask); + matches_rules = ia_css_is_rbm_equal(res, rules[i].match); + + if (!matches_rules) + continue; + + if (rules[i].expected_value == 1) { + final_rbm = ia_css_rbm_union(final_rbm, res); + } else { + IA_CSS_TRACE_1(RBM, INFO, "ia_css_rbm_manifest_check_rbm_validity failes on rule %d\n", 1); + return false; + } + } +#else + (void)matches_rules; + (void)i; + (void)rules; + (void)res; +#endif + return ia_css_is_rbm_equal(final_rbm, *rbm); +EXIT: + return false; +} + +ia_css_rbm_t +ia_css_rbm_set_mux( + ia_css_rbm_t rbm, + ia_css_rbm_mux_desc_t *mux, + unsigned int mux_count, + unsigned int gp_dev_id, + unsigned int mux_id, + unsigned int value) +{ + unsigned int i; + + verifjmpexit(mux != NULL); + + for (i = 0; i < mux_count; i++) { + if (mux[i].gp_dev_id == gp_dev_id && mux[i].mux_id == mux_id) + break; + } + if (i >= mux_count) { + IA_CSS_TRACE_2(RBM, ERROR, + "ia_css_rbm_set_mux mux with mux_id %d.%d not found\n", 
gp_dev_id, mux_id); + return rbm; + } + if (value >= mux[i].size_bits) { + IA_CSS_TRACE_3(RBM, ERROR, + "ia_css_rbm_set_mux mux mux_id %d.%d, value %d illegal\n", gp_dev_id, mux_id, value); + return rbm; + } + rbm = ia_css_rbm_set(rbm, mux[i].offset + value); +EXIT: + return rbm; +} diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/routing_bitmap/src/ia_css_rbm_manifest_impl.h b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/routing_bitmap/src/ia_css_rbm_manifest_impl.h new file mode 100644 index 0000000000000..7059b6bc898e0 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/routing_bitmap/src/ia_css_rbm_manifest_impl.h @@ -0,0 +1,108 @@ + + +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+*/ + +#include "ia_css_rbm_manifest.h" +#include "ia_css_rbm_trace.h" + +#include "type_support.h" +#include "math_support.h" +#include "error_support.h" +#include "assert_support.h" +#include "print_support.h" + +STORAGE_CLASS_INLINE +void __ia_css_rbm_manifest_check_struct(void) +{ + COMPILATION_ERROR_IF( + sizeof(ia_css_rbm_manifest_t) != (SIZE_OF_RBM_MANIFEST_S / IA_CSS_UINT8_T_BITS)); + COMPILATION_ERROR_IF( + (sizeof(ia_css_rbm_manifest_t) % 8 /* 64 bit */) != 0); +} + +IA_CSS_RBM_MANIFEST_STORAGE_CLASS_C +unsigned int +ia_css_rbm_manifest_get_size(void) +{ + unsigned int size = sizeof(struct ia_css_rbm_manifest_s); + + return ceil_mul(size, sizeof(uint64_t)); +} + +IA_CSS_RBM_MANIFEST_STORAGE_CLASS_C +void +ia_css_rbm_manifest_init(struct ia_css_rbm_manifest_s *rbm) +{ + rbm->mux_desc_count = 0; + rbm->terminal_routing_desc_count = 0; + rbm->validation_rule_count = 0; +} + +IA_CSS_RBM_MANIFEST_STORAGE_CLASS_C +ia_css_rbm_mux_desc_t * +ia_css_rbm_manifest_get_muxes(const ia_css_rbm_manifest_t *manifest) +{ +#if VIED_NCI_RBM_MAX_MUX_COUNT == 0 + (void)manifest; + return NULL; +#else + return (ia_css_rbm_mux_desc_t *)manifest->mux_desc; +#endif +} + +IA_CSS_RBM_MANIFEST_STORAGE_CLASS_C +unsigned int +ia_css_rbm_manifest_get_mux_count(const ia_css_rbm_manifest_t *manifest) +{ + return manifest->mux_desc_count; +} + +IA_CSS_RBM_MANIFEST_STORAGE_CLASS_C +ia_css_rbm_validation_rule_t * +ia_css_rbm_manifest_get_validation_rules(const ia_css_rbm_manifest_t *manifest) +{ +#if VIED_NCI_RBM_MAX_VALIDATION_RULE_COUNT == 0 + (void)manifest; + return NULL; +#else + return (ia_css_rbm_validation_rule_t *)manifest->validation_rules; +#endif +} + +IA_CSS_RBM_MANIFEST_STORAGE_CLASS_C +unsigned int +ia_css_rbm_manifest_get_validation_rule_count(const ia_css_rbm_manifest_t *manifest) +{ + return manifest->validation_rule_count; +} + +IA_CSS_RBM_MANIFEST_STORAGE_CLASS_C +ia_css_rbm_terminal_routing_desc_t * +ia_css_rbm_manifest_get_terminal_routing_desc(const 
ia_css_rbm_manifest_t *manifest) +{ +#if VIED_NCI_RBM_MAX_TERMINAL_DESC_COUNT == 0 + (void)manifest; + return NULL; +#else + return (ia_css_rbm_terminal_routing_desc_t *)manifest->terminal_routing_desc; +#endif +} + +IA_CSS_RBM_MANIFEST_STORAGE_CLASS_C +unsigned int +ia_css_rbm_manifest_get_terminal_routing_desc_count(const ia_css_rbm_manifest_t *manifest) +{ + return manifest->terminal_routing_desc_count; +} diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/support/assert_support.h b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/support/assert_support.h new file mode 100644 index 0000000000000..f904a494b53c9 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/support/assert_support.h @@ -0,0 +1,197 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ + +#ifndef __ASSERT_SUPPORT_H +#define __ASSERT_SUPPORT_H + +/* This file provides support for run-time assertions + * and compile-time assertions. + * + * Run-time assertions are provided via the following syntax: + * assert(condition) + * Run-time assertions are disabled using the NDEBUG flag. + * + * Compile time assertions are provided via the following syntax: + * COMPILATION_ERROR_IF(condition); + * A compile-time assertion will fail to compile if the condition is false. + * The condition must be constant, such that it can be evaluated + * at compile time. + * + * OP___assert is deprecated.
+ */ + +#define IA_CSS_ASSERT(expr) assert(expr) + +#ifdef __KLOCWORK__ +/* Klocwork does not see that assert will lead to abortion + * as there is no good way to tell this to KW and the code + * should not depend on assert to function (actually the assert + * could be disabled in a release build) it was decided to + * disable the assert for KW scans (by defining NDEBUG) + */ +#define NDEBUG +#endif /* __KLOCWORK__ */ + +/** + * The following macro can help to test the size of a struct at compile + * time rather than at run-time. It does not work for all compilers; see + * below. + * + * Depending on the value of 'condition', the following macro is expanded to: + * - condition==true: + * an expression containing an array declaration with negative size, + * usually resulting in a compilation error + * - condition==false: + * (void) 1; // C statement with no effect + * + * example: + * COMPILATION_ERROR_IF( sizeof(struct host_sp_queues) != + * SIZE_OF_HOST_SP_QUEUES_STRUCT); + * + * verify that the macro indeed triggers a compilation error with your compiler: + * COMPILATION_ERROR_IF( sizeof(struct host_sp_queues) != + * (sizeof(struct host_sp_queues)+1) ); + * + * Not all compilers will trigger an error with this macro; + * use a search engine to search for BUILD_BUG_ON to find other methods. 
+ */ +#define COMPILATION_ERROR_IF(condition) \ +((void)sizeof(char[1 - 2*!!(condition)])) + +/* Compile time assertion */ +#ifndef CT_ASSERT +#define CT_ASSERT(cnd) ((void)sizeof(char[(cnd)?1 : -1])) +#endif /* CT_ASSERT */ + +#ifdef NDEBUG + +#define assert(cnd) ((void)0) + +#else + +#include "storage_class.h" + +#if defined(_MSC_VER) +#ifdef _KERNEL_MODE +/* Windows kernel mode compilation */ +#include +#define assert(cnd) ASSERT(cnd) +#else +/* Windows usermode compilation */ +#include +#endif + +#elif defined(__HIVECC) + +/* + * target: assert disabled + * sched: assert enabled only when SCHED_DEBUG is defined + * unsched: assert enabled + */ +#if defined(HRT_HW) +#define assert(cnd) ((void)0) +#elif defined(HRT_SCHED) && !defined(DEBUG_SCHED) +#define assert(cnd) ((void)0) +#elif defined(PIPE_GENERATION) +#define assert(cnd) ((void)0) +#else +#include +#define assert(cnd) OP___csim_assert(cnd) +#endif + +#elif defined(__KERNEL__) +#include + +#ifndef KERNEL_ASSERT_TO_BUG +#ifndef KERNEL_ASSERT_TO_BUG_ON +#ifndef KERNEL_ASSERT_TO_WARN_ON +#ifndef KERNEL_ASSERT_TO_WARN_ON_INF_LOOP +#ifndef KERNEL_ASSERT_UNDEFINED +/* Default */ +#define KERNEL_ASSERT_TO_BUG +#endif /*KERNEL_ASSERT_UNDEFINED*/ +#endif /*KERNEL_ASSERT_TO_WARN_ON_INF_LOOP*/ +#endif /*KERNEL_ASSERT_TO_WARN_ON*/ +#endif /*KERNEL_ASSERT_TO_BUG_ON*/ +#endif /*KERNEL_ASSERT_TO_BUG*/ + +#ifdef KERNEL_ASSERT_TO_BUG +/* TODO: it would be cleaner to use this: + * #define assert(cnd) BUG_ON(cnd) + * but that causes many compiler warnings (==errors) under Android + * because it seems that the BUG_ON() macro is not seen as a check by + * gcc like the BUG() macro is. 
*/ +#define assert(cnd) \ + do { \ + if (!(cnd)) { \ + BUG(); \ + } \ + } while (0) +#endif /*KERNEL_ASSERT_TO_BUG*/ + +#ifdef KERNEL_ASSERT_TO_BUG_ON +#define assert(cnd) BUG_ON(!(cnd)) +#endif /*KERNEL_ASSERT_TO_BUG_ON*/ + +#ifdef KERNEL_ASSERT_TO_WARN_ON +#define assert(cnd) WARN_ON(!(cnd)) +#endif /*KERNEL_ASSERT_TO_WARN_ON*/ + +#ifdef KERNEL_ASSERT_TO_WARN_ON_INF_LOOP +#define assert(cnd) \ + do { \ + int not_cnd = !(cnd); \ + WARN_ON(not_cnd); \ + if (not_cnd) { \ + for (;;) { \ + } \ + } \ + } while (0) +#endif /*KERNEL_ASSERT_TO_WARN_ON_INF_LOOP*/ + +#ifdef KERNEL_ASSERT_UNDEFINED +#include KERNEL_ASSERT_DEFINITION_FILESTRING +#endif /*KERNEL_ASSERT_UNDEFINED*/ + +#elif defined(__FIST__) || defined(__GNUC__) + +#include "assert.h" + +#else /* default is for unknown environments */ +#define assert(cnd) ((void)0) +#endif + +#endif /* NDEBUG */ + +#ifndef PIPE_GENERATION +/* Deprecated OP___assert, this is still used in ~1000 places + * in the code. This will be removed over time. + * The implementation for the pipe generation tool is in see support.isp.h */ +#define OP___assert(cnd) assert(cnd) + +#ifdef C_RUN +#define compile_time_assert(cond) OP___assert(cond) +#else +#include "storage_class.h" +extern void _compile_time_assert(void); +STORAGE_CLASS_INLINE void compile_time_assert(unsigned cond) +{ + /* Call undefined function if cond is false */ + if (!cond) + _compile_time_assert(); +} +#endif +#endif /* PIPE_GENERATION */ + +#endif /* __ASSERT_SUPPORT_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/support/cpu_mem_support.h b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/support/cpu_mem_support.h new file mode 100644 index 0000000000000..fa349cac4b24a --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/support/cpu_mem_support.h @@ -0,0 +1,233 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. 
+ * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ + +#ifndef __CPU_MEM_SUPPORT_H +#define __CPU_MEM_SUPPORT_H + +#include "storage_class.h" +#include "assert_support.h" +#include "type_support.h" + +STORAGE_CLASS_INLINE void* +ia_css_cpu_mem_copy(void *dst, const void *src, unsigned int size) +{ + /* memcpy cannot be used in Windows (function is not allowed), + * and the safer function memcpy_s is not available on other platforms. + * Because usage of ia_css_cpu_mem_copy is minimal, we implement it here in an easy, + * but sub-optimal way. + */ + unsigned int i; + + assert(dst != NULL && src != NULL); + + if (!(dst != NULL && src != NULL)) { + return NULL; + } + for (i = 0; i < size; i++) { + ((char *)dst)[i] = ((char *)src)[i]; + } + return dst; +} + +#if defined(__KERNEL__) + +#include +#include +#include +#include + +/* TODO: remove, workaround for issue in hrt file ibuf_ctrl_2600_config.c + * error checking code added to SDK that uses calls to exit function + */ +#define exit(a) return + +STORAGE_CLASS_INLINE void* +ia_css_cpu_mem_alloc(unsigned int size) +{ + return kmalloc(size, GFP_KERNEL); +} + +STORAGE_CLASS_INLINE void* +ia_css_cpu_mem_alloc_page_aligned(unsigned int size) +{ + return ia_css_cpu_mem_alloc(size); /* todo: align to page size */ +} + +STORAGE_CLASS_INLINE void +ia_css_cpu_mem_protect(void *ptr, unsigned int size, int prot) +{ + /* nothing here yet */ +} + +STORAGE_CLASS_INLINE void* +ia_css_cpu_mem_set_zero(void *dst, unsigned int size) +{ + return memset(dst, 0, size); /* available in kernel in linux/string.h */ +} +
+STORAGE_CLASS_INLINE void +ia_css_cpu_mem_free(void *ptr) +{ + kfree(ptr); +} + +STORAGE_CLASS_INLINE void +ia_css_cpu_mem_cache_flush(void *ptr, unsigned int size) +{ + /* parameter check here */ + if (ptr == NULL) + return; + + clflush_cache_range(ptr, size); +} + +STORAGE_CLASS_INLINE void +ia_css_cpu_mem_cache_invalidate(void *ptr, unsigned int size) +{ + /* for now same as flush */ + ia_css_cpu_mem_cache_flush(ptr, size); +} + +#elif defined(_MSC_VER) + +#include +#include +#include + +extern void *hrt_malloc(size_t bytes, int zero_mem); +extern void *hrt_free(void *ptr); +extern void hrt_mem_cache_flush(void *ptr, unsigned int size); +extern void hrt_mem_cache_invalidate(void *ptr, unsigned int size); + +#define malloc(a) hrt_malloc(a, 1) +#define free(a) hrt_free(a) + +#define CSS_PAGE_SIZE (1<<12) + +STORAGE_CLASS_INLINE void* +ia_css_cpu_mem_alloc(unsigned int size) +{ + return malloc(size); +} + +STORAGE_CLASS_INLINE void* +ia_css_cpu_mem_alloc_page_aligned(unsigned int size) +{ + unsigned int buffer_size = size; + + /* Currently hrt_malloc calls Windows ExAllocatePoolWithTag() routine + * to request system memory. 
If the number of bytes is equal or bigger + * than the page size, then the returned address is page aligned, + * but if it's smaller it's not necessarily page-aligned We agreed + * with Windows team that we allocate a full page + * if it's less than page size + */ + if (buffer_size < CSS_PAGE_SIZE) + buffer_size = CSS_PAGE_SIZE; + + return ia_css_cpu_mem_alloc(buffer_size); +} + +STORAGE_CLASS_INLINE void* +ia_css_cpu_mem_set_zero(void *dst, unsigned int size) +{ + return memset(dst, 0, size); +} + +STORAGE_CLASS_INLINE void +ia_css_cpu_mem_free(void *ptr) +{ + free(ptr); +} + +STORAGE_CLASS_INLINE void +ia_css_cpu_mem_cache_flush(void *ptr, unsigned int size) +{ +#ifdef _KERNEL_MODE + hrt_mem_cache_flush(ptr, size); +#else + (void)ptr; + (void)size; +#endif +} + +STORAGE_CLASS_INLINE void +ia_css_cpu_mem_cache_invalidate(void *ptr, unsigned int size) +{ +#ifdef _KERNEL_MODE + hrt_mem_cache_invalidate(ptr, size); +#else + (void)ptr; + (void)size; +#endif +} + +#else + +#include +#include +#include +/* Needed for the MPROTECT */ +#include +#include +#include +#include + + +STORAGE_CLASS_INLINE void* +ia_css_cpu_mem_alloc(unsigned int size) +{ + return malloc(size); +} + +STORAGE_CLASS_INLINE void* +ia_css_cpu_mem_alloc_page_aligned(unsigned int size) +{ + int pagesize; + + pagesize = sysconf(_SC_PAGE_SIZE); + return memalign(pagesize, size); +} + +STORAGE_CLASS_INLINE void* +ia_css_cpu_mem_set_zero(void *dst, unsigned int size) +{ + return memset(dst, 0, size); +} + +STORAGE_CLASS_INLINE void +ia_css_cpu_mem_free(void *ptr) +{ + free(ptr); +} + +STORAGE_CLASS_INLINE void +ia_css_cpu_mem_cache_flush(void *ptr, unsigned int size) +{ + /* not needed in simulation */ + (void)ptr; + (void)size; +} + +STORAGE_CLASS_INLINE void +ia_css_cpu_mem_cache_invalidate(void *ptr, unsigned int size) +{ + /* not needed in simulation */ + (void)ptr; + (void)size; +} + +#endif + +#endif /* __CPU_MEM_SUPPORT_H */ diff --git 
a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/support/error_support.h b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/support/error_support.h new file mode 100644 index 0000000000000..9fe1f65125e6c --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/support/error_support.h @@ -0,0 +1,110 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ + +#ifndef __ERROR_SUPPORT_H +#define __ERROR_SUPPORT_H + +#if defined(__KERNEL__) +#include +#else +#include +#endif +#include + +/* OS-independent definition of IA_CSS errno values */ +/* #define IA_CSS_EINVAL 1 */ +/* #define IA_CSS_EFAULT 2 */ + +#ifdef __HIVECC +#define ERR_EMBEDDED 1 +#else +#define ERR_EMBEDDED 0 +#endif + +#if ERR_EMBEDDED +#define DECLARE_ERRVAL +#else +#define DECLARE_ERRVAL \ + int _errval = 0; +#endif + +/* Use "owl" in while to prevent compiler warnings in Windows */ +#define ALWAYS_FALSE ((void)0, 0) + +#define verifret(cond, error_type) \ +do { \ + if (!(cond)) { \ + return error_type; \ + } \ +} while (ALWAYS_FALSE) + +#define verifjmp(cond, error_tag) \ +do { \ + if (!(cond)) { \ + goto error_tag; \ + } \ +} while (ALWAYS_FALSE) + +#define verifexit(cond) \ +do { \ + if (!(cond)) { \ + goto EXIT; \ + } \ +} while (ALWAYS_FALSE) + +#if ERR_EMBEDDED +#define verifexitval(cond, error_tag) \ +do { \ + assert(cond); \ +} while (ALWAYS_FALSE) +#else +#define verifexitval(cond, error_tag) \ +do { \ + if (!(cond)) { \ + _errval = (error_tag); \ + goto 
EXIT; \ + } \ +} while (ALWAYS_FALSE) +#endif + +#if ERR_EMBEDDED +#define haserror(error_tag) (0) +#else +#define haserror(error_tag) \ + (_errval == (error_tag)) +#endif + +#if ERR_EMBEDDED +#define noerror() (1) +#else +#define noerror() \ + (_errval == 0) +#endif + +#define verifjmpexit(cond) \ +do { \ + if (!(cond)) { \ + goto EXIT; \ + } \ +} while (ALWAYS_FALSE) + +#define verifjmpexitsetretval(cond, retval) \ +do { \ + if (!(cond)) { \ + retval = -1; \ + goto EXIT; \ + } \ +} while (ALWAYS_FALSE) + +#endif /* __ERROR_SUPPORT_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/support/math_support.h b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/support/math_support.h new file mode 100644 index 0000000000000..633f86f1a1b09 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/support/math_support.h @@ -0,0 +1,314 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+*/ + +#ifndef __MATH_SUPPORT_H +#define __MATH_SUPPORT_H + +#include "storage_class.h" /* for STORAGE_CLASS_INLINE */ +#include "type_support.h" +#include "assert_support.h" + +/* in case we have min/max/MIN/MAX macro's undefine them */ +#ifdef min +#undef min +#endif +#ifdef max +#undef max +#endif +#ifdef MIN /* also defined in include/hrt/numeric.h from SDK */ +#undef MIN +#endif +#ifdef MAX +#undef MAX +#endif + +#ifndef UINT16_MAX +#define UINT16_MAX (0xffffUL) +#endif + +#ifndef UINT32_MAX +#define UINT32_MAX (0xffffffffUL) +#endif + +#define IS_ODD(a) ((a) & 0x1) +#define IS_EVEN(a) (!IS_ODD(a)) +#define IS_POWER2(a) (!((a)&((a)-1))) +#define IS_MASK_BITS_SET(a, b) ((a & b) != 0) + +/*To Find next power of 2 number from x */ +#define bit2(x) ((x) | ((x) >> 1)) +#define bit4(x) (bit2(x) | (bit2(x) >> 2)) +#define bit8(x) (bit4(x) | (bit4(x) >> 4)) +#define bit16(x) (bit8(x) | (bit8(x) >> 8)) +#define bit32(x) (bit16(x) | (bit16(x) >> 16)) +#define NEXT_POWER_OF_2(x) (bit32(x-1) + 1) + +/* force a value to a lower even value */ +#define EVEN_FLOOR(x) ((x) & ~1UL) + +/* A => B */ +#define IMPLIES(a, b) (!(a) || (b)) + +/* The ORIG_BITS th bit is the sign bit */ +/* Sign extends a ORIG_BITS bits long signed number to a 64-bit signed number */ +/* By type casting it can relimited to any valid type-size + * (32-bit signed or 16-bit or 8-bit) + */ +/* By masking it can be transformed to any arbitrary bit size */ +#define SIGN_EXTEND(VAL, ORIG_BITS) \ +((~(((VAL)&(1ULL<<((ORIG_BITS)-1)))-1))|(VAL)) + +#define EXTRACT_BIT(a, b) ((a >> b) & 1) + +/* for preprocessor and array sizing use MIN and MAX + otherwise use min and max */ +#define MAX(a, b) (((a) > (b)) ? (a) : (b)) +#define MIN(a, b) (((a) < (b)) ? (a) : (b)) +#define CLIP(a, b, c) MIN((MAX((a), (b))), (c)) +/* Integer round-down division of a with b */ +#define FLOOR_DIV(a, b) ((b) ? 
((a) / (b)) : 0) +/* Align a to the lower multiple of b */ +#define FLOOR_MUL(a, b) (FLOOR_DIV(a, b) * (b)) +/* Integer round-up division of a with b */ +#define CEIL_DIV(a, b) ((b) ? (((a) + (b) - 1) / (b)) : 0) +/* Align a to the upper multiple of b */ +#define CEIL_MUL(a, b) (CEIL_DIV(a, b) * (b)) +/* Align a to the upper multiple of b - fast implementation + * for cases when b=pow(2,n) + */ +#define CEIL_MUL2(a, b) (((a) + (b) - 1) & ~((b) - 1)) +/* integer round-up division of a with pow(2,b) */ +#define CEIL_SHIFT(a, b) (((a) + (1UL << (b)) - 1) >> (b)) +/* Align a to the upper multiple of pow(2,b) */ +#define CEIL_SHIFT_MUL(a, b) (CEIL_SHIFT(a, b) << (b)) +/* Absolute difference of a and b */ +#define ABS_DIF(a, b) (((a) > (b)) ? ((a) - (b)) : ((b) - (a))) +#define ABS(a) ABS_DIF(a, 0) +/* Square of x */ +#define SQR(x) ((x)*(x)) +/* Integer round-half-down division of a and b */ +#define ROUND_HALF_DOWN_DIV(a, b) ((b) ? ((a) + (b / 2) - 1) / (b) : 0) +/* Align a to the round-half-down multiple of b */ +#define ROUND_HALF_DOWN_MUL(a, b) (ROUND_HALF_DOWN_DIV(a, b) * (b)) + +#define MAX3(a, b, c) MAX((a), MAX((b), (c))) +#define MIN3(a, b, c) MIN((a), MIN((b), (c))) +#define MAX4(a, b, c, d) MAX((MAX((a), (b))), (MAX((c), (d)))) +#define MIN4(a, b, c, d) MIN((MIN((a), (b))), (MIN((c), (d)))) + +/* min and max should not be macros as they will evaluate their arguments twice. + if you really need a macro (e.g. for CPP or for initializing an array) + use MIN() and MAX(), otherwise use min() and max() */ + +#ifndef ARRAY_SIZE +#define ARRAY_SIZE(a) ((sizeof(a) / sizeof(*(a)))) +#endif + +#ifndef BYTES +#define BYTES(bit) (((bit)+7)/8) +#endif + +#if !defined(PIPE_GENERATION) +STORAGE_CLASS_INLINE unsigned int max_value_bits(unsigned int bits) +{ + return (bits == 0) ?
0 : ((2 * ((1 << ((bits) - 1)) - 1)) + 1); +} +STORAGE_CLASS_INLINE unsigned int max_value_bytes(unsigned int bytes) +{ + return max_value_bits(IA_CSS_UINT8_T_BITS * bytes); +} +STORAGE_CLASS_INLINE int max(int a, int b) +{ + return MAX(a, b); +} + +STORAGE_CLASS_INLINE int min(int a, int b) +{ + return MIN(a, b); +} + +STORAGE_CLASS_INLINE int clip(int a, int b, int c) +{ + return min(max(a, b), c); +} + +STORAGE_CLASS_INLINE unsigned int umax(unsigned int a, unsigned int b) +{ + return MAX(a, b); +} + +STORAGE_CLASS_INLINE unsigned int umin(unsigned int a, unsigned int b) +{ + return MIN(a, b); +} + +STORAGE_CLASS_INLINE unsigned int uclip(unsigned int a, unsigned int b, + unsigned int c) +{ + return umin(umax(a, b), c); +} + +STORAGE_CLASS_INLINE unsigned int ceil_div(unsigned int a, unsigned int b) +{ + return CEIL_DIV(a, b); +} + +STORAGE_CLASS_INLINE unsigned int ceil_mul(unsigned int a, unsigned int b) +{ + return CEIL_MUL(a, b); +} + +STORAGE_CLASS_INLINE unsigned int ceil_mul2(unsigned int a, unsigned int b) +{ + return CEIL_MUL2(a, b); +} + +STORAGE_CLASS_INLINE unsigned int ceil_shift(unsigned int a, unsigned int b) +{ + return CEIL_SHIFT(a, b); +} + +STORAGE_CLASS_INLINE unsigned int ceil_shift_mul(unsigned int a, unsigned int b) +{ + return CEIL_SHIFT_MUL(a, b); +} + +STORAGE_CLASS_INLINE int abs_dif(int a, int b) +{ + return ABS_DIF(a, b); +} + +STORAGE_CLASS_INLINE unsigned int uabs_dif(unsigned int a, unsigned int b) +{ + return ABS_DIF(a, b); +} + +STORAGE_CLASS_INLINE unsigned int round_half_down_div(unsigned int a, + unsigned int b) +{ + return ROUND_HALF_DOWN_DIV(a, b); +} + +STORAGE_CLASS_INLINE unsigned int round_half_down_mul(unsigned int a, + unsigned int b) +{ + return ROUND_HALF_DOWN_MUL(a, b); +} + +STORAGE_CLASS_INLINE unsigned int ceil_pow2(uint32_t a) +{ + unsigned int retval = 0; + + if (IS_POWER2(a)) { + retval = (unsigned int)a; + } else { + unsigned int v = a; + + v |= v>>1; + v |= v>>2; + v |= v>>4; + v |= v>>8; + v |= v>>16; + 
retval = (unsigned int)(v+1); + } + return retval; +} + +STORAGE_CLASS_INLINE unsigned int floor_log2(uint32_t a) +{ + static const uint8_t de_bruijn[] = { + 0, 9, 1, 10, 13, 21, 2, 29, 11, 14, 16, 18, 22, 25, 3, 30, + 8, 12, 20, 28, 15, 17, 24, 7, 19, 27, 23, 6, 26, 5, 4, 31 + }; + uint32_t v = a; + + v |= v>>1; + v |= v>>2; + v |= v>>4; + v |= v>>8; + v |= v>>16; + return (unsigned int)de_bruijn[(v*0x07C4ACDDU)>>27]; +} + +/* Divide by small power of two */ +STORAGE_CLASS_INLINE unsigned int +udiv2_small_i(uint32_t a, uint32_t b) +{ + assert(b <= 2); + return a >> (b-1); +} + +/* optimized divide for small results + * a will be divided by b + * outbits is the number of bits needed for the result + * the smaller the cheaper the function will be. + * if the result doesn't fit in the number of output bits + * the result is incorrect and the function will assert + */ +STORAGE_CLASS_INLINE unsigned int +udiv_medium(uint32_t a, uint32_t b, unsigned outbits) +{ + int bit; + unsigned res = 0; + unsigned mask; + +#ifdef VOLCANO +#pragma ipu unroll +#endif + for (bit = outbits-1 ; bit >= 0; bit--) { + mask = 1<= (b<= c ? a+b-c : a+b); +} + +/* + * For SP and ISP, SDK provides the definition of OP_asp_slor. 
+ * We need it only for host + */ +STORAGE_CLASS_INLINE unsigned int OP_asp_slor(int a, int b, int c) +{ + return ((a << c) | b); +} +#else +#include "hive/customops.h" +#endif /* !defined(__VIED_CELL) */ + +#endif /* !defined(PIPE_GENERATION) */ + +#if !defined(__KERNEL__) +#define clamp(a, min_val, max_val) MIN(MAX((a), (min_val)), (max_val)) +#endif /* !defined(__KERNEL__) */ + +#endif /* __MATH_SUPPORT_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/support/misc_support.h b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/support/misc_support.h new file mode 100644 index 0000000000000..a2c2729e946d2 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/support/misc_support.h @@ -0,0 +1,76 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+*/ + +#ifndef __MISC_SUPPORT_H +#define __MISC_SUPPORT_H + +/* suppress compiler warnings on unused variables */ +#ifndef NOT_USED +#define NOT_USED(a) ((void)(a)) +#endif + +/* Calculate the total bytes for pow(2) byte alignment */ +#define tot_bytes_for_pow2_align(pow2, cur_bytes) \ + ((cur_bytes + (pow2 - 1)) & ~(pow2 - 1)) + +/* Display the macro value given a string */ +#define _STR(x) #x +#define STR(x) _STR(x) + +/* Concatenate */ +#ifndef CAT /* also defined in */ +#define _CAT(a, b) a ## b +#define CAT(a, b) _CAT(a, b) +#endif + +#define _CAT3(a, b, c) a ## b ## c +#define CAT3(a, b, c) _CAT3(a, b, c) + +/* NO_HOIST, NO_CSE, NO_ALIAS attributes must be ignored for host code */ +#ifndef __HIVECC +#ifndef NO_HOIST +#define NO_HOIST +#endif +#ifndef NO_CSE +#define NO_CSE +#endif +#ifndef NO_ALIAS +#define NO_ALIAS +#endif +#endif + +enum hive_method_id { + HIVE_METHOD_ID_CRUN, + HIVE_METHOD_ID_UNSCHED, + HIVE_METHOD_ID_SCHED, + HIVE_METHOD_ID_TARGET +}; + +/* Derive METHOD */ +#if defined(C_RUN) + #define HIVE_METHOD "crun" + #define HIVE_METHOD_ID HIVE_METHOD_ID_CRUN +#elif defined(HRT_UNSCHED) + #define HIVE_METHOD "unsched" + #define HIVE_METHOD_ID HIVE_METHOD_ID_UNSCHED +#elif defined(HRT_SCHED) + #define HIVE_METHOD "sched" + #define HIVE_METHOD_ID HIVE_METHOD_ID_SCHED +#else + #define HIVE_METHOD "target" + #define HIVE_METHOD_ID HIVE_METHOD_ID_TARGET + #define HRT_TARGET 1 +#endif + +#endif /* __MISC_SUPPORT_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/support/platform_support.h b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/support/platform_support.h new file mode 100644 index 0000000000000..1752efc7b4df8 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/support/platform_support.h @@ -0,0 +1,146 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. 
+ * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ + +#ifndef __PLATFORM_SUPPORT_H +#define __PLATFORM_SUPPORT_H + +#include "storage_class.h" + +#define MSEC_IN_SEC 1000 +#define NSEC_IN_MSEC 1000000 + +#if defined(_MSC_VER) +#include + +#define IA_CSS_EXTERN +#define SYNC_WITH(x) +#define CSS_ALIGN(d, a) _declspec(align(a)) d + +STORAGE_CLASS_INLINE void ia_css_sleep(void) +{ + /* Placeholder for driver team*/ +} + +STORAGE_CLASS_INLINE void ia_css_sleep_msec(long unsigned int delay_time_ms) +{ + /* Placeholder for driver team*/ + (void)delay_time_ms; +} + +#elif defined(__HIVECC) +#include +#include + +#define IA_CSS_EXTERN extern +#define CSS_ALIGN(d, a) d __attribute__((aligned(a))) +STORAGE_CLASS_INLINE void ia_css_sleep(void) +{ + OP___schedule(); +} + +#elif defined(__KERNEL__) +#include +#include + +#define IA_CSS_EXTERN +#define CSS_ALIGN(d, a) d __aligned(a) + +STORAGE_CLASS_INLINE void ia_css_sleep(void) +{ + usleep_range(1, 50); +} + +#elif defined(__GNUC__) +#include + +#define IA_CSS_EXTERN +#define CSS_ALIGN(d, a) d __attribute__((aligned(a))) + +/* Define some __HIVECC specific macros to nothing to allow host code compilation */ +#ifndef NO_ALIAS +#define NO_ALIAS +#endif + +#ifndef SYNC_WITH +#define SYNC_WITH(x) +#endif + +#if defined(HRT_CSIM) + #include "hrt/host.h" /* Using hrt_sleep from hrt/host.h */ + STORAGE_CLASS_INLINE void ia_css_sleep(void) + { + /* For the SDK still using hrt_sleep */ + hrt_sleep(); + } + STORAGE_CLASS_INLINE void ia_css_sleep_msec(long unsigned int delay_time_ms) + { + /* For the SDK still using hrt_sleep */ + 
long unsigned int i = 0; + for (i = 0; i < delay_time_ms; i++) { + hrt_sleep(); + } + } +#else + #include + STORAGE_CLASS_INLINE void ia_css_sleep(void) + { + struct timespec delay_time; + + delay_time.tv_sec = 0; + delay_time.tv_nsec = 10; + nanosleep(&delay_time, NULL); + } + STORAGE_CLASS_INLINE void ia_css_sleep_msec(long unsigned int delay_time_ms) + { + struct timespec delay_time; + + if (delay_time_ms >= MSEC_IN_SEC) { + delay_time.tv_sec = delay_time_ms / MSEC_IN_SEC; + delay_time.tv_nsec = (delay_time_ms % MSEC_IN_SEC) * NSEC_IN_MSEC; + } else { + delay_time.tv_sec = 0; + delay_time.tv_nsec = delay_time_ms * NSEC_IN_MSEC; + } + nanosleep(&delay_time, NULL); + } +#endif + +#else +#include +#endif + +/*needed for the include in stdint.h for various environments */ +#include "type_support.h" +#include "storage_class.h" + +#define MAX_ALIGNMENT 8 +#define aligned_uint8(type, obj) CSS_ALIGN(uint8_t obj, 1) +#define aligned_int8(type, obj) CSS_ALIGN(int8_t obj, 1) +#define aligned_uint16(type, obj) CSS_ALIGN(uint16_t obj, 2) +#define aligned_int16(type, obj) CSS_ALIGN(int16_t obj, 2) +#define aligned_uint32(type, obj) CSS_ALIGN(uint32_t obj, 4) +#define aligned_int32(type, obj) CSS_ALIGN(int32_t obj, 4) + +/* needed as long as hivecc does not define the type (u)int64_t */ +#if defined(__HIVECC) +#define aligned_uint64(type, obj) CSS_ALIGN(unsigned long long obj, 8) +#define aligned_int64(type, obj) CSS_ALIGN(signed long long obj, 8) +#else +#define aligned_uint64(type, obj) CSS_ALIGN(uint64_t obj, 8) +#define aligned_int64(type, obj) CSS_ALIGN(int64_t obj, 8) +#endif +#define aligned_enum(enum_type, obj) CSS_ALIGN(uint32_t obj, 4) +#define aligned_struct(struct_type, obj) struct_type obj + +#endif /* __PLATFORM_SUPPORT_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/support/print_support.h b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/support/print_support.h new file mode 100644 index 0000000000000..0b614f7ef12d8 --- /dev/null 
+++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/support/print_support.h @@ -0,0 +1,90 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ + +#ifndef __PRINT_SUPPORT_H +#define __PRINT_SUPPORT_H + +#if defined(_MSC_VER) +#ifdef _KERNEL_MODE + +/* TODO: Windows driver team to provide tracing mechanism for kernel mode + * e.g. DbgPrint and DbgPrintEx + */ +extern void FwTracePrintPWARN(const char *fmt, ...); +extern void FwTracePrintPRINT(const char *fmt, ...); +extern void FwTracePrintPERROR(const char *fmt, ...); +extern void FwTracePrintPDEBUG(const char *fmt, ...); + +#define PWARN(format, ...) FwTracePrintPWARN(format, __VA_ARGS__) +#define PRINT(format, ...) FwTracePrintPRINT(format, __VA_ARGS__) +#define PERROR(format, ...) FwTracePrintPERROR(format, __VA_ARGS__) +#define PDEBUG(format, ...) FwTracePrintPDEBUG(format, __VA_ARGS__) + +#else +/* Windows usermode compilation */ +#include + +/* To change the defines below, communicate with Windows team first + * to ensure they will not get flooded with prints + */ +/* This is temporary workaround to avoid flooding userspace + * Windows driver with prints + */ + +#define PWARN(format, ...) +#define PRINT(format, ...) +#define PERROR(format, ...) printf("error: " format, __VA_ARGS__) +#define PDEBUG(format, ...) 
+ +#endif /* _KERNEL_MODE */ + +#elif defined(__HIVECC) +#include +/* To be revised + +#define PWARN(format) +#define PRINT(format) OP___printstring(format) +#define PERROR(variable) OP___dump(9999, arguments) +#define PDEBUG(variable) OP___dump(__LINE__, arguments) + +*/ + +#define PRINTSTRING(str) OP___printstring(str) + +#elif defined(__KERNEL__) +#include +#include + + +#define PWARN(format, arguments...) pr_debug(format, ##arguments) +#define PRINT(format, arguments...) pr_debug(format, ##arguments) +#define PERROR(format, arguments...) pr_debug(format, ##arguments) +#define PDEBUG(format, arguments...) pr_debug(format, ##arguments) + +#else +#include + +#define PRINT_HELPER(prefix, format, ...) printf(prefix format "%s", __VA_ARGS__) + +/* The trailing "" allows the edge case of printing single string */ +#define PWARN(...) PRINT_HELPER("warning: ", __VA_ARGS__, "") +#define PRINT(...) PRINT_HELPER("", __VA_ARGS__, "") +#define PERROR(...) PRINT_HELPER("error: ", __VA_ARGS__, "") +#define PDEBUG(...) PRINT_HELPER("debug: ", __VA_ARGS__, "") + +#define PRINTSTRING(str) PRINT(str) + +#endif + +#endif /* __PRINT_SUPPORT_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/support/storage_class.h b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/support/storage_class.h new file mode 100644 index 0000000000000..af19b4026220a --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/support/storage_class.h @@ -0,0 +1,51 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU General Public License for + * more details. +*/ + +#ifndef __STORAGE_CLASS_H +#define __STORAGE_CLASS_H + +#define STORAGE_CLASS_EXTERN \ +extern + +#if defined(_MSC_VER) +#define STORAGE_CLASS_INLINE \ +static __inline +#elif defined(__HIVECC) +#define STORAGE_CLASS_INLINE \ +static inline +#else +#define STORAGE_CLASS_INLINE \ +static inline +#endif + +/* Register struct */ +#ifndef __register +#if defined(__HIVECC) && !defined(PIPE_GENERATION) +#define __register register +#else +#define __register +#endif +#endif + +/* Memory attribute */ +#ifndef MEM +#ifdef PIPE_GENERATION +#elif defined(__HIVECC) +#include +#else +#define MEM(any_mem) +#endif +#endif + +#endif /* __STORAGE_CLASS_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/support/type_support.h b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/support/type_support.h new file mode 100644 index 0000000000000..a86da0e78941c --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/support/type_support.h @@ -0,0 +1,80 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ + +#ifndef __TYPE_SUPPORT_H +#define __TYPE_SUPPORT_H + +/* Per the DLI spec, types are in "type_support.h" and + * "platform_support.h" is for unclassified/to be refactored + * platform specific definitions. 
+ */ +#define IA_CSS_UINT8_T_BITS 8 +#define IA_CSS_UINT16_T_BITS 16 +#define IA_CSS_UINT32_T_BITS 32 +#define IA_CSS_INT32_T_BITS 32 +#define IA_CSS_UINT64_T_BITS 64 + + +#if defined(_MSC_VER) +#include +#include +#include +#include +#if defined(_M_X64) +#define HOST_ADDRESS(x) (unsigned long long)(x) +#else +#define HOST_ADDRESS(x) (unsigned long)(x) +#endif + +#elif defined(PARAM_GENERATION) +/* Nothing */ +#elif defined(__HIVECC) +#include +#include +#include +#include +#define HOST_ADDRESS(x) (unsigned long)(x) + +typedef long long int64_t; +typedef unsigned long long uint64_t; + +#elif defined(__KERNEL__) +#include +#include + +#define CHAR_BIT (8) +#define HOST_ADDRESS(x) (unsigned long)(x) + +#elif defined(__GNUC__) +#include +#include +#include +#include +#define HOST_ADDRESS(x) (unsigned long)(x) + +#else /* default is for the FIST environment */ +#include +#include +#include +#include +#define HOST_ADDRESS(x) (unsigned long)(x) + +#endif + +#if !defined(PIPE_GENERATION) && !defined(IO_GENERATION) +/* genpipe cannot handle the void* syntax */ +typedef void *HANDLE; +#endif + +#endif /* __TYPE_SUPPORT_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/syscom/interface/ia_css_syscom.h b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/syscom/interface/ia_css_syscom.h new file mode 100644 index 0000000000000..5426d6d18e0bd --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/syscom/interface/ia_css_syscom.h @@ -0,0 +1,247 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. 
+ * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ + +#ifndef __IA_CSS_SYSCOM_H +#define __IA_CSS_SYSCOM_H + + +/* + * The CSS Subsystem Communication Interface - Host side + * + * It provides subsystem initialization, send ports and receive ports + * The PSYS and ISYS interfaces are implemented on top of this interface. + */ + +#include "ia_css_syscom_config.h" + +#define FW_ERROR_INVALID_PARAMETER (-1) +#define FW_ERROR_BAD_ADDRESS (-2) +#define FW_ERROR_BUSY (-3) +#define FW_ERROR_NO_MEMORY (-4) + +struct ia_css_syscom_context; + +/** + * ia_css_syscom_size() - provide syscom external buffer requirements + * @config: pointer to the configuration data (read) + * @size: pointer to the buffer size (write) + * + * Purpose: + * - Provide external buffer requirements + * - To be used for external buffer allocation + * + */ +extern void +ia_css_syscom_size( + const struct ia_css_syscom_config *cfg, + struct ia_css_syscom_size *size +); + +/** + * ia_css_syscom_open() - initialize a subsystem context + * @config: pointer to the configuration data (read) + * @buf: pointer to externally allocated buffers (read) + * @returns: struct ia_css_syscom_context* on success, 0 otherwise. + * + * Purpose: + * - initialize host side data structures + * - boot the subsystem? + * + */ +extern struct ia_css_syscom_context* +ia_css_syscom_open( + struct ia_css_syscom_config *config, + struct ia_css_syscom_buf *buf +); + +/** + * ia_css_syscom_close() - signal close to cell + * @context: pointer to the subsystem context + * @returns: 0 on success, -3 (FW_ERROR_BUSY) if SPC is not ready yet. 
+ * + * Purpose: + * Request from the Cell to terminate + */ +extern int +ia_css_syscom_close( + struct ia_css_syscom_context *context +); + +/** + * ia_css_syscom_release() - free context + * @context: pointer to the subsystem context + * @force: flag which specifies whether cell + * state will be checked before freeing the + * context. + * @returns: 0 on success, -3 (FW_ERROR_BUSY) if cell + * is busy and call was not forced. + * + * Purpose: + * 2 modes, with first (force==true) immediately + * free context, and second (force==false) verifying + * that the cell state is ok and freeing context if so, + * returning error otherwise. + */ +extern int +ia_css_syscom_release( + struct ia_css_syscom_context *context, + unsigned int force +); + +/** + * Open a port for sending tokens to the subsystem + * @context: pointer to the subsystem context + * @port: send port index + * @returns: 0 on success, -1 (FW_ERROR_INVALID_PARAMETER) otherwise. + */ +extern int +ia_css_syscom_send_port_open( + struct ia_css_syscom_context *context, + unsigned int port +); + +/** + * Closes a port for sending tokens to the subsystem + * @context: pointer to the subsystem context + * @port: send port index + * @returns: 0 on success, -1 (FW_ERROR_INVALID_PARAMETER) otherwise. + */ +extern int +ia_css_syscom_send_port_close( + struct ia_css_syscom_context *context, + unsigned int port +); + +/** + * Get the number of tokens that can be sent to a port without error. + * @context: pointer to the subsystem context + * @port: send port index + * @returns: number of available tokens on success, + * -1 (FW_ERROR_INVALID_PARAMETER) otherwise. 
+ */ +extern int +ia_css_syscom_send_port_available( + struct ia_css_syscom_context *context, + unsigned int port +); + +/** + * Send a token to the subsystem port + * The token size is determined during initialization + * @context: pointer to the subsystem context + * @port: send port index + * @token: pointer to the token value that is transferred to the subsystem + * @returns: number of tokens sent on success, + * -1 (FW_ERROR_INVALID_PARAMETER) otherwise. + */ +extern int +ia_css_syscom_send_port_transfer( + struct ia_css_syscom_context *context, + unsigned int port, + const void *token +); + +/** + * Open a port for receiving tokens to the subsystem + * @context: pointer to the subsystem context + * @port: receive port index + * @returns: 0 on success, -1 (FW_ERROR_INVALID_PARAMETER) otherwise. + */ +extern int +ia_css_syscom_recv_port_open( + struct ia_css_syscom_context *context, + unsigned int port +); + +/** + * Closes a port for receiving tokens to the subsystem + * Returns 0 on success, otherwise negative value of error code + * @context: pointer to the subsystem context + * @port: receive port index + * @returns: 0 on success, -1 (FW_ERROR_INVALID_PARAMETER) otherwise. + */ +extern int +ia_css_syscom_recv_port_close( + struct ia_css_syscom_context *context, + unsigned int port +); + +/** + * Get the number of tokens that can be received from a port without errors. + * @context: pointer to the subsystem context + * @port: receive port index + * @returns: number of available tokens on success, + * -1 (FW_ERROR_INVALID_PARAMETER) otherwise. 
+ */ +extern int +ia_css_syscom_recv_port_available( + struct ia_css_syscom_context *context, + unsigned int port +); + +/** + * Receive a token from the subsystem port + * The token size is determined during initialization + * @context: pointer to the subsystem context + * @port: receive port index + * @token (output): pointer to (space for) the token to be received + * @returns: number of tokens received on success, + * -1 (FW_ERROR_INVALID_PARAMETER) otherwise. + */ +extern int +ia_css_syscom_recv_port_transfer( + struct ia_css_syscom_context *context, + unsigned int port, + void *token +); + +#if HAS_DUAL_CMD_CTX_SUPPORT +/** + * ia_css_syscom_store_dmem() - store subsystem context information in DMEM + * @context: pointer to the subsystem context + * @ssid: subsystem id + * @vtl0_addr_mask: VTL0 address mask; only applicable when the passed in context is secure + * @returns: 0 on success, -1 (FW_ERROR_INVALID_PARAMETER) otherwise. + */ +extern int +ia_css_syscom_store_dmem( + struct ia_css_syscom_context *context, + unsigned int ssid, + unsigned int vtl0_addr_mask +); + +/** + * ia_css_syscom_set_trustlet_status() - store trustlet configuration setting + * @context: pointer to the subsystem context + * @trustlet_exist: 1 if trustlet exists + */ +extern void +ia_css_syscom_set_trustlet_status( + unsigned int dmem_addr, + unsigned int ssid, + bool trustlet_exist +); + +/** + * ia_css_syscom_is_ab_spc_ready() - check if SPC access blocker programming is completed + * @context: pointer to the subsystem context + * @returns: 1 when status is ready. 
0 otherwise + */ +bool +ia_css_syscom_is_ab_spc_ready( + struct ia_css_syscom_context *ctx +); +#endif /* HAS_DUAL_CMD_CTX_SUPPORT */ + +#endif /* __IA_CSS_SYSCOM_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/syscom/interface/ia_css_syscom_config.h b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/syscom/interface/ia_css_syscom_config.h new file mode 100644 index 0000000000000..2f5eb309df94e --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/syscom/interface/ia_css_syscom_config.h @@ -0,0 +1,97 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+*/ + +#ifndef __IA_CSS_SYSCOM_CONFIG_H +#define __IA_CSS_SYSCOM_CONFIG_H + +#include +#include + +/* syscom size struct, output of ia_css_syscom_size, + * input for (external) allocation + */ +struct ia_css_syscom_size { + /* Size of host buffer */ + unsigned int cpu; + /* Size of shared config buffer (host to cell) */ + unsigned int shm; + /* Size of shared input queue buffers (host to cell) */ + unsigned int ibuf; + /* Size of shared output queue buffers (cell to host) */ + unsigned int obuf; +}; + +/* syscom buffer struct, output of (external) allocation, + * input for ia_css_syscom_open + */ +struct ia_css_syscom_buf { + char *cpu; /* host buffer */ + + /* shared memory buffer host address */ + host_virtual_address_t shm_host; + /* shared memory buffer cell address */ + vied_virtual_address_t shm_cell; + + /* input queue shared buffer host address */ + host_virtual_address_t ibuf_host; + /* input queue shared buffer cell address */ + vied_virtual_address_t ibuf_cell; + + /* output queue shared buffer host address */ + host_virtual_address_t obuf_host; + /* output queue shared buffer cell address */ + vied_virtual_address_t obuf_cell; +}; + +struct ia_css_syscom_queue_config { + unsigned int queue_size; /* tokens per queue */ + unsigned int token_size; /* bytes per token */ +}; + +/** + * Parameter struct for ia_css_syscom_open + */ +struct ia_css_syscom_config { + /* This member is no longer used in syscom. 
+ It is kept to not break any driver builds, and will be removed when + all assignments have been removed from driver code */ + /* address of firmware in DDR/IMR */ + unsigned long long host_firmware_address; + + /* address of firmware in DDR, seen from SPC */ + unsigned int vied_firmware_address; + + unsigned int ssid; + unsigned int mmid; + + unsigned int num_input_queues; + unsigned int num_output_queues; + struct ia_css_syscom_queue_config *input; + struct ia_css_syscom_queue_config *output; + + unsigned int regs_addr; + unsigned int dmem_addr; + + /* firmware-specific configuration data */ + void *specific_addr; + unsigned int specific_size; + + /* if true; secure syscom in VTIO Case + * if false, non-secure syscom + */ + bool secure; + unsigned int vtl0_addr_mask; /* only applicable in 'secure' case */ +}; + +#endif /* __IA_CSS_SYSCOM_CONFIG_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/syscom/interface/ia_css_syscom_trace.h b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/syscom/interface/ia_css_syscom_trace.h new file mode 100644 index 0000000000000..2c32693c2a82e --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/syscom/interface/ia_css_syscom_trace.h @@ -0,0 +1,51 @@ +/* + * Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+ */ + +#ifndef __IA_CSS_SYSCOM_TRACE_H +#define __IA_CSS_SYSCOM_TRACE_H + +#include "ia_css_trace.h" + +#define SYSCOM_TRACE_LEVEL_DEFAULT 1 +#define SYSCOM_TRACE_LEVEL_DEBUG 2 + +/* Set to default level if no level is defined */ +#ifndef SYSCOM_TRACE_LEVEL +#define SYSCOM_TRACE_LEVEL SYSCOM_TRACE_LEVEL_DEFAULT +#endif /* SYSCOM_TRACE_LEVEL */ + +/* SYSCOM Module tracing backend is mapped to TUNIT tracing for target platforms */ +#ifdef __HIVECC +# ifndef HRT_CSIM +# define SYSCOM_TRACE_METHOD IA_CSS_TRACE_METHOD_TRACE +# else +# define SYSCOM_TRACE_METHOD IA_CSS_TRACE_METHOD_NATIVE +# endif +#else +# define SYSCOM_TRACE_METHOD IA_CSS_TRACE_METHOD_NATIVE +#endif + +#define SYSCOM_TRACE_LEVEL_INFO IA_CSS_TRACE_LEVEL_ENABLED +#define SYSCOM_TRACE_LEVEL_WARNING IA_CSS_TRACE_LEVEL_ENABLED +#define SYSCOM_TRACE_LEVEL_ERROR IA_CSS_TRACE_LEVEL_ENABLED + +#if (SYSCOM_TRACE_LEVEL == SYSCOM_TRACE_LEVEL_DEFAULT) +# define SYSCOM_TRACE_LEVEL_VERBOSE IA_CSS_TRACE_LEVEL_DISABLED +#elif (SYSCOM_TRACE_LEVEL == SYSCOM_TRACE_LEVEL_DEBUG) +# define SYSCOM_TRACE_LEVEL_VERBOSE IA_CSS_TRACE_LEVEL_ENABLED +#else +# error "Connection manager trace level not defined!" +#endif /* SYSCOM_TRACE_LEVEL */ + +#endif /* __IA_CSS_SYSCOM_TRACE_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/syscom/src/ia_css_syscom.c b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/syscom/src/ia_css_syscom.c new file mode 100644 index 0000000000000..cdf9df0531ff0 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/syscom/src/ia_css_syscom.c @@ -0,0 +1,650 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. 
+ * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ + +#include "ia_css_syscom.h" + +#include "ia_css_syscom_context.h" +#include "ia_css_syscom_config_fw.h" +#include "ia_css_syscom_trace.h" + +#include "queue.h" +#include "send_port.h" +#include "recv_port.h" +#include "regmem_access.h" + +#include "error_support.h" +#include "cpu_mem_support.h" + +#include "queue_struct.h" +#include "send_port_struct.h" +#include "recv_port_struct.h" + +#include "type_support.h" +#include +#include +#include "platform_support.h" + +#include "ia_css_cell.h" + +/* struct of internal buffer sizes */ +struct ia_css_syscom_size_intern { + unsigned int context; + unsigned int input_queue; + unsigned int output_queue; + unsigned int input_port; + unsigned int output_port; + + unsigned int fw_config; + unsigned int specific; + + unsigned int input_buffer; + unsigned int output_buffer; +}; + +/* Allocate buffers internally, when no buffers are provided */ +static int +ia_css_syscom_alloc( + unsigned int ssid, + unsigned int mmid, + const struct ia_css_syscom_size *size, + struct ia_css_syscom_buf *buf) +{ + /* zero the buffer to set all pointers to zero */ + memset(buf, 0, sizeof(*buf)); + + /* allocate cpu_mem */ + buf->cpu = (char *)ia_css_cpu_mem_alloc(size->cpu); + if (!buf->cpu) + goto EXIT7; + + /* allocate and map shared config buffer */ + buf->shm_host = shared_memory_alloc(mmid, size->shm); + if (!buf->shm_host) + goto EXIT6; + buf->shm_cell = shared_memory_map(ssid, mmid, buf->shm_host); + if (!buf->shm_cell) + goto EXIT5; + + /* allocate and map input queue buffer */ + buf->ibuf_host = shared_memory_alloc(mmid, size->ibuf); + if (!buf->ibuf_host) + goto EXIT4; + buf->ibuf_cell = shared_memory_map(ssid, mmid, buf->ibuf_host); + if (!buf->ibuf_cell) + goto EXIT3; + + /* allocate and 
 map output queue buffer */ + buf->obuf_host = shared_memory_alloc(mmid, size->obuf); + if (!buf->obuf_host) + goto EXIT2; + buf->obuf_cell = shared_memory_map(ssid, mmid, buf->obuf_host); + if (!buf->obuf_cell) + goto EXIT1; + + return 0; + +EXIT1: shared_memory_free(mmid, buf->obuf_host); +EXIT2: shared_memory_unmap(ssid, mmid, buf->ibuf_cell); +EXIT3: shared_memory_free(mmid, buf->ibuf_host); +EXIT4: shared_memory_unmap(ssid, mmid, buf->shm_cell); +EXIT5: shared_memory_free(mmid, buf->shm_host); +EXIT6: ia_css_cpu_mem_free(buf->cpu); +EXIT7: return FW_ERROR_NO_MEMORY; +} + +static void +ia_css_syscom_size_intern( + const struct ia_css_syscom_config *cfg, + struct ia_css_syscom_size_intern *size) +{ + /* convert syscom config into syscom internal size struct */ + + unsigned int i; + + size->context = sizeof(struct ia_css_syscom_context); + size->input_queue = cfg->num_input_queues * sizeof(struct sys_queue); + size->output_queue = cfg->num_output_queues * sizeof(struct sys_queue); + size->input_port = cfg->num_input_queues * sizeof(struct send_port); + size->output_port = cfg->num_output_queues * sizeof(struct recv_port); + + size->fw_config = sizeof(struct ia_css_syscom_config_fw); + size->specific = cfg->specific_size; + + /* accumulate input queue buffer sizes */ + size->input_buffer = 0; + for (i = 0; i < cfg->num_input_queues; i++) { + size->input_buffer += + sys_queue_buf_size(cfg->input[i].queue_size, + cfg->input[i].token_size); + } + + /* accumulate output queue buffer sizes */ + size->output_buffer = 0; + for (i = 0; i < cfg->num_output_queues; i++) { + size->output_buffer += + sys_queue_buf_size(cfg->output[i].queue_size, + cfg->output[i].token_size); + } +}
i->fw_config + i->input_queue + i->output_queue + i->specific; + e->ibuf = i->input_buffer; + e->obuf = i->output_buffer; +} + +/* Function that provides buffer sizes to be allocated */ +void +ia_css_syscom_size( + const struct ia_css_syscom_config *cfg, + struct ia_css_syscom_size *size) +{ + struct ia_css_syscom_size_intern i; + + ia_css_syscom_size_intern(cfg, &i); + ia_css_syscom_size_extern(&i, size); +} + +static struct ia_css_syscom_context* +ia_css_syscom_assign_buf( + const struct ia_css_syscom_size_intern *i, + const struct ia_css_syscom_buf *buf) +{ + struct ia_css_syscom_context *ctx; + char *cpu_mem_buf; + host_virtual_address_t shm_buf_host; + vied_virtual_address_t shm_buf_cell; + + /* host context */ + cpu_mem_buf = buf->cpu; + + ctx = (struct ia_css_syscom_context *)cpu_mem_buf; + ia_css_cpu_mem_set_zero(ctx, i->context); + cpu_mem_buf += i->context; + + ctx->input_queue = (struct sys_queue *) cpu_mem_buf; + cpu_mem_buf += i->input_queue; + + ctx->output_queue = (struct sys_queue *) cpu_mem_buf; + cpu_mem_buf += i->output_queue; + + ctx->send_port = (struct send_port *) cpu_mem_buf; + cpu_mem_buf += i->input_port; + + ctx->recv_port = (struct recv_port *) cpu_mem_buf; + + + /* cell config */ + shm_buf_host = buf->shm_host; + shm_buf_cell = buf->shm_cell; + + ctx->config_host_addr = shm_buf_host; + shm_buf_host += i->fw_config; + ctx->config_vied_addr = shm_buf_cell; + shm_buf_cell += i->fw_config; + + ctx->input_queue_host_addr = shm_buf_host; + shm_buf_host += i->input_queue; + ctx->input_queue_vied_addr = shm_buf_cell; + shm_buf_cell += i->input_queue; + + ctx->output_queue_host_addr = shm_buf_host; + shm_buf_host += i->output_queue; + ctx->output_queue_vied_addr = shm_buf_cell; + shm_buf_cell += i->output_queue; + + ctx->specific_host_addr = shm_buf_host; + ctx->specific_vied_addr = shm_buf_cell; + + ctx->ibuf_host_addr = buf->ibuf_host; + ctx->ibuf_vied_addr = buf->ibuf_cell; + + ctx->obuf_host_addr = buf->obuf_host; + ctx->obuf_vied_addr = 
buf->obuf_cell; + + return ctx; +} + +struct ia_css_syscom_context* +ia_css_syscom_open( + struct ia_css_syscom_config *cfg, + struct ia_css_syscom_buf *buf_extern +) +{ + struct ia_css_syscom_size_intern size_intern; + struct ia_css_syscom_size size; + struct ia_css_syscom_buf buf_intern; + struct ia_css_syscom_buf *buf; + struct ia_css_syscom_context *ctx; + struct ia_css_syscom_config_fw fw_cfg; + unsigned int i; + struct sys_queue_res res; + + IA_CSS_TRACE_0(SYSCOM, INFO, "Entered: ia_css_syscom_open\n"); + + /* error handling */ + if (cfg == NULL) + return NULL; + + IA_CSS_TRACE_1(SYSCOM, INFO, "ia_css_syscom_open (secure %d) start\n", cfg->secure); + + /* check members of cfg: TBD */ + + /* + * Check if SP is in valid state, have to wait if not ready. + * In some platform (Such as VP), it will need more time to wait due to system performance; + * If return NULL without wait for SPC0 ready, Driver load FW will failed + */ + ia_css_cell_wait(cfg->ssid, SPC0); + + ia_css_syscom_size_intern(cfg, &size_intern); + ia_css_syscom_size_extern(&size_intern, &size); + + if (buf_extern) { + /* use externally allocated buffers */ + buf = buf_extern; + } else { + /* use internally allocated buffers */ + buf = &buf_intern; + if (ia_css_syscom_alloc(cfg->ssid, cfg->mmid, &size, buf) != 0) + return NULL; + } + + /* assign buffer pointers */ + ctx = ia_css_syscom_assign_buf(&size_intern, buf); + /* only need to free internally allocated buffers */ + ctx->free_buf = !buf_extern; + + ctx->cell_regs_addr = cfg->regs_addr; + /* regmem is at cell_dmem_addr + REGMEM_OFFSET */ + ctx->cell_dmem_addr = cfg->dmem_addr; + + ctx->num_input_queues = cfg->num_input_queues; + ctx->num_output_queues = cfg->num_output_queues; + + ctx->env.mmid = cfg->mmid; + ctx->env.ssid = cfg->ssid; + ctx->env.mem_addr = cfg->dmem_addr; + + ctx->regmem_idx = SYSCOM_QPR_BASE_REG; + + /* initialize input queues */ + res.reg = SYSCOM_QPR_BASE_REG; + res.host_address = ctx->ibuf_host_addr; + res.vied_address = 
ctx->ibuf_vied_addr; + for (i = 0; i < cfg->num_input_queues; i++) { + sys_queue_init(ctx->input_queue + i, + cfg->input[i].queue_size, + cfg->input[i].token_size, &res); + } + + /* initialize output queues */ + res.host_address = ctx->obuf_host_addr; + res.vied_address = ctx->obuf_vied_addr; + for (i = 0; i < cfg->num_output_queues; i++) { + sys_queue_init(ctx->output_queue + i, + cfg->output[i].queue_size, + cfg->output[i].token_size, &res); + } + + /* fill shared queue structs */ + shared_memory_store(cfg->mmid, ctx->input_queue_host_addr, + ctx->input_queue, + cfg->num_input_queues * sizeof(struct sys_queue)); + ia_css_cpu_mem_cache_flush( + (void *)HOST_ADDRESS(ctx->input_queue_host_addr), + cfg->num_input_queues * sizeof(struct sys_queue)); + shared_memory_store(cfg->mmid, ctx->output_queue_host_addr, + ctx->output_queue, + cfg->num_output_queues * sizeof(struct sys_queue)); + ia_css_cpu_mem_cache_flush( + (void *)HOST_ADDRESS(ctx->output_queue_host_addr), + cfg->num_output_queues * sizeof(struct sys_queue)); + + /* Zero the queue buffers. Is this really needed? 
*/ + shared_memory_zero(cfg->mmid, buf->ibuf_host, size.ibuf); + ia_css_cpu_mem_cache_flush((void *)HOST_ADDRESS(buf->ibuf_host), + size.ibuf); + shared_memory_zero(cfg->mmid, buf->obuf_host, size.obuf); + ia_css_cpu_mem_cache_flush((void *)HOST_ADDRESS(buf->obuf_host), + size.obuf); + + /* copy firmware specific data */ + if (cfg->specific_addr && cfg->specific_size) { + shared_memory_store(cfg->mmid, ctx->specific_host_addr, + cfg->specific_addr, cfg->specific_size); + ia_css_cpu_mem_cache_flush( + (void *)HOST_ADDRESS(ctx->specific_host_addr), + cfg->specific_size); + } + + fw_cfg.num_input_queues = cfg->num_input_queues; + fw_cfg.num_output_queues = cfg->num_output_queues; + fw_cfg.input_queue = ctx->input_queue_vied_addr; + fw_cfg.output_queue = ctx->output_queue_vied_addr; + fw_cfg.specific_addr = ctx->specific_vied_addr; + fw_cfg.specific_size = cfg->specific_size; + + shared_memory_store(cfg->mmid, ctx->config_host_addr, + &fw_cfg, sizeof(struct ia_css_syscom_config_fw)); + ia_css_cpu_mem_cache_flush((void *)HOST_ADDRESS(ctx->config_host_addr), + sizeof(struct ia_css_syscom_config_fw)); + +#if !HAS_DUAL_CMD_CTX_SUPPORT + /* store syscom uninitialized state */ + IA_CSS_TRACE_3(SYSCOM, INFO, "ia_css_syscom_open store STATE_REG (%#x) @ dmem_addr %#x ssid %d\n", + SYSCOM_STATE_UNINIT, ctx->cell_dmem_addr, cfg->ssid); + regmem_store_32(ctx->cell_dmem_addr, SYSCOM_STATE_REG, + SYSCOM_STATE_UNINIT, cfg->ssid); + /* store syscom uninitialized command */ + IA_CSS_TRACE_3(SYSCOM, INFO, "ia_css_syscom_open store COMMAND_REG (%#x) @ dmem_addr %#x ssid %d\n", + SYSCOM_COMMAND_UNINIT, ctx->cell_dmem_addr, cfg->ssid); + regmem_store_32(ctx->cell_dmem_addr, SYSCOM_COMMAND_REG, + SYSCOM_COMMAND_UNINIT, cfg->ssid); + /* store firmware configuration address */ + IA_CSS_TRACE_3(SYSCOM, INFO, "ia_css_syscom_open store CONFIG_REG (%#x) @ dmem_addr %#x ssid %d\n", + ctx->config_vied_addr, ctx->cell_dmem_addr, cfg->ssid); + regmem_store_32(ctx->cell_dmem_addr, SYSCOM_CONFIG_REG, + 
ctx->config_vied_addr, cfg->ssid); +#endif + + /* Indicate if ctx is created for secure stream purpose */ + ctx->secure = cfg->secure; + + IA_CSS_TRACE_1(SYSCOM, INFO, "ia_css_syscom_open (secure %d) completed\n", cfg->secure); + return ctx; +} + + +int +ia_css_syscom_close( + struct ia_css_syscom_context *ctx +) { + int state; + + state = regmem_load_32(ctx->cell_dmem_addr, SYSCOM_STATE_REG, + ctx->env.ssid); + if (state != SYSCOM_STATE_READY) { + /* SPC is not ready to handle close request yet */ + return FW_ERROR_BUSY; + } + + /* set close request flag */ + regmem_store_32(ctx->cell_dmem_addr, SYSCOM_COMMAND_REG, + SYSCOM_COMMAND_INACTIVE, ctx->env.ssid); + + return 0; +} + +static void +ia_css_syscom_free(struct ia_css_syscom_context *ctx) +{ + shared_memory_unmap(ctx->env.ssid, ctx->env.mmid, ctx->ibuf_vied_addr); + shared_memory_free(ctx->env.mmid, ctx->ibuf_host_addr); + shared_memory_unmap(ctx->env.ssid, ctx->env.mmid, ctx->obuf_vied_addr); + shared_memory_free(ctx->env.mmid, ctx->obuf_host_addr); + shared_memory_unmap(ctx->env.ssid, ctx->env.mmid, + ctx->config_vied_addr); + shared_memory_free(ctx->env.mmid, ctx->config_host_addr); + ia_css_cpu_mem_free(ctx); +} + +int +ia_css_syscom_release( + struct ia_css_syscom_context *ctx, + unsigned int force +) { + /* check if release is forced, and verify cell state if it is not */ + if (!force) { + if (!ia_css_cell_is_ready(ctx->env.ssid, SPC0)) + return FW_ERROR_BUSY; + } + + /* Reset the regmem idx */ + ctx->regmem_idx = 0; + + if (ctx->free_buf) + ia_css_syscom_free(ctx); + + return 0; +} + +int ia_css_syscom_send_port_open( + struct ia_css_syscom_context *ctx, + unsigned int port +) +{ + int state; + + /* check parameters */ + verifret(ctx != NULL, FW_ERROR_BAD_ADDRESS); + verifret(port < ctx->num_input_queues, FW_ERROR_INVALID_PARAMETER); + + /* check if SP syscom is ready to open the queue */ + state = regmem_load_32(ctx->cell_dmem_addr, SYSCOM_STATE_REG, + ctx->env.ssid); + if (state != SYSCOM_STATE_READY) 
{ + /* SPC is not ready to handle messages yet */ + return FW_ERROR_BUSY; + } + + /* initialize the port */ + send_port_open(ctx->send_port + port, + ctx->input_queue + port, &(ctx->env)); + + return 0; +} + +int ia_css_syscom_send_port_close( + struct ia_css_syscom_context *ctx, + unsigned int port +) +{ + /* check parameters */ + verifret(ctx != NULL, FW_ERROR_BAD_ADDRESS); + verifret(port < ctx->num_input_queues, FW_ERROR_INVALID_PARAMETER); + + return 0; +} + +int ia_css_syscom_send_port_available( + struct ia_css_syscom_context *ctx, + unsigned int port +) +{ + /* check params */ + verifret(ctx != NULL, FW_ERROR_BAD_ADDRESS); + verifret(port < ctx->num_input_queues, FW_ERROR_INVALID_PARAMETER); + + return send_port_available(ctx->send_port + port); +} + +int ia_css_syscom_send_port_transfer( + struct ia_css_syscom_context *ctx, + unsigned int port, + const void *token +) +{ + /* check params */ + verifret(ctx != NULL, FW_ERROR_BAD_ADDRESS); + verifret(port < ctx->num_input_queues, FW_ERROR_INVALID_PARAMETER); + + return send_port_transfer(ctx->send_port + port, token); +} + +int ia_css_syscom_recv_port_open( + struct ia_css_syscom_context *ctx, + unsigned int port +) +{ + int state; + + /* check parameters */ + verifret(ctx != NULL, FW_ERROR_BAD_ADDRESS); + verifret(port < ctx->num_output_queues, FW_ERROR_INVALID_PARAMETER); + + /* check if SP syscom is ready to open the queue */ + state = regmem_load_32(ctx->cell_dmem_addr, + SYSCOM_STATE_REG, ctx->env.ssid); + if (state != SYSCOM_STATE_READY) { + /* SPC is not ready to handle messages yet */ + return FW_ERROR_BUSY; + } + + /* initialize the port */ + recv_port_open(ctx->recv_port + port, + ctx->output_queue + port, &(ctx->env)); + + return 0; +} + +int ia_css_syscom_recv_port_close( + struct ia_css_syscom_context *ctx, + unsigned int port +) +{ + /* check parameters */ + verifret(ctx != NULL, FW_ERROR_BAD_ADDRESS); + verifret(port < ctx->num_output_queues, FW_ERROR_INVALID_PARAMETER); + + return 0; +} + +/* 
+ * Get the number of responses in the response queue + */ +int +ia_css_syscom_recv_port_available( + struct ia_css_syscom_context *ctx, + unsigned int port +) +{ + /* check params */ + verifret(ctx != NULL, FW_ERROR_BAD_ADDRESS); + verifret(port < ctx->num_output_queues, FW_ERROR_INVALID_PARAMETER); + + return recv_port_available(ctx->recv_port + port); +} + + +/* + * Dequeue the head of the response queue + * returns an error when the response queue is empty + */ +int +ia_css_syscom_recv_port_transfer( + struct ia_css_syscom_context *ctx, + unsigned int port, + void *token +) +{ + /* check params */ + verifret(ctx != NULL, FW_ERROR_BAD_ADDRESS); + verifret(port < ctx->num_output_queues, FW_ERROR_INVALID_PARAMETER); + + return recv_port_transfer(ctx->recv_port + port, token); +} + +#if HAS_DUAL_CMD_CTX_SUPPORT +/* + * store subsystem context information in DMEM + */ +int +ia_css_syscom_store_dmem( + struct ia_css_syscom_context *ctx, + unsigned int ssid, + unsigned int vtl0_addr_mask +) +{ + unsigned int read_back; + + NOT_USED(vtl0_addr_mask); + NOT_USED(read_back); + + if (ctx->secure) { + /* store VTL0 address mask in 'secure' context */ + IA_CSS_TRACE_3(SYSCOM, INFO, "ia_css_syscom_store_dmem VTL0_ADDR_MASK (%#x) @ dmem_addr %#x ssid %d\n", + vtl0_addr_mask, ctx->cell_dmem_addr, ssid); + regmem_store_32(ctx->cell_dmem_addr, SYSCOM_VTL0_ADDR_MASK, vtl0_addr_mask, ssid); + } + /* store firmware configuration address */ + IA_CSS_TRACE_3(SYSCOM, INFO, "ia_css_syscom_store_dmem CONFIG_REG (%#x) @ dmem_addr %#x ssid %d\n", + ctx->config_vied_addr, ctx->cell_dmem_addr, ssid); + regmem_store_32(ctx->cell_dmem_addr, SYSCOM_CONFIG_REG, + ctx->config_vied_addr, ssid); + /* store syscom uninitialized state */ + IA_CSS_TRACE_3(SYSCOM, INFO, "ia_css_syscom_store_dmem STATE_REG (%#x) @ dmem_addr %#x ssid %d\n", + SYSCOM_STATE_UNINIT, ctx->cell_dmem_addr, ssid); + regmem_store_32(ctx->cell_dmem_addr, SYSCOM_STATE_REG, + SYSCOM_STATE_UNINIT, ssid); + /* store syscom 
uninitialized command */ + IA_CSS_TRACE_3(SYSCOM, INFO, "ia_css_syscom_store_dmem COMMAND_REG (%#x) @ dmem_addr %#x ssid %d\n", + SYSCOM_COMMAND_UNINIT, ctx->cell_dmem_addr, ssid); + regmem_store_32(ctx->cell_dmem_addr, SYSCOM_COMMAND_REG, + SYSCOM_COMMAND_UNINIT, ssid); + + return 0; +} + +/* + * store trustlet configuration status setting + */ +void +ia_css_syscom_set_trustlet_status( + unsigned int dmem_addr, + unsigned int ssid, + bool trustlet_exist +) +{ + unsigned int value; + + value = trustlet_exist ? TRUSTLET_EXIST : TRUSTLET_NOT_EXIST; + IA_CSS_TRACE_3(SYSCOM, INFO, + "ia_css_syscom_set_trustlet_status TRUSTLET_STATUS (%#x) @ dmem_addr %#x ssid %d\n", + value, dmem_addr, ssid); + regmem_store_32(dmem_addr, TRUSTLET_STATUS, value, ssid); +} + +/* + * check if SPC access blocker programming is completed + */ +bool +ia_css_syscom_is_ab_spc_ready( + struct ia_css_syscom_context *ctx +) +{ + unsigned int value; + + /* We expect the call from non-secure context only */ + if (ctx->secure) { + IA_CSS_TRACE_0(SYSCOM, ERROR, "ia_css_syscom_is_spc_ab_ready - Please call from non-secure context\n"); + return false; + } + + value = regmem_load_32(ctx->cell_dmem_addr, AB_SPC_STATUS, ctx->env.ssid); + IA_CSS_TRACE_3(SYSCOM, INFO, + "ia_css_syscom_is_spc_ab_ready AB_SPC_STATUS @ dmem_addr %#x ssid %d - value %#x\n", + ctx->cell_dmem_addr, ctx->env.ssid, value); + + return (value == AB_SPC_READY); +} +#endif /* HAS_DUAL_CMD_CTX_SUPPORT */ diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/syscom/src/ia_css_syscom_config_fw.h b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/syscom/src/ia_css_syscom_config_fw.h new file mode 100644 index 0000000000000..0cacd5a34934d --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/syscom/src/ia_css_syscom_config_fw.h @@ -0,0 +1,69 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. 
+ * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ + +#ifndef __IA_CSS_SYSCOM_CONFIG_FW_H +#define __IA_CSS_SYSCOM_CONFIG_FW_H + +#include "type_support.h" + +enum { + /* Program load or explicit host setting should init to this */ + SYSCOM_STATE_UNINIT = 0x57A7E000, + /* SP Syscom sets this when it is ready for use */ + SYSCOM_STATE_READY = 0x57A7E001, + /* SP Syscom sets this when no more syscom accesses will happen */ + SYSCOM_STATE_INACTIVE = 0x57A7E002 +}; + +enum { + /* Program load or explicit host setting should init to this */ + SYSCOM_COMMAND_UNINIT = 0x57A7F000, + /* Host Syscom requests syscom to become inactive */ + SYSCOM_COMMAND_INACTIVE = 0x57A7F001 +}; + +#if HAS_DUAL_CMD_CTX_SUPPORT +enum { + /* Program load or explicit host setting should init to this */ + TRUSTLET_UNINIT = 0x57A8E000, + /* Host Syscom informs SP that Trustlet exists */ + TRUSTLET_EXIST = 0x57A8E001, + /* Host Syscom informs SP that Trustlet does not exist */ + TRUSTLET_NOT_EXIST = 0x57A8E002 +}; + +enum { + /* Program load or explicit setting initialized by SP */ + AB_SPC_NOT_READY = 0x57A8F000, + /* SP informs host that SPC access programming is completed */ + AB_SPC_READY = 0x57A8F001 +}; +#endif + +/* firmware config: data that sent from the host to SP via DDR */ +/* Cell copies data into a context */ + +struct ia_css_syscom_config_fw { + unsigned int firmware_address; + + unsigned int num_input_queues; + unsigned int num_output_queues; + unsigned int input_queue; /* hmm_ptr / struct queue* */ + unsigned int output_queue; /* hmm_ptr / struct queue* */ + + unsigned int 
specific_addr; /* vied virtual address */ + unsigned int specific_size; +}; + +#endif /* __IA_CSS_SYSCOM_CONFIG_FW_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/syscom/src/ia_css_syscom_context.h b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/syscom/src/ia_css_syscom_context.h new file mode 100644 index 0000000000000..ecf22f6b7ac53 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/syscom/src/ia_css_syscom_context.h @@ -0,0 +1,65 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+*/ + +#ifndef __IA_CSS_SYSCOM_CONTEXT_H +#define __IA_CSS_SYSCOM_CONTEXT_H + +#include + +#include "port_env_struct.h" +#include + +/* host context */ +struct ia_css_syscom_context { + vied_virtual_address_t cell_firmware_addr; + unsigned int cell_regs_addr; + unsigned int cell_dmem_addr; + + struct port_env env; + + unsigned int num_input_queues; + unsigned int num_output_queues; + + /* array of input queues (from host to SP) */ + struct sys_queue *input_queue; + /* array of output queues (from SP to host) */ + struct sys_queue *output_queue; + + struct send_port *send_port; + struct recv_port *recv_port; + + unsigned int regmem_idx; + unsigned int free_buf; + + host_virtual_address_t config_host_addr; + host_virtual_address_t input_queue_host_addr; + host_virtual_address_t output_queue_host_addr; + host_virtual_address_t specific_host_addr; + host_virtual_address_t ibuf_host_addr; + host_virtual_address_t obuf_host_addr; + + vied_virtual_address_t config_vied_addr; + vied_virtual_address_t input_queue_vied_addr; + vied_virtual_address_t output_queue_vied_addr; + vied_virtual_address_t specific_vied_addr; + vied_virtual_address_t ibuf_vied_addr; + vied_virtual_address_t obuf_vied_addr; + + /* if true; secure syscom object as in VTIO Case + * if false, non-secure syscom + */ + bool secure; +}; + +#endif /* __IA_CSS_SYSCOM_CONTEXT_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/syscom/syscom.mk b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/syscom/syscom.mk new file mode 100644 index 0000000000000..8d36b8928af55 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/syscom/syscom.mk @@ -0,0 +1,42 @@ +# # # +# Support for Intel Camera Imaging ISP subsystem. +# Copyright (c) 2010 - 2018, Intel Corporation. +# +# This program is free software; you can redistribute it and/or modify it +# under the terms and conditions of the GNU General Public License, +# version 2, as published by the Free Software Foundation. 
+# +# This program is distributed in the hope it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for +# more details +# +# +# MODULE is SYSCOM + +SYSCOM_DIR=$${MODULES_DIR}/syscom + +SYSCOM_INTERFACE=$(SYSCOM_DIR)/interface +SYSCOM_SOURCES1=$(SYSCOM_DIR)/src + +SYSCOM_HOST_FILES += $(SYSCOM_SOURCES1)/ia_css_syscom.c + +SYSCOM_HOST_CPPFLAGS += -I$(SYSCOM_INTERFACE) +SYSCOM_HOST_CPPFLAGS += -I$(SYSCOM_SOURCES1) +SYSCOM_HOST_CPPFLAGS += -I$${MODULES_DIR}/devices +ifdef REGMEM_SECURE_OFFSET +SYSCOM_HOST_CPPFLAGS += -DREGMEM_SECURE_OFFSET=$(REGMEM_SECURE_OFFSET) +else +SYSCOM_HOST_CPPFLAGS += -DREGMEM_SECURE_OFFSET=0 +endif + +SYSCOM_FW_FILES += $(SYSCOM_SOURCES1)/ia_css_syscom_fw.c + +SYSCOM_FW_CPPFLAGS += -I$(SYSCOM_INTERFACE) +SYSCOM_FW_CPPFLAGS += -I$(SYSCOM_SOURCES1) +SYSCOM_FW_CPPFLAGS += -DREGMEM_OFFSET=$(REGMEM_OFFSET) +ifdef REGMEM_SECURE_OFFSET +SYSCOM_FW_CPPFLAGS += -DREGMEM_SECURE_OFFSET=$(REGMEM_SECURE_OFFSET) +else +SYSCOM_FW_CPPFLAGS += -DREGMEM_SECURE_OFFSET=0 +endif diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/trace/interface/ia_css_trace.h b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/trace/interface/ia_css_trace.h new file mode 100644 index 0000000000000..b85b1810f1070 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/trace/interface/ia_css_trace.h @@ -0,0 +1,883 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU General Public License for + * more details. +*/ + +/*! \file */ + +#ifndef __IA_CSS_TRACE_H +#define __IA_CSS_TRACE_H + +/* +** Configurations +*/ + +/** + * STEP 1: Define {Module Name}_TRACE_METHOD to one of the following. + * Where: + * {Module Name} is the name of the targeted module. + * + * Example: + * #define NCI_DMA_TRACE_METHOD IA_CSS_TRACE_METHOD_NATIVE + */ + +/**< Use whatever method of tracing that best suits the platform + * this code is compiled for. + */ +#define IA_CSS_TRACE_METHOD_NATIVE 1 +/**< Use the Tracing NCI. */ +#define IA_CSS_TRACE_METHOD_TRACE 2 + +/** + * STEP 2: Define {Module Name}_TRACE_LEVEL_{Level} to one of the following. + * Where: + * {Module Name} is the name of the targeted module. + * {Level}, in decreasing order of severity, is one of the + * following values: + * {ASSERT, ERROR, WARNING, INFO, DEBUG, VERBOSE}. + * + * Example: + * #define NCI_DMA_TRACE_LEVEL_ASSERT IA_CSS_TRACE_LEVEL_DISABLED + * #define NCI_DMA_TRACE_LEVEL_ERROR IA_CSS_TRACE_LEVEL_ENABLED + */ +/**< Disables the corresponding trace level. */ +#define IA_CSS_TRACE_LEVEL_DISABLED 0 +/**< Enables the corresponding trace level. */ +#define IA_CSS_TRACE_LEVEL_ENABLED 1 + +/* + * Used in macro definition with do-while loop + * for removing checkpatch warnings + */ +#define IA_CSS_TRACE_FILE_DUMMY_DEFINE + +/** + * STEP 3: Define IA_CSS_TRACE_PRINT_FILE_LINE to have file name and + * line printed with every log message. + * + * Example: + * #define IA_CSS_TRACE_PRINT_FILE_LINE + */ + +/* +** Interface +*/ + +/* +** Static +*/ + +/** + * Logs a message with zero arguments if the targeted severity level is enabled + * at compile-time. + * @param module The targeted module. + * @param severity The severity level of the trace message. In decreasing order: + * {ASSERT, ERROR, WARNING, INFO, DEBUG, VERBOSE}. + * @param format The message to be traced. 
+ */ +#define IA_CSS_TRACE_0(module, severity, format) \ + IA_CSS_TRACE_IMPL(module, 0, severity, format) + +/** + * Logs a message with one argument if the targeted severity level is enabled + * at compile-time. + * @see IA_CSS_TRACE_0 + */ +#define IA_CSS_TRACE_1(module, severity, format, a1) \ + IA_CSS_TRACE_IMPL(module, 1, severity, format, a1) + +/** + * Logs a message with two arguments if the targeted severity level is enabled + * at compile-time. + * @see IA_CSS_TRACE_0 + */ +#define IA_CSS_TRACE_2(module, severity, format, a1, a2) \ + IA_CSS_TRACE_IMPL(module, 2, severity, format, a1, a2) + +/** + * Logs a message with three arguments if the targeted severity level + * is enabled at compile-time. + * @see IA_CSS_TRACE_0 + */ +#define IA_CSS_TRACE_3(module, severity, format, a1, a2, a3) \ + IA_CSS_TRACE_IMPL(module, 3, severity, format, a1, a2, a3) + +/** + * Logs a message with four arguments if the targeted severity level is enabled + * at compile-time. + * @see IA_CSS_TRACE_0 + */ +#define IA_CSS_TRACE_4(module, severity, format, a1, a2, a3, a4) \ + IA_CSS_TRACE_IMPL(module, 4, severity, format, a1, a2, a3, a4) + +/** + * Logs a message with five arguments if the targeted severity level is enabled + * at compile-time. + * @see IA_CSS_TRACE_0 + */ +#define IA_CSS_TRACE_5(module, severity, format, a1, a2, a3, a4, a5) \ + IA_CSS_TRACE_IMPL(module, 5, severity, format, a1, a2, a3, a4, a5) + +/** + * Logs a message with six arguments if the targeted severity level is enabled + * at compile-time. + * @see IA_CSS_TRACE_0 + */ +#define IA_CSS_TRACE_6(module, severity, format, a1, a2, a3, a4, a5, a6) \ + IA_CSS_TRACE_IMPL(module, 6, severity, format, a1, a2, a3, a4, a5, a6) + +/** + * Logs a message with seven arguments if the targeted severity level + * is enabled at compile-time. 
+ * @see IA_CSS_TRACE_0 + */ +#define IA_CSS_TRACE_7(module, severity, format, a1, a2, a3, a4, a5, a6, a7) \ + IA_CSS_TRACE_IMPL(module, 7, severity, format, \ + a1, a2, a3, a4, a5, a6, a7) + +/* +** Dynamic +*/ + +/** +* Declares, but does not define, dynamic tracing functions and variables +* for module \p module. For each module, place an instance of this macro +* in the compilation unit in which you want to use dynamic tracing facility +* so as to inform the compiler of the declaration of the available functions. +* An invocation of this macro does not enable any of the available tracing +* levels. Do not place a semicolon after a call to this macro. +* @see IA_CSS_TRACE_DYNAMIC_DEFINE +*/ +#define IA_CSS_TRACE_DYNAMIC_DECLARE(module) \ + IA_CSS_TRACE_DYNAMIC_DECLARE_IMPL(module) +/** +* Declares the configuration function for the dynamic api separately, if one +* wants to use it. +*/ +#define IA_CSS_TRACE_DYNAMIC_DECLARE_CONFIG_FUNC(module) \ + IA_CSS_TRACE_DYNAMIC_DECLARE_CONFIG_FUNC_IMPL(module) + +/** +* Defines dynamic tracing functions and variables for module \p module. +* For each module, place an instance of this macro in one, and only one, +* of your SOURCE files so as to allow the linker to resolve the related symbols. +* An invocation of this macro does not enable any of the available tracing +* levels. Do not place a semicolon after a call to this macro. +* @see IA_CSS_TRACE_DYNAMIC_DECLARE +*/ +#define IA_CSS_TRACE_DYNAMIC_DEFINE(module) \ + IA_CSS_TRACE_DYNAMIC_DEFINE_IMPL(module) +/** +* Defines the configuration function for the dynamic api separately, if one +* wants to use it. +*/ +#define IA_CSS_TRACE_DYNAMIC_DEFINE_CONFIG_FUNC(module) \ + IA_CSS_TRACE_DYNAMIC_DEFINE_CONFIG_FUNC_IMPL(module) + +/** + * Logs a message with zero arguments if the targeted severity level is enabled + * both at compile-time, and run-time. + * @param module The targeted module. + * @param severity The severity level of the trace message.
In decreasing order: + * {ASSERT, ERROR, WARNING, INFO, DEBUG, VERBOSE}. + * @param format The message to be traced. + */ +#define IA_CSS_TRACE_DYNAMIC_0(module, severity, format) \ + IA_CSS_TRACE_DYNAMIC_IMPL(module, 0, severity, format) + +/** + * Logs a message with one argument if the targeted severity level is enabled + * both at compile-time, and run-time. + * @see IA_CSS_TRACE_DYNAMIC_0 + */ +#define IA_CSS_TRACE_DYNAMIC_1(module, severity, format, a1) \ + IA_CSS_TRACE_DYNAMIC_IMPL(module, 1, severity, format, a1) + +/** + * Logs a message with two arguments if the targeted severity level is enabled + * both at compile-time, and run-time. + * @see IA_CSS_TRACE_DYNAMIC_0 + */ +#define IA_CSS_TRACE_DYNAMIC_2(module, severity, format, a1, a2) \ + IA_CSS_TRACE_DYNAMIC_IMPL(module, 2, severity, format, a1, a2) + +/** + * Logs a message with three arguments if the targeted severity level + * is enabled both at compile-time, and run-time. + * @see IA_CSS_TRACE_DYNAMIC_0 + */ +#define IA_CSS_TRACE_DYNAMIC_3(module, severity, format, a1, a2, a3) \ + IA_CSS_TRACE_DYNAMIC_IMPL(module, 3, severity, format, a1, a2, a3) + +/** + * Logs a message with four arguments if the targeted severity level is enabled + * both at compile-time, and run-time. + * @see IA_CSS_TRACE_DYNAMIC_0 + */ +#define IA_CSS_TRACE_DYNAMIC_4(module, severity, format, a1, a2, a3, a4) \ + IA_CSS_TRACE_DYNAMIC_IMPL(module, 4, severity, format, a1, a2, a3, a4) + +/** + * Logs a message with five arguments if the targeted severity level is enabled + * both at compile-time, and run-time. + * @see IA_CSS_TRACE_DYNAMIC_0 + */ +#define IA_CSS_TRACE_DYNAMIC_5(module, severity, format, a1, a2, a3, a4, a5) \ + IA_CSS_TRACE_DYNAMIC_IMPL(module, 5, severity, format, \ + a1, a2, a3, a4, a5) + +/** + * Logs a message with six arguments if the targeted severity level is enabled + * both at compile-time, and run-time. 
+ * @see IA_CSS_TRACE_DYNAMIC_0 + */ +#define IA_CSS_TRACE_DYNAMIC_6(module, severity, format, \ + a1, a2, a3, a4, a5, a6) \ + IA_CSS_TRACE_DYNAMIC_IMPL(module, 6, severity, format, \ + a1, a2, a3, a4, a5, a6) + +/** + * Logs a message with seven arguments if the targeted severity level + * is enabled both at compile-time, and run-time. + * @see IA_CSS_TRACE_DYNAMIC_0 + */ +#define IA_CSS_TRACE_DYNAMIC_7(module, severity, format, \ + a1, a2, a3, a4, a5, a6, a7) \ + IA_CSS_TRACE_DYNAMIC_IMPL(module, 7, severity, format, \ + a1, a2, a3, a4, a5, a6, a7) + +/* +** Implementation +*/ + +/* CAT */ +#define IA_CSS_TRACE_CAT_IMPL(a, b) a ## b +#define IA_CSS_TRACE_CAT(a, b) IA_CSS_TRACE_CAT_IMPL(a, b) + +/* Bridge */ +#if defined(__HIVECC) || defined(__GNUC__) +#define IA_CSS_TRACE_IMPL(module, argument_count, severity, arguments ...) \ + IA_CSS_TRACE_CAT( \ + IA_CSS_TRACE_CAT( \ + IA_CSS_TRACE_CAT( \ + IA_CSS_TRACE_CAT( \ + IA_CSS_TRACE_CAT( \ + IA_CSS_TRACE_, \ + argument_count \ + ), \ + _ \ + ), \ + IA_CSS_TRACE_CAT( \ + module, \ + _TRACE_METHOD \ + ) \ + ), \ + _ \ + ), \ + IA_CSS_TRACE_CAT( \ + IA_CSS_TRACE_CAT( \ + module, \ + _TRACE_LEVEL_ \ + ), \ + severity \ + ) \ + ( \ + IA_CSS_TRACE_CAT( \ + IA_CSS_TRACE_CAT( \ + IA_CSS_TRACE_CAT( \ + IA_CSS_TRACE_SEVERITY_, \ + severity \ + ), \ + _ \ + ), \ + IA_CSS_TRACE_CAT( \ + module, \ + _TRACE_METHOD \ + ) \ + ), \ + #module, \ + ## arguments \ + ) \ + ) + +/* Bridge */ +#define IA_CSS_TRACE_DYNAMIC_IMPL(module, argument_count, severity, \ + arguments ...) \ + do { \ + if (IA_CSS_TRACE_CAT(IA_CSS_TRACE_CAT(module, _trace_level_), \ + severity)) { \ + IA_CSS_TRACE_IMPL(module, argument_count, severity, \ + ## arguments); \ + } \ + } while (0) +#elif defined(_MSC_VER) +#define IA_CSS_TRACE_IMPL(module, argument_count, severity, ...) 
\ + IA_CSS_TRACE_CAT( \ + IA_CSS_TRACE_CAT( \ + IA_CSS_TRACE_CAT( \ + IA_CSS_TRACE_CAT( \ + IA_CSS_TRACE_CAT( \ + IA_CSS_TRACE_, \ + argument_count \ + ), \ + _ \ + ), \ + IA_CSS_TRACE_CAT( \ + module, \ + _TRACE_METHOD \ + ) \ + ), \ + _ \ + ), \ + IA_CSS_TRACE_CAT( \ + IA_CSS_TRACE_CAT( \ + module, \ + _TRACE_LEVEL_ \ + ), \ + severity \ + ) \ + ( \ + IA_CSS_TRACE_CAT( \ + IA_CSS_TRACE_CAT( \ + IA_CSS_TRACE_CAT( \ + IA_CSS_TRACE_SEVERITY_, \ + severity \ + ), \ + _ \ + ), \ + IA_CSS_TRACE_CAT( \ + module, \ + _TRACE_METHOD \ + ) \ + ), \ + #module, \ + __VA_ARGS__ \ + ) \ + ) + +/* Bridge */ +#define IA_CSS_TRACE_DYNAMIC_IMPL(module, argument_count, severity, ...) \ + do { \ + if (IA_CSS_TRACE_CAT(IA_CSS_TRACE_CAT(module, _trace_level_), \ + severity)) { \ + IA_CSS_TRACE_IMPL(module, argument_count, severity, \ + __VA_ARGS__); \ + } \ + } while (0) +#endif + +/* +** Native Backend +*/ + +#if defined(__HIVECC) + #define IA_CSS_TRACE_PLATFORM_CELL +#elif defined(__GNUC__) + #define IA_CSS_TRACE_PLATFORM_HOST + + #define IA_CSS_TRACE_NATIVE(severity, module, format, arguments ...) \ + do { \ + IA_CSS_TRACE_FILE_PRINT_COMMAND; \ + PRINT(IA_CSS_TRACE_FORMAT_AUG_NATIVE(severity, module, \ + format), ## arguments); \ + } while (0) + /* TODO: In case Host Side tracing is needed to be mapped to the + * Tunit, the following "IA_CSS_TRACE_TRACE" needs to be modified from + * PRINT to vied_nci_tunit_print function calls + */ + #define IA_CSS_TRACE_TRACE(severity, module, format, arguments ...) \ + do { \ + IA_CSS_TRACE_FILE_PRINT_COMMAND; \ + PRINT(IA_CSS_TRACE_FORMAT_AUG_TRACE(severity, module, \ + format), ## arguments); \ + } while (0) + +#elif defined(_MSC_VER) + #define IA_CSS_TRACE_PLATFORM_HOST + + #define IA_CSS_TRACE_NATIVE(severity, module, format, ...) 
\ + do { \ + IA_CSS_TRACE_FILE_PRINT_COMMAND; \ + PRINT(IA_CSS_TRACE_FORMAT_AUG_NATIVE(severity, \ + module, format), __VA_ARGS__); \ + } while (0) + /* TODO: In case Host Side tracing is needed to be mapped to the + * Tunit, the following "IA_CSS_TRACE_TRACE" needs to be modified from + * PRINT to vied_nci_tunit_print function calls + */ + #define IA_CSS_TRACE_TRACE(severity, module, format, ...) \ + do { \ + IA_CSS_TRACE_FILE_PRINT_COMMAND; \ + PRINT(IA_CSS_TRACE_FORMAT_AUG_TRACE(severity, \ + module, format), __VA_ARGS__); \ + } while (0) +#else + #error Unsupported platform! +#endif /* Platform */ + +#if defined(IA_CSS_TRACE_PLATFORM_CELL) + #include /* VOLATILE */ + + #ifdef IA_CSS_TRACE_PRINT_FILE_LINE + #define IA_CSS_TRACE_FILE_PRINT_COMMAND \ + do { \ + OP___printstring(__FILE__":") VOLATILE; \ + OP___printdec(__LINE__) VOLATILE; \ + OP___printstring("\n") VOLATILE; \ + } while (0) + #else + #define IA_CSS_TRACE_FILE_PRINT_COMMAND + #endif + + #define IA_CSS_TRACE_MODULE_SEVERITY_PRINT(module, severity) \ + do { \ + IA_CSS_TRACE_FILE_DUMMY_DEFINE; \ + OP___printstring("["module"]:["severity"]:") \ + VOLATILE; \ + } while (0) + + #define IA_CSS_TRACE_MSG_NATIVE(severity, module, format) \ + do { \ + IA_CSS_TRACE_FILE_PRINT_COMMAND; \ + OP___printstring("["module"]:["severity"]: "format) \ + VOLATILE; \ + } while (0) + + #define IA_CSS_TRACE_ARG_NATIVE(module, severity, i, value) \ + do { \ + IA_CSS_TRACE_MODULE_SEVERITY_PRINT(module, severity); \ + OP___dump(i, value) VOLATILE; \ + } while (0) + + #define IA_CSS_TRACE_NATIVE_0(severity, module, format) \ + IA_CSS_TRACE_MSG_NATIVE(severity, module, format) + + #define IA_CSS_TRACE_NATIVE_1(severity, module, format, a1) \ + do { \ + IA_CSS_TRACE_MSG_NATIVE(severity, module, format); \ + IA_CSS_TRACE_ARG_NATIVE(module, severity, 1, a1); \ + } while (0) + + #define IA_CSS_TRACE_NATIVE_2(severity, module, format, a1, a2) \ + do { \ + IA_CSS_TRACE_MSG_NATIVE(severity, module, format); \ + 
IA_CSS_TRACE_ARG_NATIVE(module, severity, 1, a1); \ + IA_CSS_TRACE_ARG_NATIVE(module, severity, 2, a2); \ + } while (0) + + #define IA_CSS_TRACE_NATIVE_3(severity, module, format, a1, a2, a3) \ + do { \ + IA_CSS_TRACE_MSG_NATIVE(severity, module, format); \ + IA_CSS_TRACE_ARG_NATIVE(module, severity, 1, a1); \ + IA_CSS_TRACE_ARG_NATIVE(module, severity, 2, a2); \ + IA_CSS_TRACE_ARG_NATIVE(module, severity, 3, a3); \ + } while (0) + + #define IA_CSS_TRACE_NATIVE_4(severity, module, format, \ + a1, a2, a3, a4) \ + do { \ + IA_CSS_TRACE_MSG_NATIVE(severity, module, format); \ + IA_CSS_TRACE_ARG_NATIVE(module, severity, 1, a1); \ + IA_CSS_TRACE_ARG_NATIVE(module, severity, 2, a2); \ + IA_CSS_TRACE_ARG_NATIVE(module, severity, 3, a3); \ + IA_CSS_TRACE_ARG_NATIVE(module, severity, 4, a4); \ + } while (0) + + #define IA_CSS_TRACE_NATIVE_5(severity, module, format, \ + a1, a2, a3, a4, a5) \ + do { \ + IA_CSS_TRACE_MSG_NATIVE(severity, module, format); \ + IA_CSS_TRACE_ARG_NATIVE(module, severity, 1, a1); \ + IA_CSS_TRACE_ARG_NATIVE(module, severity, 2, a2); \ + IA_CSS_TRACE_ARG_NATIVE(module, severity, 3, a3); \ + IA_CSS_TRACE_ARG_NATIVE(module, severity, 4, a4); \ + IA_CSS_TRACE_ARG_NATIVE(module, severity, 5, a5); \ + } while (0) + + #define IA_CSS_TRACE_NATIVE_6(severity, module, format, \ + a1, a2, a3, a4, a5, a6) \ + do { \ + IA_CSS_TRACE_MSG_NATIVE(severity, module, format); \ + IA_CSS_TRACE_ARG_NATIVE(module, severity, 1, a1); \ + IA_CSS_TRACE_ARG_NATIVE(module, severity, 2, a2); \ + IA_CSS_TRACE_ARG_NATIVE(module, severity, 3, a3); \ + IA_CSS_TRACE_ARG_NATIVE(module, severity, 4, a4); \ + IA_CSS_TRACE_ARG_NATIVE(module, severity, 5, a5); \ + IA_CSS_TRACE_ARG_NATIVE(module, severity, 6, a6); \ + } while (0) + + #define IA_CSS_TRACE_NATIVE_7(severity, module, format, \ + a1, a2, a3, a4, a5, a6, a7) \ + do { \ + IA_CSS_TRACE_MSG_NATIVE(severity, module, format); \ + IA_CSS_TRACE_ARG_NATIVE(module, severity, 1, a1); \ + IA_CSS_TRACE_ARG_NATIVE(module, severity, 2, a2); 
\ + IA_CSS_TRACE_ARG_NATIVE(module, severity, 3, a3); \ + IA_CSS_TRACE_ARG_NATIVE(module, severity, 4, a4); \ + IA_CSS_TRACE_ARG_NATIVE(module, severity, 5, a5); \ + IA_CSS_TRACE_ARG_NATIVE(module, severity, 6, a6); \ + IA_CSS_TRACE_ARG_NATIVE(module, severity, 7, a7); \ + } while (0) + /* + ** Tracing Backend + */ +#if !defined(HRT_CSIM) && !defined(NO_TUNIT) + #include "vied_nci_tunit.h" +#endif + #define IA_CSS_TRACE_AUG_FORMAT_TRACE(format, module) \ + "[" module "]" format " : PID = %x : Timestamp = %d : PC = %x" + + #define IA_CSS_TRACE_TRACE_0(severity, module, format) \ + vied_nci_tunit_print(IA_CSS_TRACE_AUG_FORMAT_TRACE(format, \ + module), \ + severity) + + #define IA_CSS_TRACE_TRACE_1(severity, module, format, a1) \ + vied_nci_tunit_print1i(IA_CSS_TRACE_AUG_FORMAT_TRACE(format, \ + module), \ + severity, a1) + + #define IA_CSS_TRACE_TRACE_2(severity, module, format, a1, a2) \ + vied_nci_tunit_print2i(IA_CSS_TRACE_AUG_FORMAT_TRACE(format, \ + module), \ + severity, a1, a2) + + #define IA_CSS_TRACE_TRACE_3(severity, module, format, a1, a2, a3) \ + vied_nci_tunit_print3i(IA_CSS_TRACE_AUG_FORMAT_TRACE(format, \ + module), \ + severity, a1, a2, a3) + + #define IA_CSS_TRACE_TRACE_4(severity, module, format, a1, a2, a3, a4) \ + vied_nci_tunit_print4i(IA_CSS_TRACE_AUG_FORMAT_TRACE(format, \ + module), \ + severity, a1, a2, a3, a4) + + #define IA_CSS_TRACE_TRACE_5(severity, module, format, \ + a1, a2, a3, a4, a5) \ + vied_nci_tunit_print5i(IA_CSS_TRACE_AUG_FORMAT_TRACE(format, \ + module), \ + severity, a1, a2, a3, a4, a5) + + #define IA_CSS_TRACE_TRACE_6(severity, module, format, \ + a1, a2, a3, a4, a5, a6) \ + vied_nci_tunit_print6i(IA_CSS_TRACE_AUG_FORMAT_TRACE(format, \ + module), \ + severity, a1, a2, a3, a4, a5, a6) + + #define IA_CSS_TRACE_TRACE_7(severity, module, format, \ + a1, a2, a3, a4, a5, a6, a7) \ + vied_nci_tunit_print7i(IA_CSS_TRACE_AUG_FORMAT_TRACE(format, \ + module), \ + severity, a1, a2, a3, a4, a5, a6, a7) + +#elif 
defined(IA_CSS_TRACE_PLATFORM_HOST) + #include "print_support.h" + + #ifdef IA_CSS_TRACE_PRINT_FILE_LINE + #define IA_CSS_TRACE_FILE_PRINT_COMMAND \ + PRINT("%s:%d:\n", __FILE__, __LINE__) + #else + #define IA_CSS_TRACE_FILE_PRINT_COMMAND + #endif + + #define IA_CSS_TRACE_FORMAT_AUG_NATIVE(severity, module, format) \ + "[" module "]:[" severity "]: " format + + #define IA_CSS_TRACE_NATIVE_0(severity, module, format) \ + IA_CSS_TRACE_NATIVE(severity, module, format) + + #define IA_CSS_TRACE_NATIVE_1(severity, module, format, a1) \ + IA_CSS_TRACE_NATIVE(severity, module, format, a1) + + #define IA_CSS_TRACE_NATIVE_2(severity, module, format, a1, a2) \ + IA_CSS_TRACE_NATIVE(severity, module, format, a1, a2) + + #define IA_CSS_TRACE_NATIVE_3(severity, module, format, a1, a2, a3) \ + IA_CSS_TRACE_NATIVE(severity, module, format, a1, a2, a3) + + #define IA_CSS_TRACE_NATIVE_4(severity, module, format, \ + a1, a2, a3, a4) \ + IA_CSS_TRACE_NATIVE(severity, module, format, a1, a2, a3, a4) + + #define IA_CSS_TRACE_NATIVE_5(severity, module, format, \ + a1, a2, a3, a4, a5) \ + IA_CSS_TRACE_NATIVE(severity, module, format, \ + a1, a2, a3, a4, a5) + + #define IA_CSS_TRACE_NATIVE_6(severity, module, format, \ + a1, a2, a3, a4, a5, a6) \ + IA_CSS_TRACE_NATIVE(severity, module, format, \ + a1, a2, a3, a4, a5, a6) + + #define IA_CSS_TRACE_NATIVE_7(severity, module, format, \ + a1, a2, a3, a4, a5, a6, a7) \ + IA_CSS_TRACE_NATIVE(severity, module, format, \ + a1, a2, a3, a4, a5, a6, a7) + + #define IA_CSS_TRACE_FORMAT_AUG_TRACE(severity, module, format) \ + "["module"]:["severity"]: "format + + #define IA_CSS_TRACE_TRACE_0(severity, module, format) \ + IA_CSS_TRACE_TRACE(severity, module, format) + + #define IA_CSS_TRACE_TRACE_1(severity, module, format, a1) \ + IA_CSS_TRACE_TRACE(severity, module, format, a1) + + #define IA_CSS_TRACE_TRACE_2(severity, module, format, a1, a2) \ + IA_CSS_TRACE_TRACE(severity, module, format, a1, a2) + + #define IA_CSS_TRACE_TRACE_3(severity, module, 
format, a1, a2, a3) \ + IA_CSS_TRACE_TRACE(severity, module, format, a1, a2, a3) + + #define IA_CSS_TRACE_TRACE_4(severity, module, format, \ + a1, a2, a3, a4) \ + IA_CSS_TRACE_TRACE(severity, module, format, a1, a2, a3, a4) + + #define IA_CSS_TRACE_TRACE_5(severity, module, format, \ + a1, a2, a3, a4, a5) \ + IA_CSS_TRACE_TRACE(severity, module, format, \ + a1, a2, a3, a4, a5) + + #define IA_CSS_TRACE_TRACE_6(severity, module, format, \ + a1, a2, a3, a4, a5, a6) \ + IA_CSS_TRACE_TRACE(severity, module, format, \ + a1, a2, a3, a4, a5, a6) + + #define IA_CSS_TRACE_TRACE_7(severity, module, format, \ + a1, a2, a3, a4, a5, a6, a7) \ + IA_CSS_TRACE_TRACE(severity, module, format, \ + a1, a2, a3, a4, a5, a6, a7) +#endif + +/* Disabled */ +/* Legend: IA_CSS_TRACE_{Argument Count}_{Backend ID}_{Enabled} */ +#define IA_CSS_TRACE_0_1_0(severity, module, format) +#define IA_CSS_TRACE_1_1_0(severity, module, format, arg1) +#define IA_CSS_TRACE_2_1_0(severity, module, format, arg1, arg2) +#define IA_CSS_TRACE_3_1_0(severity, module, format, arg1, arg2, arg3) +#define IA_CSS_TRACE_4_1_0(severity, module, format, arg1, arg2, arg3, arg4) +#define IA_CSS_TRACE_5_1_0(severity, module, format, arg1, arg2, arg3, arg4, \ + arg5) +#define IA_CSS_TRACE_6_1_0(severity, module, format, arg1, arg2, arg3, arg4, \ + arg5, arg6) +#define IA_CSS_TRACE_7_1_0(severity, module, format, arg1, arg2, arg3, arg4, \ + arg5, arg6, arg7) + +/* Enabled */ +/* Legend: IA_CSS_TRACE_{Argument Count}_{Backend ID}_{Enabled} */ +#define IA_CSS_TRACE_0_1_1 IA_CSS_TRACE_NATIVE_0 +#define IA_CSS_TRACE_1_1_1 IA_CSS_TRACE_NATIVE_1 +#define IA_CSS_TRACE_2_1_1 IA_CSS_TRACE_NATIVE_2 +#define IA_CSS_TRACE_3_1_1 IA_CSS_TRACE_NATIVE_3 +#define IA_CSS_TRACE_4_1_1 IA_CSS_TRACE_NATIVE_4 +#define IA_CSS_TRACE_5_1_1 IA_CSS_TRACE_NATIVE_5 +#define IA_CSS_TRACE_6_1_1 IA_CSS_TRACE_NATIVE_6 +#define IA_CSS_TRACE_7_1_1 IA_CSS_TRACE_NATIVE_7 + +/* Enabled */ +/* Legend: IA_CSS_TRACE_SEVERITY_{Severity Level}_{Backend ID} */ 
+#define IA_CSS_TRACE_SEVERITY_ASSERT_1 "Assert" +#define IA_CSS_TRACE_SEVERITY_ERROR_1 "Error" +#define IA_CSS_TRACE_SEVERITY_WARNING_1 "Warning" +#define IA_CSS_TRACE_SEVERITY_INFO_1 "Info" +#define IA_CSS_TRACE_SEVERITY_DEBUG_1 "Debug" +#define IA_CSS_TRACE_SEVERITY_VERBOSE_1 "Verbose" + +/* Disabled */ +/* Legend: IA_CSS_TRACE_{Argument Count}_{Backend ID}_{Enabled} */ +#define IA_CSS_TRACE_0_2_0(severity, module, format) +#define IA_CSS_TRACE_1_2_0(severity, module, format, arg1) +#define IA_CSS_TRACE_2_2_0(severity, module, format, arg1, arg2) +#define IA_CSS_TRACE_3_2_0(severity, module, format, arg1, arg2, arg3) +#define IA_CSS_TRACE_4_2_0(severity, module, format, arg1, arg2, arg3, arg4) +#define IA_CSS_TRACE_5_2_0(severity, module, format, arg1, arg2, arg3, arg4, \ + arg5) +#define IA_CSS_TRACE_6_2_0(severity, module, format, arg1, arg2, arg3, arg4, \ + arg5, arg6) +#define IA_CSS_TRACE_7_2_0(severity, module, format, arg1, arg2, arg3, arg4, \ + arg5, arg6, arg7) + +/* Enabled */ +/* Legend: IA_CSS_TRACE_{Argument Count}_{Backend ID}_{Enabled} */ +#define IA_CSS_TRACE_0_2_1 IA_CSS_TRACE_TRACE_0 +#define IA_CSS_TRACE_1_2_1 IA_CSS_TRACE_TRACE_1 +#define IA_CSS_TRACE_2_2_1 IA_CSS_TRACE_TRACE_2 +#define IA_CSS_TRACE_3_2_1 IA_CSS_TRACE_TRACE_3 +#define IA_CSS_TRACE_4_2_1 IA_CSS_TRACE_TRACE_4 +#define IA_CSS_TRACE_5_2_1 IA_CSS_TRACE_TRACE_5 +#define IA_CSS_TRACE_6_2_1 IA_CSS_TRACE_TRACE_6 +#define IA_CSS_TRACE_7_2_1 IA_CSS_TRACE_TRACE_7 + +/* Enabled */ +/* Legend: IA_CSS_TRACE_SEVERITY_{Severity Level}_{Backend ID} */ +#define IA_CSS_TRACE_SEVERITY_ASSERT_2 VIED_NCI_TUNIT_MSG_SEVERITY_FATAL +#define IA_CSS_TRACE_SEVERITY_ERROR_2 VIED_NCI_TUNIT_MSG_SEVERITY_ERROR +#define IA_CSS_TRACE_SEVERITY_WARNING_2 VIED_NCI_TUNIT_MSG_SEVERITY_WARNING +#define IA_CSS_TRACE_SEVERITY_INFO_2 VIED_NCI_TUNIT_MSG_SEVERITY_NORMAL +#define IA_CSS_TRACE_SEVERITY_DEBUG_2 VIED_NCI_TUNIT_MSG_SEVERITY_USER1 +#define IA_CSS_TRACE_SEVERITY_VERBOSE_2 VIED_NCI_TUNIT_MSG_SEVERITY_USER2 + +/* 
+** Dynamicism +*/ + +#define IA_CSS_TRACE_DYNAMIC_DECLARE_IMPL(module) \ + do { \ + void IA_CSS_TRACE_CAT(module, _trace_assert_enable)(void); \ + void IA_CSS_TRACE_CAT(module, _trace_assert_disable)(void); \ + void IA_CSS_TRACE_CAT(module, _trace_error_enable)(void); \ + void IA_CSS_TRACE_CAT(module, _trace_error_disable)(void); \ + void IA_CSS_TRACE_CAT(module, _trace_warning_enable)(void); \ + void IA_CSS_TRACE_CAT(module, _trace_warning_disable)(void); \ + void IA_CSS_TRACE_CAT(module, _trace_info_enable)(void); \ + void IA_CSS_TRACE_CAT(module, _trace_info_disable)(void); \ + void IA_CSS_TRACE_CAT(module, _trace_debug_enable)(void); \ + void IA_CSS_TRACE_CAT(module, _trace_debug_disable)(void); \ + void IA_CSS_TRACE_CAT(module, _trace_verbose_enable)(void); \ + void IA_CSS_TRACE_CAT(module, _trace_verbose_disable)(void); \ + } while (0) + +#define IA_CSS_TRACE_DYNAMIC_DECLARE_CONFIG_FUNC_IMPL(module) \ + do { \ + IA_CSS_TRACE_FILE_DUMMY_DEFINE; \ + void IA_CSS_TRACE_CAT(module, _trace_configure)\ + (int argc, const char *const *argv); \ + } while (0) + +#include "platform_support.h" +#include "type_support.h" + +#define IA_CSS_TRACE_DYNAMIC_DEFINE_IMPL(module) \ + static uint8_t IA_CSS_TRACE_CAT(module, _trace_level_assert); \ + static uint8_t IA_CSS_TRACE_CAT(module, _trace_level_error); \ + static uint8_t IA_CSS_TRACE_CAT(module, _trace_level_warning); \ + static uint8_t IA_CSS_TRACE_CAT(module, _trace_level_info); \ + static uint8_t IA_CSS_TRACE_CAT(module, _trace_level_debug); \ + static uint8_t IA_CSS_TRACE_CAT(module, _trace_level_verbose); \ + \ + void IA_CSS_TRACE_CAT(module, _trace_assert_enable)(void) \ + { \ + IA_CSS_TRACE_CAT(module, _trace_level_assert) = 1; \ + } \ + \ + void IA_CSS_TRACE_CAT(module, _trace_assert_disable)(void) \ + { \ + IA_CSS_TRACE_CAT(module, _trace_level_assert) = 0; \ + } \ + \ + void IA_CSS_TRACE_CAT(module, _trace_error_enable)(void) \ + { \ + IA_CSS_TRACE_CAT(module, _trace_level_error) = 1; \ + } \ + \ + void 
IA_CSS_TRACE_CAT(module, _trace_error_disable)(void) \ + { \ + IA_CSS_TRACE_CAT(module, _trace_level_error) = 0; \ + } \ + \ + void IA_CSS_TRACE_CAT(module, _trace_warning_enable)(void) \ + { \ + IA_CSS_TRACE_CAT(module, _trace_level_warning) = 1; \ + } \ + \ + void IA_CSS_TRACE_CAT(module, _trace_warning_disable)(void) \ + { \ + IA_CSS_TRACE_CAT(module, _trace_level_warning) = 0; \ + } \ + \ + void IA_CSS_TRACE_CAT(module, _trace_info_enable)(void) \ + { \ + IA_CSS_TRACE_CAT(module, _trace_level_info) = 1; \ + } \ + \ + void IA_CSS_TRACE_CAT(module, _trace_info_disable)(void) \ + { \ + IA_CSS_TRACE_CAT(module, _trace_level_info) = 0; \ + } \ + \ + void IA_CSS_TRACE_CAT(module, _trace_debug_enable)(void) \ + { \ + IA_CSS_TRACE_CAT(module, _trace_level_debug) = 1; \ + } \ + \ + void IA_CSS_TRACE_CAT(module, _trace_debug_disable)(void) \ + { \ + IA_CSS_TRACE_CAT(module, _trace_level_debug) = 0; \ + } \ + \ + void IA_CSS_TRACE_CAT(module, _trace_verbose_enable)(void) \ + { \ + IA_CSS_TRACE_CAT(module, _trace_level_verbose) = 1; \ + } \ + \ + void IA_CSS_TRACE_CAT(module, _trace_verbose_disable)(void) \ + { \ + IA_CSS_TRACE_CAT(module, _trace_level_verbose) = 0; \ + } + +#define IA_CSS_TRACE_DYNAMIC_DEFINE_CONFIG_FUNC_IMPL(module) \ +void IA_CSS_TRACE_CAT(module, _trace_configure)(const int argc, \ + const char *const *const argv) \ +{ \ + int i = 1; \ + const char *levels = 0; \ + \ + while (i < argc) { \ + if (!strcmp(argv[i], "-" #module "_trace")) { \ + ++i; \ + \ + if (i < argc) { \ + levels = argv[i]; \ + \ + while (*levels) { \ + switch (*levels++) { \ + case 'a': \ + IA_CSS_TRACE_CAT \ + (module, _trace_assert_enable)(); \ + break; \ + \ + case 'e': \ + IA_CSS_TRACE_CAT \ + (module, _trace_error_enable)(); \ + break; \ + \ + case 'w': \ + IA_CSS_TRACE_CAT \ + (module, _trace_warning_enable)(); \ + break; \ + \ + case 'i': \ + IA_CSS_TRACE_CAT \ + (module, _trace_info_enable)(); \ + break; \ + \ + case 'd': \ + IA_CSS_TRACE_CAT \ + (module, 
_trace_debug_enable)(); \ + break; \ + \ + case 'v': \ + IA_CSS_TRACE_CAT \ + (module, _trace_verbose_enable)(); \ + break; \ + \ + default: break; \ + } \ + } \ + } \ + } \ + \ + ++i; \ + } \ +} + +#endif /* __IA_CSS_TRACE_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/trace/trace.mk b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/trace/trace.mk new file mode 100644 index 0000000000000..b232880b882bd --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/trace/trace.mk @@ -0,0 +1,40 @@ +# # # +# Support for Intel Camera Imaging ISP subsystem. +# Copyright (c) 2010 - 2018, Intel Corporation. +# +# This program is free software; you can redistribute it and/or modify it +# under the terms and conditions of the GNU General Public License, +# version 2, as published by the Free Software Foundation. +# +# This program is distributed in the hope it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +# FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU General Public License for +# more details +# +# +# MODULE Trace + +# Dependencies +IA_CSS_TRACE_SUPPORT = $${MODULES_DIR}/support + +# API +IA_CSS_TRACE = $${MODULES_DIR}/trace +IA_CSS_TRACE_INTERFACE = $(IA_CSS_TRACE)/interface + +# +# Host +# + +# Host CPP Flags +IA_CSS_TRACE_HOST_CPPFLAGS += -I$(IA_CSS_TRACE_SUPPORT) +IA_CSS_TRACE_HOST_CPPFLAGS += -I$(IA_CSS_TRACE_INTERFACE) +IA_CSS_TRACE_HOST_CPPFLAGS += -I$(IA_CSS_TRACE)/trace_modules + +# +# Firmware +# + +# Firmware CPP Flags +IA_CSS_TRACE_FW_CPPFLAGS += -I$(IA_CSS_TRACE_SUPPORT) +IA_CSS_TRACE_FW_CPPFLAGS += -I$(IA_CSS_TRACE_INTERFACE) +IA_CSS_TRACE_FW_CPPFLAGS += -I$(IA_CSS_TRACE)/trace_modules diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/vied/vied/shared_memory_access.h b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/vied/vied/shared_memory_access.h new file mode 100755 index 0000000000000..1e81bad9f4eec --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/vied/vied/shared_memory_access.h @@ -0,0 +1,139 @@ +/* +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+*/ +#ifndef _SHARED_MEMORY_ACCESS_H +#define _SHARED_MEMORY_ACCESS_H + +#include +#include +#include + +typedef enum { + sm_esuccess, + sm_enomem, + sm_ezeroalloc, + sm_ebadvaddr, + sm_einternalerror, + sm_ecorruption, + sm_enocontiguousmem, + sm_enolocmem, + sm_emultiplefree, +} shared_memory_error; + +/** + * \brief Virtual address of (DDR) shared memory space as seen from the VIED subsystem + */ +typedef uint32_t vied_virtual_address_t; + +/** + * \brief Virtual address of (DDR) shared memory space as seen from the host + */ +typedef unsigned long long host_virtual_address_t; + +/** + * \brief List of physical addresses of (DDR) shared memory space. This is used to represent a list of physical pages. + */ +typedef struct shared_memory_physical_page_list_s *shared_memory_physical_page_list; +typedef struct shared_memory_physical_page_list_s +{ + shared_memory_physical_page_list next; + vied_physical_address_t address; +}shared_memory_physical_page_list_s; + + +/** + * \brief Initialize the shared memory interface administration on the host. + * \param idm: id of ddr memory + * \param host_ddr_addr: physical address of memory as seen from host + * \param memory_size: size of ddr memory in bytes + * \param ps: size of page in bytes (for instance 4096) + */ +int shared_memory_allocation_initialize(vied_memory_t idm, vied_physical_address_t host_ddr_addr, size_t memory_size, size_t ps); + +/** + * \brief De-initialize the shared memory interface administration on the host. + * + */ +void shared_memory_allocation_uninitialize(vied_memory_t idm); + +/** + * \brief Allocate (DDR) shared memory space and return a host virtual address. Returns NULL when insufficient memory available + */ +host_virtual_address_t shared_memory_alloc(vied_memory_t idm, size_t bytes); + +/** + * \brief Free (DDR) shared memory space. +*/ +void shared_memory_free(vied_memory_t idm, host_virtual_address_t addr); + +/** + * \brief Translate a virtual host.address to a physical address. 
+*/ +vied_physical_address_t shared_memory_virtual_host_to_physical_address (vied_memory_t idm, host_virtual_address_t addr); + +/** + * \brief Return the allocated physical pages for a virtual host.address. +*/ +shared_memory_physical_page_list shared_memory_virtual_host_to_physical_pages (vied_memory_t idm, host_virtual_address_t addr); + +/** + * \brief Destroy a shared_memory_physical_page_list. +*/ +void shared_memory_physical_pages_list_destroy (shared_memory_physical_page_list ppl); + +/** + * \brief Store a byte into (DDR) shared memory space using a host virtual address + */ +void shared_memory_store_8 (vied_memory_t idm, host_virtual_address_t addr, uint8_t data); + +/** + * \brief Store a 16-bit word into (DDR) shared memory space using a host virtual address + */ +void shared_memory_store_16(vied_memory_t idm, host_virtual_address_t addr, uint16_t data); + +/** + * \brief Store a 32-bit word into (DDR) shared memory space using a host virtual address + */ +void shared_memory_store_32(vied_memory_t idm, host_virtual_address_t addr, uint32_t data); + +/** + * \brief Store a number of bytes into (DDR) shared memory space using a host virtual address + */ +void shared_memory_store(vied_memory_t idm, host_virtual_address_t addr, const void *data, size_t bytes); + +/** + * \brief Set a number of bytes of (DDR) shared memory space to 0 using a host virtual address + */ +void shared_memory_zero(vied_memory_t idm, host_virtual_address_t addr, size_t bytes); + +/** + * \brief Load a byte from (DDR) shared memory space using a host virtual address + */ +uint8_t shared_memory_load_8 (vied_memory_t idm, host_virtual_address_t addr); + +/** + * \brief Load a 16-bit word from (DDR) shared memory space using a host virtual address + */ +uint16_t shared_memory_load_16(vied_memory_t idm, host_virtual_address_t addr); + +/** + * \brief Load a 32-bit word from (DDR) shared memory space using a host virtual address + */ +uint32_t shared_memory_load_32(vied_memory_t idm, 
host_virtual_address_t addr); + +/** + * \brief Load a number of bytes from (DDR) shared memory space using a host virtual address + */ +void shared_memory_load(vied_memory_t idm, host_virtual_address_t addr, void *data, size_t bytes); + +#endif /* _SHARED_MEMORY_ACCESS_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/vied/vied/shared_memory_map.h b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/vied/vied/shared_memory_map.h new file mode 100755 index 0000000000000..1bbedcf9e7fd8 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/vied/vied/shared_memory_map.h @@ -0,0 +1,53 @@ +/* +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ +#ifndef _SHARED_MEMORY_MAP_H +#define _SHARED_MEMORY_MAP_H + +#include +#include +#include + +typedef void (*shared_memory_invalidate_mmu_tlb)(void); +typedef void (*shared_memory_set_page_table_base_address)(vied_physical_address_t); + +typedef void (*shared_memory_invalidate_mmu_tlb_ssid)(vied_subsystem_t id); +typedef void (*shared_memory_set_page_table_base_address_ssid)(vied_subsystem_t id, vied_physical_address_t); + +/** + * \brief Initialize the CSS virtual address system and MMU. The subsystem id will NOT be taken into account. 
+*/ +int shared_memory_map_initialize(vied_subsystem_t id, vied_memory_t idm, size_t mmu_ps, size_t mmu_pnrs, vied_physical_address_t ddr_addr, shared_memory_invalidate_mmu_tlb inv_tlb, shared_memory_set_page_table_base_address sbt); + +/** + * \brief Initialize the CSS virtual address system and MMU. The subsystem id will be taken into account. +*/ +int shared_memory_map_initialize_ssid(vied_subsystem_t id, vied_memory_t idm, size_t mmu_ps, size_t mmu_pnrs, vied_physical_address_t ddr_addr, shared_memory_invalidate_mmu_tlb_ssid inv_tlb, shared_memory_set_page_table_base_address_ssid sbt); + +/** + * \brief De-initialize the CSS virtual address system and MMU. +*/ +void shared_memory_map_uninitialize(vied_subsystem_t id, vied_memory_t idm); + +/** + * \brief Convert a host virtual address to a CSS virtual address and update the MMU. +*/ +vied_virtual_address_t shared_memory_map(vied_subsystem_t id, vied_memory_t idm, host_virtual_address_t addr); + +/** + * \brief Free a CSS virtual address and update the MMU. +*/ +void shared_memory_unmap(vied_subsystem_t id, vied_memory_t idm, vied_virtual_address_t addr); + + +#endif /* _SHARED_MEMORY_MAP_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/vied/vied/vied_config.h b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/vied/vied/vied_config.h new file mode 100755 index 0000000000000..912f016ead241 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/vied/vied/vied_config.h @@ -0,0 +1,33 @@ +/* +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. 
+ * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ +#ifndef _HRT_VIED_CONFIG_H +#define _HRT_VIED_CONFIG_H + +/* Defines from the compiler: + * HRT_HOST - this is code running on the host + * HRT_CELL - this is code running on a cell + */ +#ifdef HRT_HOST +# define CFG_VIED_SUBSYSTEM_ACCESS_LIB_IMPL 1 +# undef CFG_VIED_SUBSYSTEM_ACCESS_INLINE_IMPL + +#elif defined (HRT_CELL) +# undef CFG_VIED_SUBSYSTEM_ACCESS_LIB_IMPL +# define CFG_VIED_SUBSYSTEM_ACCESS_INLINE_IMPL 1 + +#else /* !HRT_CELL */ +/* Allow neither HRT_HOST nor HRT_CELL for testing purposes */ +#endif /* !HRT_CELL */ + +#endif /* _HRT_VIED_CONFIG_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/vied/vied/vied_memory_access_types.h b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/vied/vied/vied_memory_access_types.h new file mode 100755 index 0000000000000..0b44492789e37 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/vied/vied/vied_memory_access_types.h @@ -0,0 +1,36 @@ +/* +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ +#ifndef _HRT_VIED_MEMORY_ACCESS_TYPES_H +#define _HRT_VIED_MEMORY_ACCESS_TYPES_H + +/** Types for the VIED memory access interface */ + +#include "vied_types.h" + +/** + * \brief An identifier for a system memory. 
+ * + * This identifier must be a compile-time constant. It is used in + * access to system memory. + */ +typedef unsigned int vied_memory_t; + +#ifndef __HIVECC +/** + * \brief The type for a physical address + */ +typedef unsigned long long vied_physical_address_t; +#endif + +#endif /* _HRT_VIED_MEMORY_ACCESS_TYPES_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/vied/vied/vied_subsystem_access.h b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/vied/vied/vied_subsystem_access.h new file mode 100755 index 0000000000000..674f5fb5b0f99 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/vied/vied/vied_subsystem_access.h @@ -0,0 +1,70 @@ +/* +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+*/ +#ifndef _HRT_VIED_SUBSYSTEM_ACCESS_H +#define _HRT_VIED_SUBSYSTEM_ACCESS_H + +#include +#include "vied_config.h" +#include "vied_subsystem_access_types.h" + +#if !defined(CFG_VIED_SUBSYSTEM_ACCESS_INLINE_IMPL) && \ + !defined(CFG_VIED_SUBSYSTEM_ACCESS_LIB_IMPL) +#error Implementation selection macro for vied subsystem access not defined +#endif + +#if defined(CFG_VIED_SUBSYSTEM_ACCESS_INLINE_IMPL) +#ifndef __HIVECC +#error "Inline implementation of subsystem access not supported for host" +#endif +#define _VIED_SUBSYSTEM_ACCESS_INLINE static __inline +#include "vied_subsystem_access_impl.h" +#else +#define _VIED_SUBSYSTEM_ACCESS_INLINE +#endif + +_VIED_SUBSYSTEM_ACCESS_INLINE +void vied_subsystem_store_8 (vied_subsystem_t dev, + vied_subsystem_address_t addr, uint8_t data); + +_VIED_SUBSYSTEM_ACCESS_INLINE +void vied_subsystem_store_16(vied_subsystem_t dev, + vied_subsystem_address_t addr, uint16_t data); + +_VIED_SUBSYSTEM_ACCESS_INLINE +void vied_subsystem_store_32(vied_subsystem_t dev, + vied_subsystem_address_t addr, uint32_t data); + +_VIED_SUBSYSTEM_ACCESS_INLINE +void vied_subsystem_store(vied_subsystem_t dev, + vied_subsystem_address_t addr, + const void *data, unsigned int size); + +_VIED_SUBSYSTEM_ACCESS_INLINE +uint8_t vied_subsystem_load_8 (vied_subsystem_t dev, + vied_subsystem_address_t addr); + +_VIED_SUBSYSTEM_ACCESS_INLINE +uint16_t vied_subsystem_load_16(vied_subsystem_t dev, + vied_subsystem_address_t addr); + +_VIED_SUBSYSTEM_ACCESS_INLINE +uint32_t vied_subsystem_load_32(vied_subsystem_t dev, + vied_subsystem_address_t addr); + +_VIED_SUBSYSTEM_ACCESS_INLINE +void vied_subsystem_load(vied_subsystem_t dev, + vied_subsystem_address_t addr, + void *data, unsigned int size); + +#endif /* _HRT_VIED_SUBSYSTEM_ACCESS_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/vied/vied/vied_subsystem_access_initialization.h b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/vied/vied/vied_subsystem_access_initialization.h new 
file mode 100755 index 0000000000000..81f4d08d5ae0e --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/vied/vied/vied_subsystem_access_initialization.h @@ -0,0 +1,44 @@ +/* +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ +#ifndef _HRT_VIED_SUBSYSTEM_ACCESS_INITIALIZE_H +#define _HRT_VIED_SUBSYSTEM_ACCESS_INITIALIZE_H + +#include "vied_subsystem_access_types.h" + +/** @brief Initialises the access of a subsystem. + * @param[in] system The subsystem for which the access has to be initialised. + * + * vied_subsystem_access_initialize initialises the access to a subsystem. + * It sets the base address of the subsystem. This base address is extracted from the hsd file. + * + */ +void +vied_subsystem_access_initialize(vied_subsystem_t system); + + +/** @brief Initialises the access of multiple subsystems. + * @param[in] nr_subsystems The number of subsystems for which the access has to be initialised. + * @param[in] base_addresses A pointer to an array of base addresses of subsystems. + * The size of this array must be "nr_subsystems". + * This array must be available during the accesses of the subsystem. + * + * vied_subsystems_access_initialize initialises the access to multiple subsystems. + * It sets the base addresses of the subsystems that are provided by the array base_addresses. 
+ * + */ +void +vied_subsystems_access_initialize( unsigned int nr_subsystems + , const vied_subsystem_base_address_t *base_addresses); + +#endif /* _HRT_VIED_SUBSYSTEM_ACCESS_INITIALIZE_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/vied/vied/vied_subsystem_access_types.h b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/vied/vied/vied_subsystem_access_types.h new file mode 100755 index 0000000000000..75fef6c4ddba2 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/vied/vied/vied_subsystem_access_types.h @@ -0,0 +1,34 @@ +/* +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ +#ifndef _HRT_VIED_SUBSYSTEM_ACCESS_TYPES_H +#define _HRT_VIED_SUBSYSTEM_ACCESS_TYPES_H + +/** Types for the VIED subsystem access interface */ +#include <stdint.h> + +/** \brief An identifier for a VIED subsystem. + * + * This identifier must be a compile-time constant. It is used in + * access to a VIED subsystem.
+ */ +typedef unsigned int vied_subsystem_t; + + +/** \brief An address within a VIED subsystem */ +typedef uint32_t vied_subsystem_address_t; + +/** \brief A base address of a VIED subsystem seen from the host */ +typedef unsigned long long vied_subsystem_base_address_t; + +#endif /* _HRT_VIED_SUBSYSTEM_ACCESS_TYPES_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/vied/vied/vied_types.h b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/vied/vied/vied_types.h new file mode 100755 index 0000000000000..0acfdbb00cfa3 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/vied/vied/vied_types.h @@ -0,0 +1,45 @@ +/* +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ +#ifndef _HRT_VIED_TYPES_H +#define _HRT_VIED_TYPES_H + +/** Types shared by VIED interfaces */ + +#include <stdint.h> + +/** \brief An address within a VIED subsystem + * + * This will eventually replace the vied_memory_address_t and vied_subsystem_address_t + */ +typedef uint32_t vied_address_t; + +/** \brief Memory address type + * + * A memory address is an offset within a memory. + */ +typedef uint32_t vied_memory_address_t; + +/** \brief Master port id */ +typedef int vied_master_port_id_t; + +/** + * \brief Require the existence of a certain type + * + * This macro can be used in interface header files to ensure that + * an implementation-defined type with a specified name exists.
+ */ +#define _VIED_REQUIRE_TYPE(T) enum { _VIED_SIZEOF_##T = sizeof(T) } + + +#endif /* _HRT_VIED_TYPES_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/vied_nci_acb/interface/vied_nci_acb_route_type.h b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/vied_nci_acb/interface/vied_nci_acb_route_type.h new file mode 100644 index 0000000000000..b09d9f4d5d427 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/vied_nci_acb/interface/vied_nci_acb_route_type.h @@ -0,0 +1,39 @@ +/* + * Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+ */ + +#ifndef VIED_NCI_ACB_ROUTE_TYPE_H_ +#define VIED_NCI_ACB_ROUTE_TYPE_H_ + +#include "type_support.h" + +typedef enum { + NCI_ACB_PORT_ISP = 0, + NCI_ACB_PORT_ACC = 1, + NCI_ACB_PORT_INVALID = 0xFF +} nci_acb_port_t; + +typedef struct { + /* 0 = ISP, 1 = Acc */ + nci_acb_port_t in_select; + /* 0 = ISP, 1 = Acc */ + nci_acb_port_t out_select; + /* When set, Ack will be sent only when Eof arrives */ + uint32_t ignore_line_num; + /* Fork adapter to enable streaming to both output + * (next acb out and isp out) + */ + uint32_t fork_acb_output; +} nci_acb_route_t; + +#endif /* VIED_NCI_ACB_ROUTE_TYPE_H_ */ diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/vied_parameters/interface/ia_css_param_storage_class.h b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/vied_parameters/interface/ia_css_param_storage_class.h new file mode 100644 index 0000000000000..1ea7e729078c2 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/vied_parameters/interface/ia_css_param_storage_class.h @@ -0,0 +1,28 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+*/ + +#ifndef __IA_CSS_PARAM_STORAGE_CLASS_H +#define __IA_CSS_PARAM_STORAGE_CLASS_H + +#include "storage_class.h" + +#ifndef __INLINE_PARAMETERS__ +#define IA_CSS_PARAMETERS_STORAGE_CLASS_H STORAGE_CLASS_EXTERN +#define IA_CSS_PARAMETERS_STORAGE_CLASS_C +#else +#define IA_CSS_PARAMETERS_STORAGE_CLASS_H STORAGE_CLASS_INLINE +#define IA_CSS_PARAMETERS_STORAGE_CLASS_C STORAGE_CLASS_INLINE +#endif + +#endif /* __IA_CSS_PARAM_STORAGE_CLASS_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/vied_parameters/interface/ia_css_terminal.h b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/vied_parameters/interface/ia_css_terminal.h new file mode 100644 index 0000000000000..4cc71be3fc389 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/vied_parameters/interface/ia_css_terminal.h @@ -0,0 +1,188 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+*/ + +#ifndef __IA_CSS_TERMINAL_H +#define __IA_CSS_TERMINAL_H + +#include "type_support.h" +#include "ia_css_terminal_types.h" +#include "ia_css_param_storage_class.h" + +IA_CSS_PARAMETERS_STORAGE_CLASS_H +unsigned int ia_css_param_in_terminal_get_descriptor_size( + const unsigned int nof_sections +); + +IA_CSS_PARAMETERS_STORAGE_CLASS_H +ia_css_param_section_desc_t * +ia_css_param_in_terminal_get_param_section_desc( + const ia_css_param_terminal_t *param_terminal, + const unsigned int section_index +); + +IA_CSS_PARAMETERS_STORAGE_CLASS_H +unsigned int ia_css_param_out_terminal_get_descriptor_size( + const unsigned int nof_sections, + const unsigned int nof_fragments +); + +IA_CSS_PARAMETERS_STORAGE_CLASS_H +ia_css_param_section_desc_t * +ia_css_param_out_terminal_get_param_section_desc( + const ia_css_param_terminal_t *param_terminal, + const unsigned int section_index, + const unsigned int nof_sections, + const unsigned int fragment_index +); + +IA_CSS_PARAMETERS_STORAGE_CLASS_H +int ia_css_param_terminal_create( + ia_css_param_terminal_t *param_terminal, + const uint16_t terminal_offset, + const uint16_t terminal_size, + const uint16_t is_input_terminal +); + + +IA_CSS_PARAMETERS_STORAGE_CLASS_H +unsigned int ia_css_spatial_param_terminal_get_descriptor_size( + const unsigned int nof_frame_param_sections, + const unsigned int nof_fragments +); + +IA_CSS_PARAMETERS_STORAGE_CLASS_H +ia_css_fragment_grid_desc_t * +ia_css_spatial_param_terminal_get_fragment_grid_desc( + const ia_css_spatial_param_terminal_t *spatial_param_terminal, + const unsigned int fragment_index +); + +IA_CSS_PARAMETERS_STORAGE_CLASS_H +ia_css_frame_grid_param_section_desc_t * +ia_css_spatial_param_terminal_get_frame_grid_param_section_desc( + const ia_css_spatial_param_terminal_t *spatial_param_terminal, + const unsigned int section_index +); + +IA_CSS_PARAMETERS_STORAGE_CLASS_H +int ia_css_spatial_param_terminal_create( + ia_css_spatial_param_terminal_t *spatial_param_terminal, + const 
uint16_t terminal_offset, + const uint16_t terminal_size, + const uint16_t is_input_terminal, + const unsigned int nof_fragments, + const uint32_t kernel_id +); + + +IA_CSS_PARAMETERS_STORAGE_CLASS_H +unsigned int ia_css_sliced_param_terminal_get_descriptor_size( + const unsigned int nof_slice_param_sections, + const unsigned int nof_slices[], + const unsigned int nof_fragments +); + +IA_CSS_PARAMETERS_STORAGE_CLASS_H +ia_css_fragment_slice_desc_t * +ia_css_sliced_param_terminal_get_fragment_slice_desc( + const ia_css_sliced_param_terminal_t *sliced_param_terminal, + const unsigned int fragment_index +); + +IA_CSS_PARAMETERS_STORAGE_CLASS_H +ia_css_slice_param_section_desc_t * +ia_css_sliced_param_terminal_get_slice_param_section_desc( + const ia_css_sliced_param_terminal_t *sliced_param_terminal, + const unsigned int fragment_index, + const unsigned int slice_index, + const unsigned int section_index, + const unsigned int nof_slice_param_sections +); + +IA_CSS_PARAMETERS_STORAGE_CLASS_H +int ia_css_sliced_param_terminal_create( + ia_css_sliced_param_terminal_t *sliced_param_terminal, + const uint16_t terminal_offset, + const uint16_t terminal_size, + const uint16_t is_input_terminal, + const unsigned int nof_slice_param_sections, + const unsigned int nof_slices[], + const unsigned int nof_fragments, + const uint32_t kernel_id +); + + +IA_CSS_PARAMETERS_STORAGE_CLASS_H +unsigned int ia_css_program_terminal_get_descriptor_size( + const unsigned int nof_fragments, + const unsigned int nof_fragment_param_sections, + const unsigned int nof_kernel_fragment_sequencer_infos, + const unsigned int nof_command_objs +); + +IA_CSS_PARAMETERS_STORAGE_CLASS_H +ia_css_fragment_param_section_desc_t * +ia_css_program_terminal_get_frgmnt_prm_sct_desc( + const ia_css_program_terminal_t *program_terminal, + const unsigned int fragment_index, + const unsigned int section_index, + const unsigned int nof_fragment_param_sections +); + +IA_CSS_PARAMETERS_STORAGE_CLASS_H 
+ia_css_kernel_fragment_sequencer_info_desc_t * +ia_css_program_terminal_get_kernel_frgmnt_seq_info_desc( + const ia_css_program_terminal_t *program_terminal, + const unsigned int fragment_index, + const unsigned int info_index, + const unsigned int nof_kernel_fragment_sequencer_infos +); + +IA_CSS_PARAMETERS_STORAGE_CLASS_H +int ia_css_program_terminal_create( + ia_css_program_terminal_t *program_terminal, + const uint16_t terminal_offset, + const uint16_t terminal_size, + const unsigned int nof_fragments, + const unsigned int nof_kernel_fragment_sequencer_infos, + const unsigned int nof_command_objs +); + +IA_CSS_PARAMETERS_STORAGE_CLASS_H +int ia_css_program_terminal_get_command_base_offset( + const ia_css_program_terminal_t *program_terminal, + const unsigned int nof_fragments, + const unsigned int nof_kernel_fragment_sequencer_infos, + const unsigned int commands_slots_used, + uint16_t *command_desc_offset +); + +IA_CSS_PARAMETERS_STORAGE_CLASS_H +uint16_t *ia_css_program_terminal_get_line_count( + const ia_css_kernel_fragment_sequencer_command_desc_t + *kernel_fragment_sequencer_command_desc_base, + const unsigned int set_count +); + +IA_CSS_PARAMETERS_STORAGE_CLASS_H +unsigned int ia_css_spatial_param_terminal_get_descriptor_size( + const unsigned int nof_frame_param_sections, + const unsigned int nof_fragments +); + +#ifdef __INLINE_PARAMETERS__ +#include "ia_css_terminal_impl.h" +#endif /* __INLINE_PARAMETERS__ */ + +#endif /* __IA_CSS_TERMINAL_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/vied_parameters/interface/ia_css_terminal_manifest.h b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/vied_parameters/interface/ia_css_terminal_manifest.h new file mode 100644 index 0000000000000..ca0a436082cf6 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/vied_parameters/interface/ia_css_terminal_manifest.h @@ -0,0 +1,109 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. 
+ * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ + +#ifndef __IA_CSS_TERMINAL_MANIFEST_H +#define __IA_CSS_TERMINAL_MANIFEST_H + +#include "type_support.h" +#include "ia_css_param_storage_class.h" +#include "ia_css_terminal_manifest_types.h" + +IA_CSS_PARAMETERS_STORAGE_CLASS_H +unsigned int ia_css_param_terminal_manifest_get_size( + const unsigned int nof_sections +); + +IA_CSS_PARAMETERS_STORAGE_CLASS_H +int ia_css_param_terminal_manifest_init( + ia_css_param_terminal_manifest_t *param_terminal, + const uint16_t section_count +); + +IA_CSS_PARAMETERS_STORAGE_CLASS_H +ia_css_param_manifest_section_desc_t * +ia_css_param_terminal_manifest_get_prm_sct_desc( + const ia_css_param_terminal_manifest_t *param_terminal_manifest, + const unsigned int section_index +); + +IA_CSS_PARAMETERS_STORAGE_CLASS_H +unsigned int ia_css_spatial_param_terminal_manifest_get_size( + const unsigned int nof_frame_param_sections +); + +IA_CSS_PARAMETERS_STORAGE_CLASS_H +int ia_css_spatial_param_terminal_manifest_init( + ia_css_spatial_param_terminal_manifest_t *spatial_param_terminal, + const uint16_t section_count +); + +IA_CSS_PARAMETERS_STORAGE_CLASS_H +ia_css_frame_grid_param_manifest_section_desc_t * +ia_css_spatial_param_terminal_manifest_get_frm_grid_prm_sct_desc( + const ia_css_spatial_param_terminal_manifest_t * + spatial_param_terminal_manifest, + const unsigned int section_index +); + +IA_CSS_PARAMETERS_STORAGE_CLASS_H +unsigned int ia_css_sliced_param_terminal_manifest_get_size( + const unsigned int nof_slice_param_sections 
+); + +IA_CSS_PARAMETERS_STORAGE_CLASS_H +int ia_css_sliced_param_terminal_manifest_init( + ia_css_sliced_param_terminal_manifest_t *sliced_param_terminal, + const uint16_t section_count +); + +IA_CSS_PARAMETERS_STORAGE_CLASS_H +ia_css_sliced_param_manifest_section_desc_t * +ia_css_sliced_param_terminal_manifest_get_sliced_prm_sct_desc( + const ia_css_sliced_param_terminal_manifest_t * + sliced_param_terminal_manifest, + const unsigned int section_index +); + +IA_CSS_PARAMETERS_STORAGE_CLASS_H +unsigned int ia_css_program_terminal_manifest_get_size( + const unsigned int nof_fragment_param_sections, + const unsigned int nof_kernel_fragment_sequencer_infos +); + +IA_CSS_PARAMETERS_STORAGE_CLASS_H +int ia_css_program_terminal_manifest_init( + ia_css_program_terminal_manifest_t *program_terminal, + const uint16_t fragment_param_section_count, + const uint16_t kernel_fragment_seq_info_section_count +); + +IA_CSS_PARAMETERS_STORAGE_CLASS_H +ia_css_fragment_param_manifest_section_desc_t * +ia_css_program_terminal_manifest_get_frgmnt_prm_sct_desc( + const ia_css_program_terminal_manifest_t *program_terminal_manifest, + const unsigned int section_index +); + +IA_CSS_PARAMETERS_STORAGE_CLASS_H +ia_css_kernel_fragment_sequencer_info_manifest_desc_t * +ia_css_program_terminal_manifest_get_kernel_frgmnt_seq_info_desc( + const ia_css_program_terminal_manifest_t *program_terminal_manifest, + const unsigned int info_index +); + +#ifdef __INLINE_PARAMETERS__ +#include "ia_css_terminal_manifest_impl.h" +#endif /* __INLINE_PARAMETERS__ */ + +#endif /* __IA_CSS_TERMINAL_MANIFEST_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/vied_parameters/interface/ia_css_terminal_manifest_types.h b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/vied_parameters/interface/ia_css_terminal_manifest_types.h new file mode 100644 index 0000000000000..fe146395a8f4f --- /dev/null +++ 
b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/vied_parameters/interface/ia_css_terminal_manifest_types.h @@ -0,0 +1,342 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ + +#ifndef __IA_CSS_TERMINAL_MANIFEST_TYPES_H +#define __IA_CSS_TERMINAL_MANIFEST_TYPES_H + + +#include "ia_css_terminal_defs.h" +#include "type_support.h" +#include "ia_css_base_types.h" +#include "ia_css_terminal_manifest_base_types.h" + +#define N_PADDING_UINT8_IN_PARAM_TERMINAL_MANIFEST_SEC_STRUCT 1 +#define SIZE_OF_PARAM_TERMINAL_MANIFEST_SEC_STRUCT_IN_BITS \ + (1 * IA_CSS_UINT32_T_BITS \ + + 3 * IA_CSS_UINT8_T_BITS \ + + N_PADDING_UINT8_IN_PARAM_TERMINAL_MANIFEST_SEC_STRUCT * IA_CSS_UINT8_T_BITS) + +/* =============== Cached Param Terminal Manifest - START ============== */ +struct ia_css_param_manifest_section_desc_s { + /* Maximum size of the related parameter region */ + uint32_t max_mem_size; + /* Indication of the kernel this parameter belongs to */ + uint8_t kernel_id; + /* Memory targeted by this section + * (Register MMIO Interface/DMEM/VMEM/GMEM etc) + */ + uint8_t mem_type_id; + /* Region id within the specified memory */ + uint8_t region_id; + /* align to 64 */ + uint8_t padding[N_PADDING_UINT8_IN_PARAM_TERMINAL_MANIFEST_SEC_STRUCT]; +}; + +typedef struct ia_css_param_manifest_section_desc_s + ia_css_param_manifest_section_desc_t; + + +#define N_PADDING_UINT8_IN_PARAM_TERMINAL_MAN_STRUCT 4 +#define SIZE_OF_PARAM_TERMINAL_MANIFEST_STRUCT_IN_BITS \ + 
(SIZE_OF_TERMINAL_MANIFEST_STRUCT_IN_BITS \ + + (2*IA_CSS_UINT16_T_BITS) \ + + (N_PADDING_UINT8_IN_PARAM_TERMINAL_MAN_STRUCT * IA_CSS_UINT8_T_BITS)) + +/* Frame constant parameters terminal manifest */ +struct ia_css_param_terminal_manifest_s { + /* Parameter terminal manifest base */ + ia_css_terminal_manifest_t base; + /* + * Number of cached parameter sections, coming from manifest + * but also shared by the terminal + */ + uint16_t param_manifest_section_desc_count; + /* + * Points to the variable array of + * struct ia_css_param_section_desc_s + */ + uint16_t param_manifest_section_desc_offset; + /* align to 64 */ + uint8_t padding[N_PADDING_UINT8_IN_PARAM_TERMINAL_MAN_STRUCT]; +}; + +typedef struct ia_css_param_terminal_manifest_s + ia_css_param_terminal_manifest_t; +/* ================= Cached Param Terminal Manifest - End ================ */ + + +/* ================= Spatial Param Terminal Manifest - START ============= */ + +#define SIZE_OF_FRAG_GRID_MAN_STRUCT_IN_BITS \ + ((IA_CSS_N_DATA_DIMENSION*IA_CSS_UINT16_T_BITS) \ + + (IA_CSS_N_DATA_DIMENSION*IA_CSS_UINT16_T_BITS)) + +struct ia_css_fragment_grid_manifest_desc_s { + /* Min resolution width/height of the spatial parameters + * for the fragment measured in compute units + */ + uint16_t min_fragment_grid_dimension[IA_CSS_N_DATA_DIMENSION]; + /* Max resolution width/height of the spatial parameters + * for the fragment measured in compute units + */ + uint16_t max_fragment_grid_dimension[IA_CSS_N_DATA_DIMENSION]; +}; + +typedef struct ia_css_fragment_grid_manifest_desc_s + ia_css_fragment_grid_manifest_desc_t; + +#define N_PADDING_UINT8_IN_FRAME_GRID_PARAM_MAN_SEC_STRUCT 1 +#define SIZE_OF_FRAME_GRID_PARAM_MAN_SEC_STRUCT_IN_BITS \ + (1 * IA_CSS_UINT32_T_BITS \ + + 3 * IA_CSS_UINT8_T_BITS \ + + N_PADDING_UINT8_IN_FRAME_GRID_PARAM_MAN_SEC_STRUCT * IA_CSS_UINT8_T_BITS) + +struct ia_css_frame_grid_param_manifest_section_desc_s { + /* Maximum buffer total size allowed for + * this frame of parameters + */ + 
uint32_t max_mem_size; + /* Memory space targeted by this section + * (Register MMIO Interface/DMEM/VMEM/GMEM etc) + */ + uint8_t mem_type_id; + /* Region id within the specified memory space */ + uint8_t region_id; + /* size in bytes of each compute unit for + * the specified memory space and region + */ + uint8_t elem_size; + /* align to 64 */ + uint8_t padding[N_PADDING_UINT8_IN_FRAME_GRID_PARAM_MAN_SEC_STRUCT]; +}; + +typedef struct ia_css_frame_grid_param_manifest_section_desc_s + ia_css_frame_grid_param_manifest_section_desc_t; + +#define SIZE_OF_FRAME_GRID_MAN_STRUCT_IN_BITS \ + ((IA_CSS_N_DATA_DIMENSION*IA_CSS_UINT16_T_BITS) \ + + (IA_CSS_N_DATA_DIMENSION*IA_CSS_UINT16_T_BITS)) + +struct ia_css_frame_grid_manifest_desc_s { + /* Min resolution width/height of the spatial parameters for + * the frame measured in compute units + */ + uint16_t min_frame_grid_dimension[IA_CSS_N_DATA_DIMENSION]; + /* Max resolution width/height of the spatial parameters for + * the frame measured in compute units + */ + uint16_t max_frame_grid_dimension[IA_CSS_N_DATA_DIMENSION]; +}; + +typedef struct ia_css_frame_grid_manifest_desc_s + ia_css_frame_grid_manifest_desc_t; + +#define N_PADDING_UINT8_IN_SPATIAL_PARAM_TERM_MAN_STRUCT 2 +#define SIZE_OF_SPATIAL_PARAM_TERM_MAN_STRUCT_IN_BITS \ + ((SIZE_OF_TERMINAL_MANIFEST_STRUCT_IN_BITS) \ + + (SIZE_OF_FRAME_GRID_MAN_STRUCT_IN_BITS) \ + + (SIZE_OF_FRAG_GRID_MAN_STRUCT_IN_BITS) \ + + (2 * IA_CSS_UINT16_T_BITS) \ + + (2 * IA_CSS_UINT8_T_BITS) \ + + (N_PADDING_UINT8_IN_SPATIAL_PARAM_TERM_MAN_STRUCT * \ + IA_CSS_UINT8_T_BITS)) + +struct ia_css_spatial_param_terminal_manifest_s { + /* Spatial Parameter terminal manifest base */ + ia_css_terminal_manifest_t base; + /* Contains limits for the frame spatial parameters */ + ia_css_frame_grid_manifest_desc_t frame_grid_desc; + /* + * Constains limits for the fragment spatial parameters + * - COMMON AMONG FRAGMENTS + */ + ia_css_fragment_grid_manifest_desc_t common_fragment_grid_desc; + /* + * 
Number of frame spatial parameter sections, they are set + * in slice-steps through frame processing + */ + uint16_t frame_grid_param_manifest_section_desc_count; + /* + * Points to the variable array of + * ia_css_frame_spatial_param_manifest_section_desc_t + */ + uint16_t frame_grid_param_manifest_section_desc_offset; + /* + * Indication of the kernel this spatial parameter terminal belongs to + * SHOULD MATCH TO INDEX AND BE USED ONLY FOR CHECK + */ + uint8_t kernel_id; + /* + * Groups together compute units in order to achieve alignment + * requirements for transfes and to achieve canonical frame + * representation + */ + uint8_t compute_units_p_elem; + /* align to 64 */ + uint8_t padding[N_PADDING_UINT8_IN_SPATIAL_PARAM_TERM_MAN_STRUCT]; +}; + +typedef struct ia_css_spatial_param_terminal_manifest_s + ia_css_spatial_param_terminal_manifest_t; + +/* ================= Spatial Param Terminal Manifest - END ================ */ + +/* ================= Sliced Param Terminal Manifest - START =============== */ + +#define N_PADDING_UINT8_IN_SLICED_TERMINAL_MAN_SECTION_STRUCT (2) +#define SIZE_OF_SLICED_PARAM_MAN_SEC_STRUCT_IN_BITS \ + (1 * IA_CSS_UINT32_T_BITS \ + + 2 * IA_CSS_UINT8_T_BITS \ + + N_PADDING_UINT8_IN_SLICED_TERMINAL_MAN_SECTION_STRUCT * IA_CSS_UINT8_T_BITS) + +struct ia_css_sliced_param_manifest_section_desc_s { + /* Maximum size of the related parameter region */ + uint32_t max_mem_size; + /* + * Memory targeted by this section + * (Register MMIO Interface/DMEM/VMEM/GMEM etc) + */ + uint8_t mem_type_id; + /* Region id within the specified memory */ + uint8_t region_id; + /* align to 64 */ + uint8_t padding[N_PADDING_UINT8_IN_SLICED_TERMINAL_MAN_SECTION_STRUCT]; +}; + +typedef struct ia_css_sliced_param_manifest_section_desc_s + ia_css_sliced_param_manifest_section_desc_t; + +#define N_PADDING_UINT8_IN_SLICED_TERMINAL_MANIFEST_STRUCT 3 +#define SIZE_OF_SLICED_TERMINAL_MANIFEST_STRUCT_IN_BITS \ + (SIZE_OF_TERMINAL_MANIFEST_STRUCT_IN_BITS \ + + 2 * 
IA_CSS_UINT16_T_BITS \ + + 1 * IA_CSS_UINT8_T_BITS \ + + N_PADDING_UINT8_IN_SLICED_TERMINAL_MANIFEST_STRUCT * IA_CSS_UINT8_T_BITS) + +/* Frame constant parameters terminal manifest */ +struct ia_css_sliced_param_terminal_manifest_s { + /* Spatial Parameter terminal base */ + ia_css_terminal_manifest_t base; + /* + * Number of the array elements + * sliced_param_section_offset points to + */ + uint16_t sliced_param_section_count; + /* + * Points to array of ia_css_sliced_param_manifest_section_desc_s + * which constain info for the slicing of the parameters + */ + uint16_t sliced_param_section_offset; + /* Kernel identifier */ + uint8_t kernel_id; + /* align to 64 */ + uint8_t padding[N_PADDING_UINT8_IN_SLICED_TERMINAL_MANIFEST_STRUCT]; +}; + +typedef struct ia_css_sliced_param_terminal_manifest_s + ia_css_sliced_param_terminal_manifest_t; + +/* ================= Slice Param Terminal Manifest - End =============== */ + +/* ================= Program Terminal Manifest - START ================= */ + +#define N_PADDING_UINT8_IN_FRAG_PARAM_MAN_SEC_STRUCT 1 +#define SIZE_OF_FRAG_PARAM_MAN_SEC_STRUCT_IN_BITS \ + (1 * IA_CSS_UINT32_T_BITS \ + + 3 * IA_CSS_UINT8_T_BITS \ + + N_PADDING_UINT8_IN_FRAG_PARAM_MAN_SEC_STRUCT * IA_CSS_UINT8_T_BITS) + +/* Fragment constant parameters manifest */ +struct ia_css_fragment_param_manifest_section_desc_s { + /* Maximum size of the related parameter region */ + uint32_t max_mem_size; + /* Indication of the kernel this parameter belongs to */ + uint8_t kernel_id; + /* Memory targeted by this section + * (Register MMIO Interface/DMEM/VMEM/GMEM etc) + */ + uint8_t mem_type_id; + /* Region id within the specified memory space */ + uint8_t region_id; + /* align to 64 */ + uint8_t padding[N_PADDING_UINT8_IN_FRAG_PARAM_MAN_SEC_STRUCT]; +}; + +typedef struct ia_css_fragment_param_manifest_section_desc_s + ia_css_fragment_param_manifest_section_desc_t; + +#define SIZE_OF_KERNEL_FRAG_SEQ_INFO_MAN_STRUCT_IN_BITS \ + 
(10*IA_CSS_N_DATA_DIMENSION*IA_CSS_UINT16_T_BITS) + +struct ia_css_kernel_fragment_sequencer_info_manifest_desc_s { + /* Slice dimensions */ + uint16_t min_fragment_grid_slice_dimension[IA_CSS_N_DATA_DIMENSION]; + /* Slice dimensions */ + uint16_t max_fragment_grid_slice_dimension[IA_CSS_N_DATA_DIMENSION]; + /* Nof slices */ + uint16_t min_fragment_grid_slice_count[IA_CSS_N_DATA_DIMENSION]; + /* Nof slices */ + uint16_t max_fragment_grid_slice_count[IA_CSS_N_DATA_DIMENSION]; + /* Grid point decimation factor */ + uint16_t + min_fragment_grid_point_decimation_factor[IA_CSS_N_DATA_DIMENSION]; + /* Grid point decimation factor */ + uint16_t + max_fragment_grid_point_decimation_factor[IA_CSS_N_DATA_DIMENSION]; + /* Relative position of grid origin to pixel origin */ + int16_t + min_fragment_grid_overlay_pixel_topleft_index[IA_CSS_N_DATA_DIMENSION]; + /* Relative position of grid origin to pixel origin */ + int16_t + max_fragment_grid_overlay_pixel_topleft_index[IA_CSS_N_DATA_DIMENSION]; + /* Dimension of grid */ + int16_t + min_fragment_grid_overlay_pixel_dimension[IA_CSS_N_DATA_DIMENSION]; + /* Dimension of grid */ + int16_t + max_fragment_grid_overlay_pixel_dimension[IA_CSS_N_DATA_DIMENSION]; +}; + +typedef struct ia_css_kernel_fragment_sequencer_info_manifest_desc_s + ia_css_kernel_fragment_sequencer_info_manifest_desc_t; + +#define N_PADDING_UINT8_IN_PROGRAM_TERM_MAN_STRUCT 2 +#define SIZE_OF_PROG_TERM_MAN_STRUCT_IN_BITS \ + ((SIZE_OF_TERMINAL_MANIFEST_STRUCT_IN_BITS) \ + + (IA_CSS_UINT32_T_BITS) \ + + (5*IA_CSS_UINT16_T_BITS) \ + + (N_PADDING_UINT8_IN_PROGRAM_TERM_MAN_STRUCT * IA_CSS_UINT8_T_BITS)) + +struct ia_css_program_terminal_manifest_s { + ia_css_terminal_manifest_t base; + /* Connection manager passes seq info as single blob at the moment */ + uint32_t sequencer_info_kernel_id; + /* Maximum number of command secriptors supported + * by the program group + */ + uint16_t max_kernel_fragment_sequencer_command_desc; + uint16_t 
fragment_param_manifest_section_desc_count; + uint16_t fragment_param_manifest_section_desc_offset; + uint16_t kernel_fragment_sequencer_info_manifest_info_count; + uint16_t kernel_fragment_sequencer_info_manifest_info_offset; + /* align to 64 */ + uint8_t padding[N_PADDING_UINT8_IN_PROGRAM_TERM_MAN_STRUCT]; +}; + +typedef struct ia_css_program_terminal_manifest_s + ia_css_program_terminal_manifest_t; + +/* ==================== Program Terminal Manifest - END ==================== */ + +#endif /* __IA_CSS_TERMINAL_MANIFEST_TYPES_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/vied_parameters/interface/ia_css_terminal_types.h b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/vied_parameters/interface/ia_css_terminal_types.h new file mode 100644 index 0000000000000..c5c89fb7ec917 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/vied_parameters/interface/ia_css_terminal_types.h @@ -0,0 +1,351 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+*/ + +#ifndef __IA_CSS_TERMINAL_TYPES_H +#define __IA_CSS_TERMINAL_TYPES_H + +#include "type_support.h" +#include "ia_css_base_types.h" +#include "ia_css_terminal_base_types.h" + + +typedef struct ia_css_program_control_init_load_section_desc_s + ia_css_program_control_init_load_section_desc_t; +typedef struct ia_css_program_control_init_connect_section_desc_s + ia_css_program_control_init_connect_section_desc_t; +typedef struct ia_css_program_control_init_program_desc_s + ia_css_program_control_init_program_desc_t; +typedef struct ia_css_program_control_init_terminal_s + ia_css_program_control_init_terminal_t; + +typedef struct ia_css_program_terminal_s ia_css_program_terminal_t; +typedef struct ia_css_fragment_param_section_desc_s + ia_css_fragment_param_section_desc_t; +typedef struct ia_css_kernel_fragment_sequencer_info_desc_s + ia_css_kernel_fragment_sequencer_info_desc_t; +typedef struct ia_css_kernel_fragment_sequencer_command_desc_s + ia_css_kernel_fragment_sequencer_command_desc_t; + +typedef struct ia_css_sliced_param_terminal_s ia_css_sliced_param_terminal_t; +typedef struct ia_css_fragment_slice_desc_s ia_css_fragment_slice_desc_t; +typedef struct ia_css_slice_param_section_desc_s + ia_css_slice_param_section_desc_t; + +typedef struct ia_css_spatial_param_terminal_s ia_css_spatial_param_terminal_t; +typedef struct ia_css_frame_grid_desc_s ia_css_frame_grid_desc_t; +typedef struct ia_css_frame_grid_param_section_desc_s + ia_css_frame_grid_param_section_desc_t; +typedef struct ia_css_fragment_grid_desc_s ia_css_fragment_grid_desc_t; + +typedef struct ia_css_param_terminal_s ia_css_param_terminal_t; +typedef struct ia_css_param_section_desc_s ia_css_param_section_desc_t; + +typedef struct ia_css_param_payload_s ia_css_param_payload_t; +typedef struct ia_css_terminal_s ia_css_terminal_t; + +/* =================== Generic Parameter Payload - START =================== */ +#define N_UINT64_IN_PARAM_PAYLOAD_STRUCT 1 +#define N_UINT32_IN_PARAM_PAYLOAD_STRUCT 1 

#define IA_CSS_PARAM_PAYLOAD_STRUCT_BITS \
	(N_UINT64_IN_PARAM_PAYLOAD_STRUCT * IA_CSS_UINT64_T_BITS \
	+ VIED_VADDRESS_BITS \
	+ N_UINT32_IN_PARAM_PAYLOAD_STRUCT * IA_CSS_UINT32_T_BITS)

struct ia_css_param_payload_s {
	/*
	 * Temporary variable holding the host address of the parameter buffer
	 * as PSYS is handling the parameters on the host side for the moment
	 */
	uint64_t host_buffer;
	/*
	 * Base virtual addresses to parameters in subsystem virtual
	 * memory space
	 * NOTE: Used in legacy pg flow
	 */
	vied_vaddress_t buffer;
	/*
	 * Offset to buffer address within external buffer set structure
	 * NOTE: Used in ppg flow
	 */
	uint32_t terminal_index;
};
/* =================== Generic Parameter Payload - End ==================== */


/* ==================== Cached Param Terminal - START ==================== */
#define N_UINT32_IN_PARAM_SEC_STRUCT 2

#define SIZE_OF_PARAM_SEC_STRUCT_BITS \
	(N_UINT32_IN_PARAM_SEC_STRUCT * IA_CSS_UINT32_T_BITS)

/* Frame constant parameters section */
struct ia_css_param_section_desc_s {
	/* Offset of the parameter allocation in memory */
	uint32_t mem_offset;
	/* Memory allocation size needs of this parameter */
	uint32_t mem_size;
};

#define N_UINT16_IN_PARAM_TERMINAL_STRUCT 1
#define N_PADDING_UINT8_IN_PARAM_TERMINAL_STRUCT 6

#define SIZE_OF_PARAM_TERMINAL_STRUCT_BITS \
	(SIZE_OF_TERMINAL_STRUCT_BITS \
	+ IA_CSS_PARAM_PAYLOAD_STRUCT_BITS \
	+ N_UINT16_IN_PARAM_TERMINAL_STRUCT * IA_CSS_UINT16_T_BITS \
	+ N_PADDING_UINT8_IN_PARAM_TERMINAL_STRUCT * IA_CSS_UINT8_T_BITS)

/* Frame constant parameters terminal */
struct ia_css_param_terminal_s {
	/* Parameter terminal base */
	ia_css_terminal_t base;
	/* Parameter buffer handle attached to the terminal */
	ia_css_param_payload_t param_payload;
	/* Points to the variable array of ia_css_param_section_desc_t */
	uint16_t param_section_desc_offset;
	/* Explicit pad so the struct size is a multiple of 64 bits */
	uint8_t padding[N_PADDING_UINT8_IN_PARAM_TERMINAL_STRUCT];
};
/* ==================== Cached Param Terminal - End ==================== */


/* ==================== Spatial Param Terminal - START ==================== */
#define N_UINT16_IN_FRAG_GRID_STRUCT (2 * IA_CSS_N_DATA_DIMENSION)

#define SIZE_OF_FRAG_GRID_STRUCT_BITS \
	(N_UINT16_IN_FRAG_GRID_STRUCT * IA_CSS_UINT16_T_BITS)

struct ia_css_fragment_grid_desc_s {
	/*
	 * Offset width/height of the top-left compute unit of the
	 * fragment compared to the frame
	 */
	uint16_t fragment_grid_index[IA_CSS_N_DATA_DIMENSION];
	/*
	 * Resolution width/height of the spatial parameters that
	 * correspond to the fragment measured in compute units
	 */
	uint16_t fragment_grid_dimension[IA_CSS_N_DATA_DIMENSION];
};

#define N_UINT32_IN_FRAME_GRID_PARAM_SEC_STRUCT 3
#define N_PADDING_UINT8_IN_FRAME_GRID_PARAM_SEC_STRUCT 4

#define SIZE_OF_FRAME_GRID_PARAM_SEC_STRUCT_BITS \
	(N_UINT32_IN_FRAME_GRID_PARAM_SEC_STRUCT * IA_CSS_UINT32_T_BITS \
	+ N_PADDING_UINT8_IN_FRAME_GRID_PARAM_SEC_STRUCT * IA_CSS_UINT8_T_BITS)

/*
 * A plane of parameters with spatial aspect
 * (compute units correlated to pixel data)
 */
struct ia_css_frame_grid_param_section_desc_s {
	/* Offset of the parameter allocation in memory */
	uint32_t mem_offset;
	/* Memory allocation size needs of this parameter */
	uint32_t mem_size;
	/*
	 * stride in bytes of each line of compute units for
	 * the specified memory space and region
	 */
	uint32_t stride;
	uint8_t padding[N_PADDING_UINT8_IN_FRAME_GRID_PARAM_SEC_STRUCT];
};

#define N_UINT16_IN_FRAME_GRID_STRUCT_STRUCT IA_CSS_N_DATA_DIMENSION
#define N_PADDING_UINT8_IN_FRAME_GRID_STRUCT 4

#define SIZE_OF_FRAME_GRID_STRUCT_BITS \
	(N_UINT16_IN_FRAME_GRID_STRUCT_STRUCT * IA_CSS_UINT16_T_BITS \
	+ N_PADDING_UINT8_IN_FRAME_GRID_STRUCT * IA_CSS_UINT8_T_BITS)

struct ia_css_frame_grid_desc_s {
	/* Resolution width/height of the frame of
	 * spatial parameters measured in compute units
	 */
	uint16_t frame_grid_dimension[IA_CSS_N_DATA_DIMENSION];
	uint8_t padding[N_PADDING_UINT8_IN_FRAME_GRID_STRUCT];
};

#define N_UINT32_IN_SPATIAL_PARAM_TERM_STRUCT 1
#define N_UINT16_IN_SPATIAL_PARAM_TERM_STRUCT 2

#define SIZE_OF_SPATIAL_PARAM_TERM_STRUCT_BITS \
	(SIZE_OF_TERMINAL_STRUCT_BITS \
	+ IA_CSS_PARAM_PAYLOAD_STRUCT_BITS \
	+ SIZE_OF_FRAME_GRID_STRUCT_BITS \
	+ N_UINT32_IN_SPATIAL_PARAM_TERM_STRUCT * IA_CSS_UINT32_T_BITS \
	+ N_UINT16_IN_SPATIAL_PARAM_TERM_STRUCT * IA_CSS_UINT16_T_BITS)

struct ia_css_spatial_param_terminal_s {
	/* Spatial Parameter terminal base */
	ia_css_terminal_t base;
	/* Spatial Parameter buffer handle attached to the terminal */
	ia_css_param_payload_t param_payload;
	/* Contains info for the frame of spatial parameters */
	ia_css_frame_grid_desc_t frame_grid_desc;
	/* Kernel identifier */
	uint32_t kernel_id;
	/*
	 * Points to the variable array of
	 * ia_css_frame_grid_param_section_desc_t
	 */
	uint16_t frame_grid_param_section_desc_offset;
	/*
	 * Points to array of ia_css_fragment_spatial_desc_t
	 * which contain info for the fragments of spatial parameters
	 */
	uint16_t fragment_grid_desc_offset;
};
/* ==================== Spatial Param Terminal - END ==================== */


/* ==================== Sliced Param Terminal - START ==================== */
#define N_UINT32_IN_SLICE_PARAM_SECTION_DESC_STRUCT 2

#define SIZE_OF_SLICE_PARAM_SECTION_DESC_STRUCT_BITS \
	(N_UINT32_IN_SLICE_PARAM_SECTION_DESC_STRUCT * IA_CSS_UINT32_T_BITS)

/* A slice of parameters ready to be transferred from/to registers */
struct ia_css_slice_param_section_desc_s {
	/* Offset of the parameter allocation in memory */
	uint32_t mem_offset;
	/* Memory allocation size needs of this parameter */
	uint32_t mem_size;
};

#define N_UINT16_IN_FRAGMENT_SLICE_DESC_STRUCT 2
#define N_PADDING_UINT8_FRAGMENT_SLICE_DESC_STRUCT 4

#define SIZE_OF_FRAGMENT_SLICE_DESC_STRUCT_BITS \
	(N_UINT16_IN_FRAGMENT_SLICE_DESC_STRUCT * IA_CSS_UINT16_T_BITS \
	+ N_PADDING_UINT8_FRAGMENT_SLICE_DESC_STRUCT * IA_CSS_UINT8_T_BITS)

struct ia_css_fragment_slice_desc_s {
	/*
	 * Points to array of ia_css_slice_param_section_desc_t
	 * which contain info for each parameter slice
	 */
	uint16_t slice_section_desc_offset;
	/* Number of slices for the parameters for this fragment */
	uint16_t slice_count;
	uint8_t padding[N_PADDING_UINT8_FRAGMENT_SLICE_DESC_STRUCT];
};

#define N_UINT32_IN_SLICED_PARAM_TERMINAL_STRUCT 1
#define N_UINT16_IN_SLICED_PARAM_TERMINAL_STRUCT 1
#define N_PADDING_UINT8_SLICED_PARAM_TERMINAL_STRUCT 2

#define SIZE_OF_SLICED_PARAM_TERM_STRUCT_BITS \
	(SIZE_OF_TERMINAL_STRUCT_BITS \
	+ IA_CSS_PARAM_PAYLOAD_STRUCT_BITS \
	+ N_UINT32_IN_SLICED_PARAM_TERMINAL_STRUCT * IA_CSS_UINT32_T_BITS \
	+ N_UINT16_IN_SLICED_PARAM_TERMINAL_STRUCT * IA_CSS_UINT16_T_BITS \
	+ N_PADDING_UINT8_SLICED_PARAM_TERMINAL_STRUCT * IA_CSS_UINT8_T_BITS)

struct ia_css_sliced_param_terminal_s {
	/* Sliced Parameter terminal base */
	ia_css_terminal_t base;
	/* Sliced Parameter buffer handle attached to the terminal */
	ia_css_param_payload_t param_payload;
	/* Kernel identifier */
	uint32_t kernel_id;
	/*
	 * Points to array of ia_css_fragment_slice_desc_t
	 * which contain info for the slicing of the parameters
	 */
	uint16_t fragment_slice_desc_offset;
	uint8_t padding[N_PADDING_UINT8_SLICED_PARAM_TERMINAL_STRUCT];
};
/* ==================== Sliced Param Terminal - END ==================== */


/* ==================== Program Terminal - START ==================== */

#define N_UINT32_IN_FRAG_PARAM_SEC_STRUCT 2

#define SIZE_OF_FRAG_PARAM_SEC_STRUCT_BITS \
	(N_UINT32_IN_FRAG_PARAM_SEC_STRUCT * IA_CSS_UINT32_T_BITS)

/* Fragment constant parameters section */
struct ia_css_fragment_param_section_desc_s {
	/* Offset of the parameter allocation in memory */
	uint32_t mem_offset;
	/* Memory allocation size needs of this parameter */
	uint32_t mem_size;
};

#define N_UINT16_IN_FRAG_SEQ_COMMAND_STRUCT IA_CSS_N_COMMAND_COUNT

#define SIZE_OF_FRAG_SEQ_COMMANDS_STRUCT_BITS \
	(N_UINT16_IN_FRAG_SEQ_COMMAND_STRUCT * IA_CSS_UINT16_T_BITS)

/* 4 commands packed together to save memory space */
struct ia_css_kernel_fragment_sequencer_command_desc_s {
	/* Contains the "(command_index%4) == index" command desc */
	uint16_t line_count[IA_CSS_N_COMMAND_COUNT];
};

#define N_UINT16_IN_FRAG_SEQ_INFO_STRUCT (5 * IA_CSS_N_DATA_DIMENSION + 2)

#define SIZE_OF_FRAG_SEQ_INFO_STRUCT_BITS \
	(N_UINT16_IN_FRAG_SEQ_INFO_STRUCT * IA_CSS_UINT16_T_BITS)

struct ia_css_kernel_fragment_sequencer_info_desc_s {
	/* Slice dimensions */
	uint16_t fragment_grid_slice_dimension[IA_CSS_N_DATA_DIMENSION];
	/* Nof slices */
	uint16_t fragment_grid_slice_count[IA_CSS_N_DATA_DIMENSION];
	/* Grid point decimation factor */
	uint16_t
	fragment_grid_point_decimation_factor[IA_CSS_N_DATA_DIMENSION];
	/* Relative position of grid origin to pixel origin */
	int16_t
	fragment_grid_overlay_pixel_topleft_index[IA_CSS_N_DATA_DIMENSION];
	/* Size of active fragment region */
	int16_t
	fragment_grid_overlay_pixel_dimension[IA_CSS_N_DATA_DIMENSION];
	/* If >0 it overrides the standard fragment sequencer info */
	uint16_t command_count;
	/*
	 * To be used only if command_count>0, points to the descriptors
	 * for the commands (ia_css_kernel_fragment_sequencer_command_desc_s)
	 */
	uint16_t command_desc_offset;
};

#define N_UINT16_IN_PROG_TERM_STRUCT 2
#define N_PADDING_UINT8_IN_PROG_TERM_STRUCT 4

#define SIZE_OF_PROG_TERM_STRUCT_BITS \
	(SIZE_OF_TERMINAL_STRUCT_BITS \
	+ IA_CSS_PARAM_PAYLOAD_STRUCT_BITS \
	+ N_UINT16_IN_PROG_TERM_STRUCT * IA_CSS_UINT16_T_BITS \
	+ N_PADDING_UINT8_IN_PROG_TERM_STRUCT * IA_CSS_UINT8_T_BITS)

struct ia_css_program_terminal_s {
	/* Program terminal base */
	ia_css_terminal_t base;
	/* Program terminal buffer handle attached to the terminal */
	ia_css_param_payload_t param_payload;
	/* Points to array of ia_css_fragment_param_desc_s */
	uint16_t fragment_param_section_desc_offset;
	/* Points to array of ia_css_kernel_fragment_sequencer_info_s */
	uint16_t kernel_fragment_sequencer_info_desc_offset;
	/* align to 64 */
	uint8_t padding[N_PADDING_UINT8_IN_PROG_TERM_STRUCT];
};
/* ==================== Program Terminal - END ==================== */

#endif /* __IA_CSS_TERMINAL_TYPES_H */
diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/vied_parameters/src/ia_css_terminal.c b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/vied_parameters/src/ia_css_terminal.c
new file mode 100644
index 0000000000000..683fb3a88cd87
--- /dev/null
+++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/vied_parameters/src/ia_css_terminal.c
@@ -0,0 +1,20 @@
/**
* Support for Intel Camera Imaging ISP subsystem.
 * Copyright (c) 2010 - 2018, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
*/

/*
 * Build-mode switch: when __INLINE_PARAMETERS__ is defined the accessor
 * implementations are inlined into every includer of the impl header, so
 * this translation unit would otherwise be empty (ISO C forbids an empty
 * TU) — the dummy inline function below keeps it legal. In the normal
 * build the implementations are compiled here exactly once.
 */
#ifdef __INLINE_PARAMETERS__
#include "storage_class.h"
STORAGE_CLASS_INLINE int __ia_css_param_avoid_warning_on_empty_file(void) { return 0; }
#else /* __INLINE_PARAMETERS__ */
#include "ia_css_terminal_impl.h"
#endif /* __INLINE_PARAMETERS__ */
diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/vied_parameters/src/ia_css_terminal_impl.h b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/vied_parameters/src/ia_css_terminal_impl.h
new file mode 100644
index 0000000000000..9ccf3931e8e3d
--- /dev/null
+++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/vied_parameters/src/ia_css_terminal_impl.h
@@ -0,0 +1,495 @@
/**
* Support for Intel Camera Imaging ISP subsystem.
 * Copyright (c) 2010 - 2018, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
*/

#ifndef __IA_CSS_TERMINAL_IMPL_H
#define __IA_CSS_TERMINAL_IMPL_H

#include "ia_css_terminal.h"
#include "ia_css_terminal_types.h"
#include "error_support.h"
#include "assert_support.h"
#include "storage_class.h"

/* Param Terminal */

/* Bytes needed for an input param terminal with nof_sections descriptors. */
IA_CSS_PARAMETERS_STORAGE_CLASS_C
unsigned int ia_css_param_in_terminal_get_descriptor_size(
	const unsigned int nof_sections)
{
	return sizeof(ia_css_param_terminal_t) +
		nof_sections*sizeof(ia_css_param_section_desc_t);
}

/*
 * Return the section_index'th section descriptor stored after the terminal
 * header; NULL if param_terminal is NULL.
 * NOTE(review): section_index is not bounds-checked here — callers must pass
 * an index below the count used at creation time.
 */
IA_CSS_PARAMETERS_STORAGE_CLASS_C
ia_css_param_section_desc_t *ia_css_param_in_terminal_get_param_section_desc(
	const ia_css_param_terminal_t *param_terminal,
	const unsigned int section_index)
{
	ia_css_param_section_desc_t *param_section_base;
	ia_css_param_section_desc_t *param_section_desc = NULL;

	verifjmpexit(param_terminal != NULL);

	param_section_base =
		(ia_css_param_section_desc_t *)
		(((const char *)param_terminal) +
		param_terminal->param_section_desc_offset);
	param_section_desc = &(param_section_base[section_index]);

EXIT:
	return param_section_desc;
}

/* Bytes needed for an output param terminal: one section set per fragment. */
IA_CSS_PARAMETERS_STORAGE_CLASS_C
unsigned int ia_css_param_out_terminal_get_descriptor_size(
	const unsigned int nof_sections,
	const unsigned int nof_fragments)
{
	return sizeof(ia_css_param_terminal_t) +
		nof_fragments*nof_sections*sizeof(ia_css_param_section_desc_t);
}

/*
 * Return the section descriptor for (fragment_index, section_index) in the
 * fragment-major section array; NULL if param_terminal is NULL.
 */
IA_CSS_PARAMETERS_STORAGE_CLASS_C
ia_css_param_section_desc_t *ia_css_param_out_terminal_get_param_section_desc(
	const ia_css_param_terminal_t *param_terminal,
	const unsigned int section_index,
	const unsigned int nof_sections,
	const unsigned int fragment_index)
{
	ia_css_param_section_desc_t *param_section_base;
	ia_css_param_section_desc_t *param_section_desc = NULL;

	verifjmpexit(param_terminal != NULL);

	param_section_base =
		(ia_css_param_section_desc_t *)
		(((const char *)param_terminal) +
		param_terminal->param_section_desc_offset);
	param_section_desc =
		&(param_section_base[(nof_sections * fragment_index) +
		section_index]);

EXIT:
	return param_section_desc;
}

/*
 * Initialize a cached param terminal header in caller-provided storage.
 * parent_offset is stored as the negated terminal_offset.
 * Returns 0, -EFAULT on NULL, -EINVAL on out-of-range offset.
 * NOTE(review): the guard allows terminal_offset == (1<<15); the subsequent
 * (int16_t) cast of 32768 is implementation-defined — presumably the check
 * was meant to be ">=". TODO confirm against callers.
 */
IA_CSS_PARAMETERS_STORAGE_CLASS_C
int ia_css_param_terminal_create(
	ia_css_param_terminal_t *param_terminal,
	const uint16_t terminal_offset,
	const uint16_t terminal_size,
	const uint16_t is_input_terminal)
{
	if (param_terminal == NULL) {
		return -EFAULT;
	}

	if (terminal_offset > (1<<15)) {
		return -EINVAL;
	}

	param_terminal->base.terminal_type =
		is_input_terminal ?
		IA_CSS_TERMINAL_TYPE_PARAM_CACHED_IN :
		IA_CSS_TERMINAL_TYPE_PARAM_CACHED_OUT;
	param_terminal->base.parent_offset =
		0 - ((int16_t)terminal_offset);
	param_terminal->base.size = terminal_size;
	param_terminal->param_section_desc_offset =
		sizeof(ia_css_param_terminal_t);

	return 0;
}

/* Spatial Param Terminal */

/* Bytes needed: header + frame-grid sections + per-fragment grid descs. */
IA_CSS_PARAMETERS_STORAGE_CLASS_C
unsigned int ia_css_spatial_param_terminal_get_descriptor_size(
	const unsigned int nof_frame_param_sections,
	const unsigned int nof_fragments)
{
	return sizeof(ia_css_spatial_param_terminal_t) +
		nof_frame_param_sections * sizeof(
			ia_css_frame_grid_param_section_desc_t) +
		nof_fragments * sizeof(ia_css_fragment_grid_desc_t);
}

/* Return the fragment_index'th fragment grid desc; NULL on NULL terminal. */
IA_CSS_PARAMETERS_STORAGE_CLASS_C
ia_css_fragment_grid_desc_t *
ia_css_spatial_param_terminal_get_fragment_grid_desc(
	const ia_css_spatial_param_terminal_t *spatial_param_terminal,
	const unsigned int fragment_index)
{
	ia_css_fragment_grid_desc_t *fragment_grid_desc_base;
	ia_css_fragment_grid_desc_t *fragment_grid_desc = NULL;

	verifjmpexit(spatial_param_terminal != NULL);

	fragment_grid_desc_base =
		(ia_css_fragment_grid_desc_t *)
		(((const char *)spatial_param_terminal) +
		spatial_param_terminal->fragment_grid_desc_offset);
	fragment_grid_desc = &(fragment_grid_desc_base[fragment_index]);

EXIT:
	return fragment_grid_desc;
}

/* Return the section_index'th frame grid param section; NULL on NULL. */
IA_CSS_PARAMETERS_STORAGE_CLASS_C
ia_css_frame_grid_param_section_desc_t *
ia_css_spatial_param_terminal_get_frame_grid_param_section_desc(
	const ia_css_spatial_param_terminal_t *spatial_param_terminal,
	const unsigned int section_index)
{
	ia_css_frame_grid_param_section_desc_t *
		frame_grid_param_section_base;
	ia_css_frame_grid_param_section_desc_t *
		frame_grid_param_section_desc = NULL;

	verifjmpexit(spatial_param_terminal != NULL);

	frame_grid_param_section_base =
		(ia_css_frame_grid_param_section_desc_t *)
		(((const char *)spatial_param_terminal) +
		spatial_param_terminal->frame_grid_param_section_desc_offset);
	frame_grid_param_section_desc =
		&(frame_grid_param_section_base[section_index]);

EXIT:
	return frame_grid_param_section_desc;
}

/*
 * Initialize a spatial param terminal header; the fragment grid desc array
 * is laid out immediately after the header, followed by the frame grid
 * param sections. Returns 0 / -EFAULT / -EINVAL as for the cached create.
 */
IA_CSS_PARAMETERS_STORAGE_CLASS_C
int ia_css_spatial_param_terminal_create(
	ia_css_spatial_param_terminal_t *spatial_param_terminal,
	const uint16_t terminal_offset,
	const uint16_t terminal_size,
	const uint16_t is_input_terminal,
	const unsigned int nof_fragments,
	const uint32_t kernel_id)
{
	if (spatial_param_terminal == NULL) {
		return -EFAULT;
	}

	if (terminal_offset > (1<<15)) {
		return -EINVAL;
	}

	spatial_param_terminal->base.terminal_type =
		is_input_terminal ?
		IA_CSS_TERMINAL_TYPE_PARAM_SPATIAL_IN :
		IA_CSS_TERMINAL_TYPE_PARAM_SPATIAL_OUT;
	spatial_param_terminal->base.parent_offset =
		0 - ((int16_t)terminal_offset);
	spatial_param_terminal->base.size = terminal_size;
	spatial_param_terminal->kernel_id = kernel_id;
	spatial_param_terminal->fragment_grid_desc_offset =
		sizeof(ia_css_spatial_param_terminal_t);
	spatial_param_terminal->frame_grid_param_section_desc_offset =
		spatial_param_terminal->fragment_grid_desc_offset +
		(nof_fragments * sizeof(ia_css_fragment_grid_desc_t));

	return 0;
}

/* Sliced terminal */

/*
 * Bytes needed for a sliced terminal: header + per-fragment slice descs +
 * one section set per slice (nof_slices gives the slice count per fragment).
 * Returns 0 if nof_slices is NULL.
 * NOTE(review): this sizes the slice sections with
 * ia_css_fragment_param_section_desc_t while the getters index them as
 * ia_css_slice_param_section_desc_t; both are two uint32_t today, but the
 * two types must be kept in sync.
 */
IA_CSS_PARAMETERS_STORAGE_CLASS_C
unsigned int ia_css_sliced_param_terminal_get_descriptor_size(
	const unsigned int nof_slice_param_sections,
	const unsigned int nof_slices[],
	const unsigned int nof_fragments)
{
	unsigned int descriptor_size = 0;
	unsigned int fragment_index;
	unsigned int nof_slices_total = 0;

	verifjmpexit(nof_slices != NULL);

	for (fragment_index = 0;
		fragment_index < nof_fragments; fragment_index++) {
		nof_slices_total += nof_slices[fragment_index];
	}

	descriptor_size =
		sizeof(ia_css_sliced_param_terminal_t) +
		nof_fragments*sizeof(ia_css_fragment_slice_desc_t) +
		nof_slices_total*nof_slice_param_sections*sizeof(
			ia_css_fragment_param_section_desc_t);

EXIT:
	return descriptor_size;
}

/* Return the fragment_index'th fragment slice desc; NULL on NULL terminal. */
IA_CSS_PARAMETERS_STORAGE_CLASS_C
ia_css_fragment_slice_desc_t *
ia_css_sliced_param_terminal_get_fragment_slice_desc(
	const ia_css_sliced_param_terminal_t *sliced_param_terminal,
	const unsigned int fragment_index
)
{
	ia_css_fragment_slice_desc_t *fragment_slice_desc_base;
	ia_css_fragment_slice_desc_t *fragment_slice_desc = NULL;

	verifjmpexit(sliced_param_terminal != NULL);

	fragment_slice_desc_base =
		(ia_css_fragment_slice_desc_t *)
		(((const char *)sliced_param_terminal) +
		sliced_param_terminal->fragment_slice_desc_offset);
	fragment_slice_desc = &(fragment_slice_desc_base[fragment_index]);

EXIT:
	return fragment_slice_desc;
}

/*
 * Return the section descriptor for (fragment, slice, section), resolving
 * the per-fragment slice_section_desc_offset first; NULL on NULL input.
 */
IA_CSS_PARAMETERS_STORAGE_CLASS_C
ia_css_slice_param_section_desc_t *
ia_css_sliced_param_terminal_get_slice_param_section_desc(
	const ia_css_sliced_param_terminal_t *sliced_param_terminal,
	const unsigned int fragment_index,
	const unsigned int slice_index,
	const unsigned int section_index,
	const unsigned int nof_slice_param_sections)
{
	ia_css_fragment_slice_desc_t *fragment_slice_desc;
	ia_css_slice_param_section_desc_t *slice_param_section_desc_base;
	ia_css_slice_param_section_desc_t *slice_param_section_desc = NULL;

	fragment_slice_desc =
		ia_css_sliced_param_terminal_get_fragment_slice_desc(
			sliced_param_terminal,
			fragment_index
		);
	verifjmpexit(fragment_slice_desc != NULL);

	slice_param_section_desc_base =
		(ia_css_slice_param_section_desc_t *)
		(((const char *)sliced_param_terminal) +
		fragment_slice_desc->slice_section_desc_offset);
	slice_param_section_desc =
		&(slice_param_section_desc_base[(
			slice_index * nof_slice_param_sections) +
			section_index]);

EXIT:
	return slice_param_section_desc;
}

/*
 * Initialize a sliced param terminal header and write each fragment's slice
 * desc (slice count and the offset of its slice sections within the blob).
 * Returns 0 / -EFAULT / -EINVAL as for the other create functions.
 */
IA_CSS_PARAMETERS_STORAGE_CLASS_C
int ia_css_sliced_param_terminal_create(
	ia_css_sliced_param_terminal_t *sliced_param_terminal,
	const uint16_t terminal_offset,
	const uint16_t terminal_size,
	const uint16_t is_input_terminal,
	const unsigned int nof_slice_param_sections,
	const unsigned int nof_slices[],
	const unsigned int nof_fragments,
	const uint32_t kernel_id)
{
	unsigned int fragment_index;
	unsigned int nof_slices_total = 0;

	if (sliced_param_terminal == NULL) {
		return -EFAULT;
	}

	if (terminal_offset > (1<<15)) {
		return -EINVAL;
	}

	sliced_param_terminal->base.terminal_type =
		is_input_terminal ?
		IA_CSS_TERMINAL_TYPE_PARAM_SLICED_IN :
		IA_CSS_TERMINAL_TYPE_PARAM_SLICED_OUT;
	sliced_param_terminal->base.parent_offset =
		0 - ((int16_t)terminal_offset);
	sliced_param_terminal->base.size = terminal_size;
	sliced_param_terminal->kernel_id = kernel_id;
	/* set here to use below to find the pointer */
	sliced_param_terminal->fragment_slice_desc_offset =
		sizeof(ia_css_sliced_param_terminal_t);
	for (fragment_index = 0;
		fragment_index < nof_fragments; fragment_index++) {
		ia_css_fragment_slice_desc_t *fragment_slice_desc =
			ia_css_sliced_param_terminal_get_fragment_slice_desc(
				sliced_param_terminal,
				fragment_index);
		/*
		 * Error handling not required at this point
		 * since everything has been constructed/validated just above
		 */
		fragment_slice_desc->slice_count = nof_slices[fragment_index];
		fragment_slice_desc->slice_section_desc_offset =
			sliced_param_terminal->fragment_slice_desc_offset +
			(nof_fragments * sizeof(
				ia_css_fragment_slice_desc_t)) +
			(nof_slices_total * nof_slice_param_sections * sizeof(
				ia_css_slice_param_section_desc_t));
		nof_slices_total += nof_slices[fragment_index];
	}

	return 0;
}

/* Program terminal */

/* Bytes needed: header + fragment sections + sequencer infos + commands. */
IA_CSS_PARAMETERS_STORAGE_CLASS_C
unsigned int ia_css_program_terminal_get_descriptor_size(
	const unsigned int nof_fragments,
	const unsigned int nof_fragment_param_sections,
	const unsigned int nof_kernel_fragment_sequencer_infos,
	const unsigned int nof_command_objs)
{
	return sizeof(ia_css_program_terminal_t) +
		nof_fragments * nof_fragment_param_sections *
		sizeof(ia_css_fragment_param_section_desc_t) +
		nof_fragments * nof_kernel_fragment_sequencer_infos *
		sizeof(ia_css_kernel_fragment_sequencer_info_desc_t) +
		nof_command_objs * sizeof(
			ia_css_kernel_fragment_sequencer_command_desc_t);
}

/*
 * Return the fragment param section for (fragment_index, section_index);
 * NULL on NULL terminal or section_index out of range.
 */
IA_CSS_PARAMETERS_STORAGE_CLASS_C
ia_css_fragment_param_section_desc_t *
ia_css_program_terminal_get_frgmnt_prm_sct_desc(
	const ia_css_program_terminal_t *program_terminal,
	const unsigned int fragment_index,
	const unsigned int section_index,
	const unsigned int nof_fragment_param_sections)
{
	ia_css_fragment_param_section_desc_t *
		fragment_param_section_desc_base;
	ia_css_fragment_param_section_desc_t *
		fragment_param_section_desc = NULL;

	verifjmpexit(program_terminal != NULL);
	verifjmpexit(section_index < nof_fragment_param_sections);

	fragment_param_section_desc_base =
		(ia_css_fragment_param_section_desc_t *)
		(((const char *)program_terminal) +
		program_terminal->fragment_param_section_desc_offset);
	fragment_param_section_desc =
		&(fragment_param_section_desc_base[(fragment_index *
		nof_fragment_param_sections) + section_index]);

EXIT:
	return fragment_param_section_desc;
}

/*
 * Return the sequencer info desc for (fragment_index, info_index); NULL on
 * NULL terminal or info_index out of range (only checked when the count
 * is non-zero).
 */
IA_CSS_PARAMETERS_STORAGE_CLASS_C
ia_css_kernel_fragment_sequencer_info_desc_t *
ia_css_program_terminal_get_kernel_frgmnt_seq_info_desc(
	const ia_css_program_terminal_t *program_terminal,
	const unsigned int fragment_index,
	const unsigned int info_index,
	const unsigned int nof_kernel_fragment_sequencer_infos)
{
	ia_css_kernel_fragment_sequencer_info_desc_t *
		kernel_fragment_sequencer_info_desc_base;
	ia_css_kernel_fragment_sequencer_info_desc_t *
		kernel_fragment_sequencer_info_desc = NULL;

	verifjmpexit(program_terminal != NULL);
	if (nof_kernel_fragment_sequencer_infos > 0) {
		verifjmpexit(info_index < nof_kernel_fragment_sequencer_infos);
	}

	kernel_fragment_sequencer_info_desc_base =
		(ia_css_kernel_fragment_sequencer_info_desc_t *)
		(((const char *)program_terminal) +
		program_terminal->kernel_fragment_sequencer_info_desc_offset);
	kernel_fragment_sequencer_info_desc =
		&(kernel_fragment_sequencer_info_desc_base[(fragment_index *
		nof_kernel_fragment_sequencer_infos) + info_index]);

EXIT:
	return kernel_fragment_sequencer_info_desc;
}

/*
 * Initialize a program terminal header: sequencer infos follow the header,
 * then the command objects, then the fragment param sections.
 * Returns 0 / -EFAULT / -EINVAL as for the other create functions.
 */
IA_CSS_PARAMETERS_STORAGE_CLASS_C
int ia_css_program_terminal_create(
	ia_css_program_terminal_t *program_terminal,
	const uint16_t terminal_offset,
	const uint16_t terminal_size,
	const unsigned int nof_fragments,
	const unsigned int nof_kernel_fragment_sequencer_infos,
	const unsigned int nof_command_objs)
{
	if (program_terminal == NULL) {
		return -EFAULT;
	}

	if (terminal_offset > (1<<15)) {
		return -EINVAL;
	}

	program_terminal->base.terminal_type = IA_CSS_TERMINAL_TYPE_PROGRAM;
	program_terminal->base.parent_offset = 0-((int16_t)terminal_offset);
	program_terminal->base.size = terminal_size;
	program_terminal->kernel_fragment_sequencer_info_desc_offset =
		sizeof(ia_css_program_terminal_t);
	program_terminal->fragment_param_section_desc_offset =
		program_terminal->kernel_fragment_sequencer_info_desc_offset +
		(nof_fragments * nof_kernel_fragment_sequencer_infos *
		sizeof(ia_css_kernel_fragment_sequencer_info_desc_t)) +
		(nof_command_objs * sizeof(
			ia_css_kernel_fragment_sequencer_command_desc_t));

	return 0;
}

/*
 * Compute the blob offset of the next free command slot and store it in
 * *command_desc_offset (zeroed first). Returns 0 or -EFAULT on NULL args.
 */
IA_CSS_PARAMETERS_STORAGE_CLASS_C
int ia_css_program_terminal_get_command_base_offset(
	const ia_css_program_terminal_t *program_terminal,
	const unsigned int nof_fragments,
	const unsigned int nof_kernel_fragment_sequencer_infos,
	const unsigned int commands_slots_used,
	uint16_t *command_desc_offset)
{
	if (command_desc_offset == NULL) {
		return -EFAULT;
	}

	*command_desc_offset = 0;

	if (program_terminal == NULL) {
		return -EFAULT;
	}

	*command_desc_offset =
		program_terminal->kernel_fragment_sequencer_info_desc_offset +
		(nof_fragments * nof_kernel_fragment_sequencer_infos *
		sizeof(ia_css_kernel_fragment_sequencer_info_desc_t)) +
		(commands_slots_used * sizeof(
			ia_css_kernel_fragment_sequencer_command_desc_t));

	return 0;
}

/*
 * Return a pointer to the line_count slot for set_count: commands are packed
 * four per descriptor, so the descriptor is set_count/4 and the slot within
 * it is set_count%4. NULL on NULL base pointer.
 */
IA_CSS_PARAMETERS_STORAGE_CLASS_C
uint16_t *ia_css_program_terminal_get_line_count(
	const ia_css_kernel_fragment_sequencer_command_desc_t
		*kernel_fragment_sequencer_command_desc_base,
	const unsigned int set_count)
{
	uint16_t *line_count = NULL;

	verifjmpexit(kernel_fragment_sequencer_command_desc_base != NULL);
	line_count =
		(uint16_t *)&(kernel_fragment_sequencer_command_desc_base[
			set_count >> 2].line_count[set_count & 0x00000003]);
EXIT:
	return line_count;
}

#endif /* __IA_CSS_TERMINAL_IMPL_H */
diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/vied_parameters/src/ia_css_terminal_manifest.c b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/vied_parameters/src/ia_css_terminal_manifest.c
new file mode 100644
index 0000000000000..53c4708c7fc90
--- /dev/null
+++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/vied_parameters/src/ia_css_terminal_manifest.c
@@ -0,0 +1,20 @@
/**
* Support for Intel Camera Imaging ISP subsystem.
 * Copyright (c) 2010 - 2018, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
*/

/* Same empty-TU avoidance pattern as ia_css_terminal.c, see above. */
#ifdef __INLINE_PARAMETERS__
#include "storage_class.h"
STORAGE_CLASS_INLINE int __ia_css_param_avoid_warning_on_empty_file(void) { return 0; }
#else /* __INLINE_PARAMETERS__ */
#include "ia_css_terminal_manifest_impl.h"
#endif /* __INLINE_PARAMETERS__ */
diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/vied_parameters/src/ia_css_terminal_manifest_impl.h b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/vied_parameters/src/ia_css_terminal_manifest_impl.h
new file mode 100644
index 0000000000000..39734136b117b
--- /dev/null
+++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/vied_parameters/src/ia_css_terminal_manifest_impl.h
@@ -0,0 +1,347 @@
/**
* Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ + +#ifndef __IA_CSS_TERMINAL_MANIFEST_IMPL_H +#define __IA_CSS_TERMINAL_MANIFEST_IMPL_H + +#include "ia_css_terminal_manifest.h" +#include "error_support.h" +#include "assert_support.h" +#include "storage_class.h" + +STORAGE_CLASS_INLINE void __terminal_manifest_dummy_check_alignment(void) +{ + COMPILATION_ERROR_IF( + SIZE_OF_PARAM_TERMINAL_MANIFEST_STRUCT_IN_BITS != + (CHAR_BIT * sizeof(ia_css_param_terminal_manifest_t))); + + COMPILATION_ERROR_IF(0 != + sizeof(ia_css_param_terminal_manifest_t) % sizeof(uint64_t)); + + COMPILATION_ERROR_IF( + SIZE_OF_PARAM_TERMINAL_MANIFEST_SEC_STRUCT_IN_BITS != + (CHAR_BIT * sizeof(ia_css_param_manifest_section_desc_t))); + + COMPILATION_ERROR_IF(0 != + sizeof(ia_css_param_manifest_section_desc_t) % + sizeof(uint64_t)); + + COMPILATION_ERROR_IF( + SIZE_OF_SPATIAL_PARAM_TERM_MAN_STRUCT_IN_BITS != + (CHAR_BIT * sizeof(ia_css_spatial_param_terminal_manifest_t))); + + COMPILATION_ERROR_IF(0 != + sizeof(ia_css_spatial_param_terminal_manifest_t) % + sizeof(uint64_t)); + + COMPILATION_ERROR_IF( + SIZE_OF_FRAME_GRID_PARAM_MAN_SEC_STRUCT_IN_BITS != + (CHAR_BIT * sizeof( + ia_css_frame_grid_param_manifest_section_desc_t))); + + COMPILATION_ERROR_IF(0 != + sizeof(ia_css_frame_grid_param_manifest_section_desc_t) % + sizeof(uint64_t)); + + COMPILATION_ERROR_IF( + SIZE_OF_PROG_TERM_MAN_STRUCT_IN_BITS != + (CHAR_BIT * sizeof(ia_css_program_terminal_manifest_t))); + + COMPILATION_ERROR_IF(0 != + 
sizeof(ia_css_program_terminal_manifest_t)%sizeof(uint64_t)); + + COMPILATION_ERROR_IF( + SIZE_OF_FRAG_PARAM_MAN_SEC_STRUCT_IN_BITS != + (CHAR_BIT * sizeof( + ia_css_fragment_param_manifest_section_desc_t))); + + COMPILATION_ERROR_IF(0 != + sizeof(ia_css_fragment_param_manifest_section_desc_t) % + sizeof(uint64_t)); + + COMPILATION_ERROR_IF( + SIZE_OF_KERNEL_FRAG_SEQ_INFO_MAN_STRUCT_IN_BITS != + (CHAR_BIT * sizeof( + ia_css_kernel_fragment_sequencer_info_manifest_desc_t)) + ); + + COMPILATION_ERROR_IF(0 != sizeof( + ia_css_kernel_fragment_sequencer_info_manifest_desc_t) % + sizeof(uint64_t)); + + COMPILATION_ERROR_IF( + SIZE_OF_PARAM_TERMINAL_MANIFEST_STRUCT_IN_BITS != + (CHAR_BIT * sizeof(ia_css_sliced_param_terminal_manifest_t))); + + COMPILATION_ERROR_IF(0 != + sizeof(ia_css_sliced_param_terminal_manifest_t) % + sizeof(uint64_t)); + + COMPILATION_ERROR_IF( + SIZE_OF_SLICED_PARAM_MAN_SEC_STRUCT_IN_BITS != + (CHAR_BIT * sizeof + (ia_css_sliced_param_manifest_section_desc_t))); + + COMPILATION_ERROR_IF(0 != + sizeof(ia_css_sliced_param_manifest_section_desc_t) % + sizeof(uint64_t)); +} + +/* Parameter Terminal */ +IA_CSS_PARAMETERS_STORAGE_CLASS_C +unsigned int ia_css_param_terminal_manifest_get_size( + const unsigned int nof_sections) +{ + + return sizeof(ia_css_param_terminal_manifest_t) + + nof_sections*sizeof(ia_css_param_manifest_section_desc_t); +} + +IA_CSS_PARAMETERS_STORAGE_CLASS_C +int ia_css_param_terminal_manifest_init( + ia_css_param_terminal_manifest_t *param_terminal, + const uint16_t section_count) +{ + if (param_terminal == NULL) { + return -EFAULT; + } + + param_terminal->param_manifest_section_desc_count = section_count; + param_terminal->param_manifest_section_desc_offset = sizeof( + ia_css_param_terminal_manifest_t); + + return 0; +} + +IA_CSS_PARAMETERS_STORAGE_CLASS_C +ia_css_param_manifest_section_desc_t * +ia_css_param_terminal_manifest_get_prm_sct_desc( + const ia_css_param_terminal_manifest_t *param_terminal_manifest, + const unsigned int 
section_index) +{ + ia_css_param_manifest_section_desc_t *param_manifest_section_base; + ia_css_param_manifest_section_desc_t * + param_manifest_section_desc = NULL; + + verifjmpexit(param_terminal_manifest != NULL); + + param_manifest_section_base = + (ia_css_param_manifest_section_desc_t *) + (((const char *)param_terminal_manifest) + + param_terminal_manifest->param_manifest_section_desc_offset); + + param_manifest_section_desc = + &(param_manifest_section_base[section_index]); + +EXIT: + return param_manifest_section_desc; +} + +/* Spatial Parameter Terminal */ +IA_CSS_PARAMETERS_STORAGE_CLASS_C +unsigned int ia_css_spatial_param_terminal_manifest_get_size( + const unsigned int nof_frame_param_sections) +{ + return sizeof(ia_css_spatial_param_terminal_manifest_t) + + nof_frame_param_sections * sizeof( + ia_css_frame_grid_param_manifest_section_desc_t); +} + +IA_CSS_PARAMETERS_STORAGE_CLASS_C +int ia_css_spatial_param_terminal_manifest_init( + ia_css_spatial_param_terminal_manifest_t *spatial_param_terminal, + const uint16_t section_count) +{ + if (spatial_param_terminal == NULL) { + return -EFAULT; + } + + spatial_param_terminal-> + frame_grid_param_manifest_section_desc_count = section_count; + spatial_param_terminal-> + frame_grid_param_manifest_section_desc_offset = + sizeof(ia_css_spatial_param_terminal_manifest_t); + + return 0; +} + +IA_CSS_PARAMETERS_STORAGE_CLASS_C +ia_css_frame_grid_param_manifest_section_desc_t * +ia_css_spatial_param_terminal_manifest_get_frm_grid_prm_sct_desc( + const ia_css_spatial_param_terminal_manifest_t * + spatial_param_terminal_manifest, + const unsigned int section_index) +{ + ia_css_frame_grid_param_manifest_section_desc_t * + frame_param_manifest_section_base; + ia_css_frame_grid_param_manifest_section_desc_t * + frame_param_manifest_section_desc = NULL; + + verifjmpexit(spatial_param_terminal_manifest != NULL); + + frame_param_manifest_section_base = + (ia_css_frame_grid_param_manifest_section_desc_t *) + (((const char 
*)spatial_param_terminal_manifest) + + spatial_param_terminal_manifest-> + frame_grid_param_manifest_section_desc_offset); + frame_param_manifest_section_desc = + &(frame_param_manifest_section_base[section_index]); + +EXIT: + return frame_param_manifest_section_desc; +} + +/* Sliced Terminal */ +IA_CSS_PARAMETERS_STORAGE_CLASS_C +unsigned int ia_css_sliced_param_terminal_manifest_get_size( + const unsigned int nof_slice_param_sections) +{ + return sizeof(ia_css_spatial_param_terminal_manifest_t) + + nof_slice_param_sections * + sizeof(ia_css_sliced_param_manifest_section_desc_t); +} + +IA_CSS_PARAMETERS_STORAGE_CLASS_C +int ia_css_sliced_param_terminal_manifest_init( + ia_css_sliced_param_terminal_manifest_t *sliced_param_terminal, + const uint16_t section_count) +{ + if (sliced_param_terminal == NULL) { + return -EFAULT; + } + + sliced_param_terminal->sliced_param_section_count = section_count; + sliced_param_terminal->sliced_param_section_offset = + sizeof(ia_css_sliced_param_terminal_manifest_t); + + return 0; +} + +IA_CSS_PARAMETERS_STORAGE_CLASS_C +ia_css_sliced_param_manifest_section_desc_t * +ia_css_sliced_param_terminal_manifest_get_sliced_prm_sct_desc( + const ia_css_sliced_param_terminal_manifest_t * + sliced_param_terminal_manifest, + const unsigned int section_index) +{ + ia_css_sliced_param_manifest_section_desc_t * + sliced_param_manifest_section_base; + ia_css_sliced_param_manifest_section_desc_t * + sliced_param_manifest_section_desc = NULL; + + verifjmpexit(sliced_param_terminal_manifest != NULL); + + sliced_param_manifest_section_base = + (ia_css_sliced_param_manifest_section_desc_t *) + (((const char *)sliced_param_terminal_manifest) + + sliced_param_terminal_manifest-> + sliced_param_section_offset); + sliced_param_manifest_section_desc = + &(sliced_param_manifest_section_base[section_index]); + +EXIT: + return sliced_param_manifest_section_desc; +} + +/* Program Terminal */ +IA_CSS_PARAMETERS_STORAGE_CLASS_C +unsigned int 
ia_css_program_terminal_manifest_get_size( + const unsigned int nof_fragment_param_sections, + const unsigned int nof_kernel_fragment_sequencer_infos) +{ + return sizeof(ia_css_program_terminal_manifest_t) + + nof_fragment_param_sections * + sizeof(ia_css_fragment_param_manifest_section_desc_t) + + nof_kernel_fragment_sequencer_infos * + sizeof(ia_css_kernel_fragment_sequencer_info_manifest_desc_t); +} + +IA_CSS_PARAMETERS_STORAGE_CLASS_C +int ia_css_program_terminal_manifest_init( + ia_css_program_terminal_manifest_t *program_terminal, + const uint16_t fragment_param_section_count, + const uint16_t kernel_fragment_seq_info_section_count) +{ + if (program_terminal == NULL) { + return -EFAULT; + } + + program_terminal->fragment_param_manifest_section_desc_count = + fragment_param_section_count; + program_terminal->fragment_param_manifest_section_desc_offset = + sizeof(ia_css_program_terminal_manifest_t); + + program_terminal->kernel_fragment_sequencer_info_manifest_info_count = + kernel_fragment_seq_info_section_count; + program_terminal->kernel_fragment_sequencer_info_manifest_info_offset = + sizeof(ia_css_program_terminal_manifest_t) + + fragment_param_section_count*sizeof( + ia_css_fragment_param_manifest_section_desc_t); + + return 0; +} + +IA_CSS_PARAMETERS_STORAGE_CLASS_C +ia_css_fragment_param_manifest_section_desc_t * +ia_css_program_terminal_manifest_get_frgmnt_prm_sct_desc( + const ia_css_program_terminal_manifest_t *program_terminal_manifest, + const unsigned int section_index) +{ + ia_css_fragment_param_manifest_section_desc_t * + fragment_param_manifest_section_base; + ia_css_fragment_param_manifest_section_desc_t * + fragment_param_manifest_section = NULL; + + verifjmpexit(program_terminal_manifest != NULL); + + fragment_param_manifest_section_base = + (ia_css_fragment_param_manifest_section_desc_t *) + (((const char *)program_terminal_manifest) + + program_terminal_manifest-> + fragment_param_manifest_section_desc_offset); + 
fragment_param_manifest_section = + &(fragment_param_manifest_section_base[section_index]); + +EXIT: + return fragment_param_manifest_section; +} + +IA_CSS_PARAMETERS_STORAGE_CLASS_C +ia_css_kernel_fragment_sequencer_info_manifest_desc_t * +ia_css_program_terminal_manifest_get_kernel_frgmnt_seq_info_desc( + const ia_css_program_terminal_manifest_t *program_terminal_manifest, + const unsigned int info_index) +{ + ia_css_kernel_fragment_sequencer_info_manifest_desc_t * + kernel_manifest_fragment_sequencer_info_manifest_desc_base; + ia_css_kernel_fragment_sequencer_info_manifest_desc_t * + kernel_manifest_fragment_sequencer_info_manifest_desc = NULL; + + verifjmpexit(program_terminal_manifest != NULL); + + kernel_manifest_fragment_sequencer_info_manifest_desc_base = + (ia_css_kernel_fragment_sequencer_info_manifest_desc_t *) + (((const char *)program_terminal_manifest) + + program_terminal_manifest-> + kernel_fragment_sequencer_info_manifest_info_offset); + + kernel_manifest_fragment_sequencer_info_manifest_desc = + &(kernel_manifest_fragment_sequencer_info_manifest_desc_base[ + info_index]); + +EXIT: + return kernel_manifest_fragment_sequencer_info_manifest_desc; +} + +#endif /* __IA_CSS_TERMINAL_MANIFEST_IMPL_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/vied_parameters/vied_parameters.mk b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/vied_parameters/vied_parameters.mk new file mode 100644 index 0000000000000..834a1a4b2bab6 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/vied_parameters/vied_parameters.mk @@ -0,0 +1,76 @@ +# # # +# Support for Intel Camera Imaging ISP subsystem. +# Copyright (c) 2010 - 2018, Intel Corporation. +# +# This program is free software; you can redistribute it and/or modify it +# under the terms and conditions of the GNU General Public License, +# version 2, as published by the Free Software Foundation. 
+# +# This program is distributed in the hope it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for +# more details +# +# +# MODULE is VIED_PARAMETERS + +VIED_PARAMETERS_DIR=$${MODULES_DIR}/vied_parameters + +VIED_PARAMETERS_INTERFACE=$(VIED_PARAMETERS_DIR)/interface +VIED_PARAMETERS_SOURCES=$(VIED_PARAMETERS_DIR)/src +VIED_PARAMETERS_EXTINCLUDE = $${MODULES_DIR}/support + +VIED_PARAMETERS_DYNAMIC_HOST_FILES += $(VIED_PARAMETERS_SOURCES)/ia_css_terminal.c +VIED_PARAMETERS_STATIC_HOST_FILES += $(VIED_PARAMETERS_SOURCES)/ia_css_terminal_manifest.c + +VIED_PARAMETERS_HOST_FILES = $(VIED_PARAMETERS_DYNAMIC_HOST_FILES) +VIED_PARAMETERS_HOST_FILES += $(VIED_PARAMETERS_STATIC_HOST_FILES) + +VIED_PARAMETERS_ISA_CLIENT_HOST_FILES = $(VIED_PARAMETERS_SOURCES)/ia_css_isys_process_group.c +VIED_PARAMETERS_ISA_CLIENT_HOST_FILES += $(VIED_PARAMETERS_DIR)/client/ia_css_isys_parameter_client.c + +VIED_PARAMETERS_DYNAMIC_FW_FILES += $(VIED_PARAMETERS_SOURCES)/ia_css_terminal.c +VIED_PARAMETERS_STATIC_FW_FILES += $(VIED_PARAMETERS_SOURCES)/ia_css_terminal_manifest.c + +VIED_PARAMETERS_FW_FILES = $(VIED_PARAMETERS_DYNAMIC_HOST_FILES) +VIED_PARAMETERS_FW_FILES += $(VIED_PARAMETERS_STATIC_HOST_FILES) +VIED_PARAMETERS_SUPPORT_CPPFLAGS = -I$(VIED_PARAMETERS_DIR)/support +VIED_PARAMETERS_SUPPORT_CPPFLAGS += -I$(VIED_PARAMETERS_DIR)/support/$(IPU_SYSVER) +VIED_PARAMETERS_ISA_CLIENT_HOST_CPPFLAGS = -I$(VIED_PARAMETERS_DIR)/client +VIED_PARAMETERS_PSA_UTILS_HOST_FILES = $(MODULES_DIR)/vied_parameters/support/ia_css_psys_parameter_utils.c +VIED_PARAMETERS_PSA_UTILS_HOST_FILES += $(MODULES_DIR)/vied_parameters/support/$(IPU_SYSVER)/ia_css_psys_parameter_utils_dep.c + +VIED_PARAMETERS_UTILS_HOST_CPPFLAGS = $(VIED_PARAMETERS_SUPPORT_CPPFLAGS) + +VIED_PARAMETERS_ISA_UTILS_HOST_FILES = $(MODULES_DIR)/vied_parameters/support/ia_css_isys_parameter_utils.c 
+VIED_PARAMETERS_ISA_UTILS_HOST_FILES += $(MODULES_DIR)/vied_parameters/support/$(IPU_SYSVER)/ia_css_isys_parameter_utils_dep.c + +VIED_PARAMETERS_PRINT_CPPFLAGS += -I$(VIED_PARAMETERS_DIR)/print/interface +VIED_PARAMETERS_PRINT_FILES += $(VIED_PARAMETERS_DIR)/print/src/ia_css_terminal_print.c + +# VIED_PARAMETERS Trace Log Level = VIED_PARAMETERS_TRACE_LOG_LEVEL_NORMAL +# Other options are [VIED_PARAMETERS_TRACE_LOG_LEVEL_OFF, VIED_PARAMETERS_TRACE_LOG_LEVEL_DEBUG] +ifndef VIED_PARAMETERS_TRACE_CONFIG_HOST + VIED_PARAMETERS_TRACE_CONFIG_HOST=VIED_PARAMETERS_TRACE_LOG_LEVEL_NORMAL +endif +ifndef VIED_PARAMETERS_TRACE_CONFIG_FW + VIED_PARAMETERS_TRACE_CONFIG_FW=VIED_PARAMETERS_TRACE_LOG_LEVEL_NORMAL +endif + +VIED_PARAMETERS_HOST_CPPFLAGS += -DVIED_PARAMETERS_TRACE_CONFIG=$(VIED_PARAMETERS_TRACE_CONFIG_HOST) +VIED_PARAMETERS_FW_CPPFLAGS += -DVIED_PARAMETERS_TRACE_CONFIG=$(VIED_PARAMETERS_TRACE_CONFIG_FW) + +VIED_PARAMETERS_HOST_CPPFLAGS += -I$(VIED_PARAMETERS_INTERFACE) +VIED_PARAMETERS_HOST_CPPFLAGS += -I$(VIED_PARAMETERS_SOURCES) +VIED_PARAMETERS_HOST_CPPFLAGS += -I$(VIED_PARAMETERS_EXTINCLUDE) +VIED_PARAMETERS_HOST_CPPFLAGS += $(VIED_PARAMETERS_SUPPORT_CPPFLAGS) +VIED_PARAMETERS_FW_CPPFLAGS += -I$(VIED_PARAMETERS_INTERFACE) +VIED_PARAMETERS_FW_CPPFLAGS += -I$(VIED_PARAMETERS_SOURCES) +VIED_PARAMETERS_FW_CPPFLAGS += -I$(VIED_PARAMETERS_EXTINCLUDE) +VIED_PARAMETERS_FW_CPPFLAGS += $(VIED_PARAMETERS_SUPPORT_CPPFLAGS) + +#For IPU interface +include $(MODULES_DIR)/fw_abi_common_types/cpu/fw_abi_cpu_types.mk +VIED_PARAMETERS_HOST_CPPFLAGS += $(FW_ABI_COMMON_TYPES_HOST_CPPFLAGS) + +VIED_PARAMETERS_FW_CPPFLAGS += $(FW_ABI_COMMON_TYPES_FW_CPPFLAGS) diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/libcsspsys2600.c b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/libcsspsys2600.c new file mode 100644 index 0000000000000..38893935bf37e --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/libcsspsys2600.c @@ -0,0 +1,480 @@ +/* + * Copyright 
(c) 2015--2018 Intel Corporation. All Rights Reserved. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License version + * 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#include +#include + +#include + +#include "ipu.h" +#include "ipu-mmu.h" +#include "ipu-psys.h" +#include "ipu-fw-psys.h" +#include "ipu-wrapper.h" +#include "libcsspsys2600.h" + +#include +#include +#include +#include +#include + +int ipu_fw_psys_pg_start(struct ipu_psys_kcmd *kcmd) +{ + return -ia_css_process_group_start((ia_css_process_group_t *) + kcmd->kpg->pg); +} +EXPORT_SYMBOL_GPL(ipu_fw_psys_pg_start); + +int ipu_fw_psys_pg_disown(struct ipu_psys_kcmd *kcmd) +{ + return -ia_css_process_group_disown((ia_css_process_group_t *) + kcmd->kpg->pg); +} +EXPORT_SYMBOL_GPL(ipu_fw_psys_pg_disown); + +int ipu_fw_psys_pg_abort(struct ipu_psys_kcmd *kcmd) +{ + int rval; + + rval = ia_css_process_group_stop((ia_css_process_group_t *) + kcmd->kpg->pg); + if (rval) { + dev_err(&kcmd->fh->psys->adev->dev, + "failed to abort kcmd!\n"); + kcmd->pg_user = NULL; + rval = -EIO; + /* TODO: need to reset PSYS by power cycling it */ + } + return rval; +} +EXPORT_SYMBOL_GPL(ipu_fw_psys_pg_abort); + +int ipu_fw_psys_pg_submit(struct ipu_psys_kcmd *kcmd) +{ + return -ia_css_process_group_submit((ia_css_process_group_t *) + kcmd->kpg->pg); +} +EXPORT_SYMBOL_GPL(ipu_fw_psys_pg_submit); + +static void *syscom_buffer; +static struct ia_css_syscom_config *syscom_config; +static struct ia_css_psys_server_init *server_init; + +int ipu_fw_psys_rcv_event(struct ipu_psys *psys, + struct ipu_fw_psys_event *event) +{ + return ia_css_psys_event_queue_receive(psys_syscom, + 
IA_CSS_PSYS_EVENT_QUEUE_MAIN_ID, + (struct ia_css_psys_event_s *)event); +} +EXPORT_SYMBOL_GPL(ipu_fw_psys_rcv_event); + +int ipu_fw_psys_terminal_set(struct ipu_fw_psys_terminal *terminal, + int terminal_idx, + struct ipu_psys_kcmd *kcmd, + u32 buffer, + unsigned size) +{ + ia_css_terminal_type_t type; + u32 buffer_state; + + type = ia_css_terminal_get_type((ia_css_terminal_t *)terminal); + + switch (type) { + case IA_CSS_TERMINAL_TYPE_PARAM_CACHED_IN: + case IA_CSS_TERMINAL_TYPE_PARAM_CACHED_OUT: + case IA_CSS_TERMINAL_TYPE_PARAM_SPATIAL_IN: + case IA_CSS_TERMINAL_TYPE_PARAM_SPATIAL_OUT: + case IA_CSS_TERMINAL_TYPE_PARAM_SLICED_IN: + case IA_CSS_TERMINAL_TYPE_PARAM_SLICED_OUT: + case IA_CSS_TERMINAL_TYPE_PROGRAM: + buffer_state = IA_CSS_BUFFER_UNDEFINED; + break; + case IA_CSS_TERMINAL_TYPE_PARAM_STREAM: + case IA_CSS_TERMINAL_TYPE_DATA_IN: + case IA_CSS_TERMINAL_TYPE_STATE_IN: + buffer_state = IA_CSS_BUFFER_FULL; + break; + case IA_CSS_TERMINAL_TYPE_DATA_OUT: + case IA_CSS_TERMINAL_TYPE_STATE_OUT: + buffer_state = IA_CSS_BUFFER_EMPTY; + break; + default: + dev_err(&kcmd->fh->psys->adev->dev, + "unknown terminal type: 0x%x\n", type); + return -EAGAIN; + } + + if (type == IA_CSS_TERMINAL_TYPE_DATA_IN || + type == IA_CSS_TERMINAL_TYPE_DATA_OUT) { + ia_css_frame_t *frame; + + if (ia_css_data_terminal_set_connection_type( + (ia_css_data_terminal_t *)terminal, + IA_CSS_CONNECTION_MEMORY)) + return -EIO; + frame = ia_css_data_terminal_get_frame( + (ia_css_data_terminal_t *)terminal); + if (!frame) + return -EIO; + + if (ia_css_frame_set_data_bytes(frame, size)) + return -EIO; + } + + return -ia_css_process_group_attach_buffer( + (ia_css_process_group_t *)kcmd->kpg->pg, buffer, + buffer_state, terminal_idx); +} +EXPORT_SYMBOL_GPL(ipu_fw_psys_terminal_set); + +void ipu_fw_psys_pg_dump(struct ipu_psys *psys, + struct ipu_psys_kcmd *kcmd, + const char *note) +{ + ia_css_process_group_t *pg = (ia_css_process_group_t *)kcmd->kpg->pg; + ia_css_program_group_ID_t pgid = + 
ia_css_process_group_get_program_group_ID(pg); + uint8_t processes = ia_css_process_group_get_process_count( + (ia_css_process_group_t *)kcmd->kpg->pg); + unsigned int p; + + dev_dbg(&psys->adev->dev, "%s %s pgid %i processes %i\n", + __func__, note, pgid, processes); + for (p = 0; p < processes; p++) { + ia_css_process_t *process = + ia_css_process_group_get_process(pg, p); + + dev_dbg(&psys->adev->dev, + "%s pgid %i process %i cell %i dev_chn: ext0 %i ext1r %i ext1w %i int %i ipfd %i isa %i\n", + __func__, pgid, p, + ia_css_process_get_cell(process), + ia_css_process_get_dev_chn(process, + VIED_NCI_DEV_CHN_DMA_EXT0_ID), + ia_css_process_get_dev_chn(process, + VIED_NCI_DEV_CHN_DMA_EXT1_READ_ID), + ia_css_process_get_dev_chn(process, + VIED_NCI_DEV_CHN_DMA_EXT1_WRITE_ID), + ia_css_process_get_dev_chn(process, + VIED_NCI_DEV_CHN_DMA_INTERNAL_ID), + ia_css_process_get_dev_chn(process, + VIED_NCI_DEV_CHN_DMA_IPFD_ID), + ia_css_process_get_dev_chn(process, + VIED_NCI_DEV_CHN_DMA_ISA_ID)); + } +} +EXPORT_SYMBOL_GPL(ipu_fw_psys_pg_dump); + +int ipu_fw_psys_pg_get_id(struct ipu_psys_kcmd *kcmd) +{ + return ia_css_process_group_get_program_group_ID( + (ia_css_process_group_t *)kcmd->kpg->pg); +} +EXPORT_SYMBOL_GPL(ipu_fw_psys_pg_get_id); + +int ipu_fw_psys_pg_get_terminal_count(struct ipu_psys_kcmd *kcmd) +{ + return ia_css_process_group_get_terminal_count( + (ia_css_process_group_t *)kcmd->kpg->pg); +} +EXPORT_SYMBOL_GPL(ipu_fw_psys_pg_get_terminal_count); + +int ipu_fw_psys_pg_get_size(struct ipu_psys_kcmd *kcmd) +{ + return ia_css_process_group_get_size((ia_css_process_group_t *) + kcmd->kpg->pg); +} +EXPORT_SYMBOL_GPL(ipu_fw_psys_pg_get_size); + +int ipu_fw_psys_pg_set_ipu_vaddress(struct ipu_psys_kcmd *kcmd, + dma_addr_t vaddress) +{ + return ia_css_process_group_set_ipu_vaddress((ia_css_process_group_t *) + kcmd->kpg->pg, vaddress); +} +EXPORT_SYMBOL_GPL(ipu_fw_psys_pg_set_ipu_vaddress); + +int ipu_fw_psys_pg_load_cycles(struct ipu_psys_kcmd *kcmd) +{ + return 
ia_css_process_group_get_pg_load_cycles( + (ia_css_process_group_t *)kcmd->kpg->pg); +} +EXPORT_SYMBOL_GPL(ipu_fw_psys_pg_load_cycles); + +int ipu_fw_psys_pg_init_cycles(struct ipu_psys_kcmd *kcmd) +{ + return ia_css_process_group_get_pg_init_cycles( + (ia_css_process_group_t *)kcmd->kpg->pg); +} +EXPORT_SYMBOL_GPL(ipu_fw_psys_pg_init_cycles); + +int ipu_fw_psys_pg_processing_cycles(struct ipu_psys_kcmd *kcmd) +{ + return ia_css_process_group_get_pg_processing_cycles( + (ia_css_process_group_t *)kcmd->kpg->pg); +} +EXPORT_SYMBOL_GPL(ipu_fw_psys_pg_processing_cycles); + +struct ipu_fw_psys_terminal * +ipu_fw_psys_pg_get_terminal(struct ipu_psys_kcmd *kcmd, int index) +{ + return (struct ipu_fw_psys_terminal *)ia_css_process_group_get_terminal( + (ia_css_process_group_t *)kcmd->kpg->pg, index); +} +EXPORT_SYMBOL_GPL(ipu_fw_psys_pg_get_terminal); + +void ipu_fw_psys_pg_set_token(struct ipu_psys_kcmd *kcmd, u64 token) +{ + ia_css_process_group_set_token((ia_css_process_group_t *)kcmd->kpg->pg, + token); +} +EXPORT_SYMBOL_GPL(ipu_fw_psys_pg_set_token); + +int ipu_fw_psys_pg_get_protocol( + struct ipu_psys_kcmd *kcmd) +{ + return ia_css_process_group_get_protocol_version( + (ia_css_process_group_t *)kcmd->kpg->pg); +} +EXPORT_SYMBOL_GPL(ipu_fw_psys_pg_get_protocol); + +static int libcsspsys2600_init(void); +int ipu_fw_psys_open(struct ipu_psys *psys) +{ + bool opened; + int retry = IPU_PSYS_OPEN_RETRY; + + ipu_wrapper_init(PSYS_MMID, &psys->adev->dev, psys->pdata->base); + /* When fw psys open, make sure csslib init first */ + libcsspsys2600_init(); + + server_init->icache_prefetch_sp = psys->icache_prefetch_sp; + server_init->icache_prefetch_isp = psys->icache_prefetch_isp; + + psys_syscom = ia_css_psys_open(syscom_buffer, syscom_config); + if (!psys_syscom) { + dev_err(&psys->adev->dev, + "psys library open failed\n"); + return -ENODEV; + } + + do { + opened = ia_css_psys_open_is_ready(psys_syscom); + if (opened) + break; + usleep_range(IPU_PSYS_OPEN_TIMEOUT_US, + 
IPU_PSYS_OPEN_TIMEOUT_US + 10); + retry--; + } while (retry > 0); + + if (!retry && !opened) { + dev_err(&psys->adev->dev, + "psys library open ready failed\n"); + ia_css_psys_close(psys_syscom); + ia_css_psys_release(psys_syscom, 1); + psys_syscom = NULL; + return -ENODEV; + } + + return 0; +} +EXPORT_SYMBOL_GPL(ipu_fw_psys_open); + +int ipu_fw_psys_close(struct ipu_psys *psys) +{ + int rval; + unsigned int retry = IPU_PSYS_CLOSE_TIMEOUT; + + if (!psys_syscom) + return 0; + + if (ia_css_psys_close(psys_syscom)) { + dev_err(&psys->adev->dev, + "psys library close ready failed\n"); + return 0; + } + + do { + rval = ia_css_psys_release(psys_syscom, 0); + if (rval && rval != -EBUSY) { + dev_dbg(&psys->adev->dev, "psys library release failed\n"); + break; + } + usleep_range(IPU_PSYS_CLOSE_TIMEOUT_US, + IPU_PSYS_CLOSE_TIMEOUT_US + 10); + } while (rval && --retry); + + psys_syscom = NULL; + + return 0; +} +EXPORT_SYMBOL_GPL(ipu_fw_psys_close); + +u64 ipu_fw_psys_pg_get_token(struct ipu_psys_kcmd *kcmd) +{ + return 0; +} +EXPORT_SYMBOL_GPL(ipu_fw_psys_pg_get_token); + +static const struct ipu_fw_resource_definitions default_defs = { + .cells = vied_nci_cell_type, + .num_cells = VIED_NCI_N_CELL_ID, + .num_cells_type = VIED_NCI_N_CELL_TYPE_ID, + .dev_channels = vied_nci_dev_chn_size, + .num_dev_channels = VIED_NCI_N_DEV_CHN_ID, + + .num_ext_mem_types = VIED_NCI_N_DATA_MEM_TYPE_ID, + .num_ext_mem_ids = VIED_NCI_N_MEM_ID, + .ext_mem_ids = vied_nci_mem_size, + + .cell_mem_row = VIED_NCI_N_MEM_TYPE_ID, + .cell_mem = (enum ipu_mem_id *)vied_nci_cell_mem, +}; + +const struct ipu_fw_resource_definitions *res_defs = &default_defs; +EXPORT_SYMBOL_GPL(res_defs); + +int ipu_fw_psys_set_process_cell_id(struct ipu_fw_psys_process *ptr, u8 index, + u8 value) +{ + return ia_css_process_set_cell((ia_css_process_t *)ptr, + (vied_nci_cell_ID_t)value); +} +EXPORT_SYMBOL_GPL(ipu_fw_psys_set_process_cell_id); + +u8 ipu_fw_psys_get_process_cell_id(struct ipu_fw_psys_process *ptr, u8 index) +{ + 
return ia_css_process_get_cell((ia_css_process_t *)ptr); +} +EXPORT_SYMBOL_GPL(ipu_fw_psys_get_process_cell_id); + +int ipu_fw_psys_clear_process_cell(struct ipu_fw_psys_process *ptr) +{ + return ia_css_process_clear_cell((ia_css_process_t *)ptr); +} +EXPORT_SYMBOL_GPL(ipu_fw_psys_clear_process_cell); + +int ipu_fw_psys_set_process_dev_chn_offset(struct ipu_fw_psys_process *ptr, + u16 offset, u16 value) +{ + return ia_css_process_set_dev_chn((ia_css_process_t *)ptr, + (vied_nci_dev_chn_ID_t)offset, + (vied_nci_resource_size_t)value); +} +EXPORT_SYMBOL_GPL(ipu_fw_psys_set_process_dev_chn_offset); + +int ipu_fw_psys_set_process_ext_mem(struct ipu_fw_psys_process *ptr, + u16 type_id, u16 mem_id, u16 offset) +{ + return ia_css_process_set_ext_mem((ia_css_process_t *)ptr, mem_id, offset); +} +EXPORT_SYMBOL_GPL(ipu_fw_psys_set_process_ext_mem); + +int ipu_fw_psys_get_program_manifest_by_process( + struct ipu_fw_generic_program_manifest *gen_pm, + const struct ipu_fw_psys_program_group_manifest *pg_manifest, + struct ipu_fw_psys_process *process) +{ + ia_css_program_ID_t process_id = + ia_css_process_get_program_ID( + (const ia_css_process_t *)process); + int programs = + ia_css_program_group_manifest_get_program_count( + (const ia_css_program_group_manifest_t *)pg_manifest); + int i; + + for (i = 0; i < programs; i++) { + ia_css_program_ID_t program_id; + ia_css_program_manifest_t *pm = + ia_css_program_group_manifest_get_prgrm_mnfst( + (const ia_css_program_group_manifest_t *) + pg_manifest, i); + if (!pm) + continue; + program_id = ia_css_program_manifest_get_program_ID(pm); + if (program_id == process_id) { + gen_pm->dev_chn_size = (u16 *)pm->dev_chn_size; + gen_pm->ext_mem_size = (u16 *)pm->ext_mem_size; + gen_pm->cell_id = pm->cell_id; + gen_pm->cell_type_id = pm->cell_type_id; + return 0; + } + } + return -ENOENT; +} +EXPORT_SYMBOL_GPL(ipu_fw_psys_get_program_manifest_by_process); + +static int libcsspsys2600_init(void) +{ + int rval; + static bool csslib_init; + + 
if (csslib_init) + return 0; + + syscom_buffer = kzalloc(ia_css_sizeof_psys(NULL), GFP_KERNEL); + if (!syscom_buffer) + return -ENOMEM; + + syscom_config = kzalloc(sizeof(struct ia_css_syscom_config), + GFP_KERNEL); + if (!syscom_config) { + rval = -ENOMEM; + goto out_syscom_buffer_free; + } + + server_init = kzalloc(sizeof(struct ia_css_psys_server_init), + GFP_KERNEL); + if (!server_init) { + rval = -ENOMEM; + goto out_syscom_config_free; + } + + server_init->ddr_pkg_dir_address = 0; + server_init->host_ddr_pkg_dir = 0; + server_init->pkg_dir_size = 0; + + *syscom_config = *ia_css_psys_specify(); + syscom_config->specific_addr = server_init; + syscom_config->specific_size = sizeof(struct ia_css_psys_server_init); + syscom_config->ssid = PSYS_SSID; + syscom_config->mmid = PSYS_MMID; + syscom_config->regs_addr = ipu_device_cell_memory_address(SPC0, + IPU_DEVICE_SP2600_CONTROL_REGS); + syscom_config->dmem_addr = ipu_device_cell_memory_address(SPC0, + IPU_DEVICE_SP2600_CONTROL_DMEM); + csslib_init = true; + + return 0; + +out_syscom_config_free: + kfree(syscom_config); +out_syscom_buffer_free: + kfree(syscom_buffer); + + return rval; +} + +static void __exit libcsspsys2600_exit(void) +{ + kfree(syscom_buffer); + kfree(syscom_config); + kfree(server_init); +} + +module_init(libcsspsys2600_init); +module_exit(libcsspsys2600_exit); + +MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("Intel ipu psys css library"); diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/libcsspsys2600.h b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/libcsspsys2600.h new file mode 100644 index 0000000000000..b8d790f561805 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/libcsspsys2600.h @@ -0,0 +1,26 @@ +/* + * Copyright (c) 2015--2018 Intel Corporation. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License version + * 2 as published by the Free Software Foundation. 
+ * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#ifndef LIBCSSPSYS2600_H +#define LIBCSSPSYS2600_H + +#include +#include +#include +#include +#include +#include +#include + +extern struct ia_css_syscom_context *psys_syscom; +#endif diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/libintel-ipu4.c b/drivers/media/pci/intel/ipu4/ipu4-css/libintel-ipu4.c new file mode 100644 index 0000000000000..59c9b5b858e03 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/libintel-ipu4.c @@ -0,0 +1,394 @@ +// SPDX-License-Identifier: GPL-2.0 +// Copyright (C) 2014 - 2018 Intel Corporation + +#include +#include +#include +#include "ipu-isys.h" +#include "ipu-wrapper.h" +#include + +#include "ipu-platform.h" + +#define ipu_lib_call_notrace_unlocked(func, isys, ...) \ + ({ \ + int rval; \ + \ + rval = -ia_css_isys_##func((isys)->fwcom, ##__VA_ARGS__); \ + \ + rval; \ + }) + +#define ipu_lib_call_notrace(func, isys, ...) \ + ({ \ + int rval; \ + \ + mutex_lock(&(isys)->lib_mutex); \ + \ + rval = ipu_lib_call_notrace_unlocked( \ + func, isys, ##__VA_ARGS__); \ + \ + mutex_unlock(&(isys)->lib_mutex); \ + \ + rval; \ + }) + +#define ipu_lib_call(func, isys, ...) \ + ({ \ + int rval; \ + dev_dbg(&(isys)->adev->dev, "hostlib: libcall %s\n", #func); \ + rval = ipu_lib_call_notrace(func, isys, ##__VA_ARGS__); \ + \ + rval; \ + }) + +static int wrapper_init_done; + +int ipu_fw_isys_close(struct ipu_isys *isys) +{ + struct device *dev = &isys->adev->dev; + int timeout = IPU_ISYS_TURNOFF_TIMEOUT; + int rval; + unsigned long flags; + + /* + * Ask library to stop the isys fw. Actual close takes + * some time as the FW must stop its actions including code fetch + * to SP icache. 
+ */ + mutex_lock(&isys->lib_mutex); + spin_lock_irqsave(&isys->power_lock, flags); + rval = ipu_lib_call_notrace_unlocked(device_close, isys); + spin_unlock_irqrestore(&isys->power_lock, flags); + mutex_unlock(&isys->lib_mutex); + if (rval) + dev_err(dev, "Device close failure: %d\n", rval); + + /* release probably fails if the close failed. Let's try still */ + do { + usleep_range(IPU_ISYS_TURNOFF_DELAY_US, + 2 * IPU_ISYS_TURNOFF_DELAY_US); + rval = ipu_lib_call_notrace(device_release, isys, 0); + timeout--; + } while (rval != 0 && timeout); + + /* Spin lock to wait the interrupt handler to be finished */ + spin_lock_irqsave(&isys->power_lock, flags); + if (!rval) + isys->fwcom = NULL; /* No further actions needed */ + else + dev_err(dev, "Device release time out %d\n", rval); + spin_unlock_irqrestore(&isys->power_lock, flags); + return rval; +} +EXPORT_SYMBOL_GPL(ipu_fw_isys_close); + +int ipu_fw_isys_init(struct ipu_isys *isys, + unsigned int num_streams) +{ + int retry = IPU_ISYS_OPEN_RETRY; + unsigned int i; + + struct ia_css_isys_device_cfg_data isys_cfg = { + .driver_sys = { + .ssid = ISYS_SSID, + .mmid = ISYS_MMID, + .num_send_queues = clamp_t( + unsigned int, num_streams, 1, + IPU_ISYS_NUM_STREAMS), + .num_recv_queues = IPU_ISYS_NUM_RECV_QUEUE, + .send_queue_size = IPU_ISYS_SIZE_SEND_QUEUE, + .recv_queue_size = IPU_ISYS_SIZE_RECV_QUEUE, + .icache_prefetch = isys->icache_prefetch, + }, + }; + struct device *dev = &isys->adev->dev; + int rval; + + if (!wrapper_init_done) { + wrapper_init_done = true; + ipu_wrapper_init(ISYS_MMID, &isys->adev->dev, + isys->pdata->base); + } + + /* + * SRAM partitioning. 
Initially equal partitioning is set + * TODO: Fine tune the partitining based on the stream pixel load + */ + for (i = 0; i < min(IPU_NOF_SRAM_BLOCKS_MAX, NOF_SRAM_BLOCKS_MAX); i++) { + if (i < isys_cfg.driver_sys.num_send_queues) + isys_cfg.buffer_partition.num_gda_pages[i] = + (IPU_DEVICE_GDA_NR_PAGES * + IPU_DEVICE_GDA_VIRT_FACTOR) / + isys_cfg.driver_sys.num_send_queues; + else + isys_cfg.buffer_partition.num_gda_pages[i] = 0; + } + + rval = -ia_css_isys_device_open(&isys->fwcom, &isys_cfg); + if (rval < 0) { + dev_err(dev, "isys device open failed %d\n", rval); + return rval; + } + + do { + usleep_range(IPU_ISYS_OPEN_TIMEOUT_US, + IPU_ISYS_OPEN_TIMEOUT_US + 10); + rval = ipu_lib_call(device_open_ready, isys); + if (!rval) + break; + retry--; + } while (retry > 0); + + if (!retry && rval) { + dev_err(dev, "isys device open ready failed %d\n", rval); + ipu_fw_isys_close(isys); + } + + return rval; +} +EXPORT_SYMBOL_GPL(ipu_fw_isys_init); + +void ipu_fw_isys_cleanup(struct ipu_isys *isys) +{ + ipu_lib_call(device_release, isys, 1); + isys->fwcom = NULL; +} +EXPORT_SYMBOL_GPL(ipu_fw_isys_cleanup); + +struct ipu_fw_isys_resp_info_abi *ipu_fw_isys_get_resp( + void *context, unsigned int queue, + struct ipu_fw_isys_resp_info_abi *response) +{ + struct ia_css_isys_resp_info apiresp; + int rval; + + rval = -ia_css_isys_stream_handle_response(context, &apiresp); + if (rval < 0) + return NULL; + + response->buf_id = 0; + response->type = apiresp.type; + response->timestamp[0] = apiresp.timestamp[0]; + response->timestamp[1] = apiresp.timestamp[1]; + response->stream_handle = apiresp.stream_handle; + response->error_info.error = apiresp.error; + response->error_info.error_details = apiresp.error_details; + response->pin.out_buf_id = apiresp.pin.out_buf_id; + response->pin.addr = apiresp.pin.addr; + response->pin_id = apiresp.pin_id; + response->process_group_light.param_buf_id = + apiresp.process_group_light.param_buf_id; + response->process_group_light.addr = + 
apiresp.process_group_light.addr; + response->acc_id = apiresp.acc_id; +#ifdef IPU_OTF_SUPPORT + response->frame_counter = apiresp.frame_counter; + response->written_direct = apiresp.written_direct; +#endif + + return response; +} +EXPORT_SYMBOL_GPL(ipu_fw_isys_get_resp); + +void ipu_fw_isys_put_resp(void *context, unsigned int queue) +{ + /* Nothing to do here really */ +} +EXPORT_SYMBOL_GPL(ipu_fw_isys_put_resp); + +int ipu_fw_isys_simple_cmd(struct ipu_isys *isys, + const unsigned int stream_handle, + enum ipu_fw_isys_send_type send_type) +{ + int rval = -1; + + switch (send_type) { + case IPU_FW_ISYS_SEND_TYPE_STREAM_START: + rval = ipu_lib_call(stream_start, isys, stream_handle, + NULL); + break; + case IPU_FW_ISYS_SEND_TYPE_STREAM_FLUSH: + rval = ipu_lib_call(stream_flush, isys, stream_handle); + break; + case IPU_FW_ISYS_SEND_TYPE_STREAM_STOP: + rval = ipu_lib_call(stream_stop, isys, stream_handle); + break; + case IPU_FW_ISYS_SEND_TYPE_STREAM_CLOSE: + rval = ipu_lib_call(stream_close, isys, stream_handle); + break; + default: + WARN_ON(1); + } + + return rval; +} +EXPORT_SYMBOL_GPL(ipu_fw_isys_simple_cmd); + +static void resolution_abi_to_api(const struct ipu_fw_isys_resolution_abi *abi, + struct ia_css_isys_resolution *api) +{ + api->width = abi->width; + api->height = abi->height; +} + +static void output_pin_payload_abi_to_api( + struct ipu_fw_isys_output_pin_payload_abi *abi, + struct ia_css_isys_output_pin_payload *api) +{ + api->out_buf_id = abi->out_buf_id; + api->addr = abi->addr; +} + +static void output_pin_info_abi_to_api( + struct ipu_fw_isys_output_pin_info_abi *abi, + struct ia_css_isys_output_pin_info *api) +{ + api->input_pin_id = abi->input_pin_id; + resolution_abi_to_api(&abi->output_res, &api->output_res); + api->stride = abi->stride; + api->pt = abi->pt; + api->watermark_in_lines = abi->watermark_in_lines; + api->payload_buf_size = abi->payload_buf_size; + api->send_irq = abi->send_irq; + api->ft = abi->ft; +#ifdef IPU_OTF_SUPPORT + 
api->link_id = abi->link_id; +#endif + api->reserve_compression = abi->reserve_compression; +} + +static void param_pin_abi_to_api(struct ipu_fw_isys_param_pin_abi *abi, + struct ia_css_isys_param_pin *api) +{ + api->param_buf_id = abi->param_buf_id; + api->addr = abi->addr; +} + +static void input_pin_info_abi_to_api( + struct ipu_fw_isys_input_pin_info_abi *abi, + struct ia_css_isys_input_pin_info *api) +{ + resolution_abi_to_api(&abi->input_res, &api->input_res); + api->dt = abi->dt; + api->mipi_store_mode = abi->mipi_store_mode; + api->mapped_dt = abi->mapped_dt; +} + +static void isa_cfg_abi_to_api(const struct ipu_fw_isys_isa_cfg_abi *abi, + struct ia_css_isys_isa_cfg *api) +{ + unsigned int i; + + for (i = 0; i < N_IA_CSS_ISYS_RESOLUTION_INFO; i++) + resolution_abi_to_api(&abi->isa_res[i], &api->isa_res[i]); + + api->blc_enabled = abi->cfg.blc; + api->lsc_enabled = abi->cfg.lsc; + api->dpc_enabled = abi->cfg.dpc; + api->downscaler_enabled = abi->cfg.downscaler; + api->awb_enabled = abi->cfg.awb; + api->af_enabled = abi->cfg.af; + api->ae_enabled = abi->cfg.ae; + api->paf_type = abi->cfg.paf; + api->send_irq_stats_ready = abi->cfg.send_irq_stats_ready; + api->send_resp_stats_ready = abi->cfg.send_irq_stats_ready; +} + +static void cropping_abi_to_api(struct ipu_fw_isys_cropping_abi *abi, + struct ia_css_isys_cropping *api) +{ + api->top_offset = abi->top_offset; + api->left_offset = abi->left_offset; + api->bottom_offset = abi->bottom_offset; + api->right_offset = abi->right_offset; +} + +static void stream_cfg_abi_to_api(struct ipu_fw_isys_stream_cfg_data_abi *abi, + struct ia_css_isys_stream_cfg_data *api) +{ + unsigned int i; + + api->src = abi->src; + api->vc = abi->vc; + api->isl_use = abi->isl_use; + api->compfmt = abi->compfmt; + isa_cfg_abi_to_api(&abi->isa_cfg, &api->isa_cfg); + for (i = 0; i < N_IA_CSS_ISYS_CROPPING_LOCATION; i++) + cropping_abi_to_api(&abi->crop[i], &api->crop[i]); + + api->send_irq_sof_discarded = abi->send_irq_sof_discarded; + 
api->send_irq_eof_discarded = abi->send_irq_eof_discarded; + api->send_resp_sof_discarded = abi->send_irq_sof_discarded; + api->send_resp_eof_discarded = abi->send_irq_eof_discarded; + api->nof_input_pins = abi->nof_input_pins; + api->nof_output_pins = abi->nof_output_pins; + for (i = 0; i < abi->nof_input_pins; i++) + input_pin_info_abi_to_api(&abi->input_pins[i], + &api->input_pins[i]); + + for (i = 0; i < abi->nof_output_pins; i++) + output_pin_info_abi_to_api(&abi->output_pins[i], + &api->output_pins[i]); +} + +static void frame_buff_set_abi_to_api( + struct ipu_fw_isys_frame_buff_set_abi *abi, + struct ia_css_isys_frame_buff_set *api) +{ + int i; + + for (i = 0; i < min(IPU_MAX_OPINS, MAX_OPINS); i++) + output_pin_payload_abi_to_api(&abi->output_pins[i], + &api->output_pins[i]); + + param_pin_abi_to_api(&abi->process_group_light, + &api->process_group_light); + + api->send_irq_sof = abi->send_irq_sof; + api->send_irq_eof = abi->send_irq_eof; + api->send_irq_capture_ack = abi->send_irq_capture_ack; + api->send_irq_capture_done = abi->send_irq_capture_done; +} + +int ipu_fw_isys_complex_cmd(struct ipu_isys *isys, + const unsigned int stream_handle, + void *cpu_mapped_buf, + dma_addr_t dma_mapped_buf, + size_t size, + enum ipu_fw_isys_send_type send_type) +{ + union { + struct ia_css_isys_stream_cfg_data stream_cfg; + struct ia_css_isys_frame_buff_set buf; + } param; + int rval = -1; + + memset(¶m, 0, sizeof(param)); + + switch (send_type) { + case IPU_FW_ISYS_SEND_TYPE_STREAM_CAPTURE: + frame_buff_set_abi_to_api(cpu_mapped_buf, ¶m.buf); + rval = ipu_lib_call(stream_capture_indication, + isys, stream_handle, ¶m.buf); + break; + case IPU_FW_ISYS_SEND_TYPE_STREAM_OPEN: + stream_cfg_abi_to_api(cpu_mapped_buf, ¶m.stream_cfg); + rval = ipu_lib_call(stream_open, isys, stream_handle, + ¶m.stream_cfg); + break; + case IPU_FW_ISYS_SEND_TYPE_STREAM_START_AND_CAPTURE: + frame_buff_set_abi_to_api(cpu_mapped_buf, ¶m.buf); + rval = ipu_lib_call(stream_start, isys, 
stream_handle, + ¶m.buf); + break; + default: + WARN_ON(1); + } + + return rval; +} +EXPORT_SYMBOL_GPL(ipu_fw_isys_complex_cmd); + +MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("Intel ipu library"); diff --git a/drivers/media/pci/intel/ipu4/ipu4-fw-resources.c b/drivers/media/pci/intel/ipu4/ipu4-fw-resources.c new file mode 100644 index 0000000000000..73089764b79a4 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-fw-resources.c @@ -0,0 +1,332 @@ +// SPDX-License-Identifier: GPL-2.0 +// Copyright (C) 2015 - 2018 Intel Corporation + +#include "ipu-fw-psys.h" + +#include + +/* resources table */ +/* + * Cell types by cell IDs + */ +const u32 ipu_fw_psys_cell_types[IPU_FW_PSYS_N_CELL_ID] = { + IPU_FW_PSYS_SP_CTRL_TYPE_ID, + IPU_FW_PSYS_SP_SERVER_TYPE_ID, + IPU_FW_PSYS_SP_SERVER_TYPE_ID, + IPU_FW_PSYS_VP_TYPE_ID, + IPU_FW_PSYS_VP_TYPE_ID, + IPU_FW_PSYS_VP_TYPE_ID, + IPU_FW_PSYS_VP_TYPE_ID, + IPU_FW_PSYS_ACC_ISA_TYPE_ID, + IPU_FW_PSYS_ACC_PSA_TYPE_ID, + IPU_FW_PSYS_ACC_PSA_TYPE_ID, + IPU_FW_PSYS_ACC_PSA_TYPE_ID, + IPU_FW_PSYS_ACC_PSA_TYPE_ID, + IPU_FW_PSYS_ACC_PSA_TYPE_ID, + IPU_FW_PSYS_ACC_PSA_TYPE_ID, + IPU_FW_PSYS_ACC_OSA_TYPE_ID, + IPU_FW_PSYS_GDC_TYPE_ID, + IPU_FW_PSYS_GDC_TYPE_ID +}; + +const u16 ipu_fw_num_dev_channels[IPU_FW_PSYS_N_DEV_CHN_ID] = { + IPU_FW_PSYS_DEV_CHN_DMA_EXT0_MAX_SIZE, + IPU_FW_PSYS_DEV_CHN_GDC_MAX_SIZE, + IPU_FW_PSYS_DEV_CHN_DMA_EXT1_READ_MAX_SIZE, + IPU_FW_PSYS_DEV_CHN_DMA_EXT1_WRITE_MAX_SIZE, + IPU_FW_PSYS_DEV_CHN_DMA_INTERNAL_MAX_SIZE, + IPU_FW_PSYS_DEV_CHN_DMA_IPFD_MAX_SIZE, + IPU_FW_PSYS_DEV_CHN_DMA_ISA_MAX_SIZE, + IPU_FW_PSYS_DEV_CHN_DMA_FW_MAX_SIZE, +#ifdef CONFIG_VIDEO_INTEL_IPU4P + IPU_FW_PSYS_DEV_CHN_DMA_CMPRS_MAX_SIZE +#endif +}; + +const u16 ipu_fw_psys_mem_size[IPU_FW_PSYS_N_MEM_ID] = { + IPU_FW_PSYS_VMEM0_MAX_SIZE, + IPU_FW_PSYS_VMEM1_MAX_SIZE, + IPU_FW_PSYS_VMEM2_MAX_SIZE, + IPU_FW_PSYS_VMEM3_MAX_SIZE, + IPU_FW_PSYS_VMEM4_MAX_SIZE, + IPU_FW_PSYS_BAMEM0_MAX_SIZE, + IPU_FW_PSYS_BAMEM1_MAX_SIZE, + IPU_FW_PSYS_BAMEM2_MAX_SIZE, + 
IPU_FW_PSYS_BAMEM3_MAX_SIZE, + IPU_FW_PSYS_DMEM0_MAX_SIZE, + IPU_FW_PSYS_DMEM1_MAX_SIZE, + IPU_FW_PSYS_DMEM2_MAX_SIZE, + IPU_FW_PSYS_DMEM3_MAX_SIZE, + IPU_FW_PSYS_DMEM4_MAX_SIZE, + IPU_FW_PSYS_DMEM5_MAX_SIZE, + IPU_FW_PSYS_DMEM6_MAX_SIZE, + IPU_FW_PSYS_DMEM7_MAX_SIZE, + IPU_FW_PSYS_PMEM0_MAX_SIZE, + IPU_FW_PSYS_PMEM1_MAX_SIZE, + IPU_FW_PSYS_PMEM2_MAX_SIZE, + IPU_FW_PSYS_PMEM3_MAX_SIZE +}; + +const enum ipu_mem_id +ipu_fw_psys_cell_mem[IPU_FW_PSYS_N_CELL_ID][IPU_FW_PSYS_N_MEM_TYPE_ID] = { + { + IPU_FW_PSYS_N_MEM_ID, + IPU_FW_PSYS_DMEM0_ID, + IPU_FW_PSYS_N_MEM_ID, + IPU_FW_PSYS_N_MEM_ID, + IPU_FW_PSYS_N_MEM_ID + }, + { + IPU_FW_PSYS_N_MEM_ID, + IPU_FW_PSYS_DMEM1_ID, + IPU_FW_PSYS_N_MEM_ID, + IPU_FW_PSYS_N_MEM_ID, + IPU_FW_PSYS_N_MEM_ID + }, + { + IPU_FW_PSYS_N_MEM_ID, + IPU_FW_PSYS_DMEM2_ID, + IPU_FW_PSYS_N_MEM_ID, + IPU_FW_PSYS_N_MEM_ID, + IPU_FW_PSYS_N_MEM_ID + }, + { + IPU_FW_PSYS_VMEM4_ID, + IPU_FW_PSYS_DMEM4_ID, + IPU_FW_PSYS_VMEM0_ID, + IPU_FW_PSYS_BAMEM0_ID, + IPU_FW_PSYS_PMEM0_ID + }, + { + IPU_FW_PSYS_VMEM4_ID, + IPU_FW_PSYS_DMEM5_ID, + IPU_FW_PSYS_VMEM1_ID, + IPU_FW_PSYS_BAMEM1_ID, + IPU_FW_PSYS_PMEM1_ID + }, + { + IPU_FW_PSYS_VMEM4_ID, + IPU_FW_PSYS_DMEM6_ID, + IPU_FW_PSYS_VMEM2_ID, + IPU_FW_PSYS_BAMEM2_ID, + IPU_FW_PSYS_PMEM2_ID, + }, + { + IPU_FW_PSYS_VMEM4_ID, + IPU_FW_PSYS_DMEM7_ID, + IPU_FW_PSYS_VMEM3_ID, + IPU_FW_PSYS_BAMEM3_ID, + IPU_FW_PSYS_PMEM3_ID, + }, + { + IPU_FW_PSYS_N_MEM_ID, + IPU_FW_PSYS_N_MEM_ID, + IPU_FW_PSYS_N_MEM_ID, + IPU_FW_PSYS_N_MEM_ID, + IPU_FW_PSYS_N_MEM_ID + }, + { + IPU_FW_PSYS_N_MEM_ID, + IPU_FW_PSYS_N_MEM_ID, + IPU_FW_PSYS_N_MEM_ID, + IPU_FW_PSYS_N_MEM_ID, + IPU_FW_PSYS_N_MEM_ID + }, + { + IPU_FW_PSYS_N_MEM_ID, + IPU_FW_PSYS_N_MEM_ID, + IPU_FW_PSYS_N_MEM_ID, + IPU_FW_PSYS_N_MEM_ID, + IPU_FW_PSYS_N_MEM_ID + }, + { + IPU_FW_PSYS_N_MEM_ID, + IPU_FW_PSYS_N_MEM_ID, + IPU_FW_PSYS_N_MEM_ID, + IPU_FW_PSYS_N_MEM_ID, + IPU_FW_PSYS_N_MEM_ID + }, + { + IPU_FW_PSYS_N_MEM_ID, + IPU_FW_PSYS_N_MEM_ID, + IPU_FW_PSYS_N_MEM_ID, + 
IPU_FW_PSYS_N_MEM_ID, + IPU_FW_PSYS_N_MEM_ID + }, + { + IPU_FW_PSYS_N_MEM_ID, + IPU_FW_PSYS_N_MEM_ID, + IPU_FW_PSYS_N_MEM_ID, + IPU_FW_PSYS_N_MEM_ID, + IPU_FW_PSYS_N_MEM_ID + }, + { + IPU_FW_PSYS_N_MEM_ID, + IPU_FW_PSYS_N_MEM_ID, + IPU_FW_PSYS_N_MEM_ID, + IPU_FW_PSYS_N_MEM_ID, + IPU_FW_PSYS_N_MEM_ID + }, + { + IPU_FW_PSYS_N_MEM_ID, + IPU_FW_PSYS_N_MEM_ID, + IPU_FW_PSYS_N_MEM_ID, + IPU_FW_PSYS_N_MEM_ID, + IPU_FW_PSYS_N_MEM_ID + }, + { + IPU_FW_PSYS_N_MEM_ID, + IPU_FW_PSYS_N_MEM_ID, + IPU_FW_PSYS_N_MEM_ID, + IPU_FW_PSYS_N_MEM_ID, + IPU_FW_PSYS_N_MEM_ID + }, + { + IPU_FW_PSYS_N_MEM_ID, + IPU_FW_PSYS_N_MEM_ID, + IPU_FW_PSYS_N_MEM_ID, + IPU_FW_PSYS_N_MEM_ID, + IPU_FW_PSYS_N_MEM_ID + } +}; + +static const struct ipu_fw_resource_definitions default_defs = { + .cells = ipu_fw_psys_cell_types, + .num_cells = IPU_FW_PSYS_N_CELL_ID, + .num_cells_type = IPU_FW_PSYS_N_CELL_TYPE_ID, + + .dev_channels = ipu_fw_num_dev_channels, + .num_dev_channels = IPU_FW_PSYS_N_DEV_CHN_ID, + + .num_ext_mem_types = IPU_FW_PSYS_N_DATA_MEM_TYPE_ID, + .num_ext_mem_ids = IPU_FW_PSYS_N_MEM_ID, + .ext_mem_ids = ipu_fw_psys_mem_size, + + .num_dfm_ids = IPU_FW_PSYS_N_DEV_DFM_ID, + + .cell_mem_row = IPU_FW_PSYS_N_MEM_TYPE_ID, + .cell_mem = &ipu_fw_psys_cell_mem[0][0], + + .process.ext_mem_id = offsetof(struct ipu_fw_psys_process, + ext_mem_id[0]), + .process.ext_mem_offset = offsetof(struct ipu_fw_psys_process, + ext_mem_offset[0]), + .process.dev_chn_offset = offsetof(struct ipu_fw_psys_process, + dev_chn_offset[0]), + .process.cell_id = offsetof(struct ipu_fw_psys_process, cell_id), +}; + +const struct ipu_fw_resource_definitions *res_defs = &default_defs; + +/********** Generic resource handling **********/ + +/* + * Extension library gives byte offsets to its internal structures. + * use those offsets to update fields. Without extension lib access + * structures directly. 
+ */ +int ipu_fw_psys_set_process_cell_id(struct ipu_fw_psys_process *ptr, u8 index, + u8 value) +{ + struct ipu_fw_psys_process_group *parent = + (struct ipu_fw_psys_process_group *) ((char *)ptr + + ptr->parent_offset); + + ptr->cell_id = value; + parent->resource_bitmap |= 1 << value; + + return 0; +} + +u8 ipu_fw_psys_get_process_cell_id(struct ipu_fw_psys_process *ptr, u8 index) +{ + return ptr->cell_id; +} + +int ipu_fw_psys_clear_process_cell(struct ipu_fw_psys_process *ptr) +{ + struct ipu_fw_psys_process_group *parent; + u8 cell_id = ipu_fw_psys_get_process_cell_id(ptr, 0); + int retval = -1; + + parent = (struct ipu_fw_psys_process_group *) ((char *)ptr + + ptr->parent_offset); + if ((1 << cell_id) && ((1 << cell_id) & parent->resource_bitmap)) { + ipu_fw_psys_set_process_cell_id(ptr, 0, IPU_FW_PSYS_N_CELL_ID); + parent->resource_bitmap &= ~(1 << cell_id); + retval = 0; + } + + return retval; +} + +int ipu_fw_psys_set_process_dev_chn_offset(struct ipu_fw_psys_process *ptr, + u16 offset, u16 value) +{ + ptr->dev_chn_offset[offset] = value; + + return 0; +} + +int ipu_fw_psys_set_process_ext_mem(struct ipu_fw_psys_process *ptr, + u16 type_id, u16 mem_id, u16 offset) +{ + ptr->ext_mem_offset[type_id] = offset; + ptr->ext_mem_id[type_id] = mem_id; + + return 0; +} + +static struct ipu_fw_psys_program_manifest * +ipu_resource_get_program_manifest( + const struct ipu_fw_psys_program_group_manifest *manifest, + const unsigned int program_index) +{ + struct ipu_fw_psys_program_manifest *prg_manifest_base; + u8 *program_manifest = NULL; + u8 program_count; + unsigned int i; + + program_count = manifest->program_count; + + prg_manifest_base = (struct ipu_fw_psys_program_manifest *) + ((char *)manifest + manifest->program_manifest_offset); + if (program_index < program_count) { + program_manifest = (u8 *) prg_manifest_base; + for (i = 0; i < program_index; i++) + program_manifest += + ((struct ipu_fw_psys_program_manifest *) + program_manifest)->size; + } + + return 
(struct ipu_fw_psys_program_manifest *)program_manifest; +} + +int ipu_fw_psys_get_program_manifest_by_process( + struct ipu_fw_generic_program_manifest *gen_pm, + const struct ipu_fw_psys_program_group_manifest *pg_manifest, + struct ipu_fw_psys_process *process) +{ + u32 process_id = process->ID; + int programs = pg_manifest->program_count; + int i; + + for (i = 0; i < programs; i++) { + u32 program_id; + struct ipu_fw_psys_program_manifest *pm = + ipu_resource_get_program_manifest(pg_manifest, i); + if (!pm) + continue; + program_id = pm->ID; + if (program_id == process_id) { + gen_pm->dev_chn_size = pm->dev_chn_size; + gen_pm->dev_chn_offset = NULL; + gen_pm->ext_mem_offset = NULL; + gen_pm->cell_id = pm->cell_id; + gen_pm->cell_type_id = pm->cell_type_id; + gen_pm->ext_mem_size = pm->ext_mem_size; + return 0; + } + } + return -ENOENT; +} diff --git a/drivers/media/pci/intel/ipu4/ipu4-isys-csi2.c b/drivers/media/pci/intel/ipu4/ipu4-isys-csi2.c new file mode 100644 index 0000000000000..50eb7a9ab6e4b --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-isys-csi2.c @@ -0,0 +1,713 @@ +// SPDX-License-Identifier: GPL-2.0 +// Copyright (C) 2013 - 2018 Intel Corporation + +#include "ipu.h" +#include "ipu-buttress.h" +#include "ipu-isys.h" +#include "ipu-isys-csi2.h" +#include "ipu-platform-isys-csi2-reg.h" +#include "ipu-platform-regs.h" +#include "ipu-trace.h" +#include "ipu-isys-csi2.h" + +#define CSE_IPC_CMDPHYWRITEL 35 +#define CSE_IPC_CMDPHYWRITEH 36 +#define CSE_IPC_CMDLEGACYPHYWRITEL 39 +#define CSE_IPC_CMDLEGACYPHYWRITEH 40 + +#define NBR_BULK_MSGS 30 /* Space reservation for IPC messages */ + +#define CSI2_UPDATE_TIME_TRY_NUM 3 +#define CSI2_UPDATE_TIME_MAX_DIFF 20 + +static u32 +build_cse_ipc_commands(struct ipu_ipc_buttress_bulk_msg *target, + u32 nbr_msgs, u32 opcodel, u32 reg, u32 data) +{ + struct ipu_ipc_buttress_bulk_msg *msgs = &target[nbr_msgs]; + u32 opcodeh = opcodel == CSE_IPC_CMDPHYWRITEL ? 
+ CSE_IPC_CMDPHYWRITEH : CSE_IPC_CMDLEGACYPHYWRITEH; + + /* + * Writing of 32 bits consist of 2 16 bit IPC messages to CSE. + * Messages must be in low-high order and nothing else between + * them. + * Register is in bits 8..15 as index (register value divided by 4) + */ + msgs->cmd = opcodel | (reg << (8 - 2)) | ((data & 0xffff) << 16); + msgs->expected_resp = opcodel; + msgs->require_resp = true; + msgs->cmd_size = 4; + msgs++; + + msgs->cmd = opcodeh | (reg << (8 - 2)) | (data & 0xffff0000); + msgs->expected_resp = opcodeh; + msgs->require_resp = true; + msgs->cmd_size = 4; + + nbr_msgs += 2; + + /* Hits only if code change introduces too many new IPC messages */ + WARN_ON(nbr_msgs > NBR_BULK_MSGS); + + return nbr_msgs; +} + +static int csi2_ev_correction_params(struct ipu_isys_csi2 *csi2, + unsigned int lanes) +{ + struct ipu_device *isp = csi2->isys->adev->isp; + struct ipu_ipc_buttress_bulk_msg *messages; + const struct ipu_receiver_electrical_params *ev_params; + const struct ipu_isys_internal_csi2_pdata *csi2_pdata; + + __s64 link_freq; + unsigned int i; + u32 val; + u32 nbr_msgs = 0; + int rval; + bool conf_set0; + bool conf_set1; + bool conf_combined = false; + + csi2_pdata = &csi2->isys->pdata->ipdata->csi2; + ev_params = csi2_pdata->evparams; + if (!ev_params) + return 0; + + if (csi2->isys->csi2_cse_ipc_not_supported) + return 0; + + rval = ipu_isys_csi2_get_link_freq(csi2, &link_freq); + if (rval) + return rval; + + i = 0; + while (ev_params[i].device) { + if (ev_params[i].device == isp->pdev->device && + ev_params[i].revision == isp->pdev->revision && + ev_params[i].min_freq < link_freq && + ev_params[i].max_freq >= link_freq) + break; + i++; + } + + if (!ev_params[i].device) { + dev_info(&csi2->isys->adev->dev, + "No rcomp value override for this HW revision\n"); + return 0; + } + + messages = kcalloc(NBR_BULK_MSGS, sizeof(*messages), GFP_KERNEL); + if (!messages) + return -ENOMEM; + + conf_set0 = csi2_pdata->evsetmask0 & (1 << csi2->index); + 
conf_set1 = csi2_pdata->evsetmask1 & (1 << csi2->index); + if (csi2_pdata->evlanecombine[csi2->index]) { + conf_combined = + lanes > csi2_pdata->evlanecombine[csi2->index] ? 1 : 0; + } + conf_set1 |= conf_combined; + + /* + * Note: There is no way to make R-M-W to these. Possible non-zero reset + * default is OR'd with the values + */ + val = 1 << CSI2_SB_CSI_RCOMP_CONTROL_LEGACY_OVR_ENABLE_PORT1_SHIFT | + 1 << CSI2_SB_CSI_RCOMP_CONTROL_LEGACY_OVR_ENABLE_PORT2_SHIFT | + 1 << CSI2_SB_CSI_RCOMP_CONTROL_LEGACY_OVR_ENABLE_PORT3_SHIFT | + 1 << CSI2_SB_CSI_RCOMP_CONTROL_LEGACY_OVR_ENABLE_PORT4_SHIFT | + 1 << CSI2_SB_CSI_RCOMP_CONTROL_LEGACY_OVR_ENABLE_SHIFT | + ev_params[i].rcomp_val_legacy << + CSI2_SB_CSI_RCOMP_CONTROL_LEGACY_OVR_CODE_SHIFT; + + nbr_msgs = build_cse_ipc_commands(messages, nbr_msgs, + CSE_IPC_CMDLEGACYPHYWRITEL, + CSI2_SB_CSI_RCOMP_CONTROL_LEGACY, + val); + + val = 2 << CSI2_SB_CSI_RCOMP_UPDATE_MODE_SHIFT | + 1 << CSI2_SB_CSI_RCOMP_OVR_ENABLE_SHIFT | + ev_params[i].rcomp_val_combo << CSI2_SB_CSI_RCOMP_OVR_CODE_SHIFT; + + nbr_msgs = build_cse_ipc_commands(messages, nbr_msgs, + CSE_IPC_CMDPHYWRITEL, + CSI2_SB_CSI_RCOMP_CONTROL_COMBO, val); + + if (conf_set0) { + val = 0x380078 | ev_params[i].ports[0].ctle_val << + CSI2_SB_CPHY0_RX_CONTROL1_EQ_LANE0_SHIFT; + nbr_msgs = build_cse_ipc_commands(messages, nbr_msgs, + CSE_IPC_CMDPHYWRITEL, + CSI2_SB_CPHY0_RX_CONTROL1, + val); + val = 0x10000; + if (ev_params[i].ports[0].crc_val != IPU_EV_AUTO) + val |= ev_params[i].ports[0].crc_val << + CSI2_SB_CPHY0_DLL_OVRD_CRCDC_FSM_DLANE0_SHIFT | + CSI2_SB_CPHY0_DLL_OVRD_LDEN_CRCDC_FSM_DLANE0; + + nbr_msgs = build_cse_ipc_commands(messages, nbr_msgs, + CSE_IPC_CMDPHYWRITEL, + CSI2_SB_CPHY0_DLL_OVRD, val); + } + + if (conf_set1) { + val = 0x380078 | ev_params[i].ports[1].ctle_val << + CSI2_SB_CPHY2_RX_CONTROL1_EQ_LANE1_SHIFT; + nbr_msgs = build_cse_ipc_commands(messages, nbr_msgs, + CSE_IPC_CMDPHYWRITEL, + CSI2_SB_CPHY2_RX_CONTROL1, + val); + + val = 0x10000; + if 
(ev_params[i].ports[1].crc_val != IPU_EV_AUTO) + val |= ev_params[i].ports[1].crc_val << + CSI2_SB_CPHY2_DLL_OVRD_CRCDC_FSM_DLANE1_SHIFT | + CSI2_SB_CPHY2_DLL_OVRD_LDEN_CRCDC_FSM_DLANE1; + + nbr_msgs = build_cse_ipc_commands(messages, nbr_msgs, + CSE_IPC_CMDPHYWRITEL, + CSI2_SB_CPHY2_DLL_OVRD, val); + } + + mutex_lock(&csi2->isys->mutex); + /* This register is shared between two receivers */ + val = csi2->isys->csi2_rx_ctrl_cached; + if (conf_set0) { + val &= ~CSI2_SB_DPHY0_RX_CNTRL_SKEWCAL_CR_SEL_DLANE01_MASK; + if (ev_params[i].ports[0].drc_val != IPU_EV_AUTO) + val |= + CSI2_SB_DPHY0_RX_CNTRL_SKEWCAL_CR_SEL_DLANE01_MASK; + } + + if (conf_set1) { + val &= ~CSI2_SB_DPHY0_RX_CNTRL_SKEWCAL_CR_SEL_DLANE23_MASK; + if (ev_params[i].ports[1].drc_val != IPU_EV_AUTO) + val |= + CSI2_SB_DPHY0_RX_CNTRL_SKEWCAL_CR_SEL_DLANE23_MASK; + } + csi2->isys->csi2_rx_ctrl_cached = val; + + nbr_msgs = build_cse_ipc_commands(messages, nbr_msgs, + CSE_IPC_CMDPHYWRITEL, + CSI2_SB_DPHY0_RX_CNTRL, val); + mutex_unlock(&csi2->isys->mutex); + + if (conf_set0 && ev_params[i].ports[0].drc_val != IPU_EV_AUTO) { + /* Write value with FSM disabled */ + val = (conf_combined ? + ev_params[i].ports[0].drc_val_combined : + ev_params[i].ports[0].drc_val) << + CSI2_SB_DPHY0_DLL_OVRD_DRC_FSM_OVRD_SHIFT; + + nbr_msgs = build_cse_ipc_commands(messages, nbr_msgs, + CSE_IPC_CMDPHYWRITEL, + CSI2_SB_DPHY0_DLL_OVRD, val); + + /* Write value with FSM enabled */ + val |= 1 << CSI2_SB_DPHY1_DLL_OVRD_LDEN_DRC_FSM_SHIFT; + nbr_msgs = build_cse_ipc_commands(messages, nbr_msgs, + CSE_IPC_CMDPHYWRITEL, + CSI2_SB_DPHY0_DLL_OVRD, val); + } else if (conf_set0 && ev_params[i].ports[0].drc_val == IPU_EV_AUTO) { + nbr_msgs = build_cse_ipc_commands(messages, nbr_msgs, + CSE_IPC_CMDPHYWRITEL, + CSI2_SB_DPHY0_DLL_OVRD, 0); + } + + if (conf_set1 && ev_params[i].ports[1].drc_val != IPU_EV_AUTO) { + val = (conf_combined ? 
+ ev_params[i].ports[1].drc_val_combined : + ev_params[i].ports[1].drc_val) << + CSI2_SB_DPHY0_DLL_OVRD_DRC_FSM_OVRD_SHIFT; + nbr_msgs = build_cse_ipc_commands(messages, nbr_msgs, + CSE_IPC_CMDPHYWRITEL, + CSI2_SB_DPHY1_DLL_OVRD, val); + + val |= 1 << CSI2_SB_DPHY1_DLL_OVRD_LDEN_DRC_FSM_SHIFT; + nbr_msgs = build_cse_ipc_commands(messages, nbr_msgs, + CSE_IPC_CMDPHYWRITEL, + CSI2_SB_DPHY1_DLL_OVRD, val); + } else if (conf_set1 && ev_params[i].ports[1].drc_val == IPU_EV_AUTO) { + nbr_msgs = build_cse_ipc_commands(messages, nbr_msgs, + CSE_IPC_CMDPHYWRITEL, + CSI2_SB_DPHY1_DLL_OVRD, 0); + } + + rval = ipu_buttress_ipc_send_bulk(isp, + IPU_BUTTRESS_IPC_CSE, + messages, nbr_msgs); + + if (rval == -ENODEV) + csi2->isys->csi2_cse_ipc_not_supported = true; + + kfree(messages); + return 0; +} + +static void ipu_isys_register_errors(struct ipu_isys_csi2 *csi2) +{ + u32 status = readl(csi2->base + CSI2_REG_CSIRX_IRQ_STATUS); + + writel(status, csi2->base + CSI2_REG_CSIRX_IRQ_CLEAR); + csi2->receiver_errors |= status; +} + +void ipu_isys_csi2_error(struct ipu_isys_csi2 *csi2) +{ + /* + * Strings corresponding to CSI-2 receiver errors are here. + * Corresponding macros are defined in the header file. 
+ */ + static const struct ipu_isys_csi2_error { + const char *error_string; + bool is_info_only; + } errors[] = { + {"Single packet header error corrected", true}, + {"Multiple packet header errors detected", true}, + {"Payload checksum (CRC) error", true}, + {"FIFO overflow", false}, + {"Reserved short packet data type detected", true}, + {"Reserved long packet data type detected", true}, + {"Incomplete long packet detected", false}, + {"Frame sync error", false}, + {"Line sync error", false}, + {"DPHY recoverable synchronization error", true}, + {"DPHY non-recoverable synchronization error", false}, + {"Escape mode error", true}, + {"Escape mode trigger event", true}, + {"Escape mode ultra-low power state for data lane(s)", true}, + {"Escape mode ultra-low power state exit for clock lane", true}, + {"Inter-frame short packet discarded", true}, + {"Inter-frame long packet discarded", true}, + }; + u32 status; + unsigned int i; + + /* Register errors once more in case of error interrupts are disabled */ + ipu_isys_register_errors(csi2); + status = csi2->receiver_errors; + csi2->receiver_errors = 0; + + for (i = 0; i < ARRAY_SIZE(errors); i++) { + if (!(status & BIT(i))) + continue; + + if (errors[i].is_info_only) + dev_dbg(&csi2->isys->adev->dev, + "csi2-%i info: %s\n", + csi2->index, errors[i].error_string); + else + dev_err_ratelimited(&csi2->isys->adev->dev, + "csi2-%i error: %s\n", + csi2->index, + errors[i].error_string); + } +} + +static u64 tunit_time_to_us(struct ipu_isys *isys, u64 time) +{ + struct ipu_bus_device *adev = to_ipu_bus_device(isys->adev->iommu); + u64 isys_clk = IS_FREQ_SOURCE / adev->ctrl->divisor / 1000000; + + do_div(time, isys_clk); + + return time; +} + +static int update_timer_base(struct ipu_isys *isys) +{ + int rval, i; + u64 time; + + for (i = 0; i < CSI2_UPDATE_TIME_TRY_NUM; i++) { + rval = ipu_trace_get_timer(&isys->adev->dev, &time); + if (rval) { + dev_err(&isys->adev->dev, + "Failed to read Tunit timer.\n"); + return rval; + } 
+ rval = ipu_buttress_tsc_read(isys->adev->isp, + &isys->tsc_timer_base); + if (rval) { + dev_err(&isys->adev->dev, + "Failed to read TSC timer.\n"); + return rval; + } + rval = ipu_trace_get_timer(&isys->adev->dev, + &isys->tunit_timer_base); + if (rval) { + dev_err(&isys->adev->dev, + "Failed to read Tunit timer.\n"); + return rval; + } + if (tunit_time_to_us(isys, isys->tunit_timer_base - time) < + CSI2_UPDATE_TIME_MAX_DIFF) + return 0; + } + dev_dbg(&isys->adev->dev, "Timer base values may not be accurate.\n"); + return 0; +} + +static int +ipu_isys_csi2_configure_tunit(struct ipu_isys_csi2 *csi2, bool enable) +{ + struct ipu_isys *isys = csi2->isys; + void __iomem *isys_base = isys->pdata->base; + void __iomem *tunit_base = isys_base + TRACE_REG_IS_TRACE_UNIT_BASE; + int i, ret = 0; + + mutex_lock(&isys->short_packet_tracing_mutex); + if (!enable) { + isys->short_packet_tracing_count--; + if (isys->short_packet_tracing_count == 0) + writel(0, tunit_base + TRACE_REG_TUN_DDR_ENABLE); + goto out_release_mutex; + } + + isys->short_packet_tracing_count++; + if (isys->short_packet_tracing_count > 1) + goto out_release_mutex; + + memset(isys->short_packet_trace_buffer, 0, + IPU_ISYS_SHORT_PACKET_TRACE_BUFFER_SIZE); + dma_sync_single_for_device(&isys->adev->dev, + isys->short_packet_trace_buffer_dma_addr, + IPU_ISYS_SHORT_PACKET_TRACE_BUFFER_SIZE, + DMA_BIDIRECTIONAL); + + /* ring buffer base */ + writel(isys->short_packet_trace_buffer_dma_addr, + tunit_base + TRACE_REG_TUN_DRAM_BASE_ADDR); + + /* ring buffer end */ + writel(isys->short_packet_trace_buffer_dma_addr + + IPU_ISYS_SHORT_PACKET_TRACE_BUFFER_SIZE - + IPU_ISYS_SHORT_PACKET_TRACE_MSG_SIZE, + tunit_base + TRACE_REG_TUN_DRAM_END_ADDR); + + /* Infobits for ddr trace */ + writel(IPU_INFO_REQUEST_DESTINATION_PRIMARY, + tunit_base + TRACE_REG_TUN_DDR_INFO_VAL); + + /* Remove reset from trace timers */ + writel(TRACE_REG_GPREG_TRACE_TIMER_RST_OFF, + isys_base + TRACE_REG_IS_GPREG_TRACE_TIMER_RST_N); + + /* Reset 
CSI2 monitors */ + writel(1, isys->pdata->base + TRACE_REG_CSI2_TM_BASE + + TRACE_REG_CSI2_TM_RESET_REG_IDX); + writel(1, isys->pdata->base + TRACE_REG_CSI2_3PH_TM_BASE + + TRACE_REG_CSI2_TM_RESET_REG_IDX); + + /* Set trace address register. */ + writel(TRACE_REG_CSI2_TM_TRACE_ADDRESS_VAL, + isys->pdata->base + TRACE_REG_CSI2_TM_BASE + + TRACE_REG_CSI2_TM_TRACE_ADDRESS_REG_IDX); + writel(TRACE_REG_CSI2_TM_TRACE_HEADER_VAL, + isys->pdata->base + TRACE_REG_CSI2_TM_BASE + + TRACE_REG_CSI2_TM_TRACE_HEADER_REG_IDX); + writel(TRACE_REG_CSI2_3PH_TM_TRACE_ADDRESS_VAL, + isys->pdata->base + TRACE_REG_CSI2_3PH_TM_BASE + + TRACE_REG_CSI2_TM_TRACE_ADDRESS_REG_IDX); + writel(TRACE_REG_CSI2_TM_TRACE_HEADER_VAL, + isys->pdata->base + TRACE_REG_CSI2_3PH_TM_BASE + + TRACE_REG_CSI2_TM_TRACE_HEADER_REG_IDX); + + /* Enable DDR trace. */ + writel(1, tunit_base + TRACE_REG_TUN_DDR_ENABLE); + + /* Enable trace for CSI2 port. */ + for (i = 0; i < IPU_ISYS_MAX_CSI2_LEGACY_PORTS + + IPU_ISYS_MAX_CSI2_COMBO_PORTS; i++) { + void __iomem *event_mask_reg = + (i < IPU_ISYS_MAX_CSI2_LEGACY_PORTS) ? 
+ isys->pdata->base + TRACE_REG_CSI2_TM_BASE + + TRACE_REG_CSI2_TM_TRACE_DDR_EN_REG_IDX_P(i) : + isys->pdata->base + TRACE_REG_CSI2_3PH_TM_BASE + + TRACE_REG_CSI2_3PH_TM_TRACE_DDR_EN_REG_IDX_P(i); + + writel(IPU_ISYS_SHORT_PACKET_TRACE_EVENT_MASK, + event_mask_reg); + } + + /* Enable CSI2 receiver monitor */ + writel(1, isys->pdata->base + TRACE_REG_CSI2_TM_BASE + + TRACE_REG_CSI2_TM_OVERALL_ENABLE_REG_IDX); + writel(1, isys->pdata->base + TRACE_REG_CSI2_3PH_TM_BASE + + TRACE_REG_CSI2_TM_OVERALL_ENABLE_REG_IDX); + + ret = update_timer_base(isys); + +out_release_mutex: + mutex_unlock(&isys->short_packet_tracing_mutex); + + return ret; +} + +int ipu_isys_csi2_set_stream(struct v4l2_subdev *sd, + struct ipu_isys_csi2_timing timing, + unsigned int nlanes, int enable) +{ + struct ipu_isys_csi2 *csi2 = to_ipu_isys_csi2(sd); + struct ipu_isys_pipeline *ip = container_of(sd->entity.pipe, + struct ipu_isys_pipeline, + pipe); + unsigned int i; + int rval; + u32 val, csi2part = 0, csi2csirx; + + dev_dbg(&csi2->isys->adev->dev, "csi2 s_stream %d\n", enable); + + if (!enable) { + ipu_isys_csi2_error(csi2); + + val = readl(csi2->base + CSI2_REG_CSI_RX_CONFIG); + val &= ~(CSI2_CSI_RX_CONFIG_DISABLE_BYTE_CLK_GATING | + CSI2_CSI_RX_CONFIG_RELEASE_LP11); + writel(val, csi2->base + CSI2_REG_CSI_RX_CONFIG); + + writel(0, csi2->base + CSI2_REG_CSI_RX_ENABLE); + + /* Disable interrupts */ + writel(0, csi2->base + CSI2_REG_CSI2S2M_IRQ_MASK); + writel(0, csi2->base + CSI2_REG_CSI2S2M_IRQ_ENABLE); + writel(0, csi2->base + CSI2_REG_CSI2PART_IRQ_MASK); + writel(0, csi2->base + CSI2_REG_CSI2PART_IRQ_ENABLE); + if (ip->interlaced) + ipu_isys_csi2_configure_tunit(csi2, 0); + return 0; + } + + csi2_ev_correction_params(csi2, nlanes); + + writel(timing.ctermen, + csi2->base + CSI2_REG_CSI_RX_DLY_CNT_TERMEN_CLANE); + writel(timing.csettle, + csi2->base + CSI2_REG_CSI_RX_DLY_CNT_SETTLE_CLANE); + + for (i = 0; i < nlanes; i++) { + writel(timing.dtermen, + csi2->base + + 
CSI2_REG_CSI_RX_DLY_CNT_TERMEN_DLANE(i)); + writel(timing.dsettle, + csi2->base + + CSI2_REG_CSI_RX_DLY_CNT_SETTLE_DLANE(i)); + } + + val = readl(csi2->base + CSI2_REG_CSI_RX_CONFIG); + val |= CSI2_CSI_RX_CONFIG_DISABLE_BYTE_CLK_GATING | + CSI2_CSI_RX_CONFIG_RELEASE_LP11; + writel(val, csi2->base + CSI2_REG_CSI_RX_CONFIG); + + writel(nlanes, csi2->base + CSI2_REG_CSI_RX_NOF_ENABLED_LANES); + writel(CSI2_CSI_RX_ENABLE_ENABLE, + csi2->base + CSI2_REG_CSI_RX_ENABLE); + +#ifdef IPU_VC_SUPPORT + /* SOF/EOF of VC0-VC3 enabled from CSI2PART register in B0 */ + for (i = 0; i < NR_OF_CSI2_VC; i++) + csi2part |= CSI2_IRQ_FS_VC(i) | CSI2_IRQ_FE_VC(i); +#else + /* SOF/EOF enabled from CSI2PART register in B0 */ + csi2part |= CSI2_IRQ_FS_VC | CSI2_IRQ_FE_VC; +#endif + + /* Enable csi2 receiver error interrupts */ + csi2csirx = BIT(CSI2_CSIRX_NUM_ERRORS) - 1; + writel(csi2csirx, csi2->base + CSI2_REG_CSIRX_IRQ_EDGE); + writel(0, csi2->base + CSI2_REG_CSIRX_IRQ_LEVEL_NOT_PULSE); + writel(csi2csirx, csi2->base + CSI2_REG_CSIRX_IRQ_CLEAR); + writel(csi2csirx, csi2->base + CSI2_REG_CSIRX_IRQ_MASK); + writel(csi2csirx, csi2->base + CSI2_REG_CSIRX_IRQ_ENABLE); + + /* Enable csi2 error and SOF-related irqs */ + writel(csi2part, csi2->base + CSI2_REG_CSI2PART_IRQ_EDGE); + writel(0, csi2->base + CSI2_REG_CSI2PART_IRQ_LEVEL_NOT_PULSE); + writel(csi2part, csi2->base + CSI2_REG_CSI2PART_IRQ_CLEAR); + writel(csi2part, csi2->base + CSI2_REG_CSI2PART_IRQ_MASK); + writel(csi2part, csi2->base + CSI2_REG_CSI2PART_IRQ_ENABLE); + if (ip->interlaced) { + writel(CSI2_RX_SYNC_COUNTER_EXTERNAL, + csi2->base + CSI2_REG_CSI_RX_SYNC_COUNTER_SEL); + rval = ipu_isys_csi2_configure_tunit(csi2, 1); + if (rval) + return rval; + } + + return 0; +} + +void ipu_isys_csi2_isr(struct ipu_isys_csi2 *csi2) +{ + u32 status = readl(csi2->base + CSI2_REG_CSI2PART_IRQ_STATUS); + unsigned int i; + + writel(status, csi2->base + CSI2_REG_CSI2PART_IRQ_CLEAR); + + if (status & CSI2_CSI2PART_IRQ_CSIRX) + 
ipu_isys_register_errors(csi2); + +#ifdef IPU_VC_SUPPORT + for (i = 0; i < NR_OF_CSI2_VC; i++) { + if ((status & CSI2_IRQ_FS_VC(i))) + ipu_isys_csi2_sof_event(csi2, i); + + if ((status & CSI2_IRQ_FE_VC(i))) + ipu_isys_csi2_eof_event(csi2, i); + } +#else + if (status & CSI2_IRQ_FS_VC) + ipu_isys_csi2_sof_event(csi2); + if (status & CSI2_IRQ_FE_VC) + ipu_isys_csi2_eof_event(csi2); +#endif +} + +static u64 tsc_time_to_tunit_time(struct ipu_isys *isys, + u64 tsc_base, u64 tunit_base, u64 tsc_time) +{ + struct ipu_bus_device *adev = to_ipu_bus_device(isys->adev->iommu); + u64 isys_clk = IS_FREQ_SOURCE / adev->ctrl->divisor / 100000; + u64 tsc_clk = IPU_BUTTRESS_TSC_CLK / 100000; + + tsc_time *= isys_clk; + tsc_base *= isys_clk; + do_div(tsc_time, tsc_clk); + do_div(tsc_base, tsc_clk); + + return tunit_base + tsc_time - tsc_base; +} + +/* Extract the timestamp from trace message. + * The timestamp in the traces message contains two parts. + * The lower part contains bit0 ~ 15 of the total 64bit timestamp. + * The higher part contains bit14 ~ 63 of the 64bit timestamp. + * These two parts are sampled at different time. + * Two overlaped bits are used to identify if there's roll overs + * in the lower part during the two samples. + * If the two overlapped bits do not match, a fix is needed to + * handle the roll over. + */ +static u64 +extract_time_from_short_packet_msg(struct ipu_isys_csi2_monitor_message *msg) +{ + u64 time_h = msg->timestamp_h << 14; + u64 time_l = msg->timestamp_l; + u64 time_h_ovl = time_h & 0xc000; + u64 time_h_h = time_h & (~0xffff); + + /* Fix possible roll overs. 
*/ + if (time_h_ovl >= (time_l & 0xc000)) + return time_h_h | time_l; + else + return (time_h_h - 0x10000) | time_l; +} + +unsigned int +ipu_isys_csi2_get_current_field(struct ipu_isys_pipeline *ip, + unsigned int *timestamp) +{ + struct ipu_isys_video *av = container_of(ip, struct ipu_isys_video, ip); + struct ipu_isys *isys = av->isys; + unsigned int field = V4L2_FIELD_TOP; + + /* + * Find the nearest message that has matched msg type, + * port id, virtual channel and packet type. + */ + unsigned int i = ip->short_packet_trace_index; + bool msg_matched = false; + unsigned int monitor_id; + + update_timer_base(isys); + + if (ip->csi2->index >= IPU_ISYS_MAX_CSI2_LEGACY_PORTS) + monitor_id = TRACE_REG_CSI2_3PH_TM_MONITOR_ID; + else + monitor_id = TRACE_REG_CSI2_TM_MONITOR_ID; + + dma_sync_single_for_cpu(&isys->adev->dev, + isys->short_packet_trace_buffer_dma_addr, + IPU_ISYS_SHORT_PACKET_TRACE_BUFFER_SIZE, + DMA_BIDIRECTIONAL); + + do { + struct ipu_isys_csi2_monitor_message msg = + isys->short_packet_trace_buffer[i]; + u64 sof_time = tsc_time_to_tunit_time(isys, + isys->tsc_timer_base, + isys->tunit_timer_base, + (((u64) timestamp[1]) << + 32) | timestamp[0]); + u64 trace_time = extract_time_from_short_packet_msg(&msg); + u64 delta_time_us = tunit_time_to_us(isys, + (sof_time > trace_time) ? + sof_time - trace_time : + trace_time - sof_time); + + i = (i + 1) % IPU_ISYS_SHORT_PACKET_TRACE_MSG_NUMBER; + + if (msg.cmd == TRACE_REG_CMD_TYPE_D64MTS && + msg.monitor_id == monitor_id && + msg.fs == 1 && + msg.port == ip->csi2->index && +#ifdef IPU_VC_SUPPORT + msg.vc == ip->vc && +#endif + delta_time_us < IPU_ISYS_SHORT_PACKET_TRACE_MAX_TIMESHIFT) { + field = (msg.sequence % 2) ? + V4L2_FIELD_TOP : V4L2_FIELD_BOTTOM; + ip->short_packet_trace_index = i; + msg_matched = true; + dev_dbg(&isys->adev->dev, + "Interlaced field ready. 
field = %d\n", field); + break; + } + } while (i != ip->short_packet_trace_index); + if (!msg_matched) + /* We have walked through the whole buffer. */ + dev_dbg(&isys->adev->dev, "No matched trace message found.\n"); + + return field; +} + +bool ipu_isys_csi2_skew_cal_required(struct ipu_isys_csi2 *csi2) +{ + __s64 link_freq; + int rval; + + if (!csi2) + return false; + + /* Not yet ? */ + if (csi2->remote_streams != csi2->stream_count) + return false; + + rval = ipu_isys_csi2_get_link_freq(csi2, &link_freq); + if (rval) + return false; + + if (link_freq <= IPU_SKEW_CAL_LIMIT_HZ) + return false; + + return true; +} + +int ipu_isys_csi2_set_skew_cal(struct ipu_isys_csi2 *csi2, int enable) +{ + u32 val; + + val = readl(csi2->base + CSI2_REG_CSI_RX_CONFIG); + + if (enable) + val |= CSI2_CSI_RX_CONFIG_SKEWCAL_ENABLE; + else + val &= ~CSI2_CSI_RX_CONFIG_SKEWCAL_ENABLE; + + writel(val, csi2->base + CSI2_REG_CSI_RX_CONFIG); + + return 0; +} diff --git a/drivers/media/pci/intel/ipu4/ipu4-isys-isa.c b/drivers/media/pci/intel/ipu4/ipu4-isys-isa.c new file mode 100644 index 0000000000000..b81e62c1479e8 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-isys-isa.c @@ -0,0 +1,1074 @@ +// SPDX-License-Identifier: GPL-2.0 +// Copyright (C) 2014 - 2018 Intel Corporation + +#include +#include +#include + +#include +#include +#include +#include +#include +#include + +#include +#include + +#include "ipu.h" +#include "ipu-bus.h" +#include "ipu-isys.h" +#include "ipu4-isys-isa.h" +#include "ipu-isys-subdev.h" +#include "ipu-isys-video.h" + +static const u32 isa_supported_codes_pad_sink[] = { + MEDIA_BUS_FMT_SBGGR14_1X14, + MEDIA_BUS_FMT_SGBRG14_1X14, + MEDIA_BUS_FMT_SGRBG14_1X14, + MEDIA_BUS_FMT_SRGGB14_1X14, + MEDIA_BUS_FMT_SBGGR12_1X12, + MEDIA_BUS_FMT_SGBRG12_1X12, + MEDIA_BUS_FMT_SGRBG12_1X12, + MEDIA_BUS_FMT_SRGGB12_1X12, + MEDIA_BUS_FMT_SBGGR10_1X10, + MEDIA_BUS_FMT_SGBRG10_1X10, + MEDIA_BUS_FMT_SGRBG10_1X10, + MEDIA_BUS_FMT_SRGGB10_1X10, + MEDIA_BUS_FMT_SBGGR8_1X8, + 
MEDIA_BUS_FMT_SGBRG8_1X8, + MEDIA_BUS_FMT_SGRBG8_1X8, + MEDIA_BUS_FMT_SRGGB8_1X8, + 0, +}; + +/* Regardless of the input mode ISA always produces 16 bit output */ +static const u32 isa_supported_codes_pad_source[] = { + MEDIA_BUS_FMT_SBGGR12_1X12, + MEDIA_BUS_FMT_SGBRG12_1X12, + MEDIA_BUS_FMT_SGRBG12_1X12, + MEDIA_BUS_FMT_SRGGB12_1X12, + 0, +}; + +/* ISA configuration */ +struct ipu_isys_pixelformat isa_config_pfmts[] = { + {V4L2_FMT_IPU_ISA_CFG, 8, 8, 0, MEDIA_BUS_FMT_FIXED, 0}, + {}, +}; + +static const u32 isa_supported_codes_pad_cfg[] = { + MEDIA_BUS_FMT_FIXED, + 0, +}; + +static const u32 isa_supported_codes_pad_3a[] = { + MEDIA_BUS_FMT_FIXED, + 0, +}; + +static const u32 isa_supported_codes_pad_source_scaled[] = { + MEDIA_BUS_FMT_SBGGR12_1X12, + MEDIA_BUS_FMT_SGBRG12_1X12, + MEDIA_BUS_FMT_SGRBG12_1X12, + MEDIA_BUS_FMT_SRGGB12_1X12, + MEDIA_BUS_FMT_YUYV12_1X24, + 0, +}; + +static const u32 *isa_supported_codes[] = { + isa_supported_codes_pad_sink, + isa_supported_codes_pad_source, + isa_supported_codes_pad_cfg, + isa_supported_codes_pad_3a, + isa_supported_codes_pad_source_scaled, +}; + +static struct v4l2_subdev_internal_ops isa_sd_internal_ops = { + .open = ipu_isys_subdev_open, + .close = ipu_isys_subdev_close, +}; + +static int isa_config_vidioc_g_fmt_vid_out_mplane(struct file *file, void *fh, + struct v4l2_format *fmt) +{ + struct ipu_isys_video *av = video_drvdata(file); + + fmt->fmt.pix_mp = av->mpix; + + return 0; +} + +static const struct ipu_isys_pixelformat * +isa_config_try_fmt_vid_out_mplane(struct ipu_isys_video *av, + struct v4l2_pix_format_mplane *mpix) +{ + const struct ipu_isys_pixelformat *pfmt = + ipu_isys_get_pixelformat(av, mpix->pixelformat); + + if (!pfmt) + return NULL; + mpix->pixelformat = pfmt->pixelformat; + mpix->num_planes = ISA_CFG_BUF_PLANES; + + mpix->plane_fmt[ISA_CFG_BUF_PLANE_PG].bytesperline = 0; + mpix->plane_fmt[ISA_CFG_BUF_PLANE_PG].sizeimage = + ALIGN(max_t(u32, sizeof(struct ia_css_process_group_light), + 
mpix->plane_fmt[ISA_CFG_BUF_PLANE_PG].sizeimage), + av->isys->line_align); + + mpix->plane_fmt[ISA_CFG_BUF_PLANE_DATA].bytesperline = 0; + mpix->plane_fmt[ISA_CFG_BUF_PLANE_DATA].sizeimage = + ALIGN(max(1U, + mpix->plane_fmt[ISA_CFG_BUF_PLANE_DATA].sizeimage), + av->isys->line_align); + + return pfmt; +} + +static int isa_config_vidioc_s_fmt_vid_out_mplane(struct file *file, void *fh, + struct v4l2_format *f) +{ + struct ipu_isys_video *av = video_drvdata(file); + + if (av->aq.vbq.streaming) + return -EBUSY; + + av->pfmt = isa_config_try_fmt_vid_out_mplane(av, &f->fmt.pix_mp); + av->mpix = f->fmt.pix_mp; + + return 0; +} + +static int isa_config_vidioc_try_fmt_vid_out_mplane(struct file *file, void *fh, + struct v4l2_format *f) +{ + struct ipu_isys_video *av = video_drvdata(file); + + isa_config_try_fmt_vid_out_mplane(av, &f->fmt.pix_mp); + return 0; +} + +static const struct v4l2_ioctl_ops isa_config_ioctl_ops = { + .vidioc_querycap = ipu_isys_vidioc_querycap, + .vidioc_enum_fmt_vid_cap = ipu_isys_vidioc_enum_fmt, + .vidioc_g_fmt_vid_out_mplane = isa_config_vidioc_g_fmt_vid_out_mplane, + .vidioc_s_fmt_vid_out_mplane = isa_config_vidioc_s_fmt_vid_out_mplane, + .vidioc_try_fmt_vid_out_mplane = + isa_config_vidioc_try_fmt_vid_out_mplane, + .vidioc_g_fmt_vid_cap_mplane = isa_config_vidioc_g_fmt_vid_out_mplane, + .vidioc_s_fmt_vid_cap_mplane = isa_config_vidioc_s_fmt_vid_out_mplane, + .vidioc_try_fmt_vid_cap_mplane = + isa_config_vidioc_try_fmt_vid_out_mplane, + .vidioc_reqbufs = vb2_ioctl_reqbufs, + .vidioc_create_bufs = vb2_ioctl_create_bufs, + .vidioc_prepare_buf = vb2_ioctl_prepare_buf, + .vidioc_querybuf = vb2_ioctl_querybuf, + .vidioc_qbuf = vb2_ioctl_qbuf, + .vidioc_dqbuf = vb2_ioctl_dqbuf, + .vidioc_streamon = vb2_ioctl_streamon, + .vidioc_streamoff = vb2_ioctl_streamoff, + .vidioc_expbuf = vb2_ioctl_expbuf, +}; + +static const struct v4l2_subdev_core_ops isa_sd_core_ops = { + .subscribe_event = v4l2_ctrl_subdev_subscribe_event, + .unsubscribe_event = 
v4l2_event_subdev_unsubscribe, +}; + +static int set_stream(struct v4l2_subdev *sd, int enable) +{ + struct ipu_isys_isa *isa = to_ipu_isys_isa(sd); + unsigned int i; + + if (enable) + return 0; + + for (i = 0; i < ISA_CFG_BUF_PLANES; i++) + isa->next_param[i] = NULL; + + return 0; +} + +static const struct v4l2_subdev_video_ops isa_sd_video_ops = { + .s_stream = set_stream, +}; + +static const struct v4l2_subdev_pad_ops isa_sd_pad_ops = { + .link_validate = ipu_isys_subdev_link_validate, + .get_fmt = ipu_isys_subdev_get_ffmt, + .set_fmt = ipu_isys_subdev_set_ffmt, + .get_selection = ipu_isys_subdev_get_sel, + .set_selection = ipu_isys_subdev_set_sel, + .enum_mbus_code = ipu_isys_subdev_enum_mbus_code, +}; + +static struct v4l2_subdev_ops isa_sd_ops = { + .core = &isa_sd_core_ops, + .video = &isa_sd_video_ops, + .pad = &isa_sd_pad_ops, +}; + +static int isa_link_validate(struct media_link *link) +{ + struct ipu_isys_pipeline *ip; + struct media_pipeline *pipe; + + /* Non-video node source */ + if (is_media_entity_v4l2_subdev(link->source->entity)) + return v4l2_subdev_link_validate(link); + + pipe = link->sink->entity->pipe; + ip = to_ipu_isys_pipeline(pipe); + ip->nr_queues++; + + return 0; +} + +static struct media_entity_operations isa_entity_ops = { + .link_validate = isa_link_validate, +}; + +void ipu_isys_isa_cleanup(struct ipu_isys_isa *isa) +{ + v4l2_device_unregister_subdev(&isa->asd.sd); + ipu_isys_subdev_cleanup(&isa->asd); + ipu_isys_video_cleanup(&isa->av_scaled); + ipu_isys_video_cleanup(&isa->av_config); + ipu_isys_video_cleanup(&isa->av_3a); + ipu_isys_video_cleanup(&isa->av); +} + +static void isa_set_ffmt(struct v4l2_subdev *sd, +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 1, 0) + struct v4l2_subdev_fh *cfg, +#else + struct v4l2_subdev_pad_config *cfg, +#endif + struct v4l2_subdev_format *fmt) +{ + struct v4l2_mbus_framefmt *ffmt = +#ifdef IPU_VC_SUPPORT + __ipu_isys_get_ffmt(sd, cfg, fmt->pad, fmt->stream, + fmt->which); +#else + 
__ipu_isys_get_ffmt(sd, cfg, fmt->pad, fmt->which); +#endif + enum ipu_isys_subdev_pixelorder order; + enum isys_subdev_prop_tgt tgt; + + switch (fmt->pad) { + case ISA_PAD_SINK: + fmt->format.field = V4L2_FIELD_NONE; + *ffmt = fmt->format; + tgt = IPU_ISYS_SUBDEV_PROP_TGT_SINK_FMT; + ipu_isys_subdev_fmt_propagate(sd, cfg, &fmt->format, + NULL, tgt, fmt->pad, fmt->which); + return; + case ISA_PAD_SOURCE: { + struct v4l2_mbus_framefmt *sink_ffmt = +#ifdef IPU_VC_SUPPORT + __ipu_isys_get_ffmt(sd, cfg, ISA_PAD_SINK, + fmt->stream, fmt->which); +#else + __ipu_isys_get_ffmt(sd, cfg, ISA_PAD_SINK, fmt->which); +#endif + struct v4l2_rect *r = + __ipu_isys_get_selection(sd, cfg, + V4L2_SEL_TGT_CROP, + ISA_PAD_SOURCE, + fmt->which); + + ffmt->width = r->width; + ffmt->height = r->height; + ffmt->field = sink_ffmt->field; + order = ipu_isys_subdev_get_pixelorder(sink_ffmt->code); + ffmt->code = isa_supported_codes_pad_source[order]; + return; + } + case ISA_PAD_CONFIG: + case ISA_PAD_3A: + ffmt->code = MEDIA_BUS_FMT_FIXED; + ffmt->width = 0; + ffmt->height = 0; + fmt->format = *ffmt; + return; + case ISA_PAD_SOURCE_SCALED: { + struct v4l2_mbus_framefmt *sink_ffmt = +#ifdef IPU_VC_SUPPORT + __ipu_isys_get_ffmt(sd, cfg, ISA_PAD_SINK, + fmt->stream, fmt->which); +#else + __ipu_isys_get_ffmt(sd, cfg, ISA_PAD_SINK, fmt->which); +#endif + struct v4l2_rect *r = + __ipu_isys_get_selection(sd, cfg, + V4L2_SEL_TGT_CROP, + ISA_PAD_SOURCE_SCALED, + fmt->which); + + ffmt->width = r->width; + ffmt->height = r->height; + ffmt->field = sink_ffmt->field; + order = ipu_isys_subdev_get_pixelorder(sink_ffmt->code); + ffmt->code = + isa_supported_codes_pad_source_scaled[order]; + if (fmt->format.code == MEDIA_BUS_FMT_YUYV12_1X24) + ffmt->code = MEDIA_BUS_FMT_YUYV12_1X24; + + return; + } + default: + WARN_ON(1); + } +} + +static int isa_s_ctrl(struct v4l2_ctrl *ctrl) +{ + return 0; +} + +static const struct v4l2_ctrl_ops isa_ctrl_ops = { + .s_ctrl = isa_s_ctrl, +}; + +static void 
isa_capture_done(struct ipu_isys_pipeline *ip, + struct ipu_fw_isys_resp_info_abi *info) +{ + struct ipu_isys_isa *isa = &ip->isys->isa; + struct ipu_isys_queue *aq = &isa->av_config.aq; + struct ipu_isys_buffer *ib; + unsigned long flags; + + if (WARN_ON_ONCE(list_empty(&aq->active))) + return; + + spin_lock_irqsave(&aq->lock, flags); + ib = list_last_entry(&aq->active, struct ipu_isys_buffer, head); + list_del(&ib->head); + dev_dbg(&ip->isys->adev->dev, "isa cfg: dequeued buffer %p", ib); + spin_unlock_irqrestore(&aq->lock, flags); + + ipu_isys_buf_calc_sequence_time(ib, info); + ipu_isys_queue_buf_done(ib); + + aq = &isa->av_3a.aq; + + if (isa->av_3a.vdev.entity.pipe != isa->av_config.vdev.entity.pipe) { + dev_dbg(&ip->isys->adev->dev, "3a disabled\n"); + return; + } + + if (WARN_ON_ONCE(list_empty(&aq->active))) + return; + + spin_lock_irqsave(&aq->lock, flags); + ib = list_last_entry(&aq->active, struct ipu_isys_buffer, head); + list_del(&ib->head); + dev_dbg(&ip->isys->adev->dev, "isa 3a: dequeued buffer %p", ib); + spin_unlock_irqrestore(&aq->lock, flags); + + ipu_isys_buf_calc_sequence_time(ib, info); + ipu_isys_queue_buf_done(ib); +} + +/* Maximum size of the buffer-specific process group. 
*/ +#define PGL_SIZE PAGE_SIZE + +static int isa_3a_buf_init(struct vb2_buffer *vb) +{ + struct ipu_isys_isa_buffer *isa_buf = + vb2_buffer_to_ipu_isys_isa_buffer(vb); + + isa_buf->pgl.pg = kzalloc(PGL_SIZE, GFP_KERNEL); + if (!isa_buf->pgl.pg) + return -ENOMEM; + + return 0; +} + +static void isa_3a_buf_cleanup(struct vb2_buffer *vb) +{ + struct ipu_isys_isa_buffer *isa_buf = + vb2_buffer_to_ipu_isys_isa_buffer(vb); + + kfree(isa_buf->pgl.pg); +} + +static int isa_config_buf_init(struct vb2_buffer *vb) +{ + struct ipu_isys_queue *aq = vb2_queue_to_ipu_isys_queue(vb->vb2_queue); + struct ipu_isys_video *av = ipu_isys_queue_to_video(aq); + struct ipu_isys_isa_buffer *isa_buf = + vb2_buffer_to_ipu_isys_isa_buffer(vb); +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 8, 0) + struct dma_attrs attrs; +#else + unsigned long attrs; +#endif + int rval; + + rval = isa_3a_buf_init(vb); + if (rval) + return rval; + +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 8, 0) + init_dma_attrs(&attrs); + dma_set_attr(DMA_ATTR_NON_CONSISTENT, &attrs); +#else + attrs = DMA_ATTR_NON_CONSISTENT; +#endif + + isa_buf->pgl.common_pg = + dma_alloc_attrs(&av->isys->adev->dev, PGL_SIZE << 1, + &isa_buf->pgl.iova, GFP_KERNEL | __GFP_ZERO, +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 8, 0) + &attrs +#else + attrs +#endif + ); + + dev_dbg(&av->isys->adev->dev, + "buf_init: index %u, cpu addr %p, dma addr %pad\n", +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 4, 0) + vb->v4l2_buf.index, +#else + vb->index, +#endif + isa_buf->pgl.common_pg, &isa_buf->pgl.iova); + + if (!isa_buf->pgl.common_pg) { + isa_3a_buf_cleanup(vb); + return -ENOMEM; + } + + return 0; +} + +static void isa_config_buf_cleanup(struct vb2_buffer *vb) +{ + struct ipu_isys_queue *aq = vb2_queue_to_ipu_isys_queue(vb->vb2_queue); + struct ipu_isys_video *av = ipu_isys_queue_to_video(aq); + struct ipu_isys_isa_buffer *isa_buf = + vb2_buffer_to_ipu_isys_isa_buffer(vb); +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 8, 0) + struct dma_attrs attrs; 
+#else + unsigned long attrs; +#endif + + dev_dbg(&av->isys->adev->dev, + "buf_cleanup: index %u, cpu addr %p, dma addr %pad\n", +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 4, 0) + vb->v4l2_buf.index, +#else + vb->index, +#endif + isa_buf->pgl.pg, &isa_buf->pgl.iova); + if (!isa_buf->pgl.pg) + return; + +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 8, 0) + init_dma_attrs(&attrs); + dma_set_attr(DMA_ATTR_NON_CONSISTENT, &attrs); +#else + attrs = DMA_ATTR_NON_CONSISTENT; +#endif + + dma_free_attrs(&av->isys->adev->dev, PGL_SIZE << 1, + isa_buf->pgl.common_pg, isa_buf->pgl.iova, +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 8, 0) + &attrs +#else + attrs +#endif + ); + + isa_3a_buf_cleanup(vb); +} + +static void +isa_prepare_firmware_stream_cfg(struct ipu_isys_video *av, + struct ipu_fw_isys_stream_cfg_data_abi *cfg) +{ + struct v4l2_rect *r; + unsigned int pad, cropping_location, res_info; + + if (av == &av->isys->isa.av) { + pad = ISA_PAD_SOURCE; + cropping_location = + IPU_FW_ISYS_CROPPING_LOCATION_POST_ISA_NONSCALED; + res_info = IPU_FW_ISYS_RESOLUTION_INFO_POST_ISA_NONSCALED; + } else if (av == &av->isys->isa.av_scaled) { + pad = ISA_PAD_SOURCE_SCALED; + cropping_location = + IPU_FW_ISYS_CROPPING_LOCATION_POST_ISA_SCALED; + res_info = IPU_FW_ISYS_RESOLUTION_INFO_POST_ISA_SCALED; + } else { + WARN_ON(1); + return; + } + + r = __ipu_isys_get_selection(&av->isys->isa.asd.sd, NULL, + V4L2_SEL_TGT_CROP, pad, + V4L2_SUBDEV_FORMAT_ACTIVE); + + cfg->crop[cropping_location].top_offset = r->top; + cfg->crop[cropping_location].left_offset = r->left; + cfg->crop[cropping_location].bottom_offset = r->top + r->height; + cfg->crop[cropping_location].right_offset = r->left + r->width; + + r = __ipu_isys_get_selection(&av->isys->isa.asd.sd, NULL, + V4L2_SEL_TGT_COMPOSE, pad, + V4L2_SUBDEV_FORMAT_ACTIVE); + + cfg->isa_cfg.isa_res[res_info].height = r->height; + cfg->isa_cfg.isa_res[res_info].width = r->width; + ipu_isys_prepare_firmware_stream_cfg_default(av, cfg); +} + +static void 
+isa_prepare_firmware_stream_cfg_param(struct ipu_isys_video *av, + struct ipu_fw_isys_stream_cfg_data_abi + *cfg) +{ + struct ipu_isys_isa *isa = &av->isys->isa; + struct ipu_isys_pipeline *ip = + to_ipu_isys_pipeline(av->vdev.entity.pipe); + + cfg->isa_cfg.cfg.blc = !!(isa->isa_en->val & V4L2_IPU_ISA_EN_BLC); + cfg->isa_cfg.cfg.lsc = !!(isa->isa_en->val & V4L2_IPU_ISA_EN_LSC); + cfg->isa_cfg.cfg.dpc = !!(isa->isa_en->val & V4L2_IPU_ISA_EN_DPC); + cfg->isa_cfg.cfg.downscaler = + !!(isa->isa_en->val & V4L2_IPU_ISA_EN_SCALER); + cfg->isa_cfg.cfg.awb = !!(isa->isa_en->val & V4L2_IPU_ISA_EN_AWB); + cfg->isa_cfg.cfg.af = !!(isa->isa_en->val & V4L2_IPU_ISA_EN_AF); + cfg->isa_cfg.cfg.ae = !!(isa->isa_en->val & V4L2_IPU_ISA_EN_AE); + + cfg->isa_cfg.cfg.send_irq_stats_ready = 1; + cfg->isa_cfg.cfg.send_resp_stats_ready = 1; + ipu_isys_video_add_capture_done(ip, isa_capture_done); +} + +static bool is_capture_terminal(struct ia_css_terminal *t) +{ + switch (t->terminal_type) { + case IPU_FW_TERMINAL_TYPE_PARAM_CACHED_OUT: + case IPU_FW_TERMINAL_TYPE_PARAM_SPATIAL_OUT: + case IPU_FW_TERMINAL_TYPE_PARAM_SLICED_OUT: + return true; + default: + return false; + } +} + +/* Return the pointer to the terminal payload's IOVA. 
*/ +static int isa_terminal_get_iova(struct device *dev, struct ia_css_terminal *t, + u32 **iova) +{ + switch (t->terminal_type) { + case IPU_FW_TERMINAL_TYPE_PARAM_CACHED_IN: + case IPU_FW_TERMINAL_TYPE_PARAM_CACHED_OUT:{ + struct ia_css_param_terminal *tpterm = (void *)t; + + *iova = &tpterm->param_payload.buffer; + break; + } + case IPU_FW_TERMINAL_TYPE_PARAM_SPATIAL_IN: + case IPU_FW_TERMINAL_TYPE_PARAM_SPATIAL_OUT:{ + struct ia_css_spatial_param_terminal *tpterm = + (void *)t; + + *iova = &tpterm->param_payload.buffer; + break; + } + case IPU_FW_TERMINAL_TYPE_PARAM_SLICED_IN: + case IPU_FW_TERMINAL_TYPE_PARAM_SLICED_OUT:{ + struct ia_css_sliced_param_terminal *tpterm = (void *)t; + + *iova = &tpterm->param_payload.buffer; + break; + } + case IPU_FW_TERMINAL_TYPE_PROGRAM:{ + struct ia_css_program_terminal *tpterm = (void *)t; + + *iova = &tpterm->param_payload.buffer; + break; + } + default: + dev_dbg(dev, "unhandled terminal type %u\n", t->terminal_type); + return -EINVAL; + } + + return 0; +} + +/* + * Validate a process group, and add the IOVA of the data plane to the + * offsets related to the start of the data plane. 
+ */ +static int isa_import_pg(struct vb2_buffer *vb) +{ + void *__pg = vb2_plane_vaddr(vb, ISA_CFG_BUF_PLANE_PG); + struct ipu_isys_queue *aq = vb2_queue_to_ipu_isys_queue(vb->vb2_queue); + struct ipu_isys_video *av = ipu_isys_queue_to_video(aq); + struct ipu_isys_isa_buffer *isa_buf = + vb2_buffer_to_ipu_isys_isa_buffer(vb); + struct ia_css_process_group_light *pg = isa_buf->pgl.pg; + bool capture = aq->vbq.type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE; + u32 addr = vb2_dma_contig_plane_dma_addr(vb, + ISA_CFG_BUF_PLANE_DATA); + unsigned int i; + + if (!__pg) { + dev_warn(&av->isys->adev->dev, + "virtual mapping of the buffer failed\n"); + return -EINVAL; + } + + if (vb2_plane_size(vb, ISA_CFG_BUF_PLANE_PG) > PGL_SIZE) { + dev_dbg(&av->isys->adev->dev, + "too large process group, max %lu\n", PGL_SIZE); + return -EINVAL; + } + + /* + * Copy the light process group to a kernel buffer so that it + * cannot be modified by the user space. + */ + memcpy(pg, __pg, vb2_plane_size(vb, ISA_CFG_BUF_PLANE_PG)); + + if (pg->size > vb2_plane_size(vb, ISA_CFG_BUF_PLANE_PG)) { + dev_dbg(&av->isys->adev->dev, + "process group size too large (%u bytes, %lu bytes available)\n", + pg->size, vb2_plane_size(vb, ISA_CFG_BUF_PLANE_PG)); + return -EINVAL; + } + + if (!pg->terminal_count) { + dev_dbg(&av->isys->adev->dev, "no terminals defined\n"); + return -EINVAL; + } + + if ((void *)(ia_css_terminal_offsets(pg) + + pg->terminal_count * sizeof(uint16_t)) - (void *)pg + > pg->size) { + dev_dbg(&av->isys->adev->dev, + "terminal offsets do not fit in the buffer\n"); + return -EINVAL; + } + + for (i = 0; i < pg->terminal_count; i++) { + struct ia_css_terminal *t = to_ia_css_terminal(pg, i); + u32 *iova; + int rval; + + if ((void *)t + sizeof(*t) - (void *)pg > pg->size) { + dev_dbg(&av->isys->adev->dev, + "terminal %u does not fit in the buffer\n", i); + return -EINVAL; + } + + dev_dbg(&av->isys->adev->dev, + "terminal: terminal %u, size %u, capture %u / %u\n", + i, t->size, capture, 
is_capture_terminal(t)); + + if (capture != is_capture_terminal(t)) + continue; + + dev_dbg(&av->isys->adev->dev, "terminal: %u offset %u\n", i, + ia_css_terminal_offsets(pg)[i]); + + rval = isa_terminal_get_iova(&av->isys->adev->dev, t, &iova); + if (rval) + return rval; + + dev_dbg(&av->isys->adev->dev, + "terminal: offset 0x%x, address 0x%8.8x\n", + *iova, (u32) addr + *iova); + + if (addr + *iova < addr) { + dev_dbg(&av->isys->adev->dev, + "address space overflow\n"); + return -EINVAL; + } + + if (*iova > vb2_plane_size(vb, ISA_CFG_BUF_PLANE_DATA)) { + dev_dbg(&av->isys->adev->dev, + "offset outside the buffer\n"); + return -EINVAL; + } + + /* + * Add the IOVA of the data plane to the terminal + * payload's offset. + */ + *iova += addr; + } + + return 0; +} + +static int isa_terminal_buf_prepare(struct vb2_buffer *vb) +{ + struct ipu_isys_queue *aq = vb2_queue_to_ipu_isys_queue(vb->vb2_queue); + struct ipu_isys_video *av = ipu_isys_queue_to_video(aq); + unsigned int i; + + for (i = 0; i < ISA_CFG_BUF_PLANES; i++) { + vb2_set_plane_payload(vb, i, av->mpix.plane_fmt[i].sizeimage); +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 4, 0) + vb->v4l2_planes[i].data_offset = 0; +#else + vb->planes[i].data_offset = 0; +#endif + } + + return isa_import_pg(vb); +} + +/* + * Count relevant terminals in a light process group and add the + * number of found to the common light process group. 
+ */ +static void +isa_config_count_valid_terminals(struct device *dev, + struct ia_css_process_group_light *cpg, + struct ia_css_process_group_light *pg, + bool capture) +{ + unsigned int i; + + for (i = 0; i < pg->terminal_count; i++) + if (capture == is_capture_terminal(to_ia_css_terminal(pg, i))) + cpg->terminal_count++; +} + +static void +isa_config_prepare_frame_buff_set_one(struct device *dev, + struct ia_css_process_group_light *cpg, + struct ia_css_process_group_light *pg, + dma_addr_t addr, bool capture, + unsigned int *terminal_count) +{ + unsigned int i; + + dev_dbg(dev, "terminal: size %u, count %u, offset %u\n", + pg->size, pg->terminal_count, pg->terminals_offset_offset); + + dev_dbg(dev, "terminal: copying %u terminal offsets to %p from %p\n", + pg->terminal_count, ia_css_terminal_offsets(cpg), + ia_css_terminal_offsets(pg)); + + for (i = 0; i < pg->terminal_count; i++) { + struct ia_css_terminal *t = to_ia_css_terminal(pg, i), *ct; + + dev_dbg(dev, + "terminal: parsing %u, size %u, capture %u / %u\n", + i, t->size, capture, is_capture_terminal(t)); + + if (capture != is_capture_terminal(t)) + continue; + + ia_css_terminal_offsets(cpg)[*terminal_count] = + ia_css_terminal_offset(cpg, *terminal_count); + + dev_dbg(dev, "terminal: %u offset %u\n", *terminal_count, + ia_css_terminal_offsets(cpg)[*terminal_count]); + + ct = to_ia_css_terminal(cpg, *terminal_count); + + dev_dbg(dev, + "terminal: copying terminal %p to %p (%u bytes)\n", + t, ct, t->size); + memcpy(ct, t, t->size); + + (*terminal_count)++; + } +} + +/* + * Move the terminals from a read-only or write-only light process + * group to a common process group. 
+ */ +static void isa_config_prepare_frame_buff_set(struct vb2_buffer *__vb) +{ + struct ipu_isys_queue *aq = + vb2_queue_to_ipu_isys_queue(__vb->vb2_queue); + struct ipu_isys_video *av = ipu_isys_queue_to_video(aq); + struct ipu_isys_isa *isa = &av->isys->isa; + struct vb2_buffer *vb[ISA_PARAM_QUEUES]; + struct ia_css_process_group_light *pg[ISA_PARAM_QUEUES]; + dma_addr_t addr[ISA_PARAM_QUEUES]; + struct ia_css_process_group_light *cpg; + struct ipu_isys_isa_buffer *__isa_buf; + unsigned int terminal_count = 0, i; + bool capture = &av->isys->isa.av_3a.aq == aq; + + dev_dbg(&av->isys->adev->dev, "%s: capture %u\n", av->vdev.name, + capture); + + isa->next_param[capture] = __vb; + + /* Proceed only when both cfg and stats buffers are available. */ + if (!isa->next_param[!capture]) + return; + + /* Obtain common process group light buffer from config buffer */ + __isa_buf = vb2_buffer_to_ipu_isys_isa_buffer( + isa->next_param[ISA_CFG_BUF_PLANE_PG]); + + for (i = 0; i < ISA_PARAM_QUEUES; i++) { + struct ipu_isys_isa_buffer *isa_buf; + + vb[i] = isa->next_param[i]; + isa_buf = vb2_buffer_to_ipu_isys_isa_buffer(vb[i]); + pg[i] = isa_buf->pgl.pg; + addr[i] = vb2_dma_contig_plane_dma_addr(vb[i], + ISA_CFG_BUF_PLANE_DATA); + + dma_sync_single_for_device(&av->isys->adev->dev, + addr[i], vb2_plane_size(vb[i], + ISA_CFG_BUF_PLANE_DATA), + DMA_TO_DEVICE); + + dev_dbg(&av->isys->adev->dev, + "terminal: queue %u, plane 0: vaddr %p, dma_addr %pad program group size %u program group terminals %u\n", + i, pg[i], &addr[i], pg[i]->size, pg[i]->terminal_count); + } + + cpg = __isa_buf->pgl.common_pg; + cpg->terminal_count = 0; + cpg->terminals_offset_offset = sizeof(*cpg); + + if (cpg->size > PGL_SIZE << 1) { + dev_err(&av->isys->adev->dev, + "not enough room for terms, %lu found, %u needed\n", + PGL_SIZE << 1, cpg->size); + return; + } + + for (i = 0; i < ISA_PARAM_QUEUES; i++) + isa_config_count_valid_terminals(&av->isys->adev->dev, + cpg, pg[i], i); + + for (i = 0; i < 
ISA_PARAM_QUEUES; i++) { + isa_config_prepare_frame_buff_set_one(&av->isys->adev->dev, cpg, + pg[i], addr[i], i, + &terminal_count); + + isa->next_param[i] = NULL; + } + + cpg->size = ia_css_terminal_offset(cpg, cpg->terminal_count); + + dev_dbg(&av->isys->adev->dev, "common pg size 0x%x count %d\n", + cpg->size, cpg->terminal_count); + + dma_sync_single_for_device(&av->isys->adev->dev, __isa_buf->pgl.iova, + PGL_SIZE << 1, DMA_TO_DEVICE); +} + +static void +isa_config_fill_frame_buff_set_pin(struct vb2_buffer *vb, + struct ipu_fw_isys_frame_buff_set_abi *set) +{ + struct ipu_isys_isa_buffer *isa_buf = + vb2_buffer_to_ipu_isys_isa_buffer(vb); + + set->process_group_light.addr = isa_buf->pgl.iova; +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 4, 0) + set->process_group_light.param_buf_id = vb->v4l2_buf.index + 1; +#else + set->process_group_light.param_buf_id = vb->index + 1; +#endif +} + +static void isa_ctrl_init(struct v4l2_subdev *sd) +{ + struct ipu_isys_isa *isa = to_ipu_isys_isa(sd); + static const struct v4l2_ctrl_config cfg = { + .ops = &isa_ctrl_ops, + .id = V4L2_CID_IPU_ISA_EN, + .name = "ISA enable", + .type = V4L2_CTRL_TYPE_BITMASK, + .max = V4L2_IPU_ISA_EN_BLC + | V4L2_IPU_ISA_EN_LSC + | V4L2_IPU_ISA_EN_DPC + | V4L2_IPU_ISA_EN_SCALER + | V4L2_IPU_ISA_EN_AWB + | V4L2_IPU_ISA_EN_AF | V4L2_IPU_ISA_EN_AE, + }; + + isa->isa_en = v4l2_ctrl_new_custom(&isa->asd.ctrl_handler, &cfg, NULL); +} + +int ipu_isys_isa_init(struct ipu_isys_isa *isa, + struct ipu_isys *isys, void __iomem *base) +{ + struct v4l2_subdev_format fmt = { + .which = V4L2_SUBDEV_FORMAT_ACTIVE, + .pad = ISA_PAD_SINK, + .format = { + .width = 4096, + .height = 3072, + }, + }; + struct v4l2_subdev_format fmt_config = { + .which = V4L2_SUBDEV_FORMAT_ACTIVE, + .pad = ISA_PAD_CONFIG, + }; + struct v4l2_subdev_format fmt_3a = { + .which = V4L2_SUBDEV_FORMAT_ACTIVE, + .pad = ISA_PAD_3A, + }; + int rval; + + isa->base = base; + + isa->asd.sd.entity.ops = &isa_entity_ops; + isa->asd.ctrl_init = 
isa_ctrl_init; + isa->asd.isys = isys; + + rval = ipu_isys_subdev_init(&isa->asd, &isa_sd_ops, 1, + NR_OF_ISA_PADS, +#ifdef IPU_VC_SUPPORT + NR_OF_ISA_STREAMS, +#endif + NR_OF_ISA_SOURCE_PADS, + NR_OF_ISA_SINK_PADS, + V4L2_SUBDEV_FL_HAS_EVENTS); + if (rval) + goto fail; + + isa->asd.pad[ISA_PAD_SINK].flags = MEDIA_PAD_FL_SINK + | MEDIA_PAD_FL_MUST_CONNECT; + isa->asd.pad[ISA_PAD_SOURCE].flags = MEDIA_PAD_FL_SOURCE; + isa->asd.valid_tgts[ISA_PAD_SOURCE].crop = true; + isa->asd.pad[ISA_PAD_CONFIG].flags = MEDIA_PAD_FL_SINK + | MEDIA_PAD_FL_MUST_CONNECT; + isa->asd.pad[ISA_PAD_3A].flags = MEDIA_PAD_FL_SOURCE; + isa->asd.pad[ISA_PAD_SOURCE_SCALED].flags = MEDIA_PAD_FL_SOURCE; + isa->asd.valid_tgts[ISA_PAD_SOURCE_SCALED].compose = true; + isa->asd.valid_tgts[ISA_PAD_SOURCE_SCALED].crop = true; + + isa->asd.isl_mode = IPU_ISL_ISA; + isa->asd.supported_codes = isa_supported_codes; + isa->asd.set_ffmt = isa_set_ffmt; + ipu_isys_subdev_set_ffmt(&isa->asd.sd, NULL, &fmt); + ipu_isys_subdev_set_ffmt(&isa->asd.sd, NULL, &fmt_config); + ipu_isys_subdev_set_ffmt(&isa->asd.sd, NULL, &fmt_3a); + + isa->asd.sd.internal_ops = &isa_sd_internal_ops; + snprintf(isa->asd.sd.name, sizeof(isa->asd.sd.name), + IPU_ISYS_ENTITY_PREFIX " ISA"); + v4l2_set_subdevdata(&isa->asd.sd, &isa->asd); + rval = v4l2_device_register_subdev(&isys->v4l2_dev, &isa->asd.sd); + if (rval) { + dev_info(&isys->adev->dev, "can't register v4l2 subdev\n"); + goto fail; + } + + snprintf(isa->av.vdev.name, sizeof(isa->av.vdev.name), + IPU_ISYS_ENTITY_PREFIX " ISA capture"); + isa->av.isys = isys; + isa->av.aq.css_pin_type = IPU_FW_ISYS_PIN_TYPE_RAW_NS; + isa->av.pfmts = ipu_isys_pfmts; + isa->av.try_fmt_vid_mplane = ipu_isys_video_try_fmt_vid_mplane_default; + isa->av.prepare_firmware_stream_cfg = isa_prepare_firmware_stream_cfg; + isa->av.aq.buf_prepare = ipu_isys_buf_prepare; + isa->av.aq.fill_frame_buff_set_pin = + ipu_isys_buffer_list_to_ipu_fw_isys_frame_buff_set_pin; + isa->av.aq.link_fmt_validate = 
ipu_isys_link_fmt_validate; + isa->av.aq.vbq.buf_struct_size = sizeof(struct ipu_isys_video_buffer); + + rval = ipu_isys_video_init(&isa->av, &isa->asd.sd.entity, + ISA_PAD_SOURCE, MEDIA_PAD_FL_SINK, 0); + if (rval) { + dev_info(&isys->adev->dev, "can't init video node\n"); + goto fail; + } + + snprintf(isa->av_config.vdev.name, sizeof(isa->av_config.vdev.name), + IPU_ISYS_ENTITY_PREFIX " ISA config"); + isa->av_config.isys = isys; + isa->av_config.pfmts = isa_config_pfmts; + isa->av_config.try_fmt_vid_mplane = isa_config_try_fmt_vid_out_mplane; + isa->av_config.prepare_firmware_stream_cfg = + isa_prepare_firmware_stream_cfg_param; + isa->av_config.vdev.ioctl_ops = &isa_config_ioctl_ops; + isa->av_config.aq.buf_init = isa_config_buf_init; + isa->av_config.aq.buf_cleanup = isa_config_buf_cleanup; + isa->av_config.aq.buf_prepare = isa_terminal_buf_prepare; + isa->av_config.aq.prepare_frame_buff_set = + isa_config_prepare_frame_buff_set; + isa->av_config.aq.fill_frame_buff_set_pin = + isa_config_fill_frame_buff_set_pin; + isa->av_config.aq.link_fmt_validate = ipu_isys_link_fmt_validate; + isa->av_config.aq.vbq.io_modes = VB2_MMAP | VB2_DMABUF; + isa->av_config.aq.vbq.buf_struct_size = + sizeof(struct ipu_isys_isa_buffer); + + rval = ipu_isys_video_init(&isa->av_config, &isa->asd.sd.entity, + ISA_PAD_CONFIG, MEDIA_PAD_FL_SOURCE, 0); + if (rval) { + dev_info(&isys->adev->dev, "can't init video node\n"); + goto fail; + } + + snprintf(isa->av_3a.vdev.name, sizeof(isa->av_3a.vdev.name), + IPU_ISYS_ENTITY_PREFIX " ISA 3A stats"); + isa->av_3a.isys = isys; + isa->av_3a.pfmts = isa_config_pfmts; + isa->av_3a.try_fmt_vid_mplane = isa_config_try_fmt_vid_out_mplane; + isa->av_3a.prepare_firmware_stream_cfg = + isa_prepare_firmware_stream_cfg_param; + isa->av_3a.vdev.ioctl_ops = &isa_config_ioctl_ops; + isa->av_3a.aq.buf_init = isa_3a_buf_init; + isa->av_3a.aq.buf_cleanup = isa_3a_buf_cleanup; + isa->av_3a.aq.buf_prepare = isa_terminal_buf_prepare; + 
isa->av_3a.aq.prepare_frame_buff_set = + isa_config_prepare_frame_buff_set; + isa->av_3a.aq.link_fmt_validate = ipu_isys_link_fmt_validate; + isa->av_3a.aq.vbq.io_modes = VB2_MMAP | VB2_DMABUF; + isa->av_3a.aq.vbq.buf_struct_size = sizeof(struct ipu_isys_isa_buffer); + isa->av_3a.line_header_length = 4; /* Set to non-zero to force mplane*/ + + rval = ipu_isys_video_init(&isa->av_3a, &isa->asd.sd.entity, + ISA_PAD_3A, MEDIA_PAD_FL_SINK, 0); + if (rval) { + dev_info(&isys->adev->dev, "can't init video node\n"); + goto fail; + } + + snprintf(isa->av_scaled.vdev.name, sizeof(isa->av_scaled.vdev.name), + IPU_ISYS_ENTITY_PREFIX " ISA scaled capture"); + isa->av_scaled.isys = isys; + isa->av_scaled.aq.css_pin_type = IPU_FW_ISYS_PIN_TYPE_RAW_S; + isa->av_scaled.pfmts = isa->av.pfmts; + isa->av_scaled.try_fmt_vid_mplane = + ipu_isys_video_try_fmt_vid_mplane_default; + isa->av_scaled.prepare_firmware_stream_cfg = + isa_prepare_firmware_stream_cfg; + isa->av_scaled.aq.buf_prepare = ipu_isys_buf_prepare; + isa->av_scaled.aq.fill_frame_buff_set_pin = + ipu_isys_buffer_list_to_ipu_fw_isys_frame_buff_set_pin; + isa->av_scaled.aq.link_fmt_validate = ipu_isys_link_fmt_validate; + isa->av_scaled.aq.vbq.buf_struct_size = + sizeof(struct ipu_isys_video_buffer); + + rval = ipu_isys_video_init(&isa->av_scaled, &isa->asd.sd.entity, + ISA_PAD_SOURCE_SCALED, MEDIA_PAD_FL_SINK, 0); + if (rval) { + dev_info(&isys->adev->dev, "can't init video node\n"); + goto fail; + } + + return 0; + +fail: + ipu_isys_isa_cleanup(isa); + + return rval; +} diff --git a/drivers/media/pci/intel/ipu4/ipu4-isys-isa.h b/drivers/media/pci/intel/ipu4/ipu4-isys-isa.h new file mode 100644 index 0000000000000..649714dca2f48 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-isys-isa.h @@ -0,0 +1,85 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (C) 2014 - 2018 Intel Corporation */ + +#ifndef IPU_ISYS_ISA_H +#define IPU_ISYS_ISA_H + +#include +#include + +#include "ipu-isys-queue.h" +#include 
"ipu-isys-subdev.h" +#include "ipu-isys-video.h" + +#define ISA_PAD_SINK 0 +#define ISA_PAD_SOURCE 1 +#define ISA_PAD_CONFIG 2 +#define ISA_PAD_3A 3 +#define ISA_PAD_SOURCE_SCALED 4 + +#define NR_OF_ISA_PADS 5 +#define NR_OF_ISA_SINK_PADS 2 +#define NR_OF_ISA_SOURCE_PADS 3 +#define NR_OF_ISA_STREAMS 1 + +struct ipu_isys; +struct ia_css_process_group_light; + +/* + * struct ipu_isa_buffer + * + * @ivb: Base buffer type which provides inheritance of + * isys buffer and vb2 buffer. + * @pgl: program group light DMA buffer + * @pgl.pg: process group, copy of the buffer's plane 0 + * but not mapped to user space + * @pgl.common_pg: A combined process group from both video buffers + * @pgl.iova: IOVA of common_pg + */ +struct ipu_isys_isa_buffer { + struct ipu_isys_video_buffer ivb; + struct { + struct ia_css_process_group_light *pg; + struct ia_css_process_group_light *common_pg; + dma_addr_t iova; + } pgl; +}; + +/* ISA CFG will use multiplanar buffers */ +#define ISA_CFG_BUF_PLANE_PG 0 +#define ISA_CFG_BUF_PLANE_DATA 1 +#define ISA_CFG_BUF_PLANES 2 + +#define ISA_PARAM_QUEUES 2 + +/* + * struct ipu_isys_isa + */ +struct ipu_isys_isa { + struct ipu_isys_subdev asd; + struct ipu_isys_video av; + struct ipu_isys_video av_config; + struct ipu_isys_video av_3a; + struct ipu_isys_video av_scaled; + + void __iomem *base; + + struct v4l2_ctrl *isa_en; + + struct vb2_buffer *next_param[ISA_PARAM_QUEUES]; /* config and 3a */ +}; + +#define to_ipu_isys_isa(sd) \ + container_of(to_ipu_isys_subdev(sd), \ + struct ipu_isys_isa, asd) + +#define vb2_buffer_to_ipu_isys_isa_buffer(__vb) \ + container_of(vb2_buffer_to_ipu_isys_video_buffer(__vb), \ + struct ipu_isys_isa_buffer, ivb) + +int ipu_isys_isa_init(struct ipu_isys_isa *isa, + struct ipu_isys *isys, void __iomem *base); +void ipu_isys_isa_cleanup(struct ipu_isys_isa *isa); +void ipu_isys_isa_isr(struct ipu_isys_isa *isa); + +#endif /* IPU_ISYS_ISA_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4-isys.c 
b/drivers/media/pci/intel/ipu4/ipu4-isys.c new file mode 100644 index 0000000000000..27bfe78aa0201 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-isys.c @@ -0,0 +1,451 @@ +// SPDX-License-Identifier: GPL-2.0 +// Copyright (C) 2018 Intel Corporation + +#include + +#include "ipu.h" +#include "ipu-platform-regs.h" +#include "ipu-platform-buttress-regs.h" +#include "ipu-platform-isys-csi2-reg.h" +#include "ipu-trace.h" +#include "ipu-isys.h" +#include "ipu-isys-video.h" +#include "ipu-isys-tpg.h" + +#ifndef V4L2_PIX_FMT_SBGGR14V32 +/* + * Non-vectorized 14bit definitions have been upstreamed. + * To keep various versions of the ipu4 builds compileable use local + * definitions when global one's doesn't exists. + */ +#define V4L2_PIX_FMT_SBGGR14V32 v4l2_fourcc('b', 'V', '0', 'M') +#define V4L2_PIX_FMT_SGBRG14V32 v4l2_fourcc('b', 'V', '0', 'N') +#define V4L2_PIX_FMT_SGRBG14V32 v4l2_fourcc('b', 'V', '0', 'O') +#define V4L2_PIX_FMT_SRGGB14V32 v4l2_fourcc('b', 'V', '0', 'P') +#endif + +const struct ipu_isys_pixelformat ipu_isys_pfmts[] = { + /* YUV vector format */ + {V4L2_PIX_FMT_YUYV420_V32, 24, 24, 0, MEDIA_BUS_FMT_YUYV12_1X24, + IPU_FW_ISYS_FRAME_FORMAT_YUV420_16}, + /* Raw bayer vector formats. 
*/ + {V4L2_PIX_FMT_SBGGR14V32, 16, 14, 0, MEDIA_BUS_FMT_SBGGR14_1X14, + IPU_FW_ISYS_FRAME_FORMAT_RAW16}, + {V4L2_PIX_FMT_SGBRG14V32, 16, 14, 0, MEDIA_BUS_FMT_SGBRG14_1X14, + IPU_FW_ISYS_FRAME_FORMAT_RAW16}, + {V4L2_PIX_FMT_SGRBG14V32, 16, 14, 0, MEDIA_BUS_FMT_SGRBG14_1X14, + IPU_FW_ISYS_FRAME_FORMAT_RAW16}, + {V4L2_PIX_FMT_SRGGB14V32, 16, 14, 0, MEDIA_BUS_FMT_SRGGB14_1X14, + IPU_FW_ISYS_FRAME_FORMAT_RAW16}, + {V4L2_PIX_FMT_SBGGR12V32, 16, 12, 0, MEDIA_BUS_FMT_SBGGR12_1X12, + IPU_FW_ISYS_FRAME_FORMAT_RAW16}, + {V4L2_PIX_FMT_SGBRG12V32, 16, 12, 0, MEDIA_BUS_FMT_SGBRG12_1X12, + IPU_FW_ISYS_FRAME_FORMAT_RAW16}, + {V4L2_PIX_FMT_SGRBG12V32, 16, 12, 0, MEDIA_BUS_FMT_SGRBG12_1X12, + IPU_FW_ISYS_FRAME_FORMAT_RAW16}, + {V4L2_PIX_FMT_SRGGB12V32, 16, 12, 0, MEDIA_BUS_FMT_SRGGB12_1X12, + IPU_FW_ISYS_FRAME_FORMAT_RAW16}, + {V4L2_PIX_FMT_SBGGR10V32, 16, 10, 0, MEDIA_BUS_FMT_SBGGR10_1X10, + IPU_FW_ISYS_FRAME_FORMAT_RAW16}, + {V4L2_PIX_FMT_SGBRG10V32, 16, 10, 0, MEDIA_BUS_FMT_SGBRG10_1X10, + IPU_FW_ISYS_FRAME_FORMAT_RAW16}, + {V4L2_PIX_FMT_SGRBG10V32, 16, 10, 0, MEDIA_BUS_FMT_SGRBG10_1X10, + IPU_FW_ISYS_FRAME_FORMAT_RAW16}, + {V4L2_PIX_FMT_SRGGB10V32, 16, 10, 0, MEDIA_BUS_FMT_SRGGB10_1X10, + IPU_FW_ISYS_FRAME_FORMAT_RAW16}, + {V4L2_PIX_FMT_SBGGR8_16V32, 16, 8, 0, MEDIA_BUS_FMT_SBGGR8_1X8, + IPU_FW_ISYS_FRAME_FORMAT_RAW16}, + {V4L2_PIX_FMT_SGBRG8_16V32, 16, 8, 0, MEDIA_BUS_FMT_SGBRG8_1X8, + IPU_FW_ISYS_FRAME_FORMAT_RAW16}, + {V4L2_PIX_FMT_SGRBG8_16V32, 16, 8, 0, MEDIA_BUS_FMT_SGRBG8_1X8, + IPU_FW_ISYS_FRAME_FORMAT_RAW16}, + {V4L2_PIX_FMT_SRGGB8_16V32, 16, 8, 0, MEDIA_BUS_FMT_SRGGB8_1X8, + IPU_FW_ISYS_FRAME_FORMAT_RAW16}, + {V4L2_FMT_IPU_ISYS_META, 8, 8, 0, MEDIA_BUS_FMT_FIXED, + IPU_FW_ISYS_MIPI_DATA_TYPE_EMBEDDED}, + {} +}; + +struct ipu_trace_block isys_trace_blocks[] = { + { + .offset = TRACE_REG_IS_TRACE_UNIT_BASE, + .type = IPU_TRACE_BLOCK_TUN, + }, + { + .offset = TRACE_REG_IS_SP_EVQ_BASE, + .type = IPU_TRACE_BLOCK_TM, + }, + { + .offset = TRACE_REG_IS_SP_GPC_BASE, + .type = 
IPU_TRACE_BLOCK_GPC, + }, + { + .offset = TRACE_REG_IS_ISL_GPC_BASE, + .type = IPU_TRACE_BLOCK_GPC, + }, + { + .offset = TRACE_REG_IS_MMU_GPC_BASE, + .type = IPU_TRACE_BLOCK_GPC, + }, + { + .offset = TRACE_REG_CSI2_TM_BASE, + .type = IPU_TRACE_CSI2, + }, + { + .offset = TRACE_REG_CSI2_3PH_TM_BASE, + .type = IPU_TRACE_CSI2_3PH, + }, + { + /* Note! this covers all 9 blocks */ + .offset = TRACE_REG_CSI2_SIG2SIO_GR_BASE(0), + .type = IPU_TRACE_SIG2CIOS, + }, + { + /* Note! this covers all 9 blocks */ + .offset = TRACE_REG_CSI2_PH3_SIG2SIO_GR_BASE(0), + .type = IPU_TRACE_SIG2CIOS, + }, + { + .offset = TRACE_REG_IS_GPREG_TRACE_TIMER_RST_N, + .type = IPU_TRACE_TIMER_RST, + }, + { + .type = IPU_TRACE_BLOCK_END, + } +}; + +#ifdef CONFIG_VIDEO_INTEL_IPU4 +void isys_setup_hw(struct ipu_isys *isys) +{ + void __iomem *base = isys->pdata->base; + const u8 *thd = isys->pdata->ipdata->hw_variant.cdc_fifo_threshold; + u32 irqs; + unsigned int i; + + /* Enable irqs for all MIPI busses */ + irqs = IPU_ISYS_UNISPART_IRQ_CSI2(0) | + IPU_ISYS_UNISPART_IRQ_CSI2(1) | + IPU_ISYS_UNISPART_IRQ_CSI2(2) | + IPU_ISYS_UNISPART_IRQ_CSI2(3) | + IPU_ISYS_UNISPART_IRQ_CSI2(4) | IPU_ISYS_UNISPART_IRQ_CSI2(5); + + irqs |= IPU_ISYS_UNISPART_IRQ_SW; + + writel(irqs, base + IPU_REG_ISYS_UNISPART_IRQ_EDGE); + writel(irqs, base + IPU_REG_ISYS_UNISPART_IRQ_LEVEL_NOT_PULSE); + writel(irqs, base + IPU_REG_ISYS_UNISPART_IRQ_CLEAR); + writel(irqs, base + IPU_REG_ISYS_UNISPART_IRQ_MASK); + writel(irqs, base + IPU_REG_ISYS_UNISPART_IRQ_ENABLE); + + writel(0, base + IPU_REG_ISYS_UNISPART_SW_IRQ_REG); + writel(0, base + IPU_REG_ISYS_UNISPART_SW_IRQ_MUX_REG); + + /* Write CDC FIFO threshold values for isys */ + for (i = 0; i < isys->pdata->ipdata->hw_variant.cdc_fifos; i++) + writel(thd[i], base + IPU_REG_ISYS_CDC_THRESHOLD(i)); +} +#endif + +#ifdef CONFIG_VIDEO_INTEL_IPU4P +/* + * For new HW, extra common register (en_flush_for_idrain)added to the IBufCtrl + * of ISL_IS and CSI that enables the feature to send a 
DMA command with flush + * when draining. This means that a DMA command is send with the flush bit + * set(read post write check is performed) when a drain request comes in and + * iwake is enabled for that SID proc. + * This results in that all data is moved out of the system when the IDone is + * given back. Default the feature is off, to keep behavior as is when nothing + * is written, writing 0x1 to the register (reg 11 in common reg bank, + * addr ibuf_base + 0x2C) to enable this feature. + */ +static int ipu4p_isys_flush_idrain_en(struct ipu_isys *isys) +{ + void __iomem *base = isys->pdata->base; + + writel(1, base + CSI2_REG_CL0_IBUFCTL_EN_FLUSH_FOR_IDRAIN); + writel(1, base + CSI2_REG_CL1_IBUFCTL_EN_FLUSH_FOR_IDRAIN); + writel(1, base + IPU_REG_ISYS_IBUFCTL_EN_FLUSH_FOR_IDRAIN); + + return 0; +} + +static void ipu4p_isys_irq_cfg(struct ipu_isys *isys) +{ + void __iomem *base = isys->pdata->base; + int i, j; + struct { + u32 base; + u32 mask; + } irq_config[] = { + {IPU_REG_ISYS_UNISPART_IRQ_EDGE, 0x400018}, + {IPU_REG_ISYS_ISA_ACC_IRQ_CTRL_BASE, 0x0}, + {IPU_REG_ISYS_A_IRQ_CTRL_BASE, 0x0}, + {IPU_REG_ISYS_SIP0_IRQ_CTRL_BASE, 0xf}, + {IPU_REG_ISYS_SIP1_IRQ_CTRL_BASE, 0xf}, + }; + unsigned int offsets[4] = { + 0x0, 0x4, 0x10, 0x14 + }; + + for (i = 0; i < ARRAY_SIZE(irq_config); i++) { + for (j = 0; j < ARRAY_SIZE(offsets); j++) + writel(irq_config[i].mask, + base + irq_config[i].base + offsets[j]); + writel(0xffffffff, base + irq_config[i].base + 0xc); + } + + writel(0, base + IPU_REG_ISYS_UNISPART_SW_IRQ_REG); + writel(0, base + IPU_REG_ISYS_UNISPART_SW_IRQ_MUX_REG); +} + +static void ipu4p_isys_bb_cfg(struct ipu_isys *isys) +{ + void __iomem *isp_base = isys->adev->isp->base; + unsigned int i, val; + unsigned int bbconfig[4][4] = { + {4, 13, 32, 0xf}, + {6, 13, 32, 0x15}, + {12, 13, 32, 0xf}, + {14, 13, 32, 0x15}, + }; + + /* Config building block */ + for (i = 0; i < 4; i++) { + unsigned int bb = bbconfig[i][0]; + unsigned int crc = bbconfig[i][1]; + 
unsigned int drc = bbconfig[i][2]; + unsigned int afe = bbconfig[i][3]; + + val = readl(isp_base + BUTTRESS_REG_CPHYX_DLL_OVRD(bb)); + val &= ~0x7e; + val |= crc << 1; + val |= 1; + writel(val, isp_base + BUTTRESS_REG_CPHYX_DLL_OVRD(bb)); + val = readl(isp_base + BUTTRESS_REG_DPHYX_DLL_OVRD(bb)); + val |= 1; + val |= drc << 1; + writel(val, isp_base + BUTTRESS_REG_DPHYX_DLL_OVRD(bb)); + val = afe | (2 << 29); + writel(val, isp_base + BUTTRESS_REG_BBX_AFE_CONFIG(bb)); + } +} + +static void ipu4p_isys_port_cfg(struct ipu_isys *isys) +{ + void __iomem *base = isys->pdata->base; + void __iomem *isp_base = isys->adev->isp->base; + + /* Port config */ + writel(0x3895, base + IPU_GPOFFSET + 0x14); + writel(0x3895, base + IPU_COMBO_GPOFFSET + 0x14); + writel((0x100 << 1) | (0x100 << 10) | (0x100 << 19), isp_base + + BUTTRESS_REG_CSI_BSCAN_EXCLUDE); +} + +void isys_setup_hw(struct ipu_isys *isys) +{ + ipu4p_isys_irq_cfg(isys); + ipu4p_isys_port_cfg(isys); + ipu4p_isys_bb_cfg(isys); + ipu4p_isys_flush_idrain_en(isys); +} +#endif + +#ifdef CONFIG_VIDEO_INTEL_IPU4 +irqreturn_t isys_isr(struct ipu_bus_device *adev) +{ + struct ipu_isys *isys = ipu_bus_get_drvdata(adev); + void __iomem *base = isys->pdata->base; + u32 status; + + spin_lock(&isys->power_lock); + if (!isys->power) { + spin_unlock(&isys->power_lock); + return IRQ_NONE; + } + + status = readl(isys->pdata->base + + IPU_REG_ISYS_UNISPART_IRQ_STATUS); + do { + writel(status, isys->pdata->base + + IPU_REG_ISYS_UNISPART_IRQ_CLEAR); + + if (isys->isr_csi2_bits & status) { + unsigned int i; + + for (i = 0; i < isys->pdata->ipdata->csi2.nports; i++) { + if (IPU_ISYS_UNISPART_IRQ_CSI2(i) & status) + ipu_isys_csi2_isr(&isys->csi2[i]); + } + } + + writel(0, base + IPU_REG_ISYS_UNISPART_SW_IRQ_REG); + + /* + * Handle a single FW event per checking the CSI-2 + * receiver SOF status. 
This is done in order to avoid + * the case where events arrive to the event queue and + * one of them is a SOF event which then could be + * handled before the SOF interrupt. This would pose + * issues in sequence numbering which is based on SOF + * interrupts, always assumed to arrive before FW SOF + * events. + */ + if (status & IPU_ISYS_UNISPART_IRQ_SW && !isys_isr_one(adev)) + status = IPU_ISYS_UNISPART_IRQ_SW; + else + status = 0; + + status |= readl(isys->pdata->base + + IPU_REG_ISYS_UNISPART_IRQ_STATUS); + } while (status & (isys->isr_csi2_bits + | IPU_ISYS_UNISPART_IRQ_SW) && + !isys->adev->isp->flr_done); + spin_unlock(&isys->power_lock); + + return IRQ_HANDLED; +} +#endif + +#ifdef CONFIG_VIDEO_INTEL_IPU4P +irqreturn_t isys_isr(struct ipu_bus_device *adev) +{ + struct ipu_isys *isys = ipu_bus_get_drvdata(adev); + void __iomem *base = isys->pdata->base; + u32 status; + unsigned int i; + u32 sip0_status, sip1_status; + struct { + u32 *status; + u32 mask; + } csi2_irq_mask[] = { + {&sip0_status, IPU_ISYS_CSI2_D_IRQ_MASK}, + {&sip1_status, IPU_ISYS_CSI2_A_IRQ_MASK}, + {&sip1_status, IPU_ISYS_CSI2_B_IRQ_MASK}, + {&sip1_status, IPU_ISYS_CSI2_C_IRQ_MASK}, + {&sip1_status, IPU_ISYS_CSI2_D_IRQ_MASK}, + }; + + spin_lock(&isys->power_lock); + if (!isys->power) { + spin_unlock(&isys->power_lock); + return IRQ_NONE; + } + + /* read unis sw irq */ + status = readl(isys->pdata->base + + IPU_REG_ISYS_UNISPART_IRQ_STATUS); + dev_dbg(&adev->dev, "isys irq status - unis sw irq = 0x%x", status); + + do { + /* clear unis sw irqs */ + writel(status, isys->pdata->base + + IPU_REG_ISYS_UNISPART_IRQ_CLEAR); + + /* read and clear sip irq status */ + sip0_status = readl(isys->pdata->base + + IPU_REG_ISYS_SIP0_IRQ_CTRL_STATUS); + sip1_status = readl(isys->pdata->base + + IPU_REG_ISYS_SIP1_IRQ_CTRL_STATUS); + dev_dbg(&adev->dev, "isys irq status - sip0 = 0x%x sip1 = 0x%x", + sip0_status, sip1_status); + writel(sip0_status, isys->pdata->base + + IPU_REG_ISYS_SIP0_IRQ_CTRL_CLEAR); + 
writel(sip1_status, isys->pdata->base + + IPU_REG_ISYS_SIP1_IRQ_CTRL_CLEAR); + + for (i = 0; i < isys->pdata->ipdata->csi2.nports; i++) { + if (*csi2_irq_mask[i].status & csi2_irq_mask[i].mask) + ipu_isys_csi2_isr(&isys->csi2[i]); + } + + writel(0, base + IPU_REG_ISYS_UNISPART_SW_IRQ_REG); + + /* + * Handle a single FW event per checking the CSI-2 + * receiver SOF status. This is done in order to avoid + * the case where events arrive to the event queue and + * one of them is a SOF event which then could be + * handled before the SOF interrupt. This would pose + * issues in sequence numbering which is based on SOF + * interrupts, always assumed to arrive before FW SOF + * events. + */ + if (status & IPU_ISYS_UNISPART_IRQ_SW && !isys_isr_one(adev)) + status = IPU_ISYS_UNISPART_IRQ_SW; + else + status = 0; + + status |= readl(isys->pdata->base + + IPU_REG_ISYS_UNISPART_IRQ_STATUS); + } while (status & (isys->isr_csi2_bits + | IPU_ISYS_UNISPART_IRQ_SW) && + !isys->adev->isp->flr_done); + spin_unlock(&isys->power_lock); + + return IRQ_HANDLED; +} +#endif + +int tpg_set_stream(struct v4l2_subdev *sd, int enable) +{ + struct ipu_isys_tpg *tpg = to_ipu_isys_tpg(sd); +#ifdef IPU_VC_SUPPORT + __u32 code = tpg->asd.ffmt[TPG_PAD_SOURCE][0].code; +#else + __u32 code = tpg->asd.ffmt[TPG_PAD_SOURCE].code; +#endif + unsigned int bpp = ipu_isys_mbus_code_to_bpp(code); + + /* + * MIPI_GEN block is CSI2 FB. Need to enable/disable TPG selection + * register to control the TPG streaming. + */ + if (tpg->sel) + writel(enable ? 
1 : 0, tpg->sel); + + if (!enable) { + writel(0, tpg->base + MIPI_GEN_REG_COM_ENABLE); + return 0; + } + + writel(MIPI_GEN_COM_DTYPE_RAW(bpp), + tpg->base + MIPI_GEN_REG_COM_DTYPE); + writel(ipu_isys_mbus_code_to_mipi(code), + tpg->base + MIPI_GEN_REG_COM_VTYPE); + writel(0, tpg->base + MIPI_GEN_REG_COM_VCHAN); + + writel(0, tpg->base + MIPI_GEN_REG_SYNG_NOF_FRAMES); + +#ifdef IPU_VC_SUPPORT + writel(DIV_ROUND_UP(tpg->asd.ffmt[TPG_PAD_SOURCE][0].width * + bpp, BITS_PER_BYTE), + tpg->base + MIPI_GEN_REG_COM_WCOUNT); + writel(DIV_ROUND_UP(tpg->asd.ffmt[TPG_PAD_SOURCE][0].width, + MIPI_GEN_PPC), + tpg->base + MIPI_GEN_REG_SYNG_NOF_PIXELS); + writel(tpg->asd.ffmt[TPG_PAD_SOURCE][0].height, + tpg->base + MIPI_GEN_REG_SYNG_NOF_LINES); +#else + writel(DIV_ROUND_UP(tpg->asd.ffmt[TPG_PAD_SOURCE].width * + bpp, BITS_PER_BYTE), + tpg->base + MIPI_GEN_REG_COM_WCOUNT); + writel(DIV_ROUND_UP(tpg->asd.ffmt[TPG_PAD_SOURCE].width, + MIPI_GEN_PPC), + tpg->base + MIPI_GEN_REG_SYNG_NOF_PIXELS); + writel(tpg->asd.ffmt[TPG_PAD_SOURCE].height, + tpg->base + MIPI_GEN_REG_SYNG_NOF_LINES); +#endif + + writel(0, tpg->base + MIPI_GEN_REG_TPG_MODE); + writel(-1, tpg->base + MIPI_GEN_REG_TPG_HCNT_MASK); + writel(-1, tpg->base + MIPI_GEN_REG_TPG_VCNT_MASK); + writel(-1, tpg->base + MIPI_GEN_REG_TPG_XYCNT_MASK); + writel(0, tpg->base + MIPI_GEN_REG_TPG_HCNT_DELTA); + writel(0, tpg->base + MIPI_GEN_REG_TPG_VCNT_DELTA); + + v4l2_ctrl_handler_setup(&tpg->asd.ctrl_handler); + + writel(2, tpg->base + MIPI_GEN_REG_COM_ENABLE); + return 0; +} diff --git a/drivers/media/pci/intel/ipu4/ipu4-psys.c b/drivers/media/pci/intel/ipu4/ipu4-psys.c new file mode 100644 index 0000000000000..3fe3a06d61fcb --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-psys.c @@ -0,0 +1,1109 @@ +// SPDX-License-Identifier: GPL-2.0 +// Copyright (C) 2018 Intel Corporation + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 14, 0) +#include 
+#else +#include +#endif +#include +#include + +#include "ipu.h" +#include "ipu-psys.h" +#include "ipu-platform-regs.h" +#include "ipu-trace.h" +#define CREATE_TRACE_POINTS +#define IPU_PG_KCMD_TRACE +#include "ipu-trace-event.h" + +static bool early_pg_transfer; +static bool enable_concurrency = true; +module_param(early_pg_transfer, bool, 0664); +module_param(enable_concurrency, bool, 0664); +MODULE_PARM_DESC(early_pg_transfer, + "Copy PGs back to user after resource allocation"); +MODULE_PARM_DESC(enable_concurrency, + "Enable concurrent execution of program groups"); + +struct ipu_trace_block psys_trace_blocks[] = { + { + .offset = TRACE_REG_PS_TRACE_UNIT_BASE, + .type = IPU_TRACE_BLOCK_TUN, + }, + { + .offset = TRACE_REG_PS_SPC_EVQ_BASE, + .type = IPU_TRACE_BLOCK_TM, + }, + { + .offset = TRACE_REG_PS_SPP0_EVQ_BASE, + .type = IPU_TRACE_BLOCK_TM, + }, + { + .offset = TRACE_REG_PS_SPP1_EVQ_BASE, + .type = IPU_TRACE_BLOCK_TM, + }, + { + .offset = TRACE_REG_PS_ISP0_EVQ_BASE, + .type = IPU_TRACE_BLOCK_TM, + }, + { + .offset = TRACE_REG_PS_ISP1_EVQ_BASE, + .type = IPU_TRACE_BLOCK_TM, + }, + { + .offset = TRACE_REG_PS_ISP2_EVQ_BASE, + .type = IPU_TRACE_BLOCK_TM, + }, + { + .offset = TRACE_REG_PS_ISP3_EVQ_BASE, + .type = IPU_TRACE_BLOCK_TM, + }, + { + .offset = TRACE_REG_PS_SPC_GPC_BASE, + .type = IPU_TRACE_BLOCK_GPC, + }, + { + .offset = TRACE_REG_PS_SPP0_GPC_BASE, + .type = IPU_TRACE_BLOCK_GPC, + }, + { + .offset = TRACE_REG_PS_SPP1_GPC_BASE, + .type = IPU_TRACE_BLOCK_GPC, + }, + { + .offset = TRACE_REG_PS_MMU_GPC_BASE, + .type = IPU_TRACE_BLOCK_GPC, + }, + { + .offset = TRACE_REG_PS_ISL_GPC_BASE, + .type = IPU_TRACE_BLOCK_GPC, + }, + { + .offset = TRACE_REG_PS_ISP0_GPC_BASE, + .type = IPU_TRACE_BLOCK_GPC, + }, + { + .offset = TRACE_REG_PS_ISP1_GPC_BASE, + .type = IPU_TRACE_BLOCK_GPC, + }, + { + .offset = TRACE_REG_PS_ISP2_GPC_BASE, + .type = IPU_TRACE_BLOCK_GPC, + }, + { + .offset = TRACE_REG_PS_ISP3_GPC_BASE, + .type = IPU_TRACE_BLOCK_GPC, + }, + { + .offset = 
TRACE_REG_PS_GPREG_TRACE_TIMER_RST_N, + .type = IPU_TRACE_TIMER_RST, + }, + { + .type = IPU_TRACE_BLOCK_END, + } +}; + +static int ipu_psys_kcmd_abort(struct ipu_psys *psys, + struct ipu_psys_kcmd *kcmd); +static int ipu_psys_kcmd_queue(struct ipu_psys *psys, + struct ipu_psys_kcmd *kcmd); + +static void set_sp_info_bits(void *base) +{ + int i; + + writel(IPU_INFO_REQUEST_DESTINATION_PRIMARY, + base + IPU_REG_PSYS_INFO_SEG_0_CONFIG_ICACHE_MASTER); + + for (i = 0; i < 4; i++) + writel(IPU_INFO_REQUEST_DESTINATION_PRIMARY, + base + IPU_REG_PSYS_INFO_SEG_CMEM_MASTER(i)); + for (i = 0; i < 4; i++) + writel(IPU_INFO_REQUEST_DESTINATION_PRIMARY, + base + IPU_REG_PSYS_INFO_SEG_XMEM_MASTER(i)); +} + +static void set_isp_info_bits(void *base) +{ + int i; + + writel(IPU_INFO_REQUEST_DESTINATION_PRIMARY, + base + IPU_REG_PSYS_INFO_SEG_0_CONFIG_ICACHE_MASTER); + + for (i = 0; i < 4; i++) + writel(IPU_INFO_REQUEST_DESTINATION_PRIMARY, + base + IPU_REG_PSYS_INFO_SEG_DATA_MASTER(i)); +} + +void ipu_psys_setup_hw(struct ipu_psys *psys) +{ + void __iomem *base = psys->pdata->base; + void __iomem *spc_regs_base = + base + psys->pdata->ipdata->hw_variant.spc_offset; + void *psys_iommu0_ctrl = base + + psys->pdata->ipdata->hw_variant.mmu_hw[0].offset + + IPU_PSYS_MMU0_CTRL_OFFSET; + const u8 *thd = psys->pdata->ipdata->hw_variant.cdc_fifo_threshold; + u32 irqs; + unsigned int i; + + /* Configure PSYS info bits */ + writel(IPU_INFO_REQUEST_DESTINATION_PRIMARY, psys_iommu0_ctrl); + + set_sp_info_bits(spc_regs_base + IPU_PSYS_REG_SPC_STATUS_CTRL); + set_sp_info_bits(spc_regs_base + IPU_PSYS_REG_SPP0_STATUS_CTRL); + set_sp_info_bits(spc_regs_base + IPU_PSYS_REG_SPP1_STATUS_CTRL); + set_isp_info_bits(spc_regs_base + IPU_PSYS_REG_ISP0_STATUS_CTRL); + set_isp_info_bits(spc_regs_base + IPU_PSYS_REG_ISP1_STATUS_CTRL); + set_isp_info_bits(spc_regs_base + IPU_PSYS_REG_ISP2_STATUS_CTRL); + set_isp_info_bits(spc_regs_base + IPU_PSYS_REG_ISP3_STATUS_CTRL); + + /* Enable FW interrupt #0 */ + 
writel(0, base + IPU_REG_PSYS_GPDEV_FWIRQ(0)); + irqs = IPU_PSYS_GPDEV_IRQ_FWIRQ(0); + writel(irqs, base + IPU_REG_PSYS_GPDEV_IRQ_EDGE); + /* + * With pulse setting, driver misses interrupts. IUNIT integration + * HAS(v1.26) suggests to use pulse, but this seem to be error in + * documentation. + */ + writel(irqs, base + IPU_REG_PSYS_GPDEV_IRQ_LEVEL_NOT_PULSE); + writel(irqs, base + IPU_REG_PSYS_GPDEV_IRQ_CLEAR); + writel(irqs, base + IPU_REG_PSYS_GPDEV_IRQ_MASK); + writel(irqs, base + IPU_REG_PSYS_GPDEV_IRQ_ENABLE); + + /* Write CDC FIFO threshold values for psys */ + for (i = 0; i < psys->pdata->ipdata->hw_variant.cdc_fifos; i++) + writel(thd[i], base + IPU_REG_PSYS_CDC_THRESHOLD(i)); +} + +/* + * Called to free up all resources associated with a kcmd. + * After this the kcmd doesn't anymore exist in the driver. + */ +void ipu_psys_kcmd_free(struct ipu_psys_kcmd *kcmd) +{ + struct ipu_psys *psys; + unsigned long flags; + + if (!kcmd) + return; + + psys = kcmd->fh->psys; + + if (!list_empty(&kcmd->list)) + list_del(&kcmd->list); + + spin_lock_irqsave(&psys->pgs_lock, flags); + if (kcmd->kpg) + kcmd->kpg->pg_size = 0; + spin_unlock_irqrestore(&psys->pgs_lock, flags); + + kfree(kcmd->pg_manifest); + kfree(kcmd->kbufs); + kfree(kcmd->buffers); + kfree(kcmd); +} + +static struct ipu_psys_kcmd *ipu_psys_copy_cmd(struct ipu_psys_command *cmd, + struct ipu_psys_fh *fh) +{ + struct ipu_psys *psys = fh->psys; + struct ipu_psys_kcmd *kcmd; + struct ipu_psys_kbuffer *kpgbuf; + unsigned int i; + int ret, prevfd = 0; + + if (cmd->bufcount > IPU_MAX_PSYS_CMD_BUFFERS) + return NULL; + + if (!cmd->pg_manifest_size || + cmd->pg_manifest_size > KMALLOC_MAX_CACHE_SIZE) + return NULL; + + kcmd = kzalloc(sizeof(*kcmd), GFP_KERNEL); + if (!kcmd) + return NULL; + + kcmd->state = KCMD_STATE_NEW; + kcmd->fh = fh; + INIT_LIST_HEAD(&kcmd->list); + INIT_LIST_HEAD(&kcmd->started_list); + + mutex_lock(&fh->mutex); + kpgbuf = ipu_psys_lookup_kbuffer(fh, cmd->pg); + mutex_unlock(&fh->mutex); + 
if (!kpgbuf || !kpgbuf->sgt) + goto error; + + kcmd->pg_user = kpgbuf->kaddr; + kcmd->kpg = __get_pg_buf(psys, kpgbuf->len); + if (!kcmd->kpg) + goto error; + + memcpy(kcmd->kpg->pg, kcmd->pg_user, kcmd->kpg->pg_size); + + kcmd->pg_manifest = kzalloc(cmd->pg_manifest_size, GFP_KERNEL); + if (!kcmd->pg_manifest) + goto error; + + ret = copy_from_user(kcmd->pg_manifest, cmd->pg_manifest, + cmd->pg_manifest_size); + if (ret) + goto error; + + kcmd->pg_manifest_size = cmd->pg_manifest_size; + + kcmd->user_token = cmd->user_token; + kcmd->issue_id = cmd->issue_id; + kcmd->priority = cmd->priority; + if (kcmd->priority >= IPU_PSYS_CMD_PRIORITY_NUM) + goto error; + + kcmd->nbuffers = ipu_fw_psys_pg_get_terminal_count(kcmd); + kcmd->buffers = kcalloc(kcmd->nbuffers, sizeof(*kcmd->buffers), + GFP_KERNEL); + if (!kcmd->buffers) + goto error; + + kcmd->kbufs = kcalloc(kcmd->nbuffers, sizeof(kcmd->kbufs[0]), + GFP_KERNEL); + if (!kcmd->kbufs) + goto error; + + + if (!cmd->bufcount || kcmd->nbuffers > cmd->bufcount) + goto error; + + ret = copy_from_user(kcmd->buffers, cmd->buffers, + kcmd->nbuffers * sizeof(*kcmd->buffers)); + if (ret) + goto error; + + for (i = 0; i < kcmd->nbuffers; i++) { + struct ipu_fw_psys_terminal *terminal; + + terminal = ipu_fw_psys_pg_get_terminal(kcmd, i); + if (!terminal) + continue; + + + mutex_lock(&fh->mutex); + kcmd->kbufs[i] = ipu_psys_lookup_kbuffer(fh, + kcmd->buffers[i].base.fd); + mutex_unlock(&fh->mutex); + if (!kcmd->kbufs[i] || !kcmd->kbufs[i]->sgt || + kcmd->kbufs[i]->len < kcmd->buffers[i].bytes_used) + goto error; + if ((kcmd->kbufs[i]->flags & + IPU_BUFFER_FLAG_NO_FLUSH) || + (kcmd->buffers[i].flags & + IPU_BUFFER_FLAG_NO_FLUSH) || + prevfd == kcmd->buffers[i].base.fd) + continue; + + prevfd = kcmd->buffers[i].base.fd; + dma_sync_sg_for_device(&psys->adev->dev, + kcmd->kbufs[i]->sgt->sgl, + kcmd->kbufs[i]->sgt->orig_nents, + DMA_BIDIRECTIONAL); + } + + + return kcmd; +error: + ipu_psys_kcmd_free(kcmd); + + dev_dbg(&psys->adev->dev, 
"failed to copy cmd\n"); + + return NULL; +} + +static void ipu_psys_kcmd_run(struct ipu_psys *psys) +{ + struct ipu_psys_kcmd *kcmd = list_first_entry(&psys->started_kcmds_list, + struct ipu_psys_kcmd, + started_list); + int ret; + + ret = ipu_psys_move_resources(&psys->adev->dev, + &kcmd->kpg->resource_alloc, + &psys->resource_pool_started, + &psys->resource_pool_running); + if (!ret) { + psys->started_kcmds--; + psys->active_kcmds++; + kcmd->state = KCMD_STATE_RUNNING; + list_del(&kcmd->started_list); + kcmd->watchdog.expires = jiffies + + msecs_to_jiffies(psys->timeout); + add_timer(&kcmd->watchdog); + return; + } + + if (ret != -ENOSPC || !psys->active_kcmds) { + dev_err(&psys->adev->dev, + "kcmd %p failed to alloc resources %d, active_kcmds %d\n", + kcmd, ret, psys->active_kcmds); + ipu_psys_kcmd_abort(psys, kcmd); + return; + } +} + +/* + * Move kcmd into completed state (due to running finished or failure). + * Fill up the event struct and notify waiters. + */ +void ipu_psys_kcmd_complete(struct ipu_psys *psys, + struct ipu_psys_kcmd *kcmd, int error) +{ + struct ipu_psys_fh *fh = kcmd->fh; + + trace_ipu_pg_kcmd(__func__, kcmd->user_token, kcmd->issue_id, + kcmd->priority, + ipu_fw_psys_pg_get_id(kcmd), + ipu_fw_psys_pg_load_cycles(kcmd), + ipu_fw_psys_pg_init_cycles(kcmd), + ipu_fw_psys_pg_processing_cycles(kcmd)); + + switch (kcmd->state) { + case KCMD_STATE_RUNNING: + if (try_to_del_timer_sync(&kcmd->watchdog) < 0) { + dev_err(&psys->adev->dev, + "could not cancel kcmd timer\n"); + return; + } + /* Fall through on purpose */ + case KCMD_STATE_RUN_PREPARED: + ipu_psys_free_resources(&kcmd->kpg->resource_alloc, + &psys->resource_pool_running); + if (psys->started_kcmds) + ipu_psys_kcmd_run(psys); + if (kcmd->state == KCMD_STATE_RUNNING) + psys->active_kcmds--; + break; + case KCMD_STATE_STARTED: + psys->started_kcmds--; + list_del(&kcmd->started_list); + /* Fall through on purpose */ + case KCMD_STATE_START_PREPARED: + 
ipu_psys_free_resources(&kcmd->kpg->resource_alloc, + &psys->resource_pool_started); + break; + default: + break; + } + + kcmd->ev.type = IPU_PSYS_EVENT_TYPE_CMD_COMPLETE; + kcmd->ev.user_token = kcmd->user_token; + kcmd->ev.issue_id = kcmd->issue_id; + kcmd->ev.error = error; + + if (kcmd->constraint.min_freq) + ipu_buttress_remove_psys_constraint(psys->adev->isp, + &kcmd->constraint); + + if (!early_pg_transfer && kcmd->pg_user && kcmd->kpg->pg) { + struct ipu_psys_kbuffer *kbuf; + + kbuf = ipu_psys_lookup_kbuffer_by_kaddr(kcmd->fh, + kcmd->pg_user); + + if (kbuf && kbuf->valid) + memcpy(kcmd->pg_user, + kcmd->kpg->pg, kcmd->kpg->pg_size); + else + dev_dbg(&psys->adev->dev, + "Skipping already unmapped buffer\n"); + } + + if (kcmd->state == KCMD_STATE_RUNNING || + kcmd->state == KCMD_STATE_STARTED) { + pm_runtime_mark_last_busy(&psys->adev->dev); + pm_runtime_put_autosuspend(&psys->adev->dev); + } + + kcmd->state = KCMD_STATE_COMPLETE; + + wake_up_interruptible(&fh->wait); +} + +/* + * Schedule next kcmd by finding a runnable kcmd from the highest + * priority queue in a round-robin fashion versus the client + * queues and running it. + * Any kcmds which fail to start are completed with an error. + */ +void ipu_psys_run_next(struct ipu_psys *psys) +{ + int p; + + /* + * Code below will crash if fhs is empty. Normally this + * shouldn't happen. + */ + if (list_empty(&psys->fhs)) { + WARN_ON(1); + return; + } + + for (p = 0; p < IPU_PSYS_CMD_PRIORITY_NUM; p++) { + int removed; + + do { + struct ipu_psys_fh *fh = list_first_entry(&psys->fhs, + struct + ipu_psys_fh, + list); + struct ipu_psys_fh *fh_last = + list_last_entry(&psys->fhs, + struct ipu_psys_fh, + list); + /* + * When a kcmd is scheduled from a fh, it might expose + * more runnable kcmds behind it in the same queue. + * Therefore loop running kcmds as long as some were + * scheduled. 
+ */ + removed = 0; + do { + struct ipu_psys_fh *fh_next = + list_next_entry(fh, list); + struct ipu_psys_kcmd *kcmd; + int ret; + + mutex_lock(&fh->mutex); + + kcmd = fh->sched.new_kcmd_tail[p]; + /* + * If concurrency is disabled and there are + * already commands running on the PSYS, do not + * run new commands. + */ + if (!enable_concurrency && + psys->active_kcmds > 0) { + mutex_unlock(&fh->mutex); + return; + } + + /* Are there new kcmds available for running? */ + if (!kcmd) + goto next; + + ret = ipu_psys_kcmd_queue(psys, kcmd); + if (ret == -ENOSPC) + goto next; + + /* Update pointer to the first new kcmd */ + fh->sched.new_kcmd_tail[p] = NULL; + while (kcmd != list_last_entry( + &fh->sched.kcmds[p], + struct ipu_psys_kcmd, + list)) { + kcmd = list_next_entry(kcmd, list); + if (kcmd->state == KCMD_STATE_NEW) { + fh->sched.new_kcmd_tail[p] = + kcmd; + break; + } + } + + list_move_tail(&fh->list, &psys->fhs); + removed++; +next: + mutex_unlock(&fh->mutex); + if (fh == fh_last) + break; + fh = fh_next; + } while (1); + } while (removed > 0); + } +} + +/* + * Move kcmd into completed state. If kcmd is currently running, + * abort it. + */ +int ipu_psys_kcmd_abort(struct ipu_psys *psys, struct ipu_psys_kcmd *kcmd) +{ + int ret = 0; + + if (kcmd->state == KCMD_STATE_COMPLETE) + return 0; + + if ((kcmd->state == KCMD_STATE_RUNNING || + kcmd->state == KCMD_STATE_STARTED)) { + ret = ipu_fw_psys_pg_abort(kcmd); + if (ret) { + dev_err(&psys->adev->dev, "failed to abort kcmd!\n"); + goto out; + } + } + +out: + ipu_psys_kcmd_complete(psys, kcmd, ret); + + return ret; +} + +/* + * Submit kcmd into psys queue. If running fails, complete the kcmd + * with an error. + */ +static int ipu_psys_kcmd_start(struct ipu_psys *psys, + struct ipu_psys_kcmd *kcmd) +{ + /* + * Found a runnable PG. Move queue to the list tail for round-robin + * scheduling and run the PG. Start the watchdog timer if the PG was + * started successfully. Enable PSYS power if requested. 
+ */ + int ret; + + if (psys->adev->isp->flr_done) { + ipu_psys_kcmd_complete(psys, kcmd, -EIO); + return -EIO; + } + + ret = pm_runtime_get_sync(&psys->adev->dev); + if (ret < 0) { + dev_err(&psys->adev->dev, "failed to power on PSYS\n"); + ipu_psys_kcmd_complete(psys, kcmd, -EIO); + pm_runtime_put_noidle(&psys->adev->dev); + return ret; + } + + if (early_pg_transfer && kcmd->pg_user && kcmd->kpg->pg) + memcpy(kcmd->pg_user, kcmd->kpg->pg, kcmd->kpg->pg_size); + + ret = ipu_fw_psys_pg_start(kcmd); + if (ret) { + dev_err(&psys->adev->dev, "failed to start kcmd!\n"); + goto error; + } + + ipu_fw_psys_pg_dump(psys, kcmd, "run"); + + /* + * Starting from scci_master_20151228_1800, pg start api is split into + * two different calls, making driver responsible to flush pg between + * start and disown library calls. + */ + clflush_cache_range(kcmd->kpg->pg, kcmd->kpg->pg_size); + ret = ipu_fw_psys_pg_disown(kcmd); + if (ret) { + dev_err(&psys->adev->dev, "failed to start kcmd!\n"); + goto error; + } + + trace_ipu_pg_kcmd(__func__, kcmd->user_token, kcmd->issue_id, + kcmd->priority, + ipu_fw_psys_pg_get_id(kcmd), + ipu_fw_psys_pg_load_cycles(kcmd), + ipu_fw_psys_pg_init_cycles(kcmd), + ipu_fw_psys_pg_processing_cycles(kcmd)); + + switch (kcmd->state) { + case KCMD_STATE_RUN_PREPARED: + kcmd->state = KCMD_STATE_RUNNING; + psys->active_kcmds++; + kcmd->watchdog.expires = jiffies + + msecs_to_jiffies(psys->timeout); + add_timer(&kcmd->watchdog); + break; + case KCMD_STATE_START_PREPARED: + kcmd->state = KCMD_STATE_STARTED; + psys->started_kcmds++; + list_add_tail(&kcmd->started_list, &psys->started_kcmds_list); + break; + default: + WARN_ON(1); + ret = -EINVAL; + goto error; + } + return 0; + +error: + dev_err(&psys->adev->dev, "failed to start process group\n"); + ipu_psys_kcmd_complete(psys, kcmd, -EIO); + return ret; +} + +/* + * Move all kcmds in all queues forcily into completed state. 
+ */ +static void ipu_psys_flush_kcmds(struct ipu_psys *psys, int error) +{ + struct ipu_psys_fh *fh; + struct ipu_psys_kcmd *kcmd; + int p; + + dev_err(&psys->dev, "flushing all commands with error: %d\n", error); + + list_for_each_entry(fh, &psys->fhs, list) { + mutex_lock(&fh->mutex); + for (p = 0; p < IPU_PSYS_CMD_PRIORITY_NUM; p++) { + fh->sched.new_kcmd_tail[p] = NULL; + list_for_each_entry(kcmd, &fh->sched.kcmds[p], list) { + if (kcmd->state == KCMD_STATE_COMPLETE) + continue; + ipu_psys_kcmd_complete(psys, kcmd, error); + } + } + mutex_unlock(&fh->mutex); + } +} + +/* + * Abort all currently running process groups and reset PSYS + * by power cycling it. PSYS power must not be acquired + * except by running kcmds when calling this. + */ +static void ipu_psys_reset(struct ipu_psys *psys) +{ +#ifdef CONFIG_PM + struct device *d = &psys->adev->isp->psys_iommu->dev; + int r; + + pm_runtime_dont_use_autosuspend(&psys->adev->dev); + r = pm_runtime_get_sync(d); + if (r < 0) { + pm_runtime_put_noidle(d); + dev_err(&psys->adev->dev, "power management failed\n"); + return; + } + + ipu_psys_flush_kcmds(psys, -EIO); + flush_workqueue(pm_wq); + r = pm_runtime_put_sync(d); /* Turn big red power knob off here */ + /* Power was successfully turned off if and only if zero was returned */ + if (r) + dev_warn(&psys->adev->dev, + "power management failed, PSYS reset may be incomplete\n"); + pm_runtime_use_autosuspend(&psys->adev->dev); + ipu_psys_run_next(psys); +#else + dev_err(&psys->adev->dev, + "power management disabled, can not reset PSYS\n"); +#endif +} + +void ipu_psys_watchdog_work(struct work_struct *work) +{ + struct ipu_psys *psys = container_of(work, + struct ipu_psys, watchdog_work); + struct ipu_psys_fh *fh; + + mutex_lock(&psys->mutex); + + /* Loop over all running kcmds */ + list_for_each_entry(fh, &psys->fhs, list) { + int p, r; + + mutex_lock(&fh->mutex); + for (p = 0; p < IPU_PSYS_CMD_PRIORITY_NUM; p++) { + struct ipu_psys_kcmd *kcmd; + + 
list_for_each_entry(kcmd, &fh->sched.kcmds[p], list) { + if (fh->sched.new_kcmd_tail[p] == kcmd) + break; + if (kcmd->state != KCMD_STATE_RUNNING) + continue; + + if (timer_pending(&kcmd->watchdog)) + continue; + /* Found an expired but running command */ + dev_err(&psys->adev->dev, + "kcmd:0x%llx[0x%llx] taking too long\n", + kcmd->user_token, kcmd->issue_id); + r = ipu_psys_kcmd_abort(psys, kcmd); + if (r) + goto stop_failed; + } + } + mutex_unlock(&fh->mutex); + } + + /* Kick command scheduler thread */ + atomic_set(&psys->wakeup_sched_thread_count, 1); + wake_up_interruptible(&psys->sched_cmd_wq); + mutex_unlock(&psys->mutex); + return; + +stop_failed: + mutex_unlock(&fh->mutex); + ipu_psys_reset(psys); + mutex_unlock(&psys->mutex); +} + +#if LINUX_VERSION_CODE <= KERNEL_VERSION(4, 14, 2) +static void ipu_psys_watchdog(unsigned long data) +{ + struct ipu_psys_kcmd *kcmd = (struct ipu_psys_kcmd *)data; +#else +static void ipu_psys_watchdog(struct timer_list *t) +{ + struct ipu_psys_kcmd *kcmd = from_timer(kcmd, t, watchdog); +#endif + struct ipu_psys *psys = kcmd->fh->psys; + + queue_work(IPU_PSYS_WORK_QUEUE, &psys->watchdog_work); +} + +static int ipu_psys_config_legacy_pg(struct ipu_psys_kcmd *kcmd) +{ + struct ipu_psys *psys = kcmd->fh->psys; + unsigned int i; + int ret; + + ret = ipu_fw_psys_pg_set_ipu_vaddress(kcmd, kcmd->kpg->pg_dma_addr); + if (ret) { + ret = -EIO; + goto error; + } + + for (i = 0; i < kcmd->nbuffers; i++) { + struct ipu_fw_psys_terminal *terminal; + u32 buffer; + + terminal = ipu_fw_psys_pg_get_terminal(kcmd, i); + if (!terminal) + continue; + + buffer = (u32) kcmd->kbufs[i]->dma_addr + + kcmd->buffers[i].data_offset; + + ret = ipu_fw_psys_terminal_set(terminal, i, kcmd, + buffer, kcmd->kbufs[i]->len); + if (ret == -EAGAIN) + continue; + + if (ret) { + dev_err(&psys->adev->dev, "Unable to set terminal\n"); + goto error; + } + } + + ipu_fw_psys_pg_set_token(kcmd, (uintptr_t) kcmd); + + ret = ipu_fw_psys_pg_submit(kcmd); + if (ret) { + 
dev_err(&psys->adev->dev, "failed to submit kcmd!\n"); + goto error; + } + + return 0; + +error: + dev_err(&psys->adev->dev, "failed to config legacy pg\n"); + return ret; +} + +static bool ipu_psys_kcmd_is_valid(struct ipu_psys *psys, + struct ipu_psys_kcmd *kcmd) +{ + struct ipu_psys_fh *fh; + struct ipu_psys_kcmd *kcmd0; + int p; + + list_for_each_entry(fh, &psys->fhs, list) { + mutex_lock(&fh->mutex); + for (p = 0; p < IPU_PSYS_CMD_PRIORITY_NUM; p++) { + list_for_each_entry(kcmd0, &fh->sched.kcmds[p], list) { + if (kcmd0 == kcmd) { + mutex_unlock(&fh->mutex); + return true; + } + } + } + mutex_unlock(&fh->mutex); + } + + return false; +} + +static int ipu_psys_kcmd_queue(struct ipu_psys *psys, + struct ipu_psys_kcmd *kcmd) +{ + int ret; + + if (kcmd->state != KCMD_STATE_NEW) { + WARN_ON(1); + return -EINVAL; + } + + if (!psys->started_kcmds) { + ret = ipu_psys_allocate_resources(&psys->adev->dev, + kcmd->kpg->pg, + kcmd->pg_manifest, + &kcmd->kpg->resource_alloc, + &psys->resource_pool_running); + if (!ret) { + if (kcmd->state == KCMD_STATE_NEW) + kcmd->state = KCMD_STATE_RUN_PREPARED; + return ipu_psys_kcmd_start(psys, kcmd); + } + + if (ret != -ENOSPC || !psys->active_kcmds) { + dev_err(&psys->adev->dev, + "kcmd %p failed to alloc resources (running)\n", + kcmd); + ipu_psys_kcmd_complete(psys, kcmd, ret); + /* kcmd_complete doesn't handle PM for KCMD_STATE_NEW */ + pm_runtime_put(&psys->adev->dev); + return -EINVAL; + } + } + + ret = ipu_psys_allocate_resources(&psys->adev->dev, + kcmd->kpg->pg, + kcmd->pg_manifest, + &kcmd->kpg->resource_alloc, + &psys->resource_pool_started); + if (!ret) { + kcmd->state = KCMD_STATE_START_PREPARED; + return ipu_psys_kcmd_start(psys, kcmd); + } + + if (ret != -ENOSPC || !psys->started_kcmds) { + dev_err(&psys->adev->dev, + "kcmd %p failed to alloc resources (started)\n", kcmd); + ipu_psys_kcmd_complete(psys, kcmd, ret); + /* kcmd_complete doesn't handle PM for KCMD_STATE_NEW */ + pm_runtime_put(&psys->adev->dev); + ret = 
-EINVAL; + } + return ret; +} + +int ipu_psys_kcmd_new(struct ipu_psys_command *cmd, struct ipu_psys_fh *fh) +{ + struct ipu_psys *psys = fh->psys; + struct ipu_psys_kcmd *kcmd; + size_t pg_size; + int ret; + + if (psys->adev->isp->flr_done) + return -EIO; + + kcmd = ipu_psys_copy_cmd(cmd, fh); + if (!kcmd) + return -EINVAL; + +#if LINUX_VERSION_CODE <= KERNEL_VERSION(4, 14, 2) + init_timer(&kcmd->watchdog); + kcmd->watchdog.data = (unsigned long)kcmd; + kcmd->watchdog.function = &ipu_psys_watchdog; +#else + timer_setup(&kcmd->watchdog, ipu_psys_watchdog, 0); +#endif + + if (cmd->min_psys_freq) { + kcmd->constraint.min_freq = cmd->min_psys_freq; + ipu_buttress_add_psys_constraint(psys->adev->isp, + &kcmd->constraint); + } + + pg_size = ipu_fw_psys_pg_get_size(kcmd); + if (pg_size > kcmd->kpg->pg_size) { + dev_dbg(&psys->adev->dev, "pg size mismatch %zu %zu\n", + pg_size, kcmd->kpg->pg_size); + ret = -EINVAL; + goto error; + } + + ret = ipu_psys_config_legacy_pg(kcmd); + if (ret) + goto error; + + mutex_lock(&fh->mutex); + list_add_tail(&kcmd->list, &fh->sched.kcmds[cmd->priority]); + if (!fh->sched.new_kcmd_tail[cmd->priority] && + kcmd->state == KCMD_STATE_NEW) { + fh->sched.new_kcmd_tail[cmd->priority] = kcmd; + /* Kick command scheduler thread */ + atomic_set(&psys->wakeup_sched_thread_count, 1); + wake_up_interruptible(&psys->sched_cmd_wq); + } + mutex_unlock(&fh->mutex); + + dev_dbg(&psys->adev->dev, + "IOC_QCMD: user_token:%llx issue_id:0x%llx pri:%d\n", + cmd->user_token, cmd->issue_id, cmd->priority); + + return 0; + +error: + ipu_psys_kcmd_free(kcmd); + + return ret; +} + +void ipu_psys_handle_events(struct ipu_psys *psys) +{ + struct ipu_psys_kcmd *kcmd = NULL; + struct ipu_fw_psys_event event; + bool error; + + do { + memset(&event, 0, sizeof(event)); + if (!ipu_fw_psys_rcv_event(psys, &event)) + break; + + error = false; + kcmd = (struct ipu_psys_kcmd *)(unsigned long)event.token; + error = IS_ERR_OR_NULL(kcmd) ? 
true : false; + + dev_dbg(&psys->adev->dev, "psys received event status:%d\n", + event.status); + + if (error) { + dev_err(&psys->adev->dev, + "no token received, command unknown\n"); + pm_runtime_put(&psys->adev->dev); + ipu_psys_reset(psys); + pm_runtime_get(&psys->adev->dev); + break; + } + + if (ipu_psys_kcmd_is_valid(psys, kcmd)) + ipu_psys_kcmd_complete(psys, kcmd, + event.status == + IPU_PSYS_EVENT_CMD_COMPLETE || + event.status == + IPU_PSYS_EVENT_FRAGMENT_COMPLETE + ? 0 : -EIO); + /* Kick command scheduler thread */ + atomic_set(&psys->wakeup_sched_thread_count, 1); + wake_up_interruptible(&psys->sched_cmd_wq); + } while (1); +} + +int ipu_psys_fh_init(struct ipu_psys_fh *fh) +{ + struct ipu_psys *psys = fh->psys; + int p; + + pm_runtime_use_autosuspend(&psys->adev->dev); + for (p = 0; p < IPU_PSYS_CMD_PRIORITY_NUM; p++) + INIT_LIST_HEAD(&fh->sched.kcmds[p]); + + return 0; +} + +int ipu_psys_fh_deinit(struct ipu_psys_fh *fh) +{ + struct ipu_psys *psys = fh->psys; + struct ipu_psys_kcmd *kcmd, *kcmd0; + int p; + + mutex_lock(&psys->mutex); + mutex_lock(&fh->mutex); + + /* + * Set pg_user to NULL so that completed kcmds don't write + * their result to user space anymore. 
+ */ + for (p = 0; p < IPU_PSYS_CMD_PRIORITY_NUM; p++) + list_for_each_entry(kcmd, &fh->sched.kcmds[p], list) + kcmd->pg_user = NULL; + + /* Prevent scheduler from running more kcmds */ + memset(fh->sched.new_kcmd_tail, 0, + sizeof(fh->sched.new_kcmd_tail)); + + /* Wait until kcmds are completed in this queue and free them */ + for (p = 0; p < IPU_PSYS_CMD_PRIORITY_NUM; p++) { + fh->sched.new_kcmd_tail[p] = NULL; + list_for_each_entry_safe( + kcmd, kcmd0, &fh->sched.kcmds[p], list) { + ipu_psys_kcmd_abort(psys, kcmd); + ipu_psys_kcmd_free(kcmd); + } + } + + /* disable runtime autosuspend for the last fh */ + if (list_empty(&psys->fhs)) + pm_runtime_dont_use_autosuspend(&psys->adev->dev); + + mutex_unlock(&fh->mutex); + mutex_unlock(&psys->mutex); + + return 0; +} + +struct ipu_psys_kcmd *__ipu_get_completed_kcmd(struct ipu_psys_fh *fh) +{ + int p; + + for (p = 0; p < IPU_PSYS_CMD_PRIORITY_NUM; p++) { + struct ipu_psys_kcmd *kcmd; + + if (list_empty(&fh->sched.kcmds[p])) + continue; + + kcmd = list_first_entry(&fh->sched.kcmds[p], + struct ipu_psys_kcmd, list); + if (kcmd->state != KCMD_STATE_COMPLETE) + continue; + /* Found a kcmd in completed state */ + return kcmd; + + } + + return NULL; +} + +struct ipu_psys_kcmd *ipu_get_completed_kcmd(struct ipu_psys_fh *fh) +{ + struct ipu_psys_kcmd *kcmd; + + mutex_lock(&fh->mutex); + kcmd = __ipu_get_completed_kcmd(fh); + mutex_unlock(&fh->mutex); + + return kcmd; +} + +long ipu_ioctl_dqevent(struct ipu_psys_event *event, + struct ipu_psys_fh *fh, unsigned int f_flags) +{ + struct ipu_psys *psys = fh->psys; + struct ipu_psys_kcmd *kcmd = NULL; + int rval; + + dev_dbg(&psys->adev->dev, "IOC_DQEVENT\n"); + + if (!(f_flags & O_NONBLOCK)) { + rval = wait_event_interruptible(fh->wait, + (kcmd = + ipu_get_completed_kcmd(fh))); + if (rval == -ERESTARTSYS) + return rval; + } + + mutex_lock(&fh->mutex); + if (!kcmd) { + kcmd = __ipu_get_completed_kcmd(fh); + if (!kcmd) { + mutex_unlock(&fh->mutex); + return -ENODATA; + } + } + + 
*event = kcmd->ev; + ipu_psys_kcmd_free(kcmd); + mutex_unlock(&fh->mutex); + + return 0; +} diff --git a/drivers/media/pci/intel/ipu4/ipu4-resources.c b/drivers/media/pci/intel/ipu4/ipu4-resources.c new file mode 100644 index 0000000000000..097ea1bb7ed91 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-resources.c @@ -0,0 +1,461 @@ +// SPDX-License-Identifier: GPL-2.0 +// Copyright (C) 2015 - 2018 Intel Corporation + +#include +#include +#include +#include +#include + +#include + +#include "ipu-fw-psys.h" +#include "ipu-psys.h" + +static int ipu_resource_init(struct ipu_resource *res, u32 id, int elements) +{ + if (elements <= 0) { + res->bitmap = NULL; + return 0; + } + + res->bitmap = kcalloc(BITS_TO_LONGS(elements), sizeof(long), + GFP_KERNEL); + if (!res->bitmap) + return -ENOMEM; + res->elements = elements; + res->id = id; + return 0; +} + +static unsigned long +ipu_resource_alloc(struct ipu_resource *res, int n, + struct ipu_resource_alloc *alloc, + enum ipu_resource_type type) +{ + unsigned long p; + + if (n <= 0) { + alloc->elements = 0; + return 0; + } + + if (!res->bitmap) + return (unsigned long)(-ENOSPC); + + p = bitmap_find_next_zero_area(res->bitmap, res->elements, 0, n, 0); + alloc->resource = NULL; + + if (p >= res->elements) + return (unsigned long)(-ENOSPC); + bitmap_set(res->bitmap, p, n); + alloc->resource = res; + alloc->elements = n; + alloc->pos = p; + alloc->type = type; + + return p; +} + +static void ipu_resource_free(struct ipu_resource_alloc *alloc) +{ + if (alloc->elements <= 0) + return; + + if (alloc->type == IPU_RESOURCE_DFM) + *alloc->resource->bitmap &= ~(unsigned long)(alloc->elements); + else + bitmap_clear(alloc->resource->bitmap, alloc->pos, + alloc->elements); + alloc->resource = NULL; +} + +static void ipu_resource_cleanup(struct ipu_resource *res) +{ + kfree(res->bitmap); + res->bitmap = NULL; +} + +/********** IPU PSYS-specific resource handling **********/ + +int ipu_psys_resource_pool_init(struct 
ipu_psys_resource_pool + *pool) +{ + int i, j, k, ret; + + pool->cells = 0; + + for (i = 0; i < res_defs->num_dev_channels; i++) { + ret = ipu_resource_init(&pool->dev_channels[i], i, + res_defs->dev_channels[i]); + if (ret) + goto error; + } + + for (j = 0; j < res_defs->num_ext_mem_ids; j++) { + ret = ipu_resource_init(&pool->ext_memory[j], j, + res_defs->ext_mem_ids[j]); + if (ret) + goto memory_error; + } + + for (k = 0; k < res_defs->num_dfm_ids; k++) { + ret = ipu_resource_init(&pool->dfms[k], k, res_defs->dfms[k]); + if (ret) + goto dfm_error; + } + + return 0; + +dfm_error: + for (k--; k >= 0; k--) + ipu_resource_cleanup(&pool->dfms[k]); + +memory_error: + for (j--; j >= 0; j--) + ipu_resource_cleanup(&pool->ext_memory[j]); + +error: + for (i--; i >= 0; i--) + ipu_resource_cleanup(&pool->dev_channels[i]); + return ret; +} + + +void ipu_psys_resource_pool_cleanup(struct ipu_psys_resource_pool + *pool) +{ + u32 i; + + for (i = 0; i < res_defs->num_dev_channels; i++) + ipu_resource_cleanup(&pool->dev_channels[i]); + + for (i = 0; i < res_defs->num_ext_mem_ids; i++) + ipu_resource_cleanup(&pool->ext_memory[i]); + + for (i = 0; i < res_defs->num_dfm_ids; i++) + ipu_resource_cleanup(&pool->dfms[i]); +} + +static int ipu_psys_allocate_one_resource(const struct device *dev, + struct ipu_fw_psys_process *process, + struct ipu_resource *resource, + struct ipu_fw_generic_program_manifest *pm, + u32 resource_id, + struct ipu_psys_resource_alloc *alloc) +{ + const u16 resource_req = pm->dev_chn_size[resource_id]; + unsigned long retl; + + if (resource_req <= 0) + return 0; + + if (alloc->resources >= IPU_MAX_RESOURCES) { + dev_err(dev, "out of resource handles\n"); + return -ENOSPC; + } + retl = ipu_resource_alloc + (resource, resource_req, + &alloc->resource_alloc[alloc->resources], + IPU_RESOURCE_DEV_CHN); + if (IS_ERR_VALUE(retl)) { + dev_dbg(dev, "out of device channel resources\n"); + return (int)retl; + } + alloc->resources++; + + return 0; +} + +/* + * 
ext_mem_type_id is a generic type id for memory (like DMEM, VMEM) + * ext_mem_bank_id is detailed type id for memory (like DMEM0, DMEM1 etc.) + */ +static int ipu_psys_allocate_memory_resource( + const struct device *dev, + struct ipu_fw_psys_process *process, + struct ipu_resource *resource, + struct ipu_fw_generic_program_manifest *pm, + u32 ext_mem_type_id, u32 ext_mem_bank_id, + struct ipu_psys_resource_alloc *alloc) +{ + const u16 memory_resource_req = pm->ext_mem_size[ext_mem_type_id]; + + unsigned long retl; + + if (memory_resource_req <= 0) + return 0; + + if (alloc->resources >= IPU_MAX_RESOURCES) { + dev_err(dev, "out of resource handles\n"); + return -ENOSPC; + } + retl = ipu_resource_alloc + (resource, memory_resource_req, + &alloc->resource_alloc[alloc->resources], + IPU_RESOURCE_EXT_MEM); + if (IS_ERR_VALUE(retl)) { + dev_dbg(dev, "out of memory resources\n"); + return (int)retl; + } + + alloc->resources++; + + return 0; +} + +/* + * Allocate resources for pg from `pool'. Mark the allocated + * resources into `alloc'. Returns 0 on success, -ENOSPC + * if there are no enough resources, in which cases resources + * are not allocated at all, or some other error on other conditions. 
+ */ +int ipu_psys_allocate_resources(const struct device *dev, + struct ipu_fw_psys_process_group *pg, + void *pg_manifest, + struct ipu_psys_resource_alloc + *alloc, struct ipu_psys_resource_pool + *pool) +{ + u32 resid; + u32 mem_type_id; + int ret, i; + u16 *process_offset_table; + u8 processes; + u32 cells = 0; + + if (!pg) + return -EINVAL; + process_offset_table = (u16 *)((u8 *) pg + pg->processes_offset); + processes = pg->process_count; + + for (i = 0; i < processes; i++) { + u32 cell; + struct ipu_fw_psys_process *process = + (struct ipu_fw_psys_process *) + ((char *)pg + process_offset_table[i]); + struct ipu_fw_generic_program_manifest pm; + + memset(&pm, 0, sizeof(pm)); + if (!process) { + dev_err(dev, "can not get process\n"); + ret = -ENOENT; + goto free_out; + } + + ret = ipu_fw_psys_get_program_manifest_by_process(&pm, + pg_manifest, + process); + if (ret < 0) { + dev_err(dev, "can not get manifest\n"); + goto free_out; + } + + if (pm.cell_id == res_defs->num_cells && + pm.cell_type_id == res_defs->num_cells_type) { + dev_dbg(dev, "ignore the cell requirement\n"); + cell = res_defs->num_cells; + } else if ((pm.cell_id != res_defs->num_cells && + pm.cell_type_id == res_defs->num_cells_type)) { + cell = ipu_fw_psys_get_process_cell_id(process, 0); + } else { + /* Find a free cell of desired type */ + u32 type = pm.cell_type_id; + + for (cell = 0; cell < res_defs->num_cells; cell++) + if (res_defs->cells[cell] == type && + ((pool->cells | cells) & (1 << cell)) == 0) + break; + if (cell >= res_defs->num_cells) { + dev_dbg(dev, "no free cells of right type\n"); + ret = -ENOSPC; + goto free_out; + } + ret = ipu_fw_psys_set_process_cell_id(process, 0, cell); + if (ret) + goto free_out; + } + if (cell < res_defs->num_cells) + cells |= 1 << cell; + if (pool->cells & cells) { + dev_dbg(dev, "out of cell resources\n"); + ret = -ENOSPC; + goto free_out; + } + if (pm.dev_chn_size) { + for (resid = 0; resid < res_defs->num_dev_channels; resid++) { + ret = 
ipu_psys_allocate_one_resource + (dev, process, + &pool->dev_channels[resid], &pm, resid, alloc); + if (ret) + goto free_out; + ret = ipu_fw_psys_set_process_dev_chn_offset(process, resid, + alloc->resource_alloc[alloc->resources - 1].pos); + if (ret) + goto free_out; + } + } + + if (pm.ext_mem_size) { + for (mem_type_id = 0; + mem_type_id < res_defs->num_ext_mem_types; mem_type_id++) { + u32 mem_bank_id = res_defs->num_ext_mem_ids; + + if (cell != res_defs->num_cells) + mem_bank_id = + res_defs->cell_mem[res_defs->cell_mem_row * + cell + mem_type_id]; + if (mem_bank_id == res_defs->num_ext_mem_ids) + continue; + + ret = ipu_psys_allocate_memory_resource + (dev, process, + &pool->ext_memory[mem_bank_id], + &pm, mem_type_id, mem_bank_id, alloc); + if (ret) + goto free_out; + /* no return value check here because fw api will + * do some checks, and would return non-zero + * except mem_type_id == 0. This may be caused by that + * above flow if allocating mem_bank_id is improper + */ + ipu_fw_psys_set_process_ext_mem + (process, mem_type_id, mem_bank_id, + alloc->resource_alloc[alloc->resources - 1].pos); + } + } + } + alloc->cells |= cells; + pool->cells |= cells; + return 0; + +free_out: + for (; i >= 0; i--) { + struct ipu_fw_psys_process *process = + (struct ipu_fw_psys_process *) + ((char *)pg + process_offset_table[i]); + struct ipu_fw_generic_program_manifest pm; + int retval; + + if (!process) + break; + + retval = ipu_fw_psys_get_program_manifest_by_process + (&pm, pg_manifest, process); + if (retval < 0) + break; + if ((pm.cell_id != res_defs->num_cells && + pm.cell_type_id == res_defs->num_cells_type)) + continue; + /* no return value check here because if finding free cell + * failed, process cell would not set then calling clear_cell + * will return non-zero. 
+ */ + ipu_fw_psys_clear_process_cell(process); + } + dev_dbg(dev, "failed to allocate resources, ret %d\n", ret); + ipu_psys_free_resources(alloc, pool); + return ret; +} + +int ipu_psys_move_resources(const struct device *dev, + struct ipu_psys_resource_alloc *alloc, + struct ipu_psys_resource_pool + *source_pool, struct ipu_psys_resource_pool + *target_pool) +{ + int i; + + if (target_pool->cells & alloc->cells) { + dev_dbg(dev, "out of cell resources\n"); + return -ENOSPC; + } + + for (i = 0; i < alloc->resources; i++) { + unsigned long bitmap = 0; + unsigned int id = alloc->resource_alloc[i].resource->id; + unsigned long fbit, end; + + switch (alloc->resource_alloc[i].type) { + case IPU_RESOURCE_DEV_CHN: + bitmap_set(&bitmap, alloc->resource_alloc[i].pos, + alloc->resource_alloc[i].elements); + if (*target_pool->dev_channels[id].bitmap & bitmap) + return -ENOSPC; + break; + case IPU_RESOURCE_EXT_MEM: + end = alloc->resource_alloc[i].elements + + alloc->resource_alloc[i].pos; + + fbit = find_next_bit(target_pool->ext_memory[id].bitmap, + end, alloc->resource_alloc[i].pos); + /* if find_next_bit returns "end" it didn't find 1bit */ + if (end != fbit) + return -ENOSPC; + break; + case IPU_RESOURCE_DFM: + bitmap = alloc->resource_alloc[i].elements; + if (*target_pool->dfms[id].bitmap & bitmap) + return -ENOSPC; + break; + default: + dev_err(dev, "Illegal resource type\n"); + return -EINVAL; + } + } + + for (i = 0; i < alloc->resources; i++) { + u32 id = alloc->resource_alloc[i].resource->id; + + switch (alloc->resource_alloc[i].type) { + case IPU_RESOURCE_DEV_CHN: + bitmap_set(target_pool->dev_channels[id].bitmap, + alloc->resource_alloc[i].pos, + alloc->resource_alloc[i].elements); + ipu_resource_free(&alloc->resource_alloc[i]); + alloc->resource_alloc[i].resource = + &target_pool->dev_channels[id]; + break; + case IPU_RESOURCE_EXT_MEM: + bitmap_set(target_pool->ext_memory[id].bitmap, + alloc->resource_alloc[i].pos, + alloc->resource_alloc[i].elements); + 
ipu_resource_free(&alloc->resource_alloc[i]); + alloc->resource_alloc[i].resource = + &target_pool->ext_memory[id]; + break; + case IPU_RESOURCE_DFM: + *target_pool->dfms[id].bitmap |= + alloc->resource_alloc[i].elements; + *alloc->resource_alloc[i].resource->bitmap &= + ~(alloc->resource_alloc[i].elements); + alloc->resource_alloc[i].resource = + &target_pool->dfms[id]; + break; + default: + /* + * Just keep compiler happy. This case failed already + * in above loop. + */ + break; + } + } + + target_pool->cells |= alloc->cells; + source_pool->cells &= ~alloc->cells; + + return 0; +} + +/* Free resources marked in `alloc' from `resources' */ +void ipu_psys_free_resources(struct ipu_psys_resource_alloc + *alloc, struct ipu_psys_resource_pool *pool) +{ + unsigned int i; + + pool->cells &= ~alloc->cells; + alloc->cells = 0; + for (i = 0; i < alloc->resources; i++) + ipu_resource_free(&alloc->resource_alloc[i]); + alloc->resources = 0; +} diff --git a/drivers/media/pci/intel/ipu4/ipu4.c b/drivers/media/pci/intel/ipu4/ipu4.c new file mode 100644 index 0000000000000..c3615d225431b --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4.c @@ -0,0 +1,572 @@ +// SPDX-License-Identifier: GPL-2.0 +// Copyright (C) 2018 Intel Corporation + +#include +#include +#include +#include +#include + +#include "ipu.h" +#include "ipu-cpd.h" +#include "ipu-isys.h" +#include "ipu-buttress.h" +#include "ipu-psys.h" +#include "ipu-platform.h" +#include "ipu-platform-regs.h" +#include "ipu-platform-buttress-regs.h" + +#ifdef CONFIG_VIDEO_INTEL_IPU4 +static struct ipu_receiver_electrical_params ipu4_ev_params[] = { + {0, 1500000000ul / 2, IPU_PCI_ID, IPU_HW_BXT_P_B1_REV, + .rcomp_val_combo = 11, + .rcomp_val_legacy = 11, + .ports[0].crc_val = 18, + .ports[0].drc_val = 29, + .ports[0].drc_val_combined = 29, + .ports[0].ctle_val = 4, + .ports[1].crc_val = 18, + .ports[1].drc_val = 29, + .ports[1].drc_val_combined = 31, + .ports[1].ctle_val = 4 + }, + {0, 1500000000ul / 2, IPU_PCI_ID, 
IPU_HW_BXT_P_D0_REV, + .rcomp_val_combo = 11, + .rcomp_val_legacy = 11, + .ports[0].crc_val = 18, + .ports[0].drc_val = 29, + .ports[0].drc_val_combined = 29, + .ports[0].ctle_val = 4, + .ports[1].crc_val = 18, + .ports[1].drc_val = 29, + .ports[1].drc_val_combined = 31, + .ports[1].ctle_val = 4 + }, + {0, 1500000000ul / 2, IPU_PCI_ID, IPU_HW_BXT_P_E0_REV, + .rcomp_val_combo = 11, + .rcomp_val_legacy = 11, + .ports[0].crc_val = 18, + .ports[0].drc_val = 29, + .ports[0].drc_val_combined = 29, + .ports[0].ctle_val = 4, + .ports[1].crc_val = 18, + .ports[1].drc_val = 29, + .ports[1].drc_val_combined = 31, + .ports[1].ctle_val = 4 + }, + {}, +}; + +static unsigned int ipu4_csi_offsets[] = { + 0x64000, 0x65000, 0x66000, 0x67000, 0x6C000, 0x6C800 +}; + +static unsigned char ipu4_csi_evlanecombine[] = { + 0, 0, 0, 0, 2, 0 +}; + +static unsigned int ipu4_tpg_offsets[] = { + IPU_TPG0_ADDR_OFFSET, + IPU_TPG1_ADDR_OFFSET +}; + +static unsigned int ipu4_tpg_sels[] = { + IPU_GPOFFSET + IPU_GPREG_MIPI_PKT_GEN0_SEL, + IPU_COMBO_GPOFFSET + IPU_GPREG_MIPI_PKT_GEN1_SEL +}; + +const struct ipu_isys_internal_pdata isys_ipdata = { + .csi2 = { + .nports = ARRAY_SIZE(ipu4_csi_offsets), + .offsets = ipu4_csi_offsets, + .evparams = ipu4_ev_params, + .evlanecombine = ipu4_csi_evlanecombine, + .evsetmask0 = 1 << 4, /* CSI port 4 */ + .evsetmask1 = 1 << 5, /* CSI port 5 */ + }, + .tpg = { + .ntpgs = ARRAY_SIZE(ipu4_tpg_offsets), + .offsets = ipu4_tpg_offsets, + .sels = ipu4_tpg_sels, + }, + .hw_variant = { + .offset = IPU_ISYS_OFFSET, + .nr_mmus = 2, + .mmu_hw = { + { + .offset = IPU_ISYS_IOMMU0_OFFSET, + .info_bits = + IPU_INFO_REQUEST_DESTINATION_PRIMARY, + .nr_l1streams = 0, + .nr_l2streams = 0, + .insert_read_before_invalidate = true, + }, + { + .offset = IPU_ISYS_IOMMU1_OFFSET, + .info_bits = IPU_INFO_STREAM_ID_SET(0), + .nr_l1streams = IPU_MMU_MAX_TLB_L1_STREAMS, + .l1_block_sz = { + 8, 16, 16, 16, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 8 + }, + .l1_zlw_en = { + 1, 1, 1, 1, 0, 0, 0, 0, 0, 
0, + 0, 0, 0, 0, 0, 0 + }, + .l1_zlw_1d_mode = { + 0, 1, 1, 1, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0 + }, + .l1_ins_zlw_ahead_pages = { + 0, 3, 3, 3, 0, 0, + 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0 + }, + .l1_zlw_2d_mode = { + 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0 + }, + .nr_l2streams = IPU_MMU_MAX_TLB_L2_STREAMS, + .l2_block_sz = { + 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 2, 2, 2, 2, 2, 2 + }, + .insert_read_before_invalidate = false, + .zlw_invalidate = true, + .l1_stream_id_reg_offset = + IPU_MMU_L1_STREAM_ID_REG_OFFSET, + .l2_stream_id_reg_offset = + IPU_MMU_L2_STREAM_ID_REG_OFFSET, + }, + }, + .dmem_offset = IPU_ISYS_DMEM_OFFSET, + .spc_offset = IPU_ISYS_SPC_OFFSET, + }, + .num_parallel_streams = IPU_ISYS_NUM_STREAMS, + .isys_dma_overshoot = IPU_ISYS_OVERALLOC_MIN, +}; + +const struct ipu_psys_internal_pdata psys_ipdata = { + .hw_variant = { + .offset = IPU_PSYS_OFFSET, + .nr_mmus = 3, + .mmu_hw = { + { + .offset = IPU_PSYS_IOMMU0_OFFSET, + .info_bits = + IPU_INFO_REQUEST_DESTINATION_PRIMARY, + .nr_l1streams = 0, + .nr_l2streams = 0, + .insert_read_before_invalidate = true, + }, + { + .offset = IPU_PSYS_IOMMU1_OFFSET, + .info_bits = IPU_INFO_STREAM_ID_SET(0), + .nr_l1streams = IPU_MMU_MAX_TLB_L1_STREAMS, + .l1_block_sz = { + 0, 0, 0, 0, 10, 8, 10, 8, 0, + 4, 4, 12, 0, 0, 0, 8 + }, + .l1_zlw_en = { + 0, 0, 0, 0, 1, 1, 1, 1, 0, 1, + 1, 1, 0, 0, 0, 0 + }, + .l1_zlw_1d_mode = { + 0, 0, 0, 0, 1, 1, 1, 1, 0, + 1, 1, 1, 0, 0, 0, 0 + }, + .l1_ins_zlw_ahead_pages = { + 0, 0, 0, 0, 3, 3, + 3, 3, 0, 3, 1, 3, + 0, 0, 0, 0 + }, + .l1_zlw_2d_mode = { + 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0 + }, + .nr_l2streams = IPU_MMU_MAX_TLB_L2_STREAMS, + .l2_block_sz = { + 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 2, 2, 2, 2, 2, 2 + }, + .insert_read_before_invalidate = false, + .zlw_invalidate = true, + .l1_stream_id_reg_offset = + IPU_MMU_L1_STREAM_ID_REG_OFFSET, + .l2_stream_id_reg_offset = + IPU_MMU_L2_STREAM_ID_REG_OFFSET, + }, + { + .offset = IPU_PSYS_IOMMU1R_OFFSET, + .info_bits = 
IPU_INFO_STREAM_ID_SET(0), + .nr_l1streams = IPU_MMU_MAX_TLB_L1_STREAMS, + .l1_block_sz = { + 0, 0, 0, 0, 0, 0, 0, 0, 8, 0, + 0, 0, 16, 12, 12, 16 + }, + .l1_zlw_en = { + 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, + 0, 0, 1, 1, 1, 1 + }, + .l1_zlw_1d_mode = { + 0, 0, 0, 0, 0, 0, 0, 0, 1, + 0, 0, 0, 0, 1, 1, 1 + }, + .l1_ins_zlw_ahead_pages = { + 0, 0, 0, 0, 0, 0, + 0, 0, 3, 0, 0, 0, + 0, 0, 0, 0 + }, + .l1_zlw_2d_mode = { + 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 1, 1, 1 + }, + .nr_l2streams = IPU_MMU_MAX_TLB_L2_STREAMS, + .l2_block_sz = { + 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 2, 2, 2, 2, 2, 2 + }, + .insert_read_before_invalidate = false, + .zlw_invalidate = true, + .l1_stream_id_reg_offset = + IPU_MMU_L1_STREAM_ID_REG_OFFSET, + .l2_stream_id_reg_offset = + IPU_MMU_L2_STREAM_ID_REG_OFFSET, + }, + }, + .dmem_offset = IPU_PSYS_DMEM_OFFSET, + .spc_offset = IPU_PSYS_SPC_OFFSET, + }, +}; + +/* + * This is meant only as reference for initialising the buttress control, + * because the different HW stepping can have different initial values + * + * There is a HW bug and IS_PWR and PS_PWR fields cannot be used to + * detect if power on/off is ready. Using IS_PWR_FSM and PS_PWR_FSM + * fields instead. 
+ */ +const struct ipu_buttress_ctrl isys_buttress_ctrl = { + .divisor = IS_FREQ_CTL_DIVISOR, + .qos_floor = 0, + .freq_ctl = BUTTRESS_REG_IS_FREQ_CTL, + .pwr_sts_shift = BUTTRESS_PWR_STATE_IS_PWR_FSM_SHIFT, + .pwr_sts_mask = BUTTRESS_PWR_STATE_IS_PWR_FSM_MASK, + .pwr_sts_on = BUTTRESS_PWR_STATE_IS_PWR_FSM_IS_RDY, + .pwr_sts_off = BUTTRESS_PWR_STATE_IS_PWR_FSM_IDLE, +}; + +/* + * This is meant only as reference for initialising the buttress control, + * because the different HW stepping can have different initial values + */ + +const struct ipu_buttress_ctrl psys_buttress_ctrl = { + .divisor = PS_FREQ_CTL_DEFAULT_RATIO, + .qos_floor = PS_FREQ_CTL_DEFAULT_RATIO, + .freq_ctl = BUTTRESS_REG_PS_FREQ_CTL, + .pwr_sts_shift = BUTTRESS_PWR_STATE_PS_PWR_FSM_SHIFT, + .pwr_sts_mask = BUTTRESS_PWR_STATE_PS_PWR_FSM_MASK, + .pwr_sts_on = BUTTRESS_PWR_STATE_PS_PWR_FSM_PS_PWR_UP, + .pwr_sts_off = BUTTRESS_PWR_STATE_PS_PWR_FSM_IDLE, +}; +#endif + +#ifdef CONFIG_VIDEO_INTEL_IPU4P + +/* + * ipu4p available hw ports start from sip0 port3 + * available ports are: + * s0p3, s1p0, s1p1, s1p2, s1p3 + */ +static unsigned int ipu4p_csi_offsets[] = { + 0x64300, 0x6c000, 0x6c100, 0x6c200, 0x6c300 +}; + +static unsigned char ipu4p_csi_evlanecombine[] = { + 0, 0, 0, 0, 0, 0 +}; + +static unsigned int ipu4p_tpg_offsets[] = { + IPU_TPG0_ADDR_OFFSET, + IPU_TPG1_ADDR_OFFSET +}; + +static unsigned int ipu4p_tpg_sels[] = { + IPU_GPOFFSET + IPU_GPREG_MIPI_PKT_GEN0_SEL, + IPU_COMBO_GPOFFSET + IPU_GPREG_MIPI_PKT_GEN1_SEL +}; + +const struct ipu_isys_internal_pdata isys_ipdata = { + .csi2 = { + .nports = ARRAY_SIZE(ipu4p_csi_offsets), + .offsets = ipu4p_csi_offsets, + .evlanecombine = ipu4p_csi_evlanecombine, + }, + .tpg = { + .ntpgs = ARRAY_SIZE(ipu4p_tpg_offsets), + .offsets = ipu4p_tpg_offsets, + .sels = ipu4p_tpg_sels, + }, + .hw_variant = { + .offset = IPU_ISYS_OFFSET, + .nr_mmus = 2, + .mmu_hw = { + { + .offset = IPU_ISYS_IOMMU0_OFFSET, + .info_bits = + IPU_INFO_REQUEST_DESTINATION_PRIMARY, + 
.nr_l1streams = 0, + .nr_l2streams = 0, + .insert_read_before_invalidate = true, + }, + { + .offset = IPU_ISYS_IOMMU1_OFFSET, + .info_bits = IPU_INFO_STREAM_ID_SET(0), + .nr_l1streams = IPU_MMU_MAX_TLB_L1_STREAMS, + .l1_block_sz = { + 5, 16, 6, 6, 6, 6, 6, 8, 0, + 0, 0, 0, 0, 0, 0, 5 + }, + .l1_zlw_en = { + 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, + 0, 0, 0, 0, 0, 0 + }, + .l1_zlw_1d_mode = { + 0, 1, 1, 1, 1, 1, 1, 1, 0, + 0, 0, 0, 0, 0, 0, 0 + }, + .l1_ins_zlw_ahead_pages = { + 0, 3, 3, 3, 3, 3, + 3, 3, 0, 0, 0, 0, + 0, 0, 0, 0 + }, + .l1_zlw_2d_mode = { + 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0 + }, + .nr_l2streams = IPU_MMU_MAX_TLB_L2_STREAMS, + .l2_block_sz = { + 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 2, 2, 2, 2, 2, 2 + }, + .insert_read_before_invalidate = false, + .zlw_invalidate = true, + .l1_stream_id_reg_offset = + IPU_MMU_L1_STREAM_ID_REG_OFFSET, + .l2_stream_id_reg_offset = + IPU_MMU_L2_STREAM_ID_REG_OFFSET, + }, + }, + .dmem_offset = IPU_ISYS_DMEM_OFFSET, + .spc_offset = IPU_ISYS_SPC_OFFSET, + }, + .num_parallel_streams = IPU_ISYS_NUM_STREAMS, + .isys_dma_overshoot = IPU_ISYS_OVERALLOC_MIN, +}; + +const struct ipu_psys_internal_pdata psys_ipdata = { + .hw_variant = { + .offset = IPU_PSYS_OFFSET, + .nr_mmus = 3, + .mmu_hw = { + { + .offset = IPU_PSYS_IOMMU0_OFFSET, + .info_bits = + IPU_INFO_REQUEST_DESTINATION_PRIMARY, + .nr_l1streams = 0, + .nr_l2streams = 0, + .insert_read_before_invalidate = true, + }, + { + .offset = IPU_PSYS_IOMMU1_OFFSET, + .info_bits = IPU_INFO_STREAM_ID_SET(0), + .nr_l1streams = IPU_MMU_MAX_TLB_L1_STREAMS, + .l1_block_sz = { + 2, 5, 4, 2, 2, 10, 5, 16, 10, + 5, 0, 0, 0, 0, 0, 3 + }, + .l1_zlw_en = { + 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, + 0, 0, 0, 0, 0, 0 + }, + .l1_zlw_1d_mode = { + 0, 0, 1, 1, 1, 1, 1, 1, 1, + 1, 0, 0, 0, 0, 0, 0 + }, + .l1_ins_zlw_ahead_pages = { + 0, 0, 3, 3, 3, 3, + 3, 3, 3, 3, 0, 0, + 0, 0, 0, 0 + }, + .l1_zlw_2d_mode = { + 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0 + }, + .nr_l2streams = IPU_MMU_MAX_TLB_L2_STREAMS, 
+ .l2_block_sz = { + 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 2, 2, 2, 2, 2, 2 + }, + .insert_read_before_invalidate = false, + .zlw_invalidate = true, + .l1_stream_id_reg_offset = + IPU_MMU_L1_STREAM_ID_REG_OFFSET, + .l2_stream_id_reg_offset = + IPU_MMU_L2_STREAM_ID_REG_OFFSET, + }, + { + .offset = IPU_PSYS_IOMMU1R_OFFSET, + .info_bits = IPU_INFO_STREAM_ID_SET(0), + .nr_l1streams = IPU_MMU_MAX_TLB_L1_STREAMS, + .l1_block_sz = { + 2, 6, 5, 16, 16, 8, 8, 0, 0, + 0, 0, 0, 0, 0, 0, 3 + }, + .l1_zlw_en = { + 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0 + }, + .l1_zlw_1d_mode = { + 0, 0, 1, 1, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0 + }, + .l1_ins_zlw_ahead_pages = { + 0, 0, 3, 3, 0, 0, + 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0 + }, + .l1_zlw_2d_mode = { + 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0 + }, + .nr_l2streams = IPU_MMU_MAX_TLB_L2_STREAMS, + .l2_block_sz = { + 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 2, 2, 2, 2, 2, 2 + }, + .insert_read_before_invalidate = false, + .zlw_invalidate = true, + .l1_stream_id_reg_offset = + IPU_MMU_L1_STREAM_ID_REG_OFFSET, + .l2_stream_id_reg_offset = + IPU_MMU_L2_STREAM_ID_REG_OFFSET, + }, + }, + .dmem_offset = IPU_PSYS_DMEM_OFFSET, + .spc_offset = IPU_PSYS_SPC_OFFSET, + }, +}; + +const struct ipu_buttress_ctrl isys_buttress_ctrl = { + .divisor = IS_FREQ_CTL_DIVISOR, + .qos_floor = 0, + .ovrd = 0, + .freq_ctl = BUTTRESS_REG_IS_FREQ_CTL, + .divisor_shift = BUTTRESS_REG_IS_FREQ_CTL_RATIO_SHIFT, + .pwr_sts_shift = BUTTRESS_PWR_STATE_IS_PWR_FSM_SHIFT, + .pwr_sts_mask = BUTTRESS_PWR_STATE_IS_PWR_FSM_MASK, + .pwr_sts_on = BUTTRESS_PWR_STATE_IS_PWR_FSM_IS_RDY, + .pwr_sts_off = BUTTRESS_PWR_STATE_IS_PWR_FSM_IDLE, +}; + +const struct ipu_buttress_ctrl psys_buttress_ctrl = { + .divisor = PS_FREQ_CTL_DEFAULT_RATIO, + .qos_floor = PS_FREQ_CTL_DEFAULT_RATIO, + .ovrd = 1, + .freq_ctl = BUTTRESS_REG_PS_FREQ_CTL, + .divisor_shift = BUTTRESS_REG_PS_FREQ_CTL_RATIO_SHIFT, + .ovrd_shift = BUTTRESS_REG_PS_FREQ_CTL_OVRD_SHIFT, + .pwr_sts_shift = 
BUTTRESS_PWR_STATE_PS_PWR_FSM_SHIFT, + .pwr_sts_mask = BUTTRESS_PWR_STATE_PS_PWR_FSM_MASK, + .pwr_sts_on = BUTTRESS_PWR_STATE_PS_PWR_FSM_PS_PWR_UP, + .pwr_sts_off = BUTTRESS_PWR_STATE_PS_PWR_FSM_IDLE, +}; +#endif + +void ipu_configure_spc(struct ipu_device *isp, + const struct ipu_hw_variants *hw_variant, + int pkg_dir_idx, void __iomem *base, u64 *pkg_dir, + dma_addr_t pkg_dir_dma_addr) +{ + u32 val; + void __iomem *dmem_base = base + hw_variant->dmem_offset; + void __iomem *spc_regs_base = base + hw_variant->spc_offset; + + val = readl(spc_regs_base + IPU_PSYS_REG_SPC_STATUS_CTRL); + val |= IPU_PSYS_SPC_STATUS_CTRL_ICACHE_INVALIDATE; + writel(val, spc_regs_base + IPU_PSYS_REG_SPC_STATUS_CTRL); + + if (isp->secure_mode) { + writel(IPU_PKG_DIR_IMR_OFFSET, dmem_base); + } else { + u32 server_addr; + + server_addr = ipu_cpd_pkg_dir_get_address(pkg_dir, pkg_dir_idx); + + writel(server_addr + + ipu_cpd_get_pg_icache_base(isp, pkg_dir_idx, + isp->cpd_fw->data, + isp->cpd_fw->size), + spc_regs_base + IPU_PSYS_REG_SPC_ICACHE_BASE); + writel(ipu_cpd_get_pg_entry_point(isp, pkg_dir_idx, + isp->cpd_fw->data, + isp->cpd_fw->size), + spc_regs_base + IPU_PSYS_REG_SPC_START_PC); + writel(IPU_INFO_REQUEST_DESTINATION_PRIMARY, + spc_regs_base + + IPU_REG_PSYS_INFO_SEG_0_CONFIG_ICACHE_MASTER); + writel(pkg_dir_dma_addr, dmem_base); + } +} +EXPORT_SYMBOL(ipu_configure_spc); + +int ipu_buttress_psys_freq_get(void *data, u64 *val) +{ + struct ipu_device *isp = data; + u32 reg_val, ratio; + int rval; + + rval = pm_runtime_get_sync(&isp->psys->dev); + if (rval < 0) { + pm_runtime_put(&isp->psys->dev); + dev_err(&isp->pdev->dev, "Runtime PM failed (%d)\n", rval); + return rval; + } + + reg_val = readl(isp->base + BUTTRESS_REG_PS_FREQ_CAPABILITIES); + + pm_runtime_put(&isp->psys->dev); + + ratio = (reg_val & + BUTTRESS_PS_FREQ_CAPABILITIES_LAST_RESOLVED_RATIO_MASK) >> + BUTTRESS_PS_FREQ_CAPABILITIES_LAST_RESOLVED_RATIO_SHIFT; + + *val = BUTTRESS_PS_FREQ_STEP * ratio; + + return 0; +} + 
+int ipu_buttress_isys_freq_get(void *data, u64 *val) +{ + struct ipu_device *isp = data; + u32 reg_val; + int rval; + + rval = pm_runtime_get_sync(&isp->isys->dev); + if (rval < 0) { + pm_runtime_put(&isp->isys->dev); + dev_err(&isp->pdev->dev, "Runtime PM failed (%d)\n", rval); + return rval; + } + + reg_val = readl(isp->base + BUTTRESS_REG_IS_FREQ_CTL); + + pm_runtime_put(&isp->isys->dev); + + /* Input system frequency specified as 1600MHz/divisor */ + *val = 1600 / (reg_val & BUTTRESS_IS_FREQ_CTL_DIVISOR_MASK); + + return 0; +} diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/Makefile.ipu4pisys_inc b/drivers/media/pci/intel/ipu4/ipu4p-css/Makefile.ipu4pisys_inc new file mode 100644 index 0000000000000..90a2ab46510c3 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/Makefile.ipu4pisys_inc @@ -0,0 +1,26 @@ +IPU_ISYSLIB_INC = \ + -I$(IPU_ISYSLIB_ROOT)/buffer/interface \ + -I$(IPU_ISYSLIB_ROOT)/cell/interface \ + -I$(IPU_ISYSLIB_ROOT)/cell/src \ + -I$(IPU_ISYSLIB_ROOT)/device_access/interface \ + -I$(IPU_ISYSLIB_ROOT)/device_access/src \ + -I$(IPU_ISYSLIB_ROOT)/devices \ + -I$(IPU_ISYSLIB_ROOT)/devices/interface \ + -I$(IPU_ISYSLIB_ROOT)/devices/isys/cnlB0 \ + -I$(IPU_ISYSLIB_ROOT)/devices/src \ + -I$(IPU_ISYSLIB_ROOT)/fw_abi_common_types \ + -I$(IPU_ISYSLIB_ROOT)/fw_abi_common_types/cpu \ + -I$(IPU_ISYSLIB_ROOT)/isysapi/interface \ + -I$(IPU_ISYSLIB_ROOT)/pkg_dir/interface \ + -I$(IPU_ISYSLIB_ROOT)/pkg_dir/src \ + -I$(IPU_ISYSLIB_ROOT)/port/interface \ + -I$(IPU_ISYSLIB_ROOT)/reg_dump/src/isys/cnlB0_gen_reg_dump \ + -I$(IPU_ISYSLIB_ROOT)/regmem/interface \ + -I$(IPU_ISYSLIB_ROOT)/regmem/src \ + -I$(IPU_ISYSLIB_ROOT)/support \ + -I$(IPU_ISYSLIB_ROOT)/syscom/interface \ + -I$(IPU_ISYSLIB_ROOT)/syscom/src \ + -I$(IPU_ISYSLIB_ROOT)/trace/interface \ + -I$(IPU_ISYSLIB_ROOT)/utils/system_defs/ \ + -I$(IPU_ISYSLIB_ROOT)/vied \ + -I$(IPU_ISYSLIB_ROOT)/vied/vied/ \ No newline at end of file diff --git 
a/drivers/media/pci/intel/ipu4/ipu4p-css/Makefile.ipu4pisys_src b/drivers/media/pci/intel/ipu4/ipu4p-css/Makefile.ipu4pisys_src new file mode 100644 index 0000000000000..c20760bdb5f1d --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/Makefile.ipu4pisys_src @@ -0,0 +1,19 @@ +IPU_ISYSLIB_SRC = \ + $(IPU_ISYSLIB_ROOT_REL)/isysapi/src/ia_css_isys_private.o \ + $(IPU_ISYSLIB_ROOT_REL)/isysapi/src/ia_css_isys_public.o \ + $(IPU_ISYSLIB_ROOT_REL)/isysapi/src/ia_css_isys_public_trace.o + +ifeq ($(CONFIG_VIDEO_INTEL_IPU), m) +IPU_ISYSLIB_SRC += \ + $(IPU_ISYSLIB_ROOT_REL)/buffer/src/cpu/buffer_access.o \ + $(IPU_ISYSLIB_ROOT_REL)/buffer/src/cpu/ia_css_buffer.o \ + $(IPU_ISYSLIB_ROOT_REL)/buffer/src/cpu/ia_css_input_buffer.o \ + $(IPU_ISYSLIB_ROOT_REL)/buffer/src/cpu/ia_css_output_buffer.o \ + $(IPU_ISYSLIB_ROOT_REL)/buffer/src/cpu/ia_css_shared_buffer.o \ + $(IPU_ISYSLIB_ROOT_REL)/pkg_dir/src/ia_css_pkg_dir.o \ + $(IPU_ISYSLIB_ROOT_REL)/port/src/queue.o \ + $(IPU_ISYSLIB_ROOT_REL)/port/src/recv_port.o \ + $(IPU_ISYSLIB_ROOT_REL)/port/src/send_port.o \ + $(IPU_ISYSLIB_ROOT_REL)/reg_dump/src/reg_dump_generic_bridge.o \ + $(IPU_ISYSLIB_ROOT_REL)/syscom/src/ia_css_syscom.o +endif diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/Makefile.ipu4ppsys_inc b/drivers/media/pci/intel/ipu4/ipu4p-css/Makefile.ipu4ppsys_inc new file mode 100644 index 0000000000000..fb01678242eec --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/Makefile.ipu4ppsys_inc @@ -0,0 +1,52 @@ +IPU_PSYSLIB_INC = \ + -I$(IPU_PSYSLIB_ROOT)/buffer/interface \ + -I$(IPU_PSYSLIB_ROOT)/cell/interface \ + -I$(IPU_PSYSLIB_ROOT)/cell/src \ + -I$(IPU_PSYSLIB_ROOT)/client_pkg/interface \ + -I$(IPU_PSYSLIB_ROOT)/client_pkg/src \ + -I$(IPU_PSYSLIB_ROOT)/cpd/ \ + -I$(IPU_PSYSLIB_ROOT)/cpd/cpd_component/interface \ + -I$(IPU_PSYSLIB_ROOT)/cpd/cpd_metadata/interface \ + -I$(IPU_PSYSLIB_ROOT)/device_access/interface \ + -I$(IPU_PSYSLIB_ROOT)/device_access/src \ + -I$(IPU_PSYSLIB_ROOT)/devices \ + 
-I$(IPU_PSYSLIB_ROOT)/devices/interface \ + -I$(IPU_PSYSLIB_ROOT)/devices/psys/cnlB0 \ + -I$(IPU_PSYSLIB_ROOT)/devices/src \ + -I$(IPU_PSYSLIB_ROOT)/fw_abi_common_types \ + -I$(IPU_PSYSLIB_ROOT)/fw_abi_common_types/cpu \ + -I$(IPU_PSYSLIB_ROOT)/pkg_dir/interface \ + -I$(IPU_PSYSLIB_ROOT)/pkg_dir/src \ + -I$(IPU_PSYSLIB_ROOT)/port/interface \ + -I$(IPU_PSYSLIB_ROOT)/psys_private_pg/interface \ + -I$(IPU_PSYSLIB_ROOT)/psys_server/interface \ + -I$(IPU_PSYSLIB_ROOT)/psysapi/data/interface \ + -I$(IPU_PSYSLIB_ROOT)/psysapi/data/src \ + -I$(IPU_PSYSLIB_ROOT)/psysapi/device/interface \ + -I$(IPU_PSYSLIB_ROOT)/psysapi/device/interface/cnlB0 \ + -I$(IPU_PSYSLIB_ROOT)/psysapi/dynamic/interface \ + -I$(IPU_PSYSLIB_ROOT)/psysapi/dynamic/src \ + -I$(IPU_PSYSLIB_ROOT)/psysapi/interface \ + -I$(IPU_PSYSLIB_ROOT)/psysapi/kernel/interface \ + -I$(IPU_PSYSLIB_ROOT)/psysapi/param/interface \ + -I$(IPU_PSYSLIB_ROOT)/psysapi/param/src \ + -I$(IPU_PSYSLIB_ROOT)/psysapi/psys_server_manifest/cnlB0 \ + -I$(IPU_PSYSLIB_ROOT)/psysapi/resource_model/cnlB0 \ + -I$(IPU_PSYSLIB_ROOT)/psysapi/sim/interface \ + -I$(IPU_PSYSLIB_ROOT)/psysapi/sim/src \ + -I$(IPU_PSYSLIB_ROOT)/psysapi/static/interface \ + -I$(IPU_PSYSLIB_ROOT)/psysapi/static/src \ + -I$(IPU_PSYSLIB_ROOT)/reg_dump/src/psys/cnlB0_gen_reg_dump \ + -I$(IPU_PSYSLIB_ROOT)/regmem/interface \ + -I$(IPU_PSYSLIB_ROOT)/regmem/src \ + -I$(IPU_PSYSLIB_ROOT)/routing_bitmap/interface \ + -I$(IPU_PSYSLIB_ROOT)/routing_bitmap/src \ + -I$(IPU_PSYSLIB_ROOT)/support \ + -I$(IPU_PSYSLIB_ROOT)/syscom/interface \ + -I$(IPU_PSYSLIB_ROOT)/syscom/src \ + -I$(IPU_PSYSLIB_ROOT)/trace/interface \ + -I$(IPU_PSYSLIB_ROOT)/vied \ + -I$(IPU_PSYSLIB_ROOT)/vied/vied/ \ + -I$(IPU_PSYSLIB_ROOT)/vied_nci_acb/interface \ + -I$(IPU_PSYSLIB_ROOT)/vied_parameters/interface \ + -I$(IPU_PSYSLIB_ROOT)/vied_parameters/src \ No newline at end of file diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/Makefile.ipu4ppsys_src 
b/drivers/media/pci/intel/ipu4/ipu4p-css/Makefile.ipu4ppsys_src new file mode 100644 index 0000000000000..3ed88d455baba --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/Makefile.ipu4ppsys_src @@ -0,0 +1,32 @@ +IPU_PSYSLIB_SRC = \ + $(IPU_PSYSLIB_ROOT_REL)/buffer/src/cpu/buffer_access.o \ + $(IPU_PSYSLIB_ROOT_REL)/buffer/src/cpu/ia_css_buffer.o \ + $(IPU_PSYSLIB_ROOT_REL)/buffer/src/cpu/ia_css_input_buffer.o \ + $(IPU_PSYSLIB_ROOT_REL)/buffer/src/cpu/ia_css_output_buffer.o \ + $(IPU_PSYSLIB_ROOT_REL)/buffer/src/cpu/ia_css_shared_buffer.o \ + $(IPU_PSYSLIB_ROOT_REL)/client_pkg/src/ia_css_client_pkg.o \ + $(IPU_PSYSLIB_ROOT_REL)/pkg_dir/src/ia_css_pkg_dir.o \ + $(IPU_PSYSLIB_ROOT_REL)/port/src/queue.o \ + $(IPU_PSYSLIB_ROOT_REL)/port/src/recv_port.o \ + $(IPU_PSYSLIB_ROOT_REL)/port/src/send_port.o \ + $(IPU_PSYSLIB_ROOT_REL)/psys_server/src/bxt_spctrl_process_group_cmd_impl.o \ + $(IPU_PSYSLIB_ROOT_REL)/psysapi/data/src/ia_css_program_group_data.o \ + $(IPU_PSYSLIB_ROOT_REL)/psysapi/device/src/ia_css_psys_device.o \ + $(IPU_PSYSLIB_ROOT_REL)/psysapi/dynamic/src/ia_css_psys_buffer_set.o \ + $(IPU_PSYSLIB_ROOT_REL)/psysapi/dynamic/src/ia_css_psys_process.o \ + $(IPU_PSYSLIB_ROOT_REL)/psysapi/dynamic/src/ia_css_psys_process_group.o \ + $(IPU_PSYSLIB_ROOT_REL)/psysapi/dynamic/src/ia_css_psys_terminal.o \ + $(IPU_PSYSLIB_ROOT_REL)/psysapi/kernel/src/ia_css_kernel_bitmap.o \ + $(IPU_PSYSLIB_ROOT_REL)/psysapi/param/src/ia_css_program_group_param.o \ + $(IPU_PSYSLIB_ROOT_REL)/psysapi/psys_server_manifest/cnlB0/ia_css_psys_server_manifest.o \ + $(IPU_PSYSLIB_ROOT_REL)/psysapi/resource_model/cnlB0/vied_nci_psys_resource_model.o \ + $(IPU_PSYSLIB_ROOT_REL)/psysapi/sim/src/vied_nci_psys_system.o \ + $(IPU_PSYSLIB_ROOT_REL)/psysapi/static/src/ia_css_psys_program_group_manifest.o \ + $(IPU_PSYSLIB_ROOT_REL)/psysapi/static/src/ia_css_psys_program_manifest.o \ + $(IPU_PSYSLIB_ROOT_REL)/psysapi/static/src/ia_css_psys_terminal_manifest.o \ + 
$(IPU_PSYSLIB_ROOT_REL)/reg_dump/src/reg_dump_generic_bridge.o \ + $(IPU_PSYSLIB_ROOT_REL)/routing_bitmap/src/ia_css_rbm.o \ + $(IPU_PSYSLIB_ROOT_REL)/routing_bitmap/src/ia_css_rbm_manifest.o \ + $(IPU_PSYSLIB_ROOT_REL)/syscom/src/ia_css_syscom.o \ + $(IPU_PSYSLIB_ROOT_REL)/vied_parameters/src/ia_css_terminal.o \ + $(IPU_PSYSLIB_ROOT_REL)/vied_parameters/src/ia_css_terminal_manifest.o \ No newline at end of file diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/Makefile.isyslib b/drivers/media/pci/intel/ipu4/ipu4p-css/Makefile.isyslib new file mode 100644 index 0000000000000..d0816c508ed93 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/Makefile.isyslib @@ -0,0 +1,42 @@ +ifneq ($(EXTERNAL_BUILD), 1) +srcpath := $(srctree) +endif + +PROGRAMS = isys_fw +SYSTEM = input_system_system +IPU_ISYSLIB_ROOT_REL = ipu4p-css/lib2600 +IPU_ISYSLIB_ROOT = $(srcpath)/$(src)/$(IPU_ISYSLIB_ROOT_REL) + +include $(srcpath)/$(src)/ipu4p-css/Makefile.ipu4pisys_inc +include $(srcpath)/$(src)/ipu4p-css/Makefile.ipu4pisys_src + +intel-ipu4p-isys-csslib-objs := \ + ipu4p-css/libintel-ipu4p.o \ + $(IPU_ISYSLIB_SRC) + +ifeq ($(CONFIG_VIDEO_INTEL_IPU), m) +intel-ipu4p-isys-csslib-objs += ipu4p-css/ipu-wrapper.o +endif +obj-$(CONFIG_VIDEO_INTEL_IPU) += intel-ipu4p-isys-csslib.o + +INCLUDES := -I$(srcpath)/$(src)/$(IPU_ISYSLIB_ROOT_REL) \ + -I$(srcpath)/$(src) \ + $(IPU_ISYSLIB_INC) + +DEFINES:= -D__HOST__ -D__KERNEL__ -DISYS_FPGA -DPSYS_FPGA + +DEFINES += -DSSID=1 +DEFINES += -DMMID=1 +DEFINES += -DPROGNAME=isys_fw +DEFINES += -DPROGMAP=\"isys_fw.map.h\" +DEFINES += -DSUBSYSTEM_INCLUDE=\ +DEFINES += -DCELL=input_system_unis_logic_sp_control_tile_sp +DEFINES += -DSPMAIN=isys_fw +DEFINES += -DRUN_INTEGRATION +DEFINES += -DDEBUG_SP_NCI +DEFINES += -DCFG_VIED_SUBSYSTEM_ACCESS_LIB_IMPL=1 +DEFINES += -DHRT_ON_VIED_SUBSYSTEM_ACCESS=0 +DEFINES += -DHRT_USE_VIR_ADDRS +DEFINES += -DHRT_HW + +ccflags-y += $(INCLUDES) $(DEFINES) -fno-common diff --git 
a/drivers/media/pci/intel/ipu4/ipu4p-css/Makefile.psyslib b/drivers/media/pci/intel/ipu4/ipu4p-css/Makefile.psyslib new file mode 100644 index 0000000000000..fe954d8e2e623 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/Makefile.psyslib @@ -0,0 +1,14 @@ +ifneq ($(EXTERNAL_BUILD), 1) +srcpath := $(srctree) +endif + +# note: this file only defines INCLUDES paths for lib2600psys +include $(srcpath)/$(src)/ipu4p-css/Makefile.ipu4ppsys_inc + +IPU_PSYSLIB_ROOT = $(srcpath)/$(src)/ipu4p-css/lib2600psys/lib +HOST_DEFINES += -DPSYS_SERVER_ON_SPC +HOST_DEFINES += -DCFG_VIED_SUBSYSTEM_ACCESS_LIB_IMPL=1 + +ccflags-y += $(IPU_PSYSLIB_INC) $(HOST_DEFINES) + +obj-$(CONFIG_VIDEO_INTEL_IPU) += ipu4p-css/lib2600psys/ diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/ia_css_fw_pkg_release.h b/drivers/media/pci/intel/ipu4/ipu4p-css/ia_css_fw_pkg_release.h new file mode 100644 index 0000000000000..408726c817146 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/ia_css_fw_pkg_release.h @@ -0,0 +1,14 @@ +/* +* Support for Intel Camera Imaging ISP subsystem. +* Copyright (c) 2010 - 2018, Intel Corporation. +* +* This program is free software; you can redistribute it and/or modify it +* under the terms and conditions of the GNU General Public License, +* version 2, as published by the Free Software Foundation. +* +* This program is distributed in the hope it will be useful, but WITHOUT +* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for +* more details. 
+*/ +#define IA_CSS_FW_PKG_RELEASE 0x20181222 diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/ipu-wrapper.c b/drivers/media/pci/intel/ipu4/ipu4p-css/ipu-wrapper.c new file mode 120000 index 0000000000000..3167dda06f067 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/ipu-wrapper.c @@ -0,0 +1 @@ +../../ipu-wrapper.c \ No newline at end of file diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/buffer/buffer.mk b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/buffer/buffer.mk new file mode 100644 index 0000000000000..c00a1133b440f --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/buffer/buffer.mk @@ -0,0 +1,43 @@ +# # # +# Support for Intel Camera Imaging ISP subsystem. +# Copyright (c) 2010 - 2018, Intel Corporation. +# +# This program is free software; you can redistribute it and/or modify it +# under the terms and conditions of the GNU General Public License, +# version 2, as published by the Free Software Foundation. +# +# This program is distributed in the hope it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +# FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU General Public License for +# more details +# +# +# MODULE is BUFFER + +ifdef _H_BUFFER_MK +$(error ERROR: buffer.mk included multiple times, please check makefile) +else +_H_BUFFER_MK=1 +endif + +BUFFER_DIR=$${MODULES_DIR}/buffer + +BUFFER_INTERFACE=$(BUFFER_DIR)/interface +BUFFER_SOURCES_CPU=$(BUFFER_DIR)/src/cpu +BUFFER_SOURCES_CSS=$(BUFFER_DIR)/src/css + +BUFFER_HOST_FILES += $(BUFFER_SOURCES_CPU)/ia_css_buffer.c +BUFFER_HOST_FILES += $(BUFFER_SOURCES_CPU)/ia_css_output_buffer.c +BUFFER_HOST_FILES += $(BUFFER_SOURCES_CPU)/ia_css_input_buffer.c +BUFFER_HOST_FILES += $(BUFFER_SOURCES_CPU)/ia_css_shared_buffer.c +BUFFER_HOST_FILES += $(BUFFER_SOURCES_CPU)/buffer_access.c +BUFFER_HOST_CPPFLAGS += -I$(BUFFER_INTERFACE) +BUFFER_HOST_CPPFLAGS += -I$${MODULES_DIR}/support + +BUFFER_FW_FILES += $(BUFFER_SOURCES_CSS)/ia_css_input_buffer.c +BUFFER_FW_FILES += $(BUFFER_SOURCES_CSS)/ia_css_output_buffer.c +BUFFER_FW_FILES += $(BUFFER_SOURCES_CSS)/ia_css_shared_buffer.c +BUFFER_FW_FILES += $(BUFFER_SOURCES_CSS)/buffer_access.c + +BUFFER_FW_CPPFLAGS += -I$(BUFFER_INTERFACE) +BUFFER_FW_CPPFLAGS += -I$${MODULES_DIR}/support diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/buffer/interface/buffer_access.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/buffer/interface/buffer_access.h new file mode 100644 index 0000000000000..e5fe647742c9f --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/buffer/interface/buffer_access.h @@ -0,0 +1,36 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU General Public License for + * more details. +*/ + +#ifndef __BUFFER_ACCESS_H +#define __BUFFER_ACCESS_H + +#include "buffer_type.h" +/* #def to keep consistent the buffer load interfaces for host and css */ +#define IDM 0 + +void +buffer_load( + buffer_address address, + void *data, + unsigned int size, + unsigned int mm_id); + +void +buffer_store( + buffer_address address, + const void *data, + unsigned int size, + unsigned int mm_id); + +#endif /* __BUFFER_ACCESS_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/buffer/interface/buffer_type.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/buffer/interface/buffer_type.h new file mode 100644 index 0000000000000..de51f23941582 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/buffer/interface/buffer_type.h @@ -0,0 +1,29 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+*/ + +#ifndef __BUFFER_TYPE_H +#define __BUFFER_TYPE_H + +/* portable access to buffers in DDR */ + +#ifdef __VIED_CELL +typedef unsigned int buffer_address; +#else +/* workaround needed because shared_memory_access.h uses size_t */ +#include "type_support.h" +#include "vied/shared_memory_access.h" +typedef host_virtual_address_t buffer_address; +#endif + +#endif /* __BUFFER_TYPE_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/buffer/interface/ia_css_buffer_address.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/buffer/interface/ia_css_buffer_address.h new file mode 100644 index 0000000000000..137bfb1fda166 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/buffer/interface/ia_css_buffer_address.h @@ -0,0 +1,24 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+*/ + +#ifndef __IA_CSS_BUFFER_ADDRESS_H +#define __IA_CSS_BUFFER_ADDRESS_H + +#include "type_support.h" + +typedef uint32_t ia_css_buffer_address; /* CSS virtual address */ + +#define ia_css_buffer_address_null ((ia_css_buffer_address)0) + +#endif /* __IA_CSS_BUFFER_ADDRESS_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/buffer/interface/ia_css_input_buffer.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/buffer/interface/ia_css_input_buffer.h new file mode 100644 index 0000000000000..4e92e35b61843 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/buffer/interface/ia_css_input_buffer.h @@ -0,0 +1,51 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ + +#ifndef __IA_CSS_INPUT_BUFFER_H +#define __IA_CSS_INPUT_BUFFER_H + + +/* Input Buffers */ + +/* A CSS input buffer is a buffer in DDR that can be written by the CPU, + * and that can be read by CSS hardware, after the buffer has been handed over. + * Examples: command buffer, input frame buffer, parameter buffer + * An input buffer must be mapped into the CPU address space before it can be + * written by the CPU. + * After mapping, writing, and unmapping, the buffer can be handed over to the + * firmware. An input buffer is handed over to the CSS by mapping it to the + * CSS address space (by the CPU), and by passing the resulting CSS (virtual) + * address of the input buffer to the DA CSS hardware.
+ * The firmware can read from an input buffer as soon as it has received the + * CSS virtual address. + * The firmware should not write into an input buffer. + * The firmware hands over the input buffer (back to the CPU) by sending the + * buffer handle via a response. The host should unmap the buffer, + * before reusing it. + * The firmware should not read from the input buffer after returning the + * buffer handle to the CPU. + * + * A buffer may be pre-mapped to the CPU and/or to the CSS upon allocation, + * depending on the allocator's preference. In case of pre-mapped buffers, + * the map and unmap functions will only manage read and write access. + */ + +#include "ia_css_buffer_address.h" + +typedef struct ia_css_buffer_s *ia_css_input_buffer; /* input buffer handle */ +typedef void *ia_css_input_buffer_cpu_address; /* CPU virtual address */ +/* CSS virtual address */ +typedef ia_css_buffer_address ia_css_input_buffer_css_address; + +#endif /* __IA_CSS_INPUT_BUFFER_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/buffer/interface/ia_css_input_buffer_cpu.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/buffer/interface/ia_css_input_buffer_cpu.h new file mode 100644 index 0000000000000..d3d01353ce431 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/buffer/interface/ia_css_input_buffer_cpu.h @@ -0,0 +1,49 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details.
+*/ + +#ifndef __IA_CSS_INPUT_BUFFER_CPU_H +#define __IA_CSS_INPUT_BUFFER_CPU_H + +#include "vied/shared_memory_map.h" +#include "ia_css_input_buffer.h" + +ia_css_input_buffer +ia_css_input_buffer_alloc( + vied_subsystem_t sid, + vied_memory_t mid, + unsigned int size); + +void +ia_css_input_buffer_free( + vied_subsystem_t sid, + vied_memory_t mid, + ia_css_input_buffer b); + +ia_css_input_buffer_cpu_address +ia_css_input_buffer_cpu_map(ia_css_input_buffer b); + +ia_css_input_buffer_cpu_address +ia_css_input_buffer_cpu_unmap(ia_css_input_buffer b); + +ia_css_input_buffer_css_address +ia_css_input_buffer_css_map(vied_memory_t mid, ia_css_input_buffer b); + +ia_css_input_buffer_css_address +ia_css_input_buffer_css_map_no_invalidate(vied_memory_t mid, ia_css_input_buffer b); + +ia_css_input_buffer_css_address +ia_css_input_buffer_css_unmap(ia_css_input_buffer b); + + +#endif /* __IA_CSS_INPUT_BUFFER_CPU_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/buffer/interface/ia_css_output_buffer.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/buffer/interface/ia_css_output_buffer.h new file mode 100644 index 0000000000000..2c310ea92c6af --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/buffer/interface/ia_css_output_buffer.h @@ -0,0 +1,30 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+*/ + +#ifndef __IA_CSS_OUTPUT_BUFFER_H +#define __IA_CSS_OUTPUT_BUFFER_H + +/* Output Buffers */ +/* A CSS output buffer is a buffer in DDR that can be written by CSS hardware + * and that can be read by the host, after the buffer has been handed over + * Examples: output frame buffer + */ + +#include "ia_css_buffer_address.h" + +typedef struct ia_css_buffer_s *ia_css_output_buffer; +typedef void *ia_css_output_buffer_cpu_address; +typedef ia_css_buffer_address ia_css_output_buffer_css_address; + +#endif /* __IA_CSS_OUTPUT_BUFFER_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/buffer/interface/ia_css_output_buffer_cpu.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/buffer/interface/ia_css_output_buffer_cpu.h new file mode 100644 index 0000000000000..0299fc3b7eb66 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/buffer/interface/ia_css_output_buffer_cpu.h @@ -0,0 +1,48 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details.
+*/ + +#ifndef __IA_CSS_OUTPUT_BUFFER_CPU_H +#define __IA_CSS_OUTPUT_BUFFER_CPU_H + +#include "vied/shared_memory_map.h" +#include "ia_css_output_buffer.h" + +ia_css_output_buffer +ia_css_output_buffer_alloc( + vied_subsystem_t sid, + vied_memory_t mid, + unsigned int size); + +void +ia_css_output_buffer_free( + vied_subsystem_t sid, + vied_memory_t mid, + ia_css_output_buffer b); + +ia_css_output_buffer_css_address +ia_css_output_buffer_css_map(ia_css_output_buffer b); + +ia_css_output_buffer_css_address +ia_css_output_buffer_css_unmap(ia_css_output_buffer b); + +ia_css_output_buffer_cpu_address +ia_css_output_buffer_cpu_map(vied_memory_t mid, ia_css_output_buffer b); +ia_css_output_buffer_cpu_address +ia_css_output_buffer_cpu_map_no_invalidate(vied_memory_t mid, ia_css_output_buffer b); + +ia_css_output_buffer_cpu_address +ia_css_output_buffer_cpu_unmap(ia_css_output_buffer b); + + +#endif /* __IA_CSS_OUTPUT_BUFFER_CPU_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/buffer/interface/ia_css_return_token.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/buffer/interface/ia_css_return_token.h new file mode 100644 index 0000000000000..440161d2f32b3 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/buffer/interface/ia_css_return_token.h @@ -0,0 +1,54 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+*/ + +#ifndef __IA_CSS_RETURN_TOKEN_H +#define __IA_CSS_RETURN_TOKEN_H + +#include "storage_class.h" +#include "assert_support.h" /* For CT_ASSERT */ + +/* ia_css_return_token: data item of exactly 8 bytes (64 bits) + * which can be used to pass a return token back to the host +*/ +typedef unsigned long long ia_css_return_token; + +STORAGE_CLASS_INLINE void +ia_css_return_token_copy(ia_css_return_token *to, + const ia_css_return_token *from) +{ + /* copy a return token on VIED processor */ + int *dst = (int *)to; + int *src = (int *)from; + + dst[0] = src[0]; + dst[1] = src[1]; +} + +STORAGE_CLASS_INLINE void +ia_css_return_token_zero(ia_css_return_token *to) +{ + /* zero return token on VIED processor */ + int *dst = (int *)to; + + dst[0] = 0; + dst[1] = 0; +} + +STORAGE_CLASS_INLINE void _check_return_token_size(void) +{ + CT_ASSERT(sizeof(int) == 4); + CT_ASSERT(sizeof(ia_css_return_token) == 8); +} + +#endif /* __IA_CSS_RETURN_TOKEN_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/buffer/interface/ia_css_shared_buffer.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/buffer/interface/ia_css_shared_buffer.h new file mode 100644 index 0000000000000..558ec679f98a0 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/buffer/interface/ia_css_shared_buffer.h @@ -0,0 +1,32 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details.
+*/ + +#ifndef __IA_CSS_SHARED_BUFFER_H +#define __IA_CSS_SHARED_BUFFER_H + +/* Shared Buffers */ +/* A CSS shared buffer is a buffer in DDR that can be read and written by the + * CPU and CSS. + * Both the CPU and CSS can have the buffer mapped simultaneously. + * Access rights are not managed by this interface, this could be done by means + * the read and write pointer of a queue, for example. + */ + +#include "ia_css_buffer_address.h" + +typedef struct ia_css_buffer_s *ia_css_shared_buffer; +typedef void *ia_css_shared_buffer_cpu_address; +typedef ia_css_buffer_address ia_css_shared_buffer_css_address; + +#endif /* __IA_CSS_SHARED_BUFFER_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/buffer/interface/ia_css_shared_buffer_cpu.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/buffer/interface/ia_css_shared_buffer_cpu.h new file mode 100644 index 0000000000000..ff62914f99dc3 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/buffer/interface/ia_css_shared_buffer_cpu.h @@ -0,0 +1,51 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+*/ + +#ifndef __IA_CSS_SHARED_BUFFER_CPU_H +#define __IA_CSS_SHARED_BUFFER_CPU_H + +#include "vied/shared_memory_map.h" +#include "ia_css_shared_buffer.h" + +ia_css_shared_buffer +ia_css_shared_buffer_alloc( + vied_subsystem_t sid, + vied_memory_t mid, + unsigned int size); + +void +ia_css_shared_buffer_free( + vied_subsystem_t sid, + vied_memory_t mid, + ia_css_shared_buffer b); + +ia_css_shared_buffer_cpu_address +ia_css_shared_buffer_cpu_map(ia_css_shared_buffer b); + +ia_css_shared_buffer_cpu_address +ia_css_shared_buffer_cpu_unmap(ia_css_shared_buffer b); + +ia_css_shared_buffer_css_address +ia_css_shared_buffer_css_map(ia_css_shared_buffer b); + +ia_css_shared_buffer_css_address +ia_css_shared_buffer_css_unmap(ia_css_shared_buffer b); + +ia_css_shared_buffer +ia_css_shared_buffer_css_update(vied_memory_t mid, ia_css_shared_buffer b); + +ia_css_shared_buffer +ia_css_shared_buffer_cpu_update(vied_memory_t mid, ia_css_shared_buffer b); + +#endif /* __IA_CSS_SHARED_BUFFER_CPU_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/buffer/src/cpu/buffer_access.c b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/buffer/src/cpu/buffer_access.c new file mode 100644 index 0000000000000..f0c617fe501a0 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/buffer/src/cpu/buffer_access.c @@ -0,0 +1,39 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+*/ + +/* implementation of buffer access from the CPU */ +/* using shared_memory interface */ + +#include "buffer_access.h" +#include "vied/shared_memory_access.h" + +void +buffer_load( + buffer_address address, + void *data, + unsigned int bytes, + unsigned int mm_id) +{ + shared_memory_load(mm_id, address, data, bytes); +} + +void +buffer_store( + buffer_address address, + const void *data, + unsigned int bytes, + unsigned int mm_id) +{ + shared_memory_store(mm_id, address, data, bytes); +} diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/buffer/src/cpu/ia_css_buffer.c b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/buffer/src/cpu/ia_css_buffer.c new file mode 100644 index 0000000000000..146d4109de440 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/buffer/src/cpu/ia_css_buffer.c @@ -0,0 +1,51 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+*/ + +/* provided interface */ +#include "ia_css_buffer.h" + +/* used interfaces */ +#include "vied/shared_memory_access.h" +#include "vied/shared_memory_map.h" +#include "cpu_mem_support.h" + +ia_css_buffer_t +ia_css_buffer_alloc(vied_subsystem_t sid, vied_memory_t mid, unsigned int size) +{ + ia_css_buffer_t b; + + b = ia_css_cpu_mem_alloc(sizeof(*b)); + if (b == NULL) + return NULL; + + b->mem = shared_memory_alloc(mid, size); + + if (b->mem == 0) { + ia_css_cpu_mem_free(b); + return NULL; + } + + b->css_address = shared_memory_map(sid, mid, b->mem); + b->size = size; + return b; +} + + +void +ia_css_buffer_free(vied_subsystem_t sid, vied_memory_t mid, ia_css_buffer_t b) +{ + shared_memory_unmap(sid, mid, b->css_address); + shared_memory_free(mid, b->mem); + ia_css_cpu_mem_free(b); +} diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/buffer/src/cpu/ia_css_buffer.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/buffer/src/cpu/ia_css_buffer.h new file mode 100644 index 0000000000000..0f99a06e9a89b --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/buffer/src/cpu/ia_css_buffer.h @@ -0,0 +1,58 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+*/ + +#ifndef __IA_CSS_BUFFER_H +#define __IA_CSS_BUFFER_H + +/* workaround: needed because uses size_t */ +#include "type_support.h" +#include "vied/shared_memory_map.h" + +typedef enum { + buffer_unmapped, /* buffer is not accessible by cpu, nor css */ + buffer_write, /* output buffer: css has write access */ + /* input buffer: cpu has write access */ + buffer_read, /* input buffer: css has read access */ + /* output buffer: cpu has read access */ + buffer_cpu, /* shared buffer: cpu has read/write access */ + buffer_css /* shared buffer: css has read/write access */ +} buffer_state; + +struct ia_css_buffer_s { + /* number of bytes allocated */ + unsigned int size; + /* allocated virtual memory object */ + host_virtual_address_t mem; + /* virtual address to be used on css/firmware */ + vied_virtual_address_t css_address; + /* virtual address to be used on cpu/host */ + void *cpu_address; + buffer_state state; +}; + +typedef struct ia_css_buffer_s *ia_css_buffer_t; + +ia_css_buffer_t +ia_css_buffer_alloc( + vied_subsystem_t sid, + vied_memory_t mid, + unsigned int size); + +void +ia_css_buffer_free( + vied_subsystem_t sid, + vied_memory_t mid, + ia_css_buffer_t b); + +#endif /* __IA_CSS_BUFFER_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/buffer/src/cpu/ia_css_input_buffer.c b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/buffer/src/cpu/ia_css_input_buffer.c new file mode 100644 index 0000000000000..2a128795d03e2 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/buffer/src/cpu/ia_css_input_buffer.c @@ -0,0 +1,184 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. 
+ * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ + + +#include "ia_css_input_buffer_cpu.h" +#include "ia_css_buffer.h" +#include "vied/shared_memory_access.h" +#include "vied/shared_memory_map.h" +#include "cpu_mem_support.h" + + +ia_css_input_buffer +ia_css_input_buffer_alloc( + vied_subsystem_t sid, + vied_memory_t mid, + unsigned int size) +{ + ia_css_input_buffer b; + + /* allocate buffer container */ + b = ia_css_cpu_mem_alloc(sizeof(*b)); + if (b == NULL) + return NULL; + + b->mem = shared_memory_alloc(mid, size); + if (b->mem == 0) { + ia_css_cpu_mem_free(b); + return NULL; + } + +#ifndef HRT_HW + /* initialize the buffer to avoid warnings when copying */ + shared_memory_zero(mid, b->mem, size); + + /* in simulation, we need to allocate a shadow host buffer */ + b->cpu_address = ia_css_cpu_mem_alloc_page_aligned(size); + if (b->cpu_address == NULL) { + shared_memory_free(mid, b->mem); + ia_css_cpu_mem_free(b); + return NULL; + } +#else + /* on hw / real platform we can use the pointer from + * shared memory alloc + */ + b->cpu_address = (void *)HOST_ADDRESS(b->mem); +#endif + + b->css_address = shared_memory_map(sid, mid, b->mem); + + b->size = size; + b->state = buffer_unmapped; + + return b; +} + + +void +ia_css_input_buffer_free( + vied_subsystem_t sid, + vied_memory_t mid, + ia_css_input_buffer b) +{ + if (b == NULL) + return; + if (b->state != buffer_unmapped) + return; + +#ifndef HRT_HW + /* only free if we actually allocated it separately */ + ia_css_cpu_mem_free(b->cpu_address); +#endif + shared_memory_unmap(sid, mid, b->css_address); + shared_memory_free(mid, b->mem); + ia_css_cpu_mem_free(b); +} + + +ia_css_input_buffer_cpu_address +ia_css_input_buffer_cpu_map(ia_css_input_buffer b) +{ + if (b == NULL) + return NULL; + if (b->state != 
buffer_unmapped) + return NULL; + + /* map input buffer to CPU address space, acquire write access */ + b->state = buffer_write; + + /* return pre-mapped buffer */ + return b->cpu_address; +} + + +ia_css_input_buffer_cpu_address +ia_css_input_buffer_cpu_unmap(ia_css_input_buffer b) +{ + if (b == NULL) + return NULL; + if (b->state != buffer_write) + return NULL; + + /* unmap input buffer from CPU address space, release write access */ + b->state = buffer_unmapped; + + /* return pre-mapped buffer */ + return b->cpu_address; +} + + +ia_css_input_buffer_css_address +ia_css_input_buffer_css_map(vied_memory_t mid, ia_css_input_buffer b) +{ + if (b == NULL) + return 0; + if (b->state != buffer_unmapped) + return 0; + + /* map input buffer to CSS address space, acquire read access */ + b->state = buffer_read; + + /* now flush the cache */ + ia_css_cpu_mem_cache_flush(b->cpu_address, b->size); +#ifndef HRT_HW + /* only copy in case of simulation, otherwise it should just work */ + /* copy data from CPU address space to CSS address space */ + shared_memory_store(mid, b->mem, b->cpu_address, b->size); +#else + (void)mid; +#endif + + return (ia_css_input_buffer_css_address)b->css_address; +} + + +ia_css_input_buffer_css_address +ia_css_input_buffer_css_map_no_invalidate(vied_memory_t mid, ia_css_input_buffer b) +{ + if (b == NULL) + return 0; + if (b->state != buffer_unmapped) + return 0; + + /* map input buffer to CSS address space, acquire read access */ + b->state = buffer_read; + +#ifndef HRT_HW + /* only copy in case of simulation, otherwise it should just work */ + /* copy data from CPU address space to CSS address space */ + shared_memory_store(mid, b->mem, b->cpu_address, b->size); +#else + (void)mid; +#endif + + return (ia_css_input_buffer_css_address)b->css_address; +} + + +ia_css_input_buffer_css_address +ia_css_input_buffer_css_unmap(ia_css_input_buffer b) +{ + if (b == NULL) + return 0; + if (b->state != buffer_read) + return 0; + + /* unmap input buffer from CSS 
address space, release read access */ + b->state = buffer_unmapped; + + /* input buffer only, no need to invalidate cache */ + + return (ia_css_input_buffer_css_address)b->css_address; +} diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/buffer/src/cpu/ia_css_output_buffer.c b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/buffer/src/cpu/ia_css_output_buffer.c new file mode 100644 index 0000000000000..30bc8d52a5a9e --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/buffer/src/cpu/ia_css_output_buffer.c @@ -0,0 +1,181 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+*/ + + +#include "ia_css_output_buffer_cpu.h" +#include "ia_css_buffer.h" +#include "vied/shared_memory_access.h" +#include "vied/shared_memory_map.h" +#include "cpu_mem_support.h" + + +ia_css_output_buffer +ia_css_output_buffer_alloc( + vied_subsystem_t sid, + vied_memory_t mid, + unsigned int size) +{ + ia_css_output_buffer b; + + /* allocate buffer container */ + b = ia_css_cpu_mem_alloc(sizeof(*b)); + if (b == NULL) + return NULL; + + b->mem = shared_memory_alloc(mid, size); + if (b->mem == 0) { + ia_css_cpu_mem_free(b); + return NULL; + } + +#ifndef HRT_HW + /* initialize the buffer to avoid warnings when copying */ + shared_memory_zero(mid, b->mem, size); + + /* in simulation, we need to allocate a shadow host buffer */ + b->cpu_address = ia_css_cpu_mem_alloc_page_aligned(size); + if (b->cpu_address == NULL) { + shared_memory_free(mid, b->mem); + ia_css_cpu_mem_free(b); + return NULL; + } +#else + /* on hw / real platform we can use the pointer from + * shared memory alloc + */ + b->cpu_address = (void *)HOST_ADDRESS(b->mem); +#endif + + b->css_address = shared_memory_map(sid, mid, b->mem); + + b->size = size; + b->state = buffer_unmapped; + + return b; +} + + +void +ia_css_output_buffer_free( + vied_subsystem_t sid, + vied_memory_t mid, + ia_css_output_buffer b) +{ + if (b == NULL) + return; + if (b->state != buffer_unmapped) + return; + +#ifndef HRT_HW + /* only free if we actually allocated it separately */ + ia_css_cpu_mem_free(b->cpu_address); +#endif + shared_memory_unmap(sid, mid, b->css_address); + shared_memory_free(mid, b->mem); + ia_css_cpu_mem_free(b); +} + + +ia_css_output_buffer_css_address +ia_css_output_buffer_css_map(ia_css_output_buffer b) +{ + if (b == NULL) + return 0; + if (b->state != buffer_unmapped) + return 0; + + /* map output buffer to CSS address space, acquire write access */ + b->state = buffer_write; + + return (ia_css_output_buffer_css_address)b->css_address; +} + + +ia_css_output_buffer_css_address 
+ia_css_output_buffer_css_unmap(ia_css_output_buffer b) +{ + if (b == NULL) + return 0; + if (b->state != buffer_write) + return 0; + + /* unmap output buffer from CSS address space, release write access */ + b->state = buffer_unmapped; + + return (ia_css_output_buffer_css_address)b->css_address; +} + + +ia_css_output_buffer_cpu_address +ia_css_output_buffer_cpu_map(vied_memory_t mid, ia_css_output_buffer b) +{ + if (b == NULL) + return NULL; + if (b->state != buffer_unmapped) + return NULL; + + /* map output buffer to CPU address space, acquire read access */ + b->state = buffer_read; + +#ifndef HRT_HW + /* only in simulation */ + /* copy data from CSS address space to CPU address space */ + shared_memory_load(mid, b->mem, b->cpu_address, b->size); +#else + (void)mid; +#endif + /* now invalidate the cache */ + ia_css_cpu_mem_cache_invalidate(b->cpu_address, b->size); + + return b->cpu_address; +} + + +ia_css_output_buffer_cpu_address +ia_css_output_buffer_cpu_map_no_invalidate(vied_memory_t mid, ia_css_output_buffer b) +{ + if (b == NULL) + return NULL; + if (b->state != buffer_unmapped) + return NULL; + + /* map output buffer to CPU address space, acquire read access */ + b->state = buffer_read; + +#ifndef HRT_HW + /* only in simulation */ + /* copy data from CSS address space to CPU address space */ + shared_memory_load(mid, b->mem, b->cpu_address, b->size); +#else + (void)mid; +#endif + + return b->cpu_address; +} + +ia_css_output_buffer_cpu_address +ia_css_output_buffer_cpu_unmap(ia_css_output_buffer b) +{ + if (b == NULL) + return NULL; + if (b->state != buffer_read) + return NULL; + + /* unmap output buffer from CPU address space, release read access */ + b->state = buffer_unmapped; + + /* output only, no need to flush cache */ + + return b->cpu_address; +} diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/buffer/src/cpu/ia_css_shared_buffer.c b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/buffer/src/cpu/ia_css_shared_buffer.c new file mode 
100644 index 0000000000000..92b7110644fe3 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/buffer/src/cpu/ia_css_shared_buffer.c @@ -0,0 +1,187 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ + + +#include "ia_css_shared_buffer_cpu.h" +#include "ia_css_buffer.h" +#include "vied/shared_memory_access.h" +#include "vied/shared_memory_map.h" +#include "cpu_mem_support.h" + + +ia_css_shared_buffer +ia_css_shared_buffer_alloc( + vied_subsystem_t sid, + vied_memory_t mid, + unsigned int size) +{ + ia_css_shared_buffer b; + + /* allocate buffer container */ + b = ia_css_cpu_mem_alloc(sizeof(*b)); + if (b == NULL) + return NULL; + + b->mem = shared_memory_alloc(mid, size); + if (b->mem == 0) { + ia_css_cpu_mem_free(b); + return NULL; + } + +#ifndef HRT_HW + /* initialize the buffer to avoid warnings when copying */ + shared_memory_zero(mid, b->mem, size); + + /* in simulation, we need to allocate a shadow host buffer */ + b->cpu_address = ia_css_cpu_mem_alloc_page_aligned(size); + if (b->cpu_address == NULL) { + shared_memory_free(mid, b->mem); + ia_css_cpu_mem_free(b); + return NULL; + } +#else + /* on hw / real platform we can use the pointer from + * shared memory alloc + */ + b->cpu_address = (void *)HOST_ADDRESS(b->mem); +#endif + + b->css_address = shared_memory_map(sid, mid, b->mem); + + b->size = size; + b->state = buffer_unmapped; + + return b; +} + + +void +ia_css_shared_buffer_free( + vied_subsystem_t sid, + vied_memory_t mid, + 
ia_css_shared_buffer b) +{ + if (b == NULL) + return; + if (b->state != buffer_unmapped) + return; + +#ifndef HRT_HW + /* only free if we actually allocated it separately */ + ia_css_cpu_mem_free(b->cpu_address); +#endif + shared_memory_unmap(sid, mid, b->css_address); + shared_memory_free(mid, b->mem); + ia_css_cpu_mem_free(b); +} + + +ia_css_shared_buffer_cpu_address +ia_css_shared_buffer_cpu_map(ia_css_shared_buffer b) +{ + if (b == NULL) + return NULL; + if (b->state != buffer_unmapped) + return NULL; + + /* map shared buffer to CPU address space */ + b->state = buffer_cpu; + + return b->cpu_address; +} + + +ia_css_shared_buffer_cpu_address +ia_css_shared_buffer_cpu_unmap(ia_css_shared_buffer b) +{ + if (b == NULL) + return NULL; + if (b->state != buffer_cpu) + return NULL; + + /* unmap shared buffer from CPU address space */ + b->state = buffer_unmapped; + + return b->cpu_address; +} + + +ia_css_shared_buffer_css_address +ia_css_shared_buffer_css_map(ia_css_shared_buffer b) +{ + if (b == NULL) + return 0; + if (b->state != buffer_unmapped) + return 0; + + /* map shared buffer to CSS address space */ + b->state = buffer_css; + + return (ia_css_shared_buffer_css_address)b->css_address; +} + + +ia_css_shared_buffer_css_address +ia_css_shared_buffer_css_unmap(ia_css_shared_buffer b) +{ + if (b == NULL) + return 0; + if (b->state != buffer_css) + return 0; + + /* unmap shared buffer from CSS address space */ + b->state = buffer_unmapped; + + return (ia_css_shared_buffer_css_address)b->css_address; +} + + +ia_css_shared_buffer +ia_css_shared_buffer_css_update(vied_memory_t mid, ia_css_shared_buffer b) +{ + if (b == NULL) + return NULL; + + /* flush the buffer to CSS after it was modified by the CPU */ + /* flush cache to ddr */ + ia_css_cpu_mem_cache_flush(b->cpu_address, b->size); +#ifndef HRT_HW + /* copy data from CPU address space to CSS address space */ + shared_memory_store(mid, b->mem, b->cpu_address, b->size); +#else + (void)mid; +#endif + + return b; +} + + 
+ia_css_shared_buffer +ia_css_shared_buffer_cpu_update(vied_memory_t mid, ia_css_shared_buffer b) +{ + if (b == NULL) + return NULL; + + /* flush the buffer to the CPU after it has been modified by CSS */ +#ifndef HRT_HW + /* copy data from CSS address space to CPU address space */ + shared_memory_load(mid, b->mem, b->cpu_address, b->size); +#else + (void)mid; +#endif + /* flush cache to ddr */ + ia_css_cpu_mem_cache_invalidate(b->cpu_address, b->size); + + return b; +} diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/cell/cell.mk b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/cell/cell.mk new file mode 100644 index 0000000000000..fa5e650226017 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/cell/cell.mk @@ -0,0 +1,43 @@ +# # # +# Support for Intel Camera Imaging ISP subsystem. +# Copyright (c) 2010 - 2018, Intel Corporation. +# +# This program is free software; you can redistribute it and/or modify it +# under the terms and conditions of the GNU General Public License, +# version 2, as published by the Free Software Foundation. +# +# This program is distributed in the hope it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +# FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU General Public License for +# more details +# +# +ifndef _CELL_MK_ +_CELL_MK_ = 1 + + +CELL_DIR=$${MODULES_DIR}/cell +CELL_INTERFACE=$(CELL_DIR)/interface +CELL_SOURCES=$(CELL_DIR)/src + +CELL_HOST_FILES = +CELL_FW_FILES = + +CELL_HOST_CPPFLAGS = \ + -I$(CELL_INTERFACE) \ + -I$(CELL_SOURCES) + +CELL_FW_CPPFLAGS = \ + -I$(CELL_INTERFACE) \ + -I$(CELL_SOURCES) + +ifdef 0 +# Disabled until it is decided to go this way or not +include $(MODULES_DIR)/device_access/device_access.mk +CELL_HOST_FILES += $(DEVICE_ACCESS_HOST_FILES) +CELL_FW_FILES += $(DEVICE_ACCESS_FW_FILES) +CELL_HOST_CPPFLAGS += $(DEVICE_ACCESS_HOST_CPPFLAGS) +CELL_FW_CPPFLAGS += $(DEVICE_ACCESS_FW_CPPFLAGS) +endif + +endif diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/cell/interface/ia_css_cell.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/cell/interface/ia_css_cell.h new file mode 100644 index 0000000000000..3fac3c791b6e6 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/cell/interface/ia_css_cell.h @@ -0,0 +1,112 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+*/ + +#ifndef __IA_CSS_CELL_H +#define __IA_CSS_CELL_H + +#include "storage_class.h" +#include "type_support.h" + +STORAGE_CLASS_INLINE unsigned int +ia_css_cell_get_stat_ctrl(unsigned int ssid, unsigned int cell_id); + +STORAGE_CLASS_INLINE void +ia_css_cell_set_stat_ctrl(unsigned int ssid, unsigned int cell_id, + unsigned int value); + +STORAGE_CLASS_INLINE void +ia_css_cell_set_start_pc(unsigned int ssid, unsigned int cell_id, + unsigned int pc); + +STORAGE_CLASS_INLINE void +ia_css_cell_set_icache_base_address(unsigned int ssid, unsigned int cell_id, + unsigned int value); + +#if 0 /* To be implemented after completing cell device properties */ +STORAGE_CLASS_INLINE void +ia_css_cell_set_icache_info_bits(unsigned int ssid, unsigned int cell_id, + unsigned int value); + +STORAGE_CLASS_INLINE unsigned int +ia_css_cell_get_debug_pc(unsigned int ssid, unsigned int cell_id); + +STORAGE_CLASS_INLINE unsigned int +ia_css_cell_get_stall_bits(unsigned int ssid, unsigned int cell_id); +#endif + +/* configure master ports */ + +STORAGE_CLASS_INLINE void +ia_css_cell_set_master_base_address(unsigned int ssid, unsigned int cell_id, + unsigned int master, unsigned int value); + +STORAGE_CLASS_INLINE void +ia_css_cell_set_master_segment_base_address(unsigned int ssid, + unsigned int cell_id, + unsigned int master, unsigned int segment, unsigned int value); + +STORAGE_CLASS_INLINE void +ia_css_cell_set_master_info_bits(unsigned int ssid, unsigned int cell_id, + unsigned int master, unsigned int value); + +STORAGE_CLASS_INLINE void +ia_css_cell_set_master_segment_info_bits(unsigned int ssid, + unsigned int cell_id, + unsigned int master, unsigned int segment, unsigned int value); + +STORAGE_CLASS_INLINE void +ia_css_cell_set_master_info_override_bits(unsigned int ssid, unsigned int cell, + unsigned int master, unsigned int value); + +STORAGE_CLASS_INLINE void +ia_css_cell_set_master_segment_info_override_bits(unsigned int ssid, + unsigned int cell, + unsigned int master, 
unsigned int segment, unsigned int value); + +/* Access memories */ + +STORAGE_CLASS_INLINE void +ia_css_cell_mem_store_32(unsigned int ssid, unsigned int cell_id, + unsigned int mem_id, unsigned int addr, unsigned int value); + +STORAGE_CLASS_INLINE unsigned int +ia_css_cell_mem_load_32(unsigned int ssid, unsigned int cell_id, + unsigned int mem_id, unsigned int addr); + +/***********************************************************************/ + +STORAGE_CLASS_INLINE unsigned int +ia_css_cell_is_ready(unsigned int ssid, unsigned int cell_id); + +STORAGE_CLASS_INLINE void +ia_css_cell_set_start_bit(unsigned int ssid, unsigned int cell_id); + +STORAGE_CLASS_INLINE void +ia_css_cell_set_run_bit(unsigned int ssid, unsigned int cell_id, + unsigned int value); + +STORAGE_CLASS_INLINE void +ia_css_cell_start(unsigned int ssid, unsigned int cell_id); + +STORAGE_CLASS_INLINE void +ia_css_cell_start_prefetch(unsigned int ssid, unsigned int cell_id, + bool prefetch); + +STORAGE_CLASS_INLINE void +ia_css_cell_wait(unsigned int ssid, unsigned int cell_id); + +/* include inline implementation */ +#include "ia_css_cell_impl.h" + +#endif /* __IA_CSS_CELL_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/cell/src/ia_css_cell_impl.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/cell/src/ia_css_cell_impl.h new file mode 100644 index 0000000000000..60b2e234da1a0 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/cell/src/ia_css_cell_impl.h @@ -0,0 +1,272 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. 
+ * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ + +#ifndef __IA_CSS_CELL_IMPL_H +#define __IA_CSS_CELL_IMPL_H + +#include "ia_css_cell.h" + +#include "ia_css_cmem.h" +#include "ipu_device_cell_properties.h" +#include "storage_class.h" +#include "assert_support.h" +#include "platform_support.h" +#include "misc_support.h" + +STORAGE_CLASS_INLINE unsigned int +ia_css_cell_regs_addr(unsigned int cell_id) +{ + /* mem_id 0 is for registers */ + return ipu_device_cell_memory_address(cell_id, 0); +} + +STORAGE_CLASS_INLINE unsigned int +ia_css_cell_dmem_addr(unsigned int cell_id) +{ + /* mem_id 1 is for DMEM */ + return ipu_device_cell_memory_address(cell_id, 1); +} + +STORAGE_CLASS_INLINE void +ia_css_cell_mem_store_32(unsigned int ssid, unsigned int cell_id, + unsigned int mem_id, unsigned int addr, unsigned int value) +{ + ia_css_cmem_store_32( + ssid, ipu_device_cell_memory_address( + cell_id, mem_id) + addr, value); +} + +STORAGE_CLASS_INLINE unsigned int +ia_css_cell_mem_load_32(unsigned int ssid, unsigned int cell_id, + unsigned int mem_id, unsigned int addr) +{ + return ia_css_cmem_load_32( + ssid, ipu_device_cell_memory_address(cell_id, mem_id) + addr); +} + +STORAGE_CLASS_INLINE unsigned int +ia_css_cell_get_stat_ctrl(unsigned int ssid, unsigned int cell_id) +{ + return ia_css_cmem_load_32( + ssid, ia_css_cell_regs_addr(cell_id) + + IPU_DEVICE_CELL_STAT_CTRL_REG_ADDRESS); +} + +STORAGE_CLASS_INLINE void +ia_css_cell_set_stat_ctrl(unsigned int ssid, unsigned int cell_id, + unsigned int value) +{ + ia_css_cmem_store_32( + ssid, ia_css_cell_regs_addr(cell_id) + + IPU_DEVICE_CELL_STAT_CTRL_REG_ADDRESS, value); +} + +STORAGE_CLASS_INLINE unsigned int +ia_css_cell_is_ready(unsigned int ssid, unsigned int cell_id) +{ + unsigned int reg; + + reg = 
ia_css_cell_get_stat_ctrl(ssid, cell_id); + /* READY must be 1, START must be 0 */ + return (reg & (1 << IPU_DEVICE_CELL_STAT_CTRL_READY_BIT)) && + ((~reg) & (1 << IPU_DEVICE_CELL_STAT_CTRL_START_BIT)); +} + +STORAGE_CLASS_INLINE void +ia_css_cell_set_start_pc(unsigned int ssid, unsigned int cell_id, + unsigned int pc) +{ + /* set start PC */ + ia_css_cmem_store_32( + ssid, ia_css_cell_regs_addr(cell_id) + + IPU_DEVICE_CELL_START_PC_REG_ADDRESS, pc); +} + +STORAGE_CLASS_INLINE void +ia_css_cell_set_start_bit(unsigned int ssid, unsigned int cell_id) +{ + unsigned int reg; + + reg = 1 << IPU_DEVICE_CELL_STAT_CTRL_START_BIT; + ia_css_cell_set_stat_ctrl(ssid, cell_id, reg); +} + +STORAGE_CLASS_INLINE void +ia_css_cell_set_run_bit(unsigned int ssid, unsigned int cell_id, + unsigned int value) +{ + unsigned int reg; + + reg = value << IPU_DEVICE_CELL_STAT_CTRL_RUN_BIT; + ia_css_cell_set_stat_ctrl(ssid, cell_id, reg); +} + +STORAGE_CLASS_INLINE void +ia_css_cell_start(unsigned int ssid, unsigned int cell_id) +{ + ia_css_cell_start_prefetch(ssid, cell_id, 0); +} + +STORAGE_CLASS_INLINE void +ia_css_cell_start_prefetch(unsigned int ssid, unsigned int cell_id, + bool prefetch) +{ + unsigned int reg = 0; + + /* Set run bit and start bit */ + reg |= (1 << IPU_DEVICE_CELL_STAT_CTRL_START_BIT); + reg |= (1 << IPU_DEVICE_CELL_STAT_CTRL_RUN_BIT); + /* Invalidate the icache */ + reg |= (1 << IPU_DEVICE_CELL_STAT_CTRL_INVALIDATE_ICACHE_BIT); + /* Optionally enable prefetching */ + reg |= ((prefetch == 1) ? 
+ (1 << IPU_DEVICE_CELL_STAT_CTRL_ICACHE_ENABLE_PREFETCH_BIT) : + 0); + + /* store into register */ + ia_css_cell_set_stat_ctrl(ssid, cell_id, reg); +} + +STORAGE_CLASS_INLINE void +ia_css_cell_wait(unsigned int ssid, unsigned int cell_id) +{ + do { + ia_css_sleep(); + } while (!ia_css_cell_is_ready(ssid, cell_id)); +}; + +STORAGE_CLASS_INLINE void +ia_css_cell_set_icache_base_address(unsigned int ssid, unsigned int cell_id, + unsigned int value) +{ + ia_css_cmem_store_32( + ssid, ia_css_cell_regs_addr(cell_id) + + IPU_DEVICE_CELL_ICACHE_BASE_REG_ADDRESS, value); +} + +/* master port configuration */ + + +STORAGE_CLASS_INLINE void +ia_css_cell_set_master_segment_info_bits(unsigned int ssid, unsigned int cell, + unsigned int master, unsigned int segment, unsigned int value) +{ + unsigned int addr; + + assert(cell < ipu_device_cell_num_devices()); + assert(master < ipu_device_cell_num_masters(cell)); + assert(segment < ipu_device_cell_master_num_segments(cell, master)); + + addr = ipu_device_cell_memory_address(cell, 0); + addr += ipu_device_cell_master_info_reg(cell, master); + addr += segment * ipu_device_cell_master_stride(cell, master); + ia_css_cmem_store_32(ssid, addr, value); +} + +STORAGE_CLASS_INLINE void +ia_css_cell_set_master_segment_info_override_bits(unsigned int ssid, + unsigned int cell, + unsigned int master, unsigned int segment, unsigned int value) +{ + unsigned int addr; + + assert(cell < ipu_device_cell_num_devices()); + assert(master < ipu_device_cell_num_masters(cell)); + assert(segment < ipu_device_cell_master_num_segments(cell, master)); + + addr = ipu_device_cell_memory_address(cell, 0); + addr += ipu_device_cell_master_info_override_reg(cell, master); + addr += segment * ipu_device_cell_master_stride(cell, master); + ia_css_cmem_store_32(ssid, addr, value); +} + +STORAGE_CLASS_INLINE void +ia_css_cell_set_master_segment_base_address(unsigned int ssid, + unsigned int cell, + unsigned int master, unsigned int segment, unsigned int value) + +{ 
+ unsigned int addr; + + assert(cell < ipu_device_cell_num_devices()); + assert(master < ipu_device_cell_num_masters(cell)); + assert(segment < ipu_device_cell_master_num_segments(cell, master)); + + addr = ipu_device_cell_memory_address(cell, 0); + addr += ipu_device_cell_master_base_reg(cell, master); + addr += segment * ipu_device_cell_master_stride(cell, master); + ia_css_cmem_store_32(ssid, addr, value); +} + +STORAGE_CLASS_INLINE void +ia_css_cell_set_master_info_bits(unsigned int ssid, unsigned int cell, + unsigned int master, unsigned int value) +{ + unsigned int addr, s, stride, num_segments; + + assert(cell < ipu_device_cell_num_devices()); + assert(master < ipu_device_cell_num_masters(cell)); + + addr = ipu_device_cell_memory_address(cell, 0); + addr += ipu_device_cell_master_info_reg(cell, master); + stride = ipu_device_cell_master_stride(cell, master); + num_segments = ipu_device_cell_master_num_segments(cell, master); + for (s = 0; s < num_segments; s++) { + ia_css_cmem_store_32(ssid, addr, value); + addr += stride; + } +} + +STORAGE_CLASS_INLINE void +ia_css_cell_set_master_info_override_bits(unsigned int ssid, unsigned int cell, + unsigned int master, unsigned int value) +{ + unsigned int addr, s, stride, num_segments; + + assert(cell < ipu_device_cell_num_devices()); + assert(master < ipu_device_cell_num_masters(cell)); + + addr = ipu_device_cell_memory_address(cell, 0); + addr += ipu_device_cell_master_info_override_reg(cell, master); + stride = ipu_device_cell_master_stride(cell, master); + num_segments = ipu_device_cell_master_num_segments(cell, master); + for (s = 0; s < num_segments; s++) { + ia_css_cmem_store_32(ssid, addr, value); + addr += stride; + } +} + +STORAGE_CLASS_INLINE void +ia_css_cell_set_master_base_address(unsigned int ssid, unsigned int cell, + unsigned int master, unsigned int value) +{ + unsigned int addr, s, stride, num_segments, segment_size; + + assert(cell < ipu_device_cell_num_devices()); + assert(master < 
ipu_device_cell_num_masters(cell)); + + addr = ipu_device_cell_memory_address(cell, 0); + addr += ipu_device_cell_master_base_reg(cell, master); + stride = ipu_device_cell_master_stride(cell, master); + num_segments = ipu_device_cell_master_num_segments(cell, master); + segment_size = ipu_device_cell_master_segment_size(cell, master); + + for (s = 0; s < num_segments; s++) { + ia_css_cmem_store_32(ssid, addr, value); + addr += stride; + value += segment_size; + } +} + +#endif /* __IA_CSS_CELL_IMPL_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/config/isys/subsystem_cnlB0.mk b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/config/isys/subsystem_cnlB0.mk new file mode 100644 index 0000000000000..4a7ef4f324f34 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/config/isys/subsystem_cnlB0.mk @@ -0,0 +1,75 @@ +# # # +# Support for Intel Camera Imaging ISP subsystem. +# Copyright (c) 2010 - 2018, Intel Corporation. +# +# This program is free software; you can redistribute it and/or modify it +# under the terms and conditions of the GNU General Public License, +# version 2, as published by the Free Software Foundation. +# +# This program is distributed in the hope it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for +# more details +# + +############################################################################ +# This file is used to specify versions and properties of ISYS firmware +# components. Please note that these are subsystem specific. System specific +# properties should go to system_$IPU_SYSVER.mk. Also the device versions +# should be defined under "devices" or should be taken from the SDK. 
+############################################################################ + +############################################################################ +# FIRMWARE RELATED VARIABLES +############################################################################ + +# Activate loading params and storing stats DDR<->REGs with DMA +ISYS_USE_ISA_DMA = 1 +#DMA does not work with AF due to a known bug +DISABLE_AF_STAT_DMA = 1 +# Used in ISA module +ISYS_ISL_DPC_DPC_V2 = 1 + +# Specification for Isys server's fixed globals' locations +REGMEM_OFFSET = 0 # Starting from 0 +REGMEM_SECURE_OFFSET = 4096 +REGMEM_SIZE = 36 +REGMEM_WORD_BYTES = 4 +FW_LOAD_NO_OF_REQUEST_OFFSET = 144 # Taken from REGMEM_OFFSET + REGMEM_SIZE*REGMEM_WORD_BYTES +FW_LOAD_NO_OF_REQUEST_SIZE_BYTES = 4 +# Total Used (@ REGMEM_OFFSET) = 148 # FW_LOAD_NO_OF_REQUEST_OFFSET + FW_LOAD_NO_OF_REQUEST_SIZE_BYTES +# Total Used (@ REGMEM_SECURE_OFFSET) = 144 # FW_LOAD_NO_OF_REQUEST_OFFSET + +# Workarounds: + +# This WA is not to pipeline store frame commands for SID processors that control a Str2Vec (ISA output) +WA_HSD1304553438 = 1 + +# FW workaround for HSD 1404347241. 
Disable clock gating for CSI2 DPHY Receiver ports +DISABLE_CSI2_RX_DPHY_CLK_GATE = 1 + +# Larger than specified frames that complete mid-line +WA_HSD1209062354 = 0 + +# WA to disable clock gating for the devices in the CSI receivers needed for using the mipi_pkt_gen device +WA_HSD1805168877 = 0 + +# Support IBUF soft-reset at stream start +SOFT_RESET_IBUF_STREAM_START_SUPPORT = 0 + +############################################################################ +# TESTING RELATED VARIABLES +############################################################################ + +# Cannot remove this define +# Used in mipi_capture, isys_utils.mk, and stream_controller.mk +ISYS_DISABLE_VERIFY_RECEIVED_SOF_EOF = 0 + +ISYS_ACCESS_BLOCKER_VERSION = v1 + +HAS_SPC = 1 + +# Support dual command context for VTIO - concurrent secure and non-secure streams +ISYS_HAS_DUAL_CMD_CTX_SUPPORT = 1 + +AB_CONFIG_ARRAY_SIZE = 50 diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/config/system_cnlB0.mk b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/config/system_cnlB0.mk new file mode 100644 index 0000000000000..667282b519c4c --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/config/system_cnlB0.mk @@ -0,0 +1,96 @@ +# # # +# Support for Intel Camera Imaging ISP subsystem. +# Copyright (c) 2010 - 2018, Intel Corporation. +# +# This program is free software; you can redistribute it and/or modify it +# under the terms and conditions of the GNU General Public License, +# version 2, as published by the Free Software Foundation. +# +# This program is distributed in the hope it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for +# more details +# + +#--- DEFINES REQUIRED TO COMPILE USING LLVM --- +# Enable LLVM/Volcano for IPU4P, SPs only. 
+VOLCANO_IPU4P = 1 +VOLCANO_SP2601 = 1 +#---------------------------------------------- + +# enable NO_ALIAS for LLVM +ENABLE_NO_ALIAS_FOR_LLVM = 1 + +LOGICAL_FW_INPUT_SYSTEM = input_system_system +LOGICAL_FW_PROCESSING_SYSTEM = processing_system_system +LOGICAL_FW_IPU_SYSTEM = ipu_system +LOGICAL_FW_ISP_SYSTEM = isp2601_default_system +SP_CONTROL_CELL = sp2601_control +SP_PROXY_CELL = sp2601_proxy +ISP_CELL = isp2601 +# The non-capital define isp2601 is used in the sdk, in order to distinguish +# between different isp versions the ISP_CELL_IDENTIFIER define is added. +ISP_CELL_IDENTIFIER = ISP2601 +HAS_IPFD = 1 +HAS_S2M_IN_ISYS_ISL_NONSOC_PATH = 0 +HAS_S2V_IN_ISYS_ISL_NONSOC_PATH = 1 +# ISL-IS non-SoC path has ISA with PAF and DPC-Pext support for IPU4P-B0 +HAS_ISA_IN_ISYS_ISL = 1 +HAS_PAF_IN_ISYS_ISL = 1 +HAS_DPC_PEXT_IN_ISYS_ISL = 1 +HAS_PMA_IF = 1 + +HAS_MIPIBE_IN_PSYS_ISL = 1 + +HAS_VPLESS_SUPPORT = 0 + +DLI_SYSTEM = hive_isp_css_2600_system +RESOURCE_MANAGER_VERSION = v2 +MEM_RESOURCE_VALIDATION_ERROR = 0 +OFS_SCALER_1_4K_TILEY_422_SUPPORT= 1 +PROGDESC_ACC_SYMBOLS_VERSION = v1 +DEVPROXY_INTERFACE_VERSION = v1 +FW_ABI_IPU_TYPES_VERSION = v1 + +HAS_ONLINE_MODE_SUPPORT_IN_ISYS_PSYS = 0 + +MMU_INTERFACE_VERSION = v2 +DEVICE_ACCESS_VERSION = v2 +PSYS_SERVER_VERSION = v3 +PSYS_SERVER_LOADER_VERSION = v1 +PSYS_HW_VERSION = CNL_B0_HW + +# Enable FW_DMA for loading firmware +PSYS_SERVER_ENABLE_FW_LOAD_DMA = 1 + +NCI_SPA_VERSION = v1 +MANIFEST_TOOL_VERSION = v2 +PSYS_CON_MGR_TOOL_VERSION = v1 +# TODO: Should be removed after performance issues OTF are solved +PSYS_PROC_MGR_VERSION = v1 +IPU_RESOURCES_VERSION = v2 + +HAS_ACC_CLUSTER_PAF_PAL = 1 +HAS_ACC_CLUSTER_PEXT_PAL = 1 +HAS_ACC_CLUSTER_GBL_PAL = 1 + +# TODO use version naming scheme "v#" to decouple +# IPU_SYSVER from version. 
+PARAMBINTOOL_ISA_INIT_VERSION = cnlB0 + +# Select EQC2EQ version +# Version 1: uniform address space, equal EQ addresses regardless of EQC device +# Version 2: multiple addresses per EQ, depending on location of EQC device +EQC2EQ_VERSION = v1 + +# Select DMA instance for fw_load +FW_LOAD_DMA_INSTANCE = NCI_DMA_FW + +HAS_DMA_FW = 1 + +HAS_SIS = 0 +HAS_IDS = 1 + +PSYS_SERVER_ENABLE_TPROXY = 1 +PSYS_SERVER_ENABLE_DEVPROXY = 1 +NCI_OFS_VERSION = v1 diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/cpd_binary/ia_css_fw_pkg_release.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/cpd_binary/ia_css_fw_pkg_release.h new file mode 100644 index 0000000000000..408726c817146 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/cpd_binary/ia_css_fw_pkg_release.h @@ -0,0 +1,14 @@ +/* +* Support for Intel Camera Imaging ISP subsystem. +* Copyright (c) 2010 - 2018, Intel Corporation. +* +* This program is free software; you can redistribute it and/or modify it +* under the terms and conditions of the GNU General Public License, +* version 2, as published by the Free Software Foundation. +* +* This program is distributed in the hope it will be useful, but WITHOUT +* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for +* more details. +*/ +#define IA_CSS_FW_PKG_RELEASE 0x20181222 diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/device_access/device_access.mk b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/device_access/device_access.mk new file mode 100644 index 0000000000000..1629d9af803b6 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/device_access/device_access.mk @@ -0,0 +1,40 @@ +# # # +# Support for Intel Camera Imaging ISP subsystem. +# Copyright (c) 2010 - 2018, Intel Corporation. 
+# +# This program is free software; you can redistribute it and/or modify it +# under the terms and conditions of the GNU General Public License, +# version 2, as published by the Free Software Foundation. +# +# This program is distributed in the hope it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for +# more details +# +# + +ifndef _DEVICE_ACCESS_MK_ +_DEVICE_ACCESS_MK_ = 1 + +# DEVICE_ACCESS_VERSION= +include $(MODULES_DIR)/config/system_$(IPU_SYSVER).mk + +DEVICE_ACCESS_DIR=$${MODULES_DIR}/device_access +DEVICE_ACCESS_INTERFACE=$(DEVICE_ACCESS_DIR)/interface +DEVICE_ACCESS_SOURCES=$(DEVICE_ACCESS_DIR)/src + +DEVICE_ACCESS_HOST_FILES = + +DEVICE_ACCESS_FW_FILES = + +DEVICE_ACCESS_HOST_CPPFLAGS = \ + -I$(DEVICE_ACCESS_INTERFACE) \ + -I$(DEVICE_ACCESS_SOURCES) + +DEVICE_ACCESS_FW_CPPFLAGS = \ + -I$(DEVICE_ACCESS_INTERFACE) \ + -I$(DEVICE_ACCESS_SOURCES) + +DEVICE_ACCESS_FW_CPPFLAGS += \ + -I$(DEVICE_ACCESS_SOURCES)/$(DEVICE_ACCESS_VERSION) +endif diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/device_access/interface/ia_css_cmem.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/device_access/interface/ia_css_cmem.h new file mode 100644 index 0000000000000..3dc47c29fcab7 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/device_access/interface/ia_css_cmem.h @@ -0,0 +1,58 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU General Public License for + * more details. +*/ + +#ifndef __IA_CSS_CMEM_H +#define __IA_CSS_CMEM_H + +#include "type_support.h" +#include "storage_class.h" + +#ifdef __VIED_CELL +typedef unsigned int ia_css_cmem_address_t; +#else +#include +typedef vied_subsystem_address_t ia_css_cmem_address_t; +#endif + +STORAGE_CLASS_INLINE uint32_t +ia_css_cmem_load_32(unsigned int ssid, ia_css_cmem_address_t address); + +STORAGE_CLASS_INLINE void +ia_css_cmem_store_32(unsigned int ssid, ia_css_cmem_address_t address, + uint32_t value); + +STORAGE_CLASS_INLINE void +ia_css_cmem_load(unsigned int ssid, ia_css_cmem_address_t address, void *data, + unsigned int size); + +STORAGE_CLASS_INLINE void +ia_css_cmem_store(unsigned int ssid, ia_css_cmem_address_t address, + const void *data, unsigned int size); + +STORAGE_CLASS_INLINE void +ia_css_cmem_zero(unsigned int ssid, ia_css_cmem_address_t address, + unsigned int size); + +STORAGE_CLASS_INLINE ia_css_cmem_address_t +ia_css_cmem_get_cmem_addr_from_dmem(unsigned int base_addr, void *p); + +/* Include inline implementation */ + +#ifdef __VIED_CELL +#include "ia_css_cmem_cell.h" +#else +#include "ia_css_cmem_host.h" +#endif + +#endif /* __IA_CSS_CMEM_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/device_access/interface/ia_css_xmem.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/device_access/interface/ia_css_xmem.h new file mode 100644 index 0000000000000..de2b94d8af541 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/device_access/interface/ia_css_xmem.h @@ -0,0 +1,65 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. 
+ * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ + +#ifndef __IA_CSS_XMEM_H +#define __IA_CSS_XMEM_H + +#include "type_support.h" +#include "storage_class.h" + +#ifdef __VIED_CELL +typedef unsigned int ia_css_xmem_address_t; +#else +#include +typedef host_virtual_address_t ia_css_xmem_address_t; +#endif + +STORAGE_CLASS_INLINE uint8_t +ia_css_xmem_load_8(unsigned int mmid, ia_css_xmem_address_t address); + +STORAGE_CLASS_INLINE uint16_t +ia_css_xmem_load_16(unsigned int mmid, ia_css_xmem_address_t address); + +STORAGE_CLASS_INLINE uint32_t +ia_css_xmem_load_32(unsigned int mmid, ia_css_xmem_address_t address); + +STORAGE_CLASS_INLINE void +ia_css_xmem_load(unsigned int mmid, ia_css_xmem_address_t address, void *data, + unsigned int size); + +STORAGE_CLASS_INLINE void +ia_css_xmem_store_8(unsigned int mmid, ia_css_xmem_address_t address, + uint8_t value); + +STORAGE_CLASS_INLINE void +ia_css_xmem_store_16(unsigned int mmid, ia_css_xmem_address_t address, + uint16_t value); + +STORAGE_CLASS_INLINE void +ia_css_xmem_store_32(unsigned int mmid, ia_css_xmem_address_t address, + uint32_t value); + +STORAGE_CLASS_INLINE void +ia_css_xmem_store(unsigned int mmid, ia_css_xmem_address_t address, + const void *data, unsigned int bytes); + +/* Include inline implementation */ + +#ifdef __VIED_CELL +#include "ia_css_xmem_cell.h" +#else +#include "ia_css_xmem_host.h" +#endif + +#endif /* __IA_CSS_XMEM_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/device_access/interface/ia_css_xmem_cmem.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/device_access/interface/ia_css_xmem_cmem.h new file mode 100644 index 0000000000000..57aab3323c739 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/device_access/interface/ia_css_xmem_cmem.h @@ -0,0 +1,35 @@ 
+/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ + +#ifndef __IA_CSS_XMEM_CMEM_H +#define __IA_CSS_XMEM_CMEM_H + +#include "ia_css_cmem.h" +#include "ia_css_xmem.h" + +/* Copy data from xmem to cmem, e.g., from a program in DDR to a cell's DMEM */ +/* This may also be implemented using DMA */ + +STORAGE_CLASS_INLINE void +ia_css_xmem_to_cmem_copy( + unsigned int mmid, + unsigned int ssid, + ia_css_xmem_address_t src, + ia_css_cmem_address_t dst, + unsigned int size); + +/* include inline implementation */ +#include "ia_css_xmem_cmem_impl.h" + +#endif /* __IA_CSS_XMEM_CMEM_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/device_access/src/ia_css_cmem_host.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/device_access/src/ia_css_cmem_host.h new file mode 100644 index 0000000000000..22799e67214c1 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/device_access/src/ia_css_cmem_host.h @@ -0,0 +1,121 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU General Public License for + * more details. +*/ + +#ifndef __IA_CSS_CMEM_HOST_H +#define __IA_CSS_CMEM_HOST_H + +/* This file is an inline implementation for the interface ia_css_cmem.h + * and should only be included there. */ + +#include "assert_support.h" +#include "misc_support.h" + +STORAGE_CLASS_INLINE uint32_t +ia_css_cmem_load_32(unsigned int ssid, ia_css_cmem_address_t address) +{ + /* Address has to be word aligned */ + assert(0 == address % 4); + return vied_subsystem_load_32(ssid, address); +} + +STORAGE_CLASS_INLINE uint32_t +ia_css_cond_cmem_load_32(bool cond, unsigned int ssid, + ia_css_cmem_address_t address) +{ + /* Address has to be word aligned */ + assert(0 == address % 4); + if (cond) + return vied_subsystem_load_32(ssid, address); + else + return 0; +} + +STORAGE_CLASS_INLINE void +ia_css_cmem_store_32(unsigned int ssid, ia_css_cmem_address_t address, + uint32_t data) +{ + /* Address has to be word aligned */ + assert(0 == address % 4); + vied_subsystem_store_32(ssid, address, data); +} + +STORAGE_CLASS_INLINE void +ia_css_cond_cmem_store_32(bool cond, unsigned int ssid, + ia_css_cmem_address_t address, uint32_t data) +{ + /* Address has to be word aligned */ + assert(0 == address % 4); + if (cond) + vied_subsystem_store_32(ssid, address, data); +} + +STORAGE_CLASS_INLINE void +ia_css_cmem_load(unsigned int ssid, ia_css_cmem_address_t address, void *data, + unsigned int size) +{ + uint32_t *data32 = (uint32_t *)data; + uint32_t end = address + size; + + assert(size % 4 == 0); + assert(address % 4 == 0); + assert((long)data % 4 == 0); + + while (address != end) { + *data32 = ia_css_cmem_load_32(ssid, address); + address += 4; + data32 += 1; + } +} + +STORAGE_CLASS_INLINE void +ia_css_cmem_store(unsigned int ssid, ia_css_cmem_address_t address, + const void *data, unsigned int size) +{ + uint32_t *data32 = (uint32_t *)data; + uint32_t end = address + size; + + assert(size % 4 == 0); + assert(address % 4 == 0); + assert((long)data % 
4 == 0); + + while (address != end) { + ia_css_cmem_store_32(ssid, address, *data32); + address += 4; + data32 += 1; + } +} + +STORAGE_CLASS_INLINE void +ia_css_cmem_zero(unsigned int ssid, ia_css_cmem_address_t address, + unsigned int size) +{ + uint32_t end = address + size; + + assert(size % 4 == 0); + assert(address % 4 == 0); + + while (address != end) { + ia_css_cmem_store_32(ssid, address, 0); + address += 4; + } +} + +STORAGE_CLASS_INLINE ia_css_cmem_address_t +ia_css_cmem_get_cmem_addr_from_dmem(unsigned int base_addr, void *p) +{ + NOT_USED(base_addr); + return (ia_css_cmem_address_t)(uintptr_t)p; +} + +#endif /* __IA_CSS_CMEM_HOST_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/device_access/src/ia_css_xmem_cmem_impl.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/device_access/src/ia_css_xmem_cmem_impl.h new file mode 100644 index 0000000000000..adc178b75059a --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/device_access/src/ia_css_xmem_cmem_impl.h @@ -0,0 +1,79 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+*/ + +#ifndef __IA_CSS_XMEM_CMEM_IMPL_H +#define __IA_CSS_XMEM_CMEM_IMPL_H + +#include "ia_css_xmem_cmem.h" + +#include "ia_css_cmem.h" +#include "ia_css_xmem.h" + +/* Copy data from xmem to cmem, e.g., from a program in DDR to a cell's DMEM */ +/* This may also be implemented using DMA */ + +STORAGE_CLASS_INLINE void +ia_css_xmem_to_cmem_copy( + unsigned int mmid, + unsigned int ssid, + ia_css_xmem_address_t src, + ia_css_cmem_address_t dst, + unsigned int size) +{ + /* copy from ddr to subsystem, e.g., cell dmem */ + ia_css_cmem_address_t end = dst + size; + + assert(size % 4 == 0); + assert((uintptr_t) dst % 4 == 0); + assert((uintptr_t) src % 4 == 0); + + while (dst != end) { + uint32_t data; + + data = ia_css_xmem_load_32(mmid, src); + ia_css_cmem_store_32(ssid, dst, data); + dst += 4; + src += 4; + } +} + +/* Copy data from cmem to xmem */ + +STORAGE_CLASS_INLINE void +ia_css_cmem_to_xmem_copy( + unsigned int mmid, + unsigned int ssid, + ia_css_cmem_address_t src, + ia_css_xmem_address_t dst, + unsigned int size) +{ + /* copy from ddr to subsystem, e.g., cell dmem */ + ia_css_xmem_address_t end = dst + size; + + assert(size % 4 == 0); + assert((uintptr_t) dst % 4 == 0); + assert((uintptr_t) src % 4 == 0); + + while (dst != end) { + uint32_t data; + + data = ia_css_cmem_load_32(mmid, src); + ia_css_xmem_store_32(ssid, dst, data); + dst += 4; + src += 4; + } +} + + +#endif /* __IA_CSS_XMEM_CMEM_IMPL_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/device_access/src/ia_css_xmem_host.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/device_access/src/ia_css_xmem_host.h new file mode 100644 index 0000000000000..d94991fc11143 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/device_access/src/ia_css_xmem_host.h @@ -0,0 +1,84 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. 
+ * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ + +#ifndef __IA_CSS_XMEM_HOST_H +#define __IA_CSS_XMEM_HOST_H + +#include "ia_css_xmem.h" +#include +#include "assert_support.h" +#include + +STORAGE_CLASS_INLINE uint8_t +ia_css_xmem_load_8(unsigned int mmid, ia_css_xmem_address_t address) +{ + return shared_memory_load_8(mmid, address); +} + +STORAGE_CLASS_INLINE uint16_t +ia_css_xmem_load_16(unsigned int mmid, ia_css_xmem_address_t address) +{ + /* Address has to be half-word aligned */ + assert(0 == (uintptr_t) address % 2); + return shared_memory_load_16(mmid, address); +} + +STORAGE_CLASS_INLINE uint32_t +ia_css_xmem_load_32(unsigned int mmid, ia_css_xmem_address_t address) +{ + /* Address has to be word aligned */ + assert(0 == (uintptr_t) address % 4); + return shared_memory_load_32(mmid, address); +} + +STORAGE_CLASS_INLINE void +ia_css_xmem_load(unsigned int mmid, ia_css_xmem_address_t address, void *data, + unsigned int size) +{ + shared_memory_load(mmid, address, data, size); +} + +STORAGE_CLASS_INLINE void +ia_css_xmem_store_8(unsigned int mmid, ia_css_xmem_address_t address, + uint8_t value) +{ + shared_memory_store_8(mmid, address, value); +} + +STORAGE_CLASS_INLINE void +ia_css_xmem_store_16(unsigned int mmid, ia_css_xmem_address_t address, + uint16_t value) +{ + /* Address has to be half-word aligned */ + assert(0 == (uintptr_t) address % 2); + shared_memory_store_16(mmid, address, value); +} + +STORAGE_CLASS_INLINE void +ia_css_xmem_store_32(unsigned int mmid, ia_css_xmem_address_t address, + uint32_t value) +{ + /* Address has to be word 
aligned */ + assert(0 == (uintptr_t) address % 4); + shared_memory_store_32(mmid, address, value); +} + +STORAGE_CLASS_INLINE void +ia_css_xmem_store(unsigned int mmid, ia_css_xmem_address_t address, + const void *data, unsigned int bytes) +{ + shared_memory_store(mmid, address, data, bytes); +} + +#endif /* __IA_CSS_XMEM_HOST_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/devices/interface/cnlB0/ipu_device_buttress_properties_struct.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/devices/interface/cnlB0/ipu_device_buttress_properties_struct.h new file mode 100644 index 0000000000000..5102f6e44d2f6 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/devices/interface/cnlB0/ipu_device_buttress_properties_struct.h @@ -0,0 +1,68 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+*/ + +#ifndef __IPU_DEVICE_BUTTRESS_PROPERTIES_STRUCT_H +#define __IPU_DEVICE_BUTTRESS_PROPERTIES_STRUCT_H + +/* Destination values for master port 0 and bitfield "request_dest" */ +enum cio_M0_btrs_dest { + DEST_IS_BUT_REGS = 0, + DEST_IS_DDR, + RESERVED, + DEST_IS_SUBSYSTEM, + N_BTRS_DEST +}; + +/* Bit-field positions for M0 info bits */ +enum ia_css_info_bits_m0_pos { + IA_CSS_INFO_BITS_M0_SNOOPABLE_POS = 0, + IA_CSS_INFO_BITS_M0_IMR_DESTINED_POS = 1, + IA_CSS_INFO_BITS_M0_REQUEST_DEST_POS = 4 +}; + +#define IA_CSS_INFO_BITS_M0_DDR \ + (DEST_IS_DDR << IA_CSS_INFO_BITS_M0_REQUEST_DEST_POS) +#define IA_CSS_INFO_BITS_M0_SNOOPABLE (1 << IA_CSS_INFO_BITS_M0_SNOOPABLE_POS) + +/* Info bits as expected by the buttress */ +/* Deprecated because bit fields are not portable */ + +/* For master port 0*/ +union cio_M0_t { + struct { + unsigned int snoopable : 1; + unsigned int imr_destined : 1; + unsigned int spare0 : 2; + unsigned int request_dest : 2; + unsigned int spare1 : 26; + } as_bitfield; + unsigned int as_word; +}; + +/* For master port 1*/ +union cio_M1_t { + struct { + unsigned int spare0 : 1; + unsigned int deadline_pointer : 1; + unsigned int reserved : 1; + unsigned int zlw : 1; + unsigned int stream_id : 4; + unsigned int address_swizzling : 1; + unsigned int spare1 : 23; + } as_bitfield; + unsigned int as_word; +}; + + +#endif /* __IPU_DEVICE_BUTTRESS_PROPERTIES_STRUCT_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/devices/interface/ipu_device_cell_properties.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/devices/interface/ipu_device_cell_properties.h new file mode 100644 index 0000000000000..e6e1e9dcbe80c --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/devices/interface/ipu_device_cell_properties.h @@ -0,0 +1,76 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. 
+ * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ + +#ifndef __IPU_DEVICE_CELL_PROPERTIES_H +#define __IPU_DEVICE_CELL_PROPERTIES_H + +#include "storage_class.h" +#include "ipu_device_cell_type_properties.h" + +STORAGE_CLASS_INLINE unsigned int +ipu_device_cell_num_devices(void); + +STORAGE_CLASS_INLINE unsigned int +ipu_device_cell_num_memories(const unsigned int cell_id); + +STORAGE_CLASS_INLINE unsigned int +ipu_device_cell_memory_size(const unsigned int cell_id, + const unsigned int mem_id); + +STORAGE_CLASS_INLINE unsigned int +ipu_device_cell_memory_address(const unsigned int cell_id, + const unsigned int mem_id); + +STORAGE_CLASS_INLINE unsigned int +ipu_device_cell_databus_memory_address(const unsigned int cell_id, + const unsigned int mem_id); + +STORAGE_CLASS_INLINE unsigned int +ipu_device_cell_num_masters(const unsigned int cell_id); + +STORAGE_CLASS_INLINE unsigned int +ipu_device_cell_master_segment_bits(const unsigned int cell_id, + const unsigned int master_id); + +STORAGE_CLASS_INLINE unsigned int +ipu_device_cell_master_num_segments(const unsigned int cell_id, + const unsigned int master_id); + +STORAGE_CLASS_INLINE unsigned int +ipu_device_cell_master_segment_size(const unsigned int cell_id, + const unsigned int master_id); + +STORAGE_CLASS_INLINE unsigned int +ipu_device_cell_master_stride(const unsigned int cell_id, + const unsigned int master_id); + +STORAGE_CLASS_INLINE unsigned int +ipu_device_cell_master_base_reg(const unsigned int cell_id, + const unsigned int master_id); + +STORAGE_CLASS_INLINE unsigned int 
+ipu_device_cell_master_info_reg(const unsigned int cell_id, + const unsigned int master_id); + +STORAGE_CLASS_INLINE unsigned int +ipu_device_cell_icache_align(unsigned int cell_id); + +#ifdef C_RUN +STORAGE_CLASS_INLINE int +ipu_device_cell_id_crun(int cell_id); +#endif + +#include "ipu_device_cell_properties_func.h" + +#endif /* __IPU_DEVICE_CELL_PROPERTIES_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/devices/interface/ipu_device_cell_properties_func.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/devices/interface/ipu_device_cell_properties_func.h new file mode 100644 index 0000000000000..481b0504a2378 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/devices/interface/ipu_device_cell_properties_func.h @@ -0,0 +1,164 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+*/ + +#ifndef __IPU_DEVICE_CELL_PROPERTIES_FUNC_H +#define __IPU_DEVICE_CELL_PROPERTIES_FUNC_H + +/* define properties for all cells uses in ISYS */ + +#include "ipu_device_cell_properties_impl.h" +#include "ipu_device_cell_devices.h" +#include "assert_support.h" +#include "storage_class.h" + +enum {IA_CSS_CELL_MASTER_ADDRESS_WIDTH = 32}; + +STORAGE_CLASS_INLINE unsigned int +ipu_device_cell_num_devices(void) +{ + return NUM_CELLS; +} + +STORAGE_CLASS_INLINE unsigned int +ipu_device_cell_num_memories(const unsigned int cell_id) +{ + assert(cell_id < NUM_CELLS); + return ipu_device_cell_properties[cell_id].type_properties->count-> + num_memories; +} + +STORAGE_CLASS_INLINE unsigned int +ipu_device_cell_memory_size(const unsigned int cell_id, + const unsigned int mem_id) +{ + assert(cell_id < NUM_CELLS); + assert(mem_id < ipu_device_cell_num_memories(cell_id)); + return ipu_device_cell_properties[cell_id].type_properties-> + mem_size[mem_id]; +} + +STORAGE_CLASS_INLINE unsigned int +ipu_device_cell_memory_address(const unsigned int cell_id, + const unsigned int mem_id) +{ + assert(cell_id < NUM_CELLS); + assert(mem_id < ipu_device_cell_num_memories(cell_id)); + return ipu_device_cell_properties[cell_id].mem_address[mem_id]; +} + +STORAGE_CLASS_INLINE unsigned int +ipu_device_cell_databus_memory_address(const unsigned int cell_id, + const unsigned int mem_id) +{ + assert(cell_id < NUM_CELLS); + assert(mem_id < ipu_device_cell_num_memories(cell_id)); + assert(mem_id != 0); + return ipu_device_cell_properties[cell_id].mem_databus_address[mem_id]; +} + +STORAGE_CLASS_INLINE unsigned int +ipu_device_cell_num_masters(const unsigned int cell_id) +{ + assert(cell_id < NUM_CELLS); + return ipu_device_cell_properties[cell_id].type_properties->count-> + num_master_ports; +} + +STORAGE_CLASS_INLINE unsigned int +ipu_device_cell_master_segment_bits(const unsigned int cell_id, + const unsigned int master_id) +{ + assert(cell_id < NUM_CELLS); + assert(master_id < 
ipu_device_cell_num_masters(cell_id)); + return ipu_device_cell_properties[cell_id].type_properties-> + master[master_id].segment_bits; +} + +STORAGE_CLASS_INLINE unsigned int +ipu_device_cell_master_num_segments(const unsigned int cell_id, + const unsigned int master_id) +{ + return 1u << ipu_device_cell_master_segment_bits(cell_id, master_id); +} + +STORAGE_CLASS_INLINE unsigned int +ipu_device_cell_master_segment_size(const unsigned int cell_id, + const unsigned int master_id) +{ + return 1u << (IA_CSS_CELL_MASTER_ADDRESS_WIDTH - + ipu_device_cell_master_segment_bits(cell_id, master_id)); +} + +STORAGE_CLASS_INLINE unsigned int +ipu_device_cell_master_stride(const unsigned int cell_id, + const unsigned int master_id) +{ + assert(cell_id < NUM_CELLS); + assert(master_id < ipu_device_cell_num_masters(cell_id)); + return + ipu_device_cell_properties[cell_id].type_properties-> + master[master_id].stride; +} + +STORAGE_CLASS_INLINE unsigned int +ipu_device_cell_master_base_reg(const unsigned int cell_id, + const unsigned int master_id) +{ + assert(cell_id < NUM_CELLS); + assert(master_id < ipu_device_cell_num_masters(cell_id)); + return + ipu_device_cell_properties[cell_id].type_properties-> + master[master_id].base_address_register; +} + +STORAGE_CLASS_INLINE unsigned int +ipu_device_cell_master_info_reg(const unsigned int cell_id, + const unsigned int master_id) +{ + assert(cell_id < NUM_CELLS); + assert(master_id < ipu_device_cell_num_masters(cell_id)); + return + ipu_device_cell_properties[cell_id].type_properties-> + master[master_id].info_bits_register; +} + +STORAGE_CLASS_INLINE unsigned int +ipu_device_cell_master_info_override_reg(const unsigned int cell_id, + const unsigned int master_id) +{ + assert(cell_id < NUM_CELLS); + assert(master_id < ipu_device_cell_num_masters(cell_id)); + return + ipu_device_cell_properties[cell_id].type_properties-> + master[master_id].info_override_bits_register; +} + +STORAGE_CLASS_INLINE unsigned int 
+ipu_device_cell_icache_align(unsigned int cell_id) +{ + assert(cell_id < NUM_CELLS); + return ipu_device_cell_properties[cell_id].type_properties->count-> + icache_align; +} + +#ifdef C_RUN +STORAGE_CLASS_INLINE int +ipu_device_cell_id_crun(int cell_id) +{ + assert(cell_id < NUM_CELLS); + return ipu_device_map_cell_id_to_crun_proc_id[cell_id]; +} +#endif + +#endif /* __IPU_DEVICE_CELL_PROPERTIES_FUNC_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/devices/interface/ipu_device_cell_properties_struct.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/devices/interface/ipu_device_cell_properties_struct.h new file mode 100644 index 0000000000000..63397dc0b7fe6 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/devices/interface/ipu_device_cell_properties_struct.h @@ -0,0 +1,51 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+*/ + +#ifndef __IPU_DEVICE_CELL_PROPERTIES_STRUCT_H +#define __IPU_DEVICE_CELL_PROPERTIES_STRUCT_H + +/* definitions for all cell types */ + +struct ipu_device_cell_count_s { + unsigned int num_memories; + unsigned int num_master_ports; + unsigned int num_stall_bits; + unsigned int icache_align; +}; + +struct ipu_device_cell_master_properties_s { + unsigned int segment_bits; + unsigned int stride; /* offset to register of next segment */ + unsigned int base_address_register; /* address of first base address + register */ + unsigned int info_bits_register; + unsigned int info_override_bits_register; +}; + +struct ipu_device_cell_type_properties_s { + const struct ipu_device_cell_count_s *count; + const struct ipu_device_cell_master_properties_s *master; + const unsigned int *reg_offset; /* offsets of registers, some depend + on cell type */ + const unsigned int *mem_size; +}; + +struct ipu_device_cell_properties_s { + const struct ipu_device_cell_type_properties_s *type_properties; + const unsigned int *mem_address; + const unsigned int *mem_databus_address; + /* const cell_master_port_properties_s* master_port_properties; */ +}; + +#endif /* __IPU_DEVICE_CELL_PROPERTIES_STRUCT_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/devices/interface/ipu_device_cell_type_properties.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/devices/interface/ipu_device_cell_type_properties.h new file mode 100644 index 0000000000000..72caed3eef0c9 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/devices/interface/ipu_device_cell_type_properties.h @@ -0,0 +1,69 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. 
+ * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ + +#ifndef __IPU_DEVICE_CELL_TYPE_PROPERTIES_H +#define __IPU_DEVICE_CELL_TYPE_PROPERTIES_H + +#define IPU_DEVICE_INVALID_MEM_ADDRESS 0xFFFFFFFF + +enum ipu_device_cell_stat_ctrl_bit { + IPU_DEVICE_CELL_STAT_CTRL_RESET_BIT = 0, + IPU_DEVICE_CELL_STAT_CTRL_START_BIT = 1, + IPU_DEVICE_CELL_STAT_CTRL_RUN_BIT = 3, + IPU_DEVICE_CELL_STAT_CTRL_READY_BIT = 5, + IPU_DEVICE_CELL_STAT_CTRL_SLEEP_BIT = 6, + IPU_DEVICE_CELL_STAT_CTRL_STALL_BIT = 7, + IPU_DEVICE_CELL_STAT_CTRL_CLEAR_IRQ_MASK_FLAG_BIT = 8, + IPU_DEVICE_CELL_STAT_CTRL_BROKEN_IRQ_MASK_FLAG_BIT = 9, + IPU_DEVICE_CELL_STAT_CTRL_READY_IRQ_MASK_FLAG_BIT = 10, + IPU_DEVICE_CELL_STAT_CTRL_SLEEP_IRQ_MASK_FLAG_BIT = 11, + IPU_DEVICE_CELL_STAT_CTRL_INVALIDATE_ICACHE_BIT = 12, + IPU_DEVICE_CELL_STAT_CTRL_ICACHE_ENABLE_PREFETCH_BIT = 13 +}; + +enum ipu_device_cell_reg_addr { + IPU_DEVICE_CELL_STAT_CTRL_REG_ADDRESS = 0x0, + IPU_DEVICE_CELL_START_PC_REG_ADDRESS = 0x4, + IPU_DEVICE_CELL_ICACHE_BASE_REG_ADDRESS = 0x10, + IPU_DEVICE_CELL_ICACHE_INFO_BITS_REG_ADDRESS = 0x14 +}; + +enum ipu_device_cell_reg { + IPU_DEVICE_CELL_STAT_CTRL_REG, + IPU_DEVICE_CELL_START_PC_REG, + IPU_DEVICE_CELL_ICACHE_BASE_REG, + IPU_DEVICE_CELL_DEBUG_PC_REG, + IPU_DEVICE_CELL_STALL_REG, + IPU_DEVICE_CELL_NUM_REGS +}; + +enum ipu_device_cell_mem { + IPU_DEVICE_CELL_REGS, /* memory id of registers */ + IPU_DEVICE_CELL_PMEM, /* memory id of pmem */ + IPU_DEVICE_CELL_DMEM, /* memory id of dmem */ + IPU_DEVICE_CELL_BAMEM, /* memory id of bamem */ + IPU_DEVICE_CELL_VMEM /* memory id of vmem */ +}; +#define IPU_DEVICE_CELL_NUM_MEMORIES (IPU_DEVICE_CELL_VMEM + 1) + +enum ipu_device_cell_master { + IPU_DEVICE_CELL_MASTER_ICACHE, /* master port id of icache */ + IPU_DEVICE_CELL_MASTER_QMEM, + 
IPU_DEVICE_CELL_MASTER_CMEM, + IPU_DEVICE_CELL_MASTER_XMEM, + IPU_DEVICE_CELL_MASTER_XVMEM +}; +#define IPU_DEVICE_CELL_MASTER_NUM_MASTERS (IPU_DEVICE_CELL_MASTER_XVMEM + 1) + +#endif /* __IPU_DEVICE_CELL_TYPE_PROPERTIES_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/devices/isys/cnlB0/ipu_device_cell_devices.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/devices/isys/cnlB0/ipu_device_cell_devices.h new file mode 100644 index 0000000000000..274c9518fd3de --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/devices/isys/cnlB0/ipu_device_cell_devices.h @@ -0,0 +1,27 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ + +#ifndef __IPU_DEVICE_CELL_DEVICES_H +#define __IPU_DEVICE_CELL_DEVICES_H + +/* define cell instances in ISYS */ + +#define SPC0_CELL input_system_unis_logic_sp_control_tile_sp + +enum ipu_device_isys_cell_id { + SPC0 +}; +#define NUM_CELLS (SPC0 + 1) + +#endif /* __IPU_DEVICE_CELL_DEVICES_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/devices/isys/cnlB0/ipu_device_cell_properties_defs.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/devices/isys/cnlB0/ipu_device_cell_properties_defs.h new file mode 100644 index 0000000000000..e811478b7d0f4 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/devices/isys/cnlB0/ipu_device_cell_properties_defs.h @@ -0,0 +1,22 @@ +/* +* Support for Intel Camera Imaging ISP subsystem. +* Copyright (c) 2010 - 2018, Intel Corporation. 
+* +* This program is free software; you can redistribute it and/or modify it +* under the terms and conditions of the GNU General Public License, +* version 2, as published by the Free Software Foundation. +* +* This program is distributed in the hope it will be useful, but WITHOUT +* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for +* more details. +*/ +/* Generated file - please do not edit. */ + +#ifndef _IPU_DEVICE_CELL_PROPERTIES_DEFS_H_ +#define _IPU_DEVICE_CELL_PROPERTIES_DEFS_H_ +#define SPC0_REGS_CBUS_ADDRESS 0x0 +#define SPC0_DMEM_CBUS_ADDRESS 0x8000 +#define SPC0_DMEM_DBUS_ADDRESS 0x8000 +#define SPC0_DMEM_DMA_M0_ADDRESS 0x1010000 +#endif /* _IPU_DEVICE_CELL_PROPERTIES_DEFS_H_ */ diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/devices/isys/cnlB0/ipu_device_cell_properties_impl.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/devices/isys/cnlB0/ipu_device_cell_properties_impl.h new file mode 100644 index 0000000000000..f350ae74b94d6 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/devices/isys/cnlB0/ipu_device_cell_properties_impl.h @@ -0,0 +1,57 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+*/ + +#ifndef __IPU_DEVICE_CELL_PROPERTIES_IMPL_H +#define __IPU_DEVICE_CELL_PROPERTIES_IMPL_H + +/* define properties for all cells used in ISYS */ + +#include "ipu_device_sp2600_control_properties_impl.h" +#include "ipu_device_cell_properties_defs.h" +#include "ipu_device_cell_devices.h" +#include "ipu_device_cell_type_properties.h"/* IPU_DEVICE_INVALID_MEM_ADDRESS */ + +static const unsigned int +ipu_device_spc0_mem_address[IPU_DEVICE_SP2600_CONTROL_NUM_MEMORIES] = { + SPC0_REGS_CBUS_ADDRESS, + IPU_DEVICE_INVALID_MEM_ADDRESS, /* no pmem */ + SPC0_DMEM_CBUS_ADDRESS +}; + +static const unsigned int +ipu_device_spc0_databus_mem_address[IPU_DEVICE_SP2600_CONTROL_NUM_MEMORIES] = { + IPU_DEVICE_INVALID_MEM_ADDRESS, /* regs not accessible from DBUS */ + IPU_DEVICE_INVALID_MEM_ADDRESS, /* no pmem */ + SPC0_DMEM_DBUS_ADDRESS +}; + +static const struct ipu_device_cell_properties_s +ipu_device_cell_properties[NUM_CELLS] = { + { + &ipu_device_sp2600_control_properties, + ipu_device_spc0_mem_address, + ipu_device_spc0_databus_mem_address + } +}; + +#ifdef C_RUN + +/* Mapping between hrt_hive_processors enum and cell_id's used in FW */ +static const int ipu_device_map_cell_id_to_crun_proc_id[NUM_CELLS] = { + 0 /* SPC0 */ +}; + +#endif + +#endif /* __IPU_DEVICE_CELL_PROPERTIES_IMPL_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/devices/src/ipu_device_sp2600_control_properties_impl.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/devices/src/ipu_device_sp2600_control_properties_impl.h new file mode 100644 index 0000000000000..430295cd9d949 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/devices/src/ipu_device_sp2600_control_properties_impl.h @@ -0,0 +1,136 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. 
+ * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ + +#ifndef __IPU_DEVICE_SP2600_CONTROL_PROPERTIES_IMPL_H +#define __IPU_DEVICE_SP2600_CONTROL_PROPERTIES_IMPL_H + +/* sp2600_control definition */ + +#include "ipu_device_cell_properties_struct.h" + +enum ipu_device_sp2600_control_registers { + /* control registers */ + IPU_DEVICE_SP2600_CONTROL_STAT_CTRL = 0x0, + IPU_DEVICE_SP2600_CONTROL_START_PC = 0x4, + + /* master port registers */ + IPU_DEVICE_SP2600_CONTROL_ICACHE_BASE = 0x10, + IPU_DEVICE_SP2600_CONTROL_ICACHE_INFO = 0x14, + IPU_DEVICE_SP2600_CONTROL_ICACHE_INFO_OVERRIDE = 0x18, + + IPU_DEVICE_SP2600_CONTROL_QMEM_BASE = 0x1C, + + IPU_DEVICE_SP2600_CONTROL_CMEM_BASE = 0x28, + IPU_DEVICE_SP2600_CONTROL_CMEM_INFO = 0x2C, + IPU_DEVICE_SP2600_CONTROL_CMEM_INFO_OVERRIDE = 0x30, + + IPU_DEVICE_SP2600_CONTROL_XMEM_BASE = 0x58, + IPU_DEVICE_SP2600_CONTROL_XMEM_INFO = 0x5C, + IPU_DEVICE_SP2600_CONTROL_XMEM_INFO_OVERRIDE = 0x60, + + /* debug registers */ + IPU_DEVICE_SP2600_CONTROL_DEBUG_PC = 0x9C, + IPU_DEVICE_SP2600_CONTROL_STALL = 0xA0 +}; + +enum ipu_device_sp2600_control_mems { + IPU_DEVICE_SP2600_CONTROL_REGS, + IPU_DEVICE_SP2600_CONTROL_PMEM, + IPU_DEVICE_SP2600_CONTROL_DMEM, + IPU_DEVICE_SP2600_CONTROL_NUM_MEMORIES +}; + +static const unsigned int +ipu_device_sp2600_control_mem_size[IPU_DEVICE_SP2600_CONTROL_NUM_MEMORIES] = { + 0x000AC, + 0x00000, + 0x10000 +}; + +enum ipu_device_sp2600_control_masters { + IPU_DEVICE_SP2600_CONTROL_ICACHE, + IPU_DEVICE_SP2600_CONTROL_QMEM, + IPU_DEVICE_SP2600_CONTROL_CMEM, + IPU_DEVICE_SP2600_CONTROL_XMEM, + 
IPU_DEVICE_SP2600_CONTROL_NUM_MASTERS +}; + +static const struct ipu_device_cell_master_properties_s +ipu_device_sp2600_control_masters[IPU_DEVICE_SP2600_CONTROL_NUM_MASTERS] = { + { + 0, + 0xC, + IPU_DEVICE_SP2600_CONTROL_ICACHE_BASE, + IPU_DEVICE_SP2600_CONTROL_ICACHE_INFO, + IPU_DEVICE_SP2600_CONTROL_ICACHE_INFO_OVERRIDE + }, + { + 0, + 0xC, + IPU_DEVICE_SP2600_CONTROL_QMEM_BASE, + 0xFFFFFFFF, + 0xFFFFFFFF + }, + { + 2, + 0xC, + IPU_DEVICE_SP2600_CONTROL_CMEM_BASE, + IPU_DEVICE_SP2600_CONTROL_CMEM_INFO, + IPU_DEVICE_SP2600_CONTROL_CMEM_INFO_OVERRIDE + }, + { + 2, + 0xC, + IPU_DEVICE_SP2600_CONTROL_XMEM_BASE, + IPU_DEVICE_SP2600_CONTROL_XMEM_INFO, + IPU_DEVICE_SP2600_CONTROL_XMEM_INFO_OVERRIDE + } +}; + +enum ipu_device_sp2600_control_stall_bits { + IPU_DEVICE_SP2600_CONTROL_STALL_ICACHE, + IPU_DEVICE_SP2600_CONTROL_STALL_DMEM, + IPU_DEVICE_SP2600_CONTROL_STALL_QMEM, + IPU_DEVICE_SP2600_CONTROL_STALL_CMEM, + IPU_DEVICE_SP2600_CONTROL_STALL_XMEM, + IPU_DEVICE_SP2600_CONTROL_NUM_STALL_BITS +}; + +/* 32 bits per instruction */ +#define IPU_DEVICE_SP2600_CONTROL_ICACHE_WORD_SIZE 4 +/* 32 instructions per burst */ +#define IPU_DEVICE_SP2600_CONTROL_ICACHE_BURST_SIZE 32 + +static const struct ipu_device_cell_count_s ipu_device_sp2600_control_count = { + IPU_DEVICE_SP2600_CONTROL_NUM_MEMORIES, + IPU_DEVICE_SP2600_CONTROL_NUM_MASTERS, + IPU_DEVICE_SP2600_CONTROL_NUM_STALL_BITS, + IPU_DEVICE_SP2600_CONTROL_ICACHE_WORD_SIZE * + IPU_DEVICE_SP2600_CONTROL_ICACHE_BURST_SIZE +}; + +static const unsigned int +ipu_device_sp2600_control_reg_offset[/* CELL_NUM_REGS */] = { + 0x0, 0x4, 0x10, 0x9C, 0xA0 +}; + +static const struct ipu_device_cell_type_properties_s +ipu_device_sp2600_control_properties = { + &ipu_device_sp2600_control_count, + ipu_device_sp2600_control_masters, + ipu_device_sp2600_control_reg_offset, + ipu_device_sp2600_control_mem_size +}; + +#endif /* __IPU_DEVICE_SP2600_CONTROL_PROPERTIES_IMPL_H */ diff --git 
a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/fw_abi_common_types/cpu/fw_abi_cpu_types.mk b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/fw_abi_common_types/cpu/fw_abi_cpu_types.mk new file mode 100644 index 0000000000000..b1ffbf7ea21ff --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/fw_abi_common_types/cpu/fw_abi_cpu_types.mk @@ -0,0 +1,24 @@ +# # # +# Support for Intel Camera Imaging ISP subsystem. +# Copyright (c) 2010 - 2018, Intel Corporation. +# +# This program is free software; you can redistribute it and/or modify it +# under the terms and conditions of the GNU General Public License, +# version 2, as published by the Free Software Foundation. +# +# This program is distributed in the hope it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for +# more details +# + +# MODULE is FW ABI COMMON TYPES + +FW_ABI_COMMON_TYPES_DIRS = -I$${MODULES_DIR}/fw_abi_common_types +FW_ABI_COMMON_TYPES_DIRS += -I$${MODULES_DIR}/fw_abi_common_types/cpu + +FW_ABI_COMMON_TYPES_HOST_FILES = +FW_ABI_COMMON_TYPES_HOST_CPPFLAGS = $(FW_ABI_COMMON_TYPES_DIRS) + +FW_ABI_COMMON_TYPES_FW_FILES = +FW_ABI_COMMON_TYPES_FW_CPPFLAGS = $(FW_ABI_COMMON_TYPES_DIRS) diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/fw_abi_common_types/cpu/ia_css_terminal_base_types.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/fw_abi_common_types/cpu/ia_css_terminal_base_types.h new file mode 100644 index 0000000000000..21cc3f43f485e --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/fw_abi_common_types/cpu/ia_css_terminal_base_types.h @@ -0,0 +1,42 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. 
+ * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ + +#ifndef __IA_CSS_TERMINAL_BASE_TYPES_H +#define __IA_CSS_TERMINAL_BASE_TYPES_H + + +#include "type_support.h" +#include "ia_css_terminal_defs.h" + +#define N_UINT16_IN_TERMINAL_STRUCT 3 +#define N_PADDING_UINT8_IN_TERMINAL_STRUCT 5 + +#define SIZE_OF_TERMINAL_STRUCT_BITS \ + (IA_CSS_TERMINAL_TYPE_BITS \ + + IA_CSS_TERMINAL_ID_BITS \ + + N_UINT16_IN_TERMINAL_STRUCT * IA_CSS_UINT16_T_BITS \ + + N_PADDING_UINT8_IN_TERMINAL_STRUCT * IA_CSS_UINT8_T_BITS) + +/* ==================== Base Terminal - START ==================== */ +struct ia_css_terminal_s { /**< Base terminal */ + ia_css_terminal_type_t terminal_type; /**< Type ia_css_terminal_type_t */ + int16_t parent_offset; /**< Offset to the process group */ + uint16_t size; /**< Size of this whole terminal layout-structure */ + uint16_t tm_index; /**< Index of the terminal manifest object */ + ia_css_terminal_ID_t ID; /**< Absolute referal ID for this terminal, valid ID's != 0 */ + uint8_t padding[N_PADDING_UINT8_IN_TERMINAL_STRUCT]; +}; +/* ==================== Base Terminal - END ==================== */ + +#endif /* __IA_CSS_TERMINAL_BASE_TYPES_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/fw_abi_common_types/cpu/ia_css_terminal_manifest_base_types.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/fw_abi_common_types/cpu/ia_css_terminal_manifest_base_types.h new file mode 100644 index 0000000000000..056e1b6d5d4bd --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/fw_abi_common_types/cpu/ia_css_terminal_manifest_base_types.h 
@@ -0,0 +1,42 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ + +#ifndef __IA_CSS_TERMINAL_MANIFEST_BASE_TYPES_H +#define __IA_CSS_TERMINAL_MANIFEST_BASE_TYPES_H + +#include "ia_css_terminal_defs.h" + +#define N_PADDING_UINT8_IN_TERMINAL_MAN_STRUCT 5 +#define SIZE_OF_TERMINAL_MANIFEST_STRUCT_IN_BITS \ + (IA_CSS_UINT16_T_BITS \ + + IA_CSS_TERMINAL_ID_BITS \ + + IA_CSS_TERMINAL_TYPE_BITS \ + + IA_CSS_UINT32_T_BITS \ + + (N_PADDING_UINT8_IN_TERMINAL_MAN_STRUCT*IA_CSS_UINT8_T_BITS)) + +/* ==================== Base Terminal Manifest - START ==================== */ +struct ia_css_terminal_manifest_s { + ia_css_terminal_type_t terminal_type; /**< Type ia_css_terminal_type_t */ + int16_t parent_offset; /**< Offset to the program group manifest */ + uint16_t size; /**< Size of this whole terminal-manifest layout-structure */ + ia_css_terminal_ID_t ID; + uint8_t padding[N_PADDING_UINT8_IN_TERMINAL_MAN_STRUCT]; +}; + +typedef struct ia_css_terminal_manifest_s + ia_css_terminal_manifest_t; + +/* ==================== Base Terminal Manifest - END ==================== */ + +#endif /* __IA_CSS_TERMINAL_MANIFEST_BASE_TYPES_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/fw_abi_common_types/ia_css_base_types.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/fw_abi_common_types/ia_css_base_types.h new file mode 100644 index 0000000000000..3b80a17a6ad38 --- /dev/null +++ 
b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/fw_abi_common_types/ia_css_base_types.h @@ -0,0 +1,38 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ + +#ifndef __IA_CSS_BASE_TYPES_H +#define __IA_CSS_BASE_TYPES_H + +#include "type_support.h" + +#define VIED_VADDRESS_BITS 32 +typedef uint32_t vied_vaddress_t; + +#define DEVICE_DESCRIPTOR_ID_BITS 32 +typedef struct { + uint8_t device_id; + uint8_t instance_id; + uint8_t channel_id; + uint8_t section_id; +} device_descriptor_fields_t; + +typedef union { + device_descriptor_fields_t fields; + uint32_t data; +} device_descriptor_id_t; + +typedef uint16_t ia_css_process_id_t; + +#endif /* __IA_CSS_BASE_TYPES_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/fw_abi_common_types/ia_css_terminal_defs.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/fw_abi_common_types/ia_css_terminal_defs.h new file mode 100644 index 0000000000000..dbf1cf93756ff --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/fw_abi_common_types/ia_css_terminal_defs.h @@ -0,0 +1,105 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. 
+ * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ + +#ifndef __IA_CSS_TERMINAL_DEFS_H +#define __IA_CSS_TERMINAL_DEFS_H + + +#include "type_support.h" + +#define IA_CSS_TERMINAL_ID_BITS 8 +typedef uint8_t ia_css_terminal_ID_t; +#define IA_CSS_TERMINAL_INVALID_ID ((ia_css_terminal_ID_t)(-1)) + +/* + * Terminal Base Type + */ +typedef enum ia_css_terminal_type { + /**< Data input */ + IA_CSS_TERMINAL_TYPE_DATA_IN = 0, + /**< Data output */ + IA_CSS_TERMINAL_TYPE_DATA_OUT, + /**< Type 6 parameter input */ + IA_CSS_TERMINAL_TYPE_PARAM_STREAM, + /**< Type 1-5 parameter input */ + IA_CSS_TERMINAL_TYPE_PARAM_CACHED_IN, + /**< Type 1-5 parameter output */ + IA_CSS_TERMINAL_TYPE_PARAM_CACHED_OUT, + /**< Represent the new type of terminal for the + * "spatial dependent parameters", when params go in + */ + IA_CSS_TERMINAL_TYPE_PARAM_SPATIAL_IN, + /**< Represent the new type of terminal for the + * "spatial dependent parameters", when params go out + */ + IA_CSS_TERMINAL_TYPE_PARAM_SPATIAL_OUT, + /**< Represent the new type of terminal for the + * explicit slicing, when params go in + */ + IA_CSS_TERMINAL_TYPE_PARAM_SLICED_IN, + /**< Represent the new type of terminal for the + * explicit slicing, when params go out + */ + IA_CSS_TERMINAL_TYPE_PARAM_SLICED_OUT, + /**< State (private data) input */ + IA_CSS_TERMINAL_TYPE_STATE_IN, + /**< State (private data) output */ + IA_CSS_TERMINAL_TYPE_STATE_OUT, + IA_CSS_TERMINAL_TYPE_PROGRAM, + IA_CSS_TERMINAL_TYPE_PROGRAM_CONTROL_INIT, + IA_CSS_N_TERMINAL_TYPES +} ia_css_terminal_type_t; + +#define IA_CSS_TERMINAL_TYPE_BITS 32 + +/* Temporary redirection needed to facilitate merging with the drivers + in a backwards compatible manner */ +#define IA_CSS_TERMINAL_TYPE_PARAM_CACHED IA_CSS_TERMINAL_TYPE_PARAM_CACHED_IN + +/* + * Dimensions 
of the data objects. Note that a C-style + * data order is assumed. Data stored by row. + */ +typedef enum ia_css_dimension { + /**< The number of columns, i.e. the size of the row */ + IA_CSS_COL_DIMENSION = 0, + /**< The number of rows, i.e. the size of the column */ + IA_CSS_ROW_DIMENSION = 1, + IA_CSS_N_DATA_DIMENSION = 2 +} ia_css_dimension_t; + +#define IA_CSS_N_COMMAND_COUNT (4) + +#ifndef PIPE_GENERATION +/* Don't include these complex enum structures in Genpipe, it can't handle and it does not need them */ +/* + * enum ia_css_isys_link_id. Lists the link IDs used by the FW for On The Fly feature + */ +typedef enum ia_css_isys_link_id { + IA_CSS_ISYS_LINK_OFFLINE = 0, + IA_CSS_ISYS_LINK_MAIN_OUTPUT = 1, + IA_CSS_ISYS_LINK_PDAF_OUTPUT = 2 +} ia_css_isys_link_id_t; +#define N_IA_CSS_ISYS_LINK_ID (IA_CSS_ISYS_LINK_PDAF_OUTPUT + 1) + +/* + * enum ia_css_data_barrier_link_id. Lists the link IDs used by the FW for data barrier feature + */ +typedef enum ia_css_data_barrier_link_id { + IA_CSS_DATA_BARRIER_LINK_MEMORY = N_IA_CSS_ISYS_LINK_ID, + N_IA_CSS_DATA_BARRIER_LINK_ID +} ia_css_data_barrier_link_id_t; + +#endif /* #ifndef PIPE_GENERATION */ +#endif /* __IA_CSS_TERMINAL_DEFS_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/isysapi/interface/ia_css_isys_fw_bridged_types.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/isysapi/interface/ia_css_isys_fw_bridged_types.h new file mode 100644 index 0000000000000..5e47fe7026bd7 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/isysapi/interface/ia_css_isys_fw_bridged_types.h @@ -0,0 +1,402 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. 
+ * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ + +#ifndef __IA_CSS_ISYS_FW_BRIDGED_TYPES_H +#define __IA_CSS_ISYS_FW_BRIDGED_TYPES_H + +#include "platform_support.h" + +#include "ia_css_isysapi_fw_types.h" + +/** + * struct ia_css_isys_buffer_partition_comm - buffer partition information + * @num_gda_pages: Number of virtual gda pages available for each + * virtual stream + */ +struct ia_css_isys_buffer_partition_comm { + aligned_uint32(unsigned int, num_gda_pages[STREAM_ID_MAX]); +}; + +/** + * struct ia_css_isys_fw_config - contains the parts from + * ia_css_isys_device_cfg_data + * we need to transfer to the cell + * @num_send_queues: Number of send queues per queue + * type(N_IA_CSS_ISYS_QUEUE_TYPE) + * @num_recv_queues: Number of receive queues per queue + * type(N_IA_CSS_ISYS_QUEUE_TYPE) + */ +struct ia_css_isys_fw_config { + aligned_struct(struct ia_css_isys_buffer_partition_comm, + buffer_partition); + aligned_uint32(unsigned int, + num_send_queues[N_IA_CSS_ISYS_QUEUE_TYPE]); + aligned_uint32(unsigned int, + num_recv_queues[N_IA_CSS_ISYS_QUEUE_TYPE]); +}; + +/** + * struct ia_css_isys_resolution_comm: Generic resolution structure. 
+ * @Width + * @Height + */ +struct ia_css_isys_resolution_comm { + aligned_uint32(unsigned int, width); + aligned_uint32(unsigned int, height); +}; + +/** + * struct ia_css_isys_output_pin_payload_comm + * @out_buf_id: Points to output pin buffer - buffer identifier + * @addr: Points to output pin buffer - CSS Virtual Address + * @compress: Request frame compression (1), or not (0) + * This must be the same as ia_css_isys_output_pin_info_comm::reserve_compression + */ +struct ia_css_isys_output_pin_payload_comm { + aligned_uint64(ia_css_return_token, out_buf_id); + aligned_uint32(ia_css_output_buffer_css_address, addr); + aligned_uint32(unsigned int, compress); +}; + +/** + * struct ia_css_isys_output_pin_info_comm + * @input_pin_id: input pin id/index which is source of + * the data for this output pin + * @output_res: output pin resolution + * @stride: output stride in Bytes (not valid for statistics) + * @watermark_in_lines: pin watermark level in lines + * @payload_buf_size: Size in Bytes of all buffers that will be supplied for capture + * on this pin (i.e. addressed by ia_css_isys_output_pin_payload::addr) + * @send_irq: assert if pin event should trigger irq + * @pt: pin type + * @ft: frame format type + * @link_id: identifies PPG to connect to, link_id = 0 implies offline + * while link_id > 0 implies buffer_chasing or online mode + * can be entered. + * @reserve_compression: Reserve compression resources for pin. 
+ */ +struct ia_css_isys_output_pin_info_comm { + aligned_struct(struct ia_css_isys_resolution_comm, output_res); + aligned_uint32(unsigned int, stride); + aligned_uint32(unsigned int, watermark_in_lines); + aligned_uint32(unsigned int, payload_buf_size); + aligned_uint8(unsigned int, send_irq); + aligned_uint8(unsigned int, input_pin_id); + aligned_uint8(enum ia_css_isys_pin_type, pt); + aligned_uint8(enum ia_css_isys_frame_format_type, ft); + aligned_uint8(enum ia_css_isys_link_id, link_id); + aligned_uint8(unsigned int, reserve_compression); +}; + +/** + * struct ia_css_isys_param_pin_comm + * @param_buf_id: Points to param port buffer - buffer identifier + * @addr: Points to param pin buffer - CSS Virtual Address + */ +struct ia_css_isys_param_pin_comm { + aligned_uint64(ia_css_return_token, param_buf_id); + aligned_uint32(ia_css_input_buffer_css_address, addr); +}; + +/** + * struct ia_css_isys_input_pin_info_comm + * @input_res: input resolution + * @dt: mipi data type + * @mipi_store_mode: defines if legacy long packet header will be stored or + * hdiscarded if discarded, output pin pin type for this + * input pin can only be MIPI + * @bits_per_pix: native bits per pixel + * @dt_rename: mapped_dt + */ +struct ia_css_isys_input_pin_info_comm { + aligned_struct(struct ia_css_isys_resolution_comm, input_res); + aligned_uint8(enum ia_css_isys_mipi_data_type, dt); + aligned_uint8(enum ia_css_isys_mipi_store_mode, mipi_store_mode); + aligned_uint8(unsigned int, bits_per_pix); + aligned_uint8(unsigned int, mapped_dt); +}; + +/** + * ISA configuration fields, definition and macros + */ +#define ISA_CFG_FIELD_BLC_EN_LEN 1 +#define ISA_CFG_FIELD_BLC_EN_SHIFT 0 + +#define ISA_CFG_FIELD_LSC_EN_LEN 1 +#define ISA_CFG_FIELD_LSC_EN_SHIFT 1 + +#define ISA_CFG_FIELD_DPC_EN_LEN 1 +#define ISA_CFG_FIELD_DPC_EN_SHIFT 2 + +#define ISA_CFG_FIELD_DOWNSCALER_EN_LEN 1 +#define ISA_CFG_FIELD_DOWNSCALER_EN_SHIFT 3 + +#define ISA_CFG_FIELD_AWB_EN_LEN 1 +#define 
ISA_CFG_FIELD_AWB_EN_SHIFT 4 + +#define ISA_CFG_FIELD_AF_EN_LEN 1 +#define ISA_CFG_FIELD_AF_EN_SHIFT 5 + +#define ISA_CFG_FIELD_AE_EN_LEN 1 +#define ISA_CFG_FIELD_AE_EN_SHIFT 6 + +#define ISA_CFG_FIELD_PAF_TYPE_LEN 8 +#define ISA_CFG_FIELD_PAF_TYPE_SHIFT 7 + +#define ISA_CFG_FIELD_SEND_IRQ_STATS_READY_LEN 1 +#define ISA_CFG_FIELD_SEND_IRQ_STATS_READY_SHIFT 15 + +#define ISA_CFG_FIELD_SEND_RESP_STATS_READY_LEN 1 +#define ISA_CFG_FIELD_SEND_RESP_STATS_READY_SHIFT 16 + +/* Helper macros */ +#define ISA_CFG_GET_MASK_FROM_LEN(len) ((1 << (len)) - 1) +#define ISA_CFG_GET_MASK_FROM_TAG(tag) \ + (ISA_CFG_GET_MASK_FROM_LEN(ISA_CFG_FIELD_##tag##_LEN)) +#define ISA_CFG_GET_SHIFT_FROM_TAG(tag) \ + (ISA_CFG_FIELD_##tag##_SHIFT) +/* Get/Set macros */ +#define ISA_CFG_FIELD_GET(tag, word) \ + ( \ + ((word) >> (ISA_CFG_GET_SHIFT_FROM_TAG(tag))) &\ + ISA_CFG_GET_MASK_FROM_TAG(tag) \ + ) +#define ISA_CFG_FIELD_SET(tag, word, value) \ + word |= ( \ + ((value) & ISA_CFG_GET_MASK_FROM_TAG(tag)) << \ + ISA_CFG_GET_SHIFT_FROM_TAG(tag) \ + ) + +/** + * struct ia_css_isys_isa_cfg_comm. 
Describes the ISA cfg + */ +struct ia_css_isys_isa_cfg_comm { + aligned_struct(struct ia_css_isys_resolution_comm, + isa_res[N_IA_CSS_ISYS_RESOLUTION_INFO]); + aligned_uint32(/* multi-field packing */, cfg_fields); +}; + + /** + * struct ia_css_isys_cropping_comm - cropping coordinates + */ +struct ia_css_isys_cropping_comm { + aligned_int32(int, top_offset); + aligned_int32(int, left_offset); + aligned_int32(int, bottom_offset); + aligned_int32(int, right_offset); +}; + + /** + * struct ia_css_isys_stream_cfg_data_comm + * ISYS stream configuration data structure + * @isa_cfg: details about what ACCs are active if ISA is used + * @crop: defines cropping resolution for the + * maximum number of input pins which can be cropped, + * it is directly mapped to the HW devices + * @input_pins: input pin descriptors + * @output_pins: output pin descriptors + * @compfmt: de-compression setting for User Defined Data + * @nof_input_pins: number of input pins + * @nof_output_pins: number of output pins + * @send_irq_sof_discarded: send irq on discarded frame sof response + * - if '1' it will override the send_resp_sof_discarded and send + * the response + * - if '0' the send_resp_sof_discarded will determine whether to + * send the response + * @send_irq_eof_discarded: send irq on discarded frame eof response + * - if '1' it will override the send_resp_eof_discarded and send + * the response + * - if '0' the send_resp_eof_discarded will determine whether to + * send the response + * @send_resp_sof_discarded: send response for discarded frame sof detected, + * used only when send_irq_sof_discarded is '0' + * @send_resp_eof_discarded: send response for discarded frame eof detected, + * used only when send_irq_eof_discarded is '0' + * @src: Stream source index e.g. 
MIPI_generator_0, CSI2-rx_1 + * @vc: MIPI Virtual Channel (up to 4 virtual per physical channel) + * @isl_use: indicates whether stream requires ISL and how + */ +struct ia_css_isys_stream_cfg_data_comm { + aligned_struct(struct ia_css_isys_isa_cfg_comm, isa_cfg); + aligned_struct(struct ia_css_isys_cropping_comm, + crop[N_IA_CSS_ISYS_CROPPING_LOCATION]); + aligned_struct(struct ia_css_isys_input_pin_info_comm, + input_pins[MAX_IPINS]); + aligned_struct(struct ia_css_isys_output_pin_info_comm, + output_pins[MAX_OPINS]); + aligned_uint32(unsigned int, compfmt); + aligned_uint8(unsigned int, nof_input_pins); + aligned_uint8(unsigned int, nof_output_pins); + aligned_uint8(unsigned int, send_irq_sof_discarded); + aligned_uint8(unsigned int, send_irq_eof_discarded); + aligned_uint8(unsigned int, send_resp_sof_discarded); + aligned_uint8(unsigned int, send_resp_eof_discarded); + aligned_uint8(enum ia_css_isys_stream_source, src); + aligned_uint8(enum ia_css_isys_mipi_vc, vc); + aligned_uint8(enum ia_css_isys_isl_use, isl_use); +}; + +/** + * struct ia_css_isys_frame_buff_set - frame buffer set + * @output_pins: output pin addresses + * @process_group_light: process_group_light buffer address + * @send_irq_sof: send irq on frame sof response + * - if '1' it will override the send_resp_sof and send the + * response + * - if '0' the send_resp_sof will determine whether to send the + * response + * @send_irq_eof: send irq on frame eof response + * - if '1' it will override the send_resp_eof and send the + * response + * - if '0' the send_resp_eof will determine whether to send the + * response + * @send_resp_sof: send response for frame sof detected, used only when + * send_irq_sof is '0' + * @send_resp_eof: send response for frame eof detected, used only when + * send_irq_eof is '0' + * @frame_counter: frame number associated with this buffer set. 
+ */ +struct ia_css_isys_frame_buff_set_comm { + aligned_struct(struct ia_css_isys_output_pin_payload_comm, + output_pins[MAX_OPINS]); + aligned_struct(struct ia_css_isys_param_pin_comm, process_group_light); + aligned_uint8(unsigned int, send_irq_sof); + aligned_uint8(unsigned int, send_irq_eof); + aligned_uint8(unsigned int, send_irq_capture_ack); + aligned_uint8(unsigned int, send_irq_capture_done); + aligned_uint8(unsigned int, send_resp_sof); + aligned_uint8(unsigned int, send_resp_eof); + aligned_uint8(unsigned int, frame_counter); +}; + +/** + * struct ia_css_isys_error_info_comm + * @error: error code if something went wrong + * @error_details: depending on error code, it may contain additional + * error info + */ +struct ia_css_isys_error_info_comm { + aligned_enum(enum ia_css_isys_error, error); + aligned_uint32(unsigned int, error_details); +}; + +/** + * struct ia_css_isys_resp_info_comm + * @pin: this var is only valid for pin event related responses, + * contains pin addresses + * @process_group_light: this var is valid for stats ready related responses, + * contains process group addresses + * @error_info: error information from the FW + * @timestamp: Time information for event if available + * @stream_handle: stream id the response corresponds to + * @type: response type + * @pin_id: pin id that the pin payload corresponds to + * @acc_id: this var is valid for stats ready related responses, + * contains accelerator id that finished producing + * all related statistics + * @frame_counter: valid for STREAM_START_AND_CAPTURE_DONE, + * STREAM_CAPTURE_DONE and STREAM_CAPTURE_DISCARDED, + * @written_direct: indicates if frame was written direct (online mode) or not. 
+ * + */ + +struct ia_css_isys_resp_info_comm { + aligned_uint64(ia_css_return_token, buf_id); /* Used internally only */ + aligned_struct(struct ia_css_isys_output_pin_payload_comm, pin); + aligned_struct(struct ia_css_isys_param_pin_comm, process_group_light); + aligned_struct(struct ia_css_isys_error_info_comm, error_info); + aligned_uint32(unsigned int, timestamp[2]); + aligned_uint8(unsigned int, stream_handle); + aligned_uint8(enum ia_css_isys_resp_type, type); + aligned_uint8(unsigned int, pin_id); + aligned_uint8(unsigned int, acc_id); + aligned_uint8(unsigned int, frame_counter); + aligned_uint8(unsigned int, written_direct); +}; + +/** + * struct ia_css_isys_proxy_error_info_comm + * @proxy_error: error code if something went wrong + * @proxy_error_details: depending on error code, it may contain additional + * error info + */ +struct ia_css_isys_proxy_error_info_comm { + aligned_enum(enum ia_css_proxy_error, error); + aligned_uint32(unsigned int, error_details); +}; + +/** + * struct ia_css_isys_proxy_resp_info_comm + * @request_id: Unique identifier for the write request + * (in case multiple write requests are issued for same register) + * @error_info: details in struct definition + */ +struct ia_css_isys_proxy_resp_info_comm { + aligned_uint32(uint32_t, request_id); + aligned_struct(struct ia_css_isys_proxy_error_info_comm, error_info); +}; + +/** + * struct ia_css_proxy_write_queue_token + * @request_id: update id for the specific proxy write request + * @region_index: Region id for the proxy write request + * @offset: Offset of the write request according to the base address of the + * region + * @value: Value that is requested to be written with the proxy write request + */ +struct ia_css_proxy_write_queue_token { + aligned_uint32(uint32_t, request_id); + aligned_uint32(uint32_t, region_index); + aligned_uint32(uint32_t, offset); + aligned_uint32(uint32_t, value); +}; + +/* From here on type defines not coming from the ISYSAPI interface */ + +/** + 
* struct resp_queue_token + */ +struct resp_queue_token { + aligned_struct(struct ia_css_isys_resp_info_comm, resp_info); +}; + +/** + * struct send_queue_token + */ +struct send_queue_token { + aligned_uint64(ia_css_return_token, buf_handle); + aligned_uint32(ia_css_input_buffer_css_address, payload); + aligned_uint16(enum ia_css_isys_send_type, send_type); + aligned_uint16(unsigned int, stream_id); +}; + +/** + * struct proxy_resp_queue_token + */ +struct proxy_resp_queue_token { + aligned_struct(struct ia_css_isys_proxy_resp_info_comm, + proxy_resp_info); +}; + +/** + * struct proxy_send_queue_token + */ +struct proxy_send_queue_token { + aligned_uint32(uint32_t, request_id); + aligned_uint32(uint32_t, region_index); + aligned_uint32(uint32_t, offset); + aligned_uint32(uint32_t, value); +}; + +#endif /* __IA_CSS_ISYS_FW_BRIDGED_TYPES_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/isysapi/interface/ia_css_isysapi.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/isysapi/interface/ia_css_isysapi.h new file mode 100644 index 0000000000000..abbc8b8d26ed8 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/isysapi/interface/ia_css_isysapi.h @@ -0,0 +1,321 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+*/ + +#ifndef __IA_CSS_ISYSAPI_H +#define __IA_CSS_ISYSAPI_H + + +/* The following is needed for the function arguments */ +#include "ia_css_isysapi_types.h" + +/* To define the HANDLE */ +#include "type_support.h" + + +/** + * ia_css_isys_device_open() - configure ISYS device + * @ context : device handle output parameter + * @config: device configuration data struct ptr as input parameter, + * read only by css fw until function return + * Ownership, ISYS will only access read my_device during fct call + * Prepares and Sends to PG server (SP) the syscom and isys context + * Executes the host level 0 and 1 boot sequence and starts the PG server (SP) + * All streams must be stopped when calling ia_css_isys_device_open() + * + * Return: int type error code (errno.h) + */ +#if HAS_DUAL_CMD_CTX_SUPPORT +extern int ia_css_isys_context_create( + HANDLE * context, + const struct ia_css_isys_device_cfg_data *config +); +extern int ia_css_isys_context_store_dmem( + const HANDLE *context, + const struct ia_css_isys_device_cfg_data *config +); +extern bool ia_css_isys_ab_spc_ready( + HANDLE *context +); +extern int ia_css_isys_device_open( + const struct ia_css_isys_device_cfg_data *config +); +#else +extern int ia_css_isys_device_open( + HANDLE * context, + const struct ia_css_isys_device_cfg_data *config +); +#endif + +/** + * ia_css_isys_device_open_ready() - Complete ISYS device configuration + * @ context : device handle output parameter + * read only by css fw until function return + * Requires the boot failure to be completed before it can return + * successfully (includes syscom and isys context) + * Initialise Host/ISYS messaging queues + * Must be called multiple times until it succeeds or it is determined by + * the driver that the boot seuqence has failed. 
+ * All streams must be stopped when calling ia_css_isys_device_open() + * + * Return: int type error code (errno.h) + */ +extern int ia_css_isys_device_open_ready( + HANDLE context +); + + /** + * ia_css_isys_stream_open() - open and configure a virtual stream + * @ stream_handle: stream handle + * @ stream_cfg: stream configuration data struct pointer, which is + * "read only" by ISYS until function return + * ownership, ISYS will only read access stream_cfg during fct call + * Pre-conditions: + * Any Isys/Ssys interface changes must call ia_css_isys_stream_open() + * Post-condition: + * On successful call, ISYS hardware resource (IBFctrl, ISL, DMAs) + * are acquired and ISYS server is able to handle stream specific commands + * Return: int type error code (errno.h) + */ +extern int ia_css_isys_stream_open( + HANDLE context, + const unsigned int stream_handle, + const struct ia_css_isys_stream_cfg_data *stream_cfg +); + +/** + * ia_css_isys_stream_close() - close virtual stream + * @ stream_handle: stream identifier + * release ISYS resources by freeing up stream HW resources + * output pin buffers ownership is returned to the driver + * Return: int type error code (errno.h) + */ +extern int ia_css_isys_stream_close( + HANDLE context, + const unsigned int stream_handle +); + +/** + * ia_css_isys_stream_start() - starts handling a mipi virtual stream + * @ stream_handle: stream identifier + * @next_frame: + * if next_frame != NULL: apply next_frame + * settings asynchronously and start stream + * This mode ensures that the first frame is captured + * and thus a minimal start up latency + * (preconditions: sensor streaming must be switched off) + * + * if next_frame == NULL: sensor can be in a streaming state, + * all capture indicates commands will be + * processed synchronously (e.g. 
on mipi SOF events) + * + * To be called once ia_css_isys_stream_open() successly called + * On success, the stream's HW resources are in active state + * + * Object ownership: During this function call, + * next_frame struct must be read but not modified by the ISYS, + * and in addition the driver is not allowed to modify it + * on function exit next_frame ownership is returned to + * the driver and is no longer accesses by iSYS + * next_frame contains a collection of + * ia_css_isys_output_pin * and ia_css_isys_input_pin * + * which point to the frame's "output/input pin info & data buffers", + * + * Upon the ia_css_isys_stream_start() call, + * ia_css_isys_output_pin* or ia_css_isys_input_pin* + * will now be owned by the ISYS + * these ptr will enable runtime/dynamic ISYS configuration and also + * to store and write captured payload data + * at the address specified in ia_css_isys_output_pin_payload + * These ptrs should no longer be accessed by any other + * code until (ia_css_isys_output_pin) gets handed + * back to the driver via the response mechansim + * ia_css_isys_stream_handle_response() + * the driver is responsible for providing valid + * ia_css_isys_output_pin* or ia_css_isys_output_pin* + * Pointers set to NULL will simply not be used by the ISYS + * + * Return: int type error code (errno.h) + */ +extern int ia_css_isys_stream_start( + HANDLE context, + const unsigned int stream_handle, + const struct ia_css_isys_frame_buff_set *next_frame +); + +/** + * ia_css_isys_stream_stop() - Stops a mipi virtual stream + * @ stream_handle: stream identifier + * stop both accepting new commands and processing + * submitted capture indication commands + * Support for Secure Touch + * Precondition: stream must be started + * Return: int type error code (errno.h) + */ +extern int ia_css_isys_stream_stop( + HANDLE context, + const unsigned int stream_handle +); + +/** + * ia_css_isys_stream_flush() - stops a mipi virtual stream but + * completes processing cmd 
backlog + * @ stream_handle: stream identifier + * stop accepting commands, but process + * the already submitted capture indicates + * Precondition: stream must be started + * Return: int type error code (errno.h) + */ +extern int ia_css_isys_stream_flush( + HANDLE context, + const unsigned int stream_handle +); + +/** + * ia_css_isys_stream_capture_indication() + * captures "next frame" on stream_handle + * @ stream_handle: stream identifier + * @ next_frame: frame pin payloads are provided atomically + * purpose: stream capture new frame command, Successfull calls will + * result in frame output pins being captured + * + * To be called once ia_css_isys_stream_start() is successly called + * On success, the stream's HW resources are in active state + * + * Object ownership: During this function call, + * next_frame struct must be read but not modified by the ISYS, + * and in addition the driver is not allowed to modify it + * on function exit next_frame ownership is returned to + * the driver and is no longer accesses by iSYS + * next_frame contains a collection of + * ia_css_isys_output_pin * and ia_css_isys_input_pin * + * which point to the frame's "output/input pin info & data buffers", + * + * Upon the ia_css_isys_stream_capture_indication() call, + * ia_css_isys_output_pin* or ia_css_isys_input_pin* + * will now be owned by the ISYS + * these ptr will enable runtime/dynamic ISYS configuration and also + * to store and write captured payload data + * at the address specified in ia_css_isys_output_pin_payload + * These ptrs should no longer be accessed by any other + * code until (ia_css_isys_output_pin) gets handed + * back to the driver via the response mechanism + * ia_css_isys_stream_handle_response() + * the driver is responsible for providing valid + * ia_css_isys_output_pin* or ia_css_isys_output_pin* + * Pointers set to NULL will simply not be used by the ISYS, and this + * refers specifically the following cases: + * - output pins from SOC path if 
the same datatype is also passed into ISAPF + * path or it has active MIPI output (not NULL) + * - full resolution pin from ISA (but not when bypassing ISA) + * - scaled pin from ISA (bypassing ISA for scaled pin is impossible) + * - output pins from MIPI path but only when the same datatype is also + * either forwarded to the ISAPF path based on the stream configuration + * (it is ok if the second output pin of this datatype is also skipped) + * or it has an active SOC output (not NULL) + * + * Return: int type error code (errno.h) + */ +extern int ia_css_isys_stream_capture_indication( + HANDLE context, + const unsigned int stream_handle, + const struct ia_css_isys_frame_buff_set *next_frame +); + +/** + * ia_css_isys_stream_handle_response() - handle ISYS responses + * @received_response: provides response info from the + * "next response element" from ISYS server + * received_response will be written to during the fct call and + * can be read by the drv once fct is returned + * + * purpose: Allows the client to handle received ISYS responses + * Upon an IRQ event, the driver will call ia_css_isys_stream_handle_response() + * until the queue is emptied + * Responses returning IA_CSS_ISYS_RESP_TYPE_PIN_DATA_READY to the driver will + * hand back ia_css_isys_output_pin ownership to the drv + * ISYS FW will not write/read access ia_css_isys_output_pin + * once it belongs to the driver + * Pre-conditions: ISYS client must have sent a CMDs to ISYS srv + * Return: int type error code (errno.h) + */ +extern int ia_css_isys_stream_handle_response( + HANDLE context, + struct ia_css_isys_resp_info *received_response +); + +/** + * ia_css_isys_device_close() - close ISYS device + * @context : device handle output parameter + * Purpose: Request for the cell to close + * All streams must be stopped when calling ia_css_isys_device_close() + * + * Return: int type error code (errno.h) + */ +#if HAS_DUAL_CMD_CTX_SUPPORT +extern int ia_css_isys_context_destroy( + HANDLE context 
+); +extern void ia_css_isys_device_close( + void +); +#else +extern int ia_css_isys_device_close( + HANDLE context +); +#endif + +/** + * ia_css_isys_device_release() - release ISYS device + * @context : device handle output parameter + * @force: forces release or verifies the state before releasing + * Purpose: Free context forcibly or not + * Must be called after ia_css_isys_device_close() + * + * Return: int type error code (errno.h) + */ +extern int ia_css_isys_device_release( + HANDLE context, + unsigned int force +); + +/** + * ia_css_isys_proxy_write_req() - issue a isys proxy write request + * @context : device handle output parameter + * Purpose: Issues a write request for the regions that are exposed + * by proxy interface + * Can be called any time between ia_css_isys_device_open + * ia_css_isys_device_close + * + * Return: int type error code (errno.h) + */ +extern int ia_css_isys_proxy_write_req( + HANDLE context, + const struct ia_css_proxy_write_req_val *write_req_val +); + +/** + * ia_css_isys_proxy_handle_write_response() + * - Handles isys proxy write request responses + * @context : device handle output parameter + * Purpose: Handling the responses that are created by FW upon the completion + * proxy interface write request + * + * Return: int type error code (errno.h) + */ +extern int ia_css_isys_proxy_handle_write_response( + HANDLE context, + struct ia_css_proxy_write_req_resp *received_response +); + +#endif /* __IA_CSS_ISYSAPI_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/isysapi/interface/ia_css_isysapi_fw_types.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/isysapi/interface/ia_css_isysapi_fw_types.h new file mode 100644 index 0000000000000..938f726d1cfb8 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/isysapi/interface/ia_css_isysapi_fw_types.h @@ -0,0 +1,512 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. 
+ * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ + +#ifndef __IA_CSS_ISYSAPI_FW_TYPES_H +#define __IA_CSS_ISYSAPI_FW_TYPES_H + + +/* Max number of Input/Output Pins */ +#define MAX_IPINS (4) +/* worst case is ISA use where a single input pin produces: +* Mipi output, NS Pixel Output, and Scaled Pixel Output. +* This is how the 2 is calculated +*/ +#define MAX_OPINS ((MAX_IPINS) + 2) + +/* Max number of supported virtual streams */ +#define STREAM_ID_MAX (8) + +/* Aligned with the approach of having one dedicated per stream */ +#define N_MAX_MSG_SEND_QUEUES (STREAM_ID_MAX) +/* Single return queue for all streams/commands type */ +#define N_MAX_MSG_RECV_QUEUES (1) +/* Single device queue for high priority commands (bypass in-order queue) */ +#define N_MAX_DEV_SEND_QUEUES (1) +/* Single dedicated send queue for proxy interface */ +#define N_MAX_PROXY_SEND_QUEUES (1) +/* Single dedicated recv queue for proxy interface */ +#define N_MAX_PROXY_RECV_QUEUES (1) +/* Send queues layout */ +#define BASE_PROXY_SEND_QUEUES (0) +#define BASE_DEV_SEND_QUEUES (BASE_PROXY_SEND_QUEUES + N_MAX_PROXY_SEND_QUEUES) +#define BASE_MSG_SEND_QUEUES (BASE_DEV_SEND_QUEUES + N_MAX_DEV_SEND_QUEUES) +#define N_MAX_SEND_QUEUES (BASE_MSG_SEND_QUEUES + N_MAX_MSG_SEND_QUEUES) +/* Recv queues layout */ +#define BASE_PROXY_RECV_QUEUES (0) +#define BASE_MSG_RECV_QUEUES (BASE_PROXY_RECV_QUEUES + N_MAX_PROXY_RECV_QUEUES) +#define N_MAX_RECV_QUEUES (BASE_MSG_RECV_QUEUES + N_MAX_MSG_RECV_QUEUES) + +#define MAX_QUEUE_SIZE (256) +#define MIN_QUEUE_SIZE (1) + +/* Consider 1 slot per stream since driver 
is not expected to pipeline + * device commands for the same stream */ +#define DEV_SEND_QUEUE_SIZE (STREAM_ID_MAX) + +/* Max number of supported SRAM buffer partitions */ +/* It refers to the size of stream partitions */ +/* These partitions are further subpartitioned internally */ +/* by the FW, but by declaring statically the stream */ +/* partitions we solve the buffer fragmentation issue */ +#define NOF_SRAM_BLOCKS_MAX (STREAM_ID_MAX) + +/* Max number of supported input pins routed in ISL */ +#define MAX_IPINS_IN_ISL (2) + +/* Max number of planes for frame formats supported by the FW */ +#define PIN_PLANES_MAX (4) + +/** + * enum ia_css_isys_resp_type + */ +enum ia_css_isys_resp_type { + IA_CSS_ISYS_RESP_TYPE_STREAM_OPEN_DONE = 0, + IA_CSS_ISYS_RESP_TYPE_STREAM_START_ACK, + IA_CSS_ISYS_RESP_TYPE_STREAM_START_AND_CAPTURE_ACK, + IA_CSS_ISYS_RESP_TYPE_STREAM_CAPTURE_ACK, + IA_CSS_ISYS_RESP_TYPE_STREAM_STOP_ACK, + IA_CSS_ISYS_RESP_TYPE_STREAM_FLUSH_ACK, + IA_CSS_ISYS_RESP_TYPE_STREAM_CLOSE_ACK, + IA_CSS_ISYS_RESP_TYPE_PIN_DATA_READY, + IA_CSS_ISYS_RESP_TYPE_PIN_DATA_WATERMARK, + IA_CSS_ISYS_RESP_TYPE_FRAME_SOF, + IA_CSS_ISYS_RESP_TYPE_FRAME_EOF, + IA_CSS_ISYS_RESP_TYPE_STREAM_START_AND_CAPTURE_DONE, + IA_CSS_ISYS_RESP_TYPE_STREAM_CAPTURE_DONE, + IA_CSS_ISYS_RESP_TYPE_PIN_DATA_SKIPPED, + IA_CSS_ISYS_RESP_TYPE_STREAM_CAPTURE_SKIPPED, + IA_CSS_ISYS_RESP_TYPE_FRAME_SOF_DISCARDED, + IA_CSS_ISYS_RESP_TYPE_FRAME_EOF_DISCARDED, + IA_CSS_ISYS_RESP_TYPE_STATS_DATA_READY, + N_IA_CSS_ISYS_RESP_TYPE +}; + +/** + * enum ia_css_isys_send_type + */ +enum ia_css_isys_send_type { + IA_CSS_ISYS_SEND_TYPE_STREAM_OPEN = 0, + IA_CSS_ISYS_SEND_TYPE_STREAM_START, + IA_CSS_ISYS_SEND_TYPE_STREAM_START_AND_CAPTURE, + IA_CSS_ISYS_SEND_TYPE_STREAM_CAPTURE, + IA_CSS_ISYS_SEND_TYPE_STREAM_STOP, + IA_CSS_ISYS_SEND_TYPE_STREAM_FLUSH, + IA_CSS_ISYS_SEND_TYPE_STREAM_CLOSE, + N_IA_CSS_ISYS_SEND_TYPE +}; + +/** + * enum ia_css_isys_queue_type + */ +enum ia_css_isys_queue_type { + 
IA_CSS_ISYS_QUEUE_TYPE_PROXY = 0, + IA_CSS_ISYS_QUEUE_TYPE_DEV, + IA_CSS_ISYS_QUEUE_TYPE_MSG, + N_IA_CSS_ISYS_QUEUE_TYPE +}; + +/** + * enum ia_css_isys_stream_source: Specifies a source for a stream + */ +enum ia_css_isys_stream_source { + IA_CSS_ISYS_STREAM_SRC_PORT_0 = 0, + IA_CSS_ISYS_STREAM_SRC_PORT_1, + IA_CSS_ISYS_STREAM_SRC_PORT_2, + IA_CSS_ISYS_STREAM_SRC_PORT_3, + IA_CSS_ISYS_STREAM_SRC_PORT_4, + IA_CSS_ISYS_STREAM_SRC_PORT_5, + IA_CSS_ISYS_STREAM_SRC_PORT_6, + IA_CSS_ISYS_STREAM_SRC_PORT_7, + IA_CSS_ISYS_STREAM_SRC_PORT_8, + IA_CSS_ISYS_STREAM_SRC_PORT_9, + IA_CSS_ISYS_STREAM_SRC_PORT_10, + IA_CSS_ISYS_STREAM_SRC_PORT_11, + IA_CSS_ISYS_STREAM_SRC_PORT_12, + IA_CSS_ISYS_STREAM_SRC_PORT_13, + IA_CSS_ISYS_STREAM_SRC_PORT_14, + IA_CSS_ISYS_STREAM_SRC_PORT_15, + IA_CSS_ISYS_STREAM_SRC_MIPIGEN_0, + IA_CSS_ISYS_STREAM_SRC_MIPIGEN_1, + IA_CSS_ISYS_STREAM_SRC_MIPIGEN_2, + IA_CSS_ISYS_STREAM_SRC_MIPIGEN_3, + IA_CSS_ISYS_STREAM_SRC_MIPIGEN_4, + IA_CSS_ISYS_STREAM_SRC_MIPIGEN_5, + IA_CSS_ISYS_STREAM_SRC_MIPIGEN_6, + IA_CSS_ISYS_STREAM_SRC_MIPIGEN_7, + IA_CSS_ISYS_STREAM_SRC_MIPIGEN_8, + IA_CSS_ISYS_STREAM_SRC_MIPIGEN_9, + N_IA_CSS_ISYS_STREAM_SRC +}; + +#define IA_CSS_ISYS_STREAM_SRC_CSI2_PORT0 IA_CSS_ISYS_STREAM_SRC_PORT_0 +#define IA_CSS_ISYS_STREAM_SRC_CSI2_PORT1 IA_CSS_ISYS_STREAM_SRC_PORT_1 +#define IA_CSS_ISYS_STREAM_SRC_CSI2_PORT2 IA_CSS_ISYS_STREAM_SRC_PORT_2 +#define IA_CSS_ISYS_STREAM_SRC_CSI2_PORT3 IA_CSS_ISYS_STREAM_SRC_PORT_3 + +#define IA_CSS_ISYS_STREAM_SRC_CSI2_3PH_PORTA IA_CSS_ISYS_STREAM_SRC_PORT_4 +#define IA_CSS_ISYS_STREAM_SRC_CSI2_3PH_PORTB IA_CSS_ISYS_STREAM_SRC_PORT_5 +#define IA_CSS_ISYS_STREAM_SRC_CSI2_3PH_CPHY_PORT0 IA_CSS_ISYS_STREAM_SRC_PORT_6 +#define IA_CSS_ISYS_STREAM_SRC_CSI2_3PH_CPHY_PORT1 IA_CSS_ISYS_STREAM_SRC_PORT_7 +#define IA_CSS_ISYS_STREAM_SRC_CSI2_3PH_CPHY_PORT2 IA_CSS_ISYS_STREAM_SRC_PORT_8 +#define IA_CSS_ISYS_STREAM_SRC_CSI2_3PH_CPHY_PORT3 IA_CSS_ISYS_STREAM_SRC_PORT_9 + +#define IA_CSS_ISYS_STREAM_SRC_MIPIGEN_PORT0 
IA_CSS_ISYS_STREAM_SRC_MIPIGEN_0 +#define IA_CSS_ISYS_STREAM_SRC_MIPIGEN_PORT1 IA_CSS_ISYS_STREAM_SRC_MIPIGEN_1 + +/** + * enum ia_css_isys_mipi_vc: MIPI csi2 spec + * supports upto 4 virtual per physical channel + */ +enum ia_css_isys_mipi_vc { + IA_CSS_ISYS_MIPI_VC_0 = 0, + IA_CSS_ISYS_MIPI_VC_1, + IA_CSS_ISYS_MIPI_VC_2, + IA_CSS_ISYS_MIPI_VC_3, + N_IA_CSS_ISYS_MIPI_VC +}; + +/** + * Supported Pixel Frame formats. Expandable if needed + */ +enum ia_css_isys_frame_format_type { + IA_CSS_ISYS_FRAME_FORMAT_NV11 = 0,/* 12 bit YUV 411, Y, UV plane */ + IA_CSS_ISYS_FRAME_FORMAT_NV12,/* 12 bit YUV 420, Y, UV plane */ + IA_CSS_ISYS_FRAME_FORMAT_NV12_16,/* 16 bit YUV 420, Y, UV plane */ + IA_CSS_ISYS_FRAME_FORMAT_NV12_TILEY,/* 12 bit YUV 420, Intel + proprietary tiled format, + TileY + */ + IA_CSS_ISYS_FRAME_FORMAT_NV16,/* 16 bit YUV 422, Y, UV plane */ + IA_CSS_ISYS_FRAME_FORMAT_NV21,/* 12 bit YUV 420, Y, VU plane */ + IA_CSS_ISYS_FRAME_FORMAT_NV61,/* 16 bit YUV 422, Y, VU plane */ + IA_CSS_ISYS_FRAME_FORMAT_YV12,/* 12 bit YUV 420, Y, V, U plane */ + IA_CSS_ISYS_FRAME_FORMAT_YV16,/* 16 bit YUV 422, Y, V, U plane */ + IA_CSS_ISYS_FRAME_FORMAT_YUV420,/* 12 bit YUV 420, Y, U, V plane */ + IA_CSS_ISYS_FRAME_FORMAT_YUV420_10,/* yuv420, 10 bits per subpixel */ + IA_CSS_ISYS_FRAME_FORMAT_YUV420_12,/* yuv420, 12 bits per subpixel */ + IA_CSS_ISYS_FRAME_FORMAT_YUV420_14,/* yuv420, 14 bits per subpixel */ + IA_CSS_ISYS_FRAME_FORMAT_YUV420_16,/* yuv420, 16 bits per subpixel */ + IA_CSS_ISYS_FRAME_FORMAT_YUV422,/* 16 bit YUV 422, Y, U, V plane */ + IA_CSS_ISYS_FRAME_FORMAT_YUV422_16,/* yuv422, 16 bits per subpixel */ + IA_CSS_ISYS_FRAME_FORMAT_UYVY,/* 16 bit YUV 422, UYVY interleaved */ + IA_CSS_ISYS_FRAME_FORMAT_YUYV,/* 16 bit YUV 422, YUYV interleaved */ + IA_CSS_ISYS_FRAME_FORMAT_YUV444,/* 24 bit YUV 444, Y, U, V plane */ + IA_CSS_ISYS_FRAME_FORMAT_YUV_LINE,/* Internal format, 2 y lines + followed by a uvinterleaved line + */ + IA_CSS_ISYS_FRAME_FORMAT_RAW8, /* RAW8, 1 plane */ + 
IA_CSS_ISYS_FRAME_FORMAT_RAW10, /* RAW10, 1 plane */ + IA_CSS_ISYS_FRAME_FORMAT_RAW12, /* RAW12, 1 plane */ + IA_CSS_ISYS_FRAME_FORMAT_RAW14, /* RAW14, 1 plane */ + IA_CSS_ISYS_FRAME_FORMAT_RAW16, /* RAW16, 1 plane */ + IA_CSS_ISYS_FRAME_FORMAT_RGB565,/* 16 bit RGB, 1 plane. Each 3 sub + pixels are packed into one 16 bit + value, 5 bits for R, 6 bits for G + and 5 bits for B. + */ + IA_CSS_ISYS_FRAME_FORMAT_PLANAR_RGB888, /* 24 bit RGB, 3 planes */ + IA_CSS_ISYS_FRAME_FORMAT_RGBA888,/* 32 bit RGBA, 1 plane, + A=Alpha (alpha is unused) + */ + IA_CSS_ISYS_FRAME_FORMAT_QPLANE6,/* Internal, for advanced ISP */ + IA_CSS_ISYS_FRAME_FORMAT_BINARY_8,/* byte stream, used for jpeg. */ + N_IA_CSS_ISYS_FRAME_FORMAT +}; +/* Temporary for driver compatibility */ +#define IA_CSS_ISYS_FRAME_FORMAT_RAW (IA_CSS_ISYS_FRAME_FORMAT_RAW16) + + +/** + * Supported MIPI data type. Keep in sync array in ia_css_isys_private.c + */ +enum ia_css_isys_mipi_data_type { + /** SYNCHRONIZATION SHORT PACKET DATA TYPES */ + IA_CSS_ISYS_MIPI_DATA_TYPE_FRAME_START_CODE = 0x00, + IA_CSS_ISYS_MIPI_DATA_TYPE_FRAME_END_CODE = 0x01, + IA_CSS_ISYS_MIPI_DATA_TYPE_LINE_START_CODE = 0x02, /* Optional */ + IA_CSS_ISYS_MIPI_DATA_TYPE_LINE_END_CODE = 0x03, /* Optional */ + /** Reserved 0x04-0x07 */ + IA_CSS_ISYS_MIPI_DATA_TYPE_RESERVED_0x04 = 0x04, + IA_CSS_ISYS_MIPI_DATA_TYPE_RESERVED_0x05 = 0x05, + IA_CSS_ISYS_MIPI_DATA_TYPE_RESERVED_0x06 = 0x06, + IA_CSS_ISYS_MIPI_DATA_TYPE_RESERVED_0x07 = 0x07, + /** GENERIC SHORT PACKET DATA TYPES */ + /** They are used to keep the timing information for the + * opening/closing of shutters, triggering of flashes and etc. 
+ */ + /* Generic Short Packet Code 1 */ + IA_CSS_ISYS_MIPI_DATA_TYPE_GENERIC_SHORT1 = 0x08, + /* Generic Short Packet Code 2 */ + IA_CSS_ISYS_MIPI_DATA_TYPE_GENERIC_SHORT2 = 0x09, + /* Generic Short Packet Code 3 */ + IA_CSS_ISYS_MIPI_DATA_TYPE_GENERIC_SHORT3 = 0x0A, + /* Generic Short Packet Code 4 */ + IA_CSS_ISYS_MIPI_DATA_TYPE_GENERIC_SHORT4 = 0x0B, + /* Generic Short Packet Code 5 */ + IA_CSS_ISYS_MIPI_DATA_TYPE_GENERIC_SHORT5 = 0x0C, + /* Generic Short Packet Code 6 */ + IA_CSS_ISYS_MIPI_DATA_TYPE_GENERIC_SHORT6 = 0x0D, + /* Generic Short Packet Code 7 */ + IA_CSS_ISYS_MIPI_DATA_TYPE_GENERIC_SHORT7 = 0x0E, + /* Generic Short Packet Code 8 */ + IA_CSS_ISYS_MIPI_DATA_TYPE_GENERIC_SHORT8 = 0x0F, + /** GENERIC LONG PACKET DATA TYPES */ + IA_CSS_ISYS_MIPI_DATA_TYPE_NULL = 0x10, + IA_CSS_ISYS_MIPI_DATA_TYPE_BLANKING_DATA = 0x11, + /* Embedded 8-bit non Image Data */ + IA_CSS_ISYS_MIPI_DATA_TYPE_EMBEDDED = 0x12, + /** Reserved 0x13-0x17 */ + IA_CSS_ISYS_MIPI_DATA_TYPE_RESERVED_0x13 = 0x13, + IA_CSS_ISYS_MIPI_DATA_TYPE_RESERVED_0x14 = 0x14, + IA_CSS_ISYS_MIPI_DATA_TYPE_RESERVED_0x15 = 0x15, + IA_CSS_ISYS_MIPI_DATA_TYPE_RESERVED_0x16 = 0x16, + IA_CSS_ISYS_MIPI_DATA_TYPE_RESERVED_0x17 = 0x17, + /** YUV DATA TYPES */ + /* 8 bits per subpixel */ + IA_CSS_ISYS_MIPI_DATA_TYPE_YUV420_8 = 0x18, + /* 10 bits per subpixel */ + IA_CSS_ISYS_MIPI_DATA_TYPE_YUV420_10 = 0x19, + /* 8 bits per subpixel */ + IA_CSS_ISYS_MIPI_DATA_TYPE_YUV420_8_LEGACY = 0x1A, + /** Reserved 0x1B */ + IA_CSS_ISYS_MIPI_DATA_TYPE_RESERVED_0x1B = 0x1B, + /* YUV420 8-bit (Chroma Shifted Pixel Sampling) */ + IA_CSS_ISYS_MIPI_DATA_TYPE_YUV420_8_SHIFT = 0x1C, + /* YUV420 10-bit (Chroma Shifted Pixel Sampling) */ + IA_CSS_ISYS_MIPI_DATA_TYPE_YUV420_10_SHIFT = 0x1D, + /* UYVY..UVYV, 8 bits per subpixel */ + IA_CSS_ISYS_MIPI_DATA_TYPE_YUV422_8 = 0x1E, + /* UYVY..UVYV, 10 bits per subpixel */ + IA_CSS_ISYS_MIPI_DATA_TYPE_YUV422_10 = 0x1F, + /** RGB DATA TYPES */ + IA_CSS_ISYS_MIPI_DATA_TYPE_RGB_444 = 0x20, + /* 
BGR..BGR, 5 bits per subpixel */ + IA_CSS_ISYS_MIPI_DATA_TYPE_RGB_555 = 0x21, + /* BGR..BGR, 5 bits B and R, 6 bits G */ + IA_CSS_ISYS_MIPI_DATA_TYPE_RGB_565 = 0x22, + /* BGR..BGR, 6 bits per subpixel */ + IA_CSS_ISYS_MIPI_DATA_TYPE_RGB_666 = 0x23, + /* BGR..BGR, 8 bits per subpixel */ + IA_CSS_ISYS_MIPI_DATA_TYPE_RGB_888 = 0x24, + /** Reserved 0x25-0x27 */ + IA_CSS_ISYS_MIPI_DATA_TYPE_RESERVED_0x25 = 0x25, + IA_CSS_ISYS_MIPI_DATA_TYPE_RESERVED_0x26 = 0x26, + IA_CSS_ISYS_MIPI_DATA_TYPE_RESERVED_0x27 = 0x27, + /** RAW DATA TYPES */ + /* RAW data, 6 bits per pixel */ + IA_CSS_ISYS_MIPI_DATA_TYPE_RAW_6 = 0x28, + /* RAW data, 7 bits per pixel */ + IA_CSS_ISYS_MIPI_DATA_TYPE_RAW_7 = 0x29, + /* RAW data, 8 bits per pixel */ + IA_CSS_ISYS_MIPI_DATA_TYPE_RAW_8 = 0x2A, + /* RAW data, 10 bits per pixel */ + IA_CSS_ISYS_MIPI_DATA_TYPE_RAW_10 = 0x2B, + /* RAW data, 12 bits per pixel */ + IA_CSS_ISYS_MIPI_DATA_TYPE_RAW_12 = 0x2C, + /* RAW data, 14 bits per pixel */ + IA_CSS_ISYS_MIPI_DATA_TYPE_RAW_14 = 0x2D, + /** Reserved 0x2E-2F are used with assigned meaning */ + /* RAW data, 16 bits per pixel, not specified in CSI-MIPI standard */ + IA_CSS_ISYS_MIPI_DATA_TYPE_RAW_16 = 0x2E, + /* Binary byte stream, which is target at JPEG, not specified in + * CSI-MIPI standard + */ + IA_CSS_ISYS_MIPI_DATA_TYPE_BINARY_8 = 0x2F, + /** USER DEFINED 8-BIT DATA TYPES */ + /** For example, the data transmitter (e.g. the SoC sensor) can keep + * the JPEG data as the User Defined Data Type 4 and the MPEG data as + * the User Defined Data Type 7. 
+ */ + /* User defined 8-bit data type 1 */ + IA_CSS_ISYS_MIPI_DATA_TYPE_USER_DEF1 = 0x30, + /* User defined 8-bit data type 2 */ + IA_CSS_ISYS_MIPI_DATA_TYPE_USER_DEF2 = 0x31, + /* User defined 8-bit data type 3 */ + IA_CSS_ISYS_MIPI_DATA_TYPE_USER_DEF3 = 0x32, + /* User defined 8-bit data type 4 */ + IA_CSS_ISYS_MIPI_DATA_TYPE_USER_DEF4 = 0x33, + /* User defined 8-bit data type 5 */ + IA_CSS_ISYS_MIPI_DATA_TYPE_USER_DEF5 = 0x34, + /* User defined 8-bit data type 6 */ + IA_CSS_ISYS_MIPI_DATA_TYPE_USER_DEF6 = 0x35, + /* User defined 8-bit data type 7 */ + IA_CSS_ISYS_MIPI_DATA_TYPE_USER_DEF7 = 0x36, + /* User defined 8-bit data type 8 */ + IA_CSS_ISYS_MIPI_DATA_TYPE_USER_DEF8 = 0x37, + /** Reserved 0x38-0x3F */ + IA_CSS_ISYS_MIPI_DATA_TYPE_RESERVED_0x38 = 0x38, + IA_CSS_ISYS_MIPI_DATA_TYPE_RESERVED_0x39 = 0x39, + IA_CSS_ISYS_MIPI_DATA_TYPE_RESERVED_0x3A = 0x3A, + IA_CSS_ISYS_MIPI_DATA_TYPE_RESERVED_0x3B = 0x3B, + IA_CSS_ISYS_MIPI_DATA_TYPE_RESERVED_0x3C = 0x3C, + IA_CSS_ISYS_MIPI_DATA_TYPE_RESERVED_0x3D = 0x3D, + IA_CSS_ISYS_MIPI_DATA_TYPE_RESERVED_0x3E = 0x3E, + IA_CSS_ISYS_MIPI_DATA_TYPE_RESERVED_0x3F = 0x3F, + + /* Keep always last and max value */ + N_IA_CSS_ISYS_MIPI_DATA_TYPE = 0x40 +}; + +/** enum ia_css_isys_pin_type: output pin buffer types. 
+ * Buffers can be queued and de-queued to hand them over between IA and ISYS + */ +enum ia_css_isys_pin_type { + /* Captured as MIPI packets */ + IA_CSS_ISYS_PIN_TYPE_MIPI = 0, + /* Captured through the ISApf (with/without ISA) + * and the non-scaled output path + */ + IA_CSS_ISYS_PIN_TYPE_RAW_NS, + /* Captured through the ISApf + ISA and the scaled output path */ + IA_CSS_ISYS_PIN_TYPE_RAW_S, + /* Captured through the SoC path */ + IA_CSS_ISYS_PIN_TYPE_RAW_SOC, + /* Reserved for future use, maybe short packets */ + IA_CSS_ISYS_PIN_TYPE_METADATA_0, + /* Reserved for future use */ + IA_CSS_ISYS_PIN_TYPE_METADATA_1, + /* Legacy (non-PIV2), used for the AWB stats */ + IA_CSS_ISYS_PIN_TYPE_AWB_STATS, + /* Legacy (non-PIV2), used for the AF stats */ + IA_CSS_ISYS_PIN_TYPE_AF_STATS, + /* Legacy (non-PIV2), used for the AE stats */ + IA_CSS_ISYS_PIN_TYPE_HIST_STATS, + /* Used for the PAF FF*/ + IA_CSS_ISYS_PIN_TYPE_PAF_FF, + /* Keep always last and max value */ + N_IA_CSS_ISYS_PIN_TYPE +}; + +/** + * enum ia_css_isys_isl_use. Describes the ISL/ISA use + * (ISAPF path in after BXT A0) + */ +enum ia_css_isys_isl_use { + IA_CSS_ISYS_USE_NO_ISL_NO_ISA = 0, + IA_CSS_ISYS_USE_SINGLE_DUAL_ISL, + IA_CSS_ISYS_USE_SINGLE_ISA, + N_IA_CSS_ISYS_USE +}; + +/** + * enum ia_css_isys_mipi_store_mode. Describes if long MIPI packets reach MIPI + * SRAM with the long packet header or not. + * if not, then only option is to capture it with pin type MIPI. + */ +enum ia_css_isys_mipi_store_mode { + IA_CSS_ISYS_MIPI_STORE_MODE_NORMAL = 0, + IA_CSS_ISYS_MIPI_STORE_MODE_DISCARD_LONG_HEADER, + N_IA_CSS_ISYS_MIPI_STORE_MODE +}; + +/** + * enum ia_css_isys_mipi_dt_rename_mode. Describes if long MIPI packets have + * DT with some other DT format. + */ +enum ia_css_isys_mipi_dt_rename_mode { + IA_CSS_ISYS_MIPI_DT_NO_RENAME = 0, + IA_CSS_ISYS_MIPI_DT_RENAMED_MODE, + N_IA_CSS_ISYS_MIPI_DT_MODE +}; + +/** + * enum ia_css_isys_type_paf. 
Describes the Type of PAF enabled + * (PAF path in after cnlB0) + */ +enum ia_css_isys_type_paf { + /* PAF data not present */ + IA_CSS_ISYS_TYPE_NO_PAF = 0, + /* Type 2 sensor types, PAF coming separately from Image Frame */ + /* PAF data in interleaved format(RLRL or LRLR)*/ + IA_CSS_ISYS_TYPE_INTERLEAVED_PAF, + /* PAF data in non-interleaved format(LL/RR or RR/LL) */ + IA_CSS_ISYS_TYPE_NON_INTERLEAVED_PAF, + /* Type 3 sensor types , PAF data embedded in Image Frame*/ + /* Frame Embedded PAF in interleaved format(RLRL or LRLR)*/ + IA_CSS_ISYS_TYPE_FRAME_EMB_INTERLEAVED_PAF, + /* Frame Embedded PAF non-interleaved format(LL/RR or RR/LL)*/ + IA_CSS_ISYS_TYPE_FRAME_EMB_NON_INTERLEAVED_PAF, + N_IA_CSS_ISYS_TYPE_PAF +}; + +/** + * enum ia_css_isys_cropping_location. Enumerates the cropping locations + * in ISYS + */ +enum ia_css_isys_cropping_location { + /* Cropping executed in ISAPF (mainly), ISAPF preproc (odd column) and + * MIPI STR2MMIO (odd row) + */ + IA_CSS_ISYS_CROPPING_LOCATION_PRE_ISA = 0, + /* BXT A0 legacy mode which will never be implemented */ + IA_CSS_ISYS_CROPPING_LOCATION_RESERVED_1, + /* Cropping executed in StreamPifConv in the ISA output for + * RAW_NS pin + */ + IA_CSS_ISYS_CROPPING_LOCATION_POST_ISA_NONSCALED, + /* Cropping executed in StreamScaledPifConv in the ISA output for + * RAW_S pin + */ + IA_CSS_ISYS_CROPPING_LOCATION_POST_ISA_SCALED, + N_IA_CSS_ISYS_CROPPING_LOCATION +}; + +/** + * enum ia_css_isys_resolution_info. Describes the resolution, required to + * setup the various ISA GP registers. + */ +enum ia_css_isys_resolution_info { + /* Scaled ISA output resolution before the + * StreamScaledPifConv cropping + */ + IA_CSS_ISYS_RESOLUTION_INFO_POST_ISA_NONSCALED = 0, + /* Non-Scaled ISA output resolution before the + * StreamPifConv cropping + */ + IA_CSS_ISYS_RESOLUTION_INFO_POST_ISA_SCALED, + N_IA_CSS_ISYS_RESOLUTION_INFO +}; + +/** + * enum ia_css_isys_error. 
Describes the error type detected by the FW + */ +enum ia_css_isys_error { + IA_CSS_ISYS_ERROR_NONE = 0, /* No details */ + IA_CSS_ISYS_ERROR_FW_INTERNAL_CONSISTENCY, /* enum */ + IA_CSS_ISYS_ERROR_HW_CONSISTENCY, /* enum */ + IA_CSS_ISYS_ERROR_DRIVER_INVALID_COMMAND_SEQUENCE, /* enum */ + IA_CSS_ISYS_ERROR_DRIVER_INVALID_DEVICE_CONFIGURATION, /* enum */ + IA_CSS_ISYS_ERROR_DRIVER_INVALID_STREAM_CONFIGURATION, /* enum */ + IA_CSS_ISYS_ERROR_DRIVER_INVALID_FRAME_CONFIGURATION, /* enum */ + IA_CSS_ISYS_ERROR_INSUFFICIENT_RESOURCES, /* enum */ + IA_CSS_ISYS_ERROR_HW_REPORTED_STR2MMIO, /* HW code */ + IA_CSS_ISYS_ERROR_HW_REPORTED_SIG2CIO, /* HW code */ + IA_CSS_ISYS_ERROR_SENSOR_FW_SYNC, /* enum */ + IA_CSS_ISYS_ERROR_STREAM_IN_SUSPENSION, /* FW code */ + IA_CSS_ISYS_ERROR_RESPONSE_QUEUE_FULL, /* FW code */ + N_IA_CSS_ISYS_ERROR +}; + +/** + * enum ia_css_proxy_error. Describes the error type for the proxy detected by + * the FW + */ +enum ia_css_proxy_error { + IA_CSS_PROXY_ERROR_NONE = 0, + IA_CSS_PROXY_ERROR_INVALID_WRITE_REGION, + IA_CSS_PROXY_ERROR_INVALID_WRITE_OFFSET, + N_IA_CSS_PROXY_ERROR +}; + +#endif /* __IA_CSS_ISYSAPI_FW_TYPES_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/isysapi/interface/ia_css_isysapi_fw_version.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/isysapi/interface/ia_css_isysapi_fw_version.h new file mode 100644 index 0000000000000..bc056157cedb6 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/isysapi/interface/ia_css_isysapi_fw_version.h @@ -0,0 +1,21 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. 
+ * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ + +#ifndef __IA_CSS_ISYSAPI_FW_VERSION_H +#define __IA_CSS_ISYSAPI_FW_VERSION_H + +/* ISYSAPI FW VERSION is taken from Makefile for FW tests */ +#define BXT_FW_RELEASE_VERSION ISYS_FIRMWARE_VERSION + +#endif /* __IA_CSS_ISYSAPI_FW_VERSION_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/isysapi/interface/ia_css_isysapi_proxy_region_defs.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/isysapi/interface/ia_css_isysapi_proxy_region_defs.h new file mode 100644 index 0000000000000..c002b33bdfaf0 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/isysapi/interface/ia_css_isysapi_proxy_region_defs.h @@ -0,0 +1,113 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ + +#ifndef __IA_CSS_ISYSAPI_PROXY_REGION_DEFS_H +#define __IA_CSS_ISYSAPI_PROXY_REGION_DEFS_H + +#include "ia_css_isysapi_proxy_region_types.h" + +/* + * Definitions for IPU4_B0_PROXY_INT + */ + +#if defined(IPU4_B0_PROXY_INT) + +/** + * enum ipu4_b0_ia_css_proxy_write_region. 
Provides the list of regions for ipu4B0 that + * can be accessed (for writing purpose) through the proxy interface + */ +enum ipu4_b0_ia_css_proxy_write_region { + IPU4_B0_IA_CSS_PROXY_WRITE_REGION_STR2MMIO_MIPI_0_ERROR_FILL_RATE = 0, + IPU4_B0_IA_CSS_PROXY_WRITE_REGION_STR2MMIO_MIPI_1_ERROR_FILL_RATE, + IPU4_B0_IA_CSS_PROXY_WRITE_REGION_STR2MMIO_MIPI_2_ERROR_FILL_RATE, + IPU4_B0_IA_CSS_PROXY_WRITE_REGION_STR2MMIO_MIPI_3_ERROR_FILL_RATE, + IPU4_B0_IA_CSS_PROXY_WRITE_REGION_STR2MMIO_MIPI_4_ERROR_FILL_RATE, + IPU4_B0_IA_CSS_PROXY_WRITE_REGION_STR2MMIO_MIPI_5_ERROR_FILL_RATE, + IPU4_B0_IA_CSS_PROXY_WRITE_REGION_STR2MMIO_MIPI_6_ERROR_FILL_RATE, + IPU4_B0_IA_CSS_PROXY_WRITE_REGION_STR2MMIO_MIPI_7_ERROR_FILL_RATE, + IPU4_B0_IA_CSS_PROXY_WRITE_REGION_STR2MMIO_MIPI_8_ERROR_FILL_RATE, + IPU4_B0_IA_CSS_PROXY_WRITE_REGION_STR2MMIO_MIPI_9_ERROR_FILL_RATE, + IPU4_B0_IA_CSS_PROXY_WRITE_REGION_GDA_IRQ_URGENT_THRESHOLD, + IPU4_B0_IA_CSS_PROXY_WRITE_REGION_GDA_IRQ_CRITICAL_THRESHOLD, + N_IPU4_B0_IA_CSS_PROXY_WRITE_REGION +}; + +struct ia_css_proxy_write_region_description ipu4_b0_reg_write_desc[N_IPU4_B0_IA_CSS_PROXY_WRITE_REGION] = { + /* base_addr, offset */ + {0x64128, /*input_system_csi2_logic_s2m_a_stream2mmio_err_mode_dc_ctrl_reg_id*/ 4}, /*IPU4_B0_IA_CSS_PROXY_WRITE_REGION_STR2MMIO_MIPI_0_ERROR_FILL_RATE*/ + {0x65128, /*input_system_csi2_logic_s2m_b_stream2mmio_err_mode_dc_ctrl_reg_id*/ 4}, /*IPU4_B0_IA_CSS_PROXY_WRITE_REGION_STR2MMIO_MIPI_1_ERROR_FILL_RATE*/ + {0x66128, /*input_system_csi2_logic_s2m_c_stream2mmio_err_mode_dc_ctrl_reg_id*/ 4}, /*IPU4_B0_IA_CSS_PROXY_WRITE_REGION_STR2MMIO_MIPI_2_ERROR_FILL_RATE*/ + {0x67128, /*input_system_csi2_logic_s2m_d_stream2mmio_err_mode_dc_ctrl_reg_id*/ 4}, /*IPU4_B0_IA_CSS_PROXY_WRITE_REGION_STR2MMIO_MIPI_3_ERROR_FILL_RATE*/ + {0x6C128, /*input_system_csi2_3ph_logic_s2m_a_stream2mmio_err_mode_dc_ctrl_reg_id*/ 4}, /*IPU4_B0_IA_CSS_PROXY_WRITE_REGION_STR2MMIO_MIPI_4_ERROR_FILL_RATE*/ + {0x6C928, 
/*input_system_csi2_3ph_logic_s2m_b_stream2mmio_err_mode_dc_ctrl_reg_id*/ 4}, /*IPU4_B0_IA_CSS_PROXY_WRITE_REGION_STR2MMIO_MIPI_5_ERROR_FILL_RATE*/ + {0x6D128, /*input_system_csi2_3ph_logic_s2m_0_stream2mmio_err_mode_dc_ctrl_reg_id*/ 4}, /*IPU4_B0_IA_CSS_PROXY_WRITE_REGION_STR2MMIO_MIPI_6_ERROR_FILL_RATE*/ + {0x6D928, /*input_system_csi2_3ph_logic_s2m_1_stream2mmio_err_mode_dc_ctrl_reg_id*/ 4}, /*IPU4_B0_IA_CSS_PROXY_WRITE_REGION_STR2MMIO_MIPI_7_ERROR_FILL_RATE*/ + {0x6E128, /*input_system_csi2_3ph_logic_s2m_2_stream2mmio_err_mode_dc_ctrl_reg_id*/ 4}, /*IPU4_B0_IA_CSS_PROXY_WRITE_REGION_STR2MMIO_MIPI_8_ERROR_FILL_RATE*/ + {0x6E928, /*input_system_csi2_3ph_logic_s2m_3_stream2mmio_err_mode_dc_ctrl_reg_id*/ 4}, /*IPU4_B0_IA_CSS_PROXY_WRITE_REGION_STR2MMIO_MIPI_9_ERROR_FILL_RATE*/ + {0x7800C, /*input_system_unis_logic_gda_irq_urgent_threshold*/ 4}, /*IPU4_B0_IA_CSS_PROXY_WRITE_REGION_GDA_IRQ_URGENT_THRESHOLD*/ + {0x78010, /*input_system_unis_logic_gda_irq_critical_threshold*/ 4} /*IPU4_B0_IA_CSS_PROXY_WRITE_REGION_GDA_IRQ_CRITICAL_THRESHOLD*/ +}; + +#endif /*defined(IPU4_B0_PROXY_INT)*/ + +/* + * Definitions for IPU4P_A0_PROXY_INT + */ + +#if defined(IPU4P_A0_PROXY_INT) + +/** + * enum ipu4p_a0_ia_css_proxy_write_region. Provides the list of regions for ipu4pA0 that + * can be accessed (for writing purpose) through the proxy interface + */ +enum ipu4p_a0_ia_css_proxy_write_region { + N_IPU4P_A0_IA_CSS_PROXY_WRITE_REGION +}; + +#define IPU4P_A0_NO_PROXY_WRITE_REGION_AVAILABLE + +#ifndef IPU4P_A0_NO_PROXY_WRITE_REGION_AVAILABLE +struct ia_css_proxy_write_region_description ipu4p_a0_reg_write_desc[N_IPU4P_A0_IA_CSS_PROXY_WRITE_REGION] = { +} +#endif /*IPU4P_A0_NO_PROXY_WRITE_REGION_AVAILABLE*/ + +#endif /*defined(IPU4P_A0_PROXY_INT)*/ + +/* + * Definitions for IPU4P_B0_PROXY_INT + */ + +#if defined(IPU4P_B0_PROXY_INT) + +/** + * enum ipu4p_b0_ia_css_proxy_write_region. 
Provides the list of regions for ipu4pB0 that + * can be accessed (for writing purpose) through the proxy interface + */ +enum ipu4p_b0_ia_css_proxy_write_region { + IPU4P_B0_IA_CSS_PROXY_WRITE_REGION_GDA_IWAKE_THRESHOLD = 0, + IPU4P_B0_IA_CSS_PROXY_WRITE_REGION_GDA_ENABLE_IWAKE, + N_IPU4P_B0_IA_CSS_PROXY_WRITE_REGION +}; + +struct ia_css_proxy_write_region_description ipu4p_b0_reg_write_desc[N_IPU4P_B0_IA_CSS_PROXY_WRITE_REGION] = { + /* base_addr, max_offset */ + /*input_system_unis_logic_gda_iwake_threshold*/ + {0x78014, 4}, /*IPU4P_B0_IA_CSS_PROXY_WRITE_REGION_GDA_IWAKE_THRESHOLD*/ + /*input_system_unis_logic_gda_enable_iwake*/ + {0x7801C, 4} /*IPU4P_B0_IA_CSS_PROXY_WRITE_REGION_GDA_ENABLE_IWAKE*/ +}; + +#endif /*defined(IPU4P_B0_PROXY_INT)*/ + +#endif /* __IA_CSS_ISYSAPI_PROXY_REGION_DEFS_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/isysapi/interface/ia_css_isysapi_proxy_region_types.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/isysapi/interface/ia_css_isysapi_proxy_region_types.h new file mode 100644 index 0000000000000..045f089e5a4c8 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/isysapi/interface/ia_css_isysapi_proxy_region_types.h @@ -0,0 +1,24 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+*/ + +#ifndef __IA_CSS_ISYSAPI_PROXY_REGION_TYPES_H +#define __IA_CSS_ISYSAPI_PROXY_REGION_TYPES_H + + +struct ia_css_proxy_write_region_description { + uint32_t base_addr; + uint32_t offset; +}; + +#endif /* __IA_CSS_ISYSAPI_PROXY_REGION_TYPES_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/isysapi/interface/ia_css_isysapi_types.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/isysapi/interface/ia_css_isysapi_types.h new file mode 100644 index 0000000000000..481a7dc7b4813 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/isysapi/interface/ia_css_isysapi_types.h @@ -0,0 +1,349 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+*/ + +#ifndef __IA_CSS_ISYSAPI_TYPES_H +#define __IA_CSS_ISYSAPI_TYPES_H + +#include "ia_css_isysapi_fw_types.h" +#include "type_support.h" + +#include "ia_css_return_token.h" +#include "ia_css_output_buffer.h" +#include "ia_css_input_buffer.h" +#include "ia_css_terminal_defs.h" + +/** + * struct ia_css_isys_buffer_partition - buffer partition information + * @num_gda_pages: Number of virtual gda pages available for each virtual stream + */ +struct ia_css_isys_buffer_partition { + unsigned int num_gda_pages[STREAM_ID_MAX]; +}; + +/** + * This should contain the driver specified info for sys + */ +struct ia_css_driver_sys_config { + unsigned int ssid; + unsigned int mmid; + unsigned int num_send_queues; /* # of MSG send queues */ + unsigned int num_recv_queues; /* # of MSG recv queues */ + unsigned int send_queue_size; /* max # tokens per queue */ + unsigned int recv_queue_size; /* max # tokens per queue */ + + unsigned int icache_prefetch; /* enable prefetching for SPC */ +}; + +/** + * This should contain the driver specified info for proxy write queues + */ +struct ia_css_driver_proxy_config { + /* max # tokens per PROXY send/recv queue. + * Proxy queues are used for write access purpose + */ + unsigned int proxy_write_queue_size; +}; + + /** + * struct ia_css_isys_device_cfg_data - ISYS device configuration data + * @driver_sys + * @buffer_partition: Information required for the virtual SRAM + * space partition of the streams. + * @driver_proxy + * @secure: Driver needs to set 'secure' to indicate the intention + * when invoking ia_css_isys_context_create() in + * HAS_DUAL_CMD_CTX_SUPPORT case. If 'true', it's for + * secure case. 
+ */ +struct ia_css_isys_device_cfg_data { + struct ia_css_driver_sys_config driver_sys; + struct ia_css_isys_buffer_partition buffer_partition; + struct ia_css_driver_proxy_config driver_proxy; + bool secure; + unsigned vtl0_addr_mask; /* only applicable in 'secure' case */ +}; + +/** + * struct ia_css_isys_resolution: Generic resolution structure. + * @Width + * @Height + */ +struct ia_css_isys_resolution { + unsigned int width; + unsigned int height; +}; + +/** + * struct ia_css_isys_output_pin_payload + * @out_buf_id: Points to output pin buffer - buffer identifier + * @addr: Points to output pin buffer - CSS Virtual Address + * @compressed: Request frame compression (1), or not (0) + * This must be the same as ia_css_isys_output_pin_info::reserve_compression + */ +struct ia_css_isys_output_pin_payload { + ia_css_return_token out_buf_id; + ia_css_output_buffer_css_address addr; + unsigned int compress; +}; + +/** + * struct ia_css_isys_output_pin_info + * @input_pin_id: input pin id/index which is source of + * the data for this output pin + * @output_res: output pin resolution + * @stride: output stride in Bytes (not valid for statistics) + * @pt: pin type + * @ft: frame format type + * @watermark_in_lines: pin watermark level in lines + * @send_irq: assert if pin event should trigger irq + * @link_id: identifies PPG to connect to, link_id = 0 implies offline + * while link_id > 0 implies buffer_chasing or online mode + * can be entered. + * @reserve_compression: Reserve compression resources for pin. + * @payload_buf_size: Minimum size in Bytes of all buffers that will be supplied for capture + * on this pin (i.e. 
addressed by ia_css_isys_output_pin_payload::addr) + */ +struct ia_css_isys_output_pin_info { + unsigned int input_pin_id; + struct ia_css_isys_resolution output_res; + unsigned int stride; + enum ia_css_isys_pin_type pt; + enum ia_css_isys_frame_format_type ft; + unsigned int watermark_in_lines; + unsigned int send_irq; + enum ia_css_isys_link_id link_id; + unsigned int reserve_compression; + unsigned int payload_buf_size; +}; + +/** + * struct ia_css_isys_param_pin + * @param_buf_id: Points to param buffer - buffer identifier + * @addr: Points to param buffer - CSS Virtual Address + */ +struct ia_css_isys_param_pin { + ia_css_return_token param_buf_id; + ia_css_input_buffer_css_address addr; +}; + +/** + * struct ia_css_isys_input_pin_info + * @input_res: input resolution + * @dt: mipi data type + * @mipi_store_mode: defines if legacy long packet header will be stored or + * discarded if discarded, output pin pin type for this + * input pin can only be MIPI + * @dt_rename_mode: defines if MIPI data is encapsulated in some other + * data type + * @mapped_dt: Encapsulating in mipi data type(what sensor sends) + */ +struct ia_css_isys_input_pin_info { + struct ia_css_isys_resolution input_res; + enum ia_css_isys_mipi_data_type dt; + enum ia_css_isys_mipi_store_mode mipi_store_mode; + enum ia_css_isys_mipi_dt_rename_mode dt_rename_mode; + enum ia_css_isys_mipi_data_type mapped_dt; +}; + +/** + * struct ia_css_isys_isa_cfg. 
Describes the ISA cfg + */ +struct ia_css_isys_isa_cfg { + /* Following sets resolution information neeed by the IS GP registers, + * For index IA_CSS_ISYS_RESOLUTION_INFO_POST_ISA_NONSCALED, + * it is needed when there is RAW_NS pin + * For index IA_CSS_ISYS_RESOLUTION_INFO_POST_ISA_SCALED, + * it is needed when there is RAW_S pin + */ + struct ia_css_isys_resolution isa_res[N_IA_CSS_ISYS_RESOLUTION_INFO]; + /* acc id 0, set if process required */ + unsigned int blc_enabled; + /* acc id 1, set if process required */ + unsigned int lsc_enabled; + /* acc id 2, set if process required */ + unsigned int dpc_enabled; + /* acc id 3, set if process required */ + unsigned int downscaler_enabled; + /* acc id 4, set if process required */ + unsigned int awb_enabled; + /* acc id 5, set if process required */ + unsigned int af_enabled; + /* acc id 6, set if process required */ + unsigned int ae_enabled; + /* acc id 7, disabled, or type of paf enabled*/ + enum ia_css_isys_type_paf paf_type; + /* Send irq for any statistics buffers which got completed */ + unsigned int send_irq_stats_ready; + /* Send response for any statistics buffers which got completed */ + unsigned int send_resp_stats_ready; +}; + +/** + * struct ia_css_isys_cropping - cropping coordinates + * Left/Top offsets are INCLUDED + * Right/Bottom offsets are EXCLUDED + * Horizontal: [left_offset,right_offset) + * Vertical: [top_offset,bottom_offset) + * Padding is supported + */ +struct ia_css_isys_cropping { + int top_offset; + int left_offset; + int bottom_offset; + int right_offset; +}; + + /** + * struct ia_css_isys_stream_cfg_data + * ISYS stream configuration data structure + * @src: Stream source index e.g. 
MIPI_generator_0, CSI2-rx_1 + * @vc: MIPI Virtual Channel (up to 4 virtual per physical channel) + * @isl_use: indicates whether stream requires ISL and how + * @compfmt: de-compression setting for User Defined Data + * @isa_cfg: details about what ACCs are active if ISA is used + * @crop: defines cropping resolution for the + * maximum number of input pins which can be cropped, + * it is directly mapped to the HW devices + * @send_irq_sof_discarded: send irq on discarded frame sof response + * - if '1' it will override the send_resp_sof_discarded and send + * the response + * - if '0' the send_resp_sof_discarded will determine whether to + * send the response + * @send_irq_eof_discarded: send irq on discarded frame eof response + * - if '1' it will override the send_resp_eof_discarded and send + * the response + * - if '0' the send_resp_eof_discarded will determine whether to + * send the response + * @send_resp_sof_discarded: send response for discarded frame sof detected, + * used only when send_irq_sof_discarded is '0' + * @send_resp_eof_discarded: send response for discarded frame eof detected, + * used only when send_irq_eof_discarded is '0' + * @the rest: input/output pin descriptors + */ +struct ia_css_isys_stream_cfg_data { + enum ia_css_isys_stream_source src; + enum ia_css_isys_mipi_vc vc; + enum ia_css_isys_isl_use isl_use; + unsigned int compfmt; + struct ia_css_isys_isa_cfg isa_cfg; + struct ia_css_isys_cropping crop[N_IA_CSS_ISYS_CROPPING_LOCATION]; + unsigned int send_irq_sof_discarded; + unsigned int send_irq_eof_discarded; + unsigned int send_resp_sof_discarded; + unsigned int send_resp_eof_discarded; + unsigned int nof_input_pins; + unsigned int nof_output_pins; + struct ia_css_isys_input_pin_info input_pins[MAX_IPINS]; + struct ia_css_isys_output_pin_info output_pins[MAX_OPINS]; +}; + +/** + * struct ia_css_isys_frame_buff_set - frame buffer set + * @output_pins: output pin addresses + * @process_group_light: process_group_light buffer address + 
* @send_irq_sof: send irq on frame sof response + * - if '1' it will override the send_resp_sof and send + * the response + * - if '0' the send_resp_sof will determine whether to send + * the response + * @send_irq_eof: send irq on frame eof response + * - if '1' it will override the send_resp_eof and send + * the response + * - if '0' the send_resp_eof will determine whether to send + * the response + * @send_resp_sof: send response for frame sof detected, + * used only when send_irq_sof is '0' + * @send_resp_eof: send response for frame eof detected, + * used only when send_irq_eof is '0' + * @frame_counter: frame number associated with this buffer set. + */ +struct ia_css_isys_frame_buff_set { + struct ia_css_isys_output_pin_payload output_pins[MAX_OPINS]; + struct ia_css_isys_param_pin process_group_light; + unsigned int send_irq_sof; + unsigned int send_irq_eof; + unsigned int send_irq_capture_ack; + unsigned int send_irq_capture_done; + unsigned int send_resp_sof; + unsigned int send_resp_eof; + uint8_t frame_counter; +}; + +/** + * struct ia_css_isys_resp_info + * @type: response type + * @stream_handle: stream id the response corresponds to + * @timestamp: Time information for event if available + * @error: error code if something went wrong + * @error_details: depending on error code, it may contain additional + * error info + * @pin: this var is valid for pin event related responses, + * contains pin addresses + * @pin_id: this var is valid for pin event related responses, + * contains pin id that the pin payload corresponds to + * @process_group_light: this var is valid for stats ready related responses, + * contains process group addresses + * @acc_id: this var is valid for stats ready related responses, + * contains accelerator id that finished producing + * all related statistics + * @frame_counter: valid for STREAM_START_AND_CAPTURE_DONE, + * STREAM_CAPTURE_DONE and STREAM_CAPTURE_DISCARDED + * @written_direct: indicates if frame was written direct 
(online mode) or to DDR. + */ +struct ia_css_isys_resp_info { + enum ia_css_isys_resp_type type; + unsigned int stream_handle; + unsigned int timestamp[2]; + enum ia_css_isys_error error; + unsigned int error_details; + struct ia_css_isys_output_pin_payload pin; + unsigned int pin_id; + struct ia_css_isys_param_pin process_group_light; + unsigned int acc_id; + uint8_t frame_counter; + uint8_t written_direct; +}; + +/** + * struct ia_css_proxy_write_req_val + * @request_id: Unique identifier for the write request + * (in case multiple write requests are issued for same register) + * @region_index: region id for the write request + * @offset: Offset to the specific register within the region + * @value: Value to be written to register + */ +struct ia_css_proxy_write_req_val { + uint32_t request_id; + uint32_t region_index; + uint32_t offset; + uint32_t value; +}; + +/** + * struct ia_css_proxy_write_req_resp + * @request_id: Unique identifier for the write request + * (in case multiple write requests are issued for same register) + * @error: error code if something went wrong + * @error_details: error detail includes either offset or region index + * information which caused proxy request to be rejected + * (invalid access request) + */ +struct ia_css_proxy_write_req_resp { + uint32_t request_id; + enum ia_css_proxy_error error; + uint32_t error_details; +}; + + +#endif /* __IA_CSS_ISYSAPI_TYPES_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/isysapi/isysapi.mk b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/isysapi/isysapi.mk new file mode 100644 index 0000000000000..0d06298f9acb0 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/isysapi/isysapi.mk @@ -0,0 +1,77 @@ +# # # +# Support for Intel Camera Imaging ISP subsystem. +# Copyright (c) 2010 - 2018, Intel Corporation. 
+# +# This program is free software; you can redistribute it and/or modify it +# under the terms and conditions of the GNU General Public License, +# version 2, as published by the Free Software Foundation. +# +# This program is distributed in the hope it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for +# more details +# +# +# MODULE is ISYSAPI + +include $(MODULES_DIR)/config/isys/subsystem_$(IPU_SYSVER).mk + +ISYSAPI_DIR=$${MODULES_DIR}/isysapi + +ISYSAPI_INTERFACE=$(ISYSAPI_DIR)/interface +ISYSAPI_SOURCES=$(ISYSAPI_DIR)/src +ISYSAPI_EXTINCLUDE=$${MODULES_DIR}/support +ISYSAPI_EXTINTERFACE=$${MODULES_DIR}/syscom/interface + +ISYSAPI_HOST_FILES += $(ISYSAPI_SOURCES)/ia_css_isys_public.c + +ISYSAPI_HOST_FILES += $(ISYSAPI_SOURCES)/ia_css_isys_private.c + +# ISYSAPI Trace Log Level = ISYSAPI_TRACE_LOG_LEVEL_NORMAL +# Other options are [ISYSAPI_TRACE_LOG_LEVEL_OFF, ISYSAPI_TRACE_LOG_LEVEL_DEBUG] +ifndef ISYSAPI_TRACE_CONFIG_HOST + ISYSAPI_TRACE_CONFIG_HOST=ISYSAPI_TRACE_LOG_LEVEL_NORMAL +endif +ifndef ISYSAPI_TRACE_CONFIG_FW + ISYSAPI_TRACE_CONFIG_FW=ISYSAPI_TRACE_LOG_LEVEL_NORMAL +endif + +ISYSAPI_HOST_CPPFLAGS += -DISYSAPI_TRACE_CONFIG=$(ISYSAPI_TRACE_CONFIG_HOST) +ISYSAPI_FW_CPPFLAGS += -DISYSAPI_TRACE_CONFIG=$(ISYSAPI_TRACE_CONFIG_FW) + +ISYSAPI_HOST_FILES += $(ISYSAPI_SOURCES)/ia_css_isys_public_trace.c + +ISYSAPI_HOST_CPPFLAGS += -I$(ISYSAPI_INTERFACE) +ISYSAPI_HOST_CPPFLAGS += -I$(ISYSAPI_EXTINCLUDE) +ISYSAPI_HOST_CPPFLAGS += -I$(ISYSAPI_EXTINTERFACE) +ISYSAPI_HOST_CPPFLAGS += -I$(HIVESDK)/systems/ipu_system/dai/include +ISYSAPI_HOST_CPPFLAGS += -I$(HIVESDK)/systems/ipu_system/dai/include/default_system +ISYSAPI_HOST_CPPFLAGS += -I$(HIVESDK)/include/ipu/dai +ISYSAPI_HOST_CPPFLAGS += -I$(HIVESDK)/include/ipu + +ISYSAPI_FW_FILES += $(ISYSAPI_SOURCES)/isys_fw.c +ISYSAPI_FW_FILES += $(ISYSAPI_SOURCES)/isys_fw_utils.c + 
+ISYSAPI_FW_CPPFLAGS += -I$(ISYSAPI_INTERFACE) +ISYSAPI_FW_CPPFLAGS += -I$(ISYSAPI_SOURCES)/$(IPU_SYSVER) +ISYSAPI_FW_CPPFLAGS += -I$(ISYSAPI_EXTINCLUDE) +ISYSAPI_FW_CPPFLAGS += -I$(ISYSAPI_EXTINTERFACE) +ISYSAPI_FW_CPPFLAGS += -I$(HIVESDK)/systems/ipu_system/dai/include +ISYSAPI_FW_CPPFLAGS += -I$(HIVESDK)/systems/ipu_system/dai/include/default_system +ISYSAPI_FW_CPPFLAGS += -I$(HIVESDK)/include/ipu/dai +ISYSAPI_FW_CPPFLAGS += -I$(HIVESDK)/include/ipu + +ISYSAPI_FW_CPPFLAGS += -DWA_HSD1805168877=$(WA_HSD1805168877) + +ISYSAPI_HOST_CPPFLAGS += -DREGMEM_OFFSET=$(REGMEM_OFFSET) + +ifeq ($(ISYS_HAS_DUAL_CMD_CTX_SUPPORT), 1) +ISYSAPI_HOST_CPPFLAGS += -DHAS_DUAL_CMD_CTX_SUPPORT=$(ISYS_HAS_DUAL_CMD_CTX_SUPPORT) +ISYSAPI_FW_CPPFLAGS += -DHAS_DUAL_CMD_CTX_SUPPORT=$(ISYS_HAS_DUAL_CMD_CTX_SUPPORT) +endif + +ifdef AB_CONFIG_ARRAY_SIZE +ISYSAPI_FW_CPPFLAGS += -DAB_CONFIG_ARRAY_SIZE=$(AB_CONFIG_ARRAY_SIZE) +else +ISYSAPI_FW_CPPFLAGS += -DAB_CONFIG_ARRAY_SIZE=1 +endif diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/isysapi/src/ia_css_isys_private.c b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/isysapi/src/ia_css_isys_private.c new file mode 100644 index 0000000000000..8297a1ff2d1be --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/isysapi/src/ia_css_isys_private.c @@ -0,0 +1,980 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+*/
+
+#include "ia_css_isys_private.h"
+/* The following is needed for the contained data types */
+#include "ia_css_isys_fw_bridged_types.h"
+#include "ia_css_isysapi_types.h"
+#include "ia_css_syscom_config.h"
+/*
+ * The following header file is needed for the
+ * stddef.h (NULL),
+ * limits.h (CHAR_BIT definition).
+ */
+#include "type_support.h"
+#include "error_support.h"
+#include "ia_css_isysapi_trace.h"
+#include "misc_support.h"
+#include "cpu_mem_support.h"
+#include "storage_class.h"
+
+#include "ia_css_shared_buffer_cpu.h"
+
+/*
+ * defines how many stream cfg host may send concurrently
+ * before receiving the stream ack
+ */
+#define STREAM_CFG_BUFS_PER_MSG_QUEUE (1)
+#define NEXT_FRAME_BUFS_PER_MSG_QUEUE \
+ (ctx->send_queue_size[IA_CSS_ISYS_QUEUE_TYPE_MSG] + 4 + 1)
+/*
+ * There is an edge case that host has filled the full queue
+ * with capture requests (ctx->send_queue_size),
+ * SP reads and HW-queues all of them (4),
+ * while in the meantime host continues queueing capture requests
+ * without checking for responses which SP will have sent with each HW-queue
+ * capture request (if it does then the 4 is much more improbable to appear,
+ * but still not impossible).
+ * After this, host tries to queue an extra capture request + * even though there is no space in the msg queue because msg queue + * is checked at a later point, so +1 is needed + */ + +/* + * A DT is supported assuming when the MIPI packets + * have the same size even when even/odd lines are different, + * and the size is the average per line + */ +#define IA_CSS_UNSUPPORTED_DATA_TYPE (0) +static const uint32_t +ia_css_isys_extracted_bits_per_pixel_per_mipi_data_type[ + N_IA_CSS_ISYS_MIPI_DATA_TYPE] = { + /* + * Remove Prefix "IA_CSS_ISYS_MIPI_DATA_TYPE_" in comments + * to align with Checkpatch 80 characters requirements + * For detailed comments of each field, please refer to + * definition of enum ia_css_isys_mipi_data_type{} in + * isysapi/interface/ia_css_isysapi_fw_types.h + */ + 64, /* [0x00] FRAME_START_CODE */ + 64, /* [0x01] FRAME_END_CODE */ + 64, /* [0x02] LINE_START_CODE Optional */ + 64, /* [0x03] LINE_END_CODE Optional */ + IA_CSS_UNSUPPORTED_DATA_TYPE, /* [0x04] RESERVED_0x04 */ + IA_CSS_UNSUPPORTED_DATA_TYPE, /* [0x05] RESERVED_0x05 */ + IA_CSS_UNSUPPORTED_DATA_TYPE, /* [0x06] RESERVED_0x06 */ + IA_CSS_UNSUPPORTED_DATA_TYPE, /* [0x07] RESERVED_0x07 */ + 64, /* [0x08] GENERIC_SHORT1 */ + 64, /* [0x09] GENERIC_SHORT2 */ + 64, /* [0x0A] GENERIC_SHORT3 */ + 64, /* [0x0B] GENERIC_SHORT4 */ + 64, /* [0x0C] GENERIC_SHORT5 */ + 64, /* [0x0D] GENERIC_SHORT6 */ + 64, /* [0x0E] GENERIC_SHORT7 */ + 64, /* [0x0F] GENERIC_SHORT8 */ + IA_CSS_UNSUPPORTED_DATA_TYPE, /* [0x10] NULL To be ignored */ + IA_CSS_UNSUPPORTED_DATA_TYPE, /* [0x11] BLANKING_DATA To be ignored */ + 8, /* [0x12] EMBEDDED non Image Data */ + IA_CSS_UNSUPPORTED_DATA_TYPE, /* [0x13] RESERVED_0x13 */ + IA_CSS_UNSUPPORTED_DATA_TYPE, /* [0x14] RESERVED_0x14 */ + IA_CSS_UNSUPPORTED_DATA_TYPE, /* [0x15] RESERVED_0x15 */ + IA_CSS_UNSUPPORTED_DATA_TYPE, /* [0x16] RESERVED_0x16 */ + IA_CSS_UNSUPPORTED_DATA_TYPE, /* [0x17] RESERVED_0x17 */ + 12, /* [0x18] YUV420_8 */ + 15, /* [0x19] YUV420_10 */ + 12, 
/* [0x1A] YUV420_8_LEGACY */ + IA_CSS_UNSUPPORTED_DATA_TYPE, /* [0x1B] RESERVED_0x1B */ + 12, /* [0x1C] YUV420_8_SHIFT */ + 15, /* [0x1D] YUV420_10_SHIFT */ + 16, /* [0x1E] YUV422_8 */ + 20, /* [0x1F] YUV422_10 */ + 16, /* [0x20] RGB_444 */ + 16, /* [0x21] RGB_555 */ + 16, /* [0x22] RGB_565 */ + 18, /* [0x23] RGB_666 */ + 24, /* [0x24] RGB_888 */ + IA_CSS_UNSUPPORTED_DATA_TYPE, /* [0x25] RESERVED_0x25 */ + IA_CSS_UNSUPPORTED_DATA_TYPE, /* [0x26] RESERVED_0x26 */ + IA_CSS_UNSUPPORTED_DATA_TYPE, /* [0x27] RESERVED_0x27 */ + 6, /* [0x28] RAW_6 */ + 7, /* [0x29] RAW_7 */ + 8, /* [0x2A] RAW_8 */ + 10, /* [0x2B] RAW_10 */ + 12, /* [0x2C] RAW_12 */ + 14, /* [0x2D] RAW_14 */ + 16, /* [0x2E] RAW_16 */ + 8, /* [0x2F] BINARY_8 */ + 8, /* [0x30] USER_DEF1 */ + 8, /* [0x31] USER_DEF2 */ + 8, /* [0x32] USER_DEF3 */ + 8, /* [0x33] USER_DEF4 */ + 8, /* [0x34] USER_DEF5 */ + 8, /* [0x35] USER_DEF6 */ + 8, /* [0x36] USER_DEF7 */ + 8, /* [0x37] USER_DEF8 */ + IA_CSS_UNSUPPORTED_DATA_TYPE, /* [0x38] RESERVED_0x38 */ + IA_CSS_UNSUPPORTED_DATA_TYPE, /* [0x39] RESERVED_0x39 */ + IA_CSS_UNSUPPORTED_DATA_TYPE, /* [0x3A] RESERVED_0x3A */ + IA_CSS_UNSUPPORTED_DATA_TYPE, /* [0x3B] RESERVED_0x3B */ + IA_CSS_UNSUPPORTED_DATA_TYPE, /* [0x3C] RESERVED_0x3C */ + IA_CSS_UNSUPPORTED_DATA_TYPE, /* [0x3D] RESERVED_0x3D */ + IA_CSS_UNSUPPORTED_DATA_TYPE, /* [0x3E] RESERVED_0x3E */ + IA_CSS_UNSUPPORTED_DATA_TYPE /* [0x3F] RESERVED_0x3F */ +}; + +STORAGE_CLASS_INLINE int get_stream_cfg_buff_slot( + struct ia_css_isys_context *ctx, + int stream_handle, + int stream_cfg_buff_counter) +{ + NOT_USED(ctx); + return (stream_handle * STREAM_CFG_BUFS_PER_MSG_QUEUE) + + stream_cfg_buff_counter; +} + +STORAGE_CLASS_INLINE int get_next_frame_buff_slot( + struct ia_css_isys_context *ctx, + int stream_handle, + int next_frame_buff_counter) +{ + NOT_USED(ctx); + return (stream_handle * NEXT_FRAME_BUFS_PER_MSG_QUEUE) + + next_frame_buff_counter; +} + +STORAGE_CLASS_INLINE void free_comm_buff_shared_mem( + struct 
ia_css_isys_context *ctx, + int stream_handle, + int stream_cfg_buff_counter, + int next_frame_buff_counter) +{ + int buff_slot; + + /* Initialiser is the current value of stream_handle */ + for (; stream_handle >= 0; stream_handle--) { + /* + * Initialiser is the current value of stream_cfg_buff_counter + */ + for (; stream_cfg_buff_counter >= 0; + stream_cfg_buff_counter--) { + buff_slot = get_stream_cfg_buff_slot( + ctx, stream_handle, stream_cfg_buff_counter); + ia_css_shared_buffer_free( + ctx->ssid, ctx->mmid, + ctx->isys_comm_buffer_queue. + pstream_cfg_buff_id[buff_slot]); + } + /* Set for the next iteration */ + stream_cfg_buff_counter = STREAM_CFG_BUFS_PER_MSG_QUEUE - 1; + /* + * Initialiser is the current value of next_frame_buff_counter + */ + for (; next_frame_buff_counter >= 0; + next_frame_buff_counter--) { + buff_slot = get_next_frame_buff_slot( + ctx, stream_handle, next_frame_buff_counter); + ia_css_shared_buffer_free( + ctx->ssid, ctx->mmid, + ctx->isys_comm_buffer_queue. 
+ pnext_frame_buff_id[buff_slot]); + } + next_frame_buff_counter = NEXT_FRAME_BUFS_PER_MSG_QUEUE - 1; + } +} + +/* + * ia_css_isys_constr_comm_buff_queue() + */ +int ia_css_isys_constr_comm_buff_queue( + struct ia_css_isys_context *ctx) +{ + int stream_handle; + int stream_cfg_buff_counter; + int next_frame_buff_counter; + int buff_slot; + + verifret(ctx, EFAULT); /* Host Consistency */ + + ctx->isys_comm_buffer_queue.pstream_cfg_buff_id = + (ia_css_shared_buffer *) + ia_css_cpu_mem_alloc(ctx-> + num_send_queues[IA_CSS_ISYS_QUEUE_TYPE_MSG] * + STREAM_CFG_BUFS_PER_MSG_QUEUE * + sizeof(ia_css_shared_buffer)); + verifret(ctx->isys_comm_buffer_queue.pstream_cfg_buff_id != NULL, + EFAULT); + + ctx->isys_comm_buffer_queue.pnext_frame_buff_id = + (ia_css_shared_buffer *) + ia_css_cpu_mem_alloc(ctx-> + num_send_queues[IA_CSS_ISYS_QUEUE_TYPE_MSG] * + NEXT_FRAME_BUFS_PER_MSG_QUEUE * + sizeof(ia_css_shared_buffer)); + if (ctx->isys_comm_buffer_queue.pnext_frame_buff_id == NULL) { + ia_css_cpu_mem_free( + ctx->isys_comm_buffer_queue.pstream_cfg_buff_id); + verifret(0, EFAULT); /* return EFAULT; equivalent */ + } + + for (stream_handle = 0; stream_handle < + (int)ctx->num_send_queues[IA_CSS_ISYS_QUEUE_TYPE_MSG]; + stream_handle++) { + /* Initialisation needs to happen here for both loops */ + stream_cfg_buff_counter = 0; + next_frame_buff_counter = 0; + + for (; stream_cfg_buff_counter < STREAM_CFG_BUFS_PER_MSG_QUEUE; + stream_cfg_buff_counter++) { + buff_slot = get_stream_cfg_buff_slot( + ctx, stream_handle, stream_cfg_buff_counter); + ctx->isys_comm_buffer_queue. + pstream_cfg_buff_id[buff_slot] = + ia_css_shared_buffer_alloc( + ctx->ssid, ctx->mmid, + sizeof(struct + ia_css_isys_stream_cfg_data_comm)); + if (ctx->isys_comm_buffer_queue.pstream_cfg_buff_id[ + buff_slot] == 0) { + goto SHARED_BUFF_ALLOC_FAILURE; + } + } + ctx->isys_comm_buffer_queue. + stream_cfg_queue_head[stream_handle] = 0; + ctx->isys_comm_buffer_queue. 
+ stream_cfg_queue_tail[stream_handle] = 0; + for (; next_frame_buff_counter < + (int)NEXT_FRAME_BUFS_PER_MSG_QUEUE; + next_frame_buff_counter++) { + buff_slot = get_next_frame_buff_slot( + ctx, stream_handle, + next_frame_buff_counter); + ctx->isys_comm_buffer_queue. + pnext_frame_buff_id[buff_slot] = + ia_css_shared_buffer_alloc( + ctx->ssid, ctx->mmid, + sizeof(struct + ia_css_isys_frame_buff_set_comm)); + if (ctx->isys_comm_buffer_queue. + pnext_frame_buff_id[buff_slot] == 0) { + goto SHARED_BUFF_ALLOC_FAILURE; + } + } + ctx->isys_comm_buffer_queue. + next_frame_queue_head[stream_handle] = 0; + ctx->isys_comm_buffer_queue. + next_frame_queue_tail[stream_handle] = 0; + } + + return 0; + +SHARED_BUFF_ALLOC_FAILURE: + /* stream_handle has correct value for calling the free function */ + /* prepare stream_cfg_buff_counter for calling the free function */ + stream_cfg_buff_counter--; + /* prepare next_frame_buff_counter for calling the free function */ + next_frame_buff_counter--; + free_comm_buff_shared_mem( + ctx, + stream_handle, + stream_cfg_buff_counter, + next_frame_buff_counter); + + verifret(0, EFAULT); /* return EFAULT; equivalent */ +} + +/* + * ia_css_isys_force_unmap_comm_buff_queue() + */ +int ia_css_isys_force_unmap_comm_buff_queue( + struct ia_css_isys_context *ctx) +{ + int stream_handle; + int buff_slot; + + verifret(ctx, EFAULT); /* Host Consistency */ + + IA_CSS_TRACE_0(ISYSAPI, WARNING, + "ia_css_isys_force_unmap_comm_buff_queue() called\n"); + for (stream_handle = 0; stream_handle < + (int)ctx->num_send_queues[IA_CSS_ISYS_QUEUE_TYPE_MSG]; + stream_handle++) { + /* Host-FW Consistency */ + verifret((ctx->isys_comm_buffer_queue. + stream_cfg_queue_head[stream_handle] - + ctx->isys_comm_buffer_queue. + stream_cfg_queue_tail[stream_handle]) <= + STREAM_CFG_BUFS_PER_MSG_QUEUE, EPROTO); + for (; ctx->isys_comm_buffer_queue. + stream_cfg_queue_tail[stream_handle] < + ctx->isys_comm_buffer_queue. 
+ stream_cfg_queue_head[stream_handle]; + ctx->isys_comm_buffer_queue. + stream_cfg_queue_tail[stream_handle]++) { + IA_CSS_TRACE_1(ISYSAPI, WARNING, + "CSS forced unmapping stream_cfg %d\n", + ctx->isys_comm_buffer_queue. + stream_cfg_queue_tail[stream_handle]); + buff_slot = get_stream_cfg_buff_slot( + ctx, stream_handle, + ctx->isys_comm_buffer_queue. + stream_cfg_queue_tail[stream_handle] % + STREAM_CFG_BUFS_PER_MSG_QUEUE); + ia_css_shared_buffer_css_unmap( + ctx->isys_comm_buffer_queue. + pstream_cfg_buff_id[buff_slot]); + } + /* Host-FW Consistency */ + verifret((ctx->isys_comm_buffer_queue. + next_frame_queue_head[stream_handle] - + ctx->isys_comm_buffer_queue. + next_frame_queue_tail[stream_handle]) <= + NEXT_FRAME_BUFS_PER_MSG_QUEUE, EPROTO); + for (; ctx->isys_comm_buffer_queue. + next_frame_queue_tail[stream_handle] < + ctx->isys_comm_buffer_queue. + next_frame_queue_head[stream_handle]; + ctx->isys_comm_buffer_queue. + next_frame_queue_tail[stream_handle]++) { + IA_CSS_TRACE_1(ISYSAPI, WARNING, + "CSS forced unmapping next_frame %d\n", + ctx->isys_comm_buffer_queue. + next_frame_queue_tail[stream_handle]); + buff_slot = get_next_frame_buff_slot( + ctx, stream_handle, + ctx->isys_comm_buffer_queue. + next_frame_queue_tail[stream_handle] % + NEXT_FRAME_BUFS_PER_MSG_QUEUE); + ia_css_shared_buffer_css_unmap( + ctx->isys_comm_buffer_queue. 
+ pnext_frame_buff_id[buff_slot]); + } + } + + return 0; +} + +/* + * ia_css_isys_destr_comm_buff_queue() + */ +int ia_css_isys_destr_comm_buff_queue( + struct ia_css_isys_context *ctx) +{ + verifret(ctx, EFAULT); /* Host Consistency */ + + free_comm_buff_shared_mem( + ctx, + ctx->num_send_queues[IA_CSS_ISYS_QUEUE_TYPE_MSG] - 1, + STREAM_CFG_BUFS_PER_MSG_QUEUE - 1, + NEXT_FRAME_BUFS_PER_MSG_QUEUE - 1); + + ia_css_cpu_mem_free(ctx->isys_comm_buffer_queue.pnext_frame_buff_id); + ia_css_cpu_mem_free(ctx->isys_comm_buffer_queue.pstream_cfg_buff_id); + + return 0; +} + +STORAGE_CLASS_INLINE void resolution_host_to_css( + const struct ia_css_isys_resolution *resolution_host, + struct ia_css_isys_resolution_comm *resolution_css) +{ + resolution_css->width = resolution_host->width; + resolution_css->height = resolution_host->height; +} + +STORAGE_CLASS_INLINE void output_pin_payload_host_to_css( + const struct ia_css_isys_output_pin_payload *output_pin_payload_host, + struct ia_css_isys_output_pin_payload_comm *output_pin_payload_css) +{ + output_pin_payload_css->out_buf_id = + output_pin_payload_host->out_buf_id; + output_pin_payload_css->addr = output_pin_payload_host->addr; +#ifdef ENABLE_DEC400 + output_pin_payload_css->compress = output_pin_payload_host->compress; +#else + output_pin_payload_css->compress = 0; +#endif /* ENABLE_DEC400 */ +} + +STORAGE_CLASS_INLINE void output_pin_info_host_to_css( + const struct ia_css_isys_output_pin_info *output_pin_info_host, + struct ia_css_isys_output_pin_info_comm *output_pin_info_css) +{ + output_pin_info_css->input_pin_id = output_pin_info_host->input_pin_id; + resolution_host_to_css( + &output_pin_info_host->output_res, + &output_pin_info_css->output_res); + output_pin_info_css->stride = output_pin_info_host->stride; + output_pin_info_css->pt = output_pin_info_host->pt; + output_pin_info_css->watermark_in_lines = + output_pin_info_host->watermark_in_lines; + output_pin_info_css->send_irq = output_pin_info_host->send_irq; + 
output_pin_info_css->ft = output_pin_info_host->ft; + output_pin_info_css->link_id = output_pin_info_host->link_id; +#ifdef ENABLE_DEC400 + output_pin_info_css->reserve_compression = output_pin_info_host->reserve_compression; + output_pin_info_css->payload_buf_size = output_pin_info_host->payload_buf_size; +#else + output_pin_info_css->reserve_compression = 0; + /* Though payload_buf_size was added for compression, set sane value for + * payload_buf_size, just in case... + */ + output_pin_info_css->payload_buf_size = + output_pin_info_host->stride * output_pin_info_host->output_res.height; +#endif /* ENABLE_DEC400 */ +} + +STORAGE_CLASS_INLINE void param_pin_host_to_css( + const struct ia_css_isys_param_pin *param_pin_host, + struct ia_css_isys_param_pin_comm *param_pin_css) +{ + param_pin_css->param_buf_id = param_pin_host->param_buf_id; + param_pin_css->addr = param_pin_host->addr; +} + +STORAGE_CLASS_INLINE void input_pin_info_host_to_css( + const struct ia_css_isys_input_pin_info *input_pin_info_host, + struct ia_css_isys_input_pin_info_comm *input_pin_info_css) +{ + resolution_host_to_css( + &input_pin_info_host->input_res, + &input_pin_info_css->input_res); + if (input_pin_info_host->dt >= N_IA_CSS_ISYS_MIPI_DATA_TYPE) { + IA_CSS_TRACE_0(ISYSAPI, ERROR, + "input_pin_info_host->dt out of range\n"); + return; + } + if (input_pin_info_host->dt_rename_mode >= N_IA_CSS_ISYS_MIPI_DT_MODE) { + IA_CSS_TRACE_0(ISYSAPI, ERROR, + "input_pin_info_host->dt_rename_mode out of range\n"); + return; + } + /* Mapped DT check if data type renaming is being used*/ + if (input_pin_info_host->dt_rename_mode == IA_CSS_ISYS_MIPI_DT_RENAMED_MODE && + input_pin_info_host->mapped_dt >= N_IA_CSS_ISYS_MIPI_DATA_TYPE) { + IA_CSS_TRACE_0(ISYSAPI, ERROR, + "input_pin_info_host->mapped_dt out of range\n"); + return; + } + input_pin_info_css->dt = input_pin_info_host->dt; + input_pin_info_css->mipi_store_mode = + input_pin_info_host->mipi_store_mode; + input_pin_info_css->bits_per_pix = + 
ia_css_isys_extracted_bits_per_pixel_per_mipi_data_type[ + input_pin_info_host->dt]; + if (input_pin_info_host->dt_rename_mode == IA_CSS_ISYS_MIPI_DT_RENAMED_MODE) { + input_pin_info_css->mapped_dt = input_pin_info_host->mapped_dt; + } + else { + input_pin_info_css->mapped_dt = N_IA_CSS_ISYS_MIPI_DATA_TYPE; + } +} + +STORAGE_CLASS_INLINE void isa_cfg_host_to_css( + const struct ia_css_isys_isa_cfg *isa_cfg_host, + struct ia_css_isys_isa_cfg_comm *isa_cfg_css) +{ + unsigned int i; + + for (i = 0; i < N_IA_CSS_ISYS_RESOLUTION_INFO; i++) { + resolution_host_to_css(&isa_cfg_host->isa_res[i], + &isa_cfg_css->isa_res[i]); + } + isa_cfg_css->cfg_fields = 0; + ISA_CFG_FIELD_SET(BLC_EN, isa_cfg_css->cfg_fields, + isa_cfg_host->blc_enabled ? 1 : 0); + ISA_CFG_FIELD_SET(LSC_EN, isa_cfg_css->cfg_fields, + isa_cfg_host->lsc_enabled ? 1 : 0); + ISA_CFG_FIELD_SET(DPC_EN, isa_cfg_css->cfg_fields, + isa_cfg_host->dpc_enabled ? 1 : 0); + ISA_CFG_FIELD_SET(DOWNSCALER_EN, isa_cfg_css->cfg_fields, + isa_cfg_host->downscaler_enabled ? 1 : 0); + ISA_CFG_FIELD_SET(AWB_EN, isa_cfg_css->cfg_fields, + isa_cfg_host->awb_enabled ? 1 : 0); + ISA_CFG_FIELD_SET(AF_EN, isa_cfg_css->cfg_fields, + isa_cfg_host->af_enabled ? 1 : 0); + ISA_CFG_FIELD_SET(AE_EN, isa_cfg_css->cfg_fields, + isa_cfg_host->ae_enabled ? 1 : 0); + ISA_CFG_FIELD_SET(PAF_TYPE, isa_cfg_css->cfg_fields, + isa_cfg_host->paf_type); + ISA_CFG_FIELD_SET(SEND_IRQ_STATS_READY, isa_cfg_css->cfg_fields, + isa_cfg_host->send_irq_stats_ready ? 1 : 0); + ISA_CFG_FIELD_SET(SEND_RESP_STATS_READY, isa_cfg_css->cfg_fields, + (isa_cfg_host->send_irq_stats_ready || + isa_cfg_host->send_resp_stats_ready) ? 
1 : 0); +} + +STORAGE_CLASS_INLINE void cropping_host_to_css( + const struct ia_css_isys_cropping *cropping_host, + struct ia_css_isys_cropping_comm *cropping_css) +{ + cropping_css->top_offset = cropping_host->top_offset; + cropping_css->left_offset = cropping_host->left_offset; + cropping_css->bottom_offset = cropping_host->bottom_offset; + cropping_css->right_offset = cropping_host->right_offset; + +} + +STORAGE_CLASS_INLINE int stream_cfg_data_host_to_css( + const struct ia_css_isys_stream_cfg_data *stream_cfg_data_host, + struct ia_css_isys_stream_cfg_data_comm *stream_cfg_data_css) +{ + unsigned int i; + + stream_cfg_data_css->src = stream_cfg_data_host->src; + stream_cfg_data_css->vc = stream_cfg_data_host->vc; + stream_cfg_data_css->isl_use = stream_cfg_data_host->isl_use; + stream_cfg_data_css->compfmt = stream_cfg_data_host->compfmt; + stream_cfg_data_css->isa_cfg.cfg_fields = 0; + + switch (stream_cfg_data_host->isl_use) { + case IA_CSS_ISYS_USE_SINGLE_ISA: + isa_cfg_host_to_css(&stream_cfg_data_host->isa_cfg, + &stream_cfg_data_css->isa_cfg); + /* deliberate fall-through */ + case IA_CSS_ISYS_USE_SINGLE_DUAL_ISL: + for (i = 0; i < N_IA_CSS_ISYS_CROPPING_LOCATION; i++) { + cropping_host_to_css(&stream_cfg_data_host->crop[i], + &stream_cfg_data_css->crop[i]); + } + break; + case IA_CSS_ISYS_USE_NO_ISL_NO_ISA: + break; + default: + break; + } + + stream_cfg_data_css->send_irq_sof_discarded = + stream_cfg_data_host->send_irq_sof_discarded ? 1 : 0; + stream_cfg_data_css->send_irq_eof_discarded = + stream_cfg_data_host->send_irq_eof_discarded ? 1 : 0; + stream_cfg_data_css->send_resp_sof_discarded = + stream_cfg_data_host->send_irq_sof_discarded ? + 1 : stream_cfg_data_host->send_resp_sof_discarded; + stream_cfg_data_css->send_resp_eof_discarded = + stream_cfg_data_host->send_irq_eof_discarded ? 
+ 1 : stream_cfg_data_host->send_resp_eof_discarded; + stream_cfg_data_css->nof_input_pins = + stream_cfg_data_host->nof_input_pins; + stream_cfg_data_css->nof_output_pins = + stream_cfg_data_host->nof_output_pins; + for (i = 0; i < stream_cfg_data_host->nof_input_pins; i++) { + input_pin_info_host_to_css( + &stream_cfg_data_host->input_pins[i], + &stream_cfg_data_css->input_pins[i]); + verifret(stream_cfg_data_css->input_pins[i].bits_per_pix, + EINVAL); + } + for (i = 0; i < stream_cfg_data_host->nof_output_pins; i++) { + output_pin_info_host_to_css( + &stream_cfg_data_host->output_pins[i], + &stream_cfg_data_css->output_pins[i]); + } + return 0; +} + +STORAGE_CLASS_INLINE void frame_buff_set_host_to_css( + const struct ia_css_isys_frame_buff_set *frame_buff_set_host, + struct ia_css_isys_frame_buff_set_comm *frame_buff_set_css) +{ + int i; + + for (i = 0; i < MAX_OPINS; i++) { + output_pin_payload_host_to_css( + &frame_buff_set_host->output_pins[i], + &frame_buff_set_css->output_pins[i]); + } + + param_pin_host_to_css(&frame_buff_set_host->process_group_light, + &frame_buff_set_css->process_group_light); + frame_buff_set_css->send_irq_sof = + frame_buff_set_host->send_irq_sof ? 1 : 0; + frame_buff_set_css->send_irq_eof = + frame_buff_set_host->send_irq_eof ? 1 : 0; + frame_buff_set_css->send_irq_capture_done = + (uint8_t)frame_buff_set_host->send_irq_capture_done; + frame_buff_set_css->send_irq_capture_ack = + frame_buff_set_host->send_irq_capture_ack ? 1 : 0; + frame_buff_set_css->send_resp_sof = + frame_buff_set_host->send_irq_sof ? + 1 : frame_buff_set_host->send_resp_sof; + frame_buff_set_css->send_resp_eof = + frame_buff_set_host->send_irq_eof ? 
+ 1 : frame_buff_set_host->send_resp_eof; + frame_buff_set_css->frame_counter = + frame_buff_set_host->frame_counter; +} + +STORAGE_CLASS_INLINE void buffer_partition_host_to_css( + const struct ia_css_isys_buffer_partition *buffer_partition_host, + struct ia_css_isys_buffer_partition_comm *buffer_partition_css) +{ + int i; + + for (i = 0; i < STREAM_ID_MAX; i++) { + buffer_partition_css->num_gda_pages[i] = + buffer_partition_host->num_gda_pages[i]; + } +} + +STORAGE_CLASS_INLINE void output_pin_payload_css_to_host( + const struct ia_css_isys_output_pin_payload_comm * + output_pin_payload_css, + struct ia_css_isys_output_pin_payload *output_pin_payload_host) +{ + output_pin_payload_host->out_buf_id = + output_pin_payload_css->out_buf_id; + output_pin_payload_host->addr = output_pin_payload_css->addr; +#ifdef ENABLE_DEC400 + output_pin_payload_host->compress = output_pin_payload_css->compress; +#else + output_pin_payload_host->compress = 0; +#endif /* ENABLE_DEC400 */ +} + +STORAGE_CLASS_INLINE void param_pin_css_to_host( + const struct ia_css_isys_param_pin_comm *param_pin_css, + struct ia_css_isys_param_pin *param_pin_host) +{ + param_pin_host->param_buf_id = param_pin_css->param_buf_id; + param_pin_host->addr = param_pin_css->addr; + +} + +STORAGE_CLASS_INLINE void resp_info_css_to_host( + const struct ia_css_isys_resp_info_comm *resp_info_css, + struct ia_css_isys_resp_info *resp_info_host) +{ + resp_info_host->type = resp_info_css->type; + resp_info_host->timestamp[0] = resp_info_css->timestamp[0]; + resp_info_host->timestamp[1] = resp_info_css->timestamp[1]; + resp_info_host->stream_handle = resp_info_css->stream_handle; + resp_info_host->error = resp_info_css->error_info.error; + resp_info_host->error_details = + resp_info_css->error_info.error_details; + output_pin_payload_css_to_host( + &resp_info_css->pin, &resp_info_host->pin); + resp_info_host->pin_id = resp_info_css->pin_id; + param_pin_css_to_host(&resp_info_css->process_group_light, + 
&resp_info_host->process_group_light); + resp_info_host->acc_id = resp_info_css->acc_id; + resp_info_host->frame_counter = resp_info_css->frame_counter; + resp_info_host->written_direct = resp_info_css->written_direct; +} + +/* + * ia_css_isys_constr_fw_stream_cfg() + */ +int ia_css_isys_constr_fw_stream_cfg( + struct ia_css_isys_context *ctx, + const unsigned int stream_handle, + ia_css_shared_buffer_css_address *pstream_cfg_fw, + ia_css_shared_buffer *pbuf_stream_cfg_id, + const struct ia_css_isys_stream_cfg_data *stream_cfg) +{ + ia_css_shared_buffer_cpu_address stream_cfg_cpu_addr; + ia_css_shared_buffer_css_address stream_cfg_css_addr; + int buff_slot; + int retval = 0; + unsigned int wrap_compensation; + const unsigned int wrap_condition = 0xFFFFFFFF; + + verifret(ctx, EFAULT); /* Host Consistency */ + verifret(pstream_cfg_fw, EFAULT); /* Host Consistency */ + verifret(pbuf_stream_cfg_id, EFAULT); /* Host Consistency */ + verifret(stream_cfg, EFAULT); /* Host Consistency */ + + /* Host-FW Consistency */ + verifret((ctx->isys_comm_buffer_queue. + stream_cfg_queue_head[stream_handle] - + ctx->isys_comm_buffer_queue. + stream_cfg_queue_tail[stream_handle]) < + STREAM_CFG_BUFS_PER_MSG_QUEUE, EPROTO); + buff_slot = get_stream_cfg_buff_slot(ctx, stream_handle, + ctx->isys_comm_buffer_queue. 
+ stream_cfg_queue_head[stream_handle] % + STREAM_CFG_BUFS_PER_MSG_QUEUE); + *pbuf_stream_cfg_id = + ctx->isys_comm_buffer_queue.pstream_cfg_buff_id[buff_slot]; + /* Host-FW Consistency */ + verifret(*pbuf_stream_cfg_id, EADDRNOTAVAIL); + + stream_cfg_cpu_addr = + ia_css_shared_buffer_cpu_map(*pbuf_stream_cfg_id); + /* Host-FW Consistency */ + verifret(stream_cfg_cpu_addr, EADDRINUSE); + + retval = stream_cfg_data_host_to_css(stream_cfg, stream_cfg_cpu_addr); + if (retval) + return retval; + + stream_cfg_cpu_addr = + ia_css_shared_buffer_cpu_unmap(*pbuf_stream_cfg_id); + /* Host Consistency */ + verifret(stream_cfg_cpu_addr, EADDRINUSE); + + stream_cfg_css_addr = + ia_css_shared_buffer_css_map(*pbuf_stream_cfg_id); + /* Host Consistency */ + verifret(stream_cfg_css_addr, EADDRINUSE); + + ia_css_shared_buffer_css_update(ctx->mmid, *pbuf_stream_cfg_id); + + *pstream_cfg_fw = stream_cfg_css_addr; + + /* + * cover head wrap around extreme case, + * in which case force tail to wrap around too + * while maintaining diff and modulo + */ + if (ctx->isys_comm_buffer_queue.stream_cfg_queue_head[stream_handle] == + wrap_condition) { + /* Value to be added to both head and tail */ + wrap_compensation = + /* + * Distance of wrap_condition to 0, + * will need to be added for wrapping around head to 0 + */ + (0 - wrap_condition) + + /* + * To force tail to also wrap around, + * since it has to happen concurrently + */ + STREAM_CFG_BUFS_PER_MSG_QUEUE + + /* To preserve the same modulo, + * since the previous will result in head modulo 0 + */ + (wrap_condition % STREAM_CFG_BUFS_PER_MSG_QUEUE); + ctx->isys_comm_buffer_queue. + stream_cfg_queue_head[stream_handle] += + wrap_compensation; + ctx->isys_comm_buffer_queue. 
+ stream_cfg_queue_tail[stream_handle] += + wrap_compensation; + } + ctx->isys_comm_buffer_queue.stream_cfg_queue_head[stream_handle]++; + + return 0; +} + +/* + * ia_css_isys_constr_fw_next_frame() + */ +int ia_css_isys_constr_fw_next_frame( + struct ia_css_isys_context *ctx, + const unsigned int stream_handle, + ia_css_shared_buffer_css_address *pnext_frame_fw, + ia_css_shared_buffer *pbuf_next_frame_id, + const struct ia_css_isys_frame_buff_set *next_frame) +{ + ia_css_shared_buffer_cpu_address next_frame_cpu_addr; + ia_css_shared_buffer_css_address next_frame_css_addr; + int buff_slot; + unsigned int wrap_compensation; + const unsigned int wrap_condition = 0xFFFFFFFF; + + verifret(ctx, EFAULT); /* Host Consistency */ + verifret(pnext_frame_fw, EFAULT); /* Host Consistency */ + verifret(next_frame, EFAULT); /* Host Consistency */ + verifret(pbuf_next_frame_id, EFAULT); /* Host Consistency */ + + /* For some reason responses are not dequeued in time */ + verifret((ctx->isys_comm_buffer_queue. + next_frame_queue_head[stream_handle] - + ctx->isys_comm_buffer_queue. + next_frame_queue_tail[stream_handle]) < + NEXT_FRAME_BUFS_PER_MSG_QUEUE, EPERM); + buff_slot = get_next_frame_buff_slot(ctx, stream_handle, + ctx->isys_comm_buffer_queue. 
+ next_frame_queue_head[stream_handle] % + NEXT_FRAME_BUFS_PER_MSG_QUEUE); + *pbuf_next_frame_id = + ctx->isys_comm_buffer_queue.pnext_frame_buff_id[buff_slot]; + /* Host-FW Consistency */ + verifret(*pbuf_next_frame_id, EADDRNOTAVAIL); + + /* map it in cpu */ + next_frame_cpu_addr = + ia_css_shared_buffer_cpu_map(*pbuf_next_frame_id); + /* Host-FW Consistency */ + verifret(next_frame_cpu_addr, EADDRINUSE); + + frame_buff_set_host_to_css(next_frame, next_frame_cpu_addr); + + /* unmap the buffer from cpu */ + next_frame_cpu_addr = + ia_css_shared_buffer_cpu_unmap(*pbuf_next_frame_id); + /* Host Consistency */ + verifret(next_frame_cpu_addr, EADDRINUSE); + + /* map it to css */ + next_frame_css_addr = + ia_css_shared_buffer_css_map(*pbuf_next_frame_id); + /* Host Consistency */ + verifret(next_frame_css_addr, EADDRINUSE); + + ia_css_shared_buffer_css_update(ctx->mmid, *pbuf_next_frame_id); + + *pnext_frame_fw = next_frame_css_addr; + + /* + * cover head wrap around extreme case, + * in which case force tail to wrap around too + * while maintaining diff and modulo + */ + if (ctx->isys_comm_buffer_queue.next_frame_queue_head[stream_handle] == + wrap_condition) { + /* Value to be added to both head and tail */ + wrap_compensation = + /* + * Distance of wrap_condition to 0, + * will need to be added for wrapping around head to 0 + */ + (0 - wrap_condition) + + /* + * To force tail to also wrap around, + * since it has to happen concurrently + */ + NEXT_FRAME_BUFS_PER_MSG_QUEUE + + /* + * To preserve the same modulo, + * since the previous will result in head modulo 0 + */ + (wrap_condition % NEXT_FRAME_BUFS_PER_MSG_QUEUE); + ctx->isys_comm_buffer_queue. + next_frame_queue_head[stream_handle] += + wrap_compensation; + ctx->isys_comm_buffer_queue. 
+ next_frame_queue_tail[stream_handle] += + wrap_compensation; + } + ctx->isys_comm_buffer_queue.next_frame_queue_head[stream_handle]++; + + return 0; +} + +/* + * ia_css_isys_extract_fw_response() + */ +int ia_css_isys_extract_fw_response( + struct ia_css_isys_context *ctx, + const struct resp_queue_token *token, + struct ia_css_isys_resp_info *received_response) +{ + int buff_slot; + unsigned int css_address; + + verifret(ctx, EFAULT); /* Host Consistency */ + verifret(token, EFAULT); /* Host Consistency */ + verifret(received_response, EFAULT); /* Host Consistency */ + + resp_info_css_to_host(&(token->resp_info), received_response); + + switch (token->resp_info.type) { + case IA_CSS_ISYS_RESP_TYPE_STREAM_OPEN_DONE: + /* Host-FW Consistency */ + verifret((ctx->isys_comm_buffer_queue. + stream_cfg_queue_head[token->resp_info.stream_handle] - + ctx->isys_comm_buffer_queue.stream_cfg_queue_tail[ + token->resp_info.stream_handle]) > 0, EPROTO); + buff_slot = get_stream_cfg_buff_slot(ctx, + token->resp_info.stream_handle, + ctx->isys_comm_buffer_queue. + stream_cfg_queue_tail[ + token->resp_info.stream_handle] % + STREAM_CFG_BUFS_PER_MSG_QUEUE); + verifret((ia_css_shared_buffer)HOST_ADDRESS( + token->resp_info.buf_id) == + ctx->isys_comm_buffer_queue. + pstream_cfg_buff_id[buff_slot], EIO); + ctx->isys_comm_buffer_queue.stream_cfg_queue_tail[ + token->resp_info.stream_handle]++; + css_address = ia_css_shared_buffer_css_unmap( + (ia_css_shared_buffer) + HOST_ADDRESS(token->resp_info.buf_id)); + verifret(css_address, EADDRINUSE); + break; + case IA_CSS_ISYS_RESP_TYPE_STREAM_START_AND_CAPTURE_ACK: + case IA_CSS_ISYS_RESP_TYPE_STREAM_CAPTURE_ACK: + /* Host-FW Consistency */ + verifret((ctx->isys_comm_buffer_queue. 
+ next_frame_queue_head[token->resp_info.stream_handle] - + ctx->isys_comm_buffer_queue.next_frame_queue_tail[ + token->resp_info.stream_handle]) > 0, EPROTO); + buff_slot = get_next_frame_buff_slot(ctx, + token->resp_info.stream_handle, + ctx->isys_comm_buffer_queue. + next_frame_queue_tail[ + token->resp_info.stream_handle] % + NEXT_FRAME_BUFS_PER_MSG_QUEUE); + verifret((ia_css_shared_buffer)HOST_ADDRESS( + token->resp_info.buf_id) == + ctx->isys_comm_buffer_queue. + pnext_frame_buff_id[buff_slot], EIO); + ctx->isys_comm_buffer_queue.next_frame_queue_tail[ + token->resp_info.stream_handle]++; + css_address = ia_css_shared_buffer_css_unmap( + (ia_css_shared_buffer) + HOST_ADDRESS(token->resp_info.buf_id)); + verifret(css_address, EADDRINUSE); + break; + default: + break; + } + + return 0; +} + +/* + * ia_css_isys_extract_proxy_response() + */ +int ia_css_isys_extract_proxy_response( + const struct proxy_resp_queue_token *token, + struct ia_css_proxy_write_req_resp *preceived_response) +{ + verifret(token, EFAULT); /* Host Consistency */ + verifret(preceived_response, EFAULT); /* Host Consistency */ + + preceived_response->request_id = token->proxy_resp_info.request_id; + preceived_response->error = token->proxy_resp_info.error_info.error; + preceived_response->error_details = + token->proxy_resp_info.error_info.error_details; + + return 0; +} + +/* + * ia_css_isys_prepare_param() + */ +int ia_css_isys_prepare_param( + struct ia_css_isys_fw_config *isys_fw_cfg, + const struct ia_css_isys_buffer_partition *buf_partition, + const unsigned int num_send_queues[], + const unsigned int num_recv_queues[]) +{ + unsigned int i; + + verifret(isys_fw_cfg, EFAULT); /* Host Consistency */ + verifret(buf_partition, EFAULT); /* Host Consistency */ + verifret(num_send_queues, EFAULT); /* Host Consistency */ + verifret(num_recv_queues, EFAULT); /* Host Consistency */ + + buffer_partition_host_to_css(buf_partition, + &isys_fw_cfg->buffer_partition); + for (i = 0; i < 
N_IA_CSS_ISYS_QUEUE_TYPE; i++) { + isys_fw_cfg->num_send_queues[i] = num_send_queues[i]; + isys_fw_cfg->num_recv_queues[i] = num_recv_queues[i]; + } + + return 0; +} diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/isysapi/src/ia_css_isys_private.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/isysapi/src/ia_css_isys_private.h new file mode 100644 index 0000000000000..d53fa53c9a818 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/isysapi/src/ia_css_isys_private.h @@ -0,0 +1,156 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+*/ + +#ifndef __IA_CSS_ISYS_PRIVATE_H +#define __IA_CSS_ISYS_PRIVATE_H + + +#include "type_support.h" +/* Needed for the structure member ia_css_sys_context * sys */ +#include "ia_css_syscom.h" +/* Needed for the definitions of STREAM_ID_MAX */ +#include "ia_css_isysapi.h" +/* The following is needed for the function arguments */ +#include "ia_css_isys_fw_bridged_types.h" + +#include "ia_css_shared_buffer.h" + + +/* Set for the respective error handling */ +#define VERIFY_DEVSTATE 1 + +#if (VERIFY_DEVSTATE != 0) +/** + * enum device_state + */ +enum device_state { + IA_CSS_ISYS_DEVICE_STATE_IDLE = 0, + IA_CSS_ISYS_DEVICE_STATE_CONFIGURED = 1, + IA_CSS_ISYS_DEVICE_STATE_READY = 2 +}; +#endif /* VERIFY_DEVSTATE */ + +/** + * enum stream_state + */ +enum stream_state { + IA_CSS_ISYS_STREAM_STATE_IDLE = 0, + IA_CSS_ISYS_STREAM_STATE_OPENED = 1, + IA_CSS_ISYS_STREAM_STATE_STARTED = 2 +}; + + +/** + * struct ia_css_isys_comm_buffer_queue + */ +struct ia_css_isys_comm_buffer_queue { + ia_css_shared_buffer *pstream_cfg_buff_id; + unsigned int stream_cfg_queue_head[STREAM_ID_MAX]; + unsigned int stream_cfg_queue_tail[STREAM_ID_MAX]; + ia_css_shared_buffer *pnext_frame_buff_id; + unsigned int next_frame_queue_head[STREAM_ID_MAX]; + unsigned int next_frame_queue_tail[STREAM_ID_MAX]; +}; + + +/** + * struct ia_css_isys_context + */ +struct ia_css_isys_context { + struct ia_css_syscom_context *sys; + /* add here any isys specific members that need + to be passed into the isys api functions as input */ + unsigned int ssid; + unsigned int mmid; + unsigned int num_send_queues[N_IA_CSS_ISYS_QUEUE_TYPE]; + unsigned int num_recv_queues[N_IA_CSS_ISYS_QUEUE_TYPE]; + unsigned int send_queue_size[N_IA_CSS_ISYS_QUEUE_TYPE]; + struct ia_css_isys_comm_buffer_queue isys_comm_buffer_queue; + unsigned int stream_nof_output_pins[STREAM_ID_MAX]; +#if (VERIFY_DEVSTATE != 0) + enum device_state dev_state; +#endif /* VERIFY_DEVSTATE */ + enum stream_state stream_state_array[STREAM_ID_MAX]; + /* If 
true, this context is created based on secure config */ + bool secure; +}; + + +/** + * ia_css_isys_constr_comm_buff_queue() + */ +extern int ia_css_isys_constr_comm_buff_queue( + struct ia_css_isys_context *ctx +); + +/** + * ia_css_isys_force_unmap_comm_buff_queue() + */ +extern int ia_css_isys_force_unmap_comm_buff_queue( + struct ia_css_isys_context *ctx +); + +/** + * ia_css_isys_destr_comm_buff_queue() + */ +extern int ia_css_isys_destr_comm_buff_queue( + struct ia_css_isys_context *ctx +); + +/** + * ia_css_isys_constr_fw_stream_cfg() + */ +extern int ia_css_isys_constr_fw_stream_cfg( + struct ia_css_isys_context *ctx, + const unsigned int stream_handle, + ia_css_shared_buffer_css_address *pstream_cfg_fw, + ia_css_shared_buffer *pbuf_stream_cfg_id, + const struct ia_css_isys_stream_cfg_data *stream_cfg +); + +/** + * ia_css_isys_constr_fw_next_frame() + */ +extern int ia_css_isys_constr_fw_next_frame( + struct ia_css_isys_context *ctx, + const unsigned int stream_handle, + ia_css_shared_buffer_css_address *pnext_frame_fw, + ia_css_shared_buffer *pbuf_next_frame_id, + const struct ia_css_isys_frame_buff_set *next_frame +); + +/** + * ia_css_isys_extract_fw_response() + */ +extern int ia_css_isys_extract_fw_response( + struct ia_css_isys_context *ctx, + const struct resp_queue_token *token, + struct ia_css_isys_resp_info *received_response +); +extern int ia_css_isys_extract_proxy_response( + const struct proxy_resp_queue_token *token, + struct ia_css_proxy_write_req_resp *received_response +); + +/** + * ia_css_isys_prepare_param() + */ +extern int ia_css_isys_prepare_param( + struct ia_css_isys_fw_config *isys_fw_cfg, + const struct ia_css_isys_buffer_partition *buf_partition, + const unsigned int num_send_queues[], + const unsigned int num_recv_queues[] +); + +#endif /* __IA_CSS_ISYS_PRIVATE_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/isysapi/src/ia_css_isys_public.c 
b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/isysapi/src/ia_css_isys_public.c new file mode 100644 index 0000000000000..478d49f51cdd8 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/isysapi/src/ia_css_isys_public.c @@ -0,0 +1,1283 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ + +/* TODO: REMOVE --> START IF EXTERNALLY INCLUDED/DEFINED */ +/* These are temporary, the correct numbers need to be inserted/linked */ +/* Until this happens, the following definitions stay here */ +#define INPUT_MIN_WIDTH 1 +#define INPUT_MAX_WIDTH 16384 +#define INPUT_MIN_HEIGHT 1 +#define INPUT_MAX_HEIGHT 16384 +#define OUTPUT_MIN_WIDTH 1 +#define OUTPUT_MAX_WIDTH 16384 +#define OUTPUT_MIN_HEIGHT 1 +#define OUTPUT_MAX_HEIGHT 16384 +/* REMOVE --> END IF EXTERNALLY INCLUDED/DEFINED */ + + +/* The FW bridged types are included through the following */ +#include "ia_css_isysapi.h" +/* The following provides the isys-sys context */ +#include "ia_css_isys_private.h" +/* The following provides the sys layer functions */ +#include "ia_css_syscom.h" + +#include "ia_css_cell.h" +#include "ipu_device_cell_properties.h" + +/* The following provides the tracing functions */ +#include "ia_css_isysapi_trace.h" +#include "ia_css_isys_public_trace.h" + +#include "ia_css_shared_buffer_cpu.h" +/* The following is needed for the + * stddef.h (NULL), + * limits.h (CHAR_BIT definition). 
+ */ +#include "type_support.h" +#include "error_support.h" +#include "cpu_mem_support.h" +#include "math_support.h" +#include "misc_support.h" +#include "system_const.h" + +static int isys_context_create( + HANDLE * context, + const struct ia_css_isys_device_cfg_data *config); +static int isys_start_server( + const struct ia_css_isys_device_cfg_data *config); + +static int isys_context_create( + HANDLE * context, + const struct ia_css_isys_device_cfg_data *config) +{ + int retval; + unsigned int stream_handle; + struct ia_css_isys_context *ctx; + struct ia_css_syscom_config sys; + /* Needs to be updated in case new type of queues are introduced */ + struct ia_css_syscom_queue_config input_queue_cfg[N_MAX_SEND_QUEUES]; + /* Needs to be updated in case new type of queues are introduced */ + struct ia_css_syscom_queue_config output_queue_cfg[N_MAX_RECV_QUEUES]; + struct ia_css_isys_fw_config isys_fw_cfg; + unsigned int proxy_write_queue_size; + unsigned int ssid; + unsigned int mmid; + unsigned int i; + + /* Printing "ENTRY isys_context_create" + * if tracing level = VERBOSE. + */ + IA_CSS_TRACE_0(ISYSAPI, VERBOSE, "ENTRY isys_context_create\n"); + + verifret(config != NULL, EFAULT); + + /* Printing configuration information if tracing level = VERBOSE. 
*/ +#if ISYSAPI_TRACE_CONFIG == ISYSAPI_TRACE_LOG_LEVEL_DEBUG + print_device_config_data(config); +#endif /* ISYSAPI_TRACE_CONFIG == ISYSAPI_TRACE_LOG_LEVEL_DEBUG */ + + /* Runtime check for # of send and recv MSG queues */ + verifret(config->driver_sys.num_send_queues <= + N_MAX_MSG_SEND_QUEUES/*=STREAM_ID_MAX*/, EINVAL); + verifret(config->driver_sys.num_recv_queues <= + N_MAX_MSG_RECV_QUEUES, EINVAL); + + /* Runtime check for send and recv MSG queue sizes */ + verifret(config->driver_sys.send_queue_size <= MAX_QUEUE_SIZE, EINVAL); + verifret(config->driver_sys.recv_queue_size <= MAX_QUEUE_SIZE, EINVAL); + + /* TODO: return an error in case MAX_QUEUE_SIZE is exceeded + * (Similar to runtime check on MSG queue sizes) + */ + proxy_write_queue_size = uclip( + config->driver_proxy.proxy_write_queue_size, + MIN_QUEUE_SIZE, + MAX_QUEUE_SIZE); + + ctx = (struct ia_css_isys_context *) + ia_css_cpu_mem_alloc(sizeof(struct ia_css_isys_context)); + verifret(ctx != NULL, EFAULT); + *context = (HANDLE)ctx; + + /* Copy to the sys config the driver_sys config, + * and add the internal info (token sizes) + */ + ssid = config->driver_sys.ssid; + mmid = config->driver_sys.mmid; + sys.ssid = ssid; + sys.mmid = mmid; + + ctx->secure = config->secure; + /* Following operations need to be aligned with + * "enum ia_css_isys_queue_type" list (list of queue types) + */ + ctx->num_send_queues[IA_CSS_ISYS_QUEUE_TYPE_PROXY] = + N_MAX_PROXY_SEND_QUEUES; + ctx->num_send_queues[IA_CSS_ISYS_QUEUE_TYPE_DEV] = + N_MAX_DEV_SEND_QUEUES; + ctx->num_send_queues[IA_CSS_ISYS_QUEUE_TYPE_MSG] = + config->driver_sys.num_send_queues; + ctx->num_recv_queues[IA_CSS_ISYS_QUEUE_TYPE_PROXY] = + N_MAX_PROXY_RECV_QUEUES; + ctx->num_recv_queues[IA_CSS_ISYS_QUEUE_TYPE_DEV] = + 0; /* Common msg/dev return queue */ + ctx->num_recv_queues[IA_CSS_ISYS_QUEUE_TYPE_MSG] = + config->driver_sys.num_recv_queues; + + sys.num_input_queues = + ctx->num_send_queues[IA_CSS_ISYS_QUEUE_TYPE_PROXY] + + 
ctx->num_send_queues[IA_CSS_ISYS_QUEUE_TYPE_DEV] + + ctx->num_send_queues[IA_CSS_ISYS_QUEUE_TYPE_MSG]; + sys.num_output_queues = + ctx->num_recv_queues[IA_CSS_ISYS_QUEUE_TYPE_PROXY] + + ctx->num_recv_queues[IA_CSS_ISYS_QUEUE_TYPE_DEV] + + ctx->num_recv_queues[IA_CSS_ISYS_QUEUE_TYPE_MSG]; + + sys.input = input_queue_cfg; + for (i = 0; + i < ctx->num_send_queues[IA_CSS_ISYS_QUEUE_TYPE_PROXY]; + i++) { + input_queue_cfg[BASE_PROXY_SEND_QUEUES + i].queue_size = + proxy_write_queue_size; + input_queue_cfg[BASE_PROXY_SEND_QUEUES + i].token_size = + sizeof(struct proxy_send_queue_token); + } + for (i = 0; + i < ctx->num_send_queues[IA_CSS_ISYS_QUEUE_TYPE_DEV]; + i++) { + input_queue_cfg[BASE_DEV_SEND_QUEUES + i].queue_size = + DEV_SEND_QUEUE_SIZE; + input_queue_cfg[BASE_DEV_SEND_QUEUES + i].token_size = + sizeof(struct send_queue_token); + } + for (i = 0; + i < ctx->num_send_queues[IA_CSS_ISYS_QUEUE_TYPE_MSG]; + i++) { + input_queue_cfg[BASE_MSG_SEND_QUEUES + i].queue_size = + config->driver_sys.send_queue_size; + input_queue_cfg[BASE_MSG_SEND_QUEUES + i].token_size = + sizeof(struct send_queue_token); + } + + ctx->send_queue_size[IA_CSS_ISYS_QUEUE_TYPE_PROXY] = + proxy_write_queue_size; + ctx->send_queue_size[IA_CSS_ISYS_QUEUE_TYPE_DEV] = + DEV_SEND_QUEUE_SIZE; + ctx->send_queue_size[IA_CSS_ISYS_QUEUE_TYPE_MSG] = + config->driver_sys.send_queue_size; + + sys.output = output_queue_cfg; + for (i = 0; + i < ctx->num_recv_queues[IA_CSS_ISYS_QUEUE_TYPE_PROXY]; + i++) { + output_queue_cfg[BASE_PROXY_RECV_QUEUES + i].queue_size = + proxy_write_queue_size; + output_queue_cfg[BASE_PROXY_RECV_QUEUES + i].token_size = + sizeof(struct proxy_resp_queue_token); + } + /* There is no recv DEV queue */ + for (i = 0; + i < ctx->num_recv_queues[IA_CSS_ISYS_QUEUE_TYPE_MSG]; + i++) { + output_queue_cfg[BASE_MSG_RECV_QUEUES + i].queue_size = + config->driver_sys.recv_queue_size; + output_queue_cfg[BASE_MSG_RECV_QUEUES + i].token_size = + sizeof(struct resp_queue_token); + } + + sys.regs_addr 
= ipu_device_cell_memory_address(SPC0, + IPU_DEVICE_SP2600_CONTROL_REGS); + sys.dmem_addr = ipu_device_cell_memory_address(SPC0, + IPU_DEVICE_SP2600_CONTROL_DMEM); + +#if HAS_DUAL_CMD_CTX_SUPPORT + sys.dmem_addr += config->secure ? REGMEM_SECURE_OFFSET : REGMEM_OFFSET; +#endif + + /* Prepare the param */ + ia_css_isys_prepare_param( + &isys_fw_cfg, + &config->buffer_partition, + ctx->num_send_queues, + ctx->num_recv_queues); + + /* parameter struct to be passed to fw */ + sys.specific_addr = &isys_fw_cfg; + /* parameters size */ + sys.specific_size = sizeof(isys_fw_cfg); + sys.secure = config->secure; + if (config->secure) { + sys.vtl0_addr_mask = config->vtl0_addr_mask; + } + + IA_CSS_TRACE_0(ISYSAPI, VERBOSE, + "isys_context_create || call ia_css_syscom_open()\n"); + /* The allocation of the queues will take place within this call and + * info will be stored in sys_context output + */ + ctx->sys = ia_css_syscom_open(&sys, NULL); + if (!ctx->sys) { + ia_css_cpu_mem_free(ctx); + return -EFAULT; + } + + /* Update the context with the id's */ + ctx->ssid = ssid; + ctx->mmid = mmid; + + for (stream_handle = 0; stream_handle < STREAM_ID_MAX; + stream_handle++) { + ctx->stream_state_array[stream_handle] = + IA_CSS_ISYS_STREAM_STATE_IDLE; + } + + retval = ia_css_isys_constr_comm_buff_queue(ctx); + if (retval) { + ia_css_syscom_close(ctx->sys); + ia_css_syscom_release(ctx->sys, 1); + ia_css_cpu_mem_free(ctx); + return retval; + } + +#if (VERIFY_DEVSTATE != 0) + ctx->dev_state = IA_CSS_ISYS_DEVICE_STATE_CONFIGURED; +#endif /* VERIFY_DEVSTATE */ + + /* Printing device configuration and device handle context information + * if tracing level = VERBOSE. + */ +#if ISYSAPI_TRACE_CONFIG == ISYSAPI_TRACE_LOG_LEVEL_DEBUG + print_handle_context(ctx); +#endif /* ISYSAPI_TRACE_CONFIG == ISYSAPI_TRACE_LOG_LEVEL_DEBUG */ + + /* Printing "LEAVE isys_context_create" message + * if tracing level = VERBOSE. 
+ */ + IA_CSS_TRACE_0(ISYSAPI, VERBOSE, "LEAVE isys_context_create\n"); + return 0; +} + +static int isys_start_server( + const struct ia_css_isys_device_cfg_data *config) +{ + verifret(config != NULL, EFAULT); + IA_CSS_TRACE_0(ISYSAPI, VERBOSE, + "isys_start_server || start SPC\n"); + /* The firmware is loaded and syscom is ready, start the SPC */ + ia_css_cell_start_prefetch(config->driver_sys.ssid, SPC0, + config->driver_sys.icache_prefetch); + IA_CSS_TRACE_1(ISYSAPI, VERBOSE, "SPC prefetch: %d\n", + config->driver_sys.icache_prefetch); + return 0; +} + +/** + * ia_css_isys_device_open() - open and configure ISYS device + */ +#if HAS_DUAL_CMD_CTX_SUPPORT +int ia_css_isys_context_create( + HANDLE * context, + const struct ia_css_isys_device_cfg_data *config) +{ + return isys_context_create(context, config); +} + +/* push context information to DMEM for FW to access */ +int ia_css_isys_context_store_dmem( + const HANDLE * context, + const struct ia_css_isys_device_cfg_data *config) +{ + struct ia_css_isys_context *ctx = (struct ia_css_isys_context *) *context; + + return ia_css_syscom_store_dmem(ctx->sys, config->driver_sys.ssid, config->vtl0_addr_mask); +} + +bool ia_css_isys_ab_spc_ready( + HANDLE * context) +{ + struct ia_css_isys_context *ctx = (struct ia_css_isys_context *) *context; + + return ia_css_syscom_is_ab_spc_ready(ctx->sys); +} + +int ia_css_isys_device_open( + const struct ia_css_isys_device_cfg_data *config) +{ + return isys_start_server(config); +} +#else +int ia_css_isys_device_open( + HANDLE * context, + const struct ia_css_isys_device_cfg_data *config) +{ + int retval; + + retval = isys_context_create(context, config); + if (retval) { + IA_CSS_TRACE_1(ISYSAPI, ERROR, "ia_css_isys_device_open() failed (retval %d)\n", retval); + return retval; + } + + isys_start_server(config); + return 0; +} +#endif + +/** + * ia_css_isys_device_open_ready() - open and configure ISYS device + */ +int ia_css_isys_device_open_ready(HANDLE context) +{ + struct 
ia_css_isys_context *ctx = (struct ia_css_isys_context *)context; + unsigned int i; + int retval; + + /* Printing "ENTRY IA_CSS_ISYS_DEVICE_OPEN" + * if tracing level = VERBOSE. + */ + IA_CSS_TRACE_0(ISYSAPI, VERBOSE, "ENTRY IA_CSS_ISYS_DEVICE_OPEN\n"); + + verifret(ctx, EFAULT); + + /* Printing device handle context information + * if tracing level = VERBOSE. + */ +#if ISYSAPI_TRACE_CONFIG == ISYSAPI_TRACE_LOG_LEVEL_DEBUG + print_handle_context(ctx); +#endif /* ISYSAPI_TRACE_CONFIG == ISYSAPI_TRACE_LOG_LEVEL_DEBUG */ + +#if (VERIFY_DEVSTATE != 0) + verifret(ctx->dev_state == IA_CSS_ISYS_DEVICE_STATE_CONFIGURED, EPERM); +#endif /* VERIFY_DEVSTATE */ + + /* Open the ports for all the non-MSG send queues (PROXY + DEV) */ + for (i = 0; + i < ctx->num_send_queues[IA_CSS_ISYS_QUEUE_TYPE_PROXY] + + ctx->num_send_queues[IA_CSS_ISYS_QUEUE_TYPE_DEV]; + i++) { + retval = ia_css_syscom_send_port_open(ctx->sys, i); + verifret(retval != FW_ERROR_BUSY, EBUSY); + verifret(retval != FW_ERROR_BAD_ADDRESS, EFAULT); + verifret(retval == 0, EINVAL); + } + + /* Open the ports for all the recv queues (PROXY + MSG) */ + for (i = 0; + i < (ctx->num_recv_queues[IA_CSS_ISYS_QUEUE_TYPE_PROXY] + + ctx->num_recv_queues[IA_CSS_ISYS_QUEUE_TYPE_MSG]); + i++) { + retval = ia_css_syscom_recv_port_open(ctx->sys, i); + verifret(retval != FW_ERROR_BUSY, EBUSY); + verifret(retval != FW_ERROR_BAD_ADDRESS, EFAULT); + verifret(retval == 0, EINVAL); + } + +#if (VERIFY_DEVSTATE != 0) + ctx->dev_state = IA_CSS_ISYS_DEVICE_STATE_READY; +#endif /* VERIFY_DEVSTATE */ + + /* Printing "LEAVE IA_CSS_ISYS_DEVICE_OPEN_READY" message + * if tracing level = VERBOSE. 
+ */ + IA_CSS_TRACE_0(ISYSAPI, VERBOSE, + "LEAVE IA_CSS_ISYS_DEVICE_OPEN_READY\n"); + return 0; +} + + + /** + * ia_css_isys_stream_open() - open and configure a virtual stream + */ +int ia_css_isys_stream_open( + HANDLE context, + const unsigned int stream_handle, + const struct ia_css_isys_stream_cfg_data *stream_cfg) +{ + struct ia_css_isys_context *ctx = (struct ia_css_isys_context *)context; + unsigned int i; + int retval = 0; + int packets; + struct send_queue_token token; + ia_css_shared_buffer_css_address stream_cfg_fw = 0; + ia_css_shared_buffer buf_stream_cfg_id = (ia_css_shared_buffer)NULL; + /* Printing "ENTRY IA_CSS_ISYS_STREAM_OPEN" message + * if tracing level = VERBOSE. + */ + IA_CSS_TRACE_0(ISYSAPI, VERBOSE, "ENTRY IA_CSS_ISYS_STREAM_OPEN\n"); + + verifret(ctx, EFAULT); + + /* Printing stream configuration and device handle context information + * if tracing level = VERBOSE. + */ +#if ISYSAPI_TRACE_CONFIG == ISYSAPI_TRACE_LOG_LEVEL_DEBUG + print_handle_context(ctx); + print_stream_config_data(stream_cfg); +#endif /* ISYSAPI_TRACE_CONFIG == ISYSAPI_TRACE_LOG_LEVEL_DEBUG */ + +#if (VERIFY_DEVSTATE != 0) + verifret(ctx->dev_state == IA_CSS_ISYS_DEVICE_STATE_READY, EPERM); +#endif /* VERIFY_DEVSTATE */ + + verifret(stream_handle < STREAM_ID_MAX, EINVAL); + verifret(stream_handle < + ctx->num_send_queues[IA_CSS_ISYS_QUEUE_TYPE_MSG], EINVAL); + + verifret(ctx->stream_state_array[stream_handle] == + IA_CSS_ISYS_STREAM_STATE_IDLE, EPERM); + + verifret(stream_cfg != NULL, EFAULT); + verifret(stream_cfg->src < N_IA_CSS_ISYS_STREAM_SRC, EINVAL); + verifret(stream_cfg->vc < N_IA_CSS_ISYS_MIPI_VC, EINVAL); + verifret(stream_cfg->isl_use < N_IA_CSS_ISYS_USE, EINVAL); + if (stream_cfg->isl_use != IA_CSS_ISYS_USE_NO_ISL_NO_ISA) { + verifret(stream_cfg-> + crop[IA_CSS_ISYS_CROPPING_LOCATION_PRE_ISA].bottom_offset >= + stream_cfg-> + crop[IA_CSS_ISYS_CROPPING_LOCATION_PRE_ISA].top_offset + + OUTPUT_MIN_HEIGHT, EINVAL); + + verifret(stream_cfg-> + 
crop[IA_CSS_ISYS_CROPPING_LOCATION_PRE_ISA].bottom_offset <= + stream_cfg-> + crop[IA_CSS_ISYS_CROPPING_LOCATION_PRE_ISA].top_offset + + OUTPUT_MAX_HEIGHT, EINVAL); + + verifret(stream_cfg-> + crop[IA_CSS_ISYS_CROPPING_LOCATION_PRE_ISA].right_offset >= + stream_cfg-> + crop[IA_CSS_ISYS_CROPPING_LOCATION_PRE_ISA].left_offset + + OUTPUT_MIN_WIDTH, EINVAL); + + verifret(stream_cfg-> + crop[IA_CSS_ISYS_CROPPING_LOCATION_PRE_ISA].right_offset <= + stream_cfg-> + crop[IA_CSS_ISYS_CROPPING_LOCATION_PRE_ISA].left_offset + + OUTPUT_MAX_WIDTH, EINVAL); + } + verifret(stream_cfg->nof_input_pins <= MAX_IPINS, EINVAL); + verifret(stream_cfg->nof_output_pins <= MAX_OPINS, EINVAL); + for (i = 0; i < stream_cfg->nof_input_pins; i++) { + /* Verify input pin */ + verifret( + stream_cfg->input_pins[i].input_res.width >= + INPUT_MIN_WIDTH && + stream_cfg->input_pins[i].input_res.width <= + INPUT_MAX_WIDTH && + stream_cfg->input_pins[i].input_res.height >= + INPUT_MIN_HEIGHT && + stream_cfg->input_pins[i].input_res.height <= + INPUT_MAX_HEIGHT, EINVAL); + verifret(stream_cfg->input_pins[i].dt < + N_IA_CSS_ISYS_MIPI_DATA_TYPE, EINVAL); +/* #ifdef To be removed when driver inits the value */ +#ifdef DRIVER_INIT_MIPI_STORE_MODE + verifret(stream_cfg->input_pins[i].mipi_store_mode < + N_IA_CSS_ISYS_MIPI_STORE_MODE, EINVAL); +#endif /* DRIVER_INIT_MIPI_STORE_MODE */ + } + for (i = 0; i < stream_cfg->nof_output_pins; i++) { + /* Verify output pin */ + verifret(stream_cfg->output_pins[i].input_pin_id < + stream_cfg->nof_input_pins, EINVAL); + verifret(stream_cfg->output_pins[i].pt < + N_IA_CSS_ISYS_PIN_TYPE, EINVAL); + verifret(stream_cfg->output_pins[i].ft < + N_IA_CSS_ISYS_FRAME_FORMAT, EINVAL); + /* Verify that the stride is aligned to 64 bytes: HW spec */ + verifret(stream_cfg->output_pins[i].stride%(XMEM_WIDTH/8) == + 0, EINVAL); + verifret((stream_cfg->output_pins[i].output_res.width >= + OUTPUT_MIN_WIDTH) && + (stream_cfg->output_pins[i].output_res.width <= + OUTPUT_MAX_WIDTH) && + 
(stream_cfg->output_pins[i].output_res.height >= + OUTPUT_MIN_HEIGHT) && + (stream_cfg->output_pins[i].output_res.height <= + OUTPUT_MAX_HEIGHT), EINVAL); + verifret((stream_cfg->output_pins[i].pt == + IA_CSS_ISYS_PIN_TYPE_MIPI) || + (stream_cfg-> + input_pins[stream_cfg->output_pins[i].input_pin_id].mipi_store_mode != + IA_CSS_ISYS_MIPI_STORE_MODE_DISCARD_LONG_HEADER), EINVAL); + if (stream_cfg->isl_use == IA_CSS_ISYS_USE_SINGLE_ISA) { + switch (stream_cfg->output_pins[i].pt) { + case IA_CSS_ISYS_PIN_TYPE_RAW_NS: + /* Ensure the PIFCONV cropped resolution + * matches the RAW_NS output pin resolution + */ + verifret(stream_cfg-> + crop[IA_CSS_ISYS_CROPPING_LOCATION_POST_ISA_NONSCALED].bottom_offset == + stream_cfg-> + crop[IA_CSS_ISYS_CROPPING_LOCATION_POST_ISA_NONSCALED].top_offset + + (int)stream_cfg->output_pins[i].output_res.height, EINVAL); + verifret(stream_cfg-> + crop[IA_CSS_ISYS_CROPPING_LOCATION_POST_ISA_NONSCALED].right_offset == + stream_cfg-> + crop[IA_CSS_ISYS_CROPPING_LOCATION_POST_ISA_NONSCALED].left_offset + + (int)stream_cfg->output_pins[i].output_res.width, EINVAL); + /* Ensure the ISAPF cropped resolution matches + * the Non-scaled ISA output resolution before + * the PIFCONV cropping, since nothing can + * modify the resolution in that part of + * the pipe + */ + verifret(stream_cfg-> + crop[IA_CSS_ISYS_CROPPING_LOCATION_PRE_ISA].bottom_offset == + stream_cfg->crop[IA_CSS_ISYS_CROPPING_LOCATION_PRE_ISA].top_offset + + (int)stream_cfg-> + isa_cfg.isa_res[IA_CSS_ISYS_RESOLUTION_INFO_POST_ISA_NONSCALED].height, + EINVAL); + verifret(stream_cfg-> + crop[IA_CSS_ISYS_CROPPING_LOCATION_PRE_ISA].right_offset == + stream_cfg->crop[IA_CSS_ISYS_CROPPING_LOCATION_PRE_ISA].left_offset + + (int)stream_cfg-> + isa_cfg.isa_res[IA_CSS_ISYS_RESOLUTION_INFO_POST_ISA_NONSCALED].width, + EINVAL); + /* Ensure the Non-scaled ISA output resolution + * before the PIFCONV cropping bounds the + * RAW_NS pin output resolution since padding + * is not supported + */ + 
verifret(stream_cfg-> +isa_cfg.isa_res[IA_CSS_ISYS_RESOLUTION_INFO_POST_ISA_NONSCALED].height >= +stream_cfg->output_pins[i].output_res.height, EINVAL); + verifret(stream_cfg-> +isa_cfg.isa_res[IA_CSS_ISYS_RESOLUTION_INFO_POST_ISA_NONSCALED].width >= +stream_cfg->output_pins[i].output_res.width, EINVAL); + break; + case IA_CSS_ISYS_PIN_TYPE_RAW_S: + /* Ensure the ScaledPIFCONV cropped resolution + * matches the RAW_S output pin resolution + */ + verifret(stream_cfg-> + crop[IA_CSS_ISYS_CROPPING_LOCATION_POST_ISA_SCALED].bottom_offset == + stream_cfg-> + crop[IA_CSS_ISYS_CROPPING_LOCATION_POST_ISA_SCALED].top_offset + + (int)stream_cfg->output_pins[i].output_res.height, EINVAL); + verifret(stream_cfg-> + crop[IA_CSS_ISYS_CROPPING_LOCATION_POST_ISA_SCALED].right_offset == + stream_cfg-> + crop[IA_CSS_ISYS_CROPPING_LOCATION_POST_ISA_SCALED].left_offset + + (int)stream_cfg->output_pins[i].output_res.width, EINVAL); + /* Ensure the ISAPF cropped resolution bounds + * the Scaled ISA output resolution before the + * ScaledPIFCONV cropping, since only IDS can + * modify the resolution, and this only to + * make it smaller + */ + verifret(stream_cfg-> + crop[IA_CSS_ISYS_CROPPING_LOCATION_PRE_ISA].bottom_offset >= + stream_cfg->crop[IA_CSS_ISYS_CROPPING_LOCATION_PRE_ISA].top_offset + + (int)stream_cfg-> + isa_cfg.isa_res[IA_CSS_ISYS_RESOLUTION_INFO_POST_ISA_SCALED].height, + EINVAL); + verifret(stream_cfg-> + crop[IA_CSS_ISYS_CROPPING_LOCATION_PRE_ISA].right_offset >= + stream_cfg->crop[IA_CSS_ISYS_CROPPING_LOCATION_PRE_ISA].left_offset + + (int)stream_cfg-> + isa_cfg.isa_res[IA_CSS_ISYS_RESOLUTION_INFO_POST_ISA_SCALED].width, + EINVAL); + /* Ensure the Scaled ISA output resolution + * before the ScaledPIFCONV cropping bounds + * the RAW_S pin output resolution since + * padding is not supported + */ + verifret(stream_cfg-> + isa_cfg.isa_res[IA_CSS_ISYS_RESOLUTION_INFO_POST_ISA_SCALED].height >= + stream_cfg->output_pins[i].output_res.height, EINVAL); + verifret(stream_cfg-> 
+ isa_cfg.isa_res[IA_CSS_ISYS_RESOLUTION_INFO_POST_ISA_SCALED].width >= + stream_cfg->output_pins[i].output_res.width, EINVAL); + break; + default: + break; + } + } + } + + /* open 1 send queue/stream and a single receive queue + * if not existing + */ + retval = ia_css_syscom_send_port_open(ctx->sys, + (BASE_MSG_SEND_QUEUES + stream_handle)); + verifret(retval != FW_ERROR_BUSY, EBUSY); + verifret(retval != FW_ERROR_BAD_ADDRESS, EFAULT); + verifret(retval == 0, EINVAL); + + packets = ia_css_syscom_send_port_available(ctx->sys, + (BASE_MSG_SEND_QUEUES + stream_handle)); + verifret(packets != FW_ERROR_BAD_ADDRESS, EFAULT); + verifret(packets >= 0, EINVAL); + verifret(packets > 0, EPERM); + token.send_type = IA_CSS_ISYS_SEND_TYPE_STREAM_OPEN; + retval = ia_css_isys_constr_fw_stream_cfg(ctx, stream_handle, + &stream_cfg_fw, &buf_stream_cfg_id, stream_cfg); + verifret(retval == 0, retval); + token.payload = stream_cfg_fw; + token.buf_handle = HOST_ADDRESS(buf_stream_cfg_id); + retval = ia_css_syscom_send_port_transfer(ctx->sys, + (BASE_MSG_SEND_QUEUES + stream_handle), &token); + verifret(retval != FW_ERROR_BAD_ADDRESS, EFAULT); + verifret(retval >= 0, EINVAL); + + ctx->stream_nof_output_pins[stream_handle] = + stream_cfg->nof_output_pins; + ctx->stream_state_array[stream_handle] = + IA_CSS_ISYS_STREAM_STATE_OPENED; + + /* Printing "LEAVE IA_CSS_ISYS_STREAM_OPEN" message + * if tracing level = VERBOSE. + */ + IA_CSS_TRACE_0(ISYSAPI, VERBOSE, "LEAVE IA_CSS_ISYS_STREAM_OPEN\n"); + + return 0; +} + + +/** + * ia_css_isys_stream_close() - close virtual stream + */ +int ia_css_isys_stream_close( + HANDLE context, + const unsigned int stream_handle) +{ + struct ia_css_isys_context *ctx = (struct ia_css_isys_context *)context; + int retval = 0; + int packets; + struct send_queue_token token; + + /* Printing "ENTRY IA_CSS_ISYS_STREAM_CLOSE" message + * if tracing level = VERBOSE. 
+ */ + IA_CSS_TRACE_0(ISYSAPI, VERBOSE, "ENTRY IA_CSS_ISYS_STREAM_CLOSE\n"); + + verifret(ctx, EFAULT); + + /* Printing device handle context information + * if tracing level = VERBOSE. + */ +#if ISYSAPI_TRACE_CONFIG == ISYSAPI_TRACE_LOG_LEVEL_DEBUG + print_handle_context(ctx); +#endif /* ISYSAPI_TRACE_CONFIG == ISYSAPI_TRACE_LOG_LEVEL_DEBUG */ + +#if (VERIFY_DEVSTATE != 0) + verifret(ctx->dev_state == IA_CSS_ISYS_DEVICE_STATE_READY, EPERM); +#endif /* VERIFY_DEVSTATE */ + + verifret(stream_handle < STREAM_ID_MAX, EINVAL); + verifret(stream_handle < + ctx->num_send_queues[IA_CSS_ISYS_QUEUE_TYPE_MSG], EINVAL); + + verifret(ctx->stream_state_array[stream_handle] == + IA_CSS_ISYS_STREAM_STATE_OPENED, EPERM); + + packets = ia_css_syscom_send_port_available(ctx->sys, + (BASE_MSG_SEND_QUEUES + stream_handle)); + verifret(packets != FW_ERROR_BAD_ADDRESS, EFAULT); + verifret(packets >= 0, EINVAL); + verifret(packets > 0, EPERM); + token.send_type = IA_CSS_ISYS_SEND_TYPE_STREAM_CLOSE; + token.stream_id = stream_handle; + token.payload = 0; + token.buf_handle = 0; + retval = ia_css_syscom_send_port_transfer(ctx->sys, + (BASE_MSG_SEND_QUEUES + stream_handle), &token); + verifret(retval != FW_ERROR_BAD_ADDRESS, EFAULT); + verifret(retval >= 0, EINVAL); + + /* close 1 send queue/stream and the single receive queue + * if none is using it + */ + retval = ia_css_syscom_send_port_close(ctx->sys, + (BASE_MSG_SEND_QUEUES + stream_handle)); + verifret(retval != FW_ERROR_BAD_ADDRESS, EFAULT); + verifret(retval == 0, EINVAL); + + ctx->stream_state_array[stream_handle] = IA_CSS_ISYS_STREAM_STATE_IDLE; + /* Printing "LEAVE IA_CSS_ISYS_STREAM_CLOSE" message + * if tracing level = VERBOSE. 
+ */ + IA_CSS_TRACE_0(ISYSAPI, VERBOSE, "LEAVE IA_CSS_ISYS_STREAM_CLOSE\n"); + + return 0; +} + + +/** + * ia_css_isys_stream_start() - starts handling a mipi virtual stream + */ +int ia_css_isys_stream_start( + HANDLE context, + const unsigned int stream_handle, + const struct ia_css_isys_frame_buff_set *next_frame) +{ + struct ia_css_isys_context *ctx = (struct ia_css_isys_context *)context; + int retval = 0; + int packets; + struct send_queue_token token; + ia_css_shared_buffer_css_address next_frame_fw = 0; + ia_css_shared_buffer buf_next_frame_id = (ia_css_shared_buffer)NULL; + + /* Printing "ENTRY IA_CSS_ISYS_STREAM_START" message + * if tracing level = VERBOSE. + */ + IA_CSS_TRACE_0(ISYSAPI, VERBOSE, "ENTRY IA_CSS_ISYS_STREAM_START\n"); + + verifret(ctx, EFAULT); + + /* Printing frame configuration and device handle context information + * if tracing level = VERBOSE. + */ +#if ISYSAPI_TRACE_CONFIG == ISYSAPI_TRACE_LOG_LEVEL_DEBUG + print_handle_context(ctx); + print_isys_frame_buff_set(next_frame, + ctx->stream_nof_output_pins[stream_handle]); +#endif /* ISYSAPI_TRACE_CONFIG == ISYSAPI_TRACE_LOG_LEVEL_DEBUG */ + +#if (VERIFY_DEVSTATE != 0) + verifret(ctx->dev_state == IA_CSS_ISYS_DEVICE_STATE_READY, EPERM); +#endif /* VERIFY_DEVSTATE */ + + verifret(stream_handle < STREAM_ID_MAX, EINVAL); + verifret(stream_handle < + ctx->num_send_queues[IA_CSS_ISYS_QUEUE_TYPE_MSG], EINVAL); + + verifret(ctx->stream_state_array[stream_handle] == + IA_CSS_ISYS_STREAM_STATE_OPENED, EPERM); + + packets = ia_css_syscom_send_port_available(ctx->sys, + (BASE_MSG_SEND_QUEUES + stream_handle)); + verifret(packets != FW_ERROR_BAD_ADDRESS, EFAULT); + verifret(packets >= 0, EINVAL); + verifret(packets > 0, EPERM); + if (next_frame != NULL) { + token.send_type = + IA_CSS_ISYS_SEND_TYPE_STREAM_START_AND_CAPTURE; + retval = ia_css_isys_constr_fw_next_frame(ctx, stream_handle, + &next_frame_fw, &buf_next_frame_id, next_frame); + verifret(retval == 0, retval); + token.payload = 
next_frame_fw; + token.buf_handle = HOST_ADDRESS(buf_next_frame_id); + } else { + token.send_type = IA_CSS_ISYS_SEND_TYPE_STREAM_START; + token.payload = 0; + token.buf_handle = 0; + } + retval = ia_css_syscom_send_port_transfer(ctx->sys, + (BASE_MSG_SEND_QUEUES + stream_handle), &token); + verifret(retval != FW_ERROR_BAD_ADDRESS, EFAULT); + verifret(retval >= 0, EINVAL); + + ctx->stream_state_array[stream_handle] = + IA_CSS_ISYS_STREAM_STATE_STARTED; + /* Printing "LEAVE IA_CSS_ISYS_STREAM_START" message + * if tracing level = VERBOSE. + */ + IA_CSS_TRACE_0(ISYSAPI, VERBOSE, "LEAVE IA_CSS_ISYS_STREAM_START\n"); + + return 0; +} + + +/** + * ia_css_isys_stream_stop() - Stops a mipi virtual stream + */ +int ia_css_isys_stream_stop( + HANDLE context, + const unsigned int stream_handle) +{ + struct ia_css_isys_context *ctx = (struct ia_css_isys_context *)context; + int retval = 0; + int packets; + struct send_queue_token token; + + /* Printing "ENTRY IA_CSS_ISYS_STREAM_STOP" message + * if tracing level = VERBOSE. + */ + IA_CSS_TRACE_0(ISYSAPI, VERBOSE, "ENTRY IA_CSS_ISYS_STREAM_STOP\n"); + + verifret(ctx, EFAULT); + + /* Printing device handle context information + * if tracing level = VERBOSE. 
+ */ +#if ISYSAPI_TRACE_CONFIG == ISYSAPI_TRACE_LOG_LEVEL_DEBUG + print_handle_context(ctx); +#endif /* ISYSAPI_TRACE_CONFIG == ISYSAPI_TRACE_LOG_LEVEL_DEBUG */ + +#if (VERIFY_DEVSTATE != 0) + verifret(ctx->dev_state == IA_CSS_ISYS_DEVICE_STATE_READY, EPERM); +#endif /* VERIFY_DEVSTATE */ + + verifret(stream_handle < STREAM_ID_MAX, EINVAL); + verifret(stream_handle < + ctx->num_send_queues[IA_CSS_ISYS_QUEUE_TYPE_MSG], EINVAL); + + verifret(ctx->stream_state_array[stream_handle] == + IA_CSS_ISYS_STREAM_STATE_STARTED, EPERM); + + packets = ia_css_syscom_send_port_available(ctx->sys, + (BASE_DEV_SEND_QUEUES)); + verifret(packets != FW_ERROR_BAD_ADDRESS, EFAULT); + verifret(packets >= 0, EINVAL); + verifret(packets > 0, EPERM); + token.send_type = IA_CSS_ISYS_SEND_TYPE_STREAM_STOP; + token.stream_id = stream_handle; + token.payload = 0; + token.buf_handle = 0; + retval = ia_css_syscom_send_port_transfer(ctx->sys, + (BASE_DEV_SEND_QUEUES), &token); + verifret(retval != FW_ERROR_BAD_ADDRESS, EFAULT); + verifret(retval >= 0, EINVAL); + + ctx->stream_state_array[stream_handle] = + IA_CSS_ISYS_STREAM_STATE_OPENED; + + /* Printing "LEAVE IA_CSS_ISYS_STREAM_STOP" message + * if tracing level = VERBOSE. + */ + IA_CSS_TRACE_0(ISYSAPI, VERBOSE, "LEAVE IA_CSS_ISYS_STREAM_STOP\n"); + + return 0; +} + + +/** + * ia_css_isys_stream_flush() - stops a mipi virtual stream but + * completes processing cmd backlog + */ +int ia_css_isys_stream_flush( + HANDLE context, + const unsigned int stream_handle) +{ + struct ia_css_isys_context *ctx = (struct ia_css_isys_context *)context; + int retval = 0; + int packets; + struct send_queue_token token; + + /* Printing "ENTRY IA_CSS_ISYS_STREAM_FLUSH" message + * if tracing level = VERBOSE. + */ + IA_CSS_TRACE_0(ISYSAPI, VERBOSE, "ENTRY IA_CSS_ISYS_STREAM_FLUSH\n"); + + verifret(ctx, EFAULT); + + /* Printing device handle context information + * if tracing level = VERBOSE. 
+ */ +#if ISYSAPI_TRACE_CONFIG == ISYSAPI_TRACE_LOG_LEVEL_DEBUG + print_handle_context(ctx); +#endif /* ISYSAPI_TRACE_CONFIG == ISYSAPI_TRACE_LOG_LEVEL_DEBUG */ + +#if (VERIFY_DEVSTATE != 0) + verifret(ctx->dev_state == IA_CSS_ISYS_DEVICE_STATE_READY, EPERM); +#endif /* VERIFY_DEVSTATE */ + + verifret(stream_handle < STREAM_ID_MAX, EINVAL); + verifret(stream_handle < + ctx->num_send_queues[IA_CSS_ISYS_QUEUE_TYPE_MSG], EINVAL); + + verifret(ctx->stream_state_array[stream_handle] == + IA_CSS_ISYS_STREAM_STATE_STARTED, EPERM); + + packets = ia_css_syscom_send_port_available(ctx->sys, + (BASE_MSG_SEND_QUEUES + stream_handle)); + verifret(packets != FW_ERROR_BAD_ADDRESS, EFAULT); + verifret(packets >= 0, EINVAL); + verifret(packets > 0, EPERM); + token.send_type = IA_CSS_ISYS_SEND_TYPE_STREAM_FLUSH; + token.payload = 0; + token.buf_handle = 0; + retval = ia_css_syscom_send_port_transfer(ctx->sys, + (BASE_MSG_SEND_QUEUES + stream_handle), &token); + verifret(retval != FW_ERROR_BAD_ADDRESS, EFAULT); + verifret(retval >= 0, EINVAL); + + ctx->stream_state_array[stream_handle] = + IA_CSS_ISYS_STREAM_STATE_OPENED; + + /* Printing "LEAVE IA_CSS_ISYS_STREAM_FLUSH" message + * if tracing level = VERBOSE. + */ + IA_CSS_TRACE_0(ISYSAPI, VERBOSE, "LEAVE IA_CSS_ISYS_STREAM_FLUSH\n"); + + return 0; +} + + +/** + * ia_css_isys_stream_capture_indication() + * - captures "next frame" on stream_handle + */ +int ia_css_isys_stream_capture_indication( + HANDLE context, + const unsigned int stream_handle, + const struct ia_css_isys_frame_buff_set *next_frame) +{ + struct ia_css_isys_context *ctx = (struct ia_css_isys_context *)context; + int retval = 0; + int packets; + struct send_queue_token token; + ia_css_shared_buffer_css_address next_frame_fw = 0; + ia_css_shared_buffer buf_next_frame_id = (ia_css_shared_buffer)NULL; + + /* Printing "ENTRY IA_CSS_ISYS_STREAM_CAPTURE_INDICATION" message + * if tracing level = VERBOSE. 
+ */ + IA_CSS_TRACE_0(ISYSAPI, VERBOSE, + "ENTRY IA_CSS_ISYS_STREAM_CAPTURE_INDICATION\n"); + + verifret(ctx, EFAULT); + + /* Printing frame configuration and device handle context information + *if tracing level = VERBOSE. + */ +#if ISYSAPI_TRACE_CONFIG == ISYSAPI_TRACE_LOG_LEVEL_DEBUG + print_handle_context(ctx); + print_isys_frame_buff_set(next_frame, + ctx->stream_nof_output_pins[stream_handle]); +#endif /* ISYSAPI_TRACE_CONFIG == ISYSAPI_TRACE_LOG_LEVEL_DEBUG */ + +#if (VERIFY_DEVSTATE != 0) + verifret(ctx->dev_state == IA_CSS_ISYS_DEVICE_STATE_READY, EPERM); +#endif /* VERIFY_DEVSTATE */ + + verifret(stream_handle < STREAM_ID_MAX, EINVAL); + verifret(stream_handle < + ctx->num_send_queues[IA_CSS_ISYS_QUEUE_TYPE_MSG], EINVAL); + verifret(ctx->stream_state_array[stream_handle] == + IA_CSS_ISYS_STREAM_STATE_STARTED, EPERM); + verifret(next_frame != NULL, EFAULT); + + packets = ia_css_syscom_send_port_available(ctx->sys, + (BASE_MSG_SEND_QUEUES + stream_handle)); + verifret(packets != FW_ERROR_BAD_ADDRESS, EFAULT); + verifret(packets >= 0, EINVAL); + verifret(packets > 0, EPERM); + { + token.send_type = IA_CSS_ISYS_SEND_TYPE_STREAM_CAPTURE; + retval = ia_css_isys_constr_fw_next_frame(ctx, stream_handle, + &next_frame_fw, &buf_next_frame_id, next_frame); + verifret(retval == 0, retval); + token.payload = next_frame_fw; + token.buf_handle = HOST_ADDRESS(buf_next_frame_id); + } + retval = ia_css_syscom_send_port_transfer(ctx->sys, + (BASE_MSG_SEND_QUEUES + stream_handle), &token); + verifret(retval != FW_ERROR_BAD_ADDRESS, EFAULT); + verifret(retval >= 0, EINVAL); + + /* Printing "LEAVE IA_CSS_ISYS_STREAM_CAPTURE_INDICATION" message + * if tracing level = VERBOSE. 
+ */ + IA_CSS_TRACE_0(ISYSAPI, VERBOSE, + "LEAVE IA_CSS_ISYS_STREAM_CAPTURE_INDICATION\n"); + + return 0; +} + + +/** + * ia_css_isys_stream_handle_response() - handle ISYS responses + */ +int ia_css_isys_stream_handle_response( + HANDLE context, + struct ia_css_isys_resp_info *received_response) +{ + struct ia_css_isys_context *ctx = (struct ia_css_isys_context *)context; + int retval = 0; + int packets; + struct resp_queue_token token; + + /* Printing "ENTRY IA_CSS_ISYS_STREAM_HANDLE_RESPONSE" message + * if tracing level = VERBOSE. + */ + IA_CSS_TRACE_0(ISYSAPI, VERBOSE, + "ENTRY IA_CSS_ISYS_STREAM_HANDLE_RESPONSE\n"); + + verifret(ctx, EFAULT); + + /* Printing device handle context information + * if tracing level = VERBOSE. + */ +#if ISYSAPI_TRACE_CONFIG == ISYSAPI_TRACE_LOG_LEVEL_DEBUG + print_handle_context(ctx); +#endif /* ISYSAPI_TRACE_CONFIG == ISYSAPI_TRACE_LOG_LEVEL_DEBUG */ + +#if (VERIFY_DEVSTATE != 0) + verifret(ctx->dev_state == IA_CSS_ISYS_DEVICE_STATE_READY, EPERM); +#endif /* VERIFY_DEVSTATE */ + + verifret(received_response != NULL, EFAULT); + + packets = ia_css_syscom_recv_port_available( + ctx->sys, BASE_MSG_RECV_QUEUES); + verifret(packets != FW_ERROR_BAD_ADDRESS, EFAULT); + verifret(packets >= 0, EINVAL); + verifret(packets > 0, EPERM); + + retval = ia_css_syscom_recv_port_transfer( + ctx->sys, BASE_MSG_RECV_QUEUES, &token); + verifret(retval != FW_ERROR_BAD_ADDRESS, EFAULT); + verifret(retval >= 0, EINVAL); + retval = ia_css_isys_extract_fw_response( + ctx, &token, received_response); + verifret(retval == 0, retval); + + /* Printing received response information + * if tracing level = VERBOSE. 
+ */ +#if ISYSAPI_TRACE_CONFIG == ISYSAPI_TRACE_LOG_LEVEL_DEBUG + print_isys_resp_info(received_response); +#endif /* ISYSAPI_TRACE_CONFIG == ISYSAPI_TRACE_LOG_LEVEL_DEBUG */ + + verifret(received_response->type < N_IA_CSS_ISYS_RESP_TYPE, EINVAL); + verifret(received_response->stream_handle < STREAM_ID_MAX, EINVAL); + + if (received_response->type == IA_CSS_ISYS_RESP_TYPE_PIN_DATA_READY || + received_response->type == IA_CSS_ISYS_RESP_TYPE_PIN_DATA_WATERMARK || + received_response->type == IA_CSS_ISYS_RESP_TYPE_PIN_DATA_SKIPPED) { + verifret(received_response->pin.addr != 0, EFAULT); + verifret(received_response->pin.out_buf_id != 0, EFAULT); + verifret(received_response->pin_id < + ctx->stream_nof_output_pins[received_response->stream_handle], + EINVAL); + } + + /* Printing "LEAVE IA_CSS_ISYS_STREAM_HANDLE_RESPONSE" message + * if tracing level = VERBOSE. + */ + IA_CSS_TRACE_0(ISYSAPI, VERBOSE, + "LEAVE IA_CSS_ISYS_STREAM_HANDLE_RESPONSE\n"); + + return 0; +} + + +/** + * ia_css_isys_device_close() - close ISYS device + */ +static int isys_context_destroy(HANDLE context) +{ + struct ia_css_isys_context *ctx = (struct ia_css_isys_context *)context; + unsigned int stream_handle; + unsigned int queue_id; + unsigned int nof_recv_queues; + int retval = 0; + + /* Printing "ENTRY IA_CSS_ISYS_DEVICE_CLOSE" message + * if tracing level = VERBOSE. + */ + IA_CSS_TRACE_0(ISYSAPI, VERBOSE, "ENTRY isys_context_destroy\n"); + + verifret(ctx, EFAULT); + + /* Printing device handle context information + * if tracing level = VERBOSE. 
+ */ +#if ISYSAPI_TRACE_CONFIG == ISYSAPI_TRACE_LOG_LEVEL_DEBUG + print_handle_context(ctx); +#endif /* ISYSAPI_TRACE_CONFIG == ISYSAPI_TRACE_LOG_LEVEL_DEBUG */ + +#if (VERIFY_DEVSTATE != 0) + verifret(ctx->dev_state == IA_CSS_ISYS_DEVICE_STATE_READY, EPERM); +#endif /* VERIFY_DEVSTATE */ + + nof_recv_queues = ctx->num_recv_queues[IA_CSS_ISYS_QUEUE_TYPE_MSG] + + ctx->num_recv_queues[IA_CSS_ISYS_QUEUE_TYPE_PROXY]; + /* Close the ports for all the recv queues (MSG and PROXY) */ + for (queue_id = 0; queue_id < nof_recv_queues; queue_id++) { + retval = ia_css_syscom_recv_port_close( + ctx->sys, queue_id); + verifret(retval != FW_ERROR_BAD_ADDRESS, EFAULT); + verifret(retval == 0, EINVAL); + } + + /* Close the ports for PROXY send queue(s) */ + for (queue_id = 0; + queue_id < ctx->num_send_queues[IA_CSS_ISYS_QUEUE_TYPE_PROXY] + + ctx->num_send_queues[IA_CSS_ISYS_QUEUE_TYPE_DEV]; + queue_id++) { + retval = ia_css_syscom_send_port_close( + ctx->sys, queue_id); + verifret(retval != FW_ERROR_BAD_ADDRESS, EFAULT); + verifret(retval == 0, EINVAL); + } + + for (stream_handle = 0; stream_handle < STREAM_ID_MAX; + stream_handle++) { + verifret(ctx->stream_state_array[stream_handle] == + IA_CSS_ISYS_STREAM_STATE_IDLE, EPERM); + } + + retval = ia_css_syscom_close(ctx->sys); + verifret(retval == 0, EBUSY); + +#if (VERIFY_DEVSTATE != 0) + ctx->dev_state = IA_CSS_ISYS_DEVICE_STATE_CONFIGURED; +#endif /* VERIFY_DEVSTATE */ + + /* Printing "LEAVE IA_CSS_ISYS_DEVICE_CLOSE" message + * if tracing level = VERBOSE. 
+ */ + IA_CSS_TRACE_0(ISYSAPI, VERBOSE, "LEAVE isys_context_destroy\n"); + + return 0; +} +/** + * ia_css_isys_device_close() - close ISYS device + */ +#if HAS_DUAL_CMD_CTX_SUPPORT +int ia_css_isys_context_destroy(HANDLE context) +{ + return isys_context_destroy(context); +} + +void ia_css_isys_device_close(void) +{ + /* Created for legacy, nothing to perform here */ +} + +#else +int ia_css_isys_device_close(HANDLE context) +{ + return isys_context_destroy(context); +} +#endif + +/** + * ia_css_isys_device_release() - release ISYS device + */ +int ia_css_isys_device_release(HANDLE context, unsigned int force) +{ + struct ia_css_isys_context *ctx = (struct ia_css_isys_context *)context; + int retval = 0; + + /* Printing "ENTRY IA_CSS_ISYS_DEVICE_RELEASE" message + * if tracing level = VERBOSE. + */ + IA_CSS_TRACE_0(ISYSAPI, VERBOSE, "ENTRY IA_CSS_ISYS_DEVICE_RELEASE\n"); + + verifret(ctx, EFAULT); + + /* Printing device handle context information + * if tracing level = VERBOSE. + */ +#if ISYSAPI_TRACE_CONFIG == ISYSAPI_TRACE_LOG_LEVEL_DEBUG + print_handle_context(ctx); +#endif /* ISYSAPI_TRACE_CONFIG == ISYSAPI_TRACE_LOG_LEVEL_DEBUG */ + +#if (VERIFY_DEVSTATE != 0) + verifret(ctx->dev_state == IA_CSS_ISYS_DEVICE_STATE_CONFIGURED, EPERM); +#endif /* VERIFY_DEVSTATE */ + + retval = ia_css_syscom_release(ctx->sys, force); + verifret(retval == 0, EBUSY); + + /* If ia_css_isys_device_release called with force==1, this should + * happen after timeout, so no active transfers + * If ia_css_isys_device_release called with force==0, this should + * happen after SP has gone idle, so no active transfers + */ + ia_css_isys_force_unmap_comm_buff_queue(ctx); + ia_css_isys_destr_comm_buff_queue(ctx); + +#if (VERIFY_DEVSTATE != 0) + ctx->dev_state = IA_CSS_ISYS_DEVICE_STATE_IDLE; +#endif /* VERIFY_DEVSTATE */ + + ia_css_cpu_mem_free(ctx); + + /* Printing "LEAVE IA_CSS_ISYS_DEVICE_RELEASE" message + * if tracing level = VERBOSE. 
+ */ + IA_CSS_TRACE_0(ISYSAPI, VERBOSE, "LEAVE IA_CSS_ISYS_DEVICE_RELEASE\n"); + + return 0; +} + +/** + * ia_css_isys_proxy_write_req() - send ISYS proxy write requests + */ +int ia_css_isys_proxy_write_req( + HANDLE context, + const struct ia_css_proxy_write_req_val *write_req_val) +{ + + struct ia_css_isys_context *ctx = (struct ia_css_isys_context *)context; + struct proxy_send_queue_token token; + int packets; + int retval = 0; + + IA_CSS_TRACE_0(ISYSAPI, VERBOSE, "ENTRY IA_CSS_ISYS_PROXY_WRITE_REQ\n"); + verifret(ctx, EFAULT); + verifret(write_req_val != NULL, EFAULT); + + packets = ia_css_syscom_send_port_available(ctx->sys, 0); + verifret(packets != FW_ERROR_BAD_ADDRESS, EFAULT); + verifret(packets >= 0, EINVAL); + verifret(packets > 0, EPERM); + + token.request_id = write_req_val->request_id; + token.region_index = write_req_val->region_index; + token.offset = write_req_val->offset; + token.value = write_req_val->value; + + retval = ia_css_syscom_send_port_transfer(ctx->sys, 0, &token); + verifret(retval != FW_ERROR_BAD_ADDRESS, EFAULT); + verifret(retval >= 0, EINVAL); + + IA_CSS_TRACE_0(ISYSAPI, VERBOSE, "LEAVE IA_CSS_ISYS_PROXY_WRITE_REQ\n"); + + return 0; +} + +/** + * ia_css_isys_proxy_handle_write_response() - handle ISYS proxy responses + */ +int ia_css_isys_proxy_handle_write_response( + HANDLE context, + struct ia_css_proxy_write_req_resp *received_response) +{ + + struct ia_css_isys_context *ctx = (struct ia_css_isys_context *)context; + struct proxy_resp_queue_token token; + int retval = 0; + int packets; + + IA_CSS_TRACE_0(ISYSAPI, VERBOSE, + "ENTRY IA_CSS_ISYS_PROXY_HANDLE_WRITE_RESPONSE\n"); + verifret(ctx, EFAULT); + verifret(received_response != NULL, EFAULT); + + packets = ia_css_syscom_recv_port_available(ctx->sys, 0); + verifret(packets != FW_ERROR_BAD_ADDRESS, EFAULT); + verifret(packets >= 0, EINVAL); + verifret(packets > 0, EPERM); + + retval = ia_css_syscom_recv_port_transfer(ctx->sys, 0, &token); + verifret(retval != 
FW_ERROR_BAD_ADDRESS, EFAULT); + verifret(retval >= 0, EINVAL); + + + retval = ia_css_isys_extract_proxy_response(&token, received_response); + verifret(retval == 0, retval); + + IA_CSS_TRACE_0(ISYSAPI, VERBOSE, + "LEAVE IA_CSS_ISYS_PROXY_HANDLE_WRITE_RESPONSE\n"); + + return 0; +} diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/isysapi/src/ia_css_isys_public_trace.c b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/isysapi/src/ia_css_isys_public_trace.c new file mode 100644 index 0000000000000..d6500a0cb6056 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/isysapi/src/ia_css_isys_public_trace.c @@ -0,0 +1,379 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+*/ + +#include "ia_css_isysapi_trace.h" +#include "ia_css_isys_public_trace.h" +#include "ia_css_isysapi_types.h" +#include "ia_css_isysapi.h" +#include "ia_css_isys_private.h" +#include "error_support.h" +#include "ia_css_syscom.h" + +/** + * print_handle_context - formatted print function for + * struct ia_css_isys_context *ctx variable + */ +int print_handle_context(struct ia_css_isys_context *ctx) +{ + unsigned int i; + + verifret(ctx != NULL, EFAULT); + /* Print ctx->(ssid, mmid, dev_state) */ + IA_CSS_TRACE_0(ISYSAPI, VERBOSE, "Print ia_css_isys_context *ctx\n" + "-------------------------------------------------------\n"); + IA_CSS_TRACE_3(ISYSAPI, VERBOSE, + "\tia_css_isys_context->ssid = %d\n" + "\t\t\tia_css_isys_context->mmid = %d\n" + "\t\t\tia_css_isys_context->device_state = %d\n" + , ctx->ssid + , ctx->mmid + , ctx->dev_state); + /* Print ctx->(stream_state_array, stream_nof_output_pins) */ + for (i = 0; i < STREAM_ID_MAX; i++) { + IA_CSS_TRACE_4(ISYSAPI, VERBOSE, + "\tia_css_isys_context->stream_state[i = %d] = %d\n" + "\t\t\tia_css_isys_context->stream_nof_output_pins[i = %d] = %d\n" + , i + , ctx->stream_state_array[i] + , i + , ctx->stream_nof_output_pins[i]); + } + /* Print ctx->ia_css_syscom_context */ + IA_CSS_TRACE_1(ISYSAPI, VERBOSE, + "\tia_css_isys_context->ia_css_syscom_context = %p\n" + , (struct ia_css_syscom_context *)(ctx->sys)); + IA_CSS_TRACE_0(ISYSAPI, VERBOSE, + "-------------------------------------------------------\n"); + return 0; +} + +/** + * print_device_config_data - formatted print function for + * struct ia_css_isys_device_cfg_data *config variable + */ +int print_device_config_data(const struct ia_css_isys_device_cfg_data *config) +{ + verifret(config != NULL, EFAULT); + IA_CSS_TRACE_0(ISYSAPI, + VERBOSE, + "Print ia_css_isys_device_cfg_data *config\n" + "-------------------------------------------------------\n"); + IA_CSS_TRACE_7(ISYSAPI, + VERBOSE, + "\tia_css_isys_device_cfg_data->driver_sys.ssid = %d\n" + 
"\t\t\tia_css_isys_device_cfg_data->driver_sys.mmid = %d\n" + "\t\t\tia_css_isys_device_cfg_data->driver_sys.num_send_queues = %d\n" + "\t\t\tia_css_isys_device_cfg_data->driver_sys.num_recv_queues = %d\n" + "\t\t\tia_css_isys_device_cfg_data->driver_sys.send_queue_size = %d\n" + "\t\t\tia_css_isys_device_cfg_data->driver_sys.recv_queue_size = %d\n" + "\t\t\tia_css_isys_device_cfg_data->driver_proxy.proxy_write_queue_size = %d\n", + config->driver_sys.ssid, + config->driver_sys.mmid, + config->driver_sys.num_send_queues, + config->driver_sys.num_recv_queues, + config->driver_sys.send_queue_size, + config->driver_sys.recv_queue_size, + config->driver_proxy.proxy_write_queue_size); + IA_CSS_TRACE_0(ISYSAPI, VERBOSE, + "-------------------------------------------------------\n"); + return 0; +} + +/** + * print_stream_config_data - formatted print function for + * ia_css_isys_stream_cfg_data stream_cfg variable + */ +int print_stream_config_data( + const struct ia_css_isys_stream_cfg_data *stream_cfg) +{ + unsigned int i; + + verifret(stream_cfg != NULL, EFAULT); + IA_CSS_TRACE_0(ISYSAPI, VERBOSE, + "Print ia_css_isys_stream_cfg_data stream_cfg\n" + "-------------------------------------------------------\n"); + IA_CSS_TRACE_5(ISYSAPI, VERBOSE, + "\tia_css_isys_stream_cfg_data->ia_css_isys_isl_use = %d\n" + "\t\t\tia_css_isys_stream_cfg_data->ia_css_isys_stream_source = %d\n" + "\t\t\tia_css_isys_stream_cfg_data->ia_css_isys_mipi_vc = %d\n" + "\t\t\tia_css_isys_stream_cfg_data->nof_input_pins = %d\n" + "\t\t\tia_css_isys_stream_cfg_data->nof_output_pins = %d\n" + , stream_cfg->isl_use + , stream_cfg->src + , stream_cfg->vc + , stream_cfg->nof_input_pins + , stream_cfg->nof_output_pins); + IA_CSS_TRACE_4(ISYSAPI, VERBOSE, + "\tia_css_isys_stream_cfg_data->send_irq_sof_discarded = %d\n" + "\t\t\tia_css_isys_stream_cfg_data->send_irq_eof_discarded = %d\n" + "\t\t\tia_css_isys_stream_cfg_data->send_resp_sof_discarded = %d\n" + 
"\t\t\tia_css_isys_stream_cfg_data->send_resp_eof_discarded = %d\n" + , stream_cfg->send_irq_sof_discarded + , stream_cfg->send_irq_eof_discarded + , stream_cfg->send_resp_sof_discarded + , stream_cfg->send_resp_eof_discarded); + for (i = 0; i < stream_cfg->nof_input_pins; i++) { + IA_CSS_TRACE_6(ISYSAPI, VERBOSE, + "\tia_css_isys_stream_cfg_data->ia_css_isys_input_pin_info[i = %d].ia_css_isys_mipi_data_type = %d\n" + "\t\t\tia_css_isys_stream_cfg_data->ia_css_isys_input_pin_info[i = %d].ia_css_isys_resolution.width = %d\n" + "\t\t\tia_css_isys_stream_cfg_data->ia_css_isys_input_pin_info[i = %d].ia_css_isys_resolution.height = %d\n" + , i + , stream_cfg->input_pins[i].dt + , i + , stream_cfg->input_pins[i].input_res.width + , i + , stream_cfg->input_pins[i].input_res.height); + IA_CSS_TRACE_2(ISYSAPI, VERBOSE, + "\tia_css_isys_stream_cfg_data->ia_css_isys_input_pin_info[i = %d].ia_css_isys_mipi_store_mode = %d\n" + , i + , stream_cfg->input_pins[i].mipi_store_mode); + } + for (i = 0; i < N_IA_CSS_ISYS_CROPPING_LOCATION; i++) { + IA_CSS_TRACE_4(ISYSAPI, VERBOSE, + "\tia_css_isys_stream_cfg_data->ia_css_isys_cropping[i = %d].top_offset = %d\n" + "\t\t\tia_css_isys_stream_cfg_data->ia_css_isys_cropping[i = %d].left_offset = %d\n" + , i + , stream_cfg->crop[i].top_offset + , i + , stream_cfg->crop[i].left_offset); + IA_CSS_TRACE_4(ISYSAPI, VERBOSE, + "\tia_css_isys_stream_cfg_data->ia_css_isys_cropping[i = %d].bottom_offset = %d\n" + "\t\t\tia_css_isys_stream_cfg_data->ia_css_isys_cropping[i = %d].right_offset = %d\n" + , i + , stream_cfg->crop[i].bottom_offset + , i + , stream_cfg->crop[i].right_offset); + } + for (i = 0; i < stream_cfg->nof_output_pins; i++) { + IA_CSS_TRACE_6(ISYSAPI, VERBOSE, + "\tia_css_isys_stream_cfg_data->ia_css_isys_output_pin_info[i = %d].ia_css_isys_pin_type = %d\n" + "\t\t\tia_css_isys_stream_cfg_data->ia_css_isys_output_pin_info[i = %d].ia_css_isys_frame_format_type = %d\n" + 
"\t\t\tia_css_isys_stream_cfg_data->ia_css_isys_output_pin_info[i = %d].input_pin_id = %d\n" + , i + , stream_cfg->output_pins[i].pt + , i + , stream_cfg->output_pins[i].ft + , i + , stream_cfg->output_pins[i].input_pin_id); + IA_CSS_TRACE_6(ISYSAPI, VERBOSE, + "\tia_css_isys_stream_cfg_data->ia_css_isys_output_pin_info[i = %d].watermark_in_lines = %d\n" + "\t\t\tia_css_isys_stream_cfg_data->ia_css_isys_output_pin_info[i = %d].send_irq = %d\n" + "\t\t\tia_css_isys_stream_cfg_data->ia_css_isys_output_pin_info[i = %d].stride = %d\n" + , i + , stream_cfg->output_pins[i].watermark_in_lines + , i + , stream_cfg->output_pins[i].send_irq + , i + , stream_cfg->output_pins[i].stride); + IA_CSS_TRACE_4(ISYSAPI, VERBOSE, + "\tia_css_isys_stream_cfg_data->ia_css_isys_output_pin_info[i = %d].ia_css_isys_resolution.width = %d\n" + "\t\t\tia_css_isys_stream_cfg_data->ia_css_isys_output_pin_info[i = %d].ia_css_isys_resolution.height = %d\n" + , i + , stream_cfg->output_pins[i].output_res.width + , i + , stream_cfg->output_pins[i].output_res.height); + } + for (i = 0; i < N_IA_CSS_ISYS_RESOLUTION_INFO; i++) { + IA_CSS_TRACE_4(ISYSAPI, VERBOSE, + "\tia_css_isys_stream_cfg_data->ia_css_isys_isa_cfg.ia_css_isys_resolution[i = %d].width = %d\n" + "\t\t\tia_css_isys_stream_cfg_data->ia_css_isys_isa_cfg.ia_css_isys_resolution[i = %d].height = %d\n" + , i + , stream_cfg->isa_cfg.isa_res[i].width + , i + , stream_cfg->isa_cfg.isa_res[i].height); + } + IA_CSS_TRACE_7(ISYSAPI, VERBOSE, + "\tia_css_isys_stream_cfg_data->ia_css_isys_isa_cfg.blc_enabled = %d\n" + "\t\t\tia_css_isys_stream_cfg_data->ia_css_isys_isa_cfg.lsc_enabled = %d\n" + "\t\t\tia_css_isys_stream_cfg_data->ia_css_isys_isa_cfg.dpc_enabled = %d\n" + "\t\t\tia_css_isys_stream_cfg_data->ia_css_isys_isa_cfg.downscaler_enabled = %d\n" + "\t\t\tia_css_isys_stream_cfg_data->ia_css_isys_isa_cfg.awb_enabled = %d\n" + "\t\t\tia_css_isys_stream_cfg_data->ia_css_isys_isa_cfg.af_enabled = %d\n" + 
"\t\t\tia_css_isys_stream_cfg_data->ia_css_isys_isa_cfg.ae_enabled = %d\n" + , stream_cfg->isa_cfg.blc_enabled + , stream_cfg->isa_cfg.lsc_enabled + , stream_cfg->isa_cfg.dpc_enabled + , stream_cfg->isa_cfg.downscaler_enabled + , stream_cfg->isa_cfg.awb_enabled + , stream_cfg->isa_cfg.af_enabled + , stream_cfg->isa_cfg.ae_enabled); + + IA_CSS_TRACE_1(ISYSAPI, VERBOSE, + "\t\t\tia_css_isys_stream_cfg_data->ia_css_isys_isa_cfg.paf_type = %d\n" + , stream_cfg->isa_cfg.paf_type); + + IA_CSS_TRACE_0(ISYSAPI, VERBOSE, + "-------------------------------------------------------\n"); + return 0; +} + +/** + * print_isys_frame_buff_set - formatted print function for + * struct ia_css_isys_frame_buff_set *next_frame variable + */ +int print_isys_frame_buff_set( + const struct ia_css_isys_frame_buff_set *next_frame, + const unsigned int nof_output_pins) +{ + unsigned int i; + + verifret(next_frame != NULL, EFAULT); + + IA_CSS_TRACE_0(ISYSAPI, VERBOSE, + "Print ia_css_isys_frame_buff_set *next_frame\n" + "-------------------------------------------------------\n"); + for (i = 0; i < nof_output_pins; i++) { + IA_CSS_TRACE_4(ISYSAPI, VERBOSE, + "\tia_css_isys_frame_buff_set->ia_css_isys_output_pin_payload[i = %d].ia_css_return_token = %016lxu\n" + "\t\t\tia_css_isys_frame_buff_set->ia_css_isys_output_pin_payload[i = %d].ia_css_input_buffer_css_address = %08xu\n" + , i + , (unsigned long int) + next_frame->output_pins[i].out_buf_id + , i + , next_frame->output_pins[i].addr); + } + IA_CSS_TRACE_2(ISYSAPI, VERBOSE, + "\tia_css_isys_frame_buff_set->process_group_light.ia_css_return_token = %016lxu\n" + "\t\t\tia_css_isys_frame_buff_set->process_group_light.ia_css_input_buffer_css_address = %08xu\n" + , (unsigned long int) + next_frame->process_group_light.param_buf_id + , next_frame->process_group_light.addr); + IA_CSS_TRACE_4(ISYSAPI, VERBOSE, + "\tia_css_isys_frame_buff_set->send_irq_sof = %d\n" + "\t\t\tia_css_isys_frame_buff_set->send_irq_eof = %d\n" + 
"\t\t\tia_css_isys_frame_buff_set->send_resp_sof = %d\n" + "\t\t\tia_css_isys_frame_buff_set->send_resp_eof = %d\n" + , (int) next_frame->send_irq_sof + , (int) next_frame->send_irq_eof + , (int) next_frame->send_resp_sof + , (int) next_frame->send_resp_eof); + IA_CSS_TRACE_0(ISYSAPI, VERBOSE, + "-------------------------------------------------------\n"); + return 0; +} + +/** + * print_isys_resp_info - formatted print function for + * struct ia_css_isys_frame_buff_set *next_frame variable + */ +int print_isys_resp_info(struct ia_css_isys_resp_info *received_response) +{ + verifret(received_response != NULL, EFAULT); + + IA_CSS_TRACE_0(ISYSAPI, VERBOSE, "ISYS_RESPONSE_INFO\n" + "-------------------------------------------------------\n"); + switch (received_response->type) { + case IA_CSS_ISYS_RESP_TYPE_STREAM_OPEN_DONE: + IA_CSS_TRACE_0(ISYSAPI, VERBOSE, + "\tia_css_isys_resp_info.ia_css_isys_resp_type = IA_CSS_ISYS_RESP_TYPE_STREAM_OPEN_DONE\n"); + break; + case IA_CSS_ISYS_RESP_TYPE_STREAM_START_ACK: + IA_CSS_TRACE_0(ISYSAPI, VERBOSE, + "\tia_css_isys_resp_info.ia_css_isys_resp_type = IA_CSS_ISYS_RESP_TYPE_STREAM_START_ACK\n"); + break; + case IA_CSS_ISYS_RESP_TYPE_STREAM_START_AND_CAPTURE_ACK: + IA_CSS_TRACE_0(ISYSAPI, VERBOSE, + "\tia_css_isys_resp_info.ia_css_isys_resp_type = IA_CSS_ISYS_RESP_TYPE_STREAM_START_AND_CAPTURE_ACK\n"); + break; + case IA_CSS_ISYS_RESP_TYPE_STREAM_CAPTURE_ACK: + IA_CSS_TRACE_0(ISYSAPI, VERBOSE, + "\tia_css_isys_resp_info.ia_css_isys_resp_type = IA_CSS_ISYS_RESP_TYPE_STREAM_CAPTURE_ACK\n"); + break; + case IA_CSS_ISYS_RESP_TYPE_STREAM_STOP_ACK: + IA_CSS_TRACE_0(ISYSAPI, VERBOSE, + "\tia_css_isys_resp_info.ia_css_isys_resp_type = IA_CSS_ISYS_RESP_TYPE_STREAM_STOP_ACK\n"); + break; + case IA_CSS_ISYS_RESP_TYPE_STREAM_FLUSH_ACK: + IA_CSS_TRACE_0(ISYSAPI, VERBOSE, + "\tia_css_isys_resp_info.ia_css_isys_resp_type = IA_CSS_ISYS_RESP_TYPE_STREAM_FLUSH_ACK\n"); + break; + case IA_CSS_ISYS_RESP_TYPE_STREAM_CLOSE_ACK: + 
IA_CSS_TRACE_0(ISYSAPI, VERBOSE, + "\tia_css_isys_resp_info.ia_css_isys_resp_type = IA_CSS_ISYS_RESP_TYPE_STREAM_CLOSE_ACK\n"); + break; + case IA_CSS_ISYS_RESP_TYPE_PIN_DATA_READY: + IA_CSS_TRACE_0(ISYSAPI, VERBOSE, + "\tia_css_isys_resp_info.ia_css_isys_resp_type = IA_CSS_ISYS_RESP_TYPE_PIN_DATA_READY\n"); + break; + case IA_CSS_ISYS_RESP_TYPE_PIN_DATA_WATERMARK: + IA_CSS_TRACE_0(ISYSAPI, VERBOSE, + "\tia_css_isys_resp_info.ia_css_isys_resp_type = IA_CSS_ISYS_RESP_TYPE_PIN_DATA_WATERMARK\n"); + break; + case IA_CSS_ISYS_RESP_TYPE_FRAME_SOF: + IA_CSS_TRACE_0(ISYSAPI, VERBOSE, + "\tia_css_isys_resp_info.ia_css_isys_resp_type = IA_CSS_ISYS_RESP_TYPE_FRAME_SOF\n"); + break; + case IA_CSS_ISYS_RESP_TYPE_FRAME_EOF: + IA_CSS_TRACE_0(ISYSAPI, VERBOSE, + "\tia_css_isys_resp_info.ia_css_isys_resp_type = IA_CSS_ISYS_RESP_TYPE_FRAME_EOF\n"); + break; + case IA_CSS_ISYS_RESP_TYPE_STREAM_START_AND_CAPTURE_DONE: + IA_CSS_TRACE_0(ISYSAPI, VERBOSE, + "\tia_css_isys_resp_info.ia_css_isys_resp_type = IA_CSS_ISYS_RESP_TYPE_STREAM_START_AND_CAPTURE_DONE\n"); + break; + case IA_CSS_ISYS_RESP_TYPE_STREAM_CAPTURE_DONE: + IA_CSS_TRACE_0(ISYSAPI, VERBOSE, + "\tia_css_isys_resp_info.ia_css_isys_resp_type = IA_CSS_ISYS_RESP_TYPE_STREAM_CAPTURE_DONE\n"); + break; + case IA_CSS_ISYS_RESP_TYPE_PIN_DATA_SKIPPED: + IA_CSS_TRACE_0(ISYSAPI, VERBOSE, + "\tia_css_isys_resp_info.ia_css_isys_resp_type = IA_CSS_ISYS_RESP_TYPE_PIN_DATA_SKIPPED\n"); + break; + case IA_CSS_ISYS_RESP_TYPE_STREAM_CAPTURE_SKIPPED: + IA_CSS_TRACE_0(ISYSAPI, VERBOSE, + "\tia_css_isys_resp_info.ia_css_isys_resp_type = IA_CSS_ISYS_RESP_TYPE_STREAM_CAPTURE_SKIPPED\n"); + break; + case IA_CSS_ISYS_RESP_TYPE_FRAME_SOF_DISCARDED: + IA_CSS_TRACE_0(ISYSAPI, VERBOSE, + "\tia_css_isys_resp_info.ia_css_isys_resp_type = IA_CSS_ISYS_RESP_TYPE_FRAME_SOF_DISCARDED\n"); + break; + case IA_CSS_ISYS_RESP_TYPE_FRAME_EOF_DISCARDED: + IA_CSS_TRACE_0(ISYSAPI, VERBOSE, + "\tia_css_isys_resp_info.ia_css_isys_resp_type = 
IA_CSS_ISYS_RESP_TYPE_FRAME_EOF_DISCARDED\n"); + break; + default: + IA_CSS_TRACE_0(ISYSAPI, ERROR, + "\tia_css_isys_resp_info.ia_css_isys_resp_type = INVALID\n"); + break; + } + + IA_CSS_TRACE_4(ISYSAPI, VERBOSE, + "\tia_css_isys_resp_info.type = %d\n" + "\t\t\tia_css_isys_resp_info.stream_handle = %d\n" + "\t\t\tia_css_isys_resp_info.time_stamp[0] = %d\n" + "\t\t\tia_css_isys_resp_info.time_stamp[1] = %d\n", + received_response->type, + received_response->stream_handle, + received_response->timestamp[0], + received_response->timestamp[1]); + IA_CSS_TRACE_7(ISYSAPI, VERBOSE, + "\tia_css_isys_resp_info.error = %d\n" + "\t\t\tia_css_isys_resp_info.error_details = %d\n" + "\t\t\tia_css_isys_resp_info.pin.out_buf_id = %016llxu\n" + "\t\t\tia_css_isys_resp_info.pin.addr = %016llxu\n" + "\t\t\tia_css_isys_resp_info.pin_id = %d\n" + "\t\t\tia_css_isys_resp_info.frame_counter = %d\n," + "\t\t\tia_css_isys_resp_info.written_direct = %d\n", + received_response->error, + received_response->error_details, + (unsigned long long)received_response->pin.out_buf_id, + (unsigned long long)received_response->pin.addr, + received_response->pin_id, + received_response->frame_counter, + received_response->written_direct); + IA_CSS_TRACE_0(ISYSAPI, VERBOSE, + "------------------------------------------------------\n"); + + return 0; +} diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/isysapi/src/ia_css_isys_public_trace.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/isysapi/src/ia_css_isys_public_trace.h new file mode 100644 index 0000000000000..5b6508058fd6e --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/isysapi/src/ia_css_isys_public_trace.h @@ -0,0 +1,55 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. 
+ * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ + +#ifndef __IA_CSS_ISYS_PUBLIC_TRACE_H +#define __IA_CSS_ISYS_PUBLIC_TRACE_H + +#include "ia_css_isysapi_trace.h" + +#include "ia_css_isysapi_types.h" + +#include "ia_css_isysapi.h" + +#include "ia_css_isys_private.h" +/** + * print_handle_context - formatted print function for + * struct ia_css_isys_context *ctx variable + */ +int print_handle_context(struct ia_css_isys_context *ctx); + +/** + * print_device_config_data - formatted print function for + * struct ia_css_isys_device_cfg_data *config variable + */ +int print_device_config_data(const struct ia_css_isys_device_cfg_data *config); +/** + * print_stream_config_data - formatted print function for + * ia_css_isys_stream_cfg_data stream_cfg variable + */ +int print_stream_config_data( + const struct ia_css_isys_stream_cfg_data *stream_cfg); +/** + * print_isys_frame_buff_set - formatted print function for + * struct ia_css_isys_frame_buff_set *next_frame variable + */ +int print_isys_frame_buff_set( + const struct ia_css_isys_frame_buff_set *next_frame, + const unsigned int nof_output_pins); +/** + * print_isys_resp_info - formatted print function for + * struct ia_css_isys_resp_info *received_response variable + */ +int print_isys_resp_info(struct ia_css_isys_resp_info *received_response); + +#endif /* __IA_CSS_ISYS_PUBLIC_TRACE_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/isysapi/src/ia_css_isysapi_trace.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/isysapi/src/ia_css_isysapi_trace.h new file mode 100644 index 
0000000000000..c6b944f245b11 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/isysapi/src/ia_css_isysapi_trace.h @@ -0,0 +1,79 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ + +#ifndef __IA_CSS_ISYSAPI_TRACE_H +#define __IA_CSS_ISYSAPI_TRACE_H + +#include "ia_css_trace.h" + +#define ISYSAPI_TRACE_LOG_LEVEL_OFF 0 +#define ISYSAPI_TRACE_LOG_LEVEL_NORMAL 1 +#define ISYSAPI_TRACE_LOG_LEVEL_DEBUG 2 + +/* ISYSAPI and all the submodules in ISYSAPI will have + * the default tracing level set to this level + */ +#define ISYSAPI_TRACE_CONFIG_DEFAULT ISYSAPI_TRACE_LOG_LEVEL_NORMAL + +/* In case ISYSAPI_TRACE_CONFIG is not defined, set it to default level */ +#if !defined(ISYSAPI_TRACE_CONFIG) + #define ISYSAPI_TRACE_CONFIG ISYSAPI_TRACE_CONFIG_DEFAULT +#endif + +/* ISYSAPI Module tracing backend is mapped to + * TUNIT tracing for target platforms + */ +#ifdef IA_CSS_TRACE_PLATFORM_CELL + #ifndef HRT_CSIM + #define ISYSAPI_TRACE_METHOD IA_CSS_TRACE_METHOD_TRACE + #else + #define ISYSAPI_TRACE_METHOD IA_CSS_TRACE_METHOD_NATIVE + #endif +#else + #define ISYSAPI_TRACE_METHOD IA_CSS_TRACE_METHOD_NATIVE +#endif + +#if (defined(ISYSAPI_TRACE_CONFIG)) + /* TRACE_OFF */ + #if ISYSAPI_TRACE_CONFIG == ISYSAPI_TRACE_LOG_LEVEL_OFF + #define ISYSAPI_TRACE_LEVEL_ASSERT IA_CSS_TRACE_LEVEL_DISABLED + #define ISYSAPI_TRACE_LEVEL_ERROR IA_CSS_TRACE_LEVEL_DISABLED + #define ISYSAPI_TRACE_LEVEL_WARNING IA_CSS_TRACE_LEVEL_DISABLED + #define ISYSAPI_TRACE_LEVEL_INFO 
IA_CSS_TRACE_LEVEL_DISABLED + #define ISYSAPI_TRACE_LEVEL_DEBUG IA_CSS_TRACE_LEVEL_DISABLED + #define ISYSAPI_TRACE_LEVEL_VERBOSE IA_CSS_TRACE_LEVEL_DISABLED + /* TRACE_NORMAL */ + #elif ISYSAPI_TRACE_CONFIG == ISYSAPI_TRACE_LOG_LEVEL_NORMAL + #define ISYSAPI_TRACE_LEVEL_ASSERT IA_CSS_TRACE_LEVEL_ENABLED + #define ISYSAPI_TRACE_LEVEL_ERROR IA_CSS_TRACE_LEVEL_ENABLED + #define ISYSAPI_TRACE_LEVEL_WARNING IA_CSS_TRACE_LEVEL_ENABLED + #define ISYSAPI_TRACE_LEVEL_INFO IA_CSS_TRACE_LEVEL_ENABLED + #define ISYSAPI_TRACE_LEVEL_DEBUG IA_CSS_TRACE_LEVEL_DISABLED + #define ISYSAPI_TRACE_LEVEL_VERBOSE IA_CSS_TRACE_LEVEL_DISABLED + /* TRACE_DEBUG */ + #elif ISYSAPI_TRACE_CONFIG == ISYSAPI_TRACE_LOG_LEVEL_DEBUG + #define ISYSAPI_TRACE_LEVEL_ASSERT IA_CSS_TRACE_LEVEL_ENABLED + #define ISYSAPI_TRACE_LEVEL_ERROR IA_CSS_TRACE_LEVEL_ENABLED + #define ISYSAPI_TRACE_LEVEL_WARNING IA_CSS_TRACE_LEVEL_ENABLED + #define ISYSAPI_TRACE_LEVEL_INFO IA_CSS_TRACE_LEVEL_ENABLED + #define ISYSAPI_TRACE_LEVEL_DEBUG IA_CSS_TRACE_LEVEL_ENABLED + #define ISYSAPI_TRACE_LEVEL_VERBOSE IA_CSS_TRACE_LEVEL_ENABLED + #else + #error "No ISYSAPI_TRACE_CONFIG Tracing level defined" + #endif +#else + #error "ISYSAPI_TRACE_CONFIG not defined" +#endif + +#endif /* __IA_CSS_ISYSAPI_TRACE_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/pkg_dir/interface/ia_css_pkg_dir.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/pkg_dir/interface/ia_css_pkg_dir.h new file mode 100644 index 0000000000000..a284d74bb4a67 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/pkg_dir/interface/ia_css_pkg_dir.h @@ -0,0 +1,99 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. 
+ * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ + +#ifndef __IA_CSS_PKG_DIR_H +#define __IA_CSS_PKG_DIR_H + +#include "ia_css_pkg_dir_storage_class.h" +#include "ia_css_pkg_dir_types.h" +#include "type_support.h" + +IA_CSS_PKG_DIR_STORAGE_CLASS_H +const ia_css_pkg_dir_entry_t *ia_css_pkg_dir_get_entry( + const ia_css_pkg_dir_t *pkg_dir, + uint32_t index +); + +/* User is expected to call the verify function manually, + * other functions do not call it internally + */ +IA_CSS_PKG_DIR_STORAGE_CLASS_H +int ia_css_pkg_dir_verify_header( + const ia_css_pkg_dir_entry_t *pkg_dir_header +); + +IA_CSS_PKG_DIR_STORAGE_CLASS_H +uint32_t ia_css_pkg_dir_get_num_entries( + const ia_css_pkg_dir_entry_t *pkg_dir_header +); + +IA_CSS_PKG_DIR_STORAGE_CLASS_H +uint32_t ia_css_pkg_dir_get_size_in_bytes( + const ia_css_pkg_dir_entry_t *pkg_dir_header +); + +IA_CSS_PKG_DIR_STORAGE_CLASS_H +enum ia_css_pkg_dir_version ia_css_pkg_dir_get_version( + const ia_css_pkg_dir_entry_t *pkg_dir_header +); + +IA_CSS_PKG_DIR_STORAGE_CLASS_H +uint16_t ia_css_pkg_dir_set_version( + ia_css_pkg_dir_entry_t *pkg_dir_header, + enum ia_css_pkg_dir_version version +); + + +IA_CSS_PKG_DIR_STORAGE_CLASS_H +uint32_t ia_css_pkg_dir_entry_get_address_lo( + const ia_css_pkg_dir_entry_t *entry +); + +IA_CSS_PKG_DIR_STORAGE_CLASS_H +uint32_t ia_css_pkg_dir_entry_get_address_hi( + const ia_css_pkg_dir_entry_t *entry +); + +IA_CSS_PKG_DIR_STORAGE_CLASS_H +uint32_t ia_css_pkg_dir_entry_get_size( + const ia_css_pkg_dir_entry_t *entry +); + +IA_CSS_PKG_DIR_STORAGE_CLASS_H +uint16_t ia_css_pkg_dir_entry_get_version( + const ia_css_pkg_dir_entry_t *entry +); + +IA_CSS_PKG_DIR_STORAGE_CLASS_H +uint8_t ia_css_pkg_dir_entry_get_type( + const ia_css_pkg_dir_entry_t *entry +); + +/* Get the address of the specified entry in 
the PKG_DIR + * Note: This function expects the complete PKG_DIR in the same memory space + * and the entries contains offsets and not addresses. + */ +IA_CSS_PKG_DIR_STORAGE_CLASS_H +void *ia_css_pkg_dir_get_entry_address( + const ia_css_pkg_dir_t *pkg_dir, + uint32_t index +); + +#ifdef __IA_CSS_PKG_DIR_INLINE__ + +#include "ia_css_pkg_dir_impl.h" + +#endif + +#endif /* __IA_CSS_PKG_DIR_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/pkg_dir/interface/ia_css_pkg_dir_iunit.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/pkg_dir/interface/ia_css_pkg_dir_iunit.h new file mode 100644 index 0000000000000..ad194b0389eb7 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/pkg_dir/interface/ia_css_pkg_dir_iunit.h @@ -0,0 +1,46 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+*/ + +#ifndef __IA_CSS_PKG_DIR_IUNIT_H +#define __IA_CSS_PKG_DIR_IUNIT_H + +/* In bootflow, pkg_dir only supports up to 16 entries in pkg_dir + * pkg_dir_header + Psys_server pg + Isys_server pg + 13 Client pg + */ + +enum { + IA_CSS_PKG_DIR_SIZE = 16, + IA_CSS_PKG_DIR_ENTRIES = IA_CSS_PKG_DIR_SIZE - 1 +}; + +#define IUNIT_MAX_CLIENT_PKG_ENTRIES 13 + +/* Example assignment of unique identifiers for the FW components + * This should match the identifiers in the manifest + */ +enum ia_css_pkg_dir_entry_type { + IA_CSS_PKG_DIR_HEADER = 0, + IA_CSS_PKG_DIR_PSYS_SERVER_PG, + IA_CSS_PKG_DIR_ISYS_SERVER_PG, + IA_CSS_PKG_DIR_CLIENT_PG +}; + +/* Fixed entries in the package directory */ +enum ia_css_pkg_dir_index { + IA_CSS_PKG_DIR_PSYS_INDEX = 0, + IA_CSS_PKG_DIR_ISYS_INDEX = 1, + IA_CSS_PKG_DIR_CLIENT_0 = 2 +}; + +#endif /* __IA_CSS_PKG_DIR_IUNIT_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/pkg_dir/interface/ia_css_pkg_dir_storage_class.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/pkg_dir/interface/ia_css_pkg_dir_storage_class.h new file mode 100644 index 0000000000000..cb64172151f92 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/pkg_dir/interface/ia_css_pkg_dir_storage_class.h @@ -0,0 +1,29 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+*/ + +#ifndef __IA_CSS_PKG_DIR_STORAGE_CLASS_H +#define __IA_CSS_PKG_DIR_STORAGE_CLASS_H + + +#include "storage_class.h" + +#ifndef __IA_CSS_PKG_DIR_INLINE__ +#define IA_CSS_PKG_DIR_STORAGE_CLASS_H STORAGE_CLASS_EXTERN +#define IA_CSS_PKG_DIR_STORAGE_CLASS_C +#else +#define IA_CSS_PKG_DIR_STORAGE_CLASS_H STORAGE_CLASS_INLINE +#define IA_CSS_PKG_DIR_STORAGE_CLASS_C STORAGE_CLASS_INLINE +#endif + +#endif /* __IA_CSS_PKG_DIR_STORAGE_CLASS_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/pkg_dir/interface/ia_css_pkg_dir_types.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/pkg_dir/interface/ia_css_pkg_dir_types.h new file mode 100644 index 0000000000000..b024b3da2f9e6 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/pkg_dir/interface/ia_css_pkg_dir_types.h @@ -0,0 +1,41 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+*/ + +#ifndef __IA_CSS_PKG_DIR_TYPES_H +#define __IA_CSS_PKG_DIR_TYPES_H + +#include "type_support.h" + +struct ia_css_pkg_dir_entry { + uint32_t address[2]; + uint32_t size; + uint16_t version; + uint8_t type; + uint8_t unused; +}; + +typedef void ia_css_pkg_dir_t; +typedef struct ia_css_pkg_dir_entry ia_css_pkg_dir_entry_t; + +/* The version field of the pkg_dir header defines + * if entries contain offsets or pointers + */ +/* This is temporary, until all pkg_dirs use pointers */ +enum ia_css_pkg_dir_version { + IA_CSS_PKG_DIR_POINTER, + IA_CSS_PKG_DIR_OFFSET +}; + + +#endif /* __IA_CSS_PKG_DIR_TYPES_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/pkg_dir/pkg_dir.mk b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/pkg_dir/pkg_dir.mk new file mode 100644 index 0000000000000..32c8a68f3653c --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/pkg_dir/pkg_dir.mk @@ -0,0 +1,29 @@ +# # # +# Support for Intel Camera Imaging ISP subsystem. +# Copyright (c) 2010 - 2018, Intel Corporation. +# +# This program is free software; you can redistribute it and/or modify it +# under the terms and conditions of the GNU General Public License, +# version 2, as published by the Free Software Foundation. +# +# This program is distributed in the hope it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +# FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU General Public License for +# more details +# +# +# MODULE is PKG DIR + +PKG_DIR_DIR = $${MODULES_DIR}/pkg_dir +PKG_DIR_INTERFACE = $(PKG_DIR_DIR)/interface +PKG_DIR_SOURCES = $(PKG_DIR_DIR)/src + +PKG_DIR_FILES = $(PKG_DIR_DIR)/src/ia_css_pkg_dir.c +PKG_DIR_CPPFLAGS = -I$(PKG_DIR_INTERFACE) +PKG_DIR_CPPFLAGS += -I$(PKG_DIR_SOURCES) +PKG_DIR_CPPFLAGS += -I$${MODULES_DIR}/../isp/kernels/io_ls/common +PKG_DIR_CPPFLAGS += -I$${MODULES_DIR}/fw_abi_common_types/ipu +PKG_DIR_CPPFLAGS += -I$${MODULES_DIR}/fw_abi_common_types/ipu/$(FW_ABI_IPU_TYPES_VERSION) + +PKG_DIR_CREATE_FILES = $(PKG_DIR_DIR)/src/ia_css_pkg_dir_create.c +PKG_DIR_UPDATE_FILES = $(PKG_DIR_DIR)/src/ia_css_pkg_dir_update.c diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/pkg_dir/src/ia_css_pkg_dir.c b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/pkg_dir/src/ia_css_pkg_dir.c new file mode 100644 index 0000000000000..348b56833e060 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/pkg_dir/src/ia_css_pkg_dir.c @@ -0,0 +1,27 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+*/ + +#ifdef __IA_CSS_PKG_DIR_INLINE__ + +#include "storage_class.h" + +STORAGE_CLASS_INLINE int __ia_css_pkg_dir_avoid_warning_on_empty_file(void) +{ + return 0; +} + +#else +#include "ia_css_pkg_dir_impl.h" + +#endif diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/pkg_dir/src/ia_css_pkg_dir_impl.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/pkg_dir/src/ia_css_pkg_dir_impl.h new file mode 100644 index 0000000000000..d5067d21398f9 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/pkg_dir/src/ia_css_pkg_dir_impl.h @@ -0,0 +1,201 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+*/ + +#ifndef __IA_CSS_PKG_DIR_IMPL_H +#define __IA_CSS_PKG_DIR_IMPL_H + +#include "ia_css_pkg_dir.h" +#include "ia_css_pkg_dir_int.h" +#include "error_support.h" +#include "type_support.h" +#include "assert_support.h" + +IA_CSS_PKG_DIR_STORAGE_CLASS_C +const ia_css_pkg_dir_entry_t *ia_css_pkg_dir_get_entry( + const ia_css_pkg_dir_t *pkg_dir, + uint32_t index) +{ + DECLARE_ERRVAL + struct ia_css_pkg_dir_entry *pkg_dir_header = NULL; + + verifexitval(pkg_dir != NULL, EFAULT); + + pkg_dir_header = (struct ia_css_pkg_dir_entry *)pkg_dir; + + /* First entry of the structure is the header, skip that */ + index++; + verifexitval(index < pkg_dir_header->size, EFAULT); + +EXIT: + if (haserror(EFAULT)) { + return NULL; + } + return &(pkg_dir_header[index]); +} + +IA_CSS_PKG_DIR_STORAGE_CLASS_C +int ia_css_pkg_dir_verify_header(const ia_css_pkg_dir_entry_t *pkg_dir_header) +{ + DECLARE_ERRVAL + verifexitval(pkg_dir_header != NULL, EFAULT); + +EXIT: + if (haserror(EFAULT)) { + return -1; + } + return ((pkg_dir_header->address[0] == PKG_DIR_MAGIC_VAL_0) + && (pkg_dir_header->address[1] == PKG_DIR_MAGIC_VAL_1)) ? 
+ 0 : -1; +} + +IA_CSS_PKG_DIR_STORAGE_CLASS_C +uint32_t ia_css_pkg_dir_get_num_entries( + const ia_css_pkg_dir_entry_t *pkg_dir_header) +{ + DECLARE_ERRVAL + uint32_t size = 0; + + verifexitval(pkg_dir_header != NULL, EFAULT); + size = pkg_dir_header->size; + verifexitval(size > 0, EFAULT); +EXIT: + if (haserror(EFAULT)) { + return 0; + } + return size - 1; +} + +IA_CSS_PKG_DIR_STORAGE_CLASS_C +enum ia_css_pkg_dir_version +ia_css_pkg_dir_get_version(const ia_css_pkg_dir_entry_t *pkg_dir_header) +{ + assert(pkg_dir_header != NULL); + return pkg_dir_header->version; +} + +IA_CSS_PKG_DIR_STORAGE_CLASS_C +uint16_t ia_css_pkg_dir_set_version(ia_css_pkg_dir_entry_t *pkg_dir_header, + enum ia_css_pkg_dir_version version) +{ + DECLARE_ERRVAL + + verifexitval(pkg_dir_header != NULL, EFAULT); +EXIT: + if (haserror(EFAULT)) { + return 1; + } + pkg_dir_header->version = version; + return 0; +} + +IA_CSS_PKG_DIR_STORAGE_CLASS_C +uint32_t ia_css_pkg_dir_get_size_in_bytes( + const ia_css_pkg_dir_entry_t *pkg_dir_header) +{ + DECLARE_ERRVAL + + verifexitval(pkg_dir_header != NULL, EFAULT); +EXIT: + if (haserror(EFAULT)) { + return 0; + } + return sizeof(struct ia_css_pkg_dir_entry) * pkg_dir_header->size; +} + +IA_CSS_PKG_DIR_STORAGE_CLASS_C +uint32_t ia_css_pkg_dir_entry_get_address_lo( + const ia_css_pkg_dir_entry_t *entry) +{ + DECLARE_ERRVAL + + verifexitval(entry != NULL, EFAULT); +EXIT: + if (haserror(EFAULT)) { + return 0; + } + return entry->address[0]; +} + +IA_CSS_PKG_DIR_STORAGE_CLASS_C +uint32_t ia_css_pkg_dir_entry_get_address_hi( + const ia_css_pkg_dir_entry_t *entry) +{ + DECLARE_ERRVAL + + verifexitval(entry != NULL, EFAULT); +EXIT: + if (haserror(EFAULT)) { + return 0; + } + return entry->address[1]; +} + +IA_CSS_PKG_DIR_STORAGE_CLASS_C +uint32_t ia_css_pkg_dir_entry_get_size(const ia_css_pkg_dir_entry_t *entry) +{ + DECLARE_ERRVAL + + verifexitval(entry != NULL, EFAULT); +EXIT: + if (haserror(EFAULT)) { + return 0; + } + return entry->size; +} + 
+IA_CSS_PKG_DIR_STORAGE_CLASS_C +uint16_t ia_css_pkg_dir_entry_get_version(const ia_css_pkg_dir_entry_t *entry) +{ + DECLARE_ERRVAL + + verifexitval(entry != NULL, EFAULT); +EXIT: + if (haserror(EFAULT)) { + return 0; + } + return entry->version; +} + +IA_CSS_PKG_DIR_STORAGE_CLASS_C +uint8_t ia_css_pkg_dir_entry_get_type(const ia_css_pkg_dir_entry_t *entry) +{ + DECLARE_ERRVAL + + verifexitval(entry != NULL, EFAULT); +EXIT: + if (haserror(EFAULT)) { + return 0; + } + return entry->type; +} + + +IA_CSS_PKG_DIR_STORAGE_CLASS_C +void *ia_css_pkg_dir_get_entry_address(const ia_css_pkg_dir_t *pkg_dir, + uint32_t index) +{ + void *entry_blob = NULL; + const ia_css_pkg_dir_entry_t *pkg_dir_entry = + ia_css_pkg_dir_get_entry(pkg_dir, index-1); + + if ((pkg_dir_entry != NULL) && + (ia_css_pkg_dir_entry_get_size(pkg_dir_entry) > 0)) { + assert(ia_css_pkg_dir_entry_get_address_hi(pkg_dir_entry) == 0); + entry_blob = (void *)((char *)pkg_dir + + ia_css_pkg_dir_entry_get_address_lo(pkg_dir_entry)); + } + return entry_blob; +} + +#endif /* __IA_CSS_PKG_DIR_IMPL_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/pkg_dir/src/ia_css_pkg_dir_int.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/pkg_dir/src/ia_css_pkg_dir_int.h new file mode 100644 index 0000000000000..203505fbee54e --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/pkg_dir/src/ia_css_pkg_dir_int.h @@ -0,0 +1,49 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+*/ + +#ifndef __IA_CSS_PKG_DIR_INT_H +#define __IA_CSS_PKG_DIR_INT_H + +/* + * Package Dir structure as specified in CSE FAS + * + * PKG DIR Header + * Qword 63:56 55 54:48 47:32 31:24 23:0 + * 0 "_IUPKDR_" + * 1 Rsvd Rsvd Type Version Rsvd Size + * + * Version: Version of the Structure + * Size: Size of the entire table (including header) in 16 byte chunks + * Type: Must be 0 for header + * + * Figure 13: PKG DIR Header + * + * + * PKG DIR Entry + * Qword 63:56 55 54:48 47:32 31:24 23:0 + * N Address/Offset + * N+1 Rsvd Rsvd Type Version Rsvd Size + * + * Version: Version # of the Component + * Size: Size of the component in bytes + * Type: Component Identifier + */ + +#define PKG_DIR_SIZE_BITS 24 +#define PKG_DIR_TYPE_BITS 7 + +#define PKG_DIR_MAGIC_VAL_1 (('_' << 24) | ('I' << 16) | ('U' << 8) | 'P') +#define PKG_DIR_MAGIC_VAL_0 (('K' << 24) | ('D' << 16) | ('R' << 8) | '_') + +#endif /* __IA_CSS_PKG_DIR_INT_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/port/interface/port_env_struct.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/port/interface/port_env_struct.h new file mode 100644 index 0000000000000..4d39a4739a8b0 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/port/interface/port_env_struct.h @@ -0,0 +1,24 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+*/ + +#ifndef __PORT_ENV_STRUCT_H +#define __PORT_ENV_STRUCT_H + +struct port_env { + unsigned int mmid; + unsigned int ssid; + unsigned int mem_addr; +}; + +#endif /* __PORT_ENV_STRUCT_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/port/interface/queue.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/port/interface/queue.h new file mode 100644 index 0000000000000..b233ab3baf014 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/port/interface/queue.h @@ -0,0 +1,40 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ + +#ifndef __QUEUE_H +#define __QUEUE_H + +#include "queue_struct.h" +#include "port_env_struct.h" + +/* + * SYS queues are created by the host + * SYS queues cannot be accessed through the queue interface + * To send data into a queue a send_port must be opened. + * To receive data from a queue, a recv_port must be opened. + */ + +/* return required buffer size for queue */ +unsigned int +sys_queue_buf_size(unsigned int size, unsigned int token_size); + +/* + * initialize a queue that can hold at least 'size' tokens of + * 'token_size' bytes. 
+ */ +void +sys_queue_init(struct sys_queue *q, unsigned int size, + unsigned int token_size, struct sys_queue_res *res); + +#endif /* __QUEUE_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/port/interface/queue_struct.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/port/interface/queue_struct.h new file mode 100644 index 0000000000000..ef48fcfded2b6 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/port/interface/queue_struct.h @@ -0,0 +1,47 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+*/ + +#ifndef __QUEUE_STRUCT_H +#define __QUEUE_STRUCT_H + +/* queue description, shared between sender and receiver */ + +#include "type_support.h" + +#ifdef __VIED_CELL +typedef struct {uint32_t v[2]; } host_buffer_address_t; +#else +typedef uint64_t host_buffer_address_t; +#endif + +typedef uint32_t vied_buffer_address_t; + + +struct sys_queue { + host_buffer_address_t host_address; + vied_buffer_address_t vied_address; + unsigned int size; + unsigned int token_size; + unsigned int wr_reg; /* reg no in subsystem's regmem */ + unsigned int rd_reg; + unsigned int _align; +}; + +struct sys_queue_res { + host_buffer_address_t host_address; + vied_buffer_address_t vied_address; + unsigned int reg; +}; + +#endif /* __QUEUE_STRUCT_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/port/interface/recv_port.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/port/interface/recv_port.h new file mode 100644 index 0000000000000..cce253b266687 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/port/interface/recv_port.h @@ -0,0 +1,34 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+*/ + +#ifndef __RECV_PORT_H +#define __RECV_PORT_H + + +struct recv_port; +struct sys_queue; +struct port_env; + +void +recv_port_open(struct recv_port *p, const struct sys_queue *q, + const struct port_env *env); + +unsigned int +recv_port_available(const struct recv_port *p); + +unsigned int +recv_port_transfer(const struct recv_port *p, void *data); + + +#endif /* __RECV_PORT_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/port/interface/recv_port_struct.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/port/interface/recv_port_struct.h new file mode 100644 index 0000000000000..52ec563b13cf5 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/port/interface/recv_port_struct.h @@ -0,0 +1,32 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+*/ + +#ifndef __RECV_PORT_STRUCT_H +#define __RECV_PORT_STRUCT_H + +#include "buffer_type.h" + +struct recv_port { + buffer_address buffer; /* address of buffer in DDR */ + unsigned int size; + unsigned int token_size; + unsigned int wr_reg; /* index of write pointer located in regmem */ + unsigned int rd_reg; /* index read pointer located in regmem */ + + unsigned int mmid; + unsigned int ssid; + unsigned int mem_addr; /* address of memory containing regmem */ +}; + +#endif /* __RECV_PORT_STRUCT_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/port/interface/send_port.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/port/interface/send_port.h new file mode 100644 index 0000000000000..04a160f3f0199 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/port/interface/send_port.h @@ -0,0 +1,52 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ + +#ifndef __SEND_PORT_H +#define __SEND_PORT_H + + +/* + * A send port can be used to send tokens into a queue. + * The interface can be used on any type of processor (host, SP, ...) + */ + +struct send_port; +struct sys_queue; +struct port_env; + +/* + * Open a send port on a queue. After the port is opened, tokens can be sent + */ +void +send_port_open(struct send_port *p, const struct sys_queue *q, + const struct port_env *env); + +/* + * Determine how many tokens can be sent + */ +unsigned int +send_port_available(const struct send_port *p); + +/* + * Send a token via a send port. 
The function returns the number of + * tokens that have been sent: + * 1: the token was accepted + * 0: the token was not accepted (full queue) + * The size of a token is determined at initialization. + */ +unsigned int +send_port_transfer(const struct send_port *p, const void *data); + + +#endif /* __SEND_PORT_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/port/interface/send_port_struct.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/port/interface/send_port_struct.h new file mode 100644 index 0000000000000..f834c62bc3db6 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/port/interface/send_port_struct.h @@ -0,0 +1,32 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+*/ + +#ifndef __SEND_PORT_STRUCT_H +#define __SEND_PORT_STRUCT_H + +#include "buffer_type.h" + +struct send_port { + buffer_address buffer; + unsigned int size; + unsigned int token_size; + unsigned int wr_reg; /* index of write pointer in regmem */ + unsigned int rd_reg; /* index of read pointer in regmem */ + + unsigned int mmid; + unsigned int ssid; + unsigned int mem_addr; +}; + +#endif /* __SEND_PORT_STRUCT_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/port/port.mk b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/port/port.mk new file mode 100644 index 0000000000000..b3801247802e9 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/port/port.mk @@ -0,0 +1,31 @@ +# # # +# Support for Intel Camera Imaging ISP subsystem. +# Copyright (c) 2010 - 2018, Intel Corporation. +# +# This program is free software; you can redistribute it and/or modify it +# under the terms and conditions of the GNU General Public License, +# version 2, as published by the Free Software Foundation. +# +# This program is distributed in the hope it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +# FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU General Public License for +# more details +# +# +# MODULE is PORT + +PORT_DIR=$${MODULES_DIR}/port + +PORT_INTERFACE=$(PORT_DIR)/interface +PORT_SOURCES1=$(PORT_DIR)/src + +PORT_HOST_FILES += $(PORT_SOURCES1)/send_port.c +PORT_HOST_FILES += $(PORT_SOURCES1)/recv_port.c +PORT_HOST_FILES += $(PORT_SOURCES1)/queue.c + +PORT_HOST_CPPFLAGS += -I$(PORT_INTERFACE) + +PORT_FW_FILES += $(PORT_SOURCES1)/send_port.c +PORT_FW_FILES += $(PORT_SOURCES1)/recv_port.c + +PORT_FW_CPPFLAGS += -I$(PORT_INTERFACE) diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/port/src/queue.c b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/port/src/queue.c new file mode 100644 index 0000000000000..eeec99dfe2d0d --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/port/src/queue.c @@ -0,0 +1,47 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+*/ + +#include "queue.h" + +#include "regmem_access.h" +#include "port_env_struct.h" + +unsigned int sys_queue_buf_size(unsigned int size, unsigned int token_size) +{ + return (size + 1) * token_size; +} + +void +sys_queue_init(struct sys_queue *q, unsigned int size, unsigned int token_size, + struct sys_queue_res *res) +{ + unsigned int buf_size; + + q->size = size + 1; + q->token_size = token_size; + buf_size = sys_queue_buf_size(size, token_size); + + /* acquire the shared buffer space */ + q->host_address = res->host_address; + res->host_address += buf_size; + q->vied_address = res->vied_address; + res->vied_address += buf_size; + + /* acquire the shared read and writer pointers */ + q->wr_reg = res->reg; + res->reg++; + q->rd_reg = res->reg; + res->reg++; + +} diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/port/src/recv_port.c b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/port/src/recv_port.c new file mode 100644 index 0000000000000..31b36e9ceafbb --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/port/src/recv_port.c @@ -0,0 +1,95 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+*/ + +#include "recv_port.h" +#include "port_env_struct.h" /* for port_env */ +#include "queue_struct.h" /* for sys_queue */ +#include "recv_port_struct.h" /* for recv_port */ +#include "buffer_access.h" /* for buffer_load, buffer_address */ +#include "regmem_access.h" /* for regmem_load_32, regmem_store_32 */ +#include "storage_class.h" /* for STORAGE_CLASS_INLINE */ +#include "math_support.h" /* for OP_std_modadd */ +#include "type_support.h" /* for HOST_ADDRESS */ + +#ifndef __VIED_CELL +#include "cpu_mem_support.h" /* for ia_css_cpu_mem_cache_invalidate */ +#endif + +void +recv_port_open(struct recv_port *p, const struct sys_queue *q, + const struct port_env *env) +{ + p->mmid = env->mmid; + p->ssid = env->ssid; + p->mem_addr = env->mem_addr; + + p->size = q->size; + p->token_size = q->token_size; + p->wr_reg = q->wr_reg; + p->rd_reg = q->rd_reg; + +#ifdef __VIED_CELL + p->buffer = q->vied_address; +#else + p->buffer = q->host_address; +#endif +} + +STORAGE_CLASS_INLINE unsigned int +recv_port_index(const struct recv_port *p, unsigned int i) +{ + unsigned int rd = regmem_load_32(p->mem_addr, p->rd_reg, p->ssid); + + return OP_std_modadd(rd, i, p->size); +} + +unsigned int +recv_port_available(const struct recv_port *p) +{ + int wr = (int)regmem_load_32(p->mem_addr, p->wr_reg, p->ssid); + int rd = (int)regmem_load_32(p->mem_addr, p->rd_reg, p->ssid); + + return OP_std_modadd(wr, -rd, p->size); +} + +STORAGE_CLASS_INLINE void +recv_port_copy(const struct recv_port *p, unsigned int i, void *data) +{ + unsigned int rd = recv_port_index(p, i); + unsigned int token_size = p->token_size; + buffer_address addr = p->buffer + (rd * token_size); +#ifndef __VIED_CELL + ia_css_cpu_mem_cache_invalidate((void *)HOST_ADDRESS(p->buffer), + token_size*p->size); +#endif + buffer_load(addr, data, token_size, p->mmid); +} + +STORAGE_CLASS_INLINE void +recv_port_release(const struct recv_port *p, unsigned int i) +{ + unsigned int rd = recv_port_index(p, i); + + 
regmem_store_32(p->mem_addr, p->rd_reg, rd, p->ssid); +} + +unsigned int +recv_port_transfer(const struct recv_port *p, void *data) +{ + if (!recv_port_available(p)) + return 0; + recv_port_copy(p, 0, data); + recv_port_release(p, 1); + return 1; +} diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/port/src/send_port.c b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/port/src/send_port.c new file mode 100644 index 0000000000000..8d1fba08c5d58 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/port/src/send_port.c @@ -0,0 +1,94 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+*/ + +#include "send_port.h" +#include "queue_struct.h" /* for sys_queue */ +#include "send_port_struct.h" /* for send_port */ +#include "port_env_struct.h" /* for port_env */ +#include "regmem_access.h" /* for regmem_load_32, regmem_store_32 */ +#include "buffer_access.h" /* for buffer_store, buffer_address */ +#include "storage_class.h" /* for STORAGE_CLASS_INLINE */ +#include "math_support.h" /* for OP_std_modadd */ +#include "type_support.h" /* for HOST_ADDRESS */ + +#ifndef __VIED_CELL +#include "cpu_mem_support.h" /* for ia_css_cpu_mem_cache_flush */ +#endif + +void +send_port_open(struct send_port *p, const struct sys_queue *q, + const struct port_env *env) +{ + p->mmid = env->mmid; + p->ssid = env->ssid; + p->mem_addr = env->mem_addr; + + p->size = q->size; + p->token_size = q->token_size; + p->wr_reg = q->wr_reg; + p->rd_reg = q->rd_reg; +#ifdef __VIED_CELL + p->buffer = q->vied_address; +#else + p->buffer = q->host_address; +#endif +} + +STORAGE_CLASS_INLINE unsigned int +send_port_index(const struct send_port *p, unsigned int i) +{ + unsigned int wr = regmem_load_32(p->mem_addr, p->wr_reg, p->ssid); + + return OP_std_modadd(wr, i, p->size); +} + +unsigned int +send_port_available(const struct send_port *p) +{ + int rd = (int)regmem_load_32(p->mem_addr, p->rd_reg, p->ssid); + int wr = (int)regmem_load_32(p->mem_addr, p->wr_reg, p->ssid); + + return OP_std_modadd(rd, -(wr+1), p->size); +} + +STORAGE_CLASS_INLINE void +send_port_copy(const struct send_port *p, unsigned int i, const void *data) +{ + unsigned int wr = send_port_index(p, i); + unsigned int token_size = p->token_size; + buffer_address addr = p->buffer + (wr * token_size); + + buffer_store(addr, data, token_size, p->mmid); +#ifndef __VIED_CELL + ia_css_cpu_mem_cache_flush((void *)HOST_ADDRESS(addr), token_size); +#endif +} + +STORAGE_CLASS_INLINE void +send_port_release(const struct send_port *p, unsigned int i) +{ + unsigned int wr = send_port_index(p, i); + + regmem_store_32(p->mem_addr, 
p->wr_reg, wr, p->ssid); +} + +unsigned int +send_port_transfer(const struct send_port *p, const void *data) +{ + if (!send_port_available(p)) + return 0; + send_port_copy(p, 0, data); + send_port_release(p, 1); + return 1; +} diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/reg_dump/src/isys/cnlB0_gen_reg_dump/ia_css_debug_dump.c b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/reg_dump/src/isys/cnlB0_gen_reg_dump/ia_css_debug_dump.c new file mode 100644 index 0000000000000..c51d65c8cb647 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/reg_dump/src/isys/cnlB0_gen_reg_dump/ia_css_debug_dump.c @@ -0,0 +1,15 @@ +/* +* Support for Intel Camera Imaging ISP subsystem. +* Copyright (c) 2010 - 2018, Intel Corporation. +* +* This program is free software; you can redistribute it and/or modify it +* under the terms and conditions of the GNU General Public License, +* version 2, as published by the Free Software Foundation. +* +* This program is distributed in the hope it will be useful, but WITHOUT +* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for +* more details. +*/ +#include "ia_css_debug_dump.h" + void ia_css_debug_dump(void) {} \ No newline at end of file diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/reg_dump/src/isys/cnlB0_gen_reg_dump/ia_css_debug_dump.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/reg_dump/src/isys/cnlB0_gen_reg_dump/ia_css_debug_dump.h new file mode 100644 index 0000000000000..5dd23ddbd180b --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/reg_dump/src/isys/cnlB0_gen_reg_dump/ia_css_debug_dump.h @@ -0,0 +1,17 @@ +/* +* Support for Intel Camera Imaging ISP subsystem. +* Copyright (c) 2010 - 2018, Intel Corporation. 
+* +* This program is free software; you can redistribute it and/or modify it +* under the terms and conditions of the GNU General Public License, +* version 2, as published by the Free Software Foundation. +* +* This program is distributed in the hope it will be useful, but WITHOUT +* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for +* more details. +*/ +#ifndef __IA_CSS_DEBUG_DUMP_H_ + #define __IA_CSS_DEBUG_DUMP_H_ + void ia_css_debug_dump(void); + #endif /* __IA_CSS_DEBUG_DUMP_H_ */ \ No newline at end of file diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/reg_dump/src/reg_dump_generic_bridge.c b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/reg_dump/src/reg_dump_generic_bridge.c new file mode 100644 index 0000000000000..9b9161ae78cf2 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/reg_dump/src/reg_dump_generic_bridge.c @@ -0,0 +1,39 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+*/ + +#include +#include "ia_css_trace.h" +#ifdef USE_LOGICAL_SSIDS +/* + Logical names can be used to define the SSID + In order to resolve these names the following include file should be provided + and the define above should be enabled +*/ +#include +#endif + +#define REG_DUMP_TRACE_METHOD IA_CSS_TRACE_METHOD_NATIVE +#define REG_DUMP_TRACE_LEVEL_VERBOSE IA_CSS_TRACE_LEVEL_ENABLED + +/* SSID value is defined in test makefiles as either isys0 or psys0 */ +#define REG_DUMP_READ_REGISTER(addr) vied_subsystem_load_32(SSID, addr) + +#define REG_DUMP_PRINT_0(...) \ +EXPAND_VA_ARGS(IA_CSS_TRACE_0(REG_DUMP, VERBOSE, __VA_ARGS__)) +#define REG_DUMP_PRINT_1(...) \ +EXPAND_VA_ARGS(IA_CSS_TRACE_1(REG_DUMP, VERBOSE, __VA_ARGS__)) +#define EXPAND_VA_ARGS(x) x + +/* Including generated source code for reg_dump */ +#include "ia_css_debug_dump.c" diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/regmem/interface/regmem_access.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/regmem/interface/regmem_access.h new file mode 100644 index 0000000000000..d4576af936f6d --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/regmem/interface/regmem_access.h @@ -0,0 +1,67 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+*/ + +#ifndef __REGMEM_ACCESS_H +#define __REGMEM_ACCESS_H + +#include "storage_class.h" + +enum regmem_id { + /* pass pkg_dir address to SPC in non-secure mode */ + PKG_DIR_ADDR_REG = 0, + /* pass syscom configuration to SPC */ + SYSCOM_CONFIG_REG = 1, + /* syscom state - modified by SP */ + SYSCOM_STATE_REG = 2, + /* syscom commands - modified by the host */ + SYSCOM_COMMAND_REG = 3, + /* Store interrupt status - updated by SP */ + SYSCOM_IRQ_REG = 4, + /* Store VTL0_ADDR_MASK in trusted secure region - provided by host.*/ + SYSCOM_VTL0_ADDR_MASK = 5, +#if HAS_DUAL_CMD_CTX_SUPPORT + /* Initialized if trustlet exists - updated by host */ + TRUSTLET_STATUS = 6, + /* identify if SPC access blocker programming is completed - updated by SP */ + AB_SPC_STATUS = 7, + /* first syscom queue pointer register */ + SYSCOM_QPR_BASE_REG = 8 +#else + /* first syscom queue pointer register */ + SYSCOM_QPR_BASE_REG = 6 +#endif +}; + +#if HAS_DUAL_CMD_CTX_SUPPORT +/* Bit 0: for untrusted non-secure DRV driver on VTL0 + * Bit 1: for trusted secure TEE driver on VTL1 + */ +#define SYSCOM_IRQ_VTL0_MASK 0x1 +#define SYSCOM_IRQ_VTL1_MASK 0x2 +#endif + +STORAGE_CLASS_INLINE unsigned int +regmem_load_32(unsigned int mem_address, unsigned int reg, unsigned int ssid); + +STORAGE_CLASS_INLINE void +regmem_store_32(unsigned int mem_address, unsigned int reg, unsigned int value, + unsigned int ssid); + +#ifdef __VIED_CELL +#include "regmem_access_cell.h" +#else +#include "regmem_access_host.h" +#endif + +#endif /* __REGMEM_ACCESS_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/regmem/regmem.mk b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/regmem/regmem.mk new file mode 100644 index 0000000000000..24ebc1c325d8e --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/regmem/regmem.mk @@ -0,0 +1,32 @@ +# # # +# Support for Intel Camera Imaging ISP subsystem. +# Copyright (c) 2010 - 2018, Intel Corporation. 
+# +# This program is free software; you can redistribute it and/or modify it +# under the terms and conditions of the GNU General Public License, +# version 2, as published by the Free Software Foundation. +# +# This program is distributed in the hope it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for +# more details +# +# +ifndef REGMEM_MK +REGMEM_MK=1 + +# MODULE is REGMEM + +REGMEM_DIR=$${MODULES_DIR}/regmem + +REGMEM_INTERFACE=$(REGMEM_DIR)/interface +REGMEM_SOURCES=$(REGMEM_DIR)/src + +REGMEM_HOST_FILES = +REGMEM_FW_FILES = $(REGMEM_SOURCES)/regmem.c + +REGMEM_CPPFLAGS = -I$(REGMEM_INTERFACE) -I$(REGMEM_SOURCES) +REGMEM_HOST_CPPFLAGS = $(REGMEM_CPPFLAGS) +REGMEM_FW_CPPFLAGS = $(REGMEM_CPPFLAGS) + +endif diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/regmem/src/regmem_access_host.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/regmem/src/regmem_access_host.h new file mode 100644 index 0000000000000..8878d7074fabb --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/regmem/src/regmem_access_host.h @@ -0,0 +1,41 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+*/ + +#ifndef __REGMEM_ACCESS_HOST_H +#define __REGMEM_ACCESS_HOST_H + +#include "regmem_access.h" /* implemented interface */ + +#include "storage_class.h" +#include "regmem_const.h" +#include +#include "ia_css_cmem.h" + +STORAGE_CLASS_INLINE unsigned int +regmem_load_32(unsigned int mem_addr, unsigned int reg, unsigned int ssid) +{ + /* No need to add REGMEM_OFFSET, it is already included in mem_addr. */ + return ia_css_cmem_load_32(ssid, mem_addr + (REGMEM_WORD_BYTES*reg)); +} + +STORAGE_CLASS_INLINE void +regmem_store_32(unsigned int mem_addr, unsigned int reg, + unsigned int value, unsigned int ssid) +{ + /* No need to add REGMEM_OFFSET, it is already included in mem_addr. */ + ia_css_cmem_store_32(ssid, mem_addr + (REGMEM_WORD_BYTES*reg), + value); +} + +#endif /* __REGMEM_ACCESS_HOST_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/regmem/src/regmem_const.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/regmem/src/regmem_const.h new file mode 100644 index 0000000000000..ac7e3a98a434f --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/regmem/src/regmem_const.h @@ -0,0 +1,28 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+*/ + +#ifndef __REGMEM_CONST_H +#define __REGMEM_CONST_H + +#ifndef REGMEM_SIZE +#define REGMEM_SIZE (16) +#endif /* REGMEM_SIZE */ +#ifndef REGMEM_OFFSET +#define REGMEM_OFFSET (0) +#endif /* REGMEM_OFFSET */ +#ifndef REGMEM_WORD_BYTES +#define REGMEM_WORD_BYTES (4) +#endif + +#endif /* __REGMEM_CONST_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/support/assert_support.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/support/assert_support.h new file mode 100644 index 0000000000000..f904a494b53c9 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/support/assert_support.h @@ -0,0 +1,197 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ + +#ifndef __ASSERT_SUPPORT_H +#define __ASSERT_SUPPORT_H + +/* This file provides support for run-time assertions + * and compile-time assertions. + * + * Run-time assertions are provided via the following syntax: + * assert(condition) + * Run-time assertions are disabled using the NDEBUG flag. + * + * Compile time assertions are provided via the following syntax: + * COMPILATION_ERROR_IF(condition); + * A compile-time assertion will fail to compile if the condition is false. + * The condition must be constant, such that it can be evaluated + * at compile time. + * + * OP___assert is deprecated. 
+ */ + +#define IA_CSS_ASSERT(expr) assert(expr) + +#ifdef __KLOCWORK__ +/* Klocwork does not see that assert will lead to abortion + * as there is no good way to tell this to KW and the code + * should not depend on assert to function (actually the assert + * could be disabled in a release build) it was decided to + * disable the assert for KW scans (by defining NDEBUG) + */ +#define NDEBUG +#endif /* __KLOCWORK__ */ + +/** + * The following macro can help to test the size of a struct at compile + * time rather than at run-time. It does not work for all compilers; see + * below. + * + * Depending on the value of 'condition', the following macro is expanded to: + * - condition==true: + * an expression containing an array declaration with negative size, + * usually resulting in a compilation error + * - condition==false: + * (void) 1; // C statement with no effect + * + * example: + * COMPILATION_ERROR_IF( sizeof(struct host_sp_queues) != + * SIZE_OF_HOST_SP_QUEUES_STRUCT); + * + * verify that the macro indeed triggers a compilation error with your compiler: + * COMPILATION_ERROR_IF( sizeof(struct host_sp_queues) != + * (sizeof(struct host_sp_queues)+1) ); + * + * Not all compilers will trigger an error with this macro; + * use a search engine to search for BUILD_BUG_ON to find other methods. 
+ */ +#define COMPILATION_ERROR_IF(condition) \ +((void)sizeof(char[1 - 2*!!(condition)])) + +/* Compile time assertion */ +#ifndef CT_ASSERT +#define CT_ASSERT(cnd) ((void)sizeof(char[(cnd)?1 : -1])) +#endif /* CT_ASSERT */ + +#ifdef NDEBUG + +#define assert(cnd) ((void)0) + +#else + +#include "storage_class.h" + +#if defined(_MSC_VER) +#ifdef _KERNEL_MODE +/* Windows kernel mode compilation */ +#include +#define assert(cnd) ASSERT(cnd) +#else +/* Windows usermode compilation */ +#include +#endif + +#elif defined(__HIVECC) + +/* + * target: assert disabled + * sched: assert enabled only when SCHED_DEBUG is defined + * unsched: assert enabled + */ +#if defined(HRT_HW) +#define assert(cnd) ((void)0) +#elif defined(HRT_SCHED) && !defined(DEBUG_SCHED) +#define assert(cnd) ((void)0) +#elif defined(PIPE_GENERATION) +#define assert(cnd) ((void)0) +#else +#include +#define assert(cnd) OP___csim_assert(cnd) +#endif + +#elif defined(__KERNEL__) +#include + +#ifndef KERNEL_ASSERT_TO_BUG +#ifndef KERNEL_ASSERT_TO_BUG_ON +#ifndef KERNEL_ASSERT_TO_WARN_ON +#ifndef KERNEL_ASSERT_TO_WARN_ON_INF_LOOP +#ifndef KERNEL_ASSERT_UNDEFINED +/* Default */ +#define KERNEL_ASSERT_TO_BUG +#endif /*KERNEL_ASSERT_UNDEFINED*/ +#endif /*KERNEL_ASSERT_TO_WARN_ON_INF_LOOP*/ +#endif /*KERNEL_ASSERT_TO_WARN_ON*/ +#endif /*KERNEL_ASSERT_TO_BUG_ON*/ +#endif /*KERNEL_ASSERT_TO_BUG*/ + +#ifdef KERNEL_ASSERT_TO_BUG +/* TODO: it would be cleaner to use this: + * #define assert(cnd) BUG_ON(cnd) + * but that causes many compiler warnings (==errors) under Android + * because it seems that the BUG_ON() macro is not seen as a check by + * gcc like the BUG() macro is. 
*/ +#define assert(cnd) \ + do { \ + if (!(cnd)) { \ + BUG(); \ + } \ + } while (0) +#endif /*KERNEL_ASSERT_TO_BUG*/ + +#ifdef KERNEL_ASSERT_TO_BUG_ON +#define assert(cnd) BUG_ON(!(cnd)) +#endif /*KERNEL_ASSERT_TO_BUG_ON*/ + +#ifdef KERNEL_ASSERT_TO_WARN_ON +#define assert(cnd) WARN_ON(!(cnd)) +#endif /*KERNEL_ASSERT_TO_WARN_ON*/ + +#ifdef KERNEL_ASSERT_TO_WARN_ON_INF_LOOP +#define assert(cnd) \ + do { \ + int not_cnd = !(cnd); \ + WARN_ON(not_cnd); \ + if (not_cnd) { \ + for (;;) { \ + } \ + } \ + } while (0) +#endif /*KERNEL_ASSERT_TO_WARN_ON_INF_LOOP*/ + +#ifdef KERNEL_ASSERT_UNDEFINED +#include KERNEL_ASSERT_DEFINITION_FILESTRING +#endif /*KERNEL_ASSERT_UNDEFINED*/ + +#elif defined(__FIST__) || defined(__GNUC__) + +#include "assert.h" + +#else /* default is for unknown environments */ +#define assert(cnd) ((void)0) +#endif + +#endif /* NDEBUG */ + +#ifndef PIPE_GENERATION +/* Deprecated OP___assert, this is still used in ~1000 places + * in the code. This will be removed over time. + * The implementation for the pipe generation tool is in see support.isp.h */ +#define OP___assert(cnd) assert(cnd) + +#ifdef C_RUN +#define compile_time_assert(cond) OP___assert(cond) +#else +#include "storage_class.h" +extern void _compile_time_assert(void); +STORAGE_CLASS_INLINE void compile_time_assert(unsigned cond) +{ + /* Call undefined function if cond is false */ + if (!cond) + _compile_time_assert(); +} +#endif +#endif /* PIPE_GENERATION */ + +#endif /* __ASSERT_SUPPORT_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/support/cpu_mem_support.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/support/cpu_mem_support.h new file mode 100644 index 0000000000000..fa349cac4b24a --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/support/cpu_mem_support.h @@ -0,0 +1,233 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. 
+ * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ + +#ifndef __CPU_MEM_SUPPORT_H +#define __CPU_MEM_SUPPORT_H + +#include "storage_class.h" +#include "assert_support.h" +#include "type_support.h" + +STORAGE_CLASS_INLINE void* +ia_css_cpu_mem_copy(void *dst, const void *src, unsigned int size) +{ + /* memcpy cannot be used in in Windows (function is not allowed), + * and the safer function memcpy_s is not available on other platforms. + * Because usage of ia_css_cpu_mem_copy is minimal, we implement it here in an easy, + * but sub-optimal way. + */ + unsigned int i; + + assert(dst != NULL && src != NULL); + + if (!(dst != NULL && src != NULL)) { + return NULL; + } + for (i = 0; i < size; i++) { + ((char *)dst)[i] = ((char *)src)[i]; + } + return dst; +} + +#if defined(__KERNEL__) + +#include +#include +#include +#include + +/* TODO: remove, workaround for issue in hrt file ibuf_ctrl_2600_config.c + * error checking code added to SDK that uses calls to exit function + */ +#define exit(a) return + +STORAGE_CLASS_INLINE void* +ia_css_cpu_mem_alloc(unsigned int size) +{ + return kmalloc(size, GFP_KERNEL); +} + +STORAGE_CLASS_INLINE void* +ia_css_cpu_mem_alloc_page_aligned(unsigned int size) +{ + return ia_css_cpu_mem_alloc(size); /* todo: align to page size */ +} + +STORAGE_CLASS_INLINE void +ia_css_cpu_mem_protect(void *ptr, unsigned int size, int prot) +{ + /* nothing here yet */ +} + +STORAGE_CLASS_INLINE void* +ia_css_cpu_mem_set_zero(void *dst, unsigned int size) +{ + return memset(dst, 0, size); /* available in kernel in linux/string.h */ +} + 
+STORAGE_CLASS_INLINE void +ia_css_cpu_mem_free(void *ptr) +{ + kfree(ptr); +} + +STORAGE_CLASS_INLINE void +ia_css_cpu_mem_cache_flush(void *ptr, unsigned int size) +{ + /* parameter check here */ + if (ptr == NULL) + return; + + clflush_cache_range(ptr, size); +} + +STORAGE_CLASS_INLINE void +ia_css_cpu_mem_cache_invalidate(void *ptr, unsigned int size) +{ + /* for now same as flush */ + ia_css_cpu_mem_cache_flush(ptr, size); +} + +#elif defined(_MSC_VER) + +#include +#include +#include + +extern void *hrt_malloc(size_t bytes, int zero_mem); +extern void *hrt_free(void *ptr); +extern void hrt_mem_cache_flush(void *ptr, unsigned int size); +extern void hrt_mem_cache_invalidate(void *ptr, unsigned int size); + +#define malloc(a) hrt_malloc(a, 1) +#define free(a) hrt_free(a) + +#define CSS_PAGE_SIZE (1<<12) + +STORAGE_CLASS_INLINE void* +ia_css_cpu_mem_alloc(unsigned int size) +{ + return malloc(size); +} + +STORAGE_CLASS_INLINE void* +ia_css_cpu_mem_alloc_page_aligned(unsigned int size) +{ + unsigned int buffer_size = size; + + /* Currently hrt_malloc calls Windows ExAllocatePoolWithTag() routine + * to request system memory. 
If the number of bytes is equal or bigger + * than the page size, then the returned address is page aligned, + * but if it's smaller it's not necessarily page-aligned We agreed + * with Windows team that we allocate a full page + * if it's less than page size + */ + if (buffer_size < CSS_PAGE_SIZE) + buffer_size = CSS_PAGE_SIZE; + + return ia_css_cpu_mem_alloc(buffer_size); +} + +STORAGE_CLASS_INLINE void* +ia_css_cpu_mem_set_zero(void *dst, unsigned int size) +{ + return memset(dst, 0, size); +} + +STORAGE_CLASS_INLINE void +ia_css_cpu_mem_free(void *ptr) +{ + free(ptr); +} + +STORAGE_CLASS_INLINE void +ia_css_cpu_mem_cache_flush(void *ptr, unsigned int size) +{ +#ifdef _KERNEL_MODE + hrt_mem_cache_flush(ptr, size); +#else + (void)ptr; + (void)size; +#endif +} + +STORAGE_CLASS_INLINE void +ia_css_cpu_mem_cache_invalidate(void *ptr, unsigned int size) +{ +#ifdef _KERNEL_MODE + hrt_mem_cache_invalidate(ptr, size); +#else + (void)ptr; + (void)size; +#endif +} + +#else + +#include +#include +#include +/* Needed for the MPROTECT */ +#include +#include +#include +#include + + +STORAGE_CLASS_INLINE void* +ia_css_cpu_mem_alloc(unsigned int size) +{ + return malloc(size); +} + +STORAGE_CLASS_INLINE void* +ia_css_cpu_mem_alloc_page_aligned(unsigned int size) +{ + int pagesize; + + pagesize = sysconf(_SC_PAGE_SIZE); + return memalign(pagesize, size); +} + +STORAGE_CLASS_INLINE void* +ia_css_cpu_mem_set_zero(void *dst, unsigned int size) +{ + return memset(dst, 0, size); +} + +STORAGE_CLASS_INLINE void +ia_css_cpu_mem_free(void *ptr) +{ + free(ptr); +} + +STORAGE_CLASS_INLINE void +ia_css_cpu_mem_cache_flush(void *ptr, unsigned int size) +{ + /* not needed in simulation */ + (void)ptr; + (void)size; +} + +STORAGE_CLASS_INLINE void +ia_css_cpu_mem_cache_invalidate(void *ptr, unsigned int size) +{ + /* not needed in simulation */ + (void)ptr; + (void)size; +} + +#endif + +#endif /* __CPU_MEM_SUPPORT_H */ diff --git 
a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/support/error_support.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/support/error_support.h new file mode 100644 index 0000000000000..9fe1f65125e6c --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/support/error_support.h @@ -0,0 +1,110 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ + +#ifndef __ERROR_SUPPORT_H +#define __ERROR_SUPPORT_H + +#if defined(__KERNEL__) +#include +#else +#include +#endif +#include + +/* OS-independent definition of IA_CSS errno values */ +/* #define IA_CSS_EINVAL 1 */ +/* #define IA_CSS_EFAULT 2 */ + +#ifdef __HIVECC +#define ERR_EMBEDDED 1 +#else +#define ERR_EMBEDDED 0 +#endif + +#if ERR_EMBEDDED +#define DECLARE_ERRVAL +#else +#define DECLARE_ERRVAL \ + int _errval = 0; +#endif + +/* Use "owl" in while to prevent compiler warnings in Windows */ +#define ALWAYS_FALSE ((void)0, 0) + +#define verifret(cond, error_type) \ +do { \ + if (!(cond)) { \ + return error_type; \ + } \ +} while (ALWAYS_FALSE) + +#define verifjmp(cond, error_tag) \ +do { \ + if (!(cond)) { \ + goto error_tag; \ + } \ +} while (ALWAYS_FALSE) + +#define verifexit(cond) \ +do { \ + if (!(cond)) { \ + goto EXIT; \ + } \ +} while (ALWAYS_FALSE) + +#if ERR_EMBEDDED +#define verifexitval(cond, error_tag) \ +do { \ + assert(cond); \ +} while (ALWAYS_FALSE) +#else +#define verifexitval(cond, error_tag) \ +do { \ + if (!(cond)) { \ + _errval = (error_tag); \ + goto EXIT; \ + } \ +} while 
(ALWAYS_FALSE) +#endif + +#if ERR_EMBEDDED +#define haserror(error_tag) (0) +#else +#define haserror(error_tag) \ + (_errval == (error_tag)) +#endif + +#if ERR_EMBEDDED +#define noerror() (1) +#else +#define noerror() \ + (_errval == 0) +#endif + +#define verifjmpexit(cond) \ +do { \ + if (!(cond)) { \ + goto EXIT; \ + } \ +} while (ALWAYS_FALSE) + +#define verifjmpexitsetretval(cond, retval) \ +do { \ + if (!(cond)) { \ + retval = -1; \ + goto EXIT; \ + } \ +} while (ALWAYS_FALSE) + +#endif /* __ERROR_SUPPORT_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/support/math_support.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/support/math_support.h new file mode 100644 index 0000000000000..633f86f1a1b09 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/support/math_support.h @@ -0,0 +1,314 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+*/ + +#ifndef __MATH_SUPPORT_H +#define __MATH_SUPPORT_H + +#include "storage_class.h" /* for STORAGE_CLASS_INLINE */ +#include "type_support.h" +#include "assert_support.h" + +/* in case we have min/max/MIN/MAX macro's undefine them */ +#ifdef min +#undef min +#endif +#ifdef max +#undef max +#endif +#ifdef MIN /* also defined in include/hrt/numeric.h from SDK */ +#undef MIN +#endif +#ifdef MAX +#undef MAX +#endif + +#ifndef UINT16_MAX +#define UINT16_MAX (0xffffUL) +#endif + +#ifndef UINT32_MAX +#define UINT32_MAX (0xffffffffUL) +#endif + +#define IS_ODD(a) ((a) & 0x1) +#define IS_EVEN(a) (!IS_ODD(a)) +#define IS_POWER2(a) (!((a)&((a)-1))) +#define IS_MASK_BITS_SET(a, b) ((a & b) != 0) + +/*To Find next power of 2 number from x */ +#define bit2(x) ((x) | ((x) >> 1)) +#define bit4(x) (bit2(x) | (bit2(x) >> 2)) +#define bit8(x) (bit4(x) | (bit4(x) >> 4)) +#define bit16(x) (bit8(x) | (bit8(x) >> 8)) +#define bit32(x) (bit16(x) | (bit16(x) >> 16)) +#define NEXT_POWER_OF_2(x) (bit32(x-1) + 1) + +/* force a value to a lower even value */ +#define EVEN_FLOOR(x) ((x) & ~1UL) + +/* A => B */ +#define IMPLIES(a, b) (!(a) || (b)) + +/* The ORIG_BITS th bit is the sign bit */ +/* Sign extends a ORIG_BITS bits long signed number to a 64-bit signed number */ +/* By type casting it can relimited to any valid type-size + * (32-bit signed or 16-bit or 8-bit) + */ +/* By masking it can be transformed to any arbitrary bit size */ +#define SIGN_EXTEND(VAL, ORIG_BITS) \ +((~(((VAL)&(1ULL<<((ORIG_BITS)-1)))-1))|(VAL)) + +#define EXTRACT_BIT(a, b) ((a >> b) & 1) + +/* for preprocessor and array sizing use MIN and MAX + otherwise use min and max */ +#define MAX(a, b) (((a) > (b)) ? (a) : (b)) +#define MIN(a, b) (((a) < (b)) ? (a) : (b)) +#define CLIP(a, b, c) MIN((MAX((a), (b))), (c)) +/* Integer round-down division of a with b */ +#define FLOOR_DIV(a, b) ((b) ? 
((a) / (b)) : 0) +/* Align a to the lower multiple of b */ +#define FLOOR_MUL(a, b) (FLOOR_DIV(a, b) * (b)) +/* Integer round-up division of a with b */ +#define CEIL_DIV(a, b) ((b) ? (((a) + (b) - 1) / (b)) : 0) +/* Align a to the upper multiple of b */ +#define CEIL_MUL(a, b) (CEIL_DIV(a, b) * (b)) +/* Align a to the upper multiple of b - fast implementation + * for cases when b=pow(2,n) + */ +#define CEIL_MUL2(a, b) (((a) + (b) - 1) & ~((b) - 1)) +/* integer round-up division of a with pow(2,b) */ +#define CEIL_SHIFT(a, b) (((a) + (1UL << (b)) - 1) >> (b)) +/* Align a to the upper multiple of pow(2,b) */ +#define CEIL_SHIFT_MUL(a, b) (CEIL_SHIFT(a, b) << (b)) +/* Absolute difference of a and b */ +#define ABS_DIF(a, b) (((a) > (b)) ? ((a) - (b)) : ((b) - (a))) +#define ABS(a) ABS_DIF(a, 0) +/* Square of x */ +#define SQR(x) ((x)*(x)) +/* Integer round-half-down division of a and b */ +#define ROUND_HALF_DOWN_DIV(a, b) ((b) ? ((a) + (b / 2) - 1) / (b) : 0) +/* Align a to the round-half-down multiple of b */ +#define ROUND_HALF_DOWN_MUL(a, b) (ROUND_HALF_DOWN_DIV(a, b) * (b)) + +#define MAX3(a, b, c) MAX((a), MAX((b), (c))) +#define MIN3(a, b, c) MIN((a), MIN((b), (c))) +#define MAX4(a, b, c, d) MAX((MAX((a), (b))), (MAX((c), (d)))) +#define MIN4(a, b, c, d) MIN((MIN((a), (b))), (MIN((c), (d)))) + +/* min and max should not be macros as they will evaluate their arguments twice. + if you really need a macro (e.g. for CPP or for initializing an array) + use MIN() and MAX(), otherwise use min() and max() */ + +#ifndef ARRAY_SIZE +#define ARRAY_SIZE(a) ((sizeof(a) / sizeof(*(a)))) +#endif + +#ifndef BYTES +#define BYTES(bit) (((bit)+7)/8) +#endif + +#if !defined(PIPE_GENERATION) +STORAGE_CLASS_INLINE unsigned int max_value_bits(unsigned int bits) +{ + return (bits == 0) ?
0 : ((2 * ((1 << ((bits) - 1)) - 1)) + 1); +} +STORAGE_CLASS_INLINE unsigned int max_value_bytes(unsigned int bytes) +{ + return max_value_bits(IA_CSS_UINT8_T_BITS * bytes); +} +STORAGE_CLASS_INLINE int max(int a, int b) +{ + return MAX(a, b); +} + +STORAGE_CLASS_INLINE int min(int a, int b) +{ + return MIN(a, b); +} + +STORAGE_CLASS_INLINE int clip(int a, int b, int c) +{ + return min(max(a, b), c); +} + +STORAGE_CLASS_INLINE unsigned int umax(unsigned int a, unsigned int b) +{ + return MAX(a, b); +} + +STORAGE_CLASS_INLINE unsigned int umin(unsigned int a, unsigned int b) +{ + return MIN(a, b); +} + +STORAGE_CLASS_INLINE unsigned int uclip(unsigned int a, unsigned int b, + unsigned int c) +{ + return umin(umax(a, b), c); +} + +STORAGE_CLASS_INLINE unsigned int ceil_div(unsigned int a, unsigned int b) +{ + return CEIL_DIV(a, b); +} + +STORAGE_CLASS_INLINE unsigned int ceil_mul(unsigned int a, unsigned int b) +{ + return CEIL_MUL(a, b); +} + +STORAGE_CLASS_INLINE unsigned int ceil_mul2(unsigned int a, unsigned int b) +{ + return CEIL_MUL2(a, b); +} + +STORAGE_CLASS_INLINE unsigned int ceil_shift(unsigned int a, unsigned int b) +{ + return CEIL_SHIFT(a, b); +} + +STORAGE_CLASS_INLINE unsigned int ceil_shift_mul(unsigned int a, unsigned int b) +{ + return CEIL_SHIFT_MUL(a, b); +} + +STORAGE_CLASS_INLINE int abs_dif(int a, int b) +{ + return ABS_DIF(a, b); +} + +STORAGE_CLASS_INLINE unsigned int uabs_dif(unsigned int a, unsigned int b) +{ + return ABS_DIF(a, b); +} + +STORAGE_CLASS_INLINE unsigned int round_half_down_div(unsigned int a, + unsigned int b) +{ + return ROUND_HALF_DOWN_DIV(a, b); +} + +STORAGE_CLASS_INLINE unsigned int round_half_down_mul(unsigned int a, + unsigned int b) +{ + return ROUND_HALF_DOWN_MUL(a, b); +} + +STORAGE_CLASS_INLINE unsigned int ceil_pow2(uint32_t a) +{ + unsigned int retval = 0; + + if (IS_POWER2(a)) { + retval = (unsigned int)a; + } else { + unsigned int v = a; + + v |= v>>1; + v |= v>>2; + v |= v>>4; + v |= v>>8; + v |= v>>16; + 
retval = (unsigned int)(v+1); + } + return retval; +} + +STORAGE_CLASS_INLINE unsigned int floor_log2(uint32_t a) +{ + static const uint8_t de_bruijn[] = { + 0, 9, 1, 10, 13, 21, 2, 29, 11, 14, 16, 18, 22, 25, 3, 30, + 8, 12, 20, 28, 15, 17, 24, 7, 19, 27, 23, 6, 26, 5, 4, 31 + }; + uint32_t v = a; + + v |= v>>1; + v |= v>>2; + v |= v>>4; + v |= v>>8; + v |= v>>16; + return (unsigned int)de_bruijn[(v*0x07C4ACDDU)>>27]; +} + +/* Divide by small power of two */ +STORAGE_CLASS_INLINE unsigned int +udiv2_small_i(uint32_t a, uint32_t b) +{ + assert(b <= 2); + return a >> (b-1); +} + +/* optimized divide for small results + * a will be divided by b + * outbits is the number of bits needed for the result + * the smaller the cheaper the function will be. + * if the result doesn't fit in the number of output bits + * the result is incorrect and the function will assert + */ +STORAGE_CLASS_INLINE unsigned int +udiv_medium(uint32_t a, uint32_t b, unsigned outbits) +{ + int bit; + unsigned res = 0; + unsigned mask; + +#ifdef VOLCANO +#pragma ipu unroll +#endif + for (bit = outbits-1 ; bit >= 0; bit--) { + mask = 1<<bit; + if (a >= (b<<bit)) { + res |= mask; + a = a - (b<<bit); + } + } + assert(a < b); + return res; +} + +#if !defined(__VIED_CELL) +/* + * For SP and ISP, SDK provides the definition of OP_std_modadd. + * We need it only for host + */ +STORAGE_CLASS_INLINE unsigned int OP_std_modadd(int a, int b, int c) +{ + return ((a+b) >= c ? a+b-c : a+b); +} + +/* + * For SP and ISP, SDK provides the definition of OP_asp_slor.
+ * We need it only for host + */ +STORAGE_CLASS_INLINE unsigned int OP_asp_slor(int a, int b, int c) +{ + return ((a << c) | b); +} +#else +#include "hive/customops.h" +#endif /* !defined(__VIED_CELL) */ + +#endif /* !defined(PIPE_GENERATION) */ + +#if !defined(__KERNEL__) +#define clamp(a, min_val, max_val) MIN(MAX((a), (min_val)), (max_val)) +#endif /* !defined(__KERNEL__) */ + +#endif /* __MATH_SUPPORT_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/support/misc_support.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/support/misc_support.h new file mode 100644 index 0000000000000..a2c2729e946d2 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/support/misc_support.h @@ -0,0 +1,76 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+*/ + +#ifndef __MISC_SUPPORT_H +#define __MISC_SUPPORT_H + +/* suppress compiler warnings on unused variables */ +#ifndef NOT_USED +#define NOT_USED(a) ((void)(a)) +#endif + +/* Calculate the total bytes for pow(2) byte alignment */ +#define tot_bytes_for_pow2_align(pow2, cur_bytes) \ + ((cur_bytes + (pow2 - 1)) & ~(pow2 - 1)) + +/* Display the macro value given a string */ +#define _STR(x) #x +#define STR(x) _STR(x) + +/* Concatenate */ +#ifndef CAT /* also defined in */ +#define _CAT(a, b) a ## b +#define CAT(a, b) _CAT(a, b) +#endif + +#define _CAT3(a, b, c) a ## b ## c +#define CAT3(a, b, c) _CAT3(a, b, c) + +/* NO_HOIST, NO_CSE, NO_ALIAS attributes must be ignored for host code */ +#ifndef __HIVECC +#ifndef NO_HOIST +#define NO_HOIST +#endif +#ifndef NO_CSE +#define NO_CSE +#endif +#ifndef NO_ALIAS +#define NO_ALIAS +#endif +#endif + +enum hive_method_id { + HIVE_METHOD_ID_CRUN, + HIVE_METHOD_ID_UNSCHED, + HIVE_METHOD_ID_SCHED, + HIVE_METHOD_ID_TARGET +}; + +/* Derive METHOD */ +#if defined(C_RUN) + #define HIVE_METHOD "crun" + #define HIVE_METHOD_ID HIVE_METHOD_ID_CRUN +#elif defined(HRT_UNSCHED) + #define HIVE_METHOD "unsched" + #define HIVE_METHOD_ID HIVE_METHOD_ID_UNSCHED +#elif defined(HRT_SCHED) + #define HIVE_METHOD "sched" + #define HIVE_METHOD_ID HIVE_METHOD_ID_SCHED +#else + #define HIVE_METHOD "target" + #define HIVE_METHOD_ID HIVE_METHOD_ID_TARGET + #define HRT_TARGET 1 +#endif + +#endif /* __MISC_SUPPORT_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/support/platform_support.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/support/platform_support.h new file mode 100644 index 0000000000000..1752efc7b4df8 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/support/platform_support.h @@ -0,0 +1,146 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. 
+ * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ + +#ifndef __PLATFORM_SUPPORT_H +#define __PLATFORM_SUPPORT_H + +#include "storage_class.h" + +#define MSEC_IN_SEC 1000 +#define NSEC_IN_MSEC 1000000 + +#if defined(_MSC_VER) +#include + +#define IA_CSS_EXTERN +#define SYNC_WITH(x) +#define CSS_ALIGN(d, a) _declspec(align(a)) d + +STORAGE_CLASS_INLINE void ia_css_sleep(void) +{ + /* Placeholder for driver team*/ +} + +STORAGE_CLASS_INLINE void ia_css_sleep_msec(long unsigned int delay_time_ms) +{ + /* Placeholder for driver team*/ + (void)delay_time_ms; +} + +#elif defined(__HIVECC) +#include +#include + +#define IA_CSS_EXTERN extern +#define CSS_ALIGN(d, a) d __attribute__((aligned(a))) +STORAGE_CLASS_INLINE void ia_css_sleep(void) +{ + OP___schedule(); +} + +#elif defined(__KERNEL__) +#include +#include + +#define IA_CSS_EXTERN +#define CSS_ALIGN(d, a) d __aligned(a) + +STORAGE_CLASS_INLINE void ia_css_sleep(void) +{ + usleep_range(1, 50); +} + +#elif defined(__GNUC__) +#include + +#define IA_CSS_EXTERN +#define CSS_ALIGN(d, a) d __attribute__((aligned(a))) + +/* Define some __HIVECC specific macros to nothing to allow host code compilation */ +#ifndef NO_ALIAS +#define NO_ALIAS +#endif + +#ifndef SYNC_WITH +#define SYNC_WITH(x) +#endif + +#if defined(HRT_CSIM) + #include "hrt/host.h" /* Using hrt_sleep from hrt/host.h */ + STORAGE_CLASS_INLINE void ia_css_sleep(void) + { + /* For the SDK still using hrt_sleep */ + hrt_sleep(); + } + STORAGE_CLASS_INLINE void ia_css_sleep_msec(long unsigned int delay_time_ms) + { + /* For the SDK still using hrt_sleep */ + 
long unsigned int i = 0; + for (i = 0; i < delay_time_ms; i++) { + hrt_sleep(); + } + } +#else + #include + STORAGE_CLASS_INLINE void ia_css_sleep(void) + { + struct timespec delay_time; + + delay_time.tv_sec = 0; + delay_time.tv_nsec = 10; + nanosleep(&delay_time, NULL); + } + STORAGE_CLASS_INLINE void ia_css_sleep_msec(long unsigned int delay_time_ms) + { + struct timespec delay_time; + + if (delay_time_ms >= MSEC_IN_SEC) { + delay_time.tv_sec = delay_time_ms / MSEC_IN_SEC; + delay_time.tv_nsec = (delay_time_ms % MSEC_IN_SEC) * NSEC_IN_MSEC; + } else { + delay_time.tv_sec = 0; + delay_time.tv_nsec = delay_time_ms * NSEC_IN_MSEC; + } + nanosleep(&delay_time, NULL); + } +#endif + +#else +#include +#endif + +/*needed for the include in stdint.h for various environments */ +#include "type_support.h" +#include "storage_class.h" + +#define MAX_ALIGNMENT 8 +#define aligned_uint8(type, obj) CSS_ALIGN(uint8_t obj, 1) +#define aligned_int8(type, obj) CSS_ALIGN(int8_t obj, 1) +#define aligned_uint16(type, obj) CSS_ALIGN(uint16_t obj, 2) +#define aligned_int16(type, obj) CSS_ALIGN(int16_t obj, 2) +#define aligned_uint32(type, obj) CSS_ALIGN(uint32_t obj, 4) +#define aligned_int32(type, obj) CSS_ALIGN(int32_t obj, 4) + +/* needed as long as hivecc does not define the type (u)int64_t */ +#if defined(__HIVECC) +#define aligned_uint64(type, obj) CSS_ALIGN(unsigned long long obj, 8) +#define aligned_int64(type, obj) CSS_ALIGN(signed long long obj, 8) +#else +#define aligned_uint64(type, obj) CSS_ALIGN(uint64_t obj, 8) +#define aligned_int64(type, obj) CSS_ALIGN(int64_t obj, 8) +#endif +#define aligned_enum(enum_type, obj) CSS_ALIGN(uint32_t obj, 4) +#define aligned_struct(struct_type, obj) struct_type obj + +#endif /* __PLATFORM_SUPPORT_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/support/print_support.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/support/print_support.h new file mode 100644 index 0000000000000..0b614f7ef12d8 --- /dev/null +++ 
b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/support/print_support.h @@ -0,0 +1,90 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ + +#ifndef __PRINT_SUPPORT_H +#define __PRINT_SUPPORT_H + +#if defined(_MSC_VER) +#ifdef _KERNEL_MODE + +/* TODO: Windows driver team to provide tracing mechanism for kernel mode + * e.g. DbgPrint and DbgPrintEx + */ +extern void FwTracePrintPWARN(const char *fmt, ...); +extern void FwTracePrintPRINT(const char *fmt, ...); +extern void FwTracePrintPERROR(const char *fmt, ...); +extern void FwTracePrintPDEBUG(const char *fmt, ...); + +#define PWARN(format, ...) FwTracePrintPWARN(format, __VA_ARGS__) +#define PRINT(format, ...) FwTracePrintPRINT(format, __VA_ARGS__) +#define PERROR(format, ...) FwTracePrintPERROR(format, __VA_ARGS__) +#define PDEBUG(format, ...) FwTracePrintPDEBUG(format, __VA_ARGS__) + +#else +/* Windows usermode compilation */ +#include + +/* To change the defines below, communicate with Windows team first + * to ensure they will not get flooded with prints + */ +/* This is temporary workaround to avoid flooding userspace + * Windows driver with prints + */ + +#define PWARN(format, ...) +#define PRINT(format, ...) +#define PERROR(format, ...) printf("error: " format, __VA_ARGS__) +#define PDEBUG(format, ...) 
+ +#endif /* _KERNEL_MODE */ + +#elif defined(__HIVECC) +#include +/* To be revised + +#define PWARN(format) +#define PRINT(format) OP___printstring(format) +#define PERROR(variable) OP___dump(9999, arguments) +#define PDEBUG(variable) OP___dump(__LINE__, arguments) + +*/ + +#define PRINTSTRING(str) OP___printstring(str) + +#elif defined(__KERNEL__) +#include +#include + + +#define PWARN(format, arguments...) pr_debug(format, ##arguments) +#define PRINT(format, arguments...) pr_debug(format, ##arguments) +#define PERROR(format, arguments...) pr_debug(format, ##arguments) +#define PDEBUG(format, arguments...) pr_debug(format, ##arguments) + +#else +#include + +#define PRINT_HELPER(prefix, format, ...) printf(prefix format "%s", __VA_ARGS__) + +/* The trailing "" allows the edge case of printing single string */ +#define PWARN(...) PRINT_HELPER("warning: ", __VA_ARGS__, "") +#define PRINT(...) PRINT_HELPER("", __VA_ARGS__, "") +#define PERROR(...) PRINT_HELPER("error: ", __VA_ARGS__, "") +#define PDEBUG(...) PRINT_HELPER("debug: ", __VA_ARGS__, "") + +#define PRINTSTRING(str) PRINT(str) + +#endif + +#endif /* __PRINT_SUPPORT_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/support/storage_class.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/support/storage_class.h new file mode 100644 index 0000000000000..af19b4026220a --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/support/storage_class.h @@ -0,0 +1,51 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU General Public License for + * more details. +*/ + +#ifndef __STORAGE_CLASS_H +#define __STORAGE_CLASS_H + +#define STORAGE_CLASS_EXTERN \ +extern + +#if defined(_MSC_VER) +#define STORAGE_CLASS_INLINE \ +static __inline +#elif defined(__HIVECC) +#define STORAGE_CLASS_INLINE \ +static inline +#else +#define STORAGE_CLASS_INLINE \ +static inline +#endif + +/* Register struct */ +#ifndef __register +#if defined(__HIVECC) && !defined(PIPE_GENERATION) +#define __register register +#else +#define __register +#endif +#endif + +/* Memory attribute */ +#ifndef MEM +#ifdef PIPE_GENERATION +#elif defined(__HIVECC) +#include +#else +#define MEM(any_mem) +#endif +#endif + +#endif /* __STORAGE_CLASS_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/support/type_support.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/support/type_support.h new file mode 100644 index 0000000000000..a86da0e78941c --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/support/type_support.h @@ -0,0 +1,80 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ + +#ifndef __TYPE_SUPPORT_H +#define __TYPE_SUPPORT_H + +/* Per the DLI spec, types are in "type_support.h" and + * "platform_support.h" is for unclassified/to be refactored + * platform specific definitions. 
+ */ +#define IA_CSS_UINT8_T_BITS 8 +#define IA_CSS_UINT16_T_BITS 16 +#define IA_CSS_UINT32_T_BITS 32 +#define IA_CSS_INT32_T_BITS 32 +#define IA_CSS_UINT64_T_BITS 64 + + +#if defined(_MSC_VER) +#include +#include +#include +#include +#if defined(_M_X64) +#define HOST_ADDRESS(x) (unsigned long long)(x) +#else +#define HOST_ADDRESS(x) (unsigned long)(x) +#endif + +#elif defined(PARAM_GENERATION) +/* Nothing */ +#elif defined(__HIVECC) +#include +#include +#include +#include +#define HOST_ADDRESS(x) (unsigned long)(x) + +typedef long long int64_t; +typedef unsigned long long uint64_t; + +#elif defined(__KERNEL__) +#include +#include + +#define CHAR_BIT (8) +#define HOST_ADDRESS(x) (unsigned long)(x) + +#elif defined(__GNUC__) +#include +#include +#include +#include +#define HOST_ADDRESS(x) (unsigned long)(x) + +#else /* default is for the FIST environment */ +#include +#include +#include +#include +#define HOST_ADDRESS(x) (unsigned long)(x) + +#endif + +#if !defined(PIPE_GENERATION) && !defined(IO_GENERATION) +/* genpipe cannot handle the void* syntax */ +typedef void *HANDLE; +#endif + +#endif /* __TYPE_SUPPORT_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/syscom/interface/ia_css_syscom.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/syscom/interface/ia_css_syscom.h new file mode 100644 index 0000000000000..5426d6d18e0bd --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/syscom/interface/ia_css_syscom.h @@ -0,0 +1,247 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU General Public License for + * more details. +*/ + +#ifndef __IA_CSS_SYSCOM_H +#define __IA_CSS_SYSCOM_H + + +/* + * The CSS Subsystem Communication Interface - Host side + * + * It provides subsystem initialzation, send ports and receive ports + * The PSYS and ISYS interfaces are implemented on top of this interface. + */ + +#include "ia_css_syscom_config.h" + +#define FW_ERROR_INVALID_PARAMETER (-1) +#define FW_ERROR_BAD_ADDRESS (-2) +#define FW_ERROR_BUSY (-3) +#define FW_ERROR_NO_MEMORY (-4) + +struct ia_css_syscom_context; + +/** + * ia_css_syscom_size() - provide syscom external buffer requirements + * @config: pointer to the configuration data (read) + * @size: pointer to the buffer size (write) + * + * Purpose: + * - Provide external buffer requirements + * - To be used for external buffer allocation + * + */ +extern void +ia_css_syscom_size( + const struct ia_css_syscom_config *cfg, + struct ia_css_syscom_size *size +); + +/** + * ia_css_syscom_open() - initialize a subsystem context + * @config: pointer to the configuration data (read) + * @buf: pointer to externally allocated buffers (read) + * @returns: struct ia_css_syscom_context* on success, 0 otherwise. + * + * Purpose: + * - initialize host side data structures + * - boot the subsystem? + * + */ +extern struct ia_css_syscom_context* +ia_css_syscom_open( + struct ia_css_syscom_config *config, + struct ia_css_syscom_buf *buf +); + +/** + * ia_css_syscom_close() - signal close to cell + * @context: pointer to the subsystem context + * @returns: 0 on success, -2 (FW_ERROR_BUSY) if SPC is not ready yet. + * + * Purpose: + * Request from the Cell to terminate + */ +extern int +ia_css_syscom_close( + struct ia_css_syscom_context *context +); + +/** + * ia_css_syscom_release() - free context + * @context: pointer to the subsystem context + * @force: flag which specifies whether cell + * state will be checked before freeing the + * context. 
+ * @returns: 0 on success, -2 (FW_ERROR_BUSY) if cell + * is busy and call was not forced. + * + * Purpose: + * 2 modes, with first (force==true) immediately + * free context, and second (force==false) verifying + * that the cell state is ok and freeing context if so, + * returning error otherwise. + */ +extern int +ia_css_syscom_release( + struct ia_css_syscom_context *context, + unsigned int force +); + +/** + * Open a port for sending tokens to the subsystem + * @context: pointer to the subsystem context + * @port: send port index + * @returns: 0 on success, -1 (FW_ERROR_INVALID_PARAMETER) otherwise. + */ +extern int +ia_css_syscom_send_port_open( + struct ia_css_syscom_context *context, + unsigned int port +); + +/** + * Closes a port for sending tokens to the subsystem + * @context: pointer to the subsystem context + * @port: send port index + * @returns: 0 on success, -1 (FW_ERROR_INVALID_PARAMETER) otherwise. + */ +extern int +ia_css_syscom_send_port_close( + struct ia_css_syscom_context *context, + unsigned int port +); + +/** + * Get the number of tokens that can be sent to a port without error. + * @context: pointer to the subsystem context + * @port: send port index + * @returns: number of available tokens on success, + * -1 (FW_ERROR_INVALID_PARAMETER) otherwise. + */ +extern int +ia_css_syscom_send_port_available( + struct ia_css_syscom_context *context, + unsigned int port +); + +/** + * Send a token to the subsystem port + * The token size is determined during initialization + * @context: pointer to the subsystem context + * @port: send port index + * @token: pointer to the token value that is transferred to the subsystem + * @returns: number of tokens sent on success, + * -1 (FW_ERROR_INVALID_PARAMETER) otherwise. 
+ */ +extern int +ia_css_syscom_send_port_transfer( + struct ia_css_syscom_context *context, + unsigned int port, + const void *token +); + +/** + * Open a port for receiving tokens to the subsystem + * @context: pointer to the subsystem context + * @port: receive port index + * @returns: 0 on success, -1 (FW_ERROR_INVALID_PARAMETER) otherwise. + */ +extern int +ia_css_syscom_recv_port_open( + struct ia_css_syscom_context *context, + unsigned int port +); + +/** + * Closes a port for receiving tokens to the subsystem + * Returns 0 on success, otherwise negative value of error code + * @context: pointer to the subsystem context + * @port: receive port index + * @returns: 0 on success, -1 (FW_ERROR_INVALID_PARAMETER) otherwise. + */ +extern int +ia_css_syscom_recv_port_close( + struct ia_css_syscom_context *context, + unsigned int port +); + +/** + * Get the number of tokens that can be received from a port without errors. + * @context: pointer to the subsystem context + * @port: receive port index + * @returns: number of available tokens on success, + * -1 (FW_ERROR_INVALID_PARAMETER) otherwise. + */ +extern int +ia_css_syscom_recv_port_available( + struct ia_css_syscom_context *context, + unsigned int port +); + +/** + * Receive a token from the subsystem port + * The token size is determined during initialization + * @context: pointer to the subsystem context + * @port: receive port index + * @token (output): pointer to (space for) the token to be received + * @returns: number of tokens received on success, + * -1 (FW_ERROR_INVALID_PARAMETER) otherwise. 
+ */ +extern int +ia_css_syscom_recv_port_transfer( + struct ia_css_syscom_context *context, + unsigned int port, + void *token +); + +#if HAS_DUAL_CMD_CTX_SUPPORT +/** + * ia_css_syscom_store_dmem() - store subsystem context information in DMEM + * @context: pointer to the subsystem context + * @ssid: subsystem id + * @vtl0_addr_mask: VTL0 address mask; only applicable when the passed in context is secure + * @returns: 0 on success, -1 (FW_ERROR_INVALID_PARAMETER) otherwise. + */ +extern int +ia_css_syscom_store_dmem( + struct ia_css_syscom_context *context, + unsigned int ssid, + unsigned int vtl0_addr_mask +); + +/** + * ia_css_syscom_set_trustlet_status() - store truslet configuration setting + * @context: pointer to the subsystem context + * @trustlet_exist: 1 if trustlet exists + */ +extern void +ia_css_syscom_set_trustlet_status( + unsigned int dmem_addr, + unsigned int ssid, + bool trustlet_exist +); + +/** + * ia_css_syscom_is_ab_spc_ready() - check if SPC access blocker programming is completed + * @context: pointer to the subsystem context + * @returns: 1 when status is ready. 0 otherwise + */ +bool +ia_css_syscom_is_ab_spc_ready( + struct ia_css_syscom_context *ctx +); +#endif /* HAS_DUAL_CMD_CTX_SUPPORT */ + +#endif /* __IA_CSS_SYSCOM_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/syscom/interface/ia_css_syscom_config.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/syscom/interface/ia_css_syscom_config.h new file mode 100644 index 0000000000000..2f5eb309df94e --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/syscom/interface/ia_css_syscom_config.h @@ -0,0 +1,97 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. 
+ * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ + +#ifndef __IA_CSS_SYSCOM_CONFIG_H +#define __IA_CSS_SYSCOM_CONFIG_H + +#include +#include + +/* syscom size struct, output of ia_css_syscom_size, + * input for (external) allocation + */ +struct ia_css_syscom_size { + /* Size of host buffer */ + unsigned int cpu; + /* Size of shared config buffer (host to cell) */ + unsigned int shm; + /* Size of shared input queue buffers (host to cell) */ + unsigned int ibuf; + /* Size of shared output queue buffers (cell to host) */ + unsigned int obuf; +}; + +/* syscom buffer struct, output of (external) allocation, + * input for ia_css_syscom_open + */ +struct ia_css_syscom_buf { + char *cpu; /* host buffer */ + + /* shared memory buffer host address */ + host_virtual_address_t shm_host; + /* shared memory buffer cell address */ + vied_virtual_address_t shm_cell; + + /* input queue shared buffer host address */ + host_virtual_address_t ibuf_host; + /* input queue shared buffer cell address */ + vied_virtual_address_t ibuf_cell; + + /* output queue shared buffer host address */ + host_virtual_address_t obuf_host; + /* output queue shared buffer cell address */ + vied_virtual_address_t obuf_cell; +}; + +struct ia_css_syscom_queue_config { + unsigned int queue_size; /* tokens per queue */ + unsigned int token_size; /* bytes per token */ +}; + +/** + * Parameter struct for ia_css_syscom_open + */ +struct ia_css_syscom_config { + /* This member in no longer used in syscom. 
+ It is kept to not break any driver builds, and will be removed when + all assignments have been removed from driver code */ + /* address of firmware in DDR/IMR */ + unsigned long long host_firmware_address; + + /* address of firmware in DDR, seen from SPC */ + unsigned int vied_firmware_address; + + unsigned int ssid; + unsigned int mmid; + + unsigned int num_input_queues; + unsigned int num_output_queues; + struct ia_css_syscom_queue_config *input; + struct ia_css_syscom_queue_config *output; + + unsigned int regs_addr; + unsigned int dmem_addr; + + /* firmware-specific configuration data */ + void *specific_addr; + unsigned int specific_size; + + /* if true; secure syscom in VTIO Case + * if false, non-secure syscom + */ + bool secure; + unsigned int vtl0_addr_mask; /* only applicable in 'secure' case */ +}; + +#endif /* __IA_CSS_SYSCOM_CONFIG_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/syscom/interface/ia_css_syscom_trace.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/syscom/interface/ia_css_syscom_trace.h new file mode 100644 index 0000000000000..2c32693c2a82e --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/syscom/interface/ia_css_syscom_trace.h @@ -0,0 +1,51 @@ +/* + * Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+ */ + +#ifndef __IA_CSS_SYSCOM_TRACE_H +#define __IA_CSS_SYSCOM_TRACE_H + +#include "ia_css_trace.h" + +#define SYSCOM_TRACE_LEVEL_DEFAULT 1 +#define SYSCOM_TRACE_LEVEL_DEBUG 2 + +/* Set to default level if no level is defined */ +#ifndef SYSCOM_TRACE_LEVEL +#define SYSCOM_TRACE_LEVEL SYSCOM_TRACE_LEVEL_DEFAULT +#endif /* SYSCOM_TRACE_LEVEL */ + +/* SYSCOM Module tracing backend is mapped to TUNIT tracing for target platforms */ +#ifdef __HIVECC +# ifndef HRT_CSIM +# define SYSCOM_TRACE_METHOD IA_CSS_TRACE_METHOD_TRACE +# else +# define SYSCOM_TRACE_METHOD IA_CSS_TRACE_METHOD_NATIVE +# endif +#else +# define SYSCOM_TRACE_METHOD IA_CSS_TRACE_METHOD_NATIVE +#endif + +#define SYSCOM_TRACE_LEVEL_INFO IA_CSS_TRACE_LEVEL_ENABLED +#define SYSCOM_TRACE_LEVEL_WARNING IA_CSS_TRACE_LEVEL_ENABLED +#define SYSCOM_TRACE_LEVEL_ERROR IA_CSS_TRACE_LEVEL_ENABLED + +#if (SYSCOM_TRACE_LEVEL == SYSCOM_TRACE_LEVEL_DEFAULT) +# define SYSCOM_TRACE_LEVEL_VERBOSE IA_CSS_TRACE_LEVEL_DISABLED +#elif (SYSCOM_TRACE_LEVEL == SYSCOM_TRACE_LEVEL_DEBUG) +# define SYSCOM_TRACE_LEVEL_VERBOSE IA_CSS_TRACE_LEVEL_ENABLED +#else +# error "Connection manager trace level not defined!" +#endif /* SYSCOM_TRACE_LEVEL */ + +#endif /* __IA_CSS_SYSCOM_TRACE_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/syscom/src/ia_css_syscom.c b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/syscom/src/ia_css_syscom.c new file mode 100644 index 0000000000000..cdf9df0531ff0 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/syscom/src/ia_css_syscom.c @@ -0,0 +1,650 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. 
+ * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ + +#include "ia_css_syscom.h" + +#include "ia_css_syscom_context.h" +#include "ia_css_syscom_config_fw.h" +#include "ia_css_syscom_trace.h" + +#include "queue.h" +#include "send_port.h" +#include "recv_port.h" +#include "regmem_access.h" + +#include "error_support.h" +#include "cpu_mem_support.h" + +#include "queue_struct.h" +#include "send_port_struct.h" +#include "recv_port_struct.h" + +#include "type_support.h" +#include +#include +#include "platform_support.h" + +#include "ia_css_cell.h" + +/* struct of internal buffer sizes */ +struct ia_css_syscom_size_intern { + unsigned int context; + unsigned int input_queue; + unsigned int output_queue; + unsigned int input_port; + unsigned int output_port; + + unsigned int fw_config; + unsigned int specific; + + unsigned int input_buffer; + unsigned int output_buffer; +}; + +/* Allocate buffers internally, when no buffers are provided */ +static int +ia_css_syscom_alloc( + unsigned int ssid, + unsigned int mmid, + const struct ia_css_syscom_size *size, + struct ia_css_syscom_buf *buf) +{ + /* zero the buffer to set all pointers to zero */ + memset(buf, 0, sizeof(*buf)); + + /* allocate cpu_mem */ + buf->cpu = (char *)ia_css_cpu_mem_alloc(size->cpu); + if (!buf->cpu) + goto EXIT7; + + /* allocate and map shared config buffer */ + buf->shm_host = shared_memory_alloc(mmid, size->shm); + if (!buf->shm_host) + goto EXIT6; + buf->shm_cell = shared_memory_map(ssid, mmid, buf->shm_host); + if (!buf->shm_cell) + goto EXIT5; + + /* allocate and map input queue buffer */ + buf->ibuf_host = shared_memory_alloc(mmid, size->ibuf); + if (!buf->ibuf_host) + goto EXIT4; + buf->ibuf_cell = shared_memory_map(ssid, mmid, buf->ibuf_host); + if (!buf->ibuf_cell) + goto EXIT3; + + /* allocate and 
 map output queue buffer */ + buf->obuf_host = shared_memory_alloc(mmid, size->obuf); + if (!buf->obuf_host) + goto EXIT2; + buf->obuf_cell = shared_memory_map(ssid, mmid, buf->obuf_host); + if (!buf->obuf_cell) + goto EXIT1; + + return 0; + +EXIT1: shared_memory_free(mmid, buf->obuf_host); +EXIT2: shared_memory_unmap(ssid, mmid, buf->ibuf_cell); +EXIT3: shared_memory_free(mmid, buf->ibuf_host); +EXIT4: shared_memory_unmap(ssid, mmid, buf->shm_cell); +EXIT5: shared_memory_free(mmid, buf->shm_host); +EXIT6: ia_css_cpu_mem_free(buf->cpu); +EXIT7: return FW_ERROR_NO_MEMORY; +} + +static void +ia_css_syscom_size_intern( + const struct ia_css_syscom_config *cfg, + struct ia_css_syscom_size_intern *size) +{ + /* convert syscom config into syscom internal size struct */ + + unsigned int i; + + size->context = sizeof(struct ia_css_syscom_context); + size->input_queue = cfg->num_input_queues * sizeof(struct sys_queue); + size->output_queue = cfg->num_output_queues * sizeof(struct sys_queue); + size->input_port = cfg->num_input_queues * sizeof(struct send_port); + size->output_port = cfg->num_output_queues * sizeof(struct recv_port); + + size->fw_config = sizeof(struct ia_css_syscom_config_fw); + size->specific = cfg->specific_size; + + /* accumulate input queue buffer sizes */ + size->input_buffer = 0; + for (i = 0; i < cfg->num_input_queues; i++) { + size->input_buffer += + sys_queue_buf_size(cfg->input[i].queue_size, + cfg->input[i].token_size); + } + + /* accumulate output queue buffer sizes */ + size->output_buffer = 0; + for (i = 0; i < cfg->num_output_queues; i++) { + size->output_buffer += + sys_queue_buf_size(cfg->output[i].queue_size, + cfg->output[i].token_size); + } +} + +static void +ia_css_syscom_size_extern( + const struct ia_css_syscom_size_intern *i, + struct ia_css_syscom_size *e) +{ + /* convert syscom internal size struct into external size struct */ + + e->cpu = i->context + i->input_queue + i->output_queue + + i->input_port + i->output_port; + e->shm = 
i->fw_config + i->input_queue + i->output_queue + i->specific; + e->ibuf = i->input_buffer; + e->obuf = i->output_buffer; +} + +/* Function that provides buffer sizes to be allocated */ +void +ia_css_syscom_size( + const struct ia_css_syscom_config *cfg, + struct ia_css_syscom_size *size) +{ + struct ia_css_syscom_size_intern i; + + ia_css_syscom_size_intern(cfg, &i); + ia_css_syscom_size_extern(&i, size); +} + +static struct ia_css_syscom_context* +ia_css_syscom_assign_buf( + const struct ia_css_syscom_size_intern *i, + const struct ia_css_syscom_buf *buf) +{ + struct ia_css_syscom_context *ctx; + char *cpu_mem_buf; + host_virtual_address_t shm_buf_host; + vied_virtual_address_t shm_buf_cell; + + /* host context */ + cpu_mem_buf = buf->cpu; + + ctx = (struct ia_css_syscom_context *)cpu_mem_buf; + ia_css_cpu_mem_set_zero(ctx, i->context); + cpu_mem_buf += i->context; + + ctx->input_queue = (struct sys_queue *) cpu_mem_buf; + cpu_mem_buf += i->input_queue; + + ctx->output_queue = (struct sys_queue *) cpu_mem_buf; + cpu_mem_buf += i->output_queue; + + ctx->send_port = (struct send_port *) cpu_mem_buf; + cpu_mem_buf += i->input_port; + + ctx->recv_port = (struct recv_port *) cpu_mem_buf; + + + /* cell config */ + shm_buf_host = buf->shm_host; + shm_buf_cell = buf->shm_cell; + + ctx->config_host_addr = shm_buf_host; + shm_buf_host += i->fw_config; + ctx->config_vied_addr = shm_buf_cell; + shm_buf_cell += i->fw_config; + + ctx->input_queue_host_addr = shm_buf_host; + shm_buf_host += i->input_queue; + ctx->input_queue_vied_addr = shm_buf_cell; + shm_buf_cell += i->input_queue; + + ctx->output_queue_host_addr = shm_buf_host; + shm_buf_host += i->output_queue; + ctx->output_queue_vied_addr = shm_buf_cell; + shm_buf_cell += i->output_queue; + + ctx->specific_host_addr = shm_buf_host; + ctx->specific_vied_addr = shm_buf_cell; + + ctx->ibuf_host_addr = buf->ibuf_host; + ctx->ibuf_vied_addr = buf->ibuf_cell; + + ctx->obuf_host_addr = buf->obuf_host; + ctx->obuf_vied_addr = 
buf->obuf_cell; + + return ctx; +} + +struct ia_css_syscom_context* +ia_css_syscom_open( + struct ia_css_syscom_config *cfg, + struct ia_css_syscom_buf *buf_extern +) +{ + struct ia_css_syscom_size_intern size_intern; + struct ia_css_syscom_size size; + struct ia_css_syscom_buf buf_intern; + struct ia_css_syscom_buf *buf; + struct ia_css_syscom_context *ctx; + struct ia_css_syscom_config_fw fw_cfg; + unsigned int i; + struct sys_queue_res res; + + IA_CSS_TRACE_0(SYSCOM, INFO, "Entered: ia_css_syscom_open\n"); + + /* error handling */ + if (cfg == NULL) + return NULL; + + IA_CSS_TRACE_1(SYSCOM, INFO, "ia_css_syscom_open (secure %d) start\n", cfg->secure); + + /* check members of cfg: TBD */ + + /* + * Check if SP is in valid state, have to wait if not ready. + * In some platform (Such as VP), it will need more time to wait due to system performance; + * If return NULL without wait for SPC0 ready, Driver load FW will failed + */ + ia_css_cell_wait(cfg->ssid, SPC0); + + ia_css_syscom_size_intern(cfg, &size_intern); + ia_css_syscom_size_extern(&size_intern, &size); + + if (buf_extern) { + /* use externally allocated buffers */ + buf = buf_extern; + } else { + /* use internally allocated buffers */ + buf = &buf_intern; + if (ia_css_syscom_alloc(cfg->ssid, cfg->mmid, &size, buf) != 0) + return NULL; + } + + /* assign buffer pointers */ + ctx = ia_css_syscom_assign_buf(&size_intern, buf); + /* only need to free internally allocated buffers */ + ctx->free_buf = !buf_extern; + + ctx->cell_regs_addr = cfg->regs_addr; + /* regmem is at cell_dmem_addr + REGMEM_OFFSET */ + ctx->cell_dmem_addr = cfg->dmem_addr; + + ctx->num_input_queues = cfg->num_input_queues; + ctx->num_output_queues = cfg->num_output_queues; + + ctx->env.mmid = cfg->mmid; + ctx->env.ssid = cfg->ssid; + ctx->env.mem_addr = cfg->dmem_addr; + + ctx->regmem_idx = SYSCOM_QPR_BASE_REG; + + /* initialize input queues */ + res.reg = SYSCOM_QPR_BASE_REG; + res.host_address = ctx->ibuf_host_addr; + res.vied_address = 
ctx->ibuf_vied_addr; + for (i = 0; i < cfg->num_input_queues; i++) { + sys_queue_init(ctx->input_queue + i, + cfg->input[i].queue_size, + cfg->input[i].token_size, &res); + } + + /* initialize output queues */ + res.host_address = ctx->obuf_host_addr; + res.vied_address = ctx->obuf_vied_addr; + for (i = 0; i < cfg->num_output_queues; i++) { + sys_queue_init(ctx->output_queue + i, + cfg->output[i].queue_size, + cfg->output[i].token_size, &res); + } + + /* fill shared queue structs */ + shared_memory_store(cfg->mmid, ctx->input_queue_host_addr, + ctx->input_queue, + cfg->num_input_queues * sizeof(struct sys_queue)); + ia_css_cpu_mem_cache_flush( + (void *)HOST_ADDRESS(ctx->input_queue_host_addr), + cfg->num_input_queues * sizeof(struct sys_queue)); + shared_memory_store(cfg->mmid, ctx->output_queue_host_addr, + ctx->output_queue, + cfg->num_output_queues * sizeof(struct sys_queue)); + ia_css_cpu_mem_cache_flush( + (void *)HOST_ADDRESS(ctx->output_queue_host_addr), + cfg->num_output_queues * sizeof(struct sys_queue)); + + /* Zero the queue buffers. Is this really needed? 
*/ + shared_memory_zero(cfg->mmid, buf->ibuf_host, size.ibuf); + ia_css_cpu_mem_cache_flush((void *)HOST_ADDRESS(buf->ibuf_host), + size.ibuf); + shared_memory_zero(cfg->mmid, buf->obuf_host, size.obuf); + ia_css_cpu_mem_cache_flush((void *)HOST_ADDRESS(buf->obuf_host), + size.obuf); + + /* copy firmware specific data */ + if (cfg->specific_addr && cfg->specific_size) { + shared_memory_store(cfg->mmid, ctx->specific_host_addr, + cfg->specific_addr, cfg->specific_size); + ia_css_cpu_mem_cache_flush( + (void *)HOST_ADDRESS(ctx->specific_host_addr), + cfg->specific_size); + } + + fw_cfg.num_input_queues = cfg->num_input_queues; + fw_cfg.num_output_queues = cfg->num_output_queues; + fw_cfg.input_queue = ctx->input_queue_vied_addr; + fw_cfg.output_queue = ctx->output_queue_vied_addr; + fw_cfg.specific_addr = ctx->specific_vied_addr; + fw_cfg.specific_size = cfg->specific_size; + + shared_memory_store(cfg->mmid, ctx->config_host_addr, + &fw_cfg, sizeof(struct ia_css_syscom_config_fw)); + ia_css_cpu_mem_cache_flush((void *)HOST_ADDRESS(ctx->config_host_addr), + sizeof(struct ia_css_syscom_config_fw)); + +#if !HAS_DUAL_CMD_CTX_SUPPORT + /* store syscom uninitialized state */ + IA_CSS_TRACE_3(SYSCOM, INFO, "ia_css_syscom_open store STATE_REG (%#x) @ dmem_addr %#x ssid %d\n", + SYSCOM_STATE_UNINIT, ctx->cell_dmem_addr, cfg->ssid); + regmem_store_32(ctx->cell_dmem_addr, SYSCOM_STATE_REG, + SYSCOM_STATE_UNINIT, cfg->ssid); + /* store syscom uninitialized command */ + IA_CSS_TRACE_3(SYSCOM, INFO, "ia_css_syscom_open store COMMAND_REG (%#x) @ dmem_addr %#x ssid %d\n", + SYSCOM_COMMAND_UNINIT, ctx->cell_dmem_addr, cfg->ssid); + regmem_store_32(ctx->cell_dmem_addr, SYSCOM_COMMAND_REG, + SYSCOM_COMMAND_UNINIT, cfg->ssid); + /* store firmware configuration address */ + IA_CSS_TRACE_3(SYSCOM, INFO, "ia_css_syscom_open store CONFIG_REG (%#x) @ dmem_addr %#x ssid %d\n", + ctx->config_vied_addr, ctx->cell_dmem_addr, cfg->ssid); + regmem_store_32(ctx->cell_dmem_addr, SYSCOM_CONFIG_REG, + 
 ctx->config_vied_addr, cfg->ssid); +#endif + + /* Indicate if ctx is created for secure stream purpose */ + ctx->secure = cfg->secure; + + IA_CSS_TRACE_1(SYSCOM, INFO, "ia_css_syscom_open (secure %d) completed\n", cfg->secure); + return ctx; +} + + +int +ia_css_syscom_close( + struct ia_css_syscom_context *ctx +) { + int state; + + state = regmem_load_32(ctx->cell_dmem_addr, SYSCOM_STATE_REG, + ctx->env.ssid); + if (state != SYSCOM_STATE_READY) { + /* SPC is not ready to handle close request yet */ + return FW_ERROR_BUSY; + } + + /* set close request flag */ + regmem_store_32(ctx->cell_dmem_addr, SYSCOM_COMMAND_REG, + SYSCOM_COMMAND_INACTIVE, ctx->env.ssid); + + return 0; +} + +static void +ia_css_syscom_free(struct ia_css_syscom_context *ctx) +{ + shared_memory_unmap(ctx->env.ssid, ctx->env.mmid, ctx->ibuf_vied_addr); + shared_memory_free(ctx->env.mmid, ctx->ibuf_host_addr); + shared_memory_unmap(ctx->env.ssid, ctx->env.mmid, ctx->obuf_vied_addr); + shared_memory_free(ctx->env.mmid, ctx->obuf_host_addr); + shared_memory_unmap(ctx->env.ssid, ctx->env.mmid, + ctx->config_vied_addr); + shared_memory_free(ctx->env.mmid, ctx->config_host_addr); + ia_css_cpu_mem_free(ctx); +} + +int +ia_css_syscom_release( + struct ia_css_syscom_context *ctx, + unsigned int force +) { + /* check if release is forced, and verify cell state if it is not */ + if (!force) { + if (!ia_css_cell_is_ready(ctx->env.ssid, SPC0)) + return FW_ERROR_BUSY; + } + + /* Reset the regmem idx */ + ctx->regmem_idx = 0; + + if (ctx->free_buf) + ia_css_syscom_free(ctx); + + return 0; +} + +int ia_css_syscom_send_port_open( + struct ia_css_syscom_context *ctx, + unsigned int port +) +{ + int state; + + /* check parameters */ + verifret(ctx != NULL, FW_ERROR_BAD_ADDRESS); + verifret(port < ctx->num_input_queues, FW_ERROR_INVALID_PARAMETER); + + /* check if SP syscom is ready to open the queue */ + state = regmem_load_32(ctx->cell_dmem_addr, SYSCOM_STATE_REG, + ctx->env.ssid); + if (state != SYSCOM_STATE_READY) 
{ + /* SPC is not ready to handle messages yet */ + return FW_ERROR_BUSY; + } + + /* initialize the port */ + send_port_open(ctx->send_port + port, + ctx->input_queue + port, &(ctx->env)); + + return 0; +} + +int ia_css_syscom_send_port_close( + struct ia_css_syscom_context *ctx, + unsigned int port +) +{ + /* check parameters */ + verifret(ctx != NULL, FW_ERROR_BAD_ADDRESS); + verifret(port < ctx->num_input_queues, FW_ERROR_INVALID_PARAMETER); + + return 0; +} + +int ia_css_syscom_send_port_available( + struct ia_css_syscom_context *ctx, + unsigned int port +) +{ + /* check params */ + verifret(ctx != NULL, FW_ERROR_BAD_ADDRESS); + verifret(port < ctx->num_input_queues, FW_ERROR_INVALID_PARAMETER); + + return send_port_available(ctx->send_port + port); +} + +int ia_css_syscom_send_port_transfer( + struct ia_css_syscom_context *ctx, + unsigned int port, + const void *token +) +{ + /* check params */ + verifret(ctx != NULL, FW_ERROR_BAD_ADDRESS); + verifret(port < ctx->num_input_queues, FW_ERROR_INVALID_PARAMETER); + + return send_port_transfer(ctx->send_port + port, token); +} + +int ia_css_syscom_recv_port_open( + struct ia_css_syscom_context *ctx, + unsigned int port +) +{ + int state; + + /* check parameters */ + verifret(ctx != NULL, FW_ERROR_BAD_ADDRESS); + verifret(port < ctx->num_output_queues, FW_ERROR_INVALID_PARAMETER); + + /* check if SP syscom is ready to open the queue */ + state = regmem_load_32(ctx->cell_dmem_addr, + SYSCOM_STATE_REG, ctx->env.ssid); + if (state != SYSCOM_STATE_READY) { + /* SPC is not ready to handle messages yet */ + return FW_ERROR_BUSY; + } + + /* initialize the port */ + recv_port_open(ctx->recv_port + port, + ctx->output_queue + port, &(ctx->env)); + + return 0; +} + +int ia_css_syscom_recv_port_close( + struct ia_css_syscom_context *ctx, + unsigned int port +) +{ + /* check parameters */ + verifret(ctx != NULL, FW_ERROR_BAD_ADDRESS); + verifret(port < ctx->num_output_queues, FW_ERROR_INVALID_PARAMETER); + + return 0; +} + +/* 
+ * Get the number of responses in the response queue + */ +int +ia_css_syscom_recv_port_available( + struct ia_css_syscom_context *ctx, + unsigned int port +) +{ + /* check params */ + verifret(ctx != NULL, FW_ERROR_BAD_ADDRESS); + verifret(port < ctx->num_output_queues, FW_ERROR_INVALID_PARAMETER); + + return recv_port_available(ctx->recv_port + port); +} + + +/* + * Dequeue the head of the response queue + * returns an error when the response queue is empty + */ +int +ia_css_syscom_recv_port_transfer( + struct ia_css_syscom_context *ctx, + unsigned int port, + void *token +) +{ + /* check params */ + verifret(ctx != NULL, FW_ERROR_BAD_ADDRESS); + verifret(port < ctx->num_output_queues, FW_ERROR_INVALID_PARAMETER); + + return recv_port_transfer(ctx->recv_port + port, token); +} + +#if HAS_DUAL_CMD_CTX_SUPPORT +/* + * store subsystem context information in DMEM + */ +int +ia_css_syscom_store_dmem( + struct ia_css_syscom_context *ctx, + unsigned int ssid, + unsigned int vtl0_addr_mask +) +{ + unsigned int read_back; + + NOT_USED(vtl0_addr_mask); + NOT_USED(read_back); + + if (ctx->secure) { + /* store VTL0 address mask in 'secure' context */ + IA_CSS_TRACE_3(SYSCOM, INFO, "ia_css_syscom_store_dmem VTL0_ADDR_MASK (%#x) @ dmem_addr %#x ssid %d\n", + vtl0_addr_mask, ctx->cell_dmem_addr, ssid); + regmem_store_32(ctx->cell_dmem_addr, SYSCOM_VTL0_ADDR_MASK, vtl0_addr_mask, ssid); + } + /* store firmware configuration address */ + IA_CSS_TRACE_3(SYSCOM, INFO, "ia_css_syscom_store_dmem CONFIG_REG (%#x) @ dmem_addr %#x ssid %d\n", + ctx->config_vied_addr, ctx->cell_dmem_addr, ssid); + regmem_store_32(ctx->cell_dmem_addr, SYSCOM_CONFIG_REG, + ctx->config_vied_addr, ssid); + /* store syscom uninitialized state */ + IA_CSS_TRACE_3(SYSCOM, INFO, "ia_css_syscom_store_dmem STATE_REG (%#x) @ dmem_addr %#x ssid %d\n", + SYSCOM_STATE_UNINIT, ctx->cell_dmem_addr, ssid); + regmem_store_32(ctx->cell_dmem_addr, SYSCOM_STATE_REG, + SYSCOM_STATE_UNINIT, ssid); + /* store syscom 
 uninitialized command */ + IA_CSS_TRACE_3(SYSCOM, INFO, "ia_css_syscom_store_dmem COMMAND_REG (%#x) @ dmem_addr %#x ssid %d\n", + SYSCOM_COMMAND_UNINIT, ctx->cell_dmem_addr, ssid); + regmem_store_32(ctx->cell_dmem_addr, SYSCOM_COMMAND_REG, + SYSCOM_COMMAND_UNINIT, ssid); + + return 0; +} + +/* + * store trustlet configuration status setting + */ +void +ia_css_syscom_set_trustlet_status( + unsigned int dmem_addr, + unsigned int ssid, + bool trustlet_exist +) +{ + unsigned int value; + + value = trustlet_exist ? TRUSTLET_EXIST : TRUSTLET_NOT_EXIST; + IA_CSS_TRACE_3(SYSCOM, INFO, + "ia_css_syscom_set_trustlet_status TRUSTLET_STATUS (%#x) @ dmem_addr %#x ssid %d\n", + value, dmem_addr, ssid); + regmem_store_32(dmem_addr, TRUSTLET_STATUS, value, ssid); +} + +/* + * check if SPC access blocker programming is completed + */ +bool +ia_css_syscom_is_ab_spc_ready( + struct ia_css_syscom_context *ctx +) +{ + unsigned int value; + + /* We expect the call from non-secure context only */ + if (ctx->secure) { + IA_CSS_TRACE_0(SYSCOM, ERROR, "ia_css_syscom_is_spc_ab_ready - Please call from non-secure context\n"); + return false; + } + + value = regmem_load_32(ctx->cell_dmem_addr, AB_SPC_STATUS, ctx->env.ssid); + IA_CSS_TRACE_3(SYSCOM, INFO, + "ia_css_syscom_is_spc_ab_ready AB_SPC_STATUS @ dmem_addr %#x ssid %d - value %#x\n", + ctx->cell_dmem_addr, ctx->env.ssid, value); + + return (value == AB_SPC_READY); +} +#endif /* HAS_DUAL_CMD_CTX_SUPPORT */ diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/syscom/src/ia_css_syscom_config_fw.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/syscom/src/ia_css_syscom_config_fw.h new file mode 100644 index 0000000000000..0cacd5a34934d --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/syscom/src/ia_css_syscom_config_fw.h @@ -0,0 +1,69 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. 
+ * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ + +#ifndef __IA_CSS_SYSCOM_CONFIG_FW_H +#define __IA_CSS_SYSCOM_CONFIG_FW_H + +#include "type_support.h" + +enum { + /* Program load or explicit host setting should init to this */ + SYSCOM_STATE_UNINIT = 0x57A7E000, + /* SP Syscom sets this when it is ready for use */ + SYSCOM_STATE_READY = 0x57A7E001, + /* SP Syscom sets this when no more syscom accesses will happen */ + SYSCOM_STATE_INACTIVE = 0x57A7E002 +}; + +enum { + /* Program load or explicit host setting should init to this */ + SYSCOM_COMMAND_UNINIT = 0x57A7F000, + /* Host Syscom requests syscom to become inactive */ + SYSCOM_COMMAND_INACTIVE = 0x57A7F001 +}; + +#if HAS_DUAL_CMD_CTX_SUPPORT +enum { + /* Program load or explicit host setting should init to this */ + TRUSTLET_UNINIT = 0x57A8E000, + /* Host Syscom informs SP that Trustlet exists */ + TRUSTLET_EXIST = 0x57A8E001, + /* Host Syscom informs SP that Trustlet does not exist */ + TRUSTLET_NOT_EXIST = 0x57A8E002 +}; + +enum { + /* Program load or explicit setting initialized by SP */ + AB_SPC_NOT_READY = 0x57A8F000, + /* SP informs host that SPC access programming is completed */ + AB_SPC_READY = 0x57A8F001 +}; +#endif + +/* firmware config: data that sent from the host to SP via DDR */ +/* Cell copies data into a context */ + +struct ia_css_syscom_config_fw { + unsigned int firmware_address; + + unsigned int num_input_queues; + unsigned int num_output_queues; + unsigned int input_queue; /* hmm_ptr / struct queue* */ + unsigned int output_queue; /* hmm_ptr / struct queue* */ + + unsigned int 
specific_addr; /* vied virtual address */ + unsigned int specific_size; +}; + +#endif /* __IA_CSS_SYSCOM_CONFIG_FW_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/syscom/src/ia_css_syscom_context.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/syscom/src/ia_css_syscom_context.h new file mode 100644 index 0000000000000..ecf22f6b7ac53 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/syscom/src/ia_css_syscom_context.h @@ -0,0 +1,65 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+*/ + +#ifndef __IA_CSS_SYSCOM_CONTEXT_H +#define __IA_CSS_SYSCOM_CONTEXT_H + +#include + +#include "port_env_struct.h" +#include + +/* host context */ +struct ia_css_syscom_context { + vied_virtual_address_t cell_firmware_addr; + unsigned int cell_regs_addr; + unsigned int cell_dmem_addr; + + struct port_env env; + + unsigned int num_input_queues; + unsigned int num_output_queues; + + /* array of input queues (from host to SP) */ + struct sys_queue *input_queue; + /* array of output queues (from SP to host) */ + struct sys_queue *output_queue; + + struct send_port *send_port; + struct recv_port *recv_port; + + unsigned int regmem_idx; + unsigned int free_buf; + + host_virtual_address_t config_host_addr; + host_virtual_address_t input_queue_host_addr; + host_virtual_address_t output_queue_host_addr; + host_virtual_address_t specific_host_addr; + host_virtual_address_t ibuf_host_addr; + host_virtual_address_t obuf_host_addr; + + vied_virtual_address_t config_vied_addr; + vied_virtual_address_t input_queue_vied_addr; + vied_virtual_address_t output_queue_vied_addr; + vied_virtual_address_t specific_vied_addr; + vied_virtual_address_t ibuf_vied_addr; + vied_virtual_address_t obuf_vied_addr; + + /* if true; secure syscom object as in VTIO Case + * if false, non-secure syscom + */ + bool secure; +}; + +#endif /* __IA_CSS_SYSCOM_CONTEXT_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/syscom/syscom.mk b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/syscom/syscom.mk new file mode 100644 index 0000000000000..8d36b8928af55 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/syscom/syscom.mk @@ -0,0 +1,42 @@ +# # # +# Support for Intel Camera Imaging ISP subsystem. +# Copyright (c) 2010 - 2018, Intel Corporation. +# +# This program is free software; you can redistribute it and/or modify it +# under the terms and conditions of the GNU General Public License, +# version 2, as published by the Free Software Foundation. 
+# +# This program is distributed in the hope it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for +# more details +# +# +# MODULE is SYSCOM + +SYSCOM_DIR=$${MODULES_DIR}/syscom + +SYSCOM_INTERFACE=$(SYSCOM_DIR)/interface +SYSCOM_SOURCES1=$(SYSCOM_DIR)/src + +SYSCOM_HOST_FILES += $(SYSCOM_SOURCES1)/ia_css_syscom.c + +SYSCOM_HOST_CPPFLAGS += -I$(SYSCOM_INTERFACE) +SYSCOM_HOST_CPPFLAGS += -I$(SYSCOM_SOURCES1) +SYSCOM_HOST_CPPFLAGS += -I$${MODULES_DIR}/devices +ifdef REGMEM_SECURE_OFFSET +SYSCOM_HOST_CPPFLAGS += -DREGMEM_SECURE_OFFSET=$(REGMEM_SECURE_OFFSET) +else +SYSCOM_HOST_CPPFLAGS += -DREGMEM_SECURE_OFFSET=0 +endif + +SYSCOM_FW_FILES += $(SYSCOM_SOURCES1)/ia_css_syscom_fw.c + +SYSCOM_FW_CPPFLAGS += -I$(SYSCOM_INTERFACE) +SYSCOM_FW_CPPFLAGS += -I$(SYSCOM_SOURCES1) +SYSCOM_FW_CPPFLAGS += -DREGMEM_OFFSET=$(REGMEM_OFFSET) +ifdef REGMEM_SECURE_OFFSET +SYSCOM_FW_CPPFLAGS += -DREGMEM_SECURE_OFFSET=$(REGMEM_SECURE_OFFSET) +else +SYSCOM_FW_CPPFLAGS += -DREGMEM_SECURE_OFFSET=0 +endif diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/trace/interface/ia_css_trace.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/trace/interface/ia_css_trace.h new file mode 100644 index 0000000000000..b85b1810f1070 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/trace/interface/ia_css_trace.h @@ -0,0 +1,883 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU General Public License for + * more details. +*/ + +/*! \file */ + +#ifndef __IA_CSS_TRACE_H +#define __IA_CSS_TRACE_H + +/* +** Configurations +*/ + +/** + * STEP 1: Define {Module Name}_TRACE_METHOD to one of the following. + * Where: + * {Module Name} is the name of the targeted module. + * + * Example: + * #define NCI_DMA_TRACE_METHOD IA_CSS_TRACE_METHOD_NATIVE + */ + +/**< Use whatever method of tracing that best suits the platform + * this code is compiled for. + */ +#define IA_CSS_TRACE_METHOD_NATIVE 1 +/**< Use the Tracing NCI. */ +#define IA_CSS_TRACE_METHOD_TRACE 2 + +/** + * STEP 2: Define {Module Name}_TRACE_LEVEL_{Level} to one of the following. + * Where: + * {Module Name} is the name of the targeted module. + * {Level}, in decreasing order of severity, is one of the + * following values: + * {ASSERT, ERROR, WARNING, INFO, DEBUG, VERBOSE}. + * + * Example: + * #define NCI_DMA_TRACE_LEVEL_ASSERT IA_CSS_TRACE_LEVEL_DISABLED + * #define NCI_DMA_TRACE_LEVEL_ERROR IA_CSS_TRACE_LEVEL_ENABLED + */ +/**< Disables the corresponding trace level. */ +#define IA_CSS_TRACE_LEVEL_DISABLED 0 +/**< Enables the corresponding trace level. */ +#define IA_CSS_TRACE_LEVEL_ENABLED 1 + +/* + * Used in macro definition with do-while loop + * for removing checkpatch warnings + */ +#define IA_CSS_TRACE_FILE_DUMMY_DEFINE + +/** + * STEP 3: Define IA_CSS_TRACE_PRINT_FILE_LINE to have file name and + * line printed with every log message. + * + * Example: + * #define IA_CSS_TRACE_PRINT_FILE_LINE + */ + +/* +** Interface +*/ + +/* +** Static +*/ + +/** + * Logs a message with zero arguments if the targeted severity level is enabled + * at compile-time. + * @param module The targeted module. + * @param severity The severity level of the trace message. In decreasing order: + * {ASSERT, ERROR, WARNING, INFO, DEBUG, VERBOSE}. + * @param format The message to be traced. 
+ */ +#define IA_CSS_TRACE_0(module, severity, format) \ + IA_CSS_TRACE_IMPL(module, 0, severity, format) + +/** + * Logs a message with one argument if the targeted severity level is enabled + * at compile-time. + * @see IA_CSS_TRACE_0 + */ +#define IA_CSS_TRACE_1(module, severity, format, a1) \ + IA_CSS_TRACE_IMPL(module, 1, severity, format, a1) + +/** + * Logs a message with two arguments if the targeted severity level is enabled + * at compile-time. + * @see IA_CSS_TRACE_0 + */ +#define IA_CSS_TRACE_2(module, severity, format, a1, a2) \ + IA_CSS_TRACE_IMPL(module, 2, severity, format, a1, a2) + +/** + * Logs a message with three arguments if the targeted severity level + * is enabled at compile-time. + * @see IA_CSS_TRACE_0 + */ +#define IA_CSS_TRACE_3(module, severity, format, a1, a2, a3) \ + IA_CSS_TRACE_IMPL(module, 3, severity, format, a1, a2, a3) + +/** + * Logs a message with four arguments if the targeted severity level is enabled + * at compile-time. + * @see IA_CSS_TRACE_0 + */ +#define IA_CSS_TRACE_4(module, severity, format, a1, a2, a3, a4) \ + IA_CSS_TRACE_IMPL(module, 4, severity, format, a1, a2, a3, a4) + +/** + * Logs a message with five arguments if the targeted severity level is enabled + * at compile-time. + * @see IA_CSS_TRACE_0 + */ +#define IA_CSS_TRACE_5(module, severity, format, a1, a2, a3, a4, a5) \ + IA_CSS_TRACE_IMPL(module, 5, severity, format, a1, a2, a3, a4, a5) + +/** + * Logs a message with six arguments if the targeted severity level is enabled + * at compile-time. + * @see IA_CSS_TRACE_0 + */ +#define IA_CSS_TRACE_6(module, severity, format, a1, a2, a3, a4, a5, a6) \ + IA_CSS_TRACE_IMPL(module, 6, severity, format, a1, a2, a3, a4, a5, a6) + +/** + * Logs a message with seven arguments if the targeted severity level + * is enabled at compile-time. 
+ * @see IA_CSS_TRACE_0 + */ +#define IA_CSS_TRACE_7(module, severity, format, a1, a2, a3, a4, a5, a6, a7) \ + IA_CSS_TRACE_IMPL(module, 7, severity, format, \ + a1, a2, a3, a4, a5, a6, a7) + +/* +** Dynamic +*/ + +/** +* Declares, but does not define, dynamic tracing functions and variables +* for module \p module. For each module, place an instance of this macro +* in the compilation unit in which you want to use dynamic tracing facility +* so as to inform the compiler of the declaration of the available functions. +* An invocation of this function does not enable any of the available tracing +* levels. Do not place a semicolon after a call to this macro. +* @see IA_CSS_TRACE_DYNAMIC_DEFINE +*/ +#define IA_CSS_TRACE_DYNAMIC_DECLARE(module) \ + IA_CSS_TRACE_DYNAMIC_DECLARE_IMPL(module) +/** +* Declares the configuration function for the dynamic api seperatly, if one +* wants to use it. +*/ +#define IA_CSS_TRACE_DYNAMIC_DECLARE_CONFIG_FUNC(module) \ + IA_CSS_TRACE_DYNAMIC_DECLARE_CONFIG_FUNC_IMPL(module) + +/** +* Defines dynamic tracing functions and variables for module \p module. +* For each module, place an instance of this macro in one, and only one, +* of your SOURCE files so as to allow the linker resolve the related symbols. +* An invocation of this macro does not enable any of the available tracing +* levels. Do not place a semicolon after a call to this macro. +* @see IA_CSS_TRACE_DYNAMIC_DECLARE +*/ +#define IA_CSS_TRACE_DYNAMIC_DEFINE(module) \ + IA_CSS_TRACE_DYNAMIC_DEFINE_IMPL(module) +/** +* Defines the configuration function for the dynamic api seperatly, if one +* wants to use it. +*/ +#define IA_CSS_TRACE_DYNAMIC_DEFINE_CONFIG_FUNC(module) \ + IA_CSS_TRACE_DYNAMIC_DEFINE_CONFIG_FUNC_IMPL(module) + +/** + * Logs a message with zero arguments if the targeted severity level is enabled + * both at compile-time, and run-time. + * @param module The targeted module. + * @param severity The severity level of the trace message. 
In decreasing order: + * {ASSERT, ERROR, WARNING, INFO, DEBUG, VERBOSE}. + * @param format The message to be traced. + */ +#define IA_CSS_TRACE_DYNAMIC_0(module, severity, format) \ + IA_CSS_TRACE_DYNAMIC_IMPL(module, 0, severity, format) + +/** + * Logs a message with one argument if the targeted severity level is enabled + * both at compile-time, and run-time. + * @see IA_CSS_TRACE_DYNAMIC_0 + */ +#define IA_CSS_TRACE_DYNAMIC_1(module, severity, format, a1) \ + IA_CSS_TRACE_DYNAMIC_IMPL(module, 1, severity, format, a1) + +/** + * Logs a message with two arguments if the targeted severity level is enabled + * both at compile-time, and run-time. + * @see IA_CSS_TRACE_DYNAMIC_0 + */ +#define IA_CSS_TRACE_DYNAMIC_2(module, severity, format, a1, a2) \ + IA_CSS_TRACE_DYNAMIC_IMPL(module, 2, severity, format, a1, a2) + +/** + * Logs a message with three arguments if the targeted severity level + * is enabled both at compile-time, and run-time. + * @see IA_CSS_TRACE_DYNAMIC_0 + */ +#define IA_CSS_TRACE_DYNAMIC_3(module, severity, format, a1, a2, a3) \ + IA_CSS_TRACE_DYNAMIC_IMPL(module, 3, severity, format, a1, a2, a3) + +/** + * Logs a message with four arguments if the targeted severity level is enabled + * both at compile-time, and run-time. + * @see IA_CSS_TRACE_DYNAMIC_0 + */ +#define IA_CSS_TRACE_DYNAMIC_4(module, severity, format, a1, a2, a3, a4) \ + IA_CSS_TRACE_DYNAMIC_IMPL(module, 4, severity, format, a1, a2, a3, a4) + +/** + * Logs a message with five arguments if the targeted severity level is enabled + * both at compile-time, and run-time. + * @see IA_CSS_TRACE_DYNAMIC_0 + */ +#define IA_CSS_TRACE_DYNAMIC_5(module, severity, format, a1, a2, a3, a4, a5) \ + IA_CSS_TRACE_DYNAMIC_IMPL(module, 5, severity, format, \ + a1, a2, a3, a4, a5) + +/** + * Logs a message with six arguments if the targeted severity level is enabled + * both at compile-time, and run-time. 
+ * @see IA_CSS_TRACE_DYNAMIC_0 + */ +#define IA_CSS_TRACE_DYNAMIC_6(module, severity, format, \ + a1, a2, a3, a4, a5, a6) \ + IA_CSS_TRACE_DYNAMIC_IMPL(module, 6, severity, format, \ + a1, a2, a3, a4, a5, a6) + +/** + * Logs a message with seven arguments if the targeted severity level + * is enabled both at compile-time, and run-time. + * @see IA_CSS_TRACE_DYNAMIC_0 + */ +#define IA_CSS_TRACE_DYNAMIC_7(module, severity, format, \ + a1, a2, a3, a4, a5, a6, a7) \ + IA_CSS_TRACE_DYNAMIC_IMPL(module, 7, severity, format, \ + a1, a2, a3, a4, a5, a6, a7) + +/* +** Implementation +*/ + +/* CAT */ +#define IA_CSS_TRACE_CAT_IMPL(a, b) a ## b +#define IA_CSS_TRACE_CAT(a, b) IA_CSS_TRACE_CAT_IMPL(a, b) + +/* Bridge */ +#if defined(__HIVECC) || defined(__GNUC__) +#define IA_CSS_TRACE_IMPL(module, argument_count, severity, arguments ...) \ + IA_CSS_TRACE_CAT( \ + IA_CSS_TRACE_CAT( \ + IA_CSS_TRACE_CAT( \ + IA_CSS_TRACE_CAT( \ + IA_CSS_TRACE_CAT( \ + IA_CSS_TRACE_, \ + argument_count \ + ), \ + _ \ + ), \ + IA_CSS_TRACE_CAT( \ + module, \ + _TRACE_METHOD \ + ) \ + ), \ + _ \ + ), \ + IA_CSS_TRACE_CAT( \ + IA_CSS_TRACE_CAT( \ + module, \ + _TRACE_LEVEL_ \ + ), \ + severity \ + ) \ + ( \ + IA_CSS_TRACE_CAT( \ + IA_CSS_TRACE_CAT( \ + IA_CSS_TRACE_CAT( \ + IA_CSS_TRACE_SEVERITY_, \ + severity \ + ), \ + _ \ + ), \ + IA_CSS_TRACE_CAT( \ + module, \ + _TRACE_METHOD \ + ) \ + ), \ + #module, \ + ## arguments \ + ) \ + ) + +/* Bridge */ +#define IA_CSS_TRACE_DYNAMIC_IMPL(module, argument_count, severity, \ + arguments ...) \ + do { \ + if (IA_CSS_TRACE_CAT(IA_CSS_TRACE_CAT(module, _trace_level_), \ + severity)) { \ + IA_CSS_TRACE_IMPL(module, argument_count, severity, \ + ## arguments); \ + } \ + } while (0) +#elif defined(_MSC_VER) +#define IA_CSS_TRACE_IMPL(module, argument_count, severity, ...) 
\ + IA_CSS_TRACE_CAT( \ + IA_CSS_TRACE_CAT( \ + IA_CSS_TRACE_CAT( \ + IA_CSS_TRACE_CAT( \ + IA_CSS_TRACE_CAT( \ + IA_CSS_TRACE_, \ + argument_count \ + ), \ + _ \ + ), \ + IA_CSS_TRACE_CAT( \ + module, \ + _TRACE_METHOD \ + ) \ + ), \ + _ \ + ), \ + IA_CSS_TRACE_CAT( \ + IA_CSS_TRACE_CAT( \ + module, \ + _TRACE_LEVEL_ \ + ), \ + severity \ + ) \ + ( \ + IA_CSS_TRACE_CAT( \ + IA_CSS_TRACE_CAT( \ + IA_CSS_TRACE_CAT( \ + IA_CSS_TRACE_SEVERITY_, \ + severity \ + ), \ + _ \ + ), \ + IA_CSS_TRACE_CAT( \ + module, \ + _TRACE_METHOD \ + ) \ + ), \ + #module, \ + __VA_ARGS__ \ + ) \ + ) + +/* Bridge */ +#define IA_CSS_TRACE_DYNAMIC_IMPL(module, argument_count, severity, ...) \ + do { \ + if (IA_CSS_TRACE_CAT(IA_CSS_TRACE_CAT(module, _trace_level_), \ + severity)) { \ + IA_CSS_TRACE_IMPL(module, argument_count, severity, \ + __VA_ARGS__); \ + } \ + } while (0) +#endif + +/* +** Native Backend +*/ + +#if defined(__HIVECC) + #define IA_CSS_TRACE_PLATFORM_CELL +#elif defined(__GNUC__) + #define IA_CSS_TRACE_PLATFORM_HOST + + #define IA_CSS_TRACE_NATIVE(severity, module, format, arguments ...) \ + do { \ + IA_CSS_TRACE_FILE_PRINT_COMMAND; \ + PRINT(IA_CSS_TRACE_FORMAT_AUG_NATIVE(severity, module, \ + format), ## arguments); \ + } while (0) + /* TODO: In case Host Side tracing is needed to be mapped to the + * Tunit, the following "IA_CSS_TRACE_TRACE" needs to be modified from + * PRINT to vied_nci_tunit_print function calls + */ + #define IA_CSS_TRACE_TRACE(severity, module, format, arguments ...) \ + do { \ + IA_CSS_TRACE_FILE_PRINT_COMMAND; \ + PRINT(IA_CSS_TRACE_FORMAT_AUG_TRACE(severity, module, \ + format), ## arguments); \ + } while (0) + +#elif defined(_MSC_VER) + #define IA_CSS_TRACE_PLATFORM_HOST + + #define IA_CSS_TRACE_NATIVE(severity, module, format, ...) 
\ + do { \ + IA_CSS_TRACE_FILE_PRINT_COMMAND; \ + PRINT(IA_CSS_TRACE_FORMAT_AUG_NATIVE(severity, \ + module, format), __VA_ARGS__); \ + } while (0) + /* TODO: In case Host Side tracing is needed to be mapped to the + * Tunit, the following "IA_CSS_TRACE_TRACE" needs to be modified from + * PRINT to vied_nci_tunit_print function calls + */ + #define IA_CSS_TRACE_TRACE(severity, module, format, ...) \ + do { \ + IA_CSS_TRACE_FILE_PRINT_COMMAND; \ + PRINT(IA_CSS_TRACE_FORMAT_AUG_TRACE(severity, \ + module, format), __VA_ARGS__); \ + } while (0) +#else + #error Unsupported platform! +#endif /* Platform */ + +#if defined(IA_CSS_TRACE_PLATFORM_CELL) + #include /* VOLATILE */ + + #ifdef IA_CSS_TRACE_PRINT_FILE_LINE + #define IA_CSS_TRACE_FILE_PRINT_COMMAND \ + do { \ + OP___printstring(__FILE__":") VOLATILE; \ + OP___printdec(__LINE__) VOLATILE; \ + OP___printstring("\n") VOLATILE; \ + } while (0) + #else + #define IA_CSS_TRACE_FILE_PRINT_COMMAND + #endif + + #define IA_CSS_TRACE_MODULE_SEVERITY_PRINT(module, severity) \ + do { \ + IA_CSS_TRACE_FILE_DUMMY_DEFINE; \ + OP___printstring("["module"]:["severity"]:") \ + VOLATILE; \ + } while (0) + + #define IA_CSS_TRACE_MSG_NATIVE(severity, module, format) \ + do { \ + IA_CSS_TRACE_FILE_PRINT_COMMAND; \ + OP___printstring("["module"]:["severity"]: "format) \ + VOLATILE; \ + } while (0) + + #define IA_CSS_TRACE_ARG_NATIVE(module, severity, i, value) \ + do { \ + IA_CSS_TRACE_MODULE_SEVERITY_PRINT(module, severity); \ + OP___dump(i, value) VOLATILE; \ + } while (0) + + #define IA_CSS_TRACE_NATIVE_0(severity, module, format) \ + IA_CSS_TRACE_MSG_NATIVE(severity, module, format) + + #define IA_CSS_TRACE_NATIVE_1(severity, module, format, a1) \ + do { \ + IA_CSS_TRACE_MSG_NATIVE(severity, module, format); \ + IA_CSS_TRACE_ARG_NATIVE(module, severity, 1, a1); \ + } while (0) + + #define IA_CSS_TRACE_NATIVE_2(severity, module, format, a1, a2) \ + do { \ + IA_CSS_TRACE_MSG_NATIVE(severity, module, format); \ + 
IA_CSS_TRACE_ARG_NATIVE(module, severity, 1, a1); \ + IA_CSS_TRACE_ARG_NATIVE(module, severity, 2, a2); \ + } while (0) + + #define IA_CSS_TRACE_NATIVE_3(severity, module, format, a1, a2, a3) \ + do { \ + IA_CSS_TRACE_MSG_NATIVE(severity, module, format); \ + IA_CSS_TRACE_ARG_NATIVE(module, severity, 1, a1); \ + IA_CSS_TRACE_ARG_NATIVE(module, severity, 2, a2); \ + IA_CSS_TRACE_ARG_NATIVE(module, severity, 3, a3); \ + } while (0) + + #define IA_CSS_TRACE_NATIVE_4(severity, module, format, \ + a1, a2, a3, a4) \ + do { \ + IA_CSS_TRACE_MSG_NATIVE(severity, module, format); \ + IA_CSS_TRACE_ARG_NATIVE(module, severity, 1, a1); \ + IA_CSS_TRACE_ARG_NATIVE(module, severity, 2, a2); \ + IA_CSS_TRACE_ARG_NATIVE(module, severity, 3, a3); \ + IA_CSS_TRACE_ARG_NATIVE(module, severity, 4, a4); \ + } while (0) + + #define IA_CSS_TRACE_NATIVE_5(severity, module, format, \ + a1, a2, a3, a4, a5) \ + do { \ + IA_CSS_TRACE_MSG_NATIVE(severity, module, format); \ + IA_CSS_TRACE_ARG_NATIVE(module, severity, 1, a1); \ + IA_CSS_TRACE_ARG_NATIVE(module, severity, 2, a2); \ + IA_CSS_TRACE_ARG_NATIVE(module, severity, 3, a3); \ + IA_CSS_TRACE_ARG_NATIVE(module, severity, 4, a4); \ + IA_CSS_TRACE_ARG_NATIVE(module, severity, 5, a5); \ + } while (0) + + #define IA_CSS_TRACE_NATIVE_6(severity, module, format, \ + a1, a2, a3, a4, a5, a6) \ + do { \ + IA_CSS_TRACE_MSG_NATIVE(severity, module, format); \ + IA_CSS_TRACE_ARG_NATIVE(module, severity, 1, a1); \ + IA_CSS_TRACE_ARG_NATIVE(module, severity, 2, a2); \ + IA_CSS_TRACE_ARG_NATIVE(module, severity, 3, a3); \ + IA_CSS_TRACE_ARG_NATIVE(module, severity, 4, a4); \ + IA_CSS_TRACE_ARG_NATIVE(module, severity, 5, a5); \ + IA_CSS_TRACE_ARG_NATIVE(module, severity, 6, a6); \ + } while (0) + + #define IA_CSS_TRACE_NATIVE_7(severity, module, format, \ + a1, a2, a3, a4, a5, a6, a7) \ + do { \ + IA_CSS_TRACE_MSG_NATIVE(severity, module, format); \ + IA_CSS_TRACE_ARG_NATIVE(module, severity, 1, a1); \ + IA_CSS_TRACE_ARG_NATIVE(module, severity, 2, a2); 
\ + IA_CSS_TRACE_ARG_NATIVE(module, severity, 3, a3); \ + IA_CSS_TRACE_ARG_NATIVE(module, severity, 4, a4); \ + IA_CSS_TRACE_ARG_NATIVE(module, severity, 5, a5); \ + IA_CSS_TRACE_ARG_NATIVE(module, severity, 6, a6); \ + IA_CSS_TRACE_ARG_NATIVE(module, severity, 7, a7); \ + } while (0) + /* + ** Tracing Backend + */ +#if !defined(HRT_CSIM) && !defined(NO_TUNIT) + #include "vied_nci_tunit.h" +#endif + #define IA_CSS_TRACE_AUG_FORMAT_TRACE(format, module) \ + "[" module "]" format " : PID = %x : Timestamp = %d : PC = %x" + + #define IA_CSS_TRACE_TRACE_0(severity, module, format) \ + vied_nci_tunit_print(IA_CSS_TRACE_AUG_FORMAT_TRACE(format, \ + module), \ + severity) + + #define IA_CSS_TRACE_TRACE_1(severity, module, format, a1) \ + vied_nci_tunit_print1i(IA_CSS_TRACE_AUG_FORMAT_TRACE(format, \ + module), \ + severity, a1) + + #define IA_CSS_TRACE_TRACE_2(severity, module, format, a1, a2) \ + vied_nci_tunit_print2i(IA_CSS_TRACE_AUG_FORMAT_TRACE(format, \ + module), \ + severity, a1, a2) + + #define IA_CSS_TRACE_TRACE_3(severity, module, format, a1, a2, a3) \ + vied_nci_tunit_print3i(IA_CSS_TRACE_AUG_FORMAT_TRACE(format, \ + module), \ + severity, a1, a2, a3) + + #define IA_CSS_TRACE_TRACE_4(severity, module, format, a1, a2, a3, a4) \ + vied_nci_tunit_print4i(IA_CSS_TRACE_AUG_FORMAT_TRACE(format, \ + module), \ + severity, a1, a2, a3, a4) + + #define IA_CSS_TRACE_TRACE_5(severity, module, format, \ + a1, a2, a3, a4, a5) \ + vied_nci_tunit_print5i(IA_CSS_TRACE_AUG_FORMAT_TRACE(format, \ + module), \ + severity, a1, a2, a3, a4, a5) + + #define IA_CSS_TRACE_TRACE_6(severity, module, format, \ + a1, a2, a3, a4, a5, a6) \ + vied_nci_tunit_print6i(IA_CSS_TRACE_AUG_FORMAT_TRACE(format, \ + module), \ + severity, a1, a2, a3, a4, a5, a6) + + #define IA_CSS_TRACE_TRACE_7(severity, module, format, \ + a1, a2, a3, a4, a5, a6, a7) \ + vied_nci_tunit_print7i(IA_CSS_TRACE_AUG_FORMAT_TRACE(format, \ + module), \ + severity, a1, a2, a3, a4, a5, a6, a7) + +#elif 
defined(IA_CSS_TRACE_PLATFORM_HOST) + #include "print_support.h" + + #ifdef IA_CSS_TRACE_PRINT_FILE_LINE + #define IA_CSS_TRACE_FILE_PRINT_COMMAND \ + PRINT("%s:%d:\n", __FILE__, __LINE__) + #else + #define IA_CSS_TRACE_FILE_PRINT_COMMAND + #endif + + #define IA_CSS_TRACE_FORMAT_AUG_NATIVE(severity, module, format) \ + "[" module "]:[" severity "]: " format + + #define IA_CSS_TRACE_NATIVE_0(severity, module, format) \ + IA_CSS_TRACE_NATIVE(severity, module, format) + + #define IA_CSS_TRACE_NATIVE_1(severity, module, format, a1) \ + IA_CSS_TRACE_NATIVE(severity, module, format, a1) + + #define IA_CSS_TRACE_NATIVE_2(severity, module, format, a1, a2) \ + IA_CSS_TRACE_NATIVE(severity, module, format, a1, a2) + + #define IA_CSS_TRACE_NATIVE_3(severity, module, format, a1, a2, a3) \ + IA_CSS_TRACE_NATIVE(severity, module, format, a1, a2, a3) + + #define IA_CSS_TRACE_NATIVE_4(severity, module, format, \ + a1, a2, a3, a4) \ + IA_CSS_TRACE_NATIVE(severity, module, format, a1, a2, a3, a4) + + #define IA_CSS_TRACE_NATIVE_5(severity, module, format, \ + a1, a2, a3, a4, a5) \ + IA_CSS_TRACE_NATIVE(severity, module, format, \ + a1, a2, a3, a4, a5) + + #define IA_CSS_TRACE_NATIVE_6(severity, module, format, \ + a1, a2, a3, a4, a5, a6) \ + IA_CSS_TRACE_NATIVE(severity, module, format, \ + a1, a2, a3, a4, a5, a6) + + #define IA_CSS_TRACE_NATIVE_7(severity, module, format, \ + a1, a2, a3, a4, a5, a6, a7) \ + IA_CSS_TRACE_NATIVE(severity, module, format, \ + a1, a2, a3, a4, a5, a6, a7) + + #define IA_CSS_TRACE_FORMAT_AUG_TRACE(severity, module, format) \ + "["module"]:["severity"]: "format + + #define IA_CSS_TRACE_TRACE_0(severity, module, format) \ + IA_CSS_TRACE_TRACE(severity, module, format) + + #define IA_CSS_TRACE_TRACE_1(severity, module, format, a1) \ + IA_CSS_TRACE_TRACE(severity, module, format, a1) + + #define IA_CSS_TRACE_TRACE_2(severity, module, format, a1, a2) \ + IA_CSS_TRACE_TRACE(severity, module, format, a1, a2) + + #define IA_CSS_TRACE_TRACE_3(severity, module, 
format, a1, a2, a3) \ + IA_CSS_TRACE_TRACE(severity, module, format, a1, a2, a3) + + #define IA_CSS_TRACE_TRACE_4(severity, module, format, \ + a1, a2, a3, a4) \ + IA_CSS_TRACE_TRACE(severity, module, format, a1, a2, a3, a4) + + #define IA_CSS_TRACE_TRACE_5(severity, module, format, \ + a1, a2, a3, a4, a5) \ + IA_CSS_TRACE_TRACE(severity, module, format, \ + a1, a2, a3, a4, a5) + + #define IA_CSS_TRACE_TRACE_6(severity, module, format, \ + a1, a2, a3, a4, a5, a6) \ + IA_CSS_TRACE_TRACE(severity, module, format, \ + a1, a2, a3, a4, a5, a6) + + #define IA_CSS_TRACE_TRACE_7(severity, module, format, \ + a1, a2, a3, a4, a5, a6, a7) \ + IA_CSS_TRACE_TRACE(severity, module, format, \ + a1, a2, a3, a4, a5, a6, a7) +#endif + +/* Disabled */ +/* Legend: IA_CSS_TRACE_{Argument Count}_{Backend ID}_{Enabled} */ +#define IA_CSS_TRACE_0_1_0(severity, module, format) +#define IA_CSS_TRACE_1_1_0(severity, module, format, arg1) +#define IA_CSS_TRACE_2_1_0(severity, module, format, arg1, arg2) +#define IA_CSS_TRACE_3_1_0(severity, module, format, arg1, arg2, arg3) +#define IA_CSS_TRACE_4_1_0(severity, module, format, arg1, arg2, arg3, arg4) +#define IA_CSS_TRACE_5_1_0(severity, module, format, arg1, arg2, arg3, arg4, \ + arg5) +#define IA_CSS_TRACE_6_1_0(severity, module, format, arg1, arg2, arg3, arg4, \ + arg5, arg6) +#define IA_CSS_TRACE_7_1_0(severity, module, format, arg1, arg2, arg3, arg4, \ + arg5, arg6, arg7) + +/* Enabled */ +/* Legend: IA_CSS_TRACE_{Argument Count}_{Backend ID}_{Enabled} */ +#define IA_CSS_TRACE_0_1_1 IA_CSS_TRACE_NATIVE_0 +#define IA_CSS_TRACE_1_1_1 IA_CSS_TRACE_NATIVE_1 +#define IA_CSS_TRACE_2_1_1 IA_CSS_TRACE_NATIVE_2 +#define IA_CSS_TRACE_3_1_1 IA_CSS_TRACE_NATIVE_3 +#define IA_CSS_TRACE_4_1_1 IA_CSS_TRACE_NATIVE_4 +#define IA_CSS_TRACE_5_1_1 IA_CSS_TRACE_NATIVE_5 +#define IA_CSS_TRACE_6_1_1 IA_CSS_TRACE_NATIVE_6 +#define IA_CSS_TRACE_7_1_1 IA_CSS_TRACE_NATIVE_7 + +/* Enabled */ +/* Legend: IA_CSS_TRACE_SEVERITY_{Severity Level}_{Backend ID} */ 
+#define IA_CSS_TRACE_SEVERITY_ASSERT_1 "Assert" +#define IA_CSS_TRACE_SEVERITY_ERROR_1 "Error" +#define IA_CSS_TRACE_SEVERITY_WARNING_1 "Warning" +#define IA_CSS_TRACE_SEVERITY_INFO_1 "Info" +#define IA_CSS_TRACE_SEVERITY_DEBUG_1 "Debug" +#define IA_CSS_TRACE_SEVERITY_VERBOSE_1 "Verbose" + +/* Disabled */ +/* Legend: IA_CSS_TRACE_{Argument Count}_{Backend ID}_{Enabled} */ +#define IA_CSS_TRACE_0_2_0(severity, module, format) +#define IA_CSS_TRACE_1_2_0(severity, module, format, arg1) +#define IA_CSS_TRACE_2_2_0(severity, module, format, arg1, arg2) +#define IA_CSS_TRACE_3_2_0(severity, module, format, arg1, arg2, arg3) +#define IA_CSS_TRACE_4_2_0(severity, module, format, arg1, arg2, arg3, arg4) +#define IA_CSS_TRACE_5_2_0(severity, module, format, arg1, arg2, arg3, arg4, \ + arg5) +#define IA_CSS_TRACE_6_2_0(severity, module, format, arg1, arg2, arg3, arg4, \ + arg5, arg6) +#define IA_CSS_TRACE_7_2_0(severity, module, format, arg1, arg2, arg3, arg4, \ + arg5, arg6, arg7) + +/* Enabled */ +/* Legend: IA_CSS_TRACE_{Argument Count}_{Backend ID}_{Enabled} */ +#define IA_CSS_TRACE_0_2_1 IA_CSS_TRACE_TRACE_0 +#define IA_CSS_TRACE_1_2_1 IA_CSS_TRACE_TRACE_1 +#define IA_CSS_TRACE_2_2_1 IA_CSS_TRACE_TRACE_2 +#define IA_CSS_TRACE_3_2_1 IA_CSS_TRACE_TRACE_3 +#define IA_CSS_TRACE_4_2_1 IA_CSS_TRACE_TRACE_4 +#define IA_CSS_TRACE_5_2_1 IA_CSS_TRACE_TRACE_5 +#define IA_CSS_TRACE_6_2_1 IA_CSS_TRACE_TRACE_6 +#define IA_CSS_TRACE_7_2_1 IA_CSS_TRACE_TRACE_7 + +/* Enabled */ +/* Legend: IA_CSS_TRACE_SEVERITY_{Severity Level}_{Backend ID} */ +#define IA_CSS_TRACE_SEVERITY_ASSERT_2 VIED_NCI_TUNIT_MSG_SEVERITY_FATAL +#define IA_CSS_TRACE_SEVERITY_ERROR_2 VIED_NCI_TUNIT_MSG_SEVERITY_ERROR +#define IA_CSS_TRACE_SEVERITY_WARNING_2 VIED_NCI_TUNIT_MSG_SEVERITY_WARNING +#define IA_CSS_TRACE_SEVERITY_INFO_2 VIED_NCI_TUNIT_MSG_SEVERITY_NORMAL +#define IA_CSS_TRACE_SEVERITY_DEBUG_2 VIED_NCI_TUNIT_MSG_SEVERITY_USER1 +#define IA_CSS_TRACE_SEVERITY_VERBOSE_2 VIED_NCI_TUNIT_MSG_SEVERITY_USER2 + +/* 
+** Dynamicism +*/ + +#define IA_CSS_TRACE_DYNAMIC_DECLARE_IMPL(module) \ + do { \ + void IA_CSS_TRACE_CAT(module, _trace_assert_enable)(void); \ + void IA_CSS_TRACE_CAT(module, _trace_assert_disable)(void); \ + void IA_CSS_TRACE_CAT(module, _trace_error_enable)(void); \ + void IA_CSS_TRACE_CAT(module, _trace_error_disable)(void); \ + void IA_CSS_TRACE_CAT(module, _trace_warning_enable)(void); \ + void IA_CSS_TRACE_CAT(module, _trace_warning_disable)(void); \ + void IA_CSS_TRACE_CAT(module, _trace_info_enable)(void); \ + void IA_CSS_TRACE_CAT(module, _trace_info_disable)(void); \ + void IA_CSS_TRACE_CAT(module, _trace_debug_enable)(void); \ + void IA_CSS_TRACE_CAT(module, _trace_debug_disable)(void); \ + void IA_CSS_TRACE_CAT(module, _trace_verbose_enable)(void); \ + void IA_CSS_TRACE_CAT(module, _trace_verbose_disable)(void); \ + } while (0) + +#define IA_CSS_TRACE_DYNAMIC_DECLARE_CONFIG_FUNC_IMPL(module) \ + do { \ + IA_CSS_TRACE_FILE_DUMMY_DEFINE; \ + void IA_CSS_TRACE_CAT(module, _trace_configure)\ + (int argc, const char *const *argv); \ + } while (0) + +#include "platform_support.h" +#include "type_support.h" + +#define IA_CSS_TRACE_DYNAMIC_DEFINE_IMPL(module) \ + static uint8_t IA_CSS_TRACE_CAT(module, _trace_level_assert); \ + static uint8_t IA_CSS_TRACE_CAT(module, _trace_level_error); \ + static uint8_t IA_CSS_TRACE_CAT(module, _trace_level_warning); \ + static uint8_t IA_CSS_TRACE_CAT(module, _trace_level_info); \ + static uint8_t IA_CSS_TRACE_CAT(module, _trace_level_debug); \ + static uint8_t IA_CSS_TRACE_CAT(module, _trace_level_verbose); \ + \ + void IA_CSS_TRACE_CAT(module, _trace_assert_enable)(void) \ + { \ + IA_CSS_TRACE_CAT(module, _trace_level_assert) = 1; \ + } \ + \ + void IA_CSS_TRACE_CAT(module, _trace_assert_disable)(void) \ + { \ + IA_CSS_TRACE_CAT(module, _trace_level_assert) = 0; \ + } \ + \ + void IA_CSS_TRACE_CAT(module, _trace_error_enable)(void) \ + { \ + IA_CSS_TRACE_CAT(module, _trace_level_error) = 1; \ + } \ + \ + void 
IA_CSS_TRACE_CAT(module, _trace_error_disable)(void) \ + { \ + IA_CSS_TRACE_CAT(module, _trace_level_error) = 0; \ + } \ + \ + void IA_CSS_TRACE_CAT(module, _trace_warning_enable)(void) \ + { \ + IA_CSS_TRACE_CAT(module, _trace_level_warning) = 1; \ + } \ + \ + void IA_CSS_TRACE_CAT(module, _trace_warning_disable)(void) \ + { \ + IA_CSS_TRACE_CAT(module, _trace_level_warning) = 0; \ + } \ + \ + void IA_CSS_TRACE_CAT(module, _trace_info_enable)(void) \ + { \ + IA_CSS_TRACE_CAT(module, _trace_level_info) = 1; \ + } \ + \ + void IA_CSS_TRACE_CAT(module, _trace_info_disable)(void) \ + { \ + IA_CSS_TRACE_CAT(module, _trace_level_info) = 0; \ + } \ + \ + void IA_CSS_TRACE_CAT(module, _trace_debug_enable)(void) \ + { \ + IA_CSS_TRACE_CAT(module, _trace_level_debug) = 1; \ + } \ + \ + void IA_CSS_TRACE_CAT(module, _trace_debug_disable)(void) \ + { \ + IA_CSS_TRACE_CAT(module, _trace_level_debug) = 0; \ + } \ + \ + void IA_CSS_TRACE_CAT(module, _trace_verbose_enable)(void) \ + { \ + IA_CSS_TRACE_CAT(module, _trace_level_verbose) = 1; \ + } \ + \ + void IA_CSS_TRACE_CAT(module, _trace_verbose_disable)(void) \ + { \ + IA_CSS_TRACE_CAT(module, _trace_level_verbose) = 0; \ + } + +#define IA_CSS_TRACE_DYNAMIC_DEFINE_CONFIG_FUNC_IMPL(module) \ +void IA_CSS_TRACE_CAT(module, _trace_configure)(const int argc, \ + const char *const *const argv) \ +{ \ + int i = 1; \ + const char *levels = 0; \ + \ + while (i < argc) { \ + if (!strcmp(argv[i], "-" #module "_trace")) { \ + ++i; \ + \ + if (i < argc) { \ + levels = argv[i]; \ + \ + while (*levels) { \ + switch (*levels++) { \ + case 'a': \ + IA_CSS_TRACE_CAT \ + (module, _trace_assert_enable)(); \ + break; \ + \ + case 'e': \ + IA_CSS_TRACE_CAT \ + (module, _trace_error_enable)(); \ + break; \ + \ + case 'w': \ + IA_CSS_TRACE_CAT \ + (module, _trace_warning_enable)(); \ + break; \ + \ + case 'i': \ + IA_CSS_TRACE_CAT \ + (module, _trace_info_enable)(); \ + break; \ + \ + case 'd': \ + IA_CSS_TRACE_CAT \ + (module, 
_trace_debug_enable)(); \ + break; \ + \ + case 'v': \ + IA_CSS_TRACE_CAT \ + (module, _trace_verbose_enable)(); \ + break; \ + \ + default: break; \ + } \ + } \ + } \ + } \ + \ + ++i; \ + } \ +} + +#endif /* __IA_CSS_TRACE_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/trace/trace.mk b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/trace/trace.mk new file mode 100644 index 0000000000000..b232880b882bd --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/trace/trace.mk @@ -0,0 +1,40 @@ +# # # +# Support for Intel Camera Imaging ISP subsystem. +# Copyright (c) 2010 - 2018, Intel Corporation. +# +# This program is free software; you can redistribute it and/or modify it +# under the terms and conditions of the GNU General Public License, +# version 2, as published by the Free Software Foundation. +# +# This program is distributed in the hope it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +# FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU General Public License for +# more details +# +# +# MODULE Trace + +# Dependencies +IA_CSS_TRACE_SUPPORT = $${MODULES_DIR}/support + +# API +IA_CSS_TRACE = $${MODULES_DIR}/trace +IA_CSS_TRACE_INTERFACE = $(IA_CSS_TRACE)/interface + +# +# Host +# + +# Host CPP Flags +IA_CSS_TRACE_HOST_CPPFLAGS += -I$(IA_CSS_TRACE_SUPPORT) +IA_CSS_TRACE_HOST_CPPFLAGS += -I$(IA_CSS_TRACE_INTERFACE) +IA_CSS_TRACE_HOST_CPPFLAGS += -I$(IA_CSS_TRACE)/trace_modules + +# +# Firmware +# + +# Firmware CPP Flags +IA_CSS_TRACE_FW_CPPFLAGS += -I$(IA_CSS_TRACE_SUPPORT) +IA_CSS_TRACE_FW_CPPFLAGS += -I$(IA_CSS_TRACE_INTERFACE) +IA_CSS_TRACE_FW_CPPFLAGS += -I$(IA_CSS_TRACE)/trace_modules diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/utils/system_defs/system_const.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/utils/system_defs/system_const.h new file mode 100644 index 0000000000000..161f28fced973 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/utils/system_defs/system_const.h @@ -0,0 +1,26 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+*/ + +#ifndef __SYSTEM_CONST_H +#define __SYSTEM_CONST_H + +/* The values included in this file should have been + * taken from system/device properties which + * are not currently available in SDK + */ + +#define XMEM_WIDTH (512) +#define MG_PPC (4) + +#endif /* __SYSTEM_CONST_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/vied/vied/shared_memory_access.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/vied/vied/shared_memory_access.h new file mode 100644 index 0000000000000..1e81bad9f4eec --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/vied/vied/shared_memory_access.h @@ -0,0 +1,139 @@ +/* +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ +#ifndef _SHARED_MEMORY_ACCESS_H +#define _SHARED_MEMORY_ACCESS_H + +#include +#include +#include + +typedef enum { + sm_esuccess, + sm_enomem, + sm_ezeroalloc, + sm_ebadvaddr, + sm_einternalerror, + sm_ecorruption, + sm_enocontiguousmem, + sm_enolocmem, + sm_emultiplefree, +} shared_memory_error; + +/** + * \brief Virtual address of (DDR) shared memory space as seen from the VIED subsystem + */ +typedef uint32_t vied_virtual_address_t; + +/** + * \brief Virtual address of (DDR) shared memory space as seen from the host + */ +typedef unsigned long long host_virtual_address_t; + +/** + * \brief List of physical addresses of (DDR) shared memory space. This is used to represent a list of physical pages. 
+ */ +typedef struct shared_memory_physical_page_list_s *shared_memory_physical_page_list; +typedef struct shared_memory_physical_page_list_s +{ + shared_memory_physical_page_list next; + vied_physical_address_t address; +}shared_memory_physical_page_list_s; + + +/** + * \brief Initialize the shared memory interface administration on the host. + * \param idm: id of ddr memory + * \param host_ddr_addr: physical address of memory as seen from host + * \param memory_size: size of ddr memory in bytes + * \param ps: size of page in bytes (for instance 4096) + */ +int shared_memory_allocation_initialize(vied_memory_t idm, vied_physical_address_t host_ddr_addr, size_t memory_size, size_t ps); + +/** + * \brief De-initialize the shared memory interface administration on the host. + * + */ +void shared_memory_allocation_uninitialize(vied_memory_t idm); + +/** + * \brief Allocate (DDR) shared memory space and return a host virtual address. Returns NULL when insufficient memory available + */ +host_virtual_address_t shared_memory_alloc(vied_memory_t idm, size_t bytes); + +/** + * \brief Free (DDR) shared memory space. +*/ +void shared_memory_free(vied_memory_t idm, host_virtual_address_t addr); + +/** + * \brief Translate a virtual host.address to a physical address. +*/ +vied_physical_address_t shared_memory_virtual_host_to_physical_address (vied_memory_t idm, host_virtual_address_t addr); + +/** + * \brief Return the allocated physical pages for a virtual host.address. +*/ +shared_memory_physical_page_list shared_memory_virtual_host_to_physical_pages (vied_memory_t idm, host_virtual_address_t addr); + +/** + * \brief Destroy a shared_memory_physical_page_list. 
+*/ +void shared_memory_physical_pages_list_destroy (shared_memory_physical_page_list ppl); + +/** + * \brief Store a byte into (DDR) shared memory space using a host virtual address + */ +void shared_memory_store_8 (vied_memory_t idm, host_virtual_address_t addr, uint8_t data); + +/** + * \brief Store a 16-bit word into (DDR) shared memory space using a host virtual address + */ +void shared_memory_store_16(vied_memory_t idm, host_virtual_address_t addr, uint16_t data); + +/** + * \brief Store a 32-bit word into (DDR) shared memory space using a host virtual address + */ +void shared_memory_store_32(vied_memory_t idm, host_virtual_address_t addr, uint32_t data); + +/** + * \brief Store a number of bytes into (DDR) shared memory space using a host virtual address + */ +void shared_memory_store(vied_memory_t idm, host_virtual_address_t addr, const void *data, size_t bytes); + +/** + * \brief Set a number of bytes of (DDR) shared memory space to 0 using a host virtual address + */ +void shared_memory_zero(vied_memory_t idm, host_virtual_address_t addr, size_t bytes); + +/** + * \brief Load a byte from (DDR) shared memory space using a host virtual address + */ +uint8_t shared_memory_load_8 (vied_memory_t idm, host_virtual_address_t addr); + +/** + * \brief Load a 16-bit word from (DDR) shared memory space using a host virtual address + */ +uint16_t shared_memory_load_16(vied_memory_t idm, host_virtual_address_t addr); + +/** + * \brief Load a 32-bit word from (DDR) shared memory space using a host virtual address + */ +uint32_t shared_memory_load_32(vied_memory_t idm, host_virtual_address_t addr); + +/** + * \brief Load a number of bytes from (DDR) shared memory space using a host virtual address + */ +void shared_memory_load(vied_memory_t idm, host_virtual_address_t addr, void *data, size_t bytes); + +#endif /* _SHARED_MEMORY_ACCESS_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/vied/vied/shared_memory_map.h 
b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/vied/vied/shared_memory_map.h new file mode 100644 index 0000000000000..1bbedcf9e7fd8 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/vied/vied/shared_memory_map.h @@ -0,0 +1,53 @@ +/* +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ +#ifndef _SHARED_MEMORY_MAP_H +#define _SHARED_MEMORY_MAP_H + +#include +#include +#include + +typedef void (*shared_memory_invalidate_mmu_tlb)(void); +typedef void (*shared_memory_set_page_table_base_address)(vied_physical_address_t); + +typedef void (*shared_memory_invalidate_mmu_tlb_ssid)(vied_subsystem_t id); +typedef void (*shared_memory_set_page_table_base_address_ssid)(vied_subsystem_t id, vied_physical_address_t); + +/** + * \brief Initialize the CSS virtual address system and MMU. The subsystem id will NOT be taken into account. +*/ +int shared_memory_map_initialize(vied_subsystem_t id, vied_memory_t idm, size_t mmu_ps, size_t mmu_pnrs, vied_physical_address_t ddr_addr, shared_memory_invalidate_mmu_tlb inv_tlb, shared_memory_set_page_table_base_address sbt); + +/** + * \brief Initialize the CSS virtual address system and MMU. The subsystem id will be taken into account. 
+*/ +int shared_memory_map_initialize_ssid(vied_subsystem_t id, vied_memory_t idm, size_t mmu_ps, size_t mmu_pnrs, vied_physical_address_t ddr_addr, shared_memory_invalidate_mmu_tlb_ssid inv_tlb, shared_memory_set_page_table_base_address_ssid sbt); + +/** + * \brief De-initialize the CSS virtual address system and MMU. +*/ +void shared_memory_map_uninitialize(vied_subsystem_t id, vied_memory_t idm); + +/** + * \brief Convert a host virtual address to a CSS virtual address and update the MMU. +*/ +vied_virtual_address_t shared_memory_map(vied_subsystem_t id, vied_memory_t idm, host_virtual_address_t addr); + +/** + * \brief Free a CSS virtual address and update the MMU. +*/ +void shared_memory_unmap(vied_subsystem_t id, vied_memory_t idm, vied_virtual_address_t addr); + + +#endif /* _SHARED_MEMORY_MAP_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/vied/vied/vied_config.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/vied/vied/vied_config.h new file mode 100644 index 0000000000000..912f016ead241 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/vied/vied/vied_config.h @@ -0,0 +1,33 @@ +/* +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+*/ +#ifndef _HRT_VIED_CONFIG_H +#define _HRT_VIED_CONFIG_H + +/* Defines from the compiler: + * HRT_HOST - this is code running on the host + * HRT_CELL - this is code running on a cell + */ +#ifdef HRT_HOST +# define CFG_VIED_SUBSYSTEM_ACCESS_LIB_IMPL 1 +# undef CFG_VIED_SUBSYSTEM_ACCESS_INLINE_IMPL + +#elif defined (HRT_CELL) +# undef CFG_VIED_SUBSYSTEM_ACCESS_LIB_IMPL +# define CFG_VIED_SUBSYSTEM_ACCESS_INLINE_IMPL 1 + +#else /* !HRT_CELL */ +/* Allow neither HRT_HOST nor HRT_CELL for testing purposes */ +#endif /* !HRT_CELL */ + +#endif /* _HRT_VIED_CONFIG_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/vied/vied/vied_memory_access_types.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/vied/vied/vied_memory_access_types.h new file mode 100644 index 0000000000000..0b44492789e37 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/vied/vied/vied_memory_access_types.h @@ -0,0 +1,36 @@ +/* +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ +#ifndef _HRT_VIED_MEMORY_ACCESS_TYPES_H +#define _HRT_VIED_MEMORY_ACCESS_TYPES_H + +/** Types for the VIED memory access interface */ + +#include "vied_types.h" + +/** + * \brief An identifier for a system memory. + * + * This identifier must be a compile-time constant. It is used in + * access to system memory. 
+ */ +typedef unsigned int vied_memory_t; + +#ifndef __HIVECC +/** + * \brief The type for a physical address + */ +typedef unsigned long long vied_physical_address_t; +#endif + +#endif /* _HRT_VIED_MEMORY_ACCESS_TYPES_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/vied/vied/vied_subsystem_access.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/vied/vied/vied_subsystem_access.h new file mode 100644 index 0000000000000..674f5fb5b0f99 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/vied/vied/vied_subsystem_access.h @@ -0,0 +1,70 @@ +/* +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+*/ +#ifndef _HRT_VIED_SUBSYSTEM_ACCESS_H +#define _HRT_VIED_SUBSYSTEM_ACCESS_H + +#include +#include "vied_config.h" +#include "vied_subsystem_access_types.h" + +#if !defined(CFG_VIED_SUBSYSTEM_ACCESS_INLINE_IMPL) && \ + !defined(CFG_VIED_SUBSYSTEM_ACCESS_LIB_IMPL) +#error Implementation selection macro for vied subsystem access not defined +#endif + +#if defined(CFG_VIED_SUBSYSTEM_ACCESS_INLINE_IMPL) +#ifndef __HIVECC +#error "Inline implementation of subsystem access not supported for host" +#endif +#define _VIED_SUBSYSTEM_ACCESS_INLINE static __inline +#include "vied_subsystem_access_impl.h" +#else +#define _VIED_SUBSYSTEM_ACCESS_INLINE +#endif + +_VIED_SUBSYSTEM_ACCESS_INLINE +void vied_subsystem_store_8 (vied_subsystem_t dev, + vied_subsystem_address_t addr, uint8_t data); + +_VIED_SUBSYSTEM_ACCESS_INLINE +void vied_subsystem_store_16(vied_subsystem_t dev, + vied_subsystem_address_t addr, uint16_t data); + +_VIED_SUBSYSTEM_ACCESS_INLINE +void vied_subsystem_store_32(vied_subsystem_t dev, + vied_subsystem_address_t addr, uint32_t data); + +_VIED_SUBSYSTEM_ACCESS_INLINE +void vied_subsystem_store(vied_subsystem_t dev, + vied_subsystem_address_t addr, + const void *data, unsigned int size); + +_VIED_SUBSYSTEM_ACCESS_INLINE +uint8_t vied_subsystem_load_8 (vied_subsystem_t dev, + vied_subsystem_address_t addr); + +_VIED_SUBSYSTEM_ACCESS_INLINE +uint16_t vied_subsystem_load_16(vied_subsystem_t dev, + vied_subsystem_address_t addr); + +_VIED_SUBSYSTEM_ACCESS_INLINE +uint32_t vied_subsystem_load_32(vied_subsystem_t dev, + vied_subsystem_address_t addr); + +_VIED_SUBSYSTEM_ACCESS_INLINE +void vied_subsystem_load(vied_subsystem_t dev, + vied_subsystem_address_t addr, + void *data, unsigned int size); + +#endif /* _HRT_VIED_SUBSYSTEM_ACCESS_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/vied/vied/vied_subsystem_access_initialization.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/vied/vied/vied_subsystem_access_initialization.h new file mode 100644 
index 0000000000000..81f4d08d5ae0e --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/vied/vied/vied_subsystem_access_initialization.h @@ -0,0 +1,44 @@ +/* +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ +#ifndef _HRT_VIED_SUBSYSTEM_ACCESS_INITIALIZE_H +#define _HRT_VIED_SUBSYSTEM_ACCESS_INITIALIZE_H + +#include "vied_subsystem_access_types.h" + +/** @brief Initialises the access of a subsystem. + * @param[in] system The subsystem for which the access has to be initialised. + * + * vied_subsystem_access_initialize initialises the access to a subsystem. + * It sets the base address of the subsystem. This base address is extracted from the hsd file. + * + */ +void +vied_subsystem_access_initialize(vied_subsystem_t system); + + +/** @brief Initialises the access of multiple subsystems. + * @param[in] nr_subsystems The number of subsystems for which the access has to be initialised. + * @param[in] dev_base_addresses A pointer to an array of base addresses of subsystems. + * The size of this array must be "nr_subsystems". + * This array must be available during the accesses of the subsystem. + * + * vied_subsystems_access_initialize initialises the access to multiple subsystems. + * It sets the base addresses of the subsystems that are provided by the array dev_base_addresses. 
+ * + */ +void +vied_subsystems_access_initialize( unsigned int nr_subsystems + , const vied_subsystem_base_address_t *base_addresses); + +#endif /* _HRT_VIED_SUBSYSTEM_ACCESS_INITIALIZE_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/vied/vied/vied_subsystem_access_types.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/vied/vied/vied_subsystem_access_types.h new file mode 100644 index 0000000000000..75fef6c4ddba2 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/vied/vied/vied_subsystem_access_types.h @@ -0,0 +1,34 @@ +/* +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ +#ifndef _HRT_VIED_SUBSYSTEM_ACCESS_TYPES_H +#define _HRT_VIED_SUBSYSTEM_ACCESS_TYPES_H + +/** Types for the VIED subsystem access interface */ +#include + +/** \brief An identifier for a VIED subsystem. + * + * This identifier must be a compile-time constant. It is used in + * access to a VIED subsystem. 
 + */ +typedef unsigned int vied_subsystem_t; + + +/** \brief An address within a VIED subsystem */ +typedef uint32_t vied_subsystem_address_t; + +/** \brief A base address of a VIED subsystem seen from the host */ +typedef unsigned long long vied_subsystem_base_address_t; + +#endif /* _HRT_VIED_SUBSYSTEM_ACCESS_TYPES_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/vied/vied/vied_types.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/vied/vied/vied_types.h new file mode 100644 index 0000000000000..0acfdbb00cfa3 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/vied/vied/vied_types.h @@ -0,0 +1,45 @@ +/* +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ +#ifndef _HRT_VIED_TYPES_H +#define _HRT_VIED_TYPES_H + +/** Types shared by VIED interfaces */ + +#include + +/** \brief An address within a VIED subsystem + * + * This will eventually replace the vied_memory_address_t and vied_subsystem_address_t + */ +typedef uint32_t vied_address_t; + +/** \brief Memory address type + * + * A memory address is an offset within a memory. + */ +typedef uint32_t vied_memory_address_t; + +/** \brief Master port id */ +typedef int vied_master_port_id_t; + +/** + * \brief Require the existence of a certain type + * + * This macro can be used in interface header files to ensure that + * an implementation-defined type with a specified name exists. 
+ */ +#define _VIED_REQUIRE_TYPE(T) enum { _VIED_SIZEOF_##T = sizeof(T) } + + +#endif /* _HRT_VIED_TYPES_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/Makefile b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/Makefile new file mode 100644 index 0000000000000..068d3207a0d91 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/Makefile @@ -0,0 +1,52 @@ +# +# Copyright (c) 2010 - 2018 Intel Corporation. +# +# This program is free software; you can redistribute it and/or modify it +# under the terms and conditions of the GNU General Public License, +# version 2, as published by the Free Software Foundation. +# +# This program is distributed in the hope it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for +# more details. +# + +ifneq ($(EXTERNAL_BUILD), 1) +srcpath := $(srctree) +endif + +include $(srcpath)/$(src)/../Makefile.ipu4ppsys_src +include $(srcpath)/$(src)/../Makefile.ipu4ppsys_inc + +SSID = 0 +MMID = 0 +IPU_SYSVER = cnl + +IPU_PSYSLIB_ROOT_REL = lib +IPU_PSYSLIB_ROOT = $(srcpath)/$(src)/$(IPU_PSYSLIB_ROOT_REL) + +ccflags-y += -I$(srcpath)/$(src)/../../../ +ccflags-y += -I$(srcpath)/$(src)/../../ +ccflags-y += -DHAS_DUAL_CMD_CTX_SUPPORT=0 -DHAS_LATE_BINDING_SUPPORT=0 -DIPU_PSYS_LEGACY + +IPU_PSYSLIB_SRC += libcsspsys2600.o + +#CFLAGS = -W -Wall -Wstrict-prototypes -Wmissing-prototypes -O2 -fomit-frame-pointer -Wno-unused-variable +HOST_DEFINES += -DSSID=$(SSID) +HOST_DEFINES += -DMMID=$(MMID) +HOST_DEFINES += -DHRT_ON_VIED_SUBSYSTEM_ACCESS=$(SSID) +HOST_DEFINES += -DCFG_VIED_SUBSYSTEM_ACCESS_LIB_IMPL +HOST_DEFINES += -DHRT_USE_VIR_ADDRS +HOST_DEFINES += -DHRT_HW +HOST_DEFINES += -DVIED_NCI_TUNIT_PSYS +HOST_DEFINES += -DFIRMWARE_RELEASE_VERSION +HOST_DEFINES += -DPSYS_SERVER_ON_SPC +HOST_DEFINES += -DAPI_SPLIT_START_STATE_UPDATE +HOST_DEFINES += -DHAS_DUAL_CMD_CTX_SUPPORT=0 +HOST_DEFINES += 
-DHAS_LATE_BINDING_SUPPORT=0 + +intel-ipu4p-psys-csslib-objs := ../../../ipu-wrapper.o \ + $(IPU_PSYSLIB_SRC) +obj-$(CONFIG_VIDEO_INTEL_IPU) += intel-ipu4p-psys-csslib.o + +ccflags-y += $(IPU_PSYSLIB_INC) $(HOST_DEFINES) -fno-common -v diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/CNL_program_group/ia_css_fw_pkg_release.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/CNL_program_group/ia_css_fw_pkg_release.h new file mode 100644 index 0000000000000..408726c817146 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/CNL_program_group/ia_css_fw_pkg_release.h @@ -0,0 +1,14 @@ +/* +* Support for Intel Camera Imaging ISP subsystem. +* Copyright (c) 2010 - 2018, Intel Corporation. +* +* This program is free software; you can redistribute it and/or modify it +* under the terms and conditions of the GNU General Public License, +* version 2, as published by the Free Software Foundation. +* +* This program is distributed in the hope it will be useful, but WITHOUT +* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for +* more details. +*/ +#define IA_CSS_FW_PKG_RELEASE 0x20181222 diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/ICL_program_group/ia_css_fw_pkg_release.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/ICL_program_group/ia_css_fw_pkg_release.h new file mode 100644 index 0000000000000..408726c817146 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/ICL_program_group/ia_css_fw_pkg_release.h @@ -0,0 +1,14 @@ +/* +* Support for Intel Camera Imaging ISP subsystem. +* Copyright (c) 2010 - 2018, Intel Corporation. +* +* This program is free software; you can redistribute it and/or modify it +* under the terms and conditions of the GNU General Public License, +* version 2, as published by the Free Software Foundation. 
+* +* This program is distributed in the hope it will be useful, but WITHOUT +* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for +* more details. +*/ +#define IA_CSS_FW_PKG_RELEASE 0x20181222 diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/buffer/buffer.mk b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/buffer/buffer.mk new file mode 100644 index 0000000000000..c00a1133b440f --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/buffer/buffer.mk @@ -0,0 +1,43 @@ +# # # +# Support for Intel Camera Imaging ISP subsystem. +# Copyright (c) 2010 - 2018, Intel Corporation. +# +# This program is free software; you can redistribute it and/or modify it +# under the terms and conditions of the GNU General Public License, +# version 2, as published by the Free Software Foundation. +# +# This program is distributed in the hope it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +# FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU General Public License for +# more details +# +# +# MODULE is BUFFER + +ifdef _H_BUFFER_MK +$(error ERROR: buffer.mk included multiple times, please check makefile) +else +_H_BUFFER_MK=1 +endif + +BUFFER_DIR=$${MODULES_DIR}/buffer + +BUFFER_INTERFACE=$(BUFFER_DIR)/interface +BUFFER_SOURCES_CPU=$(BUFFER_DIR)/src/cpu +BUFFER_SOURCES_CSS=$(BUFFER_DIR)/src/css + +BUFFER_HOST_FILES += $(BUFFER_SOURCES_CPU)/ia_css_buffer.c +BUFFER_HOST_FILES += $(BUFFER_SOURCES_CPU)/ia_css_output_buffer.c +BUFFER_HOST_FILES += $(BUFFER_SOURCES_CPU)/ia_css_input_buffer.c +BUFFER_HOST_FILES += $(BUFFER_SOURCES_CPU)/ia_css_shared_buffer.c +BUFFER_HOST_FILES += $(BUFFER_SOURCES_CPU)/buffer_access.c +BUFFER_HOST_CPPFLAGS += -I$(BUFFER_INTERFACE) +BUFFER_HOST_CPPFLAGS += -I$${MODULES_DIR}/support + +BUFFER_FW_FILES += $(BUFFER_SOURCES_CSS)/ia_css_input_buffer.c +BUFFER_FW_FILES += $(BUFFER_SOURCES_CSS)/ia_css_output_buffer.c +BUFFER_FW_FILES += $(BUFFER_SOURCES_CSS)/ia_css_shared_buffer.c +BUFFER_FW_FILES += $(BUFFER_SOURCES_CSS)/buffer_access.c + +BUFFER_FW_CPPFLAGS += -I$(BUFFER_INTERFACE) +BUFFER_FW_CPPFLAGS += -I$${MODULES_DIR}/support diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/buffer/interface/buffer_access.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/buffer/interface/buffer_access.h new file mode 100644 index 0000000000000..e5fe647742c9f --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/buffer/interface/buffer_access.h @@ -0,0 +1,36 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. 
+ * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ + +#ifndef __BUFFER_ACCESS_H +#define __BUFFER_ACCESS_H + +#include "buffer_type.h" +/* #def to keep consistent the buffer load interfaces for host and css */ +#define IDM 0 + +void +buffer_load( + buffer_address address, + void *data, + unsigned int size, + unsigned int mm_id); + +void +buffer_store( + buffer_address address, + const void *data, + unsigned int size, + unsigned int mm_id); + +#endif /* __BUFFER_ACCESS_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/buffer/interface/buffer_type.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/buffer/interface/buffer_type.h new file mode 100644 index 0000000000000..de51f23941582 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/buffer/interface/buffer_type.h @@ -0,0 +1,29 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+*/ + +#ifndef __BUFFER_TYPE_H +#define __BUFFER_TYPE_H + +/* portable access to buffers in DDR */ + +#ifdef __VIED_CELL +typedef unsigned int buffer_address; +#else +/* workaround needed because shared_memory_access.h uses size_t */ +#include "type_support.h" +#include "vied/shared_memory_access.h" +typedef host_virtual_address_t buffer_address; +#endif + +#endif /* __BUFFER_TYPE_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/buffer/interface/ia_css_buffer_address.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/buffer/interface/ia_css_buffer_address.h new file mode 100644 index 0000000000000..137bfb1fda166 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/buffer/interface/ia_css_buffer_address.h @@ -0,0 +1,24 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
 +*/ + +#ifndef __IA_CSS_BUFFER_ADDRESS_H +#define __IA_CSS_BUFFER_ADDRESS_H + +#include "type_support.h" + +typedef uint32_t ia_css_buffer_address; /* CSS virtual address */ + +#define ia_css_buffer_address_null ((ia_css_buffer_address)0) + +#endif /* __IA_CSS_BUFFER_ADDRESS_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/buffer/interface/ia_css_input_buffer.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/buffer/interface/ia_css_input_buffer.h new file mode 100644 index 0000000000000..4e92e35b61843 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/buffer/interface/ia_css_input_buffer.h @@ -0,0 +1,51 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ + +#ifndef __IA_CSS_INPUT_BUFFER_H +#define __IA_CSS_INPUT_BUFFER_H + + +/* Input Buffers */ + +/* A CSS input buffer is a buffer in DDR that can be written by the CPU, + * and that can be read by CSS hardware, after the buffer has been handed over. + * Examples: command buffer, input frame buffer, parameter buffer + * An input buffer must be mapped into the CPU address space before it can be + * written by the CPU. + * After mapping, writing, and unmapping, the buffer can be handed over to the + * firmware. An input buffer is handed over to the CSS by mapping it to the + * CSS address space (by the CPU), and by passing the resulting CSS (virtual) + * address of the input buffer to the DA CSS hardware. 
 + * The firmware can read from an input buffer as soon as it has received the + * CSS virtual address. + * The firmware should not write into an input buffer. + * The firmware hands over the input buffer (back to the CPU) by sending the + * buffer handle via a response. The host should unmap the buffer, + * before reusing it. + * The firmware should not read from the input buffer after returning the + * buffer handle to the CPU. + * + * A buffer may be pre-mapped to the CPU and/or to the CSS upon allocation, + * depending on the allocator's preference. In case of pre-mapped buffers, + * the map and unmap functions will only manage read and write access. + */ + +#include "ia_css_buffer_address.h" + +typedef struct ia_css_buffer_s *ia_css_input_buffer; /* input buffer handle */ +typedef void *ia_css_input_buffer_cpu_address; /* CPU virtual address */ +/* CSS virtual address */ +typedef ia_css_buffer_address ia_css_input_buffer_css_address; + +#endif /* __IA_CSS_INPUT_BUFFER_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/buffer/interface/ia_css_input_buffer_cpu.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/buffer/interface/ia_css_input_buffer_cpu.h new file mode 100644 index 0000000000000..d3d01353ce431 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/buffer/interface/ia_css_input_buffer_cpu.h @@ -0,0 +1,49 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+*/ + +#ifndef __IA_CSS_INPUT_BUFFER_CPU_H +#define __IA_CSS_INPUT_BUFFER_CPU_H + +#include "vied/shared_memory_map.h" +#include "ia_css_input_buffer.h" + +ia_css_input_buffer +ia_css_input_buffer_alloc( + vied_subsystem_t sid, + vied_memory_t mid, + unsigned int size); + +void +ia_css_input_buffer_free( + vied_subsystem_t sid, + vied_memory_t mid, + ia_css_input_buffer b); + +ia_css_input_buffer_cpu_address +ia_css_input_buffer_cpu_map(ia_css_input_buffer b); + +ia_css_input_buffer_cpu_address +ia_css_input_buffer_cpu_unmap(ia_css_input_buffer b); + +ia_css_input_buffer_css_address +ia_css_input_buffer_css_map(vied_memory_t mid, ia_css_input_buffer b); + +ia_css_input_buffer_css_address +ia_css_input_buffer_css_map_no_invalidate(vied_memory_t mid, ia_css_input_buffer b); + +ia_css_input_buffer_css_address +ia_css_input_buffer_css_unmap(ia_css_input_buffer b); + + +#endif /* __IA_CSS_INPUT_BUFFER_CPU_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/buffer/interface/ia_css_output_buffer.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/buffer/interface/ia_css_output_buffer.h new file mode 100644 index 0000000000000..2c310ea92c6af --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/buffer/interface/ia_css_output_buffer.h @@ -0,0 +1,30 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+*/ + +#ifndef __IA_CSS_OUTPUT_BUFFER_H +#define __IA_CSS_OUTPUT_BUFFER_H + +/* Output Buffers */ +/* A CSS output buffer a buffer in DDR that can be written by CSS hardware + * and that can be read by the host, after the buffer has been handed over + * Examples: output frame buffer + */ + +#include "ia_css_buffer_address.h" + +typedef struct ia_css_buffer_s *ia_css_output_buffer; +typedef void *ia_css_output_buffer_cpu_address; +typedef ia_css_buffer_address ia_css_output_buffer_css_address; + +#endif /* __IA_CSS_OUTPUT_BUFFER_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/buffer/interface/ia_css_output_buffer_cpu.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/buffer/interface/ia_css_output_buffer_cpu.h new file mode 100644 index 0000000000000..0299fc3b7eb66 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/buffer/interface/ia_css_output_buffer_cpu.h @@ -0,0 +1,48 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+*/ + +#ifndef __IA_CSS_OUTPUT_BUFFER_CPU_H +#define __IA_CSS_OUTPUT_BUFFER_CPU_H + +#include "vied/shared_memory_map.h" +#include "ia_css_output_buffer.h" + +ia_css_output_buffer +ia_css_output_buffer_alloc( + vied_subsystem_t sid, + vied_memory_t mid, + unsigned int size); + +void +ia_css_output_buffer_free( + vied_subsystem_t sid, + vied_memory_t mid, + ia_css_output_buffer b); + +ia_css_output_buffer_css_address +ia_css_output_buffer_css_map(ia_css_output_buffer b); + +ia_css_output_buffer_css_address +ia_css_output_buffer_css_unmap(ia_css_output_buffer b); + +ia_css_output_buffer_cpu_address +ia_css_output_buffer_cpu_map(vied_memory_t mid, ia_css_output_buffer b); +ia_css_output_buffer_cpu_address +ia_css_output_buffer_cpu_map_no_invalidate(vied_memory_t mid, ia_css_output_buffer b); + +ia_css_output_buffer_cpu_address +ia_css_output_buffer_cpu_unmap(ia_css_output_buffer b); + + +#endif /* __IA_CSS_OUTPUT_BUFFER_CPU_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/buffer/interface/ia_css_shared_buffer.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/buffer/interface/ia_css_shared_buffer.h new file mode 100644 index 0000000000000..558ec679f98a0 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/buffer/interface/ia_css_shared_buffer.h @@ -0,0 +1,32 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+*/ + +#ifndef __IA_CSS_SHARED_BUFFER_H +#define __IA_CSS_SHARED_BUFFER_H + +/* Shared Buffers */ +/* A CSS shared buffer is a buffer in DDR that can be read and written by the + * CPU and CSS. + * Both the CPU and CSS can have the buffer mapped simultaneously. + * Access rights are not managed by this interface, this could be done by means + * the read and write pointer of a queue, for example. + */ + +#include "ia_css_buffer_address.h" + +typedef struct ia_css_buffer_s *ia_css_shared_buffer; +typedef void *ia_css_shared_buffer_cpu_address; +typedef ia_css_buffer_address ia_css_shared_buffer_css_address; + +#endif /* __IA_CSS_SHARED_BUFFER_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/buffer/interface/ia_css_shared_buffer_cpu.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/buffer/interface/ia_css_shared_buffer_cpu.h new file mode 100644 index 0000000000000..ff62914f99dc3 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/buffer/interface/ia_css_shared_buffer_cpu.h @@ -0,0 +1,51 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+*/ + +#ifndef __IA_CSS_SHARED_BUFFER_CPU_H +#define __IA_CSS_SHARED_BUFFER_CPU_H + +#include "vied/shared_memory_map.h" +#include "ia_css_shared_buffer.h" + +ia_css_shared_buffer +ia_css_shared_buffer_alloc( + vied_subsystem_t sid, + vied_memory_t mid, + unsigned int size); + +void +ia_css_shared_buffer_free( + vied_subsystem_t sid, + vied_memory_t mid, + ia_css_shared_buffer b); + +ia_css_shared_buffer_cpu_address +ia_css_shared_buffer_cpu_map(ia_css_shared_buffer b); + +ia_css_shared_buffer_cpu_address +ia_css_shared_buffer_cpu_unmap(ia_css_shared_buffer b); + +ia_css_shared_buffer_css_address +ia_css_shared_buffer_css_map(ia_css_shared_buffer b); + +ia_css_shared_buffer_css_address +ia_css_shared_buffer_css_unmap(ia_css_shared_buffer b); + +ia_css_shared_buffer +ia_css_shared_buffer_css_update(vied_memory_t mid, ia_css_shared_buffer b); + +ia_css_shared_buffer +ia_css_shared_buffer_cpu_update(vied_memory_t mid, ia_css_shared_buffer b); + +#endif /* __IA_CSS_SHARED_BUFFER_CPU_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/buffer/src/cpu/buffer_access.c b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/buffer/src/cpu/buffer_access.c new file mode 100644 index 0000000000000..f0c617fe501a0 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/buffer/src/cpu/buffer_access.c @@ -0,0 +1,39 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+*/ + +/* implementation of buffer access from the CPU */ +/* using shared_memory interface */ + +#include "buffer_access.h" +#include "vied/shared_memory_access.h" + +void +buffer_load( + buffer_address address, + void *data, + unsigned int bytes, + unsigned int mm_id) +{ + shared_memory_load(mm_id, address, data, bytes); +} + +void +buffer_store( + buffer_address address, + const void *data, + unsigned int bytes, + unsigned int mm_id) +{ + shared_memory_store(mm_id, address, data, bytes); +} diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/buffer/src/cpu/ia_css_buffer.c b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/buffer/src/cpu/ia_css_buffer.c new file mode 100644 index 0000000000000..146d4109de440 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/buffer/src/cpu/ia_css_buffer.c @@ -0,0 +1,51 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+*/ + +/* provided interface */ +#include "ia_css_buffer.h" + +/* used interfaces */ +#include "vied/shared_memory_access.h" +#include "vied/shared_memory_map.h" +#include "cpu_mem_support.h" + +ia_css_buffer_t +ia_css_buffer_alloc(vied_subsystem_t sid, vied_memory_t mid, unsigned int size) +{ + ia_css_buffer_t b; + + b = ia_css_cpu_mem_alloc(sizeof(*b)); + if (b == NULL) + return NULL; + + b->mem = shared_memory_alloc(mid, size); + + if (b->mem == 0) { + ia_css_cpu_mem_free(b); + return NULL; + } + + b->css_address = shared_memory_map(sid, mid, b->mem); + b->size = size; + return b; +} + + +void +ia_css_buffer_free(vied_subsystem_t sid, vied_memory_t mid, ia_css_buffer_t b) +{ + shared_memory_unmap(sid, mid, b->css_address); + shared_memory_free(mid, b->mem); + ia_css_cpu_mem_free(b); +} diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/buffer/src/cpu/ia_css_buffer.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/buffer/src/cpu/ia_css_buffer.h new file mode 100644 index 0000000000000..0f99a06e9a89b --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/buffer/src/cpu/ia_css_buffer.h @@ -0,0 +1,58 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+*/ + +#ifndef __IA_CSS_BUFFER_H +#define __IA_CSS_BUFFER_H + +/* workaround: needed because uses size_t */ +#include "type_support.h" +#include "vied/shared_memory_map.h" + +typedef enum { + buffer_unmapped, /* buffer is not accessible by cpu, nor css */ + buffer_write, /* output buffer: css has write access */ + /* input buffer: cpu has write access */ + buffer_read, /* input buffer: css has read access */ + /* output buffer: cpu has read access */ + buffer_cpu, /* shared buffer: cpu has read/write access */ + buffer_css /* shared buffer: css has read/write access */ +} buffer_state; + +struct ia_css_buffer_s { + /* number of bytes bytes allocated */ + unsigned int size; + /* allocated virtual memory object */ + host_virtual_address_t mem; + /* virtual address to be used on css/firmware */ + vied_virtual_address_t css_address; + /* virtual address to be used on cpu/host */ + void *cpu_address; + buffer_state state; +}; + +typedef struct ia_css_buffer_s *ia_css_buffer_t; + +ia_css_buffer_t +ia_css_buffer_alloc( + vied_subsystem_t sid, + vied_memory_t mid, + unsigned int size); + +void +ia_css_buffer_free( + vied_subsystem_t sid, + vied_memory_t mid, + ia_css_buffer_t b); + +#endif /* __IA_CSS_BUFFER_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/buffer/src/cpu/ia_css_input_buffer.c b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/buffer/src/cpu/ia_css_input_buffer.c new file mode 100644 index 0000000000000..2a128795d03e2 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/buffer/src/cpu/ia_css_input_buffer.c @@ -0,0 +1,184 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. 
+ * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ + + +#include "ia_css_input_buffer_cpu.h" +#include "ia_css_buffer.h" +#include "vied/shared_memory_access.h" +#include "vied/shared_memory_map.h" +#include "cpu_mem_support.h" + + +ia_css_input_buffer +ia_css_input_buffer_alloc( + vied_subsystem_t sid, + vied_memory_t mid, + unsigned int size) +{ + ia_css_input_buffer b; + + /* allocate buffer container */ + b = ia_css_cpu_mem_alloc(sizeof(*b)); + if (b == NULL) + return NULL; + + b->mem = shared_memory_alloc(mid, size); + if (b->mem == 0) { + ia_css_cpu_mem_free(b); + return NULL; + } + +#ifndef HRT_HW + /* initialize the buffer to avoid warnings when copying */ + shared_memory_zero(mid, b->mem, size); + + /* in simulation, we need to allocate a shadow host buffer */ + b->cpu_address = ia_css_cpu_mem_alloc_page_aligned(size); + if (b->cpu_address == NULL) { + shared_memory_free(mid, b->mem); + ia_css_cpu_mem_free(b); + return NULL; + } +#else + /* on hw / real platform we can use the pointer from + * shared memory alloc + */ + b->cpu_address = (void *)HOST_ADDRESS(b->mem); +#endif + + b->css_address = shared_memory_map(sid, mid, b->mem); + + b->size = size; + b->state = buffer_unmapped; + + return b; +} + + +void +ia_css_input_buffer_free( + vied_subsystem_t sid, + vied_memory_t mid, + ia_css_input_buffer b) +{ + if (b == NULL) + return; + if (b->state != buffer_unmapped) + return; + +#ifndef HRT_HW + /* only free if we actually allocated it separately */ + ia_css_cpu_mem_free(b->cpu_address); +#endif + shared_memory_unmap(sid, mid, b->css_address); + shared_memory_free(mid, b->mem); + ia_css_cpu_mem_free(b); +} + + +ia_css_input_buffer_cpu_address +ia_css_input_buffer_cpu_map(ia_css_input_buffer b) +{ + if (b == NULL) + return NULL; + if (b->state != 
buffer_unmapped) + return NULL; + + /* map input buffer to CPU address space, acquire write access */ + b->state = buffer_write; + + /* return pre-mapped buffer */ + return b->cpu_address; +} + + +ia_css_input_buffer_cpu_address +ia_css_input_buffer_cpu_unmap(ia_css_input_buffer b) +{ + if (b == NULL) + return NULL; + if (b->state != buffer_write) + return NULL; + + /* unmap input buffer from CPU address space, release write access */ + b->state = buffer_unmapped; + + /* return pre-mapped buffer */ + return b->cpu_address; +} + + +ia_css_input_buffer_css_address +ia_css_input_buffer_css_map(vied_memory_t mid, ia_css_input_buffer b) +{ + if (b == NULL) + return 0; + if (b->state != buffer_unmapped) + return 0; + + /* map input buffer to CSS address space, acquire read access */ + b->state = buffer_read; + + /* now flush the cache */ + ia_css_cpu_mem_cache_flush(b->cpu_address, b->size); +#ifndef HRT_HW + /* only copy in case of simulation, otherwise it should just work */ + /* copy data from CPU address space to CSS address space */ + shared_memory_store(mid, b->mem, b->cpu_address, b->size); +#else + (void)mid; +#endif + + return (ia_css_input_buffer_css_address)b->css_address; +} + + +ia_css_input_buffer_css_address +ia_css_input_buffer_css_map_no_invalidate(vied_memory_t mid, ia_css_input_buffer b) +{ + if (b == NULL) + return 0; + if (b->state != buffer_unmapped) + return 0; + + /* map input buffer to CSS address space, acquire read access */ + b->state = buffer_read; + +#ifndef HRT_HW + /* only copy in case of simulation, otherwise it should just work */ + /* copy data from CPU address space to CSS address space */ + shared_memory_store(mid, b->mem, b->cpu_address, b->size); +#else + (void)mid; +#endif + + return (ia_css_input_buffer_css_address)b->css_address; +} + + +ia_css_input_buffer_css_address +ia_css_input_buffer_css_unmap(ia_css_input_buffer b) +{ + if (b == NULL) + return 0; + if (b->state != buffer_read) + return 0; + + /* unmap input buffer from CSS 
address space, release read access */ + b->state = buffer_unmapped; + + /* input buffer only, no need to invalidate cache */ + + return (ia_css_input_buffer_css_address)b->css_address; +} diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/buffer/src/cpu/ia_css_output_buffer.c b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/buffer/src/cpu/ia_css_output_buffer.c new file mode 100644 index 0000000000000..30bc8d52a5a9e --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/buffer/src/cpu/ia_css_output_buffer.c @@ -0,0 +1,181 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+*/ + + +#include "ia_css_output_buffer_cpu.h" +#include "ia_css_buffer.h" +#include "vied/shared_memory_access.h" +#include "vied/shared_memory_map.h" +#include "cpu_mem_support.h" + + +ia_css_output_buffer +ia_css_output_buffer_alloc( + vied_subsystem_t sid, + vied_memory_t mid, + unsigned int size) +{ + ia_css_output_buffer b; + + /* allocate buffer container */ + b = ia_css_cpu_mem_alloc(sizeof(*b)); + if (b == NULL) + return NULL; + + b->mem = shared_memory_alloc(mid, size); + if (b->mem == 0) { + ia_css_cpu_mem_free(b); + return NULL; + } + +#ifndef HRT_HW + /* initialize the buffer to avoid warnings when copying */ + shared_memory_zero(mid, b->mem, size); + + /* in simulation, we need to allocate a shadow host buffer */ + b->cpu_address = ia_css_cpu_mem_alloc_page_aligned(size); + if (b->cpu_address == NULL) { + shared_memory_free(mid, b->mem); + ia_css_cpu_mem_free(b); + return NULL; + } +#else + /* on hw / real platform we can use the pointer from + * shared memory alloc + */ + b->cpu_address = (void *)HOST_ADDRESS(b->mem); +#endif + + b->css_address = shared_memory_map(sid, mid, b->mem); + + b->size = size; + b->state = buffer_unmapped; + + return b; +} + + +void +ia_css_output_buffer_free( + vied_subsystem_t sid, + vied_memory_t mid, + ia_css_output_buffer b) +{ + if (b == NULL) + return; + if (b->state != buffer_unmapped) + return; + +#ifndef HRT_HW + /* only free if we actually allocated it separately */ + ia_css_cpu_mem_free(b->cpu_address); +#endif + shared_memory_unmap(sid, mid, b->css_address); + shared_memory_free(mid, b->mem); + ia_css_cpu_mem_free(b); +} + + +ia_css_output_buffer_css_address +ia_css_output_buffer_css_map(ia_css_output_buffer b) +{ + if (b == NULL) + return 0; + if (b->state != buffer_unmapped) + return 0; + + /* map output buffer to CSS address space, acquire write access */ + b->state = buffer_write; + + return (ia_css_output_buffer_css_address)b->css_address; +} + + +ia_css_output_buffer_css_address 
+ia_css_output_buffer_css_unmap(ia_css_output_buffer b) +{ + if (b == NULL) + return 0; + if (b->state != buffer_write) + return 0; + + /* unmap output buffer from CSS address space, release write access */ + b->state = buffer_unmapped; + + return (ia_css_output_buffer_css_address)b->css_address; +} + + +ia_css_output_buffer_cpu_address +ia_css_output_buffer_cpu_map(vied_memory_t mid, ia_css_output_buffer b) +{ + if (b == NULL) + return NULL; + if (b->state != buffer_unmapped) + return NULL; + + /* map output buffer to CPU address space, acquire read access */ + b->state = buffer_read; + +#ifndef HRT_HW + /* only in simulation */ + /* copy data from CSS address space to CPU address space */ + shared_memory_load(mid, b->mem, b->cpu_address, b->size); +#else + (void)mid; +#endif + /* now invalidate the cache */ + ia_css_cpu_mem_cache_invalidate(b->cpu_address, b->size); + + return b->cpu_address; +} + + +ia_css_output_buffer_cpu_address +ia_css_output_buffer_cpu_map_no_invalidate(vied_memory_t mid, ia_css_output_buffer b) +{ + if (b == NULL) + return NULL; + if (b->state != buffer_unmapped) + return NULL; + + /* map output buffer to CPU address space, acquire read access */ + b->state = buffer_read; + +#ifndef HRT_HW + /* only in simulation */ + /* copy data from CSS address space to CPU address space */ + shared_memory_load(mid, b->mem, b->cpu_address, b->size); +#else + (void)mid; +#endif + + return b->cpu_address; +} + +ia_css_output_buffer_cpu_address +ia_css_output_buffer_cpu_unmap(ia_css_output_buffer b) +{ + if (b == NULL) + return NULL; + if (b->state != buffer_read) + return NULL; + + /* unmap output buffer from CPU address space, release read access */ + b->state = buffer_unmapped; + + /* output only, no need to flush cache */ + + return b->cpu_address; +} diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/buffer/src/cpu/ia_css_shared_buffer.c b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/buffer/src/cpu/ia_css_shared_buffer.c new 
file mode 100644 index 0000000000000..92b7110644fe3 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/buffer/src/cpu/ia_css_shared_buffer.c @@ -0,0 +1,187 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ + + +#include "ia_css_shared_buffer_cpu.h" +#include "ia_css_buffer.h" +#include "vied/shared_memory_access.h" +#include "vied/shared_memory_map.h" +#include "cpu_mem_support.h" + + +ia_css_shared_buffer +ia_css_shared_buffer_alloc( + vied_subsystem_t sid, + vied_memory_t mid, + unsigned int size) +{ + ia_css_shared_buffer b; + + /* allocate buffer container */ + b = ia_css_cpu_mem_alloc(sizeof(*b)); + if (b == NULL) + return NULL; + + b->mem = shared_memory_alloc(mid, size); + if (b->mem == 0) { + ia_css_cpu_mem_free(b); + return NULL; + } + +#ifndef HRT_HW + /* initialize the buffer to avoid warnings when copying */ + shared_memory_zero(mid, b->mem, size); + + /* in simulation, we need to allocate a shadow host buffer */ + b->cpu_address = ia_css_cpu_mem_alloc_page_aligned(size); + if (b->cpu_address == NULL) { + shared_memory_free(mid, b->mem); + ia_css_cpu_mem_free(b); + return NULL; + } +#else + /* on hw / real platform we can use the pointer from + * shared memory alloc + */ + b->cpu_address = (void *)HOST_ADDRESS(b->mem); +#endif + + b->css_address = shared_memory_map(sid, mid, b->mem); + + b->size = size; + b->state = buffer_unmapped; + + return b; +} + + +void +ia_css_shared_buffer_free( + vied_subsystem_t sid, + 
vied_memory_t mid, + ia_css_shared_buffer b) +{ + if (b == NULL) + return; + if (b->state != buffer_unmapped) + return; + +#ifndef HRT_HW + /* only free if we actually allocated it separately */ + ia_css_cpu_mem_free(b->cpu_address); +#endif + shared_memory_unmap(sid, mid, b->css_address); + shared_memory_free(mid, b->mem); + ia_css_cpu_mem_free(b); +} + + +ia_css_shared_buffer_cpu_address +ia_css_shared_buffer_cpu_map(ia_css_shared_buffer b) +{ + if (b == NULL) + return NULL; + if (b->state != buffer_unmapped) + return NULL; + + /* map shared buffer to CPU address space */ + b->state = buffer_cpu; + + return b->cpu_address; +} + + +ia_css_shared_buffer_cpu_address +ia_css_shared_buffer_cpu_unmap(ia_css_shared_buffer b) +{ + if (b == NULL) + return NULL; + if (b->state != buffer_cpu) + return NULL; + + /* unmap shared buffer from CPU address space */ + b->state = buffer_unmapped; + + return b->cpu_address; +} + + +ia_css_shared_buffer_css_address +ia_css_shared_buffer_css_map(ia_css_shared_buffer b) +{ + if (b == NULL) + return 0; + if (b->state != buffer_unmapped) + return 0; + + /* map shared buffer to CSS address space */ + b->state = buffer_css; + + return (ia_css_shared_buffer_css_address)b->css_address; +} + + +ia_css_shared_buffer_css_address +ia_css_shared_buffer_css_unmap(ia_css_shared_buffer b) +{ + if (b == NULL) + return 0; + if (b->state != buffer_css) + return 0; + + /* unmap shared buffer from CSS address space */ + b->state = buffer_unmapped; + + return (ia_css_shared_buffer_css_address)b->css_address; +} + + +ia_css_shared_buffer +ia_css_shared_buffer_css_update(vied_memory_t mid, ia_css_shared_buffer b) +{ + if (b == NULL) + return NULL; + + /* flush the buffer to CSS after it was modified by the CPU */ + /* flush cache to ddr */ + ia_css_cpu_mem_cache_flush(b->cpu_address, b->size); +#ifndef HRT_HW + /* copy data from CPU address space to CSS address space */ + shared_memory_store(mid, b->mem, b->cpu_address, b->size); +#else + (void)mid; +#endif 
+ + return b; +} + + +ia_css_shared_buffer +ia_css_shared_buffer_cpu_update(vied_memory_t mid, ia_css_shared_buffer b) +{ + if (b == NULL) + return NULL; + + /* flush the buffer to the CPU after it has been modified by CSS */ +#ifndef HRT_HW + /* copy data from CSS address space to CPU address space */ + shared_memory_load(mid, b->mem, b->cpu_address, b->size); +#else + (void)mid; +#endif + /* flush cache to ddr */ + ia_css_cpu_mem_cache_invalidate(b->cpu_address, b->size); + + return b; +} diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/cell/cell.mk b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/cell/cell.mk new file mode 100644 index 0000000000000..fa5e650226017 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/cell/cell.mk @@ -0,0 +1,43 @@ +# # # +# Support for Intel Camera Imaging ISP subsystem. +# Copyright (c) 2010 - 2018, Intel Corporation. +# +# This program is free software; you can redistribute it and/or modify it +# under the terms and conditions of the GNU General Public License, +# version 2, as published by the Free Software Foundation. +# +# This program is distributed in the hope it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +# FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU General Public License for +# more details +# +# +ifndef _CELL_MK_ +_CELL_MK_ = 1 + + +CELL_DIR=$${MODULES_DIR}/cell +CELL_INTERFACE=$(CELL_DIR)/interface +CELL_SOURCES=$(CELL_DIR)/src + +CELL_HOST_FILES = +CELL_FW_FILES = + +CELL_HOST_CPPFLAGS = \ + -I$(CELL_INTERFACE) \ + -I$(CELL_SOURCES) + +CELL_FW_CPPFLAGS = \ + -I$(CELL_INTERFACE) \ + -I$(CELL_SOURCES) + +ifdef 0 +# Disabled until it is decided to go this way or not +include $(MODULES_DIR)/device_access/device_access.mk +CELL_HOST_FILES += $(DEVICE_ACCESS_HOST_FILES) +CELL_FW_FILES += $(DEVICE_ACCESS_FW_FILES) +CELL_HOST_CPPFLAGS += $(DEVICE_ACCESS_HOST_CPPFLAGS) +CELL_FW_CPPFLAGS += $(DEVICE_ACCESS_FW_CPPFLAGS) +endif + +endif diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/cell/interface/ia_css_cell.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/cell/interface/ia_css_cell.h new file mode 100644 index 0000000000000..3fac3c791b6e6 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/cell/interface/ia_css_cell.h @@ -0,0 +1,112 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+*/ + +#ifndef __IA_CSS_CELL_H +#define __IA_CSS_CELL_H + +#include "storage_class.h" +#include "type_support.h" + +STORAGE_CLASS_INLINE unsigned int +ia_css_cell_get_stat_ctrl(unsigned int ssid, unsigned int cell_id); + +STORAGE_CLASS_INLINE void +ia_css_cell_set_stat_ctrl(unsigned int ssid, unsigned int cell_id, + unsigned int value); + +STORAGE_CLASS_INLINE void +ia_css_cell_set_start_pc(unsigned int ssid, unsigned int cell_id, + unsigned int pc); + +STORAGE_CLASS_INLINE void +ia_css_cell_set_icache_base_address(unsigned int ssid, unsigned int cell_id, + unsigned int value); + +#if 0 /* To be implemented after completing cell device properties */ +STORAGE_CLASS_INLINE void +ia_css_cell_set_icache_info_bits(unsigned int ssid, unsigned int cell_id, + unsigned int value); + +STORAGE_CLASS_INLINE unsigned int +ia_css_cell_get_debug_pc(unsigned int ssid, unsigned int cell_id); + +STORAGE_CLASS_INLINE unsigned int +ia_css_cell_get_stall_bits(unsigned int ssid, unsigned int cell_id); +#endif + +/* configure master ports */ + +STORAGE_CLASS_INLINE void +ia_css_cell_set_master_base_address(unsigned int ssid, unsigned int cell_id, + unsigned int master, unsigned int value); + +STORAGE_CLASS_INLINE void +ia_css_cell_set_master_segment_base_address(unsigned int ssid, + unsigned int cell_id, + unsigned int master, unsigned int segment, unsigned int value); + +STORAGE_CLASS_INLINE void +ia_css_cell_set_master_info_bits(unsigned int ssid, unsigned int cell_id, + unsigned int master, unsigned int value); + +STORAGE_CLASS_INLINE void +ia_css_cell_set_master_segment_info_bits(unsigned int ssid, + unsigned int cell_id, + unsigned int master, unsigned int segment, unsigned int value); + +STORAGE_CLASS_INLINE void +ia_css_cell_set_master_info_override_bits(unsigned int ssid, unsigned int cell, + unsigned int master, unsigned int value); + +STORAGE_CLASS_INLINE void +ia_css_cell_set_master_segment_info_override_bits(unsigned int ssid, + unsigned int cell, + unsigned int master, 
unsigned int segment, unsigned int value); + +/* Access memories */ + +STORAGE_CLASS_INLINE void +ia_css_cell_mem_store_32(unsigned int ssid, unsigned int cell_id, + unsigned int mem_id, unsigned int addr, unsigned int value); + +STORAGE_CLASS_INLINE unsigned int +ia_css_cell_mem_load_32(unsigned int ssid, unsigned int cell_id, + unsigned int mem_id, unsigned int addr); + +/***********************************************************************/ + +STORAGE_CLASS_INLINE unsigned int +ia_css_cell_is_ready(unsigned int ssid, unsigned int cell_id); + +STORAGE_CLASS_INLINE void +ia_css_cell_set_start_bit(unsigned int ssid, unsigned int cell_id); + +STORAGE_CLASS_INLINE void +ia_css_cell_set_run_bit(unsigned int ssid, unsigned int cell_id, + unsigned int value); + +STORAGE_CLASS_INLINE void +ia_css_cell_start(unsigned int ssid, unsigned int cell_id); + +STORAGE_CLASS_INLINE void +ia_css_cell_start_prefetch(unsigned int ssid, unsigned int cell_id, + bool prefetch); + +STORAGE_CLASS_INLINE void +ia_css_cell_wait(unsigned int ssid, unsigned int cell_id); + +/* include inline implementation */ +#include "ia_css_cell_impl.h" + +#endif /* __IA_CSS_CELL_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/cell/src/ia_css_cell_impl.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/cell/src/ia_css_cell_impl.h new file mode 100644 index 0000000000000..60b2e234da1a0 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/cell/src/ia_css_cell_impl.h @@ -0,0 +1,272 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. 
+ * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ + +#ifndef __IA_CSS_CELL_IMPL_H +#define __IA_CSS_CELL_IMPL_H + +#include "ia_css_cell.h" + +#include "ia_css_cmem.h" +#include "ipu_device_cell_properties.h" +#include "storage_class.h" +#include "assert_support.h" +#include "platform_support.h" +#include "misc_support.h" + +STORAGE_CLASS_INLINE unsigned int +ia_css_cell_regs_addr(unsigned int cell_id) +{ + /* mem_id 0 is for registers */ + return ipu_device_cell_memory_address(cell_id, 0); +} + +STORAGE_CLASS_INLINE unsigned int +ia_css_cell_dmem_addr(unsigned int cell_id) +{ + /* mem_id 1 is for DMEM */ + return ipu_device_cell_memory_address(cell_id, 1); +} + +STORAGE_CLASS_INLINE void +ia_css_cell_mem_store_32(unsigned int ssid, unsigned int cell_id, + unsigned int mem_id, unsigned int addr, unsigned int value) +{ + ia_css_cmem_store_32( + ssid, ipu_device_cell_memory_address( + cell_id, mem_id) + addr, value); +} + +STORAGE_CLASS_INLINE unsigned int +ia_css_cell_mem_load_32(unsigned int ssid, unsigned int cell_id, + unsigned int mem_id, unsigned int addr) +{ + return ia_css_cmem_load_32( + ssid, ipu_device_cell_memory_address(cell_id, mem_id) + addr); +} + +STORAGE_CLASS_INLINE unsigned int +ia_css_cell_get_stat_ctrl(unsigned int ssid, unsigned int cell_id) +{ + return ia_css_cmem_load_32( + ssid, ia_css_cell_regs_addr(cell_id) + + IPU_DEVICE_CELL_STAT_CTRL_REG_ADDRESS); +} + +STORAGE_CLASS_INLINE void +ia_css_cell_set_stat_ctrl(unsigned int ssid, unsigned int cell_id, + unsigned int value) +{ + ia_css_cmem_store_32( + ssid, ia_css_cell_regs_addr(cell_id) + + IPU_DEVICE_CELL_STAT_CTRL_REG_ADDRESS, value); +} + +STORAGE_CLASS_INLINE unsigned int +ia_css_cell_is_ready(unsigned int ssid, unsigned int cell_id) +{ + unsigned int reg; + + reg = 
ia_css_cell_get_stat_ctrl(ssid, cell_id); + /* READY must be 1, START must be 0 */ + return (reg & (1 << IPU_DEVICE_CELL_STAT_CTRL_READY_BIT)) && + ((~reg) & (1 << IPU_DEVICE_CELL_STAT_CTRL_START_BIT)); +} + +STORAGE_CLASS_INLINE void +ia_css_cell_set_start_pc(unsigned int ssid, unsigned int cell_id, + unsigned int pc) +{ + /* set start PC */ + ia_css_cmem_store_32( + ssid, ia_css_cell_regs_addr(cell_id) + + IPU_DEVICE_CELL_START_PC_REG_ADDRESS, pc); +} + +STORAGE_CLASS_INLINE void +ia_css_cell_set_start_bit(unsigned int ssid, unsigned int cell_id) +{ + unsigned int reg; + + reg = 1 << IPU_DEVICE_CELL_STAT_CTRL_START_BIT; + ia_css_cell_set_stat_ctrl(ssid, cell_id, reg); +} + +STORAGE_CLASS_INLINE void +ia_css_cell_set_run_bit(unsigned int ssid, unsigned int cell_id, + unsigned int value) +{ + unsigned int reg; + + reg = value << IPU_DEVICE_CELL_STAT_CTRL_RUN_BIT; + ia_css_cell_set_stat_ctrl(ssid, cell_id, reg); +} + +STORAGE_CLASS_INLINE void +ia_css_cell_start(unsigned int ssid, unsigned int cell_id) +{ + ia_css_cell_start_prefetch(ssid, cell_id, 0); +} + +STORAGE_CLASS_INLINE void +ia_css_cell_start_prefetch(unsigned int ssid, unsigned int cell_id, + bool prefetch) +{ + unsigned int reg = 0; + + /* Set run bit and start bit */ + reg |= (1 << IPU_DEVICE_CELL_STAT_CTRL_START_BIT); + reg |= (1 << IPU_DEVICE_CELL_STAT_CTRL_RUN_BIT); + /* Invalidate the icache */ + reg |= (1 << IPU_DEVICE_CELL_STAT_CTRL_INVALIDATE_ICACHE_BIT); + /* Optionally enable prefetching */ + reg |= ((prefetch == 1) ? 
+ (1 << IPU_DEVICE_CELL_STAT_CTRL_ICACHE_ENABLE_PREFETCH_BIT) : + 0); + + /* store into register */ + ia_css_cell_set_stat_ctrl(ssid, cell_id, reg); +} + +STORAGE_CLASS_INLINE void +ia_css_cell_wait(unsigned int ssid, unsigned int cell_id) +{ + do { + ia_css_sleep(); + } while (!ia_css_cell_is_ready(ssid, cell_id)); +}; + +STORAGE_CLASS_INLINE void +ia_css_cell_set_icache_base_address(unsigned int ssid, unsigned int cell_id, + unsigned int value) +{ + ia_css_cmem_store_32( + ssid, ia_css_cell_regs_addr(cell_id) + + IPU_DEVICE_CELL_ICACHE_BASE_REG_ADDRESS, value); +} + +/* master port configuration */ + + +STORAGE_CLASS_INLINE void +ia_css_cell_set_master_segment_info_bits(unsigned int ssid, unsigned int cell, + unsigned int master, unsigned int segment, unsigned int value) +{ + unsigned int addr; + + assert(cell < ipu_device_cell_num_devices()); + assert(master < ipu_device_cell_num_masters(cell)); + assert(segment < ipu_device_cell_master_num_segments(cell, master)); + + addr = ipu_device_cell_memory_address(cell, 0); + addr += ipu_device_cell_master_info_reg(cell, master); + addr += segment * ipu_device_cell_master_stride(cell, master); + ia_css_cmem_store_32(ssid, addr, value); +} + +STORAGE_CLASS_INLINE void +ia_css_cell_set_master_segment_info_override_bits(unsigned int ssid, + unsigned int cell, + unsigned int master, unsigned int segment, unsigned int value) +{ + unsigned int addr; + + assert(cell < ipu_device_cell_num_devices()); + assert(master < ipu_device_cell_num_masters(cell)); + assert(segment < ipu_device_cell_master_num_segments(cell, master)); + + addr = ipu_device_cell_memory_address(cell, 0); + addr += ipu_device_cell_master_info_override_reg(cell, master); + addr += segment * ipu_device_cell_master_stride(cell, master); + ia_css_cmem_store_32(ssid, addr, value); +} + +STORAGE_CLASS_INLINE void +ia_css_cell_set_master_segment_base_address(unsigned int ssid, + unsigned int cell, + unsigned int master, unsigned int segment, unsigned int value) + +{ 
+ unsigned int addr; + + assert(cell < ipu_device_cell_num_devices()); + assert(master < ipu_device_cell_num_masters(cell)); + assert(segment < ipu_device_cell_master_num_segments(cell, master)); + + addr = ipu_device_cell_memory_address(cell, 0); + addr += ipu_device_cell_master_base_reg(cell, master); + addr += segment * ipu_device_cell_master_stride(cell, master); + ia_css_cmem_store_32(ssid, addr, value); +} + +STORAGE_CLASS_INLINE void +ia_css_cell_set_master_info_bits(unsigned int ssid, unsigned int cell, + unsigned int master, unsigned int value) +{ + unsigned int addr, s, stride, num_segments; + + assert(cell < ipu_device_cell_num_devices()); + assert(master < ipu_device_cell_num_masters(cell)); + + addr = ipu_device_cell_memory_address(cell, 0); + addr += ipu_device_cell_master_info_reg(cell, master); + stride = ipu_device_cell_master_stride(cell, master); + num_segments = ipu_device_cell_master_num_segments(cell, master); + for (s = 0; s < num_segments; s++) { + ia_css_cmem_store_32(ssid, addr, value); + addr += stride; + } +} + +STORAGE_CLASS_INLINE void +ia_css_cell_set_master_info_override_bits(unsigned int ssid, unsigned int cell, + unsigned int master, unsigned int value) +{ + unsigned int addr, s, stride, num_segments; + + assert(cell < ipu_device_cell_num_devices()); + assert(master < ipu_device_cell_num_masters(cell)); + + addr = ipu_device_cell_memory_address(cell, 0); + addr += ipu_device_cell_master_info_override_reg(cell, master); + stride = ipu_device_cell_master_stride(cell, master); + num_segments = ipu_device_cell_master_num_segments(cell, master); + for (s = 0; s < num_segments; s++) { + ia_css_cmem_store_32(ssid, addr, value); + addr += stride; + } +} + +STORAGE_CLASS_INLINE void +ia_css_cell_set_master_base_address(unsigned int ssid, unsigned int cell, + unsigned int master, unsigned int value) +{ + unsigned int addr, s, stride, num_segments, segment_size; + + assert(cell < ipu_device_cell_num_devices()); + assert(master < 
ipu_device_cell_num_masters(cell)); + + addr = ipu_device_cell_memory_address(cell, 0); + addr += ipu_device_cell_master_base_reg(cell, master); + stride = ipu_device_cell_master_stride(cell, master); + num_segments = ipu_device_cell_master_num_segments(cell, master); + segment_size = ipu_device_cell_master_segment_size(cell, master); + + for (s = 0; s < num_segments; s++) { + ia_css_cmem_store_32(ssid, addr, value); + addr += stride; + value += segment_size; + } +} + +#endif /* __IA_CSS_CELL_IMPL_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/cell_program_load/cell_program_load.mk b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/cell_program_load/cell_program_load.mk new file mode 100644 index 0000000000000..ec5389aff4a0a --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/cell_program_load/cell_program_load.mk @@ -0,0 +1,39 @@ +# # # +# Support for Intel Camera Imaging ISP subsystem. +# Copyright (c) 2010 - 2018, Intel Corporation. +# +# This program is free software; you can redistribute it and/or modify it +# under the terms and conditions of the GNU General Public License, +# version 2, as published by the Free Software Foundation. +# +# This program is distributed in the hope it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +# FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU General Public License for +# more details +# + +ifndef _CELL_PROGRAM_LOAD_MK_ +_CELL_PROGRAM_LOAD_MK_ = 1 + +CELL_PROGRAM_LOAD_DIR=$${MODULES_DIR}/cell_program_load +CELL_PROGRAM_LOAD_INTERFACE=$(CELL_PROGRAM_LOAD_DIR)/interface +CELL_PROGRAM_LOAD_SOURCES=$(CELL_PROGRAM_LOAD_DIR)/src + +CELL_PROGRAM_LOAD_HOST_FILES = $(CELL_PROGRAM_LOAD_SOURCES)/ia_css_cell_program_load.c + +CELL_PROGRAM_LOAD_FW_FILES = $(CELL_PROGRAM_LOAD_SOURCES)/ia_css_cell_program_load.c + +CELL_PROGRAM_LOAD_HOST_CPPFLAGS = \ + -I$(CELL_PROGRAM_LOAD_INTERFACE) \ + -I$(CELL_PROGRAM_LOAD_SOURCES) + +CELL_PROGRAM_LOAD_FW_CPPFLAGS = \ + -I$(CELL_PROGRAM_LOAD_INTERFACE) \ + -I$(CELL_PROGRAM_LOAD_SOURCES) + +ifeq ($(CRUN_DYNAMIC_LINK_PROGRAMS), 1) +CELL_PROGRAM_LOAD_HOST_CPPFLAGS += -DCRUN_DYNAMIC_LINK_PROGRAMS=1 +CELL_PROGRAM_LOAD_FW_CPPFLAGS += -DCRUN_DYNAMIC_LINK_PROGRAMS=1 +endif + +endif diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/cell_program_load/interface/ia_css_cell_program_group_load.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/cell_program_load/interface/ia_css_cell_program_group_load.h new file mode 100644 index 0000000000000..812dd4ea09a84 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/cell_program_load/interface/ia_css_cell_program_group_load.h @@ -0,0 +1,76 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+*/ + +#ifndef __IA_CSS_CELL_PROGRAM_GROUP_LOAD_H +#define __IA_CSS_CELL_PROGRAM_GROUP_LOAD_H + +#include "ia_css_cell_program_load_storage_class.h" +#include "ia_css_xmem.h" +#include "ia_css_cell_program_struct.h" + +/* Load all programs in program group + * Return 0 on success, -1 on incorrect magic number, + * -2 on incorrect release tag + */ + +IA_CSS_CELL_PROGRAM_LOAD_STORAGE_CLASS_H +int +ia_css_cell_program_group_load( + unsigned int ssid, + unsigned int mmid, + /* program address as seen from caller */ + ia_css_xmem_address_t program_addr, + /* program address as seen from cell's icache */ + unsigned int program_addr_icache +); + +/* Load all programs in program group + * each group may have multiple entry functions. This function will return + * the info of each entry function to allow the user to start any of them + * Return 0 on success, -1 on incorrect magic number, + * -2 on incorrect release tag + */ + +IA_CSS_CELL_PROGRAM_LOAD_STORAGE_CLASS_H +int +ia_css_cell_program_group_load_multi_entry( + unsigned int ssid, + unsigned int mmid, + /* program address as seen from caller */ + ia_css_xmem_address_t program_addr, + /* program address as seen from cell's icache */ + unsigned int program_addr_icache, + struct ia_css_cell_program_entry_func_info_s *entry_info, + unsigned int num_entry_info +); + +/* Load all programs in program group, except icache of first program + */ + +IA_CSS_CELL_PROGRAM_LOAD_STORAGE_CLASS_H +int +ia_css_cell_program_group_load_mem( + unsigned int ssid, + unsigned int mmid, + /* program address as seen from caller */ + ia_css_xmem_address_t program_addr, + /* program address as seen from cell's icache */ + unsigned int program_addr_icache +); + +#ifdef __INLINE_IA_CSS_CELL_PROGRAM_LOAD__ +#include "ia_css_cell_program_group_load_impl.h" +#endif + +#endif /* __IA_CSS_CELL_PROGRAM_GROUP_LOAD_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/cell_program_load/interface/ia_css_cell_program_load.h
b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/cell_program_load/interface/ia_css_cell_program_load.h new file mode 100644 index 0000000000000..d7e689e9d5697 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/cell_program_load/interface/ia_css_cell_program_load.h @@ -0,0 +1,114 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ + +#ifndef __IA_CSS_CELL_PROGRAM_LOAD_H +#define __IA_CSS_CELL_PROGRAM_LOAD_H + +#include "ia_css_cell_program_load_storage_class.h" +#include "ia_css_cell_program_struct.h" +#include "ia_css_xmem.h" + +/* Perform full program load: + * - load program header + * - initialize icache and start PC of exec entry function + * - initialize PMEM and DMEM + * Return 0 on success, -1 on incorrect magic number, + * -2 on incorrect release tag + */ + +IA_CSS_CELL_PROGRAM_LOAD_STORAGE_CLASS_H +int +ia_css_cell_program_load( + unsigned int ssid, + unsigned int mmid, + /* program address as seen from caller */ + ia_css_xmem_address_t program_addr, + /* program address as seen from cell's icache */ + unsigned int program_addr_icache +); + +/* Perform full program load: + * - load program header + * - initialize icache and start PC of exec entry function + * - initialize info of all entry function + * - initialize PMEM and DMEM + * Return 0 on success, -1 on incorrect magic number, + * -2 on incorrect release tag + */ + +IA_CSS_CELL_PROGRAM_LOAD_STORAGE_CLASS_H +int +ia_css_cell_program_load_multi_entry( + 
 unsigned int ssid, + unsigned int mmid, + /* program address as seen from caller */ + ia_css_xmem_address_t program_addr, + /* program address as seen from cell's icache */ + unsigned int program_addr_icache, + struct ia_css_cell_program_entry_func_info_s *entry_info +); + +/* Load program header, and initialize icache and start PC. + * After this, the cell may be started, but the entry function may not yet use + * global data, nor may code from PMEM be executed. + * Before accessing global data or executing code from PMEM + * the function ia_css_cell_load_program_mem must be executed. + */ + +IA_CSS_CELL_PROGRAM_LOAD_STORAGE_CLASS_H +int +ia_css_cell_program_load_icache( + unsigned int ssid, + unsigned int mmid, + ia_css_xmem_address_t program_addr, + unsigned int program_addr_icache); + +/* Load program header and finish the program load by + * initializing PMEM and DMEM. + * After this any code from the program may be executed on the cell. + */ +IA_CSS_CELL_PROGRAM_LOAD_STORAGE_CLASS_H +int +ia_css_cell_program_load_mem( + unsigned int ssid, + unsigned int mmid, + ia_css_xmem_address_t program_addr, + unsigned int program_addr_icache); + +/* set cell start PC to program init entry function */ +IA_CSS_CELL_PROGRAM_LOAD_STORAGE_CLASS_H +void +ia_css_cell_program_load_set_init_start_pc( + unsigned int ssid, + const struct ia_css_cell_program_entry_func_info_s *entry_info); + +/* set cell start PC to program exec entry function */ +IA_CSS_CELL_PROGRAM_LOAD_STORAGE_CLASS_H +void +ia_css_cell_program_load_set_exec_start_pc( + unsigned int ssid, + const struct ia_css_cell_program_entry_func_info_s *entry_info); + +/* set cell start PC to program done entry function */ +IA_CSS_CELL_PROGRAM_LOAD_STORAGE_CLASS_H +void +ia_css_cell_program_load_set_done_start_pc( + unsigned int ssid, + const struct ia_css_cell_program_entry_func_info_s *entry_info); + +#ifdef __INLINE_IA_CSS_CELL_PROGRAM_LOAD__ +#include "ia_css_cell_program_load_impl.h" +#endif + +#endif /* 
__IA_CSS_CELL_PROGRAM_LOAD_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/cell_program_load/interface/ia_css_cell_program_load_prog.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/cell_program_load/interface/ia_css_cell_program_load_prog.h new file mode 100644 index 0000000000000..0f8f1852449c1 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/cell_program_load/interface/ia_css_cell_program_load_prog.h @@ -0,0 +1,84 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+*/ + +#ifndef __IA_CSS_CELL_PROGRAM_LOAD_PROG_H +#define __IA_CSS_CELL_PROGRAM_LOAD_PROG_H + +/* basic functions needed to implement all program(group) loads */ + +#include "ia_css_cell_program_load_storage_class.h" +#include "ia_css_cell_program_struct.h" +#include "ia_css_xmem.h" + + +IA_CSS_CELL_PROGRAM_LOAD_STORAGE_CLASS_H +void +ia_css_cell_program_load_encode_entry_info( + struct ia_css_cell_program_entry_func_info_s *entry_info, + const struct ia_css_cell_program_s *prog); + +IA_CSS_CELL_PROGRAM_LOAD_STORAGE_CLASS_H +void +ia_css_cell_program_load_set_start_pc( + unsigned int ssid, + const struct ia_css_cell_program_entry_func_info_s *entry_info, + enum ia_css_cell_program_entry_func_id func_id); + +IA_CSS_CELL_PROGRAM_LOAD_STORAGE_CLASS_H +int +ia_css_cell_program_load_header( + unsigned int mmid, + ia_css_xmem_address_t host_addr, + struct ia_css_cell_program_s *prog); + +IA_CSS_CELL_PROGRAM_LOAD_STORAGE_CLASS_H +int +ia_css_cell_program_load_icache_prog( + unsigned int ssid, + unsigned int mmid, + ia_css_xmem_address_t host_addr, + unsigned int vied_addr, + const struct ia_css_cell_program_s *prog); + +IA_CSS_CELL_PROGRAM_LOAD_STORAGE_CLASS_H +int +ia_css_cell_program_load_entry_prog( + unsigned int ssid, + unsigned int mmid, + enum ia_css_cell_program_entry_func_id entry_func_id, + const struct ia_css_cell_program_s *prog); + +IA_CSS_CELL_PROGRAM_LOAD_STORAGE_CLASS_H +int +ia_css_cell_program_load_mem_prog( + unsigned int ssid, + unsigned int mmid, + ia_css_xmem_address_t host_addr, + unsigned int vied_addr, + const struct ia_css_cell_program_s *prog); + +IA_CSS_CELL_PROGRAM_LOAD_STORAGE_CLASS_H +int +ia_css_cell_program_load_prog( + unsigned int ssid, + unsigned int mmid, + ia_css_xmem_address_t host_addr, + unsigned int vied_addr, + struct ia_css_cell_program_s *prog); + +#ifdef __INLINE_IA_CSS_CELL_PROGRAM_LOAD__ +#include "ia_css_cell_program_load_prog_impl.h" +#endif + +#endif /* __IA_CSS_CELL_PROGRAM_LOAD_PROG_H */ diff --git 
a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/cell_program_load/interface/ia_css_cell_program_load_storage_class.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/cell_program_load/interface/ia_css_cell_program_load_storage_class.h new file mode 100644 index 0000000000000..8691e1402eaf8 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/cell_program_load/interface/ia_css_cell_program_load_storage_class.h @@ -0,0 +1,28 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+*/ + +#ifndef __IA_CSS_CELL_PROGRAM_LOAD_STORAGE_CLASS_H +#define __IA_CSS_CELL_PROGRAM_LOAD_STORAGE_CLASS_H + +#include "storage_class.h" + +#ifdef __INLINE_IA_CSS_CELL_PROGRAM_LOAD__ +#define IA_CSS_CELL_PROGRAM_LOAD_STORAGE_CLASS_H STORAGE_CLASS_INLINE +#define IA_CSS_CELL_PROGRAM_LOAD_STORAGE_CLASS_C STORAGE_CLASS_INLINE +#else +#define IA_CSS_CELL_PROGRAM_LOAD_STORAGE_CLASS_H STORAGE_CLASS_EXTERN +#define IA_CSS_CELL_PROGRAM_LOAD_STORAGE_CLASS_C +#endif + +#endif /* __IA_CSS_CELL_PROGRAM_LOAD_STORAGE_CLASS_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/cell_program_load/interface/ia_css_cell_program_struct.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/cell_program_load/interface/ia_css_cell_program_struct.h new file mode 100644 index 0000000000000..de3c3682ff8df --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/cell_program_load/interface/ia_css_cell_program_struct.h @@ -0,0 +1,114 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+*/ + +#ifndef __IA_CSS_CELL_PROGRAM_STRUCT_H +#define __IA_CSS_CELL_PROGRAM_STRUCT_H + +#define IA_CSS_CELL_ID_UNDEFINED 0xFFFFFFFF +#define IA_CSS_CELL_PROGRAM_MAGIC_NUMBER 0xF1A30002 + +#define CSIM_PROGRAM_NAME_SIZE 64 + +enum ia_css_cell_program_entry_func_id { + IA_CSS_CELL_PROGRAM_INIT_FUNC_ID, + IA_CSS_CELL_PROGRAM_EXEC_FUNC_ID, + IA_CSS_CELL_PROGRAM_DONE_FUNC_ID, + IA_CSS_CELL_PROGRAM_NUM_FUNC_ID, +}; + +struct ia_css_cell_program_entry_func_info_s { + /* start PC value of program entry functions */ + unsigned int start[IA_CSS_CELL_PROGRAM_NUM_FUNC_ID]; + +#if defined(C_RUN) + /* entry function names */ + char func_name[IA_CSS_CELL_PROGRAM_NUM_FUNC_ID][CSIM_PROGRAM_NAME_SIZE]; + /* for crun use only */ + unsigned int cell_id; +#endif + /* base address for cell's registers */ + unsigned int regs_addr; + +}; + +struct ia_css_cell_program_s { + /* must be equal to IA_CSS_CELL_PROGRAM_MAGIC_NUMBER */ + unsigned int magic_number; + + /* offset of blob relative to start of this struct */ + unsigned int blob_offset; + /* size of the blob, not used */ + unsigned int blob_size; + + /* start PC value of program entry functions */ + unsigned int start[IA_CSS_CELL_PROGRAM_NUM_FUNC_ID]; + +#if defined(C_RUN) || defined(HRT_UNSCHED) || defined(HRT_SCHED) + /* program name */ + char prog_name[CSIM_PROGRAM_NAME_SIZE]; +#if defined(C_RUN) + /* entry function names */ + char func_name[IA_CSS_CELL_PROGRAM_NUM_FUNC_ID][CSIM_PROGRAM_NAME_SIZE]; +#endif +#endif + + /* offset of icache section in blob */ + unsigned int icache_source; + /* offset in the instruction space, not used */ + unsigned int icache_target; + /* icache section size, not used */ + unsigned int icache_size; + + /* offset of pmem section in blob */ + unsigned int pmem_source; + /* offset in the pmem, typically 0 */ + unsigned int pmem_target; + /* pmem section size, 0 if not used */ + unsigned int pmem_size; + + /* offset of data section in blob */ + unsigned int data_source; + /* offset of data section in 
 dmem */ + unsigned int data_target; + /* size of dmem data section */ + unsigned int data_size; + + /* offset of bss section in dmem, to be zeroed */ + unsigned int bss_target; + /* size of bss section in dmem */ + unsigned int bss_size; + + /* for checking */ + unsigned int cell_id; + /* base address for cell's registers */ + unsigned int regs_addr; + + /* pmem data bus address */ + unsigned int cell_pmem_data_bus_addres; + /* dmem data bus address */ + unsigned int cell_dmem_data_bus_addres; + /* pmem config bus address */ + unsigned int cell_pmem_control_bus_addres; + /* dmem config bus address */ + unsigned int cell_dmem_control_bus_addres; + + /* offset to header of next program */ + unsigned int next; + /* Temporary workaround for a dma bug where it fails to transfer + * data with size which is not multiple of 64 bytes + */ + unsigned int dummy[2]; +}; + +#endif /* __IA_CSS_CELL_PROGRAM_STRUCT_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/cell_program_load/src/ia_css_cell_program_group_load_impl.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/cell_program_load/src/ia_css_cell_program_group_load_impl.h new file mode 100644 index 0000000000000..20d71bb25d495 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/cell_program_load/src/ia_css_cell_program_group_load_impl.h @@ -0,0 +1,128 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+*/ + +#ifndef __IA_CSS_CELL_PROGRAM_GROUP_LOAD_IMPL_H +#define __IA_CSS_CELL_PROGRAM_GROUP_LOAD_IMPL_H + +#include "ia_css_cell_program_group_load.h" + +#include "ia_css_cell_program_load_storage_class.h" +#include "ia_css_cell_program_load_prog.h" +#include "ia_css_cell_program_struct.h" + +IA_CSS_CELL_PROGRAM_LOAD_STORAGE_CLASS_C +int +ia_css_cell_program_group_load( + unsigned int ssid, + unsigned int mmid, + ia_css_xmem_address_t host_addr, + unsigned int vied_addr) +{ + struct ia_css_cell_program_s prog; + unsigned int next; + int status = 0; + + do { + status = ia_css_cell_program_load_prog( + ssid, mmid, host_addr, vied_addr, &prog); + if (status) + return status; + + next = prog.next; + host_addr = + (ia_css_xmem_address_t)((unsigned long long)host_addr + next); + vied_addr += next; + } while (next); + + return status; +} + +IA_CSS_CELL_PROGRAM_LOAD_STORAGE_CLASS_C +int +ia_css_cell_program_group_load_multi_entry( + unsigned int ssid, + unsigned int mmid, + ia_css_xmem_address_t host_addr, + unsigned int vied_addr, + struct ia_css_cell_program_entry_func_info_s *entry_info, + unsigned int num_entry_info) +{ + struct ia_css_cell_program_s prog; + unsigned int next; + int status = 0; + unsigned int i = 0; + + do { + status = ia_css_cell_program_load_prog( + ssid, mmid, host_addr, vied_addr, &prog); + if (status) + return status; + if (i >= num_entry_info) { + /* more program than entry info, + * cause access out of bound. 
+ */ + return 1; + } + ia_css_cell_program_load_encode_entry_info( + &entry_info[i], &prog); + + next = prog.next; + host_addr = + (ia_css_xmem_address_t)((unsigned long long)host_addr + next); + vied_addr += next; + i++; + } while (next); + + return status; +} + +IA_CSS_CELL_PROGRAM_LOAD_STORAGE_CLASS_C +int +ia_css_cell_program_group_load_mem( + unsigned int ssid, + unsigned int mmid, + ia_css_xmem_address_t host_addr, + unsigned int vied_addr) +{ + struct ia_css_cell_program_s prog; + unsigned int next; + int status = 0; + + status = ia_css_cell_program_load_header(mmid, host_addr, &prog); + if (status) + return status; + + /* load memories of first program */ + status = ia_css_cell_program_load_mem_prog( + ssid, mmid, host_addr, vied_addr, &prog); + if (status) + return status; + + /* return next from ia_css_cell_program_load_mem_prog? */ + next = prog.next; + + /* load next programs, if any */ + if (next) { + host_addr = + (ia_css_xmem_address_t)((unsigned long long)host_addr + next); + status = ia_css_cell_program_group_load( + ssid, mmid, host_addr, vied_addr + next); + if (status) + return status; + } + + return status; +} + +#endif /* __IA_CSS_CELL_PROGRAM_GROUP_LOAD_IMPL_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/cell_program_load/src/ia_css_cell_program_load.c b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/cell_program_load/src/ia_css_cell_program_load.c new file mode 100644 index 0000000000000..0a1ea1ac2ed1e --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/cell_program_load/src/ia_css_cell_program_load.c @@ -0,0 +1,31 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. 
+ * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ + +#ifdef __INLINE_IA_CSS_CELL_PROGRAM_LOAD__ + +#include "storage_class.h" +STORAGE_CLASS_INLINE void __dummy(void) { } + +#else + +/* low-level functions */ +#include "ia_css_cell_program_load_prog_impl.h" + +/* functions for single, unmapped program load */ +#include "ia_css_cell_program_load_impl.h" + +/* functions for program group load */ +#include "ia_css_cell_program_group_load_impl.h" + +#endif diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/cell_program_load/src/ia_css_cell_program_load_bin.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/cell_program_load/src/ia_css_cell_program_load_bin.h new file mode 100644 index 0000000000000..523ce536cb099 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/cell_program_load/src/ia_css_cell_program_load_bin.h @@ -0,0 +1,193 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+*/ + +#ifndef __IA_CSS_CELL_PROGRAM_LOAD_BIN_H +#define __IA_CSS_CELL_PROGRAM_LOAD_BIN_H + +#include "ia_css_cell_program_load_prog.h" + +#include "ia_css_cell_program_load_storage_class.h" +#include "ia_css_cell_program_struct.h" +#include "ia_css_cell_regs.h" +#include "misc_support.h" +#include "ia_css_fw_load.h" +#include "platform_support.h" +#include "ipu_device_buttress_properties_struct.h" + +IA_CSS_CELL_PROGRAM_LOAD_STORAGE_CLASS_C +void +ia_css_cell_program_load_encode_entry_info( + struct ia_css_cell_program_entry_func_info_s *entry_info, + const struct ia_css_cell_program_s *prog) +{ + unsigned int i; + + for (i = 0; i < IA_CSS_CELL_PROGRAM_NUM_FUNC_ID; i++) + entry_info->start[i] = prog->start[i]; + + entry_info->regs_addr = prog->regs_addr; +} + +IA_CSS_CELL_PROGRAM_LOAD_STORAGE_CLASS_C +void +ia_css_cell_program_load_set_start_pc( + unsigned int ssid, + const struct ia_css_cell_program_entry_func_info_s *entry_info, + enum ia_css_cell_program_entry_func_id func_id) +{ + unsigned int start_pc; + + start_pc = entry_info->start[func_id]; + /* set start address */ + ia_css_cell_regs_set_start_pc(ssid, entry_info->regs_addr, start_pc); +} + +IA_CSS_CELL_PROGRAM_LOAD_STORAGE_CLASS_C +int +ia_css_cell_program_load_icache_prog( + unsigned int ssid, + unsigned int mmid, + ia_css_xmem_address_t host_addr, + unsigned int vied_addr, + const struct ia_css_cell_program_s *prog) +{ + unsigned int regs_addr; + struct ia_css_cell_program_entry_func_info_s entry_info; + + NOT_USED(mmid); + NOT_USED(host_addr); + + if (prog->cell_id == IA_CSS_CELL_ID_UNDEFINED) + return -1; + + regs_addr = prog->regs_addr; + + /* set icache base address */ + ia_css_cell_regs_set_icache_base_address(ssid, regs_addr, + vied_addr + prog->blob_offset + prog->icache_source); + + /* set icache info bits */ + ia_css_cell_regs_set_icache_info_bits( + ssid, regs_addr, IA_CSS_INFO_BITS_M0_DDR); + + /* by default we set to start PC of exec entry function */ + 
 ia_css_cell_program_load_encode_entry_info(&entry_info, prog); + ia_css_cell_program_load_set_start_pc( + ssid, &entry_info, IA_CSS_CELL_PROGRAM_EXEC_FUNC_ID); + + return 0; +} + +IA_CSS_CELL_PROGRAM_LOAD_STORAGE_CLASS_C +int +ia_css_cell_program_load_entry_prog( + unsigned int ssid, + unsigned int mmid, + enum ia_css_cell_program_entry_func_id entry_func_id, + const struct ia_css_cell_program_s *prog) +{ + struct ia_css_cell_program_entry_func_info_s entry_info; + + NOT_USED(mmid); + + if (prog->cell_id == IA_CSS_CELL_ID_UNDEFINED) + return -1; + + ia_css_cell_program_load_encode_entry_info(&entry_info, prog); + ia_css_cell_program_load_set_start_pc(ssid, &entry_info, entry_func_id); + + return 0; +} + +IA_CSS_CELL_PROGRAM_LOAD_STORAGE_CLASS_C int +ia_css_cell_program_load_mem_prog( + unsigned int ssid, + unsigned int mmid, + ia_css_xmem_address_t host_addr, + unsigned int vied_addr, + const struct ia_css_cell_program_s *prog) +{ + unsigned int transferred = 0; + unsigned int pending = 0; + unsigned int dmem_addr; + unsigned int pmem_addr; + + NOT_USED(vied_addr); + +#ifdef ENABLE_FW_LOAD_DMA + pmem_addr = prog->cell_pmem_data_bus_addres; + dmem_addr = prog->cell_dmem_data_bus_addres; +#else + pmem_addr = prog->cell_pmem_control_bus_addres; + dmem_addr = prog->cell_dmem_control_bus_addres; +#endif + + /* Copy text section from ddr to pmem. */ + if (prog->pmem_size) { + transferred = ia_css_fw_copy_begin(mmid, + ssid, + host_addr + prog->blob_offset + + prog->pmem_source, + pmem_addr + prog->pmem_target, + prog->pmem_size); + + assert(prog->pmem_size == transferred); + /* If fewer bytes are transferred than requested, signal error, + * This architecture enforces DMA xfer size > pmem_size. + * So, a DMA transfer request should be xferable*/ + if (transferred != prog->pmem_size) + return 1; + pending++; + } + + /* Copy data section from ddr to dmem. 
 */ + if (prog->data_size) { + transferred = ia_css_fw_copy_begin(mmid, + ssid, + host_addr + prog->blob_offset + + prog->data_source, + dmem_addr + prog->data_target, + prog->data_size); + assert(prog->data_size == transferred); + /* If fewer bytes are transferred than requested, signal error, + * This architecture enforces DMA xfer size > data_size. + * So, a DMA transfer request should be xferable*/ + if (transferred != prog->data_size) + return 1; /*FALSE*/ + pending++; + } + + /* Zero bss section in dmem.*/ + if (prog->bss_size) { + transferred = ia_css_fw_zero_begin(ssid, + dmem_addr + prog->bss_target, + prog->bss_size); + assert(prog->bss_size == transferred); + /* If fewer bytes are transferred than requested, signal error, + * This architecture enforces DMA xfer size > bss_size. + * So, a DMA transfer request should be xferable*/ + if (transferred != prog->bss_size) + return 1; + pending++; + } + + /* Wait for all fw load to complete */ + while (pending) { + pending -= ia_css_fw_end(pending); + ia_css_sleep(); + } + return 0; /*Success*/ +} + +#endif /* __IA_CSS_CELL_PROGRAM_LOAD_BIN_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/cell_program_load/src/ia_css_cell_program_load_impl.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/cell_program_load/src/ia_css_cell_program_load_impl.h new file mode 100644 index 0000000000000..6201fd583482d --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/cell_program_load/src/ia_css_cell_program_load_impl.h @@ -0,0 +1,134 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. 
+ * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ + +#ifndef __IA_CSS_CELL_PROGRAM_LOAD_IMPL_H +#define __IA_CSS_CELL_PROGRAM_LOAD_IMPL_H + +#include "ia_css_cell_program_load.h" + +#include "ia_css_cell_program_load_storage_class.h" +#include "ia_css_cell_program_load_prog.h" +#include "ia_css_cell_program_struct.h" + + + +IA_CSS_CELL_PROGRAM_LOAD_STORAGE_CLASS_C int +ia_css_cell_program_load( + unsigned int ssid, + unsigned int mmid, + ia_css_xmem_address_t host_addr, + unsigned int vied_addr) +{ + struct ia_css_cell_program_s prog; + int status; + + status = ia_css_cell_program_load_prog( + ssid, mmid, host_addr, vied_addr, &prog); + + return status; +} + +IA_CSS_CELL_PROGRAM_LOAD_STORAGE_CLASS_C int +ia_css_cell_program_load_multi_entry( + unsigned int ssid, + unsigned int mmid, + ia_css_xmem_address_t host_addr, + unsigned int vied_addr, + struct ia_css_cell_program_entry_func_info_s *entry_info) +{ + struct ia_css_cell_program_s prog; + int status; + + status = ia_css_cell_program_load_prog( + ssid, mmid, host_addr, vied_addr, &prog); + if (status) + return status; + + ia_css_cell_program_load_encode_entry_info(entry_info, &prog); + + return status; +} + +IA_CSS_CELL_PROGRAM_LOAD_STORAGE_CLASS_C int +ia_css_cell_program_load_icache( + unsigned int ssid, + unsigned int mmid, + ia_css_xmem_address_t host_addr, + unsigned int vied_addr) +{ + struct ia_css_cell_program_s prog; + int status; + + status = ia_css_cell_program_load_header(mmid, host_addr, &prog); + if (status) + return status; + + status = ia_css_cell_program_load_icache_prog( + ssid, mmid, host_addr, vied_addr, &prog); + return status; +} + +IA_CSS_CELL_PROGRAM_LOAD_STORAGE_CLASS_C int +ia_css_cell_program_load_mem( + unsigned int ssid, + unsigned int mmid, + ia_css_xmem_address_t host_addr, + 
unsigned int vied_addr) +{ + struct ia_css_cell_program_s prog; + int status; + + status = ia_css_cell_program_load_header(mmid, host_addr, &prog); + if (status) + return status; + + status = ia_css_cell_program_load_mem_prog( + ssid, mmid, host_addr, vied_addr, &prog); + return status; +} + +IA_CSS_CELL_PROGRAM_LOAD_STORAGE_CLASS_C void +ia_css_cell_program_load_set_init_start_pc( + unsigned int ssid, + const struct ia_css_cell_program_entry_func_info_s *entry_info) +{ + assert(entry_info != NULL); + + ia_css_cell_program_load_set_start_pc(ssid, entry_info, + IA_CSS_CELL_PROGRAM_INIT_FUNC_ID); +} + +IA_CSS_CELL_PROGRAM_LOAD_STORAGE_CLASS_C void +ia_css_cell_program_load_set_exec_start_pc( + unsigned int ssid, + const struct ia_css_cell_program_entry_func_info_s *entry_info) +{ + assert(entry_info != NULL); + + ia_css_cell_program_load_set_start_pc(ssid, entry_info, + IA_CSS_CELL_PROGRAM_EXEC_FUNC_ID); +} + +IA_CSS_CELL_PROGRAM_LOAD_STORAGE_CLASS_C void +ia_css_cell_program_load_set_done_start_pc( + unsigned int ssid, + const struct ia_css_cell_program_entry_func_info_s *entry_info) +{ + assert(entry_info != NULL); + + ia_css_cell_program_load_set_start_pc(ssid, entry_info, + IA_CSS_CELL_PROGRAM_DONE_FUNC_ID); +} + +#endif /* __IA_CSS_CELL_PROGRAM_LOAD_IMPL_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/cell_program_load/src/ia_css_cell_program_load_prog_impl.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/cell_program_load/src/ia_css_cell_program_load_prog_impl.h new file mode 100644 index 0000000000000..f20bc2f6da52a --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/cell_program_load/src/ia_css_cell_program_load_prog_impl.h @@ -0,0 +1,76 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. 
+ * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ + +#ifndef __IA_CSS_CELL_PROGRAM_LOAD_PROG_IMPL_H +#define __IA_CSS_CELL_PROGRAM_LOAD_PROG_IMPL_H + +#include "ia_css_cell_program_load_prog.h" +#include "ia_css_fw_load.h" + +IA_CSS_CELL_PROGRAM_LOAD_STORAGE_CLASS_C +int +ia_css_cell_program_load_prog( + unsigned int ssid, + unsigned int mmid, + ia_css_xmem_address_t host_addr, + unsigned int vied_addr, + struct ia_css_cell_program_s *prog) +{ + int status; + + status = ia_css_cell_program_load_header(mmid, host_addr, prog); + if (status) + return status; + + status = ia_css_cell_program_load_icache_prog( + ssid, mmid, host_addr, vied_addr, prog); + if (status) + return status; + + status = ia_css_cell_program_load_mem_prog( + ssid, mmid, host_addr, vied_addr, prog); + if (status) + return status; + + return status; +} + +IA_CSS_CELL_PROGRAM_LOAD_STORAGE_CLASS_C +int +ia_css_cell_program_load_header( + unsigned int mmid, + ia_css_xmem_address_t host_addr, + struct ia_css_cell_program_s *prog) +{ + + /* read the program header from DDR */ + ia_css_fw_load(mmid, + host_addr, + prog, + sizeof(struct ia_css_cell_program_s)); + + /* check magic number */ + if (prog->magic_number != IA_CSS_CELL_PROGRAM_MAGIC_NUMBER) + return -1; + + return 0; +} + +#if defined(C_RUN) || defined(HRT_UNSCHED) || defined(HRT_SCHED) +#include "ia_css_cell_program_load_csim.h" +#else +#include "ia_css_cell_program_load_bin.h" +#endif + +#endif /* __IA_CSS_CELL_PROGRAM_LOAD_PROG_IMPL_H */ diff --git 
a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/cell_program_load/src/ia_css_cell_regs.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/cell_program_load/src/ia_css_cell_regs.h new file mode 100644 index 0000000000000..4eb283b58de69 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/cell_program_load/src/ia_css_cell_regs.h @@ -0,0 +1,78 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ + +#ifndef __IA_CSS_CELL_REGS_H +#define __IA_CSS_CELL_REGS_H + +#include "storage_class.h" +#include "ipu_device_cell_type_properties.h" +#include "ia_css_cmem.h" + +STORAGE_CLASS_INLINE void +ia_css_cell_regs_set_stat_ctrl(unsigned int ssid, unsigned int regs_addr, + unsigned int value) +{ + ia_css_cmem_store_32(ssid, + regs_addr + IPU_DEVICE_CELL_STAT_CTRL_REG_ADDRESS, value); +} + +STORAGE_CLASS_INLINE unsigned int +ia_css_cell_regs_get_stat_ctrl(unsigned int ssid, unsigned int regs_addr) +{ + return ia_css_cmem_load_32(ssid, + regs_addr + IPU_DEVICE_CELL_STAT_CTRL_REG_ADDRESS); +} + +STORAGE_CLASS_INLINE void +ia_css_cell_icache_invalidate(unsigned int ssid, unsigned int regs_addr) +{ + ia_css_cell_regs_set_stat_ctrl(ssid, regs_addr, + 1u << IPU_DEVICE_CELL_STAT_CTRL_INVALIDATE_ICACHE_BIT); +} + +STORAGE_CLASS_INLINE void +ia_css_cell_regs_set_start_pc(unsigned int ssid, unsigned int regs_addr, + unsigned int pc) +{ + ia_css_cmem_store_32(ssid, + regs_addr + IPU_DEVICE_CELL_START_PC_REG_ADDRESS, pc); +} + 
+STORAGE_CLASS_INLINE void +ia_css_cell_regs_set_icache_base_address(unsigned int ssid, + unsigned int regs_addr, + unsigned int value) +{ + ia_css_cmem_store_32(ssid, + regs_addr + IPU_DEVICE_CELL_ICACHE_BASE_REG_ADDRESS, value); +} + +STORAGE_CLASS_INLINE void +ia_css_cell_regs_set_icache_info_bits(unsigned int ssid, + unsigned int regs_addr, + unsigned int value) +{ + ia_css_cmem_store_32(ssid, + regs_addr + IPU_DEVICE_CELL_ICACHE_INFO_BITS_REG_ADDRESS, + value); +} + +STORAGE_CLASS_INLINE void +ia_css_cell_regs_icache_invalidate(unsigned int ssid, unsigned int regs_addr) +{ + ia_css_cell_regs_set_stat_ctrl(ssid, regs_addr, + 1u << IPU_DEVICE_CELL_STAT_CTRL_INVALIDATE_ICACHE_BIT); +} + +#endif /* __IA_CSS_CELL_REGS_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/client_pkg/interface/ia_css_client_pkg.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/client_pkg/interface/ia_css_client_pkg.h new file mode 100644 index 0000000000000..e8b0a48b27e33 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/client_pkg/interface/ia_css_client_pkg.h @@ -0,0 +1,60 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+*/ + +#ifndef __IA_CSS_CLIENT_PKG_H +#define __IA_CSS_CLIENT_PKG_H + +#include "type_support.h" +#include "ia_css_client_pkg_storage_class.h" +/* for ia_css_client_pkg_header_s (ptr only), ia_css_client_pkg_t */ +#include "ia_css_client_pkg_types.h" + +IA_CSS_CLIENT_PKG_STORAGE_CLASS_H +int ia_css_client_pkg_get_pg_manifest_offset_size( + const struct ia_css_client_pkg_header_s *client_pkg_header, + uint32_t *offset, + uint32_t *size); + +IA_CSS_CLIENT_PKG_STORAGE_CLASS_H +int ia_css_client_pkg_get_prog_list_offset_size( + const struct ia_css_client_pkg_header_s *client_pkg_header, + uint32_t *offset, + uint32_t *size); + +IA_CSS_CLIENT_PKG_STORAGE_CLASS_H +int ia_css_client_pkg_get_prog_desc_offset_size( + const struct ia_css_client_pkg_header_s *client_pkg_header, + uint32_t *offset, + uint32_t *size); + +IA_CSS_CLIENT_PKG_STORAGE_CLASS_H +int ia_css_client_pkg_get_prog_bin_entry_offset_size( + const ia_css_client_pkg_t *client_pkg, + uint32_t program_id, + uint32_t *offset, + uint32_t *size); + +IA_CSS_CLIENT_PKG_STORAGE_CLASS_H +int ia_css_client_pkg_get_indexed_prog_desc_entry_offset_size( + const ia_css_client_pkg_t *client_pkg, + uint32_t program_id, + uint32_t program_index, + uint32_t *offset, + uint32_t *size); + +#ifdef __INLINE_CLIENT_PKG__ +#include "ia_css_client_pkg_impl.h" +#endif + +#endif /* __IA_CSS_CLIENT_PKG_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/client_pkg/interface/ia_css_client_pkg_storage_class.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/client_pkg/interface/ia_css_client_pkg_storage_class.h new file mode 100644 index 0000000000000..98af98d5d824d --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/client_pkg/interface/ia_css_client_pkg_storage_class.h @@ -0,0 +1,28 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. 
+ * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ + +#ifndef __IA_CSS_CLIENT_PKG_STORAGE_CLASS_H +#define __IA_CSS_CLIENT_PKG_STORAGE_CLASS_H + +#include "storage_class.h" + +#ifndef __INLINE_CLIENT_PKG__ +#define IA_CSS_CLIENT_PKG_STORAGE_CLASS_H STORAGE_CLASS_EXTERN +#define IA_CSS_CLIENT_PKG_STORAGE_CLASS_C +#else +#define IA_CSS_CLIENT_PKG_STORAGE_CLASS_H STORAGE_CLASS_INLINE +#define IA_CSS_CLIENT_PKG_STORAGE_CLASS_C STORAGE_CLASS_INLINE +#endif + +#endif /* __IA_CSS_CLIENT_PKG_STORAGE_CLASS_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/client_pkg/interface/ia_css_client_pkg_types.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/client_pkg/interface/ia_css_client_pkg_types.h new file mode 100644 index 0000000000000..ff5bf01358f1a --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/client_pkg/interface/ia_css_client_pkg_types.h @@ -0,0 +1,44 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+*/ + +#ifndef __IA_CSS_CLIENT_PKG_TYPES_H +#define __IA_CSS_CLIENT_PKG_TYPES_H + +#include "type_support.h" + +typedef void ia_css_client_pkg_t; + +struct ia_css_client_pkg_header_s { + uint32_t prog_list_offset; + uint32_t prog_list_size; + uint32_t prog_desc_offset; + uint32_t prog_desc_size; + uint32_t pg_manifest_offset; + uint32_t pg_manifest_size; + uint32_t prog_bin_offset; + uint32_t prog_bin_size; +}; + +struct ia_css_client_pkg_prog_s { + uint32_t prog_id; + uint32_t prog_offset; + uint32_t prog_size; +}; + +struct ia_css_client_pkg_prog_list_s { + uint32_t prog_desc_count; + uint32_t prog_bin_count; +}; + +#endif /* __IA_CSS_CLIENT_PKG_TYPES_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/client_pkg/src/ia_css_client_pkg.c b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/client_pkg/src/ia_css_client_pkg.c new file mode 100644 index 0000000000000..0b2fd86d09f36 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/client_pkg/src/ia_css_client_pkg.c @@ -0,0 +1,20 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+*/ + +#ifdef __INLINE_CLIENT_PKG__ +#include "storage_class.h" +STORAGE_CLASS_INLINE int __ia_css_client_pkg_avoid_warning_on_empty_file(void) { return 0; } +#else /* __INLINE_CLIENT_PKG__ */ +#include "ia_css_client_pkg_impl.h" +#endif /* __INLINE_CLIENT_PKG__ */ diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/client_pkg/src/ia_css_client_pkg_impl.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/client_pkg/src/ia_css_client_pkg_impl.h new file mode 100644 index 0000000000000..b79e5de02b893 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/client_pkg/src/ia_css_client_pkg_impl.h @@ -0,0 +1,161 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+*/ + +#ifndef __IA_CSS_CLIENT_PKG_IMPL_H +#define __IA_CSS_CLIENT_PKG_IMPL_H + +#include "ia_css_client_pkg.h" +#include "ia_css_client_pkg_types.h" +#include "error_support.h" + +IA_CSS_CLIENT_PKG_STORAGE_CLASS_C +int ia_css_client_pkg_get_pg_manifest_offset_size( + const struct ia_css_client_pkg_header_s *client_pkg_header, + uint32_t *offset, + uint32_t *size) +{ + int ret_val = -1; + + verifjmpexit(NULL != client_pkg_header); + verifjmpexit(NULL != offset); + verifjmpexit(NULL != size); + + *(offset) = client_pkg_header->pg_manifest_offset; + *(size) = client_pkg_header->pg_manifest_size; + ret_val = 0; +EXIT: + return ret_val; +} + +IA_CSS_CLIENT_PKG_STORAGE_CLASS_C +int ia_css_client_pkg_get_prog_list_offset_size( + const struct ia_css_client_pkg_header_s *client_pkg_header, + uint32_t *offset, + uint32_t *size) +{ + int ret_val = -1; + + verifjmpexit(NULL != client_pkg_header); + verifjmpexit(NULL != offset); + verifjmpexit(NULL != size); + + *(offset) = client_pkg_header->prog_list_offset; + *(size) = client_pkg_header->prog_list_size; + ret_val = 0; +EXIT: + return ret_val; +} + +IA_CSS_CLIENT_PKG_STORAGE_CLASS_C +int ia_css_client_pkg_get_prog_desc_offset_size( + const struct ia_css_client_pkg_header_s *client_pkg_header, + uint32_t *offset, + uint32_t *size) +{ + int ret_val = -1; + + verifjmpexit(NULL != client_pkg_header); + verifjmpexit(NULL != offset); + verifjmpexit(NULL != size); + + *(offset) = client_pkg_header->prog_desc_offset; + *(size) = client_pkg_header->prog_desc_size; + ret_val = 0; +EXIT: + return ret_val; +} + +IA_CSS_CLIENT_PKG_STORAGE_CLASS_C +int ia_css_client_pkg_get_prog_bin_entry_offset_size( + const ia_css_client_pkg_t *client_pkg, + uint32_t program_id, + uint32_t *offset, + uint32_t *size) +{ + uint8_t i; + int ret_val = -1; + struct ia_css_client_pkg_header_s *client_pkg_header = NULL; + const struct ia_css_client_pkg_prog_list_s *pkg_prog_list = NULL; + const struct ia_css_client_pkg_prog_s *pkg_prog_bin_entry = NULL; + + 
verifjmpexit(NULL != client_pkg); + verifjmpexit(NULL != offset); + verifjmpexit(NULL != size); + + client_pkg_header = + (struct ia_css_client_pkg_header_s *)((uint8_t *)client_pkg); + pkg_prog_list = + (struct ia_css_client_pkg_prog_list_s *)((uint8_t *)client_pkg + + client_pkg_header->prog_list_offset); + pkg_prog_bin_entry = + (struct ia_css_client_pkg_prog_s *)((uint8_t *)pkg_prog_list + + sizeof(struct ia_css_client_pkg_prog_list_s)); + pkg_prog_bin_entry += pkg_prog_list->prog_desc_count; + + for (i = 0; i < pkg_prog_list->prog_bin_count; i++) { + if (program_id == pkg_prog_bin_entry->prog_id) { + *(offset) = pkg_prog_bin_entry->prog_offset; + *(size) = pkg_prog_bin_entry->prog_size; + ret_val = 0; + break; + } else if (0 == pkg_prog_bin_entry->prog_size) { + /* We can have a variable number of program descriptors. + * The first non-valid one will have size set to 0 + */ + break; + } + pkg_prog_bin_entry++; + } +EXIT: + return ret_val; +} + +IA_CSS_CLIENT_PKG_STORAGE_CLASS_C +int ia_css_client_pkg_get_indexed_prog_desc_entry_offset_size( + const ia_css_client_pkg_t *client_pkg, + uint32_t program_id, + uint32_t program_index, + uint32_t *offset, + uint32_t *size) +{ + int ret_val = -1; + struct ia_css_client_pkg_header_s *client_pkg_header = NULL; + const struct ia_css_client_pkg_prog_list_s *pkg_prog_list = NULL; + const struct ia_css_client_pkg_prog_s *pkg_prog_desc_entry = NULL; + + verifjmpexit(NULL != client_pkg); + verifjmpexit(NULL != offset); + verifjmpexit(NULL != size); + + client_pkg_header = + (struct ia_css_client_pkg_header_s *)((uint8_t *)client_pkg); + pkg_prog_list = + (struct ia_css_client_pkg_prog_list_s *)((uint8_t *)client_pkg + + client_pkg_header->prog_list_offset); + pkg_prog_desc_entry = + (struct ia_css_client_pkg_prog_s *)((uint8_t *)pkg_prog_list + + sizeof(struct ia_css_client_pkg_prog_list_s)); + + verifjmpexit(program_index < pkg_prog_list->prog_desc_count); + verifjmpexit(program_id == 
pkg_prog_desc_entry[program_index].prog_id); + verifjmpexit(pkg_prog_desc_entry[program_index].prog_size > 0); + *(offset) = pkg_prog_desc_entry[program_index].prog_offset; + *(size) = pkg_prog_desc_entry[program_index].prog_size; + ret_val = 0; + +EXIT: + return ret_val; +} + +#endif /* __IA_CSS_CLIENT_PKG_IMPL_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/config/psys/subsystem_cnlB0.mk b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/config/psys/subsystem_cnlB0.mk new file mode 100644 index 0000000000000..be397a0646bd3 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/config/psys/subsystem_cnlB0.mk @@ -0,0 +1,138 @@ +# # # +# Support for Intel Camera Imaging ISP subsystem. +# Copyright (c) 2010 - 2018, Intel Corporation. +# +# This program is free software; you can redistribute it and/or modify it +# under the terms and conditions of the GNU General Public License, +# version 2, as published by the Free Software Foundation. +# +# This program is distributed in the hope it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for +# more details +# + +############################################################################ +# This file is used to specify versions and properties of PSYS firmware +# components. Please note that these are subsystem specific. System specific +# properties should go to system_$IPU_SYSVER.mk. Also the device versions +# should be defined under "devices" or should be taken from the SDK. +############################################################################ + +# define for DPCM Compression/ Decompression module +HAS_DPCM = 1 + +# See HSD 1805169230 +HAS_FWDMA_ALIGNMENT_ISSUE_SIGHTING = 1 + +# Activate loading params and storing stats DDR<->REGs with DMA. 
+PSYS_USE_ISA_DMA = 1 + +# Used in ISA module +PSYS_ISL_DPC_DPC_V2 = 0 + +# Use the DMA for terminal loading in Psys server +PSYS_SERVER_ENABLE_TERMINAL_LOAD_DMA = 1 + +# Assume OFS will be running concurrently with IPF, and prioritize according to rates of services on devproxy +CONCURRENT_OFS_IPF_PRIORITY_OPTIMIZATION_ENABLED = 1 + +# Enable clock gating of input feeder ibufctrl +ENABLE_IPFD_IBUFCTRL_CLK_GATE = 1 + +# Enable clock gating of input slice light ibufctrl +ENABLE_ISL_IBUFCTRL_CLK_GATE = 1 + +# Enable clock gating of GDC0 +ENABLE_GDC0_CLK_GATE = 1 + + +# define for VCA_VCR2_FF +HAS_VCA_VCR2_FF = 1 + +HAS_GMEM = 1 +HAS_64KB_GDC_MEM = 1 + +# define for enabling mmu_stream_id_lut support +ENABLE_MMU_STREAM_ID_LUT = 1 + +# define for enabling rgbir related chnages in devproxy +HAS_RGBIR = 1 + +# Specification for Psys server's fixed globals' locations +REGMEM_OFFSET = 0 +REGMEM_SECURE_OFFSET = 4096 +REGMEM_SIZE = 20 +REGMEM_WORD_BYTES = 4 +REGMEM_SIZE_BYTES = 80 +GPC_ISP_PERF_DATA_OFFSET = 80 # Taken from REGMEM_OFFSET + REGMEM_SIZE_BYTES +GPC_ISP_PERF_DATA_SIZE_BYTES = 80 +FW_LOAD_NO_OF_REQUEST_OFFSET = 160 # Taken from GPC_ISP_PERF_DATA_OFFSET + GPC_ISP_PERF_DATA_SIZE_BYTES +FW_LOAD_NO_OF_REQUEST_SIZE_BYTES = 4 +DISPATCHER_SCRATCH_SPACE_OFFSET = 4176 # Taken from REGMEM_SECURE_OFFSET + REGMEM_SIZE_BYTES +# Total Used (@ REGMEM_OFFSET) = 164 # FW_LOAD_NO_OF_REQUEST_OFFSET + FW_LOAD_NO_OF_REQUEST_SIZE_BYTES +# Total Used (@ REGMEM_SECURE_OFFSET) = 80 # REGMEM_SIZE_BYTES + +# use DMA NCI for OFS Service to reduce load in tproxy +DMA_NCI_IN_OFS_SERVICE = 1 +# TODO use version naming scheme "v#" to decouple +# IPU_SYSVER from version. 
+PSYS_SERVER_MANIFEST_VERSION = cnlB0 +PSYS_RESOURCE_MODEL_VERSION = cnlB0 +PSYS_ACCESS_BLOCKER_VERSION = v1 + +# Disable support for PPG protocol to save codesize +PSYS_HAS_PPG_SUPPORT = 0 +# Disable support for late binding +PSYS_HAS_LATE_BINDING_SUPPORT = 0 + +# Specify PSYS server context spaces for caching context from DDR +PSYS_SERVER_NOF_CACHES = 4 +PSYS_SERVER_MAX_NUM_PROC_GRP = $(PSYS_SERVER_NOF_CACHES) +PSYS_SERVER_MAX_NUM_EXEC_PROC_GRP = 8 # Max PG's running, 4 running on Cores, 4 being updated on the host upon executing. +PSYS_SERVER_MAX_PROC_GRP_SIZE = 3352 +PSYS_SERVER_MAX_MANIFEST_SIZE = 3420 +PSYS_SERVER_MAX_CLIENT_PKG_SIZE = 2360 +PSYS_SERVER_MAX_BUFFER_SET_SIZE = 0 +PSYS_SERVER_MAX_NUMBER_OF_TERMINAL_SECTIONS = 90 +PSYS_SERVER_MAX_NUMBER_OF_TERMINAL_STORE_SECTIONS = 1 +# The caching scheme for this subsystem suits the method of queueing ahead separate PGs for frames in an interleaved +# fashion. As such there should be as many caches to support to heaviest two concurrent PGs, times two. This results +# in the following distribution of caches: two large ones for the maximum sized PG, two smaller ones for the +# second-largest sized PG. 
+PSYS_SERVER_CACHE_0_PROC_GRP_SIZE = $(PSYS_SERVER_MAX_PROC_GRP_SIZE) +PSYS_SERVER_CACHE_0_MANIFEST_SIZE = $(PSYS_SERVER_MAX_MANIFEST_SIZE) +PSYS_SERVER_CACHE_0_CLIENT_PKG_SIZE = $(PSYS_SERVER_MAX_CLIENT_PKG_SIZE) +PSYS_SERVER_CACHE_0_BUFFER_SET_SIZE = $(PSYS_SERVER_MAX_BUFFER_SET_SIZE) +PSYS_SERVER_CACHE_0_NUMBER_OF_TERMINAL_SECTIONS = $(PSYS_SERVER_MAX_NUMBER_OF_TERMINAL_SECTIONS) +PSYS_SERVER_CACHE_0_NUMBER_OF_TERMINAL_STORE_SECTIONS = $(PSYS_SERVER_MAX_NUMBER_OF_TERMINAL_STORE_SECTIONS) +PSYS_SERVER_CACHE_1_PROC_GRP_SIZE = $(PSYS_SERVER_CACHE_0_PROC_GRP_SIZE) +PSYS_SERVER_CACHE_1_MANIFEST_SIZE = $(PSYS_SERVER_CACHE_0_MANIFEST_SIZE) +PSYS_SERVER_CACHE_1_CLIENT_PKG_SIZE = $(PSYS_SERVER_CACHE_0_CLIENT_PKG_SIZE) +PSYS_SERVER_CACHE_1_BUFFER_SET_SIZE = $(PSYS_SERVER_CACHE_0_BUFFER_SET_SIZE) +PSYS_SERVER_CACHE_1_NUMBER_OF_TERMINAL_SECTIONS = $(PSYS_SERVER_CACHE_0_NUMBER_OF_TERMINAL_SECTIONS) +PSYS_SERVER_CACHE_1_NUMBER_OF_TERMINAL_STORE_SECTIONS = $(PSYS_SERVER_MAX_NUMBER_OF_TERMINAL_STORE_SECTIONS) +PSYS_SERVER_CACHE_2_PROC_GRP_SIZE = 1624 +PSYS_SERVER_CACHE_2_MANIFEST_SIZE = 1248 +PSYS_SERVER_CACHE_2_CLIENT_PKG_SIZE = 1040 +PSYS_SERVER_CACHE_2_BUFFER_SET_SIZE = 0 +PSYS_SERVER_CACHE_2_NUMBER_OF_TERMINAL_SECTIONS = 43 +PSYS_SERVER_CACHE_2_NUMBER_OF_TERMINAL_STORE_SECTIONS = $(PSYS_SERVER_MAX_NUMBER_OF_TERMINAL_STORE_SECTIONS) +PSYS_SERVER_CACHE_3_PROC_GRP_SIZE = $(PSYS_SERVER_CACHE_2_PROC_GRP_SIZE) +PSYS_SERVER_CACHE_3_MANIFEST_SIZE = $(PSYS_SERVER_CACHE_2_MANIFEST_SIZE) +PSYS_SERVER_CACHE_3_CLIENT_PKG_SIZE = $(PSYS_SERVER_CACHE_2_CLIENT_PKG_SIZE) +PSYS_SERVER_CACHE_3_BUFFER_SET_SIZE = $(PSYS_SERVER_CACHE_2_BUFFER_SET_SIZE) +PSYS_SERVER_CACHE_3_NUMBER_OF_TERMINAL_SECTIONS = $(PSYS_SERVER_CACHE_2_NUMBER_OF_TERMINAL_SECTIONS) +PSYS_SERVER_CACHE_3_NUMBER_OF_TERMINAL_STORE_SECTIONS = $(PSYS_SERVER_MAX_NUMBER_OF_TERMINAL_STORE_SECTIONS) +# Support dual command context for VTIO - concurrent secure and non-secure streams +PSYS_HAS_DUAL_CMD_CTX_SUPPORT = 1 + +HAS_SPC = 1 
+HAS_SPP0 = 1 +HAS_SPP1 = 1 +HAS_ISP0 = 1 +HAS_ISP1 = 1 +HAS_ISP2 = 1 +HAS_ISP3 = 1 + +AB_CONFIG_ARRAY_SIZE = 50 diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/config/system_cnlB0.mk b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/config/system_cnlB0.mk new file mode 100644 index 0000000000000..667282b519c4c --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/config/system_cnlB0.mk @@ -0,0 +1,96 @@ +# # # +# Support for Intel Camera Imaging ISP subsystem. +# Copyright (c) 2010 - 2018, Intel Corporation. +# +# This program is free software; you can redistribute it and/or modify it +# under the terms and conditions of the GNU General Public License, +# version 2, as published by the Free Software Foundation. +# +# This program is distributed in the hope it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for +# more details +# + +#--- DEFINES REQUIRED TO COMPILE USING LLVM --- +# Enable LLVM/Volcano for IPU4P, SPs only. +VOLCANO_IPU4P = 1 +VOLCANO_SP2601 = 1 +#---------------------------------------------- + +# enable NO_ALIAS for LLVM +ENABLE_NO_ALIAS_FOR_LLVM = 1 + +LOGICAL_FW_INPUT_SYSTEM = input_system_system +LOGICAL_FW_PROCESSING_SYSTEM = processing_system_system +LOGICAL_FW_IPU_SYSTEM = ipu_system +LOGICAL_FW_ISP_SYSTEM = isp2601_default_system +SP_CONTROL_CELL = sp2601_control +SP_PROXY_CELL = sp2601_proxy +ISP_CELL = isp2601 +# The non-capital define isp2601 is used in the sdk, in order to distinguish +# between different isp versions the ISP_CELL_IDENTIFIER define is added. 
+ISP_CELL_IDENTIFIER = ISP2601 +HAS_IPFD = 1 +HAS_S2M_IN_ISYS_ISL_NONSOC_PATH = 0 +HAS_S2V_IN_ISYS_ISL_NONSOC_PATH = 1 +# ISL-IS non-SoC path has ISA with PAF and DPC-Pext support for IPU4P-B0 +HAS_ISA_IN_ISYS_ISL = 1 +HAS_PAF_IN_ISYS_ISL = 1 +HAS_DPC_PEXT_IN_ISYS_ISL = 1 +HAS_PMA_IF = 1 + +HAS_MIPIBE_IN_PSYS_ISL = 1 + +HAS_VPLESS_SUPPORT = 0 + +DLI_SYSTEM = hive_isp_css_2600_system +RESOURCE_MANAGER_VERSION = v2 +MEM_RESOURCE_VALIDATION_ERROR = 0 +OFS_SCALER_1_4K_TILEY_422_SUPPORT= 1 +PROGDESC_ACC_SYMBOLS_VERSION = v1 +DEVPROXY_INTERFACE_VERSION = v1 +FW_ABI_IPU_TYPES_VERSION = v1 + +HAS_ONLINE_MODE_SUPPORT_IN_ISYS_PSYS = 0 + +MMU_INTERFACE_VERSION = v2 +DEVICE_ACCESS_VERSION = v2 +PSYS_SERVER_VERSION = v3 +PSYS_SERVER_LOADER_VERSION = v1 +PSYS_HW_VERSION = CNL_B0_HW + +# Enable FW_DMA for loading firmware +PSYS_SERVER_ENABLE_FW_LOAD_DMA = 1 + +NCI_SPA_VERSION = v1 +MANIFEST_TOOL_VERSION = v2 +PSYS_CON_MGR_TOOL_VERSION = v1 +# TODO: Should be removed after performance issues OTF are solved +PSYS_PROC_MGR_VERSION = v1 +IPU_RESOURCES_VERSION = v2 + +HAS_ACC_CLUSTER_PAF_PAL = 1 +HAS_ACC_CLUSTER_PEXT_PAL = 1 +HAS_ACC_CLUSTER_GBL_PAL = 1 + +# TODO use version naming scheme "v#" to decouple +# IPU_SYSVER from version. 
+PARAMBINTOOL_ISA_INIT_VERSION = cnlB0 + +# Select EQC2EQ version +# Version 1: uniform address space, equal EQ addresses regardless of EQC device +# Version 2: multiple addresses per EQ, depending on location of EQC device +EQC2EQ_VERSION = v1 + +# Select DMA instance for fw_load +FW_LOAD_DMA_INSTANCE = NCI_DMA_FW + +HAS_DMA_FW = 1 + +HAS_SIS = 0 +HAS_IDS = 1 + +PSYS_SERVER_ENABLE_TPROXY = 1 +PSYS_SERVER_ENABLE_DEVPROXY = 1 +NCI_OFS_VERSION = v1 diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/cpd/cpd_component/cpd_component.mk b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/cpd/cpd_component/cpd_component.mk new file mode 100644 index 0000000000000..8ecc3e42e55d3 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/cpd/cpd_component/cpd_component.mk @@ -0,0 +1,28 @@ +## +# # # +# Support for Intel Camera Imaging ISP subsystem. +# Copyright (c) 2010 - 2018, Intel Corporation. +# +# This program is free software; you can redistribute it and/or modify it +# under the terms and conditions of the GNU General Public License, +# version 2, as published by the Free Software Foundation. +# +# This program is distributed in the hope it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +# FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU General Public License for +# more details +# +## + +# MODULE is cpd/cpd_component + +CPD_DIR = $${MODULES_DIR}/cpd +CPD_COMPONENT_DIR = $${MODULES_DIR}/cpd/cpd_component +CPD_COMPONENT_INTERFACE = $(CPD_COMPONENT_DIR)/interface +CPD_COMPONENT_SOURCES = $(CPD_COMPONENT_DIR)/src + +CPD_COMPONENT_FILES = $(CPD_COMPONENT_SOURCES)/ia_css_cpd_component_create.c +CPD_COMPONENT_FILES += $(CPD_COMPONENT_SOURCES)/ia_css_cpd_component.c +CPD_COMPONENT_CPPFLAGS = -I$(CPD_COMPONENT_INTERFACE) +CPD_COMPONENT_CPPFLAGS += -I$(CPD_COMPONENT_SOURCES) +CPD_COMPONENT_CPPFLAGS += -I$(CPD_DIR) diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/cpd/cpd_component/interface/ia_css_cpd_component_types.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/cpd/cpd_component/interface/ia_css_cpd_component_types.h new file mode 100644 index 0000000000000..7ad3070b2fd72 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/cpd/cpd_component/interface/ia_css_cpd_component_types.h @@ -0,0 +1,90 @@ +/* + * Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+ */ + +#ifndef __IA_CSS_CPD_COMPONENT_TYPES_H +#define __IA_CSS_CPD_COMPONENT_TYPES_H + +/** @file + * This file contains datastructure related to generation of CPD file + */ + +#include "type_support.h" + +#define SIZE_OF_FW_ARCH_VERSION 7 +#define SIZE_OF_SYSTEM_VERSION 11 +#define SIZE_OF_COMPONENT_NAME 12 + +enum ia_css_cpd_component_endianness { + IA_CSSCPD_COMP_ENDIAN_RSVD, + IA_CSS_CPD_COMP_LITTLE_ENDIAN, + IA_CSS_CPD_COMP_BIG_ENDIAN +}; + +/** Module Data (components) Header + * Following data structure has been created using FAS section 5.25 + * Open : Should we add padding at the end of module directory + * (the component must be 512 aligned) + */ +typedef struct { + uint32_t header_size; + /**< Specifies endianness of the binary data */ + unsigned int endianness; + /**< fw_pkg_date is current date stored in 'binary decimal' + * representation e.g. 538248729 (0x20150619) + */ + uint32_t fw_pkg_date; + /**< hive_sdk_date is date of HIVE_SDK stored in + * 'binary decimal' representation + */ + uint32_t hive_sdk_date; + /**< compiler_date is date of ptools stored in + * 'binary decimal' representation + */ + uint32_t compiler_date; + /**< UNSCHED / SCHED / TARGET / CRUN */ + unsigned int target_platform_type; + /**< specifies the system version stored as string + * e.g. BXTB0_IPU4'\0' + */ + uint8_t system_version[SIZE_OF_SYSTEM_VERSION]; + /**< specifies fw architecture version e.g. 
for BXT CSS3.0'\0' */ + uint8_t fw_arch_version[SIZE_OF_FW_ARCH_VERSION]; + uint8_t rsvd[2]; +} ia_css_header_component_t; + +/** Module Data Directory = Directory Header + Directory Entry (0..n) + * The following two data structures have been taken from CSE Storage FAS (CPD design) + * Module Data Directory Header + */ +typedef struct { + uint32_t header_marker; + uint32_t number_of_entries; + uint8_t header_version; + uint8_t entry_version; + uint8_t header_length; /**< 0x10 (16) Fixed for this version*/ + uint8_t checksum; + uint32_t partition_name; +} ia_css_directory_header_component_t; + +/** Module Data Directory Entry + */ +typedef struct { + /**< character string describing the component name */ + uint8_t entry_name[SIZE_OF_COMPONENT_NAME]; + uint32_t offset; + uint32_t length; + uint32_t rsvd; /**< Must be 0 */ +} ia_css_directory_entry_component_t; + +#endif /* __IA_CSS_CPD_COMPONENT_TYPES_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/cpd/cpd_metadata/cpd_metadata.mk b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/cpd/cpd_metadata/cpd_metadata.mk new file mode 100644 index 0000000000000..ac78815dfbd8c --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/cpd/cpd_metadata/cpd_metadata.mk @@ -0,0 +1,29 @@ +## +# # # +# Support for Intel Camera Imaging ISP subsystem. +# Copyright (c) 2010 - 2018, Intel Corporation. +# +# This program is free software; you can redistribute it and/or modify it +# under the terms and conditions of the GNU General Public License, +# version 2, as published by the Free Software Foundation. +# +# This program is distributed in the hope it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +# FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU General Public License for +# more details +# +## + + +# MODULE is CPD UTL (Metadata File Extension) + +CPD_DIR = $${MODULES_DIR}/cpd/ +CPD_METADATA_DIR = $${MODULES_DIR}/cpd/cpd_metadata +CPD_METADATA_INTERFACE = $(CPD_METADATA_DIR)/interface +CPD_METADATA_SOURCES = $(CPD_METADATA_DIR)/src + +CPD_METADATA_FILES = $(CPD_METADATA_SOURCES)/ia_css_cpd_metadata_create.c +CPD_METADATA_FILES += $(CPD_METADATA_SOURCES)/ia_css_cpd_metadata.c +CPD_METADATA_CPPFLAGS = -I$(CPD_METADATA_INTERFACE) \ + -I$(CPD_METADATA_SOURCES) \ + -I$(CPD_DIR) diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/cpd/cpd_metadata/interface/ia_css_cpd_metadata_types.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/cpd/cpd_metadata/interface/ia_css_cpd_metadata_types.h new file mode 100644 index 0000000000000..a88c6aede08c5 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/cpd/cpd_metadata/interface/ia_css_cpd_metadata_types.h @@ -0,0 +1,111 @@ +/* + * Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+ */ + +#ifndef __IA_CSS_CPD_METADATA_TYPES_H +#define __IA_CSS_CPD_METADATA_TYPES_H + +/** @file + * This file contains data structures related to generation of + * metadata file extension + */ +#include + +/* As per v0.2 manifest document + * Header = Extension Type (4) + Extension Length (4) + + * iUnit Image Type (4) + Reserved (16) + */ +#define IPU_METADATA_HEADER_RSVD_SIZE 16 +#define IPU_METADATA_HEADER_FIELDS_SIZE 12 +#define IPU_METADATA_HEADER_SIZE \ + (IPU_METADATA_HEADER_FIELDS_SIZE + IPU_METADATA_HEADER_RSVD_SIZE) + +/* iUnit metadata extension type value */ +#define IPU_METADATA_EXTENSION_TYPE 16 + +/* Unique id for level 0 bootloader component */ +#define IA_CSS_IUNIT_BTLDR_ID 0 +/* Unique id for psys server program group component */ +#define IA_CSS_IUNIT_PSYS_SERVER_ID 1 +/* Unique id for isys server program group component */ +#define IA_CSS_IUNIT_ISYS_SERVER_ID 2 +/* Initial Identifier for client program group component */ +#define IA_CSS_IUNIT_CLIENT_ID 3 + +/* Use this to parse date from release version from the iUnit component + * e.g. 20150701 + */ +#define IA_CSS_IUNIT_COMP_DATE_SIZE 8 +/* offset of release version in program group binary + * e.g. 
release_version = "scci_gerrit_20150716_2117" + * In cpd file we only use date/version for the component + */ +#define IA_CSS_IUNIT_DATE_OFFSET 12 + +#define IPU_METADATA_HASH_KEY_SIZE 32 +#define IPU_METADATA_ATTRIBUTE_SIZE 16 +#define IA_CSE_METADATA_COMPONENT_ID_MAX 127 + +typedef enum { + IA_CSS_CPD_METADATA_IMAGE_TYPE_RESERVED, + IA_CSS_CPD_METADATA_IMAGE_TYPE_BOOTLOADER, + IA_CSS_CPD_METADATA_IMAGE_TYPE_MAIN_FIRMWARE +} ia_css_cpd_metadata_image_type_t; + +typedef enum { + IA_CSS_CPD_MAIN_FW_TYPE_RESERVED, + IA_CSS_CPD_MAIN_FW_TYPE_PSYS_SERVER, + IA_CSS_CPD_MAIN_FW_TYPE_ISYS_SERVER, + IA_CSS_CPD_MAIN_FW_TYPE_CLIENT +} ia_css_cpd_iunit_main_fw_type_t; + +/** Data structure for component specific information + * Following data structure has been taken from CSE Manifest v0.2 + */ +typedef struct { + /**< Component ID - unique for each component */ + uint32_t id; + /**< Size of the components */ + uint32_t size; + /**< Version/date of when the components is being generated/created */ + uint32_t version; + /**< SHA 256 Hash Key for component */ + uint8_t sha2_hash[IPU_METADATA_HASH_KEY_SIZE]; + /**< component sp entry point + * - Only valid for btldr/psys/isys server component + */ + uint32_t entry_point; + /**< component icache base address + * - Only valid for btldr/psys/isys server component + */ + uint32_t icache_base_offset; + /**< Reserved - must be 0 */ + uint8_t attributes[IPU_METADATA_ATTRIBUTE_SIZE]; +} ia_css_cpd_metadata_component_t; + +/** Data structure for Metadata File Extension Header + */ +typedef struct { + /**< Specifies the binary image type + * - could be bootloader or main firmware + */ + ia_css_cpd_metadata_image_type_t image_type; + /**< Number of components available in metadata file extension + * (For btldr always 1) + */ + uint32_t component_count; + /**< Component specific information */ + ia_css_cpd_metadata_component_t *components; +} ia_css_cpd_metadata_desc_t; + +#endif /* __IA_CSS_CPD_METADATA_TYPES_H */ diff --git 
a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/device_access/device_access.mk b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/device_access/device_access.mk new file mode 100644 index 0000000000000..1629d9af803b6 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/device_access/device_access.mk @@ -0,0 +1,40 @@ +# # # +# Support for Intel Camera Imaging ISP subsystem. +# Copyright (c) 2010 - 2018, Intel Corporation. +# +# This program is free software; you can redistribute it and/or modify it +# under the terms and conditions of the GNU General Public License, +# version 2, as published by the Free Software Foundation. +# +# This program is distributed in the hope it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for +# more details +# +# + +ifndef _DEVICE_ACCESS_MK_ +_DEVICE_ACCESS_MK_ = 1 + +# DEVICE_ACCESS_VERSION= +include $(MODULES_DIR)/config/system_$(IPU_SYSVER).mk + +DEVICE_ACCESS_DIR=$${MODULES_DIR}/device_access +DEVICE_ACCESS_INTERFACE=$(DEVICE_ACCESS_DIR)/interface +DEVICE_ACCESS_SOURCES=$(DEVICE_ACCESS_DIR)/src + +DEVICE_ACCESS_HOST_FILES = + +DEVICE_ACCESS_FW_FILES = + +DEVICE_ACCESS_HOST_CPPFLAGS = \ + -I$(DEVICE_ACCESS_INTERFACE) \ + -I$(DEVICE_ACCESS_SOURCES) + +DEVICE_ACCESS_FW_CPPFLAGS = \ + -I$(DEVICE_ACCESS_INTERFACE) \ + -I$(DEVICE_ACCESS_SOURCES) + +DEVICE_ACCESS_FW_CPPFLAGS += \ + -I$(DEVICE_ACCESS_SOURCES)/$(DEVICE_ACCESS_VERSION) +endif diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/device_access/interface/ia_css_cmem.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/device_access/interface/ia_css_cmem.h new file mode 100644 index 0000000000000..3dc47c29fcab7 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/device_access/interface/ia_css_cmem.h @@ -0,0 +1,58 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. 
+ * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ + +#ifndef __IA_CSS_CMEM_H +#define __IA_CSS_CMEM_H + +#include "type_support.h" +#include "storage_class.h" + +#ifdef __VIED_CELL +typedef unsigned int ia_css_cmem_address_t; +#else +#include +typedef vied_subsystem_address_t ia_css_cmem_address_t; +#endif + +STORAGE_CLASS_INLINE uint32_t +ia_css_cmem_load_32(unsigned int ssid, ia_css_cmem_address_t address); + +STORAGE_CLASS_INLINE void +ia_css_cmem_store_32(unsigned int ssid, ia_css_cmem_address_t address, + uint32_t value); + +STORAGE_CLASS_INLINE void +ia_css_cmem_load(unsigned int ssid, ia_css_cmem_address_t address, void *data, + unsigned int size); + +STORAGE_CLASS_INLINE void +ia_css_cmem_store(unsigned int ssid, ia_css_cmem_address_t address, + const void *data, unsigned int size); + +STORAGE_CLASS_INLINE void +ia_css_cmem_zero(unsigned int ssid, ia_css_cmem_address_t address, + unsigned int size); + +STORAGE_CLASS_INLINE ia_css_cmem_address_t +ia_css_cmem_get_cmem_addr_from_dmem(unsigned int base_addr, void *p); + +/* Include inline implementation */ + +#ifdef __VIED_CELL +#include "ia_css_cmem_cell.h" +#else +#include "ia_css_cmem_host.h" +#endif + +#endif /* __IA_CSS_CMEM_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/device_access/interface/ia_css_xmem.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/device_access/interface/ia_css_xmem.h new file mode 100644 index 0000000000000..de2b94d8af541 --- /dev/null +++ 
b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/device_access/interface/ia_css_xmem.h @@ -0,0 +1,65 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ + +#ifndef __IA_CSS_XMEM_H +#define __IA_CSS_XMEM_H + +#include "type_support.h" +#include "storage_class.h" + +#ifdef __VIED_CELL +typedef unsigned int ia_css_xmem_address_t; +#else +#include +typedef host_virtual_address_t ia_css_xmem_address_t; +#endif + +STORAGE_CLASS_INLINE uint8_t +ia_css_xmem_load_8(unsigned int mmid, ia_css_xmem_address_t address); + +STORAGE_CLASS_INLINE uint16_t +ia_css_xmem_load_16(unsigned int mmid, ia_css_xmem_address_t address); + +STORAGE_CLASS_INLINE uint32_t +ia_css_xmem_load_32(unsigned int mmid, ia_css_xmem_address_t address); + +STORAGE_CLASS_INLINE void +ia_css_xmem_load(unsigned int mmid, ia_css_xmem_address_t address, void *data, + unsigned int size); + +STORAGE_CLASS_INLINE void +ia_css_xmem_store_8(unsigned int mmid, ia_css_xmem_address_t address, + uint8_t value); + +STORAGE_CLASS_INLINE void +ia_css_xmem_store_16(unsigned int mmid, ia_css_xmem_address_t address, + uint16_t value); + +STORAGE_CLASS_INLINE void +ia_css_xmem_store_32(unsigned int mmid, ia_css_xmem_address_t address, + uint32_t value); + +STORAGE_CLASS_INLINE void +ia_css_xmem_store(unsigned int mmid, ia_css_xmem_address_t address, + const void *data, unsigned int bytes); + +/* Include inline implementation */ + +#ifdef __VIED_CELL +#include "ia_css_xmem_cell.h" +#else +#include 
"ia_css_xmem_host.h" +#endif + +#endif /* __IA_CSS_XMEM_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/device_access/interface/ia_css_xmem_cmem.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/device_access/interface/ia_css_xmem_cmem.h new file mode 100644 index 0000000000000..57aab3323c739 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/device_access/interface/ia_css_xmem_cmem.h @@ -0,0 +1,35 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+*/ + +#ifndef __IA_CSS_XMEM_CMEM_H +#define __IA_CSS_XMEM_CMEM_H + +#include "ia_css_cmem.h" +#include "ia_css_xmem.h" + +/* Copy data from xmem to cmem, e.g., from a program in DDR to a cell's DMEM */ +/* This may also be implemented using DMA */ + +STORAGE_CLASS_INLINE void +ia_css_xmem_to_cmem_copy( + unsigned int mmid, + unsigned int ssid, + ia_css_xmem_address_t src, + ia_css_cmem_address_t dst, + unsigned int size); + +/* include inline implementation */ +#include "ia_css_xmem_cmem_impl.h" + +#endif /* __IA_CSS_XMEM_CMEM_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/device_access/src/ia_css_cmem_host.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/device_access/src/ia_css_cmem_host.h new file mode 100644 index 0000000000000..22799e67214c1 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/device_access/src/ia_css_cmem_host.h @@ -0,0 +1,121 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ + +#ifndef __IA_CSS_CMEM_HOST_H +#define __IA_CSS_CMEM_HOST_H + +/* This file is an inline implementation for the interface ia_css_cmem.h + * and should only be included there. 
*/ + +#include "assert_support.h" +#include "misc_support.h" + +STORAGE_CLASS_INLINE uint32_t +ia_css_cmem_load_32(unsigned int ssid, ia_css_cmem_address_t address) +{ + /* Address has to be word aligned */ + assert(0 == address % 4); + return vied_subsystem_load_32(ssid, address); +} + +STORAGE_CLASS_INLINE uint32_t +ia_css_cond_cmem_load_32(bool cond, unsigned int ssid, + ia_css_cmem_address_t address) +{ + /* Address has to be word aligned */ + assert(0 == address % 4); + if (cond) + return vied_subsystem_load_32(ssid, address); + else + return 0; +} + +STORAGE_CLASS_INLINE void +ia_css_cmem_store_32(unsigned int ssid, ia_css_cmem_address_t address, + uint32_t data) +{ + /* Address has to be word aligned */ + assert(0 == address % 4); + vied_subsystem_store_32(ssid, address, data); +} + +STORAGE_CLASS_INLINE void +ia_css_cond_cmem_store_32(bool cond, unsigned int ssid, + ia_css_cmem_address_t address, uint32_t data) +{ + /* Address has to be word aligned */ + assert(0 == address % 4); + if (cond) + vied_subsystem_store_32(ssid, address, data); +} + +STORAGE_CLASS_INLINE void +ia_css_cmem_load(unsigned int ssid, ia_css_cmem_address_t address, void *data, + unsigned int size) +{ + uint32_t *data32 = (uint32_t *)data; + uint32_t end = address + size; + + assert(size % 4 == 0); + assert(address % 4 == 0); + assert((long)data % 4 == 0); + + while (address != end) { + *data32 = ia_css_cmem_load_32(ssid, address); + address += 4; + data32 += 1; + } +} + +STORAGE_CLASS_INLINE void +ia_css_cmem_store(unsigned int ssid, ia_css_cmem_address_t address, + const void *data, unsigned int size) +{ + uint32_t *data32 = (uint32_t *)data; + uint32_t end = address + size; + + assert(size % 4 == 0); + assert(address % 4 == 0); + assert((long)data % 4 == 0); + + while (address != end) { + ia_css_cmem_store_32(ssid, address, *data32); + address += 4; + data32 += 1; + } +} + +STORAGE_CLASS_INLINE void +ia_css_cmem_zero(unsigned int ssid, ia_css_cmem_address_t address, + unsigned int 
size) +{ + uint32_t end = address + size; + + assert(size % 4 == 0); + assert(address % 4 == 0); + + while (address != end) { + ia_css_cmem_store_32(ssid, address, 0); + address += 4; + } +} + +STORAGE_CLASS_INLINE ia_css_cmem_address_t +ia_css_cmem_get_cmem_addr_from_dmem(unsigned int base_addr, void *p) +{ + NOT_USED(base_addr); + return (ia_css_cmem_address_t)(uintptr_t)p; +} + +#endif /* __IA_CSS_CMEM_HOST_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/device_access/src/ia_css_xmem_cmem_impl.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/device_access/src/ia_css_xmem_cmem_impl.h new file mode 100644 index 0000000000000..adc178b75059a --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/device_access/src/ia_css_xmem_cmem_impl.h @@ -0,0 +1,79 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+*/ + +#ifndef __IA_CSS_XMEM_CMEM_IMPL_H +#define __IA_CSS_XMEM_CMEM_IMPL_H + +#include "ia_css_xmem_cmem.h" + +#include "ia_css_cmem.h" +#include "ia_css_xmem.h" + +/* Copy data from xmem to cmem, e.g., from a program in DDR to a cell's DMEM */ +/* This may also be implemented using DMA */ + +STORAGE_CLASS_INLINE void +ia_css_xmem_to_cmem_copy( + unsigned int mmid, + unsigned int ssid, + ia_css_xmem_address_t src, + ia_css_cmem_address_t dst, + unsigned int size) +{ + /* copy from ddr to subsystem, e.g., cell dmem */ + ia_css_cmem_address_t end = dst + size; + + assert(size % 4 == 0); + assert((uintptr_t) dst % 4 == 0); + assert((uintptr_t) src % 4 == 0); + + while (dst != end) { + uint32_t data; + + data = ia_css_xmem_load_32(mmid, src); + ia_css_cmem_store_32(ssid, dst, data); + dst += 4; + src += 4; + } +} + +/* Copy data from cmem to xmem */ + +STORAGE_CLASS_INLINE void +ia_css_cmem_to_xmem_copy( + unsigned int mmid, + unsigned int ssid, + ia_css_cmem_address_t src, + ia_css_xmem_address_t dst, + unsigned int size) +{ + /* copy from ddr to subsystem, e.g., cell dmem */ + ia_css_xmem_address_t end = dst + size; + + assert(size % 4 == 0); + assert((uintptr_t) dst % 4 == 0); + assert((uintptr_t) src % 4 == 0); + + while (dst != end) { + uint32_t data; + + data = ia_css_cmem_load_32(mmid, src); + ia_css_xmem_store_32(ssid, dst, data); + dst += 4; + src += 4; + } +} + + +#endif /* __IA_CSS_XMEM_CMEM_IMPL_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/device_access/src/ia_css_xmem_host.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/device_access/src/ia_css_xmem_host.h new file mode 100644 index 0000000000000..d94991fc11143 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/device_access/src/ia_css_xmem_host.h @@ -0,0 +1,84 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. 
+ * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ + +#ifndef __IA_CSS_XMEM_HOST_H +#define __IA_CSS_XMEM_HOST_H + +#include "ia_css_xmem.h" +#include +#include "assert_support.h" +#include + +STORAGE_CLASS_INLINE uint8_t +ia_css_xmem_load_8(unsigned int mmid, ia_css_xmem_address_t address) +{ + return shared_memory_load_8(mmid, address); +} + +STORAGE_CLASS_INLINE uint16_t +ia_css_xmem_load_16(unsigned int mmid, ia_css_xmem_address_t address) +{ + /* Address has to be half-word aligned */ + assert(0 == (uintptr_t) address % 2); + return shared_memory_load_16(mmid, address); +} + +STORAGE_CLASS_INLINE uint32_t +ia_css_xmem_load_32(unsigned int mmid, ia_css_xmem_address_t address) +{ + /* Address has to be word aligned */ + assert(0 == (uintptr_t) address % 4); + return shared_memory_load_32(mmid, address); +} + +STORAGE_CLASS_INLINE void +ia_css_xmem_load(unsigned int mmid, ia_css_xmem_address_t address, void *data, + unsigned int size) +{ + shared_memory_load(mmid, address, data, size); +} + +STORAGE_CLASS_INLINE void +ia_css_xmem_store_8(unsigned int mmid, ia_css_xmem_address_t address, + uint8_t value) +{ + shared_memory_store_8(mmid, address, value); +} + +STORAGE_CLASS_INLINE void +ia_css_xmem_store_16(unsigned int mmid, ia_css_xmem_address_t address, + uint16_t value) +{ + /* Address has to be half-word aligned */ + assert(0 == (uintptr_t) address % 2); + shared_memory_store_16(mmid, address, value); +} + +STORAGE_CLASS_INLINE void +ia_css_xmem_store_32(unsigned int mmid, ia_css_xmem_address_t address, + uint32_t value) +{ + /* Address has to be word 
aligned */ + assert(0 == (uintptr_t) address % 4); + shared_memory_store_32(mmid, address, value); +} + +STORAGE_CLASS_INLINE void +ia_css_xmem_store(unsigned int mmid, ia_css_xmem_address_t address, + const void *data, unsigned int bytes) +{ + shared_memory_store(mmid, address, data, bytes); +} + +#endif /* __IA_CSS_XMEM_HOST_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/devices/interface/cnlB0/ipu_device_buttress_properties_struct.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/devices/interface/cnlB0/ipu_device_buttress_properties_struct.h new file mode 100644 index 0000000000000..5102f6e44d2f6 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/devices/interface/cnlB0/ipu_device_buttress_properties_struct.h @@ -0,0 +1,68 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+*/ + +#ifndef __IPU_DEVICE_BUTTRESS_PROPERTIES_STRUCT_H +#define __IPU_DEVICE_BUTTRESS_PROPERTIES_STRUCT_H + +/* Destination values for master port 0 and bitfield "request_dest" */ +enum cio_M0_btrs_dest { + DEST_IS_BUT_REGS = 0, + DEST_IS_DDR, + RESERVED, + DEST_IS_SUBSYSTEM, + N_BTRS_DEST +}; + +/* Bit-field positions for M0 info bits */ +enum ia_css_info_bits_m0_pos { + IA_CSS_INFO_BITS_M0_SNOOPABLE_POS = 0, + IA_CSS_INFO_BITS_M0_IMR_DESTINED_POS = 1, + IA_CSS_INFO_BITS_M0_REQUEST_DEST_POS = 4 +}; + +#define IA_CSS_INFO_BITS_M0_DDR \ + (DEST_IS_DDR << IA_CSS_INFO_BITS_M0_REQUEST_DEST_POS) +#define IA_CSS_INFO_BITS_M0_SNOOPABLE (1 << IA_CSS_INFO_BITS_M0_SNOOPABLE_POS) + +/* Info bits as expected by the buttress */ +/* Deprecated because bit fields are not portable */ + +/* For master port 0*/ +union cio_M0_t { + struct { + unsigned int snoopable : 1; + unsigned int imr_destined : 1; + unsigned int spare0 : 2; + unsigned int request_dest : 2; + unsigned int spare1 : 26; + } as_bitfield; + unsigned int as_word; +}; + +/* For master port 1*/ +union cio_M1_t { + struct { + unsigned int spare0 : 1; + unsigned int deadline_pointer : 1; + unsigned int reserved : 1; + unsigned int zlw : 1; + unsigned int stream_id : 4; + unsigned int address_swizzling : 1; + unsigned int spare1 : 23; + } as_bitfield; + unsigned int as_word; +}; + + +#endif /* __IPU_DEVICE_BUTTRESS_PROPERTIES_STRUCT_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/devices/interface/ipu_device_cell_properties.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/devices/interface/ipu_device_cell_properties.h new file mode 100644 index 0000000000000..e6e1e9dcbe80c --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/devices/interface/ipu_device_cell_properties.h @@ -0,0 +1,76 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. 
+ * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ + +#ifndef __IPU_DEVICE_CELL_PROPERTIES_H +#define __IPU_DEVICE_CELL_PROPERTIES_H + +#include "storage_class.h" +#include "ipu_device_cell_type_properties.h" + +STORAGE_CLASS_INLINE unsigned int +ipu_device_cell_num_devices(void); + +STORAGE_CLASS_INLINE unsigned int +ipu_device_cell_num_memories(const unsigned int cell_id); + +STORAGE_CLASS_INLINE unsigned int +ipu_device_cell_memory_size(const unsigned int cell_id, + const unsigned int mem_id); + +STORAGE_CLASS_INLINE unsigned int +ipu_device_cell_memory_address(const unsigned int cell_id, + const unsigned int mem_id); + +STORAGE_CLASS_INLINE unsigned int +ipu_device_cell_databus_memory_address(const unsigned int cell_id, + const unsigned int mem_id); + +STORAGE_CLASS_INLINE unsigned int +ipu_device_cell_num_masters(const unsigned int cell_id); + +STORAGE_CLASS_INLINE unsigned int +ipu_device_cell_master_segment_bits(const unsigned int cell_id, + const unsigned int master_id); + +STORAGE_CLASS_INLINE unsigned int +ipu_device_cell_master_num_segments(const unsigned int cell_id, + const unsigned int master_id); + +STORAGE_CLASS_INLINE unsigned int +ipu_device_cell_master_segment_size(const unsigned int cell_id, + const unsigned int master_id); + +STORAGE_CLASS_INLINE unsigned int +ipu_device_cell_master_stride(const unsigned int cell_id, + const unsigned int master_id); + +STORAGE_CLASS_INLINE unsigned int +ipu_device_cell_master_base_reg(const unsigned int cell_id, + const unsigned int master_id); + +STORAGE_CLASS_INLINE unsigned int 
+ipu_device_cell_master_info_reg(const unsigned int cell_id, + const unsigned int master_id); + +STORAGE_CLASS_INLINE unsigned int +ipu_device_cell_icache_align(unsigned int cell_id); + +#ifdef C_RUN +STORAGE_CLASS_INLINE int +ipu_device_cell_id_crun(int cell_id); +#endif + +#include "ipu_device_cell_properties_func.h" + +#endif /* __IPU_DEVICE_CELL_PROPERTIES_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/devices/interface/ipu_device_cell_properties_func.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/devices/interface/ipu_device_cell_properties_func.h new file mode 100644 index 0000000000000..481b0504a2378 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/devices/interface/ipu_device_cell_properties_func.h @@ -0,0 +1,164 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+*/ + +#ifndef __IPU_DEVICE_CELL_PROPERTIES_FUNC_H +#define __IPU_DEVICE_CELL_PROPERTIES_FUNC_H + +/* define properties for all cells uses in ISYS */ + +#include "ipu_device_cell_properties_impl.h" +#include "ipu_device_cell_devices.h" +#include "assert_support.h" +#include "storage_class.h" + +enum {IA_CSS_CELL_MASTER_ADDRESS_WIDTH = 32}; + +STORAGE_CLASS_INLINE unsigned int +ipu_device_cell_num_devices(void) +{ + return NUM_CELLS; +} + +STORAGE_CLASS_INLINE unsigned int +ipu_device_cell_num_memories(const unsigned int cell_id) +{ + assert(cell_id < NUM_CELLS); + return ipu_device_cell_properties[cell_id].type_properties->count-> + num_memories; +} + +STORAGE_CLASS_INLINE unsigned int +ipu_device_cell_memory_size(const unsigned int cell_id, + const unsigned int mem_id) +{ + assert(cell_id < NUM_CELLS); + assert(mem_id < ipu_device_cell_num_memories(cell_id)); + return ipu_device_cell_properties[cell_id].type_properties-> + mem_size[mem_id]; +} + +STORAGE_CLASS_INLINE unsigned int +ipu_device_cell_memory_address(const unsigned int cell_id, + const unsigned int mem_id) +{ + assert(cell_id < NUM_CELLS); + assert(mem_id < ipu_device_cell_num_memories(cell_id)); + return ipu_device_cell_properties[cell_id].mem_address[mem_id]; +} + +STORAGE_CLASS_INLINE unsigned int +ipu_device_cell_databus_memory_address(const unsigned int cell_id, + const unsigned int mem_id) +{ + assert(cell_id < NUM_CELLS); + assert(mem_id < ipu_device_cell_num_memories(cell_id)); + assert(mem_id != 0); + return ipu_device_cell_properties[cell_id].mem_databus_address[mem_id]; +} + +STORAGE_CLASS_INLINE unsigned int +ipu_device_cell_num_masters(const unsigned int cell_id) +{ + assert(cell_id < NUM_CELLS); + return ipu_device_cell_properties[cell_id].type_properties->count-> + num_master_ports; +} + +STORAGE_CLASS_INLINE unsigned int +ipu_device_cell_master_segment_bits(const unsigned int cell_id, + const unsigned int master_id) +{ + assert(cell_id < NUM_CELLS); + assert(master_id < 
ipu_device_cell_num_masters(cell_id)); + return ipu_device_cell_properties[cell_id].type_properties-> + master[master_id].segment_bits; +} + +STORAGE_CLASS_INLINE unsigned int +ipu_device_cell_master_num_segments(const unsigned int cell_id, + const unsigned int master_id) +{ + return 1u << ipu_device_cell_master_segment_bits(cell_id, master_id); +} + +STORAGE_CLASS_INLINE unsigned int +ipu_device_cell_master_segment_size(const unsigned int cell_id, + const unsigned int master_id) +{ + return 1u << (IA_CSS_CELL_MASTER_ADDRESS_WIDTH - + ipu_device_cell_master_segment_bits(cell_id, master_id)); +} + +STORAGE_CLASS_INLINE unsigned int +ipu_device_cell_master_stride(const unsigned int cell_id, + const unsigned int master_id) +{ + assert(cell_id < NUM_CELLS); + assert(master_id < ipu_device_cell_num_masters(cell_id)); + return + ipu_device_cell_properties[cell_id].type_properties-> + master[master_id].stride; +} + +STORAGE_CLASS_INLINE unsigned int +ipu_device_cell_master_base_reg(const unsigned int cell_id, + const unsigned int master_id) +{ + assert(cell_id < NUM_CELLS); + assert(master_id < ipu_device_cell_num_masters(cell_id)); + return + ipu_device_cell_properties[cell_id].type_properties-> + master[master_id].base_address_register; +} + +STORAGE_CLASS_INLINE unsigned int +ipu_device_cell_master_info_reg(const unsigned int cell_id, + const unsigned int master_id) +{ + assert(cell_id < NUM_CELLS); + assert(master_id < ipu_device_cell_num_masters(cell_id)); + return + ipu_device_cell_properties[cell_id].type_properties-> + master[master_id].info_bits_register; +} + +STORAGE_CLASS_INLINE unsigned int +ipu_device_cell_master_info_override_reg(const unsigned int cell_id, + const unsigned int master_id) +{ + assert(cell_id < NUM_CELLS); + assert(master_id < ipu_device_cell_num_masters(cell_id)); + return + ipu_device_cell_properties[cell_id].type_properties-> + master[master_id].info_override_bits_register; +} + +STORAGE_CLASS_INLINE unsigned int 
+ipu_device_cell_icache_align(unsigned int cell_id) +{ + assert(cell_id < NUM_CELLS); + return ipu_device_cell_properties[cell_id].type_properties->count-> + icache_align; +} + +#ifdef C_RUN +STORAGE_CLASS_INLINE int +ipu_device_cell_id_crun(int cell_id) +{ + assert(cell_id < NUM_CELLS); + return ipu_device_map_cell_id_to_crun_proc_id[cell_id]; +} +#endif + +#endif /* __IPU_DEVICE_CELL_PROPERTIES_FUNC_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/devices/interface/ipu_device_cell_properties_struct.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/devices/interface/ipu_device_cell_properties_struct.h new file mode 100644 index 0000000000000..63397dc0b7fe6 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/devices/interface/ipu_device_cell_properties_struct.h @@ -0,0 +1,51 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+*/ + +#ifndef __IPU_DEVICE_CELL_PROPERTIES_STRUCT_H +#define __IPU_DEVICE_CELL_PROPERTIES_STRUCT_H + +/* definitions for all cell types */ + +struct ipu_device_cell_count_s { + unsigned int num_memories; + unsigned int num_master_ports; + unsigned int num_stall_bits; + unsigned int icache_align; +}; + +struct ipu_device_cell_master_properties_s { + unsigned int segment_bits; + unsigned int stride; /* offset to register of next segment */ + unsigned int base_address_register; /* address of first base address + register */ + unsigned int info_bits_register; + unsigned int info_override_bits_register; +}; + +struct ipu_device_cell_type_properties_s { + const struct ipu_device_cell_count_s *count; + const struct ipu_device_cell_master_properties_s *master; + const unsigned int *reg_offset; /* offsets of registers, some depend + on cell type */ + const unsigned int *mem_size; +}; + +struct ipu_device_cell_properties_s { + const struct ipu_device_cell_type_properties_s *type_properties; + const unsigned int *mem_address; + const unsigned int *mem_databus_address; + /* const cell_master_port_properties_s* master_port_properties; */ +}; + +#endif /* __IPU_DEVICE_CELL_PROPERTIES_STRUCT_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/devices/interface/ipu_device_cell_type_properties.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/devices/interface/ipu_device_cell_type_properties.h new file mode 100644 index 0000000000000..72caed3eef0c9 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/devices/interface/ipu_device_cell_type_properties.h @@ -0,0 +1,69 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. 
+ * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ + +#ifndef __IPU_DEVICE_CELL_TYPE_PROPERTIES_H +#define __IPU_DEVICE_CELL_TYPE_PROPERTIES_H + +#define IPU_DEVICE_INVALID_MEM_ADDRESS 0xFFFFFFFF + +enum ipu_device_cell_stat_ctrl_bit { + IPU_DEVICE_CELL_STAT_CTRL_RESET_BIT = 0, + IPU_DEVICE_CELL_STAT_CTRL_START_BIT = 1, + IPU_DEVICE_CELL_STAT_CTRL_RUN_BIT = 3, + IPU_DEVICE_CELL_STAT_CTRL_READY_BIT = 5, + IPU_DEVICE_CELL_STAT_CTRL_SLEEP_BIT = 6, + IPU_DEVICE_CELL_STAT_CTRL_STALL_BIT = 7, + IPU_DEVICE_CELL_STAT_CTRL_CLEAR_IRQ_MASK_FLAG_BIT = 8, + IPU_DEVICE_CELL_STAT_CTRL_BROKEN_IRQ_MASK_FLAG_BIT = 9, + IPU_DEVICE_CELL_STAT_CTRL_READY_IRQ_MASK_FLAG_BIT = 10, + IPU_DEVICE_CELL_STAT_CTRL_SLEEP_IRQ_MASK_FLAG_BIT = 11, + IPU_DEVICE_CELL_STAT_CTRL_INVALIDATE_ICACHE_BIT = 12, + IPU_DEVICE_CELL_STAT_CTRL_ICACHE_ENABLE_PREFETCH_BIT = 13 +}; + +enum ipu_device_cell_reg_addr { + IPU_DEVICE_CELL_STAT_CTRL_REG_ADDRESS = 0x0, + IPU_DEVICE_CELL_START_PC_REG_ADDRESS = 0x4, + IPU_DEVICE_CELL_ICACHE_BASE_REG_ADDRESS = 0x10, + IPU_DEVICE_CELL_ICACHE_INFO_BITS_REG_ADDRESS = 0x14 +}; + +enum ipu_device_cell_reg { + IPU_DEVICE_CELL_STAT_CTRL_REG, + IPU_DEVICE_CELL_START_PC_REG, + IPU_DEVICE_CELL_ICACHE_BASE_REG, + IPU_DEVICE_CELL_DEBUG_PC_REG, + IPU_DEVICE_CELL_STALL_REG, + IPU_DEVICE_CELL_NUM_REGS +}; + +enum ipu_device_cell_mem { + IPU_DEVICE_CELL_REGS, /* memory id of registers */ + IPU_DEVICE_CELL_PMEM, /* memory id of pmem */ + IPU_DEVICE_CELL_DMEM, /* memory id of dmem */ + IPU_DEVICE_CELL_BAMEM, /* memory id of bamem */ + IPU_DEVICE_CELL_VMEM /* memory id of vmem */ +}; +#define IPU_DEVICE_CELL_NUM_MEMORIES (IPU_DEVICE_CELL_VMEM + 1) + +enum ipu_device_cell_master { + IPU_DEVICE_CELL_MASTER_ICACHE, /* master port id of icache */ + IPU_DEVICE_CELL_MASTER_QMEM, + 
IPU_DEVICE_CELL_MASTER_CMEM, + IPU_DEVICE_CELL_MASTER_XMEM, + IPU_DEVICE_CELL_MASTER_XVMEM +}; +#define IPU_DEVICE_CELL_MASTER_NUM_MASTERS (IPU_DEVICE_CELL_MASTER_XVMEM + 1) + +#endif /* __IPU_DEVICE_CELL_TYPE_PROPERTIES_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/devices/interface/ipu_device_gp_properties.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/devices/interface/ipu_device_gp_properties.h new file mode 100644 index 0000000000000..fd0c5a586c949 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/devices/interface/ipu_device_gp_properties.h @@ -0,0 +1,26 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+*/ + +#ifndef __IPU_DEVICE_GP_PROPERTIES_H +#define __IPU_DEVICE_GP_PROPERTIES_H + +#include "storage_class.h" +#include "ipu_device_gp_properties_types.h" + +STORAGE_CLASS_INLINE unsigned int +ipu_device_gp_mux_addr(const unsigned int device_id, const unsigned int mux_id); + +#include "ipu_device_gp_properties_func.h" + +#endif /* __IPU_DEVICE_GP_PROPERTIES_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/devices/interface/ipu_device_gp_properties_types.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/devices/interface/ipu_device_gp_properties_types.h new file mode 100644 index 0000000000000..3032273696eab --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/devices/interface/ipu_device_gp_properties_types.h @@ -0,0 +1,103 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+*/ + +#ifndef __IPU_DEVICE_GP_PROPERTIES_TYPES_H +#define __IPU_DEVICE_GP_PROPERTIES_TYPES_H + +enum ipu_device_gp_isa_value { + /* ISA_MUX_SEL options */ + IPU_DEVICE_GP_ISA_MUX_SEL_ICA = 0, /* Enable output after FF ICA */ + IPU_DEVICE_GP_ISA_MUX_SEL_LSC = 1, /* Enable output after FF LSC */ + IPU_DEVICE_GP_ISA_MUX_SEL_DPC = 2, /* Enable output after FF DPC */ + /* ICA stream block options */ + /* UNBLOCK signal received from ICA */ + IPU_DEVICE_GP_ISA_ICA_UNBLOCK = 0, + /* BLOCK signal received from ICA */ + IPU_DEVICE_GP_ISA_ICA_BLOCK = 1, + /* LSC stream block options */ + /* UNBLOCK signal received from LSC */ + IPU_DEVICE_GP_ISA_LSC_UNBLOCK = 0, + /* BLOCK signal received from LSC */ + IPU_DEVICE_GP_ISA_LSC_BLOCK = 1, + /* DPC stream block options */ + /* UNBLOCK signal received from DPC */ + IPU_DEVICE_GP_ISA_DPC_UNBLOCK = 0, + /* BLOCK signal received from DPC */ + IPU_DEVICE_GP_ISA_DPC_BLOCK = 1, + /* Defines needed only for bxtB0 */ + /* ISA_AWB_MUX_SEL options */ + /* Input Correction input */ + IPU_DEVICE_GP_ISA_AWB_MUX_SEL_ICA = 0, + /* DPC input */ + IPU_DEVICE_GP_ISA_AWB_MUX_SEL_DPC = 1, + /* ISA_AWB_MUX_SEL options */ + /* UNBLOCK DPC input */ + IPU_DEVICE_GP_ISA_AWB_MUX_ICA_UNBLOCK = 0, + /* BLOCK DPC input */ + IPU_DEVICE_GP_ISA_AWB_MUX_ICA_BLOCK = 1, + /* ISA_AWB_MUX_SEL options */ + /* UNBLOCK Input Correction input */ + IPU_DEVICE_GP_ISA_AWB_MUX_DPC_UNBLOCK = 0, + /* BLOCK Input Correction input */ + IPU_DEVICE_GP_ISA_AWB_MUX_DPC_BLOCK = 1, + + /* PAF STRM options */ + /* Disable streaming to PAF FF*/ + IPU_DEVICE_GP_ISA_PAF_DISABLE_STREAM = 0, + /* Enable stream0 to PAF FF*/ + IPU_DEVICE_GP_ISA_PAF_ENABLE_STREAM0 = 1, + /* Enable stream1 to PAF FF*/ + IPU_DEVICE_GP_ISA_PAF_ENABLE_STREAM1 = 2, + /* PAF SRC SEL options */ + /* External channel input */ + IPU_DEVICE_GP_ISA_PAF_SRC_SEL0 = 0, + /* DPC extracted input */ + IPU_DEVICE_GP_ISA_PAF_SRC_SEL1 = 1, + /* PAF_GDDPC_BLK options */ + IPU_DEVICE_GP_ISA_PAF_GDDPC_PORT_BLK0 = 0, + 
IPU_DEVICE_GP_ISA_PAF_GDDPC_PORT_BLK1 = 1, + /* PAF ISA STR_PORT options */ + IPU_DEVICE_GP_ISA_PAF_STR_PORT0 = 0, + IPU_DEVICE_GP_ISA_PAF_STR_PORT1 = 1, + + /* sis port block options */ + IPU_DEVICE_GP_ISA_SIS_PORT_UNBLOCK = 0, + IPU_DEVICE_GP_ISA_SIS_PORT_BLOCK = 1, + IPU_DEVICE_GP_ISA_CONF_INVALID = 0xFF +}; + +enum ipu_device_gp_psa_value { + /* Defines needed for bxtB0 */ + /* PSA_STILLS_MODE_MUX */ + IPU_DEVICE_GP_PSA_MUX_POST_RYNR_ROUTE_WO_DM = 0, + IPU_DEVICE_GP_PSA_MUX_POST_RYNR_ROUTE_W_DM = 1, + /* PSA_ACM_DEMUX */ + IPU_DEVICE_GP_PSA_DEMUX_PRE_ACM_ROUTE_TO_ACM = 0, + IPU_DEVICE_GP_PSA_DEMUX_PRE_ACM_ROUTE_TO_S2V = 1, + /* PSA_S2V_RGB_F_MUX */ + IPU_DEVICE_GP_PSA_MUX_PRE_S2V_RGB_F_FROM_ACM = 0, + IPU_DEVICE_GP_PSA_MUX_PRE_S2V_RGB_F_FROM_DM_OR_SPLITTER = 1, + /* PSA_V2S_RGB_4_DEMUX */ + IPU_DEVICE_GP_PSA_DEMUX_POST_V2S_RGB_4_TO_GTM = 0, + IPU_DEVICE_GP_PSA_DEMUX_POST_V2S_RGB_4_TO_ACM = 1, +}; + +enum ipu_device_gp_isl_value { + /* choose and route pixel stream to CSI BE */ + IPU_DEVICE_GP_ISL_CSI_BE_IN_USE = 0, + /* choose and route pixel stream bypass CSI BE */ + IPU_DEVICE_GP_ISL_CSI_BE_BYPASS +}; + +#endif /* __IPU_DEVICE_GP_PROPERTIES_TYPES_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/devices/psys/cnlB0/ipu_device_acb_devices.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/devices/psys/cnlB0/ipu_device_acb_devices.h new file mode 100644 index 0000000000000..4898fbb2e875c --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/devices/psys/cnlB0/ipu_device_acb_devices.h @@ -0,0 +1,43 @@ +/* + * Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. 
+ * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + */ + +#ifndef __IPU_DEVICE_ACB_DEVICES_H +#define __IPU_DEVICE_ACB_DEVICES_H + +enum ipu_device_acb_id { + /* PSA accelerators */ + IPU_DEVICE_ACB_WBA_ID = 0, + IPU_DEVICE_ACB_RYNR_ID, + IPU_DEVICE_ACB_DEMOSAIC_ID, + IPU_DEVICE_ACB_ACM_ID, /* In CNLB0 ACM is called VCA in HW */ + IPU_DEVICE_ACB_GTC_ID, + IPU_DEVICE_ACB_YUV1_ID, + IPU_DEVICE_ACB_DVS_ID, + IPU_DEVICE_ACB_LACE_ID, + /* ISA accelerators */ + IPU_DEVICE_ACB_ICA_ID, + IPU_DEVICE_ACB_LSC_ID, + IPU_DEVICE_ACB_DPC_ID, + IPU_DEVICE_ACB_IDS_ID, + IPU_DEVICE_ACB_AWB_ID, + IPU_DEVICE_ACB_AF_ID, + IPU_DEVICE_ACB_AE_ID, + IPU_DEVICE_ACB_NUM_ACB +}; + +#define IPU_DEVICE_ACB_NUM_PSA_ACB (IPU_DEVICE_ACB_LACE_ID + 1) +#define IPU_DEVICE_ACB_NUM_ISA_ACB \ + (IPU_DEVICE_ACB_NUM_ACB - IPU_DEVICE_ACB_NUM_PSA_ACB) + +#endif /* __IPU_DEVICE_ACB_DEVICES_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/devices/psys/cnlB0/ipu_device_cell_devices.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/devices/psys/cnlB0/ipu_device_cell_devices.h new file mode 100644 index 0000000000000..0c923d1396387 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/devices/psys/cnlB0/ipu_device_cell_devices.h @@ -0,0 +1,38 @@ +/* + * Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU General Public License for + * more details. + */ + +#ifndef __IPU_DEVICE_CELL_DEVICES_H +#define __IPU_DEVICE_CELL_DEVICES_H + +#define SPC0_CELL processing_system_sp_cluster_sp_cluster_logic_spc_tile_sp +#define SPP0_CELL processing_system_sp_cluster_sp_cluster_logic_spp_tile0_sp +#define SPP1_CELL processing_system_sp_cluster_sp_cluster_logic_spp_tile1_sp +#define ISP0_CELL processing_system_isp_tile0_logic_isp +#define ISP1_CELL processing_system_isp_tile1_logic_isp +#define ISP2_CELL processing_system_isp_tile2_logic_isp +#define ISP3_CELL processing_system_isp_tile3_logic_isp + +enum ipu_device_psys_cell_id { + SPC0, + SPP0, + SPP1, + ISP0, + ISP1, + ISP2, + ISP3 +}; +#define NUM_CELLS (ISP3 + 1) +#define NUM_ISP_CELLS 4 + +#endif /* __IPU_DEVICE_CELL_DEVICES_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/devices/psys/cnlB0/ipu_device_cell_properties_defs.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/devices/psys/cnlB0/ipu_device_cell_properties_defs.h new file mode 100644 index 0000000000000..2b80e2822a906 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/devices/psys/cnlB0/ipu_device_cell_properties_defs.h @@ -0,0 +1,65 @@ +/* +* Support for Intel Camera Imaging ISP subsystem. +* Copyright (c) 2010 - 2018, Intel Corporation. +* +* This program is free software; you can redistribute it and/or modify it +* under the terms and conditions of the GNU General Public License, +* version 2, as published by the Free Software Foundation. +* +* This program is distributed in the hope it will be useful, but WITHOUT +* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for +* more details. +*/ +/* Generated file - please do not edit. 
*/ + +#ifndef _IPU_DEVICE_CELL_PROPERTIES_DEFS_H_ +#define _IPU_DEVICE_CELL_PROPERTIES_DEFS_H_ +#define SPC0_REGS_CBUS_ADDRESS 0x00000000 +#define SPC0_DMEM_CBUS_ADDRESS 0x00008000 +#define SPC0_DMEM_DBUS_ADDRESS 0x02000000 +#define SPC0_DMEM_DMA_M0_ADDRESS SPC0_DMEM_DBUS_ADDRESS +#define SPC0_DMEM_INT_DMA_M0_ADDRESS SPC0_DMEM_DBUS_ADDRESS +#define SPP0_REGS_CBUS_ADDRESS 0x00020000 +#define SPP0_DMEM_CBUS_ADDRESS 0x00028000 +#define SPP0_DMEM_DBUS_ADDRESS 0x02020000 +#define SPP1_REGS_CBUS_ADDRESS 0x00030000 +#define SPP1_DMEM_CBUS_ADDRESS 0x00038000 +#define SPP1_DMEM_DBUS_ADDRESS 0x02030000 +#define ISP0_REGS_CBUS_ADDRESS 0x001C0000 +#define ISP0_PMEM_CBUS_ADDRESS 0x001D0000 +#define ISP0_DMEM_CBUS_ADDRESS 0x001F0000 +#define ISP0_BAMEM_CBUS_ADDRESS 0x00200000 +#define ISP0_VMEM_CBUS_ADDRESS 0x00220000 +#define ISP1_REGS_CBUS_ADDRESS 0x00240000 +#define ISP1_PMEM_CBUS_ADDRESS 0x00250000 +#define ISP1_DMEM_CBUS_ADDRESS 0x00270000 +#define ISP1_BAMEM_CBUS_ADDRESS 0x00280000 +#define ISP1_VMEM_CBUS_ADDRESS 0x002A0000 +#define ISP2_REGS_CBUS_ADDRESS 0x002C0000 +#define ISP2_PMEM_CBUS_ADDRESS 0x002D0000 +#define ISP2_DMEM_CBUS_ADDRESS 0x002F0000 +#define ISP2_BAMEM_CBUS_ADDRESS 0x00300000 +#define ISP2_VMEM_CBUS_ADDRESS 0x00320000 +#define ISP3_REGS_CBUS_ADDRESS 0x00340000 +#define ISP3_PMEM_CBUS_ADDRESS 0x00350000 +#define ISP3_DMEM_CBUS_ADDRESS 0x00370000 +#define ISP3_BAMEM_CBUS_ADDRESS 0x00380000 +#define ISP3_VMEM_CBUS_ADDRESS 0x003A0000 +#define ISP0_PMEM_DBUS_ADDRESS 0x08000000 +#define ISP0_DMEM_DBUS_ADDRESS 0x08400000 +#define ISP0_BAMEM_DBUS_ADDRESS 0x09000000 +#define ISP0_VMEM_DBUS_ADDRESS 0x08800000 +#define ISP1_PMEM_DBUS_ADDRESS 0x0A000000 +#define ISP1_DMEM_DBUS_ADDRESS 0x0A400000 +#define ISP1_BAMEM_DBUS_ADDRESS 0x0B000000 +#define ISP1_VMEM_DBUS_ADDRESS 0x0A800000 +#define ISP2_PMEM_DBUS_ADDRESS 0x0C000000 +#define ISP2_DMEM_DBUS_ADDRESS 0x0C400000 +#define ISP2_BAMEM_DBUS_ADDRESS 0x0D000000 +#define ISP2_VMEM_DBUS_ADDRESS 0x0C800000 +#define 
ISP3_PMEM_DBUS_ADDRESS 0x0E000000 +#define ISP3_DMEM_DBUS_ADDRESS 0x0E400000 +#define ISP3_BAMEM_DBUS_ADDRESS 0x0F000000 +#define ISP3_VMEM_DBUS_ADDRESS 0x0E800000 +#endif /* _IPU_DEVICE_CELL_PROPERTIES_DEFS_H_ */ diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/devices/psys/cnlB0/ipu_device_cell_properties_impl.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/devices/psys/cnlB0/ipu_device_cell_properties_impl.h new file mode 100644 index 0000000000000..428a394e81368 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/devices/psys/cnlB0/ipu_device_cell_properties_impl.h @@ -0,0 +1,193 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+*/ + +#ifndef __IPU_DEVICE_CELL_PROPERTIES_IMPL_H +#define __IPU_DEVICE_CELL_PROPERTIES_IMPL_H + +#include "ipu_device_sp2600_control_properties_impl.h" +#include "ipu_device_sp2600_proxy_properties_impl.h" +#include "ipu_device_isp2600_properties_impl.h" +#include "ipu_device_cell_properties_defs.h" +#include "ipu_device_cell_devices.h" +#include "ipu_device_cell_type_properties.h"/* IPU_DEVICE_INVALID_MEM_ADDRESS */ + +static const unsigned int +ipu_device_spc0_mem_address[IPU_DEVICE_SP2600_CONTROL_NUM_MEMORIES] = { + SPC0_REGS_CBUS_ADDRESS, + IPU_DEVICE_INVALID_MEM_ADDRESS, /* no pmem */ + SPC0_DMEM_CBUS_ADDRESS +}; + +static const unsigned int +ipu_device_spp0_mem_address[IPU_DEVICE_SP2600_PROXY_NUM_MEMORIES] = { + SPP0_REGS_CBUS_ADDRESS, + IPU_DEVICE_INVALID_MEM_ADDRESS, /* no pmem */ + SPP0_DMEM_CBUS_ADDRESS +}; + +static const unsigned int +ipu_device_spp1_mem_address[IPU_DEVICE_SP2600_PROXY_NUM_MEMORIES] = { + SPP1_REGS_CBUS_ADDRESS, + IPU_DEVICE_INVALID_MEM_ADDRESS, /* no pmem */ + SPP1_DMEM_CBUS_ADDRESS +}; + +static const unsigned int +ipu_device_isp0_mem_address[IPU_DEVICE_ISP2600_NUM_MEMORIES] = { + ISP0_REGS_CBUS_ADDRESS, /* reg addr */ + ISP0_PMEM_CBUS_ADDRESS, /* pmem addr */ + ISP0_DMEM_CBUS_ADDRESS, /* dmem addr */ + ISP0_BAMEM_CBUS_ADDRESS,/* bamem addr */ + ISP0_VMEM_CBUS_ADDRESS /* vmem addr */ +}; + +static const unsigned int +ipu_device_isp1_mem_address[IPU_DEVICE_ISP2600_NUM_MEMORIES] = { + ISP1_REGS_CBUS_ADDRESS, /* reg addr */ + ISP1_PMEM_CBUS_ADDRESS, /* pmem addr */ + ISP1_DMEM_CBUS_ADDRESS, /* dmem addr */ + ISP1_BAMEM_CBUS_ADDRESS,/* bamem addr */ + ISP1_VMEM_CBUS_ADDRESS /* vmem addr */ +}; + +static const unsigned int +ipu_device_isp2_mem_address[IPU_DEVICE_ISP2600_NUM_MEMORIES] = { + ISP2_REGS_CBUS_ADDRESS, /* reg addr */ + ISP2_PMEM_CBUS_ADDRESS, /* pmem addr */ + ISP2_DMEM_CBUS_ADDRESS, /* dmem addr */ + ISP2_BAMEM_CBUS_ADDRESS,/* bamem addr */ + ISP2_VMEM_CBUS_ADDRESS /* vmem addr */ +}; + +static const unsigned int 
+ipu_device_isp3_mem_address[IPU_DEVICE_ISP2600_NUM_MEMORIES] = { + ISP3_REGS_CBUS_ADDRESS, /* reg addr */ + ISP3_PMEM_CBUS_ADDRESS, /* pmem addr */ + ISP3_DMEM_CBUS_ADDRESS, /* dmem addr */ + ISP3_BAMEM_CBUS_ADDRESS,/* bamem addr */ + ISP3_VMEM_CBUS_ADDRESS /* vmem addr */ +}; + +static const unsigned int +ipu_device_spc0_mem_databus_address[IPU_DEVICE_SP2600_CONTROL_NUM_MEMORIES] = { + IPU_DEVICE_INVALID_MEM_ADDRESS, /* no reg addr */ + IPU_DEVICE_INVALID_MEM_ADDRESS, /* no pmem */ + SPC0_DMEM_DBUS_ADDRESS +}; + +static const unsigned int +ipu_device_spp0_mem_databus_address[IPU_DEVICE_SP2600_PROXY_NUM_MEMORIES] = { + IPU_DEVICE_INVALID_MEM_ADDRESS, /* no reg addr */ + IPU_DEVICE_INVALID_MEM_ADDRESS, /* no pmem */ + SPP0_DMEM_DBUS_ADDRESS +}; + +static const unsigned int +ipu_device_spp1_mem_databus_address[IPU_DEVICE_SP2600_PROXY_NUM_MEMORIES] = { + IPU_DEVICE_INVALID_MEM_ADDRESS, /* no reg addr */ + IPU_DEVICE_INVALID_MEM_ADDRESS, /* no pmem */ + SPP1_DMEM_DBUS_ADDRESS +}; + +static const unsigned int +ipu_device_isp0_mem_databus_address[IPU_DEVICE_ISP2600_NUM_MEMORIES] = { + IPU_DEVICE_INVALID_MEM_ADDRESS, /* no reg addr */ + ISP0_PMEM_DBUS_ADDRESS, /* pmem databus addr */ + ISP0_DMEM_DBUS_ADDRESS, /* dmem databus addr */ + ISP0_BAMEM_DBUS_ADDRESS, /* bamem databus addr */ + ISP0_VMEM_DBUS_ADDRESS /* vmem databus addr */ +}; + +static const unsigned int +ipu_device_isp1_mem_databus_address[IPU_DEVICE_ISP2600_NUM_MEMORIES] = { + IPU_DEVICE_INVALID_MEM_ADDRESS, /* no reg addr */ + ISP1_PMEM_DBUS_ADDRESS, /* pmem databus addr */ + ISP1_DMEM_DBUS_ADDRESS, /* dmem databus addr */ + ISP1_BAMEM_DBUS_ADDRESS, /* bamem databus addr */ + ISP1_VMEM_DBUS_ADDRESS /* vmem databus addr */ +}; + +static const unsigned int +ipu_device_isp2_mem_databus_address[IPU_DEVICE_ISP2600_NUM_MEMORIES] = { + IPU_DEVICE_INVALID_MEM_ADDRESS, /* no reg addr */ + ISP2_PMEM_DBUS_ADDRESS, /* pmem databus addr */ + ISP2_DMEM_DBUS_ADDRESS, /* dmem databus addr */ + ISP2_BAMEM_DBUS_ADDRESS, /* 
bamem databus addr */ + ISP2_VMEM_DBUS_ADDRESS /* vmem databus addr */ +}; + +static const unsigned int +ipu_device_isp3_mem_databus_address[IPU_DEVICE_ISP2600_NUM_MEMORIES] = { + IPU_DEVICE_INVALID_MEM_ADDRESS, /* no reg addr */ + ISP3_PMEM_DBUS_ADDRESS, /* pmem databus addr */ + ISP3_DMEM_DBUS_ADDRESS, /* dmem databus addr */ + ISP3_BAMEM_DBUS_ADDRESS, /* bamem databus addr */ + ISP3_VMEM_DBUS_ADDRESS /* vmem databus addr */ +}; + +static const struct ipu_device_cell_properties_s +ipu_device_cell_properties[NUM_CELLS] = { + { + &ipu_device_sp2600_control_properties, + ipu_device_spc0_mem_address, + ipu_device_spc0_mem_databus_address + }, + { + &ipu_device_sp2600_proxy_properties, + ipu_device_spp0_mem_address, + ipu_device_spp0_mem_databus_address + }, + { + &ipu_device_sp2600_proxy_properties, + ipu_device_spp1_mem_address, + ipu_device_spp1_mem_databus_address + }, + { + &ipu_device_isp2600_properties, + ipu_device_isp0_mem_address, + ipu_device_isp0_mem_databus_address + }, + { + &ipu_device_isp2600_properties, + ipu_device_isp1_mem_address, + ipu_device_isp1_mem_databus_address + }, + { + &ipu_device_isp2600_properties, + ipu_device_isp2_mem_address, + ipu_device_isp2_mem_databus_address + }, + { + &ipu_device_isp2600_properties, + ipu_device_isp3_mem_address, + ipu_device_isp3_mem_databus_address + } +}; + +#ifdef C_RUN + +/* Mapping between hrt_hive_processors enum and cell_id's used in FW */ +static const int ipu_device_map_cell_id_to_crun_proc_id[NUM_CELLS] = { + 4, /* SPC0 */ + 5, /* SPP0 */ + 6, /* SPP1 */ + 0, /* ISP0 */ + 1, /* ISP1 */ + 2, /* ISP2 */ + 3 /* ISP3 */ +}; + +#endif + +#endif /* __IPU_DEVICE_CELL_PROPERTIES_IMPL_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/devices/psys/cnlB0/ipu_device_ff_devices.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/devices/psys/cnlB0/ipu_device_ff_devices.h new file mode 100644 index 0000000000000..d784fb47ffaa1 --- /dev/null +++ 
b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/devices/psys/cnlB0/ipu_device_ff_devices.h @@ -0,0 +1,57 @@ +/* + * Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + */ + +#ifndef __IPU_DEVICE_FF_DEVICES_H +#define __IPU_DEVICE_FF_DEVICES_H + +enum ipu_device_ff_id { + /* Names (shortened) as used in */ + /* PSA fixed functions */ /* ipu_device_ff_hrt.txt */ + IPU_DEVICE_FF_WBA_WBA = 0, /* WBA_WBA */ + IPU_DEVICE_FF_RYNR_SPLITTER, /* RYNR_RYNR_SPLITTER */ + IPU_DEVICE_FF_RYNR_COLLECTOR, /* RYNR_RYNR_COLLECTOR */ + IPU_DEVICE_FF_RYNR_BNLM, /* RYNR_BNLM */ + IPU_DEVICE_FF_RYNR_VCUD, /* RYNR_VCUD */ + IPU_DEVICE_FF_DEMOSAIC_DEMOSAIC,/* DEMOSAIC_DEMOSAIC */ + IPU_DEVICE_FF_ACM_CCM, /* VCA_VCR, name as used in CNLB0 HW */ + IPU_DEVICE_FF_ACM_ACM, /* VCA_ACM, name as used in CNLB0 HW */ + IPU_DEVICE_FF_VCA_VCR2, /* VCA_VCR, part of ACM */ + IPU_DEVICE_FF_GTC_CSC_CDS, /* GTC_CSC_CDS */ + IPU_DEVICE_FF_GTC_GTM, /* GTC_GTM */ + IPU_DEVICE_FF_YUV1_SPLITTER, /* YUV1_Processing_YUV_SPLITTER */ + IPU_DEVICE_FF_YUV1_IEFD, /* YUV1_Processing_IEFD*/ + IPU_DEVICE_FF_YUV1_YDS, /* YUV1_Processing_YDS */ + IPU_DEVICE_FF_YUV1_TCC, /* YUV1_Processing_TCC */ + IPU_DEVICE_FF_DVS_YBIN, /* DVS_YBIN */ + IPU_DEVICE_FF_DVS_DVS, /* DVS_DVS */ + IPU_DEVICE_FF_LACE_LACE, /* Lace_Stat_LACE_STAT */ + /* ISA fixed functions */ + IPU_DEVICE_FF_ICA_INL, /* Input_Corr_INL */ + IPU_DEVICE_FF_ICA_GBL, /* Input_Corr_GBL */ + IPU_DEVICE_FF_ICA_PCLN, /* Input_Corr_PCLN */ + IPU_DEVICE_FF_LSC_LSC, 
/* Bayer_Lsc_LSC */ + IPU_DEVICE_FF_DPC_DPC, /* Bayer_Dpc_GDDPC */ + IPU_DEVICE_FF_IDS_SCALER, /* Bayer_Scaler_SCALER */ + IPU_DEVICE_FF_AWB_AWRG, /* Stat_AWB_AWRG */ + IPU_DEVICE_FF_AF_AF, /* Stat_AF_AWB_FR_AF_AWB_FR_GRD */ + IPU_DEVICE_FF_AE_WGHT_HIST, /* Stat_AE_WGHT_HIST */ + IPU_DEVICE_FF_AE_CCM, /* Stat_AE_AE_CCM */ + IPU_DEVICE_FF_NUM_FF +}; + +#define IPU_DEVICE_FF_NUM_PSA_FF (IPU_DEVICE_FF_LACE_LACE + 1) +#define IPU_DEVICE_FF_NUM_ISA_FF \ + (IPU_DEVICE_FF_NUM_FF - IPU_DEVICE_FF_NUM_PSA_FF) + +#endif /* __IPU_DEVICE_FF_DEVICES_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/devices/psys/cnlB0/ipu_device_gp_devices.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/devices/psys/cnlB0/ipu_device_gp_devices.h new file mode 100644 index 0000000000000..ab8cd6a783ce6 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/devices/psys/cnlB0/ipu_device_gp_devices.h @@ -0,0 +1,67 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+*/ + +#ifndef __IPU_DEVICE_GP_DEVICES_H +#define __IPU_DEVICE_GP_DEVICES_H +#include "math_support.h" +#include "type_support.h" + +enum ipu_device_gp_id { + IPU_DEVICE_GP_PSA = 0, /* PSA */ + IPU_DEVICE_GP_ISA_STATIC, /* ISA Static */ + IPU_DEVICE_GP_ISA_RUNTIME, /* ISA Runtime */ + IPU_DEVICE_GP_ISL, /* ISL */ + IPU_DEVICE_GP_NUM_GP +}; + +enum ipu_device_gp_psa_mux_id { + /* Post RYNR/CCN: 0-To ACM (Video), 1-To Demosaic (Stills)*/ + IPU_DEVICE_GP_PSA_STILLS_MODE_MUX = 0, + /* Post Vec2Str 4: 0-To GTC, 1-To ACM */ + IPU_DEVICE_GP_PSA_V2S_RGB_4_DEMUX, + /* Post DM and pre ACM 0-CCM/ACM: 1-DM Componenet Splitter */ + IPU_DEVICE_GP_PSA_S2V_RGB_F_MUX, + /* Pre ACM/CCM: 0-To CCM/ACM, 1-To str2vec id_f */ + IPU_DEVICE_GP_PSA_ACM_DEMUX, + IPU_DEVICE_GP_PSA_MUX_NUM_MUX +}; + +enum ipu_device_gp_isa_static_mux_id { + IPU_DEVICE_GP_ISA_STATIC_MUX_SEL = 0, + IPU_DEVICE_GP_ISA_STATIC_PORTA_BLK, + IPU_DEVICE_GP_ISA_STATIC_PORTB_BLK, + IPU_DEVICE_GP_ISA_STATIC_PORTC_BLK, + IPU_DEVICE_GP_ISA_STATIC_AWB_MUX_SEL, + IPU_DEVICE_GP_ISA_STATIC_AWB_MUX_INPUT_CORR_PORT_BLK, + IPU_DEVICE_GP_ISA_STATIC_AWB_MUX_DPC_PORT_BLK, + IPU_DEVICE_GP_ISA_STATIC_MUX_NUM_MUX +}; + +enum ipu_device_gp_isa_runtime_mux_id { + IPU_DEVICE_GP_ISA_RUNTIME_FRAME_SIZE = 0, + IPU_DEVICE_GP_ISA_RUNTIME_SCALED_FRAME_SIZE, + IPU_DEVICE_GP_ISA_RUNTIME_MUX_NUM_MUX +}; + +enum ipu_device_gp_isl_mux_id { + IPU_DEVICE_GP_ISL_MIPI_BE_MUX = 0, + IPU_DEVICE_GP_ISL_MUX_NUM_MUX +}; + +#define IPU_DEVICE_GP_MAX_NUM MAX4((uint32_t)IPU_DEVICE_GP_PSA_MUX_NUM_MUX, \ + (uint32_t)IPU_DEVICE_GP_ISA_STATIC_MUX_NUM_MUX, \ + (uint32_t)IPU_DEVICE_GP_ISA_RUNTIME_MUX_NUM_MUX, \ + (uint32_t)IPU_DEVICE_GP_ISL_MUX_NUM_MUX) + +#endif /* __IPU_DEVICE_GP_DEVICES_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/devices/src/ipu_device_isp2600_properties_impl.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/devices/src/ipu_device_isp2600_properties_impl.h new file mode 100644 index 0000000000000..de733be679986 
--- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/devices/src/ipu_device_isp2600_properties_impl.h @@ -0,0 +1,151 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ + +#ifndef __IPU_DEVICE_ISP2600_PROPERTIES_IMPL_H +#define __IPU_DEVICE_ISP2600_PROPERTIES_IMPL_H + +/* isp2600 definition */ + +#include "ipu_device_cell_properties_struct.h" + +enum ipu_device_isp2600_registers { + /* control registers */ + IPU_DEVICE_ISP2600_STAT_CTRL = 0x0, + IPU_DEVICE_ISP2600_START_PC = 0x4, + + /* master port registers */ + IPU_DEVICE_ISP2600_ICACHE_BASE = 0x10, + IPU_DEVICE_ISP2600_ICACHE_INFO = 0x14, + IPU_DEVICE_ISP2600_ICACHE_INFO_OVERRIDE = 0x18, + + IPU_DEVICE_ISP2600_QMEM_BASE = 0x1C, + + IPU_DEVICE_ISP2600_CMEM_BASE = 0x28, + + IPU_DEVICE_ISP2600_XMEM_BASE = 0x88, + IPU_DEVICE_ISP2600_XMEM_INFO = 0x8C, + IPU_DEVICE_ISP2600_XMEM_INFO_OVERRIDE = 0x90, + + IPU_DEVICE_ISP2600_XVMEM_BASE = 0xB8, + + /* debug registers */ + IPU_DEVICE_ISP2600_DEBUG_PC = 0x130, + IPU_DEVICE_ISP2600_STALL = 0x134 +}; + + +enum ipu_device_isp2600_memories { + IPU_DEVICE_ISP2600_REGS, + IPU_DEVICE_ISP2600_PMEM, + IPU_DEVICE_ISP2600_DMEM, + IPU_DEVICE_ISP2600_BAMEM, + IPU_DEVICE_ISP2600_VMEM, + IPU_DEVICE_ISP2600_NUM_MEMORIES +}; + +static const unsigned int +ipu_device_isp2600_mem_size[IPU_DEVICE_ISP2600_NUM_MEMORIES] = { + 0x00140, + 0x14000, + 0x04000, + 0x20000, + 0x20000 +}; + + +enum ipu_device_isp2600_masters { + IPU_DEVICE_ISP2600_ICACHE, + 
IPU_DEVICE_ISP2600_QMEM, + IPU_DEVICE_ISP2600_CMEM, + IPU_DEVICE_ISP2600_XMEM, + IPU_DEVICE_ISP2600_XVMEM, + IPU_DEVICE_ISP2600_NUM_MASTERS +}; + +static const struct ipu_device_cell_master_properties_s +ipu_device_isp2600_masters[IPU_DEVICE_ISP2600_NUM_MASTERS] = { + { + 0, + 0xC, + IPU_DEVICE_ISP2600_ICACHE_BASE, + IPU_DEVICE_ISP2600_ICACHE_INFO, + IPU_DEVICE_ISP2600_ICACHE_INFO_OVERRIDE + }, + { + 0, + 0xC, + IPU_DEVICE_ISP2600_QMEM_BASE, + 0xFFFFFFFF, + 0xFFFFFFFF + }, + { + 3, + 0xC, + IPU_DEVICE_ISP2600_CMEM_BASE, + 0xFFFFFFFF, + 0xFFFFFFFF + }, + { + 2, + 0xC, + IPU_DEVICE_ISP2600_XMEM_BASE, + IPU_DEVICE_ISP2600_XMEM_INFO, + IPU_DEVICE_ISP2600_XMEM_INFO_OVERRIDE + }, + { + 3, + 0xC, + IPU_DEVICE_ISP2600_XVMEM_BASE, + 0xFFFFFFFF, + 0xFFFFFFFF + } +}; + +enum ipu_device_isp2600_stall_bits { + IPU_DEVICE_ISP2600_STALL_ICACHE0, + IPU_DEVICE_ISP2600_STALL_ICACHE1, + IPU_DEVICE_ISP2600_STALL_DMEM, + IPU_DEVICE_ISP2600_STALL_QMEM, + IPU_DEVICE_ISP2600_STALL_CMEM, + IPU_DEVICE_ISP2600_STALL_XMEM, + IPU_DEVICE_ISP2600_STALL_BAMEM, + IPU_DEVICE_ISP2600_STALL_VMEM, + IPU_DEVICE_ISP2600_STALL_XVMEM, + IPU_DEVICE_ISP2600_NUM_STALL_BITS +}; + +#define IPU_DEVICE_ISP2600_ICACHE_WORD_SIZE 64 /* 512 bits per instruction */ +#define IPU_DEVICE_ISP2600_ICACHE_BURST_SIZE 8 /* 8 instructions per burst */ + +static const struct ipu_device_cell_count_s ipu_device_isp2600_count = { + IPU_DEVICE_ISP2600_NUM_MEMORIES, + IPU_DEVICE_ISP2600_NUM_MASTERS, + IPU_DEVICE_ISP2600_NUM_STALL_BITS, + IPU_DEVICE_ISP2600_ICACHE_WORD_SIZE * + IPU_DEVICE_ISP2600_ICACHE_BURST_SIZE +}; + +static const unsigned int ipu_device_isp2600_reg_offset[/* CELL_NUM_REGS */] = { + 0x0, 0x4, 0x10, 0x130, 0x134 +}; + +static const struct ipu_device_cell_type_properties_s +ipu_device_isp2600_properties = { + &ipu_device_isp2600_count, + ipu_device_isp2600_masters, + ipu_device_isp2600_reg_offset, + ipu_device_isp2600_mem_size +}; + +#endif /* __IPU_DEVICE_ISP2600_PROPERTIES_IMPL_H */ diff --git 
a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/devices/src/ipu_device_sp2600_control_properties_impl.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/devices/src/ipu_device_sp2600_control_properties_impl.h new file mode 100644 index 0000000000000..430295cd9d949 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/devices/src/ipu_device_sp2600_control_properties_impl.h @@ -0,0 +1,136 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ + +#ifndef __IPU_DEVICE_SP2600_CONTROL_PROPERTIES_IMPL_H +#define __IPU_DEVICE_SP2600_CONTROL_PROPERTIES_IMPL_H + +/* sp2600_control definition */ + +#include "ipu_device_cell_properties_struct.h" + +enum ipu_device_sp2600_control_registers { + /* control registers */ + IPU_DEVICE_SP2600_CONTROL_STAT_CTRL = 0x0, + IPU_DEVICE_SP2600_CONTROL_START_PC = 0x4, + + /* master port registers */ + IPU_DEVICE_SP2600_CONTROL_ICACHE_BASE = 0x10, + IPU_DEVICE_SP2600_CONTROL_ICACHE_INFO = 0x14, + IPU_DEVICE_SP2600_CONTROL_ICACHE_INFO_OVERRIDE = 0x18, + + IPU_DEVICE_SP2600_CONTROL_QMEM_BASE = 0x1C, + + IPU_DEVICE_SP2600_CONTROL_CMEM_BASE = 0x28, + IPU_DEVICE_SP2600_CONTROL_CMEM_INFO = 0x2C, + IPU_DEVICE_SP2600_CONTROL_CMEM_INFO_OVERRIDE = 0x30, + + IPU_DEVICE_SP2600_CONTROL_XMEM_BASE = 0x58, + IPU_DEVICE_SP2600_CONTROL_XMEM_INFO = 0x5C, + IPU_DEVICE_SP2600_CONTROL_XMEM_INFO_OVERRIDE = 0x60, + + /* debug registers */ + IPU_DEVICE_SP2600_CONTROL_DEBUG_PC = 0x9C, + IPU_DEVICE_SP2600_CONTROL_STALL = 
0xA0 +}; + +enum ipu_device_sp2600_control_mems { + IPU_DEVICE_SP2600_CONTROL_REGS, + IPU_DEVICE_SP2600_CONTROL_PMEM, + IPU_DEVICE_SP2600_CONTROL_DMEM, + IPU_DEVICE_SP2600_CONTROL_NUM_MEMORIES +}; + +static const unsigned int +ipu_device_sp2600_control_mem_size[IPU_DEVICE_SP2600_CONTROL_NUM_MEMORIES] = { + 0x000AC, + 0x00000, + 0x10000 +}; + +enum ipu_device_sp2600_control_masters { + IPU_DEVICE_SP2600_CONTROL_ICACHE, + IPU_DEVICE_SP2600_CONTROL_QMEM, + IPU_DEVICE_SP2600_CONTROL_CMEM, + IPU_DEVICE_SP2600_CONTROL_XMEM, + IPU_DEVICE_SP2600_CONTROL_NUM_MASTERS +}; + +static const struct ipu_device_cell_master_properties_s +ipu_device_sp2600_control_masters[IPU_DEVICE_SP2600_CONTROL_NUM_MASTERS] = { + { + 0, + 0xC, + IPU_DEVICE_SP2600_CONTROL_ICACHE_BASE, + IPU_DEVICE_SP2600_CONTROL_ICACHE_INFO, + IPU_DEVICE_SP2600_CONTROL_ICACHE_INFO_OVERRIDE + }, + { + 0, + 0xC, + IPU_DEVICE_SP2600_CONTROL_QMEM_BASE, + 0xFFFFFFFF, + 0xFFFFFFFF + }, + { + 2, + 0xC, + IPU_DEVICE_SP2600_CONTROL_CMEM_BASE, + IPU_DEVICE_SP2600_CONTROL_CMEM_INFO, + IPU_DEVICE_SP2600_CONTROL_CMEM_INFO_OVERRIDE + }, + { + 2, + 0xC, + IPU_DEVICE_SP2600_CONTROL_XMEM_BASE, + IPU_DEVICE_SP2600_CONTROL_XMEM_INFO, + IPU_DEVICE_SP2600_CONTROL_XMEM_INFO_OVERRIDE + } +}; + +enum ipu_device_sp2600_control_stall_bits { + IPU_DEVICE_SP2600_CONTROL_STALL_ICACHE, + IPU_DEVICE_SP2600_CONTROL_STALL_DMEM, + IPU_DEVICE_SP2600_CONTROL_STALL_QMEM, + IPU_DEVICE_SP2600_CONTROL_STALL_CMEM, + IPU_DEVICE_SP2600_CONTROL_STALL_XMEM, + IPU_DEVICE_SP2600_CONTROL_NUM_STALL_BITS +}; + +/* 32 bits per instruction */ +#define IPU_DEVICE_SP2600_CONTROL_ICACHE_WORD_SIZE 4 +/* 32 instructions per burst */ +#define IPU_DEVICE_SP2600_CONTROL_ICACHE_BURST_SIZE 32 + +static const struct ipu_device_cell_count_s ipu_device_sp2600_control_count = { + IPU_DEVICE_SP2600_CONTROL_NUM_MEMORIES, + IPU_DEVICE_SP2600_CONTROL_NUM_MASTERS, + IPU_DEVICE_SP2600_CONTROL_NUM_STALL_BITS, + IPU_DEVICE_SP2600_CONTROL_ICACHE_WORD_SIZE * + 
IPU_DEVICE_SP2600_CONTROL_ICACHE_BURST_SIZE +}; + +static const unsigned int +ipu_device_sp2600_control_reg_offset[/* CELL_NUM_REGS */] = { + 0x0, 0x4, 0x10, 0x9C, 0xA0 +}; + +static const struct ipu_device_cell_type_properties_s +ipu_device_sp2600_control_properties = { + &ipu_device_sp2600_control_count, + ipu_device_sp2600_control_masters, + ipu_device_sp2600_control_reg_offset, + ipu_device_sp2600_control_mem_size +}; + +#endif /* __IPU_DEVICE_SP2600_CONTROL_PROPERTIES_IMPL_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/devices/src/ipu_device_sp2600_fp_properties_impl.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/devices/src/ipu_device_sp2600_fp_properties_impl.h new file mode 100644 index 0000000000000..b3f120f9fea86 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/devices/src/ipu_device_sp2600_fp_properties_impl.h @@ -0,0 +1,140 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+*/ + +#ifndef __IPU_DEVICE_SP2600_FP_PROPERTIES_IMPL_H +#define __IPU_DEVICE_SP2600_FP_PROPERTIES_IMPL_H + +/* sp2600_fp definition */ + +#include "ipu_device_cell_properties_struct.h" + +enum ipu_device_sp2600_fp_registers { + /* control registers */ + IPU_DEVICE_SP2600_FP_STAT_CTRL = 0x0, + IPU_DEVICE_SP2600_FP_START_PC = 0x4, + + /* master port registers */ + IPU_DEVICE_SP2600_FP_ICACHE_BASE = 0x10, + IPU_DEVICE_SP2600_FP_ICACHE_INFO = 0x14, + IPU_DEVICE_SP2600_FP_ICACHE_INFO_OVERRIDE = 0x18, + + IPU_DEVICE_SP2600_FP_QMEM_BASE = 0x1C, + + IPU_DEVICE_SP2600_FP_CMEM_BASE = 0x28, + IPU_DEVICE_SP2600_FP_CMEM_INFO = 0x2C, + IPU_DEVICE_SP2600_FP_CMEM_INFO_OVERRIDE = 0x30, + + IPU_DEVICE_SP2600_FP_XMEM_BASE = 0x88, + IPU_DEVICE_SP2600_FP_XMEM_INFO = 0x8C, + IPU_DEVICE_SP2600_FP_XMEM_INFO_OVERRIDE = 0x90, + + /* debug registers */ + IPU_DEVICE_SP2600_FP_DEBUG_PC = 0xCC, + IPU_DEVICE_SP2600_FP_STALL = 0xD0 +}; + + +enum ipu_device_sp2600_fp_memories { + IPU_DEVICE_SP2600_FP_REGS, + IPU_DEVICE_SP2600_FP_PMEM, + IPU_DEVICE_SP2600_FP_DMEM, + IPU_DEVICE_SP2600_FP_DMEM1, + IPU_DEVICE_SP2600_FP_NUM_MEMORIES +}; + +static const unsigned int +ipu_device_sp2600_fp_mem_size[IPU_DEVICE_SP2600_FP_NUM_MEMORIES] = { + 0x000DC, + 0x00000, + 0x10000, + 0x08000 +}; + +enum ipu_device_sp2600_fp_masters { + IPU_DEVICE_SP2600_FP_ICACHE, + IPU_DEVICE_SP2600_FP_QMEM, + IPU_DEVICE_SP2600_FP_CMEM, + IPU_DEVICE_SP2600_FP_XMEM, + IPU_DEVICE_SP2600_FP_NUM_MASTERS +}; + +static const struct ipu_device_cell_master_properties_s +ipu_device_sp2600_fp_masters[IPU_DEVICE_SP2600_FP_NUM_MASTERS] = { + { + 0, + 0xC, + IPU_DEVICE_SP2600_FP_ICACHE_BASE, + IPU_DEVICE_SP2600_FP_ICACHE_INFO, + IPU_DEVICE_SP2600_FP_ICACHE_INFO_OVERRIDE + }, + { + 0, + 0xC, + IPU_DEVICE_SP2600_FP_QMEM_BASE, + 0xFFFFFFFF, + 0xFFFFFFFF + }, + { + 3, + 0xC, + IPU_DEVICE_SP2600_FP_CMEM_BASE, + IPU_DEVICE_SP2600_FP_CMEM_INFO, + IPU_DEVICE_SP2600_FP_CMEM_INFO_OVERRIDE + }, + { + 2, + 0xC, + IPU_DEVICE_SP2600_FP_XMEM_BASE, + 
IPU_DEVICE_SP2600_FP_XMEM_INFO, + IPU_DEVICE_SP2600_FP_XMEM_INFO_OVERRIDE + } +}; + +enum ipu_device_sp2600_fp_stall_bits { + IPU_DEVICE_SP2600_FP_STALL_ICACHE, + IPU_DEVICE_SP2600_FP_STALL_DMEM, + IPU_DEVICE_SP2600_FP_STALL_QMEM, + IPU_DEVICE_SP2600_FP_STALL_CMEM, + IPU_DEVICE_SP2600_FP_STALL_XMEM, + IPU_DEVICE_SP2600_FP_STALL_DMEM1, + IPU_DEVICE_SP2600_FP_NUM_STALL_BITS +}; + +/* 32 bits per instruction */ +#define IPU_DEVICE_SP2600_FP_ICACHE_WORD_SIZE 4 +/* 32 instructions per burst */ +#define IPU_DEVICE_SP2600_FP_ICACHE_BURST_SIZE 32 + +static const struct ipu_device_cell_count_s ipu_device_sp2600_fp_count = { + IPU_DEVICE_SP2600_FP_NUM_MEMORIES, + IPU_DEVICE_SP2600_FP_NUM_MASTERS, + IPU_DEVICE_SP2600_FP_NUM_STALL_BITS, + IPU_DEVICE_SP2600_FP_ICACHE_WORD_SIZE * + IPU_DEVICE_SP2600_FP_ICACHE_BURST_SIZE +}; + +static const unsigned int +ipu_device_sp2600_fp_reg_offset[/* CELL_NUM_REGS */] = { + 0x0, 0x4, 0x10, 0x9C, 0xA0 +}; + +static const struct ipu_device_cell_type_properties_s +ipu_device_sp2600_fp_properties = { + &ipu_device_sp2600_fp_count, + ipu_device_sp2600_fp_masters, + ipu_device_sp2600_fp_reg_offset, + ipu_device_sp2600_fp_mem_size +}; + +#endif /* __IPU_DEVICE_SP2600_FP_PROPERTIES_IMPL_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/devices/src/ipu_device_sp2600_proxy_properties_impl.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/devices/src/ipu_device_sp2600_proxy_properties_impl.h new file mode 100644 index 0000000000000..6fdcd7faea9b8 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/devices/src/ipu_device_sp2600_proxy_properties_impl.h @@ -0,0 +1,138 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. 
+ * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ + +#ifndef __IPU_DEVICE_SP2600_PROXY_PROPERTIES_IMPL_H +#define __IPU_DEVICE_SP2600_PROXY_PROPERTIES_IMPL_H + +/* sp2600_proxy definition */ + +#include "ipu_device_cell_properties_struct.h" + +enum ipu_device_sp2600_proxy_registers { + /* control registers */ + IPU_DEVICE_SP2600_PROXY_STAT_CTRL = 0x0, + IPU_DEVICE_SP2600_PROXY_START_PC = 0x4, + + /* THESE ADDRESSES NEED TO BE CHECKED !!!! */ + /* master port registers */ + IPU_DEVICE_SP2600_PROXY_ICACHE_BASE = 0x10, + IPU_DEVICE_SP2600_PROXY_ICACHE_INFO = 0x14, + IPU_DEVICE_SP2600_PROXY_ICACHE_INFO_OVERRIDE = 0x18, + + IPU_DEVICE_SP2600_PROXY_QMEM_BASE = 0x1C, + + IPU_DEVICE_SP2600_PROXY_CMEM_BASE = 0x28, + IPU_DEVICE_SP2600_PROXY_CMEM_INFO = 0x2C, + IPU_DEVICE_SP2600_PROXY_CMEM_INFO_OVERRIDE = 0x30, + + IPU_DEVICE_SP2600_PROXY_XMEM_BASE = 0x58, + IPU_DEVICE_SP2600_PROXY_XMEM_INFO = 0x5C, + IPU_DEVICE_SP2600_PROXY_XMEM_INFO_OVERRIDE = 0x60, + + /* debug registers */ + IPU_DEVICE_SP2600_PROXY_DEBUG_PC = 0x9C, + IPU_DEVICE_SP2600_PROXY_STALL = 0xA0 +}; + + +enum ipu_device_sp2600_proxy_memories { + IPU_DEVICE_SP2600_PROXY_REGS, + IPU_DEVICE_SP2600_PROXY_PMEM, + IPU_DEVICE_SP2600_PROXY_DMEM, + IPU_DEVICE_SP2600_PROXY_NUM_MEMORIES +}; + +static const unsigned int +ipu_device_sp2600_proxy_mem_size[IPU_DEVICE_SP2600_PROXY_NUM_MEMORIES] = { + 0x00AC, + 0x0000, + 0x4000 +}; + +enum ipu_device_sp2600_proxy_masters { + IPU_DEVICE_SP2600_PROXY_ICACHE, + IPU_DEVICE_SP2600_PROXY_QMEM, + IPU_DEVICE_SP2600_PROXY_CMEM, + IPU_DEVICE_SP2600_PROXY_XMEM, + IPU_DEVICE_SP2600_PROXY_NUM_MASTERS +}; + +static const struct ipu_device_cell_master_properties_s +ipu_device_sp2600_proxy_masters[IPU_DEVICE_SP2600_PROXY_NUM_MASTERS] = { + { + 0, + 0xC, + IPU_DEVICE_SP2600_PROXY_ICACHE_BASE, + 
IPU_DEVICE_SP2600_PROXY_ICACHE_INFO, + IPU_DEVICE_SP2600_PROXY_ICACHE_INFO_OVERRIDE + }, + { + 0, + 0xC, + IPU_DEVICE_SP2600_PROXY_QMEM_BASE, + 0xFFFFFFFF, + 0xFFFFFFFF + }, + { + 2, + 0xC, + IPU_DEVICE_SP2600_PROXY_CMEM_BASE, + IPU_DEVICE_SP2600_PROXY_CMEM_INFO, + IPU_DEVICE_SP2600_PROXY_CMEM_INFO_OVERRIDE + }, + { + 2, + 0xC, + IPU_DEVICE_SP2600_PROXY_XMEM_BASE, + IPU_DEVICE_SP2600_PROXY_XMEM_INFO, + IPU_DEVICE_SP2600_PROXY_XMEM_INFO_OVERRIDE + } +}; + +enum ipu_device_sp2600_proxy_stall_bits { + IPU_DEVICE_SP2600_PROXY_STALL_ICACHE, + IPU_DEVICE_SP2600_PROXY_STALL_DMEM, + IPU_DEVICE_SP2600_PROXY_STALL_QMEM, + IPU_DEVICE_SP2600_PROXY_STALL_CMEM, + IPU_DEVICE_SP2600_PROXY_STALL_XMEM, + IPU_DEVICE_SP2600_PROXY_NUM_STALL_BITS +}; + +/* 32 bits per instruction */ +#define IPU_DEVICE_SP2600_PROXY_ICACHE_WORD_SIZE 4 +/* 32 instructions per burst */ +#define IPU_DEVICE_SP2600_PROXY_ICACHE_BURST_SIZE 32 + +static const struct ipu_device_cell_count_s ipu_device_sp2600_proxy_count = { + IPU_DEVICE_SP2600_PROXY_NUM_MEMORIES, + IPU_DEVICE_SP2600_PROXY_NUM_MASTERS, + IPU_DEVICE_SP2600_PROXY_NUM_STALL_BITS, + IPU_DEVICE_SP2600_PROXY_ICACHE_WORD_SIZE * + IPU_DEVICE_SP2600_PROXY_ICACHE_BURST_SIZE +}; + +static const unsigned int +ipu_device_sp2600_proxy_reg_offset[/* CELL_NUM_REGS */] = { + 0x0, 0x4, 0x10, 0xCC, 0xD0 +}; + +static const struct ipu_device_cell_type_properties_s +ipu_device_sp2600_proxy_properties = { + &ipu_device_sp2600_proxy_count, + ipu_device_sp2600_proxy_masters, + ipu_device_sp2600_proxy_reg_offset, + ipu_device_sp2600_proxy_mem_size +}; + +#endif /* __IPU_DEVICE_SP2600_PROXY_PROPERTIES_IMPL_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/fw_abi_common_types/cpu/fw_abi_cpu_types.mk b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/fw_abi_common_types/cpu/fw_abi_cpu_types.mk new file mode 100644 index 0000000000000..b1ffbf7ea21ff --- /dev/null +++ 
b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/fw_abi_common_types/cpu/fw_abi_cpu_types.mk @@ -0,0 +1,24 @@ +# # # +# Support for Intel Camera Imaging ISP subsystem. +# Copyright (c) 2010 - 2018, Intel Corporation. +# +# This program is free software; you can redistribute it and/or modify it +# under the terms and conditions of the GNU General Public License, +# version 2, as published by the Free Software Foundation. +# +# This program is distributed in the hope it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for +# more details +# + +# MODULE is FW ABI COMMON TYPES + +FW_ABI_COMMON_TYPES_DIRS = -I$${MODULES_DIR}/fw_abi_common_types +FW_ABI_COMMON_TYPES_DIRS += -I$${MODULES_DIR}/fw_abi_common_types/cpu + +FW_ABI_COMMON_TYPES_HOST_FILES = +FW_ABI_COMMON_TYPES_HOST_CPPFLAGS = $(FW_ABI_COMMON_TYPES_DIRS) + +FW_ABI_COMMON_TYPES_FW_FILES = +FW_ABI_COMMON_TYPES_FW_CPPFLAGS = $(FW_ABI_COMMON_TYPES_DIRS) diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/fw_abi_common_types/cpu/ia_css_terminal_base_types.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/fw_abi_common_types/cpu/ia_css_terminal_base_types.h new file mode 100644 index 0000000000000..21cc3f43f485e --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/fw_abi_common_types/cpu/ia_css_terminal_base_types.h @@ -0,0 +1,42 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. 
 See the GNU General Public License for + * more details. +*/ + +#ifndef __IA_CSS_TERMINAL_BASE_TYPES_H +#define __IA_CSS_TERMINAL_BASE_TYPES_H + + +#include "type_support.h" +#include "ia_css_terminal_defs.h" + +#define N_UINT16_IN_TERMINAL_STRUCT 3 +#define N_PADDING_UINT8_IN_TERMINAL_STRUCT 5 + +#define SIZE_OF_TERMINAL_STRUCT_BITS \ + (IA_CSS_TERMINAL_TYPE_BITS \ + + IA_CSS_TERMINAL_ID_BITS \ + + N_UINT16_IN_TERMINAL_STRUCT * IA_CSS_UINT16_T_BITS \ + + N_PADDING_UINT8_IN_TERMINAL_STRUCT * IA_CSS_UINT8_T_BITS) + +/* ==================== Base Terminal - START ==================== */ +struct ia_css_terminal_s { /**< Base terminal */ + ia_css_terminal_type_t terminal_type; /**< Type ia_css_terminal_type_t */ + int16_t parent_offset; /**< Offset to the process group */ + uint16_t size; /**< Size of this whole terminal layout-structure */ + uint16_t tm_index; /**< Index of the terminal manifest object */ + ia_css_terminal_ID_t ID; /**< Absolute referral ID for this terminal, valid ID's != 0 */ + uint8_t padding[N_PADDING_UINT8_IN_TERMINAL_STRUCT]; +}; +/* ==================== Base Terminal - END ==================== */ + +#endif /* __IA_CSS_TERMINAL_BASE_TYPES_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/fw_abi_common_types/cpu/ia_css_terminal_manifest_base_types.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/fw_abi_common_types/cpu/ia_css_terminal_manifest_base_types.h new file mode 100644 index 0000000000000..056e1b6d5d4bd --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/fw_abi_common_types/cpu/ia_css_terminal_manifest_base_types.h @@ -0,0 +1,42 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. 
+ * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ + +#ifndef __IA_CSS_TERMINAL_MANIFEST_BASE_TYPES_H +#define __IA_CSS_TERMINAL_MANIFEST_BASE_TYPES_H + +#include "ia_css_terminal_defs.h" + +#define N_PADDING_UINT8_IN_TERMINAL_MAN_STRUCT 5 +#define SIZE_OF_TERMINAL_MANIFEST_STRUCT_IN_BITS \ + (IA_CSS_UINT16_T_BITS \ + + IA_CSS_TERMINAL_ID_BITS \ + + IA_CSS_TERMINAL_TYPE_BITS \ + + IA_CSS_UINT32_T_BITS \ + + (N_PADDING_UINT8_IN_TERMINAL_MAN_STRUCT*IA_CSS_UINT8_T_BITS)) + +/* ==================== Base Terminal Manifest - START ==================== */ +struct ia_css_terminal_manifest_s { + ia_css_terminal_type_t terminal_type; /**< Type ia_css_terminal_type_t */ + int16_t parent_offset; /**< Offset to the program group manifest */ + uint16_t size; /**< Size of this whole terminal-manifest layout-structure */ + ia_css_terminal_ID_t ID; + uint8_t padding[N_PADDING_UINT8_IN_TERMINAL_MAN_STRUCT]; +}; + +typedef struct ia_css_terminal_manifest_s + ia_css_terminal_manifest_t; + +/* ==================== Base Terminal Manifest - END ==================== */ + +#endif /* __IA_CSS_TERMINAL_MANIFEST_BASE_TYPES_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/fw_abi_common_types/ia_css_base_types.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/fw_abi_common_types/ia_css_base_types.h new file mode 100644 index 0000000000000..3b80a17a6ad38 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/fw_abi_common_types/ia_css_base_types.h @@ -0,0 +1,38 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. 
+ * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ + +#ifndef __IA_CSS_BASE_TYPES_H +#define __IA_CSS_BASE_TYPES_H + +#include "type_support.h" + +#define VIED_VADDRESS_BITS 32 +typedef uint32_t vied_vaddress_t; + +#define DEVICE_DESCRIPTOR_ID_BITS 32 +typedef struct { + uint8_t device_id; + uint8_t instance_id; + uint8_t channel_id; + uint8_t section_id; +} device_descriptor_fields_t; + +typedef union { + device_descriptor_fields_t fields; + uint32_t data; +} device_descriptor_id_t; + +typedef uint16_t ia_css_process_id_t; + +#endif /* __IA_CSS_BASE_TYPES_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/fw_abi_common_types/ia_css_terminal_defs.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/fw_abi_common_types/ia_css_terminal_defs.h new file mode 100644 index 0000000000000..dbf1cf93756ff --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/fw_abi_common_types/ia_css_terminal_defs.h @@ -0,0 +1,105 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+*/ + +#ifndef __IA_CSS_TERMINAL_DEFS_H +#define __IA_CSS_TERMINAL_DEFS_H + + +#include "type_support.h" + +#define IA_CSS_TERMINAL_ID_BITS 8 +typedef uint8_t ia_css_terminal_ID_t; +#define IA_CSS_TERMINAL_INVALID_ID ((ia_css_terminal_ID_t)(-1)) + +/* + * Terminal Base Type + */ +typedef enum ia_css_terminal_type { + /**< Data input */ + IA_CSS_TERMINAL_TYPE_DATA_IN = 0, + /**< Data output */ + IA_CSS_TERMINAL_TYPE_DATA_OUT, + /**< Type 6 parameter input */ + IA_CSS_TERMINAL_TYPE_PARAM_STREAM, + /**< Type 1-5 parameter input */ + IA_CSS_TERMINAL_TYPE_PARAM_CACHED_IN, + /**< Type 1-5 parameter output */ + IA_CSS_TERMINAL_TYPE_PARAM_CACHED_OUT, + /**< Represent the new type of terminal for the + * "spatial dependent parameters", when params go in + */ + IA_CSS_TERMINAL_TYPE_PARAM_SPATIAL_IN, + /**< Represent the new type of terminal for the + * "spatial dependent parameters", when params go out + */ + IA_CSS_TERMINAL_TYPE_PARAM_SPATIAL_OUT, + /**< Represent the new type of terminal for the + * explicit slicing, when params go in + */ + IA_CSS_TERMINAL_TYPE_PARAM_SLICED_IN, + /**< Represent the new type of terminal for the + * explicit slicing, when params go out + */ + IA_CSS_TERMINAL_TYPE_PARAM_SLICED_OUT, + /**< State (private data) input */ + IA_CSS_TERMINAL_TYPE_STATE_IN, + /**< State (private data) output */ + IA_CSS_TERMINAL_TYPE_STATE_OUT, + IA_CSS_TERMINAL_TYPE_PROGRAM, + IA_CSS_TERMINAL_TYPE_PROGRAM_CONTROL_INIT, + IA_CSS_N_TERMINAL_TYPES +} ia_css_terminal_type_t; + +#define IA_CSS_TERMINAL_TYPE_BITS 32 + +/* Temporary redirection needed to facilitate merging with the drivers + in a backwards compatible manner */ +#define IA_CSS_TERMINAL_TYPE_PARAM_CACHED IA_CSS_TERMINAL_TYPE_PARAM_CACHED_IN + +/* + * Dimensions of the data objects. Note that a C-style + * data order is assumed. Data stored by row. + */ +typedef enum ia_css_dimension { + /**< The number of columns, i.e. the size of the row */ + IA_CSS_COL_DIMENSION = 0, + /**< The number of rows, i.e. 
the size of the column */ + IA_CSS_ROW_DIMENSION = 1, + IA_CSS_N_DATA_DIMENSION = 2 +} ia_css_dimension_t; + +#define IA_CSS_N_COMMAND_COUNT (4) + +#ifndef PIPE_GENERATION +/* Don't include these complex enum structures in Genpipe, it can't handle and it does not need them */ +/* + * enum ia_css_isys_link_id. Lists the link IDs used by the FW for On The Fly feature + */ +typedef enum ia_css_isys_link_id { + IA_CSS_ISYS_LINK_OFFLINE = 0, + IA_CSS_ISYS_LINK_MAIN_OUTPUT = 1, + IA_CSS_ISYS_LINK_PDAF_OUTPUT = 2 +} ia_css_isys_link_id_t; +#define N_IA_CSS_ISYS_LINK_ID (IA_CSS_ISYS_LINK_PDAF_OUTPUT + 1) + +/* + * enum ia_css_data_barrier_link_id. Lists the link IDs used by the FW for data barrier feature + */ +typedef enum ia_css_data_barrier_link_id { + IA_CSS_DATA_BARRIER_LINK_MEMORY = N_IA_CSS_ISYS_LINK_ID, + N_IA_CSS_DATA_BARRIER_LINK_ID +} ia_css_data_barrier_link_id_t; + +#endif /* #ifndef PIPE_GENERATION */ +#endif /* __IA_CSS_TERMINAL_DEFS_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/fw_load/fw_load.mk b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/fw_load/fw_load.mk new file mode 100644 index 0000000000000..0af62100cba82 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/fw_load/fw_load.mk @@ -0,0 +1,59 @@ +# # # +# Support for Intel Camera Imaging ISP subsystem. +# Copyright (c) 2010 - 2018, Intel Corporation. +# +# This program is free software; you can redistribute it and/or modify it +# under the terms and conditions of the GNU General Public License, +# version 2, as published by the Free Software Foundation. +# +# This program is distributed in the hope it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +# FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU General Public License for +# more details +# +# +# MODULE is FW_LOAD + +# select implementation for fw_load +ifeq ($(FW_LOAD_DMA), 1) +FW_LOAD_IMPL = fwdma +else +FW_LOAD_IMPL = xmem +endif + +FW_LOAD_FW_CPPFLAGS = + +# select DMA instance for fw_load +ifeq ($(FW_LOAD_DMA_INSTANCE),) +$(error FW_LOAD_DMA_INSTANCE not specified) +else +ifeq ($(FW_LOAD_DMA_INSTANCE), NCI_DMA_EXT0) +FW_LOAD_FW_CPPFLAGS += -DFW_LOAD_INSTANCE_USE_DMA_EXT0 +else +ifeq ($(FW_LOAD_DMA_INSTANCE), NCI_DMA_FW) +FW_LOAD_FW_CPPFLAGS += -DFW_LOAD_INSTANCE_USE_DMA_FW +else +$(error FW_LOAD_DMA_INSTANCE $(FW_LOAD_DMA_INSTANCE) not supported) +endif +endif +endif + +FW_LOAD_DIR = $${MODULES_DIR}/fw_load +FW_LOAD_INTERFACE = $(FW_LOAD_DIR)/interface +FW_LOAD_SOURCES = $(FW_LOAD_DIR)/src/$(FW_LOAD_IMPL) + +# XMEM/FWDMA supports on SP side +FW_LOAD_FW_FILES = $(FW_LOAD_SOURCES)/ia_css_fw_load.c +FW_LOAD_FW_CPPFLAGS += -I$(FW_LOAD_INTERFACE) \ + -I$(FW_LOAD_SOURCES) \ + -I$(FW_LOAD_DIR)/src + +# Only XMEM supports on Host side +FW_LOAD_HOST_FILES = $(FW_LOAD_DIR)/src/xmem/ia_css_fw_load.c +FW_LOAD_HOST_CPPFLAGS = -I$(FW_LOAD_INTERFACE) \ + -I$(FW_LOAD_DIR)/src/xmem \ + -I$(FW_LOAD_DIR)/src + +ifdef FW_LOAD_NO_OF_REQUEST_OFFSET +FW_LOAD_FW_CPPFLAGS += -DFW_LOAD_NO_OF_REQUEST_ADDRESS=$(FW_LOAD_NO_OF_REQUEST_OFFSET) +endif # FW_LOAD_NO_OF_REQUEST_OFFSET diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/fw_load/interface/ia_css_fw_load.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/fw_load/interface/ia_css_fw_load.h new file mode 100644 index 0000000000000..d1f7926f39c60 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/fw_load/interface/ia_css_fw_load.h @@ -0,0 +1,155 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. 
+ * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ + +#ifndef __IA_CSS_FW_LOAD_H +#define __IA_CSS_FW_LOAD_H + +#include "ia_css_fw_load_storage_class.h" +#include "ia_css_xmem.h" +#include "ia_css_cmem.h" + +enum ia_css_fw_load_mode { + IA_CSS_DBUS_ADDRESS = 0, + IA_CSS_CBUS_ADDRESS +}; + +/* Perform Initialization for fwload + Client must call init before it calls any other API + */ + +IA_CSS_FW_LOAD_STORAGE_CLASS_H void +ia_css_fw_load_init(void); + +/* This notifies the user what address has to be passed into the 'dst' parameter + * of the ia_css_fw_copy function and the ia_css_fw_zero function. + * When this function returns IA_CSS_DBUS_ADDRESS, the user must pass a data-bus + * address, when the function returns IA_CSS_CBUS_ADDRESS, the user must pass a + * control-bus address. + * XMEM implementation will require control-bus address while, + * DMA implementation will require data-bus addresses. +*/ +IA_CSS_FW_LOAD_STORAGE_CLASS_H unsigned int +ia_css_fw_load_get_mode(void); + +/***************** FW LOAD BLOCKING FUNCTIONS *******************************/ +/* NOTE : User cannot use blocking functions immediately after calling any + * non-blocking request functions. User must finish all the load request before + * it calls any blocking function. + * e.g. Following is the invalid use case. + * - ia_css_fw_load_copy_begin (non-blocking) then without ending this request, + * it calls ia_css_fw_load_copy (blocking). Client should not do this. + * But before calling ia_css_fw_load_copy, it should finish all request by + * calling ia_css_fw_end(). 
+ */ + +/* Perform a single data transfer from DDR/IMR (src) to local variable(dst). + All arguments are multiples of 4. + The function returns when the transfer has completed. + The function may block. + */ +IA_CSS_FW_LOAD_STORAGE_CLASS_H void +ia_css_fw_load( + unsigned int mmid, + ia_css_xmem_address_t src, + void *dst, + unsigned int size +); + +/* Perform a single data transfer from DDR/IMR (src) to the subsystem (dst). + All arguments are multiples of 4. + The function returns when the transfer has completed. + The function may block. + */ +IA_CSS_FW_LOAD_STORAGE_CLASS_H void +ia_css_fw_copy( + unsigned int mmid, + unsigned int ssid, + ia_css_xmem_address_t src, + ia_css_cmem_address_t dst, + unsigned int size +); + +/* Perform zeroing the memory in subsystem (dst) + The function returns when all transfers have completed. + The function may block. + */ +IA_CSS_FW_LOAD_STORAGE_CLASS_H void +ia_css_fw_zero( + unsigned int ssid, + ia_css_cmem_address_t dst, + unsigned int size); + +/***************** FW LOAD NON_BLOCKING FUNCTIONS ****************************/ + +/* Perform a single data transfer from DDR/IMR (src) to local variable(dst). + All arguments are multiples of 4. + The function returns when the transfer has completed. + The function will not block. + */ +IA_CSS_FW_LOAD_STORAGE_CLASS_H unsigned int +ia_css_fw_load_begin( + unsigned int mmid, + ia_css_xmem_address_t src, + void *dst, + unsigned int size +); + +/* START OF TRANSFER / SUBMIT */ +/* Start a single data transfer from DDR/IMR (src) to the subsystem (dst). + The function returns 1 when the transfer has been issued successfully. + When the transfer cannot be issued, the function returns 0. + The function will not block. 
+ */ +IA_CSS_FW_LOAD_STORAGE_CLASS_H unsigned int +ia_css_fw_copy_begin( + unsigned int mmid, + unsigned int ssid, + ia_css_xmem_address_t src, + ia_css_cmem_address_t dst, + unsigned int size +); + +/* Perform zeroing the subsystem (dst) memory + This function will not block + */ +IA_CSS_FW_LOAD_STORAGE_CLASS_H unsigned int +ia_css_fw_zero_begin( + unsigned int ssid, + ia_css_cmem_address_t dst, + unsigned int size); + +/* END OF TRANSFER / ACKNOWLEDGES */ +/* Complete at most n transfers, + returns the number of transfers that could be completed + */ +IA_CSS_FW_LOAD_STORAGE_CLASS_H unsigned int +ia_css_fw_end(unsigned int n); + +/* OPTIONALLY USED FUNCTIONS */ +/* Return the number of transactions that may be submitted without blocking */ +IA_CSS_FW_LOAD_STORAGE_CLASS_H unsigned int +ia_css_fw_copy_begin_available(void); + +/* Return the number of transactions may be ended */ +IA_CSS_FW_LOAD_STORAGE_CLASS_H unsigned int +ia_css_fw_copy_end_available(void); + +#ifdef __INLINE_IA_CSS_FW_LOAD__ +#include "ia_css_fw_load_blocking_impl.h" +#include "ia_css_fw_load_non_blocking_impl.h" +#include "ia_css_fw_load_impl.h" +#endif + + +#endif /* __IA_CSS_FW_LOAD_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/fw_load/interface/ia_css_fw_load_storage_class.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/fw_load/interface/ia_css_fw_load_storage_class.h new file mode 100644 index 0000000000000..10ad61f89ea91 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/fw_load/interface/ia_css_fw_load_storage_class.h @@ -0,0 +1,28 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. 
+ * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ + +#ifndef __IA_CSS_FW_LOAD_STORAGE_CLASS_H +#define __IA_CSS_FW_LOAD_STORAGE_CLASS_H + +#include "storage_class.h" + +#ifndef __INLINE_IA_CSS_FW_LOAD__ +#define IA_CSS_FW_LOAD_STORAGE_CLASS_H STORAGE_CLASS_EXTERN +#define IA_CSS_FW_LOAD_STORAGE_CLASS_C +#else +#define IA_CSS_FW_LOAD_STORAGE_CLASS_H STORAGE_CLASS_INLINE +#define IA_CSS_FW_LOAD_STORAGE_CLASS_C STORAGE_CLASS_INLINE +#endif + +#endif /* __IA_CSS_FW_LOAD_STORAGE_CLASS_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/fw_load/src/xmem/ia_css_fw_load.c b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/fw_load/src/xmem/ia_css_fw_load.c new file mode 100644 index 0000000000000..5930a6b1e8d21 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/fw_load/src/xmem/ia_css_fw_load.c @@ -0,0 +1,29 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+*/ + +/* C file with (optionally) inlined files */ + +/* Global variable for tracking the number of fw_load transactions */ +/* Needed in host side implementation */ +#ifndef __VIED_CELL +unsigned int started; +#endif + +#ifdef __INLINE_IA_CSS_FW_LOAD__ +static inline int __avoid_warning_on_empty_file(void) { return 0; } +#else +#include "ia_css_fw_load_blocking_impl.h" +#include "ia_css_fw_load_non_blocking_impl.h" +#include "ia_css_fw_load_impl.h" +#endif /* __INLINE_IA_CSS_FW_LOAD__ */ diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/fw_load/src/xmem/ia_css_fw_load_blocking_impl.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/fw_load/src/xmem/ia_css_fw_load_blocking_impl.h new file mode 100644 index 0000000000000..02ad9c36156e0 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/fw_load/src/xmem/ia_css_fw_load_blocking_impl.h @@ -0,0 +1,54 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+*/ + +#ifndef __IA_CSS_FW_LOAD_BLOCKING_IMPL_H +#define __IA_CSS_FW_LOAD_BLOCKING_IMPL_H + +#include "ia_css_fw_load.h" +#include "ia_css_fw_load_storage_class.h" +#include "ia_css_xmem_cmem.h" +#include "ia_css_xmem.h" +#include "ia_css_cmem.h" + +IA_CSS_FW_LOAD_STORAGE_CLASS_C void +ia_css_fw_load( + unsigned int mmid, + ia_css_xmem_address_t src, + void *dst, + unsigned int size) +{ + ia_css_xmem_load(mmid, src, dst, size); +} + +IA_CSS_FW_LOAD_STORAGE_CLASS_C void +ia_css_fw_copy( + unsigned int mmid, + unsigned int ssid, + ia_css_xmem_address_t src, + ia_css_cmem_address_t dst, + unsigned int size) +{ + ia_css_xmem_to_cmem_copy(mmid, ssid, src, dst, size); +} + +IA_CSS_FW_LOAD_STORAGE_CLASS_C void +ia_css_fw_zero( + unsigned int ssid, + ia_css_cmem_address_t dst, + unsigned int size) +{ + ia_css_cmem_zero(ssid, dst, size); +} + +#endif /* __IA_CSS_FW_LOAD_BLOCKING_IMPL_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/fw_load/src/xmem/ia_css_fw_load_impl.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/fw_load/src/xmem/ia_css_fw_load_impl.h new file mode 100644 index 0000000000000..a9b6db8a5f55c --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/fw_load/src/xmem/ia_css_fw_load_impl.h @@ -0,0 +1,26 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+*/ + +#ifndef __IA_CSS_FW_LOAD_IMPL_H +#define __IA_CSS_FW_LOAD_IMPL_H + +#include "ia_css_fw_load.h" + +IA_CSS_FW_LOAD_STORAGE_CLASS_C unsigned int +ia_css_fw_load_get_mode(void) +{ + return IA_CSS_CBUS_ADDRESS; +} + +#endif /* __IA_CSS_FW_LOAD_IMPL_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/fw_load/src/xmem/ia_css_fw_load_non_blocking_host_state.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/fw_load/src/xmem/ia_css_fw_load_non_blocking_host_state.h new file mode 100644 index 0000000000000..1691e4522f78b --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/fw_load/src/xmem/ia_css_fw_load_non_blocking_host_state.h @@ -0,0 +1,21 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+*/ + +#ifndef __IA_CSS_FW_LOAD_NON_BLOCKING_HOST_STATE_H +#define __IA_CSS_FW_LOAD_NON_BLOCKING_HOST_STATE_H +/* Global variable for tracking the number of fw_load transactions */ +/* Used in xmem non blocking host side implementation */ +extern unsigned int started; + +#endif /* __IA_CSS_FW_LOAD_NON_BLOCKING_HOST_STATE_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/fw_load/src/xmem/ia_css_fw_load_non_blocking_impl.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/fw_load/src/xmem/ia_css_fw_load_non_blocking_impl.h new file mode 100644 index 0000000000000..c8949aa493700 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/fw_load/src/xmem/ia_css_fw_load_non_blocking_impl.h @@ -0,0 +1,125 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+*/ + +#ifndef __IA_CSS_FW_LOAD_NON_BLOCKING_IMPL_H +#define __IA_CSS_FW_LOAD_NON_BLOCKING_IMPL_H + +#include "type_support.h" +#include "ia_css_fw_load.h" +#include "ia_css_fw_load_storage_class.h" +#include "math_support.h" +#include "error_support.h" + +#ifdef __VIED_CELL +#include "ia_css_fw_load_non_blocking_impl_sp.h" +#else +#include "ia_css_fw_load_non_blocking_impl_host.h" +#endif + +#define FW_LOAD_MAX_NB_TRANS UINT_MAX +#define FW_LOAD_XMEM_MAX_TRANSACTION_SUPPORT \ + umin(FW_LOAD_MAX_NB_TRANS, FW_LOAD_MAX_TRANS_SUPPORTED) + + +IA_CSS_FW_LOAD_STORAGE_CLASS_C void +ia_css_fw_load_init(void) +{ + fw_load_transaction_init(); +} + +/* START OF TRANSFER */ +IA_CSS_FW_LOAD_STORAGE_CLASS_C unsigned int +ia_css_fw_load_begin( + unsigned int mmid, + ia_css_xmem_address_t src, + void *dst, + unsigned int size +) +{ + if (!ia_css_fw_copy_begin_available()) + return 0; + ia_css_fw_load(mmid, src, dst, size); + fw_load_transaction_add(); + return size; +} + +IA_CSS_FW_LOAD_STORAGE_CLASS_C unsigned int +ia_css_fw_copy_begin( + unsigned int mmid, + unsigned int ssid, + ia_css_xmem_address_t src, + ia_css_cmem_address_t dst, + unsigned int size) +{ + /* Check if there is space to hold the ack event in the queue */ + if (!ia_css_fw_copy_begin_available()) + return 0; + ia_css_fw_copy(mmid, ssid, src, dst, size); + fw_load_transaction_add(); + return size; +} + + +IA_CSS_FW_LOAD_STORAGE_CLASS_C unsigned int +ia_css_fw_zero_begin( + unsigned int ssid, + ia_css_cmem_address_t dst, + unsigned int size) +{ + if (!ia_css_fw_copy_begin_available()) + return 0; /* quota exceeded */ + + ia_css_fw_zero(ssid, dst, size); + fw_load_transaction_add(); + return size; +} + +/* END OF TRANSFER */ +IA_CSS_FW_LOAD_STORAGE_CLASS_C unsigned int +ia_css_fw_end(unsigned int n) +{ + int no_of_ack_received; + int fw_end_count; + int transaction_done; + bool success; + + no_of_ack_received = ia_css_fw_copy_end_available(); + fw_end_count = min(n, no_of_ack_received); + + transaction_done = 0; + + 
while (transaction_done < fw_end_count) { + success = fw_load_transaction_remove(); + assert(success == true); + transaction_done++; + } + return fw_end_count; +} + +/* OPTIONALLY USED */ +IA_CSS_FW_LOAD_STORAGE_CLASS_C unsigned int +ia_css_fw_copy_begin_available(void) +{ + return (FW_LOAD_XMEM_MAX_TRANSACTION_SUPPORT - + ia_css_fw_copy_end_available()); +} + +IA_CSS_FW_LOAD_STORAGE_CLASS_C unsigned int +ia_css_fw_copy_end_available(void) +{ + /* check how many transactions are ready to be ended */ + return fw_load_transaction_get_finished(); +} + +#endif /* __IA_CSS_FW_LOAD_NON_BLOCKING_IMPL_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/fw_load/src/xmem/ia_css_fw_load_non_blocking_impl_host.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/fw_load/src/xmem/ia_css_fw_load_non_blocking_impl_host.h new file mode 100644 index 0000000000000..25a05cce25768 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/fw_load/src/xmem/ia_css_fw_load_non_blocking_impl_host.h @@ -0,0 +1,45 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+*/ + +#ifndef __IA_CSS_FW_LOAD_NON_BLOCKING_IMPL_HOST_H +#define __IA_CSS_FW_LOAD_NON_BLOCKING_IMPL_HOST_H + +#include "storage_class.h" +#include "type_support.h" +#include "ia_css_fw_load_non_blocking_host_state.h" + +#define FW_LOAD_MAX_TRANS_SUPPORTED UINT_MAX + +STORAGE_CLASS_INLINE void fw_load_transaction_init(void) +{ + started = 0; +} + +STORAGE_CLASS_INLINE bool fw_load_transaction_add(void) +{ + started++; + return true; +} + +STORAGE_CLASS_INLINE bool fw_load_transaction_remove(void) +{ + started--; + return true; +} + +STORAGE_CLASS_INLINE unsigned int fw_load_transaction_get_finished(void) +{ + return started; +} +#endif /* __IA_CSS_FW_LOAD_NON_BLOCKING_IMPL_HOST_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/pkg_dir/interface/ia_css_pkg_dir.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/pkg_dir/interface/ia_css_pkg_dir.h new file mode 100644 index 0000000000000..a284d74bb4a67 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/pkg_dir/interface/ia_css_pkg_dir.h @@ -0,0 +1,99 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+*/ + +#ifndef __IA_CSS_PKG_DIR_H +#define __IA_CSS_PKG_DIR_H + +#include "ia_css_pkg_dir_storage_class.h" +#include "ia_css_pkg_dir_types.h" +#include "type_support.h" + +IA_CSS_PKG_DIR_STORAGE_CLASS_H +const ia_css_pkg_dir_entry_t *ia_css_pkg_dir_get_entry( + const ia_css_pkg_dir_t *pkg_dir, + uint32_t index +); + +/* User is expected to call the verify function manually, + * other functions do not call it internally + */ +IA_CSS_PKG_DIR_STORAGE_CLASS_H +int ia_css_pkg_dir_verify_header( + const ia_css_pkg_dir_entry_t *pkg_dir_header +); + +IA_CSS_PKG_DIR_STORAGE_CLASS_H +uint32_t ia_css_pkg_dir_get_num_entries( + const ia_css_pkg_dir_entry_t *pkg_dir_header +); + +IA_CSS_PKG_DIR_STORAGE_CLASS_H +uint32_t ia_css_pkg_dir_get_size_in_bytes( + const ia_css_pkg_dir_entry_t *pkg_dir_header +); + +IA_CSS_PKG_DIR_STORAGE_CLASS_H +enum ia_css_pkg_dir_version ia_css_pkg_dir_get_version( + const ia_css_pkg_dir_entry_t *pkg_dir_header +); + +IA_CSS_PKG_DIR_STORAGE_CLASS_H +uint16_t ia_css_pkg_dir_set_version( + ia_css_pkg_dir_entry_t *pkg_dir_header, + enum ia_css_pkg_dir_version version +); + + +IA_CSS_PKG_DIR_STORAGE_CLASS_H +uint32_t ia_css_pkg_dir_entry_get_address_lo( + const ia_css_pkg_dir_entry_t *entry +); + +IA_CSS_PKG_DIR_STORAGE_CLASS_H +uint32_t ia_css_pkg_dir_entry_get_address_hi( + const ia_css_pkg_dir_entry_t *entry +); + +IA_CSS_PKG_DIR_STORAGE_CLASS_H +uint32_t ia_css_pkg_dir_entry_get_size( + const ia_css_pkg_dir_entry_t *entry +); + +IA_CSS_PKG_DIR_STORAGE_CLASS_H +uint16_t ia_css_pkg_dir_entry_get_version( + const ia_css_pkg_dir_entry_t *entry +); + +IA_CSS_PKG_DIR_STORAGE_CLASS_H +uint8_t ia_css_pkg_dir_entry_get_type( + const ia_css_pkg_dir_entry_t *entry +); + +/* Get the address of the specified entry in the PKG_DIR + * Note: This function expects the complete PKG_DIR in the same memory space + * and the entries contains offsets and not addresses. 
+ */ +IA_CSS_PKG_DIR_STORAGE_CLASS_H +void *ia_css_pkg_dir_get_entry_address( + const ia_css_pkg_dir_t *pkg_dir, + uint32_t index +); + +#ifdef __IA_CSS_PKG_DIR_INLINE__ + +#include "ia_css_pkg_dir_impl.h" + +#endif + +#endif /* __IA_CSS_PKG_DIR_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/pkg_dir/interface/ia_css_pkg_dir_iunit.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/pkg_dir/interface/ia_css_pkg_dir_iunit.h new file mode 100644 index 0000000000000..ad194b0389eb7 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/pkg_dir/interface/ia_css_pkg_dir_iunit.h @@ -0,0 +1,46 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+*/ + +#ifndef __IA_CSS_PKG_DIR_IUNIT_H +#define __IA_CSS_PKG_DIR_IUNIT_H + +/* In bootflow, pkg_dir only supports up to 16 entries in pkg_dir + * pkg_dir_header + Psys_server pg + Isys_server pg + 13 Client pg + */ + +enum { + IA_CSS_PKG_DIR_SIZE = 16, + IA_CSS_PKG_DIR_ENTRIES = IA_CSS_PKG_DIR_SIZE - 1 +}; + +#define IUNIT_MAX_CLIENT_PKG_ENTRIES 13 + +/* Example assignment of unique identifiers for the FW components + * This should match the identifiers in the manifest + */ +enum ia_css_pkg_dir_entry_type { + IA_CSS_PKG_DIR_HEADER = 0, + IA_CSS_PKG_DIR_PSYS_SERVER_PG, + IA_CSS_PKG_DIR_ISYS_SERVER_PG, + IA_CSS_PKG_DIR_CLIENT_PG +}; + +/* Fixed entries in the package directory */ +enum ia_css_pkg_dir_index { + IA_CSS_PKG_DIR_PSYS_INDEX = 0, + IA_CSS_PKG_DIR_ISYS_INDEX = 1, + IA_CSS_PKG_DIR_CLIENT_0 = 2 +}; + +#endif /* __IA_CSS_PKG_DIR_IUNIT_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/pkg_dir/interface/ia_css_pkg_dir_storage_class.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/pkg_dir/interface/ia_css_pkg_dir_storage_class.h new file mode 100644 index 0000000000000..cb64172151f92 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/pkg_dir/interface/ia_css_pkg_dir_storage_class.h @@ -0,0 +1,29 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+*/ + +#ifndef __IA_CSS_PKG_DIR_STORAGE_CLASS_H +#define __IA_CSS_PKG_DIR_STORAGE_CLASS_H + + +#include "storage_class.h" + +#ifndef __IA_CSS_PKG_DIR_INLINE__ +#define IA_CSS_PKG_DIR_STORAGE_CLASS_H STORAGE_CLASS_EXTERN +#define IA_CSS_PKG_DIR_STORAGE_CLASS_C +#else +#define IA_CSS_PKG_DIR_STORAGE_CLASS_H STORAGE_CLASS_INLINE +#define IA_CSS_PKG_DIR_STORAGE_CLASS_C STORAGE_CLASS_INLINE +#endif + +#endif /* __IA_CSS_PKG_DIR_STORAGE_CLASS_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/pkg_dir/interface/ia_css_pkg_dir_types.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/pkg_dir/interface/ia_css_pkg_dir_types.h new file mode 100644 index 0000000000000..b024b3da2f9e6 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/pkg_dir/interface/ia_css_pkg_dir_types.h @@ -0,0 +1,41 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+*/ + +#ifndef __IA_CSS_PKG_DIR_TYPES_H +#define __IA_CSS_PKG_DIR_TYPES_H + +#include "type_support.h" + +struct ia_css_pkg_dir_entry { + uint32_t address[2]; + uint32_t size; + uint16_t version; + uint8_t type; + uint8_t unused; +}; + +typedef void ia_css_pkg_dir_t; +typedef struct ia_css_pkg_dir_entry ia_css_pkg_dir_entry_t; + +/* The version field of the pkg_dir header defines + * if entries contain offsets or pointers + */ +/* This is temporary, until all pkg_dirs use pointers */ +enum ia_css_pkg_dir_version { + IA_CSS_PKG_DIR_POINTER, + IA_CSS_PKG_DIR_OFFSET +}; + + +#endif /* __IA_CSS_PKG_DIR_TYPES_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/pkg_dir/pkg_dir.mk b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/pkg_dir/pkg_dir.mk new file mode 100644 index 0000000000000..32c8a68f3653c --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/pkg_dir/pkg_dir.mk @@ -0,0 +1,29 @@ +# # # +# Support for Intel Camera Imaging ISP subsystem. +# Copyright (c) 2010 - 2018, Intel Corporation. +# +# This program is free software; you can redistribute it and/or modify it +# under the terms and conditions of the GNU General Public License, +# version 2, as published by the Free Software Foundation. +# +# This program is distributed in the hope it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +# FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU General Public License for +# more details +# +# +# MODULE is PKG DIR + +PKG_DIR_DIR = $${MODULES_DIR}/pkg_dir +PKG_DIR_INTERFACE = $(PKG_DIR_DIR)/interface +PKG_DIR_SOURCES = $(PKG_DIR_DIR)/src + +PKG_DIR_FILES = $(PKG_DIR_DIR)/src/ia_css_pkg_dir.c +PKG_DIR_CPPFLAGS = -I$(PKG_DIR_INTERFACE) +PKG_DIR_CPPFLAGS += -I$(PKG_DIR_SOURCES) +PKG_DIR_CPPFLAGS += -I$${MODULES_DIR}/../isp/kernels/io_ls/common +PKG_DIR_CPPFLAGS += -I$${MODULES_DIR}/fw_abi_common_types/ipu +PKG_DIR_CPPFLAGS += -I$${MODULES_DIR}/fw_abi_common_types/ipu/$(FW_ABI_IPU_TYPES_VERSION) + +PKG_DIR_CREATE_FILES = $(PKG_DIR_DIR)/src/ia_css_pkg_dir_create.c +PKG_DIR_UPDATE_FILES = $(PKG_DIR_DIR)/src/ia_css_pkg_dir_update.c diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/pkg_dir/src/ia_css_pkg_dir.c b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/pkg_dir/src/ia_css_pkg_dir.c new file mode 100644 index 0000000000000..348b56833e060 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/pkg_dir/src/ia_css_pkg_dir.c @@ -0,0 +1,27 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+*/ + +#ifdef __IA_CSS_PKG_DIR_INLINE__ + +#include "storage_class.h" + +STORAGE_CLASS_INLINE int __ia_css_pkg_dir_avoid_warning_on_empty_file(void) +{ + return 0; +} + +#else +#include "ia_css_pkg_dir_impl.h" + +#endif diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/pkg_dir/src/ia_css_pkg_dir_impl.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/pkg_dir/src/ia_css_pkg_dir_impl.h new file mode 100644 index 0000000000000..d5067d21398f9 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/pkg_dir/src/ia_css_pkg_dir_impl.h @@ -0,0 +1,201 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+*/ + +#ifndef __IA_CSS_PKG_DIR_IMPL_H +#define __IA_CSS_PKG_DIR_IMPL_H + +#include "ia_css_pkg_dir.h" +#include "ia_css_pkg_dir_int.h" +#include "error_support.h" +#include "type_support.h" +#include "assert_support.h" + +IA_CSS_PKG_DIR_STORAGE_CLASS_C +const ia_css_pkg_dir_entry_t *ia_css_pkg_dir_get_entry( + const ia_css_pkg_dir_t *pkg_dir, + uint32_t index) +{ + DECLARE_ERRVAL + struct ia_css_pkg_dir_entry *pkg_dir_header = NULL; + + verifexitval(pkg_dir != NULL, EFAULT); + + pkg_dir_header = (struct ia_css_pkg_dir_entry *)pkg_dir; + + /* First entry of the structure is the header, skip that */ + index++; + verifexitval(index < pkg_dir_header->size, EFAULT); + +EXIT: + if (haserror(EFAULT)) { + return NULL; + } + return &(pkg_dir_header[index]); +} + +IA_CSS_PKG_DIR_STORAGE_CLASS_C +int ia_css_pkg_dir_verify_header(const ia_css_pkg_dir_entry_t *pkg_dir_header) +{ + DECLARE_ERRVAL + verifexitval(pkg_dir_header != NULL, EFAULT); + +EXIT: + if (haserror(EFAULT)) { + return -1; + } + return ((pkg_dir_header->address[0] == PKG_DIR_MAGIC_VAL_0) + && (pkg_dir_header->address[1] == PKG_DIR_MAGIC_VAL_1)) ? 
+ 0 : -1; +} + +IA_CSS_PKG_DIR_STORAGE_CLASS_C +uint32_t ia_css_pkg_dir_get_num_entries( + const ia_css_pkg_dir_entry_t *pkg_dir_header) +{ + DECLARE_ERRVAL + uint32_t size = 0; + + verifexitval(pkg_dir_header != NULL, EFAULT); + size = pkg_dir_header->size; + verifexitval(size > 0, EFAULT); +EXIT: + if (haserror(EFAULT)) { + return 0; + } + return size - 1; +} + +IA_CSS_PKG_DIR_STORAGE_CLASS_C +enum ia_css_pkg_dir_version +ia_css_pkg_dir_get_version(const ia_css_pkg_dir_entry_t *pkg_dir_header) +{ + assert(pkg_dir_header != NULL); + return pkg_dir_header->version; +} + +IA_CSS_PKG_DIR_STORAGE_CLASS_C +uint16_t ia_css_pkg_dir_set_version(ia_css_pkg_dir_entry_t *pkg_dir_header, + enum ia_css_pkg_dir_version version) +{ + DECLARE_ERRVAL + + verifexitval(pkg_dir_header != NULL, EFAULT); +EXIT: + if (haserror(EFAULT)) { + return 1; + } + pkg_dir_header->version = version; + return 0; +} + +IA_CSS_PKG_DIR_STORAGE_CLASS_C +uint32_t ia_css_pkg_dir_get_size_in_bytes( + const ia_css_pkg_dir_entry_t *pkg_dir_header) +{ + DECLARE_ERRVAL + + verifexitval(pkg_dir_header != NULL, EFAULT); +EXIT: + if (haserror(EFAULT)) { + return 0; + } + return sizeof(struct ia_css_pkg_dir_entry) * pkg_dir_header->size; +} + +IA_CSS_PKG_DIR_STORAGE_CLASS_C +uint32_t ia_css_pkg_dir_entry_get_address_lo( + const ia_css_pkg_dir_entry_t *entry) +{ + DECLARE_ERRVAL + + verifexitval(entry != NULL, EFAULT); +EXIT: + if (haserror(EFAULT)) { + return 0; + } + return entry->address[0]; +} + +IA_CSS_PKG_DIR_STORAGE_CLASS_C +uint32_t ia_css_pkg_dir_entry_get_address_hi( + const ia_css_pkg_dir_entry_t *entry) +{ + DECLARE_ERRVAL + + verifexitval(entry != NULL, EFAULT); +EXIT: + if (haserror(EFAULT)) { + return 0; + } + return entry->address[1]; +} + +IA_CSS_PKG_DIR_STORAGE_CLASS_C +uint32_t ia_css_pkg_dir_entry_get_size(const ia_css_pkg_dir_entry_t *entry) +{ + DECLARE_ERRVAL + + verifexitval(entry != NULL, EFAULT); +EXIT: + if (haserror(EFAULT)) { + return 0; + } + return entry->size; +} + 
+IA_CSS_PKG_DIR_STORAGE_CLASS_C +uint16_t ia_css_pkg_dir_entry_get_version(const ia_css_pkg_dir_entry_t *entry) +{ + DECLARE_ERRVAL + + verifexitval(entry != NULL, EFAULT); +EXIT: + if (haserror(EFAULT)) { + return 0; + } + return entry->version; +} + +IA_CSS_PKG_DIR_STORAGE_CLASS_C +uint8_t ia_css_pkg_dir_entry_get_type(const ia_css_pkg_dir_entry_t *entry) +{ + DECLARE_ERRVAL + + verifexitval(entry != NULL, EFAULT); +EXIT: + if (haserror(EFAULT)) { + return 0; + } + return entry->type; +} + + +IA_CSS_PKG_DIR_STORAGE_CLASS_C +void *ia_css_pkg_dir_get_entry_address(const ia_css_pkg_dir_t *pkg_dir, + uint32_t index) +{ + void *entry_blob = NULL; + const ia_css_pkg_dir_entry_t *pkg_dir_entry = + ia_css_pkg_dir_get_entry(pkg_dir, index-1); + + if ((pkg_dir_entry != NULL) && + (ia_css_pkg_dir_entry_get_size(pkg_dir_entry) > 0)) { + assert(ia_css_pkg_dir_entry_get_address_hi(pkg_dir_entry) == 0); + entry_blob = (void *)((char *)pkg_dir + + ia_css_pkg_dir_entry_get_address_lo(pkg_dir_entry)); + } + return entry_blob; +} + +#endif /* __IA_CSS_PKG_DIR_IMPL_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/pkg_dir/src/ia_css_pkg_dir_int.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/pkg_dir/src/ia_css_pkg_dir_int.h new file mode 100644 index 0000000000000..203505fbee54e --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/pkg_dir/src/ia_css_pkg_dir_int.h @@ -0,0 +1,49 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU General Public License for + * more details. +*/ + +#ifndef __IA_CSS_PKG_DIR_INT_H +#define __IA_CSS_PKG_DIR_INT_H + +/* + * Package Dir structure as specified in CSE FAS + * + * PKG DIR Header + * Qword 63:56 55 54:48 47:32 31:24 23:0 + * 0 "_IUPKDR_" + * 1 Rsvd Rsvd Type Version Rsvd Size + * + * Version: Version of the Structure + * Size: Size of the entire table (including header) in 16 byte chunks + * Type: Must be 0 for header + * + * Figure 13: PKG DIR Header + * + * + * PKG DIR Entry + * Qword 63:56 55 54:48 47:32 31:24 23:0 + * N Address/Offset + * N+1 Rsvd Rsvd Type Version Rsvd Size + * + * Version: Version # of the Component + * Size: Size of the component in bytes + * Type: Component Identifier + */ + +#define PKG_DIR_SIZE_BITS 24 +#define PKG_DIR_TYPE_BITS 7 + +#define PKG_DIR_MAGIC_VAL_1 (('_' << 24) | ('I' << 16) | ('U' << 8) | 'P') +#define PKG_DIR_MAGIC_VAL_0 (('K' << 24) | ('D' << 16) | ('R' << 8) | '_') + +#endif /* __IA_CSS_PKG_DIR_INT_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/port/interface/port_env_struct.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/port/interface/port_env_struct.h new file mode 100644 index 0000000000000..4d39a4739a8b0 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/port/interface/port_env_struct.h @@ -0,0 +1,24 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+*/ + +#ifndef __PORT_ENV_STRUCT_H +#define __PORT_ENV_STRUCT_H + +struct port_env { + unsigned int mmid; + unsigned int ssid; + unsigned int mem_addr; +}; + +#endif /* __PORT_ENV_STRUCT_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/port/interface/queue.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/port/interface/queue.h new file mode 100644 index 0000000000000..b233ab3baf014 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/port/interface/queue.h @@ -0,0 +1,40 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ + +#ifndef __QUEUE_H +#define __QUEUE_H + +#include "queue_struct.h" +#include "port_env_struct.h" + +/* + * SYS queues are created by the host + * SYS queues cannot be accessed through the queue interface + * To send data into a queue a send_port must be opened. + * To receive data from a queue, a recv_port must be opened. + */ + +/* return required buffer size for queue */ +unsigned int +sys_queue_buf_size(unsigned int size, unsigned int token_size); + +/* + * initialize a queue that can hold at least 'size' tokens of + * 'token_size' bytes. 
+ */ +void +sys_queue_init(struct sys_queue *q, unsigned int size, + unsigned int token_size, struct sys_queue_res *res); + +#endif /* __QUEUE_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/port/interface/queue_struct.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/port/interface/queue_struct.h new file mode 100644 index 0000000000000..ef48fcfded2b6 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/port/interface/queue_struct.h @@ -0,0 +1,47 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+*/ + +#ifndef __QUEUE_STRUCT_H +#define __QUEUE_STRUCT_H + +/* queue description, shared between sender and receiver */ + +#include "type_support.h" + +#ifdef __VIED_CELL +typedef struct {uint32_t v[2]; } host_buffer_address_t; +#else +typedef uint64_t host_buffer_address_t; +#endif + +typedef uint32_t vied_buffer_address_t; + + +struct sys_queue { + host_buffer_address_t host_address; + vied_buffer_address_t vied_address; + unsigned int size; + unsigned int token_size; + unsigned int wr_reg; /* reg no in subsystem's regmem */ + unsigned int rd_reg; + unsigned int _align; +}; + +struct sys_queue_res { + host_buffer_address_t host_address; + vied_buffer_address_t vied_address; + unsigned int reg; +}; + +#endif /* __QUEUE_STRUCT_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/port/interface/recv_port.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/port/interface/recv_port.h new file mode 100644 index 0000000000000..cce253b266687 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/port/interface/recv_port.h @@ -0,0 +1,34 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+*/ + +#ifndef __RECV_PORT_H +#define __RECV_PORT_H + + +struct recv_port; +struct sys_queue; +struct port_env; + +void +recv_port_open(struct recv_port *p, const struct sys_queue *q, + const struct port_env *env); + +unsigned int +recv_port_available(const struct recv_port *p); + +unsigned int +recv_port_transfer(const struct recv_port *p, void *data); + + +#endif /* __RECV_PORT_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/port/interface/recv_port_struct.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/port/interface/recv_port_struct.h new file mode 100644 index 0000000000000..52ec563b13cf5 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/port/interface/recv_port_struct.h @@ -0,0 +1,32 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+*/ + +#ifndef __RECV_PORT_STRUCT_H +#define __RECV_PORT_STRUCT_H + +#include "buffer_type.h" + +struct recv_port { + buffer_address buffer; /* address of buffer in DDR */ + unsigned int size; + unsigned int token_size; + unsigned int wr_reg; /* index of write pointer located in regmem */ + unsigned int rd_reg; /* index read pointer located in regmem */ + + unsigned int mmid; + unsigned int ssid; + unsigned int mem_addr; /* address of memory containing regmem */ +}; + +#endif /* __RECV_PORT_STRUCT_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/port/interface/send_port.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/port/interface/send_port.h new file mode 100644 index 0000000000000..04a160f3f0199 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/port/interface/send_port.h @@ -0,0 +1,52 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ + +#ifndef __SEND_PORT_H +#define __SEND_PORT_H + + +/* + * A send port can be used to send tokens into a queue. + * The interface can be used on any type of processor (host, SP, ...) + */ + +struct send_port; +struct sys_queue; +struct port_env; + +/* + * Open a send port on a queue. 
After the port is opened, tokens can be sent + */ +void +send_port_open(struct send_port *p, const struct sys_queue *q, + const struct port_env *env); + +/* + * Determine how many tokens can be sent + */ +unsigned int +send_port_available(const struct send_port *p); + +/* + * Send a token via a send port. The function returns the number of + * tokens that have been sent: + * 1: the token was accepted + * 0: the token was not accepted (full queue) + * The size of a token is determined at initialization. + */ +unsigned int +send_port_transfer(const struct send_port *p, const void *data); + + +#endif /* __SEND_PORT_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/port/interface/send_port_struct.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/port/interface/send_port_struct.h new file mode 100644 index 0000000000000..f834c62bc3db6 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/port/interface/send_port_struct.h @@ -0,0 +1,32 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+*/ + +#ifndef __SEND_PORT_STRUCT_H +#define __SEND_PORT_STRUCT_H + +#include "buffer_type.h" + +struct send_port { + buffer_address buffer; + unsigned int size; + unsigned int token_size; + unsigned int wr_reg; /* index of write pointer in regmem */ + unsigned int rd_reg; /* index of read pointer in regmem */ + + unsigned int mmid; + unsigned int ssid; + unsigned int mem_addr; +}; + +#endif /* __SEND_PORT_STRUCT_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/port/port.mk b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/port/port.mk new file mode 100644 index 0000000000000..b3801247802e9 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/port/port.mk @@ -0,0 +1,31 @@ +# # # +# Support for Intel Camera Imaging ISP subsystem. +# Copyright (c) 2010 - 2018, Intel Corporation. +# +# This program is free software; you can redistribute it and/or modify it +# under the terms and conditions of the GNU General Public License, +# version 2, as published by the Free Software Foundation. +# +# This program is distributed in the hope it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +# FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU General Public License for +# more details +# +# +# MODULE is PORT + +PORT_DIR=$${MODULES_DIR}/port + +PORT_INTERFACE=$(PORT_DIR)/interface +PORT_SOURCES1=$(PORT_DIR)/src + +PORT_HOST_FILES += $(PORT_SOURCES1)/send_port.c +PORT_HOST_FILES += $(PORT_SOURCES1)/recv_port.c +PORT_HOST_FILES += $(PORT_SOURCES1)/queue.c + +PORT_HOST_CPPFLAGS += -I$(PORT_INTERFACE) + +PORT_FW_FILES += $(PORT_SOURCES1)/send_port.c +PORT_FW_FILES += $(PORT_SOURCES1)/recv_port.c + +PORT_FW_CPPFLAGS += -I$(PORT_INTERFACE) diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/port/src/queue.c b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/port/src/queue.c new file mode 100644 index 0000000000000..eeec99dfe2d0d --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/port/src/queue.c @@ -0,0 +1,47 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+*/ + +#include "queue.h" + +#include "regmem_access.h" +#include "port_env_struct.h" + +unsigned int sys_queue_buf_size(unsigned int size, unsigned int token_size) +{ + return (size + 1) * token_size; +} + +void +sys_queue_init(struct sys_queue *q, unsigned int size, unsigned int token_size, + struct sys_queue_res *res) +{ + unsigned int buf_size; + + q->size = size + 1; + q->token_size = token_size; + buf_size = sys_queue_buf_size(size, token_size); + + /* acquire the shared buffer space */ + q->host_address = res->host_address; + res->host_address += buf_size; + q->vied_address = res->vied_address; + res->vied_address += buf_size; + + /* acquire the shared read and writer pointers */ + q->wr_reg = res->reg; + res->reg++; + q->rd_reg = res->reg; + res->reg++; + +} diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/port/src/recv_port.c b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/port/src/recv_port.c new file mode 100644 index 0000000000000..31b36e9ceafbb --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/port/src/recv_port.c @@ -0,0 +1,95 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+*/ + +#include "recv_port.h" +#include "port_env_struct.h" /* for port_env */ +#include "queue_struct.h" /* for sys_queue */ +#include "recv_port_struct.h" /* for recv_port */ +#include "buffer_access.h" /* for buffer_load, buffer_address */ +#include "regmem_access.h" /* for regmem_load_32, regmem_store_32 */ +#include "storage_class.h" /* for STORAGE_CLASS_INLINE */ +#include "math_support.h" /* for OP_std_modadd */ +#include "type_support.h" /* for HOST_ADDRESS */ + +#ifndef __VIED_CELL +#include "cpu_mem_support.h" /* for ia_css_cpu_mem_cache_invalidate */ +#endif + +void +recv_port_open(struct recv_port *p, const struct sys_queue *q, + const struct port_env *env) +{ + p->mmid = env->mmid; + p->ssid = env->ssid; + p->mem_addr = env->mem_addr; + + p->size = q->size; + p->token_size = q->token_size; + p->wr_reg = q->wr_reg; + p->rd_reg = q->rd_reg; + +#ifdef __VIED_CELL + p->buffer = q->vied_address; +#else + p->buffer = q->host_address; +#endif +} + +STORAGE_CLASS_INLINE unsigned int +recv_port_index(const struct recv_port *p, unsigned int i) +{ + unsigned int rd = regmem_load_32(p->mem_addr, p->rd_reg, p->ssid); + + return OP_std_modadd(rd, i, p->size); +} + +unsigned int +recv_port_available(const struct recv_port *p) +{ + int wr = (int)regmem_load_32(p->mem_addr, p->wr_reg, p->ssid); + int rd = (int)regmem_load_32(p->mem_addr, p->rd_reg, p->ssid); + + return OP_std_modadd(wr, -rd, p->size); +} + +STORAGE_CLASS_INLINE void +recv_port_copy(const struct recv_port *p, unsigned int i, void *data) +{ + unsigned int rd = recv_port_index(p, i); + unsigned int token_size = p->token_size; + buffer_address addr = p->buffer + (rd * token_size); +#ifndef __VIED_CELL + ia_css_cpu_mem_cache_invalidate((void *)HOST_ADDRESS(p->buffer), + token_size*p->size); +#endif + buffer_load(addr, data, token_size, p->mmid); +} + +STORAGE_CLASS_INLINE void +recv_port_release(const struct recv_port *p, unsigned int i) +{ + unsigned int rd = recv_port_index(p, i); + + 
regmem_store_32(p->mem_addr, p->rd_reg, rd, p->ssid); +} + +unsigned int +recv_port_transfer(const struct recv_port *p, void *data) +{ + if (!recv_port_available(p)) + return 0; + recv_port_copy(p, 0, data); + recv_port_release(p, 1); + return 1; +} diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/port/src/send_port.c b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/port/src/send_port.c new file mode 100644 index 0000000000000..8d1fba08c5d58 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/port/src/send_port.c @@ -0,0 +1,94 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+*/ + +#include "send_port.h" +#include "queue_struct.h" /* for sys_queue */ +#include "send_port_struct.h" /* for send_port */ +#include "port_env_struct.h" /* for port_env */ +#include "regmem_access.h" /* for regmem_load_32, regmem_store_32 */ +#include "buffer_access.h" /* for buffer_store, buffer_address */ +#include "storage_class.h" /* for STORAGE_CLASS_INLINE */ +#include "math_support.h" /* for OP_std_modadd */ +#include "type_support.h" /* for HOST_ADDRESS */ + +#ifndef __VIED_CELL +#include "cpu_mem_support.h" /* for ia_css_cpu_mem_cache_flush */ +#endif + +void +send_port_open(struct send_port *p, const struct sys_queue *q, + const struct port_env *env) +{ + p->mmid = env->mmid; + p->ssid = env->ssid; + p->mem_addr = env->mem_addr; + + p->size = q->size; + p->token_size = q->token_size; + p->wr_reg = q->wr_reg; + p->rd_reg = q->rd_reg; +#ifdef __VIED_CELL + p->buffer = q->vied_address; +#else + p->buffer = q->host_address; +#endif +} + +STORAGE_CLASS_INLINE unsigned int +send_port_index(const struct send_port *p, unsigned int i) +{ + unsigned int wr = regmem_load_32(p->mem_addr, p->wr_reg, p->ssid); + + return OP_std_modadd(wr, i, p->size); +} + +unsigned int +send_port_available(const struct send_port *p) +{ + int rd = (int)regmem_load_32(p->mem_addr, p->rd_reg, p->ssid); + int wr = (int)regmem_load_32(p->mem_addr, p->wr_reg, p->ssid); + + return OP_std_modadd(rd, -(wr+1), p->size); +} + +STORAGE_CLASS_INLINE void +send_port_copy(const struct send_port *p, unsigned int i, const void *data) +{ + unsigned int wr = send_port_index(p, i); + unsigned int token_size = p->token_size; + buffer_address addr = p->buffer + (wr * token_size); + + buffer_store(addr, data, token_size, p->mmid); +#ifndef __VIED_CELL + ia_css_cpu_mem_cache_flush((void *)HOST_ADDRESS(addr), token_size); +#endif +} + +STORAGE_CLASS_INLINE void +send_port_release(const struct send_port *p, unsigned int i) +{ + unsigned int wr = send_port_index(p, i); + + regmem_store_32(p->mem_addr, 
p->wr_reg, wr, p->ssid); +} + +unsigned int +send_port_transfer(const struct send_port *p, const void *data) +{ + if (!send_port_available(p)) + return 0; + send_port_copy(p, 0, data); + send_port_release(p, 1); + return 1; +} diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/psys_infobits/interface/psys_infobits.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/psys_infobits/interface/psys_infobits.h new file mode 100644 index 0000000000000..11029a1805313 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/psys_infobits/interface/psys_infobits.h @@ -0,0 +1,20 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ + +#ifndef __PSYS_INFOBITS_H +#define __PSYS_INFOBITS_H + +void ia_css_psys_set_master_port_regs(unsigned int ssid); + +#endif /* __PSYS_INFOBITS_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/psys_infobits/psys_infobits.mk b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/psys_infobits/psys_infobits.mk new file mode 100644 index 0000000000000..c6641e293fe64 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/psys_infobits/psys_infobits.mk @@ -0,0 +1,29 @@ +# # # +# Support for Intel Camera Imaging ISP subsystem. +# Copyright (c) 2010 - 2018, Intel Corporation. 
+# +# This program is free software; you can redistribute it and/or modify it +# under the terms and conditions of the GNU General Public License, +# version 2, as published by the Free Software Foundation. +# +# This program is distributed in the hope it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for +# more details +# +# +# PSYS_INFOBITS +# + +PSYS_INFOBITS_DIR = $${MODULES_DIR}/psys_infobits + +PSYS_INFOBITS_INTERFACE = $(PSYS_INFOBITS_DIR)/interface +PSYS_INFOBITS_SOURCES = $(PSYS_INFOBITS_DIR)/src + +PSYS_INFOBITS_CPPFLAGS := \ + -I$(PSYS_INFOBITS_INTERFACE) + +PSYS_INFOBITS_HOST_FILES = \ + $(PSYS_INFOBITS_SOURCES)/psys_infobits.c + +PSYS_INFOBITS_FW_FILES = $(PSYS_INFOBITS_HOST_FILES) diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/psys_infobits/src/psys_infobits.c b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/psys_infobits/src/psys_infobits.c new file mode 100644 index 0000000000000..5c43583f6193e --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/psys_infobits/src/psys_infobits.c @@ -0,0 +1,107 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+*/ + +#include "psys_infobits.h" + +#include "assert_support.h" +#include "ia_css_cell.h" +#include "ipu_device_cell_properties.h" +#include "ipu_device_cell_properties_impl.h" +#include "ipu_device_buttress_properties_struct.h" + +/* +** According to BXT CSS HAS PS the info bits as expected by buttress are +** Field---------Description---------------------Encoding---------------| + | 0 | CIOM0: Snoopable | 0 - non snoopable | + | | | 1 - snoopable | + ----------------------------------------------------------------------| + | 1 | CIOM0: VC0_RS_for_IMR | Deadline | + | | CIOM1: VC1_deadline_pointer | 0 - regular deadline | + | | | 1 - urgent deadline | + ----------------------------------------------------------------------| + | 2 | Deadline pointer reserved | | + ----------------------------------------------------------------------| + | 3 | CIOM1: Zero-length write (ZLW)| 0 - NOP | + | | | 1 - Convert transaction as ZLW + ----------------------------------------------------------------------| + | 5:4 | CIOM0: Request destination | Destination | + | | CIOM1: Stream_ID[1:0] | 00 - Buttress registers| + | | | 01 - Primary | + | | | 10 - Reserved | + | | | 11 - Input system | + ----------------------------------------------------------------------| + | 7:6 | CIOM1: Stream_ID[3:2] | For data prefetch | + ----------------------------------------------------------------------| + | 8 | CIOM1: Address swizzeling | | + ----------------------------------------------------------------------| + + ** As PSYS devices use MO port and the request destination is DDR + ** then bit 4 (Request destination) should be 1 (Primary), thus 0x10 +*/ + + +void ia_css_psys_set_master_port_regs(unsigned int ssid) +{ + /* set primary destination(DDR) */ + unsigned int info_bits = IA_CSS_INFO_BITS_M0_DDR; + enum ipu_device_psys_cell_id cell_id; + + COMPILATION_ERROR_IF(0 != SPC0); + + /* Configure SPC */ + cell_id = SPC0; + ia_css_cell_set_master_info_bits(ssid, cell_id, + 
IPU_DEVICE_SP2600_CONTROL_ICACHE, info_bits); + ia_css_cell_set_master_info_bits(ssid, cell_id, + IPU_DEVICE_SP2600_CONTROL_XMEM, info_bits); + ia_css_cell_set_master_base_address(ssid, cell_id, + IPU_DEVICE_SP2600_CONTROL_XMEM, 0); + +#if defined(HAS_SPP0) + /* Configure SPP0 proxy */ + cell_id = SPP0; + ia_css_cell_set_master_info_bits(ssid, cell_id, + IPU_DEVICE_SP2600_PROXY_ICACHE, info_bits); + ia_css_cell_set_master_info_bits(ssid, cell_id, + IPU_DEVICE_SP2600_PROXY_XMEM, info_bits); + ia_css_cell_set_master_base_address(ssid, cell_id, + IPU_DEVICE_SP2600_PROXY_XMEM, 0); + COMPILATION_ERROR_IF(SPP0 < SPC0); +#endif + +#if defined(HAS_SPP1) + /* Configure SPP1 proxy */ + cell_id = SPP1; + ia_css_cell_set_master_info_bits(ssid, cell_id, + IPU_DEVICE_SP2600_PROXY_ICACHE, info_bits); + ia_css_cell_set_master_info_bits(ssid, cell_id, + IPU_DEVICE_SP2600_PROXY_XMEM, info_bits); + ia_css_cell_set_master_base_address(ssid, cell_id, + IPU_DEVICE_SP2600_PROXY_XMEM, 0); + COMPILATION_ERROR_IF(SPP1 < SPC0); +#endif + +#if defined(HAS_ISP0) + /* Configure ISP(s) */ + for (cell_id = ISP0; cell_id < NUM_CELLS; cell_id++) { + ia_css_cell_set_master_info_bits(ssid, cell_id, + IPU_DEVICE_CELL_MASTER_ICACHE, info_bits); + ia_css_cell_set_master_info_bits(ssid, cell_id, + IPU_DEVICE_CELL_MASTER_XMEM, info_bits); + ia_css_cell_set_master_base_address(ssid, cell_id, + IPU_DEVICE_CELL_MASTER_XMEM, 0); + } + COMPILATION_ERROR_IF(ISP0 < SPP0); +#endif +} diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/psys_private_pg/interface/ia_css_psys_private_pg_data.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/psys_private_pg/interface/ia_css_psys_private_pg_data.h new file mode 100644 index 0000000000000..6b2387352ae36 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/psys_private_pg/interface/ia_css_psys_private_pg_data.h @@ -0,0 +1,43 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. 
+ * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ + +#ifndef __IA_CSS_PSYS_PRIVATE_PG_DATA_H +#define __IA_CSS_PSYS_PRIVATE_PG_DATA_H + +#include "ipu_device_acb_devices.h" +#include "ipu_device_gp_devices.h" +#include "type_support.h" +#include "vied_nci_acb_route_type.h" + +#define PRIV_CONF_INVALID 0xFF + +struct ia_css_psys_pg_buffer_information_s { + unsigned int buffer_base_addr; + unsigned int bpe; + unsigned int buffer_width; + unsigned int buffer_height; + unsigned int num_of_buffers; + unsigned int dfm_port_addr; +}; + +typedef struct ia_css_psys_pg_buffer_information_s ia_css_psys_pg_buffer_information_t; + +struct ia_css_psys_private_pg_data { + nci_acb_route_t acb_route[IPU_DEVICE_ACB_NUM_ACB]; + uint8_t psa_mux_conf[IPU_DEVICE_GP_PSA_MUX_NUM_MUX]; + uint8_t isa_mux_conf[IPU_DEVICE_GP_ISA_STATIC_MUX_NUM_MUX]; + ia_css_psys_pg_buffer_information_t input_buffer_info; +}; + +#endif /* __IA_CSS_PSYS_PRIVATE_PG_DATA_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/psys_server/interface/ia_css_bxt_spctrl_trace.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/psys_server/interface/ia_css_bxt_spctrl_trace.h new file mode 100644 index 0000000000000..eee1d6ab0a496 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/psys_server/interface/ia_css_bxt_spctrl_trace.h @@ -0,0 +1,107 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. 
+ * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ + +#ifndef __IA_CSS_BXT_SPCTRL_TRACE_H +#define __IA_CSS_BXT_SPCTRL_TRACE_H + +#include "ia_css_trace.h" + +/* Not using 0 to identify wrong configuration being passed from + * the .mk file outside. + * Log levels not in the range below will cause a + * "No BXT_SPCTRL_TRACE_CONFIG Tracing level defined" + */ +#define BXT_SPCTRL_TRACE_LOG_LEVEL_OFF 1 +#define BXT_SPCTRL_TRACE_LOG_LEVEL_NORMAL 2 +#define BXT_SPCTRL_TRACE_LOG_LEVEL_DEBUG 3 + +/* BXT_SPCTRL and all the submodules in BXT_SPCTRL will have the + * default tracing level set to the BXT_SPCTRL_TRACE_CONFIG level. 
+ * If not defined in the psysapi.mk fill it will be set by + * default to no trace (BXT_SPCTRL_TRACE_LOG_LEVEL_NORMAL) + */ +#define BXT_SPCTRL_TRACE_CONFIG_DEFAULT BXT_SPCTRL_TRACE_LOG_LEVEL_NORMAL + +#if !defined(BXT_SPCTRL_TRACE_CONFIG) +# define BXT_SPCTRL_TRACE_CONFIG BXT_SPCTRL_TRACE_CONFIG_DEFAULT +#endif + +/* BXT_SPCTRL Module tracing backend is mapped to TUNIT tracing for + * target platforms + */ +#ifdef __HIVECC +# ifndef HRT_CSIM +# define BXT_SPCTRL_TRACE_METHOD IA_CSS_TRACE_METHOD_TRACE +# else +# define BXT_SPCTRL_TRACE_METHOD IA_CSS_TRACE_METHOD_NATIVE +# endif +#else +# define BXT_SPCTRL_TRACE_METHOD IA_CSS_TRACE_METHOD_NATIVE +#endif + +#if (defined(BXT_SPCTRL_TRACE_CONFIG)) + /* Module specific trace setting */ +# if BXT_SPCTRL_TRACE_CONFIG == BXT_SPCTRL_TRACE_LOG_LEVEL_OFF + /* BXT_SPCTRL_TRACE_LOG_LEVEL_OFF */ +# define BXT_SPCTRL_TRACE_LEVEL_ASSERT \ + IA_CSS_TRACE_LEVEL_DISABLED +# define BXT_SPCTRL_TRACE_LEVEL_ERROR \ + IA_CSS_TRACE_LEVEL_DISABLED +# define BXT_SPCTRL_TRACE_LEVEL_WARNING \ + IA_CSS_TRACE_LEVEL_DISABLED +# define BXT_SPCTRL_TRACE_LEVEL_INFO \ + IA_CSS_TRACE_LEVEL_DISABLED +# define BXT_SPCTRL_TRACE_LEVEL_DEBUG \ + IA_CSS_TRACE_LEVEL_DISABLED +# define BXT_SPCTRL_TRACE_LEVEL_VERBOSE \ + IA_CSS_TRACE_LEVEL_DISABLED +# elif BXT_SPCTRL_TRACE_CONFIG == BXT_SPCTRL_TRACE_LOG_LEVEL_NORMAL + /* BXT_SPCTRL_TRACE_LOG_LEVEL_NORMAL */ +# define BXT_SPCTRL_TRACE_LEVEL_ASSERT \ + IA_CSS_TRACE_LEVEL_DISABLED +# define BXT_SPCTRL_TRACE_LEVEL_ERROR \ + IA_CSS_TRACE_LEVEL_ENABLED +# define BXT_SPCTRL_TRACE_LEVEL_WARNING \ + IA_CSS_TRACE_LEVEL_DISABLED +# define BXT_SPCTRL_TRACE_LEVEL_INFO \ + IA_CSS_TRACE_LEVEL_ENABLED +# define BXT_SPCTRL_TRACE_LEVEL_DEBUG \ + IA_CSS_TRACE_LEVEL_DISABLED +# define BXT_SPCTRL_TRACE_LEVEL_VERBOSE \ + IA_CSS_TRACE_LEVEL_DISABLED +# elif BXT_SPCTRL_TRACE_CONFIG == BXT_SPCTRL_TRACE_LOG_LEVEL_DEBUG + /* BXT_SPCTRL_TRACE_LOG_LEVEL_DEBUG */ +# define BXT_SPCTRL_TRACE_LEVEL_ASSERT \ + IA_CSS_TRACE_LEVEL_ENABLED +# 
define BXT_SPCTRL_TRACE_LEVEL_ERROR \ + IA_CSS_TRACE_LEVEL_ENABLED +# define BXT_SPCTRL_TRACE_LEVEL_WARNING \ + IA_CSS_TRACE_LEVEL_ENABLED +# define BXT_SPCTRL_TRACE_LEVEL_INFO \ + IA_CSS_TRACE_LEVEL_ENABLED +# define BXT_SPCTRL_TRACE_LEVEL_DEBUG \ + IA_CSS_TRACE_LEVEL_ENABLED +# define BXT_SPCTRL_TRACE_LEVEL_VERBOSE \ + IA_CSS_TRACE_LEVEL_ENABLED +# else +# error "No BXT_SPCTRL_TRACE_CONFIG Tracing level defined" +# endif +#else +# error "BXT_SPCTRL_TRACE_CONFIG not defined" +#endif + +/* Overriding submodules in BXT_SPCTRL with a specific tracing level */ +/* #define BXT_SPCTRL_DYNAMIC_TRACING_OVERRIDE TRACE_LOG_LEVEL_VERBOSE */ + +#endif /* __IA_CSS_BXT_SPCTRL_TRACE_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/psys_server/psys_server.mk b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/psys_server/psys_server.mk new file mode 100644 index 0000000000000..c4462c9847935 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/psys_server/psys_server.mk @@ -0,0 +1,81 @@ +# # # +# Support for Intel Camera Imaging ISP subsystem. +# Copyright (c) 2010 - 2018, Intel Corporation. +# +# This program is free software; you can redistribute it and/or modify it +# under the terms and conditions of the GNU General Public License, +# version 2, as published by the Free Software Foundation. +# +# This program is distributed in the hope it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +# FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU General Public License for +# more details +# +# +# MODULE is PSYS_SERVER + +include $(MODULES_DIR)/config/system_$(IPU_SYSVER).mk +include $(MODULES_DIR)/config/$(SUBSYSTEM)/subsystem_$(IPU_SYSVER).mk + +PSYS_SERVER_DIR=${MODULES_DIR}/psys_server + +# The watchdog should never be merged enabled +PSYS_SERVER_WATCHDOG_ENABLE ?= 0 + +PSYS_SERVER_INTERFACE=$(PSYS_SERVER_DIR)/interface +PSYS_SERVER_SOURCES=$(PSYS_SERVER_DIR)/src + +# PSYS API implementation files. Consider a new module for those to avoid +# having them together with firmware. +PSYS_SERVER_HOST_FILES += $${MODULES_DIR}/psysapi/device/src/ia_css_psys_device.c +PSYS_SERVER_HOST_FILES += $(PSYS_SERVER_SOURCES)/bxt_spctrl_process_group_cmd_impl.c + +PSYS_SERVER_HOST_CPPFLAGS += -I$(PSYS_SERVER_INTERFACE) + +PSYS_SERVER_HOST_CPPFLAGS += -DSSID=$(SSID) +PSYS_SERVER_HOST_CPPFLAGS += -DMMID=$(MMID) + + +PSYS_SERVER_FW_FILES += $(PSYS_SERVER_SOURCES)/psys_cmd_queue_fw.c +PSYS_SERVER_FW_FILES += $(PSYS_SERVER_SOURCES)/psys_event_queue_fw.c +PSYS_SERVER_FW_FILES += $(PSYS_SERVER_SOURCES)/psys_init_fw.c +PSYS_SERVER_FW_FILES += $(PSYS_SERVER_SOURCES)/psys_process_group_fw.c + +# Files that server modules need to use +PSYS_SERVER_SUPPORT_FILES = $(PSYS_SERVER_SOURCES)/dev_access_conv/$(IPU_SYSVER)/ia_css_psys_server_dev_access_type_conv.c +PSYS_SERVER_SUPPORT_FILES += $(PSYS_SERVER_SOURCES)/ia_css_psys_server_config.c + +# Include those to build the release firmware. Otherwise replace by test code. 
+PSYS_SERVER_RELEASE_FW_FILES = $(PSYS_SERVER_SOURCES)/psys_server.c +PSYS_SERVER_RELEASE_FW_FILES += $(PSYS_SERVER_SOURCES)/ia_css_psys_proxy.c +PSYS_SERVER_RELEASE_FW_FILES += $(PSYS_SERVER_SOURCES)/ia_css_psys_server_dev_access.c +PSYS_SERVER_RELEASE_FW_FILES += $(PSYS_SERVER_SOURCES)/ia_css_psys_server_terminal_load.c +PSYS_SERVER_RELEASE_FW_FILES += $(PSYS_SERVER_SOURCES)/ia_css_psys_server_remote_obj_access.c +PSYS_SERVER_RELEASE_FW_FILES += $(PSYS_SERVER_SOURCES)/ia_css_psys_server_dma_access.c +ifeq ($(HAS_DEC400), 1) +PSYS_SERVER_RELEASE_FW_FILES += $(PSYS_SERVER_SOURCES)/ia_css_psys_server_dec400_access.c +endif +PSYS_SERVER_RELEASE_FW_FILES += $(PSYS_SERVER_SUPPORT_FILES) + +PSYS_SERVER_FW_CPPFLAGS += -I$(PSYS_SERVER_INTERFACE) +PSYS_SERVER_FW_CPPFLAGS += -I$(PSYS_SERVER_SOURCES) +PSYS_SERVER_FW_CPPFLAGS += -I$(PSYS_SERVER_SOURCES)/$(IPU_SYSVER) +PSYS_SERVER_FW_CPPFLAGS += -I$(PSYS_SERVER_SOURCES)/$(PSYS_SERVER_VERSION) +PSYS_SERVER_FW_CPPFLAGS += -I$(PSYS_SERVER_SOURCES)/loader/$(PSYS_SERVER_LOADER_VERSION) +PSYS_SERVER_FW_CPPFLAGS += -I$(PSYS_SERVER_SOURCES)/access_blocker/$(PSYS_ACCESS_BLOCKER_VERSION) +PSYS_SERVER_FW_CPPFLAGS += -I$(PSYS_SERVER_SOURCES)/access_blocker/src + +PSYS_SERVER_FW_CPPFLAGS += -DSSID=$(SSID) +PSYS_SERVER_FW_CPPFLAGS += -DMMID=$(MMID) +PSYS_SERVER_FW_CPPFLAGS += -DHAS_DPCM=$(if $(HAS_DPCM),1,0) + +# PSYS server watchdog for debugging +ifeq ($(PSYS_SERVER_WATCHDOG_ENABLE), 1) + PSYS_SERVER_FW_FILES += $(PSYS_SERVER_SOURCES)/ia_css_psys_server_watchdog.c + PSYS_SERVER_FW_CPPFLAGS += -DPSYS_SERVER_WATCHDOG_DEBUG +endif + +PSYS_SERVER_FW_CPPFLAGS += -D$(PSYS_HW_VERSION) + +PSYS_SERVER_FW_CPPFLAGS += -DENABLE_TPROXY=$(PSYS_SERVER_ENABLE_TPROXY) +PSYS_SERVER_FW_CPPFLAGS += -DENABLE_DEVPROXY=$(PSYS_SERVER_ENABLE_DEVPROXY) diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/psys_server/src/bxt_spctrl_process_group_cmd_impl.c 
b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/psys_server/src/bxt_spctrl_process_group_cmd_impl.c new file mode 100644 index 0000000000000..6f8aea782464a --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/psys_server/src/bxt_spctrl_process_group_cmd_impl.c @@ -0,0 +1,332 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ + +#include "ia_css_psys_device.h" +#include "ia_css_psys_process_group_cmd_impl.h" +#include "ia_css_psysapi.h" +#include "ia_css_psys_terminal.h" +#include "ia_css_psys_process.h" +#include "ia_css_psys_process.psys.h" +#include "ia_css_psys_process_group.h" +#include "ia_css_psys_process_group.psys.h" +#include "ia_css_psys_program_group_manifest.h" +#include "type_support.h" +#include "error_support.h" +#include "misc_support.h" +#include "cpu_mem_support.h" +#include "ia_css_bxt_spctrl_trace.h" + +#if HAS_DUAL_CMD_CTX_SUPPORT +#define MAX_CLIENT_PGS 8 /* same as test_params.h */ +struct ia_css_process_group_context { + ia_css_process_group_t *pg; + bool secure; +}; +struct ia_css_process_group_context pg_contexts[MAX_CLIENT_PGS]; +static unsigned int num_of_pgs; + +STORAGE_CLASS_INLINE +struct ia_css_syscom_context *ia_css_process_group_get_context(ia_css_process_group_t *process_group) +{ + unsigned int i; + bool secure = false; + + IA_CSS_TRACE_0(BXT_SPCTRL, INFO, + "ia_css_process_group_get_context(): enter:\n"); + + for (i = 0; i < num_of_pgs; i++) { + if (pg_contexts[i].pg == process_group) { 
+ secure = pg_contexts[i].secure; + break; + } + } + + IA_CSS_TRACE_1(BXT_SPCTRL, INFO, + "ia_css_process_group_get_context(): secure %d\n", secure); + return secure ? psys_syscom_secure : psys_syscom; +} + +int ia_css_process_group_store(ia_css_process_group_t *process_group, bool secure) +{ + IA_CSS_TRACE_2(BXT_SPCTRL, INFO, + "ia_css_process_group_store(): pg instance %d secure %d\n", num_of_pgs, secure); + + pg_contexts[num_of_pgs].pg = process_group; + pg_contexts[num_of_pgs].secure = secure; + num_of_pgs++; + return 0; +} +#else /* HAS_DUAL_CMD_CTX_SUPPORT */ +STORAGE_CLASS_INLINE +struct ia_css_syscom_context *ia_css_process_group_get_context(ia_css_process_group_t *process_group) +{ + NOT_USED(process_group); + + return psys_syscom; +} + +int ia_css_process_group_store(ia_css_process_group_t *process_group, bool secure) +{ + NOT_USED(process_group); + NOT_USED(secure); + + return 0; +} +#endif /* HAS_DUAL_CMD_CTX_SUPPORT */ + +int ia_css_process_group_on_create( + ia_css_process_group_t *process_group, + const ia_css_program_group_manifest_t *program_group_manifest, + const ia_css_program_group_param_t *program_group_param) +{ + NOT_USED(process_group); + NOT_USED(program_group_manifest); + NOT_USED(program_group_param); + + IA_CSS_TRACE_0(BXT_SPCTRL, INFO, + "ia_css_process_group_on_create(): enter:\n"); + + return 0; +} + +int ia_css_process_group_on_destroy( + ia_css_process_group_t *process_group) +{ + NOT_USED(process_group); + + IA_CSS_TRACE_0(BXT_SPCTRL, INFO, + "ia_css_process_group_on_destroy(): enter:\n"); + + return 0; +} + +int ia_css_process_group_exec_cmd( + ia_css_process_group_t *process_group, + const ia_css_process_group_cmd_t cmd) +{ + int retval = -1; + ia_css_process_group_state_t state; + struct ia_css_psys_cmd_s psys_cmd; + bool cmd_queue_full; + unsigned int queue_id; + + IA_CSS_TRACE_0(BXT_SPCTRL, INFO, + "ia_css_process_group_exec_cmd(): enter:\n"); + + verifexit(process_group != NULL); + + state = 
ia_css_process_group_get_state(process_group); + + verifexit(state != IA_CSS_PROCESS_GROUP_ERROR); + verifexit(state < IA_CSS_N_PROCESS_GROUP_STATES); + + switch (cmd) { + case IA_CSS_PROCESS_GROUP_CMD_SUBMIT: + + IA_CSS_TRACE_0(BXT_SPCTRL, INFO, + "ia_css_process_group_exec_cmd(): IA_CSS_PROCESS_GROUP_CMD_SUBMIT:\n"); + verifexit(state == IA_CSS_PROCESS_GROUP_READY); + + /* External resource availability checks */ + verifexit(ia_css_can_process_group_submit(process_group)); + + process_group->state = IA_CSS_PROCESS_GROUP_BLOCKED; + break; + case IA_CSS_PROCESS_GROUP_CMD_START: + + IA_CSS_TRACE_0(BXT_SPCTRL, INFO, + "ia_css_process_group_exec_cmd(): IA_CSS_PROCESS_GROUP_CMD_START:\n"); + verifexit(state == IA_CSS_PROCESS_GROUP_BLOCKED); + + /* External resource state checks */ + verifexit(ia_css_can_process_group_start(process_group)); + + process_group->state = IA_CSS_PROCESS_GROUP_STARTED; + break; + case IA_CSS_PROCESS_GROUP_CMD_DISOWN: + + IA_CSS_TRACE_0(BXT_SPCTRL, INFO, + "ia_css_process_group_exec_cmd(): IA_CSS_PROCESS_GROUP_CMD_DISOWN:\n"); + verifexit(state == IA_CSS_PROCESS_GROUP_STARTED); + + cmd_queue_full = ia_css_is_psys_cmd_queue_full(ia_css_process_group_get_context(process_group), + IA_CSS_PSYS_CMD_QUEUE_COMMAND_ID); + retval = EBUSY; + verifexit(cmd_queue_full == false); + + psys_cmd.command = IA_CSS_PROCESS_GROUP_CMD_START; + psys_cmd.msg = 0; + psys_cmd.context_handle = process_group->ipu_virtual_address; + + verifexit(ia_css_process_group_print(process_group, NULL) == 0); + + retval = ia_css_psys_cmd_queue_send(ia_css_process_group_get_context(process_group), + IA_CSS_PSYS_CMD_QUEUE_COMMAND_ID, &psys_cmd); + verifexit(retval > 0); + break; + case IA_CSS_PROCESS_GROUP_CMD_STOP: + + IA_CSS_TRACE_0(BXT_SPCTRL, INFO, + "ia_css_process_group_exec_cmd(): IA_CSS_PROCESS_GROUP_CMD_STOP:\n"); + + cmd_queue_full = ia_css_is_psys_cmd_queue_full(ia_css_process_group_get_context(process_group), + IA_CSS_PSYS_CMD_QUEUE_COMMAND_ID); + retval = EBUSY; + 
verifexit(cmd_queue_full == false); + + psys_cmd.command = IA_CSS_PROCESS_GROUP_CMD_STOP; + psys_cmd.msg = 0; + psys_cmd.context_handle = process_group->ipu_virtual_address; + + queue_id = ia_css_process_group_get_base_queue_id(process_group); + verifexit(queue_id < IA_CSS_N_PSYS_CMD_QUEUE_ID); + + retval = ia_css_psys_cmd_queue_send(ia_css_process_group_get_context(process_group), + queue_id, &psys_cmd); + verifexit(retval > 0); + break; + case IA_CSS_PROCESS_GROUP_CMD_ABORT: + + IA_CSS_TRACE_0(BXT_SPCTRL, INFO, + "ia_css_process_group_exec_cmd(): IA_CSS_PROCESS_GROUP_CMD_ABORT:\n"); + + /* Once the flushing of shared buffers is fixed this verifexit + * should be changed to be state = IA_CSS_PROCESS_GROUP_STARTED + */ + verifexit(state == IA_CSS_PROCESS_GROUP_BLOCKED); + + cmd_queue_full = ia_css_is_psys_cmd_queue_full(ia_css_process_group_get_context(process_group), + IA_CSS_PSYS_CMD_QUEUE_COMMAND_ID); + retval = EBUSY; + verifexit(cmd_queue_full == false); + + psys_cmd.command = IA_CSS_PROCESS_GROUP_CMD_ABORT; + psys_cmd.msg = 0; + psys_cmd.context_handle = process_group->ipu_virtual_address; + + retval = ia_css_psys_cmd_queue_send(ia_css_process_group_get_context(process_group), + IA_CSS_PSYS_CMD_QUEUE_DEVICE_ID, &psys_cmd); + verifexit(retval > 0); + break; + default: + verifexit(false); + break; + } + + retval = 0; +EXIT: + if (0 != retval) { + IA_CSS_TRACE_1(BXT_SPCTRL, ERROR, + "ia_css_process_group_exec_cmd failed (%i)\n", retval); + } + return retval; +} + +STORAGE_CLASS_INLINE int enqueue_buffer_set_cmd( + ia_css_process_group_t *process_group, + ia_css_buffer_set_t *buffer_set, + unsigned int queue_offset, + uint16_t command + ) +{ + int retval = -1; + struct ia_css_psys_cmd_s psys_cmd; + bool cmd_queue_full; + unsigned int queue_id; + + verifexit(ia_css_process_group_get_state(process_group) + == IA_CSS_PROCESS_GROUP_STARTED); + + verifexit(queue_offset < + ia_css_process_group_get_num_queues(process_group)); + + queue_id = + 
ia_css_process_group_get_base_queue_id(process_group) + + queue_offset; + verifexit(queue_id < IA_CSS_N_PSYS_CMD_QUEUE_ID); + + cmd_queue_full = ia_css_is_psys_cmd_queue_full(ia_css_process_group_get_context(process_group), queue_id); + retval = EBUSY; + verifexit(cmd_queue_full == false); + + psys_cmd.command = command; + psys_cmd.msg = 0; + psys_cmd.context_handle = + ia_css_buffer_set_get_ipu_address(buffer_set); + + retval = ia_css_psys_cmd_queue_send(ia_css_process_group_get_context(process_group), queue_id, &psys_cmd); + verifexit(retval > 0); + + retval = 0; + +EXIT: + if (0 != retval) { + IA_CSS_TRACE_1(BXT_SPCTRL, ERROR, + "enqueue_buffer_set failed (%i)\n", retval); + } + return retval; +} + +int ia_css_enqueue_buffer_set( + ia_css_process_group_t *process_group, + ia_css_buffer_set_t *buffer_set, + unsigned int queue_offset) +{ + int retval = -1; + + IA_CSS_TRACE_0(BXT_SPCTRL, INFO, + "ia_css_enqueue_buffer_set():\n"); + retval = enqueue_buffer_set_cmd( + process_group, + buffer_set, + queue_offset, + IA_CSS_PROCESS_GROUP_CMD_RUN); + + if (0 != retval) { + IA_CSS_TRACE_1(BXT_SPCTRL, ERROR, + "ia_css_enqueue_buffer_set failed (%i)\n", retval); + } + return retval; +} + +int ia_css_enqueue_param_buffer_set( + ia_css_process_group_t *process_group, + ia_css_buffer_set_t *param_buffer_set) +{ +#if (HAS_LATE_BINDING_SUPPORT == 1) + int retval = -1; + + IA_CSS_TRACE_0(BXT_SPCTRL, INFO, + "ia_css_enqueue_param_buffer_set():\n"); + + retval = enqueue_buffer_set_cmd( + process_group, + param_buffer_set, + IA_CSS_PSYS_LATE_BINDING_QUEUE_OFFSET, + IA_CSS_PROCESS_GROUP_CMD_SUBMIT); + + if (0 != retval) { + IA_CSS_TRACE_1(BXT_SPCTRL, ERROR, + "ia_css_enqueue_param_buffer_set failed (%i)\n", retval); + } +#else + int retval = -1; + + NOT_USED(process_group); + NOT_USED(param_buffer_set); + IA_CSS_TRACE_0(BXT_SPCTRL, ERROR, + "ia_css_enqueue_param_buffer_set failed, no late binding supported\n"); +#endif /* (HAS_LATE_BINDING_SUPPORT == 1) */ + return retval; +} diff 
--git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/psysapi/data/interface/ia_css_program_group_data.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/psysapi/data/interface/ia_css_program_group_data.h new file mode 100644 index 0000000000000..6ccca1d9b69e1 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/psysapi/data/interface/ia_css_program_group_data.h @@ -0,0 +1,418 @@ +/* +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ + +#ifndef __IA_CSS_PROGRAM_GROUP_DATA_H +#define __IA_CSS_PROGRAM_GROUP_DATA_H + +#include "ia_css_psys_data_storage_class.h" + +/*! \file */ + +/** @file ia_css_program_group_data.h + * + * Define the data objects that are passed to the process groups + * i.e. 
frames and matrices with their sub-structures + * + * The data objects are separate from the process group terminal, + * although they are stored by value rather than by reference and + * make the process group terminal dependendent on its definition + * + * This frame definition overloads the current CSS frame definition + * they are the same object, just a slightly different implementation + */ + +#include /* vied_vaddress_t */ + +#include +#include "ia_css_program_group_data_defs.h" /* ia_css_frame_format_type */ + +#include "ia_css_terminal_defs.h" + +/* + * Frame buffer state used for sequencing + * (see FAS 5.5.3) + * + * The buffer can be in DDR or a handle to a stream + */ +typedef enum ia_css_buffer_state { + IA_CSS_BUFFER_NULL = 0, + IA_CSS_BUFFER_UNDEFINED, + IA_CSS_BUFFER_EMPTY, + IA_CSS_BUFFER_NONEMPTY, + IA_CSS_BUFFER_FULL, + IA_CSS_N_BUFFER_STATES +} ia_css_buffer_state_t; + +#define IA_CSS_BUFFER_STATE_IN_BITS 32 + +/* + * Pointer state used to signal MMU invalidation + */ +typedef enum ia_css_pointer_state { + IA_CSS_POINTER_INVALID = 0, + IA_CSS_POINTER_VALID, + IA_CSS_N_POINTER_STATES +} ia_css_pointer_state_t; + +#define IA_CSS_POINTER_STATE_IN_BITS 32 + +/* + * Access direction needed to select the access port + */ +typedef enum ia_css_access_type { + IA_CSS_ACCESS_LOCKED = 0, + IA_CSS_ACCESS_READ, + IA_CSS_ACCESS_WRITE, + IA_CSS_ACCESS_MODIFY, + IA_CSS_N_ACCESS_TYPES +} ia_css_access_type_t; + +#define IA_CSS_ACCESS_TYPE_IN_BITS 32 + +/* + * Access attribute needed to select the access port + * - public : snooped + * - private: non-snooped + * Naming is a bit awkward, lack of inspiration + */ +typedef enum ia_css_access_scope { + IA_CSS_ACCESS_PRIVATE = 0, + IA_CSS_ACCESS_PUBLIC, + IA_CSS_N_ACCESS_SCOPES +} ia_css_access_scopes_t; + +#define IA_CSS_ACCESS_SCOPES_IN_BITS 32 + +#define IA_CSS_N_FRAME_PLANES 6 + +#define IA_CSS_FRAME_FORMAT_BITMAP_BITS 64 +typedef uint64_t ia_css_frame_format_bitmap_t; + +typedef struct 
ia_css_param_frame_descriptor_s ia_css_param_frame_descriptor_t; +typedef struct ia_css_param_frame_s ia_css_param_frame_t; + +typedef struct ia_css_frame_descriptor_s ia_css_frame_descriptor_t; +typedef struct ia_css_frame_s ia_css_frame_t; +typedef struct ia_css_fragment_descriptor_s ia_css_fragment_descriptor_t; + +typedef struct ia_css_stream_s ia_css_stream_t; + + +#define N_UINT64_IN_STREAM_STRUCT 1 + +#define IA_CSS_STREAM_STRUCT_BITS \ + (N_UINT64_IN_STREAM_STRUCT * 64) + +struct ia_css_stream_s { + uint64_t dummy; +}; + +struct ia_css_param_frame_descriptor_s { + uint16_t size; /**< Size of the descriptor */ + uint32_t buffer_count; /**< Number of parameter buffers */ +}; + +struct ia_css_param_frame_s { + /*< Base virtual addresses to parameters in subsystem virtual + * memory space + */ + vied_vaddress_t *data; +}; + +#define N_UINT32_IN_FRAME_DESC_STRUCT \ + (1 + IA_CSS_N_FRAME_PLANES + (IA_CSS_N_DATA_DIMENSION - 1)) +#define N_UINT16_IN_FRAME_DESC_STRUCT (1 + IA_CSS_N_DATA_DIMENSION) +#define N_UINT8_IN_FRAME_DESC_STRUCT 3 +#define N_PADDING_UINT8_IN_FRAME_DESC_STRUCT 3 + +#define IA_CSS_FRAME_DESCRIPTOR_STRUCT_BITS \ + (IA_CSS_FRAME_FORMAT_TYPE_BITS \ + + (N_UINT32_IN_FRAME_DESC_STRUCT * 32) \ + + (N_UINT16_IN_FRAME_DESC_STRUCT * 16) \ + + (N_UINT8_IN_FRAME_DESC_STRUCT * 8) \ + + (N_PADDING_UINT8_IN_FRAME_DESC_STRUCT * 8)) + +/* + * Structure defining the frame (size and access) properties for + * inbuild types only. 
+ * + * The inbuild types like FourCC, MIPI and CSS private types are supported + * by FW all other types are custom types which interpretation must be encoded + * on the buffer itself or known by the source and sink + */ +struct ia_css_frame_descriptor_s { + /**< Indicates if this is a generic type or inbuild with + * variable size descriptor + */ + ia_css_frame_format_type_t frame_format_type; + /**< Number of data planes (pointers) */ + uint32_t plane_count; + /**< Plane offsets accounting for fragments */ + uint32_t plane_offsets[IA_CSS_N_FRAME_PLANES]; + /**< Physical size aspects */ + uint32_t stride[IA_CSS_N_DATA_DIMENSION - 1]; + /**< Logical dimensions */ + uint16_t dimension[IA_CSS_N_DATA_DIMENSION]; + /**< Size of this descriptor */ + uint16_t size; + /**< Bits per pixel */ + uint8_t bpp; + /**< Bits per element */ + uint8_t bpe; + /**< 1 if terminal uses compressed datatype, 0 otherwise */ + uint8_t is_compressed; + /**< Padding for 64bit alignment */ + uint8_t padding[N_PADDING_UINT8_IN_FRAME_DESC_STRUCT]; +}; + +#define N_UINT32_IN_FRAME_STRUCT 2 +#define N_PADDING_UINT8_IN_FRAME_STRUCT 4 + +#define IA_CSS_FRAME_STRUCT_BITS \ + (IA_CSS_BUFFER_STATE_IN_BITS \ + + IA_CSS_ACCESS_TYPE_IN_BITS \ + + IA_CSS_POINTER_STATE_IN_BITS \ + + IA_CSS_ACCESS_SCOPES_IN_BITS \ + + VIED_VADDRESS_BITS \ + + (N_UINT32_IN_FRAME_STRUCT * 32) \ + + (N_PADDING_UINT8_IN_FRAME_STRUCT * 8)) + + +/* + * Main frame structure holding the main store and auxilary access properties + * the "pointer_state" and "access_scope" should be encoded on the + * "vied_vaddress_t" type + */ +struct ia_css_frame_s { + /**< State of the frame for purpose of sequencing */ + ia_css_buffer_state_t buffer_state; + /**< Access direction, may change when buffer state changes */ + ia_css_access_type_t access_type; + /**< State of the pointer for purpose of embedded MMU coherency */ + ia_css_pointer_state_t pointer_state; + /**< Access to the pointer for purpose of host cache coherency */ + 
ia_css_access_scopes_t access_scope; + /**< Base virtual address to data in subsystem virtual memory space */ + vied_vaddress_t data; + /**< Offset to buffer address within external buffer set structure */ + uint32_t data_index; + /**< Total allocation size in bytes */ + uint32_t data_bytes; + /**< Padding for 64bit alignment */ + uint8_t padding[N_PADDING_UINT8_IN_FRAME_STRUCT]; +}; + +#define N_UINT16_IN_FRAGMENT_DESC_STRUCT (3 * IA_CSS_N_DATA_DIMENSION) +#define N_PADDING_UINT8_IN_FRAGMENT_DESC_STRUCT 4 + +#define IA_CSS_FRAGMENT_DESCRIPTOR_STRUCT_BITS \ + ((N_UINT16_IN_FRAME_DESC_STRUCT * 16) \ + + (N_PADDING_UINT8_IN_FRAGMENT_DESC_STRUCT * 8)) + +/* + * Structure defining the fragment (size and access) properties. + * + * All cropping and padding effects are described by the difference between + * the frame size and its location and the fragment size(s) and location(s) + */ +struct ia_css_fragment_descriptor_s { + /**< Logical dimensions of the fragment */ + uint16_t dimension[IA_CSS_N_DATA_DIMENSION]; + /**< Logical location of the fragment in the frame */ + uint16_t index[IA_CSS_N_DATA_DIMENSION]; + /**< Fractional start (phase) of the fragment in the access unit */ + uint16_t offset[IA_CSS_N_DATA_DIMENSION]; + /**< Padding for 64bit alignment */ + uint8_t padding[N_PADDING_UINT8_IN_FRAGMENT_DESC_STRUCT]; +}; + + +/*! Print the frame object to file/stream + + @param frame[in] frame object + @param fid[out] file/stream handle + + @return < 0 on error + */ +IA_CSS_PSYS_DATA_STORAGE_CLASS_H +int ia_css_frame_print( + const ia_css_frame_t *frame, void *fid); + +/*! Get the data buffer handle from the frame object + +@param frame[in] frame object + +@return buffer pointer, VIED_NULL on error +*/ +IA_CSS_PSYS_DATA_STORAGE_CLASS_H +const vied_vaddress_t *ia_css_frame_get_buffer_host_virtual_address( + const ia_css_frame_t *frame); + +/*! 
Get the data buffer handle from the frame object + + @param frame[in] frame object + + @return buffer pointer, VIED_NULL on error + */ +IA_CSS_PSYS_DATA_STORAGE_CLASS_H +vied_vaddress_t ia_css_frame_get_buffer(const ia_css_frame_t *frame); + +/*! Set the data buffer handle on the frame object + + @param frame[in] frame object + @param buffer[in] buffer pointer + + @return < 0 on error + */ +IA_CSS_PSYS_DATA_STORAGE_CLASS_H +int ia_css_frame_set_buffer( + ia_css_frame_t *frame, vied_vaddress_t buffer); + +/*! Get the data buffer index in the frame object + + @param frame[in] frame object + + @return data buffer index on success, -1 on error + */ +IA_CSS_PSYS_DATA_STORAGE_CLASS_H +int ia_css_frame_get_data_index( + const ia_css_frame_t *frame); + +/*! Set the data buffer index in the frame object + + @param frame[in] frame object + @param data_index[in] data buffer index + + @return < 0 on error + */ +IA_CSS_PSYS_DATA_STORAGE_CLASS_H +int ia_css_frame_set_data_index( + ia_css_frame_t *frame, + unsigned int data_index); + +/*! Set the data buffer size on the frame object + + @param frame[in] frame object + @param size[in] number of data bytes + + @return < 0 on error + */ +IA_CSS_PSYS_DATA_STORAGE_CLASS_H +int ia_css_frame_set_data_bytes( + ia_css_frame_t *frame, unsigned size); + +/*! Get the data buffer state from the frame object + + @param frame[in] frame object + + @return buffer state, limit value on error + */ +IA_CSS_PSYS_DATA_STORAGE_CLASS_H +ia_css_buffer_state_t ia_css_frame_get_buffer_state( + const ia_css_frame_t *frame); + +/*! Set the data buffer state of the frame object + + @param frame[in] frame object + @param buffer_state[in] buffer state + + @return < 0 on error + */ +IA_CSS_PSYS_DATA_STORAGE_CLASS_H +int ia_css_frame_set_buffer_state(ia_css_frame_t *frame, + const ia_css_buffer_state_t buffer_state); + +/*! 
Get the data pointer state from the frame object + + @param frame[in] frame object + + @return pointer state, limit value on error + */ +IA_CSS_PSYS_DATA_STORAGE_CLASS_H +ia_css_pointer_state_t ia_css_frame_get_pointer_state( + const ia_css_frame_t *frame); + +/*! Set the data pointer state of the frame object + + @param frame[in] frame object + @param pointer_state[in] pointer state + + @return < 0 on error + */ +IA_CSS_PSYS_DATA_STORAGE_CLASS_H +int ia_css_frame_set_pointer_state(ia_css_frame_t *frame, + const ia_css_pointer_state_t pointer_state); + +/*! Print the frame descriptor object to file/stream + + @param frame_descriptor[in] frame descriptor object + @param fid[out] file/stream handle + + @return < 0 on error + */ +IA_CSS_PSYS_DATA_STORAGE_CLASS_H +int ia_css_frame_descriptor_print( + const ia_css_frame_descriptor_t *frame_descriptor, void *fid); + +/*! Print the fragment descriptor object to file/stream + + @param fragment_descriptor[in] fragment descriptor object + @param fid[out] file/stream handle + + @return < 0 on error + */ +IA_CSS_PSYS_DATA_STORAGE_CLASS_H +int ia_css_fragment_descriptor_print( + const ia_css_fragment_descriptor_t *fragment_descriptor, void *fid); + +/*! Compute the bitmap for the frame format type + + @param frame_format_type[in] frame format type + + @return 0 on error + */ +IA_CSS_PSYS_DATA_STORAGE_CLASS_H +ia_css_frame_format_bitmap_t ia_css_frame_format_bit_mask( + const ia_css_frame_format_type_t frame_format_type); + +/*! clear frame format bitmap + + @return cleared bitmap + */ +IA_CSS_PSYS_DATA_STORAGE_CLASS_H +ia_css_frame_format_bitmap_t ia_css_frame_format_bitmap_clear(void); + + +/*! Compute the size of storage required for the data descriptor object + * on a terminal + *@param plane_count[in] The number of data planes in the buffer + */ +IA_CSS_PSYS_DATA_STORAGE_CLASS_H +size_t ia_css_sizeof_frame_descriptor( + const uint8_t plane_count); +/*! 
Compute the size of storage required for the kernel parameter descriptor + * object on a terminal + + @param section_count[in] The number of parameter sections in the buffer + + @return 0 on error + */ +extern size_t ia_css_sizeof_kernel_param_descriptor( + const uint16_t section_count); + +#ifdef __IA_CSS_PSYS_DATA_INLINE__ +#include "ia_css_program_group_data_impl.h" +#endif /* __IA_CSS_PSYS_DATA_INLINE__ */ + +#endif /* __IA_CSS_PROGRAM_GROUP_DATA_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/psysapi/data/interface/ia_css_program_group_data_defs.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/psysapi/data/interface/ia_css_program_group_data_defs.h new file mode 100644 index 0000000000000..3f177a19b98b4 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/psysapi/data/interface/ia_css_program_group_data_defs.h @@ -0,0 +1,196 @@ +/* +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+*/ + +#ifndef __IA_CSS_PROGRAM_GROUP_DATA_DEFS_H +#define __IA_CSS_PROGRAM_GROUP_DATA_DEFS_H + + +/* + * Pre-defined frame format + * + * Those formats have inbuild support of traffic + * and access functions + * + * Note that the formats are for terminals, so there + * is no distinction between input and output formats + * - Custom formats with ot without descriptor + * - 4CC formats such as YUV variants + * - MIPI (line) formats as produced by CSI receivers + * - MIPI (sensor) formats such as Bayer or RGBC + * - CSS internal formats (private types) + * - CSS parameters (type 1 - 6) + */ +#define IA_CSS_FRAME_FORMAT_TYPE_BITS 32 +typedef enum ia_css_frame_format_type { + IA_CSS_DATA_CUSTOM_NO_DESCRIPTOR = 0, + IA_CSS_DATA_CUSTOM, + + /* 12 bit YUV 411, Y, UV 2-plane (8 bit per element) */ + IA_CSS_DATA_FORMAT_NV11, + /* bpp bit YUV 420, Y, U, V 3-plane (bpp/1.5 bpe) */ + IA_CSS_DATA_FORMAT_YUV420, + /* 12 bit YUV 420, Y, V, U 3-plane (8 bit per element) */ + IA_CSS_DATA_FORMAT_YV12, + /* 12 bit YUV 420, Y, UV 2-plane (8 bit per element) */ + IA_CSS_DATA_FORMAT_NV12, + /* 16 bit YUV 420, Y, UV 2-plane (8 bit per element) */ + IA_CSS_DATA_FORMAT_NV12_16, + /* 12 bit YUV 420, Intel proprietary tiled format, TileY */ + IA_CSS_DATA_FORMAT_NV12_TILEY, + /* 12 bit YUV 420, Y, VU 2-plane (8 bit per element) */ + IA_CSS_DATA_FORMAT_NV21, + /* bpp bit YUV 422, Y, U, V 3-plane (bpp/2 bpe) */ + IA_CSS_DATA_FORMAT_YUV422, + /* 16 bit YUV 422, Y, V, U 3-plane (8 bit per element) */ + IA_CSS_DATA_FORMAT_YV16, + /* 16 bit YUV 422, Y, UV 2-plane (8 bit per element) */ + IA_CSS_DATA_FORMAT_NV16, + /* 16 bit YUV 422, Y, VU 2-plane (8 bit per element) */ + IA_CSS_DATA_FORMAT_NV61, + /* 16 bit YUV 422, UYVY 1-plane interleaved (8 bit per element) */ + IA_CSS_DATA_FORMAT_UYVY, + /* 16 bit YUV 422, YUYV 1-plane interleaved (8 bit per element) */ + IA_CSS_DATA_FORMAT_YUYV, + /* bpp bit YUV 444, Y, U, V 3-plane (bpp/3 bpe) */ + IA_CSS_DATA_FORMAT_YUV444, + /* 8 bit monochrome plane */ + 
IA_CSS_DATA_FORMAT_Y800, + + /* 5-6-5 bit packed (1-plane) RGB (16bpp, ~5 bpe) */ + IA_CSS_DATA_FORMAT_RGB565, + /* 24 bit RGB, 3 planes (8 bit per element) */ + IA_CSS_DATA_FORMAT_RGB888, + /* 32 bit RGB-Alpha, 1 plane (8 bit per element) */ + IA_CSS_DATA_FORMAT_RGBA888, + + /* bpp bit raw, [[Gr, R];[B, Gb]] 1-plane (bpp == bpe) */ + IA_CSS_DATA_FORMAT_BAYER_GRBG, + /* bpp bit raw, [[R, Gr];[Gb, B]] 1-plane (bpp == bpe) */ + IA_CSS_DATA_FORMAT_BAYER_RGGB, + /* bpp bit raw, [[B, Gb];[Gr, R]] 1-plane (bpp == bpe) */ + IA_CSS_DATA_FORMAT_BAYER_BGGR, + /* bpp bit raw, [[Gb, B];[R, Gr]] 1-plane (bpp == bpe) */ + IA_CSS_DATA_FORMAT_BAYER_GBRG, + + /* bpp bit (NV12) YUV 420, Y, UV 2-plane derived 3-line, + * 2-Y, 1-UV (bpp/1.5 bpe): M420 format + */ + IA_CSS_DATA_FORMAT_YUV420_LINE, + /* Deprecated RAW, 1 plane */ + IA_CSS_DATA_FORMAT_RAW, + /* Deprecated RAW, 1 plane, packed */ + IA_CSS_DATA_FORMAT_RAW_PACKED, + /* Internal, for advanced ISP */ + IA_CSS_DATA_FORMAT_QPLANE6, + /* 1D byte stream, used for jpeg 1-plane */ + IA_CSS_DATA_FORMAT_BINARY_8, + /* Deprecated MIPI frame, 1D byte stream 1 plane */ + IA_CSS_DATA_FORMAT_MIPI, + /* 12 bit [[YY];[UYVY]] 1-plane interleaved 2-line + * (8 bit per element) + */ + IA_CSS_DATA_FORMAT_MIPI_YUV420_8, + /* 15 bit [[YY];[UYVY]] 1-plane interleaved 2-line + * (10 bit per element) + */ + IA_CSS_DATA_FORMAT_MIPI_YUV420_10, + /* 12 bit [[UY];[VY]] 1-plane interleaved 2-line (8 bit per element) */ + IA_CSS_DATA_FORMAT_MIPI_LEGACY_YUV420_8, + + /* Type 1-5 parameter, not fragmentable */ + IA_CSS_DATA_GENERIC_PARAMETER, + /* Video stabilisation Type 6 parameter, fragmentable */ + IA_CSS_DATA_DVS_PARAMETER, + /* Video stabilisation Type 6 parameter, coordinates */ + IA_CSS_DATA_DVS_COORDINATES, + /* Dead Pixel correction Type 6 parameter, fragmentable */ + IA_CSS_DATA_DPC_PARAMETER, + /* Lens Shading Correction Type 6 parameter, fragmentable */ + IA_CSS_DATA_LSC_PARAMETER, + /* 3A statistics output HI. 
*/ + IA_CSS_DATA_S3A_STATISTICS_HI, + /* 3A statistics output LO. */ + IA_CSS_DATA_S3A_STATISTICS_LO, + /* histogram output */ + IA_CSS_DATA_S3A_HISTOGRAM, + /* GammaStar grid */ + IA_CSS_DATA_GAMMASTAR_GRID, + + /* Gr R B Gb Gr R B Gb in PIXELS (also called isys interleaved) */ + IA_CSS_DATA_FORMAT_BAYER_LINE_INTERLEAVED, + /* Gr R B Gb Gr R B Gb in VECTORS (VCC IMAGE, ISP NWAY depentdent) */ + IA_CSS_DATA_FORMAT_BAYER_VECTORIZED, + /* Gr R Gr R ... | B Gb B Gb .. in VECTORS (ISP NWAY depentdent) */ + IA_CSS_DATA_FORMAT_BAYER_GRBG_VECTORIZED, + + /* 16 bit YUV 420, Y even plane, Y uneven plane, + * UV plane vector interleaved + */ + IA_CSS_DATA_FORMAT_YUV420_VECTORIZED, + /* 16 bit YUV 420, YYUVYY vector interleaved */ + IA_CSS_DATA_FORMAT_YYUVYY_VECTORIZED, + + /* 12 bit YUV 420, Intel proprietary tiled format, TileYf */ + IA_CSS_DATA_FORMAT_NV12_TILEYF, + + /*Y samples appear first in the memory. All Y samples are array of WORDs; + * even number of lines ; + * Surface stride can be larger than the width of Y plane. + * This array is followed immediately by chroma array. + * Chroma array is an array of WORDs, with interleaved U/V samples. + * If the interleaved U/V plane is addresses as an * array of DWORDs, + * the least significant word contains U sample. The stride of the + * interleaved U/V plane is equal to Y plane. 10 bit data. + */ + IA_CSS_DATA_FORMAT_P010, + + /* MSB aligned version of P010*/ + IA_CSS_DATA_FORMAT_P010_MSB, + + /* P016/P012 Y samples appear first in the memory. + * All Y samples are array of WORDs; + * even number of lines ; + * Surface stride can be larger than the width of Y plane. + * This array is followed immediately by chroma array. + * Chroma array is an array of WORDs, with interleaved U/V samples. + * If the interleaved U/V plane is addresses as an * array of DWORDs, + * the least significant word contains U sample. The stride of the + * interleaved U/V plane is equal to Y plane. 12 bit data. 
+ */ + IA_CSS_DATA_FORMAT_P016, + + /* MSB aligned version of P016*/ + IA_CSS_DATA_FORMAT_P016_MSB, + + /* TILEYYf representation of P010*/ + IA_CSS_DATA_FORMAT_P010_TILEYF, + + /* TILEYYf representation of P010 MSB aligned*/ + IA_CSS_DATA_FORMAT_P010_MSB_TILEYF, + + /* TILEYYf representation of P016*/ + IA_CSS_DATA_FORMAT_P016_TILEYF, + + /* TILEYYf representation of P016 MSB aligned*/ + IA_CSS_DATA_FORMAT_P016_MSB_TILEYF, + + /* consists of L and R PDAF pixel pairs. + * L and R can be interleaved or not. 1-plane (bpp == bpe) */ + IA_CSS_DATA_FORMAT_PAF, + + IA_CSS_N_FRAME_FORMAT_TYPES +} ia_css_frame_format_type_t; + + +#endif /* __IA_CSS_PROGRAM_GROUP_DATA_DEFS_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/psysapi/data/interface/ia_css_psys_data_storage_class.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/psysapi/data/interface/ia_css_psys_data_storage_class.h new file mode 100644 index 0000000000000..6a4e3a28e5336 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/psysapi/data/interface/ia_css_psys_data_storage_class.h @@ -0,0 +1,28 @@ +/* +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+*/ + +#ifndef __IA_CSS_PSYS_DATA_STORAGE_CLASS_H +#define __IA_CSS_PSYS_DATA_STORAGE_CLASS_H + +#include "storage_class.h" + +#ifndef __IA_CSS_PSYS_DATA_INLINE__ +#define IA_CSS_PSYS_DATA_STORAGE_CLASS_H STORAGE_CLASS_EXTERN +#define IA_CSS_PSYS_DATA_STORAGE_CLASS_C +#else +#define IA_CSS_PSYS_DATA_STORAGE_CLASS_H STORAGE_CLASS_INLINE +#define IA_CSS_PSYS_DATA_STORAGE_CLASS_C STORAGE_CLASS_INLINE +#endif + +#endif /* __IA_CSS_PSYS_DATA_STORAGE_CLASS_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/psysapi/data/interface/ia_css_psys_data_trace.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/psysapi/data/interface/ia_css_psys_data_trace.h new file mode 100644 index 0000000000000..49afed9ce9dfc --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/psysapi/data/interface/ia_css_psys_data_trace.h @@ -0,0 +1,102 @@ +/* +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+*/ + +#ifndef __IA_CSS_PSYS_DATA_TRACE_H +#define __IA_CSS_PSYS_DATA_TRACE_H + +#include "ia_css_psysapi_trace.h" + +#define PSYS_DATA_TRACE_LEVEL_CONFIG_DEFAULT PSYSAPI_TRACE_LOG_LEVEL_OFF + +/* Default sub-module tracing config */ +#if (!defined(PSYSAPI_DATA_TRACING_OVERRIDE)) + #define PSYS_DATA_TRACE_LEVEL_CONFIG PSYS_DATA_TRACE_LEVEL_CONFIG_DEFAULT +#endif + +/* Module/sub-module specific trace setting will be used if + * the trace level is not specified from the module or + PSYSAPI_DATA_TRACING_OVERRIDE is defined + */ +#if (defined(PSYSAPI_DATA_TRACING_OVERRIDE)) + /* Module/sub-module specific trace setting */ + #if PSYSAPI_DATA_TRACING_OVERRIDE == PSYSAPI_TRACE_LOG_LEVEL_OFF + /* PSYSAPI_TRACE_LOG_LEVEL_OFF */ + #define PSYSAPI_DATA_TRACE_METHOD \ + IA_CSS_TRACE_METHOD_NATIVE + #define PSYSAPI_DATA_TRACE_LEVEL_ASSERT \ + IA_CSS_TRACE_LEVEL_DISABLED + #define PSYSAPI_DATA_TRACE_LEVEL_ERROR \ + IA_CSS_TRACE_LEVEL_DISABLED + #define PSYSAPI_DATA_TRACE_LEVEL_WARNING \ + IA_CSS_TRACE_LEVEL_DISABLED + #define PSYSAPI_DATA_TRACE_LEVEL_INFO \ + IA_CSS_TRACE_LEVEL_DISABLED + #define PSYSAPI_DATA_TRACE_LEVEL_DEBUG \ + IA_CSS_TRACE_LEVEL_DISABLED + #define PSYSAPI_DATA_TRACE_LEVEL_VERBOSE \ + IA_CSS_TRACE_LEVEL_DISABLED + #elif PSYSAPI_DATA_TRACING_OVERRIDE == PSYSAPI_TRACE_LOG_LEVEL_NORMAL + /* PSYSAPI_TRACE_LOG_LEVEL_NORMAL */ + #define PSYSAPI_DATA_TRACE_METHOD \ + IA_CSS_TRACE_METHOD_NATIVE + #define PSYSAPI_DATA_TRACE_LEVEL_ASSERT \ + IA_CSS_TRACE_LEVEL_DISABLED + #define PSYSAPI_DATA_TRACE_LEVEL_ERROR \ + IA_CSS_TRACE_LEVEL_ENABLED + #define PSYSAPI_DATA_TRACE_LEVEL_WARNING \ + IA_CSS_TRACE_LEVEL_DISABLED + #define PSYSAPI_DATA_TRACE_LEVEL_INFO \ + IA_CSS_TRACE_LEVEL_ENABLED + #define PSYSAPI_DATA_TRACE_LEVEL_DEBUG \ + IA_CSS_TRACE_LEVEL_DISABLED + #define PSYSAPI_DATA_TRACE_LEVEL_VERBOSE \ + IA_CSS_TRACE_LEVEL_DISABLED + #elif PSYSAPI_DATA_TRACING_OVERRIDE == PSYSAPI_TRACE_LOG_LEVEL_DEBUG + /* PSYSAPI_TRACE_LOG_LEVEL_DEBUG */ + #define 
PSYSAPI_DATA_TRACE_METHOD \ + IA_CSS_TRACE_METHOD_NATIVE + #define PSYSAPI_DATA_TRACE_LEVEL_ASSERT \ + IA_CSS_TRACE_LEVEL_ENABLED + #define PSYSAPI_DATA_TRACE_LEVEL_ERROR \ + IA_CSS_TRACE_LEVEL_ENABLED + #define PSYSAPI_DATA_TRACE_LEVEL_WARNING \ + IA_CSS_TRACE_LEVEL_ENABLED + #define PSYSAPI_DATA_TRACE_LEVEL_INFO \ + IA_CSS_TRACE_LEVEL_ENABLED + #define PSYSAPI_DATA_TRACE_LEVEL_DEBUG \ + IA_CSS_TRACE_LEVEL_ENABLED + #define PSYSAPI_DATA_TRACE_LEVEL_VERBOSE \ + IA_CSS_TRACE_LEVEL_ENABLED + #else + #error "No PSYSAPI_DATA Tracing level defined" + #endif +#else + /* Inherit Module trace setting */ + #define PSYSAPI_DATA_TRACE_METHOD \ + PSYSAPI_TRACE_METHOD + #define PSYSAPI_DATA_TRACE_LEVEL_ASSERT \ + PSYSAPI_TRACE_LEVEL_ASSERT + #define PSYSAPI_DATA_TRACE_LEVEL_ERROR \ + PSYSAPI_TRACE_LEVEL_ERROR + #define PSYSAPI_DATA_TRACE_LEVEL_WARNING \ + PSYSAPI_TRACE_LEVEL_WARNING + #define PSYSAPI_DATA_TRACE_LEVEL_INFO \ + PSYSAPI_TRACE_LEVEL_INFO + #define PSYSAPI_DATA_TRACE_LEVEL_DEBUG \ + PSYSAPI_TRACE_LEVEL_DEBUG + #define PSYSAPI_DATA_TRACE_LEVEL_VERBOSE \ + PSYSAPI_TRACE_LEVEL_VERBOSE +#endif + +#endif /* __IA_CSS_PSYSAPI_DATA_TRACE_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/psysapi/data/src/ia_css_program_group_data.c b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/psysapi/data/src/ia_css_program_group_data.c new file mode 100644 index 0000000000000..edf3e55e6c399 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/psysapi/data/src/ia_css_program_group_data.c @@ -0,0 +1,26 @@ +/* +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. 
+ * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ + +#include "ia_css_psys_data_storage_class.h" + +/* + * Functions to possibly inline + */ + +#ifdef __IA_CSS_PSYS_DATA_INLINE__ +STORAGE_CLASS_INLINE int +__ia_css_program_group_data_avoid_warning_on_empty_file(void) { return 0; } +#else /* __IA_CSS_PSYS_DATA_INLINE__ */ +#include "ia_css_program_group_data_impl.h" +#endif /* __IA_CSS_PSYS_DATA_INLINE__ */ diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/psysapi/data/src/ia_css_program_group_data_impl.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/psysapi/data/src/ia_css_program_group_data_impl.h new file mode 100644 index 0000000000000..f08a057e4480e --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/psysapi/data/src/ia_css_program_group_data_impl.h @@ -0,0 +1,455 @@ +/* +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+*/ + +#ifndef __IA_CSS_PROGRAM_GROUP_DATA_IMPL_H +#define __IA_CSS_PROGRAM_GROUP_DATA_IMPL_H + +#include "ia_css_program_group_data.h" +#include "ia_css_psys_data_trace.h" +#include "ia_css_terminal_defs.h" +#include /* for verifexit */ +#include /* for COMPILATION_ERROR_IF */ +#include /* for NOT_USED */ + +IA_CSS_PSYS_DATA_STORAGE_CLASS_C +int ia_css_frame_print( + const ia_css_frame_t *frame, void *fid) +{ + int retval = -1; + + NOT_USED(fid); + + IA_CSS_TRACE_0(PSYSAPI_DATA, INFO, "ia_css_frame_print(): enter:\n"); + + verifexit(frame != NULL); + + IA_CSS_TRACE_1(PSYSAPI_DATA, INFO, + "\tbuffer = %d\n", ia_css_frame_get_buffer(frame)); + IA_CSS_TRACE_1(PSYSAPI_DATA, INFO, + "\tbuffer_state = %d\n", ia_css_frame_get_buffer_state(frame)); + /* IA_CSS_TRACE_1(PSYSAPI_DATA, INFO, "\tbuffer_state = %s\n", + * ia_css_buffer_state_string(ia_css_frame_get_buffer_state(frame))); + */ + IA_CSS_TRACE_1(PSYSAPI_DATA, INFO, + "\tpointer_state = %d\n", ia_css_frame_get_pointer_state(frame)); + /* IA_CSS_TRACE_1(PSYSAPI_DATA, INFO, "\tpointer_state = %s\n", + * ia_css_pointer_state_string(ia_css_frame_get_pointer_state(frame))); + */ + IA_CSS_TRACE_1(PSYSAPI_DATA, INFO, + "\tdata_bytes = %d\n", frame->data_bytes); + + retval = 0; +EXIT: + if (retval != 0) { + IA_CSS_TRACE_1(PSYSAPI_DATA, ERROR, + "ia_css_frame_print failed (%i)\n", retval); + } + return retval; +} + +IA_CSS_PSYS_DATA_STORAGE_CLASS_C +const vied_vaddress_t *ia_css_frame_get_buffer_host_virtual_address( + const ia_css_frame_t *frame) { + + IA_CSS_TRACE_0(PSYSAPI_DATA, VERBOSE, + "ia_css_frame_get_buffer_host_virtual_address(): enter:\n"); + + verifexit(frame != NULL); + return &(frame->data); + +EXIT: + if (NULL == frame) { + IA_CSS_TRACE_0(PSYSAPI_DATA, WARNING, + "ia_css_frame_get_buffer_host_virtual_address invalid argument\n"); + } + return NULL; +} + +IA_CSS_PSYS_DATA_STORAGE_CLASS_C +vied_vaddress_t ia_css_frame_get_buffer( + const ia_css_frame_t *frame) +{ + vied_vaddress_t buffer = VIED_NULL; + + 
IA_CSS_TRACE_0(PSYSAPI_DATA, VERBOSE, + "ia_css_frame_get_buffer(): enter:\n"); + + verifexit(frame != NULL); + buffer = frame->data; + +EXIT: + if (NULL == frame) { + IA_CSS_TRACE_0(PSYSAPI_DATA, WARNING, + "ia_css_frame_get_buffer invalid argument\n"); + } + return buffer; +} + +IA_CSS_PSYS_DATA_STORAGE_CLASS_C +int ia_css_frame_set_buffer( + ia_css_frame_t *frame, + vied_vaddress_t buffer) +{ + int retval = -1; + + IA_CSS_TRACE_0(PSYSAPI_DATA, VERBOSE, + "ia_css_frame_set_buffer(): enter:\n"); + + verifexit(frame != NULL); + frame->data = buffer; + + retval = 0; +EXIT: + if (retval != 0) { + IA_CSS_TRACE_1(PSYSAPI_DATA, ERROR, + "ia_css_frame_set_buffer failed (%i)\n", retval); + } + return retval; +} + +IA_CSS_PSYS_DATA_STORAGE_CLASS_C +int ia_css_frame_get_data_index( + const ia_css_frame_t *frame) +{ + int data_index = -1; + + IA_CSS_TRACE_0(PSYSAPI_DATA, VERBOSE, + "ia_css_frame_get_data_index(): enter:\n"); + + verifexit(frame != NULL); + + data_index = frame->data_index; + +EXIT: + if (NULL == frame) { + IA_CSS_TRACE_0(PSYSAPI_DATA, WARNING, + "ia_css_frame_get_data_index invalid argument\n"); + } + return data_index; +} + +IA_CSS_PSYS_DATA_STORAGE_CLASS_C +int ia_css_frame_set_data_index( + ia_css_frame_t *frame, + unsigned int data_index) +{ + int retval = -1; + + IA_CSS_TRACE_0(PSYSAPI_DATA, VERBOSE, + "ia_css_frame_set_data_index(): enter:\n"); + + verifexit(frame != NULL); + + frame->data_index = data_index; + + retval = 0; +EXIT: + if (retval != 0) { + IA_CSS_TRACE_1(PSYSAPI_DATA, ERROR, + "ia_css_frame_set_data_index failed (%i)\n", + retval); + } + return retval; +} + +IA_CSS_PSYS_DATA_STORAGE_CLASS_C +int ia_css_frame_set_data_bytes( + ia_css_frame_t *frame, + unsigned int size) +{ + int retval = -1; + + IA_CSS_TRACE_0(PSYSAPI_DATA, VERBOSE, + "ia_css_frame_set_data_bytes(): enter:\n"); + + verifexit(frame != NULL); + frame->data_bytes = size; + + retval = 0; +EXIT: + if (retval != 0) { + IA_CSS_TRACE_1(PSYSAPI_DATA, ERROR, + 
"ia_css_frame_set_data_bytes failed (%i)\n", retval); + } + return retval; +} + +IA_CSS_PSYS_DATA_STORAGE_CLASS_C +ia_css_buffer_state_t ia_css_frame_get_buffer_state( + const ia_css_frame_t *frame) +{ + ia_css_buffer_state_t buffer_state = IA_CSS_N_BUFFER_STATES; + + IA_CSS_TRACE_0(PSYSAPI_DATA, VERBOSE, + "ia_css_frame_get_buffer_state(): enter:\n"); + + verifexit(frame != NULL); + buffer_state = frame->buffer_state; + +EXIT: + if (NULL == frame) { + IA_CSS_TRACE_0(PSYSAPI_DATA, WARNING, + "ia_css_frame_get_buffer_state invalid argument\n"); + } + return buffer_state; +} + +IA_CSS_PSYS_DATA_STORAGE_CLASS_C +int ia_css_frame_set_buffer_state( + ia_css_frame_t *frame, + const ia_css_buffer_state_t buffer_state) +{ + int retval = -1; + + IA_CSS_TRACE_0(PSYSAPI_DATA, VERBOSE, + "ia_css_frame_set_buffer_state(): enter:\n"); + + verifexit(frame != NULL); + frame->buffer_state = buffer_state; + + retval = 0; +EXIT: + if (retval != 0) { + IA_CSS_TRACE_1(PSYSAPI_DATA, ERROR, + "ia_css_frame_set_buffer_state failed (%i)\n", retval); + } + return retval; +} + +IA_CSS_PSYS_DATA_STORAGE_CLASS_C +ia_css_pointer_state_t ia_css_frame_get_pointer_state( + const ia_css_frame_t *frame) +{ + ia_css_pointer_state_t pointer_state = IA_CSS_N_POINTER_STATES; + + IA_CSS_TRACE_0(PSYSAPI_DATA, VERBOSE, + "ia_css_frame_get_pointer_state(): enter:\n"); + + verifexit(frame != NULL); + pointer_state = frame->pointer_state; + +EXIT: + if (NULL == frame) { + IA_CSS_TRACE_0(PSYSAPI_DATA, WARNING, + "ia_css_frame_get_pointer_state invalid argument\n"); + } + return pointer_state; +} + +IA_CSS_PSYS_DATA_STORAGE_CLASS_C +int ia_css_frame_set_pointer_state( + ia_css_frame_t *frame, + const ia_css_pointer_state_t pointer_state) +{ + int retval = -1; + + IA_CSS_TRACE_0(PSYSAPI_DATA, VERBOSE, + "ia_css_frame_set_pointer_state(): enter:\n"); + + verifexit(frame != NULL); + frame->pointer_state = pointer_state; + + retval = 0; +EXIT: + if (retval != 0) { + IA_CSS_TRACE_1(PSYSAPI_DATA, ERROR, + 
"ia_css_frame_set_pointer_state failed (%i)\n", retval); + } + return retval; +} + +IA_CSS_PSYS_DATA_STORAGE_CLASS_C +int ia_css_frame_descriptor_print( + const ia_css_frame_descriptor_t *frame_descriptor, + void *fid) +{ + int retval = -1; + int i; + uint8_t frame_plane_count; + + NOT_USED(fid); + + IA_CSS_TRACE_0(PSYSAPI_DATA, INFO, + "ia_css_frame_descriptor_print(): enter:\n"); + + COMPILATION_ERROR_IF(IA_CSS_N_DATA_DIMENSION <= 0); + + verifexit(frame_descriptor != NULL); + + IA_CSS_TRACE_0(PSYSAPI_DATA, INFO, + "ia_css_frame_descriptor_print(): enter:\n"); + + IA_CSS_TRACE_1(PSYSAPI_DATA, INFO, + "\tframe_format_type = %d\n", + frame_descriptor->frame_format_type); + /* IA_CSS_TRACE_1(PSYSAPI_DATA, INFO, "\tframe_format_type = %s\n", + * ia_css_frame_format_string(frame_descriptor->frame_format_type)); + */ + + IA_CSS_TRACE_1(PSYSAPI_DATA, INFO, + "\tbpp = %d\n", frame_descriptor->bpp); + IA_CSS_TRACE_1(PSYSAPI_DATA, INFO, + "\tbpe = %d\n", frame_descriptor->bpe); + IA_CSS_TRACE_1(PSYSAPI_DATA, INFO, + "\tis_compressed = %d\n", frame_descriptor->is_compressed); + + frame_plane_count = IA_CSS_N_FRAME_PLANES; + /* frame_plane_count = + * ia_css_frame_plane_count(frame_descriptor->frame_format_type); + */ + + verifexit(frame_plane_count > 0); + + IA_CSS_TRACE_1(PSYSAPI_DATA, INFO, + "\tplane_offsets[%d]: [\n", frame_plane_count); + for (i = 0; i < (int)frame_plane_count - 1; i++) { + IA_CSS_TRACE_1(PSYSAPI_DATA, INFO, + "\t%4d,\n", frame_descriptor->plane_offsets[i]); + } + IA_CSS_TRACE_1(PSYSAPI_DATA, INFO, + "\t%4d ]\n", frame_descriptor->plane_offsets[i]); + + IA_CSS_TRACE_1(PSYSAPI_DATA, INFO, + "\tdimension[%d] = {\n", IA_CSS_N_DATA_DIMENSION); + for (i = 0; i < (int)IA_CSS_N_DATA_DIMENSION - 1; i++) { + IA_CSS_TRACE_1(PSYSAPI_DATA, INFO, + "\t%4d,\n", frame_descriptor->dimension[i]); + } + IA_CSS_TRACE_1(PSYSAPI_DATA, INFO, + "\t%4d }\n", frame_descriptor->dimension[i]); + + COMPILATION_ERROR_IF(0 > (IA_CSS_N_DATA_DIMENSION - 2)); + 
IA_CSS_TRACE_1(PSYSAPI_DATA, INFO, + "\tstride[%d] = {\n", IA_CSS_N_DATA_DIMENSION - 1); + i = 0; + if (IA_CSS_N_DATA_DIMENSION > 2) { + for (i = 0; i < (int)IA_CSS_N_DATA_DIMENSION - 2; i++) { + IA_CSS_TRACE_1(PSYSAPI_DATA, INFO, + "\t%4d,\n", frame_descriptor->stride[i]); + } + } + IA_CSS_TRACE_1(PSYSAPI_DATA, INFO, + "\t%4d }\n", frame_descriptor->stride[i]); + + retval = 0; +EXIT: + if (retval != 0) { + IA_CSS_TRACE_1(PSYSAPI_DATA, ERROR, + "ia_css_frame_descriptor_print failed (%i)\n", retval); + } + return retval; +} + +IA_CSS_PSYS_DATA_STORAGE_CLASS_C +int ia_css_fragment_descriptor_print( + const ia_css_fragment_descriptor_t *fragment_descriptor, + void *fid) +{ + int retval = -1; + int i; + + NOT_USED(fid); + + IA_CSS_TRACE_0(PSYSAPI_DATA, INFO, + "ia_css_fragment_descriptor_print(): enter:\n"); + + verifexit(fragment_descriptor != NULL); + + IA_CSS_TRACE_1(PSYSAPI_DATA, INFO, + "dimension[%d] = {\n", IA_CSS_N_DATA_DIMENSION); + for (i = 0; i < (int)IA_CSS_N_DATA_DIMENSION - 1; i++) { + IA_CSS_TRACE_1(PSYSAPI_DATA, INFO, + "\t%4d,\n", fragment_descriptor->dimension[i]); + } + IA_CSS_TRACE_1(PSYSAPI_DATA, INFO, + "\t%4d }\n", fragment_descriptor->dimension[i]); + + IA_CSS_TRACE_1(PSYSAPI_DATA, INFO, + "index[%d] = {\n", IA_CSS_N_DATA_DIMENSION); + for (i = 0; i < (int)IA_CSS_N_DATA_DIMENSION - 1; i++) { + IA_CSS_TRACE_1(PSYSAPI_DATA, INFO, + "\t%4d,\n", fragment_descriptor->index[i]); + } + IA_CSS_TRACE_1(PSYSAPI_DATA, INFO, + "\t%4d }\n", fragment_descriptor->index[i]); + + IA_CSS_TRACE_1(PSYSAPI_DATA, INFO, + "offset[%d] = {\n", IA_CSS_N_DATA_DIMENSION); + for (i = 0; i < (int)IA_CSS_N_DATA_DIMENSION - 1; i++) { + IA_CSS_TRACE_1(PSYSAPI_DATA, INFO, + "\t%4d,\n", fragment_descriptor->offset[i]); + } + IA_CSS_TRACE_1(PSYSAPI_DATA, INFO, "\t%4d }\n", + fragment_descriptor->offset[i]); + + retval = 0; +EXIT: + if (retval != 0) { + IA_CSS_TRACE_1(PSYSAPI_DATA, ERROR, + "ia_css_fragment_descriptor_print failed (%i)\n", retval); + } + return retval; +} + 
+IA_CSS_PSYS_DATA_STORAGE_CLASS_C +ia_css_frame_format_bitmap_t ia_css_frame_format_bit_mask( + const ia_css_frame_format_type_t frame_format_type) +{ + ia_css_frame_format_bitmap_t bit_mask = 0; + + IA_CSS_TRACE_0(PSYSAPI_DATA, VERBOSE, + "ia_css_frame_format_bit_mask(): enter:\n"); + + if ((frame_format_type < IA_CSS_N_FRAME_FORMAT_TYPES) && + (frame_format_type < IA_CSS_FRAME_FORMAT_BITMAP_BITS)) { + bit_mask = (ia_css_frame_format_bitmap_t)1 << frame_format_type; + } else { + IA_CSS_TRACE_0(PSYSAPI_DATA, WARNING, + "ia_css_frame_format_bit_mask invalid argument\n"); + } + + return bit_mask; +} + +IA_CSS_PSYS_DATA_STORAGE_CLASS_C +ia_css_frame_format_bitmap_t ia_css_frame_format_bitmap_clear(void) +{ + IA_CSS_TRACE_0(PSYSAPI_DATA, VERBOSE, + "ia_css_frame_format_bitmap_clear(): enter:\n"); + + return 0; +} + +IA_CSS_PSYS_DATA_STORAGE_CLASS_C +size_t ia_css_sizeof_frame_descriptor( + const uint8_t plane_count) +{ + size_t size = 0; + + IA_CSS_TRACE_0(PSYSAPI_DATA, VERBOSE, + "ia_css_sizeof_frame_descriptor(): enter:\n"); + + verifexit(plane_count > 0); + size += sizeof(ia_css_frame_descriptor_t); + size += plane_count * sizeof(uint32_t); + +EXIT: + if (0 == plane_count) { + IA_CSS_TRACE_0(PSYSAPI_DATA, WARNING, + "ia_css_sizeof_frame_descriptor invalid argument\n"); + } + return size; +} + +#endif /* __IA_CSS_PROGRAM_GROUP_DATA_IMPL_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/psysapi/device/interface/cnlB0/ia_css_psys_transport_dep.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/psysapi/device/interface/cnlB0/ia_css_psys_transport_dep.h new file mode 100644 index 0000000000000..7bb145c1b183b --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/psysapi/device/interface/cnlB0/ia_css_psys_transport_dep.h @@ -0,0 +1,35 @@ +/* +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. 
+ * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ + +#ifndef __IA_CSS_PSYS_TRANSPORT_DEP_H +#define __IA_CSS_PSYS_TRANSPORT_DEP_H + +/* + * The ID's of the Psys specific queues. + */ +typedef enum ia_css_psys_cmd_queues { + /**< The in-order queue for scheduled process groups */ + IA_CSS_PSYS_CMD_QUEUE_COMMAND_ID = 0, + /**< The in-order queue for commands changing psys or + * process group state + */ + IA_CSS_PSYS_CMD_QUEUE_DEVICE_ID, + /**< An in-order queue for dedicated PPG commands */ + IA_CSS_PSYS_CMD_QUEUE_PPG0_COMMAND_ID, + /**< An in-order queue for dedicated PPG commands */ + IA_CSS_PSYS_CMD_QUEUE_PPG1_COMMAND_ID, + IA_CSS_N_PSYS_CMD_QUEUE_ID +} ia_css_psys_cmd_queue_ID_t; + +#endif /* __IA_CSS_PSYS_TRANSPORT_DEP_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/psysapi/device/interface/ia_css_psys_device.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/psysapi/device/interface/ia_css_psys_device.h new file mode 100644 index 0000000000000..dc8fa531b11e3 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/psysapi/device/interface/ia_css_psys_device.h @@ -0,0 +1,516 @@ +/* +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. 
+ * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ + +#ifndef __IA_CSS_PSYS_DEVICE_H +#define __IA_CSS_PSYS_DEVICE_H + +#include "ia_css_psys_init.h" +#include "ia_css_psys_transport.h" + +/*! \file */ + +/** @file ia_css_psys_device.h + * + * Define the interface to open the psys specific communication layer + * instance + */ + +#include /* vied_vaddress_t */ + +#include +#include + +#include +#include + +#define IA_CSS_PSYS_STATE_READY_PATTERN (0xF7F7F7F7) +#define IA_CSS_PSYS_STATE_RUNNING_PATTERN (0xE6E6E6E6) +#define IA_CSS_PSYS_STATE_STARTING_PATTERN (0xD5D5D5D5) +#define IA_CSS_PSYS_STATE_STARTED_PATTERN (0xC4C4C4C4) +#define IA_CSS_PSYS_STATE_INITIALIZING_PATTERN (0xB3B3B3B3) +#define IA_CSS_PSYS_STATE_INITIALIZED_PATTERN (0xA0A0A0A0) + +/* + * Defines the state of psys: + * - IA_CSS_PSYS_STATE_UNKNOWN = psys status is unknown (or not recognized) + * - IA_CSS_PSYS_STATE_INITIALING = some of the psys components are + * not initialized yet + * - IA_CSS_PSYS_STATE_INITIALIZED = psys components are initialized + * - IA_CSS_PSYS_STATE_STARTING = some of the psys components are initialized + * but not started yet + * - IA_CSS_PSYS_STATE_STARTED = psys components are started + * - IA_CSS_PSYS_STATE_RUNNING = some of the psys components are started + * but not ready yet + * - IA_CSS_PSYS_STATE_READY = psys is ready + * The state of psys can be obtained calling ia_css_psys_check_state() +*/ +typedef enum ia_css_psys_state { + IA_CSS_PSYS_STATE_UNKNOWN = 0, /**< psys state is unknown */ + /*< some of the psys components are not initialized yet*/ + IA_CSS_PSYS_STATE_INITIALIZING = IA_CSS_PSYS_STATE_INITIALIZING_PATTERN, + /**< psys components are initialized */ + IA_CSS_PSYS_STATE_INITIALIZED = IA_CSS_PSYS_STATE_INITIALIZED_PATTERN, + /**< some of the psys components are 
not started yet */ + IA_CSS_PSYS_STATE_STARTING = IA_CSS_PSYS_STATE_STARTING_PATTERN, + /**< psys components are started */ + IA_CSS_PSYS_STATE_STARTED = IA_CSS_PSYS_STATE_STARTED_PATTERN, + /**< some of the psys components are not ready yet */ + IA_CSS_PSYS_STATE_RUNNING = IA_CSS_PSYS_STATE_RUNNING_PATTERN, + /**< psys is ready */ + IA_CSS_PSYS_STATE_READY = IA_CSS_PSYS_STATE_READY_PATTERN, +} ia_css_psys_state_t; + +extern struct ia_css_syscom_context *psys_syscom; +#if HAS_DUAL_CMD_CTX_SUPPORT +extern struct ia_css_syscom_context *psys_syscom_secure; +#endif + +/*! Print the syscom creation descriptor to file/stream + + @param config[in] Psys syscom descriptor + @param fid[out] file/stream handle + + @return < 0 on error +*/ +extern int ia_css_psys_config_print( + const struct ia_css_syscom_config *config, void *fid); + +/*! Print the Psys syscom object to file/stream + + @param context[in] Psys syscom object + @param fid[out] file/stream handle + + @return < 0 on error + */ +extern int ia_css_psys_print( + const struct ia_css_syscom_context *context, void *fid); + +/*! Create the syscom creation descriptor + + @return NULL on error + */ +extern struct ia_css_syscom_config *ia_css_psys_specify(void); + +#if HAS_DUAL_CMD_CTX_SUPPORT +/*! Create the syscom creation descriptor for secure stream + + @param vtl0_addr_mask[in] VTL0 address mask that will be stored in 'secure' ctx + @return NULL on error + */ +extern struct ia_css_syscom_config *ia_css_psys_specify_secure(unsigned int vtl0_addr_mask); +#endif + +/*! Compute the size of storage required for allocating the Psys syscom object + + @param config[in] Psys syscom descriptor + + @return 0 on error + */ +extern size_t ia_css_sizeof_psys( + struct ia_css_syscom_config *config); + +#if HAS_DUAL_CMD_CTX_SUPPORT +/*! Open (and map the storage for) the Psys syscom object + This is the same as ia_css_psys_open() excluding server start. 
+ Target for VTIO usage where multiple syscom objects need to be + created first before this API is invoked. + + @param buffer[in] storage buffers for the syscom object + in the kernel virtual memory space and + its Psys mapped version + @param config[in] Psys syscom descriptor + @return NULL on error + */ + +extern struct ia_css_syscom_context *ia_css_psys_context_create( + const struct ia_css_psys_buffer_s *buffer, + struct ia_css_syscom_config *config); + +/*! Store the parameters of the Psys syscom object in DMEM, so + they can be communicated with FW. This step needs to be invoked + after SPC starts in ia_css_psys_open(), so SPC DMEM access blocker + programming already takes effective. + + @param context[in] Psys syscom object + @param config[in] Psys syscom descriptor + @return 0 if successful + */ +extern int ia_css_psys_context_store_dmem( + struct ia_css_syscom_context *context, + struct ia_css_syscom_config *config); + +/*! Start PSYS Server. Psys syscom object must have been created already. + Target for VTIO usage where multiple syscom objects need to be + created first before this API is invoked. + @param config[in] Psys syscom descriptor + + @return true if psys open started successfully + */ +extern int ia_css_psys_open( + struct ia_css_syscom_config *config); +#else +/*! Open (and map the storage for) the Psys syscom object + + @param buffer[in] storage buffers for the syscom object + in the kernel virtual memory space and + its Psys mapped version + @param config[in] Psys syscom descriptor + + Precondition(1): The buffer must be large enough to hold the syscom object. + Its size must be computed with the function "ia_css_sizeof_psys()". + The buffer must be created in the kernel memory space. + + Precondition(2): If buffer == NULL, the storage allocations and mapping + is performed in this function. Config must hold the handle to the Psys + virtual memory space + + Postcondition: The context is initialised in the provided/created buffer. 
+ The syscom context pointer is the kernel space handle to the syscom object + + @return NULL on error + */ +extern struct ia_css_syscom_context *ia_css_psys_open( + const struct ia_css_psys_buffer_s *buffer, + struct ia_css_syscom_config *config); +#endif /* HAS_DUAL_CMD_CTX_SUPPORT */ + +/*! completes the psys open procedure. Must be called multiple times + until it succeeds or driver determines the boot sequence has failed. + + @param context[in] Psys syscom object + + @return false if psys open has not completed successfully + */ +extern bool ia_css_psys_open_is_ready( + struct ia_css_syscom_context *context); + +#if HAS_DUAL_CMD_CTX_SUPPORT +/*! Request close of a PSYS context + * The functionatlity is the same as ia_css_psys_close() which closes PSYS syscom object. + * Counterpart of ia_css_psys_context_create() + * @param context[in]: Psys context + * @return NULL if close is successful context otherwise + */ +extern struct ia_css_syscom_context *ia_css_psys_context_destroy( + struct ia_css_syscom_context *context); + +/*! Request close of a PSYS device for VTIO case + * @param None + * @return 0 if successful + */ +extern int ia_css_psys_close(void); +#else +/*! Request close of a PSYS context + * @param context[in]: Psys context + * @return NULL if close is successful context otherwise + */ +extern struct ia_css_syscom_context *ia_css_psys_close( + struct ia_css_syscom_context *context); +#endif /* HAS_DUAL_CMD_CTX_SUPPORT*/ + +/*! Unmap and free the storage of the PSYS context + * @param context[in] Psys context + * @param force[in] Force release even if device is busy + * @return 0 if release is successful + * EINVAL if context is invalid + * EBUSY if device is not yet idle, and force==0 + */ +extern int ia_css_psys_release( + struct ia_css_syscom_context *context, + bool force); + +/*! 
Checks the state of the Psys syscom object + + @param context[in] Psys syscom object + + @return State of the syscom object + */ +extern ia_css_psys_state_t ia_css_psys_check_state( + struct ia_css_syscom_context *context); + +/*!Indicate if the designated cmd queue in the Psys syscom object is full + + @param context[in] Psys syscom object + @param id[in] Psys syscom cmd queue ID + + @return false if the cmd queue is not full or on error + */ + +extern bool ia_css_is_psys_cmd_queue_full( + struct ia_css_syscom_context *context, + ia_css_psys_cmd_queue_ID_t id); + +/*!Indicate if the designated cmd queue in the Psys syscom object is notfull + + @param context[in] Psys syscom object + @param id[in] Psys syscom cmd queue ID + + @return false if the cmd queue is full on error + */ +extern bool ia_css_is_psys_cmd_queue_not_full( + struct ia_css_syscom_context *context, + ia_css_psys_cmd_queue_ID_t id); + +/*!Indicate if the designated cmd queue in the Psys syscom object holds N space + + @param context[in] Psys syscom object + @param id[in] Psys syscom cmd queue ID + @param N[in] Number of messages + + @return false if the cmd queue space is unavailable or on error + */ +extern bool ia_css_has_psys_cmd_queue_N_space( + struct ia_css_syscom_context *context, + ia_css_psys_cmd_queue_ID_t id, + const unsigned int N); + +/*!Return the free space count in the designated cmd queue in the + * Psys syscom object + + @param context[in] Psys syscom object + @param id[in] Psys syscom cmd queue ID + + @return the space, < 0 on error + */ +extern int ia_css_psys_cmd_queue_get_available_space( + struct ia_css_syscom_context *context, + ia_css_psys_cmd_queue_ID_t id); + +/*!Indicate if there are any messages pending in the Psys syscom + * object event queues + + @param context[in] Psys syscom object + + @return false if there are no messages or on error + */ +extern bool ia_css_any_psys_event_queue_not_empty( + struct ia_css_syscom_context *context); + +/*!Indicate if the designated 
event queue in the Psys syscom object is empty + + @param context[in] Psys syscom object + @param id[in] Psys syscom event queue ID + + @return false if the event queue is not empty or on error + */ +extern bool ia_css_is_psys_event_queue_empty( + struct ia_css_syscom_context *context, + ia_css_psys_event_queue_ID_t id); + +/*!Indicate if the designated event queue in the Psys syscom object is not empty + + @param context[in] Psys syscom object + @param id[in] Psys syscom event queue ID + + @return false if the receive queue is empty or on error + */ +extern bool ia_css_is_psys_event_queue_not_empty( + struct ia_css_syscom_context *context, + ia_css_psys_event_queue_ID_t id); + +/*!Indicate if the designated event queue + * in the Psys syscom object holds N items + + @param context[in] Psys syscom object + @param id[in] Psys syscom event queue ID + @param N[in] Number of messages + + @return false if the event queue has insufficient messages + available or on error +*/ +extern bool ia_css_has_psys_event_queue_N_msgs( + struct ia_css_syscom_context *context, + ia_css_psys_event_queue_ID_t id, + const unsigned int N); + +/*!Return the message count in the designated event queue in the + * Psys syscom object + + @param context[in] Psys syscom object + @param id[in] Psys syscom event queue ID + + @return the messages, < 0 on error + */ +extern int ia_css_psys_event_queue_get_available_msgs( + struct ia_css_syscom_context *context, + ia_css_psys_event_queue_ID_t id); + +/*! 
Send (pass by value) a command on a queue in the Psys syscom object + + @param context[in] Psys syscom object + @param id[in] Psys syscom cmd queue ID +@param cmd_msg_buffer[in] pointer to the command message buffer + +Precondition: The command message buffer must be large enough + to hold the command + +Postcondition: Either 0 or 1 commands have been sent + +Note: The message size is fixed and determined on creation + + @return the number of sent commands (1), <= 0 on error + */ +extern int ia_css_psys_cmd_queue_send( + struct ia_css_syscom_context *context, + ia_css_psys_cmd_queue_ID_t id, + const void *cmd_msg_buffer); + +/*! Send (pass by value) N commands on a queue in the Psys syscom object + + @param context[in] Psys syscom object + @param id[in] Psys syscom cmd queue ID + @param cmd_msg_buffer[in] Pointer to the command message buffer +@param N[in] Number of commands + +Precondition: The command message buffer must be large enough + to hold the commands + +Postcondition: Either 0 or up to and including N commands have been sent + + Note: The message size is fixed and determined on creation + + @return the number of sent commands, <= 0 on error + */ +extern int ia_css_psys_cmd_queue_send_N( + struct ia_css_syscom_context *context, + ia_css_psys_cmd_queue_ID_t id, + const void *cmd_msg_buffer, + const unsigned int N); + +/*! 
Receive (pass by value) an event from an event queue in the + * Psys syscom object + + @param context[in] Psys syscom object + @param id[in] Psys syscom event queue ID + @param event_msg_buffer[out] pointer to the event message buffer + + Precondition: The event message buffer must be large enough to hold the event + + Postcondition: Either 0 or 1 events have been received + + Note: The event size is fixed and determined on creation + + @return the number of received events (1), <= 0 on error + */ +extern int ia_css_psys_event_queue_receive( + struct ia_css_syscom_context *context, + ia_css_psys_event_queue_ID_t id, + void *event_msg_buffer); + +/*! Receive (pass by value) N events from an event queue in the + * Psys syscom object + + @param context[in] Psys syscom object + @param id[in] Psys syscom event queue ID + @param event_msg_buffer[out] pointer to the event message buffer + @param N[in] Number of events + + Precondition: The event buffer must be large enough to hold the events + + Postcondition: Either 0 or up to and including N events have been received + + Note: The message size is fixed and determined on creation + + @return the number of received event messages, <= 0 on error + */ +extern int ia_css_psys_event_queue_receive_N( + struct ia_css_syscom_context *context, + ia_css_psys_event_queue_ID_t id, + void *event_msg_buffer, + const unsigned int N); + + +/* + * Access functions to query the object stats + */ + + +/*!Return the size of the Psys syscom object + + @param context[in] Psys syscom object + + @return 0 on error + */ +extern size_t ia_css_psys_get_size( + const struct ia_css_syscom_context *context); + +/*!Return the number of cmd queues in the Psys syscom object + + @param context[in] Psys syscom object + + @return 0 on error + */ +extern unsigned int ia_css_psys_get_cmd_queue_count( + const struct ia_css_syscom_context *context); + +/*!Return the number of event queues in the Psys syscom object + + @param context[in] Psys syscom object + + 
@return 0 on error + */ +extern unsigned int ia_css_psys_get_event_queue_count( + const struct ia_css_syscom_context *context); + +/*!Return the size of the indicated Psys command queue + + @param context[in] Psys syscom object + @param id[in] Psys syscom cmd queue ID + + Note: The queue size is expressed in the number of fields + + @return 0 on error + */ +extern size_t ia_css_psys_get_cmd_queue_size( + const struct ia_css_syscom_context *context, + ia_css_psys_cmd_queue_ID_t id); + +/*!Return the size of the indicated Psys event queue + + @param context[in] Psys syscom object + @param id[in] Psys syscom event queue ID + + Note: The queue size is expressed in the number of fields + + @return 0 on error + */ +extern size_t ia_css_psys_get_event_queue_size( + const struct ia_css_syscom_context *context, + ia_css_psys_event_queue_ID_t id); + +/*!Return the command message size of the indicated Psys command queue + + @param context[in] Psys syscom object + + Note: The message size is expressed in uint8_t + + @return 0 on error + */ +extern size_t ia_css_psys_get_cmd_msg_size( + const struct ia_css_syscom_context *context, + ia_css_psys_cmd_queue_ID_t id); + +/*!Return the event message size of the indicated Psys event queue + + @param context[in] Psys syscom object + + Note: The message size is expressed in uint8_t + + @return 0 on error + */ +extern size_t ia_css_psys_get_event_msg_size( + const struct ia_css_syscom_context *context, + ia_css_psys_event_queue_ID_t id); + +#endif /* __IA_CSS_PSYS_DEVICE_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/psysapi/device/interface/ia_css_psys_device_trace.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/psysapi/device/interface/ia_css_psys_device_trace.h new file mode 100644 index 0000000000000..8e5899bc66dba --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/psysapi/device/interface/ia_css_psys_device_trace.h @@ -0,0 +1,103 @@ +/* +* Support for Intel Camera 
Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ + +#ifndef __IA_CSS_PSYS_DEVICE_TRACE_H +#define __IA_CSS_PSYS_DEVICE_TRACE_H + +#include "ia_css_psysapi_trace.h" + +#define PSYS_DEVICE_TRACE_LEVEL_CONFIG_DEFAULT PSYSAPI_TRACE_LOG_LEVEL_OFF + +/* Default sub-module tracing config */ +#if (!defined(PSYSAPI_DEVICE_TRACING_OVERRIDE)) + #define PSYS_DEVICE_TRACE_LEVEL_CONFIG \ + PSYS_DEVICE_TRACE_LEVEL_CONFIG_DEFAULT +#endif + +/* Module/sub-module specific trace setting will be used if + * the trace level is not specified from the module or + PSYSAPI_DEVICE_TRACING_OVERRIDE is defined + */ +#if (defined(PSYSAPI_DEVICE_TRACING_OVERRIDE)) + /* Module/sub-module specific trace setting */ + #if PSYSAPI_DEVICE_TRACING_OVERRIDE == PSYSAPI_TRACE_LOG_LEVEL_OFF + /* PSYSAPI_TRACE_LOG_LEVEL_OFF */ + #define PSYSAPI_DEVICE_TRACE_METHOD \ + IA_CSS_TRACE_METHOD_NATIVE + #define PSYSAPI_DEVICE_TRACE_LEVEL_ASSERT \ + IA_CSS_TRACE_LEVEL_DISABLED + #define PSYSAPI_DEVICE_TRACE_LEVEL_ERROR \ + IA_CSS_TRACE_LEVEL_DISABLED + #define PSYSAPI_DEVICE_TRACE_LEVEL_WARNING \ + IA_CSS_TRACE_LEVEL_DISABLED + #define PSYSAPI_DEVICE_TRACE_LEVEL_INFO \ + IA_CSS_TRACE_LEVEL_DISABLED + #define PSYSAPI_DEVICE_TRACE_LEVEL_DEBUG \ + IA_CSS_TRACE_LEVEL_DISABLED + #define PSYSAPI_DEVICE_TRACE_LEVEL_VERBOSE \ + IA_CSS_TRACE_LEVEL_DISABLED + #elif PSYSAPI_DEVICE_TRACING_OVERRIDE == PSYSAPI_TRACE_LOG_LEVEL_NORMAL + /* PSYSAPI_TRACE_LOG_LEVEL_NORMAL */ + #define PSYSAPI_DEVICE_TRACE_METHOD \ + IA_CSS_TRACE_METHOD_NATIVE + #define 
PSYSAPI_DEVICE_TRACE_LEVEL_ASSERT \ + IA_CSS_TRACE_LEVEL_DISABLED + #define PSYSAPI_DEVICE_TRACE_LEVEL_ERROR \ + IA_CSS_TRACE_LEVEL_ENABLED + #define PSYSAPI_DEVICE_TRACE_LEVEL_WARNING \ + IA_CSS_TRACE_LEVEL_DISABLED + #define PSYSAPI_DEVICE_TRACE_LEVEL_INFO \ + IA_CSS_TRACE_LEVEL_ENABLED + #define PSYSAPI_DEVICE_TRACE_LEVEL_DEBUG \ + IA_CSS_TRACE_LEVEL_DISABLED + #define PSYSAPI_DEVICE_TRACE_LEVEL_VERBOSE \ + IA_CSS_TRACE_LEVEL_DISABLED + #elif PSYSAPI_DEVICE_TRACING_OVERRIDE == PSYSAPI_TRACE_LOG_LEVEL_DEBUG + /* PSYSAPI_TRACE_LOG_LEVEL_DEBUG */ + #define PSYSAPI_DEVICE_TRACE_METHOD \ + IA_CSS_TRACE_METHOD_NATIVE + #define PSYSAPI_DEVICE_TRACE_LEVEL_ASSERT \ + IA_CSS_TRACE_LEVEL_ENABLED + #define PSYSAPI_DEVICE_TRACE_LEVEL_ERROR \ + IA_CSS_TRACE_LEVEL_ENABLED + #define PSYSAPI_DEVICE_TRACE_LEVEL_WARNING \ + IA_CSS_TRACE_LEVEL_ENABLED + #define PSYSAPI_DEVICE_TRACE_LEVEL_INFO \ + IA_CSS_TRACE_LEVEL_ENABLED + #define PSYSAPI_DEVICE_TRACE_LEVEL_DEBUG \ + IA_CSS_TRACE_LEVEL_ENABLED + #define PSYSAPI_DEVICE_TRACE_LEVEL_VERBOSE \ + IA_CSS_TRACE_LEVEL_ENABLED + #else + #error "No PSYSAPI_DATA Tracing level defined" + #endif +#else + /* Inherit Module trace setting */ + #define PSYSAPI_DEVICE_TRACE_METHOD \ + PSYSAPI_TRACE_METHOD + #define PSYSAPI_DEVICE_TRACE_LEVEL_ASSERT \ + PSYSAPI_TRACE_LEVEL_ASSERT + #define PSYSAPI_DEVICE_TRACE_LEVEL_ERROR \ + PSYSAPI_TRACE_LEVEL_ERROR + #define PSYSAPI_DEVICE_TRACE_LEVEL_WARNING \ + PSYSAPI_TRACE_LEVEL_WARNING + #define PSYSAPI_DEVICE_TRACE_LEVEL_INFO \ + PSYSAPI_TRACE_LEVEL_INFO + #define PSYSAPI_DEVICE_TRACE_LEVEL_DEBUG \ + PSYSAPI_TRACE_LEVEL_DEBUG + #define PSYSAPI_DEVICE_TRACE_LEVEL_VERBOSE \ + PSYSAPI_TRACE_LEVEL_VERBOSE +#endif + +#endif /* __IA_CSS_PSYSAPI_DEVICE_TRACE_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/psysapi/device/interface/ia_css_psys_init.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/psysapi/device/interface/ia_css_psys_init.h new file mode 100644 index 
0000000000000..1120b357632cf --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/psysapi/device/interface/ia_css_psys_init.h @@ -0,0 +1,37 @@ +/* +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ + +#ifndef __IA_CSS_PSYS_INIT_H +#define __IA_CSS_PSYS_INIT_H + +#include /* vied_vaddress_t */ + +/* Init parameters passed to the fw on device open (non secure mode) */ +typedef struct ia_css_psys_server_init { + /* These members are used in PSS only and will be removed */ + /* Shared memory host address of pkg dir */ + unsigned long long host_ddr_pkg_dir; + /* Address of pkg_dir structure in DDR */ + vied_vaddress_t ddr_pkg_dir_address; + /* Size of Package dir in DDR */ + uint32_t pkg_dir_size; + + /* Prefetch configiration */ + /* enable prefetching on SPC, SPP0 and SPP1 */ + uint32_t icache_prefetch_sp; + /* enable prefetching on ISP0..N */ + uint32_t icache_prefetch_isp; +} ia_css_psys_server_init_t; + +#endif /* __IA_CSS_PSYS_INIT_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/psysapi/device/interface/ia_css_psys_transport.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/psysapi/device/interface/ia_css_psys_transport.h new file mode 100644 index 0000000000000..e0d1e935c2211 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/psysapi/device/interface/ia_css_psys_transport.h @@ -0,0 +1,92 @@ +/* +* Support for Intel Camera Imaging ISP subsystem. 
 * Copyright (c) 2010 - 2018, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 */

#ifndef __IA_CSS_PSYS_TRANSPORT_H
#define __IA_CSS_PSYS_TRANSPORT_H

/* NOTE(review): both include targets were lost in extraction; they must
 * provide ia_css_psys_cmd_queues and vied_vaddress_t respectively — restore
 * the original header names.
 */
#include /* ia_css_psys_cmd_queues */
#include /* vied_vaddress_t */

#include

/* IDs of the host-facing event (output) queues of the Psys syscom object. */
typedef enum ia_css_psys_event_queues {
	/** The in-order queue for event returns */
	IA_CSS_PSYS_EVENT_QUEUE_MAIN_ID,
	IA_CSS_N_PSYS_EVENT_QUEUE_ID
} ia_css_psys_event_queue_ID_t;

/* Status codes carried in ia_css_psys_event_s::status.
 * (Doxygen markers changed from the trailing form to the leading form,
 * since each comment precedes the member it documents.)
 */
typedef enum ia_css_psys_event_types {
	/** No error to report. */
	IA_CSS_PSYS_EVENT_TYPE_SUCCESS = 0,
	/** Unknown unhandled error */
	IA_CSS_PSYS_EVENT_TYPE_UNKNOWN_ERROR = 1,
	/* Retrieving remote object: */
	/** Object ID not found */
	IA_CSS_PSYS_EVENT_TYPE_RET_REM_OBJ_NOT_FOUND = 2,
	/** Objects too big, or size is zero. */
	IA_CSS_PSYS_EVENT_TYPE_RET_REM_OBJ_TOO_BIG = 3,
	/** Failed to load whole process group from tproxy/dma */
	IA_CSS_PSYS_EVENT_TYPE_RET_REM_OBJ_DDR_TRANS_ERR = 4,
	/** The proper package could not be found */
	IA_CSS_PSYS_EVENT_TYPE_RET_REM_OBJ_NULL_PKG_DIR_ADDR = 5,
	/* Process group: */
	/** Failed to run, error while loading frame */
	IA_CSS_PSYS_EVENT_TYPE_PROC_GRP_LOAD_FRAME_ERR = 6,
	/** Failed to run, error while loading fragment */
	IA_CSS_PSYS_EVENT_TYPE_PROC_GRP_LOAD_FRAGMENT_ERR = 7,
	/** The process count of the process group is zero */
	IA_CSS_PSYS_EVENT_TYPE_PROC_GRP_PROCESS_COUNT_ZERO = 8,
	/** Process(es) initialization */
	IA_CSS_PSYS_EVENT_TYPE_PROC_GRP_PROCESS_INIT_ERR = 9,
	/** Aborted (after host request) */
	IA_CSS_PSYS_EVENT_TYPE_PROC_GRP_ABORT = 10,
	/** NULL pointer in the process group */
	IA_CSS_PSYS_EVENT_TYPE_PROC_GRP_NULL = 11,
	/** Process group validation failed */
	IA_CSS_PSYS_EVENT_TYPE_PROC_GRP_VALIDATION_ERR = 12
} ia_css_psys_event_type_t;

/* Wire format of a command token: 2 + 2 + 4 bytes = 64 bits, matching
 * IA_CSS_PSYS_CMD_BITS. Layout is shared with firmware — do not reorder.
 */
#define IA_CSS_PSYS_CMD_BITS 64
struct ia_css_psys_cmd_s {
	/** The command issued to the process group */
	uint16_t command;
	/** Message field of the command */
	uint16_t msg;
	/** The context reference (process group/buffer set/...) */
	uint32_t context_handle;
};

/* Wire format of an event token: 2 + 2 + 4 + 8 bytes = 128 bits, matching
 * IA_CSS_PSYS_EVENT_BITS. Layout is shared with firmware — do not reorder.
 */
#define IA_CSS_PSYS_EVENT_BITS 128
struct ia_css_psys_event_s {
	/** The (return) status of the command issued to
	 * the process group this event refers to
	 */
	uint16_t status;
	/** The command issued to the process group this event refers to */
	uint16_t command;
	/** The context reference (process group/buffer set/...) */
	uint32_t context_handle;
	/** This token (size) must match the token registered
	 * in a process group
	 */
	uint64_t token;
};

/* Pair of host-side and ISP-side views of one shared buffer. */
struct ia_css_psys_buffer_s {
	/** The in-order queue for scheduled process groups */
	void *host_buffer;
	vied_vaddress_t *isp_buffer;
};

#endif /* __IA_CSS_PSYS_TRANSPORT_H */

/*
 * Support for Intel Camera Imaging ISP subsystem.
 * Copyright (c) 2010 - 2018, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
+*/ + + +#include "ia_css_psys_device.h" +#include "ia_css_psys_device_trace.h" +#include "ia_css_psys_init.h" +#include "regmem_access.h" + +#include +#include +#include + +#include "ia_css_cell.h" + +#define IA_CSS_PSYS_CMD_QUEUE_SIZE 0x20 +#define IA_CSS_PSYS_EVENT_QUEUE_SIZE 0x40 + +static struct ia_css_syscom_queue_config ia_css_psys_cmd_queue_cfg[IA_CSS_N_PSYS_CMD_QUEUE_ID]; + +static struct ia_css_syscom_queue_config + ia_css_psys_event_queue_cfg[IA_CSS_N_PSYS_EVENT_QUEUE_ID] = { + {IA_CSS_PSYS_EVENT_QUEUE_SIZE, IA_CSS_PSYS_EVENT_BITS/8}, +}; + +static struct ia_css_syscom_config psys_syscom_config; +struct ia_css_syscom_context *psys_syscom; +#if HAS_DUAL_CMD_CTX_SUPPORT +static struct ia_css_syscom_config psys_syscom_config_secure; +struct ia_css_syscom_context *psys_syscom_secure; +#endif +static bool external_alloc = true; + +int ia_css_psys_config_print( + const struct ia_css_syscom_config *config, + void *fh) +{ + int retval = -1; + + NOT_USED(fh); + + IA_CSS_TRACE_0(PSYSAPI_DEVICE, INFO, "ia_css_frame_print(): enter:\n"); + + verifexit(config != NULL); + + retval = 0; +EXIT: + if (retval != 0) { + IA_CSS_TRACE_1(PSYSAPI_DEVICE, ERROR, + "ia_css_frame_print failed (%i)\n", retval); + } + return retval; +} + +int ia_css_psys_print( + const struct ia_css_syscom_context *context, + void *fh) +{ + int retval = -1; + + NOT_USED(fh); + + IA_CSS_TRACE_0(PSYSAPI_DEVICE, INFO, "ia_css_psys_print(): enter:\n"); + + verifexit(context != NULL); + + retval = 0; +EXIT: + if (retval != 0) { + IA_CSS_TRACE_1(PSYSAPI_DEVICE, ERROR, + "ia_css_psys_print failed (%i)\n", retval); + } + return retval; +} + +static void set_syscom_config(struct ia_css_syscom_config *config) +{ + int i; + config->num_input_queues = IA_CSS_N_PSYS_CMD_QUEUE_ID; + config->num_output_queues = IA_CSS_N_PSYS_EVENT_QUEUE_ID; + /* The number of queues are different for different platforms + * so the array is initialized here + */ + for (i = 0; i < IA_CSS_N_PSYS_CMD_QUEUE_ID; i++) { + 
ia_css_psys_cmd_queue_cfg[i].queue_size = IA_CSS_PSYS_CMD_QUEUE_SIZE; + ia_css_psys_cmd_queue_cfg[i].token_size = IA_CSS_PSYS_CMD_BITS/8; + } + config->input = ia_css_psys_cmd_queue_cfg; + config->output = ia_css_psys_event_queue_cfg; + config->vtl0_addr_mask = 0; +} + +struct ia_css_syscom_config *ia_css_psys_specify(void) +{ + struct ia_css_syscom_config *config = &psys_syscom_config; + + IA_CSS_TRACE_0(PSYSAPI_DEVICE, INFO, "ia_css_psys_specify(): enter:\n"); + set_syscom_config(config); + config->secure = false; + + return config; +} + +#if HAS_DUAL_CMD_CTX_SUPPORT +struct ia_css_syscom_config *ia_css_psys_specify_secure(unsigned int vtl0_addr_mask) +{ + struct ia_css_syscom_config *config = &psys_syscom_config_secure; + + IA_CSS_TRACE_1(PSYSAPI_DEVICE, INFO, "ia_css_psys_specify_secure(mask %#x): enter:\n", vtl0_addr_mask); + set_syscom_config(config); + config->secure = true; + config->vtl0_addr_mask = vtl0_addr_mask; + return config; +} +#endif + +size_t ia_css_sizeof_psys( + struct ia_css_syscom_config *config) +{ + size_t size = 0; + + IA_CSS_TRACE_0(PSYSAPI_DEVICE, VERBOSE, + "ia_css_sizeof_psys(): enter:\n"); + + NOT_USED(config); + + return size; +} + +/* Internal function to create syscom_context */ +static struct ia_css_syscom_context *psys_context_create( + const struct ia_css_psys_buffer_s *buffer, + struct ia_css_syscom_config *config) +{ + struct ia_css_syscom_context *context; + + IA_CSS_TRACE_0(PSYSAPI_DEVICE, INFO, "psys_context_create(): enter:\n"); + + if (config == NULL) + goto EXIT; + + if (buffer == NULL) { + /* Allocate locally */ + external_alloc = false; + } + + /* + * Here we would like to pass separately the sub-system ID + * and optionally the user pointer to be mapped, depending on + * where this open is called, and which virtual memory handles + * we see here. 
+ */ + /* context = ia_css_syscom_open(get_virtual_memory_handle(vied_psys_ID), + * buffer, config); + */ + context = ia_css_syscom_open(config, NULL); + if (context == NULL) + goto EXIT; + + return context; + +EXIT: + IA_CSS_TRACE_0(PSYSAPI_DEVICE, ERROR, "psys_context_create failed\n"); + return NULL; +} + +#if HAS_DUAL_CMD_CTX_SUPPORT +struct ia_css_syscom_context *ia_css_psys_context_create( + const struct ia_css_psys_buffer_s *buffer, + struct ia_css_syscom_config *config) +{ + return psys_context_create(buffer, config); +} + +/* push context information to DMEM for FW to access */ +int ia_css_psys_context_store_dmem( + struct ia_css_syscom_context *context, + struct ia_css_syscom_config *config) +{ + return ia_css_syscom_store_dmem(context, config->ssid, config->vtl0_addr_mask); +} +#endif + +/* Internal function to start psys server */ +static int psys_start_server( + struct ia_css_syscom_config *config) +{ + ia_css_psys_server_init_t *server_config; + + IA_CSS_TRACE_0(PSYSAPI_DEVICE, INFO, "psys_start_server(): enter:\n"); + + /* Configure SPC icache prefetching and start SPC */ + server_config = (ia_css_psys_server_init_t *)config->specific_addr; + IA_CSS_TRACE_1(PSYSAPI_DEVICE, INFO, "SPC prefetch: %d\n", + server_config->icache_prefetch_sp); + ia_css_cell_start_prefetch(config->ssid, SPC0, + server_config->icache_prefetch_sp); + return 0; +} + +#if HAS_DUAL_CMD_CTX_SUPPORT +int ia_css_psys_open( + struct ia_css_syscom_config *config) +{ + IA_CSS_TRACE_0(PSYSAPI_DEVICE, INFO, "ia_css_psys_open(): enter:\n"); + return psys_start_server(config); +} +#else +struct ia_css_syscom_context *ia_css_psys_open( + const struct ia_css_psys_buffer_s *buffer, + struct ia_css_syscom_config *config) +{ + struct ia_css_syscom_context *context; + + IA_CSS_TRACE_0(PSYSAPI_DEVICE, INFO, "ia_css_psys_open(): enter:\n"); + + context = psys_context_create(buffer, config); + + /* Configure SPC icache prefetching and start SPC */ + psys_start_server(config); + + return context; 
+} +#endif /* HAS_DUAL_CMD_CTX_SUPPORT */ + +bool ia_css_psys_open_is_ready( + struct ia_css_syscom_context *context) +{ + int retval = -1; + bool ready = 0; + unsigned int i; + int syscom_retval; + + verifexit(context != NULL); + + for (i = 0; i < IA_CSS_N_PSYS_CMD_QUEUE_ID; i++) { + syscom_retval = ia_css_syscom_send_port_open(context, i); + if (syscom_retval != 0) { + if (syscom_retval == FW_ERROR_BUSY) { + /* Do not print error */ + retval = 0; + } + /* Not ready yet */ + goto EXIT; + } + } + + for (i = 0; i < IA_CSS_N_PSYS_EVENT_QUEUE_ID; i++) { + syscom_retval = ia_css_syscom_recv_port_open(context, i); + if (syscom_retval != 0) { + if (syscom_retval == FW_ERROR_BUSY) { + /* Do not print error */ + retval = 0; + } + /* Not ready yet */ + goto EXIT; + } + } + + IA_CSS_TRACE_0(PSYSAPI_DEVICE, INFO, + "ia_css_psys_open_is_ready(): complete:\n"); + + /* If this point reached, do not print error */ + retval = 0; + /* If this point reached, ready */ + ready = 1; +EXIT: + if (retval != 0) { + IA_CSS_TRACE_0(PSYSAPI_DEVICE, ERROR, + "ia_css_psys_open_is_ready failed\n"); + } + return ready; +} + +/* Internal function to close syscom_context */ +static struct ia_css_syscom_context *psys_context_destroy( + struct ia_css_syscom_context *context) +{ + /* Success: return NULL, Error: return context pointer value + * Intention is to change return type to int (errno), + * see commented values. 
+ */ + + unsigned int i; + + IA_CSS_TRACE_0(PSYSAPI_DEVICE, INFO, "psys_context_destroy(): enter:\n"); + + /* NULL pointer check disabled, since there is no proper return value */ + + for (i = 0; i < IA_CSS_N_PSYS_CMD_QUEUE_ID; i++) { + if (ia_css_syscom_send_port_close(context, i) != 0) + return context; /* EINVAL */ + } + + for (i = 0; i < IA_CSS_N_PSYS_EVENT_QUEUE_ID; i++) { + if (ia_css_syscom_recv_port_close(context, i) != 0) + return context; /* EINVAL */ + } + + /* request device close */ + if (ia_css_syscom_close(context) != 0) + return context; /* EBUSY */ + + IA_CSS_TRACE_0(PSYSAPI_DEVICE, INFO, + "psys_context_destroy(): leave: OK\n"); + return NULL; +} + +#if HAS_DUAL_CMD_CTX_SUPPORT +struct ia_css_syscom_context *ia_css_psys_context_destroy( + struct ia_css_syscom_context *context) +{ + return psys_context_destroy(context); +} + +int ia_css_psys_close() +{ + /* Intentionally left blank for now since syscom objects should have + * been destroyed already by prior ia_css_psys_context_destroy() calls. 
+ */ + return 0; +} +#else +struct ia_css_syscom_context *ia_css_psys_close( + struct ia_css_syscom_context *context) +{ + return psys_context_destroy(context); +} +#endif /* HAS_DUAL_CMD_CTX_SUPPORT */ + +int ia_css_psys_release( + struct ia_css_syscom_context *context, + bool force) +{ + if (context == NULL) + return -EFAULT; + + /* try to free resources */ + if (ia_css_syscom_release(context, force) != 0) + return -EBUSY; + + IA_CSS_TRACE_0(PSYSAPI_DEVICE, INFO, + "ia_css_psys_release(): leave: OK\n"); + return 0; +} + +ia_css_psys_state_t ia_css_psys_check_state( + struct ia_css_syscom_context *context) +{ + IA_CSS_TRACE_0(PSYSAPI_DEVICE, VERBOSE, + "ia_css_psys_check_state(): enter:\n"); + + NOT_USED(context); + + /* For the time being, return the READY state to be used by SPC test */ + return IA_CSS_PSYS_STATE_READY; +} + +bool ia_css_is_psys_cmd_queue_full( + struct ia_css_syscom_context *context, + ia_css_psys_cmd_queue_ID_t id) +{ + bool is_full = false; + int num_tokens; + int retval = -1; + + IA_CSS_TRACE_0(PSYSAPI_DEVICE, VERBOSE, + "ia_css_is_psys_cmd_queue_full(): enter:\n"); + verifexit(context != NULL); + + num_tokens = ia_css_syscom_send_port_available(context, + (unsigned int)id); + verifexit(num_tokens >= 0); + + is_full = (num_tokens == 0); + retval = 0; +EXIT: + if (retval != 0) { + is_full = true; + IA_CSS_TRACE_0(PSYSAPI_DEVICE, ERROR, + "ia_css_is_psys_cmd_queue_full failed\n"); + } + return is_full; +} + +bool ia_css_is_psys_cmd_queue_not_full( + struct ia_css_syscom_context *context, + ia_css_psys_cmd_queue_ID_t id) +{ + bool is_not_full = false; + int num_tokens; + int retval = -1; + + IA_CSS_TRACE_0(PSYSAPI_DEVICE, VERBOSE, + "ia_css_is_psys_cmd_queue_not_full(): enter:\n"); + verifexit(context != NULL); + + num_tokens = ia_css_syscom_send_port_available(context, + (unsigned int)id); + verifexit(num_tokens >= 0); + + is_not_full = (num_tokens != 0); + retval = 0; +EXIT: + if (retval != 0) { + IA_CSS_TRACE_0(PSYSAPI_DEVICE, ERROR, + 
"ia_css_is_psys_cmd_queue_not_full failed\n"); + } + return is_not_full; +} + +bool ia_css_has_psys_cmd_queue_N_space( + struct ia_css_syscom_context *context, + ia_css_psys_cmd_queue_ID_t id, + const unsigned int N) +{ + bool has_N_space = false; + int num_tokens; + int retval = -1; + + IA_CSS_TRACE_0(PSYSAPI_DEVICE, VERBOSE, + "ia_css_has_psys_cmd_queue_N_space(): enter:\n"); + verifexit(context != NULL); + + num_tokens = ia_css_syscom_send_port_available(context, + (unsigned int)id); + verifexit(num_tokens >= 0); + + has_N_space = ((unsigned int)num_tokens >= N); +EXIT: + if (retval != 0) { + IA_CSS_TRACE_0(PSYSAPI_DEVICE, ERROR, + "ia_css_has_psys_cmd_queue_N_space failed\n"); + } + return has_N_space; +} + +int ia_css_psys_cmd_queue_get_available_space( + struct ia_css_syscom_context *context, + ia_css_psys_cmd_queue_ID_t id) +{ + int N_space = -1; + int num_tokens; + + IA_CSS_TRACE_0(PSYSAPI_DEVICE, VERBOSE, + "ia_css_psys_cmd_queue_get_available_space(): enter:\n"); + verifexit(context != NULL); + + num_tokens = ia_css_syscom_send_port_available(context, + (unsigned int)id); + verifexit(num_tokens >= 0); + + N_space = (int)(num_tokens); +EXIT: + if (N_space < 0) { + IA_CSS_TRACE_0(PSYSAPI_DEVICE, ERROR, + "ia_css_psys_cmd_queue_get_available_space failed\n"); + } + return N_space; +} + +bool ia_css_any_psys_event_queue_not_empty( + struct ia_css_syscom_context *context) +{ + ia_css_psys_event_queue_ID_t i; + bool any_msg = false; + + IA_CSS_TRACE_0(PSYSAPI_DEVICE, VERBOSE, + "ia_css_any_psys_event_queue_not_empty(): enter:\n"); + verifexit(context != NULL); + + for (i = (ia_css_psys_event_queue_ID_t)0; + i < IA_CSS_N_PSYS_EVENT_QUEUE_ID; i++) { + any_msg = + any_msg || ia_css_is_psys_event_queue_not_empty(context, i); + } + +EXIT: + return any_msg; +} + +bool ia_css_is_psys_event_queue_empty( + struct ia_css_syscom_context *context, + ia_css_psys_event_queue_ID_t id) +{ + bool is_empty = false; + int num_tokens; + int retval = -1; + + 
IA_CSS_TRACE_0(PSYSAPI_DEVICE, VERBOSE, + "ia_css_is_psys_event_queue_empty(): enter:\n"); + verifexit(context != NULL); + + num_tokens = ia_css_syscom_recv_port_available(context, (unsigned int)id); + verifexit(num_tokens >= 0); + + is_empty = (num_tokens == 0); + retval = 0; +EXIT: + if (retval != 0) { + is_empty = true; + IA_CSS_TRACE_0(PSYSAPI_DEVICE, ERROR, + "ia_css_is_psys_event_queue_empty failed\n"); + } + return is_empty; +} + +bool ia_css_is_psys_event_queue_not_empty( + struct ia_css_syscom_context *context, + ia_css_psys_event_queue_ID_t id) +{ + bool is_not_empty = false; + int num_tokens; + int retval = -1; + + IA_CSS_TRACE_0(PSYSAPI_DEVICE, VERBOSE, + "ia_css_is_psys_event_queue_not_empty(): enter:\n"); + verifexit(context != NULL); + + num_tokens = ia_css_syscom_recv_port_available(context, + (unsigned int)id); + verifexit(num_tokens >= 0); + + is_not_empty = (num_tokens != 0); + retval = 0; +EXIT: + if (retval != 0) { + IA_CSS_TRACE_0(PSYSAPI_DEVICE, ERROR, + "ia_css_is_psys_event_queue_not_empty failed\n"); + } + return is_not_empty; +} + +bool ia_css_has_psys_event_queue_N_msgs( + struct ia_css_syscom_context *context, + ia_css_psys_event_queue_ID_t id, + const unsigned int N) +{ + bool has_N_msgs = false; + int num_tokens; + int retval = -1; + + IA_CSS_TRACE_0(PSYSAPI_DEVICE, VERBOSE, + "ia_css_has_psys_event_queue_N_msgs(): enter:\n"); + verifexit(context != NULL); + + num_tokens = ia_css_syscom_recv_port_available(context, + (unsigned int)id); + verifexit(num_tokens >= 0); + + has_N_msgs = ((unsigned int)num_tokens >= N); + retval = 0; +EXIT: + if (retval != 0) { + IA_CSS_TRACE_0(PSYSAPI_DEVICE, ERROR, + "ia_css_has_psys_event_queue_N_msgs failed\n"); + } + return has_N_msgs; +} + +int ia_css_psys_event_queue_get_available_msgs( + struct ia_css_syscom_context *context, + ia_css_psys_event_queue_ID_t id) +{ + int N_msgs = -1; + int num_tokens; + + IA_CSS_TRACE_0(PSYSAPI_DEVICE, VERBOSE, + "ia_css_psys_event_queue_get_available_msgs(): 
enter:\n"); + verifexit(context != NULL); + + num_tokens = ia_css_syscom_recv_port_available(context, + (unsigned int)id); + verifexit(num_tokens >= 0); + + N_msgs = (int)(num_tokens); +EXIT: + if (N_msgs < 0) { + IA_CSS_TRACE_0(PSYSAPI_DEVICE, ERROR, + "ia_css_psys_event_queue_get_available_msgs failed\n"); + } + return N_msgs; +} + +int ia_css_psys_cmd_queue_send( + struct ia_css_syscom_context *context, + ia_css_psys_cmd_queue_ID_t id, + const void *cmd_msg_buffer) +{ + int count = 0; + + IA_CSS_TRACE_0(PSYSAPI_DEVICE, VERBOSE, + "ia_css_psys_cmd_queue_send(): enter:\n"); + verifexit(context != NULL); + + verifexit(context != NULL); + /* The ~full check fails on receive queues */ + verifexit(ia_css_is_psys_cmd_queue_not_full(context, id)); + verifexit(cmd_msg_buffer != NULL); + + verifexit(ia_css_syscom_send_port_transfer(context, (unsigned int)id, + cmd_msg_buffer) >= 0); + + count = 1; +EXIT: + if (count == 0) { + IA_CSS_TRACE_0(PSYSAPI_DEVICE, ERROR, + "ia_css_psys_cmd_queue_send failed\n"); + } + return count; +} + +int ia_css_psys_cmd_queue_send_N( + struct ia_css_syscom_context *context, + ia_css_psys_cmd_queue_ID_t id, + const void *cmd_msg_buffer, + const unsigned int N) +{ + struct ia_css_psys_cmd_s *cmd_msg_buffer_loc = + (struct ia_css_psys_cmd_s *)cmd_msg_buffer; + int count = 0; + + IA_CSS_TRACE_0(PSYSAPI_DEVICE, VERBOSE, + "ia_css_psys_cmd_queue_send_N(): enter:\n"); + verifexit(context != NULL); + + for (count = 0; count < (int)N; count++) { + int count_loc = ia_css_psys_cmd_queue_send(context, id, + (void *)(&cmd_msg_buffer_loc[count])); + + verifexit(count_loc == 1); + } + +EXIT: + if ((unsigned int) count < N) { + IA_CSS_TRACE_0(PSYSAPI_DEVICE, ERROR, + "ia_css_psys_cmd_queue_send_N failed\n"); + } + return count; +} + +int ia_css_psys_event_queue_receive( + struct ia_css_syscom_context *context, + ia_css_psys_event_queue_ID_t id, + void *event_msg_buffer) +{ + int count = 0; + + IA_CSS_TRACE_0(PSYSAPI_DEVICE, VERBOSE, + 
"ia_css_psys_event_queue_receive(): enter:\n"); + + verifexit(context != NULL); + /* The ~empty check fails on send queues */ + verifexit(ia_css_is_psys_event_queue_not_empty(context, id)); + verifexit(event_msg_buffer != NULL); + + verifexit(ia_css_syscom_recv_port_transfer(context, (unsigned int)id, + event_msg_buffer) >= 0); + + count = 1; +EXIT: + if (count == 0) { + IA_CSS_TRACE_0(PSYSAPI_DEVICE, ERROR, + "ia_css_psys_event_queue_receive failed\n"); + } + return count; +} + +int ia_css_psys_event_queue_receive_N( + struct ia_css_syscom_context *context, + ia_css_psys_event_queue_ID_t id, + void *event_msg_buffer, + const unsigned int N) +{ + struct ia_css_psys_event_s *event_msg_buffer_loc; + int count; + + IA_CSS_TRACE_0(PSYSAPI_DEVICE, VERBOSE, + "ia_css_psys_event_queue_receive_N(): enter:\n"); + + event_msg_buffer_loc = (struct ia_css_psys_event_s *)event_msg_buffer; + + for (count = 0; count < (int)N; count++) { + int count_loc = ia_css_psys_event_queue_receive(context, id, + (void *)(&event_msg_buffer_loc[count])); + + verifexit(count_loc == 1); + } + +EXIT: + if ((unsigned int) count < N) { + IA_CSS_TRACE_0(PSYSAPI_DEVICE, ERROR, + "ia_css_psys_event_queue_receive_N failed\n"); + } + return count; +} + +size_t ia_css_psys_get_size( + const struct ia_css_syscom_context *context) +{ + size_t size = 0; + + IA_CSS_TRACE_0(PSYSAPI_DEVICE, VERBOSE, + "ia_css_psys_get_size(): enter:\n"); + + verifexit(context != NULL); + /* How can I query the context ? */ +EXIT: + if (size == 0) { + IA_CSS_TRACE_0(PSYSAPI_DEVICE, ERROR, + "ia_css_psys_get_size failed\n"); + } + return size; +} + +unsigned int ia_css_psys_get_cmd_queue_count( + const struct ia_css_syscom_context *context) +{ + unsigned int count = 0; + int retval = -1; + + IA_CSS_TRACE_0(PSYSAPI_DEVICE, VERBOSE, + "ia_css_psys_get_cmd_queue_count(): enter:\n"); + + verifexit(context != NULL); + /* How can I query the context ? 
*/ + NOT_USED(context); + count = (unsigned int)IA_CSS_N_PSYS_CMD_QUEUE_ID; + retval = 0; +EXIT: + if (retval != 0) { + IA_CSS_TRACE_0(PSYSAPI_DEVICE, ERROR, + "ia_css_psys_get_cmd_queue_count failed\n"); + } + return count; +} + +unsigned int ia_css_psys_get_event_queue_count( + const struct ia_css_syscom_context *context) +{ + unsigned int count = 0; + int retval = -1; + + IA_CSS_TRACE_0(PSYSAPI_DEVICE, VERBOSE, + "ia_css_psys_get_event_queue_count(): enter:\n"); + + verifexit(context != NULL); + /* How can I query the context ? */ + NOT_USED(context); + count = (unsigned int)IA_CSS_N_PSYS_EVENT_QUEUE_ID; + retval = 0; +EXIT: + if (retval != 0) { + IA_CSS_TRACE_0(PSYSAPI_DEVICE, ERROR, + "ia_css_psys_get_event_queue_count failed\n"); + } + return count; +} + +size_t ia_css_psys_get_cmd_queue_size( + const struct ia_css_syscom_context *context, + ia_css_psys_cmd_queue_ID_t id) +{ + size_t queue_size = 0; + + IA_CSS_TRACE_0(PSYSAPI_DEVICE, VERBOSE, + "ia_css_psys_get_cmd_queue_size(): enter:\n"); + + verifexit(context != NULL); + /* How can I query the context ? */ + NOT_USED(context); + queue_size = ia_css_psys_cmd_queue_cfg[id].queue_size; +EXIT: + if (queue_size == 0) { + IA_CSS_TRACE_0(PSYSAPI_DEVICE, ERROR, + "ia_css_psys_get_cmd_queue_size failed\n"); + } + return queue_size; +} + +size_t ia_css_psys_get_event_queue_size( + const struct ia_css_syscom_context *context, + ia_css_psys_event_queue_ID_t id) +{ + size_t queue_size = 0; + + IA_CSS_TRACE_0(PSYSAPI_DEVICE, VERBOSE, + "ia_css_psys_get_event_queue_size(): enter:\n"); + + verifexit(context != NULL); + /* How can I query the context ? 
 */ + NOT_USED(context); + queue_size = ia_css_psys_event_queue_cfg[id].queue_size; +EXIT: + if (queue_size == 0) { + IA_CSS_TRACE_0(PSYSAPI_DEVICE, ERROR, + "ia_css_psys_get_event_queue_size failed\n"); + } + return queue_size; +} + +size_t ia_css_psys_get_cmd_msg_size( + const struct ia_css_syscom_context *context, + ia_css_psys_cmd_queue_ID_t id) +{ + size_t msg_size = 0; + + IA_CSS_TRACE_0(PSYSAPI_DEVICE, VERBOSE, + "ia_css_psys_get_cmd_msg_size(): enter:\n"); + + verifexit(context != NULL); + /* How can I query the context ? */ + NOT_USED(context); + msg_size = ia_css_psys_cmd_queue_cfg[id].token_size; +EXIT: + if (msg_size == 0) { + IA_CSS_TRACE_0(PSYSAPI_DEVICE, ERROR, + "ia_css_psys_get_cmd_msg_size failed\n"); + } + return msg_size; +} + +size_t ia_css_psys_get_event_msg_size( + const struct ia_css_syscom_context *context, + ia_css_psys_event_queue_ID_t id) +{ + size_t msg_size = 0; + + IA_CSS_TRACE_0(PSYSAPI_DEVICE, VERBOSE, + "ia_css_psys_get_event_msg_size(): enter:\n"); + + verifexit(context != NULL); + /* How can I query the context ? */ + NOT_USED(context); + msg_size = ia_css_psys_event_queue_cfg[id].token_size; +EXIT: + if (msg_size == 0) { + IA_CSS_TRACE_0(PSYSAPI_DEVICE, ERROR, + "ia_css_psys_get_event_msg_size failed\n"); + } + return msg_size; +} diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/psysapi/dynamic/interface/ia_css_psys_buffer_set.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/psysapi/dynamic/interface/ia_css_psys_buffer_set.h new file mode 100644 index 0000000000000..392b4359353f4 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/psysapi/dynamic/interface/ia_css_psys_buffer_set.h @@ -0,0 +1,174 @@ +/* + * Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. 
+ * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + */ + +#ifndef __IA_CSS_PSYS_BUFFER_SET_H +#define __IA_CSS_PSYS_BUFFER_SET_H + +#include "ia_css_base_types.h" +#include "ia_css_psys_dynamic_storage_class.h" +#include "ia_css_psys_process_types.h" +#include "ia_css_terminal_types.h" + +#define N_UINT64_IN_BUFFER_SET_STRUCT 1 +#define N_UINT16_IN_BUFFER_SET_STRUCT 1 +#define N_UINT8_IN_BUFFER_SET_STRUCT 1 +#define N_PADDING_UINT8_IN_BUFFER_SET_STRUCT 5 +#define SIZE_OF_BUFFER_SET \ + (N_UINT64_IN_BUFFER_SET_STRUCT * IA_CSS_UINT64_T_BITS \ + + VIED_VADDRESS_BITS \ + + VIED_VADDRESS_BITS \ + + N_UINT16_IN_BUFFER_SET_STRUCT * IA_CSS_UINT16_T_BITS \ + + N_UINT8_IN_BUFFER_SET_STRUCT * IA_CSS_UINT8_T_BITS \ + + N_PADDING_UINT8_IN_BUFFER_SET_STRUCT * IA_CSS_UINT8_T_BITS) + +typedef struct ia_css_buffer_set_s ia_css_buffer_set_t; + +struct ia_css_buffer_set_s { + /* Token for user context reference */ + uint64_t token; + /* IPU virtual address of this buffer set */ + vied_vaddress_t ipu_virtual_address; + /* IPU virtual address of the process group corresponding to this buffer set */ + vied_vaddress_t process_group_handle; + /* Number of terminal buffer addresses in this structure */ + uint16_t terminal_count; + /* Frame id to associate with this buffer set */ + uint8_t frame_counter; + /* Padding for 64bit alignment */ + uint8_t padding[N_PADDING_UINT8_IN_BUFFER_SET_STRUCT]; +}; + + +/*! 
Construct a buffer set object at specified location + + @param buffer_set_mem[in] memory location to create buffer set object + @param process_group[in] process group corresponding to this buffer set + @param frame_counter[in] frame number for this buffer set object + + @return pointer to buffer set object on success, NULL on error + */ +ia_css_buffer_set_t *ia_css_buffer_set_create( + void *buffer_set_mem, + const ia_css_process_group_t *process_group, + const unsigned int frame_counter); + +/*! Compute size (in bytes) required for full buffer set object + + @param process_group[in] process group corresponding to this buffer set + + @return size in bytes of buffer set object on success, 0 on error + */ +size_t ia_css_sizeof_buffer_set( + const ia_css_process_group_t *process_group); + +/*! Set a buffer address in a buffer set object + + @param buffer_set[in] buffer set object to set buffer in + @param terminal_index[in] terminal index to use as a reference between + buffer and terminal + @param buffer[in] buffer address to store + + @return 0 on success, -1 on error + */ +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_H +int ia_css_buffer_set_set_buffer( + ia_css_buffer_set_t *buffer_set, + const unsigned int terminal_index, + const vied_vaddress_t buffer); + +/*! Get virtual buffer address from a buffer set object and terminal object by + resolving the index used + + @param buffer_set[in] buffer set object to get buffer from + @param terminal[in] terminal object to get buffer of + + @return virtual buffer address on success, VIED_NULL on error + */ +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_H +vied_vaddress_t ia_css_buffer_set_get_buffer( + const ia_css_buffer_set_t *buffer_set, + const ia_css_terminal_t *terminal); + +/*! 
Set ipu virtual address of a buffer set object within the buffer set object + + @param buffer_set[in] buffer set object to set ipu address in + @param ipu_vaddress[in] ipu virtual address of the buffer set object + + @return 0 on success, -1 on error + */ +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_H +int ia_css_buffer_set_set_ipu_address( + ia_css_buffer_set_t *buffer_set, + const vied_vaddress_t ipu_vaddress); + +/*! Get ipu virtual address from a buffer set object + + @param buffer_set[in] buffer set object to get ipu address from + + @return virtual buffer set address on success, VIED_NULL on error + */ +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_H +vied_vaddress_t ia_css_buffer_set_get_ipu_address( + const ia_css_buffer_set_t *buffer_set); + +/*! Set process group handle in a buffer set object + + @param buffer_set[in] buffer set object to set handle in + @param process_group_handle[in] process group handle of the buffer set + object + + @return 0 on success, -1 on error + */ +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_H +int ia_css_buffer_set_set_process_group_handle( + ia_css_buffer_set_t *buffer_set, + const vied_vaddress_t process_group_handle); + +/*! Get process group handle from a buffer set object + + @param buffer_set[in] buffer set object to get handle from + + @return virtual process group address on success, VIED_NULL on error + */ +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_H +vied_vaddress_t ia_css_buffer_set_get_process_group_handle( + const ia_css_buffer_set_t *buffer_set); + +/*! Set token of a buffer set object within the buffer set object + + @param buffer_set[in] buffer set object to set ipu address in + @param token[in] token of the buffer set object + + @return 0 on success, -1 on error + */ +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_H +int ia_css_buffer_set_set_token( + ia_css_buffer_set_t *buffer_set, + const uint64_t token); + +/*! 
Get token from a buffer set object + + @param buffer_set[in] buffer set object to get token from + + @return token on success, 0 on error + */ +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_H +uint64_t ia_css_buffer_set_get_token( + const ia_css_buffer_set_t *buffer_set); + +#ifdef __IA_CSS_PSYS_DYNAMIC_INLINE__ +#include "ia_css_psys_buffer_set_impl.h" +#endif /* __IA_CSS_PSYS_DYNAMIC_INLINE__ */ + +#endif /* __IA_CSS_PSYS_BUFFER_SET_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/psysapi/dynamic/interface/ia_css_psys_dynamic_storage_class.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/psysapi/dynamic/interface/ia_css_psys_dynamic_storage_class.h new file mode 100644 index 0000000000000..9a1e3a7a12949 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/psysapi/dynamic/interface/ia_css_psys_dynamic_storage_class.h @@ -0,0 +1,28 @@ +/* +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+*/ + +#ifndef __IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_H +#define __IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_H + +#include "storage_class.h" + +#ifndef __IA_CSS_PSYS_DYNAMIC_INLINE__ +#define IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_H STORAGE_CLASS_EXTERN +#define IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_C +#else +#define IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_H STORAGE_CLASS_INLINE +#define IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_C STORAGE_CLASS_INLINE +#endif + +#endif /* __IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/psysapi/dynamic/interface/ia_css_psys_dynamic_trace.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/psysapi/dynamic/interface/ia_css_psys_dynamic_trace.h new file mode 100644 index 0000000000000..e8a979dfce0bf --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/psysapi/dynamic/interface/ia_css_psys_dynamic_trace.h @@ -0,0 +1,103 @@ +/* +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+*/ + +#ifndef __IA_CSS_PSYS_DYNAMIC_TRACE_H +#define __IA_CSS_PSYS_DYNAMIC_TRACE_H + +#include "ia_css_psysapi_trace.h" + +#define PSYS_DYNAMIC_TRACE_LEVEL_CONFIG_DEFAULT PSYSAPI_TRACE_LOG_LEVEL_OFF + +/* Default sub-module tracing config */ +#if (!defined(PSYSAPI_DYNAMIC_TRACING_OVERRIDE)) + #define PSYS_DYNAMIC_TRACE_LEVEL_CONFIG \ + PSYS_DYNAMIC_TRACE_LEVEL_CONFIG_DEFAULT +#endif + +/* Module/sub-module specific trace setting will be used if + * the trace level is not specified from the module or + PSYSAPI_DYNAMIC_TRACING_OVERRIDE is defined + */ +#if (defined(PSYSAPI_DYNAMIC_TRACING_OVERRIDE)) + /* Module/sub-module specific trace setting */ + #if PSYSAPI_DYNAMIC_TRACING_OVERRIDE == PSYSAPI_TRACE_LOG_LEVEL_OFF + /* PSYSAPI_TRACE_LOG_LEVEL_OFF */ + #define PSYSAPI_DYNAMIC_TRACE_METHOD \ + IA_CSS_TRACE_METHOD_NATIVE + #define PSYSAPI_DYNAMIC_TRACE_LEVEL_ASSERT \ + IA_CSS_TRACE_LEVEL_DISABLED + #define PSYSAPI_DYNAMIC_TRACE_LEVEL_ERROR \ + IA_CSS_TRACE_LEVEL_DISABLED + #define PSYSAPI_DYNAMIC_TRACE_LEVEL_WARNING \ + IA_CSS_TRACE_LEVEL_DISABLED + #define PSYSAPI_DYNAMIC_TRACE_LEVEL_INFO \ + IA_CSS_TRACE_LEVEL_DISABLED + #define PSYSAPI_DYNAMIC_TRACE_LEVEL_DEBUG \ + IA_CSS_TRACE_LEVEL_DISABLED + #define PSYSAPI_DYNAMIC_TRACE_LEVEL_VERBOSE \ + IA_CSS_TRACE_LEVEL_DISABLED + #elif PSYSAPI_DYNAMIC_TRACING_OVERRIDE == PSYSAPI_TRACE_LOG_LEVEL_NORMAL + /* PSYSAPI_TRACE_LOG_LEVEL_NORMAL */ + #define PSYSAPI_DYNAMIC_TRACE_METHOD \ + IA_CSS_TRACE_METHOD_NATIVE + #define PSYSAPI_DYNAMIC_TRACE_LEVEL_ASSERT \ + IA_CSS_TRACE_LEVEL_DISABLED + #define PSYSAPI_DYNAMIC_TRACE_LEVEL_ERROR \ + IA_CSS_TRACE_LEVEL_ENABLED + #define PSYSAPI_DYNAMIC_TRACE_LEVEL_WARNING \ + IA_CSS_TRACE_LEVEL_DISABLED + #define PSYSAPI_DYNAMIC_TRACE_LEVEL_INFO \ + IA_CSS_TRACE_LEVEL_ENABLED + #define PSYSAPI_DYNAMIC_TRACE_LEVEL_DEBUG \ + IA_CSS_TRACE_LEVEL_DISABLED + #define PSYSAPI_DYNAMIC_TRACE_LEVEL_VERBOSE \ + IA_CSS_TRACE_LEVEL_DISABLED + #elif PSYSAPI_DYNAMIC_TRACING_OVERRIDE == 
PSYSAPI_TRACE_LOG_LEVEL_DEBUG + /* PSYSAPI_TRACE_LOG_LEVEL_DEBUG */ + #define PSYSAPI_DYNAMIC_TRACE_METHOD \ + IA_CSS_TRACE_METHOD_NATIVE + #define PSYSAPI_DYNAMIC_TRACE_LEVEL_ASSERT \ + IA_CSS_TRACE_LEVEL_ENABLED + #define PSYSAPI_DYNAMIC_TRACE_LEVEL_ERROR \ + IA_CSS_TRACE_LEVEL_ENABLED + #define PSYSAPI_DYNAMIC_TRACE_LEVEL_WARNING \ + IA_CSS_TRACE_LEVEL_ENABLED + #define PSYSAPI_DYNAMIC_TRACE_LEVEL_INFO \ + IA_CSS_TRACE_LEVEL_ENABLED + #define PSYSAPI_DYNAMIC_TRACE_LEVEL_DEBUG \ + IA_CSS_TRACE_LEVEL_ENABLED + #define PSYSAPI_DYNAMIC_TRACE_LEVEL_VERBOSE \ + IA_CSS_TRACE_LEVEL_ENABLED + #else + #error "No PSYSAPI_DATA Tracing level defined" + #endif +#else + /* Inherit Module trace setting */ + #define PSYSAPI_DYNAMIC_TRACE_METHOD \ + PSYSAPI_TRACE_METHOD + #define PSYSAPI_DYNAMIC_TRACE_LEVEL_ASSERT \ + PSYSAPI_TRACE_LEVEL_ASSERT + #define PSYSAPI_DYNAMIC_TRACE_LEVEL_ERROR \ + PSYSAPI_TRACE_LEVEL_ERROR + #define PSYSAPI_DYNAMIC_TRACE_LEVEL_WARNING \ + PSYSAPI_TRACE_LEVEL_WARNING + #define PSYSAPI_DYNAMIC_TRACE_LEVEL_INFO \ + PSYSAPI_TRACE_LEVEL_INFO + #define PSYSAPI_DYNAMIC_TRACE_LEVEL_DEBUG \ + PSYSAPI_TRACE_LEVEL_DEBUG + #define PSYSAPI_DYNAMIC_TRACE_LEVEL_VERBOSE \ + PSYSAPI_TRACE_LEVEL_VERBOSE +#endif + +#endif /* __IA_CSS_PSYSAPI_DYNAMIC_TRACE_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/psysapi/dynamic/interface/ia_css_psys_process.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/psysapi/dynamic/interface/ia_css_psys_process.h new file mode 100644 index 0000000000000..f4ef80f742135 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/psysapi/dynamic/interface/ia_css_psys_process.h @@ -0,0 +1,396 @@ +/* +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. 
+ * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ + +#ifndef __IA_CSS_PSYS_PROCESS_H +#define __IA_CSS_PSYS_PROCESS_H + +/*! \file */ + +/** @file ia_css_psys_process.h + * + * Define the methods on the process object that are not part of + * a single interface + */ + +#include +#include + +#include + +#include /* uint8_t */ + +/* + * Creation + */ +#include + +/* + * Internal resources + */ +#include + +/* + * Process manager + */ +#include + +/* + * Command processor + */ + +/*! Execute a command locally or send it to be processed remotely + + @param process[in] process object + @param cmd[in] command + + @return < 0 on invalid argument(s) or process state + */ +extern int ia_css_process_cmd( + ia_css_process_t *process, + const ia_css_process_cmd_t cmd); + +/*! Get the internal memory offset of the process object + + @param process[in] process object + @param mem_id[in] memory id + + @return internal memory offset, + IA_CSS_PROCESS_INVALID_OFFSET on invalid argument(s) +*/ +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_H +vied_nci_resource_size_t ia_css_process_get_int_mem_offset( + const ia_css_process_t *process, + const vied_nci_mem_type_ID_t mem_id); + + +/*! Get the external memory offset of the process object + + @param process[in] process object + @param mem_id[in] memory id + + @return external memory offset, + IA_CSS_PROCESS_INVALID_OFFSET on invalid argument(s) +*/ +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_H +vied_nci_resource_size_t ia_css_process_get_ext_mem_offset( + const ia_css_process_t *process, + const vied_nci_mem_type_ID_t mem_type_id); + + +/*! 
Get the stored size of the process object + + @param process[in] process object + + @return size, 0 on invalid argument + */ +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_H +size_t ia_css_process_get_size(const ia_css_process_t *process); + +/*! Get the (pointer to) the process group parent of the process object + + @param process[in] process object + + @return the pointer to the parent, NULL on invalid argument + */ +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_H +ia_css_process_group_t *ia_css_process_get_parent( + const ia_css_process_t *process); + +/*! Set the (pointer to) the process group parent of the process object + + @param process[in] process object + @param parent[in] (pointer to the) process group parent object + + @return < 0 on invalid argument(s) + */ +extern int ia_css_process_set_parent( + ia_css_process_t *process, + ia_css_process_group_t *parent); + +/*! Get the unique ID of program used by the process object + + @param process[in] process object + + @return ID, 0 on invalid argument + */ +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_H +ia_css_program_ID_t ia_css_process_get_program_ID( + const ia_css_process_t *process); + +/*! Get the state of the process object + + @param process[in] process object + + @return state, limit value (IA_CSS_N_PROCESS_STATES) on invalid argument + */ +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_H +ia_css_process_state_t ia_css_process_get_state( + const ia_css_process_t *process); + +/*! Set the state of the process object + + @param process[in] process object + @param state[in] state of the process + + @return < 0 on invalid argument + */ +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_H +int ia_css_process_set_state( + ia_css_process_t *process, + ia_css_process_state_t state); + +/*! 
Get the assigned cell of the the process object + + @param process[in] process object + + @return cell ID, limit value (VIED_NCI_N_CELL_ID) on invalid argument + */ +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_H +vied_nci_cell_ID_t ia_css_process_get_cell( + const ia_css_process_t *process); + +/*! Get the number of cells the process object depends on + + @param process[in] process object + + @return number of cells + */ +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_H +uint8_t ia_css_process_get_cell_dependency_count( + const ia_css_process_t *process); + +/*! Get the number of terminals the process object depends on + + @param process[in] process object + + @return number of terminals + */ +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_H +uint8_t ia_css_process_get_terminal_dependency_count( + const ia_css_process_t *process); + +/*! Set n-th cell dependency of a process object + + @param process[in] Process object + @param dep_index[in] dep index + @param id[in] dep id + + @return < 0 on invalid process argument + */ +extern int ia_css_process_set_cell_dependency( + const ia_css_process_t *process, + const unsigned int dep_index, + const vied_nci_resource_id_t id); + +/*! Get n-th cell dependency of a process object + + @param process[in] Process object + @param cell_num[in] n-th cell + + @return n-th cell dependency, + IA_CSS_PROCESS_INVALID_DEPENDENCY on invalid argument(s) +*/ +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_H +vied_nci_resource_id_t ia_css_process_get_cell_dependency( + const ia_css_process_t *process, + const unsigned int cell_num); + +/*! Set n-th terminal dependency of a process object + + @param process[in] Process object + @param dep_index[in] dep index + @param id[in] dep id + + @return < 0 on on invalid argument(s) + */ +extern int ia_css_process_set_terminal_dependency( + const ia_css_process_t *process, + const unsigned int dep_index, + const vied_nci_resource_id_t id); + +/*! 
Get n-th terminal dependency of a process object + + @param process[in] Process object + @param terminal_num[in] n-th cell + + @return n-th terminal dependency, + IA_CSS_PROCESS_INVALID_DEPENDENCY on invalid argument(s) +*/ +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_H +uint8_t ia_css_process_get_terminal_dependency( + const ia_css_process_t *process, + const unsigned int terminal_num); + +/*! Get the kernel bitmap of the the process object + + @param process[in] process object + + @return process kernel bitmap + */ +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_H +ia_css_kernel_bitmap_t ia_css_process_get_kernel_bitmap( + const ia_css_process_t *process); + +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_H +vied_nci_resource_bitmap_t* ia_css_process_get_dfm_port_bitmap_ptr( + ia_css_process_t *process); + +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_H +vied_nci_resource_bitmap_t* ia_css_process_get_dfm_active_port_bitmap_ptr( + ia_css_process_t *process); + + +/*! Get the cells bitmap of the the process object + + @param process[in] process object + + @return process cells bitmap + */ +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_H +vied_nci_resource_bitmap_t ia_css_process_get_cells_bitmap( + const ia_css_process_t *process); + +/*! Sets the dfm device resource allocation bitmap of + * the process object + + @param process[in] process object + @param dfm_dev_id[in] dfm device id + @param bitmap[in] resource bitmap + + @return < 0 on invalid argument(s) or process state + */ +int ia_css_process_set_dfm_port_bitmap( + ia_css_process_t *process, + const vied_nci_dev_dfm_id_t dfm_dev_id, + const vied_nci_resource_bitmap_t bitmap); + + +/*! 
Sets the active dfm ports bitmap of + * the process object + + @param process[in] process object + @param dfm_dev_id[in] dfm device id + @param bitmap[in] active ports bitmap + + @return < 0 on invalid argument(s) or process state + */ +int ia_css_process_set_dfm_active_port_bitmap( + ia_css_process_t *process, + const vied_nci_dev_dfm_id_t dfm_dev_id, + const vied_nci_resource_bitmap_t bitmap); + +/*! Get the dfm port bitmap of the the process object + + @param process[in] process object + @param dfm_res_id dfm resource id + + @return bitmap of all DFM ports used by process, corresponding to the input dfm resource id + */ +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_H +vied_nci_resource_bitmap_t ia_css_process_get_dfm_port_bitmap( + const ia_css_process_t *process, + vied_nci_dev_dfm_id_t dfm_res_id); + +/*! Get the dfm active port bitmap of the the process object + + @param process[in] process object + @param dfm_res_id[in] dfm resource id + + @return bitmap of all active DFM ports used by the process, corresponding to the input + dfm resource id + */ +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_H +vied_nci_resource_bitmap_t ia_css_process_get_dfm_active_port_bitmap( + const ia_css_process_t *process, + vied_nci_dev_dfm_id_t dfm_res_id); + + +/*! Sets the cells bitmap of + * the process object + + @param process[in] process object + @param bitmap[in] bitmap + + @return < 0 on invalid argument(s) or process state + */ +int ia_css_process_set_cells_bitmap( + ia_css_process_t *process, + const vied_nci_resource_bitmap_t bitmap); + +/*! Get the device channel id-n resource allocation offset of the process object + + @param process[in] process object + @param dev_chn_id[in] channel id + + @return resource offset, IA_CSS_PROCESS_INVALID_OFFSET on invalid argument(s) + */ +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_H +vied_nci_resource_size_t ia_css_process_get_dev_chn( + const ia_css_process_t *process, + const vied_nci_dev_chn_ID_t dev_chn_id); + +/*! 
Get the ext mem type-n resource id of the the process object + + @param process[in] process object + @param mem_type[in] mem type + + @return resource offset, IA_CSS_PROCESS_INVALID_OFFSET on invalid argument(s) + */ +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_H +vied_nci_mem_ID_t ia_css_process_get_ext_mem_id( + const ia_css_process_t *process, + const vied_nci_mem_type_ID_t mem_type); + + +/*! Sets the device channel id-n resource allocation offset of + * the process object + + @param process[in] process object + @param dev_chn_id[in] channel id + @param offset[in] resource offset + + @return < 0 on invalid argument(s) or process state + */ +int ia_css_process_set_dev_chn( + ia_css_process_t *process, + const vied_nci_dev_chn_ID_t dev_chn_id, + const vied_nci_resource_size_t offset); + +/*! Boolean test if the process object type is valid + + @param process[in] process object + @param p_manifest[in] program manifest + + @return true if the process object is correct, false on error + */ +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_H +bool ia_css_is_process_valid( + const ia_css_process_t *process, + const ia_css_program_manifest_t *p_manifest); + +/*! 
Gets the program_idx from the process object + + @param process[in] process object + + @return program index + */ +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_H +uint32_t ia_css_process_get_program_idx( + const ia_css_process_t *process); + +#ifdef __IA_CSS_PSYS_DYNAMIC_INLINE__ +#include "ia_css_psys_process_impl.h" +#endif /* __IA_CSS_PSYS_DYNAMIC_INLINE__ */ + +#endif /* __IA_CSS_PSYS_PROCESS_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/psysapi/dynamic/interface/ia_css_psys_process.hsys.kernel.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/psysapi/dynamic/interface/ia_css_psys_process.hsys.kernel.h new file mode 100644 index 0000000000000..cab7965604146 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/psysapi/dynamic/interface/ia_css_psys_process.hsys.kernel.h @@ -0,0 +1,144 @@ +/* +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ + +#ifndef __IA_CSS_PSYS_PROCESS_HSYS_KERNEL_H +#define __IA_CSS_PSYS_PROCESS_HSYS_KERNEL_H + +/*! \file */ + +/** @file ia_css_psys_process.hsys.kernel.h + * + * Define the methods on the process object: Hsys kernel interface + */ + +#include + +#include + +/* + * Internal resources + */ + +/*! Clear all resource (offset) specifications + + @param process[in] process object + + @return < 0 on error + */ +extern int ia_css_process_clear_all(ia_css_process_t *process); + +/*! 
Set the cell ID resource specification + + @param process[in] process object + @param cell_id[in] cell ID + + @return < 0 on error + */ +extern int ia_css_process_set_cell( + ia_css_process_t *process, + const vied_nci_cell_ID_t cell_id); + +/*! Clear cell ID resource specification + + @param process[in] process object + + @return < 0 on error + */ +extern int ia_css_process_clear_cell(ia_css_process_t *process); + +/*! Set the memory resource (offset) specification for a memory + that belongs to the cell that is assigned to the process + + @param process[in] process object + @param mem_type_id[in] mem type ID + @param offset[in] offset + + Precondition: The cell ID must be set + + @return < 0 on error + */ +extern int ia_css_process_set_int_mem( + ia_css_process_t *process, + const vied_nci_mem_type_ID_t mem_type_id, + const vied_nci_resource_size_t offset); + +/*! Clear the memory resource (offset) specification for a memory + type that belongs to the cell that is assigned to the process + + @param process[in] process object + @param mem_id[in] mem ID + + Precondition: The cell ID must be set + + @return < 0 on error + */ +extern int ia_css_process_clear_int_mem( + ia_css_process_t *process, + const vied_nci_mem_type_ID_t mem_type_id); + +/*! Set the memory resource (offset) specification for a memory + that does not belong to the cell that is assigned to the process + + @param process[in] process object + @param mem_type_id[in] mem type ID + @param offset[in] offset + + Precondition: The cell ID must be set + + @return < 0 on error + */ +extern int ia_css_process_set_ext_mem( + ia_css_process_t *process, + const vied_nci_mem_ID_t mem_id, + const vied_nci_resource_size_t offset); + +/*! 
Clear the memory resource (offset) specification for a memory + type that does not belong to the cell that is assigned to the process + + @param process[in] process object + @param mem_id[in] mem ID + + Precondition: The cell ID must be set + + @return < 0 on error + */ +extern int ia_css_process_clear_ext_mem( + ia_css_process_t *process, + const vied_nci_mem_type_ID_t mem_type_id); + +/*! Set a device channel resource (offset) specification + + @param process[in] process object + @param dev_chn_id[in] device channel ID + @param offset[in] offset + + @return < 0 on error + */ +extern int ia_css_process_set_dev_chn( + ia_css_process_t *process, + const vied_nci_dev_chn_ID_t dev_chn_id, + const vied_nci_resource_size_t offset); + +/*! Clear a device channel resource (offset) specification + + @param process[in] process object + @param dev_chn_id[in] device channel ID + + @return < 0 on error + */ +extern int ia_css_process_clear_dev_chn( + ia_css_process_t *process, + const vied_nci_dev_chn_ID_t dev_chn_id); + +#endif /* __IA_CSS_PSYS_PROCESS_HSYS_KERNEL_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/psysapi/dynamic/interface/ia_css_psys_process.hsys.user.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/psysapi/dynamic/interface/ia_css_psys_process.hsys.user.h new file mode 100644 index 0000000000000..015a60b0e1afb --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/psysapi/dynamic/interface/ia_css_psys_process.hsys.user.h @@ -0,0 +1,85 @@ +/* +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. 
+ * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ + +#ifndef __IA_CSS_PSYS_PROCESS_HSYS_USER_H +#define __IA_CSS_PSYS_PROCESS_HSYS_USER_H + +/*! \file */ + +/** @file ia_css_psys_process.hsys.user.h + * + * Define the methods on the process object: Hsys user interface + */ + +#include /* ia_css_program_param_t */ + +#include +#include + +#include /* uint8_t */ + +/* + * Creation + */ + +/*! Compute the size of storage required for allocating the process object + + @param manifest[in] program manifest + @param param[in] program parameters + + @return 0 on error + */ +extern size_t ia_css_sizeof_process( + const ia_css_program_manifest_t *manifest, + const ia_css_program_param_t *param); + +/*! Create the process object + + @param raw_mem[in] pre allocated memory + @param manifest[in] program manifest + @param param[in] program parameters + + @return NULL on error + */ +extern ia_css_process_t *ia_css_process_create( + void *raw_mem, + const ia_css_program_manifest_t *manifest, + const ia_css_program_param_t *param, + const uint32_t program_idx); + +/*! Destroy (the storage of) the process object + + @param process[in] process object + + @return NULL + */ +extern ia_css_process_t *ia_css_process_destroy( + ia_css_process_t *process); + +/* + * Access functions + */ + +/*! 
Print the process object to file/stream + + @param process[in] process object + @param fid[out] file/stream handle + + @return < 0 on error + */ +extern int ia_css_process_print( + const ia_css_process_t *process, + void *fid); + +#endif /* __IA_CSS_PSYS_PROCESS_HSYS_USER_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/psysapi/dynamic/interface/ia_css_psys_process.psys.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/psysapi/dynamic/interface/ia_css_psys_process.psys.h new file mode 100644 index 0000000000000..ba1db574a4388 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/psysapi/dynamic/interface/ia_css_psys_process.psys.h @@ -0,0 +1,53 @@ +/* +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ + +#ifndef __IA_CSS_PSYS_PROCESS_PSYS_H +#define __IA_CSS_PSYS_PROCESS_PSYS_H + +/*! \file */ + +/** @file ia_css_psys_process.psys.h + * + * Define the methods on the process object: Psys embedded interface + */ + +#include + +/* + * Process manager + */ + +/*! Acquire the resources specified in process object + + @param process[in] process object + + Postcondition: This is a try process if any of the + resources is not available, all successfully acquired + ones will be released and the function will return an + error + + @return < 0 on error + */ +extern int ia_css_process_acquire(ia_css_process_t *process); + +/*!
Release the resources specified in process object + + @param process[in] process object + + @return < 0 on error + */ +extern int ia_css_process_release(ia_css_process_t *process); + + +#endif /* __IA_CSS_PSYS_PROCESS_PSYS_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/psysapi/dynamic/interface/ia_css_psys_process_group.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/psysapi/dynamic/interface/ia_css_psys_process_group.h new file mode 100644 index 0000000000000..c0f6901adeb01 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/psysapi/dynamic/interface/ia_css_psys_process_group.h @@ -0,0 +1,366 @@ +/* +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ + +#ifndef __IA_CSS_PSYS_PROCESS_GROUP_H +#define __IA_CSS_PSYS_PROCESS_GROUP_H + +/*! \file */ + +/** @file ia_css_psys_process_group.h + * + * Define the methods on the process object that are not part of + * a single interface + */ +#include "ia_css_rbm.h" + +#include +#include + +#include /* uint8_t */ + +/* + * Creation + */ +#include + +/* + * Registration of user contexts / callback info + * External resources + * Sequencing resources + */ +#include + +/* + * Dispatcher + */ +#include + +/* + * Access to sub-structure handles / fields + */ + +#include "ia_css_terminal.h" + +/*!
Get the number of fragments on the process group + + @param process_group[in] process group object + + Note: Future change is to have a fragment count per + independent subgraph + + @return the fragment count, 0 on error + */ +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_H +uint16_t ia_css_process_group_get_fragment_count( + const ia_css_process_group_t *process_group); + + +/*! Get the fragment state on the process group + + @param process_group[in] process group object + @param fragment_state[in] current fragment of processing + + @return -1 on error + */ +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_H +int ia_css_process_group_get_fragment_state( + const ia_css_process_group_t *process_group, + uint16_t *fragment_state); + +/*! Set the fragment state on the process group + + @param process_group[in] process group object + @param fragment_state[in] current fragment of processing + + @return -1 on error + */ +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_H +int ia_css_process_group_set_fragment_state( + ia_css_process_group_t *process_group, + uint16_t fragment_state); + +/*! Get the number of processes on the process group + + @param process_group[in] process group object + + @return the process count, 0 on error + */ +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_H +uint8_t ia_css_process_group_get_process_count( + const ia_css_process_group_t *process_group); + +/*! Get the number of terminals on the process group + + @param process_group[in] process group object + + Note: Future change is to have a terminal count per + independent subgraph + + @return the terminal count, 0 on error + */ +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_H +uint8_t ia_css_process_group_get_terminal_count( + const ia_css_process_group_t *process_group); + +/*! Get the PG load start timestamp + + @param process_group[in] process group object + + @return PG load start timestamp, 0 on error + */ +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_H +uint32_t ia_css_process_group_get_pg_load_start_ts( + const ia_css_process_group_t *process_group); + +/*! 
Get the PG load time in cycles + + @param process_group[in] process group object + + @return PG load time in cycles, 0 on error + */ +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_H +uint32_t ia_css_process_group_get_pg_load_cycles( + const ia_css_process_group_t *process_group); + +/*! Get the PG init time in cycles + + @param process_group[in] process group object + + @return PG init time in cycles, 0 on error + */ +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_H +uint32_t ia_css_process_group_get_pg_init_cycles( + const ia_css_process_group_t *process_group); + +/*! Get the PG processing time in cycles + + @param process_group[in] process group object + + @return PG processing time in cycles, 0 on error + */ +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_H +uint32_t ia_css_process_group_get_pg_processing_cycles( + const ia_css_process_group_t *process_group); + +/*! Get the (pointer to) the terminal of the process group object + + @param process_group[in] process group object + @param terminal_type[in] terminal type of terminal + + @return the pointer to the terminal, NULL on error + */ + +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_H +ia_css_terminal_t *ia_css_process_group_get_terminal_from_type( + const ia_css_process_group_t *process_group, + const ia_css_terminal_type_t terminal_type); + +/*! Get the (pointer to) the terminal of the process group object + * for terminals which have only a single instance + * (cached in, cached out, program, program_ctrl_init) + + @param process_group[in] process group object + @param terminal_type[in] terminal type of terminal + + @return the pointer to the terminal, NULL on error + */ +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_H +const ia_css_terminal_t *ia_css_process_group_get_single_instance_terminal( + const ia_css_process_group_t *process_group, + ia_css_terminal_type_t term_type); + +/*! 
Get the (pointer to) the indexed terminal of the process group object + + @param process_group[in] process group object + @param terminal_index[in] index of the terminal + + @return the pointer to the terminal, NULL on error + */ +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_H +ia_css_terminal_t *ia_css_process_group_get_terminal( + const ia_css_process_group_t *process_group, + const unsigned int terminal_index); + +/*! Get the (pointer to) the indexed process of the process group object + + @param process_group[in] process group object + @param process_index[in] index of the process + + @return the pointer to the process, NULL on error + */ +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_H +ia_css_process_t *ia_css_process_group_get_process( + const ia_css_process_group_t *process_group, + const unsigned int process_index); + +/*! Get the stored size of the process group object + + @param process_group[in] process group object + + @return size, 0 on error + */ +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_H +size_t ia_css_process_group_get_size( + const ia_css_process_group_t *process_group); + +/*! Get the state of the process group object + + @param process_group[in] process group object + + @return state, limit value on error + */ +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_H +ia_css_process_group_state_t ia_css_process_group_get_state( + const ia_css_process_group_t *process_group); + +/*! Get the unique ID of program group used by the process group object + + @param process_group[in] process group object + + @return ID, 0 on error + */ +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_H +ia_css_program_group_ID_t ia_css_process_group_get_program_group_ID( + const ia_css_process_group_t *process_group); + +/*! Get the resource bitmap of the process group + + @param process_group[in] process group object + + @return the resource bitmap + */ +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_H +vied_nci_resource_bitmap_t ia_css_process_group_get_resource_bitmap( + const ia_css_process_group_t *process_group); + +/*!
Set the resource bitmap of the process group + + @param process_group[in] process group object + @param resource_bitmap[in] the resource bitmap + + @return < 0 on error + */ +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_H +int ia_css_process_group_set_resource_bitmap( + ia_css_process_group_t *process_group, + const vied_nci_resource_bitmap_t resource_bitmap); + +/*! Get the routing bitmap of the process group + + @param process_group[in] process group object + + @return routing bitmap (pointer) + */ +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_H +const ia_css_rbm_t *ia_css_process_group_get_routing_bitmap( + const ia_css_process_group_t *process_group); + +/*! Set the routing bitmap of the process group + + @param process_group[in] process group object + @param rbm[in] routing bitmap + + @return < 0 on error + */ +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_H +int ia_css_process_group_set_routing_bitmap( + ia_css_process_group_t *process_group, + const ia_css_rbm_t rbm); + +/*! Get IPU virtual address of process group + + @param process_group[in] process group object + @param ipu_vaddress[in/out] process group ipu virtual address + + @return -1 on error + */ +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_H +int ia_css_process_group_get_ipu_vaddress( + const ia_css_process_group_t *process_group, + vied_vaddress_t *ipu_vaddress); + +/*! Set IPU virtual address of process group + + @param process_group[in] process group object + @param ipu_vaddress[in] process group ipu address + + @return -1 on error + */ +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_H +int ia_css_process_group_set_ipu_vaddress( + ia_css_process_group_t *process_group, + vied_vaddress_t ipu_vaddress); + +/*! Get protocol version used by a process group + + @param process_group[in] process group object + + @return invalid protocol version on error + */ +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_H +uint8_t ia_css_process_group_get_protocol_version( + const ia_css_process_group_t *process_group); + +/*! 
Get base queue id used by a process group + + @param process_group[in] process group object + + @return -1 on error + */ +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_H +uint8_t ia_css_process_group_get_base_queue_id( + ia_css_process_group_t *process_group); + +/*! Set base queue id used by a process group + + @param process_group[in] process group object + @param queue_id[in] process group queue id + + @return invalid queue id on error + */ +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_H +int ia_css_process_group_set_base_queue_id( + ia_css_process_group_t *process_group, + uint8_t queue_id); + +/*! Get number of queues used by a process group + + @param process_group[in] process group object + + @return invalid number of queues (0) on error + */ +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_H +uint8_t ia_css_process_group_get_num_queues( + ia_css_process_group_t *process_group); + +/*! Set number of queues used by a process group + + @param process_group[in] process group object + @param num_queues[in] process group number of queues + + @return -1 on error + */ +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_H +int ia_css_process_group_set_num_queues( + ia_css_process_group_t *process_group, + uint8_t num_queues); + +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_H +bool ia_css_process_group_has_vp(const ia_css_process_group_t *process_group); + +#ifdef __IA_CSS_PSYS_DYNAMIC_INLINE__ +#include "ia_css_psys_process_group_impl.h" +#endif /* __IA_CSS_PSYS_DYNAMIC_INLINE__ */ + +#endif /* __IA_CSS_PSYS_PROCESS_GROUP_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/psysapi/dynamic/interface/ia_css_psys_process_group.hsys.kernel.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/psysapi/dynamic/interface/ia_css_psys_process_group.hsys.kernel.h new file mode 100644 index 0000000000000..93cce2555de9f --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/psysapi/dynamic/interface/ia_css_psys_process_group.hsys.kernel.h @@ -0,0 +1,324 @@ +/* +* Support for Intel Camera Imaging 
ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ + +#ifndef __IA_CSS_PSYS_PROCESS_GROUP_HSYS_KERNEL_H +#define __IA_CSS_PSYS_PROCESS_GROUP_HSYS_KERNEL_H + +/*! \file */ + +/** @file ia_css_psys_process_group.hsys.kernel.h + * + * Define the methods on the process group object: Hsys kernel interface + */ + +#include + +#include +#include + +#include /* uint8_t */ + +/* + * Registration of user contexts / callback info + */ + +/*! Get the user (callback) token as registered in the process group + + @param process_group[in] process group object + + @return 0 on error + */ +extern uint64_t ia_css_process_group_get_token( + ia_css_process_group_t *process_group); + +/*! Set (register) a user (callback) token in the process group + + @param process_group[in] process group object + @param token[in] user token + + Note: The token value shall be non-zero. This token is + returned in each return message related to the process + group the token is registered with. + + @return < 0 on error + */ +extern int ia_css_process_group_set_token( + ia_css_process_group_t *process_group, + const uint64_t token); + +/* + * Passing of a (fragment) watermark + */ + +/*! Get the fragment progress limit of the process group + + @param process_group[in] process group object + + @return 0 on error + */ +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_H +uint16_t ia_css_process_group_get_fragment_limit( + const ia_css_process_group_t *process_group); + +/*! 
Set the new fragment progress limit of the process group + + @param process_group[in] process group object + @param fragment_limit[in] New limit value + + Note: The limit value must be less or equal to the fragment + count value. The process group will not make progress beyond + the limit value. The limit value can be modified asynchronously + If the limit value is reached before an update happens, the + process group will suspend and will not automatically resume. + + The limit is monotonically increasing. The default value is + equal to the fragment count + + @return < 0 on error + */ +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_H +int ia_css_process_group_set_fragment_limit( + ia_css_process_group_t *process_group, + const uint16_t fragment_limit); + +/*! Clear the fragment progress limit of the process group + + @param process_group[in] process group object + + Note: This function sets the fragment limit to zero. + + @return < 0 on error + */ +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_H +int ia_css_process_group_clear_fragment_limit( + ia_css_process_group_t *process_group); + +/* + * Commands + */ + +/*! Perform the start command on the process group + + @param process_group[in] process group object + + Note: Start is an action of the l-Scheduler it makes the + process group eligible for execution + + Precondition: The external resources that are attached to + the process group must be in the correct state, i.e. input + buffers are not-empty and output buffers not-full + + @return < 0 on error + */ +extern int ia_css_process_group_start( + ia_css_process_group_t *process_group); + +/*! Perform the suspend command on the process group + + @param process_group[in] process group object + + Note: Suspend indicates that the process group execution + is halted at the next fragment boundary. 
The process group + will not automatically resume + + Precondition: The process group must be running + + @return < 0 on error + */ +extern int ia_css_process_group_suspend( + ia_css_process_group_t *process_group); + +/*! Perform the resume command on the process group + + @param process_group[in] process group object + + Note: Resume indicates that the process group is again + eligible for execution + + Precondition: The process group must be started + + @return < 0 on error + */ +extern int ia_css_process_group_resume( + ia_css_process_group_t *process_group); + +/*! Perform the reset command on the process group + + @param process_group[in] process group object + + Note: Return the process group to the started state + + Precondition: The process group must be running or stopped + + @return < 0 on error + */ +extern int ia_css_process_group_reset( + ia_css_process_group_t *process_group); + +/*! Perform the abort command on the process group + + @param process_group[in] process group object + + Note: Force the process group to the stopped state + + Precondition: The process group must be running or started + + @return < 0 on error + */ +extern int ia_css_process_group_abort( + ia_css_process_group_t *process_group); + +/*! Release ownership of the process group + + @param process_group[in] process group object + + Note: Release notifies PSYS and hands over ownership of the + process group from SW to FW + + Precondition: The process group must be in the started state + + @return < 0 on error + */ +extern int ia_css_process_group_disown( + ia_css_process_group_t *process_group); + +/* + * External resources + */ + +/*! 
Set (register) a data buffer to the indexed terminal in the process group + + @param process_group[in] process group object + @param buffer[in] buffer handle + @param buffer_state[in] state of the buffer + @param terminal_index[in] index of the terminal + + Note: The buffer handle shall not be VIED_NULL, the buffer + state can be undefined; BUFFER_UNDEFINED + + Note: The buffer can be in memory or streaming over memory + + @return < 0 on error + */ +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_H +int ia_css_process_group_attach_buffer( + ia_css_process_group_t *process_group, + vied_vaddress_t buffer, + const ia_css_buffer_state_t buffer_state, + const unsigned int terminal_index); + +/*! Get (unregister) the data buffer on the indexed terminal of + * the process group + + @param process_group[in] process group object + @param terminal_index[in] index of the terminal + + Precondition: The process group must be stopped + + Postcondition: The buffer handle shall be reset to VIED_NULL, the buffer + state to BUFFER_NULL + + @return VIED_NULL on error + */ +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_H +vied_vaddress_t ia_css_process_group_detach_buffer( + ia_css_process_group_t *process_group, + const unsigned int terminal_index); + +/*! Set (register) a data buffer to the indexed terminal in the process group + + @param process_group[in] process group object + @param stream[in] stream handle + @param buffer_state[in] state of the buffer + @param terminal_index[in] index of the terminal + + Note: The stream handle shall not be zero, the buffer + state can be undefined; BUFFER_UNDEFINED + + Note: The stream is used exclusive to a buffer; the latter can be in memory + or streaming over memory + + @return < 0 on error + */ +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_H +int ia_css_process_group_attach_stream( + ia_css_process_group_t *process_group, + uint32_t stream, + const ia_css_buffer_state_t buffer_state, + const unsigned int terminal_index); + +/*! 
Get (unregister) the stream handle on the indexed terminal of + * the process group + + @param process_group[in] process group object + @param terminal_index[in] index of the terminal + + Precondition: The process group must be stopped + + Postcondition: The stream handle shall be reset to zero, the buffer + state to BUFFER_NULL + + @return 0 on error + */ +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_H +uint32_t ia_css_process_group_detach_stream( + ia_css_process_group_t *process_group, + const unsigned int terminal_index); + +/* + * Sequencing resources + */ + +/*! Set a(n artificial) blocking resource (barrier) in + * the process group resource map + + @param process_group[in] process group object + @param barrier_index[in] index of the barrier + + Note: The barriers have to be set to force sequence between started + process groups + + @return < 0 on error + */ +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_H +int ia_css_process_group_set_barrier( + ia_css_process_group_t *process_group, + const vied_nci_barrier_ID_t barrier_index); + +/*! Clear a previously set blocking resource (barrier) in + * the process group resource map + + @param process_group[in] process group object + @param barrier_index[in] index of the barrier + + Precondition: The barriers must have been set + + @return < 0 on error + */ +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_H +int ia_css_process_group_clear_barrier( + ia_css_process_group_t *process_group, + const vied_nci_barrier_ID_t barrier_index); + +/*! 
Boolean test if the process group preconditions for start are satisfied + + @param process_group[in] process group object + + @return true if the process group can be started + */ +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_H +bool ia_css_can_process_group_start( + const ia_css_process_group_t *process_group); + +#endif /* __IA_CSS_PSYS_PROCESS_GROUP_HSYS_KERNEL_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/psysapi/dynamic/interface/ia_css_psys_process_group.hsys.user.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/psysapi/dynamic/interface/ia_css_psys_process_group.hsys.user.h new file mode 100644 index 0000000000000..dfbcc8815c1ef --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/psysapi/dynamic/interface/ia_css_psys_process_group.hsys.user.h @@ -0,0 +1,199 @@ +/* +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ + +#ifndef __IA_CSS_PSYS_PROCESS_GROUP_HSYS_USER_H +#define __IA_CSS_PSYS_PROCESS_GROUP_HSYS_USER_H + +/*! \file */ + +/** @file ia_css_psys_process_group.hsys.user.h + * + * Define the methods on the process group object: Hsys user interface + */ + +#include /* ia_css_program_group_param_t */ + +#include +#include +#include + +#include "ia_css_psys_dynamic_storage_class.h" + +#include /* uint8_t */ + +/* + * Creation + */ + +/*! 
Compute the size of storage required for allocating the process group object + + @param manifest[in] program group manifest + @param param[in] program group parameters + + @return 0 on error + */ +extern size_t ia_css_sizeof_process_group( + const ia_css_program_group_manifest_t *manifest, + const ia_css_program_group_param_t *param); + +/*! Create (the storage for) the process group object + + @param process_grp_mem[in/out] raw memory for process group + @param manifest[in] program group manifest + @param param[in] program group parameters + + @return NULL on error + */ +extern ia_css_process_group_t *ia_css_process_group_create( + void *process_grp_mem, + const ia_css_program_group_manifest_t *manifest, + const ia_css_program_group_param_t *param); + +/*! Destroy (the storage of) the process group object + + @param process_group[in] process group object + + @return NULL + */ +extern ia_css_process_group_t *ia_css_process_group_destroy( + ia_css_process_group_t *process_group); + +/*! Print the process group object to file/stream + + @param process_group[in] process group object + @param fid[out] file/stream handle + + @return < 0 on error + */ +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_H +int ia_css_process_group_print( + const ia_css_process_group_t *process_group, + void *fid); + +/* + * Commands + */ + +/*! Perform the submit command on the process group + + @param process_group[in] process group object + + Note: Submit is an action of the h-Scheduler it makes the + process group eligible for the l-Scheduler + + Precondition: The external resources must be attached to + the process group + + @return < 0 on error + */ +extern int ia_css_process_group_submit( + ia_css_process_group_t *process_group); + +/*! 
Boolean test if the process group object type is valid + + @param process_group[in] process group object + @param manifest[in] program group manifest + @param param[in] program group parameters + + @return true if the process group is correct, false on error + */ +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_H +bool ia_css_is_process_group_valid( + const ia_css_process_group_t *process_group, + const ia_css_program_group_manifest_t *manifest, + const ia_css_program_group_param_t *param); + +/*! Boolean test if the process group preconditions for submit are satisfied + + @param process_group[in] process group object + + @return true if the process group can be submitted + */ +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_H +bool ia_css_can_process_group_submit( + const ia_css_process_group_t *process_group); + +/*! Boolean test if the preconditions on process group and buffer set are + satisfied for enqueuing buffer set + + @param process_group[in] process group object + @param buffer_set[in] buffer set object + + @return true if the buffer set can be enqueued + */ +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_H +bool ia_css_can_enqueue_buffer_set( + const ia_css_process_group_t *process_group, + const ia_css_buffer_set_t *buffer_set); + +/*! Compute the cyclecount required for executing the process group object + + @param manifest[in] program group manifest + @param param[in] program group parameters + + @return 0 on error + */ +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_H +uint32_t ia_css_process_group_compute_cycle_count( + const ia_css_program_group_manifest_t *manifest, + const ia_css_program_group_param_t *param); + +/*! Compute the number of processes required for + * executing the process group object + + @param manifest[in] program group manifest + @param param[in] program group parameters + + @return 0 on error + */ +extern uint8_t ia_css_process_group_compute_process_count( + const ia_css_program_group_manifest_t *manifest, + const ia_css_program_group_param_t *param); + +/*! 
Compute the number of terminals required for + * executing the process group object + + @param manifest[in] program group manifest + @param param[in] program group parameters + + @return 0 on error + */ +extern uint8_t ia_css_process_group_compute_terminal_count( + const ia_css_program_group_manifest_t *manifest, + const ia_css_program_group_param_t *param); + +/*! Get private token as registered in the process group by the implementation + + @param process_group[in] process group object + + @return 0 on error + */ +extern uint64_t ia_css_process_group_get_private_token( + ia_css_process_group_t *process_group); + +/*! Set private token in the process group as needed by the implementation + + @param process_group[in] process group object + @param token[in] user token + + Note: The token value shall be non-zero. This token is private + to the implementation. This is in addition to the user token + + @return < 0 on error, 0 on success + */ +extern int ia_css_process_group_set_private_token( + ia_css_process_group_t *process_group, + const uint64_t token); + +#endif /* __IA_CSS_PSYS_PROCESS_GROUP_HSYS_USER_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/psysapi/dynamic/interface/ia_css_psys_process_group.psys.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/psysapi/dynamic/interface/ia_css_psys_process_group.psys.h new file mode 100644 index 0000000000000..6ceccfc2f9bc3 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/psysapi/dynamic/interface/ia_css_psys_process_group.psys.h @@ -0,0 +1,60 @@ +/* +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. 
+ * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ + +#ifndef __IA_CSS_PSYS_PROCESS_GROUP_PSYS_H +#define __IA_CSS_PSYS_PROCESS_GROUP_PSYS_H + +/*! \file */ + +/** @file ia_css_psys_process_group.psys.h + * + * Define the methods on the process group object: Psys embedded interface + */ + +#include + +/* + * Dispatcher + */ + +/*! Perform the run command on the process group + + @param process_group[in] process group object + + Note: Run indicates that the process group will execute + + Precondition: The process group must be started or + suspended and the processes have acquired the necessary + internal resources + + @return < 0 on error + */ +extern int ia_css_process_group_run( + ia_css_process_group_t *process_group); + +/*! Perform the stop command on the process group + + @param process_group[in] process group object + + Note: Stop indicates that the process group has completed execution + + Postcondition: The external resources can now be detached + + @return < 0 on error + */ +extern int ia_css_process_group_stop( + ia_css_process_group_t *process_group); + + +#endif /* __IA_CSS_PSYS_PROCESS_GROUP_PSYS_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/psysapi/dynamic/interface/ia_css_psys_process_group_cmd_impl.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/psysapi/dynamic/interface/ia_css_psys_process_group_cmd_impl.h new file mode 100644 index 0000000000000..530f93ef6ce03 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/psysapi/dynamic/interface/ia_css_psys_process_group_cmd_impl.h @@ -0,0 +1,178 @@ +/* +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. 
+ * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ + +#ifndef __IA_CSS_PSYS_PROCESS_GROUP_CMD_IMPL_H +#define __IA_CSS_PSYS_PROCESS_GROUP_CMD_IMPL_H + +#include "type_support.h" +#include "ia_css_psys_process_group.h" +#include "ia_css_rbm_manifest_types.h" + +#define N_UINT64_IN_PROCESS_GROUP_STRUCT 2 +#define N_UINT32_IN_PROCESS_GROUP_STRUCT 5 +#define N_UINT16_IN_PROCESS_GROUP_STRUCT 5 +#define N_UINT8_IN_PROCESS_GROUP_STRUCT 7 +#define N_PADDING_UINT8_IN_PROCESS_GROUP_STRUCT 3 + +#define SIZE_OF_PROCESS_GROUP_STRUCT_BITS \ + (IA_CSS_RBM_BITS \ + + N_UINT64_IN_PROCESS_GROUP_STRUCT * IA_CSS_UINT64_T_BITS \ + + N_UINT32_IN_PROCESS_GROUP_STRUCT * IA_CSS_UINT32_T_BITS \ + + IA_CSS_PROGRAM_GROUP_ID_BITS \ + + IA_CSS_PROCESS_GROUP_STATE_BITS \ + + VIED_VADDRESS_BITS \ + + VIED_NCI_RESOURCE_BITMAP_BITS \ + + N_UINT16_IN_PROCESS_GROUP_STRUCT * IA_CSS_UINT16_T_BITS \ + + N_UINT8_IN_PROCESS_GROUP_STRUCT * IA_CSS_UINT8_T_BITS \ + + N_PADDING_UINT8_IN_PROCESS_GROUP_STRUCT * IA_CSS_UINT8_T_BITS) + +struct ia_css_process_group_s { + /**< User (callback) token / user context reference, + * zero is an error value + */ + uint64_t token; + /**< private token / context reference, zero is an error value */ + uint64_t private_token; + /**< PG routing bitmap used to set connection between programs >*/ + ia_css_rbm_t routing_bitmap; + /**< Size of this structure */ + uint32_t size; + /**< The timestamp when PG load starts */ + uint32_t pg_load_start_ts; + /**< PG load time in cycles */ + uint32_t pg_load_cycles; + /**< PG init time in cycles */ + uint32_t pg_init_cycles; + /**< PG 
processing time in cycles */ + uint32_t pg_processing_cycles; + /**< Referral ID to program group FW */ + ia_css_program_group_ID_t ID; + /**< State of the process group FSM */ + ia_css_process_group_state_t state; + /**< Virtual address of process group in IPU */ + vied_vaddress_t ipu_virtual_address; + /**< Bitmap of the compute resources used by the process group */ + vied_nci_resource_bitmap_t resource_bitmap; + /**< Number of fragments offered on each terminal */ + uint16_t fragment_count; + /**< Current fragment of processing */ + uint16_t fragment_state; + /**< Watermark to control fragment processing */ + uint16_t fragment_limit; + /**< Array[process_count] of process addresses in this process group */ + uint16_t processes_offset; + /**< Array[terminal_count] of terminal addresses on this process group */ + uint16_t terminals_offset; + /**< Parameter dependent number of processes in this process group */ + uint8_t process_count; + /**< Parameter dependent number of terminals on this process group */ + uint8_t terminal_count; + /**< Parameter dependent number of independent subgraphs in + * this process group + */ + uint8_t subgraph_count; + /**< Process group protocol version */ + uint8_t protocol_version; + /**< Dedicated base queue id used for enqueueing payload buffer sets */ + uint8_t base_queue_id; + /**< Number of dedicated queues used */ + uint8_t num_queues; + /**< Mask the send_pg_done IRQ */ + uint8_t mask_irq; + /**< Padding for 64bit alignment */ + uint8_t padding[N_PADDING_UINT8_IN_PROCESS_GROUP_STRUCT]; +}; + +/*! Callback after process group is created. Implementations can provide + * suitable actions needed when process group is created. 
+ + @param process_group[in] process group object + @param program_group_manifest[in] program group manifest + @param program_group_param[in] program group parameters + + @return 0 on success and non-zero on failure + */ +extern int ia_css_process_group_on_create( + ia_css_process_group_t *process_group, + const ia_css_program_group_manifest_t *program_group_manifest, + const ia_css_program_group_param_t *program_group_param); + +/*! Callback before process group is about to be destoyed. Any implementation + * specific cleanups can be done here. + + @param process_group[in] process group object + + @return 0 on success and non-zero on failure + */ +extern int ia_css_process_group_on_destroy( + ia_css_process_group_t *process_group); + +/* + * Command processor + */ + +/*! Execute a command locally or send it to be processed remotely + + @param process_group[in] process group object + @param cmd[in] command + + @return < 0 on error + */ +extern int ia_css_process_group_exec_cmd( + ia_css_process_group_t *process_group, + const ia_css_process_group_cmd_t cmd); + + +/*! Enqueue a buffer set corresponding to a persistent program group by + * sending a command to subsystem. + + @param process_group[in] process group object + @param buffer_set[in] buffer set + @param queue_offset[in] offset to be used from the queue id + specified in the process group object + (0 for first buffer set for frame, 1 + for late binding) + + @return < 0 on error + */ +extern int ia_css_enqueue_buffer_set( + ia_css_process_group_t *process_group, + ia_css_buffer_set_t *buffer_set, + unsigned int queue_offset); + +/*! Enqueue a parameter buffer set corresponding to a persistent program + * group by sending a command to subsystem. + + @param process_group[in] process group object + @param buffer_set[in] parameter buffer set + + @return < 0 on error + */ +extern int ia_css_enqueue_param_buffer_set( + ia_css_process_group_t *process_group, + ia_css_buffer_set_t *buffer_set); + +/*! 
Need to store the 'secure' mode for each PG for FW test app only + * + * @param process_group[in] process group object + * @param secure[in] parameter buffer set + * + * @return < 0 on error + */ +extern int ia_css_process_group_store( + ia_css_process_group_t *process_group, + bool secure); + + +#endif /* __IA_CSS_PSYS_PROCESS_GROUP_CMD_IMPL_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/psysapi/dynamic/interface/ia_css_psys_process_types.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/psysapi/dynamic/interface/ia_css_psys_process_types.h new file mode 100644 index 0000000000000..4fb064dc00df6 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/psysapi/dynamic/interface/ia_css_psys_process_types.h @@ -0,0 +1,95 @@ +/* +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ + +#ifndef __IA_CSS_PSYS_PROCESS_TYPES_H +#define __IA_CSS_PSYS_PROCESS_TYPES_H + +/*! 
\file */ + +/** @file ia_css_psys_process_types.h + * + * The types belonging to the terminal/process/process group dynamic module + */ + +#include +#include + +#include + +#define IA_CSS_PROCESS_INVALID_PROGRAM_IDX ((uint32_t)-1) + +/* private */ +typedef enum ia_css_process_group_cmd { + IA_CSS_PROCESS_GROUP_CMD_NOP = 0, + IA_CSS_PROCESS_GROUP_CMD_SUBMIT, + IA_CSS_PROCESS_GROUP_CMD_ATTACH, + IA_CSS_PROCESS_GROUP_CMD_DETACH, + IA_CSS_PROCESS_GROUP_CMD_START, + IA_CSS_PROCESS_GROUP_CMD_DISOWN, + IA_CSS_PROCESS_GROUP_CMD_RUN, + IA_CSS_PROCESS_GROUP_CMD_STOP, + IA_CSS_PROCESS_GROUP_CMD_SUSPEND, + IA_CSS_PROCESS_GROUP_CMD_RESUME, + IA_CSS_PROCESS_GROUP_CMD_ABORT, + IA_CSS_PROCESS_GROUP_CMD_RESET, + IA_CSS_N_PROCESS_GROUP_CMDS +} ia_css_process_group_cmd_t; + +/* private */ +#define IA_CSS_PROCESS_GROUP_STATE_BITS 32 +typedef enum ia_css_process_group_state { + IA_CSS_PROCESS_GROUP_ERROR = 0, + IA_CSS_PROCESS_GROUP_CREATED, + IA_CSS_PROCESS_GROUP_READY, + IA_CSS_PROCESS_GROUP_BLOCKED, + IA_CSS_PROCESS_GROUP_STARTED, + IA_CSS_PROCESS_GROUP_RUNNING, + IA_CSS_PROCESS_GROUP_STALLED, + IA_CSS_PROCESS_GROUP_STOPPED, + IA_CSS_N_PROCESS_GROUP_STATES +} ia_css_process_group_state_t; + +/* private */ +typedef enum ia_css_process_cmd { + IA_CSS_PROCESS_CMD_NOP = 0, + IA_CSS_PROCESS_CMD_ACQUIRE, + IA_CSS_PROCESS_CMD_RELEASE, + IA_CSS_PROCESS_CMD_START, + IA_CSS_PROCESS_CMD_LOAD, + IA_CSS_PROCESS_CMD_STOP, + IA_CSS_PROCESS_CMD_SUSPEND, + IA_CSS_PROCESS_CMD_RESUME, + IA_CSS_N_PROCESS_CMDS +} ia_css_process_cmd_t; + +/* private */ +#define IA_CSS_PROCESS_STATE_BITS 32 +typedef enum ia_css_process_state { + IA_CSS_PROCESS_ERROR = 0, + IA_CSS_PROCESS_CREATED, + IA_CSS_PROCESS_READY, + IA_CSS_PROCESS_STARTED, + IA_CSS_PROCESS_RUNNING, + IA_CSS_PROCESS_STOPPED, + IA_CSS_PROCESS_SUSPENDED, + IA_CSS_N_PROCESS_STATES +} ia_css_process_state_t; + +/* public */ +typedef struct ia_css_process_group_s ia_css_process_group_t; +typedef struct ia_css_process_s ia_css_process_t; + +typedef struct 
ia_css_data_terminal_s ia_css_data_terminal_t; + +#endif /* __IA_CSS_PSYS_PROCESS_TYPES_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/psysapi/dynamic/interface/ia_css_psys_terminal.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/psysapi/dynamic/interface/ia_css_psys_terminal.h new file mode 100644 index 0000000000000..abf398299d166 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/psysapi/dynamic/interface/ia_css_psys_terminal.h @@ -0,0 +1,316 @@ +/* +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ + +#ifndef __IA_CSS_PSYS_TERMINAL_H +#define __IA_CSS_PSYS_TERMINAL_H + +/*! \file */ + +/** @file ia_css_psys_terminal.h + * + * Define the methods on the terminal object that are not part of + * a single interface + */ + +#include /* ia_css_frame_t */ +#include /* ia_css_program_group_param_t */ + +#include +#include + +#include /* bool */ +#include /* FILE */ +#include "ia_css_psys_dynamic_storage_class.h" +#include "ia_css_terminal.h" +#include "ia_css_terminal_manifest_base_types.h" + +/* + * Creation + */ +#include + +/*! Boolean test if the terminal object type is input + + @param terminal[in] terminal object + + @return true if the terminal is input, false otherwise or on error + */ +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_H +bool ia_css_is_terminal_input( + const ia_css_terminal_t *terminal); + +/*! 
Get the stored size of the terminal object + + @param terminal[in] terminal object + + @return size, 0 on error + */ +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_H +size_t ia_css_terminal_get_size( + const ia_css_terminal_t *terminal); + +/*! Get the type of the terminal object + + @param terminal[in] terminal object + + @return the type of the terminal, limit value on error + */ +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_H +ia_css_terminal_type_t ia_css_terminal_get_type( + const ia_css_terminal_t *terminal); + +/*! Set the type of the terminal object + + @param terminal[in] terminal object + @param terminal_type[in] type of the terminal + + @return < 0 on error + */ +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_H +int ia_css_terminal_set_type( + ia_css_terminal_t *terminal, + const ia_css_terminal_type_t terminal_type); + +/*! Get the index of the terminal manifest object + + @param terminal[in] terminal object + + @return the index of the terminal manifest object, limit value on error + */ +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_H +uint16_t ia_css_terminal_get_terminal_manifest_index( + const ia_css_terminal_t *terminal); + +/*! Set the index of the terminal manifest object + + @param terminal[in] terminal object + @param tm_index[in] terminal manifest index + + @return < 0 on error + */ +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_H +int ia_css_terminal_set_terminal_manifest_index( + ia_css_terminal_t *terminal, + const uint16_t tm_index); + +/*! Get id of the terminal object + + @param terminal[in] terminal object + + @return id of terminal + */ +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_H +ia_css_terminal_ID_t ia_css_terminal_get_ID( + const ia_css_terminal_t *terminal); + +/*! Get kernel id of the data terminal object + + @param dterminal[in] data terminal object + + @return kernel id of terminal + */ +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_H +uint8_t ia_css_data_terminal_get_kernel_id( + const ia_css_data_terminal_t *dterminal); + +/*! 
Get the connection type from the terminal object + + @param terminal[in] terminal object + + @return buffer type, limit value on error + */ +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_H +ia_css_connection_type_t ia_css_data_terminal_get_connection_type( + const ia_css_data_terminal_t *dterminal); + +/*! Set the connection type of the terminal object + + @param terminal[in] terminal object + @param connection_type[in] connection type + + @return < 0 on error + */ +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_H +int ia_css_data_terminal_set_connection_type( + ia_css_data_terminal_t *dterminal, + const ia_css_connection_type_t connection_type); + +/*! Get link id of the data terminal object + + @param dterminal[in] data terminal object + + @return link id of terminal + */ +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_H +uint8_t ia_css_data_terminal_get_link_id( + const ia_css_data_terminal_t *dterminal); + + +/*! Set link id of the terminal object + + @param terminal[in] data terminal object + @param link_id[in] synchronization link id + + @return < 0 on error + */ +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_H +int ia_css_data_terminal_set_link_id( + ia_css_data_terminal_t *dterminal, + const uint8_t link_id); + +/*! Get the (pointer to) the process group parent of the terminal object + + @param terminal[in] terminal object + + @return the pointer to the parent, NULL on error + */ +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_H +ia_css_process_group_t *ia_css_terminal_get_parent( + const ia_css_terminal_t *terminal); + +/*! Set the (pointer to) the process group parent of the terminal object + + @param terminal[in] terminal object + @param parent[in] (pointer to the) process group parent object + + @return < 0 on error + */ +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_H +int ia_css_terminal_set_parent( + ia_css_terminal_t *terminal, + ia_css_process_group_t *parent); + +/*! 
Boolean test if the terminal object type is valid + + @param terminal[in] process terminal object + @param terminal_manifest[in] program terminal manifest + + @return true if the process terminal object is correct, false on error + */ +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_H +bool ia_css_is_terminal_valid( + const ia_css_terminal_t *terminal, + const ia_css_terminal_manifest_t *terminal_manifest); + +/* ================= Program Control Init Terminal - START ================= */ + +/*! + * Gets the program init terminal descripor size + * @param manifest[in] program control init terminal manifest + * @return size, error if < 0. + */ +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_H +unsigned int +ia_css_program_control_init_terminal_get_descriptor_size( + const ia_css_program_control_init_terminal_manifest_t *manifest); + +/*! + * Initialize program control init terminal + * @param nof_fragments[in] Number of fragments + * @param terminal[in] program control init terminal + * @param manifest[in] program control init terminal manifest + * @return < 0 on error + */ +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_H +int +ia_css_program_control_init_terminal_init( + ia_css_program_control_init_terminal_t *terminal, + const ia_css_program_control_init_terminal_manifest_t *manifest); + +/*! + * Get a program desc for a program control init terminal + * @param terminal[in] program control init terminal + * @param manifest[in] program control init terminal manifest + * @return < 0 on error + */ +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_H +ia_css_program_control_init_program_desc_t * +ia_css_program_control_init_terminal_get_program_desc( + const ia_css_program_control_init_terminal_t *prog_ctrl_init_terminal, + const unsigned int program_index +); + +/*! 
+ * Pretty prints the program control init termnial + * @param terminal[in] program control init terminal + */ +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_H +void ia_css_program_control_init_terminal_print( + const ia_css_program_control_init_terminal_t *terminal); + +/*! + * Gets a load section desc for a program desc + * of a program control init terminal + * @param program_desc[in] program control init terminal program desc + * @param load_section_index[in] section index + */ +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_H +ia_css_program_control_init_load_section_desc_t * +ia_css_program_control_init_terminal_get_load_section_desc( + const ia_css_program_control_init_program_desc_t *program_desc, + const unsigned int load_section_index +); + +/*! + * Gets process_id from program desc + * of a program control init terminal + * @param program_desc[in] program control init terminal program desc + */ +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_C +ia_css_process_id_t ia_css_program_control_init_terminal_get_process_id( + const ia_css_program_control_init_program_desc_t *program_desc); + +/*! + * Set control info of program desc + * of a program control init terminal + * @param program_desc[in] program control init terminal program desc + * @param process_id unique process id used to identify the process + * among all active process + * @param num_done_events number of events required to close the process + */ +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_H +void ia_css_program_control_init_terminal_set_control_info( + ia_css_program_control_init_program_desc_t *program_desc, + ia_css_process_id_t process_id, + uint8_t num_done_events); + +/*! + * Gets num_done_events value from program desc + * of a program control init terminal + * @param program_desc[in] program control init terminal program desc + */ +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_H +uint8_t ia_css_program_control_init_terminal_get_num_done_events( + const ia_css_program_control_init_program_desc_t *program_desc); + +/*! 
+ * Gets a connect section desc for a program desc + * of a program control init terminal + * @param program_desc[in] program control init terminal program desc + * @param connect_section_index[in] section index + */ +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_H +ia_css_program_control_init_connect_section_desc_t * +ia_css_program_control_init_terminal_get_connect_section_desc( + const ia_css_program_control_init_program_desc_t *program_desc, + const unsigned int connect_section_index +); + +/* ================= Program Control Init Terminal - END ================= */ + +#ifdef __IA_CSS_PSYS_DYNAMIC_INLINE__ +#include "ia_css_psys_terminal_impl.h" +#endif /* __IA_CSS_PSYS_DYNAMIC_INLINE__ */ + +#endif /* __IA_CSS_PSYS_TERMINAL_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/psysapi/dynamic/interface/ia_css_psys_terminal.hsys.user.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/psysapi/dynamic/interface/ia_css_psys_terminal.hsys.user.h new file mode 100644 index 0000000000000..b8aa08c19754a --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/psysapi/dynamic/interface/ia_css_psys_terminal.hsys.user.h @@ -0,0 +1,255 @@ +/* +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ + +#ifndef __IA_CSS_PSYS_TERMINAL_HSYS_USER_H +#define __IA_CSS_PSYS_TERMINAL_HSYS_USER_H + +/*! 
\file */ + +/** @file ia_css_psys_terminal.hsys.user.h + * + * Define the methods on the terminal object: Hsys user interface + */ + +#include /* ia_css_frame_t */ +#include /* ia_css_program_group_param_t */ + +#include +#include + +#include /* bool */ +#include "ia_css_psys_dynamic_storage_class.h" +#include "ia_css_terminal.h" +#include "ia_css_terminal_manifest.h" +#include "ia_css_kernel_bitmap.h" + +/* + * Creation + */ + +/* + * This source file is created with the intention of sharing and + * compiled for host and firmware. Since there is no native 64bit + * data type support for firmware this wouldn't compile for SP + * tile. The part of the file that is not compilable are marked + * with the following __VIED_CELL marker and this comment. Once we + * come up with a solution to address this issue this will be + * removed. + */ +#if !defined(__VIED_CELL) +/*! Compute the size of storage required for allocating the terminal object + + @param manifest[in] terminal manifest + @param param[in] program group parameters + + @return 0 on error + */ +extern size_t ia_css_sizeof_terminal( + const ia_css_terminal_manifest_t *manifest, + const ia_css_program_group_param_t *param); + +/*! Create the terminal object + + @param raw_mem[in] pre allocated memory + @param manifest[in] terminal manifest + @param terminal_param[in] terminal parameter + @param enable_bitmap program group enable bitmap + + @return NULL on error + */ +extern ia_css_terminal_t *ia_css_terminal_create( + void *raw_mem, + const ia_css_terminal_manifest_t *manifest, + const ia_css_terminal_param_t *terminal_param, + ia_css_kernel_bitmap_t enable_bitmap); + +/*! Destroy (the storage of) the process object + + @param terminal[in] terminal object + + @return NULL + */ +extern ia_css_terminal_t *ia_css_terminal_destroy( + ia_css_terminal_t *terminal); +#endif /* !defined(__VIED_CELL) */ + +/*! 
Print the terminal object to file/stream + + @param terminal[in] terminal object + @param fid[out] file/stream handle + + @return < 0 on error + */ +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_H +int ia_css_terminal_print( + const ia_css_terminal_t *terminal, + void *fid); + +/*! Get the (pointer to) the frame object in the terminal object + + @param terminal[in] terminal object + + @return the pointer to the frame, NULL on error + */ +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_H +ia_css_frame_t *ia_css_data_terminal_get_frame( + const ia_css_data_terminal_t *terminal); + +/*! Get the (pointer to) the frame descriptor object in the terminal object + + @param terminal[in] terminal object + + @return the pointer to the frame descriptor, NULL on error + */ +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_H +ia_css_frame_descriptor_t *ia_css_data_terminal_get_frame_descriptor( + const ia_css_data_terminal_t *dterminal); + +/*! Get the (pointer to) the fragment descriptor object in the terminal object + + @param terminal[in] terminal object + +@return the pointer to the fragment descriptor, NULL on error +*/ +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_H +ia_css_fragment_descriptor_t + *ia_css_data_terminal_get_fragment_descriptor( + const ia_css_data_terminal_t *dterminal, + const unsigned int fragment_index); + +/*! Get the number of fragments on the terminal + + @param terminal[in] terminal object + + @return the fragment count, 0 on error + */ +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_H +uint16_t ia_css_data_terminal_get_fragment_count( + const ia_css_data_terminal_t *dterminal); + +/*! Get the number of section on the (param)terminal + @param manifest[in] terminal manifest + @param terminal_param[in] terminal parameter + + @return the section count, 0 on error + */ +extern uint16_t ia_css_param_terminal_compute_section_count( + const ia_css_terminal_manifest_t *manifest, + const ia_css_program_group_param_t *param); + +/*! 
Get the number of planes on the (data)terminal + @param manifest[in] terminal manifest + @param terminal_param[in] terminal parameter + + @return the plane count, 1(default) on error + */ +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_H +uint8_t ia_css_data_terminal_compute_plane_count( + const ia_css_terminal_manifest_t *manifest, + const ia_css_program_group_param_t *param); + +/*! check if given terminal is parameter terminal. + + @param terminal[in] (base)terminal object + + @return true on success, false on error + */ +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_H +bool ia_css_is_terminal_parameter_terminal( + const ia_css_terminal_t *terminal); + +/*! check if given terminal is program terminal. + + @program terminal[in] (base)terminal object + + @return true on success, false on error + */ +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_H +bool ia_css_is_terminal_program_terminal( + const ia_css_terminal_t *terminal); + +/*! check if given terminal is program control init terminal. + + @program control init terminal[in] (base)terminal object + + @return true on success, false on error + */ +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_H +bool ia_css_is_terminal_program_control_init_terminal( + const ia_css_terminal_t *terminal); + +/*! check if given terminal is spatial parameter terminal. + + @spatial terminal[in] (base)terminal object + + @return true on success, false on error + */ +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_H +bool ia_css_is_terminal_spatial_parameter_terminal( + const ia_css_terminal_t *terminal); + +/*! check if given terminal is data terminal. + + @param terminal[in] (base)terminal object + + @return true on success, false on error + */ +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_H +bool ia_css_is_terminal_data_terminal( + const ia_css_terminal_t *terminal); + +/*! obtain buffer out of terminal(both data & param terminals can call this) + + @param terminal[in] (base)terminal object of either data or param terminal. 
+ + @return vied address of buffer stored in terminal + */ +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_H +vied_vaddress_t ia_css_terminal_get_buffer( + const ia_css_terminal_t *terminal); + +/*!store a buffer in the terminal. + + @param terminal[in] (base)terminal object of either data or param terminal. + @param buffer[in] buffer in vied (hrt address) space. + + @return 0 on success + */ +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_H +int ia_css_terminal_set_buffer(ia_css_terminal_t *terminal, + vied_vaddress_t buffer); + +/*! Obtain terminal buffer index out of terminal object + + @param terminal[in] (base)terminal object of either data or param terminal. + + @return terminal buffer index stored in terminal object on success, -1 on error + */ +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_C +int ia_css_terminal_get_terminal_index( + const ia_css_terminal_t *terminal); + +/*! Store a terminal buffer index in the terminal object + + @param terminal[in] (base)terminal object of either data or param terminal. + @param terminal_index[in] terminal buffer index + + @return 0 on success + */ +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_C +int ia_css_terminal_set_terminal_index( + ia_css_terminal_t *terminal, + unsigned int terminal_index); + +#endif /* __IA_CSS_PSYS_TERMINAL_HSYS_USER_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/psysapi/dynamic/src/ia_css_psys_buffer_set.c b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/psysapi/dynamic/src/ia_css_psys_buffer_set.c new file mode 100644 index 0000000000000..82d53831f9a98 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/psysapi/dynamic/src/ia_css_psys_buffer_set.c @@ -0,0 +1,111 @@ +/* + * Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. 
+ * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + */ + +#include "assert_support.h" +#include "ia_css_psys_dynamic_trace.h" +#include "ia_css_psys_buffer_set.h" +#include "ia_css_psys_process_group.h" + +/* + * Functions to possibly inline + */ +#ifndef __IA_CSS_PSYS_DYNAMIC_INLINE__ +#include "ia_css_psys_buffer_set_impl.h" +#endif /* __IA_CSS_PSYS_DYNAMIC_INLINE__ */ + +STORAGE_CLASS_INLINE void __buffer_set_dummy_check_alignment(void) +{ + COMPILATION_ERROR_IF(SIZE_OF_BUFFER_SET != + CHAR_BIT * sizeof(ia_css_buffer_set_t)); + + COMPILATION_ERROR_IF(0 != + sizeof(ia_css_buffer_set_t) % sizeof(uint64_t)); +} + +/* + * Functions not to inline + */ + +/* The below functions are not to be compiled for firmware */ +#if !defined(__HIVECC) + +ia_css_buffer_set_t *ia_css_buffer_set_create( + void *buffer_set_mem, + const ia_css_process_group_t *process_group, + const unsigned int frame_counter) +{ + ia_css_buffer_set_t *buffer_set = NULL; + unsigned int i; + int ret = -1; + + verifexit(buffer_set_mem != NULL); + verifexit(process_group != NULL); + + buffer_set = (ia_css_buffer_set_t *)buffer_set_mem; + + /* + * Set base struct members + */ + buffer_set->ipu_virtual_address = VIED_NULL; + ia_css_process_group_get_ipu_vaddress(process_group, + &buffer_set->process_group_handle); + buffer_set->frame_counter = frame_counter; + buffer_set->terminal_count = + ia_css_process_group_get_terminal_count(process_group); + + /* + * Initialize adjacent buffer addresses + */ + for (i = 0; i < buffer_set->terminal_count; i++) { + vied_vaddress_t *buffer = + (vied_vaddress_t *)( + (char *)buffer_set + + sizeof(ia_css_buffer_set_t) + + sizeof(vied_vaddress_t) * i); + + *buffer = VIED_NULL; + } + ret = 0; + +EXIT: + if (ret != 0) { + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, ERROR, + 
"ia_css_buffer_set_create failed\n"); + } + return buffer_set; +} + +size_t ia_css_sizeof_buffer_set( + const ia_css_process_group_t *process_group) +{ + size_t size = 0; + + verifexit(process_group != NULL); + + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, VERBOSE, + "ia_css_sizeof_buffer_set(): enter:\n"); + + size = sizeof(ia_css_buffer_set_t) + + ia_css_process_group_get_terminal_count(process_group) * + sizeof(vied_vaddress_t); + +EXIT: + if (size == 0) { + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, ERROR, + "ia_css_sizeof_buffer_set failed\n"); + } + return size; +} + +#endif diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/psysapi/dynamic/src/ia_css_psys_buffer_set_impl.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/psysapi/dynamic/src/ia_css_psys_buffer_set_impl.h new file mode 100644 index 0000000000000..0399d76f33315 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/psysapi/dynamic/src/ia_css_psys_buffer_set_impl.h @@ -0,0 +1,241 @@ +/* + * Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+ */ + +#ifndef __IA_CSS_PSYS_BUFFER_SET_IMPL_H +#define __IA_CSS_PSYS_BUFFER_SET_IMPL_H + +#include "error_support.h" +#include "ia_css_psys_dynamic_trace.h" +#include "vied_nci_psys_system_global.h" +#include "ia_css_psys_terminal.hsys.user.h" + +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_C +int ia_css_buffer_set_set_buffer( + ia_css_buffer_set_t *buffer_set, + const unsigned int terminal_index, + const vied_vaddress_t buffer) +{ + DECLARE_ERRVAL + vied_vaddress_t *buffer_ptr; + int ret = -1; + + verifexitval(buffer_set != NULL, EFAULT); + verifexitval(terminal_index < buffer_set->terminal_count, EFAULT); + + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, VERBOSE, + "ia_css_buffer_set_set_buffer(): enter:\n"); + + /* + * Set address in buffer set object + */ + buffer_ptr = + (vied_vaddress_t *)( + (char *)buffer_set + + sizeof(ia_css_buffer_set_t) + + terminal_index * sizeof(vied_vaddress_t)); + *buffer_ptr = buffer; + + ret = 0; + +EXIT: + if (haserror(EFAULT)) { + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, ERROR, + "ia_css_buffer_set_set_buffer: invalid argument\n"); + } + return ret; +} + +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_C +vied_vaddress_t ia_css_buffer_set_get_buffer( + const ia_css_buffer_set_t *buffer_set, + const ia_css_terminal_t *terminal) +{ + DECLARE_ERRVAL + vied_vaddress_t buffer = VIED_NULL; + vied_vaddress_t *buffer_ptr; + int terminal_index; + + verifexitval(buffer_set != NULL, EFAULT); + verifexitval(terminal != NULL, EFAULT); + + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, VERBOSE, + "ia_css_buffer_set_get_buffer(): enter:\n"); + + /* + * Retrieve terminal index from terminal object + */ + terminal_index = ia_css_terminal_get_terminal_index(terminal); + verifexitval(terminal_index >= 0, EFAULT); + verifexitval(terminal_index < buffer_set->terminal_count, EFAULT); + + /* + * Retrieve address from buffer set object + */ + buffer_ptr = + (vied_vaddress_t *)( + (char *)buffer_set + + sizeof(ia_css_buffer_set_t) + + terminal_index * sizeof(vied_vaddress_t)); + buffer = *buffer_ptr; + +EXIT: + 
if (haserror(EFAULT)) { + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, ERROR, + "ia_css_buffer_set_get_buffer: invalid argument\n"); + } + return buffer; +} + +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_C +int ia_css_buffer_set_set_ipu_address( + ia_css_buffer_set_t *buffer_set, + const vied_vaddress_t ipu_vaddress) +{ + DECLARE_ERRVAL + int ret = -1; + + verifexitval(buffer_set != NULL, EFAULT); + + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, VERBOSE, + "ia_css_buffer_set_set_ipu_address(): enter:\n"); + + buffer_set->ipu_virtual_address = ipu_vaddress; + + ret = 0; + +EXIT: + if (haserror(EFAULT)) { + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, ERROR, + "ia_css_buffer_set_set_ipu_address invalid argument\n"); + } + return ret; +} + +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_C +vied_vaddress_t ia_css_buffer_set_get_ipu_address( + const ia_css_buffer_set_t *buffer_set) +{ + DECLARE_ERRVAL + vied_vaddress_t ipu_virtual_address = VIED_NULL; + + verifexitval(buffer_set != NULL, EFAULT); + + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, VERBOSE, + "ia_css_buffer_set_get_ipu_address(): enter:\n"); + + ipu_virtual_address = buffer_set->ipu_virtual_address; + +EXIT: + if (haserror(EFAULT)) { + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, ERROR, + "ia_css_buffer_set_get_ipu_address: invalid argument\n"); + } + return ipu_virtual_address; +} + +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_C +int ia_css_buffer_set_set_process_group_handle( + ia_css_buffer_set_t *buffer_set, + const vied_vaddress_t process_group_handle) +{ + DECLARE_ERRVAL + int ret = -1; + + verifexitval(buffer_set != NULL, EFAULT); + + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, VERBOSE, + "ia_css_buffer_set_set_process_group_context(): enter:\n"); + + buffer_set->process_group_handle = process_group_handle; + + ret = 0; + +EXIT: + if (haserror(EFAULT)) { + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, ERROR, + "ia_css_buffer_set_set_process_group_context invalid argument\n"); + } + return ret; +} + +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_C +vied_vaddress_t ia_css_buffer_set_get_process_group_handle( + const ia_css_buffer_set_t 
*buffer_set) +{ + DECLARE_ERRVAL + vied_vaddress_t process_group_handle = VIED_NULL; + + verifexitval(buffer_set != NULL, EFAULT); + + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, VERBOSE, + "ia_css_buffer_set_get_process_group_handle(): enter:\n"); + + process_group_handle = buffer_set->process_group_handle; + +EXIT: + if (haserror(EFAULT)) { + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, ERROR, + "ia_css_buffer_set_get_process_group_handle: invalid argument\n"); + } + return process_group_handle; +} + +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_C +int ia_css_buffer_set_set_token( + ia_css_buffer_set_t *buffer_set, + const uint64_t token) +{ + DECLARE_ERRVAL + int ret = -1; + + verifexitval(buffer_set != NULL, EFAULT); + + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, VERBOSE, + "ia_css_buffer_set_set_token(): enter:\n"); + + buffer_set->token = token; + + ret = 0; + +EXIT: + if (haserror(EFAULT)) { + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, ERROR, + "ia_css_buffer_set_set_token invalid argument\n"); + } + return ret; +} + +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_C +uint64_t ia_css_buffer_set_get_token( + const ia_css_buffer_set_t *buffer_set) +{ + DECLARE_ERRVAL + uint64_t token = 0; + + verifexitval(buffer_set != NULL, EFAULT); + + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, VERBOSE, + "ia_css_buffer_set_get_token(): enter:\n"); + + token = buffer_set->token; + +EXIT: + if (haserror(EFAULT)) { + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, ERROR, + "ia_css_buffer_set_get_token: invalid argument\n"); + } + return token; +} + +#endif /* __IA_CSS_PSYS_BUFFER_SET_IMPL_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/psysapi/dynamic/src/ia_css_psys_process.c b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/psysapi/dynamic/src/ia_css_psys_process.c new file mode 100644 index 0000000000000..cca0fa73fb374 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/psysapi/dynamic/src/ia_css_psys_process.c @@ -0,0 +1,1147 @@ +/* +* Support for Intel Camera Imaging ISP subsystem. 
+ * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ + +#include "ia_css_psys_process.h" +#include "ia_css_psys_dynamic_storage_class.h" +#include "ia_css_psys_process_private_types.h" +#include /* for NOT_USED */ + +/* + * Functions to possibly inline + */ + +#ifndef __IA_CSS_PSYS_DYNAMIC_INLINE__ +#include "ia_css_psys_process_impl.h" +#endif /* __IA_CSS_PSYS_DYNAMIC_INLINE__ */ + +/* + * Functions not to inline + */ + +/* This source file is created with the intention of sharing and + * compiled for host and firmware. Since there is no native 64bit + * data type support for firmware this wouldn't compile for SP + * tile. The part of the file that is not compilable are marked + * with the following __HIVECC marker and this comment. Once we + * come up with a solution to address this issue this will be + * removed. 
+ */ +#if !defined(__HIVECC) +size_t ia_css_sizeof_process( + const ia_css_program_manifest_t *manifest, + const ia_css_program_param_t *param) +{ + size_t size = 0, tmp_size; + + uint8_t program_dependency_count; + uint8_t terminal_dependency_count; + + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, VERBOSE, + "ia_css_sizeof_process(): enter:\n"); + + COMPILATION_ERROR_IF( + SIZE_OF_PROCESS_STRUCT_BITS != + (CHAR_BIT * sizeof(ia_css_process_t))); + + COMPILATION_ERROR_IF(0 != sizeof(ia_css_process_t)%sizeof(uint64_t)); + + verifexit(manifest != NULL); + verifexit(param != NULL); + + size += sizeof(ia_css_process_t); + + program_dependency_count = + ia_css_program_manifest_get_program_dependency_count(manifest); + terminal_dependency_count = + ia_css_program_manifest_get_terminal_dependency_count(manifest); + + tmp_size = program_dependency_count*sizeof(vied_nci_resource_id_t); + size += tot_bytes_for_pow2_align(sizeof(uint64_t), tmp_size); + tmp_size = terminal_dependency_count*sizeof(uint8_t); + size += tot_bytes_for_pow2_align(sizeof(uint64_t), tmp_size); + +EXIT: + if (NULL == manifest || NULL == param) { + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, WARNING, + "ia_css_sizeof_process invalid argument\n"); + } + return size; +} + +ia_css_process_t *ia_css_process_create( + void *raw_mem, + const ia_css_program_manifest_t *manifest, + const ia_css_program_param_t *param, + const uint32_t program_idx) +{ + size_t tmp_size; + int retval = -1; + ia_css_process_t *process = NULL; + char *process_raw_ptr = (char *) raw_mem; + + /* size_t size = ia_css_sizeof_process(manifest, param); */ + uint8_t program_dependency_count; + uint8_t terminal_dependency_count; + + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, INFO, + "ia_css_process_create(): enter:\n"); + + verifexit(manifest != NULL); + verifexit(param != NULL); + verifexit(process_raw_ptr != NULL); + + process = (ia_css_process_t *) process_raw_ptr; + verifexit(process != NULL); + + process->kernel_bitmap = + 
ia_css_program_manifest_get_kernel_bitmap(manifest); + process->state = IA_CSS_PROCESS_CREATED; + + program_dependency_count = + ia_css_program_manifest_get_program_dependency_count(manifest); + terminal_dependency_count = + ia_css_program_manifest_get_terminal_dependency_count(manifest); + + /* A process requires at least one input or output */ + verifexit((program_dependency_count + + terminal_dependency_count) != 0); + + process_raw_ptr += sizeof(ia_css_process_t); + if (program_dependency_count != 0) { + process->cell_dependencies_offset = + (uint16_t) (process_raw_ptr - (char *)process); + tmp_size = + program_dependency_count * sizeof(vied_nci_resource_id_t); + process_raw_ptr += + tot_bytes_for_pow2_align(sizeof(uint64_t), tmp_size); + } else { + process->cell_dependencies_offset = 0; + } + + if (terminal_dependency_count != 0) { + process->terminal_dependencies_offset = + (uint16_t) (process_raw_ptr - (char *)process); + } + + process->size = (uint32_t)ia_css_sizeof_process(manifest, param); + + process->ID = ia_css_program_manifest_get_program_ID(manifest); + verifexit(process->ID != 0); + process->program_idx = program_idx; + + process->cell_dependency_count = program_dependency_count; + process->terminal_dependency_count = terminal_dependency_count; + + process->parent_offset = 0; + + verifexit(ia_css_process_clear_all(process) == 0); + + process->state = IA_CSS_PROCESS_READY; + retval = 0; + + IA_CSS_TRACE_2(PSYSAPI_DYNAMIC, INFO, + "ia_css_process_create(): Created successfully process %p ID 0x%x\n", + process, process->ID); + +EXIT: + if (NULL == manifest || NULL == param) { + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, WARNING, + "ia_css_process_create invalid argument\n"); + } + if (retval != 0) { + IA_CSS_TRACE_1(PSYSAPI_DYNAMIC, ERROR, + "ia_css_process_create failed (%i)\n", retval); + process = ia_css_process_destroy(process); + } + return process; +} + +ia_css_process_t *ia_css_process_destroy( + ia_css_process_t *process) +{ + + return process; +} +#endif 
+ +int ia_css_process_set_cell( + ia_css_process_t *process, + const vied_nci_cell_ID_t cell_id) +{ + int retval = -1; + vied_nci_resource_bitmap_t bit_mask; + vied_nci_resource_bitmap_t resource_bitmap; + ia_css_process_group_t *parent; + ia_css_process_group_state_t parent_state; + ia_css_process_state_t state; + + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, VERBOSE, + "ia_css_process_set_cell(): enter:\n"); + + verifexit(process != NULL); + + parent = ia_css_process_get_parent(process); + + verifexit(parent != NULL); + + parent_state = ia_css_process_group_get_state(parent); + state = ia_css_process_get_state(process); + +/* Some programs are mapped on a fixed cell, + * when the process group is created + */ + verifexit(((parent_state == IA_CSS_PROCESS_GROUP_BLOCKED) || + (parent_state == IA_CSS_PROCESS_GROUP_STARTED) || + (parent_state == IA_CSS_PROCESS_GROUP_CREATED) || + /* If the process group has already been created, but no VP cell + * has been assigned to this process (i.e. not fixed in + * manifest), then we need to set the cell of this process + * while its parent state is READY (the ready state is set at + * the end of ia_css_process_group_create) + */ + (parent_state == IA_CSS_PROCESS_GROUP_READY))); + verifexit(state == IA_CSS_PROCESS_READY); + +/* Some programs are mapped on a fixed cell, thus check is not secure, + * but it will detect a preset, the process manager will do the secure check + */ + verifexit(ia_css_process_get_cell(process) == + VIED_NCI_N_CELL_ID); + + bit_mask = vied_nci_cell_bit_mask(cell_id); + resource_bitmap = ia_css_process_group_get_resource_bitmap(parent); + + verifexit(bit_mask != 0); + verifexit(vied_nci_is_bitmap_clear(bit_mask, resource_bitmap)); + + ia_css_process_cells_clear(process); + ia_css_process_cells_set_cell(process, 0, cell_id); + + resource_bitmap = vied_nci_bitmap_set(resource_bitmap, bit_mask); + + retval = ia_css_process_group_set_resource_bitmap( + parent, resource_bitmap); +EXIT: + if (NULL == process) { + 
IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, WARNING, + "ia_css_process_set_cell invalid argument process\n"); + } + if (retval != 0) { + IA_CSS_TRACE_1(PSYSAPI_DYNAMIC, ERROR, + "ia_css_process_set_cell failed (%i)\n", retval); + } + return retval; +} + +int ia_css_process_clear_cell( + ia_css_process_t *process) +{ + int retval = -1; + vied_nci_cell_ID_t cell_id; + ia_css_process_group_t *parent; + vied_nci_resource_bitmap_t resource_bitmap; + vied_nci_resource_bitmap_t bit_mask; + ia_css_process_group_state_t parent_state; + ia_css_process_state_t state; + + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, VERBOSE, + "ia_css_process_clear_cell(): enter:\n"); + verifexit(process != NULL); + + cell_id = ia_css_process_get_cell(process); + parent = ia_css_process_get_parent(process); + + verifexit(parent != NULL); + + parent_state = ia_css_process_group_get_state(parent); + state = ia_css_process_get_state(process); + + verifexit(((parent_state == IA_CSS_PROCESS_GROUP_BLOCKED) + || (parent_state == IA_CSS_PROCESS_GROUP_STARTED))); + verifexit(state == IA_CSS_PROCESS_READY); + + bit_mask = vied_nci_cell_bit_mask(cell_id); + resource_bitmap = ia_css_process_group_get_resource_bitmap(parent); + + verifexit(bit_mask != 0); + verifexit(vied_nci_is_bitmap_set(bit_mask, resource_bitmap)); + + ia_css_process_cells_clear(process); + + resource_bitmap = vied_nci_bitmap_clear(resource_bitmap, bit_mask); + + retval = ia_css_process_group_set_resource_bitmap( + parent, resource_bitmap); +EXIT: + if (NULL == process) { + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, WARNING, + "ia_css_process_clear_cell invalid argument process\n"); + } + if (retval != 0) { + IA_CSS_TRACE_1(PSYSAPI_DYNAMIC, ERROR, + "ia_css_process_clear_cell failed (%i)\n", retval); + } + return retval; +} + +int ia_css_process_set_int_mem( + ia_css_process_t *process, + const vied_nci_mem_type_ID_t mem_type_id, + const vied_nci_resource_size_t offset) +{ + int retval = -1; + ia_css_process_group_t *parent; + vied_nci_cell_ID_t cell_id; + 
ia_css_process_group_state_t parent_state; + ia_css_process_state_t state; + + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, VERBOSE, + "ia_css_process_set_int_mem(): enter:\n"); + + verifexit(process != NULL); + verifexit(mem_type_id < VIED_NCI_N_MEM_TYPE_ID); + + parent = ia_css_process_get_parent(process); + cell_id = ia_css_process_get_cell(process); + + parent_state = ia_css_process_group_get_state(parent); + state = ia_css_process_get_state(process); + + /* TODO : separate process group start and run from + * process_group_exec_cmd() + */ + verifexit(((parent_state == IA_CSS_PROCESS_GROUP_BLOCKED) || + (parent_state == IA_CSS_PROCESS_GROUP_STARTED) || + (parent_state == IA_CSS_PROCESS_GROUP_RUNNING))); + verifexit(state == IA_CSS_PROCESS_READY); + + if (vied_nci_is_cell_mem_of_type(cell_id, mem_type_id, mem_type_id)) { + vied_nci_mem_ID_t mem_id = + vied_nci_cell_get_mem(cell_id, mem_type_id); + + process->int_mem_id[mem_type_id] = mem_id; + process->int_mem_offset[mem_type_id] = offset; + retval = 0; + } +EXIT: + if (retval != 0) { + IA_CSS_TRACE_1(PSYSAPI_DYNAMIC, ERROR, + "ia_css_process_set_int_mem failed (%i)\n", retval); + } + return retval; +} + +int ia_css_process_clear_int_mem( + ia_css_process_t *process, + const vied_nci_mem_type_ID_t mem_type_id) +{ + int retval = -1; + uint16_t mem_index; + ia_css_process_group_t *parent; + vied_nci_cell_ID_t cell_id; + ia_css_process_group_state_t parent_state; + ia_css_process_state_t state; + + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, VERBOSE, + "ia_css_process_clear_int_mem(): enter:\n"); + + verifexit(process != NULL); + verifexit(mem_type_id < VIED_NCI_N_MEM_TYPE_ID); + + parent = ia_css_process_get_parent(process); + cell_id = ia_css_process_get_cell(process); + + /* We should have a check on NULL != parent but it parent is NULL + * ia_css_process_group_get_state will return + * IA_CSS_N_PROCESS_GROUP_STATES so it will be filtered anyway later. 
+ */ + + /* verifexit(parent != NULL); */ + + parent_state = ia_css_process_group_get_state(parent); + state = ia_css_process_get_state(process); + + verifexit(((parent_state == IA_CSS_PROCESS_GROUP_BLOCKED) + || (parent_state == IA_CSS_PROCESS_GROUP_STARTED))); + verifexit(state == IA_CSS_PROCESS_READY); + +/* We could just clear the field, but lets check the state for + * consistency first + */ + for (mem_index = 0; mem_index < (int)VIED_NCI_N_MEM_TYPE_ID; + mem_index++) { + if (vied_nci_is_cell_mem_of_type( + cell_id, mem_index, mem_type_id)) { + vied_nci_mem_ID_t mem_id = + vied_nci_cell_get_mem(cell_id, mem_index); + int mem_of_type; + + mem_of_type = + vied_nci_is_mem_of_type(mem_id, mem_type_id); + + assert(mem_of_type); + assert((process->int_mem_id[mem_type_id] == mem_id) || + (process->int_mem_id[mem_type_id] == + VIED_NCI_N_MEM_ID)); + process->int_mem_id[mem_type_id] = VIED_NCI_N_MEM_ID; + process->int_mem_offset[mem_type_id] = + IA_CSS_PROCESS_INVALID_OFFSET; + retval = 0; + } + } + +EXIT: + if (NULL == process || mem_type_id >= VIED_NCI_N_MEM_TYPE_ID) { + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, WARNING, + "ia_css_process_clear_int_mem invalid argument\n"); + } + if (retval != 0) { + IA_CSS_TRACE_1(PSYSAPI_DYNAMIC, ERROR, + "ia_css_process_clear_int_mem failed (%i)\n", retval); + } +return retval; +} + +int ia_css_process_set_ext_mem( + ia_css_process_t *process, + const vied_nci_mem_ID_t mem_id, + const vied_nci_resource_size_t offset) +{ + int retval = -1; + ia_css_process_group_t *parent; + vied_nci_cell_ID_t cell_id; + ia_css_process_group_state_t parent_state; + ia_css_process_state_t state; + vied_nci_mem_type_ID_t mem_type_id; + + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, VERBOSE, + "ia_css_process_set_ext_mem(): enter:\n"); + + verifexit(process != NULL); + + parent = ia_css_process_get_parent(process); + cell_id = ia_css_process_get_cell(process); + + /* We should have a check on NULL != parent but it parent is NULL + * ia_css_process_group_get_state will 
return + * IA_CSS_N_PROCESS_GROUP_STATES so it will be filtered anyway later. + */ + + /* verifexit(parent != NULL); */ + + parent_state = ia_css_process_group_get_state(parent); + state = ia_css_process_get_state(process); + + /* TODO : separate process group start and run from + * process_group_exec_cmd() + */ + verifexit(((parent_state == IA_CSS_PROCESS_GROUP_BLOCKED) || + (parent_state == IA_CSS_PROCESS_GROUP_STARTED) || + (parent_state == IA_CSS_PROCESS_GROUP_RUNNING))); + verifexit(state == IA_CSS_PROCESS_READY); + + /* Check that the memory actually exists, "vied_nci_has_cell_mem_of_id()" + * will return false on error + */ + + mem_type_id = vied_nci_mem_get_type(mem_id); + if (((!vied_nci_has_cell_mem_of_id(cell_id, mem_id) && + (mem_type_id != VIED_NCI_PMEM_TYPE_ID)) + || vied_nci_mem_is_ext_type(mem_type_id)) && + (mem_id < VIED_NCI_N_MEM_ID)) { + + verifexit(mem_type_id < VIED_NCI_N_DATA_MEM_TYPE_ID); + process->ext_mem_id[mem_type_id] = mem_id; + process->ext_mem_offset[mem_type_id] = offset; + retval = 0; + } + +EXIT: + if (NULL == process) { + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, WARNING, + "ia_css_process_set_ext_mem invalid argument process\n"); + } + if (retval != 0) { + IA_CSS_TRACE_1(PSYSAPI_DYNAMIC, ERROR, + "ia_css_process_set_ext_mem failed (%i)\n", retval); + } + return retval; +} + +int ia_css_process_clear_ext_mem( + ia_css_process_t *process, + const vied_nci_mem_type_ID_t mem_type_id) +{ + int retval = -1; + ia_css_process_group_t *parent; + ia_css_process_group_state_t parent_state; + ia_css_process_state_t state; + + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, VERBOSE, + "ia_css_process_clear_ext_mem(): enter:\n"); + + verifexit(process != NULL); + verifexit(mem_type_id < VIED_NCI_N_DATA_MEM_TYPE_ID); + + parent = ia_css_process_get_parent(process); + state = ia_css_process_get_state(process); + + verifexit(parent != NULL); + verifexit(state == IA_CSS_PROCESS_READY); + + parent_state = ia_css_process_group_get_state(parent); + + verifexit(((parent_state 
== IA_CSS_PROCESS_GROUP_BLOCKED) || + (parent_state == IA_CSS_PROCESS_GROUP_STARTED))); + + process->ext_mem_id[mem_type_id] = VIED_NCI_N_MEM_ID; + process->ext_mem_offset[mem_type_id] = IA_CSS_PROCESS_INVALID_OFFSET; + + retval = 0; +EXIT: + if (NULL == process || mem_type_id >= VIED_NCI_N_DATA_MEM_TYPE_ID) { + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, WARNING, + "ia_css_process_clear_ext_mem invalid argument\n"); + } + if (retval != 0) { + IA_CSS_TRACE_1(PSYSAPI_DYNAMIC, ERROR, + "ia_css_process_clear_ext_mem failed (%i)\n", retval); + } + return retval; +} + +int ia_css_process_set_cells_bitmap( + ia_css_process_t *process, + const vied_nci_resource_bitmap_t bitmap) +{ + int retval = -1; + ia_css_process_group_t *parent; + ia_css_process_group_state_t parent_state; + ia_css_process_state_t state; + int array_index = 0; + int bit_index; + + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, VERBOSE, + "ia_css_process_set_cells_bitmap(): enter:\n"); + + verifexit(process != NULL); + parent = ia_css_process_get_parent(process); + state = ia_css_process_get_state(process); + + parent_state = ia_css_process_group_get_state(parent); + + verifexit(((parent_state == IA_CSS_PROCESS_GROUP_BLOCKED) || + (parent_state == IA_CSS_PROCESS_GROUP_STARTED) || + (parent_state == IA_CSS_PROCESS_GROUP_CREATED) || + (parent_state == IA_CSS_PROCESS_GROUP_READY))); + verifexit(state == IA_CSS_PROCESS_READY); + + for (bit_index = 0; bit_index < VIED_NCI_N_CELL_ID; bit_index++) { + if (vied_nci_is_bit_set_in_bitmap(bitmap, bit_index)) { + verifexit(array_index < IA_CSS_PROCESS_MAX_CELLS); + ia_css_process_cells_set_cell(process, + array_index, (vied_nci_cell_ID_t)bit_index); + array_index++; + } + } + for (; array_index < IA_CSS_PROCESS_MAX_CELLS; array_index++) { + ia_css_process_cells_set_cell(process, + array_index, VIED_NCI_N_CELL_ID); + } + + retval = 0; +EXIT: + if (NULL == process) { + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, WARNING, + "ia_css_process_set_cells_bitmap invalid argument\n"); + } + if (retval != 0) { + 
IA_CSS_TRACE_1(PSYSAPI_DYNAMIC, ERROR, + "ia_css_process_set_cells_bitmap failed (%i)\n", retval); + } + return retval; +} + +int ia_css_process_set_dev_chn( + ia_css_process_t *process, + const vied_nci_dev_chn_ID_t dev_chn_id, + const vied_nci_resource_size_t offset) +{ + int retval = -1; + ia_css_process_group_t *parent; + ia_css_process_group_state_t parent_state; + ia_css_process_state_t state; + + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, VERBOSE, + "ia_css_process_set_dev_chn(): enter:\n"); + + verifexit(process != NULL); + verifexit(dev_chn_id <= VIED_NCI_N_DEV_CHN_ID); + + parent = ia_css_process_get_parent(process); + state = ia_css_process_get_state(process); + + parent_state = ia_css_process_group_get_state(parent); + + /* TODO : separate process group start and run from + * process_group_exec_cmd() + */ + verifexit(((parent_state == IA_CSS_PROCESS_GROUP_BLOCKED) || + (parent_state == IA_CSS_PROCESS_GROUP_STARTED) || + (parent_state == IA_CSS_PROCESS_GROUP_RUNNING))); + verifexit(state == IA_CSS_PROCESS_READY); + + process->dev_chn_offset[dev_chn_id] = offset; + + retval = 0; +EXIT: + if (NULL == process || dev_chn_id >= VIED_NCI_N_DEV_CHN_ID) { + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, WARNING, + "ia_css_process_set_dev_chn invalid argument\n"); + } + if (retval != 0) { + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, ERROR, + "ia_css_process_set_dev_chn invalid argument\n"); + } + return retval; +} + +int ia_css_process_set_dfm_port_bitmap( + ia_css_process_t *process, + const vied_nci_dev_dfm_id_t dfm_dev_id, + const vied_nci_resource_bitmap_t bitmap) +{ + int retval = -1; + ia_css_process_group_t *parent; + ia_css_process_group_state_t parent_state; + ia_css_process_state_t state; + + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, VERBOSE, + "ia_css_process_set_dfm_port(): enter:\n"); + + verifexit(process != NULL); + + parent = ia_css_process_get_parent(process); + state = ia_css_process_get_state(process); + + parent_state = ia_css_process_group_get_state(parent); + + /* TODO : separate process 
group start and run from + * process_group_exec_cmd() + */ + verifexit(((parent_state == IA_CSS_PROCESS_GROUP_BLOCKED) || + (parent_state == IA_CSS_PROCESS_GROUP_STARTED) || + (parent_state == IA_CSS_PROCESS_GROUP_RUNNING))); + verifexit(state == IA_CSS_PROCESS_READY); + +#if (VIED_NCI_N_DEV_DFM_ID > 0) + verifexit(dfm_dev_id <= VIED_NCI_N_DEV_DFM_ID); + process->dfm_port_bitmap[dfm_dev_id] = bitmap; +#else + (void)bitmap; + (void)dfm_dev_id; +#endif + retval = 0; +EXIT: + if (retval != 0) { + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, ERROR, + "ia_css_process_set_dfm_port invalid argument\n"); + } + return retval; +} + +int ia_css_process_set_dfm_active_port_bitmap( + ia_css_process_t *process, + const vied_nci_dev_dfm_id_t dfm_dev_id, + const vied_nci_resource_bitmap_t bitmap) +{ + int retval = -1; + ia_css_process_group_t *parent; + ia_css_process_group_state_t parent_state; + ia_css_process_state_t state; + + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, VERBOSE, + "ia_css_process_set_dfm_active_port_bitmap(): enter:\n"); + + verifexit(process != NULL); + + parent = ia_css_process_get_parent(process); + state = ia_css_process_get_state(process); + + parent_state = ia_css_process_group_get_state(parent); + + /* TODO : separate process group start and run from + * process_group_exec_cmd() + */ + verifexit(((parent_state == IA_CSS_PROCESS_GROUP_BLOCKED) || + (parent_state == IA_CSS_PROCESS_GROUP_STARTED) || + (parent_state == IA_CSS_PROCESS_GROUP_RUNNING))); + verifexit(state == IA_CSS_PROCESS_READY); +#if (VIED_NCI_N_DEV_DFM_ID > 0) + verifexit(dfm_dev_id <= VIED_NCI_N_DEV_DFM_ID); + process->dfm_active_port_bitmap[dfm_dev_id] = bitmap; +#else + (void)bitmap; + (void)dfm_dev_id; +#endif + retval = 0; +EXIT: + if (retval != 0) { + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, WARNING, + "ia_css_process_set_dfm_active_port_bitmap invalid argument\n"); + } + return retval; +} + +int ia_css_process_clear_dev_chn( + ia_css_process_t *process, + const vied_nci_dev_chn_ID_t dev_chn_id) +{ + int retval = -1; 
+ ia_css_process_group_t *parent; + ia_css_process_group_state_t parent_state; + ia_css_process_state_t state; + + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, VERBOSE, + "ia_css_process_clear_dev_chn(): enter:\n"); + + verifexit(process != NULL); + + parent = ia_css_process_get_parent(process); + + /* We should have a check on NULL != parent but it parent is NULL + * ia_css_process_group_get_state will return + * IA_CSS_N_PROCESS_GROUP_STATES so it will be filtered anyway later. + */ + + /* verifexit(parent != NULL); */ + + parent_state = ia_css_process_group_get_state(parent); + state = ia_css_process_get_state(process); + + verifexit(((parent_state == IA_CSS_PROCESS_GROUP_BLOCKED) + || (parent_state == IA_CSS_PROCESS_GROUP_STARTED))); + verifexit(state == IA_CSS_PROCESS_READY); + + verifexit(dev_chn_id <= VIED_NCI_N_DEV_CHN_ID); + + process->dev_chn_offset[dev_chn_id] = IA_CSS_PROCESS_INVALID_OFFSET; + + retval = 0; +EXIT: + if (NULL == process) { + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, WARNING, + "ia_css_process_clear_dev_chn invalid argument process\n"); + } + if (retval != 0) { + IA_CSS_TRACE_1(PSYSAPI_DYNAMIC, ERROR, + "ia_css_process_clear_dev_chn failed (%i)\n", retval); + } + return retval; +} + +int ia_css_process_clear_all( + ia_css_process_t *process) +{ + int retval = -1; + ia_css_process_group_t *parent; + ia_css_process_group_state_t parent_state; + ia_css_process_state_t state; + int mem_index; + int dev_chn_index; + + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, VERBOSE, + "ia_css_process_clear_all(): enter:\n"); + + verifexit(process != NULL); + + parent = ia_css_process_get_parent(process); + state = ia_css_process_get_state(process); + + /* We should have a check on NULL != parent but it parent is NULL + * ia_css_process_group_get_state will return + * IA_CSS_N_PROCESS_GROUP_STATES so it will be filtered anyway later. 
+ */ + + /* verifexit(parent != NULL); */ + + parent_state = ia_css_process_group_get_state(parent); + +/* Resource clear can only be called in excluded states contrary to set */ + verifexit((parent_state != IA_CSS_PROCESS_GROUP_RUNNING) || + (parent_state == IA_CSS_N_PROCESS_GROUP_STATES)); + verifexit((state == IA_CSS_PROCESS_CREATED) || + (state == IA_CSS_PROCESS_READY)); + + for (dev_chn_index = 0; dev_chn_index < VIED_NCI_N_DEV_CHN_ID; + dev_chn_index++) { + process->dev_chn_offset[dev_chn_index] = + IA_CSS_PROCESS_INVALID_OFFSET; + } +/* No difference whether a cell_id has been set or not, clear all */ + for (mem_index = 0; mem_index < VIED_NCI_N_DATA_MEM_TYPE_ID; + mem_index++) { + process->ext_mem_id[mem_index] = VIED_NCI_N_MEM_ID; + process->ext_mem_offset[mem_index] = + IA_CSS_PROCESS_INVALID_OFFSET; + } + for (mem_index = 0; mem_index < VIED_NCI_N_MEM_TYPE_ID; mem_index++) { + process->int_mem_id[mem_index] = VIED_NCI_N_MEM_ID; + process->int_mem_offset[mem_index] = + IA_CSS_PROCESS_INVALID_OFFSET; + } + + ia_css_process_cells_clear(process); + + retval = 0; +EXIT: + if (NULL == process) { + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, WARNING, + "ia_css_process_clear_all invalid argument process\n"); + } + if (retval != 0) { + IA_CSS_TRACE_1(PSYSAPI_DYNAMIC, ERROR, + "ia_css_process_clear_all failed (%i)\n", retval); + } + return retval; +} + +int ia_css_process_acquire( + ia_css_process_t *process) +{ + int retval = -1; + + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, VERBOSE, + "ia_css_process_acquire(): enter:\n"); + + verifexit(process != NULL); + + retval = 0; +EXIT: + if (NULL == process) { + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, WARNING, + "ia_css_process_acquire invalid argument process\n"); + } + if (retval != 0) { + IA_CSS_TRACE_1(PSYSAPI_DYNAMIC, ERROR, + "ia_css_process_acquire failed (%i)\n", retval); + } + return retval; +} + +int ia_css_process_release( + ia_css_process_t *process) +{ + int retval = -1; + + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, INFO, + 
"ia_css_process_release(): enter:\n"); + + verifexit(process != NULL); + + retval = 0; +EXIT: + if (NULL == process) { + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, WARNING, + "ia_css_process_t invalid argument process\n"); + } + if (retval != 0) { + IA_CSS_TRACE_1(PSYSAPI_DYNAMIC, ERROR, + "ia_css_process_release failed (%i)\n", retval); + } + return retval; +} + +int ia_css_process_print(const ia_css_process_t *process, void *fid) +{ + int retval = -1; + int i, dev_chn_index; + uint16_t mem_index; + uint8_t cell_dependency_count, terminal_dependency_count; + vied_nci_cell_ID_t cell_id = ia_css_process_get_cell(process); + NOT_USED(fid); + + IA_CSS_TRACE_1(PSYSAPI_DYNAMIC, INFO, + "ia_css_process_print(process %p): enter:\n", process); + + verifexit(process != NULL); + + IA_CSS_TRACE_6(PSYSAPI_DYNAMIC, INFO, + "\tprocess %p, sizeof %d, programID %d, state %d, parent %p, cell %d\n", + process, + (int)ia_css_process_get_size(process), + (int)ia_css_process_get_program_ID(process), + (int)ia_css_process_get_state(process), + (void *)ia_css_process_get_parent(process), + (int)ia_css_process_get_cell(process)); + + for (mem_index = 0; mem_index < (int)VIED_NCI_N_MEM_TYPE_ID; + mem_index++) { + vied_nci_mem_ID_t mem_id = + (vied_nci_mem_ID_t)(process->int_mem_id[mem_index]); + if (cell_id == VIED_NCI_N_CELL_ID) { + verifexit(mem_id == VIED_NCI_N_MEM_ID); + continue; + } + verifexit(((mem_id == vied_nci_cell_get_mem(cell_id, mem_index)) + || (mem_id == VIED_NCI_N_MEM_ID))); + + IA_CSS_TRACE_4(PSYSAPI_DYNAMIC, INFO, + "\tinternal index %d, type %d, id %d offset 0x%x\n", + mem_index, + (int)vied_nci_cell_get_mem_type(cell_id, mem_index), + (int)mem_id, + process->int_mem_offset[mem_index]); + } + + for (mem_index = 0; mem_index < (int)VIED_NCI_N_DATA_MEM_TYPE_ID; + mem_index++) { + vied_nci_mem_ID_t mem_id = + (vied_nci_mem_ID_t)(process->ext_mem_id[mem_index]); + /* TODO: in case of an cells_bitmap = [], + * vied_nci_cell_get_mem_type will return a wrong result. 
+ */ + IA_CSS_TRACE_4(PSYSAPI_DYNAMIC, INFO, + "\texternal index %d, type %d, id %d offset 0x%x\n", + mem_index, + (int)vied_nci_cell_get_mem_type(cell_id, mem_index), + (int)mem_id, + process->ext_mem_offset[mem_index]); + NOT_USED(mem_id); + } + for (dev_chn_index = 0; dev_chn_index < (int)VIED_NCI_N_DEV_CHN_ID; + dev_chn_index++) { + IA_CSS_TRACE_3(PSYSAPI_DYNAMIC, INFO, + "\tdevice channel index %d, type %d, offset 0x%x\n", + dev_chn_index, + (int)dev_chn_index, + process->dev_chn_offset[dev_chn_index]); + } +#if HAS_DFM + for (dev_chn_index = 0; dev_chn_index < (int)VIED_NCI_N_DEV_DFM_ID; + dev_chn_index++) { + IA_CSS_TRACE_4(PSYSAPI_DYNAMIC, INFO, + "\tdfm device index %d, type %d, bitmap 0x%x active_ports_bitmap 0x%x\n", + dev_chn_index, dev_chn_index, + process->dfm_port_bitmap[dev_chn_index], + process->dfm_active_port_bitmap[dev_chn_index]); + } +#endif + + for (i = 0; i < IA_CSS_PROCESS_MAX_CELLS; i++) { + IA_CSS_TRACE_2(PSYSAPI_DYNAMIC, INFO, + "\tcells[%d] = 0x%x\n", + i, ia_css_process_cells_get_cell(process, i)); + } + + cell_dependency_count = + ia_css_process_get_cell_dependency_count(process); + if (cell_dependency_count == 0) { + IA_CSS_TRACE_1(PSYSAPI_DYNAMIC, INFO, + "\tcell_dependencies[%d] {};\n", cell_dependency_count); + } else { + vied_nci_resource_id_t cell_dependency; + + IA_CSS_TRACE_1(PSYSAPI_DYNAMIC, INFO, + "\tcell_dependencies[%d] {", cell_dependency_count); + for (i = 0; i < (int)cell_dependency_count - 1; i++) { + cell_dependency = + ia_css_process_get_cell_dependency(process, i); + IA_CSS_TRACE_1(PSYSAPI_DYNAMIC, INFO, + "%4d, ", cell_dependency); + } + cell_dependency = + ia_css_process_get_cell_dependency(process, i); + IA_CSS_TRACE_1(PSYSAPI_DYNAMIC, INFO, + "%4d}\n", cell_dependency); + (void)cell_dependency; + } + + terminal_dependency_count = + ia_css_process_get_terminal_dependency_count(process); + if (terminal_dependency_count == 0) { + IA_CSS_TRACE_1(PSYSAPI_DYNAMIC, INFO, + "\tterminal_dependencies[%d] {};\n", + 
terminal_dependency_count); + } else { + uint8_t terminal_dependency; + + terminal_dependency_count = + ia_css_process_get_terminal_dependency_count(process); + IA_CSS_TRACE_1(PSYSAPI_DYNAMIC, INFO, + "\tterminal_dependencies[%d] {", + terminal_dependency_count); + for (i = 0; i < (int)terminal_dependency_count - 1; i++) { + terminal_dependency = + ia_css_process_get_terminal_dependency(process, i); + IA_CSS_TRACE_1(PSYSAPI_DYNAMIC, INFO, + "%4d, ", terminal_dependency); + } + terminal_dependency = + ia_css_process_get_terminal_dependency(process, i); + IA_CSS_TRACE_1(PSYSAPI_DYNAMIC, INFO, + "%4d}\n", terminal_dependency); + (void)terminal_dependency; + } + + retval = 0; +EXIT: + if (NULL == process) { + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, WARNING, + "ia_css_process_print invalid argument process\n"); + } + if (retval != 0) { + IA_CSS_TRACE_1(PSYSAPI_DYNAMIC, ERROR, + "ia_css_process_print failed (%i)\n", retval); + } + return retval; +} + +int ia_css_process_set_parent( + ia_css_process_t *process, + ia_css_process_group_t *parent) +{ + int retval = -1; + + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, VERBOSE, + "ia_css_process_set_parent(): enter:\n"); + + verifexit(process != NULL); + verifexit(parent != NULL); + + process->parent_offset = (uint16_t) ((char *)parent - (char *)process); + retval = 0; +EXIT: + if (NULL == process || NULL == parent) { + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, WARNING, + "ia_css_process_set_parent invalid argument\n"); + } + if (retval != 0) { + IA_CSS_TRACE_1(PSYSAPI_DYNAMIC, ERROR, + "ia_css_process_set_parent failed (%i)\n", retval); + } + return retval; +} + +int ia_css_process_set_cell_dependency( + const ia_css_process_t *process, + const unsigned int dep_index, + const vied_nci_resource_id_t id) +{ + int retval = -1; + uint8_t *process_dep_ptr; + + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, VERBOSE, + "ia_css_process_set_cell_dependency(): enter:\n"); + verifexit(process != NULL); + + process_dep_ptr = + (uint8_t *)process + process->cell_dependencies_offset + 
+ dep_index*sizeof(vied_nci_resource_id_t); + + + *process_dep_ptr = id; + retval = 0; +EXIT: + return retval; +} + +int ia_css_process_set_terminal_dependency( + const ia_css_process_t *process, + const unsigned int dep_index, + const vied_nci_resource_id_t id) +{ + int retval = -1; + uint8_t *terminal_dep_ptr; + + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, VERBOSE, + "ia_css_process_set_terminal_dependency(): enter:\n"); + verifexit(process != NULL); + verifexit(ia_css_process_get_terminal_dependency_count(process) > dep_index); + + terminal_dep_ptr = + (uint8_t *)process + process->terminal_dependencies_offset + + dep_index*sizeof(uint8_t); + + *terminal_dep_ptr = id; + retval = 0; +EXIT: + return retval; +} + +int ia_css_process_cmd( + ia_css_process_t *process, + const ia_css_process_cmd_t cmd) +{ + int retval = -1; + ia_css_process_state_t state; + + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, INFO, "ia_css_process_cmd(): enter:\n"); + + verifexit(process != NULL); + + state = ia_css_process_get_state(process); + + verifexit(state != IA_CSS_PROCESS_ERROR); + verifexit(state < IA_CSS_N_PROCESS_STATES); + + switch (cmd) { + case IA_CSS_PROCESS_CMD_NOP: + break; + case IA_CSS_PROCESS_CMD_ACQUIRE: + verifexit(state == IA_CSS_PROCESS_READY); + break; + case IA_CSS_PROCESS_CMD_RELEASE: + verifexit(state == IA_CSS_PROCESS_READY); + break; + case IA_CSS_PROCESS_CMD_START: + verifexit((state == IA_CSS_PROCESS_READY) + || (state == IA_CSS_PROCESS_STOPPED)); + process->state = IA_CSS_PROCESS_STARTED; + break; + case IA_CSS_PROCESS_CMD_LOAD: + verifexit(state == IA_CSS_PROCESS_STARTED); + process->state = IA_CSS_PROCESS_RUNNING; + break; + case IA_CSS_PROCESS_CMD_STOP: + verifexit((state == IA_CSS_PROCESS_RUNNING) + || (state == IA_CSS_PROCESS_SUSPENDED)); + process->state = IA_CSS_PROCESS_STOPPED; + break; + case IA_CSS_PROCESS_CMD_SUSPEND: + verifexit(state == IA_CSS_PROCESS_RUNNING); + process->state = IA_CSS_PROCESS_SUSPENDED; + break; + case IA_CSS_PROCESS_CMD_RESUME: + verifexit(state == 
IA_CSS_PROCESS_SUSPENDED); + process->state = IA_CSS_PROCESS_RUNNING; + break; + case IA_CSS_N_PROCESS_CMDS: /* Fall through */ + default: + IA_CSS_TRACE_1(PSYSAPI_DYNAMIC, ERROR, + "ia_css_process_cmd invalid cmd (0x%x)\n", cmd); + goto EXIT; + } + retval = 0; +EXIT: + if (NULL == process) { + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, WARNING, + "ia_css_process_cmd invalid argument process\n"); + } + if (retval != 0) { + IA_CSS_TRACE_1(PSYSAPI_DYNAMIC, ERROR, + "ia_css_process_cmd failed (%i)\n", retval); + } + return retval; +} diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/psysapi/dynamic/src/ia_css_psys_process_group.c b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/psysapi/dynamic/src/ia_css_psys_process_group.c new file mode 100644 index 0000000000000..46bb828041534 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/psysapi/dynamic/src/ia_css_psys_process_group.c @@ -0,0 +1,886 @@ +/* +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+*/ + +#include "ia_css_psys_process_group.h" +#include "ia_css_psys_dynamic_storage_class.h" + +/* + * Functions to possibly inline + */ + +#ifndef __IA_CSS_PSYS_DYNAMIC_INLINE__ +#include "ia_css_psys_process_group_impl.h" +#endif /* __IA_CSS_PSYS_DYNAMIC_INLINE__ */ + +/* + * Functions not to inline + */ + +/* This header is need for cpu memset to 0 +* and process groups are not created in SP +*/ +#if !defined(__VIED_CELL) +#include "cpu_mem_support.h" +#endif + +/* This source file is created with the intention of sharing and +* compiled for host and firmware. Since there is no native 64bit +* data type support for firmware this wouldn't compile for SP +* tile. The part of the file that is not compilable are marked +* with the following __VIED_CELL marker and this comment. Once we +* come up with a solution to address this issue this will be +* removed. +*/ +#if !defined(__VIED_CELL) +static bool ia_css_process_group_is_program_enabled( + const ia_css_program_manifest_t *program_manifest, + ia_css_kernel_bitmap_t enable_bitmap) +{ + ia_css_kernel_bitmap_t program_bitmap = + ia_css_program_manifest_get_kernel_bitmap(program_manifest); + ia_css_program_type_t program_type = + ia_css_program_manifest_get_type(program_manifest); + ia_css_kernel_bitmap_t program_enable_bitmap; + + if (!ia_css_is_kernel_bitmap_intersection_empty(enable_bitmap, + program_bitmap)) { + + if (program_type == IA_CSS_PROGRAM_TYPE_EXCLUSIVE_SUB || + program_type == IA_CSS_PROGRAM_TYPE_EXCLUSIVE_SUPER || + program_type == IA_CSS_PROGRAM_TYPE_VIRTUAL_SUB) { + /* + * EXCLUSIVE_SUB programs are subsets of + * EXCLUSIVE_SUPER so the bits of the enable_bitmap + * that refer to those are those of their + * EXCLUSIVE_SUPER program (on which the depend) and + * not the subset that their own program_bitmap has + */ + if (program_type == + IA_CSS_PROGRAM_TYPE_EXCLUSIVE_SUB || + program_type == + IA_CSS_PROGRAM_TYPE_VIRTUAL_SUB) { + ia_css_kernel_bitmap_t super_program_bitmap; + + const 
ia_css_program_group_manifest_t * + prog_group_manifest = + ia_css_program_manifest_get_parent(program_manifest); + uint8_t super_prog_idx = + ia_css_program_manifest_get_program_dependency( + program_manifest, 0); + const ia_css_program_manifest_t * + super_program_manifest = + ia_css_program_group_manifest_get_prgrm_mnfst( + prog_group_manifest, super_prog_idx); + + verifexit(super_program_manifest != NULL); + if (((program_type == + IA_CSS_PROGRAM_TYPE_EXCLUSIVE_SUB) && + (ia_css_program_manifest_get_type( + super_program_manifest) != + IA_CSS_PROGRAM_TYPE_EXCLUSIVE_SUPER)) + || ((program_type == + IA_CSS_PROGRAM_TYPE_VIRTUAL_SUB) && + (ia_css_program_manifest_get_type( + super_program_manifest) != + IA_CSS_PROGRAM_TYPE_VIRTUAL_SUPER))) { + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, ERROR, + "ia_css_process_group_is_program_enabled(): Error\n"); + verifexit(0); + } + + super_program_bitmap = + ia_css_program_manifest_get_kernel_bitmap( + super_program_manifest); + program_enable_bitmap = + ia_css_kernel_bitmap_intersection( + enable_bitmap, + super_program_bitmap); + } else { + program_enable_bitmap = + ia_css_kernel_bitmap_intersection( + enable_bitmap, program_bitmap); + } + + if (ia_css_is_kernel_bitmap_equal( + program_enable_bitmap, program_bitmap)) { + return true; + } + } else if (program_type == IA_CSS_PROGRAM_TYPE_VIRTUAL_SUPER) { + /* + * Virtual super programs are not selectable + * only the virtual sub programs + */ + return false; + } else { + return true; + } + } + +EXIT: + return false; +} + +static bool ia_css_process_group_is_terminal_enabled( + const ia_css_terminal_manifest_t *terminal_manifest, + ia_css_kernel_bitmap_t enable_bitmap) +{ + ia_css_terminal_type_t terminal_type; + + verifjmpexit(NULL != terminal_manifest); + terminal_type = ia_css_terminal_manifest_get_type(terminal_manifest); + + if (ia_css_is_terminal_manifest_data_terminal(terminal_manifest)) { + ia_css_data_terminal_manifest_t *data_term_manifest = + (ia_css_data_terminal_manifest_t 
*)terminal_manifest; + ia_css_kernel_bitmap_t term_bitmap = + ia_css_data_terminal_manifest_get_kernel_bitmap( + data_term_manifest); + /* + * Terminals depend on a kernel, + * if the kernel is present the program it contains and + * the terminal the program depends on are active + */ + if (!ia_css_is_kernel_bitmap_intersection_empty( + enable_bitmap, term_bitmap)) { + return true; + } + } else if (ia_css_is_terminal_manifest_spatial_parameter_terminal( + terminal_manifest)) { + ia_css_kernel_bitmap_t term_kernel_bitmap = ia_css_kernel_bitmap_clear(); + ia_css_spatial_param_terminal_manifest_t *spatial_term_man = + (ia_css_spatial_param_terminal_manifest_t *) + terminal_manifest; + + term_kernel_bitmap = + ia_css_kernel_bitmap_set( + term_kernel_bitmap, + spatial_term_man->kernel_id); + if (!ia_css_is_kernel_bitmap_intersection_empty( + enable_bitmap, term_kernel_bitmap)) { + return true; + } + + } else if (ia_css_is_terminal_manifest_parameter_terminal( + terminal_manifest) && terminal_type == + IA_CSS_TERMINAL_TYPE_PARAM_CACHED_IN) { + return true; + + } else if (ia_css_is_terminal_manifest_parameter_terminal( + terminal_manifest) && terminal_type == + IA_CSS_TERMINAL_TYPE_PARAM_CACHED_OUT) { + /* + * For parameter out terminals, we disable the terminals + * if ALL the corresponding kernels are disabled, + * for parameter in terminals we cannot do this; + * even if kernels are disabled, it may be required that + * (HW) parameters must be supplied via the parameter + * in terminal (e.g. bypass bits). 
+ */ + ia_css_kernel_bitmap_t term_kernel_bitmap = ia_css_kernel_bitmap_clear(); + ia_css_param_terminal_manifest_t *param_term_man = + (ia_css_param_terminal_manifest_t *)terminal_manifest; + ia_css_param_manifest_section_desc_t *section_desc; + unsigned int section = 0; + + for (section = 0; section < param_term_man-> + param_manifest_section_desc_count; section++) { + section_desc = + ia_css_param_terminal_manifest_get_prm_sct_desc( + param_term_man, section); + verifjmpexit(section_desc != NULL); + term_kernel_bitmap = ia_css_kernel_bitmap_set( + term_kernel_bitmap, + section_desc->kernel_id); + } + + if (!ia_css_is_kernel_bitmap_intersection_empty( + enable_bitmap, term_kernel_bitmap)) { + return true; + } + } else if (ia_css_is_terminal_manifest_program_terminal( + terminal_manifest)) { + return true; + } else if (ia_css_is_terminal_manifest_program_control_init_terminal( + terminal_manifest)) { + return true; + } +EXIT: + return false; +} + +size_t ia_css_sizeof_process_group( + const ia_css_program_group_manifest_t *manifest, + const ia_css_program_group_param_t *param) +{ + size_t size = 0, tmp_size; + int i, error_val = -1; + uint8_t process_count, process_num; + uint8_t terminal_count; + ia_css_kernel_bitmap_t enable_bitmap; + + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, VERBOSE, + "ia_css_sizeof_process_group(): enter:\n"); + + verifexit(manifest != NULL); + verifexit(param != NULL); + + COMPILATION_ERROR_IF( + SIZE_OF_PROCESS_GROUP_STRUCT_BITS != + (CHAR_BIT * sizeof(ia_css_process_group_t))); + + COMPILATION_ERROR_IF(0 != + sizeof(ia_css_process_group_t) % sizeof(uint64_t)); + + process_count = + ia_css_process_group_compute_process_count(manifest, param); + terminal_count = + ia_css_process_group_compute_terminal_count(manifest, param); + + verifexit(process_count != 0); + verifexit(terminal_count != 0); + + size += sizeof(ia_css_process_group_t); + + tmp_size = process_count * sizeof(uint16_t); + size += tot_bytes_for_pow2_align(sizeof(uint64_t), tmp_size); + 
+ tmp_size = terminal_count * sizeof(uint16_t); + size += tot_bytes_for_pow2_align(sizeof(uint64_t), tmp_size); + + enable_bitmap = + ia_css_program_group_param_get_kernel_enable_bitmap(param); + process_num = 0; + for (i = 0; i < (int)ia_css_program_group_manifest_get_program_count( + manifest); i++) { + ia_css_program_manifest_t *program_manifest = + ia_css_program_group_manifest_get_prgrm_mnfst(manifest, i); + ia_css_program_param_t *program_param = + ia_css_program_group_param_get_program_param(param, i); + + if (ia_css_process_group_is_program_enabled( + program_manifest, enable_bitmap)) { + verifexit(process_num < process_count); + size += ia_css_sizeof_process( + program_manifest, program_param); + process_num++; + } + } + + verifexit(process_num == process_count); + + for (i = 0; i < (int)ia_css_program_group_manifest_get_terminal_count( + manifest); i++) { + ia_css_terminal_manifest_t *terminal_manifest = + ia_css_program_group_manifest_get_term_mnfst( + manifest, i); + + if (ia_css_process_group_is_terminal_enabled( + terminal_manifest, enable_bitmap)) { + size += ia_css_sizeof_terminal( + terminal_manifest, param); + } + } + + error_val = 0; + +EXIT: + if (NULL == manifest || NULL == param) { + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, WARNING, + "ia_css_sizeof_process_group invalid argument\n"); + } + if (error_val != 0) { + IA_CSS_TRACE_1(PSYSAPI_DYNAMIC, ERROR, + "ia_css_sizeof_process_group ERROR(%d)\n", error_val); + } + return size; +} + +ia_css_process_group_t *ia_css_process_group_create( + void *process_grp_mem, + const ia_css_program_group_manifest_t *manifest, + const ia_css_program_group_param_t *param) +{ + size_t size = ia_css_sizeof_process_group(manifest, param); + int retval = -1; + int ret; + int i; + ia_css_process_group_t *process_group = NULL; + uint8_t process_count, process_num; + uint8_t terminal_count, terminal_num; + uint16_t fragment_count; + char *process_grp_raw_ptr; + uint16_t *process_tab_ptr, *terminal_tab_ptr; + 
ia_css_kernel_bitmap_t enable_bitmap; + uint8_t manifest_terminal_count; + + IA_CSS_TRACE_3(PSYSAPI_DYNAMIC, INFO, + "ia_css_process_group_create(process_grp_mem %p, manifest %p, group_param %p): enter:\n", + process_grp_mem, manifest, param); + + verifexit(process_grp_mem != NULL); + verifexit(manifest != NULL); + verifexit(param != NULL); + verifexit(ia_css_is_program_group_manifest_valid(manifest)); + + process_group = (ia_css_process_group_t *)process_grp_mem; + ia_css_cpu_mem_set_zero(process_group, size); + process_grp_raw_ptr = (char *) process_group; + + process_group->state = IA_CSS_PROCESS_GROUP_CREATED; + + process_group->protocol_version = + ia_css_program_group_param_get_protocol_version(param); + + fragment_count = ia_css_program_group_param_get_fragment_count(param); + process_count = + ia_css_process_group_compute_process_count(manifest, param); + terminal_count = + ia_css_process_group_compute_terminal_count(manifest, param); + enable_bitmap = + ia_css_program_group_param_get_kernel_enable_bitmap(param); + + process_group->fragment_count = fragment_count; + process_group->process_count = process_count; + process_group->terminal_count = terminal_count; + + process_grp_raw_ptr += sizeof(ia_css_process_group_t); + process_tab_ptr = (uint16_t *) process_grp_raw_ptr; + process_group->processes_offset = + (uint16_t)(process_grp_raw_ptr - (char *)process_group); + + process_grp_raw_ptr += tot_bytes_for_pow2_align( + sizeof(uint64_t), process_count * sizeof(uint16_t)); + terminal_tab_ptr = (uint16_t *) process_grp_raw_ptr; + process_group->terminals_offset = + (uint16_t)(process_grp_raw_ptr - (char *)process_group); + + /* Move raw pointer to the first process */ + process_grp_raw_ptr += tot_bytes_for_pow2_align( + sizeof(uint64_t), terminal_count * sizeof(uint16_t)); + + /* Set default */ + verifexit(ia_css_process_group_set_fragment_limit( + process_group, fragment_count) == 0); + + /* Set process group terminal dependency list */ + /* This list is used 
during creating the process dependency list */ + manifest_terminal_count = + ia_css_program_group_manifest_get_terminal_count(manifest); + + terminal_num = 0; + for (i = 0; i < (int)manifest_terminal_count; i++) { + ia_css_terminal_manifest_t *t_manifest = + ia_css_program_group_manifest_get_term_mnfst( + manifest, i); + + verifexit(NULL != t_manifest); + if (ia_css_process_group_is_terminal_enabled( + t_manifest, enable_bitmap)) { + ia_css_terminal_t *terminal = NULL; + ia_css_terminal_param_t *terminal_param = + ia_css_program_group_param_get_terminal_param( + param, i); + + verifexit(NULL != terminal_param); + terminal_tab_ptr[terminal_num] = + (uint16_t)(process_grp_raw_ptr - + (char *)process_group); + terminal = ia_css_terminal_create( + process_grp_raw_ptr, t_manifest, + terminal_param, enable_bitmap); + verifexit(terminal != NULL); + verifexit((ia_css_terminal_set_parent( + terminal, process_group) == 0)); + verifexit((ia_css_terminal_set_terminal_manifest_index( + terminal, i) == 0)); + IA_CSS_TRACE_1(PSYSAPI_DYNAMIC, INFO, + "ia_css_process_group_create: terminal_manifest_index %d\n", + i); + + process_grp_raw_ptr += ia_css_terminal_get_size( + terminal); + terminal_num++; + } + } + verifexit(terminal_num == terminal_count); + + process_num = 0; + for (i = 0; i < (int)ia_css_program_group_manifest_get_program_count( + manifest); i++) { + ia_css_process_t *process = NULL; + ia_css_program_manifest_t *program_manifest = + ia_css_program_group_manifest_get_prgrm_mnfst( + manifest, i); + ia_css_program_param_t *program_param = + ia_css_program_group_param_get_program_param(param, i); + unsigned int prog_dep_index, proc_dep_index; + unsigned int term_dep_index, term_index; + + if (ia_css_process_group_is_program_enabled( + program_manifest, enable_bitmap)) { + + verifexit(process_num < process_count); + + process_tab_ptr[process_num] = + (uint16_t)(process_grp_raw_ptr - + (char *)process_group); + process = ia_css_process_create( + process_grp_raw_ptr, + 
program_manifest, + program_param, + i); + verifexit(process != NULL); + + ia_css_process_set_parent(process, process_group); + if (ia_css_has_program_manifest_fixed_cell( + program_manifest)) { + vied_nci_cell_ID_t cell_id = + ia_css_program_manifest_get_cell_ID( + program_manifest); + + IA_CSS_TRACE_1(PSYSAPI_DYNAMIC, INFO, + "ia_css_process_group_create: cell_id %d\n", + cell_id); + ia_css_process_set_cell(process, cell_id); + } + + process_grp_raw_ptr += ia_css_process_get_size( + process); + /* + * Set process dependencies of process derived + * from program manifest + */ + for (prog_dep_index = 0; prog_dep_index < + ia_css_program_manifest_get_program_dependency_count( + program_manifest); prog_dep_index++) { + uint8_t dep_prog_idx = + ia_css_program_manifest_get_program_dependency( + program_manifest, prog_dep_index); + const ia_css_program_manifest_t * + dep_prg_manifest = + ia_css_program_group_manifest_get_prgrm_mnfst( + manifest, dep_prog_idx); + ia_css_program_ID_t id = + ia_css_program_manifest_get_program_ID( + dep_prg_manifest); + + verifexit(id != 0); + for (proc_dep_index = 0; + proc_dep_index < process_num; + proc_dep_index++) { + ia_css_process_t *dep_process = + ia_css_process_group_get_process( + process_group, + proc_dep_index); + + ia_css_process_set_cell_dependency( + process, + prog_dep_index, 0); + + if (ia_css_process_get_program_ID( + dep_process) == id) { + ia_css_process_set_cell_dependency( + process, + prog_dep_index, + proc_dep_index); + break; + } + } + } + process_num++; + + /* + * Set terminal dependencies of process derived + * from program manifest + */ + for (term_dep_index = 0; term_dep_index < + ia_css_program_manifest_get_terminal_dependency_count( + program_manifest); term_dep_index++) { + uint8_t pm_term_index = + ia_css_program_manifest_get_terminal_dependency + (program_manifest, term_dep_index); + + verifexit(pm_term_index < manifest_terminal_count); + IA_CSS_TRACE_2(PSYSAPI_DYNAMIC, INFO, + 
"ia_css_process_group_create(): term_dep_index: %d, pm_term_index: %d\n", + term_dep_index, pm_term_index); + for (term_index = 0; + term_index < terminal_count; + term_index++) { + ia_css_terminal_t *terminal = + ia_css_process_group_get_terminal( + process_group, + term_index); + + if (ia_css_terminal_get_terminal_manifest_index + (terminal) == pm_term_index) { + ia_css_process_set_terminal_dependency( + process, + term_dep_index, + term_index); + IA_CSS_TRACE_3(PSYSAPI_DYNAMIC, INFO, + "ia_css_process_group_create() set_terminal_dependency(process: %d, dep_idx: %d, term_idx: %d)\n", + i, term_dep_index, term_index); + + break; + } + } + } + } + } + verifexit(process_num == process_count); + + process_group->size = + (uint32_t)ia_css_sizeof_process_group(manifest, param); + process_group->ID = + ia_css_program_group_manifest_get_program_group_ID(manifest); + + /* Initialize performance measurement fields to zero */ + process_group->pg_load_start_ts = 0; + process_group->pg_load_cycles = 0; + process_group->pg_init_cycles = 0; + process_group->pg_processing_cycles = 0; + + verifexit(process_group->ID != 0); + + ret = ia_css_process_group_on_create(process_group, manifest, param); + verifexit(ret == 0); + + process_group->state = IA_CSS_PROCESS_GROUP_READY; + retval = 0; + + IA_CSS_TRACE_1(PSYSAPI_DYNAMIC, INFO, + "ia_css_process_group_create(): Created successfully process group ID 0x%x\n", + process_group->ID); + +EXIT: + if (NULL == process_grp_mem || NULL == manifest || NULL == param) { + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, WARNING, + "ia_css_process_group_create invalid argument\n"); + } + if (retval != 0) { + IA_CSS_TRACE_1(PSYSAPI_DYNAMIC, ERROR, + "ia_css_process_group_create failed (%i)\n", retval); + process_group = ia_css_process_group_destroy(process_group); + } + return process_group; +} + +ia_css_process_group_t *ia_css_process_group_destroy( + ia_css_process_group_t *process_group) +{ + if (process_group != NULL) { + 
ia_css_process_group_on_destroy(process_group); + process_group = NULL; + } else { + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, WARNING, + "ia_css_process_group_destroy invalid argument\n"); + } + return process_group; +} + +int ia_css_process_group_submit( + ia_css_process_group_t *process_group) +{ + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, INFO, + "ia_css_process_group_submit(): enter:\n"); + + return ia_css_process_group_exec_cmd(process_group, + IA_CSS_PROCESS_GROUP_CMD_SUBMIT); +} + +int ia_css_process_group_start( + ia_css_process_group_t *process_group) +{ + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, INFO, + "ia_css_process_group_start(): enter:\n"); + + return ia_css_process_group_exec_cmd(process_group, + IA_CSS_PROCESS_GROUP_CMD_START); +} + +int ia_css_process_group_stop( + ia_css_process_group_t *process_group) +{ + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, INFO, + "ia_css_process_group_stop(): enter:\n"); + + return ia_css_process_group_exec_cmd(process_group, + IA_CSS_PROCESS_GROUP_CMD_STOP); +} + +int ia_css_process_group_run( + ia_css_process_group_t *process_group) +{ + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, INFO, + "ia_css_process_group_run(): enter:\n"); + + return ia_css_process_group_exec_cmd(process_group, + IA_CSS_PROCESS_GROUP_CMD_RUN); +} + +int ia_css_process_group_suspend( + ia_css_process_group_t *process_group) +{ + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, INFO, + "ia_css_process_group_suspend(): enter:\n"); + + return ia_css_process_group_exec_cmd(process_group, + IA_CSS_PROCESS_GROUP_CMD_SUSPEND); +} + +int ia_css_process_group_resume( + ia_css_process_group_t *process_group) +{ + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, INFO, + "ia_css_process_group_resume(): enter:\n"); + + return ia_css_process_group_exec_cmd(process_group, + IA_CSS_PROCESS_GROUP_CMD_RESUME); +} + +int ia_css_process_group_reset( + ia_css_process_group_t *process_group) +{ + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, INFO, + "ia_css_process_group_reset(): enter:\n"); + + return ia_css_process_group_exec_cmd(process_group, + 
IA_CSS_PROCESS_GROUP_CMD_RESET); +} + +int ia_css_process_group_abort( + ia_css_process_group_t *process_group) +{ + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, INFO, + "ia_css_process_group_abort(): enter:\n"); + + return ia_css_process_group_exec_cmd(process_group, + IA_CSS_PROCESS_GROUP_CMD_ABORT); +} + +int ia_css_process_group_disown( + ia_css_process_group_t *process_group) +{ + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, INFO, + "ia_css_process_group_disown(): enter:\n"); + + return ia_css_process_group_exec_cmd(process_group, + IA_CSS_PROCESS_GROUP_CMD_DISOWN); +} + +extern uint64_t ia_css_process_group_get_token( + ia_css_process_group_t *process_group) +{ + uint64_t token = 0; + + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, VERBOSE, + "ia_css_process_group_get_token(): enter:\n"); + + verifexit(process_group != NULL); + + token = process_group->token; + +EXIT: + if (NULL == process_group) { + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, WARNING, + "ia_css_process_group_get_token invalid argument\n"); + } + return token; +} + +int ia_css_process_group_set_token( + ia_css_process_group_t *process_group, + const uint64_t token) +{ + int retval = -1; + + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, VERBOSE, + "ia_css_process_group_set_token(): enter:\n"); + + verifexit(process_group != NULL); + verifexit(token != 0); + + process_group->token = token; + + retval = 0; +EXIT: + if (NULL == process_group || 0 == token) { + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, WARNING, + "ia_css_process_group_set_token invalid argument\n"); + } + if (retval != 0) { + IA_CSS_TRACE_1(PSYSAPI_DYNAMIC, ERROR, + "ia_css_process_group_set_token failed (%i)\n", + retval); + } + return retval; +} + +extern uint64_t ia_css_process_group_get_private_token( + ia_css_process_group_t *process_group) +{ + uint64_t token = 0; + + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, VERBOSE, + "ia_css_process_group_get_private_token(): enter:\n"); + + verifexit(process_group != NULL); + + token = process_group->private_token; + +EXIT: + if (NULL == process_group) { + 
IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, WARNING, + "ia_css_process_group_get_private_token invalid argument\n"); + } + return token; +} + +int ia_css_process_group_set_private_token( + ia_css_process_group_t *process_group, + const uint64_t token) +{ + int retval = -1; + + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, VERBOSE, + "ia_css_process_group_set_private_token(): enter:\n"); + + verifexit(process_group != NULL); + verifexit(token != 0); + + process_group->private_token = token; + + retval = 0; +EXIT: + if (NULL == process_group || 0 == token) { + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, WARNING, + "ia_css_process_group_set_private_token invalid argument\n"); + } + if (retval != 0) { + IA_CSS_TRACE_1(PSYSAPI_DYNAMIC, ERROR, + "ia_css_process_group_set_private_token failed (%i)\n", + retval); + } + return retval; +} + +uint8_t ia_css_process_group_compute_process_count( + const ia_css_program_group_manifest_t *manifest, + const ia_css_program_group_param_t *param) +{ + uint8_t process_count = 0; + ia_css_kernel_bitmap_t total_bitmap; + ia_css_kernel_bitmap_t enable_bitmap; + int i; + + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, VERBOSE, + "ia_css_process_group_compute_process_count(): enter:\n"); + + verifexit(manifest != NULL); + verifexit(param != NULL); + + total_bitmap = + ia_css_program_group_manifest_get_kernel_bitmap(manifest); + enable_bitmap = + ia_css_program_group_param_get_kernel_enable_bitmap(param); + + verifexit(ia_css_is_program_group_manifest_valid(manifest)); + verifexit(ia_css_is_kernel_bitmap_subset(total_bitmap, enable_bitmap)); + verifexit(!ia_css_is_kernel_bitmap_empty(enable_bitmap)); + + for (i = 0; i < + (int)ia_css_program_group_manifest_get_program_count(manifest); + i++) { + ia_css_program_manifest_t *program_manifest = + ia_css_program_group_manifest_get_prgrm_mnfst( + manifest, i); + ia_css_kernel_bitmap_t program_bitmap = + ia_css_program_manifest_get_kernel_bitmap( + program_manifest); + /* + * Programs can be orthogonal, + * a mutually exclusive subset, + * or a 
concurrent subset + */ + if (!ia_css_is_kernel_bitmap_intersection_empty(enable_bitmap, + program_bitmap)) { + ia_css_program_type_t program_type = + ia_css_program_manifest_get_type( + program_manifest); + /* + * An exclusive subnode < exclusive supernode, + * so simply don't count it + */ + if (program_type != + IA_CSS_PROGRAM_TYPE_EXCLUSIVE_SUB && + program_type != + IA_CSS_PROGRAM_TYPE_VIRTUAL_SUB) { + process_count++; + } + } + } + +EXIT: + if (NULL == manifest || NULL == param) { + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, WARNING, + "ia_css_process_group_compute_process_count invalid argument\n"); + } + return process_count; +} + +uint8_t ia_css_process_group_compute_terminal_count( + const ia_css_program_group_manifest_t *manifest, + const ia_css_program_group_param_t *param) +{ + uint8_t terminal_count = 0; + ia_css_kernel_bitmap_t total_bitmap, enable_bitmap; + int i; + + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, VERBOSE, + "ia_css_process_group_compute_terminal_count(): enter:\n"); + + verifexit(manifest != NULL); + verifexit(param != NULL); + + total_bitmap = + ia_css_program_group_manifest_get_kernel_bitmap(manifest); + enable_bitmap = + ia_css_program_group_param_get_kernel_enable_bitmap(param); + + verifexit(ia_css_is_program_group_manifest_valid(manifest)); + verifexit(ia_css_is_kernel_bitmap_subset(total_bitmap, enable_bitmap)); + verifexit(!ia_css_is_kernel_bitmap_empty(enable_bitmap)); + + for (i = 0; i < + (int)ia_css_program_group_manifest_get_terminal_count( + manifest); i++) { + ia_css_terminal_manifest_t *tmanifest = + ia_css_program_group_manifest_get_term_mnfst( + manifest, i); + + if (ia_css_process_group_is_terminal_enabled( + tmanifest, enable_bitmap)) { + terminal_count++; + } + } + +EXIT: + if (NULL == manifest || NULL == param) { + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, WARNING, + "ia_css_process_group_compute_terminal_count invalid argument\n"); + } + return terminal_count; +} +#endif /* !defined(__VIED_CELL) */ diff --git 
a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/psysapi/dynamic/src/ia_css_psys_process_group_impl.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/psysapi/dynamic/src/ia_css_psys_process_group_impl.h new file mode 100644 index 0000000000000..f99602dc3c9e2 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/psysapi/dynamic/src/ia_css_psys_process_group_impl.h @@ -0,0 +1,1538 @@ +/* +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ + +#ifndef __IA_CSS_PSYS_PROCESS_GROUP_IMPL_H +#define __IA_CSS_PSYS_PROCESS_GROUP_IMPL_H + +#include +#include +#include "ia_css_psys_process_group_cmd_impl.h" +#include +#include +#include +#include +#include +#include +#include "ia_css_terminal_manifest_types.h" + +#include "ia_css_rbm.h" + +#include /* ia_css_kernel_bitmap_t */ + +#include +#include +#include "ia_css_rbm_manifest_types.h" +#include +#include +#include + +#include "ia_css_psys_dynamic_trace.h" + +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_C +uint16_t ia_css_process_group_get_fragment_limit( + const ia_css_process_group_t *process_group) +{ + DECLARE_ERRVAL + uint16_t fragment_limit = 0; + + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, VERBOSE, + "ia_css_process_group_get_fragment_limit(): enter:\n"); + + verifexitval(process_group != NULL, EFAULT); + + fragment_limit = process_group->fragment_limit; + +EXIT: + if (haserror(EFAULT)) { + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, ERROR, + "ia_css_process_group_get_fragment_limit invalid argument\n"); + } + 
return fragment_limit; +} + +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_C +int ia_css_process_group_set_fragment_limit( + ia_css_process_group_t *process_group, + const uint16_t fragment_limit) +{ + DECLARE_ERRVAL + int retval = -1; + uint16_t fragment_state; + + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, VERBOSE, + "ia_css_process_group_set_fragment_limit(): enter:\n"); + + verifexitval(process_group != NULL, EFAULT); + + retval = ia_css_process_group_get_fragment_state(process_group, + &fragment_state); + + verifexitval(retval == 0, EINVAL); + verifexitval(fragment_limit > fragment_state, EINVAL); + verifexitval(fragment_limit <= ia_css_process_group_get_fragment_count( + process_group), EINVAL); + + process_group->fragment_limit = fragment_limit; + + retval = 0; +EXIT: + if (haserror(EFAULT)) { + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, ERROR, + "ia_css_process_group_set_fragment_limit invalid argument process_group\n"); + } + if (!noerror()) { + IA_CSS_TRACE_1(PSYSAPI_DYNAMIC, ERROR, + "ia_css_process_group_set_fragment_limit failed (%i)\n", + retval); + } + return retval; +} + +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_C +int ia_css_process_group_clear_fragment_limit( + ia_css_process_group_t *process_group) +{ + DECLARE_ERRVAL + int retval = -1; + + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, VERBOSE, + "ia_css_process_group_clear_fragment_limit(): enter:\n"); + + verifexitval(process_group != NULL, EFAULT); + process_group->fragment_limit = 0; + + retval = 0; +EXIT: + if (haserror(EFAULT)) { + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, ERROR, + "ia_css_process_group_clear_fragment_limit invalid argument process_group\n"); + } + if (!noerror()) { + IA_CSS_TRACE_1(PSYSAPI_DYNAMIC, ERROR, + "ia_css_process_group_clear_fragment_limit failed (%i)\n", + retval); + } + return retval; +} + +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_C +int ia_css_process_group_attach_buffer( + ia_css_process_group_t *process_group, + vied_vaddress_t buffer, + const ia_css_buffer_state_t buffer_state, + const unsigned int terminal_index) +{ + 
DECLARE_ERRVAL + int retval = -1; + ia_css_terminal_t *terminal = NULL; + + NOT_USED(buffer_state); + + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, INFO, + "ia_css_process_group_attach_buffer(): enter:\n"); + + verifexitval(process_group != NULL, EFAULT); + + terminal = ia_css_process_group_get_terminal( + process_group, terminal_index); + + verifexitval(terminal != NULL, EINVAL); + verifexitval(ia_css_process_group_get_state(process_group) == + IA_CSS_PROCESS_GROUP_READY, EINVAL); + verifexitval(process_group->protocol_version == + IA_CSS_PROCESS_GROUP_PROTOCOL_LEGACY || + process_group->protocol_version == + IA_CSS_PROCESS_GROUP_PROTOCOL_PPG, EINVAL); + + if (process_group->protocol_version == + IA_CSS_PROCESS_GROUP_PROTOCOL_LEGACY) { + /* + * Legacy flow: + * Terminal address is part of the process group structure + */ + retval = ia_css_terminal_set_buffer( + terminal, buffer); + } else if (process_group->protocol_version == + IA_CSS_PROCESS_GROUP_PROTOCOL_PPG) { + /* + * PPG flow: + * Terminal address is part of external buffer set structure + */ + retval = ia_css_terminal_set_terminal_index( + terminal, terminal_index); + } + verifexitval(retval == 0, EFAULT); + + IA_CSS_TRACE_2(PSYSAPI_DYNAMIC, INFO, + "\tTerminal %p has buffer 0x%x\n", terminal, buffer); + + if (ia_css_is_terminal_data_terminal(terminal) == true) { + ia_css_frame_t *frame = + ia_css_data_terminal_get_frame( + (ia_css_data_terminal_t *)terminal); + verifexitval(frame != NULL, EINVAL); + + retval = ia_css_frame_set_buffer_state(frame, buffer_state); + verifexitval(retval == 0, EINVAL); + } + + retval = 0; + +EXIT: + if (haserror(EFAULT)) { + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, ERROR, + "ia_css_process_group_attach_buffer invalid argument process_group\n"); + } + if (!noerror()) { + IA_CSS_TRACE_1(PSYSAPI_DYNAMIC, ERROR, + "ia_css_process_group_attach_buffer failed (%i)\n", + retval); + } + return retval; +} + +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_C +vied_vaddress_t ia_css_process_group_detach_buffer( + 
ia_css_process_group_t *process_group, + const unsigned int terminal_index) +{ + DECLARE_ERRVAL + int retval = -1; + vied_vaddress_t buffer = VIED_NULL; + + ia_css_terminal_t *terminal = NULL; + ia_css_process_group_state_t state; + + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, INFO, + "ia_css_process_group_detach_buffer(): enter:\n"); + + verifexitval(process_group != NULL, EFAULT); + + terminal = + ia_css_process_group_get_terminal( + process_group, terminal_index); + state = ia_css_process_group_get_state(process_group); + + verifexitval(terminal != NULL, EINVAL); + verifexitval(state == IA_CSS_PROCESS_GROUP_READY, EINVAL); + + buffer = ia_css_terminal_get_buffer(terminal); + + if (ia_css_is_terminal_data_terminal(terminal) == true) { + ia_css_frame_t *frame = + ia_css_data_terminal_get_frame( + (ia_css_data_terminal_t *)terminal); + verifexitval(frame != NULL, EINVAL); + + retval = ia_css_frame_set_buffer_state(frame, IA_CSS_BUFFER_NULL); + verifexitval(retval == 0, EINVAL); + } + ia_css_terminal_set_buffer(terminal, VIED_NULL); + + retval = 0; +EXIT: + /* + * buffer pointer will appear on output, + * regardless of subsequent fails to avoid memory leaks + */ + if (haserror(EFAULT)) { + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, ERROR, + "ia_css_process_group_detach_buffer invalid argument process_group\n"); + } + if (!noerror()) { + IA_CSS_TRACE_1(PSYSAPI_DYNAMIC, ERROR, + "ia_css_process_group_detach_buffer failed (%i)\n", + retval); + } + return buffer; +} + +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_C +int ia_css_process_group_attach_stream( + ia_css_process_group_t *process_group, + uint32_t stream, + const ia_css_buffer_state_t buffer_state, + const unsigned int terminal_index) +{ + int retval = -1; + + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, INFO, + "ia_css_process_group_attach_stream(): enter:\n"); + + NOT_USED(process_group); + NOT_USED(stream); + NOT_USED(buffer_state); + NOT_USED(terminal_index); + + if (retval != 0) { + IA_CSS_TRACE_1(PSYSAPI_DYNAMIC, ERROR, + 
"ia_css_process_group_attach_stream failed (%i)\n", + retval); + } + return retval; +} + +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_C +uint32_t ia_css_process_group_detach_stream( + ia_css_process_group_t *process_group, + const unsigned int terminal_index) +{ + int retval = -1; + uint32_t stream = 0; + + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, INFO, + "ia_css_process_group_detach_stream(): enter:\n"); + + NOT_USED(process_group); + NOT_USED(terminal_index); + + if (retval != 0) { + IA_CSS_TRACE_1(PSYSAPI_DYNAMIC, ERROR, + "ia_css_process_group_detach_stream failed (%i)\n", + retval); + } + return stream; +} + +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_C +int ia_css_process_group_set_barrier( + ia_css_process_group_t *process_group, + const vied_nci_barrier_ID_t barrier_index) +{ + DECLARE_ERRVAL + int retval = -1; + vied_nci_resource_bitmap_t bit_mask; + vied_nci_resource_bitmap_t resource_bitmap; + + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, VERBOSE, + "ia_css_process_group_set_barrier(): enter:\n"); + + verifexitval(process_group != NULL, EFAULT); + + resource_bitmap = + ia_css_process_group_get_resource_bitmap(process_group); + + bit_mask = vied_nci_barrier_bit_mask(barrier_index); + + verifexitval(bit_mask != 0, EINVAL); + verifexitval(vied_nci_is_bitmap_clear(bit_mask, resource_bitmap), EINVAL); + + resource_bitmap = vied_nci_bitmap_set(resource_bitmap, bit_mask); + + retval = + ia_css_process_group_set_resource_bitmap( + process_group, resource_bitmap); +EXIT: + if (haserror(EFAULT)) { + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, ERROR, + "ia_css_process_group_set_barrier invalid argument process_group\n"); + } + if (!noerror()) { + IA_CSS_TRACE_1(PSYSAPI_DYNAMIC, ERROR, + "ia_css_process_group_set_barrier failed (%i)\n", + retval); + } + return retval; +} + +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_C +int ia_css_process_group_clear_barrier( + ia_css_process_group_t *process_group, + const vied_nci_barrier_ID_t barrier_index) +{ + DECLARE_ERRVAL + int retval = -1; + vied_nci_resource_bitmap_t bit_mask, 
resource_bitmap; + + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, VERBOSE, + "ia_css_process_group_clear_barrier(): enter:\n"); + + verifexitval(process_group != NULL, EFAULT); + + resource_bitmap = + ia_css_process_group_get_resource_bitmap(process_group); + + bit_mask = vied_nci_barrier_bit_mask(barrier_index); + + verifexitval(bit_mask != 0, EINVAL); + verifexitval(vied_nci_is_bitmap_set(bit_mask, resource_bitmap), EINVAL); + + resource_bitmap = vied_nci_bitmap_clear(resource_bitmap, bit_mask); + + retval = + ia_css_process_group_set_resource_bitmap( + process_group, resource_bitmap); +EXIT: + if (haserror(EFAULT)) { + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, ERROR, + "ia_css_process_group_clear_barrier invalid argument process_group\n"); + } + if (!noerror()) { + IA_CSS_TRACE_1(PSYSAPI_DYNAMIC, ERROR, + "ia_css_process_group_clear_barrier failed (%i)\n", + retval); + } + return retval; +} + +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_C +int ia_css_process_group_print( + const ia_css_process_group_t *process_group, + void *fid) +{ + DECLARE_ERRVAL + int retval = -1; + int i; + + uint8_t process_count; + uint8_t terminal_count; + vied_vaddress_t ipu_vaddress = VIED_NULL; + ia_css_rbm_t routing_bitmap; + + NOT_USED(fid); + + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, VERBOSE, + "ia_css_process_group_print(): enter:\n"); + + verifexitval(process_group != NULL, EFAULT); + retval = ia_css_process_group_get_ipu_vaddress(process_group, &ipu_vaddress); + verifexitval(retval == 0, EINVAL); + + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, INFO, + "=============== Process group print start ===============\n"); + IA_CSS_TRACE_1(PSYSAPI_DYNAMIC, INFO, + "\tprocess_group cpu address = %p\n", process_group); + IA_CSS_TRACE_1(PSYSAPI_DYNAMIC, INFO, + "\tipu_virtual_address = %#x\n", ipu_vaddress); + IA_CSS_TRACE_1(PSYSAPI_DYNAMIC, INFO, + "\tsizeof(process_group) = %d\n", + (int)ia_css_process_group_get_size(process_group)); + IA_CSS_TRACE_1(PSYSAPI_DYNAMIC, INFO, + "\tfragment_count = %d\n", + 
(int)ia_css_process_group_get_fragment_count(process_group)); + + routing_bitmap = *ia_css_process_group_get_routing_bitmap(process_group); + for (i = 0; i < (int)IA_CSS_RBM_NOF_ELEMS; i++) { + IA_CSS_TRACE_2(PSYSAPI_DYNAMIC, INFO, + "\trouting_bitmap[index = %d] = 0x%X\n", + i, (int)routing_bitmap.data[i]); + } + + IA_CSS_TRACE_1(PSYSAPI_DYNAMIC, INFO, + "\tprogram_group(process_group) = %d\n", + (int)ia_css_process_group_get_program_group_ID(process_group)); + process_count = ia_css_process_group_get_process_count(process_group); + terminal_count = + ia_css_process_group_get_terminal_count(process_group); + + IA_CSS_TRACE_1(PSYSAPI_DYNAMIC, INFO, + "\t%d processes\n", (int)process_count); + for (i = 0; i < (int)process_count; i++) { + ia_css_process_t *process = + ia_css_process_group_get_process(process_group, i); + + retval = ia_css_process_print(process, fid); + verifjmpexit(retval == 0); + } + IA_CSS_TRACE_1(PSYSAPI_DYNAMIC, INFO, + "\t%d terminals\n", (int)terminal_count); + for (i = 0; i < (int)terminal_count; i++) { + ia_css_terminal_t *terminal = + ia_css_process_group_get_terminal(process_group, i); + + retval = ia_css_terminal_print(terminal, fid); + verifjmpexit(retval == 0); + } + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, INFO, + "=============== Process group print end ===============\n"); + retval = 0; +EXIT: + if (haserror(EFAULT)) { + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, ERROR, + "ia_css_process_group_print invalid argument\n"); + } + return retval; +} + +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_C +bool ia_css_is_process_group_valid( + const ia_css_process_group_t *process_group, + const ia_css_program_group_manifest_t *pg_manifest, + const ia_css_program_group_param_t *param) +{ + DECLARE_ERRVAL + bool invalid_flag = false; + uint8_t proc_idx; + uint8_t prog_idx; + uint8_t proc_term_idx; + uint8_t process_count; + uint8_t program_count; + uint8_t terminal_count; + uint8_t man_terminal_count; + + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, VERBOSE, + 
"ia_css_is_process_group_valid(): enter:\n"); + + verifexitval(process_group != NULL, EFAULT); + verifexitval(pg_manifest != NULL, EFAULT); + NOT_USED(param); + + process_count = process_group->process_count; + terminal_count = process_group->terminal_count; + program_count = + ia_css_program_group_manifest_get_program_count(pg_manifest); + man_terminal_count = + ia_css_program_group_manifest_get_terminal_count(pg_manifest); + + /* Validate process group */ + invalid_flag = invalid_flag || + !(program_count >= process_count) || + !(man_terminal_count >= terminal_count) || + !(process_group->size > process_group->processes_offset) || + !(process_group->size > process_group->terminals_offset); + + /* Validate processes */ + for (proc_idx = 0; proc_idx < process_count; proc_idx++) { + const ia_css_process_t *process; + ia_css_program_ID_t prog_id; + bool no_match_found = true; + + process = ia_css_process_group_get_process( + process_group, proc_idx); + verifexitval(NULL != process, EFAULT); + prog_id = ia_css_process_get_program_ID(process); + for (prog_idx = 0; prog_idx < program_count; prog_idx++) { + ia_css_program_manifest_t *p_manifest = NULL; + + p_manifest = + ia_css_program_group_manifest_get_prgrm_mnfst( + pg_manifest, prog_idx); + if (prog_id == + ia_css_program_manifest_get_program_ID( + p_manifest)) { + invalid_flag = invalid_flag || + !ia_css_is_process_valid( + process, p_manifest); + no_match_found = false; + break; + } + } + invalid_flag = invalid_flag || no_match_found; + } + + /* Validate terminals */ + for (proc_term_idx = 0; proc_term_idx < terminal_count; + proc_term_idx++) { + int man_term_idx; + const ia_css_terminal_t *terminal; + const ia_css_terminal_manifest_t *terminal_manifest; + + terminal = + ia_css_process_group_get_terminal( + process_group, proc_term_idx); + verifexitval(NULL != terminal, EFAULT); + man_term_idx = + ia_css_terminal_get_terminal_manifest_index(terminal); + terminal_manifest = + 
ia_css_program_group_manifest_get_term_mnfst( + pg_manifest, man_term_idx); + invalid_flag = invalid_flag || + !ia_css_is_terminal_valid(terminal, terminal_manifest); + } + +EXIT: + if (haserror(EFAULT)) { + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, ERROR, + "ia_css_is_process_group_valid() invalid argument\n"); + return false; + } else { + return (!invalid_flag); + } +} + +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_C +bool ia_css_can_process_group_submit( + const ia_css_process_group_t *process_group) +{ + DECLARE_ERRVAL + int i; + bool can_submit = false; + int retval = -1; + uint8_t terminal_count = + ia_css_process_group_get_terminal_count(process_group); + + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, INFO, + "ia_css_can_process_group_submit(): enter:\n"); + + verifexitval(process_group != NULL, EFAULT); + + for (i = 0; i < (int)terminal_count; i++) { + ia_css_terminal_t *terminal = + ia_css_process_group_get_terminal(process_group, i); + vied_vaddress_t buffer; + ia_css_buffer_state_t buffer_state; + + verifexitval(terminal != NULL, EINVAL); + + if (process_group->protocol_version == + IA_CSS_PROCESS_GROUP_PROTOCOL_LEGACY) { + /* + * For legacy pg flow, buffer addresses are contained inside + * the process group structure, so these need to be validated + * on process group submission. 
+ */ + buffer = ia_css_terminal_get_buffer(terminal); + IA_CSS_TRACE_3(PSYSAPI_DYNAMIC, INFO, + "\tH: Terminal number(%d) is %p having buffer 0x%x\n", + i, terminal, buffer); + } + + /* buffer_state is applicable only for data terminals*/ + if (ia_css_is_terminal_data_terminal(terminal) == true) { + ia_css_frame_t *frame = + ia_css_data_terminal_get_frame( + (ia_css_data_terminal_t *)terminal); + + verifexitval(frame != NULL, EINVAL); + buffer_state = ia_css_frame_get_buffer_state(frame); + if ((buffer_state == IA_CSS_BUFFER_NULL) || + (buffer_state == IA_CSS_N_BUFFER_STATES)) { + break; + } + } else if ( + (ia_css_is_terminal_parameter_terminal(terminal) + != true) && + (ia_css_is_terminal_program_terminal(terminal) + != true) && + (ia_css_is_terminal_program_control_init_terminal(terminal) + != true) && + (ia_css_is_terminal_spatial_parameter_terminal( + terminal) != true)) { + /* neither data nor parameter terminal, so error.*/ + break; + } + + } + /* Only true if no check failed */ + can_submit = (i == terminal_count); + + retval = 0; +EXIT: + if (haserror(EFAULT)) { + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, ERROR, + "ia_css_can_process_group_submit invalid argument process_group\n"); + } + if (!noerror()) { + IA_CSS_TRACE_1(PSYSAPI_DYNAMIC, ERROR, + "ia_css_can_process_group_submit failed (%i)\n", + retval); + } + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, INFO, + "ia_css_can_process_group_submit(): leave:\n"); + return can_submit; +} + +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_C +bool ia_css_can_enqueue_buffer_set( + const ia_css_process_group_t *process_group, + const ia_css_buffer_set_t *buffer_set) +{ + DECLARE_ERRVAL + int i; + bool can_enqueue = false; + int retval = -1; + uint8_t terminal_count; + + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, INFO, + "ia_css_can_enqueue_buffer_set(): enter:\n"); + + verifexitval(process_group != NULL, EFAULT); + verifexitval(buffer_set != NULL, EFAULT); + + terminal_count = + ia_css_process_group_get_terminal_count(process_group); + + /* + * For ppg flow, 
buffer addresses are contained in the + * external buffer set structure, so these need to be + * validated before enqueueing. + */ + verifexitval(process_group->protocol_version == + IA_CSS_PROCESS_GROUP_PROTOCOL_PPG, EFAULT); + + for (i = 0; i < (int)terminal_count; i++) { + ia_css_terminal_t *terminal = + ia_css_process_group_get_terminal(process_group, i); + vied_vaddress_t buffer; + ia_css_buffer_state_t buffer_state; + + verifexitval(terminal != NULL, EINVAL); + + buffer = ia_css_buffer_set_get_buffer(buffer_set, terminal); + IA_CSS_TRACE_3(PSYSAPI_DYNAMIC, INFO, + "\tH: Terminal number(%d) is %p having buffer 0x%x\n", + i, terminal, buffer); + + /* buffer_state is applicable only for data terminals*/ + if (ia_css_is_terminal_data_terminal(terminal) == true) { + ia_css_frame_t *frame = + ia_css_data_terminal_get_frame( + (ia_css_data_terminal_t *)terminal); + + verifexitval(frame != NULL, EINVAL); + buffer_state = ia_css_frame_get_buffer_state(frame); + if ((buffer_state == IA_CSS_BUFFER_NULL) || + (buffer_state == IA_CSS_N_BUFFER_STATES)) { + break; + } + } else if ( + (ia_css_is_terminal_parameter_terminal(terminal) + != true) && + (ia_css_is_terminal_program_terminal(terminal) + != true) && + (ia_css_is_terminal_program_control_init_terminal(terminal) + != true) && + (ia_css_is_terminal_spatial_parameter_terminal( + terminal) != true)) { + /* neither data nor parameter terminal, so error.*/ + break; + } + } + /* Only true if no check failed */ + can_enqueue = (i == terminal_count); + + retval = 0; +EXIT: + if (haserror(EFAULT)) { + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, ERROR, + "ia_css_can_enqueue_buffer_set invalid argument\n"); + } + if (!noerror()) { + IA_CSS_TRACE_1(PSYSAPI_DYNAMIC, ERROR, + "ia_css_can_enqueue_buffer_set failed (%i)\n", + retval); + } + return can_enqueue; +} + +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_C +bool ia_css_can_process_group_start( + const ia_css_process_group_t *process_group) +{ + DECLARE_ERRVAL + int i; + bool can_start = false; + int 
retval = -1; + uint8_t terminal_count; + + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, INFO, + "ia_css_can_process_group_start(): enter:\n"); + + verifexitval(process_group != NULL, EFAULT); + + terminal_count = + ia_css_process_group_get_terminal_count(process_group); + for (i = 0; i < (int)terminal_count; i++) { + ia_css_terminal_t *terminal = + ia_css_process_group_get_terminal(process_group, i); + ia_css_buffer_state_t buffer_state; + bool ok = false; + + verifexitval(terminal != NULL, EINVAL); + if (ia_css_is_terminal_data_terminal(terminal) == true) { + /* + * buffer_state is applicable only for data terminals + */ + ia_css_frame_t *frame = + ia_css_data_terminal_get_frame( + (ia_css_data_terminal_t *)terminal); + bool is_input = ia_css_is_terminal_input(terminal); + /* + * check for NULL here. + * then invoke next 2 statements + */ + verifexitval(frame != NULL, EINVAL); + IA_CSS_TRACE_5(PSYSAPI_DYNAMIC, VERBOSE, + "\tTerminal %d: buffer_state %u, access_type %u, data_bytes %u, data %u\n", + i, frame->buffer_state, frame->access_type, + frame->data_bytes, frame->data); + buffer_state = ia_css_frame_get_buffer_state(frame); + + ok = ((is_input && + (buffer_state == IA_CSS_BUFFER_FULL)) || + (!is_input && (buffer_state == + IA_CSS_BUFFER_EMPTY))); + + } else if (ia_css_is_terminal_parameter_terminal(terminal) == + true) { + /* + * FIXME: + * is there any pre-requisite for param_terminal? 
+ */ + ok = true; + } else if (ia_css_is_terminal_program_terminal(terminal) == + true) { + ok = true; + } else if (ia_css_is_terminal_program_control_init_terminal(terminal) == + true) { + ok = true; + } else if (ia_css_is_terminal_spatial_parameter_terminal( + terminal) == true) { + ok = true; + } else { + /* neither data nor parameter terminal, so error.*/ + break; + } + + if (!ok) + break; + } + /* Only true if no check failed */ + can_start = (i == terminal_count); + + retval = 0; +EXIT: + if (haserror(EFAULT)) { + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, ERROR, + "ia_css_can_process_group_start invalid argument process_group\n"); + } + if (!noerror()) { + IA_CSS_TRACE_1(PSYSAPI_DYNAMIC, ERROR, + "ia_css_can_process_group_start failed (%i)\n", + retval); + } + return can_start; +} + +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_C +size_t ia_css_process_group_get_size( + const ia_css_process_group_t *process_group) +{ + DECLARE_ERRVAL + size_t size = 0; + + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, VERBOSE, + "ia_css_process_group_get_size(): enter:\n"); + + verifexitval(process_group != NULL, EFAULT); + + size = process_group->size; + +EXIT: + if (haserror(EFAULT)) { + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, ERROR, + "ia_css_process_group_get_size invalid argument\n"); + } + return size; +} + +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_C +ia_css_process_group_state_t ia_css_process_group_get_state( + const ia_css_process_group_t *process_group) +{ + DECLARE_ERRVAL + ia_css_process_group_state_t state = IA_CSS_N_PROCESS_GROUP_STATES; + + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, VERBOSE, + "ia_css_process_group_get_state(): enter:\n"); + + verifexitval(process_group != NULL, EFAULT); + + state = process_group->state; + +EXIT: + if (haserror(EFAULT)) { + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, ERROR, + "ia_css_process_group_get_state invalid argument\n"); + } + return state; +} + +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_C +const ia_css_rbm_t *ia_css_process_group_get_routing_bitmap( + const ia_css_process_group_t *process_group) 
+{ + DECLARE_ERRVAL + const ia_css_rbm_t *rbm = NULL; + + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, VERBOSE, + "ia_css_process_group_get_routing_bitmap(): enter:\n"); + + verifexitval(process_group != NULL, EFAULT); + + rbm = &(process_group->routing_bitmap); +EXIT: + if (haserror(EFAULT)) { + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, ERROR, + "ia_css_process_group_get_routing_bitmap invalid argument\n"); + } + return rbm; +} + +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_C +uint16_t ia_css_process_group_get_fragment_count( + const ia_css_process_group_t *process_group) +{ + DECLARE_ERRVAL + uint16_t fragment_count = 0; + + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, VERBOSE, + "ia_css_process_group_get_fragment_count(): enter:\n"); + + verifexitval(process_group != NULL, EFAULT); + + fragment_count = process_group->fragment_count; + +EXIT: + if (haserror(EFAULT)) { + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, ERROR, + "ia_css_process_group_get_fragment_count invalid argument\n"); + } + return fragment_count; +} + +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_C +uint8_t ia_css_process_group_get_process_count( + const ia_css_process_group_t *process_group) +{ + DECLARE_ERRVAL + uint8_t process_count = 0; + + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, VERBOSE, + "ia_css_process_group_get_process_count(): enter:\n"); + + verifexitval(process_group != NULL, EFAULT); + + process_count = process_group->process_count; + +EXIT: + if (haserror(EFAULT)) { + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, ERROR, + "ia_css_process_group_get_process_count invalid argument\n"); + } + return process_count; +} + +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_C +uint8_t ia_css_process_group_get_terminal_count( + const ia_css_process_group_t *process_group) +{ + DECLARE_ERRVAL + uint8_t terminal_count = 0; + + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, VERBOSE, + "ia_css_process_group_get_terminal_count(): enter:\n"); + + verifexitval(process_group != NULL, EFAULT); + + terminal_count = process_group->terminal_count; + +EXIT: + if (haserror(EFAULT)) { + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, ERROR, + 
"ia_css_process_group_get_terminal_count invalid argument\n"); + } + return terminal_count; +} + +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_C +uint32_t ia_css_process_group_get_pg_load_start_ts( + const ia_css_process_group_t *process_group) +{ + DECLARE_ERRVAL + uint32_t pg_load_start_ts = 0; + + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, VERBOSE, + "ia_css_process_group_get_pg_load_start_ts(): enter:\n"); + + verifexitval(process_group != NULL, EFAULT); + + pg_load_start_ts = process_group->pg_load_start_ts; + +EXIT: + if (haserror(EFAULT)) { + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, ERROR, + "ia_css_process_group_get_pg_load_start_ts invalid argument\n"); + } + return pg_load_start_ts; +} + +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_C +uint32_t ia_css_process_group_get_pg_load_cycles( + const ia_css_process_group_t *process_group) +{ + DECLARE_ERRVAL + uint32_t pg_load_cycles = 0; + + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, VERBOSE, + "ia_css_process_group_get_pg_load_cycles(): enter:\n"); + + verifexitval(process_group != NULL, EFAULT); + + pg_load_cycles = process_group->pg_load_cycles; + +EXIT: + if (haserror(EFAULT)) { + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, ERROR, + "ia_css_process_group_get_pg_load_cycles invalid argument\n"); + } + return pg_load_cycles; +} + +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_C +uint32_t ia_css_process_group_get_pg_init_cycles( + const ia_css_process_group_t *process_group) +{ + DECLARE_ERRVAL + uint32_t pg_init_cycles = 0; + + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, VERBOSE, + "ia_css_process_group_get_pg_init_cycles(): enter:\n"); + + verifexitval(process_group != NULL, EFAULT); + + pg_init_cycles = process_group->pg_init_cycles; + +EXIT: + if (haserror(EFAULT)) { + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, ERROR, + "ia_css_process_group_get_pg_init_cycles invalid argument\n"); + } + return pg_init_cycles; +} + +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_C +uint32_t ia_css_process_group_get_pg_processing_cycles( + const ia_css_process_group_t *process_group) +{ + DECLARE_ERRVAL + uint32_t pg_processing_cycles 
= 0; + + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, VERBOSE, + "ia_css_process_group_get_pg_processing_cycles(): enter:\n"); + + verifexitval(process_group != NULL, EFAULT); + + pg_processing_cycles = process_group->pg_processing_cycles; + +EXIT: + if (haserror(EFAULT)) { + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, ERROR, + "ia_css_process_group_get_pg_processing_cycles invalid argument\n"); + } + return pg_processing_cycles; +} + +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_C +ia_css_terminal_t *ia_css_process_group_get_terminal_from_type( + const ia_css_process_group_t *process_group, + const ia_css_terminal_type_t terminal_type) +{ + unsigned int proc_cnt; + ia_css_terminal_t *terminal = NULL; + + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, VERBOSE, + "ia_css_process_group_get_terminal_from_type(): enter:\n"); + + for (proc_cnt = 0; proc_cnt < (unsigned int)ia_css_process_group_get_terminal_count(process_group); proc_cnt++) { + terminal = ia_css_process_group_get_terminal(process_group, proc_cnt); + if (terminal == NULL) { + IA_CSS_TRACE_1(PSYSAPI_DYNAMIC, ERROR, + "ia_css_process_group_get_terminal_from_type() Failed to get terminal %d", proc_cnt); + goto EXIT; + } + if (ia_css_terminal_get_type(terminal) == terminal_type) { + return terminal; + } + terminal = NULL; /* If not the expected type, return NULL */ + } +EXIT: + return terminal; +} + +/* Returns the terminal or NULL if it was not found + For some of those maybe valid to not exist at all in the process group */ +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_C +const ia_css_terminal_t *ia_css_process_group_get_single_instance_terminal( + const ia_css_process_group_t *process_group, + ia_css_terminal_type_t term_type) +{ + int i, term_count; + + assert(process_group != NULL); + + /* Those below have at most one instance per process group */ + assert(term_type == IA_CSS_TERMINAL_TYPE_PARAM_CACHED_IN || + term_type == IA_CSS_TERMINAL_TYPE_PARAM_CACHED_OUT || + term_type == IA_CSS_TERMINAL_TYPE_PROGRAM || + term_type == 
IA_CSS_TERMINAL_TYPE_PROGRAM_CONTROL_INIT); + + term_count = ia_css_process_group_get_terminal_count(process_group); + + for (i = 0; i < term_count; i++) { + const ia_css_terminal_t *terminal = ia_css_process_group_get_terminal(process_group, i); + + if (ia_css_terminal_get_type(terminal) == term_type) { + /* Only one parameter terminal per process group */ + return terminal; + } + } + + return NULL; +} + +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_C +ia_css_terminal_t *ia_css_process_group_get_terminal( + const ia_css_process_group_t *process_grp, + const unsigned int terminal_num) +{ + DECLARE_ERRVAL + ia_css_terminal_t *terminal_ptr = NULL; + uint16_t *terminal_offset_table; + + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, VERBOSE, + "ia_css_process_group_get_terminal(): enter:\n"); + + verifexitval(process_grp != NULL, EFAULT); + verifexitval(terminal_num < process_grp->terminal_count, EINVAL); + + terminal_offset_table = + (uint16_t *)((char *)process_grp + + process_grp->terminals_offset); + terminal_ptr = + (ia_css_terminal_t *)((char *)process_grp + + terminal_offset_table[terminal_num]); + + verifexitval(terminal_ptr != NULL, EFAULT); + +EXIT: + if (haserror(EFAULT)) { + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, ERROR, + "ia_css_process_group_get_terminal invalid argument\n"); + } + return terminal_ptr; +} + +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_C +ia_css_process_t *ia_css_process_group_get_process( + const ia_css_process_group_t *process_grp, + const unsigned int process_num) +{ + DECLARE_ERRVAL + ia_css_process_t *process_ptr = NULL; + uint16_t *process_offset_table; + + verifexitval(process_grp != NULL, EFAULT); + verifexitval(process_num < process_grp->process_count, EINVAL); + + process_offset_table = + (uint16_t *)((char *)process_grp + + process_grp->processes_offset); + process_ptr = + (ia_css_process_t *)((char *)process_grp + + process_offset_table[process_num]); + + verifexitval(process_ptr != NULL, EFAULT); + +EXIT: + if (haserror(EFAULT)) { + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, 
ERROR, + "ia_css_process_group_get_process invalid argument\n"); + } + return process_ptr; +} + +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_C +ia_css_program_group_ID_t ia_css_process_group_get_program_group_ID( + const ia_css_process_group_t *process_group) +{ + DECLARE_ERRVAL + ia_css_program_group_ID_t id = 0; + + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, VERBOSE, + "ia_css_process_group_get_program_group_ID(): enter:\n"); + + verifexitval(process_group != NULL, EFAULT); + + id = process_group->ID; + +EXIT: + if (haserror(EFAULT)) { + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, ERROR, + "ia_css_process_group_get_program_group_ID invalid argument\n"); + } + return id; +} + +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_C +vied_nci_resource_bitmap_t ia_css_process_group_get_resource_bitmap( + const ia_css_process_group_t *process_group) +{ + DECLARE_ERRVAL + vied_nci_resource_bitmap_t resource_bitmap = 0; + + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, VERBOSE, + "ia_css_process_group_get_resource_bitmap(): enter:\n"); + + verifexitval(process_group != NULL, EFAULT); + + resource_bitmap = process_group->resource_bitmap; + +EXIT: + if (haserror(EFAULT)) { + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, ERROR, + "ia_css_process_group_get_resource_bitmap invalid argument\n"); + } + return resource_bitmap; +} + +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_C +int ia_css_process_group_set_resource_bitmap( + ia_css_process_group_t *process_group, + const vied_nci_resource_bitmap_t resource_bitmap) +{ + DECLARE_ERRVAL + int retval = -1; + + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, VERBOSE, + "ia_css_process_group_set_resource_bitmap(): enter:\n"); + + verifexitval(process_group != NULL, EFAULT); + + process_group->resource_bitmap = resource_bitmap; + + retval = 0; +EXIT: + if (haserror(EFAULT)) { + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, ERROR, + "ia_css_process_group_set_resource_bitmap invalid argument process_group\n"); + } + if (!noerror()) { + IA_CSS_TRACE_1(PSYSAPI_DYNAMIC, ERROR, + "ia_css_process_group_set_resource_bitmap failed (%i)\n", + retval); + } + 
return retval; +} + +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_C +int ia_css_process_group_set_routing_bitmap( + ia_css_process_group_t *process_group, + const ia_css_rbm_t rbm) +{ + DECLARE_ERRVAL + int retval = -1; + + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, VERBOSE, + "ia_css_process_group_set_routing_bitmap(): enter:\n"); + + verifexitval(process_group != NULL, EFAULT); + process_group->routing_bitmap = rbm; + retval = 0; +EXIT: + if (haserror(EFAULT)) { + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, ERROR, + "ia_css_process_group_set_routing_bitmap invalid argument process_group\n"); + } + if (!noerror()) { + IA_CSS_TRACE_1(PSYSAPI_DYNAMIC, ERROR, + "ia_css_process_group_set_routing_bitmap failed (%i)\n", + retval); + } + return retval; +} + +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_C +uint32_t ia_css_process_group_compute_cycle_count( + const ia_css_program_group_manifest_t *manifest, + const ia_css_program_group_param_t *param) +{ + DECLARE_ERRVAL + uint32_t cycle_count = 0; + + NOT_USED(manifest); + NOT_USED(param); + + verifexitval(manifest != NULL, EFAULT); + verifexitval(param != NULL, EFAULT); + + cycle_count = 1; + +EXIT: + if (haserror(EFAULT)) { + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, ERROR, + "ia_css_process_group_compute_cycle_count invalid argument\n"); + } + return cycle_count; +} + +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_C +int ia_css_process_group_set_fragment_state( + ia_css_process_group_t *process_group, + uint16_t fragment_state) +{ + DECLARE_ERRVAL + int retval = -1; + + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, INFO, + "ia_css_process_group_set_fragment_state(): enter:\n"); + + verifexitval(process_group != NULL, EFAULT); + verifexitval(fragment_state <= ia_css_process_group_get_fragment_count( + process_group), EINVAL); + + process_group->fragment_state = fragment_state; + retval = 0; +EXIT: + if (haserror(EFAULT)) { + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, ERROR, + "ia_css_process_group_set_fragment_state invalid argument process_group\n"); + } + if (!noerror()) { + IA_CSS_TRACE_1(PSYSAPI_DYNAMIC, 
ERROR, + "ia_css_process_group_set_fragment_state failed (%i)\n", + retval); + } + return retval; +} + +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_C +int ia_css_process_group_get_fragment_state( + const ia_css_process_group_t *process_group, + uint16_t *fragment_state) +{ + DECLARE_ERRVAL + int retval = -1; + + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, VERBOSE, + "ia_css_process_group_get_fragment_state(): enter:\n"); + + verifexitval(process_group != NULL, EFAULT); + verifexitval(fragment_state != NULL, EFAULT); + + *fragment_state = process_group->fragment_state; + retval = 0; + +EXIT: + if (haserror(EFAULT)) { + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, ERROR, + "ia_css_process_group_get_fragment_state invalid argument\n"); + } + if (!noerror()) { + IA_CSS_TRACE_1(PSYSAPI_DYNAMIC, ERROR, + "ia_css_process_group_get_fragment_state failed (%i)\n", + retval); + } + return retval; +} + +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_C +int ia_css_process_group_get_ipu_vaddress( + const ia_css_process_group_t *process_group, + vied_vaddress_t *ipu_vaddress) +{ + DECLARE_ERRVAL + int retval = -1; + + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, VERBOSE, + "ia_css_process_group_get_ipu_vaddress(): enter:\n"); + + verifexitval(process_group != NULL, EFAULT); + verifexitval(ipu_vaddress != NULL, EFAULT); + + *ipu_vaddress = process_group->ipu_virtual_address; + retval = 0; + +EXIT: + if (haserror(EFAULT)) { + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, ERROR, + "ia_css_process_group_get_ipu_vaddress invalid argument\n"); + } + if (!noerror()) { + IA_CSS_TRACE_1(PSYSAPI_DYNAMIC, ERROR, + "ia_css_process_group_get_ipu_vaddress failed (%i)\n", + retval); + } + return retval; +} + +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_C +int ia_css_process_group_set_ipu_vaddress( + ia_css_process_group_t *process_group, + vied_vaddress_t ipu_vaddress) +{ + DECLARE_ERRVAL + int retval = -1; + + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, VERBOSE, + "ia_css_process_group_set_ipu_vaddress(): enter:\n"); + + verifexitval(process_group != NULL, EFAULT); + + 
process_group->ipu_virtual_address = ipu_vaddress; + retval = 0; + +EXIT: + if (haserror(EFAULT)) { + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, ERROR, + "ia_css_process_group_set_ipu_vaddress invalid argument\n"); + } + if (!noerror()) { + IA_CSS_TRACE_1(PSYSAPI_DYNAMIC, ERROR, + "ia_css_process_group_set_ipu_vaddress failed (%i)\n", + retval); + } + return retval; +} + +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_C +uint8_t ia_css_process_group_get_protocol_version( + const ia_css_process_group_t *process_group) +{ + DECLARE_ERRVAL + uint8_t protocol_version = IA_CSS_PROCESS_GROUP_N_PROTOCOLS; + + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, VERBOSE, + "ia_css_process_group_get_protocol_version(): enter:\n"); + + verifexitval(process_group != NULL, EFAULT); + + protocol_version = process_group->protocol_version; + +EXIT: + if (haserror(EFAULT)) { + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, ERROR, + "ia_css_process_group_get_protocol_version invalid argument\n"); + } + return protocol_version; +} + +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_C +uint8_t ia_css_process_group_get_base_queue_id( + ia_css_process_group_t *process_group) +{ + DECLARE_ERRVAL + uint8_t queue_id = IA_CSS_N_PSYS_CMD_QUEUE_ID; + + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, VERBOSE, + "ia_css_process_group_get_base_queue_id(): enter:\n"); + + verifexitval(process_group != NULL, EFAULT); + + queue_id = process_group->base_queue_id; + +EXIT: + if (haserror(EFAULT)) { + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, ERROR, + "ia_css_process_group_get_base_queue_id invalid argument\n"); + } + return queue_id; +} + +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_C +int ia_css_process_group_set_base_queue_id( + ia_css_process_group_t *process_group, + uint8_t queue_id) +{ + DECLARE_ERRVAL + int retval = -1; + + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, VERBOSE, + "ia_css_process_group_set_base_queue_id(): enter:\n"); + + verifexitval(process_group != NULL, EFAULT); + + process_group->base_queue_id = queue_id; + retval = 0; + +EXIT: + if (haserror(EFAULT)) { + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, ERROR, 
+ "ia_css_process_group_set_base_queue_id invalid argument\n"); + } + return retval; +} + +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_C +uint8_t ia_css_process_group_get_num_queues( + ia_css_process_group_t *process_group) +{ + DECLARE_ERRVAL + uint8_t num_queues = 0; + + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, VERBOSE, + "ia_css_process_group_get_num_queues(): enter:\n"); + + verifexitval(process_group != NULL, EFAULT); + + num_queues = process_group->num_queues; + +EXIT: + if (haserror(EFAULT)) { + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, ERROR, + "ia_css_process_group_get_num_queues invalid argument\n"); + } + return num_queues; +} + +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_C +int ia_css_process_group_set_num_queues( + ia_css_process_group_t *process_group, + uint8_t num_queues) +{ + DECLARE_ERRVAL + int retval = -1; + + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, VERBOSE, + "ia_css_process_group_set_num_queues(): enter:\n"); + + verifexitval(process_group != NULL, EFAULT); + + process_group->num_queues = num_queues; + retval = 0; + +EXIT: + if (haserror(EFAULT)) { + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, ERROR, + "ia_css_process_group_set_num_queues invalid argument\n"); + } + return retval; +} + + +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_C +bool ia_css_process_group_has_vp(const ia_css_process_group_t *process_group) +{ + bool has_vp = false; + uint32_t i; + + uint8_t process_count = ia_css_process_group_get_process_count(process_group); + + for (i = 0; i < process_count; i++) { + ia_css_process_t *process; + vied_nci_cell_ID_t cell_id; + + process = ia_css_process_group_get_process(process_group, i); + cell_id = ia_css_process_get_cell(process); + + if (VIED_NCI_VP_TYPE_ID == vied_nci_cell_get_type(cell_id)) { + has_vp = true; + break; + } + } + + return has_vp; +} + +#endif /* __IA_CSS_PSYS_PROCESS_GROUP_IMPL_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/psysapi/dynamic/src/ia_css_psys_process_impl.h 
b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/psysapi/dynamic/src/ia_css_psys_process_impl.h new file mode 100644 index 0000000000000..5d0303012700b --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/psysapi/dynamic/src/ia_css_psys_process_impl.h @@ -0,0 +1,637 @@ +/* +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ + +#ifndef __IA_CSS_PSYS_PROCESS_IMPL_H +#define __IA_CSS_PSYS_PROCESS_IMPL_H + +#include + +#include +#include + +#include +#include +#include + +#include + +#include "ia_css_psys_dynamic_trace.h" +#include "ia_css_psys_process_private_types.h" + +/** Function only to be used in ia_css_psys_process_impl.h and ia_css_psys_process.h */ +STORAGE_CLASS_INLINE vied_nci_cell_ID_t ia_css_process_cells_get_cell(const ia_css_process_t *process, int index) +{ + assert(index < IA_CSS_PROCESS_MAX_CELLS); + if (index >= IA_CSS_PROCESS_MAX_CELLS) { + return VIED_NCI_N_CELL_ID; + } +#if IA_CSS_PROCESS_MAX_CELLS == 1 + return process->cell_id; +#else + return process->cells[index]; +#endif +} + +/** Function only to be used in ia_css_psys_process_impl.h and ia_css_psys_process.h */ +STORAGE_CLASS_INLINE void ia_css_process_cells_set_cell(ia_css_process_t *process, int index, vied_nci_cell_ID_t cell_id) +{ + assert(index < IA_CSS_PROCESS_MAX_CELLS); + if (index >= IA_CSS_PROCESS_MAX_CELLS) { + return; + } +#if IA_CSS_PROCESS_MAX_CELLS == 1 + process->cell_id = cell_id; +#else + process->cells[index] = cell_id; +#endif +} + +/** 
Function only to be used in ia_css_psys_process_impl.h and ia_css_psys_process */ +STORAGE_CLASS_INLINE void ia_css_process_cells_clear(ia_css_process_t *process) +{ + int i; + for (i = 0; i < IA_CSS_PROCESS_MAX_CELLS; i++) { + ia_css_process_cells_set_cell(process, i, VIED_NCI_N_CELL_ID); + } +} + +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_C +vied_nci_cell_ID_t ia_css_process_get_cell( + const ia_css_process_t *process) +{ + DECLARE_ERRVAL + vied_nci_cell_ID_t cell_id = VIED_NCI_N_CELL_ID; + int i = 0; + + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, VERBOSE, + "ia_css_process_get_cell(): enter:\n"); + + verifexitval(process != NULL, EFAULT); + +#if IA_CSS_PROCESS_MAX_CELLS > 1 + for (i = 1; i < IA_CSS_PROCESS_MAX_CELLS; i++) { + assert(VIED_NCI_N_CELL_ID == ia_css_process_cells_get_cell(process, i)); +#ifdef __HIVECC +#pragma hivecc unroll +#endif + } +#else + (void)i; +#endif + cell_id = ia_css_process_cells_get_cell(process, 0); + +EXIT: + if (haserror(EFAULT)) { + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, ERROR, + "ia_css_process_get_cell invalid argument\n"); + } + return cell_id; +} + +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_C +vied_nci_mem_ID_t ia_css_process_get_ext_mem_id( + const ia_css_process_t *process, + const vied_nci_mem_type_ID_t mem_type) +{ + DECLARE_ERRVAL + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, VERBOSE, + "ia_css_process_get_ext_mem(): enter:\n"); + + verifexitval(process != NULL && mem_type < VIED_NCI_N_DATA_MEM_TYPE_ID, EFAULT); + +EXIT: + if (!noerror()) { + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, ERROR, + "ia_css_process_get_ext_mem invalid argument\n"); + return IA_CSS_PROCESS_INVALID_OFFSET; + } + return process->ext_mem_id[mem_type]; +} + + +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_C +uint32_t ia_css_process_get_program_idx( + const ia_css_process_t *process) +{ + DECLARE_ERRVAL + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, VERBOSE, + "ia_css_process_get_program_idx(): enter:\n"); + + verifexitval(process != NULL, EFAULT); + +EXIT: + if (haserror(EFAULT)) { + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, ERROR, + 
"ia_css_process_get_program_idx invalid argument\n"); + return IA_CSS_PROCESS_INVALID_PROGRAM_IDX; + } + return process->program_idx; +} + + +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_C +vied_nci_resource_size_t ia_css_process_get_dev_chn( + const ia_css_process_t *process, + const vied_nci_dev_chn_ID_t dev_chn_id) +{ + DECLARE_ERRVAL + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, VERBOSE, + "ia_css_process_get_dev_chn(): enter:\n"); + + verifexitval(process != NULL && dev_chn_id < VIED_NCI_N_DEV_CHN_ID, EFAULT); + +EXIT: + if (!noerror()) { + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, ERROR, + "ia_css_process_get_dev_chn(): invalid arguments\n"); + return IA_CSS_PROCESS_INVALID_OFFSET; + } + return process->dev_chn_offset[dev_chn_id]; +} + +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_C +vied_nci_resource_size_t ia_css_process_get_int_mem_offset( + const ia_css_process_t *process, + const vied_nci_mem_type_ID_t mem_id) +{ + DECLARE_ERRVAL + vied_nci_resource_size_t int_mem_offset = IA_CSS_PROCESS_INVALID_OFFSET; + + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, VERBOSE, + "ia_css_process_get_int_mem_offset(): enter:\n"); + + verifexitval(process != NULL && mem_id < VIED_NCI_N_MEM_TYPE_ID, EFAULT); + +EXIT: + if (noerror()) { + int_mem_offset = process->int_mem_offset[mem_id]; + } else { + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, ERROR, + "ia_css_process_get_int_mem_offset invalid argument\n"); + } + + return int_mem_offset; +} + +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_C +vied_nci_resource_size_t ia_css_process_get_ext_mem_offset( + const ia_css_process_t *process, + const vied_nci_mem_type_ID_t mem_type_id) +{ + DECLARE_ERRVAL + vied_nci_resource_size_t ext_mem_offset = IA_CSS_PROCESS_INVALID_OFFSET; + + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, VERBOSE, + "ia_css_process_get_ext_mem_offset(): enter:\n"); + + verifexitval(process != NULL && mem_type_id < VIED_NCI_N_DATA_MEM_TYPE_ID, EFAULT); + +EXIT: + if (noerror()) { + ext_mem_offset = process->ext_mem_offset[mem_type_id]; + } else { + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, ERROR, + 
"ia_css_process_get_ext_mem_offset invalid argument\n"); + } + + return ext_mem_offset; +} + +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_C +size_t ia_css_process_get_size( + const ia_css_process_t *process) +{ + DECLARE_ERRVAL + size_t size = 0; + + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, VERBOSE, + "ia_css_process_get_size(): enter:\n"); + + verifexitval(process != NULL, EFAULT); + +EXIT: + if (noerror()) { + size = process->size; + } else { + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, ERROR, + "ia_css_process_get_size invalid argument\n"); + } + + return size; +} + +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_C +ia_css_process_state_t ia_css_process_get_state( + const ia_css_process_t *process) +{ + DECLARE_ERRVAL + ia_css_process_state_t state = IA_CSS_N_PROCESS_STATES; + + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, VERBOSE, + "ia_css_process_get_state(): enter:\n"); + + verifexitval(process != NULL, EFAULT); + +EXIT: + if (noerror()) { + state = process->state; + } else { + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, ERROR, + "ia_css_process_get_state invalid argument\n"); + } + + return state; +} + +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_C +int ia_css_process_set_state( + ia_css_process_t *process, + ia_css_process_state_t state) +{ + DECLARE_ERRVAL + int retval = -1; + + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, VERBOSE, + "ia_css_process_set_state(): enter:\n"); + + verifexitval(process != NULL, EFAULT); + + process->state = state; + retval = 0; +EXIT: + if (haserror(EFAULT)) { + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, ERROR, + "ia_css_process_set_state invalid argument\n"); + } + + return retval; +} + +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_C +uint8_t ia_css_process_get_cell_dependency_count( + const ia_css_process_t *process) +{ + DECLARE_ERRVAL + uint8_t cell_dependency_count = 0; + + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, VERBOSE, + "ia_css_process_get_cell_dependency_count(): enter:\n"); + + verifexitval(process != NULL, EFAULT); + cell_dependency_count = process->cell_dependency_count; + +EXIT: + if (haserror(EFAULT)) { + 
IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, ERROR, + "ia_css_process_get_cell_dependency_count invalid argument\n"); + } + return cell_dependency_count; +} + +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_C +uint8_t ia_css_process_get_terminal_dependency_count( + const ia_css_process_t *process) +{ + DECLARE_ERRVAL + uint8_t terminal_dependency_count = 0; + + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, VERBOSE, + "ia_css_process_get_terminal_dependency_count(): enter:\n"); + + verifexitval(process != NULL, EFAULT); + terminal_dependency_count = process->terminal_dependency_count; + +EXIT: + if (haserror(EFAULT)) { + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, ERROR, + "ia_css_process_get_terminal_dependency_count invalid argument process\n"); + } + return terminal_dependency_count; +} + +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_C +ia_css_process_group_t *ia_css_process_get_parent( + const ia_css_process_t *process) +{ + DECLARE_ERRVAL + ia_css_process_group_t *parent = NULL; + + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, VERBOSE, + "ia_css_process_get_parent(): enter:\n"); + + verifexitval(process != NULL, EFAULT); + + parent = + (ia_css_process_group_t *) ((char *)process + process->parent_offset); + +EXIT: + if (haserror(EFAULT)) { + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, ERROR, + "ia_css_process_get_parent invalid argument process\n"); + } + return parent; +} + +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_C +ia_css_program_ID_t ia_css_process_get_program_ID( + const ia_css_process_t *process) +{ + DECLARE_ERRVAL + ia_css_program_ID_t id = 0; + + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, VERBOSE, + "ia_css_process_get_program_ID(): enter:\n"); + + verifexitval(process != NULL, EFAULT); + + id = process->ID; + +EXIT: + if (haserror(EFAULT)) { + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, ERROR, + "ia_css_process_get_program_ID invalid argument process\n"); + } + return id; +} + +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_C +vied_nci_resource_id_t ia_css_process_get_cell_dependency( + const ia_css_process_t *process, + const unsigned int cell_num) +{ + DECLARE_ERRVAL + 
vied_nci_resource_id_t cell_dependency = + IA_CSS_PROCESS_INVALID_DEPENDENCY; + vied_nci_resource_id_t *cell_dep_ptr = NULL; + + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, VERBOSE, + "ia_css_process_get_cell_dependency(): enter:\n"); + + verifexitval(process != NULL, EFAULT); + verifexitval(cell_num < process->cell_dependency_count, EFAULT); + + cell_dep_ptr = + (vied_nci_resource_id_t *) + ((char *)process + process->cell_dependencies_offset); + cell_dependency = *(cell_dep_ptr + cell_num); +EXIT: + if (haserror(EFAULT)) { + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, ERROR, + "ia_css_process_get_cell_dependency invalid argument\n"); + } + return cell_dependency; +} + +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_C +uint8_t ia_css_process_get_terminal_dependency( + const ia_css_process_t *process, + const unsigned int terminal_num) +{ + DECLARE_ERRVAL + uint8_t *ter_dep_ptr = NULL; + uint8_t ter_dep = IA_CSS_PROCESS_INVALID_DEPENDENCY; + + verifexitval(process != NULL, EFAULT); + verifexitval(terminal_num < process->terminal_dependency_count, EFAULT); + + ter_dep_ptr = (uint8_t *) ((char *)process + + process->terminal_dependencies_offset); + + ter_dep = *(ter_dep_ptr + terminal_num); + +EXIT: + if (haserror(EFAULT)) { + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, ERROR, + "ia_css_process_get_terminal_dependency invalid argument\n"); + } + return ter_dep; +} + +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_C +ia_css_kernel_bitmap_t ia_css_process_get_kernel_bitmap( + const ia_css_process_t *process) +{ + DECLARE_ERRVAL + ia_css_kernel_bitmap_t bitmap = ia_css_kernel_bitmap_clear(); + + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, VERBOSE, + "ia_css_process_get_kernel_bitmap(): enter:\n"); + + verifexitval(process != NULL, EFAULT); + + bitmap = process->kernel_bitmap; + +EXIT: + if (haserror(EFAULT)) { + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, ERROR, + "ia_css_process_get_kernel_bitmap invalid argument process\n"); + } + return bitmap; +} + +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_C +vied_nci_resource_bitmap_t ia_css_process_get_cells_bitmap( + 
const ia_css_process_t *process) +{ + DECLARE_ERRVAL + vied_nci_resource_bitmap_t bitmap = 0; + vied_nci_cell_ID_t cell_id; + int i = 0; + + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, VERBOSE, + "ia_css_process_get_cell_bitmap(): enter:\n"); + + verifexitval(process != NULL, EFAULT); + + for (i = 0; i < IA_CSS_PROCESS_MAX_CELLS; i++) { + cell_id = ia_css_process_cells_get_cell(process, i); + if (VIED_NCI_N_CELL_ID != cell_id) { + bitmap |= (1 << cell_id); + } +#ifdef __HIVECC +#pragma hivecc unroll +#endif + } + +EXIT: + if (haserror(EFAULT)) { + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, ERROR, + "ia_css_process_get_cells_bitmap invalid argument process\n"); + } + + return bitmap; +} + +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_C +vied_nci_resource_bitmap_t* ia_css_process_get_dfm_port_bitmap_ptr( + ia_css_process_t *process) +{ + DECLARE_ERRVAL + vied_nci_resource_bitmap_t *p_bitmap = NULL; + + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, VERBOSE, + "ia_css_process_get_dfm_port_bitmap(): enter:\n"); + + verifexitval(process != NULL, EFAULT); +#if (VIED_NCI_N_DEV_DFM_ID > 0) + p_bitmap = &process->dfm_port_bitmap[0]; +#else + p_bitmap = NULL; +#endif +EXIT: + if (haserror(EFAULT)) { + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, ERROR, + "ia_css_process_get_dfm_port_bitmap invalid argument process\n"); + } + + return p_bitmap; +} + +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_C +vied_nci_resource_bitmap_t* ia_css_process_get_dfm_active_port_bitmap_ptr( + ia_css_process_t *process) +{ + DECLARE_ERRVAL + vied_nci_resource_bitmap_t *p_bitmap = NULL; + + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, VERBOSE, + "ia_css_process_get_dfm_port_bitmap(): enter:\n"); + + verifexitval(process != NULL, EFAULT); +#if (VIED_NCI_N_DEV_DFM_ID > 0) + p_bitmap = &process->dfm_active_port_bitmap[0]; +#else + p_bitmap = NULL; +#endif +EXIT: + if (haserror(EFAULT)) { + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, ERROR, + "ia_css_process_get_dfm_port_bitmap invalid argument process\n"); + } + + return p_bitmap; +} + +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_C 
+vied_nci_resource_bitmap_t ia_css_process_get_dfm_port_bitmap( + const ia_css_process_t *process, + vied_nci_dev_dfm_id_t dfm_res_id) +{ + DECLARE_ERRVAL + vied_nci_resource_bitmap_t bitmap = 0; + + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, VERBOSE, + "ia_css_process_get_dfm_port_bitmap(): enter:\n"); + + verifexitval(process != NULL, EFAULT); +#if (VIED_NCI_N_DEV_DFM_ID > 0) + verifexitval(dfm_res_id < VIED_NCI_N_DEV_DFM_ID, EFAULT); + bitmap = process->dfm_port_bitmap[dfm_res_id]; +#else + bitmap = 0; + (void)dfm_res_id; +#endif +EXIT: + if (haserror(EFAULT)) { + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, ERROR, + "ia_css_process_get_dfm_port_bitmap invalid argument process\n"); + } + + return bitmap; +} + +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_C +vied_nci_resource_bitmap_t ia_css_process_get_dfm_active_port_bitmap( + const ia_css_process_t *process, + vied_nci_dev_dfm_id_t dfm_res_id) +{ + DECLARE_ERRVAL + vied_nci_resource_bitmap_t bitmap = 0; + + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, VERBOSE, + "ia_css_process_get_dfm_active_port_bitmap(): enter:\n"); + + verifexitval(process != NULL, EFAULT); +#if (VIED_NCI_N_DEV_DFM_ID > 0) + verifexitval(dfm_res_id < VIED_NCI_N_DEV_DFM_ID, EFAULT); + bitmap = process->dfm_active_port_bitmap[dfm_res_id]; +#else + bitmap = 0; + (void)dfm_res_id; +#endif +EXIT: + if (haserror(EFAULT)) { + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, ERROR, + "ia_css_process_get_dfm_active_port_bitmap invalid argument process\n"); + } + return bitmap; +} + +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_C +bool ia_css_is_process_valid( + const ia_css_process_t *process, + const ia_css_program_manifest_t *p_manifest) +{ + DECLARE_ERRVAL + bool invalid_flag = false; + ia_css_program_ID_t prog_id; + ia_css_kernel_bitmap_t prog_kernel_bitmap; + + verifexitval(NULL != process, EFAULT); + verifexitval(NULL != p_manifest, EFAULT); + + prog_id = ia_css_process_get_program_ID(process); + verifjmpexit(prog_id == + ia_css_program_manifest_get_program_ID(p_manifest)); + + prog_kernel_bitmap = + 
ia_css_program_manifest_get_kernel_bitmap(p_manifest); + + invalid_flag = (process->size <= process->cell_dependencies_offset) || + (process->size <= process->terminal_dependencies_offset) || + !ia_css_is_kernel_bitmap_subset(prog_kernel_bitmap, + process->kernel_bitmap); + + if (ia_css_has_program_manifest_fixed_cell(p_manifest)) { + vied_nci_cell_ID_t cell_id; + + cell_id = ia_css_program_manifest_get_cell_ID(p_manifest); + invalid_flag = invalid_flag || + (cell_id != (vied_nci_cell_ID_t)(ia_css_process_get_cell(process))); + } + invalid_flag = invalid_flag || + ((process->cell_dependency_count + + process->terminal_dependency_count) == 0) || + (process->cell_dependency_count != + ia_css_program_manifest_get_program_dependency_count(p_manifest)) || + (process->terminal_dependency_count != + ia_css_program_manifest_get_terminal_dependency_count(p_manifest)); + + /* TODO: to be removed once all PGs pass validation */ + if (invalid_flag == true) { + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, INFO, + "ia_css_is_process_valid(): false\n"); + } + +EXIT: + if (haserror(EFAULT)) { + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, ERROR, + "ia_css_is_process_valid() invalid argument\n"); + return false; + } else { + return (!invalid_flag); + } +} + +#endif /* __IA_CSS_PSYS_PROCESS_IMPL_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/psysapi/dynamic/src/ia_css_psys_process_private_types.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/psysapi/dynamic/src/ia_css_psys_process_private_types.h new file mode 100644 index 0000000000000..ae0affde97187 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/psysapi/dynamic/src/ia_css_psys_process_private_types.h @@ -0,0 +1,87 @@ +/* +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. 
+ * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ + +#ifndef __IA_CSS_PSYS_PROCESS_PRIVATE_TYPES_H +#define __IA_CSS_PSYS_PROCESS_PRIVATE_TYPES_H + +#include "ia_css_psys_process_types.h" +#include "vied_nci_psys_resource_model.h" + +#define N_UINT32_IN_PROCESS_STRUCT 2 +#define N_UINT16_IN_PROCESS_STRUCT 3 +#define N_UINT8_IN_PROCESS_STRUCT 2 + +#define SIZE_OF_PROCESS_STRUCT_BITS \ + (IA_CSS_KERNEL_BITMAP_BITS \ + + (N_UINT32_IN_PROCESS_STRUCT * 32) \ + + IA_CSS_PROGRAM_ID_BITS \ + + (VIED_NCI_RESOURCE_BITMAP_BITS * VIED_NCI_N_DEV_DFM_ID) \ + + (VIED_NCI_RESOURCE_BITMAP_BITS * VIED_NCI_N_DEV_DFM_ID) \ + + IA_CSS_PROCESS_STATE_BITS \ + + (N_UINT16_IN_PROCESS_STRUCT * 16) \ + + (VIED_NCI_N_MEM_TYPE_ID * VIED_NCI_RESOURCE_SIZE_BITS) \ + + (VIED_NCI_N_DATA_MEM_TYPE_ID * VIED_NCI_RESOURCE_SIZE_BITS) \ + + (VIED_NCI_N_DEV_CHN_ID * VIED_NCI_RESOURCE_SIZE_BITS) \ + + (IA_CSS_PROCESS_MAX_CELLS * VIED_NCI_RESOURCE_ID_BITS) \ + + (VIED_NCI_N_MEM_TYPE_ID * VIED_NCI_RESOURCE_ID_BITS) \ + + (VIED_NCI_N_DATA_MEM_TYPE_ID * VIED_NCI_RESOURCE_ID_BITS) \ + + (N_UINT8_IN_PROCESS_STRUCT * 8) \ + + (N_PADDING_UINT8_IN_PROCESS_STRUCT * 8)) + +struct ia_css_process_s { + /**< Indicate which kernels lead to this process being used */ + ia_css_kernel_bitmap_t kernel_bitmap; + uint32_t size; /**< Size of this structure */ + ia_css_program_ID_t ID; /**< Referal ID to a specific program FW */ + uint32_t program_idx; /**< Program Index into the PG manifest */ +#if (VIED_NCI_N_DEV_DFM_ID > 0) + /**< DFM port allocated to this process */ + vied_nci_resource_bitmap_t 
dfm_port_bitmap[VIED_NCI_N_DEV_DFM_ID]; + /**< Active DFM ports which need a kick */ + vied_nci_resource_bitmap_t dfm_active_port_bitmap[VIED_NCI_N_DEV_DFM_ID]; +#endif + /**< State of the process FSM dependent on the parent FSM */ + ia_css_process_state_t state; + int16_t parent_offset; /**< Reference to the process group */ + /**< Array[dependency_count] of ID's of the cells that provide input */ + uint16_t cell_dependencies_offset; + /**< Array[terminal_dependency_count] of indices of connected terminals */ + uint16_t terminal_dependencies_offset; + /**< (internal) Memory allocation offset given to this process */ + vied_nci_resource_size_t int_mem_offset[VIED_NCI_N_MEM_TYPE_ID]; + /**< (external) Memory allocation offset given to this process */ + vied_nci_resource_size_t ext_mem_offset[VIED_NCI_N_DATA_MEM_TYPE_ID]; + /**< Device channel allocation offset given to this process */ + vied_nci_resource_size_t dev_chn_offset[VIED_NCI_N_DEV_CHN_ID]; + /**< Cells (VP, ACB) allocated for the process*/ +#if IA_CSS_PROCESS_MAX_CELLS == 1 + vied_nci_resource_id_t cell_id; +#else + vied_nci_resource_id_t cells[IA_CSS_PROCESS_MAX_CELLS]; +#endif /* IA_CSS_PROCESS_MAX_CELLS == 1 */ + /**< (internal) Memory ID; This is redundant, derived from cell_id */ + vied_nci_resource_id_t int_mem_id[VIED_NCI_N_MEM_TYPE_ID]; + /**< (external) Memory ID */ + vied_nci_resource_id_t ext_mem_id[VIED_NCI_N_DATA_MEM_TYPE_ID]; + /**< Number of processes (mapped on cells) this process depends on */ + uint8_t cell_dependency_count; + /**< Number of terminals this process depends on */ + uint8_t terminal_dependency_count; + /**< Padding bytes for 64bit alignment*/ +#if (N_PADDING_UINT8_IN_PROCESS_STRUCT > 0) + uint8_t padding[N_PADDING_UINT8_IN_PROCESS_STRUCT]; +#endif /*(N_PADDING_UINT8_IN_PROCESS_STRUCT > 0)*/ +}; + +#endif /* __IA_CSS_PSYS_PROCESS_PRIVATE_TYPES_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/psysapi/dynamic/src/ia_css_psys_terminal.c 
b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/psysapi/dynamic/src/ia_css_psys_terminal.c new file mode 100644 index 0000000000000..ea406f2292739 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/psysapi/dynamic/src/ia_css_psys_terminal.c @@ -0,0 +1,604 @@
/*
 * Support for Intel Camera Imaging ISP subsystem.
 * Copyright (c) 2010 - 2018, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 */

#include "ia_css_psys_dynamic_storage_class.h"
#include "ia_css_psys_terminal_private_types.h"
#include "ia_css_terminal_types.h"

/*
 * Functions to possibly inline
 */

#ifndef __IA_CSS_PSYS_DYNAMIC_INLINE__
#include "ia_css_psys_terminal_impl.h"
#endif /* __IA_CSS_PSYS_DYNAMIC_INLINE__ */

/*
 * Compile-time layout checks for the serialized terminal structures.
 *
 * For each terminal/section descriptor type shared across the host/firmware
 * boundary this verifies that:
 *  - the C struct size matches its SIZE_OF_..._BITS ABI constant, and
 *  - the struct size is a multiple of 64 bits (so arrays of descriptors
 *    that follow each other in the serialized blob stay 64-bit aligned).
 * The function is never called; COMPILATION_ERROR_IF() turns any violated
 * condition into a build failure.
 */
STORAGE_CLASS_INLINE void __terminal_dummy_check_alignment(void)
{
	COMPILATION_ERROR_IF(
		SIZE_OF_PARAM_TERMINAL_STRUCT_BITS !=
		(CHAR_BIT * sizeof(ia_css_param_terminal_t)));

	COMPILATION_ERROR_IF(0 !=
		sizeof(ia_css_param_terminal_t) % sizeof(uint64_t));

	COMPILATION_ERROR_IF(
		SIZE_OF_PARAM_SEC_STRUCT_BITS !=
		(CHAR_BIT * sizeof(ia_css_param_section_desc_t)));

	COMPILATION_ERROR_IF(0 !=
		sizeof(ia_css_param_section_desc_t) % sizeof(uint64_t));

	COMPILATION_ERROR_IF(
		SIZE_OF_SPATIAL_PARAM_TERM_STRUCT_BITS !=
		(CHAR_BIT * sizeof(ia_css_spatial_param_terminal_t)));

	COMPILATION_ERROR_IF(0 !=
		sizeof(ia_css_spatial_param_terminal_t) % sizeof(uint64_t));

	COMPILATION_ERROR_IF(
		SIZE_OF_FRAME_GRID_PARAM_SEC_STRUCT_BITS !=
		(CHAR_BIT * sizeof(
			ia_css_frame_grid_param_section_desc_t)));

	COMPILATION_ERROR_IF(0 != sizeof(
		ia_css_frame_grid_param_section_desc_t) % sizeof(uint64_t));

	COMPILATION_ERROR_IF(
		SIZE_OF_FRAG_GRID_STRUCT_BITS !=
		(CHAR_BIT * sizeof(ia_css_fragment_grid_desc_t)));

	COMPILATION_ERROR_IF(0 !=
		sizeof(ia_css_fragment_grid_desc_t) % sizeof(uint64_t));

	COMPILATION_ERROR_IF(
		SIZE_OF_SLICED_PARAM_TERM_STRUCT_BITS !=
		(CHAR_BIT * sizeof(ia_css_sliced_param_terminal_t)));

	COMPILATION_ERROR_IF(0 !=
		sizeof(ia_css_sliced_param_terminal_t) % sizeof(uint64_t));

	COMPILATION_ERROR_IF(
		SIZE_OF_FRAGMENT_SLICE_DESC_STRUCT_BITS !=
		(CHAR_BIT * sizeof(ia_css_fragment_slice_desc_t)));

	COMPILATION_ERROR_IF(0 !=
		sizeof(ia_css_fragment_slice_desc_t) % sizeof(uint64_t));

	COMPILATION_ERROR_IF(
		SIZE_OF_SLICE_PARAM_SECTION_DESC_STRUCT_BITS !=
		(CHAR_BIT * sizeof(
			ia_css_slice_param_section_desc_t)));

	COMPILATION_ERROR_IF(0 !=
		sizeof(ia_css_slice_param_section_desc_t) % sizeof(uint64_t));

	COMPILATION_ERROR_IF(
		SIZE_OF_PROG_TERM_STRUCT_BITS !=
		(CHAR_BIT * sizeof(ia_css_program_terminal_t)));

	COMPILATION_ERROR_IF(0 !=
		sizeof(ia_css_program_terminal_t) % sizeof(uint64_t));

	COMPILATION_ERROR_IF(
		SIZE_OF_FRAG_SEQ_INFO_STRUCT_BITS !=
		(CHAR_BIT * sizeof(
			ia_css_kernel_fragment_sequencer_info_desc_t)));

	COMPILATION_ERROR_IF(0 !=
		sizeof(ia_css_kernel_fragment_sequencer_info_desc_t) %
		sizeof(uint64_t));

	COMPILATION_ERROR_IF(
		SIZE_OF_FRAG_SEQ_COMMANDS_STRUCT_BITS !=
		(CHAR_BIT * sizeof(
			ia_css_kernel_fragment_sequencer_command_desc_t)));

	COMPILATION_ERROR_IF(0 !=
		sizeof(ia_css_kernel_fragment_sequencer_command_desc_t) %
		sizeof(uint64_t));

	COMPILATION_ERROR_IF(
		SIZE_OF_FRAG_PARAM_SEC_STRUCT_BITS !=
		(CHAR_BIT * sizeof(ia_css_fragment_param_section_desc_t)));

	COMPILATION_ERROR_IF(0 !=
		sizeof(ia_css_fragment_param_section_desc_t) % sizeof(uint64_t));

	COMPILATION_ERROR_IF(
		SIZE_OF_PROG_CONTROL_INIT_LOAD_SECTION_DESC_STRUCT_BITS !=
		(CHAR_BIT *
		sizeof(ia_css_program_control_init_load_section_desc_t)));

	COMPILATION_ERROR_IF(0 !=
		sizeof(ia_css_program_control_init_load_section_desc_t) %
		sizeof(uint64_t));

	COMPILATION_ERROR_IF(
		SIZE_OF_PROG_CONTROL_INIT_CONNECT_SECTION_DESC_STRUCT_BITS !=
		(CHAR_BIT *
		sizeof(ia_css_program_control_init_connect_section_desc_t)));

	COMPILATION_ERROR_IF(0 !=
		sizeof(ia_css_program_control_init_connect_section_desc_t) %
		sizeof(uint64_t));

	/*
	 * NOTE(review): unlike the other descriptor types, this struct has
	 * no 64-bit-multiple check — presumably intentional, but worth
	 * confirming against the serialized layout.
	 */
	COMPILATION_ERROR_IF(
		SIZE_OF_PROGRAM_DESC_CONTROL_INFO_STRUCT_BITS !=
		(CHAR_BIT *
		sizeof(struct ia_css_program_desc_control_info_s)));

	COMPILATION_ERROR_IF(
		SIZE_OF_PROG_CONTROL_INIT_PROG_DESC_STRUCT_BITS !=
		(CHAR_BIT *
		sizeof(ia_css_program_control_init_program_desc_t)));

	COMPILATION_ERROR_IF(0 !=
		sizeof(ia_css_program_control_init_program_desc_t) %
		sizeof(uint64_t));

	COMPILATION_ERROR_IF(
		SIZE_OF_PROG_CONTROL_INIT_TERM_STRUCT_BITS !=
		(CHAR_BIT * sizeof(ia_css_program_control_init_terminal_t)));

	COMPILATION_ERROR_IF(0 !=
		sizeof(ia_css_program_control_init_terminal_t) %
		sizeof(uint64_t));
}

/*
 * Functions not to inline
 */

/*
 * This source file is created with the intention of sharing and
 * compiled for host and firmware. Since there is no native 64bit
 * data type support for firmware this wouldn't compile for SP
 * tile. The part of the file that is not compilable are marked
 * with the following __VIED_CELL marker and this comment. Once we
 * come up with a solution to address this issue this will be
 * removed.
 */
#if !defined(__VIED_CELL)
/*
 * Compute the number of bytes needed to serialize a terminal of the type
 * described by @manifest, for the fragment count carried by @param.
 * Returns 0 on invalid arguments (and traces a warning).
 */
size_t ia_css_sizeof_terminal(
	const ia_css_terminal_manifest_t *manifest,
	const ia_css_program_group_param_t *param)
{
	size_t size = 0;
	/* NOTE(review): param is dereferenced here, before the verifexit
	 * NULL check below — presumably the helper tolerates NULL; confirm.
	 */
	uint16_t fragment_count =
		ia_css_program_group_param_get_fragment_count(param);

	COMPILATION_ERROR_IF(
		SIZE_OF_DATA_TERMINAL_STRUCT_BITS !=
		(CHAR_BIT * sizeof(ia_css_data_terminal_t)));

	COMPILATION_ERROR_IF(
		0 != sizeof(ia_css_data_terminal_t) % sizeof(uint64_t));

	IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, VERBOSE,
		"ia_css_sizeof_terminal(): enter:\n");

	verifexit(manifest != NULL);
	verifexit(param != NULL);

	if (ia_css_is_terminal_manifest_parameter_terminal(manifest)) {
		const ia_css_param_terminal_manifest_t *param_term_man =
			(const ia_css_param_terminal_manifest_t *)manifest;

		if (ia_css_terminal_manifest_get_type(manifest) ==
				IA_CSS_TERMINAL_TYPE_PARAM_CACHED_IN) {
			size = ia_css_param_in_terminal_get_descriptor_size(
			param_term_man->param_manifest_section_desc_count);
		} else if (ia_css_terminal_manifest_get_type(manifest) ==
				IA_CSS_TERMINAL_TYPE_PARAM_CACHED_OUT) {
			size = ia_css_param_out_terminal_get_descriptor_size(
			param_term_man->param_manifest_section_desc_count,
				fragment_count);
		} else {
			/* Deliberately-failing assert: the string literal is
			 * never NULL, so this always fires in debug builds.
			 */
			assert(NULL == "Invalid parameter terminal type");
			IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, ERROR,
				"ia_css_sizeof_terminal(): Invalid parameter terminal type:\n");
			verifjmpexit(0);
		}
	} else if (ia_css_is_terminal_manifest_data_terminal(manifest)) {
		/* Data terminal: fixed header plus one fragment descriptor
		 * per fragment.
		 */
		size += sizeof(ia_css_data_terminal_t);
		size += fragment_count * sizeof(ia_css_fragment_descriptor_t);
	} else if (ia_css_is_terminal_manifest_program_terminal(manifest)) {
		ia_css_program_terminal_manifest_t *prog_term_man =
			(ia_css_program_terminal_manifest_t *)manifest;

		size = ia_css_program_terminal_get_descriptor_size(
			fragment_count,
			prog_term_man->
			fragment_param_manifest_section_desc_count,
			prog_term_man->
			kernel_fragment_sequencer_info_manifest_info_count,
			(fragment_count *
			prog_term_man->
			max_kernel_fragment_sequencer_command_desc));
	} else if (ia_css_is_terminal_manifest_spatial_parameter_terminal(
			manifest)) {
		ia_css_spatial_param_terminal_manifest_t *spatial_param_term =
			(ia_css_spatial_param_terminal_manifest_t *)manifest;

		size = ia_css_spatial_param_terminal_get_descriptor_size(
			spatial_param_term->
			frame_grid_param_manifest_section_desc_count,
			fragment_count);
	} else if (ia_css_is_terminal_manifest_program_control_init_terminal(
			manifest)) {
		ia_css_program_control_init_terminal_manifest_t *progctrlinit_term_man =
			(ia_css_program_control_init_terminal_manifest_t *)manifest;

		size = ia_css_program_control_init_terminal_get_descriptor_size(
			progctrlinit_term_man);
	}
EXIT:
	if (NULL == manifest || NULL == param) {
		IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, WARNING,
			"ia_css_sizeof_terminal invalid argument\n");
	}
	return size;
}

/*
 * Construct a terminal of the manifest's type in caller-provided raw memory.
 *
 * @raw_mem:        caller-allocated buffer of at least
 *                  ia_css_sizeof_terminal(manifest, parent-of-terminal_param)
 *                  bytes; the terminal is built in place.
 * @manifest:       terminal manifest selecting the terminal type.
 * @terminal_param: per-terminal user parameters (frame format, dimensions,
 *                  stride, bpp/bpe, fragment geometry).
 * @enable_bitmap:  enabled-kernel bitmap; for data terminals its intersection
 *                  with the manifest kernel bitmap must be one-hot and
 *                  selects the kernel id.
 *
 * NOTE(review): on failure this returns ia_css_terminal_destroy(terminal),
 * and destroy is a pass-through — so a failed create after the cast returns
 * the partially-initialized pointer, not NULL; callers should check retval
 * semantics carefully. Also the EXIT path only reports on NULL manifest,
 * not NULL param.
 */
ia_css_terminal_t *ia_css_terminal_create(
	void *raw_mem,
	const ia_css_terminal_manifest_t *manifest,
	const ia_css_terminal_param_t *terminal_param,
	ia_css_kernel_bitmap_t enable_bitmap)
{
	char *terminal_raw_ptr;
	ia_css_terminal_t *terminal = NULL;
	uint16_t fragment_count;
	int i, j;
	int retval = -1;
	ia_css_program_group_param_t *param;

	IA_CSS_TRACE_2(PSYSAPI_DYNAMIC, INFO,
		"ia_css_terminal_create(manifest %p, terminal_param %p): enter:\n",
		manifest, terminal_param);

	param = ia_css_terminal_param_get_parent(terminal_param);
	fragment_count = ia_css_program_group_param_get_fragment_count(param);

	verifexit(manifest != NULL);
	verifexit(param != NULL);

	terminal_raw_ptr = (char *) raw_mem;

	terminal = (ia_css_terminal_t *) terminal_raw_ptr;
	verifexit(terminal != NULL);

	terminal->size = (uint16_t)ia_css_sizeof_terminal(manifest, param);
	verifexit(ia_css_terminal_set_type(
		terminal, ia_css_terminal_manifest_get_type(manifest)) == 0);

	terminal->ID = ia_css_terminal_manifest_get_ID(manifest);

	verifexit(ia_css_terminal_set_buffer(terminal,
				VIED_NULL) == 0);

	if (ia_css_is_terminal_manifest_data_terminal(manifest) == true) {
		ia_css_data_terminal_t *dterminal =
			(ia_css_data_terminal_t *)terminal;
		ia_css_frame_t *frame =
			ia_css_data_terminal_get_frame(dterminal);
		ia_css_kernel_bitmap_t intersection =
			ia_css_kernel_bitmap_intersection(enable_bitmap,
			ia_css_data_terminal_manifest_get_kernel_bitmap(
				(const ia_css_data_terminal_manifest_t *)
				manifest));

		verifexit(frame != NULL);
		verifexit(ia_css_frame_set_buffer_state(
				frame, IA_CSS_BUFFER_NULL) == 0);
		/* Exactly one kernel may drive a data terminal */
		verifexit(ia_css_is_kernel_bitmap_onehot(intersection) ==
			true);

		/* Fragment descriptors are laid out directly after the
		 * fixed-size data terminal header.
		 */
		terminal_raw_ptr += sizeof(ia_css_data_terminal_t);
		dterminal->fragment_descriptor_offset =
			(uint16_t) (terminal_raw_ptr - (char *)terminal);

		/* Convert the one-hot bitmap into a kernel index by counting
		 * shifts until the bitmap empties.
		 */
		dterminal->kernel_id = 0;
		while (!ia_css_is_kernel_bitmap_empty(intersection)) {
			intersection = ia_css_kernel_bitmap_shift(
					intersection);
			dterminal->kernel_id++;
		}
		assert(dterminal->kernel_id > 0);
		dterminal->kernel_id -= 1;

		/* some terminal and fragment initialization */
		dterminal->frame_descriptor.frame_format_type =
			terminal_param->frame_format_type;
		for (i = 0; i < IA_CSS_N_DATA_DIMENSION; i++) {
			dterminal->frame_descriptor.dimension[i] =
				terminal_param->dimensions[i];
		}
		dterminal->frame_descriptor.stride[IA_CSS_COL_DIMENSION] =
			terminal_param->stride;
		dterminal->frame_descriptor.bpp = terminal_param->bpp;
		dterminal->frame_descriptor.bpe = terminal_param->bpe;
		/* Derive plane count and per-plane offsets from the frame
		 * format. Offsets are cumulative from plane 0 at offset 0.
		 */
		switch (dterminal->frame_descriptor.frame_format_type) {
		case IA_CSS_DATA_FORMAT_UYVY:
		case IA_CSS_DATA_FORMAT_YUYV:
		case IA_CSS_DATA_FORMAT_Y800:
		case IA_CSS_DATA_FORMAT_RGB565:
		case IA_CSS_DATA_FORMAT_RGBA888:
		case IA_CSS_DATA_FORMAT_BAYER_GRBG:
		case IA_CSS_DATA_FORMAT_BAYER_RGGB:
		case IA_CSS_DATA_FORMAT_BAYER_BGGR:
		case IA_CSS_DATA_FORMAT_BAYER_GBRG:
		case IA_CSS_DATA_FORMAT_RAW:
		case IA_CSS_DATA_FORMAT_RAW_PACKED:
		case IA_CSS_DATA_FORMAT_YYUVYY_VECTORIZED:
		case IA_CSS_DATA_FORMAT_PAF:
			/* Packed/interleaved: a single plane */
			dterminal->frame_descriptor.plane_count = 1;
			dterminal->frame_descriptor.plane_offsets[0] = 0;
			break;
		case IA_CSS_DATA_FORMAT_NV12:
		case IA_CSS_DATA_FORMAT_NV21:
		case IA_CSS_DATA_FORMAT_NV16:
		case IA_CSS_DATA_FORMAT_NV61:
			/* Semi-planar: luma plane + interleaved chroma */
			dterminal->frame_descriptor.plane_count = 2;
			dterminal->frame_descriptor.plane_offsets[0] = 0;
			dterminal->frame_descriptor.plane_offsets[1] =
				dterminal->frame_descriptor.plane_offsets[0] +
				dterminal->frame_descriptor.stride[IA_CSS_COL_DIMENSION] *
				dterminal->frame_descriptor.dimension[IA_CSS_ROW_DIMENSION];
			break;
		case IA_CSS_DATA_FORMAT_YUV444:
		case IA_CSS_DATA_FORMAT_RGB888:
		case IA_CSS_DATA_FORMAT_YUV420_VECTORIZED:
			/* Planar, full-resolution chroma planes */
			dterminal->frame_descriptor.plane_count = 3;
			dterminal->frame_descriptor.plane_offsets[0] = 0;
			dterminal->frame_descriptor.plane_offsets[1] =
				dterminal->frame_descriptor.plane_offsets[0] +
				dterminal->frame_descriptor.stride[IA_CSS_COL_DIMENSION] *
				dterminal->frame_descriptor.dimension[IA_CSS_ROW_DIMENSION];
			dterminal->frame_descriptor.plane_offsets[2] =
				dterminal->frame_descriptor.plane_offsets[1] +
				dterminal->frame_descriptor.stride[IA_CSS_COL_DIMENSION] *
				dterminal->frame_descriptor.dimension[IA_CSS_ROW_DIMENSION];
			break;
		case IA_CSS_DATA_FORMAT_YUV420:
			/* Planar 4:2:0: chroma planes are quarter size
			 * (stride/2 x rows/2).
			 */
			dterminal->frame_descriptor.plane_count = 3;
			dterminal->frame_descriptor.plane_offsets[0] = 0;
			dterminal->frame_descriptor.plane_offsets[1] =
				dterminal->frame_descriptor.plane_offsets[0] +
				dterminal->frame_descriptor.stride[IA_CSS_COL_DIMENSION] *
				dterminal->frame_descriptor.dimension[IA_CSS_ROW_DIMENSION];
			dterminal->frame_descriptor.plane_offsets[2] =
				dterminal->frame_descriptor.plane_offsets[1] +
				dterminal->frame_descriptor.stride[IA_CSS_COL_DIMENSION]/2 *
				dterminal->frame_descriptor.dimension[IA_CSS_ROW_DIMENSION]/2;
			break;
		default:
			/* Unset, resulting in potential terminal connect issues */
			dterminal->frame_descriptor.plane_count = 1;
			dterminal->frame_descriptor.plane_offsets[0] = 0;
			break;
		}
		/*
		 * Initial solution for single fragment initialization
		 * TODO:
		 * where to get the fragment description params from???
		 */
		/* NOTE(review): only fragment 0 is initialized here even
		 * when fragment_count > 1 — see the TODO above.
		 */
		if (fragment_count > 0) {
			ia_css_fragment_descriptor_t *fragment_descriptor =
				(ia_css_fragment_descriptor_t *)
				terminal_raw_ptr;

			fragment_descriptor->index[IA_CSS_COL_DIMENSION] =
				terminal_param->index[IA_CSS_COL_DIMENSION];
			fragment_descriptor->index[IA_CSS_ROW_DIMENSION] =
				terminal_param->index[IA_CSS_ROW_DIMENSION];
			fragment_descriptor->offset[0] =
				terminal_param->offset;
			for (i = 0; i < IA_CSS_N_DATA_DIMENSION; i++) {
				fragment_descriptor->dimension[i] =
					terminal_param->fragment_dimensions[i];
			}
		}
		/* end fragment stuff */
	} else if (ia_css_is_terminal_manifest_parameter_terminal(manifest) ==
			true) {
		ia_css_param_terminal_t *pterminal =
			(ia_css_param_terminal_t *)terminal;
		uint16_t section_count =
			((const ia_css_param_terminal_manifest_t *)manifest)->
			param_manifest_section_desc_count;
		size_t curr_offset = 0;

		pterminal->param_section_desc_offset =
			sizeof(ia_css_param_terminal_t);

		/* Pack the parameter sections back to back; each section
		 * gets the max size its manifest section allows.
		 */
		for (i = 0; i < section_count; i++) {
			ia_css_param_section_desc_t *section =
				ia_css_param_in_terminal_get_param_section_desc(
					pterminal, i);
			const ia_css_param_manifest_section_desc_t *
				man_section =
				ia_css_param_terminal_manifest_get_prm_sct_desc(
				(const ia_css_param_terminal_manifest_t *)
				manifest, i);

			verifjmpexit(man_section != NULL);
			verifjmpexit(section != NULL);

			section->mem_size = man_section->max_mem_size;
			section->mem_offset = curr_offset;
			curr_offset += man_section->max_mem_size;
		}
	} else if (ia_css_is_terminal_manifest_program_terminal(manifest) ==
			true &&
			ia_css_terminal_manifest_get_type(manifest) ==
			IA_CSS_TERMINAL_TYPE_PROGRAM) { /* for program terminal */
		ia_css_program_terminal_t *prog_terminal =
			(ia_css_program_terminal_t *)terminal;
		const ia_css_program_terminal_manifest_t *prog_terminal_man =
			(const ia_css_program_terminal_manifest_t *)manifest;
		ia_css_kernel_fragment_sequencer_info_desc_t
			*sequencer_info_desc_base = NULL;
		uint16_t section_count = prog_terminal_man->
			fragment_param_manifest_section_desc_count;
		uint16_t manifest_info_count =
			prog_terminal_man->
			kernel_fragment_sequencer_info_manifest_info_count;
		/* information needs to come from user or manifest once
		 * the size sizeof function is updated.
		 */
		uint16_t nof_command_objs = 0;
		size_t curr_offset = 0;

		/* Layout: [terminal header][sequencer infos per fragment]
		 * [sequencer commands][fragment param sections].
		 */
		prog_terminal->kernel_fragment_sequencer_info_desc_offset =
			sizeof(ia_css_program_terminal_t);
		prog_terminal->fragment_param_section_desc_offset =
			prog_terminal->
			kernel_fragment_sequencer_info_desc_offset +
			(fragment_count * manifest_info_count *
			sizeof(ia_css_kernel_fragment_sequencer_info_desc_t)) +
			(nof_command_objs *
			sizeof(
			ia_css_kernel_fragment_sequencer_command_desc_t));

		NOT_USED(sequencer_info_desc_base);
		for (i = 0; i < fragment_count; i++) {
			for (j = 0; j < section_count; j++) {
				ia_css_fragment_param_section_desc_t *section =
				ia_css_program_terminal_get_frgmnt_prm_sct_desc(
					prog_terminal, i, j, section_count);
				const ia_css_fragment_param_manifest_section_desc_t *
					man_section =
			ia_css_program_terminal_manifest_get_frgmnt_prm_sct_desc
					(prog_terminal_man, j);

				verifjmpexit(man_section != NULL);
				verifjmpexit(section != NULL);

				section->mem_size = man_section->max_mem_size;
				section->mem_offset = curr_offset;
				curr_offset += man_section->max_mem_size;
			}

			sequencer_info_desc_base =
			ia_css_program_terminal_get_kernel_frgmnt_seq_info_desc(
					prog_terminal, i, 0,
					manifest_info_count);

			/*
			 * This offset cannot be initialized properly
			 * since the number of commands in every sequencer
			 * is not known at this point
			 */
			/*for (j = 0; j < manifest_info_count; j++) {
				sequencer_info_desc_base[j].
					command_desc_offset =
					prog_terminal->
					kernel_fragment_sequencer_info_desc_offset +
					(manifest_info_count *
					sizeof(
					ia_css_kernel_fragment_sequencer_info_desc_t) +
					(nof_command_objs *
					sizeof(
					ia_css_kernel_fragment_sequencer_command_desc_t
					));
			}*/
		}
	} else if (ia_css_is_terminal_manifest_spatial_parameter_terminal(
			manifest) == true) {
		ia_css_spatial_param_terminal_t *spatial_param_terminal =
			(ia_css_spatial_param_terminal_t *)terminal;
		ia_css_spatial_param_terminal_manifest_t *
			spatia_param_terminal_man =
			(ia_css_spatial_param_terminal_manifest_t *)manifest;

		/* Initialize the spatial terminal structure */
		spatial_param_terminal->fragment_grid_desc_offset =
			sizeof(ia_css_spatial_param_terminal_t);
		spatial_param_terminal->frame_grid_param_section_desc_offset =
			spatial_param_terminal->fragment_grid_desc_offset +
			(fragment_count * sizeof(ia_css_fragment_grid_desc_t));
		spatial_param_terminal->kernel_id =
			spatia_param_terminal_man->kernel_id;
	} else if (ia_css_is_terminal_manifest_sliced_terminal(manifest) ==
			true) {
		ia_css_sliced_param_terminal_t *sliced_param_terminal =
			(ia_css_sliced_param_terminal_t *)terminal;
		ia_css_sliced_param_terminal_manifest_t
			*sliced_param_terminal_man =
			(ia_css_sliced_param_terminal_manifest_t *)manifest;

		/* Initialize the sliced terminal structure */
		sliced_param_terminal->fragment_slice_desc_offset =
			sizeof(ia_css_sliced_param_terminal_t);
		sliced_param_terminal->kernel_id =
			sliced_param_terminal_man->kernel_id;
	} else if (ia_css_is_terminal_manifest_program_control_init_terminal(
			manifest) == true) {
		verifjmpexit(ia_css_program_control_init_terminal_init(
			(ia_css_program_control_init_terminal_t *)
			terminal,
			(const ia_css_program_control_init_terminal_manifest_t *)
			manifest) == 0);
	} else {
		IA_CSS_TRACE_1(PSYSAPI_DYNAMIC, ERROR,
			"ia_css_terminal_create failed, not a data or param terminal. Returning (%i)\n",
			EFAULT);
		goto EXIT;
	}

	IA_CSS_TRACE_1(PSYSAPI_DYNAMIC, INFO,
		"ia_css_terminal_create(): Created successfully terminal %p\n",
		terminal);

	retval = 0;
EXIT:
	if (NULL == manifest) {
		IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, WARNING,
			"ia_css_terminal_create invalid argument\n");
	}
	if (retval != 0) {
		IA_CSS_TRACE_1(PSYSAPI_DYNAMIC, ERROR,
			"ia_css_terminal_create failed (%i)\n", retval);
		terminal = ia_css_terminal_destroy(terminal);
	}
	return terminal;
}

/*
 * Destroy a terminal. The terminal lives in caller-owned raw memory, so
 * there is nothing to free; this is a traced pass-through that returns
 * its argument unchanged.
 */
ia_css_terminal_t *ia_css_terminal_destroy(
	ia_css_terminal_t *terminal)
{
	IA_CSS_TRACE_1(PSYSAPI_DYNAMIC, INFO,
		"ia_css_terminal_destroy(terminal %p): enter:\n", terminal);
	return terminal;
}

/*
 * Return the number of parameter sections declared by a parameter terminal
 * manifest. Returns 0 and traces a warning on NULL arguments.
 */
uint16_t ia_css_param_terminal_compute_section_count(
	const ia_css_terminal_manifest_t *manifest,
	const ia_css_program_group_param_t *param) /* Delete 2nd argument*/
{
	uint16_t section_count = 0;

	NOT_USED(param);

	IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, VERBOSE,
		"ia_css_param_terminal_compute_section_count(): enter:\n");

	verifexit(manifest != NULL);
	section_count = ((const ia_css_param_terminal_manifest_t *)manifest)->
		param_manifest_section_desc_count;
EXIT:
	if (NULL == manifest || NULL == param) {
		IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, WARNING,
			"ia_css_param_terminal_compute_section_count: invalid argument\n");
	}
	return section_count;
}
#endif /* !defined(__VIED_CELL) */
diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/psysapi/dynamic/src/ia_css_psys_terminal_impl.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/psysapi/dynamic/src/ia_css_psys_terminal_impl.h new file mode 100644 index 0000000000000..36fb0f1d469a1 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/psysapi/dynamic/src/ia_css_psys_terminal_impl.h @@ -0,0 +1,1868 @@
/*
 * Support for Intel Camera Imaging ISP subsystem.
 * Copyright (c) 2010 - 2018, Intel Corporation.
+ * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ + +#ifndef __IA_CSS_PSYS_TERMINAL_IMPL_H +#define __IA_CSS_PSYS_TERMINAL_IMPL_H + +#include + +#include +#include + +#include +#include + +#include + + +#include +#include /* for verifexit, verifjmpexit */ +#include /* for COMPILATION_ERROR_IF */ +#include /* for NOT_USED */ +#include "ia_css_psys_terminal_private_types.h" +#include "ia_css_terminal_manifest_types.h" +#include "ia_css_psys_dynamic_trace.h" +#include "ia_css_psys_manifest_types.h" +#include "ia_css_psys_program_group_private.h" +#include "ia_css_terminal_types.h" + +STORAGE_CLASS_INLINE int ia_css_data_terminal_print(const ia_css_terminal_t *terminal, + void *fid) { + + DECLARE_ERRVAL + int retval = -1; + int i; + ia_css_data_terminal_t *dterminal = (ia_css_data_terminal_t *)terminal; + uint16_t fragment_count = + ia_css_data_terminal_get_fragment_count(dterminal); + verifexitval(fragment_count != 0, EINVAL); + + retval = ia_css_frame_descriptor_print( + ia_css_data_terminal_get_frame_descriptor(dterminal), + fid); + verifexitval(retval == 0, EINVAL); + + retval = ia_css_frame_print( + ia_css_data_terminal_get_frame(dterminal), fid); + verifexitval(retval == 0, EINVAL); + + for (i = 0; i < (int)fragment_count; i++) { + retval = ia_css_fragment_descriptor_print( + ia_css_data_terminal_get_fragment_descriptor( + dterminal, i), fid); + verifexitval(retval == 0, EINVAL); + } + + retval = 0; +EXIT: + if (!noerror()) { + IA_CSS_TRACE_1(PSYSAPI_DYNAMIC, ERROR, + "ia_css_terminal_print failed (%i)\n", retval); + } + return retval; +} + 

/*
 * Print a terminal (header fields plus a type-specific dump) to @fid.
 * Currently only program-control-init and data terminals get a detailed
 * dump; other types print the header only. Returns 0 on success.
 */
IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_C
int ia_css_terminal_print(
	const ia_css_terminal_t *terminal,
	void *fid)
{
	DECLARE_ERRVAL
	int retval = -1;
	/* NOTE(review): get_type is called before the NULL verif below;
	 * get_type itself tolerates NULL and returns a sentinel.
	 */
	ia_css_terminal_type_t term_type = ia_css_terminal_get_type(terminal);

	IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, INFO,
		"ia_css_terminal_print(): enter:\n");

	verifexitval(terminal != NULL, EFAULT);

	IA_CSS_TRACE_4(PSYSAPI_DYNAMIC, INFO,
		"\tTerminal %p sizeof %d, typeof %d, parent %p\n",
		terminal,
		(int)ia_css_terminal_get_size(terminal),
		(int)ia_css_terminal_get_type(terminal),
		(void *)ia_css_terminal_get_parent(terminal));

	switch (term_type) {
	case IA_CSS_TERMINAL_TYPE_PROGRAM_CONTROL_INIT:
		ia_css_program_control_init_terminal_print(
			(ia_css_program_control_init_terminal_t *)terminal);
		break;
	case IA_CSS_TERMINAL_TYPE_DATA_IN:
	case IA_CSS_TERMINAL_TYPE_DATA_OUT:
		ia_css_data_terminal_print(terminal, fid);
		break;
	default:
		/* other terminal prints are currently not supported */
		break;
	}

	retval = 0;
EXIT:
	if (haserror(EFAULT)) {
		IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, ERROR,
			"ia_css_terminal_print invalid argument terminal\n");
	}
	if (!noerror()) {
		IA_CSS_TRACE_1(PSYSAPI_DYNAMIC, ERROR,
			"ia_css_terminal_print failed (%i)\n", retval);
	}
	return retval;
}

/*
 * Classify a terminal as an input (true) or output (false) of the program
 * group, by terminal type. Returns false on NULL or unknown type.
 */
IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_C
bool ia_css_is_terminal_input(
	const ia_css_terminal_t *terminal)
{
	DECLARE_ERRVAL
	bool is_input = false;
	ia_css_terminal_type_t terminal_type;

	IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, VERBOSE,
		"ia_css_is_terminal_input(): enter:\n");

	verifexitval(terminal != NULL, EFAULT);

	terminal_type = ia_css_terminal_get_type(terminal);

	switch (terminal_type) {
	case IA_CSS_TERMINAL_TYPE_DATA_IN:	/* Fall through */
	case IA_CSS_TERMINAL_TYPE_STATE_IN:	/* Fall through */
	case IA_CSS_TERMINAL_TYPE_PARAM_STREAM:	/* Fall through */
	case IA_CSS_TERMINAL_TYPE_PARAM_CACHED_IN:
	case IA_CSS_TERMINAL_TYPE_PARAM_SPATIAL_IN:
	case IA_CSS_TERMINAL_TYPE_PARAM_SLICED_IN:
	case IA_CSS_TERMINAL_TYPE_PROGRAM:
	case IA_CSS_TERMINAL_TYPE_PROGRAM_CONTROL_INIT:
		is_input = true;
		break;
	case IA_CSS_TERMINAL_TYPE_DATA_OUT:	/* Fall through */
	case IA_CSS_TERMINAL_TYPE_STATE_OUT:
	case IA_CSS_TERMINAL_TYPE_PARAM_CACHED_OUT:
	case IA_CSS_TERMINAL_TYPE_PARAM_SLICED_OUT:
	case IA_CSS_TERMINAL_TYPE_PARAM_SPATIAL_OUT:
		is_input = false;
		break;
	default:
		IA_CSS_TRACE_1(PSYSAPI_DYNAMIC, ERROR,
			"ia_css_is_terminal_input: Unknown terminal type (%d)\n",
			terminal_type);
		goto EXIT;
	}

EXIT:
	if (haserror(EFAULT)) {
		IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, ERROR,
			"ia_css_is_terminal_input invalid argument\n");
	}
	return is_input;
}

/* Serialized size of the terminal in bytes; 0 on NULL. */
IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_C
size_t ia_css_terminal_get_size(
	const ia_css_terminal_t *terminal)
{
	DECLARE_ERRVAL
	size_t size = 0;

	IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, VERBOSE,
		"ia_css_terminal_get_size(): enter:\n");

	verifexitval(terminal != NULL, EFAULT);

	size = terminal->size;
EXIT:
	if (haserror(EFAULT)) {
		IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, ERROR,
			"ia_css_terminal_get_size invalid argument\n");
	}
	return size;
}

/* Terminal type; IA_CSS_N_TERMINAL_TYPES (sentinel) on NULL. */
IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_C
ia_css_terminal_type_t ia_css_terminal_get_type(
	const ia_css_terminal_t *terminal)
{
	DECLARE_ERRVAL
	ia_css_terminal_type_t terminal_type = IA_CSS_N_TERMINAL_TYPES;

	IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, VERBOSE,
		"ia_css_terminal_get_type(): enter:\n");

	verifexitval(terminal != NULL, EFAULT);

	terminal_type = terminal->terminal_type;
EXIT:
	if (haserror(EFAULT)) {
		IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, ERROR,
			"ia_css_terminal_get_type invalid argument\n");
	}
	return terminal_type;
}

/* Set the terminal type. Returns 0 on success, -1 on NULL terminal. */
IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_C
int ia_css_terminal_set_type(
	ia_css_terminal_t *terminal,
	const ia_css_terminal_type_t terminal_type)
{
	DECLARE_ERRVAL
	int retval = -1;

	IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, VERBOSE,
		"ia_css_terminal_set_type(): enter:\n");

	verifexitval(terminal != NULL, EFAULT);

	terminal->terminal_type = terminal_type;

	retval = 0;
EXIT:
	if (haserror(EFAULT)) {
		IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, ERROR,
			"ia_css_terminal_set_type invalid argument terminal\n");
	}
	if (!noerror()) {
		IA_CSS_TRACE_1(PSYSAPI_DYNAMIC, ERROR,
			"ia_css_terminal_set_type failed (%i)\n", retval);
	}
	return retval;
}

/* Index of this terminal in its manifest; 0xffff sentinel on NULL. */
IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_C
uint16_t ia_css_terminal_get_terminal_manifest_index(
	const ia_css_terminal_t *terminal)
{
	DECLARE_ERRVAL
	uint16_t terminal_manifest_index;

	terminal_manifest_index = 0xffff;
	IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, VERBOSE,
		"ia_css_terminal_get_terminal_manifest_index(): enter:\n");

	verifexitval(terminal != NULL, EFAULT);

	terminal_manifest_index = terminal->tm_index;
EXIT:
	if (haserror(EFAULT)) {
		IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, ERROR,
			"ia_css_terminal_get_terminal_manifest_index: invalid argument\n");
	}
	return terminal_manifest_index;
}

/* Set the terminal's manifest index. Returns 0 on success, -1 on NULL. */
IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_C
int ia_css_terminal_set_terminal_manifest_index(
	ia_css_terminal_t *terminal,
	const uint16_t terminal_manifest_index)
{
	DECLARE_ERRVAL
	int retval = -1;

	IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, VERBOSE,
		"ia_css_terminal_set_terminal_manifest_index(): enter:\n");

	verifexitval(terminal != NULL, EFAULT);
	terminal->tm_index = terminal_manifest_index;

	retval = 0;
EXIT:
	if (haserror(EFAULT)) {
		IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, ERROR,
			"ia_css_terminal_set_terminal_manifest_index: invalid argument terminal\n");
	}
	if (!noerror()) {
		IA_CSS_TRACE_1(PSYSAPI_DYNAMIC, ERROR,
			"ia_css_terminal_set_terminal_manifest_index: failed (%i)\n",
			retval);
	}
	return retval;
}

/* Terminal ID; set to IA_CSS_TERMINAL_INVALID_ID then forced to 0 on NULL. */
IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_C
ia_css_terminal_ID_t ia_css_terminal_get_ID(
	const ia_css_terminal_t *terminal)
{
	DECLARE_ERRVAL
	ia_css_terminal_ID_t retval = IA_CSS_TERMINAL_INVALID_ID;

	IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, VERBOSE,
		"ia_css_terminal_get_ID(): enter:\n");

	verifexitval(terminal != NULL, EFAULT);

	retval = terminal->ID;
EXIT:
	if (haserror(EFAULT)) {
		IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, ERROR,
			"ia_css_terminal_get_ID invalid argument\n");
		retval = 0;
	}
	return retval;
}

/*
 * Kernel id of a data terminal; 0 on NULL.
 * NOTE(review): initializing a uint8_t with -1 wraps to 255; the error
 * path overwrites it with 0 anyway, so the initializer is misleading.
 */
IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_C
uint8_t ia_css_data_terminal_get_kernel_id(
	const ia_css_data_terminal_t *dterminal)
{
	DECLARE_ERRVAL
	uint8_t retval = -1;

	IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, VERBOSE,
		"ia_css_data_terminal_get_kernel_id(): enter:\n");

	verifexitval(dterminal != NULL, EFAULT);

	retval = dterminal->kernel_id;
EXIT:
	if (haserror(EFAULT)) {
		IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, ERROR,
			"ia_css_data_terminal_get_kernel_id: invalid argument\n");
		retval = 0;
	}
	return retval;
}

/* Connection type of a data terminal; sentinel N_CONNECTION_TYPES on NULL. */
IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_C
ia_css_connection_type_t ia_css_data_terminal_get_connection_type(
	const ia_css_data_terminal_t *dterminal)
{
	DECLARE_ERRVAL
	ia_css_connection_type_t connection_type = IA_CSS_N_CONNECTION_TYPES;

	IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, VERBOSE,
		"ia_css_data_terminal_get_connection_type(): enter:\n");

	verifexitval(dterminal != NULL, EFAULT);

	connection_type = dterminal->connection_type;
EXIT:
	if (haserror(EFAULT)) {
		IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, ERROR,
			"ia_css_data_terminal_get_connection_type: invalid argument\n");
	}
	return connection_type;
}

/* Link id of a data terminal; 0 on NULL. */
IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_C
uint8_t ia_css_data_terminal_get_link_id(
	const ia_css_data_terminal_t *dterminal)
{
	DECLARE_ERRVAL
	uint8_t link_id = 0;

	IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, VERBOSE,
		"ia_css_data_terminal_get_link_id(): enter:\n");

	verifexitval(dterminal != NULL, EFAULT);

	link_id = dterminal->link_id;
EXIT:
	if (haserror(EFAULT)) {
		IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, ERROR,
			"ia_css_data_terminal_get_link_id: invalid argument\n");
	}
	return link_id;
}

/* Set the link id of a data terminal. Returns 0 on success, -1 on NULL. */
IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_C
int ia_css_data_terminal_set_link_id(
	ia_css_data_terminal_t *dterminal,
	const uint8_t link_id)
{
	DECLARE_ERRVAL
	int retval = -1;

	IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, VERBOSE,
		"ia_css_data_terminal_set_link_id(): enter:\n");

	verifexitval(dterminal != NULL, EFAULT);
	dterminal->link_id = link_id;

	retval = 0;
EXIT:
	if (haserror(EFAULT)) {
		IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, ERROR,
			"ia_css_data_terminal_set_link_id: invalid argument terminal\n");
	}
	if (!noerror()) {
		IA_CSS_TRACE_1(PSYSAPI_DYNAMIC, ERROR,
			"ia_css_data_terminal_set_link_id: failed (%i)\n",
			retval);
	}
	return retval;
}

/* Set the connection type of a data terminal. 0 on success, -1 on NULL. */
IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_C
int ia_css_data_terminal_set_connection_type(
	ia_css_data_terminal_t *dterminal,
	const ia_css_connection_type_t connection_type)
{
	DECLARE_ERRVAL
	int retval = -1;

	IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, VERBOSE,
		"ia_css_data_terminal_set_connection_type(): enter:\n");

	verifexitval(dterminal != NULL, EFAULT);

	dterminal->connection_type = connection_type;

	retval = 0;
EXIT:
	if (haserror(EFAULT)) {
		IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, ERROR,
			"ia_css_data_terminal_set_connection_type: invalid argument dterminal\n");
	}
	if (!noerror()) {
		IA_CSS_TRACE_1(PSYSAPI_DYNAMIC, ERROR,
			"ia_css_data_terminal_set_connection_type failed (%i)\n",
			retval);
	}
	return retval;
}

/*
 * Parent process group, recovered from the stored relative offset.
 * NOTE(review): correctness relies on parent_offset being a signed/width-
 * matched field set by ia_css_terminal_set_parent below — confirm its
 * declared type, since set_parent casts a pointer difference to uint16_t.
 */
IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_C
ia_css_process_group_t *ia_css_terminal_get_parent(
	const ia_css_terminal_t *terminal)
{
	DECLARE_ERRVAL
	ia_css_process_group_t *parent = NULL;

	IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, VERBOSE,
		"ia_css_terminal_get_parent(): enter:\n");

	verifexitval(terminal != NULL, EFAULT);

	parent = (ia_css_process_group_t *) ((char *)terminal +
			terminal->parent_offset);

EXIT:
	if (haserror(EFAULT)) {
		IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, ERROR,
			"ia_css_terminal_get_parent invalid argument\n");
	}
	return parent;
}

/*
 * Record the parent process group as a relative offset from the terminal.
 * Returns 0 on success, -1 if either pointer is NULL.
 */
IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_C
int ia_css_terminal_set_parent(
	ia_css_terminal_t *terminal,
	ia_css_process_group_t *parent)
{
	DECLARE_ERRVAL
	int retval = -1;

	IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, VERBOSE,
		"ia_css_terminal_set_parent(): enter:\n");

	verifexitval(terminal != NULL, EFAULT);
	verifexitval(parent != NULL, EFAULT);

	terminal->parent_offset = (uint16_t) ((char *)parent -
			(char *)terminal);

	retval = 0;
EXIT:
	if (haserror(EFAULT)) {
		IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, ERROR,
			"ia_css_terminal_set_parent invalid argument\n");
	}
	if (!noerror()) {
		IA_CSS_TRACE_1(PSYSAPI_DYNAMIC, ERROR,
			"ia_css_terminal_set_parent failed (%i)\n", retval);
	}
	return retval;
}

/* Pointer to the embedded frame of a data terminal; NULL on NULL input. */
IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_C
ia_css_frame_t *ia_css_data_terminal_get_frame(
	const ia_css_data_terminal_t *dterminal)
{
	DECLARE_ERRVAL
	ia_css_frame_t *frame = NULL;

	IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, VERBOSE,
		"ia_css_data_terminal_get_frame(): enter:\n");

	verifexitval(dterminal != NULL, EFAULT);

	frame = (ia_css_frame_t *)(&(dterminal->frame));
EXIT:
	if (haserror(EFAULT)) {
		IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, ERROR,
			"ia_css_data_terminal_get_frame invalid argument\n");
	}
	return frame;
}

/* Pointer to the embedded frame descriptor; NULL on NULL input. */
IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_C
ia_css_frame_descriptor_t *ia_css_data_terminal_get_frame_descriptor(
	const ia_css_data_terminal_t *dterminal)
{
	DECLARE_ERRVAL
	ia_css_frame_descriptor_t *frame_descriptor = NULL;

	IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, VERBOSE,
		"ia_css_data_terminal_get_frame_descriptor(): enter:\n");

	verifexitval(dterminal != NULL, EFAULT);

	frame_descriptor =
		(ia_css_frame_descriptor_t *)(&(dterminal->frame_descriptor));
EXIT:
	if (haserror(EFAULT)) {
		IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, ERROR,
			"ia_css_data_terminal_get_frame_descriptor: invalid argument\n");
	}
	return frame_descriptor;
}

/*
 * Fragment descriptor @fragment_index of a data terminal (stored after the
 * terminal header at fragment_descriptor_offset); NULL on error.
 *
 * NOTE(review): both trace strings below say "frame_descriptor" — a
 * copy-paste from the previous function; and fragment_count is computed
 * before the dterminal NULL check (get_fragment_count/get_parent handle
 * NULL internally, but the ordering is fragile).
 */
IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_C
ia_css_fragment_descriptor_t *ia_css_data_terminal_get_fragment_descriptor(
	const ia_css_data_terminal_t *dterminal,
	const unsigned int fragment_index)
{
	DECLARE_ERRVAL
	ia_css_fragment_descriptor_t *fragment_descriptor = NULL;
	uint16_t fragment_count = 0;

	IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, VERBOSE,
		"ia_css_data_terminal_get_frame_descriptor(): enter:\n");

	fragment_count = ia_css_data_terminal_get_fragment_count(dterminal);

	verifexitval(dterminal != NULL, EFAULT);
	verifexitval(fragment_count != 0, EINVAL);
	verifexitval(fragment_index < fragment_count, EINVAL);

	fragment_descriptor = (ia_css_fragment_descriptor_t *)
		((char *)dterminal + dterminal->fragment_descriptor_offset);

	fragment_descriptor += fragment_index;
EXIT:
	if (haserror(EFAULT)) {
		IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, ERROR,
			"ia_css_data_terminal_get_frame_descriptor: invalid argument\n");
	}
	return fragment_descriptor;
}

/*
 * Fragment count, taken from the parent process group; 0 on error.
 * NOTE(review): get_parent is called before the NULL verif of dterminal;
 * get_parent checks NULL itself, so this is safe but out of order.
 */
IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_C
uint16_t ia_css_data_terminal_get_fragment_count(
	const ia_css_data_terminal_t *dterminal)
{
	DECLARE_ERRVAL
	ia_css_process_group_t *parent;
	uint16_t fragment_count = 0;

	IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, VERBOSE,
		"ia_css_data_terminal_get_fragment_count(): enter:\n");

	parent = ia_css_terminal_get_parent((ia_css_terminal_t *)dterminal);

	verifexitval(dterminal != NULL, EFAULT);
	verifexitval(parent != NULL, EFAULT);

	fragment_count = ia_css_process_group_get_fragment_count(parent);
EXIT:
	if (haserror(EFAULT)) {
		IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, ERROR,
			"ia_css_data_terminal_get_fragment_count: invalid argument\n");
	}
	return fragment_count;
}

/* True for cached parameter terminals (PARAM_CACHED_IN/OUT). */
IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_C
bool ia_css_is_terminal_parameter_terminal(
	const ia_css_terminal_t *terminal)
{
	DECLARE_ERRVAL
	ia_css_terminal_type_t terminal_type = IA_CSS_N_TERMINAL_TYPES;

	IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, VERBOSE,
		"ia_css_is_terminal_parameter_terminal(): enter:\n");

	verifexitval(terminal != NULL, EFAULT);

	/* will return an error value on error */
	terminal_type = ia_css_terminal_get_type(terminal);

EXIT:
	if (haserror(EFAULT)) {
		IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, ERROR,
			"ia_css_is_terminal_parameter_terminal: invalid argument\n");
	}
	return (terminal_type == IA_CSS_TERMINAL_TYPE_PARAM_CACHED_IN ||
		terminal_type == IA_CSS_TERMINAL_TYPE_PARAM_CACHED_OUT);
}

/* True for data terminals (DATA_IN/DATA_OUT). */
IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_C
bool ia_css_is_terminal_data_terminal(
	const ia_css_terminal_t *terminal)
{
	DECLARE_ERRVAL
	ia_css_terminal_type_t terminal_type = IA_CSS_N_TERMINAL_TYPES;

	IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, VERBOSE,
		"ia_css_is_terminal_data_terminal(): enter:\n");

	verifexitval(terminal != NULL, EFAULT);

	/* will return an error value on error */
	terminal_type = ia_css_terminal_get_type(terminal);

EXIT:
	if (haserror(EFAULT)) {
		IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, ERROR,
			"ia_css_is_terminal_data_terminal invalid argument\n");
	}
	return (terminal_type == IA_CSS_TERMINAL_TYPE_DATA_IN ||
		terminal_type == IA_CSS_TERMINAL_TYPE_DATA_OUT);
}

/* True for program terminals (TYPE_PROGRAM). */
IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_C
bool ia_css_is_terminal_program_terminal(
	const ia_css_terminal_t *terminal)
{
	DECLARE_ERRVAL
	ia_css_terminal_type_t terminal_type = IA_CSS_N_TERMINAL_TYPES;

	IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, VERBOSE,
		"ia_css_is_terminal_program_terminal(): enter:\n");

	verifexitval(terminal != NULL, EFAULT);

	/* will return an error value on error */
	terminal_type = ia_css_terminal_get_type(terminal);

EXIT:
	if (haserror(EFAULT)) {
		IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, ERROR,
			"ia_css_is_terminal_program_terminal: invalid argument\n");
	}
	return (terminal_type == IA_CSS_TERMINAL_TYPE_PROGRAM);
}

/* True for program-control-init terminals. (Continues past this view.) */
IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_C
bool ia_css_is_terminal_program_control_init_terminal(
	const ia_css_terminal_t *terminal)
{
	DECLARE_ERRVAL
	ia_css_terminal_type_t terminal_type = IA_CSS_N_TERMINAL_TYPES;

	IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, VERBOSE,
		"ia_css_is_terminal_program_control_init_terminal(): enter:\n");

	verifexitval(terminal != NULL, EFAULT);

	/* will return an error value on error */
	terminal_type = ia_css_terminal_get_type(terminal);

EXIT:
	if (haserror(EFAULT)) {
+ IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, ERROR, + "ia_css_is_terminal_program_control_init_terminal: invalid argument\n"); + } + return (terminal_type == IA_CSS_TERMINAL_TYPE_PROGRAM_CONTROL_INIT); +} + +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_C +bool ia_css_is_terminal_spatial_parameter_terminal( + const ia_css_terminal_t *terminal) +{ + DECLARE_ERRVAL + ia_css_terminal_type_t terminal_type = IA_CSS_N_TERMINAL_TYPES; + + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, VERBOSE, + "ia_css_is_terminal_spatial_parameter_terminal(): enter:\n"); + + verifexitval(terminal != NULL, EFAULT); + + /* will return an error value on error */ + terminal_type = ia_css_terminal_get_type(terminal); + +EXIT: + if (haserror(EFAULT)) { + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, ERROR, + "ia_css_is_terminal_spatial_param_terminal: invalid argument\n"); + } + return (terminal_type == IA_CSS_TERMINAL_TYPE_PARAM_SPATIAL_IN || + terminal_type == IA_CSS_TERMINAL_TYPE_PARAM_SPATIAL_OUT); +} + +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_C +uint8_t ia_css_data_terminal_compute_plane_count( + const ia_css_terminal_manifest_t *manifest, + const ia_css_program_group_param_t *param) +{ + DECLARE_ERRVAL + uint8_t plane_count = 1; + + NOT_USED(manifest); + NOT_USED(param); + + verifexitval(manifest != NULL, EFAULT); + verifexitval(param != NULL, EFAULT); + /* TODO: Implementation Missing*/ + + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, VERBOSE, + "ia_css_data_terminal_compute_plane_count(): enter:\n"); +EXIT: + if (haserror(EFAULT)) { + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, ERROR, + "ia_css_data_terminal_compute_plane_count: invalid argument\n"); + } + return plane_count; +} + +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_C +vied_vaddress_t ia_css_terminal_get_buffer( + const ia_css_terminal_t *terminal) +{ + DECLARE_ERRVAL + vied_vaddress_t buffer = VIED_NULL; + + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, VERBOSE, + "ia_css_terminal_get_buffer(): enter:\n"); + + if (ia_css_is_terminal_data_terminal(terminal)) { + ia_css_frame_t *frame = ia_css_data_terminal_get_frame( + 
(ia_css_data_terminal_t *)terminal); + + verifexitval(frame != NULL, EFAULT); + buffer = ia_css_frame_get_buffer(frame); + } else if (ia_css_is_terminal_parameter_terminal(terminal)) { + const ia_css_param_terminal_t *param_terminal = + (const ia_css_param_terminal_t *)terminal; + + buffer = param_terminal->param_payload.buffer; + } else if (ia_css_is_terminal_program_terminal(terminal)) { + const ia_css_program_terminal_t *program_terminal = + (const ia_css_program_terminal_t *)terminal; + + buffer = program_terminal->param_payload.buffer; + } else if (ia_css_is_terminal_program_control_init_terminal(terminal)) { + const ia_css_program_control_init_terminal_t *program_ctrl_init_terminal = + (const ia_css_program_control_init_terminal_t *)terminal; + + buffer = program_ctrl_init_terminal->param_payload.buffer; + } else if (ia_css_is_terminal_spatial_parameter_terminal(terminal)) { + const ia_css_spatial_param_terminal_t *spatial_terminal = + (const ia_css_spatial_param_terminal_t *)terminal; + + buffer = spatial_terminal->param_payload.buffer; + } +EXIT: + if (haserror(EFAULT)) { + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, ERROR, + "ia_css_terminal_get_buffer: invalid argument terminal\n"); + } + return buffer; +} + +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_C +int ia_css_terminal_set_buffer( + ia_css_terminal_t *terminal, + vied_vaddress_t buffer) +{ + DECLARE_ERRVAL + int retval = -1; + + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, VERBOSE, + "ia_css_terminal_set_buffer(): enter:\n"); + + if (ia_css_is_terminal_data_terminal(terminal) == true) { + /* Currently using Frames inside data terminal , + * TODO: start directly using data. 
+ */ + ia_css_data_terminal_t *dterminal = + (ia_css_data_terminal_t *)terminal; + ia_css_frame_t *frame = + ia_css_data_terminal_get_frame(dterminal); + + verifexitval(frame != NULL, EFAULT); + retval = ia_css_frame_set_buffer(frame, buffer); + verifexitval(retval == 0, EINVAL); + } else if (ia_css_is_terminal_parameter_terminal(terminal) == true) { + ia_css_param_terminal_t *pterminal = + (ia_css_param_terminal_t *)terminal; + + pterminal->param_payload.buffer = buffer; + retval = 0; + } else if (ia_css_is_terminal_program_terminal(terminal) == true) { + ia_css_program_terminal_t *pterminal = + (ia_css_program_terminal_t *)terminal; + + pterminal->param_payload.buffer = buffer; + retval = 0; + } else if (ia_css_is_terminal_program_control_init_terminal(terminal) == true) { + ia_css_program_control_init_terminal_t *pterminal = + (ia_css_program_control_init_terminal_t *)terminal; + + pterminal->param_payload.buffer = buffer; + retval = 0; + } else if (ia_css_is_terminal_spatial_parameter_terminal(terminal) == + true) { + ia_css_spatial_param_terminal_t *pterminal = + (ia_css_spatial_param_terminal_t *)terminal; + + pterminal->param_payload.buffer = buffer; + retval = 0; + } else { + return retval; + } + + retval = 0; +EXIT: + if (!noerror()) { + IA_CSS_TRACE_1(PSYSAPI_DYNAMIC, ERROR, + "ia_css_terminal_set_buffer failed (%i)\n", retval); + } + return retval; +} + +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_C +int ia_css_terminal_get_terminal_index( + const ia_css_terminal_t *terminal) +{ + DECLARE_ERRVAL + int terminal_index = -1; + + verifexitval(terminal != NULL, EFAULT); + + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, VERBOSE, + "ia_css_terminal_get_terminal_index(): enter:\n"); + + if (ia_css_is_terminal_data_terminal(terminal)) { + ia_css_frame_t *frame = ia_css_data_terminal_get_frame( + (ia_css_data_terminal_t *)terminal); + + verifexitval(frame != NULL, EFAULT); + terminal_index = ia_css_frame_get_data_index(frame); + } else { + if 
(ia_css_is_terminal_parameter_terminal(terminal)) { + const ia_css_param_terminal_t *param_terminal = + (const ia_css_param_terminal_t *)terminal; + + terminal_index = param_terminal->param_payload.terminal_index; + } else if (ia_css_is_terminal_program_terminal(terminal)) { + const ia_css_program_terminal_t *program_terminal = + (const ia_css_program_terminal_t *)terminal; + + terminal_index = program_terminal->param_payload.terminal_index; + } else if (ia_css_is_terminal_program_control_init_terminal(terminal)) { + const ia_css_program_control_init_terminal_t *program_ctrl_init_terminal = + (const ia_css_program_control_init_terminal_t *)terminal; + + terminal_index = program_ctrl_init_terminal->param_payload.terminal_index; + } else if (ia_css_is_terminal_spatial_parameter_terminal(terminal)) { + const ia_css_spatial_param_terminal_t *spatial_terminal = + (const ia_css_spatial_param_terminal_t *)terminal; + + terminal_index = spatial_terminal->param_payload.terminal_index; + } else { + verifjmpexit(0); + } + } +EXIT: + if (haserror(EFAULT)) { + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, ERROR, + "ia_css_terminal_get_terminal_index: invalid argument\n"); + } + return terminal_index; +} + +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_C +int ia_css_terminal_set_terminal_index( + ia_css_terminal_t *terminal, + unsigned int terminal_index) +{ + DECLARE_ERRVAL + int retval = -1; + + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, VERBOSE, + "ia_css_terminal_set_terminal_index(): enter:\n"); + + if (ia_css_is_terminal_data_terminal(terminal) == true) { + /* Currently using Frames inside data terminal , + * TODO: start directly using data. 
+ */ + ia_css_data_terminal_t *dterminal = + (ia_css_data_terminal_t *)terminal; + ia_css_frame_t *frame = + ia_css_data_terminal_get_frame(dterminal); + + verifexitval(frame != NULL, EFAULT); + retval = ia_css_frame_set_data_index(frame, terminal_index); + verifexitval(retval == 0, EINVAL); + } else { + if (ia_css_is_terminal_parameter_terminal(terminal) == true) { + ia_css_param_terminal_t *pterminal = + (ia_css_param_terminal_t *)terminal; + + pterminal->param_payload.terminal_index = terminal_index; + retval = 0; + } else if (ia_css_is_terminal_program_terminal(terminal) == true) { + ia_css_program_terminal_t *pterminal = + (ia_css_program_terminal_t *)terminal; + + pterminal->param_payload.terminal_index = terminal_index; + retval = 0; + } else if (ia_css_is_terminal_program_control_init_terminal(terminal) + == true) { + ia_css_program_control_init_terminal_t *pterminal = + (ia_css_program_control_init_terminal_t *)terminal; + + pterminal->param_payload.terminal_index = terminal_index; + retval = 0; + } else if (ia_css_is_terminal_spatial_parameter_terminal(terminal) == + true) { + ia_css_spatial_param_terminal_t *pterminal = + (ia_css_spatial_param_terminal_t *)terminal; + + pterminal->param_payload.terminal_index = terminal_index; + retval = 0; + } else { + return retval; + } + } + + retval = 0; +EXIT: + if (!noerror()) { + IA_CSS_TRACE_1(PSYSAPI_DYNAMIC, ERROR, + "ia_css_terminal_set_terminal_index failed (%i)\n", + retval); + } + return retval; +} + +STORAGE_CLASS_INLINE bool ia_css_is_data_terminal_valid( + const ia_css_terminal_t *terminal, + const ia_css_terminal_manifest_t *terminal_manifest, + const uint16_t nof_fragments) +{ + DECLARE_ERRVAL + bool invalid_flag = false; + + const ia_css_data_terminal_t *dterminal = + (ia_css_data_terminal_t *)terminal; + const ia_css_data_terminal_manifest_t *dt_manifest = + (ia_css_data_terminal_manifest_t *)terminal_manifest; + const ia_css_frame_descriptor_t *frame_descriptor; + ia_css_frame_format_bitmap_t 
man_frame_format_bitmap; + ia_css_frame_format_bitmap_t proc_frame_format_bitmap; + uint16_t max_value[IA_CSS_N_DATA_DIMENSION]; + uint16_t min_value[IA_CSS_N_DATA_DIMENSION]; + + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, VERBOSE, + "ia_css_is_data_terminal_valid enter\n"); + + frame_descriptor = + ia_css_data_terminal_get_frame_descriptor(dterminal); + verifexitval(frame_descriptor != NULL, EFAULT); + man_frame_format_bitmap = + ia_css_data_terminal_manifest_get_frame_format_bitmap( + dt_manifest); + proc_frame_format_bitmap = + ia_css_frame_format_bit_mask( + frame_descriptor->frame_format_type); + /* + * TODO: Replace by 'validation of frame format type'. + * Currently frame format type is not correctly set by manifest, + * waiting for HSD 1804260604 + */ + if (man_frame_format_bitmap > 0) { + if ((man_frame_format_bitmap & + proc_frame_format_bitmap) == 0) { + uint32_t *bitmap_arr = + (uint32_t *)&man_frame_format_bitmap; + + NOT_USED(bitmap_arr); + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, WARNING, + "Frame format type not defined in manifest\n"); + IA_CSS_TRACE_2(PSYSAPI_DYNAMIC, INFO, + " man bitmap_arr[]: %d,%d\n", + bitmap_arr[1], bitmap_arr[0]); + bitmap_arr = (uint32_t *)&proc_frame_format_bitmap; + IA_CSS_TRACE_2(PSYSAPI_DYNAMIC, INFO, + " proc bitmap_arr[]: %d,%d\n", + bitmap_arr[1], bitmap_arr[0]); + } + } else { + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, WARNING, + "Frame format bitmap not defined in manifest\n"); + } + ia_css_data_terminal_manifest_get_min_size(dt_manifest, min_value); + /* + * TODO: Replace by validation of Minimal frame column dimensions. + * Currently not correctly set by manifest yet, + * waiting for HSD 1804260604 + */ + if ((frame_descriptor->dimension[IA_CSS_COL_DIMENSION] < + min_value[IA_CSS_COL_DIMENSION])) { + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, WARNING, + "Minimal frame column dimensions not set correctly (by manifest)\n"); + } + /* + * TODO: Replace by validation of Minimal frame row dimensions. 
+ * Currently not correctly set by manifest yet, + * waiting for HSD 1804260604 + */ + if (frame_descriptor->dimension[IA_CSS_ROW_DIMENSION] < + min_value[IA_CSS_ROW_DIMENSION]) { + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, WARNING, + "Minimal frame row dimensions not set correctly (by manifest)\n"); + } + + ia_css_data_terminal_manifest_get_max_size(dt_manifest, max_value); + /* + * TODO: Replace by validation of Maximal frame column dimensions. + * Currently not correctly set by manifest yet, + * waiting for HSD 1804260604 + */ + if (frame_descriptor->dimension[IA_CSS_COL_DIMENSION] > + max_value[IA_CSS_COL_DIMENSION]) { + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, WARNING, + "Maximal frame column dimensions not set correctly (by manifest)\n"); + } + /* + * TODO: Replace by validation of Maximal frame row dimensions. + * Currently not correctly set by manifest yet, + * waiting for HSD 1804260604 + */ + if (frame_descriptor->dimension[IA_CSS_ROW_DIMENSION] > + max_value[IA_CSS_ROW_DIMENSION]) { + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, WARNING, + "Maximal frame row dimensions not set correctly (by manifest)\n"); + } + IA_CSS_TRACE_2(PSYSAPI_DYNAMIC, VERBOSE, "min_value: [%d,%d]\n", + min_value[IA_CSS_COL_DIMENSION], + min_value[IA_CSS_ROW_DIMENSION]); + IA_CSS_TRACE_2(PSYSAPI_DYNAMIC, VERBOSE, "max_value: [%d,%d]\n", + max_value[IA_CSS_COL_DIMENSION], + max_value[IA_CSS_ROW_DIMENSION]); + IA_CSS_TRACE_2(PSYSAPI_DYNAMIC, VERBOSE, "frame dim: [%d,%d]\n", + frame_descriptor->dimension[IA_CSS_COL_DIMENSION], + frame_descriptor->dimension[IA_CSS_ROW_DIMENSION]); + /* + * TODO: Add validation of fragment dimensions. 
+ * Currently not set by manifest yet, waiting for HSD 1804260604 + */ + NOT_USED(nof_fragments); + +EXIT: + if (haserror(EFAULT)) { + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, ERROR, + "ia_css_is_data_terminal_valid() invalid argument\n"); + return false; + } else { + return (!invalid_flag); + } +} + +STORAGE_CLASS_INLINE void ia_css_program_terminal_seq_info_print( + const ia_css_kernel_fragment_sequencer_info_manifest_desc_t + *man_seq_info_desc, + const ia_css_kernel_fragment_sequencer_info_desc_t + *term_seq_info_desc) +{ + NOT_USED(man_seq_info_desc); + NOT_USED(term_seq_info_desc); + + /* slice dimension column */ + IA_CSS_TRACE_1(PSYSAPI_DYNAMIC, VERBOSE, + "fragment_grid_slice_dimension: %d\n", + term_seq_info_desc-> + fragment_grid_slice_dimension[IA_CSS_COL_DIMENSION]); + IA_CSS_TRACE_1(PSYSAPI_DYNAMIC, VERBOSE, + "max_fragment_grid_slice_dimension: %d\n", + man_seq_info_desc-> + max_fragment_grid_slice_dimension[IA_CSS_COL_DIMENSION]); + IA_CSS_TRACE_1(PSYSAPI_DYNAMIC, VERBOSE, + "min_fragment_grid_slice_dimension: %d\n", + man_seq_info_desc-> + min_fragment_grid_slice_dimension[IA_CSS_COL_DIMENSION]); + + /* slice dimension row */ + IA_CSS_TRACE_1(PSYSAPI_DYNAMIC, VERBOSE, + "fragment_grid_slice_dimension: %d\n", + term_seq_info_desc-> + fragment_grid_slice_dimension[IA_CSS_ROW_DIMENSION]); + IA_CSS_TRACE_1(PSYSAPI_DYNAMIC, VERBOSE, + "max_fragment_grid_slice_dimension: %d\n", + man_seq_info_desc-> + max_fragment_grid_slice_dimension[IA_CSS_ROW_DIMENSION]); + IA_CSS_TRACE_1(PSYSAPI_DYNAMIC, VERBOSE, + "min_fragment_grid_slice_dimension: %d\n", + man_seq_info_desc-> + min_fragment_grid_slice_dimension[IA_CSS_ROW_DIMENSION]); + + /* slice count column */ + IA_CSS_TRACE_1(PSYSAPI_DYNAMIC, VERBOSE, + "fragment_grid_slice_count: %d\n", + term_seq_info_desc-> + fragment_grid_slice_count[IA_CSS_COL_DIMENSION]); + IA_CSS_TRACE_1(PSYSAPI_DYNAMIC, VERBOSE, + "max_fragment_grid_slice_count: %d\n", + man_seq_info_desc-> + 
max_fragment_grid_slice_count[IA_CSS_COL_DIMENSION]); + IA_CSS_TRACE_1(PSYSAPI_DYNAMIC, VERBOSE, + "min_fragment_grid_slice_count: %d\n", + man_seq_info_desc-> + min_fragment_grid_slice_count[IA_CSS_COL_DIMENSION]); + + /* slice count row */ + IA_CSS_TRACE_1(PSYSAPI_DYNAMIC, VERBOSE, + "fragment_grid_slice_count: %d\n", + term_seq_info_desc-> + fragment_grid_slice_count[IA_CSS_ROW_DIMENSION]); + IA_CSS_TRACE_1(PSYSAPI_DYNAMIC, VERBOSE, + "max_fragment_grid_slice_count: %d\n", + man_seq_info_desc-> + max_fragment_grid_slice_count[IA_CSS_ROW_DIMENSION]); + IA_CSS_TRACE_1(PSYSAPI_DYNAMIC, VERBOSE, + "min_fragment_grid_slice_count: %d\n", + man_seq_info_desc-> + min_fragment_grid_slice_count[IA_CSS_ROW_DIMENSION]); + + /* decimation factor column */ + IA_CSS_TRACE_1(PSYSAPI_DYNAMIC, VERBOSE, + "fragment_grid_point_decimation_factor: %d\n", + term_seq_info_desc-> + fragment_grid_point_decimation_factor[IA_CSS_COL_DIMENSION]); + IA_CSS_TRACE_1(PSYSAPI_DYNAMIC, VERBOSE, + "max_fragment_grid_point_decimation_factor: %d\n", + man_seq_info_desc-> + max_fragment_grid_point_decimation_factor[IA_CSS_COL_DIMENSION] + ); + IA_CSS_TRACE_1(PSYSAPI_DYNAMIC, VERBOSE, + "min_fragment_grid_point_decimation_factor: %d\n", + man_seq_info_desc-> + min_fragment_grid_point_decimation_factor[IA_CSS_COL_DIMENSION] + ); + + /* decimation factor row */ + IA_CSS_TRACE_1(PSYSAPI_DYNAMIC, VERBOSE, + "fragment_grid_point_decimation_factor: %d\n", + term_seq_info_desc-> + fragment_grid_point_decimation_factor[IA_CSS_ROW_DIMENSION]); + IA_CSS_TRACE_1(PSYSAPI_DYNAMIC, VERBOSE, + "max_fragment_grid_point_decimation_factor: %d\n", + man_seq_info_desc-> + max_fragment_grid_point_decimation_factor[ + IA_CSS_ROW_DIMENSION]); + IA_CSS_TRACE_1(PSYSAPI_DYNAMIC, VERBOSE, + "min_fragment_grid_point_decimation_factor: %d\n", + man_seq_info_desc-> + min_fragment_grid_point_decimation_factor[ + IA_CSS_ROW_DIMENSION]); + + /* index column */ + IA_CSS_TRACE_1(PSYSAPI_DYNAMIC, VERBOSE, + 
"fragment_grid_overlay_pixel_topleft_index: %d\n", + term_seq_info_desc-> + fragment_grid_overlay_pixel_topleft_index[ + IA_CSS_COL_DIMENSION]); + IA_CSS_TRACE_1(PSYSAPI_DYNAMIC, VERBOSE, + "max_fragment_grid_overlay_pixel_topleft_index: %d\n", + man_seq_info_desc-> + max_fragment_grid_overlay_pixel_topleft_index[ + IA_CSS_COL_DIMENSION]); + IA_CSS_TRACE_1(PSYSAPI_DYNAMIC, VERBOSE, + "min_fragment_grid_overlay_pixel_topleft_index: %d\n", + man_seq_info_desc-> + min_fragment_grid_overlay_pixel_topleft_index[ + IA_CSS_COL_DIMENSION]); + + /* index row */ + IA_CSS_TRACE_1(PSYSAPI_DYNAMIC, VERBOSE, + "fragment_grid_overlay_pixel_topleft_index: %d\n", + term_seq_info_desc-> + fragment_grid_overlay_pixel_topleft_index[ + IA_CSS_ROW_DIMENSION]); + IA_CSS_TRACE_1(PSYSAPI_DYNAMIC, VERBOSE, + "max_fragment_grid_overlay_pixel_topleft_index: %d\n", + man_seq_info_desc-> + max_fragment_grid_overlay_pixel_topleft_index[ + IA_CSS_ROW_DIMENSION]); + IA_CSS_TRACE_1(PSYSAPI_DYNAMIC, VERBOSE, + "min_fragment_grid_overlay_pixel_topleft_index: %d\n", + man_seq_info_desc-> + min_fragment_grid_overlay_pixel_topleft_index[ + IA_CSS_ROW_DIMENSION]); + + /* dimension column */ + IA_CSS_TRACE_1(PSYSAPI_DYNAMIC, VERBOSE, + "fragment_grid_overlay_pixel_dimension: %d\n", + term_seq_info_desc-> + fragment_grid_overlay_pixel_dimension[ + IA_CSS_COL_DIMENSION]); + IA_CSS_TRACE_1(PSYSAPI_DYNAMIC, VERBOSE, + "max_fragment_grid_overlay_pixel_dimension: %d\n", + man_seq_info_desc-> + max_fragment_grid_overlay_pixel_dimension[ + IA_CSS_COL_DIMENSION]); + IA_CSS_TRACE_1(PSYSAPI_DYNAMIC, VERBOSE, + "min_fragment_grid_overlay_pixel_dimension: %d\n", + man_seq_info_desc-> + min_fragment_grid_overlay_pixel_dimension[ + IA_CSS_COL_DIMENSION]); + + /* dimension column */ + IA_CSS_TRACE_1(PSYSAPI_DYNAMIC, VERBOSE, + "fragment_grid_overlay_pixel_dimension: %d\n", + term_seq_info_desc-> + fragment_grid_overlay_pixel_dimension[ + IA_CSS_ROW_DIMENSION]); + IA_CSS_TRACE_1(PSYSAPI_DYNAMIC, VERBOSE, + 
"max_fragment_grid_overlay_pixel_dimension: %d\n", + man_seq_info_desc-> + max_fragment_grid_overlay_pixel_dimension[ + IA_CSS_ROW_DIMENSION]); + IA_CSS_TRACE_1(PSYSAPI_DYNAMIC, VERBOSE, + "min_fragment_grid_overlay_pixel_dimension: %d\n", + man_seq_info_desc-> + min_fragment_grid_overlay_pixel_dimension[ + IA_CSS_ROW_DIMENSION]); +} + +STORAGE_CLASS_INLINE bool ia_css_is_program_terminal_valid( + const ia_css_terminal_t *terminal, + const ia_css_terminal_manifest_t *terminal_manifest, + const uint16_t nof_fragments) +{ + DECLARE_ERRVAL + bool invalid_flag = false; + uint16_t frag_idx; + + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, VERBOSE, + "ia_css_is_program_terminal_valid enter\n"); + + for (frag_idx = 0; frag_idx < nof_fragments; frag_idx++) { + uint16_t frag_seq_info_count, seq_idx; + const ia_css_program_terminal_t *prog_term; + const ia_css_program_terminal_manifest_t *prog_term_man; + + prog_term = (const ia_css_program_terminal_t *)terminal; + prog_term_man = + (const ia_css_program_terminal_manifest_t *) + terminal_manifest; + frag_seq_info_count = + prog_term_man-> + kernel_fragment_sequencer_info_manifest_info_count; + + for (seq_idx = 0; seq_idx < frag_seq_info_count; seq_idx++) { + const ia_css_kernel_fragment_sequencer_info_desc_t + *term_seq_info_desc; + const + ia_css_kernel_fragment_sequencer_info_manifest_desc_t * + man_seq_info_desc; + + term_seq_info_desc = + ia_css_program_terminal_get_kernel_frgmnt_seq_info_desc( + prog_term, frag_idx, seq_idx, + frag_seq_info_count); + verifexitval(term_seq_info_desc != NULL, EFAULT); + man_seq_info_desc = + ia_css_program_terminal_manifest_get_kernel_frgmnt_seq_info_desc + (prog_term_man, seq_idx); + verifexitval(man_seq_info_desc != NULL, EFAULT); + + ia_css_program_terminal_seq_info_print( + man_seq_info_desc, term_seq_info_desc); + /* slice dimension column */ + invalid_flag = invalid_flag || + (term_seq_info_desc-> + fragment_grid_slice_dimension[ + IA_CSS_COL_DIMENSION] > + man_seq_info_desc-> + 
max_fragment_grid_slice_dimension[ + IA_CSS_COL_DIMENSION]); + invalid_flag = invalid_flag || + (term_seq_info_desc-> + fragment_grid_slice_dimension[ + IA_CSS_COL_DIMENSION] < + man_seq_info_desc-> + min_fragment_grid_slice_dimension[ + IA_CSS_COL_DIMENSION]); + + /* slice dimension row */ + invalid_flag = invalid_flag || + (term_seq_info_desc-> + fragment_grid_slice_dimension[ + IA_CSS_ROW_DIMENSION] > + man_seq_info_desc-> + max_fragment_grid_slice_dimension[ + IA_CSS_ROW_DIMENSION]); + invalid_flag = invalid_flag || + (term_seq_info_desc-> + fragment_grid_slice_dimension[ + IA_CSS_ROW_DIMENSION] < + man_seq_info_desc-> + min_fragment_grid_slice_dimension[ + IA_CSS_ROW_DIMENSION]); + + /* slice count column */ + invalid_flag = invalid_flag || + (term_seq_info_desc-> + fragment_grid_slice_count[ + IA_CSS_COL_DIMENSION] > + man_seq_info_desc-> + max_fragment_grid_slice_count[ + IA_CSS_COL_DIMENSION]); + invalid_flag = invalid_flag || + (term_seq_info_desc-> + fragment_grid_slice_count[ + IA_CSS_COL_DIMENSION] < + man_seq_info_desc-> + min_fragment_grid_slice_count[ + IA_CSS_COL_DIMENSION]); + + /* slice count row */ + invalid_flag = invalid_flag || + (term_seq_info_desc-> + fragment_grid_slice_count[ + IA_CSS_ROW_DIMENSION] > + man_seq_info_desc-> + max_fragment_grid_slice_count[ + IA_CSS_ROW_DIMENSION]); + invalid_flag = invalid_flag || + (term_seq_info_desc-> + fragment_grid_slice_count[ + IA_CSS_ROW_DIMENSION] < + man_seq_info_desc-> + min_fragment_grid_slice_count[ + IA_CSS_ROW_DIMENSION]); + + /* decimation factor column */ + invalid_flag = invalid_flag || + (term_seq_info_desc-> + fragment_grid_point_decimation_factor[ + IA_CSS_COL_DIMENSION] > + man_seq_info_desc-> + max_fragment_grid_point_decimation_factor[ + IA_CSS_COL_DIMENSION]); + invalid_flag = invalid_flag || + (term_seq_info_desc-> + fragment_grid_point_decimation_factor[ + IA_CSS_COL_DIMENSION] < + man_seq_info_desc-> + min_fragment_grid_point_decimation_factor[ + IA_CSS_COL_DIMENSION]); + + /* 
decimation factor row */ + invalid_flag = invalid_flag || + (term_seq_info_desc-> + fragment_grid_point_decimation_factor[ + IA_CSS_ROW_DIMENSION] > + man_seq_info_desc-> + max_fragment_grid_point_decimation_factor[ + IA_CSS_ROW_DIMENSION]); + invalid_flag = invalid_flag || + (term_seq_info_desc-> + fragment_grid_point_decimation_factor[ + IA_CSS_ROW_DIMENSION] < + man_seq_info_desc-> + min_fragment_grid_point_decimation_factor[ + IA_CSS_ROW_DIMENSION]); + + /* index column */ + invalid_flag = invalid_flag || + (term_seq_info_desc-> + fragment_grid_overlay_pixel_topleft_index[ + IA_CSS_COL_DIMENSION] > + man_seq_info_desc-> + max_fragment_grid_overlay_pixel_topleft_index[ + IA_CSS_COL_DIMENSION]); + invalid_flag = invalid_flag || + (term_seq_info_desc-> + fragment_grid_overlay_pixel_topleft_index[ + IA_CSS_COL_DIMENSION] < + man_seq_info_desc-> + min_fragment_grid_overlay_pixel_topleft_index[ + IA_CSS_COL_DIMENSION]); + + /* index row */ + invalid_flag = invalid_flag || + (term_seq_info_desc-> + fragment_grid_overlay_pixel_topleft_index[ + IA_CSS_ROW_DIMENSION] > + man_seq_info_desc-> + max_fragment_grid_overlay_pixel_topleft_index[ + IA_CSS_ROW_DIMENSION]); + invalid_flag = invalid_flag || + (term_seq_info_desc-> + fragment_grid_overlay_pixel_topleft_index[ + IA_CSS_ROW_DIMENSION] < + man_seq_info_desc-> + min_fragment_grid_overlay_pixel_topleft_index[ + IA_CSS_ROW_DIMENSION]); + + /* dimension column */ + invalid_flag = invalid_flag || + (term_seq_info_desc-> + fragment_grid_overlay_pixel_dimension[ + IA_CSS_COL_DIMENSION] > + man_seq_info_desc-> + max_fragment_grid_overlay_pixel_dimension[ + IA_CSS_COL_DIMENSION]); + invalid_flag = invalid_flag || + (term_seq_info_desc-> + fragment_grid_overlay_pixel_dimension[ + IA_CSS_COL_DIMENSION] < + man_seq_info_desc-> + min_fragment_grid_overlay_pixel_dimension[ + IA_CSS_COL_DIMENSION]); + + /* dimension column */ + invalid_flag = invalid_flag || + (term_seq_info_desc-> + fragment_grid_overlay_pixel_dimension[ + 
IA_CSS_ROW_DIMENSION] > + man_seq_info_desc-> + max_fragment_grid_overlay_pixel_dimension[ + IA_CSS_ROW_DIMENSION]); + invalid_flag = invalid_flag || + (term_seq_info_desc-> + fragment_grid_overlay_pixel_dimension[ + IA_CSS_ROW_DIMENSION] < + man_seq_info_desc-> + min_fragment_grid_overlay_pixel_dimension[ + IA_CSS_ROW_DIMENSION]); + } + } + +EXIT: + if (haserror(EFAULT)) { + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, ERROR, + "ia_css_is_program_terminal_valid() invalid argument\n"); + return false; + } + if (invalid_flag == true) { + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, WARNING, + "ia_css_is_program_terminal_valid(): validation failed\n"); + /* TODO: program terminal parameters not correctly defined, + * disable validation result until issues has been solved + */ + return true; + } + return (!invalid_flag); +} + +STORAGE_CLASS_INLINE bool ia_css_is_sliced_terminal_valid( + const ia_css_terminal_t *terminal, + const ia_css_terminal_manifest_t *terminal_manifest, + const uint16_t nof_fragments) +{ + DECLARE_ERRVAL + bool invalid_flag = false; + uint16_t frag_idx; + + uint16_t slice_idx, section_idx; + + const ia_css_sliced_param_terminal_t *sliced_term = + (const ia_css_sliced_param_terminal_t *)terminal; + const ia_css_sliced_param_terminal_manifest_t *sliced_term_man = + (const ia_css_sliced_param_terminal_manifest_t *) + terminal_manifest; + + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, VERBOSE, + "ia_css_is_sliced_terminal_valid enter\n"); + + for (frag_idx = 0; frag_idx < nof_fragments; frag_idx++) { + const ia_css_fragment_slice_desc_t *fragment_slice_desc = + ia_css_sliced_param_terminal_get_fragment_slice_desc( + sliced_term, frag_idx); + + verifexitval(fragment_slice_desc != NULL, EFAULT); + + for (slice_idx = 0; + slice_idx < fragment_slice_desc->slice_count; + slice_idx++) { + for (section_idx = 0; + section_idx < + sliced_term_man->sliced_param_section_count; + section_idx++) { + const + ia_css_sliced_param_manifest_section_desc_t * + slice_man_section_desc; + const 
ia_css_slice_param_section_desc_t * + slice_section_desc; + + slice_man_section_desc = + ia_css_sliced_param_terminal_manifest_get_sliced_prm_sct_desc( + sliced_term_man, section_idx); + slice_section_desc = + ia_css_sliced_param_terminal_get_slice_param_section_desc( + sliced_term, frag_idx, + slice_idx, section_idx, + sliced_term_man-> + sliced_param_section_count); + verifexitval(slice_man_section_desc != NULL, EFAULT); + verifexitval(slice_section_desc != NULL, EFAULT); + + invalid_flag = invalid_flag || + (slice_section_desc->mem_size > + slice_man_section_desc->max_mem_size); + } + } + } + +EXIT: + if (haserror(EFAULT)) { + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, ERROR, + "ia_css_is_sliced_terminal_valid() invalid argument\n"); + return false; + } else { + return (!invalid_flag); + } + +} + +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_C +bool ia_css_is_terminal_valid( + const ia_css_terminal_t *terminal, + const ia_css_terminal_manifest_t *terminal_manifest) +{ + DECLARE_ERRVAL + bool is_valid = false; + uint16_t nof_fragments; + ia_css_terminal_type_t terminal_type = IA_CSS_TERMINAL_INVALID_ID; + + verifexitval(NULL != terminal, EFAULT); + verifexitval(NULL != terminal_manifest, EFAULT); + + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, VERBOSE, + "ia_css_is_terminal_valid enter\n"); + + nof_fragments = ia_css_data_terminal_get_fragment_count( + (const ia_css_data_terminal_t *)terminal); + terminal_type = ia_css_terminal_get_type(terminal); + + switch (terminal_type) { + case IA_CSS_TERMINAL_TYPE_DATA_IN: + case IA_CSS_TERMINAL_TYPE_DATA_OUT: + is_valid = ia_css_is_data_terminal_valid(terminal, + terminal_manifest, nof_fragments); + break; + case IA_CSS_TERMINAL_TYPE_PROGRAM: + is_valid = ia_css_is_program_terminal_valid(terminal, + terminal_manifest, nof_fragments); + break; + case IA_CSS_TERMINAL_TYPE_PARAM_CACHED_IN: + case IA_CSS_TERMINAL_TYPE_PARAM_CACHED_OUT: + case IA_CSS_TERMINAL_TYPE_PARAM_SPATIAL_IN: + case IA_CSS_TERMINAL_TYPE_PARAM_SPATIAL_OUT: + case 
IA_CSS_TERMINAL_TYPE_PROGRAM_CONTROL_INIT: + /* Nothing to be validated for cached and spatial + * parameters, return valid + */ + is_valid = true; + break; + case IA_CSS_TERMINAL_TYPE_PARAM_SLICED_IN: + case IA_CSS_TERMINAL_TYPE_PARAM_SLICED_OUT: + is_valid = ia_css_is_sliced_terminal_valid(terminal, + terminal_manifest, nof_fragments); + break; + default: + /* Terminal type unknown, return invalid */ + IA_CSS_TRACE_1(PSYSAPI_DYNAMIC, WARNING, + "ia_css_is_terminal_valid() Terminal type %x unknown\n", + (int)terminal_type); + is_valid = false; + break; + } + +EXIT: + if (haserror(EFAULT)) { + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, ERROR, + "ia_css_is_terminal_valid() invalid argument\n"); + return false; + } + /* TODO: to be removed once all PGs pass validation */ + if (is_valid == false) { + IA_CSS_TRACE_1(PSYSAPI_DYNAMIC, INFO, + "ia_css_is_terminal_valid(): type: %d validation failed\n", + terminal_type); + } + return is_valid; +} + +/* ================= Program Control Init Terminal - START ================= */ +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_C +int +ia_css_program_control_init_terminal_init( + ia_css_program_control_init_terminal_t *terminal, + const ia_css_program_control_init_terminal_manifest_t *manifest) +{ + int retval = -1; + unsigned int i; + unsigned int base_load_sec; + unsigned int base_connect_sec; + unsigned int load_index = 0; + unsigned int connect_index = 0; + unsigned int load_section_count = 0; + unsigned int connect_section_count = 0; + + ia_css_program_control_init_manifest_program_desc_t *man_progs; + + verifjmpexit(terminal != NULL); + + man_progs = + ia_css_program_control_init_terminal_manifest_get_program_desc(manifest, 0); + verifjmpexit(man_progs != NULL); + + for (i = 0; i < manifest->program_count; i++) { + load_section_count += man_progs[i].load_section_count; + connect_section_count += man_progs[i].connect_section_count; + } + + terminal->program_count = manifest->program_count; + terminal->program_section_desc_offset = + 
sizeof(ia_css_program_control_init_terminal_t); + + base_load_sec = /* base_load_sec relative to first program */ + terminal->program_count * + sizeof(ia_css_program_control_init_program_desc_t); + + base_connect_sec = base_load_sec + + load_section_count * + sizeof(ia_css_program_control_init_load_section_desc_t); + + for (i = 0; i < terminal->program_count; i++) { + ia_css_program_control_init_program_desc_t *prog; + + prog = ia_css_program_control_init_terminal_get_program_desc( + terminal, i); + verifjmpexit(prog != NULL); + + prog->load_section_count = man_progs[i].load_section_count; + prog->connect_section_count = man_progs[i].connect_section_count; + + prog->load_section_desc_offset = + base_load_sec + + load_index * + sizeof(ia_css_program_control_init_load_section_desc_t) - + i * sizeof(ia_css_program_control_init_program_desc_t); + prog->connect_section_desc_offset = + base_connect_sec + + connect_index * + sizeof(ia_css_program_control_init_connect_section_desc_t) - + i * sizeof(ia_css_program_control_init_program_desc_t); + + load_index += man_progs[i].load_section_count; + connect_index += man_progs[i].connect_section_count; + } + retval = 0; +EXIT: + return retval; +} + +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_C +unsigned int +ia_css_program_control_init_terminal_get_descriptor_size( + const ia_css_program_control_init_terminal_manifest_t *manifest) +{ + unsigned int i; + unsigned size = 0; + unsigned load_section_count = 0; + unsigned connect_section_count = 0; + ia_css_program_control_init_manifest_program_desc_t *man_progs; + verifjmpexit(manifest != NULL); + + man_progs = + ia_css_program_control_init_terminal_manifest_get_program_desc( + manifest, 0); + verifjmpexit(man_progs != NULL); + + for (i = 0; i < manifest->program_count; i++) { + load_section_count += man_progs[i].load_section_count; + connect_section_count += man_progs[i].connect_section_count; + } + + size = sizeof(ia_css_program_control_init_terminal_t) + + manifest->program_count * + 
sizeof(struct ia_css_program_control_init_program_desc_s) + + load_section_count * + sizeof(struct ia_css_program_control_init_load_section_desc_s) + + connect_section_count * + sizeof(struct ia_css_program_control_init_connect_section_desc_s); +EXIT: + return size; +} + +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_C +void ia_css_program_control_init_terminal_print( + const ia_css_program_control_init_terminal_t *terminal) +{ + unsigned int prog_idx, sec_idx; + ia_css_program_control_init_program_desc_t *prog; + ia_css_program_control_init_load_section_desc_t *load_sec; + ia_css_program_control_init_connect_section_desc_t *connect_sec; + + verifjmpexit(terminal != NULL); + + IA_CSS_TRACE_2(PSYSAPI_DYNAMIC, INFO, + "program_count: %d, payload_fragment_stride: %d\n", + terminal->program_count, + terminal->payload_fragment_stride); + + for (prog_idx = 0; prog_idx < terminal->program_count; prog_idx++) { + prog = ia_css_program_control_init_terminal_get_program_desc( + terminal, prog_idx); + verifjmpexit(prog != NULL); + + for (sec_idx = 0; sec_idx < prog->load_section_count; sec_idx++) { + load_sec = + ia_css_program_control_init_terminal_get_load_section_desc( + prog, sec_idx); + verifjmpexit(load_sec != NULL); + IA_CSS_TRACE_4(PSYSAPI_DYNAMIC, INFO, + "load_section>> device_descriptor_id: 0x%x, mem_offset: %d, " + "mem_size: %d, mode_bitmask: %x\n", + load_sec->device_descriptor_id.data, + load_sec->mem_offset, + load_sec->mem_size, + load_sec->mode_bitmask); + } + for (sec_idx = 0; sec_idx < prog->connect_section_count; sec_idx++) { + connect_sec = + ia_css_program_control_init_terminal_get_connect_section_desc( + prog, sec_idx); + verifjmpexit(connect_sec != NULL); + IA_CSS_TRACE_4(PSYSAPI_DYNAMIC, INFO, + "connect_section>> device_descriptor_id: 0x%x, " + "connect_terminal_ID: %d, connect_section_idx: %d, " + "mode_bitmask: %x\n", + connect_sec->device_descriptor_id.data, + connect_sec->connect_terminal_ID, + connect_sec->connect_section_idx, + connect_sec->mode_bitmask); 
+ } + } +EXIT: + return; +} + +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_C +ia_css_program_control_init_program_desc_t * +ia_css_program_control_init_terminal_get_program_desc( + const ia_css_program_control_init_terminal_t *prog_ctrl_init_terminal, + const unsigned int program_index) +{ + ia_css_program_control_init_program_desc_t *program_desc_base; + ia_css_program_control_init_program_desc_t *program_desc = NULL; + + verifjmpexit(prog_ctrl_init_terminal != NULL); + verifjmpexit(program_index < prog_ctrl_init_terminal->program_count); + + program_desc_base = (ia_css_program_control_init_program_desc_t *) + (((const char *)prog_ctrl_init_terminal) + + prog_ctrl_init_terminal->program_section_desc_offset); + program_desc = &(program_desc_base[program_index]); + +EXIT: + return program_desc; +} + +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_C +ia_css_process_id_t ia_css_program_control_init_terminal_get_process_id( + const ia_css_program_control_init_program_desc_t *program_desc) +{ + ia_css_process_id_t process_id = 0; + + verifjmpexit(program_desc != NULL); + + process_id = program_desc->control_info.process_id; + +EXIT: + return process_id; +} + +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_C +uint8_t ia_css_program_control_init_terminal_get_num_done_events( + const ia_css_program_control_init_program_desc_t *program_desc) +{ + uint8_t num_done_events = 0; + + verifjmpexit(program_desc != NULL); + + num_done_events = program_desc->control_info.num_done_events; + +EXIT: + return num_done_events; +} + +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_C +void ia_css_program_control_init_terminal_set_control_info( + ia_css_program_control_init_program_desc_t *program_desc, + ia_css_process_id_t process_id, + uint8_t num_done_events) +{ + verifjmpexit(program_desc != NULL); + + program_desc->control_info.process_id = process_id; + program_desc->control_info.num_done_events = num_done_events; + +EXIT: + return; +} + +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_C +ia_css_program_control_init_load_section_desc_t * 
+ia_css_program_control_init_terminal_get_load_section_desc( + const ia_css_program_control_init_program_desc_t *program_desc, + const unsigned int load_section_index) +{ + ia_css_program_control_init_load_section_desc_t *load_section_desc_base; + ia_css_program_control_init_load_section_desc_t *load_section_desc = NULL; + + verifjmpexit(program_desc != NULL); + verifjmpexit(load_section_index < program_desc->load_section_count); + + load_section_desc_base = (ia_css_program_control_init_load_section_desc_t *) + (((const char *)program_desc) + + program_desc->load_section_desc_offset); + load_section_desc = &(load_section_desc_base[load_section_index]); + +EXIT: + return load_section_desc; +} + +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_C +ia_css_program_control_init_connect_section_desc_t * +ia_css_program_control_init_terminal_get_connect_section_desc( + const ia_css_program_control_init_program_desc_t *program_desc, + const unsigned int connect_section_index) +{ + ia_css_program_control_init_connect_section_desc_t *connect_sec_desc_base; + ia_css_program_control_init_connect_section_desc_t *connect_sec_desc = NULL; + + verifjmpexit(program_desc != NULL); + verifjmpexit(connect_section_index < program_desc->connect_section_count); + + connect_sec_desc_base = + (ia_css_program_control_init_connect_section_desc_t *) + (((const char *)program_desc) + + program_desc->connect_section_desc_offset); + connect_sec_desc = &(connect_sec_desc_base[connect_section_index]); + +EXIT: + return connect_sec_desc; +} + +/* ================= Program Control Init Terminal - END ================= */ + +#endif /* __IA_CSS_PSYS_TERMINAL_IMPL_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/psysapi/dynamic/src/ia_css_psys_terminal_private_types.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/psysapi/dynamic/src/ia_css_psys_terminal_private_types.h new file mode 100644 index 0000000000000..68626561acb5d --- /dev/null +++ 
b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/psysapi/dynamic/src/ia_css_psys_terminal_private_types.h @@ -0,0 +1,186 @@ +/* +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ + +#ifndef __IA_CSS_PSYS_TERMINAL_PRIVATE_TYPES_H +#define __IA_CSS_PSYS_TERMINAL_PRIVATE_TYPES_H + +#include "ia_css_terminal_types.h" +#include "ia_css_program_group_data.h" +#include "ia_css_psys_manifest_types.h" + +#define N_UINT16_IN_DATA_TERMINAL_STRUCT 1 +#define N_UINT8_IN_DATA_TERMINAL_STRUCT 3 +#define N_PADDING_UINT8_IN_DATA_TERMINAL_STRUCT 3 + +/* ========================= Data terminal - START ========================= */ + +#define SIZE_OF_DATA_TERMINAL_STRUCT_BITS \ + (SIZE_OF_TERMINAL_STRUCT_BITS \ + + IA_CSS_FRAME_DESCRIPTOR_STRUCT_BITS \ + + IA_CSS_FRAME_STRUCT_BITS \ + + IA_CSS_STREAM_STRUCT_BITS \ + + IA_CSS_UINT32_T_BITS \ + + IA_CSS_CONNECTION_TYPE_BITS \ + + (N_UINT16_IN_DATA_TERMINAL_STRUCT * 16) \ + + (N_UINT8_IN_DATA_TERMINAL_STRUCT * 8) \ + + (N_PADDING_UINT8_IN_DATA_TERMINAL_STRUCT * 8)) + +/* + * The (data) terminal can be attached to a buffer or a stream. + * The stream interface is not necessarily limited to strict in-order access. + * For a stream the restriction is that contrary to a buffer it cannot be + * addressed directly, i.e. 
it behaves as a port, + * but it may support stream_pos() and/or seek() operations + */ +struct ia_css_data_terminal_s { + /**< Data terminal base */ + ia_css_terminal_t base; + /**< Properties of the data attached to the terminal */ + ia_css_frame_descriptor_t frame_descriptor; + /**< Data buffer handle attached to the terminal */ + ia_css_frame_t frame; + /**< (exclusive) Data stream handle attached to the terminal + * if the data is sourced over a device port + */ + ia_css_stream_t stream; + /**< Reserved */ + uint32_t reserved; + /**< Connection {buffer, stream, ...} */ + ia_css_connection_type_t connection_type; + /**< Array[fragment_count] (fragment_count being equal for all + * terminals in a subgraph) of fragment descriptors + */ + uint16_t fragment_descriptor_offset; + /**< Kernel id where this terminal is connected to */ + uint8_t kernel_id; + /**< Indicate to which subgraph this terminal belongs + * for common constraints + */ + uint8_t subgraph_id; + /* Link ID of the data terminal */ + uint8_t link_id; + /**< Padding for 64bit alignment */ + uint8_t padding[N_PADDING_UINT8_IN_DATA_TERMINAL_STRUCT]; +}; +/* ========================== Data terminal - END ========================== */ + +/* ================= Program Control Init Terminal - START ================= */ +#define SIZE_OF_PROG_CONTROL_INIT_LOAD_SECTION_DESC_STRUCT_BITS \ + (DEVICE_DESCRIPTOR_ID_BITS \ + + (3 * IA_CSS_UINT32_T_BITS) \ + ) +struct ia_css_program_control_init_load_section_desc_s { + /* Offset of the parameter allocation in memory */ + uint32_t mem_offset; + /* Memory allocation size needs of this parameter */ + uint32_t mem_size; + /* Device descriptor */ + device_descriptor_id_t device_descriptor_id; /* 32 bits */ + /* (Applicable to) mode bitmask */ + uint32_t mode_bitmask; +}; + +#define MODE_BITMASK_MEMORY (1u << IA_CSS_CONNECTION_MEMORY) +#define MODE_BITMASK_MEMORY_STREAM (1u << IA_CSS_CONNECTION_MEMORY_STREAM) +#define MODE_BITMASK_STREAM (1u << IA_CSS_CONNECTION_STREAM) 
+#define MODE_BITMASK_DONT_CARE (MODE_BITMASK_MEMORY | MODE_BITMASK_MEMORY_STREAM | MODE_BITMASK_STREAM) + +#define N_PADDING_UINT8_IN_PROG_CTRL_INIT_CONNECT_SECT_STRUCT (5) +#define SIZE_OF_PROG_CONTROL_INIT_CONNECT_SECTION_DESC_STRUCT_BITS \ + (DEVICE_DESCRIPTOR_ID_BITS \ + + (1 * IA_CSS_UINT32_T_BITS) \ + + (1 * IA_CSS_UINT16_T_BITS) \ + + IA_CSS_TERMINAL_ID_BITS \ + + (N_PADDING_UINT8_IN_PROG_CTRL_INIT_CONNECT_SECT_STRUCT * \ + IA_CSS_UINT8_T_BITS) \ + ) +struct ia_css_program_control_init_connect_section_desc_s { + /* Device descriptor */ + device_descriptor_id_t device_descriptor_id; /* 32 bits */ + /* (Applicable to) mode bitmask */ + uint32_t mode_bitmask; + /* Connected terminal section (plane) index */ + uint16_t connect_section_idx; + /* Absolute referral ID for the connected terminal */ + ia_css_terminal_ID_t connect_terminal_ID; + /* align to 64 */ + uint8_t padding[N_PADDING_UINT8_IN_PROG_CTRL_INIT_CONNECT_SECT_STRUCT]; +}; + +#define N_PADDING_UINT8_IN_PROG_DESC_CONTROL_INFO (1) +#define N_PADDING_UINT8_IN_PROG_CTRL_INIT_PROGRAM_DESC_STRUCT (4) +#define SIZE_OF_PROGRAM_DESC_CONTROL_INFO_STRUCT_BITS \ + (1 * IA_CSS_UINT16_T_BITS) \ + + (1 * IA_CSS_UINT8_T_BITS) \ + + (N_PADDING_UINT8_IN_PROG_DESC_CONTROL_INFO * IA_CSS_UINT8_T_BITS) + +#define SIZE_OF_PROG_CONTROL_INIT_PROG_DESC_STRUCT_BITS \ + (4 * IA_CSS_UINT16_T_BITS) \ + + (SIZE_OF_PROGRAM_DESC_CONTROL_INFO_STRUCT_BITS) \ + + (N_PADDING_UINT8_IN_PROG_CTRL_INIT_PROGRAM_DESC_STRUCT * \ + IA_CSS_UINT8_T_BITS) + +struct ia_css_program_desc_control_info_s { + /* 12-bit process identifier */ + ia_css_process_id_t process_id; + /* number of done acks required to close the process */ + uint8_t num_done_events; + uint8_t padding[N_PADDING_UINT8_IN_PROG_DESC_CONTROL_INFO]; +}; + +struct ia_css_program_control_init_program_desc_s { + /* Number of load sections in this program */ + uint16_t load_section_count; + /* Points to variable size array of + * ia_css_program_control_init_load_section_desc_s + * in 
relation to its program_desc + */ + uint16_t load_section_desc_offset; + /* Number of connect sections in this program */ + uint16_t connect_section_count; + /* Points to variable size array of + * ia_css_program_control_init_connect_section_desc_s + * in relation to its program_desc + */ + uint16_t connect_section_desc_offset; + struct ia_css_program_desc_control_info_s control_info; + /* align to 64 bits */ + uint8_t padding[N_PADDING_UINT8_IN_PROG_CTRL_INIT_PROGRAM_DESC_STRUCT]; +}; + +#define SIZE_OF_PROG_CONTROL_INIT_TERM_STRUCT_BITS \ + (SIZE_OF_TERMINAL_STRUCT_BITS \ + + IA_CSS_PARAM_PAYLOAD_STRUCT_BITS \ + + (1 * IA_CSS_UINT32_T_BITS) \ + + (2 * IA_CSS_UINT16_T_BITS) \ + ) +struct ia_css_program_control_init_terminal_s { + /* Parameter terminal base */ + ia_css_terminal_t base; + /* Parameter buffer handle attached to the terminal */ + ia_css_param_payload_t param_payload; + /* Fragment stride for the payload, used to find the base + * of the payload for a given fragment + */ + uint32_t payload_fragment_stride; + /* Points to the variable array of + * ia_css_program_control_init_program_desc_s + */ + uint16_t program_section_desc_offset; + /* Number of instantiated programs in program group (processes) */ + uint16_t program_count; +}; +/* ================= Program Control Init Terminal - END ================= */ + +#endif /* __IA_CSS_PSYS_TERMINAL_PRIVATE_TYPES_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/psysapi/interface/ia_css_psysapi.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/psysapi/interface/ia_css_psysapi.h new file mode 100644 index 0000000000000..4c8fd33b331ca --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/psysapi/interface/ia_css_psysapi.h @@ -0,0 +1,23 @@ +/* +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. 
+ * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ + +#ifndef __IA_CSS_PSYSAPI_H +#define __IA_CSS_PSYSAPI_H + +#include +#include +#include +#include + +#endif /* __IA_CSS_PSYSAPI_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/psysapi/interface/ia_css_psysapi_fw_version.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/psysapi/interface/ia_css_psysapi_fw_version.h new file mode 100644 index 0000000000000..5658a2988a08d --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/psysapi/interface/ia_css_psysapi_fw_version.h @@ -0,0 +1,33 @@ +/* + * Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+ */ + +#ifndef __IA_CSS_PSYSAPI_FW_VERSION_H +#define __IA_CSS_PSYSAPI_FW_VERSION_H + +/* PSYSAPI FW VERSION is taken from Makefile for FW tests */ +#define BXT_FW_RELEASE_VERSION PSYS_FIRMWARE_VERSION + +enum ia_css_process_group_protocol_version { + /* + * Legacy protocol + */ + IA_CSS_PROCESS_GROUP_PROTOCOL_LEGACY = 0, + /* + * Persistent process group support protocol + */ + IA_CSS_PROCESS_GROUP_PROTOCOL_PPG, + IA_CSS_PROCESS_GROUP_N_PROTOCOLS +}; + +#endif /* __IA_CSS_PSYSAPI_FW_VERSION_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/psysapi/interface/ia_css_psysapi_trace.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/psysapi/interface/ia_css_psysapi_trace.h new file mode 100644 index 0000000000000..e35ec24c77b36 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/psysapi/interface/ia_css_psysapi_trace.h @@ -0,0 +1,78 @@ +/* +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ + +#ifndef __IA_CSS_PSYSAPI_TRACE_H +#define __IA_CSS_PSYSAPI_TRACE_H + +#include "ia_css_trace.h" + +#define PSYSAPI_TRACE_LOG_LEVEL_OFF 0 +#define PSYSAPI_TRACE_LOG_LEVEL_NORMAL 1 +#define PSYSAPI_TRACE_LOG_LEVEL_DEBUG 2 + +/* PSYSAPI and all the submodules in PSYSAPI will have the default tracing + * level set to the PSYSAPI_TRACE_CONFIG level. 
If not defined in the + * psysapi.mk file it will be set by default to no trace + * (PSYSAPI_TRACE_LOG_LEVEL_OFF) + */ +#define PSYSAPI_TRACE_CONFIG_DEFAULT PSYSAPI_TRACE_LOG_LEVEL_OFF + +#if !defined(PSYSAPI_TRACE_CONFIG) + #define PSYSAPI_TRACE_CONFIG PSYSAPI_TRACE_CONFIG_DEFAULT +#endif + +/* Module specific trace setting will be used if + * the trace level is not specified from the module or + PSYSAPI_TRACING_OVERRIDE is defined + */ +#if (defined(PSYSAPI_TRACE_CONFIG)) + /* Module specific trace setting */ + #if PSYSAPI_TRACE_CONFIG == PSYSAPI_TRACE_LOG_LEVEL_OFF + /* PSYSAPI_TRACE_LOG_LEVEL_OFF */ + #define PSYSAPI_TRACE_METHOD IA_CSS_TRACE_METHOD_NATIVE + #define PSYSAPI_TRACE_LEVEL_ASSERT IA_CSS_TRACE_LEVEL_DISABLED + #define PSYSAPI_TRACE_LEVEL_ERROR IA_CSS_TRACE_LEVEL_DISABLED + #define PSYSAPI_TRACE_LEVEL_WARNING IA_CSS_TRACE_LEVEL_DISABLED + #define PSYSAPI_TRACE_LEVEL_INFO IA_CSS_TRACE_LEVEL_DISABLED + #define PSYSAPI_TRACE_LEVEL_DEBUG IA_CSS_TRACE_LEVEL_DISABLED + #define PSYSAPI_TRACE_LEVEL_VERBOSE IA_CSS_TRACE_LEVEL_DISABLED + #elif PSYSAPI_TRACE_CONFIG == PSYSAPI_TRACE_LOG_LEVEL_NORMAL + /* PSYSAPI_TRACE_LOG_LEVEL_NORMAL */ + #define PSYSAPI_TRACE_METHOD IA_CSS_TRACE_METHOD_NATIVE + #define PSYSAPI_TRACE_LEVEL_ASSERT IA_CSS_TRACE_LEVEL_DISABLED + #define PSYSAPI_TRACE_LEVEL_ERROR IA_CSS_TRACE_LEVEL_ENABLED + #define PSYSAPI_TRACE_LEVEL_WARNING IA_CSS_TRACE_LEVEL_DISABLED + #define PSYSAPI_TRACE_LEVEL_INFO IA_CSS_TRACE_LEVEL_ENABLED + #define PSYSAPI_TRACE_LEVEL_DEBUG IA_CSS_TRACE_LEVEL_DISABLED + #define PSYSAPI_TRACE_LEVEL_VERBOSE IA_CSS_TRACE_LEVEL_DISABLED + #elif PSYSAPI_TRACE_CONFIG == PSYSAPI_TRACE_LOG_LEVEL_DEBUG + /* PSYSAPI_TRACE_LOG_LEVEL_DEBUG */ + #define PSYSAPI_TRACE_METHOD IA_CSS_TRACE_METHOD_NATIVE + #define PSYSAPI_TRACE_LEVEL_ASSERT IA_CSS_TRACE_LEVEL_ENABLED + #define PSYSAPI_TRACE_LEVEL_ERROR IA_CSS_TRACE_LEVEL_ENABLED + #define PSYSAPI_TRACE_LEVEL_WARNING IA_CSS_TRACE_LEVEL_ENABLED + #define PSYSAPI_TRACE_LEVEL_INFO 
IA_CSS_TRACE_LEVEL_ENABLED + #define PSYSAPI_TRACE_LEVEL_DEBUG IA_CSS_TRACE_LEVEL_ENABLED + #define PSYSAPI_TRACE_LEVEL_VERBOSE IA_CSS_TRACE_LEVEL_ENABLED + #else + #error "No PSYSAPI_TRACE_CONFIG Tracing level defined" + #endif +#else + #error "PSYSAPI_TRACE_CONFIG not defined" +#endif + +/* Overriding submodules in PSYSAPI with a specific tracing level */ +/* #define PSYSAPI_DYNAMIC_TRACING_OVERRIDE TRACE_LOG_LEVEL_VERBOSE */ + +#endif /* __IA_CSS_PSYSAPI_TRACE_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/psysapi/kernel/interface/ia_css_kernel_bitmap.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/psysapi/kernel/interface/ia_css_kernel_bitmap.h new file mode 100644 index 0000000000000..3fec775eb019d --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/psysapi/kernel/interface/ia_css_kernel_bitmap.h @@ -0,0 +1,223 @@ +/* +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ + +#ifndef __IA_CSS_KERNEL_BITMAP_H +#define __IA_CSS_KERNEL_BITMAP_H + +/*! 
\file */ + +/** @file ia_css_kernel_bitmap.h + * + * The types and operations to make logic decisions given kernel bitmaps + * "ia_css_kernel_bitmap_t" can be larger than native types + */ + +#include +#include "vied_nci_psys_resource_model.h" + +#define IA_CSS_KERNEL_BITMAP_BITS 64 +#define IA_CSS_KERNEL_BITMAP_ELEM_TYPE uint32_t +#define IA_CSS_KERNEL_BITMAP_ELEM_BITS \ + (sizeof(IA_CSS_KERNEL_BITMAP_ELEM_TYPE)*8) +#define IA_CSS_KERNEL_BITMAP_NOF_ELEMS \ + ((IA_CSS_KERNEL_BITMAP_BITS) / (IA_CSS_KERNEL_BITMAP_ELEM_BITS)) + +/** An element is a 32 bit unsigned integer. 64 bit integers might cause + * problems in the compiler. + */ +typedef struct { + IA_CSS_KERNEL_BITMAP_ELEM_TYPE data[IA_CSS_KERNEL_BITMAP_NOF_ELEMS]; +} ia_css_kernel_bitmap_elems_t; + +/** Users should make no assumption about the actual type of + * ia_css_kernel_bitmap_t. + * Users should use IA_CSS_KERNEL_BITMAP_DO_NOT_USE_ELEMS in + * case they erroneously assume that this type is uint64_t and they + * cannot change their implementation. + */ +#ifndef IA_CSS_KERNEL_BITMAP_DO_NOT_USE_ELEMS +typedef ia_css_kernel_bitmap_elems_t ia_css_kernel_bitmap_t; +#else +typedef uint64_t ia_css_kernel_bitmap_t; +#if IA_CSS_KERNEL_BITMAP_BITS > 64 +#error IA_CSS_KERNEL_BITMAP_BITS > 64 not supported \ + with IA_CSS_KERNEL_BITMAP_DO_NOT_USE_ELEMS +#endif +#endif + +/*! Print the bits of a kernel bitmap + + @return < 0 on error + */ +extern int ia_css_kernel_bitmap_print( + const ia_css_kernel_bitmap_t bitmap, + void *fid); + +/*! Create an empty kernel bitmap + + @return bitmap = 0 + */ +extern ia_css_kernel_bitmap_t ia_css_kernel_bitmap_clear(void); + +/*! Creates the complement of a kernel bitmap + * @param bitmap[in] kernel bitmap + * @return ~bitmap + */ +extern ia_css_kernel_bitmap_t ia_css_kernel_bitmap_complement( + const ia_css_kernel_bitmap_t bitmap); + +/*! 
Create the union of two kernel bitmaps + + @param bitmap0[in] kernel bitmap 0 + @param bitmap1[in] kernel bitmap 1 + + @return bitmap0 | bitmap1 + */ +extern ia_css_kernel_bitmap_t ia_css_kernel_bitmap_union( + const ia_css_kernel_bitmap_t bitmap0, + const ia_css_kernel_bitmap_t bitmap1); + +/*! Create the intersection of two kernel bitmaps + + @param bitmap0[in] kernel bitmap 0 + @param bitmap1[in] kernel bitmap 1 + + @return bitmap0 & bitmap1 + */ +extern ia_css_kernel_bitmap_t ia_css_kernel_bitmap_intersection( + const ia_css_kernel_bitmap_t bitmap0, + const ia_css_kernel_bitmap_t bitmap1); + +/*! Check if the kernel bitmaps is empty + + @param bitmap[in] kernel bitmap + + @return bitmap == 0 + */ +extern bool ia_css_is_kernel_bitmap_empty( + const ia_css_kernel_bitmap_t bitmap); + +/*! Check if the intersection of two kernel bitmaps is empty + + @param bitmap0[in] kernel bitmap 0 + @param bitmap1[in] kernel bitmap 1 + + @return (bitmap0 & bitmap1) == 0 + */ +extern bool ia_css_is_kernel_bitmap_intersection_empty( + const ia_css_kernel_bitmap_t bitmap0, + const ia_css_kernel_bitmap_t bitmap1); + +/*! Check if the second kernel bitmap is a subset of the first (or equal) + + @param bitmap0[in] kernel bitmap 0 + @param bitmap1[in] kernel bitmap 1 + + Note: An empty set is always a subset, this function + returns true if bitmap 1 is empty + + @return (bitmap0 & bitmap1) == bitmap1 + */ +extern bool ia_css_is_kernel_bitmap_subset( + const ia_css_kernel_bitmap_t bitmap0, + const ia_css_kernel_bitmap_t bitmap1); + +/*! Check if the kernel bitmaps are equal + + @param bitmap0[in] kernel bitmap 0 + @param bitmap1[in] kernel bitmap 1 + + @return bitmap0 == bitmap1 + */ +extern bool ia_css_is_kernel_bitmap_equal( + const ia_css_kernel_bitmap_t bitmap0, + const ia_css_kernel_bitmap_t bitmap1); + +/*! 
Right shift kernel bitmap + + @param bitmap0[in] kernel bitmap 0 + + @return bitmap0 >> 1 + */ +extern ia_css_kernel_bitmap_t ia_css_kernel_bitmap_shift( + const ia_css_kernel_bitmap_t bitmap); + +/*! Check if the kernel bitmaps contains only a single element + + @param bitmap[in] kernel bitmap + + @return weight(bitmap) == 1 + */ +extern bool ia_css_is_kernel_bitmap_onehot( + const ia_css_kernel_bitmap_t bitmap); + +/*! Checks whether a specific kernel bit is set + * @return bitmap[index] == 1 + */ +extern int ia_css_is_kernel_bitmap_set( + const ia_css_kernel_bitmap_t bitmap, + const unsigned int index); + +/*! Create the union of a kernel bitmap with a onehot bitmap + * with a bit set at index + + @return bitmap[index] |= 1 + */ +extern ia_css_kernel_bitmap_t ia_css_kernel_bitmap_set( + const ia_css_kernel_bitmap_t bitmap, + const unsigned int index); + +/*! Creates kernel bitmap using a uint64 value. + * @return bitmap with the same bits set as in value (provided that width of bitmap is sufficient). + */ +extern ia_css_kernel_bitmap_t ia_css_kernel_bitmap_create_from_uint64( + const uint64_t value); + +/*! Converts an ia_css_kernel_bitmap_t type to uint64_t. Note that if + * ia_css_kernel_bitmap_t contains more than 64 bits, only the lowest 64 bits + * are returned. + * @return uint64_t representation of value +*/ +extern uint64_t ia_css_kernel_bitmap_to_uint64( + const ia_css_kernel_bitmap_t value); + +/*! Creates a kernel bitmap with the bit at index 'index' removed. + * @return ~(1 << index) & bitmap + */ +extern ia_css_kernel_bitmap_t ia_css_kernel_bitmap_unset( + const ia_css_kernel_bitmap_t bitmap, + const unsigned int index); + +/*! Set a previously clear field of a kernel bitmap at index + + @return if bitmap[index] == 0, bitmap[index] -> 1, else 0 + */ +extern ia_css_kernel_bitmap_t ia_css_kernel_bitmap_set_unique( + const ia_css_kernel_bitmap_t bitmap, + const unsigned int index); + +/*!
Create a onehot kernel bitmap with a bit set at index + + @return bitmap[index] = 1 + */ +extern ia_css_kernel_bitmap_t ia_css_kernel_bit_mask( + const unsigned int index); + +/*! Create a random bitmap + + @return a random bitmap + */ +extern ia_css_kernel_bitmap_t ia_css_kernel_ran_bitmap(void); + +#endif /* __IA_CSS_KERNEL_BITMAP_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/psysapi/kernel/interface/ia_css_psys_kernel_trace.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/psysapi/kernel/interface/ia_css_psys_kernel_trace.h new file mode 100644 index 0000000000000..1ba29c7ab77ec --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/psysapi/kernel/interface/ia_css_psys_kernel_trace.h @@ -0,0 +1,103 @@ +/* +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+*/ + +#ifndef __IA_CSS_PSYS_KERNEL_TRACE_H +#define __IA_CSS_PSYS_KERNEL_TRACE_H + +#include "ia_css_psysapi_trace.h" + +#define PSYS_KERNEL_TRACE_LEVEL_CONFIG_DEFAULT PSYSAPI_TRACE_LOG_LEVEL_OFF + +/* Default sub-module tracing config */ +#if (!defined(PSYSAPI_KERNEL_TRACING_OVERRIDE)) + #define PSYS_KERNEL_TRACE_LEVEL_CONFIG \ + PSYS_KERNEL_TRACE_LEVEL_CONFIG_DEFAULT +#endif + +/* Module/sub-module specific trace setting will be used if + * the trace level is not specified from the module or + PSYSAPI_KERNEL_TRACING_OVERRIDE is defined + */ +#if (defined(PSYSAPI_KERNEL_TRACING_OVERRIDE)) + /* Module/sub-module specific trace setting */ + #if PSYSAPI_KERNEL_TRACING_OVERRIDE == PSYSAPI_TRACE_LOG_LEVEL_OFF + /* PSYSAPI_TRACE_LOG_LEVEL_OFF */ + #define PSYSAPI_KERNEL_TRACE_METHOD \ + IA_CSS_TRACE_METHOD_NATIVE + #define PSYSAPI_KERNEL_TRACE_LEVEL_ASSERT \ + IA_CSS_TRACE_LEVEL_DISABLED + #define PSYSAPI_KERNEL_TRACE_LEVEL_ERROR \ + IA_CSS_TRACE_LEVEL_DISABLED + #define PSYSAPI_KERNEL_TRACE_LEVEL_WARNING \ + IA_CSS_TRACE_LEVEL_DISABLED + #define PSYSAPI_KERNEL_TRACE_LEVEL_INFO \ + IA_CSS_TRACE_LEVEL_DISABLED + #define PSYSAPI_KERNEL_TRACE_LEVEL_DEBUG \ + IA_CSS_TRACE_LEVEL_DISABLED + #define PSYSAPI_KERNEL_TRACE_LEVEL_VERBOSE \ + IA_CSS_TRACE_LEVEL_DISABLED + #elif PSYSAPI_KERNEL_TRACING_OVERRIDE == PSYSAPI_TRACE_LOG_LEVEL_NORMAL + /* PSYSAPI_TRACE_LOG_LEVEL_NORMAL */ + #define PSYSAPI_KERNEL_TRACE_METHOD \ + IA_CSS_TRACE_METHOD_NATIVE + #define PSYSAPI_KERNEL_TRACE_LEVEL_ASSERT \ + IA_CSS_TRACE_LEVEL_DISABLED + #define PSYSAPI_KERNEL_TRACE_LEVEL_ERROR \ + IA_CSS_TRACE_LEVEL_ENABLED + #define PSYSAPI_KERNEL_TRACE_LEVEL_WARNING \ + IA_CSS_TRACE_LEVEL_DISABLED + #define PSYSAPI_KERNEL_TRACE_LEVEL_INFO \ + IA_CSS_TRACE_LEVEL_ENABLED + #define PSYSAPI_KERNEL_TRACE_LEVEL_DEBUG \ + IA_CSS_TRACE_LEVEL_DISABLED + #define PSYSAPI_KERNEL_TRACE_LEVEL_VERBOSE \ + IA_CSS_TRACE_LEVEL_DISABLED + #elif PSYSAPI_KERNEL_TRACING_OVERRIDE == PSYSAPI_TRACE_LOG_LEVEL_DEBUG + /* 
PSYSAPI_TRACE_LOG_LEVEL_DEBUG */ + #define PSYSAPI_KERNEL_TRACE_METHOD \ + IA_CSS_TRACE_METHOD_NATIVE + #define PSYSAPI_KERNEL_TRACE_LEVEL_ASSERT \ + IA_CSS_TRACE_LEVEL_ENABLED + #define PSYSAPI_KERNEL_TRACE_LEVEL_ERROR \ + IA_CSS_TRACE_LEVEL_ENABLED + #define PSYSAPI_KERNEL_TRACE_LEVEL_WARNING \ + IA_CSS_TRACE_LEVEL_ENABLED + #define PSYSAPI_KERNEL_TRACE_LEVEL_INFO \ + IA_CSS_TRACE_LEVEL_ENABLED + #define PSYSAPI_KERNEL_TRACE_LEVEL_DEBUG \ + IA_CSS_TRACE_LEVEL_ENABLED + #define PSYSAPI_KERNEL_TRACE_LEVEL_VERBOSE \ + IA_CSS_TRACE_LEVEL_ENABLED + #else + #error "No PSYSAPI_KERNEL Tracing level defined" + #endif +#else + /* Inherit Module trace setting */ + #define PSYSAPI_KERNEL_TRACE_METHOD \ + PSYSAPI_TRACE_METHOD + #define PSYSAPI_KERNEL_TRACE_LEVEL_ASSERT \ + PSYSAPI_TRACE_LEVEL_ASSERT + #define PSYSAPI_KERNEL_TRACE_LEVEL_ERROR \ + PSYSAPI_TRACE_LEVEL_ERROR + #define PSYSAPI_KERNEL_TRACE_LEVEL_WARNING \ + PSYSAPI_TRACE_LEVEL_WARNING + #define PSYSAPI_KERNEL_TRACE_LEVEL_INFO \ + PSYSAPI_TRACE_LEVEL_INFO + #define PSYSAPI_KERNEL_TRACE_LEVEL_DEBUG \ + PSYSAPI_TRACE_LEVEL_DEBUG + #define PSYSAPI_KERNEL_TRACE_LEVEL_VERBOSE \ + PSYSAPI_TRACE_LEVEL_VERBOSE +#endif + +#endif /* __IA_CSS_PSYS_KERNEL_TRACE_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/psysapi/kernel/src/ia_css_kernel_bitmap.c b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/psysapi/kernel/src/ia_css_kernel_bitmap.c new file mode 100644 index 0000000000000..5fd9496bc3ccd --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/psysapi/kernel/src/ia_css_kernel_bitmap.c @@ -0,0 +1,413 @@ +/* +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. 
+ * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ + +#include +#include +#include +#include +#include "ia_css_psys_kernel_trace.h" + +static int ia_css_kernel_bitmap_compute_weight( + const ia_css_kernel_bitmap_t bitmap); + +bool ia_css_is_kernel_bitmap_intersection_empty( + const ia_css_kernel_bitmap_t bitmap0, + const ia_css_kernel_bitmap_t bitmap1) +{ + ia_css_kernel_bitmap_t intersection; + + IA_CSS_TRACE_0(PSYSAPI_KERNEL, VERBOSE, + "ia_css_is_kernel_bitmap_intersection_empty(): enter:\n"); + + intersection = ia_css_kernel_bitmap_intersection(bitmap0, bitmap1); + return ia_css_is_kernel_bitmap_empty(intersection); +} + +bool ia_css_is_kernel_bitmap_empty( + const ia_css_kernel_bitmap_t bitmap) +{ + unsigned int i; + bool is_empty = true; + IA_CSS_TRACE_0(PSYSAPI_KERNEL, VERBOSE, + "ia_css_is_kernel_bitmap_empty(): enter:\n"); +#ifndef IA_CSS_KERNEL_BITMAP_DO_NOT_USE_ELEMS + for (i = 0; i < IA_CSS_KERNEL_BITMAP_NOF_ELEMS; i++) { + is_empty &= bitmap.data[i] == 0; + } +#else + NOT_USED(i); + is_empty = (bitmap == 0); +#endif /* IA_CSS_KERNEL_BITMAP_USE_ELEMS */ + return is_empty; +} + +bool ia_css_is_kernel_bitmap_equal( + const ia_css_kernel_bitmap_t bitmap0, + const ia_css_kernel_bitmap_t bitmap1) +{ + unsigned int i; + bool is_equal = true; + IA_CSS_TRACE_0(PSYSAPI_KERNEL, VERBOSE, + "ia_css_is_kernel_bitmap_equal(): enter:\n"); +#ifndef IA_CSS_KERNEL_BITMAP_DO_NOT_USE_ELEMS + for (i = 0; i < IA_CSS_KERNEL_BITMAP_NOF_ELEMS; i++) { + is_equal = is_equal && (bitmap0.data[i] == bitmap1.data[i]); + } +#else + NOT_USED(i); + is_equal = (bitmap0 == bitmap1); +#endif /* IA_CSS_KERNEL_BITMAP_USE_ELEMS */ + return is_equal; +} + +bool ia_css_is_kernel_bitmap_onehot( + const ia_css_kernel_bitmap_t bitmap) +{ + IA_CSS_TRACE_0(PSYSAPI_KERNEL, VERBOSE, + 
"ia_css_is_kernel_bitmap_onehot(): enter:\n"); + return ia_css_kernel_bitmap_compute_weight(bitmap) == 1; +} + +bool ia_css_is_kernel_bitmap_subset( + const ia_css_kernel_bitmap_t bitmap0, + const ia_css_kernel_bitmap_t bitmap1) +{ + ia_css_kernel_bitmap_t intersection; + + IA_CSS_TRACE_0(PSYSAPI_KERNEL, VERBOSE, + "ia_css_is_kernel_bitmap_subset(): enter:\n"); + + intersection = ia_css_kernel_bitmap_intersection(bitmap0, bitmap1); + return ia_css_is_kernel_bitmap_equal(intersection, bitmap1); +} + +ia_css_kernel_bitmap_t ia_css_kernel_bitmap_clear(void) +{ + unsigned int i; + ia_css_kernel_bitmap_t bitmap; + IA_CSS_TRACE_0(PSYSAPI_KERNEL, VERBOSE, + "ia_css_kernel_bitmap_clear(): enter:\n"); +#ifndef IA_CSS_KERNEL_BITMAP_DO_NOT_USE_ELEMS + for (i = 0; i < IA_CSS_KERNEL_BITMAP_NOF_ELEMS; i++) { + bitmap.data[i] = 0; + } +#else + NOT_USED(i); + bitmap = 0; +#endif /* IA_CSS_KERNEL_BITMAP_USE_ELEMS */ + return bitmap; +} + +ia_css_kernel_bitmap_t ia_css_kernel_bitmap_complement( + const ia_css_kernel_bitmap_t bitmap) +{ + unsigned int i; + ia_css_kernel_bitmap_t result; + IA_CSS_TRACE_0(PSYSAPI_KERNEL, VERBOSE, + "ia_css_kernel_bitmap_complement(): enter:\n"); +#ifndef IA_CSS_KERNEL_BITMAP_DO_NOT_USE_ELEMS + for (i = 0; i < IA_CSS_KERNEL_BITMAP_NOF_ELEMS; i++) { + result.data[i] = ~bitmap.data[i]; + } +#else + NOT_USED(i); + result = ~bitmap; +#endif /* IA_CSS_KERNEL_BITMAP_USE_ELEMS */ + return result; +} + +ia_css_kernel_bitmap_t ia_css_kernel_bitmap_union( + const ia_css_kernel_bitmap_t bitmap0, + const ia_css_kernel_bitmap_t bitmap1) +{ + unsigned int i; + ia_css_kernel_bitmap_t result; + IA_CSS_TRACE_0(PSYSAPI_KERNEL, VERBOSE, + "ia_css_kernel_bitmap_union(): enter:\n"); +#ifndef IA_CSS_KERNEL_BITMAP_DO_NOT_USE_ELEMS + for (i = 0; i < IA_CSS_KERNEL_BITMAP_NOF_ELEMS; i++) { + result.data[i] = (bitmap0.data[i] | bitmap1.data[i]); + } +#else + NOT_USED(i); + result = (bitmap0 | bitmap1); +#endif /* IA_CSS_KERNEL_BITMAP_USE_ELEMS */ + return result; +} + 
+ia_css_kernel_bitmap_t ia_css_kernel_bitmap_intersection( + const ia_css_kernel_bitmap_t bitmap0, + const ia_css_kernel_bitmap_t bitmap1) +{ + unsigned int i; + ia_css_kernel_bitmap_t result; + IA_CSS_TRACE_0(PSYSAPI_KERNEL, VERBOSE, + "ia_css_kernel_bitmap_intersection(): enter:\n"); +#ifndef IA_CSS_KERNEL_BITMAP_DO_NOT_USE_ELEMS + for (i = 0; i < IA_CSS_KERNEL_BITMAP_NOF_ELEMS; i++) { + result.data[i] = (bitmap0.data[i] & bitmap1.data[i]); + } +#else + NOT_USED(i); + result = (bitmap0 & bitmap1); +#endif /* IA_CSS_KERNEL_BITMAP_USE_ELEMS */ + return result; +} + +ia_css_kernel_bitmap_t ia_css_kernel_bitmap_set( + const ia_css_kernel_bitmap_t bitmap, + const unsigned int index) +{ + ia_css_kernel_bitmap_t bit_mask; + + IA_CSS_TRACE_0(PSYSAPI_KERNEL, VERBOSE, + "ia_css_kernel_bitmap_set(): enter:\n"); + + bit_mask = ia_css_kernel_bit_mask(index); + return ia_css_kernel_bitmap_union(bitmap, bit_mask); +} + +ia_css_kernel_bitmap_t ia_css_kernel_bitmap_create_from_uint64( + const uint64_t value) +{ + unsigned int i; + ia_css_kernel_bitmap_t result; + + IA_CSS_TRACE_0(PSYSAPI_KERNEL, VERBOSE, + "ia_css_kernel_bitmap_create_from_uint64(): enter:\n"); + +#ifndef IA_CSS_KERNEL_BITMAP_DO_NOT_USE_ELEMS + result = ia_css_kernel_bitmap_clear(); + for (i = 0; i < IA_CSS_KERNEL_BITMAP_NOF_ELEMS; i++) { + /* masking is done implictly, the MSB bits of casting will be chopped off */ + result.data[i] = (IA_CSS_KERNEL_BITMAP_ELEM_TYPE) + (value >> (i * IA_CSS_KERNEL_BITMAP_ELEM_BITS)); + } +#if IA_CSS_KERNEL_BITMAP_BITS < 64 + if ((value >> IA_CSS_KERNEL_BITMAP_BITS) != 0) { + IA_CSS_TRACE_0(PSYSAPI_KERNEL, ERROR, + "ia_css_kernel_bitmap_create_from_uint64(): " + "kernel bitmap is not wide enough to encode value\n"); + assert(0); + } +#endif +#else + NOT_USED(i); + result = value; +#endif /* IA_CSS_KERNEL_BITMAP_DO_NOT_USE_ELEMS */ + return result; +} + +uint64_t ia_css_kernel_bitmap_to_uint64( + const ia_css_kernel_bitmap_t value) +{ + const unsigned int bits64 = sizeof(uint64_t) 
* 8; + const unsigned int nof_elems_bits64 = bits64 / IA_CSS_KERNEL_BITMAP_ELEM_BITS; + unsigned int i; + uint64_t res = 0; + + IA_CSS_TRACE_0(PSYSAPI_KERNEL, VERBOSE, + "ia_css_kernel_bitmap_to_uint64(): enter:\n"); + + assert((bits64 % IA_CSS_KERNEL_BITMAP_ELEM_BITS) == 0); + assert(nof_elems_bits64 > 0); + +#ifndef IA_CSS_KERNEL_BITMAP_DO_NOT_USE_ELEMS + for (i = 0; i < nof_elems_bits64; i++) { + res |= ((uint64_t)(value.data[i]) << (i * IA_CSS_KERNEL_BITMAP_ELEM_BITS)); + } + for (i = nof_elems_bits64; i < IA_CSS_KERNEL_BITMAP_NOF_ELEMS; i++) { + assert(value.data[i] == 0); + } + return res; +#else + (void)i; + (void)res; + (void)nof_elems_bits64; + return (uint64_t)value; +#endif /* IA_CSS_KERNEL_BITMAP_DO_NOT_USE_ELEMS */ +} + +ia_css_kernel_bitmap_t ia_css_kernel_bitmap_unset( + const ia_css_kernel_bitmap_t bitmap, + const unsigned int index) +{ + ia_css_kernel_bitmap_t result; + + IA_CSS_TRACE_0(PSYSAPI_KERNEL, VERBOSE, + "ia_css_kernel_bitmap_unset(): enter:\n"); + + result = ia_css_kernel_bit_mask(index); + result = ia_css_kernel_bitmap_complement(result); + return ia_css_kernel_bitmap_intersection(bitmap, result); +} + +ia_css_kernel_bitmap_t ia_css_kernel_bitmap_set_unique( + const ia_css_kernel_bitmap_t bitmap, + const unsigned int index) +{ + ia_css_kernel_bitmap_t ret; + ia_css_kernel_bitmap_t bit_mask; + + IA_CSS_TRACE_0(PSYSAPI_KERNEL, VERBOSE, + "ia_css_kernel_bitmap_set_unique(): enter:\n"); + + ret = ia_css_kernel_bitmap_clear(); + bit_mask = ia_css_kernel_bit_mask(index); + + if (ia_css_is_kernel_bitmap_intersection_empty(bitmap, bit_mask) + && !ia_css_is_kernel_bitmap_empty(bit_mask)) { + ret = ia_css_kernel_bitmap_union(bitmap, bit_mask); + } + return ret; +} + +ia_css_kernel_bitmap_t ia_css_kernel_bit_mask( + const unsigned int index) +{ + unsigned int elem_index; + unsigned int elem_bit_index; + ia_css_kernel_bitmap_t bit_mask = ia_css_kernel_bitmap_clear(); + + /* Assert disabled for staging, because some PGs do not satisfy this condition 
*/ + /* assert(index < IA_CSS_KERNEL_BITMAP_BITS); */ + + IA_CSS_TRACE_0(PSYSAPI_KERNEL, VERBOSE, + "ia_css_kernel_bit_mask(): enter:\n"); +#ifndef IA_CSS_KERNEL_BITMAP_DO_NOT_USE_ELEMS + if (index < IA_CSS_KERNEL_BITMAP_BITS) { + elem_index = index / IA_CSS_KERNEL_BITMAP_ELEM_BITS; + elem_bit_index = index % IA_CSS_KERNEL_BITMAP_ELEM_BITS; + assert(elem_index < IA_CSS_KERNEL_BITMAP_NOF_ELEMS); + + bit_mask.data[elem_index] = 1 << elem_bit_index; + } +#else + NOT_USED(elem_index); + NOT_USED(elem_bit_index); + if (index < IA_CSS_KERNEL_BITMAP_BITS) { + bit_mask = (ia_css_kernel_bitmap_t)1 << index; + } +#endif /* IA_CSS_KERNEL_BITMAP_USE_ELEMS */ + return bit_mask; +} + + +static int ia_css_kernel_bitmap_compute_weight( + const ia_css_kernel_bitmap_t bitmap) +{ + ia_css_kernel_bitmap_t loc_bitmap; + int weight = 0; + int i; + + IA_CSS_TRACE_0(PSYSAPI_KERNEL, VERBOSE, + "ia_css_kernel_bitmap_compute_weight(): enter:\n"); + + loc_bitmap = bitmap; + + /* In fact; do not need the iterator "i" */ + for (i = 0; (i < IA_CSS_KERNEL_BITMAP_BITS) && + !ia_css_is_kernel_bitmap_empty(loc_bitmap); i++) { + weight += ia_css_is_kernel_bitmap_set(loc_bitmap, 0); + loc_bitmap = ia_css_kernel_bitmap_shift(loc_bitmap); + } + + return weight; +} + +int ia_css_is_kernel_bitmap_set( + const ia_css_kernel_bitmap_t bitmap, + const unsigned int index) +{ + unsigned int elem_index; + unsigned int elem_bit_index; + + IA_CSS_TRACE_0(PSYSAPI_KERNEL, VERBOSE, + "ia_css_is_kernel_bitmap_set(): enter:\n"); + + /* Assert disabled for staging, because some PGs do not satisfy this condition */ + /* assert(index < IA_CSS_KERNEL_BITMAP_BITS); */ + +#ifndef IA_CSS_KERNEL_BITMAP_DO_NOT_USE_ELEMS + elem_index = index / IA_CSS_KERNEL_BITMAP_ELEM_BITS; + elem_bit_index = index % IA_CSS_KERNEL_BITMAP_ELEM_BITS; + assert(elem_index < IA_CSS_KERNEL_BITMAP_NOF_ELEMS); + return (((bitmap.data[elem_index] >> elem_bit_index) & 0x1) == 1); +#else + NOT_USED(elem_index); + NOT_USED(elem_bit_index); + return 
(((bitmap >> index) & 0x1) == 1); +#endif /* IA_CSS_KERNEL_BITMAP_DO_NOT_USE_ELEMS */ +} + +ia_css_kernel_bitmap_t ia_css_kernel_bitmap_shift( + const ia_css_kernel_bitmap_t bitmap) +{ + int i; + unsigned int lsb_current_elem = 0; + unsigned int lsb_previous_elem = 0; + ia_css_kernel_bitmap_t loc_bitmap; + + IA_CSS_TRACE_0(PSYSAPI_KERNEL, VERBOSE, + "ia_css_kernel_bitmap_shift(): enter:\n"); + + loc_bitmap = bitmap; + +#ifndef IA_CSS_KERNEL_BITMAP_DO_NOT_USE_ELEMS + for (i = IA_CSS_KERNEL_BITMAP_NOF_ELEMS - 1; i >= 0; i--) { + lsb_current_elem = bitmap.data[i] & 0x01; + loc_bitmap.data[i] >>= 1; + loc_bitmap.data[i] |= (lsb_previous_elem << (IA_CSS_KERNEL_BITMAP_ELEM_BITS - 1)); + lsb_previous_elem = lsb_current_elem; + } +#else + NOT_USED(i); + NOT_USED(lsb_current_elem); + NOT_USED(lsb_previous_elem); + loc_bitmap >>= 1; +#endif /* IA_CSS_KERNEL_BITMAP_USE_ELEMS */ + return loc_bitmap; +} + +int ia_css_kernel_bitmap_print( + const ia_css_kernel_bitmap_t bitmap, + void *fid) +{ + int retval = -1; + int bit; + unsigned int bit_index = 0; + ia_css_kernel_bitmap_t loc_bitmap; + + IA_CSS_TRACE_0(PSYSAPI_KERNEL, INFO, + "ia_css_kernel_bitmap_print(): enter:\n"); + + NOT_USED(fid); + NOT_USED(bit); + + IA_CSS_TRACE_0(PSYSAPI_KERNEL, INFO, "kernel bitmap {\n"); + + loc_bitmap = bitmap; + + for (bit_index = 0; (bit_index < IA_CSS_KERNEL_BITMAP_BITS) && + !ia_css_is_kernel_bitmap_empty(loc_bitmap); bit_index++) { + + bit = ia_css_is_kernel_bitmap_set(loc_bitmap, 0); + loc_bitmap = ia_css_kernel_bitmap_shift(loc_bitmap); + IA_CSS_TRACE_2(PSYSAPI_KERNEL, INFO, "\t%d\t = %d\n", bit_index, bit); + } + IA_CSS_TRACE_0(PSYSAPI_KERNEL, INFO, "}\n"); + + retval = 0; + return retval; +} diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/psysapi/param/interface/ia_css_program_group_param.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/psysapi/param/interface/ia_css_program_group_param.h new file mode 100644 index 0000000000000..485dd63e5a861 --- /dev/null 
+++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/psysapi/param/interface/ia_css_program_group_param.h @@ -0,0 +1,293 @@ +/* +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ + +#ifndef __IA_CSS_PROGRAM_GROUP_PARAM_H +#define __IA_CSS_PROGRAM_GROUP_PARAM_H + +/*! \file */ + +/** @file ia_css_program_group_param.h + * + * Define the methods on the program group parameter object that are not part + * of a single interface + */ +#include + +#include + +#include /* ia_css_kernel_bitmap_t */ + +#include + +/*! Get the stored size of the program group parameter object + + @param param[in] program group parameter object + + @return size, 0 on error + */ +extern size_t ia_css_program_group_param_get_size( + const ia_css_program_group_param_t *param); + +/*! initialize program_group_param + + @param blob[in] program group parameter object + @param program_count[in] number of terminals. + @param terminal_count[in] number of terminals. + @param fragment_count[in] number of terminals. + + @return 0 if success, else failure. + */ +extern int ia_css_program_group_param_init( + ia_css_program_group_param_t *blob, + const uint8_t program_count, + const uint8_t terminal_count, + const uint16_t fragment_count, + const enum ia_css_frame_format_type *frame_format_types); +/*! 
Get the program parameter object from a program group parameter object + + @param program_group_param[in] program group parameter object + @param i[in] program parameter index + + @return program parameter pointer, NULL on error + */ +extern ia_css_program_param_t *ia_css_program_group_param_get_program_param( + const ia_css_program_group_param_t *param, + const int i); + +/*! Get the terminal parameter object from a program group parameter object + + @param program_group_param[in] program group parameter object + @param i[in] terminal parameter index + + @return terminal parameter pointer, NULL on error + */ +extern ia_css_terminal_param_t *ia_css_program_group_param_get_terminal_param( + const ia_css_program_group_param_t *param, + const int i); + +/*! Get the fragment count from a program group parameter object + + @param program_group_param[in] program group parameter object + + @return fragment count, 0 on error + */ +extern uint16_t ia_css_program_group_param_get_fragment_count( + const ia_css_program_group_param_t *param); + +/*! Get the program count from a program group parameter object + + @param program_group_param[in] program group parameter object + + @return program count, 0 on error + */ +extern uint8_t ia_css_program_group_param_get_program_count( + const ia_css_program_group_param_t *param); + +/*! Get the terminal count from a program group parameter object + + @param program_group_param[in] program group parameter object + + @return terminal count, 0 on error + */ +extern uint8_t ia_css_program_group_param_get_terminal_count( + const ia_css_program_group_param_t *param); + +/*! Set the protocol version in a program group parameter object + + @param program_group_param[in] program group parameter object + @param protocol_version[in] protocol version + + @return nonzero on error +*/ +extern int +ia_css_program_group_param_set_protocol_version( + ia_css_program_group_param_t *param, + uint8_t protocol_version); + +/*! 
Get the protocol version from a program group parameter object + + @param program_group_param[in] program group parameter object + + @return protocol version +*/ +extern uint8_t +ia_css_program_group_param_get_protocol_version( + const ia_css_program_group_param_t *param); + +/*! Set the kernel enable bitmap from a program group parameter object + + @param param[in] program group parameter object + @param bitmap[in] kernel enable bitmap + + @return non-zero on error + */ +extern int ia_css_program_group_param_set_kernel_enable_bitmap( + ia_css_program_group_param_t *param, + const ia_css_kernel_bitmap_t bitmap); + +/*! Get the kernel enable bitmap from a program group parameter object + + @param program_group_param[in] program group parameter object + + @return kernel enable bitmap, 0 on error +*/ +extern ia_css_kernel_bitmap_t +ia_css_program_group_param_get_kernel_enable_bitmap( + const ia_css_program_group_param_t *param); + +/*! Get the stored size of the program parameter object + + @param param[in] program parameter object + + @return size, 0 on error + */ +extern size_t ia_css_program_param_get_size( + const ia_css_program_param_t *param); + +/*! Set the kernel enable bitmap from a program parameter object + + @param program_param[in] program parameter object + @param bitmap[in] kernel enable bitmap + + @return non-zero on error + */ +extern int ia_css_program_param_set_kernel_enable_bitmap( + ia_css_program_param_t *program_param, + const ia_css_kernel_bitmap_t bitmap); + +/*! Get the kernel enable bitmap from a program parameter object + + @param program_param[in] program parameter object + + Note: This function returns in fact the kernel enable of the program group + parameters + + @return kernel enable bitmap, 0 on error + */ +extern ia_css_kernel_bitmap_t ia_css_program_param_get_kernel_enable_bitmap( + const ia_css_program_param_t *param); + +/*! 
Get the stored size of the terminal parameter object + + @param param[in] terminal parameter object + + @return size, 0 on error + */ +extern size_t ia_css_terminal_param_get_size( + const ia_css_terminal_param_t *param); + +/*! Get the kernel enable bitmap from a terminal parameter object + + @param terminal_param[in] terminal parameter object + + Note: This function returns in fact the kernel enable of the program group + parameters + + @return kernel enable bitmap, 0 on error + */ +extern ia_css_kernel_bitmap_t ia_css_terminal_param_get_kernel_enable_bitmap( + const ia_css_terminal_param_t *param); + +/*! Get the parent object for this terminal param. + + @param terminal_param[in] terminal parameter object + + @return parent program group param object + */ +extern ia_css_program_group_param_t *ia_css_terminal_param_get_parent( + const ia_css_terminal_param_t *param); + +/*! Get the data format type associated with the terminal. + + @param terminal_param[in] terminal parameter object + + @return data format type (ia_css_data_format_type_t) + */ +extern ia_css_frame_format_type_t ia_css_terminal_param_get_frame_format_type( + const ia_css_terminal_param_t *terminal_param); + +/*! Set the data format type associated with the terminal. + + @param terminal_param[in] terminal parameter object + @param data_format_type[in] data format type + + @return non-zero on error. + */ +extern int ia_css_terminal_param_set_frame_format_type( + ia_css_terminal_param_t *terminal_param, + const ia_css_frame_format_type_t data_format_type); + +/*! Get bits per pixel on the frame associated with the terminal. + + @param terminal_param[in] terminal parameter object + + @return bits per pixel + */ +extern uint8_t ia_css_terminal_param_get_bpp( + const ia_css_terminal_param_t *terminal_param); + +/*! Set bits per pixel on the frame associated with the terminal. + + @param terminal_param[in] terminal parameter object + @param bpp[in] bits per pixel + + @return non-zero on error. 
+ */ +extern int ia_css_terminal_param_set_bpp( + ia_css_terminal_param_t *terminal_param, + const uint8_t bpp); + +/*! Get dimensions on the frame associated with the terminal. + + @param terminal_param[in] terminal parameter object + @param dimensions[out] dimension array + + @return non-zero on error. + */ +extern int ia_css_terminal_param_get_dimensions( + const ia_css_terminal_param_t *terminal_param, + uint16_t dimensions[IA_CSS_N_DATA_DIMENSION]); + +/*! Set dimensions on the frame associated with the terminal. + + @param terminal_param[in] terminal parameter object + @param dimensions[in] dimension array + + @return non-zero on error. + */ +extern int ia_css_terminal_param_set_dimensions( + ia_css_terminal_param_t *terminal_param, + const uint16_t dimensions[IA_CSS_N_DATA_DIMENSION]); + +/*! Get stride on the frame associated with the terminal. + + @param terminal_param[in] terminal parameter object + + @return stride of the frame to be attached. + */ +extern uint32_t ia_css_terminal_param_get_stride( + const ia_css_terminal_param_t *terminal_param); + +/*! Set stride on the frame associated with the terminal. + + @param terminal_param[in] terminal parameter object + @param stride[in] stride + + @return non-zero on error. + */ +extern int ia_css_terminal_param_set_stride( + ia_css_terminal_param_t *terminal_param, + const uint32_t stride); + +#endif /* __IA_CSS_PROGRAM_GROUP_PARAM_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/psysapi/param/interface/ia_css_program_group_param.sim.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/psysapi/param/interface/ia_css_program_group_param.sim.h new file mode 100644 index 0000000000000..7821f8147a1a0 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/psysapi/param/interface/ia_css_program_group_param.sim.h @@ -0,0 +1,153 @@ +/* +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. 
+ * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ + +#ifndef __IA_CSS_PROGRAM_GROUP_PARAM_SIM_H +#define __IA_CSS_PROGRAM_GROUP_PARAM_SIM_H + +/*! \file */ + +/** @file ia_css_program_group_param.sim.h + * + * Define the methods on the program group parameter object: Simulation only + */ +#include + +#include + +#include + +/* Simulation */ + +/*! Create a program group parameter object from specification + + @param specification[in] specification (index) + @param manifest[in] program group manifest + + @return NULL on error + */ +extern ia_css_program_group_param_t *ia_css_program_group_param_create( + const unsigned int specification, + const ia_css_program_group_manifest_t *manifest); + +/*! Destroy the program group parameter object + + @param program_group_param[in] program group parameter object + + @return NULL + */ +extern ia_css_program_group_param_t *ia_css_program_group_param_destroy( + ia_css_program_group_param_t *param); + +/*! Compute the size of storage required for allocating + * the program group parameter object + + @param program_count[in] Number of programs in the process group + @param terminal_count[in] Number of terminals on the process group + @param fragment_count[in] Number of fragments on the terminals of + the process group + + @return 0 on error + */ +size_t ia_css_sizeof_program_group_param( + const uint8_t program_count, + const uint8_t terminal_count, + const uint16_t fragment_count); + +/*! 
Allocate (the store of) a program group parameter object + + @param program_count[in] Number of programs in the process group + @param terminal_count[in] Number of terminals on the process group + @param fragment_count[in] Number of fragments on the terminals of + the process group + + @return program group parameter pointer, NULL on error + */ +extern ia_css_program_group_param_t *ia_css_program_group_param_alloc( + const uint8_t program_count, + const uint8_t terminal_count, + const uint16_t fragment_count); + +/*! Free (the store of) a program group parameter object + + @param program_group_param[in] program group parameter object + + @return NULL + */ +extern ia_css_program_group_param_t *ia_css_program_group_param_free( + ia_css_program_group_param_t *param); + +/*! Print the program group parameter object to file/stream + + @param param[in] program group parameter object + @param fid[out] file/stream handle + + @return < 0 on error + */ +extern int ia_css_program_group_param_print( + const ia_css_program_group_param_t *param, + void *fid); + +/*! Allocate (the store of) a program parameter object + + @return program parameter pointer, NULL on error + */ +extern ia_css_program_param_t *ia_css_program_param_alloc(void); + +/*! Free (the store of) a program parameter object + + @param param[in] program parameter object + + @return NULL + */ +extern ia_css_program_param_t *ia_css_program_param_free( + ia_css_program_param_t *param); + +/*! Print the program parameter object to file/stream + + @param param[in] program parameter object + @param fid[out] file/stream handle + + @return < 0 on error + */ +extern int ia_css_program_param_print( + const ia_css_program_param_t *param, + void *fid); + +/*! Allocate (the store of) a terminal parameter object + + @return terminal parameter pointer, NULL on error + */ +extern ia_css_terminal_param_t *ia_css_terminal_param_alloc(void); + +/*! 
Free (the store of) a terminal parameter object + + @param param[in] terminal parameter object + + @return NULL + */ +extern ia_css_terminal_param_t *ia_css_terminal_param_free( + ia_css_terminal_param_t *param); + +/*! Print the terminal parameter object to file/stream + + @param param[in] terminal parameter object + @param fid[out] file/stream handle + + @return < 0 on error + */ +extern int ia_css_terminal_param_print( + const ia_css_terminal_param_t *param, + void *fid); + +#endif /* __IA_CSS_PROGRAM_GROUP_PARAM_SIM_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/psysapi/param/interface/ia_css_program_group_param_types.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/psysapi/param/interface/ia_css_program_group_param_types.h new file mode 100644 index 0000000000000..34f57584a227f --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/psysapi/param/interface/ia_css_program_group_param_types.h @@ -0,0 +1,64 @@ +/* +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ + +#ifndef __IA_CSS_PROGRAM_GROUP_PARAM_TYPES_H +#define __IA_CSS_PROGRAM_GROUP_PARAM_TYPES_H + +/*! \file */ + +/** @file ia_css_program_group_param_types.h + * + * Define the parameter objects that are necessary to create the process + * groups i.e. enable parameters and parameters to set-up frame descriptors + */ + +#include +#include /* ia_css_kernel_bitmap_t */ +#include + +#include +/*! 
make this public so that driver can populate, + * size, bpp, dimensions for all terminals. + * + * Currently one API is provided to get frame_format_type. + * + * frame_format_type is set during ia_css_terminal_param_init(). + * Value for that is const and binary specific. + */ +struct ia_css_terminal_param_s { + uint32_t size; /**< Size of this structure */ + /**< Indicates if this is a generic type or inbuild + * with variable size descriptor + */ + ia_css_frame_format_type_t frame_format_type; + /**< offset to add to reach parent. This is negative value.*/ + int32_t parent_offset; + uint16_t dimensions[IA_CSS_N_DATA_DIMENSION];/**< Logical dimensions */ + /**< Mapping to the index field of the terminal descriptor */ + uint16_t index[IA_CSS_N_DATA_DIMENSION]; + /**< Logical fragment dimension, + * TODO: fragment dimensions can be different per fragment + */ + uint16_t fragment_dimensions[IA_CSS_N_DATA_DIMENSION]; + uint32_t stride;/**< Stride of a frame */ + uint16_t offset;/**< Offset in bytes to first fragment */ + uint8_t bpp; /**< Bits per pixel */ + uint8_t bpe; /**< Bits per element */ +}; + +typedef struct ia_css_program_group_param_s ia_css_program_group_param_t; +typedef struct ia_css_program_param_s ia_css_program_param_t; +typedef struct ia_css_terminal_param_s ia_css_terminal_param_t; + +#endif /* __IA_CSS_PROGRAM_GROUP_PARAM_TYPES_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/psysapi/param/interface/ia_css_psys_param_trace.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/psysapi/param/interface/ia_css_psys_param_trace.h new file mode 100644 index 0000000000000..f59dfbf165e4d --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/psysapi/param/interface/ia_css_psys_param_trace.h @@ -0,0 +1,102 @@ +/* +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. 
+ * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ + +#ifndef __IA_CSS_PSYS_PARAM_TRACE_H +#define __IA_CSS_PSYS_PARAM_TRACE_H + +#include "ia_css_psysapi_trace.h" + +#define PSYS_PARAM_TRACE_LEVEL_CONFIG_DEFAULT PSYSAPI_TRACE_LOG_LEVEL_OFF + +/* Default sub-module tracing config */ +#if (!defined(PSYSAPI_PARAM_TRACING_OVERRIDE)) + #define PSYS_PARAM_TRACE_LEVEL_CONFIG PSYS_PARAM_TRACE_LEVEL_CONFIG_DEFAULT +#endif + +/* Module/sub-module specific trace setting will be used if + * the trace level is not specified from the module or + PSYSAPI_PARAM_TRACING_OVERRIDE is defined + */ +#if (defined(PSYSAPI_PARAM_TRACING_OVERRIDE)) + /* Module/sub-module specific trace setting */ + #if PSYSAPI_PARAM_TRACING_OVERRIDE == PSYSAPI_TRACE_LOG_LEVEL_OFF + /* PSYSAPI_TRACE_LOG_LEVEL_OFF */ + #define PSYSAPI_PARAM_TRACE_METHOD \ + IA_CSS_TRACE_METHOD_NATIVE + #define PSYSAPI_PARAM_TRACE_LEVEL_ASSERT \ + IA_CSS_TRACE_LEVEL_DISABLED + #define PSYSAPI_PARAM_TRACE_LEVEL_ERROR \ + IA_CSS_TRACE_LEVEL_DISABLED + #define PSYSAPI_PARAM_TRACE_LEVEL_WARNING \ + IA_CSS_TRACE_LEVEL_DISABLED + #define PSYSAPI_PARAM_TRACE_LEVEL_INFO \ + IA_CSS_TRACE_LEVEL_DISABLED + #define PSYSAPI_PARAM_TRACE_LEVEL_DEBUG \ + IA_CSS_TRACE_LEVEL_DISABLED + #define PSYSAPI_PARAM_TRACE_LEVEL_VERBOSE \ + IA_CSS_TRACE_LEVEL_DISABLED + #elif PSYSAPI_PARAM_TRACING_OVERRIDE == PSYSAPI_TRACE_LOG_LEVEL_NORMAL + /* PSYSAPI_TRACE_LOG_LEVEL_NORMAL */ + #define PSYSAPI_PARAM_TRACE_METHOD \ + IA_CSS_TRACE_METHOD_NATIVE + #define PSYSAPI_PARAM_TRACE_LEVEL_ASSERT \ + IA_CSS_TRACE_LEVEL_DISABLED + #define 
PSYSAPI_PARAM_TRACE_LEVEL_ERROR \ + IA_CSS_TRACE_LEVEL_ENABLED + #define PSYSAPI_PARAM_TRACE_LEVEL_WARNING \ + IA_CSS_TRACE_LEVEL_DISABLED + #define PSYSAPI_PARAM_TRACE_LEVEL_INFO \ + IA_CSS_TRACE_LEVEL_ENABLED + #define PSYSAPI_PARAM_TRACE_LEVEL_DEBUG \ + IA_CSS_TRACE_LEVEL_DISABLED + #define PSYSAPI_PARAM_TRACE_LEVEL_VERBOSE \ + IA_CSS_TRACE_LEVEL_DISABLED + #elif PSYSAPI_PARAM_TRACING_OVERRIDE == PSYSAPI_TRACE_LOG_LEVEL_DEBUG + /* PSYSAPI_TRACE_LOG_LEVEL_DEBUG */ + #define PSYSAPI_PARAM_TRACE_METHOD \ + IA_CSS_TRACE_METHOD_NATIVE + #define PSYSAPI_PARAM_TRACE_LEVEL_ASSERT \ + IA_CSS_TRACE_LEVEL_ENABLED + #define PSYSAPI_PARAM_TRACE_LEVEL_ERROR \ + IA_CSS_TRACE_LEVEL_ENABLED + #define PSYSAPI_PARAM_TRACE_LEVEL_WARNING \ + IA_CSS_TRACE_LEVEL_ENABLED + #define PSYSAPI_PARAM_TRACE_LEVEL_INFO \ + IA_CSS_TRACE_LEVEL_ENABLED + #define PSYSAPI_PARAM_TRACE_LEVEL_DEBUG \ + IA_CSS_TRACE_LEVEL_ENABLED + #define PSYSAPI_PARAM_TRACE_LEVEL_VERBOSE \ + IA_CSS_TRACE_LEVEL_ENABLED + #else + #error "No PSYSAPI_DATA Tracing level defined" + #endif +#else + /* Inherit Module trace setting */ + #define PSYSAPI_PARAM_TRACE_METHOD \ + PSYSAPI_TRACE_METHOD + #define PSYSAPI_PARAM_TRACE_LEVEL_ASSERT \ + PSYSAPI_TRACE_LEVEL_ASSERT + #define PSYSAPI_PARAM_TRACE_LEVEL_ERROR \ + PSYSAPI_TRACE_LEVEL_ERROR + #define PSYSAPI_PARAM_TRACE_LEVEL_WARNING \ + PSYSAPI_TRACE_LEVEL_WARNING + #define PSYSAPI_PARAM_TRACE_LEVEL_INFO \ + PSYSAPI_TRACE_LEVEL_INFO + #define PSYSAPI_PARAM_TRACE_LEVEL_DEBUG \ + PSYSAPI_TRACE_LEVEL_DEBUG + #define PSYSAPI_PARAM_TRACE_LEVEL_VERBOSE \ + PSYSAPI_TRACE_LEVEL_VERBOSE +#endif + +#endif /* __IA_CSS_PSYSAPI_PARAM_TRACE_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/psysapi/param/src/ia_css_program_group_param.c b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/psysapi/param/src/ia_css_program_group_param.c new file mode 100644 index 0000000000000..067f69a4a01e2 --- /dev/null +++ 
b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/psysapi/param/src/ia_css_program_group_param.c @@ -0,0 +1,771 @@ +/* +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "ia_css_psys_param_trace.h" + +static int +ia_css_terminal_param_init(ia_css_terminal_param_t *terminal_param, + uint32_t offset, + enum ia_css_frame_format_type frame_format_type); + +static int +ia_css_program_param_init(ia_css_program_param_t *program_param, + int32_t offset); + +size_t ia_css_sizeof_program_group_param( + const uint8_t program_count, + const uint8_t terminal_count, + const uint16_t fragment_count) +{ + size_t size = 0; + + IA_CSS_TRACE_0(PSYSAPI_PARAM, VERBOSE, + "ia_css_sizeof_program_group_param(): enter:\n"); + + verifexit(program_count != 0); + verifexit(terminal_count != 0); + verifexit(fragment_count != 0); + + size += sizeof(ia_css_program_group_param_t); + size += program_count * fragment_count * sizeof(ia_css_program_param_t); + size += terminal_count * sizeof(ia_css_terminal_param_t); +EXIT: + if (0 == program_count || 0 == terminal_count || 0 == fragment_count) { + IA_CSS_TRACE_0(PSYSAPI_PARAM, WARNING, + "ia_css_sizeof_program_group_param invalid argument\n"); + } + return size; +} + +size_t ia_css_program_group_param_get_size( + const ia_css_program_group_param_t *program_group_param) +{ + size_t size = 0; + + IA_CSS_TRACE_0(PSYSAPI_PARAM, 
VERBOSE, + "ia_css_program_group_param_get_size(): enter:\n"); + + if (program_group_param != NULL) { + size = program_group_param->size; + } else { + IA_CSS_TRACE_0(PSYSAPI_PARAM, WARNING, + "ia_css_program_group_param_get_size invalid argument\n"); + } + return size; +} + +size_t ia_css_program_param_get_size( + const ia_css_program_param_t *param) +{ + size_t size = 0; + + IA_CSS_TRACE_0(PSYSAPI_PARAM, VERBOSE, + "ia_css_program_param_get_size(): enter:\n"); + + if (param != NULL) { + size = param->size; + } else { + IA_CSS_TRACE_0(PSYSAPI_PARAM, WARNING, + "ia_css_program_param_get_size invalid argument\n"); + } + return size; +} + +ia_css_program_param_t *ia_css_program_group_param_get_program_param( + const ia_css_program_group_param_t *param, + const int i) +{ + ia_css_program_param_t *program_param = NULL; + ia_css_program_param_t *program_param_base; + int program_count = 0; + + IA_CSS_TRACE_0(PSYSAPI_PARAM, VERBOSE, + "ia_css_program_group_param_get_program_param(): enter:\n"); + + verifexit(param != NULL); + + program_count = + (int)ia_css_program_group_param_get_program_count(param); + + verifexit(i < program_count); + + program_param_base = (ia_css_program_param_t *) + (((char *)param) + param->program_param_offset); + + program_param = &program_param_base[i]; + +EXIT: + if (NULL == param || i >= program_count) { + IA_CSS_TRACE_0(PSYSAPI_PARAM, WARNING, + "ia_css_program_group_param_get_program_param invalid argument\n"); + } + return program_param; +} + +size_t ia_css_terminal_param_get_size( + const ia_css_terminal_param_t *param) +{ + size_t size = 0; + + IA_CSS_TRACE_0(PSYSAPI_PARAM, VERBOSE, + "ia_css_terminal_param_get_size(): enter:\n"); + + if (param != NULL) { + size = param->size; + } else { + IA_CSS_TRACE_0(PSYSAPI_PARAM, WARNING, + "ia_css_terminal_param_get_size invalid argument\n"); + } + + return size; +} + +ia_css_terminal_param_t *ia_css_program_group_param_get_terminal_param( + const ia_css_program_group_param_t *param, + const int i) 
+{ + ia_css_terminal_param_t *terminal_param = NULL; + ia_css_terminal_param_t *terminal_param_base; + int program_count = 0; + + IA_CSS_TRACE_0(PSYSAPI_PARAM, VERBOSE, + "ia_css_program_group_param_get_terminal_param(): enter:\n"); + + verifexit(param != NULL); + + program_count = + (int)ia_css_program_group_param_get_terminal_count(param); + + verifexit(i < program_count); + + terminal_param_base = (ia_css_terminal_param_t *) + (((char *)param) + param->terminal_param_offset); + terminal_param = &terminal_param_base[i]; +EXIT: + if (NULL == param || i >= program_count) { + IA_CSS_TRACE_0(PSYSAPI_PARAM, WARNING, + "ia_css_program_group_param_get_terminal_param invalid argument\n"); + } + return terminal_param; +} + +uint8_t ia_css_program_group_param_get_program_count( + const ia_css_program_group_param_t *param) +{ + uint8_t program_count = 0; + + IA_CSS_TRACE_0(PSYSAPI_PARAM, VERBOSE, + "ia_css_program_group_param_get_program_count(): enter:\n"); + + if (param != NULL) { + program_count = param->program_count; + } else { + IA_CSS_TRACE_0(PSYSAPI_PARAM, WARNING, + "ia_css_program_group_param_get_program_count invalid argument\n"); + } + return program_count; +} + +uint8_t ia_css_program_group_param_get_terminal_count( + const ia_css_program_group_param_t *param) +{ + uint8_t terminal_count = 0; + + IA_CSS_TRACE_0(PSYSAPI_PARAM, VERBOSE, + "ia_css_program_group_param_get_terminal_count(): enter:\n"); + + if (param != NULL) { + terminal_count = param->terminal_count; + } else { + IA_CSS_TRACE_0(PSYSAPI_PARAM, WARNING, + "ia_css_program_group_param_get_terminal_count invalid argument\n"); + } + return terminal_count; +} + +uint16_t ia_css_program_group_param_get_fragment_count( + const ia_css_program_group_param_t *param) +{ + uint8_t fragment_count = 0; + + IA_CSS_TRACE_0(PSYSAPI_PARAM, VERBOSE, + "ia_css_program_group_param_get_fragment_count(): enter:\n"); + + if (param != NULL) { + fragment_count = (uint8_t)param->fragment_count; + } else { + 
IA_CSS_TRACE_0(PSYSAPI_PARAM, WARNING, + "ia_css_program_group_param_get_fragment_count invalid argument\n"); + } + return fragment_count; +} + +int ia_css_program_group_param_set_protocol_version( + ia_css_program_group_param_t *param, + uint8_t protocol_version) +{ + int retval = -1; + + IA_CSS_TRACE_0(PSYSAPI_PARAM, VERBOSE, + "ia_css_program_group_param_set_protocol_version(): enter:\n"); + + if (param != NULL) { + param->protocol_version = protocol_version; + retval = 0; + } else { + IA_CSS_TRACE_1(PSYSAPI_PARAM, ERROR, + "ia_css_program_group_param_set_protocol_version failed (%i)\n", + retval); + } + return retval; +} + +uint8_t ia_css_program_group_param_get_protocol_version( + const ia_css_program_group_param_t *param) +{ + uint8_t protocol_version = 0; + + IA_CSS_TRACE_0(PSYSAPI_PARAM, VERBOSE, + "ia_css_program_group_param_get_protocol_version(): enter:\n"); + + if (param != NULL) { + protocol_version = param->protocol_version; + } else { + IA_CSS_TRACE_0(PSYSAPI_PARAM, WARNING, + "ia_css_program_group_param_get_protocol_version invalid argument\n"); + } + return protocol_version; +} + +int ia_css_program_group_param_set_kernel_enable_bitmap( + ia_css_program_group_param_t *param, + const ia_css_kernel_bitmap_t bitmap) +{ + int retval = -1; + + IA_CSS_TRACE_0(PSYSAPI_PARAM, VERBOSE, + "ia_css_program_group_param_set_kernel_enable_bitmap(): enter:\n"); + + if (param != NULL) { + param->kernel_enable_bitmap = bitmap; + retval = 0; + } else { + IA_CSS_TRACE_1(PSYSAPI_PARAM, ERROR, + "ia_css_program_group_param_set_kernel_enable_bitmap failed (%i)\n", + retval); + } + return retval; +} + +ia_css_kernel_bitmap_t ia_css_program_group_param_get_kernel_enable_bitmap( + const ia_css_program_group_param_t *param) +{ + ia_css_kernel_bitmap_t bitmap = ia_css_kernel_bitmap_clear(); + + IA_CSS_TRACE_0(PSYSAPI_PARAM, VERBOSE, + "ia_css_program_group_param_get_kernel_enable_bitmap(): enter:\n"); + + if (param != NULL) { + bitmap = param->kernel_enable_bitmap; + } else { 
+ IA_CSS_TRACE_0(PSYSAPI_PARAM, WARNING, + "ia_css_program_group_param_get_kernel_enable_bitmap invalid argument\n"); + } + return bitmap; +} + +int ia_css_program_param_set_kernel_enable_bitmap( + ia_css_program_param_t *program_param, + const ia_css_kernel_bitmap_t bitmap) +{ + int retval = -1; + + IA_CSS_TRACE_0(PSYSAPI_PARAM, VERBOSE, + "ia_css_program_param_set_kernel_enable_bitmap(): enter:\n"); + + if (program_param != NULL) { + program_param->kernel_enable_bitmap = bitmap; + retval = 0; + } else { + IA_CSS_TRACE_1(PSYSAPI_PARAM, ERROR, + "ia_css_program_param_set_kernel_enable_bitmap failed (%i)\n", + retval); + } + return retval; +} + +ia_css_kernel_bitmap_t ia_css_program_param_get_kernel_enable_bitmap( + const ia_css_program_param_t *program_param) +{ + ia_css_kernel_bitmap_t bitmap = ia_css_kernel_bitmap_clear(); + char *base; + + IA_CSS_TRACE_0(PSYSAPI_PARAM, VERBOSE, + "ia_css_program_param_get_kernel_enable_bitmap(): enter:\n"); + + verifexit(program_param != NULL); + verifexit(program_param->parent_offset != 0); + + base = (char *)((char *)program_param + program_param->parent_offset); + bitmap = ((ia_css_program_group_param_t *)base)->kernel_enable_bitmap; +EXIT: + if (NULL == program_param || 0 == program_param->parent_offset) { + IA_CSS_TRACE_0(PSYSAPI_PARAM, WARNING, + "ia_css_program_param_get_kernel_enable_bitmap invalid argument\n"); + } + return bitmap; +} + +ia_css_kernel_bitmap_t ia_css_terminal_param_get_kernel_enable_bitmap( + const ia_css_terminal_param_t *param) +{ + ia_css_kernel_bitmap_t bitmap = ia_css_kernel_bitmap_clear(); + char *base; + + IA_CSS_TRACE_0(PSYSAPI_PARAM, VERBOSE, + "ia_css_terminal_param_get_kernel_enable_bitmap(): enter:\n"); + + verifexit(param != NULL); + verifexit(param->parent_offset != 0); + + base = (char *)((char *)param + param->parent_offset); + bitmap = ((ia_css_program_group_param_t *)base)->kernel_enable_bitmap; +EXIT: + if (NULL == param || 0 == param->parent_offset) { + IA_CSS_TRACE_0(PSYSAPI_PARAM, 
WARNING, + "ia_css_terminal_param_get_kernel_enable_bitmap invalid argument\n"); + } + return bitmap; +} + +ia_css_frame_format_type_t ia_css_terminal_param_get_frame_format_type( + const ia_css_terminal_param_t *param) +{ + ia_css_frame_format_type_t ft = IA_CSS_N_FRAME_FORMAT_TYPES; + + IA_CSS_TRACE_0(PSYSAPI_PARAM, VERBOSE, + "ia_css_terminal_param_get_frame_format_type(): enter:\n"); + + verifexit(param != NULL); + + ft = param->frame_format_type; +EXIT: + if (NULL == param) { + IA_CSS_TRACE_0(PSYSAPI_PARAM, WARNING, + "ia_css_terminal_param_get_frame_format_type invalid argument\n"); + } + return ft; +} + +int ia_css_terminal_param_set_frame_format_type( + ia_css_terminal_param_t *param, + const ia_css_frame_format_type_t data_format_type) +{ + int retval = -1; + + IA_CSS_TRACE_0(PSYSAPI_PARAM, VERBOSE, + "ia_css_terminal_param_set_frame_format_type(): enter:\n"); + + if (param != NULL) { + param->frame_format_type = data_format_type; + retval = 0; + } else { + IA_CSS_TRACE_1(PSYSAPI_PARAM, ERROR, + "ia_css_terminal_param_set_frame_format_type failed (%i)\n", + retval); + } + return retval; +} + +uint8_t ia_css_terminal_param_get_bpp( + const ia_css_terminal_param_t *param) +{ + uint8_t bpp = 0; + + IA_CSS_TRACE_0(PSYSAPI_PARAM, VERBOSE, + "ia_css_terminal_param_get_bpp(): enter:\n"); + + verifexit(param != NULL); + + bpp = param->bpp; + +EXIT: + if (NULL == param) { + IA_CSS_TRACE_0(PSYSAPI_PARAM, WARNING, + "ia_css_terminal_param_get_bpp invalid argument\n"); + } + return bpp; +} + +int ia_css_terminal_param_set_bpp( + ia_css_terminal_param_t *param, + const uint8_t bpp) +{ + int retval = -1; + + IA_CSS_TRACE_0(PSYSAPI_PARAM, VERBOSE, + "ia_css_terminal_param_set_bpp(): enter:\n"); + + if (param != NULL) { + param->bpp = bpp; + retval = 0; + } else { + IA_CSS_TRACE_1(PSYSAPI_PARAM, ERROR, + "ia_css_terminal_param_set_bpp failed (%i)\n", retval); + } + return retval; +} + +int ia_css_terminal_param_get_dimensions( + const ia_css_terminal_param_t *param, + 
uint16_t dimensions[IA_CSS_N_DATA_DIMENSION]) +{ + int retval = -1; + + IA_CSS_TRACE_0(PSYSAPI_PARAM, VERBOSE, + "ia_css_terminal_param_get_dimensions(): enter:\n"); + + if (param != NULL) { + dimensions[IA_CSS_COL_DIMENSION] = + param->dimensions[IA_CSS_COL_DIMENSION]; + dimensions[IA_CSS_ROW_DIMENSION] = + param->dimensions[IA_CSS_ROW_DIMENSION]; + retval = 0; + } else { + IA_CSS_TRACE_1(PSYSAPI_PARAM, ERROR, + "ia_css_terminal_param_get_dimensions failed (%i)\n", retval); + } + return retval; +} + +int ia_css_terminal_param_set_dimensions( + ia_css_terminal_param_t *param, + const uint16_t dimensions[IA_CSS_N_DATA_DIMENSION]) +{ + int retval = -1; + + IA_CSS_TRACE_0(PSYSAPI_PARAM, VERBOSE, + "ia_css_terminal_param_set_dimensions(): enter:\n"); + + if (param != NULL) { + param->dimensions[IA_CSS_COL_DIMENSION] = + dimensions[IA_CSS_COL_DIMENSION]; + param->dimensions[IA_CSS_ROW_DIMENSION] = + dimensions[IA_CSS_ROW_DIMENSION]; + retval = 0; + } else { + IA_CSS_TRACE_1(PSYSAPI_PARAM, ERROR, + "ia_css_terminal_param_set_dimensions failed (%i)\n", retval); + } + return retval; +} + +int ia_css_terminal_param_set_stride( + ia_css_terminal_param_t *param, + const uint32_t stride) +{ + int retval = -1; + + verifexit(param != NULL); + param->stride = stride; + retval = 0; + +EXIT: + return retval; +} + +uint32_t ia_css_terminal_param_get_stride( + const ia_css_terminal_param_t *param) +{ + uint32_t stride = 0; + + verifexit(param != NULL); + stride = param->stride; + +EXIT: + return stride; +} + + +static int ia_css_program_param_init( + ia_css_program_param_t *program_param, + int32_t offset) +{ + int retval = -1; + + COMPILATION_ERROR_IF( + SIZE_OF_PROGRAM_PARAM_STRUCT_IN_BITS != + (CHAR_BIT * sizeof(ia_css_program_param_t))); + verifexit(program_param != NULL); + + IA_CSS_TRACE_0(PSYSAPI_PARAM, INFO, + "ia_css_program_param_init(): enter:\n"); + + program_param->size = sizeof(ia_css_program_param_t); + /* parent is at negative offset from current program.*/ + 
program_param->parent_offset = -offset; + /*TODO: Kernel_bitmap setting. ?*/ + retval = 0; +EXIT: + if (retval != 0) { + IA_CSS_TRACE_1(PSYSAPI_PARAM, ERROR, + "ia_css_program_param_init failed (%i)\n", retval); + } + return retval; +} + +static int +ia_css_terminal_param_init(ia_css_terminal_param_t *terminal_param, + uint32_t offset, + enum ia_css_frame_format_type frame_format_type) +{ + int retval = -1; + + COMPILATION_ERROR_IF( + SIZE_OF_TERMINAL_PARAM_STRUCT_IN_BITS != + (CHAR_BIT * sizeof(ia_css_terminal_param_t))); + verifexit(terminal_param != NULL); + + IA_CSS_TRACE_0(PSYSAPI_PARAM, INFO, + "ia_css_terminal_param_init(): enter:\n"); + + terminal_param->size = sizeof(ia_css_terminal_param_t); + /* parent is at negative offset from current program.*/ + terminal_param->parent_offset = -((int32_t)offset); + /*TODO: Kernel_bitmap setting. ?*/ + terminal_param->frame_format_type = frame_format_type; + + retval = 0; +EXIT: + if (retval != 0) { + IA_CSS_TRACE_1(PSYSAPI_PARAM, ERROR, + "ia_css_terminal_param_init failed (%i)\n", retval); + } + return retval; +} + +ia_css_program_group_param_t * +ia_css_terminal_param_get_parent( + const ia_css_terminal_param_t *param) +{ + ia_css_program_group_param_t *parent = NULL; + char *base; + + IA_CSS_TRACE_0(PSYSAPI_PARAM, VERBOSE, + "ia_css_terminal_param_get_parent(): enter:\n"); + + verifexit(NULL != param); + + base = (char *)((char *)param + param->parent_offset); + + parent = (ia_css_program_group_param_t *)(base); +EXIT: + if (NULL == param) { + IA_CSS_TRACE_0(PSYSAPI_PARAM, WARNING, + "ia_css_terminal_param_get_parent invalid argument\n"); + } + return parent; +} + +int ia_css_program_group_param_init( + ia_css_program_group_param_t *blob, + const uint8_t program_count, + const uint8_t terminal_count, + const uint16_t fragment_count, + const enum ia_css_frame_format_type *frame_format_types) +{ + int i = 0; + char *param_base; + uint32_t offset; + int retval = -1; + + COMPILATION_ERROR_IF( + 
SIZE_OF_PROGRAM_GROUP_PARAM_STRUCT_IN_BITS != + (CHAR_BIT * sizeof(ia_css_program_group_param_t))); + + IA_CSS_TRACE_0(PSYSAPI_PARAM, INFO, + "ia_css_program_group_param_init(): enter:\n"); + + assert(blob != 0); + + verifexit(blob != NULL); + verifexit(frame_format_types != NULL); + + blob->program_count = program_count; + blob->fragment_count = fragment_count; + blob->terminal_count = terminal_count; + blob->program_param_offset = sizeof(ia_css_program_group_param_t); + blob->terminal_param_offset = blob->program_param_offset + + sizeof(ia_css_program_param_t) * program_count; + + param_base = (char *)((char *)blob + blob->program_param_offset); + offset = blob->program_param_offset; + + for (i = 0; i < program_count; i++) { + ia_css_program_param_init( + (ia_css_program_param_t *)param_base, offset); + offset += sizeof(ia_css_program_param_t); + param_base += sizeof(ia_css_program_param_t); + } + + param_base = (char *)((char *)blob + blob->terminal_param_offset); + offset = blob->terminal_param_offset; + + for (i = 0; i < terminal_count; i++) { + ia_css_terminal_param_init( + (ia_css_terminal_param_t *)param_base, + offset, + frame_format_types[i]); + + offset += sizeof(ia_css_terminal_param_t); + param_base += sizeof(ia_css_terminal_param_t); + } + + /* + * For now, set legacy flow by default. This can be removed as soon + * as all hosts/drivers explicitly set the protocol version. 
+ */ + blob->protocol_version = IA_CSS_PROCESS_GROUP_PROTOCOL_LEGACY; + + blob->size = (uint32_t)ia_css_sizeof_program_group_param(program_count, + terminal_count, + fragment_count); + retval = 0; +EXIT: + if (retval != 0) { + IA_CSS_TRACE_1(PSYSAPI_PARAM, ERROR, + "ia_css_program_group_param_init failed (%i)\n", retval); + } + return retval; +} + +int ia_css_program_group_param_print( + const ia_css_program_group_param_t *param, + void *fid) +{ + int retval = -1; + int i; + uint8_t program_count, terminal_count; + ia_css_kernel_bitmap_t bitmap; + + IA_CSS_TRACE_0(PSYSAPI_PARAM, INFO, + "ia_css_program_group_param_print(): enter:\n"); + + verifexit(param != NULL); + NOT_USED(fid); + + IA_CSS_TRACE_1(PSYSAPI_PARAM, INFO, + "sizeof(program_group_param) = %d\n", + (int)ia_css_program_group_param_get_size(param)); + + program_count = ia_css_program_group_param_get_program_count(param); + terminal_count = ia_css_program_group_param_get_terminal_count(param); + + bitmap = ia_css_program_group_param_get_kernel_enable_bitmap(param); + verifexit(ia_css_kernel_bitmap_print(bitmap, fid) == 0); + + IA_CSS_TRACE_1(PSYSAPI_PARAM, INFO, + "%d program params\n", (int)program_count); + for (i = 0; i < (int)program_count; i++) { + ia_css_program_param_t *program_param = + ia_css_program_group_param_get_program_param(param, i); + + retval = ia_css_program_param_print(program_param, fid); + verifjmpexit(retval == 0); + } + IA_CSS_TRACE_1(PSYSAPI_PARAM, INFO, "%d terminal params\n", + (int)terminal_count); + for (i = 0; i < (int)terminal_count; i++) { + ia_css_terminal_param_t *terminal_param = + ia_css_program_group_param_get_terminal_param(param, i); + + retval = ia_css_terminal_param_print(terminal_param, fid); + verifjmpexit(retval == 0); + } + + retval = 0; +EXIT: + if (retval != 0) { + IA_CSS_TRACE_1(PSYSAPI_PARAM, ERROR, + "ia_css_program_group_param_print failed (%i)\n", retval); + } + return retval; +} + +int ia_css_terminal_param_print( + const ia_css_terminal_param_t *param, 
+ void *fid) +{ + int retval = -1; + + IA_CSS_TRACE_0(PSYSAPI_PARAM, INFO, + "ia_css_terminal_param_print(): enter:\n"); + + verifexit(param != NULL); + NOT_USED(fid); + + IA_CSS_TRACE_1(PSYSAPI_PARAM, INFO, + "sizeof(terminal_param) = %d\n", + (int)ia_css_terminal_param_get_size(param)); + + IA_CSS_TRACE_1(PSYSAPI_PARAM, INFO, + "\tframe_format_type = %d\n", param->frame_format_type); + + retval = 0; +EXIT: + if (retval != 0) { + IA_CSS_TRACE_1(PSYSAPI_PARAM, ERROR, + "ia_css_terminal_param_print failed (%i)\n", retval); + } + return retval; +} + +int ia_css_program_param_print( + const ia_css_program_param_t *param, + void *fid) +{ + int retval = -1; + ia_css_kernel_bitmap_t bitmap; + + IA_CSS_TRACE_0(PSYSAPI_PARAM, INFO, + "ia_css_program_param_print(): enter:\n"); + + verifexit(param != NULL); + NOT_USED(fid); + + IA_CSS_TRACE_1(PSYSAPI_PARAM, INFO, "sizeof(program_param) = %d\n", + (int)ia_css_program_param_get_size(param)); + + bitmap = ia_css_program_param_get_kernel_enable_bitmap(param); + verifexit(ia_css_kernel_bitmap_print(bitmap, fid) == 0); + + retval = 0; +EXIT: + if (retval != 0) { + IA_CSS_TRACE_1(PSYSAPI_PARAM, ERROR, + "ia_css_program_param_print failed (%i)\n", retval); + } + return retval; +} diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/psysapi/param/src/ia_css_program_group_param_private.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/psysapi/param/src/ia_css_program_group_param_private.h new file mode 100644 index 0000000000000..6672737e51a14 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/psysapi/param/src/ia_css_program_group_param_private.h @@ -0,0 +1,80 @@ +/* +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. 
+ * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ + +#ifndef __IA_CSS_PROGRAM_GROUP_PARAM_PRIVATE_H +#define __IA_CSS_PROGRAM_GROUP_PARAM_PRIVATE_H + +#include +#include +#include +#include +#include +#include +#include + +#define N_PADDING_UINT8_IN_PROGRAM_GROUP_PARAM_STRUCT 7 +#define SIZE_OF_PROGRAM_GROUP_PARAM_STRUCT_IN_BITS \ + (IA_CSS_KERNEL_BITMAP_BITS \ + + (3 * IA_CSS_UINT32_T_BITS) \ + + IA_CSS_UINT16_T_BITS \ + + (3 * IA_CSS_UINT8_T_BITS) \ + + (N_PADDING_UINT8_IN_PROGRAM_GROUP_PARAM_STRUCT * IA_CSS_UINT8_T_BITS)) + +/* tentative; co-design with ISP algorithm */ +struct ia_css_program_group_param_s { + /* The enable bits for each individual kernel */ + ia_css_kernel_bitmap_t kernel_enable_bitmap; + /* Size of this structure */ + uint32_t size; + uint32_t program_param_offset; + uint32_t terminal_param_offset; + /* Number of (explicit) fragments to use in a frame */ + uint16_t fragment_count; + /* Number of active programs */ + uint8_t program_count; + /* Number of active terminals */ + uint8_t terminal_count; + /* Program group protocol version */ + uint8_t protocol_version; + uint8_t padding[N_PADDING_UINT8_IN_PROGRAM_GROUP_PARAM_STRUCT]; +}; + +#define SIZE_OF_PROGRAM_PARAM_STRUCT_IN_BITS \ + (IA_CSS_KERNEL_BITMAP_BITS \ + + IA_CSS_UINT32_T_BITS \ + + IA_CSS_INT32_T_BITS) + +/* private */ +struct ia_css_program_param_s { + /* What to use this one for ? */ + ia_css_kernel_bitmap_t kernel_enable_bitmap; + /* Size of this structure */ + uint32_t size; + /* offset to add to reach parent. 
This is negative value.*/ + int32_t parent_offset; +}; + +#define SIZE_OF_TERMINAL_PARAM_STRUCT_IN_BITS \ + (IA_CSS_UINT32_T_BITS \ + + IA_CSS_FRAME_FORMAT_TYPE_BITS \ + + IA_CSS_INT32_T_BITS \ + + (IA_CSS_UINT16_T_BITS * IA_CSS_N_DATA_DIMENSION) \ + + (IA_CSS_UINT16_T_BITS * IA_CSS_N_DATA_DIMENSION) \ + + (IA_CSS_UINT16_T_BITS * IA_CSS_N_DATA_DIMENSION) \ + + IA_CSS_INT32_T_BITS \ + + IA_CSS_UINT16_T_BITS \ + + IA_CSS_UINT8_T_BITS \ + + (IA_CSS_UINT8_T_BITS * 1)) + +#endif /* __IA_CSS_PROGRAM_GROUP_PARAM_PRIVATE_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/psysapi/psys_server_manifest/cnlB0/ia_css_psys_server_manifest.c b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/psysapi/psys_server_manifest/cnlB0/ia_css_psys_server_manifest.c new file mode 100644 index 0000000000000..7543b93f279b1 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/psysapi/psys_server_manifest/cnlB0/ia_css_psys_server_manifest.c @@ -0,0 +1,51 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+*/ + +#include "ia_css_psys_server_manifest.h" + +/** + * Manifest of resources in use by PSYS itself + */ + +const vied_nci_resource_spec_t psys_server_manifest = { + /* internal memory */ + { /* resource id size offset*/ + {VIED_NCI_GMEM_TYPE_ID, 0, 0}, + {VIED_NCI_DMEM_TYPE_ID, VIED_NCI_DMEM0_MAX_SIZE, 0}, + {VIED_NCI_VMEM_TYPE_ID, 0, 0}, + {VIED_NCI_BAMEM_TYPE_ID, 0, 0}, + {VIED_NCI_PMEM_TYPE_ID, 0, 0} + }, + /* external memory */ + { /* resource id size offset*/ + {VIED_NCI_N_MEM_ID, 0, 0}, + {VIED_NCI_N_MEM_ID, 0, 0}, + {VIED_NCI_N_MEM_ID, 0, 0}, + {VIED_NCI_N_MEM_ID, 0, 0} + }, + /* device channel */ + { /* resource id size offset*/ + {VIED_NCI_DEV_CHN_DMA_EXT0_ID, + PSYS_SERVER_DMA_CHANNEL_SIZE, + PSYS_SERVER_DMA_CHANNEL_OFFSET}, + {VIED_NCI_DEV_CHN_GDC_ID, 0, 0}, + {VIED_NCI_DEV_CHN_DMA_EXT1_READ_ID, 0, 0}, + {VIED_NCI_DEV_CHN_DMA_EXT1_WRITE_ID, 0, 0}, + {VIED_NCI_DEV_CHN_DMA_INTERNAL_ID, 0, 0}, + {VIED_NCI_DEV_CHN_DMA_IPFD_ID, 0, 0}, + {VIED_NCI_DEV_CHN_DMA_ISA_ID, 0, 0}, + {VIED_NCI_DEV_CHN_DMA_FW_ID, 0, 0}, + {VIED_NCI_DEV_CHN_DMA_CMPRS_ID, 0, 0} + } +}; diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/psysapi/psys_server_manifest/cnlB0/ia_css_psys_server_manifest.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/psysapi/psys_server_manifest/cnlB0/ia_css_psys_server_manifest.h new file mode 100644 index 0000000000000..b4c7fbc32d5ba --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/psysapi/psys_server_manifest/cnlB0/ia_css_psys_server_manifest.h @@ -0,0 +1,29 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. 
+ * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ + +#ifndef __IA_CSS_PSYS_SERVER_MANIFEST_H +#define __IA_CSS_PSYS_SERVER_MANIFEST_H + +#include "vied_nci_psys_resource_model.h" + +/** + * Manifest of resources in use by PSYS itself + */ + +#define PSYS_SERVER_DMA_CHANNEL_SIZE 2 +#define PSYS_SERVER_DMA_CHANNEL_OFFSET 28 + +extern const vied_nci_resource_spec_t psys_server_manifest; + +#endif /* __IA_CSS_PSYS_SERVER_MANIFEST_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/psysapi/psysapi.mk b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/psysapi/psysapi.mk new file mode 100644 index 0000000000000..e1977cbe2ca2a --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/psysapi/psysapi.mk @@ -0,0 +1,122 @@ +# # # +# Support for Intel Camera Imaging ISP subsystem. +# Copyright (c) 2010 - 2018, Intel Corporation. +# +# This program is free software; you can redistribute it and/or modify it +# under the terms and conditions of the GNU General Public License, +# version 2, as published by the Free Software Foundation. +# +# This program is distributed in the hope it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +# FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU General Public License for +# more details +# +# +# MODULE is PSYSAPI +# +ifdef _H_PSYSAPI_MK +$(error ERROR: psysapi.mk included multiple times, please check makefile) +else +_H_PSYSAPI_MK=1 +endif + +include $(MODULES_DIR)/config/psys/subsystem_$(IPU_SYSVER).mk + +PSYSAPI_DIR = $${MODULES_DIR}/psysapi + +PSYSAPI_PROCESS_HOST_FILES = $(PSYSAPI_DIR)/dynamic/src/ia_css_psys_process.c +PSYSAPI_PROCESS_HOST_FILES += $(PSYSAPI_DIR)/dynamic/src/ia_css_psys_process_group.c +PSYSAPI_PROCESS_HOST_FILES += $(PSYSAPI_DIR)/dynamic/src/ia_css_psys_buffer_set.c +PSYSAPI_PROCESS_HOST_FILES += $(PSYSAPI_DIR)/dynamic/src/ia_css_psys_terminal.c +PSYSAPI_PROCESS_HOST_FILES += $(PSYSAPI_DIR)/param/src/ia_css_program_group_param.c + +# Use PSYS_MANIFEST_HOST_FILES when only accessing manifest functions +PSYSAPI_MANIFEST_HOST_FILES += $(PSYSAPI_DIR)/static/src/ia_css_psys_program_group_manifest.c +PSYSAPI_MANIFEST_HOST_FILES += $(PSYSAPI_DIR)/static/src/ia_css_psys_program_manifest.c +PSYSAPI_MANIFEST_HOST_FILES += $(PSYSAPI_DIR)/static/src/ia_css_psys_terminal_manifest.c +PSYSAPI_MANIFEST_HOST_FILES += $(PSYSAPI_DIR)/sim/src/vied_nci_psys_system.c +PSYSAPI_MANIFEST_HOST_FILES += $(PSYSAPI_DIR)/kernel/src/ia_css_kernel_bitmap.c +PSYSAPI_MANIFEST_HOST_FILES += $(PSYSAPI_DIR)/data/src/ia_css_program_group_data.c +PSYSAPI_MANIFEST_HOST_FILES += $(PSYSAPI_DIR)/resource_model/$(PSYS_RESOURCE_MODEL_VERSION)/vied_nci_psys_resource_model.c +PSYSAPI_MANIFEST_HOST_FILES += $(PSYSAPI_DIR)/psys_server_manifest/$(PSYS_SERVER_MANIFEST_VERSION)/ia_css_psys_server_manifest.c + +# Use only kernel bitmap functionality from PSYS API +PSYSAPI_KERNEL_BITMAP_FILES += $(PSYSAPI_DIR)/kernel/src/ia_css_kernel_bitmap.c +PSYSAPI_KERNEL_BITMAP_CPPFLAGS += -I$(PSYSAPI_DIR)/kernel/interface +PSYSAPI_KERNEL_BITMAP_CPPFLAGS += -I$(PSYSAPI_DIR)/interface + +# Use PSYSAPI_HOST_FILES when program and process group are both needed +PSYSAPI_HOST_FILES = $(PSYSAPI_PROCESS_HOST_FILES) 
$(PSYSAPI_MANIFEST_HOST_FILES) + +# Use PSYSAPI_PROCESS_GROUP_HOST_FILES when program and process group are both needed but there is no +# implementation (yet) of the user customization functions defined in ia_css_psys_process_group_cmd_impl.h. +# Dummy implementations are provided in $(PSYSAPI_DIR)/sim/src/ia_css_psys_process_group_cmd_impl.c +PSYSAPI_PROCESS_GROUP_HOST_FILES = $(PSYSAPI_HOST_FILES) +PSYSAPI_PROCESS_GROUP_HOST_FILES += $(PSYSAPI_DIR)/sim/src/ia_css_psys_process_group_cmd_impl.c + +# for now disabled, implementation for now provided by psys api impl +#PSYSAPI_HOST_FILES += $(PSYSAPI_DIR)/device/src/ia_css_psys_device.c + +PSYSAPI_HOST_CPPFLAGS = -I$(PSYSAPI_DIR)/interface +PSYSAPI_HOST_CPPFLAGS += -I$(PSYSAPI_DIR)/device/interface +PSYSAPI_HOST_CPPFLAGS += -I$(PSYSAPI_DIR)/device/interface/$(IPU_SYSVER) +PSYSAPI_HOST_CPPFLAGS += -I$(PSYSAPI_DIR)/dynamic/interface +PSYSAPI_HOST_CPPFLAGS += -I$(PSYSAPI_DIR)/dynamic/src +PSYSAPI_HOST_CPPFLAGS += -I$(PSYSAPI_DIR)/data/interface +PSYSAPI_HOST_CPPFLAGS += -I$(PSYSAPI_DIR)/data/src +PSYSAPI_HOST_CPPFLAGS += -I$(PSYSAPI_DIR)/static/interface +PSYSAPI_HOST_CPPFLAGS += -I$(PSYSAPI_DIR)/static/src +PSYSAPI_HOST_CPPFLAGS += -I$(PSYSAPI_DIR)/kernel/interface +PSYSAPI_HOST_CPPFLAGS += -I$(PSYSAPI_DIR)/param/interface +PSYSAPI_HOST_CPPFLAGS += -I$(PSYSAPI_DIR)/param/src +PSYSAPI_HOST_CPPFLAGS += -I$(PSYSAPI_DIR)/sim/interface +PSYSAPI_HOST_CPPFLAGS += -I$(PSYSAPI_DIR)/sim/src +PSYSAPI_HOST_CPPFLAGS += -I$(PSYSAPI_DIR)/resource_model/$(PSYS_RESOURCE_MODEL_VERSION) +PSYSAPI_HOST_CPPFLAGS += -I$(PSYSAPI_DIR)/resource_model/$(PSYS_RESOURCE_MODEL_VERSION)/private +PSYSAPI_HOST_CPPFLAGS += -I$(PSYSAPI_DIR)/psys_server_manifest/$(PSYS_SERVER_MANIFEST_VERSION) + +PSYSAPI_FW_CPPFLAGS = $(PSYSAPI_HOST_CPPFLAGS) +PSYSAPI_FW_CPPFLAGS += -I$(PSYSAPI_DIR)/static/interface +PSYSAPI_FW_CPPFLAGS += -I$(PSYSAPI_DIR)/static/src +PSYSAPI_FW_CPPFLAGS += -I$(PSYSAPI_DIR)/resource_model/$(PSYS_RESOURCE_MODEL_VERSION) 
+PSYSAPI_FW_CPPFLAGS += -I$(PSYSAPI_DIR)/resource_model/$(PSYS_RESOURCE_MODEL_VERSION)/private +PSYSAPI_FW_CPPFLAGS += -I$(PSYSAPI_DIR)/psys_server_manifest/$(PSYS_SERVER_MANIFEST_VERSION) +PSYSAPI_SYSTEM_GLOBAL_CPPFLAGS += -I$(PSYSAPI_DIR)/sim/interface +PSYSAPI_SYSTEM_GLOBAL_CPPFLAGS += -I$(PSYSAPI_DIR)/resource_model/$(PSYS_RESOURCE_MODEL_VERSION) +PSYSAPI_SYSTEM_GLOBAL_CPPFLAGS += -I$(PSYSAPI_DIR)/resource_model/$(PSYS_RESOURCE_MODEL_VERSION)/private +PSYSAPI_SYSTEM_GLOBAL_CPPFLAGS += -I$(PSYSAPI_DIR)/psys_server_manifest/$(PSYS_SERVER_MANIFEST_VERSION) + +# Defining the trace level for the PSYSAPI +PSYSAPI_HOST_CPPFLAGS += -DPSYSAPI_TRACE_CONFIG=PSYSAPI_TRACE_LOG_LEVEL_NORMAL +# Enable/Disable 'late binding' support and it's additional queues +PSYSAPI_HOST_CPPFLAGS += -DHAS_LATE_BINDING_SUPPORT=$(PSYS_HAS_LATE_BINDING_SUPPORT) + +#Example: how to switch to a different log level for a sub-module +#PSYSAPI_HOST_CPPFLAGS += -DPSYSAPI_DYNAMIC_TRACING_OVERRIDE=PSYSAPI_TRACE_LOG_LEVEL_DEBUG + +# enable host side implementation +# TODO: better name for the flag to enable the impl... 
+PSYSAPI_HOST_CPPFLAGS += -D__X86_SIM__ + +# Files for Firmware +PSYSAPI_FW_FILES = $(PSYSAPI_DIR)/dynamic/src/ia_css_psys_process.c +PSYSAPI_FW_FILES += $(PSYSAPI_DIR)/dynamic/src/ia_css_psys_process_group.c +PSYSAPI_FW_FILES += $(PSYSAPI_DIR)/dynamic/src/ia_css_psys_terminal.c +PSYSAPI_FW_FILES += $(PSYSAPI_DIR)/dynamic/src/ia_css_psys_buffer_set.c +PSYSAPI_FW_FILES += $(PSYSAPI_DIR)/param/src/ia_css_program_group_param.c +PSYSAPI_FW_FILES += $(PSYSAPI_DIR)/data/src/ia_css_program_group_data.c +PSYSAPI_FW_FILES += $(PSYSAPI_DIR)/sim/src/vied_nci_psys_system.c +PSYSAPI_FW_FILES += $(PSYSAPI_DIR)/sim/src/ia_css_psys_sim_data.c +PSYSAPI_FW_FILES += $(PSYSAPI_DIR)/static/src/ia_css_psys_program_group_manifest.c +PSYSAPI_FW_FILES += $(PSYSAPI_DIR)/static/src/ia_css_psys_program_manifest.c +PSYSAPI_FW_FILES += $(PSYSAPI_DIR)/static/src/ia_css_psys_terminal_manifest.c +PSYSAPI_FW_FILES += $(PSYSAPI_DIR)/resource_model/$(PSYS_RESOURCE_MODEL_VERSION)/vied_nci_psys_resource_model.c +PSYSAPI_FW_FILES += $(PSYSAPI_DIR)/psys_server_manifest/$(PSYS_SERVER_MANIFEST_VERSION)/ia_css_psys_server_manifest.c +PSYSAPI_FW_FILES += $(PSYSAPI_DIR)/kernel/src/ia_css_kernel_bitmap.c + +# resource model +PSYSAPI_RESOURCE_MODEL_FILES = $(PSYSAPI_DIR)/resource_model/$(PSYS_RESOURCE_MODEL_VERSION)/vied_nci_psys_resource_model.c + +ifeq ($(PSYS_HAS_DUAL_CMD_CTX_SUPPORT), 1) +PSYSAPI_HOST_CPPFLAGS += -DHAS_DUAL_CMD_CTX_SUPPORT=$(PSYS_HAS_DUAL_CMD_CTX_SUPPORT) +endif diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/psysapi/resource_model/cnlB0/vied_nci_psys_resource_model.c b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/psysapi/resource_model/cnlB0/vied_nci_psys_resource_model.c new file mode 100644 index 0000000000000..20bfb729e6417 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/psysapi/resource_model/cnlB0/vied_nci_psys_resource_model.c @@ -0,0 +1,323 @@ +/* +* Support for Intel Camera Imaging ISP subsystem. 
+ * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ + +#include "vied_nci_psys_resource_model.h" + +/* + * Cell types by cell IDs + */ +const vied_nci_cell_type_ID_t vied_nci_cell_type[VIED_NCI_N_CELL_ID] = { + VIED_NCI_SP_CTRL_TYPE_ID, + VIED_NCI_SP_SERVER_TYPE_ID, + VIED_NCI_SP_SERVER_TYPE_ID, + VIED_NCI_VP_TYPE_ID, + VIED_NCI_VP_TYPE_ID, + VIED_NCI_VP_TYPE_ID, + VIED_NCI_VP_TYPE_ID, + VIED_NCI_ACC_ISA_TYPE_ID, + VIED_NCI_ACC_PSA_TYPE_ID, + VIED_NCI_ACC_PSA_TYPE_ID, + VIED_NCI_ACC_PSA_TYPE_ID, + VIED_NCI_ACC_PSA_TYPE_ID, + VIED_NCI_ACC_PSA_TYPE_ID, + VIED_NCI_ACC_PSA_TYPE_ID, + VIED_NCI_ACC_OSA_TYPE_ID, + VIED_NCI_GDC_TYPE_ID, + VIED_NCI_GDC_TYPE_ID +}; + +/* + * Memory types by memory IDs + */ +const vied_nci_mem_type_ID_t vied_nci_mem_type[VIED_NCI_N_MEM_ID] = { + VIED_NCI_VMEM_TYPE_ID, + VIED_NCI_VMEM_TYPE_ID, + VIED_NCI_VMEM_TYPE_ID, + VIED_NCI_VMEM_TYPE_ID, + VIED_NCI_GMEM_TYPE_ID,/* VMEM4 is GMEM according to vied_nci_cell_mem */ + VIED_NCI_BAMEM_TYPE_ID, + VIED_NCI_BAMEM_TYPE_ID, + VIED_NCI_BAMEM_TYPE_ID, + VIED_NCI_BAMEM_TYPE_ID, + VIED_NCI_DMEM_TYPE_ID, + VIED_NCI_DMEM_TYPE_ID, + VIED_NCI_DMEM_TYPE_ID, + VIED_NCI_DMEM_TYPE_ID, + VIED_NCI_DMEM_TYPE_ID, + VIED_NCI_DMEM_TYPE_ID, + VIED_NCI_DMEM_TYPE_ID, + VIED_NCI_DMEM_TYPE_ID, + VIED_NCI_PMEM_TYPE_ID, + VIED_NCI_PMEM_TYPE_ID, + VIED_NCI_PMEM_TYPE_ID, + VIED_NCI_PMEM_TYPE_ID +}; + +/* + * Cell mem count by cell type ID + */ +const uint16_t vied_nci_N_cell_mem[VIED_NCI_N_CELL_TYPE_ID] = { + VIED_NCI_N_SP_CTRL_MEM, + VIED_NCI_N_SP_SERVER_MEM, + 
VIED_NCI_N_VP_MEM, + VIED_NCI_N_ACC_PSA_MEM, + VIED_NCI_N_ACC_ISA_MEM, + VIED_NCI_N_ACC_OSA_MEM +}; + +/* + * Cell mem type by cell type ID and memory index + */ +const vied_nci_mem_type_ID_t +vied_nci_cell_mem_type[VIED_NCI_N_CELL_TYPE_ID][VIED_NCI_N_MEM_TYPE_ID] = { + { + VIED_NCI_N_MEM_TYPE_ID, + VIED_NCI_DMEM_TYPE_ID, + VIED_NCI_N_MEM_TYPE_ID, + VIED_NCI_N_MEM_TYPE_ID, + VIED_NCI_N_MEM_TYPE_ID + }, + { + VIED_NCI_N_MEM_TYPE_ID, + VIED_NCI_DMEM_TYPE_ID, + VIED_NCI_N_MEM_TYPE_ID, + VIED_NCI_N_MEM_TYPE_ID, + VIED_NCI_N_MEM_TYPE_ID + }, + { + VIED_NCI_GMEM_TYPE_ID, + VIED_NCI_DMEM_TYPE_ID, + VIED_NCI_VMEM_TYPE_ID, + VIED_NCI_BAMEM_TYPE_ID, + VIED_NCI_PMEM_TYPE_ID + }, + { + VIED_NCI_N_MEM_TYPE_ID, + VIED_NCI_N_MEM_TYPE_ID, + VIED_NCI_N_MEM_TYPE_ID, + VIED_NCI_N_MEM_TYPE_ID, + VIED_NCI_N_MEM_TYPE_ID + }, + { + VIED_NCI_N_MEM_TYPE_ID, + VIED_NCI_N_MEM_TYPE_ID, + VIED_NCI_N_MEM_TYPE_ID, + VIED_NCI_N_MEM_TYPE_ID, + VIED_NCI_N_MEM_TYPE_ID + }, + { + VIED_NCI_N_MEM_TYPE_ID, + VIED_NCI_N_MEM_TYPE_ID, + VIED_NCI_N_MEM_TYPE_ID, + VIED_NCI_N_MEM_TYPE_ID, + VIED_NCI_N_MEM_TYPE_ID + }, + { + VIED_NCI_N_MEM_TYPE_ID, + VIED_NCI_N_MEM_TYPE_ID, + VIED_NCI_N_MEM_TYPE_ID, + VIED_NCI_N_MEM_TYPE_ID, + VIED_NCI_N_MEM_TYPE_ID + } +}; + +/* + * Ext mem ID by memory index + */ +const vied_nci_mem_ID_t +vied_nci_ext_mem[VIED_NCI_N_MEM_TYPE_ID] = { + VIED_NCI_VMEM4_ID, /* VIED_NCI_GMEM_TYPE_ID */ + VIED_NCI_N_MEM_ID, + VIED_NCI_N_MEM_ID, + VIED_NCI_N_MEM_ID, + VIED_NCI_N_MEM_ID +}; + +/* + * Cell mem ID by cell ID and memory index + */ +const vied_nci_mem_ID_t +vied_nci_cell_mem[VIED_NCI_N_CELL_ID][VIED_NCI_N_MEM_TYPE_ID] = { + { + VIED_NCI_N_MEM_ID, + VIED_NCI_DMEM0_ID, + VIED_NCI_N_MEM_ID, + VIED_NCI_N_MEM_ID, + VIED_NCI_N_MEM_ID + }, + { + VIED_NCI_N_MEM_ID, + VIED_NCI_DMEM1_ID, + VIED_NCI_N_MEM_ID, + VIED_NCI_N_MEM_ID, + VIED_NCI_N_MEM_ID + }, + { + VIED_NCI_N_MEM_ID, + VIED_NCI_DMEM2_ID, + VIED_NCI_N_MEM_ID, + VIED_NCI_N_MEM_ID, + VIED_NCI_N_MEM_ID + }, + { + VIED_NCI_VMEM4_ID, + 
VIED_NCI_DMEM4_ID, + VIED_NCI_VMEM0_ID, + VIED_NCI_BAMEM0_ID, + VIED_NCI_PMEM0_ID + }, + { + VIED_NCI_VMEM4_ID, + VIED_NCI_DMEM5_ID, + VIED_NCI_VMEM1_ID, + VIED_NCI_BAMEM1_ID, + VIED_NCI_PMEM1_ID + }, + { + VIED_NCI_VMEM4_ID, + VIED_NCI_DMEM6_ID, + VIED_NCI_VMEM2_ID, + VIED_NCI_BAMEM2_ID, + VIED_NCI_PMEM2_ID + }, + { + VIED_NCI_VMEM4_ID, + VIED_NCI_DMEM7_ID, + VIED_NCI_VMEM3_ID, + VIED_NCI_BAMEM3_ID, + VIED_NCI_PMEM3_ID + }, + { + VIED_NCI_N_MEM_ID, + VIED_NCI_N_MEM_ID, + VIED_NCI_N_MEM_ID, + VIED_NCI_N_MEM_ID, + VIED_NCI_N_MEM_ID + }, + { + VIED_NCI_N_MEM_ID, + VIED_NCI_N_MEM_ID, + VIED_NCI_N_MEM_ID, + VIED_NCI_N_MEM_ID, + VIED_NCI_N_MEM_ID + }, + { + VIED_NCI_N_MEM_ID, + VIED_NCI_N_MEM_ID, + VIED_NCI_N_MEM_ID, + VIED_NCI_N_MEM_ID, + VIED_NCI_N_MEM_ID + }, + { + VIED_NCI_N_MEM_ID, + VIED_NCI_N_MEM_ID, + VIED_NCI_N_MEM_ID, + VIED_NCI_N_MEM_ID, + VIED_NCI_N_MEM_ID + }, + { + VIED_NCI_N_MEM_ID, + VIED_NCI_N_MEM_ID, + VIED_NCI_N_MEM_ID, + VIED_NCI_N_MEM_ID, + VIED_NCI_N_MEM_ID + }, + { + VIED_NCI_N_MEM_ID, + VIED_NCI_N_MEM_ID, + VIED_NCI_N_MEM_ID, + VIED_NCI_N_MEM_ID, + VIED_NCI_N_MEM_ID + }, + { + VIED_NCI_N_MEM_ID, + VIED_NCI_N_MEM_ID, + VIED_NCI_N_MEM_ID, + VIED_NCI_N_MEM_ID, + VIED_NCI_N_MEM_ID + }, + { + VIED_NCI_N_MEM_ID, + VIED_NCI_N_MEM_ID, + VIED_NCI_N_MEM_ID, + VIED_NCI_N_MEM_ID, + VIED_NCI_N_MEM_ID + }, + { + VIED_NCI_N_MEM_ID, + VIED_NCI_N_MEM_ID, + VIED_NCI_N_MEM_ID, + VIED_NCI_N_MEM_ID, + VIED_NCI_N_MEM_ID + }, + { + VIED_NCI_N_MEM_ID, + VIED_NCI_N_MEM_ID, + VIED_NCI_N_MEM_ID, + VIED_NCI_N_MEM_ID, + VIED_NCI_N_MEM_ID + } +}; + +/* + * Memory sizes by mem ID + */ +const uint16_t vied_nci_mem_size[VIED_NCI_N_MEM_ID] = { + VIED_NCI_VMEM0_MAX_SIZE, + VIED_NCI_VMEM1_MAX_SIZE, + VIED_NCI_VMEM2_MAX_SIZE, + VIED_NCI_VMEM3_MAX_SIZE, + VIED_NCI_VMEM4_MAX_SIZE, + VIED_NCI_BAMEM0_MAX_SIZE, + VIED_NCI_BAMEM1_MAX_SIZE, + VIED_NCI_BAMEM2_MAX_SIZE, + VIED_NCI_BAMEM3_MAX_SIZE, + VIED_NCI_DMEM0_MAX_SIZE, + VIED_NCI_DMEM1_MAX_SIZE, + VIED_NCI_DMEM2_MAX_SIZE, + 
VIED_NCI_DMEM3_MAX_SIZE, + VIED_NCI_DMEM4_MAX_SIZE, + VIED_NCI_DMEM5_MAX_SIZE, + VIED_NCI_DMEM6_MAX_SIZE, + VIED_NCI_DMEM7_MAX_SIZE, + VIED_NCI_PMEM0_MAX_SIZE, + VIED_NCI_PMEM1_MAX_SIZE, + VIED_NCI_PMEM2_MAX_SIZE, + VIED_NCI_PMEM3_MAX_SIZE +}; + +/* + * Memory word sizes by mem type ID + */ +const uint16_t vied_nci_mem_word_size[VIED_NCI_N_DATA_MEM_TYPE_ID] = { + VIED_NCI_GMEM_WORD_SIZE, + VIED_NCI_DMEM_WORD_SIZE, + VIED_NCI_VMEM_WORD_SIZE, + VIED_NCI_BAMEM_WORD_SIZE +}; + +/* + * Number of channels by device ID + */ +const uint16_t vied_nci_dev_chn_size[VIED_NCI_N_DEV_CHN_ID] = { + VIED_NCI_DEV_CHN_DMA_EXT0_MAX_SIZE, + VIED_NCI_DEV_CHN_GDC_MAX_SIZE, + VIED_NCI_DEV_CHN_DMA_EXT1_READ_MAX_SIZE, + VIED_NCI_DEV_CHN_DMA_EXT1_WRITE_MAX_SIZE, + VIED_NCI_DEV_CHN_DMA_INTERNAL_MAX_SIZE, + VIED_NCI_DEV_CHN_DMA_IPFD_MAX_SIZE, + VIED_NCI_DEV_CHN_DMA_ISA_MAX_SIZE, + VIED_NCI_DEV_CHN_DMA_FW_MAX_SIZE, + VIED_NCI_DEV_CHN_DMA_CMPRS_MAX_SIZE +}; diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/psysapi/resource_model/cnlB0/vied_nci_psys_resource_model.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/psysapi/resource_model/cnlB0/vied_nci_psys_resource_model.h new file mode 100644 index 0000000000000..6249d8af3effc --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/psysapi/resource_model/cnlB0/vied_nci_psys_resource_model.h @@ -0,0 +1,300 @@ +/* +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+*/ + +#ifndef __VIED_NCI_PSYS_RESOURCE_MODEL_H +#define __VIED_NCI_PSYS_RESOURCE_MODEL_H + +#include "type_support.h" +#include "storage_class.h" + +#define HAS_DFM 0 +#define NON_RELOC_RESOURCE_SUPPORT 0 +#define IA_CSS_KERNEL_BITMAP_DO_NOT_USE_ELEMS + +/* Defines for the routing bitmap in the program group manifest. + */ +#define VIED_NCI_RBM_MAX_MUX_COUNT 0 +#define VIED_NCI_RBM_MAX_VALIDATION_RULE_COUNT 0 +#define VIED_NCI_RBM_MAX_TERMINAL_DESC_COUNT 0 +#define N_PADDING_UINT8_IN_RBM_MANIFEST 2 + +/* The amount of padding bytes needed to make + * ia_css_process_s structure 64 bit aligned + */ +#define N_PADDING_UINT8_IN_PROCESS_STRUCT 2 +#define N_PADDING_UINT8_IN_PROGRAM_GROUP_MANFEST 0 + +/** + * Resource model for CNL B0 + */ + +/* + * Cell IDs + */ +typedef enum { + VIED_NCI_SP0_ID = 0, + VIED_NCI_SP1_ID, + VIED_NCI_SP2_ID, + VIED_NCI_VP0_ID, + VIED_NCI_VP1_ID, + VIED_NCI_VP2_ID, + VIED_NCI_VP3_ID, + VIED_NCI_ACC0_ID, + VIED_NCI_ACC1_ID, + VIED_NCI_ACC2_ID, + VIED_NCI_ACC3_ID, + VIED_NCI_ACC4_ID, + VIED_NCI_ACC5_ID, + VIED_NCI_ACC6_ID, + VIED_NCI_ACC7_ID, + VIED_NCI_GDC0_ID, + VIED_NCI_GDC1_ID, + VIED_NCI_N_CELL_ID +} vied_nci_cell_ID_t; + +/* + * Barrier bits (to model process group dependencies) + */ +typedef enum { + VIED_NCI_BARRIER0_ID, + VIED_NCI_BARRIER1_ID, + VIED_NCI_BARRIER2_ID, + VIED_NCI_BARRIER3_ID, + VIED_NCI_BARRIER4_ID, + VIED_NCI_BARRIER5_ID, + VIED_NCI_BARRIER6_ID, + VIED_NCI_BARRIER7_ID, + VIED_NCI_N_BARRIER_ID +} vied_nci_barrier_ID_t; + +/* + * Cell types + */ +typedef enum { + VIED_NCI_SP_CTRL_TYPE_ID = 0, + VIED_NCI_SP_SERVER_TYPE_ID, + VIED_NCI_VP_TYPE_ID, + VIED_NCI_ACC_PSA_TYPE_ID, + VIED_NCI_ACC_ISA_TYPE_ID, + VIED_NCI_ACC_OSA_TYPE_ID, + VIED_NCI_GDC_TYPE_ID, + VIED_NCI_N_CELL_TYPE_ID +} vied_nci_cell_type_ID_t; + +/* + * Memory IDs + */ +typedef enum { + VIED_NCI_VMEM0_ID = 0, + VIED_NCI_VMEM1_ID, + VIED_NCI_VMEM2_ID, + VIED_NCI_VMEM3_ID, + VIED_NCI_VMEM4_ID, + VIED_NCI_BAMEM0_ID, + VIED_NCI_BAMEM1_ID, + VIED_NCI_BAMEM2_ID, + 
VIED_NCI_BAMEM3_ID, + VIED_NCI_DMEM0_ID, + VIED_NCI_DMEM1_ID, + VIED_NCI_DMEM2_ID, + VIED_NCI_DMEM3_ID, + VIED_NCI_DMEM4_ID, + VIED_NCI_DMEM5_ID, + VIED_NCI_DMEM6_ID, + VIED_NCI_DMEM7_ID, + VIED_NCI_PMEM0_ID, + VIED_NCI_PMEM1_ID, + VIED_NCI_PMEM2_ID, + VIED_NCI_PMEM3_ID, + VIED_NCI_N_MEM_ID +} vied_nci_mem_ID_t; + +/* + * Memory types + */ +typedef enum { + VIED_NCI_GMEM_TYPE_ID = 0, + VIED_NCI_DMEM_TYPE_ID, + VIED_NCI_VMEM_TYPE_ID, + VIED_NCI_BAMEM_TYPE_ID, + VIED_NCI_PMEM_TYPE_ID, + VIED_NCI_N_MEM_TYPE_ID +} vied_nci_mem_type_ID_t; + +/* Excluding PMEM */ +#define VIED_NCI_N_DATA_MEM_TYPE_ID (VIED_NCI_N_MEM_TYPE_ID - 1) + +#define VIED_NCI_N_SP_CTRL_MEM 2 +#define VIED_NCI_N_SP_SERVER_MEM 2 +#define VIED_NCI_N_VP_MEM 4 +#define VIED_NCI_N_ACC_PSA_MEM 0 +#define VIED_NCI_N_ACC_ISA_MEM 0 +#define VIED_NCI_N_ACC_OSA_MEM 0 + +#define VIED_NCI_N_VP_CELL 4 +#define VIED_NCI_N_ACC_CELL 8 + +/* + * Device IDs + */ +typedef enum { + VIED_NCI_DEV_CHN_DMA_EXT0_ID = 0, + VIED_NCI_DEV_CHN_GDC_ID, + VIED_NCI_DEV_CHN_DMA_EXT1_READ_ID, + VIED_NCI_DEV_CHN_DMA_EXT1_WRITE_ID, + VIED_NCI_DEV_CHN_DMA_INTERNAL_ID, + VIED_NCI_DEV_CHN_DMA_IPFD_ID, + VIED_NCI_DEV_CHN_DMA_ISA_ID, + VIED_NCI_DEV_CHN_DMA_FW_ID, + VIED_NCI_DEV_CHN_DMA_CMPRS_ID, + VIED_NCI_N_DEV_CHN_ID +} vied_nci_dev_chn_ID_t; + +typedef enum { + DFM_IS_NOT_AVAILABLE +} vied_nci_dev_dfm_id_t; + +#define VIED_NCI_N_DEV_DFM_ID 0 +/* + * Memory size (previously in vied_nci_psys_system.c) + * VMEM: in words, 64 Byte per word. + * BAMEM: in words, 64 Byte per word + * DMEM: in words, 4 Byte per word. + * PMEM: in words, 64 Byte per word. 
+ */ +#define VIED_NCI_GMEM_WORD_SIZE 64 +#define VIED_NCI_DMEM_WORD_SIZE 4 +#define VIED_NCI_VMEM_WORD_SIZE 64 +#define VIED_NCI_BAMEM_WORD_SIZE 64 + +#define VIED_NCI_VMEM0_MAX_SIZE (0x0800) +#define VIED_NCI_VMEM1_MAX_SIZE (0x0800) +#define VIED_NCI_VMEM2_MAX_SIZE (0x0800) +#define VIED_NCI_VMEM3_MAX_SIZE (0x0800) +#define VIED_NCI_VMEM4_MAX_SIZE (0x0800) +#define VIED_NCI_BAMEM0_MAX_SIZE (0x0400) +#define VIED_NCI_BAMEM1_MAX_SIZE (0x0400) +#define VIED_NCI_BAMEM2_MAX_SIZE (0x0400) +#define VIED_NCI_BAMEM3_MAX_SIZE (0x0400) +#define VIED_NCI_DMEM0_MAX_SIZE (0x4000) +#define VIED_NCI_DMEM1_MAX_SIZE (0x1000) +#define VIED_NCI_DMEM2_MAX_SIZE (0x1000) +#define VIED_NCI_DMEM3_MAX_SIZE (0x1000) +#define VIED_NCI_DMEM4_MAX_SIZE (0x1000) +#define VIED_NCI_DMEM5_MAX_SIZE (0x1000) +#define VIED_NCI_DMEM6_MAX_SIZE (0x1000) +#define VIED_NCI_DMEM7_MAX_SIZE (0x1000) +#define VIED_NCI_PMEM0_MAX_SIZE (0x0500) +#define VIED_NCI_PMEM1_MAX_SIZE (0x0500) +#define VIED_NCI_PMEM2_MAX_SIZE (0x0500) +#define VIED_NCI_PMEM3_MAX_SIZE (0x0500) + +/* + * Number of channels per device + */ +#define VIED_NCI_DEV_CHN_DMA_EXT0_MAX_SIZE (30) +#define VIED_NCI_DEV_CHN_GDC_MAX_SIZE (4) +#define VIED_NCI_DEV_CHN_DMA_EXT1_READ_MAX_SIZE (30) +#define VIED_NCI_DEV_CHN_DMA_EXT1_WRITE_MAX_SIZE (20) +#define VIED_NCI_DEV_CHN_DMA_INTERNAL_MAX_SIZE (2) +#define VIED_NCI_DEV_CHN_DMA_IPFD_MAX_SIZE (5) +#define VIED_NCI_DEV_CHN_DMA_ISA_MAX_SIZE (2) +#define VIED_NCI_DEV_CHN_DMA_FW_MAX_SIZE (1) +#define VIED_NCI_DEV_CHN_DMA_CMPRS_MAX_SIZE (6) + +/* + * Storage of the resource and resource type enumerators + */ +#define VIED_NCI_RESOURCE_ID_BITS 8 +typedef uint8_t vied_nci_resource_id_t; + +#define VIED_NCI_RESOURCE_SIZE_BITS 16 +typedef uint16_t vied_nci_resource_size_t; + +#define VIED_NCI_RESOURCE_BITMAP_BITS 32 +typedef uint32_t vied_nci_resource_bitmap_t; + +#define IA_CSS_PROCESS_INVALID_DEPENDENCY ((vied_nci_resource_id_t)(-1)) +#define IA_CSS_PROCESS_INVALID_OFFSET ((vied_nci_resource_size_t)(-1)) 
+#define IA_CSS_PROCESS_MAX_CELLS 1 + +/* + * Resource specifications + * Note that the FAS uses the terminology local/remote memory. In the PSYS API, + * these are called internal/external memory. + */ + +/* resource spec for internal (local) memory */ +struct vied_nci_resource_spec_int_mem_s { + vied_nci_resource_id_t type_id; + vied_nci_resource_size_t size; + vied_nci_resource_size_t offset; +}; + +typedef struct vied_nci_resource_spec_int_mem_s + vied_nci_resource_spec_int_mem_t; + +/* resource spec for external (remote) memory */ +struct vied_nci_resource_spec_ext_mem_s { + vied_nci_resource_id_t type_id; + vied_nci_resource_size_t size; + vied_nci_resource_size_t offset; +}; + +typedef struct vied_nci_resource_spec_ext_mem_s + vied_nci_resource_spec_ext_mem_t; + +/* resource spec for device channel */ +struct vied_nci_resource_spec_dev_chn_s { + vied_nci_resource_id_t type_id; + vied_nci_resource_size_t size; + vied_nci_resource_size_t offset; +}; + +typedef struct vied_nci_resource_spec_dev_chn_s + vied_nci_resource_spec_dev_chn_t; + +/* resource spec for all contiguous resources */ +struct vied_nci_resource_spec_s { + vied_nci_resource_spec_int_mem_t int_mem[VIED_NCI_N_MEM_TYPE_ID]; + vied_nci_resource_spec_ext_mem_t ext_mem[VIED_NCI_N_DATA_MEM_TYPE_ID]; + vied_nci_resource_spec_dev_chn_t dev_chn[VIED_NCI_N_DEV_CHN_ID]; +}; + +typedef struct vied_nci_resource_spec_s vied_nci_resource_spec_t; + +#ifndef PIPE_GENERATION + +extern const vied_nci_cell_type_ID_t vied_nci_cell_type[VIED_NCI_N_CELL_ID]; +extern const vied_nci_mem_type_ID_t vied_nci_mem_type[VIED_NCI_N_MEM_ID]; +extern const uint16_t vied_nci_N_cell_mem[VIED_NCI_N_CELL_TYPE_ID]; +extern const vied_nci_mem_type_ID_t + vied_nci_cell_mem_type[VIED_NCI_N_CELL_TYPE_ID][VIED_NCI_N_MEM_TYPE_ID]; +extern const vied_nci_mem_ID_t + vied_nci_ext_mem[VIED_NCI_N_MEM_TYPE_ID]; +extern const vied_nci_mem_ID_t + vied_nci_cell_mem[VIED_NCI_N_CELL_ID][VIED_NCI_N_MEM_TYPE_ID]; +extern const uint16_t 
vied_nci_mem_size[VIED_NCI_N_MEM_ID]; +extern const uint16_t vied_nci_mem_word_size[VIED_NCI_N_DATA_MEM_TYPE_ID]; +extern const uint16_t vied_nci_dev_chn_size[VIED_NCI_N_DEV_CHN_ID]; + +STORAGE_CLASS_INLINE +uint32_t vied_nci_mem_is_ext_type(const vied_nci_mem_type_ID_t mem_type_id) +{ + return((mem_type_id == VIED_NCI_GMEM_TYPE_ID)); +} + +#endif /* PIPE_GENERATION */ + +#endif /* __VIED_NCI_PSYS_RESOURCE_MODEL_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/psysapi/sim/interface/ia_css_psys_sim_data.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/psysapi/sim/interface/ia_css_psys_sim_data.h new file mode 100644 index 0000000000000..5b053a27686bd --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/psysapi/sim/interface/ia_css_psys_sim_data.h @@ -0,0 +1,50 @@ +/* +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ + +#ifndef __IA_CSS_PSYS_SIM_DATA_H +#define __IA_CSS_PSYS_SIM_DATA_H + +/*! Set the seed if the random number generator + + @param seed[in] Random number generator seed + */ +extern void ia_css_psys_ran_set_seed(const unsigned int seed); + +/*! Generate a random number of a specified bit depth + + @param bit_depth[in] The number of bits of the random output + + @return out, weight(out) <= bit_depth, 0 on error + */ +extern unsigned int ia_css_psys_ran_var(const unsigned int bit_depth); + +/*! 
Generate a random number of a specified range + + @param range[in] The range of the random output + + @return 0 <= out < range, 0 on error + */ +extern unsigned int ia_css_psys_ran_val(const unsigned int range); + +/*! Generate a random number in a specified interval + + @param lo[in] The lower bound of the random output range + @param hi[in] The higher bound of the random output range + + @return lo <= out < hi, 0 on error + */ +extern unsigned int ia_css_psys_ran_interval(const unsigned int lo, + const unsigned int hi); + +#endif /* __IA_CSS_PSYS_SIM_DATA_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/psysapi/sim/interface/ia_css_psys_sim_storage_class.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/psysapi/sim/interface/ia_css_psys_sim_storage_class.h new file mode 100644 index 0000000000000..61095257ec550 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/psysapi/sim/interface/ia_css_psys_sim_storage_class.h @@ -0,0 +1,28 @@ +/* +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+*/ + +#ifndef __IA_CSS_PSYS_SIM_STORAGE_CLASS_H +#define __IA_CSS_PSYS_SIM_STORAGE_CLASS_H + +#include "storage_class.h" + +#ifndef __IA_CSS_PSYS_SIM_INLINE__ +#define IA_CSS_PSYS_SIM_STORAGE_CLASS_H STORAGE_CLASS_EXTERN +#define IA_CSS_PSYS_SIM_STORAGE_CLASS_C +#else +#define IA_CSS_PSYS_SIM_STORAGE_CLASS_H STORAGE_CLASS_INLINE +#define IA_CSS_PSYS_SIM_STORAGE_CLASS_C STORAGE_CLASS_INLINE +#endif + +#endif /* __IA_CSS_PSYS_SIM_STORAGE_CLASS_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/psysapi/sim/interface/ia_css_psys_sim_trace.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/psysapi/sim/interface/ia_css_psys_sim_trace.h new file mode 100644 index 0000000000000..423ff19802707 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/psysapi/sim/interface/ia_css_psys_sim_trace.h @@ -0,0 +1,95 @@ +/* +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+*/ + +#ifndef __IA_CSS_PSYS_SIM_TRACE_H +#define __IA_CSS_PSYS_SIM_TRACE_H + +#include "ia_css_psysapi_trace.h" + +#define PSYS_SIM_TRACE_LEVEL_CONFIG_DEFAULT PSYSAPI_TRACE_LOG_LEVEL_OFF + +/* Default sub-module tracing config */ +#if (!defined(PSYSAPI_SIM_TRACING_OVERRIDE)) + #define PSYS_SIM_TRACE_LEVEL_CONFIG PSYS_SIM_TRACE_LEVEL_CONFIG_DEFAULT +#endif + +/* Module/sub-module specific trace setting will be used if + * the trace level is not specified from the module or + PSYSAPI_SIM_TRACING_OVERRIDE is defined + */ +#if (defined(PSYSAPI_SIM_TRACING_OVERRIDE)) + /* Module/sub-module specific trace setting */ + #if PSYSAPI_SIM_TRACING_OVERRIDE == PSYSAPI_TRACE_LOG_LEVEL_OFF + /* PSYSAPI_TRACE_LOG_LEVEL_OFF */ + #define PSYSAPI_SIM_TRACE_METHOD \ + IA_CSS_TRACE_METHOD_NATIVE + #define PSYSAPI_SIM_TRACE_LEVEL_ASSERT \ + IA_CSS_TRACE_LEVEL_DISABLED + #define PSYSAPI_SIM_TRACE_LEVEL_ERROR \ + IA_CSS_TRACE_LEVEL_DISABLED + #define PSYSAPI_SIM_TRACE_LEVEL_WARNING \ + IA_CSS_TRACE_LEVEL_DISABLED + #define PSYSAPI_SIM_TRACE_LEVEL_INFO \ + IA_CSS_TRACE_LEVEL_DISABLED + #define PSYSAPI_SIM_TRACE_LEVEL_DEBUG \ + IA_CSS_TRACE_LEVEL_DISABLED + #define PSYSAPI_SIM_TRACE_LEVEL_VERBOSE \ + IA_CSS_TRACE_LEVEL_DISABLED + #elif PSYSAPI_SIM_TRACING_OVERRIDE == PSYSAPI_TRACE_LOG_LEVEL_NORMAL + /* PSYSAPI_TRACE_LOG_LEVEL_NORMAL */ + #define PSYSAPI_SIM_TRACE_METHOD \ + IA_CSS_TRACE_METHOD_NATIVE + #define PSYSAPI_SIM_TRACE_LEVEL_ASSERT \ + IA_CSS_TRACE_LEVEL_DISABLED + #define PSYSAPI_SIM_TRACE_LEVEL_ERROR \ + IA_CSS_TRACE_LEVEL_ENABLED + #define PSYSAPI_SIM_TRACE_LEVEL_WARNING \ + IA_CSS_TRACE_LEVEL_DISABLED + #define PSYSAPI_SIM_TRACE_LEVEL_INFO \ + IA_CSS_TRACE_LEVEL_ENABLED + #define PSYSAPI_SIM_TRACE_LEVEL_DEBUG \ + IA_CSS_TRACE_LEVEL_DISABLED + #define PSYSAPI_SIM_TRACE_LEVEL_VERBOSE \ + IA_CSS_TRACE_LEVEL_DISABLED + #elif PSYSAPI_SIM_TRACING_OVERRIDE == PSYSAPI_TRACE_LOG_LEVEL_DEBUG + /* PSYSAPI_TRACE_LOG_LEVEL_DEBUG */ + #define PSYSAPI_SIM_TRACE_METHOD \ + 
IA_CSS_TRACE_METHOD_NATIVE + #define PSYSAPI_SIM_TRACE_LEVEL_ASSERT \ + IA_CSS_TRACE_LEVEL_ENABLED + #define PSYSAPI_SIM_TRACE_LEVEL_ERROR \ + IA_CSS_TRACE_LEVEL_ENABLED + #define PSYSAPI_SIM_TRACE_LEVEL_WARNING \ + IA_CSS_TRACE_LEVEL_ENABLED + #define PSYSAPI_SIM_TRACE_LEVEL_INFO \ + IA_CSS_TRACE_LEVEL_ENABLED + #define PSYSAPI_SIM_TRACE_LEVEL_DEBUG \ + IA_CSS_TRACE_LEVEL_ENABLED + #define PSYSAPI_SIM_TRACE_LEVEL_VERBOSE \ + IA_CSS_TRACE_LEVEL_ENABLED + #else + #error "No PSYSAPI_DATA Tracing level defined" + #endif +#else + /* Inherit Module trace setting */ + #define PSYSAPI_SIM_TRACE_METHOD PSYSAPI_TRACE_METHOD + #define PSYSAPI_SIM_TRACE_LEVEL_ASSERT PSYSAPI_TRACE_LEVEL_ASSERT + #define PSYSAPI_SIM_TRACE_LEVEL_ERROR PSYSAPI_TRACE_LEVEL_ERROR + #define PSYSAPI_SIM_TRACE_LEVEL_WARNING PSYSAPI_TRACE_LEVEL_WARNING + #define PSYSAPI_SIM_TRACE_LEVEL_INFO PSYSAPI_TRACE_LEVEL_INFO + #define PSYSAPI_SIM_TRACE_LEVEL_DEBUG PSYSAPI_TRACE_LEVEL_DEBUG + #define PSYSAPI_SIM_TRACE_LEVEL_VERBOSE PSYSAPI_TRACE_LEVEL_VERBOSE +#endif + +#endif /* __IA_CSS_PSYSAPI_SIM_TRACE_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/psysapi/sim/interface/vied_nci_psys_system_global.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/psysapi/sim/interface/vied_nci_psys_system_global.h new file mode 100644 index 0000000000000..529bea763cc2a --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/psysapi/sim/interface/vied_nci_psys_system_global.h @@ -0,0 +1,180 @@ +/* +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. 
+ * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ + +#ifndef __VIED_NCI_PSYS_SYSTEM_GLOBAL_H +#define __VIED_NCI_PSYS_SYSTEM_GLOBAL_H + +#include +#include "ia_css_base_types.h" +#include "ia_css_psys_sim_storage_class.h" +#include "vied_nci_psys_resource_model.h" + +/* + * Key system types + */ +/* Subsystem internal physical address */ +#define VIED_ADDRESS_BITS 32 + +/* typedef uint32_t vied_address_t; */ + +/* Subsystem internal virtual address */ + +/* Subsystem internal data bus */ +#define VIED_DATA_BITS 32 +typedef uint32_t vied_data_t; + +#define VIED_NULL ((vied_vaddress_t)0) + +IA_CSS_PSYS_SIM_STORAGE_CLASS_H +vied_nci_resource_bitmap_t vied_nci_bit_mask( + const unsigned index); + +IA_CSS_PSYS_SIM_STORAGE_CLASS_H +vied_nci_resource_bitmap_t vied_nci_bitmap_set( + const vied_nci_resource_bitmap_t bitmap, + const vied_nci_resource_bitmap_t bit_mask); + +IA_CSS_PSYS_SIM_STORAGE_CLASS_H +vied_nci_resource_bitmap_t vied_nci_bitmap_clear( + const vied_nci_resource_bitmap_t bitmap, + const vied_nci_resource_bitmap_t bit_mask); + +IA_CSS_PSYS_SIM_STORAGE_CLASS_H +bool vied_nci_is_bitmap_empty( + const vied_nci_resource_bitmap_t bitmap); + +IA_CSS_PSYS_SIM_STORAGE_CLASS_H +bool vied_nci_is_bitmap_set( + const vied_nci_resource_bitmap_t bitmap, + const vied_nci_resource_bitmap_t bit_mask); + +IA_CSS_PSYS_SIM_STORAGE_CLASS_H +bool vied_nci_is_bit_set_in_bitmap( + const vied_nci_resource_bitmap_t bitmap, + const unsigned int index); + +IA_CSS_PSYS_SIM_STORAGE_CLASS_H +bool vied_nci_is_bitmap_clear( + const vied_nci_resource_bitmap_t bitmap, + const vied_nci_resource_bitmap_t bit_mask); + +IA_CSS_PSYS_SIM_STORAGE_CLASS_H +int vied_nci_bitmap_compute_weight( + const vied_nci_resource_bitmap_t bitmap); + +IA_CSS_PSYS_SIM_STORAGE_CLASS_H +vied_nci_resource_bitmap_t 
vied_nci_bitmap_union( + const vied_nci_resource_bitmap_t bitmap0, + const vied_nci_resource_bitmap_t bitmap1); + +IA_CSS_PSYS_SIM_STORAGE_CLASS_H +vied_nci_resource_bitmap_t vied_nci_bitmap_intersection( + const vied_nci_resource_bitmap_t bitmap0, + const vied_nci_resource_bitmap_t bitmap1); + +IA_CSS_PSYS_SIM_STORAGE_CLASS_H +vied_nci_resource_bitmap_t vied_nci_bitmap_xor( + const vied_nci_resource_bitmap_t bitmap0, + const vied_nci_resource_bitmap_t bitmap1); + +IA_CSS_PSYS_SIM_STORAGE_CLASS_H +vied_nci_resource_bitmap_t vied_nci_bitmap_set_unique( + const vied_nci_resource_bitmap_t bitmap, + const vied_nci_resource_bitmap_t bit_mask); + +IA_CSS_PSYS_SIM_STORAGE_CLASS_H +vied_nci_resource_bitmap_t vied_nci_bitfield_mask( + const unsigned int position, + const unsigned int size); + +IA_CSS_PSYS_SIM_STORAGE_CLASS_H +vied_nci_resource_bitmap_t vied_nci_bitmap_set_bitfield( +const vied_nci_resource_bitmap_t bitmap, +const unsigned int index, +const unsigned int size); + +IA_CSS_PSYS_SIM_STORAGE_CLASS_H +vied_nci_resource_bitmap_t vied_nci_bit_mask_set_unique( + const vied_nci_resource_bitmap_t bitmap, + const unsigned index); + +IA_CSS_PSYS_SIM_STORAGE_CLASS_H +vied_nci_resource_bitmap_t vied_nci_cell_bit_mask( + const vied_nci_cell_ID_t cell_id); + +IA_CSS_PSYS_SIM_STORAGE_CLASS_H +vied_nci_resource_bitmap_t vied_nci_barrier_bit_mask( + const vied_nci_barrier_ID_t barrier_id); + +IA_CSS_PSYS_SIM_STORAGE_CLASS_H +vied_nci_cell_type_ID_t vied_nci_cell_get_type( + const vied_nci_cell_ID_t cell_id); + +IA_CSS_PSYS_SIM_STORAGE_CLASS_H +vied_nci_mem_type_ID_t vied_nci_mem_get_type( + const vied_nci_mem_ID_t mem_id); + +IA_CSS_PSYS_SIM_STORAGE_CLASS_H +uint16_t vied_nci_mem_get_size( + const vied_nci_mem_ID_t mem_id); + +IA_CSS_PSYS_SIM_STORAGE_CLASS_H +uint16_t vied_nci_dev_chn_get_size( + const vied_nci_dev_chn_ID_t dev_chn_id); + +IA_CSS_PSYS_SIM_STORAGE_CLASS_H +bool vied_nci_is_cell_of_type( + const vied_nci_cell_ID_t cell_id, + const vied_nci_cell_type_ID_t 
cell_type_id); + +IA_CSS_PSYS_SIM_STORAGE_CLASS_H +bool vied_nci_is_mem_of_type( + const vied_nci_mem_ID_t mem_id, + const vied_nci_mem_type_ID_t mem_type_id); + +IA_CSS_PSYS_SIM_STORAGE_CLASS_H +bool vied_nci_is_cell_mem_of_type( + const vied_nci_cell_ID_t cell_id, + const uint16_t mem_index, + const vied_nci_mem_type_ID_t mem_type_id); + +IA_CSS_PSYS_SIM_STORAGE_CLASS_H +bool vied_nci_has_cell_mem_of_id( + const vied_nci_cell_ID_t cell_id, + const vied_nci_mem_ID_t mem_id); + +IA_CSS_PSYS_SIM_STORAGE_CLASS_H +uint16_t vied_nci_cell_get_mem_count( + const vied_nci_cell_ID_t cell_id); + +IA_CSS_PSYS_SIM_STORAGE_CLASS_H +vied_nci_mem_type_ID_t vied_nci_cell_get_mem_type( + const vied_nci_cell_ID_t cell_id, + const uint16_t mem_index); + +IA_CSS_PSYS_SIM_STORAGE_CLASS_H +vied_nci_mem_ID_t vied_nci_cell_get_mem( + const vied_nci_cell_ID_t cell_id, + const uint16_t mem_index); + +IA_CSS_PSYS_SIM_STORAGE_CLASS_H +vied_nci_mem_type_ID_t vied_nci_cell_type_get_mem_type( + const vied_nci_cell_type_ID_t cell_type_id, + const uint16_t mem_index); + +#ifdef __IA_CSS_PSYS_SIM_INLINE__ +#include "psys_system_global_impl.h" +#endif /* __IA_CSS_PSYS_SIM_INLINE__ */ + +#endif /* __VIED_NCI_PSYS_SYSTEM_GLOBAL_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/psysapi/sim/src/ia_css_psys_sim_data.c b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/psysapi/sim/src/ia_css_psys_sim_data.c new file mode 100644 index 0000000000000..6dccac8238719 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/psysapi/sim/src/ia_css_psys_sim_data.c @@ -0,0 +1,91 @@ +/* +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. 
+ * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ + + +#include + +#include "ia_css_psys_sim_trace.h" + +static unsigned int ia_css_psys_ran_seed; + +void ia_css_psys_ran_set_seed(const unsigned int seed) +{ + ia_css_psys_ran_seed = seed; + + IA_CSS_TRACE_0(PSYSAPI_SIM, VERBOSE, + "ia_css_psys_ran_set_seed(): enter:\n"); + +} + +static unsigned int ia_css_psys_ran_int (void) +{ + ia_css_psys_ran_seed = 1664525UL * ia_css_psys_ran_seed + 1013904223UL; + return ia_css_psys_ran_seed; +} + +unsigned int ia_css_psys_ran_var(const unsigned int bit_depth) +{ + unsigned int out; + unsigned int tmp; + + IA_CSS_TRACE_0(PSYSAPI_SIM, VERBOSE, "ia_css_psys_ran_var(): enter:\n"); + + tmp = ia_css_psys_ran_int(); + + if (bit_depth > 32) + out = tmp; + else if (bit_depth == 0) + out = 0; + else + out = (unsigned short)(tmp >> (32 - bit_depth)); + + return out; +} + +unsigned int ia_css_psys_ran_val(const unsigned int range) +{ + unsigned int out; + unsigned int tmp; + + IA_CSS_TRACE_0(PSYSAPI_SIM, VERBOSE, "ia_css_psys_ran_val(): enter:\n"); + + tmp = ia_css_psys_ran_int(); + + if (range > 1) + out = tmp % range; + else + out = 0; + + return out; +} + +unsigned int ia_css_psys_ran_interval(const unsigned int lo, + const unsigned int hi) +{ + unsigned int out; + unsigned int tmp; + unsigned int range = hi - lo; + + IA_CSS_TRACE_0(PSYSAPI_SIM, VERBOSE, + "ia_css_psys_ran_interval(): enter:\n"); + + tmp = ia_css_psys_ran_int(); + + if ((range > 1) && (lo < hi)) + out = lo + (tmp % range); + else + out = 0; + + return out; +} diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/psysapi/sim/src/psys_system_global_impl.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/psysapi/sim/src/psys_system_global_impl.h new file mode 100644 index 0000000000000..ff51175548ec0 
--- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/psysapi/sim/src/psys_system_global_impl.h @@ -0,0 +1,485 @@ +/* +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ + +#ifndef __PSYS_SYSTEM_GLOBAL_IMPL_H +#define __PSYS_SYSTEM_GLOBAL_IMPL_H + +#include + +#include "ia_css_psys_sim_trace.h" +#include + +/* Use vied_bits instead, however for test purposes we uses explicit type + * checking + */ +IA_CSS_PSYS_SIM_STORAGE_CLASS_C +vied_nci_resource_bitmap_t vied_nci_bit_mask( + const unsigned int index) +{ + vied_nci_resource_bitmap_t bit_mask = 0; + + IA_CSS_TRACE_0(PSYSAPI_SIM, VERBOSE, "vied_nci_bit_mask(): enter:\n"); + + if (index < VIED_NCI_RESOURCE_BITMAP_BITS) + bit_mask = (vied_nci_resource_bitmap_t)1 << index; + + return bit_mask; +} + +IA_CSS_PSYS_SIM_STORAGE_CLASS_C +vied_nci_resource_bitmap_t vied_nci_bitmap_set( + const vied_nci_resource_bitmap_t bitmap, + const vied_nci_resource_bitmap_t bit_mask) +{ + + IA_CSS_TRACE_0(PSYSAPI_SIM, VERBOSE, "vied_nci_bitmap_set(): enter:\n"); + +/* + assert(vied_nci_is_bitmap_one_hot(bit_mask)); +*/ + return bitmap | bit_mask; +} + +IA_CSS_PSYS_SIM_STORAGE_CLASS_C +vied_nci_resource_bitmap_t vied_nci_bitmap_clear( + const vied_nci_resource_bitmap_t bitmap, + const vied_nci_resource_bitmap_t bit_mask) +{ + + IA_CSS_TRACE_0(PSYSAPI_SIM, VERBOSE, + "vied_nci_bitmap_clear(): enter:\n"); + +/* + assert(vied_nci_is_bitmap_one_hot(bit_mask)); +*/ + return bitmap & (~bit_mask); +} + 
+IA_CSS_PSYS_SIM_STORAGE_CLASS_C +vied_nci_resource_bitmap_t vied_nci_bitfield_mask( + const unsigned int position, + const unsigned int size) +{ + vied_nci_resource_bitmap_t bit_mask = 0; + vied_nci_resource_bitmap_t ones = (vied_nci_resource_bitmap_t)-1; + + IA_CSS_TRACE_0(PSYSAPI_SIM, VERBOSE, + "vied_nci_bitfield_mask(): enter:\n"); + + if (position < VIED_NCI_RESOURCE_BITMAP_BITS) + bit_mask = (ones >> (sizeof(vied_nci_resource_bitmap_t) - size)) << position; + + return bit_mask; +} + +IA_CSS_PSYS_SIM_STORAGE_CLASS_C +vied_nci_resource_bitmap_t vied_nci_bitmap_set_bitfield( + const vied_nci_resource_bitmap_t bitmap, + const unsigned int index, + const unsigned int size) +{ + vied_nci_resource_bitmap_t ret = 0; + vied_nci_resource_bitmap_t bit_mask = 0; + + IA_CSS_TRACE_0(PSYSAPI_SIM, VERBOSE, + "vied_nci_bit_mask_set_bitfield(): enter:\n"); + + bit_mask = vied_nci_bitfield_mask(index, size); + ret = vied_nci_bitmap_set(bitmap, bit_mask); + + return ret; +} + + +IA_CSS_PSYS_SIM_STORAGE_CLASS_C +vied_nci_resource_bitmap_t vied_nci_bitmap_set_unique( + const vied_nci_resource_bitmap_t bitmap, + const vied_nci_resource_bitmap_t bit_mask) +{ + vied_nci_resource_bitmap_t ret = 0; + + IA_CSS_TRACE_0(PSYSAPI_SIM, VERBOSE, + "vied_nci_bitmap_set_unique(): enter:\n"); + + if ((bitmap & bit_mask) == 0) + ret = bitmap | bit_mask; + + return ret; +} + +IA_CSS_PSYS_SIM_STORAGE_CLASS_C +vied_nci_resource_bitmap_t vied_nci_bit_mask_set_unique( + const vied_nci_resource_bitmap_t bitmap, + const unsigned int index) +{ + vied_nci_resource_bitmap_t ret = 0; + vied_nci_resource_bitmap_t bit_mask; + + IA_CSS_TRACE_0(PSYSAPI_SIM, VERBOSE, + "vied_nci_bit_mask_set_unique(): enter:\n"); + + bit_mask = vied_nci_bit_mask(index); + + if (((bitmap & bit_mask) == 0) && (bit_mask != 0)) + ret = bitmap | bit_mask; + + return ret; +} + +IA_CSS_PSYS_SIM_STORAGE_CLASS_C +bool vied_nci_is_bitmap_empty( + const vied_nci_resource_bitmap_t bitmap) +{ + + IA_CSS_TRACE_0(PSYSAPI_SIM, VERBOSE, + 
"vied_nci_is_bitmap_empty(): enter:\n"); + + return (bitmap == 0); +} + +IA_CSS_PSYS_SIM_STORAGE_CLASS_C +bool vied_nci_is_bitmap_set( + const vied_nci_resource_bitmap_t bitmap, + const vied_nci_resource_bitmap_t bit_mask) +{ + + IA_CSS_TRACE_0(PSYSAPI_SIM, VERBOSE, + "vied_nci_is_bitmap_set(): enter:\n"); + +/* + assert(vied_nci_is_bitmap_one_hot(bit_mask)); +*/ + return !vied_nci_is_bitmap_clear(bitmap, bit_mask); +} + +IA_CSS_PSYS_SIM_STORAGE_CLASS_C +bool vied_nci_is_bit_set_in_bitmap( + const vied_nci_resource_bitmap_t bitmap, + const unsigned int index) +{ + + vied_nci_resource_bitmap_t bitmask; + + IA_CSS_TRACE_0(PSYSAPI_SIM, VERBOSE, + "vied_nci_is_bit_set_in_bitmap(): enter:\n"); + bitmask = vied_nci_bit_mask(index); + return vied_nci_is_bitmap_set(bitmap, bitmask); +} + +IA_CSS_PSYS_SIM_STORAGE_CLASS_C +bool vied_nci_is_bitmap_clear( + const vied_nci_resource_bitmap_t bitmap, + const vied_nci_resource_bitmap_t bit_mask) +{ + + IA_CSS_TRACE_0(PSYSAPI_SIM, VERBOSE, + "vied_nci_is_bitmap_clear(): enter:\n"); + +/* + assert(vied_nci_is_bitmap_one_hot(bit_mask)); +*/ + return ((bitmap & bit_mask) == 0); +} + +IA_CSS_PSYS_SIM_STORAGE_CLASS_C +int vied_nci_bitmap_compute_weight( + const vied_nci_resource_bitmap_t bitmap) +{ + vied_nci_resource_bitmap_t loc_bitmap = bitmap; + int weight = 0; + int i; + + IA_CSS_TRACE_0(PSYSAPI_SIM, VERBOSE, + "vied_nci_bitmap_compute_weight(): enter:\n"); + + /* Do not need the iterator "i" */ + for (i = 0; (i < VIED_NCI_RESOURCE_BITMAP_BITS) && + (loc_bitmap != 0); i++) { + weight += loc_bitmap & 0x01; + loc_bitmap >>= 1; + } + + return weight; +} + +IA_CSS_PSYS_SIM_STORAGE_CLASS_C +vied_nci_resource_bitmap_t vied_nci_bitmap_union( + const vied_nci_resource_bitmap_t bitmap0, + const vied_nci_resource_bitmap_t bitmap1) +{ + IA_CSS_TRACE_0(PSYSAPI_SIM, VERBOSE, + "vied_nci_bitmap_union(): enter:\n"); + return (bitmap0 | bitmap1); +} + +IA_CSS_PSYS_SIM_STORAGE_CLASS_C +vied_nci_resource_bitmap_t vied_nci_bitmap_intersection( + 
const vied_nci_resource_bitmap_t bitmap0, + const vied_nci_resource_bitmap_t bitmap1) +{ + IA_CSS_TRACE_0(PSYSAPI_SIM, VERBOSE, + "ia_css_kernel_bitmap_intersection(): enter:\n"); + return (bitmap0 & bitmap1); +} + +IA_CSS_PSYS_SIM_STORAGE_CLASS_C +vied_nci_resource_bitmap_t vied_nci_bitmap_xor( + const vied_nci_resource_bitmap_t bitmap0, + const vied_nci_resource_bitmap_t bitmap1) +{ + IA_CSS_TRACE_0(PSYSAPI_SIM, VERBOSE, "vied_nci_bitmap_xor(): enter:\n"); + return (bitmap0 ^ bitmap1); +} + +IA_CSS_PSYS_SIM_STORAGE_CLASS_C +vied_nci_resource_bitmap_t vied_nci_cell_bit_mask( + const vied_nci_cell_ID_t cell_id) +{ + vied_nci_resource_bitmap_t bit_mask = 0; + + IA_CSS_TRACE_0(PSYSAPI_SIM, VERBOSE, + "vied_nci_cell_bit_mask(): enter:\n"); + + if ((cell_id < VIED_NCI_N_CELL_ID) && + (cell_id < VIED_NCI_RESOURCE_BITMAP_BITS)) { + bit_mask = (vied_nci_resource_bitmap_t)1 << cell_id; + } + return bit_mask; +} + +IA_CSS_PSYS_SIM_STORAGE_CLASS_C +vied_nci_resource_bitmap_t vied_nci_barrier_bit_mask( + const vied_nci_barrier_ID_t barrier_id) +{ + vied_nci_resource_bitmap_t bit_mask = 0; + + IA_CSS_TRACE_0(PSYSAPI_SIM, VERBOSE, + "vied_nci_barrier_bit_mask(): enter:\n"); + + if ((barrier_id < VIED_NCI_N_BARRIER_ID) && + ((barrier_id + VIED_NCI_N_CELL_ID) < VIED_NCI_RESOURCE_BITMAP_BITS)) { + bit_mask = (vied_nci_resource_bitmap_t)1 << + (barrier_id + VIED_NCI_N_CELL_ID); + } + return bit_mask; +} + +IA_CSS_PSYS_SIM_STORAGE_CLASS_C +vied_nci_cell_type_ID_t vied_nci_cell_get_type( + const vied_nci_cell_ID_t cell_id) +{ + vied_nci_cell_type_ID_t cell_type = VIED_NCI_N_CELL_TYPE_ID; + + IA_CSS_TRACE_0(PSYSAPI_SIM, VERBOSE, + "vied_nci_cell_get_type(): enter:\n"); + + if (cell_id < VIED_NCI_N_CELL_ID) { + cell_type = vied_nci_cell_type[cell_id]; + } else { + IA_CSS_TRACE_0(PSYSAPI_SIM, WARNING, + "vied_nci_cell_get_type(): invalid argument\n"); + } + + return cell_type; +} + +IA_CSS_PSYS_SIM_STORAGE_CLASS_C +vied_nci_mem_type_ID_t vied_nci_mem_get_type( + const vied_nci_mem_ID_t 
mem_id) +{ + vied_nci_mem_type_ID_t mem_type = VIED_NCI_N_MEM_TYPE_ID; + + IA_CSS_TRACE_0(PSYSAPI_SIM, VERBOSE, + "vied_nci_mem_get_type(): enter:\n"); + + if (mem_id < VIED_NCI_N_MEM_ID) { + mem_type = vied_nci_mem_type[mem_id]; + } else { + IA_CSS_TRACE_0(PSYSAPI_SIM, WARNING, + "vied_nci_mem_get_type(): invalid argument\n"); + } + + return mem_type; +} + +IA_CSS_PSYS_SIM_STORAGE_CLASS_C +uint16_t vied_nci_mem_get_size( + const vied_nci_mem_ID_t mem_id) +{ + uint16_t mem_size = 0; + + IA_CSS_TRACE_0(PSYSAPI_SIM, VERBOSE, + "vied_nci_mem_get_size(): enter:\n"); + + if (mem_id < VIED_NCI_N_MEM_ID) { + mem_size = vied_nci_mem_size[mem_id]; + } else { + IA_CSS_TRACE_0(PSYSAPI_SIM, WARNING, + "vied_nci_mem_get_size(): invalid argument\n"); + } + + return mem_size; +} + +IA_CSS_PSYS_SIM_STORAGE_CLASS_C +uint16_t vied_nci_dev_chn_get_size( + const vied_nci_dev_chn_ID_t dev_chn_id) +{ + uint16_t dev_chn_size = 0; + + IA_CSS_TRACE_0(PSYSAPI_SIM, VERBOSE, + "vied_nci_dev_chn_get_size(): enter:\n"); + + if (dev_chn_id < VIED_NCI_N_DEV_CHN_ID) { + dev_chn_size = vied_nci_dev_chn_size[dev_chn_id]; + } else { + IA_CSS_TRACE_0(PSYSAPI_SIM, WARNING, + "vied_nci_dev_chn_get_size(): invalid argument\n"); + } + + return dev_chn_size; +} + +IA_CSS_PSYS_SIM_STORAGE_CLASS_C +bool vied_nci_is_cell_of_type( + const vied_nci_cell_ID_t cell_id, + const vied_nci_cell_type_ID_t cell_type_id) +{ + IA_CSS_TRACE_0(PSYSAPI_SIM, VERBOSE, + "vied_nci_is_cell_of_type(): enter:\n"); + + return ((vied_nci_cell_get_type(cell_id) == + cell_type_id) && (cell_type_id != + VIED_NCI_N_CELL_TYPE_ID)); +} + +IA_CSS_PSYS_SIM_STORAGE_CLASS_C +bool vied_nci_is_mem_of_type( + const vied_nci_mem_ID_t mem_id, + const vied_nci_mem_type_ID_t mem_type_id) +{ + IA_CSS_TRACE_0(PSYSAPI_SIM, VERBOSE, + "vied_nci_is_mem_of_type(): enter:\n"); + + return ((vied_nci_mem_get_type(mem_id) == mem_type_id) && + (mem_type_id != VIED_NCI_N_MEM_TYPE_ID)); +} + +IA_CSS_PSYS_SIM_STORAGE_CLASS_C +bool vied_nci_is_cell_mem_of_type( + 
const vied_nci_cell_ID_t cell_id, + const uint16_t mem_index, + const vied_nci_mem_type_ID_t mem_type_id) +{ + IA_CSS_TRACE_0(PSYSAPI_SIM, VERBOSE, + "vied_nci_is_cell_mem_of_type(): enter:\n"); + + return ((vied_nci_cell_get_mem_type(cell_id, mem_index) == mem_type_id) + && (mem_type_id != VIED_NCI_N_MEM_TYPE_ID)); +} + +IA_CSS_PSYS_SIM_STORAGE_CLASS_C +bool vied_nci_has_cell_mem_of_id( + const vied_nci_cell_ID_t cell_id, + const vied_nci_mem_ID_t mem_id) +{ + uint16_t mem_index; + + IA_CSS_TRACE_0(PSYSAPI_SIM, VERBOSE, + "vied_nci_has_cell_mem_of_id(): enter:\n"); + + for (mem_index = 0; mem_index < VIED_NCI_N_MEM_TYPE_ID; mem_index++) { + if ((vied_nci_cell_get_mem(cell_id, mem_index) == mem_id) && + (mem_id != VIED_NCI_N_MEM_ID)) { + break; + } + } + + return (mem_index < VIED_NCI_N_MEM_TYPE_ID); +} + +IA_CSS_PSYS_SIM_STORAGE_CLASS_C +uint16_t vied_nci_cell_get_mem_count( + const vied_nci_cell_ID_t cell_id) +{ + uint16_t mem_count = 0; + vied_nci_cell_type_ID_t cell_type; + + IA_CSS_TRACE_0(PSYSAPI_SIM, VERBOSE, + "vied_nci_cell_get_mem_count(): enter:\n"); + + cell_type = vied_nci_cell_get_type(cell_id); + + if (cell_type < VIED_NCI_N_CELL_TYPE_ID) + mem_count = vied_nci_N_cell_mem[cell_type]; + + return mem_count; +} + +IA_CSS_PSYS_SIM_STORAGE_CLASS_C +vied_nci_mem_type_ID_t vied_nci_cell_get_mem_type( + const vied_nci_cell_ID_t cell_id, + const uint16_t mem_index) +{ + vied_nci_mem_type_ID_t mem_type = VIED_NCI_N_MEM_TYPE_ID; + + IA_CSS_TRACE_0(PSYSAPI_SIM, VERBOSE, + "vied_nci_cell_get_mem_type(): enter:\n"); + + if ((cell_id < VIED_NCI_N_CELL_ID) && + (mem_index < VIED_NCI_N_MEM_TYPE_ID)) { + mem_type = vied_nci_cell_mem_type[ + vied_nci_cell_get_type(cell_id)][mem_index]; + } + + return mem_type; +} + +IA_CSS_PSYS_SIM_STORAGE_CLASS_C +vied_nci_mem_ID_t vied_nci_cell_get_mem( + const vied_nci_cell_ID_t cell_id, + const uint16_t mem_index) +{ + vied_nci_mem_ID_t mem_id = VIED_NCI_N_MEM_ID; + + IA_CSS_TRACE_0(PSYSAPI_SIM, VERBOSE, + "vied_nci_cell_get_mem(): 
enter:\n"); + + if ((cell_id < VIED_NCI_N_CELL_ID) && + (mem_index < VIED_NCI_N_MEM_TYPE_ID)) { + mem_id = vied_nci_cell_mem[cell_id][mem_index]; + } + + return mem_id; +} + +IA_CSS_PSYS_SIM_STORAGE_CLASS_C +vied_nci_mem_type_ID_t vied_nci_cell_type_get_mem_type( + const vied_nci_cell_type_ID_t cell_type_id, + const uint16_t mem_index) +{ + vied_nci_mem_type_ID_t mem_type = VIED_NCI_N_MEM_TYPE_ID; + + IA_CSS_TRACE_0(PSYSAPI_SIM, VERBOSE, + "vied_nci_cell_type_get_mem_type(): enter:\n"); + + if ((cell_type_id < VIED_NCI_N_CELL_TYPE_ID) + && (mem_index < VIED_NCI_N_MEM_TYPE_ID)) { + mem_type = vied_nci_cell_mem_type[cell_type_id][mem_index]; + } + + return mem_type; +} + +#endif /* __PSYS_SYSTEM_GLOBAL_IMPL_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/psysapi/sim/src/vied_nci_psys_system.c b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/psysapi/sim/src/vied_nci_psys_system.c new file mode 100644 index 0000000000000..b0e0aebb6e774 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/psysapi/sim/src/vied_nci_psys_system.c @@ -0,0 +1,26 @@ +/* +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+*/ + +#include "ia_css_psys_sim_storage_class.h" + +/* + * Functions to possibly inline + */ + +#ifdef __IA_CSS_PSYS_SIM_INLINE__ +STORAGE_CLASS_INLINE int +__ia_css_psys_system_global_avoid_warning_on_empty_file(void) { return 0; } +#else /* __IA_CSS_PSYS_SIM_INLINE__ */ +#include "psys_system_global_impl.h" +#endif /* __IA_CSS_PSYS_SIM_INLINE__ */ diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/psysapi/static/interface/ia_css_psys_manifest_types.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/psysapi/static/interface/ia_css_psys_manifest_types.h new file mode 100644 index 0000000000000..4a2f96e9405e8 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/psysapi/static/interface/ia_css_psys_manifest_types.h @@ -0,0 +1,102 @@ +/* +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ + +#ifndef __IA_CSS_PSYS_MANIFEST_TYPES_H +#define __IA_CSS_PSYS_MANIFEST_TYPES_H + +/*! 
\file */ + +/** @file ia_css_psys_manifest_types.h + * + * The types belonging to the terminal/program/ + * program group manifest static module + */ + +#include +#include "vied_nci_psys_resource_model.h" + + +/* This value is used in the manifest to indicate that the resource + * offset field must be ignored and the resource is relocatable + */ +#define IA_CSS_PROGRAM_MANIFEST_RESOURCE_OFFSET_IS_RELOCATABLE ((vied_nci_resource_size_t)(-1)) + +/* + * Connection type defining the interface source/sink + * + * Note that the connection type does not define the + * real-time configuration of the system, i.e. it + * does not describe whether a source and sink + * program group or sub-system operate synchronously + * that is a program script property {online, offline} + * (see FAS 5.16.3) + */ +#define IA_CSS_CONNECTION_BITMAP_BITS 8 +typedef uint8_t ia_css_connection_bitmap_t; + +#define IA_CSS_CONNECTION_TYPE_BITS 32 +typedef enum ia_css_connection_type { + /**< The terminal is in DDR */ + IA_CSS_CONNECTION_MEMORY = 0, + /**< The terminal is a (watermark) queued stream over DDR */ + IA_CSS_CONNECTION_MEMORY_STREAM, + /* The terminal is a device port */ + IA_CSS_CONNECTION_STREAM, + IA_CSS_N_CONNECTION_TYPES +} ia_css_connection_type_t; + +#define IA_CSS_PROGRAM_TYPE_BITS 32 +typedef enum ia_css_program_type { + IA_CSS_PROGRAM_TYPE_SINGULAR = 0, + IA_CSS_PROGRAM_TYPE_EXCLUSIVE_SUB, + IA_CSS_PROGRAM_TYPE_EXCLUSIVE_SUPER, + IA_CSS_PROGRAM_TYPE_PARALLEL_SUB, + IA_CSS_PROGRAM_TYPE_PARALLEL_SUPER, + IA_CSS_PROGRAM_TYPE_VIRTUAL_SUB, + IA_CSS_PROGRAM_TYPE_VIRTUAL_SUPER, +/* + * Future extension; A bitmap coding starts making more sense + * + IA_CSS_PROGRAM_TYPE_EXCLUSIVE_SUB_PARALLEL_SUB, + IA_CSS_PROGRAM_TYPE_EXCLUSIVE_SUB_PARALLEL_SUPER, + IA_CSS_PROGRAM_TYPE_EXCLUSIVE_SUPER_PARALLEL_SUB, + IA_CSS_PROGRAM_TYPE_EXCLUSIVE_SUPER_PARALLEL_SUPER, + */ + IA_CSS_N_PROGRAM_TYPES +} ia_css_program_type_t; + +#define IA_CSS_PROGRAM_GROUP_ID_BITS 32 +typedef uint32_t 
ia_css_program_group_ID_t; +#define IA_CSS_PROGRAM_ID_BITS 32 +typedef uint32_t ia_css_program_ID_t; + +#define IA_CSS_PROGRAM_INVALID_ID ((uint32_t)(-1)) +#define IA_CSS_PROGRAM_GROUP_INVALID_ID ((uint32_t)(-1)) + +typedef struct ia_css_program_group_manifest_s +ia_css_program_group_manifest_t; +typedef struct ia_css_program_manifest_s +ia_css_program_manifest_t; +typedef struct ia_css_data_terminal_manifest_s +ia_css_data_terminal_manifest_t; + +/* ============ Program Control Init Terminal Manifest - START ============ */ +typedef struct ia_css_program_control_init_manifest_program_desc_s + ia_css_program_control_init_manifest_program_desc_t; + +typedef struct ia_css_program_control_init_terminal_manifest_s + ia_css_program_control_init_terminal_manifest_t; +/* ============ Program Control Init Terminal Manifest - END ============ */ + +#endif /* __IA_CSS_PSYS_MANIFEST_TYPES_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/psysapi/static/interface/ia_css_psys_program_group_manifest.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/psysapi/static/interface/ia_css_psys_program_group_manifest.h new file mode 100644 index 0000000000000..ee8321ea1f12b --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/psysapi/static/interface/ia_css_psys_program_group_manifest.h @@ -0,0 +1,311 @@ +/* +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+*/ + +#ifndef __IA_CSS_PSYS_PROGRAM_GROUP_MANIFEST_H +#define __IA_CSS_PSYS_PROGRAM_GROUP_MANIFEST_H + +#include "ia_css_psys_static_storage_class.h" + +/*! \file */ + +/** @file ia_css_psys_program_group_manifest.h + * + * Define the methods on the program group manifest object that are not part of + * a single interface + */ + +#include + +#include /* uint8_t */ + +#include + +#include + +#include /* ia_css_kernel_bitmap_t */ +#include "ia_css_terminal_manifest.h" +#include "ia_css_rbm_manifest_types.h" + +#define IA_CSS_PROGRAM_GROUP_INVALID_ALIGNMENT ((uint8_t)(-1)) + +/*! Get the stored size of the program group manifest object + + @param manifest[in] program group manifest object + + @return size, 0 on invalid argument + */ +IA_CSS_PSYS_STATIC_STORAGE_CLASS_H +size_t ia_css_program_group_manifest_get_size( + const ia_css_program_group_manifest_t *manifest); + +/*! Get the program group ID of the program group manifest object + + @param manifest[in] program group manifest object + + @return program group ID, IA_CSS_PROGRAM_GROUP_INVALID_ID on invalid argument +*/ +IA_CSS_PSYS_STATIC_STORAGE_CLASS_H +ia_css_program_group_ID_t +ia_css_program_group_manifest_get_program_group_ID( + const ia_css_program_group_manifest_t *manifest); + +/*! Set the program group ID of the program group manifest object + + @param manifest[in] program group manifest object + + @param program group ID + + @return 0 on success, -1 on invalid manifest argument + */ +IA_CSS_PSYS_STATIC_STORAGE_CLASS_H +int ia_css_program_group_manifest_set_program_group_ID( + ia_css_program_group_manifest_t *manifest, + ia_css_program_group_ID_t id); + +/*! 
Get the storage alignment constraint of the program group binary data + + @param manifest[in] program group manifest object + + @return alignment, IA_CSS_PROGRAM_GROUP_INVALID_ALIGNMENT on invalid manifest + argument +*/ +IA_CSS_PSYS_STATIC_STORAGE_CLASS_H +uint8_t ia_css_program_group_manifest_get_alignment( + const ia_css_program_group_manifest_t *manifest); + +/*! Set the storage alignment constraint of the program group binary data + + @param manifest[in] program group manifest object + @param alignment[in] alignment desired + + @return < 0 on invalid manifest argument + */ +IA_CSS_PSYS_STATIC_STORAGE_CLASS_H +int ia_css_program_group_manifest_set_alignment( + ia_css_program_group_manifest_t *manifest, + const uint8_t alignment); + +/*! Get the kernel enable bitmap of the program group + + @param manifest[in] program group manifest object + + @return bitmap, 0 on invalid manifest argument + */ +extern ia_css_kernel_bitmap_t +ia_css_program_group_manifest_get_kernel_bitmap( + const ia_css_program_group_manifest_t *manifest); + +/*! Set the kernel enable bitmap of the program group + + @param manifest[in] program group manifest object + @param kernel bitmap[in] kernel enable bitmap + + @return < 0 on invalid manifest argument + */ +extern int ia_css_program_group_manifest_set_kernel_bitmap( + ia_css_program_group_manifest_t *manifest, + const ia_css_kernel_bitmap_t bitmap); + +/*! Get the number of programs in the program group manifest object + + @param manifest[in] program group manifest object + + @return program count, 0 on invalid manifest argument + */ +IA_CSS_PSYS_STATIC_STORAGE_CLASS_H +uint8_t ia_css_program_group_manifest_get_program_count( + const ia_css_program_group_manifest_t *manifest); + +/*! 
Get the number of terminals in the program group manifest object + + @param manifest[in] program group manifest object + + @return terminal count, 0 on invalid manifest argument + */ +IA_CSS_PSYS_STATIC_STORAGE_CLASS_H +uint8_t ia_css_program_group_manifest_get_terminal_count( + const ia_css_program_group_manifest_t *manifest); + +/*! Get the (pointer to) private data blob in the manifest + + @param manifest[in] program group manifest object + + @return private data blob, NULL on invalid manifest argument + */ +IA_CSS_PSYS_STATIC_STORAGE_CLASS_H +void *ia_css_program_group_manifest_get_private_data( + const ia_css_program_group_manifest_t *manifest); + +/*! Get the (pointer to) routing bitmap (rbm) manifest + + @param manifest[in] program group manifest object + + @return rbm manifest, NULL on invalid manifest argument + */ +IA_CSS_PSYS_STATIC_STORAGE_CLASS_H +ia_css_rbm_manifest_t * +ia_css_program_group_manifest_get_rbm_manifest( + const ia_css_program_group_manifest_t *manifest); + +/*! Get the (pointer to) indexed program manifest in the program group manifest + * object + + @param manifest[in] program group manifest object + @param program_index[in] index of the program manifest object + + @return program manifest, NULL on invalid arguments + */ +IA_CSS_PSYS_STATIC_STORAGE_CLASS_H +ia_css_program_manifest_t * +ia_css_program_group_manifest_get_prgrm_mnfst( + const ia_css_program_group_manifest_t *manifest, + const unsigned int program_index); + +/*! Get the (pointer to) indexed terminal manifest in the program group + * manifest object + + @param manifest[in] program group manifest object + @param program_index[in] index of the terminal manifest object + + @return terminal manifest, NULL on invalid arguments + */ +IA_CSS_PSYS_STATIC_STORAGE_CLASS_H +ia_css_terminal_manifest_t * +ia_css_program_group_manifest_get_term_mnfst( + const ia_css_program_group_manifest_t *manifest, + const unsigned int terminal_index); + +/*! 
Get the (pointer to) indexed data terminal manifest in the program group + * manifest object + + @param manifest[in] program group manifest object + @param program_index[in] index of the terminal manifest object + + @return data terminal manifest, NULL on invalid arguments + */ +IA_CSS_PSYS_STATIC_STORAGE_CLASS_H +ia_css_data_terminal_manifest_t * +ia_css_program_group_manifest_get_data_terminal_manifest( + const ia_css_program_group_manifest_t *manifest, + const unsigned int terminal_index); + +/*! Get the (pointer to) indexed parameter terminal manifest in the program + * group manifest object + + @param manifest[in] program group manifest object + @param program_index[in] index of the terminal manifest object + + @return parameter terminal manifest, NULL on invalid arguments + */ +IA_CSS_PSYS_STATIC_STORAGE_CLASS_H +ia_css_param_terminal_manifest_t * +ia_css_program_group_manifest_get_param_terminal_manifest( + const ia_css_program_group_manifest_t *manifest, + const unsigned int terminal_index); + +/*! Get the (pointer to) indexed spatial param terminal manifest in the program + * group manifest object + + @param manifest[in] program group manifest object + @param program_index[in] index of the terminal manifest object + + @return spatial param terminal manifest, NULL on invalid arguments + */ +IA_CSS_PSYS_STATIC_STORAGE_CLASS_H +ia_css_spatial_param_terminal_manifest_t * +ia_css_program_group_manifest_get_spatial_param_terminal_manifest( + const ia_css_program_group_manifest_t *manifest, + const unsigned int terminal_index); + +/*! 
Get the (pointer to) indexed sliced param terminal manifest in the program + * group manifest object + + @param manifest[in] program group manifest object + @param program_index[in] index of the terminal manifest object + + @return sliced param terminal manifest, NULL on invalid arguments + */ +IA_CSS_PSYS_STATIC_STORAGE_CLASS_H +ia_css_sliced_param_terminal_manifest_t * +ia_css_program_group_manifest_get_sliced_param_terminal_manifest( + const ia_css_program_group_manifest_t *manifest, + const unsigned int terminal_index); + +/*! Get the (pointer to) indexed program terminal manifest in the program group + * manifest object + + @parammanifest[in]program group manifest object + @paramprogram_index[in]index of the terminal manifest object + + @return program terminal manifest, NULL on invalid arguments + */ +IA_CSS_PSYS_STATIC_STORAGE_CLASS_H +ia_css_program_terminal_manifest_t * +ia_css_program_group_manifest_get_program_terminal_manifest( + const ia_css_program_group_manifest_t *manifest, + const unsigned int terminal_index); + +/*! initialize program group manifest + + @param manifest[in] program group manifest object + @param program_count[in] number of programs. + @param terminal_count[in] number of terminals. + @param program_deps[in] program dependencies for programs in pg. + @param terminal_deps[in] terminal dependencies for programs in pg. 
+ @param terminal_type[in] array of terminal types, binary specific + static frame data + @param cached_in_param_section_count[in]Number of parameter terminal sections + @param cached_out_param_section_count[in] Number of parameter out terminal + @param spatial_param_section_count[in] Array[spatial_terminal_count] + with sections per cached out + terminal + @param sliced_in_param_section_count[in] Array[sliced_in_terminal_count] + with sections per sliced in + terminal + @param sliced_out_param_section_count[in] Array[sliced_out_terminal_count] + with sections per sliced out + terminal + @param fragment_param_section_count[in] Number of fragment parameter + sections of the program init + terminal, + @param kernel_fragment_seq_count[in] Number of kernel fragment + seqence info. + @param progctrlinit_load_section_counts[in] Number of progctrinit load + sections (size of array is program_count) + @param progctrlinit_connect_section_counts[in] Number of progctrinit connect + sections (size of array is program_count) + @return none; + */ +extern void ia_css_program_group_manifest_init( + ia_css_program_group_manifest_t *blob, + const uint8_t program_count, + const uint8_t terminal_count, + const uint8_t *program_dependencies, + const uint8_t *terminal_dependencies, + const ia_css_terminal_type_t *terminal_type, + const uint16_t cached_in_param_section_count, + const uint16_t cached_out_param_section_count, + const uint16_t *spatial_param_section_count, + const uint16_t fragment_param_section_count, + const uint16_t *sliced_in_param_section_count, + const uint16_t *sliced_out_param_section_count, + const uint16_t kernel_fragment_seq_count, + const uint16_t *progctrlinit_load_section_counts, + const uint16_t *progctrlinit_connect_section_counts); + +#ifdef __IA_CSS_PSYS_STATIC_INLINE__ +#include "ia_css_psys_program_group_manifest_impl.h" +#endif /* __IA_CSS_PSYS_STATIC_INLINE__ */ + +#endif /* __IA_CSS_PSYS_PROGRAM_GROUP_MANIFEST_H */ diff --git 
a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/psysapi/static/interface/ia_css_psys_program_group_manifest.hsys.user.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/psysapi/static/interface/ia_css_psys_program_group_manifest.hsys.user.h new file mode 100644 index 0000000000000..ce802ff5dd8d3 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/psysapi/static/interface/ia_css_psys_program_group_manifest.hsys.user.h @@ -0,0 +1,69 @@ +/* +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ + +#ifndef __IA_CSS_PSYS_PROGRAM_GROUP_MANIFEST_HSYS_USER_H +#define __IA_CSS_PSYS_PROGRAM_GROUP_MANIFEST_HSYS_USER_H + +/*! \file */ + +/** @file ia_css_psys_program_group_manifest.hsys.user.h + * + * Define the methods on the program group manifest object: Hsys user interface + */ + +#include + +#include /* bool */ + +/*! Print the program group manifest object to file/stream + + @param manifest[in] program group manifest object + @param fid[out] file/stream handle + + @return < 0 on error + */ +extern int ia_css_program_group_manifest_print( + const ia_css_program_group_manifest_t *manifest, + void *fid); + +/*! Read the program group manifest object from file/stream + + @param fid[in] file/stream handle + + @return NULL on error + */ +extern ia_css_program_group_manifest_t *ia_css_program_group_manifest_read( + void *fid); + +/*! 
Write the program group manifest object to file/stream + + @param manifest[in] program group manifest object + @param fid[out] file/stream handle + + @return < 0 on error + */ +extern int ia_css_program_group_manifest_write( + const ia_css_program_group_manifest_t *manifest, + void *fid); + +/*! Boolean test if the program group manifest is valid + + @param manifest[in] program group manifest + + @return true if program group manifest is correct, false on error + */ +extern bool ia_css_is_program_group_manifest_valid( + const ia_css_program_group_manifest_t *manifest); + +#endif /* __IA_CSS_PSYS_PROGRAM_GROUP_MANIFEST_HSYS_USER_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/psysapi/static/interface/ia_css_psys_program_group_manifest.sim.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/psysapi/static/interface/ia_css_psys_program_group_manifest.sim.h new file mode 100644 index 0000000000000..242f02108dd84 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/psysapi/static/interface/ia_css_psys_program_group_manifest.sim.h @@ -0,0 +1,127 @@ +/* +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ + +#ifndef __IA_CSS_PSYS_PROGRAM_GROUP_MANIFEST_SIM_H +#define __IA_CSS_PSYS_PROGRAM_GROUP_MANIFEST_SIM_H + +/*! 
\file */ + +/** @file ia_css_psys_program_group_manifest.sim.h + * + * Define the methods on the program group manifest object: Simulation only + */ + +#include + +#include /* uint8_t */ +#include "ia_css_terminal_defs.h" + +/*! Create a program group manifest object from specification + + @param specification[in] specification (index) + + @return NULL on error + */ +extern ia_css_program_group_manifest_t *ia_css_program_group_manifest_create( + const unsigned int specification); + +/*! Destroy the program group manifest object + + @param manifest[in] program group manifest + + @return NULL + */ +extern ia_css_program_group_manifest_t *ia_css_program_group_manifest_destroy( + ia_css_program_group_manifest_t *manifest); + +/*! Compute the size of storage required for allocating + * the program group (PG) manifest object + + @param program_count[in] Number of programs in the PG + @param terminal_count[in] Number of terminals on the PG + @param program_dependency_count[in] Array[program_count] with the PG + @param terminal_dependency_count[in] Array[program_count] with the + terminal dependencies + @param terminal_type[in] Array[terminal_count] with the + terminal type + @param cached_in_param_section_count[in] Number of parameter + in terminal sections + @param cached_out_param_section_count[in] Number of parameter + out terminal sections + @param sliced_param_section_count[in] Array[sliced_terminal_count] + with sections per + sliced in terminal + @param sliced_out_param_section_count[in] Array[sliced_terminal_count] + with sections per + sliced out terminal + @param spatial_param_section_count[in] Array[spatial_terminal_count] + with sections per + spatial terminal + @param fragment_param_section_count[in] Number of fragment parameter + sections of the + program init terminal, + @param kernel_fragment_seq_count[in] Number of + kernel_fragment_seq_count. 
+ @param progctrlinit_load_section_counts[in] Number of progctrinit load + sections (size of array is program_count) + @param progctrlinit_connect_section_counts[in] Number of progctrinit connect + sections (size of array is program_count) + @return 0 on error + */ +size_t ia_css_sizeof_program_group_manifest( + const uint8_t program_count, + const uint8_t terminal_count, + const uint8_t *program_dependency_count, + const uint8_t *terminal_dependency_count, + const ia_css_terminal_type_t *terminal_type, + const uint16_t cached_in_param_section_count, + const uint16_t cached_out_param_section_count, + const uint16_t *spatial_param_section_count, + const uint16_t fragment_param_section_count, + const uint16_t *sliced_param_section_count, + const uint16_t *sliced_out_param_section_count, + const uint16_t kernel_fragment_seq_count, + const uint16_t *progctrlinit_load_section_counts, + const uint16_t *progctrlinit_connect_section_counts); + +/*! Create (the storage for) the program group manifest object + + @param program_count[in] Number of programs in the program group + @param terminal_count[in] Number of terminals on the program group + @param program_dependency_count[in] Array[program_count] with the + program dependencies + @param terminal_dependency_count[in] Array[program_count] with the + terminal dependencies + @param terminal_type[in] Array[terminal_count] with the + terminal type + + @return NULL on error + */ +extern ia_css_program_group_manifest_t *ia_css_program_group_manifest_alloc( + const uint8_t program_count, + const uint8_t terminal_count, + const uint8_t *program_dependency_count, + const uint8_t *terminal_dependency_count, + const ia_css_terminal_type_t *terminal_type); + +/*! 
Free (the storage of) the program group manifest object + + @param manifest[in] program group manifest + + @return NULL + */ +extern ia_css_program_group_manifest_t *ia_css_program_group_manifest_free( + ia_css_program_group_manifest_t *manifest); + +#endif /* __IA_CSS_PSYS_PROGRAM_GROUP_MANIFEST_SIM_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/psysapi/static/interface/ia_css_psys_program_manifest.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/psysapi/static/interface/ia_css_psys_program_manifest.h new file mode 100644 index 0000000000000..b7333671ed4fc --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/psysapi/static/interface/ia_css_psys_program_manifest.h @@ -0,0 +1,488 @@ +/* +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ + +#ifndef __IA_CSS_PSYS_PROGRAM_MANIFEST_H +#define __IA_CSS_PSYS_PROGRAM_MANIFEST_H + +/*! \file */ + +/** @file ia_css_psys_program_manifest.h + * + * Define the methods on the program manifest object that are not part of a + * single interface + */ + +#include + +#include /* uint8_t */ + +#include + +#include + +#include /* ia_css_kernel_bitmap_t */ + +/* + * Resources needs + */ +#include + +#define IA_CSS_PROGRAM_INVALID_DEPENDENCY ((uint8_t)(-1)) + +/*! 
Check if the program manifest object specifies a fixed cell allocation + + @param manifest[in] program manifest object + + @return has_fixed_cell, false on invalid argument + */ +extern bool ia_css_has_program_manifest_fixed_cell( + const ia_css_program_manifest_t *manifest); + +/*! Get the stored size of the program manifest object + + @param manifest[in] program manifest object + + @return size, 0 on invalid argument + */ +extern size_t ia_css_program_manifest_get_size( + const ia_css_program_manifest_t *manifest); + +/*! Get the program ID of the program manifest object + + @param manifest[in] program manifest object + + @return program ID, IA_CSS_PROGRAM_INVALID_ID on invalid argument + */ +extern ia_css_program_ID_t ia_css_program_manifest_get_program_ID( + const ia_css_program_manifest_t *manifest); + +/*! Set the program ID of the program manifest object + + @param manifest[in] program manifest object + + @param program ID + + @return 0 on success, -1 on invalid manifest argument + */ +extern int ia_css_program_manifest_set_program_ID( + ia_css_program_manifest_t *manifest, + ia_css_program_ID_t id); + +/*! Get the (pointer to) the program group manifest parent of the program + * manifest object + + @param manifest[in] program manifest object + + @return the pointer to the parent, NULL on invalid manifest argument + */ +extern ia_css_program_group_manifest_t *ia_css_program_manifest_get_parent( + const ia_css_program_manifest_t *manifest); + +/*! Set the (pointer to) the program group manifest parent of the program + * manifest object + + @param manifest[in] program manifest object + @param program_offset[in] this program's offset from + program_group_manifest's base address. + + @return < 0 on invalid manifest argument + */ +extern int ia_css_program_manifest_set_parent_offset( + ia_css_program_manifest_t *manifest, + int32_t program_offset); + +/*! 
Get the type of the program manifest object + + @param manifest[in] program manifest object + + @return program type, limit value (IA_CSS_N_PROGRAM_TYPES) on invalid manifest + argument +*/ +extern ia_css_program_type_t ia_css_program_manifest_get_type( + const ia_css_program_manifest_t *manifest); + +/*! Set the type of the program manifest object + + @param manifest[in] program manifest object + @param program_type[in] program type + + @return < 0 on invalid manifest argument + */ +extern int ia_css_program_manifest_set_type( + ia_css_program_manifest_t *manifest, + const ia_css_program_type_t program_type); + +/*! Set the cell id of the program manifest object + + @param manifest[in] program manifest object + @param program_cell_id[in] program cell id + + @return < 0 on invalid manifest argument + */ +extern int ia_css_program_manifest_set_cell_ID( + ia_css_program_manifest_t *manifest, + const vied_nci_cell_ID_t cell_id); + +/*! Set the cell type of the program manifest object + + @param manifest[in] program manifest object + @param program_cell_type[in] program cell type + + @return < 0 on invalid manifest argument + */ +extern int ia_css_program_manifest_set_cell_type_ID( + ia_css_program_manifest_t *manifest, + const vied_nci_cell_type_ID_t cell_type_id); + +/*! Set cells bitmap for the program + + @param manifest[in] program manifest object + @param bitmap[in] bitmap + + @return 0 when not applicable and/or invalid arguments + */ +extern int ia_css_program_manifest_set_cells_bitmap( + ia_css_program_manifest_t *manifest, + const vied_nci_resource_bitmap_t bitmap); + +/*! Get cells bitmap for the program + + @param manifest[in] program manifest object + + @return 0 when not applicable and/or invalid arguments + */ +extern vied_nci_resource_bitmap_t ia_css_program_manifest_get_cells_bitmap( + const ia_css_program_manifest_t *manifest); + +/*! 
Set DFM port bitmap for the program + + @param manifest[in] program manifest object + @param dfm_type_id[in] DFM resource type ID + @param bitmap[in] bitmap + + @return 0 when not applicable and/or invalid arguments + */ +extern int ia_css_program_manifest_set_dfm_port_bitmap( + ia_css_program_manifest_t *manifest, + const vied_nci_dev_dfm_id_t dfm_type_id, + const vied_nci_resource_bitmap_t bitmap); + +/*! Get bitmap of DFM ports requested for the program + + @param manifest[in] program manifest object + @param dfm_type_id[in] DFM resource type ID + + @return DFM port bitmap + */ +extern vied_nci_resource_bitmap_t ia_css_program_manifest_get_dfm_port_bitmap( + const ia_css_program_manifest_t *manifest, + const vied_nci_dev_dfm_id_t dfm_type_id); + + +/*! Set active DFM port specification bitmap for the program + + @param manifest[in] program manifest object + @param dfm_type_id[in] DFM resource type ID + @param bitmap[in] bitmap + + @return 0 when not applicable and/or invalid arguments + */ +extern int ia_css_program_manifest_set_dfm_active_port_bitmap( + ia_css_program_manifest_t *manifest, + const vied_nci_dev_dfm_id_t dfm_type_id, + const vied_nci_resource_bitmap_t bitmap); + +/*! Get active DFM port specification bitmap for the program + + @param manifest[in] program manifest object + @param dfm_type_id[in] DFM resource type ID + + @return 0 when not applicable and/or invalid arguments + */ +extern vied_nci_resource_bitmap_t ia_css_program_manifest_get_dfm_active_port_bitmap( + const ia_css_program_manifest_t *manifest, + const vied_nci_dev_dfm_id_t dfm_type_id); + +/*! 
Set DFM device relocatability specification for the program + + @param manifest[in] program manifest object + @param dfm_type_id[in] DFM resource type ID + @param is_relocatable[in] 1 if dfm device ports are relocatable, 0 otherwise + + @return 0 when not applicable and/or invalid arguments + */ +extern int ia_css_program_manifest_set_is_dfm_relocatable( + ia_css_program_manifest_t *manifest, + const vied_nci_dev_dfm_id_t dfm_type_id, + const uint8_t is_relocatable); + +/*! Get DFM device relocatability specification for the program + + @param manifest[in] program manifest object + @param dfm_type_id[in] DFM resource type ID + + @return 1 if dfm device ports are relocatable, 0 otherwise + */ +extern uint8_t ia_css_program_manifest_get_is_dfm_relocatable( + const ia_css_program_manifest_t *manifest, + const vied_nci_dev_dfm_id_t dfm_type_id); + + +/*! Get the memory resource (size) specification for a memory + that belongs to the cell where the program will be mapped + + @param manifest[in] program manifest object + @param mem_type_id[in] mem type ID + + @return 0 when not applicable and/or invalid arguments + */ +extern vied_nci_resource_size_t ia_css_program_manifest_get_int_mem_size( + const ia_css_program_manifest_t *manifest, + const vied_nci_mem_type_ID_t mem_type_id); + +/*! Set the memory resource (size) specification for a memory + that belongs to the cell where the program will be mapped + + @param manifest[in] program manifest object + @param mem_type_id[in] mem type id + @param int_mem_size[in] internal memory size + + @return < 0 on invalid arguments + */ +extern int ia_css_program_manifest_set_int_mem_size( + ia_css_program_manifest_t *manifest, + const vied_nci_mem_type_ID_t mem_type_id, + const vied_nci_resource_size_t int_mem_size); + +/*! 
Get the memory resource (size) specification for a memory + that does not belong to the cell where the program will be mapped + + @param manifest[in] program manifest object + @param mem_type_id[in] mem type ID + + @return 0 when not applicable and/or invalid arguments + */ +extern vied_nci_resource_size_t ia_css_program_manifest_get_ext_mem_size( + const ia_css_program_manifest_t *manifest, + const vied_nci_mem_type_ID_t mem_type_id); + +/*! Set the memory resource (size) specification for a memory + that does not belong to the cell where the program will be mapped + + @param manifest[in] program manifest object + @param mem_type_id[in] mem type id + @param ext_mem_size[in] external memory size + + @return < 0 on invalid arguments + */ +extern int ia_css_program_manifest_set_ext_mem_size( + ia_css_program_manifest_t *manifest, + const vied_nci_mem_type_ID_t mem_type_id, + const vied_nci_resource_size_t ext_mem_size); + +/*! Get a device channel resource (size) specification + + @param manifest[in] program manifest object + @param dev_chn_id[in] device channel ID + + @return 0 when not applicable and/or invalid arguments + */ +extern vied_nci_resource_size_t ia_css_program_manifest_get_dev_chn_size( + const ia_css_program_manifest_t *manifest, + const vied_nci_dev_chn_ID_t dev_chn_id); + +/*! Set a device channel resource (size) specification + + @param manifest[in] program manifest object + @param dev_chn_id[in] device channel ID + @param dev_chn_size[in] device channel size + + @return < 0 on invalid arguments + */ +extern int ia_css_program_manifest_set_dev_chn_size( + ia_css_program_manifest_t *manifest, + const vied_nci_dev_chn_ID_t dev_chn_id, + const vied_nci_resource_size_t dev_chn_size); + +/*! 
Set a device channel resource (offset) specification + + @param manifest[in] program manifest object + @param dev_chn_id[in] device channel ID + @param dev_chn_offset[in] device channel offset + + @return < 0 on invalid arguments + */ +extern int ia_css_program_manifest_set_dev_chn_offset( + ia_css_program_manifest_t *manifest, + const vied_nci_dev_chn_ID_t dev_chn_id, + const vied_nci_resource_size_t dev_chn_offset); + + +/*! Set the memory resource (offset) specification for a memory + that does not belong to the cell where the program will be mapped + + @param manifest[in] program manifest object + @param mem_type_id[in] mem type id + @param ext_mem_offset[in] external memory offset + + @return < 0 on invalid arguments + */ +extern int ia_css_program_manifest_set_ext_mem_offset( + ia_css_program_manifest_t *manifest, + const vied_nci_mem_type_ID_t mem_type_id, + const vied_nci_resource_size_t ext_mem_offset); + +/*! Get a device channel resource (offset) specification + + @param manifest[in] program manifest object + @param dev_chn_id[in] device channel ID + + @return Valid fixed offset (if value is greater or equal to 0) or + IA_CSS_PROGRAM_MANIFEST_RESOURCE_OFFSET_IS_RELOCATABLE if offset + is relocatable + */ +extern vied_nci_resource_size_t ia_css_program_manifest_get_dev_chn_offset( + const ia_css_program_manifest_t *manifest, + const vied_nci_dev_chn_ID_t dev_chn_id); + +/*! Get the memory resource (offset) specification for a memory + that does not belong to the cell where the program will be mapped. + + + @param manifest[in] program manifest object + @param mem_type_id[in] mem type ID + + @return Valid fixed offset (if value is greater or equal to 0) or + IA_CSS_PROGRAM_MANIFEST_RESOURCE_OFFSET_IS_RELOCATABLE if offset + is relocatable + */ +extern vied_nci_resource_size_t ia_css_program_manifest_get_ext_mem_offset( + const ia_css_program_manifest_t *manifest, + const vied_nci_mem_type_ID_t mem_type_id); + + +/*! 
Get the kernel composition of the program manifest object + + @param manifest[in] program manifest object + + @return bitmap, 0 on invalid arguments + */ +extern ia_css_kernel_bitmap_t ia_css_program_manifest_get_kernel_bitmap( + const ia_css_program_manifest_t *manifest); + +/*! Set the kernel dependency of the program manifest object + + @param manifest[in] program manifest object + @param kernel_bitmap[in] kernel composition bitmap + + @return < 0 on invalid arguments + */ +extern int ia_css_program_manifest_set_kernel_bitmap( + ia_css_program_manifest_t *manifest, + const ia_css_kernel_bitmap_t kernel_bitmap); + +/*! Get the number of programs this programs depends on from the program group + * manifest object + + @param manifest[in] program manifest object + + @return program dependency count + */ +extern uint8_t ia_css_program_manifest_get_program_dependency_count( + const ia_css_program_manifest_t *manifest); + +/*! Get the index of the program which the programs at this index depends on + from the program manifest object + + @param manifest[in] program manifest object + + @return program dependency, + IA_CSS_PROGRAM_INVALID_DEPENDENCY on invalid arguments + */ +extern uint8_t ia_css_program_manifest_get_program_dependency( + const ia_css_program_manifest_t *manifest, + const unsigned int index); + +/*! Set the index of the program which the programs at this index depends on + in the program manifest object + + @param manifest[in] program manifest object + + @return program dependency + */ +extern int ia_css_program_manifest_set_program_dependency( + ia_css_program_manifest_t *manifest, + const uint8_t program_dependency, + const unsigned int index); + +/*! 
Get the number of terminals this programs depends on from the program group + * manifest object + + @param manifest[in] program manifest object + + @return program dependency count + */ +extern uint8_t ia_css_program_manifest_get_terminal_dependency_count( + const ia_css_program_manifest_t *manifest); + +/*! Get the index of the terminal which the programs at this index depends on + from the program manifest object + + @param manifest[in] program manifest object + + @return terminal dependency, IA_CSS_PROGRAM_INVALID_DEPENDENCY on error + */ +uint8_t ia_css_program_manifest_get_terminal_dependency( + const ia_css_program_manifest_t *manifest, + const unsigned int index); + +/*! Set the index of the terminal which the programs at this index depends on + in the program manifest object + + @param manifest[in] program manifest object + + @return < 0 on invalid arguments + */ +extern int ia_css_program_manifest_set_terminal_dependency( + ia_css_program_manifest_t *manifest, + const uint8_t terminal_dependency, + const unsigned int index); + +/*! Check if the program manifest object specifies a subnode program + + @param manifest[in] program manifest object + + @return is_subnode, false on invalid argument + */ +extern bool ia_css_is_program_manifest_subnode_program_type( + const ia_css_program_manifest_t *manifest); + +/*! Check if the program manifest object specifies a supernode program + + @param manifest[in] program manifest object + + @return is_supernode, false on invalid argument + */ +extern bool ia_css_is_program_manifest_supernode_program_type( + const ia_css_program_manifest_t *manifest); +/*! 
Check if the program manifest object specifies a singular program + + @param manifest[in] program manifest object + + @return is_singular, false on invalid argument + */ +extern bool ia_css_is_program_manifest_singular_program_type( + const ia_css_program_manifest_t *manifest); + +#endif /* __IA_CSS_PSYS_PROGRAM_MANIFEST_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/psysapi/static/interface/ia_css_psys_program_manifest.hsys.kernel.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/psysapi/static/interface/ia_css_psys_program_manifest.hsys.kernel.h new file mode 100644 index 0000000000000..9d737b75a576b --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/psysapi/static/interface/ia_css_psys_program_manifest.hsys.kernel.h @@ -0,0 +1,96 @@ +/* +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ + +#ifndef __IA_CSS_PSYS_PROGRAM_MANIFEST_HSYS_KERNEL_H +#define __IA_CSS_PSYS_PROGRAM_MANIFEST_HSYS_KERNEL_H + +/*! \file */ + +/** @file ia_css_psys_program_manifest.hsys.kernel.h + * + * Define the methods on the program manifest object: Hsys kernel interface + */ + +#include + +#include + +#include /* uint8_t */ + +/* + * Resources needs + */ + +/*! Get the cell ID from the program manifest object + + @param manifest[in] program manifest object + + Note: If the cell ID is specified, the program this manifest belongs to + must be mapped on that instance. 
If the cell ID is invalid (limit value) + then the cell type ID must be specified instead + + @return cell ID, limit value if not specified + */ +extern vied_nci_cell_ID_t ia_css_program_manifest_get_cell_ID( + const ia_css_program_manifest_t *manifest); + +/*! Get the cell type ID from the program manifest object + + @param manifest[in] program manifest object + + Note: If the cell type ID is specified, the program this manifest belongs + to can be mapped on any instance of this cell type. If the cell type ID is + invalid (limit value) then a specific cell ID must be specified instead + + @return cell type ID, limit value if not specified + */ +extern vied_nci_cell_type_ID_t ia_css_program_manifest_get_cell_type_ID( + const ia_css_program_manifest_t *manifest); + +/*! Get the memory resource (size) specification for a memory + that belongs to the cell where the program will be mapped + + @param manifest[in] program manifest object + @param mem_type_id[in] mem type ID + + @return 0 when not applicable + */ +extern vied_nci_resource_size_t ia_css_program_manifest_get_int_mem_size( + const ia_css_program_manifest_t *manifest, + const vied_nci_mem_type_ID_t mem_type_id); + +/*! Get the memory resource (size) specification for a memory + that does not belong to the cell where the program will be mapped + + @param manifest[in] program manifest object + @param mem_type_id[in] mem type ID + + @return 0 when not applicable + */ +extern vied_nci_resource_size_t ia_css_program_manifest_get_ext_mem_size( + const ia_css_program_manifest_t *manifest, + const vied_nci_mem_type_ID_t mem_type_id); + +/*!
Get a device channel resource (size) specification + + @param manifest[in] program manifest object + @param dev_chn_id[in] device channel ID + + @return 0 when not applicable + */ +extern vied_nci_resource_size_t ia_css_program_manifest_get_dev_chn_size( + const ia_css_program_manifest_t *manifest, + const vied_nci_dev_chn_ID_t dev_chn_id); + +#endif /* __IA_CSS_PSYS_PROGRAM_MANIFEST_HSYS_KERNEL_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/psysapi/static/interface/ia_css_psys_program_manifest.hsys.user.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/psysapi/static/interface/ia_css_psys_program_manifest.hsys.user.h new file mode 100644 index 0000000000000..087c84b7106e5 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/psysapi/static/interface/ia_css_psys_program_manifest.hsys.user.h @@ -0,0 +1,38 @@ +/* +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ + +#ifndef __IA_CSS_PSYS_PROGRAM_MANIFEST_HSYS_USER_H +#define __IA_CSS_PSYS_PROGRAM_MANIFEST_HSYS_USER_H + +/*! \file */ + +/** @file ia_css_psys_program_manifest.hsys.user.h + * + * Define the methods on the program manifest object: Hsys user interface + */ + +#include + +/*! 
Print the program manifest object to file/stream + + @param manifest[in] program manifest object + @param fid[out] file/stream handle + + @return < 0 on error + */ +extern int ia_css_program_manifest_print( + const ia_css_program_manifest_t *manifest, + void *fid); + +#endif /* __IA_CSS_PSYS_PROGRAM_MANIFEST_HSYS_USER_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/psysapi/static/interface/ia_css_psys_program_manifest.sim.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/psysapi/static/interface/ia_css_psys_program_manifest.sim.h new file mode 100644 index 0000000000000..0c2cef11f30eb --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/psysapi/static/interface/ia_css_psys_program_manifest.sim.h @@ -0,0 +1,61 @@ +/* +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ + +#ifndef __IA_CSS_PSYS_PROGRAM_MANIFEST_SIM_H +#define __IA_CSS_PSYS_PROGRAM_MANIFEST_SIM_H + +/*! \file */ + +/** @file ia_css_psys_program_manifest.sim.h + * + * Define the methods on the program manifest object: Simulation only + */ + +#include + +#include /* uint8_t */ + +/*! 
Compute the size of storage required for allocating + * the program manifest object + + @param program_dependency_count[in] Number of programs this one depends on + @param terminal_dependency_count[in] Number of terminals this one depends on + + @return 0 on error + */ +extern size_t ia_css_sizeof_program_manifest( + const uint8_t program_dependency_count, + const uint8_t terminal_dependency_count); + +/*! Create (the storage for) the program manifest object + + @param program_dependency_count[in] Number of programs this one depends on + @param terminal_dependency_count[in] Number of terminals this one depends on + + @return NULL on error + */ +extern ia_css_program_manifest_t *ia_css_program_manifest_alloc( + const uint8_t program_dependency_count, + const uint8_t terminal_dependency_count); + +/*! Destroy (the storage of) the program manifest object + + @param manifest[in] program manifest + + @return NULL + */ +extern ia_css_program_manifest_t *ia_css_program_manifest_free( + ia_css_program_manifest_t *manifest); + +#endif /* __IA_CSS_PSYS_PROGRAM_MANIFEST_SIM_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/psysapi/static/interface/ia_css_psys_static_storage_class.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/psysapi/static/interface/ia_css_psys_static_storage_class.h new file mode 100644 index 0000000000000..f3c832b5a4a33 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/psysapi/static/interface/ia_css_psys_static_storage_class.h @@ -0,0 +1,28 @@ +/* +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. 
+ * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ + +#ifndef __IA_CSS_PSYS_STATIC_STORAGE_CLASS_H +#define __IA_CSS_PSYS_STATIC_STORAGE_CLASS_H + +#include "storage_class.h" + +#ifndef __IA_CSS_PSYS_STATIC_INLINE__ +#define IA_CSS_PSYS_STATIC_STORAGE_CLASS_H STORAGE_CLASS_EXTERN +#define IA_CSS_PSYS_STATIC_STORAGE_CLASS_C +#else +#define IA_CSS_PSYS_STATIC_STORAGE_CLASS_H STORAGE_CLASS_INLINE +#define IA_CSS_PSYS_STATIC_STORAGE_CLASS_C STORAGE_CLASS_INLINE +#endif + +#endif /* __IA_CSS_PSYS_STATIC_STORAGE_CLASS_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/psysapi/static/interface/ia_css_psys_static_trace.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/psysapi/static/interface/ia_css_psys_static_trace.h new file mode 100644 index 0000000000000..7c5612cd09690 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/psysapi/static/interface/ia_css_psys_static_trace.h @@ -0,0 +1,103 @@ +/* +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+*/ + +#ifndef __IA_CSS_PSYS_STATIC_TRACE_H +#define __IA_CSS_PSYS_STATIC_TRACE_H + +#include "ia_css_psysapi_trace.h" + +#define PSYS_STATIC_TRACE_LEVEL_CONFIG_DEFAULT PSYSAPI_TRACE_LOG_LEVEL_OFF + +/* Default sub-module tracing config */ +#if (!defined(PSYSAPI_STATIC_TRACING_OVERRIDE)) + #define PSYS_STATIC_TRACE_LEVEL_CONFIG \ + PSYS_STATIC_TRACE_LEVEL_CONFIG_DEFAULT +#endif + +/* Module/sub-module specific trace setting will be used if + * the trace level is not specified from the module or + PSYSAPI_STATIC_TRACING_OVERRIDE is defined + */ +#if (defined(PSYSAPI_STATIC_TRACING_OVERRIDE)) + /* Module/sub-module specific trace setting */ + #if PSYSAPI_STATIC_TRACING_OVERRIDE == PSYSAPI_TRACE_LOG_LEVEL_OFF + /* PSYSAPI_TRACE_LOG_LEVEL_OFF */ + #define PSYSAPI_STATIC_TRACE_METHOD \ + IA_CSS_TRACE_METHOD_NATIVE + #define PSYSAPI_STATIC_TRACE_LEVEL_ASSERT \ + IA_CSS_TRACE_LEVEL_DISABLED + #define PSYSAPI_STATIC_TRACE_LEVEL_ERROR \ + IA_CSS_TRACE_LEVEL_DISABLED + #define PSYSAPI_STATIC_TRACE_LEVEL_WARNING \ + IA_CSS_TRACE_LEVEL_DISABLED + #define PSYSAPI_STATIC_TRACE_LEVEL_INFO \ + IA_CSS_TRACE_LEVEL_DISABLED + #define PSYSAPI_STATIC_TRACE_LEVEL_DEBUG \ + IA_CSS_TRACE_LEVEL_DISABLED + #define PSYSAPI_STATIC_TRACE_LEVEL_VERBOSE \ + IA_CSS_TRACE_LEVEL_DISABLED + #elif PSYSAPI_STATIC_TRACING_OVERRIDE == PSYSAPI_TRACE_LOG_LEVEL_NORMAL + /* PSYSAPI_TRACE_LOG_LEVEL_NORMAL */ + #define PSYSAPI_STATIC_TRACE_METHOD \ + IA_CSS_TRACE_METHOD_NATIVE + #define PSYSAPI_STATIC_TRACE_LEVEL_ASSERT \ + IA_CSS_TRACE_LEVEL_DISABLED + #define PSYSAPI_STATIC_TRACE_LEVEL_ERROR \ + IA_CSS_TRACE_LEVEL_ENABLED + #define PSYSAPI_STATIC_TRACE_LEVEL_WARNING \ + IA_CSS_TRACE_LEVEL_DISABLED + #define PSYSAPI_STATIC_TRACE_LEVEL_INFO \ + IA_CSS_TRACE_LEVEL_ENABLED + #define PSYSAPI_STATIC_TRACE_LEVEL_DEBUG \ + IA_CSS_TRACE_LEVEL_DISABLED + #define PSYSAPI_STATIC_TRACE_LEVEL_VERBOSE \ + IA_CSS_TRACE_LEVEL_DISABLED + #elif PSYSAPI_STATIC_TRACING_OVERRIDE == PSYSAPI_TRACE_LOG_LEVEL_DEBUG + /* 
PSYSAPI_TRACE_LOG_LEVEL_DEBUG */ + #define PSYSAPI_STATIC_TRACE_METHOD \ + IA_CSS_TRACE_METHOD_NATIVE + #define PSYSAPI_STATIC_TRACE_LEVEL_ASSERT \ + IA_CSS_TRACE_LEVEL_ENABLED + #define PSYSAPI_STATIC_TRACE_LEVEL_ERROR \ + IA_CSS_TRACE_LEVEL_ENABLED + #define PSYSAPI_STATIC_TRACE_LEVEL_WARNING \ + IA_CSS_TRACE_LEVEL_ENABLED + #define PSYSAPI_STATIC_TRACE_LEVEL_INFO \ + IA_CSS_TRACE_LEVEL_ENABLED + #define PSYSAPI_STATIC_TRACE_LEVEL_DEBUG \ + IA_CSS_TRACE_LEVEL_ENABLED + #define PSYSAPI_STATIC_TRACE_LEVEL_VERBOSE \ + IA_CSS_TRACE_LEVEL_ENABLED + #else + #error "No PSYSAPI_DATA Tracing level defined" + #endif +#else + /* Inherit Module trace setting */ + #define PSYSAPI_STATIC_TRACE_METHOD \ + PSYSAPI_TRACE_METHOD + #define PSYSAPI_STATIC_TRACE_LEVEL_ASSERT \ + PSYSAPI_TRACE_LEVEL_ASSERT + #define PSYSAPI_STATIC_TRACE_LEVEL_ERROR \ + PSYSAPI_TRACE_LEVEL_ERROR + #define PSYSAPI_STATIC_TRACE_LEVEL_WARNING \ + PSYSAPI_TRACE_LEVEL_WARNING + #define PSYSAPI_STATIC_TRACE_LEVEL_INFO \ + PSYSAPI_TRACE_LEVEL_INFO + #define PSYSAPI_STATIC_TRACE_LEVEL_DEBUG \ + PSYSAPI_TRACE_LEVEL_DEBUG + #define PSYSAPI_STATIC_TRACE_LEVEL_VERBOSE \ + PSYSAPI_TRACE_LEVEL_VERBOSE +#endif + +#endif /* __IA_CSS_PSYSAPI_STATIC_TRACE_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/psysapi/static/interface/ia_css_psys_terminal_manifest.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/psysapi/static/interface/ia_css_psys_terminal_manifest.h new file mode 100644 index 0000000000000..0fa62b32e1a74 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/psysapi/static/interface/ia_css_psys_terminal_manifest.h @@ -0,0 +1,423 @@ +/* +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. 
+ * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ + +#ifndef __IA_CSS_PSYS_TERMINAL_MANIFEST_H +#define __IA_CSS_PSYS_TERMINAL_MANIFEST_H + +/*! \file */ + +/** @file ia_css_psys_terminal_manifest.h + * + * Define the methods on the terminal manifest object that are not part of a + * single interface + */ + +#include + +#include + +#include + +#include /* ia_css_frame_format_bitmap_t */ +#include /* ia_css_kernel_bitmap_t */ + +#include /* size_t */ +#include "ia_css_terminal_manifest.h" +#include "ia_css_terminal_manifest_base_types.h" + + +/*! Check if the terminal manifest object specifies a spatial param terminal + * type + + @param manifest[in] terminal manifest object + + @return is_parameter_terminal, false on invalid manifest argument + */ +extern bool ia_css_is_terminal_manifest_spatial_parameter_terminal( + const ia_css_terminal_manifest_t *manifest); + +/*! Check if the terminal manifest object specifies a program terminal type + + @param manifest[in] terminal manifest object + + @return is_parameter_terminal, false on invalid manifest argument + */ +extern bool ia_css_is_terminal_manifest_program_terminal( + const ia_css_terminal_manifest_t *manifest); + + +/*! Check if the terminal manifest object specifies a program control init terminal type + * + * @param manifest[in] terminal manifest object + * + * @return is_parameter_terminal, false on invalid manifest argument + */ +extern bool ia_css_is_terminal_manifest_program_control_init_terminal( + const ia_css_terminal_manifest_t *manifest); + +/*! 
Check if the terminal manifest object specifies a (cached) parameter + * terminal type + + @param manifest[in] terminal manifest object + + @return is_parameter_terminal, false on invalid manifest argument + */ +extern bool ia_css_is_terminal_manifest_parameter_terminal( + const ia_css_terminal_manifest_t *manifest); + +/*! Check if the terminal manifest object specifies a (sliced) parameter + * terminal type + + @param manifest[in] terminal manifest object + + @return is_parameter_terminal, false on invalid manifest argument + */ +extern bool ia_css_is_terminal_manifest_sliced_terminal( + const ia_css_terminal_manifest_t *manifest); + +/*! Check if the terminal manifest object specifies a data terminal type + + @param manifest[in] terminal manifest object + + @return is_data_terminal, false on invalid manifest argument + */ +extern bool ia_css_is_terminal_manifest_data_terminal( + const ia_css_terminal_manifest_t *manifest); + +/*! Get the stored size of the terminal manifest object + + @param manifest[in] terminal manifest object + + @return size, 0 on invalid manifest argument + */ +extern size_t ia_css_terminal_manifest_get_size( + const ia_css_terminal_manifest_t *manifest); + +/*! Get the (pointer to) the program group manifest parent of the terminal + * manifest object + + @param manifest[in] terminal manifest object + + @return the pointer to the parent, NULL on invalid manifest argument + */ +extern ia_css_program_group_manifest_t *ia_css_terminal_manifest_get_parent( + const ia_css_terminal_manifest_t *manifest); + +/*! Set the (pointer to) the program group manifest parent of the terminal + * manifest object + + @param manifest[in] terminal manifest object + @param terminal_offset[in] this terminal's offset from + program_group_manifest base address. + + @return < 0 on invalid arguments + */ +extern int ia_css_terminal_manifest_set_parent_offset( + ia_css_terminal_manifest_t *manifest, + int32_t terminal_offset); + +/*! 
Get the type of the terminal manifest object + + @param manifest[in] terminal manifest object + + @return terminal type, limit value (IA_CSS_N_TERMINAL_TYPES) on invalid + manifest argument +*/ +extern ia_css_terminal_type_t ia_css_terminal_manifest_get_type( + const ia_css_terminal_manifest_t *manifest); + +/*! Set the type of the terminal manifest object + + @param manifest[in] terminal manifest object + @param terminal_type[in] terminal type + + @return < 0 on invalid manifest argument + */ +extern int ia_css_terminal_manifest_set_type( + ia_css_terminal_manifest_t *manifest, + const ia_css_terminal_type_t terminal_type); + +/*! Set the ID of the terminal manifest object + + @param manifest[in] terminal manifest object + @param ID[in] terminal ID + + @return < 0 on invalid manifest argument + */ +int ia_css_terminal_manifest_set_ID( + ia_css_terminal_manifest_t *manifest, + const ia_css_terminal_ID_t ID); + +/*! Get the type of the terminal manifest object + + @param manifest[in] terminal manifest object + + @return terminal id, IA_CSS_TERMINAL_INVALID_ID on invalid manifest argument + */ +extern ia_css_terminal_ID_t ia_css_terminal_manifest_get_ID( + const ia_css_terminal_manifest_t *manifest); + +/*! Get the supported frame types of the (data) terminal manifest object + + @param manifest[in] (data) terminal manifest object + + @return frame format bitmap, 0 on invalid manifest argument +*/ +extern ia_css_frame_format_bitmap_t + ia_css_data_terminal_manifest_get_frame_format_bitmap( + const ia_css_data_terminal_manifest_t *manifest); + +/*! Set the chosen frame type for the (data) terminal manifest object + + @param manifest[in] (data) terminal manifest object + @param bitmap[in] frame format bitmap + + @return < 0 on invalid manifest argument + */ +extern int ia_css_data_terminal_manifest_set_frame_format_bitmap( + ia_css_data_terminal_manifest_t *manifest, + ia_css_frame_format_bitmap_t bitmap); + +/*! 
Check if the (data) terminal manifest object supports compression + + @param manifest[in] (data) terminal manifest object + + @return compression_support, true if compression is supported + */ +extern bool ia_css_data_terminal_manifest_can_support_compression( + const ia_css_data_terminal_manifest_t *manifest); + +/*! Set the compression support feature of the (data) terminal manifest object + + @param manifest[in] (data) terminal manifest object + @param compression_support[in] set true to support compression + + @return < 0 on invalid manifest argument + */ +extern int ia_css_data_terminal_manifest_set_compression_support( + ia_css_data_terminal_manifest_t *manifest, + bool compression_support); + +/*! Set the supported connection types of the terminal manifest object + + @param manifest[in] (data) terminal manifest object + @param bitmap[in] connection bitmap + + @return < 0 on invalid manifest argument + */ +extern int ia_css_data_terminal_manifest_set_connection_bitmap( + ia_css_data_terminal_manifest_t *manifest, ia_css_connection_bitmap_t bitmap); + +/*! Get the connection bitmap of the (data) terminal manifest object + + @param manifest[in] (data) terminal manifest object + + @return connection bitmap, 0 on invalid manifest argument +*/ +extern ia_css_connection_bitmap_t + ia_css_data_terminal_manifest_get_connection_bitmap( + const ia_css_data_terminal_manifest_t *manifest); + +/*! Get the kernel dependency of the (data) terminal manifest object + + @param manifest[in] (data) terminal manifest object + + @return kernel bitmap, 0 on invalid manifest argument + */ +extern ia_css_kernel_bitmap_t ia_css_data_terminal_manifest_get_kernel_bitmap( + const ia_css_data_terminal_manifest_t *manifest); + +/*! 
Set the kernel dependency of the (data) terminal manifest object + + @param manifest[in] (data) terminal manifest object + @param kernel_bitmap[in] kernel dependency bitmap + + @return < 0 on invalid manifest argument + */ +extern int ia_css_data_terminal_manifest_set_kernel_bitmap( + ia_css_data_terminal_manifest_t *manifest, + const ia_css_kernel_bitmap_t kernel_bitmap); + +/*! Set the unique kernel dependency of the (data) terminal manifest object + + @param manifest[in] (data) terminal manifest object + @param index[in] kernel dependency bitmap index + + @return < 0 on invalid argument(s) + */ +extern int ia_css_data_terminal_manifest_set_kernel_bitmap_unique( + ia_css_data_terminal_manifest_t *manifest, + const unsigned int index); + +/*! Set the min size of the (data) terminal manifest object + + @param manifest[in] (data) terminal manifest object + @param min_size[in] Minimum size of the frame array + + @return < 0 on invalid manifest argument + */ +extern int ia_css_data_terminal_manifest_set_min_size( + ia_css_data_terminal_manifest_t *manifest, + const uint16_t min_size[IA_CSS_N_DATA_DIMENSION]); + +/*! Set the max size of the (data) terminal manifest object + + @param manifest[in] (data) terminal manifest object + @param max_size[in] Maximum size of the frame array + + @return < 0 on invalid manifest argument + */ +extern int ia_css_data_terminal_manifest_set_max_size( + ia_css_data_terminal_manifest_t *manifest, + const uint16_t max_size[IA_CSS_N_DATA_DIMENSION]); + +/*! Get the min size of the (data) terminal manifest object + + @param manifest[in] (data) terminal manifest object + @param min_size[in] Minimum size of the frame array + + @return < 0 on invalid manifest argument + */ +extern int ia_css_data_terminal_manifest_get_min_size( + const ia_css_data_terminal_manifest_t *manifest, + uint16_t min_size[IA_CSS_N_DATA_DIMENSION]); + +/*! 
Get the max size of the (data) terminal manifest object + + @param manifest[in] (data) terminal manifest object + @param max_size[in] Maximum size of the frame array + + @return < 0 on invalid manifest argument + */ +extern int ia_css_data_terminal_manifest_get_max_size( + const ia_css_data_terminal_manifest_t *manifest, + uint16_t max_size[IA_CSS_N_DATA_DIMENSION]); + +/*! Set the min fragment size of the (data) terminal manifest object + + @param manifest[in] (data) terminal manifest object + @param min_size[in] Minimum size of the fragment array + + @return < 0 on invalid manifest argument + */ +extern int ia_css_data_terminal_manifest_set_min_fragment_size( + ia_css_data_terminal_manifest_t *manifest, + const uint16_t min_size[IA_CSS_N_DATA_DIMENSION]); + +/*! Set the max fragment size of the (data) terminal manifest object + + @param manifest[in] (data) terminal manifest object + @param max_size[in] Maximum size of the fragment array + + @return < 0 on invalid manifest argument + */ +extern int ia_css_data_terminal_manifest_set_max_fragment_size( + ia_css_data_terminal_manifest_t *manifest, + const uint16_t max_size[IA_CSS_N_DATA_DIMENSION]); + +/*! Get the min fragment size of the (data) terminal manifest object + + @param manifest[in] (data) terminal manifest object + @param min_size[in] Minimum size of the fragment array + + @return < 0 on invalid manifest argument + */ +extern int ia_css_data_terminal_manifest_get_min_fragment_size( + const ia_css_data_terminal_manifest_t *manifest, + uint16_t min_size[IA_CSS_N_DATA_DIMENSION]); + +/*! Get the max fragment size of the (data) terminal manifest object + + @param manifest[in] (data) terminal manifest object + @param max_size[in] Maximum size of the fragment array + + @return < 0 on invalid manifest argument + */ +extern int ia_css_data_terminal_manifest_get_max_fragment_size( + const ia_css_data_terminal_manifest_t *manifest, + uint16_t max_size[IA_CSS_N_DATA_DIMENSION]); + +/*! 
+ * Get the program control init connect section count for program prog. + * @param prog[in] program control init terminal program desc + * @return number of connect section for program prog. + */ + +extern +unsigned int ia_css_program_control_init_terminal_manifest_get_connect_section_count( + const ia_css_program_control_init_manifest_program_desc_t *prog); + + +/*! + * Get the program control init load section count for program prog. + * @param prog[in] program control init terminal program desc + * @return number of load section for program prog. + */ + +extern +unsigned int ia_css_program_control_init_terminal_manifest_get_load_section_count( + const ia_css_program_control_init_manifest_program_desc_t *prog); + +/*! + * Get the program control init terminal manifest size. + * @param nof_programs[in] Number of programs. + * @param nof_load_sections[in] Array of size nof_programs, + * encoding the number of load sections. + * @param nof_connect_sections[in] Array of size nof_programs, + * encoding the number of connect sections. + * @return < 0 on invalid manifest argument + */ +extern +unsigned int ia_css_program_control_init_terminal_manifest_get_size( + const uint16_t nof_programs, + const uint16_t *nof_load_sections, + const uint16_t *nof_connect_sections); + +/*! + * Get the program control init terminal manifest program desc. + * @param terminal[in] Program control init terminal. + * @param program[in] Number of programs. + * @return program control init terminal program desc (or NULL if error). + */ +extern +ia_css_program_control_init_manifest_program_desc_t * +ia_css_program_control_init_terminal_manifest_get_program_desc( + const ia_css_program_control_init_terminal_manifest_t *terminal, + unsigned int program); + +/*! + * Initialize the program control init terminal manifest. + * @param nof_programs[in] Number of programs + * @param nof_load_sections[in] Array of size nof_programs, + * encoding the number of load sections. 
+ * @param nof_connect_sections[in] Array of size nof_programs, + * encoding the number of connect sections. + * @return < 0 on invalid manifest argument + */ +extern +int ia_css_program_control_init_terminal_manifest_init( + ia_css_program_control_init_terminal_manifest_t *terminal, + const uint16_t nof_programs, + const uint16_t *nof_load_sections, + const uint16_t *nof_connect_sections); + +/*! + * Pretty prints the program control init terminal manifest. + * @param terminal[in] Program control init terminal. + */ +extern +void ia_css_program_control_init_terminal_manifest_print( + ia_css_program_control_init_terminal_manifest_t *terminal); + +#endif /* __IA_CSS_PSYS_TERMINAL_MANIFEST_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/psysapi/static/interface/ia_css_psys_terminal_manifest.hsys.user.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/psysapi/static/interface/ia_css_psys_terminal_manifest.hsys.user.h new file mode 100644 index 0000000000000..1d2f06f3cbce9 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/psysapi/static/interface/ia_css_psys_terminal_manifest.hsys.user.h @@ -0,0 +1,38 @@ +/* +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ + +#ifndef __IA_CSS_PSYS_TERMINAL_MANIFEST_HSYS_USER_H +#define __IA_CSS_PSYS_TERMINAL_MANIFEST_HSYS_USER_H + +/*! 
\file */ + +/** @file ia_css_psys_terminal.hsys.user.h + * + * Define the methods on the terminal manifest object: Hsys user interface + */ + +#include + +/*! Print the terminal manifest object to file/stream + + @param manifest[in] terminal manifest object + @param fid[out] file/stream handle + + @return < 0 on error + */ +extern int ia_css_terminal_manifest_print( + const ia_css_terminal_manifest_t *manifest, + void *fid); + +#endif /* __IA_CSS_PSYS_TERMINAL_MANIFEST_HSYS_USER_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/psysapi/static/interface/ia_css_psys_terminal_manifest.sim.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/psysapi/static/interface/ia_css_psys_terminal_manifest.sim.h new file mode 100644 index 0000000000000..f7da810d82f19 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/psysapi/static/interface/ia_css_psys_terminal_manifest.sim.h @@ -0,0 +1,48 @@ +/* +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ + +#ifndef __IA_CSS_PSYS_TERMINAL_MANIFEST_SIM_H +#define __IA_CSS_PSYS_TERMINAL_MANIFEST_SIM_H + +/*! \file */ + +/** @file ia_css_psys_terminal_manifest.sim.h + * + * Define the methods on the terminal manifest object: Simulation only + */ + +#include /* size_t */ +#include "ia_css_terminal.h" +#include "ia_css_terminal_manifest.h" +#include "ia_css_terminal_defs.h" + +/*! 
Create (the storage for) the terminal manifest object + + @param terminal_type[in] type of the terminal manifest {parameter, data} + + @return NULL on error + */ +extern ia_css_terminal_manifest_t *ia_css_terminal_manifest_alloc( + const ia_css_terminal_type_t terminal_type); + +/*! Destroy (the storage of) the terminal manifest object + + @param manifest[in] terminal manifest + + @return NULL + */ +extern ia_css_terminal_manifest_t *ia_css_terminal_manifest_free( + ia_css_terminal_manifest_t *manifest); + +#endif /* __IA_CSS_PSYS_TERMINAL_MANIFEST_SIM_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/psysapi/static/src/ia_css_psys_program_group_manifest.c b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/psysapi/static/src/ia_css_psys_program_group_manifest.c new file mode 100644 index 0000000000000..5af4de7463104 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/psysapi/static/src/ia_css_psys_program_group_manifest.c @@ -0,0 +1,1038 @@ +/* +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+*/ + +#include "ia_css_psys_static_storage_class.h" +#include "ia_css_psys_program_group_manifest.h" +#include "ia_css_rbm_manifest.h" + +/* + * Functions to possibly inline + */ + +#ifndef __IA_CSS_PSYS_STATIC_INLINE__ +#include "ia_css_psys_program_group_manifest_impl.h" +#endif /* __IA_CSS_PSYS_STATIC_INLINE__ */ + +/* + * Functions not to inline + */ + +/* + * We need to refactor those files in order to + * build in the firmware only what is needed, + * switches are put current to workaround compilation problems + * in the firmware (for example lack of uint64_t support) + * supported in the firmware + */ +#if !defined(__HIVECC) +size_t ia_css_sizeof_program_group_manifest( + const uint8_t program_count, + const uint8_t terminal_count, + const uint8_t *program_dependency_count, + const uint8_t *terminal_dependency_count, + const ia_css_terminal_type_t *terminal_type, + const uint16_t cached_in_param_section_count, + const uint16_t cached_out_param_section_count, + const uint16_t *spatial_param_section_count, + const uint16_t fragment_param_section_count, + const uint16_t *sliced_param_section_count, + const uint16_t *sliced_out_param_section_count, + const uint16_t kernel_fragment_seq_count, + const uint16_t *progctrlinit_load_section_counts, + const uint16_t *progctrlinit_connect_section_counts) +{ + size_t size = 0; + int i = 0; + int j = 0; + int m = 0; + int n = 0; + + IA_CSS_TRACE_0(PSYSAPI_STATIC, VERBOSE, + "ia_css_sizeof_program_group_manifest(): enter:\n"); + + verifexit(program_count != 0); + verifexit(program_dependency_count != NULL); + verifexit(terminal_dependency_count != NULL); + + size += sizeof(ia_css_program_group_manifest_t); + + /* Private payload in the program group manifest */ + size += ceil_mul(sizeof(struct ia_css_psys_private_pg_data), + sizeof(uint64_t)); + /* RBM manifest in the program group manifest */ + size += ceil_mul(sizeof(ia_css_rbm_manifest_t), + sizeof(uint64_t)); + + for (i = 0; i < (int)program_count; i++) { + size += 
ia_css_sizeof_program_manifest( + program_dependency_count[i], + terminal_dependency_count[i]); + } + + for (i = 0; i < (int)terminal_count; i++) { + switch (terminal_type[i]) { + case IA_CSS_TERMINAL_TYPE_PARAM_CACHED_IN: + size += ia_css_param_terminal_manifest_get_size( + cached_in_param_section_count); + break; + case IA_CSS_TERMINAL_TYPE_PARAM_CACHED_OUT: + size += ia_css_param_terminal_manifest_get_size( + cached_out_param_section_count); + break; + case IA_CSS_TERMINAL_TYPE_PARAM_SPATIAL_IN: + case IA_CSS_TERMINAL_TYPE_PARAM_SPATIAL_OUT: + size += ia_css_spatial_param_terminal_manifest_get_size( + spatial_param_section_count[j]); + j++; + break; + case IA_CSS_TERMINAL_TYPE_PROGRAM: + size += ia_css_program_terminal_manifest_get_size( + fragment_param_section_count, + kernel_fragment_seq_count); + break; + case IA_CSS_TERMINAL_TYPE_PROGRAM_CONTROL_INIT: + size += ia_css_program_control_init_terminal_manifest_get_size( + program_count, + progctrlinit_load_section_counts, + progctrlinit_connect_section_counts); + break; + case IA_CSS_TERMINAL_TYPE_DATA_IN: + case IA_CSS_TERMINAL_TYPE_DATA_OUT: + size += sizeof(ia_css_data_terminal_manifest_t); + break; + case IA_CSS_TERMINAL_TYPE_PARAM_SLICED_IN: + size += ia_css_sliced_param_terminal_manifest_get_size( + sliced_param_section_count[m]); + m++; + break; + case IA_CSS_TERMINAL_TYPE_PARAM_SLICED_OUT: + size += ia_css_sliced_param_terminal_manifest_get_size( + sliced_out_param_section_count[n]); + n++; + break; + default: + IA_CSS_TRACE_0(PSYSAPI_STATIC, WARNING, + "ia_css_sizeof_program_group_manifest invalid argument\n"); + } + } + +EXIT: + if (0 == program_count || 0 == terminal_count || + NULL == program_dependency_count || + NULL == terminal_dependency_count) { + IA_CSS_TRACE_0(PSYSAPI_STATIC, WARNING, + "ia_css_sizeof_program_group_manifest invalid argument\n"); + } + return size; +} + +/* + * Currently, the design of XNR kernel inside the *_pregdc program group, + * does not fit the exact model as is being 
asserted on in + * ia_css_is_program_group_manifest_valid. We therefore disable some checks. + * Further investigation is needed to determine whether *_pregdc program group + * can be changed or that the model must be changed. + * #define USE_SIMPLIFIED_GRAPH_MODEL 1 allows multiple programs to be + * connected to the same terminal, and it allows a kernel to be mapped over + * multiple programs. + */ +#define USE_SIMPLIFIED_GRAPH_MODEL 1 + +/* + * Model and/or check refinements + * - Parallel programs do not yet have mutual exclusive alternatives + * - The program dependencies do not need to be acyclic + * - Parallel programs need to have an equal kernel requirement + */ +bool ia_css_is_program_group_manifest_valid( + const ia_css_program_group_manifest_t *manifest) +{ + int i; + bool is_valid = false; + uint8_t terminal_count; + uint8_t program_count; + ia_css_kernel_bitmap_t total_bitmap; + ia_css_kernel_bitmap_t check_bitmap; + ia_css_kernel_bitmap_t terminal_bitmap; + /* + * Use a standard bitmap type for the minimum logic to check the DAG, + * generic functions can be used for the kernel enable bitmaps; Later + */ + vied_nci_resource_bitmap_t resource_bitmap; + int terminal_bitmap_weight; + bool has_parameter_terminal_in = false; + bool has_parameter_terminal_out = false; + bool has_program_control_init_terminal = false; + bool has_program_terminal = false; + bool has_program_terminal_sequencer_info = false; + + IA_CSS_TRACE_0(PSYSAPI_STATIC, VERBOSE, + "ia_css_is_program_group_manifest_valid(): enter:\n"); + + verifexit(manifest != NULL); + verifexit(ia_css_program_group_manifest_get_size(manifest) != 0); + verifexit(ia_css_program_group_manifest_get_alignment(manifest) != 0); + verifexit(ia_css_program_group_manifest_get_program_group_ID(manifest) != 0); + + terminal_count = + ia_css_program_group_manifest_get_terminal_count(manifest); + program_count = + ia_css_program_group_manifest_get_program_count(manifest); + total_bitmap = + 
ia_css_program_group_manifest_get_kernel_bitmap(manifest); + check_bitmap = ia_css_kernel_bitmap_clear(); + resource_bitmap = vied_nci_bit_mask(VIED_NCI_RESOURCE_BITMAP_BITS); + terminal_bitmap = ia_css_kernel_bitmap_clear(); + + verifexit(program_count != 0); + verifexit(terminal_count != 0); + verifexit(!ia_css_is_kernel_bitmap_empty(total_bitmap)); + verifexit(vied_nci_is_bitmap_empty(resource_bitmap)); + + /* Check the kernel bitmaps for terminals */ + for (i = 0; i < (int)terminal_count; i++) { + ia_css_terminal_manifest_t *terminal_manifest_i = + ia_css_program_group_manifest_get_term_mnfst( + manifest, i); + bool is_parameter_in = + (IA_CSS_TERMINAL_TYPE_PARAM_CACHED_IN == + ia_css_terminal_manifest_get_type( + terminal_manifest_i)); + bool is_parameter_out = + (IA_CSS_TERMINAL_TYPE_PARAM_CACHED_OUT == + ia_css_terminal_manifest_get_type( + terminal_manifest_i)); + bool is_data = + ia_css_is_terminal_manifest_data_terminal( + terminal_manifest_i); + bool is_program = + ia_css_is_terminal_manifest_program_terminal( + terminal_manifest_i); + bool is_spatial_param = + ia_css_is_terminal_manifest_spatial_parameter_terminal( + terminal_manifest_i); + bool is_program_control_init = + ia_css_is_terminal_manifest_program_control_init_terminal( + terminal_manifest_i); + + if (is_parameter_in) { + /* + * There can be only one cached in parameter terminal + * it serves kernels, not programs + */ + verifexit(!has_parameter_terminal_in); + has_parameter_terminal_in = is_parameter_in; + } else if (is_parameter_out) { + /* + * There can be only one cached out parameter terminal + * it serves kernels, not programs + */ + verifexit(!has_parameter_terminal_out); + has_parameter_terminal_out = is_parameter_out; + } else if (is_data) { + ia_css_data_terminal_manifest_t *dterminal_manifest_i = + (ia_css_data_terminal_manifest_t *) + terminal_manifest_i; + ia_css_kernel_bitmap_t terminal_bitmap_i = + ia_css_data_terminal_manifest_get_kernel_bitmap( + dterminal_manifest_i); + /* + 
* A terminal must depend on kernels that are a subset + * of the total, correction, it can only depend on one + * kernel + */ + verifexit(!ia_css_is_kernel_bitmap_empty( + terminal_bitmap_i)); + verifexit(ia_css_is_kernel_bitmap_subset( + total_bitmap, terminal_bitmap_i)); + verifexit(ia_css_is_kernel_bitmap_onehot( + terminal_bitmap_i)); + } else if (is_program) { + verifexit(!has_program_terminal); + verifexit(terminal_manifest_i); + has_program_terminal = is_program; + has_program_terminal_sequencer_info = + (((ia_css_program_terminal_manifest_t *) + terminal_manifest_i)-> + kernel_fragment_sequencer_info_manifest_info_count + != 0); + } else if (is_program_control_init) { + has_program_control_init_terminal = is_program_control_init; + } else { + const ia_css_spatial_param_terminal_manifest_t + *spatial_param_man = + (const ia_css_spatial_param_terminal_manifest_t *) + terminal_manifest_i; + verifexit(spatial_param_man); + verifexit(is_spatial_param); + + terminal_bitmap = + ia_css_kernel_bitmap_set(terminal_bitmap, + spatial_param_man->kernel_id); + verifexit(!ia_css_is_kernel_bitmap_empty(terminal_bitmap)); + verifexit(ia_css_is_kernel_bitmap_subset( + total_bitmap, terminal_bitmap)); + } + } + + /* Check the kernel bitmaps for programs */ + for (i = 0; i < (int)program_count; i++) { + int j; + ia_css_program_manifest_t *program_manifest_i = + ia_css_program_group_manifest_get_prgrm_mnfst( + manifest, i); + ia_css_program_type_t program_type_i = + ia_css_program_manifest_get_type(program_manifest_i); + ia_css_kernel_bitmap_t program_bitmap_i = + ia_css_program_manifest_get_kernel_bitmap( + program_manifest_i); + uint8_t program_dependency_count_i = + ia_css_program_manifest_get_program_dependency_count( + program_manifest_i); + uint8_t terminal_dependency_count_i = + ia_css_program_manifest_get_terminal_dependency_count( + program_manifest_i); + uint8_t program_dependency_i0 = + ia_css_program_manifest_get_program_dependency( + program_manifest_i, 0); + bool 
is_sub_i = + ia_css_is_program_manifest_subnode_program_type( + program_manifest_i); + bool is_exclusive_sub_i = + (program_type_i == IA_CSS_PROGRAM_TYPE_EXCLUSIVE_SUB); + bool is_virtual_sub_i = + (program_type_i == IA_CSS_PROGRAM_TYPE_VIRTUAL_SUB); + bool is_super_i = + ia_css_is_program_manifest_supernode_program_type( + program_manifest_i); + + /* + * A program must have kernels that + * are a subset of the total + */ + verifexit(!ia_css_is_kernel_bitmap_empty( + program_bitmap_i)); + verifexit(ia_css_is_kernel_bitmap_subset( + total_bitmap, program_bitmap_i)); + verifexit((program_type_i != IA_CSS_N_PROGRAM_TYPES)); + verifexit((program_dependency_count_i + terminal_dependency_count_i) != 0); + /* + * Checks for subnodes + * - Parallel subnodes cannot depend on terminals + * - Exclusive subnodes must depend on + * fewer terminals than the supernode + * - Subnodes only depend on a supernode of the same type + * - Must have a subset of the supernode's kernels + * (but not equal) + * - This tests only positive cases + * Checks for singular or supernodes + * - Cannot depend on exclusive subnodes + * - No intersection between kernels + * (too strict for multiple instances ?) + */ + if (is_sub_i) { + /* Subnode */ + ia_css_program_manifest_t *program_manifest_k = + ia_css_program_group_manifest_get_prgrm_mnfst( + manifest, program_dependency_i0); + ia_css_program_type_t program_type_k = + ia_css_program_manifest_get_type( + program_manifest_k); + ia_css_kernel_bitmap_t program_bitmap_k = + ia_css_program_manifest_get_kernel_bitmap( + program_manifest_k); + + verifexit(program_dependency_count_i == 1); + if (is_exclusive_sub_i || is_virtual_sub_i) { + verifexit(terminal_dependency_count_i <= + ia_css_program_manifest_get_terminal_dependency_count( + program_manifest_k)); + } else{ + verifexit(terminal_dependency_count_i == 0); + } + verifexit(program_type_k == + (is_exclusive_sub_i ? + IA_CSS_PROGRAM_TYPE_EXCLUSIVE_SUPER : + is_virtual_sub_i ? 
+ IA_CSS_PROGRAM_TYPE_VIRTUAL_SUPER : + IA_CSS_PROGRAM_TYPE_PARALLEL_SUPER)); + verifexit(!ia_css_is_kernel_bitmap_equal( + program_bitmap_k, program_bitmap_i)); + verifexit(ia_css_is_kernel_bitmap_subset( + program_bitmap_k, program_bitmap_i)); + } else { + /* Singular or Supernode */ + int k; + + for (k = 0; k < program_dependency_count_i; k++) { + uint8_t program_dependency_k = + ia_css_program_manifest_get_program_dependency( + program_manifest_i, k); + ia_css_program_manifest_t *program_manifest_k = + ia_css_program_group_manifest_get_prgrm_mnfst( + manifest, (int)program_dependency_k); + ia_css_program_type_t program_type_k = + ia_css_program_manifest_get_type( + program_manifest_k); + ia_css_kernel_bitmap_t program_bitmap_k = + ia_css_program_manifest_get_kernel_bitmap( + program_manifest_k); + + verifexit(program_dependency_k < + program_count); + verifexit((program_type_k != + IA_CSS_PROGRAM_TYPE_EXCLUSIVE_SUB) && + (program_type_k != + IA_CSS_PROGRAM_TYPE_VIRTUAL_SUB)); +#if USE_SIMPLIFIED_GRAPH_MODEL == 0 + verifexit(ia_css_is_kernel_bitmap_intersection_empty( + program_bitmap_i, program_bitmap_k)); +#else + (void)program_bitmap_k; +#endif + } + } + + /* Check for relations */ + for (j = 0; j < (int)program_count; j++) { + int k; + ia_css_program_manifest_t *program_manifest_j = + ia_css_program_group_manifest_get_prgrm_mnfst( + manifest, j); + ia_css_program_type_t program_type_j = + ia_css_program_manifest_get_type(program_manifest_j); + ia_css_kernel_bitmap_t program_bitmap_j = + ia_css_program_manifest_get_kernel_bitmap( + program_manifest_j); + uint8_t program_dependency_count_j = + ia_css_program_manifest_get_program_dependency_count( + program_manifest_j); + uint8_t program_dependency_j0 = + ia_css_program_manifest_get_program_dependency( + program_manifest_j, 0); + bool is_sub_j = + ia_css_is_program_manifest_subnode_program_type( + program_manifest_j); + bool is_super_j = + ia_css_is_program_manifest_supernode_program_type( + 
program_manifest_j); + bool is_virtual_sub_j = + (program_type_j == IA_CSS_PROGRAM_TYPE_VIRTUAL_SUB); + bool is_j_subset_i = + ia_css_is_kernel_bitmap_subset( + program_bitmap_i, program_bitmap_j); + bool is_i_subset_j = + ia_css_is_kernel_bitmap_subset( + program_bitmap_j, program_bitmap_i); + + /* Test below would fail for i==j */ + if (i == j) + continue; + + /* Empty sets are always subsets, but meaningless */ + verifexit(!ia_css_is_kernel_bitmap_empty( + program_bitmap_j)); + + /* + * Checks for mutual subnodes + * - Parallel subnodes must have an equal + * set of kernels + * - Exclusive and virtual subnodes must + * have an unequal set of kernels + * Checks for subnodes + * - Subnodes must have a subset of kernels + */ + if (((program_type_i == + IA_CSS_PROGRAM_TYPE_PARALLEL_SUB) && + (program_type_j == + IA_CSS_PROGRAM_TYPE_PARALLEL_SUB)) || + ((program_type_i == + IA_CSS_PROGRAM_TYPE_EXCLUSIVE_SUB) && + (program_type_j == + IA_CSS_PROGRAM_TYPE_EXCLUSIVE_SUB)) || + ((program_type_i == + IA_CSS_PROGRAM_TYPE_VIRTUAL_SUB) && + (program_type_j == + IA_CSS_PROGRAM_TYPE_VIRTUAL_SUB))) { + + verifexit(program_dependency_count_j == 1); + verifexit(program_dependency_i0 != i); + verifexit(program_dependency_j0 != i); + + if (program_dependency_i0 == + program_dependency_j0) { + verifexit(is_sub_i); + /* + * Subnodes are subsets, + * not for virtual nodes + */ + if (!is_virtual_sub_i) + verifexit( + ((is_j_subset_i || + is_i_subset_j))); + /* + * That must be equal for + * parallel subnodes, + * must be unequal for + * exlusive and virtual subnodes + */ + verifexit( + ((is_j_subset_i && is_i_subset_j) ^ + (is_exclusive_sub_i | + is_virtual_sub_i))); + + } + if (is_j_subset_i || is_i_subset_j) { + verifexit(program_dependency_i0 == + program_dependency_j0); + } + } + + if (((program_type_i == + IA_CSS_PROGRAM_TYPE_PARALLEL_SUPER) && + (program_type_j == + IA_CSS_PROGRAM_TYPE_PARALLEL_SUB)) || + ((program_type_i == + IA_CSS_PROGRAM_TYPE_EXCLUSIVE_SUPER) && + 
(program_type_j == + IA_CSS_PROGRAM_TYPE_EXCLUSIVE_SUB)) || + ((program_type_i == + IA_CSS_PROGRAM_TYPE_VIRTUAL_SUPER) && + (program_type_j == + IA_CSS_PROGRAM_TYPE_VIRTUAL_SUB))) { + + verifexit(program_dependency_count_j == 1); + verifexit(!is_i_subset_j); + + if (program_dependency_j0 == i) { + verifexit(program_dependency_i0 != + program_dependency_j0); + verifexit(is_super_i); + verifexit(is_j_subset_i); + + } + if (is_j_subset_i) { + verifexit(program_dependency_j0 == i); + } + } + + /* + * Checks for dependent nodes + * - Cannot depend on exclusive subnodes + * - No intersection between kernels + * (too strict for multiple instances ?) + * unless a subnode + */ + for (k = 0; k < (int)program_dependency_count_j; k++) { + uint8_t program_dependency_k = + ia_css_program_manifest_get_program_dependency( + program_manifest_j, k); + + verifexit((program_dependency_k < + program_count)); + if (program_dependency_k == i) { + /* program[j] depends on program[i] */ + verifexit((i != j)); + verifexit((program_type_i != + IA_CSS_PROGRAM_TYPE_EXCLUSIVE_SUB) && + (program_type_i != + IA_CSS_PROGRAM_TYPE_VIRTUAL_SUB)); + verifexit(USE_SIMPLIFIED_GRAPH_MODEL || + (ia_css_is_kernel_bitmap_intersection_empty( + program_bitmap_i, program_bitmap_j) ^ is_sub_j)); + } + } + + /* + * Checks for supernodes and subnodes + * - Detect nodes that kernel-wise are subsets, + * but not connected to the correct supernode + * - We do not (yet) detect if programs properly + * depend on all parallel nodes + */ + if (!ia_css_is_kernel_bitmap_intersection_empty( + program_bitmap_i, program_bitmap_j)) { + /* + * This test will pass if + * the program manifest is NULL, + * but that's no concern here + */ +#if USE_SIMPLIFIED_GRAPH_MODEL == 0 + verifexit(!ia_css_is_program_manifest_singular_program_type( + program_manifest_i)); + verifexit(!ia_css_is_program_manifest_singular_program_type( + program_manifest_j)); + if (!is_virtual_sub_j) + verifexit((is_j_subset_i || is_i_subset_j)); +#else + 
(void)is_virtual_sub_j; +#endif + if (is_super_i) { + verifexit(is_sub_j); + verifexit(program_dependency_j0 == i); + } + if (is_super_j) { + verifexit(is_sub_i); + verifexit(program_dependency_i0 == j); + } + } + } + check_bitmap = ia_css_kernel_bitmap_union( + check_bitmap, program_bitmap_i); + /* + * A terminal can be bound to only a single + * (of multiple concurrent) program(s), + * i.e. the one that holds the iterator to control it + * Only singular and super nodes can depend on a terminal. + * This loop accumulates all terminal + * dependencies over all programs + */ + for (j = 0; j < (int)terminal_dependency_count_i; j++) { + uint8_t terminal_dependency = + ia_css_program_manifest_get_terminal_dependency( + program_manifest_i, j); + + verifexit(terminal_dependency < terminal_count); + if ((program_type_i != + IA_CSS_PROGRAM_TYPE_EXCLUSIVE_SUB) && + (program_type_i != + IA_CSS_PROGRAM_TYPE_VIRTUAL_SUB)) { + /* If the subnode always came after the */ + /* supernode we could check for presence */ + resource_bitmap = + vied_nci_bit_mask_set_unique( + resource_bitmap, + terminal_dependency); +#if USE_SIMPLIFIED_GRAPH_MODEL == 0 + verifexit(!vied_nci_is_bitmap_empty( + resource_bitmap)); +#endif + } + } + } + verifexit(ia_css_is_kernel_bitmap_equal( + total_bitmap, check_bitmap)); + + terminal_bitmap_weight = + vied_nci_bitmap_compute_weight(resource_bitmap); + verifexit(terminal_bitmap_weight >= 0); + if (has_parameter_terminal_in || + has_parameter_terminal_out || + has_program_terminal || + has_program_control_init_terminal) { + int skip_terminal_count = 0; + + if (has_parameter_terminal_in) + skip_terminal_count++; + if (has_parameter_terminal_out) + skip_terminal_count++; + if (has_program_control_init_terminal) { + skip_terminal_count++; + } + if (has_program_terminal) + skip_terminal_count++; + if (has_program_terminal_sequencer_info) + skip_terminal_count--; +#if USE_SIMPLIFIED_GRAPH_MODEL == 0 + verifexit((terminal_bitmap_weight == + (terminal_count - 
skip_terminal_count))); +#endif + } else + verifexit((terminal_bitmap_weight == terminal_count)); + + is_valid = true; +EXIT: + if (is_valid == false) { + IA_CSS_TRACE_0(PSYSAPI_STATIC, ERROR, + "ia_css_is_program_group_manifest_valid: failed\n"); + } + return is_valid; +} + +int ia_css_program_group_manifest_set_kernel_bitmap( + ia_css_program_group_manifest_t *manifest, + const ia_css_kernel_bitmap_t bitmap) +{ + int retval = -1; + + IA_CSS_TRACE_0(PSYSAPI_STATIC, VERBOSE, + "ia_css_program_group_manifest_set_kernel_bitmap(): enter:\n"); + + if (manifest != NULL) { + manifest->kernel_bitmap = bitmap; + retval = 0; + } else { + IA_CSS_TRACE_0(PSYSAPI_STATIC, WARNING, + "ia_css_program_group_manifest_set_kernel_bitmap invalid argument\n"); + } + return retval; +} + +ia_css_kernel_bitmap_t ia_css_program_group_manifest_get_kernel_bitmap( + const ia_css_program_group_manifest_t *manifest) +{ + ia_css_kernel_bitmap_t bitmap = ia_css_kernel_bitmap_clear(); + + IA_CSS_TRACE_0(PSYSAPI_STATIC, VERBOSE, + "ia_css_program_group_manifest_get_kernel_bitmap(): enter:\n"); + + if (manifest != NULL) { + bitmap = manifest->kernel_bitmap; + } else { + IA_CSS_TRACE_0(PSYSAPI_STATIC, WARNING, + "ia_css_program_group_manifest_get_kernel_bitmap invalid argument\n"); + } + return bitmap; +} + +void ia_css_program_group_manifest_init( + ia_css_program_group_manifest_t *blob, + const uint8_t program_count, + const uint8_t terminal_count, + const uint8_t *program_dependencies, + const uint8_t *terminal_dependencies, + const ia_css_terminal_type_t *terminal_type, + const uint16_t cached_in_param_section_count, + const uint16_t cached_out_param_section_count, + const uint16_t *spatial_param_section_count, + const uint16_t fragment_param_section_count, + const uint16_t *sliced_in_param_section_count, + const uint16_t *sliced_out_param_section_count, + const uint16_t kernel_fragment_seq_count, + const uint16_t *progctrlinit_load_section_counts, + const uint16_t 
*progctrlinit_connect_section_counts) +{ + int i = 0; + int j = 0; + int m = 0; + int n = 0; + int result; + uint32_t offset = 0; + char *prg_manifest_base, *terminal_manifest_base; + size_t program_size = 0; + + /* + * assert(blob != NULL); + */ + COMPILATION_ERROR_IF( + SIZE_OF_DATA_TERMINAL_MANIFEST_STRUCT_IN_BITS != + (CHAR_BIT * sizeof(ia_css_data_terminal_manifest_t))); + COMPILATION_ERROR_IF( + SIZE_OF_PROGRAM_GROUP_MANIFEST_STRUCT_IN_BITS != + (CHAR_BIT * sizeof(ia_css_program_group_manifest_t))); + COMPILATION_ERROR_IF( + SIZE_OF_PROGRAM_MANIFEST_STRUCT_IN_BITS != + (CHAR_BIT * sizeof(ia_css_program_manifest_t))); + + IA_CSS_TRACE_0(PSYSAPI_STATIC, INFO, + "ia_css_program_group_manifest_init(): enter:\n"); + + for (i = 0; i < (int)program_count; i++) { + program_size += + ia_css_sizeof_program_manifest(program_dependencies[i], + terminal_dependencies[i]); + } + + /* A program group ID cannot be zero */ + blob->ID = 1; + blob->program_count = program_count; + blob->terminal_count = terminal_count; + blob->program_manifest_offset = sizeof(ia_css_program_group_manifest_t); + blob->terminal_manifest_offset = + (uint32_t)blob->program_manifest_offset + program_size; + + prg_manifest_base = (char *) + (((char *)blob) + blob->program_manifest_offset); + offset = blob->program_manifest_offset; + for (i = 0; i < (int)program_count; i++) { + ia_css_program_manifest_init( + (ia_css_program_manifest_t *)prg_manifest_base, + program_dependencies[i], terminal_dependencies[i]); + ia_css_program_manifest_set_parent_offset( + (ia_css_program_manifest_t *)prg_manifest_base, offset); + program_size = + ia_css_sizeof_program_manifest(program_dependencies[i], + terminal_dependencies[i]); + prg_manifest_base += program_size; + offset += (uint32_t)program_size; + } + + offset = blob->terminal_manifest_offset; + terminal_manifest_base = (char *) (((char *)blob) + offset); + for (i = 0; i < (int)terminal_count; i++) { + size_t terminal_size = 0; + ia_css_terminal_manifest_t 
*term_manifest = + (ia_css_terminal_manifest_t *)terminal_manifest_base; + + ia_css_terminal_manifest_set_parent_offset( + (ia_css_terminal_manifest_t *) + terminal_manifest_base, + offset); + switch (terminal_type[i]) { + case IA_CSS_TERMINAL_TYPE_PARAM_CACHED_IN: + result = ia_css_param_terminal_manifest_init( + (ia_css_param_terminal_manifest_t *) + term_manifest, + cached_in_param_section_count); + if (0 == result) { + terminal_size = + ia_css_param_terminal_manifest_get_size( + cached_in_param_section_count); + } else { + IA_CSS_TRACE_0(PSYSAPI_STATIC, ERROR, + "ia_css_param_terminal_manifest_init failed in cached in terminal\n"); + } + break; + case IA_CSS_TERMINAL_TYPE_PARAM_CACHED_OUT: + result = ia_css_param_terminal_manifest_init( + (ia_css_param_terminal_manifest_t *) + term_manifest, + cached_out_param_section_count); + if (0 == result) { + terminal_size = + ia_css_param_terminal_manifest_get_size( + cached_out_param_section_count); + } else { + IA_CSS_TRACE_0(PSYSAPI_STATIC, ERROR, + "ia_css_param_terminal_manifest_init failed\n"); + } + break; + case IA_CSS_TERMINAL_TYPE_PARAM_SPATIAL_IN: + case IA_CSS_TERMINAL_TYPE_PARAM_SPATIAL_OUT: + result = ia_css_spatial_param_terminal_manifest_init( + (ia_css_spatial_param_terminal_manifest_t *) + term_manifest, + spatial_param_section_count[j]); + if (0 == result) { + terminal_size = + ia_css_spatial_param_terminal_manifest_get_size( + spatial_param_section_count[j]); + j++; + } else { + IA_CSS_TRACE_0(PSYSAPI_STATIC, ERROR, + "ia_css_spatial_param_terminal_manifest_init failed in spatial terminal\n"); + } + break; + case IA_CSS_TERMINAL_TYPE_PROGRAM: + result = ia_css_program_terminal_manifest_init( + (ia_css_program_terminal_manifest_t *) + term_manifest, + fragment_param_section_count, + kernel_fragment_seq_count); + if (0 == result) { + terminal_size = + ia_css_program_terminal_manifest_get_size( + fragment_param_section_count, + kernel_fragment_seq_count); + } else { + IA_CSS_TRACE_0(PSYSAPI_STATIC, 
ERROR, + "ia_css_program_terminal_manifest_init failed in program terminal\n"); + } + break; + case IA_CSS_TERMINAL_TYPE_PROGRAM_CONTROL_INIT: + result = ia_css_program_control_init_terminal_manifest_init( + (ia_css_program_control_init_terminal_manifest_t *) + term_manifest, + program_count, + progctrlinit_load_section_counts, + progctrlinit_connect_section_counts); + if (0 == result) { + terminal_size = + ia_css_program_control_init_terminal_manifest_get_size( + program_count, + NULL, + NULL); + } else { + IA_CSS_TRACE_0(PSYSAPI_STATIC, ERROR, + "ia_css_program_control_init_terminal_manifest_init failed\n"); + } + break; + case IA_CSS_TERMINAL_TYPE_DATA_IN: + case IA_CSS_TERMINAL_TYPE_DATA_OUT: + terminal_size = sizeof(ia_css_data_terminal_manifest_t); + break; + case IA_CSS_TERMINAL_TYPE_PARAM_SLICED_IN: + result = ia_css_sliced_param_terminal_manifest_init( + (ia_css_sliced_param_terminal_manifest_t *) + term_manifest, + sliced_in_param_section_count[m]); + if (0 == result) { + terminal_size = + ia_css_sliced_param_terminal_manifest_get_size( + sliced_in_param_section_count[m]); + m++; + } else { + IA_CSS_TRACE_0(PSYSAPI_STATIC, ERROR, + "ia_css_param_terminal_manifest_init in sliced terminal failed\n"); + } + break; + case IA_CSS_TERMINAL_TYPE_PARAM_SLICED_OUT: + result = ia_css_sliced_param_terminal_manifest_init( + (ia_css_sliced_param_terminal_manifest_t *) + term_manifest, + sliced_out_param_section_count[n]); + if (0 == result) { + terminal_size = + ia_css_sliced_param_terminal_manifest_get_size( + sliced_out_param_section_count[n]); + n++; + } else { + IA_CSS_TRACE_0(PSYSAPI_STATIC, ERROR, + "ia_css_param_terminal_manifest_init in sliced out terminal failed\n"); + } + break; + default: + IA_CSS_TRACE_0(PSYSAPI_STATIC, WARNING, + "ia_css_program_group_manifest_init invalid argument\n"); + } + term_manifest->size = (uint16_t)terminal_size; + term_manifest->terminal_type = terminal_type[i]; + terminal_manifest_base += terminal_size; + offset += 
(uint32_t)terminal_size; + } + + /* Set the private program group manifest blob offset */ + blob->private_data_offset = offset; + offset += ceil_mul(sizeof(struct ia_css_psys_private_pg_data), + sizeof(uint64_t)); + + /* Set the RBM manifest blob offset */ + blob->rbm_manifest_offset = offset; + offset += ceil_mul(sizeof(ia_css_rbm_manifest_t), + sizeof(uint64_t)); + + assert(offset <= UINT16_MAX); + blob->size = (uint16_t)offset; +} + +int ia_css_program_group_manifest_print( + const ia_css_program_group_manifest_t *manifest, + void *fid) +{ + int retval = -1; + int i; + uint8_t program_count, terminal_count; + ia_css_kernel_bitmap_t bitmap; + struct ia_css_psys_private_pg_data *priv_data; + + IA_CSS_TRACE_0(PSYSAPI_STATIC, INFO, + "ia_css_program_group_manifest_print(): enter:\n"); + + NOT_USED(fid); + + verifexit(manifest != NULL); + + IA_CSS_TRACE_1(PSYSAPI_STATIC, INFO, + "sizeof(manifest) = %d\n", + (int)ia_css_program_group_manifest_get_size(manifest)); + IA_CSS_TRACE_1(PSYSAPI_STATIC, INFO, + "alignment(manifest) = %d\n", + (int)ia_css_program_group_manifest_get_alignment(manifest)); + + IA_CSS_TRACE_1(PSYSAPI_STATIC, INFO, + "program group ID = %d\n", + (int)ia_css_program_group_manifest_get_program_group_ID( + manifest)); + + program_count = + ia_css_program_group_manifest_get_program_count(manifest); + terminal_count = + ia_css_program_group_manifest_get_terminal_count(manifest); + + bitmap = ia_css_program_group_manifest_get_kernel_bitmap(manifest); + verifexit(ia_css_kernel_bitmap_print(bitmap, fid) == 0); + + IA_CSS_TRACE_1(PSYSAPI_STATIC, INFO, + "%d program manifests\n", (int)program_count); + for (i = 0; i < (int)program_count; i++) { + ia_css_program_manifest_t *program_manifest = + ia_css_program_group_manifest_get_prgrm_mnfst( + manifest, i); + + retval = ia_css_program_manifest_print(program_manifest, fid); + verifjmpexit(retval == 0); + } + IA_CSS_TRACE_1(PSYSAPI_STATIC, INFO, + "%d terminal manifests\n", (int)terminal_count); + for (i = 0; i 
< (int)terminal_count; i++) { + ia_css_terminal_manifest_t *terminal_manifest = + ia_css_program_group_manifest_get_term_mnfst( + manifest, i); + + retval = ia_css_terminal_manifest_print( + terminal_manifest, fid); + verifjmpexit(retval == 0); + } + + priv_data = + (struct ia_css_psys_private_pg_data *) + ia_css_program_group_manifest_get_private_data(manifest); + IA_CSS_TRACE_1(PSYSAPI_STATIC, INFO, + "private_data_offset %d\n", manifest->private_data_offset); + + for (i = 0; i < IPU_DEVICE_GP_PSA_MUX_NUM_MUX; i++) { + IA_CSS_TRACE_2(PSYSAPI_STATIC, INFO, + "PSA MUX id %d mux val %d\n", i, + priv_data->psa_mux_conf[i]); + + } + + for (i = 0; i < IPU_DEVICE_GP_ISA_STATIC_MUX_NUM_MUX; i++) { + IA_CSS_TRACE_2(PSYSAPI_STATIC, INFO, + "ISA MUX id %d mux val %d\n", i, + priv_data->isa_mux_conf[i]); + + } + + for (i = 0; i < IPU_DEVICE_ACB_NUM_ACB; i++) { + + if (priv_data->acb_route[i].in_select != + NCI_ACB_PORT_INVALID) { + + assert(priv_data->acb_route[i].in_select != + NCI_ACB_PORT_INVALID && + priv_data->acb_route[i].out_select != + NCI_ACB_PORT_INVALID); + + IA_CSS_TRACE_3(PSYSAPI_STATIC, INFO, + "Route Cell id %d In %d Out %d\n", i, + priv_data->acb_route[i].in_select, + priv_data->acb_route[i].out_select); + } + + } + + IA_CSS_TRACE_1(PSYSAPI_STATIC, INFO, "Input Buffer: buffer_base_addr 0x%x\n", + priv_data->input_buffer_info.buffer_base_addr); + IA_CSS_TRACE_1(PSYSAPI_STATIC, INFO, "Input Buffer: bpe = %d\n", + priv_data->input_buffer_info.bpe); + IA_CSS_TRACE_1(PSYSAPI_STATIC, INFO, "Input Buffer: buffer_width = %d\n", + priv_data->input_buffer_info.buffer_width); + IA_CSS_TRACE_1(PSYSAPI_STATIC, INFO, "Input Buffer: buffer_height = %d\n", + priv_data->input_buffer_info.buffer_height); + IA_CSS_TRACE_1(PSYSAPI_STATIC, INFO, "Input Buffer: num_of_buffers = %d\n", + priv_data->input_buffer_info.num_of_buffers); + IA_CSS_TRACE_1(PSYSAPI_STATIC, INFO, "Input Buffer: dfm_port_addr = 0x%x\n", + priv_data->input_buffer_info.dfm_port_addr); + + retval = 0; +EXIT: + 
if (retval != 0) { + IA_CSS_TRACE_1(PSYSAPI_STATIC, ERROR, + "ia_css_program_group_manifest_print failed (%i)\n", + retval); + } + return retval; +} +#endif /* !defined(__HIVECC) */ diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/psysapi/static/src/ia_css_psys_program_group_manifest_impl.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/psysapi/static/src/ia_css_psys_program_group_manifest_impl.h new file mode 100644 index 0000000000000..527b8cc00dd14 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/psysapi/static/src/ia_css_psys_program_group_manifest_impl.h @@ -0,0 +1,415 @@ +/* +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+*/ + +#ifndef __IA_CSS_PSYS_PROGRAM_GROUP_MANIFEST_IMPL_H +#define __IA_CSS_PSYS_PROGRAM_GROUP_MANIFEST_IMPL_H + +#include +#include +#include +#include +#include "ia_css_psys_program_group_private.h" +#include "ia_css_terminal_manifest_types.h" +#include "ia_css_psys_private_pg_data.h" +#include /* Safer bit mask functions */ +#include "ia_css_psys_static_trace.h" +#include "ia_css_rbm_manifest_types.h" +#include +#include +#include + +IA_CSS_PSYS_STATIC_STORAGE_CLASS_C +size_t ia_css_program_group_manifest_get_size( + const ia_css_program_group_manifest_t *manifest) +{ + size_t size = 0; + + IA_CSS_TRACE_0(PSYSAPI_STATIC, VERBOSE, + "ia_css_program_group_manifest_get_size(): enter:\n"); + + if (manifest != NULL) { + size = manifest->size; + } else { + IA_CSS_TRACE_0(PSYSAPI_STATIC, WARNING, + "ia_css_program_group_manifest_get_size invalid argument\n"); + } + return size; +} + +IA_CSS_PSYS_STATIC_STORAGE_CLASS_C +ia_css_program_group_ID_t +ia_css_program_group_manifest_get_program_group_ID( + const ia_css_program_group_manifest_t *manifest) +{ + ia_css_program_group_ID_t id = IA_CSS_PROGRAM_GROUP_INVALID_ID; + + IA_CSS_TRACE_0(PSYSAPI_STATIC, VERBOSE, + "ia_css_program_group_manifest_get_program_group_ID(): enter:\n"); + + if (manifest != NULL) { + id = manifest->ID; + } else { + IA_CSS_TRACE_0(PSYSAPI_STATIC, WARNING, + "ia_css_program_group_manifest_get_program_group_ID invalid argument\n"); + } + return id; +} + +IA_CSS_PSYS_STATIC_STORAGE_CLASS_C +int ia_css_program_group_manifest_set_program_group_ID( + ia_css_program_group_manifest_t *manifest, + ia_css_program_group_ID_t id) +{ + int retval = -1; + + IA_CSS_TRACE_0(PSYSAPI_STATIC, VERBOSE, + "ia_css_program_group_manifest_set_program_group_ID(): enter:\n"); + + if (manifest != NULL) { + manifest->ID = id; + retval = 0; + } else { + IA_CSS_TRACE_0(PSYSAPI_STATIC, WARNING, + "ia_css_program_group_manifest_set_program_group_ID invalid argument\n"); + } + return retval; +} + 
+IA_CSS_PSYS_STATIC_STORAGE_CLASS_C +int ia_css_program_group_manifest_set_alignment( + ia_css_program_group_manifest_t *manifest, + const uint8_t alignment) +{ + int retval = -1; + + IA_CSS_TRACE_0(PSYSAPI_STATIC, VERBOSE, + "ia_css_program_group_manifest_set_alignment(): enter:\n"); + + if (manifest != NULL) { + manifest->alignment = alignment; + retval = 0; + } else { + IA_CSS_TRACE_0(PSYSAPI_STATIC, WARNING, + "ia_css_program_group_manifest_set_alignment invalid argument\n"); + } + return retval; +} + +IA_CSS_PSYS_STATIC_STORAGE_CLASS_C +uint8_t ia_css_program_group_manifest_get_alignment( + const ia_css_program_group_manifest_t *manifest) +{ + uint8_t alignment = 0; + + IA_CSS_TRACE_0(PSYSAPI_STATIC, VERBOSE, + "ia_css_program_group_manifest_get_alignment(): enter:\n"); + + if (manifest != NULL) { + alignment = manifest->alignment; + } else { + IA_CSS_TRACE_0(PSYSAPI_STATIC, WARNING, + "ia_css_program_group_manifest_get_alignment invalid argument\n"); + } + return alignment; +} + +IA_CSS_PSYS_STATIC_STORAGE_CLASS_C +void *ia_css_program_group_manifest_get_private_data( + const ia_css_program_group_manifest_t *manifest) +{ + void *private_data = NULL; + + IA_CSS_TRACE_1(PSYSAPI_STATIC, VERBOSE, + "ia_css_program_group_manifest_get_private_data(%p): enter:\n", + manifest); + + verifexit(manifest != NULL); + + private_data = (void *)((const char *)manifest + + manifest->private_data_offset); +EXIT: + if (NULL == manifest) { + IA_CSS_TRACE_0(PSYSAPI_STATIC, WARNING, + "ia_css_program_group_manifest_get_private_data invalid argument\n"); + } + return private_data; +} + +IA_CSS_PSYS_STATIC_STORAGE_CLASS_C +ia_css_rbm_manifest_t *ia_css_program_group_manifest_get_rbm_manifest( + const ia_css_program_group_manifest_t *manifest) +{ + ia_css_rbm_manifest_t *rbm_manifest = NULL; + + IA_CSS_TRACE_1(PSYSAPI_STATIC, VERBOSE, + "ia_css_program_group_manifest_get_rbm_manifest(%p): enter:\n", + manifest); + + verifexit(manifest != NULL); + + rbm_manifest = 
(ia_css_rbm_manifest_t *)((const char *)manifest + + manifest->rbm_manifest_offset); + +EXIT: + if (NULL == manifest) { + IA_CSS_TRACE_0(PSYSAPI_STATIC, WARNING, + "ia_css_program_group_manifest_get_rbm_manifest invalid argument\n"); + } + return rbm_manifest; +} + +IA_CSS_PSYS_STATIC_STORAGE_CLASS_C +ia_css_program_manifest_t * +ia_css_program_group_manifest_get_prgrm_mnfst( + const ia_css_program_group_manifest_t *manifest, + const unsigned int program_index) +{ + ia_css_program_manifest_t *prg_manifest_base; + uint8_t *program_manifest = NULL; + uint8_t program_count; + unsigned int i; + + IA_CSS_TRACE_2(PSYSAPI_STATIC, VERBOSE, + "ia_css_program_group_manifest_get_prgrm_mnfst(%p,%d): enter:\n", + manifest, program_index); + + program_count = + ia_css_program_group_manifest_get_program_count(manifest); + + verifexit(manifest != NULL); + verifexit(program_index < program_count); + + prg_manifest_base = (ia_css_program_manifest_t *)((char *)manifest + + manifest->program_manifest_offset); + if (program_index < program_count) { + program_manifest = (uint8_t *)prg_manifest_base; + for (i = 0; i < program_index; i++) { + program_manifest += ((ia_css_program_manifest_t *) + program_manifest)->size; + } + } + +EXIT: + if (NULL == manifest || program_index >= program_count) { + IA_CSS_TRACE_0(PSYSAPI_STATIC, WARNING, + "ia_css_program_group_manifest_get_prgrm_mnfst invalid argument\n"); + } + return (ia_css_program_manifest_t *)program_manifest; +} + +IA_CSS_PSYS_STATIC_STORAGE_CLASS_C +ia_css_data_terminal_manifest_t * +ia_css_program_group_manifest_get_data_terminal_manifest( + const ia_css_program_group_manifest_t *manifest, + const unsigned int terminal_index) +{ + ia_css_data_terminal_manifest_t *data_terminal_manifest = NULL; + ia_css_terminal_manifest_t *terminal_manifest; + + IA_CSS_TRACE_2(PSYSAPI_STATIC, VERBOSE, + "ia_css_program_group_manifest_get_data_terminal_manifest(%p, %d): enter:\n", + manifest, (int)terminal_index); + + terminal_manifest = + 
ia_css_program_group_manifest_get_term_mnfst(manifest, + terminal_index); + + verifexit(ia_css_is_terminal_manifest_data_terminal(terminal_manifest)); + + data_terminal_manifest = + (ia_css_data_terminal_manifest_t *)terminal_manifest; +EXIT: + return data_terminal_manifest; +} + +IA_CSS_PSYS_STATIC_STORAGE_CLASS_C +ia_css_param_terminal_manifest_t * +ia_css_program_group_manifest_get_param_terminal_manifest( + const ia_css_program_group_manifest_t *manifest, + const unsigned int terminal_index) +{ + ia_css_param_terminal_manifest_t *param_terminal_manifest = NULL; + ia_css_terminal_manifest_t *terminal_manifest; + + IA_CSS_TRACE_2(PSYSAPI_STATIC, VERBOSE, + "ia_css_program_group_manifest_get_param_terminal_manifest(%p, %d): enter:\n", + manifest, (int)terminal_index); + + terminal_manifest = + ia_css_program_group_manifest_get_term_mnfst(manifest, + terminal_index); + + verifexit(ia_css_is_terminal_manifest_parameter_terminal( + terminal_manifest)); + param_terminal_manifest = + (ia_css_param_terminal_manifest_t *)terminal_manifest; +EXIT: + return param_terminal_manifest; +} + +IA_CSS_PSYS_STATIC_STORAGE_CLASS_C +ia_css_spatial_param_terminal_manifest_t * +ia_css_program_group_manifest_get_spatial_param_terminal_manifest( + const ia_css_program_group_manifest_t *manifest, + const unsigned int terminal_index) +{ + ia_css_spatial_param_terminal_manifest_t * + spatial_param_terminal_manifest = NULL; + ia_css_terminal_manifest_t *terminal_manifest; + + IA_CSS_TRACE_2(PSYSAPI_STATIC, VERBOSE, + "ia_css_program_group_manifest_get_spatial_param_terminal_manifest(%p, %d): enter:\n", + manifest, (int)terminal_index); + + terminal_manifest = + ia_css_program_group_manifest_get_term_mnfst(manifest, + terminal_index); + + verifexit(ia_css_is_terminal_manifest_spatial_parameter_terminal( + terminal_manifest)); + + spatial_param_terminal_manifest = + (ia_css_spatial_param_terminal_manifest_t *)terminal_manifest; +EXIT: + return spatial_param_terminal_manifest; +} + 
+IA_CSS_PSYS_STATIC_STORAGE_CLASS_C +ia_css_sliced_param_terminal_manifest_t * +ia_css_program_group_manifest_get_sliced_param_terminal_manifest( + const ia_css_program_group_manifest_t *manifest, + const unsigned int terminal_index) +{ + ia_css_sliced_param_terminal_manifest_t * + sliced_param_terminal_manifest = NULL; + ia_css_terminal_manifest_t *terminal_manifest; + + IA_CSS_TRACE_2(PSYSAPI_STATIC, VERBOSE, + "ia_css_program_group_manifest_get_sliced_param_terminal_manifest(%p, %d): enter:\n", + manifest, (int)terminal_index); + + terminal_manifest = + ia_css_program_group_manifest_get_term_mnfst(manifest, + terminal_index); + + verifexit(ia_css_is_terminal_manifest_sliced_terminal( + terminal_manifest)); + + sliced_param_terminal_manifest = + (ia_css_sliced_param_terminal_manifest_t *)terminal_manifest; +EXIT: + return sliced_param_terminal_manifest; +} + +IA_CSS_PSYS_STATIC_STORAGE_CLASS_C +ia_css_program_terminal_manifest_t * +ia_css_program_group_manifest_get_program_terminal_manifest( + const ia_css_program_group_manifest_t *manifest, + const unsigned int terminal_index) +{ + ia_css_program_terminal_manifest_t *program_terminal_manifest = NULL; + ia_css_terminal_manifest_t *terminal_manifest; + + IA_CSS_TRACE_2(PSYSAPI_STATIC, VERBOSE, + "ia_css_program_group_manifest_get_program_terminal_manifest(%p, %d): enter:\n", + manifest, (int)terminal_index); + + terminal_manifest = + ia_css_program_group_manifest_get_term_mnfst(manifest, + terminal_index); + + verifexit(ia_css_is_terminal_manifest_program_terminal( + terminal_manifest)); + + program_terminal_manifest = + (ia_css_program_terminal_manifest_t *)terminal_manifest; + EXIT: + return program_terminal_manifest; +} + +IA_CSS_PSYS_STATIC_STORAGE_CLASS_C +ia_css_terminal_manifest_t * +ia_css_program_group_manifest_get_term_mnfst( + const ia_css_program_group_manifest_t *manifest, + const unsigned int terminal_index) +{ + ia_css_terminal_manifest_t *terminal_manifest = NULL; + ia_css_terminal_manifest_t 
*terminal_manifest_base; + uint8_t terminal_count; + uint8_t i = 0; + uint32_t offset; + + IA_CSS_TRACE_2(PSYSAPI_STATIC, VERBOSE, + "ia_css_program_group_manifest_get_term_mnfst(%p,%d): enter:\n", + manifest, (int)terminal_index); + + verifexit(manifest != NULL); + + terminal_count = + ia_css_program_group_manifest_get_terminal_count(manifest); + + verifexit(terminal_index < terminal_count); + + terminal_manifest_base = + (ia_css_terminal_manifest_t *)((char *)manifest + + manifest->terminal_manifest_offset); + terminal_manifest = terminal_manifest_base; + while (i < terminal_index) { + offset = + (uint32_t)ia_css_terminal_manifest_get_size(terminal_manifest); + terminal_manifest = (ia_css_terminal_manifest_t *) + ((char *)terminal_manifest + offset); + i++; + } +EXIT: + return terminal_manifest; +} + +IA_CSS_PSYS_STATIC_STORAGE_CLASS_C +uint8_t ia_css_program_group_manifest_get_program_count( + const ia_css_program_group_manifest_t *manifest) +{ + uint8_t program_count = 0; + + IA_CSS_TRACE_0(PSYSAPI_STATIC, VERBOSE, + "ia_css_program_group_manifest_get_program_count(): enter:\n"); + + if (manifest != NULL) { + program_count = manifest->program_count; + } else { + IA_CSS_TRACE_0(PSYSAPI_STATIC, WARNING, + "ia_css_program_group_manifest_get_program_count invalid argument\n"); + } + return program_count; +} + +IA_CSS_PSYS_STATIC_STORAGE_CLASS_C +uint8_t ia_css_program_group_manifest_get_terminal_count( + const ia_css_program_group_manifest_t *manifest) +{ + uint8_t terminal_count = 0; + + IA_CSS_TRACE_0(PSYSAPI_STATIC, VERBOSE, + "ia_css_program_group_manifest_get_terminal_count(): enter:\n"); + + if (manifest != NULL) { + terminal_count = manifest->terminal_count; + } else { + IA_CSS_TRACE_0(PSYSAPI_STATIC, WARNING, + "ia_css_program_group_manifest_get_terminal_count invalid argument\n"); + } + return terminal_count; +} + +#endif /* __IA_CSS_PSYS_PROGRAM_GROUP_MANIFEST_IMPL_H */ diff --git 
a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/psysapi/static/src/ia_css_psys_program_group_private.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/psysapi/static/src/ia_css_psys_program_group_private.h new file mode 100644 index 0000000000000..502d59def6e90 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/psysapi/static/src/ia_css_psys_program_group_private.h @@ -0,0 +1,212 @@ +/* +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ + +#ifndef __IA_CSS_PSYS_PROGRAM_GROUP_PRIVATE_H +#define __IA_CSS_PSYS_PROGRAM_GROUP_PRIVATE_H + +#include "ia_css_psys_manifest_types.h" +#include "ia_css_terminal_manifest_types.h" +#include "ia_css_kernel_bitmap.h" +#include "ia_css_program_group_data.h" +#include "vied_nci_psys_resource_model.h" +#include "ia_css_rbm_manifest_types.h" +#include +#include +#include + +#define SIZE_OF_PROGRAM_GROUP_MANIFEST_STRUCT_IN_BITS \ + ((IA_CSS_KERNEL_BITMAP_BITS) \ + + (IA_CSS_PROGRAM_GROUP_ID_BITS) \ + + (5 * IA_CSS_UINT16_T_BITS) \ + + (5 * IA_CSS_UINT8_T_BITS) \ + + (5 * IA_CSS_UINT8_T_BITS)) + +struct ia_css_program_group_manifest_s { + /**< Indicate kernels are present in this program group */ + ia_css_kernel_bitmap_t kernel_bitmap; + /**< Referral ID to program group FW */ + ia_css_program_group_ID_t ID; + uint16_t program_manifest_offset; + uint16_t terminal_manifest_offset; + /**< Offset to private data (not part of the official API) */ + uint16_t private_data_offset; + /**< Offset to 
RBM manifest */ + uint16_t rbm_manifest_offset; + /**< Size of this structure */ + uint16_t size; + /**< Storage alignment requirement (in uint8_t) */ + uint8_t alignment; + /**< Total number of kernels in this program group */ + uint8_t kernel_count; + /**< Total number of program in this program group */ + uint8_t program_count; + /**< Total number of terminals on this program group */ + uint8_t terminal_count; + /**< Total number of independent subgraphs in this program group */ + uint8_t subgraph_count; + /**< Padding; esnures that rbm_manifest starts on 64bit alignment */ + uint8_t reserved[5]; +}; + +#define SIZE_OF_PROGRAM_MANIFEST_STRUCT_IN_BITS \ + (IA_CSS_KERNEL_BITMAP_BITS \ + + IA_CSS_PROGRAM_ID_BITS \ + + IA_CSS_PROGRAM_TYPE_BITS \ + + (3 * IA_CSS_UINT32_T_BITS) \ + + (VIED_NCI_RESOURCE_BITMAP_BITS * VIED_NCI_N_DEV_DFM_ID) \ + + (VIED_NCI_RESOURCE_BITMAP_BITS * VIED_NCI_N_DEV_DFM_ID) \ + + IA_CSS_UINT16_T_BITS \ + + (VIED_NCI_RESOURCE_SIZE_BITS * VIED_NCI_N_MEM_TYPE_ID) \ + + (VIED_NCI_RESOURCE_SIZE_BITS * VIED_NCI_N_DATA_MEM_TYPE_ID * 2) \ + + (VIED_NCI_RESOURCE_SIZE_BITS * VIED_NCI_N_DEV_CHN_ID * 2) \ + + (IA_CSS_UINT8_T_BITS * VIED_NCI_N_DEV_DFM_ID) \ + + (IA_CSS_PROCESS_MAX_CELLS * VIED_NCI_RESOURCE_ID_BITS) \ + + (VIED_NCI_RESOURCE_ID_BITS) \ + + (2 * IA_CSS_UINT8_T_BITS) \ + + (N_PADDING_UINT8_IN_PROGRAM_GROUP_MANFEST * IA_CSS_UINT8_T_BITS)) +/* + * This structure contains only the information required for resource + * management and construction of the process group. + * The header for the program binary load is separate + */ + +struct ia_css_program_manifest_s { + /**< Indicate which kernels lead to this program being used */ + ia_css_kernel_bitmap_t kernel_bitmap; + /**< Referral ID to a specific program FW, valid ID's != 0 */ + ia_css_program_ID_t ID; + /**< Specification of for exclusive or parallel programs */ + ia_css_program_type_t program_type; + /**< offset to add to reach parent. 
This is negative value.*/ + int32_t parent_offset; + uint32_t program_dependency_offset; + uint32_t terminal_dependency_offset; +#if (VIED_NCI_N_DEV_DFM_ID > 0) + /**< DFM port allocation of this program */ + vied_nci_resource_bitmap_t dfm_port_bitmap[VIED_NCI_N_DEV_DFM_ID]; + /**< Active DFM ports which need a kick + * If an empty port is configured to run in active mode, the empty + * port and the corresponding full port(s) in the stream must be kicked. + * The empty port must always be kicked aster the full port. + */ + vied_nci_resource_bitmap_t dfm_active_port_bitmap[VIED_NCI_N_DEV_DFM_ID]; +#endif + /**< Size of this structure */ + uint16_t size; + /**< (internal) Memory allocation size needs of this program */ + vied_nci_resource_size_t int_mem_size[VIED_NCI_N_MEM_TYPE_ID]; + /**< (external) Memory allocation size needs of this program */ + vied_nci_resource_size_t ext_mem_size[VIED_NCI_N_DATA_MEM_TYPE_ID]; + vied_nci_resource_size_t ext_mem_offset[VIED_NCI_N_DATA_MEM_TYPE_ID]; + /**< Device channel allocation size needs of this program */ + vied_nci_resource_size_t dev_chn_size[VIED_NCI_N_DEV_CHN_ID]; + vied_nci_resource_size_t dev_chn_offset[VIED_NCI_N_DEV_CHN_ID]; +#if (VIED_NCI_N_DEV_DFM_ID > 0) + /**< DFM ports are relocatable if value is set to 1. + * The flag is per dfm port type. + * This will not be supported for now. 
+ */ + uint8_t is_dfm_relocatable[VIED_NCI_N_DEV_DFM_ID]; +#endif + /** Array of all the cells this program needs */ +#if IA_CSS_PROCESS_MAX_CELLS == 1 + vied_nci_resource_id_t cell_id; +#else + vied_nci_resource_id_t cells[IA_CSS_PROCESS_MAX_CELLS]; +#endif /* IA_CSS_PROCESS_MAX_CELLS == 1 */ + /**< (exclusive) indication of a cell type to be used by this program */ + vied_nci_resource_id_t cell_type_id; + + /**< Number of programs this program depends on */ + uint8_t program_dependency_count; + /**< Number of terminals this program depends on */ + uint8_t terminal_dependency_count; + /**< Padding bytes for 64bit alignment*/ +#if N_PADDING_UINT8_IN_PROGRAM_GROUP_MANFEST > 0 + /*hivecc does not allow an array of zero length*/ + uint8_t padding[N_PADDING_UINT8_IN_PROGRAM_GROUP_MANFEST]; +#endif +}; + +/* + *Calculation for manual size check for struct ia_css_data_terminal_manifest_s + */ +#define SIZE_OF_DATA_TERMINAL_MANIFEST_STRUCT_IN_BITS \ + (SIZE_OF_TERMINAL_MANIFEST_STRUCT_IN_BITS \ + + IA_CSS_FRAME_FORMAT_BITMAP_BITS \ + + IA_CSS_CONNECTION_BITMAP_BITS \ + + IA_CSS_KERNEL_BITMAP_BITS \ + + (4 * (IA_CSS_UINT16_T_BITS * IA_CSS_N_DATA_DIMENSION)) \ + + IA_CSS_UINT16_T_BITS \ + + IA_CSS_UINT8_T_BITS \ + + (4*IA_CSS_UINT8_T_BITS)) +/* + * Inherited data terminal class + */ +struct ia_css_data_terminal_manifest_s { + /**< Data terminal base */ + ia_css_terminal_manifest_t base; + /**< Supported (4CC / MIPI / parameter) formats */ + ia_css_frame_format_bitmap_t frame_format_bitmap; + /**< Indicate which kernels lead to this terminal being used */ + ia_css_kernel_bitmap_t kernel_bitmap; + /**< Minimum size of the frame */ + uint16_t min_size[IA_CSS_N_DATA_DIMENSION]; + /**< Maximum size of the frame */ + uint16_t max_size[IA_CSS_N_DATA_DIMENSION]; + /**< Minimum size of a fragment that the program port can accept */ + uint16_t min_fragment_size[IA_CSS_N_DATA_DIMENSION]; + /**< Maximum size of a fragment that the program port can accept */ + uint16_t 
max_fragment_size[IA_CSS_N_DATA_DIMENSION]; + /**< Indicate if this terminal is derived from a principal terminal */ + uint16_t terminal_dependency; + /**< Indicate what (streaming) interface types this terminal supports */ + ia_css_connection_bitmap_t connection_bitmap; + /**< Indicates if compression is supported on the data associated with + * this terminal. '1' indicates compression is supported, + * '0' otherwise + */ + uint8_t compression_support; + uint8_t reserved[4]; +}; + +/* ============ Program Control Init Terminal Manifest - START ============ */ +#define N_PADDING_UINT8_IN_PROGCTRLINIT_MANIFEST_PROGRAM_DESC_STRUCT 4 +struct ia_css_program_control_init_manifest_program_desc_s { + uint16_t load_section_count; + uint16_t connect_section_count; + uint8_t padding[N_PADDING_UINT8_IN_PROGCTRLINIT_MANIFEST_PROGRAM_DESC_STRUCT]; +}; + +#define N_PADDING_UINT8_IN_PROGCTRLINIT_TERMINAL_MANIFEST_STRUCT 2 +struct ia_css_program_control_init_terminal_manifest_s { + ia_css_terminal_manifest_t base; + /* Number of programs in program group */ + uint32_t program_count; + /* + * Points to array of ia_css_program_control_init_terminal_program_desc_t + * with size program_count. 
+ */ + uint16_t program_desc_offset; + /* align to 64 */ + uint8_t padding[N_PADDING_UINT8_IN_PROGCTRLINIT_TERMINAL_MANIFEST_STRUCT]; +}; +/* ============ Program Control Init Terminal Manifest - END ============ */ + +extern void ia_css_program_manifest_init( + ia_css_program_manifest_t *blob, + const uint8_t program_dependency_count, + const uint8_t terminal_dependency_count); + +#endif /* __IA_CSS_PSYS_PROGRAM_GROUP_PRIVATE_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/psysapi/static/src/ia_css_psys_program_manifest.c b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/psysapi/static/src/ia_css_psys_program_manifest.c new file mode 100644 index 0000000000000..188f9d80193e4 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/psysapi/static/src/ia_css_psys_program_manifest.c @@ -0,0 +1,1240 @@ +/* +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+*/ + + +#include +#include +/* for ia_css_kernel_bitmap_t, ia_css_kernel_bitmap_print */ +#include + +#include +#include "ia_css_psys_program_group_private.h" +#include "ia_css_psys_static_trace.h" + +#include +#include + +size_t ia_css_sizeof_program_manifest( + const uint8_t program_dependency_count, + const uint8_t terminal_dependency_count) +{ + size_t size = 0; + + IA_CSS_TRACE_0(PSYSAPI_STATIC, VERBOSE, + "ia_css_sizeof_program_manifest(): enter:\n"); + + size += sizeof(ia_css_program_manifest_t); + size += program_dependency_count * sizeof(uint8_t); + size += terminal_dependency_count * sizeof(uint8_t); + size = ceil_mul(size, sizeof(uint64_t)); + + return size; +} + +bool ia_css_has_program_manifest_fixed_cell( + const ia_css_program_manifest_t *manifest) +{ + bool has_fixed_cell = false; + + vied_nci_cell_ID_t cell_id; + vied_nci_cell_type_ID_t cell_type_id; + + IA_CSS_TRACE_0(PSYSAPI_STATIC, VERBOSE, + "ia_css_has_program_manifest_fixed_cell(): enter:\n"); + + verifexit(manifest != NULL); + + cell_id = ia_css_program_manifest_get_cell_ID(manifest); + cell_type_id = ia_css_program_manifest_get_cell_type_ID(manifest); + + has_fixed_cell = ((cell_id != VIED_NCI_N_CELL_ID) && + (cell_type_id == VIED_NCI_N_CELL_TYPE_ID)); + +EXIT: + if (NULL == manifest) { + IA_CSS_TRACE_0(PSYSAPI_STATIC, WARNING, + "ia_css_has_program_manifest_fixed_cell invalid argument\n"); + } + return has_fixed_cell; +} + +size_t ia_css_program_manifest_get_size( + const ia_css_program_manifest_t *manifest) +{ + size_t size = 0; + + IA_CSS_TRACE_0(PSYSAPI_STATIC, VERBOSE, + "ia_css_program_manifest_get_size(): enter:\n"); + + if (manifest != NULL) { + size = manifest->size; + } else { + IA_CSS_TRACE_0(PSYSAPI_STATIC, WARNING, + "ia_css_program_manifest_get_size invalid argument\n"); + } + + return size; +} + +ia_css_program_ID_t ia_css_program_manifest_get_program_ID( + const ia_css_program_manifest_t *manifest) +{ + ia_css_program_ID_t program_id = IA_CSS_PROGRAM_INVALID_ID; + + 
IA_CSS_TRACE_0(PSYSAPI_STATIC, VERBOSE, + "ia_css_program_manifest_get_program_ID(): enter:\n"); + + if (manifest != NULL) { + program_id = manifest->ID; + } else { + IA_CSS_TRACE_0(PSYSAPI_STATIC, WARNING, + "ia_css_program_manifest_get_program_ID invalid argument\n"); + } + return program_id; +} + +int ia_css_program_manifest_set_program_ID( + ia_css_program_manifest_t *manifest, + ia_css_program_ID_t id) +{ + int ret = -1; + + IA_CSS_TRACE_0(PSYSAPI_STATIC, VERBOSE, + "ia_css_program_manifest_set_program_ID(): enter:\n"); + + if (manifest != NULL) { + manifest->ID = id; + ret = 0; + } else { + IA_CSS_TRACE_1(PSYSAPI_STATIC, ERROR, + "ia_css_program_manifest_set_program_ID failed (%i)\n", ret); + } + return ret; +} + +ia_css_program_group_manifest_t *ia_css_program_manifest_get_parent( + const ia_css_program_manifest_t *manifest) +{ + ia_css_program_group_manifest_t *parent = NULL; + char *base; + + IA_CSS_TRACE_0(PSYSAPI_STATIC, VERBOSE, + "ia_css_program_manifest_get_parent(): enter:\n"); + + verifexit(manifest != NULL); + + base = (char *)((char *)manifest + manifest->parent_offset); + + parent = (ia_css_program_group_manifest_t *) (base); +EXIT: + if (NULL == manifest) { + IA_CSS_TRACE_0(PSYSAPI_STATIC, WARNING, + "ia_css_program_manifest_get_parent invalid argument\n"); + } + return parent; +} + +int ia_css_program_manifest_set_parent_offset( + ia_css_program_manifest_t *manifest, + int32_t program_offset) +{ + int retval = -1; + + IA_CSS_TRACE_0(PSYSAPI_STATIC, VERBOSE, + "ia_css_program_manifest_set_parent_offset(): enter:\n"); + + verifexit(manifest != NULL); + + /* parent is at negative offset away from current program offset*/ + manifest->parent_offset = -program_offset; + + retval = 0; +EXIT: + if (retval != 0) { + IA_CSS_TRACE_1(PSYSAPI_STATIC, ERROR, + "ia_css_program_manifest_set_parent_offset failed (%i)\n", + retval); + } + return retval; +} + +ia_css_program_type_t ia_css_program_manifest_get_type( + const ia_css_program_manifest_t *manifest) +{ 
+ ia_css_program_type_t program_type = IA_CSS_N_PROGRAM_TYPES; + + IA_CSS_TRACE_0(PSYSAPI_STATIC, VERBOSE, + "ia_css_program_manifest_get_type(): enter:\n"); + + if (manifest != NULL) { + program_type = manifest->program_type; + } else { + IA_CSS_TRACE_0(PSYSAPI_STATIC, WARNING, + "ia_css_program_manifest_get_type invalid argument\n"); + } + return program_type; +} + +int ia_css_program_manifest_set_type( + ia_css_program_manifest_t *manifest, + const ia_css_program_type_t program_type) +{ + int retval = -1; + + IA_CSS_TRACE_0(PSYSAPI_STATIC, VERBOSE, + "ia_css_program_manifest_set_type(): enter:\n"); + + if (manifest != NULL) { + manifest->program_type = program_type; + retval = 0; + } else { + IA_CSS_TRACE_1(PSYSAPI_STATIC, ERROR, + "ia_css_program_manifest_set_type failed (%i)\n", retval); + } + return retval; +} + +ia_css_kernel_bitmap_t ia_css_program_manifest_get_kernel_bitmap( + const ia_css_program_manifest_t *manifest) +{ + ia_css_kernel_bitmap_t kernel_bitmap = ia_css_kernel_bitmap_clear(); + + IA_CSS_TRACE_0(PSYSAPI_STATIC, VERBOSE, + "ia_css_program_manifest_get_kernel_bitmap(): enter:\n"); + + if (manifest != NULL) { + kernel_bitmap = manifest->kernel_bitmap; + } else { + IA_CSS_TRACE_0(PSYSAPI_STATIC, WARNING, + "ia_css_program_manifest_get_kernel_bitmap invalid argument\n"); + } + return kernel_bitmap; +} + +int ia_css_program_manifest_set_kernel_bitmap( + ia_css_program_manifest_t *manifest, + const ia_css_kernel_bitmap_t kernel_bitmap) +{ + int retval = -1; + + IA_CSS_TRACE_0(PSYSAPI_STATIC, VERBOSE, + "ia_css_program_manifest_set_kernel_bitmap(): enter:\n"); + + if (manifest != NULL) { + manifest->kernel_bitmap = kernel_bitmap; + retval = 0; + } else { + IA_CSS_TRACE_1(PSYSAPI_STATIC, ERROR, + "ia_css_program_manifest_set_kernel_bitmap failed (%i)\n", + retval); + } + return retval; +} + +vied_nci_cell_ID_t ia_css_program_manifest_get_cell_ID( + const ia_css_program_manifest_t *manifest) +{ + vied_nci_cell_ID_t cell_id = VIED_NCI_N_CELL_ID; +#if 
IA_CSS_PROCESS_MAX_CELLS > 1 + int i = 0; +#endif /* IA_CSS_PROCESS_MAX_CELLS > 1 */ + IA_CSS_TRACE_0(PSYSAPI_STATIC, VERBOSE, + "ia_css_program_manifest_get_cell_ID(): enter:\n"); + + verifexit(manifest != NULL); + +#if IA_CSS_PROCESS_MAX_CELLS == 1 + cell_id = manifest->cell_id; +#else + for (i = 1; i < IA_CSS_PROCESS_MAX_CELLS; i++) { + assert(VIED_NCI_N_CELL_ID == manifest->cells[i]); +#ifdef __HIVECC +#pragma hivecc unroll +#endif + } + cell_id = manifest->cells[0]; +#endif /* IA_CSS_PROCESS_MAX_CELLS == 1 */ +EXIT: + if (NULL == manifest) { + IA_CSS_TRACE_0(PSYSAPI_STATIC, WARNING, + "ia_css_program_manifest_get_cell_ID invalid argument\n"); + } + return cell_id; +} + +int ia_css_program_manifest_set_cell_ID( + ia_css_program_manifest_t *manifest, + const vied_nci_cell_ID_t cell_id) +{ + int retval = -1; +#if IA_CSS_PROCESS_MAX_CELLS > 1 + int i = 0; +#endif /* IA_CSS_PROCESS_MAX_CELLS > 1 */ + IA_CSS_TRACE_0(PSYSAPI_STATIC, VERBOSE, + "ia_css_program_manifest_set_cell_ID(): enter:\n"); + if (manifest != NULL) { +#if IA_CSS_PROCESS_MAX_CELLS == 1 + manifest->cell_id = cell_id; +#else + manifest->cells[0] = cell_id; + for (i = 1; i < IA_CSS_PROCESS_MAX_CELLS; i++) { + manifest->cells[i] = VIED_NCI_N_CELL_ID; + } +#endif /* IA_CSS_PROCESS_MAX_CELLS == 1 */ + retval = 0; + } else { + IA_CSS_TRACE_1(PSYSAPI_STATIC, ERROR, + "ia_css_program_manifest_set_cell_ID failed (%i)\n", retval); + } + return retval; +} + +vied_nci_cell_type_ID_t ia_css_program_manifest_get_cell_type_ID( + const ia_css_program_manifest_t *manifest) +{ + vied_nci_cell_type_ID_t cell_type_id = VIED_NCI_N_CELL_TYPE_ID; + + IA_CSS_TRACE_0(PSYSAPI_STATIC, VERBOSE, + "ia_css_program_manifest_get_cell_type_ID(): enter:\n"); + + verifexit(manifest != NULL); + + cell_type_id = (vied_nci_cell_type_ID_t)(manifest->cell_type_id); +EXIT: + if (NULL == manifest) { + IA_CSS_TRACE_0(PSYSAPI_STATIC, WARNING, + "ia_css_program_manifest_get_cell_type_ID invalid argument\n"); + } + return cell_type_id; +} + 
+int ia_css_program_manifest_set_cell_type_ID( + ia_css_program_manifest_t *manifest, + const vied_nci_cell_type_ID_t cell_type_id) +{ + int retval = -1; + + IA_CSS_TRACE_0(PSYSAPI_STATIC, VERBOSE, + "ia_css_program_manifest_set_cell_type_ID(): enter:\n"); + if (manifest != NULL) { + manifest->cell_type_id = cell_type_id; + retval = 0; + } else { + IA_CSS_TRACE_1(PSYSAPI_STATIC, ERROR, + "ia_css_program_manifest_set_cell_type_ID failed (%i)\n", + retval); + } + return retval; +} + +vied_nci_resource_size_t ia_css_program_manifest_get_int_mem_size( + const ia_css_program_manifest_t *manifest, + const vied_nci_mem_type_ID_t mem_type_id) +{ + vied_nci_resource_size_t int_mem_size = 0; + vied_nci_cell_type_ID_t cell_type_id; + int mem_index; + + IA_CSS_TRACE_0(PSYSAPI_STATIC, VERBOSE, + "ia_css_program_manifest_get_int_mem_size(): enter:\n"); + + verifexit(manifest != NULL); + verifexit(mem_type_id < VIED_NCI_N_MEM_TYPE_ID); + + if (ia_css_has_program_manifest_fixed_cell(manifest)) { + vied_nci_cell_ID_t cell_id = + ia_css_program_manifest_get_cell_ID(manifest); + + cell_type_id = vied_nci_cell_get_type(cell_id); + } else { + cell_type_id = + ia_css_program_manifest_get_cell_type_ID(manifest); + } + + /* loop over vied_nci_cell_mem_type to verify mem_type_id for a + * specific cell_type_id + */ + for (mem_index = 0; mem_index < VIED_NCI_N_MEM_TYPE_ID; mem_index++) { + if ((int)mem_type_id == + (int)vied_nci_cell_type_get_mem_type( + cell_type_id, mem_index)) { + int_mem_size = manifest->int_mem_size[mem_index]; + } + } + +EXIT: + if (NULL == manifest || mem_type_id >= VIED_NCI_N_MEM_TYPE_ID) { + IA_CSS_TRACE_0(PSYSAPI_STATIC, WARNING, + "ia_css_program_manifest_get_int_mem_size invalid argument\n"); + } + return int_mem_size; +} + +int ia_css_program_manifest_set_cells_bitmap( + ia_css_program_manifest_t *manifest, + const vied_nci_resource_bitmap_t bitmap) +{ + int retval = -1; + int array_index = 0; + int bit_index; + + IA_CSS_TRACE_0(PSYSAPI_STATIC, VERBOSE, + 
"ia_css_program_manifest_set_cells_bitmap(): enter:\n"); + + if (manifest != NULL) { + for (bit_index = 0; bit_index < VIED_NCI_N_CELL_ID; bit_index++) { + if (vied_nci_is_bit_set_in_bitmap(bitmap, bit_index)) { + verifexit(array_index < IA_CSS_PROCESS_MAX_CELLS); +#if IA_CSS_PROCESS_MAX_CELLS == 1 + manifest->cell_id = (vied_nci_cell_ID_t)bit_index; +#else + manifest->cells[array_index] = (vied_nci_cell_ID_t)bit_index; +#endif /* IA_CSS_PROCESS_MAX_CELLS == 1 */ + array_index++; + } + } + for (; array_index < IA_CSS_PROCESS_MAX_CELLS; array_index++) { +#if IA_CSS_PROCESS_MAX_CELLS == 1 + manifest->cell_id = VIED_NCI_N_CELL_ID; +#else + manifest->cells[array_index] = VIED_NCI_N_CELL_ID; +#endif /* IA_CSS_PROCESS_MAX_CELLS */ + } + retval = 0; + } else { + IA_CSS_TRACE_0(PSYSAPI_STATIC, WARNING, + "ia_css_program_manifest_set_cells_bitmap invalid argument\n"); + } +EXIT: + return retval; +} + +vied_nci_resource_bitmap_t ia_css_program_manifest_get_cells_bitmap( + const ia_css_program_manifest_t *manifest) +{ + vied_nci_resource_bitmap_t bitmap = 0; +#if IA_CSS_PROCESS_MAX_CELLS > 1 + int i = 0; +#endif /* IA_CSS_PROCESS_MAX_CELLS > 1 */ + IA_CSS_TRACE_0(PSYSAPI_STATIC, VERBOSE, + "ia_css_program_manifest_get_cells_bitmap(): enter:\n"); + + verifexit(manifest != NULL); + +#if IA_CSS_PROCESS_MAX_CELLS == 1 + bitmap = (1 << manifest->cell_id); +#else + for (i = 0; i < IA_CSS_PROCESS_MAX_CELLS; i++) { + if (VIED_NCI_N_CELL_ID != manifest->cells[i]) { + bitmap |= (1 << manifest->cells[i]); + } +#ifdef __HIVECC +#pragma hivecc unroll +#endif + } +#endif /* IA_CSS_PROCESS_MAX_CELLS == 1 */ +EXIT: + if (NULL == manifest) { + IA_CSS_TRACE_0(PSYSAPI_STATIC, WARNING, + "ia_css_program_manifest_get_cells_bitmap invalid argument\n"); + } + return bitmap; +} + +int ia_css_program_manifest_set_dfm_port_bitmap( + ia_css_program_manifest_t *manifest, + const vied_nci_dev_dfm_id_t dfm_type_id, + const vied_nci_resource_bitmap_t bitmap) +{ + int retval = -1; + + 
IA_CSS_TRACE_0(PSYSAPI_STATIC, VERBOSE, + "ia_css_program_manifest_set_dfm_port_bitmap(): enter:\n"); + + verifexit(manifest != NULL); +#if (VIED_NCI_N_DEV_DFM_ID > 0) + verifexit(dfm_type_id < VIED_NCI_N_DEV_DFM_ID); + manifest->dfm_port_bitmap[dfm_type_id] = bitmap; +#else + (void)bitmap; + (void)dfm_type_id; +#endif + retval = 0; + +EXIT: + if (retval != 0) { + IA_CSS_TRACE_0(PSYSAPI_STATIC, WARNING, + "ia_css_program_manifest_set_dfm_port_bitmap invalid argument\n"); + } + return retval; +} + +int ia_css_program_manifest_set_dfm_active_port_bitmap( + ia_css_program_manifest_t *manifest, + const vied_nci_dev_dfm_id_t dfm_type_id, + const vied_nci_resource_bitmap_t bitmap) +{ + int retval = -1; + + IA_CSS_TRACE_0(PSYSAPI_STATIC, VERBOSE, + "ia_css_program_manifest_set_dfm_active_port_bitmap(): enter:\n"); + + verifexit(manifest != NULL); +#if (VIED_NCI_N_DEV_DFM_ID > 0) + verifexit(dfm_type_id < VIED_NCI_N_DEV_DFM_ID); + manifest->dfm_active_port_bitmap[dfm_type_id] = bitmap; +#else + (void)bitmap; + (void)dfm_type_id; +#endif + retval = 0; + +EXIT: + if (retval != 0) { + IA_CSS_TRACE_0(PSYSAPI_STATIC, WARNING, + "ia_css_program_manifest_set_dfm_active_port_bitmap invalid argument\n"); + } + return retval; +} + +int ia_css_program_manifest_set_is_dfm_relocatable( + ia_css_program_manifest_t *manifest, + const vied_nci_dev_dfm_id_t dfm_type_id, + const uint8_t is_relocatable) +{ + int retval = -1; + + IA_CSS_TRACE_0(PSYSAPI_STATIC, VERBOSE, + "ia_css_program_manifest_set_is_dfm_relocatable(): enter:\n"); + + verifexit(manifest != NULL); +#if (VIED_NCI_N_DEV_DFM_ID > 0) + verifexit(dfm_type_id < VIED_NCI_N_DEV_DFM_ID); + manifest->is_dfm_relocatable[dfm_type_id] = is_relocatable; +#else + (void)is_relocatable; + (void)dfm_type_id; +#endif + retval = 0; + + EXIT: + if (retval != 0) { + IA_CSS_TRACE_0(PSYSAPI_STATIC, WARNING, + "ia_css_program_manifest_set_is_dfm_relocatable invalid argument\n"); + } + + return retval; +} + +uint8_t 
ia_css_program_manifest_get_is_dfm_relocatable( + const ia_css_program_manifest_t *manifest, + const vied_nci_dev_dfm_id_t dfm_type_id) +{ + uint8_t ret = 0; + + IA_CSS_TRACE_0(PSYSAPI_STATIC, VERBOSE, + "ia_css_program_manifest_get_is_dfm_relocatable(): enter:\n"); + + verifexit(manifest != NULL); +#if (VIED_NCI_N_DEV_DFM_ID > 0) + verifexit(dfm_type_id < VIED_NCI_N_DEV_DFM_ID); + ret = manifest->is_dfm_relocatable[dfm_type_id]; +#else + ret = 0; + (void)dfm_type_id; +#endif +EXIT: + if (NULL == manifest) { + IA_CSS_TRACE_0(PSYSAPI_STATIC, WARNING, + "ia_css_program_manifest_get_is_dfm_relocatable invalid argument\n"); + } + return ret; +} + +vied_nci_resource_bitmap_t ia_css_program_manifest_get_dfm_port_bitmap( + const ia_css_program_manifest_t *manifest, + const vied_nci_dev_dfm_id_t dfm_type_id) +{ + vied_nci_resource_bitmap_t bitmap = 0; + + IA_CSS_TRACE_0(PSYSAPI_STATIC, VERBOSE, + "ia_css_program_manifest_get_dfm_port_bitmap(): enter:\n"); + + verifexit(manifest != NULL); +#if (VIED_NCI_N_DEV_DFM_ID > 0) + verifexit(dfm_type_id < VIED_NCI_N_DEV_DFM_ID); + bitmap = manifest->dfm_port_bitmap[dfm_type_id]; +#else + bitmap = 0; + (void)dfm_type_id; +#endif +EXIT: + if (NULL == manifest) { + IA_CSS_TRACE_0(PSYSAPI_STATIC, WARNING, + "ia_css_program_manifest_get_dfm_port_bitmap invalid argument\n"); + } + return bitmap; +} + +vied_nci_resource_bitmap_t ia_css_program_manifest_get_dfm_active_port_bitmap( + const ia_css_program_manifest_t *manifest, + const vied_nci_dev_dfm_id_t dfm_type_id) +{ + vied_nci_resource_bitmap_t bitmap = 0; + + IA_CSS_TRACE_0(PSYSAPI_STATIC, VERBOSE, + "ia_css_program_manifest_get_dfm_active_port_bitmap(): enter:\n"); + + verifexit(manifest != NULL); +#if (VIED_NCI_N_DEV_DFM_ID > 0) + verifexit(dfm_type_id < VIED_NCI_N_DEV_DFM_ID); + bitmap = manifest->dfm_active_port_bitmap[dfm_type_id]; +#else + bitmap = 0; + (void)dfm_type_id; +#endif +EXIT: + if (NULL == manifest) { + IA_CSS_TRACE_0(PSYSAPI_STATIC, WARNING, + 
"ia_css_program_manifest_get_dfm_active_port_bitmap invalid argument\n"); + } + return bitmap; +} + +int ia_css_program_manifest_set_int_mem_size( + ia_css_program_manifest_t *manifest, + const vied_nci_mem_type_ID_t mem_type_id, + const vied_nci_resource_size_t int_mem_size) +{ + int retval = -1; + vied_nci_cell_type_ID_t cell_type_id; + int mem_index; + + IA_CSS_TRACE_0(PSYSAPI_STATIC, VERBOSE, + "ia_css_program_manifest_set_int_mem_size(): enter:\n"); + + if (ia_css_has_program_manifest_fixed_cell(manifest)) { + vied_nci_cell_ID_t cell_id = + ia_css_program_manifest_get_cell_ID(manifest); + + cell_type_id = vied_nci_cell_get_type(cell_id); + } else { + cell_type_id = + ia_css_program_manifest_get_cell_type_ID(manifest); + } + + if (manifest != NULL && mem_type_id < VIED_NCI_N_MEM_TYPE_ID) { + /* loop over vied_nci_cell_mem_type to verify mem_type_id for + * a specific cell_type_id + */ + for (mem_index = 0; mem_index < VIED_NCI_N_MEM_TYPE_ID; + mem_index++) { + if ((int)mem_type_id == + (int)vied_nci_cell_type_get_mem_type( + cell_type_id, mem_index)) { + manifest->int_mem_size[mem_index] = + int_mem_size; + retval = 0; + } + } + } + if (retval != 0) { + IA_CSS_TRACE_2(PSYSAPI_STATIC, ERROR, + "ia_css_program_manifest_set_int_mem_size cell_type_id %d has no mem_type_id %d\n", + (int)cell_type_id, (int)mem_type_id); + } + + return retval; +} + +vied_nci_resource_size_t ia_css_program_manifest_get_ext_mem_size( + const ia_css_program_manifest_t *manifest, + const vied_nci_mem_type_ID_t mem_type_id) +{ + vied_nci_resource_size_t ext_mem_size = 0; + + IA_CSS_TRACE_0(PSYSAPI_STATIC, VERBOSE, + "ia_css_program_manifest_get_ext_mem_size(): enter:\n"); + + verifexit(manifest != NULL); + verifexit(mem_type_id < VIED_NCI_N_DATA_MEM_TYPE_ID); + + ext_mem_size = manifest->ext_mem_size[mem_type_id]; +EXIT: + if (NULL == manifest || mem_type_id >= VIED_NCI_N_DATA_MEM_TYPE_ID) { + IA_CSS_TRACE_0(PSYSAPI_STATIC, WARNING, + "ia_css_program_manifest_get_ext_mem_size invalid 
argument\n"); + } + return ext_mem_size; +} + +vied_nci_resource_size_t ia_css_program_manifest_get_ext_mem_offset( + const ia_css_program_manifest_t *manifest, + const vied_nci_mem_type_ID_t mem_type_id) +{ + vied_nci_resource_size_t ext_mem_offset = 0; + + IA_CSS_TRACE_0(PSYSAPI_STATIC, VERBOSE, + "ia_css_program_manifest_get_ext_mem_offset(): enter:\n"); + + verifexit(manifest != NULL); + verifexit(mem_type_id < VIED_NCI_N_DATA_MEM_TYPE_ID); + + ext_mem_offset = manifest->ext_mem_offset[mem_type_id]; +EXIT: + if (NULL == manifest || mem_type_id >= VIED_NCI_N_DATA_MEM_TYPE_ID) { + IA_CSS_TRACE_0(PSYSAPI_STATIC, WARNING, + "ia_css_program_manifest_get_ext_mem_offset invalid argument\n"); + } + return ext_mem_offset; +} + +int ia_css_program_manifest_set_ext_mem_size( + ia_css_program_manifest_t *manifest, + const vied_nci_mem_type_ID_t mem_type_id, + const vied_nci_resource_size_t ext_mem_size) +{ + int retval = -1; + + IA_CSS_TRACE_0(PSYSAPI_STATIC, VERBOSE, + "ia_css_program_manifest_set_ext_mem_size(): enter:\n"); + + if (manifest != NULL && mem_type_id < VIED_NCI_N_DATA_MEM_TYPE_ID) { + manifest->ext_mem_size[mem_type_id] = ext_mem_size; + retval = 0; + } else { + IA_CSS_TRACE_0(PSYSAPI_STATIC, WARNING, + "ia_css_program_manifest_set_ext_mem_size invalid argument\n"); + } + + return retval; +} + +int ia_css_program_manifest_set_ext_mem_offset( + ia_css_program_manifest_t *manifest, + const vied_nci_mem_type_ID_t mem_type_id, + const vied_nci_resource_size_t ext_mem_offset) +{ + int retval = -1; + + IA_CSS_TRACE_0(PSYSAPI_STATIC, VERBOSE, + "ia_css_program_manifest_set_ext_mem_offset(): enter:\n"); + + if (manifest != NULL && mem_type_id < VIED_NCI_N_DATA_MEM_TYPE_ID) { + manifest->ext_mem_offset[mem_type_id] = ext_mem_offset; + retval = 0; + } else { + IA_CSS_TRACE_0(PSYSAPI_STATIC, WARNING, + "ia_css_program_manifest_set_ext_mem_offset invalid argument\n"); + } + + return retval; +} + +vied_nci_resource_size_t ia_css_program_manifest_get_dev_chn_size( + const 
ia_css_program_manifest_t *manifest, + const vied_nci_dev_chn_ID_t dev_chn_id) +{ + vied_nci_resource_size_t dev_chn_size = 0; + + IA_CSS_TRACE_0(PSYSAPI_STATIC, VERBOSE, + "ia_css_program_manifest_get_dev_chn_size(): enter:\n"); + + verifexit(manifest != NULL); + verifexit(dev_chn_id < VIED_NCI_N_DEV_CHN_ID); + + dev_chn_size = manifest->dev_chn_size[dev_chn_id]; +EXIT: + if (NULL == manifest || dev_chn_id >= VIED_NCI_N_DEV_CHN_ID) { + IA_CSS_TRACE_0(PSYSAPI_STATIC, WARNING, + "ia_css_program_manifest_get_dev_chn_size invalid argument\n"); + } + return dev_chn_size; +} + +vied_nci_resource_size_t ia_css_program_manifest_get_dev_chn_offset( + const ia_css_program_manifest_t *manifest, + const vied_nci_dev_chn_ID_t dev_chn_id) +{ + vied_nci_resource_size_t dev_chn_offset = 0; + + IA_CSS_TRACE_0(PSYSAPI_STATIC, VERBOSE, + "ia_css_program_manifest_get_dev_chn_offset(): enter:\n"); + + verifexit(manifest != NULL); + verifexit(dev_chn_id < VIED_NCI_N_DEV_CHN_ID); + + dev_chn_offset = manifest->dev_chn_offset[dev_chn_id]; +EXIT: + if (NULL == manifest || dev_chn_id >= VIED_NCI_N_DEV_CHN_ID) { + IA_CSS_TRACE_0(PSYSAPI_STATIC, WARNING, + "ia_css_program_manifest_get_dev_chn_offset invalid argument\n"); + } + return dev_chn_offset; +} + +int ia_css_program_manifest_set_dev_chn_size( + ia_css_program_manifest_t *manifest, + const vied_nci_dev_chn_ID_t dev_chn_id, + const vied_nci_resource_size_t dev_chn_size) +{ + int retval = -1; + + IA_CSS_TRACE_0(PSYSAPI_STATIC, VERBOSE, + "ia_css_program_manifest_set_dev_chn_size(): enter:\n"); + + if (manifest != NULL && dev_chn_id < VIED_NCI_N_DEV_CHN_ID) { + manifest->dev_chn_size[dev_chn_id] = dev_chn_size; + retval = 0; + } else { + IA_CSS_TRACE_0(PSYSAPI_STATIC, WARNING, + "ia_css_program_manifest_set_dev_chn_size invalid argument\n"); + } + + return retval; +} + +int ia_css_program_manifest_set_dev_chn_offset( + ia_css_program_manifest_t *manifest, + const vied_nci_dev_chn_ID_t dev_chn_id, + const vied_nci_resource_size_t 
dev_chn_offset) +{ + int retval = -1; + + IA_CSS_TRACE_0(PSYSAPI_STATIC, VERBOSE, + "ia_css_program_manifest_set_dev_chn_offset(): enter:\n"); + + if (manifest != NULL && dev_chn_id < VIED_NCI_N_DEV_CHN_ID) { + manifest->dev_chn_offset[dev_chn_id] = dev_chn_offset; + retval = 0; + } else { + IA_CSS_TRACE_0(PSYSAPI_STATIC, WARNING, + "ia_css_program_manifest_set_dev_chn_offset invalid argument\n"); + } + + return retval; +} + +uint8_t ia_css_program_manifest_get_program_dependency_count( + const ia_css_program_manifest_t *manifest) +{ + uint8_t program_dependency_count = 0; + + IA_CSS_TRACE_0(PSYSAPI_STATIC, VERBOSE, + "ia_css_program_manifest_get_program_dependency_count(): enter:\n"); + + if (manifest != NULL) { + program_dependency_count = manifest->program_dependency_count; + } else { + IA_CSS_TRACE_0(PSYSAPI_STATIC, WARNING, + "ia_css_program_manifest_get_program_dependency_count invalid argument\n"); + } + return program_dependency_count; +} + +uint8_t ia_css_program_manifest_get_program_dependency( + const ia_css_program_manifest_t *manifest, + const unsigned int index) +{ + uint8_t program_dependency = IA_CSS_PROGRAM_INVALID_DEPENDENCY; + uint8_t *program_dep_ptr; + uint8_t program_dependency_count; + + IA_CSS_TRACE_0(PSYSAPI_STATIC, VERBOSE, + "ia_css_program_manifest_get_program_dependency(): enter:\n"); + + program_dependency_count = + ia_css_program_manifest_get_program_dependency_count(manifest); + + if (index < program_dependency_count) { + program_dep_ptr = + (uint8_t *)((uint8_t *)manifest + + manifest->program_dependency_offset + + index * sizeof(uint8_t)); + program_dependency = *program_dep_ptr; + } else { + IA_CSS_TRACE_0(PSYSAPI_STATIC, WARNING, + "ia_css_program_manifest_get_program_dependency invalid argument\n"); + } + return program_dependency; +} + +int ia_css_program_manifest_set_program_dependency( + ia_css_program_manifest_t *manifest, + const uint8_t program_dependency, + const unsigned int index) +{ + int retval = -1; + uint8_t 
*program_dep_ptr; + uint8_t program_dependency_count; + uint8_t program_count; + + IA_CSS_TRACE_0(PSYSAPI_STATIC, VERBOSE, + "ia_css_program_manifest_set_program_dependency(): enter:\n"); + + program_dependency_count = + ia_css_program_manifest_get_program_dependency_count(manifest); + program_count = + ia_css_program_group_manifest_get_program_count( + ia_css_program_manifest_get_parent(manifest)); + + if ((index < program_dependency_count) && + (program_dependency < program_count)) { + program_dep_ptr = (uint8_t *)((uint8_t *)manifest + + manifest->program_dependency_offset + + index*sizeof(uint8_t)); + *program_dep_ptr = program_dependency; + retval = 0; + } + + if (retval != 0) { + IA_CSS_TRACE_3(PSYSAPI_STATIC, ERROR, + "ia_css_program_manifest_set_program_dependency(m, %d, %d) failed (%i)\n", + program_dependency, index, retval); + } + return retval; +} + +uint8_t ia_css_program_manifest_get_terminal_dependency_count( + const ia_css_program_manifest_t *manifest) +{ + uint8_t terminal_dependency_count = 0; + + IA_CSS_TRACE_0(PSYSAPI_STATIC, VERBOSE, + "ia_css_program_manifest_get_terminal_dependency_count(): enter:\n"); + + if (manifest != NULL) { + terminal_dependency_count = manifest->terminal_dependency_count; + } else { + IA_CSS_TRACE_0(PSYSAPI_STATIC, WARNING, + "ia_css_program_manifest_get_terminal_dependency_count invalid argument\n"); + } + return terminal_dependency_count; +} + +uint8_t ia_css_program_manifest_get_terminal_dependency( + const ia_css_program_manifest_t *manifest, + const unsigned int index) +{ + uint8_t terminal_dependency = IA_CSS_PROGRAM_INVALID_DEPENDENCY; + uint8_t *terminal_dep_ptr; + uint8_t terminal_dependency_count = + ia_css_program_manifest_get_terminal_dependency_count(manifest); + + IA_CSS_TRACE_0(PSYSAPI_STATIC, VERBOSE, + "ia_css_program_manifest_get_terminal_dependency(): enter:\n"); + + if (index < terminal_dependency_count) { + terminal_dep_ptr = (uint8_t *)((uint8_t *)manifest + + manifest->terminal_dependency_offset 
+ index); + terminal_dependency = *terminal_dep_ptr; + } else { + IA_CSS_TRACE_0(PSYSAPI_STATIC, WARNING, + "ia_css_program_manifest_get_terminal_dependency invalid argument\n"); + } + return terminal_dependency; +} + +int ia_css_program_manifest_set_terminal_dependency( + ia_css_program_manifest_t *manifest, + const uint8_t terminal_dependency, + const unsigned int index) +{ + int retval = -1; + uint8_t *terminal_dep_ptr; + uint8_t terminal_dependency_count = + ia_css_program_manifest_get_terminal_dependency_count(manifest); + uint8_t terminal_count = + ia_css_program_group_manifest_get_terminal_count( + ia_css_program_manifest_get_parent(manifest)); + + IA_CSS_TRACE_0(PSYSAPI_STATIC, VERBOSE, + "ia_css_program_manifest_set_terminal_dependency(): enter:\n"); + + if ((index < terminal_dependency_count) && + (terminal_dependency < terminal_count)) { + terminal_dep_ptr = (uint8_t *)((uint8_t *)manifest + + manifest->terminal_dependency_offset + index); + *terminal_dep_ptr = terminal_dependency; + retval = 0; + } + + if (retval != 0) { + IA_CSS_TRACE_1(PSYSAPI_STATIC, ERROR, + "ia_css_program_manifest_set_terminal_dependency failed (%i)\n", + retval); + } + return retval; +} + +bool ia_css_is_program_manifest_subnode_program_type( + const ia_css_program_manifest_t *manifest) +{ + ia_css_program_type_t program_type; + + IA_CSS_TRACE_0(PSYSAPI_STATIC, VERBOSE, + "ia_css_is_program_manifest_subnode_program_type(): enter:\n"); + + program_type = ia_css_program_manifest_get_type(manifest); +/* The error return is the limit value, so no need to check on the manifest + * pointer + */ + return (program_type == IA_CSS_PROGRAM_TYPE_PARALLEL_SUB) || + (program_type == IA_CSS_PROGRAM_TYPE_EXCLUSIVE_SUB) || + (program_type == IA_CSS_PROGRAM_TYPE_VIRTUAL_SUB); +} + +bool ia_css_is_program_manifest_supernode_program_type( + const ia_css_program_manifest_t *manifest) +{ + ia_css_program_type_t program_type; + + IA_CSS_TRACE_0(PSYSAPI_STATIC, VERBOSE, + 
"ia_css_is_program_manifest_supernode_program_type(): enter:\n"); + + program_type = ia_css_program_manifest_get_type(manifest); + +/* The error return is the limit value, so no need to check on the manifest + * pointer + */ + return (program_type == IA_CSS_PROGRAM_TYPE_PARALLEL_SUPER) || + (program_type == IA_CSS_PROGRAM_TYPE_EXCLUSIVE_SUPER) || + (program_type == IA_CSS_PROGRAM_TYPE_VIRTUAL_SUPER); +} + +bool ia_css_is_program_manifest_singular_program_type( + const ia_css_program_manifest_t *manifest) +{ + ia_css_program_type_t program_type; + + IA_CSS_TRACE_0(PSYSAPI_STATIC, VERBOSE, + "ia_css_is_program_manifest_singular_program_type(): enter:\n"); + + program_type = ia_css_program_manifest_get_type(manifest); + +/* The error return is the limit value, so no need to check on the manifest + * pointer + */ + return (program_type == IA_CSS_PROGRAM_TYPE_SINGULAR); +} + +void ia_css_program_manifest_init( + ia_css_program_manifest_t *blob, + const uint8_t program_dependency_count, + const uint8_t terminal_dependency_count) +{ + IA_CSS_TRACE_0(PSYSAPI_STATIC, INFO, + "ia_css_program_manifest_init(): enter:\n"); + + /*TODO: add assert*/ + if (!blob) + return; + + blob->ID = 1; + blob->program_dependency_count = program_dependency_count; + blob->terminal_dependency_count = terminal_dependency_count; + blob->program_dependency_offset = sizeof(ia_css_program_manifest_t); + blob->terminal_dependency_offset = blob->program_dependency_offset + + sizeof(uint8_t) * program_dependency_count; + blob->size = + (uint16_t)ia_css_sizeof_program_manifest( + program_dependency_count, + terminal_dependency_count); +} + +/* We need to refactor those files in order to build in the firmware only + what is needed, switches are put current to workaround compilation problems + in the firmware (for example lack of uint64_t support) + supported in the firmware + */ +#if !defined(__HIVECC) + +#if defined(_MSC_VER) +/* WA for a visual studio compiler bug, refer to + 
developercommunity.visualstudio.com/content/problem/209359/ice-with-fpfast-in-156-and-msvc-daily-1413263051-p.html +*/ +#pragma optimize("", off) +#endif + +int ia_css_program_manifest_print( + const ia_css_program_manifest_t *manifest, + void *fid) +{ + int retval = -1; + int i, mem_index, dev_chn_index; + + vied_nci_cell_type_ID_t cell_type_id; + uint8_t program_dependency_count; + uint8_t terminal_dependency_count; + ia_css_kernel_bitmap_t bitmap; + + IA_CSS_TRACE_0(PSYSAPI_STATIC, INFO, + "ia_css_program_manifest_print(): enter:\n"); + + verifexit(manifest != NULL); + NOT_USED(fid); + + IA_CSS_TRACE_1(PSYSAPI_STATIC, INFO, "sizeof(manifest) = %d\n", + (int)ia_css_program_manifest_get_size(manifest)); + IA_CSS_TRACE_1(PSYSAPI_STATIC, INFO, "program ID = %d\n", + (int)ia_css_program_manifest_get_program_ID(manifest)); + + bitmap = ia_css_program_manifest_get_kernel_bitmap(manifest); + verifexit(ia_css_kernel_bitmap_print(bitmap, fid) == 0); + + if (ia_css_has_program_manifest_fixed_cell(manifest)) { + vied_nci_cell_ID_t cell_id = + ia_css_program_manifest_get_cell_ID(manifest); + + cell_type_id = vied_nci_cell_get_type(cell_id); + IA_CSS_TRACE_1(PSYSAPI_STATIC, INFO, "cell(program) = %d\n", + (int)cell_id); + } else { + cell_type_id = + ia_css_program_manifest_get_cell_type_ID(manifest); + } + IA_CSS_TRACE_1(PSYSAPI_STATIC, INFO, "cell type(program) = %d\n", + (int)cell_type_id); + + for (mem_index = 0; mem_index < (int)VIED_NCI_N_MEM_TYPE_ID; + mem_index++) { + IA_CSS_TRACE_1(PSYSAPI_STATIC, INFO, + "\ttype(internal mem) type = %d\n", + (int)vied_nci_cell_type_get_mem_type(cell_type_id, mem_index)); + IA_CSS_TRACE_1(PSYSAPI_STATIC, INFO, + "\ttype(internal mem) size = %d\n", + manifest->int_mem_size[mem_index]); + } + + for (mem_index = 0; mem_index < (int)VIED_NCI_N_DATA_MEM_TYPE_ID; + mem_index++) { + IA_CSS_TRACE_1(PSYSAPI_STATIC, INFO, + "\ttype(external mem) type = %d\n", + (int)(vied_nci_mem_type_ID_t)mem_index); + IA_CSS_TRACE_1(PSYSAPI_STATIC, INFO, + 
"\ttype(external mem) size = %d\n", + manifest->ext_mem_size[mem_index]); + IA_CSS_TRACE_1(PSYSAPI_STATIC, INFO, + "\ttype(external mem) offset = %d\n", + manifest->ext_mem_offset[mem_index]); + } + + for (dev_chn_index = 0; dev_chn_index < (int)VIED_NCI_N_DEV_CHN_ID; + dev_chn_index++) { + IA_CSS_TRACE_1(PSYSAPI_STATIC, INFO, + "\ttype(device channel) type = %d\n", + (int)dev_chn_index); + IA_CSS_TRACE_1(PSYSAPI_STATIC, INFO, + "\ttype(device channel) size = %d\n", + manifest->dev_chn_size[dev_chn_index]); + IA_CSS_TRACE_1(PSYSAPI_STATIC, INFO, + "\ttype(device channel) offset = %d\n", + manifest->dev_chn_offset[dev_chn_index]); + } +#if HAS_DFM + for (dev_chn_index = 0; dev_chn_index < (int)VIED_NCI_N_DEV_DFM_ID; + dev_chn_index++) { + IA_CSS_TRACE_1(PSYSAPI_STATIC, INFO, + "\ttype(dfm port) type = %d\n", + (int)dev_chn_index); + IA_CSS_TRACE_1(PSYSAPI_STATIC, INFO, + "\ttype(dfm port) port_bitmap = %d\n", + manifest->dfm_port_bitmap[dev_chn_index]); + IA_CSS_TRACE_1(PSYSAPI_STATIC, INFO, + "\ttype(dfm port) active_port_bitmap = %d\n", + manifest->dfm_active_port_bitmap[dev_chn_index]); + + IA_CSS_TRACE_1(PSYSAPI_STATIC, INFO, + "\ttype(dfm port) is_dfm_relocatable = %d\n", + manifest->is_dfm_relocatable[dev_chn_index]); + } +#endif + +#if IA_CSS_PROCESS_MAX_CELLS == 1 + IA_CSS_TRACE_1(PSYSAPI_STATIC, INFO, + "\ttype(cells) bitmap = %d\n", + manifest->cell_id); +#else + for (i = 0; i < IA_CSS_PROCESS_MAX_CELLS; i++) { + IA_CSS_TRACE_1(PSYSAPI_STATIC, INFO, + "\ttype(cells) bitmap = %d\n", + manifest->cells[i]); + } +#endif /* IA_CSS_PROCESS_MAX_CELLS == 1 */ + program_dependency_count = + ia_css_program_manifest_get_program_dependency_count(manifest); + if (program_dependency_count == 0) { + IA_CSS_TRACE_1(PSYSAPI_STATIC, INFO, + "program_dependencies[%d] {};\n", + program_dependency_count); + } else { + uint8_t prog_dep; + + IA_CSS_TRACE_1(PSYSAPI_STATIC, INFO, + "program_dependencies[%d] {\n", + program_dependency_count); + for (i = 0; i < 
(int)program_dependency_count - 1; i++) { + prog_dep = + ia_css_program_manifest_get_program_dependency( + manifest, i); + IA_CSS_TRACE_1(PSYSAPI_STATIC, INFO, + "\t %4d,\n", prog_dep); + } + prog_dep = + ia_css_program_manifest_get_program_dependency(manifest, i); + IA_CSS_TRACE_1(PSYSAPI_STATIC, INFO, "\t %4d }\n", prog_dep); + (void)prog_dep; + } + + terminal_dependency_count = + ia_css_program_manifest_get_terminal_dependency_count(manifest); + if (terminal_dependency_count == 0) { + IA_CSS_TRACE_1(PSYSAPI_STATIC, INFO, + "terminal_dependencies[%d] {};\n", + terminal_dependency_count); + } else { + uint8_t term_dep; + + IA_CSS_TRACE_1(PSYSAPI_STATIC, INFO, + "terminal_dependencies[%d] {\n", + terminal_dependency_count); + for (i = 0; i < (int)terminal_dependency_count - 1; i++) { + term_dep = + ia_css_program_manifest_get_terminal_dependency( + manifest, i); + IA_CSS_TRACE_1(PSYSAPI_STATIC, INFO, + "\t %4d,\n", term_dep); + } + term_dep = + ia_css_program_manifest_get_terminal_dependency(manifest, i); + IA_CSS_TRACE_1(PSYSAPI_STATIC, INFO, "\t %4d }\n", term_dep); + (void)term_dep; + } + (void)cell_type_id; + + retval = 0; +EXIT: + if (retval != 0) { + IA_CSS_TRACE_1(PSYSAPI_STATIC, ERROR, + "ia_css_program_manifest_print failed (%i)\n", retval); + } + return retval; +} + +#if defined(_MSC_VER) +/* WA for a visual studio compiler bug */ +#pragma optimize("", off) +#endif + +#endif diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/psysapi/static/src/ia_css_psys_terminal_manifest.c b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/psysapi/static/src/ia_css_psys_terminal_manifest.c new file mode 100644 index 0000000000000..c890b8a71f2c0 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/psysapi/static/src/ia_css_psys_terminal_manifest.c @@ -0,0 +1,1137 @@ +/* +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. 
+ * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ + + +#include + +/* Data object types on the terminals */ +#include +/* for ia_css_kernel_bitmap_t, ia_css_kernel_bitmap_clear, ia_css_... */ +#include + +#include "ia_css_psys_program_group_private.h" +#include "ia_css_terminal_manifest.h" +#include "ia_css_terminal_manifest_types.h" + +#include +#include +#include +#include "ia_css_psys_static_trace.h" + +/* We need to refactor those files in order to build in the firmware only + what is needed, switches are put current to workaround compilation problems + in the firmware (for example lack of uint64_t support) + supported in the firmware + */ +#if !defined(__HIVECC) +static const char *terminal_type_strings[IA_CSS_N_TERMINAL_TYPES + 1] = { + "IA_CSS_TERMINAL_TYPE_DATA_IN", + "IA_CSS_TERMINAL_TYPE_DATA_OUT", + "IA_CSS_TERMINAL_TYPE_PARAM_STREAM", + /**< Type 1-5 parameter input */ + "IA_CSS_TERMINAL_TYPE_PARAM_CACHED_IN", + /**< Type 1-5 parameter output */ + "IA_CSS_TERMINAL_TYPE_PARAM_CACHED_OUT", + /**< Represent the new type of terminal for + * the "spatial dependent parameters", when params go in + */ + "IA_CSS_TERMINAL_TYPE_PARAM_SPATIAL_IN", + /**< Represent the new type of terminal for + * the "spatial dependent parameters", when params go out + */ + "IA_CSS_TERMINAL_TYPE_PARAM_SPATIAL_OUT", + /**< Represent the new type of terminal for + * the explicit slicing, when params go in + */ + "IA_CSS_TERMINAL_TYPE_PARAM_SLICED_IN", + /**< Represent the new type of terminal for + * the explicit slicing, when params go out + */ + 
"IA_CSS_TERMINAL_TYPE_PARAM_SLICED_OUT", + /**< State (private data) input */ + "IA_CSS_TERMINAL_TYPE_STATE_IN", + /**< State (private data) output */ + "IA_CSS_TERMINAL_TYPE_STATE_OUT", + "IA_CSS_TERMINAL_TYPE_PROGRAM", + "IA_CSS_TERMINAL_TYPR_PROGRAM_CONTROL_INIT", + "UNDEFINED_TERMINAL_TYPE"}; + +#endif + +bool ia_css_is_terminal_manifest_spatial_parameter_terminal( + const ia_css_terminal_manifest_t *manifest) +{ + ia_css_terminal_type_t terminal_type; + + IA_CSS_TRACE_0(PSYSAPI_STATIC, VERBOSE, + "ia_css_is_terminal_manifest_parameter_terminal(): enter:\n"); + + terminal_type = ia_css_terminal_manifest_get_type(manifest); + + return ((terminal_type == IA_CSS_TERMINAL_TYPE_PARAM_SPATIAL_IN) || + (terminal_type == IA_CSS_TERMINAL_TYPE_PARAM_SPATIAL_OUT)); +} + +bool ia_css_is_terminal_manifest_program_terminal( + const ia_css_terminal_manifest_t *manifest) +{ + ia_css_terminal_type_t terminal_type; + + IA_CSS_TRACE_0(PSYSAPI_STATIC, VERBOSE, + "ia_css_is_terminal_manifest_parameter_terminal(): enter:\n"); + + terminal_type = ia_css_terminal_manifest_get_type(manifest); + + return (terminal_type == IA_CSS_TERMINAL_TYPE_PROGRAM); +} + +bool ia_css_is_terminal_manifest_program_control_init_terminal( + const ia_css_terminal_manifest_t *manifest) +{ + ia_css_terminal_type_t terminal_type; + + IA_CSS_TRACE_0(PSYSAPI_STATIC, VERBOSE, + "ia_css_is_terminal_manifest_program_control_init_terminal(): enter:\n"); + + terminal_type = ia_css_terminal_manifest_get_type(manifest); + + return (terminal_type == IA_CSS_TERMINAL_TYPE_PROGRAM_CONTROL_INIT); +} + + +bool ia_css_is_terminal_manifest_parameter_terminal( + const ia_css_terminal_manifest_t *manifest) +{ + /* will return an error value on error */ + ia_css_terminal_type_t terminal_type; + + IA_CSS_TRACE_0(PSYSAPI_STATIC, VERBOSE, + "ia_css_is_terminal_manifest_parameter_terminal(): enter:\n"); + + terminal_type = ia_css_terminal_manifest_get_type(manifest); + + return (terminal_type == IA_CSS_TERMINAL_TYPE_PARAM_CACHED_IN 
|| + terminal_type == IA_CSS_TERMINAL_TYPE_PARAM_CACHED_OUT); +} + +bool ia_css_is_terminal_manifest_data_terminal( + const ia_css_terminal_manifest_t *manifest) +{ + /* will return an error value on error */ + ia_css_terminal_type_t terminal_type; + + IA_CSS_TRACE_0(PSYSAPI_STATIC, VERBOSE, + "ia_css_is_terminal_manifest_data_terminal(): enter:\n"); + + terminal_type = ia_css_terminal_manifest_get_type(manifest); + + return ((terminal_type == IA_CSS_TERMINAL_TYPE_DATA_IN) || + (terminal_type == IA_CSS_TERMINAL_TYPE_DATA_OUT)); +} + +bool ia_css_is_terminal_manifest_sliced_terminal( + const ia_css_terminal_manifest_t *manifest) +{ + ia_css_terminal_type_t terminal_type; + + IA_CSS_TRACE_0(PSYSAPI_STATIC, VERBOSE, + "ia_css_is_terminal_manifest_sliced_terminal(): enter:\n"); + + terminal_type = ia_css_terminal_manifest_get_type(manifest); + + return ((terminal_type == IA_CSS_TERMINAL_TYPE_PARAM_SLICED_IN) || + (terminal_type == IA_CSS_TERMINAL_TYPE_PARAM_SLICED_OUT)); +} + +size_t ia_css_terminal_manifest_get_size( + const ia_css_terminal_manifest_t *manifest) +{ + size_t size = 0; + + IA_CSS_TRACE_0(PSYSAPI_STATIC, VERBOSE, + "ia_css_terminal_manifest_get_size(): enter:\n"); + + if (manifest != NULL) { + size = manifest->size; + } else { + IA_CSS_TRACE_0(PSYSAPI_STATIC, WARNING, + "ia_css_terminal_manifest_get_size: invalid argument\n"); + } + return size; +} + +ia_css_terminal_type_t ia_css_terminal_manifest_get_type( + const ia_css_terminal_manifest_t *manifest) +{ + ia_css_terminal_type_t terminal_type = IA_CSS_N_TERMINAL_TYPES; + + IA_CSS_TRACE_0(PSYSAPI_STATIC, VERBOSE, + "ia_css_terminal_manifest_get_type(): enter:\n"); + + if (manifest != NULL) { + terminal_type = manifest->terminal_type; + } else { + IA_CSS_TRACE_0(PSYSAPI_STATIC, WARNING, + "ia_css_terminal_manifest_get_type: invalid argument\n"); + } + return terminal_type; +} + +int ia_css_terminal_manifest_set_type( + ia_css_terminal_manifest_t *manifest, + const ia_css_terminal_type_t terminal_type) +{ 
+ int retval = -1; + + IA_CSS_TRACE_0(PSYSAPI_STATIC, VERBOSE, + "ia_css_terminal_manifest_set_type(): enter:\n"); + + if (manifest != NULL) { + manifest->terminal_type = terminal_type; + retval = 0; + } else { + IA_CSS_TRACE_1(PSYSAPI_STATIC, ERROR, + "ia_css_terminal_manifest_set_type failed (%i)\n", + retval); + } + return retval; +} + +int ia_css_terminal_manifest_set_ID( + ia_css_terminal_manifest_t *manifest, + const ia_css_terminal_ID_t ID) +{ + int retval = -1; + + IA_CSS_TRACE_0(PSYSAPI_STATIC, VERBOSE, + "ia_css_terminal_manifest_set_ID(): enter:\n"); + + if (manifest != NULL) { + manifest->ID = ID; + retval = 0; + } else { + IA_CSS_TRACE_1(PSYSAPI_STATIC, ERROR, + "ia_css_terminal_manifest_set_ID failed (%i)\n", + retval); + } + return retval; +} + +ia_css_terminal_ID_t ia_css_terminal_manifest_get_ID( + const ia_css_terminal_manifest_t *manifest) +{ + ia_css_terminal_ID_t retval; + + IA_CSS_TRACE_0(PSYSAPI_STATIC, VERBOSE, + "ia_css_terminal_manifest_get_ID(): enter:\n"); + + if (manifest != NULL) { + retval = manifest->ID; + } else { + IA_CSS_TRACE_0(PSYSAPI_STATIC, ERROR, + "ia_css_terminal_manifest_get_ID failed\n"); + retval = IA_CSS_TERMINAL_INVALID_ID; + } + return retval; +} + +ia_css_program_group_manifest_t *ia_css_terminal_manifest_get_parent( + const ia_css_terminal_manifest_t *manifest) +{ + ia_css_program_group_manifest_t *parent = NULL; + char *base; + + IA_CSS_TRACE_0(PSYSAPI_STATIC, VERBOSE, + "ia_css_terminal_manifest_get_parent(): enter:\n"); + + verifexit(manifest != NULL); + + base = (char *)((char *)manifest + manifest->parent_offset); + + parent = (ia_css_program_group_manifest_t *)(base); +EXIT: + return parent; +} + +int ia_css_terminal_manifest_set_parent_offset( + ia_css_terminal_manifest_t *manifest, + int32_t terminal_offset) +{ + int retval = -1; + + IA_CSS_TRACE_0(PSYSAPI_STATIC, VERBOSE, + "ia_css_terminal_manifest_set_parent_offset(): enter:\n"); + + verifexit(manifest != NULL); + + /* parent is at negative offset away 
from current terminal offset*/ + manifest->parent_offset = -terminal_offset; + + retval = 0; +EXIT: + if (retval != 0) { + IA_CSS_TRACE_1(PSYSAPI_STATIC, ERROR, + "ia_css_terminal_manifest_set_parent_offset failed (%i)\n", + retval); + } + return retval; +} + +ia_css_frame_format_bitmap_t +ia_css_data_terminal_manifest_get_frame_format_bitmap( + const ia_css_data_terminal_manifest_t *manifest) +{ + ia_css_frame_format_bitmap_t bitmap = 0; + + IA_CSS_TRACE_0(PSYSAPI_STATIC, VERBOSE, + "ia_css_data_terminal_manifest_get_frame_format_bitmap(): enter:\n"); + + if (manifest != NULL) { + bitmap = manifest->frame_format_bitmap; + } else { + IA_CSS_TRACE_0(PSYSAPI_STATIC, WARNING, + "ia_css_data_terminal_manifest_get_frame_format_bitmap invalid argument\n"); + } + return bitmap; +} + +int ia_css_data_terminal_manifest_set_frame_format_bitmap( + ia_css_data_terminal_manifest_t *manifest, + ia_css_frame_format_bitmap_t bitmap) +{ + int ret = -1; + + IA_CSS_TRACE_0(PSYSAPI_STATIC, VERBOSE, + "ia_css_data_terminal_manifest_set_frame_format_bitmap(): enter:\n"); + + if (manifest != NULL) { + manifest->frame_format_bitmap = bitmap; + ret = 0; + } else { + IA_CSS_TRACE_1(PSYSAPI_STATIC, ERROR, + "ia_css_data_terminal_manifest_set_frame_format_bitmap failed (%i)\n", + ret); + } + + return ret; +} + +bool ia_css_data_terminal_manifest_can_support_compression( + const ia_css_data_terminal_manifest_t *manifest) +{ + bool compression_support = false; + + IA_CSS_TRACE_0(PSYSAPI_STATIC, VERBOSE, + "ia_css_data_terminal_manifest_get_compression_support(): enter:\n"); + + if (manifest != NULL) { + /* compression_support is used boolean encoded in uint8_t. 
+ * So we only need to check + * if this is non-zero + */ + compression_support = (manifest->compression_support != 0); + } else { + IA_CSS_TRACE_0(PSYSAPI_STATIC, ERROR, + "ia_css_data_terminal_manifest_can_support_compression invalid argument\n"); + } + + return compression_support; +} + +int ia_css_data_terminal_manifest_set_compression_support( + ia_css_data_terminal_manifest_t *manifest, + bool compression_support) +{ + int ret = -1; + + IA_CSS_TRACE_0(PSYSAPI_STATIC, VERBOSE, + "ia_css_data_terminal_manifest_set_compression_support(): enter:\n"); + + if (manifest != NULL) { + manifest->compression_support = + (compression_support == true) ? 1 : 0; + ret = 0; + } else { + IA_CSS_TRACE_1(PSYSAPI_STATIC, ERROR, + "ia_css_data_terminal_manifest_set_compression_support failed (%i)\n", + ret); + } + + return ret; +} + +ia_css_connection_bitmap_t ia_css_data_terminal_manifest_get_connection_bitmap( + const ia_css_data_terminal_manifest_t *manifest) +{ + ia_css_connection_bitmap_t connection_bitmap = 0; + + IA_CSS_TRACE_0(PSYSAPI_STATIC, VERBOSE, + "ia_css_data_terminal_manifest_get_connection_bitmap(): enter:\n"); + + if (manifest != NULL) { + connection_bitmap = manifest->connection_bitmap; + } else { + IA_CSS_TRACE_0(PSYSAPI_STATIC, WARNING, + "ia_css_data_terminal_manifest_get_connection_bitmap invalid argument\n"); + } + return connection_bitmap; +} + +int ia_css_data_terminal_manifest_set_connection_bitmap( + ia_css_data_terminal_manifest_t *manifest, ia_css_connection_bitmap_t bitmap) +{ + int ret = -1; + + IA_CSS_TRACE_0(PSYSAPI_STATIC, VERBOSE, + "ia_css_data_terminal_manifest_set_connection_bitmap(): enter:\n"); + + if (manifest != NULL) { + assert(bitmap != 0); /* zero means there is no connection, this is invalid. 
*/ + assert((bitmap >> IA_CSS_N_CONNECTION_TYPES) == 0); + + manifest->connection_bitmap = bitmap; + ret = 0; + } else { + IA_CSS_TRACE_0(PSYSAPI_STATIC, WARNING, + "ia_css_data_terminal_manifest_set_connection_bitmap invalid argument\n"); + } + return ret; +} + +/* We need to refactor those files in order to build in the firmware only + what is needed, switches are put current to workaround compilation problems + in the firmware (for example lack of uint64_t support) + supported in the firmware + */ +#if !defined(__HIVECC) +ia_css_kernel_bitmap_t ia_css_data_terminal_manifest_get_kernel_bitmap( + const ia_css_data_terminal_manifest_t *manifest) +{ + ia_css_kernel_bitmap_t kernel_bitmap = ia_css_kernel_bitmap_clear(); + + IA_CSS_TRACE_0(PSYSAPI_STATIC, VERBOSE, + "ia_css_data_terminal_manifest_get_kernel_bitmap(): enter:\n"); + + if (manifest != NULL) { + kernel_bitmap = manifest->kernel_bitmap; + } else { + IA_CSS_TRACE_0(PSYSAPI_STATIC, WARNING, + "ia_css_data_terminal_manifest_get_kernel_bitmap: invalid argument\n"); + } + return kernel_bitmap; +} + +int ia_css_data_terminal_manifest_set_kernel_bitmap( + ia_css_data_terminal_manifest_t *manifest, + const ia_css_kernel_bitmap_t kernel_bitmap) +{ + int retval = -1; + + IA_CSS_TRACE_0(PSYSAPI_STATIC, VERBOSE, + "ia_css_data_terminal_manifest_set_kernel_bitmap(): enter:\n"); + + if (manifest != NULL) { + manifest->kernel_bitmap = kernel_bitmap; + retval = 0; + } else { + IA_CSS_TRACE_1(PSYSAPI_STATIC, ERROR, + "ia_css_data_terminal_manifest_set_kernel_bitmap: failed (%i)\n", + retval); + } + + return retval; +} + +int ia_css_data_terminal_manifest_set_kernel_bitmap_unique( + ia_css_data_terminal_manifest_t *manifest, + const unsigned int index) +{ + int retval = -1; + + IA_CSS_TRACE_0(PSYSAPI_STATIC, VERBOSE, + "ia_css_data_terminal_manifest_set_kernel_bitmap_unique(): enter:\n"); + + if (manifest != NULL) { + ia_css_kernel_bitmap_t kernel_bitmap = + ia_css_kernel_bitmap_clear(); + + kernel_bitmap = 
ia_css_kernel_bitmap_set(kernel_bitmap, index); + verifexit(!ia_css_is_kernel_bitmap_empty(kernel_bitmap)); + verifexit(ia_css_data_terminal_manifest_set_kernel_bitmap( + manifest, kernel_bitmap) == 0); + retval = 0; + } + +EXIT: + if (retval != 0) { + IA_CSS_TRACE_1(PSYSAPI_STATIC, ERROR, + "ia_css_data_terminal_manifest_set_kernel_bitmap_unique failed (%i)\n", + retval); + } + return retval; +} +#endif + +int ia_css_data_terminal_manifest_set_min_size( + ia_css_data_terminal_manifest_t *manifest, + const uint16_t min_size[IA_CSS_N_DATA_DIMENSION]) +{ + int retval = -1; + + IA_CSS_TRACE_0(PSYSAPI_STATIC, VERBOSE, + "ia_css_data_terminal_manifest_set_min_size(): enter:\n"); + + verifexit(manifest != NULL); + + manifest->min_size[IA_CSS_COL_DIMENSION] = + min_size[IA_CSS_COL_DIMENSION]; + manifest->min_size[IA_CSS_ROW_DIMENSION] = + min_size[IA_CSS_ROW_DIMENSION]; + retval = 0; + +EXIT: + if (NULL == manifest) { + IA_CSS_TRACE_0(PSYSAPI_STATIC, ERROR, + "ia_css_data_terminal_manifest_set_min_size: invalid argument\n"); + } + return retval; +} + +int ia_css_data_terminal_manifest_set_max_size( + ia_css_data_terminal_manifest_t *manifest, + const uint16_t max_size[IA_CSS_N_DATA_DIMENSION]) +{ + int retval = -1; + + IA_CSS_TRACE_0(PSYSAPI_STATIC, VERBOSE, + "ia_css_data_terminal_manifest_set_max_size(): enter:\n"); + + verifexit(manifest != NULL); + + manifest->max_size[IA_CSS_COL_DIMENSION] = + max_size[IA_CSS_COL_DIMENSION]; + manifest->max_size[IA_CSS_ROW_DIMENSION] = + max_size[IA_CSS_ROW_DIMENSION]; + retval = 0; + +EXIT: + if (NULL == manifest) { + IA_CSS_TRACE_0(PSYSAPI_STATIC, ERROR, + "ia_css_data_terminal_manifest_set_max_size: invalid argument\n"); + } + return retval; +} + +int ia_css_data_terminal_manifest_get_min_size( + const ia_css_data_terminal_manifest_t *manifest, + uint16_t min_size[IA_CSS_N_DATA_DIMENSION]) +{ + int retval = -1; + + IA_CSS_TRACE_0(PSYSAPI_STATIC, VERBOSE, + "ia_css_data_terminal_manifest_get_min_size(): enter:\n"); + + 
verifexit(manifest != NULL); + + min_size[IA_CSS_COL_DIMENSION] = + manifest->min_size[IA_CSS_COL_DIMENSION]; + min_size[IA_CSS_ROW_DIMENSION] = + manifest->min_size[IA_CSS_ROW_DIMENSION]; + retval = 0; + +EXIT: + if (NULL == manifest) { + IA_CSS_TRACE_0(PSYSAPI_STATIC, ERROR, + "ia_css_data_terminal_manifest_get_min_size: invalid argument\n"); + } + return retval; +} + +int ia_css_data_terminal_manifest_get_max_size( + const ia_css_data_terminal_manifest_t *manifest, + uint16_t max_size[IA_CSS_N_DATA_DIMENSION]) +{ + int retval = -1; + + IA_CSS_TRACE_0(PSYSAPI_STATIC, VERBOSE, + "ia_css_data_terminal_manifest_get_max_size(): enter:\n"); + + verifexit(manifest != NULL); + + max_size[IA_CSS_COL_DIMENSION] = + manifest->max_size[IA_CSS_COL_DIMENSION]; + max_size[IA_CSS_ROW_DIMENSION] = + manifest->max_size[IA_CSS_ROW_DIMENSION]; + retval = 0; + +EXIT: + if (NULL == manifest) { + IA_CSS_TRACE_0(PSYSAPI_STATIC, ERROR, + "ia_css_data_terminal_manifest_get_max_size: invalid argument\n"); + } + return retval; +} + +int ia_css_data_terminal_manifest_set_min_fragment_size( + ia_css_data_terminal_manifest_t *manifest, + const uint16_t min_size[IA_CSS_N_DATA_DIMENSION]) +{ + int retval = -1; + + IA_CSS_TRACE_0(PSYSAPI_STATIC, VERBOSE, + "ia_css_data_terminal_manifest_set_min_fragment_size(): enter:\n"); + + verifexit(manifest != NULL); + + manifest->min_fragment_size[IA_CSS_COL_DIMENSION] = + min_size[IA_CSS_COL_DIMENSION]; + manifest->min_fragment_size[IA_CSS_ROW_DIMENSION] = + min_size[IA_CSS_ROW_DIMENSION]; + retval = 0; + +EXIT: + if (NULL == manifest) { + IA_CSS_TRACE_0(PSYSAPI_STATIC, ERROR, + "ia_css_data_terminal_manifest_set_min_fragment_size invalid argument\n"); + } + return retval; +} + +int ia_css_data_terminal_manifest_set_max_fragment_size( + ia_css_data_terminal_manifest_t *manifest, + const uint16_t max_size[IA_CSS_N_DATA_DIMENSION]) +{ + int retval = -1; + + IA_CSS_TRACE_0(PSYSAPI_STATIC, VERBOSE, + "ia_css_data_terminal_manifest_set_max_fragment_size(): 
enter:\n"); + + verifexit(manifest != NULL); + + manifest->max_fragment_size[IA_CSS_COL_DIMENSION] = + max_size[IA_CSS_COL_DIMENSION]; + manifest->max_fragment_size[IA_CSS_ROW_DIMENSION] = + max_size[IA_CSS_ROW_DIMENSION]; + retval = 0; + +EXIT: + if (NULL == manifest) { + IA_CSS_TRACE_0(PSYSAPI_STATIC, ERROR, + "ia_css_data_terminal_manifest_set_max_fragment_size invalid argument\n"); + } + return retval; +} + +int ia_css_data_terminal_manifest_get_min_fragment_size( + const ia_css_data_terminal_manifest_t *manifest, + uint16_t min_size[IA_CSS_N_DATA_DIMENSION]) +{ + int retval = -1; + + IA_CSS_TRACE_0(PSYSAPI_STATIC, VERBOSE, + "ia_css_data_terminal_manifest_get_min_fragment_size(): enter:\n"); + + verifexit(manifest != NULL); + + min_size[IA_CSS_COL_DIMENSION] = + manifest->min_fragment_size[IA_CSS_COL_DIMENSION]; + min_size[IA_CSS_ROW_DIMENSION] = + manifest->min_fragment_size[IA_CSS_ROW_DIMENSION]; + retval = 0; + +EXIT: + if (NULL == manifest) { + IA_CSS_TRACE_0(PSYSAPI_STATIC, ERROR, + "ia_css_data_terminal_manifest_get_min_fragment_size invalid argument\n"); + } + return retval; +} + +int ia_css_data_terminal_manifest_get_max_fragment_size( + const ia_css_data_terminal_manifest_t *manifest, + uint16_t max_size[IA_CSS_N_DATA_DIMENSION]) +{ + int retval = -1; + + IA_CSS_TRACE_0(PSYSAPI_STATIC, VERBOSE, + "ia_css_data_terminal_manifest_get_max_fragment_size(): enter:\n"); + + verifexit(manifest != NULL); + + max_size[IA_CSS_COL_DIMENSION] = + manifest->max_fragment_size[IA_CSS_COL_DIMENSION]; + max_size[IA_CSS_ROW_DIMENSION] = + manifest->max_fragment_size[IA_CSS_ROW_DIMENSION]; + retval = 0; + +EXIT: + if (NULL == manifest) { + IA_CSS_TRACE_0(PSYSAPI_STATIC, ERROR, + "ia_css_data_terminal_manifest_get_max_fragment_size invalid argument\n"); + } + return retval; +} + +/* We need to refactor those files in order to build in the firmware only + what is needed, switches are put current to workaround compilation problems + in the firmware (for example lack of 
uint64_t support) + supported in the firmware + */ +#if !defined(__HIVECC) + +#define PRINT_DIMENSION(name, var) IA_CSS_TRACE_3(PSYSAPI_STATIC, \ + INFO, "%s:\t%d %d\n", \ + (name), \ + (var)[IA_CSS_COL_DIMENSION], \ + (var)[IA_CSS_ROW_DIMENSION]) + +int ia_css_terminal_manifest_print( + const ia_css_terminal_manifest_t *manifest, + void *fid) +{ + int retval = -1; + ia_css_terminal_type_t terminal_type = + ia_css_terminal_manifest_get_type(manifest); + + IA_CSS_TRACE_0(PSYSAPI_STATIC, INFO, + "ia_css_terminal_manifest_print(): enter:\n"); + + verifexit(manifest != NULL); + NOT_USED(fid); + + IA_CSS_TRACE_1(PSYSAPI_STATIC, INFO, "sizeof(manifest) = %d\n", + (int)ia_css_terminal_manifest_get_size(manifest)); + + PRINT("typeof(manifest) = %s\n", terminal_type_strings[terminal_type]); + + if (terminal_type == IA_CSS_TERMINAL_TYPE_PARAM_CACHED_IN || + terminal_type == IA_CSS_TERMINAL_TYPE_PARAM_CACHED_OUT) { + ia_css_param_terminal_manifest_t *pterminal_manifest = + (ia_css_param_terminal_manifest_t *)manifest; + uint16_t section_count = + pterminal_manifest->param_manifest_section_desc_count; + int i; + + IA_CSS_TRACE_1(PSYSAPI_STATIC, INFO, + "sections(manifest) = %d\n", (int)section_count); + for (i = 0; i < section_count; i++) { + const ia_css_param_manifest_section_desc_t *manifest = + ia_css_param_terminal_manifest_get_prm_sct_desc( + pterminal_manifest, i); + verifjmpexit(manifest != NULL); + IA_CSS_TRACE_1(PSYSAPI_STATIC, INFO, + "kernel_id = %d\n", (int)manifest->kernel_id); + IA_CSS_TRACE_1(PSYSAPI_STATIC, INFO, + "mem_type_id = %d\n", + (int)manifest->mem_type_id); + IA_CSS_TRACE_1(PSYSAPI_STATIC, INFO, + "max_mem_size = %d\n", + (int)manifest->max_mem_size); + IA_CSS_TRACE_1(PSYSAPI_STATIC, INFO, + "region_id = %d\n", + (int)manifest->region_id); + } + } else if (terminal_type == IA_CSS_TERMINAL_TYPE_PARAM_SLICED_IN || + terminal_type == IA_CSS_TERMINAL_TYPE_PARAM_SLICED_OUT) { + ia_css_sliced_param_terminal_manifest_t + *sliced_terminal_manifest = + 
(ia_css_sliced_param_terminal_manifest_t *)manifest; + uint32_t kernel_id; + uint16_t section_count; + uint16_t section_idx; + + kernel_id = sliced_terminal_manifest->kernel_id; + section_count = + sliced_terminal_manifest->sliced_param_section_count; + + NOT_USED(kernel_id); + + IA_CSS_TRACE_1(PSYSAPI_STATIC, INFO, + "kernel_id = %d\n", (int)kernel_id); + IA_CSS_TRACE_1(PSYSAPI_STATIC, INFO, + "section_count = %d\n", (int)section_count); + + for (section_idx = 0; section_idx < section_count; + section_idx++) { + ia_css_sliced_param_manifest_section_desc_t + *sliced_param_manifest_section_desc; + + IA_CSS_TRACE_1(PSYSAPI_STATIC, INFO, + "section %d\n", (int)section_idx); + sliced_param_manifest_section_desc = + ia_css_sliced_param_terminal_manifest_get_sliced_prm_sct_desc( + sliced_terminal_manifest, section_idx); + verifjmpexit(sliced_param_manifest_section_desc != + NULL); + IA_CSS_TRACE_1(PSYSAPI_STATIC, INFO, + "mem_type_id = %d\n", + (int)sliced_param_manifest_section_desc->mem_type_id); + IA_CSS_TRACE_1(PSYSAPI_STATIC, INFO, + "region_id = %d\n", + (int)sliced_param_manifest_section_desc->region_id); + IA_CSS_TRACE_1(PSYSAPI_STATIC, INFO, + "max_mem_size = %d\n", + (int)sliced_param_manifest_section_desc->max_mem_size); + } + } else if (terminal_type == IA_CSS_TERMINAL_TYPE_PROGRAM) { + ia_css_program_terminal_manifest_t *program_terminal_manifest = + (ia_css_program_terminal_manifest_t *)manifest; + uint32_t sequencer_info_kernel_id; + uint16_t max_kernel_fragment_sequencer_command_desc; + uint16_t kernel_fragment_sequencer_info_manifest_info_count; + uint16_t seq_info_idx; + + sequencer_info_kernel_id = + program_terminal_manifest->sequencer_info_kernel_id; + max_kernel_fragment_sequencer_command_desc = + program_terminal_manifest-> + max_kernel_fragment_sequencer_command_desc; + kernel_fragment_sequencer_info_manifest_info_count = + program_terminal_manifest-> + kernel_fragment_sequencer_info_manifest_info_count; + + NOT_USED(sequencer_info_kernel_id); + 
NOT_USED(max_kernel_fragment_sequencer_command_desc); + + IA_CSS_TRACE_1(PSYSAPI_STATIC, INFO, + "sequencer_info_kernel_id = %d\n", + (int)sequencer_info_kernel_id); + IA_CSS_TRACE_1(PSYSAPI_STATIC, INFO, + "max_kernel_fragment_sequencer_command_desc = %d\n", + (int)max_kernel_fragment_sequencer_command_desc); + IA_CSS_TRACE_1(PSYSAPI_STATIC, INFO, + "kernel_fragment_sequencer_info_manifest_info_count = %d\n", + (int) + kernel_fragment_sequencer_info_manifest_info_count); + + for (seq_info_idx = 0; seq_info_idx < + kernel_fragment_sequencer_info_manifest_info_count; + seq_info_idx++) { + ia_css_kernel_fragment_sequencer_info_manifest_desc_t + *sequencer_info_manifest_desc; + + IA_CSS_TRACE_1(PSYSAPI_STATIC, INFO, + "sequencer info %d\n", (int)seq_info_idx); + sequencer_info_manifest_desc = + ia_css_program_terminal_manifest_get_kernel_frgmnt_seq_info_desc + (program_terminal_manifest, seq_info_idx); + verifjmpexit(sequencer_info_manifest_desc != NULL); + IA_CSS_TRACE_2(PSYSAPI_STATIC, INFO, + "min_fragment_grid_slice_dimension[] = {%d, %d}\n", + (int)sequencer_info_manifest_desc-> + min_fragment_grid_slice_dimension[ + IA_CSS_COL_DIMENSION], + (int)sequencer_info_manifest_desc-> + min_fragment_grid_slice_dimension[ + IA_CSS_ROW_DIMENSION]); + IA_CSS_TRACE_2(PSYSAPI_STATIC, INFO, + "max_fragment_grid_slice_dimension[] = {%d, %d}\n", + (int)sequencer_info_manifest_desc-> + max_fragment_grid_slice_dimension[ + IA_CSS_COL_DIMENSION], + (int)sequencer_info_manifest_desc-> + max_fragment_grid_slice_dimension[ + IA_CSS_ROW_DIMENSION]); + IA_CSS_TRACE_2(PSYSAPI_STATIC, INFO, + "min_fragment_grid_slice_count[] = {%d, %d}\n", + (int)sequencer_info_manifest_desc-> + min_fragment_grid_slice_count[ + IA_CSS_COL_DIMENSION], + (int)sequencer_info_manifest_desc-> + min_fragment_grid_slice_count[ + IA_CSS_ROW_DIMENSION]); + IA_CSS_TRACE_2(PSYSAPI_STATIC, INFO, + "max_fragment_grid_slice_count[] = {%d, %d}\n", + (int)sequencer_info_manifest_desc-> + max_fragment_grid_slice_count[ + 
IA_CSS_COL_DIMENSION], + (int)sequencer_info_manifest_desc-> + max_fragment_grid_slice_count[ + IA_CSS_ROW_DIMENSION]); + IA_CSS_TRACE_2(PSYSAPI_STATIC, INFO, + "min_fragment_grid_point_decimation_factor[] = {%d, %d}\n", + (int)sequencer_info_manifest_desc-> + min_fragment_grid_point_decimation_factor[ + IA_CSS_COL_DIMENSION], + (int)sequencer_info_manifest_desc-> + min_fragment_grid_point_decimation_factor[ + IA_CSS_ROW_DIMENSION]); + IA_CSS_TRACE_2(PSYSAPI_STATIC, INFO, + "max_fragment_grid_point_decimation_factor[] = {%d, %d}\n", + (int)sequencer_info_manifest_desc-> + max_fragment_grid_point_decimation_factor[ + IA_CSS_COL_DIMENSION], + (int)sequencer_info_manifest_desc-> + max_fragment_grid_point_decimation_factor[ + IA_CSS_ROW_DIMENSION]); + IA_CSS_TRACE_2(PSYSAPI_STATIC, INFO, + "min_fragment_grid_overlay_on_pixel_topleft_index[] = {%d, %d}\n", + (int)sequencer_info_manifest_desc-> + min_fragment_grid_overlay_pixel_topleft_index[ + IA_CSS_COL_DIMENSION], + (int)sequencer_info_manifest_desc-> + min_fragment_grid_overlay_pixel_topleft_index[ + IA_CSS_ROW_DIMENSION]); + IA_CSS_TRACE_2(PSYSAPI_STATIC, INFO, + "max_fragment_grid_overlay_on_pixel_topleft_index[] = {%d, %d}\n", + (int)sequencer_info_manifest_desc-> + max_fragment_grid_overlay_pixel_topleft_index[ + IA_CSS_COL_DIMENSION], + (int)sequencer_info_manifest_desc-> + max_fragment_grid_overlay_pixel_topleft_index[ + IA_CSS_ROW_DIMENSION]); + IA_CSS_TRACE_2(PSYSAPI_STATIC, INFO, + "min_fragment_grid_overlay_on_pixel_dimension[] = {%d, %d}\n", + (int)sequencer_info_manifest_desc-> + min_fragment_grid_overlay_pixel_dimension[ + IA_CSS_COL_DIMENSION], + (int)sequencer_info_manifest_desc-> + min_fragment_grid_overlay_pixel_dimension[ + IA_CSS_ROW_DIMENSION]); + IA_CSS_TRACE_2(PSYSAPI_STATIC, INFO, + "max_fragment_grid_overlay_on_pixel_dimension[] = {%d, %d}\n", + (int)sequencer_info_manifest_desc-> + max_fragment_grid_overlay_pixel_dimension[ + IA_CSS_COL_DIMENSION], + (int)sequencer_info_manifest_desc-> + 
max_fragment_grid_overlay_pixel_dimension[ + IA_CSS_ROW_DIMENSION]); + } + } else if (terminal_type == IA_CSS_TERMINAL_TYPE_PROGRAM_CONTROL_INIT) { + ia_css_program_control_init_terminal_manifest_t *progctrlinit_man = + (ia_css_program_control_init_terminal_manifest_t *)manifest; + ia_css_program_control_init_terminal_manifest_print(progctrlinit_man); + } else if (terminal_type == IA_CSS_TERMINAL_TYPE_DATA_IN || + terminal_type == IA_CSS_TERMINAL_TYPE_DATA_OUT) { + + ia_css_data_terminal_manifest_t *dterminal_manifest = + (ia_css_data_terminal_manifest_t *)manifest; + int i; + + NOT_USED(dterminal_manifest); + + verifexit(ia_css_kernel_bitmap_print( + ia_css_data_terminal_manifest_get_kernel_bitmap( + dterminal_manifest), fid) == 0); + IA_CSS_TRACE_1(PSYSAPI_STATIC, INFO, + "formats(manifest) = %04x\n", + (int)ia_css_data_terminal_manifest_get_frame_format_bitmap( + dterminal_manifest)); + IA_CSS_TRACE_1(PSYSAPI_STATIC, INFO, + "connection(manifest) = %04x\n", + (int)ia_css_data_terminal_manifest_get_connection_bitmap( + dterminal_manifest)); + IA_CSS_TRACE_1(PSYSAPI_STATIC, INFO, + "dependent(manifest) = %d\n", + (int)dterminal_manifest->terminal_dependency); + + IA_CSS_TRACE_1(PSYSAPI_STATIC, INFO, + "\tmin_size[%d] = {\n", + IA_CSS_N_DATA_DIMENSION); + for (i = 0; i < (int)IA_CSS_N_DATA_DIMENSION - 1; i++) { + IA_CSS_TRACE_1(PSYSAPI_STATIC, INFO, + "\t\t%4d,\n", dterminal_manifest->min_size[i]); + } + IA_CSS_TRACE_1(PSYSAPI_STATIC, INFO, + "\t\t%4d }\n", dterminal_manifest->min_size[i]); + + IA_CSS_TRACE_1(PSYSAPI_STATIC, INFO, + "\tmax_size[%d] = {\n", IA_CSS_N_DATA_DIMENSION); + for (i = 0; i < (int)IA_CSS_N_DATA_DIMENSION - 1; i++) { + IA_CSS_TRACE_1(PSYSAPI_STATIC, INFO, + "\t\t%4d,\n", dterminal_manifest->max_size[i]); + } + IA_CSS_TRACE_1(PSYSAPI_STATIC, INFO, + "\t\t%4d }\n", dterminal_manifest->max_size[i]); + + IA_CSS_TRACE_1(PSYSAPI_STATIC, INFO, + "\tmin_fragment_size[%d] = {\n", + IA_CSS_N_DATA_DIMENSION); + for (i = 0; i < 
(int)IA_CSS_N_DATA_DIMENSION - 1; i++) { + IA_CSS_TRACE_1(PSYSAPI_STATIC, INFO, + "\t\t%4d,\n", + dterminal_manifest->min_fragment_size[i]); + } + IA_CSS_TRACE_1(PSYSAPI_STATIC, INFO, + "\t\t%4d }\n", + dterminal_manifest->min_fragment_size[i]); + + IA_CSS_TRACE_1(PSYSAPI_STATIC, INFO, + "\tmax_fragment_size[%d] = {\n", + IA_CSS_N_DATA_DIMENSION); + for (i = 0; i < (int)IA_CSS_N_DATA_DIMENSION - 1; i++) { + IA_CSS_TRACE_1(PSYSAPI_STATIC, INFO, + "\t\t%4d,\n", + dterminal_manifest->max_fragment_size[i]); + } + IA_CSS_TRACE_1(PSYSAPI_STATIC, INFO, + "\t\t%4d }\n", + dterminal_manifest->max_fragment_size[i]); + + } else if (terminal_type == IA_CSS_TERMINAL_TYPE_PARAM_SPATIAL_IN || + terminal_type == IA_CSS_TERMINAL_TYPE_PARAM_SPATIAL_OUT) { + + ia_css_spatial_param_terminal_manifest_t *stm = + (ia_css_spatial_param_terminal_manifest_t *)manifest; + ia_css_frame_grid_param_manifest_section_desc_t *sec; + int sec_count = + stm->frame_grid_param_manifest_section_desc_count; + ia_css_fragment_grid_manifest_desc_t *fragd = + &stm->common_fragment_grid_desc; + ia_css_frame_grid_manifest_desc_t *framed = + &stm->frame_grid_desc; + int sec_index; + + IA_CSS_TRACE_1(PSYSAPI_STATIC, INFO, "kernel_id:\t\t%d\n", + stm->kernel_id); + IA_CSS_TRACE_1(PSYSAPI_STATIC, INFO, "compute_units_p_elem:\t%d\n", + stm->compute_units_p_elem); + + PRINT_DIMENSION("min_fragment_grid_dimension", + fragd->min_fragment_grid_dimension); + PRINT_DIMENSION("max_fragment_grid_dimension", + fragd->max_fragment_grid_dimension); + PRINT_DIMENSION("min_frame_grid_dimension", + framed->min_frame_grid_dimension); + PRINT_DIMENSION("max_frame_grid_dimension", + framed->max_frame_grid_dimension); + + NOT_USED(framed); + NOT_USED(fragd); + + for (sec_index = 0; sec_index < sec_count; sec_index++) { + sec = ia_css_spatial_param_terminal_manifest_get_frm_grid_prm_sct_desc( + stm, sec_index); + verifjmpexit(sec != NULL); + + IA_CSS_TRACE_0(PSYSAPI_STATIC, INFO, "--------------------------\n"); + 
IA_CSS_TRACE_1(PSYSAPI_STATIC, INFO, "\tmem_type_id:\t%d\n", + sec->mem_type_id); + IA_CSS_TRACE_1(PSYSAPI_STATIC, INFO, "\tregion_id:\t%d\n", + sec->region_id); + IA_CSS_TRACE_1(PSYSAPI_STATIC, INFO, "\telem_size:\t%d\n", + sec->elem_size); + IA_CSS_TRACE_1(PSYSAPI_STATIC, INFO, "\tmax_mem_size:\t%d\n", + sec->max_mem_size); + } + } else if (terminal_type < IA_CSS_N_TERMINAL_TYPES) { + IA_CSS_TRACE_0(PSYSAPI_STATIC, WARNING, + "terminal type can not be pretty printed, not supported\n"); + } + + retval = 0; +EXIT: + if (retval != 0) { + IA_CSS_TRACE_1(PSYSAPI_STATIC, ERROR, + "ia_css_terminal_manifest_print failed (%i)\n", + retval); + } + return retval; +} + +/* Program control init Terminal */ +unsigned int ia_css_program_control_init_terminal_manifest_get_connect_section_count( + const ia_css_program_control_init_manifest_program_desc_t *prog) +{ + assert(prog); + return prog->connect_section_count; +} + + +unsigned int ia_css_program_control_init_terminal_manifest_get_load_section_count( + const ia_css_program_control_init_manifest_program_desc_t *prog) +{ + assert(prog); + return prog->load_section_count; +} + +unsigned int ia_css_program_control_init_terminal_manifest_get_size( + const uint16_t nof_programs, + const uint16_t *nof_load_sections, + const uint16_t *nof_connect_sections) +{ + (void)nof_load_sections; /* might be needed in future */ + (void)nof_connect_sections; /* might be needed in future */ + + return sizeof(ia_css_program_control_init_terminal_manifest_t) + + nof_programs * + sizeof(ia_css_program_control_init_manifest_program_desc_t); +} + +ia_css_program_control_init_manifest_program_desc_t * +ia_css_program_control_init_terminal_manifest_get_program_desc( + const ia_css_program_control_init_terminal_manifest_t *terminal, + unsigned int program) +{ + ia_css_program_control_init_manifest_program_desc_t *progs; + + assert(terminal != NULL); + assert(program < terminal->program_count); + + progs = 
(ia_css_program_control_init_manifest_program_desc_t *) + ((const char *)terminal + terminal->program_desc_offset); + + return &progs[program]; +} + +int ia_css_program_control_init_terminal_manifest_init( + ia_css_program_control_init_terminal_manifest_t *terminal, + const uint16_t nof_programs, + const uint16_t *nof_load_sections, + const uint16_t *nof_connect_sections) +{ + unsigned int i; + ia_css_program_control_init_manifest_program_desc_t *progs; + + if (terminal == NULL) { + return -EFAULT; + } + + terminal->program_count = nof_programs; + terminal->program_desc_offset = + sizeof(ia_css_program_control_init_terminal_manifest_t); + + progs = ia_css_program_control_init_terminal_manifest_get_program_desc( + terminal, 0); + + for (i = 0; i < nof_programs; i++) { + progs[i].load_section_count = nof_load_sections[i]; + progs[i].connect_section_count = nof_connect_sections[i]; + } + return 0; +} + +void ia_css_program_control_init_terminal_manifest_print( + ia_css_program_control_init_terminal_manifest_t *terminal) +{ + unsigned int i; + + ia_css_program_control_init_manifest_program_desc_t *progs; + + progs = ia_css_program_control_init_terminal_manifest_get_program_desc( + terminal, 0); + + assert(progs); + (void)progs; + + for (i = 0; i < terminal->program_count; i++) { + IA_CSS_TRACE_3(PSYSAPI_STATIC, INFO, + "program index: %d, load sec: %d, connect sec: %d\n", + i, + progs[i].load_section_count, + progs[i].connect_section_count); + } +} + +#endif diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/reg_dump/src/psys/cnlB0_gen_reg_dump/ia_css_debug_dump.c b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/reg_dump/src/psys/cnlB0_gen_reg_dump/ia_css_debug_dump.c new file mode 100644 index 0000000000000..c51d65c8cb647 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/reg_dump/src/psys/cnlB0_gen_reg_dump/ia_css_debug_dump.c @@ -0,0 +1,15 @@ +/* +* Support for Intel Camera Imaging ISP subsystem. 
+* Copyright (c) 2010 - 2018, Intel Corporation. +* +* This program is free software; you can redistribute it and/or modify it +* under the terms and conditions of the GNU General Public License, +* version 2, as published by the Free Software Foundation. +* +* This program is distributed in the hope it will be useful, but WITHOUT +* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for +* more details. +*/ +#include "ia_css_debug_dump.h" + void ia_css_debug_dump(void) {} \ No newline at end of file diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/reg_dump/src/psys/cnlB0_gen_reg_dump/ia_css_debug_dump.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/reg_dump/src/psys/cnlB0_gen_reg_dump/ia_css_debug_dump.h new file mode 100644 index 0000000000000..5dd23ddbd180b --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/reg_dump/src/psys/cnlB0_gen_reg_dump/ia_css_debug_dump.h @@ -0,0 +1,17 @@ +/* +* Support for Intel Camera Imaging ISP subsystem. +* Copyright (c) 2010 - 2018, Intel Corporation. +* +* This program is free software; you can redistribute it and/or modify it +* under the terms and conditions of the GNU General Public License, +* version 2, as published by the Free Software Foundation. +* +* This program is distributed in the hope it will be useful, but WITHOUT +* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for +* more details. 
+*/ +#ifndef __IA_CSS_DEBUG_DUMP_H_ + #define __IA_CSS_DEBUG_DUMP_H_ + void ia_css_debug_dump(void); + #endif /* __IA_CSS_DEBUG_DUMP_H_ */ \ No newline at end of file diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/reg_dump/src/reg_dump_generic_bridge.c b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/reg_dump/src/reg_dump_generic_bridge.c new file mode 100644 index 0000000000000..9b9161ae78cf2 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/reg_dump/src/reg_dump_generic_bridge.c @@ -0,0 +1,39 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ + +#include +#include "ia_css_trace.h" +#ifdef USE_LOGICAL_SSIDS +/* + Logical names can be used to define the SSID + In order to resolve these names the following include file should be provided + and the define above should be enabled +*/ +#include +#endif + +#define REG_DUMP_TRACE_METHOD IA_CSS_TRACE_METHOD_NATIVE +#define REG_DUMP_TRACE_LEVEL_VERBOSE IA_CSS_TRACE_LEVEL_ENABLED + +/* SSID value is defined in test makefiles as either isys0 or psys0 */ +#define REG_DUMP_READ_REGISTER(addr) vied_subsystem_load_32(SSID, addr) + +#define REG_DUMP_PRINT_0(...) \ +EXPAND_VA_ARGS(IA_CSS_TRACE_0(REG_DUMP, VERBOSE, __VA_ARGS__)) +#define REG_DUMP_PRINT_1(...) 
\ +EXPAND_VA_ARGS(IA_CSS_TRACE_1(REG_DUMP, VERBOSE, __VA_ARGS__)) +#define EXPAND_VA_ARGS(x) x + +/* Including generated source code for reg_dump */ +#include "ia_css_debug_dump.c" diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/regmem/interface/regmem_access.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/regmem/interface/regmem_access.h new file mode 100644 index 0000000000000..d4576af936f6d --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/regmem/interface/regmem_access.h @@ -0,0 +1,67 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+*/ + +#ifndef __REGMEM_ACCESS_H +#define __REGMEM_ACCESS_H + +#include "storage_class.h" + +enum regmem_id { + /* pass pkg_dir address to SPC in non-secure mode */ + PKG_DIR_ADDR_REG = 0, + /* pass syscom configuration to SPC */ + SYSCOM_CONFIG_REG = 1, + /* syscom state - modified by SP */ + SYSCOM_STATE_REG = 2, + /* syscom commands - modified by the host */ + SYSCOM_COMMAND_REG = 3, + /* Store interrupt status - updated by SP */ + SYSCOM_IRQ_REG = 4, + /* Store VTL0_ADDR_MASK in trusted secure regision - provided by host.*/ + SYSCOM_VTL0_ADDR_MASK = 5, +#if HAS_DUAL_CMD_CTX_SUPPORT + /* Initialized if trustlet exists - updated by host */ + TRUSTLET_STATUS = 6, + /* identify if SPC access blocker programming is completed - updated by SP */ + AB_SPC_STATUS = 7, + /* first syscom queue pointer register */ + SYSCOM_QPR_BASE_REG = 8 +#else + /* first syscom queue pointer register */ + SYSCOM_QPR_BASE_REG = 6 +#endif +}; + +#if HAS_DUAL_CMD_CTX_SUPPORT +/* Bit 0: for untrusted non-secure DRV driver on VTL0 + * Bit 1: for trusted secure TEE driver on VTL1 + */ +#define SYSCOM_IRQ_VTL0_MASK 0x1 +#define SYSCOM_IRQ_VTL1_MASK 0x2 +#endif + +STORAGE_CLASS_INLINE unsigned int +regmem_load_32(unsigned int mem_address, unsigned int reg, unsigned int ssid); + +STORAGE_CLASS_INLINE void +regmem_store_32(unsigned int mem_address, unsigned int reg, unsigned int value, + unsigned int ssid); + +#ifdef __VIED_CELL +#include "regmem_access_cell.h" +#else +#include "regmem_access_host.h" +#endif + +#endif /* __REGMEM_ACCESS_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/regmem/regmem.mk b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/regmem/regmem.mk new file mode 100644 index 0000000000000..24ebc1c325d8e --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/regmem/regmem.mk @@ -0,0 +1,32 @@ +# # # +# Support for Intel Camera Imaging ISP subsystem. +# Copyright (c) 2010 - 2018, Intel Corporation. 
+# +# This program is free software; you can redistribute it and/or modify it +# under the terms and conditions of the GNU General Public License, +# version 2, as published by the Free Software Foundation. +# +# This program is distributed in the hope it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for +# more details +# +# +ifndef REGMEM_MK +REGMEM_MK=1 + +# MODULE is REGMEM + +REGMEM_DIR=$${MODULES_DIR}/regmem + +REGMEM_INTERFACE=$(REGMEM_DIR)/interface +REGMEM_SOURCES=$(REGMEM_DIR)/src + +REGMEM_HOST_FILES = +REGMEM_FW_FILES = $(REGMEM_SOURCES)/regmem.c + +REGMEM_CPPFLAGS = -I$(REGMEM_INTERFACE) -I$(REGMEM_SOURCES) +REGMEM_HOST_CPPFLAGS = $(REGMEM_CPPFLAGS) +REGMEM_FW_CPPFLAGS = $(REGMEM_CPPFLAGS) + +endif diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/regmem/src/regmem_access_host.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/regmem/src/regmem_access_host.h new file mode 100644 index 0000000000000..8878d7074fabb --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/regmem/src/regmem_access_host.h @@ -0,0 +1,41 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+*/ + +#ifndef __REGMEM_ACCESS_HOST_H +#define __REGMEM_ACCESS_HOST_H + +#include "regmem_access.h" /* implemented interface */ + +#include "storage_class.h" +#include "regmem_const.h" +#include +#include "ia_css_cmem.h" + +STORAGE_CLASS_INLINE unsigned int +regmem_load_32(unsigned int mem_addr, unsigned int reg, unsigned int ssid) +{ + /* No need to add REGMEM_OFFSET, it is already included in mem_addr. */ + return ia_css_cmem_load_32(ssid, mem_addr + (REGMEM_WORD_BYTES*reg)); +} + +STORAGE_CLASS_INLINE void +regmem_store_32(unsigned int mem_addr, unsigned int reg, + unsigned int value, unsigned int ssid) +{ + /* No need to add REGMEM_OFFSET, it is already included in mem_addr. */ + ia_css_cmem_store_32(ssid, mem_addr + (REGMEM_WORD_BYTES*reg), + value); +} + +#endif /* __REGMEM_ACCESS_HOST_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/regmem/src/regmem_const.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/regmem/src/regmem_const.h new file mode 100644 index 0000000000000..ac7e3a98a434f --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/regmem/src/regmem_const.h @@ -0,0 +1,28 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+*/ + +#ifndef __REGMEM_CONST_H +#define __REGMEM_CONST_H + +#ifndef REGMEM_SIZE +#define REGMEM_SIZE (16) +#endif /* REGMEM_SIZE */ +#ifndef REGMEM_OFFSET +#define REGMEM_OFFSET (0) +#endif /* REGMEM_OFFSET */ +#ifndef REGMEM_WORD_BYTES +#define REGMEM_WORD_BYTES (4) +#endif + +#endif /* __REGMEM_CONST_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/routing_bitmap/interface/ia_css_rbm.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/routing_bitmap/interface/ia_css_rbm.h new file mode 100644 index 0000000000000..4a04a98903264 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/routing_bitmap/interface/ia_css_rbm.h @@ -0,0 +1,173 @@ +/* +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ + +#ifndef __IA_CSS_RBM_H +#define __IA_CSS_RBM_H + +#include "ia_css_rbm_storage_class.h" +#include + +#define IA_CSS_RBM_BITS 64 +/** An element is a 32 bit unsigned integer. 64 bit integers might cause + * problems in the compiler. + */ +#define IA_CSS_RBM_ELEM_TYPE uint32_t +#define IA_CSS_RBM_ELEM_BITS \ + (sizeof(IA_CSS_RBM_ELEM_TYPE)*8) +#define IA_CSS_RBM_NOF_ELEMS \ + ((IA_CSS_RBM_BITS) / (IA_CSS_RBM_ELEM_BITS)) + +/** Users should make no assumption about the actual type of + * ia_css_rbm_t. 
+ */ +typedef struct { + IA_CSS_RBM_ELEM_TYPE data[IA_CSS_RBM_NOF_ELEMS]; +} ia_css_rbm_elems_t; +typedef ia_css_rbm_elems_t ia_css_rbm_t; + +/** Print the bits of a routing bitmap + * @return < 0 on error + */ +IA_CSS_RBM_STORAGE_CLASS_H +int ia_css_rbm_print( + const ia_css_rbm_t bitmap, + void *fid); + +/** Create an empty routing bitmap + * @return bitmap = 0 + */ +IA_CSS_RBM_STORAGE_CLASS_H +ia_css_rbm_t ia_css_rbm_clear(void); + +/** Creates the complement of a routing bitmap + * @param bitmap[in] routing bitmap + * @return ~bitmap + */ +IA_CSS_RBM_STORAGE_CLASS_H +ia_css_rbm_t ia_css_rbm_complement( + const ia_css_rbm_t bitmap); + +/** Create the union of two routing bitmaps + * @param bitmap0[in] routing bitmap 0 + * @param bitmap1[in] routing bitmap 1 + * @return bitmap0 | bitmap1 + */ +IA_CSS_RBM_STORAGE_CLASS_H +ia_css_rbm_t ia_css_rbm_union( + const ia_css_rbm_t bitmap0, + const ia_css_rbm_t bitmap1); + +/** Create the intersection of two routing bitmaps + * @param bitmap0[in] routing bitmap 0 + * @param bitmap1[in] routing bitmap 1 + * @return bitmap0 & bitmap1 + */ +IA_CSS_RBM_STORAGE_CLASS_H +ia_css_rbm_t ia_css_rbm_intersection( + const ia_css_rbm_t bitmap0, + const ia_css_rbm_t bitmap1); + +/** Check if the routing bitmaps is empty + * @param bitmap[in] routing bitmap + * @return bitmap == 0 + */ +IA_CSS_RBM_STORAGE_CLASS_H +bool ia_css_is_rbm_empty( + const ia_css_rbm_t bitmap); + +/** Check if the intersection of two routing bitmaps is empty + * @param bitmap0[in] routing bitmap 0 + * @param bitmap1[in] routing bitmap 1 + * @return (bitmap0 & bitmap1) == 0 + */ +IA_CSS_RBM_STORAGE_CLASS_H +bool ia_css_is_rbm_intersection_empty( + const ia_css_rbm_t bitmap0, + const ia_css_rbm_t bitmap1); + +/** Check if the second routing bitmap is a subset of the first (or equal) + * @param bitmap0[in] routing bitmap 0 + * @param bitmap1[in routing bitmap 1 + * Note: An empty set is always a subset, this function + * returns true if bitmap 1 is empty + * @return 
(bitmap0 & bitmap1) == bitmap1 + */ +IA_CSS_RBM_STORAGE_CLASS_H +bool ia_css_is_rbm_subset( + const ia_css_rbm_t bitmap0, + const ia_css_rbm_t bitmap1); + +/** Check if the routing bitmaps are equal + * @param bitmap0[in] routing bitmap 0 + * @param bitmap1[in] routing bitmap 1 + * @return bitmap0 == bitmap1 + */ +IA_CSS_RBM_STORAGE_CLASS_H +bool ia_css_is_rbm_equal( + const ia_css_rbm_t bitmap0, + const ia_css_rbm_t bitmap1); + +/** Checks whether a specific kernel bit is set + * @return bitmap[index] == 1 + */ +IA_CSS_RBM_STORAGE_CLASS_H +int ia_css_is_rbm_set( + const ia_css_rbm_t bitmap, + const unsigned int index); + +/** Create the union of a routing bitmap with a onehot bitmap + * with a bit set at index + * @return bitmap[index] |= 1 +*/ +IA_CSS_RBM_STORAGE_CLASS_H +ia_css_rbm_t ia_css_rbm_set( + const ia_css_rbm_t bitmap, + const unsigned int index); + +/** Creates routing bitmap using a uint64 value. + * @return bitmap with the same bits set as in value (provided that width of bitmap is sufficient). + */ +IA_CSS_RBM_STORAGE_CLASS_H +ia_css_rbm_t ia_css_rbm_create_from_uint64( + const uint64_t value); + +/** Converts an ia_css_rbm_t type to uint64_t. Note that if + * ia_css_rbm_t contains more then 64 bits, only the lowest 64 bits + * are returned. + * @return uint64_t representation of value + */ +IA_CSS_RBM_STORAGE_CLASS_H +uint64_t ia_css_rbm_to_uint64( + const ia_css_rbm_t value); + +/** Creates a routing bitmap with the bit at index 'index' removed. 
+ * @return ~(1 << index) & bitmap + */ +IA_CSS_RBM_STORAGE_CLASS_H +ia_css_rbm_t ia_css_rbm_unset( + const ia_css_rbm_t bitmap, + const unsigned int index); + +/** Create a onehot routing bitmap with a bit set at index + * @return bitmap[index] = 1 + */ +IA_CSS_RBM_STORAGE_CLASS_H +ia_css_rbm_t ia_css_rbm_bit_mask( + const unsigned int index); + +#ifdef __IA_CSS_RBM_INLINE__ +#include "ia_css_rbm_impl.h" +#endif /* __IA_CSS_RBM_INLINE__ */ + +#endif /* __IA_CSS_RBM_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/routing_bitmap/interface/ia_css_rbm_manifest.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/routing_bitmap/interface/ia_css_rbm_manifest.h new file mode 100644 index 0000000000000..f497a7de90a93 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/routing_bitmap/interface/ia_css_rbm_manifest.h @@ -0,0 +1,133 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ + +#ifndef __IA_CSS_RBM_MANIFEST_H +#define __IA_CSS_RBM_MANIFEST_H + +#include "type_support.h" +#include "ia_css_rbm_manifest_types.h" + +/** Returns the descriptor size of the RBM manifest. + */ +IA_CSS_RBM_MANIFEST_STORAGE_CLASS_H +unsigned int +ia_css_rbm_manifest_get_size(void); + +/** Initializes the RBM manifest. + * @param rbm[in] Routing bitmap. + */ +IA_CSS_RBM_MANIFEST_STORAGE_CLASS_H +void +ia_css_rbm_manifest_init(struct ia_css_rbm_manifest_s *rbm); + +/** Returns a pointer to the array of mux descriptors. 
+ * @param manifest[in] Routing bitmap manifest. + * @return NULL on error + */ +IA_CSS_RBM_MANIFEST_STORAGE_CLASS_H +ia_css_rbm_mux_desc_t * +ia_css_rbm_manifest_get_muxes(const ia_css_rbm_manifest_t *manifest); + +/** Returns the size of mux descriptors array. + * @param manifest[in] Routing bitmap manifest. + * @return size + */ +IA_CSS_RBM_MANIFEST_STORAGE_CLASS_H +unsigned int +ia_css_rbm_manifest_get_mux_count(const ia_css_rbm_manifest_t *manifest); + +/** Returns a pointer to the array of validation descriptors. + * @param manifest[in] Routing bitmap manifest. + * @return NULL on error + */ +IA_CSS_RBM_MANIFEST_STORAGE_CLASS_H +ia_css_rbm_validation_rule_t * +ia_css_rbm_manifest_get_validation_rules(const ia_css_rbm_manifest_t *manifest); + +/** Returns the size of the validation descriptor array. + * @param manifest[in] Routing bitmap manifest. + * @return size + */ +IA_CSS_RBM_MANIFEST_STORAGE_CLASS_H +unsigned int +ia_css_rbm_manifest_get_validation_rule_count(const ia_css_rbm_manifest_t *manifest); + +/** Returns a pointer to the array of terminal routing descriptors. + * @param manifest[in] Routing bitmap manifest. + * @return NULL on error + */ +IA_CSS_RBM_MANIFEST_STORAGE_CLASS_H +ia_css_rbm_terminal_routing_desc_t * +ia_css_rbm_manifest_get_terminal_routing_desc(const ia_css_rbm_manifest_t *manifest); + +/** \brief Returns the size of the terminal routing descriptor array. + * Note: pretty printing differs from on host and on IPU. + * @param manifest[in] Routing bitmap manifest. + * @return size + */ +IA_CSS_RBM_MANIFEST_STORAGE_CLASS_H +unsigned int +ia_css_rbm_manifest_get_terminal_routing_desc_count(const ia_css_rbm_manifest_t *manifest); + +/** Pretty prints the routing bitmap manifest. + * @param manifest[in] Routing bitmap manifest. + */ +void +ia_css_rbm_manifest_print(const ia_css_rbm_manifest_t *manifest); + +/** \brief Pretty prints a RBM (routing bitmap). + * Note: pretty printing differs from on host and on IPU. 
+ * @param rbm[in] Routing bitmap. + * @param mux[in] List of mux descriptors corresponding to rbm. + * @param mux_desc_count[in] Number of muxes in list mux. + */ +void +ia_css_rbm_pretty_print( + const ia_css_rbm_t *rbm, + const ia_css_rbm_mux_desc_t *mux, + unsigned int mux_desc_count); + +/** \brief check for the validity of a routing bitmap. + * @param manifest[in] Routing bitmap manifest. + * @param rbm[in] Routing bitmap + * @return true on match. + */ +bool +ia_css_rbm_manifest_check_rbm_validity( + const ia_css_rbm_manifest_t *manifest, + const ia_css_rbm_t *rbm); + +/** \brief sets, using manifest info, the value of a mux in the routing bitmap. + * @param rbm[in] Routing bitmap. + * @param mux[in] List of mux descriptors corresponding to rbm. + * @param mux_count[in] Number of muxes in list mux. + * @param gp_dev_id[in] ID of sub system (PSA/ISA) where the mux is located. + * @param mux_id[in] ID of mux to set configuration for. + * @param value[in] Value of the mux. + * @return routing bitmap. + */ +ia_css_rbm_t +ia_css_rbm_set_mux( + ia_css_rbm_t rbm, + ia_css_rbm_mux_desc_t *mux, + unsigned int mux_count, + unsigned int gp_dev_id, + unsigned int mux_id, + unsigned int value); + +#ifdef __IA_CSS_RBM_MANIFEST_INLINE__ +#include "ia_css_rbm_manifest_impl.h" +#endif /* __IA_CSS_RBM_MANIFEST_INLINE__ */ + +#endif /* __IA_CSS_RBM_MANIFEST_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/routing_bitmap/interface/ia_css_rbm_manifest_types.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/routing_bitmap/interface/ia_css_rbm_manifest_types.h new file mode 100644 index 0000000000000..ade20446b9f64 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/routing_bitmap/interface/ia_css_rbm_manifest_types.h @@ -0,0 +1,95 @@ +/* +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. 
+ * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ + +#ifndef __IA_CSS_RBM_MANIFEST_TYPES_H +#define __IA_CSS_RBM_MANIFEST_TYPES_H + +#include "ia_css_rbm.h" +#include "vied_nci_psys_resource_model.h" + +#ifndef VIED_NCI_RBM_MAX_MUX_COUNT +#error Please define VIED_NCI_RBM_MAX_MUX_COUNT +#endif +#ifndef VIED_NCI_RBM_MAX_VALIDATION_RULE_COUNT +#error Please define VIED_NCI_RBM_MAX_VALIDATION_RULE_COUNT +#endif +#ifndef VIED_NCI_RBM_MAX_TERMINAL_DESC_COUNT +#error Please define VIED_NCI_RBM_MAX_TERMINAL_DESC_COUNT +#endif +#ifndef N_PADDING_UINT8_IN_RBM_MANIFEST +#error Please define N_PADDING_UINT8_IN_RBM_MANIFEST +#endif + +#define SIZE_OF_RBM_MUX_DESC_S ( \ + (4 * IA_CSS_UINT8_T_BITS)) + +typedef struct ia_css_rbm_mux_desc_s { + uint8_t gp_dev_id; + uint8_t mux_id; + uint8_t offset; + uint8_t size_bits; +} ia_css_rbm_mux_desc_t; + +#define SIZE_OF_RBM_VALIDATION_RULE_DESC_S ( \ + (2 * IA_CSS_RBM_BITS) \ + + (1 * IA_CSS_UINT32_T_BITS)) + +typedef struct ia_css_rbm_validation_rule_s { + ia_css_rbm_t match; /* RBM is an array of 32 bit elements */ + ia_css_rbm_t mask; + uint32_t expected_value; +} ia_css_rbm_validation_rule_t; + +#define SIZE_OF_RBM_TERMINAL_ROUTING_DESC_S ( \ + (4 * IA_CSS_UINT8_T_BITS)) + +typedef struct ia_css_rbm_terminal_routing_desc_s { + uint8_t terminal_id; + uint8_t connection_state; + uint8_t mux_id; + uint8_t state; +} ia_css_rbm_terminal_routing_desc_t; + +#define SIZE_OF_RBM_MANIFEST_S ( \ + (VIED_NCI_RBM_MAX_MUX_COUNT * SIZE_OF_RBM_MUX_DESC_S) \ + + (VIED_NCI_RBM_MAX_VALIDATION_RULE_COUNT * SIZE_OF_RBM_VALIDATION_RULE_DESC_S) \ + + 
(VIED_NCI_RBM_MAX_TERMINAL_DESC_COUNT * SIZE_OF_RBM_TERMINAL_ROUTING_DESC_S) \ + + (3 * IA_CSS_UINT16_T_BITS) \ + + (N_PADDING_UINT8_IN_RBM_MANIFEST * IA_CSS_UINT8_T_BITS)) + +typedef struct ia_css_rbm_manifest_s { +#if VIED_NCI_RBM_MAX_VALIDATION_RULE_COUNT > 0 + ia_css_rbm_validation_rule_t + validation_rules[VIED_NCI_RBM_MAX_VALIDATION_RULE_COUNT]; +#endif + uint16_t mux_desc_count; + uint16_t validation_rule_count; + uint16_t terminal_routing_desc_count; + +#if VIED_NCI_RBM_MAX_MUX_COUNT > 0 + ia_css_rbm_mux_desc_t + mux_desc[VIED_NCI_RBM_MAX_MUX_COUNT]; +#endif + +#if VIED_NCI_RBM_MAX_TERMINAL_DESC_COUNT > 0 + ia_css_rbm_terminal_routing_desc_t + terminal_routing_desc[VIED_NCI_RBM_MAX_TERMINAL_DESC_COUNT]; +#endif + +#if N_PADDING_UINT8_IN_RBM_MANIFEST > 0 + uint8_t padding[N_PADDING_UINT8_IN_RBM_MANIFEST]; +#endif +} ia_css_rbm_manifest_t; + +#endif /* __IA_CSS_RBM_MANIFEST_TYPES_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/routing_bitmap/interface/ia_css_rbm_storage_class.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/routing_bitmap/interface/ia_css_rbm_storage_class.h new file mode 100644 index 0000000000000..9548e9a9fabbc --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/routing_bitmap/interface/ia_css_rbm_storage_class.h @@ -0,0 +1,36 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+*/ + +#ifndef __IA_CSS_RBM_STORAGE_CLASS_H +#define __IA_CSS_RBM_STORAGE_CLASS_H + +#include "storage_class.h" + +#ifndef __IA_CSS_RBM_INLINE__ +#define IA_CSS_RBM_STORAGE_CLASS_H STORAGE_CLASS_EXTERN +#define IA_CSS_RBM_STORAGE_CLASS_C +#else +#define IA_CSS_RBM_STORAGE_CLASS_H STORAGE_CLASS_INLINE +#define IA_CSS_RBM_STORAGE_CLASS_C STORAGE_CLASS_INLINE +#endif + +#ifndef __IA_CSS_RBM_MANIFEST_INLINE__ +#define IA_CSS_RBM_MANIFEST_STORAGE_CLASS_H STORAGE_CLASS_EXTERN +#define IA_CSS_RBM_MANIFEST_STORAGE_CLASS_C +#else +#define IA_CSS_RBM_MANIFEST_STORAGE_CLASS_H STORAGE_CLASS_INLINE +#define IA_CSS_RBM_MANIFEST_STORAGE_CLASS_C STORAGE_CLASS_INLINE +#endif + +#endif /* __IA_CSS_RBM_STORAGE_CLASS_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/routing_bitmap/interface/ia_css_rbm_trace.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/routing_bitmap/interface/ia_css_rbm_trace.h new file mode 100644 index 0000000000000..dd060323da5c2 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/routing_bitmap/interface/ia_css_rbm_trace.h @@ -0,0 +1,77 @@ +/* +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ + +#ifndef __IA_CSS_RBM_TRACE_H +#define __IA_CSS_RBM_TRACE_H + +#include "ia_css_trace.h" + +/* Not using 0 to identify wrong configuration being passed from the .mk file outside. 
+* Log levels not in the range below will cause a "No RBM_TRACE_CONFIG Tracing level defined" +*/ +#define RBM_TRACE_LOG_LEVEL_OFF 1 +#define RBM_TRACE_LOG_LEVEL_NORMAL 2 +#define RBM_TRACE_LOG_LEVEL_DEBUG 3 + +#define RBM_TRACE_CONFIG_DEFAULT RBM_TRACE_LOG_LEVEL_NORMAL + +#if !defined(RBM_TRACE_CONFIG) +# define RBM_TRACE_CONFIG RBM_TRACE_CONFIG_DEFAULT +#endif + +/* IPU_RESOURCE Module tracing backend is mapped to TUNIT tracing for target platforms */ +#ifdef __HIVECC +# ifndef HRT_CSIM +# define RBM_TRACE_METHOD IA_CSS_TRACE_METHOD_TRACE +# else +# define RBM_TRACE_METHOD IA_CSS_TRACE_METHOD_NATIVE +# endif +#else +# define RBM_TRACE_METHOD IA_CSS_TRACE_METHOD_NATIVE +#endif + +#if (defined(RBM_TRACE_CONFIG)) +/* Module specific trace setting */ +# if RBM_TRACE_CONFIG == RBM_TRACE_LOG_LEVEL_OFF +/* RBM_TRACE_LOG_LEVEL_OFF */ +# define RBM_TRACE_LEVEL_ASSERT IA_CSS_TRACE_LEVEL_DISABLED +# define RBM_TRACE_LEVEL_ERROR IA_CSS_TRACE_LEVEL_DISABLED +# define RBM_TRACE_LEVEL_WARNING IA_CSS_TRACE_LEVEL_DISABLED +# define RBM_TRACE_LEVEL_INFO IA_CSS_TRACE_LEVEL_DISABLED +# define RBM_TRACE_LEVEL_DEBUG IA_CSS_TRACE_LEVEL_DISABLED +# define RBM_TRACE_LEVEL_VERBOSE IA_CSS_TRACE_LEVEL_DISABLED +# elif RBM_TRACE_CONFIG == RBM_TRACE_LOG_LEVEL_NORMAL +/* RBM_TRACE_LOG_LEVEL_NORMAL */ +# define RBM_TRACE_LEVEL_ASSERT IA_CSS_TRACE_LEVEL_DISABLED +# define RBM_TRACE_LEVEL_ERROR IA_CSS_TRACE_LEVEL_ENABLED +# define RBM_TRACE_LEVEL_WARNING IA_CSS_TRACE_LEVEL_DISABLED +# define RBM_TRACE_LEVEL_INFO IA_CSS_TRACE_LEVEL_ENABLED +# define RBM_TRACE_LEVEL_DEBUG IA_CSS_TRACE_LEVEL_DISABLED +# define RBM_TRACE_LEVEL_VERBOSE IA_CSS_TRACE_LEVEL_DISABLED +# elif RBM_TRACE_CONFIG == RBM_TRACE_LOG_LEVEL_DEBUG +/* RBM_TRACE_LOG_LEVEL_DEBUG */ +# define RBM_TRACE_LEVEL_ASSERT IA_CSS_TRACE_LEVEL_ENABLED +# define RBM_TRACE_LEVEL_ERROR IA_CSS_TRACE_LEVEL_ENABLED +# define RBM_TRACE_LEVEL_WARNING IA_CSS_TRACE_LEVEL_ENABLED +# define RBM_TRACE_LEVEL_INFO IA_CSS_TRACE_LEVEL_ENABLED +# define 
RBM_TRACE_LEVEL_DEBUG IA_CSS_TRACE_LEVEL_ENABLED +# define RBM_TRACE_LEVEL_VERBOSE IA_CSS_TRACE_LEVEL_ENABLED +# else +# error "No RBM_TRACE_CONFIG Tracing level defined" +# endif +#else +# error "RBM_TRACE_CONFIG not defined" +#endif + +#endif /* __IA_CSS_RBM_TRACE_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/routing_bitmap/routing_bitmap.mk b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/routing_bitmap/routing_bitmap.mk new file mode 100644 index 0000000000000..f4251f9740fde --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/routing_bitmap/routing_bitmap.mk @@ -0,0 +1,39 @@ +# # # +# Support for Intel Camera Imaging ISP subsystem. +# Copyright (c) 2010 - 2018, Intel Corporation. +# +# This program is free software; you can redistribute it and/or modify it +# under the terms and conditions of the GNU General Public License, +# version 2, as published by the Free Software Foundation. +# +# This program is distributed in the hope it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +# FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU General Public License for +# more details +# +# + +ifdef _H_ROUTING_BITMAP_MK +$(error ERROR: routing_bitmap.mk included multiple times, please check makefile) +else +_H_ROUTING_BITMAP_MK=1 +endif + +ROUTING_BITMAP_FILES += $(ROUTING_BITMAP_DIR)/src/ia_css_rbm_manifest.c + +ROUTING_BITMAP_DIR = $(MODULES_DIR)/routing_bitmap +ROUTING_BITMAP_INTERFACE = $(ROUTING_BITMAP_DIR)/interface +ROUTING_BITMAP_SOURCES = $(ROUTING_BITMAP_DIR)/src + +ROUTING_BITMAP_CPPFLAGS = -I$(ROUTING_BITMAP_INTERFACE) +ROUTING_BITMAP_CPPFLAGS += -I$(ROUTING_BITMAP_SOURCES) + +ifeq ($(ROUTING_BITMAP_INLINE),1) +ROUTING_BITMAP_CPPFLAGS += -D__IA_CSS_RBM_INLINE__ +else +ROUTING_BITMAP_FILES += $(ROUTING_BITMAP_DIR)/src/ia_css_rbm.c +endif + +ifeq ($(ROUTING_BITMAP_MANIFEST_INLINE),1) +ROUTING_BITMAP_CPPFLAGS += -D__IA_CSS_RBM_MANIFEST_INLINE__ +endif diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/routing_bitmap/src/ia_css_rbm.c b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/routing_bitmap/src/ia_css_rbm.c new file mode 100644 index 0000000000000..bc5bf14efbd77 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/routing_bitmap/src/ia_css_rbm.c @@ -0,0 +1,17 @@ +/* +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+*/ + +#ifndef __IA_CSS_RBM_INLINE__ +#include "ia_css_rbm_impl.h" +#endif /* __IA_CSS_RBM_INLINE__ */ diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/routing_bitmap/src/ia_css_rbm_impl.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/routing_bitmap/src/ia_css_rbm_impl.h new file mode 100644 index 0000000000000..c8cd78d416a17 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/routing_bitmap/src/ia_css_rbm_impl.h @@ -0,0 +1,338 @@ +/* +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+*/ + +#include "ia_css_rbm.h" +#include "type_support.h" +#include "misc_support.h" +#include "assert_support.h" +#include "math_support.h" +#include "ia_css_rbm_trace.h" + +STORAGE_CLASS_INLINE int ia_css_rbm_compute_weight( + const ia_css_rbm_t bitmap); + +STORAGE_CLASS_INLINE ia_css_rbm_t ia_css_rbm_shift( + const ia_css_rbm_t bitmap); + +IA_CSS_RBM_STORAGE_CLASS_C +bool ia_css_is_rbm_intersection_empty( + const ia_css_rbm_t bitmap0, + const ia_css_rbm_t bitmap1) +{ + ia_css_rbm_t intersection; + + IA_CSS_TRACE_0(RBM, VERBOSE, + "ia_css_is_rbm_intersection_empty(): enter:\n"); + + intersection = ia_css_rbm_intersection(bitmap0, bitmap1); + return ia_css_is_rbm_empty(intersection); +} + +IA_CSS_RBM_STORAGE_CLASS_C +bool ia_css_is_rbm_empty( + const ia_css_rbm_t bitmap) +{ + unsigned int i; + bool is_empty = true; + + IA_CSS_TRACE_0(RBM, VERBOSE, + "ia_css_is_rbm_empty(): enter:\n"); + for (i = 0; i < IA_CSS_RBM_NOF_ELEMS; i++) { + is_empty &= bitmap.data[i] == 0; + } + return is_empty; +} + +IA_CSS_RBM_STORAGE_CLASS_C +bool ia_css_is_rbm_equal( + const ia_css_rbm_t bitmap0, + const ia_css_rbm_t bitmap1) +{ + unsigned int i; + bool is_equal = true; + + IA_CSS_TRACE_0(RBM, VERBOSE, + "ia_css_is_rbm_equal(): enter:\n"); + for (i = 0; i < IA_CSS_RBM_NOF_ELEMS; i++) { + is_equal = is_equal && (bitmap0.data[i] == bitmap1.data[i]); + } + return is_equal; +} + +IA_CSS_RBM_STORAGE_CLASS_C +bool ia_css_is_rbm_subset( + const ia_css_rbm_t bitmap0, + const ia_css_rbm_t bitmap1) +{ + ia_css_rbm_t intersection; + + IA_CSS_TRACE_0(RBM, VERBOSE, + "ia_css_is_rbm_subset(): enter:\n"); + + intersection = ia_css_rbm_intersection(bitmap0, bitmap1); + return ia_css_is_rbm_equal(intersection, bitmap1); +} + +IA_CSS_RBM_STORAGE_CLASS_C +ia_css_rbm_t ia_css_rbm_clear(void) +{ + unsigned int i; + ia_css_rbm_t bitmap; + + IA_CSS_TRACE_0(RBM, VERBOSE, + "ia_css_rbm_clear(): enter:\n"); + for (i = 0; i < IA_CSS_RBM_NOF_ELEMS; i++) { + bitmap.data[i] = 0; + } + return bitmap; +} + 
+IA_CSS_RBM_STORAGE_CLASS_C +ia_css_rbm_t ia_css_rbm_complement( + const ia_css_rbm_t bitmap) +{ + unsigned int i; + ia_css_rbm_t result; + + IA_CSS_TRACE_0(RBM, VERBOSE, + "ia_css_rbm_complement(): enter:\n"); + for (i = 0; i < IA_CSS_RBM_NOF_ELEMS; i++) { + result.data[i] = ~bitmap.data[i]; + } + return result; +} + +IA_CSS_RBM_STORAGE_CLASS_C +ia_css_rbm_t ia_css_rbm_union( + const ia_css_rbm_t bitmap0, + const ia_css_rbm_t bitmap1) +{ + unsigned int i; + ia_css_rbm_t result; + + IA_CSS_TRACE_0(RBM, VERBOSE, + "ia_css_rbm_union(): enter:\n"); + for (i = 0; i < IA_CSS_RBM_NOF_ELEMS; i++) { + result.data[i] = (bitmap0.data[i] | bitmap1.data[i]); + } + return result; +} + +IA_CSS_RBM_STORAGE_CLASS_C +ia_css_rbm_t ia_css_rbm_intersection( + const ia_css_rbm_t bitmap0, + const ia_css_rbm_t bitmap1) +{ + unsigned int i; + ia_css_rbm_t result; + + IA_CSS_TRACE_0(RBM, VERBOSE, + "ia_css_rbm_intersection(): enter:\n"); + for (i = 0; i < IA_CSS_RBM_NOF_ELEMS; i++) { + result.data[i] = (bitmap0.data[i] & bitmap1.data[i]); + } + return result; +} + +IA_CSS_RBM_STORAGE_CLASS_C +ia_css_rbm_t ia_css_rbm_set( + const ia_css_rbm_t bitmap, + const unsigned int index) +{ + ia_css_rbm_t bit_mask; + + IA_CSS_TRACE_0(RBM, VERBOSE, + "ia_css_rbm_set(): enter:\n"); + + bit_mask = ia_css_rbm_bit_mask(index); + return ia_css_rbm_union(bitmap, bit_mask); +} + +IA_CSS_RBM_STORAGE_CLASS_C +ia_css_rbm_t ia_css_rbm_create_from_uint64( + const uint64_t value) +{ + unsigned int i; + ia_css_rbm_t result; + + IA_CSS_TRACE_0(RBM, VERBOSE, + "ia_css_rbm_create_from_uint64(): enter:\n"); + + result = ia_css_rbm_clear(); + for (i = 0; i < IA_CSS_RBM_NOF_ELEMS; i++) { + /* masking is done implictly, the MSB bits of casting will be chopped off */ + result.data[i] = (IA_CSS_RBM_ELEM_TYPE) + (value >> (i * IA_CSS_RBM_ELEM_BITS)); + } + return result; +} + +IA_CSS_RBM_STORAGE_CLASS_C +uint64_t ia_css_rbm_to_uint64( + const ia_css_rbm_t value) +{ + const unsigned int bits64 = sizeof(uint64_t) * 8; + const 
unsigned int nof_elems_bits64 = bits64 / IA_CSS_RBM_ELEM_BITS; + unsigned int i; + uint64_t res = 0; + + IA_CSS_TRACE_0(RBM, VERBOSE, + "ia_css_rbm_to_uint64(): enter:\n"); + + assert((bits64 % IA_CSS_RBM_ELEM_BITS) == 0); + assert(nof_elems_bits64 > 0); + + for (i = 0; i < MIN(IA_CSS_RBM_NOF_ELEMS, nof_elems_bits64); i++) { + res |= ((uint64_t)(value.data[i]) << (i * IA_CSS_RBM_ELEM_BITS)); + } + for (i = nof_elems_bits64; i < IA_CSS_RBM_NOF_ELEMS; i++) { + assert(value.data[i] == 0); + } + return res; +} + +IA_CSS_RBM_STORAGE_CLASS_C +ia_css_rbm_t ia_css_rbm_unset( + const ia_css_rbm_t bitmap, + const unsigned int index) +{ + ia_css_rbm_t result; + + IA_CSS_TRACE_0(RBM, VERBOSE, + "ia_css_rbm_unset(): enter:\n"); + + result = ia_css_rbm_bit_mask(index); + result = ia_css_rbm_complement(result); + return ia_css_rbm_intersection(bitmap, result); +} + +IA_CSS_RBM_STORAGE_CLASS_C +ia_css_rbm_t ia_css_rbm_bit_mask( + const unsigned int index) +{ + unsigned int elem_index; + unsigned int elem_bit_index; + ia_css_rbm_t bit_mask = ia_css_rbm_clear(); + + assert(index < IA_CSS_RBM_BITS); + + IA_CSS_TRACE_0(RBM, VERBOSE, + "ia_css_rbm_bit_mask(): enter:\n"); + if (index < IA_CSS_RBM_BITS) { + elem_index = index / IA_CSS_RBM_ELEM_BITS; + elem_bit_index = index % IA_CSS_RBM_ELEM_BITS; + assert(elem_index < IA_CSS_RBM_NOF_ELEMS); + + bit_mask.data[elem_index] = 1 << elem_bit_index; + } + return bit_mask; +} + +STORAGE_CLASS_INLINE +int ia_css_rbm_compute_weight( + const ia_css_rbm_t bitmap) +{ + ia_css_rbm_t loc_bitmap; + int weight = 0; + int i; + + IA_CSS_TRACE_0(RBM, VERBOSE, + "ia_css_rbm_compute_weight(): enter:\n"); + + loc_bitmap = bitmap; + + /* In fact; do not need the iterator "i" */ + for (i = 0; (i < IA_CSS_RBM_BITS) && + !ia_css_is_rbm_empty(loc_bitmap); i++) { + weight += ia_css_is_rbm_set(loc_bitmap, 0); + loc_bitmap = ia_css_rbm_shift(loc_bitmap); + } + + return weight; +} + +IA_CSS_RBM_STORAGE_CLASS_C +int ia_css_is_rbm_set( + const ia_css_rbm_t bitmap, + 
const unsigned int index) +{ + unsigned int elem_index; + unsigned int elem_bit_index; + + IA_CSS_TRACE_0(RBM, VERBOSE, + "ia_css_is_rbm_set(): enter:\n"); + + assert(index < IA_CSS_RBM_BITS); + + elem_index = index / IA_CSS_RBM_ELEM_BITS; + elem_bit_index = index % IA_CSS_RBM_ELEM_BITS; + assert(elem_index < IA_CSS_RBM_NOF_ELEMS); + return (((bitmap.data[elem_index] >> elem_bit_index) & 0x1) == 1); +} + +STORAGE_CLASS_INLINE +ia_css_rbm_t ia_css_rbm_shift( + const ia_css_rbm_t bitmap) +{ + int i; + unsigned int lsb_current_elem = 0; + unsigned int lsb_previous_elem = 0; + ia_css_rbm_t loc_bitmap; + + IA_CSS_TRACE_0(RBM, VERBOSE, + "ia_css_rbm_shift(): enter:\n"); + + loc_bitmap = bitmap; + + for (i = IA_CSS_RBM_NOF_ELEMS - 1; i >= 0; i--) { + lsb_current_elem = bitmap.data[i] & 0x01; + loc_bitmap.data[i] >>= 1; + loc_bitmap.data[i] |= (lsb_previous_elem << (IA_CSS_RBM_ELEM_BITS - 1)); + lsb_previous_elem = lsb_current_elem; + } + return loc_bitmap; +} + +IA_CSS_RBM_STORAGE_CLASS_C +int ia_css_rbm_print( + const ia_css_rbm_t bitmap, + void *fid) +{ + int retval = -1; + int bit; + unsigned int bit_index = 0; + ia_css_rbm_t loc_bitmap; + + IA_CSS_TRACE_0(RBM, INFO, + "ia_css_rbm_print(): enter:\n"); + + NOT_USED(fid); + NOT_USED(bit); + + IA_CSS_TRACE_0(RBM, INFO, "kernel bitmap {\n"); + + loc_bitmap = bitmap; + + for (bit_index = 0; (bit_index < IA_CSS_RBM_BITS) && + !ia_css_is_rbm_empty(loc_bitmap); bit_index++) { + + bit = ia_css_is_rbm_set(loc_bitmap, 0); + loc_bitmap = ia_css_rbm_shift(loc_bitmap); + IA_CSS_TRACE_2(RBM, INFO, "\t%d\t = %d\n", bit_index, bit); + } + IA_CSS_TRACE_0(RBM, INFO, "}\n"); + + retval = 0; + return retval; +} diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/routing_bitmap/src/ia_css_rbm_manifest.c b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/routing_bitmap/src/ia_css_rbm_manifest.c new file mode 100644 index 0000000000000..ef3beb8760b62 --- /dev/null +++ 
b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/routing_bitmap/src/ia_css_rbm_manifest.c @@ -0,0 +1,224 @@ +/* +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ + +#include "ia_css_rbm_manifest.h" +#include "ia_css_rbm.h" +#include "type_support.h" +#include "misc_support.h" +#include "assert_support.h" +#include "math_support.h" +#include "ia_css_rbm_trace.h" + +#ifndef __IA_CSS_RBM_MANIFEST_INLINE__ +#include "ia_css_rbm_manifest_impl.h" +#endif /* __IA_CSS_RBM_MANIFEST_INLINE__ */ + +STORAGE_CLASS_INLINE void +ia_css_rbm_print_with_header( + const ia_css_rbm_t *rbm, + const ia_css_rbm_mux_desc_t *mux, + unsigned int mux_desc_count, + bool print_header) +{ +#ifdef __HIVECC + ia_css_rbm_print(*rbm, NULL); + (void)print_header; + (void)mux_desc_count; + (void)mux; +#else + int i, j; + + assert(mux != NULL); + assert(rbm != NULL); + if (mux == NULL || rbm == NULL) + return; + + if (print_header) { + for (i = mux_desc_count - 1; i >= 0; i--) { + PRINT("%*d|", mux[i].size_bits, mux[i].mux_id); + } + PRINT("\n"); + } + for (i = mux_desc_count - 1; i >= 0; i--) { + for (j = mux[i].size_bits - 1; j >= 0; j--) { + PRINT("%d", ia_css_is_rbm_set(*rbm, j + mux[i].offset)); + } + PRINT("|"); + } +#endif +} + +STORAGE_CLASS_INLINE void +ia_css_rbm_validation_rule_print( + ia_css_rbm_validation_rule_t *rule, + ia_css_rbm_mux_desc_t *mux_desc, + unsigned int mux_desc_count, + bool print_header) +{ + ia_css_rbm_print_with_header(&rule->match, mux_desc, 
mux_desc_count, print_header); +#ifdef __HIVECC + IA_CSS_TRACE_0(RBM, INFO, "Mask\n"); +#else + PRINT("\t"); +#endif + ia_css_rbm_print_with_header(&rule->mask, mux_desc, mux_desc_count, false); +#ifdef __HIVECC + IA_CSS_TRACE_1(RBM, INFO, "Rule expected_value: %d\n", rule->expected_value); +#else + PRINT("\t%d\n", rule->expected_value); +#endif +} + +void +ia_css_rbm_pretty_print( + const ia_css_rbm_t *rbm, + const ia_css_rbm_mux_desc_t *mux, + unsigned int mux_desc_count) +{ + ia_css_rbm_print_with_header(rbm, mux, mux_desc_count, false); +#ifndef __HIVECC + PRINT("\n"); +#endif +} + +void +ia_css_rbm_manifest_print( + const ia_css_rbm_manifest_t *manifest) +{ + int retval = -1; + unsigned int i; + bool print_header = true; + ia_css_rbm_mux_desc_t *muxes; + ia_css_rbm_validation_rule_t *validation_rule; + ia_css_rbm_terminal_routing_desc_t *terminal_routing_desc; + + verifjmpexit(manifest != NULL); + muxes = ia_css_rbm_manifest_get_muxes(manifest); + verifjmpexit(muxes != NULL || manifest->mux_desc_count == 0); + + for (i = 0; i < manifest->mux_desc_count; i++) { + IA_CSS_TRACE_4(RBM, INFO, "id: %d.%d offstet: %d size_bits: %d\n", + muxes[i].gp_dev_id, + muxes[i].mux_id, + muxes[i].offset, + muxes[i].size_bits); + } +#if VIED_NCI_RBM_MAX_VALIDATION_RULE_COUNT != 0 + validation_rule = ia_css_rbm_manifest_get_validation_rules(manifest); + verifjmpexit(validation_rule != NULL || manifest->validation_rule_count == 0); + + for (i = 0; i < manifest->validation_rule_count; i++) { + ia_css_rbm_validation_rule_print(&validation_rule[i], muxes, manifest->mux_desc_count, print_header); + print_header = false; + } +#else + (void) validation_rule; + (void) print_header; +#endif + terminal_routing_desc = ia_css_rbm_manifest_get_terminal_routing_desc(manifest); + verifjmpexit(terminal_routing_desc != NULL || manifest->terminal_routing_desc_count == 0); + for (i = 0; i < manifest->terminal_routing_desc_count; i++) { + IA_CSS_TRACE_4(RBM, INFO, "terminal_id: %d connection_state: 
%d mux_id: %d state: %d\n", + terminal_routing_desc[i].terminal_id, + terminal_routing_desc[i].connection_state, + terminal_routing_desc[i].mux_id, + terminal_routing_desc[i].state); + } + + retval = 0; +EXIT: + if (retval != 0) { + IA_CSS_TRACE_0(RBM, ERROR, "ia_css_rbm_manifest_print failed\n"); + } +} + +bool +ia_css_rbm_manifest_check_rbm_validity( + const ia_css_rbm_manifest_t *manifest, + const ia_css_rbm_t *rbm) +{ + unsigned int i; + ia_css_rbm_t res; + ia_css_rbm_t final_rbm = ia_css_rbm_clear(); + ia_css_rbm_validation_rule_t *rules; + bool matches_rules; + + verifjmpexit(manifest != NULL); + verifjmpexit(rbm != NULL); + + if (ia_css_is_rbm_empty(*rbm)) { + IA_CSS_TRACE_0(RBM, ERROR, "ia_css_rbm_manifest_check_rbm_validity failes: RBM is empty.\n"); + return false; + } + +#if VIED_NCI_RBM_MAX_VALIDATION_RULE_COUNT != 0 + rules = ia_css_rbm_manifest_get_validation_rules(manifest); + verifjmpexit(rules != NULL || manifest->validation_rule_count == 0); + + for (i = 0; i < manifest->validation_rule_count; i++) { + res = ia_css_rbm_intersection(*rbm, rules[i].mask); + matches_rules = ia_css_is_rbm_equal(res, rules[i].match); + + if (!matches_rules) + continue; + + if (rules[i].expected_value == 1) { + final_rbm = ia_css_rbm_union(final_rbm, res); + } else { + IA_CSS_TRACE_1(RBM, INFO, "ia_css_rbm_manifest_check_rbm_validity failes on rule %d\n", 1); + return false; + } + } +#else + (void)matches_rules; + (void)i; + (void)rules; + (void)res; +#endif + return ia_css_is_rbm_equal(final_rbm, *rbm); +EXIT: + return false; +} + +ia_css_rbm_t +ia_css_rbm_set_mux( + ia_css_rbm_t rbm, + ia_css_rbm_mux_desc_t *mux, + unsigned int mux_count, + unsigned int gp_dev_id, + unsigned int mux_id, + unsigned int value) +{ + unsigned int i; + + verifjmpexit(mux != NULL); + + for (i = 0; i < mux_count; i++) { + if (mux[i].gp_dev_id == gp_dev_id && mux[i].mux_id == mux_id) + break; + } + if (i >= mux_count) { + IA_CSS_TRACE_2(RBM, ERROR, + "ia_css_rbm_set_mux mux with mux_id %d.%d 
not found\n", gp_dev_id, mux_id); + return rbm; + } + if (value >= mux[i].size_bits) { + IA_CSS_TRACE_3(RBM, ERROR, + "ia_css_rbm_set_mux mux mux_id %d.%d, value %d illegal\n", gp_dev_id, mux_id, value); + return rbm; + } + rbm = ia_css_rbm_set(rbm, mux[i].offset + value); +EXIT: + return rbm; +} diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/routing_bitmap/src/ia_css_rbm_manifest_impl.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/routing_bitmap/src/ia_css_rbm_manifest_impl.h new file mode 100644 index 0000000000000..7059b6bc898e0 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/routing_bitmap/src/ia_css_rbm_manifest_impl.h @@ -0,0 +1,108 @@ + + +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+*/ + +#include "ia_css_rbm_manifest.h" +#include "ia_css_rbm_trace.h" + +#include "type_support.h" +#include "math_support.h" +#include "error_support.h" +#include "assert_support.h" +#include "print_support.h" + +STORAGE_CLASS_INLINE +void __ia_css_rbm_manifest_check_struct(void) +{ + COMPILATION_ERROR_IF( + sizeof(ia_css_rbm_manifest_t) != (SIZE_OF_RBM_MANIFEST_S / IA_CSS_UINT8_T_BITS)); + COMPILATION_ERROR_IF( + (sizeof(ia_css_rbm_manifest_t) % 8 /* 64 bit */) != 0); +} + +IA_CSS_RBM_MANIFEST_STORAGE_CLASS_C +unsigned int +ia_css_rbm_manifest_get_size(void) +{ + unsigned int size = sizeof(struct ia_css_rbm_manifest_s); + + return ceil_mul(size, sizeof(uint64_t)); +} + +IA_CSS_RBM_MANIFEST_STORAGE_CLASS_C +void +ia_css_rbm_manifest_init(struct ia_css_rbm_manifest_s *rbm) +{ + rbm->mux_desc_count = 0; + rbm->terminal_routing_desc_count = 0; + rbm->validation_rule_count = 0; +} + +IA_CSS_RBM_MANIFEST_STORAGE_CLASS_C +ia_css_rbm_mux_desc_t * +ia_css_rbm_manifest_get_muxes(const ia_css_rbm_manifest_t *manifest) +{ +#if VIED_NCI_RBM_MAX_MUX_COUNT == 0 + (void)manifest; + return NULL; +#else + return (ia_css_rbm_mux_desc_t *)manifest->mux_desc; +#endif +} + +IA_CSS_RBM_MANIFEST_STORAGE_CLASS_C +unsigned int +ia_css_rbm_manifest_get_mux_count(const ia_css_rbm_manifest_t *manifest) +{ + return manifest->mux_desc_count; +} + +IA_CSS_RBM_MANIFEST_STORAGE_CLASS_C +ia_css_rbm_validation_rule_t * +ia_css_rbm_manifest_get_validation_rules(const ia_css_rbm_manifest_t *manifest) +{ +#if VIED_NCI_RBM_MAX_VALIDATION_RULE_COUNT == 0 + (void)manifest; + return NULL; +#else + return (ia_css_rbm_validation_rule_t *)manifest->validation_rules; +#endif +} + +IA_CSS_RBM_MANIFEST_STORAGE_CLASS_C +unsigned int +ia_css_rbm_manifest_get_validation_rule_count(const ia_css_rbm_manifest_t *manifest) +{ + return manifest->validation_rule_count; +} + +IA_CSS_RBM_MANIFEST_STORAGE_CLASS_C +ia_css_rbm_terminal_routing_desc_t * +ia_css_rbm_manifest_get_terminal_routing_desc(const 
ia_css_rbm_manifest_t *manifest) +{ +#if VIED_NCI_RBM_MAX_TERMINAL_DESC_COUNT == 0 + (void)manifest; + return NULL; +#else + return (ia_css_rbm_terminal_routing_desc_t *)manifest->terminal_routing_desc; +#endif +} + +IA_CSS_RBM_MANIFEST_STORAGE_CLASS_C +unsigned int +ia_css_rbm_manifest_get_terminal_routing_desc_count(const ia_css_rbm_manifest_t *manifest) +{ + return manifest->terminal_routing_desc_count; +} diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/support/assert_support.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/support/assert_support.h new file mode 100644 index 0000000000000..f904a494b53c9 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/support/assert_support.h @@ -0,0 +1,197 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ + +#ifndef __ASSERT_SUPPORT_H +#define __ASSERT_SUPPORT_H + +/* This file provides support for run-time assertions + * and compile-time assertions. + * + * Run-time assertions are provided via the following syntax: + * assert(condition) + * Run-time assertions are disabled using the NDEBUG flag. + * + * Compile time assertions are provided via the following syntax: + * COMPILATION_ERROR_IF(condition); + * A compile-time assertion will fail to compile if the condition is false. + * The condition must be constant, such that it can be evaluated + * at compile time. + * + * OP___assert is deprecated. 
+ */ + +#define IA_CSS_ASSERT(expr) assert(expr) + +#ifdef __KLOCWORK__ +/* Klocwork does not see that assert will lead to abortion + * as there is no good way to tell this to KW and the code + * should not depend on assert to function (actually the assert + * could be disabled in a release build) it was decided to + * disable the assert for KW scans (by defining NDEBUG) + */ +#define NDEBUG +#endif /* __KLOCWORK__ */ + +/** + * The following macro can help to test the size of a struct at compile + * time rather than at run-time. It does not work for all compilers; see + * below. + * + * Depending on the value of 'condition', the following macro is expanded to: + * - condition==true: + * an expression containing an array declaration with negative size, + * usually resulting in a compilation error + * - condition==false: + * (void) 1; // C statement with no effect + * + * example: + * COMPILATION_ERROR_IF( sizeof(struct host_sp_queues) != + * SIZE_OF_HOST_SP_QUEUES_STRUCT); + * + * verify that the macro indeed triggers a compilation error with your compiler: + * COMPILATION_ERROR_IF( sizeof(struct host_sp_queues) != + * (sizeof(struct host_sp_queues)+1) ); + * + * Not all compilers will trigger an error with this macro; + * use a search engine to search for BUILD_BUG_ON to find other methods. 
+ */ +#define COMPILATION_ERROR_IF(condition) \ +((void)sizeof(char[1 - 2*!!(condition)])) + +/* Compile time assertion */ +#ifndef CT_ASSERT +#define CT_ASSERT(cnd) ((void)sizeof(char[(cnd)?1 : -1])) +#endif /* CT_ASSERT */ + +#ifdef NDEBUG + +#define assert(cnd) ((void)0) + +#else + +#include "storage_class.h" + +#if defined(_MSC_VER) +#ifdef _KERNEL_MODE +/* Windows kernel mode compilation */ +#include +#define assert(cnd) ASSERT(cnd) +#else +/* Windows usermode compilation */ +#include +#endif + +#elif defined(__HIVECC) + +/* + * target: assert disabled + * sched: assert enabled only when SCHED_DEBUG is defined + * unsched: assert enabled + */ +#if defined(HRT_HW) +#define assert(cnd) ((void)0) +#elif defined(HRT_SCHED) && !defined(DEBUG_SCHED) +#define assert(cnd) ((void)0) +#elif defined(PIPE_GENERATION) +#define assert(cnd) ((void)0) +#else +#include +#define assert(cnd) OP___csim_assert(cnd) +#endif + +#elif defined(__KERNEL__) +#include + +#ifndef KERNEL_ASSERT_TO_BUG +#ifndef KERNEL_ASSERT_TO_BUG_ON +#ifndef KERNEL_ASSERT_TO_WARN_ON +#ifndef KERNEL_ASSERT_TO_WARN_ON_INF_LOOP +#ifndef KERNEL_ASSERT_UNDEFINED +/* Default */ +#define KERNEL_ASSERT_TO_BUG +#endif /*KERNEL_ASSERT_UNDEFINED*/ +#endif /*KERNEL_ASSERT_TO_WARN_ON_INF_LOOP*/ +#endif /*KERNEL_ASSERT_TO_WARN_ON*/ +#endif /*KERNEL_ASSERT_TO_BUG_ON*/ +#endif /*KERNEL_ASSERT_TO_BUG*/ + +#ifdef KERNEL_ASSERT_TO_BUG +/* TODO: it would be cleaner to use this: + * #define assert(cnd) BUG_ON(cnd) + * but that causes many compiler warnings (==errors) under Android + * because it seems that the BUG_ON() macro is not seen as a check by + * gcc like the BUG() macro is. 
*/ +#define assert(cnd) \ + do { \ + if (!(cnd)) { \ + BUG(); \ + } \ + } while (0) +#endif /*KERNEL_ASSERT_TO_BUG*/ + +#ifdef KERNEL_ASSERT_TO_BUG_ON +#define assert(cnd) BUG_ON(!(cnd)) +#endif /*KERNEL_ASSERT_TO_BUG_ON*/ + +#ifdef KERNEL_ASSERT_TO_WARN_ON +#define assert(cnd) WARN_ON(!(cnd)) +#endif /*KERNEL_ASSERT_TO_WARN_ON*/ + +#ifdef KERNEL_ASSERT_TO_WARN_ON_INF_LOOP +#define assert(cnd) \ + do { \ + int not_cnd = !(cnd); \ + WARN_ON(not_cnd); \ + if (not_cnd) { \ + for (;;) { \ + } \ + } \ + } while (0) +#endif /*KERNEL_ASSERT_TO_WARN_ON_INF_LOOP*/ + +#ifdef KERNEL_ASSERT_UNDEFINED +#include KERNEL_ASSERT_DEFINITION_FILESTRING +#endif /*KERNEL_ASSERT_UNDEFINED*/ + +#elif defined(__FIST__) || defined(__GNUC__) + +#include "assert.h" + +#else /* default is for unknown environments */ +#define assert(cnd) ((void)0) +#endif + +#endif /* NDEBUG */ + +#ifndef PIPE_GENERATION +/* Deprecated OP___assert, this is still used in ~1000 places + * in the code. This will be removed over time. + * The implementation for the pipe generation tool is in see support.isp.h */ +#define OP___assert(cnd) assert(cnd) + +#ifdef C_RUN +#define compile_time_assert(cond) OP___assert(cond) +#else +#include "storage_class.h" +extern void _compile_time_assert(void); +STORAGE_CLASS_INLINE void compile_time_assert(unsigned cond) +{ + /* Call undefined function if cond is false */ + if (!cond) + _compile_time_assert(); +} +#endif +#endif /* PIPE_GENERATION */ + +#endif /* __ASSERT_SUPPORT_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/support/cpu_mem_support.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/support/cpu_mem_support.h new file mode 100644 index 0000000000000..fa349cac4b24a --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/support/cpu_mem_support.h @@ -0,0 +1,233 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. 
+ * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ + +#ifndef __CPU_MEM_SUPPORT_H +#define __CPU_MEM_SUPPORT_H + +#include "storage_class.h" +#include "assert_support.h" +#include "type_support.h" + +STORAGE_CLASS_INLINE void* +ia_css_cpu_mem_copy(void *dst, const void *src, unsigned int size) +{ + /* memcpy cannot be used in Windows (function is not allowed), + * and the safer function memcpy_s is not available on other platforms. + * Because usage of ia_css_cpu_mem_copy is minimal, we implement it here in an easy, + * but sub-optimal way. + */ + unsigned int i; + + assert(dst != NULL && src != NULL); + + if (!(dst != NULL && src != NULL)) { + return NULL; + } + for (i = 0; i < size; i++) { + ((char *)dst)[i] = ((char *)src)[i]; + } + return dst; +} + +#if defined(__KERNEL__) + +#include +#include +#include +#include + +/* TODO: remove, workaround for issue in hrt file ibuf_ctrl_2600_config.c + * error checking code added to SDK that uses calls to exit function + */ +#define exit(a) return + +STORAGE_CLASS_INLINE void* +ia_css_cpu_mem_alloc(unsigned int size) +{ + return kmalloc(size, GFP_KERNEL); +} + +STORAGE_CLASS_INLINE void* +ia_css_cpu_mem_alloc_page_aligned(unsigned int size) +{ + return ia_css_cpu_mem_alloc(size); /* todo: align to page size */ +} + +STORAGE_CLASS_INLINE void +ia_css_cpu_mem_protect(void *ptr, unsigned int size, int prot) +{ + /* nothing here yet */ +} + +STORAGE_CLASS_INLINE void* +ia_css_cpu_mem_set_zero(void *dst, unsigned int size) +{ + return memset(dst, 0, size); /* available in kernel in linux/string.h */ +} + 
+STORAGE_CLASS_INLINE void +ia_css_cpu_mem_free(void *ptr) +{ + kfree(ptr); +} + +STORAGE_CLASS_INLINE void +ia_css_cpu_mem_cache_flush(void *ptr, unsigned int size) +{ + /* parameter check here */ + if (ptr == NULL) + return; + + clflush_cache_range(ptr, size); +} + +STORAGE_CLASS_INLINE void +ia_css_cpu_mem_cache_invalidate(void *ptr, unsigned int size) +{ + /* for now same as flush */ + ia_css_cpu_mem_cache_flush(ptr, size); +} + +#elif defined(_MSC_VER) + +#include +#include +#include + +extern void *hrt_malloc(size_t bytes, int zero_mem); +extern void *hrt_free(void *ptr); +extern void hrt_mem_cache_flush(void *ptr, unsigned int size); +extern void hrt_mem_cache_invalidate(void *ptr, unsigned int size); + +#define malloc(a) hrt_malloc(a, 1) +#define free(a) hrt_free(a) + +#define CSS_PAGE_SIZE (1<<12) + +STORAGE_CLASS_INLINE void* +ia_css_cpu_mem_alloc(unsigned int size) +{ + return malloc(size); +} + +STORAGE_CLASS_INLINE void* +ia_css_cpu_mem_alloc_page_aligned(unsigned int size) +{ + unsigned int buffer_size = size; + + /* Currently hrt_malloc calls Windows ExAllocatePoolWithTag() routine + * to request system memory. 
If the number of bytes is equal or bigger + * than the page size, then the returned address is page aligned, + * but if it's smaller it's not necessarily page-aligned We agreed + * with Windows team that we allocate a full page + * if it's less than page size + */ + if (buffer_size < CSS_PAGE_SIZE) + buffer_size = CSS_PAGE_SIZE; + + return ia_css_cpu_mem_alloc(buffer_size); +} + +STORAGE_CLASS_INLINE void* +ia_css_cpu_mem_set_zero(void *dst, unsigned int size) +{ + return memset(dst, 0, size); +} + +STORAGE_CLASS_INLINE void +ia_css_cpu_mem_free(void *ptr) +{ + free(ptr); +} + +STORAGE_CLASS_INLINE void +ia_css_cpu_mem_cache_flush(void *ptr, unsigned int size) +{ +#ifdef _KERNEL_MODE + hrt_mem_cache_flush(ptr, size); +#else + (void)ptr; + (void)size; +#endif +} + +STORAGE_CLASS_INLINE void +ia_css_cpu_mem_cache_invalidate(void *ptr, unsigned int size) +{ +#ifdef _KERNEL_MODE + hrt_mem_cache_invalidate(ptr, size); +#else + (void)ptr; + (void)size; +#endif +} + +#else + +#include +#include +#include +/* Needed for the MPROTECT */ +#include +#include +#include +#include + + +STORAGE_CLASS_INLINE void* +ia_css_cpu_mem_alloc(unsigned int size) +{ + return malloc(size); +} + +STORAGE_CLASS_INLINE void* +ia_css_cpu_mem_alloc_page_aligned(unsigned int size) +{ + int pagesize; + + pagesize = sysconf(_SC_PAGE_SIZE); + return memalign(pagesize, size); +} + +STORAGE_CLASS_INLINE void* +ia_css_cpu_mem_set_zero(void *dst, unsigned int size) +{ + return memset(dst, 0, size); +} + +STORAGE_CLASS_INLINE void +ia_css_cpu_mem_free(void *ptr) +{ + free(ptr); +} + +STORAGE_CLASS_INLINE void +ia_css_cpu_mem_cache_flush(void *ptr, unsigned int size) +{ + /* not needed in simulation */ + (void)ptr; + (void)size; +} + +STORAGE_CLASS_INLINE void +ia_css_cpu_mem_cache_invalidate(void *ptr, unsigned int size) +{ + /* not needed in simulation */ + (void)ptr; + (void)size; +} + +#endif + +#endif /* __CPU_MEM_SUPPORT_H */ diff --git 
a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/support/error_support.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/support/error_support.h new file mode 100644 index 0000000000000..9fe1f65125e6c --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/support/error_support.h @@ -0,0 +1,110 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ + +#ifndef __ERROR_SUPPORT_H +#define __ERROR_SUPPORT_H + +#if defined(__KERNEL__) +#include +#else +#include +#endif +#include + +/* OS-independent definition of IA_CSS errno values */ +/* #define IA_CSS_EINVAL 1 */ +/* #define IA_CSS_EFAULT 2 */ + +#ifdef __HIVECC +#define ERR_EMBEDDED 1 +#else +#define ERR_EMBEDDED 0 +#endif + +#if ERR_EMBEDDED +#define DECLARE_ERRVAL +#else +#define DECLARE_ERRVAL \ + int _errval = 0; +#endif + +/* Use "owl" in while to prevent compiler warnings in Windows */ +#define ALWAYS_FALSE ((void)0, 0) + +#define verifret(cond, error_type) \ +do { \ + if (!(cond)) { \ + return error_type; \ + } \ +} while (ALWAYS_FALSE) + +#define verifjmp(cond, error_tag) \ +do { \ + if (!(cond)) { \ + goto error_tag; \ + } \ +} while (ALWAYS_FALSE) + +#define verifexit(cond) \ +do { \ + if (!(cond)) { \ + goto EXIT; \ + } \ +} while (ALWAYS_FALSE) + +#if ERR_EMBEDDED +#define verifexitval(cond, error_tag) \ +do { \ + assert(cond); \ +} while (ALWAYS_FALSE) +#else +#define verifexitval(cond, error_tag) \ +do { \ + if (!(cond)) { \ + _errval = (error_tag); \ + goto 
EXIT; \ + } \ +} while (ALWAYS_FALSE) +#endif + +#if ERR_EMBEDDED +#define haserror(error_tag) (0) +#else +#define haserror(error_tag) \ + (_errval == (error_tag)) +#endif + +#if ERR_EMBEDDED +#define noerror() (1) +#else +#define noerror() \ + (_errval == 0) +#endif + +#define verifjmpexit(cond) \ +do { \ + if (!(cond)) { \ + goto EXIT; \ + } \ +} while (ALWAYS_FALSE) + +#define verifjmpexitsetretval(cond, retval) \ +do { \ + if (!(cond)) { \ + retval = -1; \ + goto EXIT; \ + } \ +} while (ALWAYS_FALSE) + +#endif /* __ERROR_SUPPORT_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/support/math_support.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/support/math_support.h new file mode 100644 index 0000000000000..633f86f1a1b09 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/support/math_support.h @@ -0,0 +1,314 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+*/ + +#ifndef __MATH_SUPPORT_H +#define __MATH_SUPPORT_H + +#include "storage_class.h" /* for STORAGE_CLASS_INLINE */ +#include "type_support.h" +#include "assert_support.h" + +/* in case we have min/max/MIN/MAX macro's undefine them */ +#ifdef min +#undef min +#endif +#ifdef max +#undef max +#endif +#ifdef MIN /* also defined in include/hrt/numeric.h from SDK */ +#undef MIN +#endif +#ifdef MAX +#undef MAX +#endif + +#ifndef UINT16_MAX +#define UINT16_MAX (0xffffUL) +#endif + +#ifndef UINT32_MAX +#define UINT32_MAX (0xffffffffUL) +#endif + +#define IS_ODD(a) ((a) & 0x1) +#define IS_EVEN(a) (!IS_ODD(a)) +#define IS_POWER2(a) (!((a)&((a)-1))) +#define IS_MASK_BITS_SET(a, b) ((a & b) != 0) + +/*To Find next power of 2 number from x */ +#define bit2(x) ((x) | ((x) >> 1)) +#define bit4(x) (bit2(x) | (bit2(x) >> 2)) +#define bit8(x) (bit4(x) | (bit4(x) >> 4)) +#define bit16(x) (bit8(x) | (bit8(x) >> 8)) +#define bit32(x) (bit16(x) | (bit16(x) >> 16)) +#define NEXT_POWER_OF_2(x) (bit32(x-1) + 1) + +/* force a value to a lower even value */ +#define EVEN_FLOOR(x) ((x) & ~1UL) + +/* A => B */ +#define IMPLIES(a, b) (!(a) || (b)) + +/* The ORIG_BITS th bit is the sign bit */ +/* Sign extends a ORIG_BITS bits long signed number to a 64-bit signed number */ +/* By type casting it can relimited to any valid type-size + * (32-bit signed or 16-bit or 8-bit) + */ +/* By masking it can be transformed to any arbitrary bit size */ +#define SIGN_EXTEND(VAL, ORIG_BITS) \ +((~(((VAL)&(1ULL<<((ORIG_BITS)-1)))-1))|(VAL)) + +#define EXTRACT_BIT(a, b) ((a >> b) & 1) + +/* for preprocessor and array sizing use MIN and MAX + otherwise use min and max */ +#define MAX(a, b) (((a) > (b)) ? (a) : (b)) +#define MIN(a, b) (((a) < (b)) ? (a) : (b)) +#define CLIP(a, b, c) MIN((MAX((a), (b))), (c)) +/* Integer round-down division of a with b */ +#define FLOOR_DIV(a, b) ((b) ? 
((a) / (b)) : 0) +/* Align a to the lower multiple of b */ +#define FLOOR_MUL(a, b) (FLOOR_DIV(a, b) * (b)) +/* Integer round-up division of a with b */ +#define CEIL_DIV(a, b) ((b) ? (((a) + (b) - 1) / (b)) : 0) +/* Align a to the upper multiple of b */ +#define CEIL_MUL(a, b) (CEIL_DIV(a, b) * (b)) +/* Align a to the upper multiple of b - fast implementation + * for cases when b=pow(2,n) + */ +#define CEIL_MUL2(a, b) (((a) + (b) - 1) & ~((b) - 1)) +/* integer round-up division of a with pow(2,b) */ +#define CEIL_SHIFT(a, b) (((a) + (1UL << (b)) - 1) >> (b)) +/* Align a to the upper multiple of pow(2,b) */ +#define CEIL_SHIFT_MUL(a, b) (CEIL_SHIFT(a, b) << (b)) +/* Absolute difference of a and b */ +#define ABS_DIF(a, b) (((a) > (b)) ? ((a) - (b)) : ((b) - (a))) +#define ABS(a) ABS_DIF(a, 0) +/* Square of x */ +#define SQR(x) ((x)*(x)) +/* Integer round-half-down division of a and b */ +#define ROUND_HALF_DOWN_DIV(a, b) ((b) ? ((a) + (b / 2) - 1) / (b) : 0) +/* Align a to the round-half-down multiple of b */ +#define ROUND_HALF_DOWN_MUL(a, b) (ROUND_HALF_DOWN_DIV(a, b) * (b)) + +#define MAX3(a, b, c) MAX((a), MAX((b), (c))) +#define MIN3(a, b, c) MIN((a), MIN((b), (c))) +#define MAX4(a, b, c, d) MAX((MAX((a), (b))), (MAX((c), (d)))) +#define MIN4(a, b, c, d) MIN((MIN((a), (b))), (MIN((c), (d)))) + +/* min and max should not be macros as they will evaluate their arguments twice. + if you really need a macro (e.g. for CPP or for initializing an array) + use MIN() and MAX(), otherwise use min() and max() */ + +#ifndef ARRAY_SIZE +#define ARRAY_SIZE(a) ((sizeof(a) / sizeof(*(a)))) +#endif + +#ifndef BYTES +#define BYTES(bit) (((bit)+7)/8) +#endif + +#if !defined(PIPE_GENERATION) +STORAGE_CLASS_INLINE unsigned int max_value_bits(unsigned int bits) +{ + return (bits == 0) ? 
0 : ((2 * ((1 << ((bits) - 1)) - 1)) + 1); +} +STORAGE_CLASS_INLINE unsigned int max_value_bytes(unsigned int bytes) +{ + return max_value_bits(IA_CSS_UINT8_T_BITS * bytes); +} +STORAGE_CLASS_INLINE int max(int a, int b) +{ + return MAX(a, b); +} + +STORAGE_CLASS_INLINE int min(int a, int b) +{ + return MIN(a, b); +} + +STORAGE_CLASS_INLINE int clip(int a, int b, int c) +{ + return min(max(a, b), c); +} + +STORAGE_CLASS_INLINE unsigned int umax(unsigned int a, unsigned int b) +{ + return MAX(a, b); +} + +STORAGE_CLASS_INLINE unsigned int umin(unsigned int a, unsigned int b) +{ + return MIN(a, b); +} + +STORAGE_CLASS_INLINE unsigned int uclip(unsigned int a, unsigned int b, + unsigned int c) +{ + return umin(umax(a, b), c); +} + +STORAGE_CLASS_INLINE unsigned int ceil_div(unsigned int a, unsigned int b) +{ + return CEIL_DIV(a, b); +} + +STORAGE_CLASS_INLINE unsigned int ceil_mul(unsigned int a, unsigned int b) +{ + return CEIL_MUL(a, b); +} + +STORAGE_CLASS_INLINE unsigned int ceil_mul2(unsigned int a, unsigned int b) +{ + return CEIL_MUL2(a, b); +} + +STORAGE_CLASS_INLINE unsigned int ceil_shift(unsigned int a, unsigned int b) +{ + return CEIL_SHIFT(a, b); +} + +STORAGE_CLASS_INLINE unsigned int ceil_shift_mul(unsigned int a, unsigned int b) +{ + return CEIL_SHIFT_MUL(a, b); +} + +STORAGE_CLASS_INLINE int abs_dif(int a, int b) +{ + return ABS_DIF(a, b); +} + +STORAGE_CLASS_INLINE unsigned int uabs_dif(unsigned int a, unsigned int b) +{ + return ABS_DIF(a, b); +} + +STORAGE_CLASS_INLINE unsigned int round_half_down_div(unsigned int a, + unsigned int b) +{ + return ROUND_HALF_DOWN_DIV(a, b); +} + +STORAGE_CLASS_INLINE unsigned int round_half_down_mul(unsigned int a, + unsigned int b) +{ + return ROUND_HALF_DOWN_MUL(a, b); +} + +STORAGE_CLASS_INLINE unsigned int ceil_pow2(uint32_t a) +{ + unsigned int retval = 0; + + if (IS_POWER2(a)) { + retval = (unsigned int)a; + } else { + unsigned int v = a; + + v |= v>>1; + v |= v>>2; + v |= v>>4; + v |= v>>8; + v |= v>>16; + 
retval = (unsigned int)(v+1); + } + return retval; +} + +STORAGE_CLASS_INLINE unsigned int floor_log2(uint32_t a) +{ + static const uint8_t de_bruijn[] = { + 0, 9, 1, 10, 13, 21, 2, 29, 11, 14, 16, 18, 22, 25, 3, 30, + 8, 12, 20, 28, 15, 17, 24, 7, 19, 27, 23, 6, 26, 5, 4, 31 + }; + uint32_t v = a; + + v |= v>>1; + v |= v>>2; + v |= v>>4; + v |= v>>8; + v |= v>>16; + return (unsigned int)de_bruijn[(v*0x07C4ACDDU)>>27]; +} + +/* Divide by small power of two */ +STORAGE_CLASS_INLINE unsigned int +udiv2_small_i(uint32_t a, uint32_t b) +{ + assert(b <= 2); + return a >> (b-1); +} + +/* optimized divide for small results + * a will be divided by b + * outbits is the number of bits needed for the result + * the smaller the cheaper the function will be. + * if the result doesn't fit in the number of output bits + * the result is incorrect and the function will assert + */ +STORAGE_CLASS_INLINE unsigned int +udiv_medium(uint32_t a, uint32_t b, unsigned outbits) +{ + int bit; + unsigned res = 0; + unsigned mask; + +#ifdef VOLCANO +#pragma ipu unroll +#endif + for (bit = outbits-1 ; bit >= 0; bit--) { + mask = 1<= (b<= c ? a+b-c : a+b); +} + +/* + * For SP and ISP, SDK provides the definition of OP_asp_slor. 
+ * We need it only for host + */ +STORAGE_CLASS_INLINE unsigned int OP_asp_slor(int a, int b, int c) +{ + return ((a << c) | b); +} +#else +#include "hive/customops.h" +#endif /* !defined(__VIED_CELL) */ + +#endif /* !defined(PIPE_GENERATION) */ + +#if !defined(__KERNEL__) +#define clamp(a, min_val, max_val) MIN(MAX((a), (min_val)), (max_val)) +#endif /* !defined(__KERNEL__) */ + +#endif /* __MATH_SUPPORT_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/support/misc_support.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/support/misc_support.h new file mode 100644 index 0000000000000..a2c2729e946d2 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/support/misc_support.h @@ -0,0 +1,76 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+*/ + +#ifndef __MISC_SUPPORT_H +#define __MISC_SUPPORT_H + +/* suppress compiler warnings on unused variables */ +#ifndef NOT_USED +#define NOT_USED(a) ((void)(a)) +#endif + +/* Calculate the total bytes for pow(2) byte alignment */ +#define tot_bytes_for_pow2_align(pow2, cur_bytes) \ + ((cur_bytes + (pow2 - 1)) & ~(pow2 - 1)) + +/* Display the macro value given a string */ +#define _STR(x) #x +#define STR(x) _STR(x) + +/* Concatenate */ +#ifndef CAT /* also defined in */ +#define _CAT(a, b) a ## b +#define CAT(a, b) _CAT(a, b) +#endif + +#define _CAT3(a, b, c) a ## b ## c +#define CAT3(a, b, c) _CAT3(a, b, c) + +/* NO_HOIST, NO_CSE, NO_ALIAS attributes must be ignored for host code */ +#ifndef __HIVECC +#ifndef NO_HOIST +#define NO_HOIST +#endif +#ifndef NO_CSE +#define NO_CSE +#endif +#ifndef NO_ALIAS +#define NO_ALIAS +#endif +#endif + +enum hive_method_id { + HIVE_METHOD_ID_CRUN, + HIVE_METHOD_ID_UNSCHED, + HIVE_METHOD_ID_SCHED, + HIVE_METHOD_ID_TARGET +}; + +/* Derive METHOD */ +#if defined(C_RUN) + #define HIVE_METHOD "crun" + #define HIVE_METHOD_ID HIVE_METHOD_ID_CRUN +#elif defined(HRT_UNSCHED) + #define HIVE_METHOD "unsched" + #define HIVE_METHOD_ID HIVE_METHOD_ID_UNSCHED +#elif defined(HRT_SCHED) + #define HIVE_METHOD "sched" + #define HIVE_METHOD_ID HIVE_METHOD_ID_SCHED +#else + #define HIVE_METHOD "target" + #define HIVE_METHOD_ID HIVE_METHOD_ID_TARGET + #define HRT_TARGET 1 +#endif + +#endif /* __MISC_SUPPORT_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/support/platform_support.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/support/platform_support.h new file mode 100644 index 0000000000000..1752efc7b4df8 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/support/platform_support.h @@ -0,0 +1,146 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. 
+ * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ + +#ifndef __PLATFORM_SUPPORT_H +#define __PLATFORM_SUPPORT_H + +#include "storage_class.h" + +#define MSEC_IN_SEC 1000 +#define NSEC_IN_MSEC 1000000 + +#if defined(_MSC_VER) +#include + +#define IA_CSS_EXTERN +#define SYNC_WITH(x) +#define CSS_ALIGN(d, a) _declspec(align(a)) d + +STORAGE_CLASS_INLINE void ia_css_sleep(void) +{ + /* Placeholder for driver team*/ +} + +STORAGE_CLASS_INLINE void ia_css_sleep_msec(long unsigned int delay_time_ms) +{ + /* Placeholder for driver team*/ + (void)delay_time_ms; +} + +#elif defined(__HIVECC) +#include +#include + +#define IA_CSS_EXTERN extern +#define CSS_ALIGN(d, a) d __attribute__((aligned(a))) +STORAGE_CLASS_INLINE void ia_css_sleep(void) +{ + OP___schedule(); +} + +#elif defined(__KERNEL__) +#include +#include + +#define IA_CSS_EXTERN +#define CSS_ALIGN(d, a) d __aligned(a) + +STORAGE_CLASS_INLINE void ia_css_sleep(void) +{ + usleep_range(1, 50); +} + +#elif defined(__GNUC__) +#include + +#define IA_CSS_EXTERN +#define CSS_ALIGN(d, a) d __attribute__((aligned(a))) + +/* Define some __HIVECC specific macros to nothing to allow host code compilation */ +#ifndef NO_ALIAS +#define NO_ALIAS +#endif + +#ifndef SYNC_WITH +#define SYNC_WITH(x) +#endif + +#if defined(HRT_CSIM) + #include "hrt/host.h" /* Using hrt_sleep from hrt/host.h */ + STORAGE_CLASS_INLINE void ia_css_sleep(void) + { + /* For the SDK still using hrt_sleep */ + hrt_sleep(); + } + STORAGE_CLASS_INLINE void ia_css_sleep_msec(long unsigned int delay_time_ms) + { + /* For the SDK still using hrt_sleep */ + 
long unsigned int i = 0; + for (i = 0; i < delay_time_ms; i++) { + hrt_sleep(); + } + } +#else + #include + STORAGE_CLASS_INLINE void ia_css_sleep(void) + { + struct timespec delay_time; + + delay_time.tv_sec = 0; + delay_time.tv_nsec = 10; + nanosleep(&delay_time, NULL); + } + STORAGE_CLASS_INLINE void ia_css_sleep_msec(long unsigned int delay_time_ms) + { + struct timespec delay_time; + + if (delay_time_ms >= MSEC_IN_SEC) { + delay_time.tv_sec = delay_time_ms / MSEC_IN_SEC; + delay_time.tv_nsec = (delay_time_ms % MSEC_IN_SEC) * NSEC_IN_MSEC; + } else { + delay_time.tv_sec = 0; + delay_time.tv_nsec = delay_time_ms * NSEC_IN_MSEC; + } + nanosleep(&delay_time, NULL); + } +#endif + +#else +#include +#endif + +/*needed for the include in stdint.h for various environments */ +#include "type_support.h" +#include "storage_class.h" + +#define MAX_ALIGNMENT 8 +#define aligned_uint8(type, obj) CSS_ALIGN(uint8_t obj, 1) +#define aligned_int8(type, obj) CSS_ALIGN(int8_t obj, 1) +#define aligned_uint16(type, obj) CSS_ALIGN(uint16_t obj, 2) +#define aligned_int16(type, obj) CSS_ALIGN(int16_t obj, 2) +#define aligned_uint32(type, obj) CSS_ALIGN(uint32_t obj, 4) +#define aligned_int32(type, obj) CSS_ALIGN(int32_t obj, 4) + +/* needed as long as hivecc does not define the type (u)int64_t */ +#if defined(__HIVECC) +#define aligned_uint64(type, obj) CSS_ALIGN(unsigned long long obj, 8) +#define aligned_int64(type, obj) CSS_ALIGN(signed long long obj, 8) +#else +#define aligned_uint64(type, obj) CSS_ALIGN(uint64_t obj, 8) +#define aligned_int64(type, obj) CSS_ALIGN(int64_t obj, 8) +#endif +#define aligned_enum(enum_type, obj) CSS_ALIGN(uint32_t obj, 4) +#define aligned_struct(struct_type, obj) struct_type obj + +#endif /* __PLATFORM_SUPPORT_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/support/print_support.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/support/print_support.h new file mode 100644 index 0000000000000..0b614f7ef12d8 --- /dev/null 
+++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/support/print_support.h @@ -0,0 +1,90 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ + +#ifndef __PRINT_SUPPORT_H +#define __PRINT_SUPPORT_H + +#if defined(_MSC_VER) +#ifdef _KERNEL_MODE + +/* TODO: Windows driver team to provide tracing mechanism for kernel mode + * e.g. DbgPrint and DbgPrintEx + */ +extern void FwTracePrintPWARN(const char *fmt, ...); +extern void FwTracePrintPRINT(const char *fmt, ...); +extern void FwTracePrintPERROR(const char *fmt, ...); +extern void FwTracePrintPDEBUG(const char *fmt, ...); + +#define PWARN(format, ...) FwTracePrintPWARN(format, __VA_ARGS__) +#define PRINT(format, ...) FwTracePrintPRINT(format, __VA_ARGS__) +#define PERROR(format, ...) FwTracePrintPERROR(format, __VA_ARGS__) +#define PDEBUG(format, ...) FwTracePrintPDEBUG(format, __VA_ARGS__) + +#else +/* Windows usermode compilation */ +#include + +/* To change the defines below, communicate with Windows team first + * to ensure they will not get flooded with prints + */ +/* This is temporary workaround to avoid flooding userspace + * Windows driver with prints + */ + +#define PWARN(format, ...) +#define PRINT(format, ...) +#define PERROR(format, ...) printf("error: " format, __VA_ARGS__) +#define PDEBUG(format, ...) 
+ +#endif /* _KERNEL_MODE */ + +#elif defined(__HIVECC) +#include +/* To be revised + +#define PWARN(format) +#define PRINT(format) OP___printstring(format) +#define PERROR(variable) OP___dump(9999, arguments) +#define PDEBUG(variable) OP___dump(__LINE__, arguments) + +*/ + +#define PRINTSTRING(str) OP___printstring(str) + +#elif defined(__KERNEL__) +#include +#include + + +#define PWARN(format, arguments...) pr_debug(format, ##arguments) +#define PRINT(format, arguments...) pr_debug(format, ##arguments) +#define PERROR(format, arguments...) pr_debug(format, ##arguments) +#define PDEBUG(format, arguments...) pr_debug(format, ##arguments) + +#else +#include + +#define PRINT_HELPER(prefix, format, ...) printf(prefix format "%s", __VA_ARGS__) + +/* The trailing "" allows the edge case of printing single string */ +#define PWARN(...) PRINT_HELPER("warning: ", __VA_ARGS__, "") +#define PRINT(...) PRINT_HELPER("", __VA_ARGS__, "") +#define PERROR(...) PRINT_HELPER("error: ", __VA_ARGS__, "") +#define PDEBUG(...) PRINT_HELPER("debug: ", __VA_ARGS__, "") + +#define PRINTSTRING(str) PRINT(str) + +#endif + +#endif /* __PRINT_SUPPORT_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/support/storage_class.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/support/storage_class.h new file mode 100644 index 0000000000000..af19b4026220a --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/support/storage_class.h @@ -0,0 +1,51 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU General Public License for + * more details. +*/ + +#ifndef __STORAGE_CLASS_H +#define __STORAGE_CLASS_H + +#define STORAGE_CLASS_EXTERN \ +extern + +#if defined(_MSC_VER) +#define STORAGE_CLASS_INLINE \ +static __inline +#elif defined(__HIVECC) +#define STORAGE_CLASS_INLINE \ +static inline +#else +#define STORAGE_CLASS_INLINE \ +static inline +#endif + +/* Register struct */ +#ifndef __register +#if defined(__HIVECC) && !defined(PIPE_GENERATION) +#define __register register +#else +#define __register +#endif +#endif + +/* Memory attribute */ +#ifndef MEM +#ifdef PIPE_GENERATION +#elif defined(__HIVECC) +#include +#else +#define MEM(any_mem) +#endif +#endif + +#endif /* __STORAGE_CLASS_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/support/type_support.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/support/type_support.h new file mode 100644 index 0000000000000..a86da0e78941c --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/support/type_support.h @@ -0,0 +1,80 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ + +#ifndef __TYPE_SUPPORT_H +#define __TYPE_SUPPORT_H + +/* Per the DLI spec, types are in "type_support.h" and + * "platform_support.h" is for unclassified/to be refactored + * platform specific definitions. 
+ */ +#define IA_CSS_UINT8_T_BITS 8 +#define IA_CSS_UINT16_T_BITS 16 +#define IA_CSS_UINT32_T_BITS 32 +#define IA_CSS_INT32_T_BITS 32 +#define IA_CSS_UINT64_T_BITS 64 + + +#if defined(_MSC_VER) +#include +#include +#include +#include +#if defined(_M_X64) +#define HOST_ADDRESS(x) (unsigned long long)(x) +#else +#define HOST_ADDRESS(x) (unsigned long)(x) +#endif + +#elif defined(PARAM_GENERATION) +/* Nothing */ +#elif defined(__HIVECC) +#include +#include +#include +#include +#define HOST_ADDRESS(x) (unsigned long)(x) + +typedef long long int64_t; +typedef unsigned long long uint64_t; + +#elif defined(__KERNEL__) +#include +#include + +#define CHAR_BIT (8) +#define HOST_ADDRESS(x) (unsigned long)(x) + +#elif defined(__GNUC__) +#include +#include +#include +#include +#define HOST_ADDRESS(x) (unsigned long)(x) + +#else /* default is for the FIST environment */ +#include +#include +#include +#include +#define HOST_ADDRESS(x) (unsigned long)(x) + +#endif + +#if !defined(PIPE_GENERATION) && !defined(IO_GENERATION) +/* genpipe cannot handle the void* syntax */ +typedef void *HANDLE; +#endif + +#endif /* __TYPE_SUPPORT_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/syscom/interface/ia_css_syscom.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/syscom/interface/ia_css_syscom.h new file mode 100644 index 0000000000000..5426d6d18e0bd --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/syscom/interface/ia_css_syscom.h @@ -0,0 +1,247 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. 
+ * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ + +#ifndef __IA_CSS_SYSCOM_H +#define __IA_CSS_SYSCOM_H + + +/* + * The CSS Subsystem Communication Interface - Host side + * + * It provides subsystem initialzation, send ports and receive ports + * The PSYS and ISYS interfaces are implemented on top of this interface. + */ + +#include "ia_css_syscom_config.h" + +#define FW_ERROR_INVALID_PARAMETER (-1) +#define FW_ERROR_BAD_ADDRESS (-2) +#define FW_ERROR_BUSY (-3) +#define FW_ERROR_NO_MEMORY (-4) + +struct ia_css_syscom_context; + +/** + * ia_css_syscom_size() - provide syscom external buffer requirements + * @config: pointer to the configuration data (read) + * @size: pointer to the buffer size (write) + * + * Purpose: + * - Provide external buffer requirements + * - To be used for external buffer allocation + * + */ +extern void +ia_css_syscom_size( + const struct ia_css_syscom_config *cfg, + struct ia_css_syscom_size *size +); + +/** + * ia_css_syscom_open() - initialize a subsystem context + * @config: pointer to the configuration data (read) + * @buf: pointer to externally allocated buffers (read) + * @returns: struct ia_css_syscom_context* on success, 0 otherwise. + * + * Purpose: + * - initialize host side data structures + * - boot the subsystem? + * + */ +extern struct ia_css_syscom_context* +ia_css_syscom_open( + struct ia_css_syscom_config *config, + struct ia_css_syscom_buf *buf +); + +/** + * ia_css_syscom_close() - signal close to cell + * @context: pointer to the subsystem context + * @returns: 0 on success, -2 (FW_ERROR_BUSY) if SPC is not ready yet. 
+ * + * Purpose: + * Request from the Cell to terminate + */ +extern int +ia_css_syscom_close( + struct ia_css_syscom_context *context +); + +/** + * ia_css_syscom_release() - free context + * @context: pointer to the subsystem context + * @force: flag which specifies whether cell + * state will be checked before freeing the + * context. + * @returns: 0 on success, -2 (FW_ERROR_BUSY) if cell + * is busy and call was not forced. + * + * Purpose: + * 2 modes, with first (force==true) immediately + * free context, and second (force==false) verifying + * that the cell state is ok and freeing context if so, + * returning error otherwise. + */ +extern int +ia_css_syscom_release( + struct ia_css_syscom_context *context, + unsigned int force +); + +/** + * Open a port for sending tokens to the subsystem + * @context: pointer to the subsystem context + * @port: send port index + * @returns: 0 on success, -1 (FW_ERROR_INVALID_PARAMETER) otherwise. + */ +extern int +ia_css_syscom_send_port_open( + struct ia_css_syscom_context *context, + unsigned int port +); + +/** + * Closes a port for sending tokens to the subsystem + * @context: pointer to the subsystem context + * @port: send port index + * @returns: 0 on success, -1 (FW_ERROR_INVALID_PARAMETER) otherwise. + */ +extern int +ia_css_syscom_send_port_close( + struct ia_css_syscom_context *context, + unsigned int port +); + +/** + * Get the number of tokens that can be sent to a port without error. + * @context: pointer to the subsystem context + * @port: send port index + * @returns: number of available tokens on success, + * -1 (FW_ERROR_INVALID_PARAMETER) otherwise. 
+ */ +extern int +ia_css_syscom_send_port_available( + struct ia_css_syscom_context *context, + unsigned int port +); + +/** + * Send a token to the subsystem port + * The token size is determined during initialization + * @context: pointer to the subsystem context + * @port: send port index + * @token: pointer to the token value that is transferred to the subsystem + * @returns: number of tokens sent on success, + * -1 (FW_ERROR_INVALID_PARAMETER) otherwise. + */ +extern int +ia_css_syscom_send_port_transfer( + struct ia_css_syscom_context *context, + unsigned int port, + const void *token +); + +/** + * Open a port for receiving tokens to the subsystem + * @context: pointer to the subsystem context + * @port: receive port index + * @returns: 0 on success, -1 (FW_ERROR_INVALID_PARAMETER) otherwise. + */ +extern int +ia_css_syscom_recv_port_open( + struct ia_css_syscom_context *context, + unsigned int port +); + +/** + * Closes a port for receiving tokens to the subsystem + * Returns 0 on success, otherwise negative value of error code + * @context: pointer to the subsystem context + * @port: receive port index + * @returns: 0 on success, -1 (FW_ERROR_INVALID_PARAMETER) otherwise. + */ +extern int +ia_css_syscom_recv_port_close( + struct ia_css_syscom_context *context, + unsigned int port +); + +/** + * Get the number of tokens that can be received from a port without errors. + * @context: pointer to the subsystem context + * @port: receive port index + * @returns: number of available tokens on success, + * -1 (FW_ERROR_INVALID_PARAMETER) otherwise. 
+ */ +extern int +ia_css_syscom_recv_port_available( + struct ia_css_syscom_context *context, + unsigned int port +); + +/** + * Receive a token from the subsystem port + * The token size is determined during initialization + * @context: pointer to the subsystem context + * @port: receive port index + * @token (output): pointer to (space for) the token to be received + * @returns: number of tokens received on success, + * -1 (FW_ERROR_INVALID_PARAMETER) otherwise. + */ +extern int +ia_css_syscom_recv_port_transfer( + struct ia_css_syscom_context *context, + unsigned int port, + void *token +); + +#if HAS_DUAL_CMD_CTX_SUPPORT +/** + * ia_css_syscom_store_dmem() - store subsystem context information in DMEM + * @context: pointer to the subsystem context + * @ssid: subsystem id + * @vtl0_addr_mask: VTL0 address mask; only applicable when the passed in context is secure + * @returns: 0 on success, -1 (FW_ERROR_INVALID_PARAMETER) otherwise. + */ +extern int +ia_css_syscom_store_dmem( + struct ia_css_syscom_context *context, + unsigned int ssid, + unsigned int vtl0_addr_mask +); + +/** + * ia_css_syscom_set_trustlet_status() - store truslet configuration setting + * @context: pointer to the subsystem context + * @trustlet_exist: 1 if trustlet exists + */ +extern void +ia_css_syscom_set_trustlet_status( + unsigned int dmem_addr, + unsigned int ssid, + bool trustlet_exist +); + +/** + * ia_css_syscom_is_ab_spc_ready() - check if SPC access blocker programming is completed + * @context: pointer to the subsystem context + * @returns: 1 when status is ready. 
0 otherwise + */ +bool +ia_css_syscom_is_ab_spc_ready( + struct ia_css_syscom_context *ctx +); +#endif /* HAS_DUAL_CMD_CTX_SUPPORT */ + +#endif /* __IA_CSS_SYSCOM_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/syscom/interface/ia_css_syscom_config.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/syscom/interface/ia_css_syscom_config.h new file mode 100644 index 0000000000000..2f5eb309df94e --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/syscom/interface/ia_css_syscom_config.h @@ -0,0 +1,97 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+*/ + +#ifndef __IA_CSS_SYSCOM_CONFIG_H +#define __IA_CSS_SYSCOM_CONFIG_H + +#include +#include + +/* syscom size struct, output of ia_css_syscom_size, + * input for (external) allocation + */ +struct ia_css_syscom_size { + /* Size of host buffer */ + unsigned int cpu; + /* Size of shared config buffer (host to cell) */ + unsigned int shm; + /* Size of shared input queue buffers (host to cell) */ + unsigned int ibuf; + /* Size of shared output queue buffers (cell to host) */ + unsigned int obuf; +}; + +/* syscom buffer struct, output of (external) allocation, + * input for ia_css_syscom_open + */ +struct ia_css_syscom_buf { + char *cpu; /* host buffer */ + + /* shared memory buffer host address */ + host_virtual_address_t shm_host; + /* shared memory buffer cell address */ + vied_virtual_address_t shm_cell; + + /* input queue shared buffer host address */ + host_virtual_address_t ibuf_host; + /* input queue shared buffer cell address */ + vied_virtual_address_t ibuf_cell; + + /* output queue shared buffer host address */ + host_virtual_address_t obuf_host; + /* output queue shared buffer cell address */ + vied_virtual_address_t obuf_cell; +}; + +struct ia_css_syscom_queue_config { + unsigned int queue_size; /* tokens per queue */ + unsigned int token_size; /* bytes per token */ +}; + +/** + * Parameter struct for ia_css_syscom_open + */ +struct ia_css_syscom_config { + /* This member in no longer used in syscom. 
+ It is kept to not break any driver builds, and will be removed when + all assignments have been removed from driver code */ + /* address of firmware in DDR/IMR */ + unsigned long long host_firmware_address; + + /* address of firmware in DDR, seen from SPC */ + unsigned int vied_firmware_address; + + unsigned int ssid; + unsigned int mmid; + + unsigned int num_input_queues; + unsigned int num_output_queues; + struct ia_css_syscom_queue_config *input; + struct ia_css_syscom_queue_config *output; + + unsigned int regs_addr; + unsigned int dmem_addr; + + /* firmware-specific configuration data */ + void *specific_addr; + unsigned int specific_size; + + /* if true; secure syscom in VTIO Case + * if false, non-secure syscom + */ + bool secure; + unsigned int vtl0_addr_mask; /* only applicable in 'secure' case */ +}; + +#endif /* __IA_CSS_SYSCOM_CONFIG_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/syscom/interface/ia_css_syscom_trace.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/syscom/interface/ia_css_syscom_trace.h new file mode 100644 index 0000000000000..2c32693c2a82e --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/syscom/interface/ia_css_syscom_trace.h @@ -0,0 +1,51 @@ +/* + * Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+ */ + +#ifndef __IA_CSS_SYSCOM_TRACE_H +#define __IA_CSS_SYSCOM_TRACE_H + +#include "ia_css_trace.h" + +#define SYSCOM_TRACE_LEVEL_DEFAULT 1 +#define SYSCOM_TRACE_LEVEL_DEBUG 2 + +/* Set to default level if no level is defined */ +#ifndef SYSCOM_TRACE_LEVEL +#define SYSCOM_TRACE_LEVEL SYSCOM_TRACE_LEVEL_DEFAULT +#endif /* SYSCOM_TRACE_LEVEL */ + +/* SYSCOM Module tracing backend is mapped to TUNIT tracing for target platforms */ +#ifdef __HIVECC +# ifndef HRT_CSIM +# define SYSCOM_TRACE_METHOD IA_CSS_TRACE_METHOD_TRACE +# else +# define SYSCOM_TRACE_METHOD IA_CSS_TRACE_METHOD_NATIVE +# endif +#else +# define SYSCOM_TRACE_METHOD IA_CSS_TRACE_METHOD_NATIVE +#endif + +#define SYSCOM_TRACE_LEVEL_INFO IA_CSS_TRACE_LEVEL_ENABLED +#define SYSCOM_TRACE_LEVEL_WARNING IA_CSS_TRACE_LEVEL_ENABLED +#define SYSCOM_TRACE_LEVEL_ERROR IA_CSS_TRACE_LEVEL_ENABLED + +#if (SYSCOM_TRACE_LEVEL == SYSCOM_TRACE_LEVEL_DEFAULT) +# define SYSCOM_TRACE_LEVEL_VERBOSE IA_CSS_TRACE_LEVEL_DISABLED +#elif (SYSCOM_TRACE_LEVEL == SYSCOM_TRACE_LEVEL_DEBUG) +# define SYSCOM_TRACE_LEVEL_VERBOSE IA_CSS_TRACE_LEVEL_ENABLED +#else +# error "Connection manager trace level not defined!" +#endif /* SYSCOM_TRACE_LEVEL */ + +#endif /* __IA_CSS_SYSCOM_TRACE_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/syscom/src/ia_css_syscom.c b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/syscom/src/ia_css_syscom.c new file mode 100644 index 0000000000000..cdf9df0531ff0 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/syscom/src/ia_css_syscom.c @@ -0,0 +1,650 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. 
+ * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ + +#include "ia_css_syscom.h" + +#include "ia_css_syscom_context.h" +#include "ia_css_syscom_config_fw.h" +#include "ia_css_syscom_trace.h" + +#include "queue.h" +#include "send_port.h" +#include "recv_port.h" +#include "regmem_access.h" + +#include "error_support.h" +#include "cpu_mem_support.h" + +#include "queue_struct.h" +#include "send_port_struct.h" +#include "recv_port_struct.h" + +#include "type_support.h" +#include +#include +#include "platform_support.h" + +#include "ia_css_cell.h" + +/* struct of internal buffer sizes */ +struct ia_css_syscom_size_intern { + unsigned int context; + unsigned int input_queue; + unsigned int output_queue; + unsigned int input_port; + unsigned int output_port; + + unsigned int fw_config; + unsigned int specific; + + unsigned int input_buffer; + unsigned int output_buffer; +}; + +/* Allocate buffers internally, when no buffers are provided */ +static int +ia_css_syscom_alloc( + unsigned int ssid, + unsigned int mmid, + const struct ia_css_syscom_size *size, + struct ia_css_syscom_buf *buf) +{ + /* zero the buffer to set all pointers to zero */ + memset(buf, 0, sizeof(*buf)); + + /* allocate cpu_mem */ + buf->cpu = (char *)ia_css_cpu_mem_alloc(size->cpu); + if (!buf->cpu) + goto EXIT7; + + /* allocate and map shared config buffer */ + buf->shm_host = shared_memory_alloc(mmid, size->shm); + if (!buf->shm_host) + goto EXIT6; + buf->shm_cell = shared_memory_map(ssid, mmid, buf->shm_host); + if (!buf->shm_cell) + goto EXIT5; + + /* allocate and map input queue buffer */ + buf->ibuf_host = shared_memory_alloc(mmid, size->ibuf); + if (!buf->ibuf_host) + goto EXIT4; + buf->ibuf_cell = shared_memory_map(ssid, mmid, buf->ibuf_host); + if (!buf->ibuf_cell) + goto EXIT3; + + /* allocate and 
map output queue buffer */ + buf->obuf_host = shared_memory_alloc(mmid, size->obuf); + if (!buf->obuf_host) + goto EXIT2; + buf->obuf_cell = shared_memory_map(ssid, mmid, buf->obuf_host); + if (!buf->obuf_cell) + goto EXIT1; + + return 0; + +EXIT1: shared_memory_free(mmid, buf->obuf_host); +EXIT2: shared_memory_unmap(ssid, mmid, buf->ibuf_cell); +EXIT3: shared_memory_free(mmid, buf->ibuf_host); +EXIT4: shared_memory_unmap(ssid, mmid, buf->shm_cell); +EXIT5: shared_memory_free(mmid, buf->shm_host); +EXIT6: ia_css_cpu_mem_free(buf->cpu); +EXIT7: return FW_ERROR_NO_MEMORY; +} + +static void +ia_css_syscom_size_intern( + const struct ia_css_syscom_config *cfg, + struct ia_css_syscom_size_intern *size) +{ + /* convert syscom config into syscom internal size struct */ + + unsigned int i; + + size->context = sizeof(struct ia_css_syscom_context); + size->input_queue = cfg->num_input_queues * sizeof(struct sys_queue); + size->output_queue = cfg->num_output_queues * sizeof(struct sys_queue); + size->input_port = cfg->num_input_queues * sizeof(struct send_port); + size->output_port = cfg->num_output_queues * sizeof(struct recv_port); + + size->fw_config = sizeof(struct ia_css_syscom_config_fw); + size->specific = cfg->specific_size; + + /* accumulate input queue buffer sizes */ + size->input_buffer = 0; + for (i = 0; i < cfg->num_input_queues; i++) { + size->input_buffer += + sys_queue_buf_size(cfg->input[i].queue_size, + cfg->input[i].token_size); + } + + /* accumulate outut queue buffer sizes */ + size->output_buffer = 0; + for (i = 0; i < cfg->num_output_queues; i++) { + size->output_buffer += + sys_queue_buf_size(cfg->output[i].queue_size, + cfg->output[i].token_size); + } +} + +static void +ia_css_syscom_size_extern( + const struct ia_css_syscom_size_intern *i, + struct ia_css_syscom_size *e) +{ + /* convert syscom internal size struct into external size struct */ + + e->cpu = i->context + i->input_queue + i->output_queue + + i->input_port + i->output_port; + e->shm = 
i->fw_config + i->input_queue + i->output_queue + i->specific; + e->ibuf = i->input_buffer; + e->obuf = i->output_buffer; +} + +/* Function that provides buffer sizes to be allocated */ +void +ia_css_syscom_size( + const struct ia_css_syscom_config *cfg, + struct ia_css_syscom_size *size) +{ + struct ia_css_syscom_size_intern i; + + ia_css_syscom_size_intern(cfg, &i); + ia_css_syscom_size_extern(&i, size); +} + +static struct ia_css_syscom_context* +ia_css_syscom_assign_buf( + const struct ia_css_syscom_size_intern *i, + const struct ia_css_syscom_buf *buf) +{ + struct ia_css_syscom_context *ctx; + char *cpu_mem_buf; + host_virtual_address_t shm_buf_host; + vied_virtual_address_t shm_buf_cell; + + /* host context */ + cpu_mem_buf = buf->cpu; + + ctx = (struct ia_css_syscom_context *)cpu_mem_buf; + ia_css_cpu_mem_set_zero(ctx, i->context); + cpu_mem_buf += i->context; + + ctx->input_queue = (struct sys_queue *) cpu_mem_buf; + cpu_mem_buf += i->input_queue; + + ctx->output_queue = (struct sys_queue *) cpu_mem_buf; + cpu_mem_buf += i->output_queue; + + ctx->send_port = (struct send_port *) cpu_mem_buf; + cpu_mem_buf += i->input_port; + + ctx->recv_port = (struct recv_port *) cpu_mem_buf; + + + /* cell config */ + shm_buf_host = buf->shm_host; + shm_buf_cell = buf->shm_cell; + + ctx->config_host_addr = shm_buf_host; + shm_buf_host += i->fw_config; + ctx->config_vied_addr = shm_buf_cell; + shm_buf_cell += i->fw_config; + + ctx->input_queue_host_addr = shm_buf_host; + shm_buf_host += i->input_queue; + ctx->input_queue_vied_addr = shm_buf_cell; + shm_buf_cell += i->input_queue; + + ctx->output_queue_host_addr = shm_buf_host; + shm_buf_host += i->output_queue; + ctx->output_queue_vied_addr = shm_buf_cell; + shm_buf_cell += i->output_queue; + + ctx->specific_host_addr = shm_buf_host; + ctx->specific_vied_addr = shm_buf_cell; + + ctx->ibuf_host_addr = buf->ibuf_host; + ctx->ibuf_vied_addr = buf->ibuf_cell; + + ctx->obuf_host_addr = buf->obuf_host; + ctx->obuf_vied_addr = 
buf->obuf_cell; + + return ctx; +} + +struct ia_css_syscom_context* +ia_css_syscom_open( + struct ia_css_syscom_config *cfg, + struct ia_css_syscom_buf *buf_extern +) +{ + struct ia_css_syscom_size_intern size_intern; + struct ia_css_syscom_size size; + struct ia_css_syscom_buf buf_intern; + struct ia_css_syscom_buf *buf; + struct ia_css_syscom_context *ctx; + struct ia_css_syscom_config_fw fw_cfg; + unsigned int i; + struct sys_queue_res res; + + IA_CSS_TRACE_0(SYSCOM, INFO, "Entered: ia_css_syscom_open\n"); + + /* error handling */ + if (cfg == NULL) + return NULL; + + IA_CSS_TRACE_1(SYSCOM, INFO, "ia_css_syscom_open (secure %d) start\n", cfg->secure); + + /* check members of cfg: TBD */ + + /* + * Check if SP is in valid state, have to wait if not ready. + * In some platform (Such as VP), it will need more time to wait due to system performance; + * If return NULL without wait for SPC0 ready, Driver load FW will failed + */ + ia_css_cell_wait(cfg->ssid, SPC0); + + ia_css_syscom_size_intern(cfg, &size_intern); + ia_css_syscom_size_extern(&size_intern, &size); + + if (buf_extern) { + /* use externally allocated buffers */ + buf = buf_extern; + } else { + /* use internally allocated buffers */ + buf = &buf_intern; + if (ia_css_syscom_alloc(cfg->ssid, cfg->mmid, &size, buf) != 0) + return NULL; + } + + /* assign buffer pointers */ + ctx = ia_css_syscom_assign_buf(&size_intern, buf); + /* only need to free internally allocated buffers */ + ctx->free_buf = !buf_extern; + + ctx->cell_regs_addr = cfg->regs_addr; + /* regmem is at cell_dmem_addr + REGMEM_OFFSET */ + ctx->cell_dmem_addr = cfg->dmem_addr; + + ctx->num_input_queues = cfg->num_input_queues; + ctx->num_output_queues = cfg->num_output_queues; + + ctx->env.mmid = cfg->mmid; + ctx->env.ssid = cfg->ssid; + ctx->env.mem_addr = cfg->dmem_addr; + + ctx->regmem_idx = SYSCOM_QPR_BASE_REG; + + /* initialize input queues */ + res.reg = SYSCOM_QPR_BASE_REG; + res.host_address = ctx->ibuf_host_addr; + res.vied_address = 
ctx->ibuf_vied_addr; + for (i = 0; i < cfg->num_input_queues; i++) { + sys_queue_init(ctx->input_queue + i, + cfg->input[i].queue_size, + cfg->input[i].token_size, &res); + } + + /* initialize output queues */ + res.host_address = ctx->obuf_host_addr; + res.vied_address = ctx->obuf_vied_addr; + for (i = 0; i < cfg->num_output_queues; i++) { + sys_queue_init(ctx->output_queue + i, + cfg->output[i].queue_size, + cfg->output[i].token_size, &res); + } + + /* fill shared queue structs */ + shared_memory_store(cfg->mmid, ctx->input_queue_host_addr, + ctx->input_queue, + cfg->num_input_queues * sizeof(struct sys_queue)); + ia_css_cpu_mem_cache_flush( + (void *)HOST_ADDRESS(ctx->input_queue_host_addr), + cfg->num_input_queues * sizeof(struct sys_queue)); + shared_memory_store(cfg->mmid, ctx->output_queue_host_addr, + ctx->output_queue, + cfg->num_output_queues * sizeof(struct sys_queue)); + ia_css_cpu_mem_cache_flush( + (void *)HOST_ADDRESS(ctx->output_queue_host_addr), + cfg->num_output_queues * sizeof(struct sys_queue)); + + /* Zero the queue buffers. Is this really needed? 
*/ + shared_memory_zero(cfg->mmid, buf->ibuf_host, size.ibuf); + ia_css_cpu_mem_cache_flush((void *)HOST_ADDRESS(buf->ibuf_host), + size.ibuf); + shared_memory_zero(cfg->mmid, buf->obuf_host, size.obuf); + ia_css_cpu_mem_cache_flush((void *)HOST_ADDRESS(buf->obuf_host), + size.obuf); + + /* copy firmware specific data */ + if (cfg->specific_addr && cfg->specific_size) { + shared_memory_store(cfg->mmid, ctx->specific_host_addr, + cfg->specific_addr, cfg->specific_size); + ia_css_cpu_mem_cache_flush( + (void *)HOST_ADDRESS(ctx->specific_host_addr), + cfg->specific_size); + } + + fw_cfg.num_input_queues = cfg->num_input_queues; + fw_cfg.num_output_queues = cfg->num_output_queues; + fw_cfg.input_queue = ctx->input_queue_vied_addr; + fw_cfg.output_queue = ctx->output_queue_vied_addr; + fw_cfg.specific_addr = ctx->specific_vied_addr; + fw_cfg.specific_size = cfg->specific_size; + + shared_memory_store(cfg->mmid, ctx->config_host_addr, + &fw_cfg, sizeof(struct ia_css_syscom_config_fw)); + ia_css_cpu_mem_cache_flush((void *)HOST_ADDRESS(ctx->config_host_addr), + sizeof(struct ia_css_syscom_config_fw)); + +#if !HAS_DUAL_CMD_CTX_SUPPORT + /* store syscom uninitialized state */ + IA_CSS_TRACE_3(SYSCOM, INFO, "ia_css_syscom_open store STATE_REG (%#x) @ dmem_addr %#x ssid %d\n", + SYSCOM_STATE_UNINIT, ctx->cell_dmem_addr, cfg->ssid); + regmem_store_32(ctx->cell_dmem_addr, SYSCOM_STATE_REG, + SYSCOM_STATE_UNINIT, cfg->ssid); + /* store syscom uninitialized command */ + IA_CSS_TRACE_3(SYSCOM, INFO, "ia_css_syscom_open store COMMAND_REG (%#x) @ dmem_addr %#x ssid %d\n", + SYSCOM_COMMAND_UNINIT, ctx->cell_dmem_addr, cfg->ssid); + regmem_store_32(ctx->cell_dmem_addr, SYSCOM_COMMAND_REG, + SYSCOM_COMMAND_UNINIT, cfg->ssid); + /* store firmware configuration address */ + IA_CSS_TRACE_3(SYSCOM, INFO, "ia_css_syscom_open store CONFIG_REG (%#x) @ dmem_addr %#x ssid %d\n", + ctx->config_vied_addr, ctx->cell_dmem_addr, cfg->ssid); + regmem_store_32(ctx->cell_dmem_addr, SYSCOM_CONFIG_REG, + 
ctx->config_vied_addr, cfg->ssid); +#endif + + /* Indicate if ctx is created for secure stream purpose */ + ctx->secure = cfg->secure; + + IA_CSS_TRACE_1(SYSCOM, INFO, "ia_css_syscom_open (secure %d) completed\n", cfg->secure); + return ctx; +} + + +int +ia_css_syscom_close( + struct ia_css_syscom_context *ctx +) { + int state; + + state = regmem_load_32(ctx->cell_dmem_addr, SYSCOM_STATE_REG, + ctx->env.ssid); + if (state != SYSCOM_STATE_READY) { + /* SPC is not ready to handle close request yet */ + return FW_ERROR_BUSY; + } + + /* set close request flag */ + regmem_store_32(ctx->cell_dmem_addr, SYSCOM_COMMAND_REG, + SYSCOM_COMMAND_INACTIVE, ctx->env.ssid); + + return 0; +} + +static void +ia_css_syscom_free(struct ia_css_syscom_context *ctx) +{ + shared_memory_unmap(ctx->env.ssid, ctx->env.mmid, ctx->ibuf_vied_addr); + shared_memory_free(ctx->env.mmid, ctx->ibuf_host_addr); + shared_memory_unmap(ctx->env.ssid, ctx->env.mmid, ctx->obuf_vied_addr); + shared_memory_free(ctx->env.mmid, ctx->obuf_host_addr); + shared_memory_unmap(ctx->env.ssid, ctx->env.mmid, + ctx->config_vied_addr); + shared_memory_free(ctx->env.mmid, ctx->config_host_addr); + ia_css_cpu_mem_free(ctx); +} + +int +ia_css_syscom_release( + struct ia_css_syscom_context *ctx, + unsigned int force +) { + /* check if release is forced, an verify cell state if it is not */ + if (!force) { + if (!ia_css_cell_is_ready(ctx->env.ssid, SPC0)) + return FW_ERROR_BUSY; + } + + /* Reset the regmem idx */ + ctx->regmem_idx = 0; + + if (ctx->free_buf) + ia_css_syscom_free(ctx); + + return 0; +} + +int ia_css_syscom_send_port_open( + struct ia_css_syscom_context *ctx, + unsigned int port +) +{ + int state; + + /* check parameters */ + verifret(ctx != NULL, FW_ERROR_BAD_ADDRESS); + verifret(port < ctx->num_input_queues, FW_ERROR_INVALID_PARAMETER); + + /* check if SP syscom is ready to open the queue */ + state = regmem_load_32(ctx->cell_dmem_addr, SYSCOM_STATE_REG, + ctx->env.ssid); + if (state != SYSCOM_STATE_READY) 
{ + /* SPC is not ready to handle messages yet */ + return FW_ERROR_BUSY; + } + + /* initialize the port */ + send_port_open(ctx->send_port + port, + ctx->input_queue + port, &(ctx->env)); + + return 0; +} + +int ia_css_syscom_send_port_close( + struct ia_css_syscom_context *ctx, + unsigned int port +) +{ + /* check parameters */ + verifret(ctx != NULL, FW_ERROR_BAD_ADDRESS); + verifret(port < ctx->num_input_queues, FW_ERROR_INVALID_PARAMETER); + + return 0; +} + +int ia_css_syscom_send_port_available( + struct ia_css_syscom_context *ctx, + unsigned int port +) +{ + /* check params */ + verifret(ctx != NULL, FW_ERROR_BAD_ADDRESS); + verifret(port < ctx->num_input_queues, FW_ERROR_INVALID_PARAMETER); + + return send_port_available(ctx->send_port + port); +} + +int ia_css_syscom_send_port_transfer( + struct ia_css_syscom_context *ctx, + unsigned int port, + const void *token +) +{ + /* check params */ + verifret(ctx != NULL, FW_ERROR_BAD_ADDRESS); + verifret(port < ctx->num_input_queues, FW_ERROR_INVALID_PARAMETER); + + return send_port_transfer(ctx->send_port + port, token); +} + +int ia_css_syscom_recv_port_open( + struct ia_css_syscom_context *ctx, + unsigned int port +) +{ + int state; + + /* check parameters */ + verifret(ctx != NULL, FW_ERROR_BAD_ADDRESS); + verifret(port < ctx->num_output_queues, FW_ERROR_INVALID_PARAMETER); + + /* check if SP syscom is ready to open the queue */ + state = regmem_load_32(ctx->cell_dmem_addr, + SYSCOM_STATE_REG, ctx->env.ssid); + if (state != SYSCOM_STATE_READY) { + /* SPC is not ready to handle messages yet */ + return FW_ERROR_BUSY; + } + + /* initialize the port */ + recv_port_open(ctx->recv_port + port, + ctx->output_queue + port, &(ctx->env)); + + return 0; +} + +int ia_css_syscom_recv_port_close( + struct ia_css_syscom_context *ctx, + unsigned int port +) +{ + /* check parameters */ + verifret(ctx != NULL, FW_ERROR_BAD_ADDRESS); + verifret(port < ctx->num_output_queues, FW_ERROR_INVALID_PARAMETER); + + return 0; +} + +/* 
+ * Get the number of responses in the response queue + */ +int +ia_css_syscom_recv_port_available( + struct ia_css_syscom_context *ctx, + unsigned int port +) +{ + /* check params */ + verifret(ctx != NULL, FW_ERROR_BAD_ADDRESS); + verifret(port < ctx->num_output_queues, FW_ERROR_INVALID_PARAMETER); + + return recv_port_available(ctx->recv_port + port); +} + + +/* + * Dequeue the head of the response queue + * returns an error when the response queue is empty + */ +int +ia_css_syscom_recv_port_transfer( + struct ia_css_syscom_context *ctx, + unsigned int port, + void *token +) +{ + /* check params */ + verifret(ctx != NULL, FW_ERROR_BAD_ADDRESS); + verifret(port < ctx->num_output_queues, FW_ERROR_INVALID_PARAMETER); + + return recv_port_transfer(ctx->recv_port + port, token); +} + +#if HAS_DUAL_CMD_CTX_SUPPORT +/* + * store subsystem context information in DMEM + */ +int +ia_css_syscom_store_dmem( + struct ia_css_syscom_context *ctx, + unsigned int ssid, + unsigned int vtl0_addr_mask +) +{ + unsigned int read_back; + + NOT_USED(vtl0_addr_mask); + NOT_USED(read_back); + + if (ctx->secure) { + /* store VTL0 address mask in 'secure' context */ + IA_CSS_TRACE_3(SYSCOM, INFO, "ia_css_syscom_store_dmem VTL0_ADDR_MASK (%#x) @ dmem_addr %#x ssid %d\n", + vtl0_addr_mask, ctx->cell_dmem_addr, ssid); + regmem_store_32(ctx->cell_dmem_addr, SYSCOM_VTL0_ADDR_MASK, vtl0_addr_mask, ssid); + } + /* store firmware configuration address */ + IA_CSS_TRACE_3(SYSCOM, INFO, "ia_css_syscom_store_dmem CONFIG_REG (%#x) @ dmem_addr %#x ssid %d\n", + ctx->config_vied_addr, ctx->cell_dmem_addr, ssid); + regmem_store_32(ctx->cell_dmem_addr, SYSCOM_CONFIG_REG, + ctx->config_vied_addr, ssid); + /* store syscom uninitialized state */ + IA_CSS_TRACE_3(SYSCOM, INFO, "ia_css_syscom_store_dmem STATE_REG (%#x) @ dmem_addr %#x ssid %d\n", + SYSCOM_STATE_UNINIT, ctx->cell_dmem_addr, ssid); + regmem_store_32(ctx->cell_dmem_addr, SYSCOM_STATE_REG, + SYSCOM_STATE_UNINIT, ssid); + /* store syscom 
uninitialized command */ + IA_CSS_TRACE_3(SYSCOM, INFO, "ia_css_syscom_store_dmem COMMAND_REG (%#x) @ dmem_addr %#x ssid %d\n", + SYSCOM_COMMAND_UNINIT, ctx->cell_dmem_addr, ssid); + regmem_store_32(ctx->cell_dmem_addr, SYSCOM_COMMAND_REG, + SYSCOM_COMMAND_UNINIT, ssid); + + return 0; +} + +/* + * store truslet configuration status setting + */ +void +ia_css_syscom_set_trustlet_status( + unsigned int dmem_addr, + unsigned int ssid, + bool trustlet_exist +) +{ + unsigned int value; + + value = trustlet_exist ? TRUSTLET_EXIST : TRUSTLET_NOT_EXIST; + IA_CSS_TRACE_3(SYSCOM, INFO, + "ia_css_syscom_set_trustlet_status TRUSTLET_STATUS (%#x) @ dmem_addr %#x ssid %d\n", + value, dmem_addr, ssid); + regmem_store_32(dmem_addr, TRUSTLET_STATUS, value, ssid); +} + +/* + * check if SPC access blocker programming is completed + */ +bool +ia_css_syscom_is_ab_spc_ready( + struct ia_css_syscom_context *ctx +) +{ + unsigned int value; + + /* We only expect the call from non-secure context only */ + if (ctx->secure) { + IA_CSS_TRACE_0(SYSCOM, ERROR, "ia_css_syscom_is_spc_ab_ready - Please call from non-secure context\n"); + return false; + } + + value = regmem_load_32(ctx->cell_dmem_addr, AB_SPC_STATUS, ctx->env.ssid); + IA_CSS_TRACE_3(SYSCOM, INFO, + "ia_css_syscom_is_spc_ab_ready AB_SPC_STATUS @ dmem_addr %#x ssid %d - value %#x\n", + ctx->cell_dmem_addr, ctx->env.ssid, value); + + return (value == AB_SPC_READY); +} +#endif /* HAS_DUAL_CMD_CTX_SUPPORT */ diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/syscom/src/ia_css_syscom_config_fw.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/syscom/src/ia_css_syscom_config_fw.h new file mode 100644 index 0000000000000..0cacd5a34934d --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/syscom/src/ia_css_syscom_config_fw.h @@ -0,0 +1,69 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. 
+ * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ + +#ifndef __IA_CSS_SYSCOM_CONFIG_FW_H +#define __IA_CSS_SYSCOM_CONFIG_FW_H + +#include "type_support.h" + +enum { + /* Program load or explicit host setting should init to this */ + SYSCOM_STATE_UNINIT = 0x57A7E000, + /* SP Syscom sets this when it is ready for use */ + SYSCOM_STATE_READY = 0x57A7E001, + /* SP Syscom sets this when no more syscom accesses will happen */ + SYSCOM_STATE_INACTIVE = 0x57A7E002 +}; + +enum { + /* Program load or explicit host setting should init to this */ + SYSCOM_COMMAND_UNINIT = 0x57A7F000, + /* Host Syscom requests syscom to become inactive */ + SYSCOM_COMMAND_INACTIVE = 0x57A7F001 +}; + +#if HAS_DUAL_CMD_CTX_SUPPORT +enum { + /* Program load or explicit host setting should init to this */ + TRUSTLET_UNINIT = 0x57A8E000, + /* Host Syscom informs SP that Trustlet exists */ + TRUSTLET_EXIST = 0x57A8E001, + /* Host Syscom informs SP that Trustlet does not exist */ + TRUSTLET_NOT_EXIST = 0x57A8E002 +}; + +enum { + /* Program load or explicit setting initialized by SP */ + AB_SPC_NOT_READY = 0x57A8F000, + /* SP informs host that SPC access programming is completed */ + AB_SPC_READY = 0x57A8F001 +}; +#endif + +/* firmware config: data that sent from the host to SP via DDR */ +/* Cell copies data into a context */ + +struct ia_css_syscom_config_fw { + unsigned int firmware_address; + + unsigned int num_input_queues; + unsigned int num_output_queues; + unsigned int input_queue; /* hmm_ptr / struct queue* */ + unsigned int output_queue; /* hmm_ptr / struct queue* */ + + unsigned int 
specific_addr; /* vied virtual address */ + unsigned int specific_size; +}; + +#endif /* __IA_CSS_SYSCOM_CONFIG_FW_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/syscom/src/ia_css_syscom_context.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/syscom/src/ia_css_syscom_context.h new file mode 100644 index 0000000000000..ecf22f6b7ac53 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/syscom/src/ia_css_syscom_context.h @@ -0,0 +1,65 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+*/ + +#ifndef __IA_CSS_SYSCOM_CONTEXT_H +#define __IA_CSS_SYSCOM_CONTEXT_H + +#include + +#include "port_env_struct.h" +#include + +/* host context */ +struct ia_css_syscom_context { + vied_virtual_address_t cell_firmware_addr; + unsigned int cell_regs_addr; + unsigned int cell_dmem_addr; + + struct port_env env; + + unsigned int num_input_queues; + unsigned int num_output_queues; + + /* array of input queues (from host to SP) */ + struct sys_queue *input_queue; + /* array of output queues (from SP to host) */ + struct sys_queue *output_queue; + + struct send_port *send_port; + struct recv_port *recv_port; + + unsigned int regmem_idx; + unsigned int free_buf; + + host_virtual_address_t config_host_addr; + host_virtual_address_t input_queue_host_addr; + host_virtual_address_t output_queue_host_addr; + host_virtual_address_t specific_host_addr; + host_virtual_address_t ibuf_host_addr; + host_virtual_address_t obuf_host_addr; + + vied_virtual_address_t config_vied_addr; + vied_virtual_address_t input_queue_vied_addr; + vied_virtual_address_t output_queue_vied_addr; + vied_virtual_address_t specific_vied_addr; + vied_virtual_address_t ibuf_vied_addr; + vied_virtual_address_t obuf_vied_addr; + + /* if true; secure syscom object as in VTIO Case + * if false, non-secure syscom + */ + bool secure; +}; + +#endif /* __IA_CSS_SYSCOM_CONTEXT_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/syscom/syscom.mk b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/syscom/syscom.mk new file mode 100644 index 0000000000000..8d36b8928af55 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/syscom/syscom.mk @@ -0,0 +1,42 @@ +# # # +# Support for Intel Camera Imaging ISP subsystem. +# Copyright (c) 2010 - 2018, Intel Corporation. +# +# This program is free software; you can redistribute it and/or modify it +# under the terms and conditions of the GNU General Public License, +# version 2, as published by the Free Software Foundation. 
+# +# This program is distributed in the hope it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for +# more details +# +# +# MODULE is SYSCOM + +SYSCOM_DIR=$${MODULES_DIR}/syscom + +SYSCOM_INTERFACE=$(SYSCOM_DIR)/interface +SYSCOM_SOURCES1=$(SYSCOM_DIR)/src + +SYSCOM_HOST_FILES += $(SYSCOM_SOURCES1)/ia_css_syscom.c + +SYSCOM_HOST_CPPFLAGS += -I$(SYSCOM_INTERFACE) +SYSCOM_HOST_CPPFLAGS += -I$(SYSCOM_SOURCES1) +SYSCOM_HOST_CPPFLAGS += -I$${MODULES_DIR}/devices +ifdef REGMEM_SECURE_OFFSET +SYSCOM_HOST_CPPFLAGS += -DREGMEM_SECURE_OFFSET=$(REGMEM_SECURE_OFFSET) +else +SYSCOM_HOST_CPPFLAGS += -DREGMEM_SECURE_OFFSET=0 +endif + +SYSCOM_FW_FILES += $(SYSCOM_SOURCES1)/ia_css_syscom_fw.c + +SYSCOM_FW_CPPFLAGS += -I$(SYSCOM_INTERFACE) +SYSCOM_FW_CPPFLAGS += -I$(SYSCOM_SOURCES1) +SYSCOM_FW_CPPFLAGS += -DREGMEM_OFFSET=$(REGMEM_OFFSET) +ifdef REGMEM_SECURE_OFFSET +SYSCOM_FW_CPPFLAGS += -DREGMEM_SECURE_OFFSET=$(REGMEM_SECURE_OFFSET) +else +SYSCOM_FW_CPPFLAGS += -DREGMEM_SECURE_OFFSET=0 +endif diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/trace/interface/ia_css_trace.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/trace/interface/ia_css_trace.h new file mode 100644 index 0000000000000..b85b1810f1070 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/trace/interface/ia_css_trace.h @@ -0,0 +1,883 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU General Public License for + * more details. +*/ + +/*! \file */ + +#ifndef __IA_CSS_TRACE_H +#define __IA_CSS_TRACE_H + +/* +** Configurations +*/ + +/** + * STEP 1: Define {Module Name}_TRACE_METHOD to one of the following. + * Where: + * {Module Name} is the name of the targeted module. + * + * Example: + * #define NCI_DMA_TRACE_METHOD IA_CSS_TRACE_METHOD_NATIVE + */ + +/**< Use whatever method of tracing that best suits the platform + * this code is compiled for. + */ +#define IA_CSS_TRACE_METHOD_NATIVE 1 +/**< Use the Tracing NCI. */ +#define IA_CSS_TRACE_METHOD_TRACE 2 + +/** + * STEP 2: Define {Module Name}_TRACE_LEVEL_{Level} to one of the following. + * Where: + * {Module Name} is the name of the targeted module. + * {Level}, in decreasing order of severity, is one of the + * following values: + * {ASSERT, ERROR, WARNING, INFO, DEBUG, VERBOSE}. + * + * Example: + * #define NCI_DMA_TRACE_LEVEL_ASSERT IA_CSS_TRACE_LEVEL_DISABLED + * #define NCI_DMA_TRACE_LEVEL_ERROR IA_CSS_TRACE_LEVEL_ENABLED + */ +/**< Disables the corresponding trace level. */ +#define IA_CSS_TRACE_LEVEL_DISABLED 0 +/**< Enables the corresponding trace level. */ +#define IA_CSS_TRACE_LEVEL_ENABLED 1 + +/* + * Used in macro definition with do-while loop + * for removing checkpatch warnings + */ +#define IA_CSS_TRACE_FILE_DUMMY_DEFINE + +/** + * STEP 3: Define IA_CSS_TRACE_PRINT_FILE_LINE to have file name and + * line printed with every log message. + * + * Example: + * #define IA_CSS_TRACE_PRINT_FILE_LINE + */ + +/* +** Interface +*/ + +/* +** Static +*/ + +/** + * Logs a message with zero arguments if the targeted severity level is enabled + * at compile-time. + * @param module The targeted module. + * @param severity The severity level of the trace message. In decreasing order: + * {ASSERT, ERROR, WARNING, INFO, DEBUG, VERBOSE}. + * @param format The message to be traced. 
+ */ +#define IA_CSS_TRACE_0(module, severity, format) \ + IA_CSS_TRACE_IMPL(module, 0, severity, format) + +/** + * Logs a message with one argument if the targeted severity level is enabled + * at compile-time. + * @see IA_CSS_TRACE_0 + */ +#define IA_CSS_TRACE_1(module, severity, format, a1) \ + IA_CSS_TRACE_IMPL(module, 1, severity, format, a1) + +/** + * Logs a message with two arguments if the targeted severity level is enabled + * at compile-time. + * @see IA_CSS_TRACE_0 + */ +#define IA_CSS_TRACE_2(module, severity, format, a1, a2) \ + IA_CSS_TRACE_IMPL(module, 2, severity, format, a1, a2) + +/** + * Logs a message with three arguments if the targeted severity level + * is enabled at compile-time. + * @see IA_CSS_TRACE_0 + */ +#define IA_CSS_TRACE_3(module, severity, format, a1, a2, a3) \ + IA_CSS_TRACE_IMPL(module, 3, severity, format, a1, a2, a3) + +/** + * Logs a message with four arguments if the targeted severity level is enabled + * at compile-time. + * @see IA_CSS_TRACE_0 + */ +#define IA_CSS_TRACE_4(module, severity, format, a1, a2, a3, a4) \ + IA_CSS_TRACE_IMPL(module, 4, severity, format, a1, a2, a3, a4) + +/** + * Logs a message with five arguments if the targeted severity level is enabled + * at compile-time. + * @see IA_CSS_TRACE_0 + */ +#define IA_CSS_TRACE_5(module, severity, format, a1, a2, a3, a4, a5) \ + IA_CSS_TRACE_IMPL(module, 5, severity, format, a1, a2, a3, a4, a5) + +/** + * Logs a message with six arguments if the targeted severity level is enabled + * at compile-time. + * @see IA_CSS_TRACE_0 + */ +#define IA_CSS_TRACE_6(module, severity, format, a1, a2, a3, a4, a5, a6) \ + IA_CSS_TRACE_IMPL(module, 6, severity, format, a1, a2, a3, a4, a5, a6) + +/** + * Logs a message with seven arguments if the targeted severity level + * is enabled at compile-time. 
+ * @see IA_CSS_TRACE_0 + */ +#define IA_CSS_TRACE_7(module, severity, format, a1, a2, a3, a4, a5, a6, a7) \ + IA_CSS_TRACE_IMPL(module, 7, severity, format, \ + a1, a2, a3, a4, a5, a6, a7) + +/* +** Dynamic +*/ + +/** +* Declares, but does not define, dynamic tracing functions and variables +* for module \p module. For each module, place an instance of this macro +* in the compilation unit in which you want to use dynamic tracing facility +* so as to inform the compiler of the declaration of the available functions. +* An invocation of this function does not enable any of the available tracing +* levels. Do not place a semicolon after a call to this macro. +* @see IA_CSS_TRACE_DYNAMIC_DEFINE +*/ +#define IA_CSS_TRACE_DYNAMIC_DECLARE(module) \ + IA_CSS_TRACE_DYNAMIC_DECLARE_IMPL(module) +/** +* Declares the configuration function for the dynamic api seperatly, if one +* wants to use it. +*/ +#define IA_CSS_TRACE_DYNAMIC_DECLARE_CONFIG_FUNC(module) \ + IA_CSS_TRACE_DYNAMIC_DECLARE_CONFIG_FUNC_IMPL(module) + +/** +* Defines dynamic tracing functions and variables for module \p module. +* For each module, place an instance of this macro in one, and only one, +* of your SOURCE files so as to allow the linker resolve the related symbols. +* An invocation of this macro does not enable any of the available tracing +* levels. Do not place a semicolon after a call to this macro. +* @see IA_CSS_TRACE_DYNAMIC_DECLARE +*/ +#define IA_CSS_TRACE_DYNAMIC_DEFINE(module) \ + IA_CSS_TRACE_DYNAMIC_DEFINE_IMPL(module) +/** +* Defines the configuration function for the dynamic api seperatly, if one +* wants to use it. +*/ +#define IA_CSS_TRACE_DYNAMIC_DEFINE_CONFIG_FUNC(module) \ + IA_CSS_TRACE_DYNAMIC_DEFINE_CONFIG_FUNC_IMPL(module) + +/** + * Logs a message with zero arguments if the targeted severity level is enabled + * both at compile-time, and run-time. + * @param module The targeted module. + * @param severity The severity level of the trace message. 
In decreasing order: + * {ASSERT, ERROR, WARNING, INFO, DEBUG, VERBOSE}. + * @param format The message to be traced. + */ +#define IA_CSS_TRACE_DYNAMIC_0(module, severity, format) \ + IA_CSS_TRACE_DYNAMIC_IMPL(module, 0, severity, format) + +/** + * Logs a message with one argument if the targeted severity level is enabled + * both at compile-time, and run-time. + * @see IA_CSS_TRACE_DYNAMIC_0 + */ +#define IA_CSS_TRACE_DYNAMIC_1(module, severity, format, a1) \ + IA_CSS_TRACE_DYNAMIC_IMPL(module, 1, severity, format, a1) + +/** + * Logs a message with two arguments if the targeted severity level is enabled + * both at compile-time, and run-time. + * @see IA_CSS_TRACE_DYNAMIC_0 + */ +#define IA_CSS_TRACE_DYNAMIC_2(module, severity, format, a1, a2) \ + IA_CSS_TRACE_DYNAMIC_IMPL(module, 2, severity, format, a1, a2) + +/** + * Logs a message with three arguments if the targeted severity level + * is enabled both at compile-time, and run-time. + * @see IA_CSS_TRACE_DYNAMIC_0 + */ +#define IA_CSS_TRACE_DYNAMIC_3(module, severity, format, a1, a2, a3) \ + IA_CSS_TRACE_DYNAMIC_IMPL(module, 3, severity, format, a1, a2, a3) + +/** + * Logs a message with four arguments if the targeted severity level is enabled + * both at compile-time, and run-time. + * @see IA_CSS_TRACE_DYNAMIC_0 + */ +#define IA_CSS_TRACE_DYNAMIC_4(module, severity, format, a1, a2, a3, a4) \ + IA_CSS_TRACE_DYNAMIC_IMPL(module, 4, severity, format, a1, a2, a3, a4) + +/** + * Logs a message with five arguments if the targeted severity level is enabled + * both at compile-time, and run-time. + * @see IA_CSS_TRACE_DYNAMIC_0 + */ +#define IA_CSS_TRACE_DYNAMIC_5(module, severity, format, a1, a2, a3, a4, a5) \ + IA_CSS_TRACE_DYNAMIC_IMPL(module, 5, severity, format, \ + a1, a2, a3, a4, a5) + +/** + * Logs a message with six arguments if the targeted severity level is enabled + * both at compile-time, and run-time. 
+ * @see IA_CSS_TRACE_DYNAMIC_0 + */ +#define IA_CSS_TRACE_DYNAMIC_6(module, severity, format, \ + a1, a2, a3, a4, a5, a6) \ + IA_CSS_TRACE_DYNAMIC_IMPL(module, 6, severity, format, \ + a1, a2, a3, a4, a5, a6) + +/** + * Logs a message with seven arguments if the targeted severity level + * is enabled both at compile-time, and run-time. + * @see IA_CSS_TRACE_DYNAMIC_0 + */ +#define IA_CSS_TRACE_DYNAMIC_7(module, severity, format, \ + a1, a2, a3, a4, a5, a6, a7) \ + IA_CSS_TRACE_DYNAMIC_IMPL(module, 7, severity, format, \ + a1, a2, a3, a4, a5, a6, a7) + +/* +** Implementation +*/ + +/* CAT */ +#define IA_CSS_TRACE_CAT_IMPL(a, b) a ## b +#define IA_CSS_TRACE_CAT(a, b) IA_CSS_TRACE_CAT_IMPL(a, b) + +/* Bridge */ +#if defined(__HIVECC) || defined(__GNUC__) +#define IA_CSS_TRACE_IMPL(module, argument_count, severity, arguments ...) \ + IA_CSS_TRACE_CAT( \ + IA_CSS_TRACE_CAT( \ + IA_CSS_TRACE_CAT( \ + IA_CSS_TRACE_CAT( \ + IA_CSS_TRACE_CAT( \ + IA_CSS_TRACE_, \ + argument_count \ + ), \ + _ \ + ), \ + IA_CSS_TRACE_CAT( \ + module, \ + _TRACE_METHOD \ + ) \ + ), \ + _ \ + ), \ + IA_CSS_TRACE_CAT( \ + IA_CSS_TRACE_CAT( \ + module, \ + _TRACE_LEVEL_ \ + ), \ + severity \ + ) \ + ( \ + IA_CSS_TRACE_CAT( \ + IA_CSS_TRACE_CAT( \ + IA_CSS_TRACE_CAT( \ + IA_CSS_TRACE_SEVERITY_, \ + severity \ + ), \ + _ \ + ), \ + IA_CSS_TRACE_CAT( \ + module, \ + _TRACE_METHOD \ + ) \ + ), \ + #module, \ + ## arguments \ + ) \ + ) + +/* Bridge */ +#define IA_CSS_TRACE_DYNAMIC_IMPL(module, argument_count, severity, \ + arguments ...) \ + do { \ + if (IA_CSS_TRACE_CAT(IA_CSS_TRACE_CAT(module, _trace_level_), \ + severity)) { \ + IA_CSS_TRACE_IMPL(module, argument_count, severity, \ + ## arguments); \ + } \ + } while (0) +#elif defined(_MSC_VER) +#define IA_CSS_TRACE_IMPL(module, argument_count, severity, ...) 
\ + IA_CSS_TRACE_CAT( \ + IA_CSS_TRACE_CAT( \ + IA_CSS_TRACE_CAT( \ + IA_CSS_TRACE_CAT( \ + IA_CSS_TRACE_CAT( \ + IA_CSS_TRACE_, \ + argument_count \ + ), \ + _ \ + ), \ + IA_CSS_TRACE_CAT( \ + module, \ + _TRACE_METHOD \ + ) \ + ), \ + _ \ + ), \ + IA_CSS_TRACE_CAT( \ + IA_CSS_TRACE_CAT( \ + module, \ + _TRACE_LEVEL_ \ + ), \ + severity \ + ) \ + ( \ + IA_CSS_TRACE_CAT( \ + IA_CSS_TRACE_CAT( \ + IA_CSS_TRACE_CAT( \ + IA_CSS_TRACE_SEVERITY_, \ + severity \ + ), \ + _ \ + ), \ + IA_CSS_TRACE_CAT( \ + module, \ + _TRACE_METHOD \ + ) \ + ), \ + #module, \ + __VA_ARGS__ \ + ) \ + ) + +/* Bridge */ +#define IA_CSS_TRACE_DYNAMIC_IMPL(module, argument_count, severity, ...) \ + do { \ + if (IA_CSS_TRACE_CAT(IA_CSS_TRACE_CAT(module, _trace_level_), \ + severity)) { \ + IA_CSS_TRACE_IMPL(module, argument_count, severity, \ + __VA_ARGS__); \ + } \ + } while (0) +#endif + +/* +** Native Backend +*/ + +#if defined(__HIVECC) + #define IA_CSS_TRACE_PLATFORM_CELL +#elif defined(__GNUC__) + #define IA_CSS_TRACE_PLATFORM_HOST + + #define IA_CSS_TRACE_NATIVE(severity, module, format, arguments ...) \ + do { \ + IA_CSS_TRACE_FILE_PRINT_COMMAND; \ + PRINT(IA_CSS_TRACE_FORMAT_AUG_NATIVE(severity, module, \ + format), ## arguments); \ + } while (0) + /* TODO: In case Host Side tracing is needed to be mapped to the + * Tunit, the following "IA_CSS_TRACE_TRACE" needs to be modified from + * PRINT to vied_nci_tunit_print function calls + */ + #define IA_CSS_TRACE_TRACE(severity, module, format, arguments ...) \ + do { \ + IA_CSS_TRACE_FILE_PRINT_COMMAND; \ + PRINT(IA_CSS_TRACE_FORMAT_AUG_TRACE(severity, module, \ + format), ## arguments); \ + } while (0) + +#elif defined(_MSC_VER) + #define IA_CSS_TRACE_PLATFORM_HOST + + #define IA_CSS_TRACE_NATIVE(severity, module, format, ...) 
\ + do { \ + IA_CSS_TRACE_FILE_PRINT_COMMAND; \ + PRINT(IA_CSS_TRACE_FORMAT_AUG_NATIVE(severity, \ + module, format), __VA_ARGS__); \ + } while (0) + /* TODO: In case Host Side tracing is needed to be mapped to the + * Tunit, the following "IA_CSS_TRACE_TRACE" needs to be modified from + * PRINT to vied_nci_tunit_print function calls + */ + #define IA_CSS_TRACE_TRACE(severity, module, format, ...) \ + do { \ + IA_CSS_TRACE_FILE_PRINT_COMMAND; \ + PRINT(IA_CSS_TRACE_FORMAT_AUG_TRACE(severity, \ + module, format), __VA_ARGS__); \ + } while (0) +#else + #error Unsupported platform! +#endif /* Platform */ + +#if defined(IA_CSS_TRACE_PLATFORM_CELL) + #include /* VOLATILE */ + + #ifdef IA_CSS_TRACE_PRINT_FILE_LINE + #define IA_CSS_TRACE_FILE_PRINT_COMMAND \ + do { \ + OP___printstring(__FILE__":") VOLATILE; \ + OP___printdec(__LINE__) VOLATILE; \ + OP___printstring("\n") VOLATILE; \ + } while (0) + #else + #define IA_CSS_TRACE_FILE_PRINT_COMMAND + #endif + + #define IA_CSS_TRACE_MODULE_SEVERITY_PRINT(module, severity) \ + do { \ + IA_CSS_TRACE_FILE_DUMMY_DEFINE; \ + OP___printstring("["module"]:["severity"]:") \ + VOLATILE; \ + } while (0) + + #define IA_CSS_TRACE_MSG_NATIVE(severity, module, format) \ + do { \ + IA_CSS_TRACE_FILE_PRINT_COMMAND; \ + OP___printstring("["module"]:["severity"]: "format) \ + VOLATILE; \ + } while (0) + + #define IA_CSS_TRACE_ARG_NATIVE(module, severity, i, value) \ + do { \ + IA_CSS_TRACE_MODULE_SEVERITY_PRINT(module, severity); \ + OP___dump(i, value) VOLATILE; \ + } while (0) + + #define IA_CSS_TRACE_NATIVE_0(severity, module, format) \ + IA_CSS_TRACE_MSG_NATIVE(severity, module, format) + + #define IA_CSS_TRACE_NATIVE_1(severity, module, format, a1) \ + do { \ + IA_CSS_TRACE_MSG_NATIVE(severity, module, format); \ + IA_CSS_TRACE_ARG_NATIVE(module, severity, 1, a1); \ + } while (0) + + #define IA_CSS_TRACE_NATIVE_2(severity, module, format, a1, a2) \ + do { \ + IA_CSS_TRACE_MSG_NATIVE(severity, module, format); \ + 
IA_CSS_TRACE_ARG_NATIVE(module, severity, 1, a1); \ + IA_CSS_TRACE_ARG_NATIVE(module, severity, 2, a2); \ + } while (0) + + #define IA_CSS_TRACE_NATIVE_3(severity, module, format, a1, a2, a3) \ + do { \ + IA_CSS_TRACE_MSG_NATIVE(severity, module, format); \ + IA_CSS_TRACE_ARG_NATIVE(module, severity, 1, a1); \ + IA_CSS_TRACE_ARG_NATIVE(module, severity, 2, a2); \ + IA_CSS_TRACE_ARG_NATIVE(module, severity, 3, a3); \ + } while (0) + + #define IA_CSS_TRACE_NATIVE_4(severity, module, format, \ + a1, a2, a3, a4) \ + do { \ + IA_CSS_TRACE_MSG_NATIVE(severity, module, format); \ + IA_CSS_TRACE_ARG_NATIVE(module, severity, 1, a1); \ + IA_CSS_TRACE_ARG_NATIVE(module, severity, 2, a2); \ + IA_CSS_TRACE_ARG_NATIVE(module, severity, 3, a3); \ + IA_CSS_TRACE_ARG_NATIVE(module, severity, 4, a4); \ + } while (0) + + #define IA_CSS_TRACE_NATIVE_5(severity, module, format, \ + a1, a2, a3, a4, a5) \ + do { \ + IA_CSS_TRACE_MSG_NATIVE(severity, module, format); \ + IA_CSS_TRACE_ARG_NATIVE(module, severity, 1, a1); \ + IA_CSS_TRACE_ARG_NATIVE(module, severity, 2, a2); \ + IA_CSS_TRACE_ARG_NATIVE(module, severity, 3, a3); \ + IA_CSS_TRACE_ARG_NATIVE(module, severity, 4, a4); \ + IA_CSS_TRACE_ARG_NATIVE(module, severity, 5, a5); \ + } while (0) + + #define IA_CSS_TRACE_NATIVE_6(severity, module, format, \ + a1, a2, a3, a4, a5, a6) \ + do { \ + IA_CSS_TRACE_MSG_NATIVE(severity, module, format); \ + IA_CSS_TRACE_ARG_NATIVE(module, severity, 1, a1); \ + IA_CSS_TRACE_ARG_NATIVE(module, severity, 2, a2); \ + IA_CSS_TRACE_ARG_NATIVE(module, severity, 3, a3); \ + IA_CSS_TRACE_ARG_NATIVE(module, severity, 4, a4); \ + IA_CSS_TRACE_ARG_NATIVE(module, severity, 5, a5); \ + IA_CSS_TRACE_ARG_NATIVE(module, severity, 6, a6); \ + } while (0) + + #define IA_CSS_TRACE_NATIVE_7(severity, module, format, \ + a1, a2, a3, a4, a5, a6, a7) \ + do { \ + IA_CSS_TRACE_MSG_NATIVE(severity, module, format); \ + IA_CSS_TRACE_ARG_NATIVE(module, severity, 1, a1); \ + IA_CSS_TRACE_ARG_NATIVE(module, severity, 2, a2); 
\ + IA_CSS_TRACE_ARG_NATIVE(module, severity, 3, a3); \ + IA_CSS_TRACE_ARG_NATIVE(module, severity, 4, a4); \ + IA_CSS_TRACE_ARG_NATIVE(module, severity, 5, a5); \ + IA_CSS_TRACE_ARG_NATIVE(module, severity, 6, a6); \ + IA_CSS_TRACE_ARG_NATIVE(module, severity, 7, a7); \ + } while (0) + /* + ** Tracing Backend + */ +#if !defined(HRT_CSIM) && !defined(NO_TUNIT) + #include "vied_nci_tunit.h" +#endif + #define IA_CSS_TRACE_AUG_FORMAT_TRACE(format, module) \ + "[" module "]" format " : PID = %x : Timestamp = %d : PC = %x" + + #define IA_CSS_TRACE_TRACE_0(severity, module, format) \ + vied_nci_tunit_print(IA_CSS_TRACE_AUG_FORMAT_TRACE(format, \ + module), \ + severity) + + #define IA_CSS_TRACE_TRACE_1(severity, module, format, a1) \ + vied_nci_tunit_print1i(IA_CSS_TRACE_AUG_FORMAT_TRACE(format, \ + module), \ + severity, a1) + + #define IA_CSS_TRACE_TRACE_2(severity, module, format, a1, a2) \ + vied_nci_tunit_print2i(IA_CSS_TRACE_AUG_FORMAT_TRACE(format, \ + module), \ + severity, a1, a2) + + #define IA_CSS_TRACE_TRACE_3(severity, module, format, a1, a2, a3) \ + vied_nci_tunit_print3i(IA_CSS_TRACE_AUG_FORMAT_TRACE(format, \ + module), \ + severity, a1, a2, a3) + + #define IA_CSS_TRACE_TRACE_4(severity, module, format, a1, a2, a3, a4) \ + vied_nci_tunit_print4i(IA_CSS_TRACE_AUG_FORMAT_TRACE(format, \ + module), \ + severity, a1, a2, a3, a4) + + #define IA_CSS_TRACE_TRACE_5(severity, module, format, \ + a1, a2, a3, a4, a5) \ + vied_nci_tunit_print5i(IA_CSS_TRACE_AUG_FORMAT_TRACE(format, \ + module), \ + severity, a1, a2, a3, a4, a5) + + #define IA_CSS_TRACE_TRACE_6(severity, module, format, \ + a1, a2, a3, a4, a5, a6) \ + vied_nci_tunit_print6i(IA_CSS_TRACE_AUG_FORMAT_TRACE(format, \ + module), \ + severity, a1, a2, a3, a4, a5, a6) + + #define IA_CSS_TRACE_TRACE_7(severity, module, format, \ + a1, a2, a3, a4, a5, a6, a7) \ + vied_nci_tunit_print7i(IA_CSS_TRACE_AUG_FORMAT_TRACE(format, \ + module), \ + severity, a1, a2, a3, a4, a5, a6, a7) + +#elif 
defined(IA_CSS_TRACE_PLATFORM_HOST) + #include "print_support.h" + + #ifdef IA_CSS_TRACE_PRINT_FILE_LINE + #define IA_CSS_TRACE_FILE_PRINT_COMMAND \ + PRINT("%s:%d:\n", __FILE__, __LINE__) + #else + #define IA_CSS_TRACE_FILE_PRINT_COMMAND + #endif + + #define IA_CSS_TRACE_FORMAT_AUG_NATIVE(severity, module, format) \ + "[" module "]:[" severity "]: " format + + #define IA_CSS_TRACE_NATIVE_0(severity, module, format) \ + IA_CSS_TRACE_NATIVE(severity, module, format) + + #define IA_CSS_TRACE_NATIVE_1(severity, module, format, a1) \ + IA_CSS_TRACE_NATIVE(severity, module, format, a1) + + #define IA_CSS_TRACE_NATIVE_2(severity, module, format, a1, a2) \ + IA_CSS_TRACE_NATIVE(severity, module, format, a1, a2) + + #define IA_CSS_TRACE_NATIVE_3(severity, module, format, a1, a2, a3) \ + IA_CSS_TRACE_NATIVE(severity, module, format, a1, a2, a3) + + #define IA_CSS_TRACE_NATIVE_4(severity, module, format, \ + a1, a2, a3, a4) \ + IA_CSS_TRACE_NATIVE(severity, module, format, a1, a2, a3, a4) + + #define IA_CSS_TRACE_NATIVE_5(severity, module, format, \ + a1, a2, a3, a4, a5) \ + IA_CSS_TRACE_NATIVE(severity, module, format, \ + a1, a2, a3, a4, a5) + + #define IA_CSS_TRACE_NATIVE_6(severity, module, format, \ + a1, a2, a3, a4, a5, a6) \ + IA_CSS_TRACE_NATIVE(severity, module, format, \ + a1, a2, a3, a4, a5, a6) + + #define IA_CSS_TRACE_NATIVE_7(severity, module, format, \ + a1, a2, a3, a4, a5, a6, a7) \ + IA_CSS_TRACE_NATIVE(severity, module, format, \ + a1, a2, a3, a4, a5, a6, a7) + + #define IA_CSS_TRACE_FORMAT_AUG_TRACE(severity, module, format) \ + "["module"]:["severity"]: "format + + #define IA_CSS_TRACE_TRACE_0(severity, module, format) \ + IA_CSS_TRACE_TRACE(severity, module, format) + + #define IA_CSS_TRACE_TRACE_1(severity, module, format, a1) \ + IA_CSS_TRACE_TRACE(severity, module, format, a1) + + #define IA_CSS_TRACE_TRACE_2(severity, module, format, a1, a2) \ + IA_CSS_TRACE_TRACE(severity, module, format, a1, a2) + + #define IA_CSS_TRACE_TRACE_3(severity, module, 
format, a1, a2, a3) \ + IA_CSS_TRACE_TRACE(severity, module, format, a1, a2, a3) + + #define IA_CSS_TRACE_TRACE_4(severity, module, format, \ + a1, a2, a3, a4) \ + IA_CSS_TRACE_TRACE(severity, module, format, a1, a2, a3, a4) + + #define IA_CSS_TRACE_TRACE_5(severity, module, format, \ + a1, a2, a3, a4, a5) \ + IA_CSS_TRACE_TRACE(severity, module, format, \ + a1, a2, a3, a4, a5) + + #define IA_CSS_TRACE_TRACE_6(severity, module, format, \ + a1, a2, a3, a4, a5, a6) \ + IA_CSS_TRACE_TRACE(severity, module, format, \ + a1, a2, a3, a4, a5, a6) + + #define IA_CSS_TRACE_TRACE_7(severity, module, format, \ + a1, a2, a3, a4, a5, a6, a7) \ + IA_CSS_TRACE_TRACE(severity, module, format, \ + a1, a2, a3, a4, a5, a6, a7) +#endif + +/* Disabled */ +/* Legend: IA_CSS_TRACE_{Argument Count}_{Backend ID}_{Enabled} */ +#define IA_CSS_TRACE_0_1_0(severity, module, format) +#define IA_CSS_TRACE_1_1_0(severity, module, format, arg1) +#define IA_CSS_TRACE_2_1_0(severity, module, format, arg1, arg2) +#define IA_CSS_TRACE_3_1_0(severity, module, format, arg1, arg2, arg3) +#define IA_CSS_TRACE_4_1_0(severity, module, format, arg1, arg2, arg3, arg4) +#define IA_CSS_TRACE_5_1_0(severity, module, format, arg1, arg2, arg3, arg4, \ + arg5) +#define IA_CSS_TRACE_6_1_0(severity, module, format, arg1, arg2, arg3, arg4, \ + arg5, arg6) +#define IA_CSS_TRACE_7_1_0(severity, module, format, arg1, arg2, arg3, arg4, \ + arg5, arg6, arg7) + +/* Enabled */ +/* Legend: IA_CSS_TRACE_{Argument Count}_{Backend ID}_{Enabled} */ +#define IA_CSS_TRACE_0_1_1 IA_CSS_TRACE_NATIVE_0 +#define IA_CSS_TRACE_1_1_1 IA_CSS_TRACE_NATIVE_1 +#define IA_CSS_TRACE_2_1_1 IA_CSS_TRACE_NATIVE_2 +#define IA_CSS_TRACE_3_1_1 IA_CSS_TRACE_NATIVE_3 +#define IA_CSS_TRACE_4_1_1 IA_CSS_TRACE_NATIVE_4 +#define IA_CSS_TRACE_5_1_1 IA_CSS_TRACE_NATIVE_5 +#define IA_CSS_TRACE_6_1_1 IA_CSS_TRACE_NATIVE_6 +#define IA_CSS_TRACE_7_1_1 IA_CSS_TRACE_NATIVE_7 + +/* Enabled */ +/* Legend: IA_CSS_TRACE_SEVERITY_{Severity Level}_{Backend ID} */ 
+#define IA_CSS_TRACE_SEVERITY_ASSERT_1 "Assert" +#define IA_CSS_TRACE_SEVERITY_ERROR_1 "Error" +#define IA_CSS_TRACE_SEVERITY_WARNING_1 "Warning" +#define IA_CSS_TRACE_SEVERITY_INFO_1 "Info" +#define IA_CSS_TRACE_SEVERITY_DEBUG_1 "Debug" +#define IA_CSS_TRACE_SEVERITY_VERBOSE_1 "Verbose" + +/* Disabled */ +/* Legend: IA_CSS_TRACE_{Argument Count}_{Backend ID}_{Enabled} */ +#define IA_CSS_TRACE_0_2_0(severity, module, format) +#define IA_CSS_TRACE_1_2_0(severity, module, format, arg1) +#define IA_CSS_TRACE_2_2_0(severity, module, format, arg1, arg2) +#define IA_CSS_TRACE_3_2_0(severity, module, format, arg1, arg2, arg3) +#define IA_CSS_TRACE_4_2_0(severity, module, format, arg1, arg2, arg3, arg4) +#define IA_CSS_TRACE_5_2_0(severity, module, format, arg1, arg2, arg3, arg4, \ + arg5) +#define IA_CSS_TRACE_6_2_0(severity, module, format, arg1, arg2, arg3, arg4, \ + arg5, arg6) +#define IA_CSS_TRACE_7_2_0(severity, module, format, arg1, arg2, arg3, arg4, \ + arg5, arg6, arg7) + +/* Enabled */ +/* Legend: IA_CSS_TRACE_{Argument Count}_{Backend ID}_{Enabled} */ +#define IA_CSS_TRACE_0_2_1 IA_CSS_TRACE_TRACE_0 +#define IA_CSS_TRACE_1_2_1 IA_CSS_TRACE_TRACE_1 +#define IA_CSS_TRACE_2_2_1 IA_CSS_TRACE_TRACE_2 +#define IA_CSS_TRACE_3_2_1 IA_CSS_TRACE_TRACE_3 +#define IA_CSS_TRACE_4_2_1 IA_CSS_TRACE_TRACE_4 +#define IA_CSS_TRACE_5_2_1 IA_CSS_TRACE_TRACE_5 +#define IA_CSS_TRACE_6_2_1 IA_CSS_TRACE_TRACE_6 +#define IA_CSS_TRACE_7_2_1 IA_CSS_TRACE_TRACE_7 + +/* Enabled */ +/* Legend: IA_CSS_TRACE_SEVERITY_{Severity Level}_{Backend ID} */ +#define IA_CSS_TRACE_SEVERITY_ASSERT_2 VIED_NCI_TUNIT_MSG_SEVERITY_FATAL +#define IA_CSS_TRACE_SEVERITY_ERROR_2 VIED_NCI_TUNIT_MSG_SEVERITY_ERROR +#define IA_CSS_TRACE_SEVERITY_WARNING_2 VIED_NCI_TUNIT_MSG_SEVERITY_WARNING +#define IA_CSS_TRACE_SEVERITY_INFO_2 VIED_NCI_TUNIT_MSG_SEVERITY_NORMAL +#define IA_CSS_TRACE_SEVERITY_DEBUG_2 VIED_NCI_TUNIT_MSG_SEVERITY_USER1 +#define IA_CSS_TRACE_SEVERITY_VERBOSE_2 VIED_NCI_TUNIT_MSG_SEVERITY_USER2 + +/* 
+** Dynamicism +*/ + +#define IA_CSS_TRACE_DYNAMIC_DECLARE_IMPL(module) \ + do { \ + void IA_CSS_TRACE_CAT(module, _trace_assert_enable)(void); \ + void IA_CSS_TRACE_CAT(module, _trace_assert_disable)(void); \ + void IA_CSS_TRACE_CAT(module, _trace_error_enable)(void); \ + void IA_CSS_TRACE_CAT(module, _trace_error_disable)(void); \ + void IA_CSS_TRACE_CAT(module, _trace_warning_enable)(void); \ + void IA_CSS_TRACE_CAT(module, _trace_warning_disable)(void); \ + void IA_CSS_TRACE_CAT(module, _trace_info_enable)(void); \ + void IA_CSS_TRACE_CAT(module, _trace_info_disable)(void); \ + void IA_CSS_TRACE_CAT(module, _trace_debug_enable)(void); \ + void IA_CSS_TRACE_CAT(module, _trace_debug_disable)(void); \ + void IA_CSS_TRACE_CAT(module, _trace_verbose_enable)(void); \ + void IA_CSS_TRACE_CAT(module, _trace_verbose_disable)(void); \ + } while (0) + +#define IA_CSS_TRACE_DYNAMIC_DECLARE_CONFIG_FUNC_IMPL(module) \ + do { \ + IA_CSS_TRACE_FILE_DUMMY_DEFINE; \ + void IA_CSS_TRACE_CAT(module, _trace_configure)\ + (int argc, const char *const *argv); \ + } while (0) + +#include "platform_support.h" +#include "type_support.h" + +#define IA_CSS_TRACE_DYNAMIC_DEFINE_IMPL(module) \ + static uint8_t IA_CSS_TRACE_CAT(module, _trace_level_assert); \ + static uint8_t IA_CSS_TRACE_CAT(module, _trace_level_error); \ + static uint8_t IA_CSS_TRACE_CAT(module, _trace_level_warning); \ + static uint8_t IA_CSS_TRACE_CAT(module, _trace_level_info); \ + static uint8_t IA_CSS_TRACE_CAT(module, _trace_level_debug); \ + static uint8_t IA_CSS_TRACE_CAT(module, _trace_level_verbose); \ + \ + void IA_CSS_TRACE_CAT(module, _trace_assert_enable)(void) \ + { \ + IA_CSS_TRACE_CAT(module, _trace_level_assert) = 1; \ + } \ + \ + void IA_CSS_TRACE_CAT(module, _trace_assert_disable)(void) \ + { \ + IA_CSS_TRACE_CAT(module, _trace_level_assert) = 0; \ + } \ + \ + void IA_CSS_TRACE_CAT(module, _trace_error_enable)(void) \ + { \ + IA_CSS_TRACE_CAT(module, _trace_level_error) = 1; \ + } \ + \ + void 
IA_CSS_TRACE_CAT(module, _trace_error_disable)(void) \ + { \ + IA_CSS_TRACE_CAT(module, _trace_level_error) = 0; \ + } \ + \ + void IA_CSS_TRACE_CAT(module, _trace_warning_enable)(void) \ + { \ + IA_CSS_TRACE_CAT(module, _trace_level_warning) = 1; \ + } \ + \ + void IA_CSS_TRACE_CAT(module, _trace_warning_disable)(void) \ + { \ + IA_CSS_TRACE_CAT(module, _trace_level_warning) = 0; \ + } \ + \ + void IA_CSS_TRACE_CAT(module, _trace_info_enable)(void) \ + { \ + IA_CSS_TRACE_CAT(module, _trace_level_info) = 1; \ + } \ + \ + void IA_CSS_TRACE_CAT(module, _trace_info_disable)(void) \ + { \ + IA_CSS_TRACE_CAT(module, _trace_level_info) = 0; \ + } \ + \ + void IA_CSS_TRACE_CAT(module, _trace_debug_enable)(void) \ + { \ + IA_CSS_TRACE_CAT(module, _trace_level_debug) = 1; \ + } \ + \ + void IA_CSS_TRACE_CAT(module, _trace_debug_disable)(void) \ + { \ + IA_CSS_TRACE_CAT(module, _trace_level_debug) = 0; \ + } \ + \ + void IA_CSS_TRACE_CAT(module, _trace_verbose_enable)(void) \ + { \ + IA_CSS_TRACE_CAT(module, _trace_level_verbose) = 1; \ + } \ + \ + void IA_CSS_TRACE_CAT(module, _trace_verbose_disable)(void) \ + { \ + IA_CSS_TRACE_CAT(module, _trace_level_verbose) = 0; \ + } + +#define IA_CSS_TRACE_DYNAMIC_DEFINE_CONFIG_FUNC_IMPL(module) \ +void IA_CSS_TRACE_CAT(module, _trace_configure)(const int argc, \ + const char *const *const argv) \ +{ \ + int i = 1; \ + const char *levels = 0; \ + \ + while (i < argc) { \ + if (!strcmp(argv[i], "-" #module "_trace")) { \ + ++i; \ + \ + if (i < argc) { \ + levels = argv[i]; \ + \ + while (*levels) { \ + switch (*levels++) { \ + case 'a': \ + IA_CSS_TRACE_CAT \ + (module, _trace_assert_enable)(); \ + break; \ + \ + case 'e': \ + IA_CSS_TRACE_CAT \ + (module, _trace_error_enable)(); \ + break; \ + \ + case 'w': \ + IA_CSS_TRACE_CAT \ + (module, _trace_warning_enable)(); \ + break; \ + \ + case 'i': \ + IA_CSS_TRACE_CAT \ + (module, _trace_info_enable)(); \ + break; \ + \ + case 'd': \ + IA_CSS_TRACE_CAT \ + (module, 
_trace_debug_enable)(); \ + break; \ + \ + case 'v': \ + IA_CSS_TRACE_CAT \ + (module, _trace_verbose_enable)(); \ + break; \ + \ + default: break; \ + } \ + } \ + } \ + \ + ++i; \ + } \ +} + +#endif /* __IA_CSS_TRACE_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/trace/trace.mk b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/trace/trace.mk new file mode 100644 index 0000000000000..b232880b882bd --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/trace/trace.mk @@ -0,0 +1,40 @@ +# # # +# Support for Intel Camera Imaging ISP subsystem. +# Copyright (c) 2010 - 2018, Intel Corporation. +# +# This program is free software; you can redistribute it and/or modify it +# under the terms and conditions of the GNU General Public License, +# version 2, as published by the Free Software Foundation. +# +# This program is distributed in the hope it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +# FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU General Public License for +# more details +# +# +# MODULE Trace + +# Dependencies +IA_CSS_TRACE_SUPPORT = $${MODULES_DIR}/support + +# API +IA_CSS_TRACE = $${MODULES_DIR}/trace +IA_CSS_TRACE_INTERFACE = $(IA_CSS_TRACE)/interface + +# +# Host +# + +# Host CPP Flags +IA_CSS_TRACE_HOST_CPPFLAGS += -I$(IA_CSS_TRACE_SUPPORT) +IA_CSS_TRACE_HOST_CPPFLAGS += -I$(IA_CSS_TRACE_INTERFACE) +IA_CSS_TRACE_HOST_CPPFLAGS += -I$(IA_CSS_TRACE)/trace_modules + +# +# Firmware +# + +# Firmware CPP Flags +IA_CSS_TRACE_FW_CPPFLAGS += -I$(IA_CSS_TRACE_SUPPORT) +IA_CSS_TRACE_FW_CPPFLAGS += -I$(IA_CSS_TRACE_INTERFACE) +IA_CSS_TRACE_FW_CPPFLAGS += -I$(IA_CSS_TRACE)/trace_modules diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/vied/vied/shared_memory_access.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/vied/vied/shared_memory_access.h new file mode 100644 index 0000000000000..1e81bad9f4eec --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/vied/vied/shared_memory_access.h @@ -0,0 +1,139 @@ +/* +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+*/ +#ifndef _SHARED_MEMORY_ACCESS_H +#define _SHARED_MEMORY_ACCESS_H + +#include +#include +#include + +typedef enum { + sm_esuccess, + sm_enomem, + sm_ezeroalloc, + sm_ebadvaddr, + sm_einternalerror, + sm_ecorruption, + sm_enocontiguousmem, + sm_enolocmem, + sm_emultiplefree, +} shared_memory_error; + +/** + * \brief Virtual address of (DDR) shared memory space as seen from the VIED subsystem + */ +typedef uint32_t vied_virtual_address_t; + +/** + * \brief Virtual address of (DDR) shared memory space as seen from the host + */ +typedef unsigned long long host_virtual_address_t; + +/** + * \brief List of physical addresses of (DDR) shared memory space. This is used to represent a list of physical pages. + */ +typedef struct shared_memory_physical_page_list_s *shared_memory_physical_page_list; +typedef struct shared_memory_physical_page_list_s +{ + shared_memory_physical_page_list next; + vied_physical_address_t address; +}shared_memory_physical_page_list_s; + + +/** + * \brief Initialize the shared memory interface administration on the host. + * \param idm: id of ddr memory + * \param host_ddr_addr: physical address of memory as seen from host + * \param memory_size: size of ddr memory in bytes + * \param ps: size of page in bytes (for instance 4096) + */ +int shared_memory_allocation_initialize(vied_memory_t idm, vied_physical_address_t host_ddr_addr, size_t memory_size, size_t ps); + +/** + * \brief De-initialize the shared memory interface administration on the host. + * + */ +void shared_memory_allocation_uninitialize(vied_memory_t idm); + +/** + * \brief Allocate (DDR) shared memory space and return a host virtual address. Returns NULL when insufficient memory available + */ +host_virtual_address_t shared_memory_alloc(vied_memory_t idm, size_t bytes); + +/** + * \brief Free (DDR) shared memory space. +*/ +void shared_memory_free(vied_memory_t idm, host_virtual_address_t addr); + +/** + * \brief Translate a virtual host.address to a physical address. 
+*/ +vied_physical_address_t shared_memory_virtual_host_to_physical_address (vied_memory_t idm, host_virtual_address_t addr); + +/** + * \brief Return the allocated physical pages for a virtual host.address. +*/ +shared_memory_physical_page_list shared_memory_virtual_host_to_physical_pages (vied_memory_t idm, host_virtual_address_t addr); + +/** + * \brief Destroy a shared_memory_physical_page_list. +*/ +void shared_memory_physical_pages_list_destroy (shared_memory_physical_page_list ppl); + +/** + * \brief Store a byte into (DDR) shared memory space using a host virtual address + */ +void shared_memory_store_8 (vied_memory_t idm, host_virtual_address_t addr, uint8_t data); + +/** + * \brief Store a 16-bit word into (DDR) shared memory space using a host virtual address + */ +void shared_memory_store_16(vied_memory_t idm, host_virtual_address_t addr, uint16_t data); + +/** + * \brief Store a 32-bit word into (DDR) shared memory space using a host virtual address + */ +void shared_memory_store_32(vied_memory_t idm, host_virtual_address_t addr, uint32_t data); + +/** + * \brief Store a number of bytes into (DDR) shared memory space using a host virtual address + */ +void shared_memory_store(vied_memory_t idm, host_virtual_address_t addr, const void *data, size_t bytes); + +/** + * \brief Set a number of bytes of (DDR) shared memory space to 0 using a host virtual address + */ +void shared_memory_zero(vied_memory_t idm, host_virtual_address_t addr, size_t bytes); + +/** + * \brief Load a byte from (DDR) shared memory space using a host virtual address + */ +uint8_t shared_memory_load_8 (vied_memory_t idm, host_virtual_address_t addr); + +/** + * \brief Load a 16-bit word from (DDR) shared memory space using a host virtual address + */ +uint16_t shared_memory_load_16(vied_memory_t idm, host_virtual_address_t addr); + +/** + * \brief Load a 32-bit word from (DDR) shared memory space using a host virtual address + */ +uint32_t shared_memory_load_32(vied_memory_t idm, 
host_virtual_address_t addr); + +/** + * \brief Load a number of bytes from (DDR) shared memory space using a host virtual address + */ +void shared_memory_load(vied_memory_t idm, host_virtual_address_t addr, void *data, size_t bytes); + +#endif /* _SHARED_MEMORY_ACCESS_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/vied/vied/shared_memory_map.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/vied/vied/shared_memory_map.h new file mode 100644 index 0000000000000..1bbedcf9e7fd8 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/vied/vied/shared_memory_map.h @@ -0,0 +1,53 @@ +/* +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ +#ifndef _SHARED_MEMORY_MAP_H +#define _SHARED_MEMORY_MAP_H + +#include +#include +#include + +typedef void (*shared_memory_invalidate_mmu_tlb)(void); +typedef void (*shared_memory_set_page_table_base_address)(vied_physical_address_t); + +typedef void (*shared_memory_invalidate_mmu_tlb_ssid)(vied_subsystem_t id); +typedef void (*shared_memory_set_page_table_base_address_ssid)(vied_subsystem_t id, vied_physical_address_t); + +/** + * \brief Initialize the CSS virtual address system and MMU. The subsystem id will NOT be taken into account. 
+*/ +int shared_memory_map_initialize(vied_subsystem_t id, vied_memory_t idm, size_t mmu_ps, size_t mmu_pnrs, vied_physical_address_t ddr_addr, shared_memory_invalidate_mmu_tlb inv_tlb, shared_memory_set_page_table_base_address sbt); + +/** + * \brief Initialize the CSS virtual address system and MMU. The subsystem id will be taken into account. +*/ +int shared_memory_map_initialize_ssid(vied_subsystem_t id, vied_memory_t idm, size_t mmu_ps, size_t mmu_pnrs, vied_physical_address_t ddr_addr, shared_memory_invalidate_mmu_tlb_ssid inv_tlb, shared_memory_set_page_table_base_address_ssid sbt); + +/** + * \brief De-initialize the CSS virtual address system and MMU. +*/ +void shared_memory_map_uninitialize(vied_subsystem_t id, vied_memory_t idm); + +/** + * \brief Convert a host virtual address to a CSS virtual address and update the MMU. +*/ +vied_virtual_address_t shared_memory_map(vied_subsystem_t id, vied_memory_t idm, host_virtual_address_t addr); + +/** + * \brief Free a CSS virtual address and update the MMU. +*/ +void shared_memory_unmap(vied_subsystem_t id, vied_memory_t idm, vied_virtual_address_t addr); + + +#endif /* _SHARED_MEMORY_MAP_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/vied/vied/vied_config.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/vied/vied/vied_config.h new file mode 100644 index 0000000000000..912f016ead241 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/vied/vied/vied_config.h @@ -0,0 +1,33 @@ +/* +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. 
+ * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ +#ifndef _HRT_VIED_CONFIG_H +#define _HRT_VIED_CONFIG_H + +/* Defines from the compiler: + * HRT_HOST - this is code running on the host + * HRT_CELL - this is code running on a cell + */ +#ifdef HRT_HOST +# define CFG_VIED_SUBSYSTEM_ACCESS_LIB_IMPL 1 +# undef CFG_VIED_SUBSYSTEM_ACCESS_INLINE_IMPL + +#elif defined (HRT_CELL) +# undef CFG_VIED_SUBSYSTEM_ACCESS_LIB_IMPL +# define CFG_VIED_SUBSYSTEM_ACCESS_INLINE_IMPL 1 + +#else /* !HRT_CELL */ +/* Allow neither HRT_HOST nor HRT_CELL for testing purposes */ +#endif /* !HRT_CELL */ + +#endif /* _HRT_VIED_CONFIG_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/vied/vied/vied_memory_access_types.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/vied/vied/vied_memory_access_types.h new file mode 100644 index 0000000000000..0b44492789e37 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/vied/vied/vied_memory_access_types.h @@ -0,0 +1,36 @@ +/* +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ +#ifndef _HRT_VIED_MEMORY_ACCESS_TYPES_H +#define _HRT_VIED_MEMORY_ACCESS_TYPES_H + +/** Types for the VIED memory access interface */ + +#include "vied_types.h" + +/** + * \brief An identifier for a system memory. 
+ * + * This identifier must be a compile-time constant. It is used in + * access to system memory. + */ +typedef unsigned int vied_memory_t; + +#ifndef __HIVECC +/** + * \brief The type for a physical address + */ +typedef unsigned long long vied_physical_address_t; +#endif + +#endif /* _HRT_VIED_MEMORY_ACCESS_TYPES_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/vied/vied/vied_subsystem_access.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/vied/vied/vied_subsystem_access.h new file mode 100644 index 0000000000000..674f5fb5b0f99 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/vied/vied/vied_subsystem_access.h @@ -0,0 +1,70 @@ +/* +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+*/ +#ifndef _HRT_VIED_SUBSYSTEM_ACCESS_H +#define _HRT_VIED_SUBSYSTEM_ACCESS_H + +#include +#include "vied_config.h" +#include "vied_subsystem_access_types.h" + +#if !defined(CFG_VIED_SUBSYSTEM_ACCESS_INLINE_IMPL) && \ + !defined(CFG_VIED_SUBSYSTEM_ACCESS_LIB_IMPL) +#error Implementation selection macro for vied subsystem access not defined +#endif + +#if defined(CFG_VIED_SUBSYSTEM_ACCESS_INLINE_IMPL) +#ifndef __HIVECC +#error "Inline implementation of subsystem access not supported for host" +#endif +#define _VIED_SUBSYSTEM_ACCESS_INLINE static __inline +#include "vied_subsystem_access_impl.h" +#else +#define _VIED_SUBSYSTEM_ACCESS_INLINE +#endif + +_VIED_SUBSYSTEM_ACCESS_INLINE +void vied_subsystem_store_8 (vied_subsystem_t dev, + vied_subsystem_address_t addr, uint8_t data); + +_VIED_SUBSYSTEM_ACCESS_INLINE +void vied_subsystem_store_16(vied_subsystem_t dev, + vied_subsystem_address_t addr, uint16_t data); + +_VIED_SUBSYSTEM_ACCESS_INLINE +void vied_subsystem_store_32(vied_subsystem_t dev, + vied_subsystem_address_t addr, uint32_t data); + +_VIED_SUBSYSTEM_ACCESS_INLINE +void vied_subsystem_store(vied_subsystem_t dev, + vied_subsystem_address_t addr, + const void *data, unsigned int size); + +_VIED_SUBSYSTEM_ACCESS_INLINE +uint8_t vied_subsystem_load_8 (vied_subsystem_t dev, + vied_subsystem_address_t addr); + +_VIED_SUBSYSTEM_ACCESS_INLINE +uint16_t vied_subsystem_load_16(vied_subsystem_t dev, + vied_subsystem_address_t addr); + +_VIED_SUBSYSTEM_ACCESS_INLINE +uint32_t vied_subsystem_load_32(vied_subsystem_t dev, + vied_subsystem_address_t addr); + +_VIED_SUBSYSTEM_ACCESS_INLINE +void vied_subsystem_load(vied_subsystem_t dev, + vied_subsystem_address_t addr, + void *data, unsigned int size); + +#endif /* _HRT_VIED_SUBSYSTEM_ACCESS_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/vied/vied/vied_subsystem_access_initialization.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/vied/vied/vied_subsystem_access_initialization.h new 
file mode 100644 index 0000000000000..81f4d08d5ae0e --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/vied/vied/vied_subsystem_access_initialization.h @@ -0,0 +1,44 @@ +/* +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ +#ifndef _HRT_VIED_SUBSYSTEM_ACCESS_INITIALIZE_H +#define _HRT_VIED_SUBSYSTEM_ACCESS_INITIALIZE_H + +#include "vied_subsystem_access_types.h" + +/** @brief Initialises the access of a subsystem. + * @param[in] system The subsystem for which the access has to be initialised. + * + * vied_subsystem_access_initialize initialises the access to a subsystem. + * It sets the base address of the subsystem. This base address is extracted from the hsd file. + * + */ +void +vied_subsystem_access_initialize(vied_subsystem_t system); + + +/** @brief Initialises the access of multiple subsystems. + * @param[in] nr_subsystems The number of subsystems for which the access has to be initialised. + * @param[in] dev_base_addresses A pointer to an array of base addresses of subsystems. + * The size of this array must be "nr_subsystems". + * This array must be available during the accesses of the subsystem. + * + * vied_subsystems_access_initialize initialises the access to multiple subsystems. + * It sets the base addresses of the subsystems that are provided by the array dev_base_addresses. 
+ * + */ +void +vied_subsystems_access_initialize( unsigned int nr_subsystems + , const vied_subsystem_base_address_t *base_addresses); + +#endif /* _HRT_VIED_SUBSYSTEM_ACCESS_INITIALIZE_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/vied/vied/vied_subsystem_access_types.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/vied/vied/vied_subsystem_access_types.h new file mode 100644 index 0000000000000..75fef6c4ddba2 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/vied/vied/vied_subsystem_access_types.h @@ -0,0 +1,34 @@ +/* +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ +#ifndef _HRT_VIED_SUBSYSTEM_ACCESS_TYPES_H +#define _HRT_VIED_SUBSYSTEM_ACCESS_TYPES_H + +/** Types for the VIED subsystem access interface */ +#include + +/** \brief An identifier for a VIED subsystem. + * + * This identifier must be a compile-time constant. It is used in + * access to a VIED subsystem. 
+ */ +typedef unsigned int vied_subsystem_t; + + +/** \brief An address within a VIED subsystem */ +typedef uint32_t vied_subsystem_address_t; + +/** \brief A base address of a VIED subsystem seen from the host */ +typedef unsigned long long vied_subsystem_base_address_t; + +#endif /* _HRT_VIED_SUBSYSTEM_ACCESS_TYPES_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/vied/vied/vied_types.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/vied/vied/vied_types.h new file mode 100644 index 0000000000000..0acfdbb00cfa3 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/vied/vied/vied_types.h @@ -0,0 +1,45 @@ +/* +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ +#ifndef _HRT_VIED_TYPES_H +#define _HRT_VIED_TYPES_H + +/** Types shared by VIED interfaces */ + +#include + +/** \brief An address within a VIED subsystem + * + * This will eventually replace the vied_memory_address_t and vied_subsystem_address_t + */ +typedef uint32_t vied_address_t; + +/** \brief Memory address type + * + * A memory address is an offset within a memory. + */ +typedef uint32_t vied_memory_address_t; + +/** \brief Master port id */ +typedef int vied_master_port_id_t; + +/** + * \brief Require the existence of a certain type + * + * This macro can be used in interface header files to ensure that + * an implementation-defined type with a specified name exists. 
+ */ +#define _VIED_REQUIRE_TYPE(T) enum { _VIED_SIZEOF_##T = sizeof(T) } + + +#endif /* _HRT_VIED_TYPES_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/vied_nci_acb/interface/vied_nci_acb_route_type.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/vied_nci_acb/interface/vied_nci_acb_route_type.h new file mode 100644 index 0000000000000..b09d9f4d5d427 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/vied_nci_acb/interface/vied_nci_acb_route_type.h @@ -0,0 +1,39 @@ +/* + * Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+ */ + +#ifndef VIED_NCI_ACB_ROUTE_TYPE_H_ +#define VIED_NCI_ACB_ROUTE_TYPE_H_ + +#include "type_support.h" + +typedef enum { + NCI_ACB_PORT_ISP = 0, + NCI_ACB_PORT_ACC = 1, + NCI_ACB_PORT_INVALID = 0xFF +} nci_acb_port_t; + +typedef struct { + /* 0 = ISP, 1 = Acc */ + nci_acb_port_t in_select; + /* 0 = ISP, 1 = Acc */ + nci_acb_port_t out_select; + /* When set, Ack will be sent only when Eof arrives */ + uint32_t ignore_line_num; + /* Fork adapter to enable streaming to both output + * (next acb out and isp out) + */ + uint32_t fork_acb_output; +} nci_acb_route_t; + +#endif /* VIED_NCI_ACB_ROUTE_TYPE_H_ */ diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/vied_parameters/interface/ia_css_param_storage_class.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/vied_parameters/interface/ia_css_param_storage_class.h new file mode 100644 index 0000000000000..1ea7e729078c2 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/vied_parameters/interface/ia_css_param_storage_class.h @@ -0,0 +1,28 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+*/ + +#ifndef __IA_CSS_PARAM_STORAGE_CLASS_H +#define __IA_CSS_PARAM_STORAGE_CLASS_H + +#include "storage_class.h" + +#ifndef __INLINE_PARAMETERS__ +#define IA_CSS_PARAMETERS_STORAGE_CLASS_H STORAGE_CLASS_EXTERN +#define IA_CSS_PARAMETERS_STORAGE_CLASS_C +#else +#define IA_CSS_PARAMETERS_STORAGE_CLASS_H STORAGE_CLASS_INLINE +#define IA_CSS_PARAMETERS_STORAGE_CLASS_C STORAGE_CLASS_INLINE +#endif + +#endif /* __IA_CSS_PARAM_STORAGE_CLASS_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/vied_parameters/interface/ia_css_terminal.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/vied_parameters/interface/ia_css_terminal.h new file mode 100644 index 0000000000000..4cc71be3fc389 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/vied_parameters/interface/ia_css_terminal.h @@ -0,0 +1,188 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+*/ + +#ifndef __IA_CSS_TERMINAL_H +#define __IA_CSS_TERMINAL_H + +#include "type_support.h" +#include "ia_css_terminal_types.h" +#include "ia_css_param_storage_class.h" + +IA_CSS_PARAMETERS_STORAGE_CLASS_H +unsigned int ia_css_param_in_terminal_get_descriptor_size( + const unsigned int nof_sections +); + +IA_CSS_PARAMETERS_STORAGE_CLASS_H +ia_css_param_section_desc_t * +ia_css_param_in_terminal_get_param_section_desc( + const ia_css_param_terminal_t *param_terminal, + const unsigned int section_index +); + +IA_CSS_PARAMETERS_STORAGE_CLASS_H +unsigned int ia_css_param_out_terminal_get_descriptor_size( + const unsigned int nof_sections, + const unsigned int nof_fragments +); + +IA_CSS_PARAMETERS_STORAGE_CLASS_H +ia_css_param_section_desc_t * +ia_css_param_out_terminal_get_param_section_desc( + const ia_css_param_terminal_t *param_terminal, + const unsigned int section_index, + const unsigned int nof_sections, + const unsigned int fragment_index +); + +IA_CSS_PARAMETERS_STORAGE_CLASS_H +int ia_css_param_terminal_create( + ia_css_param_terminal_t *param_terminal, + const uint16_t terminal_offset, + const uint16_t terminal_size, + const uint16_t is_input_terminal +); + + +IA_CSS_PARAMETERS_STORAGE_CLASS_H +unsigned int ia_css_spatial_param_terminal_get_descriptor_size( + const unsigned int nof_frame_param_sections, + const unsigned int nof_fragments +); + +IA_CSS_PARAMETERS_STORAGE_CLASS_H +ia_css_fragment_grid_desc_t * +ia_css_spatial_param_terminal_get_fragment_grid_desc( + const ia_css_spatial_param_terminal_t *spatial_param_terminal, + const unsigned int fragment_index +); + +IA_CSS_PARAMETERS_STORAGE_CLASS_H +ia_css_frame_grid_param_section_desc_t * +ia_css_spatial_param_terminal_get_frame_grid_param_section_desc( + const ia_css_spatial_param_terminal_t *spatial_param_terminal, + const unsigned int section_index +); + +IA_CSS_PARAMETERS_STORAGE_CLASS_H +int ia_css_spatial_param_terminal_create( + ia_css_spatial_param_terminal_t *spatial_param_terminal, + const 
uint16_t terminal_offset, + const uint16_t terminal_size, + const uint16_t is_input_terminal, + const unsigned int nof_fragments, + const uint32_t kernel_id +); + + +IA_CSS_PARAMETERS_STORAGE_CLASS_H +unsigned int ia_css_sliced_param_terminal_get_descriptor_size( + const unsigned int nof_slice_param_sections, + const unsigned int nof_slices[], + const unsigned int nof_fragments +); + +IA_CSS_PARAMETERS_STORAGE_CLASS_H +ia_css_fragment_slice_desc_t * +ia_css_sliced_param_terminal_get_fragment_slice_desc( + const ia_css_sliced_param_terminal_t *sliced_param_terminal, + const unsigned int fragment_index +); + +IA_CSS_PARAMETERS_STORAGE_CLASS_H +ia_css_slice_param_section_desc_t * +ia_css_sliced_param_terminal_get_slice_param_section_desc( + const ia_css_sliced_param_terminal_t *sliced_param_terminal, + const unsigned int fragment_index, + const unsigned int slice_index, + const unsigned int section_index, + const unsigned int nof_slice_param_sections +); + +IA_CSS_PARAMETERS_STORAGE_CLASS_H +int ia_css_sliced_param_terminal_create( + ia_css_sliced_param_terminal_t *sliced_param_terminal, + const uint16_t terminal_offset, + const uint16_t terminal_size, + const uint16_t is_input_terminal, + const unsigned int nof_slice_param_sections, + const unsigned int nof_slices[], + const unsigned int nof_fragments, + const uint32_t kernel_id +); + + +IA_CSS_PARAMETERS_STORAGE_CLASS_H +unsigned int ia_css_program_terminal_get_descriptor_size( + const unsigned int nof_fragments, + const unsigned int nof_fragment_param_sections, + const unsigned int nof_kernel_fragment_sequencer_infos, + const unsigned int nof_command_objs +); + +IA_CSS_PARAMETERS_STORAGE_CLASS_H +ia_css_fragment_param_section_desc_t * +ia_css_program_terminal_get_frgmnt_prm_sct_desc( + const ia_css_program_terminal_t *program_terminal, + const unsigned int fragment_index, + const unsigned int section_index, + const unsigned int nof_fragment_param_sections +); + +IA_CSS_PARAMETERS_STORAGE_CLASS_H 
+ia_css_kernel_fragment_sequencer_info_desc_t * +ia_css_program_terminal_get_kernel_frgmnt_seq_info_desc( + const ia_css_program_terminal_t *program_terminal, + const unsigned int fragment_index, + const unsigned int info_index, + const unsigned int nof_kernel_fragment_sequencer_infos +); + +IA_CSS_PARAMETERS_STORAGE_CLASS_H +int ia_css_program_terminal_create( + ia_css_program_terminal_t *program_terminal, + const uint16_t terminal_offset, + const uint16_t terminal_size, + const unsigned int nof_fragments, + const unsigned int nof_kernel_fragment_sequencer_infos, + const unsigned int nof_command_objs +); + +IA_CSS_PARAMETERS_STORAGE_CLASS_H +int ia_css_program_terminal_get_command_base_offset( + const ia_css_program_terminal_t *program_terminal, + const unsigned int nof_fragments, + const unsigned int nof_kernel_fragment_sequencer_infos, + const unsigned int commands_slots_used, + uint16_t *command_desc_offset +); + +IA_CSS_PARAMETERS_STORAGE_CLASS_H +uint16_t *ia_css_program_terminal_get_line_count( + const ia_css_kernel_fragment_sequencer_command_desc_t + *kernel_fragment_sequencer_command_desc_base, + const unsigned int set_count +); + +IA_CSS_PARAMETERS_STORAGE_CLASS_H +unsigned int ia_css_spatial_param_terminal_get_descriptor_size( + const unsigned int nof_frame_param_sections, + const unsigned int nof_fragments +); + +#ifdef __INLINE_PARAMETERS__ +#include "ia_css_terminal_impl.h" +#endif /* __INLINE_PARAMETERS__ */ + +#endif /* __IA_CSS_TERMINAL_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/vied_parameters/interface/ia_css_terminal_manifest.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/vied_parameters/interface/ia_css_terminal_manifest.h new file mode 100644 index 0000000000000..ca0a436082cf6 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/vied_parameters/interface/ia_css_terminal_manifest.h @@ -0,0 +1,109 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. 
+ * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ + +#ifndef __IA_CSS_TERMINAL_MANIFEST_H +#define __IA_CSS_TERMINAL_MANIFEST_H + +#include "type_support.h" +#include "ia_css_param_storage_class.h" +#include "ia_css_terminal_manifest_types.h" + +IA_CSS_PARAMETERS_STORAGE_CLASS_H +unsigned int ia_css_param_terminal_manifest_get_size( + const unsigned int nof_sections +); + +IA_CSS_PARAMETERS_STORAGE_CLASS_H +int ia_css_param_terminal_manifest_init( + ia_css_param_terminal_manifest_t *param_terminal, + const uint16_t section_count +); + +IA_CSS_PARAMETERS_STORAGE_CLASS_H +ia_css_param_manifest_section_desc_t * +ia_css_param_terminal_manifest_get_prm_sct_desc( + const ia_css_param_terminal_manifest_t *param_terminal_manifest, + const unsigned int section_index +); + +IA_CSS_PARAMETERS_STORAGE_CLASS_H +unsigned int ia_css_spatial_param_terminal_manifest_get_size( + const unsigned int nof_frame_param_sections +); + +IA_CSS_PARAMETERS_STORAGE_CLASS_H +int ia_css_spatial_param_terminal_manifest_init( + ia_css_spatial_param_terminal_manifest_t *spatial_param_terminal, + const uint16_t section_count +); + +IA_CSS_PARAMETERS_STORAGE_CLASS_H +ia_css_frame_grid_param_manifest_section_desc_t * +ia_css_spatial_param_terminal_manifest_get_frm_grid_prm_sct_desc( + const ia_css_spatial_param_terminal_manifest_t * + spatial_param_terminal_manifest, + const unsigned int section_index +); + +IA_CSS_PARAMETERS_STORAGE_CLASS_H +unsigned int ia_css_sliced_param_terminal_manifest_get_size( + const unsigned int nof_slice_param_sections 
+); + +IA_CSS_PARAMETERS_STORAGE_CLASS_H +int ia_css_sliced_param_terminal_manifest_init( + ia_css_sliced_param_terminal_manifest_t *sliced_param_terminal, + const uint16_t section_count +); + +IA_CSS_PARAMETERS_STORAGE_CLASS_H +ia_css_sliced_param_manifest_section_desc_t * +ia_css_sliced_param_terminal_manifest_get_sliced_prm_sct_desc( + const ia_css_sliced_param_terminal_manifest_t * + sliced_param_terminal_manifest, + const unsigned int section_index +); + +IA_CSS_PARAMETERS_STORAGE_CLASS_H +unsigned int ia_css_program_terminal_manifest_get_size( + const unsigned int nof_fragment_param_sections, + const unsigned int nof_kernel_fragment_sequencer_infos +); + +IA_CSS_PARAMETERS_STORAGE_CLASS_H +int ia_css_program_terminal_manifest_init( + ia_css_program_terminal_manifest_t *program_terminal, + const uint16_t fragment_param_section_count, + const uint16_t kernel_fragment_seq_info_section_count +); + +IA_CSS_PARAMETERS_STORAGE_CLASS_H +ia_css_fragment_param_manifest_section_desc_t * +ia_css_program_terminal_manifest_get_frgmnt_prm_sct_desc( + const ia_css_program_terminal_manifest_t *program_terminal_manifest, + const unsigned int section_index +); + +IA_CSS_PARAMETERS_STORAGE_CLASS_H +ia_css_kernel_fragment_sequencer_info_manifest_desc_t * +ia_css_program_terminal_manifest_get_kernel_frgmnt_seq_info_desc( + const ia_css_program_terminal_manifest_t *program_terminal_manifest, + const unsigned int info_index +); + +#ifdef __INLINE_PARAMETERS__ +#include "ia_css_terminal_manifest_impl.h" +#endif /* __INLINE_PARAMETERS__ */ + +#endif /* __IA_CSS_TERMINAL_MANIFEST_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/vied_parameters/interface/ia_css_terminal_manifest_types.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/vied_parameters/interface/ia_css_terminal_manifest_types.h new file mode 100644 index 0000000000000..fe146395a8f4f --- /dev/null +++ 
b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/vied_parameters/interface/ia_css_terminal_manifest_types.h @@ -0,0 +1,342 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ + +#ifndef __IA_CSS_TERMINAL_MANIFEST_TYPES_H +#define __IA_CSS_TERMINAL_MANIFEST_TYPES_H + + +#include "ia_css_terminal_defs.h" +#include "type_support.h" +#include "ia_css_base_types.h" +#include "ia_css_terminal_manifest_base_types.h" + +#define N_PADDING_UINT8_IN_PARAM_TERMINAL_MANIFEST_SEC_STRUCT 1 +#define SIZE_OF_PARAM_TERMINAL_MANIFEST_SEC_STRUCT_IN_BITS \ + (1 * IA_CSS_UINT32_T_BITS \ + + 3 * IA_CSS_UINT8_T_BITS \ + + N_PADDING_UINT8_IN_PARAM_TERMINAL_MANIFEST_SEC_STRUCT * IA_CSS_UINT8_T_BITS) + +/* =============== Cached Param Terminal Manifest - START ============== */ +struct ia_css_param_manifest_section_desc_s { + /* Maximum size of the related parameter region */ + uint32_t max_mem_size; + /* Indication of the kernel this parameter belongs to */ + uint8_t kernel_id; + /* Memory targeted by this section + * (Register MMIO Interface/DMEM/VMEM/GMEM etc) + */ + uint8_t mem_type_id; + /* Region id within the specified memory */ + uint8_t region_id; + /* align to 64 */ + uint8_t padding[N_PADDING_UINT8_IN_PARAM_TERMINAL_MANIFEST_SEC_STRUCT]; +}; + +typedef struct ia_css_param_manifest_section_desc_s + ia_css_param_manifest_section_desc_t; + + +#define N_PADDING_UINT8_IN_PARAM_TERMINAL_MAN_STRUCT 4 +#define SIZE_OF_PARAM_TERMINAL_MANIFEST_STRUCT_IN_BITS \ + 
(SIZE_OF_TERMINAL_MANIFEST_STRUCT_IN_BITS \ + + (2*IA_CSS_UINT16_T_BITS) \ + + (N_PADDING_UINT8_IN_PARAM_TERMINAL_MAN_STRUCT * IA_CSS_UINT8_T_BITS)) + +/* Frame constant parameters terminal manifest */ +struct ia_css_param_terminal_manifest_s { + /* Parameter terminal manifest base */ + ia_css_terminal_manifest_t base; + /* + * Number of cached parameter sections, coming from manifest + * but also shared by the terminal + */ + uint16_t param_manifest_section_desc_count; + /* + * Points to the variable array of + * struct ia_css_param_section_desc_s + */ + uint16_t param_manifest_section_desc_offset; + /* align to 64 */ + uint8_t padding[N_PADDING_UINT8_IN_PARAM_TERMINAL_MAN_STRUCT]; +}; + +typedef struct ia_css_param_terminal_manifest_s + ia_css_param_terminal_manifest_t; +/* ================= Cached Param Terminal Manifest - End ================ */ + + +/* ================= Spatial Param Terminal Manifest - START ============= */ + +#define SIZE_OF_FRAG_GRID_MAN_STRUCT_IN_BITS \ + ((IA_CSS_N_DATA_DIMENSION*IA_CSS_UINT16_T_BITS) \ + + (IA_CSS_N_DATA_DIMENSION*IA_CSS_UINT16_T_BITS)) + +struct ia_css_fragment_grid_manifest_desc_s { + /* Min resolution width/height of the spatial parameters + * for the fragment measured in compute units + */ + uint16_t min_fragment_grid_dimension[IA_CSS_N_DATA_DIMENSION]; + /* Max resolution width/height of the spatial parameters + * for the fragment measured in compute units + */ + uint16_t max_fragment_grid_dimension[IA_CSS_N_DATA_DIMENSION]; +}; + +typedef struct ia_css_fragment_grid_manifest_desc_s + ia_css_fragment_grid_manifest_desc_t; + +#define N_PADDING_UINT8_IN_FRAME_GRID_PARAM_MAN_SEC_STRUCT 1 +#define SIZE_OF_FRAME_GRID_PARAM_MAN_SEC_STRUCT_IN_BITS \ + (1 * IA_CSS_UINT32_T_BITS \ + + 3 * IA_CSS_UINT8_T_BITS \ + + N_PADDING_UINT8_IN_FRAME_GRID_PARAM_MAN_SEC_STRUCT * IA_CSS_UINT8_T_BITS) + +struct ia_css_frame_grid_param_manifest_section_desc_s { + /* Maximum buffer total size allowed for + * this frame of parameters + */ + 
uint32_t max_mem_size; + /* Memory space targeted by this section + * (Register MMIO Interface/DMEM/VMEM/GMEM etc) + */ + uint8_t mem_type_id; + /* Region id within the specified memory space */ + uint8_t region_id; + /* size in bytes of each compute unit for + * the specified memory space and region + */ + uint8_t elem_size; + /* align to 64 */ + uint8_t padding[N_PADDING_UINT8_IN_FRAME_GRID_PARAM_MAN_SEC_STRUCT]; +}; + +typedef struct ia_css_frame_grid_param_manifest_section_desc_s + ia_css_frame_grid_param_manifest_section_desc_t; + +#define SIZE_OF_FRAME_GRID_MAN_STRUCT_IN_BITS \ + ((IA_CSS_N_DATA_DIMENSION*IA_CSS_UINT16_T_BITS) \ + + (IA_CSS_N_DATA_DIMENSION*IA_CSS_UINT16_T_BITS)) + +struct ia_css_frame_grid_manifest_desc_s { + /* Min resolution width/height of the spatial parameters for + * the frame measured in compute units + */ + uint16_t min_frame_grid_dimension[IA_CSS_N_DATA_DIMENSION]; + /* Max resolution width/height of the spatial parameters for + * the frame measured in compute units + */ + uint16_t max_frame_grid_dimension[IA_CSS_N_DATA_DIMENSION]; +}; + +typedef struct ia_css_frame_grid_manifest_desc_s + ia_css_frame_grid_manifest_desc_t; + +#define N_PADDING_UINT8_IN_SPATIAL_PARAM_TERM_MAN_STRUCT 2 +#define SIZE_OF_SPATIAL_PARAM_TERM_MAN_STRUCT_IN_BITS \ + ((SIZE_OF_TERMINAL_MANIFEST_STRUCT_IN_BITS) \ + + (SIZE_OF_FRAME_GRID_MAN_STRUCT_IN_BITS) \ + + (SIZE_OF_FRAG_GRID_MAN_STRUCT_IN_BITS) \ + + (2 * IA_CSS_UINT16_T_BITS) \ + + (2 * IA_CSS_UINT8_T_BITS) \ + + (N_PADDING_UINT8_IN_SPATIAL_PARAM_TERM_MAN_STRUCT * \ + IA_CSS_UINT8_T_BITS)) + +struct ia_css_spatial_param_terminal_manifest_s { + /* Spatial Parameter terminal manifest base */ + ia_css_terminal_manifest_t base; + /* Contains limits for the frame spatial parameters */ + ia_css_frame_grid_manifest_desc_t frame_grid_desc; + /* + * Constains limits for the fragment spatial parameters + * - COMMON AMONG FRAGMENTS + */ + ia_css_fragment_grid_manifest_desc_t common_fragment_grid_desc; + /* + * 
Number of frame spatial parameter sections, they are set + * in slice-steps through frame processing + */ + uint16_t frame_grid_param_manifest_section_desc_count; + /* + * Points to the variable array of + * ia_css_frame_spatial_param_manifest_section_desc_t + */ + uint16_t frame_grid_param_manifest_section_desc_offset; + /* + * Indication of the kernel this spatial parameter terminal belongs to + * SHOULD MATCH TO INDEX AND BE USED ONLY FOR CHECK + */ + uint8_t kernel_id; + /* + * Groups together compute units in order to achieve alignment + * requirements for transfes and to achieve canonical frame + * representation + */ + uint8_t compute_units_p_elem; + /* align to 64 */ + uint8_t padding[N_PADDING_UINT8_IN_SPATIAL_PARAM_TERM_MAN_STRUCT]; +}; + +typedef struct ia_css_spatial_param_terminal_manifest_s + ia_css_spatial_param_terminal_manifest_t; + +/* ================= Spatial Param Terminal Manifest - END ================ */ + +/* ================= Sliced Param Terminal Manifest - START =============== */ + +#define N_PADDING_UINT8_IN_SLICED_TERMINAL_MAN_SECTION_STRUCT (2) +#define SIZE_OF_SLICED_PARAM_MAN_SEC_STRUCT_IN_BITS \ + (1 * IA_CSS_UINT32_T_BITS \ + + 2 * IA_CSS_UINT8_T_BITS \ + + N_PADDING_UINT8_IN_SLICED_TERMINAL_MAN_SECTION_STRUCT * IA_CSS_UINT8_T_BITS) + +struct ia_css_sliced_param_manifest_section_desc_s { + /* Maximum size of the related parameter region */ + uint32_t max_mem_size; + /* + * Memory targeted by this section + * (Register MMIO Interface/DMEM/VMEM/GMEM etc) + */ + uint8_t mem_type_id; + /* Region id within the specified memory */ + uint8_t region_id; + /* align to 64 */ + uint8_t padding[N_PADDING_UINT8_IN_SLICED_TERMINAL_MAN_SECTION_STRUCT]; +}; + +typedef struct ia_css_sliced_param_manifest_section_desc_s + ia_css_sliced_param_manifest_section_desc_t; + +#define N_PADDING_UINT8_IN_SLICED_TERMINAL_MANIFEST_STRUCT 3 +#define SIZE_OF_SLICED_TERMINAL_MANIFEST_STRUCT_IN_BITS \ + (SIZE_OF_TERMINAL_MANIFEST_STRUCT_IN_BITS \ + + 2 * 
IA_CSS_UINT16_T_BITS \ + + 1 * IA_CSS_UINT8_T_BITS \ + + N_PADDING_UINT8_IN_SLICED_TERMINAL_MANIFEST_STRUCT * IA_CSS_UINT8_T_BITS) + +/* Frame constant parameters terminal manifest */ +struct ia_css_sliced_param_terminal_manifest_s { + /* Spatial Parameter terminal base */ + ia_css_terminal_manifest_t base; + /* + * Number of the array elements + * sliced_param_section_offset points to + */ + uint16_t sliced_param_section_count; + /* + * Points to array of ia_css_sliced_param_manifest_section_desc_s + * which constain info for the slicing of the parameters + */ + uint16_t sliced_param_section_offset; + /* Kernel identifier */ + uint8_t kernel_id; + /* align to 64 */ + uint8_t padding[N_PADDING_UINT8_IN_SLICED_TERMINAL_MANIFEST_STRUCT]; +}; + +typedef struct ia_css_sliced_param_terminal_manifest_s + ia_css_sliced_param_terminal_manifest_t; + +/* ================= Slice Param Terminal Manifest - End =============== */ + +/* ================= Program Terminal Manifest - START ================= */ + +#define N_PADDING_UINT8_IN_FRAG_PARAM_MAN_SEC_STRUCT 1 +#define SIZE_OF_FRAG_PARAM_MAN_SEC_STRUCT_IN_BITS \ + (1 * IA_CSS_UINT32_T_BITS \ + + 3 * IA_CSS_UINT8_T_BITS \ + + N_PADDING_UINT8_IN_FRAG_PARAM_MAN_SEC_STRUCT * IA_CSS_UINT8_T_BITS) + +/* Fragment constant parameters manifest */ +struct ia_css_fragment_param_manifest_section_desc_s { + /* Maximum size of the related parameter region */ + uint32_t max_mem_size; + /* Indication of the kernel this parameter belongs to */ + uint8_t kernel_id; + /* Memory targeted by this section + * (Register MMIO Interface/DMEM/VMEM/GMEM etc) + */ + uint8_t mem_type_id; + /* Region id within the specified memory space */ + uint8_t region_id; + /* align to 64 */ + uint8_t padding[N_PADDING_UINT8_IN_FRAG_PARAM_MAN_SEC_STRUCT]; +}; + +typedef struct ia_css_fragment_param_manifest_section_desc_s + ia_css_fragment_param_manifest_section_desc_t; + +#define SIZE_OF_KERNEL_FRAG_SEQ_INFO_MAN_STRUCT_IN_BITS \ + 
(10*IA_CSS_N_DATA_DIMENSION*IA_CSS_UINT16_T_BITS) + +struct ia_css_kernel_fragment_sequencer_info_manifest_desc_s { + /* Slice dimensions */ + uint16_t min_fragment_grid_slice_dimension[IA_CSS_N_DATA_DIMENSION]; + /* Slice dimensions */ + uint16_t max_fragment_grid_slice_dimension[IA_CSS_N_DATA_DIMENSION]; + /* Nof slices */ + uint16_t min_fragment_grid_slice_count[IA_CSS_N_DATA_DIMENSION]; + /* Nof slices */ + uint16_t max_fragment_grid_slice_count[IA_CSS_N_DATA_DIMENSION]; + /* Grid point decimation factor */ + uint16_t + min_fragment_grid_point_decimation_factor[IA_CSS_N_DATA_DIMENSION]; + /* Grid point decimation factor */ + uint16_t + max_fragment_grid_point_decimation_factor[IA_CSS_N_DATA_DIMENSION]; + /* Relative position of grid origin to pixel origin */ + int16_t + min_fragment_grid_overlay_pixel_topleft_index[IA_CSS_N_DATA_DIMENSION]; + /* Relative position of grid origin to pixel origin */ + int16_t + max_fragment_grid_overlay_pixel_topleft_index[IA_CSS_N_DATA_DIMENSION]; + /* Dimension of grid */ + int16_t + min_fragment_grid_overlay_pixel_dimension[IA_CSS_N_DATA_DIMENSION]; + /* Dimension of grid */ + int16_t + max_fragment_grid_overlay_pixel_dimension[IA_CSS_N_DATA_DIMENSION]; +}; + +typedef struct ia_css_kernel_fragment_sequencer_info_manifest_desc_s + ia_css_kernel_fragment_sequencer_info_manifest_desc_t; + +#define N_PADDING_UINT8_IN_PROGRAM_TERM_MAN_STRUCT 2 +#define SIZE_OF_PROG_TERM_MAN_STRUCT_IN_BITS \ + ((SIZE_OF_TERMINAL_MANIFEST_STRUCT_IN_BITS) \ + + (IA_CSS_UINT32_T_BITS) \ + + (5*IA_CSS_UINT16_T_BITS) \ + + (N_PADDING_UINT8_IN_PROGRAM_TERM_MAN_STRUCT * IA_CSS_UINT8_T_BITS)) + +struct ia_css_program_terminal_manifest_s { + ia_css_terminal_manifest_t base; + /* Connection manager passes seq info as single blob at the moment */ + uint32_t sequencer_info_kernel_id; + /* Maximum number of command secriptors supported + * by the program group + */ + uint16_t max_kernel_fragment_sequencer_command_desc; + uint16_t 
fragment_param_manifest_section_desc_count; + uint16_t fragment_param_manifest_section_desc_offset; + uint16_t kernel_fragment_sequencer_info_manifest_info_count; + uint16_t kernel_fragment_sequencer_info_manifest_info_offset; + /* align to 64 */ + uint8_t padding[N_PADDING_UINT8_IN_PROGRAM_TERM_MAN_STRUCT]; +}; + +typedef struct ia_css_program_terminal_manifest_s + ia_css_program_terminal_manifest_t; + +/* ==================== Program Terminal Manifest - END ==================== */ + +#endif /* __IA_CSS_TERMINAL_MANIFEST_TYPES_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/vied_parameters/interface/ia_css_terminal_types.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/vied_parameters/interface/ia_css_terminal_types.h new file mode 100644 index 0000000000000..c5c89fb7ec917 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/vied_parameters/interface/ia_css_terminal_types.h @@ -0,0 +1,351 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+*/ + +#ifndef __IA_CSS_TERMINAL_TYPES_H +#define __IA_CSS_TERMINAL_TYPES_H + +#include "type_support.h" +#include "ia_css_base_types.h" +#include "ia_css_terminal_base_types.h" + + +typedef struct ia_css_program_control_init_load_section_desc_s + ia_css_program_control_init_load_section_desc_t; +typedef struct ia_css_program_control_init_connect_section_desc_s + ia_css_program_control_init_connect_section_desc_t; +typedef struct ia_css_program_control_init_program_desc_s + ia_css_program_control_init_program_desc_t; +typedef struct ia_css_program_control_init_terminal_s + ia_css_program_control_init_terminal_t; + +typedef struct ia_css_program_terminal_s ia_css_program_terminal_t; +typedef struct ia_css_fragment_param_section_desc_s + ia_css_fragment_param_section_desc_t; +typedef struct ia_css_kernel_fragment_sequencer_info_desc_s + ia_css_kernel_fragment_sequencer_info_desc_t; +typedef struct ia_css_kernel_fragment_sequencer_command_desc_s + ia_css_kernel_fragment_sequencer_command_desc_t; + +typedef struct ia_css_sliced_param_terminal_s ia_css_sliced_param_terminal_t; +typedef struct ia_css_fragment_slice_desc_s ia_css_fragment_slice_desc_t; +typedef struct ia_css_slice_param_section_desc_s + ia_css_slice_param_section_desc_t; + +typedef struct ia_css_spatial_param_terminal_s ia_css_spatial_param_terminal_t; +typedef struct ia_css_frame_grid_desc_s ia_css_frame_grid_desc_t; +typedef struct ia_css_frame_grid_param_section_desc_s + ia_css_frame_grid_param_section_desc_t; +typedef struct ia_css_fragment_grid_desc_s ia_css_fragment_grid_desc_t; + +typedef struct ia_css_param_terminal_s ia_css_param_terminal_t; +typedef struct ia_css_param_section_desc_s ia_css_param_section_desc_t; + +typedef struct ia_css_param_payload_s ia_css_param_payload_t; +typedef struct ia_css_terminal_s ia_css_terminal_t; + +/* =================== Generic Parameter Payload - START =================== */ +#define N_UINT64_IN_PARAM_PAYLOAD_STRUCT 1 +#define N_UINT32_IN_PARAM_PAYLOAD_STRUCT 1 
+ +#define IA_CSS_PARAM_PAYLOAD_STRUCT_BITS \ + (N_UINT64_IN_PARAM_PAYLOAD_STRUCT * IA_CSS_UINT64_T_BITS \ + + VIED_VADDRESS_BITS \ + + N_UINT32_IN_PARAM_PAYLOAD_STRUCT * IA_CSS_UINT32_T_BITS) + +struct ia_css_param_payload_s { + /* + * Temporary variable holding the host address of the parameter buffer + * as PSYS is handling the parameters on the host side for the moment + */ + uint64_t host_buffer; + /* + * Base virtual addresses to parameters in subsystem virtual + * memory space + * NOTE: Used in legacy pg flow + */ + vied_vaddress_t buffer; + /* + * Offset to buffer address within external buffer set structure + * NOTE: Used in ppg flow + */ + uint32_t terminal_index; +}; +/* =================== Generic Parameter Payload - End ==================== */ + + +/* ==================== Cached Param Terminal - START ==================== */ +#define N_UINT32_IN_PARAM_SEC_STRUCT 2 + +#define SIZE_OF_PARAM_SEC_STRUCT_BITS \ + (N_UINT32_IN_PARAM_SEC_STRUCT * IA_CSS_UINT32_T_BITS) + +/* Frame constant parameters section */ +struct ia_css_param_section_desc_s { + /* Offset of the parameter allocation in memory */ + uint32_t mem_offset; + /* Memory allocation size needs of this parameter */ + uint32_t mem_size; +}; + +#define N_UINT16_IN_PARAM_TERMINAL_STRUCT 1 +#define N_PADDING_UINT8_IN_PARAM_TERMINAL_STRUCT 6 + +#define SIZE_OF_PARAM_TERMINAL_STRUCT_BITS \ + (SIZE_OF_TERMINAL_STRUCT_BITS \ + + IA_CSS_PARAM_PAYLOAD_STRUCT_BITS \ + + N_UINT16_IN_PARAM_TERMINAL_STRUCT * IA_CSS_UINT16_T_BITS \ + + N_PADDING_UINT8_IN_PARAM_TERMINAL_STRUCT * IA_CSS_UINT8_T_BITS) + +/* Frame constant parameters terminal */ +struct ia_css_param_terminal_s { + /* Parameter terminal base */ + ia_css_terminal_t base; + /* Parameter buffer handle attached to the terminal */ + ia_css_param_payload_t param_payload; + /* Points to the variable array of ia_css_param_section_desc_t */ + uint16_t param_section_desc_offset; + uint8_t padding[N_PADDING_UINT8_IN_PARAM_TERMINAL_STRUCT]; +}; +/* 
==================== Cached Param Terminal - End ==================== */ + + +/* ==================== Spatial Param Terminal - START ==================== */ +#define N_UINT16_IN_FRAG_GRID_STRUCT (2 * IA_CSS_N_DATA_DIMENSION) + +#define SIZE_OF_FRAG_GRID_STRUCT_BITS \ + (N_UINT16_IN_FRAG_GRID_STRUCT * IA_CSS_UINT16_T_BITS) + +struct ia_css_fragment_grid_desc_s { + /* + * Offset width/height of the top-left compute unit of the + * fragment compared to the frame + */ + uint16_t fragment_grid_index[IA_CSS_N_DATA_DIMENSION]; + /* + * Resolution width/height of the spatial parameters that + * correspond to the fragment measured in compute units + */ + uint16_t fragment_grid_dimension[IA_CSS_N_DATA_DIMENSION]; +}; + +#define N_UINT32_IN_FRAME_GRID_PARAM_SEC_STRUCT 3 +#define N_PADDING_UINT8_IN_FRAME_GRID_PARAM_SEC_STRUCT 4 + +#define SIZE_OF_FRAME_GRID_PARAM_SEC_STRUCT_BITS \ + (N_UINT32_IN_FRAME_GRID_PARAM_SEC_STRUCT * IA_CSS_UINT32_T_BITS \ + + N_PADDING_UINT8_IN_FRAME_GRID_PARAM_SEC_STRUCT * IA_CSS_UINT8_T_BITS) + +/* + * A plane of parameters with spatial aspect + * (compute units correlated to pixel data) + */ +struct ia_css_frame_grid_param_section_desc_s { + /* Offset of the parameter allocation in memory */ + uint32_t mem_offset; + /* Memory allocation size needs of this parameter */ + uint32_t mem_size; + /* + * stride in bytes of each line of compute units for + * the specified memory space and region + */ + uint32_t stride; + uint8_t padding[N_PADDING_UINT8_IN_FRAME_GRID_PARAM_SEC_STRUCT]; +}; + +#define N_UINT16_IN_FRAME_GRID_STRUCT_STRUCT IA_CSS_N_DATA_DIMENSION +#define N_PADDING_UINT8_IN_FRAME_GRID_STRUCT 4 + +#define SIZE_OF_FRAME_GRID_STRUCT_BITS \ + (N_UINT16_IN_FRAME_GRID_STRUCT_STRUCT * IA_CSS_UINT16_T_BITS \ + + N_PADDING_UINT8_IN_FRAME_GRID_STRUCT * IA_CSS_UINT8_T_BITS) + +struct ia_css_frame_grid_desc_s { + /* Resolution width/height of the frame of + * spatial parameters measured in compute units + */ + uint16_t 
frame_grid_dimension[IA_CSS_N_DATA_DIMENSION]; + uint8_t padding[N_PADDING_UINT8_IN_FRAME_GRID_STRUCT]; +}; + +#define N_UINT32_IN_SPATIAL_PARAM_TERM_STRUCT 1 +#define N_UINT16_IN_SPATIAL_PARAM_TERM_STRUCT 2 + +#define SIZE_OF_SPATIAL_PARAM_TERM_STRUCT_BITS \ + (SIZE_OF_TERMINAL_STRUCT_BITS \ + + IA_CSS_PARAM_PAYLOAD_STRUCT_BITS \ + + SIZE_OF_FRAME_GRID_STRUCT_BITS \ + + N_UINT32_IN_SPATIAL_PARAM_TERM_STRUCT * IA_CSS_UINT32_T_BITS \ + + N_UINT16_IN_SPATIAL_PARAM_TERM_STRUCT * IA_CSS_UINT16_T_BITS) + +struct ia_css_spatial_param_terminal_s { + /* Spatial Parameter terminal base */ + ia_css_terminal_t base; + /* Spatial Parameter buffer handle attached to the terminal */ + ia_css_param_payload_t param_payload; + /* Contains info for the frame of spatial parameters */ + ia_css_frame_grid_desc_t frame_grid_desc; + /* Kernel identifier */ + uint32_t kernel_id; + /* + * Points to the variable array of + * ia_css_frame_grid_param_section_desc_t + */ + uint16_t frame_grid_param_section_desc_offset; + /* + * Points to array of ia_css_fragment_spatial_desc_t + * which constain info for the fragments of spatial parameters + */ + uint16_t fragment_grid_desc_offset; +}; +/* ==================== Spatial Param Terminal - END ==================== */ + + +/* ==================== Sliced Param Terminal - START ==================== */ +#define N_UINT32_IN_SLICE_PARAM_SECTION_DESC_STRUCT 2 + +#define SIZE_OF_SLICE_PARAM_SECTION_DESC_STRUCT_BITS \ + (N_UINT32_IN_SLICE_PARAM_SECTION_DESC_STRUCT * IA_CSS_UINT32_T_BITS) + +/* A Slice of parameters ready to be trasferred from/to registers */ +struct ia_css_slice_param_section_desc_s { + /* Offset of the parameter allocation in memory */ + uint32_t mem_offset; + /* Memory allocation size needs of this parameter */ + uint32_t mem_size; +}; + +#define N_UINT16_IN_FRAGMENT_SLICE_DESC_STRUCT 2 +#define N_PADDING_UINT8_FRAGMENT_SLICE_DESC_STRUCT 4 + +#define SIZE_OF_FRAGMENT_SLICE_DESC_STRUCT_BITS \ + (N_UINT16_IN_FRAGMENT_SLICE_DESC_STRUCT * 
IA_CSS_UINT16_T_BITS \ + + N_PADDING_UINT8_FRAGMENT_SLICE_DESC_STRUCT * IA_CSS_UINT8_T_BITS) + +struct ia_css_fragment_slice_desc_s { + /* + * Points to array of ia_css_slice_param_section_desc_t + * which constain info for each prameter slice + */ + uint16_t slice_section_desc_offset; + /* Number of slices for the parameters for this fragment */ + uint16_t slice_count; + uint8_t padding[N_PADDING_UINT8_FRAGMENT_SLICE_DESC_STRUCT]; +}; + +#define N_UINT32_IN_SLICED_PARAM_TERMINAL_STRUCT 1 +#define N_UINT16_IN_SLICED_PARAM_TERMINAL_STRUCT 1 +#define N_PADDING_UINT8_SLICED_PARAM_TERMINAL_STRUCT 2 + +#define SIZE_OF_SLICED_PARAM_TERM_STRUCT_BITS \ + (SIZE_OF_TERMINAL_STRUCT_BITS \ + + IA_CSS_PARAM_PAYLOAD_STRUCT_BITS \ + + N_UINT32_IN_SLICED_PARAM_TERMINAL_STRUCT * IA_CSS_UINT32_T_BITS \ + + N_UINT16_IN_SLICED_PARAM_TERMINAL_STRUCT * IA_CSS_UINT16_T_BITS \ + + N_PADDING_UINT8_SLICED_PARAM_TERMINAL_STRUCT * IA_CSS_UINT8_T_BITS) + +struct ia_css_sliced_param_terminal_s { + /* Spatial Parameter terminal base */ + ia_css_terminal_t base; + /* Spatial Parameter buffer handle attached to the terminal */ + ia_css_param_payload_t param_payload; + /* Kernel identifier */ + uint32_t kernel_id; + /* + * Points to array of ia_css_fragment_slice_desc_t + * which constain info for the slicing of the parameters + */ + uint16_t fragment_slice_desc_offset; + uint8_t padding[N_PADDING_UINT8_SLICED_PARAM_TERMINAL_STRUCT]; +}; +/* ==================== Sliced Param Terminal - END ==================== */ + + +/* ==================== Program Terminal - START ==================== */ + +#define N_UINT32_IN_FRAG_PARAM_SEC_STRUCT 2 + +#define SIZE_OF_FRAG_PARAM_SEC_STRUCT_BITS \ + (N_UINT32_IN_FRAG_PARAM_SEC_STRUCT * IA_CSS_UINT32_T_BITS) + +/* Fragment constant parameters section */ +struct ia_css_fragment_param_section_desc_s { + /* Offset of the parameter allocation in memory */ + uint32_t mem_offset; + /* Memory allocation size needs of this parameter */ + uint32_t mem_size; +}; + +#define 
N_UINT16_IN_FRAG_SEQ_COMMAND_STRUCT IA_CSS_N_COMMAND_COUNT + +#define SIZE_OF_FRAG_SEQ_COMMANDS_STRUCT_BITS \ + (N_UINT16_IN_FRAG_SEQ_COMMAND_STRUCT * IA_CSS_UINT16_T_BITS) + +/* 4 commands packe together to save memory space */ +struct ia_css_kernel_fragment_sequencer_command_desc_s { + /* Contains the "(command_index%4) == index" command desc */ + uint16_t line_count[IA_CSS_N_COMMAND_COUNT]; +}; + +#define N_UINT16_IN_FRAG_SEQ_INFO_STRUCT (5 * IA_CSS_N_DATA_DIMENSION + 2) + +#define SIZE_OF_FRAG_SEQ_INFO_STRUCT_BITS \ + (N_UINT16_IN_FRAG_SEQ_INFO_STRUCT * IA_CSS_UINT16_T_BITS) + +struct ia_css_kernel_fragment_sequencer_info_desc_s { + /* Slice dimensions */ + uint16_t fragment_grid_slice_dimension[IA_CSS_N_DATA_DIMENSION]; + /* Nof slices */ + uint16_t fragment_grid_slice_count[IA_CSS_N_DATA_DIMENSION]; + /* Grid point decimation factor */ + uint16_t + fragment_grid_point_decimation_factor[IA_CSS_N_DATA_DIMENSION]; + /* Relative position of grid origin to pixel origin */ + int16_t + fragment_grid_overlay_pixel_topleft_index[IA_CSS_N_DATA_DIMENSION]; + /* Size of active fragment region */ + int16_t + fragment_grid_overlay_pixel_dimension[IA_CSS_N_DATA_DIMENSION]; + /* If >0 it overrides the standard fragment sequencer info */ + uint16_t command_count; + /* + * To be used only if command_count>0, points to the descriptors + * for the commands (ia_css_kernel_fragment_sequencer_command_desc_s) + */ + uint16_t command_desc_offset; +}; + +#define N_UINT16_IN_PROG_TERM_STRUCT 2 +#define N_PADDING_UINT8_IN_PROG_TERM_STRUCT 4 + +#define SIZE_OF_PROG_TERM_STRUCT_BITS \ + (SIZE_OF_TERMINAL_STRUCT_BITS \ + + IA_CSS_PARAM_PAYLOAD_STRUCT_BITS \ + + N_UINT16_IN_PROG_TERM_STRUCT * IA_CSS_UINT16_T_BITS \ + + N_PADDING_UINT8_IN_PROG_TERM_STRUCT * IA_CSS_UINT8_T_BITS) + +struct ia_css_program_terminal_s { + /* Program terminal base */ + ia_css_terminal_t base; + /* Program terminal buffer handle attached to the terminal */ + ia_css_param_payload_t param_payload; + /* Points to 
array of ia_css_fragment_param_desc_s */ + uint16_t fragment_param_section_desc_offset; + /* Points to array of ia_css_kernel_fragment_sequencer_info_s */ + uint16_t kernel_fragment_sequencer_info_desc_offset; + /* align to 64 */ + uint8_t padding[N_PADDING_UINT8_IN_PROG_TERM_STRUCT]; +}; +/* ==================== Program Terminal - END ==================== */ + +#endif /* __IA_CSS_TERMINAL_TYPES_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/vied_parameters/src/ia_css_terminal.c b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/vied_parameters/src/ia_css_terminal.c new file mode 100644 index 0000000000000..683fb3a88cd87 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/vied_parameters/src/ia_css_terminal.c @@ -0,0 +1,20 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+*/ + +#ifdef __INLINE_PARAMETERS__ +#include "storage_class.h" +STORAGE_CLASS_INLINE int __ia_css_param_avoid_warning_on_empty_file(void) { return 0; } +#else /* __INLINE_PARAMETERS__ */ +#include "ia_css_terminal_impl.h" +#endif /* __INLINE_PARAMETERS__ */ diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/vied_parameters/src/ia_css_terminal_impl.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/vied_parameters/src/ia_css_terminal_impl.h new file mode 100644 index 0000000000000..9ccf3931e8e3d --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/vied_parameters/src/ia_css_terminal_impl.h @@ -0,0 +1,495 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+*/ + +#ifndef __IA_CSS_TERMINAL_IMPL_H +#define __IA_CSS_TERMINAL_IMPL_H + +#include "ia_css_terminal.h" +#include "ia_css_terminal_types.h" +#include "error_support.h" +#include "assert_support.h" +#include "storage_class.h" + +/* Param Terminal */ +IA_CSS_PARAMETERS_STORAGE_CLASS_C +unsigned int ia_css_param_in_terminal_get_descriptor_size( + const unsigned int nof_sections) +{ + return sizeof(ia_css_param_terminal_t) + + nof_sections*sizeof(ia_css_param_section_desc_t); +} + +IA_CSS_PARAMETERS_STORAGE_CLASS_C +ia_css_param_section_desc_t *ia_css_param_in_terminal_get_param_section_desc( + const ia_css_param_terminal_t *param_terminal, + const unsigned int section_index) +{ + ia_css_param_section_desc_t *param_section_base; + ia_css_param_section_desc_t *param_section_desc = NULL; + + verifjmpexit(param_terminal != NULL); + + param_section_base = + (ia_css_param_section_desc_t *) + (((const char *)param_terminal) + + param_terminal->param_section_desc_offset); + param_section_desc = &(param_section_base[section_index]); + +EXIT: + return param_section_desc; +} + +IA_CSS_PARAMETERS_STORAGE_CLASS_C +unsigned int ia_css_param_out_terminal_get_descriptor_size( + const unsigned int nof_sections, + const unsigned int nof_fragments) +{ + return sizeof(ia_css_param_terminal_t) + + nof_fragments*nof_sections*sizeof(ia_css_param_section_desc_t); +} + +IA_CSS_PARAMETERS_STORAGE_CLASS_C +ia_css_param_section_desc_t *ia_css_param_out_terminal_get_param_section_desc( + const ia_css_param_terminal_t *param_terminal, + const unsigned int section_index, + const unsigned int nof_sections, + const unsigned int fragment_index) +{ + ia_css_param_section_desc_t *param_section_base; + ia_css_param_section_desc_t *param_section_desc = NULL; + + verifjmpexit(param_terminal != NULL); + + param_section_base = + (ia_css_param_section_desc_t *) + (((const char *)param_terminal) + + param_terminal->param_section_desc_offset); + param_section_desc = + &(param_section_base[(nof_sections * 
fragment_index) + + section_index]); + +EXIT: + return param_section_desc; +} + +IA_CSS_PARAMETERS_STORAGE_CLASS_C +int ia_css_param_terminal_create( + ia_css_param_terminal_t *param_terminal, + const uint16_t terminal_offset, + const uint16_t terminal_size, + const uint16_t is_input_terminal) +{ + if (param_terminal == NULL) { + return -EFAULT; + } + + if (terminal_offset > (1<<15)) { + return -EINVAL; + } + + param_terminal->base.terminal_type = + is_input_terminal ? + IA_CSS_TERMINAL_TYPE_PARAM_CACHED_IN : + IA_CSS_TERMINAL_TYPE_PARAM_CACHED_OUT; + param_terminal->base.parent_offset = + 0 - ((int16_t)terminal_offset); + param_terminal->base.size = terminal_size; + param_terminal->param_section_desc_offset = + sizeof(ia_css_param_terminal_t); + + return 0; +} + +/* Spatial Param Terminal */ +IA_CSS_PARAMETERS_STORAGE_CLASS_C +unsigned int ia_css_spatial_param_terminal_get_descriptor_size( + const unsigned int nof_frame_param_sections, + const unsigned int nof_fragments) +{ + return sizeof(ia_css_spatial_param_terminal_t) + + nof_frame_param_sections * sizeof( + ia_css_frame_grid_param_section_desc_t) + + nof_fragments * sizeof(ia_css_fragment_grid_desc_t); +} + +IA_CSS_PARAMETERS_STORAGE_CLASS_C +ia_css_fragment_grid_desc_t * +ia_css_spatial_param_terminal_get_fragment_grid_desc( + const ia_css_spatial_param_terminal_t *spatial_param_terminal, + const unsigned int fragment_index) +{ + ia_css_fragment_grid_desc_t *fragment_grid_desc_base; + ia_css_fragment_grid_desc_t *fragment_grid_desc = NULL; + + verifjmpexit(spatial_param_terminal != NULL); + + fragment_grid_desc_base = + (ia_css_fragment_grid_desc_t *) + (((const char *)spatial_param_terminal) + + spatial_param_terminal->fragment_grid_desc_offset); + fragment_grid_desc = &(fragment_grid_desc_base[fragment_index]); + +EXIT: + return fragment_grid_desc; +} + +IA_CSS_PARAMETERS_STORAGE_CLASS_C +ia_css_frame_grid_param_section_desc_t * +ia_css_spatial_param_terminal_get_frame_grid_param_section_desc( + const 
ia_css_spatial_param_terminal_t *spatial_param_terminal, + const unsigned int section_index) +{ + ia_css_frame_grid_param_section_desc_t * + frame_grid_param_section_base; + ia_css_frame_grid_param_section_desc_t * + frame_grid_param_section_desc = NULL; + + verifjmpexit(spatial_param_terminal != NULL); + + frame_grid_param_section_base = + (ia_css_frame_grid_param_section_desc_t *) + (((const char *)spatial_param_terminal) + + spatial_param_terminal->frame_grid_param_section_desc_offset); + frame_grid_param_section_desc = + &(frame_grid_param_section_base[section_index]); + +EXIT: + return frame_grid_param_section_desc; +} + +IA_CSS_PARAMETERS_STORAGE_CLASS_C +int ia_css_spatial_param_terminal_create( + ia_css_spatial_param_terminal_t *spatial_param_terminal, + const uint16_t terminal_offset, + const uint16_t terminal_size, + const uint16_t is_input_terminal, + const unsigned int nof_fragments, + const uint32_t kernel_id) +{ + if (spatial_param_terminal == NULL) { + return -EFAULT; + } + + if (terminal_offset > (1<<15)) { + return -EINVAL; + } + + spatial_param_terminal->base.terminal_type = + is_input_terminal ? 
+ IA_CSS_TERMINAL_TYPE_PARAM_SPATIAL_IN : + IA_CSS_TERMINAL_TYPE_PARAM_SPATIAL_OUT; + spatial_param_terminal->base.parent_offset = + 0 - ((int16_t)terminal_offset); + spatial_param_terminal->base.size = terminal_size; + spatial_param_terminal->kernel_id = kernel_id; + spatial_param_terminal->fragment_grid_desc_offset = + sizeof(ia_css_spatial_param_terminal_t); + spatial_param_terminal->frame_grid_param_section_desc_offset = + spatial_param_terminal->fragment_grid_desc_offset + + (nof_fragments * sizeof(ia_css_fragment_grid_desc_t)); + + return 0; +} + +/* Sliced terminal */ +IA_CSS_PARAMETERS_STORAGE_CLASS_C +unsigned int ia_css_sliced_param_terminal_get_descriptor_size( + const unsigned int nof_slice_param_sections, + const unsigned int nof_slices[], + const unsigned int nof_fragments) +{ + unsigned int descriptor_size = 0; + unsigned int fragment_index; + unsigned int nof_slices_total = 0; + + verifjmpexit(nof_slices != NULL); + + for (fragment_index = 0; + fragment_index < nof_fragments; fragment_index++) { + nof_slices_total += nof_slices[fragment_index]; + } + + descriptor_size = + sizeof(ia_css_sliced_param_terminal_t) + + nof_fragments*sizeof(ia_css_fragment_slice_desc_t) + + nof_slices_total*nof_slice_param_sections*sizeof( + ia_css_fragment_param_section_desc_t); + +EXIT: + return descriptor_size; +} + +IA_CSS_PARAMETERS_STORAGE_CLASS_C +ia_css_fragment_slice_desc_t * +ia_css_sliced_param_terminal_get_fragment_slice_desc( + const ia_css_sliced_param_terminal_t *sliced_param_terminal, + const unsigned int fragment_index +) +{ + ia_css_fragment_slice_desc_t *fragment_slice_desc_base; + ia_css_fragment_slice_desc_t *fragment_slice_desc = NULL; + + verifjmpexit(sliced_param_terminal != NULL); + + fragment_slice_desc_base = + (ia_css_fragment_slice_desc_t *) + (((const char *)sliced_param_terminal) + + sliced_param_terminal->fragment_slice_desc_offset); + fragment_slice_desc = &(fragment_slice_desc_base[fragment_index]); + +EXIT: + return fragment_slice_desc; 
+} + +IA_CSS_PARAMETERS_STORAGE_CLASS_C +ia_css_slice_param_section_desc_t * +ia_css_sliced_param_terminal_get_slice_param_section_desc( + const ia_css_sliced_param_terminal_t *sliced_param_terminal, + const unsigned int fragment_index, + const unsigned int slice_index, + const unsigned int section_index, + const unsigned int nof_slice_param_sections) +{ + ia_css_fragment_slice_desc_t *fragment_slice_desc; + ia_css_slice_param_section_desc_t *slice_param_section_desc_base; + ia_css_slice_param_section_desc_t *slice_param_section_desc = NULL; + + fragment_slice_desc = + ia_css_sliced_param_terminal_get_fragment_slice_desc( + sliced_param_terminal, + fragment_index + ); + verifjmpexit(fragment_slice_desc != NULL); + + slice_param_section_desc_base = + (ia_css_slice_param_section_desc_t *) + (((const char *)sliced_param_terminal) + + fragment_slice_desc->slice_section_desc_offset); + slice_param_section_desc = + &(slice_param_section_desc_base[( + slice_index * nof_slice_param_sections) + + section_index]); + +EXIT: + return slice_param_section_desc; +} + +IA_CSS_PARAMETERS_STORAGE_CLASS_C +int ia_css_sliced_param_terminal_create( + ia_css_sliced_param_terminal_t *sliced_param_terminal, + const uint16_t terminal_offset, + const uint16_t terminal_size, + const uint16_t is_input_terminal, + const unsigned int nof_slice_param_sections, + const unsigned int nof_slices[], + const unsigned int nof_fragments, + const uint32_t kernel_id) +{ + unsigned int fragment_index; + unsigned int nof_slices_total = 0; + + if (sliced_param_terminal == NULL) { + return -EFAULT; + } + + if (terminal_offset > (1<<15)) { + return -EINVAL; + } + + sliced_param_terminal->base.terminal_type = + is_input_terminal ? 
+ IA_CSS_TERMINAL_TYPE_PARAM_SLICED_IN : + IA_CSS_TERMINAL_TYPE_PARAM_SLICED_OUT; + sliced_param_terminal->base.parent_offset = + 0 - ((int16_t)terminal_offset); + sliced_param_terminal->base.size = terminal_size; + sliced_param_terminal->kernel_id = kernel_id; + /* set here to use below to find the pointer */ + sliced_param_terminal->fragment_slice_desc_offset = + sizeof(ia_css_sliced_param_terminal_t); + for (fragment_index = 0; + fragment_index < nof_fragments; fragment_index++) { + ia_css_fragment_slice_desc_t *fragment_slice_desc = + ia_css_sliced_param_terminal_get_fragment_slice_desc( + sliced_param_terminal, + fragment_index); + /* + * Error handling not required at this point + * since everything has been constructed/validated just above + */ + fragment_slice_desc->slice_count = nof_slices[fragment_index]; + fragment_slice_desc->slice_section_desc_offset = + sliced_param_terminal->fragment_slice_desc_offset + + (nof_fragments * sizeof( + ia_css_fragment_slice_desc_t)) + + (nof_slices_total * nof_slice_param_sections * sizeof( + ia_css_slice_param_section_desc_t)); + nof_slices_total += nof_slices[fragment_index]; + } + + return 0; +} + +/* Program terminal */ +IA_CSS_PARAMETERS_STORAGE_CLASS_C +unsigned int ia_css_program_terminal_get_descriptor_size( + const unsigned int nof_fragments, + const unsigned int nof_fragment_param_sections, + const unsigned int nof_kernel_fragment_sequencer_infos, + const unsigned int nof_command_objs) +{ + return sizeof(ia_css_program_terminal_t) + + nof_fragments * nof_fragment_param_sections * + sizeof(ia_css_fragment_param_section_desc_t) + + nof_fragments * nof_kernel_fragment_sequencer_infos * + sizeof(ia_css_kernel_fragment_sequencer_info_desc_t) + + nof_command_objs * sizeof( + ia_css_kernel_fragment_sequencer_command_desc_t); +} + +IA_CSS_PARAMETERS_STORAGE_CLASS_C +ia_css_fragment_param_section_desc_t * +ia_css_program_terminal_get_frgmnt_prm_sct_desc( + const ia_css_program_terminal_t *program_terminal, + const 
unsigned int fragment_index, + const unsigned int section_index, + const unsigned int nof_fragment_param_sections) +{ + ia_css_fragment_param_section_desc_t * + fragment_param_section_desc_base; + ia_css_fragment_param_section_desc_t * + fragment_param_section_desc = NULL; + + verifjmpexit(program_terminal != NULL); + verifjmpexit(section_index < nof_fragment_param_sections); + + fragment_param_section_desc_base = + (ia_css_fragment_param_section_desc_t *) + (((const char *)program_terminal) + + program_terminal->fragment_param_section_desc_offset); + fragment_param_section_desc = + &(fragment_param_section_desc_base[(fragment_index * + nof_fragment_param_sections) + section_index]); + +EXIT: + return fragment_param_section_desc; +} + +IA_CSS_PARAMETERS_STORAGE_CLASS_C +ia_css_kernel_fragment_sequencer_info_desc_t * +ia_css_program_terminal_get_kernel_frgmnt_seq_info_desc( + const ia_css_program_terminal_t *program_terminal, + const unsigned int fragment_index, + const unsigned int info_index, + const unsigned int nof_kernel_fragment_sequencer_infos) +{ + ia_css_kernel_fragment_sequencer_info_desc_t * + kernel_fragment_sequencer_info_desc_base; + ia_css_kernel_fragment_sequencer_info_desc_t * + kernel_fragment_sequencer_info_desc = NULL; + + verifjmpexit(program_terminal != NULL); + if (nof_kernel_fragment_sequencer_infos > 0) { + verifjmpexit(info_index < nof_kernel_fragment_sequencer_infos); + } + + kernel_fragment_sequencer_info_desc_base = + (ia_css_kernel_fragment_sequencer_info_desc_t *) + (((const char *)program_terminal) + + program_terminal->kernel_fragment_sequencer_info_desc_offset); + kernel_fragment_sequencer_info_desc = + &(kernel_fragment_sequencer_info_desc_base[(fragment_index * + nof_kernel_fragment_sequencer_infos) + info_index]); + +EXIT: + return kernel_fragment_sequencer_info_desc; +} + +IA_CSS_PARAMETERS_STORAGE_CLASS_C +int ia_css_program_terminal_create( + ia_css_program_terminal_t *program_terminal, + const uint16_t terminal_offset, + 
const uint16_t terminal_size, + const unsigned int nof_fragments, + const unsigned int nof_kernel_fragment_sequencer_infos, + const unsigned int nof_command_objs) +{ + if (program_terminal == NULL) { + return -EFAULT; + } + + if (terminal_offset > (1<<15)) { + return -EINVAL; + } + + program_terminal->base.terminal_type = IA_CSS_TERMINAL_TYPE_PROGRAM; + program_terminal->base.parent_offset = 0-((int16_t)terminal_offset); + program_terminal->base.size = terminal_size; + program_terminal->kernel_fragment_sequencer_info_desc_offset = + sizeof(ia_css_program_terminal_t); + program_terminal->fragment_param_section_desc_offset = + program_terminal->kernel_fragment_sequencer_info_desc_offset + + (nof_fragments * nof_kernel_fragment_sequencer_infos * + sizeof(ia_css_kernel_fragment_sequencer_info_desc_t)) + + (nof_command_objs * sizeof( + ia_css_kernel_fragment_sequencer_command_desc_t)); + + return 0; +} + +IA_CSS_PARAMETERS_STORAGE_CLASS_C +int ia_css_program_terminal_get_command_base_offset( + const ia_css_program_terminal_t *program_terminal, + const unsigned int nof_fragments, + const unsigned int nof_kernel_fragment_sequencer_infos, + const unsigned int commands_slots_used, + uint16_t *command_desc_offset) +{ + if (command_desc_offset == NULL) { + return -EFAULT; + } + + *command_desc_offset = 0; + + if (program_terminal == NULL) { + return -EFAULT; + } + + *command_desc_offset = + program_terminal->kernel_fragment_sequencer_info_desc_offset + + (nof_fragments * nof_kernel_fragment_sequencer_infos * + sizeof(ia_css_kernel_fragment_sequencer_info_desc_t)) + + (commands_slots_used * sizeof( + ia_css_kernel_fragment_sequencer_command_desc_t)); + + return 0; +} + +IA_CSS_PARAMETERS_STORAGE_CLASS_C +uint16_t *ia_css_program_terminal_get_line_count( + const ia_css_kernel_fragment_sequencer_command_desc_t + *kernel_fragment_sequencer_command_desc_base, + const unsigned int set_count) +{ + uint16_t *line_count = NULL; + + 
verifjmpexit(kernel_fragment_sequencer_command_desc_base != NULL); + line_count = + (uint16_t *)&(kernel_fragment_sequencer_command_desc_base[ + set_count >> 2].line_count[set_count & 0x00000003]); +EXIT: + return line_count; +} + +#endif /* __IA_CSS_TERMINAL_IMPL_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/vied_parameters/src/ia_css_terminal_manifest.c b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/vied_parameters/src/ia_css_terminal_manifest.c new file mode 100644 index 0000000000000..53c4708c7fc90 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/vied_parameters/src/ia_css_terminal_manifest.c @@ -0,0 +1,20 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+*/ + +#ifdef __INLINE_PARAMETERS__ +#include "storage_class.h" +STORAGE_CLASS_INLINE int __ia_css_param_avoid_warning_on_empty_file(void) { return 0; } +#else /* __INLINE_PARAMETERS__ */ +#include "ia_css_terminal_manifest_impl.h" +#endif /* __INLINE_PARAMETERS__ */ diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/vied_parameters/src/ia_css_terminal_manifest_impl.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/vied_parameters/src/ia_css_terminal_manifest_impl.h new file mode 100644 index 0000000000000..39734136b117b --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/vied_parameters/src/ia_css_terminal_manifest_impl.h @@ -0,0 +1,347 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+*/ + +#ifndef __IA_CSS_TERMINAL_MANIFEST_IMPL_H +#define __IA_CSS_TERMINAL_MANIFEST_IMPL_H + +#include "ia_css_terminal_manifest.h" +#include "error_support.h" +#include "assert_support.h" +#include "storage_class.h" + +STORAGE_CLASS_INLINE void __terminal_manifest_dummy_check_alignment(void) +{ + COMPILATION_ERROR_IF( + SIZE_OF_PARAM_TERMINAL_MANIFEST_STRUCT_IN_BITS != + (CHAR_BIT * sizeof(ia_css_param_terminal_manifest_t))); + + COMPILATION_ERROR_IF(0 != + sizeof(ia_css_param_terminal_manifest_t) % sizeof(uint64_t)); + + COMPILATION_ERROR_IF( + SIZE_OF_PARAM_TERMINAL_MANIFEST_SEC_STRUCT_IN_BITS != + (CHAR_BIT * sizeof(ia_css_param_manifest_section_desc_t))); + + COMPILATION_ERROR_IF(0 != + sizeof(ia_css_param_manifest_section_desc_t) % + sizeof(uint64_t)); + + COMPILATION_ERROR_IF( + SIZE_OF_SPATIAL_PARAM_TERM_MAN_STRUCT_IN_BITS != + (CHAR_BIT * sizeof(ia_css_spatial_param_terminal_manifest_t))); + + COMPILATION_ERROR_IF(0 != + sizeof(ia_css_spatial_param_terminal_manifest_t) % + sizeof(uint64_t)); + + COMPILATION_ERROR_IF( + SIZE_OF_FRAME_GRID_PARAM_MAN_SEC_STRUCT_IN_BITS != + (CHAR_BIT * sizeof( + ia_css_frame_grid_param_manifest_section_desc_t))); + + COMPILATION_ERROR_IF(0 != + sizeof(ia_css_frame_grid_param_manifest_section_desc_t) % + sizeof(uint64_t)); + + COMPILATION_ERROR_IF( + SIZE_OF_PROG_TERM_MAN_STRUCT_IN_BITS != + (CHAR_BIT * sizeof(ia_css_program_terminal_manifest_t))); + + COMPILATION_ERROR_IF(0 != + sizeof(ia_css_program_terminal_manifest_t)%sizeof(uint64_t)); + + COMPILATION_ERROR_IF( + SIZE_OF_FRAG_PARAM_MAN_SEC_STRUCT_IN_BITS != + (CHAR_BIT * sizeof( + ia_css_fragment_param_manifest_section_desc_t))); + + COMPILATION_ERROR_IF(0 != + sizeof(ia_css_fragment_param_manifest_section_desc_t) % + sizeof(uint64_t)); + + COMPILATION_ERROR_IF( + SIZE_OF_KERNEL_FRAG_SEQ_INFO_MAN_STRUCT_IN_BITS != + (CHAR_BIT * sizeof( + ia_css_kernel_fragment_sequencer_info_manifest_desc_t)) + ); + + COMPILATION_ERROR_IF(0 != sizeof( + 
ia_css_kernel_fragment_sequencer_info_manifest_desc_t) % + sizeof(uint64_t)); + + COMPILATION_ERROR_IF( + SIZE_OF_PARAM_TERMINAL_MANIFEST_STRUCT_IN_BITS != + (CHAR_BIT * sizeof(ia_css_sliced_param_terminal_manifest_t))); + + COMPILATION_ERROR_IF(0 != + sizeof(ia_css_sliced_param_terminal_manifest_t) % + sizeof(uint64_t)); + + COMPILATION_ERROR_IF( + SIZE_OF_SLICED_PARAM_MAN_SEC_STRUCT_IN_BITS != + (CHAR_BIT * sizeof + (ia_css_sliced_param_manifest_section_desc_t))); + + COMPILATION_ERROR_IF(0 != + sizeof(ia_css_sliced_param_manifest_section_desc_t) % + sizeof(uint64_t)); +} + +/* Parameter Terminal */ +IA_CSS_PARAMETERS_STORAGE_CLASS_C +unsigned int ia_css_param_terminal_manifest_get_size( + const unsigned int nof_sections) +{ + + return sizeof(ia_css_param_terminal_manifest_t) + + nof_sections*sizeof(ia_css_param_manifest_section_desc_t); +} + +IA_CSS_PARAMETERS_STORAGE_CLASS_C +int ia_css_param_terminal_manifest_init( + ia_css_param_terminal_manifest_t *param_terminal, + const uint16_t section_count) +{ + if (param_terminal == NULL) { + return -EFAULT; + } + + param_terminal->param_manifest_section_desc_count = section_count; + param_terminal->param_manifest_section_desc_offset = sizeof( + ia_css_param_terminal_manifest_t); + + return 0; +} + +IA_CSS_PARAMETERS_STORAGE_CLASS_C +ia_css_param_manifest_section_desc_t * +ia_css_param_terminal_manifest_get_prm_sct_desc( + const ia_css_param_terminal_manifest_t *param_terminal_manifest, + const unsigned int section_index) +{ + ia_css_param_manifest_section_desc_t *param_manifest_section_base; + ia_css_param_manifest_section_desc_t * + param_manifest_section_desc = NULL; + + verifjmpexit(param_terminal_manifest != NULL); + + param_manifest_section_base = + (ia_css_param_manifest_section_desc_t *) + (((const char *)param_terminal_manifest) + + param_terminal_manifest->param_manifest_section_desc_offset); + + param_manifest_section_desc = + &(param_manifest_section_base[section_index]); + +EXIT: + return 
param_manifest_section_desc; +} + +/* Spatial Parameter Terminal */ +IA_CSS_PARAMETERS_STORAGE_CLASS_C +unsigned int ia_css_spatial_param_terminal_manifest_get_size( + const unsigned int nof_frame_param_sections) +{ + return sizeof(ia_css_spatial_param_terminal_manifest_t) + + nof_frame_param_sections * sizeof( + ia_css_frame_grid_param_manifest_section_desc_t); +} + +IA_CSS_PARAMETERS_STORAGE_CLASS_C +int ia_css_spatial_param_terminal_manifest_init( + ia_css_spatial_param_terminal_manifest_t *spatial_param_terminal, + const uint16_t section_count) +{ + if (spatial_param_terminal == NULL) { + return -EFAULT; + } + + spatial_param_terminal-> + frame_grid_param_manifest_section_desc_count = section_count; + spatial_param_terminal-> + frame_grid_param_manifest_section_desc_offset = + sizeof(ia_css_spatial_param_terminal_manifest_t); + + return 0; +} + +IA_CSS_PARAMETERS_STORAGE_CLASS_C +ia_css_frame_grid_param_manifest_section_desc_t * +ia_css_spatial_param_terminal_manifest_get_frm_grid_prm_sct_desc( + const ia_css_spatial_param_terminal_manifest_t * + spatial_param_terminal_manifest, + const unsigned int section_index) +{ + ia_css_frame_grid_param_manifest_section_desc_t * + frame_param_manifest_section_base; + ia_css_frame_grid_param_manifest_section_desc_t * + frame_param_manifest_section_desc = NULL; + + verifjmpexit(spatial_param_terminal_manifest != NULL); + + frame_param_manifest_section_base = + (ia_css_frame_grid_param_manifest_section_desc_t *) + (((const char *)spatial_param_terminal_manifest) + + spatial_param_terminal_manifest-> + frame_grid_param_manifest_section_desc_offset); + frame_param_manifest_section_desc = + &(frame_param_manifest_section_base[section_index]); + +EXIT: + return frame_param_manifest_section_desc; +} + +/* Sliced Terminal */ +IA_CSS_PARAMETERS_STORAGE_CLASS_C +unsigned int ia_css_sliced_param_terminal_manifest_get_size( + const unsigned int nof_slice_param_sections) +{ + return sizeof(ia_css_spatial_param_terminal_manifest_t) + + 
nof_slice_param_sections * + sizeof(ia_css_sliced_param_manifest_section_desc_t); +} + +IA_CSS_PARAMETERS_STORAGE_CLASS_C +int ia_css_sliced_param_terminal_manifest_init( + ia_css_sliced_param_terminal_manifest_t *sliced_param_terminal, + const uint16_t section_count) +{ + if (sliced_param_terminal == NULL) { + return -EFAULT; + } + + sliced_param_terminal->sliced_param_section_count = section_count; + sliced_param_terminal->sliced_param_section_offset = + sizeof(ia_css_sliced_param_terminal_manifest_t); + + return 0; +} + +IA_CSS_PARAMETERS_STORAGE_CLASS_C +ia_css_sliced_param_manifest_section_desc_t * +ia_css_sliced_param_terminal_manifest_get_sliced_prm_sct_desc( + const ia_css_sliced_param_terminal_manifest_t * + sliced_param_terminal_manifest, + const unsigned int section_index) +{ + ia_css_sliced_param_manifest_section_desc_t * + sliced_param_manifest_section_base; + ia_css_sliced_param_manifest_section_desc_t * + sliced_param_manifest_section_desc = NULL; + + verifjmpexit(sliced_param_terminal_manifest != NULL); + + sliced_param_manifest_section_base = + (ia_css_sliced_param_manifest_section_desc_t *) + (((const char *)sliced_param_terminal_manifest) + + sliced_param_terminal_manifest-> + sliced_param_section_offset); + sliced_param_manifest_section_desc = + &(sliced_param_manifest_section_base[section_index]); + +EXIT: + return sliced_param_manifest_section_desc; +} + +/* Program Terminal */ +IA_CSS_PARAMETERS_STORAGE_CLASS_C +unsigned int ia_css_program_terminal_manifest_get_size( + const unsigned int nof_fragment_param_sections, + const unsigned int nof_kernel_fragment_sequencer_infos) +{ + return sizeof(ia_css_program_terminal_manifest_t) + + nof_fragment_param_sections * + sizeof(ia_css_fragment_param_manifest_section_desc_t) + + nof_kernel_fragment_sequencer_infos * + sizeof(ia_css_kernel_fragment_sequencer_info_manifest_desc_t); +} + +IA_CSS_PARAMETERS_STORAGE_CLASS_C +int ia_css_program_terminal_manifest_init( + ia_css_program_terminal_manifest_t 
*program_terminal, + const uint16_t fragment_param_section_count, + const uint16_t kernel_fragment_seq_info_section_count) +{ + if (program_terminal == NULL) { + return -EFAULT; + } + + program_terminal->fragment_param_manifest_section_desc_count = + fragment_param_section_count; + program_terminal->fragment_param_manifest_section_desc_offset = + sizeof(ia_css_program_terminal_manifest_t); + + program_terminal->kernel_fragment_sequencer_info_manifest_info_count = + kernel_fragment_seq_info_section_count; + program_terminal->kernel_fragment_sequencer_info_manifest_info_offset = + sizeof(ia_css_program_terminal_manifest_t) + + fragment_param_section_count*sizeof( + ia_css_fragment_param_manifest_section_desc_t); + + return 0; +} + +IA_CSS_PARAMETERS_STORAGE_CLASS_C +ia_css_fragment_param_manifest_section_desc_t * +ia_css_program_terminal_manifest_get_frgmnt_prm_sct_desc( + const ia_css_program_terminal_manifest_t *program_terminal_manifest, + const unsigned int section_index) +{ + ia_css_fragment_param_manifest_section_desc_t * + fragment_param_manifest_section_base; + ia_css_fragment_param_manifest_section_desc_t * + fragment_param_manifest_section = NULL; + + verifjmpexit(program_terminal_manifest != NULL); + + fragment_param_manifest_section_base = + (ia_css_fragment_param_manifest_section_desc_t *) + (((const char *)program_terminal_manifest) + + program_terminal_manifest-> + fragment_param_manifest_section_desc_offset); + fragment_param_manifest_section = + &(fragment_param_manifest_section_base[section_index]); + +EXIT: + return fragment_param_manifest_section; +} + +IA_CSS_PARAMETERS_STORAGE_CLASS_C +ia_css_kernel_fragment_sequencer_info_manifest_desc_t * +ia_css_program_terminal_manifest_get_kernel_frgmnt_seq_info_desc( + const ia_css_program_terminal_manifest_t *program_terminal_manifest, + const unsigned int info_index) +{ + ia_css_kernel_fragment_sequencer_info_manifest_desc_t * + kernel_manifest_fragment_sequencer_info_manifest_desc_base; + 
ia_css_kernel_fragment_sequencer_info_manifest_desc_t * + kernel_manifest_fragment_sequencer_info_manifest_desc = NULL; + + verifjmpexit(program_terminal_manifest != NULL); + + kernel_manifest_fragment_sequencer_info_manifest_desc_base = + (ia_css_kernel_fragment_sequencer_info_manifest_desc_t *) + (((const char *)program_terminal_manifest) + + program_terminal_manifest-> + kernel_fragment_sequencer_info_manifest_info_offset); + + kernel_manifest_fragment_sequencer_info_manifest_desc = + &(kernel_manifest_fragment_sequencer_info_manifest_desc_base[ + info_index]); + +EXIT: + return kernel_manifest_fragment_sequencer_info_manifest_desc; +} + +#endif /* __IA_CSS_TERMINAL_MANIFEST_IMPL_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/vied_parameters/vied_parameters.mk b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/vied_parameters/vied_parameters.mk new file mode 100644 index 0000000000000..834a1a4b2bab6 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/vied_parameters/vied_parameters.mk @@ -0,0 +1,76 @@ +# # # +# Support for Intel Camera Imaging ISP subsystem. +# Copyright (c) 2010 - 2018, Intel Corporation. +# +# This program is free software; you can redistribute it and/or modify it +# under the terms and conditions of the GNU General Public License, +# version 2, as published by the Free Software Foundation. +# +# This program is distributed in the hope it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +# FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU General Public License for +# more details +# +# +# MODULE is VIED_PARAMETERS + +VIED_PARAMETERS_DIR=$${MODULES_DIR}/vied_parameters + +VIED_PARAMETERS_INTERFACE=$(VIED_PARAMETERS_DIR)/interface +VIED_PARAMETERS_SOURCES=$(VIED_PARAMETERS_DIR)/src +VIED_PARAMETERS_EXTINCLUDE = $${MODULES_DIR}/support + +VIED_PARAMETERS_DYNAMIC_HOST_FILES += $(VIED_PARAMETERS_SOURCES)/ia_css_terminal.c +VIED_PARAMETERS_STATIC_HOST_FILES += $(VIED_PARAMETERS_SOURCES)/ia_css_terminal_manifest.c + +VIED_PARAMETERS_HOST_FILES = $(VIED_PARAMETERS_DYNAMIC_HOST_FILES) +VIED_PARAMETERS_HOST_FILES += $(VIED_PARAMETERS_STATIC_HOST_FILES) + +VIED_PARAMETERS_ISA_CLIENT_HOST_FILES = $(VIED_PARAMETERS_SOURCES)/ia_css_isys_process_group.c +VIED_PARAMETERS_ISA_CLIENT_HOST_FILES += $(VIED_PARAMETERS_DIR)/client/ia_css_isys_parameter_client.c + +VIED_PARAMETERS_DYNAMIC_FW_FILES += $(VIED_PARAMETERS_SOURCES)/ia_css_terminal.c +VIED_PARAMETERS_STATIC_FW_FILES += $(VIED_PARAMETERS_SOURCES)/ia_css_terminal_manifest.c + +VIED_PARAMETERS_FW_FILES = $(VIED_PARAMETERS_DYNAMIC_HOST_FILES) +VIED_PARAMETERS_FW_FILES += $(VIED_PARAMETERS_STATIC_HOST_FILES) +VIED_PARAMETERS_SUPPORT_CPPFLAGS = -I$(VIED_PARAMETERS_DIR)/support +VIED_PARAMETERS_SUPPORT_CPPFLAGS += -I$(VIED_PARAMETERS_DIR)/support/$(IPU_SYSVER) +VIED_PARAMETERS_ISA_CLIENT_HOST_CPPFLAGS = -I$(VIED_PARAMETERS_DIR)/client +VIED_PARAMETERS_PSA_UTILS_HOST_FILES = $(MODULES_DIR)/vied_parameters/support/ia_css_psys_parameter_utils.c +VIED_PARAMETERS_PSA_UTILS_HOST_FILES += $(MODULES_DIR)/vied_parameters/support/$(IPU_SYSVER)/ia_css_psys_parameter_utils_dep.c + +VIED_PARAMETERS_UTILS_HOST_CPPFLAGS = $(VIED_PARAMETERS_SUPPORT_CPPFLAGS) + +VIED_PARAMETERS_ISA_UTILS_HOST_FILES = $(MODULES_DIR)/vied_parameters/support/ia_css_isys_parameter_utils.c +VIED_PARAMETERS_ISA_UTILS_HOST_FILES += $(MODULES_DIR)/vied_parameters/support/$(IPU_SYSVER)/ia_css_isys_parameter_utils_dep.c + +VIED_PARAMETERS_PRINT_CPPFLAGS += -I$(VIED_PARAMETERS_DIR)/print/interface 
+VIED_PARAMETERS_PRINT_FILES += $(VIED_PARAMETERS_DIR)/print/src/ia_css_terminal_print.c + +# VIED_PARAMETERS Trace Log Level = VIED_PARAMETERS_TRACE_LOG_LEVEL_NORMAL +# Other options are [VIED_PARAMETERS_TRACE_LOG_LEVEL_OFF, VIED_PARAMETERS_TRACE_LOG_LEVEL_DEBUG] +ifndef VIED_PARAMETERS_TRACE_CONFIG_HOST + VIED_PARAMETERS_TRACE_CONFIG_HOST=VIED_PARAMETERS_TRACE_LOG_LEVEL_NORMAL +endif +ifndef VIED_PARAMETERS_TRACE_CONFIG_FW + VIED_PARAMETERS_TRACE_CONFIG_FW=VIED_PARAMETERS_TRACE_LOG_LEVEL_NORMAL +endif + +VIED_PARAMETERS_HOST_CPPFLAGS += -DVIED_PARAMETERS_TRACE_CONFIG=$(VIED_PARAMETERS_TRACE_CONFIG_HOST) +VIED_PARAMETERS_FW_CPPFLAGS += -DVIED_PARAMETERS_TRACE_CONFIG=$(VIED_PARAMETERS_TRACE_CONFIG_FW) + +VIED_PARAMETERS_HOST_CPPFLAGS += -I$(VIED_PARAMETERS_INTERFACE) +VIED_PARAMETERS_HOST_CPPFLAGS += -I$(VIED_PARAMETERS_SOURCES) +VIED_PARAMETERS_HOST_CPPFLAGS += -I$(VIED_PARAMETERS_EXTINCLUDE) +VIED_PARAMETERS_HOST_CPPFLAGS += $(VIED_PARAMETERS_SUPPORT_CPPFLAGS) +VIED_PARAMETERS_FW_CPPFLAGS += -I$(VIED_PARAMETERS_INTERFACE) +VIED_PARAMETERS_FW_CPPFLAGS += -I$(VIED_PARAMETERS_SOURCES) +VIED_PARAMETERS_FW_CPPFLAGS += -I$(VIED_PARAMETERS_EXTINCLUDE) +VIED_PARAMETERS_FW_CPPFLAGS += $(VIED_PARAMETERS_SUPPORT_CPPFLAGS) + +#For IPU interface +include $(MODULES_DIR)/fw_abi_common_types/cpu/fw_abi_cpu_types.mk +VIED_PARAMETERS_HOST_CPPFLAGS += $(FW_ABI_COMMON_TYPES_HOST_CPPFLAGS) + +VIED_PARAMETERS_FW_CPPFLAGS += $(FW_ABI_COMMON_TYPES_FW_CPPFLAGS) diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/libcsspsys2600.c b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/libcsspsys2600.c new file mode 100644 index 0000000000000..805d1542c1632 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/libcsspsys2600.c @@ -0,0 +1,488 @@ +/* + * Copyright (c) 2015--2018 Intel Corporation. All Rights Reserved. 
+ * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License version + * 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#include +#include + +#include + +#include "ipu.h" +#include "ipu-mmu.h" +#include "ipu-psys.h" +#include "ipu-wrapper.h" +#include "ipu-fw-psys.h" +#include "libcsspsys2600.h" + +#include +#include +#include +#include +#include + +int ipu_fw_psys_pg_start(struct ipu_psys_kcmd *kcmd) +{ + return -ia_css_process_group_start((ia_css_process_group_t *) + kcmd->kpg->pg); +} +EXPORT_SYMBOL_GPL(ipu_fw_psys_pg_start); + +int ipu_fw_psys_pg_disown(struct ipu_psys_kcmd *kcmd) +{ + return -ia_css_process_group_disown((ia_css_process_group_t *) + kcmd->kpg->pg); +} +EXPORT_SYMBOL_GPL(ipu_fw_psys_pg_disown); + +int ipu_fw_psys_pg_abort(struct ipu_psys_kcmd *kcmd) +{ + int rval; + + rval = ia_css_process_group_stop((ia_css_process_group_t *) + kcmd->kpg->pg); + if (rval) { + dev_err(&kcmd->fh->psys->adev->dev, + "failed to abort kcmd!\n"); + kcmd->pg_user = NULL; + rval = -EIO; + /* TODO: need to reset PSYS by power cycling it */ + } + return rval; +} +EXPORT_SYMBOL_GPL(ipu_fw_psys_pg_abort); + +int ipu_fw_psys_pg_submit(struct ipu_psys_kcmd *kcmd) +{ + return -ia_css_process_group_submit((ia_css_process_group_t *) + kcmd->kpg->pg); +} +EXPORT_SYMBOL_GPL(ipu_fw_psys_pg_submit); + +static void *syscom_buffer; +static struct ia_css_syscom_config *syscom_config; +static struct ia_css_psys_server_init *server_init; + +int ipu_fw_psys_rcv_event(struct ipu_psys *psys, + struct ipu_fw_psys_event *event) +{ + return ia_css_psys_event_queue_receive(psys_syscom, + IA_CSS_PSYS_EVENT_QUEUE_MAIN_ID, + (struct ia_css_psys_event_s *)event); +} 
+EXPORT_SYMBOL_GPL(ipu_fw_psys_rcv_event); + +int ipu_fw_psys_terminal_set(struct ipu_fw_psys_terminal *terminal, + int terminal_idx, + struct ipu_psys_kcmd *kcmd, + u32 buffer, + unsigned size) +{ + ia_css_terminal_type_t type; + u32 buffer_state; + + type = ia_css_terminal_get_type((ia_css_terminal_t *)terminal); + + switch (type) { + case IA_CSS_TERMINAL_TYPE_PARAM_CACHED_IN: + case IA_CSS_TERMINAL_TYPE_PARAM_CACHED_OUT: + case IA_CSS_TERMINAL_TYPE_PARAM_SPATIAL_IN: + case IA_CSS_TERMINAL_TYPE_PARAM_SPATIAL_OUT: + case IA_CSS_TERMINAL_TYPE_PARAM_SLICED_IN: + case IA_CSS_TERMINAL_TYPE_PARAM_SLICED_OUT: + case IA_CSS_TERMINAL_TYPE_PROGRAM: + buffer_state = IA_CSS_BUFFER_UNDEFINED; + break; + case IA_CSS_TERMINAL_TYPE_PARAM_STREAM: + case IA_CSS_TERMINAL_TYPE_DATA_IN: + case IA_CSS_TERMINAL_TYPE_STATE_IN: + buffer_state = IA_CSS_BUFFER_FULL; + break; + case IA_CSS_TERMINAL_TYPE_DATA_OUT: + case IA_CSS_TERMINAL_TYPE_STATE_OUT: + buffer_state = IA_CSS_BUFFER_EMPTY; + break; + default: + dev_err(&kcmd->fh->psys->adev->dev, + "unknown terminal type: 0x%x\n", type); + return -EAGAIN; + } + + if (type == IA_CSS_TERMINAL_TYPE_DATA_IN || + type == IA_CSS_TERMINAL_TYPE_DATA_OUT) { + ia_css_frame_t *frame; + + if (ia_css_data_terminal_set_connection_type( + (ia_css_data_terminal_t *)terminal, + IA_CSS_CONNECTION_MEMORY)) + return -EIO; + frame = ia_css_data_terminal_get_frame( + (ia_css_data_terminal_t *)terminal); + if (!frame) + return -EIO; + + if (ia_css_frame_set_data_bytes(frame, size)) + return -EIO; + } + + return -ia_css_process_group_attach_buffer( + (ia_css_process_group_t *)kcmd->kpg->pg, buffer, + buffer_state, terminal_idx); +} +EXPORT_SYMBOL_GPL(ipu_fw_psys_terminal_set); + +void ipu_fw_psys_pg_dump(struct ipu_psys *psys, + struct ipu_psys_kcmd *kcmd, + const char *note) +{ + ia_css_process_group_t *pg = (ia_css_process_group_t *)kcmd->kpg->pg; + ia_css_program_group_ID_t pgid = + ia_css_process_group_get_program_group_ID(pg); + uint8_t processes = 
ia_css_process_group_get_process_count( + (ia_css_process_group_t *)kcmd->kpg->pg); + unsigned int p, chn, mem; + + dev_dbg(&psys->adev->dev, "%s %s pgid %i has %i processes\n", + __func__, note, pgid, processes); + for (p = 0; p < processes; p++) { + ia_css_process_t *process = + ia_css_process_group_get_process(pg, p); + int cell = ia_css_process_get_cell(process); + dev_dbg(&psys->adev->dev, + "%s pgid %i process %i cell %i cell_bitmap = 0x%x size = %zu\n", + __func__, pgid, p, + cell, + ia_css_process_get_cells_bitmap(process), + ia_css_process_get_size(process)); + dev_dbg(&psys->adev->dev, + "%s pgid %i process %i kernel bitmap 0x%llx \n", + __func__, pgid, p, + ia_css_process_get_kernel_bitmap(process)); + for (mem = 0; mem < VIED_NCI_N_DATA_MEM_TYPE_ID; mem++ ) { + unsigned int mem_id = process->ext_mem_id[mem]; + dev_dbg(&psys->adev->dev, + "%s pgid %i process %i index %u type %d id %d offset 0x%x \n", + __func__, pgid, p, mem, + vied_nci_cell_get_mem_type(cell, mem), + mem_id, process->ext_mem_offset[mem]); + } + for (chn = 0; chn < VIED_NCI_N_DEV_CHN_ID; chn++ ) { + dev_dbg(&psys->adev->dev, + "%s pgid %i process %i dev_chn[%u] = %i\n", + __func__, pgid, p, chn, + ia_css_process_get_dev_chn(process, chn)); + } + } +} +EXPORT_SYMBOL_GPL(ipu_fw_psys_pg_dump); + +int ipu_fw_psys_pg_get_id(struct ipu_psys_kcmd *kcmd) +{ + return ia_css_process_group_get_program_group_ID( + (ia_css_process_group_t *)kcmd->kpg->pg); +} +EXPORT_SYMBOL_GPL(ipu_fw_psys_pg_get_id); + +int ipu_fw_psys_pg_get_terminal_count(struct ipu_psys_kcmd *kcmd) +{ + return ia_css_process_group_get_terminal_count( + (ia_css_process_group_t *)kcmd->kpg->pg); +} +EXPORT_SYMBOL_GPL(ipu_fw_psys_pg_get_terminal_count); + +int ipu_fw_psys_pg_get_size(struct ipu_psys_kcmd *kcmd) +{ + return ia_css_process_group_get_size((ia_css_process_group_t *) + kcmd->kpg->pg); +} +EXPORT_SYMBOL_GPL(ipu_fw_psys_pg_get_size); + +int ipu_fw_psys_pg_set_ipu_vaddress(struct ipu_psys_kcmd *kcmd, + dma_addr_t vaddress) 
+{ + return ia_css_process_group_set_ipu_vaddress((ia_css_process_group_t *) + kcmd->kpg->pg, vaddress); +} +EXPORT_SYMBOL_GPL(ipu_fw_psys_pg_set_ipu_vaddress); + +int ipu_fw_psys_pg_load_cycles(struct ipu_psys_kcmd *kcmd) +{ + return ia_css_process_group_get_pg_load_cycles( + (ia_css_process_group_t *)kcmd->kpg->pg); +} +EXPORT_SYMBOL_GPL(ipu_fw_psys_pg_load_cycles); + +int ipu_fw_psys_pg_init_cycles(struct ipu_psys_kcmd *kcmd) +{ + return ia_css_process_group_get_pg_init_cycles( + (ia_css_process_group_t *)kcmd->kpg->pg); +} +EXPORT_SYMBOL_GPL(ipu_fw_psys_pg_init_cycles); + +int ipu_fw_psys_pg_processing_cycles(struct ipu_psys_kcmd *kcmd) +{ + return ia_css_process_group_get_pg_processing_cycles( + (ia_css_process_group_t *)kcmd->kpg->pg); +} +EXPORT_SYMBOL_GPL(ipu_fw_psys_pg_processing_cycles); + +struct ipu_fw_psys_terminal * +ipu_fw_psys_pg_get_terminal(struct ipu_psys_kcmd *kcmd, int index) +{ + return (struct ipu_fw_psys_terminal *)ia_css_process_group_get_terminal( + (ia_css_process_group_t *)kcmd->kpg->pg, index); +} +EXPORT_SYMBOL_GPL(ipu_fw_psys_pg_get_terminal); + +void ipu_fw_psys_pg_set_token(struct ipu_psys_kcmd *kcmd, u64 token) +{ + ia_css_process_group_set_token((ia_css_process_group_t *)kcmd->kpg->pg, + token); +} +EXPORT_SYMBOL_GPL(ipu_fw_psys_pg_set_token); + +int ipu_fw_psys_pg_get_protocol( + struct ipu_psys_kcmd *kcmd) +{ + return ia_css_process_group_get_protocol_version( + (ia_css_process_group_t *)kcmd->kpg->pg); +} +EXPORT_SYMBOL_GPL(ipu_fw_psys_pg_get_protocol); + +static int libcsspsys2600_init(void); +int ipu_fw_psys_open(struct ipu_psys *psys) +{ + bool opened; + int retry = IPU_PSYS_OPEN_RETRY; + + ipu_wrapper_init(PSYS_MMID, &psys->adev->dev, + psys->pdata->base); + /* When fw psys open, make sure csslib init first */ + libcsspsys2600_init(); + + server_init->icache_prefetch_sp = psys->icache_prefetch_sp; + server_init->icache_prefetch_isp = psys->icache_prefetch_isp; + + psys_syscom = ia_css_psys_open(syscom_buffer, 
syscom_config); + if (!psys_syscom) { + dev_err(&psys->adev->dev, + "psys library open failed\n"); + return -ENODEV; + } + do { + opened = ia_css_psys_open_is_ready(psys_syscom); + if (opened) + break; + usleep_range(IPU_PSYS_OPEN_TIMEOUT_US, + IPU_PSYS_OPEN_TIMEOUT_US + 10); + retry--; + } while (retry > 0); + + if (!retry && !opened) { + dev_err(&psys->adev->dev, + "psys library open ready failed\n"); + ia_css_psys_close(psys_syscom); + ia_css_psys_release(psys_syscom, 1); + psys_syscom = NULL; + return -ENODEV; + } + + return 0; +} +EXPORT_SYMBOL_GPL(ipu_fw_psys_open); + +int ipu_fw_psys_close(struct ipu_psys *psys) +{ + int rval; + unsigned int retry = IPU_PSYS_CLOSE_TIMEOUT; + + if (!psys_syscom) + return 0; + + if (ia_css_psys_close(psys_syscom)) { + dev_err(&psys->adev->dev, + "psys library close ready failed\n"); + return 0; + } + + do { + rval = ia_css_psys_release(psys_syscom, 0); + if (rval && rval != -EBUSY) { + dev_dbg(&psys->adev->dev, "psys library release failed\n"); + break; + } + usleep_range(IPU_PSYS_CLOSE_TIMEOUT_US, + IPU_PSYS_CLOSE_TIMEOUT_US + 10); + } while (rval && --retry); + + psys_syscom = NULL; + + return 0; +} +EXPORT_SYMBOL_GPL(ipu_fw_psys_close); + +u64 ipu_fw_psys_pg_get_token(struct ipu_psys_kcmd *kcmd) +{ + return 0; +} +EXPORT_SYMBOL_GPL(ipu_fw_psys_pg_get_token); + +static const struct ipu_fw_resource_definitions default_defs = { + .cells = vied_nci_cell_type, + .num_cells = VIED_NCI_N_CELL_ID, + .num_cells_type = VIED_NCI_N_CELL_TYPE_ID, + .dev_channels = vied_nci_dev_chn_size, + .num_dev_channels = VIED_NCI_N_DEV_CHN_ID, + + .num_ext_mem_types = VIED_NCI_N_DATA_MEM_TYPE_ID, + .num_ext_mem_ids = VIED_NCI_N_MEM_ID, + .ext_mem_ids = vied_nci_mem_size, + + .cell_mem_row = VIED_NCI_N_MEM_TYPE_ID, + .cell_mem = (enum ipu_mem_id *)vied_nci_cell_mem, +}; + +const struct ipu_fw_resource_definitions *res_defs = &default_defs; +EXPORT_SYMBOL_GPL(res_defs); + +int ipu_fw_psys_set_process_cell_id(struct ipu_fw_psys_process *ptr, u8 index, 
+ u8 value) +{ + return ia_css_process_set_cell((ia_css_process_t *)ptr, + (vied_nci_cell_ID_t)value); +} +EXPORT_SYMBOL_GPL(ipu_fw_psys_set_process_cell_id); + +u8 ipu_fw_psys_get_process_cell_id(struct ipu_fw_psys_process *ptr, u8 index) +{ + return ia_css_process_get_cell((ia_css_process_t *)ptr); +} +EXPORT_SYMBOL_GPL(ipu_fw_psys_get_process_cell_id); + +int ipu_fw_psys_clear_process_cell(struct ipu_fw_psys_process *ptr) +{ + return ia_css_process_clear_cell((ia_css_process_t *)ptr); +} +EXPORT_SYMBOL_GPL(ipu_fw_psys_clear_process_cell); + +int ipu_fw_psys_set_process_dev_chn_offset(struct ipu_fw_psys_process *ptr, + u16 offset, u16 value) +{ + return ia_css_process_set_dev_chn((ia_css_process_t *)ptr, + (vied_nci_dev_chn_ID_t)offset, + (vied_nci_resource_size_t)value); +} +EXPORT_SYMBOL_GPL(ipu_fw_psys_set_process_dev_chn_offset); + +int ipu_fw_psys_set_process_ext_mem(struct ipu_fw_psys_process *ptr, + u16 type_id, u16 mem_id, u16 offset) +{ + return ia_css_process_set_ext_mem((ia_css_process_t *)ptr, mem_id, offset); +} +EXPORT_SYMBOL_GPL(ipu_fw_psys_set_process_ext_mem); + +int ipu_fw_psys_get_program_manifest_by_process( + struct ipu_fw_generic_program_manifest *gen_pm, + const struct ipu_fw_psys_program_group_manifest *pg_manifest, + struct ipu_fw_psys_process *process) +{ + ia_css_program_ID_t process_id = + ia_css_process_get_program_ID( + (const ia_css_process_t *)process); + int programs = + ia_css_program_group_manifest_get_program_count( + (const ia_css_program_group_manifest_t *)pg_manifest); + int i; + + for (i = 0; i < programs; i++) { + ia_css_program_ID_t program_id; + ia_css_program_manifest_t *pm = + ia_css_program_group_manifest_get_prgrm_mnfst( + (const ia_css_program_group_manifest_t *) + pg_manifest, i); + if (!pm) + continue; + program_id = ia_css_program_manifest_get_program_ID(pm); + if (program_id == process_id) { + gen_pm->dev_chn_size = (u16 *)pm->dev_chn_size; + gen_pm->ext_mem_size = (u16 *)pm->ext_mem_size; + gen_pm->cell_id = 
pm->cell_id; + gen_pm->cell_type_id = pm->cell_type_id; + return 0; + } + } + return -ENOENT; +} +EXPORT_SYMBOL_GPL(ipu_fw_psys_get_program_manifest_by_process); + +static int libcsspsys2600_init(void) +{ + int rval; + static bool csslib_init; + + if (csslib_init) + return 0; + + syscom_buffer = kzalloc(ia_css_sizeof_psys(NULL), GFP_KERNEL); + if (!syscom_buffer) + return -ENOMEM; + + syscom_config = kzalloc(sizeof(struct ia_css_syscom_config), + GFP_KERNEL); + if (!syscom_config) { + rval = -ENOMEM; + goto out_syscom_buffer_free; + } + + server_init = kzalloc(sizeof(struct ia_css_psys_server_init), + GFP_KERNEL); + if (!server_init) { + rval = -ENOMEM; + goto out_syscom_config_free; + } + + server_init->ddr_pkg_dir_address = 0; + server_init->host_ddr_pkg_dir = 0; + server_init->pkg_dir_size = 0; + + *syscom_config = *ia_css_psys_specify(); + syscom_config->specific_addr = server_init; + syscom_config->specific_size = sizeof(struct ia_css_psys_server_init); + syscom_config->ssid = PSYS_SSID; + syscom_config->mmid = PSYS_MMID; + syscom_config->regs_addr = ipu_device_cell_memory_address(SPC0, + IPU_DEVICE_SP2600_CONTROL_REGS); + syscom_config->dmem_addr = ipu_device_cell_memory_address(SPC0, + IPU_DEVICE_SP2600_CONTROL_DMEM); + csslib_init = true; + + return 0; + +out_syscom_config_free: + kfree(syscom_config); +out_syscom_buffer_free: + kfree(syscom_buffer); + + return rval; +} + +static void __exit libcsspsys2600_exit(void) +{ + kfree(syscom_buffer); + kfree(syscom_config); + kfree(server_init); +} + +module_init(libcsspsys2600_init); +module_exit(libcsspsys2600_exit); + +MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("Intel ipu psys css library"); diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/libcsspsys2600.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/libcsspsys2600.h new file mode 100644 index 0000000000000..b8d790f561805 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/libcsspsys2600.h @@ -0,0 +1,26 @@ +/* + * 
Copyright (c) 2015--2018 Intel Corporation. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License version + * 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#ifndef LIBCSSPSYS2600_H +#define LIBCSSPSYS2600_H + +#include +#include +#include +#include +#include +#include +#include + +extern struct ia_css_syscom_context *psys_syscom; +#endif diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/libintel-ipu4p.c b/drivers/media/pci/intel/ipu4/ipu4p-css/libintel-ipu4p.c new file mode 100644 index 0000000000000..3704754be71c0 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/libintel-ipu4p.c @@ -0,0 +1,395 @@ +// SPDX-License-Identifier: GPL-2.0 +// Copyright (C) 2014 - 2018 Intel Corporation + +#include +#include +#include +#include "ipu-isys.h" +#include "ipu-wrapper.h" +#include + +#include "ipu-platform.h" + +#define ipu_lib_call_notrace_unlocked(func, isys, ...) \ + ({ \ + int rval; \ + \ + rval = -ia_css_isys_##func((isys)->fwcom, ##__VA_ARGS__); \ + \ + rval; \ + }) + +#define ipu_lib_call_notrace(func, isys, ...) \ + ({ \ + int rval; \ + \ + mutex_lock(&(isys)->lib_mutex); \ + \ + rval = ipu_lib_call_notrace_unlocked( \ + func, isys, ##__VA_ARGS__); \ + \ + mutex_unlock(&(isys)->lib_mutex); \ + \ + rval; \ + }) + +#define ipu_lib_call(func, isys, ...) 
\ + ({ \ + int rval; \ + dev_dbg(&(isys)->adev->dev, "hostlib: libcall %s\n", #func); \ + rval = ipu_lib_call_notrace(func, isys, ##__VA_ARGS__); \ + \ + rval; \ + }) + +static int wrapper_init_done; + +int ipu_fw_isys_close(struct ipu_isys *isys) +{ + struct device *dev = &isys->adev->dev; + int timeout = IPU_ISYS_TURNOFF_TIMEOUT; + int rval; + unsigned long flags; + + /* + * Ask library to stop the isys fw. Actual close takes + * some time as the FW must stop its actions including code fetch + * to SP icache. + */ + mutex_lock(&isys->lib_mutex); + spin_lock_irqsave(&isys->power_lock, flags); + rval = ipu_lib_call_notrace_unlocked(device_close, isys); + spin_unlock_irqrestore(&isys->power_lock, flags); + mutex_unlock(&isys->lib_mutex); + if (rval) + dev_err(dev, "Device close failure: %d\n", rval); + + /* release probably fails if the close failed. Let's try still */ + do { + usleep_range(IPU_ISYS_TURNOFF_DELAY_US, + 2 * IPU_ISYS_TURNOFF_DELAY_US); + rval = ipu_lib_call_notrace(device_release, isys, 0); + timeout--; + } while (rval != 0 && timeout); + + /* Spin lock to wait the interrupt handler to be finished */ + spin_lock_irqsave(&isys->power_lock, flags); + if (!rval) + isys->fwcom = NULL; /* No further actions needed */ + else + dev_err(dev, "Device release time out %d\n", rval); + spin_unlock_irqrestore(&isys->power_lock, flags); + return rval; +} +EXPORT_SYMBOL_GPL(ipu_fw_isys_close); + +int ipu_fw_isys_init(struct ipu_isys *isys, + unsigned int num_streams) +{ + int retry = IPU_ISYS_OPEN_RETRY; + unsigned int i; + + struct ia_css_isys_device_cfg_data isys_cfg = { + .driver_sys = { + .ssid = ISYS_SSID, + .mmid = ISYS_MMID, + .num_send_queues = clamp_t( + unsigned int, num_streams, 1, + IPU_ISYS_NUM_STREAMS), + .num_recv_queues = IPU_ISYS_NUM_RECV_QUEUE, + .send_queue_size = IPU_ISYS_SIZE_SEND_QUEUE, + .recv_queue_size = IPU_ISYS_SIZE_RECV_QUEUE, + .icache_prefetch = isys->icache_prefetch, + }, + }; + struct device *dev = &isys->adev->dev; + int rval; + + if 
(!wrapper_init_done) { + wrapper_init_done = true; + ipu_wrapper_init(ISYS_MMID, &isys->adev->dev, + isys->pdata->base); + } + + /* + * SRAM partitioning. Initially equal partitioning is set + * TODO: Fine tune the partitioning based on the stream pixel load + */ + for (i = 0; i < min(IPU_NOF_SRAM_BLOCKS_MAX, + NOF_SRAM_BLOCKS_MAX); i++) { + if (i < isys_cfg.driver_sys.num_send_queues) + isys_cfg.buffer_partition.num_gda_pages[i] = + (IPU_DEVICE_GDA_NR_PAGES * + IPU_DEVICE_GDA_VIRT_FACTOR) / + isys_cfg.driver_sys.num_send_queues; + else + isys_cfg.buffer_partition.num_gda_pages[i] = 0; + } + + rval = -ia_css_isys_device_open(&isys->fwcom, &isys_cfg); + if (rval < 0) { + dev_err(dev, "isys device open failed %d\n", rval); + return rval; + } + + do { + usleep_range(IPU_ISYS_OPEN_TIMEOUT_US, + IPU_ISYS_OPEN_TIMEOUT_US + 10); + rval = ipu_lib_call(device_open_ready, isys); + if (!rval) + break; + retry--; + } while (retry > 0); + + if (!retry && rval) { + dev_err(dev, "isys device open ready failed %d\n", rval); + ipu_fw_isys_close(isys); + } + + return rval; +} +EXPORT_SYMBOL_GPL(ipu_fw_isys_init); + +void ipu_fw_isys_cleanup(struct ipu_isys *isys) +{ + ipu_lib_call(device_release, isys, 1); + isys->fwcom = NULL; +} +EXPORT_SYMBOL_GPL(ipu_fw_isys_cleanup); + +struct ipu_fw_isys_resp_info_abi *ipu_fw_isys_get_resp( + void *context, unsigned int queue, + struct ipu_fw_isys_resp_info_abi *response) +{ + struct ia_css_isys_resp_info apiresp; + int rval; + + rval = -ia_css_isys_stream_handle_response(context, &apiresp); + if (rval < 0) + return NULL; + + response->buf_id = 0; + response->type = apiresp.type; + response->timestamp[0] = apiresp.timestamp[0]; + response->timestamp[1] = apiresp.timestamp[1]; + response->stream_handle = apiresp.stream_handle; + response->error_info.error = apiresp.error; + response->error_info.error_details = apiresp.error_details; + response->pin.out_buf_id = apiresp.pin.out_buf_id; + response->pin.addr = apiresp.pin.addr; + response->pin_id = 
apiresp.pin_id; + response->process_group_light.param_buf_id = + apiresp.process_group_light.param_buf_id; + response->process_group_light.addr = + apiresp.process_group_light.addr; + response->acc_id = apiresp.acc_id; +#ifdef IPU_OTF_SUPPORT + response->frame_counter = apiresp.frame_counter; + response->written_direct = apiresp.written_direct; +#endif + + return response; +} +EXPORT_SYMBOL_GPL(ipu_fw_isys_get_resp); + +void ipu_fw_isys_put_resp(void *context, unsigned int queue) +{ + /* Nothing to do here really */ +} +EXPORT_SYMBOL_GPL(ipu_fw_isys_put_resp); + +int ipu_fw_isys_simple_cmd(struct ipu_isys *isys, + const unsigned int stream_handle, + enum ipu_fw_isys_send_type send_type) +{ + int rval = -1; + + switch (send_type) { + case IPU_FW_ISYS_SEND_TYPE_STREAM_START: + rval = ipu_lib_call(stream_start, isys, stream_handle, NULL); + break; + case IPU_FW_ISYS_SEND_TYPE_STREAM_FLUSH: + rval = ipu_lib_call(stream_flush, isys, stream_handle); + break; + case IPU_FW_ISYS_SEND_TYPE_STREAM_STOP: + rval = ipu_lib_call(stream_stop, isys, stream_handle); + break; + case IPU_FW_ISYS_SEND_TYPE_STREAM_CLOSE: + rval = ipu_lib_call(stream_close, isys, stream_handle); + break; + default: + WARN_ON(1); + } + return rval; +} +EXPORT_SYMBOL_GPL(ipu_fw_isys_simple_cmd); + +static void resolution_abi_to_api(const struct ipu_fw_isys_resolution_abi *abi, + struct ia_css_isys_resolution *api) +{ + api->width = abi->width; + api->height = abi->height; +} + +static void output_pin_payload_abi_to_api( + struct ipu_fw_isys_output_pin_payload_abi *abi, + struct ia_css_isys_output_pin_payload *api) +{ + api->out_buf_id = abi->out_buf_id; + api->addr = abi->addr; +} + +static void output_pin_info_abi_to_api( + struct ipu_fw_isys_output_pin_info_abi *abi, + struct ia_css_isys_output_pin_info *api) +{ + api->input_pin_id = abi->input_pin_id; + resolution_abi_to_api(&abi->output_res, &api->output_res); + api->stride = abi->stride; + api->pt = abi->pt; + api->watermark_in_lines = 
abi->watermark_in_lines; + api->payload_buf_size = abi->payload_buf_size; + api->send_irq = abi->send_irq; + api->ft = abi->ft; +#ifdef IPU_OTF_SUPPORT + api->link_id = abi->link_id; +#endif + api->reserve_compression = abi->reserve_compression; +} + +static void param_pin_abi_to_api(struct ipu_fw_isys_param_pin_abi *abi, + struct ia_css_isys_param_pin *api) +{ + api->param_buf_id = abi->param_buf_id; + api->addr = abi->addr; +} + +static void input_pin_info_abi_to_api( + struct ipu_fw_isys_input_pin_info_abi *abi, + struct ia_css_isys_input_pin_info *api) +{ + resolution_abi_to_api(&abi->input_res, &api->input_res); + api->dt = abi->dt; + api->mipi_store_mode = abi->mipi_store_mode; + api->mapped_dt = abi->mapped_dt; +} + +static void isa_cfg_abi_to_api(const struct ipu_fw_isys_isa_cfg_abi *abi, + struct ia_css_isys_isa_cfg *api) +{ + unsigned int i; + + for (i = 0; i < min(N_IPU_FW_ISYS_RESOLUTION_INFO, + N_IA_CSS_ISYS_RESOLUTION_INFO); i++) + resolution_abi_to_api(&abi->isa_res[i], &api->isa_res[i]); + + api->blc_enabled = abi->cfg.blc; + api->lsc_enabled = abi->cfg.lsc; + api->dpc_enabled = abi->cfg.dpc; + api->downscaler_enabled = abi->cfg.downscaler; + api->awb_enabled = abi->cfg.awb; + api->af_enabled = abi->cfg.af; + api->ae_enabled = abi->cfg.ae; + api->paf_type = abi->cfg.paf; + api->send_irq_stats_ready = abi->cfg.send_irq_stats_ready; + api->send_resp_stats_ready = abi->cfg.send_irq_stats_ready; +} + +static void cropping_abi_to_api(struct ipu_fw_isys_cropping_abi *abi, + struct ia_css_isys_cropping *api) +{ + api->top_offset = abi->top_offset; + api->left_offset = abi->left_offset; + api->bottom_offset = abi->bottom_offset; + api->right_offset = abi->right_offset; +} + +static void stream_cfg_abi_to_api(struct ipu_fw_isys_stream_cfg_data_abi *abi, + struct ia_css_isys_stream_cfg_data *api) +{ + unsigned int i; + + api->src = abi->src; + api->vc = abi->vc; + api->isl_use = abi->isl_use; + api->compfmt = abi->compfmt; + isa_cfg_abi_to_api(&abi->isa_cfg, 
&api->isa_cfg); + for (i = 0; i < min(N_IPU_FW_ISYS_CROPPING_LOCATION, + N_IA_CSS_ISYS_CROPPING_LOCATION); i++) + cropping_abi_to_api(&abi->crop[i], &api->crop[i]); + + api->send_irq_sof_discarded = abi->send_irq_sof_discarded; + api->send_irq_eof_discarded = abi->send_irq_eof_discarded; + api->send_resp_sof_discarded = abi->send_irq_sof_discarded; + api->send_resp_eof_discarded = abi->send_irq_eof_discarded; + api->nof_input_pins = abi->nof_input_pins; + api->nof_output_pins = abi->nof_output_pins; + for (i = 0; i < abi->nof_input_pins; i++) + input_pin_info_abi_to_api(&abi->input_pins[i], + &api->input_pins[i]); + + for (i = 0; i < abi->nof_output_pins; i++) + output_pin_info_abi_to_api(&abi->output_pins[i], + &api->output_pins[i]); +} + +static void frame_buff_set_abi_to_api( + struct ipu_fw_isys_frame_buff_set_abi *abi, + struct ia_css_isys_frame_buff_set *api) +{ + int i; + + for (i = 0; i < min(IPU_MAX_OPINS, MAX_OPINS); i++) + output_pin_payload_abi_to_api(&abi->output_pins[i], + &api->output_pins[i]); + + param_pin_abi_to_api(&abi->process_group_light, + &api->process_group_light); + + api->send_irq_sof = abi->send_irq_sof; + api->send_irq_eof = abi->send_irq_eof; + api->send_irq_capture_ack = abi->send_irq_capture_ack; + api->send_irq_capture_done = abi->send_irq_capture_done; +} + +int ipu_fw_isys_complex_cmd(struct ipu_isys *isys, + const unsigned int stream_handle, + void *cpu_mapped_buf, + dma_addr_t dma_mapped_buf, + size_t size, + enum ipu_fw_isys_send_type send_type) +{ + union { + struct ia_css_isys_stream_cfg_data stream_cfg; + struct ia_css_isys_frame_buff_set buf; + } param; + int rval = -1; + + memset(¶m, 0, sizeof(param)); + + switch (send_type) { + case IPU_FW_ISYS_SEND_TYPE_STREAM_CAPTURE: + frame_buff_set_abi_to_api(cpu_mapped_buf, ¶m.buf); + rval = ipu_lib_call(stream_capture_indication, + isys, stream_handle, ¶m.buf); + break; + case IPU_FW_ISYS_SEND_TYPE_STREAM_OPEN: + stream_cfg_abi_to_api(cpu_mapped_buf, ¶m.stream_cfg); + rval = 
ipu_lib_call(stream_open, isys, stream_handle, + ¶m.stream_cfg); + break; + case IPU_FW_ISYS_SEND_TYPE_STREAM_START_AND_CAPTURE: + frame_buff_set_abi_to_api(cpu_mapped_buf, ¶m.buf); + rval = ipu_lib_call(stream_start, isys, stream_handle, + ¶m.buf); + break; + default: + WARN_ON(1); + } + + return rval; +} +EXPORT_SYMBOL_GPL(ipu_fw_isys_complex_cmd); + +MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("Intel ipu library"); diff --git a/drivers/media/pci/intel/ipu4/ipu4p-isys-csi2.c b/drivers/media/pci/intel/ipu4/ipu4p-isys-csi2.c new file mode 100644 index 0000000000000..580a90835bbb7 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-isys-csi2.c @@ -0,0 +1,426 @@ +// SPDX-License-Identifier: GPL-2.0 +// Copyright (C) 2018 Intel Corporation + +#include "ipu.h" +#include "ipu-buttress.h" +#include "ipu-isys.h" +#include "ipu-isys-csi2.h" +#include "ipu-platform-isys-csi2-reg.h" +#include "ipu-platform-regs.h" +#include "ipu-trace.h" +#include "ipu-isys-csi2.h" + +#define CSI2_UPDATE_TIME_TRY_NUM 3 +#define CSI2_UPDATE_TIME_MAX_DIFF 20 + +static int ipu4p_csi2_ev_correction_params(struct ipu_isys_csi2 + *csi2, unsigned int lanes) +{ + /* + * TBD: add implementation for ipu4p + * probably re-use ipu4 implementation + */ + return 0; +} + +static void ipu4p_isys_register_errors(struct ipu_isys_csi2 *csi2) +{ + u32 status; + unsigned int index; + struct ipu_isys *isys = csi2->isys; + void __iomem *isys_base = isys->pdata->base; + + index = csi2->index; + status = readl(isys_base + + IPU_REG_ISYS_CSI_IRQ_CTRL0_BASE(index) + 0x8); + writel(status, isys_base + + IPU_REG_ISYS_CSI_IRQ_CTRL0_BASE(index) + 0xc); + + status &= 0xffff; + dev_dbg(&isys->adev->dev, "csi %d rxsync status 0x%x", index, status); + csi2->receiver_errors |= status; +} + +void ipu_isys_csi2_error(struct ipu_isys_csi2 *csi2) +{ + /* + * Strings corresponding to CSI-2 receiver errors are here. + * Corresponding macros are defined in the header file. 
+ */ + static const struct ipu_isys_csi2_error { + const char *error_string; + bool is_info_only; + } errors[] = { + {"Single packet header error corrected", true}, + {"Multiple packet header errors detected", true}, + {"Payload checksum (CRC) error", true}, + {"FIFO overflow", false}, + {"Reserved short packet data type detected", true}, + {"Reserved long packet data type detected", true}, + {"Incomplete long packet detected", false}, + {"Frame sync error", false}, + {"Line sync error", false}, + {"DPHY recoverable synchronization error", true}, + {"DPHY non-recoverable synchronization error", false}, + {"Escape mode error", true}, + {"Escape mode trigger event", true}, + {"Escape mode ultra-low power state for data lane(s)", true}, + {"Escape mode ultra-low power state exit for clock lane", true}, + {"Inter-frame short packet discarded", true}, + {"Inter-frame long packet discarded", true}, + }; + u32 status; + unsigned int i; + + /* Register errors once more in case of error interrupts are disabled */ + ipu4p_isys_register_errors(csi2); + status = csi2->receiver_errors; + csi2->receiver_errors = 0; + + for (i = 0; i < ARRAY_SIZE(errors); i++) { + if (status & BIT(i)) { + if (errors[i].is_info_only) + dev_dbg(&csi2->isys->adev->dev, + "csi2-%i info: %s\n", + csi2->index, errors[i].error_string); + else + dev_err_ratelimited(&csi2->isys->adev->dev, + "csi2-%i error: %s\n", + csi2->index, + errors[i].error_string); + } + } +} + +int ipu_isys_csi2_set_stream(struct v4l2_subdev *sd, + struct ipu_isys_csi2_timing timing, + unsigned int nlanes, int enable) +{ + struct ipu_isys_csi2 *csi2 = to_ipu_isys_csi2(sd); + struct ipu_isys *isys = csi2->isys; + void __iomem *isys_base = isys->pdata->base; + unsigned int i; + u32 val, csi2part = 0; + + dev_dbg(&csi2->isys->adev->dev, "csi2 s_stream %d\n", enable); + if (!enable) { + ipu_isys_csi2_error(csi2); + + val = readl(csi2->base + CSI2_REG_CSI_RX_CONFIG); + val &= ~(CSI2_CSI_RX_CONFIG_DISABLE_BYTE_CLK_GATING | + 
CSI2_CSI_RX_CONFIG_RELEASE_LP11); + writel(val, csi2->base + CSI2_REG_CSI_RX_CONFIG); + + writel(0, csi2->base + CSI2_REG_CSI_RX_ENABLE); + + writel(0, isys_base + + IPU_REG_ISYS_CSI_IRQ_CTRL_BASE(csi2->index) + 0x4); + writel(0, isys_base + + IPU_REG_ISYS_CSI_IRQ_CTRL_BASE(csi2->index) + + 0x10); + writel + (0, isys_base + + IPU_REG_ISYS_CSI_IRQ_CTRL0_BASE(csi2->index) + 0x4); + writel + (0, isys_base + + IPU_REG_ISYS_CSI_IRQ_CTRL0_BASE(csi2->index) + 0x10); + return 0; + } + + ipu4p_csi2_ev_correction_params(csi2, nlanes); + + writel(timing.ctermen, + csi2->base + CSI2_REG_CSI_RX_DLY_CNT_TERMEN_CLANE); + writel(timing.csettle, + csi2->base + CSI2_REG_CSI_RX_DLY_CNT_SETTLE_CLANE); + + for (i = 0; i < nlanes; i++) { + writel + (timing.dtermen, + csi2->base + CSI2_REG_CSI_RX_DLY_CNT_TERMEN_DLANE(i)); + writel + (timing.dsettle, + csi2->base + CSI2_REG_CSI_RX_DLY_CNT_SETTLE_DLANE(i)); + } + + val = readl(csi2->base + CSI2_REG_CSI_RX_CONFIG); + val |= CSI2_CSI_RX_CONFIG_DISABLE_BYTE_CLK_GATING | + CSI2_CSI_RX_CONFIG_RELEASE_LP11; + writel(val, csi2->base + CSI2_REG_CSI_RX_CONFIG); + + writel(nlanes, csi2->base + CSI2_REG_CSI_RX_NOF_ENABLED_LANES); + writel(CSI2_CSI_RX_ENABLE_ENABLE, + csi2->base + CSI2_REG_CSI_RX_ENABLE); + +#ifdef IPU_VC_SUPPORT + /* SOF of VC0-VC3 enabled from CSI2PART register in B0 */ + for (i = 0; i < NR_OF_CSI2_VC; i++) + csi2part |= CSI2_IRQ_FS_VC(i) | CSI2_IRQ_FE_VC(i); +#else + csi2part |= CSI2_IRQ_FS_VC | CSI2_IRQ_FE_VC; +#endif + + /* Enable csi2 receiver error interrupts */ + writel(1, isys_base + + IPU_REG_ISYS_CSI_IRQ_CTRL_BASE(csi2->index)); + writel(0, isys_base + + IPU_REG_ISYS_CSI_IRQ_CTRL_BASE(csi2->index) + 0x14); + writel(0xffffffff, isys_base + + IPU_REG_ISYS_CSI_IRQ_CTRL_BASE(csi2->index) + 0xc); + writel(1, isys_base + + IPU_REG_ISYS_CSI_IRQ_CTRL_BASE(csi2->index) + 0x4); + writel(1, isys_base + + IPU_REG_ISYS_CSI_IRQ_CTRL_BASE(csi2->index) + 0x10); + + csi2part |= 0xffff; + writel(csi2part, isys_base + + 
IPU_REG_ISYS_CSI_IRQ_CTRL0_BASE(csi2->index)); + writel(0, isys_base + + IPU_REG_ISYS_CSI_IRQ_CTRL0_BASE(csi2->index) + 0x14); + writel(0xffffffff, isys_base + + IPU_REG_ISYS_CSI_IRQ_CTRL0_BASE(csi2->index) + 0xc); + writel(csi2part, isys_base + + IPU_REG_ISYS_CSI_IRQ_CTRL0_BASE(csi2->index) + 0x4); + writel(csi2part, isys_base + + IPU_REG_ISYS_CSI_IRQ_CTRL0_BASE(csi2->index) + 0x10); + + return 0; +} + +void ipu_isys_csi2_isr(struct ipu_isys_csi2 *csi2) +{ + u32 status = 0; +#ifdef IPU_VC_SUPPORT + unsigned int i, bus; +#else + unsigned int bus; +#endif + struct ipu_isys *isys = csi2->isys; + void __iomem *isys_base = isys->pdata->base; + + bus = csi2->index; + /* handle ctrl and ctrl0 irq */ + status = readl(isys_base + + IPU_REG_ISYS_CSI_IRQ_CTRL_BASE(bus) + 0x8); + writel(status, isys_base + + IPU_REG_ISYS_CSI_IRQ_CTRL_BASE(bus) + 0xc); + dev_dbg(&isys->adev->dev, "csi %d irq_ctrl status 0x%x", bus, status); + + if (!(status & BIT(0))) + return; + + status = readl(isys_base + + IPU_REG_ISYS_CSI_IRQ_CTRL0_BASE(bus) + 0x8); + writel(status, isys_base + + IPU_REG_ISYS_CSI_IRQ_CTRL0_BASE(bus) + 0xc); + dev_dbg(&isys->adev->dev, "csi %d irq_ctrl0 status 0x%x", bus, status); + /* register the csi sync error */ + csi2->receiver_errors |= status & 0xffff; + /* handle sof and eof event */ +#ifdef IPU_VC_SUPPORT + for (i = 0; i < NR_OF_CSI2_VC; i++) { + if (status & CSI2_IRQ_FS_VC(i)) + ipu_isys_csi2_sof_event(csi2, i); + + if (status & CSI2_IRQ_FE_VC(i)) + ipu_isys_csi2_eof_event(csi2, i); + } +#else + if (status & CSI2_IRQ_FS_VC) + ipu_isys_csi2_sof_event(csi2); + if (status & CSI2_IRQ_FE_VC) + ipu_isys_csi2_eof_event(csi2); +#endif +} + +static u64 tunit_time_to_us(struct ipu_isys *isys, u64 time) +{ + struct ipu_bus_device *adev = to_ipu_bus_device(isys->adev->iommu); + u64 isys_clk = IS_FREQ_SOURCE / adev->ctrl->divisor / 1000000; + + do_div(time, isys_clk); + + return time; +} + +static u64 tsc_time_to_tunit_time(struct ipu_isys *isys, + u64 tsc_base, u64 
tunit_base, u64 tsc_time) +{ + struct ipu_bus_device *adev = to_ipu_bus_device(isys->adev->iommu); + u64 isys_clk = IS_FREQ_SOURCE / adev->ctrl->divisor / 100000; + u64 tsc_clk = IPU_BUTTRESS_TSC_CLK / 100000; + + tsc_time *= isys_clk; + tsc_base *= isys_clk; + do_div(tsc_time, tsc_clk); + do_div(tsc_base, tsc_clk); + + return tunit_base + tsc_time - tsc_base; +} + +static int update_timer_base(struct ipu_isys *isys) +{ + int rval, i; + u64 time; + + for (i = 0; i < CSI2_UPDATE_TIME_TRY_NUM; i++) { + rval = ipu_trace_get_timer(&isys->adev->dev, &time); + if (rval) { + dev_err(&isys->adev->dev, + "Failed to read Tunit timer.\n"); + return rval; + } + rval = ipu_buttress_tsc_read(isys->adev->isp, + &isys->tsc_timer_base); + if (rval) { + dev_err(&isys->adev->dev, + "Failed to read TSC timer.\n"); + return rval; + } + rval = ipu_trace_get_timer(&isys->adev->dev, + &isys->tunit_timer_base); + if (rval) { + dev_err(&isys->adev->dev, + "Failed to read Tunit timer.\n"); + return rval; + } + if (tunit_time_to_us(isys, isys->tunit_timer_base - time) < + CSI2_UPDATE_TIME_MAX_DIFF) + return 0; + } + dev_dbg(&isys->adev->dev, "Timer base values may not be accurate.\n"); + return 0; +} + +/* Extract the timestamp from a trace message. + * The timestamp in the trace message contains two parts. + * The lower part contains bit0 ~ 15 of the total 64bit timestamp. + * The higher part contains bit14 ~ 63 of the 64bit timestamp. + * These two parts are sampled at different times. + * Two overlapped bits are used to identify if there are roll overs + * in the lower part during the two samples. + * If the two overlapped bits do not match, a fix is needed to + * handle the roll over. + */ +static u64 extract_time_from_short_packet_msg(struct + ipu_isys_csi2_monitor_message + *msg) +{ + u64 time_h = msg->timestamp_h << 14; + u64 time_l = msg->timestamp_l; + u64 time_h_ovl = time_h & 0xc000; + u64 time_h_h = time_h & (~0xffff); + + /* Fix possible roll overs. 
*/ + if (time_h_ovl >= (time_l & 0xc000)) + return time_h_h | time_l; + else + return (time_h_h - 0x10000) | time_l; +} + +unsigned int ipu_isys_csi2_get_current_field(struct ipu_isys_pipeline *ip, + unsigned int *timestamp) +{ + struct ipu_isys_video *av = container_of(ip, struct ipu_isys_video, ip); + struct ipu_isys *isys = av->isys; + unsigned int field = V4L2_FIELD_TOP; + + /* + * Find the nearest message that has matched msg type, + * port id, virtual channel and packet type. + */ + unsigned int i = ip->short_packet_trace_index; + bool msg_matched = false; + unsigned int monitor_id; + + update_timer_base(isys); + + if (ip->csi2->index >= IPU_ISYS_MAX_CSI2_LEGACY_PORTS) + monitor_id = TRACE_REG_CSI2_3PH_TM_MONITOR_ID; + else + monitor_id = TRACE_REG_CSI2_TM_MONITOR_ID; + + dma_sync_single_for_cpu(&isys->adev->dev, + isys->short_packet_trace_buffer_dma_addr, + IPU_ISYS_SHORT_PACKET_TRACE_BUFFER_SIZE, + DMA_BIDIRECTIONAL); + + do { + struct ipu_isys_csi2_monitor_message msg = + isys->short_packet_trace_buffer[i]; + u64 sof_time = tsc_time_to_tunit_time(isys, + isys->tsc_timer_base, + isys->tunit_timer_base, + (((u64) timestamp[1]) << + 32) | timestamp[0]); + u64 trace_time = extract_time_from_short_packet_msg(&msg); + u64 delta_time_us = tunit_time_to_us(isys, + (sof_time > trace_time) ? + sof_time - trace_time : + trace_time - sof_time); + + i = (i + 1) % IPU_ISYS_SHORT_PACKET_TRACE_MSG_NUMBER; + + if (msg.cmd == TRACE_REG_CMD_TYPE_D64MTS && + msg.monitor_id == monitor_id && + msg.fs == 1 && + msg.port == ip->csi2->index && +#ifdef IPU_VC_SUPPORT + msg.vc == ip->vc && +#endif + delta_time_us < IPU_ISYS_SHORT_PACKET_TRACE_MAX_TIMESHIFT) { + field = (msg.sequence % 2) ? + V4L2_FIELD_TOP : V4L2_FIELD_BOTTOM; + ip->short_packet_trace_index = i; + msg_matched = true; + dev_dbg(&isys->adev->dev, + "Interlaced field ready. 
field = %d\n", field); + break; + } + } while (i != ip->short_packet_trace_index); + if (!msg_matched) + /* We have walked through the whole buffer. */ + dev_dbg(&isys->adev->dev, "No matched trace message found.\n"); + + return field; +} + +bool ipu_isys_csi2_skew_cal_required(struct ipu_isys_csi2 *csi2) +{ + __s64 link_freq; + int rval; + + if (!csi2) + return false; + +#ifdef IPU_VC_SUPPORT + /* Not yet ? */ + if (csi2->remote_streams != csi2->stream_count) + return false; + +#endif + rval = ipu_isys_csi2_get_link_freq(csi2, &link_freq); + if (rval) + return false; + + if (link_freq <= IPU_SKEW_CAL_LIMIT_HZ) + return false; + + return true; +} + +int ipu_isys_csi2_set_skew_cal(struct ipu_isys_csi2 *csi2, int enable) +{ + u32 val; + + val = readl(csi2->base + CSI2_REG_CSI_RX_CONFIG); + + if (enable) + val |= CSI2_CSI_RX_CONFIG_SKEWCAL_ENABLE; + else + val &= ~CSI2_CSI_RX_CONFIG_SKEWCAL_ENABLE; + + writel(val, csi2->base + CSI2_REG_CSI_RX_CONFIG); + + return 0; +} diff --git a/drivers/media/pci/intel/virtio/Makefile b/drivers/media/pci/intel/virtio/Makefile new file mode 100644 index 0000000000000..a8633d54473cd --- /dev/null +++ b/drivers/media/pci/intel/virtio/Makefile @@ -0,0 +1,11 @@ +ifneq ($(EXTERNAL_BUILD), 1) +srcpath := $(srctree) +endif + +IPU_STEP = bxtB0 + +include $(srcpath)/$(src)/Makefile.virt + +ccflags-y += -I$(srcpath)/$(src)/../../../../../include/ +ccflags-y += -I$(srcpath)/$(src)/../ +ccflags-y += -I$(srcpath)/$(src)/../ipu4/ diff --git a/drivers/media/pci/intel/virtio/Makefile.virt b/drivers/media/pci/intel/virtio/Makefile.virt new file mode 100644 index 0000000000000..df41ac23a5013 --- /dev/null +++ b/drivers/media/pci/intel/virtio/Makefile.virt @@ -0,0 +1,28 @@ +ifndef IPU_STEP + $(error No IPU_STEP was defined. Stopping.) 
+endif + +TARGET_MODULE:=intel-ipu-virt-$(IPU_STEP) + +$(TARGET_MODULE)-objs += ../virtio/intel-ipu4-virtio-common.o + +ifdef CONFIG_VIDEO_INTEL_IPU_VIRTIO_BE + $(TARGET_MODULE)-objs += ../virtio/intel-ipu4-virtio-be-request-queue.o + $(TARGET_MODULE)-objs += ../virtio/intel-ipu4-virtio-be-pipeline.o + $(TARGET_MODULE)-objs += ../virtio/intel-ipu4-virtio-be-bridge.o + $(TARGET_MODULE)-objs += ../virtio/intel-ipu4-virtio-be.o + $(TARGET_MODULE)-objs += ../virtio/intel-ipu4-virtio-be-stream.o + $(TARGET_MODULE)-objs += ../virtio/intel-ipu4-virtio-be-psys.o +else + $(TARGET_MODULE)-objs += ../virtio/intel-ipu4-virtio-fe-request-queue.o + $(TARGET_MODULE)-objs += ../virtio/intel-ipu4-virtio-fe-pipeline.o + $(TARGET_MODULE)-objs += ../virtio/intel-ipu4-virtio-fe-payload.o + $(TARGET_MODULE)-objs += ../virtio/intel-ipu4-virtio-fe.o + $(TARGET_MODULE)-objs += ../virtio/intel-ipu4-para-virt-drv.o + $(TARGET_MODULE)-objs += ../virtio/intel-ipu4-para-virt-psys.o +ifdef CONFIG_COMPAT + $(TARGET_MODULE)-objs += ../virtio/intel-ipu4-virtio-fe-psys-32compat.o +endif +endif + +obj-$(CONFIG_VIDEO_INTEL_IPU_ACRN) += $(TARGET_MODULE).o \ No newline at end of file diff --git a/drivers/media/pci/intel/virtio/intel-ipu4-para-virt-drv.c b/drivers/media/pci/intel/virtio/intel-ipu4-para-virt-drv.c new file mode 100644 index 0000000000000..5b4bac23f0895 --- /dev/null +++ b/drivers/media/pci/intel/virtio/intel-ipu4-para-virt-drv.c @@ -0,0 +1,1370 @@ +// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0) +/* + * Copyright (C) 2018 Intel Corporation + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "intel-ipu4-virtio-common.h" +#include "intel-ipu4-para-virt-drv.h" +#include "intel-ipu4-virtio-fe-pipeline.h" +#include "intel-ipu4-virtio-fe-payload.h" +#include "intel-ipu4-virtio-fe-request-queue.h" +#include "./ici/ici-isys-stream.h" +#include "./ici/ici-isys-pipeline-device.h" + +static dev_t virt_pipeline_dev_t; +static 
struct ici_isys_pipeline_device *pipeline_dev; + +static dev_t virt_stream_dev_t; +static struct class *virt_stream_class; +static struct class *virt_pipeline_class; +static int virt_stream_devs_registered; +static int stream_dev_init; + +static struct ipu4_virtio_ctx *g_fe_priv; +static struct mutex pipeline_fop_mutex; +static bool pipeline_open; + +#ifdef CONFIG_COMPAT +struct timeval32 { + __u32 tv_sec; + __u32 tv_usec; +} __attribute__((__packed__)); + +struct ici_frame_plane32 { + __u32 bytes_used; + __u32 length; + union { + compat_uptr_t userptr; + __s32 dmafd; + } mem; + __u32 data_offset; + __u32 reserved[2]; +} __attribute__((__packed__)); + +struct ici_frame_info32 { + __u32 frame_type; + __u32 field; + __u32 flag; + __u32 frame_buf_id; + struct timeval32 frame_timestamp; + __u32 frame_sequence_id; + __u32 mem_type; /* _DMA or _USER_PTR */ + struct ici_frame_plane32 frame_planes[ICI_MAX_PLANES]; /* multi-planar */ + __u32 num_planes; /* =1 single-planar > 1 multi-planar array size */ + __u32 reserved[2]; +} __attribute__((__packed__)); + +#define ICI_IOC_GET_BUF32 _IOWR(MAJOR_STREAM, 3, struct ici_frame_info32) +#define ICI_IOC_PUT_BUF32 _IOWR(MAJOR_STREAM, 4, struct ici_frame_info32) + +static void copy_from_user_frame_info32(struct ici_frame_info *kp, struct ici_frame_info32 __user *up) +{ + int i; + compat_uptr_t userptr; + + get_user(kp->frame_type, &up->frame_type); + get_user(kp->field, &up->field); + get_user(kp->flag, &up->flag); + get_user(kp->frame_buf_id, &up->frame_buf_id); + get_user(kp->frame_timestamp.tv_sec, &up->frame_timestamp.tv_sec); + get_user(kp->frame_timestamp.tv_usec, &up->frame_timestamp.tv_usec); + get_user(kp->frame_sequence_id, &up->frame_sequence_id); + get_user(kp->mem_type, &up->mem_type); + get_user(kp->num_planes, &up->num_planes); + for (i = 0; i < kp->num_planes; i++) { + get_user(kp->frame_planes[i].bytes_used, &up->frame_planes[i].bytes_used); + get_user(kp->frame_planes[i].length, &up->frame_planes[i].length); + if 
(kp->mem_type == ICI_MEM_USERPTR) { + get_user(userptr, &up->frame_planes[i].mem.userptr); + kp->frame_planes[i].mem.userptr = (unsigned long) compat_ptr(userptr); + } else if (kp->mem_type == ICI_MEM_DMABUF) { + get_user(kp->frame_planes[i].mem.dmafd, &up->frame_planes[i].mem.dmafd); + }; + get_user(kp->frame_planes[i].data_offset, &up->frame_planes[i].data_offset); + } +} + +static void copy_to_user_frame_info32(struct ici_frame_info *kp, struct ici_frame_info32 __user *up) +{ + int i; + compat_uptr_t userptr; + + put_user(kp->frame_type, &up->frame_type); + put_user(kp->field, &up->field); + put_user(kp->flag, &up->flag); + put_user(kp->frame_buf_id, &up->frame_buf_id); + put_user(kp->frame_timestamp.tv_sec, &up->frame_timestamp.tv_sec); + put_user(kp->frame_timestamp.tv_usec, &up->frame_timestamp.tv_usec); + put_user(kp->frame_sequence_id, &up->frame_sequence_id); + put_user(kp->mem_type, &up->mem_type); + put_user(kp->num_planes, &up->num_planes); + for (i = 0; i < kp->num_planes; i++) { + put_user(kp->frame_planes[i].bytes_used, &up->frame_planes[i].bytes_used); + put_user(kp->frame_planes[i].length, &up->frame_planes[i].length); + if (kp->mem_type == ICI_MEM_USERPTR) { + userptr = (unsigned long)compat_ptr(kp->frame_planes[i].mem.userptr); + put_user(userptr, &up->frame_planes[i].mem.userptr); + } else if (kp->mem_type == ICI_MEM_DMABUF) { + get_user(kp->frame_planes[i].mem.dmafd, &up->frame_planes[i].mem.dmafd); + } + put_user(kp->frame_planes[i].data_offset, &up->frame_planes[i].data_offset); + } +} +#endif + +static int get_userpages(struct device *dev, struct ici_frame_plane *frame_plane, + struct ici_kframe_plane *kframe_plane) +{ + unsigned long start, end, addr; + int npages, array_size; + struct page **pages; + int nr = 0; + int ret = 0; + unsigned int i; + u64 page_table_ref; + u64 *page_table; + addr = (unsigned long)frame_plane->mem.userptr; + start = addr & PAGE_MASK; + end = PAGE_ALIGN(addr + frame_plane->length); + npages = (end - start) >> 
PAGE_SHIFT; + array_size = npages * sizeof(struct page *); + + if (!npages) + return -EINVAL; + + page_table = kcalloc(npages, sizeof(*page_table), GFP_KERNEL); + if (!page_table) { + pr_err("Shared Page table for mediation failed\n"); + return -ENOMEM; + } + + pr_debug("%s:%d Number of Pages:%d frame_length:%d\n", __func__, __LINE__, npages, frame_plane->length); + if (array_size <= PAGE_SIZE) + pages = kzalloc(array_size, GFP_KERNEL); + else + pages = vzalloc(array_size); + if (!pages) + goto error_free_page_table; + + down_read(¤t->mm->mmap_sem); +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 8, 0) + nr = get_user_pages(current, current->mm, + start, npages, 1, 0, pages, NULL); +#else + nr = get_user_pages(start, npages, FOLL_WRITE, pages, NULL); +#endif + if (nr < npages) + goto error_free_pages; + /* Share physical address of pages */ + for (i = 0; i < npages; i++) + page_table[i] = page_to_phys(pages[i]); + + pr_debug("UOS phy page add %lld offset:%ld\n", page_table[0], addr & ~PAGE_MASK); + page_table_ref = virt_to_phys(page_table); + kframe_plane->page_table_ref = page_table_ref; + kframe_plane->npages = npages; + + if (pages) { + if (array_size <= PAGE_SIZE) + kfree(pages); + else + vfree(pages); + } + + up_read(¤t->mm->mmap_sem); + return ret; + +error_free_pages: + if (pages) { + for (i = 0; i < nr; i++) + put_page(pages[i]); + } + if (array_size <= PAGE_SIZE) + kfree(pages); + else + vfree(pages); +error_free_page_table: + kfree(page_table); + return -ENOMEM; +} + +static struct ici_frame_buf_wrapper *frame_buf_lookup(struct ici_isys_frame_buf_list *buf_list, struct ici_frame_info *user_frame_info) +{ + struct ici_frame_buf_wrapper *buf; + int i; + int mem_type = user_frame_info->mem_type; + + list_for_each_entry(buf, &buf_list->getbuf_list, uos_node) { + for (i = 0; i < user_frame_info->num_planes; i++) { + struct ici_frame_plane *new_plane = &user_frame_info->frame_planes[i]; + struct ici_frame_plane *cur_plane = &buf->frame_info.frame_planes[i]; + + if 
(buf->state != ICI_BUF_PREPARED && + buf->state != ICI_BUF_DONE) + continue; + + switch (mem_type) { + case ICI_MEM_USERPTR: + if (new_plane->mem.userptr == cur_plane->mem.userptr) + return buf; + break; + case ICI_MEM_DMABUF: + if (new_plane->mem.dmafd == cur_plane->mem.dmafd) + return buf; + break; + } + //TODO: add multiplaner checks + } + } + return NULL; +} + +static int map_dma(struct device *dev, struct ici_frame_plane *frame_plane, + struct ici_kframe_plane *kframe_plane) +{ + + int ret = 0; + int fd = frame_plane->mem.dmafd; + + kframe_plane->dbdbuf = dma_buf_get(fd); + if (!kframe_plane->dbdbuf) { + ret = -EINVAL; + goto error; + } + + if (frame_plane->length == 0) + kframe_plane->length = kframe_plane->dbdbuf->size; + else + kframe_plane->length = frame_plane->length; + + kframe_plane->fd = fd; + kframe_plane->db_attach = dma_buf_attach(kframe_plane->dbdbuf, dev); + + if (IS_ERR(kframe_plane->db_attach)) { + ret = PTR_ERR(kframe_plane->db_attach); + goto error_put; + } + + kframe_plane->sgt = dma_buf_map_attachment(kframe_plane->db_attach, + DMA_BIDIRECTIONAL); + if (IS_ERR_OR_NULL(kframe_plane->sgt)) { + ret = -EINVAL; + kframe_plane->sgt = NULL; + pr_err("map attachment failed\n"); + goto error_detach; + } + + kframe_plane->dma_addr = sg_dma_address(kframe_plane->sgt->sgl); + kframe_plane->kaddr = dma_buf_vmap(kframe_plane->dbdbuf); + + if (!kframe_plane->kaddr) { + ret = -EINVAL; + goto error_detach; + } + + pr_debug("MAPBUF: mapped fd %d\n", fd); + + return 0; + +error_detach: + dma_buf_detach(kframe_plane->dbdbuf, kframe_plane->db_attach); +error_put: + dma_buf_put(kframe_plane->dbdbuf); +error: + return ret; +} + +struct ici_frame_buf_wrapper *get_buf(struct virtual_stream *vstream, struct ici_frame_info *frame_info) +{ + int res; + unsigned i; + struct ici_frame_buf_wrapper *buf; + + struct ici_kframe_plane *kframe_plane; + struct ici_isys_frame_buf_list *buf_list = &vstream->buf_list; + int mem_type = frame_info->mem_type; + + if (mem_type != 
ICI_MEM_USERPTR && mem_type != ICI_MEM_DMABUF) { + pr_err("Memory type not supproted\n"); + return NULL; + } + + if (!frame_info->frame_planes[0].length) { + pr_err("User length not set\n"); + return NULL; + } + + buf = frame_buf_lookup(buf_list, frame_info); + if (buf) { + pr_debug("Frame buffer found in the list: %ld\n", buf->frame_info.frame_planes[0].mem.userptr); + buf->state = ICI_BUF_PREPARED; + return buf; + } + pr_debug("Creating new buffer in the list\n"); + buf = kzalloc(sizeof(*buf), GFP_KERNEL); + if (!buf) + return NULL; + + buf->buf_id = frame_info->frame_buf_id; + buf->uos_buf_list = buf_list; + memcpy(&buf->frame_info, frame_info, sizeof(buf->frame_info)); + + switch (mem_type) { + case ICI_MEM_USERPTR: + if (!frame_info->frame_planes[0].mem.userptr) { + pr_err("User pointer not define\n"); + return NULL; + } + for (i = 0; i < frame_info->num_planes; i++) { + kframe_plane = &buf->kframe_info.planes[i]; + kframe_plane->mem_type = ICI_MEM_USERPTR; + res = get_userpages(&vstream->strm_dev.dev, &frame_info->frame_planes[i], + kframe_plane); + if (res) + return NULL; + } + break; + case ICI_MEM_DMABUF: + for (i = 0; i < frame_info->num_planes; i++) { + kframe_plane = &buf->kframe_info.planes[i]; + kframe_plane->mem_type = ICI_MEM_DMABUF; + res = map_dma(&vstream->strm_dev.dev, &frame_info->frame_planes[i], + kframe_plane); + if (res) + return NULL; + } + + break; + } + mutex_lock(&buf_list->mutex); + buf->state = ICI_BUF_PREPARED; + list_add_tail(&buf->uos_node, &buf_list->getbuf_list); + mutex_unlock(&buf_list->mutex); + return buf; +} + +static void virt_ici_put_userpages(struct device *dev, + struct ici_kframe_plane + *kframe_plane) +{ + unsigned int i; + struct page *pages; + u64 *page_table; + + struct mm_struct* mm = current->active_mm; + if (!mm){ + dev_err(dev, "Failed to get active mm_struct ptr from current process.\n"); + return; + } + + down_read(&mm->mmap_sem); + + page_table = phys_to_virt(kframe_plane->page_table_ref); + for (i = 0; i < 
kframe_plane->npages; i++) { + pages = phys_to_page(page_table[i]); + set_page_dirty_lock(pages); + put_page(pages); + } + + kfree(page_table); + + up_read(&mm->mmap_sem); +} + +static void virt_unmap_buf(struct ici_frame_buf_wrapper *buf) +{ + int i; + + for (i = 0; i < buf->frame_info.num_planes; i++) { + struct ici_kframe_plane *kframe_plane = + &buf->kframe_info.planes[i]; + switch (kframe_plane->mem_type) { + case ICI_MEM_USERPTR: + virt_ici_put_userpages(kframe_plane->dev, + kframe_plane); + break; + default: + dev_err(&buf->buf_list->strm_dev->dev, "not supported memory type: %d\n", + kframe_plane->mem_type); + break; + } + } +} + +//Call from Stream-OFF and if Stream-ON fails +void buf_stream_cancel(struct virtual_stream *vstream) +{ + struct ici_isys_frame_buf_list *buf_list = &vstream->buf_list; + struct ici_frame_buf_wrapper *buf; + struct ici_frame_buf_wrapper *next_buf; + + list_for_each_entry_safe(buf, next_buf, + &buf_list->getbuf_list, uos_node) { + list_del(&buf->uos_node); + virt_unmap_buf(buf); + } + list_for_each_entry_safe(buf, next_buf, + &buf_list->putbuf_list, uos_node) { + list_del(&buf->uos_node); + virt_unmap_buf(buf); + } +} + +static int virt_isys_set_format(struct file *file, void *fh, + struct ici_stream_format *sf) +{ + struct ici_stream_device *strm_dev = fh; + struct virtual_stream *vstream = dev_to_vstream(strm_dev); + struct ipu4_virtio_ctx *fe_ctx = vstream->ctx; + struct ipu4_virtio_req *req; + int rval = 0; + int op[10]; + + pr_debug("Calling Set Format\n"); + + req = ipu4_virtio_fe_req_queue_get(); + if (!req) + return -ENOMEM; + op[0] = vstream->virt_dev_id; + op[1] = 0; + + req->payload = virt_to_phys(sf); + + intel_ipu4_virtio_create_req(req, IPU4_CMD_SET_FORMAT, &op[0]); + + rval = fe_ctx->bknd_ops->send_req(fe_ctx->domid, req, true, IPU_VIRTIO_QUEUE_0); + if (rval) { + dev_err(&strm_dev->dev, "Failed to set format\n"); + ipu4_virtio_fe_req_queue_put(req); + return rval; + } + ipu4_virtio_fe_req_queue_put(req); + + return 
rval; +} + +static int virt_isys_stream_on(struct file *file, void *fh) +{ + struct ici_stream_device *strm_dev = fh; + struct virtual_stream *vstream = dev_to_vstream(strm_dev); + struct ipu4_virtio_ctx *fe_ctx = vstream->ctx; + struct ipu4_virtio_req *req; + int rval = 0; + int op[10]; + pr_debug("Calling Stream ON\n"); + + req = ipu4_virtio_fe_req_queue_get(); + if (!req) + return -ENOMEM; + op[0] = vstream->virt_dev_id; + op[1] = 0; + + intel_ipu4_virtio_create_req(req, IPU4_CMD_STREAM_ON, &op[0]); + + rval = fe_ctx->bknd_ops->send_req(fe_ctx->domid, req, true, IPU_VIRTIO_QUEUE_0); + if (rval) { + dev_err(&strm_dev->dev, "Failed to stream on\n"); + ipu4_virtio_fe_req_queue_put(req); + return rval; + } + ipu4_virtio_fe_req_queue_put(req); + + return rval; +} + +static int virt_isys_stream_off(struct file *file, void *fh) +{ + struct ici_stream_device *strm_dev = fh; + struct virtual_stream *vstream = dev_to_vstream(strm_dev); + struct ipu4_virtio_ctx *fe_ctx = vstream->ctx; + struct ipu4_virtio_req *req; + int rval = 0; + int op[10]; + + pr_debug("Calling Stream OFF\n"); + req = ipu4_virtio_fe_req_queue_get(); + if (!req) + return -ENOMEM; + op[0] = vstream->virt_dev_id; + op[1] = 0; + + intel_ipu4_virtio_create_req(req, IPU4_CMD_STREAM_OFF, &op[0]); + + rval = fe_ctx->bknd_ops->send_req(fe_ctx->domid, req, true, IPU_VIRTIO_QUEUE_0); + if (rval) { + dev_err(&strm_dev->dev, "Failed to stream off\n"); + ipu4_virtio_fe_req_queue_put(req); + return rval; + } + ipu4_virtio_fe_req_queue_put(req); + + buf_stream_cancel(vstream); + + return rval; +} + +static int virt_isys_getbuf(struct file *file, void *fh, + struct ici_frame_info *user_frame_info) +{ + struct ici_stream_device *strm_dev = fh; + struct virtual_stream *vstream = dev_to_vstream(strm_dev); + struct ipu4_virtio_ctx *fe_ctx = vstream->ctx; + struct ipu4_virtio_req *req; + struct ici_frame_buf_wrapper *buf; + int rval = 0; + int op[3]; + + pr_debug("%s stream %d", __func__, vstream->virt_dev_id); + + buf = 
get_buf(vstream, user_frame_info); + if (!buf) { + dev_err(&strm_dev->dev, "Failed to map buffer: %d\n", rval); + return -ENOMEM; + } + + req = ipu4_virtio_fe_req_queue_get(); + if (!req) + return -ENOMEM; + + op[0] = vstream->virt_dev_id; + op[1] = 0; + op[2] = user_frame_info->mem_type; + req->payload = virt_to_phys(buf); + + intel_ipu4_virtio_create_req(req, IPU4_CMD_GET_BUF, &op[0]); + + rval = fe_ctx->bknd_ops->send_req(fe_ctx->domid, req, true, IPU_VIRTIO_QUEUE_0); + if (rval) { + dev_err(&strm_dev->dev, "Failed to Get Buffer\n"); + ipu4_virtio_fe_req_queue_put(req); + return rval; + } + ipu4_virtio_fe_req_queue_put(req); + + pr_debug("%s exit stream %d", __func__, vstream->virt_dev_id); + + return rval; +} + +static int virt_isys_putbuf(struct file *file, void *fh, + struct ici_frame_info *user_frame_info) +{ + struct ici_stream_device *strm_dev = fh; + struct virtual_stream *vstream = dev_to_vstream(strm_dev); + struct ipu4_virtio_ctx *fe_ctx = vstream->ctx; + struct ipu4_virtio_req *req; + int rval = 0; + int op[2]; + + pr_debug("%s stream %d", __func__, vstream->virt_dev_id); + + req = ipu4_virtio_fe_req_queue_get(); + if (!req) + return -ENOMEM; + + op[0] = vstream->virt_dev_id; + op[1] = 0; + req->payload = virt_to_phys(user_frame_info); + + intel_ipu4_virtio_create_req(req, IPU4_CMD_PUT_BUF, &op[0]); + + rval = fe_ctx->bknd_ops->send_req(fe_ctx->domid, req, true, IPU_VIRTIO_QUEUE_0); + if (rval) { + dev_err(&strm_dev->dev, "Failed to Get Buffer\n"); + ipu4_virtio_fe_req_queue_put(req); + return rval; + } + ipu4_virtio_fe_req_queue_put(req); + + pr_debug("%s exit stream %d", __func__, vstream->virt_dev_id); + + return rval; +} + +static unsigned int stream_fop_poll(struct file *file, struct ici_stream_device *dev) +{ + struct ipu4_virtio_req *req; + struct virtual_stream *vstream = dev_to_vstream(dev); + struct ipu4_virtio_ctx *fe_ctx = vstream->ctx; + int rval = 0; + int op[2]; + + dev_dbg(&dev->dev, "stream_fop_poll %d\n", vstream->virt_dev_id); + 
get_device(&dev->dev); + + req = ipu4_virtio_fe_req_queue_get(); + if (!req) + return -ENOMEM; + + op[0] = vstream->virt_dev_id; + op[1] = 0; + + intel_ipu4_virtio_create_req(req, IPU4_CMD_POLL, &op[0]); + + rval = fe_ctx->bknd_ops->send_req(fe_ctx->domid, req, true, + IPU_VIRTIO_QUEUE_0); + if (rval) { + dev_err(&dev->dev, "polling failed\n"); + ipu4_virtio_fe_req_queue_put(req); + return rval; + } + + rval = req->func_ret; + + ipu4_virtio_fe_req_queue_put(req); + + return rval; +} + +static int virt_stream_fop_open(struct inode *inode, struct file *file) +{ + struct ici_stream_device *strm_dev = inode_to_intel_ipu_stream_device(inode); + struct ipu4_virtio_req *req; + struct virtual_stream *vstream = dev_to_vstream(strm_dev); + struct ipu4_virtio_ctx *fe_ctx = vstream->ctx; + int rval = 0; + int op[3]; + pr_debug("%s %d", __func__, vstream->virt_dev_id); + get_device(&strm_dev->dev); + + file->private_data = strm_dev; + + if (!fe_ctx) + return -EINVAL; + + req = ipu4_virtio_fe_req_queue_get(); + if (!req) { + dev_err(&strm_dev->dev, "Virtio Req buffer failed\n"); + return -ENOMEM; + } + + op[0] = vstream->virt_dev_id; + op[1] = 1; + + intel_ipu4_virtio_create_req(req, IPU4_CMD_DEVICE_OPEN, &op[0]); + + rval = fe_ctx->bknd_ops->send_req(fe_ctx->domid, req, true, + IPU_VIRTIO_QUEUE_0); + if (rval) { + dev_err(&strm_dev->dev, "Failed to open virtual device\n"); + ipu4_virtio_fe_req_queue_put(req); + return rval; + } + ipu4_virtio_fe_req_queue_put(req); + + return rval; +} + +static int virt_stream_fop_release(struct inode *inode, struct file *file) +{ + struct ici_stream_device *strm_dev = inode_to_intel_ipu_stream_device(inode); + struct ipu4_virtio_req *req; + struct virtual_stream *vstream = dev_to_vstream(strm_dev); + struct ipu4_virtio_ctx *fe_ctx = vstream->ctx; + int rval = 0; + int op[2]; + pr_debug("%s %d", __func__, vstream->virt_dev_id); + put_device(&strm_dev->dev); + + req = ipu4_virtio_fe_req_queue_get(); + if (!req) + return -ENOMEM; + + op[0] = 
vstream->virt_dev_id; + op[1] = 0; + + intel_ipu4_virtio_create_req(req, IPU4_CMD_DEVICE_CLOSE, &op[0]); + + rval = fe_ctx->bknd_ops->send_req(fe_ctx->domid, req, true, + IPU_VIRTIO_QUEUE_0); + if (rval) { + dev_err(&strm_dev->dev, "Failed to close virtual device\n"); + ipu4_virtio_fe_req_queue_put(req); + return rval; + } + ipu4_virtio_fe_req_queue_put(req); + + buf_stream_cancel(vstream); + + return rval; +} + +static unsigned int virt_stream_fop_poll(struct file *file, + struct poll_table_struct *poll) +{ + struct ici_stream_device *as = file->private_data; + unsigned int res = POLLERR | POLLHUP; + + dev_dbg(&as->dev, "virt_stream_fop_poll for:%s\n", as->name); + + res = stream_fop_poll(file, as); + + dev_dbg(&as->dev, "virt_stream_fop_poll res %u\n", res); + return res; +} + +static long virt_stream_ioctl32(struct file *file, unsigned int ioctl_cmd, + unsigned long ioctl_arg) +{ + union isys_ioctl_cmd_args { + struct ici_frame_info frame_info; + struct ici_stream_format sf; + }; + void __user *up = compat_ptr(ioctl_arg); + union isys_ioctl_cmd_args *data = NULL; + int err = 0; + struct ici_stream_device *dev = file->private_data; + + mutex_lock(dev->mutex); + switch (ioctl_cmd) { + case ICI_IOC_STREAM_ON: + pr_debug("IPU FE IOCTL STREAM_ON\n"); + err = virt_isys_stream_on(file, dev); + break; + case ICI_IOC_STREAM_OFF: + pr_debug("IPU FE IOCTL STREAM_OFF\n"); + err = virt_isys_stream_off(file, dev); + break; + case ICI_IOC_GET_BUF32: + pr_debug("IPU FE IOCTL GET_BUF\n"); + data = (union isys_ioctl_cmd_args *) kzalloc(sizeof(union isys_ioctl_cmd_args), GFP_KERNEL); + copy_from_user_frame_info32(&data->frame_info, up); + err = virt_isys_getbuf(file, dev, &data->frame_info); + copy_to_user_frame_info32(&data->frame_info, up); + kfree(data); + if (err) { + mutex_unlock(dev->mutex); + return -EFAULT; + } + break; + case ICI_IOC_PUT_BUF32: + pr_debug("IPU FE IOCTL PUT_BUF\n"); + data = (union isys_ioctl_cmd_args *) kzalloc(sizeof(union isys_ioctl_cmd_args), 
GFP_KERNEL); + copy_from_user_frame_info32(&data->frame_info, up); + err = virt_isys_putbuf(file, dev, &data->frame_info); + copy_to_user_frame_info32(&data->frame_info, up); + kfree(data); + if (err) { + mutex_unlock(dev->mutex); + return -EFAULT; + } + break; + case ICI_IOC_SET_FORMAT: + pr_debug("IPU FE IOCTL SET_FORMAT\n"); + if (_IOC_SIZE(ioctl_cmd) > sizeof(union isys_ioctl_cmd_args)) { + mutex_unlock(dev->mutex); + return -ENOTTY; + } + + data = (union isys_ioctl_cmd_args *) kzalloc(sizeof(union isys_ioctl_cmd_args), GFP_KERNEL); + err = copy_from_user(data, up, _IOC_SIZE(ioctl_cmd)); + if (err) { + kfree(data); + mutex_unlock(dev->mutex); + return -EFAULT; + } + err = virt_isys_set_format(file, dev, &data->sf); + err = copy_to_user(up, data, _IOC_SIZE(ioctl_cmd)); + if (err) { + kfree(data); + mutex_unlock(dev->mutex); + return -EFAULT; + } + kfree(data); + break; + + default: + err = -ENOTTY; + break; + } + + mutex_unlock(dev->mutex); + + return 0; +} + +static long virt_stream_ioctl(struct file *file, unsigned int ioctl_cmd, + unsigned long ioctl_arg) +{ + union isys_ioctl_cmd_args { + struct ici_frame_info frame_info; + struct ici_stream_format sf; + }; + int err = 0; + union isys_ioctl_cmd_args *data = NULL; + struct ici_stream_device *dev = file->private_data; + void __user *up = (void __user *)ioctl_arg; + + bool copy = (ioctl_cmd != ICI_IOC_STREAM_ON && + ioctl_cmd != ICI_IOC_STREAM_OFF); + + if (copy) { + if (_IOC_SIZE(ioctl_cmd) > sizeof(union isys_ioctl_cmd_args)) + return -ENOTTY; + + data = (union isys_ioctl_cmd_args *) kzalloc(sizeof(union isys_ioctl_cmd_args), GFP_KERNEL); + if (_IOC_DIR(ioctl_cmd) & _IOC_WRITE) { + err = copy_from_user(data, up, + _IOC_SIZE(ioctl_cmd)); + if (err) { + kfree(data); + return -EFAULT; + } + } + } + + mutex_lock(dev->mutex); + switch (ioctl_cmd) { + case ICI_IOC_STREAM_ON: + err = virt_isys_stream_on(file, dev); + break; + case ICI_IOC_STREAM_OFF: + err = virt_isys_stream_off(file, dev); + break; + case 
ICI_IOC_GET_BUF: + err = virt_isys_getbuf(file, dev, &data->frame_info); + break; + case ICI_IOC_PUT_BUF: + err = virt_isys_putbuf(file, dev, &data->frame_info); + break; + case ICI_IOC_SET_FORMAT: + err = virt_isys_set_format(file, dev, &data->sf); + break; + default: + err = -ENOTTY; + break; + } + + mutex_unlock(dev->mutex); + + if (copy) { + err = copy_to_user(up, data, _IOC_SIZE(ioctl_cmd)); + kfree(data); + } + return 0; +} + + +static const struct file_operations virt_stream_fops = { + .owner = THIS_MODULE, + .open = virt_stream_fop_open, /* calls strm_dev->fops->open() */ + .unlocked_ioctl = virt_stream_ioctl, /* calls strm_dev->ipu_ioctl_ops->() */ +#ifdef CONFIG_COMPAT + .compat_ioctl = virt_stream_ioctl32, +#endif + .release = virt_stream_fop_release, /* calls strm_dev->fops->release() */ + .poll = virt_stream_fop_poll, /* calls strm_dev->fops->poll() */ +}; + +/* Called on device_unregister */ +static void base_device_release(struct device *sd) +{ +} + +int virt_frame_buf_init(struct ici_isys_frame_buf_list *buf_list) +{ + buf_list->drv_priv = NULL; + mutex_init(&buf_list->mutex); + spin_lock_init(&buf_list->lock); + spin_lock_init(&buf_list->short_packet_queue_lock); + INIT_LIST_HEAD(&buf_list->getbuf_list); + INIT_LIST_HEAD(&buf_list->putbuf_list); + INIT_LIST_HEAD(&buf_list->interlacebuf_list); + init_waitqueue_head(&buf_list->wait); + return 0; +} + +static int virt_ici_stream_init(struct ipu4_virtio_ctx *fe_ctx,struct virtual_stream *vstream, + struct ici_stream_device *strm_dev) +{ + int rval; + int num; + + if (!stream_dev_init) { + virt_stream_dev_t = MKDEV(MAJOR_STREAM, 0); + + rval = register_chrdev_region(virt_stream_dev_t, + MAX_STREAM_DEVICES, ICI_STREAM_DEVICE_NAME); + if (rval) { + pr_err("can't register virt_ici stream chrdev region (%d)\n", rval); + return rval; + } + + virt_stream_class = class_create(THIS_MODULE, ICI_STREAM_DEVICE_NAME); + if (IS_ERR(virt_stream_class)) { + unregister_chrdev_region(virt_stream_dev_t, 
MAX_STREAM_DEVICES); + pr_err("Failed to register device class %s\n", ICI_STREAM_DEVICE_NAME); + return PTR_ERR(virt_stream_class); + } + stream_dev_init++; + } + + num = virt_stream_devs_registered; + strm_dev->minor = -1; + cdev_init(&strm_dev->cdev, &virt_stream_fops); + strm_dev->cdev.owner = virt_stream_fops.owner; + + rval = cdev_add(&strm_dev->cdev, MKDEV(MAJOR(virt_stream_dev_t), num), 1); + if (rval) { + pr_err("%s: failed to add cdevice\n", __func__); + return rval; + } + + strm_dev->dev.class = virt_stream_class; + strm_dev->dev.devt = MKDEV(MAJOR(virt_stream_dev_t), num); + dev_set_name(&strm_dev->dev, "%s%d", ICI_STREAM_DEVICE_NAME, num); + + rval = device_register(&strm_dev->dev); + if (rval < 0) { + pr_err("%s: device_register failed\n", __func__); + cdev_del(&strm_dev->cdev); + return rval; + } + strm_dev->dev.release = base_device_release; + strlcpy(strm_dev->name, strm_dev->dev.kobj.name, sizeof(strm_dev->name)); + strm_dev->minor = num; + vstream->virt_dev_id = num; + + virt_stream_devs_registered++; + +#if 0 + fe_ctx = kcalloc(1, sizeof(struct ipu4_virtio_ctx), + GFP_KERNEL); + + if (!fe_ctx) + return -ENOMEM; + + fe_ctx->bknd_ops = &ipu4_virtio_bknd_ops; + + if (fe_ctx->bknd_ops->init) { + rval = fe_ctx->bknd_ops->init(); + if (rval < 0) { + pr_err("failed to initialize backend.\n"); + return rval; + } + } + + fe_ctx->domid = fe_ctx->bknd_ops->get_vm_id(); +#endif + if (!fe_ctx) + return -ENOMEM; + vstream->ctx = fe_ctx; + dev_dbg(&strm_dev->dev, "IPU FE registered with domid:%d\n", fe_ctx->domid); + + return 0; +} + +static void virt_ici_stream_exit(void) +{ + class_unregister(virt_stream_class); + unregister_chrdev_region(virt_stream_dev_t, MAX_STREAM_DEVICES); + + pr_notice("Virtual stream device unregistered\n"); +} + +static int virt_pipeline_fop_open(struct inode *inode, struct file *file) +{ + struct ici_isys_pipeline_device *dev = inode_to_ici_isys_pipeline_device(inode); + struct ipu4_virtio_req *req; + int rval = 0; + int op[2]; + 
pr_debug("virt pipeline open\n"); + get_device(&dev->dev); + + file->private_data = dev; + + mutex_lock(&pipeline_fop_mutex); + + if(pipeline_open) + goto exit; + + req = ipu4_virtio_fe_req_queue_get(); + if (!req) { + rval = -ENOMEM; + goto exit; + } + + op[0] = dev->minor; + op[1] = 0; + + intel_ipu4_virtio_create_req(req, IPU4_CMD_PIPELINE_OPEN, &op[0]); + + rval = g_fe_priv->bknd_ops->send_req(g_fe_priv->domid, req, true, + IPU_VIRTIO_QUEUE_0); + if (rval) { + pr_err("Failed to open virtual device\n"); + ipu4_virtio_fe_req_queue_put(req); + goto exit; + } + + pipeline_open = true; + + ipu4_virtio_fe_req_queue_put(req); + +exit: + mutex_unlock(&pipeline_fop_mutex); + + return rval; +} + +static int virt_pipeline_fop_release(struct inode *inode, struct file *file) +{ + int rval = 0; + int op[2]; + struct ipu4_virtio_req *req; + + struct ici_isys_pipeline_device *pipe_dev = + inode_to_ici_isys_pipeline_device(inode); + + put_device(&pipe_dev->dev); + + mutex_lock(&pipeline_fop_mutex); + + if(!pipeline_open) + goto exit; + + req = ipu4_virtio_fe_req_queue_get(); + if (!req) { + rval = -ENOMEM; + goto exit; + } + + op[0] = pipe_dev->minor; + op[1] = 0; + + intel_ipu4_virtio_create_req(req, IPU4_CMD_PIPELINE_CLOSE, &op[0]); + + rval = g_fe_priv->bknd_ops->send_req(g_fe_priv->domid, req, true, IPU_VIRTIO_QUEUE_0); + if (rval) { + pr_err("Failed to close virtual device\n"); + ipu4_virtio_fe_req_queue_put(req); + goto exit; + } + + pipeline_open = false; + + ipu4_virtio_fe_req_queue_put(req); + +exit: + mutex_unlock(&pipeline_fop_mutex); + + return rval; +} + +static long virt_pipeline_ioctl_common(void __user *up, + struct file *file, unsigned int ioctl_cmd, + unsigned long ioctl_arg) +{ + union isys_ioctl_cmd_args { + struct ici_node_desc node_desc; + struct ici_link_desc link; + struct ici_pad_framefmt pad_prop; + struct ici_pad_supported_format_desc + format_desc; + struct ici_links_query links_query; + struct ici_pad_selection pad_sel; + }; + int err = 0; + union 
isys_ioctl_cmd_args *data = NULL; + struct ici_isys_pipeline_device *dev = file->private_data; + + if (_IOC_SIZE(ioctl_cmd) > sizeof(union isys_ioctl_cmd_args)) + return -ENOTTY; + + data = (union isys_ioctl_cmd_args *) kzalloc(sizeof(union isys_ioctl_cmd_args), GFP_KERNEL); + if (_IOC_DIR(ioctl_cmd) & _IOC_WRITE) { + err = copy_from_user(data, up, + _IOC_SIZE(ioctl_cmd)); + if (err) { + kfree(data); + return -EFAULT; + } + } + mutex_lock(&dev->mutex); + switch (ioctl_cmd) { + case ICI_IOC_ENUM_NODES: + err = process_pipeline(file, g_fe_priv, + (void *)&data->node_desc, IPU4_CMD_ENUM_NODES); + break; + case ICI_IOC_ENUM_LINKS: + pr_debug("virt_pipeline_ioctl: ICI_IOC_ENUM_LINKS\n"); + err = process_pipeline(file, g_fe_priv, (void *)&data->links_query, IPU4_CMD_ENUM_LINKS); + break; + case ICI_IOC_SETUP_PIPE: + pr_debug("virt_pipeline_ioctl: ICI_IOC_SETUP_PIPE\n"); + err = process_pipeline(file, g_fe_priv, + (void *)&data->link, IPU4_CMD_SETUP_PIPE); + break; + case ICI_IOC_SET_FRAMEFMT: + pr_debug("virt_pipeline_ioctl: ICI_IOC_SET_FRAMEFMT\n"); + err = process_pipeline(file, g_fe_priv, + (void *)&data->pad_prop, IPU4_CMD_SET_FRAMEFMT); + break; + case ICI_IOC_GET_FRAMEFMT: + pr_debug("virt_pipeline_ioctl: ICI_IOC_GET_FRAMEFMT\n"); + err = process_pipeline(file, g_fe_priv, + (void *)&data->pad_prop, IPU4_CMD_GET_FRAMEFMT); + break; + case ICI_IOC_GET_SUPPORTED_FRAMEFMT: + pr_debug("virt_pipeline_ioctl: ICI_IOC_GET_SUPPORTED_FRAMEFMT\n"); + err = process_pipeline(file, g_fe_priv, + (void *)&data->format_desc, IPU4_CMD_GET_SUPPORTED_FRAMEFMT); + break; + case ICI_IOC_SET_SELECTION: + pr_debug("virt_pipeline_ioctl: ICI_IOC_SET_SELECTION\n"); + err = process_pipeline(file, g_fe_priv, + (void *)&data->pad_sel, IPU4_CMD_SET_SELECTION); + break; + case ICI_IOC_GET_SELECTION: + pr_debug("virt_pipeline_ioctl: ICI_IOC_GET_SELECTION\n"); + err = process_pipeline(file, g_fe_priv, + (void *)&data->pad_sel, IPU4_CMD_GET_SELECTION); + break; + default: + err = -ENOTTY; + break; + 
} + + mutex_unlock(&dev->mutex); + if (err < 0) { + kfree(data); + return err; + } + + if (_IOC_DIR(ioctl_cmd) & _IOC_READ) { + err = copy_to_user(up, data, + _IOC_SIZE(ioctl_cmd)); + if (err) { + kfree(data); + return -EFAULT; + } + } + kfree(data); + + return 0; +} + +static long virt_pipeline_ioctl(struct file *file, unsigned int ioctl_cmd, + unsigned long ioctl_arg) +{ + void __user *up = (void __user *)ioctl_arg; + return virt_pipeline_ioctl_common(up, file, ioctl_cmd, ioctl_arg); +} + +static long virt_pipeline_ioctl32(struct file *file, unsigned int ioctl_cmd, + unsigned long ioctl_arg) +{ + void __user *up = compat_ptr(ioctl_arg); + return virt_pipeline_ioctl_common(up, file, ioctl_cmd, ioctl_arg); +} + +static const struct file_operations virt_pipeline_fops = { + .owner = THIS_MODULE, + .open = virt_pipeline_fop_open, + .unlocked_ioctl = virt_pipeline_ioctl, +#ifdef CONFIG_COMPAT + .compat_ioctl = virt_pipeline_ioctl32, +#endif + .release = virt_pipeline_fop_release, +}; + +static int virt_fe_init(void) +{ + int rval; + + g_fe_priv = kcalloc(1, sizeof(struct ipu4_virtio_ctx), + GFP_KERNEL); + + if (!g_fe_priv) + return -ENOMEM; + + g_fe_priv->bknd_ops = &ipu4_virtio_bknd_ops; + + if (g_fe_priv->bknd_ops->init) { + rval = g_fe_priv->bknd_ops->init(); + if (rval < 0) { + pr_err("failed to initialize backend.\n"); + return rval; + } + } + + g_fe_priv->domid = g_fe_priv->bknd_ops->get_vm_id(); + + pr_debug("FE registered with domid:%d\n", g_fe_priv->domid); + + return 0; +} + +static int virt_ici_pipeline_init(void) +{ + int rval; + pr_notice("Initializing pipeline\n"); + virt_pipeline_dev_t = MKDEV(MAJOR_PIPELINE, 0); + + rval = register_chrdev_region(virt_pipeline_dev_t, + MAX_PIPELINE_DEVICES, ICI_PIPELINE_DEVICE_NAME); + if (rval) { + pr_err("can't register virt_ici stream chrdev region (%d)\n", + rval); + return rval; + } + + virt_pipeline_class = class_create(THIS_MODULE, ICI_PIPELINE_DEVICE_NAME); + if (IS_ERR(virt_pipeline_class)) { + 
unregister_chrdev_region(virt_pipeline_dev_t, MAX_PIPELINE_DEVICES); + pr_err("Failed to register device class %s\n", ICI_PIPELINE_DEVICE_NAME); + return PTR_ERR(virt_pipeline_class); + } + + pipeline_dev = kzalloc(sizeof(*pipeline_dev), GFP_KERNEL); + if (!pipeline_dev) + return -ENOMEM; + pipeline_dev->minor = -1; + cdev_init(&pipeline_dev->cdev, &virt_pipeline_fops); + pipeline_dev->cdev.owner = virt_pipeline_fops.owner; + + rval = cdev_add(&pipeline_dev->cdev, MKDEV(MAJOR_PIPELINE, MINOR_PIPELINE), 1); + if (rval) { + pr_err("%s: failed to add cdevice\n", __func__); + return rval; + } + + pipeline_dev->dev.class = virt_pipeline_class; + pipeline_dev->dev.devt = MKDEV(MAJOR_PIPELINE, MINOR_PIPELINE); + dev_set_name(&pipeline_dev->dev, "%s", ICI_PIPELINE_DEVICE_NAME); + + rval = device_register(&pipeline_dev->dev); + if (rval < 0) { + pr_err("%s: device_register failed\n", __func__); + cdev_del(&pipeline_dev->cdev); + return rval; + } + pipeline_dev->dev.release = base_device_release; + strlcpy(pipeline_dev->name, pipeline_dev->dev.kobj.name, sizeof(pipeline_dev->name)); + pipeline_dev->minor = MINOR_PIPELINE; + mutex_init(&pipeline_dev->mutex); + mutex_init(&pipeline_fop_mutex); + pipeline_open = false; + + return 0; +} + +static int virt_ici_init(struct ipu4_virtio_ctx *fe_ctx) +{ + struct virtual_stream *vstream; + int rval = 0, i; + pr_notice("Initializing IPU Para virtual driver\n"); + for (i = 0; i < MAX_ISYS_VIRT_STREAM; i++) { + + vstream = kzalloc(sizeof(*vstream), GFP_KERNEL); + if (!vstream) + return -ENOMEM; + mutex_init(&vstream->mutex); + vstream->strm_dev.mutex = &vstream->mutex; + + rval = virt_frame_buf_init(&vstream->buf_list); + if (rval) + goto init_fail; + + dev_set_drvdata(&vstream->strm_dev.dev, vstream); + + mutex_lock(&vstream->mutex); + rval = virt_ici_stream_init(fe_ctx,vstream, &vstream->strm_dev); + mutex_unlock(&vstream->mutex); + + if (rval) + goto init_fail; + } + + rval = virt_ici_pipeline_init(); + if (rval) + goto init_fail; + + 
return rval; + +init_fail: + mutex_destroy(&vstream->mutex); + kfree(vstream); + return rval; +} +static int virt_fe_probe(void) +{ + int rval = 0; + rval = ipu4_virtio_fe_req_queue_init(); + if (rval) { + pr_err("FE Ring queue initialization failed\n"); + return rval; + } + rval = virt_fe_init(); + if (rval) { + pr_err("FE initialization failed\n"); + return rval; + } + + return rval; +} +static int virt_fe_remove(void) +{ + ipu4_virtio_fe_req_queue_free(); + return 0; +} +static void virt_ici_pipeline_exit(void) +{ + class_unregister(virt_pipeline_class); + unregister_chrdev_region(virt_pipeline_dev_t, MAX_PIPELINE_DEVICES); + if (pipeline_dev) + kfree((void *)pipeline_dev); + if (g_fe_priv) + kfree((void *)g_fe_priv); + + pr_notice("virt_ici pipeline device unregistered\n"); +} +static void virt_ici_exit(void) +{ + virt_ici_stream_exit(); + virt_ici_pipeline_exit(); +} +static int __init virt_ipu_init(void) +{ + int rval = 0; + + rval = virt_fe_probe(); + if(rval) + return rval; + + rval = virt_ici_init(g_fe_priv); + if(rval) + pr_warn("ipu virt: ISYS init failed\n"); + + rval = virt_psys_init(g_fe_priv); + if(rval) + pr_warn("ipu virt: PSYS init failed\n"); + + return rval; +} +static void __exit virt_ipu_exit(void) +{ + virt_fe_remove(); + virt_ici_exit(); + virt_psys_exit(); +} + +module_init(virt_ipu_init); +module_exit(virt_ipu_exit); + +MODULE_LICENSE("Dual BSD/GPL"); +MODULE_DESCRIPTION("Intel IPU Para virtualize ici input system driver"); +MODULE_AUTHOR("Kushal Bandi "); + + diff --git a/drivers/media/pci/intel/virtio/intel-ipu4-para-virt-drv.h b/drivers/media/pci/intel/virtio/intel-ipu4-para-virt-drv.h new file mode 100644 index 0000000000000..f32c3e9bb30b5 --- /dev/null +++ b/drivers/media/pci/intel/virtio/intel-ipu4-para-virt-drv.h @@ -0,0 +1,34 @@ +/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0) */ +/* + * Copyright (C) 2018 Intel Corporation + */ + +#ifndef INTEL_IPU4_PARA_VIRT_H +#define INTEL_IPU4_PARA_VIRT_H + +#include +#include +#include 
+#include +#include +#include +#include + +#include "./ici/ici-isys-stream-device.h" +#include "./ici/ici-isys-frame-buf.h" +#include "intel-ipu4-virtio-common.h" +#include "intel-ipu4-para-virt-psys.h" +struct virtual_stream { + struct mutex mutex; + struct ici_stream_device strm_dev; + int virt_dev_id; + int actual_fd; + struct ipu4_virtio_ctx *ctx; + struct ici_isys_frame_buf_list buf_list; +}; + + +#define dev_to_vstream(dev) \ + container_of(dev, struct virtual_stream, strm_dev) + +#endif diff --git a/drivers/media/pci/intel/virtio/intel-ipu4-para-virt-psys.c b/drivers/media/pci/intel/virtio/intel-ipu4-para-virt-psys.c new file mode 100644 index 0000000000000..939fc62496c36 --- /dev/null +++ b/drivers/media/pci/intel/virtio/intel-ipu4-para-virt-psys.c @@ -0,0 +1,869 @@ +// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0) +/* + * Copyright (C) 2018 Intel Corporation + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 14, 0) +#include +#else +#include +#endif +#include +#include +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 8, 0) +#include +#else +#include +#endif + +#include "intel-ipu4-para-virt-psys.h" +#include "intel-ipu4-virtio-common.h" +#include "intel-ipu4-virtio-common-psys.h" +#include "intel-ipu4-virtio-fe-request-queue.h" +#include "intel-ipu4-virtio-fe-payload.h" + +#define FD_MAX_SIZE 8 +#define IPU_PSYS_NUM_DEVICES 4 +#define IPU_PSYS_NAME "intel-ipu4-psys" + +DECLARE_HASHTABLE(FD_BUF_HASH, FD_MAX_SIZE); + +#ifdef CONFIG_COMPAT +extern long virt_psys_compat_ioctl32(struct file *file, unsigned int cmd, + unsigned long arg); +#endif + +static dev_t virt_psys_dev_t; +static struct virt_ipu_psys *g_psys; +static struct class *virt_psys_class; + +static DECLARE_BITMAP(virt_psys_devices, IPU_PSYS_NUM_DEVICES); +static DEFINE_MUTEX(psys_mutex); + +int ipu_get_manifest(struct ipu_psys_manifest *m, + struct 
virt_ipu_psys_fh *fh) +{ + struct virt_ipu_psys *psys = fh->psys; + struct ipu4_virtio_req *req; + struct ipu4_virtio_ctx *fe_ctx = psys->ctx; + struct ipu_psys_manifest_wrap *manifest_wrap; + int rval = 0; + void *manifest_data; + + pr_debug("%s: processing start", __func__); + + manifest_wrap = kzalloc(sizeof(struct ipu_psys_manifest_wrap), + GFP_KERNEL); + + manifest_wrap->psys_manifest = virt_to_phys(m); + + //since the manifest memory is allocated by user space + //and the struct ia_cipr_buffer_t is not expose to + //driver. We assume the size is less than 1 page and + //allocate the max. + manifest_data = kzalloc(PAGE_SIZE, GFP_KERNEL); + manifest_wrap->manifest_data = virt_to_phys(manifest_data); + + req = ipu4_virtio_fe_req_queue_get(); + if (!req) + return -ENOMEM; + + req->payload = virt_to_phys(manifest_wrap); + + intel_ipu4_virtio_create_req(req, IPU4_CMD_PSYS_GET_MANIFEST, NULL); + + req->be_fh = fh->be_fh; + + rval = fe_ctx->bknd_ops->send_req(fe_ctx->domid, req, true, + IPU_VIRTIO_QUEUE_1); + if (rval) { + pr_err("%s: Failed to get manifest", __func__); + goto error_exit; + } + + if (m->manifest != NULL && copy_to_user(m->manifest, + manifest_data, + m->size)) { + pr_err("%s: Failed copy_to_user", __func__); + rval = -EFAULT; + goto error_exit; + } + +error_exit: + + kfree(manifest_data); + kfree(manifest_wrap); + + rval = req->func_ret; + + ipu4_virtio_fe_req_queue_put(req); + + pr_debug("%s: processing ended %d", __func__, rval); + + return rval; +} + +int ipu_query_caps(struct ipu_psys_capability *caps, + struct virt_ipu_psys_fh *fh) +{ + struct virt_ipu_psys *psys = fh->psys; + struct ipu4_virtio_req *req; + struct ipu4_virtio_ctx *fe_ctx = psys->ctx; + int rval = 0; + + pr_debug("%s: processing start", __func__); + + req = ipu4_virtio_fe_req_queue_get(); + if (!req) + return -ENOMEM; + + req->payload = virt_to_phys(caps); + + intel_ipu4_virtio_create_req(req, IPU4_CMD_PSYS_QUERYCAP, NULL); + + req->be_fh = fh->be_fh; + + rval = 
fe_ctx->bknd_ops->send_req(fe_ctx->domid, req, true, + IPU_VIRTIO_QUEUE_1); + if (rval) { + pr_err("%s: Failed to query capability", __func__); + ipu4_virtio_fe_req_queue_put(req); + return rval; + } + + rval = req->func_ret; + + ipu4_virtio_fe_req_queue_put(req); + + pr_debug("%s: processing ended %d", __func__, rval); + + return rval; +} + +int ipu_psys_kcmd_new(struct ipu_psys_command *cmd, + struct virt_ipu_psys_fh *fh) +{ + struct virt_ipu_psys *psys = fh->psys; + struct ipu4_virtio_req *req; + struct ipu4_virtio_ctx *fe_ctx = psys->ctx; + struct ipu_psys_command_wrap *cmd_wrap = NULL; + struct ipu_psys_buffer *psys_buffers = NULL; + void *pg_manifest = NULL; + + int rval = 0; + + pr_debug("%s: processing start", __func__); + + req = ipu4_virtio_fe_req_queue_get(); + if (!req) + return -ENOMEM; + + cmd_wrap = kzalloc(sizeof(struct ipu_psys_command_wrap), + GFP_KERNEL); + + /* Allocate for pg_manifest */ + pg_manifest = kzalloc(cmd->pg_manifest_size, GFP_KERNEL); + + /* Copy data from user */ + if (copy_from_user(pg_manifest, + cmd->pg_manifest, + cmd->pg_manifest_size)) { + pr_err("%s, Failed copy_from_user", __func__); + rval = -EFAULT; + goto error_exit; + } + + + /* Map pg_manifest to physical address */ + cmd_wrap->psys_manifest = virt_to_phys(pg_manifest); + + /* Map ipu_psys_command to physical address */ + cmd_wrap->psys_command = virt_to_phys(cmd); + + psys_buffers = kcalloc(cmd->bufcount, + sizeof(struct ipu_psys_buffer), + GFP_KERNEL); + + if (copy_from_user(psys_buffers, + cmd->buffers, + cmd->bufcount * sizeof(struct ipu_psys_buffer))) { + pr_err("%s, Failed copy_from_user", __func__); + rval = -EFAULT; + goto error_exit; + } + + /* Map ipu_psys_buffer to physical address */ + cmd_wrap->psys_buffer = virt_to_phys(psys_buffers); + + req->payload = virt_to_phys(cmd_wrap); + + intel_ipu4_virtio_create_req(req, IPU4_CMD_PSYS_QCMD, NULL); + + req->be_fh = fh->be_fh; + + rval = fe_ctx->bknd_ops->send_req(fe_ctx->domid, req, true, + IPU_VIRTIO_QUEUE_1); + 
+ if (rval) { + pr_err("%s: Failed to queue command", __func__); + goto error_exit; + } + +error_exit: + if (pg_manifest) kfree(pg_manifest); + if (cmd_wrap) kfree(cmd_wrap); + if (psys_buffers) kfree(psys_buffers); + + rval = req->func_ret; + + ipu4_virtio_fe_req_queue_put(req); + + return rval; +} + +int psys_get_userpages(struct ipu_psys_buffer *buf, + struct ipu_psys_usrptr_map *map) +{ + struct vm_area_struct *vma; + unsigned long start, end; + int npages, array_size; + struct page **pages; + u64 *page_table; + int nr = 0, i; + int ret = -ENOMEM; + + start = (unsigned long)buf->base.userptr; + end = PAGE_ALIGN(start + buf->len); + npages = (end - (start & PAGE_MASK)) >> PAGE_SHIFT; + array_size = npages * sizeof(struct page *); + + page_table = kcalloc(npages, sizeof(*page_table), GFP_KERNEL); + if (!page_table) { + pr_err("%s: Shared Page table for mediation failed", __func__); + return -ENOMEM; + } + + if (array_size <= PAGE_SIZE) + pages = kzalloc(array_size, GFP_KERNEL); + else + pages = vzalloc(array_size); + if (!pages) { + pr_err("%s: failed to get userpages:%d", __func__, -ENOMEM); + ret = -ENOMEM; + goto exit_page_table; + } + + down_read(¤t->mm->mmap_sem); + vma = find_vma(current->mm, start); + if (!vma) { + ret = -EFAULT; + goto exit_up_read; + } + + if (vma->vm_end < start + buf->len) { + pr_err("%s: vma at %lu is too small for %llu bytes", + __func__, start, buf->len); + ret = -EFAULT; + goto exit_up_read; + } + + /* + * For buffers from Gralloc, VM_PFNMAP is expected, + * but VM_IO is set. Possibly bug in Gralloc. 
+ */ + map->vma_is_io = vma->vm_flags & (VM_IO | VM_PFNMAP); + + if (map->vma_is_io) { + unsigned long io_start = start; + + for (nr = 0; nr < npages; nr++, io_start += PAGE_SIZE) { + unsigned long pfn; + + ret = follow_pfn(vma, io_start, &pfn); + if (ret) + goto exit_up_read; + pages[nr] = pfn_to_page(pfn); + } + } else { + nr = get_user_pages( +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 6, 0) + current, current->mm, +#endif + start & PAGE_MASK, npages, +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 9, 0) + 1, 0, +#else + FOLL_WRITE, +#endif + pages, NULL); + if (nr < npages) + goto exit_pages; + } + + for (i = 0; i < npages; i++) + page_table[i] = page_to_phys(pages[i]); + + map->page_table_ref = virt_to_phys(page_table); + map->len = buf->len; + map->userptr = buf->base.userptr; + + up_read(¤t->mm->mmap_sem); + + map->npages = npages; + + if (array_size <= PAGE_SIZE) + kfree(pages); + else + vfree(pages); + + return 0; + +exit_pages: + if (!map->vma_is_io) + while (nr > 0) + put_page(pages[--nr]); + + if (array_size <= PAGE_SIZE) + kfree(pages); + else + vfree(pages); +exit_up_read: + up_read(¤t->mm->mmap_sem); +exit_page_table: + kfree(page_table); + + return ret; +} + +static void psys_put_userpages(struct ipu_psys_usrptr_map *map) +{ + unsigned long start, end; + int npages, i; + u64 *page_table; + struct page *pages; + struct mm_struct* mm; + + start = (unsigned long)map->userptr; + end = PAGE_ALIGN(start + map->len); + npages = (end - (start & PAGE_MASK)) >> PAGE_SHIFT; + + mm = current->active_mm; + if (!mm){ + pr_err("%s: Failed to get active mm_struct ptr from current process", + __func__); + return; + } + + down_read(&mm->mmap_sem); + + page_table = phys_to_virt(map->page_table_ref); + for (i = 0; i < npages; i++) { + pages = phys_to_page(page_table[i]); + set_page_dirty_lock(pages); + put_page(pages); + } + + kfree(page_table); + + up_read(&mm->mmap_sem); +} + +static struct ipu_psys_buffer_wrap *ipu_psys_buf_lookup( + int fd) +{ + struct 
ipu_psys_buffer_wrap *psys_buf_wrap; + + hash_for_each_possible(FD_BUF_HASH, psys_buf_wrap, node, fd) { + if (psys_buf_wrap) + return psys_buf_wrap; + } + + return NULL; +} + +int ipu_psys_getbuf(struct ipu_psys_buffer *buf, + struct virt_ipu_psys_fh *fh) +{ + struct virt_ipu_psys *psys = fh->psys; + struct ipu4_virtio_req *req; + struct ipu4_virtio_ctx *fe_ctx = psys->ctx; + struct ipu_psys_buffer_wrap *attach; + int rval = 0; + + pr_debug("%s: processing start", __func__); + + req = ipu4_virtio_fe_req_queue_get(); + if (!req) + return -ENOMEM; + + attach = kzalloc(sizeof(struct ipu_psys_buffer_wrap), + GFP_KERNEL); + + attach->psys_buf = virt_to_phys(buf); + + rval = psys_get_userpages(buf, &attach->map); + if (rval) { + req->func_ret = rval; + goto error_exit; + } + + req->payload = virt_to_phys(attach); + + intel_ipu4_virtio_create_req(req, IPU4_CMD_PSYS_GETBUF, NULL); + + req->be_fh = fh->be_fh; + + rval = fe_ctx->bknd_ops->send_req(fe_ctx->domid, req, true, + IPU_VIRTIO_QUEUE_1); + if (rval) { + pr_err("%s: Failed to get buf", __func__); + psys_put_userpages(&attach->map); + goto error_exit; + } + + mutex_lock(&fh->mutex); + if(!ipu_psys_buf_lookup(buf->base.fd)) { + hash_add(FD_BUF_HASH, &attach->node, buf->base.fd); + } + mutex_unlock(&fh->mutex); + + goto exit; + +error_exit: + + kfree(attach); + +exit: + + rval = req->func_ret; + + ipu4_virtio_fe_req_queue_put(req); + + pr_debug("%s: processing ended %d", __func__, rval); + + return rval; +} + +int ipu_psys_unmapbuf(int fd, struct virt_ipu_psys_fh *fh) +{ + struct virt_ipu_psys *psys = fh->psys; + struct ipu4_virtio_req *req; + struct ipu4_virtio_ctx *fe_ctx = psys->ctx; + struct ipu_psys_buffer_wrap *psys_buf_wrap; + int rval = 0, op[1]; + + pr_debug("%s: processing start", __func__); + + req = ipu4_virtio_fe_req_queue_get(); + if (!req) + return -ENOMEM; + + op[0] = fd; + + intel_ipu4_virtio_create_req(req, IPU4_CMD_PSYS_UNMAPBUF, &op[0]); + + req->be_fh = fh->be_fh; + + rval = 
fe_ctx->bknd_ops->send_req(fe_ctx->domid, req, true, + IPU_VIRTIO_QUEUE_1); + if (rval) { + pr_err("%s: Failed to unmapbuf", __func__); + goto error_exit; + } + + mutex_lock(&fh->mutex); + psys_buf_wrap = ipu_psys_buf_lookup(fd); + if (psys_buf_wrap) { + psys_put_userpages(&psys_buf_wrap->map); + hash_del(&psys_buf_wrap->node); + kfree(psys_buf_wrap); + } + mutex_unlock(&fh->mutex); + +error_exit: + + rval = req->func_ret; + + ipu4_virtio_fe_req_queue_put(req); + + pr_debug("%s: processing ended %d", __func__, rval); + + return rval; +} + +unsigned int virt_psys_poll(struct file *file, + struct poll_table_struct *wait) +{ + struct virt_ipu_psys_fh *fh = file->private_data; + struct virt_ipu_psys *psys = fh->psys; + struct ipu4_virtio_req *req; + struct ipu4_virtio_ctx *fe_ctx = psys->ctx; + int rval = 0; + + pr_debug("%s: processing start", __func__); + + req = ipu4_virtio_fe_req_queue_get(); + if (!req) + return -ENOMEM; + + intel_ipu4_virtio_create_req(req, IPU4_CMD_PSYS_POLL, NULL); + + req->be_fh = fh->be_fh; + + rval = fe_ctx->bknd_ops->send_req(fe_ctx->domid, req, true, + IPU_VIRTIO_QUEUE_1); + if (rval) { + pr_err("%s: Failed psys polling", __func__); + ipu4_virtio_fe_req_queue_put(req); + return rval; + } + + rval = req->func_ret; + + ipu4_virtio_fe_req_queue_put(req); + + pr_debug("%s: processing ended %d", __func__, rval); + + return rval; +} + +long ipu_ioctl_dqevent(struct ipu_psys_event *event, + struct virt_ipu_psys_fh *fh, unsigned int f_flags) +{ + struct virt_ipu_psys *psys = fh->psys; + struct ipu4_virtio_req *req; + struct ipu4_virtio_ctx *fe_ctx = psys->ctx; + int rval = 0; + + pr_debug("%s: processing start", __func__); + + req = ipu4_virtio_fe_req_queue_get(); + if (!req) + return -ENOMEM; + + req->payload = virt_to_phys(event); + + intel_ipu4_virtio_create_req(req, IPU4_CMD_PSYS_DQEVENT, NULL); + + req->be_fh = fh->be_fh; + + rval = fe_ctx->bknd_ops->send_req(fe_ctx->domid, req, true, + IPU_VIRTIO_QUEUE_1); + if (rval) { + pr_err("%s: Failed 
to dqevent", __func__); + goto error_exit; + } + +error_exit: + + rval = req->func_ret; + + ipu4_virtio_fe_req_queue_put(req); + + pr_debug("%s: processing ended %d", __func__, rval); + + return rval; +} + +static long virt_psys_ioctl(struct file *file, unsigned int cmd, + unsigned long arg) +{ + union kargs { + struct ipu_psys_buffer buf; + struct ipu_psys_command cmd; + struct ipu_psys_event ev; + struct ipu_psys_capability caps; + struct ipu_psys_manifest m; + }; + int err = 0; + union kargs *data = NULL; + + struct virt_ipu_psys_fh *fh = file->private_data; + if(fh == NULL) + return -EFAULT; + void __user *up = (void __user *)arg; + bool copy = (cmd != IPU_IOC_MAPBUF && cmd != IPU_IOC_UNMAPBUF); + + if (copy) { + if (_IOC_SIZE(cmd) > sizeof(union kargs)) { + pr_err("%s: the incoming object size it too large! %d %d", + __func__, _IOC_SIZE(cmd), cmd); + return -ENOTTY; + } + + data = (union kargs *) kzalloc(sizeof(union kargs), GFP_KERNEL); + if (_IOC_DIR(cmd) & _IOC_WRITE) { + err = copy_from_user(data, up, _IOC_SIZE(cmd)); + if (err) { + pr_err("%s: failed to copy from user space! 
%d", + __func__, cmd); + kfree(data); + return -EFAULT; + } + } + } + switch (cmd) { + case IPU_IOC_MAPBUF: + pr_debug("%s: IPU_IOC_MAPBUF", __func__); + // mapbuf combined with getbuf + break; + case IPU_IOC_UNMAPBUF: + pr_debug("%s: IPU_IOC_UNMAPBUF", __func__); + err = ipu_psys_unmapbuf(arg, fh); + break; + case IPU_IOC_QUERYCAP: + pr_debug("%s: IPU_IOC_QUERYCAP", __func__); + err = ipu_query_caps(&data->caps, fh); + break; + case IPU_IOC_GETBUF: + pr_debug("%s: IPU_IOC_GETBUF", __func__); + err = ipu_psys_getbuf(&data->buf, fh); + break; + case IPU_IOC_PUTBUF: + pr_debug("%s: IPU_IOC_PUTBUF", __func__); + //err = ipu_psys_putbuf(&karg.buf, fh); + break; + case IPU_IOC_QCMD: + pr_debug("%s: IPU_IOC_QCMD", __func__); + err = ipu_psys_kcmd_new(&data->cmd, fh); + break; + case IPU_IOC_DQEVENT: + pr_debug("%s: IPU_IOC_DQEVENT", __func__); + err = ipu_ioctl_dqevent(&data->ev, fh, file->f_flags); + break; + case IPU_IOC_GET_MANIFEST: + pr_debug("%s: IPU_IOC_GET_MANIFEST", __func__); + err = ipu_get_manifest(&data->m, fh); + break; + default: + err = -ENOTTY; + break; + } + + if (!err && copy && _IOC_DIR(cmd) & _IOC_READ) { + err = copy_to_user(up, data, _IOC_SIZE(cmd)); + kfree(data); + } + + pr_debug("%s: return status %d", __func__, err); + + if (err) + return err; + + return 0; +} + +static int virt_psys_open(struct inode *inode, struct file *file) +{ + struct virt_ipu_psys *psys = inode_to_ipu_psys(inode); + struct virt_ipu_psys_fh *fh; + struct ipu4_virtio_req *req; + struct ipu4_virtio_ctx *fe_ctx = psys->ctx; + int rval = 0; + unsigned int op[1]; + + pr_debug("virt psys open\n"); + + fh = kzalloc(sizeof(*fh), GFP_KERNEL); + if (!fh) + return -ENOMEM; + mutex_init(&fh->bs_mutex); + INIT_LIST_HEAD(&fh->bufmap); + hash_init(FD_BUF_HASH); + + fh->psys = psys; + + req = ipu4_virtio_fe_req_queue_get(); + if (!req) { + dev_err(&psys->dev, "Virtio Req buffer failed\n"); + return -ENOMEM; + } + + op[0] = file->f_flags; + + intel_ipu4_virtio_create_req(req, 
IPU4_CMD_PSYS_OPEN, &op[0]); + + rval = fe_ctx->bknd_ops->send_req(fe_ctx->domid, req, true, + IPU_VIRTIO_QUEUE_1); + if (rval) { + dev_err(&psys->dev, "Failed to PSYS open virtual device\n"); + ipu4_virtio_fe_req_queue_put(req); + return rval; + } + + fh->be_fh = req->be_fh; + file->private_data = fh; + + ipu4_virtio_fe_req_queue_put(req); + + return rval; +} + +static int virt_psys_release(struct inode *inode, struct file *file) +{ + struct virt_ipu_psys *psys = inode_to_ipu_psys(inode); + struct ipu4_virtio_req *req; + struct ipu4_virtio_ctx *fe_ctx = psys->ctx; + struct ipu_psys_buffer_wrap *psys_buf_wrap; + struct hlist_node *tmp; + struct virt_ipu_psys_fh *fh = file->private_data; + int rval = 0, bkt; + + pr_debug("%s: processing start", __func__); + + req = ipu4_virtio_fe_req_queue_get(); + if (!req) { + dev_err(&psys->dev, "Virtio Req buffer failed\n"); + return -ENOMEM; + } + + intel_ipu4_virtio_create_req(req, IPU4_CMD_PSYS_CLOSE, NULL); + + req->be_fh = fh->be_fh; + + rval = fe_ctx->bknd_ops->send_req(fe_ctx->domid, req, true, + IPU_VIRTIO_QUEUE_1); + if (rval) { + dev_err(&psys->dev, "Failed to PSYS close virtual device\n"); + ipu4_virtio_fe_req_queue_put(req); + return rval; + } + ipu4_virtio_fe_req_queue_put(req); + + mutex_lock(&fh->mutex); + /* clean up buffers */ + if(!hash_empty(FD_BUF_HASH)) { + hash_for_each_safe(FD_BUF_HASH, bkt, tmp, + psys_buf_wrap, node) { + psys_put_userpages(&psys_buf_wrap->map); + hash_del(&psys_buf_wrap->node); + kfree(psys_buf_wrap); + } + } + mutex_unlock(&fh->mutex); + + kfree(file->private_data); + + return rval; +} + +static const struct file_operations virt_psys_fops = { + .open = virt_psys_open, + .release = virt_psys_release, + .unlocked_ioctl = virt_psys_ioctl, +#ifdef CONFIG_COMPAT + .compat_ioctl = virt_psys_compat_ioctl32, +#endif + .poll = virt_psys_poll, + .owner = THIS_MODULE, +}; + +static void virt_psys_dev_release(struct device *dev) +{ +} +void virt_psys_exit(void) +{ + 
class_unregister(virt_psys_class); + unregister_chrdev_region(virt_psys_dev_t, IPU_PSYS_NUM_DEVICES); + if (g_psys) + kfree(g_psys); + + + device_unregister(&g_psys->dev); + + clear_bit(MINOR(g_psys->cdev.dev), virt_psys_devices); + + cdev_del(&g_psys->cdev); + + mutex_destroy(&g_psys->mutex); + + pr_notice("Virtual psys device unregistered\n"); + +} + +int virt_psys_init(struct ipu4_virtio_ctx *fe_ctx) +{ + unsigned int minor; + int rval = -E2BIG; + + if (!fe_ctx) + return -ENOMEM; + + rval = alloc_chrdev_region(&virt_psys_dev_t, 0, + IPU_PSYS_NUM_DEVICES, IPU_PSYS_NAME); + if (rval) { + pr_err("can't alloc psys chrdev region (%d)\n", rval); + return rval; + } + mutex_lock(&psys_mutex); + + virt_psys_class = class_create(THIS_MODULE, IPU_PSYS_NAME); + if (IS_ERR(virt_psys_class)) { + unregister_chrdev_region(virt_psys_dev_t, IPU_PSYS_NUM_DEVICES); + pr_err("Failed to register device class %s\n", IPU_PSYS_NAME); + return PTR_ERR(virt_psys_class); + } + + minor = find_next_zero_bit(virt_psys_devices, IPU_PSYS_NUM_DEVICES, 0); + if (minor == IPU_PSYS_NUM_DEVICES) { + pr_err("too many devices\n"); + goto out_unlock; + } + + g_psys = kzalloc(sizeof(*g_psys), GFP_KERNEL); + if (!g_psys) { + rval = -ENOMEM; + goto out_unlock; + } + + cdev_init(&g_psys->cdev, &virt_psys_fops); + g_psys->cdev.owner = virt_psys_fops.owner; + + rval = cdev_add(&g_psys->cdev, MKDEV(MAJOR(virt_psys_dev_t), minor), 1); + if (rval) { + pr_err("cdev_add failed (%d)\n", rval); + goto out_unlock; + } + + set_bit(minor, virt_psys_devices); + + mutex_init(&g_psys->mutex); + + g_psys->dev.class = virt_psys_class; + g_psys->dev.devt = MKDEV(MAJOR(virt_psys_dev_t), minor); + g_psys->dev.release = virt_psys_dev_release; + dev_set_name(&g_psys->dev, "ipu-psys%d", minor); + rval = device_register(&g_psys->dev); + if (rval < 0) { + dev_err(&g_psys->dev, "psys device_register failed\n"); + goto out_mutex_destroy; + } + + g_psys->ctx = fe_ctx; + + pr_info("psys probe minor: %d\n", minor); + + goto out_unlock; 
+ +out_mutex_destroy: + mutex_destroy(&g_psys->mutex); + cdev_del(&g_psys->cdev); +out_unlock: + mutex_unlock (&psys_mutex); + return rval; +} + diff --git a/drivers/media/pci/intel/virtio/intel-ipu4-para-virt-psys.h b/drivers/media/pci/intel/virtio/intel-ipu4-para-virt-psys.h new file mode 100644 index 0000000000000..8df029ad37300 --- /dev/null +++ b/drivers/media/pci/intel/virtio/intel-ipu4-para-virt-psys.h @@ -0,0 +1,42 @@ +/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0) */ +/* + * Copyright (C) 2018 Intel Corporation + */ + +#ifndef INTEL_IPU4_PARA_VIRT_PSYS_H_ +#define INTEL_IPU4_PARA_VIRT_PSYS_H_ + +#include +#include + +#define IPU_MEDIA_DEV_MODEL_NAME "ipu4/Broxton B" + +struct virt_ipu_psys { + struct cdev cdev; + struct device dev; + struct mutex mutex; + struct ipu4_virtio_ctx *ctx; +}; + +struct virt_ipu_psys_fh { + struct virt_ipu_psys *psys; + struct mutex mutex; /* Protects bufmap & kcmds fields */ + struct list_head list; + struct list_head bufmap; + struct list_head kcmds[IPU_PSYS_CMD_PRIORITY_NUM]; + struct ipu_psys_kcmd + *new_kcmd_tail[IPU_PSYS_CMD_PRIORITY_NUM]; + wait_queue_head_t wait; + struct mutex bs_mutex; /* Protects buf_set field */ + struct list_head buf_sets; + struct file *be_fh; +}; +int virt_psys_init(struct ipu4_virtio_ctx *fe_ctx); +void virt_psys_exit(void); +#define dev_to_vpsys(dev) \ + container_of(dev, struct virt_ipu_psys, dev) + +#define inode_to_ipu_psys(inode) \ + container_of((inode)->i_cdev, struct virt_ipu_psys, cdev) + +#endif /* INTEL_IPU4_PARA_VIRT_PSYS_H_ */ diff --git a/drivers/media/pci/intel/virtio/intel-ipu4-virtio-be-bridge.c b/drivers/media/pci/intel/virtio/intel-ipu4-virtio-be-bridge.c new file mode 100644 index 0000000000000..27f8631ffaa96 --- /dev/null +++ b/drivers/media/pci/intel/virtio/intel-ipu4-virtio-be-bridge.c @@ -0,0 +1,283 @@ +// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0) +/* + * Copyright (C) 2018 Intel Corporation + */ + +#include +#include +#include +#include +#include +#include 
+#include +#include + +#include "intel-ipu4-virtio-be-bridge.h" +#include "./ici/ici-isys-frame-buf.h" +#include "intel-ipu4-virtio-be-pipeline.h" +#include "intel-ipu4-virtio-be-stream.h" +#include "intel-ipu4-virtio-be-psys.h" + +int intel_ipu4_virtio_msg_parse(struct ipu4_virtio_req_info *req_info) +{ + int ret = 0; + struct ipu4_virtio_req *req; + + if (!req_info) + return -1; + + req = req_info->request; + + if (!req) { + pr_err("IPU mediator: request is NULL\n"); + return -EINVAL; + } + if ((req->cmd < IPU4_CMD_DEVICE_OPEN) || + (req->cmd >= IPU4_CMD_GET_N)) { + pr_err("IPU mediator: invalid command\n"); + return -EINVAL; + } + + switch (req->cmd) { + case IPU4_CMD_POLL: + /* + * Open video device node + * op0 - virtual device node number + * op1 - Actual device fd. By default set to 0 + */ + pr_debug("%s: process_poll %d", + __func__, req->op[0]); + kthread_run(process_poll_thread, req_info, + "process_poll"); + req->stat = IPU4_REQ_PENDING; + break; + case IPU4_CMD_DEVICE_OPEN: + /* + * Open video device node + * op0 - virtual device node number + * op1 - Actual device fd. By default set to 0 + */ + pr_debug("DEVICE_OPEN: virtual_dev_id:%d actual_fd:%d\n", req->op[0], req->op[1]); + kthread_run(process_device_open_thread, req_info, + "process_device_open"); + req->stat = IPU4_REQ_PENDING; + break; + case IPU4_CMD_DEVICE_CLOSE: + /* + * Close video device node + * op0 - virtual device node number + * op1 - Actual device fd. By default set to 0 + */ + pr_debug("DEVICE_CLOSE: virtual_dev_id:%d actual_fd:%d\n", req->op[0], req->op[1]); + kthread_run(process_device_close_thread, req_info, + "process_device_close"); + req->stat = IPU4_REQ_PENDING; + break; + case IPU4_CMD_STREAM_ON: + /* Start Stream + * op0 - virtual device node number + * op1 - Actual device fd. 
By default set to 0 + */ + pr_debug("STREAM ON: virtual_dev_id:%d actual_fd:%d\n", req->op[0], req->op[1]); + kthread_run(process_stream_on_thread, req_info, + "process_stream_on"); + req->stat = IPU4_REQ_PENDING; + break; + case IPU4_CMD_STREAM_OFF: + /* Stop Stream + * op0 - virtual device node number + * op1 - Actual device fd. By default set to 0 + */ + pr_debug("STREAM OFF: virtual_dev_id:%d actual_fd:%d\n", req->op[0], req->op[1]); + kthread_run(process_stream_off_thread, req_info, + "process_stream_off"); + req->stat = IPU4_REQ_PENDING; + break; + case IPU4_CMD_GET_BUF: + /* Set Format of a given video node + * op0 - virtual device node number + * op1 - Actual device fd. By default set to 0 + * op2 - Memory Type 1: USER_PTR 2: DMA_PTR + * op3 - Number of planes + * op4 - Buffer ID + * op5 - Length of Buffer + */ + + pr_debug("%s process_get_buf %d", + __func__, req->op[0]); + kthread_run(process_get_buf_thread, req_info, + "process_get_buf"); + req->stat = IPU4_REQ_PENDING; + break; + case IPU4_CMD_PUT_BUF: + /* Set Format of a given video node + * op0 - virtual device node number + * op1 - Actual device fd. 
By default set to 0 + * op2 - Memory Type 1: USER_PTR 2: DMA_PTR + */ + pr_debug("%s process_put_buf %d", + __func__, req->op[0]); + kthread_run(process_put_buf_thread, req_info, + "process_put_buf"); + req->stat = IPU4_REQ_PENDING; + break; + case IPU4_CMD_SET_FORMAT: + pr_debug("%s process_set_format %d", + __func__, req->op[0]); + kthread_run(process_set_format_thread, req_info, + "process_set_format"); + req->stat = IPU4_REQ_PENDING; + break; + case IPU4_CMD_PIPELINE_OPEN: + pr_debug("%s process_pipeline_open %d", + __func__, req->op[0]); + kthread_run(process_pipeline_open_thread, req_info, + "process_pipeline_open"); + req->stat = IPU4_REQ_PENDING; + break; + case IPU4_CMD_PIPELINE_CLOSE: + pr_debug("%s process_pipeline_close %d", + __func__, req->op[0]); + kthread_run(process_pipeline_close_thread, req_info, + "process_pipeline_close"); + req->stat = IPU4_REQ_PENDING; + break; + case IPU4_CMD_ENUM_NODES: + pr_debug("%s process_enum_nodes %d", + __func__, req->op[0]); + kthread_run(process_enum_nodes_thread, req_info, + "process_enum_nodes"); + req->stat = IPU4_REQ_PENDING; + break; + case IPU4_CMD_ENUM_LINKS: + pr_debug("%s process_enum_links %d", + __func__, req->op[0]); + kthread_run(process_enum_links_thread, req_info, + "process_enum_links"); + req->stat = IPU4_REQ_PENDING; + break; + case IPU4_CMD_SETUP_PIPE: + pr_debug("%s process_setup_pipe %d", + __func__, req->op[0]); + kthread_run(process_setup_pipe_thread, req_info, + "process_setup_pipe"); + req->stat = IPU4_REQ_PENDING; + break; + case IPU4_CMD_SET_FRAMEFMT: + pr_debug("%s process_set_framefmt %d", + __func__, req->op[0]); + kthread_run(process_set_framefmt_thread, req_info, + "process_set_framefmt"); + req->stat = IPU4_REQ_PENDING; + break; + case IPU4_CMD_GET_FRAMEFMT: + pr_debug("%s process_get_framefmt %d", + __func__, req->op[0]); + kthread_run(process_get_framefmt_thread, req_info, + "process_get_framefmt"); + req->stat = IPU4_REQ_PENDING; + break; + case IPU4_CMD_GET_SUPPORTED_FRAMEFMT: + 
pr_debug("%s process_get_supported_framefmt %d", + __func__, req->op[0]); + kthread_run(process_get_supported_framefmt_thread, + req_info, "process_get_supported_framefmt"); + req->stat = IPU4_REQ_PENDING; + break; + case IPU4_CMD_SET_SELECTION: + pr_debug("%s process_pad_set_sel %d", + __func__, req->op[0]); + kthread_run(process_pad_set_sel_thread, req_info, + "process_pad_set_sel"); + req->stat = IPU4_REQ_PENDING; + break; + case IPU4_CMD_GET_SELECTION: + pr_debug("%s process_pad_get_sel %d", + __func__, req->op[0]); + kthread_run(process_pad_get_sel_thread, req_info, + "process_pad_get_sel"); + req->stat = IPU4_REQ_PENDING; + break; + case IPU4_CMD_PSYS_MAPBUF: + pr_debug("%s process_psys_mapbuf_thread %d", + __func__, req->op[0]); + kthread_run(process_psys_mapbuf_thread, req_info, + "process_psys_mapbuf_thread"); + req->stat = IPU4_REQ_PENDING; + break; + case IPU4_CMD_PSYS_UNMAPBUF: + pr_debug("%s process_psys_unmapbuf_thread %d", + __func__, req->op[0]); + kthread_run(process_psys_unmapbuf_thread, req_info, + "process_psys_unmapbuf_thread"); + req->stat = IPU4_REQ_PENDING; + break; + case IPU4_CMD_PSYS_QUERYCAP: + pr_debug("%s process_psys_querycap_thread %d", + __func__, req->op[0]); + kthread_run(process_psys_querycap_thread, req_info, + "process_psys_querycap_thread"); + req->stat = IPU4_REQ_PENDING; + break; + case IPU4_CMD_PSYS_GETBUF: + pr_debug("%s process_psys_getbuf_thread %d", + __func__, req->op[0]); + kthread_run(process_psys_getbuf_thread, req_info, + "process_psys_getbuf_thread"); + req->stat = IPU4_REQ_PENDING; + break; + case IPU4_CMD_PSYS_PUTBUF: + pr_debug("%s process_psys_putbuf_thread %d", + __func__, req->op[0]); + kthread_run(process_psys_putbuf_thread, req_info, + "process_psys_putbuf_thread"); + req->stat = IPU4_REQ_PENDING; + break; + case IPU4_CMD_PSYS_QCMD: + pr_debug("%s process_psys_qcmd_thread %d", + __func__, req->op[0]); + kthread_run(process_psys_qcmd_thread, req_info, + "process_psys_qcmd_thread"); + req->stat = 
IPU4_REQ_PENDING; + break; + case IPU4_CMD_PSYS_DQEVENT: + pr_debug("%s process_psys_dqevent_thread %d", + __func__, req->op[0]); + kthread_run(process_psys_dqevent_thread, req_info, + "process_psys_dqevent_thread"); + req->stat = IPU4_REQ_PENDING; + break; + case IPU4_CMD_PSYS_GET_MANIFEST: + pr_debug("%s process_psys_get_manifest_thread %d", + __func__, req->op[0]); + kthread_run(process_psys_get_manifest_thread, req_info, + "process_psys_get_manifest_thread"); + req->stat = IPU4_REQ_PENDING; + break; + case IPU4_CMD_PSYS_OPEN: + pr_debug("%s process_psys_open_thread %d", + __func__, req->op[0]); + kthread_run(process_psys_open_thread, req_info, + "process_psys_open_thread"); + req->stat = IPU4_REQ_PENDING; + break; + case IPU4_CMD_PSYS_CLOSE: + pr_debug("%s process_psys_close_thread %d", + __func__, req->op[0]); + kthread_run(process_psys_close_thread, req_info, + "process_psys_close_thread"); + req->stat = IPU4_REQ_PENDING; + break; + case IPU4_CMD_PSYS_POLL: + pr_debug("%s process_psys_poll_thread %d", + __func__, req->op[0]); + kthread_run(process_psys_poll_thread, req_info, + "process_psys_poll_thread"); + req->stat = IPU4_REQ_PENDING; + break; + default: + return -EINVAL; + } + + return ret; +} diff --git a/drivers/media/pci/intel/virtio/intel-ipu4-virtio-be-bridge.h b/drivers/media/pci/intel/virtio/intel-ipu4-virtio-be-bridge.h new file mode 100644 index 0000000000000..18085882de855 --- /dev/null +++ b/drivers/media/pci/intel/virtio/intel-ipu4-virtio-be-bridge.h @@ -0,0 +1,25 @@ +/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0) */ +/* + * Copyright (C) 2018 Intel Corporation + */ + +#ifndef __IPU4_VIRTIO_BE_BRIDGE__ +#define __IPU4_VIRTIO_BE_BRIDGE__ + +#include +#include +#include +#include + +#include "intel-ipu4-virtio-common.h" +#include "intel-ipu4-virtio-be-request-queue.h" + +int intel_ipu4_virtio_msg_parse(struct ipu4_virtio_req_info *req_info); + +void intel_ipu4_virtio_create_req(struct ipu4_virtio_req *req, + enum intel_ipu4_virtio_command 
cmd, int *op); + + +#endif + + diff --git a/drivers/media/pci/intel/virtio/intel-ipu4-virtio-be-pipeline.c b/drivers/media/pci/intel/virtio/intel-ipu4-virtio-be-pipeline.c new file mode 100644 index 0000000000000..81e28262f266d --- /dev/null +++ b/drivers/media/pci/intel/virtio/intel-ipu4-virtio-be-pipeline.c @@ -0,0 +1,447 @@ +// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0) +/* + * Copyright (C) 2018 Intel Corporation + */ + +#include +#include +#include +#include +#include + +#include +#include +#include "intel-ipu4-virtio-be-pipeline.h" +#include "./ici/ici-isys-pipeline.h" +#include "./ici/ici-isys-pipeline-device.h" +#include "intel-ipu4-virtio-be-request-queue.h" +#include "intel-ipu4-virtio-be.h" + +static struct file *pipeline; +static int guestID = -1; + +int process_pipeline_open(struct ipu4_virtio_req_info *req_info) +{ + int domid = req_info->domid; + if (guestID != -1 && guestID != domid) { + pr_err("%s: pipeline device already opened by other guest! %d %d", __func__, guestID, domid); + return IPU4_REQ_ERROR; + } + + pr_info("process_device_open: /dev/intel_pipeline"); + if (!pipeline) + pipeline = filp_open("/dev/intel_pipeline", O_RDWR | O_NONBLOCK, 0); + guestID = domid; + + return IPU4_REQ_PROCESSED; +} + +int process_pipeline_close(struct ipu4_virtio_req_info *req_info) +{ + struct ipu4_virtio_req *req = req_info->request; + + pr_info("%s: %d", __func__, req->op[0]); + + if (pipeline) + filp_close(pipeline, 0); + guestID = -1; + pipeline = NULL; + + return IPU4_REQ_PROCESSED; +} + +int process_enum_nodes(struct ipu4_virtio_req_info *req_info) +{ + int err = 0; + struct ici_isys_pipeline_device *dev; + struct ici_node_desc *host_virt; + struct ipu4_virtio_req *req; + int domid = req_info->domid; + + pr_debug("%s\n", __func__); + + if (!pipeline) { + pr_err("%s: NULL pipeline", __func__); + return IPU4_REQ_ERROR; + } + dev = pipeline->private_data; + + if (!req_info) { + pr_err("%s: NULL req_info", __func__); + return IPU4_REQ_ERROR; + } + req 
= req_info->request; + + host_virt = map_guest_phys(domid, req->payload, + sizeof(struct ici_node_desc)); + if (host_virt == NULL) { + pr_err("process_enum_nodes: NULL host_virt"); + return IPU4_REQ_ERROR; + } + + err = dev->pipeline_ioctl_ops->pipeline_enum_nodes(pipeline, dev, host_virt); + + unmap_guest_phys(domid, req->payload); + if (err) + return IPU4_REQ_ERROR; + else + return IPU4_REQ_PROCESSED; +} + +int process_enum_links(struct ipu4_virtio_req_info *req_info) +{ + int err = 0; + struct ici_isys_pipeline_device *dev; + struct ici_links_query *host_virt; + struct ipu4_virtio_req *req; + int domid = req_info->domid; + + pr_debug("%s\n", __func__); + + if (!pipeline) { + pr_err("%s: NULL pipeline", __func__); + return IPU4_REQ_ERROR; + } + dev = pipeline->private_data; + + if (!req_info) { + pr_err("%s: NULL req_info", __func__); + return IPU4_REQ_ERROR; + } + req = req_info->request; + + host_virt = map_guest_phys(domid, req->payload, + sizeof(struct ici_links_query)); + if (host_virt == NULL) { + pr_err("%s: NULL host_virt\n", __func__); + return IPU4_REQ_ERROR; + } + err = dev->pipeline_ioctl_ops->pipeline_enum_links(pipeline, dev, host_virt); + + unmap_guest_phys(domid, req->payload); + if (err) + return IPU4_REQ_ERROR; + else + return IPU4_REQ_PROCESSED; +} +int process_get_supported_framefmt(struct ipu4_virtio_req_info *req_info) +{ + int err = 0; + struct ici_isys_pipeline_device *dev; + struct ici_pad_supported_format_desc *host_virt; + struct ipu4_virtio_req *req; + int domid = req_info->domid; + + pr_debug("%s\n", __func__); + + if (!pipeline) { + pr_err("%s: NULL pipeline", __func__); + return IPU4_REQ_ERROR; + } + dev = pipeline->private_data; + + if (!req_info) { + pr_err("%s: NULL req_info", __func__); + return IPU4_REQ_ERROR; + } + req = req_info->request; + + host_virt = map_guest_phys(domid, req->payload, + sizeof(struct ici_pad_supported_format_desc)); + if (host_virt == NULL) { + pr_err("%s: NULL host_virt\n", __func__); + return 
IPU4_REQ_ERROR; + } + err = dev->pipeline_ioctl_ops->pad_get_supported_format(pipeline, dev, host_virt); + + unmap_guest_phys(domid, req->payload); + if (err) + return IPU4_REQ_ERROR; + else + return IPU4_REQ_PROCESSED; +} + +int process_set_framefmt(struct ipu4_virtio_req_info *req_info) +{ + int err = 0; + struct ici_isys_pipeline_device *dev; + struct ici_pad_framefmt *host_virt; + struct ipu4_virtio_req *req; + int domid = req_info->domid; + + pr_debug("%s\n", __func__); + + if (!pipeline) { + pr_err("%s: NULL pipeline", __func__); + return IPU4_REQ_ERROR; + } + dev = pipeline->private_data; + + if (!req_info) { + pr_err("%s: NULL req_info", __func__); + return IPU4_REQ_ERROR; + } + req = req_info->request; + + host_virt = map_guest_phys(domid, req->payload, + sizeof(struct ici_pad_framefmt)); + if (host_virt == NULL) { + pr_err("%s: NULL host_virt\n", __func__); + return IPU4_REQ_ERROR; + } + err = dev->pipeline_ioctl_ops->pad_set_ffmt(pipeline, dev, host_virt); + + unmap_guest_phys(domid, req->payload); + if (err) + return IPU4_REQ_ERROR; + else + return IPU4_REQ_PROCESSED; +} + +int process_get_framefmt(struct ipu4_virtio_req_info *req_info) +{ + int err = 0; + struct ici_isys_pipeline_device *dev; + struct ici_pad_framefmt *host_virt; + struct ipu4_virtio_req *req; + int domid = req_info->domid; + + pr_debug("%s\n", __func__); + + if (!pipeline) { + pr_err("%s: NULL pipeline", __func__); + return IPU4_REQ_ERROR; + } + dev = pipeline->private_data; + + if (!req_info) { + pr_err("%s: NULL req_info", __func__); + return IPU4_REQ_ERROR; + } + req = req_info->request; + + host_virt = map_guest_phys(domid, req->payload, + sizeof(struct ici_pad_framefmt)); + if (host_virt == NULL) { + pr_err("%s: NULL host_virt\n", __func__); + return IPU4_REQ_ERROR; + } + err = dev->pipeline_ioctl_ops->pad_get_ffmt(pipeline, dev, host_virt); + + unmap_guest_phys(domid, req->payload); + if (err) + return IPU4_REQ_ERROR; + else + return IPU4_REQ_PROCESSED; +} + +int 
process_setup_pipe(struct ipu4_virtio_req_info *req_info) +{ + int err = 0; + struct ici_isys_pipeline_device *dev; + struct ici_link_desc *host_virt; + struct ipu4_virtio_req *req; + int domid = req_info->domid; + + pr_debug("%s\n", __func__); + + if (!pipeline) { + pr_err("%s: NULL pipeline", __func__); + return IPU4_REQ_ERROR; + } + dev = pipeline->private_data; + + if (!req_info) { + pr_err("%s: NULL req_info", __func__); + return IPU4_REQ_ERROR; + } + req = req_info->request; + + host_virt = map_guest_phys(domid, req->payload, + sizeof(struct ici_link_desc)); + if (host_virt == NULL) { + pr_err("%s: NULL host_virt\n", __func__); + return IPU4_REQ_ERROR; + } + err = dev->pipeline_ioctl_ops->pipeline_setup_pipe(pipeline, dev, host_virt); + + unmap_guest_phys(domid, req->payload); + if (err) + return IPU4_REQ_ERROR; + else + return IPU4_REQ_PROCESSED; +} + +int process_pad_set_sel(struct ipu4_virtio_req_info *req_info) +{ + int err = 0; + struct ici_isys_pipeline_device *dev; + struct ici_pad_selection *host_virt; + struct ipu4_virtio_req *req; + int domid = req_info->domid; + + pr_debug("%s\n", __func__); + + if (!pipeline) { + pr_err("%s: NULL pipeline", __func__); + return IPU4_REQ_ERROR; + } + dev = pipeline->private_data; + + if (!req_info) { + pr_err("%s: NULL req_info", __func__); + return IPU4_REQ_ERROR; + } + req = req_info->request; + + host_virt = map_guest_phys(domid, req->payload, + sizeof(struct ici_pad_selection)); + if (host_virt == NULL) { + pr_err("%s: NULL host_virt\n", __func__); + return IPU4_REQ_ERROR; + } + err = dev->pipeline_ioctl_ops->pad_set_sel(pipeline, dev, host_virt); + + unmap_guest_phys(domid, req->payload); + if (err) + return IPU4_REQ_ERROR; + else + return IPU4_REQ_PROCESSED; +} + +int process_pad_get_sel(struct ipu4_virtio_req_info *req_info) +{ + int err = 0; + struct ici_isys_pipeline_device *dev; + struct ici_pad_selection *host_virt; + struct ipu4_virtio_req *req; + int domid = req_info->domid; + + pr_debug("%s\n", 
__func__); + + if (!pipeline) { + pr_err("%s: NULL pipeline", __func__); + return IPU4_REQ_ERROR; + } + dev = pipeline->private_data; + + if (!req_info) { + pr_err("%s: NULL req_info", __func__); + return IPU4_REQ_ERROR; + } + req = req_info->request; + + host_virt = map_guest_phys(domid, req->payload, + sizeof(struct ici_pad_selection)); + if (host_virt == NULL) { + pr_err("%s: NULL host_virt\n", __func__); + return IPU4_REQ_ERROR; + } + err = dev->pipeline_ioctl_ops->pad_get_sel(pipeline, dev, host_virt); + + unmap_guest_phys(domid, req->payload); + if (err) + return IPU4_REQ_ERROR; + else + return IPU4_REQ_PROCESSED; +} + +int process_pipeline_open_thread(void *data) +{ + int status; + + status = process_pipeline_open(data); + notify_fe(status, data); + do_exit(0); + return 0; +} + +int process_pipeline_close_thread(void *data) +{ + int status; + + status = process_pipeline_close(data); + notify_fe(status, data); + do_exit(0); + return 0; +} + +int process_enum_nodes_thread(void *data) +{ + int status; + + status = process_enum_nodes(data); + notify_fe(status, data); + do_exit(0); + return 0; +} + +int process_enum_links_thread(void *data) +{ + int status; + + status = process_enum_links(data); + notify_fe(status, data); + do_exit(0); + return 0; +} + +int process_get_supported_framefmt_thread(void *data) +{ + int status; + + status = process_get_supported_framefmt(data); + notify_fe(status, data); + do_exit(0); + return 0; +} + +int process_set_framefmt_thread(void *data) +{ + int status; + + status = process_set_framefmt(data); + notify_fe(status, data); + do_exit(0); + return 0; +} + +int process_get_framefmt_thread(void *data) +{ + int status; + + status = process_get_framefmt(data); + notify_fe(status, data); + do_exit(0); + return 0; +} + +int process_pad_set_sel_thread(void *data) +{ + int status; + + status = process_pad_set_sel(data); + notify_fe(status, data); + do_exit(0); + return 0; +} + +int process_pad_get_sel_thread(void *data) +{ + int status; + 
+ status = process_pad_get_sel(data); + notify_fe(status, data); + do_exit(0); + return 0; +} + +int process_setup_pipe_thread(void *data) +{ + int status; + + status = process_setup_pipe(data); + notify_fe(status, data); + do_exit(0); + return 0; +} diff --git a/drivers/media/pci/intel/virtio/intel-ipu4-virtio-be-pipeline.h b/drivers/media/pci/intel/virtio/intel-ipu4-virtio-be-pipeline.h new file mode 100644 index 0000000000000..3da8c243a2bca --- /dev/null +++ b/drivers/media/pci/intel/virtio/intel-ipu4-virtio-be-pipeline.h @@ -0,0 +1,27 @@ +/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0) */ +/* + * Copyright (C) 2018 Intel Corporation + */ + +#ifndef __IPU4_VIRTIO_BE_PIPELINE__ +#define __IPU4_VIRTIO_BE_PIPELINE__ + +#include +#include + +#include "intel-ipu4-virtio-common.h" + +int process_pipeline_open_thread(void *data); +int process_pipeline_close_thread(void *data); +int process_enum_nodes_thread(void *data); +int process_enum_links_thread(void *data); +int process_get_supported_framefmt_thread(void *data); +int process_set_framefmt_thread(void *data); +int process_get_framefmt_thread(void *data); +int process_pad_set_sel_thread(void *data); +int process_pad_get_sel_thread(void *data); +int process_setup_pipe_thread(void *data); + +#endif + + diff --git a/drivers/media/pci/intel/virtio/intel-ipu4-virtio-be-psys.c b/drivers/media/pci/intel/virtio/intel-ipu4-virtio-be-psys.c new file mode 100644 index 0000000000000..957d51b2198b0 --- /dev/null +++ b/drivers/media/pci/intel/virtio/intel-ipu4-virtio-be-psys.c @@ -0,0 +1,285 @@ +// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0) +/* + * Copyright (C) 2018 Intel Corporation + */ + +#include +#include "ipu-psys.h" + +#include +#include "intel-ipu4-virtio-common.h" +#include "intel-ipu4-virtio-common-psys.h" +#include "intel-ipu4-virtio-be-request-queue.h" +#include "intel-ipu4-virtio-be.h" + +int process_psys_mapbuf(struct ipu4_virtio_req_info *req_info) +{ + return IPU4_REQ_ERROR; +} + +int 
process_psys_unmapbuf(struct ipu4_virtio_req_info *req_info) +{ + int status = 0; + + struct ipu_psys_fh *fh = req_info->request->be_fh->private_data; + if(!fh) { + pr_err("%s NULL file handler", __func__); + return IPU4_REQ_ERROR; + } + + status = fh->vfops->unmap_buf(fh, req_info); + + /*Only doing this in mediated mode because + fd passed from SOS to user space is invalid in UOS.*/ + ksys_close(req_info->request->op[0]); + + req_info->request->func_ret = status; + + if (status) + return IPU4_REQ_ERROR; + else + return IPU4_REQ_PROCESSED; +} + +int process_psys_querycap(struct ipu4_virtio_req_info *req_info) +{ + struct ipu_psys_fh *fh = req_info->request->be_fh->private_data; + int status = 0; + + struct ipu_psys_capability *psys_caps; + psys_caps = map_guest_phys(req_info->domid, + req_info->request->payload, + sizeof(struct ipu_psys_capability)); + if (psys_caps == NULL) { + pr_err("%s: failed to get ipu_psys_capability %u %llu", + __func__, req_info->domid, req_info->request->payload); + return -EFAULT; + } + + *psys_caps = fh->psys->caps; + + unmap_guest_phys(req_info->domid, + req_info->request->payload); + + req_info->request->func_ret = status; + + if (status) + return IPU4_REQ_ERROR; + else + return IPU4_REQ_PROCESSED; +} + +int process_psys_putbuf(struct ipu4_virtio_req_info *req_info) +{ + return IPU4_REQ_ERROR; +} + +int process_psys_qcmd(struct ipu4_virtio_req_info *req_info) +{ + struct ipu_psys_fh *fh = req_info->request->be_fh->private_data; + int status = 0; + + status = fh->vfops->qcmd(fh, req_info); + + req_info->request->func_ret = status; + + if (status) + return IPU4_REQ_ERROR; + else + return IPU4_REQ_PROCESSED; +} + +int process_psys_dqevent(struct ipu4_virtio_req_info *req_info) +{ + struct ipu_psys_fh *fh = req_info->request->be_fh->private_data; + int status = 0; + + status = fh->vfops->dqevent(fh, req_info, req_info->request->be_fh->f_flags); + + req_info->request->func_ret = status; + + if (status) + return IPU4_REQ_ERROR; + else + 
return IPU4_REQ_PROCESSED; +} + +int process_psys_getbuf(struct ipu4_virtio_req_info *req_info) +{ + struct ipu_psys_fh *fh = req_info->request->be_fh->private_data; + int status = 0; + + status = fh->vfops->get_buf(fh, req_info); + + req_info->request->func_ret = status; + + if (status) + return IPU4_REQ_ERROR; + else + return IPU4_REQ_PROCESSED; +} + +int process_psys_get_manifest(struct ipu4_virtio_req_info *req_info) +{ + struct ipu_psys_fh *fh = req_info->request->be_fh->private_data; + int status = 0; + + status = fh->vfops->get_manifest(fh, req_info); + + req_info->request->func_ret = status; + + if (status) + return IPU4_REQ_ERROR; + else + return IPU4_REQ_PROCESSED; +} + +int process_psys_open(struct ipu4_virtio_req_info *req_info) +{ + struct file *fh; + pr_info("%s: /dev/ipu-psys0", __func__); + + fh = filp_open("/dev/ipu-psys0", req_info->request->op[0], 0); + + if (fh == NULL) { + pr_err("%s: Native IPU psys device not found", + __func__); + return IPU4_REQ_ERROR; + } + + req_info->request->be_fh = fh; + + return IPU4_REQ_PROCESSED; +} + +int process_psys_close(struct ipu4_virtio_req_info *req_info) +{ + pr_info("%s: /dev/ipu-psys0", __func__); + + filp_close(req_info->request->be_fh, 0); + + return IPU4_REQ_PROCESSED; +} + +int process_psys_poll(struct ipu4_virtio_req_info *req_info) +{ + struct ipu_psys_fh *fh = req_info->request->be_fh->private_data; + int status = 0; + + status = fh->vfops->poll(fh, req_info); + + if (status) + return IPU4_REQ_ERROR; + else + return IPU4_REQ_PROCESSED; +} + +int process_psys_mapbuf_thread(void *data) +{ + int status; + + status = process_psys_mapbuf(data); + notify_fe(status, data); + do_exit(0); + return 0; +} + +int process_psys_unmapbuf_thread(void *data) +{ + int status; + + status = process_psys_unmapbuf(data); + notify_fe(status, data); + do_exit(0); + return 0; +} + +int process_psys_querycap_thread(void *data) +{ + int status; + + status = process_psys_querycap(data); + notify_fe(status, data); + 
do_exit(0); + return 0; +} + +int process_psys_putbuf_thread(void *data) +{ + int status; + + status = process_psys_putbuf(data); + notify_fe(status, data); + do_exit(0); + return 0; +} + +int process_psys_qcmd_thread(void *data) +{ + int status; + + status = process_psys_qcmd(data); + notify_fe(status, data); + do_exit(0); + return 0; +} + +int process_psys_dqevent_thread(void *data) +{ + int status; + + status = process_psys_dqevent(data); + notify_fe(status, data); + do_exit(0); + return 0; +} + +int process_psys_get_manifest_thread(void *data) +{ + int status; + + status = process_psys_get_manifest(data); + notify_fe(status, data); + do_exit(0); + return 0; +} + +int process_psys_getbuf_thread(void *data) +{ + int status; + + status = process_psys_getbuf(data); + notify_fe(status, data); + do_exit(0); + return 0; +} + +int process_psys_open_thread(void *data) +{ + int status; + + status = process_psys_open(data); + notify_fe(status, data); + do_exit(0); + return 0; +} + +int process_psys_close_thread(void *data) +{ + int status; + + status = process_psys_close(data); + notify_fe(status, data); + do_exit(0); + return 0; +} + +int process_psys_poll_thread(void *data) +{ + int status; + + status = process_psys_poll(data); + notify_fe(status, data); + do_exit(0); + return 0; +} diff --git a/drivers/media/pci/intel/virtio/intel-ipu4-virtio-be-psys.h b/drivers/media/pci/intel/virtio/intel-ipu4-virtio-be-psys.h new file mode 100644 index 0000000000000..5bc5c235eb3c3 --- /dev/null +++ b/drivers/media/pci/intel/virtio/intel-ipu4-virtio-be-psys.h @@ -0,0 +1,30 @@ +/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0) */ +/* + * Copyright (C) 2018 Intel Corporation + */ + +#ifndef __IPU4_VIRTIO_BE_PSYS__ +#define __IPU4_VIRTIO_BE_PSYS__ + +int process_set_format_thread(void *data); +int process_device_open_thread(void *data); +int process_device_close_thread(void *data); +int process_poll_thread(void *data); +int process_put_buf_thread(void *data); +int 
process_stream_on_thread(void *data); +int process_stream_off_thread(void *data); +int process_get_buf_thread(void *data); + +int process_psys_mapbuf_thread(void *data); +int process_psys_unmapbuf_thread(void *data); +int process_psys_querycap_thread(void *data); +int process_psys_putbuf_thread(void *data); +int process_psys_qcmd_thread(void *data); +int process_psys_dqevent_thread(void *data); +int process_psys_get_manifest_thread(void *data); +int process_psys_open_thread(void *data); +int process_psys_close_thread(void *data); +int process_psys_poll_thread(void *data); +int process_psys_getbuf_thread(void *data); + +#endif diff --git a/drivers/media/pci/intel/virtio/intel-ipu4-virtio-be-request-queue.c b/drivers/media/pci/intel/virtio/intel-ipu4-virtio-be-request-queue.c new file mode 100644 index 0000000000000..cee9b55518c11 --- /dev/null +++ b/drivers/media/pci/intel/virtio/intel-ipu4-virtio-be-request-queue.c @@ -0,0 +1,57 @@ +// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0) +/* + * Copyright (C) 2018 Intel Corporation + */ + +#include +#include +#include "intel-ipu4-virtio-common.h" +#include "intel-ipu4-virtio-be-request-queue.h" + +struct ipu4_virtio_ring ipu4_virtio_be_req_queue; + +int ipu4_virtio_be_req_queue_init(void) +{ + int i; + struct ipu4_virtio_req_info *req; + + if (ipu4_virtio_ring_init(&ipu4_virtio_be_req_queue, REQ_RING_SIZE)) + return -1; + + for (i = 0; i < REQ_RING_SIZE; i++) { + req = kcalloc(1, sizeof(struct ipu4_virtio_req_info), GFP_KERNEL); + if (req == NULL) { + pr_err("%s failed to allocate memory for ipu4_virtio_req_info", + __func__); + return -1; + } + ipu4_virtio_ring_push(&ipu4_virtio_be_req_queue, req); + } + return 0; +} + +void ipu4_virtio_be_req_queue_free(void) +{ + int i; + struct ipu4_virtio_req_info *req_info; + + for (i = 0; i < REQ_RING_SIZE; i++) { + req_info = ipu4_virtio_ring_pop(&ipu4_virtio_be_req_queue); + if (req_info) + kfree(req_info); + else + break; + } + 
ipu4_virtio_ring_free(&ipu4_virtio_be_req_queue); +} + +struct ipu4_virtio_req_info *ipu4_virtio_be_req_queue_get(void) +{ + return ipu4_virtio_ring_pop(&ipu4_virtio_be_req_queue); +} + +int ipu4_virtio_be_req_queue_put( + struct ipu4_virtio_req_info *req) +{ + return ipu4_virtio_ring_push(&ipu4_virtio_be_req_queue, req); +} diff --git a/drivers/media/pci/intel/virtio/intel-ipu4-virtio-be-request-queue.h b/drivers/media/pci/intel/virtio/intel-ipu4-virtio-be-request-queue.h new file mode 100644 index 0000000000000..febcf73152e28 --- /dev/null +++ b/drivers/media/pci/intel/virtio/intel-ipu4-virtio-be-request-queue.h @@ -0,0 +1,27 @@ +/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0) */ +/* + * Copyright (C) 2018 Intel Corporation + */ + +#ifndef IPU4_VIRTIO_FE_REQUEST_QUEUE_H +#define IPU4_VIRTIO_FE_REQUEST_QUEUE_H + +struct ipu4_virtio_vq_info { + int vq_idx; + int req_len; + uint16_t vq_buf_idx; +}; + +struct ipu4_virtio_req_info { + struct ipu4_virtio_req *request; + struct ipu4_virtio_vq_info vq_info; + int domid; + int client_id; +}; + +int ipu4_virtio_be_req_queue_init(void); +void ipu4_virtio_be_req_queue_free(void); +struct ipu4_virtio_req_info *ipu4_virtio_be_req_queue_get(void); +int ipu4_virtio_be_req_queue_put(struct ipu4_virtio_req_info *req); + +#endif diff --git a/drivers/media/pci/intel/virtio/intel-ipu4-virtio-be-stream.c b/drivers/media/pci/intel/virtio/intel-ipu4-virtio-be-stream.c new file mode 100644 index 0000000000000..a0d672123b9a1 --- /dev/null +++ b/drivers/media/pci/intel/virtio/intel-ipu4-virtio-be-stream.c @@ -0,0 +1,571 @@ +// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0) +/* + * Copyright (C) 2018 Intel Corporation + */ + +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include "./ici/ici-isys-stream-device.h" +#include "./ici/ici-isys-stream.h" +#include "./ici/ici-isys-frame-buf.h" +#include "intel-ipu4-virtio-be-stream.h" +#include "intel-ipu4-virtio-be.h" + +#define MAX_SIZE 6 // 
max 2^6 +#define POLL_WAIT 20000 //20s + +#define dev_to_stream(dev) \ + container_of(dev, struct ici_isys_stream, strm_dev) + +DECLARE_HASHTABLE(STREAM_NODE_HASH, MAX_SIZE); +static bool hash_initialised; +static spinlock_t stream_node_hash_lock; + +struct stream_node { + int client_id; + struct file *f; + struct hlist_node node; +}; + +void cleanup_stream(void) +{ + struct stream_node *sn = NULL; + unsigned long flags = 0; + int bkt; + struct hlist_node *tmp; + + //To clean up SOS when uos got rebooted and stream did not + //get closed properly. Current implementation only handle + //for single UOS. + spin_lock_irqsave(&stream_node_hash_lock, flags); + if (!hash_empty(STREAM_NODE_HASH)) { + hash_for_each_safe(STREAM_NODE_HASH, bkt, tmp, sn, node) { + if (sn != NULL) { + pr_debug("%s: performing stream clean up!", + __func__); + filp_close(sn->f, 0); + hash_del(&sn->node); + kfree(sn); + } + } + } + spin_unlock_irqrestore(&stream_node_hash_lock, flags); +} + +static int process_device_open(struct ipu4_virtio_req_info *req_info) +{ + unsigned long flags = 0; + char node_name[25]; + struct stream_node *sn = NULL; + struct ici_stream_device *strm_dev; + struct ipu4_virtio_req *req = req_info->request; + int domid = req_info->domid; + + if (!hash_initialised) { + hash_init(STREAM_NODE_HASH); + hash_initialised = true; + spin_lock_init(&stream_node_hash_lock); + } + + spin_lock_irqsave(&stream_node_hash_lock, flags); + hash_for_each_possible(STREAM_NODE_HASH, sn, node, req->op[0]) { + if (sn != NULL) { + if (sn->client_id != domid) { + pr_err("%s: stream device %d already opened by other guest!", + __func__, sn->client_id); + spin_unlock_irqrestore(&stream_node_hash_lock, + flags); + return IPU4_REQ_ERROR; + } + pr_info("%s: stream device %d already opened by client %d", + __func__, req->op[0], domid); + spin_unlock_irqrestore(&stream_node_hash_lock, + flags); + return IPU4_REQ_ERROR; + } + } + spin_unlock_irqrestore(&stream_node_hash_lock, flags); + + 
sprintf(node_name, "/dev/intel_stream%d", req->op[0]); + pr_info("process_device_open: %s", node_name); + sn = kzalloc(sizeof(struct stream_node), GFP_KERNEL); + sn->f = filp_open(node_name, O_RDWR | O_NONBLOCK, 0); + + strm_dev = sn->f->private_data; + if (strm_dev == NULL) { + pr_err("Native IPU stream device not found\n"); + return IPU4_REQ_ERROR; + } + strm_dev->virt_dev_id = req->op[0]; + + sn->client_id = domid; + spin_lock_irqsave(&stream_node_hash_lock, flags); + hash_add(STREAM_NODE_HASH, &sn->node, req->op[0]); + spin_unlock_irqrestore(&stream_node_hash_lock, flags); + + return IPU4_REQ_PROCESSED; +} + +static int process_device_close(struct ipu4_virtio_req_info *req_info) +{ + unsigned long flags = 0; + struct stream_node *sn = NULL; + struct hlist_node *tmp; + struct ipu4_virtio_req *req = req_info->request; + + if (!hash_initialised) + return IPU4_REQ_PROCESSED; //no node has been opened, do nothing + + pr_info("process_device_close: %d", req->op[0]); + + spin_lock_irqsave(&stream_node_hash_lock, flags); + hash_for_each_possible_safe(STREAM_NODE_HASH, sn, + tmp, node, req->op[0]) { + if (sn != NULL) { + filp_close(sn->f, 0); + hash_del(&sn->node); + kfree(sn); + } + } + spin_unlock_irqrestore(&stream_node_hash_lock, flags); + + return IPU4_REQ_PROCESSED; +} + +int process_set_format(struct ipu4_virtio_req_info *req_info) +{ + struct stream_node *sn = NULL; + struct ici_stream_device *strm_dev; + struct ici_stream_format *host_virt; + int err, found; + struct ipu4_virtio_req *req = req_info->request; + int domid = req_info->domid; + + pr_debug("process_set_format: %d %d", hash_initialised, req->op[0]); + + if (!hash_initialised) + return IPU4_REQ_ERROR; + + found = 0; + hash_for_each_possible(STREAM_NODE_HASH, sn, node, req->op[0]) { + if (sn != NULL) { + pr_err("process_set_format: node %d %p", req->op[0], sn); + found = 1; + break; + } + } + + if (!found) { + pr_debug("%s: stream not found %d\n", __func__, req->op[0]); + return IPU4_REQ_ERROR; + } + + 
strm_dev = sn->f->private_data; + if (strm_dev == NULL) { + pr_err("Native IPU stream device not found\n"); + return IPU4_REQ_ERROR; + } + + host_virt = map_guest_phys(domid, req->payload, + sizeof(struct ici_stream_format)); + if (host_virt == NULL) { + pr_err("process_set_format: NULL host_virt"); + return IPU4_REQ_ERROR; + } + + err = strm_dev->ipu_ioctl_ops->ici_set_format(sn->f, strm_dev, host_virt); + + unmap_guest_phys(domid, req->payload); + + if (err) { + pr_err("intel_ipu4_pvirt: internal set fmt failed\n"); + return IPU4_REQ_ERROR; + } + else + return IPU4_REQ_PROCESSED; +} + +int process_poll(struct ipu4_virtio_req_info *req_info) +{ + struct stream_node *sn = NULL; + struct ici_isys_stream *as; + bool found, empty; + unsigned long flags = 0; + struct ipu4_virtio_req *req = req_info->request; + int time_remain; + + pr_debug("%s: %d %d", __func__, hash_initialised, req->op[0]); + + if (!hash_initialised) + return IPU4_REQ_ERROR; + + found = false; + hash_for_each_possible(STREAM_NODE_HASH, sn, node, req->op[0]) { + if (sn != NULL) { + found = true; + break; + } + } + if (!found) { + pr_debug("%s: stream not found %d\n", __func__, req->op[0]); + return IPU4_REQ_ERROR; + } + + as = dev_to_stream(sn->f->private_data); + + spin_lock_irqsave(&as->buf_list.lock, flags); + empty = list_empty(&as->buf_list.putbuf_list); + spin_unlock_irqrestore(&as->buf_list.lock, flags); + if (!empty) { + req->func_ret = 1; + pr_debug("%s: done", __func__); + return IPU4_REQ_PROCESSED; + } else { + time_remain = wait_event_interruptible_timeout( + as->buf_list.wait, + !list_empty(&as->buf_list.putbuf_list) || + !as->ip.streaming, + POLL_WAIT); + if((time_remain == -ERESTARTSYS) || + time_remain == 0 || + !as->ip.streaming) { + pr_err("%s poll timeout or unexpected wake up! 
code:%d streaming: %d port:%d", + __func__, time_remain, + as->ip.streaming, + req->op[0]); + req->func_ret = 0; + return IPU4_REQ_ERROR; + } + else { + req->func_ret = POLLIN; + return IPU4_REQ_PROCESSED; + } + } +} + +int process_put_buf(struct ipu4_virtio_req_info *req_info) +{ + struct stream_node *sn = NULL; + struct ici_stream_device *strm_dev; + struct ici_frame_info *host_virt; + int err, found; + struct ipu4_virtio_req *req = req_info->request; + int domid = req_info->domid; + + pr_debug("process_put_buf: %d %d", hash_initialised, req->op[0]); + + if (!hash_initialised) + return IPU4_REQ_ERROR; + + found = 0; + hash_for_each_possible(STREAM_NODE_HASH, sn, node, req->op[0]) { + if (sn != NULL) { + pr_debug("process_put_buf: node %d %p", req->op[0], sn); + found = 1; + break; + } + } + + if (!found) { + pr_debug("%s: stream not found %d\n", __func__, req->op[0]); + return IPU4_REQ_ERROR; + } + + strm_dev = sn->f->private_data; + if (strm_dev == NULL) { + pr_err("Native IPU stream device not found\n"); + return IPU4_REQ_ERROR; + } + + host_virt = map_guest_phys(domid, req->payload, + sizeof(struct ici_frame_info)); + if (host_virt == NULL) { + pr_err("process_put_buf: NULL host_virt"); + return IPU4_REQ_ERROR; + } + err = strm_dev->ipu_ioctl_ops->ici_put_buf(sn->f, strm_dev, host_virt); + + unmap_guest_phys(domid, req->payload); + + if (err) { + pr_err("process_put_buf: ici_put_buf failed\n"); + return IPU4_REQ_ERROR; + } + else + return IPU4_REQ_PROCESSED; +} + +int process_get_buf(struct ipu4_virtio_req_info *req_info) +{ + struct stream_node *sn = NULL; + struct ici_frame_buf_wrapper *shared_buf; + struct ici_stream_device *strm_dev; + int k, i = 0; + void *pageaddr; + u64 *page_table = NULL; + struct page **data_pages = NULL; + int err, found, status; + struct ipu4_virtio_req *req = req_info->request; + int domid = req_info->domid; + + pr_debug("process_get_buf: %d %d", hash_initialised, req->op[0]); + + if (!hash_initialised) + return IPU4_REQ_ERROR; + + 
found = 0; + hash_for_each_possible(STREAM_NODE_HASH, sn, node, req->op[0]) { + if (sn != NULL) { + pr_debug("process_get_buf: node %d %p", req->op[0], sn); + found = 1; + break; + } + } + + if (!found) { + pr_debug("%s: stream not found %d\n", __func__, req->op[0]); + return IPU4_REQ_ERROR; + } + + pr_debug("GET_BUF: Mapping buffer\n"); + shared_buf = map_guest_phys(domid, req->payload, + sizeof(struct ici_frame_buf_wrapper)); + if (!shared_buf) { + pr_err("SOS Failed to map Buffer from UserOS\n"); + status = IPU4_REQ_ERROR; + goto exit; + } + data_pages = kcalloc(shared_buf->kframe_info.planes[0].npages, sizeof(struct page *), GFP_KERNEL); + if (data_pages == NULL) { + pr_err("SOS Failed alloc data page set\n"); + status = IPU4_REQ_ERROR; + goto exit_payload; + } + pr_debug("Total number of pages:%d\n", shared_buf->kframe_info.planes[0].npages); + + page_table = map_guest_phys(domid, shared_buf->kframe_info.planes[0].page_table_ref, + shared_buf->kframe_info.planes[0].npages * sizeof(u64)); + + if (page_table == NULL) { + pr_err("SOS Failed to map page table\n"); + req->stat = IPU4_REQ_ERROR; + status = IPU4_REQ_ERROR; + goto exit_payload; + } + else { + pr_debug("SOS first page %lld\n", page_table[0]); + k = 0; + for (i = 0; i < shared_buf->kframe_info.planes[0].npages; i++) { + pageaddr = map_guest_phys(domid, page_table[i], PAGE_SIZE); + if (pageaddr == NULL) { + pr_err("Cannot map pages from UOS\n"); + req->stat = IPU4_REQ_ERROR; + break; + } + + data_pages[k] = virt_to_page(pageaddr); + k++; + } + } + + strm_dev = sn->f->private_data; + if (strm_dev == NULL) { + pr_err("Native IPU stream device not found\n"); + status = IPU4_REQ_ERROR; + goto exit_page_table; + } + err = strm_dev->ipu_ioctl_ops->ici_get_buf_virt(sn->f, strm_dev, shared_buf, data_pages); + + if (err) { + pr_err("process_get_buf: ici_get_buf_virt failed\n"); + status = IPU4_REQ_ERROR; + } + else + status = IPU4_REQ_PROCESSED; + +exit_page_table: + for (i = 0; i < 
shared_buf->kframe_info.planes[0].npages; i++) + unmap_guest_phys(domid, page_table[i]); + unmap_guest_phys(domid, shared_buf->kframe_info.planes[0].page_table_ref); +exit_payload: + kfree(data_pages); + unmap_guest_phys(domid, req->payload); +exit: + req->stat = status; + return status; +} + +int process_stream_on(struct ipu4_virtio_req_info *req_info) +{ + struct stream_node *sn = NULL; + struct ici_stream_device *strm_dev; + int err, found; + struct ipu4_virtio_req *req = req_info->request; + + pr_debug("process_stream_on: %d %d", hash_initialised, req->op[0]); + + if (!hash_initialised) + return IPU4_REQ_ERROR; + + found = 0; + hash_for_each_possible(STREAM_NODE_HASH, sn, node, req->op[0]) { + if (sn != NULL) { + pr_err("process_stream_on: node %d %p", req->op[0], sn); + found = 1; + break; + } + } + + if (!found) { + pr_debug("%s: stream not found %d\n", __func__, req->op[0]); + return IPU4_REQ_ERROR; + } + + strm_dev = sn->f->private_data; + if (strm_dev == NULL) { + pr_err("Native IPU stream device not found\n"); + return IPU4_REQ_ERROR; + } + + err = strm_dev->ipu_ioctl_ops->ici_stream_on(sn->f, strm_dev); + + if (err) { + pr_err("process_stream_on: stream on failed\n"); + return IPU4_REQ_ERROR; + } + else + return IPU4_REQ_PROCESSED; +} + +int process_stream_off(struct ipu4_virtio_req_info *req_info) +{ + struct stream_node *sn = NULL; + struct ici_stream_device *strm_dev; + struct ici_isys_stream *as; + int err, found; + struct ipu4_virtio_req *req = req_info->request; + + pr_debug("process_stream_off: %d %d", hash_initialised, req->op[0]); + + if (!hash_initialised) + return IPU4_REQ_ERROR; + + found = 0; + hash_for_each_possible(STREAM_NODE_HASH, sn, node, req->op[0]) { + if (sn != NULL) { + pr_err("process_stream_off: node %d %p", req->op[0], sn); + found = 1; + break; + } + } + + if (!found) { + pr_debug("%s: stream not found %d\n", __func__, req->op[0]); + return IPU4_REQ_ERROR; + } + + strm_dev = sn->f->private_data; + if (strm_dev == NULL) { + 
pr_err("Native IPU stream device not found\n"); + return IPU4_REQ_ERROR; + } + + err = strm_dev->ipu_ioctl_ops->ici_stream_off(sn->f, strm_dev); + + if (err) { + pr_err("%s: stream off failed\n", __func__); + return IPU4_REQ_ERROR; + } else { + as = dev_to_stream(strm_dev); + wake_up_interruptible(&as->buf_list.wait); + return IPU4_REQ_PROCESSED; + } +} + +int process_set_format_thread(void *data) +{ + int status; + + status = process_set_format(data); + notify_fe(status, data); + do_exit(0); + return 0; +} + +int process_device_open_thread(void *data) +{ + int status; + + status = process_device_open(data); + notify_fe(status, data); + do_exit(0); + return 0; +} + +int process_device_close_thread(void *data) +{ + int status; + + status = process_device_close(data); + notify_fe(status, data); + do_exit(0); + return 0; +} + +int process_poll_thread(void *data) +{ + int status; + + status = process_poll(data); + notify_fe(status, data); + do_exit(0); + return 0; +} + +int process_put_buf_thread(void *data) +{ + int status; + + status = process_put_buf(data); + notify_fe(status, data); + do_exit(0); + return 0; +} + +int process_stream_on_thread(void *data) +{ + int status; + + status = process_stream_on(data); + notify_fe(status, data); + do_exit(0); + return 0; +} + +int process_stream_off_thread(void *data) +{ + int status; + + status = process_stream_off(data); + notify_fe(status, data); + do_exit(0); + return 0; +} + +int process_get_buf_thread(void *data) +{ + int status; + + status = process_get_buf(data); + notify_fe(status, data); + do_exit(0); + return 0; +} diff --git a/drivers/media/pci/intel/virtio/intel-ipu4-virtio-be-stream.h b/drivers/media/pci/intel/virtio/intel-ipu4-virtio-be-stream.h new file mode 100644 index 0000000000000..04a84a4c365a6 --- /dev/null +++ b/drivers/media/pci/intel/virtio/intel-ipu4-virtio-be-stream.h @@ -0,0 +1,26 @@ +/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0) */ +/* + * Copyright (C) 2018 Intel Corporation + */ + 
+#ifndef __IPU4_VIRTIO_BE_STREAM__ +#define __IPU4_VIRTIO_BE_STREAM__ + +#include +#include + +#include "intel-ipu4-virtio-common.h" +#include "intel-ipu4-virtio-be-request-queue.h" + +int process_set_format_thread(void *data); +int process_device_open_thread(void *data); +int process_device_close_thread(void *data); +int process_poll_thread(void *data); +int process_put_buf_thread(void *data); +int process_stream_on_thread(void *data); +int process_stream_off_thread(void *data); +int process_get_buf_thread(void *data); + +#endif + + diff --git a/drivers/media/pci/intel/virtio/intel-ipu4-virtio-be.c b/drivers/media/pci/intel/virtio/intel-ipu4-virtio-be.c new file mode 100644 index 0000000000000..d3dd13b55a1bf --- /dev/null +++ b/drivers/media/pci/intel/virtio/intel-ipu4-virtio-be.c @@ -0,0 +1,515 @@ +// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0) +/* + * Copyright (C) 2018 Intel Corporation + */ + +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +#include "intel-ipu4-virtio-common.h" +#include "intel-ipu4-virtio-be-bridge.h" +#include "intel-ipu4-virtio-be.h" +#include "intel-ipu4-virtio-be-request-queue.h" + +/** + * struct ipu4_virtio_be_priv - Backend of the IPU4 virtio device based on VBS-K + * + * @dev : instance of struct virtio_dev_info + * @vqs : instances of struct virtio_vq_info + * @busy : backend busy state flag + * @node : hashtable maintaining multiple connections + * from multiple guests/devices + */ +struct ipu4_virtio_be_priv { + struct virtio_dev_info dev; + struct virtio_vq_info vqs[IPU_VIRTIO_QUEUE_MAX]; + bool busy; + struct mutex mlock; + spinlock_t slock; + /* + * Each VBS-K module might serve multiple connections + * from multiple guests/device models/VBS-Us, so better + * to maintain the connections in a list, and here we + * use hashtable as an example. 
+ */ + struct hlist_node node; +}; + +#define RNG_MAX_HASH_BITS 4 /* MAX is 2^4 */ +#define HASH_NAME vbs_hash + +DECLARE_HASHTABLE(HASH_NAME, RNG_MAX_HASH_BITS); +static int ipu_vbk_hash_initialized; +static int ipu_vbk_connection_cnt; +/* function declarations */ +static int handle_kick(int client_id, unsigned long *req_map, + void *client_priv); +static void ipu_vbk_reset(struct ipu4_virtio_be_priv *rng); +static void ipu_vbk_stop(struct ipu4_virtio_be_priv *rng); +static void ipu_vbk_flush(struct ipu4_virtio_be_priv *rng); + +#ifdef RUNTIME_CTRL +static int ipu_vbk_enable_vq(struct ipu4_virtio_be_priv *rng, + struct virtio_vq_info *vq); +static void ipu_vbk_disable_vq(struct ipu4_virtio_be_priv *rng, + struct virtio_vq_info *vq); +static void ipu_vbk_stop_vq(struct ipu4_virtio_be_priv *rng, + struct virtio_vq_info *vq); +static void ipu_vbk_flush_vq(struct ipu4_virtio_be_priv *rng, int index); +#endif + +extern void cleanup_stream(void); + +/* hash table related functions */ +static void ipu_vbk_hash_init(void) +{ + if (ipu_vbk_hash_initialized) + return; + + hash_init(HASH_NAME); + ipu_vbk_hash_initialized = 1; +} + +static int ipu_vbk_hash_add(struct ipu4_virtio_be_priv *entry) +{ + if (!ipu_vbk_hash_initialized) { + pr_err("RNG hash table not initialized!\n"); + return -1; + } + + hash_add(HASH_NAME, &entry->node, virtio_dev_client_id(&entry->dev)); + return 0; +} + +static struct ipu4_virtio_be_priv *ipu_vbk_hash_find(int client_id) +{ + struct ipu4_virtio_be_priv *entry; + int bkt; + + if (!ipu_vbk_hash_initialized) { + pr_err("RNG hash table not initialized!\n"); + return NULL; + } + + hash_for_each(HASH_NAME, bkt, entry, node) + if (virtio_dev_client_id(&entry->dev) == client_id) + return entry; + + pr_err("Not found item matching client_id!\n"); + return NULL; +} + +static int ipu_vbk_hash_del(int client_id) +{ + struct ipu4_virtio_be_priv *entry; + struct hlist_node *tmp; + int bkt; + + if (!ipu_vbk_hash_initialized) { + pr_err("RNG hash table not 
initialized!\n"); + return -1; + } + + hash_for_each_safe(HASH_NAME, bkt, tmp, entry, node) + if (virtio_dev_client_id(&entry->dev) == client_id) { + hash_del(&entry->node); + return 0; + } + + pr_err("%s failed, not found matching client_id!\n", + __func__); + return -1; +} + +static int ipu_vbk_hash_del_all(void) +{ + struct ipu4_virtio_be_priv *entry; + struct hlist_node *tmp; + int bkt; + + if (!ipu_vbk_hash_initialized) { + pr_err("RNG hash table not initialized!\n"); + return -1; + } + + hash_for_each_safe(HASH_NAME, bkt, tmp, entry, node) + hash_del(&entry->node); + + return 0; +} + +static void handle_vq_kick(int client_id, int vq_idx) +{ + struct iovec iov; + struct ipu4_virtio_be_priv *be; + struct virtio_vq_info *vq; + struct ipu4_virtio_req_info *req_info = NULL; + struct ipu4_virtio_req *req = NULL; + int len; + int ret; + uint16_t idx; + + be = ipu_vbk_hash_find(client_id); + if (be == NULL) { + pr_err("%s: client %d not found!\n", + __func__, client_id); + return; + } + + if (!be) { + pr_err("rng is NULL! 
Cannot proceed!\n"); + return; + } + + vq = &(be->vqs[vq_idx]); + + while (virtio_vq_has_descs(vq)) { + virtio_vq_getchain(vq, &idx, &iov, 1, NULL); + + pr_debug("%s: vq index: %d vq buf index: %d req ptr: %lu\n", + __func__, vq_idx, idx, (long unsigned)iov.iov_base); + /* device specific operations, for example: */ + if (iov.iov_len != sizeof(struct ipu4_virtio_req)) { + if (iov.iov_len == sizeof(int)) { + *((int *)iov.iov_base) = 1; + len = iov.iov_len; + printk(KERN_NOTICE "IPU VBK handle kick from vmid:%d\n", 1); + } else { + len = 0; + printk(KERN_WARNING "received request with wrong size"); + printk(KERN_WARNING "%zu != %zu\n", + iov.iov_len, + sizeof(struct ipu4_virtio_req)); + } + + pr_debug("vtrnd: vtrnd_notify(): %d\r\n", len); + virtio_vq_relchain(vq, idx, len); + continue; + } + + req_info = ipu4_virtio_be_req_queue_get(); + if (req_info) { + req = (struct ipu4_virtio_req *)iov.iov_base; + req_info->request = req; + req_info->vq_info.req_len = iov.iov_len; + req_info->vq_info.vq_buf_idx = idx; + req_info->vq_info.vq_idx = vq_idx; + req_info->domid = 1; + req_info->client_id = client_id; + ret = intel_ipu4_virtio_msg_parse(req_info); + } else { + pr_err("%s: Failed to get request buffer from queue!", __func__); + virtio_vq_relchain(vq, idx, iov.iov_len); + continue; + } + + if (req->stat != IPU4_REQ_PENDING) { + virtio_vq_relchain(vq, idx, iov.iov_len); + ipu4_virtio_be_req_queue_put(req_info); + } + pr_debug("%s ending request for stream %d", + __func__, req->op[0]); + } + pr_debug("IPU VBK data process on VQ Done\n"); + if ((req == NULL) || (req && req->stat != + IPU4_REQ_PENDING)) + virtio_vq_endchains(vq, 1); +} + +static int handle_kick(int client_id, unsigned long *ioreqs_map, + void *client_priv) +{ + int *val, i, count; + struct ipu4_virtio_be_priv *priv; + + if (unlikely(bitmap_empty(ioreqs_map, VHM_REQUEST_MAX))) + return -EINVAL; + + priv = container_of(client_priv, struct ipu4_virtio_be_priv, dev); + if (priv == NULL) { + pr_err("%s: client 
%d not found!\n", + __func__, client_id); + return -EINVAL; + } + + val = kzalloc(priv->dev._ctx.max_vcpu * sizeof(int), + GFP_KERNEL); + + count = virtio_vqs_index_get(&priv->dev, ioreqs_map, val, + priv->dev._ctx.max_vcpu); + + for (i = 0; i < count; i++) { + if (val[i] >= 0) { + handle_vq_kick(client_id, val[i]); + } + } + + kfree(val); + + return 0; +} + +static int ipu_vbk_open(struct inode *inode, struct file *f) +{ + struct ipu4_virtio_be_priv *priv; + struct virtio_dev_info *dev; + struct virtio_vq_info *vqs; + int i; + + priv = kcalloc(1, sizeof(struct ipu4_virtio_be_priv), + GFP_KERNEL); + + if (priv == NULL) { + pr_err("Failed to allocate memory for ipu4_virtio_be_priv!\n"); + return -ENOMEM; + } + + vqs = &priv->vqs[0]; + + dev = &priv->dev; + + strncpy(dev->name, "vbs_ipu", VBS_NAME_LEN); + dev->dev_notify = handle_kick; + + + for (i = 0; i < IPU_VIRTIO_QUEUE_MAX; i++) { + vqs[i].dev = dev; + vqs[i].vq_notify = NULL; + } + + /* link dev and vqs */ + dev->vqs = vqs; + + virtio_dev_init(dev, vqs, IPU_VIRTIO_QUEUE_MAX); + + mutex_init(&priv->mlock); + spin_lock_init(&priv->slock); + + f->private_data = priv; + + /* init a hash table to maintain multi-connections */ + ipu_vbk_hash_init(); + + ipu4_virtio_be_req_queue_init(); + + return 0; +} + +static int ipu_vbk_release(struct inode *inode, struct file *f) +{ + struct ipu4_virtio_be_priv *priv = f->private_data; + int i; + + if (!priv) + pr_err("%s: UNLIKELY rng NULL!\n", + __func__); + + cleanup_stream(); + + ipu_vbk_stop(priv); + ipu_vbk_flush(priv); + for (i = 0; i < IPU_VIRTIO_QUEUE_MAX; i++) + virtio_vq_reset(&(priv->vqs[i])); + + /* device specific release */ + ipu_vbk_reset(priv); + + pr_debug("ipu_vbk_connection cnt is %d\n", + ipu_vbk_connection_cnt); + + if (priv && ipu_vbk_connection_cnt--) + ipu_vbk_hash_del(virtio_dev_client_id(&priv->dev)); + if (!ipu_vbk_connection_cnt) { + pr_debug("ipu4_virtio_be_priv remove all hash entries\n"); + ipu_vbk_hash_del_all(); + } + + kfree(priv); + + 
ipu4_virtio_be_req_queue_free(); + + pr_debug("%s done\n", __func__); + return 0; +} + +static long ipu_vbk_ioctl(struct file *f, unsigned int ioctl, + unsigned long arg) +{ + struct ipu4_virtio_be_priv *priv = f->private_data; + void __user *argp = (void __user *)arg; + /*u64 __user *featurep = argp;*/ + /*u64 features;*/ + int r; + + if (priv == NULL) { + pr_err("No IPU backend private data\n"); + return -EINVAL; + } + switch (ioctl) { +/* + * case VHOST_GET_FEATURES: + * features = VHOST_NET_FEATURES; + * if (copy_to_user(featurep, &features, sizeof features)) + * return -EFAULT; + * return 0; + * case VHOST_SET_FEATURES: + * if (copy_from_user(&features, featurep, sizeof features)) + * return -EFAULT; + * if (features & ~VHOST_NET_FEATURES) + * return -EOPNOTSUPP; + * return vhost_net_set_features(n, features); + */ + case VBS_SET_VQ: + /* + * we handle this here because we want to register VHM client + * after handling VBS_K_SET_VQ request + */ + r = virtio_vqs_ioctl(&priv->dev, ioctl, argp); + if (r == -ENOIOCTLCMD) { + pr_err("VBS_K_SET_VQ: virtio_vqs_ioctl failed!\n"); + return -EFAULT; + } + /* Register VHM client */ + if (virtio_dev_register(&priv->dev) < 0) { + pr_err("failed to register VHM client!\n"); + return -EFAULT; + } + /* Added to local hash table */ + if (ipu_vbk_hash_add(priv) < 0) { + pr_err("failed to add to hashtable!\n"); + return -EFAULT; + } + /* Increment counter */ + ipu_vbk_connection_cnt++; + return r; + default: + /*mutex_lock(&n->dev.mutex);*/ + r = virtio_dev_ioctl(&priv->dev, ioctl, argp); + if (r == -ENOIOCTLCMD) + r = virtio_vqs_ioctl(&priv->dev, ioctl, argp); + else + ipu_vbk_flush(priv); + /*mutex_unlock(&n->dev.mutex);*/ + return r; + } +} + +int notify_fe(int status, struct ipu4_virtio_req_info *req_info) +{ + struct virtio_vq_info *vq; + struct ipu4_virtio_be_priv *be; + unsigned long flags = 0; + + pr_debug("%s: stream: %d vq idx: %d cmd: %d stat: %d", + __func__, req_info->request->op[0], + req_info->vq_info.vq_idx, + 
req_info->request->cmd, + status); + + be = ipu_vbk_hash_find(req_info->client_id); + if (be == NULL) { + pr_err("%s: client %d not found!\n", + __func__, req_info->client_id); + return -EINVAL; + } + + vq = &(be->vqs[req_info->vq_info.vq_idx]); + + req_info->request->stat = status; + + spin_lock_irqsave(&be->slock, flags); + virtio_vq_relchain(vq, req_info->vq_info.vq_buf_idx, + req_info->vq_info.req_len); + virtio_vq_endchains(vq, 1); + ipu4_virtio_be_req_queue_put(req_info); + spin_unlock_irqrestore(&be->slock, flags); + + return 0; +} + +/* device specific function to cleanup itself */ +static void ipu_vbk_reset(struct ipu4_virtio_be_priv *rng) +{ +} + +/* device specific function */ +static void ipu_vbk_stop(struct ipu4_virtio_be_priv *rng) +{ + virtio_dev_deregister(&rng->dev); +} + +/* device specific function */ +static void ipu_vbk_flush(struct ipu4_virtio_be_priv *rng) +{ +} + +#ifdef RUNTIME_CTRL +/* device specific function */ +static int ipu_vbk_enable_vq(struct ipu4_virtio_be_priv *rng, + struct virtio_vq_info *vq) +{ + return 0; +} + +/* device specific function */ +static void ipu_vbk_disable_vq(struct ipu4_virtio_be_priv *rng, + struct virtio_vq_info *vq) +{ +} + +/* device specific function */ +static void ipu_vbk_stop_vq(struct ipu4_virtio_be_priv *rng, + struct virtio_vq_info *vq) +{ +} + +/* device specific function */ +static void ipu_vbk_flush_vq(struct ipu4_virtio_be_priv *rng, int index) +{ +} + +/* Set feature bits in kernel side device */ +static int ipu_vbk_set_features(struct ipu4_virtio_be_priv *rng, u64 features) +{ + return 0; +} +#endif + +static const struct file_operations vbs_fops = { + .owner = THIS_MODULE, + .release = ipu_vbk_release, + .unlocked_ioctl = ipu_vbk_ioctl, + .open = ipu_vbk_open, + .llseek = noop_llseek, +}; + +static struct miscdevice vbs_misc = { + .minor = MISC_DYNAMIC_MINOR, + .name = "vbs_ipu", + .fops = &vbs_fops, +}; + +static int ipu_vbk_init(void) +{ + return misc_register(&vbs_misc); +} 
+module_init(ipu_vbk_init); + +static void ipu_vbk_exit(void) +{ + misc_deregister(&vbs_misc); +} +module_exit(ipu_vbk_exit); + +MODULE_VERSION("0.1"); +MODULE_AUTHOR("Intel Corporation"); +MODULE_DESCRIPTION("IPU4 virtio driver"); +MODULE_LICENSE("Dual BSD/GPL"); diff --git a/drivers/media/pci/intel/virtio/intel-ipu4-virtio-be.h b/drivers/media/pci/intel/virtio/intel-ipu4-virtio-be.h new file mode 100644 index 0000000000000..821ea94d55279 --- /dev/null +++ b/drivers/media/pci/intel/virtio/intel-ipu4-virtio-be.h @@ -0,0 +1,22 @@ +/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0) */ +/* + * Copyright (C) 2018 Intel Corporation + */ + +#ifndef __IPU4_VIRTIO_BE__ +#define __IPU4_VIRTIO_BE__ + +#include +#include "intel-ipu4-virtio-be-request-queue.h" + +enum poll_status { + IPU4_POLL_PENDING = 0, + IPU4_POLL_AVAILABLE, + IPU4_POLL_STOP, + IPU4_POLL_SLEEP +}; + +int notify_fe(int status, struct ipu4_virtio_req_info *req_info); +void notify_poll_thread(int stream_id, enum poll_status status); + +#endif diff --git a/drivers/media/pci/intel/virtio/intel-ipu4-virtio-common-psys.h b/drivers/media/pci/intel/virtio/intel-ipu4-virtio-common-psys.h new file mode 100644 index 0000000000000..0d8df64d3db6a --- /dev/null +++ b/drivers/media/pci/intel/virtio/intel-ipu4-virtio-common-psys.h @@ -0,0 +1,34 @@ +/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0) */ +/* + * Copyright (C) 2018 Intel Corporation + */ + +#ifndef __IPU4_VIRTIO_COMMON_PSYS_H__ +#define __IPU4_VIRTIO_COMMON_PSYS_H__ + +struct ipu_psys_manifest_wrap { + u64 psys_manifest; + u64 manifest_data; +}; + +struct ipu_psys_usrptr_map { + bool vma_is_io; + u64 page_table_ref; + size_t npages; + u64 len; + void *userptr; +}; + +struct ipu_psys_buffer_wrap { + struct hlist_node node; + u64 psys_buf; + struct ipu_psys_usrptr_map map; +}; + +struct ipu_psys_command_wrap { + u64 psys_command; + u64 psys_manifest; + u64 psys_buffer; +}; + +#endif diff --git a/drivers/media/pci/intel/virtio/intel-ipu4-virtio-common.c 
b/drivers/media/pci/intel/virtio/intel-ipu4-virtio-common.c new file mode 100644 index 0000000000000..c0ac79fef8e2a --- /dev/null +++ b/drivers/media/pci/intel/virtio/intel-ipu4-virtio-common.c @@ -0,0 +1,140 @@ +// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0) +/* + * Copyright (C) 2018 Intel Corporation + */ + +#include +#include +#include +#include +#include +#include + +#include "intel-ipu4-virtio-common.h" + +DECLARE_HASHTABLE(ipu4_virtio_fe_hash, MAX_ENTRY_FE); + +void ipu4_virtio_fe_table_init(void) +{ + hash_init(ipu4_virtio_fe_hash); +} + +int ipu4_virtio_fe_add(struct ipu4_virtio_fe_info *fe_info) +{ + struct ipu4_virtio_fe_info_entry *info_entry; + + info_entry = kmalloc(sizeof(*info_entry), GFP_KERNEL); + + if (!info_entry) + return -ENOMEM; + + info_entry->info = fe_info; + + hash_add(ipu4_virtio_fe_hash, &info_entry->node, + info_entry->info->client_id); + + return 0; +} + +struct ipu4_virtio_fe_info *ipu4_virtio_fe_find(int client_id) +{ + struct ipu4_virtio_fe_info_entry *info_entry; + int bkt; + + hash_for_each(ipu4_virtio_fe_hash, bkt, info_entry, node) + if (info_entry->info->client_id == client_id) + return info_entry->info; + + return NULL; +} + +struct ipu4_virtio_fe_info *ipu4_virtio_fe_find_by_vmid(int vmid) +{ + struct ipu4_virtio_fe_info_entry *info_entry; + int bkt; + + hash_for_each(ipu4_virtio_fe_hash, bkt, info_entry, node) + if (info_entry->info->vmid == vmid) + return info_entry->info; + + return NULL; +} + +int ipu4_virtio_fe_remove(int client_id) +{ + struct ipu4_virtio_fe_info_entry *info_entry; + struct hlist_node *tmp; + int bkt; + + hash_for_each_safe(ipu4_virtio_fe_hash, bkt, + tmp, info_entry, node) + if (info_entry->info->client_id == client_id) { + hash_del(&info_entry->node); + kfree(info_entry); + return 0; + } + + return -ENOENT; +} + +int ipu4_virtio_ring_init(struct ipu4_virtio_ring *ring, + int ring_size) +{ + ring->buffer = kcalloc(1, ring_size * sizeof(u64), GFP_KERNEL); + + if (!ring->buffer) { + pr_err("%s: 
failed to allocate memory!", __func__); + return -ENOMEM; + } + + ring->head = 0; + ring->tail = 0; + ring->used = 0; + ring->ring_size = ring_size; + spin_lock_init(&ring->lock); + + return 0; +} + +void ipu4_virtio_ring_free(struct ipu4_virtio_ring *ring) +{ + kfree(ring->buffer); + ring->buffer = NULL; +} + +int ipu4_virtio_ring_push(struct ipu4_virtio_ring *ring, void *data) +{ + int next; + + if (ring->used == ring->ring_size) {//ring full + pr_err("%s: Ring is full!! %d", __func__, ring->used); + return -1; + } + + next = ring->head + 1; + next %= ring->ring_size; + ring->buffer[ring->head] = (u64)data; + ring->head = next; + ring->used++; + + return 0; +} + +void *ipu4_virtio_ring_pop(struct ipu4_virtio_ring *ring) +{ + int next; + void *data; + + if (ring->used == 0) + return NULL; + + next = ring->tail + 1; + next %= ring->ring_size; + + data = (void *) ring->buffer[ring->tail]; + ring->tail = next; + + ring->used--; + + return data; +} diff --git a/drivers/media/pci/intel/virtio/intel-ipu4-virtio-common.h b/drivers/media/pci/intel/virtio/intel-ipu4-virtio-common.h new file mode 100644 index 0000000000000..3edab4270da00 --- /dev/null +++ b/drivers/media/pci/intel/virtio/intel-ipu4-virtio-common.h @@ -0,0 +1,180 @@ +/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0) */ +/* + * Copyright (C) 2018 Intel Corporation + */ + +#ifndef __IPU4_VIRTIO_COMMON_H__ +#define __IPU4_VIRTIO_COMMON_H__ + +/* + * CWP uses physical addresses for memory sharing, + * so size of one page ref will be 64-bits + */ + +#define REFS_PER_PAGE (PAGE_SIZE/sizeof(u64)) +/* Defines size of requests circular buffer */ +#define REQ_RING_SIZE 128 +#define MAX_NUMBER_OF_OPERANDS 64 +#define MAX_ENTRY_FE 7 +#define MAX_STREAM_DEVICES 64 +#define MAX_PIPELINE_DEVICES 1 +#define MAX_ISYS_VIRT_STREAM 34 + +#define phys_to_page(x) pfn_to_page((x) >> PAGE_SHIFT) + +enum virio_queue_type { + IPU_VIRTIO_QUEUE_0 = 0, + IPU_VIRTIO_QUEUE_1, + IPU_VIRTIO_QUEUE_MAX +}; + +struct ipu4_virtio_req { + 
unsigned int req_id; + unsigned int stat; + unsigned int cmd; + unsigned int func_ret; + unsigned int op[MAX_NUMBER_OF_OPERANDS]; + wait_queue_head_t *wait; + bool completed; + u64 payload; + struct file *be_fh; +}; +struct test_payload { + unsigned int data1; + long int data2; + char name[256]; +}; +/*Not used*/ +struct ipu4_virtio_resp { + unsigned int resp_id; + unsigned int stat; + unsigned int cmd; + unsigned int op[MAX_NUMBER_OF_OPERANDS]; +}; + +/*Not used*/ +struct ipu4_virtio_fe_info { + struct ipu4_virtio_be_priv *priv; + int client_id; + int vmid; + int max_vcpu; + struct vhm_request *req_buf; +}; + +/*Not used*/ +struct ipu4_virtio_fe_info_entry { + struct ipu4_virtio_fe_info *info; + struct hlist_node node; +}; + +struct ipu4_bknd_ops { + /* backed initialization routine */ + int (*init)(void); + + /* backed cleanup routine */ + void (*cleanup)(void); + + /* retreiving id of current virtual machine */ + int (*get_vm_id)(void); + + int (*send_req)(int, struct ipu4_virtio_req *, int, int); +}; + +struct ipu4_virtio_ctx { + /* VM(domain) id of current VM instance */ + int domid; + + /* backend ops - hypervisor specific */ + struct ipu4_bknd_ops *bknd_ops; + + /* flag that shows whether backend is initialized */ + bool initialized; + + /* device global lock */ + struct mutex lock; +}; + +enum intel_ipu4_virtio_command { + IPU4_CMD_DEVICE_OPEN = 0x1, + IPU4_CMD_DEVICE_CLOSE, + IPU4_CMD_STREAM_ON, + IPU4_CMD_STREAM_OFF, + IPU4_CMD_GET_BUF, + IPU4_CMD_PUT_BUF, + IPU4_CMD_SET_FORMAT, + IPU4_CMD_ENUM_NODES, + IPU4_CMD_ENUM_LINKS, + IPU4_CMD_SETUP_PIPE, + IPU4_CMD_SET_FRAMEFMT, + IPU4_CMD_GET_FRAMEFMT, + IPU4_CMD_GET_SUPPORTED_FRAMEFMT, + IPU4_CMD_SET_SELECTION, + IPU4_CMD_GET_SELECTION, + IPU4_CMD_POLL, + IPU4_CMD_PIPELINE_OPEN, + IPU4_CMD_PIPELINE_CLOSE, + IPU4_CMD_PSYS_MAPBUF, + IPU4_CMD_PSYS_UNMAPBUF, + IPU4_CMD_PSYS_QUERYCAP, + IPU4_CMD_PSYS_GETBUF, + IPU4_CMD_PSYS_PUTBUF, + IPU4_CMD_PSYS_QCMD, + IPU4_CMD_PSYS_DQEVENT, + IPU4_CMD_PSYS_GET_MANIFEST, + 
IPU4_CMD_PSYS_OPEN, + IPU4_CMD_PSYS_CLOSE, + IPU4_CMD_PSYS_POLL, + IPU4_CMD_GET_N +}; + +enum intel_ipu4_virtio_req_feedback { + IPU4_REQ_ERROR = -1, + IPU4_REQ_PROCESSED, + IPU4_REQ_PENDING, + IPU4_REQ_NOT_RESPONDED +}; + +struct ipu4_virtio_ring { + /* Buffer allocated for keeping ring entries */ + u64 *buffer; + + /* Index pointing to next free element in ring */ + int head; + + /* Index pointing to last released element in ring */ + int tail; + + /* Total number of elements that ring can contain */ + int ring_size; + + /* Number of location in ring has been used */ + unsigned int used; + + /* Multi thread sync */ + spinlock_t lock; +}; + +/* Create the ring buffer with given size */ +int ipu4_virtio_ring_init(struct ipu4_virtio_ring *ring, + int ring_size); + +/* Frees the ring buffers */ +void ipu4_virtio_ring_free(struct ipu4_virtio_ring *ring); + +/* Add a buffer to ring */ +int ipu4_virtio_ring_push(struct ipu4_virtio_ring *ring, void *data); + +/* Grab a buffer from ring */ +void *ipu4_virtio_ring_pop(struct ipu4_virtio_ring *ring); + +extern struct ipu4_bknd_ops ipu4_virtio_bknd_ops; + +void ipu4_virtio_fe_table_init(void); + +int ipu4_virtio_fe_add(struct ipu4_virtio_fe_info *fe_info); + +struct ipu4_virtio_fe_info *ipu4_virtio_fe_find(int client_id); + +struct ipu4_virtio_fe_info *ipu4_virtio_fe_find_by_vmid(int vmid); + +#endif diff --git a/drivers/media/pci/intel/virtio/intel-ipu4-virtio-fe-payload.c b/drivers/media/pci/intel/virtio/intel-ipu4-virtio-fe-payload.c new file mode 100644 index 0000000000000..231d7771a1c89 --- /dev/null +++ b/drivers/media/pci/intel/virtio/intel-ipu4-virtio-fe-payload.c @@ -0,0 +1,69 @@ +// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0) +/* + * Copyright (C) 2018 Intel Corporation + */ + +#include +#include +#include +#include +#include +#include +#include + +#include "intel-ipu4-virtio-common.h" +#include "intel-ipu4-virtio-fe-payload.h" + +void intel_ipu4_virtio_create_req(struct ipu4_virtio_req *req, + enum 
intel_ipu4_virtio_command cmd, int *op) +{ + int i; + + req->stat = IPU4_REQ_NOT_RESPONDED; + req->cmd = cmd; + + switch (cmd) { + case IPU4_CMD_POLL: + case IPU4_CMD_DEVICE_OPEN: + case IPU4_CMD_DEVICE_CLOSE: + case IPU4_CMD_STREAM_ON: + case IPU4_CMD_STREAM_OFF: + case IPU4_CMD_PUT_BUF: + case IPU4_CMD_SET_FORMAT: + case IPU4_CMD_ENUM_NODES: + case IPU4_CMD_ENUM_LINKS: + case IPU4_CMD_SETUP_PIPE: + case IPU4_CMD_SET_FRAMEFMT: + case IPU4_CMD_GET_FRAMEFMT: + case IPU4_CMD_GET_SUPPORTED_FRAMEFMT: + case IPU4_CMD_SET_SELECTION: + case IPU4_CMD_GET_SELECTION: + /* Open video device node + * op0 - virtual device node number + * op1 - Actual device fd. By default set to 0 + */ + for (i = 0; i < 2; i++) + req->op[i] = op[i]; + break; + case IPU4_CMD_GET_BUF: + for (i = 0; i < 3; i++) + req->op[i] = op[i]; + break; + case IPU4_CMD_PSYS_UNMAPBUF: + req->op[0] = op[0]; + break; + case IPU4_CMD_PSYS_OPEN: + case IPU4_CMD_PSYS_CLOSE: + case IPU4_CMD_PSYS_POLL: + case IPU4_CMD_PSYS_MAPBUF: + case IPU4_CMD_PSYS_QUERYCAP: + case IPU4_CMD_PSYS_GETBUF: + case IPU4_CMD_PSYS_PUTBUF: + case IPU4_CMD_PSYS_QCMD: + case IPU4_CMD_PSYS_DQEVENT: + case IPU4_CMD_PSYS_GET_MANIFEST: + break; + default: + return; + } +} diff --git a/drivers/media/pci/intel/virtio/intel-ipu4-virtio-fe-payload.h b/drivers/media/pci/intel/virtio/intel-ipu4-virtio-fe-payload.h new file mode 100644 index 0000000000000..173c31a54692b --- /dev/null +++ b/drivers/media/pci/intel/virtio/intel-ipu4-virtio-fe-payload.h @@ -0,0 +1,14 @@ +// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0) +/* + * Copyright (C) 2018 Intel Corporation + */ + +#ifndef __IPU4_VIRTIO_FE_PAYLOAD__ +#define __IPU4_VIRTIO_FE_PAYLOAD__ + +#include "intel-ipu4-virtio-common.h" + +void intel_ipu4_virtio_create_req(struct ipu4_virtio_req *req, + enum intel_ipu4_virtio_command cmd, int *op); + +#endif \ No newline at end of file diff --git a/drivers/media/pci/intel/virtio/intel-ipu4-virtio-fe-pipeline.c 
b/drivers/media/pci/intel/virtio/intel-ipu4-virtio-fe-pipeline.c new file mode 100644 index 0000000000000..8122d81771048 --- /dev/null +++ b/drivers/media/pci/intel/virtio/intel-ipu4-virtio-fe-pipeline.c @@ -0,0 +1,45 @@ +/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0) */ +/* + * Copyright (C) 2018 Intel Corporation + */ + +#include +#include +#include +#include +#include + +#include "intel-ipu4-virtio-fe-payload.h" +#include "intel-ipu4-virtio-fe-pipeline.h" +#include "intel-ipu4-virtio-fe-request-queue.h" + +int process_pipeline(struct file *file, struct ipu4_virtio_ctx *fe_priv, + void *data, int cmd) +{ + struct ipu4_virtio_req *req; + int rval = 0; + int op[10]; + + op[0] = 0; + op[1] = 0; + + req = ipu4_virtio_fe_req_queue_get(); + if (!req) + return -ENOMEM; + + req->payload = virt_to_phys(data); + + intel_ipu4_virtio_create_req(req, cmd, &op[0]); + + rval = fe_priv->bknd_ops->send_req(fe_priv->domid, req, true, IPU_VIRTIO_QUEUE_0); + if (rval) { + pr_err("Failed to send request to BE\n"); + ipu4_virtio_fe_req_queue_put(req); + return rval; + } + + ipu4_virtio_fe_req_queue_put(req); + + return rval; +} + diff --git a/drivers/media/pci/intel/virtio/intel-ipu4-virtio-fe-pipeline.h b/drivers/media/pci/intel/virtio/intel-ipu4-virtio-fe-pipeline.h new file mode 100644 index 0000000000000..d1fbe106beda0 --- /dev/null +++ b/drivers/media/pci/intel/virtio/intel-ipu4-virtio-fe-pipeline.h @@ -0,0 +1,19 @@ +/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0) */ +/* + * Copyright (C) 2018 Intel Corporation + */ + +#ifndef IPU4_VIRTIO_FE_PIPELINE_H +#define IPU4_VIRTIO_FE_PIPELINE_H + +#include + +#include "virtio/intel-ipu4-virtio-common.h" + +int process_pipeline(struct file *file, + struct ipu4_virtio_ctx *fe_priv, + void *data, + int cmd); + + +#endif diff --git a/drivers/media/pci/intel/virtio/intel-ipu4-virtio-fe-psys-32compat.c b/drivers/media/pci/intel/virtio/intel-ipu4-virtio-fe-psys-32compat.c new file mode 100644 index 0000000000000..6756dd333cf8e --- 
/dev/null +++ b/drivers/media/pci/intel/virtio/intel-ipu4-virtio-fe-psys-32compat.c @@ -0,0 +1,225 @@ +// SPDX-License-Identifier: GPL-2.0 +// Copyright (C) 2013 - 2018 Intel Corporation + +#include +#include +#include + +#include + +#include "ipu-psys.h" + +static long native_ioctl(struct file *file, unsigned int cmd, unsigned long arg) +{ + long ret = -ENOTTY; + + if (file->f_op->unlocked_ioctl) + ret = file->f_op->unlocked_ioctl(file, cmd, arg); + + return ret; +} + +struct ipu_psys_buffer32 { + u64 len; + union { + int fd; + compat_uptr_t userptr; + u64 reserved; + } base; + u32 data_offset; + u32 bytes_used; + u32 flags; + u32 reserved[2]; +} __packed; + +struct ipu_psys_command32 { + u64 issue_id; + u64 user_token; + u32 priority; + compat_uptr_t pg_manifest; + compat_uptr_t buffers; + int pg; + u32 pg_manifest_size; + u32 bufcount; + u32 min_psys_freq; + u32 frame_counter; + u32 reserved[2]; +} __packed; + +struct ipu_psys_manifest32 { + u32 index; + u32 size; + compat_uptr_t manifest; + u32 reserved[5]; +} __packed; + +static int +get_ipu_psys_command32(struct ipu_psys_command *kp, + struct ipu_psys_command32 __user *up) +{ + compat_uptr_t pgm, bufs; + + if (!access_ok(VERIFY_READ, up, + sizeof(struct ipu_psys_command32)) || + get_user(kp->issue_id, &up->issue_id) || + get_user(kp->user_token, &up->user_token) || + get_user(kp->priority, &up->priority) || + get_user(pgm, &up->pg_manifest) || + get_user(bufs, &up->buffers) || + get_user(kp->pg, &up->pg) || + get_user(kp->pg_manifest_size, &up->pg_manifest_size) || + get_user(kp->bufcount, &up->bufcount) || + get_user(kp->min_psys_freq, &up->min_psys_freq) + || get_user(kp->frame_counter, &up->frame_counter) + ) + return -EFAULT; + + kp->pg_manifest = compat_ptr(pgm); + kp->buffers = compat_ptr(bufs); + + return 0; +} + +static int +get_ipu_psys_buffer32(struct ipu_psys_buffer *kp, + struct ipu_psys_buffer32 __user *up) +{ + compat_uptr_t ptr; + + if (!access_ok(VERIFY_READ, up, + sizeof(struct 
ipu_psys_buffer32)) || + get_user(kp->len, &up->len) || + get_user(ptr, &up->base.userptr) || + get_user(kp->data_offset, &up->data_offset) || + get_user(kp->bytes_used, &up->bytes_used) || + get_user(kp->flags, &up->flags)) + return -EFAULT; + + kp->base.userptr = compat_ptr(ptr); + + return 0; +} + +static int +put_ipu_psys_buffer32(struct ipu_psys_buffer *kp, + struct ipu_psys_buffer32 __user *up) +{ + if (!access_ok(VERIFY_WRITE, up, + sizeof(struct ipu_psys_buffer32)) || + put_user(kp->len, &up->len) || + put_user(kp->base.fd, &up->base.fd) || + put_user(kp->data_offset, &up->data_offset) || + put_user(kp->bytes_used, &up->bytes_used) || + put_user(kp->flags, &up->flags)) + return -EFAULT; + + return 0; +} + +static int +get_ipu_psys_manifest32(struct ipu_psys_manifest *kp, + struct ipu_psys_manifest32 __user *up) +{ + compat_uptr_t ptr; + + if (!access_ok(VERIFY_READ, up, + sizeof(struct ipu_psys_manifest32)) || + get_user(kp->index, &up->index) || + get_user(kp->size, &up->size) || get_user(ptr, &up->manifest)) + return -EFAULT; + + kp->manifest = compat_ptr(ptr); + + return 0; +} + +static int +put_ipu_psys_manifest32(struct ipu_psys_manifest *kp, + struct ipu_psys_manifest32 __user *up) +{ + compat_uptr_t ptr = (u32)((unsigned long)kp->manifest); + + if (!access_ok(VERIFY_WRITE, up, + sizeof(struct ipu_psys_manifest32)) || + put_user(kp->index, &up->index) || + put_user(kp->size, &up->size) || put_user(ptr, &up->manifest)) + return -EFAULT; + + return 0; +} + +#define IPU_IOC_GETBUF32 _IOWR('A', 4, struct ipu_psys_buffer32) +#define IPU_IOC_PUTBUF32 _IOWR('A', 5, struct ipu_psys_buffer32) +#define IPU_IOC_QCMD32 _IOWR('A', 6, struct ipu_psys_command32) +#define IPU_IOC_CMD_CANCEL32 _IOWR('A', 8, struct ipu_psys_command32) +#define IPU_IOC_GET_MANIFEST32 _IOWR('A', 9, struct ipu_psys_manifest32) + +long virt_psys_compat_ioctl32(struct file *file, unsigned int cmd, + unsigned long arg) +{ + union { + struct ipu_psys_buffer buf; + struct ipu_psys_command cmd; 
+ struct ipu_psys_event ev; + struct ipu_psys_manifest m; + } karg; + int compatible_arg = 1; + int err = 0; + void __user *up = compat_ptr(arg); + + switch (cmd) { + case IPU_IOC_GETBUF32: + cmd = IPU_IOC_GETBUF; + break; + case IPU_IOC_PUTBUF32: + cmd = IPU_IOC_PUTBUF; + break; + case IPU_IOC_QCMD32: + cmd = IPU_IOC_QCMD; + break; + case IPU_IOC_GET_MANIFEST32: + cmd = IPU_IOC_GET_MANIFEST; + break; + } + + switch (cmd) { + case IPU_IOC_GETBUF: + case IPU_IOC_PUTBUF: + err = get_ipu_psys_buffer32(&karg.buf, up); + compatible_arg = 0; + break; + case IPU_IOC_QCMD: + err = get_ipu_psys_command32(&karg.cmd, up); + compatible_arg = 0; + break; + case IPU_IOC_GET_MANIFEST: + err = get_ipu_psys_manifest32(&karg.m, up); + compatible_arg = 0; + break; + } + if (err) + return err; + + if (compatible_arg) { + err = native_ioctl(file, cmd, (unsigned long)up); + } else { + mm_segment_t old_fs = get_fs(); + + set_fs(KERNEL_DS); + err = native_ioctl(file, cmd, (unsigned long)&karg); + set_fs(old_fs); + } + + if (err) + return err; + + switch (cmd) { + case IPU_IOC_GETBUF: + err = put_ipu_psys_buffer32(&karg.buf, up); + break; + case IPU_IOC_GET_MANIFEST: + err = put_ipu_psys_manifest32(&karg.m, up); + break; + } + return err; +} diff --git a/drivers/media/pci/intel/virtio/intel-ipu4-virtio-fe-request-queue.c b/drivers/media/pci/intel/virtio/intel-ipu4-virtio-fe-request-queue.c new file mode 100644 index 0000000000000..b03438c03a8c2 --- /dev/null +++ b/drivers/media/pci/intel/virtio/intel-ipu4-virtio-fe-request-queue.c @@ -0,0 +1,73 @@ +// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0) +/* + * Copyright (C) 2018 Intel Corporation + */ + +#include +#include +#include "intel-ipu4-virtio-common.h" +#include "intel-ipu4-virtio-fe-request-queue.h" + +struct ipu4_virtio_ring ipu4_virtio_fe_req_queue; + +int ipu4_virtio_fe_req_queue_init(void) +{ + int i; + struct ipu4_virtio_req *req; + + if (ipu4_virtio_ring_init(&ipu4_virtio_fe_req_queue, REQ_RING_SIZE)) + return -1; + + for (i 
= 0; i < REQ_RING_SIZE; i++) { + req = kcalloc(1, sizeof(struct ipu4_virtio_req), GFP_KERNEL); + if (req == NULL) { + pr_err("%s failed to allocate memory for ipu4_virtio_req", + __func__); + return -1; + } + req->wait = kzalloc(sizeof(wait_queue_head_t), GFP_KERNEL); + init_waitqueue_head(req->wait); + ipu4_virtio_ring_push(&ipu4_virtio_fe_req_queue, req); + } + return 0; +} + +void ipu4_virtio_fe_req_queue_free(void) +{ + int i; + struct ipu4_virtio_req *req; + + for (i = 0; i < REQ_RING_SIZE; i++) { + req = ipu4_virtio_ring_pop(&ipu4_virtio_fe_req_queue); + if (req) { + kfree(req->wait); + kfree(req); + } + else + break; + } + ipu4_virtio_ring_free(&ipu4_virtio_fe_req_queue); +} + +struct ipu4_virtio_req *ipu4_virtio_fe_req_queue_get(void) +{ + struct ipu4_virtio_req *req; + unsigned long flags = 0; + + spin_lock_irqsave(&ipu4_virtio_fe_req_queue.lock, flags); + req = ipu4_virtio_ring_pop(&ipu4_virtio_fe_req_queue); + spin_unlock_irqrestore(&ipu4_virtio_fe_req_queue.lock, flags); + return req; +} + +int ipu4_virtio_fe_req_queue_put( + struct ipu4_virtio_req *req) +{ + unsigned long flags = 0; + int status; + + spin_lock_irqsave(&ipu4_virtio_fe_req_queue.lock, flags); + status = ipu4_virtio_ring_push(&ipu4_virtio_fe_req_queue, req); + spin_unlock_irqrestore(&ipu4_virtio_fe_req_queue.lock, flags); + return status; +} diff --git a/drivers/media/pci/intel/virtio/intel-ipu4-virtio-fe-request-queue.h b/drivers/media/pci/intel/virtio/intel-ipu4-virtio-fe-request-queue.h new file mode 100644 index 0000000000000..9a36c99f9b5b3 --- /dev/null +++ b/drivers/media/pci/intel/virtio/intel-ipu4-virtio-fe-request-queue.h @@ -0,0 +1,14 @@ +/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0) */ +/* + * Copyright (C) 2018 Intel Corporation + */ + +#ifndef IPU4_VIRTIO_FE_REQUEST_QUEUE_H +#define IPU4_VIRTIO_FE_REQUEST_QUEUE_H + +int ipu4_virtio_fe_req_queue_init(void); +void ipu4_virtio_fe_req_queue_free(void); +struct ipu4_virtio_req *ipu4_virtio_fe_req_queue_get(void); +int 
ipu4_virtio_fe_req_queue_put(struct ipu4_virtio_req *req); + +#endif diff --git a/drivers/media/pci/intel/virtio/intel-ipu4-virtio-fe.c b/drivers/media/pci/intel/virtio/intel-ipu4-virtio-fe.c new file mode 100644 index 0000000000000..517b48d2ede1b --- /dev/null +++ b/drivers/media/pci/intel/virtio/intel-ipu4-virtio-fe.c @@ -0,0 +1,271 @@ +// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0) +/* + * Copyright (C) 2018 Intel Corporation + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include "intel-ipu4-virtio-common.h" + +static DEFINE_IDA(index_ida); + +#define REQ_TIMEOUT 22000 //22s + +struct ipu4_virtio_uos { + struct virtqueue *vq[IPU_VIRTIO_QUEUE_MAX]; + char name[25]; + unsigned int data_avail; + spinlock_t lock; + int index; + bool busy; + int vmid; +}; + +/* Assuming there will be one FE instance per VM */ +static struct ipu4_virtio_uos *ipu4_virtio_fe; + +static void ipu_virtio_fe_tx_done_vq_0(struct virtqueue *vq) +{ + struct ipu4_virtio_uos *priv = (struct ipu4_virtio_uos *)vq->vdev->priv; + struct ipu4_virtio_req *req; + unsigned long flags = 0; + + do { + spin_lock_irqsave(&priv->lock, flags); + req = (struct ipu4_virtio_req *) + virtqueue_get_buf(vq, &priv->data_avail); + spin_unlock_irqrestore(&priv->lock, flags); + if (req != NULL && + priv->data_avail == sizeof(struct ipu4_virtio_req)) { + req->completed = true; + wake_up(req->wait); + } + } while (req != NULL); + + pr_debug("IPU FE:%s vmid:%d TX for VQ 0 done\n", __func__, priv->vmid); +} + +static void ipu_virtio_fe_tx_done_vq_1(struct virtqueue *vq) +{ + struct ipu4_virtio_uos *priv = (struct ipu4_virtio_uos *)vq->vdev->priv; + struct ipu4_virtio_req *req; + unsigned long flags = 0; + + do { + spin_lock_irqsave(&priv->lock, flags); + req = (struct ipu4_virtio_req *) + virtqueue_get_buf(vq, &priv->data_avail); + spin_unlock_irqrestore(&priv->lock, flags); + if (req != NULL && + priv->data_avail == sizeof(struct ipu4_virtio_req)) { + req->completed = true; 
+ wake_up(req->wait); + } + } while (req != NULL); + + pr_debug("IPU FE:%s vmid:%d TX for VQ 1 done\n", __func__, priv->vmid); +} + +/* The host will fill any buffer we give it with sweet, sweet randomness. */ +static void ipu_virtio_fe_register_buffer(struct ipu4_virtio_uos *vi, void *buf, size_t size, + int nqueue) +{ + struct scatterlist sg; + unsigned long flags = 0; + + if (nqueue >= IPU_VIRTIO_QUEUE_MAX) { + pr_debug("Number of queue exceeding max queue number\n"); + return; + } + + sg_init_one(&sg, buf, size); + + spin_lock_irqsave(&vi->lock, flags); + /* There should always be room for one buffer. */ + virtqueue_add_inbuf(vi->vq[nqueue], &sg, 1, buf, GFP_KERNEL); + + spin_unlock_irqrestore(&vi->lock, flags); + + virtqueue_kick(vi->vq[nqueue]); +} + +static int ipu_virtio_fe_probe_common(struct virtio_device *vdev) +{ + int err, index; + struct ipu4_virtio_uos *priv = NULL; + vq_callback_t *callbacks[] = { + ipu_virtio_fe_tx_done_vq_0, + ipu_virtio_fe_tx_done_vq_1}; + static const char * const names[] = { + "csi_queue_0", + "csi_queue_1"}; + priv = kzalloc(sizeof(struct ipu4_virtio_uos), GFP_KERNEL); + if (!priv) + return -ENOMEM; + + priv->index = index = ida_simple_get(&index_ida, 0, 0, GFP_KERNEL); + if (index < 0) { + err = index; + goto err_ida; + } + sprintf(priv->name, "virtio_.%d", index); + priv->vmid = -1; + vdev->priv = priv; + err = virtio_find_vqs(vdev, IPU_VIRTIO_QUEUE_MAX, + priv->vq, callbacks, names, NULL); + if (err) + goto err_find; + + spin_lock_init(&priv->lock); + + ipu4_virtio_fe = priv; + + return 0; + +err_find: + ida_simple_remove(&index_ida, index); +err_ida: + kfree(priv); + return err; +} + +static void ipu_virtio_fe_remove_common(struct virtio_device *vdev) +{ + struct ipu4_virtio_uos *priv = vdev->priv; + + priv->data_avail = 0; + vdev->config->reset(vdev); + priv->busy = false; + + vdev->config->del_vqs(vdev); + //ida_simple_remove(&index_ida, priv->index); + kfree(priv); +} + +static int ipu_virtio_fe_send_req(int vmid, 
struct ipu4_virtio_req *req, + int wait, int idx) +{ + struct ipu4_virtio_uos *priv = ipu4_virtio_fe; + int ret = 0; + pr_debug("IPU FE:%s\n", __func__); + if (priv == NULL) { + pr_err("IPU Backend not connected\n"); + return -ENOENT; + } + req->completed = false; + ipu_virtio_fe_register_buffer(ipu4_virtio_fe, req, sizeof(*req), idx); + ret = wait_event_timeout(*req->wait, + req->completed,REQ_TIMEOUT); + + if(ret) + return req->stat; + else { + pr_err("%s: send request timeout!!!", __func__); + return -1; + } +} +static int ipu_virtio_fe_get_vmid(void) +{ + struct ipu4_virtio_uos *priv = ipu4_virtio_fe; + + if (ipu4_virtio_fe == NULL) { + pr_err("IPU Backend not connected\n"); + return -1; + } + return priv->vmid; +} + +int ipu_virtio_fe_register(void) +{ + pr_debug("IPU FE:%s\n", __func__); + return 0; +} + +void ipu_virtio_fe_unregister(void) +{ + pr_debug("IPU FE:%s\n", __func__); + return; +} +static int virt_probe(struct virtio_device *vdev) +{ + return ipu_virtio_fe_probe_common(vdev); +} + +static void virt_remove(struct virtio_device *vdev) +{ + ipu_virtio_fe_remove_common(vdev); +} + +static void virt_scan(struct virtio_device *vdev) +{ + struct ipu4_virtio_uos *vi = (struct ipu4_virtio_uos *)vdev->priv; + int timeout = 1000; + + if (vi == NULL) { + pr_err("IPU No frontend private data\n"); + return; + } + ipu_virtio_fe_register_buffer(vi, &vi->vmid, sizeof(vi->vmid), + IPU_VIRTIO_QUEUE_0); + + while (timeout--) { + if (vi->vmid > 0) + break; + usleep_range(100, 120); + } + pr_debug("IPU FE:%s vmid:%d\n", __func__, vi->vmid); + + if (timeout < 0) + pr_err("IPU Cannot query vmid\n"); + +} + +#ifdef CONFIG_PM_SLEEP +static int virt_freeze(struct virtio_device *vdev) +{ + ipu_virtio_fe_remove_common(vdev); + return 0; +} + +static int virt_restore(struct virtio_device *vdev) +{ + return ipu_virtio_fe_probe_common(vdev); +} +#endif + +static struct virtio_device_id id_table[] = { + { VIRTIO_ID_IPU, VIRTIO_DEV_ANY_ID }, + { 0 }, +}; + +struct ipu4_bknd_ops 
ipu4_virtio_bknd_ops = { + .init = ipu_virtio_fe_register, + .cleanup = ipu_virtio_fe_unregister, + .get_vm_id = ipu_virtio_fe_get_vmid, + .send_req = ipu_virtio_fe_send_req +}; + +static struct virtio_driver virtio_driver = { + .driver.name = KBUILD_MODNAME, + .driver.owner = THIS_MODULE, + .id_table = id_table, + .probe = virt_probe, + .remove = virt_remove, + .scan = virt_scan, +#ifdef CONFIG_PM_SLEEP + .freeze = virt_freeze, + .restore = virt_restore, +#endif +}; + + +module_virtio_driver(virtio_driver); +MODULE_DEVICE_TABLE(virtio, id_table); +MODULE_DESCRIPTION("IPU4 virtio driver"); +MODULE_LICENSE("Dual BSD/GPL"); diff --git a/drivers/media/platform/Kconfig b/drivers/media/platform/Kconfig index 54fe90acb5b29..76b67179062d8 100644 --- a/drivers/media/platform/Kconfig +++ b/drivers/media/platform/Kconfig @@ -137,6 +137,7 @@ source "drivers/media/platform/am437x/Kconfig" source "drivers/media/platform/xilinx/Kconfig" source "drivers/media/platform/rcar-vin/Kconfig" source "drivers/media/platform/atmel/Kconfig" +source "drivers/media/platform/intel/Kconfig" config VIDEO_TI_CAL tristate "TI CAL (Camera Adaptation Layer) driver" diff --git a/drivers/media/platform/Makefile b/drivers/media/platform/Makefile index 41322ab658027..f8c6a9c14add9 100644 --- a/drivers/media/platform/Makefile +++ b/drivers/media/platform/Makefile @@ -96,3 +96,4 @@ obj-$(CONFIG_VIDEO_QCOM_VENUS) += qcom/venus/ obj-y += meson/ obj-y += cros-ec-cec/ +obj-y += intel/ diff --git a/drivers/media/platform/coda/coda-common.c b/drivers/media/platform/coda/coda-common.c index 726b3b93a4863..bf7b8417c27fc 100644 --- a/drivers/media/platform/coda/coda-common.c +++ b/drivers/media/platform/coda/coda-common.c @@ -1804,7 +1804,8 @@ static int coda_s_ctrl(struct v4l2_ctrl *ctrl) break; case V4L2_CID_MPEG_VIDEO_H264_PROFILE: /* TODO: switch between baseline and constrained baseline */ - ctx->params.h264_profile_idc = 66; + if (ctx->inst_type == CODA_INST_ENCODER) + ctx->params.h264_profile_idc = 66; 
break; case V4L2_CID_MPEG_VIDEO_H264_LEVEL: /* nothing to do, this is set by the encoder */ diff --git a/drivers/media/platform/intel/Kconfig b/drivers/media/platform/intel/Kconfig new file mode 100644 index 0000000000000..95e15c58cfef5 --- /dev/null +++ b/drivers/media/platform/intel/Kconfig @@ -0,0 +1,114 @@ +config INTEL_IPU4_BXT_P_PDATA + bool "Enable built in platform data for Broxton-P" + depends on VIDEO_INTEL_IPU && VIDEO_INTEL_IPU4 + ---help--- + Pre-ACPI system platform data is compiled inside kernel + +config INTEL_IPU4_BXT_GP_PDATA + bool "Enable built in platform data for Broxton-P" + depends on VIDEO_INTEL_IPU && VIDEO_INTEL_IPU4 + ---help--- + Pre-ACPI system platform data is compiled inside kernel + +config INTEL_IPU4P_ICL_RVP_PDATA + bool "Enable built in platform data for ipu4p" + depends on VIDEO_INTEL_IPU && VIDEO_INTEL_IPU4P + ---help--- + Pre-ACPI system platform data is compiled inside kernel + +config INTEL_IPU4_OV2740 + bool "Compile platform data for OV2740" + depends on INTEL_IPU4_BXT_P_PDATA + +config INTEL_IPU4_IMX185 + bool "Compile platform data for IMX185" + depends on INTEL_IPU4_BXT_P_PDATA + +config INTEL_IPU4_AR023Z + bool "Compile platform data for AR023Z" + ---help--- + Onsemi 2MP AR023Z camera sensor + +config INTEL_IPU4_IMX477 + bool "Compile platform data for IMX477 camera sensor" + depends on INTEL_IPU4_BXT_P_PDATA + ---help--- + Sony IMX477 sensor is enabled for DUAL Camera input. 
+ +config INTEL_IPU4_OV13860 + bool "Compile platform data for OV13860" + ---help--- + Omnivision 13MP camera sensor + +config INTEL_IPU4_OV9281 + bool "Compile platform data for OV9281" + ---help--- + Omnivision 1MP camera sensor + +config INTEL_IPU4_OV10635 + bool "Compile platform data for OV10635" + ---help--- + Omnivision 1MP camera sensor + +config INTEL_IPU4_AR0231AT + bool "Compile platform data for AR0231AT" + ---help--- + AR0231 camera sensor for MAXIM 9286 + +config INTEL_IPU4_MAGNA + bool "Compile platform data for MAGNA" + depends on INTEL_IPU4_BXT_P_PDATA + ---help--- + MAGNA Camera Sensor + +config INTEL_IPU4_IMX274 + bool "Compile platform data for IMX274 camera sensor" + depends on INTEL_IPU4_BXT_P_PDATA + ---help--- + Sony 14MP camera sensor is enabled for HDR function. + +config INTEL_IPU4_OV10640 + bool "Compile platform data for OV10640" + ---help--- + Omnivision 1.4MP camera sensor + +config INTEL_IPU4_ADV7481 + bool "Compile platform data for ADV7481" + ---help--- + HDMI2MIPI convertor device ADV7481 + +config INTEL_IPU4_ADV7481_EVAL + bool "Compile platform data for ADV7481 evaluation board" + ---help--- + HDMI2MIPI convertor device ADV7481 eval board + +config INTEL_IPU4_IMX290 + bool "Compile platform data for IMX290" + depends on INTEL_IPU4_BXT_P_PDATA + ---help--- + "Sony 8MB camera sensor is enabled for HDR function" + +config INTEL_IPU4_OX03A10 + bool "Compile platorm data for OX03A10" + depends on INTEL_IPU4_BXT_P_PDATA + ---help--- + "ox03a10 camera sensor" + +config INTEL_IPU4_OV495 + bool "Compile platorm data for OV495" + depends on INTEL_IPU4_BXT_P_PDATA + ---help--- + "ov495 camera sensor" + +config INTEL_IPU4_ICI_BXT_P_PDATA + depends on VIDEO_INTEL_IPU && VIDEO_INTEL_ICI + bool "Enable built in platform data for Broxton-P ICI driver" + ---help--- + Pre-ACPI system platform data is compiled inside kernel + +config INTEL_IPU4_ADV7481_I2C_ID + int "I2C bus ID for ADV7481" + range 0 8 + default 0 + ---help--- + I2C bus number of 
ADV7481 Mondello diff --git a/drivers/media/platform/intel/Makefile b/drivers/media/platform/intel/Makefile new file mode 100644 index 0000000000000..cf4cd3bfaa9b8 --- /dev/null +++ b/drivers/media/platform/intel/Makefile @@ -0,0 +1,23 @@ +# SPDX-License-Identifier: GPL-2.0 +# Copyright (c) 2010 - 2018, Intel Corporation. + +ifneq ($(EXTERNAL_BUILD), 1) +srcpath := $(srctree) +endif + +# force check the compile warning to make sure zero warnings +# note we may have build issue when gcc upgraded. +ccflags-y := -Wall -Wextra +ccflags-y += $(call cc-disable-warning, unused-parameter) +ccflags-y += $(call cc-disable-warning, implicit-fallthrough) +ccflags-y += $(call cc-disable-warning, missing-field-initializers) +ccflags-$(CONFIG_VIDEO_INTEL_IPU_WERROR) += -Werror + +ccflags-y += -I$(srcpath)/$(src)/../../../../include/ +ccflags-y += -I$(srcpath)/$(src)/../../pci/intel/ + +obj-$(CONFIG_INTEL_IPU4_BXT_P_PDATA) += ipu4-bxt-p-pdata.o +obj-$(CONFIG_INTEL_IPU4_BXT_GP_PDATA) += ipu4-bxt-gp-pdata.o +obj-$(CONFIG_INTEL_IPU4P_ICL_RVP_PDATA) += ipu4p-icl-rvp-pdata.o +obj-$(CONFIG_INTEL_IPU4_ICI_BXT_P_PDATA) += ipu4-ici-bxt-p-pdata.o +obj-$(CONFIG_VIDEO_INTEL_IPU) += ipu4-acpi.o diff --git a/drivers/media/platform/intel/ipu4-acpi.c b/drivers/media/platform/intel/ipu4-acpi.c new file mode 100644 index 0000000000000..94f53727ddbf2 --- /dev/null +++ b/drivers/media/platform/intel/ipu4-acpi.c @@ -0,0 +1,1068 @@ +/* + * Copyright (c) 2016--2018 Intel Corporation. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License version + * 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 5, 0) +#include +#else +#include +#endif +#include +#include + +#define HID_BUFFER_SIZE 32 +#define VCM_BUFFER_SIZE 32 + +/* Data representation as it is in ACPI SSDB buffer */ +struct sensor_bios_data_packed { + u8 version; + u8 sku; + u8 guid_csi2[16]; + u8 devfunction; + u8 bus; + u32 dphylinkenfuses; + u32 clockdiv; + u8 link; + u8 lanes; + u32 csiparams[10]; + u32 maxlanespeed; + u8 sensorcalibfileidx; + u8 sensorcalibfileidxInMBZ[3]; + u8 romtype; + u8 vcmtype; + u8 platforminfo; + u8 platformsubinfo; + u8 flash; + u8 privacyled; + u8 degree; + u8 mipilinkdefined; + u32 mclkspeed; + u8 controllogicid; + u8 reserved1[3]; + u8 mclkport; + u8 reserved2[13]; +} __attribute__((__packed__)); + +/* Fields needed by ipu4 driver */ +struct sensor_bios_data { + struct device *dev; + u8 link; + u8 lanes; + u8 vcmtype; + u8 flash; + u8 degree; + u8 mclkport; + u32 mclkspeed; + u16 xshutdown; +}; + +static LIST_HEAD(devices); +static LIST_HEAD(new_devs); + +struct ipu4_i2c_helper { + int (*fn)(struct device *, void *, + struct ipu_isys_csi2_config *csi2, + bool reprobe); + void *driver_data; +}; + +struct ipu4_i2c_new_dev { + struct list_head list; + struct i2c_board_info info; + unsigned short int bus; +}; + +struct ipu4_camera_module_data { + struct list_head list; + struct ipu_isys_csi2_config csi2; + unsigned int ext_clk; + void *pdata; /* Ptr to generated platform data*/ + void *priv; /* Private for specific subdevice */ +}; + +struct ipu4_i2c_info { + unsigned short bus; + unsigned short addr; +}; + +struct ipu4_acpi_devices { + const char *hid_name; + const char *real_driver; + int (*get_platform_data)(struct i2c_client *client, + struct ipu4_camera_module_data *data, + struct ipu4_i2c_helper 
*helper, + void *priv, size_t size); + void *priv_data; + size_t priv_size; + const struct ipu_regulator *regulators; +}; + +static uint64_t imx132_op_clocks[] = (uint64_t []){ 312000000, 0 }; + +/* + * Add a request to create new i2c devices later on. i2c_new_device can't be + * directly called from functions which are called by i2c_for_each_dev + * function. Both takes a same mutex inside i2c core code. + */ +static int add_new_i2c(unsigned short addr, unsigned short bus, + unsigned short flags, char *name, void *pdata) +{ + struct ipu4_i2c_new_dev *newdev = kzalloc(sizeof(*newdev), GFP_KERNEL); + + if (!newdev) + return -ENOMEM; + + newdev->info.flags = flags; + newdev->info.addr = addr; + newdev->bus = bus; + newdev->info.platform_data = pdata; + strlcpy(newdev->info.type, name, sizeof(newdev->info.type)); + + list_add(&newdev->list, &new_devs); + return 0; +} + +static int get_string_dsdt_data(struct device *dev, const u8 *dsdt, + int func, char *out, unsigned int size) +{ + struct acpi_handle *dev_handle = ACPI_HANDLE(dev); + union acpi_object *obj; + int ret = -ENODEV; + + obj = acpi_evaluate_dsm(dev_handle, (void *)dsdt, 0, func, NULL); + if (!obj) { + dev_err(dev, "No dsdt field\n"); + return -ENODEV; + } + dev_dbg(dev, "ACPI type %d", obj->type); + + if ((obj->type != ACPI_TYPE_STRING) || !obj->string.pointer) + goto exit; + + strlcpy(out, obj->string.pointer, + min((unsigned int)(obj->string.length + 1), size)); + dev_info(dev, "DSDT string id: %s\n", out); + + ret = 0; +exit: + ACPI_FREE(obj); + return ret; +} + +static int get_integer_dsdt_data(struct device *dev, const u8 *dsdt, + int func, u64 *out) +{ + struct acpi_handle *dev_handle = ACPI_HANDLE(dev); + union acpi_object *obj; + + obj = acpi_evaluate_dsm(dev_handle, (void *)dsdt, 0, func, NULL); + if (!obj) { + dev_err(dev, "No dsdt\n"); + return -ENODEV; + } + dev_dbg(dev, "ACPI type %d", obj->type); + + if (obj->type != ACPI_TYPE_INTEGER) { + ACPI_FREE(obj); + return -ENODEV; + } + *out = 
obj->integer.value; + ACPI_FREE(obj); + return 0; +} + +static int read_acpi_block(struct device *dev, char *id, void *data, u32 size) +{ + union acpi_object *obj; + struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL }; + struct acpi_handle *dev_handle = ACPI_HANDLE(dev); + int status; + u32 buffer_length; + + status = acpi_evaluate_object(dev_handle, id, NULL, &buffer); + if (!ACPI_SUCCESS(status)) + return -ENODEV; + + obj = (union acpi_object *)buffer.pointer; + if (!obj || obj->type != ACPI_TYPE_BUFFER) { + dev_err(dev, "Could't read acpi buffer\n"); + status = -ENODEV; + goto err; + } + + if (obj->buffer.length > size) { + dev_err(dev, "Given buffer is too small\n"); + status = -ENODEV; + goto err; + } + + memcpy(data, obj->buffer.pointer, min(size, obj->buffer.length)); + buffer_length = obj->buffer.length; + kfree(buffer.pointer); + + return buffer_length; +err: + kfree(buffer.pointer); + return status; +} + +static struct ipu4_camera_module_data *add_device_to_list( + struct list_head *devices) +{ + struct ipu4_camera_module_data *cam_device; + + cam_device = kzalloc(sizeof(*cam_device), GFP_KERNEL); + if (!cam_device) + return NULL; + + list_add(&cam_device->list, devices); + return cam_device; +} + +static int get_sensor_gpio(struct device *dev, int index) +{ + struct gpio_desc *gpiod_gpio; + int gpio; + + gpiod_gpio = gpiod_get_index(dev, NULL, index, GPIOD_ASIS); + if (IS_ERR(gpiod_gpio)) { + dev_err(dev, "No gpio from index %d\n", index); + return -ENODEV; + } + gpio = desc_to_gpio(gpiod_gpio); + gpiod_put(gpiod_gpio); + return gpio; +} + +static void *get_dsdt_vcm(struct device *dev, char *vcm, char *second) +{ + void *pdata = NULL; + const u8 dsdt_cam_vcm[] = { + 0x39, 0xA6, 0xC9, 0x75, 0x8A, 0x5C, 0x00, 0x4A, + 0x9F, 0x48, 0xA9, 0xC3, 0xB5, 0xDA, 0x78, 0x9F }; + int ret = get_string_dsdt_data(dev, dsdt_cam_vcm, 0, + vcm, VCM_BUFFER_SIZE); + if (ret < 0) { + dev_err(dev, "get vcm failed - using override: %s\n", second); + strlcpy(vcm, second, 
VCM_BUFFER_SIZE); + } + dev_dbg(dev, "vcm: %s\n", vcm); + + if (!strcasecmp(vcm, LC898122_NAME)) { + struct lc898122_platform_data *lc_pdata; + + dev_dbg(dev, "Setting up voice coil motor lc898821"); + lc_pdata = kzalloc(sizeof(struct lc898122_platform_data), + GFP_KERNEL); + if (lc_pdata) + lc_pdata->sensor_device = dev; + pdata = lc_pdata; + strlcpy(vcm, LC898122_NAME, VCM_BUFFER_SIZE); + } else if (!strcasecmp(vcm, DW9714_NAME)) { + struct dw9714_platform_data *dw_pdata; + + dev_dbg(dev, "Setting up voice coil motor dw9714"); + dw_pdata = kzalloc(sizeof(struct dw9714_platform_data), + GFP_KERNEL); + if (dw_pdata) { + dw_pdata->sensor_dev = dev; + if (gpiod_count(dev, NULL) > 1) + dw_pdata->gpio_xsd = get_sensor_gpio(dev, 1); + else + dw_pdata->gpio_xsd = -ENODEV; + } + pdata = dw_pdata; + strlcpy(vcm, DW9714_NAME, VCM_BUFFER_SIZE); + } + return pdata; +} + +static int get_i2c_info(struct device *dev, struct ipu4_i2c_info *i2c, int size) +{ + const u8 dsdt_cam_i2c[] = { + 0x49, 0x75, 0x25, 0x26, 0x71, 0x92, 0xA4, 0x4C, + 0xBB, 0x43, 0xC4, 0x89, 0x9D, 0x5A, 0x48, 0x81}; + u64 num_i2c; + int i; + int rval = get_integer_dsdt_data(dev, dsdt_cam_i2c, 1, &num_i2c); + + if (rval < 0) { + dev_err(dev, "Failed to get number of I2C devices\n"); + return -ENODEV; + } + + for (i = 0; i < num_i2c && i < size; i++) { + u64 data; + + rval = get_integer_dsdt_data(dev, dsdt_cam_i2c, i + 2, + &data); + if (rval < 0) { + dev_err(dev, "No i2c data\n"); + return -ENODEV; + } + + i2c[i].bus = ((data >> 24) & 0xff) - 1; + i2c[i].addr = (data >> 8) & 0xff; + } + return num_i2c; +} + +static int match_depend(struct device *dev, void *data) +{ + return (dev && dev->fwnode == data) ? 
1 : 0; +} + +#define MAX_CONSUMERS 1 +struct ipu4_gpio_regulator { + struct regulator_consumer_supply consumers[MAX_CONSUMERS]; + struct regulator_init_data init_data; + struct gpio_regulator_config info; + struct platform_device pdev; + struct list_head list; +}; +static LIST_HEAD(ipu4_gpio_regulator_head); + +static int create_gpio_regulator(struct device *dev, int index, const char *name) +{ + struct ipu4_gpio_regulator *reg_device; + struct platform_device *cam_regs[1]; + int gpio; + int num_consumers = 0; + + gpio = get_sensor_gpio(dev, 1); + if (gpio < 0) + return gpio; + + reg_device = kzalloc(sizeof(*reg_device), GFP_KERNEL); + if (!reg_device) + return -ENOMEM; + + INIT_LIST_HEAD(®_device->list); + reg_device->consumers[num_consumers].supply = "VANA"; + reg_device->consumers[num_consumers].dev_name = name; + num_consumers++; + + reg_device->init_data.constraints.input_uV = 3300000; + reg_device->init_data.constraints.min_uV = 2800000; + reg_device->init_data.constraints.max_uV = 2800000; + reg_device->init_data.constraints.valid_ops_mask = + REGULATOR_CHANGE_STATUS; + reg_device->init_data.num_consumer_supplies = num_consumers; + reg_device->init_data.consumer_supplies = reg_device->consumers; + + reg_device->info.supply_name = dev_name(dev); + reg_device->info.enable_gpio = gpio; + reg_device->info.enable_high = 1; + reg_device->info.enabled_at_boot = 1; + reg_device->info.type = REGULATOR_VOLTAGE; + reg_device->info.init_data = ®_device->init_data; + reg_device->pdev.name = "gpio-regulator"; + reg_device->pdev.id = -1; + reg_device->pdev.dev.platform_data = ®_device->info; + cam_regs[0] = ®_device->pdev; + + platform_add_devices(cam_regs, 1); + list_add_tail(®_device->list, &ipu4_gpio_regulator_head); + + return 0; +} + +static int remove_gpio_regulator(void) +{ + struct ipu4_gpio_regulator *reg_device; + + while (!list_empty(&ipu4_gpio_regulator_head)) { + reg_device = list_first_entry(&ipu4_gpio_regulator_head, + struct ipu4_gpio_regulator, list); + 
list_del(®_device->list); + + platform_device_unregister(®_device->pdev); + kfree(reg_device); + } + + return 0; +} + +static int get_acpi_dep_data(struct device *dev, + struct sensor_bios_data *sensor) +{ + struct acpi_handle *dev_handle = ACPI_HANDLE(dev); + struct acpi_handle_list dep_devices; + acpi_status status; + int i; + + if (!acpi_has_method(dev_handle, "_DEP")) + return 0; + + status = acpi_evaluate_reference(dev_handle, "_DEP", NULL, + &dep_devices); + if (ACPI_FAILURE(status)) { + dev_dbg(dev, "Failed to evaluate _DEP.\n"); + return -ENODEV; + } + + for (i = 0; i < dep_devices.count; i++) { + struct acpi_device *device; + struct acpi_device_info *info; + struct device *p_dev; + int match; + + status = acpi_get_object_info(dep_devices.handles[i], &info); + if (ACPI_FAILURE(status)) { + dev_dbg(dev, "Error reading _DEP device info\n"); + continue; + } + + match = info->valid & ACPI_VALID_HID && + !strcmp(info->hardware_id.string, "INT3472"); + + kfree(info); + + if (!match) + continue; + + /* Process device IN3472 created by acpi */ + if (acpi_bus_get_device(dep_devices.handles[i], &device)) + return -ENODEV; + + dev_dbg(dev, "Depend ACPI device found: %s\n", + dev_name(&device->dev)); + + p_dev = bus_find_device(&platform_bus_type, NULL, + &device->fwnode, match_depend); + if (p_dev) { + dev_dbg(dev, "Dependent platform device found %s\n", + dev_name(p_dev)); + sensor->dev = p_dev; + /* GPIO in index 1 is fixed regulator */ + create_gpio_regulator(p_dev, 1, dev_name(dev)); + } + } + return 0; +} + +static int get_acpi_ssdb_sensor_data(struct device *dev, + struct sensor_bios_data *sensor) +{ + struct sensor_bios_data_packed sensor_data; + int ret = read_acpi_block(dev, "SSDB", &sensor_data, + sizeof(sensor_data)); + if (ret < 0) + return ret; + + get_acpi_dep_data(dev, sensor); + + /* Xshutdown is not part of the ssdb data */ + sensor->link = sensor_data.link; + sensor->lanes = sensor_data.lanes; + sensor->mclkport = sensor_data.mclkport; + 
sensor->flash = sensor_data.flash; + sensor->mclkspeed = sensor_data.mclkspeed; + dev_dbg(dev, "sensor acpi data: link %d, lanes %d, mclk %d, flash %d, mclkspeed %d\n", + sensor->link, sensor->lanes, sensor->mclkport, sensor->flash, sensor->mclkspeed); + return 0; +} + +static int ipu_acpi_get_sensor_data(struct device *dev, + struct ipu4_camera_module_data *data, + struct sensor_bios_data *sensor) +{ + const u8 mipi_port_dsdt[] = { + 0xD8, 0x7B, 0x3B, 0xEA, 0x9B, 0xE0, 0x39, 0x42, + 0xAD, 0x6E, 0xED, 0x52, 0x5F, 0x3F, 0x26, 0xAB }; + const u8 mclk_out_dsdt[] = { + 0x51, 0x26, 0xBE, 0x8D, 0xC1, 0x70, 0x6F, 0x4C, + 0xAC, 0x87, 0xA3, 0x7C, 0xB4, 0x6E, 0x4A, 0xF6 }; + + int rval; + u64 acpi_data; + + if (sensor) { + /* Sensor data from ssdb block */ + data->csi2.port = sensor->link; + data->csi2.nlanes = sensor->lanes; + acpi_data = sensor->mclkport; + data->ext_clk = sensor->mclkspeed; + } else { + rval = get_integer_dsdt_data(dev, mipi_port_dsdt, 0, + &acpi_data); + if (rval < 0) { + dev_err(dev, "Can't get mipi port\n"); + return rval; + } + data->csi2.port = acpi_data & 0xf; + data->csi2.nlanes = (acpi_data & 0xf0) >> 4; + + rval = get_integer_dsdt_data(dev, mclk_out_dsdt, 0, &acpi_data); + if (rval < 0) { + dev_err(dev, "Can't get mclk info\n"); + return rval; + } + /* we have 24 MHz clock for sensors now */ + data->ext_clk = 286363636; + } + + /* dsdt data currently contains wrong numbers for combo ports */ + if (data->csi2.port >= 6) + data->csi2.port -= 2; + + if (data->csi2.nlanes == 0) + return -ENODEV; + + switch (acpi_data) { + case 0: + clk_add_alias(NULL, dev_name(dev), "ipu4_cam_clk0", NULL); + break; + case 1: + clk_add_alias(NULL, dev_name(dev), "ipu4_cam_clk1", NULL); + break; + case 2: + clk_add_alias(NULL, dev_name(dev), "ipu4_cam_clk2", NULL); + break; + default: + dev_err(dev, "Unknown clk data %u\n", (unsigned int)acpi_data); + break; + } + + dev_dbg(dev, "sensor: lanes %d, port %d, clk out %d, ext_clk %d\n", + data->csi2.nlanes, + 
data->csi2.port, (int)acpi_data, data->ext_clk); + return 0; +} + +static int get_custom_gpios(struct device *dev, + struct crlmodule_platform_data *pdata) +{ + int i, ret, c = gpiod_count(dev, NULL) - 1; + + for (i = 0; i < c; i++) { + ret = snprintf(pdata->custom_gpio[i].name, + sizeof(pdata->custom_gpio[i].name), + "custom_gpio%d", i); + if (ret < 0 || ret >= sizeof(pdata->custom_gpio[i].name)) { + dev_err(dev, "Failed to set custom gpio name\n"); + return -EINVAL; + } + /* First GPIO is xshutdown */ + pdata->custom_gpio[i].number = get_sensor_gpio(dev, i + 1); + if (pdata->custom_gpio[i].number < 0) { + dev_err(dev, "unable to get custom gpio number\n"); + return -ENODEV; + } + pdata->custom_gpio[i].val = 1; + pdata->custom_gpio[i].undo_val = 0; + } + + return 0; +} + +static int get_crlmodule_pdata(struct i2c_client *client, + struct ipu4_camera_module_data *data, + struct ipu4_i2c_helper *helper, + void *priv, size_t size) +{ + struct sensor_bios_data sensor; + struct crlmodule_platform_data *pdata; + struct ipu4_i2c_info i2c[2]; + void *vcm_pdata; + char vcm[VCM_BUFFER_SIZE]; + int num = get_i2c_info(&client->dev, i2c, ARRAY_SIZE(i2c)); + int rval; + + pdata = kzalloc(sizeof(*pdata), GFP_KERNEL); + if (!pdata) + return -ENOMEM; + + sensor.dev = &client->dev; + + rval = get_acpi_ssdb_sensor_data(&client->dev, &sensor); + + ipu_acpi_get_sensor_data(&client->dev, data, + rval == 0 ? 
&sensor : NULL); + + data->pdata = pdata; + /* sensor.dev may here point to sensor or dependent device */ + pdata->xshutdown = get_sensor_gpio(sensor.dev, 0); + if (pdata->xshutdown < 0) { + rval = pdata->xshutdown; + goto err_free_pdata; + } + + rval = get_custom_gpios(sensor.dev, pdata); + if (rval) + goto err_free_pdata; + + pdata->lanes = data->csi2.nlanes; + pdata->ext_clk = data->ext_clk; + client->dev.platform_data = pdata; + + helper->fn(&client->dev, helper->driver_data, &data->csi2, true); + + if ((num <= 1) || !priv) + return 0; + + vcm_pdata = get_dsdt_vcm(&client->dev, vcm, priv); + + dev_info(&client->dev, "Creating vcm instance: bus: %d addr 0x%x %s\n", + i2c[1].bus, i2c[1].addr, vcm); + + return add_new_i2c(i2c[1].addr, i2c[1].bus, 0, vcm, vcm_pdata); + +err_free_pdata: + kfree(pdata); + data->pdata = NULL; + return rval; +} + +#if defined (CONFIG_VIDEO_INTEL_ICI) +static int get_crlmodule_lite_pdata(struct i2c_client *client, + struct ipu4_camera_module_data *data, + struct ipu4_i2c_helper *helper, + void *priv, size_t size) +{ + struct sensor_bios_data sensor; + struct crlmodule_lite_platform_data *pdata; + struct ipu4_i2c_info i2c[2]; + void *vcm_pdata; + char vcm[VCM_BUFFER_SIZE]; + int num = get_i2c_info(&client->dev, i2c, ARRAY_SIZE(i2c)); + int rval; + + pdata = kzalloc(sizeof(*pdata), GFP_KERNEL); + if (!pdata) + return -ENOMEM; + + sensor.dev = &client->dev; + + rval = get_acpi_ssdb_sensor_data(&client->dev, &sensor); + + ipu_acpi_get_sensor_data(&client->dev, data, + rval == 0 ? 
&sensor : NULL); + + data->pdata = pdata; + /* sensor.dev may here point to sensor or dependent device */ +#if !defined(CONFIG_VIDEO_INTEL_UOS) + pdata->xshutdown = get_sensor_gpio(sensor.dev, 0); + if (pdata->xshutdown < 0) { + rval = pdata->xshutdown; + kfree(pdata); + data->pdata = NULL; + return rval; + } +#endif + pdata->lanes = data->csi2.nlanes; + pdata->ext_clk = data->ext_clk; + client->dev.platform_data = pdata; + + helper->fn(&client->dev, helper->driver_data, &data->csi2, true); + + if ((num <= 1) || !priv) + return 0; + + vcm_pdata = get_dsdt_vcm(&client->dev, vcm, priv); + + dev_info(&client->dev, "Creating vcm instance: bus: %d addr 0x%x %s\n", + i2c[1].bus, i2c[1].addr, vcm); + + return add_new_i2c(i2c[1].addr, i2c[1].bus, 0, vcm, vcm_pdata); +} +#endif + +static int get_smiapp_pdata(struct i2c_client *client, + struct ipu4_camera_module_data *data, + struct ipu4_i2c_helper *helper, + void *priv, size_t size) +{ +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 9, 0) + struct smiapp_platform_data *pdata; +#else + struct smiapp_hwconfig *pdata; +#endif + uint64_t *source = priv; + + pdata = kzalloc(sizeof(*pdata), GFP_KERNEL); + if (!pdata) + return -ENOMEM; + data->pdata = pdata; + + data->priv = source; +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 9, 0) + pdata->xshutdown = get_sensor_gpio(&client->dev, 0); + if (pdata->xshutdown < 0) + return -ENODEV; +#endif + + ipu_acpi_get_sensor_data(&client->dev, data, NULL); + + pdata->op_sys_clock = source; + pdata->lanes = data->csi2.nlanes; + pdata->ext_clk = data->ext_clk; + + client->dev.platform_data = pdata; + helper->fn(&client->dev, helper->driver_data, &data->csi2, true); + + return 0; +} + +static int get_lm3643_pdata(struct i2c_client *client, + struct ipu4_camera_module_data *data, + struct ipu4_i2c_helper *helper, + void *priv, size_t size) +{ + struct lm3643_platform_data *pdata; + struct ipu4_i2c_info i2c[2]; + struct gpio_desc *gpiod_reset; + int i; + int num = get_i2c_info(&client->dev, i2c, 
ARRAY_SIZE(i2c)); + + if (num < 0) + return -ENODEV; + + pdata = kzalloc(sizeof(*pdata), GFP_KERNEL); + if (!pdata) + return -ENOMEM; + + gpiod_reset = gpiod_get_index(&client->dev, NULL, 0, GPIOD_ASIS); + if (IS_ERR(gpiod_reset)) { + pdata->gpio_reset = -1; + dev_info(&client->dev, "No reset for lm3643\n"); + } else { + pdata->gpio_reset = desc_to_gpio(gpiod_reset); + gpiod_put(gpiod_reset); + } + + /* These should be added to ACPI */ + data->pdata = pdata; + pdata->gpio_torch = -1; + pdata->gpio_strobe = -1; + pdata->flash_max_brightness = 500; + pdata->torch_max_brightness = 89; + + client->dev.platform_data = pdata; + helper->fn(&client->dev, helper->driver_data, NULL, true); + + /* + * Same I2C ACPI entry may contain several instances. I2C core + * ACPI code creates only the first one. Create rest of the instances + */ + dev_info(&client->dev, "Adding rest of lm3643 instances: %d\n", num); + for (i = 1; i < num; i++) { + int rval = add_new_i2c(i2c[i].addr, i2c[i].bus, + 0, client->name, pdata); + if (rval < 0) + return rval; + dev_info(&client->dev, "LM3643 instance: bus: %d addr 0x%x\n", + i2c[i].bus, i2c[i].addr); + return -ENOMEM; + } + + return 0; +}; + +static int get_as3638_pdata(struct i2c_client *client, + struct ipu4_camera_module_data *data, + struct ipu4_i2c_helper *helper, + void *priv, size_t size) +{ + struct as3638_platform_data *pdata; + struct gpio_desc *gpiod_pin; + + pdata = kzalloc(sizeof(*pdata), GFP_KERNEL); + if (!pdata) + return -ENOMEM; + + gpiod_pin = gpiod_get_index(&client->dev, NULL, 0, GPIOD_ASIS); + if (IS_ERR(gpiod_pin)) { + pdata->gpio_reset = -1; + dev_info(&client->dev, "No reset gpio for as3638\n"); + } else { + pdata->gpio_reset = desc_to_gpio(gpiod_pin); + gpiod_put(gpiod_pin); + } + + gpiod_pin = gpiod_get_index(&client->dev, NULL, 1, GPIOD_ASIS); + if (IS_ERR(gpiod_pin)) { + pdata->gpio_torch = -1; + dev_info(&client->dev, "No torch gpio for as3638\n"); + } else { + pdata->gpio_torch = desc_to_gpio(gpiod_pin); + 
gpiod_put(gpiod_pin); + } + + gpiod_pin = gpiod_get_index(&client->dev, NULL, 2, GPIOD_ASIS); + if (IS_ERR(gpiod_pin)) { + pdata->gpio_strobe = -1; + dev_info(&client->dev, "No strobe gpio for as3638\n"); + } else { + pdata->gpio_strobe = desc_to_gpio(gpiod_pin); + gpiod_put(gpiod_pin); + } + + /* These should be added to ACPI */ + data->pdata = pdata; + pdata->flash_max_brightness[AS3638_LED1] = + AS3638_FLASH_MAX_BRIGHTNESS_LED1; + pdata->torch_max_brightness[AS3638_LED1] = + AS3638_TORCH_MAX_BRIGHTNESS_LED1; + pdata->flash_max_brightness[AS3638_LED2] = + AS3638_FLASH_MAX_BRIGHTNESS_LED2; + pdata->torch_max_brightness[AS3638_LED2] = + AS3638_TORCH_MAX_BRIGHTNESS_LED2; + pdata->flash_max_brightness[AS3638_LED3] = + AS3638_FLASH_MAX_BRIGHTNESS_LED3; + pdata->torch_max_brightness[AS3638_LED3] = + AS3638_TORCH_MAX_BRIGHTNESS_LED3; + + client->dev.platform_data = pdata; + helper->fn(&client->dev, helper->driver_data, NULL, true); + + return 0; +}; + +static const struct ipu4_acpi_devices supported_devices[] = { + { "SONY230A", CRLMODULE_NAME, get_crlmodule_pdata, LC898122_NAME, 0, + imx230regulators }, + { "INT3477", CRLMODULE_NAME, get_crlmodule_pdata, NULL, 0, + ov8858regulators }, + { "INT3471", CRLMODULE_NAME, get_crlmodule_pdata, NULL, 0 }, + { "OV5670AA", CRLMODULE_NAME, get_crlmodule_pdata, NULL, 0 }, + { "SONY214A", CRLMODULE_NAME, get_crlmodule_pdata, "dw9714", 0 }, + { "SONY132A", SMIAPP_NAME, get_smiapp_pdata, imx132_op_clocks, + sizeof(imx132_op_clocks) }, + { "TXNW3643", LM3643_NAME, get_lm3643_pdata, NULL, 0 }, + { "AMS3638", AS3638_NAME, get_as3638_pdata, NULL, 0 }, +#if defined (CONFIG_VIDEO_INTEL_ICI) + { "ADV7481A", CRLMODULE_LITE_NAME, get_crlmodule_lite_pdata, NULL, 0 }, + { "ADV7481B", CRLMODULE_LITE_NAME, get_crlmodule_lite_pdata, NULL, 0 }, +#else + { "ADV7481A", CRLMODULE_NAME, get_crlmodule_pdata, NULL, 0 }, + { "ADV7481B", CRLMODULE_NAME, get_crlmodule_pdata, NULL, 0 }, +#endif +}; + +static int get_table_index(struct device *device, const 
__u8 *acpi_name) +{ + unsigned int i; + + for (i = 0; i < ARRAY_SIZE(supported_devices); i++) { + if (!strcmp(acpi_name, supported_devices[i].hid_name)) + return i; + } + + return -ENODEV; +} + +/* List of ACPI devices what we can handle */ +static const struct acpi_device_id ipu4_acpi_match[] = { + { "SONY230A", 0 }, + { "INT3477", 0 }, + { "INT3471", 0 }, + { "TXNW3643", 0 }, + { "AMS3638", 0 }, + { "SONY214A", 0 }, + { "SONY132A", 0 }, + { "OV5670AA", 0 }, + { "ADV7481A", 0 }, + { "ADV7481B", 0 }, + {}, +}; + +static int map_power_rails(char *src_dev_name, char *src_regulator, + struct device *dev, char *dest_rail) +{ + struct device *src_dev; + int rval; + + if (!src_dev_name) { + dev_dbg(dev, "Regulator device name missing"); + return -ENODEV; + } + + src_dev = bus_find_device_by_name(&platform_bus_type, NULL, + src_dev_name); + if (!src_dev) { + dev_dbg(dev, "Regulator device device not found"); + return -ENODEV; + } + + rval = regulator_register_supply_alias(dev, dest_rail, src_dev, + src_regulator); + if (rval < 0) { + dev_err(dev, "Regulator alias mapping fails %s, %s <-> %s, %s", + dev_name(src_dev), src_regulator, + dev_name(dev), dest_rail); + return -ENODEV; + } + return 0; +} + +static int ipu_acpi_pdata(struct i2c_client *client, + const struct acpi_device_id *acpi_id, + struct ipu4_i2c_helper *helper) +{ + struct ipu4_camera_module_data *camdata; + const struct ipu_regulator *regulators; + int index = get_table_index(&client->dev, acpi_id->id); + + if (index < 0) { + dev_err(&client->dev, + "Device is not in supported devices list\n"); + return -ENODEV; + } + + camdata = add_device_to_list(&devices); + if (!camdata) + return -ENOMEM; + + strlcpy(client->name, supported_devices[index].real_driver, + sizeof(client->name)); + + regulators = supported_devices[index].regulators; + while (regulators && regulators->src_dev_name) { + map_power_rails(regulators->src_dev_name, + regulators->src_rail, + &client->dev, + regulators->dest_rail); + regulators++; + 
} + + supported_devices[index].get_platform_data( + client, camdata, helper, + supported_devices[index].priv_data, + supported_devices[index].priv_size); + + return 0; +} + +static int ipu4_i2c_test(struct device *dev, void *priv) +{ + struct i2c_client *client = i2c_verify_client(dev); + const struct acpi_device_id *acpi_id; + + /* + * Check that we are handling only I2C devices which really has + * ACPI data and are one of the devices which we want to handle + */ + if (!ACPI_COMPANION(dev) || !client) + return 0; + + acpi_id = acpi_match_device(ipu4_acpi_match, dev); + if (!acpi_id) + return 0; + + /* + * Skip if platform data has already been added. + * Probably ACPI data overruled by kernel platform data + */ + if (client->dev.platform_data) { + dev_info(dev, "ACPI device has already platform data\n"); + return 0; + } + + /* Looks that we got what we are looking for */ + if (ipu_acpi_pdata(client, acpi_id, priv)) + dev_err(dev, "Failed to process ACPI data"); + + /* Don't return error since we want to process remaining devices */ + return 0; +} + +/* Scan all i2c devices and pick ones which we can handle */ +int ipu_get_acpi_devices(void *driver_data, + struct device *dev, + int (*fn) + (struct device *, void *, + struct ipu_isys_csi2_config *csi2, + bool reprobe)) +{ + struct ipu4_i2c_helper helper = { + .fn = fn, + .driver_data = driver_data, + }; + struct ipu4_i2c_new_dev *new_i2c_dev, *safe; + int rval; + + if ((!fn) || (!driver_data)) + return -ENODEV; + + rval = i2c_for_each_dev(&helper, ipu4_i2c_test); + if (rval < 0) + return rval; + + /* + * Some ACPI entries may contain several i2c devices. 
+ * Create new devices here if those were added to list during + * ACPI processing + */ + list_for_each_entry_safe(new_i2c_dev, safe, &new_devs, list) { + struct i2c_adapter *adapter; + struct i2c_client *client; + + adapter = i2c_get_adapter(new_i2c_dev->bus); + if (!adapter) { + dev_err(dev, "Failed to get adapter\n"); + list_del(&new_i2c_dev->list); + kfree(new_i2c_dev); + continue; + } + + dev_info(dev, "New i2c device: %s\n", new_i2c_dev->info.type); + request_module(I2C_MODULE_PREFIX "%s", new_i2c_dev->info.type); + + client = i2c_new_device(adapter, &new_i2c_dev->info); + if (client) + fn(&client->dev, driver_data, NULL, false); + else + dev_err(dev, "failed to add I2C device from ACPI\n"); + + i2c_put_adapter(adapter); + list_del(&new_i2c_dev->list); + kfree(new_i2c_dev); + } + + return 0; +} +EXPORT_SYMBOL_GPL(ipu_get_acpi_devices); + +static void __exit ipu_acpi_exit(void) +{ + remove_gpio_regulator(); +} +module_exit(ipu_acpi_exit); + +MODULE_AUTHOR("Samu Onkalo "); +MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("IPU4 ACPI support"); diff --git a/drivers/media/platform/intel/ipu4-bxt-gp-pdata.c b/drivers/media/platform/intel/ipu4-bxt-gp-pdata.c new file mode 100644 index 0000000000000..9acfafc9cf420 --- /dev/null +++ b/drivers/media/platform/intel/ipu4-bxt-gp-pdata.c @@ -0,0 +1,123 @@ +/* + * Copyright (c) 2016--2017 Intel Corporation. + * + * Author: Jouni Ukkonen + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License version + * 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + */ +#include +#include +#include +#include +#include + +#include +#include +#include "ipu.h" + +#define ADV7481_HDMI_LANES 4 +#define ADV7481_HDMI_I2C_ADDRESS 0xe0 + +#define ADV7481_LANES 1 +/* + * below i2c address is dummy one, to be able to register single + * ADV7481 chip as two sensors + */ +#define ADV7481_I2C_ADDRESS 0xe1 + + +#define GPIO_BASE 434 + + +static struct crlmodule_platform_data adv7481_cvbs_pdata = { + .ext_clk = 286363636, + .xshutdown = GPIO_BASE + 64, /*dummy for now*/ + .lanes = ADV7481_LANES, + .module_name = "ADV7481 CVBS", + .suffix = 'a', +}; + +static struct ipu_isys_csi2_config adv7481_cvbs_csi2_cfg = { + .nlanes = ADV7481_LANES, + .port = 4, +}; + +static struct ipu_isys_subdev_info adv7481_cvbs_crl_sd = { + .csi2 = &adv7481_cvbs_csi2_cfg, + .i2c = { + .board_info = { + .type = CRLMODULE_NAME, + .flags = I2C_CLIENT_TEN, + .addr = ADV7481_I2C_ADDRESS, + .platform_data = &adv7481_cvbs_pdata, + }, + .i2c_adapter_id = 0, + } +}; + +static struct crlmodule_platform_data adv7481_hdmi_pdata = { + /* FIXME: may need to revisit */ + .ext_clk = 286363636, + .xshutdown = GPIO_BASE + 30, + .lanes = ADV7481_HDMI_LANES, + .module_name = "ADV7481 HDMI", + .crl_irq_pin = GPIO_BASE + 22, + .irq_pin_flags = (IRQF_TRIGGER_RISING | IRQF_ONESHOT), + .irq_pin_name = "ADV7481_HDMI_IRQ", + .suffix = 'a', +}; + +static struct ipu_isys_csi2_config adv7481_hdmi_csi2_cfg = { + .nlanes = ADV7481_HDMI_LANES, + .port = 0, +}; + +static struct ipu_isys_subdev_info adv7481_hdmi_crl_sd = { + .csi2 = &adv7481_hdmi_csi2_cfg, + .i2c = { + .board_info = { + .type = CRLMODULE_NAME, + .flags = I2C_CLIENT_TEN, + .addr = ADV7481_HDMI_I2C_ADDRESS, + .platform_data = &adv7481_hdmi_pdata, + }, + .i2c_adapter_id = 0, + } +}; + + + +/* + * Map buttress output sensor clocks to sensors - + * this should be coming from ACPI, in Gordon Peak + * ADV7481 have its own oscillator, no buttres clock + * needed. 
+ */ +struct ipu_isys_clk_mapping gp_mapping[] = { + { CLKDEV_INIT(NULL, NULL, NULL), NULL } +}; + +static struct ipu_isys_subdev_pdata pdata = { + .subdevs = (struct ipu_isys_subdev_info *[]) { + &adv7481_hdmi_crl_sd, + &adv7481_cvbs_crl_sd, + NULL, + }, + .clk_map = gp_mapping, +}; + +static void ipu4_quirk(struct pci_dev *pci_dev) +{ + pci_dev->dev.platform_data = &pdata; +} + +DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, IPU_PCI_ID, + ipu4_quirk); diff --git a/drivers/media/platform/intel/ipu4-bxt-p-pdata.c b/drivers/media/platform/intel/ipu4-bxt-p-pdata.c new file mode 100644 index 0000000000000..53db12e98b800 --- /dev/null +++ b/drivers/media/platform/intel/ipu4-bxt-p-pdata.c @@ -0,0 +1,1623 @@ +// SPDX-License-Identifier: GPL-2.0 +// Copyright (C) 2015 - 2018 Intel Corporation + +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include "ipu.h" + +#define GPIO_BASE 422 + +#ifdef CONFIG_INTEL_IPU4_OV2740 +#define OV2740_LANES 2 +#define OV2740_I2C_ADDRESS 0x36 +static struct crlmodule_platform_data ov2740_pdata = { + .xshutdown = GPIO_BASE + 64, + .lanes = OV2740_LANES, + .ext_clk = 19200000, + .op_sys_clock = (uint64_t []){ 72000000 }, + .module_name = "INT3474", + .id_string = "0x27 0x40", + .suffix = 'a', +}; + +static struct ipu_isys_csi2_config ov2740_csi2_cfg = { + .nlanes = OV2740_LANES, + .port = 0, +}; + +static struct ipu_isys_subdev_info ov2740_crl_sd = { + .csi2 = &ov2740_csi2_cfg, + .i2c = { + .board_info = { + I2C_BOARD_INFO(CRLMODULE_NAME, OV2740_I2C_ADDRESS), + .platform_data = &ov2740_pdata, + }, + .i2c_adapter_id = 2, + } +}; +#endif + +#ifdef CONFIG_INTEL_IPU4_IMX185 +#define IMX185_LANES 4 +#define IMX185_I2C_ADDRESS 0x1a + +static struct crlmodule_platform_data imx185_pdata = { + .xshutdown = GPIO_BASE + 71, + .lanes = IMX185_LANES, + .ext_clk = 27000000, + .op_sys_clock = (uint64_t []){ 55687500, 111375000, + 111375000, 222750000 }, + .module_name = "IMX185", + .id_string = "0x1 0x85", 
+ .suffix = 'a', +}; + +static struct ipu_isys_csi2_config imx185_csi2_cfg = { + .nlanes = IMX185_LANES, + .port = 0, +}; + +static struct ipu_isys_subdev_info imx185_crl_sd = { + .csi2 = &imx185_csi2_cfg, + .i2c = { + .board_info = { + I2C_BOARD_INFO(CRLMODULE_NAME, IMX185_I2C_ADDRESS), + .platform_data = &imx185_pdata, + }, + .i2c_adapter_id = 2, + } +}; + +static struct crlmodule_platform_data imx185_b_pdata = { + .xshutdown = GPIO_BASE + 73, + .lanes = IMX185_LANES, + .ext_clk = 27000000, + .op_sys_clock = (uint64_t []){ 55687500, 111375000, + 111375000, 222750000 }, + .module_name = "IMX185", + .id_string = "0x1 0x85", + .suffix = 'b', +}; + +static struct ipu_isys_csi2_config imx185_b_csi2_cfg = { + .nlanes = IMX185_LANES, + .port = 4, +}; + +static struct ipu_isys_subdev_info imx185_b_crl_sd = { + .csi2 = &imx185_b_csi2_cfg, + .i2c = { + .board_info = { + I2C_BOARD_INFO(CRLMODULE_NAME, IMX185_I2C_ADDRESS), + .platform_data = &imx185_b_pdata, + }, + .i2c_adapter_id = 4, + } +}; + +#endif + +#ifdef CONFIG_INTEL_IPU4_AR023Z +#define AR023Z_MIPI_LANES 2 +/* Toshiba TC358778 Parallel-MIPI Bridge */ +#define TC358778_I2C_ADDRESS 0x0e + +static struct crlmodule_platform_data ar023z_pdata = { + .xshutdown = GPIO_BASE + 64, + .lanes = AR023Z_MIPI_LANES, + .ext_clk = 27000000, + .op_sys_clock = (uint64_t []){317250000}, + .module_name = "AR023Z", + .id_string = "0x4401 0x64", + .suffix = 'a', +}; + +static struct ipu_isys_csi2_config ar023z_csi2_cfg = { + .nlanes = AR023Z_MIPI_LANES, + .port = 0, +}; + +static struct ipu_isys_subdev_info ar023z_crl_sd = { + .csi2 = &ar023z_csi2_cfg, + .i2c = { + .board_info = { + I2C_BOARD_INFO(CRLMODULE_NAME, TC358778_I2C_ADDRESS), + .platform_data = &ar023z_pdata, + }, + .i2c_adapter_id = 2, + } +}; + +static struct crlmodule_platform_data ar023z_b_pdata = { + .xshutdown = GPIO_BASE + 67, + .lanes = AR023Z_MIPI_LANES, + .ext_clk = 27000000, + .op_sys_clock = (uint64_t []){317250000}, + .module_name = "AR023Z", + .id_string = "0x4401 
0x64", + .suffix = 'b', +}; + +static struct ipu_isys_csi2_config ar023z_b_csi2_cfg = { + .nlanes = AR023Z_MIPI_LANES, + .port = 4, +}; + +static struct ipu_isys_subdev_info ar023z_b_crl_sd = { + .csi2 = &ar023z_b_csi2_cfg, + .i2c = { + .board_info = { + I2C_BOARD_INFO(CRLMODULE_NAME, TC358778_I2C_ADDRESS), + .platform_data = &ar023z_b_pdata, + }, + .i2c_adapter_id = 4, + } +}; +#endif + +#ifdef CONFIG_INTEL_IPU4_IMX477 +#define IMX477_LANES 2 + +#define IMX477_I2C_ADDRESS 0x10 + +static struct crlmodule_platform_data imx477_pdata_master = { + .xshutdown = GPIO_BASE + 64, + .lanes = IMX477_LANES, + .ext_clk = 19200000, + .op_sys_clock = (uint64_t []){600000000}, + .module_name = "IMX477-MASTER", + .id_string = "0x4 0x77", + .suffix = 'a', +}; + +static struct ipu_isys_csi2_config imx477_csi2_cfg_master = { + .nlanes = IMX477_LANES, + .port = 0, +}; + +static struct ipu_isys_subdev_info imx477_crl_sd_master = { + .csi2 = &imx477_csi2_cfg_master, + .i2c = { + .board_info = { + I2C_BOARD_INFO(CRLMODULE_NAME, IMX477_I2C_ADDRESS), + .platform_data = &imx477_pdata_master, + }, + .i2c_adapter_id = 2, + } +}; + +static struct crlmodule_platform_data imx477_pdata_slave_1 = { + .xshutdown = GPIO_BASE + 67, + .lanes = IMX477_LANES, + .ext_clk = 19200000, + .op_sys_clock = (uint64_t []){600000000}, + .module_name = "IMX477-SLAVE-1", + .id_string = "0x4 0x77", + .suffix = 'b', +}; + +static struct ipu_isys_csi2_config imx477_csi2_cfg_slave_1 = { + .nlanes = IMX477_LANES, + .port = 4, +}; + +static struct ipu_isys_subdev_info imx477_crl_sd_slave_1 = { + .csi2 = &imx477_csi2_cfg_slave_1, + .i2c = { + .board_info = { + I2C_BOARD_INFO(CRLMODULE_NAME, IMX477_I2C_ADDRESS), + .platform_data = &imx477_pdata_slave_1, + }, + .i2c_adapter_id = 4, + } +}; +#endif + +#ifdef CONFIG_INTEL_IPU4_IMX274 + +#define IMX274_LANES 4 +#define IMX274_I2C_ADDRESS 0x1a + +static struct crlmodule_platform_data imx274_pdata = { + .xshutdown = GPIO_BASE + 64, + .lanes = IMX274_LANES, + .ext_clk = 24000000, 
+ .op_sys_clock = (uint64_t []){720000000}, + .module_name = "IMX274", + .id_string = "0x6 0x9", + .suffix = 'a', +}; + +static struct ipu_isys_csi2_config imx274_csi2_cfg = { + .nlanes = IMX274_LANES, + .port = 0, +}; + +static struct ipu_isys_subdev_info imx274_crl_sd = { + .csi2 = &imx274_csi2_cfg, + .i2c = { + .board_info = { + I2C_BOARD_INFO(CRLMODULE_NAME, IMX274_I2C_ADDRESS), + .platform_data = &imx274_pdata + }, + .i2c_adapter_id = 2, + } +}; + +static struct crlmodule_platform_data imx274_b_pdata = { + .xshutdown = GPIO_BASE + 67, + .lanes = IMX274_LANES, + .ext_clk = 24000000, + .op_sys_clock = (uint64_t []){720000000}, + .module_name = "IMX274", + .id_string = "0x6 0x9", + .suffix = 'b', +}; + +static struct ipu_isys_csi2_config imx274_b_csi2_cfg = { + .nlanes = IMX274_LANES, + .port = 4, +}; + +static struct ipu_isys_subdev_info imx274_b_crl_sd = { + .csi2 = &imx274_b_csi2_cfg, + .i2c = { + .board_info = { + I2C_BOARD_INFO(CRLMODULE_NAME, IMX274_I2C_ADDRESS), + .platform_data = &imx274_b_pdata + }, + .i2c_adapter_id = 4, + } +}; +#endif + +#ifdef CONFIG_INTEL_IPU4_IMX290 + +#define IMX290_LANES 4 +#define IMX290_I2C_ADDRESS 0x1a + +static struct crlmodule_platform_data imx290_pdata = { + .xshutdown = GPIO_BASE + 64, + .lanes = IMX290_LANES, + .ext_clk = 37125000, + .op_sys_clock = (uint64_t []){222750000, 445500000}, + .module_name = "IMX290", + .id_string = "0x2 0x90", + .suffix = 'a', +}; + +static struct ipu_isys_csi2_config imx290_csi2_cfg = { + .nlanes = IMX290_LANES, + .port = 0, +}; + +static struct ipu_isys_subdev_info imx290_crl_sd = { + .csi2 = &imx290_csi2_cfg, + .i2c = { + .board_info = { + I2C_BOARD_INFO(CRLMODULE_NAME, IMX290_I2C_ADDRESS), + .platform_data = &imx290_pdata + }, + .i2c_adapter_id = 2, + } +}; +#endif + +#ifdef CONFIG_INTEL_IPU4_OV13860 + +#define OV13860_LANES 2 +#define OV13860_I2C_ADDRESS 0x10 + +static struct crlmodule_platform_data ov13860_pdata = { + .xshutdown = GPIO_BASE + 71, + .lanes = OV13860_LANES, + .ext_clk = 
24000000, + .op_sys_clock = (uint64_t []){ 600000000, 300000000}, + .module_name = "OV13860", + .suffix = 'a', +}; + +static struct ipu_isys_csi2_config ov13860_csi2_cfg = { + .nlanes = OV13860_LANES, + .port = 0, +}; + +static struct ipu_isys_subdev_info ov13860_crl_sd = { + .csi2 = &ov13860_csi2_cfg, + .i2c = { + .board_info = { + I2C_BOARD_INFO(CRLMODULE_NAME, OV13860_I2C_ADDRESS), + .platform_data = &ov13860_pdata, + }, + .i2c_adapter_id = 2, + } +}; +#endif + +#ifdef CONFIG_INTEL_IPU4_OV9281 + +#define OV9281_LANES 2 +#define OV9281_I2C_ADDRESS 0x10 + +static struct crlmodule_platform_data ov9281_pdata = { + .xshutdown = GPIO_BASE + 71, + .lanes = OV9281_LANES, + .ext_clk = 24000000, + .op_sys_clock = (uint64_t []){400000000}, + .module_name = "OV9281", + .id_string = "0x92 0x81", + .suffix = 'a', +}; + +static struct ipu_isys_csi2_config ov9281_csi2_cfg = { + .nlanes = OV9281_LANES, + .port = 0, +}; + +static struct ipu_isys_subdev_info ov9281_crl_sd = { + .csi2 = &ov9281_csi2_cfg, + .i2c = { + .board_info = { + I2C_BOARD_INFO(CRLMODULE_NAME, OV9281_I2C_ADDRESS), + .platform_data = &ov9281_pdata, + }, + .i2c_adapter_id = 0, + } +}; +#endif + +#if IS_ENABLED(CONFIG_VIDEO_BU64295) + +#define BU64295_VCM_ADDR 0x0c +#define BU64295_NAME "bu64295" + +static struct ipu_isys_subdev_info bu64295_sd = { + .i2c = { + .board_info = { + I2C_BOARD_INFO(BU64295_NAME, BU64295_VCM_ADDR), + }, + .i2c_adapter_id = 2, + } +}; +#endif + +#ifdef CONFIG_INTEL_IPU4_ADV7481 + +#define ADV7481_LANES 4 +#define ADV7481_I2C_ADDRESS 0xe0 +#define ADV7481B_I2C_ADDRESS 0xe2 + +static struct crlmodule_platform_data adv7481_pdata = { + .xshutdown = GPIO_BASE + 63, + .lanes = ADV7481_LANES, + .ext_clk = 24000000, + .op_sys_clock = (uint64_t []){600000000}, + .module_name = "ADV7481", + .suffix = 'a', +}; + +static struct ipu_isys_csi2_config adv7481_csi2_cfg = { + .nlanes = ADV7481_LANES, + .port = 0, +}; + +static struct ipu_isys_subdev_info adv7481_crl_sd = { + .csi2 = &adv7481_csi2_cfg, + 
.i2c = { + .board_info = { + .type = CRLMODULE_NAME, + .flags = I2C_CLIENT_TEN, + .addr = ADV7481_I2C_ADDRESS, + .platform_data = &adv7481_pdata, + }, + .i2c_adapter_id = 2, + } +}; +#endif + +#ifdef CONFIG_INTEL_IPU4_ADV7481_EVAL + +#define ADV7481_LANES 4 +#define ADV7481_I2C_ADDRESS 0xe0 +#define ADV7481B_I2C_ADDRESS 0xe2 + +static struct crlmodule_platform_data adv7481_eval_pdata = { + .xshutdown = GPIO_BASE + 63, + .lanes = ADV7481_LANES, + .ext_clk = 24000000, + .op_sys_clock = (uint64_t []){600000000}, + .module_name = "ADV7481_EVAL", + .suffix = 'a', +}; + +static struct ipu_isys_csi2_config adv7481_eval_csi2_cfg = { + .nlanes = ADV7481_LANES, + .port = 0, +}; + +static struct ipu_isys_subdev_info adv7481_eval_crl_sd = { + .csi2 = &adv7481_eval_csi2_cfg, + .i2c = { + .board_info = { + .type = CRLMODULE_NAME, + .flags = I2C_CLIENT_TEN, + .addr = ADV7481_I2C_ADDRESS, + .platform_data = &adv7481_eval_pdata, + }, + .i2c_adapter_id = 2, + } +}; + +static struct crlmodule_platform_data adv7481b_eval_pdata = { + .xshutdown = GPIO_BASE + 63, + .lanes = ADV7481_LANES, + .ext_clk = 24000000, + .op_sys_clock = (uint64_t []){600000000}, + .module_name = "ADV7481B_EVAL", + .suffix = 'b', +}; + +static struct ipu_isys_csi2_config adv7481b_eval_csi2_cfg = { + .nlanes = ADV7481_LANES, + .port = 4, +}; + +static struct ipu_isys_subdev_info adv7481b_eval_crl_sd = { + .csi2 = &adv7481b_eval_csi2_cfg, + .i2c = { + .board_info = { + .type = CRLMODULE_NAME, + .flags = I2C_CLIENT_TEN, + .addr = ADV7481B_I2C_ADDRESS, + .platform_data = &adv7481b_eval_pdata, + }, + .i2c_adapter_id = 2, + } +}; +#endif + +#if IS_ENABLED(CONFIG_VIDEO_AGGREGATOR_STUB) + +#define VIDEO_AGGRE_LANES 4 +#define VIDEO_AGGRE_I2C_ADDRESS 0x3b +#define VIDEO_AGGRE_B_I2C_ADDRESS 0x3c + +static struct ipu_isys_csi2_config video_aggre_csi2_cfg = { + .nlanes = VIDEO_AGGRE_LANES, + .port = 0, +}; + +static struct ipu_isys_subdev_info video_aggre_stub_sd = { + .csi2 = &video_aggre_csi2_cfg, + .i2c = { + .board_info 
= { + .type = "video-aggre", + .addr = VIDEO_AGGRE_I2C_ADDRESS, + }, + .i2c_adapter_id = 2, + } +}; + +static struct ipu_isys_csi2_config video_aggre_b_csi2_cfg = { + .nlanes = VIDEO_AGGRE_LANES, + .port = 4, +}; + +static struct ipu_isys_subdev_info video_aggre_b_stub_sd = { + .csi2 = &video_aggre_b_csi2_cfg, + .i2c = { + .board_info = { + .type = "video-aggre", + .addr = VIDEO_AGGRE_B_I2C_ADDRESS, + }, + .i2c_adapter_id = 2, + } +}; +#endif + +#ifdef CONFIG_INTEL_IPU4_MAGNA +#define MAGNA_LANES 4 +#define MAGNA_PHY_ADDR 0x60 /* 0x30 for 7bit addr */ +#define MAGNA_ADDRESS_A 0x61 +#define MAGNA_ADDRESS_B 0x62 + +static struct crlmodule_platform_data magna_pdata = { + .lanes = MAGNA_LANES, + .ext_clk = 24000000, + .op_sys_clock = (uint64_t []){ 400000000 }, + .module_name = "MAGNA", + .id_string = "0xa6 0x35", + /* + * The pin number of xshutdown will be determined + * and replaced inside TI964 driver. + * The number here stands for which GPIO to connect with. + * 1 means to connect sensor xshutdown to GPIO1 + */ + .xshutdown = 1, + /* + * this flags indicates the expected polarity for the LineValid + * indication received in Raw mode. + * 1 means LineValid is high for the duration of the video frame. + */ + .high_framevalid_flags = 1, +}; +#endif + +#ifdef CONFIG_INTEL_IPU4_OV10635 +#define OV10635_LANES 4 +#define OV10635_I2C_PHY_ADDR 0x60 /* 0x30 for 7bit addr */ +#define OV10635A_I2C_ADDRESS 0x61 +#define OV10635B_I2C_ADDRESS 0x62 +#define OV10635C_I2C_ADDRESS 0x63 +#define OV10635D_I2C_ADDRESS 0x64 + +static struct crlmodule_platform_data ov10635_pdata = { + .lanes = OV10635_LANES, + .ext_clk = 24000000, + .op_sys_clock = (uint64_t []){ 400000000 }, + .module_name = "OV10635", + .id_string = "0xa6 0x35", + /* + * The pin number of xshutdown will be determined + * and replaced inside TI964 driver. + * The number here stands for which GPIO to connect with. 
+ * 1 means to connect sensor xshutdown to GPIO1 + */ + .xshutdown = 0, +}; +#endif + +#ifdef CONFIG_INTEL_IPU4_OV10640 +#define OV10640_LANES 4 +#define OV10640_I2C_PHY_ADDR 0x60 /* 0x30 for 7bit addr */ +#define OV10640A_I2C_ADDRESS 0x61 +#define OV10640B_I2C_ADDRESS 0x62 +#define OV10640C_I2C_ADDRESS 0x63 +#define OV10640D_I2C_ADDRESS 0x64 + +static struct crlmodule_platform_data ov10640_pdata = { + .lanes = OV10640_LANES, + .ext_clk = 24000000, + .op_sys_clock = (uint64_t []){ 400000000 }, + .module_name = "OV10640", + .id_string = "0xa6 0x40", + /* + * The pin number of xshutdown will be determined + * and replaced inside TI964 driver. + * The number here stands for which GPIO to connect with. + * 1 means to connect sensor xshutdown to GPIO1 + */ + .xshutdown = 1, +}; +#endif + +#if IS_ENABLED(CONFIG_VIDEO_TI964) +#define TI964_I2C_ADAPTER 0 +#define TI964_I2C_ADAPTER_2 7 +#define TI964_I2C_ADDRESS 0x3d +#define TI964_LANES 4 + +static struct ipu_isys_csi2_config ti964_csi2_cfg = { + .nlanes = TI964_LANES, + .port = 0, +}; + +static struct ipu_isys_csi2_config ti964_csi2_cfg_2 = { + .nlanes = TI964_LANES, + .port = 4, +}; + +static struct ti964_subdev_info ti964_subdevs[] = { +#ifdef CONFIG_INTEL_IPU4_OV10635 + { + .board_info = { + .type = CRLMODULE_NAME, + .addr = OV10635A_I2C_ADDRESS, + .platform_data = &ov10635_pdata, + }, + .i2c_adapter_id = TI964_I2C_ADAPTER, + .rx_port = 0, + .phy_i2c_addr = OV10635_I2C_PHY_ADDR, + .suffix = 'a', + }, + { + .board_info = { + .type = CRLMODULE_NAME, + .addr = OV10635B_I2C_ADDRESS, + .platform_data = &ov10635_pdata, + }, + .i2c_adapter_id = TI964_I2C_ADAPTER, + .rx_port = 1, + .phy_i2c_addr = OV10635_I2C_PHY_ADDR, + .suffix = 'b', + }, + { + .board_info = { + .type = CRLMODULE_NAME, + .addr = OV10635C_I2C_ADDRESS, + .platform_data = &ov10635_pdata, + }, + .i2c_adapter_id = TI964_I2C_ADAPTER, + .rx_port = 2, + .phy_i2c_addr = OV10635_I2C_PHY_ADDR, + .suffix = 'c', + }, + { + .board_info = { + .type = CRLMODULE_NAME, + 
.addr = OV10635D_I2C_ADDRESS, + .platform_data = &ov10635_pdata, + }, + .i2c_adapter_id = TI964_I2C_ADAPTER, + .rx_port = 3, + .phy_i2c_addr = OV10635_I2C_PHY_ADDR, + .suffix = 'd', + }, +#endif +#ifdef CONFIG_INTEL_IPU4_OV10640 + { + .board_info = { + .type = CRLMODULE_NAME, + .addr = OV10640A_I2C_ADDRESS, + .platform_data = &ov10640_pdata, + }, + .i2c_adapter_id = TI964_I2C_ADAPTER, + .rx_port = 0, + .phy_i2c_addr = OV10640_I2C_PHY_ADDR, + .suffix = 'a', + }, + { + .board_info = { + .type = CRLMODULE_NAME, + .addr = OV10640B_I2C_ADDRESS, + .platform_data = &ov10640_pdata, + }, + .i2c_adapter_id = TI964_I2C_ADAPTER, + .rx_port = 1, + .phy_i2c_addr = OV10640_I2C_PHY_ADDR, + .suffix = 'b', + }, + { + .board_info = { + .type = CRLMODULE_NAME, + .addr = OV10640C_I2C_ADDRESS, + .platform_data = &ov10640_pdata, + }, + .i2c_adapter_id = TI964_I2C_ADAPTER, + .rx_port = 2, + .phy_i2c_addr = OV10640_I2C_PHY_ADDR, + .suffix = 'c', + }, + { + .board_info = { + .type = CRLMODULE_NAME, + .addr = OV10640D_I2C_ADDRESS, + .platform_data = &ov10640_pdata, + }, + .i2c_adapter_id = TI964_I2C_ADAPTER, + .rx_port = 3, + .phy_i2c_addr = OV10640_I2C_PHY_ADDR, + .suffix = 'd', + }, +#endif +#ifdef CONFIG_INTEL_IPU4_MAGNA + { + .board_info = { + .type = CRLMODULE_NAME, + .addr = MAGNA_ADDRESS_A, + .platform_data = &magna_pdata, + }, + .i2c_adapter_id = TI964_I2C_ADAPTER, + .rx_port = 0, + .phy_i2c_addr = MAGNA_PHY_ADDR, + .suffix = 'a', + }, + { + .board_info = { + .type = CRLMODULE_NAME, + .addr = MAGNA_ADDRESS_B, + .platform_data = &magna_pdata, + }, + .i2c_adapter_id = TI964_I2C_ADAPTER, + .rx_port = 1, + .phy_i2c_addr = MAGNA_PHY_ADDR, + .suffix = 'b', + }, +#endif +}; + +static struct ti964_subdev_info ti964_subdevs_2[] = { +#ifdef CONFIG_INTEL_IPU4_OV10635 + { + .board_info = { + .type = CRLMODULE_NAME, + .addr = OV10635A_I2C_ADDRESS, + .platform_data = &ov10635_pdata, + }, + .i2c_adapter_id = TI964_I2C_ADAPTER_2, + .rx_port = 0, + .phy_i2c_addr = OV10635_I2C_PHY_ADDR, + .suffix = 
'e', + }, + { + .board_info = { + .type = CRLMODULE_NAME, + .addr = OV10635B_I2C_ADDRESS, + .platform_data = &ov10635_pdata, + }, + .i2c_adapter_id = TI964_I2C_ADAPTER_2, + .rx_port = 1, + .phy_i2c_addr = OV10635_I2C_PHY_ADDR, + .suffix = 'f', + }, + { + .board_info = { + .type = CRLMODULE_NAME, + .addr = OV10635C_I2C_ADDRESS, + .platform_data = &ov10635_pdata, + }, + .i2c_adapter_id = TI964_I2C_ADAPTER_2, + .rx_port = 2, + .phy_i2c_addr = OV10635_I2C_PHY_ADDR, + .suffix = 'g', + }, + { + .board_info = { + .type = CRLMODULE_NAME, + .addr = OV10635D_I2C_ADDRESS, + .platform_data = &ov10635_pdata, + }, + .i2c_adapter_id = TI964_I2C_ADAPTER_2, + .rx_port = 3, + .phy_i2c_addr = OV10635_I2C_PHY_ADDR, + .suffix = 'h', + }, +#endif +#ifdef CONFIG_INTEL_IPU4_OV10640 + { + .board_info = { + .type = CRLMODULE_NAME, + .addr = OV10640A_I2C_ADDRESS, + .platform_data = &ov10640_pdata, + }, + .i2c_adapter_id = TI964_I2C_ADAPTER_2, + .rx_port = 0, + .phy_i2c_addr = OV10640_I2C_PHY_ADDR, + .suffix = 'e', + }, + { + .board_info = { + .type = CRLMODULE_NAME, + .addr = OV10640B_I2C_ADDRESS, + .platform_data = &ov10640_pdata, + }, + .i2c_adapter_id = TI964_I2C_ADAPTER_2, + .rx_port = 1, + .phy_i2c_addr = OV10640_I2C_PHY_ADDR, + .suffix = 'f', + }, + { + .board_info = { + .type = CRLMODULE_NAME, + .addr = OV10640C_I2C_ADDRESS, + .platform_data = &ov10640_pdata, + }, + .i2c_adapter_id = TI964_I2C_ADAPTER_2, + .rx_port = 2, + .phy_i2c_addr = OV10640_I2C_PHY_ADDR, + .suffix = 'g', + }, + { + .board_info = { + .type = CRLMODULE_NAME, + .addr = OV10640D_I2C_ADDRESS, + .platform_data = &ov10640_pdata, + }, + .i2c_adapter_id = TI964_I2C_ADAPTER_2, + .rx_port = 3, + .phy_i2c_addr = OV10640_I2C_PHY_ADDR, + .suffix = 'h', + }, +#endif +#ifdef CONFIG_INTEL_IPU4_MAGNA + { + .board_info = { + .type = CRLMODULE_NAME, + .addr = MAGNA_ADDRESS_A, + .platform_data = &magna_pdata, + }, + .i2c_adapter_id = TI964_I2C_ADAPTER_2, + .rx_port = 0, + .phy_i2c_addr = MAGNA_PHY_ADDR, + .suffix = 'e', + }, + { + 
.board_info = { + .type = CRLMODULE_NAME, + .addr = MAGNA_ADDRESS_B, + .platform_data = &magna_pdata, + }, + .i2c_adapter_id = TI964_I2C_ADAPTER_2, + .rx_port = 1, + .phy_i2c_addr = MAGNA_PHY_ADDR, + .suffix = 'f', + }, +#endif +}; + +static struct ti964_pdata ti964_pdata = { + .subdev_info = ti964_subdevs, + .subdev_num = ARRAY_SIZE(ti964_subdevs), + .reset_gpio = GPIO_BASE + 63, + .suffix = 'a', +}; + +static struct ipu_isys_subdev_info ti964_sd = { + .csi2 = &ti964_csi2_cfg, + .i2c = { + .board_info = { + .type = "ti964", + .addr = TI964_I2C_ADDRESS, + .platform_data = &ti964_pdata, + }, + .i2c_adapter_id = TI964_I2C_ADAPTER, + } +}; + +static struct ti964_pdata ti964_pdata_2 = { + .subdev_info = ti964_subdevs_2, + .subdev_num = ARRAY_SIZE(ti964_subdevs_2), + .reset_gpio = GPIO_BASE + 66, + .suffix = 'b', +}; + +static struct ipu_isys_subdev_info ti964_sd_2 = { + .csi2 = &ti964_csi2_cfg_2, + .i2c = { + .board_info = { + .type = "ti964", + .addr = TI964_I2C_ADDRESS, + .platform_data = &ti964_pdata_2, + }, + .i2c_adapter_id = TI964_I2C_ADAPTER_2, + } +}; +#endif + +#if IS_ENABLED(CONFIG_VIDEO_TI964) /* for AS_1140 */ +#define AS_1140_TI964_I2C_ADAPTER 2 +#define AS_1140_TI964_I2C_ADAPTER_2 4 + +static struct ipu_isys_csi2_config as_1140_ti964_csi2_cfg = { + .nlanes = TI964_LANES, + .port = 0, +}; + +static struct ipu_isys_csi2_config as_1140_ti964_csi2_cfg_2 = { + .nlanes = TI964_LANES, + .port = 4, +}; + +static struct ti964_subdev_info as_1140_ti964_subdevs[] = { +#ifdef CONFIG_INTEL_IPU4_OV10635 + { + .board_info = { + .type = CRLMODULE_NAME, + .addr = OV10635A_I2C_ADDRESS, + .platform_data = &ov10635_pdata, + }, + .i2c_adapter_id = AS_1140_TI964_I2C_ADAPTER, + .rx_port = 0, + .phy_i2c_addr = OV10635_I2C_PHY_ADDR, + .suffix = 'a', + }, + { + .board_info = { + .type = CRLMODULE_NAME, + .addr = OV10635B_I2C_ADDRESS, + .platform_data = &ov10635_pdata, + }, + .i2c_adapter_id = AS_1140_TI964_I2C_ADAPTER, + .rx_port = 1, + .phy_i2c_addr = OV10635_I2C_PHY_ADDR, + 
.suffix = 'b', + }, + { + .board_info = { + .type = CRLMODULE_NAME, + .addr = OV10635C_I2C_ADDRESS, + .platform_data = &ov10635_pdata, + }, + .i2c_adapter_id = AS_1140_TI964_I2C_ADAPTER, + .rx_port = 2, + .phy_i2c_addr = OV10635_I2C_PHY_ADDR, + .suffix = 'c', + }, + { + .board_info = { + .type = CRLMODULE_NAME, + .addr = OV10635D_I2C_ADDRESS, + .platform_data = &ov10635_pdata, + }, + .i2c_adapter_id = AS_1140_TI964_I2C_ADAPTER, + .rx_port = 3, + .phy_i2c_addr = OV10635_I2C_PHY_ADDR, + .suffix = 'd', + }, +#endif +#ifdef CONFIG_INTEL_IPU4_OV10640 + { + .board_info = { + .type = CRLMODULE_NAME, + .addr = OV10640A_I2C_ADDRESS, + .platform_data = &ov10640_pdata, + }, + .i2c_adapter_id = AS_1140_TI964_I2C_ADAPTER, + .rx_port = 0, + .phy_i2c_addr = OV10640_I2C_PHY_ADDR, + .suffix = 'a', + }, + { + .board_info = { + .type = CRLMODULE_NAME, + .addr = OV10640B_I2C_ADDRESS, + .platform_data = &ov10640_pdata, + }, + .i2c_adapter_id = AS_1140_TI964_I2C_ADAPTER, + .rx_port = 1, + .phy_i2c_addr = OV10640_I2C_PHY_ADDR, + .suffix = 'b', + }, + { + .board_info = { + .type = CRLMODULE_NAME, + .addr = OV10640C_I2C_ADDRESS, + .platform_data = &ov10640_pdata, + }, + .i2c_adapter_id = AS_1140_TI964_I2C_ADAPTER, + .rx_port = 2, + .phy_i2c_addr = OV10640_I2C_PHY_ADDR, + .suffix = 'c', + }, + { + .board_info = { + .type = CRLMODULE_NAME, + .addr = OV10640D_I2C_ADDRESS, + .platform_data = &ov10640_pdata, + }, + .i2c_adapter_id = AS_1140_TI964_I2C_ADAPTER, + .rx_port = 3, + .phy_i2c_addr = OV10640_I2C_PHY_ADDR, + .suffix = 'd', + }, +#endif +#ifdef CONFIG_INTEL_IPU4_MAGNA + { + .board_info = { + .type = CRLMODULE_NAME, + .addr = MAGNA_ADDRESS_A, + .platform_data = &magna_pdata, + }, + .i2c_adapter_id = AS_1140_TI964_I2C_ADAPTER, + .rx_port = 0, + .phy_i2c_addr = MAGNA_PHY_ADDR, + .suffix = 'a', + }, + { + .board_info = { + .type = CRLMODULE_NAME, + .addr = MAGNA_ADDRESS_B, + .platform_data = &magna_pdata, + }, + .i2c_adapter_id = AS_1140_TI964_I2C_ADAPTER, + .rx_port = 1, + .phy_i2c_addr = 
MAGNA_PHY_ADDR, + .suffix = 'b', + }, +#endif +}; + +static struct ti964_subdev_info as_1140_ti964_subdevs_2[] = { +#ifdef CONFIG_INTEL_IPU4_OV10635 + { + .board_info = { + .type = CRLMODULE_NAME, + .addr = OV10635A_I2C_ADDRESS, + .platform_data = &ov10635_pdata, + }, + .i2c_adapter_id = AS_1140_TI964_I2C_ADAPTER_2, + .rx_port = 0, + .phy_i2c_addr = OV10635_I2C_PHY_ADDR, + .suffix = 'e', + }, + { + .board_info = { + .type = CRLMODULE_NAME, + .addr = OV10635B_I2C_ADDRESS, + .platform_data = &ov10635_pdata, + }, + .i2c_adapter_id = AS_1140_TI964_I2C_ADAPTER_2, + .rx_port = 1, + .phy_i2c_addr = OV10635_I2C_PHY_ADDR, + .suffix = 'f', + }, + { + .board_info = { + .type = CRLMODULE_NAME, + .addr = OV10635C_I2C_ADDRESS, + .platform_data = &ov10635_pdata, + }, + .i2c_adapter_id = AS_1140_TI964_I2C_ADAPTER_2, + .rx_port = 2, + .phy_i2c_addr = OV10635_I2C_PHY_ADDR, + .suffix = 'g', + }, + { + .board_info = { + .type = CRLMODULE_NAME, + .addr = OV10635D_I2C_ADDRESS, + .platform_data = &ov10635_pdata, + }, + .i2c_adapter_id = AS_1140_TI964_I2C_ADAPTER_2, + .rx_port = 3, + .phy_i2c_addr = OV10635_I2C_PHY_ADDR, + .suffix = 'h', + }, +#endif +#ifdef CONFIG_INTEL_IPU4_OV10640 + { + .board_info = { + .type = CRLMODULE_NAME, + .addr = OV10640A_I2C_ADDRESS, + .platform_data = &ov10640_pdata, + }, + .i2c_adapter_id = AS_1140_TI964_I2C_ADAPTER_2, + .rx_port = 0, + .phy_i2c_addr = OV10640_I2C_PHY_ADDR, + .suffix = 'e', + }, + { + .board_info = { + .type = CRLMODULE_NAME, + .addr = OV10640B_I2C_ADDRESS, + .platform_data = &ov10640_pdata, + }, + .i2c_adapter_id = AS_1140_TI964_I2C_ADAPTER_2, + .rx_port = 1, + .phy_i2c_addr = OV10640_I2C_PHY_ADDR, + .suffix = 'f', + }, + { + .board_info = { + .type = CRLMODULE_NAME, + .addr = OV10640C_I2C_ADDRESS, + .platform_data = &ov10640_pdata, + }, + .i2c_adapter_id = AS_1140_TI964_I2C_ADAPTER_2, + .rx_port = 2, + .phy_i2c_addr = OV10640_I2C_PHY_ADDR, + .suffix = 'g', + }, + { + .board_info = { + .type = CRLMODULE_NAME, + .addr = OV10640D_I2C_ADDRESS, 
+ .platform_data = &ov10640_pdata, + }, + .i2c_adapter_id = AS_1140_TI964_I2C_ADAPTER_2, + .rx_port = 3, + .phy_i2c_addr = OV10640_I2C_PHY_ADDR, + .suffix = 'h', + }, +#endif +#ifdef CONFIG_INTEL_IPU4_MAGNA + { + .board_info = { + .type = CRLMODULE_NAME, + .addr = MAGNA_ADDRESS_A, + .platform_data = &magna_pdata, + }, + .i2c_adapter_id = AS_1140_TI964_I2C_ADAPTER_2, + .rx_port = 0, + .phy_i2c_addr = MAGNA_PHY_ADDR, + .suffix = 'e', + }, + { + .board_info = { + .type = CRLMODULE_NAME, + .addr = MAGNA_ADDRESS_B, + .platform_data = &magna_pdata, + }, + .i2c_adapter_id = AS_1140_TI964_I2C_ADAPTER_2, + .rx_port = 1, + .phy_i2c_addr = MAGNA_PHY_ADDR, + .suffix = 'f', + }, +#endif +}; + +static struct ti964_pdata as_1140_ti964_pdata = { + .subdev_info = as_1140_ti964_subdevs, + .subdev_num = ARRAY_SIZE(as_1140_ti964_subdevs), + .reset_gpio = GPIO_BASE + 62, + .suffix = 'a', +}; + +static struct ipu_isys_subdev_info as_1140_ti964_sd = { + .csi2 = &as_1140_ti964_csi2_cfg, + .i2c = { + .board_info = { + .type = "ti964", + .addr = TI964_I2C_ADDRESS, + .platform_data = &as_1140_ti964_pdata, + }, + .i2c_adapter_id = AS_1140_TI964_I2C_ADAPTER, + } +}; + +static struct ti964_pdata as_1140_ti964_pdata_2 = { + .subdev_info = as_1140_ti964_subdevs_2, + .subdev_num = ARRAY_SIZE(as_1140_ti964_subdevs_2), + .reset_gpio = GPIO_BASE + 69, + .suffix = 'b', +}; + +static struct ipu_isys_subdev_info as_1140_ti964_sd_2 = { + .csi2 = &as_1140_ti964_csi2_cfg_2, + .i2c = { + .board_info = { + .type = "ti964", + .addr = TI964_I2C_ADDRESS, + .platform_data = &as_1140_ti964_pdata_2, + }, + .i2c_adapter_id = AS_1140_TI964_I2C_ADAPTER_2, + } +}; +#endif + +#ifdef CONFIG_INTEL_IPU4_OX03A10 +#define OX03A10_LANES 4 +#define OX03A10_I2C_PHY_ADDR 0x6c +#define OX03A10A_I2C_ADDRESS 0x30 +#define OX03A10B_I2C_ADDRESS 0x31 + +#define OX03A10A_SER_ADDRESS 0x58 +#define OX03A10B_SER_ADDRESS 0x59 + +static struct crlmodule_platform_data ox03a10_pdata = { + .lanes = OX03A10_LANES, + .ext_clk = 27000000, + 
.op_sys_clock = (uint64_t[]){ 87750000 }, + .module_name = "OX03A10", + .id_string = "0x58 0x3 0x41", + /* + * TI960 has 4 gpio pins, for PWDN, FSIN, and etc. + * it depends connection between serializer and sensor, + * please specify xshutdown, fsin as needed. + */ + .fsin = 0, /* gpio 0 used for FSIN */ +}; +#endif + +#ifdef CONFIG_INTEL_IPU4_OV495 +#define OV495_LANES 4 +#define OV495_I2C_PHY_ADDR 0x48 +#define OV495A_I2C_ADDRESS 0x30 +#define OV495B_I2C_ADDRESS 0x31 +#define OV495C_I2C_ADDRESS 0x32 +#define OV495D_I2C_ADDRESS 0x33 + +#define OV495A_SER_ADDRESS 0x58 +#define OV495B_SER_ADDRESS 0x59 +#define OV495C_SER_ADDRESS 0x5a +#define OV495D_SER_ADDRESS 0x5b + +static struct crlmodule_platform_data ov495_pdata = { + .lanes = OV495_LANES, + .ext_clk = 27000000, + .op_sys_clock = (uint64_t[]){ 87750000 }, + .module_name = "OV495", + .id_string = "0x51 0x49 0x56 0x4f", + /* + * TI960 has 4 gpio pins, for PWDN, FSIN, and etc. + * it depends connection between serializer and sensor, + * please specify xshutdown, fsin as needed. 
+ */ + .fsin = 2, /* gpio 2 used for FSIN */ +}; +#endif + +#if IS_ENABLED(CONFIG_VIDEO_TI960) +#define TI960_I2C_ADAPTER 2 +#define TI960_I2C_ADAPTER_2 4 +#define TI960_LANES 4 + +static struct ipu_isys_csi2_config ti960_csi2_cfg = { + .nlanes = TI960_LANES, + .port = 0, +}; + +static struct ipu_isys_csi2_config ti960_csi2_cfg_2 = { + .nlanes = TI960_LANES, + .port = 4, +}; + +static struct ti960_subdev_info ti960_subdevs[] = { +#ifdef CONFIG_INTEL_IPU4_OX03A10 + { + .board_info = { + .type = CRLMODULE_NAME, + .addr = OX03A10A_I2C_ADDRESS, + .platform_data = &ox03a10_pdata, + }, + .i2c_adapter_id = TI960_I2C_ADAPTER, + .rx_port = 0, + .phy_i2c_addr = OX03A10_I2C_PHY_ADDR, + .ser_alias = OX03A10A_SER_ADDRESS, + .suffix = 'a', + }, + { + .board_info = { + .type = CRLMODULE_NAME, + .addr = OX03A10B_I2C_ADDRESS, + .platform_data = &ox03a10_pdata, + }, + .i2c_adapter_id = TI960_I2C_ADAPTER, + .rx_port = 1, + .phy_i2c_addr = OX03A10_I2C_PHY_ADDR, + .ser_alias = OX03A10B_SER_ADDRESS, + .suffix = 'b', + }, +#endif +#ifdef CONFIG_INTEL_IPU4_OV495 + { + .board_info = { + .type = CRLMODULE_NAME, + .addr = OV495A_I2C_ADDRESS, + .platform_data = &ov495_pdata, + }, + .i2c_adapter_id = TI960_I2C_ADAPTER, + .rx_port = 0, + .phy_i2c_addr = OV495_I2C_PHY_ADDR, + .ser_alias = OV495A_SER_ADDRESS, + .suffix = 'a', + }, + { + .board_info = { + .type = CRLMODULE_NAME, + .addr = OV495B_I2C_ADDRESS, + .platform_data = &ov495_pdata, + }, + .i2c_adapter_id = TI960_I2C_ADAPTER, + .rx_port = 1, + .phy_i2c_addr = OV495_I2C_PHY_ADDR, + .ser_alias = OV495B_SER_ADDRESS, + .suffix = 'b', + }, + { + .board_info = { + .type = CRLMODULE_NAME, + .addr = OV495C_I2C_ADDRESS, + .platform_data = &ov495_pdata, + }, + .i2c_adapter_id = TI960_I2C_ADAPTER, + .rx_port = 2, + .phy_i2c_addr = OV495_I2C_PHY_ADDR, + .ser_alias = OV495C_SER_ADDRESS, + .suffix = 'c', + }, + { + .board_info = { + .type = CRLMODULE_NAME, + .addr = OV495D_I2C_ADDRESS, + .platform_data = &ov495_pdata, + }, + .i2c_adapter_id = 
TI960_I2C_ADAPTER, + .rx_port = 3, + .phy_i2c_addr = OV495_I2C_PHY_ADDR, + .ser_alias = OV495D_SER_ADDRESS, + .suffix = 'd', + }, +#endif +}; + +static struct ti960_subdev_info ti960_subdevs_2[] = { +#ifdef CONFIG_INTEL_IPU4_OX03A10 + { + .board_info = { + .type = CRLMODULE_NAME, + .addr = OX03A10A_I2C_ADDRESS, + .platform_data = &ox03a10_pdata, + }, + .i2c_adapter_id = TI960_I2C_ADAPTER_2, + .rx_port = 0, + .phy_i2c_addr = OX03A10_I2C_PHY_ADDR, + .ser_alias = OX03A10A_SER_ADDRESS, + .suffix = 'e', + }, + { + .board_info = { + .type = CRLMODULE_NAME, + .addr = OX03A10B_I2C_ADDRESS, + .platform_data = &ox03a10_pdata, + }, + .i2c_adapter_id = TI960_I2C_ADAPTER_2, + .rx_port = 1, + .phy_i2c_addr = OX03A10_I2C_PHY_ADDR, + .ser_alias = OX03A10B_SER_ADDRESS, + .suffix = 'f', + }, +#endif +#ifdef CONFIG_INTEL_IPU4_OV495 + { + .board_info = { + .type = CRLMODULE_NAME, + .addr = OV495A_I2C_ADDRESS, + .platform_data = &ov495_pdata, + }, + .i2c_adapter_id = TI960_I2C_ADAPTER_2, + .rx_port = 0, + .phy_i2c_addr = OV495_I2C_PHY_ADDR, + .ser_alias = OV495A_SER_ADDRESS, + .suffix = 'e', + }, + { + .board_info = { + .type = CRLMODULE_NAME, + .addr = OV495B_I2C_ADDRESS, + .platform_data = &ov495_pdata, + }, + .i2c_adapter_id = TI960_I2C_ADAPTER_2, + .rx_port = 1, + .phy_i2c_addr = OV495_I2C_PHY_ADDR, + .ser_alias = OV495B_SER_ADDRESS, + .suffix = 'f', + }, + { + .board_info = { + .type = CRLMODULE_NAME, + .addr = OV495C_I2C_ADDRESS, + .platform_data = &ov495_pdata, + }, + .i2c_adapter_id = TI960_I2C_ADAPTER_2, + .rx_port = 2, + .phy_i2c_addr = OV495_I2C_PHY_ADDR, + .ser_alias = OV495C_SER_ADDRESS, + .suffix = 'g', + }, + { + .board_info = { + .type = CRLMODULE_NAME, + .addr = OV495D_I2C_ADDRESS, + .platform_data = &ov495_pdata, + }, + .i2c_adapter_id = TI960_I2C_ADAPTER_2, + .rx_port = 3, + .phy_i2c_addr = OV495_I2C_PHY_ADDR, + .ser_alias = OV495D_SER_ADDRESS, + .suffix = 'h', + }, +#endif +}; + +static struct ti960_pdata ti960_pdata = { + .subdev_info = ti960_subdevs, + .subdev_num 
= ARRAY_SIZE(ti960_subdevs), + .reset_gpio = GPIO_BASE + 62, + .suffix = 'a', +}; + +static struct ipu_isys_subdev_info ti960_sd = { + .csi2 = &ti960_csi2_cfg, + .i2c = { + .board_info = { + .type = "ti960", + .addr = TI960_I2C_ADDRESS, + .platform_data = &ti960_pdata, + }, + .i2c_adapter_id = TI960_I2C_ADAPTER, + } +}; + +static struct ti960_pdata ti960_pdata_2 = { + .subdev_info = ti960_subdevs_2, + .subdev_num = ARRAY_SIZE(ti960_subdevs_2), + .reset_gpio = GPIO_BASE + 66, + .suffix = 'b', +}; + +static struct ipu_isys_subdev_info ti960_sd_2 = { + .csi2 = &ti960_csi2_cfg_2, + .i2c = { + .board_info = { + .type = "ti960", + .addr = TI960_I2C_ADDRESS, + .platform_data = &ti960_pdata_2, + }, + .i2c_adapter_id = TI960_I2C_ADAPTER_2, + } +}; +#endif + +#ifdef CONFIG_INTEL_IPU4_AR0231AT +#define AR0231AT_LANES 4 +#define AR0231ATA_I2C_ADDRESS 0x11 +#define AR0231ATB_I2C_ADDRESS 0x12 +#define AR0231ATC_I2C_ADDRESS 0x13 +#define AR0231ATD_I2C_ADDRESS 0x14 + +static struct crlmodule_platform_data ar0231at_pdata = { + .lanes = AR0231AT_LANES, + .ext_clk = 27000000, + .op_sys_clock = (uint64_t[]){ 87750000 }, + .module_name = "AR0231AT", +}; +#endif + +#if IS_ENABLED(CONFIG_VIDEO_MAX9286) +#define DS_MAX9286_LANES 4 +#define DS_MAX9286_I2C_ADAPTER 4 +#define DS_MAX9286_I2C_ADDRESS 0x48 + +static struct ipu_isys_csi2_config max9286_csi2_cfg = { + .nlanes = DS_MAX9286_LANES, + .port = 4, +}; + +static struct max9286_subdev_i2c_info max9286_subdevs[] = { +#ifdef CONFIG_INTEL_IPU4_AR0231AT + { + .board_info = { + .type = CRLMODULE_NAME, + .addr = AR0231ATA_I2C_ADDRESS, + .platform_data = &ar0231at_pdata, + }, + .i2c_adapter_id = DS_MAX9286_I2C_ADAPTER, + .suffix = 'a', + }, + { + .board_info = { + .type = CRLMODULE_NAME, + .addr = AR0231ATB_I2C_ADDRESS, + .platform_data = &ar0231at_pdata, + }, + .i2c_adapter_id = DS_MAX9286_I2C_ADAPTER, + .suffix = 'b', + }, + { + .board_info = { + .type = CRLMODULE_NAME, + .addr = AR0231ATC_I2C_ADDRESS, + .platform_data = &ar0231at_pdata, + }, 
+ .i2c_adapter_id = DS_MAX9286_I2C_ADAPTER, + .suffix = 'c', + }, + { + .board_info = { + .type = CRLMODULE_NAME, + .addr = AR0231ATD_I2C_ADDRESS, + .platform_data = &ar0231at_pdata, + }, + .i2c_adapter_id = DS_MAX9286_I2C_ADAPTER, + .suffix = 'd', + }, +#endif +}; + +static struct max9286_pdata max9286_pdata = { + .subdev_info = max9286_subdevs, + .subdev_num = ARRAY_SIZE(max9286_subdevs), + .reset_gpio = GPIO_BASE + 63, + .suffix = 'a', +}; + +static struct ipu_isys_subdev_info max9286_sd = { + .csi2 = &max9286_csi2_cfg, + .i2c = { + .board_info = { + .type = "max9286", + .addr = DS_MAX9286_I2C_ADDRESS, + .platform_data = &max9286_pdata, + }, + .i2c_adapter_id = DS_MAX9286_I2C_ADAPTER, + } +}; +#endif + +/* + * Map buttress output sensor clocks to sensors - + * this should be coming from ACPI + */ +static struct ipu_isys_clk_mapping clk_mapping[] = { + { CLKDEV_INIT("2-0036", NULL, NULL), "OSC_CLK_OUT0" }, + { CLKDEV_INIT("2-001a", NULL, NULL), "OSC_CLK_OUT0" }, + { CLKDEV_INIT("4-001a", NULL, NULL), "OSC_CLK_OUT1" }, + { CLKDEV_INIT("2-0010", NULL, NULL), "OSC_CLK_OUT0" }, + { CLKDEV_INIT("4-0010", NULL, NULL), "OSC_CLK_OUT1" }, + { CLKDEV_INIT("2-a0e0", NULL, NULL), "OSC_CLK_OUT0" }, + { CLKDEV_INIT("2-a0e2", NULL, NULL), "OSC_CLK_OUT0" }, + { CLKDEV_INIT("0-0010", NULL, NULL), "OSC_CLK_OUT0" }, + { CLKDEV_INIT("2-000e", NULL, NULL), "OSC_CLK_OUT0" }, + { CLKDEV_INIT("4-000e", NULL, NULL), "OSC_CLK_OUT1" }, + { CLKDEV_INIT("0-0048", NULL, NULL), "OSC_CLK_OUT0" }, + { CLKDEV_INIT("4-0048", NULL, NULL), "OSC_CLK_OUT1" }, + { CLKDEV_INIT(NULL, NULL, NULL), NULL } +}; + +static struct ipu_isys_subdev_pdata pdata = { + .subdevs = (struct ipu_isys_subdev_info *[]) { +#ifdef CONFIG_INTEL_IPU4_OV2740 + &ov2740_crl_sd, +#endif +#ifdef CONFIG_INTEL_IPU4_IMX185 + &imx185_crl_sd, + &imx185_b_crl_sd, +#endif +#ifdef CONFIG_INTEL_IPU4_AR023Z + &ar023z_crl_sd, + &ar023z_b_crl_sd, +#endif +#ifdef CONFIG_INTEL_IPU4_IMX477 + &imx477_crl_sd_slave_1, + &imx477_crl_sd_master, 
+#endif +#ifdef CONFIG_INTEL_IPU4_IMX274 + &imx274_crl_sd, + &imx274_b_crl_sd, +#endif +#ifdef CONFIG_INTEL_IPU4_IMX290 + &imx290_crl_sd, +#endif +#ifdef CONFIG_INTEL_IPU4_OV13860 + &ov13860_crl_sd, +#endif +#ifdef CONFIG_INTEL_IPU4_OV9281 + &ov9281_crl_sd, +#endif +#if IS_ENABLED(CONFIG_VIDEO_BU64295) + &bu64295_sd, +#endif +#ifdef CONFIG_INTEL_IPU4_ADV7481 + &adv7481_crl_sd, +#endif +#ifdef CONFIG_INTEL_IPU4_ADV7481_EVAL + &adv7481_eval_crl_sd, + &adv7481b_eval_crl_sd, +#endif +#if IS_ENABLED(CONFIG_VIDEO_AGGREGATOR_STUB) + &video_aggre_stub_sd, + &video_aggre_b_stub_sd, +#endif +#if IS_ENABLED(CONFIG_VIDEO_TI964) + &ti964_sd, + &ti964_sd_2, + &as_1140_ti964_sd, + &as_1140_ti964_sd_2, +#endif +#if IS_ENABLED(CONFIG_VIDEO_TI960) + &ti960_sd, + &ti960_sd_2, +#endif +#if IS_ENABLED(CONFIG_VIDEO_MAX9286) + &max9286_sd, +#endif + NULL, + }, + .clk_map = clk_mapping, +}; + +static void ipu4_quirk(struct pci_dev *pci_dev) +{ + pci_dev->dev.platform_data = &pdata; +} +DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, IPU_PCI_ID, ipu4_quirk); diff --git a/drivers/media/platform/intel/ipu4-ici-bxt-p-pdata.c b/drivers/media/platform/intel/ipu4-ici-bxt-p-pdata.c new file mode 100644 index 0000000000000..19aef79135a18 --- /dev/null +++ b/drivers/media/platform/intel/ipu4-ici-bxt-p-pdata.c @@ -0,0 +1,471 @@ +// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0) +/* + * Copyright (C) 2018 Intel Corporation + */ + +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include "ipu.h" + +#define GPIO_BASE 422 + +#ifdef CONFIG_INTEL_IPU4_ADV7481 + +#define ADV7481_CVBS_LANES 1 +#define ADV7481_HDMI_LANES 4 +#define ADV7481_HDMI_I2C_ADDRESS 0xe0 +#define ADV7481_CVBS_I2C_ADDRESS 0xe1 +static struct crlmodule_lite_platform_data adv7481_hdmi_pdata_lite = { +#if (!IS_ENABLED(CONFIG_VIDEO_INTEL_UOS)) +// xshutdown GPIO pin unavailable on ACRN UOS + .xshutdown = GPIO_BASE + 63, +#endif + .lanes = ADV7481_HDMI_LANES, + .ext_clk = 24000000, + 
.op_sys_clock = (uint64_t []){600000000}, + .module_name = "ADV7481 HDMI" +}; +static struct ipu_isys_csi2_config adv7481_hdmi_csi2_cfg = { + .nlanes = ADV7481_HDMI_LANES, + .port = 0, +}; +static struct ipu_isys_subdev_info adv7481_hdmi_crl_sd_lite = { + .csi2 = &adv7481_hdmi_csi2_cfg, + .i2c = { + .board_info = { + .type = CRLMODULE_LITE_NAME, + .flags = I2C_CLIENT_TEN, + .addr = ADV7481_HDMI_I2C_ADDRESS, + .platform_data = &adv7481_hdmi_pdata_lite, + }, + .i2c_adapter_id = CONFIG_INTEL_IPU4_ADV7481_I2C_ID, + } +}; + +static struct crlmodule_lite_platform_data adv7481_cvbs_pdata_lite = { +#if (!IS_ENABLED(CONFIG_VIDEO_INTEL_UOS)) +// xshutdown GPIO pin unavailable on ACRN UOS + .xshutdown = GPIO_BASE + 63, +#endif + .lanes = ADV7481_CVBS_LANES, + .ext_clk = 24000000, + .op_sys_clock = (uint64_t []){600000000}, + .module_name = "ADV7481 CVBS" +}; +static struct ipu_isys_csi2_config adv7481_cvbs_csi2_cfg = { + .nlanes = ADV7481_CVBS_LANES, + .port = 4, +}; +static struct ipu_isys_subdev_info adv7481_cvbs_crl_sd_lite = { + .csi2 = &adv7481_cvbs_csi2_cfg, + .i2c = { + .board_info = { + .type = CRLMODULE_LITE_NAME, + .flags = I2C_CLIENT_TEN, + .addr = ADV7481_CVBS_I2C_ADDRESS, + .platform_data = &adv7481_cvbs_pdata_lite, + }, + .i2c_adapter_id = CONFIG_INTEL_IPU4_ADV7481_I2C_ID, + } +}; +#endif + +#ifdef CONFIG_INTEL_IPU4_ADV7481_EVAL + +#define ADV7481_LANES 4 +//below i2c address is dummy one, to be able to register single ADV7481 chip as two sensors +#define ADV7481_I2C_ADDRESS 0xe0 +#define ADV7481B_I2C_ADDRESS 0xe2 + +static struct crlmodule_lite_platform_data adv7481_eval_pdata_lite = { + .xshutdown = GPIO_BASE + 63, + .lanes = ADV7481_HDMI_LANES, + .ext_clk = 24000000, + .op_sys_clock = (uint64_t []){600000000}, + .module_name = "ADV7481_EVAL" +}; +static struct ipu_isys_csi2_config adv7481_eval_csi2_cfg = { + .nlanes = ADV7481_LANES, + .port = 0, +}; +static struct ipu_isys_subdev_info adv7481_eval_crl_sd_lite = { + .csi2 = &adv7481_eval_csi2_cfg, + .i2c = { + 
.board_info = { + .type = CRLMODULE_LITE_NAME, + .flags = I2C_CLIENT_TEN, + .addr = ADV7481_I2C_ADDRESS, + .platform_data = &adv7481_eval_pdata_lite, + }, + .i2c_adapter_id = 2, + } +}; + +static struct crlmodule_lite_platform_data adv7481b_eval_pdata_lite = { + .xshutdown = GPIO_BASE + 63, + .lanes = ADV7481_LANES, + .ext_clk = 24000000, + .op_sys_clock = (uint64_t []){600000000}, + .module_name = "ADV7481B_EVAL" +}; +static struct ipu_isys_csi2_config adv7481b_eval_csi2_cfg = { + .nlanes = ADV7481_LANES, + .port = 4, +}; +static struct ipu_isys_subdev_info adv7481b_eval_crl_sd_lite = { + .csi2 = &adv7481b_eval_csi2_cfg, + .i2c = { + .board_info = { + .type = CRLMODULE_LITE_NAME, + .flags = I2C_CLIENT_TEN, + .addr = ADV7481B_I2C_ADDRESS, + .platform_data = &adv7481b_eval_pdata_lite, + }, + .i2c_adapter_id = 2, + } +}; +#endif + +#ifdef CONFIG_INTEL_IPU4_MAGNA_TI964 + +#define MAGNA_TI964_MIPI_LANES 4 +#define TI964_I2C_ADDRESS 0x3d +static struct crlmodule_lite_platform_data magna_ti964_pdata = { + .xshutdown = GPIO_BASE + 63, + .lanes = MAGNA_TI964_MIPI_LANES, + .ext_clk = 24000000, + .op_sys_clock = (uint64_t []){ 400000000 }, + .module_name = "MAGNA_TI964", +}; +static struct ipu_isys_csi2_config magna_ti964_csi2_cfg = { + .nlanes = MAGNA_TI964_MIPI_LANES, + .port = 0, +}; +static struct ipu_isys_subdev_info magna_ti964_crl_sd = { + .csi2 = &magna_ti964_csi2_cfg, + .i2c = { + .board_info = { + I2C_BOARD_INFO(CRLMODULE_LITE_NAME, TI964_I2C_ADDRESS), + .platform_data = &magna_ti964_pdata, + }, + .i2c_adapter_id = 0, + } +}; + +#endif + +#if IS_ENABLED(CONFIG_INTEL_IPU4_OV10635) +#define OV10635_LANES 4 +#define OV10635_I2C_PHY_ADDR 0x60 /* 0x30 for 7bit addr */ +#define OV10635A_I2C_ADDRESS 0x61 +#define OV10635B_I2C_ADDRESS 0x62 +#define OV10635C_I2C_ADDRESS 0x63 +#define OV10635D_I2C_ADDRESS 0x64 + +static struct crlmodule_lite_platform_data ov10635_pdata = { + .lanes = OV10635_LANES, + .ext_clk = 24000000, + .op_sys_clock = (uint64_t []){ 400000000 }, + 
.module_name = "OV10635", + .id_string = "0xa6 0x35", + + /* + * The pin number of xshutdown will be determined + * and replaced inside TI964 driver. + * The number here stands for which GPIO to connect with. + * 1 means to connect sensor xshutdown to GPIO1 + */ + .xshutdown = 0, +}; +#endif + +#ifdef CONFIG_VIDEO_TI964_ICI +#define TI964_I2C_ADAPTER 2 +#define TI964_I2C_ADAPTER_2 4 +#define TI964_I2C_ADDRESS 0x3d +#define TI964_LANES 4 + +static struct ipu_isys_csi2_config ti964_csi2_cfg = { + .nlanes = TI964_LANES, + .port = 0, +}; + +static struct ipu_isys_csi2_config ti964_csi2_cfg_2 = { + .nlanes = TI964_LANES, + .port = 4, +}; +static struct ti964_subdev_info ti964_subdevs[] = { +#ifdef CONFIG_INTEL_IPU4_OV10635 + { + .board_info = { + .type = CRLMODULE_LITE_NAME, + .addr = OV10635A_I2C_ADDRESS, + .platform_data = &ov10635_pdata, + }, + .i2c_adapter_id = TI964_I2C_ADAPTER, + .rx_port = 0, + .phy_i2c_addr = OV10635_I2C_PHY_ADDR, + .suffix = 'a', + }, + { + .board_info = { + .type = CRLMODULE_LITE_NAME, + .addr = OV10635B_I2C_ADDRESS, + .platform_data = &ov10635_pdata, + }, + .i2c_adapter_id = TI964_I2C_ADAPTER, + .rx_port = 1, + .phy_i2c_addr = OV10635_I2C_PHY_ADDR, + .suffix = 'b', + }, + { + .board_info = { + .type = CRLMODULE_LITE_NAME, + .addr = OV10635C_I2C_ADDRESS, + .platform_data = &ov10635_pdata, + }, + .i2c_adapter_id = TI964_I2C_ADAPTER, + .rx_port = 2, + .phy_i2c_addr = OV10635_I2C_PHY_ADDR, + .suffix = 'c', + }, + { + .board_info = { + .type = CRLMODULE_LITE_NAME, + .addr = OV10635D_I2C_ADDRESS, + .platform_data = &ov10635_pdata, + }, + .i2c_adapter_id = TI964_I2C_ADAPTER, + .rx_port = 3, + .phy_i2c_addr = OV10635_I2C_PHY_ADDR, + .suffix = 'd', + }, +#endif +}; +static struct ti964_subdev_info ti964_subdevs_2[] = { +#ifdef CONFIG_INTEL_IPU4_OV10635 + { + .board_info = { + .type = CRLMODULE_LITE_NAME, + .addr = OV10635A_I2C_ADDRESS, + .platform_data = &ov10635_pdata, + }, + .i2c_adapter_id = TI964_I2C_ADAPTER_2, + .rx_port = 0, + .phy_i2c_addr = 
OV10635_I2C_PHY_ADDR, + .suffix = 'e', + }, + { + .board_info = { + .type = CRLMODULE_LITE_NAME, + .addr = OV10635B_I2C_ADDRESS, + .platform_data = &ov10635_pdata, + }, + .i2c_adapter_id = TI964_I2C_ADAPTER_2, + .rx_port = 1, + .phy_i2c_addr = OV10635_I2C_PHY_ADDR, + .suffix = 'f', + }, + { + .board_info = { + .type = CRLMODULE_LITE_NAME, + .addr = OV10635C_I2C_ADDRESS, + .platform_data = &ov10635_pdata, + }, + .i2c_adapter_id = TI964_I2C_ADAPTER_2, + .rx_port = 2, + .phy_i2c_addr = OV10635_I2C_PHY_ADDR, + .suffix = 'g', + }, + { + .board_info = { + .type = CRLMODULE_LITE_NAME, + .addr = OV10635D_I2C_ADDRESS, + .platform_data = &ov10635_pdata, + }, + .i2c_adapter_id = TI964_I2C_ADAPTER_2, + .rx_port = 3, + .phy_i2c_addr = OV10635_I2C_PHY_ADDR, + .suffix = 'h', + }, +#endif +}; +static struct ti964_pdata ti964_pdata = { + .subdev_info = ti964_subdevs, + .subdev_num = ARRAY_SIZE(ti964_subdevs), + .reset_gpio = GPIO_BASE + 62, + .suffix = 'a', +}; + +static struct ipu_isys_subdev_info ti964_sd = { + .csi2 = &ti964_csi2_cfg, + .i2c = { + .board_info = { + .type = "ti964", + .addr = TI964_I2C_ADDRESS, + .platform_data = &ti964_pdata, + }, + .i2c_adapter_id = TI964_I2C_ADAPTER, + } +}; + +static struct ti964_pdata ti964_pdata_2 = { + .subdev_info = ti964_subdevs_2, + .subdev_num = ARRAY_SIZE(ti964_subdevs_2), + .reset_gpio = GPIO_BASE + 69, + .suffix = 'b', +}; + +static struct ipu_isys_subdev_info ti964_sd_2 = { + .csi2 = &ti964_csi2_cfg_2, + .i2c = { + .board_info = { + .type = "ti964", + .addr = TI964_I2C_ADDRESS, + .platform_data = &ti964_pdata_2, + }, + .i2c_adapter_id = TI964_I2C_ADAPTER_2, + } +}; +#endif + +#ifdef CONFIG_INTEL_IPU4_AR0231AT +#define AR0231AT_LANES 4 +#define AR0231ATA_I2C_ADDRESS 0x11 +#define AR0231ATB_I2C_ADDRESS 0x12 +#define AR0231ATC_I2C_ADDRESS 0x13 +#define AR0231ATD_I2C_ADDRESS 0x14 + +static struct crlmodule_lite_platform_data ar0231at_pdata = { + .lanes = AR0231AT_LANES, + .ext_clk = 27000000, + .op_sys_clock = (uint64_t[]){ 87750000 }, 
+ .module_name = "AR0231AT", +}; +#endif + +#if IS_ENABLED(CONFIG_VIDEO_MAX9286_ICI) +#define DS_MAX9286_LANES 4 +#define DS_MAX9286_I2C_ADAPTER 4 +#define DS_MAX9286_I2C_ADDRESS 0x48 + +static struct ipu_isys_csi2_config max9286_csi2_cfg = { + .nlanes = DS_MAX9286_LANES, + .port = 4, +}; + +static struct max9286_subdev_i2c_info max9286_subdevs[] = { +#ifdef CONFIG_INTEL_IPU4_AR0231AT + { + .board_info = { + .type = CRLMODULE_LITE_NAME, + .addr = AR0231ATA_I2C_ADDRESS, + .platform_data = &ar0231at_pdata, + }, + .i2c_adapter_id = DS_MAX9286_I2C_ADAPTER, + .suffix = 'a', + }, + { + .board_info = { + .type = CRLMODULE_LITE_NAME, + .addr = AR0231ATB_I2C_ADDRESS, + .platform_data = &ar0231at_pdata, + }, + .i2c_adapter_id = DS_MAX9286_I2C_ADAPTER, + .suffix = 'b', + }, + { + .board_info = { + .type = CRLMODULE_LITE_NAME, + .addr = AR0231ATC_I2C_ADDRESS, + .platform_data = &ar0231at_pdata, + }, + .i2c_adapter_id = DS_MAX9286_I2C_ADAPTER, + .suffix = 'c', + }, + { + .board_info = { + .type = CRLMODULE_LITE_NAME, + .addr = AR0231ATD_I2C_ADDRESS, + .platform_data = &ar0231at_pdata, + }, + .i2c_adapter_id = DS_MAX9286_I2C_ADAPTER, + .suffix = 'd', + }, +#endif +}; + + +static struct max9286_pdata max9286_pdata = { + .subdev_info = max9286_subdevs, + .subdev_num = ARRAY_SIZE(max9286_subdevs), + .reset_gpio = GPIO_BASE + 63, + .suffix = 'a', +}; + +static struct ipu_isys_subdev_info max9286_sd = { + .csi2 = &max9286_csi2_cfg, + .i2c = { + .board_info = { + .type = "max9286", + .addr = DS_MAX9286_I2C_ADDRESS, + .platform_data = &max9286_pdata, + }, + .i2c_adapter_id = DS_MAX9286_I2C_ADAPTER, + } +}; +#endif + +/* + * Map buttress output sensor clocks to sensors - + * this should be coming from ACPI + */ +struct ipu_isys_clk_mapping p_mapping[] = { + { CLKDEV_INIT("0-003d", NULL, NULL), "OSC_CLK_OUT1" }, + { CLKDEV_INIT("0-00e1", NULL, NULL), "OSC_CLK_OUT0" }, + { CLKDEV_INIT("0-00e0", NULL, NULL), "OSC_CLK_OUT1" }, + { CLKDEV_INIT("2-a0e0", NULL, NULL), "OSC_CLK_OUT0" }, + { 
CLKDEV_INIT("2-a0e2", NULL, NULL), "OSC_CLK_OUT1" }, + { CLKDEV_INIT(NULL, NULL, NULL), NULL } +}; + +static struct ipu_isys_subdev_pdata pdata = { + .subdevs = (struct ipu_isys_subdev_info *[]) { +#ifdef CONFIG_INTEL_IPU4_ADV7481 + &adv7481_cvbs_crl_sd_lite, + &adv7481_hdmi_crl_sd_lite, +#endif +#ifdef CONFIG_INTEL_IPU4_ADV7481_EVAL + &adv7481_eval_crl_sd_lite, + &adv7481b_eval_crl_sd_lite, +#endif +#ifdef CONFIG_VIDEO_TI964_ICI + &ti964_sd, + &ti964_sd_2, +#endif +#ifdef CONFIG_INTEL_IPU4_MAGNA_TI964 + &magna_ti964_crl_sd, +#endif +#if IS_ENABLED(CONFIG_VIDEO_MAX9286_ICI) + &max9286_sd, +#endif + NULL, + }, + .clk_map = p_mapping, +}; + +static void ipu4_quirk(struct pci_dev *pci_dev) +{ + pci_dev->dev.platform_data = &pdata; +} + +DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, IPU_PCI_ID, + ipu4_quirk); diff --git a/drivers/media/platform/intel/ipu4p-icl-rvp-pdata.c b/drivers/media/platform/intel/ipu4p-icl-rvp-pdata.c new file mode 100644 index 0000000000000..f3906f4d4fa1d --- /dev/null +++ b/drivers/media/platform/intel/ipu4p-icl-rvp-pdata.c @@ -0,0 +1,181 @@ +// SPDX-License-Identifier: GPL-2.0 +// Copyright (C) 2018 Intel Corporation + +#include +#include +#include +#include +#include + +#include +#include +#include "ipu.h" +#include + +#define IMX355_LANES 4 +#define IMX355_I2C_ADDRESS 0x1a +#define IMX319_LANES 4 +#define IMX319_I2C_ADDRESS 0x10 +#define AK7375_I2C_ADDRESS 0xc + +static struct ipu_isys_csi2_config imx355_csi2_cfg = { + .nlanes = IMX355_LANES, + .port = 4, /* WF camera */ +}; + +static struct ipu_isys_subdev_info imx355_sd = { + .csi2 = &imx355_csi2_cfg, + .i2c = { + .board_info = { + I2C_BOARD_INFO("imx355", IMX355_I2C_ADDRESS), + }, + .i2c_adapter_id = 9, + } +}; + +/* FIXME: Remove this after hardware transition. 
*/ +static struct ipu_isys_subdev_info imx355_sd2 = { + .csi2 = &imx355_csi2_cfg, + .i2c = { + .board_info = { + I2C_BOARD_INFO("imx355", 0x10), + }, + .i2c_adapter_id = 9, + } +}; + +static struct ipu_isys_subdev_info ak7375_sd = { + .i2c = { + .board_info = { + I2C_BOARD_INFO("ak7375", AK7375_I2C_ADDRESS), + }, + .i2c_adapter_id = 9, + } +}; + +static struct ipu_isys_csi2_config imx319_csi2_cfg = { + .nlanes = IMX319_LANES, + .port = 0, /* UF camera */ +}; + +static struct ipu_isys_subdev_info imx319_sd = { + .csi2 = &imx319_csi2_cfg, + .i2c = { + .board_info = { + I2C_BOARD_INFO("imx319", IMX319_I2C_ADDRESS), + }, + .i2c_adapter_id = 8, + } +}; + +#ifdef CONFIG_INTEL_IPU4_AR0231AT +#define AR0231AT_LANES 4 +#define AR0231ATA_I2C_ADDRESS 0x11 +#define AR0231ATB_I2C_ADDRESS 0x12 +#define AR0231ATC_I2C_ADDRESS 0x13 +#define AR0231ATD_I2C_ADDRESS 0x14 + +static struct crlmodule_platform_data ar0231at_pdata = { + .lanes = AR0231AT_LANES, + .ext_clk = 24000000, + .op_sys_clock = (uint64_t[]){ 264000000 }, + .module_name = "AR0231AT", +}; +#endif + +#if IS_ENABLED(CONFIG_VIDEO_MAX9286) +#define DS_MAX9286_LANES 4 +#define DS_MAX9286_I2C_ADAPTER_B 3 +#define DS_MAX9286_I2C_ADDRESS 0x48 + +static struct ipu_isys_csi2_config max9286_b_csi2_cfg = { + .nlanes = DS_MAX9286_LANES, + .port = 4, +}; + +struct max9286_subdev_i2c_info max9286_b_subdevs[] = { +#ifdef CONFIG_INTEL_IPU4_AR0231AT + { + .board_info = { + .type = CRLMODULE_NAME, + .addr = AR0231ATA_I2C_ADDRESS, + .platform_data = &ar0231at_pdata, + }, + .i2c_adapter_id = DS_MAX9286_I2C_ADAPTER_B, + }, + { + .board_info = { + .type = CRLMODULE_NAME, + .addr = AR0231ATB_I2C_ADDRESS, + .platform_data = &ar0231at_pdata, + }, + .i2c_adapter_id = DS_MAX9286_I2C_ADAPTER_B, + }, + { + .board_info = { + .type = CRLMODULE_NAME, + .addr = AR0231ATC_I2C_ADDRESS, + .platform_data = &ar0231at_pdata, + }, + .i2c_adapter_id = DS_MAX9286_I2C_ADAPTER_B, + }, + { + .board_info = { + .type = CRLMODULE_NAME, + .addr = 
AR0231ATD_I2C_ADDRESS, + .platform_data = &ar0231at_pdata, + }, + .i2c_adapter_id = DS_MAX9286_I2C_ADAPTER_B, + }, +#endif +}; + +static struct max9286_pdata max9286_b_pdata = { + .subdev_info = max9286_b_subdevs, + .subdev_num = ARRAY_SIZE(max9286_b_subdevs), + .reset_gpio = 195, +}; + +static struct ipu_isys_subdev_info max9286_b_sd = { + .csi2 = &max9286_b_csi2_cfg, + .i2c = { + .board_info = { + .type = "max9286", + .addr = DS_MAX9286_I2C_ADDRESS, + .platform_data = &max9286_b_pdata, + }, + .i2c_adapter_id = DS_MAX9286_I2C_ADAPTER_B, + } +}; +#endif + +static struct ipu_isys_clk_mapping clk_mapping[] = { + { CLKDEV_INIT("3-0048", NULL, NULL), "OSC_CLK_OUT1" }, + { CLKDEV_INIT(NULL, NULL, NULL), NULL } +}; + +static struct ipu_isys_subdev_pdata pdata = { + .subdevs = (struct ipu_isys_subdev_info *[]) { + &imx355_sd, + &imx355_sd2, + &imx319_sd, + &ak7375_sd, +#if IS_ENABLED(CONFIG_VIDEO_MAX9286) + &max9286_b_sd, +#endif + NULL, + }, + .clk_map = clk_mapping, +}; + +static void ipu4p_quirk(struct pci_dev *pci_dev) +{ + pr_info("Intel platform data PCI quirk for IPU4P\n"); + pci_dev->dev.platform_data = &pdata; +} +DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, IPU_PCI_ID, ipu4p_quirk); + +MODULE_AUTHOR("Bingbu Cao "); +MODULE_AUTHOR("Qiu, Tianshu "); +MODULE_AUTHOR("Kun Jiang "); +MODULE_LICENSE("GPL"); diff --git a/drivers/media/platform/omap3isp/isp.c b/drivers/media/platform/omap3isp/isp.c index 842e2235047d9..432bc7fbedc99 100644 --- a/drivers/media/platform/omap3isp/isp.c +++ b/drivers/media/platform/omap3isp/isp.c @@ -1587,6 +1587,8 @@ static void isp_pm_complete(struct device *dev) static void isp_unregister_entities(struct isp_device *isp) { + media_device_unregister(&isp->media_dev); + omap3isp_csi2_unregister_entities(&isp->isp_csi2a); omap3isp_ccp2_unregister_entities(&isp->isp_ccp2); omap3isp_ccdc_unregister_entities(&isp->isp_ccdc); @@ -1597,7 +1599,6 @@ static void isp_unregister_entities(struct isp_device *isp) 
omap3isp_stat_unregister_entities(&isp->isp_hist); v4l2_device_unregister(&isp->v4l2_dev); - media_device_unregister(&isp->media_dev); media_device_cleanup(&isp->media_dev); } diff --git a/drivers/media/platform/qcom/venus/core.c b/drivers/media/platform/qcom/venus/core.c index bb6add9d340e2..5b8350e87e750 100644 --- a/drivers/media/platform/qcom/venus/core.c +++ b/drivers/media/platform/qcom/venus/core.c @@ -264,6 +264,14 @@ static int venus_probe(struct platform_device *pdev) if (ret) return ret; + if (!dev->dma_parms) { + dev->dma_parms = devm_kzalloc(dev, sizeof(*dev->dma_parms), + GFP_KERNEL); + if (!dev->dma_parms) + return -ENOMEM; + } + dma_set_max_seg_size(dev, DMA_BIT_MASK(32)); + INIT_LIST_HEAD(&core->instances); mutex_init(&core->lock); INIT_DELAYED_WORK(&core->work, venus_sys_error_handler); diff --git a/drivers/media/platform/vicodec/vicodec-core.c b/drivers/media/platform/vicodec/vicodec-core.c index 408cd55d35801..7a33a52eaccaa 100644 --- a/drivers/media/platform/vicodec/vicodec-core.c +++ b/drivers/media/platform/vicodec/vicodec-core.c @@ -42,7 +42,7 @@ MODULE_PARM_DESC(debug, " activates debug info"); #define MAX_WIDTH 4096U #define MIN_WIDTH 640U #define MAX_HEIGHT 2160U -#define MIN_HEIGHT 480U +#define MIN_HEIGHT 360U #define dprintk(dev, fmt, arg...) 
\ v4l2_dbg(1, debug, &dev->v4l2_dev, "%s: " fmt, __func__, ## arg) @@ -438,7 +438,8 @@ static int job_ready(void *priv) for (; p < p_out + sz; p++) { u32 copy; - p = memchr(p, magic[ctx->comp_magic_cnt], sz); + p = memchr(p, magic[ctx->comp_magic_cnt], + p_out + sz - p); if (!p) { ctx->comp_magic_cnt = 0; break; diff --git a/drivers/media/platform/vivid/vivid-core.h b/drivers/media/platform/vivid/vivid-core.h index 477c80a4d44c0..cd4c8230563c7 100644 --- a/drivers/media/platform/vivid/vivid-core.h +++ b/drivers/media/platform/vivid/vivid-core.h @@ -111,7 +111,7 @@ enum vivid_colorspace { VIVID_CS_170M, VIVID_CS_709, VIVID_CS_SRGB, - VIVID_CS_ADOBERGB, + VIVID_CS_OPRGB, VIVID_CS_2020, VIVID_CS_DCI_P3, VIVID_CS_240M, diff --git a/drivers/media/platform/vivid/vivid-ctrls.c b/drivers/media/platform/vivid/vivid-ctrls.c index 5429193fbb91d..999aa101b1501 100644 --- a/drivers/media/platform/vivid/vivid-ctrls.c +++ b/drivers/media/platform/vivid/vivid-ctrls.c @@ -348,7 +348,7 @@ static int vivid_vid_cap_s_ctrl(struct v4l2_ctrl *ctrl) V4L2_COLORSPACE_SMPTE170M, V4L2_COLORSPACE_REC709, V4L2_COLORSPACE_SRGB, - V4L2_COLORSPACE_ADOBERGB, + V4L2_COLORSPACE_OPRGB, V4L2_COLORSPACE_BT2020, V4L2_COLORSPACE_DCI_P3, V4L2_COLORSPACE_SMPTE240M, @@ -729,7 +729,7 @@ static const char * const vivid_ctrl_colorspace_strings[] = { "SMPTE 170M", "Rec. 709", "sRGB", - "AdobeRGB", + "opRGB", "BT.2020", "DCI-P3", "SMPTE 240M", @@ -752,7 +752,7 @@ static const char * const vivid_ctrl_xfer_func_strings[] = { "Default", "Rec. 
709", "sRGB", - "AdobeRGB", + "opRGB", "SMPTE 240M", "None", "DCI-P3", diff --git a/drivers/media/platform/vivid/vivid-kthread-cap.c b/drivers/media/platform/vivid/vivid-kthread-cap.c index f06003bb8e423..2a92e5aac9ed3 100644 --- a/drivers/media/platform/vivid/vivid-kthread-cap.c +++ b/drivers/media/platform/vivid/vivid-kthread-cap.c @@ -865,8 +865,11 @@ int vivid_start_generating_vid_cap(struct vivid_dev *dev, bool *pstreaming) "%s-vid-cap", dev->v4l2_dev.name); if (IS_ERR(dev->kthread_vid_cap)) { + int err = PTR_ERR(dev->kthread_vid_cap); + + dev->kthread_vid_cap = NULL; v4l2_err(&dev->v4l2_dev, "kernel_thread() failed\n"); - return PTR_ERR(dev->kthread_vid_cap); + return err; } *pstreaming = true; vivid_grab_controls(dev, true); diff --git a/drivers/media/platform/vivid/vivid-kthread-out.c b/drivers/media/platform/vivid/vivid-kthread-out.c index 9981e7548019b..488590594150e 100644 --- a/drivers/media/platform/vivid/vivid-kthread-out.c +++ b/drivers/media/platform/vivid/vivid-kthread-out.c @@ -236,8 +236,11 @@ int vivid_start_generating_vid_out(struct vivid_dev *dev, bool *pstreaming) "%s-vid-out", dev->v4l2_dev.name); if (IS_ERR(dev->kthread_vid_out)) { + int err = PTR_ERR(dev->kthread_vid_out); + + dev->kthread_vid_out = NULL; v4l2_err(&dev->v4l2_dev, "kernel_thread() failed\n"); - return PTR_ERR(dev->kthread_vid_out); + return err; } *pstreaming = true; vivid_grab_controls(dev, true); diff --git a/drivers/media/platform/vivid/vivid-vid-cap.c b/drivers/media/platform/vivid/vivid-vid-cap.c index 1599159f2574e..baa7c83ee6e02 100644 --- a/drivers/media/platform/vivid/vivid-vid-cap.c +++ b/drivers/media/platform/vivid/vivid-vid-cap.c @@ -438,6 +438,8 @@ void vivid_update_format_cap(struct vivid_dev *dev, bool keep_controls) tpg_s_rgb_range(&dev->tpg, v4l2_ctrl_g_ctrl(dev->rgb_range_cap)); break; } + vfree(dev->bitmap_cap); + dev->bitmap_cap = NULL; vivid_update_quality(dev); tpg_reset_source(&dev->tpg, dev->src_rect.width, dev->src_rect.height, dev->field_cap); 
dev->crop_cap = dev->src_rect; diff --git a/drivers/media/platform/vivid/vivid-vid-common.c b/drivers/media/platform/vivid/vivid-vid-common.c index be531caa2cdf9..2079861d2270f 100644 --- a/drivers/media/platform/vivid/vivid-vid-common.c +++ b/drivers/media/platform/vivid/vivid-vid-common.c @@ -21,7 +21,7 @@ const struct v4l2_dv_timings_cap vivid_dv_timings_cap = { .type = V4L2_DV_BT_656_1120, /* keep this initialization for compatibility with GCC < 4.4.6 */ .reserved = { 0 }, - V4L2_INIT_BT_TIMINGS(0, MAX_WIDTH, 0, MAX_HEIGHT, 14000000, 775000000, + V4L2_INIT_BT_TIMINGS(16, MAX_WIDTH, 16, MAX_HEIGHT, 14000000, 775000000, V4L2_DV_BT_STD_CEA861 | V4L2_DV_BT_STD_DMT | V4L2_DV_BT_STD_CVT | V4L2_DV_BT_STD_GTF, V4L2_DV_BT_CAP_PROGRESSIVE | V4L2_DV_BT_CAP_INTERLACED) diff --git a/drivers/media/platform/vivid/vivid-vid-out.c b/drivers/media/platform/vivid/vivid-vid-out.c index 51fec66d8d455..50248e2176a08 100644 --- a/drivers/media/platform/vivid/vivid-vid-out.c +++ b/drivers/media/platform/vivid/vivid-vid-out.c @@ -413,7 +413,7 @@ int vivid_try_fmt_vid_out(struct file *file, void *priv, mp->colorspace = V4L2_COLORSPACE_SMPTE170M; } else if (mp->colorspace != V4L2_COLORSPACE_SMPTE170M && mp->colorspace != V4L2_COLORSPACE_REC709 && - mp->colorspace != V4L2_COLORSPACE_ADOBERGB && + mp->colorspace != V4L2_COLORSPACE_OPRGB && mp->colorspace != V4L2_COLORSPACE_BT2020 && mp->colorspace != V4L2_COLORSPACE_SRGB) { mp->colorspace = V4L2_COLORSPACE_REC709; diff --git a/drivers/media/rc/rc-main.c b/drivers/media/rc/rc-main.c index ca68e1d2b2f98..8b2c16dd58bd5 100644 --- a/drivers/media/rc/rc-main.c +++ b/drivers/media/rc/rc-main.c @@ -707,7 +707,8 @@ void rc_repeat(struct rc_dev *dev) (dev->last_toggle ? 
LIRC_SCANCODE_FLAG_TOGGLE : 0) }; - ir_lirc_scancode_event(dev, &sc); + if (dev->allowed_protocols != RC_PROTO_BIT_CEC) + ir_lirc_scancode_event(dev, &sc); spin_lock_irqsave(&dev->keylock, flags); @@ -747,7 +748,8 @@ static void ir_do_keydown(struct rc_dev *dev, enum rc_proto protocol, .keycode = keycode }; - ir_lirc_scancode_event(dev, &sc); + if (dev->allowed_protocols != RC_PROTO_BIT_CEC) + ir_lirc_scancode_event(dev, &sc); if (new_event && dev->keypressed) ir_do_keyup(dev, false); diff --git a/drivers/media/usb/dvb-usb-v2/dvbsky.c b/drivers/media/usb/dvb-usb-v2/dvbsky.c index 1aa88d94e57f5..e28bd8836751e 100644 --- a/drivers/media/usb/dvb-usb-v2/dvbsky.c +++ b/drivers/media/usb/dvb-usb-v2/dvbsky.c @@ -31,6 +31,7 @@ MODULE_PARM_DESC(disable_rc, "Disable inbuilt IR receiver."); DVB_DEFINE_MOD_OPT_ADAPTER_NR(adapter_nr); struct dvbsky_state { + struct mutex stream_mutex; u8 ibuf[DVBSKY_BUF_LEN]; u8 obuf[DVBSKY_BUF_LEN]; u8 last_lock; @@ -67,17 +68,18 @@ static int dvbsky_usb_generic_rw(struct dvb_usb_device *d, static int dvbsky_stream_ctrl(struct dvb_usb_device *d, u8 onoff) { + struct dvbsky_state *state = d_to_priv(d); int ret; - static u8 obuf_pre[3] = { 0x37, 0, 0 }; - static u8 obuf_post[3] = { 0x36, 3, 0 }; + u8 obuf_pre[3] = { 0x37, 0, 0 }; + u8 obuf_post[3] = { 0x36, 3, 0 }; - mutex_lock(&d->usb_mutex); - ret = dvb_usbv2_generic_rw_locked(d, obuf_pre, 3, NULL, 0); + mutex_lock(&state->stream_mutex); + ret = dvbsky_usb_generic_rw(d, obuf_pre, 3, NULL, 0); if (!ret && onoff) { msleep(20); - ret = dvb_usbv2_generic_rw_locked(d, obuf_post, 3, NULL, 0); + ret = dvbsky_usb_generic_rw(d, obuf_post, 3, NULL, 0); } - mutex_unlock(&d->usb_mutex); + mutex_unlock(&state->stream_mutex); return ret; } @@ -606,6 +608,8 @@ static int dvbsky_init(struct dvb_usb_device *d) if (ret) return ret; */ + mutex_init(&state->stream_mutex); + state->last_lock = 0; return 0; diff --git a/drivers/media/usb/dvb-usb-v2/usb_urb.c b/drivers/media/usb/dvb-usb-v2/usb_urb.c index 
024c751eb1659..2ad2ddeaff513 100644 --- a/drivers/media/usb/dvb-usb-v2/usb_urb.c +++ b/drivers/media/usb/dvb-usb-v2/usb_urb.c @@ -155,7 +155,6 @@ static int usb_urb_alloc_bulk_urbs(struct usb_data_stream *stream) stream->props.u.bulk.buffersize, usb_urb_complete, stream); - stream->urb_list[i]->transfer_flags = URB_FREE_BUFFER; stream->urbs_initialized++; } return 0; @@ -186,7 +185,7 @@ static int usb_urb_alloc_isoc_urbs(struct usb_data_stream *stream) urb->complete = usb_urb_complete; urb->pipe = usb_rcvisocpipe(stream->udev, stream->props.endpoint); - urb->transfer_flags = URB_ISO_ASAP | URB_FREE_BUFFER; + urb->transfer_flags = URB_ISO_ASAP; urb->interval = stream->props.u.isoc.interval; urb->number_of_packets = stream->props.u.isoc.framesperurb; urb->transfer_buffer_length = stream->props.u.isoc.framesize * @@ -210,7 +209,7 @@ static int usb_free_stream_buffers(struct usb_data_stream *stream) if (stream->state & USB_STATE_URB_BUF) { while (stream->buf_num) { stream->buf_num--; - stream->buf_list[stream->buf_num] = NULL; + kfree(stream->buf_list[stream->buf_num]); } } diff --git a/drivers/media/usb/em28xx/em28xx-cards.c b/drivers/media/usb/em28xx/em28xx-cards.c index 71c829f31d3bb..87b887b7604ef 100644 --- a/drivers/media/usb/em28xx/em28xx-cards.c +++ b/drivers/media/usb/em28xx/em28xx-cards.c @@ -2141,13 +2141,13 @@ const struct em28xx_board em28xx_boards[] = { .input = { { .type = EM28XX_VMUX_COMPOSITE, .vmux = TVP5150_COMPOSITE1, - .amux = EM28XX_AUDIO_SRC_LINE, + .amux = EM28XX_AMUX_LINE_IN, .gpio = terratec_av350_unmute_gpio, }, { .type = EM28XX_VMUX_SVIDEO, .vmux = TVP5150_SVIDEO, - .amux = EM28XX_AUDIO_SRC_LINE, + .amux = EM28XX_AMUX_LINE_IN, .gpio = terratec_av350_unmute_gpio, } }, }, @@ -3039,6 +3039,9 @@ static int em28xx_hint_board(struct em28xx *dev) static void em28xx_card_setup(struct em28xx *dev) { + int i, j, idx; + bool duplicate_entry; + /* * If the device can be a webcam, seek for a sensor. * If sensor is not found, then it isn't a webcam. 
@@ -3195,6 +3198,32 @@ static void em28xx_card_setup(struct em28xx *dev) /* Allow override tuner type by a module parameter */ if (tuner >= 0) dev->tuner_type = tuner; + + /* + * Dynamically generate a list of valid audio inputs for this + * specific board, mapping them via enum em28xx_amux. + */ + + idx = 0; + for (i = 0; i < MAX_EM28XX_INPUT; i++) { + if (!INPUT(i)->type) + continue; + + /* Skip already mapped audio inputs */ + duplicate_entry = false; + for (j = 0; j < idx; j++) { + if (INPUT(i)->amux == dev->amux_map[j]) { + duplicate_entry = true; + break; + } + } + if (duplicate_entry) + continue; + + dev->amux_map[idx++] = INPUT(i)->amux; + } + for (; idx < MAX_EM28XX_INPUT; idx++) + dev->amux_map[idx] = EM28XX_AMUX_UNUSED; } void em28xx_setup_xc3028(struct em28xx *dev, struct xc2028_ctrl *ctl) diff --git a/drivers/media/usb/em28xx/em28xx-video.c b/drivers/media/usb/em28xx/em28xx-video.c index 68571bf36d286..3bf98ac897ec3 100644 --- a/drivers/media/usb/em28xx/em28xx-video.c +++ b/drivers/media/usb/em28xx/em28xx-video.c @@ -1093,6 +1093,8 @@ int em28xx_start_analog_streaming(struct vb2_queue *vq, unsigned int count) em28xx_videodbg("%s\n", __func__); + dev->v4l2->field_count = 0; + /* * Make sure streaming is not already in progress for this type * of filehandle (e.g. video, vbi) @@ -1471,9 +1473,9 @@ static int vidioc_try_fmt_vid_cap(struct file *file, void *priv, fmt = format_by_fourcc(f->fmt.pix.pixelformat); if (!fmt) { - em28xx_videodbg("Fourcc format (%08x) invalid.\n", - f->fmt.pix.pixelformat); - return -EINVAL; + fmt = &format[0]; + em28xx_videodbg("Fourcc format (%08x) invalid. 
Using default (%08x).\n", + f->fmt.pix.pixelformat, fmt->fourcc); } if (dev->board.is_em2800) { @@ -1666,6 +1668,7 @@ static int vidioc_enum_input(struct file *file, void *priv, { struct em28xx *dev = video_drvdata(file); unsigned int n; + int j; n = i->index; if (n >= MAX_EM28XX_INPUT) @@ -1685,6 +1688,12 @@ static int vidioc_enum_input(struct file *file, void *priv, if (dev->is_webcam) i->capabilities = 0; + /* Dynamically generates an audioset bitmask */ + i->audioset = 0; + for (j = 0; j < MAX_EM28XX_INPUT; j++) + if (dev->amux_map[j] != EM28XX_AMUX_UNUSED) + i->audioset |= 1 << j; + return 0; } @@ -1710,11 +1719,24 @@ static int vidioc_s_input(struct file *file, void *priv, unsigned int i) return 0; } -static int vidioc_g_audio(struct file *file, void *priv, struct v4l2_audio *a) +static int em28xx_fill_audio_input(struct em28xx *dev, + const char *s, + struct v4l2_audio *a, + unsigned int index) { - struct em28xx *dev = video_drvdata(file); + unsigned int idx = dev->amux_map[index]; + + /* + * With msp3400, almost all mappings use the default (amux = 0). + * The only one may use a different value is WinTV USB2, where it + * can also be SCART1 input. + * As it is very doubtful that we would see new boards with msp3400, + * let's just reuse the existing switch. 
+ */ + if (dev->has_msp34xx && idx != EM28XX_AMUX_UNUSED) + idx = EM28XX_AMUX_LINE_IN; - switch (a->index) { + switch (idx) { case EM28XX_AMUX_VIDEO: strcpy(a->name, "Television"); break; @@ -1739,32 +1761,79 @@ static int vidioc_g_audio(struct file *file, void *priv, struct v4l2_audio *a) case EM28XX_AMUX_PCM_OUT: strcpy(a->name, "PCM"); break; + case EM28XX_AMUX_UNUSED: default: return -EINVAL; } - - a->index = dev->ctl_ainput; + a->index = index; a->capability = V4L2_AUDCAP_STEREO; + em28xx_videodbg("%s: audio input index %d is '%s'\n", + s, a->index, a->name); + return 0; } +static int vidioc_enumaudio(struct file *file, void *fh, struct v4l2_audio *a) +{ + struct em28xx *dev = video_drvdata(file); + + if (a->index >= MAX_EM28XX_INPUT) + return -EINVAL; + + return em28xx_fill_audio_input(dev, __func__, a, a->index); +} + +static int vidioc_g_audio(struct file *file, void *priv, struct v4l2_audio *a) +{ + struct em28xx *dev = video_drvdata(file); + int i; + + for (i = 0; i < MAX_EM28XX_INPUT; i++) + if (dev->ctl_ainput == dev->amux_map[i]) + return em28xx_fill_audio_input(dev, __func__, a, i); + + /* Should never happen! */ + return -EINVAL; +} + static int vidioc_s_audio(struct file *file, void *priv, const struct v4l2_audio *a) { struct em28xx *dev = video_drvdata(file); + int idx, i; if (a->index >= MAX_EM28XX_INPUT) return -EINVAL; - if (!INPUT(a->index)->type) + + idx = dev->amux_map[a->index]; + + if (idx == EM28XX_AMUX_UNUSED) return -EINVAL; - dev->ctl_ainput = INPUT(a->index)->amux; - dev->ctl_aoutput = INPUT(a->index)->aout; + dev->ctl_ainput = idx; + + /* + * FIXME: This is wrong, as different inputs at em28xx_cards + * may have different audio outputs. So, the right thing + * to do is to implement VIDIOC_G_AUDOUT/VIDIOC_S_AUDOUT. + * With the current board definitions, this would work fine, + * as, currently, all boards fit. 
+ */ + for (i = 0; i < MAX_EM28XX_INPUT; i++) + if (idx == dev->amux_map[i]) + break; + if (i == MAX_EM28XX_INPUT) + return -EINVAL; + + dev->ctl_aoutput = INPUT(i)->aout; if (!dev->ctl_aoutput) dev->ctl_aoutput = EM28XX_AOUT_MASTER; + em28xx_videodbg("%s: set audio input to %d\n", __func__, + dev->ctl_ainput); + return 0; } @@ -2302,6 +2371,7 @@ static const struct v4l2_ioctl_ops video_ioctl_ops = { .vidioc_try_fmt_vbi_cap = vidioc_g_fmt_vbi_cap, .vidioc_s_fmt_vbi_cap = vidioc_g_fmt_vbi_cap, .vidioc_enum_framesizes = vidioc_enum_framesizes, + .vidioc_enumaudio = vidioc_enumaudio, .vidioc_g_audio = vidioc_g_audio, .vidioc_s_audio = vidioc_s_audio, diff --git a/drivers/media/usb/em28xx/em28xx.h b/drivers/media/usb/em28xx/em28xx.h index 953caac025f22..a551072e62ed1 100644 --- a/drivers/media/usb/em28xx/em28xx.h +++ b/drivers/media/usb/em28xx/em28xx.h @@ -335,6 +335,9 @@ enum em28xx_usb_audio_type { /** * em28xx_amux - describes the type of audio input used by em28xx * + * @EM28XX_AMUX_UNUSED: + * Used only on em28xx dev->map field, in order to mark an entry + * as unused. * @EM28XX_AMUX_VIDEO: * On devices without AC97, this is the only value that it is currently * allowed. @@ -369,7 +372,8 @@ enum em28xx_usb_audio_type { * same time, via the alsa mux. 
*/ enum em28xx_amux { - EM28XX_AMUX_VIDEO, + EM28XX_AMUX_UNUSED = -1, + EM28XX_AMUX_VIDEO = 0, EM28XX_AMUX_LINE_IN, /* Some less-common mixer setups */ @@ -692,6 +696,8 @@ struct em28xx { unsigned int ctl_input; // selected input unsigned int ctl_ainput;// selected audio input unsigned int ctl_aoutput;// selected audio output + enum em28xx_amux amux_map[MAX_EM28XX_INPUT]; + int mute; int volume; diff --git a/drivers/media/usb/gspca/gspca.c b/drivers/media/usb/gspca/gspca.c index 57aa521e16b15..405a6a76d820f 100644 --- a/drivers/media/usb/gspca/gspca.c +++ b/drivers/media/usb/gspca/gspca.c @@ -426,10 +426,10 @@ void gspca_frame_add(struct gspca_dev *gspca_dev, /* append the packet to the frame buffer */ if (len > 0) { - if (gspca_dev->image_len + len > gspca_dev->pixfmt.sizeimage) { + if (gspca_dev->image_len + len > PAGE_ALIGN(gspca_dev->pixfmt.sizeimage)) { gspca_err(gspca_dev, "frame overflow %d > %d\n", gspca_dev->image_len + len, - gspca_dev->pixfmt.sizeimage); + PAGE_ALIGN(gspca_dev->pixfmt.sizeimage)); packet_type = DISCARD_PACKET; } else { /* !! image is NULL only when last pkt is LAST or DISCARD @@ -1297,18 +1297,19 @@ static int gspca_queue_setup(struct vb2_queue *vq, unsigned int sizes[], struct device *alloc_devs[]) { struct gspca_dev *gspca_dev = vb2_get_drv_priv(vq); + unsigned int size = PAGE_ALIGN(gspca_dev->pixfmt.sizeimage); if (*nplanes) - return sizes[0] < gspca_dev->pixfmt.sizeimage ? -EINVAL : 0; + return sizes[0] < size ? 
-EINVAL : 0; *nplanes = 1; - sizes[0] = gspca_dev->pixfmt.sizeimage; + sizes[0] = size; return 0; } static int gspca_buffer_prepare(struct vb2_buffer *vb) { struct gspca_dev *gspca_dev = vb2_get_drv_priv(vb->vb2_queue); - unsigned long size = gspca_dev->pixfmt.sizeimage; + unsigned long size = PAGE_ALIGN(gspca_dev->pixfmt.sizeimage); if (vb2_plane_size(vb, 0) < size) { gspca_err(gspca_dev, "buffer too small (%lu < %lu)\n", diff --git a/drivers/media/usb/uvc/uvc_driver.c b/drivers/media/usb/uvc/uvc_driver.c index d46dc432456c4..361abbc004866 100644 --- a/drivers/media/usb/uvc/uvc_driver.c +++ b/drivers/media/usb/uvc/uvc_driver.c @@ -1824,11 +1824,7 @@ static void uvc_delete(struct kref *kref) usb_put_intf(dev->intf); usb_put_dev(dev->udev); - if (dev->vdev.dev) - v4l2_device_unregister(&dev->vdev); #ifdef CONFIG_MEDIA_CONTROLLER - if (media_devnode_is_registered(dev->mdev.devnode)) - media_device_unregister(&dev->mdev); media_device_cleanup(&dev->mdev); #endif @@ -1885,6 +1881,15 @@ static void uvc_unregister_video(struct uvc_device *dev) uvc_debugfs_cleanup_stream(stream); } + + uvc_status_unregister(dev); + + if (dev->vdev.dev) + v4l2_device_unregister(&dev->vdev); +#ifdef CONFIG_MEDIA_CONTROLLER + if (media_devnode_is_registered(dev->mdev.devnode)) + media_device_unregister(&dev->mdev); +#endif } int uvc_register_video_device(struct uvc_device *dev, diff --git a/drivers/media/usb/uvc/uvc_status.c b/drivers/media/usb/uvc/uvc_status.c index 0722dc684378f..883e4cab45e79 100644 --- a/drivers/media/usb/uvc/uvc_status.c +++ b/drivers/media/usb/uvc/uvc_status.c @@ -54,7 +54,7 @@ static int uvc_input_init(struct uvc_device *dev) return ret; } -static void uvc_input_cleanup(struct uvc_device *dev) +static void uvc_input_unregister(struct uvc_device *dev) { if (dev->input) input_unregister_device(dev->input); @@ -71,7 +71,7 @@ static void uvc_input_report_key(struct uvc_device *dev, unsigned int code, #else #define uvc_input_init(dev) -#define uvc_input_cleanup(dev) 
+#define uvc_input_unregister(dev) #define uvc_input_report_key(dev, code, value) #endif /* CONFIG_USB_VIDEO_CLASS_INPUT_EVDEV */ @@ -292,12 +292,16 @@ int uvc_status_init(struct uvc_device *dev) return 0; } -void uvc_status_cleanup(struct uvc_device *dev) +void uvc_status_unregister(struct uvc_device *dev) { usb_kill_urb(dev->int_urb); + uvc_input_unregister(dev); +} + +void uvc_status_cleanup(struct uvc_device *dev) +{ usb_free_urb(dev->int_urb); kfree(dev->status); - uvc_input_cleanup(dev); } int uvc_status_start(struct uvc_device *dev, gfp_t flags) diff --git a/drivers/media/usb/uvc/uvcvideo.h b/drivers/media/usb/uvc/uvcvideo.h index e5f5d84f1d1d5..a738486fd9d64 100644 --- a/drivers/media/usb/uvc/uvcvideo.h +++ b/drivers/media/usb/uvc/uvcvideo.h @@ -750,6 +750,7 @@ int uvc_register_video_device(struct uvc_device *dev, /* Status */ int uvc_status_init(struct uvc_device *dev); +void uvc_status_unregister(struct uvc_device *dev); void uvc_status_cleanup(struct uvc_device *dev); int uvc_status_start(struct uvc_device *dev, gfp_t flags); void uvc_status_stop(struct uvc_device *dev); diff --git a/drivers/media/v4l2-core/v4l2-compat-ioctl32.c b/drivers/media/v4l2-core/v4l2-compat-ioctl32.c index 6481212fda772..1fbb677143125 100644 --- a/drivers/media/v4l2-core/v4l2-compat-ioctl32.c +++ b/drivers/media/v4l2-core/v4l2-compat-ioctl32.c @@ -481,7 +481,7 @@ struct v4l2_buffer32 { __s32 fd; } m; __u32 length; - __u32 reserved2; + __u32 request; __u32 reserved; }; @@ -581,6 +581,7 @@ static int get_v4l2_buffer32(struct v4l2_buffer __user *p64, { u32 type; u32 length; + u32 request; enum v4l2_memory memory; struct v4l2_plane32 __user *uplane32; struct v4l2_plane __user *uplane; @@ -595,7 +596,9 @@ static int get_v4l2_buffer32(struct v4l2_buffer __user *p64, get_user(memory, &p32->memory) || put_user(memory, &p64->memory) || get_user(length, &p32->length) || - put_user(length, &p64->length)) + put_user(length, &p64->length) || + get_user(request, &p32->request) || + 
put_user(request, &p64->request)) return -EFAULT; if (V4L2_TYPE_IS_OUTPUT(type)) @@ -677,6 +680,7 @@ static int put_v4l2_buffer32(struct v4l2_buffer __user *p64, { u32 type; u32 length; + u32 request; enum v4l2_memory memory; struct v4l2_plane32 __user *uplane32; struct v4l2_plane *uplane; @@ -698,10 +702,12 @@ static int put_v4l2_buffer32(struct v4l2_buffer __user *p64, assign_in_user(&p32->timestamp.tv_usec, &p64->timestamp.tv_usec) || copy_in_user(&p32->timecode, &p64->timecode, sizeof(p64->timecode)) || assign_in_user(&p32->sequence, &p64->sequence) || - assign_in_user(&p32->reserved2, &p64->reserved2) || + assign_in_user(&p32->request, &p64->request) || assign_in_user(&p32->reserved, &p64->reserved) || get_user(length, &p64->length) || - put_user(length, &p32->length)) + put_user(length, &p32->length) || + get_user(request, &p64->length) || + put_user(request, &p32->length)) return -EFAULT; if (V4L2_TYPE_IS_MULTIPLANAR(type)) { @@ -1045,6 +1051,57 @@ static int put_v4l2_event32(struct v4l2_event __user *p64, return 0; } +struct v4l2_subdev_routing32 { + compat_caddr_t routes; + __u32 num_routes; + __u32 reserved[5]; +}; + +static int get_v4l2_subdev_routing(struct v4l2_subdev_routing __user *kp, + struct v4l2_subdev_routing32 __user *up) +{ + compat_caddr_t p; + u32 num_routes; + + if (!access_ok(VERIFY_READ, up, sizeof(*up)) || + get_user(p, &up->routes) || + put_user(compat_ptr(p), &kp->routes) || + get_user(num_routes, &kp->num_routes) || + assign_in_user(&kp->num_routes, &up->num_routes) || + !access_ok(VERIFY_READ, up->reserved, sizeof(*up->reserved)) || + num_routes > U32_MAX / sizeof(*kp->routes)) + return -EFAULT; + + if (!access_ok(VERIFY_READ, compat_ptr(p), + num_routes * (u32)sizeof(*kp->routes))) + return -EFAULT; + + return 0; +} + +static int put_v4l2_subdev_routing(struct v4l2_subdev_routing __user *kp, + struct v4l2_subdev_routing32 __user *up) +{ + struct v4l2_subdev_route __user *uroutes; + compat_caddr_t p; + u32 num_routes; + + if 
(!access_ok(VERIFY_WRITE, up, sizeof(*up)) || + get_user(p, &up->routes) || + get_user(num_routes, &kp->num_routes) || + assign_in_user(&up->num_routes, &kp->num_routes) || + !access_ok(VERIFY_WRITE, up->reserved, sizeof(*up->reserved))) + return -EFAULT; + + uroutes = compat_ptr(p); + + if (!access_ok(VERIFY_WRITE, uroutes, + num_routes * sizeof(*kp->routes))) + return -EFAULT; + + return 0; +} + struct v4l2_edid32 { __u32 pad; __u32 start_block; @@ -1117,6 +1174,8 @@ static int put_v4l2_edid32(struct v4l2_edid __user *p64, #define VIDIOC_STREAMOFF32 _IOW ('V', 19, s32) #define VIDIOC_G_INPUT32 _IOR ('V', 38, s32) #define VIDIOC_S_INPUT32 _IOWR('V', 39, s32) +#define VIDIOC_SUBDEV_G_ROUTING32 _IOWR('V', 38, struct v4l2_subdev_routing32) +#define VIDIOC_SUBDEV_S_ROUTING32 _IOWR('V', 39, struct v4l2_subdev_routing32) #define VIDIOC_G_OUTPUT32 _IOR ('V', 46, s32) #define VIDIOC_S_OUTPUT32 _IOWR('V', 47, s32) @@ -1195,6 +1254,8 @@ static long do_video_ioctl(struct file *file, unsigned int cmd, unsigned long ar case VIDIOC_STREAMOFF32: cmd = VIDIOC_STREAMOFF; break; case VIDIOC_G_INPUT32: cmd = VIDIOC_G_INPUT; break; case VIDIOC_S_INPUT32: cmd = VIDIOC_S_INPUT; break; + case VIDIOC_SUBDEV_G_ROUTING32: cmd = VIDIOC_SUBDEV_G_ROUTING; break; + case VIDIOC_SUBDEV_S_ROUTING32: cmd = VIDIOC_SUBDEV_S_ROUTING; break; case VIDIOC_G_OUTPUT32: cmd = VIDIOC_G_OUTPUT; break; case VIDIOC_S_OUTPUT32: cmd = VIDIOC_S_OUTPUT; break; case VIDIOC_CREATE_BUFS32: cmd = VIDIOC_CREATE_BUFS; break; @@ -1227,6 +1288,14 @@ static long do_video_ioctl(struct file *file, unsigned int cmd, unsigned long ar compatible_arg = 0; break; + case VIDIOC_SUBDEV_G_ROUTING: + case VIDIOC_SUBDEV_S_ROUTING: + err = alloc_userspace(sizeof(struct v4l2_subdev_routing), 0, &new_p64); + if (!err) + err = get_v4l2_subdev_routing(new_p64, p32); + compatible_arg = 0; + break; + case VIDIOC_G_EDID: case VIDIOC_S_EDID: err = alloc_userspace(sizeof(struct v4l2_edid), 0, &new_p64); @@ -1368,6 +1437,13 @@ static long 
do_video_ioctl(struct file *file, unsigned int cmd, unsigned long ar if (put_v4l2_edid32(new_p64, p32)) err = -EFAULT; break; + case VIDIOC_SUBDEV_G_ROUTING: + case VIDIOC_SUBDEV_S_ROUTING: + err = alloc_userspace(sizeof(struct v4l2_subdev_routing), 0, &new_p64); + if (!err) + if (put_v4l2_subdev_routing(new_p64, p32)) + err = -EFAULT; + break; } if (err) return err; diff --git a/drivers/media/v4l2-core/v4l2-dv-timings.c b/drivers/media/v4l2-core/v4l2-dv-timings.c index c81faea96fbab..c7c600c1f63b8 100644 --- a/drivers/media/v4l2-core/v4l2-dv-timings.c +++ b/drivers/media/v4l2-core/v4l2-dv-timings.c @@ -837,9 +837,9 @@ v4l2_hdmi_rx_colorimetry(const struct hdmi_avi_infoframe *avi, switch (avi->colorimetry) { case HDMI_COLORIMETRY_EXTENDED: switch (avi->extended_colorimetry) { - case HDMI_EXTENDED_COLORIMETRY_ADOBE_RGB: - c.colorspace = V4L2_COLORSPACE_ADOBERGB; - c.xfer_func = V4L2_XFER_FUNC_ADOBERGB; + case HDMI_EXTENDED_COLORIMETRY_OPRGB: + c.colorspace = V4L2_COLORSPACE_OPRGB; + c.xfer_func = V4L2_XFER_FUNC_OPRGB; break; case HDMI_EXTENDED_COLORIMETRY_BT2020: c.colorspace = V4L2_COLORSPACE_BT2020; @@ -908,10 +908,10 @@ v4l2_hdmi_rx_colorimetry(const struct hdmi_avi_infoframe *avi, c.ycbcr_enc = V4L2_YCBCR_ENC_601; c.xfer_func = V4L2_XFER_FUNC_SRGB; break; - case HDMI_EXTENDED_COLORIMETRY_ADOBE_YCC_601: - c.colorspace = V4L2_COLORSPACE_ADOBERGB; + case HDMI_EXTENDED_COLORIMETRY_OPYCC_601: + c.colorspace = V4L2_COLORSPACE_OPRGB; c.ycbcr_enc = V4L2_YCBCR_ENC_601; - c.xfer_func = V4L2_XFER_FUNC_ADOBERGB; + c.xfer_func = V4L2_XFER_FUNC_OPRGB; break; case HDMI_EXTENDED_COLORIMETRY_BT2020: c.colorspace = V4L2_COLORSPACE_BT2020; diff --git a/drivers/media/v4l2-core/v4l2-event.c b/drivers/media/v4l2-core/v4l2-event.c index a3ef1f50a4b34..481e3c65cf97a 100644 --- a/drivers/media/v4l2-core/v4l2-event.c +++ b/drivers/media/v4l2-core/v4l2-event.c @@ -193,6 +193,22 @@ int v4l2_event_pending(struct v4l2_fh *fh) } EXPORT_SYMBOL_GPL(v4l2_event_pending); +static void 
__v4l2_event_unsubscribe(struct v4l2_subscribed_event *sev) +{ + struct v4l2_fh *fh = sev->fh; + unsigned int i; + + lockdep_assert_held(&fh->subscribe_lock); + assert_spin_locked(&fh->vdev->fh_lock); + + /* Remove any pending events for this subscription */ + for (i = 0; i < sev->in_use; i++) { + list_del(&sev->events[sev_pos(sev, i)].list); + fh->navailable--; + } + list_del(&sev->list); +} + int v4l2_event_subscribe(struct v4l2_fh *fh, const struct v4l2_event_subscription *sub, unsigned elems, const struct v4l2_subscribed_event_ops *ops) @@ -224,27 +240,23 @@ int v4l2_event_subscribe(struct v4l2_fh *fh, spin_lock_irqsave(&fh->vdev->fh_lock, flags); found_ev = v4l2_event_subscribed(fh, sub->type, sub->id); + if (!found_ev) + list_add(&sev->list, &fh->subscribed); spin_unlock_irqrestore(&fh->vdev->fh_lock, flags); if (found_ev) { /* Already listening */ kvfree(sev); - goto out_unlock; - } - - if (sev->ops && sev->ops->add) { + } else if (sev->ops && sev->ops->add) { ret = sev->ops->add(sev, elems); if (ret) { + spin_lock_irqsave(&fh->vdev->fh_lock, flags); + __v4l2_event_unsubscribe(sev); + spin_unlock_irqrestore(&fh->vdev->fh_lock, flags); kvfree(sev); - goto out_unlock; } } - spin_lock_irqsave(&fh->vdev->fh_lock, flags); - list_add(&sev->list, &fh->subscribed); - spin_unlock_irqrestore(&fh->vdev->fh_lock, flags); - -out_unlock: mutex_unlock(&fh->subscribe_lock); return ret; @@ -279,7 +291,6 @@ int v4l2_event_unsubscribe(struct v4l2_fh *fh, { struct v4l2_subscribed_event *sev; unsigned long flags; - int i; if (sub->type == V4L2_EVENT_ALL) { v4l2_event_unsubscribe_all(fh); @@ -291,14 +302,8 @@ int v4l2_event_unsubscribe(struct v4l2_fh *fh, spin_lock_irqsave(&fh->vdev->fh_lock, flags); sev = v4l2_event_subscribed(fh, sub->type, sub->id); - if (sev != NULL) { - /* Remove any pending events for this subscription */ - for (i = 0; i < sev->in_use; i++) { - list_del(&sev->events[sev_pos(sev, i)].list); - fh->navailable--; - } - list_del(&sev->list); - } + if (sev != 
NULL) + __v4l2_event_unsubscribe(sev); spin_unlock_irqrestore(&fh->vdev->fh_lock, flags); diff --git a/drivers/media/v4l2-core/v4l2-ioctl.c b/drivers/media/v4l2-core/v4l2-ioctl.c index 54afc9c7ee6ea..0523d2d16061d 100644 --- a/drivers/media/v4l2-core/v4l2-ioctl.c +++ b/drivers/media/v4l2-core/v4l2-ioctl.c @@ -286,6 +286,7 @@ static void v4l_print_format(const void *arg, bool write_only) const struct v4l2_window *win; const struct v4l2_sdr_format *sdr; const struct v4l2_meta_format *meta; + u32 planes; unsigned i; pr_cont("type=%s", prt_names(p->type, v4l2_type_names)); @@ -316,7 +317,8 @@ static void v4l_print_format(const void *arg, bool write_only) prt_names(mp->field, v4l2_field_names), mp->colorspace, mp->num_planes, mp->flags, mp->ycbcr_enc, mp->quantization, mp->xfer_func); - for (i = 0; i < mp->num_planes; i++) + planes = min_t(u32, mp->num_planes, VIDEO_MAX_PLANES); + for (i = 0; i < planes; i++) printk(KERN_DEBUG "plane %u: bytesperline=%u sizeimage=%u\n", i, mp->plane_fmt[i].bytesperline, mp->plane_fmt[i].sizeimage); @@ -2924,6 +2926,22 @@ static int check_array_args(unsigned int cmd, void *parg, size_t *array_size, } break; } + case VIDIOC_SUBDEV_G_ROUTING: + case VIDIOC_SUBDEV_S_ROUTING: { + struct v4l2_subdev_routing *route = parg; + + if (route->num_routes > 0) { + if (route->num_routes > 256) + return -EINVAL; + + *user_ptr = (void __user *)route->routes; + *kernel_ptr = (void *)&route->routes; + *array_size = sizeof(struct v4l2_subdev_route) + * route->num_routes; + ret = 1; + } + break; + } } return ret; diff --git a/drivers/media/v4l2-core/v4l2-mc.c b/drivers/media/v4l2-core/v4l2-mc.c index 0fc185a2ce90e..031418bb0edf8 100644 --- a/drivers/media/v4l2-core/v4l2-mc.c +++ b/drivers/media/v4l2-core/v4l2-mc.c @@ -375,6 +375,8 @@ int v4l2_pipeline_link_notify(struct media_link *link, u32 flags, int sink_use; int ret = 0; + source->start = link->source; + sink->start = link->sink; source_use = pipeline_pm_use_count(source, graph); sink_use = 
pipeline_pm_use_count(sink, graph); @@ -383,6 +385,8 @@ int v4l2_pipeline_link_notify(struct media_link *link, u32 flags, /* Powering off entities is assumed to never fail. */ pipeline_pm_power(source, -sink_use, graph); pipeline_pm_power(sink, -source_use, graph); + source->use_count = 0; + sink->use_count = 0; return 0; } diff --git a/drivers/media/v4l2-core/v4l2-subdev.c b/drivers/media/v4l2-core/v4l2-subdev.c index 2b63fa6b6fc9a..e95cda5542158 100644 --- a/drivers/media/v4l2-core/v4l2-subdev.c +++ b/drivers/media/v4l2-core/v4l2-subdev.c @@ -135,6 +135,9 @@ static int check_format(struct v4l2_subdev *sd, if (format->pad >= sd->entity.num_pads) return -EINVAL; + if (!(sd->flags & V4L2_SUBDEV_FL_HAS_SUBSTREAMS) && format->stream) + return -EINVAL; + return 0; } @@ -160,6 +163,9 @@ static int check_selection(struct v4l2_subdev *sd, if (sel->pad >= sd->entity.num_pads) return -EINVAL; + if (!(sd->flags & V4L2_SUBDEV_FL_HAS_SUBSTREAMS) && sel->stream) + return -EINVAL; + return 0; } @@ -516,6 +522,40 @@ static long subdev_do_ioctl(struct file *file, unsigned int cmd, void *arg) case VIDIOC_SUBDEV_QUERYSTD: return v4l2_subdev_call(sd, video, querystd, arg); + + case VIDIOC_SUBDEV_G_ROUTING: + return v4l2_subdev_call(sd, pad, get_routing, arg); + + case VIDIOC_SUBDEV_S_ROUTING: { + struct v4l2_subdev_routing *route = arg; + unsigned int i; + int rval; + + if (route->num_routes > sd->entity.num_pads) + return -EINVAL; + + for (i = 0; i < route->num_routes; ++i) { + unsigned int sink = route->routes[i].sink_pad; + unsigned int source = route->routes[i].source_pad; + struct media_pad *pads = sd->entity.pads; + + if (sink >= sd->entity.num_pads || + source >= sd->entity.num_pads) + return -EINVAL; + + if ((!(route->routes[i].flags & + V4L2_SUBDEV_ROUTE_FL_SOURCE) && + !(pads[sink].flags & MEDIA_PAD_FL_SINK)) || + !(pads[source].flags & MEDIA_PAD_FL_SOURCE)) + return -EINVAL; + } + + mutex_lock(&sd->entity.graph_obj.mdev->graph_mutex); + rval = v4l2_subdev_call(sd, pad, 
set_routing, route); + mutex_unlock(&sd->entity.graph_obj.mdev->graph_mutex); + + return rval; + } #endif default: return v4l2_subdev_call(sd, core, ioctl, cmd, arg); @@ -628,19 +668,21 @@ v4l2_subdev_link_validate_get_format(struct media_pad *pad, return -EINVAL; } -int v4l2_subdev_link_validate(struct media_link *link) +static int v4l2_subdev_link_validate_one(struct media_link *link, + struct media_pad *source_pad, unsigned int source_stream, + struct media_pad *sink_pad, unsigned int sink_stream) { struct v4l2_subdev *sink; struct v4l2_subdev_format sink_fmt, source_fmt; int rval; - rval = v4l2_subdev_link_validate_get_format( - link->source, &source_fmt); + source_fmt.stream = source_stream; + rval = v4l2_subdev_link_validate_get_format(source_pad, &source_fmt); if (rval < 0) return 0; - rval = v4l2_subdev_link_validate_get_format( - link->sink, &sink_fmt); + sink_fmt.stream = sink_stream; + rval = v4l2_subdev_link_validate_get_format(sink_pad, &sink_fmt); if (rval < 0) return 0; @@ -654,6 +696,129 @@ int v4l2_subdev_link_validate(struct media_link *link) return v4l2_subdev_link_validate_default( sink, link, &source_fmt, &sink_fmt); } + +/* How many routes to assume there can be per a sub-device? 
*/ +#define LINK_VALIDATE_ROUTES 8 + +int v4l2_subdev_link_validate(struct media_link *link) +{ + struct v4l2_subdev *sink; + struct v4l2_subdev_route sink_routes[LINK_VALIDATE_ROUTES]; + struct v4l2_subdev_routing sink_routing = { + .routes = sink_routes, + .num_routes = ARRAY_SIZE(sink_routes), + }; + struct v4l2_subdev_route src_routes[LINK_VALIDATE_ROUTES]; + struct v4l2_subdev_routing src_routing = { + .routes = src_routes, + .num_routes = ARRAY_SIZE(src_routes), + }; + unsigned int i, j; + int rval; + + sink = media_entity_to_v4l2_subdev(link->sink->entity); + + if (!(link->sink->flags & MEDIA_PAD_FL_MULTIPLEX && + link->source->flags & MEDIA_PAD_FL_MULTIPLEX)) + return v4l2_subdev_link_validate_one(link, link->source, 0, + link->sink, 0); + /* + * multiplex link cannot proceed without route information. + */ + rval = v4l2_subdev_call(sink, pad, get_routing, &sink_routing); + + if (rval) { + dev_err(sink->entity.graph_obj.mdev->dev, + "error %d in get_routing() on %s, sink pad %u\n", rval, + sink->entity.name, link->sink->index); + + return rval; + } + + rval = v4l2_subdev_call(media_entity_to_v4l2_subdev( + link->source->entity), + pad, get_routing, &src_routing); + if (rval) { + dev_dbg(sink->entity.graph_obj.mdev->dev, + "error %d in get_routing() on %s, source pad %u\n", + rval, sink->entity.name, link->source->index); + + return rval; + } + + dev_dbg(sink->entity.graph_obj.mdev->dev, + "validating multiplexed link \"%s\":%u -> \"%s\":%u; %u/%u routes\n", + link->source->entity->name, link->source->index, + sink->entity.name, link->sink->index, + src_routing.num_routes, sink_routing.num_routes); + + for (i = 0; i < sink_routing.num_routes; i++) { + /* Get the first active route for the sink pad. 
*/ + if (sink_routes[i].sink_pad != link->sink->index || + !(sink_routes[i].flags & V4L2_SUBDEV_ROUTE_FL_ACTIVE)) { + dev_dbg(sink->entity.graph_obj.mdev->dev, + "skipping sink route %u/%u -> %u/%u[%u]\n", + sink_routes[i].sink_pad, + sink_routes[i].sink_stream, + sink_routes[i].source_pad, + sink_routes[i].source_stream, + (bool)(sink_routes[i].flags + & V4L2_SUBDEV_ROUTE_FL_ACTIVE)); + continue; + } + + /* + * Get the corresponding route for the source pad. + * It's ok for the source pad to have routes active + * where the sink pad does not, but the routes that + * are active on the source pad have to be active on + * the sink pad as well. + */ + + for (j = 0; j < src_routing.num_routes; j++) { + if (src_routes[j].source_pad == link->source->index && + src_routes[j].source_stream + == sink_routes[i].sink_stream) + break; + } + + if (j == src_routing.num_routes) { + dev_err(sink->entity.graph_obj.mdev->dev, + "no corresponding source found.\n"); + return -EINVAL; + } + + /* The source route must be active. 
*/ + if (!(src_routes[j].flags & V4L2_SUBDEV_ROUTE_FL_ACTIVE)) { + dev_dbg(sink->entity.graph_obj.mdev->dev, + "source route not active\n"); + return -EINVAL; + } + + dev_dbg(sink->entity.graph_obj.mdev->dev, + "validating link \"%s\": %u/%u => \"%s\" %u/%u\n", + link->source->entity->name, src_routes[j].source_pad, + src_routes[j].source_stream, sink->entity.name, + sink_routes[i].sink_pad, sink_routes[i].sink_stream); + + rval = v4l2_subdev_link_validate_one( + link, link->source, src_routes[j].source_stream, + link->sink, sink_routes[j].sink_stream); + if (rval) { + dev_dbg(sink->entity.graph_obj.mdev->dev, + "error %d in link validation\n", rval); + return rval; + } + } + + if (i < sink_routing.num_routes) { + dev_dbg(sink->entity.graph_obj.mdev->dev, + "not all sink routes verified; out of source routes\n"); + return -EINVAL; + } + + return 0; +} EXPORT_SYMBOL_GPL(v4l2_subdev_link_validate); struct v4l2_subdev_pad_config * diff --git a/drivers/mfd/arizona-i2c.c b/drivers/mfd/arizona-i2c.c index 5fe12961cfe54..84d29dec0ae13 100644 --- a/drivers/mfd/arizona-i2c.c +++ b/drivers/mfd/arizona-i2c.c @@ -23,6 +23,141 @@ #include "arizona.h" +/************************************************************/ +#include +#include +#include +#include +#include +#include + +#ifdef CONFIG_SND_SOC_INTEL_CNL_FPGA +/***********WM8280 1.8V REGULATOR*************/ +static struct regulator_consumer_supply vflorida1_consumer[] = { + REGULATOR_SUPPLY("AVDD", "0-001a"), + REGULATOR_SUPPLY("DBVDD1", "0-001a"), + REGULATOR_SUPPLY("LDOVDD", "0-001a"), + REGULATOR_SUPPLY("CPVDD", "0-001a"), + REGULATOR_SUPPLY("DBVDD2", "0-001a"), + REGULATOR_SUPPLY("DBVDD3", "0-001a"), +}; + +/***********WM8280 5V REGULATOR*************/ +static struct regulator_consumer_supply vflorida2_consumer[] = { + REGULATOR_SUPPLY("SPKVDDL", "0-001a"), + REGULATOR_SUPPLY("SPKVDDR", "0-001a"), +}; +#else +/***********WM8280 1.8V REGULATOR*************/ +static struct regulator_consumer_supply vflorida1_consumer[] = { 
+ REGULATOR_SUPPLY("AVDD", "i2c-INT34C1:00"), + REGULATOR_SUPPLY("DBVDD1", "i2c-INT34C1:00"), + REGULATOR_SUPPLY("LDOVDD", "i2c-INT34C1:00"), + REGULATOR_SUPPLY("CPVDD", "i2c-INT34C1:00"), + REGULATOR_SUPPLY("DBVDD2", "i2c-INT34C1:00"), + REGULATOR_SUPPLY("DBVDD3", "i2c-INT34C1:00"), +}; + +/***********WM8280 5V REGULATOR*************/ +static struct regulator_consumer_supply vflorida2_consumer[] = { + REGULATOR_SUPPLY("SPKVDDL", "i2c-INT34C1:00"), + REGULATOR_SUPPLY("SPKVDDR", "i2c-INT34C1:00"), +}; +#endif + +static struct regulator_init_data vflorida1_data = { + .constraints = { + .always_on = 1, + }, + .num_consumer_supplies = ARRAY_SIZE(vflorida1_consumer), + .consumer_supplies = vflorida1_consumer, +}; + +static struct fixed_voltage_config vflorida1_config = { + .supply_name = "DC_1V8", + .microvolts = 1800000, + .gpio = -EINVAL, + .init_data = &vflorida1_data, +}; + +static struct platform_device vflorida1_device = { + .name = "reg-fixed-voltage", + .id = PLATFORM_DEVID_AUTO, + .dev = { + .platform_data = &vflorida1_config, + }, +}; + +static struct regulator_init_data vflorida2_data = { + .constraints = { + .always_on = 1, + }, + .num_consumer_supplies = ARRAY_SIZE(vflorida2_consumer), + .consumer_supplies = vflorida2_consumer, +}; + +static struct fixed_voltage_config vflorida2_config = { + .supply_name = "DC_5V", + .microvolts = 3700000, + .gpio = -EINVAL, + .init_data = &vflorida2_data, +}; + +static struct platform_device vflorida2_device = { + .name = "reg-fixed-voltage", + .id = PLATFORM_DEVID_AUTO, + .dev = { + .platform_data = &vflorida2_config, + }, +}; + +/***********WM8280 Codec Driver platform data*************/ +static const struct arizona_micd_range micd_ctp_ranges[] = { + { .max = 11, .key = BTN_0 }, + { .max = 28, .key = BTN_1 }, + { .max = 54, .key = BTN_2 }, + { .max = 100, .key = BTN_3 }, + { .max = 186, .key = BTN_4 }, + { .max = 430, .key = BTN_5 }, +}; + +static struct arizona_micd_config micd_modes[] = { + /*{Acc Det on Micdet1, Use 
Micbias2 for detection, + * Set GPIO to 1 to selecte this polarity}*/ + { 0, 2, 1 }, +}; + +static struct arizona_pdata __maybe_unused florida_pdata = { + .reset = 0, /*No Reset GPIO from AP, use SW reset*/ + .irq_flags = IRQF_TRIGGER_LOW | IRQF_ONESHOT, + .clk32k_src = ARIZONA_32KZ_MCLK2, /*Onboard OSC provides 32K on MCLK2*/ + /* + * IN1 uses both MICBIAS1 and MICBIAS2 based on jack polarity, + * the below values in dmic_ref only has meaning for DMIC's and not + * AMIC's + */ +#ifdef CONFIG_SND_SOC_INTEL_CNL_FPGA + .dmic_ref = {ARIZONA_DMIC_MICBIAS1, ARIZONA_DMIC_MICBIAS3, 0, 0}, + .inmode = {ARIZONA_INMODE_DIFF, ARIZONA_INMODE_DMIC, 0, 0}, +#else + .dmic_ref = {ARIZONA_DMIC_MICBIAS1, 0, ARIZONA_DMIC_MICVDD, 0}, + .inmode = {ARIZONA_INMODE_SE, 0, ARIZONA_INMODE_DMIC, 0}, +#endif + .gpio_base = 0, /* Base allocated by gpio core */ + .micd_pol_gpio = 2, /* GPIO3 (offset 2 from gpio_base) of the codec */ + .micd_configs = micd_modes, + .num_micd_configs = ARRAY_SIZE(micd_modes), + .micd_force_micbias = true, +}; + +/************************************************************/ +#ifdef CONFIG_SND_SOC_INTEL_CNL_FPGA +static struct i2c_board_info arizona_i2c_device = { + I2C_BOARD_INFO("wm8280", 0x1A), + .platform_data = &florida_pdata, +}; +#endif + static int arizona_i2c_probe(struct i2c_client *i2c, const struct i2c_device_id *id) { @@ -31,10 +166,16 @@ static int arizona_i2c_probe(struct i2c_client *i2c, unsigned long type; int ret; + pr_debug("%s:%d\n", __func__, __LINE__); if (i2c->dev.of_node) type = arizona_of_get_type(&i2c->dev); +#ifdef CONFIG_SND_SOC_INTEL_CNL_FPGA + else + type = WM8280; +#else else type = id->driver_data; +#endif switch (type) { case WM5102: @@ -105,6 +246,13 @@ static const struct i2c_device_id arizona_i2c_id[] = { }; MODULE_DEVICE_TABLE(i2c, arizona_i2c_id); +#ifndef CONFIG_SND_SOC_INTEL_CNL_FPGA +static struct acpi_device_id __maybe_unused arizona_acpi_match[] = { + { "INT34C1", WM8280 }, + { } +}; +#endif + static struct i2c_driver 
arizona_i2c_driver = { .driver = { .name = "arizona", @@ -116,7 +264,53 @@ static struct i2c_driver arizona_i2c_driver = { .id_table = arizona_i2c_id, }; -module_i2c_driver(arizona_i2c_driver); +static int __init arizona_modinit(void) +{ + int ret = 0; +#ifdef CONFIG_SND_SOC_INTEL_CNL_FPGA + struct i2c_adapter *adapter; + struct i2c_client *client; +#endif + + pr_debug("%s Entry\n", __func__); + /***********WM8280 Register Regulator*************/ + platform_device_register(&vflorida1_device); + platform_device_register(&vflorida2_device); + +#ifdef CONFIG_SND_SOC_INTEL_CNL_FPGA + adapter = i2c_get_adapter(0); + pr_debug("%s:%d\n", __func__, __LINE__); + if (adapter) { + client = i2c_new_device(adapter, &arizona_i2c_device); + pr_debug("%s:%d\n", __func__, __LINE__); + if (!client) { + pr_err("can't create i2c device %s\n", + arizona_i2c_device.type); + i2c_put_adapter(adapter); + pr_debug("%s:%d\n", __func__, __LINE__); + return -ENODEV; + } + } else { + pr_err("adapter is NULL\n"); + return -ENODEV; + } +#endif + pr_debug("%s:%d\n", __func__, __LINE__); + ret = i2c_add_driver(&arizona_i2c_driver); + + pr_debug("%s Exit\n", __func__); + + return ret; +} + +module_init(arizona_modinit); + +static void __exit arizona_modexit(void) +{ + i2c_del_driver(&arizona_i2c_driver); +} + +module_exit(arizona_modexit); MODULE_DESCRIPTION("Arizona I2C bus interface"); MODULE_AUTHOR("Mark Brown "); diff --git a/drivers/mfd/arizona-irq.c b/drivers/mfd/arizona-irq.c index a307832d7e45f..bf50bb41e4a7a 100644 --- a/drivers/mfd/arizona-irq.c +++ b/drivers/mfd/arizona-irq.c @@ -377,7 +377,8 @@ int arizona_irq_init(struct arizona *arizona) ret = request_threaded_irq(arizona->irq, NULL, arizona_irq_thread, flags, "arizona", arizona); - if (ret != 0) { + /* FPGA board doesn't have irq line */ + if (ret != 0 && !IS_ENABLED(CONFIG_SND_SOC_INTEL_CNL_FPGA)) { dev_err(arizona->dev, "Failed to request primary IRQ %d: %d\n", arizona->irq, ret); goto err_main_irq; diff --git 
a/drivers/mfd/cros_ec_dev.c b/drivers/mfd/cros_ec_dev.c index 999dac752bccf..6b22d54a540d1 100644 --- a/drivers/mfd/cros_ec_dev.c +++ b/drivers/mfd/cros_ec_dev.c @@ -263,6 +263,11 @@ static const struct file_operations fops = { #endif }; +static void cros_ec_class_release(struct device *dev) +{ + kfree(to_cros_ec_dev(dev)); +} + static void cros_ec_sensors_register(struct cros_ec_dev *ec) { /* @@ -395,7 +400,7 @@ static int ec_device_probe(struct platform_device *pdev) int retval = -ENOMEM; struct device *dev = &pdev->dev; struct cros_ec_platform *ec_platform = dev_get_platdata(dev); - struct cros_ec_dev *ec = devm_kzalloc(dev, sizeof(*ec), GFP_KERNEL); + struct cros_ec_dev *ec = kzalloc(sizeof(*ec), GFP_KERNEL); if (!ec) return retval; @@ -417,6 +422,7 @@ static int ec_device_probe(struct platform_device *pdev) ec->class_dev.devt = MKDEV(ec_major, pdev->id); ec->class_dev.class = &cros_class; ec->class_dev.parent = dev; + ec->class_dev.release = cros_ec_class_release; retval = dev_set_name(&ec->class_dev, "%s", ec_platform->ec_name); if (retval) { diff --git a/drivers/mfd/menelaus.c b/drivers/mfd/menelaus.c index 29b7164a823bd..d28ebe7ecd211 100644 --- a/drivers/mfd/menelaus.c +++ b/drivers/mfd/menelaus.c @@ -1094,6 +1094,7 @@ static void menelaus_rtc_alarm_work(struct menelaus_chip *m) static inline void menelaus_rtc_init(struct menelaus_chip *m) { int alarm = (m->client->irq > 0); + int err; /* assume 32KDETEN pin is pulled high */ if (!(menelaus_read_reg(MENELAUS_OSC_CTRL) & 0x80)) { @@ -1101,6 +1102,12 @@ static inline void menelaus_rtc_init(struct menelaus_chip *m) return; } + m->rtc = devm_rtc_allocate_device(&m->client->dev); + if (IS_ERR(m->rtc)) + return; + + m->rtc->ops = &menelaus_rtc_ops; + /* support RTC alarm; it can issue wakeups */ if (alarm) { if (menelaus_add_irq_work(MENELAUS_RTCALM_IRQ, @@ -1125,10 +1132,8 @@ static inline void menelaus_rtc_init(struct menelaus_chip *m) menelaus_write_reg(MENELAUS_RTC_CTRL, m->rtc_control); } - m->rtc = 
rtc_device_register(DRIVER_NAME, - &m->client->dev, - &menelaus_rtc_ops, THIS_MODULE); - if (IS_ERR(m->rtc)) { + err = rtc_register_device(m->rtc); + if (err) { if (alarm) { menelaus_remove_irq_work(MENELAUS_RTCALM_IRQ); device_init_wakeup(&m->client->dev, 0); diff --git a/drivers/mfd/tps6586x.c b/drivers/mfd/tps6586x.c index b893797827410..9c7925ca13cf0 100644 --- a/drivers/mfd/tps6586x.c +++ b/drivers/mfd/tps6586x.c @@ -592,6 +592,29 @@ static int tps6586x_i2c_remove(struct i2c_client *client) return 0; } +static int __maybe_unused tps6586x_i2c_suspend(struct device *dev) +{ + struct tps6586x *tps6586x = dev_get_drvdata(dev); + + if (tps6586x->client->irq) + disable_irq(tps6586x->client->irq); + + return 0; +} + +static int __maybe_unused tps6586x_i2c_resume(struct device *dev) +{ + struct tps6586x *tps6586x = dev_get_drvdata(dev); + + if (tps6586x->client->irq) + enable_irq(tps6586x->client->irq); + + return 0; +} + +static SIMPLE_DEV_PM_OPS(tps6586x_pm_ops, tps6586x_i2c_suspend, + tps6586x_i2c_resume); + static const struct i2c_device_id tps6586x_id_table[] = { { "tps6586x", 0 }, { }, @@ -602,6 +625,7 @@ static struct i2c_driver tps6586x_driver = { .driver = { .name = "tps6586x", .of_match_table = of_match_ptr(tps6586x_of_match), + .pm = &tps6586x_pm_ops, }, .probe = tps6586x_i2c_probe, .remove = tps6586x_i2c_remove, diff --git a/drivers/misc/atmel-ssc.c b/drivers/misc/atmel-ssc.c index b2a0340f277e2..d8e3cc2dc7470 100644 --- a/drivers/misc/atmel-ssc.c +++ b/drivers/misc/atmel-ssc.c @@ -132,7 +132,7 @@ static const struct of_device_id atmel_ssc_dt_ids[] = { MODULE_DEVICE_TABLE(of, atmel_ssc_dt_ids); #endif -static inline const struct atmel_ssc_platform_data * __init +static inline const struct atmel_ssc_platform_data * atmel_ssc_get_driver_data(struct platform_device *pdev) { if (pdev->dev.of_node) { diff --git a/drivers/misc/genwqe/card_base.h b/drivers/misc/genwqe/card_base.h index 120738d6e58b2..77ed3967c5b00 100644 --- a/drivers/misc/genwqe/card_base.h +++ 
b/drivers/misc/genwqe/card_base.h @@ -408,7 +408,7 @@ struct genwqe_file { struct file *filp; struct fasync_struct *async_queue; - struct task_struct *owner; + struct pid *opener; struct list_head list; /* entry in list of open files */ spinlock_t map_lock; /* lock for dma_mappings */ diff --git a/drivers/misc/genwqe/card_dev.c b/drivers/misc/genwqe/card_dev.c index f453ab82f0d7b..8c1b63a4337be 100644 --- a/drivers/misc/genwqe/card_dev.c +++ b/drivers/misc/genwqe/card_dev.c @@ -52,7 +52,7 @@ static void genwqe_add_file(struct genwqe_dev *cd, struct genwqe_file *cfile) { unsigned long flags; - cfile->owner = current; + cfile->opener = get_pid(task_tgid(current)); spin_lock_irqsave(&cd->file_lock, flags); list_add(&cfile->list, &cd->file_list); spin_unlock_irqrestore(&cd->file_lock, flags); @@ -65,6 +65,7 @@ static int genwqe_del_file(struct genwqe_dev *cd, struct genwqe_file *cfile) spin_lock_irqsave(&cd->file_lock, flags); list_del(&cfile->list); spin_unlock_irqrestore(&cd->file_lock, flags); + put_pid(cfile->opener); return 0; } @@ -275,7 +276,7 @@ static int genwqe_kill_fasync(struct genwqe_dev *cd, int sig) return files; } -static int genwqe_force_sig(struct genwqe_dev *cd, int sig) +static int genwqe_terminate(struct genwqe_dev *cd) { unsigned int files = 0; unsigned long flags; @@ -283,7 +284,7 @@ static int genwqe_force_sig(struct genwqe_dev *cd, int sig) spin_lock_irqsave(&cd->file_lock, flags); list_for_each_entry(cfile, &cd->file_list, list) { - force_sig(sig, cfile->owner); + kill_pid(cfile->opener, SIGKILL, 1); files++; } spin_unlock_irqrestore(&cd->file_lock, flags); @@ -1352,7 +1353,7 @@ static int genwqe_inform_and_stop_processes(struct genwqe_dev *cd) dev_warn(&pci_dev->dev, "[%s] send SIGKILL and wait ...\n", __func__); - rc = genwqe_force_sig(cd, SIGKILL); /* force terminate */ + rc = genwqe_terminate(cd); if (rc) { /* Give kill_timout more seconds to end processes */ for (i = 0; (i < GENWQE_KILL_TIMEOUT) && diff --git 
a/drivers/misc/genwqe/card_utils.c b/drivers/misc/genwqe/card_utils.c index 8679e0bd8ec28..f4f8ab6024428 100644 --- a/drivers/misc/genwqe/card_utils.c +++ b/drivers/misc/genwqe/card_utils.c @@ -217,7 +217,7 @@ u32 genwqe_crc32(u8 *buff, size_t len, u32 init) void *__genwqe_alloc_consistent(struct genwqe_dev *cd, size_t size, dma_addr_t *dma_handle) { - if (get_order(size) > MAX_ORDER) + if (get_order(size) >= MAX_ORDER) return NULL; return dma_zalloc_coherent(&cd->pci_dev->dev, size, dma_handle, diff --git a/drivers/misc/ibmvmc.c b/drivers/misc/ibmvmc.c index b8aaa684c397b..2ed23c99f59fd 100644 --- a/drivers/misc/ibmvmc.c +++ b/drivers/misc/ibmvmc.c @@ -820,21 +820,24 @@ static int ibmvmc_send_msg(struct crq_server_adapter *adapter, * * Return: * 0 - Success + * Non-zero - Failure */ static int ibmvmc_open(struct inode *inode, struct file *file) { struct ibmvmc_file_session *session; - int rc = 0; pr_debug("%s: inode = 0x%lx, file = 0x%lx, state = 0x%x\n", __func__, (unsigned long)inode, (unsigned long)file, ibmvmc.state); session = kzalloc(sizeof(*session), GFP_KERNEL); + if (!session) + return -ENOMEM; + session->file = file; file->private_data = session; - return rc; + return 0; } /** diff --git a/drivers/misc/mei/Kconfig b/drivers/misc/mei/Kconfig index c49e1d2269afe..94961573f5f81 100644 --- a/drivers/misc/mei/Kconfig +++ b/drivers/misc/mei/Kconfig @@ -43,3 +43,17 @@ config INTEL_MEI_TXE Supported SoCs: Intel Bay Trail + +config INTEL_MEI_VIRTIO + tristate "Intel MEI interface emulation with virtio framework" + select INTEL_MEI + depends on X86 && PCI && VIRTIO_PCI + help + This module implements mei hw emulation over virtio transport. + The module will be called mei_virtio. + Enable this if your virtual machine supports virtual mei + device over virtio. 
+ +source "drivers/misc/mei/dal/Kconfig" + +source "drivers/misc/mei/spd/Kconfig" diff --git a/drivers/misc/mei/Makefile b/drivers/misc/mei/Makefile index cd6825afa8e1b..462fa25242d83 100644 --- a/drivers/misc/mei/Makefile +++ b/drivers/misc/mei/Makefile @@ -9,6 +9,7 @@ mei-objs += hbm.o mei-objs += interrupt.o mei-objs += client.o mei-objs += main.o +mei-objs += dma-ring.o mei-objs += bus.o mei-objs += bus-fixup.o mei-$(CONFIG_DEBUG_FS) += debugfs.o @@ -23,3 +24,9 @@ mei-txe-objs += hw-txe.o mei-$(CONFIG_EVENT_TRACING) += mei-trace.o CFLAGS_mei-trace.o = -I$(src) + +obj-$(CONFIG_INTEL_MEI_VIRTIO) += mei-virtio.o +mei-virtio-objs := hw-virtio.o + +obj-$(CONFIG_INTEL_MEI_SPD) += spd/ +obj-$(CONFIG_INTEL_MEI_DAL) += dal/ diff --git a/drivers/misc/mei/bus-fixup.c b/drivers/misc/mei/bus-fixup.c index a6f41f96f2a16..5945e7d482715 100644 --- a/drivers/misc/mei/bus-fixup.c +++ b/drivers/misc/mei/bus-fixup.c @@ -17,7 +17,6 @@ #include #include #include -#include #include #include #include @@ -461,6 +460,19 @@ static void mei_nfc(struct mei_cl_device *cldev) dev_dbg(bus->dev, "end of fixup match = %d\n", cldev->do_match); } +/** + * vm_support - enable on bus clients with vm support + * + * @cldev: me clients device + */ +static void vm_support(struct mei_cl_device *cldev) +{ + dev_dbg(&cldev->dev, "running hook %s\n", __func__); + + if (cldev->me_cl->props.vm_supported == 1) + cldev->do_match = 1; +} + #define MEI_FIXUP(_uuid, _hook) { _uuid, _hook } static struct mei_fixup { @@ -473,6 +485,7 @@ static struct mei_fixup { MEI_FIXUP(MEI_UUID_NFC_HCI, mei_nfc), MEI_FIXUP(MEI_UUID_WD, mei_wd), MEI_FIXUP(MEI_UUID_MKHIF_FIX, mei_mkhi_fix), + MEI_FIXUP(MEI_UUID_ANY, vm_support), }; /** diff --git a/drivers/misc/mei/bus.c b/drivers/misc/mei/bus.c index fc3872fe7b251..d9d21bba47281 100644 --- a/drivers/misc/mei/bus.c +++ b/drivers/misc/mei/bus.c @@ -28,7 +28,6 @@ #include "client.h" #define to_mei_cl_driver(d) container_of(d, struct mei_cl_driver, driver) -#define 
to_mei_cl_device(d) container_of(d, struct mei_cl_device, dev) /** * __mei_cl_send - internal client send (write) @@ -162,7 +161,7 @@ ssize_t __mei_cl_recv(struct mei_cl *cl, u8 *buf, size_t length, if (timeout) { rets = wait_event_interruptible_timeout (cl->rx_wait, - (!list_empty(&cl->rd_completed)) || + mei_cl_read_cb(cl, NULL) || (!mei_cl_is_connected(cl)), msecs_to_jiffies(timeout)); if (rets == 0) @@ -175,7 +174,7 @@ ssize_t __mei_cl_recv(struct mei_cl *cl, u8 *buf, size_t length, } else { if (wait_event_interruptible (cl->rx_wait, - (!list_empty(&cl->rd_completed)) || + mei_cl_read_cb(cl, NULL) || (!mei_cl_is_connected(cl)))) { if (signal_pending(current)) return -EINTR; @@ -208,7 +207,7 @@ ssize_t __mei_cl_recv(struct mei_cl *cl, u8 *buf, size_t length, rets = r_length; free: - mei_io_cb_free(cb); + mei_cl_del_rd_completed(cl, cb); out: mutex_unlock(&bus->device_lock); @@ -505,6 +504,16 @@ static void mei_cl_bus_module_put(struct mei_cl_device *cldev) module_put(cldev->bus->dev->driver->owner); } +static int mei_cldev_vm_support_check(struct mei_cl_device *cldev) +{ + struct mei_device *bus = cldev->bus; + + if (!bus->hbm_f_vm_supported) + return -EOPNOTSUPP; + + return cldev->me_cl->props.vm_supported ? 
0 : -EOPNOTSUPP; +} + /** * mei_cldev_enable - enable me client device * create connection with me client @@ -517,6 +526,7 @@ int mei_cldev_enable(struct mei_cl_device *cldev) { struct mei_device *bus = cldev->bus; struct mei_cl *cl; + struct mei_cl_vtag *cl_vtag; int ret; cl = cldev->cl; @@ -547,6 +557,16 @@ int mei_cldev_enable(struct mei_cl_device *cldev) goto out; } + if (!mei_cldev_vm_support_check(cldev)) { + cl_vtag = mei_cl_vtag_alloc(NULL, 0); + if (IS_ERR(cl_vtag)) { + ret = -ENOMEM; + goto out; + } + + list_add_tail(&cl_vtag->list, &cl->vtag_map); + } + ret = mei_cl_connect(cl, cldev->me_cl, NULL); if (ret < 0) { dev_err(&cldev->dev, "cannot connect\n"); @@ -591,6 +611,7 @@ int mei_cldev_disable(struct mei_cl_device *cldev) { struct mei_device *bus; struct mei_cl *cl; + struct mei_cl_vtag *cl_vtag; int err; if (!cldev) @@ -604,6 +625,13 @@ int mei_cldev_disable(struct mei_cl_device *cldev) mutex_lock(&bus->device_lock); + cl_vtag = list_first_entry_or_null(&cl->vtag_map, + struct mei_cl_vtag, list); + if (cl_vtag) { + list_del(&cl_vtag->list); + kfree(cl_vtag); + } + if (!mei_cl_is_connected(cl)) { dev_dbg(bus->dev, "Already disconnected\n"); err = 0; @@ -802,11 +830,55 @@ static ssize_t modalias_show(struct device *dev, struct device_attribute *a, } static DEVICE_ATTR_RO(modalias); +static ssize_t max_conn_show(struct device *dev, struct device_attribute *a, + char *buf) +{ + struct mei_cl_device *cldev = to_mei_cl_device(dev); + u8 maxconn = mei_me_cl_max_conn(cldev->me_cl); + + return scnprintf(buf, PAGE_SIZE, "%d", maxconn); +} +static DEVICE_ATTR_RO(max_conn); + +static ssize_t fixed_show(struct device *dev, struct device_attribute *a, + char *buf) +{ + struct mei_cl_device *cldev = to_mei_cl_device(dev); + u8 fixed = mei_me_cl_fixed(cldev->me_cl); + + return scnprintf(buf, PAGE_SIZE, "%d", fixed); +} +static DEVICE_ATTR_RO(fixed); + +static ssize_t vtag_show(struct device *dev, struct device_attribute *a, + char *buf) +{ + struct mei_cl_device 
*cldev = to_mei_cl_device(dev); + bool vm = mei_me_cl_vm(cldev->me_cl); + + return scnprintf(buf, PAGE_SIZE, "%d", vm); +} +static DEVICE_ATTR_RO(vtag); + +static ssize_t max_len_show(struct device *dev, struct device_attribute *a, + char *buf) +{ + struct mei_cl_device *cldev = to_mei_cl_device(dev); + u32 maxlen = mei_me_cl_max_len(cldev->me_cl); + + return scnprintf(buf, PAGE_SIZE, "%u", maxlen); +} +static DEVICE_ATTR_RO(max_len); + static struct attribute *mei_cldev_attrs[] = { &dev_attr_name.attr, &dev_attr_uuid.attr, &dev_attr_version.attr, &dev_attr_modalias.attr, + &dev_attr_max_conn.attr, + &dev_attr_fixed.attr, + &dev_attr_vtag.attr, + &dev_attr_max_len.attr, NULL, }; ATTRIBUTE_GROUPS(mei_cldev); diff --git a/drivers/misc/mei/client.c b/drivers/misc/mei/client.c index ebdcf0b450e25..d46960fb50dda 100644 --- a/drivers/misc/mei/client.c +++ b/drivers/misc/mei/client.c @@ -317,23 +317,6 @@ void mei_me_cl_rm_all(struct mei_device *dev) up_write(&dev->me_clients_rwsem); } -/** - * mei_cl_cmp_id - tells if the clients are the same - * - * @cl1: host client 1 - * @cl2: host client 2 - * - * Return: true - if the clients has same host and me ids - * false - otherwise - */ -static inline bool mei_cl_cmp_id(const struct mei_cl *cl1, - const struct mei_cl *cl2) -{ - return cl1 && cl2 && - (cl1->host_client_id == cl2->host_client_id) && - (mei_cl_me_id(cl1) == mei_cl_me_id(cl2)); -} - /** * mei_io_cb_free - free mei_cb_private related memory * @@ -379,6 +362,19 @@ static inline void mei_tx_cb_dequeue(struct mei_cl_cb *cb) mei_io_cb_free(cb); } +static void mei_cl_set_read_by_fp(const struct mei_cl *cl, + const struct file *fp) +{ + struct mei_cl_vtag *cl_vtag; + + list_for_each_entry(cl_vtag, &cl->vtag_map, list) { + if (cl_vtag->fp == fp) { + cl_vtag->pending_read = true; + return; + } + } +} + /** * mei_io_cb_init - allocate and initialize io callback * @@ -403,6 +399,8 @@ static struct mei_cl_cb *mei_io_cb_init(struct mei_cl *cl, cb->cl = cl; cb->buf_idx = 0; 
cb->fop_type = type; + cb->vtag = 0; + return cb; } @@ -418,8 +416,11 @@ static void mei_io_list_flush_cl(struct list_head *head, struct mei_cl_cb *cb, *next; list_for_each_entry_safe(cb, next, head, list) { - if (mei_cl_cmp_id(cl, cb->cl)) + if (cl == cb->cl) { list_del_init(&cb->list); + if (cb->fop_type == MEI_FOP_READ) + mei_io_cb_free(cb); + } } } @@ -428,14 +429,16 @@ static void mei_io_list_flush_cl(struct list_head *head, * * @head: An instance of our list structure * @cl: host client + * @fp: file pointer (matching cb file object), may be NULL */ static void mei_io_tx_list_free_cl(struct list_head *head, - const struct mei_cl *cl) + const struct mei_cl *cl, + const struct file *fp) { struct mei_cl_cb *cb, *next; list_for_each_entry_safe(cb, next, head, list) { - if (mei_cl_cmp_id(cl, cb->cl)) + if (cl == cb->cl && (!fp || fp == cb->fp)) mei_tx_cb_dequeue(cb); } } @@ -455,6 +458,20 @@ static void mei_io_list_free_fp(struct list_head *head, const struct file *fp) mei_io_cb_free(cb); } +/** + * mei_cl_free_pending - free pending cb + * + * @cl: host client + */ +static void mei_cl_free_pending(struct mei_cl *cl) +{ + struct mei_cl_cb *cb; + + cb = list_first_entry_or_null(&cl->rd_pending, struct mei_cl_cb, list); + if (cb) + mei_io_cb_free(cb); +} + /** * mei_cl_alloc_cb - a convenient wrapper for allocating read cb * @@ -478,7 +495,7 @@ struct mei_cl_cb *mei_cl_alloc_cb(struct mei_cl *cl, size_t length, if (length == 0) return cb; - cb->buf.data = kmalloc(length, GFP_KERNEL); + cb->buf.data = kmalloc(roundup(length, MEI_SLOT_SIZE), GFP_KERNEL); if (!cb->buf.data) { mei_io_cb_free(cb); return NULL; @@ -527,15 +544,19 @@ struct mei_cl_cb *mei_cl_enqueue_ctrl_wr_cb(struct mei_cl *cl, size_t length, * * Return: cb on success, NULL if cb is not found */ -struct mei_cl_cb *mei_cl_read_cb(const struct mei_cl *cl, const struct file *fp) +struct mei_cl_cb *mei_cl_read_cb(struct mei_cl *cl, const struct file *fp) { struct mei_cl_cb *cb; + struct mei_cl_cb *ret_cb = 
NULL; + spin_lock(&cl->rd_completed_lock); list_for_each_entry(cb, &cl->rd_completed, list) - if (!fp || fp == cb->fp) - return cb; - - return NULL; + if (!fp || fp == cb->fp) { + ret_cb = cb; + break; + } + spin_unlock(&cl->rd_completed_lock); + return ret_cb; } /** @@ -556,12 +577,16 @@ int mei_cl_flush_queues(struct mei_cl *cl, const struct file *fp) dev = cl->dev; cl_dbg(dev, cl, "remove list entry belonging to cl\n"); - mei_io_tx_list_free_cl(&cl->dev->write_list, cl); - mei_io_tx_list_free_cl(&cl->dev->write_waiting_list, cl); - mei_io_list_flush_cl(&cl->dev->ctrl_wr_list, cl); - mei_io_list_flush_cl(&cl->dev->ctrl_rd_list, cl); - mei_io_list_free_fp(&cl->rd_pending, fp); + mei_io_tx_list_free_cl(&cl->dev->write_list, cl, fp); + mei_io_tx_list_free_cl(&cl->dev->write_waiting_list, cl, fp); + if (!fp) { + mei_io_list_flush_cl(&cl->dev->ctrl_wr_list, cl); + mei_io_list_flush_cl(&cl->dev->ctrl_rd_list, cl); + mei_cl_free_pending(cl); + } + spin_lock(&cl->rd_completed_lock); mei_io_list_free_fp(&cl->rd_completed, fp); + spin_unlock(&cl->rd_completed_lock); return 0; } @@ -579,6 +604,8 @@ static void mei_cl_init(struct mei_cl *cl, struct mei_device *dev) init_waitqueue_head(&cl->rx_wait); init_waitqueue_head(&cl->tx_wait); init_waitqueue_head(&cl->ev_wait); + INIT_LIST_HEAD(&cl->vtag_map); + spin_lock_init(&cl->rd_completed_lock); INIT_LIST_HEAD(&cl->rd_completed); INIT_LIST_HEAD(&cl->rd_pending); INIT_LIST_HEAD(&cl->link); @@ -693,7 +720,7 @@ int mei_cl_unlink(struct mei_cl *cl) void mei_host_client_init(struct mei_device *dev) { - dev->dev_state = MEI_DEV_ENABLED; + mei_set_devstate(dev, MEI_DEV_ENABLED); dev->reset_count = 0; schedule_work(&dev->bus_rescan_work); @@ -774,8 +801,8 @@ static void mei_cl_set_disconnected(struct mei_cl *cl) return; cl->state = MEI_FILE_DISCONNECTED; - mei_io_tx_list_free_cl(&dev->write_list, cl); - mei_io_tx_list_free_cl(&dev->write_waiting_list, cl); + mei_io_tx_list_free_cl(&dev->write_list, cl, NULL); + 
mei_io_tx_list_free_cl(&dev->write_waiting_list, cl, NULL); mei_io_list_flush_cl(&dev->ctrl_rd_list, cl); mei_io_list_flush_cl(&dev->ctrl_wr_list, cl); mei_cl_wake_all(cl); @@ -1250,6 +1277,94 @@ static int mei_cl_tx_flow_ctrl_creds_reduce(struct mei_cl *cl) return 0; } +struct mei_cl_vtag *mei_cl_vtag_alloc(struct file *fp, u8 vtag) +{ + struct mei_cl_vtag *cl_vtag; + + cl_vtag = kzalloc(sizeof(*cl_vtag), GFP_KERNEL); + if (!cl_vtag) + return ERR_PTR(-ENOMEM); + + INIT_LIST_HEAD(&cl_vtag->list); + cl_vtag->vtag = vtag; + cl_vtag->fp = fp; + + return cl_vtag; +} + +const struct file *mei_cl_fp_by_vtag(const struct mei_cl *cl, u8 vtag) +{ + struct mei_cl_vtag *vtag_l; + + list_for_each_entry(vtag_l, &cl->vtag_map, list) + if (vtag_l->vtag == vtag) + return vtag_l->fp; + + return ERR_PTR(-ENOENT); +} + +static void mei_cl_reset_read_by_vtag(const struct mei_cl *cl, u8 vtag) +{ + struct mei_cl_vtag *vtag_l; + + list_for_each_entry(vtag_l, &cl->vtag_map, list) { + if (vtag_l->vtag == vtag) { + vtag_l->pending_read = false; + break; + } + } +} + +static void mei_cl_read_vtag_add_fc(struct mei_cl *cl) +{ + struct mei_cl_vtag *cl_vtag; + + list_for_each_entry(cl_vtag, &cl->vtag_map, list) { + if (cl_vtag->pending_read) { + if (mei_cl_enqueue_ctrl_wr_cb(cl, + mei_cl_mtu(cl), + MEI_FOP_READ, + cl_vtag->fp)) + cl->rx_flow_ctrl_creds++; + break; + } + } +} + +static int mei_cl_vm_support_check(struct mei_cl *cl) +{ + struct mei_device *dev = cl->dev; + + if (!dev->hbm_f_vm_supported) + return -EOPNOTSUPP; + + if (!cl->me_cl) + return 0; + + return cl->me_cl->props.vm_supported ? 
0 : -EOPNOTSUPP; +} + +void mei_cl_add_rd_completed(struct mei_cl *cl, struct mei_cl_cb *cb) +{ + const struct file *fp; + + if (!mei_cl_vm_support_check(cl)) { + fp = mei_cl_fp_by_vtag(cl, cb->vtag); + if (IS_ERR(fp)) { + /* client already disconnected, discarding */ + mei_io_cb_free(cb); + return; + } + cb->fp = fp; + mei_cl_reset_read_by_vtag(cl, cb->vtag); + mei_cl_read_vtag_add_fc(cl); + } + + spin_lock(&cl->rd_completed_lock); + list_add_tail(&cb->list, &cl->rd_completed); + spin_unlock(&cl->rd_completed_lock); +} + /** * mei_cl_notify_fop2req - convert fop to proper request * @@ -1374,7 +1489,9 @@ int mei_cl_notify_request(struct mei_cl *cl, mutex_unlock(&dev->device_lock); wait_event_timeout(cl->wait, - cl->notify_en == request || !mei_cl_is_connected(cl), + cl->notify_en == request || + cl->status || + !mei_cl_is_connected(cl), mei_secs_to_jiffies(MEI_CL_CONNECT_TIMEOUT)); mutex_lock(&dev->device_lock); @@ -1503,13 +1620,17 @@ int mei_cl_read_start(struct mei_cl *cl, size_t length, const struct file *fp) return 0; /* HW currently supports only one pending read */ - if (cl->rx_flow_ctrl_creds) + if (cl->rx_flow_ctrl_creds) { + mei_cl_set_read_by_fp(cl, fp); return -EBUSY; + } cb = mei_cl_enqueue_ctrl_wr_cb(cl, length, MEI_FOP_READ, fp); if (!cb) return -ENOMEM; + mei_cl_set_read_by_fp(cl, fp); + rets = pm_runtime_get(dev->dev); if (rets < 0 && rets != -EINPROGRESS) { pm_runtime_put_noidle(dev->dev); @@ -1543,16 +1664,29 @@ int mei_cl_read_start(struct mei_cl *cl, size_t length, const struct file *fp) * * @mei_hdr: mei message header * @cb: message callback structure + * + * Return: header length in bytes */ -static void mei_msg_hdr_init(struct mei_msg_hdr *mei_hdr, struct mei_cl_cb *cb) +static size_t mei_msg_hdr_init(struct mei_msg_hdr *mei_hdr, + struct mei_cl_cb *cb) { + size_t hdr_len = sizeof(*mei_hdr); + struct mei_msg_extd_hdr *ext_hdr; + + memset(mei_hdr, 0, sizeof(*mei_hdr)); mei_hdr->host_addr = mei_cl_host_addr(cb->cl); mei_hdr->me_addr = 
mei_cl_me_id(cb->cl); - mei_hdr->length = 0; - mei_hdr->reserved = 0; - mei_hdr->msg_complete = 0; - mei_hdr->dma_ring = 0; mei_hdr->internal = cb->internal; + + if (cb->vtag && cb->buf_idx == 0) { + ext_hdr = (struct mei_msg_extd_hdr *)mei_hdr->extension; + memset(ext_hdr, 0, sizeof(*ext_hdr)); + mei_hdr->extended = 1; + ext_hdr->vtag = cb->vtag; + hdr_len += sizeof(*ext_hdr); + } + + return hdr_len; } /** @@ -1570,13 +1704,17 @@ int mei_cl_irq_write(struct mei_cl *cl, struct mei_cl_cb *cb, { struct mei_device *dev; struct mei_msg_data *buf; - struct mei_msg_hdr mei_hdr; - size_t hdr_len = sizeof(mei_hdr); + u32 __hdr[MEI_MSG_HDR_MAX]; + struct mei_msg_hdr *mei_hdr = (void *)__hdr; + size_t hdr_len; size_t len; - size_t hbuf_len; + size_t hbuf_len, dr_len; int hbuf_slots; + u32 dr_slots; + u32 dma_len; int rets; bool first_chunk; + const void *data; if (WARN_ON(!cl || !cl->dev)) return -ENODEV; @@ -1597,6 +1735,7 @@ int mei_cl_irq_write(struct mei_cl *cl, struct mei_cl_cb *cb, } len = buf->size - cb->buf_idx; + data = buf->data + cb->buf_idx; hbuf_slots = mei_hbuf_empty_slots(dev); if (hbuf_slots < 0) { rets = -EOVERFLOW; @@ -1604,33 +1743,48 @@ int mei_cl_irq_write(struct mei_cl *cl, struct mei_cl_cb *cb, } hbuf_len = mei_slots2data(hbuf_slots); + dr_slots = mei_dma_ring_empty_slots(dev); + dr_len = mei_slots2data(dr_slots); - mei_msg_hdr_init(&mei_hdr, cb); + hdr_len = mei_msg_hdr_init(mei_hdr, cb); + + cl_dbg(dev, cl, "Extend Header %d vtag = %d\n", + mei_hdr->extended, cb->vtag); /** * Split the message only if we can write the whole host buffer * otherwise wait for next time the host buffer is empty. 
*/ if (len + hdr_len <= hbuf_len) { - mei_hdr.length = len; - mei_hdr.msg_complete = 1; + mei_hdr->length = len; + mei_hdr->msg_complete = 1; + } else if (dr_slots && hbuf_len >= hdr_len + sizeof(dma_len)) { + mei_hdr->dma_ring = 1; + if (len > dr_len) + len = dr_len; + else + mei_hdr->msg_complete = 1; + + mei_hdr->length = sizeof(dma_len); + dma_len = len; + data = &dma_len; } else if ((u32)hbuf_slots == mei_hbuf_depth(dev)) { - mei_hdr.length = hbuf_len - hdr_len; + len = hbuf_len - hdr_len; + mei_hdr->length = len; } else { return 0; } - cl_dbg(dev, cl, "buf: size = %zu idx = %zu\n", - cb->buf.size, cb->buf_idx); + if (mei_hdr->dma_ring) + mei_dma_ring_write(dev, buf->data + cb->buf_idx, len); - rets = mei_write_message(dev, &mei_hdr, hdr_len, - buf->data + cb->buf_idx, mei_hdr.length); + rets = mei_write_message(dev, mei_hdr, hdr_len, data, mei_hdr->length); if (rets) goto err; cl->status = 0; cl->writing_state = MEI_WRITING; - cb->buf_idx += mei_hdr.length; + cb->buf_idx += len; if (first_chunk) { if (mei_cl_tx_flow_ctrl_creds_reduce(cl)) { @@ -1639,7 +1793,7 @@ int mei_cl_irq_write(struct mei_cl *cl, struct mei_cl_cb *cb, } } - if (mei_hdr.msg_complete) + if (mei_hdr->msg_complete) list_move_tail(&cb->list, &dev->write_waiting_list); return 0; @@ -1663,13 +1817,16 @@ ssize_t mei_cl_write(struct mei_cl *cl, struct mei_cl_cb *cb) { struct mei_device *dev; struct mei_msg_data *buf; - struct mei_msg_hdr mei_hdr; - size_t hdr_len = sizeof(mei_hdr); - size_t len; - size_t hbuf_len; + u32 __hdr[MEI_MSG_HDR_MAX]; + struct mei_msg_hdr *mei_hdr = (void *)__hdr; + size_t hdr_len; + size_t len, hbuf_len, dr_len; int hbuf_slots; + u32 dr_slots; + u32 dma_len; ssize_t rets; bool blocking; + const void *data; if (WARN_ON(!cl || !cl->dev)) return -ENODEV; @@ -1681,10 +1838,12 @@ ssize_t mei_cl_write(struct mei_cl *cl, struct mei_cl_cb *cb) buf = &cb->buf; len = buf->size; - blocking = cb->blocking; cl_dbg(dev, cl, "len=%zd\n", len); + blocking = cb->blocking; + data = 
buf->data; + rets = pm_runtime_get(dev->dev); if (rets < 0 && rets != -EINPROGRESS) { pm_runtime_put_noidle(dev->dev); @@ -1700,7 +1859,10 @@ ssize_t mei_cl_write(struct mei_cl *cl, struct mei_cl_cb *cb) if (rets < 0) goto err; - mei_msg_hdr_init(&mei_hdr, cb); + hdr_len = mei_msg_hdr_init(mei_hdr, cb); + + cl_dbg(dev, cl, "Extend Header %d vtag = %d\n", + mei_hdr->extended, cb->vtag); if (rets == 0) { cl_dbg(dev, cl, "No flow control credentials: not sending.\n"); @@ -1721,16 +1883,32 @@ ssize_t mei_cl_write(struct mei_cl *cl, struct mei_cl_cb *cb) } hbuf_len = mei_slots2data(hbuf_slots); + dr_slots = mei_dma_ring_empty_slots(dev); + dr_len = mei_slots2data(dr_slots); if (len + hdr_len <= hbuf_len) { - mei_hdr.length = len; - mei_hdr.msg_complete = 1; + mei_hdr->length = len; + mei_hdr->msg_complete = 1; + } else if (dr_slots && hbuf_len >= hdr_len + sizeof(dma_len)) { + mei_hdr->dma_ring = 1; + if (len > dr_len) + len = dr_len; + else + mei_hdr->msg_complete = 1; + + mei_hdr->length = sizeof(dma_len); + dma_len = len; + data = &dma_len; } else { - mei_hdr.length = hbuf_len - hdr_len; + len = hbuf_len - hdr_len; + mei_hdr->length = len; } - rets = mei_write_message(dev, &mei_hdr, hdr_len, - buf->data, mei_hdr.length); + if (mei_hdr->dma_ring) + mei_dma_ring_write(dev, buf->data, len); + + rets = mei_write_message(dev, mei_hdr, hdr_len, + data, mei_hdr->length); if (rets) goto err; @@ -1739,10 +1917,12 @@ ssize_t mei_cl_write(struct mei_cl *cl, struct mei_cl_cb *cb) goto err; cl->writing_state = MEI_WRITING; - cb->buf_idx = mei_hdr.length; + cb->buf_idx = len; + /* restore return value */ + len = buf->size; out: - if (mei_hdr.msg_complete) + if (mei_hdr->msg_complete) mei_tx_cb_enqueue(cb, &dev->write_waiting_list); else mei_tx_cb_enqueue(cb, &dev->write_list); @@ -1802,7 +1982,7 @@ void mei_cl_complete(struct mei_cl *cl, struct mei_cl_cb *cb) break; case MEI_FOP_READ: - list_add_tail(&cb->list, &cl->rd_completed); + mei_cl_add_rd_completed(cl, cb); if 
(!mei_cl_is_fixed_address(cl) && !WARN_ON(!cl->rx_flow_ctrl_creds)) cl->rx_flow_ctrl_creds--; diff --git a/drivers/misc/mei/client.h b/drivers/misc/mei/client.h index 64e318f589b42..6c9986dfd5a09 100644 --- a/drivers/misc/mei/client.h +++ b/drivers/misc/mei/client.h @@ -79,6 +79,54 @@ static inline u8 mei_me_cl_ver(const struct mei_me_client *me_cl) return me_cl->props.protocol_version; } +/** + * mei_me_cl_max_conn - return me client max number of connections + * + * @me_cl: me client + * + * Return: me client max number of connections + */ +static inline u8 mei_me_cl_max_conn(const struct mei_me_client *me_cl) +{ + return me_cl->props.max_number_of_connections; +} + +/** + * mei_me_cl_fixed - return me client fixed address, if any + * + * @me_cl: me client + * + * Return: me client fixed address + */ +static inline u8 mei_me_cl_fixed(const struct mei_me_client *me_cl) +{ + return me_cl->props.fixed_address; +} + +/** + * mei_me_cl_vm - return me client vm supported status + * + * @me_cl: me client + * + * Return: true if me client supports vm tagging + */ +static inline bool mei_me_cl_vm(const struct mei_me_client *me_cl) +{ + return me_cl->props.vm_supported == 1; +} + +/** + * mei_me_cl_max_len - return me client max msg length + * + * @me_cl: me client + * + * Return: me client max msg length + */ +static inline u32 mei_me_cl_max_len(const struct mei_me_client *me_cl) +{ + return me_cl->props.max_msg_length; +} + /* * MEI IO Functions */ @@ -95,8 +143,17 @@ int mei_cl_unlink(struct mei_cl *cl); struct mei_cl *mei_cl_alloc_linked(struct mei_device *dev); -struct mei_cl_cb *mei_cl_read_cb(const struct mei_cl *cl, - const struct file *fp); +struct mei_cl_cb *mei_cl_read_cb(struct mei_cl *cl, const struct file *fp); +void mei_cl_add_rd_completed(struct mei_cl *cl, struct mei_cl_cb *cb); + +static inline void mei_cl_del_rd_completed(struct mei_cl *cl, + struct mei_cl_cb *cb) +{ + spin_lock(&cl->rd_completed_lock); + mei_io_cb_free(cb); + 
spin_unlock(&cl->rd_completed_lock); +} + struct mei_cl_cb *mei_cl_alloc_cb(struct mei_cl *cl, size_t length, enum mei_cb_file_ops type, const struct file *fp); @@ -105,6 +162,8 @@ struct mei_cl_cb *mei_cl_enqueue_ctrl_wr_cb(struct mei_cl *cl, size_t length, const struct file *fp); int mei_cl_flush_queues(struct mei_cl *cl, const struct file *fp); +struct mei_cl_vtag *mei_cl_vtag_alloc(struct file *fp, u8 vtag); +const struct file *mei_cl_fp_by_vtag(const struct mei_cl *cl, u8 vtag); /* * MEI input output function prototype */ diff --git a/drivers/misc/mei/dal/Kconfig b/drivers/misc/mei/dal/Kconfig new file mode 100644 index 0000000000000..8943d4f7f7e40 --- /dev/null +++ b/drivers/misc/mei/dal/Kconfig @@ -0,0 +1,15 @@ +config INTEL_MEI_DAL + tristate "Dynamic Application Loader for ME" + depends on INTEL_MEI + help + Dynamic Application Loader enables downloading java applets + to DAL FW and run it in a secure environment. + The DAL module exposes both user space api and kernel space api. + +config INTEL_MEI_DAL_TEST + tristate "Test Module for Dynamic Application Loader for ME" + depends on INTEL_MEI_DAL + help + Testing Module for Dynamic Application Loader, to test the + kernel space api from a user space client. The test module + calls the kernel space api functions of DAL module. 
diff --git a/drivers/misc/mei/dal/Makefile b/drivers/misc/mei/dal/Makefile new file mode 100644 index 0000000000000..fa3f8150131e2 --- /dev/null +++ b/drivers/misc/mei/dal/Makefile @@ -0,0 +1,14 @@ +# SPDX-License-Identifier: GPL-2.0 + +ccflags-y += -D__CHECK_ENDIAN__ + +obj-$(CONFIG_INTEL_MEI_DAL) += mei_dal.o +mei_dal-objs := dal_class.o +mei_dal-objs += acp_parser.o +mei_dal-objs += bh_external.o +mei_dal-objs += bh_internal.o +mei_dal-objs += dal_cdev.o +mei_dal-objs += dal_kdi.o +mei_dal-objs += dal_ta_access.o + +obj-$(CONFIG_INTEL_MEI_DAL_TEST) += dal_test.o diff --git a/drivers/misc/mei/dal/acp_format.h b/drivers/misc/mei/dal/acp_format.h new file mode 100644 index 0000000000000..7fdef34071522 --- /dev/null +++ b/drivers/misc/mei/dal/acp_format.h @@ -0,0 +1,198 @@ +/* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 */ +/* + * Copyright(c) 2016 - 2018 Intel Corporation. All rights reserved. + */ + +#ifndef _ACP_FORMAT_H +#define _ACP_FORMAT_H + +#include + +#define AC_MAX_INS_REASONS_LENGTH 1024 +#define AC_MAX_USED_SERVICES 20 +#define AC_MAX_PROPS_LENGTH 2048 +#define AC_MAX_PACK_HASH_LEN 32 + +/** + * enum ac_cmd_id - acp file command (acp type) + * + * @AC_CMD_INVALID: invalid command + * @AC_INSTALL_SD: install new sub security domain + * @AC_UNINSTALL_SD: uninstall sub security domain + * @AC_INSTALL_JTA: install java ta + * @AC_UNINSTALL_JTA: uninstall java ta + * @AC_INSTALL_NTA: install native ta (currently NOT SUPPORTED) + * @AC_UNINSTALL_NTA: uninstall native ta (currently NOT SUPPORTED) + * @AC_UPDATE_SVL: update the security version list + * @AC_INSTALL_JTA_PROP: ta properties for installation + * @AC_CMD_NUM: number of acp commands + */ +enum ac_cmd_id { + AC_CMD_INVALID, + AC_INSTALL_SD, + AC_UNINSTALL_SD, + AC_INSTALL_JTA, + AC_UNINSTALL_JTA, + AC_INSTALL_NTA, + AC_UNINSTALL_NTA, + AC_UPDATE_SVL, + AC_INSTALL_JTA_PROP, + AC_CMD_NUM +}; + +/** + * struct ac_pack_hash - ta pack hash + * + * @data: ta hash + */ +struct ac_pack_hash { + u8 
data[AC_MAX_PACK_HASH_LEN]; +} __packed; + +/** + * struct ac_pack_header - admin command pack header + * + * @magic: magic string which represents an ACP + * @version: package format version + * @byte_order: byte order of package (0 big endian, 1 little endian) + * @reserved: reserved bytes + * @size: total package size + * @cmd_id: acp command (acp file type) + * @svn: security version number + * + * @idx_num: the number of the indexed sections + * @idx_condition: condition section offset + * @idx_data: data section offset + */ +struct ac_pack_header { + /*ACP Header*/ + u8 magic[4]; + u8 version; + u8 byte_order; + u16 reserved; + u32 size; + u32 cmd_id; + u32 svn; + + /* Index Section */ + u32 idx_num; + u32 idx_condition; + u32 idx_data; +} __packed; + +/** + * struct ac_ta_id_list - A list of ta ids which the ta + * is allowed to communicate with. + * + * @num: ta ids count + * @list: ta ids list + */ +struct ac_ta_id_list { + u32 num; + uuid_t list[0]; +} __packed; + +/** + * struct ac_prop_list - TLV list of acp properties + * + * @num: number of properties + * @len: size of all properties + * @data: acp properties. TLV format is "type\0key\0value\0" + * (e.g.
string\0name\0Tom\0int\0Age\013\0) + */ +struct ac_prop_list { + u32 num; + u32 len; + s8 data[0]; +} __packed; + +/** + * struct ac_ins_reasons - list of event codes that can be + * received or posted by ta + * + * @len: event codes count + * @data: event codes list + */ +struct ac_ins_reasons { + u32 len; + u32 data[0]; +} __packed; + +/** + * struct ac_pack - general struct to hold parsed acp content + * + * @head: acp pack header + * @data: acp parsed content + */ +struct ac_pack { + struct ac_pack_header *head; + char data[0]; +} __packed; + +/** + * struct ac_ins_ta_header - ta installation header + * + * @ta_id: ta id + * @ta_svn: ta security version number + * @hash_alg_type: ta hash algorithm type + * @ta_reserved: reserved bytes + * @hash: ta pack hash + */ +struct ac_ins_ta_header { + uuid_t ta_id; + u32 ta_svn; + u8 hash_alg_type; + u8 ta_reserved[3]; + struct ac_pack_hash hash; +} __packed; + +/** + * struct ac_ins_jta_pack - ta installation information + * + * @ins_cond: ta install conditions (contains some of the manifest data, + * including security.version, applet.version, applet.platform, + * applet.api.level) + * @head: ta installation header + */ +struct ac_ins_jta_pack { + struct ac_prop_list *ins_cond; + struct ac_ins_ta_header *head; +} __packed; + +/** + * struct ac_ins_jta_prop_header - ta manifest header + * + * @mem_quota: ta heap size + * @ta_encrypted: ta encrypted by provider flag + * @padding: padding + * @allowed_inter_session_num: allowed internal session count + * @ac_groups: ta permission groups + * @timeout: ta timeout in milliseconds + */ +struct ac_ins_jta_prop_header { + u32 mem_quota; + u8 ta_encrypted; + u8 padding; + u16 allowed_inter_session_num; + u64 ac_groups; + u32 timeout; +} __packed; + +/** + * struct ac_ins_jta_prop - ta manifest + * + * @head: manifest header + * @post_reasons: list of event codes that can be posted by ta + * @reg_reasons: list of event codes that can be received by ta + * @prop: all other 
manifest fields (acp properties) + * @used_service_list: list of ta ids which ta is allowed to communicate with + */ +struct ac_ins_jta_prop { + struct ac_ins_jta_prop_header *head; + struct ac_ins_reasons *post_reasons; + struct ac_ins_reasons *reg_reasons; + struct ac_prop_list *prop; + struct ac_ta_id_list *used_service_list; +} __packed; + +#endif /* _ACP_FORMAT_H */ diff --git a/drivers/misc/mei/dal/acp_parser.c b/drivers/misc/mei/dal/acp_parser.c new file mode 100644 index 0000000000000..462bd48606924 --- /dev/null +++ b/drivers/misc/mei/dal/acp_parser.c @@ -0,0 +1,507 @@ +// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 +/* + * Copyright(c) 2016 - 2018 Intel Corporation. All rights reserved. + */ + +#include +#include + +#include "acp_format.h" +#include "acp_parser.h" + +/* CSS Header + CSS Crypto Block + * Prefixes each signed ACP package + */ +#define AC_CSS_HEADER_LENGTH (128 + 520) + +/** + * struct ac_pr_state - admin command pack reader state + * + * @cur : current read position + * @head : acp file head + * @total : size of acp file + */ +struct ac_pr_state { + const char *cur; + const char *head; + unsigned int total; +}; + +/** + * ac_pr_init - init pack reader + * + * @pr: pack reader + * @data: acp file content (without CSS header) + * @n: acp file size (without CSS header) + * + * Return: 0 on success + * -EINVAL on invalid parameters + */ +static int ac_pr_init(struct ac_pr_state *pr, const char *data, + unsigned int n) +{ + /* check integer overflow */ + if ((size_t)data > SIZE_MAX - n) + return -EINVAL; + + pr->cur = data; + pr->head = data; + pr->total = n; + return 0; +} + +/** + * ac_pr_8b_align_move - update pack reader cur pointer after reading n_move + * bytes. Leave cur aligned to 8 bytes. + * (e.g. 
when n_move is 3, increase cur by 8) + * + * @pr: pack reader + * @n_move: number of bytes to move cur pointer ahead + * will be rounded up to keep cur 8 bytes aligned + * + * Return: 0 on success + * -EINVAL on invalid parameters + */ +static int ac_pr_8b_align_move(struct ac_pr_state *pr, size_t n_move) +{ + unsigned long offset; + const char *new_cur = pr->cur + n_move; + size_t len_from_head = new_cur - pr->head; + + if ((size_t)pr->cur > SIZE_MAX - n_move || new_cur < pr->head) + return -EINVAL; + + offset = ((8 - (len_from_head & 7)) & 7); + if ((size_t)new_cur > SIZE_MAX - offset) + return -EINVAL; + + new_cur = new_cur + offset; + if (new_cur > pr->head + pr->total) + return -EINVAL; + + pr->cur = new_cur; + return 0; +} + +/** + * ac_pr_align_move - update pack reader cur pointer after reading n_move bytes + * Leave cur aligned to 4 bytes. + * (e.g. when n_move is 1, increase cur by 4) + * + * @pr: pack reader + * @n_move: number of bytes to move cur pointer ahead + * will be rounded up to keep cur 4 bytes aligned + * + * Return: 0 on success + * -EINVAL on invalid parameters + */ +static int ac_pr_align_move(struct ac_pr_state *pr, size_t n_move) +{ + const char *new_cur = pr->cur + n_move; + size_t len_from_head = new_cur - pr->head; + size_t offset; + + if ((size_t)pr->cur > SIZE_MAX - n_move || new_cur < pr->head) + return -EINVAL; + + offset = ((4 - (len_from_head & 3)) & 3); + if ((size_t)new_cur > SIZE_MAX - offset) + return -EINVAL; + + new_cur = new_cur + offset; + if (new_cur > pr->head + pr->total) + return -EINVAL; + + pr->cur = new_cur; + return 0; +} + +/** + * ac_pr_move - update pack reader cur pointer after reading n_move bytes + * + * @pr: pack reader + * @n_move: number of bytes to move cur pointer ahead + * + * Return: 0 on success + * -EINVAL on invalid parameters + */ +static int ac_pr_move(struct ac_pr_state *pr, size_t n_move) +{ + const char *new_cur = pr->cur + n_move; + + /* integer overflow or out of acp pkg size */ + if 
((size_t)pr->cur > SIZE_MAX - n_move || + new_cur > pr->head + pr->total) + return -EINVAL; + + pr->cur = new_cur; + + return 0; +} + +/** + * ac_pr_is_safe_to_read - check whether it is safe to read more n_move + * bytes from the acp file + * + * @pr: pack reader + * @n_move: number of bytes to check if it is safe to read + * + * Return: true when it is safe to read more n_move bytes + * false otherwise + */ +static bool ac_pr_is_safe_to_read(const struct ac_pr_state *pr, size_t n_move) +{ + /* pointer overflow */ + if ((size_t)pr->cur > SIZE_MAX - n_move) + return false; + + if (pr->cur + n_move > pr->head + pr->total) + return false; + + return true; +} + +/** + * ac_pr_is_end - check if cur is at the end of the acp file + * + * @pr: pack reader + * + * Return: true when cur is at the end of the acp + * false otherwise + */ +static bool ac_pr_is_end(const struct ac_pr_state *pr) +{ + return (pr->cur == pr->head + pr->total); +} + +/** + * acp_load_reasons - load list of event codes that can be + * received or posted by ta + * + * @pr: pack reader + * @reasons: out param to hold the list of event codes + * + * Return: 0 on success + * -EINVAL on invalid parameters + */ +static int acp_load_reasons(struct ac_pr_state *pr, + struct ac_ins_reasons **reasons) +{ + size_t len; + struct ac_ins_reasons *r; + + if (!ac_pr_is_safe_to_read(pr, sizeof(*r))) + return -EINVAL; + + r = (struct ac_ins_reasons *)pr->cur; + + if (r->len > AC_MAX_INS_REASONS_LENGTH) + return -EINVAL; + + len = sizeof(*r) + r->len * sizeof(r->data[0]); + if (!ac_pr_is_safe_to_read(pr, len)) + return -EINVAL; + + *reasons = r; + return ac_pr_align_move(pr, len); +} + +/** + * acp_load_taid_list - load list of ta ids which ta is allowed + * to communicate with + * + * @pr: pack reader + * @taid_list: out param to hold the loaded ta ids + * + * Return: 0 on success + * -EINVAL on invalid parameters + */ +static int acp_load_taid_list(struct ac_pr_state *pr, + struct ac_ta_id_list **taid_list) +{ + 
size_t len; + struct ac_ta_id_list *t; + + if (!ac_pr_is_safe_to_read(pr, sizeof(*t))) + return -EINVAL; + + t = (struct ac_ta_id_list *)pr->cur; + if (t->num > AC_MAX_USED_SERVICES) + return -EINVAL; + + len = sizeof(*t) + t->num * sizeof(t->list[0]); + + if (!ac_pr_is_safe_to_read(pr, len)) + return -EINVAL; + + *taid_list = t; + return ac_pr_align_move(pr, len); +} + +/** + * acp_load_prop - load property from acp + * + * @pr: pack reader + * @prop: out param to hold the loaded property + * + * Return: 0 on success + * -EINVAL on invalid parameters + */ +static int acp_load_prop(struct ac_pr_state *pr, struct ac_prop_list **prop) +{ + size_t len; + struct ac_prop_list *p; + + if (!ac_pr_is_safe_to_read(pr, sizeof(*p))) + return -EINVAL; + + p = (struct ac_prop_list *)pr->cur; + if (p->len > AC_MAX_PROPS_LENGTH) + return -EINVAL; + + len = sizeof(*p) + p->len * sizeof(p->data[0]); + + if (!ac_pr_is_safe_to_read(pr, len)) + return -EINVAL; + + *prop = p; + return ac_pr_align_move(pr, len); +} + +/** + * acp_load_ta_pack - load ta pack from acp + * + * @pr: pack reader + * @ta_pack: out param to hold the ta pack + * + * Return: 0 on success + * -EINVAL on invalid parameters + */ +static int acp_load_ta_pack(struct ac_pr_state *pr, char **ta_pack) +{ + size_t len; + char *t; + + /*8 byte align to obey jeff rule*/ + if (ac_pr_8b_align_move(pr, 0)) + return -EINVAL; + + t = (char *)pr->cur; + + /* + *assume ta pack is the last item of one package, + *move cursor to the end directly + */ + if (pr->cur > pr->head + pr->total) + return -EINVAL; + + len = pr->head + pr->total - pr->cur; + if (!ac_pr_is_safe_to_read(pr, len)) + return -EINVAL; + + *ta_pack = t; + return ac_pr_move(pr, len); +} + +/** + * acp_load_ins_jta_prop_head - load ta manifest header + * + * @pr: pack reader + * @head: out param to hold manifest header + * + * Return: 0 on success + * -EINVAL on invalid parameters + */ +static int acp_load_ins_jta_prop_head(struct ac_pr_state *pr, + struct 
ac_ins_jta_prop_header **head) +{ + if (!ac_pr_is_safe_to_read(pr, sizeof(**head))) + return -EINVAL; + + *head = (struct ac_ins_jta_prop_header *)pr->cur; + return ac_pr_align_move(pr, sizeof(**head)); +} + +/** + * acp_load_ins_jta_prop - load ta properties information (ta manifest) + * + * @pr: pack reader + * @pack: out param to hold ta manifest + * + * Return: 0 on success + * -EINVAL on invalid parameters + */ +static int acp_load_ins_jta_prop(struct ac_pr_state *pr, + struct ac_ins_jta_prop *pack) +{ + int ret; + + ret = acp_load_ins_jta_prop_head(pr, &pack->head); + if (ret) + return ret; + + ret = acp_load_reasons(pr, &pack->post_reasons); + if (ret) + return ret; + + ret = acp_load_reasons(pr, &pack->reg_reasons); + if (ret) + return ret; + + ret = acp_load_prop(pr, &pack->prop); + if (ret) + return ret; + + ret = acp_load_taid_list(pr, &pack->used_service_list); + + return ret; +} + +/** + * acp_load_ins_jta_head - load ta installation header + * + * @pr: pack reader + * @head: out param to hold the installation header + * + * Return: 0 on success + * -EINVAL on invalid parameters + */ +static int acp_load_ins_jta_head(struct ac_pr_state *pr, + struct ac_ins_ta_header **head) +{ + if (!ac_pr_is_safe_to_read(pr, sizeof(**head))) + return -EINVAL; + + *head = (struct ac_ins_ta_header *)pr->cur; + return ac_pr_align_move(pr, sizeof(**head)); +} + +/** + * acp_load_ins_jta - load ta installation information from acp + * + * @pr: pack reader + * @pack: out param to hold install information + * + * Return: 0 on success + * -EINVAL on invalid parameters + */ +static int acp_load_ins_jta(struct ac_pr_state *pr, + struct ac_ins_jta_pack *pack) +{ + int ret; + + ret = acp_load_prop(pr, &pack->ins_cond); + if (ret) + return ret; + + ret = acp_load_ins_jta_head(pr, &pack->head); + + return ret; +} + +/** + * acp_load_pack_head - load acp pack header + * + * @pr: pack reader + * @head: out param to hold the acp header + * + * Return: 0 on success + * -EINVAL on 
invalid parameters + */ +static int acp_load_pack_head(struct ac_pr_state *pr, + struct ac_pack_header **head) +{ + if (!ac_pr_is_safe_to_read(pr, sizeof(**head))) + return -EINVAL; + + *head = (struct ac_pack_header *)pr->cur; + return ac_pr_align_move(pr, sizeof(**head)); +} + +/** + * acp_load_pack - load and parse pack from acp file + * + * @raw_pack: acp file content, without the acp CSS header + * @size: acp file size (without CSS header) + * @cmd_id: command id + * @pack: out param to hold the loaded pack + * + * Return: 0 on success + * -EINVAL on invalid parameters + */ +static int acp_load_pack(const char *raw_pack, unsigned int size, + unsigned int cmd_id, struct ac_pack *pack) +{ + int ret; + struct ac_pr_state pr; + struct ac_ins_jta_pack_ext *pack_ext; + struct ac_ins_jta_prop_ext *prop_ext; + + ret = ac_pr_init(&pr, raw_pack, size); + if (ret) + return ret; + + if (cmd_id != AC_INSTALL_JTA_PROP) { + ret = acp_load_pack_head(&pr, &pack->head); + if (ret) + return ret; + } + + if (cmd_id != AC_INSTALL_JTA_PROP && cmd_id != pack->head->cmd_id) + return -EINVAL; + + switch (cmd_id) { + case AC_INSTALL_JTA: + pack_ext = (struct ac_ins_jta_pack_ext *)pack; + ret = acp_load_ins_jta(&pr, &pack_ext->cmd_pack); + if (ret) + break; + ret = acp_load_ta_pack(&pr, &pack_ext->ta_pack); + break; + case AC_INSTALL_JTA_PROP: + prop_ext = (struct ac_ins_jta_prop_ext *)pack; + ret = acp_load_ins_jta_prop(&pr, &prop_ext->cmd_pack); + if (ret) + break; + /* Note: the next section is JEFF file, + * and not ta_pack(JTA_properties+JEFF file), + * but we could reuse the ACP_load_ta_pack() here. 
+ */ + ret = acp_load_ta_pack(&pr, &prop_ext->jeff_pack); + break; + default: + return -EINVAL; + } + + if (!ac_pr_is_end(&pr)) + return -EINVAL; + + return ret; +} + +/** + * acp_pload_ins_jta - load and parse ta pack from acp file + * + * Exported function in acp parser API + * + * @raw_data: acp file content + * @size: acp file size + * @pack: out param to hold the ta pack + * + * Return: 0 on success + * -EINVAL on invalid parameters + */ +int acp_pload_ins_jta(const void *raw_data, unsigned int size, + struct ac_ins_jta_pack_ext *pack) +{ + int ret; + + if (!raw_data || size <= AC_CSS_HEADER_LENGTH || !pack) + return -EINVAL; + + ret = acp_load_pack((const char *)raw_data + AC_CSS_HEADER_LENGTH, + size - AC_CSS_HEADER_LENGTH, + AC_INSTALL_JTA, (struct ac_pack *)pack); + + return ret; +} diff --git a/drivers/misc/mei/dal/acp_parser.h b/drivers/misc/mei/dal/acp_parser.h new file mode 100644 index 0000000000000..1de625de54f28 --- /dev/null +++ b/drivers/misc/mei/dal/acp_parser.h @@ -0,0 +1,38 @@ +/* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 */ +/* + * Copyright(c) 2016 - 2018 Intel Corporation. All rights reserved. 
+ */ +#ifndef _ACP_PARSER_H +#define _ACP_PARSER_H + +#include "acp_format.h" + +/** + * struct ac_ins_jta_pack_ext - parsed ta pack from acp file + * + * @head: acp pack header + * @cmd_pack: ta installation information pack + * @ta_pack: raw ta pack + */ +struct ac_ins_jta_pack_ext { + struct ac_pack_header *head; + struct ac_ins_jta_pack cmd_pack; + char *ta_pack; +} __packed; + +/** + * struct ac_ins_jta_prop_ext - parsed ta properties information + * from acp file + * + * @cmd_pack: ta installation properties pack + * @jeff_pack: ta jeff pack + */ +struct ac_ins_jta_prop_ext { + struct ac_ins_jta_prop cmd_pack; + char *jeff_pack; +} __packed; + +int acp_pload_ins_jta(const void *raw_data, unsigned int size, + struct ac_ins_jta_pack_ext *pack); + +#endif /* _ACP_PARSER_H */ diff --git a/drivers/misc/mei/dal/bh_cmd_defs.h b/drivers/misc/mei/dal/bh_cmd_defs.h new file mode 100644 index 0000000000000..7ced1a26c2a6f --- /dev/null +++ b/drivers/misc/mei/dal/bh_cmd_defs.h @@ -0,0 +1,238 @@ +/* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 */ +/* + * Copyright(c) 2016 - 2018 Intel Corporation. All rights reserved. 
+ */ + +#ifndef __BH_DAL_H_ +#define __BH_DAL_H_ + +#include +#include + +/** + * enum bh_command_id - bh command ids + * + * @BHP_CMD_INIT: init command + * @BHP_CMD_DEINIT: deinit command + * @BHP_CMD_VERIFY_JAVATA: verify ta + * @BHP_CMD_DOWNLOAD_JAVATA: download ta to DAL + * @BHP_CMD_OPEN_JTASESSION: open session to ta + * @BHP_CMD_CLOSE_JTASESSION: close session with ta + * @BHP_CMD_FORCECLOSE_JTASESSION: force close session + * @BHP_CMD_SENDANDRECV: send and receive massages to ta + * @BHP_CMD_SENDANDRECV_INTERNAL: internal send and receive + * @BHP_CMD_RUN_NATIVETA: run native trusted application + * (currently NOT SUPPORTED) + * @BHP_CMD_STOP_NATIVETA: stop running native ta (currently NOT SUPPORTED) + * @BHP_CMD_OPEN_SDSESSION: open security domain session + * @BHP_CMD_CLOSE_SDSESSION: close security domain session + * @BHP_CMD_INSTALL_SD: install new sub security domain + * @BHP_CMD_UNINSTALL_SD: uninstall sub security domain + * @BHP_CMD_INSTALL_JAVATA: install java ta + * @BHP_CMD_UNINSTALL_JAVATA: uninstall java ta + * @BHP_CMD_INSTALL_NATIVETA: install native ta (currently NOT SUPPORTED) + * @BHP_CMD_UNINSTALL_NATIVETA: uninstall native ta (currently NOT SUPPORTED) + * @BHP_CMD_LIST_SD: get list of all security domains + * @BHP_CMD_LIST_TA: get list of all installed trusted applications + * @BHP_CMD_RESET: reset command + * @BHP_CMD_LIST_TA_PROPERTIES: get list of all ta properties (ta manifest) + * @BHP_CMD_QUERY_TA_PROPERTY: query specified ta property + * @BHP_CMD_LIST_JTA_SESSIONS: get list of all opened ta sessions + * @BHP_CMD_LIST_TA_PACKAGES: get list of all ta packages in DAL + * @BHP_CMD_GET_ISD: get Intel security domain uuid + * @BHP_CMD_GET_SD_BY_TA: get security domain id of ta + * @BHP_CMD_LAUNCH_VM: lunch IVM + * @BHP_CMD_CLOSE_VM: close IVM + * @BHP_CMD_QUERY_NATIVETA_STATUS: query specified native ta status + * (currently NOT SUPPORTED) + * @BHP_CMD_QUERY_SD_STATUS: query specified security domain status + * 
@BHP_CMD_LIST_DOWNLOADED_NTA: get list of all native trusted applications + * (currently NOT SUPPORTED) + * @BHP_CMD_UPDATE_SVL: update security version list + * @BHP_CMD_CHECK_SVL_TA_BLOCKED_STATE: check if ta security version is blocked + * @BHP_CMD_QUERY_TEE_METADATA: get DAL metadata (including api_level, + * library_version, dal_key_hash and more) + * + * @BHP_CMD_MAX: max command id + */ + +enum bh_command_id { + BHP_CMD_INIT = 0, + BHP_CMD_DEINIT, + BHP_CMD_VERIFY_JAVATA, + BHP_CMD_DOWNLOAD_JAVATA, + BHP_CMD_OPEN_JTASESSION, + BHP_CMD_CLOSE_JTASESSION, + BHP_CMD_FORCECLOSE_JTASESSION, + BHP_CMD_SENDANDRECV, + BHP_CMD_SENDANDRECV_INTERNAL, + BHP_CMD_RUN_NATIVETA, + BHP_CMD_STOP_NATIVETA, + BHP_CMD_OPEN_SDSESSION, + BHP_CMD_CLOSE_SDSESSION, + BHP_CMD_INSTALL_SD, + BHP_CMD_UNINSTALL_SD, + BHP_CMD_INSTALL_JAVATA, + BHP_CMD_UNINSTALL_JAVATA, + BHP_CMD_INSTALL_NATIVETA, + BHP_CMD_UNINSTALL_NATIVETA, + BHP_CMD_LIST_SD, + BHP_CMD_LIST_TA, + BHP_CMD_RESET, + BHP_CMD_LIST_TA_PROPERTIES, + BHP_CMD_QUERY_TA_PROPERTY, + BHP_CMD_LIST_JTA_SESSIONS, + BHP_CMD_LIST_TA_PACKAGES, + BHP_CMD_GET_ISD, + BHP_CMD_GET_SD_BY_TA, + BHP_CMD_LAUNCH_VM, + BHP_CMD_CLOSE_VM, + BHP_CMD_QUERY_NATIVETA_STATUS, + BHP_CMD_QUERY_SD_STATUS, + BHP_CMD_LIST_DOWNLOADED_NTA, + BHP_CMD_UPDATE_SVL, + BHP_CMD_CHECK_SVL_TA_BLOCKED_STATE, + BHP_CMD_QUERY_TEE_METADATA, + BHP_CMD_MAX +}; + +#define BH_MSG_RESP_MAGIC 0x55aaa5ff +#define BH_MSG_CMD_MAGIC 0x55aaa3ff + +/** + * struct bh_msg_header - transport header + * + * @magic: BH_MSG_RESP/CMD_MAGIC + * @length: overall message length + */ +struct bh_msg_header { + u32 magic; + u32 length; +}; + +/** + * struct bh_command_header - bh command header + * + * @h: transport header + * @seq: message sequence number + * @id: the command id (enum bh_command_id) + * @pad: padded for 64 bit + * @cmd: command buffer + */ +struct bh_command_header { + struct bh_msg_header h; + u64 seq; + u32 id; + u8 pad[4]; + s8 cmd[0]; +} __packed; + +/** + * struct 
bh_response_header - response header (from the DAL) + * + * @h: transport header + * @seq: message sequence number + * @ta_session_id: session id (DAL firmware address) + * @code: response code + * @pad: padded for 64 bit + * @data: response buffer + */ +struct bh_response_header { + struct bh_msg_header h; + u64 seq; + u64 ta_session_id; + s32 code; + u8 pad[4]; + s8 data[0]; +} __packed; + +/** + * struct bh_download_jta_cmd - download java trusted application. + * + * @ta_id: trusted application (ta) id + * @ta_blob: trusted application blob + */ +struct bh_download_jta_cmd { + uuid_t ta_id; + s8 ta_blob[0]; +} __packed; + +/** + * struct bh_open_jta_session_cmd - open session to TA command + * + * @ta_id: trusted application (ta) id + * @buffer: session initial parameters (optional) + */ +struct bh_open_jta_session_cmd { + uuid_t ta_id; + s8 buffer[0]; +} __packed; + +/** + * struct bh_close_jta_session_cmd - close session to TA command + * + * @ta_session_id: session id + */ +struct bh_close_jta_session_cmd { + u64 ta_session_id; +} __packed; + +/** + * struct bh_cmd - bh command + * + * @ta_session_id: session id + * @command: command id to ta + * @outlen: length of output buffer + * @buffer: data to send + */ +struct bh_cmd { + u64 ta_session_id; + s32 command; + u32 outlen; + s8 buffer[0]; +} __packed; + +/** + * struct bh_check_svl_ta_blocked_state_cmd - command to check if + * the trusted application security version is blocked + * + * @ta_id: trusted application id + */ +struct bh_check_svl_jta_blocked_state_cmd { + uuid_t ta_id; +} __packed; + +/** + * struct bh_resp - bh response + * + * @response: response code. Originated from java in big endian format + * @buffer: response buffer + */ +struct bh_resp { + __be32 response; + s8 buffer[0]; +} __packed; + +/** + * struct bh_resp_bof - response when output buffer is too small + * + * @response: response code. 
Originated from java in big endian format + * @request_length: the needed output buffer length + */ +struct bh_resp_bof { + __be32 response; + __be32 request_length; +} __packed; + +/** + * struct bh_resp_list_ta_packages - list of ta packages from DAL + * + * @count: count of ta packages + * @ta_ids: ta packages ids + */ +struct bh_resp_list_ta_packages { + u32 count; + uuid_t ta_ids[0]; +} __packed; + +#endif /* __BH_DAL_H_*/ diff --git a/drivers/misc/mei/dal/bh_errcode.h b/drivers/misc/mei/dal/bh_errcode.h new file mode 100644 index 0000000000000..145dbc3b9ae49 --- /dev/null +++ b/drivers/misc/mei/dal/bh_errcode.h @@ -0,0 +1,153 @@ +/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ +/* + * Copyright(c) 2016 - 2018 Intel Corporation. All rights reserved. + */ + +#ifndef __BH_ERRCODE_H +#define __BH_ERRCODE_H + +/* + * BH Error codes numbers across Beihai Host and Firmware. + */ + +#define BH_SUCCESS 0x000 + +/* BHP specific error code section */ + +#define BPE_NOT_INIT 0x001 +#define BPE_SERVICE_UNAVAILABLE 0x002 +#define BPE_INTERNAL_ERROR 0x003 +#define BPE_COMMS_ERROR 0x004 +#define BPE_OUT_OF_MEMORY 0x005 +#define BPE_INVALID_PARAMS 0x006 +#define BPE_MESSAGE_TOO_SHORT 0x007 +#define BPE_MESSAGE_ILLEGAL 0x008 +#define BPE_NO_CONNECTION_TO_FIRMWARE 0x009 +#define BPE_NOT_IMPLEMENT 0x00A +#define BPE_OUT_OF_RESOURCE 0x00B +#define BPE_INITIALIZED_ALREADY 0x00C +#define BPE_CONNECT_FAILED 0x00D + +/* General error code section for Beihai on FW: 0x100 */ + +#define BHE_OUT_OF_MEMORY 0x101 +#define BHE_BAD_PARAMETER 0x102 +#define BHE_INSUFFICIENT_BUFFER 0x103 +#define BHE_MUTEX_INIT_FAIL 0x104 +#define BHE_COND_INIT_FAIL 0x105 +#define BHE_WD_TIMEOUT 0x106 +#define BHE_FAILED 0x107 +#define BHE_INVALID_HANDLE 0x108 +#define BHE_IPC_ERR_DEFAULT 0x109 +#define BHE_IPC_ERR_PLATFORM 0x10A +#define BHE_IPC_SRV_INIT_FAIL 0x10B + +/* VM communication error code section: 0x200 */ + +#define BHE_MAILBOX_NOT_FOUND 0x201 +#define BHE_APPLET_CRASHED 
BHE_MAILBOX_NOT_FOUND +#define BHE_MSG_QUEUE_IS_FULL 0x202 +#define BHE_MAILBOX_DENIED 0x203 + +/* VM InternalAppletCommunication error 0x240 */ + +#define BHE_IAC_INTERNAL_SESSION_NUM_EXCEED 0x241 +#define BHE_IAC_CLIENT_SLOT_FULL 0x242 +#define BHE_IAC_SERVICETA_EXITED 0x243 +#define BHE_IAC_EXIST_INTERNAL_SESSION 0x244 +#define BHE_IAC_SERVICETA_UNCAUGHT_EXCEPTION 0x245 +#define BHE_IAC_SERVICE_SESSION_NOT_FOUND 0x246 +#define BHE_IAC_SERVICE_HOST_SESSION_NUM_EXCEED 0x247 + +/* Firmware thread/mutex error code section: 0x280 */ +#define BHE_THREAD_ERROR 0x281 +#define BHE_THREAD_TIMED_OUT 0x282 + +/* Applet manager error code section: 0x300 */ + +#define BHE_LOAD_JEFF_FAIL 0x303 +#define BHE_PACKAGE_NOT_FOUND 0x304 +#define BHE_EXIST_LIVE_SESSION 0x305 +#define BHE_VM_INSTANCE_INIT_FAIL 0x306 +#define BHE_QUERY_PROP_NOT_SUPPORT 0x307 +#define BHE_INVALID_BPK_FILE 0x308 +#define BHE_PACKAGE_EXIST 0x309 +#define BHE_VM_INSTNACE_NOT_FOUND 0x312 +#define BHE_STARTING_JDWP_FAIL 0x313 +#define BHE_GROUP_CHECK_FAIL 0x314 +#define BHE_SDID_UNMATCH 0x315 +#define BHE_APPPACK_UNINITED 0x316 +#define BHE_SESSION_NUM_EXCEED 0x317 +#define BHE_TA_PACKAGE_HASH_VERIFY_FAIL 0x318 +#define BHE_SWITCH_ISD 0x319 +#define BHE_OPERATION_NOT_PERMITTED 0x31A + +/* VM Applet instance error code section: 0x400 */ +#define BHE_APPLET_GENERIC 0x400 +#define BHE_UNCAUGHT_EXCEPTION 0x401 +/* Bad parameters to applet */ +#define BHE_APPLET_BAD_PARAMETER 0x402 +/* Small response buffer */ +#define BHE_APPLET_SMALL_BUFFER 0x403 +/* Bad state */ +#define BHE_BAD_STATE 0x404 + +/*TODO: Should be removed these UI error code when integrate with ME 9 */ +#define BHE_UI_EXCEPTION 0x501 +#define BHE_UI_ILLEGAL_USE 0x502 +#define BHE_UI_ILLEGAL_PARAMETER 0x503 +#define BHE_UI_NOT_INITIALIZED 0x504 +#define BHE_UI_NOT_SUPPORTED 0x505 +#define BHE_UI_OUT_OF_RESOURCES 0x506 + +/* BeiHai VMInternalError code section: 0x600 */ +#define BHE_UNKNOWN 0x602 +#define BHE_MAGIC_UNMATCH 0x603 +#define 
BHE_UNIMPLEMENTED 0x604 +#define BHE_INTR 0x605 +#define BHE_CLOSED 0x606 +/* TODO: no used error, should remove*/ +#define BHE_BUFFER_OVERFLOW 0x607 +#define BHE_NOT_SUPPORTED 0x608 +#define BHE_WEAR_OUT_VIOLATION 0x609 +#define BHE_NOT_FOUND 0x610 +#define BHE_INVALID_PARAMS 0x611 +#define BHE_ACCESS_DENIED 0x612 +#define BHE_INVALID 0x614 +#define BHE_TIMEOUT 0x615 + +/* SDM specific error code section: 0x800 */ +#define BHE_SDM_FAILED 0x800 +#define BHE_SDM_NOT_FOUND 0x801 +#define BHE_SDM_ALREADY_EXIST 0x803 +#define BHE_SDM_TATYPE_MISMATCH 0x804 +#define BHE_SDM_TA_NUMBER_LIMIT 0x805 +#define BHE_SDM_SIGNAGURE_VERIFY_FAIL 0x806 +#define BHE_SDM_PERMGROUP_CHECK_FAIL 0x807 +#define BHE_SDM_INSTALL_CONDITION_FAIL 0x808 +#define BHE_SDM_SVN_CHECK_FAIL 0x809 +#define BHE_SDM_TA_DB_NO_FREE_SLOT 0x80A +#define BHE_SDM_SD_DB_NO_FREE_SLOT 0x80B +#define BHE_SDM_SVL_DB_NO_FREE_SLOT 0x80C +#define BHE_SDM_SVL_CHECK_FAIL 0x80D +#define BHE_SDM_DB_READ_FAIL 0x80E +#define BHE_SDM_DB_WRITE_FAIL 0x80F + +/* Launcher specific error code section: 0x900 */ +#define BHE_LAUNCHER_INIT_FAILED 0x901 +#define BHE_SD_NOT_INSTALLED 0x902 +#define BHE_NTA_NOT_INSTALLED 0x903 +#define BHE_PROCESS_SPAWN_FAILED 0x904 +#define BHE_PROCESS_KILL_FAILED 0x905 +#define BHE_PROCESS_ALREADY_RUNNING 0x906 +#define BHE_PROCESS_IN_TERMINATING 0x907 +#define BHE_PROCESS_NOT_EXIST 0x908 +#define BHE_PLATFORM_API_ERR 0x909 +#define BHE_PROCESS_NUM_EXCEED 0x09A + +/* + * BeihaiHAL Layer error code section: + * 0x1000,0x2000 reserved here, defined in CSG BeihaiStatusHAL.h + */ + +#endif /* __BH_ERRCODE_H */ diff --git a/drivers/misc/mei/dal/bh_external.c b/drivers/misc/mei/dal/bh_external.c new file mode 100644 index 0000000000000..0679728266a42 --- /dev/null +++ b/drivers/misc/mei/dal/bh_external.c @@ -0,0 +1,477 @@ +// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause +/* + * Copyright(c) 2016 - 2018 Intel Corporation. All rights reserved. 
+ */ + +#include +#include +#include +#include + +#include "bh_errcode.h" +#include "bh_external.h" +#include "bh_internal.h" + +/** + * uuid_is_valid_hyphenless - check if uuid is valid in hyphenless format + * + * @uuid_str: uuid string + * + * Return: true when uuid is valid in hyphenless format + * false when uuid is invalid + */ +static bool uuid_is_valid_hyphenless(const char *uuid_str) +{ + unsigned int i; + + /* exclude (i == 8 || i == 13 || i == 18 || i == 23) */ + for (i = 0; i < UUID_STRING_LEN - 4; i++) + if (!isxdigit(uuid_str[i])) + return false; + + return true; +} + +/** + * uuid_normalize_hyphenless - convert uuid from hyphenless format + * to standard format + * + * @uuid_hl: uuid string in hyphenless format + * @uuid_str: output param to hold uuid string in standard format + */ +static void uuid_normalize_hyphenless(const char *uuid_hl, char *uuid_str) +{ + unsigned int i; + + for (i = 0; i < UUID_STRING_LEN; i++) { + if (i == 8 || i == 13 || i == 18 || i == 23) + uuid_str[i] = '-'; + else + uuid_str[i] = *uuid_hl++; + } + uuid_str[i] = '\0'; +} + +/** + * dal_uuid_parse - convert uuid string to binary form + * + * Input uuid is in either hyphenless or standard format + * + * @uuid_str: uuid string + * @uuid: output param to hold uuid bin + * + * Return: 0 on success + * <0 on failure + */ +int dal_uuid_parse(const char *uuid_str, uuid_t *uuid) +{ + char __uuid_str[UUID_STRING_LEN + 1]; + + if (!uuid_str || !uuid) + return -EINVAL; + + if (uuid_is_valid_hyphenless(uuid_str)) { + uuid_normalize_hyphenless(uuid_str, __uuid_str); + uuid_str = __uuid_str; + } + + return uuid_parse(uuid_str, uuid); +} +EXPORT_SYMBOL(dal_uuid_parse); + +/** + * bh_msg_is_response - check if message is response + * + * @msg: message + * @len: message length + * + * Return: true when message is response + * false otherwise + */ +bool bh_msg_is_response(const void *msg, size_t len) +{ + const struct bh_response_header *r = msg; + + return (len >= sizeof(*r) && r->h.magic 
== BH_MSG_RESP_MAGIC); +} + +/** + * bh_msg_is_cmd - check if message is command + * + * @msg: message + * @len: message length + * + * Return: true when message is command + * false otherwise + */ +bool bh_msg_is_cmd(const void *msg, size_t len) +{ + const struct bh_command_header *c = msg; + + return (len >= sizeof(*c) && c->h.magic == BH_MSG_CMD_MAGIC); +} + +/** + * bh_msg_cmd_hdr - get the command header if message is command + * + * @msg: message + * @len: message length + * + * Return: pointer to the command header when message is command + * NULL otherwise + */ +const struct bh_command_header *bh_msg_cmd_hdr(const void *msg, size_t len) +{ + if (!bh_msg_is_cmd(msg, len)) + return NULL; + + return msg; +} + +/** + * bh_msg_is_cmd_open_session - check if command is open session command + * + * @hdr: message header + * + * Return: true when command is open session command + * false otherwise + */ +bool bh_msg_is_cmd_open_session(const struct bh_command_header *hdr) +{ + return hdr->id == BHP_CMD_OPEN_JTASESSION; +} + +/** + * bh_open_session_ta_id - get ta id from open session command + * + * @hdr: message header + * @count: message size + * + * Return: pointer to ta id when command is valid + * NULL otherwise + */ +const uuid_t *bh_open_session_ta_id(const struct bh_command_header *hdr, + size_t count) +{ + struct bh_open_jta_session_cmd *open_cmd; + + if (count < sizeof(*hdr) + sizeof(*open_cmd)) + return NULL; + + open_cmd = (struct bh_open_jta_session_cmd *)hdr->cmd; + + return &open_cmd->ta_id; +} + +/** + * bh_session_is_killed - check if session is killed + * + * @code: the session return code + * + * Return: true when the session is killed + * false otherwise + */ +static bool bh_session_is_killed(int code) +{ + return (code == BHE_WD_TIMEOUT || + code == BHE_UNCAUGHT_EXCEPTION || + code == BHE_APPLET_CRASHED); +} + +/** + * bh_ta_session_open - open session to ta + * + * This function will block until VM replied the response + * + * @host_id: out 
param to hold the session host_id + * @ta_id: trusted application (ta) id + * @ta_pkg: ta binary package + * @pkg_len: ta binary package length + * @init_param: init parameters to the session (optional) + * @init_len: length of the init parameters + * + * Return: 0 on success + * <0 on system failure + * >0 on DAL FW failure + */ +int bh_ta_session_open(u64 *host_id, const char *ta_id, + const u8 *ta_pkg, size_t pkg_len, + const u8 *init_param, size_t init_len) +{ + int ret; + uuid_t bin_ta_id; + unsigned int conn_idx; + unsigned int count; + bool found; + uuid_t *ta_ids = NULL; + unsigned int i; + + if (!ta_id || !host_id) + return -EINVAL; + + if (!ta_pkg || !pkg_len) + return -EINVAL; + + if (!init_param && init_len != 0) + return -EINVAL; + + if (dal_uuid_parse(ta_id, &bin_ta_id)) + return -EINVAL; + + *host_id = 0; + + ret = bh_proxy_check_svl_jta_blocked_state(&bin_ta_id); + if (ret) + return ret; + + /* 1: vm conn_idx is IVM dal FW client */ + conn_idx = CONN_IDX_IVM; + + /* 2.1: check whether the ta pkg existed in VM or not */ + count = 0; + ret = bh_proxy_list_jta_packages(conn_idx, &count, &ta_ids); + if (ret) + return ret; + + found = false; + for (i = 0; i < count; i++) { + if (uuid_equal(&bin_ta_id, &ta_ids[i])) { + found = true; + break; + } + } + kfree(ta_ids); + + /* 2.2: download ta pkg if not already present. 
*/ + if (!found) { + ret = bh_proxy_dnload_jta(conn_idx, &bin_ta_id, + ta_pkg, pkg_len); + if (ret && ret != BHE_PACKAGE_EXIST) + return ret; + } + + /* 3: send open session command to VM */ + ret = bh_proxy_open_jta_session(conn_idx, &bin_ta_id, + init_param, init_len, + host_id, ta_pkg, pkg_len); + return ret; +} + +/** + * bh_ta_session_command - send and receive data to/from ta + * + * This function will block until VM replied the response + * + * @host_id: session host id + * @command_id: command id + * @input: message to be sent + * @length: sent message size + * @output: output param to hold pointer to the buffer which + * will contain received message. + * This buffer is allocated by Beihai and freed by the user. + * @output_length: input and output param - + * - input: the expected maximum length of the received message + * - output: size of the received message + * @response_code: An optional output param to hold the return value + * from the applet. Can be NULL. + * + * Return: 0 on success + * < 0 on system failure + * > 0 on DAL FW failure + */ +int bh_ta_session_command(u64 host_id, int command_id, + const void *input, size_t length, + void **output, size_t *output_length, + int *response_code) +{ + int ret; + struct bh_command_header *h; + struct bh_cmd *cmd; + char cmdbuf[CMD_BUF_SIZE(*cmd)]; + struct bh_response_header *resp_hdr; + unsigned int resp_len; + struct bh_session_record *session; + struct bh_resp *resp; + unsigned int conn_idx = CONN_IDX_IVM; + unsigned int len; + + memset(cmdbuf, 0, sizeof(cmdbuf)); + resp_hdr = NULL; + + if (!bh_is_initialized()) + return -EFAULT; + + if (!input && length != 0) + return -EINVAL; + + if (!output_length) + return -EINVAL; + + if (output) + *output = NULL; + + session = bh_session_find(conn_idx, host_id); + if (!session) + return -EINVAL; + + h = (struct bh_command_header *)cmdbuf; + cmd = (struct bh_cmd *)h->cmd; + h->id = BHP_CMD_SENDANDRECV; + cmd->ta_session_id = session->ta_session_id; + cmd->command 
= command_id; + cmd->outlen = *output_length; + + ret = bh_request(conn_idx, h, CMD_BUF_SIZE(*cmd), input, length, + host_id, (void **)&resp_hdr); + if (!resp_hdr) + return ret ? ret : -EFAULT; + + if (!ret) + ret = resp_hdr->code; + + session->ta_session_id = resp_hdr->ta_session_id; + resp_len = resp_hdr->h.length - sizeof(*resp_hdr); + + if (ret == BHE_APPLET_SMALL_BUFFER && + resp_len == sizeof(struct bh_resp_bof)) { + struct bh_resp_bof *bof = + (struct bh_resp_bof *)resp_hdr->data; + + if (response_code) + *response_code = be32_to_cpu(bof->response); + + *output_length = be32_to_cpu(bof->request_length); + } + + if (ret) + goto out; + + if (resp_len < sizeof(struct bh_resp)) { + ret = -EBADMSG; + goto out; + } + + resp = (struct bh_resp *)resp_hdr->data; + + if (response_code) + *response_code = be32_to_cpu(resp->response); + + len = resp_len - sizeof(*resp); + + if (*output_length < len) { + ret = -EMSGSIZE; + goto out; + } + + if (len && output) { + *output = kmemdup(resp->buffer, len, GFP_KERNEL); + if (!*output) { + ret = -ENOMEM; + goto out; + } + } + + *output_length = len; + +out: + if (bh_session_is_killed(resp_hdr->code)) + bh_session_remove(conn_idx, session->host_id); + + kfree(resp_hdr); + + return ret; +} + +/** + * bh_ta_session_close - close ta session + * + * This function will block until VM replied the response + * + * @host_id: session host id + * + * Return: 0 on success + * <0 on system failure + * >0 on DAL FW failure + */ +int bh_ta_session_close(u64 host_id) +{ + int ret; + char cmdbuf[CMD_BUF_SIZE(struct bh_close_jta_session_cmd)]; + struct bh_response_header *resp_hdr; + struct bh_session_record *session; + unsigned int conn_idx = CONN_IDX_IVM; + + memset(cmdbuf, 0, sizeof(cmdbuf)); + resp_hdr = NULL; + + session = bh_session_find(conn_idx, host_id); + if (!session) + return -EINVAL; + + bh_prep_session_close_cmd(cmdbuf, session->ta_session_id); + + ret = bh_request(conn_idx, cmdbuf, sizeof(cmdbuf), NULL, 0, host_id, + (void 
**)&resp_hdr); + + if (!ret) + ret = resp_hdr->code; + + kfree(resp_hdr); + /* + * An internal session exists, so we should not close the session. + * It means that host app should call this API at appropriate time. + */ + if (ret != BHE_IAC_EXIST_INTERNAL_SESSION) + bh_session_remove(conn_idx, host_id); + + return ret; +} + +/** + * bh_filter_hdr - filter the sent message + * + * Allow to send valid messages only. + * The filtering is done using given filter functions table + * + * @hdr: message header + * @count: message size + * @ctx: context to send to the filter functions + * @tbl: filter functions table + * + * Return: 0 when message is valid + * <0 on otherwise + */ +int bh_filter_hdr(const struct bh_command_header *hdr, size_t count, void *ctx, + const bh_filter_func tbl[]) +{ + int i; + int ret; + + for (i = 0; tbl[i]; i++) { + ret = tbl[i](hdr, count, ctx); + if (ret < 0) + return ret; + } + return 0; +} + +/** + * bh_prep_access_denied_response - prepare package with 'access denied' + * response code. + * + * This function is used to send in band error to user who trying to send + * message when he lacks the needed permissions + * + * @cmd: the invalid command message + * @res: out param to hold the response header + */ +void bh_prep_access_denied_response(const char *cmd, + struct bh_response_header *res) +{ + struct bh_command_header *cmd_hdr = (struct bh_command_header *)cmd; + + res->h.magic = BH_MSG_RESP_MAGIC; + res->h.length = sizeof(*res); + res->code = BHE_OPERATION_NOT_PERMITTED; + res->seq = cmd_hdr->seq; +} diff --git a/drivers/misc/mei/dal/bh_external.h b/drivers/misc/mei/dal/bh_external.h new file mode 100644 index 0000000000000..68b3387a09ca0 --- /dev/null +++ b/drivers/misc/mei/dal/bh_external.h @@ -0,0 +1,46 @@ +/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ +/* + * Copyright(c) 2016 - 2018 Intel Corporation. All rights reserved. 
+ */ + +#ifndef __BH_EXTERNAL_H +#define __BH_EXTERNAL_H + +#include +#include "bh_cmd_defs.h" + +# define MSG_SEQ_START_NUMBER BIT_ULL(32) + +bool bh_is_initialized(void); +void bh_init_internal(void); +void bh_deinit_internal(void); + +int bh_ta_session_open(u64 *host_id, const char *ta_id, const u8 *ta_pkg, + size_t pkg_len, const u8 *init_param, size_t init_len); + +int bh_ta_session_close(u64 host_id); + +int bh_ta_session_command(u64 host_id, int command_id, const void *input, + size_t length, void **output, size_t *output_length, + int *response_code); + +const struct bh_command_header *bh_msg_cmd_hdr(const void *msg, size_t len); + +typedef int (*bh_filter_func)(const struct bh_command_header *hdr, + size_t count, void *ctx); + +int bh_filter_hdr(const struct bh_command_header *hdr, size_t count, void *ctx, + const bh_filter_func tbl[]); + +bool bh_msg_is_cmd_open_session(const struct bh_command_header *hdr); + +const uuid_t *bh_open_session_ta_id(const struct bh_command_header *hdr, + size_t count); + +void bh_prep_access_denied_response(const char *cmd, + struct bh_response_header *res); + +bool bh_msg_is_cmd(const void *msg, size_t len); +bool bh_msg_is_response(const void *msg, size_t len); + +#endif /* __BH_EXTERNAL_H */ diff --git a/drivers/misc/mei/dal/bh_internal.c b/drivers/misc/mei/dal/bh_internal.c new file mode 100644 index 0000000000000..a7c280989db38 --- /dev/null +++ b/drivers/misc/mei/dal/bh_internal.c @@ -0,0 +1,906 @@ +// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause +/* + * Copyright(c) 2016 - 2018 Intel Corporation. All rights reserved. 
+ */ + +#define pr_fmt(fmt) KBUILD_MODNAME ":%s: " fmt, __func__ + +#include +#include +#include +#include "bh_errcode.h" +#include "bh_external.h" +#include "bh_internal.h" +#include "dal_dev.h" + +/* BH initialization state */ +static atomic_t bh_state = ATOMIC_INIT(0); +static u64 bh_host_id_number = MSG_SEQ_START_NUMBER; + +/** + * struct bh_request_cmd - bh request command + * + * @link: link in the request list of bh service + * @cmd: command header and data + * @cmd_len: command buffer length + * @conn_idx: connection index + * @host_id: session host id + * @response: response buffer + * @complete: request completion + * @refcnt: reference counter + * @ret: return value of the request + */ +struct bh_request_cmd { + struct list_head link; + u8 *cmd; + unsigned int cmd_len; + unsigned int conn_idx; + u64 host_id; + void *response; + struct completion complete; + struct kref refcnt; + int ret; +}; + +struct bh_service { + struct work_struct work; + struct mutex request_lock; /* request lock */ + struct list_head request_list; +}; + +static struct bh_service bh_srvc; + +/* + * dal device session records list (array of list per dal device) + * represents opened sessions to dal fw client + */ +static struct list_head dal_dev_session_list[MAX_CONNECTIONS]; + +/** + * bh_get_msg_host_id - increase the shared variable bh_host_id_number by 1 + * and wrap around if needed + * + * Return: the updated host id number + */ +u64 bh_get_msg_host_id(void) +{ + bh_host_id_number++; + /* wrap around. 
sequence_number must + * not be 0, as required by Firmware VM + */ + if (bh_host_id_number == 0) + bh_host_id_number = MSG_SEQ_START_NUMBER; + + return bh_host_id_number; +} + +/** + * bh_session_find - find session record by handle + * + * @conn_idx: DAL client connection idx + * @host_id: session host id + * + * Return: pointer to bh_session_record if found + * NULL if the session wasn't found + */ +struct bh_session_record *bh_session_find(unsigned int conn_idx, u64 host_id) +{ + struct bh_session_record *pos; + struct list_head *session_list = &dal_dev_session_list[conn_idx]; + + list_for_each_entry(pos, session_list, link) { + if (pos->host_id == host_id) + return pos; + } + + return NULL; +} + +/** + * bh_session_add - add session record to list + * + * @conn_idx: fw client connection idx + * @session: session record + */ +void bh_session_add(unsigned int conn_idx, struct bh_session_record *session) +{ + list_add_tail(&session->link, &dal_dev_session_list[conn_idx]); +} + +/** + * bh_session_remove - remove session record from list, ad release its memory + * + * @conn_idx: fw client connection idx + * @host_id: session host id + */ +void bh_session_remove(unsigned int conn_idx, u64 host_id) +{ + struct bh_session_record *session; + + session = bh_session_find(conn_idx, host_id); + + if (session) { + list_del(&session->link); + kfree(session); + } +} + +static void bh_request_free(struct bh_request_cmd *request) +{ + if (!request) + return; + kfree(request->cmd); + kfree(request->response); + kfree(request); + request = NULL; +} + +static struct bh_request_cmd *bh_request_alloc(const void *hdr, + size_t hdr_len, + const void *data, + size_t data_len, + unsigned int conn_idx, + u64 host_id) +{ + struct bh_request_cmd *request; + size_t buf_len; + + if (!hdr || hdr_len < sizeof(struct bh_command_header)) + return ERR_PTR(-EINVAL); + + if (!data && data_len) + return ERR_PTR(-EINVAL); + + if (check_add_overflow(hdr_len, data_len, &buf_len)) + return 
ERR_PTR(-EOVERFLOW); + + request = kzalloc(sizeof(*request), GFP_KERNEL); + if (!request) + return ERR_PTR(-ENOMEM); + + request->cmd = kmalloc(buf_len, GFP_KERNEL); + if (!request->cmd) { + kfree(request); + return ERR_PTR(-ENOMEM); + } + + memcpy(request->cmd, hdr, hdr_len); + request->cmd_len = hdr_len; + + if (data_len) { + memcpy(request->cmd + hdr_len, data, data_len); + request->cmd_len += data_len; + } + + request->conn_idx = conn_idx; + request->host_id = host_id; + + kref_init(&request->refcnt); + + return request; +} + +static char skip_buffer[DAL_MAX_BUFFER_SIZE] = {0}; +/** + * bh_transport_recv - receive message from DAL FW. + * + * @conn_idx: fw client connection idx + * @buffer: output buffer to hold the received message + * @size: output buffer size + * + * Return: 0 on success + * < 0 on failure + */ +static int bh_transport_recv(unsigned int conn_idx, void *buffer, size_t size) +{ + size_t got; + unsigned int count; + char *buf = buffer; + int ret; + + if (conn_idx > DAL_MEI_DEVICE_MAX) + return -ENODEV; + + for (count = 0; count < size; count += got) { + got = min_t(size_t, size - count, DAL_MAX_BUFFER_SIZE); + if (buf) + ret = dal_kdi_recv(conn_idx, buf + count, &got); + else + ret = dal_kdi_recv(conn_idx, skip_buffer, &got); + + if (!got) + return -EFAULT; + + if (ret) + return ret; + } + + if (count != size) + return -EFAULT; + + return 0; +} + +/** + * bh_recv_message_try - try to receive and prosses message from DAL + * + * @conn_idx: fw client connection idx + * @response: output param to hold the response + * @out_host_id: output param to hold the received message host id + * it should be identical to the sent message host id + * + * Return: 0 on success + * <0 on failure + */ +static int bh_recv_message_try(unsigned int conn_idx, void **response, + u64 *out_host_id) +{ + int ret; + char *data; + struct bh_response_header hdr; + + if (!response) + return -EINVAL; + + *response = NULL; + + memset(&hdr, 0, sizeof(hdr)); + ret = 
bh_transport_recv(conn_idx, &hdr, sizeof(hdr)); + if (ret) + return ret; + + if (hdr.h.length < sizeof(hdr)) + return -EBADMSG; + + /* check magic */ + if (hdr.h.magic != BH_MSG_RESP_MAGIC) + return -EBADMSG; + + data = kzalloc(hdr.h.length, GFP_KERNEL); + if (!data) + return -ENOMEM; + + memcpy(data, &hdr, sizeof(hdr)); + + /* message contains hdr only */ + if (hdr.h.length == sizeof(hdr)) + goto out; + + ret = bh_transport_recv(conn_idx, data + sizeof(hdr), + hdr.h.length - sizeof(hdr)); +out: + if (out_host_id) + *out_host_id = hdr.seq; + + *response = data; + + return ret; +} + +#define MAX_RETRY_COUNT 3 +static int bh_recv_message(struct bh_request_cmd *request) +{ + u32 retry; + u64 res_host_id; + void *resp; + int ret; + + for (resp = NULL, retry = 0; retry < MAX_RETRY_COUNT; retry++) { + kfree(resp); + resp = NULL; + + res_host_id = 0; + ret = bh_recv_message_try(request->conn_idx, + &resp, &res_host_id); + if (ret) { + pr_debug("failed to recv msg = %d\n", ret); + continue; + } + + if (res_host_id != request->host_id) { + pr_debug("recv message with host_id=%llu != sent host_id=%llu\n", + res_host_id, request->host_id); + continue; + } + + pr_debug("recv message with try=%d host_id=%llu\n", + retry, request->host_id); + break; + } + + if (retry == MAX_RETRY_COUNT) { + pr_err("out of retry attempts\n"); + ret = -EFAULT; + } + + if (ret) { + kfree(resp); + resp = NULL; + } + + request->response = resp; + return ret; +} + +/** + * bh_transport_send - send message to the DAL FW. 
+ * + * @conn_idx: fw client connection idx + * @buffer: message to send + * @size: message size + * @host_id: message host id + * + * Return: 0 on success + * <0 on failure + */ +static int bh_transport_send(unsigned int conn_idx, const void *buffer, + unsigned int size, u64 host_id) +{ + size_t chunk_sz = DAL_MAX_BUFFER_SIZE; + size_t count; + int ret; + + for (ret = 0, count = 0; count < size && !ret; count += chunk_sz) { + chunk_sz = min_t(size_t, size - count, DAL_MAX_BUFFER_SIZE); + ret = dal_kdi_send(conn_idx, buffer + count, chunk_sz, host_id); + } + + return ret; +} + +/** + * bh_send_message - build and send command message to DAL FW. + * + * @request: all request details + * + * Return: 0 on success + * < 0 on failure + */ +static int bh_send_message(const struct bh_request_cmd *request) +{ + struct bh_command_header *h; + + if (!request) + return -EINVAL; + + if (request->cmd_len < sizeof(*h) || !request->cmd) + return -EINVAL; + + if (request->conn_idx > DAL_MEI_DEVICE_MAX) + return -ENODEV; + + h = (struct bh_command_header *)request->cmd; + h->h.magic = BH_MSG_CMD_MAGIC; + h->h.length = request->cmd_len; + h->seq = request->host_id; + + return bh_transport_send(request->conn_idx, + request->cmd, request->cmd_len, + request->host_id); +} + +void bh_prep_session_close_cmd(void *cmdbuf, u64 ta_session_id) +{ + struct bh_command_header *h = cmdbuf; + struct bh_close_jta_session_cmd *cmd; + + cmd = (struct bh_close_jta_session_cmd *)h->cmd; + h->id = BHP_CMD_CLOSE_JTASESSION; + cmd->ta_session_id = ta_session_id; +} + +static int bh_send_recv_message(struct bh_request_cmd *request) +{ + int ret; + + ret = bh_send_message(request); + if (ret) + return ret; + + return bh_recv_message(request); +} + +static void bh_kref_release(struct kref *ref) +{ + struct bh_request_cmd *request = + container_of(ref, struct bh_request_cmd, refcnt); + + bh_request_free(request); +} + +/** + * bh_kref_release_worker() - release bh_request from a background worker + * @ref: 
reference counter of the bh_request object + */ +static void bh_kref_release_worker(struct kref *ref) +{ + struct bh_response_header *resp_hdr; + struct bh_command_header *h; + struct bh_request_cmd *request = + container_of(ref, struct bh_request_cmd, refcnt); + + /* no one waits for the response - clean up is needed */ + pr_debug("no waiter - clean up is needed\n"); + + if (!request->cmd_len || !request->cmd || !request->response) + goto out; + + resp_hdr = (struct bh_response_header *)request->response; + /* + * if the command was open_session and + * it was succeeded then close the session + */ + if (request->ret || resp_hdr->code) + goto out; + + h = (struct bh_command_header *)request->cmd; + if (bh_msg_is_cmd_open_session(h)) { + char cmdbuf[CMD_BUF_SIZE(struct bh_close_jta_session_cmd)]; + struct bh_request_cmd *close_req; + u64 host_id = request->host_id; + + bh_prep_session_close_cmd(cmdbuf, resp_hdr->ta_session_id); + close_req = bh_request_alloc(cmdbuf, sizeof(cmdbuf), NULL, 0, + CONN_IDX_IVM, host_id); + if (IS_ERR(close_req)) + goto out; + + bh_send_recv_message(close_req); + bh_request_free(close_req); + } +out: + bh_request_free(request); +} + +static void bh_request_work(struct work_struct *work) +{ + struct bh_service *bh_srv; + struct bh_request_cmd *request, *next; + int ret; + + bh_srv = container_of(work, struct bh_service, work); + + mutex_lock(&bh_srv->request_lock); + list_for_each_entry_safe(request, next, &bh_srv->request_list, link) { + list_del_init(&request->link); + if (!request->cmd_len || !request->cmd) + goto out_free; + + ret = bh_send_recv_message(request); + request->ret = ret; + + complete(&request->complete); +out_free: + kref_put(&request->refcnt, bh_kref_release_worker); + } + + mutex_unlock(&bh_srv->request_lock); +} + +/** + * bh_request - send request to DAL FW and receive response back + * + * @conn_idx: fw client connection idx + * @cmd_hdr: command header + * @cmd_hdr_len: command header length + * @cmd_data: command 
data (message content) + * @cmd_data_len: data length + * @host_id: message host id + * @response: output param to hold the response + * + * Return: 0 on success + * <0 on failure + */ +int bh_request(unsigned int conn_idx, void *cmd_hdr, unsigned int cmd_hdr_len, + const void *cmd_data, unsigned int cmd_data_len, + u64 host_id, void **response) +{ + int ret; + struct bh_request_cmd *request; + + mutex_lock(&bh_srvc.request_lock); + request = bh_request_alloc(cmd_hdr, cmd_hdr_len, cmd_data, cmd_data_len, + conn_idx, host_id); + if (IS_ERR(request)) { + mutex_unlock(&bh_srvc.request_lock); + return PTR_ERR(request); + } + + init_completion(&request->complete); + kref_get(&request->refcnt); + list_add_tail(&request->link, &bh_srvc.request_list); + mutex_unlock(&bh_srvc.request_lock); + + /* + * Call kref_get before scheduling the worker thread, + * to avoid race condition + */ + + schedule_work(&bh_srvc.work); + ret = wait_for_completion_interruptible(&request->complete); + /* + * If wait was interrupted than decrease refcnt + * The worker thread will release the memory + */ + if (ret) { + kref_put(&request->refcnt, bh_kref_release); + return ret; + } + + mutex_lock(&bh_srvc.request_lock); + + /* detach response buffer */ + *response = request->response; + request->response = NULL; + + ret = request->ret; + + kref_put(&request->refcnt, bh_kref_release); + + mutex_unlock(&bh_srvc.request_lock); + + return ret; +} + +/** + * bh_ession_list_free - free session list of given dal fw client + * + * @conn_idx: fw client connection idx + */ +static void bh_session_list_free(unsigned int conn_idx) +{ + struct bh_session_record *pos, *next; + struct list_head *session_list = &dal_dev_session_list[conn_idx]; + + list_for_each_entry_safe(pos, next, session_list, link) { + list_del(&pos->link); + kfree(pos); + } + + INIT_LIST_HEAD(session_list); +} + +/** + * bh_session_list_init - initialize session list of given dal fw client + * + * @conn_idx: fw client connection idx + */ 
+static void bh_session_list_init(unsigned int conn_idx) +{ + INIT_LIST_HEAD(&dal_dev_session_list[conn_idx]); +} + +/** + * bh_proxy_check_svl_jta_blocked_state - check if ta security version + * is blocked + * + * When installing a ta, a minimum security version is given, + * so DAL will block installation of this ta from lower version. + * (even after the ta will be uninstalled) + * + * @ta_id: trusted application (ta) id + * + * Return: 0 when ta security version isn't blocked + * <0 on system failure + * >0 on DAL FW failure + */ +int bh_proxy_check_svl_jta_blocked_state(uuid_t *ta_id) +{ + int ret; + struct bh_command_header *h; + struct bh_check_svl_jta_blocked_state_cmd *cmd; + char cmdbuf[CMD_BUF_SIZE(*cmd)]; + struct bh_response_header *resp_hdr; + u64 host_id; + + if (!ta_id) + return -EINVAL; + + memset(cmdbuf, 0, sizeof(cmdbuf)); + resp_hdr = NULL; + + h = (struct bh_command_header *)cmdbuf; + cmd = (struct bh_check_svl_jta_blocked_state_cmd *)h->cmd; + h->id = BHP_CMD_CHECK_SVL_TA_BLOCKED_STATE; + cmd->ta_id = *ta_id; + + host_id = bh_get_msg_host_id(); + ret = bh_request(CONN_IDX_SDM, h, CMD_BUF_SIZE(*cmd), NULL, 0, + host_id, (void **)&resp_hdr); + + if (!ret) + ret = resp_hdr->code; + + kfree(resp_hdr); + + return ret; +} + +/** + * bh_proxy_list_jta_packages - get list of ta packages in DAL + * + * @conn_idx: fw client connection idx + * @count: out param to hold the count of ta packages in DAL + * @ta_ids: out param to hold pointer to the ids of ta packages in DAL + * The buffer which holds the ids is allocated in this function + * and freed by the caller + * + * Return: 0 when ta security version isn't blocked + * <0 on system failure + * >0 on DAL FW failure + */ +int bh_proxy_list_jta_packages(unsigned int conn_idx, unsigned int *count, + uuid_t **ta_ids) +{ + int ret; + struct bh_command_header h; + struct bh_response_header *resp_hdr; + unsigned int resp_len; + struct bh_resp_list_ta_packages *resp; + uuid_t *outbuf; + unsigned int i; + u64 
host_id; + + memset(&h, 0, sizeof(h)); + resp_hdr = NULL; + + if (!bh_is_initialized()) + return -EFAULT; + + if (!count || !ta_ids) + return -EINVAL; + + *ta_ids = NULL; + *count = 0; + + h.id = BHP_CMD_LIST_TA_PACKAGES; + + host_id = bh_get_msg_host_id(); + ret = bh_request(conn_idx, &h, sizeof(h), NULL, 0, host_id, + (void **)&resp_hdr); + + if (!ret) + ret = resp_hdr->code; + if (ret) + goto out; + + resp_len = resp_hdr->h.length - sizeof(*resp_hdr); + if (resp_len < sizeof(*resp)) { + ret = -EBADMSG; + goto out; + } + + resp = (struct bh_resp_list_ta_packages *)resp_hdr->data; + if (!resp->count) { + /* return success, there are no ta packages loaded in DAL FW */ + ret = 0; + goto out; + } + + if (resp_len != sizeof(uuid_t) * resp->count + sizeof(*resp)) { + ret = -EBADMSG; + goto out; + } + + outbuf = kcalloc(resp->count, sizeof(uuid_t), GFP_KERNEL); + + if (!outbuf) { + ret = -ENOMEM; + goto out; + } + + for (i = 0; i < resp->count; i++) + outbuf[i] = resp->ta_ids[i]; + + *ta_ids = outbuf; + *count = resp->count; + +out: + kfree(resp_hdr); + return ret; +} + +/** + * bh_proxy_dnload_jta - download ta package to DAL + * + * @conn_idx: fw client connection idx + * @ta_id: trusted application (ta) id + * @ta_pkg: ta binary package + * @pkg_len: ta binary package length + * + * Return: 0 on success + * <0 on system failure + * >0 on DAL FW failure + */ +int bh_proxy_dnload_jta(unsigned int conn_idx, uuid_t *ta_id, + const char *ta_pkg, unsigned int pkg_len) +{ + struct bh_command_header *h; + struct bh_download_jta_cmd *cmd; + char cmdbuf[CMD_BUF_SIZE(*cmd)]; + struct bh_response_header *resp_hdr; + u64 host_id; + int ret; + + if (!ta_pkg || !pkg_len || !ta_id) + return -EINVAL; + + memset(cmdbuf, 0, sizeof(cmdbuf)); + resp_hdr = NULL; + + h = (struct bh_command_header *)cmdbuf; + cmd = (struct bh_download_jta_cmd *)h->cmd; + h->id = BHP_CMD_DOWNLOAD_JAVATA; + cmd->ta_id = *ta_id; + + host_id = bh_get_msg_host_id(); + ret = bh_request(conn_idx, h, 
CMD_BUF_SIZE(*cmd), ta_pkg, pkg_len, + host_id, (void **)&resp_hdr); + + if (!ret) + ret = resp_hdr->code; + + kfree(resp_hdr); + + return ret; +} + +/** + * bh_proxy_open_jta_session - send open session command + * + * @conn_idx: fw client connection idx + * @ta_id: trusted application (ta) id + * @init_buffer: init parameters to the session (optional) + * @init_len: length of the init parameters + * @host_id: out param to hold the session host id + * @ta_pkg: ta binary package + * @pkg_len: ta binary package length + * + * Return: 0 on success + * <0 on system failure + * >0 on DAL FW failure + */ +int bh_proxy_open_jta_session(unsigned int conn_idx, + uuid_t *ta_id, + const char *init_buffer, + unsigned int init_len, + u64 *host_id, + const char *ta_pkg, + unsigned int pkg_len) +{ + int ret; + struct bh_command_header *h; + struct bh_open_jta_session_cmd *cmd; + char cmdbuf[CMD_BUF_SIZE(*cmd)]; + struct bh_response_header *resp_hdr; + struct bh_session_record *session; + + if (!host_id || !ta_id) + return -EINVAL; + + if (!init_buffer && init_len > 0) + return -EINVAL; + + memset(cmdbuf, 0, sizeof(cmdbuf)); + resp_hdr = NULL; + + h = (struct bh_command_header *)cmdbuf; + cmd = (struct bh_open_jta_session_cmd *)h->cmd; + + session = kzalloc(sizeof(*session), GFP_KERNEL); + if (!session) + return -ENOMEM; + + session->host_id = bh_get_msg_host_id(); + bh_session_add(conn_idx, session); + + h->id = BHP_CMD_OPEN_JTASESSION; + cmd->ta_id = *ta_id; + + ret = bh_request(conn_idx, h, CMD_BUF_SIZE(*cmd), init_buffer, + init_len, session->host_id, (void **)&resp_hdr); + + if (!ret && resp_hdr) + ret = resp_hdr->code; + + if (ret == BHE_PACKAGE_NOT_FOUND) { + /* + * VM might delete the TA pkg when no live session. 
+ * Download the TA pkg and open session again + */ + ret = bh_proxy_dnload_jta(conn_idx, ta_id, ta_pkg, pkg_len); + if (ret) + goto out; + + kfree(resp_hdr); + resp_hdr = NULL; + ret = bh_request(conn_idx, h, CMD_BUF_SIZE(*cmd), init_buffer, + init_len, session->host_id, + (void **)&resp_hdr); + + if (!ret && resp_hdr) + ret = resp_hdr->code; + } + + if (resp_hdr) + session->ta_session_id = resp_hdr->ta_session_id; + *host_id = session->host_id; + +out: + if (ret) + bh_session_remove(conn_idx, session->host_id); + + kfree(resp_hdr); + + return ret; +} + +/** + * bh_request_list_free - free request list of bh_service + * + * @request_list: request list + */ +static void bh_request_list_free(struct list_head *request_list) +{ + struct bh_request_cmd *pos, *next; + + list_for_each_entry_safe(pos, next, request_list, link) { + list_del(&pos->link); + bh_request_free(pos); + } + + INIT_LIST_HEAD(request_list); +} + +/** + * bh_is_initialized - check if bh is initialized + * + * Return: true when bh is initialized and false otherwise + */ +bool bh_is_initialized(void) +{ + return atomic_read(&bh_state) == 1; +} + +/** + * bh_init_internal - BH initialization function + * + * The BH initialization creates the session lists for all + * dal devices (dal fw clients) + * + * Return: 0 + */ +void bh_init_internal(void) +{ + unsigned int i; + + if (!atomic_add_unless(&bh_state, 1, 1)) + return; + + for (i = CONN_IDX_START; i < MAX_CONNECTIONS; i++) + bh_session_list_init(i); + + INIT_LIST_HEAD(&bh_srvc.request_list); + mutex_init(&bh_srvc.request_lock); + INIT_WORK(&bh_srvc.work, bh_request_work); +} + +/** + * bh_deinit_internal - BH deinit function + * + * The deinitialization frees the session lists of all + * dal devices (dal fw clients) + */ +void bh_deinit_internal(void) +{ + unsigned int i; + + if (!atomic_add_unless(&bh_state, -1, 0)) + return; + + for (i = CONN_IDX_START; i < MAX_CONNECTIONS; i++) + bh_session_list_free(i); + + cancel_work_sync(&bh_srvc.work); + 
bh_request_list_free(&bh_srvc.request_list); +} diff --git a/drivers/misc/mei/dal/bh_internal.h b/drivers/misc/mei/dal/bh_internal.h new file mode 100644 index 0000000000000..e50065b647e09 --- /dev/null +++ b/drivers/misc/mei/dal/bh_internal.h @@ -0,0 +1,78 @@ +/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ +/* + * Copyright(c) 2016 - 2018 Intel Corporation. All rights reserved. + */ + +#ifndef __BH_INTERNAL_H +#define __BH_INTERNAL_H + +#include +#include +#include +#include + +#include "bh_cmd_defs.h" + +/** + * struct bh_session_record - session record + * + * @link: link in dal_dev_session_list of dal fw client + * @host_id: message/session host id + * @ta_session_id: session id + */ +struct bh_session_record { + struct list_head link; + u64 host_id; + u64 ta_session_id; +}; + +/* command buffer size */ +#define CMD_BUF_SIZE(cmd) (sizeof(struct bh_command_header) + sizeof(cmd)) + +/** + * enum bh_connection_index - connection index to dal fw clients + * + * @CONN_IDX_START: start idx + * + * @CONN_IDX_IVM: Intel/Issuer Virtual Machine + * @CONN_IDX_SDM: Security Domain Manager + * @CONN_IDX_LAUNCHER: Run Time Manager (Launcher) + * + * @MAX_CONNECTIONS: max connection idx + */ +enum bh_connection_index { + CONN_IDX_START = 0, + + CONN_IDX_IVM = 0, + CONN_IDX_SDM = 1, + CONN_IDX_LAUNCHER = 2, + + MAX_CONNECTIONS +}; + +u64 bh_get_msg_host_id(void); + +struct bh_session_record *bh_session_find(unsigned int conn_idx, u64 host_id); +void bh_session_add(unsigned int conn_idx, struct bh_session_record *session); +void bh_session_remove(unsigned int conn_idx, u64 host_id); + +int bh_request(unsigned int conn_idx, + void *hdr, unsigned int hdr_len, + const void *data, unsigned int data_len, + u64 host_id, void **response); + +int bh_proxy_check_svl_jta_blocked_state(uuid_t *ta_id); + +int bh_proxy_list_jta_packages(unsigned int conn_idx, + unsigned int *count, uuid_t **ta_ids); + +int bh_proxy_dnload_jta(unsigned int conn_idx, uuid_t *ta_id, + const char 
*ta_pkg, unsigned int pkg_len); + +int bh_proxy_open_jta_session(unsigned int conn_idx, uuid_t *ta_id, + const char *init_buffer, unsigned int init_len, + u64 *host_id, const char *ta_pkg, + unsigned int pkg_len); + +void bh_prep_session_close_cmd(void *cmdbuf, u64 ta_session_id); +#endif /* __BH_INTERNAL_H */ diff --git a/drivers/misc/mei/dal/dal_cdev.c b/drivers/misc/mei/dal/dal_cdev.c new file mode 100644 index 0000000000000..48f209af4909d --- /dev/null +++ b/drivers/misc/mei/dal/dal_cdev.c @@ -0,0 +1,243 @@ +// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause +/* + * Copyright(c) 2016 - 2018 Intel Corporation. All rights reserved. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "dal_dev.h" +#include "dal_cdev.h" + +/* KDI user space devices major and minor numbers */ +static dev_t dal_devt; + +/** + * dal_dev_open - dal cdev open function + * + * @inode: pointer to inode structure + * @fp: pointer to file structure + * + * Return: 0 on success + * <0 on failure + */ +static int dal_dev_open(struct inode *inode, struct file *fp) +{ + int ret; + struct dal_device *ddev; + + ddev = container_of(inode->i_cdev, struct dal_device, cdev); + if (!ddev) + return -ENODEV; + + /* single open */ + if (test_and_set_bit(DAL_DEV_OPENED, &ddev->status)) + return -EBUSY; + + ret = dal_dc_setup(ddev, DAL_INTF_CDEV); + if (ret) + goto err; + + fp->private_data = ddev->clients[DAL_INTF_CDEV]; + + return nonseekable_open(inode, fp); + +err: + clear_bit(DAL_DEV_OPENED, &ddev->status); + return ret; +} + +/** + * dal_dev_release - dal cdev release function + * + * @inode: pointer to inode structure + * @fp: pointer to file structure + * + * Return: 0 on success + * <0 on failure + */ +static int dal_dev_release(struct inode *inode, struct file *fp) +{ + struct dal_client *dc = fp->private_data; + struct dal_device *ddev = dc->ddev; + + if 
(mutex_lock_interruptible(&ddev->context_lock)) { + dev_dbg(&ddev->dev, "signal interrupted\n"); + return -ERESTARTSYS; + } + + dal_dc_destroy(ddev, dc->intf); + + mutex_unlock(&ddev->context_lock); + + clear_bit(DAL_DEV_OPENED, &ddev->status); + + return 0; +} + +/** + * dal_dev_read - dal cdev read function + * + * @fp: pointer to file structure + * @buf: pointer to user buffer + * @count: buffer length + * @off: data offset in buffer + * + * Return: >=0 data length on success + * <0 on failure + */ +static ssize_t dal_dev_read(struct file *fp, char __user *buf, + size_t count, loff_t *off) +{ + struct dal_client *dc = fp->private_data; + struct dal_device *ddev = dc->ddev; + int ret; + size_t r_len, len; + unsigned int copied; + + ret = dal_wait_for_read(dc); + if (ret) + return ret; + + if (kfifo_is_empty(&dc->read_queue)) + return 0; + + r_len = kfifo_out(&dc->read_queue, &len, sizeof(len)); + if (r_len != sizeof(len) || len > count) { + dev_dbg(&ddev->dev, "could not copy buffer: src size = %zd, dest size = %zu\n", + len, count); + return -EFAULT; + } + + ret = kfifo_to_user(&dc->read_queue, buf, count, &copied); + if (ret) { + dev_dbg(&ddev->dev, "copy_to_user() failed\n"); + return -EFAULT; + } + + /*FIXME: need to drop rest of the data */ + + return copied; +} + +/** + * dal_dev_write - dal cdev write function + * + * @fp: pointer to file structure + * @buff: pointer to user buffer + * @count: buffer length + * @off: data offset in buffer + * + * Return: >=0 data length on success + * <0 on failure + */ +static ssize_t dal_dev_write(struct file *fp, const char __user *buff, + size_t count, loff_t *off) +{ + struct dal_device *ddev; + struct dal_client *dc = fp->private_data; + void *data; + int ret; + + ddev = dc->ddev; + + if (count > DAL_MAX_BUFFER_SIZE) { + dev_dbg(&ddev->dev, "count is too big, count = %zu\n", count); + return -EMSGSIZE; + } + + if (count == 0) + return 0; + + if (!buff) + return -EINVAL; + + data = memdup_user(buff, count); + if 
(IS_ERR(data)) + return PTR_ERR(data); + + ret = dal_write(dc, data, count, 0); + + kfree(data); + + return ret; +} + +static const struct file_operations mei_dal_fops = { + .owner = THIS_MODULE, + .open = dal_dev_open, + .release = dal_dev_release, + .read = dal_dev_read, + .write = dal_dev_write, + .llseek = no_llseek, +}; + +/** + * dal_dev_del - delete dal cdev + * + * @ddev: dal device + */ +void dal_dev_del(struct dal_device *ddev) +{ + cdev_del(&ddev->cdev); +} + +/** + * dal_dev_setup - initialize dal cdev + * + * @ddev: dal device + */ +void dal_dev_setup(struct dal_device *ddev) +{ + dev_t devno; + + cdev_init(&ddev->cdev, &mei_dal_fops); + devno = MKDEV(MAJOR(dal_devt), ddev->device_id); + ddev->cdev.owner = THIS_MODULE; + ddev->dev.devt = devno; + ddev->cdev.kobj.parent = &ddev->dev.kobj; +} + +/** + * dal_dev_add - add dal cdev + * + * @ddev: dal device + * + * Return: 0 on success + * <0 on failure + */ +int dal_dev_add(struct dal_device *ddev) +{ + return cdev_add(&ddev->cdev, ddev->dev.devt, 1); +} + +/** + * dal_dev_init - allocate dev_t number + * + * Return: 0 on success + * <0 on failure + */ +int __init dal_dev_init(void) +{ + return alloc_chrdev_region(&dal_devt, 0, DAL_MEI_DEVICE_MAX, "dal"); +} + +/** + * dal_dev_exit - unregister allocated dev_t number + */ +void dal_dev_exit(void) +{ + unregister_chrdev_region(dal_devt, DAL_MEI_DEVICE_MAX); +} diff --git a/drivers/misc/mei/dal/dal_cdev.h b/drivers/misc/mei/dal/dal_cdev.h new file mode 100644 index 0000000000000..2547701cdfcae --- /dev/null +++ b/drivers/misc/mei/dal/dal_cdev.h @@ -0,0 +1,13 @@ +/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ +/* + * Copyright(c) 2016 - 2018 Intel Corporation. All rights reserved. 
+ */ + +#ifndef __MEI_DAL_DEV_H__ +#define __MEI_DAL_DEV_H__ +void dal_dev_del(struct dal_device *ddev); +void dal_dev_setup(struct dal_device *ddev); +int dal_dev_add(struct dal_device *ddev); +int __init dal_dev_init(void); +void dal_dev_exit(void); +#endif /* __MEI_DAL_DEV_H__ */ diff --git a/drivers/misc/mei/dal/dal_class.c b/drivers/misc/mei/dal/dal_class.c new file mode 100644 index 0000000000000..9fa4287a47796 --- /dev/null +++ b/drivers/misc/mei/dal/dal_class.c @@ -0,0 +1,847 @@ +// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 +/* + * Copyright(c) 2016 - 2018 Intel Corporation. All rights reserved. + */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#include "bh_external.h" +#include "bh_cmd_defs.h" +#include "bh_errcode.h" +#include "dal_dev.h" +#include "dal_cdev.h" + +/* + * this class contains the 3 mei_cl_device, ivm, sdm, rtm. + * it is initialized during dal_probe and is used by the kernel space kdi + * to send/recv data to/from mei. + * + * this class must be initialized before the kernel space kdi uses it. + */ +struct class *dal_class; + +/** + * dal_dc_print - print client data for debug purpose + * + * @dev: device structure + * @dc: dal client + */ +void dal_dc_print(struct device *dev, struct dal_client *dc) +{ + if (!dc) { + dev_dbg(dev, "dc is null\n"); + return; + } + + dev_dbg(dev, "dc: intf = %d. expected to send: %d, sent: %d. 
expected to receive: %d, received: %d\n", + dc->intf, + dc->expected_msg_size_to_fw, + dc->bytes_sent_to_fw, + dc->expected_msg_size_from_fw, + dc->bytes_rcvd_from_fw); +} + +/** + * dal_dc_update_read_state - update client read state + * + * @dc : dal client + * @len: received message length + * + * Locking: called under "ddev->context_lock" lock + */ +static void dal_dc_update_read_state(struct dal_client *dc, ssize_t len) +{ + struct dal_device *ddev = dc->ddev; + + /* check BH msg magic, if it exists this is the header */ + if (bh_msg_is_response(ddev->bh_fw_msg.msg, len)) { + struct bh_response_header *hdr = + (struct bh_response_header *)dc->ddev->bh_fw_msg.msg; + + dc->expected_msg_size_from_fw = hdr->h.length; + dev_dbg(&ddev->dev, "expected_msg_size_from_fw = %d bytes read = %zd\n", + dc->expected_msg_size_from_fw, len); + + /* clear data from the past. */ + dc->bytes_rcvd_from_fw = 0; + } + + /* update number of bytes rcvd */ + dc->bytes_rcvd_from_fw += len; +} + +/** + * dal_get_client_by_squence_number - find the client interface which + * the received message is sent to + * + * @ddev : dal device + * + * Return: kernel space interface or user space interface + */ +static enum dal_intf dal_get_client_by_squence_number(struct dal_device *ddev) +{ + struct bh_response_header *head; + + if (!ddev->clients[DAL_INTF_KDI]) + return DAL_INTF_CDEV; + + head = (struct bh_response_header *)ddev->bh_fw_msg.msg; + + dev_dbg(&ddev->dev, "msg seq = %llu\n", head->seq); + + if (head->seq == ddev->clients[DAL_INTF_KDI]->seq) + return DAL_INTF_KDI; + + return DAL_INTF_CDEV; +} + +/** + * dal_recv_cb - callback to receive message from DAL FW over mei + * + * @cldev : mei client device + */ +static void dal_recv_cb(struct mei_cl_device *cldev) +{ + struct dal_device *ddev; + struct dal_client *dc; + enum dal_intf intf; + ssize_t len; + size_t ret; + bool is_unexpected_msg = false; + + ddev = mei_cldev_get_drvdata(cldev); + + /* + * read the msg from MEI + */ + len = 
mei_cldev_recv(cldev, ddev->bh_fw_msg.msg, DAL_MAX_BUFFER_SIZE); + if (len < 0) { + dev_err(&cldev->dev, "recv failed %zd\n", len); + return; + } + + /* + * lock to prevent read from MEI while writing to MEI and to + * deal with just one msg at the same time + */ + mutex_lock(&ddev->context_lock); + + /* save msg len */ + ddev->bh_fw_msg.len = len; + + /* set to which interface the msg should be sent */ + if (bh_msg_is_response(ddev->bh_fw_msg.msg, len)) { + intf = dal_get_client_by_squence_number(ddev); + dev_dbg(&ddev->dev, "recv_cb(): Client set by sequence number\n"); + dc = ddev->clients[intf]; + } else if (!ddev->current_read_client) { + intf = DAL_INTF_CDEV; + dev_dbg(&ddev->dev, "recv_cb(): EXTRA msg received - curr == NULL\n"); + dc = ddev->clients[intf]; + is_unexpected_msg = true; + } else { + dc = ddev->current_read_client; + dev_dbg(&ddev->dev, "recv_cb(): FRAGMENT msg received - curr != NULL\n"); + } + + /* save the current read client */ + ddev->current_read_client = dc; + /* In case of a client is not connected, dc might be NULL */ + if (!dc) + goto out; + + dev_dbg(&cldev->dev, "read client type %d data from mei client seq = %llu\n", + dc->intf, dc->seq); + + /* + * save new msg in queue, + * if the queue is full all new messages will be thrown + */ + ret = kfifo_in(&dc->read_queue, &ddev->bh_fw_msg.len, sizeof(len)); + ret += kfifo_in(&dc->read_queue, ddev->bh_fw_msg.msg, len); + if (ret < len + sizeof(len)) + dev_dbg(&ddev->dev, "queue is full - MSG THROWN\n"); + + dal_dc_update_read_state(dc, len); + + /* + * To clear current client we check if the whole msg received + * for the current client + */ + if (is_unexpected_msg || + dc->bytes_rcvd_from_fw == dc->expected_msg_size_from_fw) { + dev_dbg(&ddev->dev, "recv_cb(): setting CURRENT_READER to NULL\n"); + ddev->current_read_client = NULL; + } +out: + /* wake up all clients waiting for read or write */ + if (wq_has_sleeper(&ddev->wq)) + wake_up_interruptible(&ddev->wq); + + 
mutex_unlock(&ddev->context_lock); +} + +/** + * dal_mei_enable - enable mei cldev + * + * @ddev: dal device + * + * Return: 0 on success + * <0 on failure + */ +static int dal_mei_enable(struct dal_device *ddev) +{ + int ret; + + ret = mei_cldev_enable(ddev->cldev); + if (ret < 0) { + dev_err(&ddev->cldev->dev, "mei_cldev_enable_device() failed with ret = %d\n", + ret); + return ret; + } + + /* register to mei bus callbacks */ + ret = mei_cldev_register_rx_cb(ddev->cldev, dal_recv_cb); + if (ret) { + dev_err(&ddev->cldev->dev, "mei_cldev_register_event_cb() failed ret = %d\n", + ret); + goto err; + } + + /* save pointer to the context in the device */ + mei_cldev_set_drvdata(ddev->cldev, ddev); + + return 0; +err: + mei_cldev_disable(ddev->cldev); + return ret; +} + +/** + * dal_wait_for_write - wait until the dal client is the first writer + * in writers queue + * + * @ddev: dal device + * @dc: dal client + * + * Return: 0 on success + * -ERESTARTSYS when wait was interrupted + * -ENODEV when the device was removed + */ +static int dal_wait_for_write(struct dal_device *ddev, struct dal_client *dc) +{ + if (wait_event_interruptible(ddev->wq, + list_first_entry(&ddev->writers, + struct dal_client, + wrlink) == dc || + ddev->is_device_removed)) { + return -ERESTARTSYS; + } + + /* if the device was removed indicate that to the caller */ + if (ddev->is_device_removed) + return -ENODEV; + + return 0; +} + +/** + * dal_send_error_access_denied - put 'access denied' message + * into the client read queue. In-band error message. 
+ * + * @dc: dal client + * @cmd: rejected message header + * + * Return: 0 on success + * -ENOMEM when client read queue is full + * + * Locking: called under "ddev->write_lock" lock + */ +static int dal_send_error_access_denied(struct dal_client *dc, const void *cmd) +{ + struct dal_device *ddev = dc->ddev; + struct bh_response_header res; + size_t len; + int ret; + + mutex_lock(&ddev->context_lock); + + bh_prep_access_denied_response(cmd, &res); + len = sizeof(res); + + if (kfifo_in(&dc->read_queue, &len, sizeof(len)) != sizeof(len)) { + ret = -ENOMEM; + goto out; + } + + if (kfifo_in(&dc->read_queue, &res, len) != len) { + ret = -ENOMEM; + goto out; + } + ret = 0; + +out: + mutex_unlock(&ddev->context_lock); + return ret; +} + +/** + * dal_validate_access - validate that the access is permitted. + * + * in case of open session command, validate that the client has the permissions + * to open session to the requested ta + * + * @hdr: command header + * @count: message size + * @ctx: context (not used) + * + * Return: 0 when command is permitted + * -EINVAL when message is invalid + * -EPERM when access is not permitted + * + * Locking: called under "ddev->write_lock" lock + */ +static int dal_validate_access(const struct bh_command_header *hdr, + size_t count, void *ctx) +{ + struct dal_client *dc = ctx; + struct dal_device *ddev = dc->ddev; + const uuid_t *ta_id; + + if (!bh_msg_is_cmd_open_session(hdr)) + return 0; + + ta_id = bh_open_session_ta_id(hdr, count); + if (!ta_id) + return -EINVAL; + + return dal_access_policy_allowed(ddev, ta_id, dc); +} + +/** + * dal_is_kdi_msg - check if sequence is in kernel space sequence range + * + * Each interface (kernel space and user space) has different range of + * sequence number. 
This function checks if given number is in kernel space + * sequence range + * + * @hdr: command header + * + * Return: true when seq fits kernel space intf + * false when seq fits user space intf + */ +static bool dal_is_kdi_msg(const struct bh_command_header *hdr) +{ + return hdr->seq >= MSG_SEQ_START_NUMBER; +} + +/** + * dal_validate_seq - validate that message sequence fits client interface, + * prevent user space client to use kernel space sequence + * + * @hdr: command header + * @count: message size + * @ctx: context - dal client + * + * Return: 0 when sequence match + * -EPERM when user space client uses kernel space sequence + * + * Locking: called under "ddev->write_lock" lock + */ +static int dal_validate_seq(const struct bh_command_header *hdr, + size_t count, void *ctx) +{ + struct dal_client *dc = ctx; + + if (dc->intf != DAL_INTF_KDI && dal_is_kdi_msg(hdr)) + return -EPERM; + + return 0; +} + +/* + * dal_write_filter_tbl - filter functions to validate that the message + * is being sent is valid, and the user client + * has the permissions to send it + */ +static const bh_filter_func dal_write_filter_tbl[] = { + dal_validate_access, + dal_validate_seq, + NULL, +}; + +/** + * dal_write - write message to DAL FW over mei + * + * @dc: dal client + * @buf: the message. 
+ * @count: message size + * @seq: message sequence (if client is kernel space client) + * + * Return: >=0 data length on success + * <0 on failure + */ +ssize_t dal_write(struct dal_client *dc, const void *buf, size_t count, u64 seq) +{ + struct dal_device *ddev = dc->ddev; + struct device *dev; + ssize_t wr; + ssize_t ret; + enum dal_intf intf = dc->intf; + + dev = &ddev->dev; + + dev_dbg(dev, "client interface %d\n", intf); + dal_dc_print(dev, dc); + + /* lock for adding new client that want to write to fifo */ + mutex_lock(&ddev->write_lock); + /* update client on latest msg seq number*/ + dc->seq = seq; + dev_dbg(dev, "current_write_client seq = %llu\n", dc->seq); + + /* put dc in the writers queue if not already set */ + if (list_first_entry_or_null(&ddev->writers, + struct dal_client, wrlink) != dc) { + /* adding client to write queue - this is the first fragment */ + const struct bh_command_header *hdr; + + hdr = bh_msg_cmd_hdr(buf, count); + if (!hdr) { + dev_dbg(dev, "expected cmd hdr at first fragment\n"); + ret = -EINVAL; + goto out; + } + ret = bh_filter_hdr(hdr, count, dc, dal_write_filter_tbl); + if (ret == -EPERM) { + ret = dal_send_error_access_denied(dc, buf); + ret = ret ? 
ret : count; + } + if (ret) + goto out; + + dc->bytes_sent_to_fw = 0; + dc->expected_msg_size_to_fw = hdr->h.length; + + list_add_tail(&dc->wrlink, &ddev->writers); + } + + /* wait for current writer to finish his write session */ + mutex_unlock(&ddev->write_lock); + ret = dal_wait_for_write(ddev, dc); + mutex_lock(&ddev->write_lock); + if (ret < 0) + goto out; + + dev_dbg(dev, "before mei_cldev_send - client type %d\n", intf); + + /* send msg via MEI */ + wr = mei_cldev_send(ddev->cldev, (void *)buf, count); + if (wr != count) { + /* ENODEV can be issued upon internal reset */ + if (wr != -ENODEV) { + dev_err(dev, "mei_cl_send() failed, write_bytes != count (%zd != %zu)\n", + wr, count); + ret = -EFAULT; + goto out; + } + /* if DAL FW client is disconnected, try to reconnect */ + dev_dbg(dev, "try to reconnect to DAL FW cl\n"); + ret = mei_cldev_disable(ddev->cldev); + if (ret < 0) { + dev_err(&ddev->cldev->dev, "failed to disable mei cl [%zd]\n", + ret); + goto out; + } + ret = dal_mei_enable(ddev); + if (ret < 0) + dev_err(&ddev->cldev->dev, "failed to reconnect to DAL FW client [%zd]\n", + ret); + else + ret = -EAGAIN; + + goto out; + } + + dev_dbg(dev, "wrote %zu bytes to fw - client type %d\n", wr, intf); + + /* update client byte sent */ + dc->bytes_sent_to_fw += count; + ret = wr; + + if (dc->bytes_sent_to_fw != dc->expected_msg_size_to_fw) { + dev_dbg(dev, "expecting to write more data to DAL FW - client type %d\n", + intf); + goto write_more; + } +out: + /* remove current dc from the queue */ + list_del_init(&dc->wrlink); + if (list_empty(&ddev->writers)) + wake_up_interruptible(&ddev->wq); + +write_more: + mutex_unlock(&ddev->write_lock); + return ret; +} + +/** + * dal_wait_for_read - wait until the client (dc) will have data + * in his read queue + * + * @dc: dal client + * + * Return: 0 on success + * -ENODEV when the device was removed + * -ERESTARTSYS: when interrupted. 
+ */ +int dal_wait_for_read(struct dal_client *dc) +{ + struct dal_device *ddev = dc->ddev; + struct device *dev = &ddev->dev; + int ret; + + dal_dc_print(dev, dc); + + dev_dbg(dev, "%s - client type %d kfifo status %d\n", __func__, + dc->intf, kfifo_is_empty(&dc->read_queue)); + + /* wait until there is data in the read_queue */ + ret = wait_event_interruptible(ddev->wq, + !kfifo_is_empty(&dc->read_queue) || + ddev->is_device_removed); + + dev_dbg(dev, "%s - client type %d status %d\n", __func__, + dc->intf, ret); + + /* FIXME: use reference counter */ + if (ddev->is_device_removed) { + dev_dbg(dev, "woke up, device was removed\n"); + return -ENODEV; + } + + return ret; +} + +/** + * dal_dc_destroy - destroy dal client + * + * @ddev: dal device + * @intf: device interface + * + * Locking: called under "ddev->context_lock" lock + */ +void dal_dc_destroy(struct dal_device *ddev, enum dal_intf intf) +{ + struct dal_client *dc; + + dc = ddev->clients[intf]; + if (!dc) + return; + + kfifo_free(&dc->read_queue); + kfree(dc); + ddev->clients[intf] = NULL; +} + +/** + * dal_dc_setup - initialize dal client + * + * @ddev: dal device + * @intf: device interface + * + * Return: 0 on success + * -EINVAL when client is already initialized + * -ENOMEM on memory allocation failure + */ +int dal_dc_setup(struct dal_device *ddev, enum dal_intf intf) +{ + int ret; + struct dal_client *dc; + size_t readq_sz; + + if (ddev->clients[intf]) { + dev_err(&ddev->dev, "client already set\n"); + return -EINVAL; + } + + dc = kzalloc(sizeof(*dc), GFP_KERNEL); + if (!dc) + return -ENOMEM; + + /* each buffer contains data and length */ + readq_sz = (DAL_MAX_BUFFER_SIZE + sizeof(ddev->bh_fw_msg.len)) * + DAL_BUFFERS_PER_CLIENT; + ret = kfifo_alloc(&dc->read_queue, readq_sz, GFP_KERNEL); + if (ret) { + kfree(dc); + return ret; + } + + dc->intf = intf; + dc->ddev = ddev; + INIT_LIST_HEAD(&dc->wrlink); + ddev->clients[intf] = dc; + return 0; +} + +/** + * dal_dev_match - match function to find dal 
device + * + * Used to get dal device from dal_class by device id + * + * @dev: device structure + * @data: the device id + * + * Return: 1 on match + * 0 on mismatch + */ +static int dal_dev_match(struct device *dev, const void *data) +{ + struct dal_device *ddev; + const enum dal_dev_type *device_id = + (enum dal_dev_type *)data; + + ddev = container_of(dev, struct dal_device, dev); + + return ddev->device_id == *device_id; +} + +/** + * dal_find_dev - get dal device from dal_class by device id + * + * @device_id: device id + * + * Return: pointer to the requested device + * NULL if the device wasn't found + */ +struct device *dal_find_dev(enum dal_dev_type device_id) +{ + return class_find_device(dal_class, NULL, &device_id, dal_dev_match); +} + +/** + * dal_remove - dal remove callback in mei_cl_driver + * + * @cldev: mei client device + * + * Return: 0 + */ +static int dal_remove(struct mei_cl_device *cldev) +{ + struct dal_device *ddev = mei_cldev_get_drvdata(cldev); + + if (!ddev) + return 0; + + dal_dev_del(ddev); + + ddev->is_device_removed = 1; + /* make sure the above is set */ + smp_mb(); + /* wakeup write waiters so we can unload */ + if (waitqueue_active(&ddev->wq)) + wake_up_interruptible(&ddev->wq); + + mei_cldev_set_drvdata(cldev, NULL); + + device_unregister(&ddev->dev); + + mei_cldev_disable(cldev); + + return 0; +} + +/** + * dal_device_release - dal release callback in dev structure + * + * @dev: device structure + */ +static void dal_device_release(struct device *dev) +{ + struct dal_device *ddev = to_dal_device(dev); + + dal_access_list_free(ddev); + kfree(ddev->bh_fw_msg.msg); + kfree(ddev); +} + +/** + * dal_probe - dal probe callback in mei_cl_driver + * + * @cldev: mei client device + * @id: mei client device id + * + * Return: 0 on success + * <0 on failure + */ +static int dal_probe(struct mei_cl_device *cldev, + const struct mei_cl_device_id *id) +{ + struct dal_device *ddev; + struct device *pdev = &cldev->dev; + int ret; + + ddev = 
kzalloc(sizeof(*ddev), GFP_KERNEL); + if (!ddev) + return -ENOMEM; + + /* initialize the mutex and wait queue */ + mutex_init(&ddev->context_lock); + mutex_init(&ddev->write_lock); + init_waitqueue_head(&ddev->wq); + INIT_LIST_HEAD(&ddev->writers); + ddev->cldev = cldev; + ddev->device_id = id->driver_info; + + ddev->dev.parent = pdev; + ddev->dev.class = dal_class; + ddev->dev.release = dal_device_release; + dev_set_name(&ddev->dev, "dal%d", ddev->device_id); + + dal_dev_setup(ddev); + + ret = device_register(&ddev->dev); + if (ret) { + dev_err(pdev, "unable to register device\n"); + goto err_unregister; + } + + ddev->bh_fw_msg.msg = kzalloc(DAL_MAX_BUFFER_SIZE, GFP_KERNEL); + if (!ddev->bh_fw_msg.msg) { + ret = -ENOMEM; + goto err_unregister; + } + + ret = dal_access_list_init(ddev); + if (ret) + goto err_unregister; + + ret = dal_mei_enable(ddev); + if (ret < 0) + goto err_unregister; + + ret = dal_dev_add(ddev); + if (ret) + goto err_disable; + + return 0; + +err_disable: + mei_cldev_set_drvdata(cldev, NULL); + mei_cldev_disable(cldev); +err_unregister: + device_unregister(&ddev->dev); + return ret; +} + +/* DAL FW HECI client GUIDs */ +#define IVM_UUID UUID_LE(0x3c4852d6, 0xd47b, 0x4f46, \ + 0xb0, 0x5e, 0xb5, 0xed, 0xc1, 0xaa, 0x44, 0x0e) +#define SDM_UUID UUID_LE(0xdba4d603, 0xd7ed, 0x4931, \ + 0x88, 0x23, 0x17, 0xad, 0x58, 0x57, 0x05, 0xd5) +#define RTM_UUID UUID_LE(0x5565a099, 0x7fe2, 0x45c1, \ + 0xa2, 0x2b, 0xd7, 0xe9, 0xdf, 0xea, 0x9a, 0x2e) + +#define DAL_DEV_ID(__uuid, __device_type) \ + {.uuid = __uuid, \ + .version = MEI_CL_VERSION_ANY, \ + .driver_info = __device_type} + +/* + * dal_device_id - ids of dal FW devices, + * for all 3 dal FW clients (IVM, SDM and RTM) + */ +static const struct mei_cl_device_id dal_device_id[] = { + DAL_DEV_ID(IVM_UUID, DAL_MEI_DEVICE_IVM), + DAL_DEV_ID(SDM_UUID, DAL_MEI_DEVICE_SDM), + DAL_DEV_ID(RTM_UUID, DAL_MEI_DEVICE_RTM), + /* required last entry */ + { } +}; +MODULE_DEVICE_TABLE(mei, dal_device_id); + +static struct 
mei_cl_driver dal_driver = { + .id_table = dal_device_id, + .name = KBUILD_MODNAME, + + .probe = dal_probe, + .remove = dal_remove, +}; + +/** + * mei_dal_exit - module exit function + */ +static void __exit mei_dal_exit(void) +{ + mei_cldev_driver_unregister(&dal_driver); + + dal_dev_exit(); + + dal_kdi_exit(); + + class_destroy(dal_class); +} + +/** + * mei_dal_init - module init function + * + * Return: 0 on success + * <0 on failure + */ +static int __init mei_dal_init(void) +{ + int ret; + + dal_class = class_create(THIS_MODULE, "dal"); + if (IS_ERR(dal_class)) { + pr_err("couldn't create class\n"); + return PTR_ERR(dal_class); + } + + ret = dal_dev_init(); + if (ret < 0) { + pr_err("failed allocate chrdev region = %d\n", ret); + goto err_class; + } + + ret = dal_kdi_init(); + if (ret) + goto err_dev; + + ret = mei_cldev_driver_register(&dal_driver); + if (ret < 0) { + pr_err("mei_cl_driver_register failed with status = %d\n", ret); + goto err; + } + + return 0; + +err: + dal_kdi_exit(); +err_dev: + dal_dev_exit(); +err_class: + class_destroy(dal_class); + return ret; +} + +module_init(mei_dal_init); +module_exit(mei_dal_exit); + +MODULE_AUTHOR("Intel Corporation"); +MODULE_DESCRIPTION("Intel(R) MEI Dynamic Application Loader (DAL)"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/misc/mei/dal/dal_dev.h b/drivers/misc/mei/dal/dal_dev.h new file mode 100644 index 0000000000000..e103271433367 --- /dev/null +++ b/drivers/misc/mei/dal/dal_dev.h @@ -0,0 +1,164 @@ +/* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 */ +/* + * Copyright(c) 2016 - 2018 Intel Corporation. All rights reserved. 
+ */ + +#ifndef _DAL_KDI_H_ +#define _DAL_KDI_H_ + +#include +#include +#include +#include + +#define DAL_MAX_BUFFER_SIZE 4096 +#define DAL_BUFFERS_PER_CLIENT 10 + +#define DAL_CLIENTS_PER_DEVICE 2 + +extern struct class *dal_class; + +/** + * enum dal_intf - dal interface type + * + * @DAL_INTF_KDI: (kdi) kernel space interface + * @DAL_INTF_CDEV: char device interface + */ +enum dal_intf { + DAL_INTF_KDI, + DAL_INTF_CDEV, +}; + +/** + * enum dal_dev_type - devices that are exposed to userspace + * + * @DAL_MEI_DEVICE_IVM: IVM - Intel/Issuer Virtual Machine + * @DAL_MEI_DEVICE_SDM: SDM - Security Domain Manager + * @DAL_MEI_DEVICE_RTM: RTM - Run Time Manager (Launcher) + * + * @DAL_MEI_DEVICE_MAX: max dal device type + */ +enum dal_dev_type { + DAL_MEI_DEVICE_IVM, + DAL_MEI_DEVICE_SDM, + DAL_MEI_DEVICE_RTM, + + DAL_MEI_DEVICE_MAX +}; + +/** + * struct dal_client - host client + * + * @ddev: dal parent device + * @wrlink: link in the writers list + * @read_queue: queue of received messages from DAL FW + * @intf: client interface - user space or kernel space + * + * @seq: the sequence number of the last message sent (in kernel space API only) + * When a message is received from DAL FW, we use this sequence number + * to decide which client should get the message. If the sequence + * number of the message is equals to the kernel space sequence number, + * the kernel space client should get the message. + * Otherwise the user space client will get it. 
+ * @expected_msg_size_from_fw: the expected msg size from DALFW + * @expected_msg_size_to_fw: the expected msg size that will be sent to DAL FW + * @bytes_rcvd_from_fw: number of bytes that were received from DAL FW + * @bytes_sent_to_fw: number of bytes that were sent to DAL FW + */ +struct dal_client { + struct dal_device *ddev; + struct list_head wrlink; + struct kfifo read_queue; + enum dal_intf intf; + + u64 seq; + u32 expected_msg_size_from_fw; + u32 expected_msg_size_to_fw; + u32 bytes_rcvd_from_fw; + u32 bytes_sent_to_fw; +}; + +/** + * struct dal_bh_msg - msg received from DAL FW. + * + * @len: message length + * @msg: message buffer + */ +struct dal_bh_msg { + size_t len; + char *msg; +}; + +/** + * struct dal_device - DAL private device struct. + * each DAL device has a context (i.e IVM, SDM, RTM) + * + * @dev: device on a bus + * @cdev: character device + * @status: dal device status + * + * @context_lock: big device lock + * @write_lock: lock over write list + * @wq: dal clients wait queue. 
When client wants to send or receive message, + * he waits in this queue until he is ready + * @writers: write pending list + * @clients: clients on this device (userspace and kernel space) + * @bh_fw_msg: message which was received from DAL FW + * @current_read_client: current reading client (which receives message from + * DAL FW) + * + * @cldev: the MEI CL device which corresponds to a single DAL FW HECI client + * + * @is_device_removed: device removed flag + * + * @device_id: DAL device type + */ +struct dal_device { + struct device dev; + struct cdev cdev; +#define DAL_DEV_OPENED 0 + unsigned long status; + + struct mutex context_lock; /* device lock */ + struct mutex write_lock; /* write lock */ + wait_queue_head_t wq; + struct list_head writers; + struct dal_client *clients[DAL_CLIENTS_PER_DEVICE]; + struct dal_bh_msg bh_fw_msg; + struct dal_client *current_read_client; + + struct mei_cl_device *cldev; + + unsigned int is_device_removed :1; + + unsigned int device_id; +}; + +#define to_dal_device(d) container_of(d, struct dal_device, dev) + +ssize_t dal_write(struct dal_client *dc, + const void *buf, size_t count, u64 seq); +int dal_wait_for_read(struct dal_client *dc); + +struct device *dal_find_dev(enum dal_dev_type device_id); + +void dal_dc_print(struct device *dev, struct dal_client *dc); +int dal_dc_setup(struct dal_device *ddev, enum dal_intf intf); +void dal_dc_destroy(struct dal_device *ddev, enum dal_intf intf); + +int dal_kdi_send(unsigned int handle, const unsigned char *buf, + size_t len, u64 seq); +int dal_kdi_recv(unsigned int handle, unsigned char *buf, size_t *count); +int dal_kdi_init(void); +void dal_kdi_exit(void); + +int dal_access_policy_add(struct dal_device *ddev, + const uuid_t *ta_id, void *owner); +int dal_access_policy_remove(struct dal_device *ddev, + const uuid_t *ta_id, void *owner); +int dal_access_policy_allowed(struct dal_device *ddev, + const uuid_t *ta_id, void *owner); +void dal_access_list_free(struct dal_device *ddev); 
+int dal_access_list_init(struct dal_device *ddev); + +#endif /* _DAL_KDI_H_ */ diff --git a/drivers/misc/mei/dal/dal_kdi.c b/drivers/misc/mei/dal/dal_kdi.c new file mode 100644 index 0000000000000..c557853085a2a --- /dev/null +++ b/drivers/misc/mei/dal/dal_kdi.c @@ -0,0 +1,557 @@ +// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 +/* + * Copyright(c) 2016 - 2018 Intel Corporation. All rights reserved. + */ + +#define pr_fmt(fmt) KBUILD_MODNAME ":%s: " fmt, __func__ + +#include +#include +#include +#include +#include +#include +#include +#include + +#include "bh_external.h" +#include "bh_errcode.h" +#include "acp_parser.h" +#include "dal_dev.h" + +static DEFINE_MUTEX(dal_kdi_lock); + +/** + * to_kdi_err- converts error number to kdi error + * + * Beihai errors (>0) are converted to DAL_KDI errors (those errors came + * from DAL FW) + * system errors and success value (<=0) stay as is + * + * @err: error code to convert (either bh err or system err) + * + * Return: the converted kdi error number or system error + */ +static int to_kdi_err(int err) +{ + if (err) + pr_debug("Error: %d\n", err); + + if (err <= 0) + return err; + + /* err > 0: is error from DAL FW */ + switch (err) { + case BPE_INTERNAL_ERROR: + return DAL_KDI_STATUS_INTERNAL_ERROR; + case BPE_INVALID_PARAMS: + case BHE_INVALID_PARAMS: + return DAL_KDI_STATUS_INVALID_PARAMS; + case BHE_INVALID_HANDLE: + return DAL_KDI_STATUS_INVALID_HANDLE; + case BPE_NOT_INIT: + return DAL_KDI_STATUS_NOT_INITIALIZED; + case BPE_OUT_OF_MEMORY: + case BHE_OUT_OF_MEMORY: + return DAL_KDI_STATUS_OUT_OF_MEMORY; + case BHE_INSUFFICIENT_BUFFER: + case BHE_APPLET_SMALL_BUFFER: + return DAL_KDI_STATUS_BUFFER_TOO_SMALL; + case BPE_OUT_OF_RESOURCE: + case BHE_VM_INSTANCE_INIT_FAIL: + return DAL_KDI_STATUS_OUT_OF_RESOURCE; + case BHE_SESSION_NUM_EXCEED: + return DAL_KDI_STATUS_MAX_SESSIONS_REACHED; + case BHE_UNCAUGHT_EXCEPTION: + return DAL_KDI_STATUS_UNCAUGHT_EXCEPTION; + case BHE_WD_TIMEOUT: + return 
DAL_KDI_STATUS_WD_TIMEOUT; + case BHE_APPLET_CRASHED: + return DAL_KDI_STATUS_APPLET_CRASHED; + case BHE_TA_PACKAGE_HASH_VERIFY_FAIL: + return DAL_KDI_STATUS_INVALID_ACP; + case BHE_PACKAGE_NOT_FOUND: + return DAL_KDI_STATUS_TA_NOT_FOUND; + case BHE_PACKAGE_EXIST: + return DAL_KDI_STATUS_TA_EXIST; + default: + return DAL_KDI_STATUS_INTERNAL_ERROR; + } +} + +/** + * dal_kdi_send - a callback which is called from bhp to send msg over mei + * + * @dev_idx: DAL device type + * @buf: message buffer + * @len: buffer length + * @seq: message sequence + * + * Return: 0 on success + * -EINVAL on incorrect input + * -ENODEV when the device can't be found + * -EFAULT if client is NULL + * <0 on dal_write failure + */ +int dal_kdi_send(unsigned int dev_idx, const unsigned char *buf, + size_t len, u64 seq) +{ + enum dal_dev_type mei_device; + struct dal_device *ddev; + struct dal_client *dc; + struct device *dev; + ssize_t wr; + int ret; + + if (!buf) + return -EINVAL; + + if (dev_idx >= DAL_MEI_DEVICE_MAX) + return -EINVAL; + + if (!len) + return 0; + + if (len > DAL_MAX_BUFFER_SIZE) + return -EMSGSIZE; + + mei_device = (enum dal_dev_type)dev_idx; + dev = dal_find_dev(mei_device); + if (!dev) { + dev_dbg(dev, "can't find device\n"); + return -ENODEV; + } + + ddev = to_dal_device(dev); + dc = ddev->clients[DAL_INTF_KDI]; + if (!dc) { + dev_dbg(dev, "client is NULL\n"); + ret = -EFAULT; + goto out; + } + + wr = dal_write(dc, buf, len, seq); + if (wr > 0) + ret = 0; + else + ret = wr; +out: + put_device(dev); + return ret; +} + +/** + * dal_kdi_recv - a callback which is called from bhp to recv msg from DAL FW + * + * @dev_idx: DAL device type + * @buf: buffer of received message + * @count: input and output param - + * - input: buffer length + * - output: size of the received message + * + * Return: 0 on success + * -EINVAL on incorrect input + * -ENODEV when the device can't be found + * -EFAULT when client is NULL or copy failed + * -EMSGSIZE when buffer is too small + * <0 on 
dal_wait_for_read failure + */ +int dal_kdi_recv(unsigned int dev_idx, unsigned char *buf, size_t *count) +{ + enum dal_dev_type mei_device; + struct dal_device *ddev; + struct dal_client *dc; + struct device *dev; + size_t r_len, len; + int ret; + + if (!buf || !count) + return -EINVAL; + + if (dev_idx >= DAL_MEI_DEVICE_MAX) + return -EINVAL; + + mei_device = (enum dal_dev_type)dev_idx; + dev = dal_find_dev(mei_device); + if (!dev) + return -ENODEV; + + ddev = to_dal_device(dev); + dc = ddev->clients[DAL_INTF_KDI]; + if (!dc) { + dev_dbg(dev, "client is NULL\n"); + ret = -EFAULT; + goto out; + } + + ret = dal_wait_for_read(dc); + if (ret) + goto out; + + if (kfifo_is_empty(&dc->read_queue)) { + *count = 0; + goto out; + } + + r_len = kfifo_out(&dc->read_queue, &len, sizeof(len)); + if (r_len != sizeof(len)) { + dev_err(&ddev->dev, "could not copy buffer: cannot fetch size\n"); + ret = -EFAULT; + goto out; + } + + if (len > *count) { + dev_dbg(&ddev->dev, "could not copy buffer: src size = %zd > dest size = %zd\n", + len, *count); + ret = -EMSGSIZE; + goto out; + } + + r_len = kfifo_out(&dc->read_queue, buf, len); + if (r_len != len) { + dev_err(&ddev->dev, "could not copy buffer: src size = %zd, dest size = %d\n", + len, ret); + ret = -EFAULT; + goto out; + } + + *count = len; + ret = 0; +out: + put_device(dev); + return ret; +} + +/** + * dal_create_session - create session to an installed trusted application. 
+ * + * @session_handle: output param to hold the session handle + * @ta_id: trusted application (ta) id + * @acp_pkg: acp file of the ta + * @acp_pkg_len: acp file length + * @init_param: init parameters to the session (optional) + * @init_param_len: length of the init parameters + * + * Return: 0 on success + * <0 on system failure + * >0 on DAL FW failure + */ +int dal_create_session(u64 *session_handle, const char *ta_id, + const u8 *acp_pkg, size_t acp_pkg_len, + const u8 *init_param, size_t init_param_len) +{ + struct ac_ins_jta_pack_ext pack; + char *ta_pkg; + int ta_pkg_size; + int ret; + + if (!ta_id || !acp_pkg || !acp_pkg_len || !session_handle) + return -EINVAL; + + /* init_param are optional, if they exists the length shouldn't be 0 */ + if (!init_param && init_param_len != 0) { + pr_debug("INVALID_PARAMS init_param %p init_param_len %zu\n", + init_param, init_param_len); + return -EINVAL; + } + + mutex_lock(&dal_kdi_lock); + + ret = acp_pload_ins_jta(acp_pkg, acp_pkg_len, &pack); + if (ret) { + pr_debug("acp_pload_ins_jta() return %d\n", ret); + goto out; + } + + ta_pkg = pack.ta_pack; + if (!ta_pkg) { + ret = -EINVAL; + goto out; + } + + ta_pkg_size = ta_pkg - (char *)acp_pkg; + + if (ta_pkg_size < 0 || (unsigned int)ta_pkg_size > acp_pkg_len) { + ret = -EINVAL; + goto out; + } + + ta_pkg_size = acp_pkg_len - ta_pkg_size; + + ret = bh_ta_session_open(session_handle, ta_id, ta_pkg, ta_pkg_size, + init_param, init_param_len); + + if (ret) + pr_debug("bh_ta_session_open failed = %d\n", ret); + +out: + mutex_unlock(&dal_kdi_lock); + + return to_kdi_err(ret); +} +EXPORT_SYMBOL(dal_create_session); + +/** + * dal_send_and_receive - send and receive data to/from ta + * + * @session_handle: session handle + * @command_id: command id + * @input: message to be sent + * @input_len: sent message size + * @output: output param to hold a pointer to the buffer which + * will contain the received message. 
+ * This buffer is allocated by DAL KDI module and freed by the user + * @output_len: input and output param - + * - input: the expected maximum length of the received message + * - output: size of the received message + * @response_code: An optional output param to hold the return value + * from the applet. Can be NULL. + * + * Return: 0 on success + * < 0 on system failure + * > 0 on DAL FW failure + */ +int dal_send_and_receive(u64 session_handle, int command_id, const u8 *input, + size_t input_len, u8 **output, size_t *output_len, + int *response_code) +{ + int ret; + + mutex_lock(&dal_kdi_lock); + + ret = bh_ta_session_command(session_handle, command_id, + input, input_len, + (void **)output, output_len, + response_code); + + if (ret) + pr_debug("bh_ta_session_command failed, status = %d\n", ret); + + mutex_unlock(&dal_kdi_lock); + + return to_kdi_err(ret); +} +EXPORT_SYMBOL(dal_send_and_receive); + +/** + * dal_close_session - close ta session + * + * @session_handle: session handle + * + * Return: 0 on success + * <0 on system failure + * >0 on DAL FW failure + */ +int dal_close_session(u64 session_handle) +{ + int ret; + + mutex_lock(&dal_kdi_lock); + + ret = bh_ta_session_close(session_handle); + + if (ret) + pr_debug("hp_close_ta_session failed = %d\n", ret); + + mutex_unlock(&dal_kdi_lock); + + return to_kdi_err(ret); +} +EXPORT_SYMBOL(dal_close_session); + +/** + * dal_set_ta_exclusive_access - set client to be owner of the ta, + * so no one else (especially user space client) + * will be able to open session to it + * + * @ta_id: trusted application (ta) id + * + * Return: 0 on success + * -ENODEV when the device can't be found + * -ENOMEM on memory allocation failure + * -EPERM when ta is owned by another client + * -EEXIST when ta is already owned by current client + */ +int dal_set_ta_exclusive_access(const uuid_t *ta_id) +{ + struct dal_device *ddev; + struct device *dev; + struct dal_client *dc; + int ret; + + mutex_lock(&dal_kdi_lock); + + dev = 
dal_find_dev(DAL_MEI_DEVICE_IVM); + if (!dev) { + dev_dbg(dev, "can't find device\n"); + ret = -ENODEV; + goto unlock; + } + + ddev = to_dal_device(dev); + dc = ddev->clients[DAL_INTF_KDI]; + + ret = dal_access_policy_add(ddev, ta_id, dc); + + put_device(dev); +unlock: + mutex_unlock(&dal_kdi_lock); + return ret; +} +EXPORT_SYMBOL(dal_set_ta_exclusive_access); + +/** + * dal_unset_ta_exclusive_access - unset client from owning ta + * + * @ta_id: trusted application (ta) id + * + * Return: 0 on success + * -ENODEV when the device can't be found + * -ENOENT when ta isn't found in exclusiveness ta list + * -EPERM when ta is owned by another client + */ +int dal_unset_ta_exclusive_access(const uuid_t *ta_id) +{ + struct dal_device *ddev; + struct device *dev; + struct dal_client *dc; + int ret; + + mutex_lock(&dal_kdi_lock); + + dev = dal_find_dev(DAL_MEI_DEVICE_IVM); + if (!dev) { + dev_dbg(dev, "can't find device\n"); + ret = -ENODEV; + goto unlock; + } + + ddev = to_dal_device(dev); + dc = ddev->clients[DAL_INTF_KDI]; + + ret = dal_access_policy_remove(ddev, ta_id, dc); + + put_device(dev); +unlock: + mutex_unlock(&dal_kdi_lock); + return ret; +} +EXPORT_SYMBOL(dal_unset_ta_exclusive_access); + +#define KDI_MAJOR_VER "1" +#define KDI_MINOR_VER "0" +#define KDI_HOTFIX_VER "0" + +#define KDI_VERSION KDI_MAJOR_VER "." \ + KDI_MINOR_VER "." 
\ + KDI_HOTFIX_VER + +/** + * dal_get_version_info - return DAL driver version + * + * @version_info: output param to hold DAL driver version information + * + * Return: 0 on success + * -EINVAL on incorrect input + */ +int dal_get_version_info(struct dal_version_info *version_info) +{ + if (!version_info) + return -EINVAL; + + memset(version_info, 0, sizeof(*version_info)); + snprintf(version_info->version, DAL_VERSION_LEN, "%s", KDI_VERSION); + + return 0; +} +EXPORT_SYMBOL(dal_get_version_info); + +/** + * dal_kdi_add_dev - add new dal device (one of dal_dev_type) + * + * @dev: device object which is associated with dal device + * @class_intf: class interface + * + * Return: 0 on success + * <0 on failure + * + * When new dal device is added, a new client is created for + * this device in kernel space interface + */ +static int dal_kdi_add_dev(struct device *dev, + struct class_interface *class_intf) +{ + int ret; + struct dal_device *ddev; + + ddev = to_dal_device(dev); + + mutex_lock(&ddev->context_lock); + ret = dal_dc_setup(ddev, DAL_INTF_KDI); + mutex_unlock(&ddev->context_lock); + return ret; +} + +/** + * dal_kdi_rm_dev - rm dal device (one of dal_dev_type) + * + * @dev: device object which is associated with dal device + * @class_intf: class interface + * + * Note: removal callbacks return no status, + * so failures cannot be reported here + */ +static void dal_kdi_rm_dev(struct device *dev, + struct class_interface *class_intf) +{ + struct dal_device *ddev; + + ddev = to_dal_device(dev); + + mutex_lock(&ddev->context_lock); + dal_dc_destroy(ddev, DAL_INTF_KDI); + mutex_unlock(&ddev->context_lock); +} + +/* + * dal_kdi_interface handles addition/removal of dal devices + */ +static struct class_interface dal_kdi_interface __refdata = { + .add_dev = dal_kdi_add_dev, + .remove_dev = dal_kdi_rm_dev, +}; + +/** + * dal_kdi_init - initialize dal kdi + * + * Return: 0 on success + * <0 on failure + */ +int dal_kdi_init(void) +{ + int ret; + + bh_init_internal(); + + dal_kdi_interface.class = dal_class; 
+ ret = class_interface_register(&dal_kdi_interface); + if (ret) { + pr_err("failed to register class interface = %d\n", ret); + goto err; + } + + return 0; + +err: + bh_deinit_internal(); + return ret; +} + +/** + * dal_kdi_exit - dal kdi exit function + */ +void dal_kdi_exit(void) +{ + bh_deinit_internal(); + class_interface_unregister(&dal_kdi_interface); +} diff --git a/drivers/misc/mei/dal/dal_ta_access.c b/drivers/misc/mei/dal/dal_ta_access.c new file mode 100644 index 0000000000000..c31c3f58527cb --- /dev/null +++ b/drivers/misc/mei/dal/dal_ta_access.c @@ -0,0 +1,234 @@ +// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 +/* + * Copyright(c) 2016 - 2018 Intel Corporation. All rights reserved. + */ + +#include +#include +#include + +#include +#include "dal_dev.h" + +/* Spooler UUID */ +static const uuid_t spooler_ta_id = UUID_INIT(0xba8d1643, 0x50b6, 0x49cc, + 0x86, 0x1d, 0x2c, 0x01, + 0xbe, 0xd1, 0x4b, 0xe8); + +/** + * struct dal_access_policy - ta access information node + * + * @list: link in access list + * @ta_id: trusted application id + * @owner: owner of ta + */ +struct dal_access_policy { + struct list_head list; + uuid_t ta_id; + void *owner; +}; + +/** + * dal_dev_get_access_list - get access list of dal device + * + * @ddev: dal device + * + * Return: pointer to access list + */ +static struct list_head *dal_dev_get_access_list(struct dal_device *ddev) +{ + return dev_get_drvdata(&ddev->dev); +} + +/** + * dal_access_policy_alloc - allocate memory and initialize access list node + * + * @ta_id: trusted application id + * @owner: owner of ta + * + * Return: pointer to the new initialized access list node + * + * Locking: called under "kdi_lock" lock + */ +static struct dal_access_policy * +dal_access_policy_alloc(const uuid_t *ta_id, void *owner) +{ + struct dal_access_policy *e; + + e = kzalloc(sizeof(*e), GFP_KERNEL); + if (!e) + return NULL; + + INIT_LIST_HEAD(&e->list); + e->ta_id = *ta_id; + e->owner = owner; + + return e; +} + +/** + * 
dal_access_policy_find - find ta id in access list + * + * @access_list: access list + * @ta_id: trusted application id + * + * Return: pointer to access list node of ta + * NULL if ta is not found in access list + */ +static struct dal_access_policy * +dal_access_policy_find(struct list_head *access_list, const uuid_t *ta_id) +{ + struct dal_access_policy *e; + + list_for_each_entry(e, access_list, list) { + if (uuid_equal(&e->ta_id, ta_id)) + return e; + } + return NULL; +} + +/** + * dal_access_policy_add - add access information of ta and its owner + * + * @ddev: dal device + * @ta_id: trusted application id + * @owner: owner of ta + * + * Return: 0 on success + * -ENOMEM on memory allocation failure + * -EPERM when ta already has another owner + * -EEXIST when access information already exists (same ta and owner) + * + * Locking: called under "kdi_lock" lock + */ +int dal_access_policy_add(struct dal_device *ddev, + const uuid_t *ta_id, void *owner) +{ + struct list_head *access_list = dal_dev_get_access_list(ddev); + struct dal_access_policy *e; + + e = dal_access_policy_find(access_list, ta_id); + if (e) { + if (!e->owner) + return -EPERM; + + return -EEXIST; + } + + e = dal_access_policy_alloc(ta_id, owner); + if (!e) + return -ENOMEM; + + list_add_tail(&e->list, access_list); + return 0; +} + +/** + * dal_access_policy_remove - remove access information of ta and its owner + * + * @ddev: dal device + * @ta_id: trusted application id + * @owner: owner of ta + * + * Return: 0 on success + * -ENOENT when ta isn't found in access list + * -EPERM when ta has another owner + * + * Locking: called under "kdi_lock" lock + */ +int dal_access_policy_remove(struct dal_device *ddev, + const uuid_t *ta_id, void *owner) +{ + struct list_head *access_list = dal_dev_get_access_list(ddev); + struct dal_access_policy *e; + + e = dal_access_policy_find(access_list, ta_id); + if (!e) + return -ENOENT; + + if (!e->owner || e->owner != owner) + return -EPERM; + + 
list_del(&e->list); + kfree(e); + return 0; +} + +/** + * dal_access_policy_allowed - check if owner is allowed to use ta + * + * @ddev: dal device + * @ta_id: trusted application id + * @owner: owner + * + * Return: 0 on success + * -EPERM when owner is not allowed to use ta + * + * Locking: called under "ddev->write_lock" lock + */ +int dal_access_policy_allowed(struct dal_device *ddev, + const uuid_t *ta_id, void *owner) +{ + struct list_head *access_list = dal_dev_get_access_list(ddev); + struct dal_access_policy *e; + + e = dal_access_policy_find(access_list, ta_id); + if (!e) + return 0; + + if (e->owner && e->owner != owner) + return -EPERM; + + return 0; +} + +/** + * dal_access_list_free - free memory of access list + * + * @ddev: dal device + */ +void dal_access_list_free(struct dal_device *ddev) +{ + struct list_head *access_list = dal_dev_get_access_list(ddev); + struct dal_access_policy *e, *n; + + if (!access_list) + return; + + list_for_each_entry_safe(e, n, access_list, list) { + list_del(&e->list); + kfree(e); + } + + kfree(access_list); + dev_set_drvdata(&ddev->dev, NULL); +} + +/** + * dal_access_list_init - initialize an empty access list + * + * @ddev: dal device + * + * Note: Add spooler ta id with blank owner to the list. + * This will prevent any user from setting itself owner of the spooler, + * which will block others from opening session to it. 
+ * + * Return: 0 on success + * -ENOMEM on memory allocation failure + */ +int dal_access_list_init(struct dal_device *ddev) +{ + struct list_head *access_list; + + access_list = kzalloc(sizeof(*access_list), GFP_KERNEL); + if (!access_list) + return -ENOMEM; + + INIT_LIST_HEAD(access_list); + dev_set_drvdata(&ddev->dev, access_list); + + /* Nobody can own SPOOLER TA */ + dal_access_policy_add(ddev, &spooler_ta_id, NULL); + + return 0; +} diff --git a/drivers/misc/mei/dal/dal_test.c b/drivers/misc/mei/dal/dal_test.c new file mode 100644 index 0000000000000..56da059e5cc64 --- /dev/null +++ b/drivers/misc/mei/dal/dal_test.c @@ -0,0 +1,775 @@ +// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause +/* + * Copyright(c) 2016 - 2018 Intel Corporation. All rights reserved. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +#include "uapi/kdi_cmd_defs.h" +#define KDI_MODULE "mei_dal" + +/** + * this is the max data size possible: + * there is no actually max size for acp file, + * but for testing 512k is good enough + */ +#define MAX_DATA_SIZE SZ_512K + +#define KDI_TEST_OPENED 0 + +/** + * struct dal_test_data - dal test cmd and response data + * + * @cmd_data_size: size of cmd got from user space + * @cmd_data: the cmd got from user space + * @cmd_lock: protects cmd_data buffer + * + * @resp_data_size: size of response from kdi + * @resp_data: the response from kdi + * @resp_lock: protects resp_data buffer + */ +struct dal_test_data { + u32 cmd_data_size; + u8 *cmd_data; + struct mutex cmd_lock; /* protects cmd_data buffer */ + + u32 resp_data_size; + u8 *resp_data; + struct mutex resp_lock; /* protects resp_data buffer */ +}; + +/** + * struct dal_test_device - dal test private data + * + * @dev: the device structure + * @cdev: character device + * + * @kdi_test_status: status of test module + * @data: cmd and response data + */ +static struct 
dal_test_device { + struct device *dev; + struct cdev cdev; + + unsigned long kdi_test_status; + struct dal_test_data *data; +} dal_test_dev; + +#if IS_MODULE(CONFIG_INTEL_MEI_DAL) +/** + * dal_test_find_module - find the given module + * + * @mod_name: the module name to find + * + * Return: pointer to the module if it is found + * NULL otherwise + */ +static struct module *dal_test_find_module(const char *mod_name) +{ + struct module *mod; + + mutex_lock(&module_mutex); + mod = find_module(mod_name); + mutex_unlock(&module_mutex); + + return mod; +} + +/** + * dal_test_load_kdi - load kdi module + * + * @dev: dal test device + * + * Return: 0 on success + * <0 on failure + */ +static int dal_test_load_kdi(struct dal_test_device *dev) +{ + struct module *mod; + + /* load KDI if it wasn't loaded */ + request_module(KDI_MODULE); + + mod = dal_test_find_module(KDI_MODULE); + if (!mod) { + dev_err(dev->dev, "failed to find KDI module: %s\n", + KDI_MODULE); + return -ENODEV; + } + + if (!try_module_get(mod)) { + dev_err(dev->dev, "failed to get KDI module\n"); + return -EFAULT; + } + + return 0; +} + +/** + * dal_test_unload_kdi - unload kdi module + * + * @dev: dal test device + * + * Return: 0 on success + * <0 on failure + */ +static int dal_test_unload_kdi(struct dal_test_device *dev) +{ + struct module *mod; + + mod = dal_test_find_module(KDI_MODULE); + if (!mod) { + dev_err(dev->dev, "failed to find KDI module: %s\n", + KDI_MODULE); + return -ENODEV; + } + module_put(mod); + + return 0; +} +#else +static inline int dal_test_load_kdi(struct dal_test_device *dev) { return 0; } +static inline int dal_test_unload_kdi(struct dal_test_device *dev) { return 0; } +#endif + +/** + * dal_test_result_set - set data to the result buffer + * + * @test_data: test command and response buffers + * @data: new data + * @size: size of the data buffer + */ +static void dal_test_result_set(struct dal_test_data *test_data, + void *data, u32 size) +{ + memcpy(test_data->resp_data, 
data, size); + test_data->resp_data_size = size; +} + +/** + * dal_test_result_append - append data to the result buffer + * + * @test_data: test command and response buffers + * @data: new data + * @size: size of the data buffer + */ +static void dal_test_result_append(struct dal_test_data *test_data, + void *data, u32 size) +{ + size_t offset = test_data->resp_data_size; + + memcpy(test_data->resp_data + offset, data, size); + test_data->resp_data_size += size; +} + +/** + * dal_test_send_and_recv - call send and receive function of kdi + * + * @dev: dal test device + * @t_cmd: the command to send kdi + * @t_data: test command and response buffers + */ +static void dal_test_send_and_recv(struct dal_test_device *dev, + struct kdi_test_command *t_cmd, + struct dal_test_data *t_data) +{ + struct send_and_rcv_cmd *cmd; + struct send_and_rcv_resp resp; + ssize_t data_size; + size_t output_len; + s32 response_code; + u8 *input; + u8 *output; + s32 status; + + memset(&resp, 0, sizeof(resp)); + + cmd = (struct send_and_rcv_cmd *)t_cmd->data; + data_size = t_data->cmd_data_size - sizeof(t_cmd->cmd_id) - + sizeof(*cmd); + if (data_size < 0) { + dev_dbg(dev->dev, "malformed command struct: data_size = %zu\n", + data_size); + resp.test_mod_status = -EINVAL; + + mutex_lock(&t_data->resp_lock); + dal_test_result_set(t_data, &resp, sizeof(resp)); + mutex_unlock(&t_data->resp_lock); + return; + } + + response_code = 0; + output = NULL; + input = (data_size) ? cmd->input : NULL; + output_len = (cmd->is_output_len_ptr) ? cmd->output_buf_len : 0; + + dev_dbg(dev->dev, "call dal_send_and_receive: handle=%llu command_id=%d input_len=%zd\n", + cmd->session_handle, cmd->command_id, data_size); + + status = dal_send_and_receive(cmd->session_handle, cmd->command_id, + input, data_size, + cmd->is_output_buf ? &output : NULL, + cmd->is_output_len_ptr ? + &output_len : NULL, + cmd->is_response_code_ptr ? 
+ &response_code : NULL); + + dev_dbg(dev->dev, "dal_send_and_receive return: status=%d output_len=%zu response_code=%d\n", + status, output_len, response_code); + + resp.output_len = (u32)output_len; + resp.response_code = response_code; + resp.status = status; + resp.test_mod_status = 0; + + /* in case the call failed we don't copy the data */ + mutex_lock(&t_data->resp_lock); + dal_test_result_set(t_data, &resp, sizeof(resp)); + if (output && resp.output_len) + dal_test_result_append(t_data, output, resp.output_len); + mutex_unlock(&t_data->resp_lock); + + kfree(output); +} + +/** + * dal_test_create_session - call create session function of kdi + * + * @dev: dal test device + * @t_cmd: the command to send kdi + * @t_data: test command and response buffers + */ +static void dal_test_create_session(struct dal_test_device *dev, + struct kdi_test_command *t_cmd, + struct dal_test_data *t_data) +{ + struct session_create_cmd *cmd; + struct session_create_resp resp; + u32 data_size; + u64 handle; + char *app_id; + u8 *acp_pkg; + u8 *init_params; + u32 offset; + s32 status; + + memset(&resp, 0, sizeof(resp)); + + cmd = (struct session_create_cmd *)t_cmd->data; + data_size = t_data->cmd_data_size - sizeof(t_cmd->cmd_id) - + sizeof(*cmd); + + if (cmd->app_id_len + cmd->acp_pkg_len + cmd->init_param_len != + data_size) { + dev_dbg(dev->dev, "malformed command struct: data_size = %d\n", + data_size); + resp.test_mod_status = -EINVAL; + + mutex_lock(&t_data->resp_lock); + dal_test_result_set(t_data, &resp, sizeof(resp)); + mutex_unlock(&t_data->resp_lock); + return; + } + + handle = 0; + + offset = 0; + app_id = (cmd->app_id_len) ? cmd->data + offset : NULL; + offset += cmd->app_id_len; + + acp_pkg = (cmd->acp_pkg_len) ? cmd->data + offset : NULL; + offset += cmd->acp_pkg_len; + + init_params = (cmd->init_param_len) ? 
cmd->data + offset : NULL; + offset += cmd->init_param_len; + + dev_dbg(dev->dev, "call dal_create_session params: app_id = %s, app_id len = %d, acp pkg len = %d, init params len = %d\n", + app_id, cmd->app_id_len, cmd->acp_pkg_len, cmd->init_param_len); + + status = dal_create_session(cmd->is_session_handle_ptr ? + &handle : NULL, + app_id, acp_pkg, + cmd->acp_pkg_len, + init_params, + cmd->init_param_len); + dev_dbg(dev->dev, "dal_create_session return: status = %d, handle = %llu\n", + status, handle); + + resp.session_handle = handle; + resp.status = status; + resp.test_mod_status = 0; + + mutex_lock(&t_data->resp_lock); + dal_test_result_set(t_data, &resp, sizeof(resp)); + mutex_unlock(&t_data->resp_lock); +} + +/** + * dal_test_close_session - call close session function of kdi + * + * @dev: dal test device + * @t_cmd: the command to send kdi + * @t_data: test command and response buffers + */ +static void dal_test_close_session(struct dal_test_device *dev, + struct kdi_test_command *t_cmd, + struct dal_test_data *t_data) +{ + struct session_close_cmd *cmd; + struct session_close_resp resp; + + memset(&resp, 0, sizeof(resp)); + + cmd = (struct session_close_cmd *)t_cmd->data; + if (t_data->cmd_data_size != sizeof(t_cmd->cmd_id) + sizeof(*cmd)) { + dev_dbg(dev->dev, "malformed command struct\n"); + resp.test_mod_status = -EINVAL; + + mutex_lock(&t_data->resp_lock); + dal_test_result_set(t_data, &resp, sizeof(resp)); + mutex_unlock(&t_data->resp_lock); + return; + } + + resp.status = dal_close_session(cmd->session_handle); + resp.test_mod_status = 0; + + mutex_lock(&t_data->resp_lock); + dal_test_result_set(t_data, &resp, sizeof(resp)); + mutex_unlock(&t_data->resp_lock); +} + +/** + * dal_test_version_info - call get version function of kdi + * + * @dev: dal test device + * @t_cmd: the command to send kdi + * @t_data: test command and response buffers + */ +static void dal_test_version_info(struct dal_test_device *dev, + struct kdi_test_command *t_cmd, + struct 
dal_test_data *t_data) +{ + struct version_get_info_cmd *cmd; + struct version_get_info_resp resp; + struct dal_version_info *version; + + memset(&resp, 0, sizeof(resp)); + + cmd = (struct version_get_info_cmd *)t_cmd->data; + if (t_data->cmd_data_size != sizeof(t_cmd->cmd_id) + sizeof(*cmd)) { + dev_dbg(dev->dev, "malformed command struct\n"); + resp.test_mod_status = -EINVAL; + mutex_lock(&t_data->resp_lock); + dal_test_result_set(t_data, &resp, sizeof(resp)); + mutex_unlock(&t_data->resp_lock); + return; + } + + version = (cmd->is_version_ptr) ? + (struct dal_version_info *)resp.kdi_version : NULL; + + resp.status = dal_get_version_info(version); + resp.test_mod_status = 0; + + mutex_lock(&t_data->resp_lock); + dal_test_result_set(t_data, &resp, sizeof(resp)); + mutex_unlock(&t_data->resp_lock); +} + +/** + * dal_test_set_ex_access - call set/remove access function of kdi + * + * @dev: dal test device + * @t_cmd: the command to send kdi + * @t_data: test command and response buffers + * @set_access: true when calling set access function + * false when calling remove access function + */ +static void dal_test_set_ex_access(struct dal_test_device *dev, + struct kdi_test_command *t_cmd, + struct dal_test_data *t_data, + bool set_access) +{ + struct ta_access_set_remove_cmd *cmd; + struct ta_access_set_remove_resp resp; + u32 data_size; + uuid_t app_uuid; + char *app_id; + s32 status; + + memset(&resp, 0, sizeof(resp)); + + cmd = (struct ta_access_set_remove_cmd *)t_cmd->data; + data_size = t_data->cmd_data_size - sizeof(t_cmd->cmd_id) - + sizeof(*cmd); + + if (cmd->app_id_len != data_size) { + dev_dbg(dev->dev, "malformed command struct\n"); + resp.test_mod_status = -EINVAL; + + mutex_lock(&t_data->resp_lock); + dal_test_result_set(t_data, &resp, sizeof(resp)); + mutex_unlock(&t_data->resp_lock); + return; + } + + app_id = (cmd->app_id_len) ? 
cmd->data : NULL; + + status = dal_uuid_parse(app_id, &app_uuid); + if (status < 0) + goto out; + + if (set_access) + status = dal_set_ta_exclusive_access(&app_uuid); + else + status = dal_unset_ta_exclusive_access(&app_uuid); + +out: + resp.status = status; + resp.test_mod_status = 0; + + mutex_lock(&t_data->resp_lock); + dal_test_result_set(t_data, &resp, sizeof(resp)); + mutex_unlock(&t_data->resp_lock); +} + +/** + * dal_test_kdi_command - parse and invoke the requested command + * + * @dev: dal test device + */ +static void dal_test_kdi_command(struct dal_test_device *dev) +{ + struct dal_test_data *test_data; + struct kdi_test_command *cmd; + s32 status; + + test_data = dev->data; + cmd = (struct kdi_test_command *)test_data->cmd_data; + + if (test_data->cmd_data_size < sizeof(cmd->cmd_id)) { + dev_dbg(dev->dev, "malformed command struct\n"); + status = -EINVAL; + goto prep_err_test_mod; + } + + switch (cmd->cmd_id) { + case KDI_SESSION_CREATE: { + dev_dbg(dev->dev, "KDI_CREATE_SESSION[%d]\n", cmd->cmd_id); + dal_test_create_session(dev, cmd, test_data); + break; + } + case KDI_SESSION_CLOSE: { + dev_dbg(dev->dev, "KDI_CLOSE_SESSION[%d]\n", cmd->cmd_id); + dal_test_close_session(dev, cmd, test_data); + break; + } + case KDI_SEND_AND_RCV: { + dev_dbg(dev->dev, "KDI_SEND_AND_RCV[%d]\n", cmd->cmd_id); + dal_test_send_and_recv(dev, cmd, test_data); + break; + } + case KDI_VERSION_GET_INFO: { + dev_dbg(dev->dev, "KDI_GET_VERSION_INFO[%d]\n", cmd->cmd_id); + dal_test_version_info(dev, cmd, test_data); + break; + } + case KDI_EXCLUSIVE_ACCESS_SET: + case KDI_EXCLUSIVE_ACCESS_REMOVE: { + dev_dbg(dev->dev, "KDI_SET_EXCLUSIVE_ACCESS or KDI_REMOVE_EXCLUSIVE_ACCESS[%d]\n", + cmd->cmd_id); + dal_test_set_ex_access(dev, cmd, test_data, + cmd->cmd_id == KDI_EXCLUSIVE_ACCESS_SET); + break; + } + default: + dev_dbg(dev->dev, "unknown command %d\n", cmd->cmd_id); + status = -EINVAL; + goto prep_err_test_mod; + } + + return; + +prep_err_test_mod: + 
mutex_lock(&test_data->resp_lock); + dal_test_result_set(test_data, &status, sizeof(status)); + mutex_unlock(&test_data->resp_lock); +} + +/** + * dal_test_read - dal test read function + * + * @filp: pointer to file structure + * @buff: pointer to user buffer + * @count: buffer length + * @offp: data offset in buffer + * + * Return: >=0 data length on success + * <0 on failure + */ +static ssize_t dal_test_read(struct file *filp, char __user *buff, size_t count, + loff_t *offp) +{ + struct dal_test_device *dev; + struct dal_test_data *test_data; + int ret; + + dev = filp->private_data; + test_data = dev->data; + + mutex_lock(&test_data->resp_lock); + + if (test_data->resp_data_size > count) { + ret = -EMSGSIZE; + goto unlock; + } + + dev_dbg(dev->dev, "copying %d bytes to userspace\n", + test_data->resp_data_size); + if (copy_to_user(buff, test_data->resp_data, + test_data->resp_data_size)) { + dev_dbg(dev->dev, "copy_to_user failed\n"); + ret = -EFAULT; + goto unlock; + } + ret = test_data->resp_data_size; + +unlock: + mutex_unlock(&test_data->resp_lock); + + return ret; +} + +/** + * dal_test_write - dal test write function + * + * @filp: pointer to file structure + * @buff: pointer to user buffer + * @count: buffer length + * @offp: data offset in buffer + * + * Return: >=0 data length on success + * <0 on failure + */ +static ssize_t dal_test_write(struct file *filp, const char __user *buff, + size_t count, loff_t *offp) +{ + struct dal_test_device *dev; + struct dal_test_data *test_data; + + dev = filp->private_data; + test_data = dev->data; + + if (count > MAX_DATA_SIZE) + return -EMSGSIZE; + + mutex_lock(&test_data->cmd_lock); + + if (copy_from_user(test_data->cmd_data, buff, count)) { + mutex_unlock(&test_data->cmd_lock); + dev_dbg(dev->dev, "copy_from_user failed\n"); + return -EFAULT; + } + + test_data->cmd_data_size = count; + dev_dbg(dev->dev, "write %zu bytes\n", count); + + dal_test_kdi_command(dev); + + mutex_unlock(&test_data->cmd_lock); + + return 
count; +} + +/** + * dal_test_open - dal test open function + * + * @inode: pointer to inode structure + * @filp: pointer to file structure + * + * Return: 0 on success + * <0 on failure + */ +static int dal_test_open(struct inode *inode, struct file *filp) +{ + struct dal_test_device *dev; + struct dal_test_data *test_data; + int ret; + + dev = container_of(inode->i_cdev, struct dal_test_device, cdev); + if (!dev) + return -ENODEV; + + /* single open */ + if (test_and_set_bit(KDI_TEST_OPENED, &dev->kdi_test_status)) + return -EBUSY; + + test_data = kzalloc(sizeof(*test_data), GFP_KERNEL); + if (!test_data) { + ret = -ENOMEM; + goto err_clear_bit; + } + + test_data->cmd_data = kzalloc(MAX_DATA_SIZE, GFP_KERNEL); + test_data->resp_data = kzalloc(MAX_DATA_SIZE, GFP_KERNEL); + if (!test_data->cmd_data || !test_data->resp_data) { + ret = -ENOMEM; + goto err_free; + } + + mutex_init(&test_data->cmd_lock); + mutex_init(&test_data->resp_lock); + + ret = dal_test_load_kdi(dev); + if (ret) + goto err_free; + + dev->data = test_data; + filp->private_data = dev; + + return nonseekable_open(inode, filp); + +err_free: + kfree(test_data->cmd_data); + kfree(test_data->resp_data); + kfree(test_data); + +err_clear_bit: + clear_bit(KDI_TEST_OPENED, &dev->kdi_test_status); + + return ret; +} + +/** + * dal_test_release - dal test release function + * + * @inode: pointer to inode structure + * @filp: pointer to file structure + * + * Return: 0 on success + * <0 on failure + */ +static int dal_test_release(struct inode *inode, struct file *filp) +{ + struct dal_test_device *dev; + struct dal_test_data *test_data; + + dev = filp->private_data; + if (!dev) + return -ENODEV; + + dal_test_unload_kdi(dev); + + test_data = dev->data; + if (test_data) { + kfree(test_data->cmd_data); + kfree(test_data->resp_data); + kfree(test_data); + } + + clear_bit(KDI_TEST_OPENED, &dev->kdi_test_status); + + filp->private_data = NULL; + + return 0; +} + +static const struct file_operations dal_test_fops = 
{ + .owner = THIS_MODULE, + .open = dal_test_open, + .release = dal_test_release, + .read = dal_test_read, + .write = dal_test_write, + .llseek = no_llseek, +}; + +/** + * dal_test_exit - destroy dal test device + */ +static void __exit dal_test_exit(void) +{ + struct dal_test_device *dev = &dal_test_dev; + struct class *dal_test_class; + static dev_t devt; + + dal_test_class = dev->dev->class; + devt = dev->dev->devt; + + cdev_del(&dev->cdev); + unregister_chrdev_region(devt, MINORMASK); + device_destroy(dal_test_class, devt); + class_destroy(dal_test_class); +} + +/** + * dal_test_init - initialize dal test device + * + * Return: 0 on success + * <0 on failure + */ +static int __init dal_test_init(void) +{ + struct dal_test_device *dev = &dal_test_dev; + struct class *dal_test_class; + static dev_t devt; + int ret; + + ret = alloc_chrdev_region(&devt, 0, 1, "mei_dal_test"); + if (ret) + return ret; + + dal_test_class = class_create(THIS_MODULE, "mei_dal_test"); + if (IS_ERR(dal_test_class)) { + ret = PTR_ERR(dal_test_class); + dal_test_class = NULL; + goto err_unregister_cdev; + } + + dev->dev = device_create(dal_test_class, NULL, devt, dev, "dal_test0"); + if (IS_ERR(dev->dev)) { + ret = PTR_ERR(dev->dev); + goto err_class_destroy; + } + + cdev_init(&dev->cdev, &dal_test_fops); + dev->cdev.owner = THIS_MODULE; + ret = cdev_add(&dev->cdev, devt, 1); + if (ret) + goto err_device_destroy; + + return 0; + +err_device_destroy: + device_destroy(dal_test_class, devt); +err_class_destroy: + class_destroy(dal_test_class); +err_unregister_cdev: + unregister_chrdev_region(devt, 1); + + return ret; +} + +module_init(dal_test_init); +module_exit(dal_test_exit); + +MODULE_AUTHOR("Intel Corporation"); +MODULE_DESCRIPTION("Intel(R) DAL test"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/misc/mei/dal/uapi/kdi_cmd_defs.h b/drivers/misc/mei/dal/uapi/kdi_cmd_defs.h new file mode 100644 index 0000000000000..13717df1ae547 --- /dev/null +++ 
b/drivers/misc/mei/dal/uapi/kdi_cmd_defs.h @@ -0,0 +1,176 @@ +/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ +/* + * Copyright(c) 2016 - 2018 Intel Corporation. All rights reserved. + */ + +#ifndef KDI_CMD_DEFS_H +#define KDI_CMD_DEFS_H + +/** + * enum kdi_command_id - cmd id to invoke in kdi module + * + * @KDI_SESSION_CREATE: call kdi "create session" function + * @KDI_SESSION_CLOSE: call kdi "close session" function + * @KDI_SEND_AND_RCV: call kdi "send and receive" function + * @KDI_VERSION_GET_INFO: call kdi "get version" function + * @KDI_EXCLUSIVE_ACCESS_SET: call kdi "set exclusive access" function + * @KDI_EXCLUSIVE_ACCESS_REMOVE: call kdi "unset exclusive access" function + */ +enum kdi_command_id { + KDI_SESSION_CREATE, + KDI_SESSION_CLOSE, + KDI_SEND_AND_RCV, + KDI_VERSION_GET_INFO, + KDI_EXCLUSIVE_ACCESS_SET, + KDI_EXCLUSIVE_ACCESS_REMOVE +}; + +/** + * struct kdi_test_command - contains the command received from user space + * + * @cmd_id: the command id + * @data: the command data + */ +struct kdi_test_command { + __u8 cmd_id; + unsigned char data[0]; +} __packed; + +/** + * struct session_create_cmd - create session cmd data + * + * @app_id_len: length of app_id arg + * @acp_pkg_len: length of the acp_pkg arg + * @init_param_len: length of init param arg + * @is_session_handle_ptr: either send kdi a valid ptr to hold the + * session handle or NULL + * @data: buffer to hold the cmd arguments + */ +struct session_create_cmd { + __u32 app_id_len; + __u32 acp_pkg_len; + __u32 init_param_len; + __u8 is_session_handle_ptr; + unsigned char data[0]; +} __packed; + +/** + * struct session_create_resp - create session response + * + * @session_handle: the session handle + * @test_mod_status: status returned from the test module + * @status: status returned from kdi + */ +struct session_create_resp { + __u64 session_handle; + __s32 test_mod_status; + __s32 status; +} __packed; + +/** + * struct session_close_cmd - close session cmd + * + * 
@session_handle: the session handle to close + */ +struct session_close_cmd { + __u64 session_handle; +} __packed; + +/** + * struct session_close_resp - close session response + * + * @test_mod_status: status returned from the test module + * @status: status returned from kdi + */ +struct session_close_resp { + __s32 test_mod_status; + __s32 status; +} __packed; + +/** + * struct send_and_rcv_cmd - send and receive cmd + * + * @session_handle: the session handle + * @command_id: the cmd id to send the applet + * @output_buf_len: the size of the output buffer + * @is_output_buf: either send kdi a valid ptr to hold the output buffer or NULL + * @is_output_len_ptr: either send kdi a valid ptr to hold + * the output len or NULL + * @is_response_code_ptr: either send kdi a valid ptr to hold + * the applet response code or NULL + * @input: the input data to send the applet + */ +struct send_and_rcv_cmd { + __u64 session_handle; + __u32 command_id; + __u32 output_buf_len; + __u8 is_output_buf; + __u8 is_output_len_ptr; + __u8 is_response_code_ptr; + unsigned char input[0]; +} __packed; + +/** + * struct send_and_rcv_resp - send and receive response + * + * @test_mod_status: status returned from the test module + * @status: status returned from kdi + * @response_code: response code returned from the applet + * @output_len: length of output from the applet + * @output: the output got from the applet + */ +struct send_and_rcv_resp { + __s32 test_mod_status; + __s32 status; + __s32 response_code; + __u32 output_len; + unsigned char output[0]; +} __packed; + +/** + * struct version_get_info_cmd - get version cmd + * + * @is_version_ptr: either send kdi a valid ptr to hold the version info or NULL + */ +struct version_get_info_cmd { + __u8 is_version_ptr; +} __packed; + +/** + * struct version_get_info_resp - get version response + * + * @kdi_version: kdi version + * @reserved: reserved bytes + * @test_mod_status: status returned from the test module + * @status: status 
returned from kdi + */ +struct version_get_info_resp { + char kdi_version[32]; + __u32 reserved[4]; + __s32 test_mod_status; + __s32 status; +} __packed; + +/** + * struct ta_access_set_remove_cmd - set/remove access cmd + * + * @app_id_len: length of app_id arg + * @data: the cmd data. contains the app_id + */ +struct ta_access_set_remove_cmd { + __u32 app_id_len; + unsigned char data[0]; +} __packed; + +/** + * struct ta_access_set_remove_resp - set/remove access response + * + * @test_mod_status: status returned from the test module + * @status: status returned from kdi + */ +struct ta_access_set_remove_resp { + __s32 test_mod_status; + __s32 status; +} __packed; + +#endif /* KDI_CMD_DEFS_H */ diff --git a/drivers/misc/mei/debugfs.c b/drivers/misc/mei/debugfs.c index 7b5df8fd6c5ad..0a254cceb2fae 100644 --- a/drivers/misc/mei/debugfs.c +++ b/drivers/misc/mei/debugfs.c @@ -36,7 +36,7 @@ static ssize_t mei_dbgfs_read_meclients(struct file *fp, char __user *ubuf, int ret; #define HDR \ -" |id|fix| UUID |con|msg len|sb|refc|\n" +" |id|fix| UUID |con|msg len|sb|refc|vm|\n" down_read(&dev->me_clients_rwsem); list_for_each_entry(me_cl, &dev->me_clients, list) @@ -60,14 +60,15 @@ static ssize_t mei_dbgfs_read_meclients(struct file *fp, char __user *ubuf, if (mei_me_cl_get(me_cl)) { pos += scnprintf(buf + pos, bufsz - pos, - "%2d|%2d|%3d|%pUl|%3d|%7d|%2d|%4d|\n", + "%2d|%2d|%3d|%pUl|%3d|%7d|%2d|%4d|%2d|\n", i++, me_cl->client_id, me_cl->props.fixed_address, &me_cl->props.protocol_name, me_cl->props.max_number_of_connections, me_cl->props.max_msg_length, me_cl->props.single_recv_buf, - kref_read(&me_cl->refcnt)); + kref_read(&me_cl->refcnt), + me_cl->props.vm_supported); mei_me_cl_put(me_cl); } @@ -185,6 +186,10 @@ static ssize_t mei_dbgfs_read_devstate(struct file *fp, char __user *ubuf, dev->hbm_f_os_supported); pos += scnprintf(buf + pos, bufsz - pos, "\tDR: %01d\n", dev->hbm_f_dr_supported); + pos += scnprintf(buf + pos, bufsz - pos, "\tVM: %01d\n", + 
dev->hbm_f_vm_supported); + pos += scnprintf(buf + pos, bufsz - pos, "\tCAP: %01d\n", + dev->hbm_f_cap_supported); } pos += scnprintf(buf + pos, bufsz - pos, "pg: %s, %s\n", diff --git a/drivers/misc/mei/dma-ring.c b/drivers/misc/mei/dma-ring.c new file mode 100644 index 0000000000000..f809db5c0129f --- /dev/null +++ b/drivers/misc/mei/dma-ring.c @@ -0,0 +1,278 @@ +// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 +/* + * Copyright(c) 2016 - 2018 Intel Corporation. All rights reserved. + */ +#include +#include + +#include "mei_dev.h" + +/** + * mei_dmam_dscr_alloc - allocate a managed coherent buffer + * for the dma descriptor + * + * @dev: mei_device + * @dscr: dma descriptor + * + * Return: 0 on success or zero allocation request + * -EINVAL if size is not power of 2 + * -ENOMEM of allocation has failed + */ +static int mei_dmam_dscr_alloc(struct mei_device *dev, + struct mei_dma_dscr *dscr) +{ + if (!dscr->size) + return 0; + + if (WARN_ON(!is_power_of_2(dscr->size))) + return -EINVAL; + + if (dscr->vaddr) + return 0; + + dscr->vaddr = dmam_alloc_coherent(dev->dev, dscr->size, &dscr->daddr, + GFP_KERNEL); + if (!dscr->vaddr) + return -ENOMEM; + + return 0; +} + +/** + * mei_dmam_dscr_free - free a managed coherent buffer + * from the dma descriptor + * + * @dev: mei_device + * @dscr: dma descriptor + */ +static void mei_dmam_dscr_free(struct mei_device *dev, + struct mei_dma_dscr *dscr) +{ + if (!dscr->vaddr) + return; + + dmam_free_coherent(dev->dev, dscr->size, dscr->vaddr, dscr->daddr); + dscr->vaddr = NULL; +} + +/** + * mei_dmam_ring_free - free dma ring buffers + * + * @dev: mei device + */ +void mei_dmam_ring_free(struct mei_device *dev) +{ + int i; + + for (i = 0; i < DMA_DSCR_NUM; i++) + mei_dmam_dscr_free(dev, &dev->dr_dscr[i]); +} + +/** + * mei_dmam_ring_alloc - allocate dma ring buffers + * + * @dev: mei device + * + * Return: -ENOMEM on allocation failure 0 otherwise + */ +int mei_dmam_ring_alloc(struct mei_device *dev) +{ + int i; + + for (i = 
0; i < DMA_DSCR_NUM; i++) + if (mei_dmam_dscr_alloc(dev, &dev->dr_dscr[i])) + goto err; + + return 0; + +err: + mei_dmam_ring_free(dev); + return -ENOMEM; +} + +/** + * mei_dma_ring_is_allocated - check if dma ring is allocated + * + * @dev: mei device + * + * Return: true if dma ring is allocated + */ +bool mei_dma_ring_is_allocated(struct mei_device *dev) +{ + return !!dev->dr_dscr[DMA_DSCR_HOST].vaddr; +} + +static inline +struct hbm_dma_ring_ctrl *mei_dma_ring_ctrl(struct mei_device *dev) +{ + return (struct hbm_dma_ring_ctrl *)dev->dr_dscr[DMA_DSCR_CTRL].vaddr; +} + +/** + * mei_dma_ring_reset - reset the dma control block + * + * @dev: mei device + */ +void mei_dma_ring_reset(struct mei_device *dev) +{ + struct hbm_dma_ring_ctrl *ctrl = mei_dma_ring_ctrl(dev); + + if (!ctrl) + return; + + memset(ctrl, 0, sizeof(*ctrl)); +} + +/** + * mei_dma_ring_reset - copy from dma ring into buffer + * + * @dev: mei device + * @buf: data buffer + * @offset: offset in slots. + * @n: number of slots to copy. + */ +static size_t mei_dma_copy_from(struct mei_device *dev, unsigned char *buf, + u32 offset, u32 n) +{ + unsigned char *dbuf = dev->dr_dscr[DMA_DSCR_DEVICE].vaddr; + + size_t b_offset = offset << 2; + size_t b_n = n << 2; + + memcpy(buf, dbuf + b_offset, b_n); + + return b_n; +} + +/** + * mei_dma_copy_to - copy to a buffer to the dma ring + * + * @dev: mei device + * @buf: data buffer + * @offset: offset in slots. + * @n: number of slots to copy. + */ +static size_t mei_dma_copy_to(struct mei_device *dev, unsigned char *buf, + u32 offset, u32 n) +{ + unsigned char *hbuf = dev->dr_dscr[DMA_DSCR_HOST].vaddr; + + size_t b_offset = offset << 2; + size_t b_n = n << 2; + + memcpy(hbuf + b_offset, buf, b_n); + + return b_n; +} + +/** + * mei_dma_ring_read - read data from the ring + * + * @dev: mei device + * @buf: buffer to read into: may be NULL in case of droping the data. + * @len: length to read. 
+ */ +void mei_dma_ring_read(struct mei_device *dev, unsigned char *buf, u32 len) +{ + struct hbm_dma_ring_ctrl *ctrl = mei_dma_ring_ctrl(dev); + u32 dbuf_depth; + u32 rd_idx, rem, slots; + + if (WARN_ON(!ctrl)) + return; + + dev_dbg(dev->dev, "reading from dma %u bytes\n", len); + + if (!len) + return; + + dbuf_depth = dev->dr_dscr[DMA_DSCR_DEVICE].size >> 2; + rd_idx = READ_ONCE(ctrl->dbuf_rd_idx) & (dbuf_depth - 1); + slots = mei_data2slots(len); + + /* if buf is NULL we drop the packet by advancing the pointer.*/ + if (!buf) + goto out; + + if (rd_idx + slots > dbuf_depth) { + buf += mei_dma_copy_from(dev, buf, rd_idx, dbuf_depth - rd_idx); + rem = slots - (dbuf_depth - rd_idx); + rd_idx = 0; + } else { + rem = slots; + } + + mei_dma_copy_from(dev, buf, rd_idx, rem); +out: + WRITE_ONCE(ctrl->dbuf_rd_idx, ctrl->dbuf_rd_idx + slots); +} + +static inline u32 mei_dma_ring_hbuf_depth(struct mei_device *dev) +{ + return dev->dr_dscr[DMA_DSCR_HOST].size >> 2; +} + +/** + * mei_dma_ring_empty_slots - calaculate number of empty slots in dma ring + * + * @dev: mei_device + * + * Return: number of empty slots + */ +u32 mei_dma_ring_empty_slots(struct mei_device *dev) +{ + struct hbm_dma_ring_ctrl *ctrl = mei_dma_ring_ctrl(dev); + u32 wr_idx, rd_idx, hbuf_depth, empty; + + if (!mei_dma_ring_is_allocated(dev)) + return 0; + + if (WARN_ON(!ctrl)) + return 0; + + /* easier to work in slots */ + hbuf_depth = mei_dma_ring_hbuf_depth(dev); + rd_idx = READ_ONCE(ctrl->hbuf_rd_idx); + wr_idx = READ_ONCE(ctrl->hbuf_wr_idx); + + if (rd_idx > wr_idx) + empty = rd_idx - wr_idx; + else + empty = hbuf_depth - (wr_idx - rd_idx); + + return empty; +} + +/** + * mei_dma_ring_write - write data to dma ring host buffer + * + * @dev: mei_device + * @buf: data will be written + * @len: data length + */ +void mei_dma_ring_write(struct mei_device *dev, unsigned char *buf, u32 len) +{ + struct hbm_dma_ring_ctrl *ctrl = mei_dma_ring_ctrl(dev); + u32 hbuf_depth; + u32 wr_idx, rem, slots; + + if 
(WARN_ON(!ctrl)) + return; + + dev_dbg(dev->dev, "writing to dma %u bytes\n", len); + hbuf_depth = mei_dma_ring_hbuf_depth(dev); + wr_idx = READ_ONCE(ctrl->hbuf_wr_idx) & (hbuf_depth - 1); + slots = mei_data2slots(len); + + if (wr_idx + slots > hbuf_depth) { + buf += mei_dma_copy_to(dev, buf, wr_idx, hbuf_depth - wr_idx); + rem = slots - (hbuf_depth - wr_idx); + wr_idx = 0; + } else { + rem = slots; + } + + mei_dma_copy_to(dev, buf, wr_idx, rem); + + WRITE_ONCE(ctrl->hbuf_wr_idx, ctrl->hbuf_wr_idx + slots); +} diff --git a/drivers/misc/mei/hbm.c b/drivers/misc/mei/hbm.c index e56f3e72d57a0..84bcdb0bc2397 100644 --- a/drivers/misc/mei/hbm.c +++ b/drivers/misc/mei/hbm.c @@ -65,6 +65,7 @@ const char *mei_hbm_state_str(enum mei_hbm_state state) MEI_HBM_STATE(IDLE); MEI_HBM_STATE(STARTING); MEI_HBM_STATE(STARTED); + MEI_HBM_STATE(DR_SETUP); MEI_HBM_STATE(ENUM_CLIENTS); MEI_HBM_STATE(CLIENT_PROPERTIES); MEI_HBM_STATE(STOPPED); @@ -135,19 +136,15 @@ void mei_hbm_reset(struct mei_device *dev) /** * mei_hbm_hdr - construct hbm header * - * @hdr: hbm header + * @mei_hdr: hbm header * @length: payload length */ -static inline void mei_hbm_hdr(struct mei_msg_hdr *hdr, size_t length) +static inline void mei_hbm_hdr(struct mei_msg_hdr *mei_hdr, size_t length) { - hdr->host_addr = 0; - hdr->me_addr = 0; - hdr->length = length; - hdr->msg_complete = 1; - hdr->dma_ring = 0; - hdr->reserved = 0; - hdr->internal = 0; + memset(mei_hdr, 0, sizeof(*mei_hdr)); + mei_hdr->length = length; + mei_hdr->msg_complete = 1; } /** @@ -295,6 +292,82 @@ int mei_hbm_start_req(struct mei_device *dev) return 0; } +/** + * mei_hbm_dma_setup_req - setup DMA request + * + * @dev: the device structure + * + * Return: 0 on success and < 0 on failure + */ +static int mei_hbm_dma_setup_req(struct mei_device *dev) +{ + struct mei_msg_hdr mei_hdr; + struct hbm_dma_setup_request req; + const size_t len = sizeof(struct hbm_dma_setup_request); + unsigned int i; + int ret; + + mei_hbm_hdr(&mei_hdr, len); + + 
memset(&req, 0, len); + req.hbm_cmd = MEI_HBM_DMA_SETUP_REQ_CMD; + for (i = 0; i < DMA_DSCR_NUM; i++) { + phys_addr_t paddr; + + paddr = dev->dr_dscr[i].daddr; + req.dma_dscr[i].addr_hi = upper_32_bits(paddr); + req.dma_dscr[i].addr_lo = lower_32_bits(paddr); + req.dma_dscr[i].size = dev->dr_dscr[i].size; + } + + mei_dma_ring_reset(dev); + + ret = mei_hbm_write_message(dev, &mei_hdr, &req); + if (ret) { + dev_err(dev->dev, "dma setup request write failed: ret = %d.\n", + ret); + return ret; + } + + dev->hbm_state = MEI_HBM_DR_SETUP; + dev->init_clients_timer = MEI_CLIENTS_INIT_TIMEOUT; + mei_schedule_stall_timer(dev); + return 0; +} + +/** + * mei_hbm_capabilities_req - request capabilities + * + * @dev: the device structure + * + * Return: 0 on success and < 0 on failure + */ +static int mei_hbm_capabilities_req(struct mei_device *dev) +{ + struct mei_msg_hdr mei_hdr; + struct hbm_capability_request req; + int ret; + + mei_hbm_hdr(&mei_hdr, sizeof(req)); + + memset(&req, 0, sizeof(req)); + req.hbm_cmd = MEI_HBM_CAPABILITIES_REQ_CMD; + if (dev->hbm_f_vm_supported) + req.capability_requested[0] = HBM_CAP_VM; + + ret = mei_hbm_write_message(dev, &mei_hdr, &req); + if (ret) { + dev_err(dev->dev, + "capabilities request write failed: ret = %d.\n", ret); + return ret; + } + + dev->hbm_state = MEI_HBM_CAP_SETUP; + dev->init_clients_timer = MEI_CLIENTS_INIT_TIMEOUT; + mei_schedule_stall_timer(dev); + return 0; +} + /** * mei_hbm_enum_clients_req - sends enumeration client request message. 
* @@ -1013,6 +1086,18 @@ static void mei_hbm_config_features(struct mei_device *dev) (dev->version.major_version == HBM_MAJOR_VERSION_DR && dev->version.minor_version >= HBM_MINOR_VERSION_DR)) dev->hbm_f_dr_supported = 1; + + /* VM Tag Support */ + if (dev->version.major_version > HBM_MAJOR_VERSION_VM || + (dev->version.major_version == HBM_MAJOR_VERSION_VM && + dev->version.minor_version >= HBM_MINOR_VERSION_VM)) + dev->hbm_f_vm_supported = 1; + + /* Capability message Support */ + if (dev->version.major_version > HBM_MAJOR_VERSION_CAP || + (dev->version.major_version == HBM_MAJOR_VERSION_CAP && + dev->version.minor_version >= HBM_MINOR_VERSION_CAP)) + dev->hbm_f_cap_supported = 1; } /** @@ -1044,7 +1129,9 @@ int mei_hbm_dispatch(struct mei_device *dev, struct mei_msg_hdr *hdr) struct hbm_host_version_response *version_res; struct hbm_props_response *props_res; struct hbm_host_enum_response *enum_res; + struct hbm_dma_setup_response *dma_setup_res; struct hbm_add_client_request *add_cl_req; + struct hbm_capability_response *capability_res; int ret; struct mei_hbm_cl_cmd *cl_cmd; @@ -1108,14 +1195,91 @@ int mei_hbm_dispatch(struct mei_device *dev, struct mei_msg_hdr *hdr) return -EPROTO; } - if (mei_hbm_enum_clients_req(dev)) { - dev_err(dev->dev, "hbm: start: failed to send enumeration request\n"); - return -EIO; + if (dev->hbm_f_cap_supported) { + if (mei_hbm_capabilities_req(dev)) + return -EIO; + wake_up(&dev->wait_hbm_start); + break; + } + + if (dev->hbm_f_dr_supported) { + if (mei_dmam_ring_alloc(dev)) + dev_info(dev->dev, "running w/o dma ring\n"); + if (mei_dma_ring_is_allocated(dev)) { + if (mei_hbm_dma_setup_req(dev)) + return -EIO; + + wake_up(&dev->wait_hbm_start); + break; + } } + dev->hbm_f_dr_supported = 0; + mei_dmam_ring_free(dev); + + if (mei_hbm_enum_clients_req(dev)) + return -EIO; + wake_up(&dev->wait_hbm_start); break; + case MEI_HBM_CAPABILITIES_RES_CMD: + dev_dbg(dev->dev, "hbm: capabilities response: message received.\n"); + + 
dev->init_clients_timer = 0; + + if (dev->hbm_state != MEI_HBM_CAP_SETUP) { + dev_err(dev->dev, "hbm: capabilities response: state mismatch, [%d, %d]\n", + dev->dev_state, dev->hbm_state); + return -EPROTO; + } + + capability_res = (struct hbm_capability_response *)mei_msg; + if (!(capability_res->capability_granted[0] & HBM_CAP_VM)) + dev->hbm_f_vm_supported = 0; + + if (dev->hbm_f_dr_supported) { + if (mei_dmam_ring_alloc(dev)) + dev_info(dev->dev, "running w/o dma ring\n"); + if (mei_dma_ring_is_allocated(dev)) { + if (mei_hbm_dma_setup_req(dev)) + return -EIO; + break; + } + } + + dev->hbm_f_dr_supported = 0; + mei_dmam_ring_free(dev); + + if (mei_hbm_enum_clients_req(dev)) + return -EIO; + break; + + case MEI_HBM_DMA_SETUP_RES_CMD: + dev_dbg(dev->dev, "hbm: dma setup response: message received.\n"); + + dev->init_clients_timer = 0; + + if (dev->hbm_state != MEI_HBM_DR_SETUP) { + dev_err(dev->dev, "hbm: dma setup response: state mismatch, [%d, %d]\n", + dev->dev_state, dev->hbm_state); + return -EPROTO; + } + + dma_setup_res = (struct hbm_dma_setup_response *)mei_msg; + + if (dma_setup_res->status) { + dev_info(dev->dev, "hbm: dma setup response: failure = %d %s\n", + dma_setup_res->status, + mei_hbm_status_str(dma_setup_res->status)); + dev->hbm_f_dr_supported = 0; + mei_dmam_ring_free(dev); + } + + if (mei_hbm_enum_clients_req(dev)) + return -EIO; + break; + case CLIENT_CONNECT_RES_CMD: dev_dbg(dev->dev, "hbm: client connect response: message received.\n"); mei_hbm_cl_res(dev, cl_cmd, MEI_FOP_CONNECT); @@ -1271,8 +1435,8 @@ int mei_hbm_dispatch(struct mei_device *dev, struct mei_msg_hdr *hdr) break; default: - BUG(); - break; + WARN(1, "hbm: wrong command %d\n", mei_msg->hbm_cmd); + return -EPROTO; } return 0; diff --git a/drivers/misc/mei/hbm.h b/drivers/misc/mei/hbm.h index a2025a5083a39..df1515fde478c 100644 --- a/drivers/misc/mei/hbm.h +++ b/drivers/misc/mei/hbm.h @@ -26,6 +26,8 @@ struct mei_cl; * * @MEI_HBM_IDLE : protocol not started * 
@MEI_HBM_STARTING : start request message was sent + * @MEI_HBM_CAP_SETUP : capabilities request message was sent + * @MEI_HBM_DR_SETUP : dma ring setup request message was sent * @MEI_HBM_ENUM_CLIENTS : enumeration request was sent * @MEI_HBM_CLIENT_PROPERTIES : acquiring clients properties * @MEI_HBM_STARTED : enumeration was completed @@ -34,6 +36,8 @@ struct mei_cl; enum mei_hbm_state { MEI_HBM_IDLE = 0, MEI_HBM_STARTING, + MEI_HBM_CAP_SETUP, + MEI_HBM_DR_SETUP, MEI_HBM_ENUM_CLIENTS, MEI_HBM_CLIENT_PROPERTIES, MEI_HBM_STARTED, diff --git a/drivers/misc/mei/hw-me-regs.h b/drivers/misc/mei/hw-me-regs.h index e4b10b2d1a083..23739a60517f8 100644 --- a/drivers/misc/mei/hw-me-regs.h +++ b/drivers/misc/mei/hw-me-regs.h @@ -127,6 +127,8 @@ #define MEI_DEV_ID_BXT_M 0x1A9A /* Broxton M */ #define MEI_DEV_ID_APL_I 0x5A9A /* Apollo Lake I */ +#define MEI_DEV_ID_DNV_IE 0x19E5 /* Denverton IE */ + #define MEI_DEV_ID_GLK 0x319A /* Gemini Lake */ #define MEI_DEV_ID_KBP 0xA2BA /* Kaby Point */ diff --git a/drivers/misc/mei/hw-me.c b/drivers/misc/mei/hw-me.c index 0759c3a668de7..3fbbadfa2ae15 100644 --- a/drivers/misc/mei/hw-me.c +++ b/drivers/misc/mei/hw-me.c @@ -1471,15 +1471,21 @@ struct mei_device *mei_me_dev_init(struct pci_dev *pdev, { struct mei_device *dev; struct mei_me_hw *hw; + int i; dev = devm_kzalloc(&pdev->dev, sizeof(struct mei_device) + sizeof(struct mei_me_hw), GFP_KERNEL); if (!dev) return NULL; + hw = to_me_hw(dev); + for (i = 0; i < DMA_DSCR_NUM; i++) + dev->dr_dscr[i].size = cfg->dma_size[i]; + mei_device_init(dev, &pdev->dev, &mei_me_hw_ops); hw->cfg = cfg; + return dev; } diff --git a/drivers/misc/mei/hw-virtio.c b/drivers/misc/mei/hw-virtio.c new file mode 100644 index 0000000000000..96858db5463d4 --- /dev/null +++ b/drivers/misc/mei/hw-virtio.c @@ -0,0 +1,872 @@ +// SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause) +/* + * Intel Management Engine Interface (Intel MEI) Linux driver + * Copyright (c) 2018, Intel Corporation. 
+ */ +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "mei_dev.h" +#include "hbm.h" +#include "client.h" + +#define MEI_VIRTIO_RPM_TIMEOUT 500 +/* ACRN virtio device types */ +#ifndef VIRTIO_ID_MEI +#define VIRTIO_ID_MEI 0xFFFE /* virtio mei */ +#endif + +/** + * struct mei_virtio_cfg - settings passed from the virtio backend + * @buf_depth: read buffer depth in slots (4bytes) + * @hw_ready: hw is ready for operation + * @host_reset: synchronize reset with virtio backend + * @reserved: reserved for alignment + * @fw_status: FW status + */ +struct mei_virtio_cfg { + u32 buf_depth; + u8 hw_ready; + u8 host_reset; + u8 reserved[2]; + u32 fw_status[MEI_FW_STATUS_MAX]; +} __packed; + +struct mei_virtio_hw { + struct mei_device mdev; + char name[32]; + + struct virtqueue *in; + struct virtqueue *out; + + bool host_ready; + struct work_struct intr_handler; + + u32 *recv_buf; + u8 recv_rdy; + size_t recv_sz; + u32 recv_idx; + u32 recv_len; + + /* send buffer */ + atomic_t hbuf_ready; + const void *send_hdr; + const void *send_buf; + + struct mei_virtio_cfg cfg; +}; + +#define to_virtio_hw(_dev) container_of(_dev, struct mei_virtio_hw, mdev) + +/** + * mei_virtio_fw_status() - read status register of mei + * @dev: mei device + * @fw_status: fw status register values + * + * Return: always 0 + */ +static int mei_virtio_fw_status(struct mei_device *dev, + struct mei_fw_status *fw_status) +{ + struct virtio_device *vdev = dev_to_virtio(dev->dev); + + fw_status->count = MEI_FW_STATUS_MAX; + virtio_cread_bytes(vdev, offsetof(struct mei_virtio_cfg, fw_status), + fw_status->status, sizeof(fw_status->status)); + return 0; +} + +/** + * mei_virtio_pg_state() - translate internal pg state + * to the mei power gating state + * @dev: mei device + * + * Return: + * * MEI_PG_OFF - if aliveness is on (always) + * * MEI_PG_ON - otherwise + */ +static inline enum mei_pg_state mei_virtio_pg_state(struct mei_device *dev) +{ + /* TODO: not 
support power management in PV mode */ + return MEI_PG_OFF; +} + +/** + * mei_virtio_hw_config() - configure hw dependent settings + * + * @dev: mei device + */ +static void mei_virtio_hw_config(struct mei_device *dev) +{ + /* nop */ +} + +/** + * mei_virtio_hbuf_empty_slots() - counts write empty slots. + * @dev: the device structure + * + * Return: always return frontend buf size if buffer is ready, 0 otherwise + */ +static int mei_virtio_hbuf_empty_slots(struct mei_device *dev) +{ + struct mei_virtio_hw *hw = to_virtio_hw(dev); + + return (atomic_read(&hw->hbuf_ready) == 1) ? hw->cfg.buf_depth : 0; +} + +/** + * mei_virtio_hbuf_is_ready() - checks if write buffer is ready + * @dev: the device structure + * + * Return: true if hbuf is ready + */ +static bool mei_virtio_hbuf_is_ready(struct mei_device *dev) +{ + struct mei_virtio_hw *hw = to_virtio_hw(dev); + + return atomic_read(&hw->hbuf_ready) == 1; +} + +/** + * mei_virtio_hbuf_max_depth() - returns depth of FE write buffer. + * @dev: the device structure + * + * Return: size of frontend write buffer in bytes + */ +static u32 mei_virtio_hbuf_depth(const struct mei_device *dev) +{ + struct mei_virtio_hw *hw = to_virtio_hw(dev); + + return hw->cfg.buf_depth; +} + +/** + * mei_virtio_intr_clear() - clear and stop interrupts + * @dev: the device structure + */ +static void mei_virtio_intr_clear(struct mei_device *dev) +{ + /* + * In our virtio solution, there are two types of interrupts, + * vq interrupt and config change interrupt. + * 1) start/reset rely on virtio config changed interrupt; + * 2) send/recv rely on virtio virtqueue interrupts. + * They are all virtual interrupts. So, we don't have corresponding + * operation to do here. 
+ */ +} + +/** + * mei_virtio_intr_enable() - enables mei BE virtqueues callbacks + * @dev: the device structure + */ +static void mei_virtio_intr_enable(struct mei_device *dev) +{ + struct mei_virtio_hw *hw = to_virtio_hw(dev); + struct virtio_device *vdev = dev_to_virtio(dev->dev); + + virtio_config_enable(vdev); + + virtqueue_enable_cb(hw->in); + virtqueue_enable_cb(hw->out); +} + +/** + * mei_virtio_intr_disable() - disables mei BE virtqueues callbacks + * + * @dev: the device structure + */ +static void mei_virtio_intr_disable(struct mei_device *dev) +{ + struct mei_virtio_hw *hw = to_virtio_hw(dev); + struct virtio_device *vdev = dev_to_virtio(dev->dev); + + virtio_config_disable(vdev); + + virtqueue_disable_cb(hw->in); + virtqueue_disable_cb(hw->out); +} + +/** + * mei_virtio_synchronize_irq() - wait for pending IRQ handlers for all + * virtqueue + * @dev: the device structure + */ +static void mei_virtio_synchronize_irq(struct mei_device *dev) +{ + struct mei_virtio_hw *hw = to_virtio_hw(dev); + + /* + * Now, all IRQ handlers are converted to workqueue. + * Change synchronize irq to flush this work. + */ + flush_work(&hw->intr_handler); +} + +static void mei_virtio_free_outbufs(struct mei_virtio_hw *hw) +{ + kfree(hw->send_hdr); + kfree(hw->send_buf); + hw->send_hdr = NULL; + hw->send_buf = NULL; +} + +/** + * mei_virtio_write_message() - writes a message to mei virtio back-end service. 
+ * @dev: the device structure + * @hdr: mei header of message + * @hdr_len: header length + * @data: message payload will be written + * @data_len: messag payload length + * + * Return: -EIO if write has failed + */ +static int mei_virtio_write_message(struct mei_device *dev, + const void *hdr, size_t hdr_len, + const void *data, size_t data_len) +{ + struct mei_virtio_hw *hw = to_virtio_hw(dev); + struct scatterlist sg[2]; + const void *hbuf, *dbuf; + int ret; + + if (WARN_ON(!atomic_add_unless(&hw->hbuf_ready, -1, 0))) + return -EIO; + + hbuf = kmemdup(hdr, hdr_len, GFP_KERNEL); + hw->send_hdr = hbuf; + + dbuf = kmemdup(data, data_len, GFP_KERNEL); + hw->send_buf = dbuf; + + if (!hbuf || !dbuf) { + ret = -ENOMEM; + goto fail; + } + + sg_init_table(sg, 2); + sg_set_buf(&sg[0], hbuf, hdr_len); + sg_set_buf(&sg[1], dbuf, data_len); + + ret = virtqueue_add_outbuf(hw->out, sg, 2, hw, GFP_KERNEL); + if (ret) { + dev_err(dev->dev, "failed to add outbuf\n"); + goto fail; + } + + virtqueue_kick(hw->out); + return 0; +fail: + + mei_virtio_free_outbufs(hw); + + return ret; +} + +/** + * mei_virtio_count_full_read_slots() - counts read full slots. 
+ * @dev: the device structure + * + * Return: -EOVERFLOW if overflow, otherwise filled slots count + */ +static int mei_virtio_count_full_read_slots(struct mei_device *dev) +{ + struct mei_virtio_hw *hw = to_virtio_hw(dev); + + if (hw->recv_idx > hw->recv_len) + return -EOVERFLOW; + + return hw->recv_len - hw->recv_idx; +} + +/** + * mei_virtio_read_hdr() - Reads 32bit dword from mei virtio receive buffer + * + * @dev: the device structure + * + * Return: 32bit dword of receive buffer (u32) + */ +static inline u32 mei_virtio_read_hdr(const struct mei_device *dev) +{ + struct mei_virtio_hw *hw = to_virtio_hw(dev); + + WARN_ON(hw->cfg.buf_depth < hw->recv_idx + 1); + + return hw->recv_buf[hw->recv_idx++]; +} + +static int mei_virtio_read(struct mei_device *dev, unsigned char *buffer, + unsigned long len) +{ + struct mei_virtio_hw *hw = to_virtio_hw(dev); + u32 slots = mei_data2slots(len); + + if (WARN_ON(hw->cfg.buf_depth < hw->recv_idx + slots)) + return -EOVERFLOW; + + /* + * Assumption: There is only one MEI message in recv_buf each time. + * Backend service need follow this rule too. 
+ * + * TODO: use double/triple buffers for recv_buf + */ + memcpy(buffer, hw->recv_buf + hw->recv_idx, len); + hw->recv_idx += slots; + + return 0; +} + +static bool mei_virtio_pg_is_enabled(struct mei_device *dev) +{ + return false; +} + +static bool mei_virtio_pg_in_transition(struct mei_device *dev) +{ + return false; +} + +static void mei_virtio_add_recv_buf(struct mei_virtio_hw *hw) +{ + struct scatterlist sg; + + if (hw->recv_rdy) /* not needed */ + return; + + /* refill the recv_buf to IN virtqueue to get next message */ + sg_init_one(&sg, hw->recv_buf, mei_slots2data(hw->cfg.buf_depth)); + hw->recv_len = 0; + hw->recv_idx = 0; + hw->recv_rdy = 1; + virtqueue_add_inbuf(hw->in, &sg, 1, hw->recv_buf, GFP_KERNEL); + virtqueue_kick(hw->in); +} + +/** + * mei_virtio_hw_is_ready() - check whether the BE(hw) has turned ready + * @dev: mei device + * Return: bool + */ +static bool mei_virtio_hw_is_ready(struct mei_device *dev) +{ + struct mei_virtio_hw *hw = to_virtio_hw(dev); + struct virtio_device *vdev = dev_to_virtio(dev->dev); + + virtio_cread(vdev, struct mei_virtio_cfg, + hw_ready, &hw->cfg.hw_ready); + + dev_dbg(dev->dev, "hw ready %d\n", hw->cfg.hw_ready); + + return hw->cfg.hw_ready; +} + +/** + * mei_virtio_hw_reset - resets virtio hw. 
+ * + * @dev: the device structure + * @intr_enable: virtio use data/config callbacks + * + * Return: 0 on success an error code otherwise + */ +static int mei_virtio_hw_reset(struct mei_device *dev, bool intr_enable) +{ + struct mei_virtio_hw *hw = to_virtio_hw(dev); + struct virtio_device *vdev = dev_to_virtio(dev->dev); + + dev_dbg(dev->dev, "hw reset\n"); + + dev->recvd_hw_ready = false; + hw->host_ready = false; + atomic_set(&hw->hbuf_ready, 0); + hw->recv_len = 0; + hw->recv_idx = 0; + + hw->cfg.host_reset = 1; + virtio_cwrite(vdev, struct mei_virtio_cfg, + host_reset, &hw->cfg.host_reset); + + mei_virtio_hw_is_ready(dev); + + if (intr_enable) + mei_virtio_intr_enable(dev); + + return 0; +} + +/** + * mei_virtio_hw_reset_release() - release device from the reset + * @dev: the device structure + */ +static void mei_virtio_hw_reset_release(struct mei_device *dev) +{ + struct mei_virtio_hw *hw = to_virtio_hw(dev); + struct virtio_device *vdev = dev_to_virtio(dev->dev); + + dev_dbg(dev->dev, "hw reset release\n"); + hw->cfg.host_reset = 0; + virtio_cwrite(vdev, struct mei_virtio_cfg, + host_reset, &hw->cfg.host_reset); +} + +/** + * mei_virtio_hw_ready_wait() - wait until the virtio(hw) has turned ready + * or timeout is reached + * @dev: mei device + * + * Return: 0 on success, error otherwise + */ +static int mei_virtio_hw_ready_wait(struct mei_device *dev) +{ + mutex_unlock(&dev->device_lock); + wait_event_timeout(dev->wait_hw_ready, + dev->recvd_hw_ready, + mei_secs_to_jiffies(MEI_HW_READY_TIMEOUT)); + mutex_lock(&dev->device_lock); + if (!dev->recvd_hw_ready) { + dev_err(dev->dev, "wait hw ready failed\n"); + return -ETIMEDOUT; + } + + dev->recvd_hw_ready = false; + return 0; +} + +/** + * mei_virtio_hw_start() - hw start routine + * @dev: mei device + * + * Return: 0 on success, error otherwise + */ +static int mei_virtio_hw_start(struct mei_device *dev) +{ + struct mei_virtio_hw *hw = to_virtio_hw(dev); + int ret; + + dev_dbg(dev->dev, "hw start\n"); + 
mei_virtio_hw_reset_release(dev); + + ret = mei_virtio_hw_ready_wait(dev); + if (ret) + return ret; + + mei_virtio_add_recv_buf(hw); + atomic_set(&hw->hbuf_ready, 1); + dev_dbg(dev->dev, "hw is ready\n"); + hw->host_ready = true; + + return 0; +} + +/** + * mei_virtio_host_is_ready() - check whether the FE has turned ready + * @dev: mei device + * + * Return: bool + */ +static bool mei_virtio_host_is_ready(struct mei_device *dev) +{ + struct mei_virtio_hw *hw = to_virtio_hw(dev); + + dev_dbg(dev->dev, "host ready %d\n", hw->host_ready); + + return hw->host_ready; +} + +/** + * mei_virtio_data_in() - The callback of recv virtqueue of virtio mei + * @vq: receiving virtqueue + */ +static void mei_virtio_data_in(struct virtqueue *vq) +{ + struct mei_virtio_hw *hw = vq->vdev->priv; + + /* disable interrupts (enabled again from in the interrupt worker) */ + virtqueue_disable_cb(hw->in); + + schedule_work(&hw->intr_handler); +} + +/** + * mei_virtio_data_out() - The callback of send virtqueue of virtio mei + * @vq: transmiting virtqueue + */ +static void mei_virtio_data_out(struct virtqueue *vq) +{ + struct mei_virtio_hw *hw = vq->vdev->priv; + + schedule_work(&hw->intr_handler); +} + +static void mei_virtio_intr_handler(struct work_struct *work) +{ + struct mei_virtio_hw *hw = + container_of(work, struct mei_virtio_hw, intr_handler); + struct mei_device *dev = &hw->mdev; + LIST_HEAD(complete_list); + s32 slots; + int rets = 0; + void *data; + unsigned int len; + + mutex_lock(&dev->device_lock); + + if (dev->dev_state == MEI_DEV_DISABLED) { + dev_warn(dev->dev, "Interrupt in disabled state.\n"); + mei_virtio_intr_disable(dev); + goto end; + } + + /* check if ME wants a reset */ + if (!mei_hw_is_ready(dev) && dev->dev_state != MEI_DEV_RESETTING) { + dev_warn(dev->dev, "BE service not ready: resetting.\n"); + schedule_work(&dev->reset_work); + goto end; + } + + /* check if we need to start the dev */ + if (!mei_host_is_ready(dev)) { + if (mei_hw_is_ready(dev)) { + 
dev_dbg(dev->dev, "we need to start the dev.\n"); + dev->recvd_hw_ready = true; + wake_up(&dev->wait_hw_ready); + } else { + dev_warn(dev->dev, "Spurious Interrupt\n"); + } + goto end; + } + + /* read */ + if (hw->recv_rdy) { + data = virtqueue_get_buf(hw->in, &len); + if (!data || !len) { + dev_dbg(dev->dev, "No data %d", len); + } else { + dev_dbg(dev->dev, "data_in %d\n", len); + WARN_ON(data != hw->recv_buf); + hw->recv_len = mei_data2slots(len); + hw->recv_rdy = 0; + } + } + + /* write */ + if (!atomic_read(&hw->hbuf_ready)) { + if (!virtqueue_get_buf(hw->out, &len)) { + dev_warn(dev->dev, "Failed to getbuf\n"); + } else { + mei_virtio_free_outbufs(hw); + atomic_inc(&hw->hbuf_ready); + } + } + + /* check slots available for reading */ + slots = mei_count_full_read_slots(dev); + while (slots > 0) { + dev_dbg(dev->dev, "slots to read = %08x\n", slots); + rets = mei_irq_read_handler(dev, &complete_list, &slots); + + if (rets && + (dev->dev_state != MEI_DEV_RESETTING && + dev->dev_state != MEI_DEV_POWER_DOWN)) { + dev_err(dev->dev, "mei_irq_read_handler ret = %d.\n", + rets); + schedule_work(&dev->reset_work); + goto end; + } + } + + dev->hbuf_is_ready = mei_hbuf_is_ready(dev); + + mei_irq_write_handler(dev, &complete_list); + + dev->hbuf_is_ready = mei_hbuf_is_ready(dev); + + mei_irq_compl_handler(dev, &complete_list); + + mei_virtio_add_recv_buf(hw); + +end: + if (dev->dev_state != MEI_DEV_DISABLED) { + if (!virtqueue_enable_cb(hw->in)) { + dev_dbg(dev->dev, "IN queue pending 1\n"); + schedule_work(&hw->intr_handler); + } + } + + mutex_unlock(&dev->device_lock); +} + +static void mei_virtio_config_changed(struct virtio_device *vdev) +{ + struct mei_virtio_hw *hw = vdev->priv; + struct mei_device *dev = &hw->mdev; + + virtio_cread(vdev, struct mei_virtio_cfg, + hw_ready, &hw->cfg.hw_ready); + + if (dev->dev_state == MEI_DEV_DISABLED) { + dev_dbg(dev->dev, "disabled state don't start\n"); + return; + } + + /* Run intr handler once to handle reset notify */ + 
schedule_work(&hw->intr_handler); +} + +static void mei_virtio_remove_vqs(struct virtio_device *vdev) +{ + struct mei_virtio_hw *hw = vdev->priv; + + virtqueue_detach_unused_buf(hw->in); + hw->recv_len = 0; + hw->recv_idx = 0; + hw->recv_rdy = 0; + + virtqueue_detach_unused_buf(hw->out); + + mei_virtio_free_outbufs(hw); + + vdev->config->del_vqs(vdev); +} + +/* + * There are two virtqueues, one is for send and another is for recv. + */ +static int mei_virtio_init_vqs(struct mei_virtio_hw *hw, + struct virtio_device *vdev) +{ + struct virtqueue *vqs[2]; + + vq_callback_t *cbs[] = { + mei_virtio_data_in, + mei_virtio_data_out, + }; + static const char * const names[] = { + "in", + "out", + }; + int ret; + + ret = virtio_find_vqs(vdev, 2, vqs, cbs, names, NULL); + if (ret) + return ret; + + hw->in = vqs[0]; + hw->out = vqs[1]; + + return 0; +} + +static const struct mei_hw_ops mei_virtio_ops = { + .fw_status = mei_virtio_fw_status, + .pg_state = mei_virtio_pg_state, + + .host_is_ready = mei_virtio_host_is_ready, + + .hw_is_ready = mei_virtio_hw_is_ready, + .hw_reset = mei_virtio_hw_reset, + .hw_config = mei_virtio_hw_config, + .hw_start = mei_virtio_hw_start, + + .pg_in_transition = mei_virtio_pg_in_transition, + .pg_is_enabled = mei_virtio_pg_is_enabled, + + .intr_clear = mei_virtio_intr_clear, + .intr_enable = mei_virtio_intr_enable, + .intr_disable = mei_virtio_intr_disable, + .synchronize_irq = mei_virtio_synchronize_irq, + + .hbuf_free_slots = mei_virtio_hbuf_empty_slots, + .hbuf_is_ready = mei_virtio_hbuf_is_ready, + .hbuf_depth = mei_virtio_hbuf_depth, + + .write = mei_virtio_write_message, + + .rdbuf_full_slots = mei_virtio_count_full_read_slots, + .read_hdr = mei_virtio_read_hdr, + .read = mei_virtio_read, +}; + +static int mei_virtio_probe(struct virtio_device *vdev) +{ + struct mei_virtio_hw *hw; + int ret; + + hw = devm_kzalloc(&vdev->dev, sizeof(*hw), GFP_KERNEL); + if (!hw) + return -ENOMEM; + + vdev->priv = hw; + + INIT_WORK(&hw->intr_handler, 
mei_virtio_intr_handler); + + ret = mei_virtio_init_vqs(hw, vdev); + if (ret) + goto vqs_failed; + + virtio_cread(vdev, struct mei_virtio_cfg, + buf_depth, &hw->cfg.buf_depth); + + hw->recv_buf = kzalloc(mei_slots2data(hw->cfg.buf_depth), GFP_KERNEL); + if (!hw->recv_buf) { + ret = -ENOMEM; + goto hbuf_failed; + } + atomic_set(&hw->hbuf_ready, 0); + + virtio_device_ready(vdev); + + mei_device_init(&hw->mdev, &vdev->dev, &mei_virtio_ops); + + pm_runtime_get_noresume(&vdev->dev); + pm_runtime_set_active(&vdev->dev); + pm_runtime_enable(&vdev->dev); + + ret = mei_start(&hw->mdev); + if (ret) + goto mei_start_failed; + + pm_runtime_set_autosuspend_delay(&vdev->dev, MEI_VIRTIO_RPM_TIMEOUT); + pm_runtime_use_autosuspend(&vdev->dev); + + ret = mei_register(&hw->mdev, &vdev->dev); + if (ret) + goto mei_failed; + + pm_runtime_put(&vdev->dev); + + return 0; + +mei_failed: + mei_stop(&hw->mdev); +mei_start_failed: + mei_cancel_work(&hw->mdev); + mei_disable_interrupts(&hw->mdev); + kfree(hw->recv_buf); +hbuf_failed: + vdev->config->del_vqs(vdev); +vqs_failed: + return ret; +} + +static int __maybe_unused mei_virtio_pm_runtime_idle(struct device *device) +{ + struct virtio_device *vdev = dev_to_virtio(device); + struct mei_virtio_hw *hw = vdev->priv; + + dev_dbg(&vdev->dev, "rpm: mei_virtio : runtime_idle\n"); + + if (!hw) + return -ENODEV; + + if (mei_write_is_idle(&hw->mdev)) + pm_runtime_autosuspend(device); + + return -EBUSY; +} + +static int __maybe_unused mei_virtio_pm_runtime_suspend(struct device *device) +{ + return 0; +} + +static int __maybe_unused mei_virtio_pm_runtime_resume(struct device *device) +{ + return 0; +} + +static int __maybe_unused mei_virtio_freeze(struct virtio_device *vdev) +{ + struct mei_virtio_hw *hw = vdev->priv; + + dev_dbg(&vdev->dev, "freeze\n"); + + if (!hw) + return -ENODEV; + + mei_stop(&hw->mdev); + mei_disable_interrupts(&hw->mdev); + cancel_work_sync(&hw->intr_handler); + vdev->config->reset(vdev); + mei_virtio_remove_vqs(vdev); + + 
return 0; +} + +static int __maybe_unused mei_virtio_restore(struct virtio_device *vdev) +{ + struct mei_virtio_hw *hw = vdev->priv; + int ret; + + dev_dbg(&vdev->dev, "restore\n"); + + if (!hw) + return -ENODEV; + + ret = mei_virtio_init_vqs(hw, vdev); + if (ret) + return ret; + + virtio_device_ready(vdev); + + ret = mei_restart(&hw->mdev); + if (ret) + return ret; + + /* Start timer if stopped in suspend */ + schedule_delayed_work(&hw->mdev.timer_work, HZ); + + return 0; +} + +static const struct dev_pm_ops mei_virtio_pm_ops = { + SET_RUNTIME_PM_OPS(mei_virtio_pm_runtime_suspend, + mei_virtio_pm_runtime_resume, + mei_virtio_pm_runtime_idle) +}; + +static void mei_virtio_remove(struct virtio_device *vdev) +{ + struct mei_virtio_hw *hw = vdev->priv; + + mei_stop(&hw->mdev); + mei_disable_interrupts(&hw->mdev); + cancel_work_sync(&hw->intr_handler); + mei_deregister(&hw->mdev); + vdev->config->reset(vdev); + mei_virtio_remove_vqs(vdev); + kfree(hw->recv_buf); + pm_runtime_disable(&vdev->dev); +} + +static struct virtio_device_id id_table[] = { + { VIRTIO_ID_MEI, VIRTIO_DEV_ANY_ID }, + { } +}; + +static struct virtio_driver mei_virtio_driver = { + .id_table = id_table, + .probe = mei_virtio_probe, + .remove = mei_virtio_remove, + .config_changed = mei_virtio_config_changed, + .driver = { + .name = KBUILD_MODNAME, + .owner = THIS_MODULE, + .pm = &mei_virtio_pm_ops, + }, +#ifdef CONFIG_PM_SLEEP + .freeze = mei_virtio_freeze, + .restore = mei_virtio_restore, +#endif +}; + +module_virtio_driver(mei_virtio_driver); +MODULE_DEVICE_TABLE(virtio, id_table); +MODULE_DESCRIPTION("Virtio MEI frontend driver"); +MODULE_LICENSE("Dual BSD/GPL"); diff --git a/drivers/misc/mei/hw.h b/drivers/misc/mei/hw.h index 65655925791ab..90d118ae8434f 100644 --- a/drivers/misc/mei/hw.h +++ b/drivers/misc/mei/hw.h @@ -35,7 +35,7 @@ /* * MEI Version */ -#define HBM_MINOR_VERSION 0 +#define HBM_MINOR_VERSION 2 #define HBM_MAJOR_VERSION 2 /* @@ -86,6 +86,18 @@ #define HBM_MINOR_VERSION_DR 1 #define 
HBM_MAJOR_VERSION_DR 2 +/* + * MEI version with vm tag support + */ +#define HBM_MINOR_VERSION_VM 2 +#define HBM_MAJOR_VERSION_VM 2 + +/* + * MEI version with capabilities message support + */ +#define HBM_MINOR_VERSION_CAP 2 +#define HBM_MAJOR_VERSION_CAP 2 + /* Host bus message command opcode */ #define MEI_HBM_CMD_OP_MSK 0x7f /* Host bus message command RESPONSE */ @@ -131,6 +143,9 @@ #define MEI_HBM_DMA_SETUP_REQ_CMD 0x12 #define MEI_HBM_DMA_SETUP_RES_CMD 0x92 +#define MEI_HBM_CAPABILITIES_REQ_CMD 0x13 +#define MEI_HBM_CAPABILITIES_RES_CMD 0x93 + /* * MEI Stop Reason * used by hbm_host_stop_request.reason @@ -196,6 +211,17 @@ enum mei_cl_disconnect_status { MEI_CL_DISCONN_SUCCESS = MEI_HBMS_SUCCESS }; +/** + * struct mei_msg_extd_hdr - mei extended header + * + * @vtag: virtual tag. + * @reserved: reserved. + */ +struct mei_msg_extd_hdr { + u8 vtag; + u8 reserved[3]; +} __packed; + /** * struct mei_msg_hdr - MEI BUS Interface Section * @@ -203,20 +229,26 @@ enum mei_cl_disconnect_status { * @host_addr: host address * @length: message length * @reserved: reserved + * @extended: message has extended header * @dma_ring: message is on dma ring * @internal: message is internal * @msg_complete: last packet of the message + * @extension: extension of the header */ struct mei_msg_hdr { u32 me_addr:8; u32 host_addr:8; u32 length:9; - u32 reserved:4; + u32 reserved:3; + u32 extended:1; u32 dma_ring:1; u32 internal:1; u32 msg_complete:1; + u32 extension[0]; } __packed; +#define MEI_MSG_HDR_MAX 3 + struct mei_bus_message { u8 hbm_cmd; u8 data[0]; @@ -307,7 +339,9 @@ struct mei_client_properties { u8 protocol_version; u8 max_number_of_connections; u8 fixed_address; - u8 single_recv_buf; + u8 single_recv_buf:1; + u8 vm_supported:1; + u8 reserved:6; u32 max_msg_length; } __packed; @@ -512,4 +546,39 @@ struct hbm_dma_setup_response { u8 reserved[2]; } __packed; +/** + * struct mei_dma_ring_ctrl - dma ring control block + * + * @hbuf_wr_idx: host circular buffer write index in 
slots + * @reserved1: reserved for alignment + * @hbuf_rd_idx: host circular buffer read index in slots + * @reserved2: reserved for alignment + * @dbuf_wr_idx: device circular buffer write index in slots + * @reserved3: reserved for alignment + * @dbuf_rd_idx: device circular buffer read index in slots + * @reserved4: reserved for alignment + */ +struct hbm_dma_ring_ctrl { + u32 hbuf_wr_idx; + u32 reserved1; + u32 hbuf_rd_idx; + u32 reserved2; + u32 dbuf_wr_idx; + u32 reserved3; + u32 dbuf_rd_idx; + u32 reserved4; +} __packed; + +#define HBM_CAP_VM BIT(0) + +struct hbm_capability_request { + u8 hbm_cmd; + u8 capability_requested[3]; +} __packed; + +struct hbm_capability_response { + u8 hbm_cmd; + u8 capability_granted[3]; +} __packed; + #endif diff --git a/drivers/misc/mei/init.c b/drivers/misc/mei/init.c index 4888ebc076b74..d75173dc1b23a 100644 --- a/drivers/misc/mei/init.c +++ b/drivers/misc/mei/init.c @@ -133,12 +133,12 @@ int mei_reset(struct mei_device *dev) /* enter reset flow */ interrupts_enabled = state != MEI_DEV_POWER_DOWN; - dev->dev_state = MEI_DEV_RESETTING; + mei_set_devstate(dev, MEI_DEV_RESETTING); dev->reset_count++; if (dev->reset_count > MEI_MAX_CONSEC_RESET) { dev_err(dev->dev, "reset: reached maximal consecutive resets: disabling the device\n"); - dev->dev_state = MEI_DEV_DISABLED; + mei_set_devstate(dev, MEI_DEV_DISABLED); return -ENODEV; } @@ -151,7 +151,7 @@ int mei_reset(struct mei_device *dev) mei_hbm_reset(dev); - dev->rd_msg_hdr = 0; + memset(dev->rd_msg_hdr, 0, sizeof(dev->rd_msg_hdr)); if (ret) { dev_err(dev->dev, "hw_reset failed ret = %d\n", ret); @@ -160,7 +160,7 @@ int mei_reset(struct mei_device *dev) if (state == MEI_DEV_POWER_DOWN) { dev_dbg(dev->dev, "powering down: end of reset\n"); - dev->dev_state = MEI_DEV_DISABLED; + mei_set_devstate(dev, MEI_DEV_DISABLED); return 0; } @@ -172,11 +172,11 @@ int mei_reset(struct mei_device *dev) dev_dbg(dev->dev, "link is established start sending messages.\n"); - dev->dev_state = 
MEI_DEV_INIT_CLIENTS; + mei_set_devstate(dev, MEI_DEV_INIT_CLIENTS); ret = mei_hbm_start_req(dev); if (ret) { dev_err(dev->dev, "hbm_start failed ret = %d\n", ret); - dev->dev_state = MEI_DEV_RESETTING; + mei_set_devstate(dev, MEI_DEV_RESETTING); return ret; } @@ -206,7 +206,7 @@ int mei_start(struct mei_device *dev) dev->reset_count = 0; do { - dev->dev_state = MEI_DEV_INITIALIZING; + mei_set_devstate(dev, MEI_DEV_INITIALIZING); ret = mei_reset(dev); if (ret == -ENODEV || dev->dev_state == MEI_DEV_DISABLED) { @@ -241,7 +241,7 @@ int mei_start(struct mei_device *dev) return 0; err: dev_err(dev->dev, "link layer initialization failed.\n"); - dev->dev_state = MEI_DEV_DISABLED; + mei_set_devstate(dev, MEI_DEV_DISABLED); mutex_unlock(&dev->device_lock); return -ENODEV; } @@ -260,7 +260,7 @@ int mei_restart(struct mei_device *dev) mutex_lock(&dev->device_lock); - dev->dev_state = MEI_DEV_POWER_UP; + mei_set_devstate(dev, MEI_DEV_POWER_UP); dev->reset_count = 0; err = mei_reset(dev); @@ -311,7 +311,7 @@ void mei_stop(struct mei_device *dev) dev_dbg(dev->dev, "stopping the device.\n"); mutex_lock(&dev->device_lock); - dev->dev_state = MEI_DEV_POWER_DOWN; + mei_set_devstate(dev, MEI_DEV_POWER_DOWN); mutex_unlock(&dev->device_lock); mei_cl_bus_remove_devices(dev); @@ -324,7 +324,7 @@ void mei_stop(struct mei_device *dev) mei_reset(dev); /* move device to disabled state unconditionally */ - dev->dev_state = MEI_DEV_DISABLED; + mei_set_devstate(dev, MEI_DEV_DISABLED); mutex_unlock(&dev->device_lock); } @@ -389,6 +389,8 @@ void mei_device_init(struct mei_device *dev, INIT_WORK(&dev->reset_work, mei_reset_work); INIT_WORK(&dev->bus_rescan_work, mei_cl_bus_rescan_work); + dev->sysfs_state = NULL; + bitmap_zero(dev->host_clients_map, MEI_CLIENTS_MAX); dev->open_handle_count = 0; diff --git a/drivers/misc/mei/interrupt.c b/drivers/misc/mei/interrupt.c index 5a661cbdf2aef..6531e036bb004 100644 --- a/drivers/misc/mei/interrupt.c +++ b/drivers/misc/mei/interrupt.c @@ -75,6 +75,8 @@ 
static inline int mei_cl_hbm_equal(struct mei_cl *cl, */ static void mei_irq_discard_msg(struct mei_device *dev, struct mei_msg_hdr *hdr) { + if (hdr->dma_ring) + mei_dma_ring_read(dev, NULL, hdr->extension[0]); /* * no need to check for size as it is guarantied * that length fits into rd_msg_buf @@ -99,7 +101,9 @@ static int mei_cl_irq_read_msg(struct mei_cl *cl, { struct mei_device *dev = cl->dev; struct mei_cl_cb *cb; + struct mei_msg_extd_hdr *ext_hdr = (void *)mei_hdr->extension; size_t buf_sz; + u32 length; cb = list_first_entry_or_null(&cl->rd_pending, struct mei_cl_cb, list); if (!cb) { @@ -113,31 +117,48 @@ static int mei_cl_irq_read_msg(struct mei_cl *cl, list_add_tail(&cb->list, &cl->rd_pending); } + if (mei_hdr->extended) { + cl_dbg(dev, cl, "vtag: %d\n", ext_hdr->vtag); + if (cb->vtag && cb->vtag != ext_hdr->vtag) { + cl_err(dev, cl, "mismatched tag: %d != %d\n", + cb->vtag, ext_hdr->vtag); + cb->status = -EPROTO; + goto discard; + } + cb->vtag = ext_hdr->vtag; + } + if (!mei_cl_is_connected(cl)) { cl_dbg(dev, cl, "not connected\n"); cb->status = -ENODEV; goto discard; } - buf_sz = mei_hdr->length + cb->buf_idx; + length = mei_hdr->dma_ring ? mei_hdr->extension[1] : mei_hdr->length; + + buf_sz = length + cb->buf_idx; /* catch for integer overflow */ if (buf_sz < cb->buf_idx) { cl_err(dev, cl, "message is too big len %d idx %zu\n", - mei_hdr->length, cb->buf_idx); + length, cb->buf_idx); cb->status = -EMSGSIZE; goto discard; } if (cb->buf.size < buf_sz) { cl_dbg(dev, cl, "message overflow. 
size %zu len %d idx %zu\n", - cb->buf.size, mei_hdr->length, cb->buf_idx); + cb->buf.size, length, cb->buf_idx); cb->status = -EMSGSIZE; goto discard; } + if (mei_hdr->dma_ring) + mei_dma_ring_read(dev, cb->buf.data + cb->buf_idx, length); + + /* for DMA read 0 length to generate an interrupt to the device */ mei_read_slots(dev, cb->buf.data + cb->buf_idx, mei_hdr->length); - cb->buf_idx += mei_hdr->length; + cb->buf_idx += length; if (mei_hdr->msg_complete) { cl_dbg(dev, cl, "completed read length = %zu\n", cb->buf_idx); @@ -247,6 +268,9 @@ static inline int hdr_is_valid(u32 msg_hdr) if (!msg_hdr || mei_hdr->reserved) return -EBADMSG; + if (mei_hdr->dma_ring && mei_hdr->length != MEI_SLOT_SIZE) + return -EBADMSG; + return 0; } @@ -267,20 +291,20 @@ int mei_irq_read_handler(struct mei_device *dev, struct mei_cl *cl; int ret; - if (!dev->rd_msg_hdr) { - dev->rd_msg_hdr = mei_read_hdr(dev); + if (!dev->rd_msg_hdr[0]) { + dev->rd_msg_hdr[0] = mei_read_hdr(dev); (*slots)--; dev_dbg(dev->dev, "slots =%08x.\n", *slots); - ret = hdr_is_valid(dev->rd_msg_hdr); + ret = hdr_is_valid(dev->rd_msg_hdr[0]); if (ret) { dev_err(dev->dev, "corrupted message header 0x%08X\n", - dev->rd_msg_hdr); + dev->rd_msg_hdr[0]); goto end; } } - mei_hdr = (struct mei_msg_hdr *)&dev->rd_msg_hdr; + mei_hdr = (struct mei_msg_hdr *)dev->rd_msg_hdr; dev_dbg(dev->dev, MEI_HDR_FMT, MEI_HDR_PRM(mei_hdr)); if (mei_slots2data(*slots) < mei_hdr->length) { @@ -291,6 +315,16 @@ int mei_irq_read_handler(struct mei_device *dev, goto end; } + if (mei_hdr->extended) { + dev->rd_msg_hdr[1] = mei_read_hdr(dev); + (*slots)--; + } + if (mei_hdr->dma_ring) { + dev->rd_msg_hdr[2] = mei_read_hdr(dev); + (*slots)--; + mei_hdr->length = 0; + } + /* HBM message */ if (hdr_is_hbm(mei_hdr)) { ret = mei_hbm_dispatch(dev, mei_hdr); @@ -324,7 +358,7 @@ int mei_irq_read_handler(struct mei_device *dev, goto reset_slots; } dev_err(dev->dev, "no destination client found 0x%08X\n", - dev->rd_msg_hdr); + dev->rd_msg_hdr[0]); ret = 
-EBADMSG; goto end; } @@ -334,9 +368,8 @@ int mei_irq_read_handler(struct mei_device *dev, reset_slots: /* reset the number of slots and header */ + memset(dev->rd_msg_hdr, 0, sizeof(dev->rd_msg_hdr)); *slots = mei_count_full_read_slots(dev); - dev->rd_msg_hdr = 0; - if (*slots == -EOVERFLOW) { /* overflow - reset */ dev_err(dev->dev, "resetting due to slots overflow.\n"); diff --git a/drivers/misc/mei/main.c b/drivers/misc/mei/main.c index 4d77a6ae183a9..e25390549286b 100644 --- a/drivers/misc/mei/main.c +++ b/drivers/misc/mei/main.c @@ -83,6 +83,20 @@ static int mei_open(struct inode *inode, struct file *file) return err; } +static void mei_cl_vtag_remove_by_fp(const struct mei_cl *cl, + const struct file *fp) +{ + struct mei_cl_vtag *vtag_l, *next; + + list_for_each_entry_safe(vtag_l, next, &cl->vtag_map, list) { + if (vtag_l->fp == fp) { + list_del(&vtag_l->list); + kfree(vtag_l); + return; + } + } +} + /** * mei_release - the release function * @@ -104,17 +118,32 @@ static int mei_release(struct inode *inode, struct file *file) mutex_lock(&dev->device_lock); + mei_cl_vtag_remove_by_fp(cl, file); + + if (!list_empty(&cl->vtag_map)) { + cl_dbg(dev, cl, "not the last vtag\n"); + mei_cl_flush_queues(cl, file); + rets = 0; + goto out; + } + rets = mei_cl_disconnect(cl); + /* Check again: This is necessary since disconnect releases the lock. 
*/ + if (!list_empty(&cl->vtag_map)) { + cl_dbg(dev, cl, "not the last vtag after disconnect\n"); + mei_cl_flush_queues(cl, file); + goto out; + } - mei_cl_flush_queues(cl, file); + mei_cl_flush_queues(cl, NULL); cl_dbg(dev, cl, "removing\n"); mei_cl_unlink(cl); + kfree(cl); +out: file->private_data = NULL; - kfree(cl); - mutex_unlock(&dev->device_lock); return rets; } @@ -181,7 +210,7 @@ static ssize_t mei_read(struct file *file, char __user *ubuf, mutex_unlock(&dev->device_lock); if (wait_event_interruptible(cl->rx_wait, - !list_empty(&cl->rd_completed) || + mei_cl_read_cb(cl, file) || !mei_cl_is_connected(cl))) { if (signal_pending(current)) return -EINTR; @@ -232,7 +261,7 @@ static ssize_t mei_read(struct file *file, char __user *ubuf, goto out; free: - mei_io_cb_free(cb); + mei_cl_del_rd_completed(cl, cb); *offset = 0; out: @@ -240,6 +269,20 @@ static ssize_t mei_read(struct file *file, char __user *ubuf, mutex_unlock(&dev->device_lock); return rets; } + +static u8 mei_cl_vtag_by_fp(const struct mei_cl *cl, const struct file *fp) +{ + struct mei_cl_vtag *cl_vtag; + + if (!fp) + return 0; + + list_for_each_entry(cl_vtag, &cl->vtag_map, list) + if (cl_vtag->fp == fp) + return cl_vtag->vtag; + return 0; +} + /** * mei_write - the write function. * @@ -317,6 +360,7 @@ static ssize_t mei_write(struct file *file, const char __user *ubuf, rets = -ENOMEM; goto out; } + cb->vtag = mei_cl_vtag_by_fp(cl, file); rets = copy_from_user(cb->buf.data, ubuf, length); if (rets) { @@ -336,17 +380,18 @@ static ssize_t mei_write(struct file *file, const char __user *ubuf, * mei_ioctl_connect_client - the connect to fw client IOCTL function * * @file: private data of the file object - * @data: IOCTL connect data, input and output parameters + * @in_client_uuid: requested UUID for connection + * @client: IOCTL connect data, output parameters * * Locking: called under "dev->device_lock" lock * * Return: 0 on success, <0 on failure. 
*/ static int mei_ioctl_connect_client(struct file *file, - struct mei_connect_client_data *data) + const uuid_le *in_client_uuid, + struct mei_client *client) { struct mei_device *dev; - struct mei_client *client; struct mei_me_client *me_cl; struct mei_cl *cl; int rets; @@ -354,18 +399,15 @@ static int mei_ioctl_connect_client(struct file *file, cl = file->private_data; dev = cl->dev; - if (dev->dev_state != MEI_DEV_ENABLED) - return -ENODEV; - if (cl->state != MEI_FILE_INITIALIZING && cl->state != MEI_FILE_DISCONNECTED) return -EBUSY; /* find ME client we're trying to connect to */ - me_cl = mei_me_cl_by_uuid(dev, &data->in_client_uuid); + me_cl = mei_me_cl_by_uuid(dev, in_client_uuid); if (!me_cl) { dev_dbg(dev->dev, "Cannot connect to FW Client UUID = %pUl\n", - &data->in_client_uuid); + in_client_uuid); rets = -ENOTTY; goto end; } @@ -375,7 +417,7 @@ static int mei_ioctl_connect_client(struct file *file, !dev->allow_fixed_address : !dev->hbm_f_fa_supported; if (forbidden) { dev_dbg(dev->dev, "Connection forbidden to FW Client UUID = %pUl\n", - &data->in_client_uuid); + in_client_uuid); rets = -ENOTTY; goto end; } @@ -389,7 +431,6 @@ static int mei_ioctl_connect_client(struct file *file, me_cl->props.max_msg_length); /* prepare the output buffer */ - client = &data->out_client_properties; client->max_msg_length = me_cl->props.max_msg_length; client->protocol_version = me_cl->props.protocol_version; dev_dbg(dev->dev, "Can connect?\n"); @@ -401,6 +442,98 @@ static int mei_ioctl_connect_client(struct file *file, return rets; } +static int mei_vm_support_check(struct mei_device *dev, const uuid_le *uuid) +{ + struct mei_me_client *me_cl; + int ret; + + if (!dev->hbm_f_vm_supported) { + dev_dbg(dev->dev, "VTag not supported\n"); + return -EOPNOTSUPP; + } + + me_cl = mei_me_cl_by_uuid(dev, uuid); + if (!me_cl) { + dev_dbg(dev->dev, "Cannot connect to FW Client UUID = %pUl\n", + uuid); + return -ENOTTY; + } + ret = me_cl->props.vm_supported ? 
0 : -EOPNOTSUPP; + mei_me_cl_put(me_cl); + + return ret; +} + +static int mei_ioctl_connect_vtag(struct file *file, + const uuid_le *in_client_uuid, + struct mei_client *client, + u8 vtag) +{ + struct mei_device *dev; + struct mei_cl *cl; + struct mei_cl *pos; + struct mei_cl_vtag *cl_vtag; + + cl = file->private_data; + dev = cl->dev; + + dev_dbg(dev->dev, "FW Client %pUl vtag %d\n", in_client_uuid, vtag); + + if (cl->state != MEI_FILE_INITIALIZING && + cl->state != MEI_FILE_DISCONNECTED) + return -EBUSY; + + list_for_each_entry(pos, &dev->file_list, link) { + if (pos == cl) + continue; + if (!pos->me_cl) + continue; + + /* FIXME: just compare me_cl addr */ + if (uuid_le_cmp(*mei_cl_uuid(pos), *in_client_uuid)) + continue; + + /* if tag already exist try another fp */ + if (!IS_ERR(mei_cl_fp_by_vtag(pos, vtag))) + continue; + + /* replace cl with acquired one */ + dev_dbg(dev->dev, "replacing with existing cl\n"); + mei_cl_unlink(cl); + kfree(cl); + file->private_data = pos; + cl = pos; + break; + } + + cl_vtag = mei_cl_vtag_alloc(file, vtag); + if (IS_ERR(cl_vtag)) + return -ENOMEM; + + list_add_tail(&cl_vtag->list, &cl->vtag_map); + + while (cl->state != MEI_FILE_INITIALIZING && + cl->state != MEI_FILE_DISCONNECTED && + cl->state != MEI_FILE_CONNECTED) { + mutex_unlock(&dev->device_lock); + wait_event_timeout(cl->wait, + (cl->state == MEI_FILE_CONNECTED || + cl->state == MEI_FILE_DISCONNECTED || + cl->state == MEI_FILE_DISCONNECT_REQUIRED || + cl->state == MEI_FILE_DISCONNECT_REPLY), + mei_secs_to_jiffies(MEI_CL_CONNECT_TIMEOUT)); + mutex_lock(&dev->device_lock); + } + + if (!mei_cl_is_connected(cl)) + return mei_ioctl_connect_client(file, in_client_uuid, client); + + client->max_msg_length = cl->me_cl->props.max_msg_length; + client->protocol_version = cl->me_cl->props.protocol_version; + + return 0; +} + /** * mei_ioctl_client_notify_request - * propagate event notification request to client @@ -457,7 +590,11 @@ static long mei_ioctl(struct file *file, 
unsigned int cmd, unsigned long data) { struct mei_device *dev; struct mei_cl *cl = file->private_data; - struct mei_connect_client_data connect_data; + struct mei_connect_client_data conn; + struct mei_connect_client_data_vtag conn_vtag; + const uuid_le *cl_uuid; + struct mei_client *props; + u8 vtag; u32 notify_get, notify_req; int rets; @@ -478,20 +615,65 @@ static long mei_ioctl(struct file *file, unsigned int cmd, unsigned long data) switch (cmd) { case IOCTL_MEI_CONNECT_CLIENT: dev_dbg(dev->dev, ": IOCTL_MEI_CONNECT_CLIENT.\n"); - if (copy_from_user(&connect_data, (char __user *)data, - sizeof(struct mei_connect_client_data))) { + if (copy_from_user(&conn, (char __user *)data, sizeof(conn))) { + dev_dbg(dev->dev, "failed to copy data from userland\n"); + rets = -EFAULT; + goto out; + } + cl_uuid = &conn.in_client_uuid; + props = &conn.out_client_properties; + vtag = 0; + + if (!mei_vm_support_check(dev, cl_uuid)) + rets = mei_ioctl_connect_vtag(file, cl_uuid, props, + vtag); + else + rets = mei_ioctl_connect_client(file, cl_uuid, props); + if (rets) + goto out; + + /* if all is ok, copying the data back to user. 
*/ + if (copy_to_user((char __user *)data, &conn, sizeof(conn))) { + dev_dbg(dev->dev, "failed to copy data to userland\n"); + rets = -EFAULT; + goto out; + } + + break; + + case IOCTL_MEI_CONNECT_CLIENT_VTAG: + dev_dbg(dev->dev, "IOCTL_MEI_CONNECT_CLIENT_VTAG\n"); + if (copy_from_user(&conn_vtag, (char __user *)data, + sizeof(conn_vtag))) { dev_dbg(dev->dev, "failed to copy data from userland\n"); rets = -EFAULT; goto out; } - rets = mei_ioctl_connect_client(file, &connect_data); + cl_uuid = &conn_vtag.connect.in_client_uuid; + props = &conn_vtag.out_client_properties; + vtag = conn_vtag.connect.vtag; + + if (mei_vm_support_check(dev, cl_uuid)) { + dev_dbg(dev->dev, "FW Client %pUl does not support vtags\n", + cl_uuid); + rets = -EOPNOTSUPP; + goto out; + } + + if (!vtag) { + dev_dbg(dev->dev, "vtag can't be zero\n"); + rets = -EINVAL; + goto out; + } + + rets = mei_ioctl_connect_vtag(file, cl_uuid, props, vtag); if (rets) goto out; /* if all is ok, copying the data back to user. */ - if (copy_to_user((char __user *)data, &connect_data, - sizeof(struct mei_connect_client_data))) { + if (copy_to_user((char __user *)data, &conn_vtag, + sizeof(conn_vtag))) { dev_dbg(dev->dev, "failed to copy data to userland\n"); rets = -EFAULT; goto out; @@ -593,16 +775,16 @@ static __poll_t mei_poll(struct file *file, poll_table *wait) if (req_events & (EPOLLIN | EPOLLRDNORM)) { poll_wait(file, &cl->rx_wait, wait); - if (!list_empty(&cl->rd_completed)) + if (mei_cl_read_cb(cl, file)) mask |= EPOLLIN | EPOLLRDNORM; else mei_cl_read_start(cl, mei_cl_mtu(cl), file); } - if (req_events & (POLLOUT | POLLWRNORM)) { + if (req_events & (EPOLLOUT | EPOLLWRNORM)) { poll_wait(file, &cl->tx_wait, wait); if (cl->tx_cb_queued < dev->tx_queue_limit) - mask |= POLLOUT | POLLWRNORM; + mask |= EPOLLOUT | EPOLLWRNORM; } out: @@ -838,12 +1020,36 @@ static ssize_t fw_ver_show(struct device *device, } static DEVICE_ATTR_RO(fw_ver); +/** + * dev_state_show - display device state + * + * @device: device 
pointer + * @attr: attribute pointer + * @buf: char out buffer + * + * Return: number of the bytes printed into buf or error + */ +static ssize_t dev_state_show(struct device *device, + struct device_attribute *attr, char *buf) +{ + struct mei_device *dev = dev_get_drvdata(device); + enum mei_dev_state dev_state; + + mutex_lock(&dev->device_lock); + dev_state = dev->dev_state; + mutex_unlock(&dev->device_lock); + + return sprintf(buf, "%s", mei_dev_state_str(dev_state)); +} +static DEVICE_ATTR_RO(dev_state); + static struct attribute *mei_attrs[] = { &dev_attr_fw_status.attr, &dev_attr_hbm_ver.attr, &dev_attr_hbm_ver_drv.attr, &dev_attr_tx_queue_limit.attr, &dev_attr_fw_ver.attr, + &dev_attr_dev_state.attr, NULL }; ATTRIBUTE_GROUPS(mei); @@ -940,6 +1146,8 @@ int mei_register(struct mei_device *dev, struct device *parent) goto err_dev_create; } + dev->sysfs_state = sysfs_get_dirent(clsdev->kobj.sd, "dev_state"); + ret = mei_dbgfs_register(dev, dev_name(clsdev)); if (ret) { dev_err(clsdev, "cannot register debugfs ret = %d\n", ret); diff --git a/drivers/misc/mei/mei_dev.h b/drivers/misc/mei/mei_dev.h index 377397e1b5a5b..96232c43c2163 100644 --- a/drivers/misc/mei/mei_dev.h +++ b/drivers/misc/mei/mei_dev.h @@ -122,6 +122,19 @@ struct mei_msg_data { unsigned char *data; }; +/** + * struct mei_dma_dscr - dma address descriptor + * + * @vaddr: dma buffer virtual address + * @daddr: dma buffer physical address + * @size : dma buffer size + */ +struct mei_dma_dscr { + void *vaddr; + dma_addr_t daddr; + size_t size; +}; + /* Maximum number of processed FW status registers */ #define MEI_FW_STATUS_MAX 6 /* Minimal buffer for FW status string (8 bytes in dw + space or '\0') */ @@ -171,6 +184,7 @@ struct mei_cl; * @fop_type: file operation type * @buf: buffer for data associated with the callback * @buf_idx: last read index + * @vtag: vm tag * @fp: pointer to file structure * @status: io status of the cb * @internal: communication between driver and FW flag @@ -182,12 +196,20 
@@ struct mei_cl_cb { enum mei_cb_file_ops fop_type; struct mei_msg_data buf; size_t buf_idx; + u8 vtag; const struct file *fp; int status; u32 internal:1; u32 blocking:1; }; +struct mei_cl_vtag { + struct list_head list; + const struct file *fp; + u8 vtag; + u8 pending_read:1; +}; + /** * struct mei_cl - me client host representation * carried in file->private_data @@ -204,6 +226,7 @@ struct mei_cl_cb { * @me_cl: fw client connected * @fp: file associated with client * @host_client_id: host id + * @vtag_map: vm tag map * @tx_flow_ctrl_creds: transmit flow credentials * @rx_flow_ctrl_creds: receive flow credentials * @timer_count: watchdog timer for operation completion @@ -212,6 +235,7 @@ struct mei_cl_cb { * @tx_cb_queued: number of tx callbacks in queue * @writing_state: state of the tx * @rd_pending: pending read credits + * @rd_completed_lock: protects rd_completed queue * @rd_completed: completed read * * @cldev: device on the mei client bus @@ -229,6 +253,7 @@ struct mei_cl { struct mei_me_client *me_cl; const struct file *fp; u8 host_client_id; + struct list_head vtag_map; u8 tx_flow_ctrl_creds; u8 rx_flow_ctrl_creds; u8 timer_count; @@ -237,6 +262,7 @@ struct mei_cl { u8 tx_cb_queued; enum mei_file_transaction_states writing_state; struct list_head rd_pending; + spinlock_t rd_completed_lock; /* protects rd_completed queue */ struct list_head rd_completed; struct mei_cl_device *cldev; @@ -409,6 +435,7 @@ struct mei_fw_version { * @rd_msg_hdr : read message header storage * * @hbuf_is_ready : query if the host host/write buffer is ready + * @dr_dscr: DMA ring descriptors: TX, RX, and CTRL * * @version : HBM protocol version in use * @hbm_f_pg_supported : hbm feature pgi protocol @@ -419,6 +446,8 @@ struct mei_fw_version { * @hbm_f_ie_supported : hbm feature immediate reply to enum request * @hbm_f_os_supported : hbm feature support OS ver message * @hbm_f_dr_supported : hbm feature dma ring supported + * @hbm_f_vm_supported : hbm feature vm tag supported + * 
@hbm_f_cap_supported : hbm feature capabilities message supported * * @fw_ver : FW versions * @@ -438,6 +467,8 @@ struct mei_fw_version { * * @dbgfs_dir : debugfs mei root directory * + * @sysfs_state : sysfs state object + * * @ops: : hw specific operations * @hw : hw specific data */ @@ -483,11 +514,13 @@ struct mei_device { #endif /* CONFIG_PM */ unsigned char rd_msg_buf[MEI_RD_MSG_BUF_SIZE]; - u32 rd_msg_hdr; + u32 rd_msg_hdr[MEI_MSG_HDR_MAX]; /* write buffer */ bool hbuf_is_ready; + struct mei_dma_dscr dr_dscr[DMA_DSCR_NUM]; + struct hbm_version version; unsigned int hbm_f_pg_supported:1; unsigned int hbm_f_dc_supported:1; @@ -497,6 +530,8 @@ struct mei_device { unsigned int hbm_f_ie_supported:1; unsigned int hbm_f_os_supported:1; unsigned int hbm_f_dr_supported:1; + unsigned int hbm_f_vm_supported:1; + unsigned int hbm_f_cap_supported:1; struct mei_fw_version fw_ver[MEI_MAX_FW_VER_BLOCKS]; @@ -519,6 +554,7 @@ struct mei_device { struct dentry *dbgfs_dir; #endif /* CONFIG_DEBUG_FS */ + struct kernfs_node *sysfs_state; const struct mei_hw_ops *ops; char hw[0] __aligned(sizeof(void *)); @@ -578,6 +614,22 @@ int mei_restart(struct mei_device *dev); void mei_stop(struct mei_device *dev); void mei_cancel_work(struct mei_device *dev); +static inline void mei_set_devstate(struct mei_device *dev, + enum mei_dev_state state) +{ + dev->dev_state = state; + if (dev->sysfs_state) + sysfs_notify_dirent(dev->sysfs_state); +} + +int mei_dmam_ring_alloc(struct mei_device *dev); +void mei_dmam_ring_free(struct mei_device *dev); +bool mei_dma_ring_is_allocated(struct mei_device *dev); +void mei_dma_ring_reset(struct mei_device *dev); +void mei_dma_ring_read(struct mei_device *dev, unsigned char *buf, u32 len); +void mei_dma_ring_write(struct mei_device *dev, unsigned char *buf, u32 len); +u32 mei_dma_ring_empty_slots(struct mei_device *dev); + /* * MEI interrupt functions prototype */ @@ -716,10 +768,11 @@ static inline void mei_dbgfs_deregister(struct mei_device *dev) {} int 
mei_register(struct mei_device *dev, struct device *parent); void mei_deregister(struct mei_device *dev); -#define MEI_HDR_FMT "hdr:host=%02d me=%02d len=%d dma=%1d internal=%1d comp=%1d" +#define MEI_HDR_FMT "hdr:host=%02d me=%02d len=%d dma=%1d ext=%1d internal=%1d comp=%1d" #define MEI_HDR_PRM(hdr) \ (hdr)->host_addr, (hdr)->me_addr, \ - (hdr)->length, (hdr)->dma_ring, (hdr)->internal, (hdr)->msg_complete + (hdr)->length, (hdr)->dma_ring, (hdr)->extended, \ + (hdr)->internal, (hdr)->msg_complete ssize_t mei_fw_status2str(struct mei_fw_status *fw_sts, char *buf, size_t len); /** diff --git a/drivers/misc/mei/pci-me.c b/drivers/misc/mei/pci-me.c index ea4e152270a3b..e89497f858ae3 100644 --- a/drivers/misc/mei/pci-me.c +++ b/drivers/misc/mei/pci-me.c @@ -88,19 +88,21 @@ static const struct pci_device_id mei_me_pci_tbl[] = { {MEI_PCI_DEVICE(MEI_DEV_ID_SPT_2, MEI_ME_PCH8_CFG)}, {MEI_PCI_DEVICE(MEI_DEV_ID_SPT_H, MEI_ME_PCH8_SPS_CFG)}, {MEI_PCI_DEVICE(MEI_DEV_ID_SPT_H_2, MEI_ME_PCH8_SPS_CFG)}, - {MEI_PCI_DEVICE(MEI_DEV_ID_LBG, MEI_ME_PCH8_CFG)}, + {MEI_PCI_DEVICE(MEI_DEV_ID_LBG, MEI_ME_PCH12_CFG)}, {MEI_PCI_DEVICE(MEI_DEV_ID_BXT_M, MEI_ME_PCH8_CFG)}, {MEI_PCI_DEVICE(MEI_DEV_ID_APL_I, MEI_ME_PCH8_CFG)}, + {MEI_PCI_DEVICE(MEI_DEV_ID_DNV_IE, MEI_ME_PCH8_CFG)}, + {MEI_PCI_DEVICE(MEI_DEV_ID_GLK, MEI_ME_PCH8_CFG)}, {MEI_PCI_DEVICE(MEI_DEV_ID_KBP, MEI_ME_PCH8_CFG)}, {MEI_PCI_DEVICE(MEI_DEV_ID_KBP_2, MEI_ME_PCH8_CFG)}, - {MEI_PCI_DEVICE(MEI_DEV_ID_CNP_LP, MEI_ME_PCH8_CFG)}, + {MEI_PCI_DEVICE(MEI_DEV_ID_CNP_LP, MEI_ME_PCH12_CFG)}, {MEI_PCI_DEVICE(MEI_DEV_ID_CNP_LP_4, MEI_ME_PCH8_CFG)}, - {MEI_PCI_DEVICE(MEI_DEV_ID_CNP_H, MEI_ME_PCH8_CFG)}, + {MEI_PCI_DEVICE(MEI_DEV_ID_CNP_H, MEI_ME_PCH12_CFG)}, {MEI_PCI_DEVICE(MEI_DEV_ID_CNP_H_4, MEI_ME_PCH8_CFG)}, /* required last entry */ diff --git a/drivers/misc/mei/spd/Kconfig b/drivers/misc/mei/spd/Kconfig new file mode 100644 index 0000000000000..085f9caa8c668 --- /dev/null +++ b/drivers/misc/mei/spd/Kconfig @@ -0,0 +1,12 @@ +# +# 
Storage proxy device configuration +# +config INTEL_MEI_SPD + tristate "Intel MEI Host Storage Proxy Driver" + depends on INTEL_MEI && BLOCK && RPMB + help + A driver for the host storage proxy ME client + The driver enables ME FW to store data on a storage devices + that are accessible only from the host. + + To compile this driver as a module, choose M here. diff --git a/drivers/misc/mei/spd/Makefile b/drivers/misc/mei/spd/Makefile new file mode 100644 index 0000000000000..72d0bca2974eb --- /dev/null +++ b/drivers/misc/mei/spd/Makefile @@ -0,0 +1,12 @@ +# +# Makefile for the Storage Proxy device driver. +# + +obj-$(CONFIG_INTEL_MEI_SPD) += mei_spd.o +mei_spd-objs := main.o +mei_spd-objs += cmd.o +mei_spd-objs += gpp.o +mei_spd-objs += rpmb.o +mei_spd-$(CONFIG_DEBUG_FS) += debugfs.o + +ccflags-y += -D__CHECK_ENDIAN__ diff --git a/drivers/misc/mei/spd/cmd.c b/drivers/misc/mei/spd/cmd.c new file mode 100644 index 0000000000000..3f45902e23da0 --- /dev/null +++ b/drivers/misc/mei/spd/cmd.c @@ -0,0 +1,546 @@ +// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 +/* + * Copyright(c) 2015 - 2018 Intel Corporation. All rights reserved. 
+ */ +#include +#include +#include + +#include "cmd.h" +#include "spd.h" + +#define spd_cmd_size(_cmd) \ + (sizeof(struct spd_cmd_hdr) + \ + sizeof(struct spd_cmd_##_cmd)) +#define spd_cmd_rpmb_size(_cmd) \ + (spd_cmd_size(_cmd) + SPD_CLIENT_RPMB_DATA_MAX_SIZE) + +#define to_spd_hdr(_buf) (struct spd_cmd_hdr *)(_buf) +#define to_spd_cmd(_cmd, _buf) \ + (struct spd_cmd_##_cmd *)((_buf) + sizeof(struct spd_cmd_hdr)) + +const char *spd_cmd_str(enum spd_cmd_type cmd) +{ +#define __SPD_CMD(_cmd) SPD_##_cmd##_CMD +#define SPD_CMD(cmd) case __SPD_CMD(cmd): return #cmd + switch (cmd) { + SPD_CMD(NONE); + SPD_CMD(START_STOP); + SPD_CMD(RPMB_WRITE); + SPD_CMD(RPMB_READ); + SPD_CMD(RPMB_GET_COUNTER); + SPD_CMD(GPP_WRITE); + SPD_CMD(GPP_READ); + SPD_CMD(TRIM); + SPD_CMD(INIT); + SPD_CMD(STORAGE_STATUS); + SPD_CMD(MAX); + default: + return "unknown"; + } +#undef SPD_CMD +#undef __SPD_CMD +} + +const char *mei_spd_dev_str(enum spd_storage_type type) +{ +#define SPD_TYPE(type) case SPD_TYPE_##type: return #type + switch (type) { + SPD_TYPE(UNDEF); + SPD_TYPE(EMMC); + SPD_TYPE(UFS); + default: + return "unknown"; + } +#undef SPD_TYPE +} + +const char *mei_spd_state_str(enum mei_spd_state state) +{ +#define SPD_STATE(state) case MEI_SPD_STATE_##state: return #state + switch (state) { + SPD_STATE(INIT); + SPD_STATE(INIT_WAIT); + SPD_STATE(INIT_DONE); + SPD_STATE(RUNNING); + SPD_STATE(STOPPING); + default: + return "unknown"; + } +#undef SPD_STATE +} + +/** + * mei_spd_init_req - send init request + * + * @spd: spd device + * + * Return: 0 on success + * -EPROTO if called in wrong state + * < 0 on write error + */ +int mei_spd_cmd_init_req(struct mei_spd *spd) +{ + const int req_len = sizeof(struct spd_cmd_hdr); + struct spd_cmd_hdr *hdr; + u32 cmd_type = SPD_INIT_CMD; + ssize_t ret; + + spd_dbg(spd, "cmd [%d] %s : state [%d] %s\n", + cmd_type, spd_cmd_str(cmd_type), + spd->state, mei_spd_state_str(spd->state)); + + if (spd->state != MEI_SPD_STATE_INIT) + return -EPROTO; + + 
memset(spd->buf, 0, req_len); + hdr = to_spd_hdr(spd->buf); + + hdr->command_type = cmd_type; + hdr->is_response = 0; + hdr->len = req_len; + + spd->state = MEI_SPD_STATE_INIT_WAIT; + ret = mei_cldev_send(spd->cldev, spd->buf, req_len); + if (ret != req_len) { + spd_err(spd, "start send failed ret = %zd\n", ret); + return ret; + } + + return 0; +} + +/** + * mei_spd_cmd_init_rsp - handle init response message + * + * @spd: spd device + * @cmd: received spd command + * @cmd_sz: received command size + * + * Return: 0 on success; < 0 otherwise + */ +static int mei_spd_cmd_init_rsp(struct mei_spd *spd, struct spd_cmd *cmd, + ssize_t cmd_sz) +{ + int type; + int gpp_id; + int i; + + if (cmd_sz < spd_cmd_size(init_resp)) { + spd_err(spd, "Wrong init response size\n"); + return -EINVAL; + } + + if (spd->state != MEI_SPD_STATE_INIT_WAIT) + return -EPROTO; + + type = cmd->init_rsp.type; + gpp_id = cmd->init_rsp.gpp_partition_id; + + switch (type) { + case SPD_TYPE_EMMC: + if (gpp_id < 1 || gpp_id > 4) { + spd_err(spd, "%s unsupported gpp id %d\n", + mei_spd_dev_str(type), gpp_id); + return -EINVAL; + } + break; + + case SPD_TYPE_UFS: + if (gpp_id < 1 || gpp_id > 6) { + spd_err(spd, "%s unsupported gpp id %d\n", + mei_spd_dev_str(type), gpp_id); + return -EINVAL; + } + break; + + default: + spd_err(spd, "unsupported storage type %d\n", + cmd->init_rsp.type); + return -EINVAL; + } + + spd->dev_type = type; + spd->gpp_partition_id = gpp_id; + + if (cmd->init_rsp.serial_no_sz != 0) { + if (cmd->init_rsp.serial_no_sz != + cmd_sz - spd_cmd_size(init_resp)) { + spd_err(spd, "wrong serial no size %u?=%zu\n", + cmd->init_rsp.serial_no_sz, + cmd_sz - spd_cmd_size(init_resp)); + return -EMSGSIZE; + } + + if (cmd->init_rsp.serial_no_sz > 256) { + spd_err(spd, "serial no is too large %u\n", + cmd->init_rsp.serial_no_sz); + return -EMSGSIZE; + } + + spd->dev_id = kzalloc(cmd->init_rsp.serial_no_sz, GFP_KERNEL); + if (!spd->dev_id) + return -ENOMEM; + + spd->dev_id_sz = 
cmd->init_rsp.serial_no_sz; + if (type == SPD_TYPE_EMMC) { + /* FW have this in be32 format */ + __be32 *sno = (__be32 *)cmd->init_rsp.serial_no; + u32 *dev_id = (u32 *)spd->dev_id; + + for (i = 0; i < spd->dev_id_sz / sizeof(u32); i++) + dev_id[i] = be32_to_cpu(sno[i]); + } else { + memcpy(spd->dev_id, &cmd->init_rsp.serial_no, + cmd->init_rsp.serial_no_sz); + } + } + + spd->state = MEI_SPD_STATE_INIT_DONE; + + return 0; +} + +/** + * mei_spd_cmd_storage_status_req - send storage status message + * + * @spd: spd device + * + * Return: 0 on success + * -EPROTO if called in wrong state + * < 0 on write error + */ +int mei_spd_cmd_storage_status_req(struct mei_spd *spd) +{ + struct spd_cmd_hdr *hdr; + struct spd_cmd_storage_status_req *req; + const int req_len = spd_cmd_size(storage_status_req); + u32 cmd_type = SPD_STORAGE_STATUS_CMD; + ssize_t ret; + + spd_dbg(spd, "cmd [%d] %s : state [%d] %s\n", + cmd_type, spd_cmd_str(cmd_type), + spd->state, mei_spd_state_str(spd->state)); + + if (spd->state < MEI_SPD_STATE_INIT_DONE) + return -EPROTO; + + memset(spd->buf, 0, req_len); + hdr = to_spd_hdr(spd->buf); + + hdr->command_type = cmd_type; + hdr->is_response = 0; + hdr->len = req_len; + + req = to_spd_cmd(storage_status_req, spd->buf); + req->gpp_on = mei_spd_gpp_is_open(spd); + req->rpmb_on = mei_spd_rpmb_is_open(spd); + + ret = mei_cldev_send(spd->cldev, spd->buf, req_len); + if (ret != req_len) { + spd_err(spd, "send storage status failed ret = %zd\n", ret); + return ret; + } + + if (req->gpp_on || req->rpmb_on) + spd->state = MEI_SPD_STATE_RUNNING; + else + spd->state = MEI_SPD_STATE_INIT_DONE; + + spd_dbg(spd, "cmd [%d] %s : state [%d] %s\n", + cmd_type, spd_cmd_str(cmd_type), + spd->state, mei_spd_state_str(spd->state)); + + return 0; +} + +static int mei_spd_cmd_gpp_write(struct mei_spd *spd, struct spd_cmd *cmd, + ssize_t out_buf_sz) +{ + size_t len = SPD_GPP_WRITE_DATA_LEN(*cmd); + int ret; + + if (out_buf_sz < spd_cmd_size(gpp_write_req)) { + spd_err(spd, 
"Wrong request size\n"); + return SPD_STATUS_INVALID_COMMAND; + } + + ret = mei_spd_gpp_write(spd, cmd->gpp_write_req.offset, + cmd->gpp_write_req.data, len); + if (ret) { + spd_err(spd, "Failed to write to gpp ret = %d\n", ret); + return SPD_STATUS_GENERAL_FAILURE; + } + + spd_dbg(spd, "wrote %zd bytes of data\n", len); + + cmd->header.len = spd_cmd_size(gpp_write_rsp); + + return SPD_STATUS_SUCCESS; +} + +static int mei_spd_cmd_gpp_read(struct mei_spd *spd, struct spd_cmd *cmd, + ssize_t out_buf_sz) +{ + size_t len; + int ret; + + if (out_buf_sz < spd_cmd_size(gpp_read_req)) { + spd_err(spd, "Wrong request size\n"); + return SPD_STATUS_INVALID_COMMAND; + } + + len = cmd->gpp_read_req.size_to_read; + if (len > SPD_CLIENT_GPP_DATA_MAX_SIZE) { + spd_err(spd, "Block is to large to read\n"); + return SPD_STATUS_INVALID_COMMAND; + } + + ret = mei_spd_gpp_read(spd, cmd->gpp_read_req.offset, + cmd->gpp_read_resp.data, len); + + if (ret) { + spd_err(spd, "Failed to read from gpp ret = %d\n", ret); + return SPD_STATUS_GENERAL_FAILURE; + } + + spd_dbg(spd, "read %zd bytes of data\n", len); + + cmd->header.len = spd_cmd_size(gpp_read_rsp) + len; + + return SPD_STATUS_SUCCESS; +} + +static int mei_spd_cmd_rpmb_read(struct mei_spd *spd, + struct spd_cmd *cmd, + ssize_t out_buf_sz) +{ + u8 *frame = cmd->rpmb_read.rpmb_frame; + + if (out_buf_sz != spd_cmd_rpmb_size(rpmb_read)) { + spd_err(spd, "Wrong request size\n"); + return SPD_STATUS_INVALID_COMMAND; + } + + if (mei_spd_rpmb_cmd_req(spd, RPMB_READ_DATA, frame)) + return SPD_STATUS_GENERAL_FAILURE; + + spd_dbg(spd, "read RPMB frame performed\n"); + return SPD_STATUS_SUCCESS; +} + +static int mei_spd_cmd_rpmb_write(struct mei_spd *spd, + struct spd_cmd *cmd, + ssize_t out_buf_sz) +{ + u8 *frame = cmd->rpmb_write.rpmb_frame; + + if (out_buf_sz != spd_cmd_rpmb_size(rpmb_write)) { + spd_err(spd, "Wrong request size\n"); + return SPD_STATUS_INVALID_COMMAND; + } + + if (mei_spd_rpmb_cmd_req(spd, RPMB_WRITE_DATA, frame)) + return 
SPD_STATUS_GENERAL_FAILURE; + + spd_dbg(spd, "write RPMB frame performed\n"); + return SPD_STATUS_SUCCESS; +} + +static int mei_spd_cmd_rpmb_get_counter(struct mei_spd *spd, + struct spd_cmd *cmd, + ssize_t out_buf_sz) +{ + u8 *frame = cmd->rpmb_get_counter.rpmb_frame; + + if (out_buf_sz != spd_cmd_rpmb_size(rpmb_get_counter)) { + spd_err(spd, "Wrong request size\n"); + return SPD_STATUS_INVALID_COMMAND; + } + + if (mei_spd_rpmb_cmd_req(spd, RPMB_WRITE_DATA, frame)) + return SPD_STATUS_GENERAL_FAILURE; + + spd_dbg(spd, "get RPMB counter performed\n"); + return SPD_STATUS_SUCCESS; +} + +static int mei_spd_cmd_response(struct mei_spd *spd, ssize_t out_buf_sz) +{ + struct spd_cmd *cmd = (struct spd_cmd *)spd->buf; + u32 spd_cmd; + int ret; + + spd_cmd = cmd->header.command_type; + + spd_dbg(spd, "rsp [%d] %s : state [%d] %s\n", + spd_cmd, spd_cmd_str(spd_cmd), + spd->state, mei_spd_state_str(spd->state)); + + switch (spd_cmd) { + case SPD_INIT_CMD: + ret = mei_spd_cmd_init_rsp(spd, cmd, out_buf_sz); + if (ret) + break; + mutex_unlock(&spd->lock); + mei_spd_rpmb_init(spd); + mei_spd_gpp_init(spd); + mutex_lock(&spd->lock); + break; + default: + ret = -EINVAL; + spd_err(spd, "Wrong response command %d\n", spd_cmd); + break; + } + + return ret; +} + +/** + * mei_spd_cmd_request - dispatch command requests from the SPD device + * + * @spd: spd device + * @out_buf_sz: buffer size + * + * Return: (TBD) + */ +static int mei_spd_cmd_request(struct mei_spd *spd, ssize_t out_buf_sz) +{ + struct spd_cmd *cmd = (struct spd_cmd *)spd->buf; + ssize_t written; + u32 spd_cmd; + int ret; + + spd_cmd = cmd->header.command_type; + + spd_dbg(spd, "req [%d] %s : state [%d] %s\n", + spd_cmd, spd_cmd_str(spd_cmd), + spd->state, mei_spd_state_str(spd->state)); + + if (spd->state < MEI_SPD_STATE_RUNNING) { + spd_err(spd, "Wrong state %d\n", spd->state); + ret = SPD_STATUS_INVALID_COMMAND; + goto reply; + } + + switch (spd_cmd) { + case SPD_RPMB_WRITE_CMD: + ret = mei_spd_cmd_rpmb_write(spd, 
cmd, out_buf_sz); + break; + case SPD_RPMB_READ_CMD: + ret = mei_spd_cmd_rpmb_read(spd, cmd, out_buf_sz); + break; + case SPD_RPMB_GET_COUNTER_CMD: + ret = mei_spd_cmd_rpmb_get_counter(spd, cmd, out_buf_sz); + break; + case SPD_GPP_WRITE_CMD: + ret = mei_spd_cmd_gpp_write(spd, cmd, out_buf_sz); + break; + case SPD_GPP_READ_CMD: + ret = mei_spd_cmd_gpp_read(spd, cmd, out_buf_sz); + break; + case SPD_TRIM_CMD: + spd_err(spd, "Command %d is not supported\n", spd_cmd); + ret = SPD_STATUS_NOT_SUPPORTED; + break; + default: + spd_err(spd, "Wrong request command %d\n", spd_cmd); + ret = SPD_STATUS_INVALID_COMMAND; + break; + } +reply: + cmd->header.is_response = 1; + cmd->header.status = ret; + if (ret != SPD_STATUS_SUCCESS) + cmd->header.len = sizeof(struct spd_cmd_hdr); + + written = mei_cldev_send(spd->cldev, spd->buf, cmd->header.len); + if (written != cmd->header.len) { + ret = SPD_STATUS_GENERAL_FAILURE; + spd_err(spd, "Failed to send reply written = %zd\n", written); + } + + /* FIXME: translate ret to errno */ + if (ret) + return -EINVAL; + + return 0; +} + +ssize_t mei_spd_cmd(struct mei_spd *spd) +{ + struct spd_cmd *cmd = (struct spd_cmd *)spd->buf; + ssize_t out_buf_sz; + int ret; + + out_buf_sz = mei_cldev_recv(spd->cldev, spd->buf, spd->buf_sz); + if (out_buf_sz < 0) { + spd_err(spd, "failure in receive ret = %zd\n", out_buf_sz); + return out_buf_sz; + } + + if (out_buf_sz == 0) { + spd_err(spd, "received empty msg\n"); + return 0; + } + + /* check that we've received at least sizeof(header) */ + if (out_buf_sz < sizeof(struct spd_cmd_hdr)) { + spd_err(spd, "Request is too short\n"); + return -EFAULT; + } + + if (cmd->header.is_response) + ret = mei_spd_cmd_response(spd, out_buf_sz); + else + ret = mei_spd_cmd_request(spd, out_buf_sz); + + return ret; +} + +static void mei_spd_status_send_work(struct work_struct *work) +{ + struct mei_spd *spd = + container_of(work, struct mei_spd, status_send_w); + + mutex_lock(&spd->lock); + 
mei_spd_cmd_storage_status_req(spd); + mutex_unlock(&spd->lock); +} + +void mei_spd_free(struct mei_spd *spd) +{ + if (!spd) + return; + + cancel_work_sync(&spd->status_send_w); + + kfree(spd->buf); + kfree(spd); +} + +struct mei_spd *mei_spd_alloc(struct mei_cl_device *cldev) +{ + struct mei_spd *spd; + u8 *buf; + + spd = kzalloc(sizeof(*spd), GFP_KERNEL); + if (!spd) + return NULL; + + spd->buf_sz = sizeof(struct spd_cmd) + SPD_CLIENT_GPP_DATA_MAX_SIZE; + buf = kmalloc(spd->buf_sz, GFP_KERNEL); + if (!buf) + goto free; + + spd->cldev = cldev; + spd->buf = buf; + spd->state = MEI_SPD_STATE_INIT; + mutex_init(&spd->lock); + INIT_WORK(&spd->status_send_w, mei_spd_status_send_work); + + return spd; +free: + kfree(spd); + return NULL; +} diff --git a/drivers/misc/mei/spd/cmd.h b/drivers/misc/mei/spd/cmd.h new file mode 100644 index 0000000000000..3f77550f44ab3 --- /dev/null +++ b/drivers/misc/mei/spd/cmd.h @@ -0,0 +1,230 @@ +/* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 */ +/* + * Copyright (C) 2015-2018 Intel Corp. All rights reserved + */ +#ifndef _SPD_CMD_H +#define _SPD_CMD_H + +#include + +/** + * enum spd_cmd_type - available commands + * + * @SPD_NONE_CMD : Lower command sentinel. + * @SPD_START_STOP_CMD : start stop command (deprecated). [Host -> TEE] + * @SPD_RPMB_WRITE_CMD : RPMB write request. [TEE -> Host] + * @SPD_RPMB_READ_CMD : RPMB read request. [TEE -> Host] + * @SPD_RPMB_GET_COUNTER_CMD: get counter request [TEE -> Host] + * @SPD_GPP_WRITE_CMD : GPP write request. [TEE -> Host] + * @SPD_GPP_READ_CMD : GPP read request. [TEE -> Host] + * @SPD_TRIM_CMD : TRIM command [TEE -> Host] + * @SPD_INIT_CMD : initial handshake between host and fw. [Host -> TEE] + * @SPD_STORAGE_STATUS_CMD : the backing storage status. [Host -> TEE] + * @SPD_MAX_CMD: Upper command sentinel. 
+ */ +enum spd_cmd_type { + SPD_NONE_CMD = 0, + SPD_START_STOP_CMD, + SPD_RPMB_WRITE_CMD, + SPD_RPMB_READ_CMD, + SPD_RPMB_GET_COUNTER_CMD, + SPD_GPP_WRITE_CMD, + SPD_GPP_READ_CMD, + SPD_TRIM_CMD, + SPD_INIT_CMD, + SPD_STORAGE_STATUS_CMD, + SPD_MAX_CMD, +}; + +enum spd_status { + SPD_STATUS_SUCCESS = 0, + SPD_STATUS_GENERAL_FAILURE = 1, + SPD_STATUS_NOT_READY = 2, + SPD_STATUS_NOT_SUPPORTED = 3, + SPD_STATUS_INVALID_COMMAND = 4, +}; + +/** + * enum spd_storage_type - storage device type + * + * @SPD_TYPE_UNDEF: lower enum sentinel + * @SPD_TYPE_EMMC: emmc device + * @SPD_TYPE_UFS: ufs device + * @SPD_TYPE_MAX: upper enum sentinel + */ +enum spd_storage_type { + SPD_TYPE_UNDEF = 0, + SPD_TYPE_EMMC = 1, + SPD_TYPE_UFS = 2, + SPD_TYPE_MAX +}; + +/** + * struct spd_cmd_hdr - Host storage Command Header + * + * @command_type: SPD_TYPES + * @is_response: 1 == Response, 0 == Request + * @len: command length + * @status: command status + * @reserved: reserved + */ +struct spd_cmd_hdr { + u32 command_type : 7; + u32 is_response : 1; + u32 len : 13; + u32 status : 8; + u32 reserved : 3; +} __packed; + +/** + * RPMB Frame Size as defined by the JDEC spec + */ +#define SPD_CLIENT_RPMB_DATA_MAX_SIZE (512) + +/** + * struct spd_cmd_init_resp + * commandType == HOST_STORAGE_INIT_CMD + * + * @gpp_partition_id: gpp_partition: + * UFS: LUN Number (0-7) + * EMMC: 1-4. 
+ * 0xff: GPP not supported + * @type: storage hw type + * SPD_TYPE_EMMC + * SPD_TYPE_UFS + * @serial_no_sz: serial_no size + * @serial_no: device serial number + */ +struct spd_cmd_init_resp { + u32 gpp_partition_id; + u32 type; + u32 serial_no_sz; + u8 serial_no[0]; +}; + +/** + * struct spd_cmd_storage_status_req + * commandType == SPD_STORAGE_STATUS_CMD + * + * @gpp_on: availability of the gpp backing storage + * 0 - GP partition is accessible + * 1 - GP partition is not accessible + * @rpmb_on: availability of the backing storage + * 0 - RPMB partition is accessible + * 1 - RPBM partition is not accessible + */ +struct spd_cmd_storage_status_req { + u32 gpp_on; + u32 rpmb_on; +} __packed; + +/** + * struct spd_cmd_rpmb_write + * command_type == SPD_RPMB_WRITE_CMD + * + * @rpmb_frame: RPMB frame are constant size (512) + */ +struct spd_cmd_rpmb_write { + u8 rpmb_frame[0]; +} __packed; + +/** + * struct spd_cmd_rpmb_read + * command_type == SPD_RPMB_READ_CMD + * + * @rpmb_frame: RPMB frame are constant size (512) + */ +struct spd_cmd_rpmb_read { + u8 rpmb_frame[0]; +} __packed; + +/** + * struct spd_cmd_rpmb_get_counter + * command_type == SPD_RPMB_GET_COUNTER_CMD + * + * @rpmb_frame: frame containing frame counter + */ +struct spd_cmd_rpmb_get_counter { + u8 rpmb_frame[0]; +} __packed; + +/** + * struct spd_cmd_gpp_write_req + * command_type == SPD_GPP_WRITE_CMD + * + * @offset: frame offset in partition + * @data: 4K page + */ +struct spd_cmd_gpp_write_req { + u32 offset; + u8 data[0]; +} __packed; + +/** + * struct spd_cmd_gpp_write_rsp + * command_type == SPD_GPP_WRITE_CMD + * + * @reserved: reserved + */ +struct spd_cmd_gpp_write_rsp { + u32 reserved[2]; +} __packed; + +/** + * struct spd_cmd_gpp_read_req + * command_type == SPD_GPP_READ_CMD + * + * @offset: offset of a frame on GPP partition + * @size_to_read: data length to read (must be ) + */ +struct spd_cmd_gpp_read_req { + u32 offset; + u32 size_to_read; +} __packed; + +/** + * struct 
spd_cmd_gpp_read_rsp + * command_type == SPD_GPP_READ_CMD + * + * @reserved: reserved + * @data: data + */ +struct spd_cmd_gpp_read_rsp { + u32 reserved; + u8 data[0]; +} __packed; + +#define SPD_GPP_READ_DATA_LEN(cmd) ((cmd).header.len - \ + (sizeof(struct spd_cmd_hdr) + \ + sizeof(struct spd_cmd_gpp_read_rsp))) + +#define SPD_GPP_WRITE_DATA_LEN(cmd) ((cmd).header.len - \ + (sizeof(struct spd_cmd_hdr) + \ + sizeof(struct spd_cmd_gpp_write_req))) + +struct spd_cmd { + struct spd_cmd_hdr header; + + union { + struct spd_cmd_rpmb_write rpmb_write; + struct spd_cmd_rpmb_read rpmb_read; + struct spd_cmd_rpmb_get_counter rpmb_get_counter; + + struct spd_cmd_gpp_write_req gpp_write_req; + struct spd_cmd_gpp_write_rsp gpp_write_rsp; + + struct spd_cmd_gpp_read_req gpp_read_req; + struct spd_cmd_gpp_read_rsp gpp_read_resp; + + struct spd_cmd_init_resp init_rsp; + struct spd_cmd_storage_status_req status_req; + }; +} __packed; + +/* GPP Max data 4K */ +#define SPD_CLIENT_GPP_DATA_MAX_SIZE (4096) + +const char *spd_cmd_str(enum spd_cmd_type cmd); +const char *mei_spd_dev_str(enum spd_storage_type type); + +#endif /* _SPD_CMD_H */ diff --git a/drivers/misc/mei/spd/debugfs.c b/drivers/misc/mei/spd/debugfs.c new file mode 100644 index 0000000000000..dfbb62a49fcc8 --- /dev/null +++ b/drivers/misc/mei/spd/debugfs.c @@ -0,0 +1,79 @@ +// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 +/* + * Copyright(c) 2015 - 2018 Intel Corporation. All rights reserved. 
+ */ +#include +#include +#include +#include + +#include "cmd.h" +#include "spd.h" + +static ssize_t mei_spd_dbgfs_read_info(struct file *fp, char __user *ubuf, + size_t cnt, loff_t *ppos) +{ + struct mei_spd *spd = fp->private_data; + size_t bufsz = 4095; + char *buf; + int pos = 0; + int ret; + + buf = kzalloc(bufsz, GFP_KERNEL); + if (!buf) + return -ENOMEM; + + pos += scnprintf(buf + pos, bufsz - pos, "DEV STATE: [%d] %s\n", + spd->state, mei_spd_state_str(spd->state)); + pos += scnprintf(buf + pos, bufsz - pos, "DEV TYPE : [%d] %s\n", + spd->dev_type, mei_spd_dev_str(spd->dev_type)); + pos += scnprintf(buf + pos, bufsz - pos, " ID SIZE : %d\n", + spd->dev_id_sz); + pos += scnprintf(buf + pos, bufsz - pos, " ID : '%s'\n", "N/A"); + pos += scnprintf(buf + pos, bufsz - pos, "GPP\n"); + pos += scnprintf(buf + pos, bufsz - pos, " id : %d\n", + spd->gpp_partition_id); + pos += scnprintf(buf + pos, bufsz - pos, " opened : %1d\n", + mei_spd_gpp_is_open(spd)); + + ret = simple_read_from_buffer(ubuf, cnt, ppos, buf, pos); + kfree(buf); + return ret; +} + +static const struct file_operations mei_spd_dbgfs_fops_info = { + .open = simple_open, + .read = mei_spd_dbgfs_read_info, + .llseek = generic_file_llseek, +}; + +void mei_spd_dbgfs_deregister(struct mei_spd *spd) +{ + if (!spd->dbgfs_dir) + return; + debugfs_remove_recursive(spd->dbgfs_dir); + spd->dbgfs_dir = NULL; +} + +int mei_spd_dbgfs_register(struct mei_spd *spd, const char *name) +{ + struct dentry *dir, *f; + + dir = debugfs_create_dir(name, NULL); + if (!dir) + return -ENOMEM; + + spd->dbgfs_dir = dir; + + f = debugfs_create_file("info", 0400, dir, + spd, &mei_spd_dbgfs_fops_info); + if (!f) { + spd_err(spd, "info: registration failed\n"); + goto err; + } + + return 0; +err: + mei_spd_dbgfs_deregister(spd); + return -ENODEV; +} diff --git a/drivers/misc/mei/spd/gpp.c b/drivers/misc/mei/spd/gpp.c new file mode 100644 index 0000000000000..b5d1a27a50ee3 --- /dev/null +++ b/drivers/misc/mei/spd/gpp.c @@ -0,0 
+1,299 @@ +// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 +/* + * Copyright(c) 2015 - 2018 Intel Corporation. All rights reserved. + */ +#include +#include +#include +#include +#include +#include + +#include "cmd.h" +#include "spd.h" + +static struct page *page_read(struct address_space *mapping, int index) +{ + return read_mapping_page(mapping, index, NULL); +} + +static int mei_spd_bd_read(struct mei_spd *spd, loff_t from, size_t len, + size_t *retlen, u_char *buf) +{ + struct page *page; + int index = from >> PAGE_SHIFT; + int offset = from & (PAGE_SIZE - 1); + int cpylen; + + while (len) { + if ((offset + len) > PAGE_SIZE) + cpylen = PAGE_SIZE - offset; + else + cpylen = len; + len = len - cpylen; + + page = page_read(spd->gpp->bd_inode->i_mapping, index); + if (IS_ERR(page)) + return PTR_ERR(page); + + memcpy(buf, page_address(page) + offset, cpylen); + put_page(page); + + if (retlen) + *retlen += cpylen; + buf += cpylen; + offset = 0; + index++; + } + return 0; +} + +static int _mei_spd_bd_write(struct block_device *dev, const u_char *buf, + loff_t to, size_t len, size_t *retlen) +{ + struct page *page; + struct address_space *mapping = dev->bd_inode->i_mapping; + int index = to >> PAGE_SHIFT; /* page index */ + int offset = to & ~PAGE_MASK; /* page offset */ + int cpylen; + + while (len) { + if ((offset + len) > PAGE_SIZE) + cpylen = PAGE_SIZE - offset; + else + cpylen = len; + len = len - cpylen; + + page = page_read(mapping, index); + if (IS_ERR(page)) + return PTR_ERR(page); + + if (memcmp(page_address(page) + offset, buf, cpylen)) { + lock_page(page); + memcpy(page_address(page) + offset, buf, cpylen); + set_page_dirty(page); + unlock_page(page); + balance_dirty_pages_ratelimited(mapping); + } + put_page(page); + + if (retlen) + *retlen += cpylen; + + buf += cpylen; + offset = 0; + index++; + } + return 0; +} + +static int mei_spd_bd_write(struct mei_spd *spd, loff_t to, size_t len, + size_t *retlen, const u_char *buf) +{ + int ret; + + ret = 
_mei_spd_bd_write(spd->gpp, buf, to, len, retlen); + if (ret > 0) + ret = 0; + + sync_blockdev(spd->gpp); + + return ret; +} + +static void mei_spd_bd_sync(struct mei_spd *spd) +{ + sync_blockdev(spd->gpp); +} + +#define GPP_FMODE (FMODE_WRITE | FMODE_READ | FMODE_EXCL) + +bool mei_spd_gpp_is_open(struct mei_spd *spd) +{ + struct request_queue *q; + + if (!spd->gpp) + return false; + + q = spd->gpp->bd_queue; + if (q && !blk_queue_stopped(q)) + return true; + + return false; +} + +static int mei_spd_gpp_open(struct mei_spd *spd, struct device *dev) +{ + int ret; + + if (spd->gpp) + return 0; + + spd->gpp = blkdev_get_by_dev(dev->devt, GPP_FMODE, spd); + if (IS_ERR(spd->gpp)) { + ret = PTR_ERR(spd->gpp); + spd->gpp = NULL; + spd_dbg(spd, "Can't get GPP block device %s ret = %d\n", + dev_name(dev), ret); + return ret; + } + + spd_dbg(spd, "gpp partition created\n"); + return 0; +} + +static int mei_spd_gpp_close(struct mei_spd *spd) +{ + if (!spd->gpp) + return 0; + + mei_spd_bd_sync(spd); + blkdev_put(spd->gpp, GPP_FMODE); + spd->gpp = NULL; + + spd_dbg(spd, "gpp partition removed\n"); + return 0; +} + +#define UFSHCD "ufshcd" +static bool mei_spd_lun_ufs_match(struct mei_spd *spd, struct device *dev) +{ + struct gendisk *disk = dev_to_disk(dev); + struct scsi_device *sdev; + + switch (disk->major) { + case SCSI_DISK0_MAJOR: + case SCSI_DISK1_MAJOR ... SCSI_DISK7_MAJOR: + case SCSI_DISK8_MAJOR ... 
SCSI_DISK15_MAJOR: + break; + default: + return false; + } + + sdev = to_scsi_device(dev->parent); + + if (!sdev->host || + strncmp(sdev->host->hostt->name, UFSHCD, strlen(UFSHCD))) + return false; + + return sdev->lun == spd->gpp_partition_id; +} + +static bool mei_spd_gpp_mmc_match(struct mei_spd *spd, struct device *dev) +{ + struct gendisk *disk = dev_to_disk(dev); + int idx, part_id; + + if (disk->major != MMC_BLOCK_MAJOR) + return false; + + if (sscanf(disk->disk_name, "mmcblk%dgp%d", &idx, &part_id) != 2) + return false; + + return part_id == spd->gpp_partition_id - 1; +} + +static bool mei_spd_gpp_match(struct mei_spd *spd, struct device *dev) +{ + /* we are only interested in physical partitions */ + if (strncmp(dev->type->name, "disk", sizeof("disk"))) + return false; + + if (spd->dev_type == SPD_TYPE_EMMC) + return mei_spd_gpp_mmc_match(spd, dev); + else if (spd->dev_type == SPD_TYPE_UFS) + return mei_spd_lun_ufs_match(spd, dev); + else + return false; +} + +static int gpp_add_device(struct device *dev, struct class_interface *intf) +{ + struct mei_spd *spd = container_of(intf, struct mei_spd, gpp_interface); + + if (!mei_spd_gpp_match(spd, dev)) + return 0; + + mutex_lock(&spd->lock); + if (mei_spd_gpp_open(spd, dev)) { + mutex_unlock(&spd->lock); + return 0; + } + + schedule_work(&spd->status_send_w); + mutex_unlock(&spd->lock); + + return 0; +} + +static void gpp_remove_device(struct device *dev, struct class_interface *intf) +{ + struct mei_spd *spd = container_of(intf, struct mei_spd, gpp_interface); + + if (!mei_spd_gpp_match(spd, dev)) + return; + + mutex_lock(&spd->lock); + if (mei_spd_gpp_close(spd)) { + mutex_unlock(&spd->lock); + return; + } + + if (spd->state != MEI_SPD_STATE_STOPPING) + schedule_work(&spd->status_send_w); + mutex_unlock(&spd->lock); +} + +int mei_spd_gpp_read(struct mei_spd *spd, size_t off, u8 *data, size_t size) +{ + int ret; + + spd_dbg(spd, "GPP read offset = %zx, size = %zx\n", off, size); + + if 
(!mei_spd_gpp_is_open(spd)) + return -ENODEV; + + ret = mei_spd_bd_read(spd, off, size, NULL, data); + if (ret) + spd_err(spd, "GPP read failed ret = %d\n", ret); + + return ret; +} + +int mei_spd_gpp_write(struct mei_spd *spd, size_t off, u8 *data, size_t size) +{ + int ret; + + spd_dbg(spd, "GPP write offset = %zx, size = %zx\n", off, size); + + if (!mei_spd_gpp_is_open(spd)) + return -ENODEV; + + ret = mei_spd_bd_write(spd, off, size, NULL, data); + if (ret) + spd_err(spd, "GPP write failed ret = %d\n", ret); + + return ret; +} + +void mei_spd_gpp_prepare(struct mei_spd *spd) +{ + spd->gpp_interface.add_dev = gpp_add_device; + spd->gpp_interface.remove_dev = gpp_remove_device; + spd->gpp_interface.class = &block_class; +} + +int mei_spd_gpp_init(struct mei_spd *spd) +{ + int ret; + + ret = class_interface_register(&spd->gpp_interface); + if (ret) + spd_err(spd, "Can't register interface\n"); + return ret; +} + +void mei_spd_gpp_exit(struct mei_spd *spd) +{ + class_interface_unregister(&spd->gpp_interface); +} diff --git a/drivers/misc/mei/spd/main.c b/drivers/misc/mei/spd/main.c new file mode 100644 index 0000000000000..468cceffb7a0a --- /dev/null +++ b/drivers/misc/mei/spd/main.c @@ -0,0 +1,120 @@ +// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 +/* + * Copyright(c) 2015 - 2018 Intel Corporation. All rights reserved. 
+ */ +#include + +#include "spd.h" + +static void mei_spd_rx_cb(struct mei_cl_device *cldev) +{ + struct mei_spd *spd = mei_cldev_get_drvdata(cldev); + + mutex_lock(&spd->lock); + mei_spd_cmd(spd); + mutex_unlock(&spd->lock); +} + +static int mei_spd_probe(struct mei_cl_device *cldev, + const struct mei_cl_device_id *id) +{ + struct mei_spd *spd; + u8 ver = mei_cldev_ver(cldev); + int ret; + + dev_dbg(&cldev->dev, "probing mei spd ver = %d\n", ver); + + if (ver < 2) { + dev_warn(&cldev->dev, "unuspported protocol version %d\n", ver); + return -ENODEV; + } + + spd = mei_spd_alloc(cldev); + if (!spd) + return -ENOMEM; + + mei_cldev_set_drvdata(cldev, spd); + + ret = mei_spd_dbgfs_register(spd, "spd"); + if (ret) + goto free; + + ret = mei_cldev_enable(cldev); + if (ret < 0) { + dev_err(&cldev->dev, "Could not enable device ret = %d\n", ret); + goto free; + } + + ret = mei_cldev_register_rx_cb(cldev, mei_spd_rx_cb); + if (ret) { + dev_err(&cldev->dev, "Error register event %d\n", ret); + goto disable; + } + + spd_dbg(spd, "protocol version %d\n", ver); + mei_spd_gpp_prepare(spd); + mei_spd_rpmb_prepare(spd); + mutex_lock(&spd->lock); + ret = mei_spd_cmd_init_req(spd); + mutex_unlock(&spd->lock); + if (ret) { + dev_err(&cldev->dev, "Could not start ret = %d\n", ret); + goto disable; + } + + return 0; + +disable: + mei_cldev_disable(cldev); + +free: + mei_spd_dbgfs_deregister(spd); + mei_cldev_set_drvdata(cldev, NULL); + mei_spd_free(spd); + return ret; +} + +static int mei_spd_remove(struct mei_cl_device *cldev) +{ + struct mei_spd *spd = mei_cldev_get_drvdata(cldev); + + if (spd->state == MEI_SPD_STATE_RUNNING) { + spd->state = MEI_SPD_STATE_STOPPING; + mei_spd_gpp_exit(spd); + mei_spd_rpmb_exit(spd); + mutex_lock(&spd->lock); + mei_spd_cmd_storage_status_req(spd); + mutex_unlock(&spd->lock); + } + + mei_cldev_disable(cldev); + mei_spd_dbgfs_deregister(spd); + mei_cldev_set_drvdata(cldev, NULL); + mei_spd_free(spd); + + return 0; +} + +#define MEI_SPD_UUID 
UUID_LE(0x2a39291f, 0x5551, 0x482f, \ + 0x99, 0xcb, 0x9e, 0x22, 0x74, 0x97, 0x8c, 0xa8) + +static struct mei_cl_device_id mei_spd_tbl[] = { + { .uuid = MEI_SPD_UUID, .version = MEI_CL_VERSION_ANY}, + /* required last entry */ + { } +}; +MODULE_DEVICE_TABLE(mei, mei_spd_tbl); + +static struct mei_cl_driver mei_spd_driver = { + .id_table = mei_spd_tbl, + .name = "mei_spd", + + .probe = mei_spd_probe, + .remove = mei_spd_remove, +}; + +module_mei_cl_driver(mei_spd_driver); + +MODULE_AUTHOR("Intel Corporation"); +MODULE_LICENSE("Dual BSD/GPL"); +MODULE_DESCRIPTION("Storage Proxy driver based on mei bus"); diff --git a/drivers/misc/mei/spd/rpmb.c b/drivers/misc/mei/spd/rpmb.c new file mode 100644 index 0000000000000..b74d0cd8f8025 --- /dev/null +++ b/drivers/misc/mei/spd/rpmb.c @@ -0,0 +1,199 @@ +// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 +/* + * Intel Host Storage Interface Linux driver + * Copyright (c) 2015 - 2018, Intel Corporation. + */ + +#include "cmd.h" +#include "spd.h" +#include + +static int mei_spd_rpmb_start(struct mei_spd *spd, struct rpmb_dev *rdev) +{ + if (spd->rdev == rdev) + return 0; + + if (spd->rdev) { + spd_warn(spd, "rpmb device already registered\n"); + return -EEXIST; + } + + spd->rdev = rpmb_dev_get(rdev); + spd_dbg(spd, "rpmb partition created\n"); + return 0; +} + +static int mei_spd_rpmb_stop(struct mei_spd *spd, struct rpmb_dev *rdev) +{ + if (!spd->rdev) { + spd_dbg(spd, "Already stopped\n"); + return -EPROTO; + } + + if (rdev && spd->rdev != rdev) { + spd_dbg(spd, "Wrong RPMB on stop\n"); + return -EINVAL; + } + + rpmb_dev_put(spd->rdev); + spd->rdev = NULL; + + spd_dbg(spd, "rpmb partition removed\n"); + return 0; +} + +static int mei_spd_rpmb_match(struct mei_spd *spd, struct rpmb_dev *rdev) +{ + if (spd->dev_id_sz && rdev->ops->dev_id) { + if (rdev->ops->dev_id_len != spd->dev_id_sz || + memcmp(rdev->ops->dev_id, spd->dev_id, + rdev->ops->dev_id_len)) { + spd_dbg(spd, "ignore request for another rpmb\n"); + /* return 0; FW 
sends garbage now, ignore it */ + } + } + + switch (rdev->ops->type) { + case RPMB_TYPE_EMMC: + if (spd->dev_type != SPD_TYPE_EMMC) + return 0; + break; + case RPMB_TYPE_UFS: + if (spd->dev_type != SPD_TYPE_UFS) + return 0; + break; + default: + return 0; + } + + return 1; +} + +static int rpmb_add_device(struct device *dev, struct class_interface *intf) +{ + struct mei_spd *spd = + container_of(intf, struct mei_spd, rpmb_interface); + struct rpmb_dev *rdev = to_rpmb_dev(dev); + + if (!mei_spd_rpmb_match(spd, rdev)) + return 0; + + mutex_lock(&spd->lock); + if (mei_spd_rpmb_start(spd, rdev)) { + mutex_unlock(&spd->lock); + return 0; + } + + schedule_work(&spd->status_send_w); + mutex_unlock(&spd->lock); + + return 0; +} + +static void rpmb_remove_device(struct device *dev, struct class_interface *intf) +{ + struct mei_spd *spd = + container_of(intf, struct mei_spd, rpmb_interface); + struct rpmb_dev *rdev = to_rpmb_dev(dev); + + if (!mei_spd_rpmb_match(spd, rdev)) + return; + + mutex_lock(&spd->lock); + if (mei_spd_rpmb_stop(spd, rdev)) { + mutex_unlock(&spd->lock); + return; + } + + if (spd->state != MEI_SPD_STATE_STOPPING) + schedule_work(&spd->status_send_w); + mutex_unlock(&spd->lock); +} + +void mei_spd_rpmb_prepare(struct mei_spd *spd) +{ + spd->rpmb_interface.add_dev = rpmb_add_device; + spd->rpmb_interface.remove_dev = rpmb_remove_device; + spd->rpmb_interface.class = &rpmb_class; +} + +/** + * mei_spd_rpmb_init - init RPMB connection + * + * @spd: device + * + * Locking: spd->lock should not be held + * Returns: 0 if initialized successfully, <0 otherwise + */ +int mei_spd_rpmb_init(struct mei_spd *spd) +{ + int ret; + + ret = class_interface_register(&spd->rpmb_interface); + if (ret) + spd_err(spd, "Can't register interface\n"); + return ret; +} + +/** + * mei_spd_rpmb_exit - clean RPMB connection + * + * @spd: device + * + * Locking: spd->lock should not be held + */ +void mei_spd_rpmb_exit(struct mei_spd *spd) +{ + 
class_interface_unregister(&spd->rpmb_interface); +} + +int mei_spd_rpmb_cmd_req(struct mei_spd *spd, u16 req, void *buf) +{ + struct rpmb_cmd cmd[3]; + struct rpmb_frame_jdec *frame_res = NULL; + u32 flags; + unsigned int i; + int ret; + + if (!spd->rdev) { + spd_err(spd, "RPMB not ready\n"); + return -ENODEV; + } + + i = 0; + flags = RPMB_F_WRITE; + if (req == RPMB_WRITE_DATA || req == RPMB_PROGRAM_KEY) + flags |= RPMB_F_REL_WRITE; + cmd[i].flags = flags; + cmd[i].nframes = 1; + cmd[i].frames = buf; + i++; + + if (req == RPMB_WRITE_DATA || req == RPMB_PROGRAM_KEY) { + frame_res = kzalloc(sizeof(*frame_res), GFP_KERNEL); + if (!frame_res) + return -ENOMEM; + frame_res->req_resp = cpu_to_be16(RPMB_RESULT_READ); + cmd[i].flags = RPMB_F_WRITE; + cmd[i].nframes = 1; + cmd[i].frames = frame_res; + i++; + } + + cmd[i].flags = 0; + cmd[i].nframes = 1; + cmd[i].frames = buf; + i++; + + ret = rpmb_cmd_seq(spd->rdev, cmd, i); + if (ret) + spd_err(spd, "RPMB req failed ret = %d\n", ret); + + kfree(frame_res); + return ret; +} + +bool mei_spd_rpmb_is_open(struct mei_spd *spd) +{ + return !!spd->rdev; +} diff --git a/drivers/misc/mei/spd/spd.h b/drivers/misc/mei/spd/spd.h new file mode 100644 index 0000000000000..b919a5cb7a4ca --- /dev/null +++ b/drivers/misc/mei/spd/spd.h @@ -0,0 +1,104 @@ +/* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 */ +/* + * Copyright (C) 2015-2018 Intel Corp. 
All rights reserved + */ +#ifndef _MEI_SPD_H +#define _MEI_SPD_H + +#include +#include +#include + +enum mei_spd_state { + MEI_SPD_STATE_INIT, + MEI_SPD_STATE_INIT_WAIT, + MEI_SPD_STATE_INIT_DONE, + MEI_SPD_STATE_RUNNING, + MEI_SPD_STATE_STOPPING, +}; + +/** + * struct mei_spd - spd device struct + * + * @cldev: client bus device + * @gpp: GPP partition block device + * @gpp_partition_id: GPP partition id (1-6) + * @gpp_interface: gpp class interface for discovery + * @dev_type: storage device type + * @dev_id_sz: device id size + * @dev_id: device id string + * @rdev: RPMB device + * @rpmb_interface: rpmb class interface for discovery + * @lock: mutex to sync request processing + * @state: driver state + * @status_send_w: workitem for sending status to the FW + * @buf_sz: receive/transmit buffer allocated size + * @buf: receive/transmit buffer + * @dbgfs_dir: debugfs directory entry + */ +struct mei_spd { + struct mei_cl_device *cldev; + struct block_device *gpp; + u32 gpp_partition_id; + struct class_interface gpp_interface; + u32 dev_type; + u32 dev_id_sz; + u8 *dev_id; + struct rpmb_dev *rdev; + struct class_interface rpmb_interface; + struct mutex lock; /* mutex to sync request processing */ + enum mei_spd_state state; + struct work_struct status_send_w; + size_t buf_sz; + u8 *buf; + +#if IS_ENABLED(CONFIG_DEBUG_FS) + struct dentry *dbgfs_dir; +#endif /* CONFIG_DEBUG_FS */ +}; + +struct mei_spd *mei_spd_alloc(struct mei_cl_device *cldev); +void mei_spd_free(struct mei_spd *spd); + +int mei_spd_cmd_init_req(struct mei_spd *spd); +int mei_spd_cmd_storage_status_req(struct mei_spd *spd); +ssize_t mei_spd_cmd(struct mei_spd *spd); + +void mei_spd_gpp_prepare(struct mei_spd *spd); +bool mei_spd_gpp_is_open(struct mei_spd *spd); +int mei_spd_gpp_init(struct mei_spd *spd); +void mei_spd_gpp_exit(struct mei_spd *spd); +int mei_spd_gpp_read(struct mei_spd *spd, size_t off, u8 *data, size_t size); +int mei_spd_gpp_write(struct mei_spd *spd, size_t off, u8 *data, size_t
size); + +void mei_spd_rpmb_prepare(struct mei_spd *spd); +bool mei_spd_rpmb_is_open(struct mei_spd *spd); +int mei_spd_rpmb_init(struct mei_spd *spd); +void mei_spd_rpmb_exit(struct mei_spd *spd); +int mei_spd_rpmb_cmd_req(struct mei_spd *spd, u16 req_type, void *buf); + +#if IS_ENABLED(CONFIG_DEBUG_FS) +int mei_spd_dbgfs_register(struct mei_spd *spd, const char *name); +void mei_spd_dbgfs_deregister(struct mei_spd *spd); +#else +static inline int mei_spd_dbgfs_register(struct mei_spd *spd, const char *name) +{ + return 0; +} + +static inline void mei_spd_dbgfs_deregister(struct mei_spd *spd) +{ +} + +#endif /* CONFIG_DEBUG_FS */ + +const char *mei_spd_state_str(enum mei_spd_state state); + +#define spd_err(spd, fmt, ...) \ + dev_err(&(spd)->cldev->dev, fmt, ##__VA_ARGS__) +#define spd_warn(spd, fmt, ...) \ + dev_warn(&(spd)->cldev->dev, fmt, ##__VA_ARGS__) +#define spd_dbg(spd, fmt, ...) \ + dev_dbg(&(spd)->cldev->dev, fmt, ##__VA_ARGS__) + +#endif /* _MEI_SPD_H */ diff --git a/drivers/misc/mic/scif/scif_rma.c b/drivers/misc/mic/scif/scif_rma.c index c824329f7012a..0e4193cb08cf1 100644 --- a/drivers/misc/mic/scif/scif_rma.c +++ b/drivers/misc/mic/scif/scif_rma.c @@ -416,7 +416,7 @@ static int scif_create_remote_lookup(struct scif_dev *remote_dev, if (err) goto error_window; err = scif_map_page(&window->num_pages_lookup.lookup[j], - vmalloc_dma_phys ? + vmalloc_num_pages ? 
vmalloc_to_page(&window->num_pages[i]) : virt_to_page(&window->num_pages[i]), remote_dev); diff --git a/drivers/misc/ocxl/config.c b/drivers/misc/ocxl/config.c index 2e30de9c694ac..8f2c5d8bd2eee 100644 --- a/drivers/misc/ocxl/config.c +++ b/drivers/misc/ocxl/config.c @@ -280,7 +280,9 @@ int ocxl_config_check_afu_index(struct pci_dev *dev, u32 val; int rc, templ_major, templ_minor, len; - pci_write_config_word(dev, fn->dvsec_afu_info_pos, afu_idx); + pci_write_config_byte(dev, + fn->dvsec_afu_info_pos + OCXL_DVSEC_AFU_INFO_AFU_IDX, + afu_idx); rc = read_afu_info(dev, fn, OCXL_DVSEC_TEMPL_VERSION, &val); if (rc) return rc; @@ -316,7 +318,7 @@ static int read_afu_name(struct pci_dev *dev, struct ocxl_fn_config *fn, if (rc) return rc; ptr = (u32 *) &afu->name[i]; - *ptr = val; + *ptr = le32_to_cpu((__force __le32) val); } afu->name[OCXL_AFU_NAME_SZ - 1] = '\0'; /* play safe */ return 0; diff --git a/drivers/misc/ocxl/link.c b/drivers/misc/ocxl/link.c index 31695a078485a..646d16450066f 100644 --- a/drivers/misc/ocxl/link.c +++ b/drivers/misc/ocxl/link.c @@ -566,7 +566,7 @@ int ocxl_link_update_pe(void *link_handle, int pasid, __u16 tid) mutex_lock(&spa->spa_lock); - pe->tid = tid; + pe->tid = cpu_to_be32(tid); /* * The barrier makes sure the PE is updated diff --git a/drivers/misc/sgi-gru/grukdump.c b/drivers/misc/sgi-gru/grukdump.c index 313da31502626..1540a7785e147 100644 --- a/drivers/misc/sgi-gru/grukdump.c +++ b/drivers/misc/sgi-gru/grukdump.c @@ -27,6 +27,9 @@ #include #include #include + +#include + #include "gru.h" #include "grutables.h" #include "gruhandles.h" @@ -196,6 +199,7 @@ int gru_dump_chiplet_request(unsigned long arg) /* Currently, only dump by gid is implemented */ if (req.gid >= gru_max_gids) return -EINVAL; + req.gid = array_index_nospec(req.gid, gru_max_gids); gru = GID_TO_GRU(req.gid); ubuf = req.buf; diff --git a/drivers/misc/vmw_vmci/vmci_driver.c b/drivers/misc/vmw_vmci/vmci_driver.c index d7eaf1eb11e7f..003bfba407588 100644 --- 
a/drivers/misc/vmw_vmci/vmci_driver.c +++ b/drivers/misc/vmw_vmci/vmci_driver.c @@ -113,5 +113,5 @@ module_exit(vmci_drv_exit); MODULE_AUTHOR("VMware, Inc."); MODULE_DESCRIPTION("VMware Virtual Machine Communication Interface."); -MODULE_VERSION("1.1.5.0-k"); +MODULE_VERSION("1.1.6.0-k"); MODULE_LICENSE("GPL v2"); diff --git a/drivers/misc/vmw_vmci/vmci_host.c b/drivers/misc/vmw_vmci/vmci_host.c index 83e0c95d20a47..be732e5ead75b 100644 --- a/drivers/misc/vmw_vmci/vmci_host.c +++ b/drivers/misc/vmw_vmci/vmci_host.c @@ -15,7 +15,6 @@ #include #include -#include #include #include #include diff --git a/drivers/misc/vmw_vmci/vmci_resource.c b/drivers/misc/vmw_vmci/vmci_resource.c index 1ab6e8737a5f0..da1ee2e1ba991 100644 --- a/drivers/misc/vmw_vmci/vmci_resource.c +++ b/drivers/misc/vmw_vmci/vmci_resource.c @@ -57,7 +57,8 @@ static struct vmci_resource *vmci_resource_lookup(struct vmci_handle handle, if (r->type == type && rid == handle.resource && - (cid == handle.context || cid == VMCI_INVALID_ID)) { + (cid == handle.context || cid == VMCI_INVALID_ID || + handle.context == VMCI_INVALID_ID)) { resource = r; break; } diff --git a/drivers/mmc/core/Kconfig b/drivers/mmc/core/Kconfig index 42e89060cd41e..96c7ff63178ca 100644 --- a/drivers/mmc/core/Kconfig +++ b/drivers/mmc/core/Kconfig @@ -36,6 +36,7 @@ config PWRSEQ_SIMPLE config MMC_BLOCK tristate "MMC block device driver" depends on BLOCK + select RPMB default y help Say Y here to enable the MMC block device driver support. 
diff --git a/drivers/mmc/core/block.c b/drivers/mmc/core/block.c index e201ccb3fda4d..8ac6113ebe5b6 100644 --- a/drivers/mmc/core/block.c +++ b/drivers/mmc/core/block.c @@ -44,6 +44,7 @@ #include #include #include +#include #include @@ -409,8 +410,8 @@ static int mmc_blk_ioctl_copy_to_user(struct mmc_ioc_cmd __user *ic_ptr, return 0; } -static int ioctl_rpmb_card_status_poll(struct mmc_card *card, u32 *status, - u32 retries_max) +static int ioctl_mmc_blk_rpmb_status_poll(struct mmc_card *card, u32 *status, + u32 retries_max) { int err; u32 retry_count = 0; @@ -472,7 +473,7 @@ static int ioctl_do_sanitize(struct mmc_card *card) static int __mmc_blk_ioctl_cmd(struct mmc_card *card, struct mmc_blk_data *md, struct mmc_blk_ioc_data *idata) { - struct mmc_command cmd = {}; + struct mmc_command cmd = {}, sbc = {}; struct mmc_data data = {}; struct mmc_request mrq = {}; struct scatterlist sg; @@ -550,10 +551,15 @@ static int __mmc_blk_ioctl_cmd(struct mmc_card *card, struct mmc_blk_data *md, } if (idata->rpmb) { - err = mmc_set_blockcount(card, data.blocks, - idata->ic.write_flag & (1 << 31)); - if (err) - return err; + sbc.opcode = MMC_SET_BLOCK_COUNT; + /* + * We don't do any blockcount validation because the max size + * may be increased by a future standard. We just copy the + * 'Reliable Write' bit here. + */ + sbc.arg = data.blocks | (idata->ic.write_flag & BIT(31)); + sbc.flags = MMC_RSP_R1 | MMC_CMD_AC; + mrq.sbc = &sbc; } if ((MMC_EXTRACT_INDEX_FROM_ARG(cmd.arg) == EXT_CSD_SANITIZE_START) && @@ -612,7 +618,7 @@ static int __mmc_blk_ioctl_cmd(struct mmc_card *card, struct mmc_blk_data *md, * Ensure RPMB command has completed by polling CMD13 * "Send Status". 
*/ - err = ioctl_rpmb_card_status_poll(card, &status, 5); + err = ioctl_mmc_blk_rpmb_status_poll(card, &status, 5); if (err) dev_err(mmc_dev(card->host), "%s: Card Status=0x%08X, error %d\n", @@ -1116,6 +1122,217 @@ static void mmc_blk_issue_drv_op(struct mmc_queue *mq, struct request *req) blk_mq_end_request(req, ret ? BLK_STS_IOERR : BLK_STS_OK); } +static int mmc_blk_rpmb_process(struct mmc_blk_data *md, + struct mmc_blk_ioc_data *idata[], + u64 num_of_cmds) +{ + struct mmc_card *card; + struct mmc_queue *mq; + int err = 0; + struct request *req; + int op_mode; + + card = md->queue.card; + if (IS_ERR(card)) { + err = PTR_ERR(card); + goto cmd_err; + } + + /* + * Dispatch the ioctl()s into the block request queue. + */ + mq = &md->queue; + op_mode = idata[0]->ic.write_flag ? REQ_OP_DRV_OUT : REQ_OP_DRV_IN, + req = blk_get_request(mq->queue, op_mode, 0); + if (IS_ERR(req)) { + err = PTR_ERR(req); + goto cmd_err; + } + + req_to_mmc_queue_req(req)->drv_op = MMC_DRV_OP_IOCTL_RPMB; + req_to_mmc_queue_req(req)->drv_op_data = idata; + req_to_mmc_queue_req(req)->ioc_count = num_of_cmds; + + blk_execute_rq(mq->queue, NULL, req, 0); + + err = req_to_mmc_queue_req(req)->drv_op_result; + + blk_put_request(req); + +cmd_err: + return err; +} + +static +struct mmc_blk_ioc_data *mmc_blk_rpmb_cmd_to_ioc_data(struct rpmb_cmd *cmd) +{ + struct mmc_blk_ioc_data *idata; + int err; + + idata = kzalloc(sizeof(*idata), GFP_KERNEL); + if (!idata) { + err = -ENOMEM; + goto out; + } + + if (cmd->flags & RPMB_F_WRITE) { + idata->ic.opcode = MMC_WRITE_MULTIPLE_BLOCK; + idata->ic.write_flag = 1; + if (cmd->flags & RPMB_F_REL_WRITE) + idata->ic.write_flag |= 1 << 31; + } else { + idata->ic.opcode = MMC_READ_MULTIPLE_BLOCK; + } + + /* nframes == 0 in case there is only meta data in the frame */ + idata->ic.blocks = cmd->nframes ?: 1; + idata->ic.blksz = 512; + + idata->buf_bytes = (u64)idata->ic.blksz * idata->ic.blocks; + if (idata->buf_bytes > MMC_IOC_MAX_BYTES) { + err = -EOVERFLOW; + goto 
out; + } + + idata->buf = (unsigned char *)cmd->frames; + + return idata; +out: + kfree(idata); + return ERR_PTR(err); +} + +static int mmc_blk_rpmb_cmd_seq(struct device *dev, u8 target, + struct rpmb_cmd *cmds, + u32 num_of_cmds) +{ + struct mmc_rpmb_data *rpmb = dev_get_drvdata(dev); + struct mmc_blk_ioc_data **idata; + int err = 0; + u32 i; + + if (!rpmb) + return -ENODEV; + + idata = kcalloc(num_of_cmds, sizeof(*idata), GFP_KERNEL); + if (!idata) + return -ENOMEM; + + for (i = 0; i < num_of_cmds; i++) { + idata[i] = mmc_blk_rpmb_cmd_to_ioc_data(&cmds[i]); + if (IS_ERR(idata[i])) { + err = PTR_ERR(idata[i]); + num_of_cmds = i; + goto cmd_err; + } + idata[i]->rpmb = rpmb; + } + + get_device(&rpmb->dev); + mmc_blk_get(rpmb->md->disk); + + err = mmc_blk_rpmb_process(rpmb->md, idata, num_of_cmds); + +cmd_err: + for (i = 0; i < num_of_cmds; i++) + kfree(idata[i]); + + kfree(idata); + + put_device(&rpmb->dev); + mmc_blk_put(rpmb->md); + + return err; +} + +static int mmc_blk_rpmb_get_capacity(struct device *dev, u8 target) +{ + struct mmc_rpmb_data *rpmb = dev_get_drvdata(dev); + struct mmc_card *card; + + card = rpmb->md->queue.card; + return card->ext_csd.raw_rpmb_size_mult; +} + +static struct rpmb_ops mmc_rpmb_dev_ops = { + .cmd_seq = mmc_blk_rpmb_cmd_seq, + .get_capacity = mmc_blk_rpmb_get_capacity, + .type = RPMB_TYPE_EMMC, + .auth_method = RPMB_HMAC_ALGO_SHA_256, +}; + +static void mmc_blk_rpmb_unset_dev_id(struct rpmb_ops *ops) +{ + kfree(ops->dev_id); + ops->dev_id = NULL; +} + +static int mmc_blk_rpmb_set_dev_id(struct rpmb_ops *ops, struct mmc_card *card) +{ + char *id; + + id = kmalloc(sizeof(card->raw_cid), GFP_KERNEL); + if (!id) + return -ENOMEM; + + memcpy(id, card->raw_cid, sizeof(card->raw_cid)); + ops->dev_id = id; + ops->dev_id_len = sizeof(card->raw_cid); + + return 0; +} + +static void mmc_blk_rpmb_set_cap(struct rpmb_ops *ops, + struct mmc_card *card) +{ + u16 rel_wr_cnt; + + /* RPMB blocks are written in half sectors hence '* 2' */ + 
rel_wr_cnt = card->ext_csd.rel_sectors * 2; + /* eMMC 5.1 may support RPMB 8K (32) frames */ + if (card->ext_csd.rev >= 8) { + if (card->ext_csd.rel_param & EXT_CSD_WR_REL_PARAM_EN) + rel_wr_cnt = 32; + else + rel_wr_cnt = 2; + } + ops->wr_cnt_max = rel_wr_cnt; + ops->rd_cnt_max = card->host->max_blk_count; + ops->block_size = 1; /* 256B */ +} + +static void mmc_blk_rpmb_add(struct mmc_card *card) +{ + struct mmc_blk_data *md = dev_get_drvdata(&card->dev); + struct rpmb_dev *rdev; + struct mmc_rpmb_data *rpmb; + u8 i = 0; + + mmc_blk_rpmb_set_dev_id(&mmc_rpmb_dev_ops, card); + mmc_blk_rpmb_set_cap(&mmc_rpmb_dev_ops, card); + + /* Add RPMB partitions */ + list_for_each_entry(rpmb, &md->rpmbs, node) { + rdev = rpmb_dev_register(&rpmb->dev, i++, &mmc_rpmb_dev_ops); + if (IS_ERR(rdev)) { + pr_warn("%s: cannot register to rpmb %ld\n", + dev_name(&rpmb->dev), PTR_ERR(rdev)); + } + } +} + +static void mmc_blk_rpmb_remove(struct mmc_card *card) +{ + struct mmc_blk_data *md = dev_get_drvdata(&card->dev); + struct mmc_rpmb_data *rpmb; + u8 i = 0; + + list_for_each_entry(rpmb, &md->rpmbs, node) + rpmb_dev_unregister_by_device(&rpmb->dev, i++); + + mmc_blk_rpmb_unset_dev_id(&mmc_rpmb_dev_ops); +} + static void mmc_blk_issue_discard_rq(struct mmc_queue *mq, struct request *req) { struct mmc_blk_data *md = mq->blkdata; @@ -2946,6 +3163,9 @@ static int mmc_blk_probe(struct mmc_card *card) goto out; } + /* Add rpmb layer */ + mmc_blk_rpmb_add(card); + /* Add two debugfs entries */ mmc_blk_add_debugfs(card, md); @@ -2974,6 +3194,7 @@ static void mmc_blk_remove(struct mmc_card *card) struct mmc_blk_data *md = dev_get_drvdata(&card->dev); mmc_blk_remove_debugfs(card, md); + mmc_blk_rpmb_remove(card); mmc_blk_remove_parts(card, md); pm_runtime_get_sync(&card->dev); if (md->part_curr != md->part_type) { diff --git a/drivers/mmc/core/mmc.c b/drivers/mmc/core/mmc.c index bc1bd2c256132..55997cf84b39f 100644 --- a/drivers/mmc/core/mmc.c +++ b/drivers/mmc/core/mmc.c @@ -30,6 +30,7 @@ 
#include "pwrseq.h" #define DEFAULT_CMD6_TIMEOUT_MS 500 +#define MIN_CACHE_EN_TIMEOUT_MS 1600 static const unsigned int tran_exp[] = { 10000, 100000, 1000000, 10000000, @@ -526,8 +527,7 @@ static int mmc_decode_ext_csd(struct mmc_card *card, u8 *ext_csd) card->cid.year += 16; /* check whether the eMMC card supports BKOPS */ - if (!mmc_card_broken_hpi(card) && - ext_csd[EXT_CSD_BKOPS_SUPPORT] & 0x1) { + if (ext_csd[EXT_CSD_BKOPS_SUPPORT] & 0x1) { card->ext_csd.bkops = 1; card->ext_csd.man_bkops_en = (ext_csd[EXT_CSD_BKOPS_EN] & @@ -1782,20 +1782,26 @@ static int mmc_init_card(struct mmc_host *host, u32 ocr, if (err) { pr_warn("%s: Enabling HPI failed\n", mmc_hostname(card->host)); + card->ext_csd.hpi_en = 0; err = 0; - } else + } else { card->ext_csd.hpi_en = 1; + } } /* - * If cache size is higher than 0, this indicates - * the existence of cache and it can be turned on. + * If cache size is higher than 0, this indicates the existence of cache + * and it can be turned on. Note that some eMMCs from Micron has been + * reported to need ~800 ms timeout, while enabling the cache after + * sudden power failure tests. Let's extend the timeout to a minimum of + * MIN_CACHE_EN_TIMEOUT_MS and do it for all cards.
*/ - if (!mmc_card_broken_hpi(card) && - card->ext_csd.cache_size > 0) { + if (card->ext_csd.cache_size > 0) { + unsigned int timeout_ms = MIN_CACHE_EN_TIMEOUT_MS; + + timeout_ms = max(card->ext_csd.generic_cmd6_time, timeout_ms); err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, - EXT_CSD_CACHE_CTRL, 1, - card->ext_csd.generic_cmd6_time); + EXT_CSD_CACHE_CTRL, 1, timeout_ms); if (err && err != -EBADMSG) goto free_card; diff --git a/drivers/mmc/host/atmel-mci.c b/drivers/mmc/host/atmel-mci.c index be53044086c76..fbc56ee996827 100644 --- a/drivers/mmc/host/atmel-mci.c +++ b/drivers/mmc/host/atmel-mci.c @@ -1954,13 +1954,14 @@ static void atmci_tasklet_func(unsigned long priv) } atmci_request_end(host, host->mrq); - state = STATE_IDLE; + goto unlock; /* atmci_request_end() sets host->state */ break; } } while (state != prev_state); host->state = state; +unlock: spin_unlock(&host->lock); } diff --git a/drivers/mmc/host/dw_mmc-bluefield.c b/drivers/mmc/host/dw_mmc-bluefield.c index 54c3fbb4a3918..db56d4f58aaab 100644 --- a/drivers/mmc/host/dw_mmc-bluefield.c +++ b/drivers/mmc/host/dw_mmc-bluefield.c @@ -1,11 +1,6 @@ // SPDX-License-Identifier: GPL-2.0 /* * Copyright (C) 2018 Mellanox Technologies. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. 
*/ #include diff --git a/drivers/mmc/host/meson-gx-mmc.c b/drivers/mmc/host/meson-gx-mmc.c index c201c378537e4..ef9deaa361c73 100644 --- a/drivers/mmc/host/meson-gx-mmc.c +++ b/drivers/mmc/host/meson-gx-mmc.c @@ -174,6 +174,8 @@ struct meson_host { struct sd_emmc_desc *descs; dma_addr_t descs_dma_addr; + int irq; + bool vqmmc_enabled; }; @@ -1181,7 +1183,7 @@ static int meson_mmc_probe(struct platform_device *pdev) struct resource *res; struct meson_host *host; struct mmc_host *mmc; - int ret, irq; + int ret; mmc = mmc_alloc_host(sizeof(struct meson_host), &pdev->dev); if (!mmc) @@ -1228,8 +1230,8 @@ static int meson_mmc_probe(struct platform_device *pdev) goto free_host; } - irq = platform_get_irq(pdev, 0); - if (irq <= 0) { + host->irq = platform_get_irq(pdev, 0); + if (host->irq <= 0) { dev_err(&pdev->dev, "failed to get interrupt resource.\n"); ret = -EINVAL; goto free_host; @@ -1283,9 +1285,8 @@ static int meson_mmc_probe(struct platform_device *pdev) writel(IRQ_CRC_ERR | IRQ_TIMEOUTS | IRQ_END_OF_CHAIN, host->regs + SD_EMMC_IRQ_EN); - ret = devm_request_threaded_irq(&pdev->dev, irq, meson_mmc_irq, - meson_mmc_irq_thread, IRQF_SHARED, - NULL, host); + ret = request_threaded_irq(host->irq, meson_mmc_irq, + meson_mmc_irq_thread, IRQF_SHARED, NULL, host); if (ret) goto err_init_clk; @@ -1303,7 +1304,7 @@ static int meson_mmc_probe(struct platform_device *pdev) if (host->bounce_buf == NULL) { dev_err(host->dev, "Unable to map allocate DMA bounce buffer.\n"); ret = -ENOMEM; - goto err_init_clk; + goto err_free_irq; } host->descs = dma_alloc_coherent(host->dev, SD_EMMC_DESC_BUF_LEN, @@ -1322,6 +1323,8 @@ static int meson_mmc_probe(struct platform_device *pdev) err_bounce_buf: dma_free_coherent(host->dev, host->bounce_buf_size, host->bounce_buf, host->bounce_dma_addr); +err_free_irq: + free_irq(host->irq, host); err_init_clk: clk_disable_unprepare(host->mmc_clk); err_core_clk: @@ -1339,6 +1342,7 @@ static int meson_mmc_remove(struct platform_device *pdev) /* disable 
interrupts */ writel(0, host->regs + SD_EMMC_IRQ_EN); + free_irq(host->irq, host); dma_free_coherent(host->dev, SD_EMMC_DESC_BUF_LEN, host->descs, host->descs_dma_addr); diff --git a/drivers/mmc/host/omap.c b/drivers/mmc/host/omap.c index adf32682f27a3..c60a7625b1fab 100644 --- a/drivers/mmc/host/omap.c +++ b/drivers/mmc/host/omap.c @@ -104,6 +104,7 @@ struct mmc_omap_slot { unsigned int vdd; u16 saved_con; u16 bus_mode; + u16 power_mode; unsigned int fclk_freq; struct tasklet_struct cover_tasklet; @@ -1157,7 +1158,7 @@ static void mmc_omap_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) struct mmc_omap_slot *slot = mmc_priv(mmc); struct mmc_omap_host *host = slot->host; int i, dsor; - int clk_enabled; + int clk_enabled, init_stream; mmc_omap_select_slot(slot, 0); @@ -1167,6 +1168,7 @@ static void mmc_omap_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) slot->vdd = ios->vdd; clk_enabled = 0; + init_stream = 0; switch (ios->power_mode) { case MMC_POWER_OFF: mmc_omap_set_power(slot, 0, ios->vdd); @@ -1174,13 +1176,17 @@ static void mmc_omap_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) case MMC_POWER_UP: /* Cannot touch dsor yet, just power up MMC */ mmc_omap_set_power(slot, 1, ios->vdd); + slot->power_mode = ios->power_mode; goto exit; case MMC_POWER_ON: mmc_omap_fclk_enable(host, 1); clk_enabled = 1; dsor |= 1 << 11; + if (slot->power_mode != MMC_POWER_ON) + init_stream = 1; break; } + slot->power_mode = ios->power_mode; if (slot->bus_mode != ios->bus_mode) { if (slot->pdata->set_bus_mode != NULL) @@ -1196,7 +1202,7 @@ static void mmc_omap_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) for (i = 0; i < 2; i++) OMAP_MMC_WRITE(host, CON, dsor); slot->saved_con = dsor; - if (ios->power_mode == MMC_POWER_ON) { + if (init_stream) { /* worst case at 400kHz, 80 cycles makes 200 microsecs */ int usecs = 250; @@ -1234,6 +1240,7 @@ static int mmc_omap_new_slot(struct mmc_omap_host *host, int id) slot->host = host; slot->mmc = mmc; slot->id = id; + 
slot->power_mode = MMC_POWER_UNDEFINED; slot->pdata = &host->pdata->slots[id]; host->slots[id] = slot; diff --git a/drivers/mmc/host/omap_hsmmc.c b/drivers/mmc/host/omap_hsmmc.c index 68760d4a5d3da..b23c57e07f369 100644 --- a/drivers/mmc/host/omap_hsmmc.c +++ b/drivers/mmc/host/omap_hsmmc.c @@ -2066,7 +2066,6 @@ static int omap_hsmmc_probe(struct platform_device *pdev) mmc->max_blk_size = 512; /* Block Length at max can be 1024 */ mmc->max_blk_count = 0xFFFF; /* No. of Blocks is 16 bits */ mmc->max_req_size = mmc->max_blk_size * mmc->max_blk_count; - mmc->max_seg_size = mmc->max_req_size; mmc->caps |= MMC_CAP_MMC_HIGHSPEED | MMC_CAP_SD_HIGHSPEED | MMC_CAP_WAIT_WHILE_BUSY | MMC_CAP_ERASE | MMC_CAP_CMD23; @@ -2096,6 +2095,17 @@ static int omap_hsmmc_probe(struct platform_device *pdev) goto err_irq; } + /* + * Limit the maximum segment size to the lower of the request size + * and the DMA engine device segment size limits. In reality, with + * 32-bit transfers, the DMA engine can do longer segments than this + * but there is no way to represent that in the DMA model - if we + * increase this figure here, we get warnings from the DMA API debug. 
+ */ + mmc->max_seg_size = min3(mmc->max_req_size, + dma_get_max_seg_size(host->rx_chan->device->dev), + dma_get_max_seg_size(host->tx_chan->device->dev)); + /* Request IRQ for MMC operations */ ret = devm_request_irq(&pdev->dev, host->irq, omap_hsmmc_irq, 0, mmc_hostname(mmc), host); diff --git a/drivers/mmc/host/sdhci-acpi.c b/drivers/mmc/host/sdhci-acpi.c index 32321bd596d88..c61109f7b7933 100644 --- a/drivers/mmc/host/sdhci-acpi.c +++ b/drivers/mmc/host/sdhci-acpi.c @@ -76,6 +76,7 @@ struct sdhci_acpi_slot { size_t priv_size; int (*probe_slot)(struct platform_device *, const char *, const char *); int (*remove_slot)(struct platform_device *); + int (*free_slot)(struct platform_device *pdev); int (*setup_host)(struct platform_device *pdev); }; @@ -756,6 +757,9 @@ static int sdhci_acpi_probe(struct platform_device *pdev) err_cleanup: sdhci_cleanup_host(c->host); err_free: + if (c->slot && c->slot->free_slot) + c->slot->free_slot(pdev); + sdhci_free_host(c->host); return err; } @@ -777,6 +781,10 @@ static int sdhci_acpi_remove(struct platform_device *pdev) dead = (sdhci_readl(c->host, SDHCI_INT_STATUS) == ~0); sdhci_remove_host(c->host, dead); + + if (c->slot && c->slot->free_slot) + c->slot->free_slot(pdev); + sdhci_free_host(c->host); return 0; diff --git a/drivers/mmc/host/sdhci-msm.c b/drivers/mmc/host/sdhci-msm.c index 3cc8bfee6c18f..8594659cb5923 100644 --- a/drivers/mmc/host/sdhci-msm.c +++ b/drivers/mmc/host/sdhci-msm.c @@ -258,6 +258,8 @@ struct sdhci_msm_host { bool mci_removed; const struct sdhci_msm_variant_ops *var_ops; const struct sdhci_msm_offset *offset; + bool use_cdr; + u32 transfer_mode; }; static const struct sdhci_msm_offset *sdhci_priv_msm_offset(struct sdhci_host *host) @@ -1025,6 +1027,26 @@ static int sdhci_msm_hs400_dll_calibration(struct sdhci_host *host) return ret; } +static void sdhci_msm_set_cdr(struct sdhci_host *host, bool enable) +{ + const struct sdhci_msm_offset *msm_offset = sdhci_priv_msm_offset(host); + u32 config, oldconfig 
= readl_relaxed(host->ioaddr + + msm_offset->core_dll_config); + + config = oldconfig; + if (enable) { + config |= CORE_CDR_EN; + config &= ~CORE_CDR_EXT_EN; + } else { + config &= ~CORE_CDR_EN; + config |= CORE_CDR_EXT_EN; + } + + if (config != oldconfig) + writel_relaxed(config, host->ioaddr + + msm_offset->core_dll_config); +} + static int sdhci_msm_execute_tuning(struct mmc_host *mmc, u32 opcode) { struct sdhci_host *host = mmc_priv(mmc); @@ -1042,8 +1064,14 @@ static int sdhci_msm_execute_tuning(struct mmc_host *mmc, u32 opcode) if (host->clock <= CORE_FREQ_100MHZ || !(ios.timing == MMC_TIMING_MMC_HS400 || ios.timing == MMC_TIMING_MMC_HS200 || - ios.timing == MMC_TIMING_UHS_SDR104)) + ios.timing == MMC_TIMING_UHS_SDR104)) { + msm_host->use_cdr = false; + sdhci_msm_set_cdr(host, false); return 0; + } + + /* Clock-Data-Recovery used to dynamically adjust RX sampling point */ + msm_host->use_cdr = true; /* * For HS400 tuning in HS200 timing requires: @@ -1525,6 +1553,19 @@ static int __sdhci_msm_check_write(struct sdhci_host *host, u16 val, int reg) case SDHCI_POWER_CONTROL: req_type = !val ? 
REQ_BUS_OFF : REQ_BUS_ON; break; + case SDHCI_TRANSFER_MODE: + msm_host->transfer_mode = val; + break; + case SDHCI_COMMAND: + if (!msm_host->use_cdr) + break; + if ((msm_host->transfer_mode & SDHCI_TRNS_READ) && + SDHCI_GET_CMD(val) != MMC_SEND_TUNING_BLOCK_HS200 && + SDHCI_GET_CMD(val) != MMC_SEND_TUNING_BLOCK) + sdhci_msm_set_cdr(host, true); + else + sdhci_msm_set_cdr(host, false); + break; } if (req_type) { diff --git a/drivers/mmc/host/sdhci-omap.c b/drivers/mmc/host/sdhci-omap.c index 88347ce78f23f..d264391616f93 100644 --- a/drivers/mmc/host/sdhci-omap.c +++ b/drivers/mmc/host/sdhci-omap.c @@ -288,9 +288,9 @@ static int sdhci_omap_execute_tuning(struct mmc_host *mmc, u32 opcode) struct device *dev = omap_host->dev; struct mmc_ios *ios = &mmc->ios; u32 start_window = 0, max_window = 0; + bool dcrc_was_enabled = false; u8 cur_match, prev_match = 0; u32 length = 0, max_len = 0; - u32 ier = host->ier; u32 phase_delay = 0; int ret = 0; u32 reg; @@ -317,9 +317,10 @@ static int sdhci_omap_execute_tuning(struct mmc_host *mmc, u32 opcode) * during the tuning procedure. So disable it during the * tuning procedure. 
*/ - ier &= ~SDHCI_INT_DATA_CRC; - sdhci_writel(host, ier, SDHCI_INT_ENABLE); - sdhci_writel(host, ier, SDHCI_SIGNAL_ENABLE); + if (host->ier & SDHCI_INT_DATA_CRC) { + host->ier &= ~SDHCI_INT_DATA_CRC; + dcrc_was_enabled = true; + } while (phase_delay <= MAX_PHASE_DELAY) { sdhci_omap_set_dll(omap_host, phase_delay); @@ -366,6 +367,9 @@ static int sdhci_omap_execute_tuning(struct mmc_host *mmc, u32 opcode) ret: sdhci_reset(host, SDHCI_RESET_CMD | SDHCI_RESET_DATA); + /* Reenable forbidden interrupt */ + if (dcrc_was_enabled) + host->ier |= SDHCI_INT_DATA_CRC; sdhci_writel(host, host->ier, SDHCI_INT_ENABLE); sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE); return ret; diff --git a/drivers/mmc/host/sdhci-pci-core.c b/drivers/mmc/host/sdhci-pci-core.c index 7bfd366d970da..c4115bae5db18 100644 --- a/drivers/mmc/host/sdhci-pci-core.c +++ b/drivers/mmc/host/sdhci-pci-core.c @@ -12,6 +12,7 @@ * - JMicron (hardware and technical support) */ +#include <linux/bitfield.h> #include <linux/delay.h> #include <linux/highmem.h> #include <linux/module.h> @@ -462,6 +463,9 @@ struct intel_host { u32 dsm_fns; int drv_strength; bool d3_retune; + bool rpm_retune_ok; + u32 glk_rx_ctrl1; + u32 glk_tun_val; }; static const guid_t intel_dsm_guid = @@ -791,6 +795,77 @@ static int glk_emmc_add_host(struct sdhci_pci_slot *slot) return ret; } +#ifdef CONFIG_PM +#define GLK_RX_CTRL1 0x834 +#define GLK_TUN_VAL 0x840 +#define GLK_PATH_PLL GENMASK(13, 8) +#define GLK_DLY GENMASK(6, 0) +/* Workaround firmware failing to restore the tuning value */ +static void glk_rpm_retune_wa(struct sdhci_pci_chip *chip, bool susp) +{ + struct sdhci_pci_slot *slot = chip->slots[0]; + struct intel_host *intel_host = sdhci_pci_priv(slot); + struct sdhci_host *host = slot->host; + u32 glk_rx_ctrl1; + u32 glk_tun_val; + u32 dly; + + if (intel_host->rpm_retune_ok || !mmc_can_retune(host->mmc)) + return; + + glk_rx_ctrl1 = sdhci_readl(host, GLK_RX_CTRL1); + glk_tun_val = sdhci_readl(host, GLK_TUN_VAL); + + if (susp) { + intel_host->glk_rx_ctrl1 = glk_rx_ctrl1; + intel_host->glk_tun_val
= glk_tun_val; + return; + } + + if (!intel_host->glk_tun_val) + return; + + if (glk_rx_ctrl1 != intel_host->glk_rx_ctrl1) { + intel_host->rpm_retune_ok = true; + return; + } + + dly = FIELD_PREP(GLK_DLY, FIELD_GET(GLK_PATH_PLL, glk_rx_ctrl1) + + (intel_host->glk_tun_val << 1)); + if (dly == FIELD_GET(GLK_DLY, glk_rx_ctrl1)) + return; + + glk_rx_ctrl1 = (glk_rx_ctrl1 & ~GLK_DLY) | dly; + sdhci_writel(host, glk_rx_ctrl1, GLK_RX_CTRL1); + + intel_host->rpm_retune_ok = true; + chip->rpm_retune = true; + mmc_retune_needed(host->mmc); + pr_info("%s: Requiring re-tune after rpm resume", mmc_hostname(host->mmc)); +} + +static void glk_rpm_retune_chk(struct sdhci_pci_chip *chip, bool susp) +{ + if (chip->pdev->device == PCI_DEVICE_ID_INTEL_GLK_EMMC && + !chip->rpm_retune) + glk_rpm_retune_wa(chip, susp); +} + +static int glk_runtime_suspend(struct sdhci_pci_chip *chip) +{ + glk_rpm_retune_chk(chip, true); + + return sdhci_cqhci_runtime_suspend(chip); +} + +static int glk_runtime_resume(struct sdhci_pci_chip *chip) +{ + glk_rpm_retune_chk(chip, false); + + return sdhci_cqhci_runtime_resume(chip); +} +#endif + #ifdef CONFIG_ACPI static int ni_set_max_freq(struct sdhci_pci_slot *slot) { @@ -879,8 +954,8 @@ static const struct sdhci_pci_fixes sdhci_intel_glk_emmc = { .resume = sdhci_cqhci_resume, #endif #ifdef CONFIG_PM - .runtime_suspend = sdhci_cqhci_runtime_suspend, - .runtime_resume = sdhci_cqhci_runtime_resume, + .runtime_suspend = glk_runtime_suspend, + .runtime_resume = glk_runtime_resume, #endif .quirks = SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC, .quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN | @@ -1762,8 +1837,13 @@ static struct sdhci_pci_slot *sdhci_pci_probe_slot( device_init_wakeup(&pdev->dev, true); if (slot->cd_idx >= 0) { - ret = mmc_gpiod_request_cd(host->mmc, NULL, slot->cd_idx, + ret = mmc_gpiod_request_cd(host->mmc, "cd", slot->cd_idx, slot->cd_override_level, 0, NULL); + if (ret && ret != -EPROBE_DEFER) + ret = mmc_gpiod_request_cd(host->mmc, NULL, + slot->cd_idx, + 
slot->cd_override_level, + 0, NULL); if (ret == -EPROBE_DEFER) goto remove; diff --git a/drivers/mmc/host/sdhci-pci-o2micro.c b/drivers/mmc/host/sdhci-pci-o2micro.c index 77e9bc4aaee91..cc3ffeffd7a2e 100644 --- a/drivers/mmc/host/sdhci-pci-o2micro.c +++ b/drivers/mmc/host/sdhci-pci-o2micro.c @@ -490,6 +490,9 @@ int sdhci_pci_o2_probe(struct sdhci_pci_chip *chip) pci_write_config_byte(chip->pdev, O2_SD_LOCK_WP, scratch); break; case PCI_DEVICE_ID_O2_SEABIRD0: + if (chip->pdev->revision == 0x01) + chip->quirks |= SDHCI_QUIRK_DELAY_AFTER_POWER; + /* fall through */ case PCI_DEVICE_ID_O2_SEABIRD1: /* UnLock WP */ ret = pci_read_config_byte(chip->pdev, diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c index 1b3fbd9bd5c5b..654051e00117a 100644 --- a/drivers/mmc/host/sdhci.c +++ b/drivers/mmc/host/sdhci.c @@ -193,8 +193,12 @@ void sdhci_reset(struct sdhci_host *host, u8 mask) timeout = ktime_add_ms(ktime_get(), 100); /* hw clears the bit when it's done */ - while (sdhci_readb(host, SDHCI_SOFTWARE_RESET) & mask) { - if (ktime_after(ktime_get(), timeout)) { + while (1) { + bool timedout = ktime_after(ktime_get(), timeout); + + if (!(sdhci_readb(host, SDHCI_SOFTWARE_RESET) & mask)) + break; + if (timedout) { pr_err("%s: Reset 0x%x never completed.\n", mmc_hostname(host->mmc), (int)mask); sdhci_dumpregs(host); @@ -1495,9 +1499,13 @@ void sdhci_enable_clk(struct sdhci_host *host, u16 clk) /* Wait max 20 ms */ timeout = ktime_add_ms(ktime_get(), 20); - while (!((clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL)) - & SDHCI_CLOCK_INT_STABLE)) { - if (ktime_after(ktime_get(), timeout)) { + while (1) { + bool timedout = ktime_after(ktime_get(), timeout); + + clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL); + if (clk & SDHCI_CLOCK_INT_STABLE) + break; + if (timedout) { pr_err("%s: Internal clock never stabilised.\n", mmc_hostname(host->mmc)); sdhci_dumpregs(host); diff --git a/drivers/mtd/devices/Kconfig b/drivers/mtd/devices/Kconfig index e514d57a0419d..aa983422aa970 100644 
--- a/drivers/mtd/devices/Kconfig +++ b/drivers/mtd/devices/Kconfig @@ -207,7 +207,7 @@ comment "Disk-On-Chip Device Drivers" config MTD_DOCG3 tristate "M-Systems Disk-On-Chip G3" select BCH - select BCH_CONST_PARAMS + select BCH_CONST_PARAMS if !MTD_NAND_BCH select BITREVERSE help This provides an MTD device driver for the M-Systems DiskOnChip diff --git a/drivers/mtd/maps/gpio-addr-flash.c b/drivers/mtd/maps/gpio-addr-flash.c index 9d97236932175..2e3a8da3ce72c 100644 --- a/drivers/mtd/maps/gpio-addr-flash.c +++ b/drivers/mtd/maps/gpio-addr-flash.c @@ -238,7 +238,7 @@ static int gpio_flash_probe(struct platform_device *pdev) state->map.copy_to = gf_copy_to; state->map.bankwidth = pdata->width; state->map.size = state->win_size * (1 << state->gpio_count); - state->map.virt = ioremap_nocache(memory->start, state->map.size); + state->map.virt = ioremap_nocache(memory->start, state->win_size); if (!state->map.virt) return -ENOMEM; diff --git a/drivers/mtd/nand/bbt.c b/drivers/mtd/nand/bbt.c index 56cde38b92c03..044adf9138546 100644 --- a/drivers/mtd/nand/bbt.c +++ b/drivers/mtd/nand/bbt.c @@ -27,7 +27,8 @@ int nanddev_bbt_init(struct nand_device *nand) unsigned int nwords = DIV_ROUND_UP(nblocks * bits_per_block, BITS_PER_LONG); - nand->bbt.cache = kzalloc(nwords, GFP_KERNEL); + nand->bbt.cache = kcalloc(nwords, sizeof(*nand->bbt.cache), + GFP_KERNEL); if (!nand->bbt.cache) return -ENOMEM; diff --git a/drivers/mtd/nand/raw/atmel/nand-controller.c b/drivers/mtd/nand/raw/atmel/nand-controller.c index a068b214ebaa7..32e95af486a20 100644 --- a/drivers/mtd/nand/raw/atmel/nand-controller.c +++ b/drivers/mtd/nand/raw/atmel/nand-controller.c @@ -2061,8 +2061,11 @@ atmel_hsmc_nand_controller_legacy_init(struct atmel_hsmc_nand_controller *nc) int ret; nand_np = dev->of_node; - nfc_np = of_find_compatible_node(dev->of_node, NULL, - "atmel,sama5d3-nfc"); + nfc_np = of_get_compatible_child(dev->of_node, "atmel,sama5d3-nfc"); + if (!nfc_np) { + dev_err(dev, "Could not find device 
node for sama5d3-nfc\n"); + return -ENODEV; + } nc->clk = of_clk_get(nfc_np, 0); if (IS_ERR(nc->clk)) { @@ -2472,15 +2475,19 @@ static int atmel_nand_controller_probe(struct platform_device *pdev) } if (caps->legacy_of_bindings) { + struct device_node *nfc_node; u32 ale_offs = 21; /* * If we are parsing legacy DT props and the DT contains a * valid NFC node, forward the request to the sama5 logic. */ - if (of_find_compatible_node(pdev->dev.of_node, NULL, - "atmel,sama5d3-nfc")) + nfc_node = of_get_compatible_child(pdev->dev.of_node, + "atmel,sama5d3-nfc"); + if (nfc_node) { caps = &atmel_sama5_nand_caps; + of_node_put(nfc_node); + } /* * Even if the compatible says we are dealing with an diff --git a/drivers/mtd/nand/raw/denali.c b/drivers/mtd/nand/raw/denali.c index b864b93dd289e..2242e999a76bf 100644 --- a/drivers/mtd/nand/raw/denali.c +++ b/drivers/mtd/nand/raw/denali.c @@ -28,6 +28,7 @@ MODULE_LICENSE("GPL"); #define DENALI_NAND_NAME "denali-nand" +#define DENALI_DEFAULT_OOB_SKIP_BYTES 8 /* for Indexed Addressing */ #define DENALI_INDEXED_CTRL 0x00 @@ -1105,12 +1106,17 @@ static void denali_hw_init(struct denali_nand_info *denali) denali->revision = swab16(ioread32(denali->reg + REVISION)); /* - * tell driver how many bit controller will skip before - * writing ECC code in OOB, this register may be already - * set by firmware. So we read this value out. - * if this value is 0, just let it be. + * Set how many bytes should be skipped before writing data in OOB. + * If a non-zero value has already been set (by firmware or something), + * just use it. Otherwise, set the driver default. 
*/ denali->oob_skip_bytes = ioread32(denali->reg + SPARE_AREA_SKIP_BYTES); + if (!denali->oob_skip_bytes) { + denali->oob_skip_bytes = DENALI_DEFAULT_OOB_SKIP_BYTES; + iowrite32(denali->oob_skip_bytes, + denali->reg + SPARE_AREA_SKIP_BYTES); + } + denali_detect_max_banks(denali); iowrite32(0x0F, denali->reg + RB_PIN_ENABLED); iowrite32(CHIP_EN_DONT_CARE__FLAG, denali->reg + CHIP_ENABLE_DONT_CARE); diff --git a/drivers/mtd/nand/raw/marvell_nand.c b/drivers/mtd/nand/raw/marvell_nand.c index bc2ef52097834..9c90695a885fe 100644 --- a/drivers/mtd/nand/raw/marvell_nand.c +++ b/drivers/mtd/nand/raw/marvell_nand.c @@ -444,9 +444,14 @@ static void marvell_nfc_enable_int(struct marvell_nfc *nfc, u32 int_mask) writel_relaxed(reg & ~int_mask, nfc->regs + NDCR); } -static void marvell_nfc_clear_int(struct marvell_nfc *nfc, u32 int_mask) +static u32 marvell_nfc_clear_int(struct marvell_nfc *nfc, u32 int_mask) { + u32 reg; + + reg = readl_relaxed(nfc->regs + NDSR); writel_relaxed(int_mask, nfc->regs + NDSR); + + return reg & int_mask; } static void marvell_nfc_force_byte_access(struct nand_chip *chip, @@ -613,6 +618,7 @@ static int marvell_nfc_wait_cmdd(struct nand_chip *chip) static int marvell_nfc_wait_op(struct nand_chip *chip, unsigned int timeout_ms) { struct marvell_nfc *nfc = to_marvell_nfc(chip->controller); + u32 pending; int ret; /* Timeout is expressed in ms */ @@ -625,8 +631,13 @@ static int marvell_nfc_wait_op(struct nand_chip *chip, unsigned int timeout_ms) ret = wait_for_completion_timeout(&nfc->complete, msecs_to_jiffies(timeout_ms)); marvell_nfc_disable_int(nfc, NDCR_RDYM); - marvell_nfc_clear_int(nfc, NDSR_RDY(0) | NDSR_RDY(1)); - if (!ret) { + pending = marvell_nfc_clear_int(nfc, NDSR_RDY(0) | NDSR_RDY(1)); + + /* + * In case the interrupt was not served in the required time frame, + * check if the ISR was not served or if something went actually wrong. 
+ */ + if (ret && !pending) { dev_err(nfc->dev, "Timeout waiting for RB signal\n"); return -ETIMEDOUT; } @@ -686,7 +697,7 @@ static irqreturn_t marvell_nfc_isr(int irq, void *dev_id) marvell_nfc_disable_int(nfc, st & NDCR_ALL_INT); - if (!(st & (NDSR_RDDREQ | NDSR_WRDREQ | NDSR_WRCMDREQ))) + if (st & (NDSR_RDY(0) | NDSR_RDY(1))) complete(&nfc->complete); return IRQ_HANDLED; diff --git a/drivers/mtd/nand/raw/omap2.c b/drivers/mtd/nand/raw/omap2.c index 4546ac0bed4a0..b1683d7a7e04d 100644 --- a/drivers/mtd/nand/raw/omap2.c +++ b/drivers/mtd/nand/raw/omap2.c @@ -1938,7 +1938,7 @@ static int omap_nand_attach_chip(struct nand_chip *chip) case NAND_OMAP_PREFETCH_DMA: dma_cap_zero(mask); dma_cap_set(DMA_SLAVE, mask); - info->dma = dma_request_chan(dev, "rxtx"); + info->dma = dma_request_chan(dev->parent, "rxtx"); if (IS_ERR(info->dma)) { dev_err(dev, "DMA engine request failed\n"); diff --git a/drivers/mtd/nand/raw/qcom_nandc.c b/drivers/mtd/nand/raw/qcom_nandc.c index d1d470bb32e42..880e75f63a19b 100644 --- a/drivers/mtd/nand/raw/qcom_nandc.c +++ b/drivers/mtd/nand/raw/qcom_nandc.c @@ -151,15 +151,15 @@ #define NAND_VERSION_MINOR_SHIFT 16 /* NAND OP_CMDs */ -#define PAGE_READ 0x2 -#define PAGE_READ_WITH_ECC 0x3 -#define PAGE_READ_WITH_ECC_SPARE 0x4 -#define PROGRAM_PAGE 0x6 -#define PAGE_PROGRAM_WITH_ECC 0x7 -#define PROGRAM_PAGE_SPARE 0x9 -#define BLOCK_ERASE 0xa -#define FETCH_ID 0xb -#define RESET_DEVICE 0xd +#define OP_PAGE_READ 0x2 +#define OP_PAGE_READ_WITH_ECC 0x3 +#define OP_PAGE_READ_WITH_ECC_SPARE 0x4 +#define OP_PROGRAM_PAGE 0x6 +#define OP_PAGE_PROGRAM_WITH_ECC 0x7 +#define OP_PROGRAM_PAGE_SPARE 0x9 +#define OP_BLOCK_ERASE 0xa +#define OP_FETCH_ID 0xb +#define OP_RESET_DEVICE 0xd /* Default Value for NAND_DEV_CMD_VLD */ #define NAND_DEV_CMD_VLD_VAL (READ_START_VLD | WRITE_START_VLD | \ @@ -692,11 +692,11 @@ static void update_rw_regs(struct qcom_nand_host *host, int num_cw, bool read) if (read) { if (host->use_ecc) - cmd = PAGE_READ_WITH_ECC | PAGE_ACC | 
LAST_PAGE; + cmd = OP_PAGE_READ_WITH_ECC | PAGE_ACC | LAST_PAGE; else - cmd = PAGE_READ | PAGE_ACC | LAST_PAGE; + cmd = OP_PAGE_READ | PAGE_ACC | LAST_PAGE; } else { - cmd = PROGRAM_PAGE | PAGE_ACC | LAST_PAGE; + cmd = OP_PROGRAM_PAGE | PAGE_ACC | LAST_PAGE; } if (host->use_ecc) { @@ -1170,7 +1170,7 @@ static int nandc_param(struct qcom_nand_host *host) * in use. we configure the controller to perform a raw read of 512 * bytes to read onfi params */ - nandc_set_reg(nandc, NAND_FLASH_CMD, PAGE_READ | PAGE_ACC | LAST_PAGE); + nandc_set_reg(nandc, NAND_FLASH_CMD, OP_PAGE_READ | PAGE_ACC | LAST_PAGE); nandc_set_reg(nandc, NAND_ADDR0, 0); nandc_set_reg(nandc, NAND_ADDR1, 0); nandc_set_reg(nandc, NAND_DEV0_CFG0, 0 << CW_PER_PAGE @@ -1224,7 +1224,7 @@ static int erase_block(struct qcom_nand_host *host, int page_addr) struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip); nandc_set_reg(nandc, NAND_FLASH_CMD, - BLOCK_ERASE | PAGE_ACC | LAST_PAGE); + OP_BLOCK_ERASE | PAGE_ACC | LAST_PAGE); nandc_set_reg(nandc, NAND_ADDR0, page_addr); nandc_set_reg(nandc, NAND_ADDR1, 0); nandc_set_reg(nandc, NAND_DEV0_CFG0, @@ -1255,7 +1255,7 @@ static int read_id(struct qcom_nand_host *host, int column) if (column == -1) return 0; - nandc_set_reg(nandc, NAND_FLASH_CMD, FETCH_ID); + nandc_set_reg(nandc, NAND_FLASH_CMD, OP_FETCH_ID); nandc_set_reg(nandc, NAND_ADDR0, column); nandc_set_reg(nandc, NAND_ADDR1, 0); nandc_set_reg(nandc, NAND_FLASH_CHIP_SELECT, @@ -1276,7 +1276,7 @@ static int reset(struct qcom_nand_host *host) struct nand_chip *chip = &host->chip; struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip); - nandc_set_reg(nandc, NAND_FLASH_CMD, RESET_DEVICE); + nandc_set_reg(nandc, NAND_FLASH_CMD, OP_RESET_DEVICE); nandc_set_reg(nandc, NAND_EXEC_CMD, 1); write_reg_dma(nandc, NAND_FLASH_CMD, 1, NAND_BAM_NEXT_SGL); @@ -2839,6 +2839,16 @@ static int qcom_nand_host_init_and_register(struct qcom_nand_controller *nandc, if (ret) return ret; + if 
(nandc->props->is_bam) { + free_bam_transaction(nandc); + nandc->bam_txn = alloc_bam_transaction(nandc); + if (!nandc->bam_txn) { + dev_err(nandc->dev, + "failed to allocate bam transaction\n"); + return -ENOMEM; + } + } + ret = mtd_device_register(mtd, NULL, 0); if (ret) nand_cleanup(chip); @@ -2853,16 +2863,6 @@ static int qcom_probe_nand_devices(struct qcom_nand_controller *nandc) struct qcom_nand_host *host; int ret; - if (nandc->props->is_bam) { - free_bam_transaction(nandc); - nandc->bam_txn = alloc_bam_transaction(nandc); - if (!nandc->bam_txn) { - dev_err(nandc->dev, - "failed to allocate bam transaction\n"); - return -ENOMEM; - } - } - for_each_available_child_of_node(dn, child) { host = devm_kzalloc(dev, sizeof(*host), GFP_KERNEL); if (!host) { diff --git a/drivers/mtd/spi-nor/Kconfig b/drivers/mtd/spi-nor/Kconfig index 6cc9c929ff571..37775fc09e095 100644 --- a/drivers/mtd/spi-nor/Kconfig +++ b/drivers/mtd/spi-nor/Kconfig @@ -41,7 +41,7 @@ config SPI_ASPEED_SMC config SPI_ATMEL_QUADSPI tristate "Atmel Quad SPI Controller" - depends on ARCH_AT91 || (ARM && COMPILE_TEST) + depends on ARCH_AT91 || (ARM && COMPILE_TEST && !ARCH_EBSA110) depends on OF && HAS_IOMEM help This enables support for the Quad SPI controller in master mode. diff --git a/drivers/mtd/spi-nor/cadence-quadspi.c b/drivers/mtd/spi-nor/cadence-quadspi.c index 8e714fbfa5212..0806c7a81c0f7 100644 --- a/drivers/mtd/spi-nor/cadence-quadspi.c +++ b/drivers/mtd/spi-nor/cadence-quadspi.c @@ -644,9 +644,23 @@ static int cqspi_indirect_write_execute(struct spi_nor *nor, loff_t to_addr, ndelay(cqspi->wr_delay); while (remaining > 0) { + size_t write_words, mod_bytes; + write_bytes = remaining > page_size ? page_size : remaining; - iowrite32_rep(cqspi->ahb_base, txbuf, - DIV_ROUND_UP(write_bytes, 4)); + write_words = write_bytes / 4; + mod_bytes = write_bytes % 4; + /* Write 4 bytes at a time then single bytes. 
*/ + if (write_words) { + iowrite32_rep(cqspi->ahb_base, txbuf, write_words); + txbuf += (write_words * 4); + } + if (mod_bytes) { + unsigned int temp = 0xFFFFFFFF; + + memcpy(&temp, txbuf, mod_bytes); + iowrite32(temp, cqspi->ahb_base); + txbuf += mod_bytes; + } if (!wait_for_completion_timeout(&cqspi->transfer_complete, msecs_to_jiffies(CQSPI_TIMEOUT_MS))) { @@ -655,7 +669,6 @@ static int cqspi_indirect_write_execute(struct spi_nor *nor, loff_t to_addr, goto failwr; } - txbuf += write_bytes; remaining -= write_bytes; if (remaining > 0) @@ -996,7 +1009,7 @@ static int cqspi_direct_read_execute(struct spi_nor *nor, u_char *buf, err_unmap: dma_unmap_single(nor->dev, dma_dst, len, DMA_DEV_TO_MEM); - return 0; + return ret; } static ssize_t cqspi_read(struct spi_nor *nor, loff_t from, diff --git a/drivers/mtd/spi-nor/fsl-quadspi.c b/drivers/mtd/spi-nor/fsl-quadspi.c index 7d9620c7ff6c5..1ff3430f82c88 100644 --- a/drivers/mtd/spi-nor/fsl-quadspi.c +++ b/drivers/mtd/spi-nor/fsl-quadspi.c @@ -478,6 +478,7 @@ static int fsl_qspi_get_seqid(struct fsl_qspi *q, u8 cmd) { switch (cmd) { case SPINOR_OP_READ_1_1_4: + case SPINOR_OP_READ_1_1_4_4B: return SEQID_READ; case SPINOR_OP_WREN: return SEQID_WREN; @@ -543,6 +544,9 @@ fsl_qspi_runcmd(struct fsl_qspi *q, u8 cmd, unsigned int addr, int len) /* trigger the LUT now */ seqid = fsl_qspi_get_seqid(q, cmd); + if (seqid < 0) + return seqid; + qspi_writel(q, (seqid << QUADSPI_IPCR_SEQID_SHIFT) | len, base + QUADSPI_IPCR); @@ -671,7 +675,7 @@ static void fsl_qspi_set_map_addr(struct fsl_qspi *q) * causes the controller to clear the buffer, and use the sequence pointed * by the QUADSPI_BFGENCR[SEQID] to initiate a read from the flash. */ -static void fsl_qspi_init_ahb_read(struct fsl_qspi *q) +static int fsl_qspi_init_ahb_read(struct fsl_qspi *q) { void __iomem *base = q->iobase; int seqid; @@ -696,8 +700,13 @@ static void fsl_qspi_init_ahb_read(struct fsl_qspi *q) /* Set the default lut sequence for AHB Read. 
*/ seqid = fsl_qspi_get_seqid(q, q->nor[0].read_opcode); + if (seqid < 0) + return seqid; + qspi_writel(q, seqid << QUADSPI_BFGENCR_SEQID_SHIFT, q->iobase + QUADSPI_BFGENCR); + + return 0; } /* This function was used to prepare and enable QSPI clock */ @@ -805,9 +814,7 @@ static int fsl_qspi_nor_setup_last(struct fsl_qspi *q) fsl_qspi_init_lut(q); /* Init for AHB read */ - fsl_qspi_init_ahb_read(q); - - return 0; + return fsl_qspi_init_ahb_read(q); } static const struct of_device_id fsl_qspi_dt_ids[] = { diff --git a/drivers/mtd/spi-nor/intel-spi-pci.c b/drivers/mtd/spi-nor/intel-spi-pci.c index c0976f2e3dd19..872b409226081 100644 --- a/drivers/mtd/spi-nor/intel-spi-pci.c +++ b/drivers/mtd/spi-nor/intel-spi-pci.c @@ -65,6 +65,7 @@ static void intel_spi_pci_remove(struct pci_dev *pdev) static const struct pci_device_id intel_spi_pci_ids[] = { { PCI_VDEVICE(INTEL, 0x18e0), (unsigned long)&bxt_info }, { PCI_VDEVICE(INTEL, 0x19e0), (unsigned long)&bxt_info }, + { PCI_VDEVICE(INTEL, 0x34a4), (unsigned long)&bxt_info }, { PCI_VDEVICE(INTEL, 0xa1a4), (unsigned long)&bxt_info }, { PCI_VDEVICE(INTEL, 0xa224), (unsigned long)&bxt_info }, { }, diff --git a/drivers/net/bonding/bond_3ad.c b/drivers/net/bonding/bond_3ad.c index f43fb2f958a54..93dfcef8afc4b 100644 --- a/drivers/net/bonding/bond_3ad.c +++ b/drivers/net/bonding/bond_3ad.c @@ -2086,6 +2086,9 @@ void bond_3ad_unbind_slave(struct slave *slave) aggregator->aggregator_identifier); /* Tell the partner that this port is not suitable for aggregation */ + port->actor_oper_port_state &= ~AD_STATE_SYNCHRONIZATION; + port->actor_oper_port_state &= ~AD_STATE_COLLECTING; + port->actor_oper_port_state &= ~AD_STATE_DISTRIBUTING; port->actor_oper_port_state &= ~AD_STATE_AGGREGATION; __update_lacpdu_from_port(port); ad_lacpdu_send(port); diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c index ee28ec9e0abad..a6fcc5c96070e 100644 --- a/drivers/net/bonding/bond_main.c +++ b/drivers/net/bonding/bond_main.c 
@@ -1947,6 +1947,9 @@ static int __bond_release_one(struct net_device *bond_dev, if (!bond_has_slaves(bond)) { bond_set_carrier(bond); eth_hw_addr_random(bond_dev); + bond->nest_level = SINGLE_DEPTH_NESTING; + } else { + bond->nest_level = dev_get_nest_level(bond_dev) + 1; } unblock_netpoll_tx(); @@ -3111,13 +3114,13 @@ static int bond_slave_netdev_event(unsigned long event, case NETDEV_CHANGE: /* For 802.3ad mode only: * Getting invalid Speed/Duplex values here will put slave - * in weird state. So mark it as link-down for the time + * in weird state. So mark it as link-fail for the time * being and let link-monitoring (miimon) set it right when * correct speeds/duplex are available. */ if (bond_update_speed_duplex(slave) && BOND_MODE(bond) == BOND_MODE_8023AD) - slave->link = BOND_LINK_DOWN; + slave->link = BOND_LINK_FAIL; if (BOND_MODE(bond) == BOND_MODE_8023AD) bond_3ad_adapter_speed_duplex_changed(slave); diff --git a/drivers/net/bonding/bond_netlink.c b/drivers/net/bonding/bond_netlink.c index 9697977b80f04..6b9ad86732188 100644 --- a/drivers/net/bonding/bond_netlink.c +++ b/drivers/net/bonding/bond_netlink.c @@ -638,8 +638,7 @@ static int bond_fill_info(struct sk_buff *skb, goto nla_put_failure; if (nla_put(skb, IFLA_BOND_AD_ACTOR_SYSTEM, - sizeof(bond->params.ad_actor_system), - &bond->params.ad_actor_system)) + ETH_ALEN, &bond->params.ad_actor_system)) goto nla_put_failure; } if (!bond_3ad_get_active_agg_info(bond, &info)) { diff --git a/drivers/net/can/dev.c b/drivers/net/can/dev.c index 49163570a63af..c05e4d50d43d7 100644 --- a/drivers/net/can/dev.c +++ b/drivers/net/can/dev.c @@ -477,6 +477,33 @@ void can_put_echo_skb(struct sk_buff *skb, struct net_device *dev, } EXPORT_SYMBOL_GPL(can_put_echo_skb); +struct sk_buff *__can_get_echo_skb(struct net_device *dev, unsigned int idx, u8 *len_ptr) +{ + struct can_priv *priv = netdev_priv(dev); + + if (idx >= priv->echo_skb_max) { + netdev_err(dev, "%s: BUG! 
Trying to access can_priv::echo_skb out of bounds (%u/max %u)\n", + __func__, idx, priv->echo_skb_max); + return NULL; + } + + if (priv->echo_skb[idx]) { + /* Using "struct canfd_frame::len" for the frame + * length is supported on both CAN and CANFD frames. + */ + struct sk_buff *skb = priv->echo_skb[idx]; + struct canfd_frame *cf = (struct canfd_frame *)skb->data; + u8 len = cf->len; + + *len_ptr = len; + priv->echo_skb[idx] = NULL; + + return skb; + } + + return NULL; +} + /* * Get the skb from the stack and loop it back locally * @@ -486,22 +513,16 @@ EXPORT_SYMBOL_GPL(can_put_echo_skb); */ unsigned int can_get_echo_skb(struct net_device *dev, unsigned int idx) { - struct can_priv *priv = netdev_priv(dev); - - BUG_ON(idx >= priv->echo_skb_max); - - if (priv->echo_skb[idx]) { - struct sk_buff *skb = priv->echo_skb[idx]; - struct can_frame *cf = (struct can_frame *)skb->data; - u8 dlc = cf->can_dlc; + struct sk_buff *skb; + u8 len; - netif_rx(priv->echo_skb[idx]); - priv->echo_skb[idx] = NULL; + skb = __can_get_echo_skb(dev, idx, &len); + if (!skb) + return 0; - return dlc; - } + netif_rx(skb); - return 0; + return len; } EXPORT_SYMBOL_GPL(can_get_echo_skb); diff --git a/drivers/net/can/flexcan.c b/drivers/net/can/flexcan.c index 8e972ef086376..ae219b8a7754a 100644 --- a/drivers/net/can/flexcan.c +++ b/drivers/net/can/flexcan.c @@ -135,13 +135,12 @@ /* FLEXCAN interrupt flag register (IFLAG) bits */ /* Errata ERR005829 step7: Reserve first valid MB */ -#define FLEXCAN_TX_MB_RESERVED_OFF_FIFO 8 -#define FLEXCAN_TX_MB_OFF_FIFO 9 +#define FLEXCAN_TX_MB_RESERVED_OFF_FIFO 8 #define FLEXCAN_TX_MB_RESERVED_OFF_TIMESTAMP 0 -#define FLEXCAN_TX_MB_OFF_TIMESTAMP 1 -#define FLEXCAN_RX_MB_OFF_TIMESTAMP_FIRST (FLEXCAN_TX_MB_OFF_TIMESTAMP + 1) -#define FLEXCAN_RX_MB_OFF_TIMESTAMP_LAST 63 -#define FLEXCAN_IFLAG_MB(x) BIT(x) +#define FLEXCAN_TX_MB 63 +#define FLEXCAN_RX_MB_OFF_TIMESTAMP_FIRST (FLEXCAN_TX_MB_RESERVED_OFF_TIMESTAMP + 1) +#define FLEXCAN_RX_MB_OFF_TIMESTAMP_LAST 
(FLEXCAN_TX_MB - 1) +#define FLEXCAN_IFLAG_MB(x) BIT(x & 0x1f) #define FLEXCAN_IFLAG_RX_FIFO_OVERFLOW BIT(7) #define FLEXCAN_IFLAG_RX_FIFO_WARN BIT(6) #define FLEXCAN_IFLAG_RX_FIFO_AVAILABLE BIT(5) @@ -259,9 +258,7 @@ struct flexcan_priv { struct can_rx_offload offload; struct flexcan_regs __iomem *regs; - struct flexcan_mb __iomem *tx_mb; struct flexcan_mb __iomem *tx_mb_reserved; - u8 tx_mb_idx; u32 reg_ctrl_default; u32 reg_imask1_default; u32 reg_imask2_default; @@ -515,6 +512,7 @@ static int flexcan_get_berr_counter(const struct net_device *dev, static netdev_tx_t flexcan_start_xmit(struct sk_buff *skb, struct net_device *dev) { const struct flexcan_priv *priv = netdev_priv(dev); + struct flexcan_regs __iomem *regs = priv->regs; struct can_frame *cf = (struct can_frame *)skb->data; u32 can_id; u32 data; @@ -537,17 +535,17 @@ static netdev_tx_t flexcan_start_xmit(struct sk_buff *skb, struct net_device *de if (cf->can_dlc > 0) { data = be32_to_cpup((__be32 *)&cf->data[0]); - priv->write(data, &priv->tx_mb->data[0]); + priv->write(data, &regs->mb[FLEXCAN_TX_MB].data[0]); } if (cf->can_dlc > 4) { data = be32_to_cpup((__be32 *)&cf->data[4]); - priv->write(data, &priv->tx_mb->data[1]); + priv->write(data, &regs->mb[FLEXCAN_TX_MB].data[1]); } can_put_echo_skb(skb, dev, 0); - priv->write(can_id, &priv->tx_mb->can_id); - priv->write(ctrl, &priv->tx_mb->can_ctrl); + priv->write(can_id, &regs->mb[FLEXCAN_TX_MB].can_id); + priv->write(ctrl, &regs->mb[FLEXCAN_TX_MB].can_ctrl); /* Errata ERR005829 step8: * Write twice INACTIVE(0x8) code to first MB.
@@ -563,9 +561,13 @@ static netdev_tx_t flexcan_start_xmit(struct sk_buff *skb, struct net_device *de static void flexcan_irq_bus_err(struct net_device *dev, u32 reg_esr) { struct flexcan_priv *priv = netdev_priv(dev); + struct flexcan_regs __iomem *regs = priv->regs; struct sk_buff *skb; struct can_frame *cf; bool rx_errors = false, tx_errors = false; + u32 timestamp; + + timestamp = priv->read(&regs->timer) << 16; skb = alloc_can_err_skb(dev, &cf); if (unlikely(!skb)) @@ -612,17 +614,21 @@ static void flexcan_irq_bus_err(struct net_device *dev, u32 reg_esr) if (tx_errors) dev->stats.tx_errors++; - can_rx_offload_irq_queue_err_skb(&priv->offload, skb); + can_rx_offload_queue_sorted(&priv->offload, skb, timestamp); } static void flexcan_irq_state(struct net_device *dev, u32 reg_esr) { struct flexcan_priv *priv = netdev_priv(dev); + struct flexcan_regs __iomem *regs = priv->regs; struct sk_buff *skb; struct can_frame *cf; enum can_state new_state, rx_state, tx_state; int flt; struct can_berr_counter bec; + u32 timestamp; + + timestamp = priv->read(&regs->timer) << 16; flt = reg_esr & FLEXCAN_ESR_FLT_CONF_MASK; if (likely(flt == FLEXCAN_ESR_FLT_CONF_ACTIVE)) { @@ -652,7 +658,7 @@ static void flexcan_irq_state(struct net_device *dev, u32 reg_esr) if (unlikely(new_state == CAN_STATE_BUS_OFF)) can_bus_off(dev); - can_rx_offload_irq_queue_err_skb(&priv->offload, skb); + can_rx_offload_queue_sorted(&priv->offload, skb, timestamp); } static inline struct flexcan_priv *rx_offload_to_priv(struct can_rx_offload *offload) @@ -720,9 +726,14 @@ static unsigned int flexcan_mailbox_read(struct can_rx_offload *offload, priv->write(BIT(n - 32), &regs->iflag2); } else { priv->write(FLEXCAN_IFLAG_RX_FIFO_AVAILABLE, &regs->iflag1); - priv->read(&regs->timer); } + /* Read the Free Running Timer. It is optional but recommended + * to unlock Mailbox as soon as possible and make it available + * for reception.
+ */ + priv->read(&regs->timer); + return 1; } @@ -732,9 +743,9 @@ static inline u64 flexcan_read_reg_iflag_rx(struct flexcan_priv *priv) struct flexcan_regs __iomem *regs = priv->regs; u32 iflag1, iflag2; - iflag2 = priv->read(&regs->iflag2) & priv->reg_imask2_default; - iflag1 = priv->read(&regs->iflag1) & priv->reg_imask1_default & - ~FLEXCAN_IFLAG_MB(priv->tx_mb_idx); + iflag2 = priv->read(&regs->iflag2) & priv->reg_imask2_default & + ~FLEXCAN_IFLAG_MB(FLEXCAN_TX_MB); + iflag1 = priv->read(&regs->iflag1) & priv->reg_imask1_default; return (u64)iflag2 << 32 | iflag1; } @@ -746,11 +757,9 @@ static irqreturn_t flexcan_irq(int irq, void *dev_id) struct flexcan_priv *priv = netdev_priv(dev); struct flexcan_regs __iomem *regs = priv->regs; irqreturn_t handled = IRQ_NONE; - u32 reg_iflag1, reg_esr; + u32 reg_iflag2, reg_esr; enum can_state last_state = priv->can.state; - reg_iflag1 = priv->read(&regs->iflag1); - /* reception interrupt */ if (priv->devtype_data->quirks & FLEXCAN_QUIRK_USE_OFF_TIMESTAMP) { u64 reg_iflag; @@ -764,6 +773,9 @@ static irqreturn_t flexcan_irq(int irq, void *dev_id) break; } } else { + u32 reg_iflag1; + + reg_iflag1 = priv->read(&regs->iflag1); if (reg_iflag1 & FLEXCAN_IFLAG_RX_FIFO_AVAILABLE) { handled = IRQ_HANDLED; can_rx_offload_irq_offload_fifo(&priv->offload); @@ -779,17 +791,22 @@ static irqreturn_t flexcan_irq(int irq, void *dev_id) } } + reg_iflag2 = priv->read(&regs->iflag2); + /* transmission complete interrupt */ - if (reg_iflag1 & FLEXCAN_IFLAG_MB(priv->tx_mb_idx)) { + if (reg_iflag2 & FLEXCAN_IFLAG_MB(FLEXCAN_TX_MB)) { + u32 reg_ctrl = priv->read(&regs->mb[FLEXCAN_TX_MB].can_ctrl); + handled = IRQ_HANDLED; - stats->tx_bytes += can_get_echo_skb(dev, 0); + stats->tx_bytes += can_rx_offload_get_echo_skb(&priv->offload, + 0, reg_ctrl << 16); stats->tx_packets++; can_led_event(dev, CAN_LED_EVENT_TX); /* after sending a RTR frame MB is in RX mode */ priv->write(FLEXCAN_MB_CODE_TX_INACTIVE, - &priv->tx_mb->can_ctrl); -
priv->write(FLEXCAN_IFLAG_MB(priv->tx_mb_idx), &regs->iflag1); + &regs->mb[FLEXCAN_TX_MB].can_ctrl); + priv->write(FLEXCAN_IFLAG_MB(FLEXCAN_TX_MB), &regs->iflag2); netif_wake_queue(dev); } @@ -931,15 +948,13 @@ static int flexcan_chip_start(struct net_device *dev) reg_mcr &= ~FLEXCAN_MCR_MAXMB(0xff); reg_mcr |= FLEXCAN_MCR_FRZ | FLEXCAN_MCR_HALT | FLEXCAN_MCR_SUPV | FLEXCAN_MCR_WRN_EN | FLEXCAN_MCR_SRX_DIS | FLEXCAN_MCR_IRMQ | - FLEXCAN_MCR_IDAM_C; + FLEXCAN_MCR_IDAM_C | FLEXCAN_MCR_MAXMB(FLEXCAN_TX_MB); - if (priv->devtype_data->quirks & FLEXCAN_QUIRK_USE_OFF_TIMESTAMP) { + if (priv->devtype_data->quirks & FLEXCAN_QUIRK_USE_OFF_TIMESTAMP) reg_mcr &= ~FLEXCAN_MCR_FEN; - reg_mcr |= FLEXCAN_MCR_MAXMB(priv->offload.mb_last); - } else { - reg_mcr |= FLEXCAN_MCR_FEN | - FLEXCAN_MCR_MAXMB(priv->tx_mb_idx); - } + else + reg_mcr |= FLEXCAN_MCR_FEN; + netdev_dbg(dev, "%s: writing mcr=0x%08x", __func__, reg_mcr); priv->write(reg_mcr, &regs->mcr); @@ -982,16 +997,17 @@ static int flexcan_chip_start(struct net_device *dev) priv->write(reg_ctrl2, &regs->ctrl2); } - /* clear and invalidate all mailboxes first */ - for (i = priv->tx_mb_idx; i < ARRAY_SIZE(regs->mb); i++) { - priv->write(FLEXCAN_MB_CODE_RX_INACTIVE, - &regs->mb[i].can_ctrl); - } - if (priv->devtype_data->quirks & FLEXCAN_QUIRK_USE_OFF_TIMESTAMP) { - for (i = priv->offload.mb_first; i <= priv->offload.mb_last; i++) + for (i = priv->offload.mb_first; i <= priv->offload.mb_last; i++) { priv->write(FLEXCAN_MB_CODE_RX_EMPTY, &regs->mb[i].can_ctrl); + } + } else { + /* clear and invalidate unused mailboxes first */ + for (i = FLEXCAN_TX_MB_RESERVED_OFF_FIFO; i < ARRAY_SIZE(regs->mb); i++) { + priv->write(FLEXCAN_MB_CODE_RX_INACTIVE, + &regs->mb[i].can_ctrl); + } } /* Errata ERR005829: mark first TX mailbox as INACTIVE */ @@ -1000,7 +1016,7 @@ static int flexcan_chip_start(struct net_device *dev) /* mark TX mailbox as INACTIVE */ priv->write(FLEXCAN_MB_CODE_TX_INACTIVE, - &priv->tx_mb->can_ctrl); + &regs->mb[FLEXCAN_TX_MB].can_ctrl); /* acceptance
mask/acceptance code (accept everything) */ priv->write(0x0, ®s->rxgmask); @@ -1355,17 +1371,13 @@ static int flexcan_probe(struct platform_device *pdev) priv->devtype_data = devtype_data; priv->reg_xceiver = reg_xceiver; - if (priv->devtype_data->quirks & FLEXCAN_QUIRK_USE_OFF_TIMESTAMP) { - priv->tx_mb_idx = FLEXCAN_TX_MB_OFF_TIMESTAMP; + if (priv->devtype_data->quirks & FLEXCAN_QUIRK_USE_OFF_TIMESTAMP) priv->tx_mb_reserved = ®s->mb[FLEXCAN_TX_MB_RESERVED_OFF_TIMESTAMP]; - } else { - priv->tx_mb_idx = FLEXCAN_TX_MB_OFF_FIFO; + else priv->tx_mb_reserved = ®s->mb[FLEXCAN_TX_MB_RESERVED_OFF_FIFO]; - } - priv->tx_mb = ®s->mb[priv->tx_mb_idx]; - priv->reg_imask1_default = FLEXCAN_IFLAG_MB(priv->tx_mb_idx); - priv->reg_imask2_default = 0; + priv->reg_imask1_default = 0; + priv->reg_imask2_default = FLEXCAN_IFLAG_MB(FLEXCAN_TX_MB); priv->offload.mailbox_read = flexcan_mailbox_read; diff --git a/drivers/net/can/rcar/rcar_can.c b/drivers/net/can/rcar/rcar_can.c index 11662f479e760..771a460837397 100644 --- a/drivers/net/can/rcar/rcar_can.c +++ b/drivers/net/can/rcar/rcar_can.c @@ -24,6 +24,9 @@ #define RCAR_CAN_DRV_NAME "rcar_can" +#define RCAR_SUPPORTED_CLOCKS (BIT(CLKR_CLKP1) | BIT(CLKR_CLKP2) | \ + BIT(CLKR_CLKEXT)) + /* Mailbox configuration: * mailbox 60 - 63 - Rx FIFO mailboxes * mailbox 56 - 59 - Tx FIFO mailboxes @@ -789,7 +792,7 @@ static int rcar_can_probe(struct platform_device *pdev) goto fail_clk; } - if (clock_select >= ARRAY_SIZE(clock_names)) { + if (!(BIT(clock_select) & RCAR_SUPPORTED_CLOCKS)) { err = -EINVAL; dev_err(&pdev->dev, "invalid CAN clock selected\n"); goto fail_clk; diff --git a/drivers/net/can/rx-offload.c b/drivers/net/can/rx-offload.c index d94dae2168209..727691dd08fbf 100644 --- a/drivers/net/can/rx-offload.c +++ b/drivers/net/can/rx-offload.c @@ -209,7 +209,54 @@ int can_rx_offload_irq_offload_fifo(struct can_rx_offload *offload) } EXPORT_SYMBOL_GPL(can_rx_offload_irq_offload_fifo); -int can_rx_offload_irq_queue_err_skb(struct 
can_rx_offload *offload, struct sk_buff *skb) +int can_rx_offload_queue_sorted(struct can_rx_offload *offload, + struct sk_buff *skb, u32 timestamp) +{ + struct can_rx_offload_cb *cb; + unsigned long flags; + + if (skb_queue_len(&offload->skb_queue) > + offload->skb_queue_len_max) + return -ENOMEM; + + cb = can_rx_offload_get_cb(skb); + cb->timestamp = timestamp; + + spin_lock_irqsave(&offload->skb_queue.lock, flags); + __skb_queue_add_sort(&offload->skb_queue, skb, can_rx_offload_compare); + spin_unlock_irqrestore(&offload->skb_queue.lock, flags); + + can_rx_offload_schedule(offload); + + return 0; +} +EXPORT_SYMBOL_GPL(can_rx_offload_queue_sorted); + +unsigned int can_rx_offload_get_echo_skb(struct can_rx_offload *offload, + unsigned int idx, u32 timestamp) +{ + struct net_device *dev = offload->dev; + struct net_device_stats *stats = &dev->stats; + struct sk_buff *skb; + u8 len; + int err; + + skb = __can_get_echo_skb(dev, idx, &len); + if (!skb) + return 0; + + err = can_rx_offload_queue_sorted(offload, skb, timestamp); + if (err) { + stats->rx_errors++; + stats->tx_fifo_errors++; + } + + return len; +} +EXPORT_SYMBOL_GPL(can_rx_offload_get_echo_skb); + +int can_rx_offload_queue_tail(struct can_rx_offload *offload, + struct sk_buff *skb) { if (skb_queue_len(&offload->skb_queue) > offload->skb_queue_len_max) @@ -220,7 +267,7 @@ int can_rx_offload_irq_queue_err_skb(struct can_rx_offload *offload, struct sk_b return 0; } -EXPORT_SYMBOL_GPL(can_rx_offload_irq_queue_err_skb); +EXPORT_SYMBOL_GPL(can_rx_offload_queue_tail); static int can_rx_offload_init_queue(struct net_device *dev, struct can_rx_offload *offload, unsigned int weight) { diff --git a/drivers/net/can/spi/hi311x.c b/drivers/net/can/spi/hi311x.c index 53e320c92a8be..ddaf46239e39e 100644 --- a/drivers/net/can/spi/hi311x.c +++ b/drivers/net/can/spi/hi311x.c @@ -760,7 +760,7 @@ static int hi3110_open(struct net_device *net) { struct hi3110_priv *priv = netdev_priv(net); struct spi_device *spi = priv->spi; - 
unsigned long flags = IRQF_ONESHOT | IRQF_TRIGGER_RISING; + unsigned long flags = IRQF_ONESHOT | IRQF_TRIGGER_HIGH; int ret; ret = open_candev(net); diff --git a/drivers/net/can/usb/kvaser_usb/kvaser_usb_core.c b/drivers/net/can/usb/kvaser_usb/kvaser_usb_core.c index b939a4c10b840..c89c7d4900d75 100644 --- a/drivers/net/can/usb/kvaser_usb/kvaser_usb_core.c +++ b/drivers/net/can/usb/kvaser_usb/kvaser_usb_core.c @@ -528,7 +528,6 @@ static netdev_tx_t kvaser_usb_start_xmit(struct sk_buff *skb, context = &priv->tx_contexts[i]; context->echo_index = i; - can_put_echo_skb(skb, netdev, context->echo_index); ++priv->active_tx_contexts; if (priv->active_tx_contexts >= (int)dev->max_tx_urbs) netif_stop_queue(netdev); @@ -553,7 +552,6 @@ static netdev_tx_t kvaser_usb_start_xmit(struct sk_buff *skb, dev_kfree_skb(skb); spin_lock_irqsave(&priv->tx_contexts_lock, flags); - can_free_echo_skb(netdev, context->echo_index); context->echo_index = dev->max_tx_urbs; --priv->active_tx_contexts; netif_wake_queue(netdev); @@ -564,6 +562,8 @@ static netdev_tx_t kvaser_usb_start_xmit(struct sk_buff *skb, context->priv = priv; + can_put_echo_skb(skb, netdev, context->echo_index); + usb_fill_bulk_urb(urb, dev->udev, usb_sndbulkpipe(dev->udev, dev->bulk_out->bEndpointAddress), diff --git a/drivers/net/can/usb/kvaser_usb/kvaser_usb_hydra.c b/drivers/net/can/usb/kvaser_usb/kvaser_usb_hydra.c index c084bae5ec0a4..5fc0be5642743 100644 --- a/drivers/net/can/usb/kvaser_usb/kvaser_usb_hydra.c +++ b/drivers/net/can/usb/kvaser_usb/kvaser_usb_hydra.c @@ -1019,6 +1019,11 @@ kvaser_usb_hydra_error_frame(struct kvaser_usb_net_priv *priv, new_state : CAN_STATE_ERROR_ACTIVE; can_change_state(netdev, cf, tx_state, rx_state); + + if (priv->can.restart_ms && + old_state >= CAN_STATE_BUS_OFF && + new_state < CAN_STATE_BUS_OFF) + cf->can_id |= CAN_ERR_RESTARTED; } if (new_state == CAN_STATE_BUS_OFF) { @@ -1028,11 +1033,6 @@ kvaser_usb_hydra_error_frame(struct kvaser_usb_net_priv *priv, can_bus_off(netdev); } - - 
if (priv->can.restart_ms && - old_state >= CAN_STATE_BUS_OFF && - new_state < CAN_STATE_BUS_OFF) - cf->can_id |= CAN_ERR_RESTARTED; } if (!skb) { diff --git a/drivers/net/can/usb/ucan.c b/drivers/net/can/usb/ucan.c index 0678a38b1af45..c9fd83e8d9477 100644 --- a/drivers/net/can/usb/ucan.c +++ b/drivers/net/can/usb/ucan.c @@ -1575,11 +1575,8 @@ static int ucan_probe(struct usb_interface *intf, /* disconnect the device */ static void ucan_disconnect(struct usb_interface *intf) { - struct usb_device *udev; struct ucan_priv *up = usb_get_intfdata(intf); - udev = interface_to_usbdev(intf); - usb_set_intfdata(intf, NULL); if (up) { diff --git a/drivers/net/dsa/microchip/ksz_common.c b/drivers/net/dsa/microchip/ksz_common.c index 54e0ca6ed7308..86b6464b4525c 100644 --- a/drivers/net/dsa/microchip/ksz_common.c +++ b/drivers/net/dsa/microchip/ksz_common.c @@ -1117,11 +1117,6 @@ static int ksz_switch_init(struct ksz_device *dev) { int i; - mutex_init(&dev->reg_mutex); - mutex_init(&dev->stats_mutex); - mutex_init(&dev->alu_mutex); - mutex_init(&dev->vlan_mutex); - dev->ds->ops = &ksz_switch_ops; for (i = 0; i < ARRAY_SIZE(ksz_switch_chips); i++) { @@ -1206,6 +1201,11 @@ int ksz_switch_register(struct ksz_device *dev) if (dev->pdata) dev->chip_id = dev->pdata->chip_id; + mutex_init(&dev->reg_mutex); + mutex_init(&dev->stats_mutex); + mutex_init(&dev->alu_mutex); + mutex_init(&dev->vlan_mutex); + if (ksz_switch_detect(dev)) return -EINVAL; diff --git a/drivers/net/dsa/mv88e6060.c b/drivers/net/dsa/mv88e6060.c index 65f10fec25b39..0b3e51f248c21 100644 --- a/drivers/net/dsa/mv88e6060.c +++ b/drivers/net/dsa/mv88e6060.c @@ -116,8 +116,7 @@ static int mv88e6060_switch_reset(struct dsa_switch *ds) /* Reset the switch. */ REG_WRITE(REG_GLOBAL, GLOBAL_ATU_CONTROL, GLOBAL_ATU_CONTROL_SWRESET | - GLOBAL_ATU_CONTROL_ATUSIZE_1024 | - GLOBAL_ATU_CONTROL_ATE_AGE_5MIN); + GLOBAL_ATU_CONTROL_LEARNDIS); /* Wait up to one second for reset to complete. 
*/ timeout = jiffies + 1 * HZ; @@ -142,13 +141,10 @@ static int mv88e6060_setup_global(struct dsa_switch *ds) */ REG_WRITE(REG_GLOBAL, GLOBAL_CONTROL, GLOBAL_CONTROL_MAX_FRAME_1536); - /* Enable automatic address learning, set the address - * database size to 1024 entries, and set the default aging - * time to 5 minutes. + /* Disable automatic address learning. */ REG_WRITE(REG_GLOBAL, GLOBAL_ATU_CONTROL, - GLOBAL_ATU_CONTROL_ATUSIZE_1024 | - GLOBAL_ATU_CONTROL_ATE_AGE_5MIN); + GLOBAL_ATU_CONTROL_LEARNDIS); return 0; } diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c index 8da3d39e32189..258918d8a4165 100644 --- a/drivers/net/dsa/mv88e6xxx/chip.c +++ b/drivers/net/dsa/mv88e6xxx/chip.c @@ -2391,6 +2391,107 @@ static int mv88e6xxx_stats_setup(struct mv88e6xxx_chip *chip) return mv88e6xxx_g1_stats_clear(chip); } +/* The mv88e6390 has some hidden registers used for debug and + * development. The errata also makes use of them. + */ +static int mv88e6390_hidden_write(struct mv88e6xxx_chip *chip, int port, + int reg, u16 val) +{ + u16 ctrl; + int err; + + err = mv88e6xxx_port_write(chip, PORT_RESERVED_1A_DATA_PORT, + PORT_RESERVED_1A, val); + if (err) + return err; + + ctrl = PORT_RESERVED_1A_BUSY | PORT_RESERVED_1A_WRITE | + PORT_RESERVED_1A_BLOCK | port << PORT_RESERVED_1A_PORT_SHIFT | + reg; + + return mv88e6xxx_port_write(chip, PORT_RESERVED_1A_CTRL_PORT, + PORT_RESERVED_1A, ctrl); +} + +static int mv88e6390_hidden_wait(struct mv88e6xxx_chip *chip) +{ + return mv88e6xxx_wait(chip, PORT_RESERVED_1A_CTRL_PORT, + PORT_RESERVED_1A, PORT_RESERVED_1A_BUSY); +} + + +static int mv88e6390_hidden_read(struct mv88e6xxx_chip *chip, int port, + int reg, u16 *val) +{ + u16 ctrl; + int err; + + ctrl = PORT_RESERVED_1A_BUSY | PORT_RESERVED_1A_READ | + PORT_RESERVED_1A_BLOCK | port << PORT_RESERVED_1A_PORT_SHIFT | + reg; + + err = mv88e6xxx_port_write(chip, PORT_RESERVED_1A_CTRL_PORT, + PORT_RESERVED_1A, ctrl); + if (err) + return err; + + err = 
mv88e6390_hidden_wait(chip); + if (err) + return err; + + return mv88e6xxx_port_read(chip, PORT_RESERVED_1A_DATA_PORT, + PORT_RESERVED_1A, val); +} + +/* Check if the errata has already been applied. */ +static bool mv88e6390_setup_errata_applied(struct mv88e6xxx_chip *chip) +{ + int port; + int err; + u16 val; + + for (port = 0; port < mv88e6xxx_num_ports(chip); port++) { + err = mv88e6390_hidden_read(chip, port, 0, &val); + if (err) { + dev_err(chip->dev, + "Error reading hidden register: %d\n", err); + return false; + } + if (val != 0x01c0) + return false; + } + + return true; +} + +/* The 6390 copper ports have an errata which require poking magic + * values into undocumented hidden registers and then performing a + * software reset. + */ +static int mv88e6390_setup_errata(struct mv88e6xxx_chip *chip) +{ + int port; + int err; + + if (mv88e6390_setup_errata_applied(chip)) + return 0; + + /* Set the ports into blocking mode */ + for (port = 0; port < mv88e6xxx_num_ports(chip); port++) { + err = mv88e6xxx_port_set_state(chip, port, BR_STATE_DISABLED); + if (err) + return err; + } + + for (port = 0; port < mv88e6xxx_num_ports(chip); port++) { + err = mv88e6390_hidden_write(chip, port, 0, 0x01c0); + if (err) + return err; + } + + return mv88e6xxx_software_reset(chip); +} + static int mv88e6xxx_setup(struct dsa_switch *ds) { struct mv88e6xxx_chip *chip = ds->priv; @@ -2403,6 +2504,12 @@ static int mv88e6xxx_setup(struct dsa_switch *ds) mutex_lock(&chip->reg_lock); + if (chip->info->ops->setup_errata) { + err = chip->info->ops->setup_errata(chip); + if (err) + goto unlock; + } + /* Cache the cmode of each port. 
*/ for (i = 0; i < mv88e6xxx_num_ports(chip); i++) { if (chip->info->ops->port_get_cmode) { @@ -3201,6 +3308,7 @@ static const struct mv88e6xxx_ops mv88e6185_ops = { static const struct mv88e6xxx_ops mv88e6190_ops = { /* MV88E6XXX_FAMILY_6390 */ + .setup_errata = mv88e6390_setup_errata, .irl_init_all = mv88e6390_g2_irl_init_all, .get_eeprom = mv88e6xxx_g2_get_eeprom8, .set_eeprom = mv88e6xxx_g2_set_eeprom8, @@ -3243,6 +3351,7 @@ static const struct mv88e6xxx_ops mv88e6190_ops = { static const struct mv88e6xxx_ops mv88e6190x_ops = { /* MV88E6XXX_FAMILY_6390 */ + .setup_errata = mv88e6390_setup_errata, .irl_init_all = mv88e6390_g2_irl_init_all, .get_eeprom = mv88e6xxx_g2_get_eeprom8, .set_eeprom = mv88e6xxx_g2_set_eeprom8, @@ -3285,6 +3394,7 @@ static const struct mv88e6xxx_ops mv88e6190x_ops = { static const struct mv88e6xxx_ops mv88e6191_ops = { /* MV88E6XXX_FAMILY_6390 */ + .setup_errata = mv88e6390_setup_errata, .irl_init_all = mv88e6390_g2_irl_init_all, .get_eeprom = mv88e6xxx_g2_get_eeprom8, .set_eeprom = mv88e6xxx_g2_set_eeprom8, @@ -3374,6 +3484,7 @@ static const struct mv88e6xxx_ops mv88e6240_ops = { static const struct mv88e6xxx_ops mv88e6290_ops = { /* MV88E6XXX_FAMILY_6390 */ + .setup_errata = mv88e6390_setup_errata, .irl_init_all = mv88e6390_g2_irl_init_all, .get_eeprom = mv88e6xxx_g2_get_eeprom8, .set_eeprom = mv88e6xxx_g2_set_eeprom8, @@ -3675,6 +3786,7 @@ static const struct mv88e6xxx_ops mv88e6352_ops = { static const struct mv88e6xxx_ops mv88e6390_ops = { /* MV88E6XXX_FAMILY_6390 */ + .setup_errata = mv88e6390_setup_errata, .irl_init_all = mv88e6390_g2_irl_init_all, .get_eeprom = mv88e6xxx_g2_get_eeprom8, .set_eeprom = mv88e6xxx_g2_set_eeprom8, @@ -3722,6 +3834,7 @@ static const struct mv88e6xxx_ops mv88e6390_ops = { static const struct mv88e6xxx_ops mv88e6390x_ops = { /* MV88E6XXX_FAMILY_6390 */ + .setup_errata = mv88e6390_setup_errata, .irl_init_all = mv88e6390_g2_irl_init_all, .get_eeprom = mv88e6xxx_g2_get_eeprom8, .set_eeprom = 
mv88e6xxx_g2_set_eeprom8, diff --git a/drivers/net/dsa/mv88e6xxx/chip.h b/drivers/net/dsa/mv88e6xxx/chip.h index f9ecb7872d32c..546651d8c3e1f 100644 --- a/drivers/net/dsa/mv88e6xxx/chip.h +++ b/drivers/net/dsa/mv88e6xxx/chip.h @@ -300,6 +300,11 @@ struct mv88e6xxx_mdio_bus { }; struct mv88e6xxx_ops { + /* Switch Setup Errata, called early in the switch setup to + * allow any errata actions to be performed + */ + int (*setup_errata)(struct mv88e6xxx_chip *chip); + int (*ieee_pri_map)(struct mv88e6xxx_chip *chip); int (*ip_pri_map)(struct mv88e6xxx_chip *chip); diff --git a/drivers/net/dsa/mv88e6xxx/global1.c b/drivers/net/dsa/mv88e6xxx/global1.c index d721ccf7d8bed..38e399e0f30e1 100644 --- a/drivers/net/dsa/mv88e6xxx/global1.c +++ b/drivers/net/dsa/mv88e6xxx/global1.c @@ -567,6 +567,8 @@ int mv88e6xxx_g1_stats_clear(struct mv88e6xxx_chip *chip) if (err) return err; + /* Keep the histogram mode bits */ + val &= MV88E6XXX_G1_STATS_OP_HIST_RX_TX; val |= MV88E6XXX_G1_STATS_OP_BUSY | MV88E6XXX_G1_STATS_OP_FLUSH_ALL; err = mv88e6xxx_g1_write(chip, MV88E6XXX_G1_STATS_OP, val); diff --git a/drivers/net/dsa/mv88e6xxx/phy.c b/drivers/net/dsa/mv88e6xxx/phy.c index 46af8052e5353..152a65d46e0b8 100644 --- a/drivers/net/dsa/mv88e6xxx/phy.c +++ b/drivers/net/dsa/mv88e6xxx/phy.c @@ -110,6 +110,9 @@ int mv88e6xxx_phy_page_write(struct mv88e6xxx_chip *chip, int phy, err = mv88e6xxx_phy_page_get(chip, phy, page); if (!err) { err = mv88e6xxx_phy_write(chip, phy, MV88E6XXX_PHY_PAGE, page); + if (!err) + err = mv88e6xxx_phy_write(chip, phy, reg, val); + mv88e6xxx_phy_page_put(chip, phy); } diff --git a/drivers/net/dsa/mv88e6xxx/port.h b/drivers/net/dsa/mv88e6xxx/port.h index f32f56af8e35d..b31910023bb64 100644 --- a/drivers/net/dsa/mv88e6xxx/port.h +++ b/drivers/net/dsa/mv88e6xxx/port.h @@ -251,6 +251,16 @@ /* Offset 0x19: Port IEEE Priority Remapping Registers (4-7) */ #define MV88E6095_PORT_IEEE_PRIO_REMAP_4567 0x19 +/* Offset 0x1a: Magic undocumented errata register */ +#define 
PORT_RESERVED_1A 0x1a +#define PORT_RESERVED_1A_BUSY BIT(15) +#define PORT_RESERVED_1A_WRITE BIT(14) +#define PORT_RESERVED_1A_READ 0 +#define PORT_RESERVED_1A_PORT_SHIFT 5 +#define PORT_RESERVED_1A_BLOCK (0xf << 10) +#define PORT_RESERVED_1A_CTRL_PORT 4 +#define PORT_RESERVED_1A_DATA_PORT 5 + int mv88e6xxx_port_read(struct mv88e6xxx_chip *chip, int port, int reg, u16 *val); int mv88e6xxx_port_write(struct mv88e6xxx_chip *chip, int port, int reg, diff --git a/drivers/net/dsa/realtek-smi.c b/drivers/net/dsa/realtek-smi.c index b4b839a1d0952..ad41ec63cc9f0 100644 --- a/drivers/net/dsa/realtek-smi.c +++ b/drivers/net/dsa/realtek-smi.c @@ -347,16 +347,17 @@ int realtek_smi_setup_mdio(struct realtek_smi *smi) struct device_node *mdio_np; int ret; - mdio_np = of_find_compatible_node(smi->dev->of_node, NULL, - "realtek,smi-mdio"); + mdio_np = of_get_compatible_child(smi->dev->of_node, "realtek,smi-mdio"); if (!mdio_np) { dev_err(smi->dev, "no MDIO bus node\n"); return -ENODEV; } smi->slave_mii_bus = devm_mdiobus_alloc(smi->dev); - if (!smi->slave_mii_bus) - return -ENOMEM; + if (!smi->slave_mii_bus) { + ret = -ENOMEM; + goto err_put_node; + } smi->slave_mii_bus->priv = smi; smi->slave_mii_bus->name = "SMI slave MII"; smi->slave_mii_bus->read = realtek_smi_mdio_read; @@ -371,10 +372,15 @@ int realtek_smi_setup_mdio(struct realtek_smi *smi) if (ret) { dev_err(smi->dev, "unable to register MDIO bus %s\n", smi->slave_mii_bus->id); - of_node_put(mdio_np); + goto err_put_node; } return 0; + +err_put_node: + of_node_put(mdio_np); + + return ret; } static int realtek_smi_probe(struct platform_device *pdev) @@ -457,6 +463,8 @@ static int realtek_smi_remove(struct platform_device *pdev) struct realtek_smi *smi = dev_get_drvdata(&pdev->dev); dsa_unregister_switch(smi->ds); + if (smi->slave_mii_bus) + of_node_put(smi->slave_mii_bus->dev.of_node); gpiod_set_value(smi->reset, 1); return 0; diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.c 
b/drivers/net/ethernet/amazon/ena/ena_netdev.c index d906293ce07d9..4b73131a0f206 100644 --- a/drivers/net/ethernet/amazon/ena/ena_netdev.c +++ b/drivers/net/ethernet/amazon/ena/ena_netdev.c @@ -2627,8 +2627,8 @@ static int ena_restore_device(struct ena_adapter *adapter) ena_com_abort_admin_commands(ena_dev); ena_com_wait_for_abort_completion(ena_dev); ena_com_admin_destroy(ena_dev); - ena_com_mmio_reg_read_request_destroy(ena_dev); ena_com_dev_reset(ena_dev, ENA_REGS_RESET_DRIVER_INVALID_STATE); + ena_com_mmio_reg_read_request_destroy(ena_dev); err: clear_bit(ENA_FLAG_DEVICE_RUNNING, &adapter->flags); clear_bit(ENA_FLAG_ONGOING_RESET, &adapter->flags); diff --git a/drivers/net/ethernet/amd/sunlance.c b/drivers/net/ethernet/amd/sunlance.c index cdd7a611479b2..19f89d9b1781f 100644 --- a/drivers/net/ethernet/amd/sunlance.c +++ b/drivers/net/ethernet/amd/sunlance.c @@ -1419,7 +1419,7 @@ static int sparc_lance_probe_one(struct platform_device *op, prop = of_get_property(nd, "tpe-link-test?", NULL); if (!prop) - goto no_link_test; + goto node_put; if (strcmp(prop, "true")) { printk(KERN_NOTICE "SunLance: warning: overriding option " @@ -1428,6 +1428,8 @@ static int sparc_lance_probe_one(struct platform_device *op, "to ecd@skynet.be\n"); auxio_set_lte(AUXIO_LTE_ON); } +node_put: + of_node_put(nd); no_link_test: lp->auto_select = 1; lp->tpe = 0; diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-common.h b/drivers/net/ethernet/amd/xgbe/xgbe-common.h index d272dc6984ac6..b40d4377cc71d 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-common.h +++ b/drivers/net/ethernet/amd/xgbe/xgbe-common.h @@ -431,8 +431,6 @@ #define MAC_MDIOSCAR_PA_WIDTH 5 #define MAC_MDIOSCAR_RA_INDEX 0 #define MAC_MDIOSCAR_RA_WIDTH 16 -#define MAC_MDIOSCAR_REG_INDEX 0 -#define MAC_MDIOSCAR_REG_WIDTH 21 #define MAC_MDIOSCCDR_BUSY_INDEX 22 #define MAC_MDIOSCCDR_BUSY_WIDTH 1 #define MAC_MDIOSCCDR_CMD_INDEX 16 diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c 
index 1e929a1e4ca78..4666084eda16a 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c +++ b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c @@ -1284,6 +1284,20 @@ static void xgbe_write_mmd_regs(struct xgbe_prv_data *pdata, int prtad, } } +static unsigned int xgbe_create_mdio_sca(int port, int reg) +{ + unsigned int mdio_sca, da; + + da = (reg & MII_ADDR_C45) ? reg >> 16 : 0; + + mdio_sca = 0; + XGMAC_SET_BITS(mdio_sca, MAC_MDIOSCAR, RA, reg); + XGMAC_SET_BITS(mdio_sca, MAC_MDIOSCAR, PA, port); + XGMAC_SET_BITS(mdio_sca, MAC_MDIOSCAR, DA, da); + + return mdio_sca; +} + static int xgbe_write_ext_mii_regs(struct xgbe_prv_data *pdata, int addr, int reg, u16 val) { @@ -1291,9 +1305,7 @@ static int xgbe_write_ext_mii_regs(struct xgbe_prv_data *pdata, int addr, reinit_completion(&pdata->mdio_complete); - mdio_sca = 0; - XGMAC_SET_BITS(mdio_sca, MAC_MDIOSCAR, REG, reg); - XGMAC_SET_BITS(mdio_sca, MAC_MDIOSCAR, DA, addr); + mdio_sca = xgbe_create_mdio_sca(addr, reg); XGMAC_IOWRITE(pdata, MAC_MDIOSCAR, mdio_sca); mdio_sccd = 0; @@ -1317,9 +1329,7 @@ static int xgbe_read_ext_mii_regs(struct xgbe_prv_data *pdata, int addr, reinit_completion(&pdata->mdio_complete); - mdio_sca = 0; - XGMAC_SET_BITS(mdio_sca, MAC_MDIOSCAR, REG, reg); - XGMAC_SET_BITS(mdio_sca, MAC_MDIOSCAR, DA, addr); + mdio_sca = xgbe_create_mdio_sca(addr, reg); XGMAC_IOWRITE(pdata, MAC_MDIOSCAR, mdio_sca); mdio_sccd = 0; diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_main.c b/drivers/net/ethernet/apm/xgene/xgene_enet_main.c index 3b889efddf789..50dd6bf176d03 100644 --- a/drivers/net/ethernet/apm/xgene/xgene_enet_main.c +++ b/drivers/net/ethernet/apm/xgene/xgene_enet_main.c @@ -29,9 +29,6 @@ #define RES_RING_CSR 1 #define RES_RING_CMD 2 -static const struct of_device_id xgene_enet_of_match[]; -static const struct acpi_device_id xgene_enet_acpi_match[]; - static void xgene_enet_init_bufpool(struct xgene_enet_desc_ring *buf_pool) { struct xgene_enet_raw_desc16 *raw_desc; diff --git 
a/drivers/net/ethernet/aquantia/atlantic/aq_nic.c b/drivers/net/ethernet/aquantia/atlantic/aq_nic.c index 26dc6782b4750..4f34808f1e064 100644 --- a/drivers/net/ethernet/aquantia/atlantic/aq_nic.c +++ b/drivers/net/ethernet/aquantia/atlantic/aq_nic.c @@ -590,7 +590,7 @@ int aq_nic_set_multicast_list(struct aq_nic_s *self, struct net_device *ndev) } } - if (i > 0 && i < AQ_HW_MULTICAST_ADDRESS_MAX) { + if (i > 0 && i <= AQ_HW_MULTICAST_ADDRESS_MAX) { packet_filter |= IFF_MULTICAST; self->mc_list.count = i; self->aq_hw_ops->hw_multicast_list_set(self->aq_hw, diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_ring.c b/drivers/net/ethernet/aquantia/atlantic/aq_ring.c index d1e1a0ba86150..7134d0d4cdf72 100644 --- a/drivers/net/ethernet/aquantia/atlantic/aq_ring.c +++ b/drivers/net/ethernet/aquantia/atlantic/aq_ring.c @@ -172,6 +172,27 @@ bool aq_ring_tx_clean(struct aq_ring_s *self) return !!budget; } +static void aq_rx_checksum(struct aq_ring_s *self, + struct aq_ring_buff_s *buff, + struct sk_buff *skb) +{ + if (!(self->aq_nic->ndev->features & NETIF_F_RXCSUM)) + return; + + if (unlikely(buff->is_cso_err)) { + ++self->stats.rx.errors; + skb->ip_summed = CHECKSUM_NONE; + return; + } + if (buff->is_ip_cso) { + __skb_incr_checksum_unnecessary(skb); + if (buff->is_udp_cso || buff->is_tcp_cso) + __skb_incr_checksum_unnecessary(skb); + } else { + skb->ip_summed = CHECKSUM_NONE; + } +} + #define AQ_SKB_ALIGN SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) int aq_ring_rx_clean(struct aq_ring_s *self, struct napi_struct *napi, @@ -267,18 +288,8 @@ int aq_ring_rx_clean(struct aq_ring_s *self, } skb->protocol = eth_type_trans(skb, ndev); - if (unlikely(buff->is_cso_err)) { - ++self->stats.rx.errors; - skb->ip_summed = CHECKSUM_NONE; - } else { - if (buff->is_ip_cso) { - __skb_incr_checksum_unnecessary(skb); - if (buff->is_udp_cso || buff->is_tcp_cso) - __skb_incr_checksum_unnecessary(skb); - } else { - skb->ip_summed = CHECKSUM_NONE; - } - } + + aq_rx_checksum(self, buff, 
skb); skb_set_hash(skb, buff->rss_hash, buff->is_hash_l4 ? PKT_HASH_TYPE_L4 : diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c index 1d44a386e7d34..56363ff5c8919 100644 --- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c +++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c @@ -655,9 +655,9 @@ static int hw_atl_b0_hw_ring_rx_receive(struct aq_hw_s *self, struct hw_atl_rxd_wb_s *rxd_wb = (struct hw_atl_rxd_wb_s *) &ring->dx_ring[ring->hw_head * HW_ATL_B0_RXD_SIZE]; - unsigned int is_err = 1U; unsigned int is_rx_check_sum_enabled = 0U; unsigned int pkt_type = 0U; + u8 rx_stat = 0U; if (!(rxd_wb->status & 0x1U)) { /* RxD is not done */ break; @@ -665,35 +665,35 @@ static int hw_atl_b0_hw_ring_rx_receive(struct aq_hw_s *self, buff = &ring->buff_ring[ring->hw_head]; - is_err = (0x0000003CU & rxd_wb->status); + rx_stat = (0x0000003CU & rxd_wb->status) >> 2; - is_rx_check_sum_enabled = (rxd_wb->type) & (0x3U << 19); - is_err &= ~0x20U; /* exclude validity bit */ + is_rx_check_sum_enabled = (rxd_wb->type >> 19) & 0x3U; pkt_type = 0xFFU & (rxd_wb->type >> 4); - if (is_rx_check_sum_enabled) { - if (0x0U == (pkt_type & 0x3U)) - buff->is_ip_cso = (is_err & 0x08U) ? 0U : 1U; + if (is_rx_check_sum_enabled & BIT(0) && + (0x0U == (pkt_type & 0x3U))) + buff->is_ip_cso = (rx_stat & BIT(1)) ? 0U : 1U; + if (is_rx_check_sum_enabled & BIT(1)) { if (0x4U == (pkt_type & 0x1CU)) - buff->is_udp_cso = buff->is_cso_err ? 0U : 1U; + buff->is_udp_cso = (rx_stat & BIT(2)) ? 0U : + !!(rx_stat & BIT(3)); else if (0x0U == (pkt_type & 0x1CU)) - buff->is_tcp_cso = buff->is_cso_err ? 0U : 1U; - - /* Checksum offload workaround for small packets */ - if (rxd_wb->pkt_len <= 60) { - buff->is_ip_cso = 0U; - buff->is_cso_err = 0U; - } + buff->is_tcp_cso = (rx_stat & BIT(2)) ? 
0U : + !!(rx_stat & BIT(3)); + } + buff->is_cso_err = !!(rx_stat & 0x6); + /* Checksum offload workaround for small packets */ + if (unlikely(rxd_wb->pkt_len <= 60)) { + buff->is_ip_cso = 0U; + buff->is_cso_err = 0U; } - - is_err &= ~0x18U; dma_unmap_page(ndev, buff->pa, buff->len, DMA_FROM_DEVICE); - if (is_err || rxd_wb->type & 0x1000U) { - /* status error or DMA error */ + if ((rx_stat & BIT(0)) || rxd_wb->type & 0x1000U) { + /* MAC error or DMA error */ buff->is_error = 1U; } else { if (self->aq_nic_cfg->is_rss) { @@ -915,6 +915,12 @@ static int hw_atl_b0_hw_interrupt_moderation_set(struct aq_hw_s *self) static int hw_atl_b0_hw_stop(struct aq_hw_s *self) { hw_atl_b0_hw_irq_disable(self, HW_ATL_B0_INT_MASK); + + /* Invalidate Descriptor Cache to prevent writing to the cached + * descriptors and to the data pointer of those descriptors + */ + hw_atl_rdm_rx_dma_desc_cache_init_set(self, 1); + return aq_hw_err_from_flags(self); } diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.c index 10ba035dadb19..10ec5dc88e243 100644 --- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.c +++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.c @@ -619,6 +619,14 @@ void hw_atl_rpb_rx_flow_ctl_mode_set(struct aq_hw_s *aq_hw, u32 rx_flow_ctl_mode HW_ATL_RPB_RX_FC_MODE_SHIFT, rx_flow_ctl_mode); } +void hw_atl_rdm_rx_dma_desc_cache_init_set(struct aq_hw_s *aq_hw, u32 init) +{ + aq_hw_write_reg_bit(aq_hw, HW_ATL_RDM_RX_DMA_DESC_CACHE_INIT_ADR, + HW_ATL_RDM_RX_DMA_DESC_CACHE_INIT_MSK, + HW_ATL_RDM_RX_DMA_DESC_CACHE_INIT_SHIFT, + init); +} + void hw_atl_rpb_rx_pkt_buff_size_per_tc_set(struct aq_hw_s *aq_hw, u32 rx_pkt_buff_size_per_tc, u32 buffer) { diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.h b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.h index dfb426f2dc2c8..b3bf64b48b93d 100644 --- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.h 
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.h @@ -325,6 +325,9 @@ void hw_atl_rpb_rx_pkt_buff_size_per_tc_set(struct aq_hw_s *aq_hw, u32 rx_pkt_buff_size_per_tc, u32 buffer); +/* set rdm rx dma descriptor cache init */ +void hw_atl_rdm_rx_dma_desc_cache_init_set(struct aq_hw_s *aq_hw, u32 init); + /* set rx xoff enable (per tc) */ void hw_atl_rpb_rx_xoff_en_per_tc_set(struct aq_hw_s *aq_hw, u32 rx_xoff_en_per_tc, u32 buffer); diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh_internal.h b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh_internal.h index e0cf70120f1d0..e2ecdb1c5a5c4 100644 --- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh_internal.h +++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh_internal.h @@ -293,6 +293,24 @@ /* default value of bitfield desc{d}_reset */ #define HW_ATL_RDM_DESCDRESET_DEFAULT 0x0 +/* rdm_desc_init_i bitfield definitions + * preprocessor definitions for the bitfield rdm_desc_init_i. + * port="pif_rdm_desc_init_i" + */ + +/* register address for bitfield rdm_desc_init_i */ +#define HW_ATL_RDM_RX_DMA_DESC_CACHE_INIT_ADR 0x00005a00 +/* bitmask for bitfield rdm_desc_init_i */ +#define HW_ATL_RDM_RX_DMA_DESC_CACHE_INIT_MSK 0xffffffff +/* inverted bitmask for bitfield rdm_desc_init_i */ +#define HW_ATL_RDM_RX_DMA_DESC_CACHE_INIT_MSKN 0x00000000 +/* lower bit position of bitfield rdm_desc_init_i */ +#define HW_ATL_RDM_RX_DMA_DESC_CACHE_INIT_SHIFT 0 +/* width of bitfield rdm_desc_init_i */ +#define HW_ATL_RDM_RX_DMA_DESC_CACHE_INIT_WIDTH 32 +/* default value of bitfield rdm_desc_init_i */ +#define HW_ATL_RDM_RX_DMA_DESC_CACHE_INIT_DEFAULT 0x0 + /* rx int_desc_wrb_en bitfield definitions * preprocessor definitions for the bitfield "int_desc_wrb_en". 
* port="pif_rdm_int_desc_wrb_en_i" diff --git a/drivers/net/ethernet/broadcom/bcmsysport.c b/drivers/net/ethernet/broadcom/bcmsysport.c index c57238fce8637..7b6859e4924ea 100644 --- a/drivers/net/ethernet/broadcom/bcmsysport.c +++ b/drivers/net/ethernet/broadcom/bcmsysport.c @@ -1897,9 +1897,6 @@ static void bcm_sysport_netif_start(struct net_device *dev) intrl2_1_mask_clear(priv, 0xffffffff); else intrl2_0_mask_clear(priv, INTRL2_0_TDMA_MBDONE_MASK); - - /* Last call before we start the real business */ - netif_tx_start_all_queues(dev); } static void rbuf_init(struct bcm_sysport_priv *priv) @@ -2045,6 +2042,8 @@ static int bcm_sysport_open(struct net_device *dev) bcm_sysport_netif_start(dev); + netif_tx_start_all_queues(dev); + return 0; out_clear_rx_int: @@ -2068,7 +2067,7 @@ static void bcm_sysport_netif_stop(struct net_device *dev) struct bcm_sysport_priv *priv = netdev_priv(dev); /* stop all software from updating hardware */ - netif_tx_stop_all_queues(dev); + netif_tx_disable(dev); napi_disable(&priv->napi); cancel_work_sync(&priv->dim.dim.work); phy_stop(dev->phydev); @@ -2654,12 +2653,12 @@ static int __maybe_unused bcm_sysport_suspend(struct device *d) if (!netif_running(dev)) return 0; + netif_device_detach(dev); + bcm_sysport_netif_stop(dev); phy_suspend(dev->phydev); - netif_device_detach(dev); - /* Disable UniMAC RX */ umac_enable_set(priv, CMD_RX_EN, 0); @@ -2743,8 +2742,6 @@ static int __maybe_unused bcm_sysport_resume(struct device *d) goto out_free_rx_ring; } - netif_device_attach(dev); - /* RX pipe enable */ topctrl_writel(priv, 0, RX_FLUSH_CNTL); @@ -2789,6 +2786,8 @@ static int __maybe_unused bcm_sysport_resume(struct device *d) bcm_sysport_netif_start(dev); + netif_device_attach(dev); + return 0; out_free_rx_ring: diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h index be1506169076f..3db54b664aed0 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h +++ 
b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h @@ -1282,6 +1282,7 @@ enum sp_rtnl_flag { BNX2X_SP_RTNL_TX_STOP, BNX2X_SP_RTNL_GET_DRV_VERSION, BNX2X_SP_RTNL_CHANGE_UDP_PORT, + BNX2X_SP_RTNL_UPDATE_SVID, }; enum bnx2x_iov_flag { @@ -2191,6 +2192,13 @@ void bnx2x_igu_clear_sb_gen(struct bnx2x *bp, u8 func, u8 idu_sb_id, #define PMF_DMAE_C(bp) (BP_PORT(bp) * MAX_DMAE_C_PER_PORT + \ E1HVN_MAX) +/* Following is the DMAE channel number allocation for the clients. + * MFW: OCBB/OCSD implementations use DMAE channels 14/15 respectively. + * Driver: 0-3 and 8-11 (for PF dmae operations) + * 4 and 12 (for stats requests) + */ +#define BNX2X_FW_DMAE_C 13 /* Channel for FW DMAE operations */ + /* PCIE link and speed */ #define PCICFG_LINK_WIDTH 0x1f00000 #define PCICFG_LINK_WIDTH_SHIFT 20 diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c index fcc2328bb0d95..a585f1025a580 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c @@ -2925,6 +2925,10 @@ static void bnx2x_handle_update_svid_cmd(struct bnx2x *bp) func_params.f_obj = &bp->func_obj; func_params.cmd = BNX2X_F_CMD_SWITCH_UPDATE; + /* Prepare parameters for function state transitions */ + __set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags); + __set_bit(RAMROD_RETRY, &func_params.ramrod_flags); + if (IS_MF_UFP(bp) || IS_MF_BD(bp)) { int func = BP_ABS_FUNC(bp); u32 val; @@ -4301,7 +4305,8 @@ static void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn) bnx2x_handle_eee_event(bp); if (val & DRV_STATUS_OEM_UPDATE_SVID) - bnx2x_handle_update_svid_cmd(bp); + bnx2x_schedule_sp_rtnl(bp, + BNX2X_SP_RTNL_UPDATE_SVID, 0); if (bp->link_vars.periodic_flags & PERIODIC_FLAGS_LINK_EVENT) { @@ -8462,6 +8467,7 @@ int bnx2x_set_vlan_one(struct bnx2x *bp, u16 vlan, /* Fill a user request section if needed */ if (!test_bit(RAMROD_CONT, ramrod_flags)) { ramrod_param.user_req.u.vlan.vlan = vlan; + 
__set_bit(BNX2X_VLAN, &ramrod_param.user_req.vlan_mac_flags); /* Set the command: ADD or DEL */ if (set) ramrod_param.user_req.cmd = BNX2X_VLAN_MAC_ADD; @@ -8482,6 +8488,27 @@ int bnx2x_set_vlan_one(struct bnx2x *bp, u16 vlan, return rc; } +static int bnx2x_del_all_vlans(struct bnx2x *bp) +{ + struct bnx2x_vlan_mac_obj *vlan_obj = &bp->sp_objs[0].vlan_obj; + unsigned long ramrod_flags = 0, vlan_flags = 0; + struct bnx2x_vlan_entry *vlan; + int rc; + + __set_bit(RAMROD_COMP_WAIT, &ramrod_flags); + __set_bit(BNX2X_VLAN, &vlan_flags); + rc = vlan_obj->delete_all(bp, vlan_obj, &vlan_flags, &ramrod_flags); + if (rc) + return rc; + + /* Mark that hw forgot all entries */ + list_for_each_entry(vlan, &bp->vlan_reg, link) + vlan->hw = false; + bp->vlan_cnt = 0; + + return 0; +} + int bnx2x_del_all_macs(struct bnx2x *bp, struct bnx2x_vlan_mac_obj *mac_obj, int mac_type, bool wait_for_comp) @@ -9320,6 +9347,17 @@ void bnx2x_chip_cleanup(struct bnx2x *bp, int unload_mode, bool keep_link) BNX2X_ERR("Failed to schedule DEL commands for UC MACs list: %d\n", rc); + /* The whole *vlan_obj structure may be not initialized if VLAN + * filtering offload is not supported by hardware. Currently this is + * true for all hardware covered by CHIP_IS_E1x(). 
+ */ + if (!CHIP_IS_E1x(bp)) { + /* Remove all currently configured VLANs */ + rc = bnx2x_del_all_vlans(bp); + if (rc < 0) + BNX2X_ERR("Failed to delete all VLANs\n"); + } + /* Disable LLH */ if (!CHIP_IS_E1(bp)) REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0); @@ -10349,6 +10387,9 @@ static void bnx2x_sp_rtnl_task(struct work_struct *work) &bp->sp_rtnl_state)) bnx2x_update_mng_version(bp); + if (test_and_clear_bit(BNX2X_SP_RTNL_UPDATE_SVID, &bp->sp_rtnl_state)) + bnx2x_handle_update_svid_cmd(bp); + if (test_and_clear_bit(BNX2X_SP_RTNL_CHANGE_UDP_PORT, &bp->sp_rtnl_state)) { if (bnx2x_udp_port_update(bp)) { @@ -11740,8 +11781,10 @@ static void bnx2x_get_fcoe_info(struct bnx2x *bp) * If maximum allowed number of connections is zero - * disable the feature. */ - if (!bp->cnic_eth_dev.max_fcoe_conn) + if (!bp->cnic_eth_dev.max_fcoe_conn) { bp->flags |= NO_FCOE_FLAG; + eth_zero_addr(bp->fip_mac); + } } static void bnx2x_get_cnic_info(struct bnx2x *bp) @@ -13014,13 +13057,6 @@ static void bnx2x_vlan_configure(struct bnx2x *bp, bool set_rx_mode) int bnx2x_vlan_reconfigure_vid(struct bnx2x *bp) { - struct bnx2x_vlan_entry *vlan; - - /* The hw forgot all entries after reload */ - list_for_each_entry(vlan, &bp->vlan_reg, link) - vlan->hw = false; - bp->vlan_cnt = 0; - /* Don't set rx mode here. Our caller will do it. 
*/ bnx2x_vlan_configure(bp, false); diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c index 3f4d2c8da21a3..a9eaaf3e73a4c 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c @@ -6149,6 +6149,7 @@ static inline int bnx2x_func_send_start(struct bnx2x *bp, rdata->sd_vlan_tag = cpu_to_le16(start_params->sd_vlan_tag); rdata->path_id = BP_PATH(bp); rdata->network_cos_mode = start_params->network_cos_mode; + rdata->dmae_cmd_id = BNX2X_FW_DMAE_C; rdata->vxlan_dst_port = cpu_to_le16(start_params->vxlan_dst_port); rdata->geneve_dst_port = cpu_to_le16(start_params->geneve_dst_port); diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h index 0bf2fd470819e..7a6e82db42312 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h @@ -265,6 +265,7 @@ enum { BNX2X_ETH_MAC, BNX2X_ISCSI_ETH_MAC, BNX2X_NETQ_ETH_MAC, + BNX2X_VLAN, BNX2X_DONT_CONSUME_CAM_CREDIT, BNX2X_DONT_CONSUME_CAM_CREDIT_DEST, }; @@ -272,7 +273,8 @@ enum { #define BNX2X_VLAN_MAC_CMP_MASK (1 << BNX2X_UC_LIST_MAC | \ 1 << BNX2X_ETH_MAC | \ 1 << BNX2X_ISCSI_ETH_MAC | \ - 1 << BNX2X_NETQ_ETH_MAC) + 1 << BNX2X_NETQ_ETH_MAC | \ + 1 << BNX2X_VLAN) #define BNX2X_VLAN_MAC_CMP_FLAGS(flags) \ ((flags) & BNX2X_VLAN_MAC_CMP_MASK) diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c index e52d7af3ab3e1..da9b876899963 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c @@ -2862,8 +2862,8 @@ bnxt_fill_coredump_record(struct bnxt *bp, struct bnxt_coredump_record *record, record->asic_state = 0; strlcpy(record->system_name, utsname()->nodename, sizeof(record->system_name)); - record->year = cpu_to_le16(tm.tm_year); - record->month = cpu_to_le16(tm.tm_mon); + record->year = 
cpu_to_le16(tm.tm_year + 1900); + record->month = cpu_to_le16(tm.tm_mon + 1); record->day = cpu_to_le16(tm.tm_mday); record->hour = cpu_to_le16(tm.tm_hour); record->minute = cpu_to_le16(tm.tm_min); diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c index 20c1681bb1afe..2d6f090bf6440 100644 --- a/drivers/net/ethernet/broadcom/genet/bcmgenet.c +++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c @@ -2855,7 +2855,6 @@ static void bcmgenet_netif_start(struct net_device *dev) umac_enable_set(priv, CMD_TX_EN | CMD_RX_EN, true); - netif_tx_start_all_queues(dev); bcmgenet_enable_tx_napi(priv); /* Monitor link interrupts now */ @@ -2937,6 +2936,8 @@ static int bcmgenet_open(struct net_device *dev) bcmgenet_netif_start(dev); + netif_tx_start_all_queues(dev); + return 0; err_irq1: @@ -2958,7 +2959,7 @@ static void bcmgenet_netif_stop(struct net_device *dev) struct bcmgenet_priv *priv = netdev_priv(dev); bcmgenet_disable_tx_napi(priv); - netif_tx_stop_all_queues(dev); + netif_tx_disable(dev); /* Disable MAC receive */ umac_enable_set(priv, CMD_RX_EN, false); @@ -3620,13 +3621,13 @@ static int bcmgenet_suspend(struct device *d) if (!netif_running(dev)) return 0; + netif_device_detach(dev); + bcmgenet_netif_stop(dev); if (!device_may_wakeup(d)) phy_suspend(dev->phydev); - netif_device_detach(dev); - /* Prepare the device for Wake-on-LAN and switch to the slow clock */ if (device_may_wakeup(d) && priv->wolopts) { ret = bcmgenet_power_down(priv, GENET_POWER_WOL_MAGIC); @@ -3700,8 +3701,6 @@ static int bcmgenet_resume(struct device *d) /* Always enable ring 16 - descriptor ring */ bcmgenet_enable_dma(priv, dma_ctrl); - netif_device_attach(dev); - if (!device_may_wakeup(d)) phy_resume(dev->phydev); @@ -3710,6 +3709,8 @@ static int bcmgenet_resume(struct device *d) bcmgenet_netif_start(dev); + netif_device_attach(dev); + return 0; out_clk_disable: diff --git a/drivers/net/ethernet/broadcom/genet/bcmmii.c 
b/drivers/net/ethernet/broadcom/genet/bcmmii.c index 34af5f1569c8f..de0e24d912fe9 100644 --- a/drivers/net/ethernet/broadcom/genet/bcmmii.c +++ b/drivers/net/ethernet/broadcom/genet/bcmmii.c @@ -342,7 +342,7 @@ static struct device_node *bcmgenet_mii_of_find_mdio(struct bcmgenet_priv *priv) if (!compat) return NULL; - priv->mdio_dn = of_find_compatible_node(dn, NULL, compat); + priv->mdio_dn = of_get_compatible_child(dn, compat); kfree(compat); if (!priv->mdio_dn) { dev_err(kdev, "unable to find MDIO bus node\n"); diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c index e6f28c7942abf..a12962702611f 100644 --- a/drivers/net/ethernet/broadcom/tg3.c +++ b/drivers/net/ethernet/broadcom/tg3.c @@ -12426,6 +12426,7 @@ static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *e { struct tg3 *tp = netdev_priv(dev); int i, irq_sync = 0, err = 0; + bool reset_phy = false; if ((ering->rx_pending > tp->rx_std_ring_mask) || (ering->rx_jumbo_pending > tp->rx_jmb_ring_mask) || @@ -12457,7 +12458,13 @@ static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *e if (netif_running(dev)) { tg3_halt(tp, RESET_KIND_SHUTDOWN, 1); - err = tg3_restart_hw(tp, false); + /* Reset PHY to avoid PHY lock up */ + if (tg3_asic_rev(tp) == ASIC_REV_5717 || + tg3_asic_rev(tp) == ASIC_REV_5719 || + tg3_asic_rev(tp) == ASIC_REV_5720) + reset_phy = true; + + err = tg3_restart_hw(tp, reset_phy); if (!err) tg3_netif_start(tp); } @@ -12491,6 +12498,7 @@ static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam { struct tg3 *tp = netdev_priv(dev); int err = 0; + bool reset_phy = false; if (tp->link_config.autoneg == AUTONEG_ENABLE) tg3_warn_mgmt_link_flap(tp); @@ -12581,7 +12589,13 @@ static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam if (netif_running(dev)) { tg3_halt(tp, RESET_KIND_SHUTDOWN, 1); - err = tg3_restart_hw(tp, false); + /* Reset PHY to avoid PHY lock up */ + if 
(tg3_asic_rev(tp) == ASIC_REV_5717 || + tg3_asic_rev(tp) == ASIC_REV_5719 || + tg3_asic_rev(tp) == ASIC_REV_5720) + reset_phy = true; + + err = tg3_restart_hw(tp, reset_phy); if (!err) tg3_netif_start(tp); } diff --git a/drivers/net/ethernet/cadence/macb_main.c b/drivers/net/ethernet/cadence/macb_main.c index 58b9744c40580..8f4b2f9a8e079 100644 --- a/drivers/net/ethernet/cadence/macb_main.c +++ b/drivers/net/ethernet/cadence/macb_main.c @@ -61,7 +61,8 @@ #define MACB_TX_ERR_FLAGS (MACB_BIT(ISR_TUND) \ | MACB_BIT(ISR_RLE) \ | MACB_BIT(TXERR)) -#define MACB_TX_INT_FLAGS (MACB_TX_ERR_FLAGS | MACB_BIT(TCOMP)) +#define MACB_TX_INT_FLAGS (MACB_TX_ERR_FLAGS | MACB_BIT(TCOMP) \ + | MACB_BIT(TXUBR)) /* Max length of transmit frame must be a multiple of 8 bytes */ #define MACB_TX_LEN_ALIGN 8 @@ -681,6 +682,11 @@ static void macb_set_addr(struct macb *bp, struct macb_dma_desc *desc, dma_addr_ if (bp->hw_dma_cap & HW_DMA_CAP_64B) { desc_64 = macb_64b_desc(bp, desc); desc_64->addrh = upper_32_bits(addr); + /* The low bits of RX address contain the RX_USED bit, clearing + * of which allows packet RX. Make sure the high bits are also + * visible to HW at that point. + */ + dma_wmb(); } #endif desc->addr = lower_32_bits(addr); @@ -929,14 +935,19 @@ static void gem_rx_refill(struct macb_queue *queue) if (entry == bp->rx_ring_size - 1) paddr |= MACB_BIT(RX_WRAP); - macb_set_addr(bp, desc, paddr); desc->ctrl = 0; + /* Setting addr clears RX_USED and allows reception, + * make sure ctrl is cleared first to avoid a race. + */ + dma_wmb(); + macb_set_addr(bp, desc, paddr); /* properly align Ethernet header */ skb_reserve(skb, NET_IP_ALIGN); } else { - desc->addr &= ~MACB_BIT(RX_USED); desc->ctrl = 0; + dma_wmb(); + desc->addr &= ~MACB_BIT(RX_USED); } } @@ -990,11 +1001,15 @@ static int gem_rx(struct macb_queue *queue, int budget) rxused = (desc->addr & MACB_BIT(RX_USED)) ? 
true : false; addr = macb_get_addr(bp, desc); - ctrl = desc->ctrl; if (!rxused) break; + /* Ensure ctrl is at least as up-to-date as rxused */ + dma_rmb(); + + ctrl = desc->ctrl; + queue->rx_tail++; count++; @@ -1169,11 +1184,14 @@ static int macb_rx(struct macb_queue *queue, int budget) /* Make hw descriptor updates visible to CPU */ rmb(); - ctrl = desc->ctrl; - if (!(desc->addr & MACB_BIT(RX_USED))) break; + /* Ensure ctrl is at least as up-to-date as addr */ + dma_rmb(); + + ctrl = desc->ctrl; + if (ctrl & MACB_BIT(RX_SOF)) { if (first_frag != -1) discard_partial_frame(queue, first_frag, tail); @@ -1313,6 +1331,21 @@ static void macb_hresp_error_task(unsigned long data) netif_tx_start_all_queues(dev); } +static void macb_tx_restart(struct macb_queue *queue) +{ + unsigned int head = queue->tx_head; + unsigned int tail = queue->tx_tail; + struct macb *bp = queue->bp; + + if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE) + queue_writel(queue, ISR, MACB_BIT(TXUBR)); + + if (head == tail) + return; + + macb_writel(bp, NCR, macb_readl(bp, NCR) | MACB_BIT(TSTART)); +} + static irqreturn_t macb_interrupt(int irq, void *dev_id) { struct macb_queue *queue = dev_id; @@ -1370,6 +1403,9 @@ static irqreturn_t macb_interrupt(int irq, void *dev_id) if (status & MACB_BIT(TCOMP)) macb_tx_interrupt(queue); + if (status & MACB_BIT(TXUBR)) + macb_tx_restart(queue); + /* Link change detection isn't possible with RMII, so we'll * add that if/when we get our hands on a full-blown MII PHY. 
*/ diff --git a/drivers/net/ethernet/cadence/macb_ptp.c b/drivers/net/ethernet/cadence/macb_ptp.c index cd5296b842290..a6dc47edc4cf6 100644 --- a/drivers/net/ethernet/cadence/macb_ptp.c +++ b/drivers/net/ethernet/cadence/macb_ptp.c @@ -319,6 +319,8 @@ int gem_ptp_txstamp(struct macb_queue *queue, struct sk_buff *skb, desc_ptp = macb_ptp_desc(queue->bp, desc); tx_timestamp = &queue->tx_timestamps[head]; tx_timestamp->skb = skb; + /* ensure ts_1/ts_2 is loaded after ctrl (TX_USED check) */ + dma_rmb(); tx_timestamp->desc_ptp.ts_1 = desc_ptp->ts_1; tx_timestamp->desc_ptp.ts_2 = desc_ptp->ts_2; /* move head */ diff --git a/drivers/net/ethernet/cavium/liquidio/lio_vf_rep.c b/drivers/net/ethernet/cavium/liquidio/lio_vf_rep.c index ddd7431579f4e..c99b59fe4c8fb 100644 --- a/drivers/net/ethernet/cavium/liquidio/lio_vf_rep.c +++ b/drivers/net/ethernet/cavium/liquidio/lio_vf_rep.c @@ -367,13 +367,15 @@ lio_vf_rep_packet_sent_callback(struct octeon_device *oct, struct octeon_soft_command *sc = (struct octeon_soft_command *)buf; struct sk_buff *skb = sc->ctxptr; struct net_device *ndev = skb->dev; + u32 iq_no; dma_unmap_single(&oct->pci_dev->dev, sc->dmadptr, sc->datasize, DMA_TO_DEVICE); dev_kfree_skb_any(skb); + iq_no = sc->iq_no; octeon_free_soft_command(oct, sc); - if (octnet_iq_is_full(oct, sc->iq_no)) + if (octnet_iq_is_full(oct, iq_no)) return; if (netif_queue_stopped(ndev)) diff --git a/drivers/net/ethernet/cavium/thunder/nic_main.c b/drivers/net/ethernet/cavium/thunder/nic_main.c index 55af04fa03a77..6c8dcb65ff031 100644 --- a/drivers/net/ethernet/cavium/thunder/nic_main.c +++ b/drivers/net/ethernet/cavium/thunder/nic_main.c @@ -1441,6 +1441,9 @@ static void nic_remove(struct pci_dev *pdev) { struct nicpf *nic = pci_get_drvdata(pdev); + if (!nic) + return; + if (nic->flags & NIC_SRIOV_ENABLED) pci_disable_sriov(pdev); diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_main.c b/drivers/net/ethernet/cavium/thunder/nicvf_main.c index 768f584f83927..88f8a8fa93cdc 
100644 --- a/drivers/net/ethernet/cavium/thunder/nicvf_main.c +++ b/drivers/net/ethernet/cavium/thunder/nicvf_main.c @@ -1784,6 +1784,7 @@ static int nicvf_xdp_setup(struct nicvf *nic, struct bpf_prog *prog) bool if_up = netif_running(nic->netdev); struct bpf_prog *old_prog; bool bpf_attached = false; + int ret = 0; /* For now just support only the usual MTU sized frames */ if (prog && (dev->mtu > 1500)) { @@ -1817,8 +1818,12 @@ static int nicvf_xdp_setup(struct nicvf *nic, struct bpf_prog *prog) if (nic->xdp_prog) { /* Attach BPF program */ nic->xdp_prog = bpf_prog_add(nic->xdp_prog, nic->rx_queues - 1); - if (!IS_ERR(nic->xdp_prog)) + if (!IS_ERR(nic->xdp_prog)) { bpf_attached = true; + } else { + ret = PTR_ERR(nic->xdp_prog); + nic->xdp_prog = NULL; + } } /* Calculate Tx queues needed for XDP and network stack */ @@ -1830,7 +1835,7 @@ static int nicvf_xdp_setup(struct nicvf *nic, struct bpf_prog *prog) netif_trans_update(nic->netdev); } - return 0; + return ret; } static int nicvf_xdp(struct net_device *netdev, struct netdev_bpf *xdp) diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_queues.c b/drivers/net/ethernet/cavium/thunder/nicvf_queues.c index 187a249ff2d1d..fcaf18fa39048 100644 --- a/drivers/net/ethernet/cavium/thunder/nicvf_queues.c +++ b/drivers/net/ethernet/cavium/thunder/nicvf_queues.c @@ -585,10 +585,12 @@ static void nicvf_free_snd_queue(struct nicvf *nic, struct snd_queue *sq) if (!sq->dmem.base) return; - if (sq->tso_hdrs) + if (sq->tso_hdrs) { dma_free_coherent(&nic->pdev->dev, sq->dmem.q_len * TSO_HEADER_SIZE, sq->tso_hdrs, sq->tso_hdrs_phys); + sq->tso_hdrs = NULL; + } /* Free pending skbs in the queue */ smp_rmb(); diff --git a/drivers/net/ethernet/cortina/gemini.c b/drivers/net/ethernet/cortina/gemini.c index 1c9ad3630c775..dfd1ad0b1cb94 100644 --- a/drivers/net/ethernet/cortina/gemini.c +++ b/drivers/net/ethernet/cortina/gemini.c @@ -661,7 +661,7 @@ static void gmac_clean_txq(struct net_device *netdev, struct gmac_txq *txq, 
u64_stats_update_begin(&port->tx_stats_syncp); port->tx_frag_stats[nfrags]++; - u64_stats_update_end(&port->ir_stats_syncp); + u64_stats_update_end(&port->tx_stats_syncp); } } diff --git a/drivers/net/ethernet/emulex/benet/be.h b/drivers/net/ethernet/emulex/benet/be.h index 58bcee8f0a58b..ce041c90adb02 100644 --- a/drivers/net/ethernet/emulex/benet/be.h +++ b/drivers/net/ethernet/emulex/benet/be.h @@ -185,6 +185,7 @@ static inline void queue_tail_inc(struct be_queue_info *q) struct be_eq_obj { struct be_queue_info q; + char desc[32]; struct be_adapter *adapter; struct napi_struct napi; diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c index 534787291b44f..bff74752cef16 100644 --- a/drivers/net/ethernet/emulex/benet/be_main.c +++ b/drivers/net/ethernet/emulex/benet/be_main.c @@ -3488,11 +3488,9 @@ static int be_msix_register(struct be_adapter *adapter) int status, i, vec; for_all_evt_queues(adapter, eqo, i) { - char irq_name[IFNAMSIZ+4]; - - snprintf(irq_name, sizeof(irq_name), "%s-q%d", netdev->name, i); + sprintf(eqo->desc, "%s-q%d", netdev->name, i); vec = be_msix_vec_get(adapter, eqo); - status = request_irq(vec, be_msix, 0, irq_name, eqo); + status = request_irq(vec, be_msix, 0, eqo->desc, eqo); if (status) goto err_msix; diff --git a/drivers/net/ethernet/faraday/ftmac100.c b/drivers/net/ethernet/faraday/ftmac100.c index a1197d3adbe01..9015bd911bee9 100644 --- a/drivers/net/ethernet/faraday/ftmac100.c +++ b/drivers/net/ethernet/faraday/ftmac100.c @@ -872,11 +872,10 @@ static irqreturn_t ftmac100_interrupt(int irq, void *dev_id) struct net_device *netdev = dev_id; struct ftmac100 *priv = netdev_priv(netdev); - if (likely(netif_running(netdev))) { - /* Disable interrupts for polling */ - ftmac100_disable_all_int(priv); + /* Disable interrupts for polling */ + ftmac100_disable_all_int(priv); + if (likely(netif_running(netdev))) napi_schedule(&priv->napi); - } return IRQ_HANDLED; } diff --git 
a/drivers/net/ethernet/freescale/fman/fman.c b/drivers/net/ethernet/freescale/fman/fman.c index c415ac67cb7be..e80fedb27cee8 100644 --- a/drivers/net/ethernet/freescale/fman/fman.c +++ b/drivers/net/ethernet/freescale/fman/fman.c @@ -2786,7 +2786,7 @@ static struct fman *read_dts_node(struct platform_device *of_dev) if (!muram_node) { dev_err(&of_dev->dev, "%s: could not find MURAM node\n", __func__); - goto fman_node_put; + goto fman_free; } err = of_address_to_resource(muram_node, 0, @@ -2795,11 +2795,10 @@ static struct fman *read_dts_node(struct platform_device *of_dev) of_node_put(muram_node); dev_err(&of_dev->dev, "%s: of_address_to_resource() = %d\n", __func__, err); - goto fman_node_put; + goto fman_free; } of_node_put(muram_node); - of_node_put(fm_node); err = devm_request_irq(&of_dev->dev, irq, fman_irq, IRQF_SHARED, "fman", fman); diff --git a/drivers/net/ethernet/hisilicon/hip04_eth.c b/drivers/net/ethernet/hisilicon/hip04_eth.c index 14374a856d309..6127697ede120 100644 --- a/drivers/net/ethernet/hisilicon/hip04_eth.c +++ b/drivers/net/ethernet/hisilicon/hip04_eth.c @@ -914,10 +914,8 @@ static int hip04_mac_probe(struct platform_device *pdev) } ret = register_netdev(ndev); - if (ret) { - free_netdev(ndev); + if (ret) goto alloc_fail; - } return 0; diff --git a/drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c b/drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c index b52029e26d153..ad1779fc410e6 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c +++ b/drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c @@ -379,6 +379,9 @@ static void hns_ae_stop(struct hnae_handle *handle) hns_ae_ring_enable_all(handle, 0); + /* clean rx fbd. 
*/ + hns_rcb_wait_fbd_clean(handle->qs, handle->q_num, RCB_INT_FLAG_RX); + (void)hns_mac_vm_config_bc_en(mac_cb, 0, false); } diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_gmac.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_gmac.c index 09e4061d1fa60..aa2c25d7a61d8 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_gmac.c +++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_gmac.c @@ -67,11 +67,14 @@ static void hns_gmac_enable(void *mac_drv, enum mac_commom_mode mode) struct mac_driver *drv = (struct mac_driver *)mac_drv; /*enable GE rX/tX */ - if ((mode == MAC_COMM_MODE_TX) || (mode == MAC_COMM_MODE_RX_AND_TX)) + if (mode == MAC_COMM_MODE_TX || mode == MAC_COMM_MODE_RX_AND_TX) dsaf_set_dev_bit(drv, GMAC_PORT_EN_REG, GMAC_PORT_TX_EN_B, 1); - if ((mode == MAC_COMM_MODE_RX) || (mode == MAC_COMM_MODE_RX_AND_TX)) + if (mode == MAC_COMM_MODE_RX || mode == MAC_COMM_MODE_RX_AND_TX) { + /* enable rx pcs */ + dsaf_set_dev_bit(drv, GMAC_PCS_RX_EN_REG, 0, 0); dsaf_set_dev_bit(drv, GMAC_PORT_EN_REG, GMAC_PORT_RX_EN_B, 1); + } } static void hns_gmac_disable(void *mac_drv, enum mac_commom_mode mode) @@ -79,11 +82,14 @@ static void hns_gmac_disable(void *mac_drv, enum mac_commom_mode mode) struct mac_driver *drv = (struct mac_driver *)mac_drv; /*disable GE rX/tX */ - if ((mode == MAC_COMM_MODE_TX) || (mode == MAC_COMM_MODE_RX_AND_TX)) + if (mode == MAC_COMM_MODE_TX || mode == MAC_COMM_MODE_RX_AND_TX) dsaf_set_dev_bit(drv, GMAC_PORT_EN_REG, GMAC_PORT_TX_EN_B, 0); - if ((mode == MAC_COMM_MODE_RX) || (mode == MAC_COMM_MODE_RX_AND_TX)) + if (mode == MAC_COMM_MODE_RX || mode == MAC_COMM_MODE_RX_AND_TX) { + /* disable rx pcs */ + dsaf_set_dev_bit(drv, GMAC_PCS_RX_EN_REG, 0, 1); dsaf_set_dev_bit(drv, GMAC_PORT_EN_REG, GMAC_PORT_RX_EN_B, 0); + } } /* hns_gmac_get_en - get port enable diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c index 6ed6f142427e4..cfdc92de9dc0e 100644 --- 
a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c +++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c @@ -778,6 +778,17 @@ static int hns_mac_register_phy(struct hns_mac_cb *mac_cb) return rc; } +static void hns_mac_remove_phydev(struct hns_mac_cb *mac_cb) +{ + if (!to_acpi_device_node(mac_cb->fw_port) || !mac_cb->phy_dev) + return; + + phy_device_remove(mac_cb->phy_dev); + phy_device_free(mac_cb->phy_dev); + + mac_cb->phy_dev = NULL; +} + #define MAC_MEDIA_TYPE_MAX_LEN 16 static const struct { @@ -1117,7 +1128,11 @@ void hns_mac_uninit(struct dsaf_device *dsaf_dev) int max_port_num = hns_mac_get_max_port_num(dsaf_dev); for (i = 0; i < max_port_num; i++) { + if (!dsaf_dev->mac_cb[i]) + continue; + dsaf_dev->misc_op->cpld_reset_led(dsaf_dev->mac_cb[i]); + hns_mac_remove_phydev(dsaf_dev->mac_cb[i]); dsaf_dev->mac_cb[i] = NULL; } } diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c index e557a4ef5996c..3b9e74be5fbd2 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c +++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c @@ -934,6 +934,62 @@ static void hns_dsaf_tcam_mc_cfg( spin_unlock_bh(&dsaf_dev->tcam_lock); } +/** + * hns_dsaf_tcam_uc_cfg_vague - INT + * @dsaf_dev: dsa fabric device struct pointer + * @address, + * @ptbl_tcam_data, + */ +static void hns_dsaf_tcam_uc_cfg_vague(struct dsaf_device *dsaf_dev, + u32 address, + struct dsaf_tbl_tcam_data *tcam_data, + struct dsaf_tbl_tcam_data *tcam_mask, + struct dsaf_tbl_tcam_ucast_cfg *tcam_uc) +{ + spin_lock_bh(&dsaf_dev->tcam_lock); + hns_dsaf_tbl_tcam_addr_cfg(dsaf_dev, address); + hns_dsaf_tbl_tcam_data_cfg(dsaf_dev, tcam_data); + hns_dsaf_tbl_tcam_ucast_cfg(dsaf_dev, tcam_uc); + hns_dsaf_tbl_tcam_match_cfg(dsaf_dev, tcam_mask); + hns_dsaf_tbl_tcam_data_ucast_pul(dsaf_dev); + + /*Restore Match Data*/ + tcam_mask->tbl_tcam_data_high = 0xffffffff; + tcam_mask->tbl_tcam_data_low = 0xffffffff; + hns_dsaf_tbl_tcam_match_cfg(dsaf_dev, 
tcam_mask); + + spin_unlock_bh(&dsaf_dev->tcam_lock); +} + +/** + * hns_dsaf_tcam_mc_cfg_vague - INT + * @dsaf_dev: dsa fabric device struct pointer + * @address, + * @ptbl_tcam_data, + * @ptbl_tcam_mask + * @ptbl_tcam_mcast + */ +static void hns_dsaf_tcam_mc_cfg_vague(struct dsaf_device *dsaf_dev, + u32 address, + struct dsaf_tbl_tcam_data *tcam_data, + struct dsaf_tbl_tcam_data *tcam_mask, + struct dsaf_tbl_tcam_mcast_cfg *tcam_mc) +{ + spin_lock_bh(&dsaf_dev->tcam_lock); + hns_dsaf_tbl_tcam_addr_cfg(dsaf_dev, address); + hns_dsaf_tbl_tcam_data_cfg(dsaf_dev, tcam_data); + hns_dsaf_tbl_tcam_mcast_cfg(dsaf_dev, tcam_mc); + hns_dsaf_tbl_tcam_match_cfg(dsaf_dev, tcam_mask); + hns_dsaf_tbl_tcam_data_mcast_pul(dsaf_dev); + + /*Restore Match Data*/ + tcam_mask->tbl_tcam_data_high = 0xffffffff; + tcam_mask->tbl_tcam_data_low = 0xffffffff; + hns_dsaf_tbl_tcam_match_cfg(dsaf_dev, tcam_mask); + + spin_unlock_bh(&dsaf_dev->tcam_lock); +} + /** * hns_dsaf_tcam_mc_invld - INT * @dsaf_id: dsa fabric id @@ -1492,6 +1548,27 @@ static u16 hns_dsaf_find_empty_mac_entry(struct dsaf_device *dsaf_dev) return DSAF_INVALID_ENTRY_IDX; } +/** + * hns_dsaf_find_empty_mac_entry_reverse + * search dsa fabric soft empty-entry from the end + * @dsaf_dev: dsa fabric device struct pointer + */ +static u16 hns_dsaf_find_empty_mac_entry_reverse(struct dsaf_device *dsaf_dev) +{ + struct dsaf_drv_priv *priv = hns_dsaf_dev_priv(dsaf_dev); + struct dsaf_drv_soft_mac_tbl *soft_mac_entry; + int i; + + soft_mac_entry = priv->soft_mac_tbl + (DSAF_TCAM_SUM - 1); + for (i = (DSAF_TCAM_SUM - 1); i > 0; i--) { + /* search all entry from end to start.*/ + if (soft_mac_entry->index == DSAF_INVALID_ENTRY_IDX) + return i; + soft_mac_entry--; + } + return DSAF_INVALID_ENTRY_IDX; +} + /** * hns_dsaf_set_mac_key - set mac key * @dsaf_dev: dsa fabric device struct pointer @@ -2166,9 +2243,9 @@ void hns_dsaf_update_stats(struct dsaf_device *dsaf_dev, u32 node_num) DSAF_INODE_LOCAL_ADDR_FALSE_NUM_0_REG + 0x80 * 
(u64)node_num); hw_stats->vlan_drop += dsaf_read_dev(dsaf_dev, - DSAF_INODE_SW_VLAN_TAG_DISC_0_REG + 0x80 * (u64)node_num); + DSAF_INODE_SW_VLAN_TAG_DISC_0_REG + 4 * (u64)node_num); hw_stats->stp_drop += dsaf_read_dev(dsaf_dev, - DSAF_INODE_IN_DATA_STP_DISC_0_REG + 0x80 * (u64)node_num); + DSAF_INODE_IN_DATA_STP_DISC_0_REG + 4 * (u64)node_num); /* pfc pause frame statistics stored in dsaf inode*/ if ((node_num < DSAF_SERVICE_NW_NUM) && !is_ver1) { @@ -2285,237 +2362,237 @@ void hns_dsaf_get_regs(struct dsaf_device *ddev, u32 port, void *data) DSAF_INODE_BD_ORDER_STATUS_0_REG + j * 4); p[223 + i] = dsaf_read_dev(ddev, DSAF_INODE_SW_VLAN_TAG_DISC_0_REG + j * 4); - p[224 + i] = dsaf_read_dev(ddev, + p[226 + i] = dsaf_read_dev(ddev, DSAF_INODE_IN_DATA_STP_DISC_0_REG + j * 4); } - p[227] = dsaf_read_dev(ddev, DSAF_INODE_GE_FC_EN_0_REG + port * 4); + p[229] = dsaf_read_dev(ddev, DSAF_INODE_GE_FC_EN_0_REG + port * 4); for (i = 0; i < DSAF_INODE_NUM / DSAF_COMM_CHN; i++) { j = i * DSAF_COMM_CHN + port; - p[228 + i] = dsaf_read_dev(ddev, + p[230 + i] = dsaf_read_dev(ddev, DSAF_INODE_VC0_IN_PKT_NUM_0_REG + j * 4); } - p[231] = dsaf_read_dev(ddev, - DSAF_INODE_VC1_IN_PKT_NUM_0_REG + port * 4); + p[233] = dsaf_read_dev(ddev, + DSAF_INODE_VC1_IN_PKT_NUM_0_REG + port * 0x80); /* dsaf inode registers */ for (i = 0; i < HNS_DSAF_SBM_NUM(ddev) / DSAF_COMM_CHN; i++) { j = i * DSAF_COMM_CHN + port; - p[232 + i] = dsaf_read_dev(ddev, + p[234 + i] = dsaf_read_dev(ddev, DSAF_SBM_CFG_REG_0_REG + j * 0x80); - p[235 + i] = dsaf_read_dev(ddev, + p[237 + i] = dsaf_read_dev(ddev, DSAF_SBM_BP_CFG_0_XGE_REG_0_REG + j * 0x80); - p[238 + i] = dsaf_read_dev(ddev, + p[240 + i] = dsaf_read_dev(ddev, DSAF_SBM_BP_CFG_1_REG_0_REG + j * 0x80); - p[241 + i] = dsaf_read_dev(ddev, + p[243 + i] = dsaf_read_dev(ddev, DSAF_SBM_BP_CFG_2_XGE_REG_0_REG + j * 0x80); - p[244 + i] = dsaf_read_dev(ddev, + p[246 + i] = dsaf_read_dev(ddev, DSAF_SBM_FREE_CNT_0_0_REG + j * 0x80); - p[245 + i] = dsaf_read_dev(ddev, + 
p[249 + i] = dsaf_read_dev(ddev, DSAF_SBM_FREE_CNT_1_0_REG + j * 0x80); - p[248 + i] = dsaf_read_dev(ddev, + p[252 + i] = dsaf_read_dev(ddev, DSAF_SBM_BP_CNT_0_0_REG + j * 0x80); - p[251 + i] = dsaf_read_dev(ddev, + p[255 + i] = dsaf_read_dev(ddev, DSAF_SBM_BP_CNT_1_0_REG + j * 0x80); - p[254 + i] = dsaf_read_dev(ddev, + p[258 + i] = dsaf_read_dev(ddev, DSAF_SBM_BP_CNT_2_0_REG + j * 0x80); - p[257 + i] = dsaf_read_dev(ddev, + p[261 + i] = dsaf_read_dev(ddev, DSAF_SBM_BP_CNT_3_0_REG + j * 0x80); - p[260 + i] = dsaf_read_dev(ddev, + p[264 + i] = dsaf_read_dev(ddev, DSAF_SBM_INER_ST_0_REG + j * 0x80); - p[263 + i] = dsaf_read_dev(ddev, + p[267 + i] = dsaf_read_dev(ddev, DSAF_SBM_MIB_REQ_FAILED_TC_0_REG + j * 0x80); - p[266 + i] = dsaf_read_dev(ddev, + p[270 + i] = dsaf_read_dev(ddev, DSAF_SBM_LNK_INPORT_CNT_0_REG + j * 0x80); - p[269 + i] = dsaf_read_dev(ddev, + p[273 + i] = dsaf_read_dev(ddev, DSAF_SBM_LNK_DROP_CNT_0_REG + j * 0x80); - p[272 + i] = dsaf_read_dev(ddev, + p[276 + i] = dsaf_read_dev(ddev, DSAF_SBM_INF_OUTPORT_CNT_0_REG + j * 0x80); - p[275 + i] = dsaf_read_dev(ddev, + p[279 + i] = dsaf_read_dev(ddev, DSAF_SBM_LNK_INPORT_TC0_CNT_0_REG + j * 0x80); - p[278 + i] = dsaf_read_dev(ddev, + p[282 + i] = dsaf_read_dev(ddev, DSAF_SBM_LNK_INPORT_TC1_CNT_0_REG + j * 0x80); - p[281 + i] = dsaf_read_dev(ddev, + p[285 + i] = dsaf_read_dev(ddev, DSAF_SBM_LNK_INPORT_TC2_CNT_0_REG + j * 0x80); - p[284 + i] = dsaf_read_dev(ddev, + p[288 + i] = dsaf_read_dev(ddev, DSAF_SBM_LNK_INPORT_TC3_CNT_0_REG + j * 0x80); - p[287 + i] = dsaf_read_dev(ddev, + p[291 + i] = dsaf_read_dev(ddev, DSAF_SBM_LNK_INPORT_TC4_CNT_0_REG + j * 0x80); - p[290 + i] = dsaf_read_dev(ddev, + p[294 + i] = dsaf_read_dev(ddev, DSAF_SBM_LNK_INPORT_TC5_CNT_0_REG + j * 0x80); - p[293 + i] = dsaf_read_dev(ddev, + p[297 + i] = dsaf_read_dev(ddev, DSAF_SBM_LNK_INPORT_TC6_CNT_0_REG + j * 0x80); - p[296 + i] = dsaf_read_dev(ddev, + p[300 + i] = dsaf_read_dev(ddev, DSAF_SBM_LNK_INPORT_TC7_CNT_0_REG + j * 0x80); - 
p[299 + i] = dsaf_read_dev(ddev, + p[303 + i] = dsaf_read_dev(ddev, DSAF_SBM_LNK_REQ_CNT_0_REG + j * 0x80); - p[302 + i] = dsaf_read_dev(ddev, + p[306 + i] = dsaf_read_dev(ddev, DSAF_SBM_LNK_RELS_CNT_0_REG + j * 0x80); - p[305 + i] = dsaf_read_dev(ddev, + p[309 + i] = dsaf_read_dev(ddev, DSAF_SBM_BP_CFG_3_REG_0_REG + j * 0x80); - p[308 + i] = dsaf_read_dev(ddev, + p[312 + i] = dsaf_read_dev(ddev, DSAF_SBM_BP_CFG_4_REG_0_REG + j * 0x80); } /* dsaf onode registers */ for (i = 0; i < DSAF_XOD_NUM; i++) { - p[311 + i] = dsaf_read_dev(ddev, + p[315 + i] = dsaf_read_dev(ddev, DSAF_XOD_ETS_TSA_TC0_TC3_CFG_0_REG + i * 0x90); - p[319 + i] = dsaf_read_dev(ddev, + p[323 + i] = dsaf_read_dev(ddev, DSAF_XOD_ETS_TSA_TC4_TC7_CFG_0_REG + i * 0x90); - p[327 + i] = dsaf_read_dev(ddev, + p[331 + i] = dsaf_read_dev(ddev, DSAF_XOD_ETS_BW_TC0_TC3_CFG_0_REG + i * 0x90); - p[335 + i] = dsaf_read_dev(ddev, + p[339 + i] = dsaf_read_dev(ddev, DSAF_XOD_ETS_BW_TC4_TC7_CFG_0_REG + i * 0x90); - p[343 + i] = dsaf_read_dev(ddev, + p[347 + i] = dsaf_read_dev(ddev, DSAF_XOD_ETS_BW_OFFSET_CFG_0_REG + i * 0x90); - p[351 + i] = dsaf_read_dev(ddev, + p[355 + i] = dsaf_read_dev(ddev, DSAF_XOD_ETS_TOKEN_CFG_0_REG + i * 0x90); } - p[359] = dsaf_read_dev(ddev, DSAF_XOD_PFS_CFG_0_0_REG + port * 0x90); - p[360] = dsaf_read_dev(ddev, DSAF_XOD_PFS_CFG_1_0_REG + port * 0x90); - p[361] = dsaf_read_dev(ddev, DSAF_XOD_PFS_CFG_2_0_REG + port * 0x90); + p[363] = dsaf_read_dev(ddev, DSAF_XOD_PFS_CFG_0_0_REG + port * 0x90); + p[364] = dsaf_read_dev(ddev, DSAF_XOD_PFS_CFG_1_0_REG + port * 0x90); + p[365] = dsaf_read_dev(ddev, DSAF_XOD_PFS_CFG_2_0_REG + port * 0x90); for (i = 0; i < DSAF_XOD_BIG_NUM / DSAF_COMM_CHN; i++) { j = i * DSAF_COMM_CHN + port; - p[362 + i] = dsaf_read_dev(ddev, + p[366 + i] = dsaf_read_dev(ddev, DSAF_XOD_GNT_L_0_REG + j * 0x90); - p[365 + i] = dsaf_read_dev(ddev, + p[369 + i] = dsaf_read_dev(ddev, DSAF_XOD_GNT_H_0_REG + j * 0x90); - p[368 + i] = dsaf_read_dev(ddev, + p[372 + i] = 
dsaf_read_dev(ddev, DSAF_XOD_CONNECT_STATE_0_REG + j * 0x90); - p[371 + i] = dsaf_read_dev(ddev, + p[375 + i] = dsaf_read_dev(ddev, DSAF_XOD_RCVPKT_CNT_0_REG + j * 0x90); - p[374 + i] = dsaf_read_dev(ddev, + p[378 + i] = dsaf_read_dev(ddev, DSAF_XOD_RCVTC0_CNT_0_REG + j * 0x90); - p[377 + i] = dsaf_read_dev(ddev, + p[381 + i] = dsaf_read_dev(ddev, DSAF_XOD_RCVTC1_CNT_0_REG + j * 0x90); - p[380 + i] = dsaf_read_dev(ddev, + p[384 + i] = dsaf_read_dev(ddev, DSAF_XOD_RCVTC2_CNT_0_REG + j * 0x90); - p[383 + i] = dsaf_read_dev(ddev, + p[387 + i] = dsaf_read_dev(ddev, DSAF_XOD_RCVTC3_CNT_0_REG + j * 0x90); - p[386 + i] = dsaf_read_dev(ddev, + p[390 + i] = dsaf_read_dev(ddev, DSAF_XOD_RCVVC0_CNT_0_REG + j * 0x90); - p[389 + i] = dsaf_read_dev(ddev, + p[393 + i] = dsaf_read_dev(ddev, DSAF_XOD_RCVVC1_CNT_0_REG + j * 0x90); } - p[392] = dsaf_read_dev(ddev, + p[396] = dsaf_read_dev(ddev, DSAF_XOD_XGE_RCVIN0_CNT_0_REG + port * 0x90); - p[393] = dsaf_read_dev(ddev, + p[397] = dsaf_read_dev(ddev, DSAF_XOD_XGE_RCVIN1_CNT_0_REG + port * 0x90); - p[394] = dsaf_read_dev(ddev, + p[398] = dsaf_read_dev(ddev, DSAF_XOD_XGE_RCVIN2_CNT_0_REG + port * 0x90); - p[395] = dsaf_read_dev(ddev, + p[399] = dsaf_read_dev(ddev, DSAF_XOD_XGE_RCVIN3_CNT_0_REG + port * 0x90); - p[396] = dsaf_read_dev(ddev, + p[400] = dsaf_read_dev(ddev, DSAF_XOD_XGE_RCVIN4_CNT_0_REG + port * 0x90); - p[397] = dsaf_read_dev(ddev, + p[401] = dsaf_read_dev(ddev, DSAF_XOD_XGE_RCVIN5_CNT_0_REG + port * 0x90); - p[398] = dsaf_read_dev(ddev, + p[402] = dsaf_read_dev(ddev, DSAF_XOD_XGE_RCVIN6_CNT_0_REG + port * 0x90); - p[399] = dsaf_read_dev(ddev, + p[403] = dsaf_read_dev(ddev, DSAF_XOD_XGE_RCVIN7_CNT_0_REG + port * 0x90); - p[400] = dsaf_read_dev(ddev, + p[404] = dsaf_read_dev(ddev, DSAF_XOD_PPE_RCVIN0_CNT_0_REG + port * 0x90); - p[401] = dsaf_read_dev(ddev, + p[405] = dsaf_read_dev(ddev, DSAF_XOD_PPE_RCVIN1_CNT_0_REG + port * 0x90); - p[402] = dsaf_read_dev(ddev, + p[406] = dsaf_read_dev(ddev, 
DSAF_XOD_ROCEE_RCVIN0_CNT_0_REG + port * 0x90); - p[403] = dsaf_read_dev(ddev, + p[407] = dsaf_read_dev(ddev, DSAF_XOD_ROCEE_RCVIN1_CNT_0_REG + port * 0x90); - p[404] = dsaf_read_dev(ddev, + p[408] = dsaf_read_dev(ddev, DSAF_XOD_FIFO_STATUS_0_REG + port * 0x90); /* dsaf voq registers */ for (i = 0; i < DSAF_VOQ_NUM / DSAF_COMM_CHN; i++) { j = (i * DSAF_COMM_CHN + port) * 0x90; - p[405 + i] = dsaf_read_dev(ddev, + p[409 + i] = dsaf_read_dev(ddev, DSAF_VOQ_ECC_INVERT_EN_0_REG + j); - p[408 + i] = dsaf_read_dev(ddev, + p[412 + i] = dsaf_read_dev(ddev, DSAF_VOQ_SRAM_PKT_NUM_0_REG + j); - p[411 + i] = dsaf_read_dev(ddev, DSAF_VOQ_IN_PKT_NUM_0_REG + j); - p[414 + i] = dsaf_read_dev(ddev, + p[415 + i] = dsaf_read_dev(ddev, DSAF_VOQ_IN_PKT_NUM_0_REG + j); + p[418 + i] = dsaf_read_dev(ddev, DSAF_VOQ_OUT_PKT_NUM_0_REG + j); - p[417 + i] = dsaf_read_dev(ddev, + p[421 + i] = dsaf_read_dev(ddev, DSAF_VOQ_ECC_ERR_ADDR_0_REG + j); - p[420 + i] = dsaf_read_dev(ddev, DSAF_VOQ_BP_STATUS_0_REG + j); - p[423 + i] = dsaf_read_dev(ddev, DSAF_VOQ_SPUP_IDLE_0_REG + j); - p[426 + i] = dsaf_read_dev(ddev, + p[424 + i] = dsaf_read_dev(ddev, DSAF_VOQ_BP_STATUS_0_REG + j); + p[427 + i] = dsaf_read_dev(ddev, DSAF_VOQ_SPUP_IDLE_0_REG + j); + p[430 + i] = dsaf_read_dev(ddev, DSAF_VOQ_XGE_XOD_REQ_0_0_REG + j); - p[429 + i] = dsaf_read_dev(ddev, + p[433 + i] = dsaf_read_dev(ddev, DSAF_VOQ_XGE_XOD_REQ_1_0_REG + j); - p[432 + i] = dsaf_read_dev(ddev, + p[436 + i] = dsaf_read_dev(ddev, DSAF_VOQ_PPE_XOD_REQ_0_REG + j); - p[435 + i] = dsaf_read_dev(ddev, + p[439 + i] = dsaf_read_dev(ddev, DSAF_VOQ_ROCEE_XOD_REQ_0_REG + j); - p[438 + i] = dsaf_read_dev(ddev, + p[442 + i] = dsaf_read_dev(ddev, DSAF_VOQ_BP_ALL_THRD_0_REG + j); } /* dsaf tbl registers */ - p[441] = dsaf_read_dev(ddev, DSAF_TBL_CTRL_0_REG); - p[442] = dsaf_read_dev(ddev, DSAF_TBL_INT_MSK_0_REG); - p[443] = dsaf_read_dev(ddev, DSAF_TBL_INT_SRC_0_REG); - p[444] = dsaf_read_dev(ddev, DSAF_TBL_INT_STS_0_REG); - p[445] = dsaf_read_dev(ddev, 
DSAF_TBL_TCAM_ADDR_0_REG); - p[446] = dsaf_read_dev(ddev, DSAF_TBL_LINE_ADDR_0_REG); - p[447] = dsaf_read_dev(ddev, DSAF_TBL_TCAM_HIGH_0_REG); - p[448] = dsaf_read_dev(ddev, DSAF_TBL_TCAM_LOW_0_REG); - p[449] = dsaf_read_dev(ddev, DSAF_TBL_TCAM_MCAST_CFG_4_0_REG); - p[450] = dsaf_read_dev(ddev, DSAF_TBL_TCAM_MCAST_CFG_3_0_REG); - p[451] = dsaf_read_dev(ddev, DSAF_TBL_TCAM_MCAST_CFG_2_0_REG); - p[452] = dsaf_read_dev(ddev, DSAF_TBL_TCAM_MCAST_CFG_1_0_REG); - p[453] = dsaf_read_dev(ddev, DSAF_TBL_TCAM_MCAST_CFG_0_0_REG); - p[454] = dsaf_read_dev(ddev, DSAF_TBL_TCAM_UCAST_CFG_0_REG); - p[455] = dsaf_read_dev(ddev, DSAF_TBL_LIN_CFG_0_REG); - p[456] = dsaf_read_dev(ddev, DSAF_TBL_TCAM_RDATA_HIGH_0_REG); - p[457] = dsaf_read_dev(ddev, DSAF_TBL_TCAM_RDATA_LOW_0_REG); - p[458] = dsaf_read_dev(ddev, DSAF_TBL_TCAM_RAM_RDATA4_0_REG); - p[459] = dsaf_read_dev(ddev, DSAF_TBL_TCAM_RAM_RDATA3_0_REG); - p[460] = dsaf_read_dev(ddev, DSAF_TBL_TCAM_RAM_RDATA2_0_REG); - p[461] = dsaf_read_dev(ddev, DSAF_TBL_TCAM_RAM_RDATA1_0_REG); - p[462] = dsaf_read_dev(ddev, DSAF_TBL_TCAM_RAM_RDATA0_0_REG); - p[463] = dsaf_read_dev(ddev, DSAF_TBL_LIN_RDATA_0_REG); + p[445] = dsaf_read_dev(ddev, DSAF_TBL_CTRL_0_REG); + p[446] = dsaf_read_dev(ddev, DSAF_TBL_INT_MSK_0_REG); + p[447] = dsaf_read_dev(ddev, DSAF_TBL_INT_SRC_0_REG); + p[448] = dsaf_read_dev(ddev, DSAF_TBL_INT_STS_0_REG); + p[449] = dsaf_read_dev(ddev, DSAF_TBL_TCAM_ADDR_0_REG); + p[450] = dsaf_read_dev(ddev, DSAF_TBL_LINE_ADDR_0_REG); + p[451] = dsaf_read_dev(ddev, DSAF_TBL_TCAM_HIGH_0_REG); + p[452] = dsaf_read_dev(ddev, DSAF_TBL_TCAM_LOW_0_REG); + p[453] = dsaf_read_dev(ddev, DSAF_TBL_TCAM_MCAST_CFG_4_0_REG); + p[454] = dsaf_read_dev(ddev, DSAF_TBL_TCAM_MCAST_CFG_3_0_REG); + p[455] = dsaf_read_dev(ddev, DSAF_TBL_TCAM_MCAST_CFG_2_0_REG); + p[456] = dsaf_read_dev(ddev, DSAF_TBL_TCAM_MCAST_CFG_1_0_REG); + p[457] = dsaf_read_dev(ddev, DSAF_TBL_TCAM_MCAST_CFG_0_0_REG); + p[458] = dsaf_read_dev(ddev, DSAF_TBL_TCAM_UCAST_CFG_0_REG); + p[459] = 
dsaf_read_dev(ddev, DSAF_TBL_LIN_CFG_0_REG); + p[460] = dsaf_read_dev(ddev, DSAF_TBL_TCAM_RDATA_HIGH_0_REG); + p[461] = dsaf_read_dev(ddev, DSAF_TBL_TCAM_RDATA_LOW_0_REG); + p[462] = dsaf_read_dev(ddev, DSAF_TBL_TCAM_RAM_RDATA4_0_REG); + p[463] = dsaf_read_dev(ddev, DSAF_TBL_TCAM_RAM_RDATA3_0_REG); + p[464] = dsaf_read_dev(ddev, DSAF_TBL_TCAM_RAM_RDATA2_0_REG); + p[465] = dsaf_read_dev(ddev, DSAF_TBL_TCAM_RAM_RDATA1_0_REG); + p[466] = dsaf_read_dev(ddev, DSAF_TBL_TCAM_RAM_RDATA0_0_REG); + p[467] = dsaf_read_dev(ddev, DSAF_TBL_LIN_RDATA_0_REG); for (i = 0; i < DSAF_SW_PORT_NUM; i++) { j = i * 0x8; - p[464 + 2 * i] = dsaf_read_dev(ddev, + p[468 + 2 * i] = dsaf_read_dev(ddev, DSAF_TBL_DA0_MIS_INFO1_0_REG + j); - p[465 + 2 * i] = dsaf_read_dev(ddev, + p[469 + 2 * i] = dsaf_read_dev(ddev, DSAF_TBL_DA0_MIS_INFO0_0_REG + j); } - p[480] = dsaf_read_dev(ddev, DSAF_TBL_SA_MIS_INFO2_0_REG); - p[481] = dsaf_read_dev(ddev, DSAF_TBL_SA_MIS_INFO1_0_REG); - p[482] = dsaf_read_dev(ddev, DSAF_TBL_SA_MIS_INFO0_0_REG); - p[483] = dsaf_read_dev(ddev, DSAF_TBL_PUL_0_REG); - p[484] = dsaf_read_dev(ddev, DSAF_TBL_OLD_RSLT_0_REG); - p[485] = dsaf_read_dev(ddev, DSAF_TBL_OLD_SCAN_VAL_0_REG); - p[486] = dsaf_read_dev(ddev, DSAF_TBL_DFX_CTRL_0_REG); - p[487] = dsaf_read_dev(ddev, DSAF_TBL_DFX_STAT_0_REG); - p[488] = dsaf_read_dev(ddev, DSAF_TBL_DFX_STAT_2_0_REG); - p[489] = dsaf_read_dev(ddev, DSAF_TBL_LKUP_NUM_I_0_REG); - p[490] = dsaf_read_dev(ddev, DSAF_TBL_LKUP_NUM_O_0_REG); - p[491] = dsaf_read_dev(ddev, DSAF_TBL_UCAST_BCAST_MIS_INFO_0_0_REG); + p[484] = dsaf_read_dev(ddev, DSAF_TBL_SA_MIS_INFO2_0_REG); + p[485] = dsaf_read_dev(ddev, DSAF_TBL_SA_MIS_INFO1_0_REG); + p[486] = dsaf_read_dev(ddev, DSAF_TBL_SA_MIS_INFO0_0_REG); + p[487] = dsaf_read_dev(ddev, DSAF_TBL_PUL_0_REG); + p[488] = dsaf_read_dev(ddev, DSAF_TBL_OLD_RSLT_0_REG); + p[489] = dsaf_read_dev(ddev, DSAF_TBL_OLD_SCAN_VAL_0_REG); + p[490] = dsaf_read_dev(ddev, DSAF_TBL_DFX_CTRL_0_REG); + p[491] = dsaf_read_dev(ddev, 
DSAF_TBL_DFX_STAT_0_REG); + p[492] = dsaf_read_dev(ddev, DSAF_TBL_DFX_STAT_2_0_REG); + p[493] = dsaf_read_dev(ddev, DSAF_TBL_LKUP_NUM_I_0_REG); + p[494] = dsaf_read_dev(ddev, DSAF_TBL_LKUP_NUM_O_0_REG); + p[495] = dsaf_read_dev(ddev, DSAF_TBL_UCAST_BCAST_MIS_INFO_0_0_REG); /* dsaf other registers */ - p[492] = dsaf_read_dev(ddev, DSAF_INODE_FIFO_WL_0_REG + port * 0x4); - p[493] = dsaf_read_dev(ddev, DSAF_ONODE_FIFO_WL_0_REG + port * 0x4); - p[494] = dsaf_read_dev(ddev, DSAF_XGE_GE_WORK_MODE_0_REG + port * 0x4); - p[495] = dsaf_read_dev(ddev, + p[496] = dsaf_read_dev(ddev, DSAF_INODE_FIFO_WL_0_REG + port * 0x4); + p[497] = dsaf_read_dev(ddev, DSAF_ONODE_FIFO_WL_0_REG + port * 0x4); + p[498] = dsaf_read_dev(ddev, DSAF_XGE_GE_WORK_MODE_0_REG + port * 0x4); + p[499] = dsaf_read_dev(ddev, DSAF_XGE_APP_RX_LINK_UP_0_REG + port * 0x4); - p[496] = dsaf_read_dev(ddev, DSAF_NETPORT_CTRL_SIG_0_REG + port * 0x4); - p[497] = dsaf_read_dev(ddev, DSAF_XGE_CTRL_SIG_CFG_0_REG + port * 0x4); + p[500] = dsaf_read_dev(ddev, DSAF_NETPORT_CTRL_SIG_0_REG + port * 0x4); + p[501] = dsaf_read_dev(ddev, DSAF_XGE_CTRL_SIG_CFG_0_REG + port * 0x4); if (!is_ver1) - p[498] = dsaf_read_dev(ddev, DSAF_PAUSE_CFG_REG + port * 0x4); + p[502] = dsaf_read_dev(ddev, DSAF_PAUSE_CFG_REG + port * 0x4); /* mark end of dsaf regs */ - for (i = 499; i < 504; i++) + for (i = 503; i < 504; i++) p[i] = 0xdddddddd; } @@ -2673,58 +2750,156 @@ int hns_dsaf_get_regs_count(void) return DSAF_DUMP_REGS_NUM; } -/* Reserve the last TCAM entry for promisc support */ -#define dsaf_promisc_tcam_entry(port) \ - (DSAF_TCAM_SUM - DSAFV2_MAC_FUZZY_TCAM_NUM + (port)) -void hns_dsaf_set_promisc_tcam(struct dsaf_device *dsaf_dev, - u32 port, bool enable) +static void set_promisc_tcam_enable(struct dsaf_device *dsaf_dev, u32 port) { + struct dsaf_tbl_tcam_ucast_cfg tbl_tcam_ucast = {0, 1, 0, 0, 0x80}; + struct dsaf_tbl_tcam_data tbl_tcam_data_mc = {0x01000000, port}; + struct dsaf_tbl_tcam_data tbl_tcam_mask_uc = {0x01000000, 0xf}; + 
struct dsaf_tbl_tcam_mcast_cfg tbl_tcam_mcast = {0, 0, {0} }; struct dsaf_drv_priv *priv = hns_dsaf_dev_priv(dsaf_dev); - struct dsaf_drv_soft_mac_tbl *soft_mac_entry = priv->soft_mac_tbl; - u16 entry_index; - struct dsaf_drv_tbl_tcam_key tbl_tcam_data, tbl_tcam_mask; - struct dsaf_tbl_tcam_mcast_cfg mac_data = {0}; + struct dsaf_tbl_tcam_data tbl_tcam_data_uc = {0, port}; + struct dsaf_drv_mac_single_dest_entry mask_entry; + struct dsaf_drv_tbl_tcam_key temp_key, mask_key; + struct dsaf_drv_soft_mac_tbl *soft_mac_entry; + u16 entry_index = DSAF_INVALID_ENTRY_IDX; + struct dsaf_drv_tbl_tcam_key mac_key; + struct hns_mac_cb *mac_cb; + u8 addr[ETH_ALEN] = {0}; + u8 port_num; + u16 mskid; + + /* promisc use vague table match with vlanid = 0 & macaddr = 0 */ + hns_dsaf_set_mac_key(dsaf_dev, &mac_key, 0x00, port, addr); + entry_index = hns_dsaf_find_soft_mac_entry(dsaf_dev, &mac_key); + if (entry_index != DSAF_INVALID_ENTRY_IDX) + return; + + /* put promisc tcam entry in the end. */ + /* 1. set promisc unicast vague tcam entry. */ + entry_index = hns_dsaf_find_empty_mac_entry_reverse(dsaf_dev); + if (entry_index == DSAF_INVALID_ENTRY_IDX) { + dev_err(dsaf_dev->dev, + "enable uc promisc failed (port:%#x)\n", + port); + return; + } + + mac_cb = dsaf_dev->mac_cb[port]; + (void)hns_mac_get_inner_port_num(mac_cb, 0, &port_num); + tbl_tcam_ucast.tbl_ucast_out_port = port_num; - if ((AE_IS_VER1(dsaf_dev->dsaf_ver)) || HNS_DSAF_IS_DEBUG(dsaf_dev)) + /* config uc vague table */ + hns_dsaf_tcam_uc_cfg_vague(dsaf_dev, entry_index, &tbl_tcam_data_uc, + &tbl_tcam_mask_uc, &tbl_tcam_ucast); + + /* update software entry */ + soft_mac_entry = priv->soft_mac_tbl; + soft_mac_entry += entry_index; + soft_mac_entry->index = entry_index; + soft_mac_entry->tcam_key.high.val = mac_key.high.val; + soft_mac_entry->tcam_key.low.val = mac_key.low.val; + /* step back to the START for mc. */ + soft_mac_entry = priv->soft_mac_tbl; + + /* 2. set promisc multicast vague tcam entry. 
*/ + entry_index = hns_dsaf_find_empty_mac_entry_reverse(dsaf_dev); + if (entry_index == DSAF_INVALID_ENTRY_IDX) { + dev_err(dsaf_dev->dev, + "enable mc promisc failed (port:%#x)\n", + port); return; + } + + memset(&mask_entry, 0x0, sizeof(mask_entry)); + memset(&mask_key, 0x0, sizeof(mask_key)); + memset(&temp_key, 0x0, sizeof(temp_key)); + mask_entry.addr[0] = 0x01; + hns_dsaf_set_mac_key(dsaf_dev, &mask_key, mask_entry.in_vlan_id, + port, mask_entry.addr); + tbl_tcam_mcast.tbl_mcast_item_vld = 1; + tbl_tcam_mcast.tbl_mcast_old_en = 0; - /* find the tcam entry index for promisc */ - entry_index = dsaf_promisc_tcam_entry(port); - - memset(&tbl_tcam_data, 0, sizeof(tbl_tcam_data)); - memset(&tbl_tcam_mask, 0, sizeof(tbl_tcam_mask)); - - /* config key mask */ - if (enable) { - dsaf_set_field(tbl_tcam_data.low.bits.port_vlan, - DSAF_TBL_TCAM_KEY_PORT_M, - DSAF_TBL_TCAM_KEY_PORT_S, port); - dsaf_set_field(tbl_tcam_mask.low.bits.port_vlan, - DSAF_TBL_TCAM_KEY_PORT_M, - DSAF_TBL_TCAM_KEY_PORT_S, 0xf); - - /* SUB_QID */ - dsaf_set_bit(mac_data.tbl_mcast_port_msk[0], - DSAF_SERVICE_NW_NUM, true); - mac_data.tbl_mcast_item_vld = true; /* item_vld bit */ + if (port < DSAF_SERVICE_NW_NUM) { + mskid = port; + } else if (port >= DSAF_BASE_INNER_PORT_NUM) { + mskid = port - DSAF_BASE_INNER_PORT_NUM + DSAF_SERVICE_NW_NUM; } else { - mac_data.tbl_mcast_item_vld = false; /* item_vld bit */ + dev_err(dsaf_dev->dev, "%s,pnum(%d)error,key(%#x:%#x)\n", + dsaf_dev->ae_dev.name, port, + mask_key.high.val, mask_key.low.val); + return; } - dev_dbg(dsaf_dev->dev, - "set_promisc_entry, %s Mac key(%#x:%#x) entry_index%d\n", - dsaf_dev->ae_dev.name, tbl_tcam_data.high.val, - tbl_tcam_data.low.val, entry_index); + dsaf_set_bit(tbl_tcam_mcast.tbl_mcast_port_msk[mskid / 32], + mskid % 32, 1); + memcpy(&temp_key, &mask_key, sizeof(mask_key)); + hns_dsaf_tcam_mc_cfg_vague(dsaf_dev, entry_index, &tbl_tcam_data_mc, + (struct dsaf_tbl_tcam_data *)(&mask_key), + &tbl_tcam_mcast); + + /* update 
software entry */ + soft_mac_entry += entry_index; + soft_mac_entry->index = entry_index; + soft_mac_entry->tcam_key.high.val = temp_key.high.val; + soft_mac_entry->tcam_key.low.val = temp_key.low.val; +} - /* config promisc entry with mask */ - hns_dsaf_tcam_mc_cfg(dsaf_dev, entry_index, - (struct dsaf_tbl_tcam_data *)&tbl_tcam_data, - (struct dsaf_tbl_tcam_data *)&tbl_tcam_mask, - &mac_data); +static void set_promisc_tcam_disable(struct dsaf_device *dsaf_dev, u32 port) +{ + struct dsaf_tbl_tcam_data tbl_tcam_data_mc = {0x01000000, port}; + struct dsaf_tbl_tcam_ucast_cfg tbl_tcam_ucast = {0, 0, 0, 0, 0}; + struct dsaf_tbl_tcam_mcast_cfg tbl_tcam_mcast = {0, 0, {0} }; + struct dsaf_drv_priv *priv = hns_dsaf_dev_priv(dsaf_dev); + struct dsaf_tbl_tcam_data tbl_tcam_data_uc = {0, 0}; + struct dsaf_tbl_tcam_data tbl_tcam_mask = {0, 0}; + struct dsaf_drv_soft_mac_tbl *soft_mac_entry; + u16 entry_index = DSAF_INVALID_ENTRY_IDX; + struct dsaf_drv_tbl_tcam_key mac_key; + u8 addr[ETH_ALEN] = {0}; - /* config software entry */ + /* 1. delete uc vague tcam entry. */ + /* promisc use vague table match with vlanid = 0 & macaddr = 0 */ + hns_dsaf_set_mac_key(dsaf_dev, &mac_key, 0x00, port, addr); + entry_index = hns_dsaf_find_soft_mac_entry(dsaf_dev, &mac_key); + + if (entry_index == DSAF_INVALID_ENTRY_IDX) + return; + + /* config uc vague table */ + hns_dsaf_tcam_uc_cfg_vague(dsaf_dev, entry_index, &tbl_tcam_data_uc, + &tbl_tcam_mask, &tbl_tcam_ucast); + /* update soft management table. */ + soft_mac_entry = priv->soft_mac_tbl; + soft_mac_entry += entry_index; + soft_mac_entry->index = DSAF_INVALID_ENTRY_IDX; + /* step back to the START for mc. */ + soft_mac_entry = priv->soft_mac_tbl; + + /* 2. delete mc vague tcam entry. 
*/ + addr[0] = 0x01; + memset(&mac_key, 0x0, sizeof(mac_key)); + hns_dsaf_set_mac_key(dsaf_dev, &mac_key, 0x00, port, addr); + entry_index = hns_dsaf_find_soft_mac_entry(dsaf_dev, &mac_key); + + if (entry_index == DSAF_INVALID_ENTRY_IDX) + return; + + /* config mc vague table */ + hns_dsaf_tcam_mc_cfg_vague(dsaf_dev, entry_index, &tbl_tcam_data_mc, + &tbl_tcam_mask, &tbl_tcam_mcast); + /* update soft management table. */ soft_mac_entry += entry_index; - soft_mac_entry->index = enable ? entry_index : DSAF_INVALID_ENTRY_IDX; + soft_mac_entry->index = DSAF_INVALID_ENTRY_IDX; +} + +/* Reserve the last TCAM entry for promisc support */ +void hns_dsaf_set_promisc_tcam(struct dsaf_device *dsaf_dev, + u32 port, bool enable) +{ + if (enable) + set_promisc_tcam_enable(dsaf_dev, port); + else + set_promisc_tcam_disable(dsaf_dev, port); } int hns_dsaf_wait_pkt_clean(struct dsaf_device *dsaf_dev, int port) diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_reg.h b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_reg.h index 74d935d82cbc6..b9733b0b84826 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_reg.h +++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_reg.h @@ -176,7 +176,7 @@ #define DSAF_INODE_IN_DATA_STP_DISC_0_REG 0x1A50 #define DSAF_INODE_GE_FC_EN_0_REG 0x1B00 #define DSAF_INODE_VC0_IN_PKT_NUM_0_REG 0x1B50 -#define DSAF_INODE_VC1_IN_PKT_NUM_0_REG 0x1C00 +#define DSAF_INODE_VC1_IN_PKT_NUM_0_REG 0x103C #define DSAF_INODE_IN_PRIO_PAUSE_BASE_REG 0x1C00 #define DSAF_INODE_IN_PRIO_PAUSE_BASE_OFFSET 0x100 #define DSAF_INODE_IN_PRIO_PAUSE_OFFSET 0x50 @@ -404,11 +404,11 @@ #define RCB_ECC_ERR_ADDR4_REG 0x460 #define RCB_ECC_ERR_ADDR5_REG 0x464 -#define RCB_COM_SF_CFG_INTMASK_RING 0x480 -#define RCB_COM_SF_CFG_RING_STS 0x484 -#define RCB_COM_SF_CFG_RING 0x488 -#define RCB_COM_SF_CFG_INTMASK_BD 0x48C -#define RCB_COM_SF_CFG_BD_RINT_STS 0x470 +#define RCB_COM_SF_CFG_INTMASK_RING 0x470 +#define RCB_COM_SF_CFG_RING_STS 0x474 +#define RCB_COM_SF_CFG_RING 0x478 +#define 
RCB_COM_SF_CFG_INTMASK_BD 0x47C +#define RCB_COM_SF_CFG_BD_RINT_STS 0x480 #define RCB_COM_RCB_RD_BD_BUSY 0x490 #define RCB_COM_RCB_FBD_CRT_EN 0x494 #define RCB_COM_AXI_WR_ERR_INTMASK 0x498 @@ -534,6 +534,7 @@ #define GMAC_LD_LINK_COUNTER_REG 0x01D0UL #define GMAC_LOOP_REG 0x01DCUL #define GMAC_RECV_CONTROL_REG 0x01E0UL +#define GMAC_PCS_RX_EN_REG 0x01E4UL #define GMAC_VLAN_CODE_REG 0x01E8UL #define GMAC_RX_OVERRUN_CNT_REG 0x01ECUL #define GMAC_RX_LENGTHFIELD_ERR_CNT_REG 0x01F4UL diff --git a/drivers/net/ethernet/hisilicon/hns/hns_enet.c b/drivers/net/ethernet/hisilicon/hns/hns_enet.c index 28e907831b0ed..6242249c9f4c5 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_enet.c +++ b/drivers/net/ethernet/hisilicon/hns/hns_enet.c @@ -1186,6 +1186,9 @@ int hns_nic_init_phy(struct net_device *ndev, struct hnae_handle *h) if (h->phy_if == PHY_INTERFACE_MODE_XGMII) phy_dev->autoneg = false; + if (h->phy_if == PHY_INTERFACE_MODE_SGMII) + phy_stop(phy_dev); + return 0; } @@ -1281,6 +1284,22 @@ static int hns_nic_init_affinity_mask(int q_num, int ring_idx, return cpu; } +static void hns_nic_free_irq(int q_num, struct hns_nic_priv *priv) +{ + int i; + + for (i = 0; i < q_num * 2; i++) { + if (priv->ring_data[i].ring->irq_init_flag == RCB_IRQ_INITED) { + irq_set_affinity_hint(priv->ring_data[i].ring->irq, + NULL); + free_irq(priv->ring_data[i].ring->irq, + &priv->ring_data[i]); + priv->ring_data[i].ring->irq_init_flag = + RCB_IRQ_NOT_INITED; + } + } +} + static int hns_nic_init_irq(struct hns_nic_priv *priv) { struct hnae_handle *h = priv->ae_handle; @@ -1306,7 +1325,7 @@ static int hns_nic_init_irq(struct hns_nic_priv *priv) if (ret) { netdev_err(priv->netdev, "request irq(%d) fail\n", rd->ring->irq); - return ret; + goto out_free_irq; } disable_irq(rd->ring->irq); @@ -1321,6 +1340,10 @@ static int hns_nic_init_irq(struct hns_nic_priv *priv) } return 0; + +out_free_irq: + hns_nic_free_irq(h->q_num, priv); + return ret; } static int hns_nic_net_up(struct net_device *ndev) @@ 
-1330,6 +1353,9 @@ static int hns_nic_net_up(struct net_device *ndev) int i, j; int ret; + if (!test_bit(NIC_STATE_DOWN, &priv->state)) + return 0; + ret = hns_nic_init_irq(priv); if (ret != 0) { netdev_err(ndev, "hns init irq failed! ret=%d\n", ret); @@ -1365,6 +1391,7 @@ static int hns_nic_net_up(struct net_device *ndev) for (j = i - 1; j >= 0; j--) hns_nic_ring_close(ndev, j); + hns_nic_free_irq(h->q_num, priv); set_bit(NIC_STATE_DOWN, &priv->state); return ret; @@ -1482,11 +1509,19 @@ static int hns_nic_net_stop(struct net_device *ndev) } static void hns_tx_timeout_reset(struct hns_nic_priv *priv); +#define HNS_TX_TIMEO_LIMIT (40 * HZ) static void hns_nic_net_timeout(struct net_device *ndev) { struct hns_nic_priv *priv = netdev_priv(ndev); - hns_tx_timeout_reset(priv); + if (ndev->watchdog_timeo < HNS_TX_TIMEO_LIMIT) { + ndev->watchdog_timeo *= 2; + netdev_info(ndev, "watchdog_timo changed to %d.\n", + ndev->watchdog_timeo); + } else { + ndev->watchdog_timeo = HNS_NIC_TX_TIMEOUT; + hns_tx_timeout_reset(priv); + } } static int hns_nic_do_ioctl(struct net_device *netdev, struct ifreq *ifr, @@ -2049,11 +2084,11 @@ static void hns_nic_service_task(struct work_struct *work) = container_of(work, struct hns_nic_priv, service_task); struct hnae_handle *h = priv->ae_handle; + hns_nic_reset_subtask(priv); hns_nic_update_link_status(priv->netdev); h->dev->ops->update_led_status(h); hns_nic_update_stats(priv->netdev); - hns_nic_reset_subtask(priv); hns_nic_service_event_complete(priv); } @@ -2339,7 +2374,7 @@ static int hns_nic_dev_probe(struct platform_device *pdev) ndev->min_mtu = MAC_MIN_MTU; switch (priv->enet_ver) { case AE_VERSION_2: - ndev->features |= NETIF_F_TSO | NETIF_F_TSO6; + ndev->features |= NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_NTUPLE; ndev->hw_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM | NETIF_F_SG | NETIF_F_GSO | NETIF_F_GRO | NETIF_F_TSO | NETIF_F_TSO6; diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 
b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c index 955c4ab18b03b..b7b2f8254ce15 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c @@ -1915,6 +1915,7 @@ static int is_valid_clean_head(struct hns3_enet_ring *ring, int h) bool hns3_clean_tx_ring(struct hns3_enet_ring *ring, int budget) { struct net_device *netdev = ring->tqp->handle->kinfo.netdev; + struct hns3_nic_priv *priv = netdev_priv(netdev); struct netdev_queue *dev_queue; int bytes, pkts; int head; @@ -1961,7 +1962,8 @@ bool hns3_clean_tx_ring(struct hns3_enet_ring *ring, int budget) * sees the new next_to_clean. */ smp_mb(); - if (netif_tx_queue_stopped(dev_queue)) { + if (netif_tx_queue_stopped(dev_queue) && + !test_bit(HNS3_NIC_STATE_DOWN, &priv->state)) { netif_tx_wake_queue(dev_queue); ring->stats.restart_queue++; } diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c b/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c index f70ee6910ee27..9684ad015c429 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c @@ -309,7 +309,7 @@ static void hns3_self_test(struct net_device *ndev, h->flags & HNAE3_SUPPORT_SERDES_LOOPBACK; if (if_running) - dev_close(ndev); + ndev->netdev_ops->ndo_stop(ndev); #if IS_ENABLED(CONFIG_VLAN_8021Q) /* Disable the vlan filter for selftest does not support it */ @@ -347,7 +347,7 @@ static void hns3_self_test(struct net_device *ndev, #endif if (if_running) - dev_open(ndev); + ndev->netdev_ops->ndo_open(ndev); } static int hns3_get_sset_count(struct net_device *netdev, int stringset) diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c index ac13cb2b168e5..68026a5ad7e77 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c @@ -304,6 +304,10 @@ int hclge_cmd_queue_init(struct hclge_dev *hdev) { 
int ret; + /* Setup the lock for command queue */ + spin_lock_init(&hdev->hw.cmq.csq.lock); + spin_lock_init(&hdev->hw.cmq.crq.lock); + /* Setup the queue entries for use cmd queue */ hdev->hw.cmq.csq.desc_num = HCLGE_NIC_CMQ_DESC_NUM; hdev->hw.cmq.crq.desc_num = HCLGE_NIC_CMQ_DESC_NUM; @@ -337,18 +341,20 @@ int hclge_cmd_init(struct hclge_dev *hdev) u32 version; int ret; + spin_lock_bh(&hdev->hw.cmq.csq.lock); + spin_lock_bh(&hdev->hw.cmq.crq.lock); + hdev->hw.cmq.csq.next_to_clean = 0; hdev->hw.cmq.csq.next_to_use = 0; hdev->hw.cmq.crq.next_to_clean = 0; hdev->hw.cmq.crq.next_to_use = 0; - /* Setup the lock for command queue */ - spin_lock_init(&hdev->hw.cmq.csq.lock); - spin_lock_init(&hdev->hw.cmq.crq.lock); - hclge_cmd_init_regs(&hdev->hw); clear_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state); + spin_unlock_bh(&hdev->hw.cmq.crq.lock); + spin_unlock_bh(&hdev->hw.cmq.csq.lock); + ret = hclge_cmd_query_firmware_version(&hdev->hw, &version); if (ret) { dev_err(&hdev->pdev->dev, diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.c index f08ebb7caaaf5..92f19384e2585 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.c @@ -73,6 +73,7 @@ static int hclge_ieee_getets(struct hnae3_handle *h, struct ieee_ets *ets) static int hclge_ets_validate(struct hclge_dev *hdev, struct ieee_ets *ets, u8 *tc, bool *changed) { + bool has_ets_tc = false; u32 total_ets_bw = 0; u8 max_tc = 0; u8 i; @@ -100,13 +101,14 @@ static int hclge_ets_validate(struct hclge_dev *hdev, struct ieee_ets *ets, *changed = true; total_ets_bw += ets->tc_tx_bw[i]; - break; + has_ets_tc = true; + break; default: return -EINVAL; } } - if (total_ets_bw != BW_PERCENT) + if (has_ets_tc && total_ets_bw != BW_PERCENT) return -EINVAL; *tc = max_tc + 1; diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c 
b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c index 8577dfc799ad6..340baf6a470cc 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c @@ -1657,11 +1657,13 @@ static int hclge_tx_buffer_calc(struct hclge_dev *hdev, static int hclge_rx_buffer_calc(struct hclge_dev *hdev, struct hclge_pkt_buf_alloc *buf_alloc) { - u32 rx_all = hdev->pkt_buf_size; +#define HCLGE_BUF_SIZE_UNIT 128 + u32 rx_all = hdev->pkt_buf_size, aligned_mps; int no_pfc_priv_num, pfc_priv_num; struct hclge_priv_buf *priv; int i; + aligned_mps = round_up(hdev->mps, HCLGE_BUF_SIZE_UNIT); rx_all -= hclge_get_tx_buff_alloced(buf_alloc); /* When DCB is not supported, rx private @@ -1680,13 +1682,13 @@ static int hclge_rx_buffer_calc(struct hclge_dev *hdev, if (hdev->hw_tc_map & BIT(i)) { priv->enable = 1; if (hdev->tm_info.hw_pfc_map & BIT(i)) { - priv->wl.low = hdev->mps; - priv->wl.high = priv->wl.low + hdev->mps; + priv->wl.low = aligned_mps; + priv->wl.high = priv->wl.low + aligned_mps; priv->buf_size = priv->wl.high + HCLGE_DEFAULT_DV; } else { priv->wl.low = 0; - priv->wl.high = 2 * hdev->mps; + priv->wl.high = 2 * aligned_mps; priv->buf_size = priv->wl.high; } } else { @@ -1718,11 +1720,11 @@ static int hclge_rx_buffer_calc(struct hclge_dev *hdev, if (hdev->tm_info.hw_pfc_map & BIT(i)) { priv->wl.low = 128; - priv->wl.high = priv->wl.low + hdev->mps; + priv->wl.high = priv->wl.low + aligned_mps; priv->buf_size = priv->wl.high + HCLGE_DEFAULT_DV; } else { priv->wl.low = 0; - priv->wl.high = hdev->mps; + priv->wl.high = aligned_mps; priv->buf_size = priv->wl.high; } } @@ -2360,6 +2362,9 @@ static int hclge_get_mac_phy_link(struct hclge_dev *hdev) int mac_state; int link_stat; + if (test_bit(HCLGE_STATE_DOWN, &hdev->state)) + return 0; + mac_state = hclge_get_mac_link_status(hdev); if (hdev->hw.mac.phydev) { @@ -2799,14 +2804,17 @@ static void hclge_reset(struct hclge_dev *hdev) handle = &hdev->vport[0].nic; 
rtnl_lock(); hclge_notify_client(hdev, HNAE3_DOWN_CLIENT); + rtnl_unlock(); if (!hclge_reset_wait(hdev)) { + rtnl_lock(); hclge_notify_client(hdev, HNAE3_UNINIT_CLIENT); hclge_reset_ae_dev(hdev->ae_dev); hclge_notify_client(hdev, HNAE3_INIT_CLIENT); hclge_clear_reset_cause(hdev); } else { + rtnl_lock(); /* schedule again to check pending resets later */ set_bit(hdev->reset_type, &hdev->reset_pending); hclge_reset_task_schedule(hdev); @@ -3809,6 +3817,8 @@ static void hclge_ae_stop(struct hnae3_handle *handle) struct hclge_dev *hdev = vport->back; int i; + set_bit(HCLGE_STATE_DOWN, &hdev->state); + del_timer_sync(&hdev->service_timer); cancel_work_sync(&hdev->service_task); clear_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state); @@ -4686,9 +4696,17 @@ static int hclge_set_vf_vlan_common(struct hclge_dev *hdev, int vfid, "Add vf vlan filter fail, ret =%d.\n", req0->resp_code); } else { +#define HCLGE_VF_VLAN_DEL_NO_FOUND 1 if (!req0->resp_code) return 0; + if (req0->resp_code == HCLGE_VF_VLAN_DEL_NO_FOUND) { + dev_warn(&hdev->pdev->dev, + "vlan %d filter is not in vf vlan table\n", + vlan); + return 0; + } + dev_err(&hdev->pdev->dev, "Kill vf vlan filter fail, ret =%d.\n", req0->resp_code); @@ -4732,6 +4750,9 @@ static int hclge_set_vlan_filter_hw(struct hclge_dev *hdev, __be16 proto, u16 vport_idx, vport_num = 0; int ret; + if (is_kill && !vlan_id) + return 0; + ret = hclge_set_vf_vlan_common(hdev, vport_id, is_kill, vlan_id, 0, proto); if (ret) { diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c index f34851c91eb39..e08e82020402a 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c @@ -458,6 +458,12 @@ void hclge_mbx_handler(struct hclge_dev *hdev) /* handle all the mailbox requests in the queue */ while (!hclge_cmd_crq_empty(&hdev->hw)) { + if (test_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state)) { + 
dev_warn(&hdev->pdev->dev, + "command queue needs re-initializing\n"); + return; + } + desc = &crq->desc[crq->next_to_use]; req = (struct hclge_mbx_vf_to_pf_cmd *)desc->data; diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c index 5db70a1451c58..48235dc2dd56f 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c @@ -1167,14 +1167,14 @@ static int hclge_pfc_setup_hw(struct hclge_dev *hdev) */ static int hclge_bp_setup_hw(struct hclge_dev *hdev, u8 tc) { - struct hclge_vport *vport = hdev->vport; - u32 i, k, qs_bitmap; - int ret; + int i; for (i = 0; i < HCLGE_BP_GRP_NUM; i++) { - qs_bitmap = 0; + u32 qs_bitmap = 0; + int k, ret; for (k = 0; k < hdev->num_alloc_vport; k++) { + struct hclge_vport *vport = &hdev->vport[k]; u16 qs_id = vport->qs_offset + tc; u8 grp, sub_grp; @@ -1184,8 +1184,6 @@ static int hclge_bp_setup_hw(struct hclge_dev *hdev, u8 tc) HCLGE_BP_SUB_GRP_ID_S); if (i == grp) qs_bitmap |= (1 << sub_grp); - - vport++; } ret = hclge_tm_qs_bp_cfg(hdev, tc, i, qs_bitmap); diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c index 9c0091f2addfc..5570fb5dc2eb4 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c @@ -299,6 +299,9 @@ void hclgevf_update_link_status(struct hclgevf_dev *hdev, int link_state) client = handle->client; + link_state = + test_bit(HCLGEVF_STATE_DOWN, &hdev->state) ? 
0 : link_state; + if (link_state != hdev->hw.mac.link) { client->ops->link_status_change(handle, !!link_state); hdev->hw.mac.link = link_state; @@ -1062,6 +1065,8 @@ static int hclgevf_reset(struct hclgevf_dev *hdev) /* bring down the nic to stop any ongoing TX/RX */ hclgevf_notify_client(hdev, HNAE3_DOWN_CLIENT); + rtnl_unlock(); + /* check if VF could successfully fetch the hardware reset completion * status from the hardware */ @@ -1073,12 +1078,15 @@ static int hclgevf_reset(struct hclgevf_dev *hdev) ret); dev_warn(&hdev->pdev->dev, "VF reset failed, disabling VF!\n"); + rtnl_lock(); hclgevf_notify_client(hdev, HNAE3_UNINIT_CLIENT); rtnl_unlock(); return ret; } + rtnl_lock(); + /* now, re-initialize the nic client and ae device*/ ret = hclgevf_reset_stack(hdev); if (ret) @@ -1448,6 +1456,8 @@ static void hclgevf_ae_stop(struct hnae3_handle *handle) struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); int i, queue_id; + set_bit(HCLGEVF_STATE_DOWN, &hdev->state); + for (i = 0; i < hdev->num_tqps; i++) { /* Ring disable */ queue_id = hclgevf_get_queue_id(handle->kinfo.tqp[i]); diff --git a/drivers/net/ethernet/ibm/emac/emac.h b/drivers/net/ethernet/ibm/emac/emac.h index e2f80cca9bed4..0d2de6f676764 100644 --- a/drivers/net/ethernet/ibm/emac/emac.h +++ b/drivers/net/ethernet/ibm/emac/emac.h @@ -231,7 +231,7 @@ struct emac_regs { #define EMAC_STACR_PHYE 0x00004000 #define EMAC_STACR_STAC_MASK 0x00003000 #define EMAC_STACR_STAC_READ 0x00001000 -#define EMAC_STACR_STAC_WRITE 0x00000800 +#define EMAC_STACR_STAC_WRITE 0x00002000 #define EMAC_STACR_OPBC_MASK 0x00000C00 #define EMAC_STACR_OPBC_50 0x00000000 #define EMAC_STACR_OPBC_66 0x00000400 diff --git a/drivers/net/ethernet/ibm/ibmveth.c b/drivers/net/ethernet/ibm/ibmveth.c index 525d8b89187b9..91f48c0780734 100644 --- a/drivers/net/ethernet/ibm/ibmveth.c +++ b/drivers/net/ethernet/ibm/ibmveth.c @@ -1172,11 +1172,15 @@ static netdev_tx_t ibmveth_start_xmit(struct sk_buff *skb, map_failed_frags: last = i+1; - for (i 
= 0; i < last; i++) + for (i = 1; i < last; i++) dma_unmap_page(&adapter->vdev->dev, descs[i].fields.address, descs[i].fields.flags_len & IBMVETH_BUF_LEN_MASK, DMA_TO_DEVICE); + dma_unmap_single(&adapter->vdev->dev, + descs[0].fields.address, + descs[0].fields.flags_len & IBMVETH_BUF_LEN_MASK, + DMA_TO_DEVICE); map_failed: if (!firmware_has_feature(FW_FEATURE_CMO)) netdev_err(netdev, "tx: unable to map xmit buffer\n"); diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c index 699ef942b615c..c8704b1690ebf 100644 --- a/drivers/net/ethernet/ibm/ibmvnic.c +++ b/drivers/net/ethernet/ibm/ibmvnic.c @@ -485,8 +485,8 @@ static void release_rx_pools(struct ibmvnic_adapter *adapter) for (j = 0; j < rx_pool->size; j++) { if (rx_pool->rx_buff[j].skb) { - dev_kfree_skb_any(rx_pool->rx_buff[i].skb); - rx_pool->rx_buff[i].skb = NULL; + dev_kfree_skb_any(rx_pool->rx_buff[j].skb); + rx_pool->rx_buff[j].skb = NULL; } } @@ -1103,20 +1103,15 @@ static int ibmvnic_open(struct net_device *netdev) return 0; } - mutex_lock(&adapter->reset_lock); - if (adapter->state != VNIC_CLOSED) { rc = ibmvnic_login(netdev); - if (rc) { - mutex_unlock(&adapter->reset_lock); + if (rc) return rc; - } rc = init_resources(adapter); if (rc) { netdev_err(netdev, "failed to initialize resources\n"); release_resources(adapter); - mutex_unlock(&adapter->reset_lock); return rc; } } @@ -1124,8 +1119,6 @@ static int ibmvnic_open(struct net_device *netdev) rc = __ibmvnic_open(netdev); netif_carrier_on(netdev); - mutex_unlock(&adapter->reset_lock); - return rc; } @@ -1269,10 +1262,8 @@ static int ibmvnic_close(struct net_device *netdev) return 0; } - mutex_lock(&adapter->reset_lock); rc = __ibmvnic_close(netdev); ibmvnic_cleanup(netdev); - mutex_unlock(&adapter->reset_lock); return rc; } @@ -1545,7 +1536,7 @@ static int ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev) tx_crq.v1.sge_len = cpu_to_be32(skb->len); tx_crq.v1.ioba = cpu_to_be64(data_dma_addr); - if 
(adapter->vlan_header_insertion) { + if (adapter->vlan_header_insertion && skb_vlan_tag_present(skb)) { tx_crq.v1.flags2 |= IBMVNIC_TX_VLAN_INSERT; tx_crq.v1.vlan_id = cpu_to_be16(skb->vlan_tci); } @@ -1746,6 +1737,7 @@ static int do_reset(struct ibmvnic_adapter *adapter, struct ibmvnic_rwi *rwi, u32 reset_state) { u64 old_num_rx_queues, old_num_tx_queues; + u64 old_num_rx_slots, old_num_tx_slots; struct net_device *netdev = adapter->netdev; int i, rc; @@ -1757,6 +1749,8 @@ static int do_reset(struct ibmvnic_adapter *adapter, old_num_rx_queues = adapter->req_rx_queues; old_num_tx_queues = adapter->req_tx_queues; + old_num_rx_slots = adapter->req_rx_add_entries_per_subcrq; + old_num_tx_slots = adapter->req_tx_entries_per_subcrq; ibmvnic_cleanup(netdev); @@ -1819,21 +1813,20 @@ static int do_reset(struct ibmvnic_adapter *adapter, if (rc) return rc; } else if (adapter->req_rx_queues != old_num_rx_queues || - adapter->req_tx_queues != old_num_tx_queues) { - adapter->map_id = 1; + adapter->req_tx_queues != old_num_tx_queues || + adapter->req_rx_add_entries_per_subcrq != + old_num_rx_slots || + adapter->req_tx_entries_per_subcrq != + old_num_tx_slots) { release_rx_pools(adapter); release_tx_pools(adapter); - rc = init_rx_pools(netdev); - if (rc) - return rc; - rc = init_tx_pools(netdev); - if (rc) - return rc; - release_napi(adapter); - rc = init_napi(adapter); + release_vpd_data(adapter); + + rc = init_resources(adapter); if (rc) return rc; + } else { rc = reset_tx_pools(adapter); if (rc) @@ -1917,17 +1910,8 @@ static int do_hard_reset(struct ibmvnic_adapter *adapter, adapter->state = VNIC_PROBED; return 0; } - /* netif_set_real_num_xx_queues needs to take rtnl lock here - * unless wait_for_reset is set, in which case the rtnl lock - * has already been taken before initializing the reset - */ - if (!adapter->wait_for_reset) { - rtnl_lock(); - rc = init_resources(adapter); - rtnl_unlock(); - } else { - rc = init_resources(adapter); - } + + rc = init_resources(adapter); 
if (rc) return rc; @@ -1955,8 +1939,9 @@ static int do_hard_reset(struct ibmvnic_adapter *adapter, static struct ibmvnic_rwi *get_next_rwi(struct ibmvnic_adapter *adapter) { struct ibmvnic_rwi *rwi; + unsigned long flags; - mutex_lock(&adapter->rwi_lock); + spin_lock_irqsave(&adapter->rwi_lock, flags); if (!list_empty(&adapter->rwi_list)) { rwi = list_first_entry(&adapter->rwi_list, struct ibmvnic_rwi, @@ -1966,7 +1951,7 @@ static struct ibmvnic_rwi *get_next_rwi(struct ibmvnic_adapter *adapter) rwi = NULL; } - mutex_unlock(&adapter->rwi_lock); + spin_unlock_irqrestore(&adapter->rwi_lock, flags); return rwi; } @@ -1986,13 +1971,21 @@ static void __ibmvnic_reset(struct work_struct *work) struct ibmvnic_rwi *rwi; struct ibmvnic_adapter *adapter; struct net_device *netdev; + bool we_lock_rtnl = false; u32 reset_state; int rc = 0; adapter = container_of(work, struct ibmvnic_adapter, ibmvnic_reset); netdev = adapter->netdev; - mutex_lock(&adapter->reset_lock); + /* netif_set_real_num_xx_queues needs to take rtnl lock here + * unless wait_for_reset is set, in which case the rtnl lock + * has already been taken before initializing the reset + */ + if (!adapter->wait_for_reset) { + rtnl_lock(); + we_lock_rtnl = true; + } reset_state = adapter->state; rwi = get_next_rwi(adapter); @@ -2020,12 +2013,11 @@ static void __ibmvnic_reset(struct work_struct *work) if (rc) { netdev_dbg(adapter->netdev, "Reset failed\n"); free_all_rwi(adapter); - mutex_unlock(&adapter->reset_lock); - return; } adapter->resetting = false; - mutex_unlock(&adapter->reset_lock); + if (we_lock_rtnl) + rtnl_unlock(); } static int ibmvnic_reset(struct ibmvnic_adapter *adapter, @@ -2034,6 +2026,7 @@ static int ibmvnic_reset(struct ibmvnic_adapter *adapter, struct list_head *entry, *tmp_entry; struct ibmvnic_rwi *rwi, *tmp; struct net_device *netdev = adapter->netdev; + unsigned long flags; int ret; if (adapter->state == VNIC_REMOVING || @@ -2050,21 +2043,21 @@ static int ibmvnic_reset(struct ibmvnic_adapter 
*adapter, goto err; } - mutex_lock(&adapter->rwi_lock); + spin_lock_irqsave(&adapter->rwi_lock, flags); list_for_each(entry, &adapter->rwi_list) { tmp = list_entry(entry, struct ibmvnic_rwi, list); if (tmp->reset_reason == reason) { netdev_dbg(netdev, "Skipping matching reset\n"); - mutex_unlock(&adapter->rwi_lock); + spin_unlock_irqrestore(&adapter->rwi_lock, flags); ret = EBUSY; goto err; } } - rwi = kzalloc(sizeof(*rwi), GFP_KERNEL); + rwi = kzalloc(sizeof(*rwi), GFP_ATOMIC); if (!rwi) { - mutex_unlock(&adapter->rwi_lock); + spin_unlock_irqrestore(&adapter->rwi_lock, flags); ibmvnic_close(netdev); ret = ENOMEM; goto err; @@ -2078,7 +2071,7 @@ static int ibmvnic_reset(struct ibmvnic_adapter *adapter, } rwi->reset_reason = reason; list_add_tail(&rwi->list, &adapter->rwi_list); - mutex_unlock(&adapter->rwi_lock); + spin_unlock_irqrestore(&adapter->rwi_lock, flags); adapter->resetting = true; netdev_dbg(adapter->netdev, "Scheduling reset (reason %d)\n", reason); schedule_work(&adapter->ibmvnic_reset); @@ -4709,8 +4702,7 @@ static int ibmvnic_probe(struct vio_dev *dev, const struct vio_device_id *id) INIT_WORK(&adapter->ibmvnic_reset, __ibmvnic_reset); INIT_LIST_HEAD(&adapter->rwi_list); - mutex_init(&adapter->reset_lock); - mutex_init(&adapter->rwi_lock); + spin_lock_init(&adapter->rwi_lock); adapter->resetting = false; adapter->mac_change_pending = false; @@ -4781,8 +4773,8 @@ static int ibmvnic_remove(struct vio_dev *dev) struct ibmvnic_adapter *adapter = netdev_priv(netdev); adapter->state = VNIC_REMOVING; - unregister_netdev(netdev); - mutex_lock(&adapter->reset_lock); + rtnl_lock(); + unregister_netdevice(netdev); release_resources(adapter); release_sub_crqs(adapter, 1); @@ -4793,7 +4785,7 @@ static int ibmvnic_remove(struct vio_dev *dev) adapter->state = VNIC_REMOVED; - mutex_unlock(&adapter->reset_lock); + rtnl_unlock(); device_remove_file(&dev->dev, &dev_attr_failover); free_netdev(netdev); dev_set_drvdata(&dev->dev, NULL); diff --git 
a/drivers/net/ethernet/ibm/ibmvnic.h b/drivers/net/ethernet/ibm/ibmvnic.h index f06eec145ca60..09465397b7ff4 100644 --- a/drivers/net/ethernet/ibm/ibmvnic.h +++ b/drivers/net/ethernet/ibm/ibmvnic.h @@ -1068,7 +1068,7 @@ struct ibmvnic_adapter { struct tasklet_struct tasklet; enum vnic_state state; enum ibmvnic_reset_reason reset_reason; - struct mutex reset_lock, rwi_lock; + spinlock_t rwi_lock; struct list_head rwi_list; struct work_struct ibmvnic_reset; bool resetting; diff --git a/drivers/net/ethernet/intel/e1000e/ptp.c b/drivers/net/ethernet/intel/e1000e/ptp.c index 37c76945ad9ba..e1f821edbc21c 100644 --- a/drivers/net/ethernet/intel/e1000e/ptp.c +++ b/drivers/net/ethernet/intel/e1000e/ptp.c @@ -173,10 +173,14 @@ static int e1000e_phc_gettime(struct ptp_clock_info *ptp, struct timespec64 *ts) struct e1000_adapter *adapter = container_of(ptp, struct e1000_adapter, ptp_clock_info); unsigned long flags; - u64 ns; + u64 cycles, ns; spin_lock_irqsave(&adapter->systim_lock, flags); - ns = timecounter_read(&adapter->tc); + + /* Use timecounter_cyc2time() to allow non-monotonic SYSTIM readings */ + cycles = adapter->cc.read(&adapter->cc); + ns = timecounter_cyc2time(&adapter->tc, cycles); + spin_unlock_irqrestore(&adapter->systim_lock, flags); *ts = ns_to_timespec64(ns); @@ -232,9 +236,12 @@ static void e1000e_systim_overflow_work(struct work_struct *work) systim_overflow_work.work); struct e1000_hw *hw = &adapter->hw; struct timespec64 ts; + u64 ns; - adapter->ptp_clock_info.gettime64(&adapter->ptp_clock_info, &ts); + /* Update the timecounter */ + ns = timecounter_read(&adapter->tc); + ts = ns_to_timespec64(ns); e_dbg("SYSTIM overflow check at %lld.%09lu\n", (long long) ts.tv_sec, ts.tv_nsec); diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c index ac685ad4d8773..ed9d3fc4aaba9 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_main.c +++ b/drivers/net/ethernet/intel/i40e/i40e_main.c @@ -1409,7 +1409,7 @@ void 
__i40e_del_filter(struct i40e_vsi *vsi, struct i40e_mac_filter *f) } vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED; - set_bit(__I40E_MACVLAN_SYNC_PENDING, vsi->state); + set_bit(__I40E_MACVLAN_SYNC_PENDING, vsi->back->state); } /** @@ -1539,17 +1539,17 @@ static int i40e_set_mac(struct net_device *netdev, void *p) netdev_info(netdev, "set new mac address %pM\n", addr->sa_data); /* Copy the address first, so that we avoid a possible race with - * .set_rx_mode(). If we copy after changing the address in the filter - * list, we might open ourselves to a narrow race window where - * .set_rx_mode could delete our dev_addr filter and prevent traffic - * from passing. + * .set_rx_mode(). + * - Remove old address from MAC filter + * - Copy new address + * - Add new address to MAC filter */ - ether_addr_copy(netdev->dev_addr, addr->sa_data); - spin_lock_bh(&vsi->mac_filter_hash_lock); i40e_del_mac_filter(vsi, netdev->dev_addr); - i40e_add_mac_filter(vsi, addr->sa_data); + ether_addr_copy(netdev->dev_addr, addr->sa_data); + i40e_add_mac_filter(vsi, netdev->dev_addr); spin_unlock_bh(&vsi->mac_filter_hash_lock); + if (vsi->type == I40E_VSI_MAIN) { i40e_status ret; @@ -11926,6 +11926,8 @@ static int i40e_config_netdev(struct i40e_vsi *vsi) NETIF_F_GSO_GRE | NETIF_F_GSO_GRE_CSUM | NETIF_F_GSO_PARTIAL | + NETIF_F_GSO_IPXIP4 | + NETIF_F_GSO_IPXIP6 | NETIF_F_GSO_UDP_TUNNEL | NETIF_F_GSO_UDP_TUNNEL_CSUM | NETIF_F_SCTP_CRC | diff --git a/drivers/net/ethernet/intel/ice/ice.h b/drivers/net/ethernet/intel/ice/ice.h index 868f4a1d0f724..67591722c625e 100644 --- a/drivers/net/ethernet/intel/ice/ice.h +++ b/drivers/net/ethernet/intel/ice/ice.h @@ -39,9 +39,9 @@ extern const char ice_drv_ver[]; #define ICE_BAR0 0 #define ICE_DFLT_NUM_DESC 128 -#define ICE_MIN_NUM_DESC 8 -#define ICE_MAX_NUM_DESC 8160 #define ICE_REQ_DESC_MULTIPLE 32 +#define ICE_MIN_NUM_DESC ICE_REQ_DESC_MULTIPLE +#define ICE_MAX_NUM_DESC 8160 #define ICE_DFLT_TRAFFIC_CLASS BIT(0) #define ICE_INT_NAME_STR_LEN (IFNAMSIZ + 16) 
#define ICE_ETHTOOL_FWVER_LEN 32 diff --git a/drivers/net/ethernet/intel/ice/ice_controlq.c b/drivers/net/ethernet/intel/ice/ice_controlq.c index 62be72fdc8f30..e783976c401d8 100644 --- a/drivers/net/ethernet/intel/ice/ice_controlq.c +++ b/drivers/net/ethernet/intel/ice/ice_controlq.c @@ -518,22 +518,31 @@ ice_shutdown_sq(struct ice_hw *hw, struct ice_ctl_q_info *cq) /** * ice_aq_ver_check - Check the reported AQ API version. - * @fw_branch: The "branch" of FW, typically describes the device type - * @fw_major: The major version of the FW API - * @fw_minor: The minor version increment of the FW API + * @hw: pointer to the hardware structure * * Checks if the driver should load on a given AQ API version. * * Return: 'true' iff the driver should attempt to load. 'false' otherwise. */ -static bool ice_aq_ver_check(u8 fw_branch, u8 fw_major, u8 fw_minor) +static bool ice_aq_ver_check(struct ice_hw *hw) { - if (fw_branch != EXP_FW_API_VER_BRANCH) - return false; - if (fw_major != EXP_FW_API_VER_MAJOR) - return false; - if (fw_minor != EXP_FW_API_VER_MINOR) + if (hw->api_maj_ver > EXP_FW_API_VER_MAJOR) { + /* Major API version is newer than expected, don't load */ + dev_warn(ice_hw_to_dev(hw), + "The driver for the device stopped because the NVM image is newer than expected. You must install the most recent version of the network driver.\n"); return false; + } else if (hw->api_maj_ver == EXP_FW_API_VER_MAJOR) { + if (hw->api_min_ver > (EXP_FW_API_VER_MINOR + 2)) + dev_info(ice_hw_to_dev(hw), + "The driver for the device detected a newer version of the NVM image than expected. Please install the most recent version of the network driver.\n"); + else if ((hw->api_min_ver + 2) < EXP_FW_API_VER_MINOR) + dev_info(ice_hw_to_dev(hw), + "The driver for the device detected an older version of the NVM image than expected. 
Please update the NVM image.\n"); + } else { + /* Major API version is older than expected, log a warning */ + dev_info(ice_hw_to_dev(hw), + "The driver for the device detected an older version of the NVM image than expected. Please update the NVM image.\n"); + } return true; } @@ -588,8 +597,7 @@ static enum ice_status ice_init_check_adminq(struct ice_hw *hw) if (status) goto init_ctrlq_free_rq; - if (!ice_aq_ver_check(hw->api_branch, hw->api_maj_ver, - hw->api_min_ver)) { + if (!ice_aq_ver_check(hw)) { status = ICE_ERR_FW_API_VER; goto init_ctrlq_free_rq; } diff --git a/drivers/net/ethernet/intel/ice/ice_ethtool.c b/drivers/net/ethernet/intel/ice/ice_ethtool.c index c71a9b528d6d5..4c5c87b158f55 100644 --- a/drivers/net/ethernet/intel/ice/ice_ethtool.c +++ b/drivers/net/ethernet/intel/ice/ice_ethtool.c @@ -478,9 +478,11 @@ ice_get_ringparam(struct net_device *netdev, struct ethtool_ringparam *ring) ring->tx_max_pending = ICE_MAX_NUM_DESC; ring->rx_pending = vsi->rx_rings[0]->count; ring->tx_pending = vsi->tx_rings[0]->count; - ring->rx_mini_pending = ICE_MIN_NUM_DESC; + + /* Rx mini and jumbo rings are not supported */ ring->rx_mini_max_pending = 0; ring->rx_jumbo_max_pending = 0; + ring->rx_mini_pending = 0; ring->rx_jumbo_pending = 0; } @@ -498,14 +500,23 @@ ice_set_ringparam(struct net_device *netdev, struct ethtool_ringparam *ring) ring->tx_pending < ICE_MIN_NUM_DESC || ring->rx_pending > ICE_MAX_NUM_DESC || ring->rx_pending < ICE_MIN_NUM_DESC) { - netdev_err(netdev, "Descriptors requested (Tx: %d / Rx: %d) out of range [%d-%d]\n", + netdev_err(netdev, "Descriptors requested (Tx: %d / Rx: %d) out of range [%d-%d] (increment %d)\n", ring->tx_pending, ring->rx_pending, - ICE_MIN_NUM_DESC, ICE_MAX_NUM_DESC); + ICE_MIN_NUM_DESC, ICE_MAX_NUM_DESC, + ICE_REQ_DESC_MULTIPLE); return -EINVAL; } new_tx_cnt = ALIGN(ring->tx_pending, ICE_REQ_DESC_MULTIPLE); + if (new_tx_cnt != ring->tx_pending) + netdev_info(netdev, + "Requested Tx descriptor count rounded up to %d\n", + 
new_tx_cnt); new_rx_cnt = ALIGN(ring->rx_pending, ICE_REQ_DESC_MULTIPLE); + if (new_rx_cnt != ring->rx_pending) + netdev_info(netdev, + "Requested Rx descriptor count rounded up to %d\n", + new_rx_cnt); /* if nothing to do return success */ if (new_tx_cnt == vsi->tx_rings[0]->count && @@ -786,10 +797,15 @@ ice_set_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *pause) } if (!test_bit(__ICE_DOWN, pf->state)) { - /* Give it a little more time to try to come back */ + /* Give it a little more time to try to come back. If still + * down, restart autoneg link or reinitialize the interface. + */ msleep(75); if (!test_bit(__ICE_DOWN, pf->state)) return ice_nway_reset(netdev); + + ice_down(vsi); + ice_up(vsi); } return err; diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.c b/drivers/net/ethernet/intel/ice/ice_txrx.c index 6481e3d863749..0c95c8f83432c 100644 --- a/drivers/net/ethernet/intel/ice/ice_txrx.c +++ b/drivers/net/ethernet/intel/ice/ice_txrx.c @@ -1519,7 +1519,7 @@ int ice_tso(struct ice_tx_buf *first, struct ice_tx_offload_params *off) /* update gso_segs and bytecount */ first->gso_segs = skb_shinfo(skb)->gso_segs; - first->bytecount = (first->gso_segs - 1) * off->header_len; + first->bytecount += (first->gso_segs - 1) * off->header_len; cd_tso_len = skb->len - off->header_len; cd_mss = skb_shinfo(skb)->gso_size; diff --git a/drivers/net/ethernet/intel/ice/ice_type.h b/drivers/net/ethernet/intel/ice/ice_type.h index 97c366e0ca596..ba11b58988331 100644 --- a/drivers/net/ethernet/intel/ice/ice_type.h +++ b/drivers/net/ethernet/intel/ice/ice_type.h @@ -83,12 +83,12 @@ struct ice_link_status { u64 phy_type_low; u16 max_frame_size; u16 link_speed; + u16 req_speeds; u8 lse_ena; /* Link Status Event notification */ u8 link_info; u8 an_info; u8 ext_info; u8 pacing; - u8 req_speeds; /* Refer to #define from module_type[ICE_MODULE_TYPE_TOTAL_BYTE] of * ice_aqc_get_phy_caps structure */ diff --git a/drivers/net/ethernet/intel/igb/e1000_i210.c 
b/drivers/net/ethernet/intel/igb/e1000_i210.c index c54ebedca6da9..c393cb2c0f168 100644 --- a/drivers/net/ethernet/intel/igb/e1000_i210.c +++ b/drivers/net/ethernet/intel/igb/e1000_i210.c @@ -842,6 +842,7 @@ s32 igb_pll_workaround_i210(struct e1000_hw *hw) nvm_word = E1000_INVM_DEFAULT_AL; tmp_nvm = nvm_word | E1000_INVM_PLL_WO_VAL; igb_write_phy_reg_82580(hw, I347AT4_PAGE_SELECT, E1000_PHY_PLL_FREQ_PAGE); + phy_word = E1000_PHY_PLL_UNCONF; for (i = 0; i < E1000_MAX_PLL_TRIES; i++) { /* check current state directly from internal PHY */ igb_read_phy_reg_82580(hw, E1000_PHY_PLL_FREQ_REG, &phy_word); diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c index da4322e4daed5..b27f7a968820d 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c @@ -4,6 +4,7 @@ #include "ixgbe.h" #include #include +#include /** * ixgbe_ipsec_set_tx_sa - set the Tx SA registers @@ -676,6 +677,10 @@ static int ixgbe_ipsec_add_sa(struct xfrm_state *xs) } else { struct tx_sa tsa; + if (adapter->num_vfs && + adapter->bridge_mode != BRIDGE_MODE_VEPA) + return -EOPNOTSUPP; + /* find the first unused index */ ret = ixgbe_ipsec_find_empty_idx(ipsec, false); if (ret < 0) { diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c index 3c6f01c41b788..f6ffd9fb20793 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c @@ -699,7 +699,6 @@ static inline void ixgbe_vf_reset_event(struct ixgbe_adapter *adapter, u32 vf) u8 num_tcs = adapter->hw_tcs; u32 reg_val; u32 queue; - u32 word; /* remove VLAN filters beloning to this VF */ ixgbe_clear_vf_vlans(adapter, vf); @@ -721,8 +720,10 @@ static inline void ixgbe_vf_reset_event(struct ixgbe_adapter *adapter, u32 vf) ixgbe_set_vmvir(adapter, vfinfo->pf_vlan, adapter->default_up, vf); - if (vfinfo->spoofchk_enabled) + if 
(vfinfo->spoofchk_enabled) { hw->mac.ops.set_vlan_anti_spoofing(hw, true, vf); + hw->mac.ops.set_mac_anti_spoofing(hw, true, vf); + } } /* reset multicast table array for vf */ @@ -752,6 +753,14 @@ static inline void ixgbe_vf_reset_event(struct ixgbe_adapter *adapter, u32 vf) } } + IXGBE_WRITE_FLUSH(hw); +} + +static void ixgbe_vf_clear_mbx(struct ixgbe_adapter *adapter, u32 vf) +{ + struct ixgbe_hw *hw = &adapter->hw; + u32 word; + /* Clear VF's mailbox memory */ for (word = 0; word < IXGBE_VFMAILBOX_SIZE; word++) IXGBE_WRITE_REG_ARRAY(hw, IXGBE_PFMBMEM(vf), word, 0); @@ -825,6 +834,8 @@ static int ixgbe_vf_reset_msg(struct ixgbe_adapter *adapter, u32 vf) /* reset the filters for the device */ ixgbe_vf_reset_event(adapter, vf); + ixgbe_vf_clear_mbx(adapter, vf); + /* set vf mac address */ if (!is_zero_ether_addr(vf_mac)) ixgbe_set_vf_mac(adapter, vf, vf_mac); diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c index a8148c7126e51..9772016222c30 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c @@ -2248,7 +2248,9 @@ static s32 ixgbe_get_link_capabilities_X550em(struct ixgbe_hw *hw, *autoneg = false; if (hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core0 || - hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core1) { + hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core1 || + hw->phy.sfp_type == ixgbe_sfp_type_1g_lx_core0 || + hw->phy.sfp_type == ixgbe_sfp_type_1g_lx_core1) { *speed = IXGBE_LINK_SPEED_1GB_FULL; return 0; } diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c index 5a228582423b7..4093a9c52c182 100644 --- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c +++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c @@ -3849,6 +3849,10 @@ static void ixgbevf_tx_csum(struct ixgbevf_ring *tx_ring, skb_checksum_help(skb); goto no_csum; } + + if (first->protocol == htons(ETH_P_IP)) + type_tucmd |= 
IXGBE_ADVTXD_TUCMD_IPV4; + /* update TX checksum flag */ first->tx_flags |= IXGBE_TX_FLAGS_CSUM; vlan_macip_lens = skb_checksum_start_offset(skb) - diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c index b4ed7d394d079..a78a39244b79a 100644 --- a/drivers/net/ethernet/marvell/mvneta.c +++ b/drivers/net/ethernet/marvell/mvneta.c @@ -406,7 +406,6 @@ struct mvneta_port { struct mvneta_pcpu_stats __percpu *stats; int pkt_size; - unsigned int frag_size; void __iomem *base; struct mvneta_rx_queue *rxqs; struct mvneta_tx_queue *txqs; @@ -2905,7 +2904,9 @@ static void mvneta_rxq_hw_init(struct mvneta_port *pp, if (!pp->bm_priv) { /* Set Offset */ mvneta_rxq_offset_set(pp, rxq, 0); - mvneta_rxq_buf_size_set(pp, rxq, pp->frag_size); + mvneta_rxq_buf_size_set(pp, rxq, PAGE_SIZE < SZ_64K ? + PAGE_SIZE : + MVNETA_RX_BUF_SIZE(pp->pkt_size)); mvneta_rxq_bm_disable(pp, rxq); mvneta_rxq_fill(pp, rxq, rxq->size); } else { @@ -3749,7 +3750,6 @@ static int mvneta_open(struct net_device *dev) int ret; pp->pkt_size = MVNETA_RX_PKT_SIZE(pp->dev->mtu); - pp->frag_size = PAGE_SIZE; ret = mvneta_setup_rxqs(pp); if (ret) diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c index a74002b43b518..f8e4808a8317d 100644 --- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c +++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c @@ -4262,8 +4262,27 @@ static void mvpp2_phylink_validate(struct net_device *dev, unsigned long *supported, struct phylink_link_state *state) { + struct mvpp2_port *port = netdev_priv(dev); __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, }; + /* Invalid combinations */ + switch (state->interface) { + case PHY_INTERFACE_MODE_10GKR: + case PHY_INTERFACE_MODE_XAUI: + if (port->gop_id != 0) + goto empty_set; + break; + case PHY_INTERFACE_MODE_RGMII: + case PHY_INTERFACE_MODE_RGMII_ID: + case PHY_INTERFACE_MODE_RGMII_RXID: + case PHY_INTERFACE_MODE_RGMII_TXID: + if (port->gop_id == 0) + 
goto empty_set; + break; + default: + break; + } + phylink_set(mask, Autoneg); phylink_set_port_modes(mask); phylink_set(mask, Pause); @@ -4271,30 +4290,45 @@ static void mvpp2_phylink_validate(struct net_device *dev, switch (state->interface) { case PHY_INTERFACE_MODE_10GKR: - phylink_set(mask, 10000baseCR_Full); - phylink_set(mask, 10000baseSR_Full); - phylink_set(mask, 10000baseLR_Full); - phylink_set(mask, 10000baseLRM_Full); - phylink_set(mask, 10000baseER_Full); - phylink_set(mask, 10000baseKR_Full); + case PHY_INTERFACE_MODE_XAUI: + case PHY_INTERFACE_MODE_NA: + if (port->gop_id == 0) { + phylink_set(mask, 10000baseT_Full); + phylink_set(mask, 10000baseCR_Full); + phylink_set(mask, 10000baseSR_Full); + phylink_set(mask, 10000baseLR_Full); + phylink_set(mask, 10000baseLRM_Full); + phylink_set(mask, 10000baseER_Full); + phylink_set(mask, 10000baseKR_Full); + } /* Fall-through */ - default: + case PHY_INTERFACE_MODE_RGMII: + case PHY_INTERFACE_MODE_RGMII_ID: + case PHY_INTERFACE_MODE_RGMII_RXID: + case PHY_INTERFACE_MODE_RGMII_TXID: + case PHY_INTERFACE_MODE_SGMII: phylink_set(mask, 10baseT_Half); phylink_set(mask, 10baseT_Full); phylink_set(mask, 100baseT_Half); phylink_set(mask, 100baseT_Full); - phylink_set(mask, 10000baseT_Full); /* Fall-through */ case PHY_INTERFACE_MODE_1000BASEX: case PHY_INTERFACE_MODE_2500BASEX: phylink_set(mask, 1000baseT_Full); phylink_set(mask, 1000baseX_Full); phylink_set(mask, 2500baseX_Full); + break; + default: + goto empty_set; } bitmap_and(supported, supported, mask, __ETHTOOL_LINK_MODE_MASK_NBITS); bitmap_and(state->advertising, state->advertising, mask, __ETHTOOL_LINK_MODE_MASK_NBITS); + return; + +empty_set: + bitmap_zero(supported, __ETHTOOL_LINK_MODE_MASK_NBITS); } static void mvpp22_xlg_link_state(struct mvpp2_port *port, diff --git a/drivers/net/ethernet/mellanox/mlx4/Kconfig b/drivers/net/ethernet/mellanox/mlx4/Kconfig index 36054e6fb9d34..f200b8c420d57 100644 --- a/drivers/net/ethernet/mellanox/mlx4/Kconfig +++ 
b/drivers/net/ethernet/mellanox/mlx4/Kconfig @@ -5,7 +5,7 @@ config MLX4_EN tristate "Mellanox Technologies 1/10/40Gbit Ethernet support" depends on MAY_USE_DEVLINK - depends on PCI + depends on PCI && NETDEVICES && ETHERNET && INET select MLX4_CORE imply PTP_1588_CLOCK ---help--- diff --git a/drivers/net/ethernet/mellanox/mlx4/alloc.c b/drivers/net/ethernet/mellanox/mlx4/alloc.c index 4bdf250595427..21788d4f98814 100644 --- a/drivers/net/ethernet/mellanox/mlx4/alloc.c +++ b/drivers/net/ethernet/mellanox/mlx4/alloc.c @@ -337,7 +337,7 @@ void mlx4_zone_allocator_destroy(struct mlx4_zone_allocator *zone_alloc) static u32 __mlx4_alloc_from_zone(struct mlx4_zone_entry *zone, int count, int align, u32 skip_mask, u32 *puid) { - u32 uid; + u32 uid = 0; u32 res; struct mlx4_zone_allocator *zone_alloc = zone->allocator; struct mlx4_zone_entry *curr_node; diff --git a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c index f11b45001cad8..d290f0787dfbb 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c @@ -1084,8 +1084,8 @@ static int mlx4_en_set_pauseparam(struct net_device *dev, tx_pause = !!(pause->tx_pause); rx_pause = !!(pause->rx_pause); - rx_ppp = priv->prof->rx_ppp && !(tx_pause || rx_pause); - tx_ppp = priv->prof->tx_ppp && !(tx_pause || rx_pause); + rx_ppp = (tx_pause || rx_pause) ? 0 : priv->prof->rx_ppp; + tx_ppp = (tx_pause || rx_pause) ? 
0 : priv->prof->tx_ppp; err = mlx4_SET_PORT_general(mdev->dev, priv->port, priv->rx_skb_size + ETH_FCS_LEN, diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c index fe49384eba48c..0d7fd3f043cf0 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c @@ -3494,8 +3494,8 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port, dev->gso_partial_features = NETIF_F_GSO_UDP_TUNNEL_CSUM; } - /* MTU range: 46 - hw-specific max */ - dev->min_mtu = MLX4_EN_MIN_MTU; + /* MTU range: 68 - hw-specific max */ + dev->min_mtu = ETH_MIN_MTU; dev->max_mtu = priv->max_mtu; mdev->pndev[port] = dev; diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4.h b/drivers/net/ethernet/mellanox/mlx4/mlx4.h index ebcd2778eeb3e..23f1b5b512c21 100644 --- a/drivers/net/ethernet/mellanox/mlx4/mlx4.h +++ b/drivers/net/ethernet/mellanox/mlx4/mlx4.h @@ -540,8 +540,8 @@ struct slave_list { struct resource_allocator { spinlock_t alloc_lock; /* protect quotas */ union { - int res_reserved; - int res_port_rsvd[MLX4_MAX_PORTS]; + unsigned int res_reserved; + unsigned int res_port_rsvd[MLX4_MAX_PORTS]; }; union { int res_free; diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h index c3228b89df463..240f9c9ca943d 100644 --- a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h +++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h @@ -161,7 +161,6 @@ #define MLX4_SELFTEST_LB_MIN_MTU (MLX4_LOOPBACK_TEST_PAYLOAD + NET_IP_ALIGN + \ ETH_HLEN + PREAMBLE_LEN) -#define MLX4_EN_MIN_MTU 46 /* VLAN_HLEN is added twice,to support skb vlan tagged with multiple * headers. (For example: ETH_P_8021Q and ETH_P_8021AD). 
*/ diff --git a/drivers/net/ethernet/mellanox/mlx4/mr.c b/drivers/net/ethernet/mellanox/mlx4/mr.c index 2e84f10f59ba9..1a11bc0e16123 100644 --- a/drivers/net/ethernet/mellanox/mlx4/mr.c +++ b/drivers/net/ethernet/mellanox/mlx4/mr.c @@ -363,6 +363,7 @@ int mlx4_mr_hw_write_mpt(struct mlx4_dev *dev, struct mlx4_mr *mmr, container_of((void *)mpt_entry, struct mlx4_cmd_mailbox, buf); + (*mpt_entry)->lkey = 0; err = mlx4_SW2HW_MPT(dev, mailbox, key); } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h index 0f189f8738592..16ceeb1b2c9d8 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h @@ -566,6 +566,7 @@ struct mlx5e_rq { unsigned long state; int ix; + unsigned int hw_mtu; struct net_dim dim; /* Dynamic Interrupt Moderation */ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/port.c b/drivers/net/ethernet/mellanox/mlx5/core/en/port.c index 24e3b564964ff..12e1682f940b8 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/port.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/port.c @@ -88,10 +88,8 @@ int mlx5e_port_linkspeed(struct mlx5_core_dev *mdev, u32 *speed) eth_proto_oper = MLX5_GET(ptys_reg, out, eth_proto_oper); *speed = mlx5e_port_ptys2speed(eth_proto_oper); - if (!(*speed)) { - mlx5_core_warn(mdev, "cannot get port speed\n"); + if (!(*speed)) err = -EINVAL; - } return err; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.c b/drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.c index c047da8752daa..eac245a93f918 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.c @@ -130,8 +130,10 @@ static u32 calculate_xoff(struct mlx5e_priv *priv, unsigned int mtu) int err; err = mlx5e_port_linkspeed(priv->mdev, &speed); - if (err) + if (err) { + mlx5_core_warn(priv->mdev, "cannot get port speed\n"); return 0; + } xoff = (301 + 216 * 
priv->dcbx.cable_len / 100) * speed / 1000 + 272 * mtu / 100; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c index 98dd3e0ada72b..5e5423076b03e 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c @@ -1101,11 +1101,6 @@ int mlx5e_ethtool_get_ts_info(struct mlx5e_priv *priv, struct ethtool_ts_info *info) { struct mlx5_core_dev *mdev = priv->mdev; - int ret; - - ret = ethtool_op_get_ts_info(priv->netdev, info); - if (ret) - return ret; info->phc_index = mlx5_clock_get_ptp_index(mdev); @@ -1113,9 +1108,9 @@ int mlx5e_ethtool_get_ts_info(struct mlx5e_priv *priv, info->phc_index == -1) return 0; - info->so_timestamping |= SOF_TIMESTAMPING_TX_HARDWARE | - SOF_TIMESTAMPING_RX_HARDWARE | - SOF_TIMESTAMPING_RAW_HARDWARE; + info->so_timestamping = SOF_TIMESTAMPING_TX_HARDWARE | + SOF_TIMESTAMPING_RX_HARDWARE | + SOF_TIMESTAMPING_RAW_HARDWARE; info->tx_types = BIT(HWTSTAMP_TX_OFF) | BIT(HWTSTAMP_TX_ON); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c index f291d1bf15586..7365899c3ac91 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c @@ -128,6 +128,8 @@ static bool mlx5e_rx_is_linear_skb(struct mlx5_core_dev *mdev, return !params->lro_en && frag_sz <= PAGE_SIZE; } +#define MLX5_MAX_MPWQE_LOG_WQE_STRIDE_SZ ((BIT(__mlx5_bit_sz(wq, log_wqe_stride_size)) - 1) + \ + MLX5_MPWQE_LOG_STRIDE_SZ_BASE) static bool mlx5e_rx_mpwqe_is_linear_skb(struct mlx5_core_dev *mdev, struct mlx5e_params *params) { @@ -138,6 +140,9 @@ static bool mlx5e_rx_mpwqe_is_linear_skb(struct mlx5_core_dev *mdev, if (!mlx5e_rx_is_linear_skb(mdev, params)) return false; + if (order_base_2(frag_sz) > MLX5_MAX_MPWQE_LOG_WQE_STRIDE_SZ) + return false; + if (MLX5_CAP_GEN(mdev, ext_stride_num_range)) return true; @@ -492,6 +497,7 @@ static 
int mlx5e_alloc_rq(struct mlx5e_channel *c, rq->channel = c; rq->ix = c->ix; rq->mdev = mdev; + rq->hw_mtu = MLX5E_SW2HW_MTU(params, params->sw_mtu); rq->stats = &c->priv->channel_stats[c->ix].rq; rq->xdp_prog = params->xdp_prog ? bpf_prog_inc(params->xdp_prog) : NULL; @@ -1382,6 +1388,7 @@ static void mlx5e_close_txqsq(struct mlx5e_txqsq *sq) struct mlx5_core_dev *mdev = c->mdev; struct mlx5_rate_limit rl = {0}; + cancel_work_sync(&sq->dim.work); mlx5e_destroy_sq(mdev, sq->sqn); if (sq->rate_limit) { rl.rate = sq->rate_limit; @@ -1610,13 +1617,15 @@ static int mlx5e_alloc_cq_common(struct mlx5_core_dev *mdev, int err; u32 i; + err = mlx5_vector2eqn(mdev, param->eq_ix, &eqn_not_used, &irqn); + if (err) + return err; + err = mlx5_cqwq_create(mdev, ¶m->wq, param->cqc, &cq->wq, &cq->wq_ctrl); if (err) return err; - mlx5_vector2eqn(mdev, param->eq_ix, &eqn_not_used, &irqn); - mcq->cqe_sz = 64; mcq->set_ci_db = cq->wq_ctrl.db.db; mcq->arm_db = cq->wq_ctrl.db.db + 1; @@ -1674,6 +1683,10 @@ static int mlx5e_create_cq(struct mlx5e_cq *cq, struct mlx5e_cq_param *param) int eqn; int err; + err = mlx5_vector2eqn(mdev, param->eq_ix, &eqn, &irqn_not_used); + if (err) + return err; + inlen = MLX5_ST_SZ_BYTES(create_cq_in) + sizeof(u64) * cq->wq_ctrl.buf.npages; in = kvzalloc(inlen, GFP_KERNEL); @@ -1687,8 +1700,6 @@ static int mlx5e_create_cq(struct mlx5e_cq *cq, struct mlx5e_cq_param *param) mlx5_fill_page_frag_array(&cq->wq_ctrl.buf, (__be64 *)MLX5_ADDR_OF(create_cq_in, in, pas)); - mlx5_vector2eqn(mdev, param->eq_ix, &eqn, &irqn_not_used); - MLX5_SET(cqc, cqc, cq_period_mode, param->cq_period_mode); MLX5_SET(cqc, cqc, c_eqn, eqn); MLX5_SET(cqc, cqc, uar_page, mdev->priv.uar->index); @@ -1908,6 +1919,10 @@ static int mlx5e_open_channel(struct mlx5e_priv *priv, int ix, int err; int eqn; + err = mlx5_vector2eqn(priv->mdev, ix, &eqn, &irq); + if (err) + return err; + c = kvzalloc_node(sizeof(*c), GFP_KERNEL, cpu_to_node(cpu)); if (!c) return -ENOMEM; @@ -1924,7 +1939,6 @@ static 
int mlx5e_open_channel(struct mlx5e_priv *priv, int ix, c->xdp = !!params->xdp_prog; c->stats = &priv->channel_stats[ix].ch; - mlx5_vector2eqn(priv->mdev, ix, &eqn, &irq); c->irq_desc = irq_to_desc(irq); netif_napi_add(netdev, &c->napi, mlx5e_napi_poll, 64); @@ -3566,6 +3580,7 @@ static int set_feature_cvlan_filter(struct net_device *netdev, bool enable) return 0; } +#ifdef CONFIG_MLX5_ESWITCH static int set_feature_tc_num_filters(struct net_device *netdev, bool enable) { struct mlx5e_priv *priv = netdev_priv(netdev); @@ -3578,6 +3593,7 @@ static int set_feature_tc_num_filters(struct net_device *netdev, bool enable) return 0; } +#endif static int set_feature_rx_all(struct net_device *netdev, bool enable) { @@ -3676,7 +3692,9 @@ static int mlx5e_set_features(struct net_device *netdev, err |= MLX5E_HANDLE_FEATURE(NETIF_F_LRO, set_feature_lro); err |= MLX5E_HANDLE_FEATURE(NETIF_F_HW_VLAN_CTAG_FILTER, set_feature_cvlan_filter); +#ifdef CONFIG_MLX5_ESWITCH err |= MLX5E_HANDLE_FEATURE(NETIF_F_HW_TC, set_feature_tc_num_filters); +#endif err |= MLX5E_HANDLE_FEATURE(NETIF_F_RXALL, set_feature_rx_all); err |= MLX5E_HANDLE_FEATURE(NETIF_F_RXFCS, set_feature_rx_fcs); err |= MLX5E_HANDLE_FEATURE(NETIF_F_HW_VLAN_CTAG_RX, set_feature_rx_vlan); @@ -3747,10 +3765,11 @@ int mlx5e_change_mtu(struct net_device *netdev, int new_mtu, } if (params->rq_wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ) { + bool is_linear = mlx5e_rx_mpwqe_is_linear_skb(priv->mdev, &new_channels.params); u8 ppw_old = mlx5e_mpwqe_log_pkts_per_wqe(params); u8 ppw_new = mlx5e_mpwqe_log_pkts_per_wqe(&new_channels.params); - reset = reset && (ppw_old != ppw_new); + reset = reset && (is_linear || (ppw_old != ppw_new)); } if (!reset) { @@ -4685,7 +4704,9 @@ static void mlx5e_build_nic_netdev(struct net_device *netdev) FT_CAP(modify_root) && FT_CAP(identified_miss_table_mode) && FT_CAP(flow_table_modify)) { +#ifdef CONFIG_MLX5_ESWITCH netdev->hw_features |= NETIF_F_HW_TC; +#endif #ifdef CONFIG_MLX5_EN_ARFS 
netdev->hw_features |= NETIF_F_NTUPLE; #endif @@ -4958,11 +4979,21 @@ int mlx5e_attach_netdev(struct mlx5e_priv *priv) { struct mlx5_core_dev *mdev = priv->mdev; const struct mlx5e_profile *profile; + int max_nch; int err; profile = priv->profile; clear_bit(MLX5E_STATE_DESTROYING, &priv->state); + /* max number of channels may have changed */ + max_nch = mlx5e_get_max_num_channels(priv->mdev); + if (priv->channels.params.num_channels > max_nch) { + mlx5_core_warn(priv->mdev, "MLX5E: Reducing number of channels to %d\n", max_nch); + priv->channels.params.num_channels = max_nch; + mlx5e_build_default_indir_rqt(priv->channels.params.indirection_rqt, + MLX5E_INDIR_RQT_SIZE, max_nch); + } + err = profile->init_tx(priv); if (err) goto out; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c index 00172dee5339c..8262f093fec4a 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c @@ -696,43 +696,15 @@ static inline bool is_last_ethertype_ip(struct sk_buff *skb, int *network_depth) return (ethertype == htons(ETH_P_IP) || ethertype == htons(ETH_P_IPV6)); } -static __be32 mlx5e_get_fcs(struct sk_buff *skb) +static u32 mlx5e_get_fcs(const struct sk_buff *skb) { - int last_frag_sz, bytes_in_prev, nr_frags; - u8 *fcs_p1, *fcs_p2; - skb_frag_t *last_frag; - __be32 fcs_bytes; + const void *fcs_bytes; + u32 _fcs_bytes; - if (!skb_is_nonlinear(skb)) - return *(__be32 *)(skb->data + skb->len - ETH_FCS_LEN); + fcs_bytes = skb_header_pointer(skb, skb->len - ETH_FCS_LEN, + ETH_FCS_LEN, &_fcs_bytes); - nr_frags = skb_shinfo(skb)->nr_frags; - last_frag = &skb_shinfo(skb)->frags[nr_frags - 1]; - last_frag_sz = skb_frag_size(last_frag); - - /* If all FCS data is in last frag */ - if (last_frag_sz >= ETH_FCS_LEN) - return *(__be32 *)(skb_frag_address(last_frag) + - last_frag_sz - ETH_FCS_LEN); - - fcs_p2 = (u8 *)skb_frag_address(last_frag); - bytes_in_prev = ETH_FCS_LEN - 
last_frag_sz; - - /* Find where the other part of the FCS is - Linear or another frag */ - if (nr_frags == 1) { - fcs_p1 = skb_tail_pointer(skb); - } else { - skb_frag_t *prev_frag = &skb_shinfo(skb)->frags[nr_frags - 2]; - - fcs_p1 = skb_frag_address(prev_frag) + - skb_frag_size(prev_frag); - } - fcs_p1 -= bytes_in_prev; - - memcpy(&fcs_bytes, fcs_p1, bytes_in_prev); - memcpy(((u8 *)&fcs_bytes) + bytes_in_prev, fcs_p2, last_frag_sz); - - return fcs_bytes; + return __get_unaligned_cpu32(fcs_bytes); } static inline void mlx5e_handle_csum(struct net_device *netdev, @@ -765,8 +737,9 @@ static inline void mlx5e_handle_csum(struct net_device *netdev, network_depth - ETH_HLEN, skb->csum); if (unlikely(netdev->features & NETIF_F_RXFCS)) - skb->csum = csum_add(skb->csum, - (__force __wsum)mlx5e_get_fcs(skb)); + skb->csum = csum_block_add(skb->csum, + (__force __wsum)mlx5e_get_fcs(skb), + skb->len - ETH_FCS_LEN); stats->csum_complete++; return; } @@ -1091,6 +1064,12 @@ mlx5e_skb_from_cqe_mpwrq_linear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi, u32 frag_size; bool consumed; + /* Check packet size. 
Note LRO doesn't use linear SKB */ + if (unlikely(cqe_bcnt > rq->hw_mtu)) { + rq->stats->oversize_pkts_sw_drop++; + return NULL; + } + va = page_address(di->page) + head_offset; data = va + rx_headroom; frag_size = MLX5_SKB_FRAG_SZ(rx_headroom + cqe_bcnt32); @@ -1171,7 +1150,7 @@ void mlx5e_handle_rx_cqe_mpwrq(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe) int mlx5e_poll_rx_cq(struct mlx5e_cq *cq, int budget) { struct mlx5e_rq *rq = container_of(cq, struct mlx5e_rq, cq); - struct mlx5e_xdpsq *xdpsq; + struct mlx5e_xdpsq *xdpsq = &rq->xdpsq; struct mlx5_cqe64 *cqe; int work_done = 0; @@ -1182,10 +1161,11 @@ int mlx5e_poll_rx_cq(struct mlx5e_cq *cq, int budget) work_done += mlx5e_decompress_cqes_cont(rq, cq, 0, budget); cqe = mlx5_cqwq_get_cqe(&cq->wq); - if (!cqe) + if (!cqe) { + if (unlikely(work_done)) + goto out; return 0; - - xdpsq = &rq->xdpsq; + } do { if (mlx5_get_cqe_format(cqe) == MLX5_COMPRESSED) { @@ -1200,6 +1180,7 @@ int mlx5e_poll_rx_cq(struct mlx5e_cq *cq, int budget) rq->handle_rx_cqe(rq, cqe); } while ((++work_done < budget) && (cqe = mlx5_cqwq_get_cqe(&cq->wq))); +out: if (xdpsq->doorbell) { mlx5e_xmit_xdp_doorbell(xdpsq); xdpsq->doorbell = false; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c b/drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c index 35ded91203f52..4382ef85488c5 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c @@ -98,18 +98,17 @@ static int mlx5e_test_link_speed(struct mlx5e_priv *priv) return 1; } -#ifdef CONFIG_INET -/* loopback test */ -#define MLX5E_TEST_PKT_SIZE (MLX5E_RX_MAX_HEAD - NET_IP_ALIGN) -static const char mlx5e_test_text[ETH_GSTRING_LEN] = "MLX5E SELF TEST"; -#define MLX5E_TEST_MAGIC 0x5AEED15C001ULL - struct mlx5ehdr { __be32 version; __be64 magic; - char text[ETH_GSTRING_LEN]; }; +#ifdef CONFIG_INET +/* loopback test */ +#define MLX5E_TEST_PKT_SIZE (sizeof(struct ethhdr) + sizeof(struct iphdr) +\ + sizeof(struct 
udphdr) + sizeof(struct mlx5ehdr)) +#define MLX5E_TEST_MAGIC 0x5AEED15C001ULL + static struct sk_buff *mlx5e_test_get_udp_skb(struct mlx5e_priv *priv) { struct sk_buff *skb = NULL; @@ -117,10 +116,7 @@ static struct sk_buff *mlx5e_test_get_udp_skb(struct mlx5e_priv *priv) struct ethhdr *ethh; struct udphdr *udph; struct iphdr *iph; - int datalen, iplen; - - datalen = MLX5E_TEST_PKT_SIZE - - (sizeof(*ethh) + sizeof(*iph) + sizeof(*udph)); + int iplen; skb = netdev_alloc_skb(priv->netdev, MLX5E_TEST_PKT_SIZE); if (!skb) { @@ -149,7 +145,7 @@ static struct sk_buff *mlx5e_test_get_udp_skb(struct mlx5e_priv *priv) /* Fill UDP header */ udph->source = htons(9); udph->dest = htons(9); /* Discard Protocol */ - udph->len = htons(datalen + sizeof(struct udphdr)); + udph->len = htons(sizeof(struct mlx5ehdr) + sizeof(struct udphdr)); udph->check = 0; /* Fill IP header */ @@ -157,7 +153,8 @@ static struct sk_buff *mlx5e_test_get_udp_skb(struct mlx5e_priv *priv) iph->ttl = 32; iph->version = 4; iph->protocol = IPPROTO_UDP; - iplen = sizeof(struct iphdr) + sizeof(struct udphdr) + datalen; + iplen = sizeof(struct iphdr) + sizeof(struct udphdr) + + sizeof(struct mlx5ehdr); iph->tot_len = htons(iplen); iph->frag_off = 0; iph->saddr = 0; @@ -170,9 +167,6 @@ static struct sk_buff *mlx5e_test_get_udp_skb(struct mlx5e_priv *priv) mlxh = skb_put(skb, sizeof(*mlxh)); mlxh->version = 0; mlxh->magic = cpu_to_be64(MLX5E_TEST_MAGIC); - strlcpy(mlxh->text, mlx5e_test_text, sizeof(mlxh->text)); - datalen -= sizeof(*mlxh); - skb_put_zero(skb, datalen); skb->csum = 0; skb->ip_summed = CHECKSUM_PARTIAL; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c index 6839481f76974..7047cc293545c 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c @@ -73,7 +73,6 @@ static const struct counter_desc sw_stats_desc[] = { { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_recover) 
}, { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_cqes) }, { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_queue_wake) }, - { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_udp_seg_rem) }, { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_cqe_err) }, { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xdp_xmit) }, { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xdp_full) }, @@ -82,6 +81,7 @@ static const struct counter_desc sw_stats_desc[] = { { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_wqe_err) }, { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_mpwqe_filler_cqes) }, { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_mpwqe_filler_strides) }, + { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_oversize_pkts_sw_drop) }, { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_buff_alloc_err) }, { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cqe_compress_blks) }, { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cqe_compress_pkts) }, @@ -158,6 +158,7 @@ void mlx5e_grp_sw_update_stats(struct mlx5e_priv *priv) s->rx_wqe_err += rq_stats->wqe_err; s->rx_mpwqe_filler_cqes += rq_stats->mpwqe_filler_cqes; s->rx_mpwqe_filler_strides += rq_stats->mpwqe_filler_strides; + s->rx_oversize_pkts_sw_drop += rq_stats->oversize_pkts_sw_drop; s->rx_buff_alloc_err += rq_stats->buff_alloc_err; s->rx_cqe_compress_blks += rq_stats->cqe_compress_blks; s->rx_cqe_compress_pkts += rq_stats->cqe_compress_pkts; @@ -192,7 +193,6 @@ void mlx5e_grp_sw_update_stats(struct mlx5e_priv *priv) s->tx_nop += sq_stats->nop; s->tx_queue_stopped += sq_stats->stopped; s->tx_queue_wake += sq_stats->wake; - s->tx_udp_seg_rem += sq_stats->udp_seg_rem; s->tx_queue_dropped += sq_stats->dropped; s->tx_cqe_err += sq_stats->cqe_err; s->tx_recover += sq_stats->recover; @@ -1148,6 +1148,7 @@ static const struct counter_desc rq_stats_desc[] = { { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, wqe_err) }, { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, mpwqe_filler_cqes) }, { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, mpwqe_filler_strides) }, + { 
MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, oversize_pkts_sw_drop) }, { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, buff_alloc_err) }, { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, cqe_compress_blks) }, { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, cqe_compress_pkts) }, diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h index a4c035aedd46c..0ad7a165443a2 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h @@ -86,7 +86,6 @@ struct mlx5e_sw_stats { u64 tx_recover; u64 tx_cqes; u64 tx_queue_wake; - u64 tx_udp_seg_rem; u64 tx_cqe_err; u64 tx_xdp_xmit; u64 tx_xdp_full; @@ -95,6 +94,7 @@ struct mlx5e_sw_stats { u64 rx_wqe_err; u64 rx_mpwqe_filler_cqes; u64 rx_mpwqe_filler_strides; + u64 rx_oversize_pkts_sw_drop; u64 rx_buff_alloc_err; u64 rx_cqe_compress_blks; u64 rx_cqe_compress_pkts; @@ -190,6 +190,7 @@ struct mlx5e_rq_stats { u64 wqe_err; u64 mpwqe_filler_cqes; u64 mpwqe_filler_strides; + u64 oversize_pkts_sw_drop; u64 buff_alloc_err; u64 cqe_compress_blks; u64 cqe_compress_pkts; @@ -215,7 +216,6 @@ struct mlx5e_sq_stats { u64 csum_partial_inner; u64 added_vlan_packets; u64 nop; - u64 udp_seg_rem; #ifdef CONFIG_MLX5_EN_TLS u64 tls_ooo; u64 tls_resync_bytes; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c index 85796727093ee..3092c59c0dc71 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c @@ -1310,31 +1310,21 @@ static int __parse_cls_flower(struct mlx5e_priv *priv, inner_headers); } - if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ETH_ADDRS)) { - struct flow_dissector_key_eth_addrs *key = + if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_BASIC)) { + struct flow_dissector_key_basic *key = skb_flow_dissector_target(f->dissector, - FLOW_DISSECTOR_KEY_ETH_ADDRS, + FLOW_DISSECTOR_KEY_BASIC, 
f->key); - struct flow_dissector_key_eth_addrs *mask = + struct flow_dissector_key_basic *mask = skb_flow_dissector_target(f->dissector, - FLOW_DISSECTOR_KEY_ETH_ADDRS, + FLOW_DISSECTOR_KEY_BASIC, f->mask); + MLX5_SET(fte_match_set_lyr_2_4, headers_c, ethertype, + ntohs(mask->n_proto)); + MLX5_SET(fte_match_set_lyr_2_4, headers_v, ethertype, + ntohs(key->n_proto)); - ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c, - dmac_47_16), - mask->dst); - ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v, - dmac_47_16), - key->dst); - - ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c, - smac_47_16), - mask->src); - ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v, - smac_47_16), - key->src); - - if (!is_zero_ether_addr(mask->src) || !is_zero_ether_addr(mask->dst)) + if (mask->n_proto) *match_level = MLX5_MATCH_L2; } @@ -1368,9 +1358,10 @@ static int __parse_cls_flower(struct mlx5e_priv *priv, *match_level = MLX5_MATCH_L2; } - } else { + } else if (*match_level != MLX5_MATCH_NONE) { MLX5_SET(fte_match_set_lyr_2_4, headers_c, svlan_tag, 1); MLX5_SET(fte_match_set_lyr_2_4, headers_c, cvlan_tag, 1); + *match_level = MLX5_MATCH_L2; } if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_CVLAN)) { @@ -1408,21 +1399,31 @@ static int __parse_cls_flower(struct mlx5e_priv *priv, } } - if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_BASIC)) { - struct flow_dissector_key_basic *key = + if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ETH_ADDRS)) { + struct flow_dissector_key_eth_addrs *key = skb_flow_dissector_target(f->dissector, - FLOW_DISSECTOR_KEY_BASIC, + FLOW_DISSECTOR_KEY_ETH_ADDRS, f->key); - struct flow_dissector_key_basic *mask = + struct flow_dissector_key_eth_addrs *mask = skb_flow_dissector_target(f->dissector, - FLOW_DISSECTOR_KEY_BASIC, + FLOW_DISSECTOR_KEY_ETH_ADDRS, f->mask); - MLX5_SET(fte_match_set_lyr_2_4, headers_c, ethertype, - ntohs(mask->n_proto)); - MLX5_SET(fte_match_set_lyr_2_4, headers_v, 
ethertype, - ntohs(key->n_proto)); - if (mask->n_proto) + ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c, + dmac_47_16), + mask->dst); + ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v, + dmac_47_16), + key->dst); + + ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c, + smac_47_16), + mask->src); + ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v, + smac_47_16), + key->src); + + if (!is_zero_ether_addr(mask->src) || !is_zero_ether_addr(mask->dst)) *match_level = MLX5_MATCH_L2; } @@ -1449,10 +1450,10 @@ static int __parse_cls_flower(struct mlx5e_priv *priv, /* the HW doesn't need L3 inline to match on frag=no */ if (!(key->flags & FLOW_DIS_IS_FRAGMENT)) - *match_level = MLX5_INLINE_MODE_L2; + *match_level = MLX5_MATCH_L2; /* *** L2 attributes parsing up to here *** */ else - *match_level = MLX5_INLINE_MODE_IP; + *match_level = MLX5_MATCH_L3; } } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.c b/drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.c index b8ee9101c5066..b5a8769a5bfda 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.c @@ -83,8 +83,14 @@ struct mlx5_fpga_ipsec_rule { }; static const struct rhashtable_params rhash_sa = { - .key_len = FIELD_SIZEOF(struct mlx5_fpga_ipsec_sa_ctx, hw_sa), - .key_offset = offsetof(struct mlx5_fpga_ipsec_sa_ctx, hw_sa), + /* Keep out "cmd" field from the key as it's + * value is not constant during the lifetime + * of the key object. 
+ */ + .key_len = FIELD_SIZEOF(struct mlx5_fpga_ipsec_sa_ctx, hw_sa) - + FIELD_SIZEOF(struct mlx5_ifc_fpga_ipsec_sa_v1, cmd), + .key_offset = offsetof(struct mlx5_fpga_ipsec_sa_ctx, hw_sa) + + FIELD_SIZEOF(struct mlx5_ifc_fpga_ipsec_sa_v1, cmd), .head_offset = offsetof(struct mlx5_fpga_ipsec_sa_ctx, hash), .automatic_shrinking = true, .min_size = 1, diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c index 37d114c668b7b..d181645fd968e 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c @@ -432,7 +432,7 @@ static void del_sw_hw_rule(struct fs_node *node) if ((fte->action.action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) && --fte->dests_size) { - modify_mask = BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_DESTINATION_LIST), + modify_mask = BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_DESTINATION_LIST); update_fte = true; } out: diff --git a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c index e3797a44e0743..5b7fe82641447 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c @@ -502,9 +502,9 @@ static int mlx5i_close(struct net_device *netdev) netif_carrier_off(epriv->netdev); mlx5_fs_remove_rx_underlay_qpn(mdev, ipriv->qp.qpn); - mlx5i_uninit_underlay_qp(epriv); mlx5e_deactivate_priv_channels(epriv); mlx5e_close_channels(&epriv->channels); + mlx5i_uninit_underlay_qp(epriv); unlock: mutex_unlock(&epriv->state_lock); return 0; diff --git a/drivers/net/ethernet/mellanox/mlxsw/core.c b/drivers/net/ethernet/mellanox/mlxsw/core.c index 937d0ace699a7..f7154f358f276 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/core.c +++ b/drivers/net/ethernet/mellanox/mlxsw/core.c @@ -81,6 +81,7 @@ struct mlxsw_core { struct mlxsw_core_port *ports; unsigned int max_ports; bool reload_fail; + bool fw_flash_in_progress; unsigned long driver_priv[0]; /* 
driver_priv has to be always the last item */ }; @@ -428,12 +429,16 @@ struct mlxsw_reg_trans { struct rcu_head rcu; }; -#define MLXSW_EMAD_TIMEOUT_MS 200 +#define MLXSW_EMAD_TIMEOUT_DURING_FW_FLASH_MS 3000 +#define MLXSW_EMAD_TIMEOUT_MS 200 static void mlxsw_emad_trans_timeout_schedule(struct mlxsw_reg_trans *trans) { unsigned long timeout = msecs_to_jiffies(MLXSW_EMAD_TIMEOUT_MS); + if (trans->core->fw_flash_in_progress) + timeout = msecs_to_jiffies(MLXSW_EMAD_TIMEOUT_DURING_FW_FLASH_MS); + queue_delayed_work(trans->core->emad_wq, &trans->timeout_dw, timeout); } @@ -943,8 +948,8 @@ static int mlxsw_devlink_core_bus_device_reload(struct devlink *devlink, mlxsw_core->bus, mlxsw_core->bus_priv, true, devlink); - if (err) - mlxsw_core->reload_fail = true; + mlxsw_core->reload_fail = !!err; + return err; } @@ -1083,8 +1088,15 @@ void mlxsw_core_bus_device_unregister(struct mlxsw_core *mlxsw_core, { struct devlink *devlink = priv_to_devlink(mlxsw_core); - if (mlxsw_core->reload_fail) - goto reload_fail; + if (mlxsw_core->reload_fail) { + if (!reload) + /* Only the parts that were not de-initialized in the + * failed reload attempt need to be de-initialized. 
+ */ + goto reload_fail_deinit; + else + return; + } if (mlxsw_core->driver->fini) mlxsw_core->driver->fini(mlxsw_core); @@ -1098,9 +1110,12 @@ void mlxsw_core_bus_device_unregister(struct mlxsw_core *mlxsw_core, if (!reload) devlink_resources_unregister(devlink, NULL); mlxsw_core->bus->fini(mlxsw_core->bus_priv); - if (reload) - return; -reload_fail: + + return; + +reload_fail_deinit: + devlink_unregister(devlink); + devlink_resources_unregister(devlink, NULL); devlink_free(devlink); } EXPORT_SYMBOL(mlxsw_core_bus_device_unregister); @@ -1844,6 +1859,18 @@ int mlxsw_core_kvd_sizes_get(struct mlxsw_core *mlxsw_core, } EXPORT_SYMBOL(mlxsw_core_kvd_sizes_get); +void mlxsw_core_fw_flash_start(struct mlxsw_core *mlxsw_core) +{ + mlxsw_core->fw_flash_in_progress = true; +} +EXPORT_SYMBOL(mlxsw_core_fw_flash_start); + +void mlxsw_core_fw_flash_end(struct mlxsw_core *mlxsw_core) +{ + mlxsw_core->fw_flash_in_progress = false; +} +EXPORT_SYMBOL(mlxsw_core_fw_flash_end); + static int __init mlxsw_core_module_init(void) { int err; diff --git a/drivers/net/ethernet/mellanox/mlxsw/core.h b/drivers/net/ethernet/mellanox/mlxsw/core.h index c35be477856f1..c4e4971764e54 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/core.h +++ b/drivers/net/ethernet/mellanox/mlxsw/core.h @@ -292,6 +292,9 @@ int mlxsw_core_kvd_sizes_get(struct mlxsw_core *mlxsw_core, u64 *p_single_size, u64 *p_double_size, u64 *p_linear_size); +void mlxsw_core_fw_flash_start(struct mlxsw_core *mlxsw_core); +void mlxsw_core_fw_flash_end(struct mlxsw_core *mlxsw_core); + bool mlxsw_core_res_valid(struct mlxsw_core *mlxsw_core, enum mlxsw_res_id res_id); diff --git a/drivers/net/ethernet/mellanox/mlxsw/pci.c b/drivers/net/ethernet/mellanox/mlxsw/pci.c index 5890fdfd62c37..c7901a3f2a794 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/pci.c +++ b/drivers/net/ethernet/mellanox/mlxsw/pci.c @@ -604,29 +604,31 @@ static void mlxsw_pci_cq_tasklet(unsigned long data) u16 wqe_counter = mlxsw_pci_cqe_wqe_counter_get(cqe); u8 
sendq = mlxsw_pci_cqe_sr_get(q->u.cq.v, cqe); u8 dqn = mlxsw_pci_cqe_dqn_get(q->u.cq.v, cqe); + char ncqe[MLXSW_PCI_CQE_SIZE_MAX]; + + memcpy(ncqe, cqe, q->elem_size); + mlxsw_pci_queue_doorbell_consumer_ring(mlxsw_pci, q); if (sendq) { struct mlxsw_pci_queue *sdq; sdq = mlxsw_pci_sdq_get(mlxsw_pci, dqn); mlxsw_pci_cqe_sdq_handle(mlxsw_pci, sdq, - wqe_counter, cqe); + wqe_counter, ncqe); q->u.cq.comp_sdq_count++; } else { struct mlxsw_pci_queue *rdq; rdq = mlxsw_pci_rdq_get(mlxsw_pci, dqn); mlxsw_pci_cqe_rdq_handle(mlxsw_pci, rdq, - wqe_counter, q->u.cq.v, cqe); + wqe_counter, q->u.cq.v, ncqe); q->u.cq.comp_rdq_count++; } if (++items == credits) break; } - if (items) { - mlxsw_pci_queue_doorbell_consumer_ring(mlxsw_pci, q); + if (items) mlxsw_pci_queue_doorbell_arm_consumer_ring(mlxsw_pci, q); - } } static u16 mlxsw_pci_cq_elem_count(const struct mlxsw_pci_queue *q) diff --git a/drivers/net/ethernet/mellanox/mlxsw/pci_hw.h b/drivers/net/ethernet/mellanox/mlxsw/pci_hw.h index 83f452b7ccbbd..72cdaa01d56d4 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/pci_hw.h +++ b/drivers/net/ethernet/mellanox/mlxsw/pci_hw.h @@ -27,7 +27,7 @@ #define MLXSW_PCI_SW_RESET 0xF0010 #define MLXSW_PCI_SW_RESET_RST_BIT BIT(0) -#define MLXSW_PCI_SW_RESET_TIMEOUT_MSECS 5000 +#define MLXSW_PCI_SW_RESET_TIMEOUT_MSECS 13000 #define MLXSW_PCI_SW_RESET_WAIT_MSECS 100 #define MLXSW_PCI_FW_READY 0xA1844 #define MLXSW_PCI_FW_READY_MASK 0xFFFF @@ -53,6 +53,7 @@ #define MLXSW_PCI_WQE_SIZE 32 /* 32 bytes per element */ #define MLXSW_PCI_CQE01_SIZE 16 /* 16 bytes per element */ #define MLXSW_PCI_CQE2_SIZE 32 /* 32 bytes per element */ +#define MLXSW_PCI_CQE_SIZE_MAX MLXSW_PCI_CQE2_SIZE #define MLXSW_PCI_EQE_SIZE 16 /* 16 bytes per element */ #define MLXSW_PCI_WQE_COUNT (MLXSW_PCI_AQ_SIZE / MLXSW_PCI_WQE_SIZE) #define MLXSW_PCI_CQE01_COUNT (MLXSW_PCI_AQ_SIZE / MLXSW_PCI_CQE01_SIZE) diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c index 
30bb2c533cecc..de821a9fdfaf3 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c @@ -308,8 +308,13 @@ static int mlxsw_sp_firmware_flash(struct mlxsw_sp *mlxsw_sp, }, .mlxsw_sp = mlxsw_sp }; + int err; + + mlxsw_core_fw_flash_start(mlxsw_sp->core); + err = mlxfw_firmware_flash(&mlxsw_sp_mlxfw_dev.mlxfw_dev, firmware); + mlxsw_core_fw_flash_end(mlxsw_sp->core); - return mlxfw_firmware_flash(&mlxsw_sp_mlxfw_dev.mlxfw_dev, firmware); + return err; } static int mlxsw_sp_fw_rev_validate(struct mlxsw_sp *mlxsw_sp) @@ -3519,7 +3524,6 @@ static int mlxsw_sp_cpu_policers_set(struct mlxsw_core *mlxsw_core) burst_size = 7; break; case MLXSW_REG_HTGT_TRAP_GROUP_SP_IP2ME: - is_bytes = true; rate = 4 * 1024; burst_size = 4; break; @@ -4631,12 +4635,15 @@ static int mlxsw_sp_netdevice_port_upper_event(struct net_device *lower_dev, lower_dev, upper_dev); } else if (netif_is_lag_master(upper_dev)) { - if (info->linking) + if (info->linking) { err = mlxsw_sp_port_lag_join(mlxsw_sp_port, upper_dev); - else + } else { + mlxsw_sp_port_lag_tx_en_set(mlxsw_sp_port, + false); mlxsw_sp_port_lag_leave(mlxsw_sp_port, upper_dev); + } } else if (netif_is_ovs_master(upper_dev)) { if (info->linking) err = mlxsw_sp_port_ovs_join(mlxsw_sp_port); diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_fid.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_fid.c index 715d24ff937e9..562c4429eec71 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_fid.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_fid.c @@ -696,8 +696,8 @@ static const struct mlxsw_sp_fid_ops mlxsw_sp_fid_dummy_ops = { static const struct mlxsw_sp_fid_family mlxsw_sp_fid_dummy_family = { .type = MLXSW_SP_FID_TYPE_DUMMY, .fid_size = sizeof(struct mlxsw_sp_fid), - .start_index = MLXSW_SP_RFID_BASE - 1, - .end_index = MLXSW_SP_RFID_BASE - 1, + .start_index = VLAN_N_VID - 1, + .end_index = VLAN_N_VID - 1, .ops = &mlxsw_sp_fid_dummy_ops, }; diff --git 
a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c index db715da7bab77..0d9ea37c5d214 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c @@ -286,7 +286,13 @@ static bool mlxsw_sp_bridge_port_should_destroy(const struct mlxsw_sp_bridge_port * bridge_port) { - struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(bridge_port->dev); + struct net_device *dev = bridge_port->dev; + struct mlxsw_sp *mlxsw_sp; + + if (is_vlan_dev(dev)) + mlxsw_sp = mlxsw_sp_lower_get(vlan_dev_real_dev(dev)); + else + mlxsw_sp = mlxsw_sp_lower_get(dev); /* In case ports were pulled from out of a bridged LAG, then * it's possible the reference count isn't zero, yet the bridge @@ -1755,7 +1761,7 @@ static void mlxsw_sp_bridge_port_vlan_del(struct mlxsw_sp_port *mlxsw_sp_port, struct mlxsw_sp_bridge_port *bridge_port, u16 vid) { - u16 pvid = mlxsw_sp_port->pvid == vid ? 0 : vid; + u16 pvid = mlxsw_sp_port->pvid == vid ? 0 : mlxsw_sp_port->pvid; struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan; mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, vid); @@ -2020,7 +2026,7 @@ mlxsw_sp_bridge_8021d_port_leave(struct mlxsw_sp_bridge_device *bridge_device, vid = is_vlan_dev(dev) ? 
vlan_dev_vlan_id(dev) : 1; mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, vid); - if (WARN_ON(!mlxsw_sp_port_vlan)) + if (!mlxsw_sp_port_vlan) return; mlxsw_sp_port_vlan_bridge_leave(mlxsw_sp_port_vlan); @@ -2317,8 +2323,6 @@ static void mlxsw_sp_switchdev_event_work(struct work_struct *work) break; case SWITCHDEV_FDB_DEL_TO_DEVICE: fdb_info = &switchdev_work->fdb_info; - if (!fdb_info->added_by_user) - break; mlxsw_sp_port_fdb_set(mlxsw_sp_port, fdb_info, false); break; case SWITCHDEV_FDB_ADD_TO_BRIDGE: /* fall through */ diff --git a/drivers/net/ethernet/microchip/lan743x_main.c b/drivers/net/ethernet/microchip/lan743x_main.c index 001b5f714c1b7..42f5bfa33694c 100644 --- a/drivers/net/ethernet/microchip/lan743x_main.c +++ b/drivers/net/ethernet/microchip/lan743x_main.c @@ -802,14 +802,8 @@ static int lan743x_mac_init(struct lan743x_adapter *adapter) u32 mac_addr_hi = 0; u32 mac_addr_lo = 0; u32 data; - int ret; netdev = adapter->netdev; - lan743x_csr_write(adapter, MAC_CR, MAC_CR_RST_); - ret = lan743x_csr_wait_for_bit(adapter, MAC_CR, MAC_CR_RST_, - 0, 1000, 20000, 100); - if (ret) - return ret; /* setup auto duplex, and speed detection */ data = lan743x_csr_read(adapter, MAC_CR); @@ -968,13 +962,10 @@ static void lan743x_phy_link_status_change(struct net_device *netdev) memset(&ksettings, 0, sizeof(ksettings)); phy_ethtool_get_link_ksettings(netdev, &ksettings); - local_advertisement = phy_read(phydev, MII_ADVERTISE); - if (local_advertisement < 0) - return; - - remote_advertisement = phy_read(phydev, MII_LPA); - if (remote_advertisement < 0) - return; + local_advertisement = + ethtool_adv_to_mii_adv_t(phydev->advertising); + remote_advertisement = + ethtool_adv_to_mii_adv_t(phydev->lp_advertising); lan743x_phy_update_flowcontrol(adapter, ksettings.base.duplex, @@ -1675,7 +1666,7 @@ static int lan743x_tx_napi_poll(struct napi_struct *napi, int weight) netif_wake_queue(adapter->netdev); } - if (!napi_complete_done(napi, weight)) + if 
(!napi_complete(napi)) goto done; /* enable isr */ @@ -1684,7 +1675,7 @@ static int lan743x_tx_napi_poll(struct napi_struct *napi, int weight) lan743x_csr_read(adapter, INT_STS); done: - return weight; + return 0; } static void lan743x_tx_ring_cleanup(struct lan743x_tx *tx) @@ -1873,9 +1864,9 @@ static int lan743x_tx_open(struct lan743x_tx *tx) tx->vector_flags = lan743x_intr_get_vector_flags(adapter, INT_BIT_DMA_TX_ (tx->channel_number)); - netif_napi_add(adapter->netdev, - &tx->napi, lan743x_tx_napi_poll, - tx->ring_size - 1); + netif_tx_napi_add(adapter->netdev, + &tx->napi, lan743x_tx_napi_poll, + tx->ring_size - 1); napi_enable(&tx->napi); data = 0; @@ -2722,8 +2713,9 @@ static int lan743x_mdiobus_init(struct lan743x_adapter *adapter) snprintf(adapter->mdiobus->id, MII_BUS_ID_SIZE, "pci-%s", pci_name(adapter->pdev)); - /* set to internal PHY id */ - adapter->mdiobus->phy_mask = ~(u32)BIT(1); + if ((adapter->csr.id_rev & ID_REV_ID_MASK_) == ID_REV_ID_LAN7430_) + /* LAN7430 uses internal phy at address 1 */ + adapter->mdiobus->phy_mask = ~(u32)BIT(1); /* register mdiobus */ ret = mdiobus_register(adapter->mdiobus); @@ -3020,6 +3012,7 @@ static const struct dev_pm_ops lan743x_pm_ops = { static const struct pci_device_id lan743x_pcidev_tbl[] = { { PCI_DEVICE(PCI_VENDOR_ID_SMSC, PCI_DEVICE_ID_SMSC_LAN7430) }, + { PCI_DEVICE(PCI_VENDOR_ID_SMSC, PCI_DEVICE_ID_SMSC_LAN7431) }, { 0, } }; diff --git a/drivers/net/ethernet/microchip/lan743x_main.h b/drivers/net/ethernet/microchip/lan743x_main.h index 0e82b6368798a..2d6eea18973e8 100644 --- a/drivers/net/ethernet/microchip/lan743x_main.h +++ b/drivers/net/ethernet/microchip/lan743x_main.h @@ -548,6 +548,7 @@ struct lan743x_adapter; /* SMSC acquired EFAR late 1990's, MCHP acquired SMSC 2012 */ #define PCI_VENDOR_ID_SMSC PCI_VENDOR_ID_EFAR #define PCI_DEVICE_ID_SMSC_LAN7430 (0x7430) +#define PCI_DEVICE_ID_SMSC_LAN7431 (0x7431) #define PCI_CONFIG_LENGTH (0x1000) diff --git a/drivers/net/ethernet/mscc/ocelot.c 
b/drivers/net/ethernet/mscc/ocelot.c index ed4e298cd8239..0bdd3c400c92f 100644 --- a/drivers/net/ethernet/mscc/ocelot.c +++ b/drivers/net/ethernet/mscc/ocelot.c @@ -733,7 +733,7 @@ static int ocelot_fdb_add(struct ndmsg *ndm, struct nlattr *tb[], } return ocelot_mact_learn(ocelot, port->chip_port, addr, vid, - ENTRYTYPE_NORMAL); + ENTRYTYPE_LOCKED); } static int ocelot_fdb_del(struct ndmsg *ndm, struct nlattr *tb[], diff --git a/drivers/net/ethernet/neterion/vxge/vxge-config.c b/drivers/net/ethernet/neterion/vxge/vxge-config.c index 398011c87643c..bf4302e45dcd9 100644 --- a/drivers/net/ethernet/neterion/vxge/vxge-config.c +++ b/drivers/net/ethernet/neterion/vxge/vxge-config.c @@ -807,7 +807,7 @@ __vxge_hw_vpath_fw_ver_get(struct __vxge_hw_virtualpath *vpath, struct vxge_hw_device_date *fw_date = &hw_info->fw_date; struct vxge_hw_device_version *flash_version = &hw_info->flash_version; struct vxge_hw_device_date *flash_date = &hw_info->flash_date; - u64 data0, data1 = 0, steer_ctrl = 0; + u64 data0 = 0, data1 = 0, steer_ctrl = 0; enum vxge_hw_status status; status = vxge_hw_vpath_fw_api(vpath, diff --git a/drivers/net/ethernet/netronome/nfp/flower/offload.c b/drivers/net/ethernet/netronome/nfp/flower/offload.c index bd19624f10cf4..90148dbb261b6 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/offload.c +++ b/drivers/net/ethernet/netronome/nfp/flower/offload.c @@ -375,13 +375,29 @@ nfp_flower_calculate_key_layers(struct nfp_app *app, !(tcp_flags & (TCPHDR_FIN | TCPHDR_SYN | TCPHDR_RST))) return -EOPNOTSUPP; - /* We need to store TCP flags in the IPv4 key space, thus - * we need to ensure we include a IPv4 key layer if we have - * not done so already. + /* We need to store TCP flags in the either the IPv4 or IPv6 key + * space, thus we need to ensure we include a IPv4/IPv6 key + * layer if we have not done so already. 
*/ - if (!(key_layer & NFP_FLOWER_LAYER_IPV4)) { - key_layer |= NFP_FLOWER_LAYER_IPV4; - key_size += sizeof(struct nfp_flower_ipv4); + if (!key_basic) + return -EOPNOTSUPP; + + if (!(key_layer & NFP_FLOWER_LAYER_IPV4) && + !(key_layer & NFP_FLOWER_LAYER_IPV6)) { + switch (key_basic->n_proto) { + case cpu_to_be16(ETH_P_IP): + key_layer |= NFP_FLOWER_LAYER_IPV4; + key_size += sizeof(struct nfp_flower_ipv4); + break; + + case cpu_to_be16(ETH_P_IPV6): + key_layer |= NFP_FLOWER_LAYER_IPV6; + key_size += sizeof(struct nfp_flower_ipv6); + break; + + default: + return -EOPNOTSUPP; + } } } diff --git a/drivers/net/ethernet/netronome/nfp/nfp_devlink.c b/drivers/net/ethernet/netronome/nfp/nfp_devlink.c index db463e20a876c..e9a4179e7e486 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_devlink.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_devlink.c @@ -96,6 +96,7 @@ nfp_devlink_port_split(struct devlink *devlink, unsigned int port_index, { struct nfp_pf *pf = devlink_priv(devlink); struct nfp_eth_table_port eth_port; + unsigned int lanes; int ret; if (count < 2) @@ -114,8 +115,12 @@ nfp_devlink_port_split(struct devlink *devlink, unsigned int port_index, goto out; } - ret = nfp_devlink_set_lanes(pf, eth_port.index, - eth_port.port_lanes / count); + /* Special case the 100G CXP -> 2x40G split */ + lanes = eth_port.port_lanes / count; + if (eth_port.lanes == 10 && count == 2) + lanes = 8 / count; + + ret = nfp_devlink_set_lanes(pf, eth_port.index, lanes); out: mutex_unlock(&pf->lock); @@ -128,6 +133,7 @@ nfp_devlink_port_unsplit(struct devlink *devlink, unsigned int port_index, { struct nfp_pf *pf = devlink_priv(devlink); struct nfp_eth_table_port eth_port; + unsigned int lanes; int ret; mutex_lock(&pf->lock); @@ -143,7 +149,12 @@ nfp_devlink_port_unsplit(struct devlink *devlink, unsigned int port_index, goto out; } - ret = nfp_devlink_set_lanes(pf, eth_port.index, eth_port.port_lanes); + /* Special case the 100G CXP -> 2x40G unsplit */ + lanes = eth_port.port_lanes; + if 
(eth_port.port_lanes == 8) + lanes = 10; + + ret = nfp_devlink_set_lanes(pf, eth_port.index, lanes); out: mutex_unlock(&pf->lock); diff --git a/drivers/net/ethernet/nuvoton/w90p910_ether.c b/drivers/net/ethernet/nuvoton/w90p910_ether.c index 052b3d2c07a12..c662c6f5bee34 100644 --- a/drivers/net/ethernet/nuvoton/w90p910_ether.c +++ b/drivers/net/ethernet/nuvoton/w90p910_ether.c @@ -912,7 +912,7 @@ static const struct net_device_ops w90p910_ether_netdev_ops = { .ndo_validate_addr = eth_validate_addr, }; -static void __init get_mac_address(struct net_device *dev) +static void get_mac_address(struct net_device *dev) { struct w90p910_ether *ether = netdev_priv(dev); struct platform_device *pdev; diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c index 0ea141ece19ea..6547a9dd59355 100644 --- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c +++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c @@ -1125,7 +1125,8 @@ netxen_validate_firmware(struct netxen_adapter *adapter) return -EINVAL; } val = nx_get_bios_version(adapter); - netxen_rom_fast_read(adapter, NX_BIOS_VERSION_OFFSET, (int *)&bios); + if (netxen_rom_fast_read(adapter, NX_BIOS_VERSION_OFFSET, (int *)&bios)) + return -EIO; if ((__force u32)val != bios) { dev_err(&pdev->dev, "%s: firmware bios is incompatible\n", fw_name[fw_type]); diff --git a/drivers/net/ethernet/qlogic/qed/qed_dcbx.c b/drivers/net/ethernet/qlogic/qed/qed_dcbx.c index f5459de6d60a6..5900a506bf8df 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_dcbx.c +++ b/drivers/net/ethernet/qlogic/qed/qed_dcbx.c @@ -191,7 +191,7 @@ qed_dcbx_dp_protocol(struct qed_hwfn *p_hwfn, struct qed_dcbx_results *p_data) static void qed_dcbx_set_params(struct qed_dcbx_results *p_data, struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, - bool enable, u8 prio, u8 tc, + bool app_tlv, bool enable, u8 prio, u8 tc, enum dcbx_protocol_type type, enum qed_pci_personality personality) { @@ -210,7 
+210,7 @@ qed_dcbx_set_params(struct qed_dcbx_results *p_data, p_data->arr[type].dont_add_vlan0 = true; /* QM reconf data */ - if (p_hwfn->hw_info.personality == personality) + if (app_tlv && p_hwfn->hw_info.personality == personality) qed_hw_info_set_offload_tc(&p_hwfn->hw_info, tc); /* Configure dcbx vlan priority in doorbell block for roce EDPM */ @@ -225,7 +225,7 @@ qed_dcbx_set_params(struct qed_dcbx_results *p_data, static void qed_dcbx_update_app_info(struct qed_dcbx_results *p_data, struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, - bool enable, u8 prio, u8 tc, + bool app_tlv, bool enable, u8 prio, u8 tc, enum dcbx_protocol_type type) { enum qed_pci_personality personality; @@ -240,7 +240,7 @@ qed_dcbx_update_app_info(struct qed_dcbx_results *p_data, personality = qed_dcbx_app_update[i].personality; - qed_dcbx_set_params(p_data, p_hwfn, p_ptt, enable, + qed_dcbx_set_params(p_data, p_hwfn, p_ptt, app_tlv, enable, prio, tc, type, personality); } } @@ -318,8 +318,8 @@ qed_dcbx_process_tlv(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, enable = true; } - qed_dcbx_update_app_info(p_data, p_hwfn, p_ptt, enable, - priority, tc, type); + qed_dcbx_update_app_info(p_data, p_hwfn, p_ptt, true, + enable, priority, tc, type); } } @@ -340,7 +340,7 @@ qed_dcbx_process_tlv(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, continue; enable = (type == DCBX_PROTOCOL_ETH) ? 
false : !!dcbx_version; - qed_dcbx_update_app_info(p_data, p_hwfn, p_ptt, enable, + qed_dcbx_update_app_info(p_data, p_hwfn, p_ptt, false, enable, priority, tc, type); } diff --git a/drivers/net/ethernet/qlogic/qed/qed_dev.c b/drivers/net/ethernet/qlogic/qed/qed_dev.c index 97f073fd3725d..2f69ee9221c62 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_dev.c +++ b/drivers/net/ethernet/qlogic/qed/qed_dev.c @@ -179,6 +179,10 @@ void qed_resc_free(struct qed_dev *cdev) qed_iscsi_free(p_hwfn); qed_ooo_free(p_hwfn); } + + if (QED_IS_RDMA_PERSONALITY(p_hwfn)) + qed_rdma_info_free(p_hwfn); + qed_iov_free(p_hwfn); qed_l2_free(p_hwfn); qed_dmae_info_free(p_hwfn); @@ -474,8 +478,16 @@ static u16 *qed_init_qm_get_idx_from_flags(struct qed_hwfn *p_hwfn, struct qed_qm_info *qm_info = &p_hwfn->qm_info; /* Can't have multiple flags set here */ - if (bitmap_weight((unsigned long *)&pq_flags, sizeof(pq_flags)) > 1) + if (bitmap_weight((unsigned long *)&pq_flags, + sizeof(pq_flags) * BITS_PER_BYTE) > 1) { + DP_ERR(p_hwfn, "requested multiple pq flags 0x%x\n", pq_flags); + goto err; + } + + if (!(qed_get_pq_flags(p_hwfn) & pq_flags)) { + DP_ERR(p_hwfn, "pq flag 0x%x is not set\n", pq_flags); goto err; + } switch (pq_flags) { case PQ_FLAGS_RLS: @@ -499,8 +511,7 @@ static u16 *qed_init_qm_get_idx_from_flags(struct qed_hwfn *p_hwfn, } err: - DP_ERR(p_hwfn, "BAD pq flags %d\n", pq_flags); - return NULL; + return &qm_info->start_pq; } /* save pq index in qm info */ @@ -524,20 +535,32 @@ u16 qed_get_cm_pq_idx_mcos(struct qed_hwfn *p_hwfn, u8 tc) { u8 max_tc = qed_init_qm_get_num_tcs(p_hwfn); + if (max_tc == 0) { + DP_ERR(p_hwfn, "pq with flag 0x%lx do not exist\n", + PQ_FLAGS_MCOS); + return p_hwfn->qm_info.start_pq; + } + if (tc > max_tc) DP_ERR(p_hwfn, "tc %d must be smaller than %d\n", tc, max_tc); - return qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_MCOS) + tc; + return qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_MCOS) + (tc % max_tc); } u16 qed_get_cm_pq_idx_vf(struct qed_hwfn *p_hwfn, u16 vf) { u16 
max_vf = qed_init_qm_get_num_vfs(p_hwfn); + if (max_vf == 0) { + DP_ERR(p_hwfn, "pq with flag 0x%lx do not exist\n", + PQ_FLAGS_VFS); + return p_hwfn->qm_info.start_pq; + } + if (vf > max_vf) DP_ERR(p_hwfn, "vf %d must be smaller than %d\n", vf, max_vf); - return qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_VFS) + vf; + return qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_VFS) + (vf % max_vf); } u16 qed_get_cm_pq_idx_ofld_mtc(struct qed_hwfn *p_hwfn, u8 tc) @@ -1074,6 +1097,12 @@ int qed_resc_alloc(struct qed_dev *cdev) goto alloc_err; } + if (QED_IS_RDMA_PERSONALITY(p_hwfn)) { + rc = qed_rdma_info_alloc(p_hwfn); + if (rc) + goto alloc_err; + } + /* DMA info initialization */ rc = qed_dmae_info_alloc(p_hwfn); if (rc) @@ -2091,11 +2120,8 @@ int qed_hw_start_fastpath(struct qed_hwfn *p_hwfn) if (!p_ptt) return -EAGAIN; - /* If roce info is allocated it means roce is initialized and should - * be enabled in searcher. - */ if (p_hwfn->p_rdma_info && - p_hwfn->b_rdma_enabled_in_prs) + p_hwfn->p_rdma_info->active && p_hwfn->b_rdma_enabled_in_prs) qed_wr(p_hwfn, p_ptt, p_hwfn->rdma_prs_search_reg, 0x1); /* Re-open incoming traffic */ diff --git a/drivers/net/ethernet/qlogic/qed/qed_fcoe.c b/drivers/net/ethernet/qlogic/qed/qed_fcoe.c index cc1b373c0ace5..46dc93d3b9b53 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_fcoe.c +++ b/drivers/net/ethernet/qlogic/qed/qed_fcoe.c @@ -147,7 +147,8 @@ qed_sp_fcoe_func_start(struct qed_hwfn *p_hwfn, "Cannot satisfy CQ amount. CQs requested %d, CQs available %d. 
Aborting function start\n", fcoe_pf_params->num_cqs, p_hwfn->hw_info.feat_num[QED_FCOE_CQ]); - return -EINVAL; + rc = -EINVAL; + goto err; } p_data->mtu = cpu_to_le16(fcoe_pf_params->mtu); @@ -156,14 +157,14 @@ qed_sp_fcoe_func_start(struct qed_hwfn *p_hwfn, rc = qed_cxt_acquire_cid(p_hwfn, PROTOCOLID_FCOE, &dummy_cid); if (rc) - return rc; + goto err; cxt_info.iid = dummy_cid; rc = qed_cxt_get_cid_info(p_hwfn, &cxt_info); if (rc) { DP_NOTICE(p_hwfn, "Cannot find context info for dummy cid=%d\n", dummy_cid); - return rc; + goto err; } p_cxt = cxt_info.p_cxt; SET_FIELD(p_cxt->tstorm_ag_context.flags3, @@ -240,6 +241,10 @@ qed_sp_fcoe_func_start(struct qed_hwfn *p_hwfn, rc = qed_spq_post(p_hwfn, p_ent, NULL); return rc; + +err: + qed_sp_destroy_request(p_hwfn, p_ent); + return rc; } static int diff --git a/drivers/net/ethernet/qlogic/qed/qed_hsi.h b/drivers/net/ethernet/qlogic/qed/qed_hsi.h index a71382687ef2b..bed8f48e029ac 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_hsi.h +++ b/drivers/net/ethernet/qlogic/qed/qed_hsi.h @@ -12669,8 +12669,9 @@ enum MFW_DRV_MSG_TYPE { MFW_DRV_MSG_BW_UPDATE10, MFW_DRV_MSG_TRANSCEIVER_STATE_CHANGE, MFW_DRV_MSG_BW_UPDATE11, - MFW_DRV_MSG_OEM_CFG_UPDATE, + MFW_DRV_MSG_RESERVED, MFW_DRV_MSG_GET_TLV_REQ, + MFW_DRV_MSG_OEM_CFG_UPDATE, MFW_DRV_MSG_MAX }; diff --git a/drivers/net/ethernet/qlogic/qed/qed_int.c b/drivers/net/ethernet/qlogic/qed/qed_int.c index 0f0aba793352c..b22f464ea3fa7 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_int.c +++ b/drivers/net/ethernet/qlogic/qed/qed_int.c @@ -992,6 +992,8 @@ static int qed_int_attentions(struct qed_hwfn *p_hwfn) */ do { index = p_sb_attn->sb_index; + /* finish reading index before the loop condition */ + dma_rmb(); attn_bits = le32_to_cpu(p_sb_attn->atten_bits); attn_acks = le32_to_cpu(p_sb_attn->atten_ack); } while (index != p_sb_attn->sb_index); diff --git a/drivers/net/ethernet/qlogic/qed/qed_iscsi.c b/drivers/net/ethernet/qlogic/qed/qed_iscsi.c index 1135387bd99d7..4f8a685d1a55f 
100644 --- a/drivers/net/ethernet/qlogic/qed/qed_iscsi.c +++ b/drivers/net/ethernet/qlogic/qed/qed_iscsi.c @@ -200,6 +200,7 @@ qed_sp_iscsi_func_start(struct qed_hwfn *p_hwfn, "Cannot satisfy CQ amount. Queues requested %d, CQs available %d. Aborting function start\n", p_params->num_queues, p_hwfn->hw_info.feat_num[QED_ISCSI_CQ]); + qed_sp_destroy_request(p_hwfn, p_ent); return -EINVAL; } diff --git a/drivers/net/ethernet/qlogic/qed/qed_l2.c b/drivers/net/ethernet/qlogic/qed/qed_l2.c index 82a1bd1f8a8ce..67c02ea939062 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_l2.c +++ b/drivers/net/ethernet/qlogic/qed/qed_l2.c @@ -740,8 +740,7 @@ int qed_sp_vport_update(struct qed_hwfn *p_hwfn, rc = qed_sp_vport_update_rss(p_hwfn, p_ramrod, p_rss_params); if (rc) { - /* Return spq entry which is taken in qed_sp_init_request()*/ - qed_spq_return_entry(p_hwfn, p_ent); + qed_sp_destroy_request(p_hwfn, p_ent); return rc; } @@ -1355,6 +1354,7 @@ qed_filter_ucast_common(struct qed_hwfn *p_hwfn, DP_NOTICE(p_hwfn, "%d is not supported yet\n", p_filter_cmd->opcode); + qed_sp_destroy_request(p_hwfn, *pp_ent); return -EINVAL; } @@ -2056,13 +2056,13 @@ qed_configure_rfs_ntuple_filter(struct qed_hwfn *p_hwfn, } else { rc = qed_fw_vport(p_hwfn, p_params->vport_id, &abs_vport_id); if (rc) - return rc; + goto err; if (p_params->qid != QED_RFS_NTUPLE_QID_RSS) { rc = qed_fw_l2_queue(p_hwfn, p_params->qid, &abs_rx_q_id); if (rc) - return rc; + goto err; p_ramrod->rx_qid_valid = 1; p_ramrod->rx_qid = cpu_to_le16(abs_rx_q_id); @@ -2083,6 +2083,10 @@ qed_configure_rfs_ntuple_filter(struct qed_hwfn *p_hwfn, (u64)p_params->addr, p_params->length); return qed_spq_post(p_hwfn, p_ent, NULL); + +err: + qed_sp_destroy_request(p_hwfn, p_ent); + return rc; } int qed_get_rxq_coalesce(struct qed_hwfn *p_hwfn, diff --git a/drivers/net/ethernet/qlogic/qed/qed_ll2.c b/drivers/net/ethernet/qlogic/qed/qed_ll2.c index 14ac9cab26534..2fa1c050a14b4 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_ll2.c +++ 
b/drivers/net/ethernet/qlogic/qed/qed_ll2.c @@ -2485,6 +2485,7 @@ static int qed_ll2_start_xmit(struct qed_dev *cdev, struct sk_buff *skb, if (unlikely(dma_mapping_error(&cdev->pdev->dev, mapping))) { DP_NOTICE(cdev, "Unable to map frag - dropping packet\n"); + rc = -ENOMEM; goto err; } diff --git a/drivers/net/ethernet/qlogic/qed/qed_main.c b/drivers/net/ethernet/qlogic/qed/qed_main.c index 2094d86a7a087..cf3b0e3dc350c 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_main.c +++ b/drivers/net/ethernet/qlogic/qed/qed_main.c @@ -1634,9 +1634,9 @@ static int qed_drain(struct qed_dev *cdev) return -EBUSY; } rc = qed_mcp_drain(hwfn, ptt); + qed_ptt_release(hwfn, ptt); if (rc) return rc; - qed_ptt_release(hwfn, ptt); } return 0; diff --git a/drivers/net/ethernet/qlogic/qed/qed_rdma.c b/drivers/net/ethernet/qlogic/qed/qed_rdma.c index c71391b9c757a..7873d6dfd91f5 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_rdma.c +++ b/drivers/net/ethernet/qlogic/qed/qed_rdma.c @@ -140,22 +140,34 @@ static u32 qed_rdma_get_sb_id(void *p_hwfn, u32 rel_sb_id) return FEAT_NUM((struct qed_hwfn *)p_hwfn, QED_PF_L2_QUE) + rel_sb_id; } -static int qed_rdma_alloc(struct qed_hwfn *p_hwfn, - struct qed_ptt *p_ptt, - struct qed_rdma_start_in_params *params) +int qed_rdma_info_alloc(struct qed_hwfn *p_hwfn) { struct qed_rdma_info *p_rdma_info; - u32 num_cons, num_tasks; - int rc = -ENOMEM; - DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Allocating RDMA\n"); - - /* Allocate a struct with current pf rdma info */ p_rdma_info = kzalloc(sizeof(*p_rdma_info), GFP_KERNEL); if (!p_rdma_info) - return rc; + return -ENOMEM; + + spin_lock_init(&p_rdma_info->lock); p_hwfn->p_rdma_info = p_rdma_info; + return 0; +} + +void qed_rdma_info_free(struct qed_hwfn *p_hwfn) +{ + kfree(p_hwfn->p_rdma_info); + p_hwfn->p_rdma_info = NULL; +} + +static int qed_rdma_alloc(struct qed_hwfn *p_hwfn) +{ + struct qed_rdma_info *p_rdma_info = p_hwfn->p_rdma_info; + u32 num_cons, num_tasks; + int rc = -ENOMEM; + + DP_VERBOSE(p_hwfn, 
QED_MSG_RDMA, "Allocating RDMA\n"); + if (QED_IS_IWARP_PERSONALITY(p_hwfn)) p_rdma_info->proto = PROTOCOLID_IWARP; else @@ -183,7 +195,7 @@ static int qed_rdma_alloc(struct qed_hwfn *p_hwfn, /* Allocate a struct with device params and fill it */ p_rdma_info->dev = kzalloc(sizeof(*p_rdma_info->dev), GFP_KERNEL); if (!p_rdma_info->dev) - goto free_rdma_info; + return rc; /* Allocate a struct with port params and fill it */ p_rdma_info->port = kzalloc(sizeof(*p_rdma_info->port), GFP_KERNEL); @@ -298,8 +310,6 @@ static int qed_rdma_alloc(struct qed_hwfn *p_hwfn, kfree(p_rdma_info->port); free_rdma_dev: kfree(p_rdma_info->dev); -free_rdma_info: - kfree(p_rdma_info); return rc; } @@ -370,8 +380,6 @@ static void qed_rdma_resc_free(struct qed_hwfn *p_hwfn) kfree(p_rdma_info->port); kfree(p_rdma_info->dev); - - kfree(p_rdma_info); } static void qed_rdma_free_tid(void *rdma_cxt, u32 itid) @@ -679,8 +687,6 @@ static int qed_rdma_setup(struct qed_hwfn *p_hwfn, DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "RDMA setup\n"); - spin_lock_init(&p_hwfn->p_rdma_info->lock); - qed_rdma_init_devinfo(p_hwfn, params); qed_rdma_init_port(p_hwfn); qed_rdma_init_events(p_hwfn, params); @@ -727,7 +733,7 @@ static int qed_rdma_stop(void *rdma_cxt) /* Disable RoCE search */ qed_wr(p_hwfn, p_ptt, p_hwfn->rdma_prs_search_reg, 0); p_hwfn->b_rdma_enabled_in_prs = false; - + p_hwfn->p_rdma_info->active = 0; qed_wr(p_hwfn, p_ptt, PRS_REG_ROCE_DEST_QP_MAX_PF, 0); ll2_ethertype_en = qed_rd(p_hwfn, p_ptt, PRS_REG_LIGHT_L2_ETHERTYPE_EN); @@ -1236,7 +1242,8 @@ qed_rdma_create_qp(void *rdma_cxt, u8 max_stats_queues; int rc; - if (!rdma_cxt || !in_params || !out_params || !p_hwfn->p_rdma_info) { + if (!rdma_cxt || !in_params || !out_params || + !p_hwfn->p_rdma_info->active) { DP_ERR(p_hwfn->cdev, "qed roce create qp failed due to NULL entry (rdma_cxt=%p, in=%p, out=%p, roce_info=?\n", rdma_cxt, in_params, out_params); @@ -1514,6 +1521,7 @@ qed_rdma_register_tid(void *rdma_cxt, default: rc = -EINVAL; DP_VERBOSE(p_hwfn, 
QED_MSG_RDMA, "rc = %d\n", rc); + qed_sp_destroy_request(p_hwfn, p_ent); return rc; } SET_FIELD(p_ramrod->flags1, @@ -1801,8 +1809,8 @@ bool qed_rdma_allocated_qps(struct qed_hwfn *p_hwfn) { bool result; - /* if rdma info has not been allocated, naturally there are no qps */ - if (!p_hwfn->p_rdma_info) + /* if rdma wasn't activated yet, naturally there are no qps */ + if (!p_hwfn->p_rdma_info->active) return false; spin_lock_bh(&p_hwfn->p_rdma_info->lock); @@ -1848,7 +1856,7 @@ static int qed_rdma_start(void *rdma_cxt, if (!p_ptt) goto err; - rc = qed_rdma_alloc(p_hwfn, p_ptt, params); + rc = qed_rdma_alloc(p_hwfn); if (rc) goto err1; @@ -1857,6 +1865,7 @@ static int qed_rdma_start(void *rdma_cxt, goto err2; qed_ptt_release(p_hwfn, p_ptt); + p_hwfn->p_rdma_info->active = 1; return rc; diff --git a/drivers/net/ethernet/qlogic/qed/qed_rdma.h b/drivers/net/ethernet/qlogic/qed/qed_rdma.h index 6f722ee8ee945..3689fe3e59354 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_rdma.h +++ b/drivers/net/ethernet/qlogic/qed/qed_rdma.h @@ -102,6 +102,7 @@ struct qed_rdma_info { u16 max_queue_zones; enum protocol_type proto; struct qed_iwarp_info iwarp; + u8 active:1; }; struct qed_rdma_qp { @@ -176,10 +177,14 @@ struct qed_rdma_qp { #if IS_ENABLED(CONFIG_QED_RDMA) void qed_rdma_dpm_bar(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt); void qed_rdma_dpm_conf(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt); +int qed_rdma_info_alloc(struct qed_hwfn *p_hwfn); +void qed_rdma_info_free(struct qed_hwfn *p_hwfn); #else static inline void qed_rdma_dpm_conf(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) {} static inline void qed_rdma_dpm_bar(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) {} +static inline int qed_rdma_info_alloc(struct qed_hwfn *p_hwfn) {return -EINVAL;} +static inline void qed_rdma_info_free(struct qed_hwfn *p_hwfn) {} #endif int diff --git a/drivers/net/ethernet/qlogic/qed/qed_roce.c b/drivers/net/ethernet/qlogic/qed/qed_roce.c index f9167d1354bbe..e49fada854108 100644 
--- a/drivers/net/ethernet/qlogic/qed/qed_roce.c +++ b/drivers/net/ethernet/qlogic/qed/qed_roce.c @@ -745,6 +745,7 @@ static int qed_roce_sp_destroy_qp_responder(struct qed_hwfn *p_hwfn, DP_NOTICE(p_hwfn, "qed destroy responder failed: cannot allocate memory (ramrod). rc = %d\n", rc); + qed_sp_destroy_request(p_hwfn, p_ent); return rc; } diff --git a/drivers/net/ethernet/qlogic/qed/qed_sp.h b/drivers/net/ethernet/qlogic/qed/qed_sp.h index e95431f6acd46..3157c0d994417 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_sp.h +++ b/drivers/net/ethernet/qlogic/qed/qed_sp.h @@ -167,6 +167,9 @@ struct qed_spq_entry { enum spq_mode comp_mode; struct qed_spq_comp_cb comp_cb; struct qed_spq_comp_done comp_done; /* SPQ_MODE_EBLOCK */ + + /* Posted entry for unlimited list entry in EBLOCK mode */ + struct qed_spq_entry *post_ent; }; struct qed_eq { @@ -396,6 +399,17 @@ struct qed_sp_init_data { struct qed_spq_comp_cb *p_comp_data; }; +/** + * @brief Returns a SPQ entry to the pool / frees the entry if allocated. + * Should be called on in error flows after initializing the SPQ entry + * and before posting it. + * + * @param p_hwfn + * @param p_ent + */ +void qed_sp_destroy_request(struct qed_hwfn *p_hwfn, + struct qed_spq_entry *p_ent); + int qed_sp_init_request(struct qed_hwfn *p_hwfn, struct qed_spq_entry **pp_ent, u8 cmd, diff --git a/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c b/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c index 77b6248ad3b97..888274fa208bc 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c +++ b/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c @@ -47,6 +47,19 @@ #include "qed_sp.h" #include "qed_sriov.h" +void qed_sp_destroy_request(struct qed_hwfn *p_hwfn, + struct qed_spq_entry *p_ent) +{ + /* qed_spq_get_entry() can either get an entry from the free_pool, + * or, if no entries are left, allocate a new entry and add it to + * the unlimited_pending list. 
+ */ + if (p_ent->queue == &p_hwfn->p_spq->unlimited_pending) + kfree(p_ent); + else + qed_spq_return_entry(p_hwfn, p_ent); +} + int qed_sp_init_request(struct qed_hwfn *p_hwfn, struct qed_spq_entry **pp_ent, u8 cmd, u8 protocol, struct qed_sp_init_data *p_data) @@ -80,7 +93,7 @@ int qed_sp_init_request(struct qed_hwfn *p_hwfn, case QED_SPQ_MODE_BLOCK: if (!p_data->p_comp_data) - return -EINVAL; + goto err; p_ent->comp_cb.cookie = p_data->p_comp_data->cookie; break; @@ -95,7 +108,7 @@ int qed_sp_init_request(struct qed_hwfn *p_hwfn, default: DP_NOTICE(p_hwfn, "Unknown SPQE completion mode %d\n", p_ent->comp_mode); - return -EINVAL; + goto err; } DP_VERBOSE(p_hwfn, QED_MSG_SPQ, @@ -109,6 +122,11 @@ int qed_sp_init_request(struct qed_hwfn *p_hwfn, memset(&p_ent->ramrod, 0, sizeof(p_ent->ramrod)); return 0; + +err: + qed_sp_destroy_request(p_hwfn, p_ent); + + return -EINVAL; } static enum tunnel_clss qed_tunn_clss_to_fw_clss(u8 type) diff --git a/drivers/net/ethernet/qlogic/qed/qed_spq.c b/drivers/net/ethernet/qlogic/qed/qed_spq.c index 1673fc90027f8..7106ad17afe2e 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_spq.c +++ b/drivers/net/ethernet/qlogic/qed/qed_spq.c @@ -142,6 +142,7 @@ static int qed_spq_block(struct qed_hwfn *p_hwfn, DP_INFO(p_hwfn, "Ramrod is stuck, requesting MCP drain\n"); rc = qed_mcp_drain(p_hwfn, p_ptt); + qed_ptt_release(p_hwfn, p_ptt); if (rc) { DP_NOTICE(p_hwfn, "MCP drain failed\n"); goto err; @@ -150,18 +151,15 @@ static int qed_spq_block(struct qed_hwfn *p_hwfn, /* Retry after drain */ rc = __qed_spq_block(p_hwfn, p_ent, p_fw_ret, true); if (!rc) - goto out; + return 0; comp_done = (struct qed_spq_comp_done *)p_ent->comp_cb.cookie; - if (comp_done->done == 1) + if (comp_done->done == 1) { if (p_fw_ret) *p_fw_ret = comp_done->fw_return_code; -out: - qed_ptt_release(p_hwfn, p_ptt); - return 0; - + return 0; + } err: - qed_ptt_release(p_hwfn, p_ptt); DP_NOTICE(p_hwfn, "Ramrod is stuck [CID %08x cmd %02x protocol %02x echo %04x]\n", 
le32_to_cpu(p_ent->elem.hdr.cid), @@ -685,6 +683,8 @@ static int qed_spq_add_entry(struct qed_hwfn *p_hwfn, /* EBLOCK responsible to free the allocated p_ent */ if (p_ent->comp_mode != QED_SPQ_MODE_EBLOCK) kfree(p_ent); + else + p_ent->post_ent = p_en2; p_ent = p_en2; } @@ -768,6 +768,25 @@ static int qed_spq_pend_post(struct qed_hwfn *p_hwfn) SPQ_HIGH_PRI_RESERVE_DEFAULT); } +/* Avoid overriding of SPQ entries when getting out-of-order completions, by + * marking the completions in a bitmap and increasing the chain consumer only + * for the first successive completed entries. + */ +static void qed_spq_comp_bmap_update(struct qed_hwfn *p_hwfn, __le16 echo) +{ + u16 pos = le16_to_cpu(echo) % SPQ_RING_SIZE; + struct qed_spq *p_spq = p_hwfn->p_spq; + + __set_bit(pos, p_spq->p_comp_bitmap); + while (test_bit(p_spq->comp_bitmap_idx, + p_spq->p_comp_bitmap)) { + __clear_bit(p_spq->comp_bitmap_idx, + p_spq->p_comp_bitmap); + p_spq->comp_bitmap_idx++; + qed_chain_return_produced(&p_spq->chain); + } +} + int qed_spq_post(struct qed_hwfn *p_hwfn, struct qed_spq_entry *p_ent, u8 *fw_return_code) { @@ -825,11 +844,12 @@ int qed_spq_post(struct qed_hwfn *p_hwfn, p_ent->queue == &p_spq->unlimited_pending); if (p_ent->queue == &p_spq->unlimited_pending) { - /* This is an allocated p_ent which does not need to - * return to pool. 
- */ + struct qed_spq_entry *p_post_ent = p_ent->post_ent; + kfree(p_ent); - return rc; + + /* Return the entry which was actually posted */ + p_ent = p_post_ent; } if (rc) @@ -843,7 +863,7 @@ int qed_spq_post(struct qed_hwfn *p_hwfn, spq_post_fail2: spin_lock_bh(&p_spq->lock); list_del(&p_ent->list); - qed_chain_return_produced(&p_spq->chain); + qed_spq_comp_bmap_update(p_hwfn, p_ent->elem.hdr.echo); spq_post_fail: /* return to the free pool */ @@ -875,25 +895,8 @@ int qed_spq_completion(struct qed_hwfn *p_hwfn, spin_lock_bh(&p_spq->lock); list_for_each_entry_safe(p_ent, tmp, &p_spq->completion_pending, list) { if (p_ent->elem.hdr.echo == echo) { - u16 pos = le16_to_cpu(echo) % SPQ_RING_SIZE; - list_del(&p_ent->list); - - /* Avoid overriding of SPQ entries when getting - * out-of-order completions, by marking the completions - * in a bitmap and increasing the chain consumer only - * for the first successive completed entries. - */ - __set_bit(pos, p_spq->p_comp_bitmap); - - while (test_bit(p_spq->comp_bitmap_idx, - p_spq->p_comp_bitmap)) { - __clear_bit(p_spq->comp_bitmap_idx, - p_spq->p_comp_bitmap); - p_spq->comp_bitmap_idx++; - qed_chain_return_produced(&p_spq->chain); - } - + qed_spq_comp_bmap_update(p_hwfn, echo); p_spq->comp_count++; found = p_ent; break; @@ -932,11 +935,9 @@ int qed_spq_completion(struct qed_hwfn *p_hwfn, QED_MSG_SPQ, "Got a completion without a callback function\n"); - if ((found->comp_mode != QED_SPQ_MODE_EBLOCK) || - (found->queue == &p_spq->unlimited_pending)) + if (found->comp_mode != QED_SPQ_MODE_EBLOCK) /* EBLOCK is responsible for returning its own entry into the - * free list, unless it originally added the entry into the - * unlimited pending list. + * free list. 
*/ qed_spq_return_entry(p_hwfn, found); diff --git a/drivers/net/ethernet/qlogic/qed/qed_sriov.c b/drivers/net/ethernet/qlogic/qed/qed_sriov.c index 9b08a9d9e1513..ca6290fa0f309 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_sriov.c +++ b/drivers/net/ethernet/qlogic/qed/qed_sriov.c @@ -101,6 +101,7 @@ static int qed_sp_vf_start(struct qed_hwfn *p_hwfn, struct qed_vf_info *p_vf) default: DP_NOTICE(p_hwfn, "Unknown VF personality %d\n", p_hwfn->hw_info.personality); + qed_sp_destroy_request(p_hwfn, p_ent); return -EINVAL; } diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c b/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c index 0afc3d335d562..d11c16aeb19ad 100644 --- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c +++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c @@ -234,7 +234,7 @@ int rmnet_vnd_newlink(u8 id, struct net_device *rmnet_dev, struct net_device *real_dev, struct rmnet_endpoint *ep) { - struct rmnet_priv *priv; + struct rmnet_priv *priv = netdev_priv(rmnet_dev); int rc; if (ep->egress_dev) @@ -247,6 +247,8 @@ int rmnet_vnd_newlink(u8 id, struct net_device *rmnet_dev, rmnet_dev->hw_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM; rmnet_dev->hw_features |= NETIF_F_SG; + priv->real_dev = real_dev; + rc = register_netdevice(rmnet_dev); if (!rc) { ep->egress_dev = rmnet_dev; @@ -255,9 +257,7 @@ int rmnet_vnd_newlink(u8 id, struct net_device *rmnet_dev, rmnet_dev->rtnl_link_ops = &rmnet_link_ops; - priv = netdev_priv(rmnet_dev); priv->mux_id = id; - priv->real_dev = real_dev; netdev_dbg(rmnet_dev, "rmnet dev created\n"); } diff --git a/drivers/net/ethernet/realtek/8139cp.c b/drivers/net/ethernet/realtek/8139cp.c index 81045dfa1cd89..44f6e4873aadd 100644 --- a/drivers/net/ethernet/realtek/8139cp.c +++ b/drivers/net/ethernet/realtek/8139cp.c @@ -571,6 +571,7 @@ static irqreturn_t cp_interrupt (int irq, void *dev_instance) struct cp_private *cp; int handled = 0; u16 status; + u16 mask; if (unlikely(dev == NULL)) return IRQ_NONE; @@ -578,6 
+579,10 @@ static irqreturn_t cp_interrupt (int irq, void *dev_instance) spin_lock(&cp->lock); + mask = cpr16(IntrMask); + if (!mask) + goto out_unlock; + status = cpr16(IntrStatus); if (!status || (status == 0xFFFF)) goto out_unlock; diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c index 2c350099b83cf..5f45ffeeecb42 100644 --- a/drivers/net/ethernet/realtek/r8169.c +++ b/drivers/net/ethernet/realtek/r8169.c @@ -214,6 +214,8 @@ enum cfg_version { }; static const struct pci_device_id rtl8169_pci_tbl[] = { + { PCI_VDEVICE(REALTEK, 0x2502), RTL_CFG_1 }, + { PCI_VDEVICE(REALTEK, 0x2600), RTL_CFG_1 }, { PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8129), 0, 0, RTL_CFG_0 }, { PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8136), 0, 0, RTL_CFG_2 }, { PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8161), 0, 0, RTL_CFG_1 }, @@ -717,6 +719,7 @@ module_param(use_dac, int, 0); MODULE_PARM_DESC(use_dac, "Enable PCI DAC. Unsafe on 32 bit PCI slot."); module_param_named(debug, debug.msg_enable, int, 0); MODULE_PARM_DESC(debug, "Debug verbosity level (0=none, ..., 16=all)"); +MODULE_SOFTDEP("pre: realtek"); MODULE_LICENSE("GPL"); MODULE_FIRMWARE(FIRMWARE_8168D_1); MODULE_FIRMWARE(FIRMWARE_8168D_2); @@ -1528,6 +1531,8 @@ static void __rtl8169_set_wol(struct rtl8169_private *tp, u32 wolopts) } RTL_W8(tp, Cfg9346, Cfg9346_Lock); + + device_set_wakeup_enable(tp_to_dev(tp), wolopts); } static int rtl8169_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol) @@ -1549,8 +1554,6 @@ static int rtl8169_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol) rtl_unlock_work(tp); - device_set_wakeup_enable(d, tp->saved_wolopts); - pm_runtime_put_noidle(d); return 0; @@ -1730,11 +1733,13 @@ static bool rtl8169_reset_counters(struct rtl8169_private *tp) static bool rtl8169_update_counters(struct rtl8169_private *tp) { + u8 val = RTL_R8(tp, ChipCmd); + /* * Some chips are unable to dump tally counters when the receiver - * is disabled. + * is disabled. 
If 0xff chip may be in a PCI power-save state. */ - if ((RTL_R8(tp, ChipCmd) & CmdRxEnb) == 0) + if (!(val & CmdRxEnb) || val == 0xff) return true; return rtl8169_do_counters(tp, CounterDump); @@ -4175,10 +4180,15 @@ static void rtl_wol_suspend_quirk(struct rtl8169_private *tp) static bool rtl_wol_pll_power_down(struct rtl8169_private *tp) { - if (!netif_running(tp->dev) || !__rtl8169_get_wol(tp)) + struct phy_device *phydev; + + if (!__rtl8169_get_wol(tp)) return false; - phy_speed_down(tp->dev->phydev, false); + /* phydev may not be attached to netdevice */ + phydev = mdiobus_get_phy(tp->mii_bus, 0); + + phy_speed_down(phydev, false); rtl_wol_suspend_quirk(tp); return true; diff --git a/drivers/net/ethernet/socionext/netsec.c b/drivers/net/ethernet/socionext/netsec.c index 4289ccb26e4ec..d2caeb9edc044 100644 --- a/drivers/net/ethernet/socionext/netsec.c +++ b/drivers/net/ethernet/socionext/netsec.c @@ -940,6 +940,9 @@ static void netsec_uninit_pkt_dring(struct netsec_priv *priv, int id) dring->head = 0; dring->tail = 0; dring->pkt_cnt = 0; + + if (id == NETSEC_RING_TX) + netdev_reset_queue(priv->ndev); } static void netsec_free_dring(struct netsec_priv *priv, int id) diff --git a/drivers/net/ethernet/socionext/sni_ave.c b/drivers/net/ethernet/socionext/sni_ave.c index f7ecceeb1e280..f27d67a4d3045 100644 --- a/drivers/net/ethernet/socionext/sni_ave.c +++ b/drivers/net/ethernet/socionext/sni_ave.c @@ -194,6 +194,7 @@ /* Parameter for ethernet frame */ #define AVE_MAX_ETHFRAME 1518 +#define AVE_FRAME_HEADROOM 2 /* Parameter for interrupt */ #define AVE_INTM_COUNT 20 @@ -585,12 +586,13 @@ static int ave_rxdesc_prepare(struct net_device *ndev, int entry) skb = priv->rx.desc[entry].skbs; if (!skb) { - skb = netdev_alloc_skb_ip_align(ndev, - AVE_MAX_ETHFRAME); + skb = netdev_alloc_skb(ndev, AVE_MAX_ETHFRAME); if (!skb) { netdev_err(ndev, "can't allocate skb for Rx\n"); return -ENOMEM; } + skb->data += AVE_FRAME_HEADROOM; + skb->tail += AVE_FRAME_HEADROOM; } /* set 
disable to cmdsts */ @@ -603,12 +605,12 @@ static int ave_rxdesc_prepare(struct net_device *ndev, int entry) * - Rx buffer begins with 2 byte headroom, and data will be put from * (buffer + 2). * To satisfy this, specify the address to put back the buffer - * pointer advanced by NET_IP_ALIGN by netdev_alloc_skb_ip_align(), - * and expand the map size by NET_IP_ALIGN. + * pointer advanced by AVE_FRAME_HEADROOM, and expand the map size + * by AVE_FRAME_HEADROOM. */ ret = ave_dma_map(ndev, &priv->rx.desc[entry], - skb->data - NET_IP_ALIGN, - AVE_MAX_ETHFRAME + NET_IP_ALIGN, + skb->data - AVE_FRAME_HEADROOM, + AVE_MAX_ETHFRAME + AVE_FRAME_HEADROOM, DMA_FROM_DEVICE, &paddr); if (ret) { netdev_err(ndev, "can't map skb for Rx\n"); diff --git a/drivers/net/ethernet/stmicro/stmmac/common.h b/drivers/net/ethernet/stmicro/stmmac/common.h index b1b305f8f4143..272b9ca663148 100644 --- a/drivers/net/ethernet/stmicro/stmmac/common.h +++ b/drivers/net/ethernet/stmicro/stmmac/common.h @@ -365,7 +365,8 @@ struct dma_features { /* GMAC TX FIFO is 8K, Rx FIFO is 16K */ #define BUF_SIZE_16KiB 16384 -#define BUF_SIZE_8KiB 8192 +/* RX Buffer size must be < 8191 and multiple of 4/8/16 bytes */ +#define BUF_SIZE_8KiB 8188 #define BUF_SIZE_4KiB 4096 #define BUF_SIZE_2KiB 2048 diff --git a/drivers/net/ethernet/stmicro/stmmac/descs_com.h b/drivers/net/ethernet/stmicro/stmmac/descs_com.h index ca9d7e48034ce..40d6356a7e73c 100644 --- a/drivers/net/ethernet/stmicro/stmmac/descs_com.h +++ b/drivers/net/ethernet/stmicro/stmmac/descs_com.h @@ -31,7 +31,7 @@ /* Enhanced descriptors */ static inline void ehn_desc_rx_set_on_ring(struct dma_desc *p, int end) { - p->des1 |= cpu_to_le32(((BUF_SIZE_8KiB - 1) + p->des1 |= cpu_to_le32((BUF_SIZE_8KiB << ERDES1_BUFFER2_SIZE_SHIFT) & ERDES1_BUFFER2_SIZE_MASK); diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c index f9a61f90cfbc6..0f660af01a4b8 100644 --- 
a/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c @@ -714,8 +714,9 @@ static int get_ephy_nodes(struct stmmac_priv *priv) return -ENODEV; } - mdio_internal = of_find_compatible_node(mdio_mux, NULL, + mdio_internal = of_get_compatible_child(mdio_mux, "allwinner,sun8i-h3-mdio-internal"); + of_node_put(mdio_mux); if (!mdio_internal) { dev_err(priv->device, "Cannot get internal_mdio node\n"); return -ENODEV; @@ -729,13 +730,20 @@ static int get_ephy_nodes(struct stmmac_priv *priv) gmac->rst_ephy = of_reset_control_get_exclusive(iphynode, NULL); if (IS_ERR(gmac->rst_ephy)) { ret = PTR_ERR(gmac->rst_ephy); - if (ret == -EPROBE_DEFER) + if (ret == -EPROBE_DEFER) { + of_node_put(iphynode); + of_node_put(mdio_internal); return ret; + } continue; } dev_info(priv->device, "Found internal PHY node\n"); + of_node_put(iphynode); + of_node_put(mdio_internal); return 0; } + + of_node_put(mdio_internal); return -ENODEV; } diff --git a/drivers/net/ethernet/stmicro/stmmac/enh_desc.c b/drivers/net/ethernet/stmicro/stmmac/enh_desc.c index 77914c89d7497..5ef91a790f9d1 100644 --- a/drivers/net/ethernet/stmicro/stmmac/enh_desc.c +++ b/drivers/net/ethernet/stmicro/stmmac/enh_desc.c @@ -262,7 +262,7 @@ static void enh_desc_init_rx_desc(struct dma_desc *p, int disable_rx_ic, int mode, int end) { p->des0 |= cpu_to_le32(RDES0_OWN); - p->des1 |= cpu_to_le32((BUF_SIZE_8KiB - 1) & ERDES1_BUFFER1_SIZE_MASK); + p->des1 |= cpu_to_le32(BUF_SIZE_8KiB & ERDES1_BUFFER1_SIZE_MASK); if (mode == STMMAC_CHAIN_MODE) ehn_desc_rx_set_on_chain(p); diff --git a/drivers/net/ethernet/stmicro/stmmac/ring_mode.c b/drivers/net/ethernet/stmicro/stmmac/ring_mode.c index a7ffc73fffe82..bc83ced94e1b8 100644 --- a/drivers/net/ethernet/stmicro/stmmac/ring_mode.c +++ b/drivers/net/ethernet/stmicro/stmmac/ring_mode.c @@ -140,7 +140,7 @@ static void clean_desc3(void *priv_ptr, struct dma_desc *p) static int set_16kib_bfsize(int mtu) { int ret = 0; - if (unlikely(mtu >= 
BUF_SIZE_8KiB)) + if (unlikely(mtu > BUF_SIZE_8KiB)) ret = BUF_SIZE_16KiB; return ret; } diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c index 75896d6ba6e2b..2103b865726ac 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c @@ -2547,12 +2547,6 @@ static int stmmac_hw_setup(struct net_device *dev, bool init_ptp) netdev_warn(priv->dev, "PTP init failed\n"); } -#ifdef CONFIG_DEBUG_FS - ret = stmmac_init_fs(dev); - if (ret < 0) - netdev_warn(priv->dev, "%s: failed debugFS registration\n", - __func__); -#endif priv->tx_lpi_timer = STMMAC_DEFAULT_TWT_LS; if (priv->use_riwt) { @@ -2753,10 +2747,6 @@ static int stmmac_release(struct net_device *dev) netif_carrier_off(dev); -#ifdef CONFIG_DEBUG_FS - stmmac_exit_fs(dev); -#endif - stmmac_release_ptp(priv); return 0; @@ -3896,6 +3886,9 @@ static int stmmac_sysfs_ring_read(struct seq_file *seq, void *v) u32 tx_count = priv->plat->tx_queues_to_use; u32 queue; + if ((dev->flags & IFF_UP) == 0) + return 0; + for (queue = 0; queue < rx_count; queue++) { struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue]; @@ -4254,6 +4247,7 @@ int stmmac_dvr_probe(struct device *device, priv->wq = create_singlethread_workqueue("stmmac_wq"); if (!priv->wq) { dev_err(priv->device, "failed to create workqueue\n"); + ret = -ENOMEM; goto error_wq; } @@ -4394,6 +4388,13 @@ int stmmac_dvr_probe(struct device *device, goto error_netdev_register; } +#ifdef CONFIG_DEBUG_FS + ret = stmmac_init_fs(ndev); + if (ret < 0) + netdev_warn(priv->dev, "%s: failed debugFS registration\n", + __func__); +#endif + return ret; error_netdev_register: @@ -4429,6 +4430,9 @@ int stmmac_dvr_remove(struct device *dev) netdev_info(priv->dev, "%s: removing driver", __func__); +#ifdef CONFIG_DEBUG_FS + stmmac_exit_fs(ndev); +#endif stmmac_stop_all_dma(priv); stmmac_mac_set(priv, priv->ioaddr, false); diff --git 
a/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c index b72ef171477e0..bdd351597b552 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c @@ -243,7 +243,7 @@ static int stmmac_mdio_write(struct mii_bus *bus, int phyaddr, int phyreg, */ int stmmac_mdio_reset(struct mii_bus *bus) { -#if defined(CONFIG_STMMAC_PLATFORM) +#if IS_ENABLED(CONFIG_STMMAC_PLATFORM) struct net_device *ndev = bus->priv; struct stmmac_priv *priv = netdev_priv(ndev); unsigned int mii_address = priv->hw->mii.addr; diff --git a/drivers/net/ethernet/sun/cassini.c b/drivers/net/ethernet/sun/cassini.c index 9020b084b9538..7ec4eb74fe216 100644 --- a/drivers/net/ethernet/sun/cassini.c +++ b/drivers/net/ethernet/sun/cassini.c @@ -1,22 +1,9 @@ -// SPDX-License-Identifier: GPL-2.0 +// SPDX-License-Identifier: GPL-2.0+ /* cassini.c: Sun Microsystems Cassini(+) ethernet driver. * * Copyright (C) 2004 Sun Microsystems Inc. * Copyright (C) 2003 Adrian Sun (asun@darksunrising.com) * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License as - * published by the Free Software Foundation; either version 2 of the - * License, or (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, see . - * * This driver uses the sungem driver (c) David Miller * (davem@redhat.com) as its basis. 
* diff --git a/drivers/net/ethernet/sun/cassini.h b/drivers/net/ethernet/sun/cassini.h index 13f3860496a86..ae5f05f03f880 100644 --- a/drivers/net/ethernet/sun/cassini.h +++ b/drivers/net/ethernet/sun/cassini.h @@ -1,23 +1,10 @@ -/* SPDX-License-Identifier: GPL-2.0 */ +/* SPDX-License-Identifier: GPL-2.0+ */ /* $Id: cassini.h,v 1.16 2004/08/17 21:15:16 zaumen Exp $ * cassini.h: Definitions for Sun Microsystems Cassini(+) ethernet driver. * * Copyright (C) 2004 Sun Microsystems Inc. * Copyright (c) 2003 Adrian Sun (asun@darksunrising.com) * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License as - * published by the Free Software Foundation; either version 2 of the - * License, or (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, see . - * * vendor id: 0x108E (Sun Microsystems, Inc.) 
* device id: 0xabba (Cassini) * revision ids: 0x01 = Cassini diff --git a/drivers/net/hamradio/6pack.c b/drivers/net/hamradio/6pack.c index d79a69dd2146d..54e63ec049072 100644 --- a/drivers/net/hamradio/6pack.c +++ b/drivers/net/hamradio/6pack.c @@ -524,10 +524,7 @@ static void resync_tnc(struct timer_list *t) /* Start resync timer again -- the TNC might be still absent */ - - del_timer(&sp->resync_t); - sp->resync_t.expires = jiffies + SIXP_RESYNC_TIMEOUT; - add_timer(&sp->resync_t); + mod_timer(&sp->resync_t, jiffies + SIXP_RESYNC_TIMEOUT); } static inline int tnc_init(struct sixpack *sp) @@ -538,9 +535,7 @@ static inline int tnc_init(struct sixpack *sp) sp->tty->ops->write(sp->tty, &inbyte, 1); - del_timer(&sp->resync_t); - sp->resync_t.expires = jiffies + SIXP_RESYNC_TIMEOUT; - add_timer(&sp->resync_t); + mod_timer(&sp->resync_t, jiffies + SIXP_RESYNC_TIMEOUT); return 0; } @@ -918,11 +913,8 @@ static void decode_prio_command(struct sixpack *sp, unsigned char cmd) /* if the state byte has been received, the TNC is present, so the resync timer can be reset. */ - if (sp->tnc_state == TNC_IN_SYNC) { - del_timer(&sp->resync_t); - sp->resync_t.expires = jiffies + SIXP_INIT_RESYNC_TIMEOUT; - add_timer(&sp->resync_t); - } + if (sp->tnc_state == TNC_IN_SYNC) + mod_timer(&sp->resync_t, jiffies + SIXP_INIT_RESYNC_TIMEOUT); sp->status1 = cmd & SIXP_PRIO_DATA_MASK; } diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c index 3af6d8d152337..1c37a821895b7 100644 --- a/drivers/net/hyperv/netvsc_drv.c +++ b/drivers/net/hyperv/netvsc_drv.c @@ -2022,14 +2022,15 @@ static void netvsc_vf_setup(struct work_struct *w) rtnl_unlock(); } -/* Find netvsc by VMBus serial number. - * The PCI hyperv controller records the serial number as the slot. +/* Find netvsc by VF serial number. + * The PCI hyperv controller records the serial number as the slot kobj name. 
*/ static struct net_device *get_netvsc_byslot(const struct net_device *vf_netdev) { struct device *parent = vf_netdev->dev.parent; struct net_device_context *ndev_ctx; struct pci_dev *pdev; + u32 serial; if (!parent || !dev_is_pci(parent)) return NULL; /* not a PCI device */ @@ -2040,16 +2041,22 @@ static struct net_device *get_netvsc_byslot(const struct net_device *vf_netdev) return NULL; } + if (kstrtou32(pci_slot_name(pdev->slot), 10, &serial)) { + netdev_notice(vf_netdev, "Invalid vf serial:%s\n", + pci_slot_name(pdev->slot)); + return NULL; + } + list_for_each_entry(ndev_ctx, &netvsc_dev_list, list) { if (!ndev_ctx->vf_alloc) continue; - if (ndev_ctx->vf_serial == pdev->slot->number) + if (ndev_ctx->vf_serial == serial) return hv_get_drvdata(ndev_ctx->device_ctx); } netdev_notice(vf_netdev, - "no netdev found for slot %u\n", pdev->slot->number); + "no netdev found for vf serial:%u\n", serial); return NULL; } diff --git a/drivers/net/ieee802154/ca8210.c b/drivers/net/ieee802154/ca8210.c index 0ff5a403a8dc3..b2ff903a9cb6e 100644 --- a/drivers/net/ieee802154/ca8210.c +++ b/drivers/net/ieee802154/ca8210.c @@ -721,7 +721,7 @@ static void ca8210_mlme_reset_worker(struct work_struct *work) static void ca8210_rx_done(struct cas_control *cas_ctl) { u8 *buf; - u8 len; + unsigned int len; struct work_priv_container *mlme_reset_wpc; struct ca8210_priv *priv = cas_ctl->priv; @@ -730,7 +730,7 @@ static void ca8210_rx_done(struct cas_control *cas_ctl) if (len > CA8210_SPI_BUF_SIZE) { dev_crit( &priv->spi->dev, - "Received packet len (%d) erroneously long\n", + "Received packet len (%u) erroneously long\n", len ); goto finish; diff --git a/drivers/net/ieee802154/mac802154_hwsim.c b/drivers/net/ieee802154/mac802154_hwsim.c index bf70ab892e697..624bff4d36365 100644 --- a/drivers/net/ieee802154/mac802154_hwsim.c +++ b/drivers/net/ieee802154/mac802154_hwsim.c @@ -500,7 +500,7 @@ static int hwsim_del_edge_nl(struct sk_buff *msg, struct genl_info *info) 
!info->attrs[MAC802154_HWSIM_ATTR_RADIO_EDGE]) return -EINVAL; - if (nla_parse_nested(edge_attrs, MAC802154_HWSIM_EDGE_ATTR_MAX + 1, + if (nla_parse_nested(edge_attrs, MAC802154_HWSIM_EDGE_ATTR_MAX, info->attrs[MAC802154_HWSIM_ATTR_RADIO_EDGE], hwsim_edge_policy, NULL)) return -EINVAL; @@ -550,7 +550,7 @@ static int hwsim_set_edge_lqi(struct sk_buff *msg, struct genl_info *info) !info->attrs[MAC802154_HWSIM_ATTR_RADIO_EDGE]) return -EINVAL; - if (nla_parse_nested(edge_attrs, MAC802154_HWSIM_EDGE_ATTR_MAX + 1, + if (nla_parse_nested(edge_attrs, MAC802154_HWSIM_EDGE_ATTR_MAX, info->attrs[MAC802154_HWSIM_ATTR_RADIO_EDGE], hwsim_edge_policy, NULL)) return -EINVAL; diff --git a/drivers/net/loopback.c b/drivers/net/loopback.c index 30612497643c0..d192936b76cff 100644 --- a/drivers/net/loopback.c +++ b/drivers/net/loopback.c @@ -75,6 +75,10 @@ static netdev_tx_t loopback_xmit(struct sk_buff *skb, int len; skb_tx_timestamp(skb); + + /* do not fool net_timestamp_check() with various clock bases */ + skb->tstamp = 0; + skb_orphan(skb); /* Before queueing this packet to netif_rx(), diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c index cfda146f3b3bb..6372cdc4a5109 100644 --- a/drivers/net/macvlan.c +++ b/drivers/net/macvlan.c @@ -608,7 +608,7 @@ static int macvlan_open(struct net_device *dev) goto hash_add; } - err = -EBUSY; + err = -EADDRINUSE; if (macvlan_addr_busy(vlan->port, dev->dev_addr)) goto out; @@ -706,7 +706,7 @@ static int macvlan_sync_address(struct net_device *dev, unsigned char *addr) } else { /* Rehash and update the device filters */ if (macvlan_addr_busy(vlan->port, addr)) - return -EBUSY; + return -EADDRINUSE; if (!macvlan_passthru(port)) { err = dev_uc_add(lowerdev, addr); @@ -747,6 +747,9 @@ static int macvlan_set_mac_address(struct net_device *dev, void *p) return dev_set_mac_address(vlan->lowerdev, addr); } + if (macvlan_addr_busy(vlan->port, addr->sa_data)) + return -EADDRINUSE; + return macvlan_sync_address(dev, addr->sa_data); } diff --git 
a/drivers/net/net_failover.c b/drivers/net/net_failover.c index 7ae1856d1f185..5a749dc25bec4 100644 --- a/drivers/net/net_failover.c +++ b/drivers/net/net_failover.c @@ -603,6 +603,9 @@ static int net_failover_slave_unregister(struct net_device *slave_dev, primary_dev = rtnl_dereference(nfo_info->primary_dev); standby_dev = rtnl_dereference(nfo_info->standby_dev); + if (WARN_ON_ONCE(slave_dev != primary_dev && slave_dev != standby_dev)) + return -ENODEV; + vlan_vids_del_by_dev(slave_dev, failover_dev); dev_uc_unsync(slave_dev, failover_dev); dev_mc_unsync(slave_dev, failover_dev); diff --git a/drivers/net/phy/marvell.c b/drivers/net/phy/marvell.c index f7c69ca34056e..d71be15c8c69f 100644 --- a/drivers/net/phy/marvell.c +++ b/drivers/net/phy/marvell.c @@ -1063,6 +1063,39 @@ static int m88e1145_config_init(struct phy_device *phydev) return 0; } +/* The VOD can be out of specification on link up. Poke an + * undocumented register, in an undocumented page, with a magic value + * to fix this. 
+ */ +static int m88e6390_errata(struct phy_device *phydev) +{ + int err; + + err = phy_write(phydev, MII_BMCR, + BMCR_ANENABLE | BMCR_SPEED1000 | BMCR_FULLDPLX); + if (err) + return err; + + usleep_range(300, 400); + + err = phy_write_paged(phydev, 0xf8, 0x08, 0x36); + if (err) + return err; + + return genphy_soft_reset(phydev); +} + +static int m88e6390_config_aneg(struct phy_device *phydev) +{ + int err; + + err = m88e6390_errata(phydev); + if (err) + return err; + + return m88e1510_config_aneg(phydev); +} + /** * fiber_lpa_to_ethtool_lpa_t * @lpa: value of the MII_LPA register for fiber link @@ -1418,7 +1451,7 @@ static int m88e1318_set_wol(struct phy_device *phydev, * before enabling it if !phy_interrupt_is_valid() */ if (!phy_interrupt_is_valid(phydev)) - phy_read(phydev, MII_M1011_IEVENT); + __phy_read(phydev, MII_M1011_IEVENT); /* Enable the WOL interrupt */ err = __phy_modify(phydev, MII_88E1318S_PHY_CSIER, 0, @@ -2313,7 +2346,7 @@ static struct phy_driver marvell_drivers[] = { .flags = PHY_HAS_INTERRUPT, .probe = m88e6390_probe, .config_init = &marvell_config_init, - .config_aneg = &m88e1510_config_aneg, + .config_aneg = &m88e6390_config_aneg, .read_status = &marvell_read_status, .ack_interrupt = &marvell_ack_interrupt, .config_intr = &marvell_config_intr, diff --git a/drivers/net/phy/mdio-gpio.c b/drivers/net/phy/mdio-gpio.c index 33265747bf399..0fbcedcdf6e2a 100644 --- a/drivers/net/phy/mdio-gpio.c +++ b/drivers/net/phy/mdio-gpio.c @@ -63,7 +63,7 @@ static void mdio_dir(struct mdiobb_ctrl *ctrl, int dir) * assume the pin serves as pull-up. If direction is * output, the default value is high. 
*/ - gpiod_set_value(bitbang->mdo, 1); + gpiod_set_value_cansleep(bitbang->mdo, 1); return; } @@ -78,7 +78,7 @@ static int mdio_get(struct mdiobb_ctrl *ctrl) struct mdio_gpio_info *bitbang = container_of(ctrl, struct mdio_gpio_info, ctrl); - return gpiod_get_value(bitbang->mdio); + return gpiod_get_value_cansleep(bitbang->mdio); } static void mdio_set(struct mdiobb_ctrl *ctrl, int what) @@ -87,9 +87,9 @@ static void mdio_set(struct mdiobb_ctrl *ctrl, int what) container_of(ctrl, struct mdio_gpio_info, ctrl); if (bitbang->mdo) - gpiod_set_value(bitbang->mdo, what); + gpiod_set_value_cansleep(bitbang->mdo, what); else - gpiod_set_value(bitbang->mdio, what); + gpiod_set_value_cansleep(bitbang->mdio, what); } static void mdc_set(struct mdiobb_ctrl *ctrl, int what) @@ -97,7 +97,7 @@ static void mdc_set(struct mdiobb_ctrl *ctrl, int what) struct mdio_gpio_info *bitbang = container_of(ctrl, struct mdio_gpio_info, ctrl); - gpiod_set_value(bitbang->mdc, what); + gpiod_set_value_cansleep(bitbang->mdc, what); } static const struct mdiobb_ops mdio_gpio_ops = { diff --git a/drivers/net/phy/mdio_bus.c b/drivers/net/phy/mdio_bus.c index 98f4b1f706df4..15c5586d74ff7 100644 --- a/drivers/net/phy/mdio_bus.c +++ b/drivers/net/phy/mdio_bus.c @@ -391,6 +391,7 @@ int __mdiobus_register(struct mii_bus *bus, struct module *owner) if (IS_ERR(gpiod)) { dev_err(&bus->dev, "mii_bus %s couldn't get reset GPIO\n", bus->id); + device_del(&bus->dev); return PTR_ERR(gpiod); } else if (gpiod) { bus->reset_gpiod = gpiod; diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c index 19ab8a7d1e486..2c32c795f5dd0 100644 --- a/drivers/net/phy/phy_device.c +++ b/drivers/net/phy/phy_device.c @@ -164,11 +164,8 @@ static int mdio_bus_phy_restore(struct device *dev) if (ret < 0) return ret; - /* The PHY needs to renegotiate. 
*/ - phydev->link = 0; - phydev->state = PHY_UP; - - phy_start_machine(phydev); + if (phydev->attached_dev && phydev->adjust_link) + phy_start_machine(phydev); return 0; } @@ -1738,20 +1735,17 @@ EXPORT_SYMBOL(genphy_loopback); static int __set_phy_supported(struct phy_device *phydev, u32 max_speed) { - phydev->supported &= ~(PHY_1000BT_FEATURES | PHY_100BT_FEATURES | - PHY_10BT_FEATURES); - switch (max_speed) { - default: - return -ENOTSUPP; - case SPEED_1000: - phydev->supported |= PHY_1000BT_FEATURES; + case SPEED_10: + phydev->supported &= ~PHY_100BT_FEATURES; /* fall through */ case SPEED_100: - phydev->supported |= PHY_100BT_FEATURES; - /* fall through */ - case SPEED_10: - phydev->supported |= PHY_10BT_FEATURES; + phydev->supported &= ~PHY_1000BT_FEATURES; + break; + case SPEED_1000: + break; + default: + return -ENOTSUPP; } return 0; @@ -1930,6 +1924,14 @@ int phy_driver_register(struct phy_driver *new_driver, struct module *owner) new_driver->mdiodrv.driver.remove = phy_remove; new_driver->mdiodrv.driver.owner = owner; + /* The following works around an issue where the PHY driver doesn't bind + * to the device, resulting in the genphy driver being used instead of + * the dedicated driver. The root cause of the issue isn't known yet + * and seems to be in the base driver core. Once this is fixed we may + * remove this workaround. + */ + new_driver->mdiodrv.driver.probe_type = PROBE_FORCE_SYNCHRONOUS; + retval = driver_register(&new_driver->mdiodrv.driver); if (retval) { pr_err("%s: Error %d in registering driver\n", diff --git a/drivers/net/phy/phylink.c b/drivers/net/phy/phylink.c index 7abca86c3aa9b..70f3f90c2ed69 100644 --- a/drivers/net/phy/phylink.c +++ b/drivers/net/phy/phylink.c @@ -907,6 +907,9 @@ void phylink_start(struct phylink *pl) phylink_an_mode_str(pl->link_an_mode), phy_modes(pl->link_config.interface)); + /* Always set the carrier off */ + netif_carrier_off(pl->netdev); + /* Apply the link configuration to the MAC when starting. 
This allows * a fixed-link to start with the correct parameters, and also * ensures that we set the appropriate advertisement for Serdes links. diff --git a/drivers/net/phy/realtek.c b/drivers/net/phy/realtek.c index 7fc8508b5231d..271e8adc39f10 100644 --- a/drivers/net/phy/realtek.c +++ b/drivers/net/phy/realtek.c @@ -220,7 +220,7 @@ static struct phy_driver realtek_drvs[] = { .flags = PHY_HAS_INTERRUPT, }, { .phy_id = 0x001cc816, - .name = "RTL8201F 10/100Mbps Ethernet", + .name = "RTL8201F Fast Ethernet", .phy_id_mask = 0x001fffff, .features = PHY_BASIC_FEATURES, .flags = PHY_HAS_INTERRUPT, diff --git a/drivers/net/phy/sfp-bus.c b/drivers/net/phy/sfp-bus.c index 83060fb349f4d..ad9db652874dc 100644 --- a/drivers/net/phy/sfp-bus.c +++ b/drivers/net/phy/sfp-bus.c @@ -162,7 +162,7 @@ void sfp_parse_support(struct sfp_bus *bus, const struct sfp_eeprom_id *id, /* 1000Base-PX or 1000Base-BX10 */ if ((id->base.e_base_px || id->base.e_base_bx10) && br_min <= 1300 && br_max >= 1200) - phylink_set(support, 1000baseX_Full); + phylink_set(modes, 1000baseX_Full); /* For active or passive cables, select the link modes * based on the bit rates and the cable compliance bytes. diff --git a/drivers/net/ppp/pppoe.c b/drivers/net/ppp/pppoe.c index 62dc564b251d5..f22639f0116a4 100644 --- a/drivers/net/ppp/pppoe.c +++ b/drivers/net/ppp/pppoe.c @@ -445,6 +445,7 @@ static int pppoe_rcv(struct sk_buff *skb, struct net_device *dev, if (pskb_trim_rcsum(skb, len)) goto drop; + ph = pppoe_hdr(skb); pn = pppoe_pernet(dev_net(dev)); /* Note that get_item does a sock_hold(), so sk_pppox(po) diff --git a/drivers/net/rionet.c b/drivers/net/rionet.c index e9f101c9bae2c..bfbb39f935545 100644 --- a/drivers/net/rionet.c +++ b/drivers/net/rionet.c @@ -216,9 +216,9 @@ static int rionet_start_xmit(struct sk_buff *skb, struct net_device *ndev) * it just report sending a packet to the target * (without actual packet transfer). 
*/ - dev_kfree_skb_any(skb); ndev->stats.tx_packets++; ndev->stats.tx_bytes += skb->len; + dev_kfree_skb_any(skb); } } diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c index d887016e54b68..4b6572f0188a7 100644 --- a/drivers/net/team/team.c +++ b/drivers/net/team/team.c @@ -985,8 +985,6 @@ static void team_port_disable(struct team *team, team->en_port_count--; team_queue_override_port_del(team, port); team_adjust_ops(team); - team_notify_peers(team); - team_mcast_rejoin(team); team_lower_state_changed(port); } diff --git a/drivers/net/tun.c b/drivers/net/tun.c index 50e9cc19023a7..33978b0cdac81 100644 --- a/drivers/net/tun.c +++ b/drivers/net/tun.c @@ -859,10 +859,6 @@ static int tun_attach(struct tun_struct *tun, struct file *file, err = 0; } - rcu_assign_pointer(tfile->tun, tun); - rcu_assign_pointer(tun->tfiles[tun->numqueues], tfile); - tun->numqueues++; - if (tfile->detached) { tun_enable_queue(tfile); } else { @@ -876,6 +872,13 @@ static int tun_attach(struct tun_struct *tun, struct file *file, * refcnt. */ + /* Publish tfile->tun and tun->tfiles only after we've fully + * initialized tfile; otherwise we risk using half-initialized + * object. 
+ */ + rcu_assign_pointer(tfile->tun, tun); + rcu_assign_pointer(tun->tfiles[tun->numqueues], tfile); + tun->numqueues++; out: return err; } @@ -1527,6 +1530,7 @@ static void tun_rx_batched(struct tun_struct *tun, struct tun_file *tfile, if (!rx_batched || (!more && skb_queue_empty(queue))) { local_bh_disable(); + skb_record_rx_queue(skb, tfile->queue_index); netif_receive_skb(skb); local_bh_enable(); return; @@ -1546,8 +1550,11 @@ static void tun_rx_batched(struct tun_struct *tun, struct tun_file *tfile, struct sk_buff *nskb; local_bh_disable(); - while ((nskb = __skb_dequeue(&process_queue))) + while ((nskb = __skb_dequeue(&process_queue))) { + skb_record_rx_queue(nskb, tfile->queue_index); netif_receive_skb(nskb); + } + skb_record_rx_queue(skb, tfile->queue_index); netif_receive_skb(skb); local_bh_enable(); } @@ -2264,7 +2271,9 @@ static void tun_setup(struct net_device *dev) static int tun_validate(struct nlattr *tb[], struct nlattr *data[], struct netlink_ext_ack *extack) { - return -EINVAL; + NL_SET_ERR_MSG(extack, + "tun/tap creation via rtnetlink is not supported."); + return -EOPNOTSUPP; } static size_t tun_get_size(const struct net_device *dev) diff --git a/drivers/net/usb/hso.c b/drivers/net/usb/hso.c index 184c24baca152..d6916f787fce9 100644 --- a/drivers/net/usb/hso.c +++ b/drivers/net/usb/hso.c @@ -2807,6 +2807,12 @@ static int hso_get_config_data(struct usb_interface *interface) return -EIO; } + /* check if we have a valid interface */ + if (if_num > 16) { + kfree(config_data); + return -EINVAL; + } + switch (config_data[if_num]) { case 0x0: result = 0; @@ -2877,10 +2883,18 @@ static int hso_probe(struct usb_interface *interface, /* Get the interface/port specification from either driver_info or from * the device itself */ - if (id->driver_info) + if (id->driver_info) { + /* if_num is controlled by the device, driver_info is a 0 terminated + * array. Make sure, the access is in bounds! 
*/ + for (i = 0; i <= if_num; ++i) + if (((u32 *)(id->driver_info))[i] == 0) + goto exit; port_spec = ((u32 *)(id->driver_info))[if_num]; - else + } else { port_spec = hso_get_config_data(interface); + if (port_spec < 0) + goto exit; + } /* Check if we need to switch to alt interfaces prior to port * configuration */ diff --git a/drivers/net/usb/ipheth.c b/drivers/net/usb/ipheth.c index 7275761a1177c..3d8a70d3ea9bd 100644 --- a/drivers/net/usb/ipheth.c +++ b/drivers/net/usb/ipheth.c @@ -140,7 +140,6 @@ struct ipheth_device { struct usb_device *udev; struct usb_interface *intf; struct net_device *net; - struct sk_buff *tx_skb; struct urb *tx_urb; struct urb *rx_urb; unsigned char *tx_buf; @@ -230,6 +229,7 @@ static void ipheth_rcvbulk_callback(struct urb *urb) case -ENOENT: case -ECONNRESET: case -ESHUTDOWN: + case -EPROTO: return; case 0: break; @@ -281,7 +281,6 @@ static void ipheth_sndbulk_callback(struct urb *urb) dev_err(&dev->intf->dev, "%s: urb status: %d\n", __func__, status); - dev_kfree_skb_irq(dev->tx_skb); if (status == 0) netif_wake_queue(dev->net); else @@ -423,7 +422,7 @@ static int ipheth_tx(struct sk_buff *skb, struct net_device *net) if (skb->len > IPHETH_BUF_SIZE) { WARN(1, "%s: skb too large: %d bytes\n", __func__, skb->len); dev->net->stats.tx_dropped++; - dev_kfree_skb_irq(skb); + dev_kfree_skb_any(skb); return NETDEV_TX_OK; } @@ -443,12 +442,11 @@ static int ipheth_tx(struct sk_buff *skb, struct net_device *net) dev_err(&dev->intf->dev, "%s: usb_submit_urb: %d\n", __func__, retval); dev->net->stats.tx_errors++; - dev_kfree_skb_irq(skb); + dev_kfree_skb_any(skb); } else { - dev->tx_skb = skb; - dev->net->stats.tx_packets++; dev->net->stats.tx_bytes += skb->len; + dev_consume_skb_any(skb); netif_stop_queue(net); } diff --git a/drivers/net/usb/lan78xx.c b/drivers/net/usb/lan78xx.c index c3c9ba44e2a12..8d140495da79d 100644 --- a/drivers/net/usb/lan78xx.c +++ b/drivers/net/usb/lan78xx.c @@ -2335,6 +2335,10 @@ static int lan78xx_set_mac_addr(struct 
net_device *netdev, void *p) ret = lan78xx_write_reg(dev, RX_ADDRL, addr_lo); ret = lan78xx_write_reg(dev, RX_ADDRH, addr_hi); + /* Added to support MAC address changes */ + ret = lan78xx_write_reg(dev, MAF_LO(0), addr_lo); + ret = lan78xx_write_reg(dev, MAF_HI(0), addr_hi | MAF_HI_VALID_); + return 0; } diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c index 72a55b6b42118..735ad838e2ba8 100644 --- a/drivers/net/usb/qmi_wwan.c +++ b/drivers/net/usb/qmi_wwan.c @@ -123,6 +123,7 @@ static void qmimux_setup(struct net_device *dev) dev->addr_len = 0; dev->flags = IFF_POINTOPOINT | IFF_NOARP | IFF_MULTICAST; dev->netdev_ops = &qmimux_netdev_ops; + dev->mtu = 1500; dev->needs_free_netdev = true; } @@ -151,17 +152,18 @@ static bool qmimux_has_slaves(struct usbnet *dev) static int qmimux_rx_fixup(struct usbnet *dev, struct sk_buff *skb) { - unsigned int len, offset = sizeof(struct qmimux_hdr); + unsigned int len, offset = 0; struct qmimux_hdr *hdr; struct net_device *net; struct sk_buff *skbn; + u8 qmimux_hdr_sz = sizeof(*hdr); - while (offset < skb->len) { - hdr = (struct qmimux_hdr *)skb->data; + while (offset + qmimux_hdr_sz < skb->len) { + hdr = (struct qmimux_hdr *)(skb->data + offset); len = be16_to_cpu(hdr->pkt_len); /* drop the packet, bogus length */ - if (offset + len > skb->len) + if (offset + len + qmimux_hdr_sz > skb->len) return 0; /* control packet, we do not know what to do */ @@ -176,7 +178,7 @@ static int qmimux_rx_fixup(struct usbnet *dev, struct sk_buff *skb) return 0; skbn->dev = net; - switch (skb->data[offset] & 0xf0) { + switch (skb->data[offset + qmimux_hdr_sz] & 0xf0) { case 0x40: skbn->protocol = htons(ETH_P_IP); break; @@ -188,12 +190,12 @@ static int qmimux_rx_fixup(struct usbnet *dev, struct sk_buff *skb) goto skip; } - skb_put_data(skbn, skb->data + offset, len); + skb_put_data(skbn, skb->data + offset + qmimux_hdr_sz, len); if (netif_rx(skbn) != NET_RX_SUCCESS) return 0; skip: - offset += len + sizeof(struct qmimux_hdr); + 
offset += len + qmimux_hdr_sz; } return 1; } @@ -1117,6 +1119,7 @@ static const struct usb_device_id products[] = { {QMI_FIXED_INTF(0x1435, 0xd181, 4)}, /* Wistron NeWeb D18Q1 */ {QMI_FIXED_INTF(0x1435, 0xd181, 5)}, /* Wistron NeWeb D18Q1 */ {QMI_FIXED_INTF(0x1435, 0xd191, 4)}, /* Wistron NeWeb D19Q1 */ + {QMI_QUIRK_SET_DTR(0x1508, 0x1001, 4)}, /* Fibocom NL668 series */ {QMI_FIXED_INTF(0x16d8, 0x6003, 0)}, /* CMOTech 6003 */ {QMI_FIXED_INTF(0x16d8, 0x6007, 0)}, /* CMOTech CHE-628S */ {QMI_FIXED_INTF(0x16d8, 0x6008, 0)}, /* CMOTech CMU-301 */ @@ -1229,6 +1232,7 @@ static const struct usb_device_id products[] = { {QMI_FIXED_INTF(0x1bc7, 0x1101, 3)}, /* Telit ME910 dual modem */ {QMI_FIXED_INTF(0x1bc7, 0x1200, 5)}, /* Telit LE920 */ {QMI_QUIRK_SET_DTR(0x1bc7, 0x1201, 2)}, /* Telit LE920, LE920A4 */ + {QMI_QUIRK_SET_DTR(0x1bc7, 0x1900, 1)}, /* Telit LN940 series */ {QMI_FIXED_INTF(0x1c9e, 0x9801, 3)}, /* Telewell TW-3G HSPA+ */ {QMI_FIXED_INTF(0x1c9e, 0x9803, 4)}, /* Telewell TW-3G HSPA+ */ {QMI_FIXED_INTF(0x1c9e, 0x9b01, 3)}, /* XS Stick W100-2 from 4G Systems */ @@ -1263,6 +1267,7 @@ static const struct usb_device_id products[] = { {QMI_QUIRK_SET_DTR(0x2c7c, 0x0121, 4)}, /* Quectel EC21 Mini PCIe */ {QMI_QUIRK_SET_DTR(0x2c7c, 0x0191, 4)}, /* Quectel EG91 */ {QMI_FIXED_INTF(0x2c7c, 0x0296, 4)}, /* Quectel BG96 */ + {QMI_QUIRK_SET_DTR(0x2cb7, 0x0104, 4)}, /* Fibocom NL678 series */ /* 4. 
Gobi 1000 devices */ {QMI_GOBI1K_DEVICE(0x05c6, 0x9212)}, /* Acer Gobi Modem Device */ diff --git a/drivers/net/usb/smsc95xx.c b/drivers/net/usb/smsc95xx.c index 262e7a3c23cb6..f2d01cb6f958c 100644 --- a/drivers/net/usb/smsc95xx.c +++ b/drivers/net/usb/smsc95xx.c @@ -1321,6 +1321,8 @@ static int smsc95xx_bind(struct usbnet *dev, struct usb_interface *intf) dev->net->ethtool_ops = &smsc95xx_ethtool_ops; dev->net->flags |= IFF_MULTICAST; dev->net->hard_header_len += SMSC95XX_TX_OVERHEAD_CSUM; + dev->net->min_mtu = ETH_MIN_MTU; + dev->net->max_mtu = ETH_DATA_LEN; dev->hard_mtu = dev->net->mtu + dev->net->hard_header_len; pdata->dev = dev; @@ -1598,6 +1600,8 @@ static int smsc95xx_suspend(struct usb_interface *intf, pm_message_t message) return ret; } + cancel_delayed_work_sync(&pdata->carrier_check); + if (pdata->suspend_flags) { netdev_warn(dev->net, "error during last resume\n"); pdata->suspend_flags = 0; @@ -1840,6 +1844,11 @@ static int smsc95xx_suspend(struct usb_interface *intf, pm_message_t message) */ if (ret && PMSG_IS_AUTO(message)) usbnet_resume(intf); + + if (ret) + schedule_delayed_work(&pdata->carrier_check, + CARRIER_CHECK_DELAY); + return ret; } diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c index ddfa3f24204c7..ad14fbfa1864f 100644 --- a/drivers/net/virtio_net.c +++ b/drivers/net/virtio_net.c @@ -70,7 +70,8 @@ static const unsigned long guest_offloads[] = { VIRTIO_NET_F_GUEST_TSO4, VIRTIO_NET_F_GUEST_TSO6, VIRTIO_NET_F_GUEST_ECN, - VIRTIO_NET_F_GUEST_UFO + VIRTIO_NET_F_GUEST_UFO, + VIRTIO_NET_F_GUEST_CSUM }; struct virtnet_stat_desc { @@ -364,7 +365,8 @@ static unsigned int mergeable_ctx_to_truesize(void *mrg_ctx) static struct sk_buff *page_to_skb(struct virtnet_info *vi, struct receive_queue *rq, struct page *page, unsigned int offset, - unsigned int len, unsigned int truesize) + unsigned int len, unsigned int truesize, + bool hdr_valid) { struct sk_buff *skb; struct virtio_net_hdr_mrg_rxbuf *hdr; @@ -386,7 +388,8 @@ static struct 
sk_buff *page_to_skb(struct virtnet_info *vi, else hdr_padded_len = sizeof(struct padded_vnet_hdr); - memcpy(hdr, p, hdr_len); + if (hdr_valid) + memcpy(hdr, p, hdr_len); len -= hdr_len; offset += hdr_padded_len; @@ -738,7 +741,8 @@ static struct sk_buff *receive_big(struct net_device *dev, struct virtnet_rq_stats *stats) { struct page *page = buf; - struct sk_buff *skb = page_to_skb(vi, rq, page, 0, len, PAGE_SIZE); + struct sk_buff *skb = page_to_skb(vi, rq, page, 0, len, + PAGE_SIZE, true); stats->bytes += len - vi->hdr_len; if (unlikely(!skb)) @@ -841,7 +845,8 @@ static struct sk_buff *receive_mergeable(struct net_device *dev, rcu_read_unlock(); put_page(page); head_skb = page_to_skb(vi, rq, xdp_page, - offset, len, PAGE_SIZE); + offset, len, + PAGE_SIZE, false); return head_skb; } break; @@ -897,7 +902,7 @@ static struct sk_buff *receive_mergeable(struct net_device *dev, goto err_skb; } - head_skb = page_to_skb(vi, rq, page, offset, len, truesize); + head_skb = page_to_skb(vi, rq, page, offset, len, truesize, !xdp_prog); curr_skb = head_skb; if (unlikely(!curr_skb)) @@ -2285,9 +2290,6 @@ static int virtnet_clear_guest_offloads(struct virtnet_info *vi) if (!vi->guest_offloads) return 0; - if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_CSUM)) - offloads = 1ULL << VIRTIO_NET_F_GUEST_CSUM; - return virtnet_set_guest_offloads(vi, offloads); } @@ -2297,8 +2299,6 @@ static int virtnet_restore_guest_offloads(struct virtnet_info *vi) if (!vi->guest_offloads) return 0; - if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_CSUM)) - offloads |= 1ULL << VIRTIO_NET_F_GUEST_CSUM; return virtnet_set_guest_offloads(vi, offloads); } @@ -2316,8 +2316,9 @@ static int virtnet_xdp_set(struct net_device *dev, struct bpf_prog *prog, && (virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_TSO4) || virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_TSO6) || virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_ECN) || - virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_UFO))) { - 
NL_SET_ERR_MSG_MOD(extack, "Can't set XDP while host is implementing LRO, disable LRO first"); + virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_UFO) || + virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_CSUM))) { + NL_SET_ERR_MSG_MOD(extack, "Can't set XDP while host is implementing LRO/CSUM, disable LRO/CSUM first"); return -EOPNOTSUPP; } diff --git a/drivers/net/wan/x25_asy.c b/drivers/net/wan/x25_asy.c index 74c06a5f586f7..4f25c2d8fff06 100644 --- a/drivers/net/wan/x25_asy.c +++ b/drivers/net/wan/x25_asy.c @@ -486,8 +486,10 @@ static int x25_asy_open(struct net_device *dev) /* Cleanup */ kfree(sl->xbuff); + sl->xbuff = NULL; noxbuff: kfree(sl->rbuff); + sl->rbuff = NULL; norbuff: return -ENOMEM; } diff --git a/drivers/net/wireless/ath/ath10k/debugfs_sta.c b/drivers/net/wireless/ath/ath10k/debugfs_sta.c index a63c97e2c50c5..6f10331e986bd 100644 --- a/drivers/net/wireless/ath/ath10k/debugfs_sta.c +++ b/drivers/net/wireless/ath/ath10k/debugfs_sta.c @@ -71,7 +71,7 @@ void ath10k_sta_update_rx_tid_stats_ampdu(struct ath10k *ar, u16 peer_id, u8 tid spin_lock_bh(&ar->data_lock); peer = ath10k_peer_find_by_id(ar, peer_id); - if (!peer) + if (!peer || !peer->sta) goto out; arsta = (struct ath10k_sta *)peer->sta->drv_priv; diff --git a/drivers/net/wireless/ath/ath10k/htt_rx.c b/drivers/net/wireless/ath/ath10k/htt_rx.c index 4d1cd90d6d27c..03d4cc6f35bcd 100644 --- a/drivers/net/wireless/ath/ath10k/htt_rx.c +++ b/drivers/net/wireless/ath/ath10k/htt_rx.c @@ -2589,7 +2589,7 @@ static void ath10k_htt_fetch_peer_stats(struct ath10k *ar, rcu_read_lock(); spin_lock_bh(&ar->data_lock); peer = ath10k_peer_find_by_id(ar, peer_id); - if (!peer) { + if (!peer || !peer->sta) { ath10k_warn(ar, "Invalid peer id %d peer stats buffer\n", peer_id); goto out; @@ -2642,7 +2642,7 @@ static void ath10k_fetch_10_2_tx_stats(struct ath10k *ar, u8 *data) rcu_read_lock(); spin_lock_bh(&ar->data_lock); peer = ath10k_peer_find_by_id(ar, peer_id); - if (!peer) { + if (!peer || !peer->sta) { 
ath10k_warn(ar, "Invalid peer id %d in peer stats buffer\n", peer_id); goto out; diff --git a/drivers/net/wireless/ath/ath10k/wmi.c b/drivers/net/wireless/ath/ath10k/wmi.c index fd612d2905b05..9f31b9a108507 100644 --- a/drivers/net/wireless/ath/ath10k/wmi.c +++ b/drivers/net/wireless/ath/ath10k/wmi.c @@ -1869,6 +1869,12 @@ int ath10k_wmi_cmd_send(struct ath10k *ar, struct sk_buff *skb, u32 cmd_id) if (ret) dev_kfree_skb_any(skb); + if (ret == -EAGAIN) { + ath10k_warn(ar, "wmi command %d timeout, restarting hardware\n", + cmd_id); + queue_work(ar->workqueue, &ar->restart_work); + } + return ret; } @@ -2336,7 +2342,12 @@ static int wmi_process_mgmt_tx_comp(struct ath10k *ar, u32 desc_id, dma_unmap_single(ar->dev, pkt_addr->paddr, msdu->len, DMA_FROM_DEVICE); info = IEEE80211_SKB_CB(msdu); - info->flags |= status; + + if (status) + info->flags &= ~IEEE80211_TX_STAT_ACK; + else + info->flags |= IEEE80211_TX_STAT_ACK; + ieee80211_tx_status_irqsafe(ar->hw, msdu); ret = 0; diff --git a/drivers/net/wireless/ath/wil6210/txrx_edma.c b/drivers/net/wireless/ath/wil6210/txrx_edma.c index bca61cb44c375..3e7fc2983cbb3 100644 --- a/drivers/net/wireless/ath/wil6210/txrx_edma.c +++ b/drivers/net/wireless/ath/wil6210/txrx_edma.c @@ -279,9 +279,6 @@ static void wil_move_all_rx_buff_to_free_list(struct wil6210_priv *wil, u16 buff_id; *d = *_d; - pa = wil_rx_desc_get_addr_edma(&d->dma); - dmalen = le16_to_cpu(d->dma.length); - dma_unmap_single(dev, pa, dmalen, DMA_FROM_DEVICE); /* Extract the SKB from the rx_buff management array */ buff_id = __le16_to_cpu(d->mac.buff_id); @@ -291,10 +288,15 @@ static void wil_move_all_rx_buff_to_free_list(struct wil6210_priv *wil, } skb = wil->rx_buff_mgmt.buff_arr[buff_id].skb; wil->rx_buff_mgmt.buff_arr[buff_id].skb = NULL; - if (unlikely(!skb)) + if (unlikely(!skb)) { wil_err(wil, "No Rx skb at buff_id %d\n", buff_id); - else + } else { + pa = wil_rx_desc_get_addr_edma(&d->dma); + dmalen = le16_to_cpu(d->dma.length); + dma_unmap_single(dev, pa, 
dmalen, DMA_FROM_DEVICE); + kfree_skb(skb); + } /* Move the buffer from the active to the free list */ list_move(&wil->rx_buff_mgmt.buff_arr[buff_id].list, @@ -906,6 +908,9 @@ static struct sk_buff *wil_sring_reap_rx_edma(struct wil6210_priv *wil, wil->rx_buff_mgmt.buff_arr[buff_id].skb = NULL; if (!skb) { wil_err(wil, "No Rx skb at buff_id %d\n", buff_id); + /* Move the buffer from the active list to the free list */ + list_move(&wil->rx_buff_mgmt.buff_arr[buff_id].list, + &wil->rx_buff_mgmt.free); goto again; } diff --git a/drivers/net/wireless/broadcom/b43/phy_common.c b/drivers/net/wireless/broadcom/b43/phy_common.c index 85f2ca9895656..ef3ffa5ad4668 100644 --- a/drivers/net/wireless/broadcom/b43/phy_common.c +++ b/drivers/net/wireless/broadcom/b43/phy_common.c @@ -616,7 +616,7 @@ struct b43_c32 b43_cordic(int theta) u8 i; s32 tmp; s8 signx = 1; - u32 angle = 0; + s32 angle = 0; struct b43_c32 ret = { .i = 39797, .q = 0, }; while (theta > (180 << 16)) diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c index 5444e6213d459..6f3faaf1b1cbb 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c @@ -5188,10 +5188,17 @@ static struct cfg80211_ops brcmf_cfg80211_ops = { .del_pmk = brcmf_cfg80211_del_pmk, }; -struct cfg80211_ops *brcmf_cfg80211_get_ops(void) +struct cfg80211_ops *brcmf_cfg80211_get_ops(struct brcmf_mp_device *settings) { - return kmemdup(&brcmf_cfg80211_ops, sizeof(brcmf_cfg80211_ops), + struct cfg80211_ops *ops; + + ops = kmemdup(&brcmf_cfg80211_ops, sizeof(brcmf_cfg80211_ops), GFP_KERNEL); + + if (ops && settings->roamoff) + ops->update_connect_params = NULL; + + return ops; } struct brcmf_cfg80211_vif *brcmf_alloc_vif(struct brcmf_cfg80211_info *cfg, @@ -5997,7 +6004,8 @@ static int brcmf_construct_chaninfo(struct brcmf_cfg80211_info *cfg, * for subsequent chanspecs. 
*/ channel->flags = IEEE80211_CHAN_NO_HT40 | - IEEE80211_CHAN_NO_80MHZ; + IEEE80211_CHAN_NO_80MHZ | + IEEE80211_CHAN_NO_160MHZ; ch.bw = BRCMU_CHAN_BW_20; cfg->d11inf.encchspec(&ch); chaninfo = ch.chspec; diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.h index a4aec0004e4f1..9a6287f084a92 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.h +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.h @@ -404,7 +404,7 @@ struct brcmf_cfg80211_info *brcmf_cfg80211_attach(struct brcmf_pub *drvr, void brcmf_cfg80211_detach(struct brcmf_cfg80211_info *cfg); s32 brcmf_cfg80211_up(struct net_device *ndev); s32 brcmf_cfg80211_down(struct net_device *ndev); -struct cfg80211_ops *brcmf_cfg80211_get_ops(void); +struct cfg80211_ops *brcmf_cfg80211_get_ops(struct brcmf_mp_device *settings); enum nl80211_iftype brcmf_cfg80211_get_iftype(struct brcmf_if *ifp); struct brcmf_cfg80211_vif *brcmf_alloc_vif(struct brcmf_cfg80211_info *cfg, diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c index b1f702faff4fb..860a4372cb564 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c @@ -1130,7 +1130,7 @@ int brcmf_attach(struct device *dev, struct brcmf_mp_device *settings) brcmf_dbg(TRACE, "Enter\n"); - ops = brcmf_cfg80211_get_ops(); + ops = brcmf_cfg80211_get_ops(settings); if (!ops) return -ENOMEM; diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.c index 9095b830ae4d7..9927079a9ace4 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.c @@ -641,8 +641,9 @@ brcmf_fw_alloc_request(u32 chip, u32 chiprev, struct brcmf_fw_request *fwreq; char chipname[12]; const char 
*mp_path; + size_t mp_path_len; u32 i, j; - char end; + char end = '\0'; size_t reqsz; for (i = 0; i < table_size; i++) { @@ -667,7 +668,10 @@ brcmf_fw_alloc_request(u32 chip, u32 chiprev, mapping_table[i].fw_base, chipname); mp_path = brcmf_mp_global.firmware_path; - end = mp_path[strlen(mp_path) - 1]; + mp_path_len = strnlen(mp_path, BRCMF_FW_ALTPATH_LEN); + if (mp_path_len) + end = mp_path[mp_path_len - 1]; + fwreq->n_items = n_fwnames; for (j = 0; j < n_fwnames; j++) { diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmutil/d11.c b/drivers/net/wireless/broadcom/brcm80211/brcmutil/d11.c index d8b79cb72b58d..eb5db94f57453 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmutil/d11.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmutil/d11.c @@ -77,6 +77,8 @@ static u16 d11ac_bw(enum brcmu_chan_bw bw) return BRCMU_CHSPEC_D11AC_BW_40; case BRCMU_CHAN_BW_80: return BRCMU_CHSPEC_D11AC_BW_80; + case BRCMU_CHAN_BW_160: + return BRCMU_CHSPEC_D11AC_BW_160; default: WARN_ON(1); } @@ -190,8 +192,41 @@ static void brcmu_d11ac_decchspec(struct brcmu_chan *ch) break; } break; - case BRCMU_CHSPEC_D11AC_BW_8080: case BRCMU_CHSPEC_D11AC_BW_160: + ch->bw = BRCMU_CHAN_BW_160; + ch->sb = brcmu_maskget16(ch->chspec, BRCMU_CHSPEC_D11AC_SB_MASK, + BRCMU_CHSPEC_D11AC_SB_SHIFT); + switch (ch->sb) { + case BRCMU_CHAN_SB_LLL: + ch->control_ch_num -= CH_70MHZ_APART; + break; + case BRCMU_CHAN_SB_LLU: + ch->control_ch_num -= CH_50MHZ_APART; + break; + case BRCMU_CHAN_SB_LUL: + ch->control_ch_num -= CH_30MHZ_APART; + break; + case BRCMU_CHAN_SB_LUU: + ch->control_ch_num -= CH_10MHZ_APART; + break; + case BRCMU_CHAN_SB_ULL: + ch->control_ch_num += CH_10MHZ_APART; + break; + case BRCMU_CHAN_SB_ULU: + ch->control_ch_num += CH_30MHZ_APART; + break; + case BRCMU_CHAN_SB_UUL: + ch->control_ch_num += CH_50MHZ_APART; + break; + case BRCMU_CHAN_SB_UUU: + ch->control_ch_num += CH_70MHZ_APART; + break; + default: + WARN_ON_ONCE(1); + break; + } + break; + case BRCMU_CHSPEC_D11AC_BW_8080: 
default: WARN_ON_ONCE(1); break; diff --git a/drivers/net/wireless/broadcom/brcm80211/include/brcmu_wifi.h b/drivers/net/wireless/broadcom/brcm80211/include/brcmu_wifi.h index 7b9a77981df16..75b2a0438cfa7 100644 --- a/drivers/net/wireless/broadcom/brcm80211/include/brcmu_wifi.h +++ b/drivers/net/wireless/broadcom/brcm80211/include/brcmu_wifi.h @@ -29,6 +29,8 @@ #define CH_UPPER_SB 0x01 #define CH_LOWER_SB 0x02 #define CH_EWA_VALID 0x04 +#define CH_70MHZ_APART 14 +#define CH_50MHZ_APART 10 #define CH_30MHZ_APART 6 #define CH_20MHZ_APART 4 #define CH_10MHZ_APART 2 diff --git a/drivers/net/wireless/intel/iwlwifi/fw/acpi.h b/drivers/net/wireless/intel/iwlwifi/fw/acpi.h index cb5f32c1d7057..0b3b1223cff7e 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/acpi.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/acpi.h @@ -6,6 +6,7 @@ * GPL LICENSE SUMMARY * * Copyright(c) 2017 Intel Deutschland GmbH + * Copyright(c) 2018 Intel Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -29,6 +30,7 @@ * BSD LICENSE * * Copyright(c) 2017 Intel Deutschland GmbH + * Copyright(c) 2018 Intel Corporation * All rights reserved. 
* * Redistribution and use in source and binary forms, with or without @@ -84,7 +86,7 @@ #define ACPI_WRDS_WIFI_DATA_SIZE (ACPI_SAR_TABLE_SIZE + 2) #define ACPI_EWRD_WIFI_DATA_SIZE ((ACPI_SAR_PROFILE_NUM - 1) * \ ACPI_SAR_TABLE_SIZE + 3) -#define ACPI_WGDS_WIFI_DATA_SIZE 18 +#define ACPI_WGDS_WIFI_DATA_SIZE 19 #define ACPI_WRDD_WIFI_DATA_SIZE 2 #define ACPI_SPLC_WIFI_DATA_SIZE 2 diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c index 6bb1a99a197a2..16c6c7f921a89 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c @@ -704,8 +704,12 @@ static int iwl_mvm_sar_get_ewrd_table(struct iwl_mvm *mvm) enabled = !!(wifi_pkg->package.elements[1].integer.value); n_profiles = wifi_pkg->package.elements[2].integer.value; - /* in case of BIOS bug */ - if (n_profiles <= 0) { + /* + * Check the validity of n_profiles. The EWRD profiles start + * from index 1, so the maximum value allowed here is + * ACPI_SAR_PROFILES_NUM - 1. + */ + if (n_profiles <= 0 || n_profiles >= ACPI_SAR_PROFILE_NUM) { ret = -EINVAL; goto out_free; } @@ -864,6 +868,15 @@ static int iwl_mvm_sar_geo_init(struct iwl_mvm *mvm) int ret, i, j; u16 cmd_wide_id = WIDE_ID(PHY_OPS_GROUP, GEO_TX_POWER_LIMIT); + /* + * This command is not supported on earlier firmware versions. + * Unfortunately, we don't have a TLV API flag to rely on, so + * rely on the major version which is in the first byte of + * ucode_ver. 
+ */ + if (IWL_UCODE_SERIAL(mvm->fw->ucode_ver) < 41) + return 0; + ret = iwl_mvm_sar_get_wgds_table(mvm); if (ret < 0) { IWL_DEBUG_RADIO(mvm, @@ -876,7 +889,7 @@ static int iwl_mvm_sar_geo_init(struct iwl_mvm *mvm) IWL_DEBUG_RADIO(mvm, "Sending GEO_TX_POWER_LIMIT\n"); BUILD_BUG_ON(ACPI_NUM_GEO_PROFILES * ACPI_WGDS_NUM_BANDS * - ACPI_WGDS_TABLE_SIZE != ACPI_WGDS_WIFI_DATA_SIZE); + ACPI_WGDS_TABLE_SIZE + 1 != ACPI_WGDS_WIFI_DATA_SIZE); BUILD_BUG_ON(ACPI_NUM_GEO_PROFILES > IWL_NUM_GEO_PROFILES); @@ -911,6 +924,11 @@ static int iwl_mvm_sar_get_ewrd_table(struct iwl_mvm *mvm) return -ENOENT; } +static int iwl_mvm_sar_get_wgds_table(struct iwl_mvm *mvm) +{ + return -ENOENT; +} + static int iwl_mvm_sar_geo_init(struct iwl_mvm *mvm) { return 0; @@ -937,8 +955,11 @@ static int iwl_mvm_sar_init(struct iwl_mvm *mvm) IWL_DEBUG_RADIO(mvm, "WRDS SAR BIOS table invalid or unavailable. (%d)\n", ret); - /* if not available, don't fail and don't bother with EWRD */ - return 0; + /* + * If not available, don't fail and don't bother with EWRD. + * Return 1 to tell that we can't use WGDS either. + */ + return 1; } ret = iwl_mvm_sar_get_ewrd_table(mvm); @@ -951,9 +972,13 @@ static int iwl_mvm_sar_init(struct iwl_mvm *mvm) /* choose profile 1 (WRDS) as default for both chains */ ret = iwl_mvm_sar_select_profile(mvm, 1, 1); - /* if we don't have profile 0 from BIOS, just skip it */ + /* + * If we don't have profile 0 from BIOS, just skip it. This + * means that SAR Geo will not be enabled either, even if we + * have other valid profiles. + */ if (ret == -ENOENT) - return 0; + return 1; return ret; } @@ -1151,11 +1176,19 @@ int iwl_mvm_up(struct iwl_mvm *mvm) iwl_mvm_unref(mvm, IWL_MVM_REF_UCODE_DOWN); ret = iwl_mvm_sar_init(mvm); - if (ret) - goto error; + if (ret == 0) { + ret = iwl_mvm_sar_geo_init(mvm); + } else if (ret > 0 && !iwl_mvm_sar_get_wgds_table(mvm)) { + /* + * If basic SAR is not available, we check for WGDS, + * which should *not* be available either. 
If it is + * available, issue an error, because we can't use SAR + * Geo without basic SAR. + */ + IWL_ERR(mvm, "BIOS contains WGDS but no WRDS\n"); + } - ret = iwl_mvm_sar_geo_init(mvm); - if (ret) + if (ret < 0) goto error; iwl_mvm_leds_sync(mvm); diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c index b15b0d84bb7ea..9a764af30f36b 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c @@ -306,8 +306,12 @@ struct ieee80211_regdomain *iwl_mvm_get_regdomain(struct wiphy *wiphy, goto out; } - if (changed) - *changed = (resp->status == MCC_RESP_NEW_CHAN_PROFILE); + if (changed) { + u32 status = le32_to_cpu(resp->status); + + *changed = (status == MCC_RESP_NEW_CHAN_PROFILE || + status == MCC_RESP_ILLEGAL); + } regd = iwl_parse_nvm_mcc_info(mvm->trans->dev, mvm->cfg, __le32_to_cpu(resp->n_channels), @@ -1233,12 +1237,15 @@ void __iwl_mvm_mac_stop(struct iwl_mvm *mvm) iwl_mvm_del_aux_sta(mvm); /* - * Clear IN_HW_RESTART flag when stopping the hw (as restart_complete() - * won't be called in this case). + * Clear IN_HW_RESTART and HW_RESTART_REQUESTED flag when stopping the + * hw (as restart_complete() won't be called in this case) and mac80211 + * won't execute the restart. * But make sure to cleanup interfaces that have gone down before/during * HW restart was requested. 
*/ - if (test_and_clear_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) + if (test_and_clear_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status) || + test_and_clear_bit(IWL_MVM_STATUS_HW_RESTART_REQUESTED, + &mvm->status)) ieee80211_iterate_interfaces(mvm->hw, 0, iwl_mvm_cleanup_iterator, mvm); @@ -2931,7 +2938,8 @@ static int iwl_mvm_mac_sta_state(struct ieee80211_hw *hw, iwl_mvm_mac_ctxt_changed(mvm, vif, false, NULL); } - iwl_mvm_rs_rate_init(mvm, sta, mvmvif->phy_ctxt->channel->band); + iwl_mvm_rs_rate_init(mvm, sta, mvmvif->phy_ctxt->channel->band, + false); ret = iwl_mvm_update_sta(mvm, vif, sta); } else if (old_state == IEEE80211_STA_ASSOC && new_state == IEEE80211_STA_AUTHORIZED) { @@ -2947,7 +2955,8 @@ static int iwl_mvm_mac_sta_state(struct ieee80211_hw *hw, /* enable beacon filtering */ WARN_ON(iwl_mvm_enable_beacon_filter(mvm, vif, 0)); - iwl_mvm_rs_rate_init(mvm, sta, mvmvif->phy_ctxt->channel->band); + iwl_mvm_rs_rate_init(mvm, sta, mvmvif->phy_ctxt->channel->band, + true); ret = 0; } else if (old_state == IEEE80211_STA_AUTHORIZED && @@ -4413,10 +4422,6 @@ static void iwl_mvm_mac_sta_statistics(struct ieee80211_hw *hw, sinfo->filled |= BIT_ULL(NL80211_STA_INFO_SIGNAL_AVG); } - if (!fw_has_capa(&mvm->fw->ucode_capa, - IWL_UCODE_TLV_CAPA_RADIO_BEACON_STATS)) - return; - /* if beacon filtering isn't on mac80211 does it anyway */ if (!(vif->driver_flags & IEEE80211_VIF_BEACON_FILTER)) return; diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h index b3987a0a70181..6b65ad6c9b56d 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h +++ b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h @@ -1685,7 +1685,7 @@ iwl_mvm_vif_dbgfs_clean(struct iwl_mvm *mvm, struct ieee80211_vif *vif) #endif /* CONFIG_IWLWIFI_DEBUGFS */ /* rate scaling */ -int iwl_mvm_send_lq_cmd(struct iwl_mvm *mvm, struct iwl_lq_cmd *lq, bool init); +int iwl_mvm_send_lq_cmd(struct iwl_mvm *mvm, struct iwl_lq_cmd *lq, bool sync); void 
iwl_mvm_update_frame_stats(struct iwl_mvm *mvm, u32 rate, bool agg); int rs_pretty_print_rate(char *buf, int bufsz, const u32 rate); void rs_update_last_rssi(struct iwl_mvm *mvm, diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/nvm.c b/drivers/net/wireless/intel/iwlwifi/mvm/nvm.c index cf48517944ecf..f2579c94ffdbc 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/nvm.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/nvm.c @@ -545,9 +545,8 @@ iwl_mvm_update_mcc(struct iwl_mvm *mvm, const char *alpha2, } IWL_DEBUG_LAR(mvm, - "MCC response status: 0x%x. new MCC: 0x%x ('%c%c') change: %d n_chans: %d\n", - status, mcc, mcc >> 8, mcc & 0xff, - !!(status == MCC_RESP_NEW_CHAN_PROFILE), n_channels); + "MCC response status: 0x%x. new MCC: 0x%x ('%c%c') n_chans: %d\n", + status, mcc, mcc >> 8, mcc & 0xff, n_channels); exit: iwl_free_resp(&cmd); diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rs.c b/drivers/net/wireless/intel/iwlwifi/mvm/rs.c index 30cfd7d50bc93..6b9c670fcef86 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/rs.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/rs.c @@ -1239,7 +1239,11 @@ void iwl_mvm_rs_tx_status(struct iwl_mvm *mvm, struct ieee80211_sta *sta, !(info->flags & IEEE80211_TX_STAT_AMPDU)) return; - rs_rate_from_ucode_rate(tx_resp_hwrate, info->band, &tx_resp_rate); + if (rs_rate_from_ucode_rate(tx_resp_hwrate, info->band, + &tx_resp_rate)) { + WARN_ON_ONCE(1); + return; + } #ifdef CONFIG_MAC80211_DEBUGFS /* Disable last tx check if we are debugging with fixed rate but @@ -1276,7 +1280,7 @@ void iwl_mvm_rs_tx_status(struct iwl_mvm *mvm, struct ieee80211_sta *sta, (unsigned long)(lq_sta->last_tx + (IWL_MVM_RS_IDLE_TIMEOUT * HZ)))) { IWL_DEBUG_RATE(mvm, "Tx idle for too long. 
reinit rs\n"); - iwl_mvm_rs_rate_init(mvm, sta, info->band); + iwl_mvm_rs_rate_init(mvm, sta, info->band, true); return; } lq_sta->last_tx = jiffies; @@ -1290,7 +1294,10 @@ void iwl_mvm_rs_tx_status(struct iwl_mvm *mvm, struct ieee80211_sta *sta, */ table = &lq_sta->lq; lq_hwrate = le32_to_cpu(table->rs_table[0]); - rs_rate_from_ucode_rate(lq_hwrate, info->band, &lq_rate); + if (rs_rate_from_ucode_rate(lq_hwrate, info->band, &lq_rate)) { + WARN_ON_ONCE(1); + return; + } /* Here we actually compare this rate to the latest LQ command */ if (lq_color != LQ_FLAG_COLOR_GET(table->flags)) { @@ -1392,8 +1399,12 @@ void iwl_mvm_rs_tx_status(struct iwl_mvm *mvm, struct ieee80211_sta *sta, /* Collect data for each rate used during failed TX attempts */ for (i = 0; i <= retries; ++i) { lq_hwrate = le32_to_cpu(table->rs_table[i]); - rs_rate_from_ucode_rate(lq_hwrate, info->band, - &lq_rate); + if (rs_rate_from_ucode_rate(lq_hwrate, info->band, + &lq_rate)) { + WARN_ON_ONCE(1); + return; + } + /* * Only collect stats if retried rate is in the same RS * table as active/search. 
@@ -2859,9 +2870,8 @@ void rs_update_last_rssi(struct iwl_mvm *mvm, static void rs_initialize_lq(struct iwl_mvm *mvm, struct ieee80211_sta *sta, struct iwl_lq_sta *lq_sta, - enum nl80211_band band) + enum nl80211_band band, bool update) { - struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta); struct iwl_scale_tbl_info *tbl; struct rs_rate *rate; u8 active_tbl = 0; @@ -2890,8 +2900,7 @@ static void rs_initialize_lq(struct iwl_mvm *mvm, rs_set_expected_tpt_table(lq_sta, tbl); rs_fill_lq_cmd(mvm, sta, lq_sta, rate); /* TODO restore station should remember the lq cmd */ - iwl_mvm_send_lq_cmd(mvm, &lq_sta->lq, - mvmsta->sta_state < IEEE80211_STA_AUTHORIZED); + iwl_mvm_send_lq_cmd(mvm, &lq_sta->lq, !update); } static void rs_drv_get_rate(void *mvm_r, struct ieee80211_sta *sta, @@ -3144,7 +3153,7 @@ void iwl_mvm_update_frame_stats(struct iwl_mvm *mvm, u32 rate, bool agg) * Called after adding a new station to initialize rate scaling */ static void rs_drv_rate_init(struct iwl_mvm *mvm, struct ieee80211_sta *sta, - enum nl80211_band band) + enum nl80211_band band, bool update) { int i, j; struct ieee80211_hw *hw = mvm->hw; @@ -3224,7 +3233,7 @@ static void rs_drv_rate_init(struct iwl_mvm *mvm, struct ieee80211_sta *sta, #ifdef CONFIG_IWLWIFI_DEBUGFS iwl_mvm_reset_frame_stats(mvm); #endif - rs_initialize_lq(mvm, sta, lq_sta, band); + rs_initialize_lq(mvm, sta, lq_sta, band, update); } static void rs_drv_rate_update(void *mvm_r, @@ -3244,7 +3253,7 @@ static void rs_drv_rate_update(void *mvm_r, for (tid = 0; tid < IWL_MAX_TID_COUNT; tid++) ieee80211_stop_tx_ba_session(sta, tid); - iwl_mvm_rs_rate_init(mvm, sta, sband->band); + iwl_mvm_rs_rate_init(mvm, sta, sband->band, true); } #ifdef CONFIG_MAC80211_DEBUGFS @@ -3262,7 +3271,10 @@ static void rs_build_rates_table_from_fixed(struct iwl_mvm *mvm, for (i = 0; i < num_rates; i++) lq_cmd->rs_table[i] = ucode_rate_le32; - rs_rate_from_ucode_rate(ucode_rate, band, &rate); + if (rs_rate_from_ucode_rate(ucode_rate, band, 
&rate)) { + WARN_ON_ONCE(1); + return; + } if (is_mimo(&rate)) lq_cmd->mimo_delim = num_rates - 1; @@ -4098,12 +4110,12 @@ static const struct rate_control_ops rs_mvm_ops_drv = { }; void iwl_mvm_rs_rate_init(struct iwl_mvm *mvm, struct ieee80211_sta *sta, - enum nl80211_band band) + enum nl80211_band band, bool update) { if (iwl_mvm_has_tlc_offload(mvm)) rs_fw_rate_init(mvm, sta, band); else - rs_drv_rate_init(mvm, sta, band); + rs_drv_rate_init(mvm, sta, band, update); } int iwl_mvm_rate_control_register(void) diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rs.h b/drivers/net/wireless/intel/iwlwifi/mvm/rs.h index d2cf484e2b73b..8e7f993e29116 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/rs.h +++ b/drivers/net/wireless/intel/iwlwifi/mvm/rs.h @@ -420,7 +420,7 @@ struct iwl_lq_sta { /* Initialize station's rate scaling information after adding station */ void iwl_mvm_rs_rate_init(struct iwl_mvm *mvm, struct ieee80211_sta *sta, - enum nl80211_band band); + enum nl80211_band band, bool init); /* Notify RS about Tx status */ void iwl_mvm_rs_tx_status(struct iwl_mvm *mvm, struct ieee80211_sta *sta, diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c index ff193dca2020c..2d21f0a1fa006 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c @@ -1405,6 +1405,7 @@ static void iwl_mvm_rx_tx_cmd_single(struct iwl_mvm *mvm, while (!skb_queue_empty(&skbs)) { struct sk_buff *skb = __skb_dequeue(&skbs); struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); + struct ieee80211_hdr *hdr = (void *)skb->data; bool flushed = false; skb_freed++; @@ -1449,11 +1450,11 @@ static void iwl_mvm_rx_tx_cmd_single(struct iwl_mvm *mvm, info->flags |= IEEE80211_TX_STAT_AMPDU_NO_BACK; info->flags &= ~IEEE80211_TX_CTL_AMPDU; - /* W/A FW bug: seq_ctl is wrong when the status isn't success */ - if (status != TX_STATUS_SUCCESS) { - struct ieee80211_hdr *hdr = (void *)skb->data; + /* W/A FW 
bug: seq_ctl is wrong upon failure / BAR frame */ + if (ieee80211_is_back_req(hdr->frame_control)) + seq_ctl = 0; + else if (status != TX_STATUS_SUCCESS) seq_ctl = le16_to_cpu(hdr->seq_ctrl); - } if (unlikely(!seq_ctl)) { struct ieee80211_hdr *hdr = (void *)skb->data; diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/utils.c b/drivers/net/wireless/intel/iwlwifi/mvm/utils.c index b002a7afb5f59..6a5349401aa99 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/utils.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/utils.c @@ -900,20 +900,19 @@ int iwl_mvm_disable_txq(struct iwl_mvm *mvm, int queue, int mac80211_queue, /** * iwl_mvm_send_lq_cmd() - Send link quality command - * @init: This command is sent as part of station initialization right - * after station has been added. + * @sync: This command can be sent synchronously. * * The link quality command is sent as the last step of station creation. * This is the special case in which init is set and we call a callback in * this case to clear the state indicating that station creation is in * progress. */ -int iwl_mvm_send_lq_cmd(struct iwl_mvm *mvm, struct iwl_lq_cmd *lq, bool init) +int iwl_mvm_send_lq_cmd(struct iwl_mvm *mvm, struct iwl_lq_cmd *lq, bool sync) { struct iwl_host_cmd cmd = { .id = LQ_CMD, .len = { sizeof(struct iwl_lq_cmd), }, - .flags = init ? 0 : CMD_ASYNC, + .flags = sync ? 
0 : CMD_ASYNC, .data = { lq, }, }; diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c index b150da4c6721e..5d65500a8aa75 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c @@ -518,6 +518,56 @@ static const struct pci_device_id iwl_hw_card_ids[] = { {IWL_PCI_DEVICE(0x24FD, 0x9074, iwl8265_2ac_cfg)}, /* 9000 Series */ + {IWL_PCI_DEVICE(0x02F0, 0x0030, iwl9560_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x02F0, 0x0034, iwl9560_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x02F0, 0x0038, iwl9560_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x02F0, 0x003C, iwl9560_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x02F0, 0x0060, iwl9461_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x02F0, 0x0064, iwl9461_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x02F0, 0x00A0, iwl9462_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x02F0, 0x00A4, iwl9462_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x02F0, 0x0230, iwl9560_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x02F0, 0x0234, iwl9560_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x02F0, 0x0238, iwl9560_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x02F0, 0x023C, iwl9560_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x02F0, 0x0260, iwl9461_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x02F0, 0x0264, iwl9461_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x02F0, 0x02A0, iwl9462_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x02F0, 0x02A4, iwl9462_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x02F0, 0x1551, iwl9560_killer_s_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x02F0, 0x1552, iwl9560_killer_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x02F0, 0x2030, iwl9560_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x02F0, 0x2034, iwl9560_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x02F0, 0x4030, iwl9560_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x02F0, 0x4034, iwl9560_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x02F0, 0x40A4, iwl9462_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x02F0, 0x4234, iwl9560_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x02F0, 0x42A4, iwl9462_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x06F0, 0x0030, iwl9560_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x06F0, 0x0034, iwl9560_2ac_cfg_soc)}, + 
{IWL_PCI_DEVICE(0x06F0, 0x0038, iwl9560_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x06F0, 0x003C, iwl9560_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x06F0, 0x0060, iwl9461_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x06F0, 0x0064, iwl9461_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x06F0, 0x00A0, iwl9462_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x06F0, 0x00A4, iwl9462_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x06F0, 0x0230, iwl9560_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x06F0, 0x0234, iwl9560_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x06F0, 0x0238, iwl9560_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x06F0, 0x023C, iwl9560_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x06F0, 0x0260, iwl9461_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x06F0, 0x0264, iwl9461_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x06F0, 0x02A0, iwl9462_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x06F0, 0x02A4, iwl9462_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x06F0, 0x1551, iwl9560_killer_s_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x06F0, 0x1552, iwl9560_killer_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x06F0, 0x2030, iwl9560_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x06F0, 0x2034, iwl9560_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x06F0, 0x4030, iwl9560_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x06F0, 0x4034, iwl9560_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x06F0, 0x40A4, iwl9462_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x06F0, 0x4234, iwl9560_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x06F0, 0x42A4, iwl9462_2ac_cfg_soc)}, {IWL_PCI_DEVICE(0x2526, 0x0010, iwl9260_2ac_cfg)}, {IWL_PCI_DEVICE(0x2526, 0x0014, iwl9260_2ac_cfg)}, {IWL_PCI_DEVICE(0x2526, 0x0018, iwl9260_2ac_cfg)}, diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/rx.c b/drivers/net/wireless/intel/iwlwifi/pcie/rx.c index d017aa2a0a8bd..d4a31e014c820 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/rx.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/rx.c @@ -1144,6 +1144,14 @@ void iwl_pcie_rx_free(struct iwl_trans *trans) kfree(trans_pcie->rxq); } +static void iwl_pcie_rx_move_to_allocator(struct iwl_rxq *rxq, + struct iwl_rb_allocator *rba) +{ + spin_lock(&rba->lock); + list_splice_tail_init(&rxq->rx_used, 
&rba->rbd_empty); + spin_unlock(&rba->lock); +} + /* * iwl_pcie_rx_reuse_rbd - Recycle used RBDs * @@ -1175,9 +1183,7 @@ static void iwl_pcie_rx_reuse_rbd(struct iwl_trans *trans, if ((rxq->used_count % RX_CLAIM_REQ_ALLOC) == RX_POST_REQ_ALLOC) { /* Move the 2 RBDs to the allocator ownership. Allocator has another 6 from pool for the request completion*/ - spin_lock(&rba->lock); - list_splice_tail_init(&rxq->rx_used, &rba->rbd_empty); - spin_unlock(&rba->lock); + iwl_pcie_rx_move_to_allocator(rxq, rba); atomic_inc(&rba->req_pending); queue_work(rba->alloc_wq, &rba->rx_alloc); @@ -1396,10 +1402,18 @@ static void iwl_pcie_rx_handle(struct iwl_trans *trans, int queue) IWL_DEBUG_RX(trans, "Q %d: HW = SW = %d\n", rxq->id, r); while (i != r) { + struct iwl_rb_allocator *rba = &trans_pcie->rba; struct iwl_rx_mem_buffer *rxb; - - if (unlikely(rxq->used_count == rxq->queue_size / 2)) + /* number of RBDs still waiting for page allocation */ + u32 rb_pending_alloc = + atomic_read(&trans_pcie->rba.req_pending) * + RX_CLAIM_REQ_ALLOC; + + if (unlikely(rb_pending_alloc >= rxq->queue_size / 2 && + !emergency)) { + iwl_pcie_rx_move_to_allocator(rxq, rba); emergency = true; + } rxb = iwl_pcie_get_rxb(trans, rxq, i); if (!rxb) @@ -1421,17 +1435,13 @@ static void iwl_pcie_rx_handle(struct iwl_trans *trans, int queue) iwl_pcie_rx_allocator_get(trans, rxq); if (rxq->used_count % RX_CLAIM_REQ_ALLOC == 0 && !emergency) { - struct iwl_rb_allocator *rba = &trans_pcie->rba; - /* Add the remaining empty RBDs for allocator use */ - spin_lock(&rba->lock); - list_splice_tail_init(&rxq->rx_used, &rba->rbd_empty); - spin_unlock(&rba->lock); + iwl_pcie_rx_move_to_allocator(rxq, rba); } else if (emergency) { count++; if (count == 8) { count = 0; - if (rxq->used_count < rxq->queue_size / 3) + if (rb_pending_alloc < rxq->queue_size / 3) emergency = false; rxq->read = i; diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c index 07442ada6dd0e..4ca6592f5b3a6 100644 
--- a/drivers/net/wireless/mac80211_hwsim.c +++ b/drivers/net/wireless/mac80211_hwsim.c @@ -2889,6 +2889,10 @@ static int mac80211_hwsim_new_radio(struct genl_info *info, wiphy_ext_feature_set(hw->wiphy, NL80211_EXT_FEATURE_CQM_RSSI_LIST); + tasklet_hrtimer_init(&data->beacon_timer, + mac80211_hwsim_beacon, + CLOCK_MONOTONIC, HRTIMER_MODE_ABS); + err = ieee80211_register_hw(hw); if (err < 0) { pr_debug("mac80211_hwsim: ieee80211_register_hw failed (%d)\n", @@ -2913,10 +2917,6 @@ static int mac80211_hwsim_new_radio(struct genl_info *info, data->debugfs, data, &hwsim_simulate_radar); - tasklet_hrtimer_init(&data->beacon_timer, - mac80211_hwsim_beacon, - CLOCK_MONOTONIC, HRTIMER_MODE_ABS); - spin_lock_bh(&hwsim_radio_lock); err = rhashtable_insert_fast(&hwsim_radios_rht, &data->rht, hwsim_rht_params); @@ -3712,16 +3712,16 @@ static int __init init_mac80211_hwsim(void) if (err) goto out_unregister_pernet; + err = hwsim_init_netlink(); + if (err) + goto out_unregister_driver; + hwsim_class = class_create(THIS_MODULE, "mac80211_hwsim"); if (IS_ERR(hwsim_class)) { err = PTR_ERR(hwsim_class); - goto out_unregister_driver; + goto out_exit_netlink; } - err = hwsim_init_netlink(); - if (err < 0) - goto out_unregister_driver; - for (i = 0; i < radios; i++) { struct hwsim_new_radio_params param = { 0 }; @@ -3827,6 +3827,8 @@ static int __init init_mac80211_hwsim(void) free_netdev(hwsim_mon); out_free_radios: mac80211_hwsim_free(); +out_exit_netlink: + hwsim_exit_netlink(); out_unregister_driver: platform_driver_unregister(&mac80211_hwsim_driver); out_unregister_pernet: diff --git a/drivers/net/wireless/marvell/libertas/if_usb.c b/drivers/net/wireless/marvell/libertas/if_usb.c index c67a8e7be3106..3dbfce972c56b 100644 --- a/drivers/net/wireless/marvell/libertas/if_usb.c +++ b/drivers/net/wireless/marvell/libertas/if_usb.c @@ -456,8 +456,6 @@ static int __if_usb_submit_rx_urb(struct if_usb_card *cardp, MRVDRV_ETH_RX_PACKET_BUFFER_SIZE, callbackfn, cardp); - 
cardp->rx_urb->transfer_flags |= URB_ZERO_PACKET; - lbs_deb_usb2(&cardp->udev->dev, "Pointer for rx_urb %p\n", cardp->rx_urb); if ((ret = usb_submit_urb(cardp->rx_urb, GFP_ATOMIC))) { lbs_deb_usbd(&cardp->udev->dev, "Submit Rx URB failed: %d\n", ret); diff --git a/drivers/net/wireless/marvell/libertas_tf/if_usb.c b/drivers/net/wireless/marvell/libertas_tf/if_usb.c index e92fc50011717..789337ea676ac 100644 --- a/drivers/net/wireless/marvell/libertas_tf/if_usb.c +++ b/drivers/net/wireless/marvell/libertas_tf/if_usb.c @@ -605,9 +605,10 @@ static inline void process_cmdrequest(int recvlength, uint8_t *recvbuff, { unsigned long flags; - if (recvlength > LBS_CMD_BUFFER_SIZE) { + if (recvlength < MESSAGE_HEADER_LEN || + recvlength > LBS_CMD_BUFFER_SIZE) { lbtf_deb_usbd(&cardp->udev->dev, - "The receive buffer is too large\n"); + "The receive buffer is invalid: %d\n", recvlength); kfree_skb(skb); return; } diff --git a/drivers/net/wireless/marvell/mwifiex/11n.c b/drivers/net/wireless/marvell/mwifiex/11n.c index e2addd8b878b2..5d75c971004b4 100644 --- a/drivers/net/wireless/marvell/mwifiex/11n.c +++ b/drivers/net/wireless/marvell/mwifiex/11n.c @@ -696,11 +696,10 @@ void mwifiex_11n_delba(struct mwifiex_private *priv, int tid) "Send delba to tid=%d, %pM\n", tid, rx_reor_tbl_ptr->ta); mwifiex_send_delba(priv, tid, rx_reor_tbl_ptr->ta, 0); - spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, - flags); - return; + goto exit; } } +exit: spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags); } diff --git a/drivers/net/wireless/marvell/mwifiex/11n_rxreorder.c b/drivers/net/wireless/marvell/mwifiex/11n_rxreorder.c index 8e63d14c1e1c5..5380fba652cc4 100644 --- a/drivers/net/wireless/marvell/mwifiex/11n_rxreorder.c +++ b/drivers/net/wireless/marvell/mwifiex/11n_rxreorder.c @@ -103,8 +103,6 @@ static int mwifiex_11n_dispatch_pkt(struct mwifiex_private *priv, void *payload) * There could be holes in the buffer, which are skipped by the function. 
* Since the buffer is linear, the function uses rotation to simulate * circular buffer. - * - * The caller must hold rx_reorder_tbl_lock spinlock. */ static void mwifiex_11n_dispatch_pkt_until_start_win(struct mwifiex_private *priv, @@ -113,21 +111,25 @@ mwifiex_11n_dispatch_pkt_until_start_win(struct mwifiex_private *priv, { int pkt_to_send, i; void *rx_tmp_ptr; + unsigned long flags; pkt_to_send = (start_win > tbl->start_win) ? min((start_win - tbl->start_win), tbl->win_size) : tbl->win_size; for (i = 0; i < pkt_to_send; ++i) { + spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags); rx_tmp_ptr = NULL; if (tbl->rx_reorder_ptr[i]) { rx_tmp_ptr = tbl->rx_reorder_ptr[i]; tbl->rx_reorder_ptr[i] = NULL; } + spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags); if (rx_tmp_ptr) mwifiex_11n_dispatch_pkt(priv, rx_tmp_ptr); } + spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags); /* * We don't have a circular buffer, hence use rotation to simulate * circular buffer @@ -138,6 +140,7 @@ mwifiex_11n_dispatch_pkt_until_start_win(struct mwifiex_private *priv, } tbl->start_win = start_win; + spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags); } /* @@ -147,8 +150,6 @@ mwifiex_11n_dispatch_pkt_until_start_win(struct mwifiex_private *priv, * The start window is adjusted automatically when a hole is located. * Since the buffer is linear, the function uses rotation to simulate * circular buffer. - * - * The caller must hold rx_reorder_tbl_lock spinlock. 
*/ static void mwifiex_11n_scan_and_dispatch(struct mwifiex_private *priv, @@ -156,15 +157,22 @@ mwifiex_11n_scan_and_dispatch(struct mwifiex_private *priv, { int i, j, xchg; void *rx_tmp_ptr; + unsigned long flags; for (i = 0; i < tbl->win_size; ++i) { - if (!tbl->rx_reorder_ptr[i]) + spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags); + if (!tbl->rx_reorder_ptr[i]) { + spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, + flags); break; + } rx_tmp_ptr = tbl->rx_reorder_ptr[i]; tbl->rx_reorder_ptr[i] = NULL; + spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags); mwifiex_11n_dispatch_pkt(priv, rx_tmp_ptr); } + spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags); /* * We don't have a circular buffer, hence use rotation to simulate * circular buffer @@ -177,6 +185,7 @@ mwifiex_11n_scan_and_dispatch(struct mwifiex_private *priv, } } tbl->start_win = (tbl->start_win + i) & (MAX_TID_VALUE - 1); + spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags); } /* @@ -184,8 +193,6 @@ mwifiex_11n_scan_and_dispatch(struct mwifiex_private *priv, * * The function stops the associated timer and dispatches all the * pending packets in the Rx reorder table before deletion. - * - * The caller must hold rx_reorder_tbl_lock spinlock. */ static void mwifiex_del_rx_reorder_entry(struct mwifiex_private *priv, @@ -211,7 +218,11 @@ mwifiex_del_rx_reorder_entry(struct mwifiex_private *priv, del_timer_sync(&tbl->timer_context.timer); tbl->timer_context.timer_is_set = false; + + spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags); list_del(&tbl->list); + spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags); + kfree(tbl->rx_reorder_ptr); kfree(tbl); @@ -224,17 +235,22 @@ mwifiex_del_rx_reorder_entry(struct mwifiex_private *priv, /* * This function returns the pointer to an entry in Rx reordering * table which matches the given TA/TID pair. - * - * The caller must hold rx_reorder_tbl_lock spinlock. 
*/ struct mwifiex_rx_reorder_tbl * mwifiex_11n_get_rx_reorder_tbl(struct mwifiex_private *priv, int tid, u8 *ta) { struct mwifiex_rx_reorder_tbl *tbl; + unsigned long flags; - list_for_each_entry(tbl, &priv->rx_reorder_tbl_ptr, list) - if (!memcmp(tbl->ta, ta, ETH_ALEN) && tbl->tid == tid) + spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags); + list_for_each_entry(tbl, &priv->rx_reorder_tbl_ptr, list) { + if (!memcmp(tbl->ta, ta, ETH_ALEN) && tbl->tid == tid) { + spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, + flags); return tbl; + } + } + spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags); return NULL; } @@ -251,9 +267,14 @@ void mwifiex_11n_del_rx_reorder_tbl_by_ta(struct mwifiex_private *priv, u8 *ta) return; spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags); - list_for_each_entry_safe(tbl, tmp, &priv->rx_reorder_tbl_ptr, list) - if (!memcmp(tbl->ta, ta, ETH_ALEN)) + list_for_each_entry_safe(tbl, tmp, &priv->rx_reorder_tbl_ptr, list) { + if (!memcmp(tbl->ta, ta, ETH_ALEN)) { + spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, + flags); mwifiex_del_rx_reorder_entry(priv, tbl); + spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags); + } + } spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags); return; @@ -262,18 +283,24 @@ void mwifiex_11n_del_rx_reorder_tbl_by_ta(struct mwifiex_private *priv, u8 *ta) /* * This function finds the last sequence number used in the packets * buffered in Rx reordering table. - * - * The caller must hold rx_reorder_tbl_lock spinlock. 
*/ static int mwifiex_11n_find_last_seq_num(struct reorder_tmr_cnxt *ctx) { struct mwifiex_rx_reorder_tbl *rx_reorder_tbl_ptr = ctx->ptr; + struct mwifiex_private *priv = ctx->priv; + unsigned long flags; int i; - for (i = rx_reorder_tbl_ptr->win_size - 1; i >= 0; --i) - if (rx_reorder_tbl_ptr->rx_reorder_ptr[i]) + spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags); + for (i = rx_reorder_tbl_ptr->win_size - 1; i >= 0; --i) { + if (rx_reorder_tbl_ptr->rx_reorder_ptr[i]) { + spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, + flags); return i; + } + } + spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags); return -1; } @@ -291,22 +318,17 @@ mwifiex_flush_data(struct timer_list *t) struct reorder_tmr_cnxt *ctx = from_timer(ctx, t, timer); int start_win, seq_num; - unsigned long flags; ctx->timer_is_set = false; - spin_lock_irqsave(&ctx->priv->rx_reorder_tbl_lock, flags); seq_num = mwifiex_11n_find_last_seq_num(ctx); - if (seq_num < 0) { - spin_unlock_irqrestore(&ctx->priv->rx_reorder_tbl_lock, flags); + if (seq_num < 0) return; - } mwifiex_dbg(ctx->priv->adapter, INFO, "info: flush data %d\n", seq_num); start_win = (ctx->ptr->start_win + seq_num + 1) & (MAX_TID_VALUE - 1); mwifiex_11n_dispatch_pkt_until_start_win(ctx->priv, ctx->ptr, start_win); - spin_unlock_irqrestore(&ctx->priv->rx_reorder_tbl_lock, flags); } /* @@ -333,14 +355,11 @@ mwifiex_11n_create_rx_reorder_tbl(struct mwifiex_private *priv, u8 *ta, * If we get a TID, ta pair which is already present dispatch all the * the packets and move the window size until the ssn */ - spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags); tbl = mwifiex_11n_get_rx_reorder_tbl(priv, tid, ta); if (tbl) { mwifiex_11n_dispatch_pkt_until_start_win(priv, tbl, seq_num); - spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags); return; } - spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags); /* if !tbl then create one */ new_node = kzalloc(sizeof(struct mwifiex_rx_reorder_tbl), GFP_KERNEL); if (!new_node) @@ 
-551,20 +570,16 @@ int mwifiex_11n_rx_reorder_pkt(struct mwifiex_private *priv, int prev_start_win, start_win, end_win, win_size; u16 pkt_index; bool init_window_shift = false; - unsigned long flags; int ret = 0; - spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags); tbl = mwifiex_11n_get_rx_reorder_tbl(priv, tid, ta); if (!tbl) { - spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags); if (pkt_type != PKT_TYPE_BAR) mwifiex_11n_dispatch_pkt(priv, payload); return ret; } if ((pkt_type == PKT_TYPE_AMSDU) && !tbl->amsdu) { - spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags); mwifiex_11n_dispatch_pkt(priv, payload); return ret; } @@ -651,8 +666,6 @@ int mwifiex_11n_rx_reorder_pkt(struct mwifiex_private *priv, if (!tbl->timer_context.timer_is_set || prev_start_win != tbl->start_win) mwifiex_11n_rxreorder_timer_restart(tbl); - - spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags); return ret; } @@ -681,18 +694,14 @@ mwifiex_del_ba_tbl(struct mwifiex_private *priv, int tid, u8 *peer_mac, peer_mac, tid, initiator); if (cleanup_rx_reorder_tbl) { - spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags); tbl = mwifiex_11n_get_rx_reorder_tbl(priv, tid, peer_mac); if (!tbl) { - spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, - flags); mwifiex_dbg(priv->adapter, EVENT, "event: TID, TA not found in table\n"); return; } mwifiex_del_rx_reorder_entry(priv, tbl); - spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags); } else { ptx_tbl = mwifiex_get_ba_tbl(priv, tid, peer_mac); if (!ptx_tbl) { @@ -726,7 +735,6 @@ int mwifiex_ret_11n_addba_resp(struct mwifiex_private *priv, int tid, win_size; struct mwifiex_rx_reorder_tbl *tbl; uint16_t block_ack_param_set; - unsigned long flags; block_ack_param_set = le16_to_cpu(add_ba_rsp->block_ack_param_set); @@ -740,20 +748,17 @@ int mwifiex_ret_11n_addba_resp(struct mwifiex_private *priv, mwifiex_dbg(priv->adapter, ERROR, "ADDBA RSP: failed %pM tid=%d)\n", add_ba_rsp->peer_mac_addr, tid); - 
spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags); tbl = mwifiex_11n_get_rx_reorder_tbl(priv, tid, add_ba_rsp->peer_mac_addr); if (tbl) mwifiex_del_rx_reorder_entry(priv, tbl); - spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags); return 0; } win_size = (block_ack_param_set & IEEE80211_ADDBA_PARAM_BUF_SIZE_MASK) >> BLOCKACKPARAM_WINSIZE_POS; - spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags); tbl = mwifiex_11n_get_rx_reorder_tbl(priv, tid, add_ba_rsp->peer_mac_addr); if (tbl) { @@ -764,7 +769,6 @@ int mwifiex_ret_11n_addba_resp(struct mwifiex_private *priv, else tbl->amsdu = false; } - spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags); mwifiex_dbg(priv->adapter, CMD, "cmd: ADDBA RSP: %pM tid=%d ssn=%d win_size=%d\n", @@ -804,8 +808,11 @@ void mwifiex_11n_cleanup_reorder_tbl(struct mwifiex_private *priv) spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags); list_for_each_entry_safe(del_tbl_ptr, tmp_node, - &priv->rx_reorder_tbl_ptr, list) + &priv->rx_reorder_tbl_ptr, list) { + spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags); mwifiex_del_rx_reorder_entry(priv, del_tbl_ptr); + spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags); + } INIT_LIST_HEAD(&priv->rx_reorder_tbl_ptr); spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags); @@ -929,7 +936,6 @@ void mwifiex_11n_rxba_sync_event(struct mwifiex_private *priv, int tlv_buf_left = len; int ret; u8 *tmp; - unsigned long flags; mwifiex_dbg_dump(priv->adapter, EVT_D, "RXBA_SYNC event:", event_buf, len); @@ -949,18 +955,14 @@ void mwifiex_11n_rxba_sync_event(struct mwifiex_private *priv, tlv_rxba->mac, tlv_rxba->tid, tlv_seq_num, tlv_bitmap_len); - spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags); rx_reor_tbl_ptr = mwifiex_11n_get_rx_reorder_tbl(priv, tlv_rxba->tid, tlv_rxba->mac); if (!rx_reor_tbl_ptr) { - spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, - flags); mwifiex_dbg(priv->adapter, ERROR, "Can not find rx_reorder_tbl!"); return; } - 
spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags); for (i = 0; i < tlv_bitmap_len; i++) { for (j = 0 ; j < 8; j++) { diff --git a/drivers/net/wireless/marvell/mwifiex/uap_txrx.c b/drivers/net/wireless/marvell/mwifiex/uap_txrx.c index a83c5afc256ab..5ce85d5727e4b 100644 --- a/drivers/net/wireless/marvell/mwifiex/uap_txrx.c +++ b/drivers/net/wireless/marvell/mwifiex/uap_txrx.c @@ -421,15 +421,12 @@ int mwifiex_process_uap_rx_packet(struct mwifiex_private *priv, spin_unlock_irqrestore(&priv->sta_list_spinlock, flags); } - spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags); if (!priv->ap_11n_enabled || (!mwifiex_11n_get_rx_reorder_tbl(priv, uap_rx_pd->priority, ta) && (le16_to_cpu(uap_rx_pd->rx_pkt_type) != PKT_TYPE_AMSDU))) { ret = mwifiex_handle_uap_rx_forward(priv, skb); - spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags); return ret; } - spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags); /* Reorder and send to kernel */ pkt_type = (u8)le16_to_cpu(uap_rx_pd->rx_pkt_type); diff --git a/drivers/net/wireless/mediatek/mt76/Kconfig b/drivers/net/wireless/mediatek/mt76/Kconfig index b6c5f17dca30a..27826217ff762 100644 --- a/drivers/net/wireless/mediatek/mt76/Kconfig +++ b/drivers/net/wireless/mediatek/mt76/Kconfig @@ -1,6 +1,12 @@ config MT76_CORE tristate +config MT76_LEDS + bool + depends on MT76_CORE + depends on LEDS_CLASS=y || MT76_CORE=LEDS_CLASS + default y + config MT76_USB tristate depends on MT76_CORE diff --git a/drivers/net/wireless/mediatek/mt76/mac80211.c b/drivers/net/wireless/mediatek/mt76/mac80211.c index 029d54bce9e81..ade4a2029a24a 100644 --- a/drivers/net/wireless/mediatek/mt76/mac80211.c +++ b/drivers/net/wireless/mediatek/mt76/mac80211.c @@ -342,9 +342,11 @@ int mt76_register_device(struct mt76_dev *dev, bool vht, mt76_check_sband(dev, NL80211_BAND_2GHZ); mt76_check_sband(dev, NL80211_BAND_5GHZ); - ret = mt76_led_init(dev); - if (ret) - return ret; + if (IS_ENABLED(CONFIG_MT76_LEDS)) { + ret = mt76_led_init(dev); + if (ret) 
+ return ret; + } return ieee80211_register_hw(hw); } diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/init.c b/drivers/net/wireless/mediatek/mt76/mt76x0/init.c index 7cdb3e740522b..0a3e046d78db3 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x0/init.c +++ b/drivers/net/wireless/mediatek/mt76/mt76x0/init.c @@ -681,6 +681,7 @@ int mt76x0_register_device(struct mt76x0_dev *dev) ieee80211_hw_set(hw, SUPPORTS_HT_CCK_RATES); ieee80211_hw_set(hw, AMPDU_AGGREGATION); ieee80211_hw_set(hw, SUPPORTS_RC_TABLE); + ieee80211_hw_set(hw, MFP_CAPABLE); hw->max_rates = 1; hw->max_report_rates = 7; hw->max_rate_tries = 1; diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/phy.c b/drivers/net/wireless/mediatek/mt76/mt76x0/phy.c index 5da7bfbe907ff..14e8c575f6c3e 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x0/phy.c +++ b/drivers/net/wireless/mediatek/mt76/mt76x0/phy.c @@ -757,10 +757,10 @@ __mt76x0_phy_set_channel(struct mt76x0_dev *dev, /* Vendor driver don't do it */ /* mt76x0_phy_set_tx_power(dev, channel, rf_bw_band); */ + mt76x0_vco_cal(dev, channel); if (scan) - mt76x0_vco_cal(dev, channel); + mt76x0_mcu_calibrate(dev, MCU_CAL_RXDCOC, 1); - mt76x0_mcu_calibrate(dev, MCU_CAL_RXDCOC, 1); mt76x0_phy_set_chan_pwr(dev, channel); dev->mt76.chandef = *chandef; diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2_init.c b/drivers/net/wireless/mediatek/mt76/mt76x2_init.c index b814391f79ac8..03b103c45d69b 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x2_init.c +++ b/drivers/net/wireless/mediatek/mt76/mt76x2_init.c @@ -581,8 +581,10 @@ int mt76x2_register_device(struct mt76x2_dev *dev) mt76x2_dfs_init_detector(dev); /* init led callbacks */ - dev->mt76.led_cdev.brightness_set = mt76x2_led_set_brightness; - dev->mt76.led_cdev.blink_set = mt76x2_led_set_blink; + if (IS_ENABLED(CONFIG_MT76_LEDS)) { + dev->mt76.led_cdev.brightness_set = mt76x2_led_set_brightness; + dev->mt76.led_cdev.blink_set = mt76x2_led_set_blink; + } ret = mt76_register_device(&dev->mt76, 
true, mt76x2_rates, ARRAY_SIZE(mt76x2_rates)); diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2_mac.c b/drivers/net/wireless/mediatek/mt76/mt76x2_mac.c index 23cf437d14f96..1a49d1be042db 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x2_mac.c +++ b/drivers/net/wireless/mediatek/mt76/mt76x2_mac.c @@ -128,8 +128,7 @@ __mt76x2_mac_set_beacon(struct mt76x2_dev *dev, u8 bcn_idx, struct sk_buff *skb) if (skb) { ret = mt76_write_beacon(dev, beacon_addr, skb); if (!ret) - dev->beacon_data_mask |= BIT(bcn_idx) & - dev->beacon_mask; + dev->beacon_data_mask |= BIT(bcn_idx); } else { dev->beacon_data_mask &= ~BIT(bcn_idx); for (i = 0; i < beacon_len; i += 4) diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2_usb.c b/drivers/net/wireless/mediatek/mt76/mt76x2_usb.c index 1428cfdee5795..9594433234cc3 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x2_usb.c +++ b/drivers/net/wireless/mediatek/mt76/mt76x2_usb.c @@ -107,16 +107,24 @@ static int __maybe_unused mt76x2u_resume(struct usb_interface *intf) mt76u_mcu_complete_urb, &usb->mcu.cmpl); if (err < 0) - return err; + goto err; err = mt76u_submit_rx_buffers(&dev->mt76); if (err < 0) - return err; + goto err; tasklet_enable(&usb->rx_tasklet); tasklet_enable(&usb->tx_tasklet); - return mt76x2u_init_hardware(dev); + err = mt76x2u_init_hardware(dev); + if (err < 0) + goto err; + + return 0; + +err: + mt76x2u_cleanup(dev); + return err; } MODULE_DEVICE_TABLE(usb, mt76x2u_device_table); diff --git a/drivers/net/wireless/mediatek/mt76/tx.c b/drivers/net/wireless/mediatek/mt76/tx.c index af48d43bb7dca..20447fdce4c33 100644 --- a/drivers/net/wireless/mediatek/mt76/tx.c +++ b/drivers/net/wireless/mediatek/mt76/tx.c @@ -385,7 +385,12 @@ void mt76_stop_tx_queues(struct mt76_dev *dev, struct ieee80211_sta *sta, for (i = 0; i < ARRAY_SIZE(sta->txq); i++) { struct ieee80211_txq *txq = sta->txq[i]; - struct mt76_txq *mtxq = (struct mt76_txq *) txq->drv_priv; + struct mt76_txq *mtxq; + + if (!txq) + continue; + + mtxq = (struct 
mt76_txq *)txq->drv_priv; spin_lock_bh(&mtxq->hwq->lock); mtxq->send_bar = mtxq->aggr && send_bar; diff --git a/drivers/net/wireless/realtek/rtlwifi/base.c b/drivers/net/wireless/realtek/rtlwifi/base.c index f4122c8fdd977..ef9b502ce576b 100644 --- a/drivers/net/wireless/realtek/rtlwifi/base.c +++ b/drivers/net/wireless/realtek/rtlwifi/base.c @@ -2289,6 +2289,7 @@ void rtl_c2hcmd_enqueue(struct ieee80211_hw *hw, struct sk_buff *skb) if (rtl_c2h_fast_cmd(hw, skb)) { rtl_c2h_content_parsing(hw, skb); + kfree_skb(skb); return; } diff --git a/drivers/net/wireless/rsi/rsi_91x_usb.c b/drivers/net/wireless/rsi/rsi_91x_usb.c index c0a163e404029..f360690396dd0 100644 --- a/drivers/net/wireless/rsi/rsi_91x_usb.c +++ b/drivers/net/wireless/rsi/rsi_91x_usb.c @@ -266,15 +266,17 @@ static void rsi_rx_done_handler(struct urb *urb) if (urb->status) goto out; - if (urb->actual_length <= 0) { - rsi_dbg(INFO_ZONE, "%s: Zero length packet\n", __func__); + if (urb->actual_length <= 0 || + urb->actual_length > rx_cb->rx_skb->len) { + rsi_dbg(INFO_ZONE, "%s: Invalid packet length = %d\n", + __func__, urb->actual_length); goto out; } if (skb_queue_len(&dev->rx_q) >= RSI_MAX_RX_PKTS) { rsi_dbg(INFO_ZONE, "Max RX packets reached\n"); goto out; } - skb_put(rx_cb->rx_skb, urb->actual_length); + skb_trim(rx_cb->rx_skb, urb->actual_length); skb_queue_tail(&dev->rx_q, rx_cb->rx_skb); rsi_set_event(&dev->rx_thread.event); @@ -308,6 +310,7 @@ static int rsi_rx_urb_submit(struct rsi_hw *adapter, u8 ep_num) if (!skb) return -ENOMEM; skb_reserve(skb, MAX_DWORD_ALIGN_BYTES); + skb_put(skb, RSI_MAX_RX_USB_PKT_SIZE - MAX_DWORD_ALIGN_BYTES); dword_align_bytes = (unsigned long)skb->data & 0x3f; if (dword_align_bytes > 0) skb_push(skb, dword_align_bytes); @@ -319,7 +322,7 @@ static int rsi_rx_urb_submit(struct rsi_hw *adapter, u8 ep_num) usb_rcvbulkpipe(dev->usbdev, dev->bulkin_endpoint_addr[ep_num - 1]), urb->transfer_buffer, - RSI_MAX_RX_USB_PKT_SIZE, + skb->len, rsi_rx_done_handler, rx_cb); diff --git 
a/drivers/net/wireless/ti/wlcore/main.c b/drivers/net/wireless/ti/wlcore/main.c index 89b0d0fade9f2..19e3c5a0b715f 100644 --- a/drivers/net/wireless/ti/wlcore/main.c +++ b/drivers/net/wireless/ti/wlcore/main.c @@ -957,6 +957,8 @@ static void wl1271_recovery_work(struct work_struct *work) BUG_ON(wl->conf.recovery.bug_on_recovery && !test_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags)); + clear_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags); + if (wl->conf.recovery.no_recovery) { wl1271_info("No recovery (chosen on module load). Fw will remain stuck."); goto out_unlock; @@ -6710,6 +6712,7 @@ static int __maybe_unused wlcore_runtime_resume(struct device *dev) int ret; unsigned long start_time = jiffies; bool pending = false; + bool recovery = false; /* Nothing to do if no ELP mode requested */ if (!test_bit(WL1271_FLAG_IN_ELP, &wl->flags)) @@ -6726,7 +6729,7 @@ static int __maybe_unused wlcore_runtime_resume(struct device *dev) ret = wlcore_raw_write32(wl, HW_ACCESS_ELP_CTRL_REG, ELPCTRL_WAKE_UP); if (ret < 0) { - wl12xx_queue_recovery_work(wl); + recovery = true; goto err; } @@ -6734,11 +6737,12 @@ static int __maybe_unused wlcore_runtime_resume(struct device *dev) ret = wait_for_completion_timeout(&compl, msecs_to_jiffies(WL1271_WAKEUP_TIMEOUT)); if (ret == 0) { - wl1271_error("ELP wakeup timeout!"); - wl12xx_queue_recovery_work(wl); + wl1271_warning("ELP wakeup timeout!"); /* Return no error for runtime PM for recovery */ - return 0; + ret = 0; + recovery = true; + goto err; } } @@ -6753,6 +6757,12 @@ static int __maybe_unused wlcore_runtime_resume(struct device *dev) spin_lock_irqsave(&wl->wl_lock, flags); wl->elp_compl = NULL; spin_unlock_irqrestore(&wl->wl_lock, flags); + + if (recovery) { + set_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags); + wl12xx_queue_recovery_work(wl); + } + return ret; } diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c index f17f602e61712..5b97cc946d70a 100644 --- a/drivers/net/xen-netfront.c +++ 
b/drivers/net/xen-netfront.c @@ -905,7 +905,7 @@ static RING_IDX xennet_fill_frags(struct netfront_queue *queue, if (skb_shinfo(skb)->nr_frags == MAX_SKB_FRAGS) { unsigned int pull_to = NETFRONT_SKB_CB(skb)->pull_to; - BUG_ON(pull_to <= skb_headlen(skb)); + BUG_ON(pull_to < skb_headlen(skb)); __pskb_pull_tail(skb, pull_to - skb_headlen(skb)); } if (unlikely(skb_shinfo(skb)->nr_frags >= MAX_SKB_FRAGS)) { diff --git a/drivers/nfc/nfcmrvl/uart.c b/drivers/nfc/nfcmrvl/uart.c index 91162f8e0366c..9a22056e8d9ee 100644 --- a/drivers/nfc/nfcmrvl/uart.c +++ b/drivers/nfc/nfcmrvl/uart.c @@ -73,10 +73,9 @@ static int nfcmrvl_uart_parse_dt(struct device_node *node, struct device_node *matched_node; int ret; - matched_node = of_find_compatible_node(node, NULL, "marvell,nfc-uart"); + matched_node = of_get_compatible_child(node, "marvell,nfc-uart"); if (!matched_node) { - matched_node = of_find_compatible_node(node, NULL, - "mrvl,nfc-uart"); + matched_node = of_get_compatible_child(node, "mrvl,nfc-uart"); if (!matched_node) return -ENODEV; } diff --git a/drivers/nvdimm/bus.c b/drivers/nvdimm/bus.c index 8aae6dcc839fe..9148015ed8036 100644 --- a/drivers/nvdimm/bus.c +++ b/drivers/nvdimm/bus.c @@ -488,6 +488,8 @@ static void nd_async_device_register(void *d, async_cookie_t cookie) put_device(dev); } put_device(dev); + if (dev->parent) + put_device(dev->parent); } static void nd_async_device_unregister(void *d, async_cookie_t cookie) @@ -507,6 +509,8 @@ void __nd_device_register(struct device *dev) if (!dev) return; dev->bus = &nvdimm_bus_type; + if (dev->parent) + get_device(dev->parent); get_device(dev); async_schedule_domain(nd_async_device_register, dev, &nd_async_domain); diff --git a/drivers/nvdimm/nd-core.h b/drivers/nvdimm/nd-core.h index ac68072fb8cd6..5ff254dc9b14f 100644 --- a/drivers/nvdimm/nd-core.h +++ b/drivers/nvdimm/nd-core.h @@ -112,6 +112,8 @@ resource_size_t nd_pmem_available_dpa(struct nd_region *nd_region, struct nd_mapping *nd_mapping, resource_size_t 
*overlap); resource_size_t nd_blk_available_dpa(struct nd_region *nd_region); resource_size_t nd_region_available_dpa(struct nd_region *nd_region); +int nd_region_conflict(struct nd_region *nd_region, resource_size_t start, + resource_size_t size); resource_size_t nvdimm_allocated_dpa(struct nvdimm_drvdata *ndd, struct nd_label_id *label_id); int alias_dpa_busy(struct device *dev, void *data); diff --git a/drivers/nvdimm/pfn_devs.c b/drivers/nvdimm/pfn_devs.c index 3f7ad5bc443ee..7fe84bfe08785 100644 --- a/drivers/nvdimm/pfn_devs.c +++ b/drivers/nvdimm/pfn_devs.c @@ -590,14 +590,47 @@ static u64 phys_pmem_align_down(struct nd_pfn *nd_pfn, u64 phys) ALIGN_DOWN(phys, nd_pfn->align)); } +/* + * Check if pmem collides with 'System RAM', or other regions when + * section aligned. Trim it accordingly. + */ +static void trim_pfn_device(struct nd_pfn *nd_pfn, u32 *start_pad, u32 *end_trunc) +{ + struct nd_namespace_common *ndns = nd_pfn->ndns; + struct nd_namespace_io *nsio = to_nd_namespace_io(&ndns->dev); + struct nd_region *nd_region = to_nd_region(nd_pfn->dev.parent); + const resource_size_t start = nsio->res.start; + const resource_size_t end = start + resource_size(&nsio->res); + resource_size_t adjust, size; + + *start_pad = 0; + *end_trunc = 0; + + adjust = start - PHYS_SECTION_ALIGN_DOWN(start); + size = resource_size(&nsio->res) + adjust; + if (region_intersects(start - adjust, size, IORESOURCE_SYSTEM_RAM, + IORES_DESC_NONE) == REGION_MIXED + || nd_region_conflict(nd_region, start - adjust, size)) + *start_pad = PHYS_SECTION_ALIGN_UP(start) - start; + + /* Now check that end of the range does not collide. 
*/ + adjust = PHYS_SECTION_ALIGN_UP(end) - end; + size = resource_size(&nsio->res) + adjust; + if (region_intersects(start, size, IORESOURCE_SYSTEM_RAM, + IORES_DESC_NONE) == REGION_MIXED + || !IS_ALIGNED(end, nd_pfn->align) + || nd_region_conflict(nd_region, start, size + adjust)) + *end_trunc = end - phys_pmem_align_down(nd_pfn, end); +} + static int nd_pfn_init(struct nd_pfn *nd_pfn) { u32 dax_label_reserve = is_nd_dax(&nd_pfn->dev) ? SZ_128K : 0; struct nd_namespace_common *ndns = nd_pfn->ndns; - u32 start_pad = 0, end_trunc = 0; + struct nd_namespace_io *nsio = to_nd_namespace_io(&ndns->dev); resource_size_t start, size; - struct nd_namespace_io *nsio; struct nd_region *nd_region; + u32 start_pad, end_trunc; struct nd_pfn_sb *pfn_sb; unsigned long npfns; phys_addr_t offset; @@ -629,30 +662,7 @@ static int nd_pfn_init(struct nd_pfn *nd_pfn) memset(pfn_sb, 0, sizeof(*pfn_sb)); - /* - * Check if pmem collides with 'System RAM' when section aligned and - * trim it accordingly - */ - nsio = to_nd_namespace_io(&ndns->dev); - start = PHYS_SECTION_ALIGN_DOWN(nsio->res.start); - size = resource_size(&nsio->res); - if (region_intersects(start, size, IORESOURCE_SYSTEM_RAM, - IORES_DESC_NONE) == REGION_MIXED) { - start = nsio->res.start; - start_pad = PHYS_SECTION_ALIGN_UP(start) - start; - } - - start = nsio->res.start; - size = PHYS_SECTION_ALIGN_UP(start + size) - start; - if (region_intersects(start, size, IORESOURCE_SYSTEM_RAM, - IORES_DESC_NONE) == REGION_MIXED - || !IS_ALIGNED(start + resource_size(&nsio->res), - nd_pfn->align)) { - size = resource_size(&nsio->res); - end_trunc = start + size - phys_pmem_align_down(nd_pfn, - start + size); - } - + trim_pfn_device(nd_pfn, &start_pad, &end_trunc); if (start_pad + end_trunc) dev_info(&nd_pfn->dev, "%s alignment collision, truncate %d bytes\n", dev_name(&ndns->dev), start_pad + end_trunc); @@ -663,7 +673,7 @@ static int nd_pfn_init(struct nd_pfn *nd_pfn) * implementation will limit the pfns advertised through * 
->direct_access() to those that are included in the memmap. */ - start += start_pad; + start = nsio->res.start + start_pad; size = resource_size(&nsio->res); npfns = PFN_SECTION_ALIGN_UP((size - start_pad - end_trunc - SZ_8K) / PAGE_SIZE); diff --git a/drivers/nvdimm/pmem.c b/drivers/nvdimm/pmem.c index 6071e2942053c..1d432c5ed2753 100644 --- a/drivers/nvdimm/pmem.c +++ b/drivers/nvdimm/pmem.c @@ -309,8 +309,11 @@ static void pmem_release_queue(void *q) blk_cleanup_queue(q); } -static void pmem_freeze_queue(void *q) +static void pmem_freeze_queue(struct percpu_ref *ref) { + struct request_queue *q; + + q = container_of(ref, typeof(*q), q_usage_counter); blk_freeze_queue_start(q); } @@ -402,6 +405,7 @@ static int pmem_attach_disk(struct device *dev, pmem->pfn_flags = PFN_DEV; pmem->pgmap.ref = &q->q_usage_counter; + pmem->pgmap.kill = pmem_freeze_queue; if (is_nd_pfn(dev)) { if (setup_pagemap_fsdax(dev, &pmem->pgmap)) return -ENOMEM; @@ -421,16 +425,11 @@ static int pmem_attach_disk(struct device *dev, addr = devm_memremap_pages(dev, &pmem->pgmap); pmem->pfn_flags |= PFN_MAP; memcpy(&bb_res, &pmem->pgmap.res, sizeof(bb_res)); - } else + } else { addr = devm_memremap(dev, pmem->phys_addr, pmem->size, ARCH_MEMREMAP_PMEM); - - /* - * At release time the queue must be frozen before - * devm_memremap_pages is unwound - */ - if (devm_add_action_or_reset(dev, pmem_freeze_queue, q)) - return -ENOMEM; + memcpy(&bb_res, &nsio->res, sizeof(bb_res)); + } if (IS_ERR(addr)) return PTR_ERR(addr); diff --git a/drivers/nvdimm/region_devs.c b/drivers/nvdimm/region_devs.c index fa37afcd43ff8..e7377f1028ef6 100644 --- a/drivers/nvdimm/region_devs.c +++ b/drivers/nvdimm/region_devs.c @@ -560,10 +560,17 @@ static ssize_t region_badblocks_show(struct device *dev, struct device_attribute *attr, char *buf) { struct nd_region *nd_region = to_nd_region(dev); + ssize_t rc; - return badblocks_show(&nd_region->bb, buf, 0); -} + device_lock(dev); + if (dev->driver) + rc = 
badblocks_show(&nd_region->bb, buf, 0); + else + rc = -ENXIO; + device_unlock(dev); + return rc; +} static DEVICE_ATTR(badblocks, 0444, region_badblocks_show, NULL); static ssize_t resource_show(struct device *dev, @@ -1177,6 +1184,47 @@ int nvdimm_has_cache(struct nd_region *nd_region) } EXPORT_SYMBOL_GPL(nvdimm_has_cache); +struct conflict_context { + struct nd_region *nd_region; + resource_size_t start, size; +}; + +static int region_conflict(struct device *dev, void *data) +{ + struct nd_region *nd_region; + struct conflict_context *ctx = data; + resource_size_t res_end, region_end, region_start; + + if (!is_memory(dev)) + return 0; + + nd_region = to_nd_region(dev); + if (nd_region == ctx->nd_region) + return 0; + + res_end = ctx->start + ctx->size; + region_start = nd_region->ndr_start; + region_end = region_start + nd_region->ndr_size; + if (ctx->start >= region_start && ctx->start < region_end) + return -EBUSY; + if (res_end > region_start && res_end <= region_end) + return -EBUSY; + return 0; +} + +int nd_region_conflict(struct nd_region *nd_region, resource_size_t start, + resource_size_t size) +{ + struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(&nd_region->dev); + struct conflict_context ctx = { + .nd_region = nd_region, + .start = start, + .size = size, + }; + + return device_for_each_child(&nvdimm_bus->dev, &ctx, region_conflict); +} + void __exit nd_region_devs_exit(void) { ida_destroy(®ion_ida); diff --git a/drivers/nvme/host/Kconfig b/drivers/nvme/host/Kconfig index 88a8b5916624a..a0027cebf2db6 100644 --- a/drivers/nvme/host/Kconfig +++ b/drivers/nvme/host/Kconfig @@ -5,6 +5,7 @@ config BLK_DEV_NVME tristate "NVM Express block device" depends on PCI && BLOCK select NVME_CORE + select RPMB ---help--- The NVM Express driver is for solid state drives directly connected to the PCI or PCI Express bus. 
If you know you diff --git a/drivers/nvme/host/Makefile b/drivers/nvme/host/Makefile index aea459c65ae1b..99f99e87b82ba 100644 --- a/drivers/nvme/host/Makefile +++ b/drivers/nvme/host/Makefile @@ -15,6 +15,7 @@ nvme-core-$(CONFIG_NVM) += lightnvm.o nvme-core-$(CONFIG_FAULT_INJECTION_DEBUG_FS) += fault_inject.o nvme-y += pci.o +nvme-y += rpmb.o nvme-fabrics-y += fabrics.o diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c index 6bb9908bf46f1..d068d130c0930 100644 --- a/drivers/nvme/host/core.c +++ b/drivers/nvme/host/core.c @@ -11,7 +11,6 @@ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. */ - #include #include #include @@ -831,6 +830,8 @@ static int nvme_submit_user_cmd(struct request_queue *q, static void nvme_keep_alive_end_io(struct request *rq, blk_status_t status) { struct nvme_ctrl *ctrl = rq->end_io_data; + unsigned long flags; + bool startka = false; blk_mq_free_request(rq); @@ -841,7 +842,13 @@ static void nvme_keep_alive_end_io(struct request *rq, blk_status_t status) return; } - schedule_delayed_work(&ctrl->ka_work, ctrl->kato * HZ); + spin_lock_irqsave(&ctrl->lock, flags); + if (ctrl->state == NVME_CTRL_LIVE || + ctrl->state == NVME_CTRL_CONNECTING) + startka = true; + spin_unlock_irqrestore(&ctrl->lock, flags); + if (startka) + schedule_delayed_work(&ctrl->ka_work, ctrl->kato * HZ); } static int nvme_keep_alive(struct nvme_ctrl *ctrl) @@ -1519,8 +1526,10 @@ static void __nvme_revalidate_disk(struct gendisk *disk, struct nvme_id_ns *id) if (ns->ndev) nvme_nvm_update_nvm_info(ns); #ifdef CONFIG_NVME_MULTIPATH - if (ns->head->disk) + if (ns->head->disk) { nvme_update_disk_info(ns->head->disk, ns, id); + blk_queue_stack_limits(ns->head->disk->queue, ns->queue); + } #endif } @@ -1659,25 +1668,57 @@ static const struct pr_ops nvme_pr_ops = { .pr_clear = nvme_pr_clear, }; -#ifdef CONFIG_BLK_SED_OPAL -int nvme_sec_submit(void *data, u16 spsp, u8 secp, void *buffer, size_t len, - bool send) +int 
nvme_sec_send(struct nvme_ctrl *ctrl, u8 nssf, u16 spsp, u8 secp, + void *buffer, size_t len) { - struct nvme_ctrl *ctrl = data; struct nvme_command cmd; + dev_dbg(ctrl->device, "%s target = %hhu SPSP = %hu SECP = %hhX len=%zd\n", + __func__, nssf, spsp, secp, len); + memset(&cmd, 0, sizeof(cmd)); - if (send) - cmd.common.opcode = nvme_admin_security_send; - else - cmd.common.opcode = nvme_admin_security_recv; + cmd.common.opcode = nvme_admin_security_send; + cmd.common.nsid = 0; + cmd.common.cdw10[0] = + cpu_to_le32(((u32)secp) << 24 | ((u32)spsp) << 8 | nssf); + cmd.common.cdw10[1] = cpu_to_le32(len); + + return __nvme_submit_sync_cmd(ctrl->admin_q, &cmd, NULL, buffer, len, + ADMIN_TIMEOUT, NVME_QID_ANY, 1, 0); +} +EXPORT_SYMBOL_GPL(nvme_sec_send); + +int nvme_sec_recv(struct nvme_ctrl *ctrl, u8 nssf, u16 spsp, u8 secp, + void *buffer, size_t len) +{ + struct nvme_command cmd; + + dev_dbg(ctrl->device, "%s target = %hhu SPSP = %hu SECP = %hhX len=%zd\n", + __func__, nssf, spsp, secp, len); + + memset(&cmd, 0, sizeof(cmd)); + cmd.common.opcode = nvme_admin_security_recv; cmd.common.nsid = 0; - cmd.common.cdw10[0] = cpu_to_le32(((u32)secp) << 24 | ((u32)spsp) << 8); + cmd.common.cdw10[0] = + cpu_to_le32(((u32)secp) << 24 | ((u32)spsp) << 8 | nssf); cmd.common.cdw10[1] = cpu_to_le32(len); return __nvme_submit_sync_cmd(ctrl->admin_q, &cmd, NULL, buffer, len, ADMIN_TIMEOUT, NVME_QID_ANY, 1, 0); } +EXPORT_SYMBOL_GPL(nvme_sec_recv); + +#ifdef CONFIG_BLK_SED_OPAL +int nvme_sec_submit(void *data, u16 spsp, u8 secp, void *buffer, size_t len, + bool send) +{ + struct nvme_ctrl *ctrl = data; + + if (send) + return nvme_sec_send(ctrl, 0, spsp, secp, buffer, len); + else + return nvme_sec_recv(ctrl, 0, spsp, secp, buffer, len); +} EXPORT_SYMBOL_GPL(nvme_sec_submit); #endif /* CONFIG_BLK_SED_OPAL */ @@ -2468,7 +2509,10 @@ int nvme_init_identify(struct nvme_ctrl *ctrl) ctrl->hmmaxd = le16_to_cpu(id->hmmaxd); } + ctrl->rpmbs = le32_to_cpu(id->rpmbs); + ret = nvme_mpath_init(ctrl, 
id); + kfree(id); if (ret < 0) @@ -3306,6 +3350,9 @@ void nvme_remove_namespaces(struct nvme_ctrl *ctrl) struct nvme_ns *ns, *next; LIST_HEAD(ns_list); + /* prevent racing with ns scanning */ + flush_work(&ctrl->scan_work); + /* * The dead states indicates the controller was not gracefully * disconnected. In that case, we won't be able to flush any data while @@ -3461,7 +3508,6 @@ void nvme_stop_ctrl(struct nvme_ctrl *ctrl) nvme_mpath_stop(ctrl); nvme_stop_keep_alive(ctrl); flush_work(&ctrl->async_event_work); - flush_work(&ctrl->scan_work); cancel_work_sync(&ctrl->fw_act_work); if (ctrl->ops->stop_ctrl) ctrl->ops->stop_ctrl(ctrl); diff --git a/drivers/nvme/host/fabrics.c b/drivers/nvme/host/fabrics.c index 206d63cb1afc8..bcd09d3a44dad 100644 --- a/drivers/nvme/host/fabrics.c +++ b/drivers/nvme/host/fabrics.c @@ -552,8 +552,11 @@ blk_status_t nvmf_fail_nonready_command(struct nvme_ctrl *ctrl, ctrl->state != NVME_CTRL_DEAD && !blk_noretry_request(rq) && !(rq->cmd_flags & REQ_NVME_MPATH)) return BLK_STS_RESOURCE; - nvme_req(rq)->status = NVME_SC_ABORT_REQ; - return BLK_STS_IOERR; + + nvme_req(rq)->status = NVME_SC_HOST_PATH_ERROR; + blk_mq_start_request(rq); + nvme_complete_rq(rq); + return BLK_STS_OK; } EXPORT_SYMBOL_GPL(nvmf_fail_nonready_command); diff --git a/drivers/nvme/host/fc.c b/drivers/nvme/host/fc.c index 611e70cae7544..9375fa705d829 100644 --- a/drivers/nvme/host/fc.c +++ b/drivers/nvme/host/fc.c @@ -144,6 +144,7 @@ struct nvme_fc_ctrl { bool ioq_live; bool assoc_active; + atomic_t err_work_active; u64 association_id; struct list_head ctrl_list; /* rport->ctrl_list */ @@ -152,6 +153,7 @@ struct nvme_fc_ctrl { struct blk_mq_tag_set tag_set; struct delayed_work connect_work; + struct work_struct err_work; struct kref ref; u32 flags; @@ -1523,6 +1525,10 @@ nvme_fc_abort_aen_ops(struct nvme_fc_ctrl *ctrl) struct nvme_fc_fcp_op *aen_op = ctrl->aen_ops; int i; + /* ensure we've initialized the ops once */ + if (!(aen_op->flags & FCOP_FLAGS_AEN)) + return; + for 
(i = 0; i < NVME_NR_AEN_COMMANDS; i++, aen_op++) __nvme_fc_abort_op(ctrl, aen_op); } @@ -2036,7 +2042,25 @@ nvme_fc_nvme_ctrl_freed(struct nvme_ctrl *nctrl) static void nvme_fc_error_recovery(struct nvme_fc_ctrl *ctrl, char *errmsg) { - /* only proceed if in LIVE state - e.g. on first error */ + int active; + + /* + * if an error (io timeout, etc) while (re)connecting, + * it's an error on creating the new association. + * Start the error recovery thread if it hasn't already + * been started. It is expected there could be multiple + * ios hitting this path before things are cleaned up. + */ + if (ctrl->ctrl.state == NVME_CTRL_CONNECTING) { + active = atomic_xchg(&ctrl->err_work_active, 1); + if (!active && !schedule_work(&ctrl->err_work)) { + atomic_set(&ctrl->err_work_active, 0); + WARN_ON(1); + } + return; + } + + /* Otherwise, only proceed if in LIVE state - e.g. on first error */ if (ctrl->ctrl.state != NVME_CTRL_LIVE) return; @@ -2802,6 +2826,7 @@ nvme_fc_delete_ctrl(struct nvme_ctrl *nctrl) { struct nvme_fc_ctrl *ctrl = to_fc_ctrl(nctrl); + cancel_work_sync(&ctrl->err_work); cancel_delayed_work_sync(&ctrl->connect_work); /* * kill the association on the link side. 
this will block @@ -2854,23 +2879,30 @@ nvme_fc_reconnect_or_delete(struct nvme_fc_ctrl *ctrl, int status) } static void -nvme_fc_reset_ctrl_work(struct work_struct *work) +__nvme_fc_terminate_io(struct nvme_fc_ctrl *ctrl) { - struct nvme_fc_ctrl *ctrl = - container_of(work, struct nvme_fc_ctrl, ctrl.reset_work); - int ret; - - nvme_stop_ctrl(&ctrl->ctrl); + nvme_stop_keep_alive(&ctrl->ctrl); /* will block will waiting for io to terminate */ nvme_fc_delete_association(ctrl); - if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_CONNECTING)) { + if (ctrl->ctrl.state != NVME_CTRL_CONNECTING && + !nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_CONNECTING)) dev_err(ctrl->ctrl.device, "NVME-FC{%d}: error_recovery: Couldn't change state " "to CONNECTING\n", ctrl->cnum); - return; - } +} + +static void +nvme_fc_reset_ctrl_work(struct work_struct *work) +{ + struct nvme_fc_ctrl *ctrl = + container_of(work, struct nvme_fc_ctrl, ctrl.reset_work); + int ret; + + __nvme_fc_terminate_io(ctrl); + + nvme_stop_ctrl(&ctrl->ctrl); if (ctrl->rport->remoteport.port_state == FC_OBJSTATE_ONLINE) ret = nvme_fc_create_association(ctrl); @@ -2885,6 +2917,24 @@ nvme_fc_reset_ctrl_work(struct work_struct *work) ctrl->cnum); } +static void +nvme_fc_connect_err_work(struct work_struct *work) +{ + struct nvme_fc_ctrl *ctrl = + container_of(work, struct nvme_fc_ctrl, err_work); + + __nvme_fc_terminate_io(ctrl); + + atomic_set(&ctrl->err_work_active, 0); + + /* + * Rescheduling the connection after recovering + * from the io error is left to the reconnect work + * item, which is what should have stalled waiting on + * the io that had the error that scheduled this work. 
+ */ +} + static const struct nvme_ctrl_ops nvme_fc_ctrl_ops = { .name = "fc", .module = THIS_MODULE, @@ -2995,6 +3045,7 @@ nvme_fc_init_ctrl(struct device *dev, struct nvmf_ctrl_options *opts, ctrl->cnum = idx; ctrl->ioq_live = false; ctrl->assoc_active = false; + atomic_set(&ctrl->err_work_active, 0); init_waitqueue_head(&ctrl->ioabort_wait); get_device(ctrl->dev); @@ -3002,6 +3053,7 @@ nvme_fc_init_ctrl(struct device *dev, struct nvmf_ctrl_options *opts, INIT_WORK(&ctrl->ctrl.reset_work, nvme_fc_reset_ctrl_work); INIT_DELAYED_WORK(&ctrl->connect_work, nvme_fc_connect_ctrl_work); + INIT_WORK(&ctrl->err_work, nvme_fc_connect_err_work); spin_lock_init(&ctrl->lock); /* io queue count */ @@ -3092,6 +3144,7 @@ nvme_fc_init_ctrl(struct device *dev, struct nvmf_ctrl_options *opts, fail_ctrl: nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_DELETING); cancel_work_sync(&ctrl->ctrl.reset_work); + cancel_work_sync(&ctrl->err_work); cancel_delayed_work_sync(&ctrl->connect_work); ctrl->ctrl.opts = NULL; diff --git a/drivers/nvme/host/multipath.c b/drivers/nvme/host/multipath.c index 9fe3fff818b8a..c27af277e14ee 100644 --- a/drivers/nvme/host/multipath.c +++ b/drivers/nvme/host/multipath.c @@ -77,6 +77,13 @@ void nvme_failover_req(struct request *req) queue_work(nvme_wq, &ns->ctrl->ana_work); } break; + case NVME_SC_HOST_PATH_ERROR: + /* + * Temporary transport disruption in talking to the controller. + * Try to send on a new path. 
+ */ + nvme_mpath_clear_current_path(ns); + break; default: /* * Reset the controller for any non-ANA error as we don't know @@ -250,6 +257,7 @@ int nvme_mpath_alloc_disk(struct nvme_ctrl *ctrl, struct nvme_ns_head *head) blk_queue_flag_set(QUEUE_FLAG_NONROT, q); /* set to a default value for 512 until disk is validated */ blk_queue_logical_block_size(q, 512); + blk_set_stacking_limits(&q->limits); /* we need to propagate up the VMC settings */ if (ctrl->vwc & NVME_CTRL_VWC_PRESENT) diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h index bb4a2003c0978..92d9f81d82859 100644 --- a/drivers/nvme/host/nvme.h +++ b/drivers/nvme/host/nvme.h @@ -21,6 +21,7 @@ #include #include #include +#include #include #include @@ -167,6 +168,7 @@ struct nvme_ctrl { struct list_head subsys_entry; struct opal_dev *opal_dev; + struct rpmb_dev *rdev; char name[12]; u16 cntlid; @@ -193,6 +195,7 @@ struct nvme_ctrl { u8 apsta; u32 oaes; u32 aen_result; + u32 rpmbs; unsigned int shutdown_timeout; unsigned int kato; bool subsystem; @@ -420,6 +423,12 @@ void nvme_start_ctrl(struct nvme_ctrl *ctrl); void nvme_stop_ctrl(struct nvme_ctrl *ctrl); void nvme_put_ctrl(struct nvme_ctrl *ctrl); int nvme_init_identify(struct nvme_ctrl *ctrl); +int nvme_sec_send(struct nvme_ctrl *ctrl, u8 nssf, u16 spsp, u8 secp, + void *buffer, size_t len); +int nvme_sec_recv(struct nvme_ctrl *ctrl, u8 nssf, u16 spsp, u8 secp, + void *buffer, size_t len); +int nvme_init_rpmb(struct nvme_ctrl *ctrl); +void nvme_exit_rpmb(struct nvme_ctrl *ctrl); void nvme_remove_namespaces(struct nvme_ctrl *ctrl); @@ -537,6 +546,9 @@ static inline void nvme_mpath_check_last_path(struct nvme_ns *ns) static inline int nvme_mpath_init(struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id) { + if (ctrl->subsys->cmic & (1 << 3)) + dev_warn(ctrl->device, +"Please enable CONFIG_NVME_MULTIPATH for full support of multi-port devices.\n"); return 0; } static inline void nvme_mpath_uninit(struct nvme_ctrl *ctrl) diff --git 
a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c index d668682f91dfd..6c19fa7525e37 100644 --- a/drivers/nvme/host/pci.c +++ b/drivers/nvme/host/pci.c @@ -2289,6 +2289,10 @@ static void nvme_reset_work(struct work_struct *work) if (result) goto out; + result = nvme_init_rpmb(&dev->ctrl); + if (result < 0) + goto out; + if (dev->ctrl.oacs & NVME_CTRL_OACS_SEC_SUPP) { if (!dev->ctrl.opal_dev) dev->ctrl.opal_dev = diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c index dc042017c293a..b6a28de682e85 100644 --- a/drivers/nvme/host/rdma.c +++ b/drivers/nvme/host/rdma.c @@ -184,6 +184,7 @@ static int nvme_rdma_alloc_qe(struct ib_device *ibdev, struct nvme_rdma_qe *qe, qe->dma = ib_dma_map_single(ibdev, qe->data, capsule_size, dir); if (ib_dma_mapping_error(ibdev, qe->dma)) { kfree(qe->data); + qe->data = NULL; return -ENOMEM; } @@ -816,6 +817,7 @@ static int nvme_rdma_configure_admin_queue(struct nvme_rdma_ctrl *ctrl, out_free_async_qe: nvme_rdma_free_qe(ctrl->device->dev, &ctrl->async_event_sqe, sizeof(struct nvme_command), DMA_TO_DEVICE); + ctrl->async_event_sqe.data = NULL; out_free_queue: nvme_rdma_free_queue(&ctrl->queues[0]); return error; diff --git a/drivers/nvme/host/rpmb.c b/drivers/nvme/host/rpmb.c new file mode 100644 index 0000000000000..34e807bfc4f92 --- /dev/null +++ b/drivers/nvme/host/rpmb.c @@ -0,0 +1,119 @@ +// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 +/* + * Copyright(c) 2018 Intel Corporation. All rights reserved. 
+ */ +#include +#include "nvme.h" +#define NVME_SECP_RPMB 0xEA /* Security Protocol EAh is assigned + * for NVMe use (refer to ACS-4) + */ +#define NVME_SPSP_RPMB 0x0001 /* RPMB Target */ +static int nvme_rpmb_cmd_seq(struct device *dev, u8 target, + struct rpmb_cmd *cmds, u32 ncmds) +{ + struct nvme_ctrl *ctrl; + struct rpmb_cmd *cmd; + u32 size; + int ret; + int i; + + ctrl = dev_get_drvdata(dev); + + for (ret = 0, i = 0; i < ncmds && !ret; i++) { + cmd = &cmds[i]; + size = rpmb_ioc_frames_len_nvme(cmd->nframes); + if (cmd->flags & RPMB_F_WRITE) + ret = nvme_sec_send(ctrl, target, + NVME_SPSP_RPMB, NVME_SECP_RPMB, + cmd->frames, size); + else + ret = nvme_sec_recv(ctrl, target, + NVME_SPSP_RPMB, NVME_SECP_RPMB, + cmd->frames, size); + } + + return ret; +} + +static int nvme_rpmb_get_capacity(struct device *dev, u8 target) +{ + struct nvme_ctrl *ctrl; + + ctrl = dev_get_drvdata(dev); + + return ((ctrl->rpmbs >> 16) & 0xFF) + 1; +} + +static struct rpmb_ops nvme_rpmb_dev_ops = { + .cmd_seq = nvme_rpmb_cmd_seq, + .get_capacity = nvme_rpmb_get_capacity, + .type = RPMB_TYPE_NVME, +}; + +static void nvme_rpmb_set_cap(struct nvme_ctrl *ctrl, + struct rpmb_ops *ops) +{ + ops->wr_cnt_max = ((ctrl->rpmbs >> 24) & 0xFF) + 1; + ops->rd_cnt_max = ops->wr_cnt_max; + ops->block_size = 2; /* 1 sector == 2 half sectors */ + ops->auth_method = (ctrl->rpmbs >> 3) & 0x3; +} + +static void nvme_rpmb_add(struct nvme_ctrl *ctrl) +{ + struct rpmb_dev *rdev; + int ndevs = ctrl->rpmbs & 0x7; + int i; + + nvme_rpmb_set_cap(ctrl, &nvme_rpmb_dev_ops); + + /* Add RPMB partitions */ + for (i = 0; i < ndevs; i++) { + rdev = rpmb_dev_register(ctrl->device, i, &nvme_rpmb_dev_ops); + if (IS_ERR(rdev)) { + dev_warn(ctrl->device, "%s: cannot register to rpmb %ld\n", + dev_name(ctrl->device), PTR_ERR(rdev)); + } + dev_set_drvdata(&rdev->dev, ctrl); + } +} + +static void nvme_rpmb_remove(struct nvme_ctrl *ctrl) +{ + int ndevs = ctrl->rpmbs & 0x7; + int i; + + /* FIXME: target */ + for (i = 0; i < 
ndevs; i++) + rpmb_dev_unregister_by_device(ctrl->device, i); +} + +int nvme_init_rpmb(struct nvme_ctrl *ctrl) +{ + dev_err(ctrl->device, "RPMBS %X\n", ctrl->rpmbs); + + if ((ctrl->rpmbs & 0x7) == 0x0) { + dev_err(ctrl->device, "RPMBS No partitions\n"); + return 0; + } + + dev_err(ctrl->device, "RPMBS Number of partitions %d\n", + ctrl->rpmbs & 0x7); + dev_err(ctrl->device, "RPMBS Authentication Method: %d\n", + (ctrl->rpmbs >> 3) & 0x3); + dev_err(ctrl->device, "RPMBS Total Size: %d %dK", + (ctrl->rpmbs >> 16) & 0xFF, + (((ctrl->rpmbs >> 16) & 0xFF) + 1) * 128); + dev_err(ctrl->device, "RPMBS Access Size: %d %dB", + (ctrl->rpmbs >> 24) & 0xFF, + (((ctrl->rpmbs >> 24) & 0xFF) + 1) * 512); + + nvme_rpmb_add(ctrl); + + return 0; +} + +void nvme_exit_rpmb(struct nvme_ctrl *ctrl) +{ + nvme_rpmb_remove(ctrl); +} diff --git a/drivers/nvme/target/rdma.c b/drivers/nvme/target/rdma.c index bfc4da660bb40..08f997a390d5d 100644 --- a/drivers/nvme/target/rdma.c +++ b/drivers/nvme/target/rdma.c @@ -139,6 +139,10 @@ static void nvmet_rdma_recv_done(struct ib_cq *cq, struct ib_wc *wc); static void nvmet_rdma_read_data_done(struct ib_cq *cq, struct ib_wc *wc); static void nvmet_rdma_qp_event(struct ib_event *event, void *priv); static void nvmet_rdma_queue_disconnect(struct nvmet_rdma_queue *queue); +static void nvmet_rdma_free_rsp(struct nvmet_rdma_device *ndev, + struct nvmet_rdma_rsp *r); +static int nvmet_rdma_alloc_rsp(struct nvmet_rdma_device *ndev, + struct nvmet_rdma_rsp *r); static const struct nvmet_fabrics_ops nvmet_rdma_ops; @@ -182,9 +186,17 @@ nvmet_rdma_get_rsp(struct nvmet_rdma_queue *queue) spin_unlock_irqrestore(&queue->rsps_lock, flags); if (unlikely(!rsp)) { - rsp = kmalloc(sizeof(*rsp), GFP_KERNEL); + int ret; + + rsp = kzalloc(sizeof(*rsp), GFP_KERNEL); if (unlikely(!rsp)) return NULL; + ret = nvmet_rdma_alloc_rsp(queue->dev, rsp); + if (unlikely(ret)) { + kfree(rsp); + return NULL; + } + rsp->allocated = true; } @@ -196,7 +208,8 @@ nvmet_rdma_put_rsp(struct 
nvmet_rdma_rsp *rsp) { unsigned long flags; - if (rsp->allocated) { + if (unlikely(rsp->allocated)) { + nvmet_rdma_free_rsp(rsp->queue->dev, rsp); kfree(rsp); return; } @@ -529,6 +542,7 @@ static void nvmet_rdma_send_done(struct ib_cq *cq, struct ib_wc *wc) { struct nvmet_rdma_rsp *rsp = container_of(wc->wr_cqe, struct nvmet_rdma_rsp, send_cqe); + struct nvmet_rdma_queue *queue = cq->cq_context; nvmet_rdma_release_rsp(rsp); @@ -536,7 +550,7 @@ static void nvmet_rdma_send_done(struct ib_cq *cq, struct ib_wc *wc) wc->status != IB_WC_WR_FLUSH_ERR)) { pr_err("SEND for CQE 0x%p failed with status %s (%d).\n", wc->wr_cqe, ib_wc_status_msg(wc->status), wc->status); - nvmet_rdma_error_comp(rsp->queue); + nvmet_rdma_error_comp(queue); } } diff --git a/drivers/nvmem/core.c b/drivers/nvmem/core.c index aa1657831b70c..7c530c88b3fbb 100644 --- a/drivers/nvmem/core.c +++ b/drivers/nvmem/core.c @@ -516,11 +516,17 @@ struct nvmem_device *nvmem_register(const struct nvmem_config *config) goto err_device_del; } - if (config->cells) - nvmem_add_cells(nvmem, config->cells, config->ncells); + if (config->cells) { + rval = nvmem_add_cells(nvmem, config->cells, config->ncells); + if (rval) + goto err_teardown_compat; + } return nvmem; +err_teardown_compat: + if (config->compat) + device_remove_bin_file(nvmem->base_dev, &nvmem->eeprom); err_device_del: device_del(&nvmem->dev); err_put_device: diff --git a/drivers/of/base.c b/drivers/of/base.c index 74eaedd5b860f..3f21ea6a90dcb 100644 --- a/drivers/of/base.c +++ b/drivers/of/base.c @@ -67,6 +67,7 @@ bool of_node_name_eq(const struct device_node *np, const char *name) return (strlen(name) == len) && (strncmp(node_name, name, len) == 0); } +EXPORT_SYMBOL(of_node_name_eq); bool of_node_name_prefix(const struct device_node *np, const char *prefix) { @@ -75,6 +76,7 @@ bool of_node_name_prefix(const struct device_node *np, const char *prefix) return strncmp(kbasename(np->full_name), prefix, strlen(prefix)) == 0; } 
+EXPORT_SYMBOL(of_node_name_prefix); int of_n_addr_cells(struct device_node *np) { @@ -113,9 +115,6 @@ int __weak of_node_to_nid(struct device_node *np) } #endif -static struct device_node **phandle_cache; -static u32 phandle_cache_mask; - /* * Assumptions behind phandle_cache implementation: * - phandle property values are in a contiguous range of 1..n @@ -124,6 +123,66 @@ static u32 phandle_cache_mask; * - the phandle lookup overhead reduction provided by the cache * will likely be less */ + +static struct device_node **phandle_cache; +static u32 phandle_cache_mask; + +/* + * Caller must hold devtree_lock. + */ +static void __of_free_phandle_cache(void) +{ + u32 cache_entries = phandle_cache_mask + 1; + u32 k; + + if (!phandle_cache) + return; + + for (k = 0; k < cache_entries; k++) + of_node_put(phandle_cache[k]); + + kfree(phandle_cache); + phandle_cache = NULL; +} + +int of_free_phandle_cache(void) +{ + unsigned long flags; + + raw_spin_lock_irqsave(&devtree_lock, flags); + + __of_free_phandle_cache(); + + raw_spin_unlock_irqrestore(&devtree_lock, flags); + + return 0; +} +#if !defined(CONFIG_MODULES) +late_initcall_sync(of_free_phandle_cache); +#endif + +/* + * Caller must hold devtree_lock. 
+ */ +void __of_free_phandle_cache_entry(phandle handle) +{ + phandle masked_handle; + struct device_node *np; + + if (!handle) + return; + + masked_handle = handle & phandle_cache_mask; + + if (phandle_cache) { + np = phandle_cache[masked_handle]; + if (np && handle == np->phandle) { + of_node_put(np); + phandle_cache[masked_handle] = NULL; + } + } +} + void of_populate_phandle_cache(void) { unsigned long flags; @@ -133,8 +192,7 @@ void of_populate_phandle_cache(void) raw_spin_lock_irqsave(&devtree_lock, flags); - kfree(phandle_cache); - phandle_cache = NULL; + __of_free_phandle_cache(); for_each_of_allnodes(np) if (np->phandle && np->phandle != OF_PHANDLE_ILLEGAL) @@ -152,30 +210,15 @@ void of_populate_phandle_cache(void) goto out; for_each_of_allnodes(np) - if (np->phandle && np->phandle != OF_PHANDLE_ILLEGAL) + if (np->phandle && np->phandle != OF_PHANDLE_ILLEGAL) { + of_node_get(np); phandle_cache[np->phandle & phandle_cache_mask] = np; + } out: raw_spin_unlock_irqrestore(&devtree_lock, flags); } -int of_free_phandle_cache(void) -{ - unsigned long flags; - - raw_spin_lock_irqsave(&devtree_lock, flags); - - kfree(phandle_cache); - phandle_cache = NULL; - - raw_spin_unlock_irqrestore(&devtree_lock, flags); - - return 0; -} -#if !defined(CONFIG_MODULES) -late_initcall_sync(of_free_phandle_cache); -#endif - void __init of_core_init(void) { struct device_node *np; @@ -1148,13 +1191,23 @@ struct device_node *of_find_node_by_phandle(phandle handle) if (phandle_cache[masked_handle] && handle == phandle_cache[masked_handle]->phandle) np = phandle_cache[masked_handle]; + if (np && of_node_check_flag(np, OF_DETACHED)) { + WARN_ON(1); /* did not uncache np on node removal */ + of_node_put(np); + phandle_cache[masked_handle] = NULL; + np = NULL; + } } if (!np) { for_each_of_allnodes(np) - if (np->phandle == handle) { - if (phandle_cache) + if (np->phandle == handle && + !of_node_check_flag(np, OF_DETACHED)) { + if (phandle_cache) { + /* will put when removed from cache */ 
+ of_node_get(np); phandle_cache[masked_handle] = np; + } break; } } diff --git a/drivers/of/device.c b/drivers/of/device.c index 5957cd4fa2621..40b9051a7fcee 100644 --- a/drivers/of/device.c +++ b/drivers/of/device.c @@ -149,9 +149,11 @@ int of_dma_configure(struct device *dev, struct device_node *np, bool force_dma) * set by the driver. */ mask = DMA_BIT_MASK(ilog2(dma_addr + size - 1) + 1); - dev->bus_dma_mask = mask; dev->coherent_dma_mask &= mask; *dev->dma_mask &= mask; + /* ...but only set bus mask if we found valid dma-ranges earlier */ + if (!ret) + dev->bus_dma_mask = mask; coherent = of_dma_is_coherent(np); dev_dbg(dev, "device is%sdma coherent\n", diff --git a/drivers/of/dynamic.c b/drivers/of/dynamic.c index f4f8ed9b5454c..ecea92f68c871 100644 --- a/drivers/of/dynamic.c +++ b/drivers/of/dynamic.c @@ -268,6 +268,9 @@ void __of_detach_node(struct device_node *np) } of_node_set_flag(np, OF_DETACHED); + + /* race with of_find_node_by_phandle() prevented by devtree_lock */ + __of_free_phandle_cache_entry(np->phandle); } /** diff --git a/drivers/of/of_numa.c b/drivers/of/of_numa.c index 27d9b4bba535c..2411ed3c73037 100644 --- a/drivers/of/of_numa.c +++ b/drivers/of/of_numa.c @@ -115,9 +115,14 @@ static int __init of_numa_parse_distance_map_v1(struct device_node *map) distance = of_read_number(matrix, 1); matrix++; + if ((nodea == nodeb && distance != LOCAL_DISTANCE) || + (nodea != nodeb && distance <= LOCAL_DISTANCE)) { + pr_err("Invalid distance[node%d -> node%d] = %d\n", + nodea, nodeb, distance); + return -EINVAL; + } + numa_set_distance(nodea, nodeb, distance); - pr_debug("distance[node%d -> node%d] = %d\n", - nodea, nodeb, distance); /* Set default distance of node B->A same as A->B */ if (nodeb > nodea) diff --git a/drivers/of/of_private.h b/drivers/of/of_private.h index 216175d11d3dc..f5da842841e53 100644 --- a/drivers/of/of_private.h +++ b/drivers/of/of_private.h @@ -76,6 +76,10 @@ static inline void __of_detach_node_sysfs(struct device_node *np) {} 
int of_resolve_phandles(struct device_node *tree); #endif +#if defined(CONFIG_OF_DYNAMIC) +void __of_free_phandle_cache_entry(phandle handle); +#endif + #if defined(CONFIG_OF_OVERLAY) void of_overlay_mutex_lock(void); void of_overlay_mutex_unlock(void); diff --git a/drivers/of/overlay.c b/drivers/of/overlay.c index eda57ef12fd05..baa9cee6fa2c3 100644 --- a/drivers/of/overlay.c +++ b/drivers/of/overlay.c @@ -378,7 +378,9 @@ static int add_changeset_node(struct overlay_changeset *ovcs, if (ret) return ret; - return build_changeset_next_level(ovcs, tchild, node); + ret = build_changeset_next_level(ovcs, tchild, node); + of_node_put(tchild); + return ret; } if (node->phandle && tchild->phandle) diff --git a/drivers/of/property.c b/drivers/of/property.c index f46828e3b082b..43720c2de138b 100644 --- a/drivers/of/property.c +++ b/drivers/of/property.c @@ -806,6 +806,7 @@ struct device_node *of_graph_get_remote_node(const struct device_node *node, if (!of_device_is_available(remote)) { pr_debug("not available for remote node\n"); + of_node_put(remote); return NULL; } diff --git a/drivers/opp/of.c b/drivers/opp/of.c index 7af0ddec936bb..20988c4266501 100644 --- a/drivers/opp/of.c +++ b/drivers/opp/of.c @@ -425,6 +425,7 @@ static int _of_add_opp_table_v2(struct device *dev, struct device_node *opp_np) dev_err(dev, "Not all nodes have performance state set (%d: %d)\n", count, pstate_count); ret = -ENOENT; + _dev_pm_opp_remove_table(opp_table, dev, false); goto put_opp_table; } diff --git a/drivers/opp/ti-opp-supply.c b/drivers/opp/ti-opp-supply.c index 9e5a9a3112c9c..3f4fb4dbbe33b 100644 --- a/drivers/opp/ti-opp-supply.c +++ b/drivers/opp/ti-opp-supply.c @@ -288,7 +288,10 @@ static int ti_opp_supply_set_opp(struct dev_pm_set_opp_data *data) int ret; vdd_uv = _get_optimal_vdd_voltage(dev, &opp_data, - new_supply_vbb->u_volt); + new_supply_vdd->u_volt); + + if (new_supply_vdd->u_volt_min < vdd_uv) + new_supply_vdd->u_volt_min = vdd_uv; /* Scaling up? 
Scale voltage before frequency */ if (freq > old_freq) { diff --git a/drivers/pci/controller/dwc/pci-dra7xx.c b/drivers/pci/controller/dwc/pci-dra7xx.c index ce9224a36f62f..a32d6dde7a579 100644 --- a/drivers/pci/controller/dwc/pci-dra7xx.c +++ b/drivers/pci/controller/dwc/pci-dra7xx.c @@ -542,7 +542,7 @@ static const struct of_device_id of_dra7xx_pcie_match[] = { }; /* - * dra7xx_pcie_ep_unaligned_memaccess: workaround for AM572x/AM571x Errata i870 + * dra7xx_pcie_unaligned_memaccess: workaround for AM572x/AM571x Errata i870 * @dra7xx: the dra7xx device where the workaround should be applied * * Access to the PCIe slave port that are not 32-bit aligned will result @@ -552,7 +552,7 @@ static const struct of_device_id of_dra7xx_pcie_match[] = { * * To avoid this issue set PCIE_SS1_AXI2OCP_LEGACY_MODE_ENABLE to 1. */ -static int dra7xx_pcie_ep_unaligned_memaccess(struct device *dev) +static int dra7xx_pcie_unaligned_memaccess(struct device *dev) { int ret; struct device_node *np = dev->of_node; @@ -704,6 +704,11 @@ static int __init dra7xx_pcie_probe(struct platform_device *pdev) dra7xx_pcie_writel(dra7xx, PCIECTRL_TI_CONF_DEVICE_TYPE, DEVICE_TYPE_RC); + + ret = dra7xx_pcie_unaligned_memaccess(dev); + if (ret) + dev_err(dev, "WA for Errata i870 not applied\n"); + ret = dra7xx_add_pcie_port(dra7xx, pdev); if (ret < 0) goto err_gpio; @@ -717,7 +722,7 @@ static int __init dra7xx_pcie_probe(struct platform_device *pdev) dra7xx_pcie_writel(dra7xx, PCIECTRL_TI_CONF_DEVICE_TYPE, DEVICE_TYPE_EP); - ret = dra7xx_pcie_ep_unaligned_memaccess(dev); + ret = dra7xx_pcie_unaligned_memaccess(dev); if (ret) goto err_gpio; diff --git a/drivers/pci/controller/dwc/pci-imx6.c b/drivers/pci/controller/dwc/pci-imx6.c index 4a9a673b47776..975050a694947 100644 --- a/drivers/pci/controller/dwc/pci-imx6.c +++ b/drivers/pci/controller/dwc/pci-imx6.c @@ -80,8 +80,6 @@ struct imx6_pcie { #define PCIE_PL_PFLR_FORCE_LINK (1 << 15) #define PCIE_PHY_DEBUG_R0 (PL_OFFSET + 0x28) #define 
PCIE_PHY_DEBUG_R1 (PL_OFFSET + 0x2c) -#define PCIE_PHY_DEBUG_R1_XMLH_LINK_IN_TRAINING (1 << 29) -#define PCIE_PHY_DEBUG_R1_XMLH_LINK_UP (1 << 4) #define PCIE_PHY_CTRL (PL_OFFSET + 0x114) #define PCIE_PHY_CTRL_DATA_LOC 0 @@ -641,12 +639,6 @@ static int imx6_pcie_host_init(struct pcie_port *pp) return 0; } -static int imx6_pcie_link_up(struct dw_pcie *pci) -{ - return dw_pcie_readl_dbi(pci, PCIE_PHY_DEBUG_R1) & - PCIE_PHY_DEBUG_R1_XMLH_LINK_UP; -} - static const struct dw_pcie_host_ops imx6_pcie_host_ops = { .host_init = imx6_pcie_host_init, }; @@ -679,7 +671,7 @@ static int imx6_add_pcie_port(struct imx6_pcie *imx6_pcie, } static const struct dw_pcie_ops dw_pcie_ops = { - .link_up = imx6_pcie_link_up, + /* No special ops needed, but pcie-designware still expects this struct */ }; static int imx6_pcie_probe(struct platform_device *pdev) diff --git a/drivers/pci/controller/dwc/pci-layerscape.c b/drivers/pci/controller/dwc/pci-layerscape.c index 3724d3ef7008e..7aa9a82b7ebd6 100644 --- a/drivers/pci/controller/dwc/pci-layerscape.c +++ b/drivers/pci/controller/dwc/pci-layerscape.c @@ -88,7 +88,7 @@ static void ls_pcie_disable_outbound_atus(struct ls_pcie *pcie) int i; for (i = 0; i < PCIE_IATU_NUM; i++) - dw_pcie_disable_atu(pcie->pci, DW_PCIE_REGION_OUTBOUND, i); + dw_pcie_disable_atu(pcie->pci, i, DW_PCIE_REGION_OUTBOUND); } static int ls1021_pcie_link_up(struct dw_pcie *pci) diff --git a/drivers/pci/controller/dwc/pcie-designware-ep.c b/drivers/pci/controller/dwc/pcie-designware-ep.c index 1e7b02221eac9..de8635af4cde2 100644 --- a/drivers/pci/controller/dwc/pcie-designware-ep.c +++ b/drivers/pci/controller/dwc/pcie-designware-ep.c @@ -440,7 +440,6 @@ int dw_pcie_ep_raise_msix_irq(struct dw_pcie_ep *ep, u8 func_no, tbl_offset = dw_pcie_readl_dbi(pci, reg); bir = (tbl_offset & PCI_MSIX_TABLE_BIR); tbl_offset &= PCI_MSIX_TABLE_OFFSET; - tbl_offset >>= 3; reg = PCI_BASE_ADDRESS_0 + (4 * bir); bar_addr_upper = 0; diff --git 
a/drivers/pci/controller/dwc/pcie-designware-host.c b/drivers/pci/controller/dwc/pcie-designware-host.c index 29a05759a2942..0fa9e8fdce66e 100644 --- a/drivers/pci/controller/dwc/pcie-designware-host.c +++ b/drivers/pci/controller/dwc/pcie-designware-host.c @@ -99,9 +99,6 @@ irqreturn_t dw_handle_msi_irq(struct pcie_port *pp) (i * MAX_MSI_IRQS_PER_CTRL) + pos); generic_handle_irq(irq); - dw_pcie_wr_own_conf(pp, PCIE_MSI_INTR0_STATUS + - (i * MSI_REG_CTRL_BLOCK_SIZE), - 4, 1 << pos); pos++; } } @@ -168,8 +165,8 @@ static void dw_pci_bottom_mask(struct irq_data *data) bit = data->hwirq % MAX_MSI_IRQS_PER_CTRL; pp->irq_status[ctrl] &= ~(1 << bit); - dw_pcie_wr_own_conf(pp, PCIE_MSI_INTR0_ENABLE + res, 4, - pp->irq_status[ctrl]); + dw_pcie_wr_own_conf(pp, PCIE_MSI_INTR0_MASK + res, 4, + ~pp->irq_status[ctrl]); } raw_spin_unlock_irqrestore(&pp->lock, flags); @@ -191,8 +188,8 @@ static void dw_pci_bottom_unmask(struct irq_data *data) bit = data->hwirq % MAX_MSI_IRQS_PER_CTRL; pp->irq_status[ctrl] |= 1 << bit; - dw_pcie_wr_own_conf(pp, PCIE_MSI_INTR0_ENABLE + res, 4, - pp->irq_status[ctrl]); + dw_pcie_wr_own_conf(pp, PCIE_MSI_INTR0_MASK + res, 4, + ~pp->irq_status[ctrl]); } raw_spin_unlock_irqrestore(&pp->lock, flags); @@ -200,13 +197,22 @@ static void dw_pci_bottom_unmask(struct irq_data *data) static void dw_pci_bottom_ack(struct irq_data *d) { - struct msi_desc *msi = irq_data_get_msi_desc(d); - struct pcie_port *pp; + struct pcie_port *pp = irq_data_get_irq_chip_data(d); + unsigned int res, bit, ctrl; + unsigned long flags; + + ctrl = d->hwirq / MAX_MSI_IRQS_PER_CTRL; + res = ctrl * MSI_REG_CTRL_BLOCK_SIZE; + bit = d->hwirq % MAX_MSI_IRQS_PER_CTRL; + + raw_spin_lock_irqsave(&pp->lock, flags); - pp = msi_desc_to_pci_sysdata(msi); + dw_pcie_wr_own_conf(pp, PCIE_MSI_INTR0_STATUS + res, 4, 1 << bit); if (pp->ops->msi_irq_ack) pp->ops->msi_irq_ack(d->hwirq, pp); + + raw_spin_unlock_irqrestore(&pp->lock, flags); } static struct irq_chip dw_pci_msi_bottom_irq_chip = { @@ 
-658,10 +664,15 @@ void dw_pcie_setup_rc(struct pcie_port *pp) num_ctrls = pp->num_vectors / MAX_MSI_IRQS_PER_CTRL; /* Initialize IRQ Status array */ - for (ctrl = 0; ctrl < num_ctrls; ctrl++) - dw_pcie_rd_own_conf(pp, PCIE_MSI_INTR0_ENABLE + + for (ctrl = 0; ctrl < num_ctrls; ctrl++) { + dw_pcie_wr_own_conf(pp, PCIE_MSI_INTR0_MASK + (ctrl * MSI_REG_CTRL_BLOCK_SIZE), - 4, &pp->irq_status[ctrl]); + 4, ~0); + dw_pcie_wr_own_conf(pp, PCIE_MSI_INTR0_ENABLE + + (ctrl * MSI_REG_CTRL_BLOCK_SIZE), + 4, ~0); + pp->irq_status[ctrl] = 0; + } /* Setup RC BARs */ dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_0, 0x00000004); diff --git a/drivers/pci/controller/pcie-cadence-ep.c b/drivers/pci/controller/pcie-cadence-ep.c index 9e87dd7f9ac38..6692654798d44 100644 --- a/drivers/pci/controller/pcie-cadence-ep.c +++ b/drivers/pci/controller/pcie-cadence-ep.c @@ -258,7 +258,6 @@ static void cdns_pcie_ep_assert_intx(struct cdns_pcie_ep *ep, u8 fn, u8 intx, bool is_asserted) { struct cdns_pcie *pcie = &ep->pcie; - u32 r = ep->max_regions - 1; u32 offset; u16 status; u8 msg_code; @@ -268,8 +267,8 @@ static void cdns_pcie_ep_assert_intx(struct cdns_pcie_ep *ep, u8 fn, /* Set the outbound region if needed. */ if (unlikely(ep->irq_pci_addr != CDNS_PCIE_EP_IRQ_PCI_ADDR_LEGACY || ep->irq_pci_fn != fn)) { - /* Last region was reserved for IRQ writes. */ - cdns_pcie_set_outbound_region_for_normal_msg(pcie, fn, r, + /* First region was reserved for IRQ writes. */ + cdns_pcie_set_outbound_region_for_normal_msg(pcie, fn, 0, ep->irq_phys_addr); ep->irq_pci_addr = CDNS_PCIE_EP_IRQ_PCI_ADDR_LEGACY; ep->irq_pci_fn = fn; @@ -347,8 +346,8 @@ static int cdns_pcie_ep_send_msi_irq(struct cdns_pcie_ep *ep, u8 fn, /* Set the outbound region if needed. */ if (unlikely(ep->irq_pci_addr != (pci_addr & ~pci_addr_mask) || ep->irq_pci_fn != fn)) { - /* Last region was reserved for IRQ writes. */ - cdns_pcie_set_outbound_region(pcie, fn, ep->max_regions - 1, + /* First region was reserved for IRQ writes. 
*/ + cdns_pcie_set_outbound_region(pcie, fn, 0, false, ep->irq_phys_addr, pci_addr & ~pci_addr_mask, @@ -517,6 +516,8 @@ static int cdns_pcie_ep_probe(struct platform_device *pdev) goto free_epc_mem; } ep->irq_pci_addr = CDNS_PCIE_EP_IRQ_PCI_ADDR_NONE; + /* Reserve region 0 for IRQs */ + set_bit(0, &ep->ob_region_map); return 0; diff --git a/drivers/pci/controller/pcie-cadence.c b/drivers/pci/controller/pcie-cadence.c index 975bcdd6b5c0a..cd795f6fc1e23 100644 --- a/drivers/pci/controller/pcie-cadence.c +++ b/drivers/pci/controller/pcie-cadence.c @@ -190,14 +190,16 @@ int cdns_pcie_init_phy(struct device *dev, struct cdns_pcie *pcie) for (i = 0; i < phy_count; i++) { of_property_read_string_index(np, "phy-names", i, &name); - phy[i] = devm_phy_optional_get(dev, name); - if (IS_ERR(phy)) - return PTR_ERR(phy); - + phy[i] = devm_phy_get(dev, name); + if (IS_ERR(phy[i])) { + ret = PTR_ERR(phy[i]); + goto err_phy; + } link[i] = device_link_add(dev, &phy[i]->dev, DL_FLAG_STATELESS); if (!link[i]) { + devm_phy_put(dev, phy[i]); ret = -EINVAL; - goto err_link; + goto err_phy; } } @@ -207,13 +209,15 @@ int cdns_pcie_init_phy(struct device *dev, struct cdns_pcie *pcie) ret = cdns_pcie_enable_phy(pcie); if (ret) - goto err_link; + goto err_phy; return 0; -err_link: - while (--i >= 0) +err_phy: + while (--i >= 0) { device_link_del(link[i]); + devm_phy_put(dev, phy[i]); + } return ret; } diff --git a/drivers/pci/controller/pcie-mediatek.c b/drivers/pci/controller/pcie-mediatek.c index 861dda69f3669..c5ff6ca65eab2 100644 --- a/drivers/pci/controller/pcie-mediatek.c +++ b/drivers/pci/controller/pcie-mediatek.c @@ -337,6 +337,17 @@ static struct mtk_pcie_port *mtk_pcie_find_port(struct pci_bus *bus, { struct mtk_pcie *pcie = bus->sysdata; struct mtk_pcie_port *port; + struct pci_dev *dev = NULL; + + /* + * Walk the bus hierarchy to get the devfn value + * of the port in the root bus. 
+ */ + while (bus && bus->number) { + dev = bus->self; + bus = dev->bus; + devfn = dev->devfn; + } list_for_each_entry(port, &pcie->ports, list) if (port->slot == PCI_SLOT(devfn)) diff --git a/drivers/pci/msi.c b/drivers/pci/msi.c index f2ef896464b37..af24ed50a2452 100644 --- a/drivers/pci/msi.c +++ b/drivers/pci/msi.c @@ -958,7 +958,6 @@ static int __pci_enable_msix(struct pci_dev *dev, struct msix_entry *entries, } } } - WARN_ON(!!dev->msix_enabled); /* Check whether driver already requested for MSI irq */ if (dev->msi_enabled) { @@ -1028,8 +1027,6 @@ static int __pci_enable_msi_range(struct pci_dev *dev, int minvec, int maxvec, if (!pci_msi_supported(dev, minvec)) return -EINVAL; - WARN_ON(!!dev->msi_enabled); - /* Check whether driver already requested MSI-X irqs */ if (dev->msix_enabled) { pci_info(dev, "can't enable MSI (MSI-X already enabled)\n"); @@ -1039,6 +1036,9 @@ static int __pci_enable_msi_range(struct pci_dev *dev, int minvec, int maxvec, if (maxvec < minvec) return -ERANGE; + if (WARN_ON_ONCE(dev->msi_enabled)) + return -EINVAL; + nvec = pci_msi_vec_count(dev); if (nvec < 0) return nvec; @@ -1087,6 +1087,9 @@ static int __pci_enable_msix_range(struct pci_dev *dev, if (maxvec < minvec) return -ERANGE; + if (WARN_ON_ONCE(dev->msix_enabled)) + return -EINVAL; + for (;;) { if (affd) { nvec = irq_calc_affinity_vectors(minvec, nvec, affd); diff --git a/drivers/pci/pci-acpi.c b/drivers/pci/pci-acpi.c index c2ab577050434..f8436d1c4d45f 100644 --- a/drivers/pci/pci-acpi.c +++ b/drivers/pci/pci-acpi.c @@ -762,19 +762,33 @@ static void pci_acpi_setup(struct device *dev) return; device_set_wakeup_capable(dev, true); + /* + * For bridges that can do D3 we enable wake automatically (as + * we do for the power management itself in that case). The + * reason is that the bridge may have additional methods such as + * _DSW that need to be called. 
+ */ + if (pci_dev->bridge_d3) + device_wakeup_enable(dev); + acpi_pci_wakeup(pci_dev, false); } static void pci_acpi_cleanup(struct device *dev) { struct acpi_device *adev = ACPI_COMPANION(dev); + struct pci_dev *pci_dev = to_pci_dev(dev); if (!adev) return; pci_acpi_remove_pm_notifier(adev); - if (adev->wakeup.flags.valid) + if (adev->wakeup.flags.valid) { + if (pci_dev->bridge_d3) + device_wakeup_disable(dev); + device_set_wakeup_capable(dev, false); + } } static bool pci_acpi_bus_match(struct device *dev) diff --git a/drivers/pci/pci-driver.c b/drivers/pci/pci-driver.c index bef17c3fca67c..33f3f475e5c6b 100644 --- a/drivers/pci/pci-driver.c +++ b/drivers/pci/pci-driver.c @@ -1251,30 +1251,29 @@ static int pci_pm_runtime_suspend(struct device *dev) return 0; } - if (!pm || !pm->runtime_suspend) - return -ENOSYS; - pci_dev->state_saved = false; - error = pm->runtime_suspend(dev); - if (error) { + if (pm && pm->runtime_suspend) { + error = pm->runtime_suspend(dev); /* * -EBUSY and -EAGAIN is used to request the runtime PM core * to schedule a new suspend, so log the event only with debug * log level. 
*/ - if (error == -EBUSY || error == -EAGAIN) + if (error == -EBUSY || error == -EAGAIN) { dev_dbg(dev, "can't suspend now (%pf returned %d)\n", pm->runtime_suspend, error); - else + return error; + } else if (error) { dev_err(dev, "can't suspend (%pf returned %d)\n", pm->runtime_suspend, error); - - return error; + return error; + } } pci_fixup_device(pci_fixup_suspend, pci_dev); - if (!pci_dev->state_saved && pci_dev->current_state != PCI_D0 + if (pm && pm->runtime_suspend + && !pci_dev->state_saved && pci_dev->current_state != PCI_D0 && pci_dev->current_state != PCI_UNKNOWN) { WARN_ONCE(pci_dev->current_state != prev, "PCI PM: State of device not saved by %pF\n", @@ -1292,7 +1291,7 @@ static int pci_pm_runtime_suspend(struct device *dev) static int pci_pm_runtime_resume(struct device *dev) { - int rc; + int rc = 0; struct pci_dev *pci_dev = to_pci_dev(dev); const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL; @@ -1306,14 +1305,12 @@ static int pci_pm_runtime_resume(struct device *dev) if (!pci_dev->driver) return 0; - if (!pm || !pm->runtime_resume) - return -ENOSYS; - pci_fixup_device(pci_fixup_resume_early, pci_dev); pci_enable_wake(pci_dev, PCI_D0, false); pci_fixup_device(pci_fixup_resume, pci_dev); - rc = pm->runtime_resume(dev); + if (pm && pm->runtime_resume) + rc = pm->runtime_resume(dev); pci_dev->runtime_d3cold = false; diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c index 51b6c81671c1e..afc4680c584ff 100644 --- a/drivers/pci/pci.c +++ b/drivers/pci/pci.c @@ -5473,9 +5473,13 @@ enum pci_bus_speed pcie_get_speed_cap(struct pci_dev *dev) u32 lnkcap2, lnkcap; /* - * PCIe r4.0 sec 7.5.3.18 recommends using the Supported Link - * Speeds Vector in Link Capabilities 2 when supported, falling - * back to Max Link Speed in Link Capabilities otherwise. + * Link Capabilities 2 was added in PCIe r3.0, sec 7.8.18. The + * implementation note there recommends using the Supported Link + * Speeds Vector in Link Capabilities 2 when supported. 
+ * + * Without Link Capabilities 2, i.e., prior to PCIe r3.0, software + * should use the Supported Link Speeds field in Link Capabilities, + * where only 2.5 GT/s and 5.0 GT/s speeds were defined. */ pcie_capability_read_dword(dev, PCI_EXP_LNKCAP2, &lnkcap2); if (lnkcap2) { /* PCIe r3.0-compliant */ @@ -5491,16 +5495,10 @@ enum pci_bus_speed pcie_get_speed_cap(struct pci_dev *dev) } pcie_capability_read_dword(dev, PCI_EXP_LNKCAP, &lnkcap); - if (lnkcap) { - if (lnkcap & PCI_EXP_LNKCAP_SLS_16_0GB) - return PCIE_SPEED_16_0GT; - else if (lnkcap & PCI_EXP_LNKCAP_SLS_8_0GB) - return PCIE_SPEED_8_0GT; - else if (lnkcap & PCI_EXP_LNKCAP_SLS_5_0GB) - return PCIE_SPEED_5_0GT; - else if (lnkcap & PCI_EXP_LNKCAP_SLS_2_5GB) - return PCIE_SPEED_2_5GT; - } + if ((lnkcap & PCI_EXP_LNKCAP_SLS) == PCI_EXP_LNKCAP_SLS_5_0GB) + return PCIE_SPEED_5_0GT; + else if ((lnkcap & PCI_EXP_LNKCAP_SLS) == PCI_EXP_LNKCAP_SLS_2_5GB) + return PCIE_SPEED_2_5GT; return PCI_SPEED_UNKNOWN; } diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h index 6e0d1528d471c..3d6a1ab54f566 100644 --- a/drivers/pci/pci.h +++ b/drivers/pci/pci.h @@ -9,6 +9,8 @@ extern const unsigned char pcie_link_speed[]; extern bool pci_early_dump; +struct msi_desc; + bool pcie_cap_has_lnkctl(const struct pci_dev *dev); /* Functions internal to the PCI core code */ diff --git a/drivers/pci/pcie/aspm.c b/drivers/pci/pcie/aspm.c index 5326916715d20..f78860ce884bc 100644 --- a/drivers/pci/pcie/aspm.c +++ b/drivers/pci/pcie/aspm.c @@ -991,7 +991,7 @@ void pcie_aspm_exit_link_state(struct pci_dev *pdev) * All PCIe functions are in one slot, remove one function will remove * the whole slot, so just wait until we are the last function left. 
*/ - if (!list_is_last(&pdev->bus_list, &parent->subordinate->devices)) + if (!list_empty(&parent->subordinate->devices)) goto out; link = parent->link_state; diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c index 201f9e5ff55c0..b9dda363aea62 100644 --- a/drivers/pci/probe.c +++ b/drivers/pci/probe.c @@ -42,6 +42,70 @@ struct pci_domain_busn_res { int domain_nr; }; +#define PCI_IGNORE_MAX 8 + +static u16 devices_ignore_table[PCI_IGNORE_MAX]; +static int devices_ignore_cnt; + +static void parse_ignore_device(char *bdf_str) +{ + int fields; + unsigned int bus; + unsigned int dev; + unsigned int func; + + if (devices_ignore_cnt >= PCI_IGNORE_MAX - 1) + return; + + fields = sscanf(bdf_str, "%x:%x:%x", &bus, &dev, &func); + if (fields != 3) + return; + + devices_ignore_table[devices_ignore_cnt++] = + PCI_DEVID(bus, PCI_DEVFN(dev, func)); +} + +static int __init pci_deivces_ignore(char *str) +{ + int len; + char *start, *end; + char bdf[16]; + + devices_ignore_cnt = 0; + + while ((start = strchr(str, '('))) { + + end = strchr(start, ')'); + if (end == NULL) + break; + + len = end - start - 1; + if (len >= 16) /*invalid string*/ + break; + + memcpy((void *)bdf, (void *)(start+1), len); + bdf[len] = '\0'; + parse_ignore_device(bdf); + str = end + 1; + } + + return 1; +} +__setup("pci_devices_ignore=", pci_deivces_ignore); + +static bool device_on_ignore_list(int bus, int dev, int func) +{ + int i; + + for (i = 0; i < devices_ignore_cnt; i++) + if ((PCI_BUS_NUM(devices_ignore_table[i]) == bus) && + (PCI_SLOT(devices_ignore_table[i]) == dev) && + (PCI_FUNC(devices_ignore_table[i]) == func)) + return true; + + return false; +} + static struct resource *get_pci_domain_busn_res(int domain_nr) { struct pci_domain_busn_res *r; @@ -2442,6 +2506,11 @@ struct pci_dev *pci_scan_single_device(struct pci_bus *bus, int devfn) return dev; } + if (device_on_ignore_list(bus->number, + PCI_SLOT(devfn), + PCI_FUNC(devfn))) + return NULL; + dev = pci_scan_device(bus, devfn); if (!dev) 
return NULL; diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c index 6bc27b7fd452a..c0673a7172396 100644 --- a/drivers/pci/quirks.c +++ b/drivers/pci/quirks.c @@ -3190,7 +3190,11 @@ static void disable_igfx_irq(struct pci_dev *dev) pci_iounmap(dev, regs); } +DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x0042, disable_igfx_irq); +DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x0046, disable_igfx_irq); +DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x004a, disable_igfx_irq); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x0102, disable_igfx_irq); +DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x0106, disable_igfx_irq); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x010a, disable_igfx_irq); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x0152, disable_igfx_irq); diff --git a/drivers/pci/remove.c b/drivers/pci/remove.c index 461e7fd2756fb..e9c6b120cf451 100644 --- a/drivers/pci/remove.c +++ b/drivers/pci/remove.c @@ -25,9 +25,6 @@ static void pci_stop_dev(struct pci_dev *dev) pci_dev_assign_added(dev, false); } - - if (dev->bus->self) - pcie_aspm_exit_link_state(dev); } static void pci_destroy_dev(struct pci_dev *dev) @@ -41,6 +38,7 @@ static void pci_destroy_dev(struct pci_dev *dev) list_del(&dev->bus_list); up_write(&pci_bus_sem); + pcie_aspm_exit_link_state(dev); pci_bridge_d3_update(dev); pci_free_resources(dev); put_device(&dev->dev); diff --git a/drivers/pcmcia/ricoh.h b/drivers/pcmcia/ricoh.h index 01098c841f877..8ac7b138c0948 100644 --- a/drivers/pcmcia/ricoh.h +++ b/drivers/pcmcia/ricoh.h @@ -119,6 +119,10 @@ #define RL5C4XX_MISC_CONTROL 0x2F /* 8 bit */ #define RL5C4XX_ZV_ENABLE 0x08 +/* Misc Control 3 Register */ +#define RL5C4XX_MISC3 0x00A2 /* 16 bit */ +#define RL5C47X_MISC3_CB_CLKRUN_DIS BIT(1) + #ifdef __YENTA_H #define rl_misc(socket) ((socket)->private[0]) @@ -156,6 +160,35 @@ static void ricoh_set_zv(struct yenta_socket *socket) } } +static void ricoh_set_clkrun(struct yenta_socket *socket, bool quiet) +{ + u16 misc3; + + /* + * RL5C475II likely 
has this setting, too, however no datasheet + * is publicly available for this chip + */ + if (socket->dev->device != PCI_DEVICE_ID_RICOH_RL5C476 && + socket->dev->device != PCI_DEVICE_ID_RICOH_RL5C478) + return; + + if (socket->dev->revision < 0x80) + return; + + misc3 = config_readw(socket, RL5C4XX_MISC3); + if (misc3 & RL5C47X_MISC3_CB_CLKRUN_DIS) { + if (!quiet) + dev_dbg(&socket->dev->dev, + "CLKRUN feature already disabled\n"); + } else if (disable_clkrun) { + if (!quiet) + dev_info(&socket->dev->dev, + "Disabling CLKRUN feature\n"); + misc3 |= RL5C47X_MISC3_CB_CLKRUN_DIS; + config_writew(socket, RL5C4XX_MISC3, misc3); + } +} + static void ricoh_save_state(struct yenta_socket *socket) { rl_misc(socket) = config_readw(socket, RL5C4XX_MISC); @@ -172,6 +205,7 @@ static void ricoh_restore_state(struct yenta_socket *socket) config_writew(socket, RL5C4XX_16BIT_IO_0, rl_io(socket)); config_writew(socket, RL5C4XX_16BIT_MEM_0, rl_mem(socket)); config_writew(socket, RL5C4XX_CONFIG, rl_config(socket)); + ricoh_set_clkrun(socket, true); } @@ -197,6 +231,7 @@ static int ricoh_override(struct yenta_socket *socket) config_writew(socket, RL5C4XX_CONFIG, config); ricoh_set_zv(socket); + ricoh_set_clkrun(socket, false); return 0; } diff --git a/drivers/pcmcia/yenta_socket.c b/drivers/pcmcia/yenta_socket.c index ab3da2262f0fc..ac6a3f46b1e6c 100644 --- a/drivers/pcmcia/yenta_socket.c +++ b/drivers/pcmcia/yenta_socket.c @@ -26,7 +26,8 @@ static bool disable_clkrun; module_param(disable_clkrun, bool, 0444); -MODULE_PARM_DESC(disable_clkrun, "If PC card doesn't function properly, please try this option"); +MODULE_PARM_DESC(disable_clkrun, + "If PC card doesn't function properly, please try this option (TI and Ricoh bridges only)"); static bool isa_probe = 1; module_param(isa_probe, bool, 0444); diff --git a/drivers/perf/hisilicon/hisi_uncore_ddrc_pmu.c b/drivers/perf/hisilicon/hisi_uncore_ddrc_pmu.c index 1b10ea05a9149..69372e2bc93c7 100644 --- 
a/drivers/perf/hisilicon/hisi_uncore_ddrc_pmu.c +++ b/drivers/perf/hisilicon/hisi_uncore_ddrc_pmu.c @@ -30,8 +30,8 @@ #define DDRC_FLUX_RCMD 0x38c #define DDRC_PRE_CMD 0x3c0 #define DDRC_ACT_CMD 0x3c4 -#define DDRC_BNK_CHG 0x3c8 #define DDRC_RNK_CHG 0x3cc +#define DDRC_RW_CHG 0x3d0 #define DDRC_EVENT_CTRL 0x6C0 #define DDRC_INT_MASK 0x6c8 #define DDRC_INT_STATUS 0x6cc @@ -51,7 +51,7 @@ static const u32 ddrc_reg_off[] = { DDRC_FLUX_WR, DDRC_FLUX_RD, DDRC_FLUX_WCMD, DDRC_FLUX_RCMD, - DDRC_PRE_CMD, DDRC_ACT_CMD, DDRC_BNK_CHG, DDRC_RNK_CHG + DDRC_PRE_CMD, DDRC_ACT_CMD, DDRC_RNK_CHG, DDRC_RW_CHG }; /* diff --git a/drivers/phy/qualcomm/phy-qcom-qusb2.c b/drivers/phy/qualcomm/phy-qcom-qusb2.c index e70e425f26f50..69c92843eb3b2 100644 --- a/drivers/phy/qualcomm/phy-qcom-qusb2.c +++ b/drivers/phy/qualcomm/phy-qcom-qusb2.c @@ -231,6 +231,7 @@ static const struct qusb2_phy_cfg sdm845_phy_cfg = { .mask_core_ready = CORE_READY_STATUS, .has_pll_override = true, .autoresume_en = BIT(0), + .update_tune1_with_efuse = true, }; static const char * const qusb2_phy_vreg_names[] = { @@ -402,10 +403,10 @@ static void qusb2_phy_set_tune2_param(struct qusb2_phy *qphy) /* * Read efuse register having TUNE2/1 parameter's high nibble. - * If efuse register shows value as 0x0, or if we fail to find - * a valid efuse register settings, then use default value - * as 0xB for high nibble that we have already set while - * configuring phy. + * If efuse register shows value as 0x0 (indicating value is not + * fused), or if we fail to find a valid efuse register setting, + * then use default value for high nibble that we have already + * set while configuring the phy. 
*/ val = nvmem_cell_read(qphy->cell, NULL); if (IS_ERR(val) || !val[0]) { @@ -415,12 +416,13 @@ static void qusb2_phy_set_tune2_param(struct qusb2_phy *qphy) /* Fused TUNE1/2 value is the higher nibble only */ if (cfg->update_tune1_with_efuse) - qusb2_setbits(qphy->base, cfg->regs[QUSB2PHY_PORT_TUNE1], - val[0] << 0x4); + qusb2_write_mask(qphy->base, cfg->regs[QUSB2PHY_PORT_TUNE1], + val[0] << HSTX_TRIM_SHIFT, + HSTX_TRIM_MASK); else - qusb2_setbits(qphy->base, cfg->regs[QUSB2PHY_PORT_TUNE2], - val[0] << 0x4); - + qusb2_write_mask(qphy->base, cfg->regs[QUSB2PHY_PORT_TUNE2], + val[0] << HSTX_TRIM_SHIFT, + HSTX_TRIM_MASK); } static int qusb2_phy_set_mode(struct phy *phy, enum phy_mode mode) diff --git a/drivers/pinctrl/meson/pinctrl-meson-gxbb.c b/drivers/pinctrl/meson/pinctrl-meson-gxbb.c index 4ceb06f8a33c9..4edeb4cae72aa 100644 --- a/drivers/pinctrl/meson/pinctrl-meson-gxbb.c +++ b/drivers/pinctrl/meson/pinctrl-meson-gxbb.c @@ -830,7 +830,7 @@ static struct meson_bank meson_gxbb_periphs_banks[] = { static struct meson_bank meson_gxbb_aobus_banks[] = { /* name first last irq pullen pull dir out in */ - BANK("AO", GPIOAO_0, GPIOAO_13, 0, 13, 0, 0, 0, 16, 0, 0, 0, 16, 1, 0), + BANK("AO", GPIOAO_0, GPIOAO_13, 0, 13, 0, 16, 0, 0, 0, 0, 0, 16, 1, 0), }; static struct meson_pinctrl_data meson_gxbb_periphs_pinctrl_data = { diff --git a/drivers/pinctrl/meson/pinctrl-meson-gxl.c b/drivers/pinctrl/meson/pinctrl-meson-gxl.c index 7dae1d7bf6b0a..158f618f16957 100644 --- a/drivers/pinctrl/meson/pinctrl-meson-gxl.c +++ b/drivers/pinctrl/meson/pinctrl-meson-gxl.c @@ -807,7 +807,7 @@ static struct meson_bank meson_gxl_periphs_banks[] = { static struct meson_bank meson_gxl_aobus_banks[] = { /* name first last irq pullen pull dir out in */ - BANK("AO", GPIOAO_0, GPIOAO_9, 0, 9, 0, 0, 0, 16, 0, 0, 0, 16, 1, 0), + BANK("AO", GPIOAO_0, GPIOAO_9, 0, 9, 0, 16, 0, 0, 0, 0, 0, 16, 1, 0), }; static struct meson_pinctrl_data meson_gxl_periphs_pinctrl_data = { diff --git 
a/drivers/pinctrl/meson/pinctrl-meson.c b/drivers/pinctrl/meson/pinctrl-meson.c index 29a458da78db1..c8eff70fdb1c2 100644 --- a/drivers/pinctrl/meson/pinctrl-meson.c +++ b/drivers/pinctrl/meson/pinctrl-meson.c @@ -191,8 +191,9 @@ static int meson_pinconf_set(struct pinctrl_dev *pcdev, unsigned int pin, case PIN_CONFIG_BIAS_DISABLE: dev_dbg(pc->dev, "pin %u: disable bias\n", pin); - meson_calc_reg_and_bit(bank, pin, REG_PULL, ®, &bit); - ret = regmap_update_bits(pc->reg_pull, reg, + meson_calc_reg_and_bit(bank, pin, REG_PULLEN, ®, + &bit); + ret = regmap_update_bits(pc->reg_pullen, reg, BIT(bit), 0); if (ret) return ret; diff --git a/drivers/pinctrl/meson/pinctrl-meson8.c b/drivers/pinctrl/meson/pinctrl-meson8.c index c6d79315218fa..86466173114da 100644 --- a/drivers/pinctrl/meson/pinctrl-meson8.c +++ b/drivers/pinctrl/meson/pinctrl-meson8.c @@ -1053,7 +1053,7 @@ static struct meson_bank meson8_cbus_banks[] = { static struct meson_bank meson8_aobus_banks[] = { /* name first last irq pullen pull dir out in */ - BANK("AO", GPIOAO_0, GPIO_TEST_N, 0, 13, 0, 0, 0, 16, 0, 0, 0, 16, 1, 0), + BANK("AO", GPIOAO_0, GPIO_TEST_N, 0, 13, 0, 16, 0, 0, 0, 0, 0, 16, 1, 0), }; static struct meson_pinctrl_data meson8_cbus_pinctrl_data = { diff --git a/drivers/pinctrl/meson/pinctrl-meson8b.c b/drivers/pinctrl/meson/pinctrl-meson8b.c index bb2a30964fc69..647ad15d5c3c4 100644 --- a/drivers/pinctrl/meson/pinctrl-meson8b.c +++ b/drivers/pinctrl/meson/pinctrl-meson8b.c @@ -906,7 +906,7 @@ static struct meson_bank meson8b_cbus_banks[] = { static struct meson_bank meson8b_aobus_banks[] = { /* name first lastc irq pullen pull dir out in */ - BANK("AO", GPIOAO_0, GPIO_TEST_N, 0, 13, 0, 0, 0, 16, 0, 0, 0, 16, 1, 0), + BANK("AO", GPIOAO_0, GPIO_TEST_N, 0, 13, 0, 16, 0, 0, 0, 0, 0, 16, 1, 0), }; static struct meson_pinctrl_data meson8b_cbus_pinctrl_data = { diff --git a/drivers/pinctrl/qcom/pinctrl-spmi-mpp.c b/drivers/pinctrl/qcom/pinctrl-spmi-mpp.c index 6556dbeae65ef..ac251c62bc666 100644 --- 
a/drivers/pinctrl/qcom/pinctrl-spmi-mpp.c +++ b/drivers/pinctrl/qcom/pinctrl-spmi-mpp.c @@ -319,6 +319,8 @@ static int pmic_mpp_set_mux(struct pinctrl_dev *pctldev, unsigned function, pad->function = function; ret = pmic_mpp_write_mode_ctl(state, pad); + if (ret < 0) + return ret; val = pad->is_enabled << PMIC_MPP_REG_MASTER_EN_SHIFT; @@ -343,13 +345,12 @@ static int pmic_mpp_config_get(struct pinctrl_dev *pctldev, switch (param) { case PIN_CONFIG_BIAS_DISABLE: - arg = pad->pullup == PMIC_MPP_PULL_UP_OPEN; + if (pad->pullup != PMIC_MPP_PULL_UP_OPEN) + return -EINVAL; + arg = 1; break; case PIN_CONFIG_BIAS_PULL_UP: switch (pad->pullup) { - case PMIC_MPP_PULL_UP_OPEN: - arg = 0; - break; case PMIC_MPP_PULL_UP_0P6KOHM: arg = 600; break; @@ -364,13 +365,17 @@ static int pmic_mpp_config_get(struct pinctrl_dev *pctldev, } break; case PIN_CONFIG_BIAS_HIGH_IMPEDANCE: - arg = !pad->is_enabled; + if (pad->is_enabled) + return -EINVAL; + arg = 1; break; case PIN_CONFIG_POWER_SOURCE: arg = pad->power_source; break; case PIN_CONFIG_INPUT_ENABLE: - arg = pad->input_enabled; + if (!pad->input_enabled) + return -EINVAL; + arg = 1; break; case PIN_CONFIG_OUTPUT: arg = pad->out_value; @@ -382,7 +387,9 @@ static int pmic_mpp_config_get(struct pinctrl_dev *pctldev, arg = pad->amux_input; break; case PMIC_MPP_CONF_PAIRED: - arg = pad->paired; + if (!pad->paired) + return -EINVAL; + arg = 1; break; case PIN_CONFIG_DRIVE_STRENGTH: arg = pad->drive_strength; @@ -455,7 +462,7 @@ static int pmic_mpp_config_set(struct pinctrl_dev *pctldev, unsigned int pin, pad->dtest = arg; break; case PIN_CONFIG_DRIVE_STRENGTH: - arg = pad->drive_strength; + pad->drive_strength = arg; break; case PMIC_MPP_CONF_AMUX_ROUTE: if (arg >= PMIC_MPP_AMUX_ROUTE_ABUS4) @@ -502,6 +509,10 @@ static int pmic_mpp_config_set(struct pinctrl_dev *pctldev, unsigned int pin, if (ret < 0) return ret; + ret = pmic_mpp_write(state, pad, PMIC_MPP_REG_SINK_CTL, pad->drive_strength); + if (ret < 0) + return ret; + val = 
pad->is_enabled << PMIC_MPP_REG_MASTER_EN_SHIFT; return pmic_mpp_write(state, pad, PMIC_MPP_REG_EN_CTL, val); diff --git a/drivers/pinctrl/qcom/pinctrl-ssbi-gpio.c b/drivers/pinctrl/qcom/pinctrl-ssbi-gpio.c index f53e32a9d8fce..0e153bae322ee 100644 --- a/drivers/pinctrl/qcom/pinctrl-ssbi-gpio.c +++ b/drivers/pinctrl/qcom/pinctrl-ssbi-gpio.c @@ -260,22 +260,32 @@ static int pm8xxx_pin_config_get(struct pinctrl_dev *pctldev, switch (param) { case PIN_CONFIG_BIAS_DISABLE: - arg = pin->bias == PM8XXX_GPIO_BIAS_NP; + if (pin->bias != PM8XXX_GPIO_BIAS_NP) + return -EINVAL; + arg = 1; break; case PIN_CONFIG_BIAS_PULL_DOWN: - arg = pin->bias == PM8XXX_GPIO_BIAS_PD; + if (pin->bias != PM8XXX_GPIO_BIAS_PD) + return -EINVAL; + arg = 1; break; case PIN_CONFIG_BIAS_PULL_UP: - arg = pin->bias <= PM8XXX_GPIO_BIAS_PU_1P5_30; + if (pin->bias > PM8XXX_GPIO_BIAS_PU_1P5_30) + return -EINVAL; + arg = 1; break; case PM8XXX_QCOM_PULL_UP_STRENGTH: arg = pin->pull_up_strength; break; case PIN_CONFIG_BIAS_HIGH_IMPEDANCE: - arg = pin->disable; + if (!pin->disable) + return -EINVAL; + arg = 1; break; case PIN_CONFIG_INPUT_ENABLE: - arg = pin->mode == PM8XXX_GPIO_MODE_INPUT; + if (pin->mode != PM8XXX_GPIO_MODE_INPUT) + return -EINVAL; + arg = 1; break; case PIN_CONFIG_OUTPUT: if (pin->mode & PM8XXX_GPIO_MODE_OUTPUT) @@ -290,10 +300,14 @@ static int pm8xxx_pin_config_get(struct pinctrl_dev *pctldev, arg = pin->output_strength; break; case PIN_CONFIG_DRIVE_PUSH_PULL: - arg = !pin->open_drain; + if (pin->open_drain) + return -EINVAL; + arg = 1; break; case PIN_CONFIG_DRIVE_OPEN_DRAIN: - arg = pin->open_drain; + if (!pin->open_drain) + return -EINVAL; + arg = 1; break; default: return -EINVAL; diff --git a/drivers/pinctrl/sunxi/pinctrl-sun8i-a83t.c b/drivers/pinctrl/sunxi/pinctrl-sun8i-a83t.c index 6624499eae72f..4ada80317a3bd 100644 --- a/drivers/pinctrl/sunxi/pinctrl-sun8i-a83t.c +++ b/drivers/pinctrl/sunxi/pinctrl-sun8i-a83t.c @@ -568,7 +568,7 @@ static const struct sunxi_desc_pin 
sun8i_a83t_pins[] = { SUNXI_PIN(SUNXI_PINCTRL_PIN(H, 11), SUNXI_FUNCTION(0x0, "gpio_in"), SUNXI_FUNCTION(0x1, "gpio_out"), - SUNXI_FUNCTION_IRQ_BANK(0x6, 2, 1)), /* PH_EINT11 */ + SUNXI_FUNCTION_IRQ_BANK(0x6, 2, 11)), /* PH_EINT11 */ }; static const struct sunxi_pinctrl_desc sun8i_a83t_pinctrl_data = { diff --git a/drivers/pinctrl/sunxi/pinctrl-sunxi.c b/drivers/pinctrl/sunxi/pinctrl-sunxi.c index 4d9bf9b3e9f3e..26ebedc1f6d31 100644 --- a/drivers/pinctrl/sunxi/pinctrl-sunxi.c +++ b/drivers/pinctrl/sunxi/pinctrl-sunxi.c @@ -1079,10 +1079,9 @@ static int sunxi_pinctrl_build_state(struct platform_device *pdev) * We suppose that we won't have any more functions than pins, * we'll reallocate that later anyway */ - pctl->functions = devm_kcalloc(&pdev->dev, - pctl->ngroups, - sizeof(*pctl->functions), - GFP_KERNEL); + pctl->functions = kcalloc(pctl->ngroups, + sizeof(*pctl->functions), + GFP_KERNEL); if (!pctl->functions) return -ENOMEM; @@ -1133,8 +1132,10 @@ static int sunxi_pinctrl_build_state(struct platform_device *pdev) func_item = sunxi_pinctrl_find_function_by_name(pctl, func->name); - if (!func_item) + if (!func_item) { + kfree(pctl->functions); return -EINVAL; + } if (!func_item->groups) { func_item->groups = @@ -1142,8 +1143,10 @@ static int sunxi_pinctrl_build_state(struct platform_device *pdev) func_item->ngroups, sizeof(*func_item->groups), GFP_KERNEL); - if (!func_item->groups) + if (!func_item->groups) { + kfree(pctl->functions); return -ENOMEM; + } } func_grp = func_item->groups; diff --git a/drivers/platform/x86/Kconfig b/drivers/platform/x86/Kconfig index 0c1aa6c314f50..73eb42a94a10f 100644 --- a/drivers/platform/x86/Kconfig +++ b/drivers/platform/x86/Kconfig @@ -1235,3 +1235,6 @@ config PMC_ATOM def_bool y depends on PCI select COMMON_CLK +source "drivers/platform/x86/socwatch/Kconfig" +source "drivers/platform/x86/socwatchhv/Kconfig" +source "drivers/platform/x86/sepdk/Kconfig" diff --git a/drivers/platform/x86/Makefile 
b/drivers/platform/x86/Makefile index e6d1becf81ce8..84f429dd9958b 100644 --- a/drivers/platform/x86/Makefile +++ b/drivers/platform/x86/Makefile @@ -92,3 +92,6 @@ obj-$(CONFIG_MLX_PLATFORM) += mlx-platform.o obj-$(CONFIG_INTEL_TURBO_MAX_3) += intel_turbo_max_3.o obj-$(CONFIG_INTEL_CHTDC_TI_PWRBTN) += intel_chtdc_ti_pwrbtn.o obj-$(CONFIG_I2C_MULTI_INSTANTIATE) += i2c-multi-instantiate.o +obj-$(CONFIG_INTEL_SOCWATCH) += socwatch/ +obj-$(CONFIG_INTEL_SOCWATCH_HV) += socwatchhv/ +obj-$(CONFIG_INTEL_SEP) += sepdk/ diff --git a/drivers/platform/x86/acerhdf.c b/drivers/platform/x86/acerhdf.c index ea22591ee66fe..53dfe67807e39 100644 --- a/drivers/platform/x86/acerhdf.c +++ b/drivers/platform/x86/acerhdf.c @@ -233,6 +233,7 @@ static const struct bios_settings bios_tbl[] = { {"Gateway", "LT31", "v1.3201", 0x55, 0x58, {0x9e, 0x00}, 0}, {"Gateway", "LT31", "v1.3302", 0x55, 0x58, {0x9e, 0x00}, 0}, {"Gateway", "LT31", "v1.3303t", 0x55, 0x58, {0x9e, 0x00}, 0}, + {"Gateway", "LT31", "v1.3307", 0x55, 0x58, {0x9e, 0x00}, 0}, /* Packard Bell */ {"Packard Bell", "DOA150", "v0.3104", 0x55, 0x58, {0x21, 0x00}, 0}, {"Packard Bell", "DOA150", "v0.3105", 0x55, 0x58, {0x20, 0x00}, 0}, diff --git a/drivers/platform/x86/asus-wmi.c b/drivers/platform/x86/asus-wmi.c index 2d6e272315a82..db3556dc90d18 100644 --- a/drivers/platform/x86/asus-wmi.c +++ b/drivers/platform/x86/asus-wmi.c @@ -2231,7 +2231,8 @@ static int asus_wmi_add(struct platform_device *pdev) err = asus_wmi_backlight_init(asus); if (err && err != -ENODEV) goto fail_backlight; - } + } else + err = asus_wmi_set_devstate(ASUS_WMI_DEVID_BACKLIGHT, 2, NULL); status = wmi_install_notify_handler(asus->driver->event_guid, asus_wmi_notify, asus); diff --git a/drivers/platform/x86/intel_telemetry_debugfs.c b/drivers/platform/x86/intel_telemetry_debugfs.c index ffd0474b05311..1423fa8710fd0 100644 --- a/drivers/platform/x86/intel_telemetry_debugfs.c +++ b/drivers/platform/x86/intel_telemetry_debugfs.c @@ -951,12 +951,16 @@ static int __init 
telemetry_debugfs_init(void) debugfs_conf = (struct telemetry_debugfs_conf *)id->driver_data; err = telemetry_pltconfig_valid(); - if (err < 0) + if (err < 0) { + pr_info("Invalid pltconfig, ensure IPC1 device is enabled in BIOS\n"); return -ENODEV; + } err = telemetry_debugfs_check_evts(); - if (err < 0) + if (err < 0) { + pr_info("telemetry_debugfs_check_evts failed\n"); return -EINVAL; + } register_pm_notifier(&pm_notifier); diff --git a/drivers/platform/x86/sepdk/Kconfig b/drivers/platform/x86/sepdk/Kconfig new file mode 100755 index 0000000000000..884c5055d304f --- /dev/null +++ b/drivers/platform/x86/sepdk/Kconfig @@ -0,0 +1,54 @@ +# +# THE SEP KERNEL DRIVER UNDER LINUX* +# +config INTEL_SEP + bool "Sampling Enabling Product (SEP)" + help + SEP is a command line tool for doing hardware-based sampling using + event-based sampling (EBS). + depends on X86 || X86_64 + +config SEP + tristate "SEP kernel driver" + depends on INTEL_SEP + default m + +config SEP_ACRN + tristate "SEP kernel driver" + depends on INTEL_SEP && ACRN_VHM + default m + +config SEP_PAX + tristate "PAX kernel driver from SEP" + depends on INTEL_SEP + depends on SEP + default m + +config SEP_PER_USER_MODE + bool "Use Per User Mode on SEP" + depends on INTEL_SEP + default n + +choice + prompt "Choose log mode" + default SEP_STANDARD_MODE + depends on INTEL_SEP + help + This option allows to select logging mode. + +config SEP_STANDARD_MODE + bool "Use standard logging mode" + +config SEP_MINLOG_MODE + bool "Use min logging mode" + help + WARNING: Using minimal logging mode. + This may make troubleshooting more difficult. + +config SEP_MAXLOG_MODE + bool "Use max logging mode" + help + WARNING: Using maximal logging mode. 
+ This may increase overhead + +endchoice diff --git a/drivers/platform/x86/sepdk/Makefile b/drivers/platform/x86/sepdk/Makefile new file mode 100755 index 0000000000000..c8992312a9bb9 --- /dev/null +++ b/drivers/platform/x86/sepdk/Makefile @@ -0,0 +1,5 @@ + +obj-$(CONFIG_SEP) += sep/ +obj-$(CONFIG_SEP_PAX) += pax/ + + diff --git a/drivers/platform/x86/sepdk/inc/apic.h b/drivers/platform/x86/sepdk/inc/apic.h new file mode 100644 index 0000000000000..2b7f1c70dab5b --- /dev/null +++ b/drivers/platform/x86/sepdk/inc/apic.h @@ -0,0 +1,114 @@ +/* **************************************************************************** + * Copyright(C) 2009-2018 Intel Corporation. All Rights Reserved. + * + * This file is part of SEP Development Kit + * + * SEP Development Kit is free software; you can redistribute it + * and/or modify it under the terms of the GNU General Public License + * version 2 as published by the Free Software Foundation. + * + * SEP Development Kit is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * As a special exception, you may use this file as part of a free software + * library without restriction. Specifically, if other files instantiate + * templates or use macros or inline functions from this file, or you + * compile this file and link it with other files to produce an executable + * this file does not by itself cause the resulting executable to be + * covered by the GNU General Public License. This exception does not + * however invalidate any other reasons why the executable file might be + * covered by the GNU General Public License. 
+ * **************************************************************************** + */ + +#ifndef _APIC_H_ +#define _APIC_H_ + +#include +#include + +typedef U64 * PHYSICAL_ADDRESS; + +/** + * Data Types and Macros + */ + +/* + * APIC registers and constants + */ + +// APIC base MSR +#define DRV_APIC_BASE_MSR 0x001b + +// APIC registers +#define DRV_APIC_LCL_ID 0x0020 +#define DRV_APIC_LCL_TSKPRI 0x0080 +#define DRV_APIC_LCL_PPR 0x00a0 +#define DRV_APIC_LCL_EOI 0x00b0 +#define DRV_APIC_LCL_LDEST 0x00d0 +#define DRV_APIC_LCL_DSTFMT 0x00e0 +#define DRV_APIC_LCL_SVR 0x00f0 +#define DRV_APIC_LCL_ICR 0x0300 +#define DRV_APIC_LVT_TIMER 0x0320 +#define DRV_APIC_LVT_PMI 0x0340 +#define DRV_APIC_LVT_LINT0 0x0350 +#define DRV_APIC_LVT_LINT1 0x0360 +#define DRV_APIC_LVT_ERROR 0x0370 + +#define DRV_APIC_LCL_ID_MSR 0x802 +#define DRV_APIC_LCL_TSKPRI_MSR 0x808 +#define DRV_APIC_LCL_PPR_MSR 0x80a +#define DRV_APIC_LCL_EOI_MSR 0x80b +#define DRV_APIC_LCL_LDEST_MSR 0x80d +#define DRV_APIC_LCL_DSTFMT_MSR 0x80e +#define DRV_APIC_LCL_SVR_MSR 0x80f +#define DRV_APIC_LCL_ICR_MSR 0x830 +#define DRV_APIC_LVT_TIMER_MSR 0x832 +#define DRV_APIC_LVT_PMI_MSR 0x834 +#define DRV_APIC_LVT_LINT0_MSR 0x835 +#define DRV_APIC_LVT_LINT1_MSR 0x836 +#define DRV_APIC_LVT_ERROR_MSR 0x837 + +// masks for LVT +#define DRV_LVT_MASK 0x10000 +#define DRV_LVT_EDGE 0x00000 +#define DRV_LVT_LEVEL 0x08000 +#define DRV_LVT_EXTINT 0x00700 +#define DRV_LVT_NMI 0x00400 + +// task priorities +#define DRV_APIC_TSKPRI_LO 0x0000 +#define DRV_APIC_TSKPRI_HI 0x00f0 + +#define DRV_X2APIC_ENABLED 0xc00LL + +//// Interrupt vector for PMU overflow event +// +// Choose the highest unused IDT vector possible so that our +// callback routine runs at the highest priority allowed; +// must avoid using pre-defined vectors in, +// include/asm/irq.h +// include/asm/hw_irq.h +// include/asm/irq_vectors.h +// +// FIRST_DEVICE_VECTOR should be valid for kernels 2.6.33 and earlier +#define CPU_PERF_VECTOR DRV_LVT_NMI +// Has the APIC Been 
enabled +#define DRV_APIC_BASE_GLOBAL_ENABLED(a) ((a)&1 << 11) +#define DRV_APIC_VIRTUAL_WIRE_ENABLED(a) ((a)&0x100) + +/** + * Function Declarations + */ + +/* + * APIC control functions + */ +extern VOID APIC_Enable_Pmi(void); +extern VOID APIC_Init(PVOID param); +extern VOID APIC_Install_Interrupt_Handler(PVOID param); +extern VOID APIC_Restore_LVTPC(PVOID param); + +#endif diff --git a/drivers/platform/x86/sepdk/inc/asm_helper.h b/drivers/platform/x86/sepdk/inc/asm_helper.h new file mode 100644 index 0000000000000..fd4eabf95dd97 --- /dev/null +++ b/drivers/platform/x86/sepdk/inc/asm_helper.h @@ -0,0 +1,186 @@ +/* **************************************************************************** + * Copyright(C) 2009-2018 Intel Corporation. All Rights Reserved. + * + * This file is part of SEP Development Kit + * + * SEP Development Kit is free software; you can redistribute it + * and/or modify it under the terms of the GNU General Public License + * version 2 as published by the Free Software Foundation. + * + * SEP Development Kit is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * As a special exception, you may use this file as part of a free software + * library without restriction. Specifically, if other files instantiate + * templates or use macros or inline functions from this file, or you + * compile this file and link it with other files to produce an executable + * this file does not by itself cause the resulting executable to be + * covered by the GNU General Public License. This exception does not + * however invalidate any other reasons why the executable file might be + * covered by the GNU General Public License. 
+ * **************************************************************************** + */ + +#ifndef _ASM_HELPER_H_ +#define _ASM_HELPER_H_ + +#include + +#if KERNEL_VERSION(4, 1, 0) > LINUX_VERSION_CODE + +#include +#include + +#else + +#ifdef CONFIG_AS_CFI + +#define CFI_STARTPROC .cfi_startproc +#define CFI_ENDPROC .cfi_endproc +#define CFI_ADJUST_CFA_OFFSET .cfi_adjust_cfa_offset +#define CFI_REL_OFFSET .cfi_rel_offset +#define CFI_RESTORE .cfi_restore + +#else + +.macro cfi_ignore a = 0, b = 0, c = 0, d = 0 +.endm + +#define CFI_STARTPROC cfi_ignore +#define CFI_ENDPROC cfi_ignore +#define CFI_ADJUST_CFA_OFFSET cfi_ignore +#define CFI_REL_OFFSET cfi_ignore +#define CFI_RESTORE cfi_ignore +#endif + +#ifdef CONFIG_X86_64 + .macro SAVE_C_REGS_HELPER offset = 0 rax = 1 rcx = 1 r8910 = 1 r11 = 1 + .if \r11 + movq %r11, 6*8+\offset(%rsp) + CFI_REL_OFFSET r11, \offset + .endif + .if \r8910 + movq %r10, 7*8+\offset(%rsp) + CFI_REL_OFFSET r10, \offset + + movq %r9, 8*8+\offset(%rsp) + CFI_REL_OFFSET r9, \offset + + movq %r8, 9*8+\offset(%rsp) + CFI_REL_OFFSET r8, \offset + .endif + .if \rax + movq %rax, 10*8+\offset(%rsp) + CFI_REL_OFFSET rax, \offset + .endif + .if \rcx + movq %rcx, 11*8+\offset(%rsp) + CFI_REL_OFFSET rcx, \offset + .endif + movq %rdx, 12*8+\offset(%rsp) + CFI_REL_OFFSET rdx, \offset + + movq %rsi, 13*8+\offset(%rsp) + CFI_REL_OFFSET rsi, \offset + + movq %rdi, 14*8+\offset(%rsp) + CFI_REL_OFFSET rdi, \offset + .endm + .macro SAVE_C_REGS offset = 0 + SAVE_C_REGS_HELPER \offset, 1, 1, 1, 1 + .endm + .macro SAVE_EXTRA_REGS offset = 0 + movq %r15, 0*8+\offset(%rsp) + CFI_REL_OFFSET r15, \offset + + movq %r14, 1*8+\offset(%rsp) + CFI_REL_OFFSET r14, \offset + + movq %r13, 2*8+\offset(%rsp) + CFI_REL_OFFSET r13, \offset + + movq %r12, 3*8+\offset(%rsp) + CFI_REL_OFFSET r12, \offset + + movq %rbp, 4*8+\offset(%rsp) + CFI_REL_OFFSET rbp, \offset + + movq %rbx, 5*8+\offset(%rsp) + CFI_REL_OFFSET rbx, \offset + .endm + + .macro RESTORE_EXTRA_REGS offset = 0 + movq 
0*8+\offset(%rsp), %r15 + CFI_RESTORE r15 + movq 1*8+\offset(%rsp), %r14 + CFI_RESTORE r14 + movq 2*8+\offset(%rsp), %r13 + CFI_RESTORE r13 + movq 3*8+\offset(%rsp), %r12 + CFI_RESTORE r12 + movq 4*8+\offset(%rsp), %rbp + CFI_RESTORE rbp + movq 5*8+\offset(%rsp), %rbx + CFI_RESTORE rbx + .endm + .macro RESTORE_C_REGS_HELPER rstor_rax = 1, rstor_rcx = 1, rstor_r11 = 1, rstor_r8910 = 1, rstor_rdx = 1 + .if \rstor_r11 + movq 6*8(%rsp), %r11 + CFI_RESTORE r11 + .endif + .if \rstor_r8910 + movq 7*8(%rsp), %r10 + CFI_RESTORE r10 + movq 8*8(%rsp), %r9 + CFI_RESTORE r9 + movq 9*8(%rsp), %r8 + CFI_RESTORE r8 + .endif + .if \rstor_rax + movq 10*8(%rsp), %rax + CFI_RESTORE rax + .endif + .if \rstor_rcx + movq 11*8(%rsp), %rcx + CFI_RESTORE rcx + .endif + .if \rstor_rdx + movq 12*8(%rsp), %rdx + CFI_RESTORE rdx + .endif + movq 13*8(%rsp), %rsi + CFI_RESTORE rsi + movq 14*8(%rsp), %rdi + CFI_RESTORE rdi + .endm + .macro RESTORE_C_REGS + RESTORE_C_REGS_HELPER 1, 1, 1, 1, 1 + .endm + + .macro ALLOC_PT_GPREGS_ON_STACK addskip = 0 + subq $15*8+\addskip, %rsp + CFI_ADJUST_CFA_OFFSET 15*8+\addskip + .endm + + .macro REMOVE_PT_GPREGS_FROM_STACK addskip = 0 + addq $15*8+\addskip, %rsp + CFI_ADJUST_CFA_OFFSET - (15*8+\addskip) + .endm + + .macro SAVE_ALL + ALLOC_PT_GPREGS_ON_STACK + SAVE_C_REGS + SAVE_EXTRA_REGS + .endm + + .macro RESTORE_ALL + RESTORE_EXTRA_REGS + RESTORE_C_REGS + REMOVE_PT_GPREGS_FROM_STACK + .endm +#endif //CONFIG_X86_64 +#endif + +#endif diff --git a/drivers/platform/x86/sepdk/inc/chap.h b/drivers/platform/x86/sepdk/inc/chap.h new file mode 100644 index 0000000000000..823aa9058cd57 --- /dev/null +++ b/drivers/platform/x86/sepdk/inc/chap.h @@ -0,0 +1,31 @@ +/* **************************************************************************** + * Copyright(C) 2009-2018 Intel Corporation. All Rights Reserved. 
+ * + * This file is part of SEP Development Kit + * + * SEP Development Kit is free software; you can redistribute it + * and/or modify it under the terms of the GNU General Public License + * version 2 as published by the Free Software Foundation. + * + * SEP Development Kit is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * As a special exception, you may use this file as part of a free software + * library without restriction. Specifically, if other files instantiate + * templates or use macros or inline functions from this file, or you + * compile this file and link it with other files to produce an executable + * this file does not by itself cause the resulting executable to be + * covered by the GNU General Public License. This exception does not + * however invalidate any other reasons why the executable file might be + * covered by the GNU General Public License. + * **************************************************************************** + */ + +#ifndef _CHAP_H_ +#define _CHAP_H_ + +extern CS_DISPATCH_NODE chap_dispatch; + +#endif diff --git a/drivers/platform/x86/sepdk/inc/control.h b/drivers/platform/x86/sepdk/inc/control.h new file mode 100644 index 0000000000000..5a94c3ae0fed8 --- /dev/null +++ b/drivers/platform/x86/sepdk/inc/control.h @@ -0,0 +1,514 @@ +/* **************************************************************************** + * Copyright(C) 2009-2018 Intel Corporation. All Rights Reserved. + * + * This file is part of SEP Development Kit + * + * SEP Development Kit is free software; you can redistribute it + * and/or modify it under the terms of the GNU General Public License + * version 2 as published by the Free Software Foundation. 
+ * + * SEP Development Kit is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * As a special exception, you may use this file as part of a free software + * library without restriction. Specifically, if other files instantiate + * templates or use macros or inline functions from this file, or you + * compile this file and link it with other files to produce an executable + * this file does not by itself cause the resulting executable to be + * covered by the GNU General Public License. This exception does not + * however invalidate any other reasons why the executable file might be + * covered by the GNU General Public License. + * **************************************************************************** + */ + + +#ifndef _CONTROL_H_ +#define _CONTROL_H_ + +#include +#include +#if defined(DRV_IA32) +#include +#endif +#include +#if defined(DRV_IA32) +#include +#endif +#include +#include + +#include "lwpmudrv_defines.h" +#include "lwpmudrv.h" +#include "lwpmudrv_types.h" +#if defined(BUILD_CHIPSET) +#include "lwpmudrv_chipset.h" +#endif + +// large memory allocation will be used if the requested size (in bytes) is +// above this threshold +#define MAX_KMALLOC_SIZE ((1 << 17) - 1) + +// check whether Linux driver should use unlocked ioctls (not protected by BKL) +#if defined(HAVE_UNLOCKED_IOCTL) +#define DRV_USE_UNLOCKED_IOCTL +#endif +#if defined(DRV_USE_UNLOCKED_IOCTL) +#define IOCTL_OP .unlocked_ioctl +#define IOCTL_OP_TYPE long +#define IOCTL_USE_INODE +#else +#define IOCTL_OP .ioctl +#define IOCTL_OP_TYPE S32 +#define IOCTL_USE_INODE struct inode *inode, +#endif + +// Information about the state of the driver +typedef struct GLOBAL_STATE_NODE_S GLOBAL_STATE_NODE; +typedef GLOBAL_STATE_NODE * GLOBAL_STATE; +struct GLOBAL_STATE_NODE_S { + volatile S32 cpu_count; + volatile S32 
dpc_count; + + S32 num_cpus; // Number of CPUs in the system + S32 active_cpus; // Number of active CPUs - some cores can be + // deactivated by the user / admin + S32 num_em_groups; + S32 num_descriptors; + + volatile S32 current_phase; + + U32 num_modules; +}; + +// Access Macros +#define GLOBAL_STATE_num_cpus(x) ((x).num_cpus) +#define GLOBAL_STATE_active_cpus(x) ((x).active_cpus) +#define GLOBAL_STATE_cpu_count(x) ((x).cpu_count) +#define GLOBAL_STATE_dpc_count(x) ((x).dpc_count) +#define GLOBAL_STATE_num_em_groups(x) ((x).num_em_groups) +#define GLOBAL_STATE_num_descriptors(x) ((x).num_descriptors) +#define GLOBAL_STATE_current_phase(x) ((x).current_phase) +#define GLOBAL_STATE_sampler_id(x) ((x).sampler_id) +#define GLOBAL_STATE_num_modules(x) ((x).num_modules) + +/* + * + * + * CPU State data structure and access macros + * + */ +typedef struct CPU_STATE_NODE_S CPU_STATE_NODE; +typedef CPU_STATE_NODE * CPU_STATE; +struct CPU_STATE_NODE_S { + U32 apic_id; // Processor ID on the system bus + PVOID apic_linear_addr; // linear address of local apic + PVOID apic_physical_addr; // physical address of local apic + + PVOID idt_base; // local IDT base address + atomic_t in_interrupt; + +#if defined(DRV_IA32) + U64 saved_ih; // saved perfvector to restore +#endif +#if defined(DRV_EM64T) + PVOID saved_ih; // saved perfvector to restore +#endif + + U64 last_mperf; // previous value of MPERF, for calculating delta MPERF + U64 last_aperf; // previous value of APERF, for calculating delta MPERF + DRV_BOOL last_p_state_valid; // are the previous values valid? 
+ //(e.g., the first measurement does not have + // a previous value for calculating the delta + DRV_BOOL p_state_counting; //Flag to mark PMI interrupt from fixed event + + S64 *em_tables; // holds the data that is saved/restored + // during event multiplexing + U32 em_table_offset; + + struct timer_list *em_timer; + U32 current_group; + S32 trigger_count; + S32 trigger_event_num; + + DISPATCH dispatch; + PVOID lbr_area; + PVOID old_dts_buffer; + PVOID dts_buffer; + U32 dts_buffer_size; + U32 dts_buffer_offset; + U32 initial_mask; + U32 accept_interrupt; + +#if defined(BUILD_CHIPSET) + // Chipset counter stuff + U32 chipset_count_init; //flag to initialize the last MCH and ICH array + U64 last_mch_count[8]; + U64 last_ich_count[8]; + U64 last_gmch_count[MAX_CHIPSET_COUNTERS]; + U64 last_mmio_count[32]; // it's 9 now but next generation may have 29 + +#endif + + U64 *pmu_state; // holds PMU state (e.g., MSRs) that will be + // saved before and restored after collection + S32 socket_master; + S32 core_master; + S32 thr_master; + U64 num_samples; + U64 reset_mask; + U64 group_swap; + U64 last_visa_count[16]; + U16 cpu_module_num; + U16 cpu_module_master; + S32 system_master; + DRV_BOOL offlined; + U32 nmi_handled; + struct tasklet_struct nmi_tasklet; + U32 em_timer_delay; + U32 core_type; +}; + +#define CPU_STATE_apic_id(cpu) ((cpu)->apic_id) +#define CPU_STATE_apic_linear_addr(cpu) ((cpu)->apic_linear_addr) +#define CPU_STATE_apic_physical_addr(cpu) ((cpu)->apic_physical_addr) +#define CPU_STATE_idt_base(cpu) ((cpu)->idt_base) +#define CPU_STATE_in_interrupt(cpu) ((cpu)->in_interrupt) +#define CPU_STATE_saved_ih(cpu) ((cpu)->saved_ih) +#define CPU_STATE_saved_ih_hi(cpu) ((cpu)->saved_ih_hi) +#define CPU_STATE_dpc(cpu) ((cpu)->dpc) +#define CPU_STATE_em_tables(cpu) ((cpu)->em_tables) +#define CPU_STATE_em_table_offset(cpu) ((cpu)->em_table_offset) +#define CPU_STATE_pmu_state(cpu) ((cpu)->pmu_state) +#define CPU_STATE_em_dpc(cpu) ((cpu)->em_dpc) +#define 
CPU_STATE_em_timer(cpu) ((cpu)->em_timer) +#define CPU_STATE_current_group(cpu) ((cpu)->current_group) +#define CPU_STATE_trigger_count(cpu) ((cpu)->trigger_count) +#define CPU_STATE_trigger_event_num(cpu) ((cpu)->trigger_event_num) +#define CPU_STATE_dispatch(cpu) ((cpu)->dispatch) +#define CPU_STATE_lbr(cpu) ((cpu)->lbr) +#define CPU_STATE_old_dts_buffer(cpu) ((cpu)->old_dts_buffer) +#define CPU_STATE_dts_buffer(cpu) ((cpu)->dts_buffer) +#define CPU_STATE_dts_buffer_size(cpu) ((cpu)->dts_buffer_size) +#define CPU_STATE_dts_buffer_offset(cpu) ((cpu)->dts_buffer_offset) +#define CPU_STATE_initial_mask(cpu) ((cpu)->initial_mask) +#define CPU_STATE_accept_interrupt(cpu) ((cpu)->accept_interrupt) +#define CPU_STATE_msr_value(cpu) ((cpu)->msr_value) +#define CPU_STATE_msr_addr(cpu) ((cpu)->msr_addr) +#define CPU_STATE_socket_master(cpu) ((cpu)->socket_master) +#define CPU_STATE_core_master(cpu) ((cpu)->core_master) +#define CPU_STATE_thr_master(cpu) ((cpu)->thr_master) +#define CPU_STATE_num_samples(cpu) ((cpu)->num_samples) +#define CPU_STATE_reset_mask(cpu) ((cpu)->reset_mask) +#define CPU_STATE_group_swap(cpu) ((cpu)->group_swap) +#define CPU_STATE_last_mperf(cpu) ((cpu)->last_mperf) +#define CPU_STATE_last_aperf(cpu) ((cpu)->last_aperf) +#define CPU_STATE_last_p_state_valid(cpu) ((cpu)->last_p_state_valid) +#define CPU_STATE_cpu_module_num(cpu) ((cpu)->cpu_module_num) +#define CPU_STATE_cpu_module_master(cpu) ((cpu)->cpu_module_master) +#define CPU_STATE_p_state_counting(cpu) ((cpu)->p_state_counting) +#define CPU_STATE_system_master(cpu) ((cpu)->system_master) +#define CPU_STATE_offlined(cpu) ((cpu)->offlined) +#define CPU_STATE_nmi_handled(cpu) ((cpu)->nmi_handled) +#define CPU_STATE_nmi_tasklet(cpu) ((cpu)->nmi_tasklet) +#define CPU_STATE_em_timer_delay(cpu) ((cpu)->em_timer_delay) +#define CPU_STATE_core_type(cpu) ((cpu)->core_type) + +/* + * For storing data for --read/--write-msr command line options + */ +typedef struct MSR_DATA_NODE_S MSR_DATA_NODE; 
+typedef MSR_DATA_NODE * MSR_DATA; +struct MSR_DATA_NODE_S { + U64 value; // Used for emon, for read/write-msr value + U64 addr; +}; + +#define MSR_DATA_value(md) ((md)->value) +#define MSR_DATA_addr(md) ((md)->addr) + +/* + * Memory Allocation tracker + * + * Currently used to track large memory allocations + */ + +typedef struct MEM_EL_NODE_S MEM_EL_NODE; +typedef MEM_EL_NODE * MEM_EL; +struct MEM_EL_NODE_S { + PVOID address; // pointer to piece of memory we're tracking + S32 size; // size (bytes) of the piece of memory + U32 is_addr_vmalloc; + // flag to check if the memory is allocated using vmalloc +}; + +// accessors for MEM_EL defined in terms of MEM_TRACKER below + +#define MEM_EL_MAX_ARRAY_SIZE 32 // minimum is 1, nominal is 64 + +typedef struct MEM_TRACKER_NODE_S MEM_TRACKER_NODE; +typedef MEM_TRACKER_NODE * MEM_TRACKER; +struct MEM_TRACKER_NODE_S { + U16 max_size; // MAX number of elements in the array + U16 elements; // number of elements available in this array + U16 node_vmalloc; + // flag to check whether the node struct is allocated using vmalloc + U16 array_vmalloc; + // flag to check whether the list of mem el is allocated using vmalloc + MEM_EL mem; // array of large memory items we're tracking + MEM_TRACKER prev, next; // enables bi-directional scanning linked list +}; +#define MEM_TRACKER_max_size(mt) ((mt)->max_size) +#define MEM_TRACKER_node_vmalloc(mt) ((mt)->node_vmalloc) +#define MEM_TRACKER_array_vmalloc(mt) ((mt)->array_vmalloc) +#define MEM_TRACKER_elements(mt) ((mt)->elements) +#define MEM_TRACKER_mem(mt) ((mt)->mem) +#define MEM_TRACKER_prev(mt) ((mt)->prev) +#define MEM_TRACKER_next(mt) ((mt)->next) +#define MEM_TRACKER_mem_address(mt, i) ((MEM_TRACKER_mem(mt)[(i)].address)) +#define MEM_TRACKER_mem_size(mt, i) ((MEM_TRACKER_mem(mt)[(i)].size)) +#define MEM_TRACKER_mem_vmalloc(mt, i) \ + ((MEM_TRACKER_mem(mt)[(i)].is_addr_vmalloc)) + +/**************************************************************************** + ** Global State 
variables exported + ***************************************************************************/ +extern CPU_STATE pcb; +extern U64 *cpu_tsc; +extern GLOBAL_STATE_NODE driver_state; +extern MSR_DATA msr_data; +extern U32 *core_to_package_map; +extern U32 *core_to_dev_map; +extern U32 *core_to_phys_core_map; +extern U32 *core_to_thread_map; +extern U32 *threads_per_core; +extern U32 num_packages; +extern U64 *restore_bl_bypass; +extern U32 **restore_ha_direct2core; +extern U32 **restore_qpi_direct2core; +extern U32 *occupied_core_ids; +/**************************************************************************** + ** Handy Short cuts + ***************************************************************************/ + +/* + * CONTROL_THIS_CPU() + * Parameters + * None + * Returns + * CPU number of the processor being executed on + * + */ +#if !defined(DRV_SEP_ACRN_ON) +#define CONTROL_THIS_CPU() smp_processor_id() +#else +#define CONTROL_THIS_CPU() raw_smp_processor_id() +#endif + +/* + * CONTROL_THIS_RAW_CPU() + * Parameters + * None + * Returns + * CPU number of the processor being executed on + * + */ +#define CONTROL_THIS_RAW_CPU() (raw_smp_processor_id()) +/**************************************************************************** + ** Interface definitions + ***************************************************************************/ + +/* + * Execution Control Functions + */ + +extern VOID CONTROL_Invoke_Cpu(S32 cpuid, VOID (*func)(PVOID), PVOID ctx); + +/* + * @fn VOID CONTROL_Invoke_Parallel_Service(func, ctx, blocking, exclude) + * + * @param func - function to be invoked by each core in the system + * @param ctx - pointer to the parameter block for each func invocation + * @param blocking - Wait for invoked function to complete + * @param exclude - exclude the current core from executing the code + * + * @returns none + * + * @brief Service routine to handle all kinds of parallel invoke on all + * CPU calls + * + * Special Notes: + * Invoke the function 
provided in parallel in either a + * blocking/non-blocking mode. The current core may be excluded if desired. + * NOTE - Do not call this function directly from source code. Use the aliases + * CONTROL_Invoke_Parallel(), CONTROL_Invoke_Parallel_NB(), + * CONTROL_Invoke_Parallel_XS(). + * + */ +extern VOID CONTROL_Invoke_Parallel_Service(VOID (*func)(PVOID), PVOID ctx, + S32 blocking, S32 exclude); + +/* + * @fn VOID CONTROL_Invoke_Parallel(func, ctx) + * + * @param func - function to be invoked by each core in the system + * @param ctx - pointer to the parameter block for each function invocation + * + * @returns none + * + * @brief Invoke the named function in parallel. + * Wait for all the functions to complete. + * + * Special Notes: + * Invoke the function named in parallel, including the CPU + * that the control is being invoked on + * Macro built on the service routine + * + */ +#define CONTROL_Invoke_Parallel(a, b) \ + CONTROL_Invoke_Parallel_Service((a), (b), TRUE, FALSE) + +/* + * @fn VOID CONTROL_Invoke_Parallel_NB(func, ctx) + * + * @param func - function to be invoked by each core in the system + * @param ctx - pointer to the parameter block for each function invocation + * + * @returns none + * + * @brief Invoke the named function in parallel. + * DO NOT Wait for all the functions to complete. + * + * Special Notes: + * Invoke the function named in parallel, including the CPU + * that the control is being invoked on + * Macro built on the service routine + * + */ +#define CONTROL_Invoke_Parallel_NB(a, b) \ + CONTROL_Invoke_Parallel_Service((a), (b), FALSE, FALSE) + +/* + * @fn VOID CONTROL_Invoke_Parallel_XS(func, ctx) + * + * @param func - function to be invoked by each core in the system + * @param ctx - pointer to the parameter block for each function invocation + * + * @returns none + * + * @brief Invoke the named function in parallel. + * Wait for all the functions to complete. 
+ * + * Special Notes: + * Invoke the function named in parallel, excluding the CPU + * that the control is being invoked on + * Macro built on the service routine + * + */ +#define CONTROL_Invoke_Parallel_XS(a, b) \ + CONTROL_Invoke_Parallel_Service((a), (b), TRUE, TRUE) + +/* + * @fn VOID CONTROL_Memory_Tracker_Init(void) + * + * @param None + * + * @returns None + * + * @brief Initializes Memory Tracker + * + * Special Notes: + * This should only be called when the + * the driver is being loaded. + */ +extern VOID CONTROL_Memory_Tracker_Init(void); + +/* + * @fn VOID CONTROL_Memory_Tracker_Free(void) + * + * @param None + * + * @returns None + * + * @brief Frees memory used by Memory Tracker + * + * Special Notes: + * This should only be called when the + * driver is being unloaded. + */ +extern VOID CONTROL_Memory_Tracker_Free(void); + +/* + * @fn VOID CONTROL_Memory_Tracker_Compaction(void) + * + * @param None + * + * @returns None + * + * @brief Compacts the memory allocator if holes are detected + * + * Special Notes: + * At end of collection (or at other safe sync point), + * reclaim/compact space used by mem tracker + */ +extern VOID CONTROL_Memory_Tracker_Compaction(void); + +/* + * @fn PVOID CONTROL_Allocate_Memory(size) + * + * @param IN size - size of the memory to allocate + * + * @returns char* - pointer to the allocated memory block + * + * @brief Allocate and zero memory + * + * Special Notes: + * Allocate memory in the GFP_KERNEL pool. + * + * Use this if memory is to be allocated within a context where + * the allocator can block the allocation (e.g., by putting + * the caller to sleep) while it tries to free up memory to + * satisfy the request. Otherwise, if the allocation must + * occur atomically (e.g., caller cannot sleep), then use + * CONTROL_Allocate_KMemory instead. 
+ */ +extern PVOID CONTROL_Allocate_Memory(size_t size); + +/* + * @fn PVOID CONTROL_Allocate_KMemory(size) + * + * @param IN size - size of the memory to allocate + * + * @returns char* - pointer to the allocated memory block + * + * @brief Allocate and zero memory + * + * Special Notes: + * Allocate memory in the GFP_ATOMIC pool. + * + * Use this if memory is to be allocated within a context where + * the allocator cannot block the allocation (e.g., by putting + * the caller to sleep) as it tries to free up memory to + * satisfy the request. Examples include interrupt handlers, + * process context code holding locks, etc. + */ +extern PVOID CONTROL_Allocate_KMemory(size_t size); + +/* + * @fn PVOID CONTROL_Free_Memory(location) + * + * @param IN location - size of the memory to allocate + * + * @returns pointer to the allocated memory block + * + * @brief Frees the memory block + * + * Special Notes: + * Does not try to free memory if fed with a NULL pointer + * Expected usage: + * ptr = CONTROL_Free_Memory(ptr); + */ +extern PVOID CONTROL_Free_Memory(PVOID location); + +#endif diff --git a/drivers/platform/x86/sepdk/inc/core2.h b/drivers/platform/x86/sepdk/inc/core2.h new file mode 100644 index 0000000000000..8a6c0835a6231 --- /dev/null +++ b/drivers/platform/x86/sepdk/inc/core2.h @@ -0,0 +1,49 @@ +/* **************************************************************************** + * Copyright(C) 2009-2018 Intel Corporation. All Rights Reserved. + * + * This file is part of SEP Development Kit + * + * SEP Development Kit is free software; you can redistribute it + * and/or modify it under the terms of the GNU General Public License + * version 2 as published by the Free Software Foundation. + * + * SEP Development Kit is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * As a special exception, you may use this file as part of a free software + * library without restriction. Specifically, if other files instantiate + * templates or use macros or inline functions from this file, or you + * compile this file and link it with other files to produce an executable + * this file does not by itself cause the resulting executable to be + * covered by the GNU General Public License. This exception does not + * however invalidate any other reasons why the executable file might be + * covered by the GNU General Public License. + * **************************************************************************** + */ + +#ifndef _CORE2_H_ +#define _CORE2_H_ + +#include "msrdefs.h" + +extern DISPATCH_NODE core2_dispatch; +extern DISPATCH_NODE corei7_dispatch; +extern DISPATCH_NODE corei7_dispatch_nehalem; +extern DISPATCH_NODE corei7_dispatch_htoff_mode; +extern DISPATCH_NODE corei7_dispatch_2; +extern DISPATCH_NODE corei7_dispatch_htoff_mode_2; + +#define CORE2UNC_BLBYPASS_BITMASK 0x00000001 +#define CORE2UNC_DISABLE_BL_BYPASS_MSR 0x39C + +#if defined(DRV_IA32) +#define CORE2_LBR_DATA_BITS 32 +#else +#define CORE2_LBR_DATA_BITS 48 +#endif + +#define CORE2_LBR_BITMASK ((1ULL << CORE2_LBR_DATA_BITS) - 1) + +#endif diff --git a/drivers/platform/x86/sepdk/inc/cpumon.h b/drivers/platform/x86/sepdk/inc/cpumon.h new file mode 100644 index 0000000000000..0ce584c1c805b --- /dev/null +++ b/drivers/platform/x86/sepdk/inc/cpumon.h @@ -0,0 +1,53 @@ +/* **************************************************************************** + * Copyright(C) 2009-2018 Intel Corporation. All Rights Reserved. + * + * This file is part of SEP Development Kit + * + * SEP Development Kit is free software; you can redistribute it + * and/or modify it under the terms of the GNU General Public License + * version 2 as published by the Free Software Foundation. 
+ * + * SEP Development Kit is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * As a special exception, you may use this file as part of a free software + * library without restriction. Specifically, if other files instantiate + * templates or use macros or inline functions from this file, or you + * compile this file and link it with other files to produce an executable + * this file does not by itself cause the resulting executable to be + * covered by the GNU General Public License. This exception does not + * however invalidate any other reasons why the executable file might be + * covered by the GNU General Public License. + * **************************************************************************** + */ + +#ifndef _CPUMON_H_ +#define _CPUMON_H_ + +#include +#include "lwpmudrv_defines.h" + +/* + * Defines + */ + +/** + * Function Declarations + */ + +/* + * CPUMON control functions + */ + +extern VOID CPUMON_Install_Cpuhooks(void); +extern VOID CPUMON_Remove_Cpuhooks(void); +#if defined(DRV_CPU_HOTPLUG) +extern DRV_BOOL CPUMON_is_Online_Allowed(void); +extern DRV_BOOL CPUMON_is_Offline_Allowed(void); +extern VOID CPUMON_Online_Cpu(PVOID parm); +extern VOID CPUMON_Offline_Cpu(PVOID parm); +#endif + +#endif diff --git a/drivers/platform/x86/sepdk/inc/ecb_iterators.h b/drivers/platform/x86/sepdk/inc/ecb_iterators.h new file mode 100644 index 0000000000000..10527535925fd --- /dev/null +++ b/drivers/platform/x86/sepdk/inc/ecb_iterators.h @@ -0,0 +1,581 @@ +/* **************************************************************************** + * Copyright(C) 2009-2018 Intel Corporation. All Rights Reserved. 
+ * + * This file is part of SEP Development Kit + * + * SEP Development Kit is free software; you can redistribute it + * and/or modify it under the terms of the GNU General Public License + * version 2 as published by the Free Software Foundation. + * + * SEP Development Kit is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * As a special exception, you may use this file as part of a free software + * library without restriction. Specifically, if other files instantiate + * templates or use macros or inline functions from this file, or you + * compile this file and link it with other files to produce an executable + * this file does not by itself cause the resulting executable to be + * covered by the GNU General Public License. This exception does not + * however invalidate any other reasons why the executable file might be + * covered by the GNU General Public License. 
+ * **************************************************************************** + */ + +#ifndef _ECB_ITERATORS_H_ +#define _ECB_ITERATORS_H_ + +#if defined(__cplusplus) +extern "C" { +#endif + +/* + * Loop macros to walk through the event control block + * Use for access only in the kernel mode + * To Do - Control access from kernel mode by a macro + */ + +#define FOR_EACH_CCCR_REG(pecb, idx) \ + { \ + U32 idx; \ + U32 this_cpu__ = CONTROL_THIS_CPU(); \ + CPU_STATE pcpu__ = &pcb[this_cpu__]; \ + U32 dev_idx = core_to_dev_map[this_cpu__]; \ + U32 cur_grp = CPU_STATE_current_group(pcpu__); \ + ECB pecb = LWPMU_DEVICE_PMU_register_data( \ + &devices[dev_idx])[cur_grp]; \ + if ((pecb)) { \ + for ((idx) = ECB_cccr_start(pecb); \ + (idx) < \ + (ECB_cccr_start(pecb) + ECB_cccr_pop(pecb)); \ + (idx)++) { \ + if (ECB_entries_reg_id((pecb), (idx)) == 0) { \ + continue; \ + } + +#define END_FOR_EACH_CCCR_REG \ + } \ + } \ + } + +#define FOR_EACH_CCCR_REG_CPU(pecb, idx, cpuid) \ + { \ + U32 idx; \ + U32 this_cpu__ = cpuid; \ + CPU_STATE pcpu__ = &pcb[this_cpu__]; \ + U32 dev_idx = core_to_dev_map[this_cpu__]; \ + U32 cur_grp = CPU_STATE_current_group(pcpu__); \ + ECB pecb = LWPMU_DEVICE_PMU_register_data( \ + &devices[dev_idx])[cur_grp]; \ + if ((pecb)) { \ + for ((idx) = ECB_cccr_start(pecb); \ + (idx) < \ + (ECB_cccr_start(pecb) + ECB_cccr_pop(pecb)); \ + (idx)++) { \ + if (ECB_entries_reg_id((pecb), (idx)) == 0) { \ + continue; \ + } + +#define END_FOR_EACH_CCCR_REG_CPU \ + } \ + } \ + } + +#define FOR_EACH_CCCR_GP_REG(pecb, idx) \ + { \ + U32 idx; \ + U32 this_cpu__ = CONTROL_THIS_CPU(); \ + CPU_STATE pcpu__ = &pcb[this_cpu__]; \ + U32 dev_idx = core_to_dev_map[this_cpu__]; \ + U32 cur_grp = CPU_STATE_current_group(pcpu__); \ + ECB pecb = LWPMU_DEVICE_PMU_register_data( \ + &devices[dev_idx])[cur_grp]; \ + if ((pecb)) { \ + for ((idx) = ECB_cccr_start(pecb); \ + (idx) < \ + (ECB_cccr_start(pecb) + ECB_cccr_pop(pecb)); \ + (idx)++) { \ + if 
(ECB_entries_is_gp_reg_get((pecb), \ + (idx)) == 0) { \ + continue; \ + } + +#define END_FOR_EACH_CCCR_GP_REG \ + } \ + } \ + } + +#define FOR_EACH_ESCR_REG(pecb, idx) \ + { \ + U32 idx; \ + U32 this_cpu__ = CONTROL_THIS_CPU(); \ + CPU_STATE pcpu__ = &pcb[this_cpu__]; \ + U32 dev_idx = core_to_dev_map[this_cpu__]; \ + U32 cur_grp = CPU_STATE_current_group(pcpu__); \ + ECB pecb = LWPMU_DEVICE_PMU_register_data( \ + &devices[dev_idx])[cur_grp]; \ + if ((pecb)) { \ + for ((idx) = ECB_escr_start(pecb); \ + (idx) < \ + (ECB_cccr_start(pecb) + ECB_cccr_pop(pecb)); \ + (idx)++) { \ + if (ECB_entries_reg_id((pecb), (idx)) == 0) { \ + continue; \ + } + +#define END_FOR_EACH_ESCR_REG \ + } \ + } \ + } + +#define FOR_EACH_ESCR_REG_CPU(pecb, idx, cpuid) \ + { \ + U32 idx; \ + U32 this_cpu__ = cpuid; \ + CPU_STATE pcpu__ = &pcb[this_cpu__]; \ + U32 dev_idx = core_to_dev_map[this_cpu__]; \ + U32 cur_grp = CPU_STATE_current_group(pcpu__); \ + ECB pecb = LWPMU_DEVICE_PMU_register_data( \ + &devices[dev_idx])[cur_grp]; \ + if ((pecb)) { \ + for ((idx) = ECB_escr_start(pecb); \ + (idx) < \ + (ECB_cccr_start(pecb) + ECB_cccr_pop(pecb)); \ + (idx)++) { \ + if (ECB_entries_reg_id((pecb), (idx)) == 0) { \ + continue; \ + } + +#define END_FOR_EACH_ESCR_REG_CPU \ + } \ + } \ + } + + +#define FOR_EACH_DATA_REG(pecb, idx) \ + { \ + U32 idx; \ + U32 this_cpu__ = CONTROL_THIS_CPU(); \ + CPU_STATE pcpu__ = &pcb[this_cpu__]; \ + U32 dev_idx = core_to_dev_map[this_cpu__]; \ + U32 cur_grp = CPU_STATE_current_group(pcpu__); \ + ECB pecb = LWPMU_DEVICE_PMU_register_data( \ + &devices[dev_idx])[cur_grp]; \ + if ((pecb)) { \ + for ((idx) = ECB_data_start(pecb); \ + (idx) < \ + (ECB_cccr_start(pecb) + ECB_cccr_pop(pecb)); \ + (idx)++) { \ + if (ECB_entries_reg_id((pecb), (idx)) == 0) { \ + continue; \ + } + +#define END_FOR_EACH_DATA_REG \ + } \ + } \ + } + +#define FOR_EACH_DATA_REG_CPU(pecb, idx, cpuid) \ + { \ + U32 idx; \ + U32 this_cpu__ = cpuid; \ + CPU_STATE pcpu__ = &pcb[this_cpu__]; \ + U32 
dev_idx = core_to_dev_map[this_cpu__]; \ + U32 cur_grp = CPU_STATE_current_group(pcpu__); \ + ECB pecb = LWPMU_DEVICE_PMU_register_data( \ + &devices[dev_idx])[cur_grp]; \ + if ((pecb)) { \ + for ((idx) = ECB_data_start(pecb); \ + (idx) < \ + (ECB_data_start(pecb) + ECB_data_pop(pecb)); \ + (idx)++) { \ + if (ECB_entries_reg_id((pecb), (idx)) == 0) { \ + continue; \ + } + +#define END_FOR_EACH_DATA_REG_CPU \ + } \ + } \ + } + +#define FOR_EACH_DATA_REG_UNC(pecb, device_idx, idx) \ + { \ + U32 idx; \ + U32 cpu = CONTROL_THIS_CPU(); \ + U32 pkg = core_to_package_map[cpu]; \ + U32 cur_grp = \ + LWPMU_DEVICE_cur_group(&devices[(device_idx)])[(pkg)]; \ + ECB pecb = LWPMU_DEVICE_PMU_register_data( \ + &devices[(device_idx)])[cur_grp]; \ + if ((pecb)) { \ + for ((idx) = ECB_data_start(pecb); \ + (idx) < \ + (ECB_cccr_start(pecb) + ECB_cccr_pop(pecb)); \ + (idx)++) { \ + if (ECB_entries_reg_id((pecb), (idx)) == 0) { \ + continue; \ + } + +#define END_FOR_EACH_DATA_REG_UNC \ + } \ + } \ + } + +#define FOR_EACH_DATA_REG_UNC_VER2(pecb, i, idx) \ + { \ + U32 idx; \ + if ((pecb)) { \ + for ((idx) = ECB_data_start(pecb); \ + (idx) < \ + ECB_data_start(pecb) + ECB_data_pop(pecb); \ + (idx)++) { \ + if (ECB_entries_reg_id((pecb), (idx)) == 0) { \ + continue; \ + } + +#define END_FOR_EACH_DATA_REG_UNC_VER2 \ + } \ + } \ + } + +#define FOR_EACH_DATA_GP_REG(pecb, idx) \ + { \ + U32 idx; \ + U32 this_cpu__ = CONTROL_THIS_CPU(); \ + CPU_STATE pcpu__ = &pcb[this_cpu__]; \ + U32 dev_idx = core_to_dev_map[this_cpu__]; \ + U32 cur_grp = CPU_STATE_current_group(pcpu__); \ + ECB pecb = LWPMU_DEVICE_PMU_register_data( \ + &devices[dev_idx])[cur_grp]; \ + if ((pecb)) { \ + for ((idx) = ECB_data_start(pecb); \ + (idx) < \ + ECB_data_start(pecb) + ECB_data_pop(pecb); \ + (idx)++) { \ + if (ECB_entries_is_gp_reg_get((pecb), \ + (idx)) == 0) { \ + continue; \ + } + +#define END_FOR_EACH_DATA_GP_REG \ + } \ + } \ + } + +#define FOR_EACH_DATA_GENERIC_REG(pecb, idx) \ + { \ + U32 idx; \ + U32 
this_cpu__ = CONTROL_THIS_CPU(); \ + CPU_STATE pcpu__ = &pcb[this_cpu__]; \ + U32 dev_idx = core_to_dev_map[this_cpu__]; \ + U32 cur_grp = CPU_STATE_current_group(pcpu__); \ + ECB pecb = LWPMU_DEVICE_PMU_register_data( \ + &devices[dev_idx])[cur_grp]; \ + if ((pecb)) { \ + for ((idx) = ECB_data_start(pecb); \ + (idx) < \ + ECB_data_start(pecb) + ECB_data_pop(pecb); \ + (idx)++) { \ + if (ECB_entries_is_generic_reg_get( \ + (pecb), (idx)) == 0) { \ + continue; \ + } + +#define END_FOR_EACH_DATA_GENERIC_REG \ + } \ + } \ + } + +#define FOR_EACH_REG_ENTRY(pecb, idx) \ + { \ + U32 idx; \ + U32 this_cpu__ = CONTROL_THIS_CPU(); \ + CPU_STATE pcpu__ = &pcb[this_cpu__]; \ + U32 dev_idx = core_to_dev_map[this_cpu__]; \ + U32 cur_grp = CPU_STATE_current_group(pcpu__); \ + ECB pecb = LWPMU_DEVICE_PMU_register_data( \ + &devices[dev_idx])[cur_grp]; \ + if ((pecb)) { \ + for ((idx) = 0; (idx) < ECB_num_entries(pecb); \ + (idx)++) { \ + if (ECB_entries_reg_id((pecb), (idx)) == 0) { \ + continue; \ + } + +#define END_FOR_EACH_REG_ENTRY \ + } \ + } \ + } + +#define FOR_EACH_REG_ENTRY_UNC(pecb, device_idx, idx) \ + { \ + U32 idx; \ + U32 cpu = CONTROL_THIS_CPU(); \ + U32 pkg = core_to_package_map[cpu]; \ + U32 cur_grp = \ + LWPMU_DEVICE_cur_group(&devices[(device_idx)])[(pkg)]; \ + ECB pecb = LWPMU_DEVICE_PMU_register_data( \ + &devices[(device_idx)])[(cur_grp)]; \ + if ((pecb)) { \ + for ((idx) = 0; (idx) < ECB_num_entries(pecb); \ + (idx)++) { \ + if (ECB_entries_reg_id((pecb), (idx)) == 0) { \ + continue; \ + } + +#define END_FOR_EACH_REG_ENTRY_UNC \ + } \ + } \ + } + +#define FOR_EACH_PCI_DATA_REG(pecb, i, device_idx, offset_delta) \ + { \ + U32 i = 0; \ + U32 cpu = CONTROL_THIS_CPU(); \ + U32 pkg = core_to_package_map[cpu]; \ + U32 cur_grp = \ + LWPMU_DEVICE_cur_group(&devices[(device_idx)])[(pkg)]; \ + ECB pecb = LWPMU_DEVICE_PMU_register_data( \ + &devices[(device_idx)])[(cur_grp)]; \ + if ((pecb)) { \ + for ((i) = ECB_data_start(pecb); \ + (i) < ECB_data_start(pecb) + 
ECB_data_pop(pecb); \ + (i)++) { \ + if (ECB_entries_reg_offset((pecb), (i)) == \ + 0) { \ + continue; \ + } \ + (offset_delta) = \ + (ECB_entries_reg_offset(pecb, i) - \ + DRV_PCI_DEVICE_ENTRY_base_offset_for_mmio( \ + &ECB_pcidev_entry_node( \ + pecb))); + +#define END_FOR_EACH_PCI_DATA_REG \ + } \ + } \ + } + +#define FOR_EACH_PCI_DATA_REG_VER2(pecb, i, device_idx, offset_delta) \ + { \ + U32 i = 0; \ + if ((pecb)) { \ + for ((i) = ECB_data_start(pecb); \ + (i) < ECB_data_start(pecb) + ECB_data_pop(pecb); \ + (i)++) { \ + if (ECB_entries_reg_offset((pecb), (i)) == 0) {\ + continue; \ + } \ + (offset_delta) = \ + ECB_entries_reg_offset(pecb, i) - \ + DRV_PCI_DEVICE_ENTRY_base_offset_for_mmio( \ + &ECB_pcidev_entry_node(pecb)); + +#define END_FOR_EACH_PCI_DATA_REG_VER2 \ + } \ + } \ + } + +#define FOR_EACH_PCI_DATA_REG_RAW(pecb, i, device_idx) \ + { \ + U32 i = 0; \ + U32 cpu = CONTROL_THIS_CPU(); \ + U32 pkg = core_to_package_map[cpu]; \ + U32 cur_grp = \ + LWPMU_DEVICE_cur_group(&devices[(device_idx)])[(pkg)]; \ + ECB pecb = LWPMU_DEVICE_PMU_register_data( \ + &devices[(device_idx)])[(cur_grp)]; \ + if ((pecb)) { \ + for ((i) = ECB_data_start(pecb); \ + (i) < ECB_data_start(pecb) + ECB_data_pop(pecb); \ + (i)++) { \ + if (ECB_entries_reg_offset((pecb), (i)) == \ + 0) { \ + continue; \ + } + +#define END_FOR_EACH_PCI_DATA_REG_RAW \ + } \ + } \ + } + +#define FOR_EACH_PCI_CCCR_REG_RAW(pecb, i, device_idx) \ + { \ + U32 i = 0; \ + U32 cpu = CONTROL_THIS_CPU(); \ + U32 pkg = core_to_package_map[cpu]; \ + U32 cur_grp = \ + LWPMU_DEVICE_cur_group(&devices[(device_idx)])[(pkg)]; \ + ECB pecb = LWPMU_DEVICE_PMU_register_data( \ + &devices[(device_idx)])[(cur_grp)]; \ + if ((pecb)) { \ + for ((i) = ECB_cccr_start(pecb); \ + (i) < ECB_cccr_start(pecb) + ECB_cccr_pop(pecb); \ + (i)++) { \ + if (ECB_entries_reg_offset((pecb), (i)) == \ + 0) { \ + continue; \ + } + +#define END_FOR_EACH_PCI_CCCR_REG_RAW \ + } \ + } \ + } + +#define FOR_EACH_PCI_REG_RAW(pecb, i, device_idx) \ 
+ { \ + U32 i = 0; \ + U32 cpu = CONTROL_THIS_CPU(); \ + U32 pkg = core_to_package_map[cpu]; \ + U32 cur_grp = \ + LWPMU_DEVICE_cur_group(&devices[(device_idx)])[(pkg)]; \ + ECB pecb = LWPMU_DEVICE_PMU_register_data( \ + &devices[(device_idx)])[(cur_grp)]; \ + if ((pecb)) { \ + for ((i) = 0; (i) < ECB_num_entries(pecb); (i)++) { \ + if (ECB_entries_reg_offset((pecb), (i)) == \ + 0) { \ + continue; \ + } + +#define END_FOR_EACH_PCI_REG_RAW \ + } \ + } \ + } + +#define FOR_EACH_PCI_REG_RAW_GROUP(pecb, i, device_idx, cur_grp) \ + { \ + U32 i = 0; \ + ECB pecb = LWPMU_DEVICE_PMU_register_data( \ + &devices[(device_idx)])[(cur_grp)]; \ + if ((pecb)) { \ + for ((i) = 0; (i) < ECB_num_entries(pecb); (i)++) { \ + if (ECB_entries_reg_offset((pecb), (i)) == \ + 0) { \ + continue; \ + } + +#define END_FOR_EACH_PCI_REG_RAW_GROUP \ + } \ + } \ + } + +#define CHECK_SAVE_RESTORE_EVENT_INDEX(prev_ei, cur_ei, evt_index) \ + { \ + if (prev_ei == -1) { \ + prev_ei = cur_ei; \ + } \ + if (prev_ei < cur_ei) { \ + prev_ei = cur_ei; \ + evt_index++; \ + } else { \ + evt_index = 0; \ + prev_ei = cur_ei; \ + } \ + } + +#define FOR_EACH_REG_ENTRY_UNC_WRITE_MSR(pecb, device_idx, idx) \ + { \ + U32 idx; \ + U32 cpu = CONTROL_THIS_CPU(); \ + U32 pkg = core_to_package_map[cpu]; \ + U32 cur_grp = \ + LWPMU_DEVICE_cur_group(&devices[(device_idx)])[(pkg)]; \ + ECB pecb = LWPMU_DEVICE_PMU_register_data( \ + &devices[(device_idx)])[(cur_grp)]; \ + if ((pecb)) { \ + for ((idx) = 0; (idx) < ECB_num_entries(pecb); \ + (idx)++) { \ + if (ECB_entries_reg_id((pecb), (idx)) == 0) { \ + continue; \ + } + +#define END_FOR_EACH_REG_ENTRY_UNC \ + } \ + } \ + } + +#define FOR_EACH_REG_UNC_OPERATION(pecb, device_idx, idx, operation) \ + { \ + U32 idx; \ + U32 cpu = CONTROL_THIS_CPU(); \ + U32 pkg = core_to_package_map[cpu]; \ + U32 cur_grp = \ + LWPMU_DEVICE_cur_group(&devices[(device_idx)])[(pkg)]; \ + ECB pecb = LWPMU_DEVICE_PMU_register_data( \ + &devices[(device_idx)])[(cur_grp)]; \ + if ((pecb)) { \ + for 
((idx) = ECB_operations_register_start( \ + pecb, (operation)); \ + (idx) < \ + (ECB_operations_register_start(pecb, \ + (operation)) + \ + ECB_operations_register_len(pecb, (operation))); \ + (idx)++) { \ + if (ECB_entries_reg_id((pecb), (idx)) == 0) { \ + continue; \ + } + +#define END_FOR_EACH_REG_UNC_OPERATION \ + } \ + } \ + } + +#define FOR_EACH_NONEVENT_REG(pecb, idx) \ + { \ + U32 idx; \ + U32 this_cpu__ = CONTROL_THIS_CPU(); \ + CPU_STATE pcpu__ = &pcb[this_cpu__]; \ + U32 dev_idx = core_to_dev_map[this_cpu__]; \ + U32 cur_grp = CPU_STATE_current_group(pcpu__); \ + ECB pecb = LWPMU_DEVICE_PMU_register_data( \ + &devices[dev_idx])[cur_grp]; \ + if ((pecb)) { \ + for ((idx) = ECB_metric_start(pecb); \ + (idx) < \ + ECB_metric_start(pecb) + ECB_metric_pop(pecb); \ + (idx)++) { \ + if (ECB_entries_reg_id((pecb), (idx)) == 0) { \ + continue; \ + } + +#define END_FOR_EACH_NONEVENT_REG \ + } \ + } \ + } + +#define FOR_EACH_REG_CORE_OPERATION(pecb, idx, operation) \ + { \ + U32 idx; \ + U32 this_cpu__ = CONTROL_THIS_CPU(); \ + CPU_STATE pcpu__ = &pcb[this_cpu__]; \ + U32 cur_grp = CPU_STATE_current_group(pcpu__); \ + U32 dev_idx = core_to_dev_map[this_cpu__]; \ + ECB pecb = LWPMU_DEVICE_PMU_register_data( \ + &devices[dev_idx])[cur_grp]; \ + if ((pecb)) { \ + for ((idx) = ECB_operations_register_start( \ + pecb, (operation)); \ + (idx) < \ + (ECB_operations_register_start(pecb, \ + (operation)) + \ + ECB_operations_register_len(pecb, (operation))); \ + (idx)++) { \ + if (ECB_entries_reg_id((pecb), (idx)) == 0) { \ + continue; \ + } + +#define END_FOR_EACH_REG_CORE_OPERATION \ + } \ + } \ + } + +#define ECB_SECTION_REG_INDEX(pecb, idx, operation) \ + (ECB_operations_register_start(pecb, operation) + (idx)) + +#if defined(__cplusplus) +} +#endif + +#endif diff --git a/drivers/platform/x86/sepdk/inc/eventmux.h b/drivers/platform/x86/sepdk/inc/eventmux.h new file mode 100644 index 0000000000000..4a96bb18ae85d --- /dev/null +++ 
b/drivers/platform/x86/sepdk/inc/eventmux.h @@ -0,0 +1,42 @@ +/* **************************************************************************** + * Copyright(C) 2009-2018 Intel Corporation. All Rights Reserved. + * + * This file is part of SEP Development Kit + * + * SEP Development Kit is free software; you can redistribute it + * and/or modify it under the terms of the GNU General Public License + * version 2 as published by the Free Software Foundation. + * + * SEP Development Kit is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * As a special exception, you may use this file as part of a free software + * library without restriction. Specifically, if other files instantiate + * templates or use macros or inline functions from this file, or you + * compile this file and link it with other files to produce an executable + * this file does not by itself cause the resulting executable to be + * covered by the GNU General Public License. This exception does not + * however invalidate any other reasons why the executable file might be + * covered by the GNU General Public License. + * **************************************************************************** + */ + +/* + * cvs_id[] = "$Id$" + */ + +#ifndef _EVENTMUX_H_ +#define _EVENTMUX_H_ + +#include "lwpmudrv_ecb.h" +#include "lwpmudrv_types.h" + +extern VOID EVENTMUX_Start(void); + +extern VOID EVENTMUX_Initialize(void); + +extern VOID EVENTMUX_Destroy(void); + +#endif /* _EVENTMUX_H_ */ diff --git a/drivers/platform/x86/sepdk/inc/gfx.h b/drivers/platform/x86/sepdk/inc/gfx.h new file mode 100644 index 0000000000000..2bad4d7125270 --- /dev/null +++ b/drivers/platform/x86/sepdk/inc/gfx.h @@ -0,0 +1,39 @@ +/* **************************************************************************** + * Copyright(C) 2009-2018 Intel Corporation. 
All Rights Reserved. + * + * This file is part of SEP Development Kit + * + * SEP Development Kit is free software; you can redistribute it + * and/or modify it under the terms of the GNU General Public License + * version 2 as published by the Free Software Foundation. + * + * SEP Development Kit is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * As a special exception, you may use this file as part of a free software + * library without restriction. Specifically, if other files instantiate + * templates or use macros or inline functions from this file, or you + * compile this file and link it with other files to produce an executable + * this file does not by itself cause the resulting executable to be + * covered by the GNU General Public License. This exception does not + * however invalidate any other reasons why the executable file might be + * covered by the GNU General Public License. + * **************************************************************************** + */ + +#ifndef _GFX_H_ +#define _GFX_H_ + +#include "lwpmudrv_ioctl.h" + +extern OS_STATUS GFX_Read(S8 * buffer); + +extern OS_STATUS GFX_Set_Event_Code(IOCTL_ARGS arg); + +extern OS_STATUS GFX_Start(void); + +extern OS_STATUS GFX_Stop(void); + +#endif diff --git a/drivers/platform/x86/sepdk/inc/gmch.h b/drivers/platform/x86/sepdk/inc/gmch.h new file mode 100644 index 0000000000000..baa35728c4bf0 --- /dev/null +++ b/drivers/platform/x86/sepdk/inc/gmch.h @@ -0,0 +1,31 @@ +/* **************************************************************************** + * Copyright(C) 2009-2018 Intel Corporation. All Rights Reserved. 
+ * + * This file is part of SEP Development Kit + * + * SEP Development Kit is free software; you can redistribute it + * and/or modify it under the terms of the GNU General Public License + * version 2 as published by the Free Software Foundation. + * + * SEP Development Kit is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * As a special exception, you may use this file as part of a free software + * library without restriction. Specifically, if other files instantiate + * templates or use macros or inline functions from this file, or you + * compile this file and link it with other files to produce an executable + * this file does not by itself cause the resulting executable to be + * covered by the GNU General Public License. This exception does not + * however invalidate any other reasons why the executable file might be + * covered by the GNU General Public License. + * **************************************************************************** + */ + +#ifndef _GMCH_H_ +#define _GMCH_H_ + +extern CS_DISPATCH_NODE gmch_dispatch; + +#endif diff --git a/drivers/platform/x86/sepdk/inc/haswellunc_sa.h b/drivers/platform/x86/sepdk/inc/haswellunc_sa.h new file mode 100644 index 0000000000000..bd4fb6887d0cb --- /dev/null +++ b/drivers/platform/x86/sepdk/inc/haswellunc_sa.h @@ -0,0 +1,57 @@ +/* **************************************************************************** + * Copyright(C) 2009-2018 Intel Corporation. All Rights Reserved. + * + * This file is part of SEP Development Kit + * + * SEP Development Kit is free software; you can redistribute it + * and/or modify it under the terms of the GNU General Public License + * version 2 as published by the Free Software Foundation. 
+ * + * SEP Development Kit is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * As a special exception, you may use this file as part of a free software + * library without restriction. Specifically, if other files instantiate + * templates or use macros or inline functions from this file, or you + * compile this file and link it with other files to produce an executable + * this file does not by itself cause the resulting executable to be + * covered by the GNU General Public License. This exception does not + * however invalidate any other reasons why the executable file might be + * covered by the GNU General Public License. + * **************************************************************************** + */ + +#ifndef _HSWUNC_SA_H_INC_ +#define _HSWUNC_SA_H_INC_ + +/* + * Local to this architecture: Haswell uncore SA unit + * + */ +#define HSWUNC_SA_DESKTOP_DID 0x000C04 +#define HSWUNC_SA_NEXT_ADDR_OFFSET 4 +#define HSWUNC_SA_BAR_ADDR_SHIFT 32 +#define HSWUNC_SA_BAR_ADDR_MASK 0x0007FFFFFF000LL +#define HSWUNC_SA_MAX_PCI_DEVICES 16 +#define HSWUNC_SA_MAX_COUNT 0x00000000FFFFFFFFLL +#define HSWUNC_SA_MAX_COUNTERS 8 + +#define HSWUNC_SA_MCHBAR_MMIO_PAGE_SIZE (8 * 4096) +#define HSWUNC_SA_PCIEXBAR_MMIO_PAGE_SIZE (57 * 4096) +#define HSWUNC_SA_OTHER_BAR_MMIO_PAGE_SIZE 4096 +#define HSWUNC_SA_GDXCBAR_OFFSET_LO 0x5420 +#define HSWUNC_SA_GDXCBAR_OFFSET_HI 0x5424 +#define HSWUNC_SA_GDXCBAR_MASK 0x7FFFFFF000LL +#define HSWUNC_SA_CHAP_SAMPLE_DATA 0x00020000 +#define HSWUNC_SA_CHAP_STOP 0x00040000 +#define HSWUNC_SA_CHAP_CTRL_REG_OFFSET 0x0 + +#define HSWUNC_SA_PAGE_MASK 0xfffffffffffff000 +#define HSWUNC_SA_PAGE_OFFSET_MASK 0xfff +#define HSWUNC_SA_PAGE_SIZE 0x1000 + +extern DISPATCH_NODE hswunc_sa_dispatch; + +#endif diff --git a/drivers/platform/x86/sepdk/inc/jkt_unc_ha.h 
b/drivers/platform/x86/sepdk/inc/jkt_unc_ha.h new file mode 100644 index 0000000000000..aa6bf76240755 --- /dev/null +++ b/drivers/platform/x86/sepdk/inc/jkt_unc_ha.h @@ -0,0 +1,37 @@ +/* **************************************************************************** + * Copyright(C) 2009-2018 Intel Corporation. All Rights Reserved. + * + * This file is part of SEP Development Kit + * + * SEP Development Kit is free software; you can redistribute it + * and/or modify it under the terms of the GNU General Public License + * version 2 as published by the Free Software Foundation. + * + * SEP Development Kit is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * As a special exception, you may use this file as part of a free software + * library without restriction. Specifically, if other files instantiate + * templates or use macros or inline functions from this file, or you + * compile this file and link it with other files to produce an executable + * this file does not by itself cause the resulting executable to be + * covered by the GNU General Public License. This exception does not + * however invalidate any other reasons why the executable file might be + * covered by the GNU General Public License. 
+ * **************************************************************************** + */ + +#ifndef _JKTUNC_HA_H_INC_ +#define _JKTUNC_HA_H_INC_ + +#define JKTUNC_HA_DID 0x3C46 +#define JKTUNC_HA_DEVICE_NO 14 +#define JKTUNC_HA_FUNC_NO 1 +#define JKTUNC_HA_D2C_OFFSET 0x84 +#define JKTUNC_HA_D2C_BITMASK 0x00000002 +#define JKTUNC_HA_D2C_DID 0x3CA0 +#define JKTUNC_HA_D2C_FUNC_NO 0 + +#endif diff --git a/drivers/platform/x86/sepdk/inc/jkt_unc_qpill.h b/drivers/platform/x86/sepdk/inc/jkt_unc_qpill.h new file mode 100644 index 0000000000000..debde4d482526 --- /dev/null +++ b/drivers/platform/x86/sepdk/inc/jkt_unc_qpill.h @@ -0,0 +1,64 @@ +/* **************************************************************************** + * Copyright(C) 2009-2018 Intel Corporation. All Rights Reserved. + * + * This file is part of SEP Development Kit + * + * SEP Development Kit is free software; you can redistribute it + * and/or modify it under the terms of the GNU General Public License + * version 2 as published by the Free Software Foundation. + * + * SEP Development Kit is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * As a special exception, you may use this file as part of a free software + * library without restriction. Specifically, if other files instantiate + * templates or use macros or inline functions from this file, or you + * compile this file and link it with other files to produce an executable + * this file does not by itself cause the resulting executable to be + * covered by the GNU General Public License. This exception does not + * however invalidate any other reasons why the executable file might be + * covered by the GNU General Public License. 
+ * **************************************************************************** + */ + + +#ifndef _JKTUNC_QPILL_H_INC_ +#define _JKTUNC_QPILL_H_INC_ + +/* + * Local to this architecture: JKT uncore QPILL unit + * + */ +#define JKTUNC_QPILL0_DID 0x3C41 + // --- QPILL0 PerfMon DID --- B:D 1:8:2 +#define JKTUNC_QPILL_MM0_DID 0x3C86 + // --- QPILL0 PerfMon MM Config DID --- B:D 1:8:6 +#define JKTUNC_QPILL1_DID 0x3C42 + // --- QPILL1 PerfMon DID --- B:D 1:9:2 +#define JKTUNC_QPILL2_DID 0x3C44 + // --- QPILL0 PerfMon DID --- B:D 1:8:2 +#define JKTUNC_QPILL3_DID 0x3C45 + // --- QPILL0 PerfMon DID --- B:D 1:8:2 +#define JKTUNC_QPILL_MM1_DID 0x3C96 + // --- QPILL1 PerfMon MM Config DID --- B:D 1:9:6 +#define JKTUNC_QPILL_MCFG_DID 0x3C28 + // --- QPILL1 PerfMon MCFG DID --- B:D 0:5:0 +#define JKTUNC_QPILL0_D2C_DID 0x3C80 + // --- D2C QPILL Port 1 config DID B:D:F X:8:0 +#define JKTUNC_QPILL1_D2C_DID 0x3C90 + // --- D2C QPILL Port 2 config DID B:D:F X:9:0 + +#define JKTUNC_QPILL_PERF_GLOBAL_CTRL 0x391 + +#define IA32_DEBUG_CTRL 0x1D9 + +#define JKTUNC_QPILL_D2C_OFFSET 0x80 +#define JKTUNC_QPILL_D2C_BITMASK 0x00000002 +#define JKTUNC_QPILL_FUNC_NO 2 +#define JKTUNC_QPILL_D2C_FUNC_NO 0 + +extern DISPATCH_NODE jktunc_qpill_dispatch; + +#endif diff --git a/drivers/platform/x86/sepdk/inc/linuxos.h b/drivers/platform/x86/sepdk/inc/linuxos.h new file mode 100644 index 0000000000000..3e4c2c96476f0 --- /dev/null +++ b/drivers/platform/x86/sepdk/inc/linuxos.h @@ -0,0 +1,79 @@ +/* **************************************************************************** + * Copyright(C) 2009-2018 Intel Corporation. All Rights Reserved. + * + * This file is part of SEP Development Kit + * + * SEP Development Kit is free software; you can redistribute it + * and/or modify it under the terms of the GNU General Public License + * version 2 as published by the Free Software Foundation. 
+ * + * SEP Development Kit is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * As a special exception, you may use this file as part of a free software + * library without restriction. Specifically, if other files instantiate + * templates or use macros or inline functions from this file, or you + * compile this file and link it with other files to produce an executable + * this file does not by itself cause the resulting executable to be + * covered by the GNU General Public License. This exception does not + * however invalidate any other reasons why the executable file might be + * covered by the GNU General Public License. + * **************************************************************************** + */ + +#ifndef _LINUXOS_H_ +#define _LINUXOS_H_ + +// defines for options parameter of samp_load_image_notify_routine() +#define LOPTS_1ST_MODREC 0x1 +#define LOPTS_GLOBAL_MODULE 0x2 +#define LOPTS_EXE 0x4 + +#define FOR_EACH_TASK for_each_process +#if KERNEL_VERSION(3, 19, 00) <= LINUX_VERSION_CODE +#define DRV_F_DENTRY f_path.dentry +#else +#define DRV_F_DENTRY f_dentry +#endif + +#if KERNEL_VERSION(2, 6, 25) > LINUX_VERSION_CODE +#define D_PATH(vm_file, name, maxlen) \ + d_path((vm_file)->f_dentry, (vm_file)->f_vfsmnt, (name), (maxlen)) +#else +#define D_PATH(vm_file, name, maxlen) \ + d_path(&((vm_file)->f_path), (name), (maxlen)) +#endif + +#if KERNEL_VERSION(3, 7, 0) > LINUX_VERSION_CODE +#define DRV_VM_MOD_EXECUTABLE(vma) (vma->vm_flags & VM_EXECUTABLE) +#else +#define DRV_VM_MOD_EXECUTABLE(vma) (linuxos_Equal_VM_Exe_File(vma)) +#define DRV_MM_EXE_FILE_PRESENT +#endif + +#if KERNEL_VERSION(2, 6, 32) <= LINUX_VERSION_CODE +#define DRV_ALLOW_VDSO +#endif + +#if defined(DRV_IA32) +#define FIND_VMA(mm, data) find_vma((mm), (U32)(data)) +#endif +#if defined(DRV_EM64T) 
+#define FIND_VMA(mm, data) find_vma((mm), (U64)(data)) +#endif + +extern VOID LINUXOS_Install_Hooks(void); + +extern VOID LINUXOS_Uninstall_Hooks(void); + +extern OS_STATUS LINUXOS_Enum_Process_Modules(DRV_BOOL at_end); + +extern DRV_BOOL LINUXOS_Check_KVM_Guest_Process(void); +#if defined(DRV_CPU_HOTPLUG) +extern VOID LINUXOS_Register_Hotplug(void); + +extern VOID LINUXOS_Unregister_Hotplug(void); +#endif +#endif diff --git a/drivers/platform/x86/sepdk/inc/lwpmudrv.h b/drivers/platform/x86/sepdk/inc/lwpmudrv.h new file mode 100644 index 0000000000000..d682ab6321272 --- /dev/null +++ b/drivers/platform/x86/sepdk/inc/lwpmudrv.h @@ -0,0 +1,556 @@ +/* **************************************************************************** + * Copyright(C) 2009-2018 Intel Corporation. All Rights Reserved. + * + * This file is part of SEP Development Kit + * + * SEP Development Kit is free software; you can redistribute it + * and/or modify it under the terms of the GNU General Public License + * version 2 as published by the Free Software Foundation. + * + * SEP Development Kit is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * As a special exception, you may use this file as part of a free software + * library without restriction. Specifically, if other files instantiate + * templates or use macros or inline functions from this file, or you + * compile this file and link it with other files to produce an executable + * this file does not by itself cause the resulting executable to be + * covered by the GNU General Public License. This exception does not + * however invalidate any other reasons why the executable file might be + * covered by the GNU General Public License. 
+ * **************************************************************************** + */ + +#ifndef _LWPMUDRV_H_ +#define _LWPMUDRV_H_ + +#include +#include +#include +#if KERNEL_VERSION(4, 12, 0) > LINUX_VERSION_CODE +#include +#else +#include +#endif +#include + +#include "lwpmudrv_defines.h" +#include "lwpmudrv_types.h" +#include "lwpmudrv_ecb.h" +#include "lwpmudrv_version.h" +#include "lwpmudrv_struct.h" +#include "pebs.h" +#if defined(BUILD_CHIPSET) +#include "lwpmudrv_chipset.h" +#endif + +#if defined(DRV_SEP_ACRN_ON) +#include +#include +#endif + +#if defined(X86_FEATURE_KAISER) || defined(CONFIG_KAISER) || \ + defined(KAISER_HEADER_PRESENT) +#define DRV_USE_KAISER +#elif defined(X86_FEATURE_PTI) +#define DRV_USE_PTI +#endif + +/* + * Print macros for driver messages + */ + +#if defined(MYDEBUG) +#define SEP_PRINT_DEBUG(fmt, args...) \ + { \ + printk(KERN_INFO SEP_MSG_PREFIX " [DEBUG] " fmt, ##args); \ + } +#else +#define SEP_PRINT_DEBUG(fmt, args...) \ + { \ + ; \ + } +#endif + +#define SEP_PRINT(fmt, args...) \ + { \ + printk(KERN_INFO SEP_MSG_PREFIX " " fmt, ##args); \ + } + +#define SEP_PRINT_WARNING(fmt, args...) \ + { \ + printk(KERN_ALERT SEP_MSG_PREFIX " [Warning] " fmt, ##args); \ + } + +#define SEP_PRINT_ERROR(fmt, args...) \ + { \ + printk(KERN_CRIT SEP_MSG_PREFIX " [ERROR] " fmt, ##args); \ + } + +// Macro to return the thread group id +#define GET_CURRENT_TGID() (current->tgid) + +#define OVERFLOW_ARGS U64 *, U64 * + +typedef struct DRV_EVENT_MASK_NODE_S DRV_EVENT_MASK_NODE; +typedef DRV_EVENT_MASK_NODE * DRV_EVENT_MASK; + +struct DRV_EVENT_MASK_NODE_S { + U16 event_idx; // 0 <= index < MAX_EVENTS + U16 reserved1; + union { + U32 bitFields1; + struct { + U32 precise : 1; + U32 lbr_capture : 1; + U32 dear_capture : 1; + // Indicates which events need to have additional + // registers read because they are DEAR events. + U32 iear_capture : 1; + // Indicates which events need to have additional + // registers read because they are IEAR events. 
+ U32 btb_capture : 1; + // Indicates which events need to have additional + // registers read because they are BTB events. + U32 ipear_capture : 1; + // Indicates which events need to have additional + // registers read because they are IPEAR events. + U32 uncore_capture : 1; + U32 branch : 1; + // Whether event is related to branch operation or not + U32 perf_metrics_capture : 1; + // Whether the event is related to perf_metrics or not + U32 reserved : 23; + } s1; + } u1; +}; + +#define DRV_EVENT_MASK_event_idx(d) ((d)->event_idx) +#define DRV_EVENT_MASK_bitFields1(d) ((d)->u1.bitFields1) +#define DRV_EVENT_MASK_precise(d) ((d)->u1.s1.precise) +#define DRV_EVENT_MASK_lbr_capture(d) ((d)->u1.s1.lbr_capture) +#define DRV_EVENT_MASK_dear_capture(d) ((d)->u1.s1.dear_capture) +#define DRV_EVENT_MASK_iear_capture(d) ((d)->u1.s1.iear_capture) +#define DRV_EVENT_MASK_btb_capture(d) ((d)->u1.s1.btb_capture) +#define DRV_EVENT_MASK_ipear_capture(d) ((d)->u1.s1.ipear_capture) +#define DRV_EVENT_MASK_uncore_capture(d) ((d)->u1.s1.uncore_capture) +#define DRV_EVENT_MASK_branch(d) ((d)->u1.s1.branch) +#define DRV_EVENT_MASK_perf_metrics_capture(d) \ + ((d)->u1.s1.perf_metrics_capture) + +#define MAX_OVERFLOW_EVENTS 16 +/* This defines the maximum number of overflow events per interrupt. \ + * In order to reduce memory footprint, the value should be at least \ + * the number of fixed and general PMU registers. \ + * Sandybridge with HT off has 11 PMUs(3 fixed and 8 generic) + */ + +typedef struct DRV_MASKS_NODE_S DRV_MASKS_NODE; +typedef DRV_MASKS_NODE * DRV_MASKS; + +/* + * @macro DRV_EVENT_MASK_NODE_S + * @brief + * The structure is used to store overflow events when handling PMU interrupt. + * This approach should be more efficient than checking all event masks + * if there are many events to be monitored + * and only a few events among them have overflow per interrupt. 
+ */ +struct DRV_MASKS_NODE_S { + DRV_EVENT_MASK_NODE eventmasks[MAX_OVERFLOW_EVENTS]; + U8 masks_num; // 0 <= mask_num <= MAX_OVERFLOW_EVENTS +}; + +#define DRV_MASKS_masks_num(d) ((d)->masks_num) +#define DRV_MASKS_eventmasks(d) ((d)->eventmasks) + +/* + * Dispatch table for virtualized functions. + * Used to enable common functionality for different + * processor microarchitectures + */ +typedef struct DISPATCH_NODE_S DISPATCH_NODE; +typedef DISPATCH_NODE *DISPATCH; + +struct DISPATCH_NODE_S { + VOID (*init)(PVOID); + VOID (*fini)(PVOID); + VOID (*write)(PVOID); + VOID (*freeze)(PVOID); + VOID (*restart)(PVOID); + VOID (*read_data)(PVOID); + VOID (*check_overflow)(DRV_MASKS); + VOID (*swap_group)(DRV_BOOL); + U64 (*read_lbrs)(PVOID, PVOID); + VOID (*cleanup)(PVOID); + VOID (*hw_errata)(void); + VOID (*read_power)(PVOID); + U64 (*check_overflow_errata)(ECB, U32, U64); + VOID (*read_counts)(PVOID, U32); + U64 (*check_overflow_gp_errata)(ECB, U64 *); + VOID (*read_ro)(PVOID, U32, U32); + VOID (*platform_info)(PVOID); + VOID (*trigger_read)(PVOID, U32); + // Counter reads triggered/initiated by User mode timer + VOID (*scan_for_uncore)(PVOID); + VOID (*read_metrics)(PVOID); +}; + +#if defined(BUILD_CHIPSET) +/* + * Dispatch table for virtualized functions. + * Used to enable common functionality for different + * chipset types + */ +typedef struct CS_DISPATCH_NODE_S CS_DISPATCH_NODE; +typedef CS_DISPATCH_NODE *CS_DISPATCH; +struct CS_DISPATCH_NODE_S { + U32 (*init_chipset)(void); + // initialize chipset (must be called before the others!) 
+ VOID (*start_chipset)(void); // start the chipset counters + VOID (*read_counters)(PVOID); + // at interrupt time, read out the chipset counters + VOID (*stop_chipset)(void); // stop the chipset counters + VOID (*fini_chipset)(void); + // clean up resources and reset chipset state (called last) + VOID (*Trigger_Read)(void); + // GMCH counter reads triggered/initiated by User mode timer +}; +extern CS_DISPATCH cs_dispatch; +#endif + +/* + * global declarations + */ + +extern VOID **PMU_register_data; +extern VOID **desc_data; +extern U64 *prev_counter_data; +extern U64 *read_counter_info; +extern U64 total_ram; +extern U32 output_buffer_size; +extern U32 saved_buffer_size; +extern uid_t uid; +extern DRV_CONFIG drv_cfg; +extern volatile pid_t control_pid; +extern U64 *interrupt_counts; +extern EMON_BUFFER_DRIVER_HELPER emon_buffer_driver_helper; + +extern DRV_BOOL multi_pebs_enabled; +extern DRV_BOOL unc_buf_init; + +extern DRV_SETUP_INFO_NODE req_drv_setup_info; + + +/* needed for target agent support */ +extern U32 osid; +extern DRV_BOOL sched_switch_enabled; + +#if defined(BUILD_CHIPSET) +extern CHIPSET_CONFIG pma; +#endif + +extern UNCORE_TOPOLOGY_INFO_NODE uncore_topology; +extern PLATFORM_TOPOLOGY_PROG_NODE platform_topology_prog_node; +extern wait_queue_head_t wait_exit; +/* + * end of declarations + */ + +/*! 
+ * @struct LWPMU_DEVICE_NODE_S + * @brief Struct to hold fields per device + * PMU_register_data_unc - MSR info + * dispatch_unc - dispatch table + * em_groups_counts_unc - # groups + * pcfg_unc - config struct + */ +typedef struct LWPMU_DEVICE_NODE_S LWPMU_DEVICE_NODE; +typedef LWPMU_DEVICE_NODE * LWPMU_DEVICE; + +struct LWPMU_DEVICE_NODE_S { + VOID **PMU_register_data; + DISPATCH dispatch; + S32 em_groups_count; + VOID *pcfg; + U64 **unc_prev_value; + U64 ***unc_acc_value; + U64 counter_mask; + U64 num_events; + U32 num_units; + VOID *ec; + S32 *cur_group; + S32 pci_dev_node_index; + U32 device_type; + LBR lbr; + PWR pwr; + PEBS_INFO_NODE pebs_info_node; +}; + +#define LWPMU_DEVICE_PMU_register_data(dev) ((dev)->PMU_register_data) +#define LWPMU_DEVICE_dispatch(dev) ((dev)->dispatch) +#define LWPMU_DEVICE_em_groups_count(dev) ((dev)->em_groups_count) +#define LWPMU_DEVICE_pcfg(dev) ((dev)->pcfg) +#define LWPMU_DEVICE_prev_value(dev) ((dev)->unc_prev_value) +#define LWPMU_DEVICE_acc_value(dev) ((dev)->unc_acc_value) +#define LWPMU_DEVICE_counter_mask(dev) ((dev)->counter_mask) +#define LWPMU_DEVICE_num_events(dev) ((dev)->num_events) +#define LWPMU_DEVICE_num_units(dev) ((dev)->num_units) +#define LWPMU_DEVICE_ec(dev) ((dev)->ec) +#define LWPMU_DEVICE_cur_group(dev) ((dev)->cur_group) +#define LWPMU_DEVICE_pci_dev_node_index(dev) ((dev)->pci_dev_node_index) +#define LWPMU_DEVICE_device_type(dev) ((dev)->device_type) +#define LWPMU_DEVICE_lbr(dev) ((dev)->lbr) +#define LWPMU_DEVICE_pwr(dev) ((dev)->pwr) +#define LWPMU_DEVICE_pebs_dispatch(dev) ((dev)->pebs_info_node.pebs_dispatch) + +#define LWPMU_DEVICE_pebs_record_size(dev) \ + ((dev)->pebs_info_node.pebs_record_size) +#define LWPMU_DEVICE_apebs_basic_offset(dev) \ + ((dev)->pebs_info_node.apebs_basic_offset) +#define LWPMU_DEVICE_apebs_mem_offset(dev) \ + ((dev)->pebs_info_node.apebs_mem_offset) +#define LWPMU_DEVICE_apebs_gpr_offset(dev) \ + ((dev)->pebs_info_node.apebs_gpr_offset) +#define 
LWPMU_DEVICE_apebs_xmm_offset(dev) \ + ((dev)->pebs_info_node.apebs_xmm_offset) +#define LWPMU_DEVICE_apebs_lbr_offset(dev) \ + ((dev)->pebs_info_node.apebs_lbr_offset) + +extern U32 num_devices; +extern U32 cur_device; +extern LWPMU_DEVICE devices; +extern U64 *pmu_state; + +// Handy macro +#define TSC_SKEW(this_cpu) (cpu_tsc[this_cpu] - cpu_tsc[0]) + +/* + * The IDT / GDT descriptor for use in identifying code segments + */ +#if defined(DRV_EM64T) +#pragma pack(push, 1) +typedef struct _idtgdtDesc { + U16 idtgdt_limit; + PVOID idtgdt_base; +} IDTGDT_DESC; +#pragma pack(pop) + +extern IDTGDT_DESC gdt_desc; +#endif + +extern DRV_BOOL NMI_mode; +extern DRV_BOOL KVM_guest_mode; + +#if defined(DRV_SEP_ACRN_ON) +#define SBUF_MAX_SIZE (1ULL << 22) +#define SBUF_HEAD_SIZE 64 + +#define TRACE_SBUF_SIZE (4 * 1024 * 1024) +#define TRACE_ELEMENT_SIZE 32 /* byte */ +#define TRACE_ELEMENT_NUM \ + ((TRACE_SBUF_SIZE - SBUF_HEAD_SIZE) / TRACE_ELEMENT_SIZE) + +#define COLLECTOR_SEP 0 +#define COLLECTOR_SOCWATCH 1 + +enum PROFILING_FEATURE { + CORE_PMU_SAMPLING = 0, + CORE_PMU_COUNTING, + PEBS_PMU_SAMPLING, + LBR_PMU_SAMPLING, + UNCORE_PMU_SAMPLING, + VM_SWITCH_TRACING, + // Add socwatch feature +}; + +enum sbuf_type { + ACRN_TRACE, + ACRN_HVLOG, + ACRN_SEP, + ACRN_SOCWATCH, + ACRN_SBUF_TYPE_MAX, +}; + +struct data_header { + int32_t collector_id; + uint16_t cpu_id; + uint16_t data_type; + uint64_t tsc; /* TSC */ + uint64_t payload_size; + uint64_t reserved; +} __aligned(32); + +#define PROFILING_DATA_HEADER_SIZE (sizeof(struct data_header)) + +struct core_pmu_sample { + /** context where PMI is triggered */ + uint32_t os_id; + /** the task id */ + uint32_t task_id; + /** instruction pointer */ + uint64_t rip; + /** the task name */ + char task[16]; + /** physical core ID */ + uint32_t cpu_id; + /** the process id */ + uint32_t process_id; + /** perf global status msr value (for overflow status) */ + uint64_t overflow_status; + /** rflags */ + uint32_t rflags; + /** code segment */ 
+ uint32_t cs; +} __aligned(32); + +#define CORE_PMU_SAMPLE_SIZE (sizeof(struct core_pmu_sample)) + +#define NUM_LBR_ENTRY 32 + +struct lbr_pmu_sample { + /* LBR TOS */ + uint64_t lbr_tos; + /* LBR FROM IP */ + uint64_t lbr_from_ip[NUM_LBR_ENTRY]; + /* LBR TO IP */ + uint64_t lbr_to_ip[NUM_LBR_ENTRY]; + /* LBR info */ + uint64_t lbr_info[NUM_LBR_ENTRY]; +} __aligned(32); + +#define LBR_PMU_SAMPLE_SIZE (sizeof(struct lbr_pmu_sample)) + +struct pmu_sample { + /* core pmu sample */ + struct core_pmu_sample csample; + /* lbr pmu sample */ + struct lbr_pmu_sample lsample; +} __aligned(32); + +#define PMU_SAMPLE_SIZE (sizeof(struct pmu_sample)) + +struct vm_switch_trace { + uint64_t vmenter_tsc; + uint64_t vmexit_tsc; + uint64_t vmexit_reason; + int32_t os_id; +} __aligned(32); + +#define VM_SWITCH_TRACE_SIZE (sizeof(struct vm_switch_trace)) + +typedef struct shared_buf shared_buf_t; +typedef struct profiling_control profiling_control_t; +typedef struct data_header data_header_t; +typedef struct core_pmu_sample core_pmu_sample_t; +typedef struct vm_switch_trace vm_switch_trace_t; + +shared_buf_t *sbuf_allocate(uint32_t ele_num, uint32_t ele_size); +void sbuf_free(shared_buf_t *sbuf); +int sbuf_get(shared_buf_t *sbuf, uint8_t *data); +int sbuf_share_setup(uint32_t pcpu_id, uint32_t sbuf_id, shared_buf_t *sbuf); + +extern shared_buf_t **samp_buf_per_cpu; + +#define MAX_NR_VCPUS 4 +#define MAX_NR_VMS 4 +#define MAX_MSR_LIST_NUM 15 +#define MAX_GROUP_NUM 1 + +enum MSR_OP_STATUS { MSR_OP_READY = 0, MSR_OP_REQUESTED, MSR_OP_HANDLED }; + +enum MSR_OP_TYPE { + MSR_OP_NONE = 0, + MSR_OP_READ, + MSR_OP_WRITE, + MSR_OP_READ_CLEAR +}; + +enum PMU_MSR_TYPE { PMU_MSR_CCCR = 0, PMU_MSR_ESCR, PMU_MSR_DATA }; + +struct profiling_msr_op { + /* value to write or location to write into */ + uint64_t value; + /* MSR address to read/write; last entry will have value of -1 */ + uint32_t msr_id; + /* parameter; usage depends on operation */ + uint16_t param; + uint8_t op_type; + uint8_t 
reg_type; +}; + +struct profiling_msr_ops_list { + int32_t collector_id; + uint32_t num_entries; + int32_t msr_op_state; + struct profiling_msr_op entries[MAX_MSR_LIST_NUM]; +}; + +struct profiling_vcpu_pcpu_map { + int16_t vcpu_id; + int16_t pcpu_id; + uint32_t apic_id; +}; + +struct profiling_vm_info { + uint16_t vm_id; + u_char guid[16]; + char vm_name[16]; + uint16_t num_vcpus; + struct profiling_vcpu_pcpu_map cpu_map[MAX_NR_VCPUS]; +}; + +struct profiling_vm_info_list { + uint16_t num_vms; + struct profiling_vm_info vm_list[MAX_NR_VMS+1]; +}; + +struct profiling_version_info { + int32_t major; + int32_t minor; + int64_t supported_features; + int64_t reserved; +}; + +struct profiling_control { + int32_t collector_id; + int32_t reserved; + uint64_t switches; +}; + +struct profiling_pmi_config { + uint32_t num_groups; + uint32_t trigger_count; + struct profiling_msr_op initial_list[MAX_GROUP_NUM][MAX_MSR_LIST_NUM]; + struct profiling_msr_op start_list[MAX_GROUP_NUM][MAX_MSR_LIST_NUM]; + struct profiling_msr_op stop_list[MAX_GROUP_NUM][MAX_MSR_LIST_NUM]; + struct profiling_msr_op entry_list[MAX_GROUP_NUM][MAX_MSR_LIST_NUM]; + struct profiling_msr_op exit_list[MAX_GROUP_NUM][MAX_MSR_LIST_NUM]; +}; + +struct profiling_vmsw_config { + int32_t collector_id; + struct profiling_msr_op initial_list[MAX_MSR_LIST_NUM]; + struct profiling_msr_op entry_list[MAX_MSR_LIST_NUM]; + struct profiling_msr_op exit_list[MAX_MSR_LIST_NUM]; +}; + +struct profiling_pcpuid { + uint32_t leaf; + uint32_t subleaf; + uint32_t eax; + uint32_t ebx; + uint32_t ecx; + uint32_t edx; +}; + +struct profiling_status { + uint32_t samples_logged; + uint32_t samples_dropped; +}; + +#endif + +#endif diff --git a/drivers/platform/x86/sepdk/inc/msrdefs.h b/drivers/platform/x86/sepdk/inc/msrdefs.h new file mode 100644 index 0000000000000..40986ea111bb1 --- /dev/null +++ b/drivers/platform/x86/sepdk/inc/msrdefs.h @@ -0,0 +1,81 @@ +/* 
**************************************************************************** + * Copyright(C) 2009-2018 Intel Corporation. All Rights Reserved. + * + * This file is part of SEP Development Kit + * + * SEP Development Kit is free software; you can redistribute it + * and/or modify it under the terms of the GNU General Public License + * version 2 as published by the Free Software Foundation. + * + * SEP Development Kit is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * As a special exception, you may use this file as part of a free software + * library without restriction. Specifically, if other files instantiate + * templates or use macros or inline functions from this file, or you + * compile this file and link it with other files to produce an executable + * this file does not by itself cause the resulting executable to be + * covered by the GNU General Public License. This exception does not + * however invalidate any other reasons why the executable file might be + * covered by the GNU General Public License. 
+ * **************************************************************************** + */ + +#ifndef _MSRDEFS_H_ +#define _MSRDEFS_H_ + +/* + * Arch Perf monitoring version 3 + */ +#define IA32_PMC0 0x0C1 +#define IA32_PMC1 0x0C2 +#define IA32_PMC2 0x0C3 +#define IA32_PMC3 0x0C4 +#define IA32_PMC4 0x0C5 +#define IA32_PMC5 0x0C6 +#define IA32_PMC6 0x0C7 +#define IA32_PMC7 0x0C8 +#define IA32_FULL_PMC0 0x4C1 +#define IA32_FULL_PMC1 0x4C2 +#define IA32_PERFEVTSEL0 0x186 +#define IA32_PERFEVTSEL1 0x187 +#define IA32_FIXED_CTR0 0x309 +#define IA32_FIXED_CTR1 0x30A +#define IA32_FIXED_CTR2 0x30B +#define IA32_FIXED_CTR3 0x30C +#define IA32_PERF_CAPABILITIES 0x345 +#define IA32_FIXED_CTRL 0x38D +#define IA32_PERF_GLOBAL_STATUS 0x38E +#define IA32_PERF_GLOBAL_CTRL 0x38F +#define IA32_PERF_GLOBAL_OVF_CTRL 0x390 +#define IA32_PEBS_ENABLE 0x3F1 +#define IA32_MISC_ENABLE 0x1A0 +#define IA32_DS_AREA 0x600 +#define IA32_DEBUG_CTRL 0x1D9 +#undef IA32_LBR_FILTER_SELECT +#define IA32_LBR_FILTER_SELECT 0x1c8 +#define IA32_PEBS_FRONTEND 0x3F7 +#define IA32_PERF_METRICS 0x329 + +#define COMPOUND_CTR_CTL 0x306 +#define COMPOUND_PERF_CTR 0x307 +#define COMPOUND_CTR_OVF_BIT 0x800 +#define COMPOUND_CTR_OVF_SHIFT 12 + +#define FIXED_CORE_CYCLE_GLOBAL_CTRL_MASK 0x200000000 +#define FIXED_CORE_CYCLE_FIXED_CTRL_MASK 0xF0 + +// REG INDEX inside GLOBAL CTRL SECTION +enum { GLOBAL_CTRL_REG_INDEX = 0, + GLOBAL_OVF_CTRL_REG_INDEX, + PEBS_ENABLE_REG_INDEX, + DEBUG_CTRL_REG_INDEX, + FIXED_CTRL_REG_INDEX, +}; + +// REG INDEX inside GLOBAL STATUS SECTION +enum { GLOBAL_STATUS_REG_INDEX = 0,}; + +#endif diff --git a/drivers/platform/x86/sepdk/inc/output.h b/drivers/platform/x86/sepdk/inc/output.h new file mode 100644 index 0000000000000..483e0b5fb5d5d --- /dev/null +++ b/drivers/platform/x86/sepdk/inc/output.h @@ -0,0 +1,120 @@ +/* **************************************************************************** + * Copyright(C) 2009-2018 Intel Corporation. All Rights Reserved. 
+ * + * This file is part of SEP Development Kit + * + * SEP Development Kit is free software; you can redistribute it + * and/or modify it under the terms of the GNU General Public License + * version 2 as published by the Free Software Foundation. + * + * SEP Development Kit is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * As a special exception, you may use this file as part of a free software + * library without restriction. Specifically, if other files instantiate + * templates or use macros or inline functions from this file, or you + * compile this file and link it with other files to produce an executable + * this file does not by itself cause the resulting executable to be + * covered by the GNU General Public License. This exception does not + * however invalidate any other reasons why the executable file might be + * covered by the GNU General Public License. + * **************************************************************************** + */ + +#ifndef _OUTPUT_H_ +#define _OUTPUT_H_ + +#include +#include + +/* + * Initial allocation + * Size of buffer = 512KB (2^19) + * number of buffers = 2 + * The max size of the buffer cannot exceed 1<<22 i.e. 
4MB + */ +#define OUTPUT_SMALL_BUFFER (1 << 15) +#define OUTPUT_LARGE_BUFFER (1 << 19) +#define OUTPUT_CP_BUFFER (1 << 22) +#define OUTPUT_MEMORY_THRESHOLD 0x8000000 + +extern U32 output_buffer_size; +extern U32 saved_buffer_size; +#define OUTPUT_BUFFER_SIZE output_buffer_size +#define OUTPUT_NUM_BUFFERS 2 +#if defined(DRV_ANDROID) +#define MODULE_BUFF_SIZE 1 +#else +#define MODULE_BUFF_SIZE 2 +#endif + +/* + * Data type declarations and accessors macros + */ +typedef struct { + spinlock_t buffer_lock; + U32 remaining_buffer_size; + U32 current_buffer; + U32 total_buffer_size; + U32 next_buffer[OUTPUT_NUM_BUFFERS]; + U32 buffer_full[OUTPUT_NUM_BUFFERS]; + U8 *buffer[OUTPUT_NUM_BUFFERS]; + U32 signal_full; + DRV_BOOL tasklet_queued; +} OUTPUT_NODE, *OUTPUT; + +#define OUTPUT_buffer_lock(x) ((x)->buffer_lock) +#define OUTPUT_remaining_buffer_size(x) ((x)->remaining_buffer_size) +#define OUTPUT_total_buffer_size(x) ((x)->total_buffer_size) +#define OUTPUT_buffer(x, y) ((x)->buffer[(y)]) +#define OUTPUT_buffer_full(x, y) ((x)->buffer_full[(y)]) +#define OUTPUT_current_buffer(x) ((x)->current_buffer) +#define OUTPUT_signal_full(x) ((x)->signal_full) +#define OUTPUT_tasklet_queued(x) ((x)->tasklet_queued) +/* + * Add an array of control buffer for per-cpu + */ +typedef struct { + wait_queue_head_t queue; + OUTPUT_NODE outbuf; + U32 sample_count; +} BUFFER_DESC_NODE, *BUFFER_DESC; + +#define BUFFER_DESC_queue(a) ((a)->queue) +#define BUFFER_DESC_outbuf(a) ((a)->outbuf) +#define BUFFER_DESC_sample_count(a) ((a)->sample_count) + +extern BUFFER_DESC cpu_buf; // actually an array of BUFFER_DESC_NODE +extern BUFFER_DESC unc_buf; +extern BUFFER_DESC module_buf; +extern BUFFER_DESC cpu_sideband_buf; +/* + * Interface Functions + */ + +extern int OUTPUT_Module_Fill(PVOID data, U16 size, U8 in_notification); +extern OS_STATUS OUTPUT_Initialize(void); +extern OS_STATUS OUTPUT_Initialize_UNC(void); +extern void OUTPUT_Cleanup(void); +extern void OUTPUT_Cleanup(void); +extern int 
OUTPUT_Destroy(void); +extern int OUTPUT_Flush(void); + +extern ssize_t OUTPUT_Module_Read(struct file *filp, char __user *buf, + size_t count, loff_t *f_pos); + +extern ssize_t OUTPUT_Sample_Read(struct file *filp, char __user *buf, + size_t count, loff_t *f_pos); + +extern ssize_t OUTPUT_UncSample_Read(struct file *filp, char __user *buf, + size_t count, loff_t *f_pos); + +extern ssize_t OUTPUT_SidebandInfo_Read(struct file *filp, char __user *buf, + size_t count, loff_t *f_pos); + +extern void *OUTPUT_Reserve_Buffer_Space(BUFFER_DESC bd, U32 size, + DRV_BOOL defer, U8 in_notification, S32 cpu_idx); + +#endif diff --git a/drivers/platform/x86/sepdk/inc/pci.h b/drivers/platform/x86/sepdk/inc/pci.h new file mode 100644 index 0000000000000..44d5304d86a56 --- /dev/null +++ b/drivers/platform/x86/sepdk/inc/pci.h @@ -0,0 +1,133 @@ +/* **************************************************************************** + * Copyright(C) 2009-2018 Intel Corporation. All Rights Reserved. + * + * This file is part of SEP Development Kit + * + * SEP Development Kit is free software; you can redistribute it + * and/or modify it under the terms of the GNU General Public License + * version 2 as published by the Free Software Foundation. + * + * SEP Development Kit is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * As a special exception, you may use this file as part of a free software + * library without restriction. Specifically, if other files instantiate + * templates or use macros or inline functions from this file, or you + * compile this file and link it with other files to produce an executable + * this file does not by itself cause the resulting executable to be + * covered by the GNU General Public License. 
This exception does not + * however invalidate any other reasons why the executable file might be + * covered by the GNU General Public License. + * **************************************************************************** + */ + +#ifndef _PCI_H_ +#define _PCI_H_ + +#include "lwpmudrv_defines.h" + +/* + * PCI Config Address macros + */ +#define PCI_ENABLE 0x80000000 + +#define PCI_ADDR_IO 0xCF8 +#define PCI_DATA_IO 0xCFC + +#define BIT0 0x1 +#define BIT1 0x2 + +/* + * Macro for forming a PCI configuration address + */ +#define FORM_PCI_ADDR(bus, dev, fun, off) \ + (((PCI_ENABLE)) | ((bus & 0xFF) << 16) | ((dev & 0x1F) << 11) | \ + ((fun & 0x07) << 8) | ((off & 0xFF) << 0)) + +#define VENDOR_ID_MASK 0x0000FFFF +#define DEVICE_ID_MASK 0xFFFF0000 +#define DEVICE_ID_BITSHIFT 16 +#define LOWER_4_BYTES_MASK 0x00000000FFFFFFFF +#define MAX_BUSNO 256 +#define NEXT_ADDR_OFFSET 4 +#define NEXT_ADDR_SHIFT 32 +#define DRV_IS_PCI_VENDOR_ID_INTEL 0x8086 +#define MAX_PCI_DEVS 32 + +#define CONTINUE_IF_NOT_GENUINE_INTEL_DEVICE(value, vendor_id, device_id) \ + { \ + vendor_id = value & VENDOR_ID_MASK; \ + device_id = (value & DEVICE_ID_MASK) >> DEVICE_ID_BITSHIFT; \ + if (vendor_id != DRV_IS_PCI_VENDOR_ID_INTEL) { \ + continue; \ + } \ + } + +#define CHECK_IF_GENUINE_INTEL_DEVICE(value, vendor_id, device_id, valid) \ + { \ + vendor_id = value & VENDOR_ID_MASK; \ + device_id = (value & DEVICE_ID_MASK) >> DEVICE_ID_BITSHIFT; \ + valid = 1; \ + if (vendor_id != DRV_IS_PCI_VENDOR_ID_INTEL) { \ + valid = 0; \ + } \ + } + +typedef struct SEP_MMIO_NODE_S SEP_MMIO_NODE; + +struct SEP_MMIO_NODE_S { + U64 physical_address; + U64 virtual_address; + U64 map_token; + U32 size; +}; + +#define SEP_MMIO_NODE_physical_address(x) ((x)->physical_address) +#define SEP_MMIO_NODE_virtual_address(x) ((x)->virtual_address) +#define SEP_MMIO_NODE_map_token(x) ((x)->map_token) +#define SEP_MMIO_NODE_size(x) ((x)->size) + +extern OS_STATUS PCI_Map_Memory(SEP_MMIO_NODE *node, U64 phy_address, + U32 
map_size); + +extern void PCI_Unmap_Memory(SEP_MMIO_NODE *node); + +extern int PCI_Read_From_Memory_Address(U32 addr, U32 *val); + +extern int PCI_Write_To_Memory_Address(U32 addr, U32 val); + +/*** UNIVERSAL PCI ACCESSORS ***/ + +extern VOID PCI_Initialize(void); + +extern U32 PCI_Read_U32(U32 bus, U32 device, U32 function, U32 offset); + +extern U32 PCI_Read_U32_Valid(U32 bus, U32 device, U32 function, U32 offset, + U32 invalid_value); + +extern U64 PCI_Read_U64(U32 bus, U32 device, U32 function, U32 offset); + +extern U64 PCI_Read_U64_Valid(U32 bus, U32 device, U32 function, U32 offset, + U64 invalid_value); + +extern U32 PCI_Write_U32(U32 bus, U32 device, U32 function, U32 offset, + U32 value); + +extern U32 PCI_Write_U64(U32 bus, U32 device, U32 function, U32 offset, + U64 value); + +/*** UNIVERSAL MMIO ACCESSORS ***/ + +extern U32 PCI_MMIO_Read_U32(U64 virtual_address_base, U32 offset); + +extern U64 PCI_MMIO_Read_U64(U64 virtual_address_base, U32 offset); + +extern void PCI_MMIO_Write_U32(U64 virtual_address_base, U32 offset, + U32 value); + +extern void PCI_MMIO_Write_U64(U64 virtual_address_base, U32 offset, + U64 value); + +#endif diff --git a/drivers/platform/x86/sepdk/inc/pebs.h b/drivers/platform/x86/sepdk/inc/pebs.h new file mode 100644 index 0000000000000..7a7bbe10e2ba2 --- /dev/null +++ b/drivers/platform/x86/sepdk/inc/pebs.h @@ -0,0 +1,494 @@ +/* **************************************************************************** + * Copyright(C) 2009-2018 Intel Corporation. All Rights Reserved. + * + * This file is part of SEP Development Kit + * + * SEP Development Kit is free software; you can redistribute it + * and/or modify it under the terms of the GNU General Public License + * version 2 as published by the Free Software Foundation. + * + * SEP Development Kit is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the + * GNU General Public License for more details. + * + * As a special exception, you may use this file as part of a free software + * library without restriction. Specifically, if other files instantiate + * templates or use macros or inline functions from this file, or you + * compile this file and link it with other files to produce an executable + * this file does not by itself cause the resulting executable to be + * covered by the GNU General Public License. This exception does not + * however invalidate any other reasons why the executable file might be + * covered by the GNU General Public License. + * **************************************************************************** + */ + +#ifndef _PEBS_H_ +#define _PEBS_H_ + + +typedef struct PEBS_REC_NODE_S PEBS_REC_NODE; + +struct PEBS_REC_NODE_S { + U64 r_flags; // Offset 0x00 + U64 linear_ip; // Offset 0x08 + U64 rax; // Offset 0x10 + U64 rbx; // Offset 0x18 + U64 rcx; // Offset 0x20 + U64 rdx; // Offset 0x28 + U64 rsi; // Offset 0x30 + U64 rdi; // Offset 0x38 + U64 rbp; // Offset 0x40 + U64 rsp; // Offset 0x48 + U64 r8; // Offset 0x50 + U64 r9; // Offset 0x58 + U64 r10; // Offset 0x60 + U64 r11; // Offset 0x68 + U64 r12; // Offset 0x70 + U64 r13; // Offset 0x78 + U64 r14; // Offset 0x80 + U64 r15; // Offset 0x88 +}; + +typedef struct PEBS_REC_EXT_NODE_S PEBS_REC_EXT_NODE; +typedef PEBS_REC_EXT_NODE * PEBS_REC_EXT; +struct PEBS_REC_EXT_NODE_S { + PEBS_REC_NODE pebs_basic; // Offset 0x00 to 0x88 + U64 glob_perf_overflow; // Offset 0x90 + U64 data_linear_address; // Offset 0x98 + U64 data_source; // Offset 0xA0 + U64 latency; // Offset 0xA8 +}; + +#define PEBS_REC_EXT_r_flags(x) ((x)->pebs_basic.r_flags) +#define PEBS_REC_EXT_linear_ip(x) ((x)->pebs_basic.linear_ip) +#define PEBS_REC_EXT_rax(x) ((x)->pebs_basic.rax) +#define PEBS_REC_EXT_rbx(x) ((x)->pebs_basic.rbx) +#define PEBS_REC_EXT_rcx(x) ((x)->pebs_basic.rcx) +#define PEBS_REC_EXT_rdx(x) ((x)->pebs_basic.rdx) +#define PEBS_REC_EXT_rsi(x) 
((x)->pebs_basic.rsi) +#define PEBS_REC_EXT_rdi(x) ((x)->pebs_basic.rdi) +#define PEBS_REC_EXT_rbp(x) ((x)->pebs_basic.rbp) +#define PEBS_REC_EXT_rsp(x) ((x)->pebs_basic.rsp) +#define PEBS_REC_EXT_r8(x) ((x)->pebs_basic.r8) +#define PEBS_REC_EXT_r9(x) ((x)->pebs_basic.r9) +#define PEBS_REC_EXT_r10(x) ((x)->pebs_basic.r10) +#define PEBS_REC_EXT_r11(x) ((x)->pebs_basic.r11) +#define PEBS_REC_EXT_r12(x) ((x)->pebs_basic.r12) +#define PEBS_REC_EXT_r13(x) ((x)->pebs_basic.r13) +#define PEBS_REC_EXT_r14(x) ((x)->pebs_basic.r14) +#define PEBS_REC_EXT_r15(x) ((x)->pebs_basic.r15) +#define PEBS_REC_EXT_glob_perf_overflow(x) ((x)->glob_perf_overflow) +#define PEBS_REC_EXT_data_linear_address(x) ((x)->data_linear_address) +#define PEBS_REC_EXT_data_source(x) ((x)->data_source) +#define PEBS_REC_EXT_latency(x) ((x)->latency) + +typedef struct PEBS_REC_EXT1_NODE_S PEBS_REC_EXT1_NODE; +typedef PEBS_REC_EXT1_NODE * PEBS_REC_EXT1; +struct PEBS_REC_EXT1_NODE_S { + PEBS_REC_EXT_NODE pebs_ext; + U64 eventing_ip; //Offset 0xB0 + U64 hle_info; //Offset 0xB8 +}; + +#define PEBS_REC_EXT1_r_flags(x) ((x)->pebs_ext.pebs_basic.r_flags) +#define PEBS_REC_EXT1_linear_ip(x) ((x)->pebs_ext.pebs_basic.linear_ip) +#define PEBS_REC_EXT1_rax(x) ((x)->pebs_ext.pebs_basic.rax) +#define PEBS_REC_EXT1_rbx(x) ((x)->pebs_ext.pebs_basic.rbx) +#define PEBS_REC_EXT1_rcx(x) ((x)->pebs_ext.pebs_basic.rcx) +#define PEBS_REC_EXT1_rdx(x) ((x)->pebs_ext.pebs_basic.rdx) +#define PEBS_REC_EXT1_rsi(x) ((x)->pebs_ext.pebs_basic.rsi) +#define PEBS_REC_EXT1_rdi(x) ((x)->pebs_ext.pebs_basic.rdi) +#define PEBS_REC_EXT1_rbp(x) ((x)->pebs_ext.pebs_basic.rbp) +#define PEBS_REC_EXT1_rsp(x) ((x)->pebs_ext.pebs_basic.rsp) +#define PEBS_REC_EXT1_r8(x) ((x)->pebs_ext.pebs_basic.r8) +#define PEBS_REC_EXT1_r9(x) ((x)->pebs_ext.pebs_basic.r9) +#define PEBS_REC_EXT1_r10(x) ((x)->pebs_ext.pebs_basic.r10) +#define PEBS_REC_EXT1_r11(x) ((x)->pebs_ext.pebs_basic.r11) +#define PEBS_REC_EXT1_r12(x) ((x)->pebs_ext.pebs_basic.r12) +#define 
PEBS_REC_EXT1_r13(x) ((x)->pebs_ext.pebs_basic.r13) +#define PEBS_REC_EXT1_r14(x) ((x)->pebs_ext.pebs_basic.r14) +#define PEBS_REC_EXT1_r15(x) ((x)->pebs_ext.pebs_basic.r15) +#define PEBS_REC_EXT1_glob_perf_overflow(x) ((x)->pebs_ext.glob_perf_overflow) +#define PEBS_REC_EXT1_data_linear_address(x) \ + ((x)->pebs_ext.data_linear_address) +#define PEBS_REC_EXT1_data_source(x) ((x)->pebs_ext.data_source) +#define PEBS_REC_EXT1_latency(x) ((x)->pebs_ext.latency) +#define PEBS_REC_EXT1_eventing_ip(x) ((x)->eventing_ip) +#define PEBS_REC_EXT1_hle_info(x) ((x)->hle_info) + +typedef struct PEBS_REC_EXT2_NODE_S PEBS_REC_EXT2_NODE; +typedef PEBS_REC_EXT2_NODE * PEBS_REC_EXT2; +struct PEBS_REC_EXT2_NODE_S { + PEBS_REC_EXT1_NODE pebs_ext1; + U64 tsc; //Offset 0xC0 +}; + +#define PEBS_REC_EXT2_r_flags(x) ((x)->pebs_ext1.pebs_ext.pebs_basic.r_flags) +#define PEBS_REC_EXT2_linear_ip(x) \ + ((x)->pebs_ext1.pebs_ext.pebs_basic.linear_ip) +#define PEBS_REC_EXT2_rax(x) ((x)->pebs_ext1.pebs_ext.pebs_basic.rax) +#define PEBS_REC_EXT2_rbx(x) ((x)->pebs_ext1.pebs_ext.pebs_basic.rbx) +#define PEBS_REC_EXT2_rcx(x) ((x)->pebs_ext1.pebs_ext.pebs_basic.rcx) +#define PEBS_REC_EXT2_rdx(x) ((x)->pebs_ext1.pebs_ext.pebs_basic.rdx) +#define PEBS_REC_EXT2_rsi(x) ((x)->pebs_ext1.pebs_ext.pebs_basic.rsi) +#define PEBS_REC_EXT2_rdi(x) ((x)->pebs_ext1.pebs_ext.pebs_basic.rdi) +#define PEBS_REC_EXT2_rbp(x) ((x)->pebs_ext1.pebs_ext.pebs_basic.rbp) +#define PEBS_REC_EXT2_rsp(x) ((x)->pebs_ext1.pebs_ext.pebs_basic.rsp) +#define PEBS_REC_EXT2_r8(x) ((x)->pebs_ext1.pebs_ext.pebs_basic.r8) +#define PEBS_REC_EXT2_r9(x) ((x)->pebs_ext1.pebs_ext.pebs_basic.r9) +#define PEBS_REC_EXT2_r10(x) ((x)->pebs_ext1.pebs_ext.pebs_basic.r10) +#define PEBS_REC_EXT2_r11(x) ((x)->pebs_ext1.pebs_ext.pebs_basic.r11) +#define PEBS_REC_EXT2_r12(x) ((x)->pebs_ext1.pebs_ext.pebs_basic.r12) +#define PEBS_REC_EXT2_r13(x) ((x)->pebs_ext1.pebs_ext.pebs_basic.r13) +#define PEBS_REC_EXT2_r14(x) 
((x)->pebs_ext1.pebs_ext.pebs_basic.r14) +#define PEBS_REC_EXT2_r15(x) ((x)->pebs_ext1.pebs_ext.pebs_basic.r15) +#define PEBS_REC_EXT2_glob_perf_overflow(x) \ + ((x)->pebs_ext1.pebs_ext.glob_perf_overflow) +#define PEBS_REC_EXT2_data_linear_address(x) \ + ((x)->pebs_ext1.pebs_ext.data_linear_address) +#define PEBS_REC_EXT2_data_source(x) ((x)->pebs_ext1.pebs_ext.data_source) +#define PEBS_REC_EXT2_latency(x) ((x)->pebs_ext1.pebs_ext.latency) +#define PEBS_REC_EXT2_eventing_ip(x) ((x)->pebs_ext1.eventing_ip) +#define PEBS_REC_EXT2_hle_info(x) ((x)->pebs_ext1.hle_info) +#define PEBS_REC_EXT2_tsc(x) ((x)->tsc) + +typedef struct APEBS_CONFIG_NODE_S APEBS_CONFIG_NODE; +typedef APEBS_CONFIG_NODE * APEBS_CONFIG; + +struct APEBS_CONFIG_NODE_S { + U8 apebs_enabled; + U8 collect_mem; + U8 collect_gpr; + U8 collect_xmm; + U8 collect_lbrs; + U8 precise_ip_lbrs; + U8 num_lbr_entries; + U16 basic_offset; + U16 mem_offset; + U16 gpr_offset; + U16 xmm_offset; + U16 lbr_offset; +}; + +#define APEBS_CONFIG_apebs_enabled(x) ((x)->apebs_enabled) +#define APEBS_CONFIG_collect_mem(x) ((x)->collect_mem) +#define APEBS_CONFIG_collect_gpr(x) ((x)->collect_gpr) +#define APEBS_CONFIG_collect_xmm(x) ((x)->collect_xmm) +#define APEBS_CONFIG_collect_lbrs(x) ((x)->collect_lbrs) +#define APEBS_CONFIG_precise_ip_lbrs(x) ((x)->precise_ip_lbrs) +#define APEBS_CONFIG_num_lbr_entries(x) ((x)->num_lbr_entries) +#define APEBS_CONFIG_basic_offset(x) ((x)->basic_offset) +#define APEBS_CONFIG_mem_offset(x) ((x)->mem_offset) +#define APEBS_CONFIG_gpr_offset(x) ((x)->gpr_offset) +#define APEBS_CONFIG_xmm_offset(x) ((x)->xmm_offset) +#define APEBS_CONFIG_lbr_offset(x) ((x)->lbr_offset) + +typedef struct ADAPTIVE_PEBS_BASIC_INFO_NODE_S ADAPTIVE_PEBS_BASIC_INFO_NODE; +typedef ADAPTIVE_PEBS_BASIC_INFO_NODE * ADAPTIVE_PEBS_BASIC_INFO; + +struct ADAPTIVE_PEBS_BASIC_INFO_NODE_S { + U64 record_info; // Offset 0x0 + // [47:0] - record format, [63:48] - record size + U64 eventing_ip; // Offset 0x8 + U64 
applicable_counters; // Offset 0x10 + U64 tsc; // Offset 0x18 +}; + +#define ADAPTIVE_PEBS_BASIC_INFO_record_info(x) ((x)->record_info) +#define ADAPTIVE_PEBS_BASIC_INFO_eventing_ip(x) ((x)->eventing_ip) +#define ADAPTIVE_PEBS_BASIC_INFO_tsc(x) ((x)->tsc) +#define ADAPTIVE_PEBS_BASIC_INFO_applicable_counters(x) \ + ((x)->applicable_counters) + +typedef struct ADAPTIVE_PEBS_MEM_INFO_NODE_S ADAPTIVE_PEBS_MEM_INFO_NODE; +typedef ADAPTIVE_PEBS_MEM_INFO_NODE * ADAPTIVE_PEBS_MEM_INFO; + +struct ADAPTIVE_PEBS_MEM_INFO_NODE_S { + U64 data_linear_address; // Offset 0x20 + U64 data_source; // Offset 0x28 + U64 latency; // Offset 0x30 + U64 hle_info; // Offset 0x38 +}; + +#define ADAPTIVE_PEBS_MEM_INFO_data_linear_address(x) ((x)->data_linear_address) +#define ADAPTIVE_PEBS_MEM_INFO_data_source(x) ((x)->data_source) +#define ADAPTIVE_PEBS_MEM_INFO_latency(x) ((x)->latency) +#define ADAPTIVE_PEBS_MEM_INFO_hle_info(x) ((x)->hle_info) + +typedef struct ADAPTIVE_PEBS_GPR_INFO_NODE_S ADAPTIVE_PEBS_GPR_INFO_NODE; +typedef ADAPTIVE_PEBS_GPR_INFO_NODE * ADAPTIVE_PEBS_GPR_INFO; + +struct ADAPTIVE_PEBS_GPR_INFO_NODE_S { + U64 rflags; // Offset 0x40 + U64 rip; // Offset 0x48 + U64 rax; // Offset 0x50 + U64 rcx; // Offset 0x58 + U64 rdx; // Offset 0x60 + U64 rbx; // Offset 0x68 + U64 rsp; // Offset 0x70 + U64 rbp; // Offset 0x78 + U64 rsi; // Offset 0x80 + U64 rdi; // Offset 0x88 + U64 r8; // Offset 0x90 + U64 r9; // Offset 0x98 + U64 r10; // Offset 0xA0 + U64 r11; // Offset 0xA8 + U64 r12; // Offset 0xB0 + U64 r13; // Offset 0xB8 + U64 r14; // Offset 0xC0 + U64 r15; // Offset 0xC8 +}; + +#define ADAPTIVE_PEBS_GPR_INFO_rflags(x) ((x)->rflags) +#define ADAPTIVE_PEBS_GPR_INFO_rip(x) ((x)->rip) +#define ADAPTIVE_PEBS_GPR_INFO_rax(x) ((x)->rax) +#define ADAPTIVE_PEBS_GPR_INFO_rcx(x) ((x)->rcx) +#define ADAPTIVE_PEBS_GPR_INFO_rdx(x) ((x)->rdx) +#define ADAPTIVE_PEBS_GPR_INFO_rbx(x) ((x)->rbx) +#define ADAPTIVE_PEBS_GPR_INFO_rsp(x) ((x)->rsp) +#define ADAPTIVE_PEBS_GPR_INFO_rbp(x) ((x)->rbp) 
+#define ADAPTIVE_PEBS_GPR_INFO_rsi(x) ((x)->rsi) +#define ADAPTIVE_PEBS_GPR_INFO_rdi(x) ((x)->rdi) +#define ADAPTIVE_PEBS_GPR_INFO_r8(x) ((x)->r8) +#define ADAPTIVE_PEBS_GPR_INFO_r9(x) ((x)->r9) +#define ADAPTIVE_PEBS_GPR_INFO_r10(x) ((x)->r10) +#define ADAPTIVE_PEBS_GPR_INFO_r11(x) ((x)->r11) +#define ADAPTIVE_PEBS_GPR_INFO_r12(x) ((x)->r12) +#define ADAPTIVE_PEBS_GPR_INFO_r13(x) ((x)->r13) +#define ADAPTIVE_PEBS_GPR_INFO_r14(x) ((x)->r14) +#define ADAPTIVE_PEBS_GPR_INFO_r15(x) ((x)->r15) + +typedef struct ADAPTIVE_PEBS_XMM_INFO_NODE_S ADAPTIVE_PEBS_XMM_INFO_NODE; +typedef ADAPTIVE_PEBS_XMM_INFO_NODE * ADAPTIVE_PEBS_XMM_INFO; + +struct ADAPTIVE_PEBS_XMM_INFO_NODE_S { + U64 xmm0_l; // Offset 0xD0 + U64 xmm0_h; // Offset 0xD8 + U64 xmm1_l; // Offset 0xE0 + U64 xmm1_h; // Offset 0xE8 + U64 xmm2_l; // Offset 0xF0 + U64 xmm2_h; // Offset 0xF8 + U64 xmm3_l; // Offset 0x100 + U64 xmm3_h; // Offset 0x108 + U64 xmm4_l; // Offset 0x110 + U64 xmm4_h; // Offset 0x118 + U64 xmm5_l; // Offset 0x120 + U64 xmm5_h; // Offset 0x128 + U64 xmm6_l; // Offset 0x130 + U64 xmm6_h; // Offset 0x138 + U64 xmm7_l; // Offset 0x140 + U64 xmm7_h; // Offset 0x148 + U64 xmm8_l; // Offset 0x150 + U64 xmm8_h; // Offset 0x158 + U64 xmm9_l; // Offset 0x160 + U64 xmm9_h; // Offset 0x168 + U64 xmm10_l; // Offset 0x170 + U64 xmm10_h; // Offset 0x178 + U64 xmm11_l; // Offset 0x180 + U64 xmm11_h; // Offset 0x188 + U64 xmm12_l; // Offset 0x190 + U64 xmm12_h; // Offset 0x198 + U64 xmm13_l; // Offset 0x1A0 + U64 xmm13_h; // Offset 0x1A8 + U64 xmm14_l; // Offset 0x1B0 + U64 xmm14_h; // Offset 0x1B8 + U64 xmm15_l; // Offset 0x1C0 + U64 xmm15_h; // Offset 0x1C8 +}; + +#define ADAPTIVE_PEBS_XMM_INFO_xmm0_l(x) ((x)->xmm0_l) +#define ADAPTIVE_PEBS_XMM_INFO_xmm0_h(x) ((x)->xmm0_h) +#define ADAPTIVE_PEBS_XMM_INFO_xmm1_l(x) ((x)->xmm1_l) +#define ADAPTIVE_PEBS_XMM_INFO_xmm1_h(x) ((x)->xmm1_h) +#define ADAPTIVE_PEBS_XMM_INFO_xmm2_l(x) ((x)->xmm2_l) +#define ADAPTIVE_PEBS_XMM_INFO_xmm2_h(x) ((x)->xmm2_h) +#define 
ADAPTIVE_PEBS_XMM_INFO_xmm3_l(x) ((x)->xmm3_l) +#define ADAPTIVE_PEBS_XMM_INFO_xmm3_h(x) ((x)->xmm3_h) +#define ADAPTIVE_PEBS_XMM_INFO_xmm4_l(x) ((x)->xmm4_l) +#define ADAPTIVE_PEBS_XMM_INFO_xmm4_h(x) ((x)->xmm4_h) +#define ADAPTIVE_PEBS_XMM_INFO_xmm5_l(x) ((x)->xmm5_l) +#define ADAPTIVE_PEBS_XMM_INFO_xmm5_h(x) ((x)->xmm5_h) +#define ADAPTIVE_PEBS_XMM_INFO_xmm6_l(x) ((x)->xmm6_l) +#define ADAPTIVE_PEBS_XMM_INFO_xmm6_h(x) ((x)->xmm6_h) +#define ADAPTIVE_PEBS_XMM_INFO_xmm7_l(x) ((x)->xmm7_l) +#define ADAPTIVE_PEBS_XMM_INFO_xmm7_h(x) ((x)->xmm7_h) +#define ADAPTIVE_PEBS_XMM_INFO_xmm8_l(x) ((x)->xmm8_l) +#define ADAPTIVE_PEBS_XMM_INFO_xmm8_h(x) ((x)->xmm8_h) +#define ADAPTIVE_PEBS_XMM_INFO_xmm9_l(x) ((x)->xmm9_l) +#define ADAPTIVE_PEBS_XMM_INFO_xmm9_h(x) ((x)->xmm9_h) +#define ADAPTIVE_PEBS_XMM_INFO_xmm10_l(x) ((x)->xmm10_l) +#define ADAPTIVE_PEBS_XMM_INFO_xmm10_h(x) ((x)->xmm10_h) +#define ADAPTIVE_PEBS_XMM_INFO_xmm11_l(x) ((x)->xmm11_l) +#define ADAPTIVE_PEBS_XMM_INFO_xmm11_h(x) ((x)->xmm11_h) +#define ADAPTIVE_PEBS_XMM_INFO_xmm12_l(x) ((x)->xmm12_l) +#define ADAPTIVE_PEBS_XMM_INFO_xmm12_h(x) ((x)->xmm12_h) +#define ADAPTIVE_PEBS_XMM_INFO_xmm13_l(x) ((x)->xmm13_l) +#define ADAPTIVE_PEBS_XMM_INFO_xmm13_h(x) ((x)->xmm13_h) +#define ADAPTIVE_PEBS_XMM_INFO_xmm14_l(x) ((x)->xmm14_l) +#define ADAPTIVE_PEBS_XMM_INFO_xmm14_h(x) ((x)->xmm14_h) +#define ADAPTIVE_PEBS_XMM_INFO_xmm15_l(x) ((x)->xmm15_l) +#define ADAPTIVE_PEBS_XMM_INFO_xmm15_h(x) ((x)->xmm15_h) + +typedef struct ADAPTIVE_PEBS_LBR_INFO_NODE_S ADAPTIVE_PEBS_LBR_INFO_NODE; +typedef ADAPTIVE_PEBS_LBR_INFO_NODE * ADAPTIVE_PEBS_LBR_INFO; + +struct ADAPTIVE_PEBS_LBR_INFO_NODE_S { + U64 lbr_from; // Offset 0x1D0 + U64 lbr_to; // Offset 0x1D8 + U64 lbr_info; // Offset 0x1E0 +}; + +#define ADAPTIVE_PEBS_LBR_INFO_lbr_from(x) ((x)->lbr_from) +#define ADAPTIVE_PEBS_LBR_INFO_lbr_to(x) ((x)->lbr_to) +#define ADAPTIVE_PEBS_LBR_INFO_lbr_info(x) ((x)->lbr_info) + +typedef struct LATENCY_INFO_NODE_S LATENCY_INFO_NODE; +typedef 
LATENCY_INFO_NODE * LATENCY_INFO; + +struct LATENCY_INFO_NODE_S { + U64 linear_address; + U64 data_source; + U64 latency; + U64 stack_pointer; + U64 phys_addr; +}; + +#define LATENCY_INFO_linear_address(x) ((x)->linear_address) +#define LATENCY_INFO_data_source(x) ((x)->data_source) +#define LATENCY_INFO_latency(x) ((x)->latency) +#define LATENCY_INFO_stack_pointer(x) ((x)->stack_pointer) +#define LATENCY_INFO_phys_addr(x) ((x)->phys_addr) + +typedef struct DTS_BUFFER_EXT_NODE_S DTS_BUFFER_EXT_NODE; +typedef DTS_BUFFER_EXT_NODE * DTS_BUFFER_EXT; +struct DTS_BUFFER_EXT_NODE_S { + U64 base; // Offset 0x00 + U64 index; // Offset 0x08 + U64 max; // Offset 0x10 + U64 threshold; // Offset 0x18 + U64 pebs_base; // Offset 0x20 + U64 pebs_index; // Offset 0x28 + U64 pebs_max; // Offset 0x30 + U64 pebs_threshold; // Offset 0x38 + U64 counter_reset0; // Offset 0x40 + U64 counter_reset1; // Offset 0x48 + U64 counter_reset2; // Offset 0x50 + U64 counter_reset3; +}; + +#define DTS_BUFFER_EXT_base(x) ((x)->base) +#define DTS_BUFFER_EXT_index(x) ((x)->index) +#define DTS_BUFFER_EXT_max(x) ((x)->max) +#define DTS_BUFFER_EXT_threshold(x) ((x)->threshold) +#define DTS_BUFFER_EXT_pebs_base(x) ((x)->pebs_base) +#define DTS_BUFFER_EXT_pebs_index(x) ((x)->pebs_index) +#define DTS_BUFFER_EXT_pebs_max(x) ((x)->pebs_max) +#define DTS_BUFFER_EXT_pebs_threshold(x) ((x)->pebs_threshold) +#define DTS_BUFFER_EXT_counter_reset0(x) ((x)->counter_reset0) +#define DTS_BUFFER_EXT_counter_reset1(x) ((x)->counter_reset1) +#define DTS_BUFFER_EXT_counter_reset2(x) ((x)->counter_reset2) +#define DTS_BUFFER_EXT_counter_reset3(x) ((x)->counter_reset3) + +typedef struct DTS_BUFFER_EXT1_NODE_S DTS_BUFFER_EXT1_NODE; +typedef DTS_BUFFER_EXT1_NODE * DTS_BUFFER_EXT1; +struct DTS_BUFFER_EXT1_NODE_S { + DTS_BUFFER_EXT_NODE dts_buffer; + U64 counter_reset4; // Offset 0x60 + U64 counter_reset5; // Offset 0x68 + U64 counter_reset6; // Offset 0x70 + U64 counter_reset7; // Offset 0x78 + U64 fixed_counter_reset0; // 
Offset 0x80 + U64 fixed_counter_reset1; // Offset 0x88 + U64 fixed_counter_reset2; // Offset 0x90 + U64 fixed_counter_reset3; // Offset 0x98 +}; + +#define DTS_BUFFER_EXT1_base(x) ((x)->dts_buffer.base) +#define DTS_BUFFER_EXT1_index(x) ((x)->dts_buffer.index) +#define DTS_BUFFER_EXT1_max(x) ((x)->dts_buffer.max) +#define DTS_BUFFER_EXT1_threshold(x) ((x)->dts_buffer.threshold) +#define DTS_BUFFER_EXT1_pebs_base(x) ((x)->dts_buffer.pebs_base) +#define DTS_BUFFER_EXT1_pebs_index(x) ((x)->dts_buffer.pebs_index) +#define DTS_BUFFER_EXT1_pebs_max(x) ((x)->dts_buffer.pebs_max) +#define DTS_BUFFER_EXT1_pebs_threshold(x) ((x)->dts_buffer.pebs_threshold) +#define DTS_BUFFER_EXT1_counter_reset0(x) ((x)->dts_buffer.counter_reset0) +#define DTS_BUFFER_EXT1_counter_reset1(x) ((x)->dts_buffer.counter_reset1) +#define DTS_BUFFER_EXT1_counter_reset2(x) ((x)->dts_buffer.counter_reset2) +#define DTS_BUFFER_EXT1_counter_reset3(x) ((x)->dts_buffer.counter_reset3) +#define DTS_BUFFER_EXT1_counter_reset4(x) ((x)->counter_reset4) +#define DTS_BUFFER_EXT1_counter_reset5(x) ((x)->counter_reset5) +#define DTS_BUFFER_EXT1_counter_reset6(x) ((x)->counter_reset6) +#define DTS_BUFFER_EXT1_counter_reset7(x) ((x)->counter_reset7) +#define DTS_BUFFER_EXT1_fixed_counter_reset0(x) ((x)->fixed_counter_reset0) +#define DTS_BUFFER_EXT1_fixed_counter_reset1(x) ((x)->fixed_counter_reset1) +#define DTS_BUFFER_EXT1_fixed_counter_reset2(x) ((x)->fixed_counter_reset2) +#define DTS_BUFFER_EXT1_fixed_counter_reset3(x) ((x)->fixed_counter_reset3) + +extern OS_STATUS PEBS_Initialize(U32 dev_idx); + +extern OS_STATUS PEBS_Allocate(void); + +extern VOID PEBS_Destroy(void); + +extern VOID PEBS_Flush_Buffer(void *); + +extern VOID PEBS_Reset_Counter(S32 this_cpu, U32 index, U64 value); + +extern VOID PEBS_Reset_Index(S32 this_cpu); + +extern VOID PEBS_Modify_IP(void *sample, DRV_BOOL is_64bit_addr, U32 rec_index); + +extern VOID PEBS_Modify_TSC(void *sample, U32 rec_index); + +extern U32 
PEBS_Get_Num_Records_Filled(void); + +extern U64 PEBS_Fill_Buffer(S8 *buffer, EVENT_DESC evt_desc, U32 rec_index); + +extern U64 APEBS_Fill_Buffer(S8 *buffer, EVENT_DESC evt_desc, U32 rec_index); + +extern U64 PEBS_Overflowed(S32 this_cpu, U64 overflow_status, U32 rec_index); + +/* + * Dispatch table for virtualized functions. + * Used to enable common functionality for different + * processor microarchitectures + */ +typedef struct PEBS_DISPATCH_NODE_S PEBS_DISPATCH_NODE; +typedef PEBS_DISPATCH_NODE * PEBS_DISPATCH; +struct PEBS_DISPATCH_NODE_S { + VOID (*initialize_threshold)(DTS_BUFFER_EXT); + U64 (*overflow)(S32, U64, U32); + VOID (*modify_ip)(void *, DRV_BOOL, U32); + VOID (*modify_tsc)(void *, U32); + U32 (*get_num_records_filled)(void); +}; + +typedef struct PEBS_INFO_NODE_S PEBS_INFO_NODE; +typedef PEBS_INFO_NODE *PEBS_INFO; +struct PEBS_INFO_NODE_S { + PEBS_DISPATCH pebs_dispatch; + U32 pebs_record_size; + U16 apebs_basic_offset; + U16 apebs_mem_offset; + U16 apebs_gpr_offset; + U16 apebs_xmm_offset; + U16 apebs_lbr_offset; +}; + +#define APEBS_RECORD_SIZE_MASK 0xFFFF000000000000ULL //[63:48] +#define APEBS_RECORD_FORMAT_MASK 0xFFFFFFFFFFFFULL //[47:0] +#define APEBS_MEM_RECORD_FORMAT_MASK 0x1ULL +#define APEBS_GPR_RECORD_FORMAT_MASK 0x2ULL +#define APEBS_XMM_RECORD_FORMAT_MASK 0x4ULL +#define APEBS_LBR_RECORD_FORMAT_MASK 0x8ULL + + +extern PEBS_DISPATCH_NODE core2_pebs; +extern PEBS_DISPATCH_NODE core2p_pebs; +extern PEBS_DISPATCH_NODE corei7_pebs; +extern PEBS_DISPATCH_NODE haswell_pebs; +extern PEBS_DISPATCH_NODE perfver4_pebs; +extern PEBS_DISPATCH_NODE perfver4_apebs; + +#endif diff --git a/drivers/platform/x86/sepdk/inc/perfver4.h b/drivers/platform/x86/sepdk/inc/perfver4.h new file mode 100644 index 0000000000000..74ecf54179dfc --- /dev/null +++ b/drivers/platform/x86/sepdk/inc/perfver4.h @@ -0,0 +1,51 @@ +/* **************************************************************************** + * Copyright(C) 2009-2018 Intel Corporation. 
All Rights Reserved. + * + * This file is part of SEP Development Kit + * + * SEP Development Kit is free software; you can redistribute it + * and/or modify it under the terms of the GNU General Public License + * version 2 as published by the Free Software Foundation. + * + * SEP Development Kit is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * As a special exception, you may use this file as part of a free software + * library without restriction. Specifically, if other files instantiate + * templates or use macros or inline functions from this file, or you + * compile this file and link it with other files to produce an executable + * this file does not by itself cause the resulting executable to be + * covered by the GNU General Public License. This exception does not + * however invalidate any other reasons why the executable file might be + * covered by the GNU General Public License. 
+ * **************************************************************************** + */ + +#ifndef _PERFVER4_H_ +#define _PERFVER4_H_ + +#include "msrdefs.h" + +extern DISPATCH_NODE perfver4_dispatch; +extern DISPATCH_NODE perfver4_dispatch_htoff_mode; +extern DISPATCH_NODE perfver4_dispatch_nonht_mode; + +#define PERFVER4_UNC_BLBYPASS_BITMASK 0x00000001 +#define PERFVER4_UNC_DISABLE_BL_BYPASS_MSR 0x39C + +#if defined(DRV_IA32) +#define PERFVER4_LBR_DATA_BITS 32 +#else +#define PERFVER4_LBR_DATA_BITS 57 +#endif + +#define PERFVER4_LBR_BITMASK ((1ULL << PERFVER4_LBR_DATA_BITS) - 1) + +#define PERFVER4_FROZEN_BIT_MASK 0xc00000000000000ULL +#define PERFVER4_OVERFLOW_BIT_MASK_HT_ON 0x600000070000000FULL +#define PERFVER4_OVERFLOW_BIT_MASK_HT_OFF 0x60000007000000FFULL +#define PERFVER4_OVERFLOW_BIT_MASK_NON_HT 0x6000000F000000FFULL + +#endif diff --git a/drivers/platform/x86/sepdk/inc/pmi.h b/drivers/platform/x86/sepdk/inc/pmi.h new file mode 100644 index 0000000000000..88b02b1a04b4e --- /dev/null +++ b/drivers/platform/x86/sepdk/inc/pmi.h @@ -0,0 +1,65 @@ +/* **************************************************************************** + * Copyright(C) 2009-2018 Intel Corporation. All Rights Reserved. + * + * This file is part of SEP Development Kit + * + * SEP Development Kit is free software; you can redistribute it + * and/or modify it under the terms of the GNU General Public License + * version 2 as published by the Free Software Foundation. + * + * SEP Development Kit is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * As a special exception, you may use this file as part of a free software + * library without restriction. 
Specifically, if other files instantiate + * templates or use macros or inline functions from this file, or you + * compile this file and link it with other files to produce an executable + * this file does not by itself cause the resulting executable to be + * covered by the GNU General Public License. This exception does not + * however invalidate any other reasons why the executable file might be + * covered by the GNU General Public License. + * **************************************************************************** + */ + +#ifndef _PMI_H_ +#define _PMI_H_ + +#include "lwpmudrv_defines.h" +#include <linux/version.h> +#include <linux/ptrace.h> + +#if defined(DRV_IA32) +#if KERNEL_VERSION(2, 6, 25) > LINUX_VERSION_CODE +#define REGS_xcs(regs) (regs->xcs) +#define REGS_eip(regs) (regs->eip) +#define REGS_eflags(regs) (regs->eflags) +#else +#define REGS_xcs(regs) (regs->cs) +#define REGS_eip(regs) (regs->ip) +#define REGS_eflags(regs) (regs->flags) +#endif +#endif + +#if defined(DRV_EM64T) +#define REGS_cs(regs) (regs->cs) + +#if KERNEL_VERSION(2, 6, 25) > LINUX_VERSION_CODE +#define REGS_rip(regs) (regs->rip) +#define REGS_eflags(regs) (regs->eflags) +#else +#define REGS_rip(regs) (regs->ip) +#define REGS_eflags(regs) (regs->flags) +#endif +#endif + +asmlinkage VOID PMI_Interrupt_Handler(struct pt_regs *regs); + +#if defined(DRV_SEP_ACRN_ON) +extern VOID PMI_Buffer_Handler(PVOID); +#endif + +extern U32 pmi_Get_CSD(U32, U32 *, U32 *); + +#endif diff --git a/drivers/platform/x86/sepdk/inc/sepdrv_p_state.h b/drivers/platform/x86/sepdk/inc/sepdrv_p_state.h new file mode 100644 index 0000000000000..2a20394c393f9 --- /dev/null +++ b/drivers/platform/x86/sepdk/inc/sepdrv_p_state.h @@ -0,0 +1,34 @@ +/* **************************************************************************** + * Copyright(C) 2009-2018 Intel Corporation. All Rights Reserved. 
+ * + * This file is part of SEP Development Kit + * + * SEP Development Kit is free software; you can redistribute it + * and/or modify it under the terms of the GNU General Public License + * version 2 as published by the Free Software Foundation. + * + * SEP Development Kit is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * As a special exception, you may use this file as part of a free software + * library without restriction. Specifically, if other files instantiate + * templates or use macros or inline functions from this file, or you + * compile this file and link it with other files to produce an executable + * this file does not by itself cause the resulting executable to be + * covered by the GNU General Public License. This exception does not + * however invalidate any other reasons why the executable file might be + * covered by the GNU General Public License. + * **************************************************************************** + */ + +#ifndef _SEPDRV_P_STATE_H_ +#define _SEPDRV_P_STATE_H_ + +#define DRV_APERF_MSR 0xE8 +#define DRV_MPERF_MSR 0xE7 + +extern OS_STATUS SEPDRV_P_STATE_Read(S8 *buffer, CPU_STATE pcpu); + +#endif diff --git a/drivers/platform/x86/sepdk/inc/silvermont.h b/drivers/platform/x86/sepdk/inc/silvermont.h new file mode 100644 index 0000000000000..4a35b1db5047e --- /dev/null +++ b/drivers/platform/x86/sepdk/inc/silvermont.h @@ -0,0 +1,41 @@ +/* **************************************************************************** + * Copyright(C) 2009-2018 Intel Corporation. All Rights Reserved. + * + * This file is part of SEP Development Kit + * + * SEP Development Kit is free software; you can redistribute it + * and/or modify it under the terms of the GNU General Public License + * version 2 as published by the Free Software Foundation. 
+ * + * SEP Development Kit is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * As a special exception, you may use this file as part of a free software + * library without restriction. Specifically, if other files instantiate + * templates or use macros or inline functions from this file, or you + * compile this file and link it with other files to produce an executable + * this file does not by itself cause the resulting executable to be + * covered by the GNU General Public License. This exception does not + * however invalidate any other reasons why the executable file might be + * covered by the GNU General Public License. + * **************************************************************************** + */ + +#ifndef _SILVERMONT_H_ +#define _SILVERMONT_H_ + +#include "msrdefs.h" +extern DISPATCH_NODE silvermont_dispatch; +extern DISPATCH_NODE knights_dispatch; + +#if defined(DRV_IA32) +#define SILVERMONT_LBR_DATA_BITS 32 +#else +#define SILVERMONT_LBR_DATA_BITS 48 +#endif + +#define SILVERMONT_LBR_BITMASK ((1ULL << SILVERMONT_LBR_DATA_BITS) - 1) + +#endif diff --git a/drivers/platform/x86/sepdk/inc/sys_info.h b/drivers/platform/x86/sepdk/inc/sys_info.h new file mode 100644 index 0000000000000..c5dd5621a58b9 --- /dev/null +++ b/drivers/platform/x86/sepdk/inc/sys_info.h @@ -0,0 +1,71 @@ +/* **************************************************************************** + * Copyright(C) 2009-2018 Intel Corporation. All Rights Reserved. + * + * This file is part of SEP Development Kit + * + * SEP Development Kit is free software; you can redistribute it + * and/or modify it under the terms of the GNU General Public License + * version 2 as published by the Free Software Foundation. 
+ * + * SEP Development Kit is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * As a special exception, you may use this file as part of a free software + * library without restriction. Specifically, if other files instantiate + * templates or use macros or inline functions from this file, or you + * compile this file and link it with other files to produce an executable + * this file does not by itself cause the resulting executable to be + * covered by the GNU General Public License. This exception does not + * however invalidate any other reasons why the executable file might be + * covered by the GNU General Public License. + * **************************************************************************** + */ + +#ifndef _SYS_INFO_H_ +#define _SYS_INFO_H_ + +#include "lwpmudrv_defines.h" + +#define KNIGHTS_FAMILY 0x06 +#define KNL_MODEL 0x57 +#define KNM_MODEL 0x85 + +#define is_Knights_family(family, model) \ + ((family == KNIGHTS_FAMILY) && \ + ((model == KNL_MODEL) || (model == KNM_MODEL))) + +typedef struct __generic_ioctl { + U32 size; + S32 ret; + U64 rsv[3]; +} GENERIC_IOCTL; + +#define GENERIC_IOCTL_size(gio) ((gio)->size) +#define GENERIC_IOCTL_ret(gio) ((gio)->ret) + +// +// This one is unusual in that it's really a variable +// size. The system_info field is just a easy way +// to access the base information, but the actual size +// when used tends to be much larger that what is +// shown here. 
+// +typedef struct __system_info { + GENERIC_IOCTL gen; + VTSA_SYS_INFO sys_info; +} IOCTL_SYS_INFO; + +extern U32 *cpu_built_sysinfo; + +#define IOCTL_SYS_INFO_gen(isi) ((isi)->gen) +#define IOCTL_SYS_INFO_sys_info(isi) ((isi)->sys_info) + +extern U32 SYS_INFO_Build(void); +extern void SYS_INFO_Transfer(PVOID buf_usr_to_drv, + unsigned long len_usr_to_drv); +extern void SYS_INFO_Destroy(void); +extern void SYS_INFO_Build_Cpu(PVOID param); + +#endif diff --git a/drivers/platform/x86/sepdk/inc/unc_common.h b/drivers/platform/x86/sepdk/inc/unc_common.h new file mode 100644 index 0000000000000..d1cc228982f08 --- /dev/null +++ b/drivers/platform/x86/sepdk/inc/unc_common.h @@ -0,0 +1,161 @@ +/* **************************************************************************** + * Copyright(C) 2009-2018 Intel Corporation. All Rights Reserved. + * + * This file is part of SEP Development Kit + * + * SEP Development Kit is free software; you can redistribute it + * and/or modify it under the terms of the GNU General Public License + * version 2 as published by the Free Software Foundation. + * + * SEP Development Kit is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * As a special exception, you may use this file as part of a free software + * library without restriction. Specifically, if other files instantiate + * templates or use macros or inline functions from this file, or you + * compile this file and link it with other files to produce an executable + * this file does not by itself cause the resulting executable to be + * covered by the GNU General Public License. This exception does not + * however invalidate any other reasons why the executable file might be + * covered by the GNU General Public License. 
+ * **************************************************************************** + */ + +#ifndef _UNC_COMMON_H_INC_ +#define _UNC_COMMON_H_INC_ + +#include "pci.h" + +#define DRV_IS_PCI_VENDOR_ID_INTEL 0x8086 +#define VENDOR_ID_MASK 0x0000FFFF +#define DEVICE_ID_MASK 0xFFFF0000 +#define DEVICE_ID_BITSHIFT 16 + +#define UNCORE_SOCKETID_UBOX_LNID_OFFSET 0x40 +#define UNCORE_SOCKETID_UBOX_GID_OFFSET 0x54 + +#define INVALID_BUS_NUMBER -1 +#define PCI_INVALID_VALUE 0xFFFFFFFF + +typedef struct DEVICE_CALLBACK_NODE_S DEVICE_CALLBACK_NODE; +typedef DEVICE_CALLBACK_NODE * DEVICE_CALLBACK; + +struct DEVICE_CALLBACK_NODE_S { + DRV_BOOL (*is_Valid_Device)(U32); + DRV_BOOL (*is_Valid_For_Write)(U32, U32); + DRV_BOOL (*is_Unit_Ctl)(U32); + DRV_BOOL (*is_PMON_Ctl)(U32); +}; + +#define MAX_PCIDEV_UNITS 16 +#define GET_MAX_PCIDEV_ENTRIES(num_pkg) \ + ((num_pkg > MAX_PCIDEV_UNITS) ? num_pkg : MAX_PCIDEV_UNITS) + +typedef struct UNC_PCIDEV_NODE_S UNC_PCIDEV_NODE; + +struct UNC_PCIDEV_NODE_S { + U32 num_entries; + U32 max_entries; + S32 *busno_list; // array for pcibus mapping + SEP_MMIO_NODE *mmio_map; // virtual memory mapping entries +}; + +#define UNC_PCIDEV_max_entries(x) ((x)->max_entries) +#define UNC_PCIDEV_num_entries(x) ((x)->num_entries) +#define UNC_PCIDEV_busno_list(x) ((x)->busno_list) +#define UNC_PCIDEV_busno_entry(x, entry) ((x)->busno_list[entry]) +#define UNC_PCIDEV_mmio_map(x) ((x)->mmio_map) +#define UNC_PCIDEV_mmio_map_entry(x, entry) ((x)->mmio_map[entry]) +#define UNC_PCIDEV_virtual_addr_entry(x, entry) \ + (SEP_MMIO_NODE_virtual_address(&UNC_PCIDEV_mmio_map_entry(x, entry))) + +#define UNC_PCIDEV_is_busno_valid(x, entry) \ + (((x)->busno_list) && ((x)->num_entries > (entry)) && \ + ((x)->busno_list[(entry)] != INVALID_BUS_NUMBER)) +#define UNC_PCIDEV_is_vaddr_valid(x, entry) \ + (((x)->mmio_map) && ((x)->num_entries > (entry)) && \ + ((x)->mmio_map[(entry)].virtual_address)) + +extern UNC_PCIDEV_NODE unc_pcidev_map[]; + +#define GET_BUS_MAP(dev_node, entry) \ 
+ (UNC_PCIDEV_busno_entry((&(unc_pcidev_map[dev_node])), entry)) +#define GET_NUM_MAP_ENTRIES(dev_node) \ + (UNC_PCIDEV_num_entries(&(unc_pcidev_map[dev_node]))) +#define IS_MMIO_MAP_VALID(dev_node, entry) \ + (UNC_PCIDEV_is_vaddr_valid((&(unc_pcidev_map[dev_node])), entry)) +#define IS_BUS_MAP_VALID(dev_node, entry) \ + (UNC_PCIDEV_is_busno_valid((&(unc_pcidev_map[dev_node])), entry)) +#define virtual_address_table(dev_node, entry) \ + (UNC_PCIDEV_virtual_addr_entry(&(unc_pcidev_map[dev_node]), entry)) + +extern OS_STATUS UNC_COMMON_Do_Bus_to_Socket_Map(U32 uncore_did, U32 dev_node, + U32 bus_no, U32 device_no, + U32 function_no); + +extern VOID UNC_COMMON_Dummy_Func(PVOID param); + +extern VOID UNC_COMMON_Read_Counts(PVOID param, U32 id); + +/************************************************************/ +/* + * UNC common PCI based API + * + ************************************************************/ + +extern VOID UNC_COMMON_PCI_Write_PMU(PVOID param, U32 ubox_did, U32 control_msr, + U32 ctl_val, U32 pci_dev_index, + DEVICE_CALLBACK callback); + +extern VOID UNC_COMMON_PCI_Enable_PMU(PVOID param, U32 control_msr, + U32 enable_val, U32 disable_val, + DEVICE_CALLBACK callback); + +extern VOID UNC_COMMON_PCI_Disable_PMU(PVOID param, U32 control_msr, + U32 enable_val, U32 disable_val, + DEVICE_CALLBACK callback); + +extern OS_STATUS UNC_COMMON_Add_Bus_Map(U32 uncore_did, U32 dev_node, + U32 bus_no); + +extern OS_STATUS UNC_COMMON_Init(void); + +extern VOID UNC_COMMON_Clean_Up(void); + +extern VOID UNC_COMMON_PCI_Trigger_Read(U32 id); + +extern VOID UNC_COMMON_PCI_Read_PMU_Data(PVOID param); + +extern VOID UNC_COMMON_PCI_Scan_For_Uncore(PVOID param, U32 dev_info_node, + DEVICE_CALLBACK callback); + +extern VOID UNC_COMMON_Get_Platform_Topology(U32 dev_info_node); + +/************************************************************/ +/* + * UNC common MSR based API + * + ************************************************************/ + +extern VOID 
UNC_COMMON_MSR_Write_PMU(PVOID param, U32 control_msr, + U64 control_val, U64 reset_val, + DEVICE_CALLBACK callback); + +extern VOID UNC_COMMON_MSR_Enable_PMU(PVOID param, U32 control_msr, + U64 control_val, U64 unit_ctl_val, + U64 pmon_ctl_val, + DEVICE_CALLBACK callback); + +extern VOID UNC_COMMON_MSR_Disable_PMU(PVOID param, U32 control_msr, + U64 unit_ctl_val, U64 pmon_ctl_val, + DEVICE_CALLBACK callback); + +extern VOID UNC_COMMON_MSR_Trigger_Read(U32 id); + +extern VOID UNC_COMMON_MSR_Read_PMU_Data(PVOID param); + +extern VOID UNC_COMMON_MSR_Clean_Up(PVOID param); + +#endif diff --git a/drivers/platform/x86/sepdk/inc/unc_gt.h b/drivers/platform/x86/sepdk/inc/unc_gt.h new file mode 100644 index 0000000000000..3e95db32cfa84 --- /dev/null +++ b/drivers/platform/x86/sepdk/inc/unc_gt.h @@ -0,0 +1,86 @@ +/* **************************************************************************** + * Copyright(C) 2009-2018 Intel Corporation. All Rights Reserved. + * + * This file is part of SEP Development Kit + * + * SEP Development Kit is free software; you can redistribute it + * and/or modify it under the terms of the GNU General Public License + * version 2 as published by the Free Software Foundation. + * + * SEP Development Kit is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * As a special exception, you may use this file as part of a free software + * library without restriction. Specifically, if other files instantiate + * templates or use macros or inline functions from this file, or you + * compile this file and link it with other files to produce an executable + * this file does not by itself cause the resulting executable to be + * covered by the GNU General Public License. 
This exception does not + * however invalidate any other reasons why the executable file might be + * covered by the GNU General Public License. + * **************************************************************************** + */ + +#ifndef _UNC_GT_H_INC_ +#define _UNC_GT_H_INC_ + +/* + * Local to this architecture: SNB uncore GT unit + * + */ +#define GT_MMIO_SIZE 0x200000 +#define NEXT_ADDR_OFFSET 4 +#define UNC_GT_BAR_MASK 0xFFF00000 +#define PERF_GLOBAL_CTRL 0x391 +#define GT_CLEAR_COUNTERS 0xFFFF0000 + +#define IA32_DEBUG_CTRL 0x1D9 +#define MAX_FREE_RUNNING_EVENTS 6 +#define GT_DID_1 0x102 +#define INTEL_VENDOR_ID 0x8086 +#define DRV_GET_PCI_VENDOR_ID(value) (value & 0x0000FFFF) +#define DRV_GET_PCI_DEVICE_ID(value) ((value & 0xFFFF0000) >> 16) +#define DRV_IS_INTEL_VENDOR_ID(value) (value == INTEL_VENDOR_ID) +#define DRV_IS_GT_DEVICE_ID(value) (value == GT_DID_1) + +//clock gating disable values +#define UNC_GT_GCPUNIT_REG1 0x9400 +#define UNC_GT_GCPUNIT_REG2 0x9404 +#define UNC_GT_GCPUNIT_REG3 0x9408 +#define UNC_GT_GCPUNIT_REG4 0x940c +#define UNC_GT_GCPUNIT_REG1_VALUE 0xffffffff +#define UNC_GT_GCPUNIT_REG2_VALUE 0xffffffff +#define UNC_GT_GCPUNIT_REG3_VALUE 0xffe3ffff +#define UNC_GT_GCPUNIT_REG4_VALUE 0x00000003 +//RC6 disable +#define UNC_GT_RC6_REG1 0xa090 +#define UNC_GT_RC6_REG2 0xa094 +#define UNC_GT_RC6_REG1_OR_VALUE 0x80000000 +#define UNC_GT_RC6_REG2_VALUE 0x00000000 +extern DISPATCH_NODE unc_gt_dispatch; + +typedef struct GT_CTR_NODE_S GT_CTR_NODE; +typedef GT_CTR_NODE * GT_CTR; +struct GT_CTR_NODE_S { + union { + struct { + U32 low : 32; + U32 high : 12; + } bits; + U64 value; + } u; +}; + +#define GT_CTR_NODE_value(x) (x.u.value) +#define GT_CTR_NODE_low(x) (x.u.bits.low) +#define GT_CTR_NODE_high(x) (x.u.bits.high) +#define GT_CTR_NODE_value_reset(x) x.u.value = 0 + +#define DRV_WRITE_PCI_REG_ULONG(va, offset_delta, value) \ + writel(value, (void __iomem *)((char *)(va + offset_delta))) +#define DRV_READ_PCI_REG_ULONG(va, offset_delta) \ + 
readl((void __iomem *)(char *)(va + offset_delta)) + +#endif diff --git a/drivers/platform/x86/sepdk/inc/utility.h b/drivers/platform/x86/sepdk/inc/utility.h new file mode 100644 index 0000000000000..d470a656a4a11 --- /dev/null +++ b/drivers/platform/x86/sepdk/inc/utility.h @@ -0,0 +1,637 @@ +/* **************************************************************************** + * Copyright(C) 2009-2018 Intel Corporation. All Rights Reserved. + * + * This file is part of SEP Development Kit + * + * SEP Development Kit is free software; you can redistribute it + * and/or modify it under the terms of the GNU General Public License + * version 2 as published by the Free Software Foundation. + * + * SEP Development Kit is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * As a special exception, you may use this file as part of a free software + * library without restriction. Specifically, if other files instantiate + * templates or use macros or inline functions from this file, or you + * compile this file and link it with other files to produce an executable + * this file does not by itself cause the resulting executable to be + * covered by the GNU General Public License. This exception does not + * however invalidate any other reasons why the executable file might be + * covered by the GNU General Public License. 
+ * **************************************************************************** + */ + +#ifndef _UTILITY_H_ +#define _UTILITY_H_ + +/** +// Data Types and Macros +*/ +#pragma pack(push, 1) + +#pragma pack(pop) + +/* + * Declarations + */ +extern DISPATCH_NODE unc_msr_dispatch; +extern DISPATCH_NODE unc_pci_dispatch; +extern DISPATCH_NODE unc_mmio_dispatch; +extern DISPATCH_NODE unc_mmio_fpga_dispatch; +extern DISPATCH_NODE unc_power_dispatch; + +/* + * These routines have macros defined in asm/system.h + */ +#define SYS_Local_Irq_Enable() local_irq_enable() +#define SYS_Local_Irq_Disable() local_irq_disable() +#define SYS_Local_Irq_Save(flags) local_irq_save(flags) +#define SYS_Local_Irq_Restore(flags) local_irq_restore(flags) + +#include + +#define SYS_MMIO_Read32(base, offset) \ + ((base) ? readl((void __iomem *)(base) + (offset)) : 0) +extern U64 SYS_MMIO_Read64(U64 baseAddress, U64 offset); + +extern U64 SYS_Read_MSR(U32 msr); + +extern void SYS_Write_MSR(U32 msr, U64 val); + +#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 14, 0) || \ + (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 5, 0) && \ + defined(CONFIG_UIDGID_STRICT_TYPE_CHECKS)) +#define DRV_GET_UID(p) (p->cred->uid.val) +#elif LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 29) +#define DRV_GET_UID(p) (p->cred->uid) +#else +#define DRV_GET_UID(p) (p->uid) +#endif + +extern void SYS_Perfvec_Handler(void); + +extern void *SYS_get_stack_ptr0(void); +extern void *SYS_get_stack_ptr3(void); +extern void *SYS_get_user_fp(void); +extern short SYS_Get_cs(void); + +#if defined(DRV_IA32) +extern void *SYS_Get_IDT_Base_HWR(void); /// IDT base from hardware IDTR +extern void *SYS_Get_GDT_Base_HWR(void); /// GDT base from hardware GDTR +extern U64 SYS_Get_TSC(void); + +#define SYS_Get_IDT_Base SYS_Get_IDT_Base_HWR +#define SYS_Get_GDT_Base SYS_Get_GDT_Base_HWR +#endif + +#if defined(DRV_EM64T) +extern unsigned short SYS_Get_Code_Selector0(void); +extern void SYS_Get_IDT_Base(void **); +extern void SYS_Get_GDT_Base(void **); 
+#endif + +extern void SYS_IO_Delay(void); +#define SYS_Inb(port) inb(port) +#define SYS_Outb(byte, port) outb(byte, port) + +/* typedef int OSSTATUS; */ + +/* + * Lock implementations + */ +#define SYS_Locked_Inc(var) atomic_inc((var)) +#define SYS_Locked_Dec(var) atomic_dec((var)) + +extern void UTILITY_Read_TSC(U64 *pTsc); + +extern void UTILITY_down_read_mm(struct mm_struct *mm); + +extern void UTILITY_up_read_mm(struct mm_struct *mm); + +extern void UTILITY_Read_Cpuid(U64 cpuid_function, U64 *rax_value, + U64 *rbx_value, U64 *rcx_value, U64 *rdx_value); + +extern DISPATCH UTILITY_Configure_CPU(U32); + +#if defined(DRV_IA32) +asmlinkage void SYS_Get_CSD(U32, U32 *, U32 *); +#endif + +#if defined(BUILD_CHIPSET) +extern CS_DISPATCH UTILITY_Configure_Chipset(void); +#endif + +/* ------------------------------------------------------------------------- */ +/*! + * @fn extern unsigned long UTILITY_Find_Symbol (const char* name) + * + * @brief Finds the address of the specified kernel symbol. + * + * @param const char* name - name of the symbol to look for + * + * @return Symbol address (0 if could not find) + * + * Special Notes: + * This wrapper is needed due to kallsyms_lookup_name not being exported + * in kernel version 2.6.32.*. + * Careful! This code is *NOT* multithread-safe or reentrant! Should only + * be called from 1 context at a time! 
+ */ +extern unsigned long UTILITY_Find_Symbol(const char *name); + +/************************************************************************/ +/*********************** DRIVER LOG DECLARATIONS ************************/ +/************************************************************************/ + +#define DRV_LOG_COMPILER_MEM_BARRIER() { asm volatile("" : : : "memory"); } + +#define DRV_LOG_DEFAULT_LOAD_VERBOSITY (LOG_CHANNEL_MOSTWHERE | LOG_CONTEXT_ALL) +#define DRV_LOG_DEFAULT_INIT_VERBOSITY \ + (LOG_CHANNEL_MEMLOG | LOG_CHANNEL_AUXMEMLOG | LOG_CONTEXT_ALL) +#define DRV_LOG_DEFAULT_DETECTION_VERBOSITY (DRV_LOG_DEFAULT_INIT_VERBOSITY) +#define DRV_LOG_DEFAULT_ERROR_VERBOSITY \ + (LOG_CHANNEL_MOSTWHERE | LOG_CONTEXT_ALL) +#define DRV_LOG_DEFAULT_STATE_CHANGE_VERBOSITY (DRV_LOG_DEFAULT_INIT_VERBOSITY) +#define DRV_LOG_DEFAULT_MARK_VERBOSITY (LOG_CHANNEL_MOSTWHERE | LOG_CONTEXT_ALL) +#define DRV_LOG_DEFAULT_DEBUG_VERBOSITY \ + (LOG_CHANNEL_MEMLOG | LOG_CHANNEL_PRINTK | LOG_CONTEXT_ALL) +#define DRV_LOG_DEFAULT_FLOW_VERBOSITY \ + (LOG_CHANNEL_MEMLOG | LOG_CHANNEL_AUXMEMLOG | LOG_CONTEXT_ALL) +#define DRV_LOG_DEFAULT_ALLOC_VERBOSITY (LOG_VERBOSITY_NONE) +#define DRV_LOG_DEFAULT_INTERRUPT_VERBOSITY \ + (LOG_CHANNEL_MEMLOG | LOG_CONTEXT_ALL) +#define DRV_LOG_DEFAULT_TRACE_VERBOSITY (LOG_VERBOSITY_NONE) +#define DRV_LOG_DEFAULT_REGISTER_VERBOSITY (LOG_VERBOSITY_NONE) +#define DRV_LOG_DEFAULT_NOTIFICATION_VERBOSITY \ + (LOG_CHANNEL_MEMLOG | LOG_CONTEXT_ALL) +#define DRV_LOG_DEFAULT_WARNING_VERBOSITY \ + (LOG_CHANNEL_MOSTWHERE | LOG_CONTEXT_ALL) + +/* ------------------------------------------------------------------------- */ +/*! + * @fn extern void UTILITY_Log (U8 category, U8 in_notification, U8 secondary, + * const char* function_name, U32 func_name_len, + * U32 line_number, const char* format_string, ...) + * + * @brief Checks whether and where the message should be logged, + * and logs it as appropriate. 
+ * + * @param U8 category - message category + * U8 in_notification - whether or not we are in a notification/OS + * callback context (information cannot be reliably obtained without passing + * it through the stack) + * U8 secondary - secondary information field for the message + * const char* function_name - name of the calling function + * U32 func_name_len - length of the name of the calling function + * (more efficient to pass it as parameter than finding it back at runtime) + * U32 line_number - line number of the call site + * const char* format_string - classical format string for + * printf-like functions + * ... - elements to print + * + * @return none + * + * Special Notes: + * Used to keep track of the IOCTL operation currently being processed. + * This information is saved in the log buffer (globally), as well as + * in every log entry. + * NB: only IOCTLs for which grabbing the ioctl mutex is necessary + * should be kept track of this way. + */ +extern VOID UTILITY_Log(U8 category, U8 in_notification, U8 secondary, + const char *function_name, U32 func_name_len, + U32 line_number, const char *format_string, ...); + +/* ------------------------------------------------------------------------- */ +/*! + * @fn extern DRV_STATUS UTILITY_Driver_Log_Init (void) + * + * @brief Allocates and initializes the driver log buffer. + * + * @param none + * + * @return OS_SUCCESS on success, OS_NO_MEM on error. + * + * Special Notes: + * Should be (successfully) run before any non-LOAD log calls. + * Allocates memory without going through CONTROL_Allocate (to avoid + * complicating the instrumentation of CONTROL_* functions): calling + * UTILITY_Driver_Log_Free is necessary to free the log structure. + * Falls back to vmalloc when contiguous physical memory cannot be + * allocated. This does not impact runtime behavior, but may impact + * the easiness of retrieving the log from a core dump if the system + * crashes. 
+ */ +extern DRV_STATUS UTILITY_Driver_Log_Init(void); + +/* ------------------------------------------------------------------------- */ +/*! + * @fn extern DRV_STATUS UTILITY_Driver_Log_Free (void) + * + * @brief Frees the driver log buffer. + * + * @param none + * + * @return OS_SUCCESS on success, OS_NO_MEM on error. + * + * Special Notes: + * Should be done before unloading the driver. + * See UTILITY_Driver_Log_Init for details. + */ +extern void UTILITY_Driver_Log_Free(void); + +/* ------------------------------------------------------------------------- */ +/*! + * @fn extern void UTILITY_Driver_Set_Active_Ioctl (U32 ioctl) + * + * @brief Sets the 'active_ioctl' global to the specified value. + * + * @param U32 ioctl - ioctl/drvop code to use + * + * @return none + * + * Special Notes: + * Used to keep track of the IOCTL operation currently being processed. + * This information is saved in the log buffer (globally), as well as + * in every log entry. + * NB: only IOCTLs for which grabbing the ioctl mutex is necessary + * should be kept track of this way. + */ +extern void UTILITY_Driver_Set_Active_Ioctl(U32); + +/* ------------------------------------------------------------------------- */ +/*! + * @fn extern const char** UTILITY_Log_Category_Strings (void) + * + * @brief Accessor function for the log category string array + * + * @param none + * + * @return none + * + * Special Notes: + * Only needed for cosmetic purposes when adjusting category verbosities. + */ +extern const char **UTILITY_Log_Category_Strings(void); + +extern DRV_LOG_BUFFER driver_log_buffer; +extern volatile U8 active_ioctl; + +#define DRV_LOG() driver_log_buffer +#define DRV_LOG_VERBOSITY(category) \ + ((DRV_LOG_BUFFER_verbosities(DRV_LOG()))[category]) +#define SEP_IN_NOTIFICATION 1 + +#define SEP_DRV_RAW_LOG(category, in_notification, second, message, ...) 
\ + UTILITY_Log(category, in_notification, second, __func__, \ + sizeof(__func__), __LINE__, message, ##__VA_ARGS__) +#define SEP_DRV_ULK_LOG(category, in_notification, second, message, ...) \ + UTILITY_Log(category, in_notification, second, __func__, \ + sizeof(__func__), __LINE__, message, ##__VA_ARGS__) + +#define SEP_DRV_LOG_INCREMENT_NB_ACTIVE_INTERRUPTS() \ + do { \ + __sync_fetch_and_add( \ + &DRV_LOG_BUFFER_nb_active_interrupts(DRV_LOG()), 1); \ + __sync_fetch_and_add( \ + &DRV_LOG_BUFFER_nb_interrupts(DRV_LOG()), 1); \ + } while (0) + +#define SEP_DRV_LOG_DECREMENT_NB_ACTIVE_INTERRUPTS() \ + do { \ + __sync_fetch_and_add( \ + &DRV_LOG_BUFFER_nb_active_interrupts(DRV_LOG()), -1); \ + } while (0) + +#define SEP_DRV_LOG_INCREMENT_NB_ACTIVE_NOTIFICATIONS() \ + do { \ + __sync_fetch_and_add( \ + &DRV_LOG_BUFFER_nb_active_notifications(DRV_LOG()), 1); \ + __sync_fetch_and_add( \ + &DRV_LOG_BUFFER_nb_notifications(DRV_LOG()), 1); \ + } while (0) + +#define SEP_DRV_LOG_DECREMENT_NB_ACTIVE_NOTIFICATIONS() \ + do { \ + __sync_fetch_and_add( \ + &DRV_LOG_BUFFER_nb_active_notifications(DRV_LOG()), -1);\ + } while (0) + +#define SEP_DRV_LOG_INCREMENT_NB_STATE_TRANSITIONS() \ + do { \ + __sync_fetch_and_add( \ + &DRV_LOG_BUFFER_nb_driver_state_transitions(DRV_LOG()), 1); \ + } while (0) + +#define SEP_DRV_LOG_DISAMBIGUATE() \ + do { \ + __sync_fetch_and_add( \ + &DRV_LOG_BUFFER_disambiguator(DRV_LOG()), 1); \ + } while (0) + +/************************************************************************/ +/************************** CATEGORY LOG APIs ***************************/ +/************************************************************************/ + +// ERROR, WARNING and LOAD are always compiled in... +#define SEP_DRV_LOG_ERROR(message, ...) \ + SEP_DRV_RAW_LOG(DRV_LOG_CATEGORY_ERROR, 0, DRV_LOG_NOTHING, message, \ + ##__VA_ARGS__) +#define SEP_DRV_LOG_WARNING(message, ...) 
\ + SEP_DRV_RAW_LOG(DRV_LOG_CATEGORY_WARNING, 0, DRV_LOG_NOTHING, message, \ + ##__VA_ARGS__) +#define SEP_DRV_LOG_NOTIFICATION_ERROR(in_notif, message, ...) \ + SEP_DRV_RAW_LOG(DRV_LOG_CATEGORY_ERROR, in_notif, DRV_LOG_NOTHING, \ + message, ##__VA_ARGS__) +#define SEP_DRV_LOG_NOTIFICATION_WARNING(in_notif, message, ...) \ + SEP_DRV_RAW_LOG(DRV_LOG_CATEGORY_WARNING, in_notif, DRV_LOG_NOTHING, \ + message, ##__VA_ARGS__) +#define SEP_DRV_LOG_LOAD(message, ...) \ + do { \ + if (DRV_LOG()) { \ + SEP_DRV_RAW_LOG(DRV_LOG_CATEGORY_LOAD, 0, \ + DRV_LOG_NOTHING, message, \ + ##__VA_ARGS__); \ + } else if (DRV_LOG_DEFAULT_LOAD_VERBOSITY & \ + LOG_CHANNEL_PRINTK) { \ + printk(KERN_ERR SEP_MSG_PREFIX " " message "\n", \ + ##__VA_ARGS__); \ + } \ + } while (0) + +#if defined(DRV_MINIMAL_LOGGING) // MINIMAL LOGGING MODE +#define SEP_DRV_LOG_INIT(message, ...) \ + { \ + } +#define SEP_DRV_LOG_INIT_IN(message, ...) \ + { \ + } +#define SEP_DRV_LOG_INIT_OUT(message, ...) \ + { \ + } +#define SEP_DRV_LOG_DETECTION(message, ...) \ + { \ + } +#define SEP_DRV_LOG_MARK(message, ...) \ + { \ + } +#define SEP_DRV_LOG_DEBUG(message, ...) \ + { \ + } +#define SEP_DRV_LOG_DEBUG_IN(message, ...) \ + { \ + } +#define SEP_DRV_LOG_DEBUG_OUT(message, ...) \ + { \ + } +#define SEP_DRV_LOG_FLOW_IN(message, ...) \ + { \ + } +#define SEP_DRV_LOG_FLOW_OUT(message, ...) \ + { \ + } +#define SEP_DRV_LOG_ALLOC(message, ...) \ + { \ + } +#define SEP_DRV_LOG_ALLOC_IN(message, ...) \ + { \ + } +#define SEP_DRV_LOG_ALLOC_OUT(message, ...) \ + { \ + } +#define SEP_DRV_LOG_INTERRUPT_IN(message, ...) \ + SEP_DRV_LOG_INCREMENT_NB_ACTIVE_INTERRUPTS(); +#define SEP_DRV_LOG_INTERRUPT_OUT(message, ...) \ + SEP_DRV_LOG_DECREMENT_NB_ACTIVE_INTERRUPTS(); +#define SEP_DRV_LOG_NOTIFICATION_IN(message, ...) \ + SEP_DRV_LOG_INCREMENT_NB_ACTIVE_NOTIFICATIONS(); +#define SEP_DRV_LOG_NOTIFICATION_OUT(message, ...) 
\ + SEP_DRV_LOG_DECREMENT_NB_ACTIVE_NOTIFICATIONS(); +#define SEP_DRV_LOG_STATE_TRANSITION(former_state, new_state, message, ...) \ + { \ + (void)former_state; \ + SEP_DRV_LOG_INCREMENT_NB_STATE_TRANSITIONS(); \ + DRV_LOG_BUFFER_driver_state(DRV_LOG()) = new_state; \ + } +#else // REGULAR LOGGING MODE (PART 1 / 2) +#define SEP_DRV_LOG_INIT(message, ...) \ + SEP_DRV_RAW_LOG(DRV_LOG_CATEGORY_INIT, 0, DRV_LOG_NOTHING, message, \ + ##__VA_ARGS__) +#define SEP_DRV_LOG_INIT_IN(message, ...) \ + SEP_DRV_RAW_LOG(DRV_LOG_CATEGORY_INIT, 0, DRV_LOG_FLOW_IN, message, \ + ##__VA_ARGS__) +#define SEP_DRV_LOG_INIT_OUT(message, ...) \ + SEP_DRV_RAW_LOG(DRV_LOG_CATEGORY_INIT, 0, DRV_LOG_FLOW_OUT, message, \ + ##__VA_ARGS__) +#define SEP_DRV_LOG_DETECTION(message, ...) \ + SEP_DRV_RAW_LOG(DRV_LOG_CATEGORY_DETECTION, 0, DRV_LOG_NOTHING, \ + message, ##__VA_ARGS__) +#define SEP_DRV_LOG_MARK(message, ...) \ + SEP_DRV_RAW_LOG(DRV_LOG_CATEGORY_MARK, 0, DRV_LOG_NOTHING, message, \ + ##__VA_ARGS__) +#define SEP_DRV_LOG_DEBUG(message, ...) \ + SEP_DRV_RAW_LOG(DRV_LOG_CATEGORY_DEBUG, 0, DRV_LOG_NOTHING, message, \ + ##__VA_ARGS__) +#define SEP_DRV_LOG_DEBUG_IN(message, ...) \ + SEP_DRV_RAW_LOG(DRV_LOG_CATEGORY_DEBUG, 0, DRV_LOG_FLOW_IN, message, \ + ##__VA_ARGS__) +#define SEP_DRV_LOG_DEBUG_OUT(message, ...) \ + SEP_DRV_RAW_LOG(DRV_LOG_CATEGORY_DEBUG, 0, DRV_LOG_FLOW_OUT, message, \ + ##__VA_ARGS__) +#define SEP_DRV_LOG_FLOW_IN(message, ...) \ + SEP_DRV_RAW_LOG(DRV_LOG_CATEGORY_FLOW, 0, DRV_LOG_FLOW_IN, message, \ + ##__VA_ARGS__) +#define SEP_DRV_LOG_FLOW_OUT(message, ...) \ + SEP_DRV_RAW_LOG(DRV_LOG_CATEGORY_FLOW, 0, DRV_LOG_FLOW_OUT, message, \ + ##__VA_ARGS__) +#define SEP_DRV_LOG_ALLOC(message, ...) \ + SEP_DRV_ULK_LOG(DRV_LOG_CATEGORY_ALLOC, 0, DRV_LOG_NOTHING, message, \ + ##__VA_ARGS__) +#define SEP_DRV_LOG_ALLOC_IN(message, ...) \ + SEP_DRV_ULK_LOG(DRV_LOG_CATEGORY_ALLOC, 0, DRV_LOG_FLOW_IN, message, \ + ##__VA_ARGS__) +#define SEP_DRV_LOG_ALLOC_OUT(message, ...) 
\ + SEP_DRV_ULK_LOG(DRV_LOG_CATEGORY_ALLOC, 0, DRV_LOG_FLOW_OUT, message, \ + ##__VA_ARGS__) +#define SEP_DRV_LOG_INTERRUPT_IN(message, ...) \ + { \ + SEP_DRV_LOG_INCREMENT_NB_ACTIVE_INTERRUPTS(); \ + SEP_DRV_RAW_LOG(DRV_LOG_CATEGORY_INTERRUPT, 0, \ + DRV_LOG_FLOW_IN, message, ##__VA_ARGS__); \ + } + +#define SEP_DRV_LOG_INTERRUPT_OUT(message, ...) \ + { \ + SEP_DRV_RAW_LOG(DRV_LOG_CATEGORY_INTERRUPT, 0, \ + DRV_LOG_FLOW_OUT, message, ##__VA_ARGS__); \ + SEP_DRV_LOG_DECREMENT_NB_ACTIVE_INTERRUPTS(); \ + } + +#define SEP_DRV_LOG_NOTIFICATION_IN(message, ...) \ + { \ + SEP_DRV_LOG_INCREMENT_NB_ACTIVE_NOTIFICATIONS(); \ + SEP_DRV_RAW_LOG(DRV_LOG_CATEGORY_NOTIFICATION, 1, \ + DRV_LOG_FLOW_IN, message, ##__VA_ARGS__); \ + } + +#define SEP_DRV_LOG_NOTIFICATION_OUT(message, ...) \ + { \ + SEP_DRV_RAW_LOG(DRV_LOG_CATEGORY_NOTIFICATION, 1, \ + DRV_LOG_FLOW_OUT, message, ##__VA_ARGS__); \ + SEP_DRV_LOG_DECREMENT_NB_ACTIVE_NOTIFICATIONS(); \ + } + +#define SEP_DRV_LOG_STATE_TRANSITION(former_state, new_state, message, ...) \ + { \ + SEP_DRV_LOG_INCREMENT_NB_STATE_TRANSITIONS(); \ + DRV_LOG_BUFFER_driver_state(DRV_LOG()) = new_state; \ + SEP_DRV_RAW_LOG(DRV_LOG_CATEGORY_STATE_CHANGE, 0, \ + ((U8)former_state << 4) | ((U8)new_state & 0xF), \ + message, ##__VA_ARGS__); \ + } + +#endif + +#if defined(DRV_MAXIMAL_LOGGING) // MAXIMAL LOGGING MODE +#define SEP_DRV_LOG_TRACE(message, ...) \ + SEP_DRV_ULK_LOG(DRV_LOG_CATEGORY_TRACE, 0, DRV_LOG_NOTHING, message, \ + ##__VA_ARGS__) +#define SEP_DRV_LOG_TRACE_IN(message, ...) \ + SEP_DRV_ULK_LOG(DRV_LOG_CATEGORY_TRACE, 0, DRV_LOG_FLOW_IN, message, \ + ##__VA_ARGS__) +#define SEP_DRV_LOG_TRACE_OUT(message, ...) \ + SEP_DRV_ULK_LOG(DRV_LOG_CATEGORY_TRACE, 0, DRV_LOG_FLOW_OUT, message, \ + ##__VA_ARGS__) +#define SEP_DRV_LOG_REGISTER_IN(message, ...) \ + SEP_DRV_ULK_LOG(DRV_LOG_CATEGORY_REGISTER, 0, DRV_LOG_FLOW_IN, \ + message, ##__VA_ARGS__) +#define SEP_DRV_LOG_REGISTER_OUT(message, ...) 
\ + SEP_DRV_ULK_LOG(DRV_LOG_CATEGORY_REGISTER, 0, DRV_LOG_FLOW_OUT, \ + message, ##__VA_ARGS__) +#define SEP_DRV_LOG_NOTIFICATION_TRACE(in_notif, message, ...) \ + SEP_DRV_ULK_LOG(DRV_LOG_CATEGORY_TRACE, in_notif, DRV_LOG_NOTHING, \ + message, ##__VA_ARGS__) +#define SEP_DRV_LOG_NOTIFICATION_TRACE_IN(in_notif, message, ...) \ + SEP_DRV_ULK_LOG(DRV_LOG_CATEGORY_TRACE, in_notif, DRV_LOG_FLOW_IN, \ + message, ##__VA_ARGS__) +#define SEP_DRV_LOG_NOTIFICATION_TRACE_OUT(in_notif, message, ...) \ + SEP_DRV_ULK_LOG(DRV_LOG_CATEGORY_TRACE, in_notif, DRV_LOG_FLOW_OUT, \ + message, ##__VA_ARGS__) +#else // REGULAR LOGGING MODE (PART 2 / 2) +#define SEP_DRV_LOG_TRACE(message, ...) \ + { \ + } +#define SEP_DRV_LOG_TRACE_IN(message, ...) \ + { \ + } +#define SEP_DRV_LOG_TRACE_OUT(message, ...) \ + { \ + } +#define SEP_DRV_LOG_REGISTER_IN(message, ...) \ + { \ + } +#define SEP_DRV_LOG_REGISTER_OUT(message, ...) \ + { \ + } +#define SEP_DRV_LOG_NOTIFICATION_TRACE(in_notif, message, ...) \ + { \ + } +#define SEP_DRV_LOG_NOTIFICATION_TRACE_IN(in_notif, message, ...) \ + { \ + } +#define SEP_DRV_LOG_NOTIFICATION_TRACE_OUT(in_notif, message, ...) \ + { \ + } +#endif + +/************************************************************************/ +/************************* FACILITATOR MACROS ***************************/ +/************************************************************************/ + +#define SEP_DRV_LOG_ERROR_INIT_OUT(message, ...) \ + { \ + SEP_DRV_LOG_ERROR(message, ##__VA_ARGS__); \ + SEP_DRV_LOG_INIT_OUT(message, ##__VA_ARGS__); \ + } + +#define SEP_DRV_LOG_ERROR_FLOW_OUT(message, ...) \ + { \ + SEP_DRV_LOG_ERROR(message, ##__VA_ARGS__); \ + SEP_DRV_LOG_FLOW_OUT(message, ##__VA_ARGS__); \ + } +#define SEP_DRV_LOG_ERROR_TRACE_OUT(message, ...) \ + { \ + SEP_DRV_LOG_ERROR(message, ##__VA_ARGS__); \ + SEP_DRV_LOG_TRACE_OUT(message, ##__VA_ARGS__); \ + } +#define SEP_DRV_LOG_ERROR_ALLOC_OUT(message, ...) 
\ + { \ + SEP_DRV_LOG_ERROR(message, ##__VA_ARGS__); \ + SEP_DRV_LOG_ALLOC_OUT(message, ##__VA_ARGS__); \ + } + +#define SEP_DRV_LOG_WARNING_FLOW_OUT(message, ...) \ + { \ + SEP_DRV_LOG_WARNING(message, ##__VA_ARGS__); \ + SEP_DRV_LOG_FLOW_OUT(message, ##__VA_ARGS__); \ + } + +#define SEP_DRV_LOG_WARNING_TRACE_OUT(message, ...) \ + { \ + SEP_DRV_LOG_WARNING(message, ##__VA_ARGS__); \ + SEP_DRV_LOG_TRACE_OUT(message, ##__VA_ARGS__); \ + } +#define SEP_DRV_LOG_WARNING_ALLOC_OUT(message, ...) \ + { \ + SEP_DRV_LOG_WARNING(message, ##__VA_ARGS__); \ + SEP_DRV_LOG_ALLOC_OUT(message, ##__VA_ARGS__); \ + } + +#define SEP_DRV_LOG_INIT_TRACE_OUT(message, ...) \ + { \ + SEP_DRV_LOG_INIT(message, ##__VA_ARGS__); \ + SEP_DRV_LOG_TRACE_OUT(message, ##__VA_ARGS__); \ + } + +#define SEP_DRV_LOG_WARNING_NOTIFICATION_OUT(message, ...) \ + { \ + SEP_DRV_LOG_WARNING(message, ##__VA_ARGS__); \ + SEP_DRV_LOG_NOTIFICATION_OUT(message, ##__VA_ARGS__); \ + } + + +/************************************************************************/ +/************************* DRIVER STATE MACROS **************************/ +/************************************************************************/ + +/* ------------------------------------------------------------------------- */ +/*! + * @fn extern U32 UTILITY_Change_Driver_State (U32 allowed_prior_states, + * U32 state, const char* func, U32 line_number) + * + * @brief Updates the driver state (if the transition is legal). 
+ * + * @param U32 allowed_prior_states - the bitmask representing the states + * from which the transition is allowed to occur + * U32 state - the destination state + * const char* func - the callsite's function's name + * U32 line_number - the callsite's line number + * + * @return 1 in case of success, 0 otherwise + * + * Special Notes: + * + */ +extern U32 UTILITY_Change_Driver_State(U32 allowed_prior_states, U32 state, + const char *func, U32 line_number); + +#define GET_DRIVER_STATE() GLOBAL_STATE_current_phase(driver_state) +#define CHANGE_DRIVER_STATE(allowed_prior_states, state) \ + UTILITY_Change_Driver_State(allowed_prior_states, state, __func__, \ + __LINE__) +#define DRIVER_STATE_IN(state, states) \ + (!!(MATCHING_STATE_BIT(state) & (states))) + +#endif diff --git a/drivers/platform/x86/sepdk/inc/valleyview_sochap.h b/drivers/platform/x86/sepdk/inc/valleyview_sochap.h new file mode 100644 index 0000000000000..18214ea3ca763 --- /dev/null +++ b/drivers/platform/x86/sepdk/inc/valleyview_sochap.h @@ -0,0 +1,60 @@ +/* **************************************************************************** + * Copyright(C) 2009-2018 Intel Corporation. All Rights Reserved. + * + * This file is part of SEP Development Kit + * + * SEP Development Kit is free software; you can redistribute it + * and/or modify it under the terms of the GNU General Public License + * version 2 as published by the Free Software Foundation. + * + * SEP Development Kit is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * As a special exception, you may use this file as part of a free software + * library without restriction. 
Specifically, if other files instantiate + * templates or use macros or inline functions from this file, or you + * compile this file and link it with other files to produce an executable + * this file does not by itself cause the resulting executable to be + * covered by the GNU General Public License. This exception does not + * however invalidate any other reasons why the executable file might be + * covered by the GNU General Public License. + * **************************************************************************** + */ + +#ifndef _VALLEYVIEW_SOCHAP_H_INC_ +#define _VALLEYVIEW_SOCHAP_H_INC_ + +/* + * Local to this architecture: Valleyview uncore SA unit + * + */ +#define VLV_VISA_DESKTOP_DID 0x000C04 +#define VLV_VISA_NEXT_ADDR_OFFSET 4 +#define VLV_VISA_BAR_ADDR_SHIFT 32 +#define VLV_VISA_BAR_ADDR_MASK 0x000FFFC00000LL +#define VLV_VISA_MAX_PCI_DEVICES 16 +#define VLV_VISA_MCR_REG_OFFSET 0xD0 +#define VLV_VISA_MDR_REG_OFFSET 0xD4 +#define VLV_VISA_MCRX_REG_OFFSET 0xD8 +#define VLV_VISA_BYTE_ENABLES 0xF +#define VLV_VISA_OP_CODE_SHIFT 24 +#define VLV_VISA_PORT_ID_SHIFT 16 +#define VLV_VISA_OFFSET_HI_MASK 0xFF +#define VLV_VISA_OFFSET_LO_MASK 0xFF +#define VLV_CHAP_SIDEBAND_PORT_ID 23 +#define VLV_CHAP_SIDEBAND_WRITE_OP_CODE 1 +#define VLV_CHAP_SIDEBAND_READ_OP_CODE 0 +#define VLV_CHAP_MAX_COUNTERS 8 +#define VLV_CHAP_MAX_COUNT 0x00000000FFFFFFFFLL + +#define VLV_VISA_OTHER_BAR_MMIO_PAGE_SIZE 4096 +#define VLV_VISA_CHAP_SAMPLE_DATA 0x00020000 +#define VLV_VISA_CHAP_STOP 0x00040000 +#define VLV_VISA_CHAP_START 0x00110000 +#define VLV_VISA_CHAP_CTRL_REG_OFFSET 0x0 + +extern DISPATCH_NODE valleyview_visa_dispatch; + +#endif diff --git a/drivers/platform/x86/sepdk/include/error_reporting_utils.h b/drivers/platform/x86/sepdk/include/error_reporting_utils.h new file mode 100644 index 0000000000000..9df1bf3380cdc --- /dev/null +++ b/drivers/platform/x86/sepdk/include/error_reporting_utils.h @@ -0,0 +1,180 @@ +/* 
**************************************************************************** + * Copyright(C) 2009-2018 Intel Corporation. All Rights Reserved. + * + * This file is part of SEP Development Kit + * + * SEP Development Kit is free software; you can redistribute it + * and/or modify it under the terms of the GNU General Public License + * version 2 as published by the Free Software Foundation. + * + * SEP Development Kit is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * As a special exception, you may use this file as part of a free software + * library without restriction. Specifically, if other files instantiate + * templates or use macros or inline functions from this file, or you + * compile this file and link it with other files to produce an executable + * this file does not by itself cause the resulting executable to be + * covered by the GNU General Public License. This exception does not + * however invalidate any other reasons why the executable file might be + * covered by the GNU General Public License. 
+ * **************************************************************************** + */ + +#ifndef __ERROR_REPORTING_UTILS_H__ +#define __ERROR_REPORTING_UTILS_H__ + +#define DRV_ASSERT_N_RET_VAL(ret_val) \ + { \ + DRV_ASSERT((ret_val) == VT_SUCCESS); \ + DRV_CHECK_N_RETURN_N_FAIL(ret_val); \ + } + +#define DRV_ASSERT_N_CONTINUE(ret_val) \ + { \ + if ((ret_val) != VT_SUCCESS) { \ + LOG_ERR1(VTSA_T("Operation failed with error code "), \ + (ret_val)); \ + } \ + } + +#define DRV_CHECK_N_RETURN_N_FAIL(ret_val) \ + { \ + if ((ret_val) != VT_SUCCESS) { \ + LOG_ERR1(VTSA_T("Operation failed with error code "), \ + (ret_val)); \ + return ret_val; \ + } \ + } + +#define DRV_CHECK_N_RETURN_NO_RETVAL(ret_val) \ + { \ + if ((ret_val) != VT_SUCCESS) { \ + LOG_ERR1(VTSA_T("Operation failed with error code "), \ + (ret_val)); \ + return; \ + } \ + } + +#define DRV_CHECK_PTR_N_RET_VAL(ptr) \ + { \ + if ((ptr) == NULL) { \ + LOG_ERR0(VTSA_T("Encountered null pointer")); \ + return VT_SAM_ERROR; \ + } \ + } + +#define DRV_CHECK_PTR_N_RET_NULL(ptr) \ + { \ + if ((ptr) == NULL) { \ + LOG_ERR0(VTSA_T("Encountered null pointer")); \ + return NULL; \ + } \ + } + +#define DRV_CHECK_PTR_N_LOG_NO_RETURN(ptr) \ + { \ + if ((ptr) == NULL) { \ + LOG_ERR0(VTSA_T("Encountered null pointer")); \ + } \ + } + +#define DRV_CHECK_N_LOG_NO_RETURN(ret_val) \ + { \ + if ((ret_val) != VT_SUCCESS) { \ + LOG_ERR1(VTSA_T("Operation failed with error code "), \ + (ret_val)); \ + } \ + } + +#define DRV_CHECK_N_RET_NEG_ONE(ret_val) \ + { \ + if ((ret_val) == -1) { \ + LOG_ERR0(VTSA_T( \ + "Operation failed with error code = -1")); \ + return VT_SAM_ERROR; \ + } \ + } + +#define DRV_REQUIRES_TRUE_COND_RET_N_FAIL(cond) \ + { \ + if (!(cond)) { \ + LOG_ERR0(VTSA_T("Condition check failed")); \ + return VT_SAM_ERROR; \ + } \ + } + +#define DRV_REQUIRES_TRUE_COND_RET_ASSIGNED_VAL(cond, ret_val) \ + { \ + if (!(cond)) { \ + LOG_ERR0(VTSA_T("Condition check failed")); \ + return ret_val; \ + } \ + } + +#define 
DRV_CHECK_N_ERR_LOG_ERR_STRNG_N_RET(rise_err) \ + { \ + if (rise_err != VT_SUCCESS) { \ + PVOID rise_ptr = NULL; \ + const VTSA_CHAR *error_str = NULL; \ + RISE_open(&rise_ptr); \ + RISE_translate_err_code(rise_ptr, rise_err, \ + &error_str); \ + LogItW(LOG_LEVEL_ERROR | LOG_AREA_GENERAL, \ + L"Operation failed with error [ %d ] = %s\n", \ + rise_err, error_str); \ + RISE_close(rise_ptr); \ + return rise_err; \ + } \ + } + +#define DRV_CHECK_PTR_N_CLEANUP(ptr, gotolabel, ret_val) \ + { \ + if ((ptr) == NULL) { \ + LOG_ERR0(VTSA_T("Encountered null pointer")); \ + ret_val = VT_SAM_ERROR; \ + goto gotolabel; \ + } \ + } + +#define DRV_CHECK_ON_FAIL_CLEANUP_N_RETURN(ret_val, gotolabel) \ + { \ + if ((ret_val) != VT_SUCCESS) { \ + DRV_CHECK_N_LOG_NO_RETURN(ret_val); \ + goto gotolabel; \ + } \ + } + +#define DRV_CHECK_N_CLEANUP_N_RETURN_RET_NEG_ONE(ret_val, gotolabel) \ + { \ + if ((ret_val) == -1) { \ + DRV_CHECK_N_LOG_NO_RETURN(ret_val); \ + goto gotolabel; \ + } \ + } + +#define DRV_CHECK_PTR_ON_NULL_CLEANUP_N_RETURN(ptr, gotolabel) \ + { \ + if ((ptr) == NULL) { \ + DRV_CHECK_PTR_N_LOG_NO_RETURN(ptr); \ + goto gotolabel; \ + } \ + } + +#define FREE_N_SET_NULL(ptr) \ + { \ + if (ptr != NULL) { \ + free(ptr); \ + ptr = NULL; \ + } \ + } + +#define DELETE_N_SET_NULL(ptr) \ + { \ + delete ptr; \ + ptr = NULL; \ + } + +#endif diff --git a/drivers/platform/x86/sepdk/include/lwpmudrv_chipset.h b/drivers/platform/x86/sepdk/include/lwpmudrv_chipset.h new file mode 100644 index 0000000000000..755d2799bcd50 --- /dev/null +++ b/drivers/platform/x86/sepdk/include/lwpmudrv_chipset.h @@ -0,0 +1,288 @@ +/* **************************************************************************** + * Copyright(C) 2009-2018 Intel Corporation. All Rights Reserved. 
+ * + * This file is part of SEP Development Kit + * + * SEP Development Kit is free software; you can redistribute it + * and/or modify it under the terms of the GNU General Public License + * version 2 as published by the Free Software Foundation. + * + * SEP Development Kit is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * As a special exception, you may use this file as part of a free software + * library without restriction. Specifically, if other files instantiate + * templates or use macros or inline functions from this file, or you + * compile this file and link it with other files to produce an executable + * this file does not by itself cause the resulting executable to be + * covered by the GNU General Public License. This exception does not + * however invalidate any other reasons why the executable file might be + * covered by the GNU General Public License. 
+ * **************************************************************************** + */ + +#ifndef _LWPMUDRV_CHIPSET_UTILS_H_ +#define _LWPMUDRV_CHIPSET_UTILS_H_ + +#if defined(__cplusplus) +extern "C" { +#endif + +#define MAX_CHIPSET_EVENT_NAME 64 +#define MAX_CHIPSET_COUNTERS 5 + /* TODO: this covers 1 fixed counter \ + * plus 4 general counters on GMCH; \ + * for other chipset devices, this \ + * can vary from 8 to 32; might consider \ + * making this per-chipset-type since \ + * event-multiplexing is currently not \ + * supported for chipset collections + */ + +#if defined(_NTDDK_) +#define CHIPSET_PHYS_ADDRESS PHYSICAL_ADDRESS +#else +#define CHIPSET_PHYS_ADDRESS U64 +#endif + +// possible values for whether chipset data is valid or not +enum { DATA_IS_VALID, DATA_IS_INVALID, DATA_OUT_OF_RANGE }; + +typedef struct CHIPSET_PCI_ARG_NODE_S CHIPSET_PCI_ARG_NODE; +typedef CHIPSET_PCI_ARG_NODE * CHIPSET_PCI_ARG; + +struct CHIPSET_PCI_ARG_NODE_S { + U32 address; + U32 value; +}; + +#define CHIPSET_PCI_ARG_address(chipset_pci) ((chipset_pci)->address) +#define CHIPSET_PCI_ARG_value(chipset_pci) ((chipset_pci)->value) + +typedef struct CHIPSET_PCI_SEARCH_ADDR_NODE_S CHIPSET_PCI_SEARCH_ADDR_NODE; +typedef CHIPSET_PCI_SEARCH_ADDR_NODE * CHIPSET_PCI_SEARCH_ADDR; + +struct CHIPSET_PCI_SEARCH_ADDR_NODE_S { + U32 start; + U32 stop; + U32 increment; + U32 addr; +}; + +#define CHIPSET_PCI_SEARCH_ADDR_start(pci_search_addr) \ + ((pci_search_addr)->start) +#define CHIPSET_PCI_SEARCH_ADDR_stop(pci_search_addr) ((pci_search_addr)->stop) +#define CHIPSET_PCI_SEARCH_ADDR_increment(pci_search_addr) \ + ((pci_search_addr)->increment) +#define CHIPSET_PCI_SEARCH_ADDR_address(pci_search_addr) \ + ((pci_search_addr)->addr) + +typedef struct CHIPSET_PCI_CONFIG_NODE_S CHIPSET_PCI_CONFIG_NODE; +typedef CHIPSET_PCI_CONFIG_NODE * CHIPSET_PCI_CONFIG; + +struct CHIPSET_PCI_CONFIG_NODE_S { + U32 bus; + U32 device; + U32 function; + U32 offset; + U32 value; +}; + +#define 
CHIPSET_PCI_CONFIG_bus(pci_config) ((pci_config)->bus) +#define CHIPSET_PCI_CONFIG_device(pci_config) ((pci_config)->device) +#define CHIPSET_PCI_CONFIG_function(pci_config) ((pci_config)->function) +#define CHIPSET_PCI_CONFIG_offset(pci_config) ((pci_config)->offset) +#define CHIPSET_PCI_CONFIG_value(pci_config) ((pci_config)->value) + +typedef struct CHIPSET_MARKER_NODE_S CHIPSET_MARKER_NODE; +typedef CHIPSET_MARKER_NODE * CHIPSET_MARKER; + +struct CHIPSET_MARKER_NODE_S { + U32 processor_number; + U32 rsvd; + U64 tsc; +}; + +#define CHIPSET_MARKER_processor_number(chipset_marker) \ + ((pci_config)->processor_number) +#define CHIPSET_MARKER_tsc(chipset_marker) ((pci_config)->tsc) + +typedef struct CHAP_INTERFACE_NODE_S CHAP_INTERFACE_NODE; +typedef CHAP_INTERFACE_NODE * CHAP_INTERFACE; + +// CHAP chipset registers +// Offsets for registers are command-0x00, event-0x04, status-0x08, data-0x0C +struct CHAP_INTERFACE_NODE_S { + U32 command_register; + U32 event_register; + U32 status_register; + U32 data_register; +}; + +#define CHAP_INTERFACE_command_register(chap) ((chap)->command_register) +#define CHAP_INTERFACE_event_register(chap) ((chap)->event_register) +#define CHAP_INTERFACE_status_register(chap) ((chap)->status_register) +#define CHAP_INTERFACE_data_register(chap) ((chap)->data_register) + +/************************************************************************** + * GMCH Registers and Offsets + ************************************************************************** + */ + +// Counter registers - each counter has 4 registers +#define GMCH_MSG_CTRL_REG 0xD0 // message control register (MCR) 0xD0-0xD3 +#define GMCH_MSG_DATA_REG 0xD4 // message data register (MDR) 0xD4-0xD7 + +// Counter register offsets +#define GMCH_PMON_CAPABILITIES \ + 0x0005F0F0 // when read, bit 0 enabled means GMCH counters are available +#define GMCH_PMON_GLOBAL_CTRL \ + 0x0005F1F0 // simultaneously enables/disables fixed and general counters + +// Fixed counters (32-bit) 
+#define GMCH_PMON_FIXED_CTR_CTRL \ + 0x0005F4F0 // enables and filters the fixed counters +#define GMCH_PMON_FIXED_CTR0 \ + 0x0005E8F0 // 32-bit fixed counter for GMCH_CORE_CLKS event +#define GMCH_PMON_FIXED_CTR_OVF_VAL \ + 0xFFFFFFFFLL // overflow value for GMCH fixed counters + +// General counters (38-bit) +// NOTE: lower order bits on GP counters must be read before the higher bits! +#define GMCH_PMON_GP_CTR0_L 0x0005F8F0 // GMCH GP counter 0, low bits +#define GMCH_PMON_GP_CTR0_H 0x0005FCF0 // GMCH GP counter 0, high bits +#define GMCH_PMON_GP_CTR1_L 0x0005F9F0 +#define GMCH_PMON_GP_CTR1_H 0x0005FDF0 +#define GMCH_PMON_GP_CTR2_L 0x0005FAF0 +#define GMCH_PMON_GP_CTR2_H 0x0005FEF0 +#define GMCH_PMON_GP_CTR3_L 0x0005FBF0 +#define GMCH_PMON_GP_CTR3_H 0x0005FFF0 +#define GMCH_PMON_GP_CTR_OVF_VAL 0x3FFFFFFFFFLL + // overflow value for GMCH general counters + +// Register offsets for LNC +#define LNC_GMCH_REGISTER_READ 0xD0000000 +#define LNC_GMCH_REGISTER_WRITE 0xE0000000 + +// Register offsets for SLT +#define SLT_GMCH_REGISTER_READ 0x10000000 +#define SLT_GMCH_REGISTER_WRITE 0x11000000 + +// Register offsets for CDV +#define CDV_GMCH_REGISTER_READ 0x10000000 +#define CDV_GMCH_REGISTER_WRITE 0x11000000 + + +typedef struct CHIPSET_EVENT_NODE_S CHIPSET_EVENT_NODE; +typedef CHIPSET_EVENT_NODE * CHIPSET_EVENT; + +//chipset event +struct CHIPSET_EVENT_NODE_S { + U32 event_id; + U32 group_id; + char name[MAX_CHIPSET_EVENT_NAME]; + U32 pm; + U32 counter; +}; + +#define CHIPSET_EVENT_event_id(chipset_event) ((chipset_event)->event_id) +#define CHIPSET_EVENT_group_id(chipset_event) ((chipset_event)->group_id) +#define CHIPSET_EVENT_name(chipset_event) ((chipset_event)->name) +#define CHIPSET_EVENT_pm(chipset_event) ((chipset_event)->pm) +#define CHIPSET_EVENT_counter(chipset_event) ((chipset_event)->counter) + +typedef struct CHIPSET_SEGMENT_NODE_S CHIPSET_SEGMENT_NODE; +typedef CHIPSET_SEGMENT_NODE * CHIPSET_SEGMENT; + +//chipset segment data +struct 
CHIPSET_SEGMENT_NODE_S { + CHIPSET_PHYS_ADDRESS physical_address; + U64 virtual_address; + U16 size; + U16 number_of_counters; + U16 total_events; + U16 start_register; // (see driver for details) + U32 read_register; // read register offset (model dependent) + U32 write_register; // write register offset (model dependent) + CHIPSET_EVENT_NODE events[MAX_CHIPSET_COUNTERS]; +}; + +#define CHIPSET_SEGMENT_physical_address(chipset_segment) \ + ((chipset_segment)->physical_address) +#define CHIPSET_SEGMENT_virtual_address(chipset_segment) \ + ((chipset_segment)->virtual_address) +#define CHIPSET_SEGMENT_size(chipset_segment) ((chipset_segment)->size) +#define CHIPSET_SEGMENT_num_counters(chipset_segment) \ + ((chipset_segment)->number_of_counters) +#define CHIPSET_SEGMENT_total_events(chipset_segment) \ + ((chipset_segment)->total_events) +#define CHIPSET_SEGMENT_start_register(chipset_segment) \ + ((chipset_segment)->start_register) +#define CHIPSET_SEGMENT_read_register(chipset_segment) \ + ((chipset_segment)->read_register) +#define CHIPSET_SEGMENT_write_register(chipset_segment) \ + ((chipset_segment)->write_register) +#define CHIPSET_SEGMENT_events(chipset_segment) ((chipset_segment)->events) + +typedef struct CHIPSET_CONFIG_NODE_S CHIPSET_CONFIG_NODE; +typedef CHIPSET_CONFIG_NODE * CHIPSET_CONFIG; + +//chipset struct used for communication between user mode and kernel +struct CHIPSET_CONFIG_NODE_S { + U32 length; // length of this entire area + U32 major_version; + U32 minor_version; + U32 rsvd; + U64 cpu_counter_mask; + struct { + U64 processor : 1; // Processor PMU + U64 mch_chipset : 1; // MCH Chipset + U64 ich_chipset : 1; // ICH Chipset + U64 motherboard_time_flag : 1; // Motherboard_Time requested. + U64 host_processor_run : 1; + // Each processor should manage the MCH counts they see. + // Turn off for Gen 4 (NOA) runs. 
+ U64 mmio_noa_registers : 1; // NOA + U64 bnb_chipset : 1; // BNB Chipset + U64 gmch_chipset : 1; // GMCH Chipset + U64 rsvd : 56; + } config_flags; + CHIPSET_SEGMENT_NODE mch; + CHIPSET_SEGMENT_NODE ich; + CHIPSET_SEGMENT_NODE mmio; + CHIPSET_SEGMENT_NODE bnb; + CHIPSET_SEGMENT_NODE gmch; +}; + +#define CHIPSET_CONFIG_length(chipset) ((chipset)->length) +#define CHIPSET_CONFIG_major_version(chipset) ((chipset)->major_version) +#define CHIPSET_CONFIG_minor_version(chipset) ((chipset)->minor_version) +#define CHIPSET_CONFIG_cpu_counter_mask(chipset) ((chipset)->cpu_counter_mask) +#define CHIPSET_CONFIG_processor(chipset) ((chipset)->config_flags.processor) +#define CHIPSET_CONFIG_mch_chipset(chipset) \ + ((chipset)->config_flags.mch_chipset) +#define CHIPSET_CONFIG_ich_chipset(chipset) \ + ((chipset)->config_flags.ich_chipset) +#define CHIPSET_CONFIG_motherboard_time(chipset) \ + ((chipset)->config_flags.motherboard_time_flag) +#define CHIPSET_CONFIG_host_proc_run(chipset) \ + ((chipset)->config_flags.host_processor_run) +#define CHIPSET_CONFIG_noa_chipset(chipset) \ + ((chipset)->config_flags.mmio_noa_registers) +#define CHIPSET_CONFIG_bnb_chipset(chipset) \ + ((chipset)->config_flags.bnb_chipset) +#define CHIPSET_CONFIG_gmch_chipset(chipset) \ + ((chipset)->config_flags.gmch_chipset) +#define CHIPSET_CONFIG_mch(chipset) ((chipset)->mch) +#define CHIPSET_CONFIG_ich(chipset) ((chipset)->ich) +#define CHIPSET_CONFIG_noa(chipset) ((chipset)->mmio) +#define CHIPSET_CONFIG_bnb(chipset) ((chipset)->bnb) +#define CHIPSET_CONFIG_gmch(chipset) ((chipset)->gmch) + +#if defined(__cplusplus) +} +#endif + +#endif diff --git a/drivers/platform/x86/sepdk/include/lwpmudrv_defines.h b/drivers/platform/x86/sepdk/include/lwpmudrv_defines.h new file mode 100644 index 0000000000000..8346ea72d587e --- /dev/null +++ b/drivers/platform/x86/sepdk/include/lwpmudrv_defines.h @@ -0,0 +1,521 @@ +/* **************************************************************************** + * Copyright(C) 
2009-2018 Intel Corporation. All Rights Reserved. + * + * This file is part of SEP Development Kit + * + * SEP Development Kit is free software; you can redistribute it + * and/or modify it under the terms of the GNU General Public License + * version 2 as published by the Free Software Foundation. + * + * SEP Development Kit is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * As a special exception, you may use this file as part of a free software + * library without restriction. Specifically, if other files instantiate + * templates or use macros or inline functions from this file, or you + * compile this file and link it with other files to produce an executable + * this file does not by itself cause the resulting executable to be + * covered by the GNU General Public License. This exception does not + * however invalidate any other reasons why the executable file might be + * covered by the GNU General Public License. + * **************************************************************************** + */ + +#ifndef _LWPMUDRV_DEFINES_H_ +#define _LWPMUDRV_DEFINES_H_ + +#if defined(__cplusplus) +extern "C" { +#endif +// +// Start off with none of the OS'es are defined +// +#undef DRV_OS_WINDOWS +#undef DRV_OS_LINUX +#undef DRV_OS_SOLARIS +#undef DRV_OS_MAC +#undef DRV_OS_ANDROID +#undef DRV_OS_UNIX + +// +// Make sure none of the architectures is defined here +// +#undef DRV_IA32 +#undef DRV_EM64T + +// +// Make sure one (and only one) of the OS'es gets defined here +// +// Unfortunately entirex defines _WIN32 so we need to check for linux +// first. The definition of these flags is one and only one +// _OS_xxx is allowed to be defined. 
+// +#if defined(__ANDROID__) +#define DRV_OS_ANDROID +#define DRV_OS_UNIX +#elif defined(__linux__) +#define DRV_OS_LINUX +#define DRV_OS_UNIX +#elif defined(sun) +#define DRV_OS_SOLARIS +#define DRV_OS_UNIX +#elif defined(_WIN32) +#define DRV_OS_WINDOWS +#elif defined(__APPLE__) +#define DRV_OS_MAC +#define DRV_OS_UNIX +#elif defined(__FreeBSD__) +#define DRV_OS_FREEBSD +#define DRV_OS_UNIX +#else +#error "Compiling for an unknown OS" +#endif + +// +// Make sure one (and only one) architecture is defined here +// as well as one (and only one) pointer__ size +// +#if defined(_M_IX86) || defined(__i386__) +#define DRV_IA32 +#elif defined(_M_AMD64) || defined(__x86_64__) +#define DRV_EM64T +#else +#error "Unknown architecture for compilation" +#endif + +// +// Add a well defined definition of compiling for release (free) vs. +// debug (checked). Once again, don't assume these are the only two values, +// always have an else clause in case we want to expand this. +// +#if defined(DRV_OS_UNIX) +#define WINAPI +#endif + +/* + * Add OS neutral defines for file processing. 
This is needed in both + * the user code and the kernel code for cleanliness + */ +#undef DRV_FILE_DESC +#undef DRV_INVALID_FILE_DESC_VALUE +#define DRV_ASSERT assert + +#if defined(DRV_OS_WINDOWS) + +#define DRV_FILE_DESC HANDLE +#define DRV_INVALID_FILE_DESC_VALUE INVALID_HANDLE_VALUE + +#elif defined(DRV_OS_LINUX) || defined(DRV_OS_SOLARIS) || \ + defined(DRV_OS_ANDROID) + +#define DRV_IOCTL_FILE_DESC SIOP +#define DRV_FILE_DESC SIOP +#define DRV_INVALID_FILE_DESC_VALUE -1 + +#elif defined(DRV_OS_FREEBSD) + +#define DRV_IOCTL_FILE_DESC S64 +#define DRV_FILE_DESC S64 +#define DRV_INVALID_FILE_DESC_VALUE -1 + +#elif defined(DRV_OS_MAC) +#if defined __LP64__ +#define DRV_IOCTL_FILE_DESC S64 +#define DRV_FILE_DESC S64 +#define DRV_INVALID_FILE_DESC_VALUE (S64)(-1) +#else +#define DRV_IOCTL_FILE_DESC S32 +#define DRV_FILE_DESC S32 +#define DRV_INVALID_FILE_DESC_VALUE (S32)(-1) +#endif + +#else + +#error "Compiling for an unknown OS" + +#endif + +#define OUT +#define IN +#define INOUT + +// +// VERIFY_SIZEOF let's you insert a compile-time check that the size of a data +// type (e.g. a struct) is what you think it should be. Usually it is +// important to know what the actual size of your struct is, and to make sure +// it is the same across all platforms. So this will prevent the code from +// compiling if something happens that you didn't expect, whether it's because +// you counted wring, or more often because the compiler inserted padding that +// you don't want. +// +// NOTE: 'elem' and 'size' must both be identifier safe, e.g. matching the +// regular expression /^[0-9a-zA-Z_]$/. +// +// Example: +// typedef struct { void *ptr; int data; } mytype; +// VERIFY_SIZEOF(mytype, 8); +// ^-- this is correct on 32-bit platforms, but fails +// on 64-bit platforms, indicating a possible +// portability issue. 
+// +#define VERIFY_SIZEOF(type, size) \ + { \ + enum { \ + sizeof_##type##_eq_##size = 1 / (int)(sizeof(type) == size) \ + } \ + } + +#if defined(DRV_OS_WINDOWS) +#define DRV_DLLIMPORT __declspec(dllimport) +#define DRV_DLLEXPORT __declspec(dllexport) +#endif +#if defined(DRV_OS_UNIX) +#define DRV_DLLIMPORT +#define DRV_DLLEXPORT +#endif + +#if defined(DRV_OS_WINDOWS) +#define FSI64RAW "I64" +#define DRV_PATH_SEPARATOR "\\" +#define L_DRV_PATH_SEPARATOR L"\\" +#endif + +#if defined(DRV_OS_UNIX) +#define FSI64RAW "ll" +#define DRV_PATH_SEPARATOR "/" +#define L_DRV_PATH_SEPARATOR L"/" +#endif + +#define FSS64 "%" FSI64RAW "d" +#define FSU64 "%" FSI64RAW "u" +#define FSX64 "%" FSI64RAW "x" + +#if defined(DRV_OS_WINDOWS) +#define DRV_RTLD_NOW 0 +#endif +#if defined(DRV_OS_UNIX) +#if defined(DRV_OS_FREEBSD) +#define DRV_RTLD_NOW 0 +#else +#define DRV_RTLD_NOW RTLD_NOW +#endif +#endif + +#define DRV_STRLEN (U32)(strlen) +#define DRV_WCSLEN (U32)(wcslen) +#define DRV_STRCSPN strcspn +#define DRV_STRCHR strchr +#define DRV_STRRCHR strrchr +#define DRV_WCSRCHR wcsrchr + +#if defined(DRV_OS_WINDOWS) +#define DRV_STCHARLEN DRV_WCSLEN +#else +#define DRV_STCHARLEN DRV_STRLEN +#endif + +#if defined(DRV_OS_WINDOWS) +#define DRV_STRCPY strcpy_s +#define DRV_STRNCPY strncpy_s +#define DRV_STRICMP _stricmp +#define DRV_STRNCMP strncmp +#define DRV_STRNICMP _strnicmp +#define DRV_STRDUP _strdup +#define DRV_WCSDUP _wcsdup +#define DRV_STRCMP strcmp +#define DRV_WCSCMP wcscmp +#define DRV_SNPRINTF _snprintf_s +#define DRV_SNWPRINTF _snwprintf_s +#define DRV_VSNPRINTF _vsnprintf_s +#define DRV_SSCANF sscanf_s +#define DRV_STRCAT strcat_s +#define DRV_STRNCAT strncat_s +#define DRV_MEMCPY memcpy_s +#define DRV_WMEMCPY wmemcpy_s +#define DRV_STRTOK strtok_s +#define DRV_STRTOUL strtoul +#define DRV_STRTOULL _strtoui64 +#define DRV_STRTOQ _strtoui64 +#define DRV_FOPEN(fp, name, mode) fopen_s(&(fp), (name), (mode)) +#define DRV_WFOPEN(fp, name, mode) _wfopen_s(&(fp), (name), (mode)) 
+#define DRV_FCLOSE(fp) \ + { \ + if ((fp) != NULL) { \ + fclose((fp)); \ + } \ + } +#define DRV_WCSCPY wcscpy_s +#define DRV_WCSNCPY wcsncpy_s +#define DRV_WCSCAT wcscat_s +#define DRV_WCSNCAT wcsncat_s +#define DRV_WCSTOK wcstok_s +#define DRV_WCSSTR wcsstr +#define DRV_STRERROR strerror_s +#define DRV_SPRINTF sprintf_s +#define DRV_VSPRINTF vsprintf_s +#define DRV_VSWPRINTF vswprintf_s +#define DRV_GETENV_S getenv_s +#define DRV_WGETENV_S wgetenv_s +#define DRV_PUTENV(name) _putenv(name) +#define DRV_USTRCMP(X, Y) DRV_WCSCMP(X, Y) +#define DRV_USTRDUP(X) DRV_WCSDUP(X) +#define DRV_ACCESS(X) _access_s(X, 4) +#define DRV_STRSTR strstr + +#define DRV_STCHAR_COPY DRV_WCSNCPY + +#define DRV_GETENV(buf, buf_size, name) _dupenv_s(&(buf), &(buf_size), (name)) +#define DRV_WGETENV(buf, buf_size, name) _wdupenv_s(&(buf), &(buf_size), (name)) +#define DRV_SCLOSE(fp) _close(fp) +#define DRV_WRITE(fp, buf, buf_size) _write(fp, buf, buf_size); +#define DRV_SOPEN_S(fp, name, oflag, shflag, pmode) \ + _sopen_s((fp), (name), (oflag), (shflag), (pmode)) +#endif + +#if defined(DRV_OS_UNIX) +/* + Note: Many of the following macros have a "size" as the second argument. + Generally speaking, this is for compatibility with the _s versions + available on Windows. On Linux/Solaris/Mac, it is ignored. + On Windows, it is the size of the destination buffer and is used wrt + memory checking features available in the C runtime in debug mode. + Do not confuse it with the number of bytes to be copied, or such. + + On Windows, this size should correspond to the number of allocated characters + (char or wchar_t) pointed to by the first argument. See MSDN or more details. +*/ +#define DRV_STRICMP strcasecmp +#define DRV_STRDUP strdup +#define DRV_STRNDUP strndup +#define DRV_STRCMP strcmp +#define DRV_STRNCMP strncmp +#define DRV_STRSTR strstr +#define DRV_SNPRINTF(buf, buf_size, length, args...) \ + snprintf((buf), (length), ##args) +#define DRV_SNWPRINTF(buf, buf_size, length, args...) 
\ + snwprintf((buf), (length), ##args) +#define DRV_VSNPRINTF(buf, buf_size, length, args...) \ + vsnprintf((buf), (length), ##args) +#define DRV_SSCANF sscanf +#define DRV_STRCPY(dst, dst_size, src) strcpy((dst), (src)) +#define DRV_STRNCPY(dst, dst_size, src, n) strncpy((dst), (src), (n)) +#define DRV_STRCAT(dst, dst_size, src) strcat((dst), (src)) +#define DRV_STRNCAT(dst, dst_size, src, n) strncat((dst), (src), (n)) +#define DRV_MEMCPY(dst, dst_size, src, n) memcpy((dst), (src), (n)) +#define DRV_STRTOK(tok, delim, context) strtok((tok), (delim)) +#define DRV_STRTOUL strtoul +#define DRV_STRTOULL strtoull +#define DRV_STRTOL strtol +#define DRV_FOPEN(fp, name, mode) { (fp) = fopen((name), (mode)); } +#define DRV_FCLOSE(fp) \ + { \ + if ((fp) != NULL) { \ + fclose((fp)); \ + } \ + } + +#define DRV_WCSCPY(dst, dst_size, src) wcscpy((dst), (const wchar_t *)(src)) +#define DRV_WCSNCPY(dst, dst_size, src, count) \ + wcsncpy((dst), (const wchar_t *)(src), (count)) +#define DRV_WCSCAT(dst, dst_size, src) wcscat((dst), (const wchar_t *)(src)) +#define DRV_WCSTOK(tok, delim, context) \ + wcstok((tok), (const wchar_t *)(delim), (context)) +#define DRV_STRERROR strerror +#define DRV_SPRINTF(dst, dst_size, args...) sprintf((dst), ##args) +#define DRV_VSPRINTF(dst, dst_size, length, args...) \ + vsprintf((dst), (length), ##args) +#define DRV_VSWPRINTF(dst, dst_size, length, args...) 
\ + vswprintf((dst), (length), ##args) +#define DRV_GETENV_S(dst, dst_size) getenv(dst) +#define DRV_WGETENV_S(dst, dst_size) wgetenv(dst) +#define DRV_PUTENV(name) putenv(name) +#define DRV_GETENV(buf, buf_size, name) ((buf) = getenv((name))) +#define DRV_USTRCMP(X, Y) DRV_STRCMP(X, Y) +#define DRV_USTRDUP(X) DRV_STRDUP(X) +#define DRV_ACCESS(X) access(X, X_OK) + +#define DRV_STCHAR_COPY DRV_STRNCPY +#endif + +#if defined(DRV_OS_WINDOWS) +#define DRV_STRTOK_R(tok, delim, context) strtok_s((tok), (delim), (context)) +#else +#define DRV_STRTOK_R(tok, delim, context) strtok_r((tok), (delim), (context)) +#endif + +#if defined(DRV_OS_LINUX) || defined(DRV_OS_MAC) || defined(DRV_OS_FREEBSD) +#define DRV_STRTOQ strtoq +#endif + +#if defined(DRV_OS_ANDROID) +#define DRV_STRTOQ strtol +#endif + +#if defined(DRV_OS_SOLARIS) +#define DRV_STRTOQ strtoll +#endif + +#if defined(DRV_OS_LINUX) || defined(DRV_OS_FREEBSD) || defined(DRV_OS_MAC) +#define DRV_WCSDUP wcsdup +#endif + +#if defined(DRV_OS_SOLARIS) +#define DRV_WCSDUP solaris_wcsdup +#endif + +#if defined(DRV_OS_ANDROID) +#define DRV_WCSDUP android_wcsdup +#endif + +/* + * Windows uses wchar_t and linux uses char for strings. + * Need an extra level of abstraction to standardize it. + */ +#if defined(DRV_OS_WINDOWS) +#define DRV_STDUP DRV_WCSDUP +#define DRV_FORMAT_STRING(x) L##x +#define DRV_PRINT_STRING(stream, format, ...) \ + fwprintf((stream), (format), __VA_ARGS__) +#else +#define DRV_STDUP DRV_STRDUP +#define DRV_FORMAT_STRING(x) x +#define DRV_PRINT_STRING(stream, format, ...) 
\
+	fprintf((stream), (format), __VA_ARGS__)
+#endif
+
+/*
+ * OS return types
+ */
+#if defined(DRV_OS_UNIX)
+#define OS_STATUS int
+#define OS_SUCCESS 0
+#if defined(BUILD_DRV_ESX)
+#define OS_ILLEGAL_IOCTL -1
+#define OS_NO_MEM -2
+#define OS_FAULT -3
+#define OS_INVALID -4
+#define OS_NO_SYSCALL -5
+#define OS_RESTART_SYSCALL -6
+#define OS_IN_PROGRESS -7
+#else
+#define OS_ILLEGAL_IOCTL -ENOTTY
+#define OS_NO_MEM -ENOMEM
+#define OS_FAULT -EFAULT
+#define OS_INVALID -EINVAL
+#define OS_NO_SYSCALL -ENOSYS
+#define OS_RESTART_SYSCALL -ERESTARTSYS
+#define OS_IN_PROGRESS -EALREADY
+#endif
+#endif
+#if defined(DRV_OS_WINDOWS)
+#define OS_STATUS NTSTATUS
+#define OS_SUCCESS STATUS_SUCCESS
+#define OS_ILLEGAL_IOCTL STATUS_UNSUCCESSFUL
+#define OS_NO_MEM STATUS_UNSUCCESSFUL
+#define OS_FAULT STATUS_UNSUCCESSFUL
+#define OS_INVALID STATUS_UNSUCCESSFUL
+#define OS_NO_SYSCALL STATUS_UNSUCCESSFUL
+#define OS_RESTART_SYSCALL STATUS_UNSUCCESSFUL
+#define OS_IN_PROGRESS STATUS_UNSUCCESSFUL
+#endif
+
+/****************************************************************************
+ ** Driver State definitions
+ ***************************************************************************/
+#define DRV_STATE_UNINITIALIZED 0
+#define DRV_STATE_RESERVED 1
+#define DRV_STATE_IDLE 2
+#define DRV_STATE_PAUSED 3
+#define DRV_STATE_STOPPED 4
+#define DRV_STATE_RUNNING 5
+#define DRV_STATE_PAUSING 6
+#define DRV_STATE_PREPARE_STOP 7
+#define DRV_STATE_TERMINATING 8
+
+// Parenthesize the macro argument so compound expressions shift correctly.
+#define MATCHING_STATE_BIT(state) ((U32)1 << (state))
+#define STATE_BIT_UNINITIALIZED MATCHING_STATE_BIT(DRV_STATE_UNINITIALIZED)
+#define STATE_BIT_RESERVED MATCHING_STATE_BIT(DRV_STATE_RESERVED)
+#define STATE_BIT_IDLE MATCHING_STATE_BIT(DRV_STATE_IDLE)
+#define STATE_BIT_PAUSED MATCHING_STATE_BIT(DRV_STATE_PAUSED)
+#define STATE_BIT_STOPPED MATCHING_STATE_BIT(DRV_STATE_STOPPED)
+#define STATE_BIT_RUNNING MATCHING_STATE_BIT(DRV_STATE_RUNNING)
+#define STATE_BIT_PAUSING MATCHING_STATE_BIT(DRV_STATE_PAUSING)
+#define
STATE_BIT_PREPARE_STOP MATCHING_STATE_BIT(DRV_STATE_PREPARE_STOP) +#define STATE_BIT_TERMINATING MATCHING_STATE_BIT(DRV_STATE_TERMINATING) +#define STATE_BIT_ANY ((U32)-1) + +#define IS_COLLECTING_STATE(state) \ + (!!(MATCHING_STATE_BIT(state) & \ + (STATE_BIT_RUNNING | STATE_BIT_PAUSING | STATE_BIT_PAUSED))) + +/* + * Stop codes + */ +#define DRV_STOP_BASE 0 +#define DRV_STOP_NORMAL 1 +#define DRV_STOP_ASYNC 2 +#define DRV_STOP_CANCEL 3 +#define SEP_FREE(loc) \ + { \ + if ((loc)) { \ + free(loc); \ + loc = NULL; \ + } \ + } + +#define MAX_EVENTS 256 // Limiting maximum multiplexing events to 256. +#if defined(DRV_OS_UNIX) +#define UNREFERENCED_PARAMETER(p) ((p) = (p)) +#endif + +/* + * Global marker names + */ +#define START_MARKER_NAME "SEP_START_MARKER" +#define PAUSE_MARKER_NAME "SEP_PAUSE_MARKER" +#define RESUME_MARKER_NAME "SEP_RESUME_MARKER" + +#define DRV_SOC_STRING_LEN (100 + MAX_MARKER_LENGTH) + +/* + * Temp path + */ +#define SEP_TMPDIR "SEP_TMP_DIR" +#if defined(DRV_OS_WINDOWS) +#define OS_TMPDIR "TEMP" +#define GET_DEFAULT_TMPDIR(dir, size) \ + { \ + GetTempPath((U32)size, dir); \ + } +#else +#define OS_TMPDIR "TMPDIR" +/* + * Unix has default tmp dir + */ +#if defined(DRV_OS_ANDROID) +#define TEMP_PATH "/data" +#else +#define TEMP_PATH "/tmp" +#endif +#define GET_DEFAULT_TMPDIR(dir, size) \ + { \ + DRV_STRCPY((STCHAR *)dir, (U32)size, (STCHAR *)TEMP_PATH); \ + } +#endif + +#define OS_ID_UNKNOWN -1 +#define OS_ID_NATIVE 0 +#define OS_ID_VMM 0 +#define OS_ID_MODEM 1 +#define OS_ID_ANDROID 2 +#define OS_ID_SECVM 3 +#define OS_ID_ACORN 0xFFFF + +#define PERF_HW_VER4 (5) +#if defined(__cplusplus) +} +#endif + +#endif diff --git a/drivers/platform/x86/sepdk/include/lwpmudrv_ecb.h b/drivers/platform/x86/sepdk/include/lwpmudrv_ecb.h new file mode 100644 index 0000000000000..a8b5fced897d7 --- /dev/null +++ b/drivers/platform/x86/sepdk/include/lwpmudrv_ecb.h @@ -0,0 +1,1129 @@ +/* **************************************************************************** + * 
Copyright(C) 2009-2018 Intel Corporation. All Rights Reserved. + * + * This file is part of SEP Development Kit + * + * SEP Development Kit is free software; you can redistribute it + * and/or modify it under the terms of the GNU General Public License + * version 2 as published by the Free Software Foundation. + * + * SEP Development Kit is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * As a special exception, you may use this file as part of a free software + * library without restriction. Specifically, if other files instantiate + * templates or use macros or inline functions from this file, or you + * compile this file and link it with other files to produce an executable + * this file does not by itself cause the resulting executable to be + * covered by the GNU General Public License. This exception does not + * however invalidate any other reasons why the executable file might be + * covered by the GNU General Public License. 
+ * **************************************************************************** + */ + +#ifndef _LWPMUDRV_ECB_UTILS_H_ +#define _LWPMUDRV_ECB_UTILS_H_ + +#if defined(DRV_OS_WINDOWS) +#pragma warning(disable : 4200) +#endif + +#if defined(__cplusplus) +extern "C" { +#endif + +// control register types +#define CCCR 1 // counter configuration control register +#define ESCR 2 // event selection control register +#define DATA 4 // collected as snapshot of current value +#define DATA_RO_DELTA 8 // read-only counter collected as current-previous +#define DATA_RO_SS 16 +// read-only counter collected as snapshot of current value +#define METRICS 32 // hardware metrics + +// event multiplexing modes +#define EM_DISABLED -1 +#define EM_TIMER_BASED 0 +#define EM_EVENT_BASED_PROFILING 1 +#define EM_TRIGGER_BASED 2 + +// *************************************************************************** + +/*!\struct EVENT_DESC_NODE + * \var sample_size - size of buffer in bytes to hold the sample + extras + * \var max_gp_events - max number of General Purpose events per EM group + * \var pebs_offset - offset in the sample to locate the pebs capture info + * \var lbr_offset - offset in the sample to locate the lbr information + * \var lbr_num_regs - offset in the sample to locate the number of + * lbr register information + * \var latency_offset_in_sample - offset in the sample to locate the + * latency information + * \var latency_size_in_sample - size of latency records in the sample + * \var latency_size_from_pebs_record - size of the latency data from + * pebs record in the sample + * \var latency_offset_in_pebs_record - offset in the sample to locate the + * latency information in pebs record + * \var power_offset_in_sample - offset in the sample to locate the + * power information + * \var ebc_offset - offset in the sample to locate the ebc count informatio + * \var uncore_ebc_offset - offset in the sample to locate the uncore + * ebc count information + * + * \var ro_offset - 
offset of RO data in the sample + * \var ro_count - total number of RO entries (including all of + * IEAR/DEAR/BTB/IPEAR) + * \var iear_offset - offset into RO data at which IEAR entries begin + * \var dear_offset - offset into RO data at which DEAR entries begin + * \var btb_offset - offset into RO data at which BTB entries begin + * (these use the same PMDs) + * \var ipear_offset - offset into RO data at which IPEAR entries begin + * (these use the same PMDs) + * \var iear_count - number of IEAR entries + * \var dear_count - number of DEAR entries + * \var btb_count - number of BTB entries + * \var ipear_count - number of IPEAR entries + * + * \var pwr_offset - offset in the sample to locate the pwr count info + * \var p_state_offset - offset in the sample to locate the p_state + * information (APERF/MPERF) + * + * \brief Data structure to describe the events and the mode + * + */ + +typedef struct EVENT_DESC_NODE_S EVENT_DESC_NODE; +typedef EVENT_DESC_NODE * EVENT_DESC; + +struct EVENT_DESC_NODE_S { + U32 sample_size; + U32 pebs_offset; + U32 pebs_size; + U32 lbr_offset; + U32 lbr_num_regs; + U32 latency_offset_in_sample; + U32 latency_size_in_sample; + U32 latency_size_from_pebs_record; + U32 latency_offset_in_pebs_record; + U32 power_offset_in_sample; + U32 ebc_offset; + U32 uncore_ebc_offset; + U32 eventing_ip_offset; + U32 hle_offset; + U32 pwr_offset; + U32 callstack_offset; + U32 callstack_size; + U32 p_state_offset; + U32 pebs_tsc_offset; + U32 perfmetrics_offset; + U32 perfmetrics_size; + /* ----------ADAPTIVE PEBS FIELDS --------- */ + U16 applicable_counters_offset; + U16 gpr_info_offset; + U16 gpr_info_size; + U16 xmm_info_offset; + U16 xmm_info_size; + U16 lbr_info_size; + /*------------------------------------------*/ + U32 reserved2; + U64 reserved3; +}; + +// +// Accessor macros for EVENT_DESC node +// +#define EVENT_DESC_sample_size(ec) ((ec)->sample_size) +#define EVENT_DESC_pebs_offset(ec) ((ec)->pebs_offset) +#define EVENT_DESC_pebs_size(ec) 
((ec)->pebs_size) +#define EVENT_DESC_lbr_offset(ec) ((ec)->lbr_offset) +#define EVENT_DESC_lbr_num_regs(ec) ((ec)->lbr_num_regs) +#define EVENT_DESC_latency_offset_in_sample(ec) ((ec)->latency_offset_in_sample) +#define EVENT_DESC_latency_size_from_pebs_record(ec) \ + ((ec)->latency_size_from_pebs_record) +#define EVENT_DESC_latency_offset_in_pebs_record(ec) \ + ((ec)->latency_offset_in_pebs_record) +#define EVENT_DESC_latency_size_in_sample(ec) ((ec)->latency_size_in_sample) +#define EVENT_DESC_power_offset_in_sample(ec) ((ec)->power_offset_in_sample) +#define EVENT_DESC_ebc_offset(ec) ((ec)->ebc_offset) +#define EVENT_DESC_uncore_ebc_offset(ec) ((ec)->uncore_ebc_offset) +#define EVENT_DESC_eventing_ip_offset(ec) ((ec)->eventing_ip_offset) +#define EVENT_DESC_hle_offset(ec) ((ec)->hle_offset) +#define EVENT_DESC_pwr_offset(ec) ((ec)->pwr_offset) +#define EVENT_DESC_callstack_offset(ec) ((ec)->callstack_offset) +#define EVENT_DESC_callstack_size(ec) ((ec)->callstack_size) +#define EVENT_DESC_perfmetrics_offset(ec) ((ec)->perfmetrics_offset) +#define EVENT_DESC_perfmetrics_size(ec) ((ec)->perfmetrics_size) +#define EVENT_DESC_p_state_offset(ec) ((ec)->p_state_offset) +#define EVENT_DESC_pebs_tsc_offset(ec) ((ec)->pebs_tsc_offset) +#define EVENT_DESC_applicable_counters_offset(ec) \ + ((ec)->applicable_counters_offset) +#define EVENT_DESC_gpr_info_offset(ec) ((ec)->gpr_info_offset) +#define EVENT_DESC_gpr_info_size(ec) ((ec)->gpr_info_size) +#define EVENT_DESC_xmm_info_offset(ec) ((ec)->xmm_info_offset) +#define EVENT_DESC_xmm_info_size(ec) ((ec)->xmm_info_size) +#define EVENT_DESC_lbr_info_size(ec) ((ec)->lbr_info_size) + +// *************************************************************************** + +/*!\struct EVENT_CONFIG_NODE + * \var num_groups - The number of groups being programmed + * \var em_mode - Is EM valid? If so how? + * \var em_time_slice - EM valid? 
time slice in milliseconds + * \var sample_size - size of buffer in bytes to hold the sample + extra + * \var max_gp_events - Max number of General Purpose events per EM group + * \var pebs_offset - offset in the sample to locate the pebs capture + * information + * \var lbr_offset - offset in the sample to locate the lbr information + * \var lbr_num_regs - offset in the sample to locate the lbr information + * \var latency_offset_in_sample - offset in the sample to locate the + * latency information + * \var latency_size_in_sample - size of latency records in sample + * \var latency_size_from_pebs_record - offset in sample to locate latency + * size from pebs record + * \var latency_offset_in_pebs_record - offset in the sample to locate the + * latency information in pebs record + * \var power_offset_in_sample - offset in the sample to locate the + * power information + * \var ebc_offset - offset in the sample to locate the + * ebc count information + * + * \var pwr_offset - offset in the sample to locate the pwr count information + * \var p_state_offset - offset in the sample to locate the p_state + * information (APERF/MPERF) + * + * \brief Data structure to describe the events and the mode + * + */ + +typedef struct EVENT_CONFIG_NODE_S EVENT_CONFIG_NODE; +typedef EVENT_CONFIG_NODE * EVENT_CONFIG; + +struct EVENT_CONFIG_NODE_S { + U32 num_groups; + S32 em_mode; + S32 em_factor; + S32 em_event_num; + U32 sample_size; + U32 max_gp_events; + U32 max_fixed_counters; + U32 max_ro_counters; // maximum read-only counters + U32 pebs_offset; + U32 pebs_size; + U32 lbr_offset; + U32 lbr_num_regs; + U32 latency_offset_in_sample; + U32 latency_size_in_sample; + U32 latency_size_from_pebs_record; + U32 latency_offset_in_pebs_record; + U32 power_offset_in_sample; + U32 ebc_offset; + U32 num_groups_unc; + U32 ebc_offset_unc; + U32 sample_size_unc; + U32 eventing_ip_offset; + U32 hle_offset; + U32 pwr_offset; + U32 callstack_offset; + U32 callstack_size; + U32 p_state_offset; + 
U32 pebs_tsc_offset; + U64 reserved1; + U64 reserved2; + U64 reserved3; + U64 reserved4; +}; + +// +// Accessor macros for EVENT_CONFIG node +// +#define EVENT_CONFIG_num_groups(ec) ((ec)->num_groups) +#define EVENT_CONFIG_mode(ec) ((ec)->em_mode) +#define EVENT_CONFIG_em_factor(ec) ((ec)->em_factor) +#define EVENT_CONFIG_em_event_num(ec) ((ec)->em_event_num) +#define EVENT_CONFIG_sample_size(ec) ((ec)->sample_size) +#define EVENT_CONFIG_max_gp_events(ec) ((ec)->max_gp_events) +#define EVENT_CONFIG_max_fixed_counters(ec) ((ec)->max_fixed_counters) +#define EVENT_CONFIG_max_ro_counters(ec) ((ec)->max_ro_counters) +#define EVENT_CONFIG_pebs_offset(ec) ((ec)->pebs_offset) +#define EVENT_CONFIG_pebs_size(ec) ((ec)->pebs_size) +#define EVENT_CONFIG_lbr_offset(ec) ((ec)->lbr_offset) +#define EVENT_CONFIG_lbr_num_regs(ec) ((ec)->lbr_num_regs) +#define EVENT_CONFIG_latency_offset_in_sample(ec) \ + ((ec)->latency_offset_in_sample) +#define EVENT_CONFIG_latency_size_from_pebs_record(ec) \ + ((ec)->latency_size_from_pebs_record) +#define EVENT_CONFIG_latency_offset_in_pebs_record(ec) \ + ((ec)->latency_offset_in_pebs_record) +#define EVENT_CONFIG_latency_size_in_sample(ec) ((ec)->latency_size_in_sample) +#define EVENT_CONFIG_power_offset_in_sample(ec) ((ec)->power_offset_in_sample) +#define EVENT_CONFIG_ebc_offset(ec) ((ec)->ebc_offset) +#define EVENT_CONFIG_num_groups_unc(ec) ((ec)->num_groups_unc) +#define EVENT_CONFIG_ebc_offset_unc(ec) ((ec)->ebc_offset_unc) +#define EVENT_CONFIG_sample_size_unc(ec) ((ec)->sample_size_unc) +#define EVENT_CONFIG_eventing_ip_offset(ec) ((ec)->eventing_ip_offset) +#define EVENT_CONFIG_hle_offset(ec) ((ec)->hle_offset) +#define EVENT_CONFIG_pwr_offset(ec) ((ec)->pwr_offset) +#define EVENT_CONFIG_callstack_offset(ec) ((ec)->callstack_offset) +#define EVENT_CONFIG_callstack_size(ec) ((ec)->callstack_size) +#define EVENT_CONFIG_p_state_offset(ec) ((ec)->p_state_offset) +#define EVENT_CONFIG_pebs_tsc_offset(ec) ((ec)->pebs_tsc_offset) + +typedef 
enum { UNC_MUX = 1, UNC_COUNTER } UNC_SA_PROG_TYPE; + +typedef enum { + UNC_PCICFG = 1, + UNC_MMIO, + UNC_STOP, + UNC_MEMORY, + UNC_STATUS +} UNC_SA_CONFIG_TYPE; + +typedef enum { + UNC_MCHBAR = 1, + UNC_DMIBAR, + UNC_PCIEXBAR, + UNC_GTTMMADR, + UNC_GDXCBAR, + UNC_CHAPADR, + UNC_SOCPCI, + UNC_NPKBAR +} UNC_SA_BAR_TYPE; + +typedef enum { UNC_OP_READ = 1, UNC_OP_WRITE, UNC_OP_RMW } UNC_SA_OPERATION; + +typedef enum { + STATIC_COUNTER = 1, + FREERUN_COUNTER, + PROG_FREERUN_COUNTER +} COUNTER_TYPES; + +typedef enum { + PACKAGE_EVENT = 1, + MODULE_EVENT, + THREAD_EVENT, + SYSTEM_EVENT +} EVENT_SCOPE_TYPES; + +typedef enum { + DEVICE_CORE = 1, // CORE DEVICE + DEVICE_HETERO, + DEVICE_UNC_CBO = 10, // UNCORE DEVICES START + DEVICE_UNC_HA, + DEVICE_UNC_IMC, + DEVICE_UNC_IRP, + DEVICE_UNC_NCU, + DEVICE_UNC_PCU, + DEVICE_UNC_POWER, + DEVICE_UNC_QPI, + DEVICE_UNC_R2PCIE, + DEVICE_UNC_R3QPI, + DEVICE_UNC_SBOX, + DEVICE_UNC_GT, + DEVICE_UNC_UBOX, + DEVICE_UNC_WBOX, + DEVICE_UNC_COREI7, + DEVICE_UNC_CHA, + DEVICE_UNC_EDC, + DEVICE_UNC_IIO, + DEVICE_UNC_M2M, + DEVICE_UNC_EDRAM, + DEVICE_UNC_FPGA_CACHE, + DEVICE_UNC_FPGA_FAB, + DEVICE_UNC_FPGA_THERMAL, + DEVICE_UNC_FPGA_POWER, + DEVICE_UNC_FPGA_GB, + DEVICE_UNC_TELEMETRY = 150, // TELEMETRY DEVICE + DEVICE_UNC_CHAP = 200, // CHIPSET DEVICES START + DEVICE_UNC_GMCH, + DEVICE_UNC_GFX, + DEVICE_UNC_SOCPERF = 300, // UNCORE VISA DEVICES START + DEVICE_UNC_HFI_RXE = 400, // STL HFI + DEVICE_UNC_HFI_TXE, +} DEVICE_TYPES; + +typedef enum { + LBR_ENTRY_TOS = 0, + LBR_ENTRY_FROM_IP, + LBR_ENTRY_TO_IP, + LBR_ENTRY_INFO +} LBR_ENTRY_TYPE; + +// *************************************************************************** + +/*!\struct EVENT_REG_ID_NODE + * \var reg_id - MSR index to r/w + * \var pci_id PCI based register and its details to operate on + */ +typedef struct EVENT_REG_ID_NODE_S EVENT_REG_ID_NODE; +typedef EVENT_REG_ID_NODE * EVENT_REG_ID; + +struct EVENT_REG_ID_NODE_S { + U32 reg_id; + U32 pci_bus_no; + U32 pci_dev_no; + U32 
pci_func_no; + U32 data_size; + U32 bar_index; // Points to the index (MMIO_INDEX_LIST) + // of bar memory map list to be used in mmio_bar_list of ECB + U32 reserved1; + U32 reserved2; + U64 reserved3; +}; + +// *************************************************************************** + +typedef enum { + PMU_REG_RW_READ = 1, + PMU_REG_RW_WRITE, + PMU_REG_RW_READ_WRITE, +} PMU_REG_RW_TYPES; + +typedef enum { + PMU_REG_PROG_MSR = 1, + PMU_REG_PROG_PCI, + PMU_REG_PROG_MMIO, +} PMU_REG_PROG_TYPES; + +typedef enum { + PMU_REG_GLOBAL_CTRL = 1, + PMU_REG_UNIT_CTRL, + PMU_REG_UNIT_STATUS, + PMU_REG_DATA, + PMU_REG_EVENT_SELECT, + PMU_REG_FILTER, + PMU_REG_FIXED_CTRL, +} PMU_REG_TYPES; + +/*!\struct EVENT_REG_NODE + * \var reg_type - register type + * \var event_id_index - event ID index + * \var event_reg_id - register ID/pci register details + * \var desc_id - desc ID + * \var flags - flags + * \var reg_value - register value + * \var max_bits - max bits + * \var scheduled - boolean to specify if this event node has + * been scheduled already + * \var bus_no - PCI bus number + * \var dev_no - PCI device number + * \var func_no - PCI function number + * \var counter_type - Event counter type - static/freerun + * \var event_scope - Event scope - package/module/thread + * \var reg_prog_type - Register Programming type + * \var reg_rw_type - Register Read/Write type + * \var reg_order - Register order in the programming sequence + * \var + * \brief Data structure to describe the event registers + * + */ + +typedef struct EVENT_REG_NODE_S EVENT_REG_NODE; +typedef EVENT_REG_NODE * EVENT_REG; + +struct EVENT_REG_NODE_S { + U8 reg_type; + U8 unit_id; + U16 event_id_index; + U16 counter_event_offset; + U16 reserved1; + EVENT_REG_ID_NODE event_reg_id; + U64 reg_value; + U16 desc_id; + U16 flags; + U32 reserved2; + U64 max_bits; + U8 scheduled; + S8 secondary_pci_offset_shift; + U16 secondary_pci_offset_offset; // offset of the offset... 
+ U32 counter_type; + U32 event_scope; + U8 reg_prog_type; + U8 reg_rw_type; + U8 reg_order; + U8 bit_position; + U64 secondary_pci_offset_mask; + U32 core_event_id; + U32 uncore_buffer_offset_in_package; + U32 uncore_buffer_offset_in_system; + U32 reserved3; + U64 reserved4; + U64 reserved5; + U64 reserved6; +}; + +// +// Accessor macros for EVENT_REG node +// Note: the flags field is not directly addressible to prevent hackery +// +#define EVENT_REG_reg_type(x, i) ((x)[(i)].reg_type) +#define EVENT_REG_event_id_index(x, i) ((x)[(i)].event_id_index) +#define EVENT_REG_unit_id(x, i) ((x)[(i)].unit_id) +#define EVENT_REG_counter_event_offset(x, i) ((x)[(i)].counter_event_offset) +#define EVENT_REG_reg_id(x, i) ((x)[(i)].event_reg_id.reg_id) +#define EVENT_REG_bus_no(x, i) ((x)[(i)].event_reg_id.pci_bus_no) +#define EVENT_REG_dev_no(x, i) ((x)[(i)].event_reg_id.pci_dev_no) +#define EVENT_REG_func_no(x, i) ((x)[(i)].event_reg_id.pci_func_no) +#define EVENT_REG_offset(x, i) \ + ((x)[(i)].event_reg_id.reg_id) // points to the reg_id +#define EVENT_REG_data_size(x, i) ((x)[(i)].event_reg_id.data_size) +#define EVENT_REG_desc_id(x, i) ((x)[(i)].desc_id) +#define EVENT_REG_flags(x, i) ((x)[(i)].flags) +#define EVENT_REG_reg_value(x, i) ((x)[(i)].reg_value) +#define EVENT_REG_max_bits(x, i) ((x)[(i)].max_bits) +#define EVENT_REG_scheduled(x, i) ((x)[(i)].scheduled) +#define EVENT_REG_secondary_pci_offset_shift(x, i) \ + ((x)[(i)].secondary_pci_offset_shift) +#define EVENT_REG_secondary_pci_offset_offset(x, i) \ + ((x)[(i)].secondary_pci_offset_offset) +#define EVENT_REG_secondary_pci_offset_mask(x, i) \ + ((x)[(i)].secondary_pci_offset_mask) + +#define EVENT_REG_counter_type(x, i) ((x)[(i)].counter_type) +#define EVENT_REG_event_scope(x, i) ((x)[(i)].event_scope) +#define EVENT_REG_reg_prog_type(x, i) ((x)[(i)].reg_prog_type) +#define EVENT_REG_reg_rw_type(x, i) ((x)[(i)].reg_rw_type) +#define EVENT_REG_reg_order(x, i) ((x)[(i)].reg_order) +#define EVENT_REG_bit_position(x, 
i) ((x)[(i)].bit_position) + +#define EVENT_REG_core_event_id(x, i) ((x)[(i)].core_event_id) +#define EVENT_REG_uncore_buffer_offset_in_package(x, i) \ + ((x)[(i)].uncore_buffer_offset_in_package) +#define EVENT_REG_uncore_buffer_offset_in_system(x, i) \ + ((x)[(i)].uncore_buffer_offset_in_system) + +// +// Config bits +// +#define EVENT_REG_precise_bit 0x00000001 +#define EVENT_REG_global_bit 0x00000002 +#define EVENT_REG_uncore_bit 0x00000004 +#define EVENT_REG_uncore_q_rst_bit 0x00000008 +#define EVENT_REG_latency_bit 0x00000010 +#define EVENT_REG_is_gp_reg_bit 0x00000020 +#define EVENT_REG_clean_up_bit 0x00000040 +#define EVENT_REG_em_trigger_bit 0x00000080 +#define EVENT_REG_lbr_value_bit 0x00000100 +#define EVENT_REG_fixed_reg_bit 0x00000200 +#define EVENT_REG_multi_pkg_evt_bit 0x00001000 +#define EVENT_REG_branch_evt_bit 0x00002000 + +// +// Accessor macros for config bits +// +#define EVENT_REG_precise_get(x, i) ((x)[(i)].flags & EVENT_REG_precise_bit) +#define EVENT_REG_precise_set(x, i) ((x)[(i)].flags |= EVENT_REG_precise_bit) +#define EVENT_REG_precise_clear(x, i) ((x)[(i)].flags &= ~EVENT_REG_precise_bit) + +#define EVENT_REG_global_get(x, i) ((x)[(i)].flags & EVENT_REG_global_bit) +#define EVENT_REG_global_set(x, i) ((x)[(i)].flags |= EVENT_REG_global_bit) +#define EVENT_REG_global_clear(x, i) ((x)[(i)].flags &= ~EVENT_REG_global_bit) + +#define EVENT_REG_uncore_get(x, i) ((x)[(i)].flags & EVENT_REG_uncore_bit) +#define EVENT_REG_uncore_set(x, i) ((x)[(i)].flags |= EVENT_REG_uncore_bit) +#define EVENT_REG_uncore_clear(x, i) ((x)[(i)].flags &= ~EVENT_REG_uncore_bit) + +#define EVENT_REG_uncore_q_rst_get(x, i) \ + ((x)[(i)].flags & EVENT_REG_uncore_q_rst_bit) +#define EVENT_REG_uncore_q_rst_set(x, i) \ + ((x)[(i)].flags |= EVENT_REG_uncore_q_rst_bit) +#define EVENT_REG_uncore_q_rst_clear(x, i) \ + ((x)[(i)].flags &= ~EVENT_REG_uncore_q_rst_bit) + +#define EVENT_REG_latency_get(x, i) ((x)[(i)].flags & EVENT_REG_latency_bit) +#define 
EVENT_REG_latency_set(x, i) ((x)[(i)].flags |= EVENT_REG_latency_bit) +#define EVENT_REG_latency_clear(x, i) ((x)[(i)].flags &= ~EVENT_REG_latency_bit) + +#define EVENT_REG_is_gp_reg_get(x, i) ((x)[(i)].flags & EVENT_REG_is_gp_reg_bit) +#define EVENT_REG_is_gp_reg_set(x, i) \ + ((x)[(i)].flags |= EVENT_REG_is_gp_reg_bit) +#define EVENT_REG_is_gp_reg_clear(x, i) \ + ((x)[(i)].flags &= ~EVENT_REG_is_gp_reg_bit) + +#define EVENT_REG_lbr_value_get(x, i) ((x)[(i)].flags & EVENT_REG_lbr_value_bit) +#define EVENT_REG_lbr_value_set(x, i) \ + ((x)[(i)].flags |= EVENT_REG_lbr_value_bit) +#define EVENT_REG_lbr_value_clear(x, i) \ + ((x)[(i)].flags &= ~EVENT_REG_lbr_value_bit) + +#define EVENT_REG_fixed_reg_get(x, i) ((x)[(i)].flags & EVENT_REG_fixed_reg_bit) +#define EVENT_REG_fixed_reg_set(x, i) \ + ((x)[(i)].flags |= EVENT_REG_fixed_reg_bit) +#define EVENT_REG_fixed_reg_clear(x, i) \ + ((x)[(i)].flags &= ~EVENT_REG_fixed_reg_bit) + +#define EVENT_REG_multi_pkg_evt_bit_get(x, i) \ + ((x)[(i)].flags & EVENT_REG_multi_pkg_evt_bit) +#define EVENT_REG_multi_pkg_evt_bit_set(x, i) \ + ((x)[(i)].flags |= EVENT_REG_multi_pkg_evt_bit) +#define EVENT_REG_multi_pkg_evt_bit_clear(x, i) \ + ((x)[(i)].flags &= ~EVENT_REG_multi_pkg_evt_bit) + +#define EVENT_REG_clean_up_get(x, i) ((x)[(i)].flags & EVENT_REG_clean_up_bit) +#define EVENT_REG_clean_up_set(x, i) ((x)[(i)].flags |= EVENT_REG_clean_up_bit) +#define EVENT_REG_clean_up_clear(x, i) \ + ((x)[(i)].flags &= ~EVENT_REG_clean_up_bit) + +#define EVENT_REG_em_trigger_get(x, i) \ + ((x)[(i)].flags & EVENT_REG_em_trigger_bit) +#define EVENT_REG_em_trigger_set(x, i) \ + ((x)[(i)].flags |= EVENT_REG_em_trigger_bit) +#define EVENT_REG_em_trigger_clear(x, i) \ + ((x)[(i)].flags &= ~EVENT_REG_em_trigger_bit) + +#define EVENT_REG_branch_evt_get(x, i) \ + ((x)[(i)].flags & EVENT_REG_branch_evt_bit) +#define EVENT_REG_branch_evt_set(x, i) \ + ((x)[(i)].flags |= EVENT_REG_branch_evt_bit) +#define EVENT_REG_branch_evt_clear(x, i) \ + ((x)[(i)].flags 
&= ~EVENT_REG_branch_evt_bit) + +// *************************************************************************** + +/*!\struct DRV_PCI_DEVICE_ENTRY_NODE_S + * \var bus_no - PCI bus no to read + * \var dev_no - PCI device no to read + * \var func_no PCI device no to read + * \var bar_offset BASE Address Register offset of the PCI based PMU + * \var bit_offset Bit offset of the same + * \var size size of read/write + * \var bar_address the actual BAR present + * \var enable_offset Offset info to enable/disable + * \var enabled Status of enable/disable + * \brief Data structure to describe the PCI Device + * + */ + +typedef struct DRV_PCI_DEVICE_ENTRY_NODE_S DRV_PCI_DEVICE_ENTRY_NODE; +typedef DRV_PCI_DEVICE_ENTRY_NODE * DRV_PCI_DEVICE_ENTRY; + +struct DRV_PCI_DEVICE_ENTRY_NODE_S { + U32 bus_no; + U32 dev_no; + U32 func_no; + U32 bar_offset; + U64 bar_mask; + U32 bit_offset; + U32 size; + U64 bar_address; + U32 enable_offset; + U32 enabled; + U32 base_offset_for_mmio; + U32 operation; + U32 bar_name; + U32 prog_type; + U32 config_type; + S8 bar_shift; // positive shifts right, negative shifts left + U8 reserved0; + U16 reserved1; + U64 value; + U64 mask; + U64 virtual_address; + U32 port_id; + U32 op_code; + U32 device_id; + U16 bar_num; + U16 feature_id; + U64 reserved2; + U64 reserved3; + U64 reserved4; +}; + +// +// Accessor macros for DRV_PCI_DEVICE_NODE node +// +#define DRV_PCI_DEVICE_ENTRY_bus_no(x) ((x)->bus_no) +#define DRV_PCI_DEVICE_ENTRY_dev_no(x) ((x)->dev_no) +#define DRV_PCI_DEVICE_ENTRY_func_no(x) ((x)->func_no) +#define DRV_PCI_DEVICE_ENTRY_bar_offset(x) ((x)->bar_offset) +#define DRV_PCI_DEVICE_ENTRY_bar_mask(x) ((x)->bar_mask) +#define DRV_PCI_DEVICE_ENTRY_bit_offset(x) ((x)->bit_offset) +#define DRV_PCI_DEVICE_ENTRY_size(x) ((x)->size) +#define DRV_PCI_DEVICE_ENTRY_bar_address(x) ((x)->bar_address) +#define DRV_PCI_DEVICE_ENTRY_enable_offset(x) ((x)->enable_offset) +#define DRV_PCI_DEVICE_ENTRY_enable(x) ((x)->enabled) +#define 
DRV_PCI_DEVICE_ENTRY_base_offset_for_mmio(x) ((x)->base_offset_for_mmio) +#define DRV_PCI_DEVICE_ENTRY_operation(x) ((x)->operation) +#define DRV_PCI_DEVICE_ENTRY_bar_name(x) ((x)->bar_name) +#define DRV_PCI_DEVICE_ENTRY_prog_type(x) ((x)->prog_type) +#define DRV_PCI_DEVICE_ENTRY_config_type(x) ((x)->config_type) +#define DRV_PCI_DEVICE_ENTRY_bar_shift(x) ((x)->bar_shift) +#define DRV_PCI_DEVICE_ENTRY_value(x) ((x)->value) +#define DRV_PCI_DEVICE_ENTRY_mask(x) ((x)->mask) +#define DRV_PCI_DEVICE_ENTRY_virtual_address(x) ((x)->virtual_address) +#define DRV_PCI_DEVICE_ENTRY_port_id(x) ((x)->port_id) +#define DRV_PCI_DEVICE_ENTRY_op_code(x) ((x)->op_code) +#define DRV_PCI_DEVICE_ENTRY_device_id(x) ((x)->device_id) +#define DRV_PCI_DEVICE_ENTRY_bar_num(x) ((x)->bar_num) +#define DRV_PCI_DEVICE_ENTRY_feature_id(x) ((x)->feature_id) + +// *************************************************************************** +typedef enum { + PMU_OPERATION_INITIALIZE = 0, + PMU_OPERATION_WRITE, + PMU_OPERATION_ENABLE, + PMU_OPERATION_DISABLE, + PMU_OPERATION_READ, + PMU_OPERATION_CLEANUP, + PMU_OPERATION_READ_LBRS, + PMU_OPERATION_GLOBAL_REGS, + PMU_OPERATION_CTRL_GP, + PMU_OPERATION_DATA_FIXED, + PMU_OPERATION_DATA_GP, + PMU_OPERATION_OCR, + PMU_OPERATION_HW_ERRATA, + PMU_OPERATION_CHECK_OVERFLOW_GP_ERRATA, + PMU_OPERATION_CHECK_OVERFLOW_ERRATA, + PMU_OPERATION_ALL_REG, + PMU_OPERATION_DATA_ALL, + PMU_OPERATION_GLOBAL_STATUS, + PMU_OPERATION_METRICS, +} PMU_OPERATION_TYPES; +#define MAX_OPERATION_TYPES 32 + +/*!\struct PMU_OPERATIONS_NODE + * \var operation_type - Type of operation from enumeration PMU_OPERATION_TYPES + * \var register_start - Start index of the registers for a specific operation + * \var register_len - Number of registers for a specific operation + * + * \brief + * Structure for defining start and end indices in the ECB entries array for + * each type of operation performed in the driver + * initialize, write, read, enable, disable, etc. 
+ */ +typedef struct PMU_OPERATIONS_NODE_S PMU_OPERATIONS_NODE; +typedef PMU_OPERATIONS_NODE * PMU_OPERATIONS; +struct PMU_OPERATIONS_NODE_S { + U32 operation_type; + U32 register_start; + U32 register_len; + U32 reserved1; + U32 reserved2; + U32 reserved3; +}; +#define PMU_OPERATIONS_operation_type(x) ((x)->operation_type) +#define PMU_OPERATIONS_register_start(x) ((x)->register_start) +#define PMU_OPERATIONS_register_len(x) ((x)->register_len) +#define PMU_OPER_operation_type(x, i) ((x)[(i)].operation_type) +#define PMU_OPER_register_start(x, i) ((x)[(i)].register_start) +#define PMU_OPER_register_len(x, i) ((x)[(i)].register_len) + +typedef enum { + ECB_MMIO_BAR1 = 1, + ECB_MMIO_BAR2 = 2, + ECB_MMIO_BAR3 = 3, + ECB_MMIO_BAR4 = 4, + ECB_MMIO_BAR5 = 5, + ECB_MMIO_BAR6 = 6, + ECB_MMIO_BAR7 = 7, + ECB_MMIO_BAR8 = 8, +} MMIO_INDEX_LIST; +#define MAX_MMIO_BARS 8 + +/*!\struct MMIO_BAR_INFO_NODE + */ +typedef struct MMIO_BAR_INFO_NODE_S MMIO_BAR_INFO_NODE; +typedef MMIO_BAR_INFO_NODE * MMIO_BAR_INFO; + +struct MMIO_BAR_INFO_NODE_S { + U32 bus_no; + U32 dev_no; + U32 func_no; + U32 offset; + U32 addr_size; + U32 map_size; + S8 bar_shift; + U8 reserved1; + U16 reserved2; + U32 reserved3; + U32 reserved4; + U32 reserved5; + U64 bar_mask; + U64 base_mmio_offset; + U64 physical_address; + U64 virtual_address; + U64 reserved6; + U64 reserved7; +}; + +/*!\struct ECB_NODE_S + * \var num_entries - Total number of entries in "entries". + * \var group_id - Group ID. + * \var num_events - Number of events in this group. + * \var cccr_start - Starting index of counter configuration control + * registers in "entries". + * \var cccr_pop - Number of counter configuration control + * registers in "entries". + * \var escr_start - Starting index of event selection control + * registers in "entries". + * \var escr_pop - Number of event selection control registers + * in "entries". + * \var data_start - Starting index of data registers in "entries". 
+ * \var data_pop - Number of data registers in "entries". + * \var pcidev_entry_node PCI device details for one device + * \var entries - . All the register nodes required for programming + * + * \brief + */ + +typedef struct ECB_NODE_S ECB_NODE; +typedef ECB_NODE * ECB; + +struct ECB_NODE_S { + U8 version; + U8 reserved1; + U16 reserved2; + U32 num_entries; + U32 group_id; + U32 num_events; + U32 cccr_start; + U32 cccr_pop; + U32 escr_start; + U32 escr_pop; + U32 data_start; + U32 data_pop; + U16 flags; + U8 pmu_timer_interval; + U8 reserved3; + U32 size_of_allocation; + U32 group_offset; + U32 reserved4; + DRV_PCI_DEVICE_ENTRY_NODE pcidev_entry_node; + U32 num_pci_devices; + U32 pcidev_list_offset; + DRV_PCI_DEVICE_ENTRY pcidev_entry_list; + U32 device_type; + U32 dev_node; + PMU_OPERATIONS_NODE operations[MAX_OPERATION_TYPES]; + U32 descriptor_id; + U32 reserved5; + U32 metric_start; + U32 metric_pop; + MMIO_BAR_INFO_NODE mmio_bar_list[MAX_MMIO_BARS]; + U64 reserved6; + U64 reserved7; + U64 reserved8; + EVENT_REG_NODE entries[]; +}; + +// +// Accessor macros for ECB node +// +#define ECB_version(x) ((x)->version) +#define ECB_num_entries(x) ((x)->num_entries) +#define ECB_group_id(x) ((x)->group_id) +#define ECB_num_events(x) ((x)->num_events) +#define ECB_cccr_start(x) ((x)->cccr_start) +#define ECB_cccr_pop(x) ((x)->cccr_pop) +#define ECB_escr_start(x) ((x)->escr_start) +#define ECB_escr_pop(x) ((x)->escr_pop) +#define ECB_data_start(x) ((x)->data_start) +#define ECB_data_pop(x) ((x)->data_pop) +#define ECB_metric_start(x) ((x)->metric_start) +#define ECB_metric_pop(x) ((x)->metric_pop) +#define ECB_pcidev_entry_node(x) ((x)->pcidev_entry_node) +#define ECB_num_pci_devices(x) ((x)->num_pci_devices) +#define ECB_pcidev_list_offset(x) ((x)->pcidev_list_offset) +#define ECB_pcidev_entry_list(x) ((x)->pcidev_entry_list) +#define ECB_flags(x) ((x)->flags) +#define ECB_pmu_timer_interval(x) ((x)->pmu_timer_interval) +#define ECB_size_of_allocation(x) 
((x)->size_of_allocation) +#define ECB_group_offset(x) ((x)->group_offset) +#define ECB_device_type(x) ((x)->device_type) +#define ECB_dev_node(x) ((x)->dev_node) +#define ECB_operations(x) ((x)->operations) +#define ECB_descriptor_id(x) ((x)->descriptor_id) +#define ECB_entries(x) ((x)->entries) + +// for flag bit field +#define ECB_direct2core_bit 0x0001 +#define ECB_bl_bypass_bit 0x0002 +#define ECB_pci_id_offset_bit 0x0003 +#define ECB_pcu_ccst_debug 0x0004 + +#define ECB_VERSION 2 + +#define ECB_CONSTRUCT(x, num_entries, group_id, cccr_start, escr_start, \ + data_start, size_of_allocation) \ + { \ + ECB_num_entries((x)) = (num_entries); \ + ECB_group_id((x)) = (group_id); \ + ECB_cccr_start((x)) = (cccr_start); \ + ECB_cccr_pop((x)) = 0; \ + ECB_escr_start((x)) = (escr_start); \ + ECB_escr_pop((x)) = 0; \ + ECB_data_start((x)) = (data_start); \ + ECB_data_pop((x)) = 0; \ + ECB_metric_start((x)) = 0; \ + ECB_metric_pop((x)) = 0; \ + ECB_num_pci_devices((x)) = 0; \ + ECB_version((x)) = ECB_VERSION; \ + ECB_size_of_allocation((x)) = (size_of_allocation); \ + } + +#define ECB_CONSTRUCT2(x, num_entries, group_id, size_of_allocation) \ + { \ + ECB_num_entries((x)) = (num_entries); \ + ECB_group_id((x)) = (group_id); \ + ECB_num_pci_devices((x)) = 0; \ + ECB_version((x)) = ECB_VERSION; \ + ECB_size_of_allocation((x)) = (size_of_allocation); \ + } + +#define ECB_CONSTRUCT1(x, num_entries, group_id, cccr_start, escr_start, \ + data_start, num_pci_devices, size_of_allocation) \ + { \ + ECB_num_entries((x)) = (num_entries); \ + ECB_group_id((x)) = (group_id); \ + ECB_cccr_start((x)) = (cccr_start); \ + ECB_cccr_pop((x)) = 0; \ + ECB_escr_start((x)) = (escr_start); \ + ECB_escr_pop((x)) = 0; \ + ECB_data_start((x)) = (data_start); \ + ECB_data_pop((x)) = 0; \ + ECB_metric_start((x)) = 0; \ + ECB_metric_pop((x)) = 0; \ + ECB_num_pci_devices((x)) = (num_pci_devices); \ + ECB_version((x)) = ECB_VERSION; \ + ECB_size_of_allocation((x)) = (size_of_allocation); \ + } + + +// 
+// Accessor macros for ECB node entries +// +#define ECB_entries_reg_type(x, i) EVENT_REG_reg_type((ECB_entries(x)), (i)) +#define ECB_entries_event_id_index(x, i) \ + EVENT_REG_event_id_index((ECB_entries(x)), (i)) +#define ECB_entries_unit_id(x, i) EVENT_REG_unit_id((ECB_entries(x)), (i)) +#define ECB_entries_counter_event_offset(x, i) \ + EVENT_REG_counter_event_offset((ECB_entries(x)), (i)) +#define ECB_entries_reg_id(x, i) EVENT_REG_reg_id((ECB_entries(x)), (i)) +#define ECB_entries_reg_prog_type(x, i) \ + EVENT_REG_reg_prog_type((ECB_entries(x)), (i)) +#define ECB_entries_reg_offset(x, i) EVENT_REG_offset((ECB_entries(x)), (i)) +#define ECB_entries_reg_data_size(x, i) \ + EVENT_REG_data_size((ECB_entries(x)), (i)) +#define ECB_entries_desc_id(x, i) EVENT_REG_desc_id((ECB_entries(x)), i) +#define ECB_entries_flags(x, i) EVENT_REG_flags((ECB_entries(x)), i) +#define ECB_entries_reg_order(x, i) EVENT_REG_reg_order((ECB_entries(x)), i) +#define ECB_entries_reg_value(x, i) EVENT_REG_reg_value((ECB_entries(x)), (i)) +#define ECB_entries_max_bits(x, i) EVENT_REG_max_bits((ECB_entries(x)), (i)) +#define ECB_entries_scheduled(x, i) EVENT_REG_scheduled((ECB_entries(x)), (i)) +#define ECB_entries_counter_event_offset(x, i) \ + EVENT_REG_counter_event_offset((ECB_entries(x)), (i)) +#define ECB_entries_bit_position(x, i) \ + EVENT_REG_bit_position((ECB_entries(x)), (i)) +// PCI config-specific fields +#define ECB_entries_bus_no(x, i) EVENT_REG_bus_no((ECB_entries(x)), (i)) +#define ECB_entries_dev_no(x, i) EVENT_REG_dev_no((ECB_entries(x)), (i)) +#define ECB_entries_func_no(x, i) EVENT_REG_func_no((ECB_entries(x)), (i)) +#define ECB_entries_counter_type(x, i) \ + EVENT_REG_counter_type((ECB_entries(x)), (i)) +#define ECB_entries_event_scope(x, i) \ + EVENT_REG_event_scope((ECB_entries(x)), (i)) +#define ECB_entries_precise_get(x, i) \ + EVENT_REG_precise_get((ECB_entries(x)), (i)) +#define ECB_entries_global_get(x, i) EVENT_REG_global_get((ECB_entries(x)), (i)) +#define 
ECB_entries_uncore_get(x, i) EVENT_REG_uncore_get((ECB_entries(x)), (i)) +#define ECB_entries_uncore_q_rst_get(x, i) \ + EVENT_REG_uncore_q_rst_get((ECB_entries(x)), (i)) +#define ECB_entries_is_gp_reg_get(x, i) \ + EVENT_REG_is_gp_reg_get((ECB_entries(x)), (i)) +#define ECB_entries_lbr_value_get(x, i) \ + EVENT_REG_lbr_value_get((ECB_entries(x)), (i)) +#define ECB_entries_fixed_reg_get(x, i) \ + EVENT_REG_fixed_reg_get((ECB_entries(x)), (i)) +#define ECB_entries_is_multi_pkg_bit_set(x, i) \ + EVENT_REG_multi_pkg_evt_bit_get((ECB_entries(x)), (i)) +#define ECB_entries_clean_up_get(x, i) \ + EVENT_REG_clean_up_get((ECB_entries(x)), (i)) +#define ECB_entries_em_trigger_get(x, i) \ + EVENT_REG_em_trigger_get((ECB_entries(x)), (i)) +#define ECB_entries_branch_evt_get(x, i) \ + EVENT_REG_branch_evt_get((ECB_entries(x)), (i)) +#define ECB_entries_reg_rw_type(x, i) \ + EVENT_REG_reg_rw_type((ECB_entries(x)), (i)) +#define ECB_entries_secondary_pci_offset_offset(x, i) \ + EVENT_REG_secondary_pci_offset_offset((ECB_entries(x)), (i)) +#define ECB_entries_secondary_pci_offset_shift(x, i) \ + EVENT_REG_secondary_pci_offset_shift((ECB_entries(x)), (i)) +#define ECB_entries_secondary_pci_offset_mask(x, i) \ + EVENT_REG_secondary_pci_offset_mask((ECB_entries(x)), (i)) +#define ECB_operations_operation_type(x, i) \ + PMU_OPER_operation_type((ECB_operations(x)), (i)) +#define ECB_operations_register_start(x, i) \ + PMU_OPER_register_start((ECB_operations(x)), (i)) +#define ECB_operations_register_len(x, i) \ + PMU_OPER_register_len((ECB_operations(x)), (i)) + +#define ECB_entries_core_event_id(x, i) \ + EVENT_REG_core_event_id((ECB_entries(x)), (i)) +#define ECB_entries_uncore_buffer_offset_in_package(x, i) \ + EVENT_REG_uncore_buffer_offset_in_package((ECB_entries(x)), (i)) +#define ECB_entries_uncore_buffer_offset_in_system(x, i) \ + EVENT_REG_uncore_buffer_offset_in_system((ECB_entries(x)), (i)) + +#define ECB_SET_OPERATIONS(x, operation_type, start, len) \ + { \ + 
ECB_operations_operation_type(x, operation_type) \ + = operation_type; \ + ECB_operations_register_start(x, operation_type) = start; \ + ECB_operations_register_len(x, operation_type) = len; \ + } + + +// *************************************************************************** + +/*!\struct LBR_ENTRY_NODE_S + * \var etype TOS = 0; FROM = 1; TO = 2 + * \var type_index + * \var reg_id + */ + +typedef struct LBR_ENTRY_NODE_S LBR_ENTRY_NODE; +typedef LBR_ENTRY_NODE * LBR_ENTRY; + +struct LBR_ENTRY_NODE_S { + U16 etype; + U16 type_index; + U32 reg_id; +}; + +// +// Accessor macros for LBR entries +// +#define LBR_ENTRY_NODE_etype(lentry) ((lentry).etype) +#define LBR_ENTRY_NODE_type_index(lentry) ((lentry).type_index) +#define LBR_ENTRY_NODE_reg_id(lentry) ((lentry).reg_id) + +// *************************************************************************** + +/*!\struct LBR_NODE_S + * \var num_entries - The number of entries + * \var entries - The entries in the list + * + * \brief Data structure to describe the LBR registers that need to be read + * + */ + +typedef struct LBR_NODE_S LBR_NODE; +typedef LBR_NODE * LBR; + +struct LBR_NODE_S { + U32 size; + U32 num_entries; + LBR_ENTRY_NODE entries[]; +}; + +// +// Accessor macros for LBR node +// +#define LBR_size(lbr) ((lbr)->size) +#define LBR_num_entries(lbr) ((lbr)->num_entries) +#define LBR_entries_etype(lbr, idx) ((lbr)->entries[idx].etype) +#define LBR_entries_type_index(lbr, idx) ((lbr)->entries[idx].type_index) +#define LBR_entries_reg_id(lbr, idx) ((lbr)->entries[idx].reg_id) + +// *************************************************************************** + +/*!\struct PWR_ENTRY_NODE_S + * \var etype none as yet + * \var type_index + * \var reg_id + */ + +typedef struct PWR_ENTRY_NODE_S PWR_ENTRY_NODE; +typedef PWR_ENTRY_NODE * PWR_ENTRY; + +struct PWR_ENTRY_NODE_S { + U16 etype; + U16 type_index; + U32 reg_id; +}; + +// +// Accessor macros for PWR entries +// +#define PWR_ENTRY_NODE_etype(lentry) 
((lentry).etype) +#define PWR_ENTRY_NODE_type_index(lentry) ((lentry).type_index) +#define PWR_ENTRY_NODE_reg_id(lentry) ((lentry).reg_id) + +// *************************************************************************** + +/*!\struct PWR_NODE_S + * \var num_entries - The number of entries + * \var entries - The entries in the list + * + * \brief Data structure to describe the PWR registers that need to be read + * + */ + +typedef struct PWR_NODE_S PWR_NODE; +typedef PWR_NODE * PWR; + +struct PWR_NODE_S { + U32 size; + U32 num_entries; + PWR_ENTRY_NODE entries[]; +}; + +// +// Accessor macros for PWR node +// +#define PWR_size(lbr) ((lbr)->size) +#define PWR_num_entries(lbr) ((lbr)->num_entries) +#define PWR_entries_etype(lbr, idx) ((lbr)->entries[idx].etype) +#define PWR_entries_type_index(lbr, idx) ((lbr)->entries[idx].type_index) +#define PWR_entries_reg_id(lbr, idx) ((lbr)->entries[idx].reg_id) + +// *************************************************************************** + +/*!\struct RO_ENTRY_NODE_S + * \var type - DEAR, IEAR, BTB. + */ + +typedef struct RO_ENTRY_NODE_S RO_ENTRY_NODE; +typedef RO_ENTRY_NODE * RO_ENTRY; + +struct RO_ENTRY_NODE_S { + U32 reg_id; +}; + +// +// Accessor macros for RO entries +// +#define RO_ENTRY_NODE_reg_id(lentry) ((lentry).reg_id) + +// *************************************************************************** + +/*!\struct RO_NODE_S + * \var size - The total size including header and entries. + * \var num_entries - The number of entries. + * \var entries - The entries in the list. + * + * \brief Data structure to describe the RO registers that need to be read. 
+ * + */ + +typedef struct RO_NODE_S RO_NODE; +typedef RO_NODE * RO; + +struct RO_NODE_S { + U32 size; + U32 num_entries; + RO_ENTRY_NODE entries[]; +}; + +// +// Accessor macros for RO node +// +#define RO_size(ro) ((ro)->size) +#define RO_num_entries(ro) ((ro)->num_entries) +#define RO_entries_reg_id(ro, idx) ((ro)->entries[idx].reg_id) + +#if defined(__cplusplus) +} +#endif + +#endif diff --git a/drivers/platform/x86/sepdk/include/lwpmudrv_gfx.h b/drivers/platform/x86/sepdk/include/lwpmudrv_gfx.h new file mode 100644 index 0000000000000..3d8cf5290f724 --- /dev/null +++ b/drivers/platform/x86/sepdk/include/lwpmudrv_gfx.h @@ -0,0 +1,47 @@ +/* **************************************************************************** + * Copyright(C) 2009-2018 Intel Corporation. All Rights Reserved. + * + * This file is part of SEP Development Kit + * + * SEP Development Kit is free software; you can redistribute it + * and/or modify it under the terms of the GNU General Public License + * version 2 as published by the Free Software Foundation. + * + * SEP Development Kit is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * As a special exception, you may use this file as part of a free software + * library without restriction. Specifically, if other files instantiate + * templates or use macros or inline functions from this file, or you + * compile this file and link it with other files to produce an executable + * this file does not by itself cause the resulting executable to be + * covered by the GNU General Public License. This exception does not + * however invalidate any other reasons why the executable file might be + * covered by the GNU General Public License. 
+ * **************************************************************************** + */ + +#ifndef _LWPMUDRV_GFX_H_ +#define _LWPMUDRV_GFX_H_ + +#if defined(__cplusplus) +extern "C" { +#endif + +#define GFX_BASE_ADDRESS 0xFF200000 +#define GFX_BASE_NEW_OFFSET 0x00080000 +#define GFX_PERF_REG 0x040 // location of GFX counter relative to base +#define GFX_NUM_COUNTERS 9 // max number of GFX counters per counter group +#define GFX_CTR_OVF_VAL 0xFFFFFFFF // overflow value for GFX counters + +#define GFX_REG_CTR_CTRL 0x01FF +#define GFX_CTRL_DISABLE 0x1E00 + + +#if defined(__cplusplus) +} +#endif + +#endif diff --git a/drivers/platform/x86/sepdk/include/lwpmudrv_ioctl.h b/drivers/platform/x86/sepdk/include/lwpmudrv_ioctl.h new file mode 100644 index 0000000000000..9713b19c0e5ce --- /dev/null +++ b/drivers/platform/x86/sepdk/include/lwpmudrv_ioctl.h @@ -0,0 +1,301 @@ +/* **************************************************************************** + * Copyright(C) 2009-2018 Intel Corporation. All Rights Reserved. + * + * This file is part of SEP Development Kit + * + * SEP Development Kit is free software; you can redistribute it + * and/or modify it under the terms of the GNU General Public License + * version 2 as published by the Free Software Foundation. + * + * SEP Development Kit is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * As a special exception, you may use this file as part of a free software + * library without restriction. Specifically, if other files instantiate + * templates or use macros or inline functions from this file, or you + * compile this file and link it with other files to produce an executable + * this file does not by itself cause the resulting executable to be + * covered by the GNU General Public License. 
This exception does not + * however invalidate any other reasons why the executable file might be + * covered by the GNU General Public License. + * **************************************************************************** + */ + +#ifndef _LWPMUDRV_IOCTL_H_ +#define _LWPMUDRV_IOCTL_H_ + +#if defined(__cplusplus) +extern "C" { +#endif + +//SEP Driver Operation defines +/* + "NOTE THAT the definition must be identical across all OSes" + "DO NOT add any OS specific compile flag" +*/ +#define DRV_OPERATION_START 1 +#define DRV_OPERATION_STOP 2 +#define DRV_OPERATION_INIT_PMU 3 +#define DRV_OPERATION_INIT 4 +#define DRV_OPERATION_EM_GROUPS 5 +#define DRV_OPERATION_SET_CPU_MASK 17 +#define DRV_OPERATION_PCI_READ 18 +#define DRV_OPERATION_PCI_WRITE 19 +#define DRV_OPERATION_READ_PCI_CONFIG 20 +#define DRV_OPERATION_FD_PHYS 21 +#define DRV_OPERATION_WRITE_PCI_CONFIG 22 +#define DRV_OPERATION_INSERT_MARKER 23 +#define DRV_OPERATION_GET_NORMALIZED_TSC 24 +#define DRV_OPERATION_EM_CONFIG_NEXT 25 +#define DRV_OPERATION_SYS_CONFIG 26 +#define DRV_OPERATION_TSC_SKEW_INFO 27 +#define DRV_OPERATION_NUM_CORES 28 +#define DRV_OPERATION_COLLECT_SYS_CONFIG 29 +#define DRV_OPERATION_GET_SYS_CONFIG 30 +#define DRV_OPERATION_PAUSE 31 +#define DRV_OPERATION_RESUME 32 +#define DRV_OPERATION_SET_ASYNC_EVENT 33 +#define DRV_OPERATION_ASYNC_STOP 34 +#define DRV_OPERATION_TERMINATE 35 +#define DRV_OPERATION_READ_MSRS 36 +#define DRV_OPERATION_LBR_INFO 37 +#define DRV_OPERATION_RESERVE 38 +#define DRV_OPERATION_MARK 39 +#define DRV_OPERATION_AWAIT_STOP 40 +#define DRV_OPERATION_SEED_NAME 41 +#define DRV_OPERATION_KERNEL_CS 42 +#define DRV_OPERATION_SET_UID 43 +#define DRV_OPERATION_VERSION 51 +#define DRV_OPERATION_CHIPSET_INIT 52 +#define DRV_OPERATION_GET_CHIPSET_DEVICE_ID 53 +#define DRV_OPERATION_SWITCH_GROUP 54 +#define DRV_OPERATION_GET_NUM_CORE_CTRS 55 +#define DRV_OPERATION_PWR_INFO 56 +#define DRV_OPERATION_NUM_DESCRIPTOR 57 +#define DRV_OPERATION_DESC_NEXT 58 +#define 
DRV_OPERATION_MARK_OFF 59 +#define DRV_OPERATION_CREATE_MARKER 60 +#define DRV_OPERATION_GET_DRIVER_STATE 61 +#define DRV_OPERATION_READ_SWITCH_GROUP 62 +#define DRV_OPERATION_EM_GROUPS_UNC 63 +#define DRV_OPERATION_EM_CONFIG_NEXT_UNC 64 +#define DRV_OPERATION_INIT_UNC 65 +#define DRV_OPERATION_RO_INFO 66 +#define DRV_OPERATION_READ_MSR 67 +#define DRV_OPERATION_WRITE_MSR 68 +#define DRV_OPERATION_THREAD_SET_NAME 69 +#define DRV_OPERATION_GET_PLATFORM_INFO 70 +#define DRV_OPERATION_GET_NORMALIZED_TSC_STANDALONE 71 +#define DRV_OPERATION_READ_AND_RESET 72 +#define DRV_OPERATION_SET_CPU_TOPOLOGY 73 +#define DRV_OPERATION_INIT_NUM_DEV 74 +#define DRV_OPERATION_SET_GFX_EVENT 75 +#define DRV_OPERATION_GET_NUM_SAMPLES 76 +#define DRV_OPERATION_SET_PWR_EVENT 77 +#define DRV_OPERATION_SET_DEVICE_NUM_UNITS 78 +#define DRV_OPERATION_TIMER_TRIGGER_READ 79 +#define DRV_OPERATION_GET_INTERVAL_COUNTS 80 +#define DRV_OPERATION_FLUSH 81 +#define DRV_OPERATION_SET_SCAN_UNCORE_TOPOLOGY_INFO 82 +#define DRV_OPERATION_GET_UNCORE_TOPOLOGY 83 +#define DRV_OPERATION_GET_MARKER_ID 84 +#define DRV_OPERATION_GET_SAMPLE_DROP_INFO 85 +#define DRV_OPERATION_GET_DRV_SETUP_INFO 86 +#define DRV_OPERATION_GET_PLATFORM_TOPOLOGY 87 +#define DRV_OPERATION_GET_THREAD_COUNT 88 +#define DRV_OPERATION_GET_THREAD_INFO 89 +#define DRV_OPERATION_GET_DRIVER_LOG 90 +#define DRV_OPERATION_CONTROL_DRIVER_LOG 91 +#define DRV_OPERATION_SET_OSID 92 +#define DRV_OPERATION_GET_AGENT_MODE 93 +#define DRV_OPERATION_INIT_DRIVER 94 +#define DRV_OPERATION_SET_EMON_BUFFER_DRIVER_HELPER 95 +#define DRV_OPERATION_GET_NUM_VM 96 +#define DRV_OPERATION_GET_VCPU_MAP 97 + +// Only used by MAC OS +#define DRV_OPERATION_GET_ASLR_OFFSET 997 // this may not be needed +#define DRV_OPERATION_SET_OSX_VERSION 998 +#define DRV_OPERATION_PROVIDE_FUNCTION_PTRS 999 + +// IOCTL_SETUP + +// IOCTL_ARGS +typedef struct IOCTL_ARGS_NODE_S IOCTL_ARGS_NODE; +typedef IOCTL_ARGS_NODE * IOCTL_ARGS; + +#if defined(DRV_EM64T) +struct IOCTL_ARGS_NODE_S { + 
U64 len_drv_to_usr; + // buffer sent from driver(target) to user(host), stands for read buffer + char *buf_drv_to_usr; + // length of the driver(target) to user(host) buffer + U64 len_usr_to_drv; + // buffer sent from user(host) to driver(target) stands for write buffer + char *buf_usr_to_drv; // length of user(host) to driver(target) buffer + U32 command; +}; +#endif +#if defined(DRV_IA32) +struct IOCTL_ARGS_NODE_S { + U64 len_drv_to_usr; + // buffer sent from driver(target) to user(host),stands for read buffer + char *buf_drv_to_usr; // length of driver(target) to user(host) buffer + char *reserved1; + U64 len_usr_to_drv; + // sent from user(host) to driver(target),stands for write buffer + char *buf_usr_to_drv; // length of user(host) to driver(target) buffer + char *reserved2; + U32 command; +}; +#endif + +#if defined(DRV_OS_WINDOWS) + +// +// NtDeviceIoControlFile IoControlCode values for this device. +// +// Warning: Remember that the low two bits of the code specify how the +// buffers are passed to the driver! +// +// 16 bit device type. 12 bit function codes +#define LWPMUDRV_IOCTL_DEVICE_TYPE 0xA000 +// values 0-32768 reserved for Microsoft +#define LWPMUDRV_IOCTL_FUNCTION 0x0A00 // values 0-2047 reserved for Microsoft + +// +// Basic CTL CODE macro to reduce typographical errors +// Use for FILE_READ_ACCESS +// +#define LWPMUDRV_CTL_READ_CODE(x) \ + CTL_CODE(LWPMUDRV_IOCTL_DEVICE_TYPE, LWPMUDRV_IOCTL_FUNCTION + (x), \ + METHOD_BUFFERED, FILE_READ_ACCESS) + +/* Reference https://docs.microsoft.com/en-us/windows-hardware/drivers/kernel/defining-i-o-control-codes + CTL_CODE (DeviceType, Function, Method, Access) generates 32 bit code + ------------------------------------------------- ---------------- + | 31 | 30 ... 16 | 15 14 | 13 | 12 ... 
2 | 1 0 | + ------------------------------------------------------------------- + | common | device | req access | custom | func code | transfer | + | | type | | | | type | + ------------------------------------------------------------------- +*/ +#define LWPMUDRV_DEVICE_TYPE(x) ((x & 0xFFFF0000) >> 16) +#define LWPMUDRV_METHOD(x) (x & 3) +#define LWPMUDRV_FUNCTION(x) (((x >> 2) & 0x00000FFF) - 0x0A00) + +#define LWPMUDRV_IOCTL_CODE(x) LWPMUDRV_CTL_READ_CODE(x) + +#elif defined(SEP_ESX) + +typedef struct CPU_ARGS_NODE_S CPU_ARGS_NODE; +typedef CPU_ARGS_NODE * CPU_ARGS; +struct CPU_ARGS_NODE_S { + U64 len_drv_to_usr; + char *buf_drv_to_usr; + U32 command; + U32 CPU_ID; + U32 BUCKET_ID; +}; + +// IOCTL_SETUP +#define LWPMU_IOC_MAGIC 99 +#define OS_SUCCESS 0 +#define OS_STATUS int +//#define OS_ILLEGAL_IOCTL -ENOTTY +//#define OS_NO_MEM -ENOMEM +//#define OS_FAULT -EFAULT + +#define LWPMUDRV_IOCTL_IO(x) (x) +#define LWPMUDRV_IOCTL_IOR(x) (x) +#define LWPMUDRV_IOCTL_IOW(x) (x) +#define LWPMUDRV_IOCTL_IORW(x) (x) + +#elif defined(DRV_OS_LINUX) || defined(DRV_OS_SOLARIS) || \ + defined(DRV_OS_ANDROID) +// IOCTL_ARGS + +// COMPAT IOCTL_ARGS +#if defined(CONFIG_COMPAT) && defined(DRV_EM64T) +typedef struct IOCTL_COMPAT_ARGS_NODE_S IOCTL_COMPAT_ARGS_NODE; +typedef IOCTL_COMPAT_ARGS_NODE * IOCTL_COMPAT_ARGS; +struct IOCTL_COMPAT_ARGS_NODE_S { + U64 len_drv_to_usr; + compat_uptr_t buf_drv_to_usr; + U64 len_usr_to_drv; + compat_uptr_t buf_usr_to_drv; +}; +#endif + +// COMPAT IOCTL_SETUP +// +#define LWPMU_IOC_MAGIC 99 + +#if defined(CONFIG_COMPAT) && defined(DRV_EM64T) +#define LWPMUDRV_IOCTL_IO(x) _IO(LWPMU_IOC_MAGIC, (x)) +#define LWPMUDRV_IOCTL_IOR(x) _IOR(LWPMU_IOC_MAGIC, (x), compat_uptr_t) +#define LWPMUDRV_IOCTL_IOW(x) _IOW(LWPMU_IOC_MAGIC, (x), compat_uptr_t) +#define LWPMUDRV_IOCTL_IORW(x) _IOW(LWPMU_IOC_MAGIC, (x), compat_uptr_t) +#else +#define LWPMUDRV_IOCTL_IO(x) _IO(LWPMU_IOC_MAGIC, (x)) +#define LWPMUDRV_IOCTL_IOR(x) _IOR(LWPMU_IOC_MAGIC, (x), IOCTL_ARGS) 
+#define LWPMUDRV_IOCTL_IOW(x) _IOW(LWPMU_IOC_MAGIC, (x), IOCTL_ARGS) +#define LWPMUDRV_IOCTL_IORW(x) _IOW(LWPMU_IOC_MAGIC, (x), IOCTL_ARGS) +#endif + +#elif defined(DRV_OS_FREEBSD) + +// IOCTL_SETUP +// +#define LWPMU_IOC_MAGIC 99 + +/* FreeBSD is very strict about IOR/IOW/IOWR specifications on IOCTLs. + * Since these IOCTLs all pass down the real read/write buffer lengths + * and addresses inside of an IOCTL_ARGS_NODE data structure, we + * need to specify all of these as _IOW so that the kernel will + * view it as userspace passing the data to the driver, rather than + * the reverse. There are also some cases where Linux is passing + * a smaller type than IOCTL_ARGS_NODE, even though its really + * passing an IOCTL_ARGS_NODE. These needed to be fixed for FreeBSD. + */ +#define LWPMUDRV_IOCTL_IO(x) _IO(LWPMU_IOC_MAGIC, (x)) +#define LWPMUDRV_IOCTL_IOR(x) _IOW(LWPMU_IOC_MAGIC, (x), IOCTL_ARGS_NODE) +#define LWPMUDRV_IOCTL_IOW(x) _IOW(LWPMU_IOC_MAGIC, (x), IOCTL_ARGS_NODE) +#define LWPMUDRV_IOCTL_IORW(x) _IOW(LWPMU_IOC_MAGIC, (x), IOCTL_ARGS_NODE) + +#elif defined(DRV_OS_MAC) + +typedef struct CPU_ARGS_NODE_S CPU_ARGS_NODE; +typedef CPU_ARGS_NODE * CPU_ARGS; +struct CPU_ARGS_NODE_S { + U64 len_drv_to_usr; + char *buf_drv_to_usr; + U32 command; + U32 CPU_ID; + U32 BUCKET_ID; +}; + +// IOCTL_SETUP +// +#define LWPMU_IOC_MAGIC 99 +#define OS_SUCCESS 0 +#define OS_STATUS int +#define OS_ILLEGAL_IOCTL -ENOTTY +#define OS_NO_MEM -ENOMEM +#define OS_FAULT -EFAULT + +// Task file Opcodes. +// keeping the definitions as IOCTL but in MAC OSX +// these are really OpCodes consumed by Execute command. 
+ +#else +#error "unknown OS in lwpmudrv_ioctl.h" +#endif + +#if defined(__cplusplus) +} +#endif + +#endif diff --git a/drivers/platform/x86/sepdk/include/lwpmudrv_pwr.h b/drivers/platform/x86/sepdk/include/lwpmudrv_pwr.h new file mode 100644 index 0000000000000..d67f5ea1ffa4c --- /dev/null +++ b/drivers/platform/x86/sepdk/include/lwpmudrv_pwr.h @@ -0,0 +1,114 @@ +/* **************************************************************************** + * Copyright(C) 2009-2018 Intel Corporation. All Rights Reserved. + * + * This file is part of SEP Development Kit + * + * SEP Development Kit is free software; you can redistribute it + * and/or modify it under the terms of the GNU General Public License + * version 2 as published by the Free Software Foundation. + * + * SEP Development Kit is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * As a special exception, you may use this file as part of a free software + * library without restriction. Specifically, if other files instantiate + * templates or use macros or inline functions from this file, or you + * compile this file and link it with other files to produce an executable + * this file does not by itself cause the resulting executable to be + * covered by the GNU General Public License. This exception does not + * however invalidate any other reasons why the executable file might be + * covered by the GNU General Public License. 
+ * **************************************************************************** + */ + +#ifndef _LWPMUDRV_PWR_H_ +#define _LWPMUDRV_PWR_H_ + +#if defined(__cplusplus) +extern "C" { +#endif + +#define MAX_EVENT_NAME_LEN 512 +#define MAX_EVENT_DESC_LEN 1024 + +// Power event groups +enum PWR_EVENT_GROUPS { + IO_DEV_STATES = 1, + MMIO_DEV_STATES, + MMIO_SYS_STATES, + MMIO_IPC_DEV_RES, + MMIO_IPC_SYS_RES +}; + +typedef struct PWR_EVENT_INFO_NODE_S PWR_EVENT_INFO_NODE; +typedef PWR_EVENT_INFO_NODE * PWR_EVENT_INFO; + +struct PWR_EVENT_INFO_NODE_S { + U32 event_id; + U32 group_id; + char name[MAX_EVENT_NAME_LEN]; + char desc[MAX_EVENT_DESC_LEN]; + U32 io_baseaddr1; + U32 io_range1; + U32 io_baseaddr2; + U32 io_range2; + U32 offset; + U32 virtual_address; +}; + +#define PWR_EVENT_INFO_event_id(pwr_event) ((pwr_event)->event_id) +#define PWR_EVENT_INFO_group_id(pwr_event) ((pwr_event)->group_id) +#define PWR_EVENT_INFO_name(pwr_event) ((pwr_event)->name) +#define PWR_EVENT_INFO_desc(pwr_event) ((pwr_event)->desc) +#define PWR_EVENT_INFO_io_baseaddr1(pwr_event) ((pwr_event)->io_baseaddr1) +#define PWR_EVENT_INFO_io_range1(pwr_event) ((pwr_event)->io_range1) +#define PWR_EVENT_INFO_io_baseaddr2(pwr_event) ((pwr_event)->io_baseaddr2) +#define PWR_EVENT_INFO_io_range2(pwr_event) ((pwr_event)->io_range2) +#define PWR_EVENT_INFO_offset(pwr_event) ((pwr_event)->offset) +#define PWR_EVENT_INFO_virtual_address(pwr_event) ((pwr_event)->virtual_address) + +// IPC register offsets +#define IPC_BASE_ADDRESS 0xFF11C000 +#define IPC_CMD_OFFSET 0x00000000 +#define IPC_STS_OFFSET 0x00000004 +#define IPC_SPTR_OFFSET 0x00000008 +#define IPC_DPTR_OFFSET 0x0000000C +#define IPC_WBUF_OFFSET 0x00000080 +#define IPC_RBUF_OFFSET 0x00000090 +#define IPC_MAX_ADDR 0x100 + +// Write 3bytes in IPC_WBUF (2bytes for address and 1byte for value) +#define IPC_ADC_WRITE_1 0x000300FF +// Write 2bytes in IPC_WBUF (2bytes for address) and read 1byte from IPC_RBUF +#define IPC_ADC_READ_1 0x000210FF + +// IPC 
commands +#define IPC_MESSAGE_MSIC 0xFF +#define IPC_MESSAGE_CC 0xEF +#define IPC_MESSAGE_D_RESIDENCY 0xEA +#define IPC_MESSAGE_S_RESIDENCY 0xEB + +// IPC subcommands +#define IPC_COMMAND_WRITE 0x0 +#define IPC_COMMAND_READ 0x1 +#define IPC_COMMAND_START_RESIDENCY 0x0 +#define IPC_COMMAND_STOP_RESIDENCY 0x1 +#define IPC_COMMAND_DUMP_RESIDENCY 0x2 + +// IPC commands for S state residency counter +#define S_RESIDENCY_BASE_ADDRESS 0xFFFF71E0 +#define S_RESIDENCY_MAX_COUNTERS 0x4 +#define S_RESIDENCY_MAX_STATES 0x3 +// IPC commands for D state residency counter +#define D_RESIDENCY_BASE_ADDRESS 0xFFFF7000 +#define D_RESIDENCY_MAX_COUNTERS 0x78 // 40 LSS * 3 D states = 120 +#define D_RESIDENCY_MAX_STATES 0x3 +#define D_RESIDENCY_MAX_LSS 0x28 // 40 LSS + +#if defined(__cplusplus) +} +#endif + +#endif /* _LWPMUDRV_PWR_H_ */ diff --git a/drivers/platform/x86/sepdk/include/lwpmudrv_struct.h b/drivers/platform/x86/sepdk/include/lwpmudrv_struct.h new file mode 100644 index 0000000000000..629750152fdb2 --- /dev/null +++ b/drivers/platform/x86/sepdk/include/lwpmudrv_struct.h @@ -0,0 +1,2095 @@ +/* **************************************************************************** + * Copyright(C) 2009-2018 Intel Corporation. All Rights Reserved. + * + * This file is part of SEP Development Kit + * + * SEP Development Kit is free software; you can redistribute it + * and/or modify it under the terms of the GNU General Public License + * version 2 as published by the Free Software Foundation. + * + * SEP Development Kit is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * As a special exception, you may use this file as part of a free software + * library without restriction. 
Specifically, if other files instantiate + * templates or use macros or inline functions from this file, or you + * compile this file and link it with other files to produce an executable + * this file does not by itself cause the resulting executable to be + * covered by the GNU General Public License. This exception does not + * however invalidate any other reasons why the executable file might be + * covered by the GNU General Public License. + * **************************************************************************** + */ + +#ifndef _LWPMUDRV_STRUCT_UTILS_H_ +#define _LWPMUDRV_STRUCT_UTILS_H_ + +#if defined(__cplusplus) +extern "C" { +#endif + +// processor execution modes +#define MODE_UNKNOWN 99 +// the following defines must start at 0 +#define MODE_64BIT 3 +#define MODE_32BIT 2 +#define MODE_16BIT 1 +#define MODE_V86 0 + +// sampling methods +#define SM_RTC 2020 // real time clock +#define SM_VTD 2021 // OS Virtual Timer Device +#define SM_NMI 2022 // non-maskable interrupt time based +#define SM_EBS 2023 // event based sampling +#define SM_EBC 2024 // event based counting + +// sampling mechanism bitmap definitions +#define INTERRUPT_RTC 0x1 +#define INTERRUPT_VTD 0x2 +#define INTERRUPT_NMI 0x4 +#define INTERRUPT_EBS 0x8 + +// Device types +#define DEV_CORE 0x01 +#define DEV_UNC 0x02 + +// eflags defines +#define EFLAGS_VM 0x00020000 // V86 mode +#define EFLAGS_IOPL0 0 +#define EFLAGS_IOPL1 0x00001000 +#define EFLAGS_IOPL2 0x00002000 +#define EFLAGS_IOPL3 0x00003000 +#define MAX_EMON_GROUPS 1000 +#define MAX_PCI_BUSNO 256 +#define MAX_DEVICES 30 +#define MAX_REGS 64 +#define MAX_EMON_GROUPS 1000 +#define MAX_PCI_DEVNO 32 +#define MAX_PCI_FUNCNO 8 +#define MAX_PCI_DEVUNIT 16 +#define MAX_TURBO_VALUES 32 +#define REG_BIT_MASK 0xFFFFFFFFFFFFFFFFULL + +extern float freq_multiplier; + +// Enumeration for invoking dispatch on multiple cpus or not +typedef enum { DRV_MULTIPLE_INSTANCE = 0, DRV_SINGLE_INSTANCE } DRV_PROG_TYPE; + +typedef struct 
DRV_CONFIG_NODE_S DRV_CONFIG_NODE; +typedef DRV_CONFIG_NODE * DRV_CONFIG; + +struct DRV_CONFIG_NODE_S { + U32 size; + U16 version; + U16 reserved1; + U32 num_events; + U32 num_chipset_events; + U32 chipset_offset; + S32 seed_name_len; + union { + S8 *seed_name; + U64 dummy1; + } u1; + union { + S8 *cpu_mask; + U64 dummy2; + } u2; + union { + U64 collection_config; + struct { + U64 start_paused : 1; + U64 counting_mode : 1; + U64 enable_chipset : 1; + U64 enable_gfx : 1; + U64 enable_pwr : 1; + U64 emon_mode : 1; + U64 debug_inject : 1; + U64 virt_phys_translation : 1; + U64 enable_p_state : 1; + U64 enable_cp_mode : 1; + U64 read_pstate_msrs : 1; + U64 use_pcl : 1; + U64 enable_ebc : 1; + U64 enable_tbc : 1; + U64 ds_area_available : 1; + U64 per_cpu_tsc : 1; + U64 reserved_field1 : 48; + } s1; + } u3; + U64 target_pid; + U32 os_of_interest; + U16 unc_timer_interval; + U16 unc_em_factor; + S32 p_state_trigger_index; + DRV_BOOL multi_pebs_enabled; + U32 reserved2; + U32 reserved3; + U64 reserved4; + U64 reserved5; + U64 reserved6; +}; + +#define DRV_CONFIG_size(cfg) ((cfg)->size) +#define DRV_CONFIG_version(cfg) ((cfg)->version) +#define DRV_CONFIG_num_events(cfg) ((cfg)->num_events) +#define DRV_CONFIG_num_chipset_events(cfg) ((cfg)->num_chipset_events) +#define DRV_CONFIG_chipset_offset(cfg) ((cfg)->chipset_offset) + +#define DRV_CONFIG_seed_name(cfg) ((cfg)->u1.seed_name) +#define DRV_CONFIG_seed_name_len(cfg) ((cfg)->seed_name_len) +#define DRV_CONFIG_cpu_mask(cfg) ((cfg)->u2.cpu_mask) +#define DRV_CONFIG_start_paused(cfg) ((cfg)->u3.s1.start_paused) +#define DRV_CONFIG_counting_mode(cfg) ((cfg)->u3.s1.counting_mode) +#define DRV_CONFIG_enable_chipset(cfg) ((cfg)->u3.s1.enable_chipset) +#define DRV_CONFIG_enable_gfx(cfg) ((cfg)->u3.s1.enable_gfx) +#define DRV_CONFIG_enable_pwr(cfg) ((cfg)->u3.s1.enable_pwr) +#define DRV_CONFIG_emon_mode(cfg) ((cfg)->u3.s1.emon_mode) +#define DRV_CONFIG_debug_inject(cfg) ((cfg)->u3.s1.debug_inject) +#define 
DRV_CONFIG_virt_phys_translation(cfg) \ + ((cfg)->u3.s1.virt_phys_translation) +#define DRV_CONFIG_enable_p_state(cfg) ((cfg)->u3.s1.enable_p_state) +#define DRV_CONFIG_enable_cp_mode(cfg) ((cfg)->u3.s1.enable_cp_mode) +#define DRV_CONFIG_read_pstate_msrs(cfg) ((cfg)->u3.s1.read_pstate_msrs) +#define DRV_CONFIG_use_pcl(cfg) ((cfg)->u3.s1.use_pcl) +#define DRV_CONFIG_event_based_counts(cfg) ((cfg)->u3.s1.enable_ebc) +#define DRV_CONFIG_timer_based_counts(cfg) ((cfg)->u3.s1.enable_tbc) +#define DRV_CONFIG_ds_area_available(cfg) ((cfg)->u3.s1.ds_area_available) +#define DRV_CONFIG_per_cpu_tsc(cfg) ((cfg)->u3.s1.per_cpu_tsc) +#define DRV_CONFIG_target_pid(cfg) ((cfg)->target_pid) +#define DRV_CONFIG_os_of_interest(cfg) ((cfg)->os_of_interest) +#define DRV_CONFIG_unc_timer_interval(cfg) ((cfg)->unc_timer_interval) +#define DRV_CONFIG_unc_em_factor(cfg) ((cfg)->unc_em_factor) +#define DRV_CONFIG_p_state_trigger_index(cfg) ((cfg)->p_state_trigger_index) +#define DRV_CONFIG_multi_pebs_enabled(cfg) ((cfg)->multi_pebs_enabled) + +#define DRV_CONFIG_VERSION 1 + +typedef struct DEV_CONFIG_NODE_S DEV_CONFIG_NODE; +typedef DEV_CONFIG_NODE * DEV_CONFIG; + +struct DEV_CONFIG_NODE_S { + U16 size; + U16 version; + U32 dispatch_id; + U32 pebs_mode; + U32 pebs_record_num; + U32 results_offset; // to store the offset for this device's results + U32 max_gp_counters; + U32 device_type; + U32 core_type; + union { + U64 enable_bit_fields; + struct { + U64 pebs_capture : 1; + U64 collect_lbrs : 1; + U64 collect_callstacks : 1; + U64 collect_kernel_callstacks : 1; + U64 latency_capture : 1; + U64 power_capture : 1; + U64 htoff_mode : 1; + U64 eventing_ip_capture : 1; + U64 hle_capture : 1; + U64 precise_ip_lbrs : 1; + U64 store_lbrs : 1; + U64 tsc_capture : 1; + U64 enable_perf_metrics : 1; + U64 enable_adaptive_pebs : 1; + U64 apebs_collect_mem_info : 1; + U64 apebs_collect_gpr : 1; + U64 apebs_collect_xmm : 1; + U64 apebs_collect_lbrs : 1; + U64 collect_fixed_counter_pebs : 1; + U64 
collect_os_callstacks : 1; + U64 reserved_field1 : 44; + } s1; + } u1; + U32 emon_unc_offset[MAX_EMON_GROUPS]; + U32 ebc_group_id_offset; + U8 num_perf_metrics; + U8 apebs_num_lbr_entries; + U16 emon_perf_metrics_offset; + U32 device_scope; + U32 reserved1; + U64 reserved2; + U64 reserved3; + U64 reserved4; +}; + +#define DEV_CONFIG_dispatch_id(cfg) ((cfg)->dispatch_id) +#define DEV_CONFIG_pebs_mode(cfg) ((cfg)->pebs_mode) +#define DEV_CONFIG_pebs_record_num(cfg) ((cfg)->pebs_record_num) +#define DEV_CONFIG_results_offset(cfg) ((cfg)->results_offset) +#define DEV_CONFIG_max_gp_counters(cfg) ((cfg)->max_gp_counters) + +#define DEV_CONFIG_device_type(cfg) ((cfg)->device_type) +#define DEV_CONFIG_core_type(cfg) ((cfg)->core_type) + +#define DEV_CONFIG_pebs_capture(cfg) ((cfg)->u1.s1.pebs_capture) +#define DEV_CONFIG_collect_lbrs(cfg) ((cfg)->u1.s1.collect_lbrs) +#define DEV_CONFIG_collect_callstacks(cfg) ((cfg)->u1.s1.collect_callstacks) +#define DEV_CONFIG_collect_kernel_callstacks(cfg) \ + ((cfg)->u1.s1.collect_kernel_callstacks) +#define DEV_CONFIG_latency_capture(cfg) ((cfg)->u1.s1.latency_capture) +#define DEV_CONFIG_power_capture(cfg) ((cfg)->u1.s1.power_capture) +#define DEV_CONFIG_htoff_mode(cfg) ((cfg)->u1.s1.htoff_mode) +#define DEV_CONFIG_eventing_ip_capture(cfg) ((cfg)->u1.s1.eventing_ip_capture) +#define DEV_CONFIG_hle_capture(cfg) ((cfg)->u1.s1.hle_capture) +#define DEV_CONFIG_precise_ip_lbrs(cfg) ((cfg)->u1.s1.precise_ip_lbrs) +#define DEV_CONFIG_store_lbrs(cfg) ((cfg)->u1.s1.store_lbrs) +#define DEV_CONFIG_tsc_capture(cfg) ((cfg)->u1.s1.tsc_capture) +#define DEV_CONFIG_enable_perf_metrics(cfg) ((cfg)->u1.s1.enable_perf_metrics) +#define DEV_CONFIG_enable_adaptive_pebs(cfg) ((cfg)->u1.s1.enable_adaptive_pebs) +#define DEV_CONFIG_apebs_collect_mem_info(cfg) \ + ((cfg)->u1.s1.apebs_collect_mem_info) +#define DEV_CONFIG_apebs_collect_gpr(cfg) ((cfg)->u1.s1.apebs_collect_gpr) +#define DEV_CONFIG_apebs_collect_xmm(cfg) ((cfg)->u1.s1.apebs_collect_xmm) 
+#define DEV_CONFIG_apebs_collect_lbrs(cfg) ((cfg)->u1.s1.apebs_collect_lbrs) +#define DEV_CONFIG_collect_fixed_counter_pebs(cfg) \ + ((cfg)->u1.s1.collect_fixed_counter_pebs) +#define DEV_CONFIG_collect_os_callstacks(cfg) \ + ((cfg)->u1.s1.collect_os_callstacks) +#define DEV_CONFIG_enable_bit_fields(cfg) ((cfg)->u1.enable_bit_fields) +#define DEV_CONFIG_emon_unc_offset(cfg, grp_num) \ + ((cfg)->emon_unc_offset[grp_num]) +#define DEV_CONFIG_ebc_group_id_offset(cfg) ((cfg)->ebc_group_id_offset) +#define DEV_CONFIG_num_perf_metrics(cfg) ((cfg)->num_perf_metrics) +#define DEV_CONFIG_apebs_num_lbr_entries(cfg) ((cfg)->apebs_num_lbr_entries) +#define DEV_CONFIG_emon_perf_metrics_offset(cfg) \ + ((cfg)->emon_perf_metrics_offset) +#define DEV_CONFIG_device_scope(cfg) ((cfg)->device_scope) + +typedef struct DEV_UNC_CONFIG_NODE_S DEV_UNC_CONFIG_NODE; +typedef DEV_UNC_CONFIG_NODE * DEV_UNC_CONFIG; + +struct DEV_UNC_CONFIG_NODE_S { + U16 size; + U16 version; + U32 dispatch_id; + U32 results_offset; + U32 device_type; + U32 device_scope; + U32 reserved1; + U32 emon_unc_offset[MAX_EMON_GROUPS]; + U64 reserved2; + U64 reserved3; + U64 reserved4; +}; + +#define DEV_UNC_CONFIG_dispatch_id(cfg) ((cfg)->dispatch_id) +#define DEV_UNC_CONFIG_results_offset(cfg) ((cfg)->results_offset) +#define DEV_UNC_CONFIG_emon_unc_offset(cfg, grp_num) \ + ((cfg)->emon_unc_offset[grp_num]) +#define DEV_UNC_CONFIG_device_type(cfg) ((cfg)->device_type) +#define DEV_UNC_CONFIG_device_scope(cfg) ((cfg)->device_scope) + +/* + * X86 processor code descriptor + */ +typedef struct CodeDescriptor_s { + union { + U32 lowWord; // low dword of descriptor + struct { // low broken out by fields + U16 limitLow; // segment limit 15:00 + U16 baseLow; // segment base 15:00 + } s1; + } u1; + union { + U32 highWord; // high word of descriptor + struct { // high broken out by bit fields + U32 baseMid : 8; // base 23:16 + U32 accessed : 1; // accessed + U32 readable : 1; // readable + U32 conforming : 1; // conforming 
code segment + U32 oneOne : 2; // always 11 + U32 dpl : 2; // Dpl + U32 pres : 1; // present bit + U32 limitHi : 4; // limit 19:16 + U32 sys : 1; // available for use by system + U32 reserved_0 : 1; // reserved, always 0 + U32 default_size : 1; + // default operation size (1=32bit, 0=16bit) + U32 granularity : 1; // granularity (1=32 bit, 0=20 bit) + U32 baseHi : 8; // base hi 31:24 + } s2; + } u2; +} CodeDescriptor; + +/* + * Module record. These are emitted whenever a DLL/EXE is loaded or unloaded. + * The filename fields may be 0 on an unload. The records reperesent a module + * for a certain span of time, delineated by the load / unload samplecounts. + * Note: + * The structure contains 64 bit fields which may cause the compiler to pad the + * length of the structure to an 8 byte boundary. + */ +typedef struct ModuleRecord_s { + U16 recLength; // total length of this record (including this length, + // always U32 multiple) output from sampler is variable + // length (pathname at end of record) sampfile builder moves + // path names to a separate "literal pool" area + // so that these records become fixed length, and can be treated + // as an array see modrecFixedLen in header + + U16 segmentType : 2; + // V86, 16, 32, 64 (see MODE_ defines), maybe inaccurate for Win95 + // .. a 16 bit module may become a 32 bit module, inferred by + // ..looking at 1st sample record that matches the module selector + U16 loadEvent : 1; // 0 for load, 1 for unload + U16 processed : 1; // 0 for load, 1 for unload + U16 reserved0 : 12; + + U16 selector; // code selector or V86 segment + U16 segmentNameLength; + // length of the segment name if the segmentNameSet bit is set + U32 segmentNumber; + // segment number, Win95 can have multiple pieces for one module + union { + U32 flags; // all the flags as one dword + struct { + U32 exe : 1; // this module is an exe + U32 globalModule : 1; + // globally loaded module. 
There may be multiple + // module records for a global module, but the samples + // will only point to the 1st one, the others will be + // ignored. NT's Kernel32 is an example of this. + // REVISIT this?? + U32 bogusWin95 : 1; + // "bogus" win95 module. By bogus, we mean a + // module that has a pid of 0, no length and no base. + // Selector actually used as a 32 bit module. + U32 pidRecIndexRaw : 1; // pidRecIndex is raw OS pid + U32 sampleFound : 1; + // at least one sample referenced this module + U32 tscUsed : 1; // tsc set when record written + U32 duplicate : 1; + // 1st pass analysis has determined this is a + // duplicate load + U32 globalModuleTB5 : 1; + // module mapped into all processes on system + U32 segmentNameSet : 1; + // set if the segment name was collected + // (initially done for xbox collections) + U32 firstModuleRecInProcess : 1; + // if the pidCreatesTrackedInModuleRecs flag is set + // in the SampleHeaderEx struct and this flag + // is set, the associated module indicates + // the beginning of a new process + U32 source : 1; + // 0 for path in target system, + // 1 for path in host system + U32 unknownLoadAddress : 1; + // for 0 valid loadAddr64 value, + // 1 for invalid loadAddr64 value + U32 reserved1 : 20; + } s1; + } u2; + U64 length64; // module length + U64 loadAddr64; // load address + U32 pidRecIndex; + // process ID rec index (index into start of pid record section) + // .. (see pidRecIndexRaw). If pidRecIndex == 0 and pidRecIndexRaw == 1 + // ..then this is a kernel or global module. Can validly + // ..be 0 if not raw (array index). 
Use ReturnPid() to access this + // ..field + U32 osid; // OS identifier + U64 unloadTsc; // TSC collected on an unload event + U32 path; // module path name (section offset on disk) + // ..when initally written by sampler name is at end of this + // ..struct, when merged with main file names are pooled at end + // ..of ModuleRecord Section so ModulesRecords can be + // ..fixed length + U16 pathLength; // path name length (inludes terminating \0) + U16 filenameOffset; // offset into path name of base filename + U32 segmentName; // offset to the segmentName from the beginning of the + // module section in a processed module section + // (s/b 0 in a raw module record) + // in a raw module record, the segment name will follow the + // module name and the module name's terminating NULL char + U32 page_offset_high; + U64 tsc; // time stamp counter module event occurred + U32 parent_pid; // Parent PID of the process + U32 page_offset_low; +} ModuleRecord; + +#define MR_unloadTscSet(x, y) { (x)->unloadTsc = (y); } +#define MR_unloadTscGet(x) ((x)->unloadTsc) + +#define MR_page_offset_Set(x, y) \ + { \ + (x)->page_offset_low = (y)&0xFFFFFFFF; \ + (x)->page_offset_high = ((y) >> 32) & 0xFFFFFFFF; \ + } + +#define MR_page_offset_Get(x) \ + ((((U64)(x)->page_offset_high) << 32) | (x)->page_offset_low) + +// Accessor macros for ModuleRecord +#define MODULE_RECORD_rec_length(x) ((x)->recLength) +#define MODULE_RECORD_segment_type(x) ((x)->segmentType) +#define MODULE_RECORD_load_event(x) ((x)->loadEvent) +#define MODULE_RECORD_processed(x) ((x)->processed) +#define MODULE_RECORD_selector(x) ((x)->selector) +#define MODULE_RECORD_segment_name_length(x) ((x)->segmentNameLength) +#define MODULE_RECORD_segment_number(x) ((x)->segmentNumber) +#define MODULE_RECORD_flags(x) ((x)->u2.flags) +#define MODULE_RECORD_exe(x) ((x)->u2.s1.exe) +#define MODULE_RECORD_global_module(x) ((x)->u2.s1.globalModule) +#define MODULE_RECORD_bogus_win95(x) ((x)->u2.s1.bogusWin95) +#define 
MODULE_RECORD_pid_rec_index_raw(x) ((x)->u2.s1.pidRecIndexRaw) +#define MODULE_RECORD_sample_found(x) ((x)->u2.s1.sampleFound) +#define MODULE_RECORD_tsc_used(x) ((x)->u2.s1.tscUsed) +#define MODULE_RECORD_duplicate(x) ((x)->u2.s1.duplicate) +#define MODULE_RECORD_global_module_tb5(x) ((x)->u2.s1.globalModuleTB5) +#define MODULE_RECORD_segment_name_set(x) ((x)->u2.s1.segmentNameSet) +#define MODULE_RECORD_first_module_rec_in_process(x) \ + ((x)->u2.s1.firstModuleRecInProcess) +#define MODULE_RECORD_source(x) ((x)->u2.s1.source) +#define MODULE_RECORD_unknown_load_address(x) ((x)->u2.s1.unknownLoadAddress) +#define MODULE_RECORD_length64(x) ((x)->length64) +#define MODULE_RECORD_load_addr64(x) ((x)->loadAddr64) +#define MODULE_RECORD_pid_rec_index(x) ((x)->pidRecIndex) +#define MODULE_RECORD_load_sample_count(x) ((x)->u5.s2.loadSampleCount) +#define MODULE_RECORD_unload_sample_count(x) ((x)->u5.s2.unloadSampleCount) +#define MODULE_RECORD_unload_tsc(x) ((x)->unloadTsc) +#define MODULE_RECORD_path(x) ((x)->path) +#define MODULE_RECORD_path_length(x) ((x)->pathLength) +#define MODULE_RECORD_filename_offset(x) ((x)->filenameOffset) +#define MODULE_RECORD_segment_name(x) ((x)->segmentName) +#define MODULE_RECORD_tsc(x) ((x)->tsc) +#define MODULE_RECORD_parent_pid(x) ((x)->parent_pid) +#define MODULE_RECORD_osid(x) ((x)->osid) + +/* + * Sample record. Size can be determined by looking at the header record. + * There can be up to 3 sections. The SampleFileHeader defines the presence + * of sections and their offsets. Within a sample file, all of the sample + * records have the same number of sections and the same size. However, + * different sample record sections and sizes can exist in different + * sample files. Since recording counters and the time stamp counter for + * each sample can be space consuming, the user can determine whether or not + * this information is kept at sample collection time. 
+ */ + +typedef struct SampleRecordPC_s { // Program Counter section + U32 descriptor_id; + U32 osid; // OS identifier + union { + struct { + U64 iip; // IA64 interrupt instruction pointer + U64 ipsr; // IA64 interrupt processor status register + } s1; + struct { + U32 eip; // IA32 instruction pointer + U32 eflags; // IA32 eflags + CodeDescriptor csd; // IA32 code seg descriptor(8 bytes) + } s2; + } u1; + U16 cs; // IA32 cs (0 for IA64) + union { + U16 cpuAndOS; // cpu and OS info as one word + struct { // cpu and OS info broken out + U16 cpuNum : 12; // cpu number (0 - 4096) + U16 notVmid0 : 1; + // win95, vmid0 flag(1 means NOT vmid 0) + U16 codeMode : 2; // processor mode, see MODE_ defines + U16 uncore_valid : 1; + // identifies if the uncore count is valid + } s3; + } u2; + U32 tid; // OS thread ID (may get reused, see tidIsRaw) + U32 pidRecIndex; // process ID rec index (index into start of pid + // record section) .. can validly be 0 if not raw + // (array index). Use ReturnPid() to + // ..access this field .. (see pidRecIndexRaw) + union { + U32 bitFields2; + struct { + U32 mrIndex : 20; + // module record index (index into start of + // module rec section) .. 
(see mrIndexNone) + U32 eventIndex : 8; // index into the Events section + U32 tidIsRaw : 1; // tid is raw OS tid + U32 IA64PC : 1; // TRUE=this is a IA64 PC sample record + U32 pidRecIndexRaw : 1; // pidRecIndex is raw OS pid + U32 mrIndexNone : 1; // no mrIndex (unknown module) + } s4; + } u3; + U64 tsc; // processor timestamp counter +} SampleRecordPC, *PSampleRecordPC; + +#define SAMPLE_RECORD_descriptor_id(x) ((x)->descriptor_id) +#define SAMPLE_RECORD_osid(x) ((x)->osid) +#define SAMPLE_RECORD_iip(x) ((x)->u1.s1.iip) +#define SAMPLE_RECORD_ipsr(x) ((x)->u1.s1.ipsr) +#define SAMPLE_RECORD_eip(x) ((x)->u1.s2.eip) +#define SAMPLE_RECORD_eflags(x) ((x)->u1.s2.eflags) +#define SAMPLE_RECORD_csd(x) ((x)->u1.s2.csd) +#define SAMPLE_RECORD_cs(x) ((x)->cs) +#define SAMPLE_RECORD_cpu_and_os(x) ((x)->u2.cpuAndOS) +#define SAMPLE_RECORD_cpu_num(x) ((x)->u2.s3.cpuNum) +#define SAMPLE_RECORD_uncore_valid(x) ((x)->u2.s3.uncore_valid) +#define SAMPLE_RECORD_not_vmid0(x) ((x)->u2.s3.notVmid0) +#define SAMPLE_RECORD_code_mode(x) ((x)->u2.s3.codeMode) +#define SAMPLE_RECORD_tid(x) ((x)->tid) +#define SAMPLE_RECORD_pid_rec_index(x) ((x)->pidRecIndex) +#define SAMPLE_RECORD_bit_fields2(x) ((x)->u3.bitFields2) +#define SAMPLE_RECORD_mr_index(x) ((x)->u3.s4.mrIndex) +#define SAMPLE_RECORD_event_index(x) ((x)->u3.s4.eventIndex) +#define SAMPLE_RECORD_tid_is_raw(x) ((x)->u3.s4.tidIsRaw) +#define SAMPLE_RECORD_ia64_pc(x) ((x)->u3.s4.IA64PC) +#define SAMPLE_RECORD_pid_rec_index_raw(x) ((x)->u3.s4.pidRecIndexRaw) +#define SAMPLE_RECORD_mr_index_none(x) ((x)->u3.s4.mrIndexNone) +#define SAMPLE_RECORD_tsc(x) ((x)->tsc) + +// end of SampleRecord sections + +/* Uncore Sample Record definition. This is a skinny sample record used by + * uncore boxes to record samples. + * The sample record consists of a descriptor id, cpu info and timestamp. 
+ */ + +typedef struct UncoreSampleRecordPC_s { + U32 descriptor_id; + U32 osid; + U16 cpuNum; + U16 pkgNum; + union { + U32 flags; + struct { + U32 uncore_valid : 1; + // identifies if the uncore count is valid + U32 reserved1 : 31; + } s1; + } u1; + U64 reserved2; + U64 tsc; // processor timestamp counter +} UncoreSampleRecordPC, *PUnocreSampleRecordPC; + +#define UNCORE_SAMPLE_RECORD_descriptor_id(x) ((x)->descriptor_id) +#define UNCORE_SAMPLE_RECORD_osid(x) ((x)->osid) +#define UNCORE_SAMPLE_RECORD_cpu_num(x) ((x)->cpuNum) +#define UNCORE_SAMPLE_RECORD_pkg_num(x) ((x)->pkgNum) +#define UNCORE_SAMPLE_RECORD_uncore_valid(x) ((x)->u1.s1.uncore_valid) +#define UNCORE_SAMPLE_RECORD_tsc(x) ((x)->tsc) + +// end of UncoreSampleRecord section + +// Definitions for user markers data +// The instances of these structures will be written to user markers temp file +#define MARKER_DEFAULT_TYPE "Default_Marker" +#define MARKER_DEFAULT_ID 0 +#define MAX_MARKER_LENGTH 136 + +#define MARK_ID 4 +#define MARK_DATA 2 +#define THREAD_INFO 8 + +/* + * Common Register descriptions + */ + +/* + * Bits used in the debug control register + */ +#define DEBUG_CTL_LBR 0x0000001 +#define DEBUG_CTL_BTF 0x0000002 +#define DEBUG_CTL_TR 0x0000040 +#define DEBUG_CTL_BTS 0x0000080 +#define DEBUG_CTL_BTINT 0x0000100 +#define DEBUG_CTL_BT_OFF_OS 0x0000200 +#define DEBUG_CTL_BTS_OFF_USR 0x0000400 +#define DEBUG_CTL_FRZ_LBR_ON_PMI 0x0000800 +#define DEBUG_CTL_FRZ_PMON_ON_PMI 0x0001000 +#define DEBUG_CTL_ENABLE_UNCORE_PMI_BIT 0x0002000 + +#define DEBUG_CTL_NODE_lbr_get(reg) ((reg)&DEBUG_CTL_LBR) +#define DEBUG_CTL_NODE_lbr_set(reg) ((reg) |= DEBUG_CTL_LBR) +#define DEBUG_CTL_NODE_lbr_clear(reg) ((reg) &= ~DEBUG_CTL_LBR) + +#define DEBUG_CTL_NODE_btf_get(reg) ((reg)&DEBUG_CTL_BTF) +#define DEBUG_CTL_NODE_btf_set(reg) ((reg) |= DEBUG_CTL_BTF) +#define DEBUG_CTL_NODE_btf_clear(reg) ((reg) &= ~DEBUG_CTL_BTF) + +#define DEBUG_CTL_NODE_tr_get(reg) ((reg)&DEBUG_CTL_TR) +#define DEBUG_CTL_NODE_tr_set(reg) 
((reg) |= DEBUG_CTL_TR) +#define DEBUG_CTL_NODE_tr_clear(reg) ((reg) &= ~DEBUG_CTL_TR) + +#define DEBUG_CTL_NODE_bts_get(reg) ((reg)&DEBUG_CTL_BTS) +#define DEBUG_CTL_NODE_bts_set(reg) ((reg) |= DEBUG_CTL_BTS) +#define DEBUG_CTL_NODE_bts_clear(reg) ((reg) &= ~DEBUG_CTL_BTS) + +#define DEBUG_CTL_NODE_btint_get(reg) ((reg)&DEBUG_CTL_BTINT) +#define DEBUG_CTL_NODE_btint_set(reg) ((reg) |= DEBUG_CTL_BTINT) +#define DEBUG_CTL_NODE_btint_clear(reg) ((reg) &= ~DEBUG_CTL_BTINT) + +#define DEBUG_CTL_NODE_bts_off_os_get(reg) ((reg)&DEBUG_CTL_BTS_OFF_OS) +#define DEBUG_CTL_NODE_bts_off_os_set(reg) ((reg) |= DEBUG_CTL_BTS_OFF_OS) +#define DEBUG_CTL_NODE_bts_off_os_clear(reg) ((reg) &= ~DEBUG_CTL_BTS_OFF_OS) + +#define DEBUG_CTL_NODE_bts_off_usr_get(reg) ((reg)&DEBUG_CTL_BTS_OFF_USR) +#define DEBUG_CTL_NODE_bts_off_usr_set(reg) ((reg) |= DEBUG_CTL_BTS_OFF_USR) +#define DEBUG_CTL_NODE_bts_off_usr_clear(reg) ((reg) &= ~DEBUG_CTL_BTS_OFF_USR) + +#define DEBUG_CTL_NODE_frz_lbr_on_pmi_get(reg) ((reg)&DEBUG_CTL_FRZ_LBR_ON_PMI) +#define DEBUG_CTL_NODE_frz_lbr_on_pmi_set(reg) \ + ((reg) |= DEBUG_CTL_FRZ_LBR_ON_PMI) +#define DEBUG_CTL_NODE_frz_lbr_on_pmi_clear(reg) \ + ((reg) &= ~DEBUG_CTL_FRZ_LBR_ON_PMI) + +#define DEBUG_CTL_NODE_frz_pmon_on_pmi_get(reg) \ + ((reg)&DEBUG_CTL_FRZ_PMON_ON_PMI) +#define DEBUG_CTL_NODE_frz_pmon_on_pmi_set(reg) \ + ((reg) |= DEBUG_CTL_FRZ_PMON_ON_PMI) +#define DEBUG_CTL_NODE_frz_pmon_on_pmi_clear(reg) \ + ((reg) &= ~DEBUG_CTL_FRZ_PMON_ON_PMI) + +#define DEBUG_CTL_NODE_enable_uncore_pmi_get(reg) \ + ((reg)&DEBUG_CTL_ENABLE_UNCORE_PMI) +#define DEBUG_CTL_NODE_enable_uncore_pmi_set(reg) \ + ((reg) |= DEBUG_CTL_ENABLE_UNCORE_PMI) +#define DEBUG_CTL_NODE_enable_uncore_pmi_clear(reg) \ + ((reg) &= ~DEBUG_CTL_ENABLE_UNCORE_PMI) + +/* + * @macro SEP_VERSION_NODE_S + * @brief + * This structure supports versioning in Sep. The field major indicates major, + * version minor indicates the minor version and api indicates the api version + * for the current sep build. 
This structure is initialized at the time when + * the driver is loaded. + */ + +typedef struct SEP_VERSION_NODE_S SEP_VERSION_NODE; +typedef SEP_VERSION_NODE * SEP_VERSION; + +struct SEP_VERSION_NODE_S { + union { + U32 sep_version; + struct { + S32 major : 8; + S32 minor : 8; + S32 api : 8; + S32 update : 8; + } s1; + } u1; +}; + +#define SEP_VERSION_NODE_sep_version(version) ((version)->u1.sep_version) +#define SEP_VERSION_NODE_major(version) ((version)->u1.s1.major) +#define SEP_VERSION_NODE_minor(version) ((version)->u1.s1.minor) +#define SEP_VERSION_NODE_api(version) ((version)->u1.s1.api) +#define SEP_VERSION_NODE_update(version) ((version)->u1.s1.update) + +/* + * The VTSA_SYS_INFO_STRUCT information that is shared across kernel mode + * and user mode code, very specifically for tb5 file generation + */ + +typedef enum { + GT_UNK = 0, + GT_PER_CPU, + GT_PER_CHIPSET, + GT_CPUID, + GT_NODE, + GT_SYSTEM, + GT_SAMPLE_RECORD_INFO +} GEN_ENTRY_TYPES; + +typedef enum { + GST_UNK = 0, + GST_X86, + GST_ITANIUM, + GST_SA, //strong arm + GST_XSC, + GST_EM64T, + GST_CS860 +} GEN_ENTRY_SUBTYPES; + +typedef struct __fixed_size_pointer { + union { + U64 fs_force_alignment; + struct { + U32 fs_unused; + U32 is_ptr : 1; + } s1; + } u1; + union { + U64 fs_offset; + void *fs_ptr; + } u2; +} VTSA_FIXED_SIZE_PTR; + +#define VTSA_FIXED_SIZE_PTR_is_ptr(fsp) ((fsp)->u1.s1.is_ptr) +#define VTSA_FIXED_SIZE_PTR_fs_offset(fsp) ((fsp)->u2.fs_offset) +#define VTSA_FIXED_SIZE_PTR_fs_ptr(fsp) ((fsp)->u2.fs_ptr) + +typedef struct __generic_array_header { + // + // Information realted to the generic header + // + U32 hdr_size; // size of this generic header + // (for versioning and real data starts + // after the header) + + U32 next_field_hdr_padding; // make sure next field is 8-byte aligned + + // + // VTSA_FIXED_SIZE_PTR should always be on an 8-byte boundary... 
+ // + // pointer to the next generic header if there is one + // + VTSA_FIXED_SIZE_PTR hdr_next_gen_hdr; + + U32 hdr_reserved[7]; // padding for future use - force to 64 bytes... + + // + // Information related to the array this header is describing + // + U32 array_num_entries; + U32 array_entry_size; + U16 array_type; // from the GEN_ENTRY_TYPES enumeration + U16 array_subtype; // from the GEN_ENTRY_SUBTYPES enumeration +} VTSA_GEN_ARRAY_HDR; + +#define VTSA_GEN_ARRAY_HDR_hdr_size(gah) ((gah)->hdr_size) +#define VTSA_GEN_ARRAY_HDR_hdr_next_gen_hdr(gah) ((gah)->hdr_next_gen_hdr) +#define VTSA_GEN_ARRAY_HDR_array_num_entries(gah) ((gah)->array_num_entries) +#define VTSA_GEN_ARRAY_HDR_array_entry_size(gah) ((gah)->array_entry_size) +#define VTSA_GEN_ARRAY_HDR_array_type(gah) ((gah)->array_type) +#define VTSA_GEN_ARRAY_HDR_array_subtype(gah) ((gah)->array_subtype) + +typedef struct __cpuid_x86 { + U32 cpuid_eax_input; + U32 cpuid_eax; + U32 cpuid_ebx; + U32 cpuid_ecx; + U32 cpuid_edx; +} VTSA_CPUID_X86; + +#define VTSA_CPUID_X86_cpuid_eax_input(cid) ((cid)->cpuid_eax_input) +#define VTSA_CPUID_X86_cpuid_eax(cid) ((cid)->cpuid_eax) +#define VTSA_CPUID_X86_cpuid_ebx(cid) ((cid)->cpuid_ebx) +#define VTSA_CPUID_X86_cpuid_ecx(cid) ((cid)->cpuid_ecx) +#define VTSA_CPUID_X86_cpuid_edx(cid) ((cid)->cpuid_edx) + +typedef struct __cpuid_ipf { + U64 cpuid_select; + U64 cpuid_val; +} VTSA_CPUID_IPF; + +#define VTSA_CPUID_IPF_cpuid_select(cid) ((cid)->cpuid_select) +#define VTSA_CPUID_IPF_cpuid_val(cid) ((cid)->cpuid_val) + +typedef struct __generic_per_cpu { + // + // per cpu information + // + U32 cpu_number; // cpu number (as defined by the OS) + U32 cpu_speed_mhz; // cpu speed (in Mhz) + U32 cpu_fsb_mhz; // Front Side Bus speed (in Mhz) (if known) + U32 cpu_cache_L2; + // ??? USER: cpu L2 (marketing definition) cache size (if known) + + // + // And pointer to other structures. 
Keep this on an 8-byte boundary + // + // "pointer" to generic array header that should contain + // cpuid information for this cpu + // + VTSA_FIXED_SIZE_PTR cpu_cpuid_array; + + S64 cpu_tsc_offset; + // TSC offset from CPU 0 computed as (TSC CPU N - TSC CPU 0) + // + // intel processor number (from mkting). + // Currently 3 decimal digits (3xx, 5xx and 7xx) + // + U32 cpu_intel_processor_number; + + U32 cpu_cache_L3; + // ??? USER: cpu L3 (marketing definition) cache size (if known) + + U64 platform_id; + + // + // package/mapping information + // + // The hierarchy for uniquely identifying a logical processor + // in a system is node number/id (from the node structure), + // package number, core number, and thread number. + // Core number is for identifying a core within a package. + // + // Actually, on Itanium getting all this information is + // pretty involved with complicated algorithm using PAL calls. + // I don't know how important all this stuff is to the user. + // Maybe we can just have the place holder now and figure out + // how to fill them later. 
+ // + U16 cpu_package_num; // package number for this cpu (if known) + U16 cpu_core_num; // core number (if known) + U16 cpu_hw_thread_num; // hw thread number inside the core (if known) + + U16 cpu_threads_per_core; // total number of h/w threads per core + U16 cpu_module_id; // Processor module number + U16 cpu_num_modules; // Number of processor modules + U32 cpu_core_type; // Core type for hetero + U32 arch_perfmon_ver; + U32 num_gp_counters; + U32 num_fixed_counters; + U32 reserved1; + U64 reserved2; + U64 reserved3; + +} VTSA_GEN_PER_CPU; + +#define VTSA_GEN_PER_CPU_cpu_number(p_cpu) ((p_cpu)->cpu_number) +#define VTSA_GEN_PER_CPU_cpu_speed_mhz(p_cpu) ((p_cpu)->cpu_speed_mhz) +#define VTSA_GEN_PER_CPU_cpu_fsb_mhz(p_cpu) ((p_cpu)->cpu_fsb_mhz) +#define VTSA_GEN_PER_CPU_cpu_cache_L2(p_cpu) ((p_cpu)->cpu_cache_L2) +#define VTSA_GEN_PER_CPU_cpu_cpuid_array(p_cpu) ((p_cpu)->cpu_cpuid_array) +#define VTSA_GEN_PER_CPU_cpu_tsc_offset(p_cpu) ((p_cpu)->cpu_tsc_offset) +#define VTSA_GEN_PER_CPU_cpu_intel_processor_number(p_cpu) \ + ((p_cpu)->cpu_intel_processor_number) +#define VTSA_GEN_PER_CPU_cpu_cache_L3(p_cpu) ((p_cpu)->cpu_cache_L3) +#define VTSA_GEN_PER_CPU_platform_id(p_cpu) ((p_cpu)->platform_id) +#define VTSA_GEN_PER_CPU_cpu_package_num(p_cpu) ((p_cpu)->cpu_package_num) +#define VTSA_GEN_PER_CPU_cpu_core_num(p_cpu) ((p_cpu)->cpu_core_num) +#define VTSA_GEN_PER_CPU_cpu_hw_thread_num(p_cpu) ((p_cpu)->cpu_hw_thread_num) +#define VTSA_GEN_PER_CPU_cpu_threads_per_core(p_cpu) \ + ((p_cpu)->cpu_threads_per_core) +#define VTSA_GEN_PER_CPU_cpu_module_num(p_cpu) ((p_cpu)->cpu_module_id) +#define VTSA_GEN_PER_CPU_cpu_num_modules(p_cpu) ((p_cpu)->cpu_num_modules) +#define VTSA_GEN_PER_CPU_cpu_core_type(p_cpu) ((p_cpu)->cpu_core_type) +#define VTSA_GEN_PER_CPU_arch_perfmon_ver(p_cpu) ((p_cpu)->arch_perfmon_ver) +#define VTSA_GEN_PER_CPU_num_gp_counters(p_cpu) ((p_cpu)->num_gp_counters) +#define VTSA_GEN_PER_CPU_num_fixed_counters(p_cpu) ((p_cpu)->num_fixed_counters) + 
+typedef struct __node_info { + U32 node_type_from_shell; + U32 node_id; // The node number/id (if known) + + U32 node_num_available; // total number cpus on this node + U32 node_num_used; // USER: number used based on cpu mask at time of run + + U64 node_physical_memory; + // amount of physical memory (bytes) on this node + + // + // pointer to the first generic header that + // contains the per-cpu information + // + // Keep the VTSA_FIXED_SIZE_PTR on an 8-byte boundary... + // + VTSA_FIXED_SIZE_PTR node_percpu_array; + + U32 node_reserved[2]; // leave some space + +} VTSA_NODE_INFO; + +#define VTSA_NODE_INFO_node_type_from_shell(vni) ((vni)->node_type_from_shell) +#define VTSA_NODE_INFO_node_id(vni) ((vni)->node_id) +#define VTSA_NODE_INFO_node_num_available(vni) ((vni)->node_num_available) +#define VTSA_NODE_INFO_node_num_used(vni) ((vni)->node_num_used) +#define VTSA_NODE_INFO_node_physical_memory(vni) ((vni)->node_physical_memory) +#define VTSA_NODE_INFO_node_percpu_array(vni) ((vni)->node_percpu_array) + +typedef struct __sys_info { + // + // Keep this on an 8-byte boundary + // + VTSA_FIXED_SIZE_PTR node_array; // the per-node information + + U64 min_app_address; + // USER: lower allowed user space address (if known) + U64 max_app_address; + // USER: upper allowed user space address (if known) + U32 page_size; // Current page size + U32 allocation_granularity; + // USER: Granularity of allocation requests (if known) + U32 reserved1; // added for future fields + U32 reserved2; // alignment purpose + U64 reserved3[3]; // added for future fields + +} VTSA_SYS_INFO; + +#define VTSA_SYS_INFO_node_array(sys_info) ((sys_info)->node_array) +#define VTSA_SYS_INFO_min_app_address(sys_info) ((sys_info)->min_app_address) +#define VTSA_SYS_INFO_max_app_address(sys_info) ((sys_info)->max_app_address) +#define VTSA_SYS_INFO_page_size(sys_info) ((sys_info)->page_size) +#define VTSA_SYS_INFO_allocation_granularity(sys_info) \ + ((sys_info)->allocation_granularity) + 
+typedef struct DRV_TOPOLOGY_INFO_NODE_S DRV_TOPOLOGY_INFO_NODE; +typedef DRV_TOPOLOGY_INFO_NODE * DRV_TOPOLOGY_INFO; + +struct DRV_TOPOLOGY_INFO_NODE_S { + U32 cpu_number; // cpu number (as defined by the OS) + U16 cpu_package_num; // package number for this cpu (if known) + U16 cpu_core_num; // core number (if known) + U16 cpu_hw_thread_num; // T0 or T1 if HT enabled + U16 reserved1; + S32 socket_master; + S32 core_master; + S32 thr_master; + U32 cpu_module_num; + U32 cpu_module_master; + U32 cpu_num_modules; + U32 cpu_core_type; + U32 arch_perfmon_ver; + U32 num_gp_counters; + U32 num_fixed_counters; + U32 reserved2; + U64 reserved3; + U64 reserved4; +}; + +#define DRV_TOPOLOGY_INFO_cpu_number(dti) ((dti)->cpu_number) +#define DRV_TOPOLOGY_INFO_cpu_package_num(dti) ((dti)->cpu_package_num) +#define DRV_TOPOLOGY_INFO_cpu_core_num(dti) ((dti)->cpu_core_num) +#define DRV_TOPOLOGY_INFO_socket_master(dti) ((dti)->socket_master) +#define DRV_TOPOLOGY_INFO_core_master(dti) ((dti)->core_master) +#define DRV_TOPOLOGY_INFO_thr_master(dti) ((dti)->thr_master) +#define DRV_TOPOLOGY_INFO_cpu_hw_thread_num(dti) ((dti)->cpu_hw_thread_num) +#define DRV_TOPOLOGY_INFO_cpu_module_num(dti) ((dti)->cpu_module_num) +#define DRV_TOPOLOGY_INFO_cpu_module_master(dti) ((dti)->cpu_module_master) +#define DRV_TOPOLOGY_INFO_cpu_num_modules(dti) ((dti)->cpu_num_modules) +#define DRV_TOPOLOGY_INFO_cpu_core_type(dti) ((dti)->cpu_core_type) +#define DRV_TOPOLOGY_INFO_arch_perfmon_ver(dti) ((dti)->arch_perfmon_ver) +#define DRV_TOPOLOGY_INFO_num_gp_counters(dti) ((dti)->num_gp_counters) +#define DRV_TOPOLOGY_INFO_num_fixed_counters(dti) ((dti)->num_fixed_counters) + +#define VALUE_TO_BE_DISCOVERED 0 + +// dimm information +typedef struct DRV_DIMM_INFO_NODE_S DRV_DIMM_INFO_NODE; +typedef DRV_DIMM_INFO_NODE * DRV_DIMM_INFO; + +struct DRV_DIMM_INFO_NODE_S { + U32 platform_id; + U32 channel_num; + U32 rank_num; + U32 value; + U8 mc_num; + U8 dimm_valid; + U8 valid_value; + U8 rank_value; + U8 
density_value; + U8 width_value; + U16 socket_num; + U64 reserved1; + U64 reserved2; +}; + +#define DRV_DIMM_INFO_platform_id(di) ((di)->platform_id) +#define DRV_DIMM_INFO_channel_num(di) ((di)->channel_num) +#define DRV_DIMM_INFO_rank_num(di) ((di)->rank_num) +#define DRV_DIMM_INFO_value(di) ((di)->value) +#define DRV_DIMM_INFO_mc_num(di) ((di)->mc_num) +#define DRV_DIMM_INFO_dimm_valid(di) ((di)->dimm_valid) +#define DRV_DIMM_INFO_valid_value(di) ((di)->valid_value) +#define DRV_DIMM_INFO_rank_value(di) ((di)->rank_value) +#define DRV_DIMM_INFO_density_value(di) ((di)->density_value) +#define DRV_DIMM_INFO_width_value(di) ((di)->width_value) +#define DRV_DIMM_INFO_socket_num(di) ((di)->socket_num) + +//platform information. need to get from driver +#define MAX_PACKAGES 16 +#define MAX_CHANNELS 8 +#define MAX_RANKS 3 + +typedef struct DRV_PLATFORM_INFO_NODE_S DRV_PLATFORM_INFO_NODE; +typedef DRV_PLATFORM_INFO_NODE * DRV_PLATFORM_INFO; + +struct DRV_PLATFORM_INFO_NODE_S { + U64 info; // platform info + U64 ddr_freq_index; // freq table index + U8 misc_valid; // misc enabled valid bit + U8 reserved1; // added for alignment purpose + U16 reserved2; + U32 vmm_timer_freq; // timer frequency from VMM on SoFIA (in HZ) + U64 misc_info; // misc enabled info + U64 ufs_freq; // ufs frequency (HSX only) + DRV_DIMM_INFO_NODE dimm_info[MAX_PACKAGES * MAX_CHANNELS * MAX_RANKS]; + U64 energy_multiplier; // Value of energy multiplier + U64 reserved3; + U64 reserved4; + U64 reserved5; + U64 reserved6; +}; + +#define DRV_PLATFORM_INFO_info(data) ((data)->info) +#define DRV_PLATFORM_INFO_ddr_freq_index(data) ((data)->ddr_freq_index) +#define DRV_PLATFORM_INFO_misc_valid(data) ((data)->misc_valid) +#define DRV_PLATFORM_INFO_misc_info(data) ((data)->misc_info) +#define DRV_PLATFORM_INFO_ufs_freq(data) ((data)->ufs_freq) +#define DRV_PLATFORM_INFO_dimm_info(data) ((data)->dimm_info) +#define DRV_PLATFORM_INFO_energy_multiplier(data) ((data)->energy_multiplier) +#define 
DRV_PLATFORM_INFO_vmm_timer_freq(data) ((data)->vmm_timer_freq) + +//platform information. need to get from Platform picker +typedef struct PLATFORM_FREQ_INFO_NODE_S PLATFORM_FREQ_INFO_NODE; +typedef PLATFORM_FREQ_INFO_NODE * PLATFORM_FREQ_INFO; + +struct PLATFORM_FREQ_INFO_NODE_S { + float multiplier; // freq multiplier + double *table; // freq table + U32 table_size; // freq table size + U64 reserved1; + U64 reserved2; + U64 reserved3; + U64 reserved4; +}; +#define PLATFORM_FREQ_INFO_multiplier(data) ((data)->multiplier) +#define PLATFORM_FREQ_INFO_table(data) ((data)->table) +#define PLATFORM_FREQ_INFO_table_size(data) ((data)->table_size) + +typedef struct DEVICE_INFO_NODE_S DEVICE_INFO_NODE; +typedef DEVICE_INFO_NODE * DEVICE_INFO; //NEEDED in PP + +struct DEVICE_INFO_NODE_S { + S8 *dll_name; + PVOID dll_handle; + S8 *cpu_name; + S8 *pmu_name; + DRV_STCHAR *event_db_file_name; + //PLATFORM_IDENTITY plat_identity; + // is undefined right now. Please take this as structure containing U64 + U32 plat_type; + // device type (e.g., DEVICE_INFO_CORE, etc. ... see enum below) + U32 plat_sub_type; + // cti_type (e.g., CTI_Sandybridge, etc., ... see env_info_types.h) + S32 dispatch_id; + // this will be set in user mode dlls and will be unique across all + // IPF, IA32 (including MIDS). 
+ ECB *ecb; + EVENT_CONFIG ec; + DEV_CONFIG pcfg; + DEV_UNC_CONFIG pcfg_unc; + U32 num_of_groups; + U32 size_of_alloc; // size of each event control block + PVOID drv_event; + U32 num_events; + U32 event_id_index; + // event id index of device + // (basically how many events processed before this device) + U32 num_counters; + U32 group_index; + U32 num_packages; + U32 num_units; + U32 device_type; + U32 core_type; + U32 pmu_clone_id; // cti_type of platform to impersonate in device DLLs + U32 device_scope; + U32 reserved1; + U64 reserved2; + U64 reserved3; +}; + +#define MAX_EVENT_NAME_LENGTH 256 + +#define DEVICE_INFO_dll_name(pdev) ((pdev)->dll_name) +#define DEVICE_INFO_dll_handle(pdev) ((pdev)->dll_handle) +#define DEVICE_INFO_cpu_name(pdev) ((pdev)->cpu_name) +#define DEVICE_INFO_pmu_name(pdev) ((pdev)->pmu_name) +#define DEVICE_INFO_event_db_file_name(pdev) ((pdev)->event_db_file_name) +#define DEVICE_INFO_plat_type(pdev) ((pdev)->plat_type) +#define DEVICE_INFO_plat_sub_type(pdev) ((pdev)->plat_sub_type) +#define DEVICE_INFO_pmu_clone_id(pdev) ((pdev)->pmu_clone_id) +#define DEVICE_INFO_dispatch_id(pdev) ((pdev)->dispatch_id) +#define DEVICE_INFO_ecb(pdev) ((pdev)->ecb) +#define DEVICE_INFO_ec(pdev) ((pdev)->ec) +#define DEVICE_INFO_pcfg(pdev) ((pdev)->pcfg) +#define DEVICE_INFO_pcfg_unc(pdev) ((pdev)->pcfg_unc) +#define DEVICE_INFO_num_groups(pdev) ((pdev)->num_of_groups) +#define DEVICE_INFO_size_of_alloc(pdev) ((pdev)->size_of_alloc) +#define DEVICE_INFO_drv_event(pdev) ((pdev)->drv_event) +#define DEVICE_INFO_num_events(pdev) ((pdev)->num_events) +#define DEVICE_INFO_event_id_index(pdev) ((pdev)->event_id_index) +#define DEVICE_INFO_num_counters(pdev) ((pdev)->num_counters) +#define DEVICE_INFO_group_index(pdev) ((pdev)->group_index) +#define DEVICE_INFO_num_packages(pdev) ((pdev)->num_packages) +#define DEVICE_INFO_num_units(pdev) ((pdev)->num_units) +#define DEVICE_INFO_device_type(pdev) ((pdev)->device_type) +#define DEVICE_INFO_core_type(pdev) 
((pdev)->core_type) +#define DEVICE_INFO_device_scope(pdev) ((pdev)->device_scope) + +typedef struct DEVICE_INFO_DATA_NODE_S DEVICE_INFO_DATA_NODE; +typedef DEVICE_INFO_DATA_NODE * DEVICE_INFO_DATA; //NEEDED in PP + +struct DEVICE_INFO_DATA_NODE_S { + DEVICE_INFO pdev_info; + U32 num_elements; + U32 num_allocated; + U64 reserved1; + U64 reserved2; + U64 reserved3; + U64 reserved4; +}; + +#define DEVICE_INFO_DATA_pdev_info(d) ((d)->pdev_info) +#define DEVICE_INFO_DATA_num_elements(d) ((d)->num_elements) +#define DEVICE_INFO_DATA_num_allocated(d) ((d)->num_allocated) + +typedef enum { + DEVICE_INFO_CORE = 0, + DEVICE_INFO_UNCORE = 1, + DEVICE_INFO_CHIPSET = 2, + DEVICE_INFO_GFX = 3, + DEVICE_INFO_PWR = 4, + DEVICE_INFO_TELEMETRY = 5 +} DEVICE_INFO_TYPE; + +typedef enum { + INVALID_TERMINATE_TYPE = 0, + STOP_TERMINATE, + CANCEL_TERMINATE +} ABNORMAL_TERMINATE_TYPE; + +typedef enum { + DEVICE_SCOPE_PACKAGE = 0, + DEVICE_SCOPE_SYSTEM = 1 +} DEVICE_SCOPE_TYPE; + +typedef struct PCIFUNC_INFO_NODE_S PCIFUNC_INFO_NODE; +typedef PCIFUNC_INFO_NODE * PCIFUNC_INFO; + +struct PCIFUNC_INFO_NODE_S { + U32 valid; + U32 num_entries; + // the number of entries found with same + // but difference bus_no. 
+ U64 deviceId; + U64 reserved1; + U64 reserved2; +}; + +#define PCIFUNC_INFO_NODE_funcno(x) ((x)->funcno) +#define PCIFUNC_INFO_NODE_valid(x) ((x)->valid) +#define PCIFUNC_INFO_NODE_deviceId(x) ((x)->deviceId) +#define PCIFUNC_INFO_NODE_num_entries(x) ((x)->num_entries) + +typedef struct PCIDEV_INFO_NODE_S PCIDEV_INFO_NODE; +typedef PCIDEV_INFO_NODE * PCIDEV_INFO; + +struct PCIDEV_INFO_NODE_S { + PCIFUNC_INFO_NODE func_info[MAX_PCI_FUNCNO]; + U32 valid; + U32 dispatch_id; + U64 reserved1; + U64 reserved2; +}; + +#define PCIDEV_INFO_NODE_func_info(x, i) ((x).func_info[i]) +#define PCIDEV_INFO_NODE_valid(x) ((x).valid) + +typedef struct UNCORE_PCIDEV_NODE_S UNCORE_PCIDEV_NODE; + +struct UNCORE_PCIDEV_NODE_S { + PCIDEV_INFO_NODE pcidev[MAX_PCI_DEVNO]; + U32 dispatch_id; + U32 scan; + U32 num_uncore_units; + U32 num_deviceid_entries; + U8 dimm_device1; + U8 dimm_device2; + U16 reserved1; + U32 reserved2; + U64 reserved3; + U64 reserved4; + U32 deviceid_list[MAX_PCI_DEVNO]; +}; + +// Structure used to perform uncore device discovery + +typedef struct UNCORE_TOPOLOGY_INFO_NODE_S UNCORE_TOPOLOGY_INFO_NODE; +typedef UNCORE_TOPOLOGY_INFO_NODE * UNCORE_TOPOLOGY_INFO; + +struct UNCORE_TOPOLOGY_INFO_NODE_S { + UNCORE_PCIDEV_NODE device[MAX_DEVICES]; +}; + +#define UNCORE_TOPOLOGY_INFO_device(x, dev_index) ((x)->device[dev_index]) +#define UNCORE_TOPOLOGY_INFO_device_dispatch_id(x, dev_index) \ + ((x)->device[dev_index].dispatch_id) +#define UNCORE_TOPOLOGY_INFO_device_scan(x, dev_index) \ + ((x)->device[dev_index].scan) +#define UNCORE_TOPOLOGY_INFO_pcidev_valid(x, dev_index, devno) \ + ((x)->device[dev_index].pcidev[devno].valid) +#define UNCORE_TOPOLOGY_INFO_pcidev_dispatch_id(x, dev_index, devno) \ + ((x)->device[dev_index].pcidev[devno].dispatch_id) +#define UNCORE_TOPOLOGY_INFO_pcidev(x, dev_index, devno) \ + ((x)->device[dev_index].pcidev[devno]) +#define UNCORE_TOPOLOGY_INFO_num_uncore_units(x, dev_index) \ + ((x)->device[dev_index].num_uncore_units) +#define 
UNCORE_TOPOLOGY_INFO_num_deviceid_entries(x, dev_index) \ + ((x)->device[dev_index].num_deviceid_entries) +#define UNCORE_TOPOLOGY_INFO_dimm_device1(x, dev_index) \ + ((x)->device[dev_index].dimm_device1) +#define UNCORE_TOPOLOGY_INFO_dimm_device2(x, dev_index) \ + ((x)->device[dev_index].dimm_device2) +#define UNCORE_TOPOLOGY_INFO_deviceid(x, dev_index, deviceid_idx) \ + ((x)->device[dev_index].deviceid_list[deviceid_idx]) +#define UNCORE_TOPOLOGY_INFO_pcidev_set_funcno_valid(x, dev_index, devno, \ + funcno) \ + ((x)->device[dev_index].pcidev[devno].func_info[funcno].valid = 1) +#define UNCORE_TOPOLOGY_INFO_pcidev_is_found_in_platform(x, dev_index, devno, \ + funcno) \ + ((x)->device[dev_index].pcidev[devno].func_info[funcno].num_entries) +#define UNCORE_TOPOLOGY_INFO_pcidev_is_devno_funcno_valid(x, dev_index, devno, \ + funcno) \ + ((x)->device[dev_index].pcidev[devno].func_info[funcno].valid ? TRUE : \ + FALSE) +#define UNCORE_TOPOLOGY_INFO_pcidev_is_device_found(x, dev_index, devno, \ + funcno) \ + ((x)->device[dev_index].pcidev[devno].func_info[funcno].num_entries > 0) + +#define UNCORE_TOPOLOGY_INFO_pcidev_num_entries_found(x, dev_index, devno, \ + funcno) \ + ((x)->device[dev_index].pcidev[devno].func_info[funcno].num_entries) + +typedef enum { + CORE_TOPOLOGY_NODE = 0, + UNCORE_TOPOLOGY_NODE_IMC = 1, + UNCORE_TOPOLOGY_NODE_UBOX = 2, + UNCORE_TOPOLOGY_NODE_QPI = 3, + MAX_TOPOLOGY_DEV = 4, + // When adding a new topo node to this enum, + // make sure MAX_TOPOLOGY_DEV is always the last one. 
+} UNCORE_TOPOLOGY_NODE_INDEX_TYPE; + +typedef struct PLATFORM_TOPOLOGY_REG_NODE_S PLATFORM_TOPOLOGY_REG_NODE; +typedef PLATFORM_TOPOLOGY_REG_NODE * PLATFORM_TOPOLOGY_REG; + +struct PLATFORM_TOPOLOGY_REG_NODE_S { + U32 bus; + U32 device; + U32 function; + U32 reg_id; + U64 reg_mask; + U64 reg_value[MAX_PACKAGES]; + U8 reg_type; + U8 device_valid; + U16 reserved1; + U32 reserved2; + U64 reserved3; + U64 reserved4; +}; + +#define PLATFORM_TOPOLOGY_REG_bus(x, i) ((x)[(i)].bus) +#define PLATFORM_TOPOLOGY_REG_device(x, i) ((x)[(i)].device) +#define PLATFORM_TOPOLOGY_REG_function(x, i) ((x)[(i)].function) +#define PLATFORM_TOPOLOGY_REG_reg_id(x, i) ((x)[(i)].reg_id) +#define PLATFORM_TOPOLOGY_REG_reg_mask(x, i) ((x)[(i)].reg_mask) +#define PLATFORM_TOPOLOGY_REG_reg_type(x, i) ((x)[(i)].reg_type) +#define PLATFORM_TOPOLOGY_REG_device_valid(x, i) ((x)[(i)].device_valid) +#define PLATFORM_TOPOLOGY_REG_reg_value(x, i, package_no) \ + ((x)[(i)].reg_value[package_no]) + +typedef struct PLATFORM_TOPOLOGY_DISCOVERY_NODE_S + PLATFORM_TOPOLOGY_DISCOVERY_NODE; +typedef PLATFORM_TOPOLOGY_DISCOVERY_NODE * PLATFORM_TOPOLOGY_DISCOVERY; + +struct PLATFORM_TOPOLOGY_DISCOVERY_NODE_S { + U32 device_index; + U32 device_id; + U32 num_registers; + U8 scope; + U8 prog_valid; + U16 reserved2; + U64 reserved3; + U64 reserved4; + U64 reserved5; + PLATFORM_TOPOLOGY_REG_NODE topology_regs[MAX_REGS]; +}; + +//Structure used to discover the uncore device topology_device + +typedef struct PLATFORM_TOPOLOGY_PROG_NODE_S PLATFORM_TOPOLOGY_PROG_NODE; +typedef PLATFORM_TOPOLOGY_PROG_NODE * PLATFORM_TOPOLOGY_PROG; + +struct PLATFORM_TOPOLOGY_PROG_NODE_S { + U32 num_devices; + PLATFORM_TOPOLOGY_DISCOVERY_NODE topology_device[MAX_TOPOLOGY_DEV]; +}; + +#define PLATFORM_TOPOLOGY_PROG_num_devices(x) ((x)->num_devices) +#define PLATFORM_TOPOLOGY_PROG_topology_device(x, dev_index) \ + ((x)->topology_device[dev_index]) +#define PLATFORM_TOPOLOGY_PROG_topology_device_device_index(x, dev_index) \ + 
((x)->topology_device[dev_index].device_index) +#define PLATFORM_TOPOLOGY_PROG_topology_device_device_id(x, dev_index) \ + ((x)->topology_device[dev_index].device_id) +#define PLATFORM_TOPOLOGY_PROG_topology_device_scope(x, dev_index) \ + ((x)->topology_device[dev_index].scope) +#define PLATFORM_TOPOLOGY_PROG_topology_device_num_registers(x, dev_index) \ + ((x)->topology_device[dev_index].num_registers) +#define PLATFORM_TOPOLOGY_PROG_topology_device_prog_valid(x, dev_index) \ + ((x)->topology_device[dev_index].prog_valid) +#define PLATFORM_TOPOLOGY_PROG_topology_topology_regs(x, dev_index) \ + ((x)->topology_device[dev_index].topology_regs) + +typedef struct FPGA_GB_DISCOVERY_NODE_S FPGA_GB_DISCOVERY_NODE; + +struct FPGA_GB_DISCOVERY_NODE_S { + U16 bar_num; + U16 feature_id; + U32 device_id; + U64 afu_id_l; + U64 afu_id_h; + U32 feature_offset; + U32 feature_len; + U8 scan; + U8 valid; + U16 reserved1; + U32 reserved2; +}; + +typedef struct FPGA_GB_DEV_NODE_S FPGA_GB_DEV_NODE; +typedef FPGA_GB_DEV_NODE * FPGA_GB_DEV; + +struct FPGA_GB_DEV_NODE_S { + U32 num_devices; + FPGA_GB_DISCOVERY_NODE fpga_gb_device[MAX_DEVICES]; +}; + +#define FPGA_GB_DEV_num_devices(x) ((x)->num_devices) +#define FPGA_GB_DEV_device(x, dev_index) ((x)->fpga_gb_device[dev_index]) +#define FPGA_GB_DEV_bar_num(x, dev_index) \ + ((x)->fpga_gb_device[dev_index].bar_num) +#define FPGA_GB_DEV_feature_id(x, dev_index) \ + ((x)->fpga_gb_device[dev_index].feature_id) +#define FPGA_GB_DEV_device_id(x, dev_index) \ + ((x)->fpga_gb_device[dev_index].device_id) +#define FPGA_GB_DEV_afu_id_low(x, dev_index) \ + ((x)->fpga_gb_device[dev_index].afu_id_l) +#define FPGA_GB_DEV_afu_id_high(x, dev_index) \ + ((x)->fpga_gb_device[dev_index].afu_id_h) +#define FPGA_GB_DEV_feature_offset(x, dev_index) \ + ((x)->fpga_gb_device[dev_index].feature_offset) +#define FPGA_GB_DEV_feature_len(x, dev_index) \ + ((x)->fpga_gb_device[dev_index].feature_len) +#define FPGA_GB_DEV_scan(x, dev_index) 
((x)->fpga_gb_device[dev_index].scan) +#define FPGA_GB_DEV_valid(x, dev_index) ((x)->fpga_gb_device[dev_index].valid) + +typedef enum { + UNCORE_TOPOLOGY_INFO_NODE_IMC = 0, + UNCORE_TOPOLOGY_INFO_NODE_QPILL = 1, + UNCORE_TOPOLOGY_INFO_NODE_HA = 2, + UNCORE_TOPOLOGY_INFO_NODE_R3 = 3, + UNCORE_TOPOLOGY_INFO_NODE_R2 = 4, + UNCORE_TOPOLOGY_INFO_NODE_IRP = 5, + UNCORE_TOPOLOGY_INFO_NODE_IMC_UCLK = 6, + UNCORE_TOPOLOGY_INFO_NODE_EDC_ECLK = 7, + UNCORE_TOPOLOGY_INFO_NODE_EDC_UCLK = 8, + UNCORE_TOPOLOGY_INFO_NODE_M2M = 9, + UNCORE_TOPOLOGY_INFO_NODE_HFI_RXE = 10, + UNCORE_TOPOLOGY_INFO_NODE_HFI_TXE = 11, + UNCORE_TOPOLOGY_INFO_NODE_FPGA_CACHE = 12, + UNCORE_TOPOLOGY_INFO_NODE_FPGA_FAB = 13, + UNCORE_TOPOLOGY_INFO_NODE_FPGA_THERMAL = 14, + UNCORE_TOPOLOGY_INFO_NODE_FPGA_POWER = 15, +} UNCORE_TOPOLOGY_INFO_NODE_INDEX_TYPE; + +typedef struct SIDEBAND_INFO_NODE_S SIDEBAND_INFO_NODE; +typedef SIDEBAND_INFO_NODE * SIDEBAND_INFO; + +struct SIDEBAND_INFO_NODE_S { + U32 tid; + U32 pid; + U64 tsc; +}; + +#define SIDEBAND_INFO_pid(x) ((x)->pid) +#define SIDEBAND_INFO_tid(x) ((x)->tid) +#define SIDEBAND_INFO_tsc(x) ((x)->tsc) + +typedef struct SAMPLE_DROP_NODE_S SAMPLE_DROP_NODE; +typedef SAMPLE_DROP_NODE * SAMPLE_DROP; + +struct SAMPLE_DROP_NODE_S { + U32 os_id; + U32 cpu_id; + U32 sampled; + U32 dropped; +}; + +#define SAMPLE_DROP_os_id(x) ((x)->os_id) +#define SAMPLE_DROP_cpu_id(x) ((x)->cpu_id) +#define SAMPLE_DROP_sampled(x) ((x)->sampled) +#define SAMPLE_DROP_dropped(x) ((x)->dropped) + +#define MAX_SAMPLE_DROP_NODES 20 + +typedef struct SAMPLE_DROP_INFO_NODE_S SAMPLE_DROP_INFO_NODE; +typedef SAMPLE_DROP_INFO_NODE * SAMPLE_DROP_INFO; + +struct SAMPLE_DROP_INFO_NODE_S { + U32 size; + SAMPLE_DROP_NODE drop_info[MAX_SAMPLE_DROP_NODES]; +}; + +#define SAMPLE_DROP_INFO_size(x) ((x)->size) +#define SAMPLE_DROP_INFO_drop_info(x, index) ((x)->drop_info[index]) + +#define IS_PEBS_SAMPLE_RECORD(sample_record) \ + ((SAMPLE_RECORD_pid_rec_index(sample_record) == (U32)-1) && \ + 
(SAMPLE_RECORD_tid(sample_record) == (U32)-1)) + +/* + * VMM vendor information + */ +#define KVM_SIGNATURE "KVMKVMKVM\0\0\0" +#define XEN_SIGNATURE "XenVMMXenVMM" +#define VMWARE_SIGNATURE "VMwareVMware" +#define HYPERV_SIGNATURE "Microsoft Hv" + +#define DRV_VMM_UNKNOWN 0 +#define DRV_VMM_MOBILEVISOR 1 +#define DRV_VMM_KVM 2 +#define DRV_VMM_XEN 3 +#define DRV_VMM_HYPERV 4 +#define DRV_VMM_VMWARE 5 +#define DRV_VMM_ACRN 6 + +/* + * @macro DRV_SETUP_INFO_NODE_S + * @brief + * This structure supports driver information such as NMI profiling mode. + */ + +typedef struct DRV_SETUP_INFO_NODE_S DRV_SETUP_INFO_NODE; +typedef DRV_SETUP_INFO_NODE * DRV_SETUP_INFO; + +struct DRV_SETUP_INFO_NODE_S { + union { + U64 modes; + struct { + U64 nmi_mode : 1; + U64 vmm_mode : 1; + U64 vmm_vendor : 8; + U64 vmm_guest_vm : 1; + U64 pebs_accessible : 1; + U64 cpu_hotplug_mode : 1; + U64 matrix_inaccessible : 1; + U64 page_table_isolation : 2; + U64 pebs_ignored_by_pti : 1; + U64 reserved1 : 47; + } s1; + } u1; + U64 reserved2; + U64 reserved3; + U64 reserved4; +}; + +#define DRV_SETUP_INFO_nmi_mode(info) ((info)->u1.s1.nmi_mode) +#define DRV_SETUP_INFO_vmm_mode(info) ((info)->u1.s1.vmm_mode) +#define DRV_SETUP_INFO_vmm_vendor(info) ((info)->u1.s1.vmm_vendor) +#define DRV_SETUP_INFO_vmm_guest_vm(info) ((info)->u1.s1.vmm_guest_vm) +#define DRV_SETUP_INFO_pebs_accessible(info) ((info)->u1.s1.pebs_accessible) +#define DRV_SETUP_INFO_cpu_hotplug_mode(info) ((info)->u1.s1.cpu_hotplug_mode) +#define DRV_SETUP_INFO_matrix_inaccessible(info) \ + ((info)->u1.s1.matrix_inaccessible) +#define DRV_SETUP_INFO_page_table_isolation(info) \ + ((info)->u1.s1.page_table_isolation) +#define DRV_SETUP_INFO_pebs_ignored_by_pti(info) \ + ((info)->u1.s1.pebs_ignored_by_pti) + +#define DRV_SETUP_INFO_PTI_DISABLED 0 +#define DRV_SETUP_INFO_PTI_KPTI 1 +#define DRV_SETUP_INFO_PTI_KAISER 2 +#define DRV_SETUP_INFO_PTI_VA_SHADOW 3 +#define DRV_SETUP_INFO_PTI_UNKNOWN 4 + +/* + Type: task_info_t + Description: + 
Represents the equivalent of a Linux Thread. + Fields: + o id: A unique identifier. May be `NULL_TASK_ID`. + o name: Human-readable name for this task + o executable_name: Literal path to the binary elf that this task's + entry point is executing from. + o address_space_id: The unique ID for the address space this task is + running in. + */ +struct task_info_node_s { + U64 id; + char name[32]; + U64 address_space_id; +}; + +/* + Type: REMOTE_SWITCH + Description: + Collection switch set on target +*/ +typedef struct REMOTE_SWITCH_NODE_S REMOTE_SWITCH_NODE; +typedef REMOTE_SWITCH_NODE * REMOTE_SWITCH; + +struct REMOTE_SWITCH_NODE_S { + U32 auto_mode : 1; + U32 adv_hotspot : 1; + U32 lbr_callstack : 2; + U32 full_pebs : 1; + U32 uncore_supported : 1; + U32 agent_mode : 2; + U32 sched_switch_enabled : 1; + U32 data_transfer_mode : 1; + U32 reserved1 : 22; + U32 reserved2; +}; + +#define REMOTE_SWITCH_auto_mode(x) ((x).auto_mode) +#define REMOTE_SWITCH_adv_hotspot(x) ((x).adv_hotspot) +#define REMOTE_SWITCH_lbr_callstack(x) ((x).lbr_callstack) +#define REMOTE_SWITCH_full_pebs(x) ((x).full_pebs) +#define REMOTE_SWITCH_uncore_supported(x) ((x).uncore_supported) +#define REMOTE_SWITCH_agent_mode(x) ((x).agent_mode) +#define REMOTE_SWITCH_sched_switch_enabled(x) ((x).sched_switch_enabled) +#define REMOTE_SWITCH_data_transfer_mode(x) ((x).data_transfer_mode) + +/* + Type: REMOTE_OS_INFO + Description: + Remote target OS system information +*/ +#define OSINFOLEN 64 +typedef struct REMOTE_OS_INFO_NODE_S REMOTE_OS_INFO_NODE; +typedef REMOTE_OS_INFO_NODE * REMOTE_OS_INFO; + +struct REMOTE_OS_INFO_NODE_S { + U32 os_family; + U32 reserved1; + S8 sysname[OSINFOLEN]; + S8 release[OSINFOLEN]; + S8 version[OSINFOLEN]; +}; + +#define REMOTE_OS_INFO_os_family(x) ((x).os_family) +#define REMOTE_OS_INFO_sysname(x) ((x).sysname) +#define REMOTE_OS_INFO_release(x) ((x).release) +#define REMOTE_OS_INFO_version(x) ((x).version) + +/* + Type: REMOTE_HARDWARE_INFO + Description: + Remote 
target hardware information +*/ +typedef struct REMOTE_HARDWARE_INFO_NODE_S REMOTE_HARDWARE_INFO_NODE; +typedef REMOTE_HARDWARE_INFO_NODE * REMOTE_HARDWARE_INFO; + +struct REMOTE_HARDWARE_INFO_NODE_S { + U32 num_cpus; + U32 family; + U32 model; + U32 stepping; + U64 tsc_freq; + U64 reserved2; + U64 reserved3; +}; + +#define REMOTE_HARDWARE_INFO_num_cpus(x) ((x).num_cpus) +#define REMOTE_HARDWARE_INFO_family(x) ((x).family) +#define REMOTE_HARDWARE_INFO_model(x) ((x).model) +#define REMOTE_HARDWARE_INFO_stepping(x) ((x).stepping) +#define REMOTE_HARDWARE_INFO_tsc_frequency(x) ((x).tsc_freq) + +/* + Type: SEP_AGENT_MODE + Description: + SEP mode on target agent +*/ +typedef enum { + NATIVE_AGENT = 0, + HOST_VM_AGENT, // Service OS in ACRN + GUEST_VM_AGENT // User OS in ACRN +} SEP_AGENT_MODE; + +/* + Type: DATA_TRANSFER_MODE + Description: + Data transfer mode from target agent to remote host +*/ +typedef enum { + IMMEDIATE_TRANSFER = 0, + DELAYED_TRANSFER // Send after collection is done +} DATA_TRANSFER_MODE; + +#define MAX_NUM_OS_ALLOWED 6 +#define TARGET_IP_NAMELEN 64 + +typedef struct TARGET_INFO_NODE_S TARGET_INFO_NODE; +typedef TARGET_INFO_NODE * TARGET_INFO; + +struct TARGET_INFO_NODE_S { + U32 num_of_agents; + U32 reserved; + U32 os_id[MAX_NUM_OS_ALLOWED]; + S8 ip_address[MAX_NUM_OS_ALLOWED][TARGET_IP_NAMELEN]; + REMOTE_OS_INFO_NODE os_info[MAX_NUM_OS_ALLOWED]; + REMOTE_HARDWARE_INFO_NODE hardware_info[MAX_NUM_OS_ALLOWED]; + REMOTE_SWITCH_NODE remote_switch[MAX_NUM_OS_ALLOWED]; +}; + +#define TARGET_INFO_num_of_agents(x) ((x)->num_of_agents) +#define TARGET_INFO_os_id(x, i) ((x)->os_id[i]) +#define TARGET_INFO_os_info(x, i) ((x)->os_info[i]) +#define TARGET_INFO_ip_address(x, i) ((x)->ip_address[i]) +#define TARGET_INFO_hardware_info(x, i) ((x)->hardware_info[i]) +#define TARGET_INFO_remote_switch(x, i) ((x)->remote_switch[i]) + +typedef struct CPU_MAP_TRACE_NODE_S CPU_MAP_TRACE_NODE; +typedef CPU_MAP_TRACE_NODE * CPU_MAP_TRACE; + +struct 
CPU_MAP_TRACE_NODE_S { + U64 tsc; + U32 os_id; + U32 vcpu_id; + U32 pcpu_id; + U8 is_static : 1; + U8 initial : 1; + U8 reserved1 : 6; + U8 reserved2; + U16 reserved3; + U64 tsc_offset; +}; + +#define CPU_MAP_TRACE_tsc(x) ((x)->tsc) +#define CPU_MAP_TRACE_os_id(x) ((x)->os_id) +#define CPU_MAP_TRACE_vcpu_id(x) ((x)->vcpu_id) +#define CPU_MAP_TRACE_pcpu_id(x) ((x)->pcpu_id) +#define CPU_MAP_TRACE_is_static(x) ((x)->is_static) +#define CPU_MAP_TRACE_initial(x) ((x)->initial) + +#define MAX_NUM_VCPU 64 +#define MAX_NUM_VM 16 + +typedef struct CPU_MAP_TRACE_LIST_NODE_S CPU_MAP_TRACE_LIST_NODE; +typedef CPU_MAP_TRACE_LIST_NODE * CPU_MAP_TRACE_LIST; + +struct CPU_MAP_TRACE_LIST_NODE_S { + U32 osid; + U8 num_entries; + U8 reserved1; + U16 reserved2; + CPU_MAP_TRACE_NODE entries[MAX_NUM_VCPU]; +}; + +typedef struct VM_OSID_MAP_NODE_S VM_OSID_MAP_NODE; +typedef VM_OSID_MAP_NODE * VM_OSID_MAP; +struct VM_OSID_MAP_NODE_S { + U32 num_vms; + U32 reserved1; + U32 osid[MAX_NUM_VM]; +}; + +typedef struct VM_SWITCH_TRACE_NODE_S VM_SWITCH_TRACE_NODE; +typedef VM_SWITCH_TRACE_NODE * VM_SWITCH_TRACE; + +struct VM_SWITCH_TRACE_NODE_S { + U64 tsc; + U32 from_os_id; + U32 to_os_id; + U64 reason; + U64 reserved1; + U64 reserved2; +}; + +#define VM_SWITCH_TRACE_tsc(x) ((x)->tsc) +#define VM_SWITCH_TRACE_from_os_id(x) ((x)->from_os_id) +#define VM_SWITCH_TRACE_to_os_id(x) ((x)->to_os_id) +#define VM_SWITCH_TRACE_reason(x) ((x)->reason) + +typedef struct EMON_BUFFER_DRIVER_HELPER_NODE_S EMON_BUFFER_DRIVER_HELPER_NODE; +typedef EMON_BUFFER_DRIVER_HELPER_NODE * EMON_BUFFER_DRIVER_HELPER; + +struct EMON_BUFFER_DRIVER_HELPER_NODE_S { + U32 num_entries_per_package; + U32 num_cpu; + U32 power_num_package_events; + U32 power_num_module_events; + U32 power_num_thread_events; + U32 power_device_offset_in_package; + U32 core_num_events; + U32 core_index_to_thread_offset_map[]; +}; + +#define EMON_BUFFER_DRIVER_HELPER_num_entries_per_package(x) \ + ((x)->num_entries_per_package) +#define 
EMON_BUFFER_DRIVER_HELPER_num_cpu(x) ((x)->num_cpu) +#define EMON_BUFFER_DRIVER_HELPER_power_num_package_events(x) \ + ((x)->power_num_package_events) +#define EMON_BUFFER_DRIVER_HELPER_power_num_module_events(x) \ + ((x)->power_num_module_events) +#define EMON_BUFFER_DRIVER_HELPER_power_num_thread_events(x) \ + ((x)->power_num_thread_events) +#define EMON_BUFFER_DRIVER_HELPER_power_device_offset_in_package(x) \ + ((x)->power_device_offset_in_package) +#define EMON_BUFFER_DRIVER_HELPER_core_num_events(x) ((x)->core_num_events) +#define EMON_BUFFER_DRIVER_HELPER_core_index_to_thread_offset_map(x) \ + ((x)->core_index_to_thread_offset_map) + +// EMON counts buffer follow this hardware topology: +// package -> device -> unit/thread -> event + +// Calculate the CORE thread offset +// Using for initialization: calculate the cpu_index_to_thread_offset_map +// in emon_Create_Emon_Buffer_Descriptor() +// EMON_BUFFER_CORE_THREAD_OFFSET = +// package_id * num_entries_per_package + //package offset +// device_offset_in_package + //device base offset +// (core_id * threads_per_core + thread_id) * num_core_events + //thread offset +#define EMON_BUFFER_CORE_THREAD_OFFSET(package_id, num_entries_per_package, \ + device_offset_in_package, core_id, \ + threads_per_core, thread_id, \ + num_core_events) \ + (package_id * num_entries_per_package + device_offset_in_package + \ + (core_id * threads_per_core + thread_id) * num_core_events) + +// Take cpu_index and cpu_index_to_thread_offset_map to get thread_offset, +// and calculate the CORE event offset +// Using for kernel and emon_output.c printing function +// EMON_BUFFER_CORE_EVENT_OFFSET = +// cpu_index_to_thread_offset + //thread offset +// core_event_id //event_offset +#define EMON_BUFFER_CORE_EVENT_OFFSET(cpu_index_to_thread_offset, \ + core_event_id) \ + (cpu_index_to_thread_offset + core_event_id) + +// Calculate the device level to UNCORE event offset +// Using for kernel and emon_output.c printing function +// 
EMON_BUFFER_UNCORE_PACKAGE_EVENT_OFFSET_IN_PACKAGE = +// device_offset_in_package + //device_offset_in_package +// device_unit_id * num_unit_events + //unit_offset +// device_event_id //event_offset +#define EMON_BUFFER_UNCORE_PACKAGE_EVENT_OFFSET_IN_PACKAGE( \ + device_offset_in_package, device_unit_id, num_unit_events, \ + device_event_id) \ + (device_offset_in_package + device_unit_id * num_unit_events + \ + device_event_id) + +// Take 'device level to UNCORE event offset' and package_id, +// calculate the UNCORE package level event offset +// Using for emon_output.c printing function +// EMON_BUFFER_UNCORE_PACKAGE_EVENT_OFFSET = +// package_id * num_entries_per_package + //package_offset +// uncore_offset_in_package; //offset_in_package +#define EMON_BUFFER_UNCORE_PACKAGE_EVENT_OFFSET( \ + package_id, num_entries_per_package, uncore_offset_in_package) \ + (package_id * num_entries_per_package + uncore_offset_in_package) + +// Take 'device level to UNCORE event offset', +// calculate the UNCORE system level event offset +// Using for emon_output.c printing function +// EMON_BUFFER_UNCORE_SYSTEM_EVENT_OFFSET = +// device_offset_in_system + //device_offset_in_system +// device_unit_id * num_system_events + //device_unit_offset +// device_event_id //event_offset +#define EMON_BUFFER_UNCORE_SYSTEM_EVENT_OFFSET(device_offset_in_system, \ + device_unit_id, \ + num_system_events, \ + device_event_id) \ + (device_offset_in_system + device_unit_id * num_system_events + \ + device_event_id) + +// Calculate the package level power event offset +// Using for kernel and emon_output.c printing function +// EMON_BUFFER_UNCORE_PACKAGE_POWER_EVENT_OFFSET = +// package_id * num_entries_per_package + //package offset +// device_offset_in_package + //device offset +// package_event_offset //power package event offset +#define EMON_BUFFER_UNCORE_PACKAGE_POWER_EVENT_OFFSET( \ + package_id, num_entries_per_package, device_offset_in_package, \ + device_event_offset) \ + (package_id * 
num_entries_per_package + device_offset_in_package + \ + device_event_offset) + +// Calculate the module level power event offset +// Using for kernel and emon_output.c printing function +// EMON_BUFFER_UNCORE_MODULE_POWER_EVENT_OFFSET = +// package_id * num_entries_per_package + //package offset +// device_offset_in_package + //device offset +// num_package_events + //package event offset +// module_id * num_module_events + //module offset +// module_event_offset //power module event offset +#define EMON_BUFFER_UNCORE_MODULE_POWER_EVENT_OFFSET( \ + package_id, num_entries_per_package, device_offset_in_package, \ + num_package_events, module_id, num_module_events, device_event_offset) \ + (package_id * num_entries_per_package + device_offset_in_package + \ + num_package_events + module_id * num_module_events + \ + device_event_offset) + +// Calculate the thread level power event offset +// Using for kernel and emon_output.c printing function +// EMON_BUFFER_UNCORE_THREAD_POWER_EVENT_OFFSET = +// package_id * num_entries_per_package + //package offset +// device_offset_in_package + //device offset +// num_package_events + //package offset +// num_modules_per_package * num_module_events + //module offset +// (core_id*threads_per_core+thread_id)*num_thread_events + //thread offset +// thread_event_offset //power thread event offset +#define EMON_BUFFER_UNCORE_THREAD_POWER_EVENT_OFFSET( \ + package_id, num_entries_per_package, device_offset_in_package, \ + num_package_events, num_modules_per_package, num_module_events, \ + core_id, threads_per_core, thread_id, num_unit_events, \ + device_event_offset) \ + (package_id * num_entries_per_package + device_offset_in_package + \ + num_package_events + \ + num_modules_per_package * num_module_events + \ + (core_id * threads_per_core + thread_id) * num_unit_events + \ + device_event_offset) + +/* + ************************************ + * DRIVER LOG BUFFER DECLARATIONS * + ************************************ + */ + +#define 
DRV_MAX_NB_LOG_CATEGORIES 256 // Must be a multiple of 8 +#define DRV_NB_LOG_CATEGORIES 14 +#define DRV_LOG_CATEGORY_LOAD 0 +#define DRV_LOG_CATEGORY_INIT 1 +#define DRV_LOG_CATEGORY_DETECTION 2 +#define DRV_LOG_CATEGORY_ERROR 3 +#define DRV_LOG_CATEGORY_STATE_CHANGE 4 +#define DRV_LOG_CATEGORY_MARK 5 +#define DRV_LOG_CATEGORY_DEBUG 6 +#define DRV_LOG_CATEGORY_FLOW 7 +#define DRV_LOG_CATEGORY_ALLOC 8 +#define DRV_LOG_CATEGORY_INTERRUPT 9 +#define DRV_LOG_CATEGORY_TRACE 10 +#define DRV_LOG_CATEGORY_REGISTER 11 +#define DRV_LOG_CATEGORY_NOTIFICATION 12 +#define DRV_LOG_CATEGORY_WARNING 13 + +#define LOG_VERBOSITY_UNSET 0xFF +#define LOG_VERBOSITY_DEFAULT 0xFE +#define LOG_VERBOSITY_NONE 0 + +#define LOG_CHANNEL_MEMLOG 0x1 +#define LOG_CHANNEL_AUXMEMLOG 0x2 +#define LOG_CHANNEL_PRINTK 0x4 +#define LOG_CHANNEL_TRACEK 0x8 +#define LOG_CHANNEL_MOSTWHERE \ + (LOG_CHANNEL_MEMLOG | LOG_CHANNEL_AUXMEMLOG | LOG_CHANNEL_PRINTK) +#define LOG_CHANNEL_EVERYWHERE \ + (LOG_CHANNEL_MEMLOG | LOG_CHANNEL_AUXMEMLOG | LOG_CHANNEL_PRINTK | \ + LOG_CHANNEL_TRACEK) +#define LOG_CHANNEL_MASK LOG_CATEGORY_VERBOSITY_EVERYWHERE + +#define LOG_CONTEXT_REGULAR 0x10 +#define LOG_CONTEXT_INTERRUPT 0x20 +#define LOG_CONTEXT_NOTIFICATION 0x40 +#define LOG_CONTEXT_ALL \ + (LOG_CONTEXT_REGULAR | LOG_CONTEXT_INTERRUPT | LOG_CONTEXT_NOTIFICATION) +#define LOG_CONTEXT_MASK LOG_CONTEXT_ALL +#define LOG_CONTEXT_SHIFT 4 + +#define DRV_LOG_NOTHING 0 +#define DRV_LOG_FLOW_IN 1 +#define DRV_LOG_FLOW_OUT 2 + +/* + * @macro DRV_LOG_ENTRY_NODE_S + * @brief + * This structure is used to store a log message from the driver. 
+ */ + +#define DRV_LOG_MESSAGE_LENGTH 64 +#define DRV_LOG_FUNCTION_NAME_LENGTH 32 + +typedef struct DRV_LOG_ENTRY_NODE_S DRV_LOG_ENTRY_NODE; +typedef DRV_LOG_ENTRY_NODE * DRV_LOG_ENTRY; +struct DRV_LOG_ENTRY_NODE_S { + char function_name[DRV_LOG_FUNCTION_NAME_LENGTH]; + char message[DRV_LOG_MESSAGE_LENGTH]; + + U16 temporal_tag; + U16 integrity_tag; + + U8 category; + U8 secondary_info; // Secondary attribute: + // former driver state for STATE category + // 'ENTER' or 'LEAVE' for FLOW and TRACE categories + U16 processor_id; + // NB: not guaranteed to be accurate (due to preemption/core migration) + + U64 tsc; + + U16 nb_active_interrupts; // never 100% accurate, merely indicative + U8 active_drv_operation; // only 100% accurate IOCTL-called functions + U8 driver_state; + + U16 line_number; // as per the __LINE__ macro + + U16 nb_active_notifications; + + U64 reserved; // need padding to reach 128 bytes +}; // this structure should be exactly 128-byte long + +#define DRV_LOG_ENTRY_temporal_tag(ent) ((ent)->temporal_tag) +#define DRV_LOG_ENTRY_integrity_tag(ent) ((ent)->integrity_tag) +#define DRV_LOG_ENTRY_category(ent) ((ent)->category) +#define DRV_LOG_ENTRY_secondary_info(ent) ((ent)->secondary_info) +#define DRV_LOG_ENTRY_processor_id(ent) ((ent)->processor_id) +#define DRV_LOG_ENTRY_tsc(ent) ((ent)->tsc) +#define DRV_LOG_ENTRY_driver_state(ent) ((ent)->driver_state) +#define DRV_LOG_ENTRY_active_drv_operation(ent) ((ent)->active_drv_operation) +#define DRV_LOG_ENTRY_nb_active_interrupts(ent) ((ent)->nb_active_interrupts) +#define DRV_LOG_ENTRY_nb_active_notifications(ent) \ + ((ent)->nb_active_notifications) +#define DRV_LOG_ENTRY_line_number(ent) ((ent)->line_number) +#define DRV_LOG_ENTRY_message(ent) ((ent)->message) +#define DRV_LOG_ENTRY_function_name(ent) ((ent)->function_name) + +/* + * @macro DRV_LOG_BUFFER_NODE_S + * @brief Circular buffer structure storing the latest + * DRV_LOG_MAX_NB_ENTRIES driver messages + */ + +#define DRV_LOG_SIGNATURE_SIZE 
8 // Must be a multiple of 8 +#define DRV_LOG_SIGNATURE_0 'S' +#define DRV_LOG_SIGNATURE_1 'e' +#define DRV_LOG_SIGNATURE_2 'P' +#define DRV_LOG_SIGNATURE_3 'd' +#define DRV_LOG_SIGNATURE_4 'R' +#define DRV_LOG_SIGNATURE_5 'v' +#define DRV_LOG_SIGNATURE_6 '5' +#define DRV_LOG_SIGNATURE_7 '\0' +// The signature is "SePdRv5"; not declared as string on purpose to avoid +// false positives when trying to identify the log buffer in a crash dump + +#define DRV_LOG_VERSION 1 +#define DRV_LOG_FILLER_BYTE 1 + +#define DRV_LOG_DRIVER_VERSION_SIZE 64 // Must be a multiple of 8 +#define DRV_LOG_MAX_NB_PRI_ENTRIES (8192 * 2) +// 2MB buffer [*HAS TO BE* a power of 2!] [8192 entries = 1 MB] +#define DRV_LOG_MAX_NB_AUX_ENTRIES (8192) +// 1MB buffer [*HAS TO BE* a power of 2!] +#define DRV_LOG_MAX_NB_ENTRIES \ + (DRV_LOG_MAX_NB_PRI_ENTRIES + DRV_LOG_MAX_NB_AUX_ENTRIES) + +typedef struct DRV_LOG_BUFFER_NODE_S DRV_LOG_BUFFER_NODE; +typedef DRV_LOG_BUFFER_NODE * DRV_LOG_BUFFER; +struct DRV_LOG_BUFFER_NODE_S { + char header_signature[DRV_LOG_SIGNATURE_SIZE]; + // some signature to be able to locate the log even without -g; ASCII + // would help should we change the signature for each log's version + // instead of keeping it in a dedicated field? + + U32 log_size; // filled with sizeof(this structure) at init. + U32 max_nb_pri_entries; + // filled with the driver's "DRV_LOG_MAX_NB_PRI_ENTRIES" at init. + + U32 max_nb_aux_entries; + // filled with the driver's "DRV_LOG_MAX_NB_AUX_ENTRIES" at init. + U32 reserved1; + + U64 init_time; // primary log disambiguator + + U32 disambiguator; + // used to differentiate the driver's version of the log when a + // full memory dump can contain some from userland + U32 log_version; // 0 at first, increase when format changes? + + U32 pri_entry_index; + // should be incremented *atomically* as a means to (re)allocate + // the next primary log entry. 
+ U32 aux_entry_index; + // should be incremented *atomically* as a means to (re)allocate + // the next auxiliary log entry. + + char driver_version[DRV_LOG_DRIVER_VERSION_SIZE]; + + U8 driver_state; + U8 active_drv_operation; + U16 reserved2; + U32 nb_drv_operations; + + U32 nb_interrupts; + U16 nb_active_interrupts; + U16 nb_active_notifications; + + U32 nb_notifications; + U32 nb_driver_state_transitions; + + U8 contiguous_physical_memory; + U8 reserved3; + U16 reserved4; + U32 reserved5; + + U8 verbosities[DRV_MAX_NB_LOG_CATEGORIES]; + + DRV_LOG_ENTRY_NODE entries[DRV_LOG_MAX_NB_ENTRIES]; + + char footer_signature[DRV_LOG_SIGNATURE_SIZE]; +}; + +#define DRV_LOG_BUFFER_pri_entry_index(log) ((log)->pri_entry_index) +#define DRV_LOG_BUFFER_aux_entry_index(log) ((log)->aux_entry_index) +#define DRV_LOG_BUFFER_header_signature(log) ((log)->header_signature) +#define DRV_LOG_BUFFER_footer_signature(log) ((log)->footer_signature) +#define DRV_LOG_BUFFER_log_size(log) ((log)->log_size) +#define DRV_LOG_BUFFER_driver_version(log) ((log)->driver_version) +#define DRV_LOG_BUFFER_driver_state(log) ((log)->driver_state) +#define DRV_LOG_BUFFER_active_drv_operation(log) ((log)->active_drv_operation) +#define DRV_LOG_BUFFER_nb_interrupts(log) ((log)->nb_interrupts) +#define DRV_LOG_BUFFER_nb_active_interrupts(log) ((log)->nb_active_interrupts) +#define DRV_LOG_BUFFER_nb_notifications(log) ((log)->nb_notifications) +#define DRV_LOG_BUFFER_nb_active_notifications(log) \ + ((log)->nb_active_notifications) +#define DRV_LOG_BUFFER_nb_driver_state_transitions(log) \ + ((log)->nb_driver_state_transitions) +#define DRV_LOG_BUFFER_nb_drv_operations(log) ((log)->nb_drv_operations) +#define DRV_LOG_BUFFER_max_nb_pri_entries(log) ((log)->max_nb_pri_entries) +#define DRV_LOG_BUFFER_max_nb_aux_entries(log) ((log)->max_nb_aux_entries) +#define DRV_LOG_BUFFER_init_time(log) ((log)->init_time) +#define DRV_LOG_BUFFER_disambiguator(log) ((log)->disambiguator) +#define 
DRV_LOG_BUFFER_log_version(log) ((log)->log_version) +#define DRV_LOG_BUFFER_entries(log) ((log)->entries) +#define DRV_LOG_BUFFER_contiguous_physical_memory(log) \ + ((log)->contiguous_physical_memory) +#define DRV_LOG_BUFFER_verbosities(log) ((log)->verbosities) + +#define DRV_LOG_CONTROL_MAX_DATA_SIZE \ + DRV_MAX_NB_LOG_CATEGORIES // Must be a multiple of 8 + +typedef struct DRV_LOG_CONTROL_NODE_S DRV_LOG_CONTROL_NODE; +typedef DRV_LOG_CONTROL_NODE * DRV_LOG_CONTROL; + +struct DRV_LOG_CONTROL_NODE_S { + U32 command; + U32 reserved1; + U8 data[DRV_LOG_CONTROL_MAX_DATA_SIZE]; + // only DRV_NB_LOG_CATEGORIES elements will be used, but let's plan for + // backwards compatibility if LOG_CATEGORY_UNSET, READ instead of WRITE + + U64 reserved2; + // may later want to add support for resizing the buffer, + // or only log 100 first interrupts, etc. + U64 reserved3; + U64 reserved4; + U64 reserved5; +}; + +#define DRV_LOG_CONTROL_command(x) ((x)->command) +#define DRV_LOG_CONTROL_verbosities(x) ((x)->data) +#define DRV_LOG_CONTROL_message(x) \ + ((x)->data) // Userland 'MARK' messages use the 'data' field too. +#define DRV_LOG_CONTROL_log_size(x) (*((U32 *)((x)->data))) + +#define DRV_LOG_CONTROL_COMMAND_NONE 0 +#define DRV_LOG_CONTROL_COMMAND_ADJUST_VERBOSITY 1 +#define DRV_LOG_CONTROL_COMMAND_MARK 2 +#define DRV_LOG_CONTROL_COMMAND_QUERY_SIZE 3 +#define DRV_LOG_CONTROL_COMMAND_BENCHMARK 4 + +#if defined(__cplusplus) +} +#endif + +#endif diff --git a/drivers/platform/x86/sepdk/include/lwpmudrv_types.h b/drivers/platform/x86/sepdk/include/lwpmudrv_types.h new file mode 100644 index 0000000000000..05574ada85ec4 --- /dev/null +++ b/drivers/platform/x86/sepdk/include/lwpmudrv_types.h @@ -0,0 +1,173 @@ +/* **************************************************************************** + * Copyright(C) 2009-2018 Intel Corporation. All Rights Reserved. 
+ * + * This file is part of SEP Development Kit + * + * SEP Development Kit is free software; you can redistribute it + * and/or modify it under the terms of the GNU General Public License + * version 2 as published by the Free Software Foundation. + * + * SEP Development Kit is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * As a special exception, you may use this file as part of a free software + * library without restriction. Specifically, if other files instantiate + * templates or use macros or inline functions from this file, or you + * compile this file and link it with other files to produce an executable + * this file does not by itself cause the resulting executable to be + * covered by the GNU General Public License. This exception does not + * however invalidate any other reasons why the executable file might be + * covered by the GNU General Public License. 
+ * **************************************************************************** + */ + +#ifndef _LWPMUDRV_TYPES_H_ +#define _LWPMUDRV_TYPES_H_ + +#if defined(__cplusplus) +extern "C" { +#endif + +#if defined(BUILD_DRV_ESX) +//SR: added size_t def +typedef unsigned long size_t; +typedef unsigned long ssize_t; +#endif + +typedef unsigned char U8; +typedef char S8; +typedef short S16; +typedef unsigned short U16; +typedef unsigned int U32; +typedef int S32; +#if defined(DRV_OS_WINDOWS) +typedef unsigned __int64 U64; +typedef __int64 S64; +#elif defined(DRV_OS_LINUX) || defined(DRV_OS_SOLARIS) || \ + defined(DRV_OS_MAC) || defined(DRV_OS_ANDROID) || \ + defined(DRV_OS_FREEBSD) +typedef unsigned long long U64; +typedef long long S64; +typedef unsigned long ULONG; +typedef void VOID; +typedef void *LPVOID; + +#if defined(BUILD_DRV_ESX) +//SR: added UWORD64 def +typedef union _UWORD64 { + struct { + U32 low; + S32 hi; + } c; + S64 qword; +} UWORD64, *PWORD64; +#endif +#else +#error "Undefined OS" +#endif + +#if defined(DRV_IA32) +typedef S32 SIOP; +typedef U32 UIOP; +#elif defined(DRV_EM64T) +typedef S64 SIOP; +typedef U64 UIOP; +#else +#error "Unexpected Architecture seen" +#endif + +typedef U32 DRV_BOOL; +typedef void *PVOID; + +#if !defined(__DEFINE_STCHAR__) +#define __DEFINE_STCHAR__ +#if defined(UNICODE) +typedef wchar_t STCHAR; +#define VTSA_T(x) L##x +#else +typedef char STCHAR; +#define VTSA_T(x) x +#endif +#endif + +#if defined(DRV_OS_WINDOWS) +#include +typedef wchar_t DRV_STCHAR; +typedef wchar_t VTSA_CHAR; +#else +typedef char DRV_STCHAR; +#endif + +// +// Handy Defines +// +typedef U32 DRV_STATUS; + +#define MAX_STRING_LENGTH 1024 +#define MAXNAMELEN 256 + +#if defined(DRV_OS_WINDOWS) +#define UNLINK _unlink +#define RENAME rename +#define WCSDUP _wcsdup +#endif +#if defined(DRV_OS_LINUX) || defined(DRV_OS_SOLARIS) || defined(DRV_OS_MAC) || \ + defined(DRV_OS_ANDROID) || defined(DRV_OS_FREEBSD) +#define UNLINK unlink +#define RENAME rename +#endif + +#if 
defined(DRV_OS_SOLARIS) && !defined(_KERNEL) +//wcsdup is missing on Solaris +#include +#include + +static inline wchar_t *solaris_wcsdup(const wchar_t *wc) +{ + wchar_t *tmp = (wchar_t *)malloc((wcslen(wc) + 1) * sizeof(wchar_t)); + + wcscpy(tmp, wc); + return tmp; +} +#define WCSDUP solaris_wcsdup +#endif + +#if defined(DRV_OS_LINUX) || defined(DRV_OS_FREEBSD) || defined(DRV_OS_MAC) +#define WCSDUP wcsdup +#endif + +#if !defined(_WCHAR_T_DEFINED) +#if defined(DRV_OS_LINUX) || defined(DRV_OS_ANDROID) || defined(DRV_OS_SOLARIS) +#if !defined(_GNU_SOURCE) +#define _GNU_SOURCE +#endif +#endif +#endif + +#if (defined(DRV_OS_LINUX) || defined(DRV_OS_ANDROID)) && !defined(__KERNEL__) +#include +typedef wchar_t VTSA_CHAR; +#endif + +#if (defined(DRV_OS_MAC) || defined(DRV_OS_FREEBSD) || \ + defined(DRV_OS_SOLARIS)) && \ + !defined(_KERNEL) +#include +typedef wchar_t VTSA_CHAR; +#endif + +#define TRUE 1 +#define FALSE 0 + +#define ALIGN_4(x) (((x) + 3) & ~3) +#define ALIGN_8(x) (((x) + 7) & ~7) +#define ALIGN_16(x) (((x) + 15) & ~15) +#define ALIGN_32(x) (((x) + 31) & ~31) + +#if defined(__cplusplus) +} +#endif + +#endif diff --git a/drivers/platform/x86/sepdk/include/lwpmudrv_version.h b/drivers/platform/x86/sepdk/include/lwpmudrv_version.h new file mode 100644 index 0000000000000..364fcc38048b9 --- /dev/null +++ b/drivers/platform/x86/sepdk/include/lwpmudrv_version.h @@ -0,0 +1,122 @@ +/* **************************************************************************** + * Copyright(C) 2009-2018 Intel Corporation. All Rights Reserved. + * + * This file is part of SEP Development Kit + * + * SEP Development Kit is free software; you can redistribute it + * and/or modify it under the terms of the GNU General Public License + * version 2 as published by the Free Software Foundation. 
+ * + * SEP Development Kit is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * As a special exception, you may use this file as part of a free software + * library without restriction. Specifically, if other files instantiate + * templates or use macros or inline functions from this file, or you + * compile this file and link it with other files to produce an executable + * this file does not by itself cause the resulting executable to be + * covered by the GNU General Public License. This exception does not + * however invalidate any other reasons why the executable file might be + * covered by the GNU General Public License. + * **************************************************************************** + */ + +#ifndef _LWPMUDRV_VERSION_H_ +#define _LWPMUDRV_VERSION_H_ + +#define _STRINGIFY(x) #x +#define STRINGIFY(x) _STRINGIFY(x) +#define _STRINGIFY_W(x) L#x +#define STRINGIFY_W(x) _STRINGIFY_W(x) + +#define SEP_MAJOR_VERSION 5 +#define SEP_MINOR_VERSION 0 +#define SEP_UPDATE_VERSION 0 +#define SEP_API_VERSION SEP_UPDATE_VERSION +#if SEP_UPDATE_VERSION > 0 +#define SEP_UPDATE_STRING " Update " STRINGIFY(SEP_UPDATE_VERSION) +#else +#define SEP_UPDATE_STRING "" +#endif +#define SEP_RELEASE_STRING "" + +#define EMON_MAJOR_VERSION SEP_MAJOR_VERSION +#define EMON_MINOR_VERSION SEP_MINOR_VERSION +#define EMON_PRODUCT_RELEASE_STRING SEP_UPDATE_VERSION + +#if defined(SEP_ENABLE_PRIVATE_CPUS) +#define PRODUCT_TYPE "private" +#define SEP_NAME "sepint" +#define SEP_NAME_W L"sepint" +#else +#define PRODUCT_TYPE "public" +#define SEP_NAME "sep" +#define SEP_NAME_W L"sep" +#endif + +#if !defined(PRODUCT_BUILDER) +#define PRODUCT_BUILDER unknown +#endif + +#define TB_FILE_EXT ".tb7" +#define TB_FILE_EXT_W L".tb7" + +#define SEP_PRODUCT_NAME "Sampling Enabling Product" +#define 
EMON_PRODUCT_NAME "EMON" + +#define PRODUCT_VERSION_DATE __DATE__ " at " __TIME__ + +#define SEP_PRODUCT_COPYRIGHT \ + "Copyright(C) 2007-2018 Intel Corporation. All rights reserved." +#define EMON_PRODUCT_COPYRIGHT \ + "Copyright(C) 1993-2018 Intel Corporation. All rights reserved." + +#define PRODUCT_DISCLAIMER \ + "Warning: This computer program is protected under U.S. and \n" \ + "international copyright laws, and may only be used or copied in \n" \ + "accordance with the terms of the license agreement. Except as \n" \ + "permitted by such license, no part of this computer program may \n" \ + "be reproduced, stored in a retrieval system, or transmitted \n" \ + "in any form or by any means without the express written consent \n" \ + "of Intel Corporation." + +#define PRODUCT_VERSION \ + STRINGIFY(SEP_MAJOR_VERSION) "." STRINGIFY(SEP_MINOR_VERSION) + +#define SEP_MSG_PREFIX \ + SEP_NAME "" STRINGIFY(SEP_MAJOR_VERSION) "_" STRINGIFY( \ + SEP_MINOR_VERSION) ":" +#define SEP_VERSION_STR \ + STRINGIFY(SEP_MAJOR_VERSION) \ + "." STRINGIFY(SEP_MINOR_VERSION) "." 
STRINGIFY(SEP_API_VERSION) + +#if defined(DRV_OS_WINDOWS) + +#define SEP_DRIVER_NAME SEP_NAME "drv" STRINGIFY(SEP_MAJOR_VERSION) +#define SEP_DRIVER_NAME_W SEP_NAME_W L"drv" STRINGIFY_W(SEP_MAJOR_VERSION) +#define SEP_DEVICE_NAME SEP_DRIVER_NAME + +#endif + +#if defined(DRV_OS_LINUX) || defined(DRV_OS_SOLARIS) || \ + defined(DRV_OS_ANDROID) || defined(DRV_OS_FREEBSD) + +#define SEP_DRIVER_NAME SEP_NAME "" STRINGIFY(SEP_MAJOR_VERSION) +#define SEP_SAMPLES_NAME SEP_DRIVER_NAME "_s" +#define SEP_UNCORE_NAME SEP_DRIVER_NAME "_u" +#define SEP_SIDEBAND_NAME SEP_DRIVER_NAME "_b" +#define SEP_DEVICE_NAME "/dev/" SEP_DRIVER_NAME + +#endif + +#if defined(DRV_OS_MAC) + +#define SEP_DRIVER_NAME SEP_NAME "" STRINGIFY(SEP_MAJOR_VERSION) +#define SEP_SAMPLES_NAME SEP_DRIVER_NAME "_s" +#define SEP_DEVICE_NAME SEP_DRIVER_NAME + +#endif + +#endif diff --git a/drivers/platform/x86/sepdk/include/pax_shared.h b/drivers/platform/x86/sepdk/include/pax_shared.h new file mode 100644 index 0000000000000..6f35197a51fc9 --- /dev/null +++ b/drivers/platform/x86/sepdk/include/pax_shared.h @@ -0,0 +1,194 @@ +/* **************************************************************************** + * Copyright(C) 2009-2018 Intel Corporation. All Rights Reserved. + * + * This file is part of SEP Development Kit + * + * SEP Development Kit is free software; you can redistribute it + * and/or modify it under the terms of the GNU General Public License + * version 2 as published by the Free Software Foundation. + * + * SEP Development Kit is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * As a special exception, you may use this file as part of a free software + * library without restriction. 
Specifically, if other files instantiate + * templates or use macros or inline functions from this file, or you + * compile this file and link it with other files to produce an executable + * this file does not by itself cause the resulting executable to be + * covered by the GNU General Public License. This exception does not + * however invalidate any other reasons why the executable file might be + * covered by the GNU General Public License. + * **************************************************************************** + */ + +/* + * + * Description: types and definitions shared between PAX kernel + * and user modes + * + * NOTE: alignment on page boundaries is required on 64-bit platforms! + * +*/ + +#ifndef _PAX_SHARED_H_ +#define _PAX_SHARED_H_ + +#include "lwpmudrv_defines.h" +#include "lwpmudrv_types.h" + +#define _STRINGIFY(x) #x +#define STRINGIFY(x) _STRINGIFY(x) + +// PAX versioning + +#define PAX_MAJOR_VERSION 1 // major version +// (increment only when PAX driver is incompatible with previous versions) +#define PAX_MINOR_VERSION 0 // minor version +// (increment only when new APIs added, but driver remains backwards compatible) +#define PAX_BUGFIX_VERSION 2 // bugfix version +// (increment only for bug fix that don't affect usermode/driver compatibility) + +#define PAX_VERSION_STR \ + STRINGIFY(PAX_MAJOR_VERSION) \ + "." STRINGIFY(PAX_MINOR_VERSION) "." 
STRINGIFY(PAX_BUGFIX_VERSION) + +// PAX device name + +#if defined(DRV_OS_WINDOWS) +#define PAX_NAME "sepdal" +#define PAX_NAME_W L"sepdal" +#else +#define PAX_NAME "pax" +#endif + +// PAX PMU reservation states + +#define PAX_PMU_RESERVED 1 +#define PAX_PMU_UNRESERVED 0 + +#define PAX_GUID_UNINITIALIZED 0 + +// PAX_IOCTL definitions + +#if defined(DRV_OS_WINDOWS) + +// +// The name of the device as seen by the driver +// +#define LSTRING(x) L#x +#define PAX_OBJECT_DEVICE_NAME L"\\Device\\sepdal" // LSTRING(PAX_NAME) +#define PAX_OBJECT_LINK_NAME L"\\DosDevices\\sepdal" // LSTRING(PAX_NAME) + +#define PAX_DEVICE_NAME PAX_NAME // for CreateFile called by app + +#define PAX_IOCTL_DEVICE_TYPE 0xA000 // values 0-32768 reserved for Microsoft +#define PAX_IOCTL_FUNCTION 0xA00 // values 0-2047 reserved for Microsoft + +// +// Basic CTL CODE macro to reduce typographical errors +// +#define PAX_CTL_READ_CODE(x) \ + CTL_CODE(PAX_IOCTL_DEVICE_TYPE, PAX_IOCTL_FUNCTION + (x), \ + METHOD_BUFFERED, FILE_READ_ACCESS) + +#define PAX_IOCTL_INFO PAX_CTL_READ_CODE(1) +#define PAX_IOCTL_STATUS PAX_CTL_READ_CODE(2) +#define PAX_IOCTL_RESERVE_ALL PAX_CTL_READ_CODE(3) +#define PAX_IOCTL_UNRESERVE PAX_CTL_READ_CODE(4) + +#elif defined(DRV_OS_LINUX) || defined(DRV_OS_ANDROID) || \ + defined(DRV_OS_SOLARIS) + +#define PAX_DEVICE_NAME "/dev/" PAX_NAME + +#define PAX_IOC_MAGIC 100 +#define PAX_IOCTL_INFO _IOW(PAX_IOC_MAGIC, 1, IOCTL_ARGS) +#define PAX_IOCTL_STATUS _IOW(PAX_IOC_MAGIC, 2, IOCTL_ARGS) +#define PAX_IOCTL_RESERVE_ALL _IO(PAX_IOC_MAGIC, 3) +#define PAX_IOCTL_UNRESERVE _IO(PAX_IOC_MAGIC, 4) + +#if defined(HAVE_COMPAT_IOCTL) && defined(DRV_EM64T) +#define PAX_IOCTL_COMPAT_INFO _IOW(PAX_IOC_MAGIC, 1, compat_uptr_t) +#define PAX_IOCTL_COMPAT_STATUS _IOW(PAX_IOC_MAGIC, 2, compat_uptr_t) +#define PAX_IOCTL_COMPAT_RESERVE_ALL _IO(PAX_IOC_MAGIC, 3) +#define PAX_IOCTL_COMPAT_UNRESERVE _IO(PAX_IOC_MAGIC, 4) +#endif + +#elif defined(DRV_OS_FREEBSD) + +#define PAX_DEVICE_NAME "/dev/" PAX_NAME 
+ +#define PAX_IOC_MAGIC 100 +#define PAX_IOCTL_INFO _IOW(PAX_IOC_MAGIC, 1, IOCTL_ARGS_NODE) +#define PAX_IOCTL_STATUS _IOW(PAX_IOC_MAGIC, 2, IOCTL_ARGS_NODE) +#define PAX_IOCTL_RESERVE_ALL _IO(PAX_IOC_MAGIC, 3) +#define PAX_IOCTL_UNRESERVE _IO(PAX_IOC_MAGIC, 4) + +#elif defined(DRV_OS_MAC) + +// OSX driver names are always in reverse DNS form. +#define PAXDriverClassName com_intel_driver_PAX +#define kPAXDriverClassName "com_intel_driver_PAX" +#define PAX_DEVICE_NAME "com.intel.driver.PAX" + +// User client method dispatch selectors. +enum { kPAXUserClientOpen, + kPAXUserClientClose, + kPAXReserveAll, + kPAXUnreserve, + kPAXGetStatus, + kPAXGetInfo, + kPAXDataIO, + kNumberOfMethods // Must be last +}; + +#else +#warning "unknown OS in pax_shared.h" +#endif + +// data for PAX_IOCTL_INFO call + +struct PAX_INFO_NODE_S { + volatile U64 managed_by; // entity managing PAX + volatile U32 version; // PAX version number + volatile U64 reserved1; // force 8-byte alignment + volatile U32 reserved2; // unreserved +}; + +typedef struct PAX_INFO_NODE_S PAX_INFO_NODE; +typedef PAX_INFO_NODE * PAX_INFO; + +// data for PAX_IOCTL_STATUS call + +struct PAX_STATUS_NODE_S { + volatile U64 guid; // reservation ID (globally unique identifier) + volatile DRV_FILE_DESC pid; // pid of process that has the reservation + volatile U64 start_time; // reservation start time + volatile U32 is_reserved; // 1 if there is a reservation, 0 otherwise +}; + +typedef struct PAX_STATUS_NODE_S PAX_STATUS_NODE; +typedef PAX_STATUS_NODE * PAX_STATUS; + +struct PAX_VERSION_NODE_S { + union { + U32 version; + struct { + U32 major : 8; + U32 minor : 8; + U32 bugfix : 16; + } s1; + } u1; +}; + +typedef struct PAX_VERSION_NODE_S PAX_VERSION_NODE; +typedef PAX_VERSION_NODE * PAX_VERSION; + +#define PAX_VERSION_NODE_version(v) ((v)->u1.version) +#define PAX_VERSION_NODE_major(v) ((v)->u1.s1.major) +#define PAX_VERSION_NODE_minor(v) ((v)->u1.s1.minor) +#define PAX_VERSION_NODE_bugfix(v) ((v)->u1.s1.bugfix) + 
+#endif diff --git a/drivers/platform/x86/sepdk/include/rise_errors.h b/drivers/platform/x86/sepdk/include/rise_errors.h new file mode 100644 index 0000000000000..7db811e855d6e --- /dev/null +++ b/drivers/platform/x86/sepdk/include/rise_errors.h @@ -0,0 +1,340 @@ +/* **************************************************************************** + * Copyright(C) 2009-2018 Intel Corporation. All Rights Reserved. + * + * This file is part of SEP Development Kit + * + * SEP Development Kit is free software; you can redistribute it + * and/or modify it under the terms of the GNU General Public License + * version 2 as published by the Free Software Foundation. + * + * SEP Development Kit is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * As a special exception, you may use this file as part of a free software + * library without restriction. Specifically, if other files instantiate + * templates or use macros or inline functions from this file, or you + * compile this file and link it with other files to produce an executable + * this file does not by itself cause the resulting executable to be + * covered by the GNU General Public License. This exception does not + * however invalidate any other reasons why the executable file might be + * covered by the GNU General Public License. + * **************************************************************************** + */ + +#ifndef _RISE_ERRORS_H_ +#define _RISE_ERRORS_H_ + +// +// NOTE: +// +// 1) Before adding an error code, first make sure the error code doesn't +// already exist. If it does, use that, don't create a new one just because... +// +// 2) When adding an error code, add it to the end of the list. Don't insert +// error numbers in the middle of the list! 
For backwards compatibility, +// we don't want the numbers changing unless we really need them +// to for some reason (like we want to switch to negative error numbers) +// +// 3) Change the VT_LAST_ERROR_CODE macro to point to the (newly added) +// last error. This is done so SW can verify the number of error codes +// possible matches the number of error strings it has +// +// 4) Don't forget to update the error string table to include your +// error code (rise.c). Since the goal is something human readable +// you don't need to use abbreviations in there (ie. don't say "bad param", +// say "bad parameter" or "illegal parameter passed in") +// +// 5) Compile and run the test_rise app (in the test_rise directory) to +// verify things are still working +// +// + +#define VT_SUCCESS 0 +#define VT_FAILURE -1 + +/*************************************************************/ + +#define VT_INVALID_MAX_SAMP 1 +#define VT_INVALID_SAMP_PER_BUFF 2 +#define VT_INVALID_SAMP_INTERVAL 3 +#define VT_INVALID_PATH 4 +#define VT_TB5_IN_USE 5 +#define VT_INVALID_NUM_EVENTS 6 +#define VT_INTERNAL_ERROR 8 +#define VT_BAD_EVENT_NAME 9 +#define VT_NO_SAMP_SESSION 10 +#define VT_NO_EVENTS 11 +#define VT_MULTIPLE_RUNS 12 +#define VT_NO_SAM_PARAMS 13 +#define VT_SDB_ALREADY_EXISTS 14 +#define VT_SAMPLING_ALREADY_STARTED 15 +#define VT_TBS_NOT_SUPPORTED 16 +#define VT_INVALID_SAMPARAMS_SIZE 17 +#define VT_INVALID_EVENT_SIZE 18 +#define VT_ALREADY_PROCESSES 19 +#define VT_INVALID_EVENTS_PATH 20 +#define VT_INVALID_LICENSE 21 + +/******************************************************/ +//SEP error codes + +#define VT_SAM_ERROR 22 +#define VT_SAMPLE_FILE_ALREADY_MAPPED 23 +#define VT_INVALID_SAMPLE_FILE 24 +#define VT_UNKNOWN_SECTION_NUMBER 25 +#define VT_NO_MEMORY 26 +#define VT_ENV_VAR_NOT_FOUND 27 +#define VT_SAMPLE_FILE_NOT_MAPPED 28 +#define VT_BUFFER_OVERFLOW 29 +#define VT_USER_OP_COMPLETED 30 +#define VT_BINARY_NOT_FOUND 31 +#define VT_ISM_NOT_INITIALIZED 32 +#define VT_NO_SYMBOLS 33 
+#define VT_SAMPLE_FILE_MAPPING_ERROR 34 +#define VT_BUFFER_NULL 35 +#define VT_UNEXPECTED_NULL_PTR 36 +#define VT_BINARY_LOAD_FAILED 37 +#define VT_FUNCTION_NOT_FOUND_IN_BINARY 38 +#define VT_ENTRY_NOT_FOUND 39 +#define VT_SEP_SYNTAX_ERROR 40 +#define VT_SEP_OPTIONS_ERROR 41 +#define VT_BAD_EVENT_MODIFIER 42 +#define VT_INCOMPATIBLE_PARAMS 43 +#define VT_FILE_OPEN_FAILED 44 +#define VT_EARLY_EXIT 45 +#define VT_TIMEOUT_RETURN 46 +#define VT_NO_CHILD_PROCESS 47 +#define VT_DRIVER_RUNNING 48 +#define VT_DRIVER_STOPPED 49 +#define VT_MULTIPLE_RUNS_NEEDED 50 +#define VT_QUIT_IMMEDIATE 51 +#define VT_DRIVER_INIT_FAILED 52 +#define VT_NO_TB5_CREATED 53 +#define VT_NO_WRITE_PERMISSION 54 +#define VT_DSA_INIT_FAILED 55 +#define VT_INVALID_CPU_MASK 56 +#define VT_SAMP_IN_RUNNING_STATE 57 +#define VT_SAMP_IN_PAUSE_STATE 58 +#define VT_SAMP_IN_STOP_STATE 59 +#define VT_SAMP_NO_SESSION 60 +#define VT_NOT_CONFIGURED 61 +#define VT_LAUNCH_BUILD64_FAILED 62 +#define VT_BAD_PARAMETER 63 +#define VT_ISM_INIT_FAILED 64 +#define VT_INVALID_STATE_TRANS 65 +#define VT_EARLY_EXIT_N_CANCEL 66 +#define VT_EVT_MGR_NOT_INIT 67 +#define VT_ISM_SECTION_ENUM_FAILED 68 +#define VT_VG_PARSER_ERROR 69 +#define VT_MISSING_VALUE_FOR_TOKEN 70 +#define VT_EMPTY_SAMPLE_FILE_NAME 71 +#define VT_UNEXPECTED_VALUE 72 +#define VT_NOT_IMPLEMENTED 73 +#define VT_MISSING_COL_DEPNDNCIES 74 +#define VT_DEP_COL_NOT_LIB_DEFINED 75 +#define VT_COL_NOT_REG_WITH_LIB 76 +#define VT_SECTION_ALREADY_IN_USE 77 +#define VT_SECTION_NOT_EXIST 78 +#define VT_STREAM_NOT_EXIST 79 +#define VT_INVALID_STREAM 80 +#define VT_STREAM_ALREADY_IN_USE 81 +#define VT_DATA_DESC_NOT_EXIST 82 +#define VT_INVALID_ERROR_CODE 83 +#define VT_INCOMPATIBLE_VERSION 84 +#define VT_LEGACY_DATA_NOT_EXIST 85 +#define VT_INVALID_READ_START 86 +#define VT_DRIVER_OPEN_FAILED 87 +#define VT_DRIVER_IOCTL_FAILED 88 +#define VT_SAMP_FILE_CREATE_FAILED 89 +#define VT_MODULE_FILE_CREATE_FAILED 90 +#define VT_INVALID_SAMPLE_FILE_NAME 91 +#define 
VT_INVALID_MODULE_FILE_NAME 92 +#define VT_FORK_CHILD_PROCESS_FAILED 93 +#define VT_UNEXPECTED_MISMATCH_IN_STRING_TYPES 94 +#define VT_INCOMPLETE_TB5_ENCOUNTERED 95 +#define VT_ERR_CONVERSION_FROM_STRING_2_NUMBER 96 +#define VT_INVALID_STRING 97 +#define VT_UNSUPPORTED_DATA_SIZE 98 +#define VT_TBRW_INIT_FAILED 99 +#define VT_PLUGIN_UNLOAD 100 +#define VT_PLUGIN_ENTRY_NULL 101 +#define VT_UNKNOWN_PLUGIN 102 +#define VT_BUFFER_TOO_SMALL 103 +#define VT_CANNOT_MODIFY_COLUMN 104 +#define VT_MULT_FILTERS_NOT_ALLOWED 105 +#define VT_ADDRESS_IN_USE 106 +#define VT_NO_MORE_MMAPS 107 +#define VT_MAX_PAGES_IN_DS_EXCEEDED 108 +#define VT_INVALID_COL_TYPE_IN_GROUP_INFO 109 +#define VT_AGG_FN_ON_VARCHAR_NOT_SUPP 110 +#define VT_INVALID_ACCESS_PERMS 111 +#define VT_NO_DATA_TO_DISPLAY 112 +#define VT_TB5_IS_NOT_BOUND 113 +#define VT_MISSING_GROUP_BY_COLUMN 114 +#define VT_SMRK_MAX_STREAMS_EXCEEDED 115 +#define VT_SMRK_STREAM_NOT_CREATED 116 +#define VT_SMRK_NOT_IMPL 117 +#define VT_SMRK_TYPE_NOT_IMPL 118 +#define VT_SMRK_TYPE_ALREADY_SET 119 +#define VT_SMRK_NO_STREAM 120 +#define VT_SMRK_INVALID_STREAM_TYPE 121 +#define VT_SMRK_STREAM_NOT_FOUND 122 +#define VT_SMRK_FAIL 123 +#define VT_SECTION_NOT_READABLE 124 +#define VT_SECTION_NOT_WRITEABLE 125 +#define VT_GLOBAL_SECTION_NOT_CLOSED 126 +#define VT_STREAM_SECTION_NOT_CLOSED 127 +#define VT_STREAM_NOT_CLOSED 128 +#define VT_STREAM_NOT_BOUND 129 +#define VT_NO_COLS_SPECIFIED 130 +#define VT_NOT_ALL_SECTIONS_CLOSED 131 +#define VT_SMRK_INVALID_PTR 132 +#define VT_UNEXPECTED_BIND_MISMATCH 133 +#define VT_WIN_TIMER_ERROR 134 +#define VT_ONLY_SNGL_DEPNDT_COL_ALLWD 135 +#define VT_BAD_MODULE 136 +#define VT_INPUT_SOURCE_INFO_NOT_SET 137 +#define VT_UNSUPPORTED_TIME_GRAN 138 +#define VT_NO_SAMPLES_COLLECTED 139 +#define VT_INVALID_CPU_TYPE_VERSION 140 +#define VT_BIND_UNEXPECTED_1STMODREC 141 +#define VT_BIND_MODULES_NOT_SORTED 142 +#define VT_UNEXPECTED_NUM_CPUIDS 143 +#define VT_UNSUPPORTED_ARCH_TYPE 144 +#define VT_NO_DATA_TO_WRITE 
145 +#define VT_EM_TIME_SLICE_TOO_SMALL 146 +#define VT_EM_TOO_MANY_EVENT_GROUPS 147 +#define VT_EM_ZERO_GROUPS 148 +#define VT_EM_NOT_SUPPORTED 149 +#define VT_PMU_IN_USE 150 +#define VT_TOO_MANY_INTERRUPTS 151 +#define VT_MAX_SAMPLES_REACHED 152 +#define VT_MODULE_COLLECTION_FAILED 153 +#define VT_INCOMPATIBLE_DRIVER 154 +#define VT_UNABLE_LOCATE_TRIGGER_EVENT 155 +#define VT_COMMAND_NOT_HANDLED 156 +#define VT_DRIVER_VERSION_MISMATCH 157 +#define VT_MAX_MARKERS 158 +#define VT_DRIVER_COMM_FAILED 159 +#define VT_CHIPSET_CONFIG_FAILED 160 +#define VT_BAD_DATA_BASE 161 +#define VT_PAX_SERVICE_NOT_CONNECTED 162 +#define VT_PAX_SERVICE_ERROR 163 +#define VT_PAX_PMU_RESERVE_FAILED 164 +#define VT_INVALID_CPU_INFO_TYPE 165 +#define VT_CACHE_DOESNT_EXIST 166 +#define VT_UNSUPPORTED_UNCORE_ARCH_TYPE 167 +#define VT_EXCEEDED_MAX_EVENTS 168 +#define VT_MARKER_TIMER_FAILED 169 +#define VT_PAX_PMU_UNRESERVE_FAILED 170 +#define VT_MULTIPLE_PROCESSES_FOUND 171 +#define VT_NO_SUCH_PROCESS_FOUND 172 +#define VT_PCL_NOT_ENABLED 173 +#define VT_PCL_UID_CHECK 174 +#define VT_DEL_RESULTS_DIR_FAILED 175 +#define VT_NO_VALID_EVENTS 176 +#define VT_INVALID_EVENT 177 +#define VT_EVENTS_COUNTED 178 +#define VT_EVENTS_COLLECTED 179 +#define VT_UNSUPPORTED_GFX_ARCH_TYPE 180 +#define VT_GFX_CONFIG_FAILED 181 +#define VT_UNSUPPORTED_NON_NATIVE_MODE 182 +#define VT_INVALID_DEVICE 183 +#define VT_ENV_SETUP_FAILED 184 +#define VT_RESUME_NOT_RECEIVED 185 +#define VT_UNSUPPORTED_PWR_ARCH_TYPE 186 +#define VT_PWR_CONFIG_FAILED 187 +#define VT_NMI_WATCHDOG_FOUND 188 +#define VT_NO_PMU_RESOURCES 189 +#define VT_MIC_CARD_NOT_ONLINE 190 +#define VT_FREEZE_ON_PMI_NOT_AVAIL 191 +#define VT_FLUSH_FAILED 192 +#define VT_FLUSH_SUCCESS 193 +#define VT_WRITE_ERROR 194 +#define VT_NO_SPACE 195 +#define VT_MSR_ACCESS_ERROR 196 +#define VT_PEBS_NOT_SUPPORTED 197 +#define VT_LUA_PARSE_ERROR 198 +#define VT_COMM_CONNECTION_CLOSED_BY_REMOTE 199 +#define VT_COMM_LISTEN_ERROR 200 +#define VT_COMM_BIND_ERROR 201 
+#define VT_COMM_ACCEPT_ERROR 202 +#define VT_COMM_SEND_ERROR 203 +#define VT_COMM_RECV_ERROR 204 +#define VT_COMM_SOCKET_ERROR 205 +#define VT_COMM_CONNECT_ERROR 206 +#define VT_TARGET_COLLECTION_MISMATCH 207 +#define VT_INVALID_SEP_DRIVER_LOG 208 +#define VT_COMM_PROTOCOL_VERSION_MISTMATCH 209 +#define VT_SAMP_IN_UNEXPECTED_STATE 210 +#define VT_COMM_RECV_BUF_RESIZE_ERROR 211 + +/* + * define error code for checking on async marker request + */ +#define VT_INVALID_MARKER_ID -1 + +/* + * ************************************************************ + * NOTE: after adding new error code(s), remember to also + * update the following: + * 1) VT_LAST_ERROR_CODE below + * 2) viewer/sampling_utils/src/rise.c + * 3) collector/controller/sep_msg_catalog.xmc + * 4) qnx_kernel/sepdk/include/rise_errors.h + * + * ************************************************************ + */ + +// +// To make error checking easier, the special VT_LAST_ERROR_CODE +// should be set to whatever is the last error on the list above +// +#define VT_LAST_ERROR_CODE VT_COMM_RECV_BUF_RESIZE_ERROR + +// +// Define a macro to determine success or failure. 
Users of this +// error header file should use the macros instead of direct +// checks so that we can change the error numbers in the future +// (such as making negative numbers be an error indication and positive +// numbers being a success with a value indication) +// +#define VTSA_SUCCESS(x) ((x) == VT_SUCCESS) +#define VTSA_FAILED(x) (!VTSA_SUCCESS(x)) + +// +// These should be deprecated, but we'll keep them here just in case +// +#define SEP_IS_SUCCESS(x) VTSA_SUCCESS(x) +#define SEP_IS_FAILED(x) VTSA_FAILED(x) + +/************************************************************* + * API Error Codes + *************************************************************/ +#define VTAPI_INVALID_MAX_SAMP VT_INVALID_MAX_SAMP +#define VTAPI_INVALID_SAMP_PER_BUFF VT_INVALID_SAMP_PER_BUFF +#define VTAPI_INVALID_SAMP_INTERVAL VT_INVALID_SAMP_INTERVAL +#define VTAPI_INVALID_PATH VT_INVALID_PATH +#define VTAPI_TB5_IN_USE VT_TB5_IN_USE +#define VTAPI_INVALID_NUM_EVENTS VT_INVALID_NUM_EVENTS +#define VTAPI_INTERNAL_ERROR VT_INTERNAL_ERROR +#define VTAPI_BAD_EVENT_NAME VT_BAD_EVENT_NAME +#define VTAPI_NO_SAMP_SESSION VT_NO_SAMP_SESSION +#define VTAPI_NO_EVENTS VT_NO_EVENTS +#define VTAPI_MULTIPLE_RUNS VT_MULTIPLE_RUNS +#define VTAPI_NO_SAM_PARAMS VT_NO_SAM_PARAMS +#define VTAPI_SDB_ALREADY_EXISTS VT_SDB_ALREADY_EXISTS +#define VTAPI_SAMPLING_ALREADY_STARTED VT_SAMPLING_ALREADY_STARTED +#define VTAPI_TBS_NOT_SUPPORTED VT_TBS_NOT_SUPPORTED +#define VTAPI_INVALID_SAMPARAMS_SIZE VT_INVALID_SAMPARAMS_SIZE +#define VTAPI_INVALID_EVENT_SIZE VT_INVALID_EVENT_SIZE +#define VTAPI_ALREADY_PROCESSES VT_ALREADY_PROCESSES +#define VTAPI_INVALID_EVENTS_PATH VT_INVALID_EVENTS_PATH +#define VTAPI_INVALID_LICENSE VT_INVALID_LICENSE + +typedef int RISE_ERROR; +typedef void *RISE_PTR; + +#endif diff --git a/drivers/platform/x86/sepdk/pax/Makefile b/drivers/platform/x86/sepdk/pax/Makefile new file mode 100755 index 0000000000000..267d70eeaab56 --- /dev/null +++ b/drivers/platform/x86/sepdk/pax/Makefile 
@@ -0,0 +1,4 @@ +ccflags-y := -I$(src)/../include -I$(src)/../inc + +obj-$(CONFIG_SEP_PAX) += pax.o + diff --git a/drivers/platform/x86/sepdk/pax/pax.c b/drivers/platform/x86/sepdk/pax/pax.c new file mode 100755 index 0000000000000..f8eebf989b0ee --- /dev/null +++ b/drivers/platform/x86/sepdk/pax/pax.c @@ -0,0 +1,967 @@ +/* **************************************************************************** + * Copyright(C) 2009-2018 Intel Corporation. All Rights Reserved. + * + * This file is part of SEP Development Kit + * + * SEP Development Kit is free software; you can redistribute it + * and/or modify it under the terms of the GNU General Public License + * version 2 as published by the Free Software Foundation. + * + * SEP Development Kit is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * As a special exception, you may use this file as part of a free software + * library without restriction. Specifically, if other files instantiate + * templates or use macros or inline functions from this file, or you + * compile this file and link it with other files to produce an executable + * this file does not by itself cause the resulting executable to be + * covered by the GNU General Public License. This exception does not + * however invalidate any other reasons why the executable file might be + * covered by the GNU General Public License. 
+ * **************************************************************************** + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#if defined(CONFIG_HARDLOCKUP_DETECTOR) && \ + LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 32) +#include +#include +#include +#include +#endif + +#include "lwpmudrv_defines.h" +#include "lwpmudrv_types.h" +#include "lwpmudrv.h" +#include "lwpmudrv_ioctl.h" + +#include "control.h" +#include "pax_shared.h" +#include "pax.h" + +MODULE_AUTHOR("Copyright(C) 2009-2018 Intel Corporation"); +MODULE_VERSION(PAX_NAME "_" PAX_VERSION_STR); +MODULE_LICENSE("Dual BSD/GPL"); + +typedef struct PAX_DEV_NODE_S PAX_DEV_NODE; +typedef PAX_DEV_NODE * PAX_DEV; + +struct PAX_DEV_NODE_S { + long buffer; + struct semaphore sem; + struct cdev cdev; +}; + +#define PAX_DEV_buffer(dev) ((dev)->buffer) +#define PAX_DEV_sem(dev) ((dev)->sem) +#define PAX_DEV_cdev(dev) ((dev)->cdev) + +// global variables for the PAX driver + +static PAX_DEV pax_control; // main control +static dev_t pax_devnum; // the major char device number for PAX +static PAX_VERSION_NODE pax_version; // version of PAX +static PAX_INFO_NODE pax_info; // information on PAX +static PAX_STATUS_NODE pax_status; // PAX reservation status + +static struct class *pax_class; + +#define NMI_WATCHDOG_PATH "/proc/sys/kernel/nmi_watchdog" +static S8 nmi_watchdog_restore = '0'; + +static struct proc_dir_entry *pax_version_file; + +static int pax_version_proc_read(struct seq_file *, void *); +static int pax_version_proc_open(struct inode *, struct file *); +static struct file_operations pax_version_ops = { + .owner = THIS_MODULE, + .open = pax_version_proc_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, +}; + +// Print macros for kernel debugging + +#if defined(DEBUG) +#define PAX_PRINT_DEBUG(fmt, args...) \ + { \ + printk(KERN_INFO "PAX: [DEBUG] " fmt, ##args); \ + } +#else +#define PAX_PRINT_DEBUG(fmt, args...) 
\ + { \ + ; \ + } +#endif +#define PAX_PRINT(fmt, args...) \ + { \ + printk(KERN_INFO "PAX: " fmt, ##args); \ + } +#define PAX_PRINT_WARNING(fmt, args...) \ + { \ + printk(KERN_ALERT "PAX: [Warning] " fmt, ##args); \ + } +#define PAX_PRINT_ERROR(fmt, args...) \ + { \ + printk(KERN_CRIT "PAX: [ERROR] " fmt, ##args); \ + } + +// various other useful macros + +#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 25) +#define PAX_FIND_TASK_BY_PID(pid) find_task_by_pid(pid) +#elif LINUX_VERSION_CODE < KERNEL_VERSION(4, 7, 0) +#define PAX_FIND_TASK_BY_PID(pid) \ + pid_task(find_pid_ns(pid, &init_pid_ns), PIDTYPE_PID); +#else +#define PAX_FIND_TASK_BY_PID(pid) pid_task(find_get_pid(pid), PIDTYPE_PID); +#endif + +#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 18) +#define PAX_TASKLIST_READ_LOCK() read_lock(&tasklist_lock) +#define PAX_TASKLIST_READ_UNLOCK() read_unlock(&tasklist_lock) +#else +#define PAX_TASKLIST_READ_LOCK() rcu_read_lock() +#define PAX_TASKLIST_READ_UNLOCK() rcu_read_unlock() +#endif + +#if defined(CONFIG_HARDLOCKUP_DETECTOR) && \ + LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 32) + +static struct task_struct *pax_Enable_NMIWatchdog_Thread; +static struct semaphore pax_Enable_NMIWatchdog_Sem; +static struct task_struct *pax_Disable_NMIWatchdog_Thread; +static struct semaphore pax_Disable_NMIWatchdog_Sem; + +/* ------------------------------------------------------------------------- */ +/*! 
+ * @fn S32 pax_Disable_NMIWatchdog(PVOID data) + * + * @param data - Pointer to data + * + * @return S32 + * + * @brief Disable nmi watchdog + * + * Special Notes + */ +static S32 pax_Disable_NMIWatchdog(PVOID data) +{ + struct file *fd; + mm_segment_t old_fs; + struct cred *kcred; + loff_t pos = 0; + S8 new_val = '0'; + + up(&pax_Disable_NMIWatchdog_Sem); + + kcred = prepare_kernel_cred(NULL); + if (kcred) { + commit_creds(kcred); + } else { + PAX_PRINT_ERROR( + "pax_Disable_NMIWatchdog: prepare_kernel_cred returns NULL\n"); + } + + fd = filp_open(NMI_WATCHDOG_PATH, O_RDWR, 0); + + if (fd) { + fd->f_op->read(fd, (char __user *)&nmi_watchdog_restore, 1, &fd->f_pos); + PAX_PRINT_DEBUG("Existing nmi_watchdog value = %c\n", + nmi_watchdog_restore); + + if (nmi_watchdog_restore != '0') { + old_fs = get_fs(); + set_fs(KERNEL_DS); + fd->f_op->write(fd, (char __user *)&new_val, 1, &pos); + set_fs(old_fs); + } else { + PAX_PRINT_DEBUG( + "pax_Disable_NMIWatchdog: NMI watchdog already disabled!\n"); + } + + filp_close(fd, NULL); + } else { + PAX_PRINT_ERROR( + "pax_Disable_NMIWatchdog: filp_open returns NULL\n"); + } + + while (!kthread_should_stop()) { + schedule(); + } + + return 0; +} + +/* ------------------------------------------------------------------------- */ +/*! + * @fn S32 pax_Check_NMIWatchdog(PVOID data) + * + * @param data - Pointer to data + * + * @return S32 + * + * @brief Check nmi watchdog + * + * Special Notes + */ + +#if 0 +static S32 pax_Check_NMIWatchdog(PVOID data) +{ + struct file *fd; + struct cred *kcred; + + kcred = prepare_kernel_cred(NULL); + if (kcred) { + commit_creds(kcred); + } + + fd = filp_open(NMI_WATCHDOG_PATH, O_RDWR, 0); + + if (fd) { + fd->f_op->read(fd, &nmi_watchdog_restore, 1, &fd->f_pos); + PAX_PRINT_DEBUG("Checking nmi_watchdog value = %c\n", + nmi_watchdog_restore); + filp_close(fd, NULL); + } + + do_exit(0); + + return 0; +} +#endif +/* ------------------------------------------------------------------------- */ +/*! 
+ * @fn S32 pax_Enable_NMIWatchdog(PVOID data) + * + * @param data - Pointer to data + * + * @return S32 + * + * @brief Enable nmi watchdog + * + * Special Notes + */ +static S32 pax_Enable_NMIWatchdog(PVOID data) +{ + struct file *fd; + mm_segment_t old_fs; + struct cred *kcred; + loff_t pos = 0; + S8 new_val = '1'; + + up(&pax_Enable_NMIWatchdog_Sem); + + kcred = prepare_kernel_cred(NULL); + if (kcred) { + commit_creds(kcred); + } else { + PAX_PRINT_ERROR( + "pax_Enable_NMIWatchdog: prepare_kernel_cred returns NULL!\n"); + } + + fd = filp_open(NMI_WATCHDOG_PATH, O_WRONLY, 0); + + if (fd) { + old_fs = get_fs(); + set_fs(KERNEL_DS); + fd->f_op->write(fd, (char __user *)&new_val, 1, &pos); + set_fs(old_fs); + + filp_close(fd, NULL); + } else { + PAX_PRINT_ERROR( + "pax_Enable_NMIWatchdog: filp_open returns NULL!\n"); + } + + while (!kthread_should_stop()) { + schedule(); + } + + return 0; +} +#endif + +/* ------------------------------------------------------------------------- */ +/*! + * @fn void pax_Init() + * + * @param none + * + * @return none + * + * @brief Initialize PAX system + * + * Special Notes + */ +static void pax_Init(void) +{ + // + // Initialize PAX driver version (done once at driver load time) + // + + PAX_VERSION_NODE_major(&pax_version) = PAX_MAJOR_VERSION; + PAX_VERSION_NODE_minor(&pax_version) = PAX_MINOR_VERSION; + PAX_VERSION_NODE_bugfix(&pax_version) = PAX_BUGFIX_VERSION; + + // initialize PAX_Info + pax_info.version = PAX_VERSION_NODE_version(&pax_version); + pax_info.managed_by = 1; // THIS_MODULE->name; + + // initialize PAX_Status + pax_status.guid = PAX_GUID_UNINITIALIZED; + pax_status.pid = 0; + pax_status.start_time = 0; + pax_status.is_reserved = PAX_PMU_UNRESERVED; + +} + +/* ------------------------------------------------------------------------- */ +/*! 
+ * @fn void pax_Cleanup() + * + * @param none + * + * @return none + * + * @brief UnInitialize PAX system + * + * Special Notes + */ +static void pax_Cleanup(void) +{ + // uninitialize PAX_Info + pax_info.managed_by = 0; + + // uninitialize PAX_Status + pax_status.guid = PAX_GUID_UNINITIALIZED; + pax_status.pid = 0; + pax_status.start_time = 0; + pax_status.is_reserved = PAX_PMU_UNRESERVED; + +} + +/* ------------------------------------------------------------------------- */ +/*! + * @fn U32 pax_Process_Valid() + * + * @param U32 pid - process ID + * + * @return TRUE or FALSE + * + * @brief Check whether process with pid still exists, and if so, + * whether it is still "alive". If so, then process is + * deemed valid. Otherwise, process is deemed invalid. + * + * Special Notes + */ +static U32 pax_Process_Valid(U32 pid) +{ + struct task_struct *process_task; + U32 valid_process; + + // + // There doesn't seem to be a way to force the process_task to continue + // to exist after the read_lock is released (SMP system could delete the + // process after lock is released on another processor), so we need to + // do all the work with the lock held... There is a routine on later + // 2.6 kernels (get_task_struct() and put_task_struct()) which seems + // to do what we want, but the code behind the macro calls a function + // that isn't EXPORT'ed so we can't use it in a device driver... + // + PAX_TASKLIST_READ_LOCK(); + process_task = PAX_FIND_TASK_BY_PID(pax_status.pid); + if ((process_task == NULL) || + (process_task->exit_state == EXIT_ZOMBIE) || + (process_task->exit_state == EXIT_DEAD)) { + // not a valid process + valid_process = FALSE; + } else { + // process is "alive", so assume it is still valid ... 
+ valid_process = TRUE; + } + PAX_TASKLIST_READ_UNLOCK(); + + return valid_process; +} + +// ************************************************************************** +// +// below are PAX Open/Read/Write device functions (appears in /proc/kallsyms) +// +// ************************************************************************** + +/* ------------------------------------------------------------------------- */ +/*! + * @fn int pax_Open() + * + * @param struct inode *inode + * @param struct file *filp + * + * @return int (TODO: check for open failure) + * + * @brief This function is called when doing an open(/dev/pax) + * + * Special Notes + */ +static int pax_Open(struct inode *inode, struct file *filp) +{ + PAX_PRINT_DEBUG("open called on maj:%d, min:%d\n", imajor(inode), + iminor(inode)); + filp->private_data = container_of(inode->i_cdev, PAX_DEV_NODE, cdev); + + return 0; +} + +// ************************************************************************** +// +// below are PAX IOCTL function handlers +// +// ************************************************************************** + +/* ------------------------------------------------------------------------- */ +/*! 
+ * @fn OS_STATUS pax_Get_Info() + * + * @param IOCTL_ARGS arg - pointer to the output buffer + * + * @return OS_STATUS + * + * @brief Local function that handles the PAX_IOCTL_INFO call + * Returns static information related to PAX (e.g., version) + * + * Special Notes + */ +static OS_STATUS pax_Get_Info(IOCTL_ARGS arg) +{ + int error; + + error = copy_to_user((void __user *)(arg->buf_usr_to_drv), + &pax_info, sizeof(PAX_INFO_NODE)); + + if (error != 0) { + PAX_PRINT_ERROR( + "pax_Get_Info: unable to copy to user (error=%d)!\n", + error); + return OS_FAULT; + } + + PAX_PRINT_DEBUG("pax_Get_Info: sending PAX info (%ld bytes):\n", + sizeof(PAX_INFO_NODE)); + PAX_PRINT_DEBUG("pax_Get_Info: raw_version = %u (0x%x)\n", + pax_info.version, pax_info.version); + PAX_PRINT_DEBUG("pax_Get_Info: major = %u\n", + PAX_VERSION_NODE_major(&pax_version)); + PAX_PRINT_DEBUG("pax_Get_Info: minor = %u\n", + PAX_VERSION_NODE_minor(&pax_version)); + PAX_PRINT_DEBUG("pax_Get_Info: bugfix = %u\n", + PAX_VERSION_NODE_bugfix(&pax_version)); + PAX_PRINT_DEBUG("pax_Get_Info: managed_by = %lu\n", + (long unsigned int)pax_info.managed_by); + PAX_PRINT_DEBUG("pax_Get_Info: information sent.\n"); + + return OS_SUCCESS; +} + +/* ------------------------------------------------------------------------- */ +/*! 
+ * @fn OS_STATUS pax_Get_Status() + * + * @param IOCTL_ARGS arg - pointer to the output buffer + * + * @return OS_STATUS + * + * @brief Local function that handles the PAX_IOCTL_STATUS call + * Returns status of the reservation (e.g., who owns) + * + * Special Notes + */ +static OS_STATUS pax_Get_Status(IOCTL_ARGS arg) +{ + int error; + + error = copy_to_user((void __user *)(arg->buf_usr_to_drv), + &pax_status, sizeof(PAX_STATUS_NODE)); + if (error != 0) { + PAX_PRINT_ERROR( + "pax_Get_Status: unable to copy to user (error=%d)!\n", + error); + return OS_FAULT; + } + + PAX_PRINT_DEBUG("pax_Get_Status: sending PAX status (%ld bytes):\n", + sizeof(PAX_STATUS_NODE)); + PAX_PRINT_DEBUG("pax_Get_Status: guid = %lu\n", + (long unsigned int)pax_status.guid); + PAX_PRINT_DEBUG("pax_Get_Status: pid = %lu\n", + (long unsigned int)pax_status.pid); + PAX_PRINT_DEBUG("pax_Get_Status: start_time = %lu\n", + (long unsigned int)pax_status.start_time); + PAX_PRINT_DEBUG("pax_Get_Status: is_reserved = %u\n", + pax_status.is_reserved); + PAX_PRINT_DEBUG("pax_Get_Status: status sent.\n"); + + return OS_SUCCESS; +} + +/* ------------------------------------------------------------------------- */ +/*! + * @fn OS_STATUS pax_Unreserve() + * + * @param none + * + * @return OS_STATUS + * + * @brief Local function that handles the PAX_IOCTL_UNRESERVE call + * Returns OS_SUCCESS if PMU unreservation succeeded, otherwise failure + * + * Special Notes + */ +static OS_STATUS pax_Unreserve(void) +{ + // if no reservation is currently held, then return success + if (pax_status.is_reserved == PAX_PMU_UNRESERVED) { + PAX_PRINT_DEBUG("pax_Unreserve: currently unreserved\n"); + return OS_SUCCESS; + } + + // otherwise, there is a reservation ... 
+ // allow the process which started the reservation to unreserve + // or if that process is invalid, then any other process can unreserve + if ((pax_status.pid == current->pid) || + (!pax_Process_Valid(pax_status.pid))) { + S32 reservation = -1; + PAX_PRINT_DEBUG( + "pax_Unreserve: pid %d attempting to unreserve PMU held by pid %d\n", + (U32)current->pid, (U32)pax_status.pid); + +#if !defined(DRV_ANDROID) && !defined(DRV_CHROMEOS) && \ + defined(CONFIG_HARDLOCKUP_DETECTOR) && \ + LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 32) + if (nmi_watchdog_restore != '0') { + PAX_PRINT_DEBUG( + "Attempting to enable NMI watchdog...\n"); + + sema_init(&pax_Enable_NMIWatchdog_Sem, 0); + + pax_Enable_NMIWatchdog_Thread = + kthread_run(&pax_Enable_NMIWatchdog, NULL, + "pax_enable_nmi_watchdog"); + if (!pax_Enable_NMIWatchdog_Thread || + pax_Enable_NMIWatchdog_Thread == ERR_PTR(-ENOMEM)) { + PAX_PRINT_ERROR( + "pax_Unreserve: could not create pax_enable_nmi_watchdog kthread."); + } else { + down(&pax_Enable_NMIWatchdog_Sem); + kthread_stop(pax_Enable_NMIWatchdog_Thread); + } + pax_Enable_NMIWatchdog_Thread = NULL; + nmi_watchdog_restore = '0'; + } +#endif + + reservation = cmpxchg(&pax_status.is_reserved, PAX_PMU_RESERVED, + PAX_PMU_UNRESERVED); + if (reservation < 0) { + // no-op ... eliminates "variable not used" compiler warning + } + PAX_PRINT_DEBUG("pax_Unreserve: reserve=%d, is_reserved=%d\n", + reservation, pax_status.is_reserved); + // unreserve but keep track of last PID/GUID that had reservation + } + + PAX_PRINT_DEBUG("pax_Unreserve: pid %d unreserve status: %d\n", + current->pid, pax_status.is_reserved); + + return ((pax_status.is_reserved == PAX_PMU_UNRESERVED) ? OS_SUCCESS : + OS_FAULT); +} + +/* ------------------------------------------------------------------------- */ +/*! 
+ * @fn OS_STATUS pax_Reserve_All() + * + * @param none + * + * @return OS_STATUS + * + * @brief Local function that handles the PAX_IOCTL_RESERVE_ALL call + * Returns OS_SUCCESS if PMU reservation succeeded, otherwise failure + * + * Special Notes + */ +static OS_STATUS pax_Reserve_All(void) +{ + S32 reservation = -1; // previous reservation state (initially, unknown) + + // check if PMU can be unreserved + if (pax_status.is_reserved == PAX_PMU_RESERVED) { + OS_STATUS unreserve_err = pax_Unreserve(); + if (unreserve_err != OS_SUCCESS) { + return unreserve_err; // attempt to unreserve failed, so return error + } + } + + PAX_PRINT_DEBUG("pax_Reserve_All: pid %d attempting to reserve PMU\n", + current->pid); + + // at this point, there is no reservation, so commence race to reserve ... + reservation = cmpxchg(&pax_status.is_reserved, PAX_PMU_UNRESERVED, + PAX_PMU_RESERVED); + + // only one request to reserve will succeed, and when it does, update status + // information with the successful request + if ((reservation == PAX_PMU_UNRESERVED) && + (pax_status.is_reserved == PAX_PMU_RESERVED)) { + pax_status.start_time = rdtsc_ordered(); + pax_status.pid = current->pid; + +#if !defined(DRV_ANDROID) && !defined(DRV_CHROMEOS) && \ + defined(CONFIG_HARDLOCKUP_DETECTOR) && \ + LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 32) + sema_init(&pax_Disable_NMIWatchdog_Sem, 0); + pax_Disable_NMIWatchdog_Thread = + kthread_run(&pax_Disable_NMIWatchdog, NULL, + "pax_disable_nmi_watchdog"); + if (!pax_Disable_NMIWatchdog_Thread || + pax_Disable_NMIWatchdog_Thread == ERR_PTR(-ENOMEM)) { + PAX_PRINT_ERROR( + "pax_Reserve_All: could not create pax_disable_nmi_watchdog kthread."); + } else { + down(&pax_Disable_NMIWatchdog_Sem); + kthread_stop(pax_Disable_NMIWatchdog_Thread); + } + pax_Disable_NMIWatchdog_Thread = NULL; +#endif + + return OS_SUCCESS; + } + + return OS_FAULT; +} + +/* ------------------------------------------------------------------------- */ +/*! 
+ * @fn OS_STATUS pax_Service_IOCTL() + * + * @param inode - pointer to the device object + * @param filp - pointer to the file object + * @param cmd - ioctl value (defined in lwpmu_ioctl.h) + * @param arg - arg or arg pointer + * + * @return OS_STATUS + * + * @brief Worker function that handles IOCTL requests from the user mode + * + * Special Notes + */ +static IOCTL_OP_TYPE pax_Service_IOCTL(IOCTL_USE_INODE struct file *filp, + unsigned int cmd, + IOCTL_ARGS_NODE local_args) +{ + int status = OS_SUCCESS; + + // dispatch to appropriate PAX IOCTL function + switch (cmd) { + case PAX_IOCTL_INFO: + PAX_PRINT_DEBUG("PAX_IOCTL_INFO\n"); + status = pax_Get_Info(&local_args); + break; + + case PAX_IOCTL_STATUS: + PAX_PRINT_DEBUG("PAX_IOCTL_STATUS\n"); + status = pax_Get_Status(&local_args); + break; + + case PAX_IOCTL_RESERVE_ALL: + PAX_PRINT_DEBUG("PAX_IOCTL_RESERVE_ALL\n"); + status = pax_Reserve_All(); + break; + + case PAX_IOCTL_UNRESERVE: + PAX_PRINT_DEBUG("PAX_IOCTL_UNRESERVE\n"); + status = pax_Unreserve(); + break; + + default: + PAX_PRINT_ERROR("unknown IOCTL cmd: %d magic:%d number:%d\n", + cmd, _IOC_TYPE(cmd), _IOC_NR(cmd)); + status = OS_ILLEGAL_IOCTL; + break; + } + + return status; +} + +static long pax_Device_Control(IOCTL_USE_INODE struct file *filp, + unsigned int cmd, unsigned long arg) +{ + int status = OS_SUCCESS; + IOCTL_ARGS_NODE local_args; + + memset(&local_args, 0, sizeof(IOCTL_ARGS_NODE)); + if (arg) { + status = copy_from_user(&local_args, (void __user *)arg, + sizeof(IOCTL_ARGS_NODE)); + if (status != OS_SUCCESS) + return status; + } + + status = pax_Service_IOCTL(IOCTL_USE_INODE filp, cmd, local_args); + return status; +} + +#if defined(CONFIG_COMPAT) && defined(DRV_EM64T) +static IOCTL_OP_TYPE pax_Device_Control_Compat(struct file *filp, + unsigned int cmd, + unsigned long arg) +{ + int status = OS_SUCCESS; + IOCTL_COMPAT_ARGS_NODE local_args_compat; + IOCTL_ARGS_NODE local_args; + + memset(&local_args_compat, 0, 
sizeof(IOCTL_COMPAT_ARGS_NODE)); + if (arg) { + status = copy_from_user(&local_args_compat, + (void __user *)arg, + sizeof(IOCTL_COMPAT_ARGS_NODE)); + if (status != OS_SUCCESS) + return status; + } + + local_args.len_drv_to_usr = local_args_compat.len_drv_to_usr; + local_args.len_usr_to_drv = local_args_compat.len_usr_to_drv; + local_args.buf_drv_to_usr = + (char *)compat_ptr(local_args_compat.buf_drv_to_usr); + local_args.buf_usr_to_drv = + (char *)compat_ptr(local_args_compat.buf_usr_to_drv); + + if (cmd == PAX_IOCTL_COMPAT_INFO) { + cmd = PAX_IOCTL_INFO; + } + local_args.command = cmd; + + status = pax_Service_IOCTL(filp, cmd, local_args); + + return status; +} +#endif + +// ************************************************************************** +// +// PAX device file operation definitions (required by kernel) +// +// ************************************************************************** + +/* + * Structure that declares the usual file access functions + * First one is for pax, the control functions + */ +static struct file_operations pax_Fops = { + .owner = THIS_MODULE, + IOCTL_OP = pax_Device_Control, +#if defined(CONFIG_COMPAT) && defined(DRV_EM64T) + .compat_ioctl = pax_Device_Control_Compat, +#endif + .read = NULL, + .write = NULL, + .open = pax_Open, + .release = NULL, + .llseek = NULL, +}; + +/* ------------------------------------------------------------------------- */ +/*! 
+ * @fn int pax_Setup_Cdev() + * + * @param dev - pointer to the device object + * @param devnum - major/minor device number + * @param fops - point to file operations struct + * + * @return int + * + * @brief Set up functions to be handled by PAX device + * + * Special Notes + */ +static int pax_Setup_Cdev(PAX_DEV dev, struct file_operations *fops, + dev_t devnum) +{ + cdev_init(&PAX_DEV_cdev(dev), fops); + PAX_DEV_cdev(dev).owner = THIS_MODULE; + PAX_DEV_cdev(dev).ops = fops; + + return cdev_add(&PAX_DEV_cdev(dev), devnum, 1); +} + +static int pax_version_proc_read(struct seq_file *file, void *v) +{ + seq_printf(file, "%u", PAX_VERSION_NODE_version(&pax_version)); + + return 0; +} + +static int pax_version_proc_open(struct inode *inode, struct file *file) +{ + return single_open(file, pax_version_proc_read, NULL); +} + +// ************************************************************************** +// +// Exported PAX functions (see pax.h) ; will appear under /proc/kallsyms +// +// ************************************************************************** + +/* ------------------------------------------------------------------------- */ +/*! 
+ * @fn int pax_Load() + * + * @param none + * + * @return int + * + * @brief Load the PAX subsystem + * + * Special Notes + */ +int pax_Load(void) +{ + int result; + struct device *pax_device; + + pax_control = NULL; + + PAX_PRINT_DEBUG("checking for %s interface...\n", PAX_NAME); + + /* If PAX interface does not exist, create it */ + pax_devnum = MKDEV(0, 0); + PAX_PRINT_DEBUG("got major device %d\n", pax_devnum); + /* allocate character device */ + result = alloc_chrdev_region(&pax_devnum, 0, 1, PAX_NAME); + if (result < 0) { + PAX_PRINT_ERROR("unable to alloc chrdev_region for %s!\n", + PAX_NAME); + return result; + } + + pax_class = class_create(THIS_MODULE, "pax"); + if (IS_ERR(pax_class)) { + PAX_PRINT_ERROR("Error registering pax class\n"); + } + pax_device = device_create(pax_class, NULL, pax_devnum, NULL, "pax"); + if (pax_device == NULL) { + return OS_INVALID; + } + + PAX_PRINT_DEBUG("%s major number is %d\n", PAX_NAME, MAJOR(pax_devnum)); + /* Allocate memory for the PAX control device */ + pax_control = (PVOID)kzalloc(sizeof(PAX_DEV_NODE), GFP_KERNEL); + if (!pax_control) { + PAX_PRINT_ERROR("Unable to allocate memory for %s device\n", + PAX_NAME); + return OS_NO_MEM; + } + // /* Initialize memory for the PAX control device */ + // memset(pax_control, '\0', sizeof(PAX_DEV_NODE)); + /* Register PAX file operations with the OS */ + result = pax_Setup_Cdev(pax_control, &pax_Fops, pax_devnum); + if (result) { + PAX_PRINT_ERROR("Unable to add %s as char device (error=%d)\n", + PAX_NAME, result); + return result; + } + + pax_Init(); + + pax_version_file = + proc_create("pax_version", 0, NULL, &pax_version_ops); + if (pax_version_file == NULL) { + SEP_PRINT_ERROR("Unalbe to create the pax_version proc file\n"); + } + + // + // Display driver version information + // + PAX_PRINT("PMU arbitration service v%d.%d.%d has been started.\n", + PAX_VERSION_NODE_major(&pax_version), + PAX_VERSION_NODE_minor(&pax_version), + PAX_VERSION_NODE_bugfix(&pax_version)); + + 
return result; +} + +EXPORT_SYMBOL(pax_Load); + +/* ------------------------------------------------------------------------- */ +/*! + * @fn int pax_Unload() + * + * @param none + * + * @return none + * + * @brief Unload the PAX subsystem + * + * Special Notes + */ +void pax_Unload(void) +{ + // warn if unable to unreserve + if (pax_Unreserve() != OS_SUCCESS) { + PAX_PRINT_WARNING( + "Unloading driver with existing reservation ...."); + PAX_PRINT_WARNING(" guid = %lu\n", + (long unsigned int)pax_status.guid); + PAX_PRINT_WARNING(" pid = %ld\n", + (long int)pax_status.pid); + PAX_PRINT_WARNING(" start_time = %lu\n", + (long unsigned int)pax_status.start_time); + PAX_PRINT_WARNING(" is_reserved = %u\n", + pax_status.is_reserved); + } + + // unregister PAX device + unregister_chrdev(MAJOR(pax_devnum), "pax"); + device_destroy(pax_class, pax_devnum); + class_destroy(pax_class); + + cdev_del(&PAX_DEV_cdev(pax_control)); + unregister_chrdev_region(pax_devnum, 1); + if (pax_control != NULL) { + kfree(pax_control); + } + + remove_proc_entry("pax_version", NULL); + + // + // Display driver version information + // + PAX_PRINT("PMU arbitration service v%d.%d.%d has been stopped.\n", + PAX_VERSION_NODE_major(&pax_version), + PAX_VERSION_NODE_minor(&pax_version), + PAX_VERSION_NODE_bugfix(&pax_version)); + + // clean up resources used by PAX + pax_Cleanup(); + +} + +EXPORT_SYMBOL(pax_Unload); + +/* Declaration of the init and exit functions */ +module_init(pax_Load); +module_exit(pax_Unload); diff --git a/drivers/platform/x86/sepdk/pax/pax.h b/drivers/platform/x86/sepdk/pax/pax.h new file mode 100755 index 0000000000000..b7d48f874958d --- /dev/null +++ b/drivers/platform/x86/sepdk/pax/pax.h @@ -0,0 +1,33 @@ +/* **************************************************************************** + * Copyright(C) 2009-2018 Intel Corporation. All Rights Reserved. 
+ * + * This file is part of SEP Development Kit + * + * SEP Development Kit is free software; you can redistribute it + * and/or modify it under the terms of the GNU General Public License + * version 2 as published by the Free Software Foundation. + * + * SEP Development Kit is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * As a special exception, you may use this file as part of a free software + * library without restriction. Specifically, if other files instantiate + * templates or use macros or inline functions from this file, or you + * compile this file and link it with other files to produce an executable + * this file does not by itself cause the resulting executable to be + * covered by the GNU General Public License. This exception does not + * however invalidate any other reasons why the executable file might be + * covered by the GNU General Public License. + * **************************************************************************** + */ + +#ifndef _PAX_H_ +#define _PAX_H_ + +int pax_Load(void); + +void pax_Unload(void); + +#endif diff --git a/drivers/platform/x86/sepdk/sep/Makefile b/drivers/platform/x86/sepdk/sep/Makefile new file mode 100755 index 0000000000000..c616fc1f7ce85 --- /dev/null +++ b/drivers/platform/x86/sepdk/sep/Makefile @@ -0,0 +1,62 @@ +ccflags-y := -I$(src)/../include -I$(src)/../inc -I$(src)/.. +ccflags-y += -DSEP_CONFIG_MODULE_LAYOUT +# TODO: verify kaiser.h +#ccflags-y += -DKAISER_HEADER_PRESENT +ccflags-y += -DDRV_CPU_HOTPLUG -DDRV_USE_TASKLET_WORKAROUND -DENABLE_CPUS -DBUILD_CHIPSET -DBUILD_GFX + +asflags-y := -I$(src)/.. 
+ +ifdef CONFIG_SEP_PER_USER_MODE + ccflags-y += -DSECURE_SEP +endif + +ifdef CONFIG_SEP_MINLOG_MODE + ccflags-y += -DDRV_MINIMAL_LOGGING +endif + +ifdef CONFIG_SEP_MAXLOG_MODE + ccflags-y += -DDRV_MAXIMAL_LOGGING +endif + +ifdef CONFIG_SEP_ACRN + ccflags-y += -DDRV_SEP_ACRN_ON +endif + +obj-$(CONFIG_SEP) += sep5.o + +sep5-y := lwpmudrv.o \ + control.o \ + cpumon.o \ + eventmux.o \ + linuxos.o \ + output.o \ + pmi.o \ + sys_info.o \ + utility.o \ + valleyview_sochap.o \ + unc_power.o \ + core2.o \ + perfver4.o \ + silvermont.o \ + pci.o \ + apic.o \ + pebs.o \ + unc_gt.o \ + unc_mmio.o \ + unc_msr.o \ + unc_common.o \ + unc_pci.o \ + sepdrv_p_state.o \ + chap.o \ + gmch.o \ + gfx.o \ + unc_sa.o + + +ifdef CONFIG_X86_64 + sep5-y += sys64.o +endif + +ifdef CONFIG_X86_32 + sep5-y += sys32.o +endif diff --git a/drivers/platform/x86/sepdk/sep/apic.c b/drivers/platform/x86/sepdk/sep/apic.c new file mode 100755 index 0000000000000..8f8bc5635cedc --- /dev/null +++ b/drivers/platform/x86/sepdk/sep/apic.c @@ -0,0 +1,228 @@ +/* **************************************************************************** + * Copyright(C) 2009-2018 Intel Corporation. All Rights Reserved. + * + * This file is part of SEP Development Kit + * + * SEP Development Kit is free software; you can redistribute it + * and/or modify it under the terms of the GNU General Public License + * version 2 as published by the Free Software Foundation. + * + * SEP Development Kit is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * As a special exception, you may use this file as part of a free software + * library without restriction. 
Specifically, if other files instantiate + * templates or use macros or inline functions from this file, or you + * compile this file and link it with other files to produce an executable + * this file does not by itself cause the resulting executable to be + * covered by the GNU General Public License. This exception does not + * however invalidate any other reasons why the executable file might be + * covered by the GNU General Public License. + * **************************************************************************** + */ + +#include "lwpmudrv_defines.h" +#include +#include +#include +#include +#include +#if LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 32) +#include +#endif +#if defined(CONFIG_XEN_DOM0) && LINUX_VERSION_CODE > KERNEL_VERSION(3, 3, 0) +#include +#include +#endif + +#include "lwpmudrv_types.h" +#include "lwpmudrv_ecb.h" +#include "apic.h" +#include "lwpmudrv.h" +#include "control.h" +#include "utility.h" + + +#if defined(DRV_SEP_ACRN_ON) +extern struct profiling_vm_info_list *vm_info_list; +#else +static DEFINE_PER_CPU(unsigned long, saved_apic_lvtpc); +#endif + +/*! 
 * @fn VOID apic_Get_APIC_ID(S32 cpu)
 *
 * @brief Obtain APIC ID and record it in the per-cpu state (pcb[cpu])
 *
 * @param S32 cpuid - cpu index
 *
 * @return U32 APIC ID
 *
 * Special Notes:
 *     Three mutually exclusive sources for the id: Xen dom0 hypercall,
 *     the local APIC (read_apic_id), or the ACRN vm_info_list table.
 *     The Xen "} else {" below is deliberately split across #if blocks
 *     so the non-Xen build sees a plain statement sequence.
 */
static VOID apic_Get_APIC_ID(S32 cpu)
{
	U32 apic_id = 0;
	CPU_STATE pcpu;
#if defined(DRV_SEP_ACRN_ON)
	U16 i;
#endif

	SEP_DRV_LOG_TRACE_IN("CPU: %d.", cpu);
	pcpu = &pcb[cpu];

#if defined(CONFIG_XEN_DOM0) && LINUX_VERSION_CODE > KERNEL_VERSION(3, 3, 0)
	if (xen_initial_domain()) {
		// dom0: the hypervisor owns the hardware, so ask it
		S32 ret = 0;
		struct xen_platform_op op = {
			.cmd = XENPF_get_cpuinfo,
			.interface_version = XENPF_INTERFACE_VERSION,
			.u.pcpu_info.xen_cpuid = cpu,
		};

#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 5, 0)
		ret = HYPERVISOR_platform_op(&op);
#else
		ret = HYPERVISOR_dom0_op(&op);
#endif
		if (ret) {
			SEP_DRV_LOG_ERROR(
				"apic_Get_APIC_ID:Error in reading APIC ID on Xen PV");
			apic_id = 0;
		} else {
			apic_id = op.u.pcpu_info.apic_id;
		}
	} else {
#endif
#ifdef CONFIG_X86_LOCAL_APIC
	apic_id = read_apic_id();
#endif
#if defined(CONFIG_XEN_DOM0) && LINUX_VERSION_CODE > KERNEL_VERSION(3, 3, 0)
	}
#endif

#if defined(DRV_SEP_ACRN_ON)
	CPU_STATE_apic_id(pcpu) = 0;
	if (vm_info_list == NULL) {
		SEP_PRINT_ERROR(
			"apic_Get_APIC_ID: Error in reading APIC ID on ACRN\n");
	} else {
		// 0xFFFF appears to mark the service VM entry -- TODO confirm
		for (i = 0; i < vm_info_list->num_vms; i++) {
			if (vm_info_list->vm_list[i].vm_id == 0xFFFF) {
				CPU_STATE_apic_id(pcpu) =
					vm_info_list->vm_list[i]
						.cpu_map[cpu]
						.apic_id;
				break;
			}
		}
	}
#else
	CPU_STATE_apic_id(pcpu) = apic_id;
#endif

	SEP_DRV_LOG_TRACE_OUT("Apic_id[%d] is %d.", cpu,
			      CPU_STATE_apic_id(pcpu));
}

/*!
 * @fn extern VOID APIC_Init(param)
 *
 * @brief initialize the local APIC (records this cpu's APIC id)
 *
 * @param int cpu_idx - The cpu to deinit
 *
 * @return None
 *
 * Special Notes:
 *     This routine is expected to be called via the CONTROL_Parallel routine
 */
VOID APIC_Init(PVOID param)
{
	S32 me;

	SEP_DRV_LOG_TRACE_IN("Param: %p.", param);

	// param == NULL means "the calling cpu"; otherwise it points at
	// the target cpu index
	if (param == NULL) {
		// disable preemption so the cpu id cannot change under us
		preempt_disable();
		me = CONTROL_THIS_CPU();
		preempt_enable();
	} else {
		me = *(S32 *)param;
	}

	apic_Get_APIC_ID(me);

	SEP_DRV_LOG_TRACE_OUT("");
}

/*!
 * @fn extern VOID APIC_Install_Interrupt_Handler(param)
 *
 * @brief Install the interrupt handler
 *
 * @param int param - The linear address of the Local APIC
 *
 * @return None
 *
 * Special Notes:
 *     The linear address is necessary if the LAPIC is used. If X2APIC is
 *     used the linear address is not necessary.
 *     Saves the current LVTPC entry (for APIC_Restore_LVTPC) and routes
 *     PMU interrupts through NMI delivery.
 */
VOID APIC_Install_Interrupt_Handler(PVOID param)
{
	SEP_DRV_LOG_TRACE_IN("Param: %p.", param);

#if !defined(DRV_SEP_ACRN_ON)
	per_cpu(saved_apic_lvtpc, CONTROL_THIS_CPU()) = apic_read(APIC_LVTPC);
	apic_write(APIC_LVTPC, APIC_DM_NMI);
#endif

	SEP_DRV_LOG_TRACE_OUT("");
}

/*!
 * @fn extern VOID APIC_Enable_PMI(void)
 *
 * @brief Enable the PMU interrupt
 *
 * @param None
 *
 * @return None
 *
 * Special Notes:
 *     Re-arms LVTPC with NMI delivery (the entry is masked automatically
 *     when a PMI is delivered).
 */
VOID APIC_Enable_Pmi(VOID)
{
	SEP_DRV_LOG_TRACE_IN("");

#if !defined(DRV_SEP_ACRN_ON)
	apic_write(APIC_LVTPC, APIC_DM_NMI);
#endif

	SEP_DRV_LOG_TRACE_OUT("");
}

/*!
 * @fn extern VOID APIC_Restore_LVTPC(void)
 *
 * @brief Restore APIC LVTPC value
 *
 * @param None
 *
 * @return None
 *
 * Special Notes:
 *     param is accepted (for the parallel-dispatch signature) but unused.
 *     Writes back the per-cpu LVTPC entry captured by
 *     APIC_Install_Interrupt_Handler().
 */
VOID APIC_Restore_LVTPC(PVOID param)
{
	SEP_DRV_LOG_TRACE_IN("");

#if !defined(DRV_SEP_ACRN_ON)
	apic_write(APIC_LVTPC, per_cpu(saved_apic_lvtpc, CONTROL_THIS_CPU()));
#endif

	SEP_DRV_LOG_TRACE_OUT("");
}
diff --git a/drivers/platform/x86/sepdk/sep/chap.c b/drivers/platform/x86/sepdk/sep/chap.c
new file mode 100755
index 0000000000000..434e9aeb658e8
--- /dev/null
+++ b/drivers/platform/x86/sepdk/sep/chap.c
@@ -0,0 +1,474 @@
/* ****************************************************************************
 * Copyright(C) 2009-2018 Intel Corporation. All Rights Reserved.
 *
 * This file is part of SEP Development Kit
 *
 * SEP Development Kit is free software; you can redistribute it
 * and/or modify it under the terms of the GNU General Public License
 * version 2 as published by the Free Software Foundation.
 *
 * SEP Development Kit is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * As a special exception, you may use this file as part of a free software
 * library without restriction. Specifically, if other files instantiate
 * templates or use macros or inline functions from this file, or you
 * compile this file and link it with other files to produce an executable
 * this file does not by itself cause the resulting executable to be
 * covered by the GNU General Public License. This exception does not
 * however invalidate any other reasons why the executable file might be
 * covered by the GNU General Public License.
 * ****************************************************************************
 */

/* NOTE(review): the two includes below lost their <...> targets during
 * extraction -- restore the original linux headers before building. */
#include
#include

#include "lwpmudrv_defines.h"
#include "lwpmudrv_types.h"
#include "rise_errors.h"
#include "lwpmudrv_ecb.h"
#include "lwpmudrv_struct.h"
#include "lwpmudrv_chipset.h"
#include "inc/lwpmudrv.h"
#include "inc/control.h"
#include "inc/ecb_iterators.h"
#include "inc/utility.h"
#include "inc/chap.h"

extern DRV_CONFIG drv_cfg;
extern CHIPSET_CONFIG pma;
extern CPU_STATE pcb;

/* ------------------------------------------------------------------------- */
/*!
 * @fn static U32 chap_Init_Chipset(void)
 *
 * @brief Chipset PMU initialization: maps the MCH/ICH/NOA CHAP register
 *        windows (once each) when chipset collection is enabled.
 *
 * @param None
 *
 * @return VT_SUCCESS if successful, otherwise error
 *
 * Special Notes:
 *     The virtual_address == 0 guard makes the ioremap idempotent.
 */
static U32 chap_Init_Chipset(void)
{
	U32 i;
	CHIPSET_SEGMENT mch_chipset_seg = &CHIPSET_CONFIG_mch(pma);
	CHIPSET_SEGMENT ich_chipset_seg = &CHIPSET_CONFIG_ich(pma);
	CHIPSET_SEGMENT noa_chipset_seg = &CHIPSET_CONFIG_noa(pma);

	SEP_DRV_LOG_TRACE_IN("");

	SEP_DRV_LOG_TRACE("Initializing chipset ...");

	if (DRV_CONFIG_enable_chipset(drv_cfg)) {
		// force a baseline capture on the first interrupt per cpu
		for (i = 0; i < GLOBAL_STATE_num_cpus(driver_state); i++) {
			pcb[i].chipset_count_init = TRUE;
		}
		if ((CHIPSET_CONFIG_mch_chipset(pma)) &&
		    (CHIPSET_SEGMENT_virtual_address(mch_chipset_seg) == 0)) {
			// Map virtual address of PCI CHAP interface
			CHIPSET_SEGMENT_virtual_address(
				mch_chipset_seg) =
				(U64)(UIOP)ioremap_nocache(
					CHIPSET_SEGMENT_physical_address(
						mch_chipset_seg),
					CHIPSET_SEGMENT_size(
						mch_chipset_seg));
		}

		if ((CHIPSET_CONFIG_ich_chipset(pma)) &&
		    (CHIPSET_SEGMENT_virtual_address(ich_chipset_seg) == 0)) {
			// Map the virtual address of PCI CHAP interface
			CHIPSET_SEGMENT_virtual_address(
				ich_chipset_seg) =
				(U64)(UIOP)ioremap_nocache(
					CHIPSET_SEGMENT_physical_address(
						ich_chipset_seg),
					CHIPSET_SEGMENT_size(
						ich_chipset_seg));
		}

		// Here we map the MMIO registers for the Gen X processors.
		if ((CHIPSET_CONFIG_noa_chipset(pma)) &&
		    (CHIPSET_SEGMENT_virtual_address(noa_chipset_seg) == 0)) {
			// Map the virtual address of PCI CHAP interface
			CHIPSET_SEGMENT_virtual_address(
				noa_chipset_seg) =
				(U64)(UIOP)ioremap_nocache(
					CHIPSET_SEGMENT_physical_address(
						noa_chipset_seg),
					CHIPSET_SEGMENT_size(
						noa_chipset_seg));
		}

		//
		// always collect processor events
		//
		CHIPSET_CONFIG_processor(pma) = 1;
	} else {
		CHIPSET_CONFIG_processor(pma) = 0;
	}
	SEP_DRV_LOG_TRACE("Initializing chipset done.");

	SEP_DRV_LOG_TRACE_OUT("");
	return VT_SUCCESS;
}

/* ------------------------------------------------------------------------- */
/*!
 * @fn static U32 chap_Start_Chipset(void)
 * @param None
 * @return VT_SUCCESS if successful, otherwise error
 * @brief Start collection on the Chipset PMU
 *
 * Special Notes:
 *     NOTE(review): declared VOID although the header comment says U32.
 *     Writes 0x00040000 (reset to zero) then 0x00010000 (restart) to each
 *     mapped MCH/ICH CHAP command register.
 */
static VOID chap_Start_Chipset(void)
{
	U32 i;
	CHAP_INTERFACE chap;
	CHIPSET_SEGMENT mch_chipset_seg = &CHIPSET_CONFIG_mch(pma);
	CHIPSET_SEGMENT ich_chipset_seg = &CHIPSET_CONFIG_ich(pma);

	SEP_DRV_LOG_TRACE_IN("");

	//
	// reset and start chipset counters
	//
	SEP_DRV_LOG_TRACE("Starting chipset counters...\n");
	if (pma) {
		chap = (CHAP_INTERFACE)(UIOP)CHIPSET_SEGMENT_virtual_address(
			mch_chipset_seg);
		if (chap != NULL) {
			for (i = 0;
			     i < CHIPSET_SEGMENT_total_events(mch_chipset_seg);
			     i++) {
				CHAP_INTERFACE_command_register(&chap[i]) =
					0x00040000; // Reset to zero
				CHAP_INTERFACE_command_register(&chap[i]) =
					0x00010000; // Restart
			}
		}

		chap = (CHAP_INTERFACE)(UIOP)CHIPSET_SEGMENT_virtual_address(
			ich_chipset_seg);
		if (chap != NULL) {
			for (i = 0;
			     i < CHIPSET_SEGMENT_total_events(ich_chipset_seg);
			     i++) {
				CHAP_INTERFACE_command_register(&chap[i]) =
					0x00040000; // Reset to zero
				CHAP_INTERFACE_command_register(&chap[i]) =
					0x00010000; // Restart
			}
		}
	}

	SEP_DRV_LOG_TRACE("Starting chipset counters done.\n");

	SEP_DRV_LOG_TRACE_OUT("");
}

/*
------------------------------------------------------------------------- */
/*!
 * @fn static U32 chap_Read_Counters(PVOID param)
 *
 * @brief Sample and read the CHAP counter data into the output buffer
 *
 * @param PVOID param - address of the buffer to write into
 *
 * @return None
 *
 * Special Notes:
 *     Writes MCH, then ICH, then NOA (MMIO) counter values sequentially
 *     into the U64 buffer, converting each raw reading into a delta versus
 *     the previously saved per-cpu value; finally appends (and zeroes) the
 *     data registers from the current event control block.
 *
 *     NOTE(review): the wrap correction "cur + (U32)(-1) - last" computes
 *     (cur - last - 1) mod 2^32, which looks off by one for a counter
 *     wrapping at 2^32 -- confirm the CHAP counter width before changing.
 */
static VOID chap_Read_Counters(PVOID param)
{
	U64 *data;
	CHAP_INTERFACE chap;
	U32 mch_cpu;
	int i, data_index;
	U64 tmp_data;
	U64 *mch_data;
	U64 *ich_data;
	U64 *mmio_data;
	U64 *mmio;
	U32 this_cpu;
	CHIPSET_SEGMENT mch_chipset_seg = &CHIPSET_CONFIG_mch(pma);
	CHIPSET_SEGMENT ich_chipset_seg = &CHIPSET_CONFIG_ich(pma);
	CHIPSET_SEGMENT noa_chipset_seg = &CHIPSET_CONFIG_noa(pma);

	SEP_DRV_LOG_TRACE_IN("");

	this_cpu = CONTROL_THIS_CPU();
	data = param;
	data_index = 0;

	// Save the Motherboard time. This is universal time for this
	// system. This is the only 64-bit timer so we save it first so
	// always aligned on 64-bit boundary that way.

	if (CHIPSET_CONFIG_mch_chipset(pma)) {
		mch_data = data + data_index;
		// Save the MCH counters.
		chap = (CHAP_INTERFACE)(UIOP)CHIPSET_SEGMENT_virtual_address(
			mch_chipset_seg);
		// latch all configured counters into their data registers
		for (i = CHIPSET_SEGMENT_start_register(mch_chipset_seg);
		     i < CHIPSET_SEGMENT_total_events(mch_chipset_seg); i++) {
			CHAP_INTERFACE_command_register(&chap[i]) =
				0x00020000; // Sample
		}

		// The StartingReadRegister is only used for special event
		// configs that use CHAP counters to trigger events in other
		// CHAP counters. This is an unusual request but useful in
		// getting the number of lit subspans - implying a count of the
		// number of triangles. I am not sure it will be used
		// elsewhere. We cannot read some of the counters because it
		// will invalidate their configuration to trigger other CHAP
		// counters. Yuk!
		// NOTE(review): entries [0, start_register) of mch_data are
		// skipped here but still read by the baseline/delta loops
		// below -- confirm the buffer is pre-zeroed by the caller.
		data_index += CHIPSET_SEGMENT_start_register(mch_chipset_seg);
		for (i = CHIPSET_SEGMENT_start_register(mch_chipset_seg);
		     i < CHIPSET_SEGMENT_total_events(mch_chipset_seg); i++) {
			data[data_index++] =
				CHAP_INTERFACE_data_register(&chap[i]);
		}

		// Initialize the counters on the first interrupt
		if (pcb[this_cpu].chipset_count_init == TRUE) {
			for (i = 0;
			     i < CHIPSET_SEGMENT_total_events(mch_chipset_seg);
			     i++) {
				pcb[this_cpu].last_mch_count[i] = mch_data[i];
			}
		}

		// Now compute the delta!
		// NOTE: Special modification to accomodate Gen 4 work - count
		// everything since last interrupt - regardless of cpu! This
		// way there is only one count of the Gen 4 counters.
		//
		// NOTE(review): baseline above is stored under this_cpu but
		// the delta below may use cpu 0 (when !host_proc_run) --
		// presumably intentional per the Gen 4 note; verify.
		mch_cpu = CHIPSET_CONFIG_host_proc_run(pma) ? this_cpu : 0;
		for (i = 0; i < CHIPSET_SEGMENT_total_events(mch_chipset_seg);
		     i++) {
			tmp_data = mch_data[i];
			if (mch_data[i] < pcb[mch_cpu].last_mch_count[i]) {
				mch_data[i] = mch_data[i] + (U32)(-1) -
					      pcb[mch_cpu].last_mch_count[i];
			} else {
				mch_data[i] = mch_data[i] -
					      pcb[mch_cpu].last_mch_count[i];
			}
			pcb[mch_cpu].last_mch_count[i] = tmp_data;
		}
	}

	if (CHIPSET_CONFIG_ich_chipset(pma)) {
		// Save the ICH counters.
		ich_data = data + data_index;
		chap = (CHAP_INTERFACE)(UIOP)CHIPSET_SEGMENT_virtual_address(
			ich_chipset_seg);
		// latch, then read, every ICH counter
		for (i = 0; i < CHIPSET_SEGMENT_total_events(ich_chipset_seg);
		     i++) {
			CHAP_INTERFACE_command_register(&chap[i]) =
				0x00020000; // Sample
		}

		for (i = 0; i < CHIPSET_SEGMENT_total_events(ich_chipset_seg);
		     i++) {
			data[data_index++] =
				CHAP_INTERFACE_data_register(&chap[i]);
		}

		// Initialize the counters on the first interrupt
		if (pcb[this_cpu].chipset_count_init == TRUE) {
			for (i = 0;
			     i < CHIPSET_SEGMENT_total_events(ich_chipset_seg);
			     i++) {

				pcb[this_cpu].last_ich_count[i] = ich_data[i];
			}
		}

		// Now compute the delta!
		for (i = 0; i < CHIPSET_SEGMENT_total_events(ich_chipset_seg);
		     i++) {
			tmp_data = ich_data[i];
			if (ich_data[i] < pcb[this_cpu].last_ich_count[i]) {
				ich_data[i] = ich_data[i] + (U32)(-1) -
					      pcb[this_cpu].last_ich_count[i];
			} else {
				ich_data[i] = ich_data[i] -
					      pcb[this_cpu].last_ich_count[i];
			}
			pcb[this_cpu].last_ich_count[i] = tmp_data;
		}
	}

	if (CHIPSET_CONFIG_noa_chipset(pma)) {
		// Save the MMIO counters.
		mmio_data = data + data_index;
		mmio = (U64 *)(UIOP)CHIPSET_SEGMENT_virtual_address(
			noa_chipset_seg);

		// NOTE(review): the fixed offset 2244 (in U64 units) into the
		// NOA MMIO window is undocumented here -- presumably the base
		// of the counter bank; confirm against hardware docs.
		for (i = 0; i < CHIPSET_SEGMENT_total_events(noa_chipset_seg);
		     i++) {
			data[data_index++] =
				mmio[i * 2 + 2244]; // 64-bit quantity
		}

		// Initialize the counters on the first interrupt
		if (pcb[this_cpu].chipset_count_init == TRUE) {
			for (i = 0;
			     i < CHIPSET_SEGMENT_total_events(noa_chipset_seg);
			     i++) {
				pcb[this_cpu].last_mmio_count[i] = mmio_data[i];
			}
		}

		// Now compute the delta!
		for (i = 0; i < CHIPSET_SEGMENT_total_events(noa_chipset_seg);
		     i++) {
			tmp_data = mmio_data[i];
			if (mmio_data[i] < pcb[this_cpu].last_mmio_count[i]) {
				mmio_data[i] = mmio_data[i] + (U32)(-1) -
					       pcb[this_cpu].last_mmio_count[i];
			} else {
				mmio_data[i] = mmio_data[i] -
					       pcb[this_cpu].last_mmio_count[i];
			}
			pcb[this_cpu].last_mmio_count[i] = tmp_data;
		}
	}

	// baseline captured; subsequent reads on this cpu report deltas
	pcb[this_cpu].chipset_count_init = FALSE;

	// append the ECB data registers, zeroing each after the read
	FOR_EACH_DATA_REG(pecb, i)
	{
		data[data_index++] = SYS_Read_MSR(ECB_entries_reg_id(pecb, i));
		SYS_Write_MSR(ECB_entries_reg_id(pecb, i), (U64)0);
	}
	END_FOR_EACH_DATA_REG;

	SEP_DRV_LOG_TRACE_OUT("");
}

/* ------------------------------------------------------------------------- */
/*!
+ * @fn static VOID chap_Stop_Chipset(void) + * + * @brief Stop the Chipset PMU + * + * @param None + * + * @return None + * + * Special Notes: + * + */ +static VOID chap_Stop_Chipset(void) +{ + U32 i; + CHAP_INTERFACE chap; + CHIPSET_SEGMENT mch_chipset_seg = &CHIPSET_CONFIG_mch(pma); + CHIPSET_SEGMENT ich_chipset_seg = &CHIPSET_CONFIG_ich(pma); + + SEP_DRV_LOG_TRACE_IN(""); + + // + // reset and start chipset counters + // + SEP_DRV_LOG_TRACE("Stopping chipset counters..."); + + if (pma == NULL) { + return; + } + + if (CHIPSET_CONFIG_mch_chipset(pma)) { + chap = (CHAP_INTERFACE)(UIOP) + CHIPSET_SEGMENT_virtual_address(mch_chipset_seg); + if (chap != NULL) { + for (i = 0; + i < CHIPSET_SEGMENT_total_events(mch_chipset_seg); + i++) { + CHAP_INTERFACE_command_register(&chap[i]) + = 0x00000000; // Stop + CHAP_INTERFACE_command_register(&chap[i]) + = 0x00040000; // Reset to Zero + } + } + } + + if (CHIPSET_CONFIG_ich_chipset(pma)) { + chap = (CHAP_INTERFACE)(UIOP) + CHIPSET_SEGMENT_virtual_address( + ich_chipset_seg); + if (chap != NULL) { + for (i = 0; + i < CHIPSET_SEGMENT_total_events(ich_chipset_seg); + i++) { + CHAP_INTERFACE_command_register(&chap[i]) + = 0x00000000; // Stop + CHAP_INTERFACE_command_register(&chap[i]) + = 0x00040000; // Reset to Zero + } + } + } + + if (CHIPSET_CONFIG_mch_chipset(pma) && + CHIPSET_SEGMENT_virtual_address(mch_chipset_seg)) { + + iounmap((void __iomem *)(UIOP)CHIPSET_SEGMENT_virtual_address( + mch_chipset_seg)); + CHIPSET_SEGMENT_virtual_address(mch_chipset_seg) = 0; + } + + if (CHIPSET_CONFIG_ich_chipset(pma) && + CHIPSET_SEGMENT_virtual_address(ich_chipset_seg)) { + + iounmap((void __iomem *)(UIOP)CHIPSET_SEGMENT_virtual_address( + ich_chipset_seg)); + CHIPSET_SEGMENT_virtual_address(ich_chipset_seg) = 0; + } + CONTROL_Free_Memory(pma); + pma = NULL; + + SEP_DRV_LOG_TRACE("Stopped chipset counters."); + + SEP_DRV_LOG_TRACE_OUT(""); +} + +/* ------------------------------------------------------------------------- */ +/*! 
+ * @fn static VOID chap_Fini_Chipset(void) + * + * @brief Finish routine on a per-logical-core basis + * + * @param None + * + * @return None + * + * Special Notes: + * + */ +static VOID chap_Fini_Chipset(void) +{ + SEP_DRV_LOG_TRACE_IN(""); + SEP_DRV_LOG_TRACE_OUT("Empty function."); +} + +CS_DISPATCH_NODE chap_dispatch = { + .init_chipset = chap_Init_Chipset, + .start_chipset = chap_Start_Chipset, + .read_counters = chap_Read_Counters, + .stop_chipset = chap_Stop_Chipset, + .fini_chipset = chap_Fini_Chipset, + .Trigger_Read = NULL +}; diff --git a/drivers/platform/x86/sepdk/sep/control.c b/drivers/platform/x86/sepdk/sep/control.c new file mode 100755 index 0000000000000..474de2c3e578e --- /dev/null +++ b/drivers/platform/x86/sepdk/sep/control.c @@ -0,0 +1,896 @@ +/* **************************************************************************** + * Copyright(C) 2009-2018 Intel Corporation. All Rights Reserved. + * + * This file is part of SEP Development Kit + * + * SEP Development Kit is free software; you can redistribute it + * and/or modify it under the terms of the GNU General Public License + * version 2 as published by the Free Software Foundation. + * + * SEP Development Kit is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * As a special exception, you may use this file as part of a free software + * library without restriction. Specifically, if other files instantiate + * templates or use macros or inline functions from this file, or you + * compile this file and link it with other files to produce an executable + * this file does not by itself cause the resulting executable to be + * covered by the GNU General Public License. This exception does not + * however invalidate any other reasons why the executable file might be + * covered by the GNU General Public License. 
+ * **************************************************************************** + */ + +#include "lwpmudrv_defines.h" +#include +#include +#include +#include +#include + +#include "lwpmudrv_types.h" +#include "rise_errors.h" +#include "lwpmudrv_ecb.h" +#include "lwpmudrv.h" +#include "control.h" +#include "utility.h" +#include +#include + +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27) +#define SMP_CALL_FUNCTION(func, ctx, retry, wait) \ + smp_call_function((func), (ctx), (wait)) +#define SMP_CALL_FUNCTION_SINGLE(cpuid, func, ctx, retry, wait) \ + smp_call_function_single((cpuid), (func), (ctx), (wait)) +#define ON_EACH_CPU(func, ctx, retry, wait) on_each_cpu((func), (ctx), (wait)) +#else +#define SMP_CALL_FUNCTION(func, ctx, retry, wait) \ + smp_call_function((func), (ctx), (retry), (wait)) +#define SMP_CALL_FUNCTION_SINGLE(cpuid, func, ctx, retry, wait) \ + smp_call_function_single((cpuid), (func), (ctx), (retry), (wait)) +#define ON_EACH_CPU(func, ctx, retry, wait) \ + on_each_cpu((func), (ctx), (retry), (wait)) +#endif + +#if defined(DRV_SEP_ACRN_ON) +void (*local_vfree_atomic)(const void *addr) = NULL; +#endif + +/* + */ +GLOBAL_STATE_NODE driver_state; +MSR_DATA msr_data; +static MEM_TRACKER mem_tr_head; // start of the mem tracker list +static MEM_TRACKER mem_tr_tail; // end of mem tracker list +static spinlock_t mem_tr_lock; // spinlock for mem tracker list +static unsigned long flags; + +/* ------------------------------------------------------------------------- */ +/*! 
+ * @fn VOID CONTROL_Invoke_Cpu (func, ctx, arg) + * + * @brief Set up a DPC call and insert it into the queue + * + * @param IN cpu_idx - the core id to dispatch this function to + * IN func - function to be invoked by the specified core(s) + * IN ctx - pointer to the parameter block for each function + * invocation + * + * @return None + * + * Special Notes: + * + */ +VOID CONTROL_Invoke_Cpu(int cpu_idx, VOID (*func)(PVOID), PVOID ctx) +{ + SEP_DRV_LOG_TRACE_IN("CPU: %d, function: %p, ctx: %p.", cpu_idx, func, + ctx); + SMP_CALL_FUNCTION_SINGLE(cpu_idx, func, ctx, 0, 1); + SEP_DRV_LOG_TRACE_OUT(""); +} + +/* ------------------------------------------------------------------------- */ +/* + * @fn VOID CONTROL_Invoke_Parallel_Service(func, ctx, blocking, exclude) + * + * @param func - function to be invoked by each core in the system + * @param ctx - pointer to the parameter block for each function invocation + * @param blocking - Wait for invoked function to complete + * @param exclude - exclude the current core from executing the code + * + * @returns None + * + * @brief Service routine to handle all kinds of parallel invoke on all CPU calls + * + * Special Notes: + * Invoke the function provided in parallel in either a blocking or + * non-blocking mode. The current core may be excluded if desired. + * NOTE - Do not call this function directly from source code. + * Use the aliases CONTROL_Invoke_Parallel(), CONTROL_Invoke_Parallel_NB(), + * or CONTROL_Invoke_Parallel_XS(). 
+ * + */ +VOID CONTROL_Invoke_Parallel_Service(VOID (*func)(PVOID), PVOID ctx, + int blocking, int exclude) +{ + SEP_DRV_LOG_TRACE_IN("Fn: %p, ctx: %p, block: %d, excl: %d.", + func, ctx, blocking, exclude); + + GLOBAL_STATE_cpu_count(driver_state) = 0; + GLOBAL_STATE_dpc_count(driver_state) = 0; + + if (GLOBAL_STATE_num_cpus(driver_state) == 1) { + if (!exclude) { + func(ctx); + } + SEP_DRV_LOG_TRACE_OUT(""); + return; + } + if (!exclude) { + ON_EACH_CPU(func, ctx, 0, blocking); + SEP_DRV_LOG_TRACE_OUT(""); + return; + } + + preempt_disable(); + SMP_CALL_FUNCTION(func, ctx, 0, blocking); + preempt_enable(); + + SEP_DRV_LOG_TRACE_OUT(""); +} + +/* ------------------------------------------------------------------------- */ +/* + * @fn VOID control_Memory_Tracker_Delete_Node(mem_tr) + * + * @param IN mem_tr - memory tracker node to delete + * + * @returns None + * + * @brief Delete specified node in the memory tracker + * + * Special Notes: + * Assumes mem_tr_lock is already held while calling this function! 
+ */ +static VOID control_Memory_Tracker_Delete_Node(MEM_TRACKER mem_tr) +{ + MEM_TRACKER prev_tr = NULL; + MEM_TRACKER next_tr = NULL; + U32 size = 0; + + SEP_DRV_LOG_ALLOC_IN(""); + + if (!mem_tr) { + SEP_DRV_LOG_ALLOC_OUT("mem_tr is NULL!"); + return; + } + size = MEM_TRACKER_max_size(mem_tr) * sizeof(MEM_EL_NODE); + // update the linked list + prev_tr = MEM_TRACKER_prev(mem_tr); + next_tr = MEM_TRACKER_next(mem_tr); + if (prev_tr) { + MEM_TRACKER_next(prev_tr) = next_tr; + } + if (next_tr) { + MEM_TRACKER_prev(next_tr) = prev_tr; + } + + // free the allocated mem_el array (if any) + if (MEM_TRACKER_mem(mem_tr)) { + if (MEM_TRACKER_array_vmalloc(mem_tr)) { + vfree(MEM_TRACKER_mem(mem_tr)); + } else { + if (size < MAX_KMALLOC_SIZE) { + kfree(MEM_TRACKER_mem(mem_tr)); + } else { + free_pages( + (unsigned long)MEM_TRACKER_mem(mem_tr), + get_order(size)); + } + } + } + + // free the mem_tracker node + if (MEM_TRACKER_node_vmalloc(mem_tr)) { + vfree(mem_tr); + } else { + kfree(mem_tr); + } + SEP_DRV_LOG_ALLOC_OUT(""); +} + +/* ------------------------------------------------------------------------- */ +/* + * @fn VOID control_Memory_Tracker_Create_Node(void) + * + * @param None - size of the memory to allocate + * + * @returns OS_SUCCESS if successful, otherwise error + * + * @brief Initialize the memory tracker + * + * Special Notes: + * Assumes mem_tr_lock is already held while calling this function! + * + * Since this function can be called within either GFP_KERNEL or + * GFP_ATOMIC contexts, the most restrictive allocation is used + * (viz., GFP_ATOMIC). 
+ */ +static U32 control_Memory_Tracker_Create_Node(void) +{ + U32 size = MEM_EL_MAX_ARRAY_SIZE * sizeof(MEM_EL_NODE); + PVOID location = NULL; + MEM_TRACKER mem_tr = NULL; + + SEP_DRV_LOG_ALLOC_IN(""); + + // create a mem tracker node + mem_tr = (MEM_TRACKER)kmalloc(sizeof(MEM_TRACKER_NODE), GFP_ATOMIC); + if (!mem_tr) { + mem_tr = (MEM_TRACKER)vmalloc(sizeof(MEM_TRACKER_NODE)); + if (mem_tr) { + MEM_TRACKER_node_vmalloc(mem_tr) = 1; + } else { + SEP_DRV_LOG_ERROR_ALLOC_OUT( + "Failed to allocate mem tracker node."); + return OS_FAULT; + } + } else { + MEM_TRACKER_node_vmalloc(mem_tr) = 0; + } + SEP_DRV_LOG_TRACE("Node %p, vmalloc %d.", mem_tr, + MEM_TRACKER_node_vmalloc(mem_tr)); + + // create an initial array of mem_el's inside the mem tracker node + MEM_TRACKER_array_vmalloc(mem_tr) = 0; + if (size < MAX_KMALLOC_SIZE) { + location = (PVOID)kmalloc(size, GFP_ATOMIC); + SEP_DRV_LOG_ALLOC("Allocated small memory (0x%p, %d).", + location, (S32)size); + } else { + location = (PVOID)__get_free_pages(GFP_ATOMIC, get_order(size)); + SEP_DRV_LOG_ALLOC("Allocated large memory (0x%p, %d).", + location, (S32)size); + } + if (!location) { + location = (PVOID)vmalloc(size); + if (location) { + MEM_TRACKER_array_vmalloc(mem_tr) = 1; + SEP_DRV_LOG_ALLOC( + "Allocated memory (vmalloc) (0x%p, %d).", + location, (S32)size); + } else { + if (MEM_TRACKER_node_vmalloc(mem_tr)) { + vfree(mem_tr); + } else { + kfree(mem_tr); + } + SEP_DRV_LOG_ERROR_ALLOC_OUT( + "Failed to allocate mem_el array... 
deleting node."); + return OS_FAULT; + } + } + + // initialize new mem tracker node + MEM_TRACKER_mem(mem_tr) = location; + MEM_TRACKER_prev(mem_tr) = NULL; + MEM_TRACKER_next(mem_tr) = NULL; + + // initialize mem_tracker's mem_el array + MEM_TRACKER_max_size(mem_tr) = MEM_EL_MAX_ARRAY_SIZE; + MEM_TRACKER_elements(mem_tr) = 0; + memset(MEM_TRACKER_mem(mem_tr), 0, size); + + // update the linked list + if (!mem_tr_head) { + mem_tr_head = mem_tr; + } else { + MEM_TRACKER_prev(mem_tr) = mem_tr_tail; + MEM_TRACKER_next(mem_tr_tail) = mem_tr; + } + mem_tr_tail = mem_tr; + + SEP_DRV_LOG_ALLOC_OUT("Allocated node=0x%p, max_elements=%d, size=%d.", + MEM_TRACKER_mem(mem_tr_tail), + MEM_EL_MAX_ARRAY_SIZE, size); + return OS_SUCCESS; +} + +/* ------------------------------------------------------------------------- */ +/* + * @fn VOID control_Memory_Tracker_Add(location, size, vmalloc_flag) + * + * @param IN location - memory location + * @param IN size - size of the memory to allocate + * @param IN vmalloc_flag - flag that indicates if the allocation was done with vmalloc + * + * @returns None + * + * @brief Keep track of allocated memory with memory tracker + * + * Special Notes: + * Starting from first mem_tracker node, the algorithm + * finds the first "hole" in the mem_tracker list and + * tracks the memory allocation there. 
+ */ +static U32 control_Memory_Tracker_Add(PVOID location, ssize_t size, + DRV_BOOL vmalloc_flag) +{ + S32 i, n; + U32 status; + DRV_BOOL found; + MEM_TRACKER mem_tr; + + SEP_DRV_LOG_ALLOC_IN("Location: %p, size: %u, flag: %u.", location, + (U32)size, vmalloc_flag); + + spin_lock_irqsave(&mem_tr_lock, flags); + + // check if there is space in ANY of mem_tracker's nodes for the memory item + mem_tr = mem_tr_head; + found = FALSE; + status = OS_SUCCESS; + i = n = 0; + while (mem_tr && (!found)) { + if (MEM_TRACKER_elements(mem_tr) < + MEM_TRACKER_max_size(mem_tr)) { + for (i = 0; i < MEM_TRACKER_max_size(mem_tr); i++) { + if (!MEM_TRACKER_mem_address(mem_tr, i)) { + SEP_DRV_LOG_ALLOC( + "Found index %d of %d available.", + i, + MEM_TRACKER_max_size(mem_tr) - + 1); + n = i; + found = TRUE; + break; + } + } + } + if (!found) { + mem_tr = MEM_TRACKER_next(mem_tr); + } + } + + if (!found) { + // extend into (i.e., create new) mem_tracker node ... + status = control_Memory_Tracker_Create_Node(); + if (status != OS_SUCCESS) { + SEP_DRV_LOG_ERROR("Unable to create mem tracker node."); + goto finish_add; + } + // use mem tracker tail node and first available entry in mem_el array + mem_tr = mem_tr_tail; + n = 0; + } + + // we now have a location in mem tracker to keep track of the memory item + MEM_TRACKER_mem_address(mem_tr, n) = location; + MEM_TRACKER_mem_size(mem_tr, n) = size; + MEM_TRACKER_mem_vmalloc(mem_tr, n) = vmalloc_flag; + MEM_TRACKER_elements(mem_tr)++; + SEP_DRV_LOG_ALLOC("Tracking (0x%p, %d) in node %d of %d.", location, + (S32)size, n, MEM_TRACKER_max_size(mem_tr) - 1); + +finish_add: + spin_unlock_irqrestore(&mem_tr_lock, flags); + + SEP_DRV_LOG_ALLOC_OUT("Result: %u.", status); + return status; +} + +/* ------------------------------------------------------------------------- */ +/* + * @fn VOID CONTROL_Memory_Tracker_Init(void) + * + * @param None + * + * @returns None + * + * @brief Initializes Memory Tracker + * + * Special Notes: + * This should only 
be called when the driver is being loaded. + */ +VOID CONTROL_Memory_Tracker_Init(void) +{ + SEP_DRV_LOG_ALLOC_IN("Initializing mem tracker."); + + mem_tr_head = NULL; + mem_tr_tail = NULL; + + spin_lock_init(&mem_tr_lock); + + SEP_DRV_LOG_ALLOC_OUT(""); +} + +/* ------------------------------------------------------------------------- */ +/* + * @fn VOID CONTROL_Memory_Tracker_Free(void) + * + * @param None + * + * @returns None + * + * @brief Frees memory used by Memory Tracker + * + * Special Notes: + * This should only be called when the driver is being unloaded. + */ +VOID CONTROL_Memory_Tracker_Free(void) +{ + S32 i; + MEM_TRACKER temp; + + SEP_DRV_LOG_ALLOC_IN("Destroying mem tracker."); + + spin_lock_irqsave(&mem_tr_lock, flags); + + // check for any memory that was not freed, and free it + while (mem_tr_head) { + if (MEM_TRACKER_elements(mem_tr_head)) { + for (i = 0; i < MEM_TRACKER_max_size(mem_tr_head); + i++) { + if (MEM_TRACKER_mem_address(mem_tr_head, i)) { + SEP_DRV_LOG_WARNING( + "Index %d of %d, not freed (0x%p, %d) ... 
freeing now.", + i, + MEM_TRACKER_max_size( + mem_tr_head) - + 1, + MEM_TRACKER_mem_address( + mem_tr_head, i), + MEM_TRACKER_mem_size( + mem_tr_head, i)); + + if (MEM_TRACKER_mem_vmalloc(mem_tr_head, + i)) { + vfree(MEM_TRACKER_mem_address( + mem_tr_head, i)); + } else { + free_pages( + (unsigned long) + MEM_TRACKER_mem_address( + mem_tr_head, + i), + get_order(MEM_TRACKER_mem_size( + mem_tr_head, + i))); + } + MEM_TRACKER_mem_address(mem_tr_head, + i) = NULL; + MEM_TRACKER_mem_size(mem_tr_head, i) = + 0; + MEM_TRACKER_mem_vmalloc(mem_tr_head, + i) = 0; + } + } + } + temp = mem_tr_head; + mem_tr_head = MEM_TRACKER_next(mem_tr_head); + control_Memory_Tracker_Delete_Node(temp); + } + + mem_tr_tail = NULL; + + spin_unlock_irqrestore(&mem_tr_lock, flags); + + SEP_DRV_LOG_ALLOC_OUT("Mem tracker destruction complete."); +} + +/* ------------------------------------------------------------------------- */ +/* + * @fn VOID CONTROL_Memory_Tracker_Compaction(void) + * + * @param None + * + * @returns None + * + * @brief Compacts the memory allocator if holes are detected + * + * Special Notes: + * The algorithm compacts mem_tracker nodes such that + * node entries are full starting from mem_tr_head + * up until the first empty node is detected, after + * which nodes up to mem_tr_tail will be empty. + * At end of collection (or at other safe sync point), + * we reclaim/compact space used by mem tracker. + */ +VOID CONTROL_Memory_Tracker_Compaction(void) +{ + S32 i, j, n, m, c, d; + DRV_BOOL found, overlap; + MEM_TRACKER mem_tr1, mem_tr2, empty_tr; + + SEP_DRV_LOG_FLOW_IN(""); + + spin_lock_irqsave(&mem_tr_lock, flags); + + mem_tr1 = mem_tr_head; + + i = j = n = c = d = 0; + + /* + * step1: free up the track node which does not contain any elements. 
+ */ + while (mem_tr1) { + SEP_DRV_LOG_ALLOC("Node %p, index %d, elememts %d.", mem_tr1, n, + MEM_TRACKER_elements(mem_tr1)); + if (MEM_TRACKER_elements(mem_tr1)) { + mem_tr1 = MEM_TRACKER_next(mem_tr1); + } else { + empty_tr = mem_tr1; + mem_tr1 = MEM_TRACKER_next(mem_tr1); + if (empty_tr == mem_tr_head) { + mem_tr_head = mem_tr1; + } + if (empty_tr == mem_tr_tail) { + mem_tr_tail = MEM_TRACKER_prev(empty_tr); + } + control_Memory_Tracker_Delete_Node(empty_tr); + d++; + SEP_DRV_LOG_ALLOC("Delete node %p.", mem_tr1); + } + } + + mem_tr1 = mem_tr_head; + mem_tr2 = mem_tr_tail; + + /* + * there is no need to compact if memory tracker was never used, or only have one track node + */ + overlap = (mem_tr1 == mem_tr2); + if (!mem_tr1 || !mem_tr2 || overlap) { + goto finish_compact; + } + + /* + * step2: there are more than 2 track node. + * starting from head node, find an empty element slot in a node + * if there is no empty slot or the node is tail, the compact is done. + * find an element in tail node, and move it to the empty slot fount below. + * if tail node is empty after moving, free it up. + * repeat until only one node. 
+ */ + m = MEM_TRACKER_max_size(mem_tr2) - 1; + while (!overlap) { + // find an empty node + found = FALSE; + while (!found && !overlap && mem_tr1) { + SEP_DRV_LOG_TRACE( + "Looking at mem_tr1 0x%p, index=%d, elements %d.", + mem_tr1, n, MEM_TRACKER_elements(mem_tr1)); + if (MEM_TRACKER_elements(mem_tr1) < + MEM_TRACKER_max_size(mem_tr1)) { + for (i = n; i < MEM_TRACKER_max_size(mem_tr1); + i++) { + if (!MEM_TRACKER_mem_address(mem_tr1, + i)) { + SEP_DRV_LOG_TRACE( + "Found index %d of %d empty.", + i, + MEM_TRACKER_max_size( + mem_tr1) - + 1); + found = TRUE; + break; // tentative + } + } + } + + // if no overlap and an empty node was not found, then advance to next node + if (!found) { + mem_tr1 = MEM_TRACKER_next(mem_tr1); + // check for overlap + overlap = (mem_tr1 == mem_tr2); + n = 0; + } + } + // all nodes going in forward direction are full, so exit + if (!found || overlap || !mem_tr1) { + goto finish_compact; + } + + // find a non-empty node + found = FALSE; + while (!found && !overlap && mem_tr2) { + SEP_DRV_LOG_ALLOC( + "Looking at mem_tr2 0x%p, index=%d, elements %d.", + mem_tr2, m, MEM_TRACKER_elements(mem_tr2)); + if (MEM_TRACKER_elements(mem_tr2)) { + for (j = m; j >= 0; j--) { + if (MEM_TRACKER_mem_address(mem_tr2, + j)) { + SEP_DRV_LOG_ALLOC( + "Found index %d of %d non-empty.", + j, + MEM_TRACKER_max_size( + mem_tr2) - + 1); + found = TRUE; + // Any reason why we are not 'breaking' here? 
+ } + } + } + + // if no overlap and no non-empty node was found, then retreat to prev node + if (!found) { + empty_tr = mem_tr2; // keep track of empty node + mem_tr2 = MEM_TRACKER_prev(mem_tr2); + m = MEM_TRACKER_max_size(mem_tr2) - 1; + mem_tr_tail = mem_tr2; // keep track of new tail + // reclaim empty mem_tracker node + control_Memory_Tracker_Delete_Node(empty_tr); + // keep track of number of node deletions performed + d++; + // check for overlap + overlap = (mem_tr1 == mem_tr2); + } + } + // all nodes going in reverse direction are empty, so exit + if (!found || overlap || !mem_tr2) { + goto finish_compact; + } + + // swap empty node with non-empty node so that "holes" get bubbled towards the end of list + MEM_TRACKER_mem_address(mem_tr1, i) = + MEM_TRACKER_mem_address(mem_tr2, j); + MEM_TRACKER_mem_size(mem_tr1, i) = + MEM_TRACKER_mem_size(mem_tr2, j); + MEM_TRACKER_mem_vmalloc(mem_tr1, i) = + MEM_TRACKER_mem_vmalloc(mem_tr2, j); + MEM_TRACKER_elements(mem_tr1)++; + + MEM_TRACKER_mem_address(mem_tr2, j) = NULL; + MEM_TRACKER_mem_size(mem_tr2, j) = 0; + MEM_TRACKER_mem_vmalloc(mem_tr2, j) = FALSE; + MEM_TRACKER_elements(mem_tr2)--; + + SEP_DRV_LOG_ALLOC( + "Node <%p, elemts %d, index %d> moved to <%p, elemts %d, index %d>.", + mem_tr2, MEM_TRACKER_elements(mem_tr2), j, mem_tr1, + MEM_TRACKER_elements(mem_tr1), i); + + // keep track of number of memory compactions performed + c++; + + // start new search starting from next element in mem_tr1 + n = i + 1; + + // start new search starting from prev element in mem_tr2 + m = j - 1; + } + +finish_compact: + spin_unlock_irqrestore(&mem_tr_lock, flags); + + SEP_DRV_LOG_FLOW_OUT( + "Number of elements compacted = %d, nodes deleted = %d.", c, d); +} + +/* ------------------------------------------------------------------------- */ +/* + * @fn PVOID CONTROL_Allocate_Memory(size) + * + * @param IN size - size of the memory to allocate + * + * @returns char* - pointer to the allocated memory block + * + * @brief Allocate 
and zero memory + * + * Special Notes: + * Allocate memory in the GFP_KERNEL pool. + * + * Use this if memory is to be allocated within a context where + * the allocator can block the allocation (e.g., by putting + * the caller to sleep) while it tries to free up memory to + * satisfy the request. Otherwise, if the allocation must + * occur atomically (e.g., caller cannot sleep), then use + * CONTROL_Allocate_KMemory instead. + */ +PVOID CONTROL_Allocate_Memory(size_t size) +{ + U32 status; + PVOID location = NULL; + + SEP_DRV_LOG_ALLOC_IN("Attempting to allocate %d bytes.", (S32)size); + + if (size <= 0) { + SEP_DRV_LOG_WARNING_ALLOC_OUT( + "Cannot allocate a number of bytes <= 0."); + return NULL; + } + + // determine whether to use mem_tracker or not + if (size < MAX_KMALLOC_SIZE) { + location = (PVOID)kmalloc(size, GFP_KERNEL); + SEP_DRV_LOG_ALLOC("Allocated small memory (0x%p, %d)", location, + (S32)size); + } + if (!location) { + location = (PVOID)vmalloc(size); + if (location) { + status = control_Memory_Tracker_Add(location, size, + TRUE); + SEP_DRV_LOG_ALLOC("Allocated large memory (0x%p, %d)", + location, (S32)size); + if (status != OS_SUCCESS) { + // failed to track in mem_tracker, so free up memory and return NULL + SEP_DRV_LOG_ERROR( + "Allocated %db; failed to track w/ MEM_TRACKER. Freeing...", + (S32)size); + vfree(location); + location = NULL; + } + } + } + + if (!location) { + SEP_DRV_LOG_ERROR("Failed to allocated %db.", (S32)size); + } else { + memset(location, 0, size); + } + + SEP_DRV_LOG_ALLOC_OUT("Returning %p.", location); + return location; +} + +/* ------------------------------------------------------------------------- */ +/* + * @fn PVOID CONTROL_Allocate_KMemory(size) + * + * @param IN size - size of the memory to allocate + * + * @returns char* - pointer to the allocated memory block + * + * @brief Allocate and zero memory + * + * Special Notes: + * Allocate memory in the GFP_ATOMIC pool. 
+ * + * Use this if memory is to be allocated within a context where + * the allocator cannot block the allocation (e.g., by putting + * the caller to sleep) as it tries to free up memory to + * satisfy the request. Examples include interrupt handlers, + * process context code holding locks, etc. + */ +PVOID CONTROL_Allocate_KMemory(size_t size) +{ + U32 status; + PVOID location; + + SEP_DRV_LOG_ALLOC_IN("Attempting to allocate %d bytes.", (S32)size); + + if (size <= 0) { + SEP_DRV_LOG_ALLOC_OUT( + "Cannot allocate a number of bytes <= 0."); + return NULL; + } + + if (size < MAX_KMALLOC_SIZE) { + location = (PVOID)kmalloc(size, GFP_ATOMIC); + SEP_DRV_LOG_ALLOC("Allocated small memory (0x%p, %d)", location, + (S32)size); + } else { + location = (PVOID)__get_free_pages(GFP_ATOMIC, get_order(size)); + if (location) { + status = control_Memory_Tracker_Add(location, size, + FALSE); + SEP_DRV_LOG_ALLOC("Allocated large memory (0x%p, %d)", + location, (S32)size); + if (status != OS_SUCCESS) { + // failed to track in mem_tracker, so free up memory and return NULL + SEP_DRV_LOG_ERROR( + "Allocated %db; failed to track w/ MEM_TRACKER. Freeing...", + (S32)size); + free_pages((unsigned long)location, + get_order(size)); + location = NULL; + } + } + } + + if (!location) { + SEP_DRV_LOG_ERROR("Failed to allocated %db.", (S32)size); + } else { + memset(location, 0, size); + } + + SEP_DRV_LOG_ALLOC_OUT("Returning %p.", location); + return location; +} + +/* ------------------------------------------------------------------------- */ +/* + * @fn PVOID CONTROL_Free_Memory(location) + * + * @param IN location - size of the memory to allocate + * + * @returns pointer to the allocated memory block + * + * @brief Frees the memory block + * + * Special Notes: + * Does not try to free memory if fed with a NULL pointer + * Expected usage: + * ptr = CONTROL_Free_Memory(ptr); + * Does not do compaction ... can have "holes" in + * mem_tracker list after this operation. 
+ */ +PVOID CONTROL_Free_Memory(PVOID location) +{ + S32 i; + DRV_BOOL found; + MEM_TRACKER mem_tr; + + SEP_DRV_LOG_ALLOC_IN("Attempting to free %p.", location); + + if (!location) { + SEP_DRV_LOG_ALLOC_OUT("Cannot free NULL."); + return NULL; + } + +#if defined(DRV_SEP_ACRN_ON) + if (!local_vfree_atomic) { + local_vfree_atomic = (PVOID)UTILITY_Find_Symbol("vfree_atomic"); + if (!local_vfree_atomic) { + SEP_PRINT_ERROR("Could not find 'vfree_atomic'!\n"); + } + } +#endif + spin_lock_irqsave(&mem_tr_lock, flags); + + // scan through mem_tracker nodes for matching entry (if any) + mem_tr = mem_tr_head; + found = FALSE; + while (mem_tr) { + for (i = 0; i < MEM_TRACKER_max_size(mem_tr); i++) { + if (location == MEM_TRACKER_mem_address(mem_tr, i)) { + SEP_DRV_LOG_ALLOC( + "Freeing large memory location 0x%p", + location); + found = TRUE; + if (MEM_TRACKER_mem_vmalloc(mem_tr, i)) { +#if defined(DRV_SEP_ACRN_ON) + if (unlikely(in_atomic() && + local_vfree_atomic)) { + local_vfree_atomic(location); + } else { +#endif + vfree(location); + } + +#if defined(DRV_SEP_ACRN_ON) + } +#endif + else { + free_pages( + (unsigned long)location, + get_order(MEM_TRACKER_mem_size( + mem_tr, i))); + } + MEM_TRACKER_mem_address(mem_tr, i) = NULL; + MEM_TRACKER_mem_size(mem_tr, i) = 0; + MEM_TRACKER_mem_vmalloc(mem_tr, i) = 0; + MEM_TRACKER_elements(mem_tr)--; + goto finish_free; + } + } + mem_tr = MEM_TRACKER_next(mem_tr); + } + +finish_free: + spin_unlock_irqrestore(&mem_tr_lock, flags); + + // must have been of smaller than the size limit for mem tracker nodes + if (!found) { + SEP_DRV_LOG_ALLOC("Freeing small memory location 0x%p", + location); + kfree(location); + } + + SEP_DRV_LOG_ALLOC_OUT("Success. 
Returning NULL."); + return NULL; +} diff --git a/drivers/platform/x86/sepdk/sep/core2.c b/drivers/platform/x86/sepdk/sep/core2.c new file mode 100755 index 0000000000000..a56ad28cd097c --- /dev/null +++ b/drivers/platform/x86/sepdk/sep/core2.c @@ -0,0 +1,2137 @@ +/* **************************************************************************** + * Copyright(C) 2009-2018 Intel Corporation. All Rights Reserved. + * + * This file is part of SEP Development Kit + * + * SEP Development Kit is free software; you can redistribute it + * and/or modify it under the terms of the GNU General Public License + * version 2 as published by the Free Software Foundation. + * + * SEP Development Kit is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * As a special exception, you may use this file as part of a free software + * library without restriction. Specifically, if other files instantiate + * templates or use macros or inline functions from this file, or you + * compile this file and link it with other files to produce an executable + * this file does not by itself cause the resulting executable to be + * covered by the GNU General Public License. This exception does not + * however invalidate any other reasons why the executable file might be + * covered by the GNU General Public License. 
+ * **************************************************************************** + */ + +#include "lwpmudrv_defines.h" +#include +#include +#include + +#include "lwpmudrv_types.h" +#include "lwpmudrv_ecb.h" +#include "lwpmudrv_struct.h" + +#include "lwpmudrv.h" +#include "utility.h" +#include "control.h" +#include "output.h" +#include "core2.h" +#include "ecb_iterators.h" +#include "pebs.h" +#include "apic.h" + +#if !defined(DRV_ANDROID) +#include "jkt_unc_ha.h" +#include "jkt_unc_qpill.h" +#include "pci.h" +#endif + +extern EVENT_CONFIG global_ec; +extern U64 *read_counter_info; +extern LBR lbr; +extern DRV_CONFIG drv_cfg; +extern DEV_CONFIG pcfg; +extern PWR pwr; +extern U64 *interrupt_counts; +extern DRV_SETUP_INFO_NODE req_drv_setup_info; +extern EMON_BUFFER_DRIVER_HELPER emon_buffer_driver_helper; + +#if !defined(DRV_ANDROID) +static U32 direct2core_data_saved; +static U32 bl_bypass_data_saved; +#endif + +static U32 restore_reg_addr[3]; + +typedef struct SADDR_S { + S64 addr : CORE2_LBR_DATA_BITS; +} SADDR; + +#define SADDR_addr(x) ((x).addr) +#define MSR_ENERGY_MULTIPLIER 0x606 // Energy Multiplier MSR + +#if !defined(DRV_ANDROID) +/* ------------------------------------------------------------------------- */ +/*! 
+ * @fn void core2_Disable_Direct2core(ECB) + * + * @param pecb ECB of group being scheduled + * + * @return None No return needed + * + * @brief program the QPILL and HA register for disabling of direct2core + * + * Special Notes + */ +static VOID core2_Disable_Direct2core(ECB pecb) +{ + U32 busno = 0; + U32 dev_idx = 0; + U32 base_idx = 0; + U32 device_id = 0; + U32 value = 0; + U32 vendor_id = 0; + U32 core2_qpill_dev_no[2] = { 8, 9 }; + U32 this_cpu; + + SEP_DRV_LOG_TRACE_IN("PECB: %p.", pecb); + + this_cpu = CONTROL_THIS_CPU(); + + // Discover the bus # for HA + for (busno = 0; busno < MAX_BUSNO; busno++) { + value = PCI_Read_U32(busno, JKTUNC_HA_DEVICE_NO, + JKTUNC_HA_D2C_FUNC_NO, 0); + vendor_id = value & VENDOR_ID_MASK; + device_id = (value & DEVICE_ID_MASK) >> DEVICE_ID_BITSHIFT; + + if (vendor_id != DRV_IS_PCI_VENDOR_ID_INTEL) { + continue; + } + if (device_id != JKTUNC_HA_D2C_DID) { + continue; + } + value = 0; + // now program at the offset + value = PCI_Read_U32(busno, JKTUNC_HA_DEVICE_NO, + JKTUNC_HA_D2C_FUNC_NO, + JKTUNC_HA_D2C_OFFSET); + restore_ha_direct2core[this_cpu][busno] = 0; + restore_ha_direct2core[this_cpu][busno] = value; + } + for (busno = 0; busno < MAX_BUSNO; busno++) { + value = PCI_Read_U32(busno, JKTUNC_HA_DEVICE_NO, + JKTUNC_HA_D2C_FUNC_NO, 0); + vendor_id = value & VENDOR_ID_MASK; + device_id = (value & DEVICE_ID_MASK) >> DEVICE_ID_BITSHIFT; + + if (vendor_id != DRV_IS_PCI_VENDOR_ID_INTEL) { + continue; + } + if (device_id != JKTUNC_HA_D2C_DID) { + continue; + } + + // now program at the offset + value = PCI_Read_U32(busno, JKTUNC_HA_DEVICE_NO, + JKTUNC_HA_D2C_FUNC_NO, + JKTUNC_HA_D2C_OFFSET); + value |= value | JKTUNC_HA_D2C_BITMASK; + PCI_Write_U32(busno, JKTUNC_HA_DEVICE_NO, JKTUNC_HA_D2C_FUNC_NO, + JKTUNC_HA_D2C_OFFSET, value); + } + + // Discover the bus # for QPI + for (dev_idx = 0; dev_idx < 2; dev_idx++) { + base_idx = dev_idx * MAX_BUSNO; + for (busno = 0; busno < MAX_BUSNO; busno++) { + value = PCI_Read_U32(busno, 
core2_qpill_dev_no[dev_idx], + JKTUNC_QPILL_D2C_FUNC_NO, 0); + vendor_id = value & VENDOR_ID_MASK; + device_id = + (value & DEVICE_ID_MASK) >> DEVICE_ID_BITSHIFT; + + if (vendor_id != DRV_IS_PCI_VENDOR_ID_INTEL) { + continue; + } + if ((device_id != JKTUNC_QPILL0_D2C_DID) && + (device_id != JKTUNC_QPILL1_D2C_DID)) { + continue; + } + // now program at the corresponding offset + value = PCI_Read_U32(busno, core2_qpill_dev_no[dev_idx], + JKTUNC_QPILL_D2C_FUNC_NO, + JKTUNC_QPILL_D2C_OFFSET); + restore_qpi_direct2core[this_cpu][base_idx + busno] = 0; + restore_qpi_direct2core[this_cpu][base_idx + busno] = + value; + } + } + for (dev_idx = 0; dev_idx < 2; dev_idx++) { + for (busno = 0; busno < MAX_BUSNO; busno++) { + value = PCI_Read_U32(busno, core2_qpill_dev_no[dev_idx], + JKTUNC_QPILL_D2C_FUNC_NO, 0); + vendor_id = value & VENDOR_ID_MASK; + device_id = + (value & DEVICE_ID_MASK) >> DEVICE_ID_BITSHIFT; + + if (vendor_id != DRV_IS_PCI_VENDOR_ID_INTEL) { + continue; + } + if ((device_id != JKTUNC_QPILL0_D2C_DID) && + (device_id != JKTUNC_QPILL1_D2C_DID)) { + continue; + } + // now program at the corresponding offset + value = PCI_Read_U32(busno, core2_qpill_dev_no[dev_idx], + JKTUNC_QPILL_D2C_FUNC_NO, + JKTUNC_QPILL_D2C_OFFSET); + value |= value | JKTUNC_QPILL_D2C_BITMASK; + PCI_Write_U32(busno, core2_qpill_dev_no[dev_idx], + JKTUNC_QPILL_D2C_FUNC_NO, + JKTUNC_QPILL_D2C_OFFSET, value); + } + } + + SEP_DRV_LOG_TRACE_OUT(""); +} + +/* ------------------------------------------------------------------------- */ +/*! 
+ * @fn void core2_Disable_BL_Bypass(ECB) + * + * @param pecb ECB of group being scheduled + * + * @return None No return needed + * + * @brief Disable the BL Bypass + * + * Special Notes + */ +static VOID core2_Disable_BL_Bypass(ECB pecb) +{ + U64 value; + U32 this_cpu; + + SEP_DRV_LOG_TRACE_IN("PECB: %p.", pecb); + + this_cpu = CONTROL_THIS_CPU(); + + value = SYS_Read_MSR(CORE2UNC_DISABLE_BL_BYPASS_MSR); + restore_bl_bypass[this_cpu] = 0; + restore_bl_bypass[this_cpu] = value; + value |= CORE2UNC_BLBYPASS_BITMASK; + SYS_Write_MSR(CORE2UNC_DISABLE_BL_BYPASS_MSR, value); + + SEP_DRV_LOG_TRACE_OUT(""); +} +#endif + +/* ------------------------------------------------------------------------- */ +/*! + * @fn void core2_Write_PMU(param) + * + * @param param dummy parameter which is not used + * + * @return None No return needed + * + * @brief Initial set up of the PMU registers + * + * Special Notes + * Initial write of PMU registers. + * Walk through the enties and write the value of the register accordingly. + * Assumption: For CCCR registers the enable bit is set to value 0. + * When current_group = 0, then this is the first time this routine is called, + * initialize the locks and set up EM tables. 
+ */ +static VOID core2_Write_PMU(VOID *param) +{ + U32 this_cpu; + CPU_STATE pcpu; + ECB pecb; + U32 dev_idx; + U32 cur_grp; + EVENT_CONFIG ec; + DISPATCH dispatch; + + SEP_DRV_LOG_TRACE_IN(""); + + this_cpu = CONTROL_THIS_CPU(); + pcpu = &pcb[this_cpu]; + dev_idx = core_to_dev_map[this_cpu]; + cur_grp = CPU_STATE_current_group(pcpu); + pecb = LWPMU_DEVICE_PMU_register_data(&devices[dev_idx])[cur_grp]; + ec = LWPMU_DEVICE_ec(&devices[dev_idx]); + dispatch = LWPMU_DEVICE_dispatch(&devices[dev_idx]); + + if (!pecb) { + SEP_DRV_LOG_TRACE_OUT( + "No programming for this device in this group."); + return; + } + + if (CPU_STATE_current_group(pcpu) == 0) { + if (EVENT_CONFIG_mode(ec) != EM_DISABLED) { + U32 index; + U32 st_index; + U32 j; + + /* Save all the initialization values away into an array for Event Multiplexing. */ + for (j = 0; j < EVENT_CONFIG_num_groups(ec); j++) { + CPU_STATE_current_group(pcpu) = j; + st_index = CPU_STATE_current_group(pcpu) * + EVENT_CONFIG_max_gp_events(ec); + FOR_EACH_REG_CORE_OPERATION( + pecb, i, PMU_OPERATION_DATA_GP) + { + index = st_index + i - + ECB_operations_register_start( + pecb, + PMU_OPERATION_DATA_GP); + CPU_STATE_em_tables(pcpu)[index] = + ECB_entries_reg_value(pecb, i); + } + END_FOR_EACH_REG_CORE_OPERATION; + } + /* Reset the current group to the very first one. */ + CPU_STATE_current_group(pcpu) = + this_cpu % EVENT_CONFIG_num_groups(ec); + } + } + + if (dispatch->hw_errata) { + dispatch->hw_errata(); + } + + FOR_EACH_REG_CORE_OPERATION(pecb, i, PMU_OPERATION_ALL_REG) + { + /* + * Writing the GLOBAL Control register enables the PMU to start counting. + * So write 0 into the register to prevent any counting from starting. 
+ */ + if (i == ECB_SECTION_REG_INDEX(pecb, GLOBAL_CTRL_REG_INDEX, + PMU_OPERATION_GLOBAL_REGS)) { + SYS_Write_MSR(ECB_entries_reg_id(pecb, i), 0LL); + continue; + } + /* + * PEBS is enabled for this collection session + */ + if (DRV_SETUP_INFO_pebs_accessible(&req_drv_setup_info) && + i == ECB_SECTION_REG_INDEX(pecb, PEBS_ENABLE_REG_INDEX, + PMU_OPERATION_GLOBAL_REGS) && + ECB_entries_reg_value(pecb, i)) { + SYS_Write_MSR(ECB_entries_reg_id(pecb, i), 0LL); + continue; + } + SYS_Write_MSR(ECB_entries_reg_id(pecb, i), + ECB_entries_reg_value(pecb, i)); +#if defined(MYDEBUG) + { + U64 val = SYS_Read_MSR(ECB_entries_reg_id(pecb, i)); + SEP_DRV_LOG_TRACE( + "Register 0x%x: wrvalue 0x%llx, rdvalue 0x%llx.", + ECB_entries_reg_id(pecb, i), + ECB_entries_reg_value(pecb, i), val); + } +#endif + } + END_FOR_EACH_REG_CORE_OPERATION; + + SEP_DRV_LOG_TRACE_OUT(""); +} + +/* ------------------------------------------------------------------------- */ +/*! + * @fn void core2_Disable_PMU(param) + * + * @param param dummy parameter which is not used + * + * @return None No return needed + * + * @brief Zero out the global control register. This automatically disables the PMU counters. 
+ * + */ +static VOID core2_Disable_PMU(PVOID param) +{ + U32 this_cpu; + CPU_STATE pcpu; + ECB pecb; + U32 dev_idx; + U32 cur_grp; + DEV_CONFIG pcfg; + + SEP_DRV_LOG_TRACE_IN(""); + + this_cpu = CONTROL_THIS_CPU(); + pcpu = &pcb[this_cpu]; + dev_idx = core_to_dev_map[this_cpu]; + cur_grp = CPU_STATE_current_group(pcpu); + pecb = LWPMU_DEVICE_PMU_register_data(&devices[dev_idx])[cur_grp]; + pcfg = LWPMU_DEVICE_pcfg(&devices[dev_idx]); + + if (!pecb) { + SEP_DRV_LOG_TRACE_OUT( + "No programming for this device in this group."); + return; + } + + if (GET_DRIVER_STATE() != DRV_STATE_RUNNING) { + SEP_DRV_LOG_TRACE("Driver state is not RUNNING."); + SYS_Write_MSR(ECB_entries_reg_id( + pecb, ECB_SECTION_REG_INDEX( + pecb, GLOBAL_CTRL_REG_INDEX, + PMU_OPERATION_GLOBAL_REGS)), + 0LL); + if (DEV_CONFIG_pebs_mode(pcfg)) { + SYS_Write_MSR( + ECB_entries_reg_id( + pecb, + ECB_SECTION_REG_INDEX( + pecb, PEBS_ENABLE_REG_INDEX, + PMU_OPERATION_GLOBAL_REGS)), + 0LL); + } + } + + SEP_DRV_LOG_TRACE_OUT(""); +} + +/* ------------------------------------------------------------------------- */ +/*! + * @fn void core2_Enable_PMU(param) + * + * @param param dummy parameter which is not used + * + * @return None No return needed + * + * @brief Set the enable bit for all the Control registers + * + */ +static VOID core2_Enable_PMU(PVOID param) +{ + /* + * Get the value from the event block + * 0 == location of the global control reg for this block. 
+ * Generalize this location awareness when possible + */ + U32 this_cpu; + CPU_STATE pcpu; + ECB pecb; + U32 dev_idx; + U32 cur_grp; + DEV_CONFIG pcfg; + + SEP_DRV_LOG_TRACE_IN(""); + + this_cpu = CONTROL_THIS_CPU(); + pcpu = &pcb[this_cpu]; + dev_idx = core_to_dev_map[this_cpu]; + cur_grp = CPU_STATE_current_group(pcpu); + pecb = LWPMU_DEVICE_PMU_register_data(&devices[dev_idx])[cur_grp]; + pcfg = LWPMU_DEVICE_pcfg(&devices[dev_idx]); + + if (!pecb) { + SEP_DRV_LOG_TRACE_OUT( + "No programming for this device in this group."); + return; + } + + if (KVM_guest_mode) { + SYS_Write_MSR(ECB_entries_reg_id( + pecb, ECB_SECTION_REG_INDEX( + pecb, GLOBAL_CTRL_REG_INDEX, + PMU_OPERATION_GLOBAL_REGS)), + 0LL); + } + if (GET_DRIVER_STATE() == DRV_STATE_RUNNING) { + APIC_Enable_Pmi(); + if (CPU_STATE_reset_mask(pcpu)) { + SEP_DRV_LOG_TRACE("Overflow reset mask %llx.", + CPU_STATE_reset_mask(pcpu)); + // Reinitialize the global overflow control register + SYS_Write_MSR( + ECB_entries_reg_id( + pecb, + ECB_SECTION_REG_INDEX( + pecb, GLOBAL_CTRL_REG_INDEX, + PMU_OPERATION_GLOBAL_REGS)), + ECB_entries_reg_value( + pecb, + ECB_SECTION_REG_INDEX( + pecb, GLOBAL_CTRL_REG_INDEX, + PMU_OPERATION_GLOBAL_REGS))); + SYS_Write_MSR( + ECB_entries_reg_id( + pecb, + ECB_SECTION_REG_INDEX( + pecb, DEBUG_CTRL_REG_INDEX, + PMU_OPERATION_GLOBAL_REGS)), + ECB_entries_reg_value( + pecb, + ECB_SECTION_REG_INDEX( + pecb, DEBUG_CTRL_REG_INDEX, + PMU_OPERATION_GLOBAL_REGS))); + CPU_STATE_reset_mask(pcpu) = 0LL; + } + if (CPU_STATE_group_swap(pcpu)) { + CPU_STATE_group_swap(pcpu) = 0; + SYS_Write_MSR( + ECB_entries_reg_id( + pecb, + ECB_SECTION_REG_INDEX( + pecb, GLOBAL_CTRL_REG_INDEX, + PMU_OPERATION_GLOBAL_REGS)), + ECB_entries_reg_value( + pecb, + ECB_SECTION_REG_INDEX( + pecb, GLOBAL_CTRL_REG_INDEX, + PMU_OPERATION_GLOBAL_REGS))); + if (DEV_CONFIG_pebs_mode(pcfg)) { + SYS_Write_MSR( + ECB_entries_reg_id( + pecb, + ECB_SECTION_REG_INDEX( + pecb, + PEBS_ENABLE_REG_INDEX, + 
PMU_OPERATION_GLOBAL_REGS)), + ECB_entries_reg_value( + pecb, + ECB_SECTION_REG_INDEX( + pecb, + PEBS_ENABLE_REG_INDEX, + PMU_OPERATION_GLOBAL_REGS))); + } + SYS_Write_MSR( + ECB_entries_reg_id( + pecb, + ECB_SECTION_REG_INDEX( + pecb, DEBUG_CTRL_REG_INDEX, + PMU_OPERATION_GLOBAL_REGS)), + ECB_entries_reg_value( + pecb, + ECB_SECTION_REG_INDEX( + pecb, DEBUG_CTRL_REG_INDEX, + PMU_OPERATION_GLOBAL_REGS))); +#if defined(MYDEBUG) + { + U64 val; + val = SYS_Read_MSR(ECB_entries_reg_id( + pecb, + ECB_SECTION_REG_INDEX( + pecb, GLOBAL_CTRL_REG_INDEX, + PMU_OPERATION_GLOBAL_REGS))); + SEP_DRV_LOG_TRACE( + "Write reg 0x%x--- read 0x%llx.", + ECB_entries_reg_id( + pecb, + ECB_SECTION_REG_INDEX( + pecb, + GLOBAL_CTRL_REG_INDEX, + PMU_OPERATION_GLOBAL_REGS)), + val); + } +#endif + } + } + SEP_DRV_LOG_TRACE("Reenabled PMU with value 0x%llx.", + ECB_entries_reg_value(pecb, 0)); + + SEP_DRV_LOG_TRACE_OUT(""); +} + +/* ------------------------------------------------------------------------- */ +/*! + * @fn void corei7_Enable_PMU_2(param) + * + * @param param dummy parameter which is not used + * + * @return None No return needed + * + * @brief Set the enable bit for all the Control registers + * + */ +static VOID corei7_Enable_PMU_2(PVOID param) +{ + /* + * Get the value from the event block + * 0 == location of the global control reg for this block. 
+ */ + U32 this_cpu; + CPU_STATE pcpu; + ECB pecb; + U64 pebs_val = 0; + U32 dev_idx; + U32 cur_grp; + DEV_CONFIG pcfg; + + SEP_DRV_LOG_TRACE_IN(""); + + this_cpu = CONTROL_THIS_CPU(); + pcpu = &pcb[this_cpu]; + dev_idx = core_to_dev_map[this_cpu]; + cur_grp = CPU_STATE_current_group(pcpu); + pecb = LWPMU_DEVICE_PMU_register_data(&devices[dev_idx])[cur_grp]; + pcfg = LWPMU_DEVICE_pcfg(&devices[dev_idx]); + + if (!pecb) { + SEP_DRV_LOG_TRACE_OUT( + "No programming for this device in this group."); + return; + } + + if (KVM_guest_mode) { + SYS_Write_MSR(ECB_entries_reg_id( + pecb, ECB_SECTION_REG_INDEX( + pecb, GLOBAL_CTRL_REG_INDEX, + PMU_OPERATION_GLOBAL_REGS)), + 0LL); + } + if (GET_DRIVER_STATE() == DRV_STATE_RUNNING) { + APIC_Enable_Pmi(); + if (CPU_STATE_group_swap(pcpu)) { + CPU_STATE_group_swap(pcpu) = 0; + if (DEV_CONFIG_pebs_mode(pcfg)) { + pebs_val = SYS_Read_MSR(ECB_entries_reg_id( + pecb, + ECB_SECTION_REG_INDEX( + pecb, PEBS_ENABLE_REG_INDEX, + PMU_OPERATION_GLOBAL_REGS))); + if (ECB_entries_reg_value( + pecb, + ECB_SECTION_REG_INDEX( + pecb, PEBS_ENABLE_REG_INDEX, + PMU_OPERATION_GLOBAL_REGS)) != + 0) { + SYS_Write_MSR( + ECB_entries_reg_id( + pecb, + ECB_SECTION_REG_INDEX( + pecb, + PEBS_ENABLE_REG_INDEX, + PMU_OPERATION_GLOBAL_REGS)), + ECB_entries_reg_value( + pecb, + ECB_SECTION_REG_INDEX( + pecb, + PEBS_ENABLE_REG_INDEX, + PMU_OPERATION_GLOBAL_REGS))); + } else if (pebs_val != 0) { + SYS_Write_MSR( + ECB_entries_reg_id( + pecb, + ECB_SECTION_REG_INDEX( + pecb, + PEBS_ENABLE_REG_INDEX, + PMU_OPERATION_GLOBAL_REGS)), + 0LL); + } + } + SYS_Write_MSR( + ECB_entries_reg_id( + pecb, + ECB_SECTION_REG_INDEX( + pecb, DEBUG_CTRL_REG_INDEX, + PMU_OPERATION_GLOBAL_REGS)), + ECB_entries_reg_value( + pecb, + ECB_SECTION_REG_INDEX( + pecb, DEBUG_CTRL_REG_INDEX, + PMU_OPERATION_GLOBAL_REGS))); + SYS_Write_MSR( + ECB_entries_reg_id( + pecb, + ECB_SECTION_REG_INDEX( + pecb, GLOBAL_CTRL_REG_INDEX, + PMU_OPERATION_GLOBAL_REGS)), + ECB_entries_reg_value( + pecb, + 
ECB_SECTION_REG_INDEX( + pecb, GLOBAL_CTRL_REG_INDEX, + PMU_OPERATION_GLOBAL_REGS))); +#if defined(MYDEBUG) + SEP_DRV_LOG_TRACE("Reenabled PMU with value 0x%llx.", + ECB_entries_reg_value(pecb, 0)); +#endif + } + if (CPU_STATE_reset_mask(pcpu)) { +#if defined(MYDEBUG) + SEP_DRV_LOG_TRACE("Overflow reset mask %llx.", + CPU_STATE_reset_mask(pcpu)); +#endif + // Reinitialize the global overflow control register + SYS_Write_MSR( + ECB_entries_reg_id( + pecb, + ECB_SECTION_REG_INDEX( + pecb, GLOBAL_CTRL_REG_INDEX, + PMU_OPERATION_GLOBAL_REGS)), + ECB_entries_reg_value( + pecb, + ECB_SECTION_REG_INDEX( + pecb, GLOBAL_CTRL_REG_INDEX, + PMU_OPERATION_GLOBAL_REGS))); + SYS_Write_MSR( + ECB_entries_reg_id( + pecb, + ECB_SECTION_REG_INDEX( + pecb, DEBUG_CTRL_REG_INDEX, + PMU_OPERATION_GLOBAL_REGS)), + ECB_entries_reg_value( + pecb, + ECB_SECTION_REG_INDEX( + pecb, DEBUG_CTRL_REG_INDEX, + PMU_OPERATION_GLOBAL_REGS))); + CPU_STATE_reset_mask(pcpu) = 0LL; + } + } + + SEP_DRV_LOG_TRACE_OUT(""); +} + +/* ------------------------------------------------------------------------- */ +/*! + * @fn core2_Read_PMU_Data(param) + * + * @param param dummy parameter which is not used + * + * @return None No return needed + * + * @brief Read all the data MSR's into a buffer. Called by the interrupt handler. 
+ * + */ +static void core2_Read_PMU_Data(PVOID param) +{ + U32 j; + U64 *buffer = read_counter_info; + U32 this_cpu; + CPU_STATE pcpu; + ECB pecb; + U32 dev_idx; + U32 cur_grp; + + SEP_DRV_LOG_TRACE_IN(""); + + preempt_disable(); + this_cpu = CONTROL_THIS_CPU(); + preempt_enable(); + pcpu = &pcb[this_cpu]; + dev_idx = core_to_dev_map[this_cpu]; + cur_grp = CPU_STATE_current_group(pcpu); + pecb = LWPMU_DEVICE_PMU_register_data(&devices[dev_idx])[cur_grp]; + + if (!pecb) { + SEP_DRV_LOG_TRACE_OUT( + "No programming for this device in this group."); + return; + } + + SEP_DRV_LOG_TRACE("PMU control_data 0x%p, buffer 0x%p.", + LWPMU_DEVICE_PMU_register_data(&devices[dev_idx]), + buffer); + FOR_EACH_REG_CORE_OPERATION(pecb, i, PMU_OPERATION_DATA_ALL) + { + j = EMON_BUFFER_CORE_EVENT_OFFSET( + EMON_BUFFER_DRIVER_HELPER_core_index_to_thread_offset_map( + emon_buffer_driver_helper)[this_cpu], + ECB_entries_core_event_id(pecb, i)); + + buffer[j] = SYS_Read_MSR(ECB_entries_reg_id(pecb, i)); + SEP_DRV_LOG_TRACE("j=%u, value=%llu, cpu=%u, event_id=%u", j, + buffer[j], this_cpu, + ECB_entries_core_event_id(pecb, i)); + } + END_FOR_EACH_REG_CORE_OPERATION; + + SEP_DRV_LOG_TRACE_OUT(""); +} + +/* ------------------------------------------------------------------------- */ +/*! + * @fn core2_Check_Overflow_Errata(pecb, index, overflow_status) + * + * @param pecb: The current event control block + * @param index: index of the register to process + * @param overflow_status: current overflow mask + * + * @return Updated Event mask of the overflowed registers. + * + * @brief Go through the overflow errata for the architecture and set the mask + * + * Special Notes + * fixed_counter1 on some architectures gets interfered by + * other event counts. Overcome this problem by reading the + * counter value and resetting the overflow mask. 
+ * + */ +static U64 core2_Check_Overflow_Errata(ECB pecb, U32 index, U64 overflow_status) +{ + SEP_DRV_LOG_TRACE_IN(""); + + if (DRV_CONFIG_num_events(drv_cfg) == 1) { + SEP_DRV_LOG_TRACE_OUT("Res: %llu. (num_events = 1)", + overflow_status); + return overflow_status; + } + if (ECB_entries_reg_id(pecb, index) == IA32_FIXED_CTR1 && + (overflow_status & 0x200000000LL) == 0LL) { + U64 val = SYS_Read_MSR(IA32_FIXED_CTR1); + val &= ECB_entries_max_bits(pecb, index); + if (val < ECB_entries_reg_value(pecb, index)) { + overflow_status |= 0x200000000LL; + SEP_DRV_LOG_TRACE( + "Reset -- clk count %llx, status %llx.", val, + overflow_status); + } + } + + SEP_DRV_LOG_TRACE_OUT("Res: %llu.", overflow_status); + return overflow_status; +} + +/* ------------------------------------------------------------------------- */ +/*! + * @fn void core2_Check_Overflow(masks) + * + * @param masks the mask structure to populate + * + * @return None No return needed + * + * @brief Called by the data processing method to figure out which registers have overflowed. 
+ * + */ +static void core2_Check_Overflow(DRV_MASKS masks) +{ + U32 index; + U64 overflow_status = 0; + U32 this_cpu; + BUFFER_DESC bd; + CPU_STATE pcpu; + ECB pecb; + U32 dev_idx; + U32 cur_grp; + DEV_CONFIG pcfg; + DISPATCH dispatch; + U64 overflow_status_clr = 0; + DRV_EVENT_MASK_NODE event_flag; + + SEP_DRV_LOG_TRACE_IN(""); + + this_cpu = CONTROL_THIS_CPU(); + bd = &cpu_buf[this_cpu]; + pcpu = &pcb[this_cpu]; + dev_idx = core_to_dev_map[this_cpu]; + cur_grp = CPU_STATE_current_group(pcpu); + pecb = LWPMU_DEVICE_PMU_register_data(&devices[dev_idx])[cur_grp]; + pcfg = LWPMU_DEVICE_pcfg(&devices[dev_idx]); + dispatch = LWPMU_DEVICE_dispatch(&devices[dev_idx]); + + if (!pecb) { + SEP_DRV_LOG_TRACE_OUT( + "No programming for this device in this group."); + return; + } + + // initialize masks + DRV_MASKS_masks_num(masks) = 0; + + overflow_status = SYS_Read_MSR(ECB_entries_reg_id( + pecb, ECB_SECTION_REG_INDEX(pecb, GLOBAL_STATUS_REG_INDEX, + PMU_OPERATION_GLOBAL_STATUS))); + + if (DEV_CONFIG_pebs_mode(pcfg)) { + overflow_status = PEBS_Overflowed(this_cpu, overflow_status, 0); + } + overflow_status_clr = overflow_status; + + if (dispatch->check_overflow_gp_errata) { + overflow_status = dispatch->check_overflow_gp_errata( + pecb, &overflow_status_clr); + } + SEP_DRV_LOG_TRACE("Overflow: cpu: %d, status 0x%llx.", this_cpu, + overflow_status); + index = 0; + BUFFER_DESC_sample_count(bd) = 0; + FOR_EACH_REG_CORE_OPERATION(pecb, i, PMU_OPERATION_DATA_ALL) + { + if (ECB_entries_fixed_reg_get(pecb, i)) { + index = i - + ECB_operations_register_start( + pecb, PMU_OPERATION_DATA_FIXED) + + 0x20; + if (dispatch->check_overflow_errata) { + overflow_status = + dispatch->check_overflow_errata( + pecb, i, overflow_status); + } + } else if (ECB_entries_is_gp_reg_get(pecb, i)) { + index = i - ECB_operations_register_start( + pecb, PMU_OPERATION_DATA_GP); + } else { + continue; + } + if (overflow_status & ((U64)1 << index)) { + SEP_DRV_LOG_TRACE("Overflow: cpu: %d, index %d.", + 
this_cpu, index); + SEP_DRV_LOG_TRACE( + "register 0x%x --- val 0%llx.", + ECB_entries_reg_id(pecb, i), + SYS_Read_MSR(ECB_entries_reg_id(pecb, i))); + SYS_Write_MSR(ECB_entries_reg_id(pecb, i), + ECB_entries_reg_value(pecb, i)); + + if (DRV_CONFIG_enable_cp_mode(drv_cfg)) { + /* Increment the interrupt count. */ + if (interrupt_counts) { + interrupt_counts + [this_cpu * + DRV_CONFIG_num_events( + drv_cfg) + + ECB_entries_event_id_index( + pecb, i)] += 1; + } + } + + DRV_EVENT_MASK_bitFields1(&event_flag) = (U8)0; + if (ECB_entries_fixed_reg_get(pecb, i)) { + CPU_STATE_p_state_counting(pcpu) = 1; + } + if (ECB_entries_precise_get(pecb, i)) { + DRV_EVENT_MASK_precise(&event_flag) = 1; + } + if (ECB_entries_lbr_value_get(pecb, i)) { + DRV_EVENT_MASK_lbr_capture(&event_flag) = 1; + } + if (ECB_entries_uncore_get(pecb, i)) { + DRV_EVENT_MASK_uncore_capture(&event_flag) = 1; + } + if (ECB_entries_branch_evt_get(pecb, i)) { + DRV_EVENT_MASK_branch(&event_flag) = 1; + } + + if (DRV_MASKS_masks_num(masks) < MAX_OVERFLOW_EVENTS) { + DRV_EVENT_MASK_bitFields1( + DRV_MASKS_eventmasks(masks) + + DRV_MASKS_masks_num(masks)) = + DRV_EVENT_MASK_bitFields1(&event_flag); + DRV_EVENT_MASK_event_idx( + DRV_MASKS_eventmasks(masks) + + DRV_MASKS_masks_num(masks)) = + ECB_entries_event_id_index(pecb, i); + DRV_MASKS_masks_num(masks)++; + } else { + SEP_DRV_LOG_ERROR( + "The array for event masks is full."); + } + + SEP_DRV_LOG_TRACE("overflow -- 0x%llx, index 0x%llx.", + overflow_status, (U64)1 << index); + SEP_DRV_LOG_TRACE("slot# %d, reg_id 0x%x, index %d.", i, + ECB_entries_reg_id(pecb, i), index); + if (ECB_entries_event_id_index(pecb, i) == + CPU_STATE_trigger_event_num(pcpu)) { + CPU_STATE_trigger_count(pcpu)--; + } + } + } + END_FOR_EACH_REG_CORE_OPERATION; + + CPU_STATE_reset_mask(pcpu) = overflow_status_clr; + // Reinitialize the global overflow control register + SYS_Write_MSR(ECB_entries_reg_id( + pecb, ECB_SECTION_REG_INDEX( + pecb, GLOBAL_OVF_CTRL_REG_INDEX, + 
PMU_OPERATION_GLOBAL_REGS)), + overflow_status_clr); + + SEP_DRV_LOG_TRACE("Check Overflow completed %d.", this_cpu); + SEP_DRV_LOG_TRACE_OUT(""); +} + +/* ------------------------------------------------------------------------- */ +/*! + * @fn core2_Swap_Group(restart) + * + * @param restart dummy parameter which is not used + * + * @return None No return needed + * + * @brief Perform the mechanics of swapping the event groups for event mux operations + * + * Special Notes + * Swap function for event multiplexing. + * Freeze the counting. + * Swap the groups. + * Enable the counting. + * Reset the event trigger count + * + */ +static VOID core2_Swap_Group(DRV_BOOL restart) +{ + U32 index; + U32 next_group; + U32 st_index; + U32 this_cpu; + CPU_STATE pcpu; + U32 dev_idx; + DISPATCH dispatch; + EVENT_CONFIG ec; + + SEP_DRV_LOG_TRACE_IN(""); + + this_cpu = CONTROL_THIS_CPU(); + pcpu = &pcb[this_cpu]; + dev_idx = core_to_dev_map[this_cpu]; + dispatch = LWPMU_DEVICE_dispatch(&devices[dev_idx]); + ec = LWPMU_DEVICE_ec(&devices[dev_idx]); + + st_index = + CPU_STATE_current_group(pcpu) * EVENT_CONFIG_max_gp_events(ec); + next_group = (CPU_STATE_current_group(pcpu) + 1); + if (next_group >= EVENT_CONFIG_num_groups(ec)) { + next_group = 0; + } + + SEP_DRV_LOG_TRACE("current group : 0x%x.", + CPU_STATE_current_group(pcpu)); + SEP_DRV_LOG_TRACE("next group : 0x%x.", next_group); + + // Save the counters for the current group + if (!DRV_CONFIG_event_based_counts(drv_cfg)) { + FOR_EACH_REG_CORE_OPERATION(pecb, i, PMU_OPERATION_DATA_GP) + { + index = st_index + i - + ECB_operations_register_start( + pecb, PMU_OPERATION_DATA_GP); + CPU_STATE_em_tables(pcpu)[index] = + SYS_Read_MSR(ECB_entries_reg_id(pecb, i)); + SEP_DRV_LOG_TRACE("Saved value for reg 0x%x : 0x%llx.", + ECB_entries_reg_id(pecb, i), + CPU_STATE_em_tables(pcpu)[index]); + } + END_FOR_EACH_REG_CORE_OPERATION; + } + + CPU_STATE_current_group(pcpu) = next_group; + + if (dispatch->hw_errata) { + dispatch->hw_errata(); 
+ } + + // First write the GP control registers (eventsel) + FOR_EACH_REG_CORE_OPERATION(pecb, i, PMU_OPERATION_CTRL_GP) + { + SYS_Write_MSR(ECB_entries_reg_id(pecb, i), + ECB_entries_reg_value(pecb, i)); + } + END_FOR_EACH_REG_CORE_OPERATION; + + if (DRV_CONFIG_event_based_counts(drv_cfg)) { + // In EBC mode, reset the counts for all events except for trigger event + FOR_EACH_REG_CORE_OPERATION(pecb, i, PMU_OPERATION_DATA_ALL) + { + if (ECB_entries_event_id_index(pecb, i) != + CPU_STATE_trigger_event_num(pcpu)) { + SYS_Write_MSR(ECB_entries_reg_id(pecb, i), 0LL); + } + } + END_FOR_EACH_REG_CORE_OPERATION; + } else { + // Then write the gp count registers + st_index = CPU_STATE_current_group(pcpu) * + EVENT_CONFIG_max_gp_events(ec); + FOR_EACH_REG_CORE_OPERATION(pecb, i, PMU_OPERATION_DATA_GP) + { + index = st_index + i - + ECB_operations_register_start( + pecb, PMU_OPERATION_DATA_GP); + SYS_Write_MSR(ECB_entries_reg_id(pecb, i), + CPU_STATE_em_tables(pcpu)[index]); + SEP_DRV_LOG_TRACE( + "Restore value for reg 0x%x : 0x%llx.", + ECB_entries_reg_id(pecb, i), + CPU_STATE_em_tables(pcpu)[index]); + } + END_FOR_EACH_REG_CORE_OPERATION; + } + + FOR_EACH_REG_CORE_OPERATION(pecb, i, PMU_OPERATION_OCR) + { + SYS_Write_MSR(ECB_entries_reg_id(pecb, i), + ECB_entries_reg_value(pecb, i)); + } + END_FOR_EACH_REG_CORE_OPERATION; + + /* + * reset the em factor when a group is swapped + */ + CPU_STATE_trigger_count(pcpu) = EVENT_CONFIG_em_factor(ec); + + /* + * The enable routine needs to rewrite the control registers + */ + CPU_STATE_reset_mask(pcpu) = 0LL; + CPU_STATE_group_swap(pcpu) = 1; + + SEP_DRV_LOG_TRACE_OUT(""); +} + +/* ------------------------------------------------------------------------- */ +/*! 
+ * @fn core2_Initialize(params) + * + * @param params dummy parameter which is not used + * + * @return None No return needed + * + * @brief Initialize the PMU setting up for collection + * + * Special Notes + * Saves the relevant PMU state (minimal set of MSRs required + * to avoid conflicts with other Linux tools, such as Oprofile). + * This function should be called in parallel across all CPUs + * prior to the start of sampling, before PMU state is changed. + * + */ +static VOID core2_Initialize(VOID *param) +{ + U32 this_cpu; + CPU_STATE pcpu; + U32 dev_idx; + DEV_CONFIG pcfg; + U32 i = 0; + ECB pecb = NULL; + U32 cur_grp; + + SEP_DRV_LOG_TRACE_IN(""); + + this_cpu = CONTROL_THIS_CPU(); + dev_idx = core_to_dev_map[this_cpu]; + pcfg = LWPMU_DEVICE_pcfg(&devices[dev_idx]); + + if (pcb == NULL) { + SEP_DRV_LOG_TRACE_OUT( + "No programming for this device in this group."); + return; + } + + pcpu = &pcb[this_cpu]; + cur_grp = CPU_STATE_current_group(pcpu); + pecb = LWPMU_DEVICE_PMU_register_data(&devices[dev_idx])[cur_grp]; + CPU_STATE_pmu_state(pcpu) = pmu_state + (this_cpu * 3); + if (CPU_STATE_pmu_state(pcpu) == NULL) { + SEP_DRV_LOG_WARNING_TRACE_OUT( + "Unable to save PMU state on CPU %d.", this_cpu); + return; + } + + restore_reg_addr[0] = ECB_entries_reg_id( + pecb, ECB_SECTION_REG_INDEX(pecb, DEBUG_CTRL_REG_INDEX, + PMU_OPERATION_GLOBAL_REGS)); + restore_reg_addr[1] = ECB_entries_reg_id( + pecb, ECB_SECTION_REG_INDEX(pecb, GLOBAL_CTRL_REG_INDEX, + PMU_OPERATION_GLOBAL_REGS)); + restore_reg_addr[2] = ECB_entries_reg_id( + pecb, ECB_SECTION_REG_INDEX(pecb, FIXED_CTRL_REG_INDEX, + PMU_OPERATION_GLOBAL_REGS)); + // save the original PMU state on this CPU (NOTE: must only be called ONCE per collection) + CPU_STATE_pmu_state(pcpu)[0] = SYS_Read_MSR(restore_reg_addr[0]); + CPU_STATE_pmu_state(pcpu)[1] = SYS_Read_MSR(restore_reg_addr[1]); + CPU_STATE_pmu_state(pcpu)[2] = SYS_Read_MSR(restore_reg_addr[2]); + + if (DRV_CONFIG_ds_area_available(drv_cfg) && + 
DEV_CONFIG_pebs_mode(pcfg)) { + SYS_Write_MSR(ECB_entries_reg_id( + pecb, ECB_SECTION_REG_INDEX( + pecb, PEBS_ENABLE_REG_INDEX, + PMU_OPERATION_GLOBAL_REGS)), + 0LL); + } + + SEP_DRV_LOG_TRACE("Saving PMU state on CPU %d:", this_cpu); + SEP_DRV_LOG_TRACE(" msr_val(IA32_DEBUG_CTRL)=0x%llx.", + CPU_STATE_pmu_state(pcpu)[0]); + SEP_DRV_LOG_TRACE(" msr_val(IA32_PERF_GLOBAL_CTRL)=0x%llx.", + CPU_STATE_pmu_state(pcpu)[1]); + SEP_DRV_LOG_TRACE(" msr_val(IA32_FIXED_CTRL)=0x%llx.", + CPU_STATE_pmu_state(pcpu)[2]); + +#if !defined(DRV_ANDROID) + if (!CPU_STATE_socket_master(pcpu)) { + SEP_DRV_LOG_TRACE_OUT("Not socket master."); + return; + } + + direct2core_data_saved = 0; + bl_bypass_data_saved = 0; + cur_grp = CPU_STATE_current_group(pcpu); + + if (restore_ha_direct2core && restore_qpi_direct2core) { + for (i = 0; i < GLOBAL_STATE_num_em_groups(driver_state); i++) { + pecb = LWPMU_DEVICE_PMU_register_data( + &devices[dev_idx])[i]; + if (pecb && (ECB_flags(pecb) & ECB_direct2core_bit)) { + core2_Disable_Direct2core( + LWPMU_DEVICE_PMU_register_data( + &devices[dev_idx])[cur_grp]); + direct2core_data_saved = 1; + break; + } + } + } + if (restore_bl_bypass) { + for (i = 0; i < GLOBAL_STATE_num_em_groups(driver_state); i++) { + pecb = LWPMU_DEVICE_PMU_register_data( + &devices[dev_idx])[i]; + if (pecb && (ECB_flags(pecb) & ECB_bl_bypass_bit)) { + core2_Disable_BL_Bypass( + LWPMU_DEVICE_PMU_register_data( + &devices[dev_idx])[cur_grp]); + bl_bypass_data_saved = 1; + break; + } + } + } +#endif + + SEP_DRV_LOG_TRACE_OUT(""); +} + +/* ------------------------------------------------------------------------- */ +/*! + * @fn core2_Destroy(params) + * + * @param params dummy parameter which is not used + * + * @return None No return needed + * + * @brief Reset the PMU setting up after collection + * + * Special Notes + * Restores the previously saved PMU state done in core2_Initialize. 
+ * This function should be called in parallel across all CPUs + * after sampling collection ends/terminates. + * + */ +static VOID core2_Destroy(VOID *param) +{ + U32 this_cpu; + CPU_STATE pcpu; + + SEP_DRV_LOG_TRACE_IN(""); + + if (pcb == NULL) { + SEP_DRV_LOG_TRACE_OUT( + "No programming for this device in this group."); + return; + } + + preempt_disable(); + this_cpu = CONTROL_THIS_CPU(); + preempt_enable(); + pcpu = &pcb[this_cpu]; + + if (CPU_STATE_pmu_state(pcpu) == NULL) { + SEP_DRV_LOG_WARNING_TRACE_OUT( + "Unable to restore PMU state on CPU %d.", this_cpu); + return; + } + + SEP_DRV_LOG_TRACE("Clearing PMU state on CPU %d:", this_cpu); + SEP_DRV_LOG_TRACE(" msr_val(IA32_DEBUG_CTRL)=0x0."); + SEP_DRV_LOG_TRACE(" msr_val(IA32_PERF_GLOBAL_CTRL)=0x0."); + SEP_DRV_LOG_TRACE(" msr_val(IA32_FIXED_CTRL)=0x0."); + + // Tentative code below (trying to avoid race conditions with the NMI watchdog). Should be evaluated in the coming few days. (2018/05/21) + SYS_Write_MSR(restore_reg_addr[0], 0); + SYS_Write_MSR(restore_reg_addr[1], 0); + SYS_Write_MSR(restore_reg_addr[2], 0); + + CPU_STATE_pmu_state(pcpu) = NULL; + + SEP_DRV_LOG_TRACE_OUT(""); +} + +/* + * @fn core2_Read_LBRs(buffer) + * + * @param IN buffer - pointer to the buffer to write the data into + * @return Last branch source IP address + * + * @brief Read all the LBR registers into the buffer provided and return + * + */ +static U64 core2_Read_LBRs(VOID *buffer, PVOID data) +{ + U32 i, count = 0; + U64 *lbr_buf = NULL; + U64 value = 0; + U64 tos_ip_addr = 0; + U64 tos_ptr = 0; + SADDR saddr; + U32 this_cpu; + U32 dev_idx; + LBR lbr; + DEV_CONFIG pcfg; + + SEP_DRV_LOG_TRACE_IN(""); + + this_cpu = CONTROL_THIS_CPU(); + dev_idx = core_to_dev_map[this_cpu]; + pcfg = LWPMU_DEVICE_pcfg(&devices[dev_idx]); + lbr = LWPMU_DEVICE_lbr(&devices[dev_idx]); + + if (buffer && DEV_CONFIG_store_lbrs(pcfg)) { + lbr_buf = (U64 *)buffer; + } + + for (i = 0; i < LBR_num_entries(lbr); i++) { + value = 
SYS_Read_MSR(LBR_entries_reg_id(lbr, i)); + if (buffer && DEV_CONFIG_store_lbrs(pcfg)) { + *lbr_buf = value; + } + SEP_DRV_LOG_TRACE("core2_Read_LBRs %u, 0x%llx.", i, value); + if (i == 0) { + tos_ptr = value; + } else { + if (LBR_entries_etype(lbr, i) == + LBR_ENTRY_FROM_IP) { // LBR from register + if (tos_ptr == count) { + SADDR_addr(saddr) = + value & CORE2_LBR_BITMASK; + tos_ip_addr = (U64)SADDR_addr( + saddr); // Add signed extension + SEP_DRV_LOG_TRACE( + "Tos_ip_addr %llu, 0x%llx.", + tos_ptr, value); + } + count++; + } + } + if (buffer && DEV_CONFIG_store_lbrs(pcfg)) { + lbr_buf++; + } + } + + SEP_DRV_LOG_TRACE_OUT("Res: %llu.", tos_ip_addr); + return tos_ip_addr; +} + +/* + * @fn corei7_Read_LBRs(buffer) + * + * @param IN buffer - pointer to the buffer to write the data into + * @return Last branch source IP address + * + * @brief Read all the LBR registers into the buffer provided and return + * + */ +static U64 corei7_Read_LBRs(VOID *buffer, PVOID data) +{ + U32 i, count = 0; + U64 *lbr_buf = NULL; + U64 value = 0; + U64 tos_ip_addr = 0; + U64 tos_ptr = 0; + SADDR saddr; + U32 pairs = 0; + U32 this_cpu; + U32 dev_idx; + LBR lbr; + DEV_CONFIG pcfg; + + SEP_DRV_LOG_TRACE_IN(""); + + this_cpu = CONTROL_THIS_CPU(); + dev_idx = core_to_dev_map[this_cpu]; + pcfg = LWPMU_DEVICE_pcfg(&devices[dev_idx]); + lbr = LWPMU_DEVICE_lbr(&devices[dev_idx]); + + if (buffer && DEV_CONFIG_store_lbrs(pcfg)) { + lbr_buf = (U64 *)buffer; + } + + if (LBR_num_entries(lbr) > 0) { + pairs = (LBR_num_entries(lbr) - 1) / 2; + } + for (i = 0; i < LBR_num_entries(lbr); i++) { + value = SYS_Read_MSR(LBR_entries_reg_id(lbr, i)); + if (buffer && DEV_CONFIG_store_lbrs(pcfg)) { + *lbr_buf = value; + } + if (DEV_CONFIG_collect_callstacks(pcfg)) { + if ((LBR_entries_etype(lbr, i) == LBR_ENTRY_FROM_IP && + i > tos_ptr + 1) || + (LBR_entries_etype(lbr, i) == LBR_ENTRY_TO_IP && + i > tos_ptr + pairs + 1)) { + if (buffer && DEV_CONFIG_store_lbrs(pcfg)) { + *lbr_buf = 0x0ULL; + lbr_buf++; + } + 
continue; + } + } +#if defined(DRV_SEP_ACRN_ON) + if (DEV_CONFIG_collect_callstacks(pcfg)) { + if ((LBR_entries_etype(lbr, i) == LBR_ENTRY_FROM_IP && + i > tos_ptr + 1) || + (LBR_entries_etype(lbr, i) == LBR_ENTRY_TO_IP && + i > tos_ptr + pairs + 1)) { + if (buffer && DEV_CONFIG_store_lbrs(pcfg)) { + *lbr_buf = 0x0ULL; + lbr_buf++; + } + continue; + } + } +#endif + SEP_DRV_LOG_TRACE("I: %u, value: 0x%llx.", i, value); + if (i == 0) { + tos_ptr = value; + } else { + if (LBR_entries_etype(lbr, i) == + LBR_ENTRY_FROM_IP) { // LBR from register + if (tos_ptr == count) { + SADDR_addr(saddr) = + value & CORE2_LBR_BITMASK; + tos_ip_addr = (U64)SADDR_addr( + saddr); // Add signed extension + SEP_DRV_LOG_TRACE( + "tos_ip_addr %llu, 0x%llx.", + tos_ptr, value); + } + count++; + } + } + if (buffer && DEV_CONFIG_store_lbrs(pcfg)) { + lbr_buf++; + } + } + + SEP_DRV_LOG_TRACE_OUT("Res: %llu.", tos_ip_addr); + return tos_ip_addr; +} + +static VOID core2_Clean_Up(VOID *param) +{ +#if !defined(DRV_ANDROID) + U32 this_cpu; + CPU_STATE pcpu; + U32 busno = 0; + U32 dev_idx = 0; + U32 base_idx = 0; + U32 device_id = 0; + U32 value = 0; + U32 vendor_id = 0; + U32 core2_qpill_dev_no[2] = { 8, 9 }; +#endif + + SEP_DRV_LOG_TRACE_IN(""); + +#if !defined(DRV_ANDROID) + this_cpu = CONTROL_THIS_CPU(); + pcpu = &pcb[this_cpu]; +#endif + + FOR_EACH_REG_CORE_OPERATION(pecb, i, PMU_OPERATION_ALL_REG) + { + if (ECB_entries_clean_up_get(pecb, i)) { + SEP_DRV_LOG_TRACE("clean up set --- RegId --- %x.", + ECB_entries_reg_id(pecb, i)); + SYS_Write_MSR(ECB_entries_reg_id(pecb, i), 0LL); + } + } + END_FOR_EACH_REG_CORE_OPERATION; + +#if !defined(DRV_ANDROID) + if (!CPU_STATE_socket_master(pcpu)) { + SEP_DRV_LOG_TRACE_OUT("Not socket master."); + return; + } + + if (restore_ha_direct2core && restore_qpi_direct2core && + direct2core_data_saved) { + // Discover the bus # for HA + for (busno = 0; busno < MAX_BUSNO; busno++) { + value = PCI_Read_U32(busno, JKTUNC_HA_DEVICE_NO, + JKTUNC_HA_D2C_FUNC_NO, 0); + 
vendor_id = value & VENDOR_ID_MASK; + device_id = + (value & DEVICE_ID_MASK) >> DEVICE_ID_BITSHIFT; + + if (vendor_id != DRV_IS_PCI_VENDOR_ID_INTEL) { + continue; + } + if (device_id != JKTUNC_HA_D2C_DID) { + continue; + } + + // now program at the offset + PCI_Write_U32(busno, JKTUNC_HA_DEVICE_NO, + JKTUNC_HA_D2C_FUNC_NO, + JKTUNC_HA_D2C_OFFSET, + restore_ha_direct2core[this_cpu][busno]); + } + + // Discover the bus # for QPI + for (dev_idx = 0; dev_idx < 2; dev_idx++) { + base_idx = dev_idx * MAX_BUSNO; + for (busno = 0; busno < MAX_BUSNO; busno++) { + value = PCI_Read_U32( + busno, core2_qpill_dev_no[dev_idx], + JKTUNC_QPILL_D2C_FUNC_NO, 0); + vendor_id = value & VENDOR_ID_MASK; + device_id = (value & DEVICE_ID_MASK) >> + DEVICE_ID_BITSHIFT; + + if (vendor_id != DRV_IS_PCI_VENDOR_ID_INTEL) { + continue; + } + if ((device_id != JKTUNC_QPILL0_D2C_DID) && + (device_id != JKTUNC_QPILL1_D2C_DID)) { + continue; + } + // now program at the corresponding offset + PCI_Write_U32(busno, + core2_qpill_dev_no[dev_idx], + JKTUNC_QPILL_D2C_FUNC_NO, + JKTUNC_QPILL_D2C_OFFSET, + restore_qpi_direct2core[this_cpu] + [base_idx + + busno]); + } + } + } + if (restore_bl_bypass && bl_bypass_data_saved) { + SYS_Write_MSR(CORE2UNC_DISABLE_BL_BYPASS_MSR, + restore_bl_bypass[this_cpu]); + } +#endif + + SEP_DRV_LOG_TRACE_OUT(""); +} + +static VOID corei7_Errata_Fix(void) +{ + U32 this_cpu = CONTROL_THIS_CPU(); + CPU_STATE pcpu = &pcb[this_cpu]; + ECB(pecb) = NULL; + U32 dev_idx, cur_grp; + DEV_CONFIG pcfg; + + SEP_DRV_LOG_TRACE_IN(""); + + this_cpu = CONTROL_THIS_CPU(); + dev_idx = core_to_dev_map[this_cpu]; + pcfg = LWPMU_DEVICE_pcfg(&devices[dev_idx]); + cur_grp = CPU_STATE_current_group(pcpu); + pcfg = LWPMU_DEVICE_pcfg(&devices[dev_idx]); + pecb = LWPMU_DEVICE_PMU_register_data(&devices[dev_idx])[cur_grp]; + + if (DEV_CONFIG_pebs_mode(pcfg)) { + SYS_Write_MSR(ECB_entries_reg_id( + pecb, ECB_SECTION_REG_INDEX( + pecb, PEBS_ENABLE_REG_INDEX, + PMU_OPERATION_GLOBAL_REGS)), + 0LL); + } + + 
FOR_EACH_REG_CORE_OPERATION(pecb, i, PMU_OPERATION_HW_ERRATA) + { + SYS_Write_MSR(ECB_entries_reg_id(pecb, i), + ECB_entries_reg_value(pecb, i)); + } + END_FOR_EACH_REG_CORE_OPERATION; + + SEP_DRV_LOG_TRACE_OUT(""); +} + +static VOID corei7_Errata_Fix_2(void) +{ + SEP_DRV_LOG_TRACE_IN(""); + + FOR_EACH_REG_CORE_OPERATION(pecb, i, PMU_OPERATION_HW_ERRATA) + { + SYS_Write_MSR(ECB_entries_reg_id(pecb, i), + ECB_entries_reg_value(pecb, i)); + } + END_FOR_EACH_REG_CORE_OPERATION; + + SEP_DRV_LOG_TRACE_OUT(""); +} + +/* ------------------------------------------------------------------------- */ +/*! + * @fn void core2_Check_Overflow_Htoff_Mode(masks) + * + * @param masks the mask structure to populate + * + * @return None No return needed + * + * @brief Called by the data processing method to figure out which registers have overflowed. + * + */ +static void core2_Check_Overflow_Htoff_Mode(DRV_MASKS masks) +{ + U32 index; + U64 value = 0; + U64 overflow_status = 0; + U32 this_cpu; + BUFFER_DESC bd; + CPU_STATE pcpu; + U32 dev_idx; + U32 cur_grp; + DISPATCH dispatch; + DEV_CONFIG pcfg; + ECB pecb; + U64 overflow_status_clr = 0; + DRV_EVENT_MASK_NODE event_flag; + + SEP_DRV_LOG_TRACE_IN(""); + + this_cpu = CONTROL_THIS_CPU(); + bd = &cpu_buf[this_cpu]; + pcpu = &pcb[this_cpu]; + dev_idx = core_to_dev_map[this_cpu]; + cur_grp = CPU_STATE_current_group(pcpu); + dispatch = LWPMU_DEVICE_dispatch(&devices[dev_idx]); + pcfg = LWPMU_DEVICE_pcfg(&devices[dev_idx]); + pecb = LWPMU_DEVICE_PMU_register_data(&devices[dev_idx])[cur_grp]; + + SEP_DRV_LOG_TRACE(""); + + if (!pecb) { + SEP_DRV_LOG_TRACE_OUT( + "No programming for this device in this group."); + return; + } + + // initialize masks + DRV_MASKS_masks_num(masks) = 0; + + overflow_status = SYS_Read_MSR(ECB_entries_reg_id( + pecb, ECB_SECTION_REG_INDEX(pecb, GLOBAL_STATUS_REG_INDEX, + PMU_OPERATION_GLOBAL_STATUS))); + + if (DEV_CONFIG_pebs_mode(pcfg)) { + overflow_status = PEBS_Overflowed(this_cpu, overflow_status, 0); + } + 
overflow_status_clr = overflow_status; + SEP_DRV_LOG_TRACE("Overflow: cpu: %d, status 0x%llx.", this_cpu, + overflow_status); + index = 0; + BUFFER_DESC_sample_count(bd) = 0; + + if (dispatch->check_overflow_gp_errata) { + overflow_status = dispatch->check_overflow_gp_errata( + pecb, &overflow_status_clr); + } + + FOR_EACH_REG_CORE_OPERATION(pecb, i, PMU_OPERATION_DATA_ALL) + { + if (ECB_entries_fixed_reg_get(pecb, i)) { + index = i - + ECB_operations_register_start( + pecb, PMU_OPERATION_DATA_FIXED) + + 0x20; + } else if (ECB_entries_is_gp_reg_get(pecb, i) && + ECB_entries_reg_value(pecb, i) != 0) { + index = i - ECB_operations_register_start( + pecb, PMU_OPERATION_DATA_GP); + if (i >= (ECB_operations_register_start( + pecb, PMU_OPERATION_DATA_GP) + + 4) && + i <= (ECB_operations_register_start( + pecb, PMU_OPERATION_DATA_GP) + + 7)) { + value = SYS_Read_MSR( + ECB_entries_reg_id(pecb, i)); + if (value > 0 && value <= 0x100000000LL) { + overflow_status |= ((U64)1 << index); + } + } + } else { + continue; + } + if (overflow_status & ((U64)1 << index)) { + SEP_DRV_LOG_TRACE("Overflow: cpu: %d, index %d.", + this_cpu, index); + SEP_DRV_LOG_TRACE( + "register 0x%x --- val 0%llx.", + ECB_entries_reg_id(pecb, i), + SYS_Read_MSR(ECB_entries_reg_id(pecb, i))); + SYS_Write_MSR(ECB_entries_reg_id(pecb, i), + ECB_entries_reg_value(pecb, i)); + + if (DRV_CONFIG_enable_cp_mode(drv_cfg)) { + /* Increment the interrupt count. 
*/ + if (interrupt_counts) { + interrupt_counts + [this_cpu * + DRV_CONFIG_num_events( + drv_cfg) + + ECB_entries_event_id_index( + pecb, i)] += 1; + } + } + + DRV_EVENT_MASK_bitFields1(&event_flag) = (U8)0; + if (ECB_entries_fixed_reg_get(pecb, i)) { + CPU_STATE_p_state_counting(pcpu) = 1; + } + if (ECB_entries_precise_get(pecb, i)) { + DRV_EVENT_MASK_precise(&event_flag) = 1; + } + if (ECB_entries_lbr_value_get(pecb, i)) { + DRV_EVENT_MASK_lbr_capture(&event_flag) = 1; + } + if (ECB_entries_branch_evt_get(pecb, i)) { + DRV_EVENT_MASK_branch(&event_flag) = 1; + } + + if (DRV_MASKS_masks_num(masks) < MAX_OVERFLOW_EVENTS) { + DRV_EVENT_MASK_bitFields1( + DRV_MASKS_eventmasks(masks) + + DRV_MASKS_masks_num(masks)) = + DRV_EVENT_MASK_bitFields1(&event_flag); + DRV_EVENT_MASK_event_idx( + DRV_MASKS_eventmasks(masks) + + DRV_MASKS_masks_num(masks)) = + ECB_entries_event_id_index(pecb, i); + DRV_MASKS_masks_num(masks)++; + } else { + SEP_DRV_LOG_ERROR( + "The array for event masks is full."); + } + + SEP_DRV_LOG_TRACE("overflow -- 0x%llx, index 0x%llx.", + overflow_status, (U64)1 << index); + SEP_DRV_LOG_TRACE("slot# %d, reg_id 0x%x, index %d.", i, + ECB_entries_reg_id(pecb, i), index); + if (ECB_entries_event_id_index(pecb, i) == + CPU_STATE_trigger_event_num(pcpu)) { + CPU_STATE_trigger_count(pcpu)--; + } + } + } + END_FOR_EACH_REG_CORE_OPERATION; + + CPU_STATE_reset_mask(pcpu) = overflow_status_clr; + // Reinitialize the global overflow control register + SYS_Write_MSR(ECB_entries_reg_id( + pecb, ECB_SECTION_REG_INDEX( + pecb, GLOBAL_OVF_CTRL_REG_INDEX, + PMU_OPERATION_GLOBAL_REGS)), + overflow_status_clr); + + SEP_DRV_LOG_TRACE_OUT(""); +} + +/* ------------------------------------------------------------------------- */ +/*! + * @fn void core2_Read_Power(buffer) + * + * @param buffer - pointer to the buffer to write the data into + * + * @return None No return needed + * + * @brief Read all the power MSRs into the buffer provided and return. 
+ * + */ +static VOID corei7_Read_Power(VOID *buffer) +{ + U32 i; + U64 *pwr_buf = (U64 *)buffer; + U32 this_cpu; + U32 dev_idx; + PWR pwr; + + SEP_DRV_LOG_TRACE_IN("Buffer: %p.", buffer); + + this_cpu = CONTROL_THIS_CPU(); + dev_idx = core_to_dev_map[this_cpu]; + pwr = LWPMU_DEVICE_pwr(&devices[dev_idx]); + + for (i = 0; i < PWR_num_entries(pwr); i++) { + *pwr_buf = SYS_Read_MSR(PWR_entries_reg_id(pwr, i)); + pwr_buf++; + } + + SEP_DRV_LOG_TRACE_OUT(""); +} + +/* ------------------------------------------------------------------------- */ +/*! + * @fn core2_Read_Counts(param, id) + * + * @param param The read thread node to process + * @param id The event id for the which the sample is generated + * + * @return None No return needed + * + * @brief Read CPU event based counts for the events with reg value=0 and store into the buffer param; + * + */ +static VOID core2_Read_Counts(PVOID param, U32 id) +{ + U64 *data; + U32 this_cpu; + CPU_STATE pcpu; + U32 dev_idx; + DEV_CONFIG pcfg; + U32 event_id = 0; + + SEP_DRV_LOG_TRACE_IN("Param: %p, id: %u.", param, id); + + this_cpu = CONTROL_THIS_CPU(); + pcpu = &pcb[this_cpu]; + dev_idx = core_to_dev_map[this_cpu]; + pcfg = LWPMU_DEVICE_pcfg(&devices[dev_idx]); + + if (DEV_CONFIG_ebc_group_id_offset(pcfg)) { + // Write GroupID + data = (U64 *)((S8 *)param + + DEV_CONFIG_ebc_group_id_offset(pcfg)); + *data = CPU_STATE_current_group(pcpu) + 1; + } + + FOR_EACH_REG_CORE_OPERATION(pecb, i, PMU_OPERATION_DATA_ALL) + { + if (ECB_entries_counter_event_offset(pecb, i) == 0) { + continue; + } + data = (U64 *)((S8 *)param + + ECB_entries_counter_event_offset(pecb, i)); + event_id = ECB_entries_event_id_index(pecb, i); + if (event_id == id) { + *data = ~(ECB_entries_reg_value(pecb, i) - 1) & + ECB_entries_max_bits(pecb, i); + ; + } else { + *data = SYS_Read_MSR(ECB_entries_reg_id(pecb, i)); + SYS_Write_MSR(ECB_entries_reg_id(pecb, i), 0LL); + } + } + END_FOR_EACH_REG_CORE_OPERATION; + + if (DRV_CONFIG_enable_p_state(drv_cfg)) { + 
CPU_STATE_p_state_counting(pcpu) = 0; + } + + SEP_DRV_LOG_TRACE_OUT(""); +} + +/* ------------------------------------------------------------------------- */ +/*! + * @fn corei7_Check_Overflow_Errata(pecb) + * + * @param pecb: The current event control block + * @param overflow_status: current overflow mask + * + * @return Updated Event mask of the overflowed registers. + * + * @brief There is a bug where highly correlated precise events do + * not raise an indication on overflows in Core i7 and SNB. + */ +static U64 corei7_Check_Overflow_Errata(ECB pecb__, U64 *overflow_status_clr) +{ + U64 index = 0, value = 0, overflow_status = 0; + + SEP_DRV_LOG_TRACE_IN("PECB: %p, overflow_status_clr: %p.", pecb__, + overflow_status_clr); + + overflow_status = *overflow_status_clr; + + if (DRV_CONFIG_num_events(drv_cfg) == 1) { + SEP_DRV_LOG_TRACE_OUT("Res = %llu (num_events = 1).", + overflow_status); + return overflow_status; + } + + FOR_EACH_REG_CORE_OPERATION(pecb, i, PMU_OPERATION_DATA_ALL) + { + if (ECB_entries_reg_value(pecb, i) == 0) { + continue; + } + if (ECB_entries_is_gp_reg_get(pecb, i)) { + index = i - ECB_operations_register_start( + pecb, PMU_OPERATION_DATA_GP); + value = SYS_Read_MSR(ECB_entries_reg_id(pecb, i)); + if (value > 0LL && value <= 0x100000000LL) { + overflow_status |= ((U64)1 << index); + *overflow_status_clr |= ((U64)1 << index); + SEP_DRV_LOG_TRACE("Counter 0x%x value 0x%llx.", + ECB_entries_reg_id(pecb, i), + value); + } + continue; + } + if (ECB_entries_fixed_reg_get(pecb, i)) { + index = i - + ECB_operations_register_start( + pecb, PMU_OPERATION_DATA_FIXED) + + 0x20; + if (!(overflow_status & ((U64)1 << index))) { + value = SYS_Read_MSR( + ECB_entries_reg_id(pecb, i)); + if (ECB_entries_reg_id(pecb, i) == + ECB_entries_reg_id( + pecb, + ECB_SECTION_REG_INDEX( + pecb, 0, + PMU_OPERATION_CHECK_OVERFLOW_GP_ERRATA))) { + if (!(value > 0LL && + value <= 0x1000000LL) && + (*overflow_status_clr & + ((U64)1 << index))) { + //Clear it only for 
overflow_status so that we do not create sample records + //Please do not remove the check for MSR index + overflow_status = + overflow_status & + ~((U64)1 << index); + continue; + } + } + if (value > 0LL && value <= 0x100000000LL) { + overflow_status |= ((U64)1 << index); + *overflow_status_clr |= + ((U64)1 << index); + SEP_DRV_LOG_TRACE( + "counter 0x%x value 0x%llx\n", + ECB_entries_reg_id(pecb, i), + value); + } + } + } + } + END_FOR_EACH_REG_CORE_OPERATION; + + SEP_DRV_LOG_TRACE_OUT("Res = %llu.", overflow_status); + return overflow_status; +} + +/* ------------------------------------------------------------------------- */ +/*! + * @fn U64 corei7_Read_Platform_Info + * + * @brief Reads the MSR_PLATFORM_INFO register if present + * + * @param void + * + * @return value read from the register + * + * Special Notes: + * + */ +static VOID corei7_Platform_Info(PVOID data) +{ + DRV_PLATFORM_INFO platform_data = (DRV_PLATFORM_INFO)data; + U64 value = 0; + + SEP_DRV_LOG_TRACE_IN("Data: %p.", data); + + if (!platform_data) { + SEP_DRV_LOG_TRACE_OUT("Platform_data is NULL!"); + return; + } + + DRV_PLATFORM_INFO_energy_multiplier(platform_data) = 0; + +#define IA32_MSR_PLATFORM_INFO 0xCE + value = SYS_Read_MSR(IA32_MSR_PLATFORM_INFO); + + DRV_PLATFORM_INFO_info(platform_data) = value; + DRV_PLATFORM_INFO_ddr_freq_index(platform_data) = 0; +#undef IA32_MSR_PLATFORM_INFO +#define IA32_MSR_MISC_ENABLE 0x1A4 + DRV_PLATFORM_INFO_misc_valid(platform_data) = 1; + value = SYS_Read_MSR(IA32_MSR_MISC_ENABLE); + DRV_PLATFORM_INFO_misc_info(platform_data) = value; +#undef IA32_MSR_MISC_ENABLE + SEP_DRV_LOG_TRACE("Read from MSR_ENERGY_MULTIPLIER reg is %llu.", + SYS_Read_MSR(MSR_ENERGY_MULTIPLIER)); + DRV_PLATFORM_INFO_energy_multiplier(platform_data) = + (U32)(SYS_Read_MSR(MSR_ENERGY_MULTIPLIER) & 0x00001F00) >> 8; + + SEP_DRV_LOG_TRACE_OUT(""); +} + +/* ------------------------------------------------------------------------- */ +/*! 
+ * @fn U64 corei7_Platform_Info_Nehalem + * + * @brief Reads the MSR_PLATFORM_INFO register if present + * + * @param void + * + * @return value read from the register + * + * Special Notes: + * + */ +static VOID corei7_Platform_Info_Nehalem(PVOID data) +{ + DRV_PLATFORM_INFO platform_data = (DRV_PLATFORM_INFO)data; + U64 value = 0; + + SEP_DRV_LOG_TRACE_IN("Data: %p.", data); + + if (!platform_data) { + SEP_DRV_LOG_TRACE_OUT("Platform_data is NULL!"); + return; + } + +#define IA32_MSR_PLATFORM_INFO 0xCE + value = SYS_Read_MSR(IA32_MSR_PLATFORM_INFO); + + DRV_PLATFORM_INFO_info(platform_data) = value; + DRV_PLATFORM_INFO_ddr_freq_index(platform_data) = 0; +#undef IA32_MSR_PLATFORM_INFO +#define IA32_MSR_MISC_ENABLE 0x1A4 + DRV_PLATFORM_INFO_misc_valid(platform_data) = 1; + value = SYS_Read_MSR(IA32_MSR_MISC_ENABLE); + DRV_PLATFORM_INFO_misc_info(platform_data) = value; +#undef IA32_MSR_MISC_ENABLE + DRV_PLATFORM_INFO_energy_multiplier(platform_data) = 0; + + SEP_DRV_LOG_TRACE_OUT(""); +} + +/* + * Initialize the dispatch table + */ +DISPATCH_NODE core2_dispatch = { .init = core2_Initialize, + .fini = core2_Destroy, + .write = core2_Write_PMU, + .freeze = core2_Disable_PMU, + .restart = core2_Enable_PMU, + .read_data = core2_Read_PMU_Data, + .check_overflow = core2_Check_Overflow, + .swap_group = core2_Swap_Group, + .read_lbrs = core2_Read_LBRs, + .cleanup = core2_Clean_Up, + .hw_errata = NULL, + .read_power = NULL, + .check_overflow_errata = + core2_Check_Overflow_Errata, + .read_counts = core2_Read_Counts, + .check_overflow_gp_errata = NULL, + .read_ro = NULL, + .platform_info = NULL, + .trigger_read = NULL, + .scan_for_uncore = NULL, + .read_metrics = NULL }; + +DISPATCH_NODE corei7_dispatch = { .init = core2_Initialize, + .fini = core2_Destroy, + .write = core2_Write_PMU, + .freeze = core2_Disable_PMU, + .restart = core2_Enable_PMU, + .read_data = core2_Read_PMU_Data, + .check_overflow = core2_Check_Overflow, + .swap_group = core2_Swap_Group, + .read_lbrs = 
corei7_Read_LBRs, + .cleanup = core2_Clean_Up, + .hw_errata = corei7_Errata_Fix, + .read_power = corei7_Read_Power, + .check_overflow_errata = NULL, + .read_counts = core2_Read_Counts, + .check_overflow_gp_errata = + corei7_Check_Overflow_Errata, + .read_ro = NULL, + .platform_info = corei7_Platform_Info, + .trigger_read = NULL, + .scan_for_uncore = NULL, + .read_metrics = NULL }; + +DISPATCH_NODE corei7_dispatch_2 = { .init = core2_Initialize, + .fini = core2_Destroy, + .write = core2_Write_PMU, + .freeze = core2_Disable_PMU, + .restart = corei7_Enable_PMU_2, + .read_data = core2_Read_PMU_Data, + .check_overflow = core2_Check_Overflow, + .swap_group = core2_Swap_Group, + .read_lbrs = corei7_Read_LBRs, + .cleanup = core2_Clean_Up, + .hw_errata = corei7_Errata_Fix_2, + .read_power = corei7_Read_Power, + .check_overflow_errata = NULL, + .read_counts = core2_Read_Counts, + .check_overflow_gp_errata = + corei7_Check_Overflow_Errata, + .read_ro = NULL, + .platform_info = corei7_Platform_Info, + .trigger_read = NULL, + .scan_for_uncore = NULL, + .read_metrics = NULL }; + +DISPATCH_NODE corei7_dispatch_nehalem = { + .init = core2_Initialize, + .fini = core2_Destroy, + .write = core2_Write_PMU, + .freeze = core2_Disable_PMU, + .restart = core2_Enable_PMU, + .read_data = core2_Read_PMU_Data, + .check_overflow = core2_Check_Overflow, + .swap_group = core2_Swap_Group, + .read_lbrs = corei7_Read_LBRs, + .cleanup = core2_Clean_Up, + .hw_errata = corei7_Errata_Fix, + .read_power = corei7_Read_Power, + .check_overflow_errata = NULL, + .read_counts = core2_Read_Counts, + .check_overflow_gp_errata = corei7_Check_Overflow_Errata, + .read_ro = NULL, + .platform_info = corei7_Platform_Info_Nehalem, + .trigger_read = NULL, + .scan_for_uncore = NULL, + .read_metrics = NULL +}; + +DISPATCH_NODE corei7_dispatch_htoff_mode = { + .init = core2_Initialize, + .fini = core2_Destroy, + .write = core2_Write_PMU, + .freeze = core2_Disable_PMU, + .restart = core2_Enable_PMU, + .read_data = 
core2_Read_PMU_Data, + .check_overflow = core2_Check_Overflow_Htoff_Mode, + .swap_group = core2_Swap_Group, + .read_lbrs = corei7_Read_LBRs, + .cleanup = core2_Clean_Up, + .hw_errata = corei7_Errata_Fix, + .read_power = corei7_Read_Power, + .check_overflow_errata = NULL, + .read_counts = core2_Read_Counts, + .check_overflow_gp_errata = corei7_Check_Overflow_Errata, + .read_ro = NULL, + .platform_info = corei7_Platform_Info, + .trigger_read = NULL, + .scan_for_uncore = NULL, + .read_metrics = NULL +}; + +DISPATCH_NODE corei7_dispatch_htoff_mode_2 = { + .init = core2_Initialize, + .fini = core2_Destroy, + .write = core2_Write_PMU, + .freeze = core2_Disable_PMU, + .restart = corei7_Enable_PMU_2, + .read_data = core2_Read_PMU_Data, + .check_overflow = core2_Check_Overflow_Htoff_Mode, + .swap_group = core2_Swap_Group, + .read_lbrs = corei7_Read_LBRs, + .cleanup = core2_Clean_Up, + .hw_errata = corei7_Errata_Fix_2, + .read_power = corei7_Read_Power, + .check_overflow_errata = NULL, + .read_counts = core2_Read_Counts, + .check_overflow_gp_errata = corei7_Check_Overflow_Errata, + .read_ro = NULL, + .platform_info = corei7_Platform_Info, + .trigger_read = NULL, + .scan_for_uncore = NULL, + .read_metrics = NULL +}; diff --git a/drivers/platform/x86/sepdk/sep/cpumon.c b/drivers/platform/x86/sepdk/sep/cpumon.c new file mode 100755 index 0000000000000..ac8ade14f106c --- /dev/null +++ b/drivers/platform/x86/sepdk/sep/cpumon.c @@ -0,0 +1,357 @@ +/* **************************************************************************** + * Copyright(C) 2009-2018 Intel Corporation. All Rights Reserved. + * + * This file is part of SEP Development Kit + * + * SEP Development Kit is free software; you can redistribute it + * and/or modify it under the terms of the GNU General Public License + * version 2 as published by the Free Software Foundation. 
+ * + * SEP Development Kit is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * As a special exception, you may use this file as part of a free software + * library without restriction. Specifically, if other files instantiate + * templates or use macros or inline functions from this file, or you + * compile this file and link it with other files to produce an executable + * this file does not by itself cause the resulting executable to be + * covered by the GNU General Public License. This exception does not + * however invalidate any other reasons why the executable file might be + * covered by the GNU General Public License. + * **************************************************************************** + */ + +/* + * CVS_Id="$Id$" + */ + +#include "lwpmudrv_defines.h" +#include +#include +#if defined(DRV_EM64T) +#include +#endif + +#include "lwpmudrv_types.h" +#include "lwpmudrv_ecb.h" +#include "apic.h" +#include "lwpmudrv.h" +#include "control.h" +#include "utility.h" +#include "cpumon.h" +#include "pmi.h" +#include "sys_info.h" + +#include +#include + +#if !defined(DRV_SEP_ACRN_ON) +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 2, 0)) +#include +static int cpumon_NMI_Handler(unsigned int cmd, struct pt_regs *regs) +{ + U32 captured_state = GET_DRIVER_STATE(); + + if (DRIVER_STATE_IN(captured_state, STATE_BIT_RUNNING | + STATE_BIT_PAUSING | + STATE_BIT_PREPARE_STOP | + STATE_BIT_TERMINATING)) { + if (captured_state != DRV_STATE_TERMINATING) { + PMI_Interrupt_Handler(regs); + } + return NMI_HANDLED; + } else { + return NMI_DONE; + } +} + +#define EBS_NMI_CALLBACK cpumon_NMI_Handler + +#else +#include +static int cpumon_NMI_Handler(struct notifier_block *self, unsigned long val, + void *data) +{ + struct die_args *args = (struct die_args *)data; + U32 captured_state = 
GET_DRIVER_STATE(); + + if (args) { + switch (val) { + case DIE_NMI: +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 38)) + case DIE_NMI_IPI: +#endif + if (DRIVER_STATE_IN(captured_state, + STATE_BIT_RUNNING | + STATE_BIT_PAUSING | + STATE_BIT_PREPARE_STOP | + STATE_BIT_TERMINATING)) { + if (captured_state != DRV_STATE_TERMINATING) { + PMI_Interrupt_Handler(args->regs); + } + return NOTIFY_STOP; + } + } + } + return NOTIFY_DONE; +} + +static struct notifier_block cpumon_notifier = { .notifier_call = + cpumon_NMI_Handler, + .next = NULL, +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 38)) + .priority = 2 +#else + .priority = NMI_LOCAL_LOW_PRIOR, +#endif +}; +#endif +#endif + +static volatile S32 cpuhook_installed; + +/* + * CPU Monitoring Functionality + */ + +/* + * General per-processor initialization + */ +#if defined(DRV_CPU_HOTPLUG) +/* ------------------------------------------------------------------------- */ +/*! + * @fn DRV_BOOL CPUMON_is_Online_Allowed() + * + * @param None + * + * @return DRV_BOOL TRUE if cpu is allowed to go Online, else FALSE + * + * @brief Checks if the cpu is allowed to go online during the + * @brief current driver state + * + */ +DRV_BOOL CPUMON_is_Online_Allowed(void) +{ + DRV_BOOL is_allowed = FALSE; +#if !defined(DRV_SEP_ACRN_ON) + U32 cur_driver_state; + + SEP_DRV_LOG_TRACE_IN(""); + + cur_driver_state = GET_DRIVER_STATE(); + + switch (cur_driver_state) { + case DRV_STATE_IDLE: + case DRV_STATE_PAUSED: + case DRV_STATE_RUNNING: + case DRV_STATE_PAUSING: + is_allowed = TRUE; + break; + default: + SEP_DRV_LOG_TRACE( + "CPU is prohibited to online in driver state %d.", + cur_driver_state); + break; + } +#endif + + SEP_DRV_LOG_TRACE_OUT("Res: %u.", is_allowed); + return is_allowed; +} + +/* ------------------------------------------------------------------------- */ +/*! 
+ * @fn DRV_BOOL CPUMON_is_Offline_Allowed() + * + * @param None + * + * @return DRV_BOOL TRUE if cpu is allowed to go Offline, else FALSE + * + * @brief Checks if the cpu is allowed to go offline during the + * @brief current driver state + * + */ +DRV_BOOL CPUMON_is_Offline_Allowed(void) +{ + DRV_BOOL is_allowed = FALSE; +#if !defined(DRV_SEP_ACRN_ON) + U32 cur_driver_state; + + SEP_DRV_LOG_TRACE_IN(""); + + cur_driver_state = GET_DRIVER_STATE(); + + switch (cur_driver_state) { + case DRV_STATE_PAUSED: + case DRV_STATE_RUNNING: + case DRV_STATE_PAUSING: + is_allowed = TRUE; + break; + default: + SEP_DRV_LOG_TRACE( + "CPU is prohibited to offline in driver state %d.", + cur_driver_state); + break; + } +#endif + + SEP_DRV_LOG_TRACE_OUT("Res: %u.", is_allowed); + return is_allowed; +} + +/* ------------------------------------------------------------------------- */ +/*! + * @fn VOID CPUMON_Online_Cpu( + * PVOID param) + * + * @param PVOID parm + * + * @return None + * + * @brief Sets a cpu online, initialize APIC on it, + * @brief Build the sys_info for this cpu + * + */ +VOID CPUMON_Online_Cpu(PVOID param) +{ + S32 this_cpu; + CPU_STATE pcpu; + + SEP_DRV_LOG_TRACE_IN("Dummy parm: %p.", parm); + + if (param == NULL) { + preempt_disable(); + this_cpu = CONTROL_THIS_CPU(); + preempt_enable(); + } else { + this_cpu = *(S32 *)param; + } + pcpu = &pcb[this_cpu]; + if (pcpu == NULL) { + SEP_DRV_LOG_WARNING_TRACE_OUT("Unable to set CPU %d online!", + this_cpu); + return; + } + SEP_DRV_LOG_INIT("Setting CPU %d online, PCPU = %p.", this_cpu, pcpu); + CPU_STATE_offlined(pcpu) = FALSE; + CPU_STATE_accept_interrupt(pcpu) = 1; + CPU_STATE_initial_mask(pcpu) = 1; + CPU_STATE_group_swap(pcpu) = 1; + APIC_Init(NULL); + APIC_Install_Interrupt_Handler(NULL); + + SYS_INFO_Build_Cpu(NULL); + + SEP_DRV_LOG_TRACE_OUT(""); +} + +/* ------------------------------------------------------------------------- */ +/*! 
+ * @fn VOID CPUMON_Offline_Cpu( + * PVOID param) + * + * @param PVOID parm + * + * @return None + * + * @brief Sets a cpu offline + * + */ +VOID CPUMON_Offline_Cpu(PVOID param) +{ + S32 this_cpu; + CPU_STATE pcpu; + + SEP_DRV_LOG_TRACE_IN("Dummy parm: %p.", parm); + + if (param == NULL) { + preempt_disable(); + this_cpu = CONTROL_THIS_CPU(); + preempt_enable(); + } else { + this_cpu = *(S32 *)param; + } + pcpu = &pcb[this_cpu]; + + if (pcpu == NULL) { + SEP_DRV_LOG_WARNING_TRACE_OUT("Unable to set CPU %d offline.", + this_cpu); + return; + } + SEP_DRV_LOG_INIT("Setting CPU %d offline.", this_cpu); + CPU_STATE_offlined(pcpu) = TRUE; + + SEP_DRV_LOG_TRACE_OUT(""); +} +#endif + +/* ------------------------------------------------------------------------- */ +/*! + * @fn extern void CPUMON_Install_Cpuhooks(void) + * + * @param None + * + * @return None No return needed + * + * @brief set up the interrupt handler (on a per-processor basis) + * @brief Initialize the APIC in two phases (current CPU, then others) + * + */ +VOID CPUMON_Install_Cpuhooks(void) +{ +#if !defined(DRV_SEP_ACRN_ON) + S32 me = 0; + + SEP_DRV_LOG_TRACE_IN(""); + + if (cpuhook_installed) { + SEP_DRV_LOG_WARNING_TRACE_OUT("Cpuhook already installed."); + return; + } + + CONTROL_Invoke_Parallel(APIC_Init, NULL); + CONTROL_Invoke_Parallel(APIC_Install_Interrupt_Handler, + (PVOID)(size_t)me); +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 2, 0)) + register_nmi_handler(NMI_LOCAL, EBS_NMI_CALLBACK, 0, "sep_pmi"); +#else + register_die_notifier(&cpumon_notifier); +#endif + + cpuhook_installed = 1; + + SEP_DRV_LOG_TRACE_OUT(""); +#endif +} + +/* ------------------------------------------------------------------------- */ +/*! 
 * @fn extern void CPUMON_Remove_Cpuhooks(void)
 *
 * @param None
 *
 * @return None No return needed
 *
 * @brief De-Initialize the APIC in phases
 * @brief clean up the interrupt handler (on a per-processor basis)
 *
 */
VOID CPUMON_Remove_Cpuhooks(void)
{
	SEP_DRV_LOG_TRACE_IN("");

#if !defined(DRV_SEP_ACRN_ON)
	/* Restore the local vector table on every CPU before unhooking. */
	CONTROL_Invoke_Parallel(APIC_Restore_LVTPC, NULL);
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 2, 0))
	unregister_nmi_handler(NMI_LOCAL, "sep_pmi");
#else
	unregister_die_notifier(&cpumon_notifier);
#endif

	/* Allow CPUMON_Install_Cpuhooks() to run again. */
	cpuhook_installed = 0;
#endif

	SEP_DRV_LOG_TRACE_OUT("");
}
diff --git a/drivers/platform/x86/sepdk/sep/eventmux.c b/drivers/platform/x86/sepdk/sep/eventmux.c
new file mode 100755
index 0000000000000..1d8099dc674ab
--- /dev/null
+++ b/drivers/platform/x86/sepdk/sep/eventmux.c
@@ -0,0 +1,446 @@
/* ****************************************************************************
 * Copyright(C) 2009-2018 Intel Corporation. All Rights Reserved.
 *
 * This file is part of SEP Development Kit
 *
 * SEP Development Kit is free software; you can redistribute it
 * and/or modify it under the terms of the GNU General Public License
 * version 2 as published by the Free Software Foundation.
 *
 * SEP Development Kit is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * As a special exception, you may use this file as part of a free software
 * library without restriction. Specifically, if other files instantiate
 * templates or use macros or inline functions from this file, or you
 * compile this file and link it with other files to produce an executable
 * this file does not by itself cause the resulting executable to be
 * covered by the GNU General Public License.
This exception does not + * however invalidate any other reasons why the executable file might be + * covered by the GNU General Public License. + * **************************************************************************** + */ + +#include "lwpmudrv_defines.h" +#include +#include +#include +#include +#include "lwpmudrv_types.h" +#include "lwpmudrv_ecb.h" +#include "lwpmudrv_struct.h" +#include "lwpmudrv.h" +#include "control.h" +#include "utility.h" +#include "eventmux.h" + +static PVOID em_tables; +static size_t em_tables_size; + +/* ------------------------------------------------------------------------- */ +/*! + * @fn VOID eventmux_Allocate_Groups ( + * VOID *params + * ) + * + * @brief Allocate memory need to support event multiplexing + * + * @param params - pointer to a S32 that holds the size of buffer to allocate + * + * @return NONE + * + * Special Notes: + * Allocate the memory needed to save different group counters + * Called via the parallel control mechanism + */ +static VOID eventmux_Allocate_Groups(PVOID params) +{ + U32 this_cpu; + CPU_STATE cpu_state; + U32 dev_idx; + EVENT_CONFIG ec; + + SEP_DRV_LOG_TRACE_IN(""); + + preempt_disable(); + this_cpu = CONTROL_THIS_CPU(); + cpu_state = &pcb[this_cpu]; + dev_idx = core_to_dev_map[this_cpu]; + ec = LWPMU_DEVICE_ec(&devices[dev_idx]); + preempt_enable(); + + if (EVENT_CONFIG_mode(ec) == EM_DISABLED || + EVENT_CONFIG_num_groups(ec) == 1) { + return; + } + + CPU_STATE_em_tables(cpu_state) = + em_tables + CPU_STATE_em_table_offset(cpu_state); + + SEP_DRV_LOG_TRACE_OUT(""); +} + +/* ------------------------------------------------------------------------- */ +/*! 
+ * @fn VOID eventmux_Deallocate_Groups ( + * VOID *params + * ) + * + * @brief Free the scratch memory need to support event multiplexing + * + * @param params - pointer to NULL + * + * @return NONE + * + * Special Notes: + * Free the memory needed to save different group counters + * Called via the parallel control mechanism + */ +static VOID eventmux_Deallocate_Groups(PVOID params) +{ + U32 this_cpu; + CPU_STATE cpu_state; + U32 dev_idx; + EVENT_CONFIG ec; + + SEP_DRV_LOG_TRACE_IN(""); + + preempt_disable(); + this_cpu = CONTROL_THIS_CPU(); + cpu_state = &pcb[this_cpu]; + dev_idx = core_to_dev_map[this_cpu]; + ec = LWPMU_DEVICE_ec(&devices[dev_idx]); + preempt_enable(); + + if (EVENT_CONFIG_mode(ec) == EM_DISABLED || + EVENT_CONFIG_num_groups(ec) == 1) { + return; + } + + CPU_STATE_em_tables(cpu_state) = NULL; + + SEP_DRV_LOG_TRACE_OUT(""); +} + +/* ------------------------------------------------------------------------- */ +/*! + * @fn VOID eventmux_Timer_Callback_Thread ( + * ) + * + * @brief Stop all the timer threads and terminate them + * + * @param none + * + * @return NONE + * + * Special Notes: + * timer routine - The event multiplexing happens here. 
+ */ +static VOID eventmux_Timer_Callback_Thread( +#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 15, 0) + struct timer_list *tl +#else + unsigned long arg +#endif +) +{ + U32 this_cpu; + CPU_STATE pcpu; + U32 dev_idx; + DISPATCH dispatch; + +#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 15, 0) + SEP_DRV_LOG_TRACE_IN(""); +#else + SEP_DRV_LOG_TRACE_IN("Arg: %u.", (U32)arg); +#endif + + preempt_disable(); + this_cpu = CONTROL_THIS_CPU(); + pcpu = &pcb[this_cpu]; + dev_idx = core_to_dev_map[this_cpu]; + dispatch = LWPMU_DEVICE_dispatch(&devices[dev_idx]); + preempt_enable(); + + if (CPU_STATE_em_tables(pcpu) == NULL) { + SEP_DRV_LOG_ERROR_TRACE_OUT("Em_tables is NULL!"); + return; + } + + dispatch->swap_group(TRUE); +#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 15, 0) + mod_timer(CPU_STATE_em_timer(pcpu), + jiffies + CPU_STATE_em_timer_delay(pcpu)); +#else + CPU_STATE_em_timer(pcpu)->expires = jiffies + arg; + add_timer(CPU_STATE_em_timer(pcpu)); +#endif + + SEP_DRV_LOG_TRACE_OUT(""); +} + +/* ------------------------------------------------------------------------- */ +/*! + * @fn VOID eventmux_Prepare_Timer_Threads ( + * VOID + * ) + * + * @brief Stop all the timer threads and terminate them + * + * @param NONE + * + * @return NONE + * + * Special Notes: + * Set up the timer threads to prepare for event multiplexing. + * Do not start the threads as yet + */ +static VOID eventmux_Prepare_Timer_Threads(PVOID arg) +{ + U32 this_cpu; + CPU_STATE pcpu; + U32 dev_idx; + EVENT_CONFIG ec; + + SEP_DRV_LOG_TRACE_IN(""); + + // initialize and set up the timer for all cpus + // Do not start the timer as yet. 
+ preempt_disable(); + this_cpu = CONTROL_THIS_CPU(); + pcpu = &pcb[this_cpu]; + dev_idx = core_to_dev_map[this_cpu]; + ec = LWPMU_DEVICE_ec(&devices[dev_idx]); + preempt_enable(); + + if (EVENT_CONFIG_mode(ec) != EM_TIMER_BASED) { + return; + } + + CPU_STATE_em_timer(pcpu) = (struct timer_list *)CONTROL_Allocate_Memory( + sizeof(struct timer_list)); + + if (CPU_STATE_em_timer(pcpu) == NULL) { + SEP_DRV_LOG_ERROR_TRACE_OUT("Pcpu = NULL!"); + return; + } + + SEP_DRV_LOG_TRACE_OUT(""); +} + +/* ------------------------------------------------------------------------- */ +/*! + * @fn VOID eventmux_Cancel_Timers ( + * VOID + * ) + * + * @brief Stop all the timer threads and terminate them + * + * @param NONE + * + * @return NONE + * + * Special Notes: + * Cancel all the timer threads that have been started + */ +static VOID eventmux_Cancel_Timers(void) +{ + CPU_STATE pcpu; + S32 i; + U32 dev_idx; + EVENT_CONFIG ec; + + SEP_DRV_LOG_TRACE_IN(""); + + /* + * Cancel the timer for all active CPUs + */ + for (i = 0; i < GLOBAL_STATE_active_cpus(driver_state); i++) { + pcpu = &pcb[i]; + dev_idx = core_to_dev_map[i]; + ec = LWPMU_DEVICE_ec(&devices[dev_idx]); + if (EVENT_CONFIG_mode(ec) != EM_TIMER_BASED) { + continue; + } + del_timer_sync(CPU_STATE_em_timer(pcpu)); + CPU_STATE_em_timer(pcpu) = + (struct timer_list *)CONTROL_Free_Memory( + CPU_STATE_em_timer(pcpu)); + } + + SEP_DRV_LOG_TRACE_OUT(""); +} + +/* ------------------------------------------------------------------------- */ +/*! 
+ * @fn VOID eventmux_Start_Timers ( + * long unsigned arg + * ) + * + * @brief Start the timer on a single cpu + * + * @param delay interval time in jiffies + * + * @return NONE + * + * Special Notes: + * start the timer on a single cpu + * Call from each cpu to get cpu affinity for Timer_Callback_Thread + */ +static VOID eventmux_Start_Timers(PVOID arg) +{ + U32 this_cpu; + CPU_STATE pcpu; + U32 dev_idx; + EVENT_CONFIG ec; + unsigned long delay; + + SEP_DRV_LOG_TRACE_IN(""); + + preempt_disable(); + this_cpu = CONTROL_THIS_CPU(); + pcpu = &pcb[this_cpu]; + dev_idx = core_to_dev_map[this_cpu]; + ec = LWPMU_DEVICE_ec(&devices[dev_idx]); + preempt_enable(); + + if (EVENT_CONFIG_mode(ec) != EM_TIMER_BASED || + EVENT_CONFIG_num_groups(ec) == 1) { + return; + } + + /* + * notice we want to use group 0's time slice for the initial timer + */ + delay = msecs_to_jiffies(EVENT_CONFIG_em_factor(ec)); + +#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 15, 0) + CPU_STATE_em_timer_delay(pcpu) = delay; + timer_setup(CPU_STATE_em_timer(pcpu), eventmux_Timer_Callback_Thread, + 0); + mod_timer(CPU_STATE_em_timer(pcpu), + jiffies + CPU_STATE_em_timer_delay(pcpu)); +#else + init_timer(CPU_STATE_em_timer(pcpu)); + CPU_STATE_em_timer(pcpu)->function = eventmux_Timer_Callback_Thread; + CPU_STATE_em_timer(pcpu)->data = delay; + CPU_STATE_em_timer(pcpu)->expires = jiffies + delay; + add_timer(CPU_STATE_em_timer(pcpu)); +#endif + + SEP_DRV_LOG_TRACE_OUT(""); +} + +/* ------------------------------------------------------------------------- */ +/*! 
+ * @fn VOID EVENTMUX_Start ( + * VOID + * ) + * + * @brief Start the timers and enable all the threads + * + * @param NONE + * + * @return NONE + * + * Special Notes: + * if event multiplexing has been enabled, set up the time slices and + * start the timer threads for all the timers + */ +VOID EVENTMUX_Start(void) +{ + SEP_DRV_LOG_TRACE_IN(""); + + /* + * Start the timer for all cpus + */ + CONTROL_Invoke_Parallel(eventmux_Start_Timers, NULL); + + SEP_DRV_LOG_TRACE_OUT(""); +} + +/* ------------------------------------------------------------------------- */ +/*! + * @fn VOID EVENTMUX_Initialize ( + * VOID + * ) + * + * @brief Initialize the event multiplexing module + * + * @param NONE + * + * @return NONE + * + * Special Notes: + * if event multiplexing has been enabled, + * then allocate the memory needed to save and restore all the counter data + * set up the timers needed, but do not start them + */ +VOID EVENTMUX_Initialize(void) +{ + S32 size_of_vector; + S32 cpu_num; + CPU_STATE pcpu; + U32 dev_idx; + EVENT_CONFIG ec; + + SEP_DRV_LOG_TRACE_IN(""); + + for (cpu_num = 0; cpu_num < GLOBAL_STATE_num_cpus(driver_state); + cpu_num++) { + pcpu = &pcb[cpu_num]; + dev_idx = core_to_dev_map[cpu_num]; + ec = LWPMU_DEVICE_ec(&devices[dev_idx]); + if (EVENT_CONFIG_mode(ec) == EM_DISABLED || + EVENT_CONFIG_num_groups(ec) == 1) { + continue; + } + size_of_vector = EVENT_CONFIG_num_groups(ec) * + EVENT_CONFIG_max_gp_events(ec) * sizeof(S64); + CPU_STATE_em_table_offset(pcpu) = em_tables_size; + em_tables_size += size_of_vector; + } + + if (em_tables_size) { + em_tables = CONTROL_Allocate_Memory(em_tables_size); + } + CONTROL_Invoke_Parallel(eventmux_Allocate_Groups, NULL); + + CONTROL_Invoke_Parallel(eventmux_Prepare_Timer_Threads, + (VOID *)(size_t)0); + + SEP_DRV_LOG_TRACE_OUT(""); +} + +/* ------------------------------------------------------------------------- */ +/*! 
+ * @fn VOID EVENTMUX_Destroy ( + * VOID + * ) + * + * @brief Clean up the event multiplexing threads + * + * @param NONE + * + * @return NONE + * + * Special Notes: + * if event multiplexing has been enabled, then stop and cancel all the timers + * free up all the memory that is associated with EM + */ +VOID EVENTMUX_Destroy(void) +{ + SEP_DRV_LOG_TRACE_IN(""); + + eventmux_Cancel_Timers(); + + if (em_tables) { + em_tables = CONTROL_Free_Memory(em_tables); + em_tables_size = 0; + } + CONTROL_Invoke_Parallel(eventmux_Deallocate_Groups, (VOID *)(size_t)0); + + SEP_DRV_LOG_TRACE_OUT(""); +} diff --git a/drivers/platform/x86/sepdk/sep/gfx.c b/drivers/platform/x86/sepdk/sep/gfx.c new file mode 100755 index 0000000000000..38342f6740c41 --- /dev/null +++ b/drivers/platform/x86/sepdk/sep/gfx.c @@ -0,0 +1,261 @@ +/* **************************************************************************** + * Copyright(C) 2009-2018 Intel Corporation. All Rights Reserved. + * + * This file is part of SEP Development Kit + * + * SEP Development Kit is free software; you can redistribute it + * and/or modify it under the terms of the GNU General Public License + * version 2 as published by the Free Software Foundation. + * + * SEP Development Kit is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * As a special exception, you may use this file as part of a free software + * library without restriction. Specifically, if other files instantiate + * templates or use macros or inline functions from this file, or you + * compile this file and link it with other files to produce an executable + * this file does not by itself cause the resulting executable to be + * covered by the GNU General Public License. 
This exception does not + * however invalidate any other reasons why the executable file might be + * covered by the GNU General Public License. + * **************************************************************************** + */ + +#include +#include + +#include "lwpmudrv_defines.h" +#include "lwpmudrv_types.h" +#include "lwpmudrv_ecb.h" +#include "lwpmudrv_gfx.h" +#include "lwpmudrv.h" +#include "inc/pci.h" +#include "gfx.h" +#include "utility.h" + +static char *gfx_virtual_addr; +static SEP_MMIO_NODE gfx_map; +static U32 gfx_code = GFX_CTRL_DISABLE; +static U32 gfx_counter[GFX_NUM_COUNTERS]; +static U32 gfx_overflow[GFX_NUM_COUNTERS]; + +/*! + * @fn OS_STATUS GFX_Read + * + * @brief Reads the counters into the buffer provided for the purpose + * + * @param buffer - buffer to read the counts into + * + * @return STATUS_SUCCESS if read succeeded, otherwise error + * + * @note + */ +OS_STATUS GFX_Read(S8 *buffer) +{ + U64 *samp = (U64 *)buffer; + U32 i; + U32 val; + char *reg_addr; + + SEP_DRV_LOG_TRACE_IN("Buffer: %p.", buffer); + + // GFX counting was not specified + if (gfx_virtual_addr == NULL || gfx_code == GFX_CTRL_DISABLE) { + SEP_DRV_LOG_ERROR_TRACE_OUT( + "OS_INVALID (!gfx_virtual_addr || gfx_code==GFX_CTRL_DISABLE)"); + return OS_INVALID; + } + + // check for sampling buffer + if (!samp) { + SEP_DRV_LOG_ERROR_TRACE_OUT("OS_INVALID (!samp)."); + return OS_INVALID; + } + + // set the GFX register address + reg_addr = gfx_virtual_addr + GFX_PERF_REG; + + // for all counters - save the information to the sampling stream + for (i = 0; i < GFX_NUM_COUNTERS; i++) { + // read the ith GFX event count + reg_addr += 4; + val = *(U32 *)(reg_addr); +#if defined(GFX_COMPUTE_DELTAS) + // if the current count is bigger than the previous one, + // then the counter overflowed + // so make sure the delta gets adjusted to account for it + if (val < gfx_counter[i]) { + samp[i] = val + (GFX_CTR_OVF_VAL - gfx_counter[i]); + } else { + samp[i] = val - gfx_counter[i]; + } +#else 
+ // just keep track of raw count for this counter + // if the current count is bigger than the previous one, + // then the counter overflowed + if (val < gfx_counter[i]) { + gfx_overflow[i]++; + } + samp[i] = val + (U64)gfx_overflow[i] * GFX_CTR_OVF_VAL; +#endif + // save the current count + gfx_counter[i] = val; + } + + SEP_DRV_LOG_TRACE_OUT("OS_SUCCESS."); + return OS_SUCCESS; +} + +/*! + * @fn OS_STATUS GFX_Set_Event_Code + * + * @brief Programs the Graphics PMU with the right event code + * + * @param arg - buffer containing graphics event code + * + * @return STATUS_SUCCESS if success, otherwise error + * + * @note + */ +OS_STATUS GFX_Set_Event_Code(IOCTL_ARGS arg) +{ + U32 i; + char *reg_addr; + U32 reg_value; + + SEP_DRV_LOG_FLOW_IN("Arg: %p.", arg); + + // extract the graphics event code from usermode + if (get_user(gfx_code, (int __user *)arg->buf_usr_to_drv)) { + SEP_DRV_LOG_ERROR_FLOW_OUT( + "OS_FAULT (Unable to obtain gfx_code from usermode!)."); + return OS_FAULT; + } + SEP_DRV_LOG_TRACE("Got gfx_code=0x%x.", gfx_code); + + // memory map the address to GFX counters, if not already done + if (gfx_virtual_addr == NULL) { + PCI_Map_Memory(&gfx_map, GFX_BASE_ADDRESS + GFX_BASE_NEW_OFFSET, + PAGE_SIZE); + gfx_virtual_addr = + (char *)(UIOP)SEP_MMIO_NODE_virtual_address(&gfx_map); + } + + // initialize the GFX counts + for (i = 0; i < GFX_NUM_COUNTERS; i++) { + gfx_counter[i] = 0; + gfx_overflow[i] = 0; + // only used if storing raw counts + // (i.e., GFX_COMPUTE_DELTAS is undefined) + } + + // get current GFX event code + if (gfx_virtual_addr == NULL) { + SEP_DRV_LOG_ERROR_FLOW_OUT( + "OS_INVALID (Invalid gfx_virtual_addr=0x%p!).", + gfx_virtual_addr); + return OS_INVALID; + } + + reg_addr = gfx_virtual_addr + GFX_PERF_REG; + reg_value = *(U32 *)(reg_addr); + SEP_DRV_LOG_TRACE("Read reg_value=0x%x from reg_addr=0x%p.", reg_value, + reg_addr); + + /* Update the GFX counter group */ + // write the GFX counter group with reset = 1 for all counters + reg_value 
= (gfx_code | GFX_REG_CTR_CTRL); + *(U32 *)(reg_addr) = reg_value; + SEP_DRV_LOG_TRACE("Wrote reg_value=0x%x to reg_addr=0x%p.", reg_value, + reg_addr); + + SEP_DRV_LOG_FLOW_OUT("OS_SUCCESS."); + return OS_SUCCESS; +} + +/*! + * @fn OS_STATUS GFX_Start + * + * @brief Starts the count of the Graphics PMU + * + * @param NONE + * + * @return OS_SUCCESS if success, otherwise error + * + * @note + */ +OS_STATUS GFX_Start(void) +{ + U32 reg_value; + char *reg_addr; + + SEP_DRV_LOG_TRACE_IN(""); + + // GFX counting was not specified + if (gfx_virtual_addr == NULL || gfx_code == GFX_CTRL_DISABLE) { + SEP_DRV_LOG_ERROR( + "Invalid gfx_virtual_addr=0x%p or gfx_code=0x%x.", + gfx_virtual_addr, gfx_code); + SEP_DRV_LOG_TRACE_OUT("OS_INVALID."); + return OS_INVALID; + } + + // turn on GFX counters as per event code + reg_addr = gfx_virtual_addr + GFX_PERF_REG; + *(U32 *)(reg_addr) = gfx_code; + + // verify event code was written properly + reg_value = *(U32 *)reg_addr; + if (reg_value != gfx_code) { + SEP_DRV_LOG_ERROR("Got register value 0x%x, expected 0x%x.", + reg_value, gfx_code); + SEP_DRV_LOG_TRACE_OUT("OS_INVALID."); + return OS_INVALID; + } + + SEP_DRV_LOG_TRACE_OUT("OS_SUCCESS."); + return OS_SUCCESS; +} + +/*! 
+ * @fn OS_STATUS GFX_Stop + * + * @brief Stops the count of the Graphics PMU + * + * @param NONE + * + * @return OS_SUCCESS if success, otherwise error + * + * @note + */ +OS_STATUS GFX_Stop(void) +{ + char *reg_addr; + + SEP_DRV_LOG_TRACE_IN(""); + + // GFX counting was not specified + if (gfx_virtual_addr == NULL || gfx_code == GFX_CTRL_DISABLE) { + SEP_DRV_LOG_ERROR( + "Invalid gfx_virtual_addr=0x%p or gfx_code=0x%x.", + gfx_virtual_addr, gfx_code); + SEP_DRV_LOG_TRACE_OUT("OS_INVALID."); + return OS_INVALID; + } + + // turn off GFX counters + reg_addr = gfx_virtual_addr + GFX_PERF_REG; + *(U32 *)(reg_addr) = GFX_CTRL_DISABLE; + + // unmap the memory mapped virtual address + PCI_Unmap_Memory(&gfx_map); + gfx_virtual_addr = NULL; + + // reset the GFX global variables + gfx_code = GFX_CTRL_DISABLE; + + SEP_DRV_LOG_TRACE_OUT("OS_SUCCESS."); + return OS_SUCCESS; +} diff --git a/drivers/platform/x86/sepdk/sep/gmch.c b/drivers/platform/x86/sepdk/sep/gmch.c new file mode 100755 index 0000000000000..41b9ee8b67a59 --- /dev/null +++ b/drivers/platform/x86/sepdk/sep/gmch.c @@ -0,0 +1,505 @@ +/* **************************************************************************** + * Copyright(C) 2009-2018 Intel Corporation. All Rights Reserved. + * + * This file is part of SEP Development Kit + * + * SEP Development Kit is free software; you can redistribute it + * and/or modify it under the terms of the GNU General Public License + * version 2 as published by the Free Software Foundation. + * + * SEP Development Kit is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * As a special exception, you may use this file as part of a free software + * library without restriction. 
Specifically, if other files instantiate + * templates or use macros or inline functions from this file, or you + * compile this file and link it with other files to produce an executable + * this file does not by itself cause the resulting executable to be + * covered by the GNU General Public License. This exception does not + * however invalidate any other reasons why the executable file might be + * covered by the GNU General Public License. + * **************************************************************************** + */ + +#include +#include +#include +#include +#include "lwpmudrv_defines.h" +#include "lwpmudrv_types.h" + +#if defined(PCI_HELPERS_API) +#include +#include +#endif + +#include "rise_errors.h" +#include "lwpmudrv_ecb.h" +#include "lwpmudrv_struct.h" +#include "lwpmudrv_chipset.h" +#include "inc/lwpmudrv.h" +#include "inc/control.h" +#include "inc/utility.h" +#include "inc/gmch.h" +#include "inc/pci.h" + +// global variables for determining which register offsets to use +static U32 gmch_register_read; // value=0 indicates invalid read register +static U32 gmch_register_write; // value=0 indicates invalid write register +static U32 number_of_events; + +//global variable for reading GMCH counter values +static U64 *gmch_current_data; +static U64 *gmch_to_read_data; + +// global variable for tracking number of overflows per GMCH counter +static U32 gmch_overflow[MAX_CHIPSET_COUNTERS]; +static U64 last_gmch_count[MAX_CHIPSET_COUNTERS]; + +extern DRV_CONFIG drv_cfg; +extern CHIPSET_CONFIG pma; +extern CPU_STATE pcb; + +/* + * @fn gmch_PCI_Read32(address) + * + * @brief Read the 32bit value specified by the address + * + * @return the read value + * + */ +#if defined(PCI_HELPERS_API) +#define gmch_PCI_Read32 intel_mid_msgbus_read32_raw +#else +static U32 gmch_PCI_Read32(unsigned long address) +{ + U32 read_value = 0; + + SEP_DRV_LOG_TRACE_IN("Address: %lx.", address); + + PCI_Write_U32(0, 0, 0, GMCH_MSG_CTRL_REG, (U32)address); + read_value = 
PCI_Read_U32(0, 0, 0, GMCH_MSG_DATA_REG); + + SEP_DRV_LOG_TRACE_OUT("Res: %x.", read_value); + return read_value; +} +#endif + +/* + * @fn gmch_PCI_Write32(address, data) + * + * @brief Write the 32bit value into the address specified + * + * @return None + * + */ +#if defined(PCI_HELPERS_API) +#define gmch_PCI_Write32 intel_mid_msgbus_write32_raw +#else +static void gmch_PCI_Write32(unsigned long address, unsigned long data) +{ + SEP_DRV_LOG_TRACE_IN("Address: %lx, data: %lx.", address, data); + + PCI_Write_U32(0, 0, 0, GMCH_MSG_DATA_REG, data); + PCI_Write_U32(0, 0, 0, GMCH_MSG_CTRL_REG, address); + + SEP_DRV_LOG_TRACE_OUT(""); +} +#endif + +/* + * @fn gmch_Check_Enabled() + * + * @brief Read GMCH PMON capabilities + * + * @param None + * + * @return GMCH enable bits + * + */ +static ULONG gmch_Check_Enabled(void) +{ + ULONG enabled_value; + + SEP_DRV_LOG_TRACE_IN(""); + + enabled_value = + gmch_PCI_Read32(GMCH_PMON_CAPABILITIES + gmch_register_read); + + SEP_DRV_LOG_TRACE_OUT("Res: %lx.", enabled_value); + return enabled_value; +} + +/* + * @fn gmch_Init_Chipset() + * + * @brief Initialize GMCH Counters. See note below. + * + * @param None + * + * @note This function must be called BEFORE any other function + * in this file! 
+ * + * @return VT_SUCCESS if successful, error otherwise + * + */ +static U32 gmch_Init_Chipset(void) +{ + int i; + CHIPSET_SEGMENT cs; + CHIPSET_SEGMENT gmch_chipset_seg; + + SEP_DRV_LOG_TRACE_IN(""); + + cs = &CHIPSET_CONFIG_gmch(pma); + gmch_chipset_seg = &CHIPSET_CONFIG_gmch(pma); + + // configure read/write registers offsets according to usermode setting + if (cs) { + gmch_register_read = CHIPSET_SEGMENT_read_register(cs); + gmch_register_write = CHIPSET_SEGMENT_write_register(cs); + ; + } + if (gmch_register_read == 0 || gmch_register_write == 0) { + SEP_DRV_LOG_ERROR_TRACE_OUT( + "VT_CHIPSET_CONFIG_FAILED(Invalid GMCH read/write registers!)"); + return VT_CHIPSET_CONFIG_FAILED; + } + + number_of_events = CHIPSET_SEGMENT_total_events(gmch_chipset_seg); + SEP_DRV_LOG_INIT("Number of chipset events %d.", number_of_events); + + // Allocate memory for reading GMCH counter values + the group id + gmch_current_data = + CONTROL_Allocate_Memory((number_of_events + 1) * sizeof(U64)); + if (!gmch_current_data) { + SEP_DRV_LOG_ERROR_TRACE_OUT("OS_NO_MEM (!gmch_current_data)."); + return OS_NO_MEM; + } + gmch_to_read_data = + CONTROL_Allocate_Memory((number_of_events + 1) * sizeof(U64)); + if (!gmch_to_read_data) { + SEP_DRV_LOG_ERROR_TRACE_OUT("OS_NO_MEM (!gmch_to_read_data)."); + return OS_NO_MEM; + } + + if (!DRV_CONFIG_enable_chipset(drv_cfg)) { + SEP_DRV_LOG_TRACE_OUT( + "VT_SUCCESS (!DRV_CONFIG_enable_chipset(drv_cfg))."); + return VT_SUCCESS; + } + + if (!CHIPSET_CONFIG_gmch_chipset(pma)) { + SEP_DRV_LOG_TRACE_OUT( + "VT_SUCCESS (!CHIPSET_CONFIG_gmch_chipset(drv_cfg))."); + return VT_SUCCESS; + } + // initialize the GMCH per-counter overflow numbers + for (i = 0; i < MAX_CHIPSET_COUNTERS; i++) { + gmch_overflow[i] = 0; + last_gmch_count[i] = 0; + } + + // disable fixed and GP counters + gmch_PCI_Write32(GMCH_PMON_GLOBAL_CTRL + gmch_register_write, + 0x00000000); + // clear fixed counter filter + gmch_PCI_Write32(GMCH_PMON_FIXED_CTR_CTRL + gmch_register_write, + 
0x00000000); + + SEP_DRV_LOG_TRACE_OUT("VT_SUCCESS."); + return VT_SUCCESS; +} + +/* + * @fn gmch_Start_Counters() + * + * @brief Start the GMCH Counters. + * + * @param None + * + * @return None + * + */ +static VOID gmch_Start_Counters(void) +{ + SEP_DRV_LOG_TRACE_IN(""); + + // reset and start chipset counters + if (pma == NULL) { + SEP_DRV_LOG_ERROR("gmch_Start_Counters: ERROR pma=NULL."); + } + + // enable fixed and GP counters + gmch_PCI_Write32(GMCH_PMON_GLOBAL_CTRL + gmch_register_write, + 0x0001000F); + // enable fixed counter filter + gmch_PCI_Write32(GMCH_PMON_FIXED_CTR_CTRL + gmch_register_write, + 0x00000001); + + SEP_DRV_LOG_TRACE_OUT(""); +} + +/* + * @fn gmch_Trigger_Read() + * + * @brief Read the GMCH counters through PCI Config space + * + * @return None + * + */ +static VOID gmch_Trigger_Read(void) +{ + U64 *data; + int i, data_index; + U64 val; + U64 *gmch_data; + U32 counter_data_low; + U32 counter_data_high; + U64 counter_data; + U64 cmd_register_low_read; + U64 cmd_register_high_read; + U32 gp_counter_index = 0; + U64 overflow; + U32 cur_driver_state; + + CHIPSET_SEGMENT gmch_chipset_seg; + CHIPSET_EVENT chipset_events; + U64 *temp; + + SEP_DRV_LOG_TRACE_IN(""); + + cur_driver_state = GET_DRIVER_STATE(); + + if (!IS_COLLECTING_STATE(cur_driver_state)) { + SEP_DRV_LOG_ERROR_TRACE_OUT("Invalid driver state!"); + return; + } + + if (pma == NULL) { + SEP_DRV_LOG_ERROR_TRACE_OUT("pma is NULL!"); + return; + } + + if (gmch_current_data == NULL) { + SEP_DRV_LOG_ERROR_TRACE_OUT("gmch_current_data is NULL!"); + return; + } + + if (CHIPSET_CONFIG_gmch_chipset(pma) == 0) { + SEP_DRV_LOG_ERROR_TRACE_OUT( + "CHIPSET_CONFIG_gmch_chipset(pma) is NULL!"); + return; + } + + data = gmch_current_data; + data_index = 0; + + preempt_disable(); + SYS_Local_Irq_Disable(); + gmch_chipset_seg = &CHIPSET_CONFIG_gmch(pma); + chipset_events = CHIPSET_SEGMENT_events(gmch_chipset_seg); + + // Write GroupID + data[data_index] = 1; + // Increment the data index as the event 
id starts from zero + data_index++; + + // GMCH data will be written as gmch_data[0], gmch_data[1], ... + gmch_data = data + data_index; + + // read the GMCH counters and add them into the sample record + + // iterate through GMCH counters configured to collect on events + for (i = 0; i < CHIPSET_SEGMENT_total_events(gmch_chipset_seg); i++) { + U32 event_id = CHIPSET_EVENT_event_id(&chipset_events[i]); + // read count for fixed GMCH counter event + if (event_id == 0) { + cmd_register_low_read = + GMCH_PMON_FIXED_CTR0 + gmch_register_read; + data[data_index++] = + (U64)gmch_PCI_Read32(cmd_register_low_read); + overflow = GMCH_PMON_FIXED_CTR_OVF_VAL; + } else { + // read count for general GMCH counter event + switch (gp_counter_index) { + case 0: + default: + cmd_register_low_read = GMCH_PMON_GP_CTR0_L + + gmch_register_read; + cmd_register_high_read = GMCH_PMON_GP_CTR0_H + + gmch_register_read; + break; + + case 1: + cmd_register_low_read = GMCH_PMON_GP_CTR1_L + + gmch_register_read; + cmd_register_high_read = GMCH_PMON_GP_CTR1_H + + gmch_register_read; + break; + + case 2: + cmd_register_low_read = GMCH_PMON_GP_CTR2_L + + gmch_register_read; + cmd_register_high_read = GMCH_PMON_GP_CTR2_H + + gmch_register_read; + break; + + case 3: + cmd_register_low_read = GMCH_PMON_GP_CTR3_L + + gmch_register_read; + cmd_register_high_read = GMCH_PMON_GP_CTR3_H + + gmch_register_read; + break; + } + counter_data_low = + gmch_PCI_Read32(cmd_register_low_read); + counter_data_high = + gmch_PCI_Read32(cmd_register_high_read); + counter_data = (U64)counter_data_high; + data[data_index++] = + (counter_data << 32) + counter_data_low; + overflow = GMCH_PMON_GP_CTR_OVF_VAL; + gp_counter_index++; + } + + /* Compute the running count of the event. 
*/ + gmch_data[i] &= overflow; + val = gmch_data[i]; + if (gmch_data[i] < last_gmch_count[i]) { + gmch_overflow[i]++; + } + gmch_data[i] = gmch_data[i] + gmch_overflow[i] * overflow; + last_gmch_count[i] = val; + } + + temp = gmch_to_read_data; + gmch_to_read_data = gmch_current_data; + gmch_current_data = temp; + SYS_Local_Irq_Enable(); + preempt_enable(); + + SEP_DRV_LOG_TRACE_OUT(""); +} + +/* + * @fn gmch_Read_Counters() + * + * @brief Copy the GMCH data to the sampling data stream. + * + * @param param - pointer to data stream where samples are to be written + * + * @return None + * + */ +static VOID gmch_Read_Counters(PVOID param) +{ + U64 *data; + int i; + U32 cur_driver_state; + + SEP_DRV_LOG_TRACE_IN("Param: %p.", param); + + cur_driver_state = GET_DRIVER_STATE(); + + if (!IS_COLLECTING_STATE(cur_driver_state)) { + SEP_DRV_LOG_ERROR_TRACE_OUT("Invalid driver state!"); + return; + } + + if (pma == NULL) { + SEP_DRV_LOG_ERROR_TRACE_OUT("pma is NULL!"); + return; + } + + if (param == NULL) { + SEP_DRV_LOG_ERROR_TRACE_OUT("param is NULL!"); + return; + } + + if (gmch_to_read_data == NULL) { + SEP_DRV_LOG_ERROR_TRACE_OUT("gmch_to_read_data is NULL!"); + return; + } + + /* + * Account for the group id that is placed at start of chipset array + * Number of data elements to be transferred is number_of_events + 1. 
+ */ + data = param; + for (i = 0; i < number_of_events + 1; i++) { + data[i] = gmch_to_read_data[i]; + SEP_DRV_LOG_TRACE( + "Interrupt gmch read counters data %d is: 0x%llx.", i, + data[i]); + } + + SEP_DRV_LOG_TRACE_OUT(""); +} + +/* + * @fn gmch_Stop_Counters() + * + * @brief Stop the GMCH counters + * + * @param None + * + * @return None + * + */ +static VOID gmch_Stop_Counters(void) +{ + SEP_DRV_LOG_TRACE_IN(""); + + // stop and reset the chipset counters + number_of_events = 0; + if (pma == NULL) { + SEP_DRV_LOG_ERROR("gmch_Stop_Counters: pma=NULL."); + } + + // disable fixed and GP counters + gmch_PCI_Write32(GMCH_PMON_GLOBAL_CTRL + gmch_register_write, + 0x00000000); + gmch_PCI_Write32(GMCH_PMON_FIXED_CTR_CTRL + gmch_register_write, + 0x00000000); + + SEP_DRV_LOG_TRACE_OUT(""); +} + +/* + * @fn gmch_Fini_Chipset() + * + * @brief Reset GMCH to state where it can be used again. + * Called at cleanup phase. + * + * @param None + * + * @return None + * + */ +static VOID gmch_Fini_Chipset(void) +{ + SEP_DRV_LOG_TRACE_IN(""); + + if (!gmch_Check_Enabled()) { + SEP_DRV_LOG_WARNING("GMCH is not enabled!"); + } + + gmch_current_data = CONTROL_Free_Memory(gmch_current_data); + gmch_to_read_data = CONTROL_Free_Memory(gmch_to_read_data); + + SEP_DRV_LOG_TRACE_OUT(""); +} + +// +// Initialize the GMCH chipset dispatch table +// + +CS_DISPATCH_NODE gmch_dispatch = { .init_chipset = gmch_Init_Chipset, + .start_chipset = gmch_Start_Counters, + .read_counters = gmch_Read_Counters, + .stop_chipset = gmch_Stop_Counters, + .fini_chipset = gmch_Fini_Chipset, + .Trigger_Read = gmch_Trigger_Read }; diff --git a/drivers/platform/x86/sepdk/sep/linuxos.c b/drivers/platform/x86/sepdk/sep/linuxos.c new file mode 100755 index 0000000000000..1f877e6e4bc86 --- /dev/null +++ b/drivers/platform/x86/sepdk/sep/linuxos.c @@ -0,0 +1,1415 @@ +/* **************************************************************************** + * Copyright(C) 2009-2018 Intel Corporation. All Rights Reserved. 
+ * + * This file is part of SEP Development Kit + * + * SEP Development Kit is free software; you can redistribute it + * and/or modify it under the terms of the GNU General Public License + * version 2 as published by the Free Software Foundation. + * + * SEP Development Kit is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * As a special exception, you may use this file as part of a free software + * library without restriction. Specifically, if other files instantiate + * templates or use macros or inline functions from this file, or you + * compile this file and link it with other files to produce an executable + * this file does not by itself cause the resulting executable to be + * covered by the GNU General Public License. This exception does not + * however invalidate any other reasons why the executable file might be + * covered by the GNU General Public License. 
+ * **************************************************************************** + */ + +#include "lwpmudrv_defines.h" +#include + +#include +#include +#include +#include +#include +#include +#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 11, 0) +#include +#include +#else +#include +#endif +#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 10, 0) +#include +#endif +#include +#include +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 28) +#include +#endif + +#include "lwpmudrv_types.h" +#include "lwpmudrv_ecb.h" +#include "lwpmudrv_struct.h" +#include "inc/lwpmudrv.h" +#include "inc/control.h" +#include "inc/utility.h" +#include "inc/cpumon.h" +#include "inc/output.h" +#include "inc/pebs.h" + +#include "inc/linuxos.h" +#include "inc/apic.h" + +#include +#include + + +extern DRV_BOOL multi_pebs_enabled; +extern DRV_BOOL sched_switch_enabled; +extern uid_t uid; +extern volatile pid_t control_pid; +static volatile S32 hooks_installed; +#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 15, 0) +static struct tracepoint *tp_sched_switch; +#endif + +#define HOOK_FREE 0 +#define HOOK_UNINSTALL -10000 +static atomic_t hook_state = ATOMIC_INIT(HOOK_UNINSTALL); +#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 10, 0) && defined(DRV_CPU_HOTPLUG) +static enum cpuhp_state cpuhp_sepdrv_state; +#endif +extern wait_queue_head_t wait_exit; + +#define MY_TASK PROFILE_TASK_EXIT +#define MY_UNMAP PROFILE_MUNMAP +#ifdef CONFIG_X86_64 +#define MR_SEG_NUM 0 +#else +#define MR_SEG_NUM 2 +#endif + +#if !defined(KERNEL_IMAGE_SIZE) +#define KERNEL_IMAGE_SIZE (512 * 1024 * 1024) +#endif + +#if defined(DRV_IA32) +static U16 linuxos_Get_Exec_Mode(struct task_struct *p) +{ + return (unsigned short)MODE_32BIT; +} +#endif + +#if defined(DRV_EM64T) +static U16 linuxos_Get_Exec_Mode(struct task_struct *p) +{ + SEP_DRV_LOG_TRACE_IN("P: %p.", p); + + if (!p) { + SEP_DRV_LOG_ERROR_TRACE_OUT("MODE_UNKNOWN (p is NULL!)."); + return MODE_UNKNOWN; + } + + if (test_tsk_thread_flag(p, TIF_IA32)) { + SEP_DRV_LOG_TRACE_OUT( + "Res: 
%u (test_tsk_thread_flag TIF_IA32).", + (U16)(unsigned short)MODE_32BIT); + return (unsigned short)MODE_32BIT; + } + + SEP_DRV_LOG_TRACE_OUT("Res: %u.", (U16)(unsigned short)MODE_64BIT); + return (unsigned short)MODE_64BIT; +} +#endif + +static S32 linuxos_Load_Image_Notify_Routine(char *name, U64 base, U32 size, + U64 page_offset, U32 pid, + U32 parent_pid, U32 options, + unsigned short mode, + S32 load_event, U32 segment_num, + U32 kernel_modules, U32 osid) +{ + char *raw_path; + ModuleRecord *mra; + char buf[sizeof(ModuleRecord) + MAXNAMELEN + 32]; + U64 tsc_read; + S32 local_load_event = (load_event == -1) ? 0 : load_event; + U64 page_offset_shift; + + SEP_DRV_LOG_NOTIFICATION_TRACE_IN(load_event == 1, + "Name: '%s', pid: %d.", name, pid); + + mra = (ModuleRecord *)buf; + memset(mra, '\0', sizeof(buf)); + raw_path = (char *)mra + sizeof(ModuleRecord); + + page_offset_shift = page_offset << PAGE_SHIFT; + MR_page_offset_Set(mra, page_offset_shift); + MODULE_RECORD_segment_type(mra) = mode; + MODULE_RECORD_load_addr64(mra) = (U64)(size_t)base; + MODULE_RECORD_length64(mra) = size; + MODULE_RECORD_tsc_used(mra) = 1; + MODULE_RECORD_first_module_rec_in_process(mra) = + options & LOPTS_1ST_MODREC; + MODULE_RECORD_segment_number(mra) = segment_num; + MODULE_RECORD_exe(mra) = (LOPTS_EXE & options) ? 1 : 0; + MODULE_RECORD_global_module_tb5(mra) = + (options & LOPTS_GLOBAL_MODULE) ? 1 : 0; + MODULE_RECORD_global_module(mra) = + (options & LOPTS_GLOBAL_MODULE) ? 
1 : 0; + MODULE_RECORD_processed(mra) = 0; + MODULE_RECORD_parent_pid(mra) = parent_pid; + MODULE_RECORD_osid(mra) = osid; + MODULE_RECORD_pid_rec_index(mra) = pid; + + if (kernel_modules) { + MODULE_RECORD_tsc(mra) = 0; + MR_unloadTscSet(mra, (U64)(0xffffffffffffffffLL)); + } else { + UTILITY_Read_TSC(&tsc_read); + preempt_disable(); + tsc_read -= TSC_SKEW(CONTROL_THIS_CPU()); + preempt_enable(); + + if (local_load_event) { + MR_unloadTscSet(mra, tsc_read); + } else { + MR_unloadTscSet(mra, (U64)(-1)); + } + } + + MODULE_RECORD_pid_rec_index_raw(mra) = 1; // raw pid +#if defined(DEBUG) + if (total_loads_init) { + SEP_DRV_LOG_NOTIFICATION_TRACE( + load_event == 1, + "Setting pid_rec_index_raw pid 0x%x %s.", pid, name); + } +#endif + + strncpy(raw_path, name, MAXNAMELEN); + raw_path[MAXNAMELEN] = 0; + MODULE_RECORD_path_length(mra) = (U16)strlen(raw_path) + 1; + MODULE_RECORD_rec_length(mra) = (U16)ALIGN_8( + sizeof(ModuleRecord) + MODULE_RECORD_path_length(mra)); + +#if defined(DRV_IA32) + MODULE_RECORD_selector(mra) = (pid == 0) ? __KERNEL_CS : __USER_CS; +#endif +#if defined(DRV_EM64T) + if (mode == MODE_64BIT) { + MODULE_RECORD_selector(mra) = + (pid == 0) ? __KERNEL_CS : __USER_CS; + } else if (mode == MODE_32BIT) { + MODULE_RECORD_selector(mra) = + (pid == 0) ? 
__KERNEL32_CS : __USER32_CS; + } +#endif + + OUTPUT_Module_Fill((PVOID)mra, MODULE_RECORD_rec_length(mra), + load_event == 1); + + SEP_DRV_LOG_NOTIFICATION_TRACE_OUT(load_event == 1, "OS_SUCCESS"); + return OS_SUCCESS; +} + +#ifdef DRV_MM_EXE_FILE_PRESENT +static DRV_BOOL linuxos_Equal_VM_Exe_File(struct vm_area_struct *vma) +{ + S8 name_vm_file[MAXNAMELEN]; + S8 name_exe_file[MAXNAMELEN]; + S8 *pname_vm_file = NULL; + S8 *pname_exe_file = NULL; + DRV_BOOL res; + + SEP_DRV_LOG_TRACE_IN("FMA: %p.", vma); + + if (vma == NULL) { + SEP_DRV_LOG_TRACE_OUT("FALSE (!vma)."); + return FALSE; + } + + if (vma->vm_file == NULL) { + SEP_DRV_LOG_TRACE_OUT("FALSE (!vma->vm_file)."); + return FALSE; + } + + if (vma->vm_mm->exe_file == NULL) { + SEP_DRV_LOG_TRACE_OUT("FALSE (!vma->vm_mm->exe_file)."); + return FALSE; + } + + pname_vm_file = D_PATH(vma->vm_file, + name_vm_file, MAXNAMELEN); + pname_exe_file = D_PATH(vma->vm_mm->exe_file, + name_exe_file, MAXNAMELEN); + res = strcmp(pname_vm_file, pname_exe_file) == 0; + + SEP_DRV_LOG_TRACE_OUT("Res: %u.", res); + return res; +} +#endif + +/* ------------------------------------------------------------------------- */ +/*! + * @fn linuxos_Map_Kernel_Modules (void) + * + * @brief Obtain kernel module details from modules list + * and map the details to the module record. 
@return S32 OS_SUCCESS on success
MODULES_VADDR; modules = modules->prev) + ; + list_for_each_entry (current_module, modules, list) { + char *name = current_module->name; +#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 5, 0) || \ + defined(SEP_CONFIG_MODULE_LAYOUT) + addr = (unsigned long)current_module->core_layout.base; + size = current_module->core_layout.size; +#else + addr = (unsigned long)current_module->module_core; + size = current_module->core_size; +#endif + + if (module_is_live(current_module)) { + SEP_DRV_LOG_TRACE("kmodule: %20s 0x%llx 0x%llx.", + name, addr, size); + linuxos_Load_Image_Notify_Routine( + name, addr, size, 0, 0, 0, LOPTS_GLOBAL_MODULE, + exec_mode, -1, 0, 1, OS_ID_NATIVE); + } + } + + mutex_unlock(&module_mutex); +#endif + SEP_DRV_LOG_TRACE_OUT("OS_SUCCESS"); + return OS_SUCCESS; +} + +// +// Register the module for a process. The task_struct and mm +// should be locked if necessary to make sure they don't change while we're +// iterating... +// Used as a service routine +// +static S32 linuxos_VMA_For_Process(struct task_struct *p, + struct vm_area_struct *vma, S32 load_event, + U32 *first) +{ + U32 options = 0; + S8 name[MAXNAMELEN]; + S8 *pname = NULL; + U32 ppid = 0; + U16 exec_mode; + U64 page_offset = 0; + +#if defined(DRV_ANDROID) + char andr_app[TASK_COMM_LEN]; +#endif + + SEP_DRV_LOG_NOTIFICATION_TRACE_IN( + load_event == 1, "P = %p, vma = %p, load_event: %d, first: %p.", + p, vma, load_event, first); + + if (p == NULL) { + SEP_DRV_LOG_NOTIFICATION_ERROR(load_event == 1, + "Skipped p=NULL."); + SEP_DRV_LOG_NOTIFICATION_TRACE_OUT(load_event == 1, + "OS_SUCCESS (!p)."); + return OS_SUCCESS; + } + + if (vma->vm_file) + pname = D_PATH(vma->vm_file, name, MAXNAMELEN); + + page_offset = vma->vm_pgoff; + + if (!IS_ERR(pname) && pname != NULL) { + SEP_DRV_LOG_NOTIFICATION_TRACE(load_event == 1, + "enum: %s, %d, %lx, %lx %llu.", + pname, p->pid, vma->vm_start, + (vma->vm_end - vma->vm_start), + page_offset); + + // if the VM_EXECUTABLE flag is set then this is the module + 
// that is being used to name the module + if (DRV_VM_MOD_EXECUTABLE(vma)) { + options |= LOPTS_EXE; +#if defined(DRV_ANDROID) + if (!strcmp(pname, "/system/bin/app_process") || + !strcmp(pname, "/system/bin/app_process32") || + !strcmp(pname, "/system/bin/app_process64")) { + memset(andr_app, '\0', TASK_COMM_LEN); + strncpy(andr_app, p->comm, TASK_COMM_LEN); + pname = andr_app; + } +#endif + } + // mark the first of the bunch... + if (*first == 1) { + options |= LOPTS_1ST_MODREC; + *first = 0; + } + } +#if defined(DRV_ALLOW_VDSO) + else if ((vma->vm_mm != NULL) && + vma->vm_start == (long)vma->vm_mm->context.vdso) { + pname = "[vdso]"; + } +#endif +#if defined(DRV_ALLOW_SYSCALL) + else if (vma->vm_start == VSYSCALL_START) { + pname = "[vsyscall]"; + } +#endif + + if (pname != NULL) { + options = 0; + if (DRV_VM_MOD_EXECUTABLE(vma)) { + options |= LOPTS_EXE; + } + + if (p && p->parent) { + ppid = p->parent->tgid; + } + exec_mode = linuxos_Get_Exec_Mode(p); + // record this module + linuxos_Load_Image_Notify_Routine( + pname, vma->vm_start, (vma->vm_end - vma->vm_start), + page_offset, p->pid, ppid, options, exec_mode, + load_event, 1, 0, OS_ID_NATIVE); + } + + SEP_DRV_LOG_NOTIFICATION_TRACE_OUT(load_event == 1, "OS_SUCCESS."); + return OS_SUCCESS; +} + +// +// Common loop to enumerate all modules for a process. The task_struct and mm +// should be locked if necessary to make sure they don't change while we're +// iterating... 
+// +static S32 linuxos_Enum_Modules_For_Process(struct task_struct *p, + struct mm_struct *mm, + S32 load_event) +{ + struct vm_area_struct *mmap; + U32 first = 1; + +#if defined(SECURE_SEP) + uid_t l_uid; +#endif + + SEP_DRV_LOG_NOTIFICATION_TRACE_IN(load_event == 1, + "P: %p, mm: %p, load_event: %d.", p, + mm, load_event); + +#if defined(SECURE_SEP) + l_uid = DRV_GET_UID(p); + /* + * Check for: same uid, or root uid + */ + if (l_uid != uid && l_uid != 0) { + SEP_DRV_LOG_NOTIFICATION_TRACE_OUT( + load_event == 1, + "OS_SUCCESS (secure_sep && l_uid != uid && l_uid != 0)."); + return OS_SUCCESS; + } +#endif + for (mmap = mm->mmap; mmap; mmap = mmap->vm_next) { + /* We have 3 distinct conditions here. + * 1) Is the page executable? + * 2) Is is a part of the vdso area? + * 3) Is it the vsyscall area? + */ + if (((mmap->vm_flags & VM_EXEC) && mmap->vm_file && + mmap->vm_file->DRV_F_DENTRY) +#if defined(DRV_ALLOW_VDSO) + || (mmap->vm_mm && + mmap->vm_start == (long)mmap->vm_mm->context.vdso) +#endif +#if defined(DRV_ALLOW_VSYSCALL) + || (mmap->vm_start == VSYSCALL_START) +#endif + ) { + + linuxos_VMA_For_Process(p, mmap, load_event, &first); + } + } + + SEP_DRV_LOG_NOTIFICATION_TRACE_OUT(load_event == 1, "OS_SUCCESS"); + return OS_SUCCESS; +} + +/* ------------------------------------------------------------------------- */ +/*! + * @fn static int linuxos_Exec_Unmap_Notify( + * struct notifier_block *self, + * unsigned long val, + * VOID *data) + * + * @brief this function is called whenever a task exits + * + * @param self IN - not used + * val IN - not used + * data IN - this is cast in the mm_struct of the task that is call unmap + * + * @return none + * + * Special Notes: + * + * This notification is called from do_munmap(mm/mmap.c). This is called when ever + * a module is loaded or unloaded. It looks like it is called right after a module is + * loaded or before its unloaded (if using dlopen, dlclose). 
However, it is not called when a process is exiting; instead, exit_mmap is called
+ * @fn VOID linuxos_Handle_Online_cpu( + * PVOID param) + * + * @param PVOID param + * + * @return None + * + * @brief Callback function to set the cpu online + * @brief and begin collection on it + */ +static VOID linuxos_Handle_Online_cpu(PVOID param) +{ + U32 this_cpu; + U32 dev_idx; + DISPATCH dispatch; + + SEP_DRV_LOG_NOTIFICATION_TRACE_IN(SEP_IN_NOTIFICATION, + "Dummy param: %p.", param); + + preempt_disable(); + this_cpu = CONTROL_THIS_CPU(); + dev_idx = core_to_dev_map[this_cpu]; + dispatch = LWPMU_DEVICE_dispatch(&devices[dev_idx]); + preempt_enable(); + CPUMON_Online_Cpu((PVOID)&this_cpu); + if (CPU_STATE_pmu_state(&pcb[this_cpu]) == NULL) { + if (dispatch && dispatch->init) { + dispatch->init(NULL); + } + } + if (dispatch && dispatch->write) { + dispatch->write(NULL); + } + CPU_STATE_group_swap(&pcb[this_cpu]) = 1; + if (GET_DRIVER_STATE() == DRV_STATE_RUNNING) { + // possible race conditions with notifications. + // cleanup should wait until all notifications are done, + // and new notifications should not proceed + if (dispatch && dispatch->restart) { + dispatch->restart(NULL); + } + } + + SEP_DRV_LOG_NOTIFICATION_TRACE_OUT(SEP_IN_NOTIFICATION, ""); +} + +/* ------------------------------------------------------------------------- */ +/*! 
+ * @fn VOID linuxos_Handle_Offline_cpu( + * PVOID param) + * + * @param PVOID param + * + * @return None + * + * @brief Callback function to set the cpu offline + * @brief and stop collection on it + */ +static VOID linuxos_Handle_Offline_cpu(PVOID param) +{ + U32 this_cpu; + U32 apic_lvterr; + U32 dev_idx; + DISPATCH dispatch; + SEP_DRV_LOG_NOTIFICATION_TRACE_IN(SEP_IN_NOTIFICATION, + "Dummy param: %p.", param); + + preempt_disable(); + this_cpu = CONTROL_THIS_CPU(); + dev_idx = core_to_dev_map[this_cpu]; + dispatch = LWPMU_DEVICE_dispatch(&devices[dev_idx]); + preempt_enable(); + CPUMON_Offline_Cpu((PVOID)&this_cpu); + if (dispatch && dispatch->freeze) { + dispatch->freeze(NULL); + } + apic_lvterr = apic_read(APIC_LVTERR); + apic_write(APIC_LVTERR, apic_lvterr | APIC_LVT_MASKED); + APIC_Restore_LVTPC(NULL); + apic_write(APIC_LVTERR, apic_lvterr); + + SEP_DRV_LOG_NOTIFICATION_TRACE_OUT(SEP_IN_NOTIFICATION, ""); +} + +/* ------------------------------------------------------------------------- */ +/*! + * @fn int linuxos_online_cpu( + * unsigned int cpu) + * + * @param unsigned int cpu + * + * @return None + * + * @brief Invokes appropriate call back function when CPU is online + */ +static int linuxos_online_cpu(unsigned int cpu) +{ + SEP_DRV_LOG_NOTIFICATION_IN("Cpu %d coming online.", cpu); + + if (CPUMON_is_Online_Allowed()) { + CONTROL_Invoke_Cpu(cpu, linuxos_Handle_Online_cpu, NULL); + SEP_DRV_LOG_NOTIFICATION_OUT("Cpu %d came online.", cpu); + return 0; + } else { + SEP_DRV_LOG_WARNING_NOTIFICATION_OUT( + "Cpu %d is not allowed to come online!", cpu); + return 0; + } +} +/* ------------------------------------------------------------------------- */ +/*! 
+ * @fn int linuxos_offline_cpu( + * unsigned int cpu) + * + * @param unsigned int cpu + * + * @return None + * + * @brief Invokes appropriate call back function when CPU is offline + */ +static int linuxos_offline_cpu(unsigned int cpu) +{ + SEP_DRV_LOG_NOTIFICATION_IN("Cpu %d going offline.", cpu); + + if (CPUMON_is_Offline_Allowed()) { + CONTROL_Invoke_Cpu(cpu, linuxos_Handle_Offline_cpu, NULL); + SEP_DRV_LOG_NOTIFICATION_OUT("Cpu %d went offline.", cpu); + return 0; + } else { + SEP_DRV_LOG_WARNING_NOTIFICATION_OUT( + "Cpu %d is not allowed to go offline!", cpu); + return 0; + } +} +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 10, 0) +/* ------------------------------------------------------------------------- */ +/*! + * @fn OS_STATUS linuxos_Hotplug_Notifier( + * struct notifier_block *block, unsigned long action, void *pcpu) + * + * @param struct notifier_block *block - notifier block + * unsigned long action - notifier action + * void *pcpu - per cpu pcb + * + * @return NOTIFY_OK, if successful + * + * @brief Hotplug Notifier function that handles various cpu states + * @brief and invokes respective callback functions + */ +static OS_STATUS linuxos_Hotplug_Notifier(struct notifier_block *block, + unsigned long action, void *pcpu) +{ + U32 cpu = (unsigned int)(unsigned long)pcpu; + + SEP_DRV_LOG_NOTIFICATION_IN( + "Cpu: %u, action: %u.", cpu, + action); // nb: will overcount number of pending notifications + // when using this routine + + switch (action & ~CPU_TASKS_FROZEN) { + case CPU_DOWN_FAILED: + SEP_DRV_LOG_ERROR("SEP cpu %d offline failed!", cpu); + case CPU_ONLINE: + linuxos_online_cpu(cpu); + break; + case CPU_DOWN_PREPARE: + linuxos_offline_cpu(cpu); + break; + default: + SEP_DRV_LOG_WARNING( + "DEFAULT: cpu %d unhandled action value is %d.", cpu, + action); + break; + } + + SEP_DRV_LOG_NOTIFICATION_OUT(""); + return NOTIFY_OK; +} + +static struct notifier_block cpu_hotplug_notifier = { + .notifier_call = &linuxos_Hotplug_Notifier, +}; +#endif +/* 
------------------------------------------------------------------------- */ +/*! + * @fn VOID LINUXOS_Register_Hotplug( + * VOID) + * + * @param None + * + * @return None + * + * @brief Registers the Hotplug Notifier + */ +VOID LINUXOS_Register_Hotplug(void) +{ +#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 10, 0) + S32 err; + + SEP_DRV_LOG_INIT_IN( + "Kernel version >= 4.10.0: using direct notifications."); + + err = cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN, "ia64/sep5:online", + linuxos_online_cpu, + linuxos_offline_cpu); + cpuhp_sepdrv_state = (int)err; +#else + SEP_DRV_LOG_INIT_IN("Kernel version < 4.10.0: using notification hub."); + register_cpu_notifier(&cpu_hotplug_notifier); +#endif + SEP_DRV_LOG_INIT_OUT("Hotplug notifier registered."); +} + +/* ------------------------------------------------------------------------- */ +/*! + * @fn VOID LINUXOS_Unregister_Hotplug( + * VOID) + * + * @param None + * + * @return None + * + * @brief Unregisters the Hotplug Notifier + */ +VOID LINUXOS_Unregister_Hotplug(void) +{ + SEP_DRV_LOG_INIT_IN("Unregistering hotplug notifier."); +#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 10, 0) + cpuhp_remove_state_nocalls(cpuhp_sepdrv_state); +#else + unregister_cpu_notifier(&cpu_hotplug_notifier); +#endif + SEP_DRV_LOG_INIT_OUT("Hotplug notifier unregistered."); +} +#endif +/* ------------------------------------------------------------------------- */ +/*! + * @fn OS_STATUS LINUXOS_Enum_Process_Modules(DRV_BOOL at_end) + * + * @brief gather all the process modules that are present. + * + * @param at_end - the collection happens at the end of the sampling run + * + * @return OS_SUCCESS + * + * Special Notes: + * This routine gathers all the process modules that are present + * in the system at this time. If at_end is set to be TRUE, then + * act as if all the modules are being unloaded. 
+ * + */ +OS_STATUS LINUXOS_Enum_Process_Modules(DRV_BOOL at_end) +{ + int n = 0; + struct task_struct *p; + + SEP_DRV_LOG_TRACE_IN("At_end: %u.", at_end); + SEP_DRV_LOG_TRACE("Begin tasks."); + + if (GET_DRIVER_STATE() == DRV_STATE_TERMINATING) { + SEP_DRV_LOG_TRACE_OUT("OS_SUCCESS (TERMINATING)."); + return OS_SUCCESS; + } + + FOR_EACH_TASK(p) + { + struct mm_struct *mm; + + SEP_DRV_LOG_TRACE("Looking at task %d.", n); + /* + * Call driver notification routine for each module + * that is mapped into the process created by the fork + */ + p->comm[TASK_COMM_LEN - 1] = 0; + // making sure there is a trailing 0 + mm = get_task_mm(p); + + if (!mm) { + SEP_DRV_LOG_TRACE( + "Skipped (p->mm=NULL). P=0x%p, pid=%d, p->comm=%s.", + p, p->pid, p->comm); + linuxos_Load_Image_Notify_Routine( + p->comm, 0, 0, 0, p->pid, + (p->parent) ? p->parent->tgid : 0, + LOPTS_EXE | LOPTS_1ST_MODREC, + linuxos_Get_Exec_Mode(p), + 2, // '2' to trigger 'if (load_event)' conditions + 1, 0, OS_ID_NATIVE); + continue; + } + + UTILITY_down_read_mm(mm); + linuxos_Enum_Modules_For_Process(p, mm, at_end ? -1 : 0); + UTILITY_up_read_mm(mm); + mmput(mm); + n++; + } + + SEP_DRV_LOG_TRACE("Enum_Process_Modules done with %d tasks.", n); + + SEP_DRV_LOG_TRACE_OUT("OS_SUCCESS."); + return OS_SUCCESS; +} + +/* ------------------------------------------------------------------------- */ +/*! + * @fn static int linuxos_Exit_Task_Notify(struct notifier_block * self, + * unsigned long val, PVOID data) + * @brief this function is called whenever a task exits + * + * @param self IN - not used + * val IN - not used + * data IN - this is cast into the task_struct of the exiting task + * + * @return none + * + * Special Notes: + * this function is called whenever a task exits. It is called right before + * the virtual memory areas are freed. 
We just enumerate through all the modules + * of the task and set the unload sample count and the load event flag to 1 to + * indicate this is a module unload + */ +static int linuxos_Exit_Task_Notify(struct notifier_block *self, + unsigned long val, PVOID data) +{ + struct task_struct *p = (struct task_struct *)data; + int status = OS_SUCCESS; + U32 cur_driver_state; + struct mm_struct *mm; + + SEP_DRV_LOG_NOTIFICATION_IN("Self: %p, val: %lu, data: %p.", self, val, + data); + + cur_driver_state = GET_DRIVER_STATE(); + + if (cur_driver_state == DRV_STATE_UNINITIALIZED || + cur_driver_state == DRV_STATE_TERMINATING) { + SEP_DRV_LOG_NOTIFICATION_OUT("Early exit (driver state)."); + return status; + } + SEP_DRV_LOG_TRACE("Pid = %d tgid = %d.", p->pid, p->tgid); + if (p->pid == control_pid) { + SEP_DRV_LOG_NOTIFICATION_TRACE( + SEP_IN_NOTIFICATION, + "The collector task has been terminated via an uncatchable signal."); + SEP_DRV_LOG_NOTIFICATION_WARNING(SEP_IN_NOTIFICATION, + "Sep was killed!"); + CHANGE_DRIVER_STATE(STATE_BIT_ANY, DRV_STATE_TERMINATING); + wake_up_interruptible(&wait_exit); + + SEP_DRV_LOG_NOTIFICATION_OUT("Res = %u (pid == control_pid).", + status); + return status; + } + + if (cur_driver_state != DRV_STATE_IDLE && + !IS_COLLECTING_STATE(cur_driver_state)) { + SEP_DRV_LOG_NOTIFICATION_OUT("Res = %u (stopping collection).", + status); + return status; + } + + mm = get_task_mm(p); + if (!mm) { + SEP_DRV_LOG_NOTIFICATION_OUT("Res = %u (!p->mm).", status); + return status; + } + UTILITY_down_read_mm(mm); + if (GET_DRIVER_STATE() != DRV_STATE_TERMINATING) { + if (!atomic_add_negative(1, &hook_state)) { + linuxos_Enum_Modules_For_Process(p, mm, 1); + } + atomic_dec(&hook_state); + } + UTILITY_up_read_mm(mm); + mmput(mm); + + SEP_DRV_LOG_NOTIFICATION_TRACE(SEP_IN_NOTIFICATION, "Hook_state %d.", + atomic_read(&hook_state)); + + SEP_DRV_LOG_NOTIFICATION_OUT("Res = %u.", status); + return status; +} + +/* + * The notifier block. 
All the static entries have been defined at this point + */ +static struct notifier_block linuxos_exec_unmap_nb = { + .notifier_call = linuxos_Exec_Unmap_Notify, +}; + +static struct notifier_block linuxos_exit_task_nb = { + .notifier_call = linuxos_Exit_Task_Notify, +}; + +#if defined(CONFIG_TRACEPOINTS) +/* ------------------------------------------------------------------------- */ +/*! + * @fn void capture_sched_switch(VOID *) + * @brief capture current pid/tid on all cpus + * + * @param p IN - not used + * + * @return none + * + * Special Notes: + * + * None + */ +static void capture_sched_switch(void *p) +{ + U32 this_cpu; + BUFFER_DESC bd; + SIDEBAND_INFO sideband_info; + U64 tsc; + + SEP_DRV_LOG_TRACE_IN(""); + + UTILITY_Read_TSC(&tsc); + + preempt_disable(); + this_cpu = CONTROL_THIS_CPU(); + preempt_enable(); + + bd = &cpu_sideband_buf[this_cpu]; + if (bd == NULL) { + SEP_DRV_LOG_ERROR_TRACE_OUT("Bd is NULL!"); + return; + } + + sideband_info = (SIDEBAND_INFO)OUTPUT_Reserve_Buffer_Space( + bd, sizeof(SIDEBAND_INFO_NODE), FALSE, !SEP_IN_NOTIFICATION, + (S32)this_cpu); + if (sideband_info == NULL) { + SEP_DRV_LOG_ERROR_TRACE_OUT("Sideband_info is NULL!"); + return; + } + + SIDEBAND_INFO_pid(sideband_info) = current->tgid; + SIDEBAND_INFO_tid(sideband_info) = current->pid; + SIDEBAND_INFO_tsc(sideband_info) = tsc; + + SEP_DRV_LOG_TRACE_OUT(""); +} + +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 28) +/* ------------------------------------------------------------------------- */ +/*! + * @fn void record_pebs_process_info(...) 
+ * @brief record all sched switch pid/tid info + * + * @param ignore IN - not used + * from IN + * to IN + * + * @return none + * + * Special Notes: + * + * None + */ +#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 4, 0) +static void record_pebs_process_info(void *ignore, bool preempt, + struct task_struct *from, + struct task_struct *to) +#elif LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 35) +static void record_pebs_process_info(void *ignore, struct task_struct *from, + struct task_struct *to) +#elif LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 28) +static void record_pebs_process_info(struct rq *ignore, + struct task_struct *from, + struct task_struct *to) +#endif +{ + U32 this_cpu; + BUFFER_DESC bd; + SIDEBAND_INFO sideband_info; + U64 tsc; + U32 cur_driver_state; + + SEP_DRV_LOG_NOTIFICATION_IN("From: %p, to: %p.", from, to); + + cur_driver_state = GET_DRIVER_STATE(); + + if (cur_driver_state != DRV_STATE_IDLE && + !IS_COLLECTING_STATE(cur_driver_state)) { + SEP_DRV_LOG_NOTIFICATION_OUT("Early exit (driver state)."); + return; + } + + UTILITY_Read_TSC(&tsc); + + preempt_disable(); + this_cpu = CONTROL_THIS_CPU(); + preempt_enable(); + + SEP_DRV_LOG_NOTIFICATION_TRACE(SEP_IN_NOTIFICATION, + "[OUT<%d:%d:%s>-IN<%d:%d:%s>].", + from->tgid, from->pid, from->comm, + to->tgid, to->pid, to->comm); + + bd = &cpu_sideband_buf[this_cpu]; + if (bd == NULL) { + SEP_DRV_LOG_NOTIFICATION_OUT("Early exit (!bd)."); + return; + } + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 13, 0)) && \ + (LINUX_VERSION_CODE < KERNEL_VERSION(4, 14, 0)) + sideband_info = (SIDEBAND_INFO)OUTPUT_Reserve_Buffer_Space( + bd, sizeof(SIDEBAND_INFO_NODE), TRUE, SEP_IN_NOTIFICATION, + (S32)this_cpu); +#else + sideband_info = (SIDEBAND_INFO)OUTPUT_Reserve_Buffer_Space( + bd, sizeof(SIDEBAND_INFO_NODE), FALSE, SEP_IN_NOTIFICATION, + (S32)this_cpu); +#endif + + if (sideband_info == NULL) { + SEP_DRV_LOG_NOTIFICATION_OUT("Early exit (!sideband_info)."); + return; + } + + SIDEBAND_INFO_pid(sideband_info) = 
to->tgid; + SIDEBAND_INFO_tid(sideband_info) = to->pid; + SIDEBAND_INFO_tsc(sideband_info) = tsc; + + SEP_DRV_LOG_NOTIFICATION_OUT(""); +} +#endif +#endif + +#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 15, 0) +/* ------------------------------------------------------------------------- */ +/*! + * @fn void find_sched_switch_tracepoint + * @brief find trace poing for sched_switch + * + * @param tp pass in by system + * param pointer of trace point + * + * @return none + * + * Special Notes: + * + * None + */ +static void find_sched_switch_tracepoint(struct tracepoint *tp, VOID *param) +{ + struct tracepoint **ptp = (struct tracepoint **)param; + + SEP_DRV_LOG_TRACE_IN("Tp: %p, param: %p.", tp, param); + + if (tp && ptp) { + SEP_DRV_LOG_TRACE("trace point name: %s.", tp->name); + if (!strcmp(tp->name, "sched_switch")) { + SEP_DRV_LOG_TRACE( + "Found trace point for sched_switch."); + *ptp = tp; + } + } + + SEP_DRV_LOG_TRACE_OUT(""); +} +#endif + +/* ------------------------------------------------------------------------- */ +/*! 
+ * @fn int install_sched_switch_callback(void) + * @brief registers sched_switch callbacks for PEBS sideband + * + * @param none + * + * @return 0 success else error number + * + * Special Notes: + * + * None + */ +static int install_sched_switch_callback(void) +{ + int err = 0; + + SEP_DRV_LOG_TRACE_IN(""); + SEP_DRV_LOG_INIT("Installing PEBS linux OS Hooks."); + +#if defined(CONFIG_TRACEPOINTS) +#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 15, 0) + if (!tp_sched_switch) { + for_each_kernel_tracepoint(&find_sched_switch_tracepoint, + &tp_sched_switch); + } + if (!tp_sched_switch) { + err = -EIO; + SEP_DRV_LOG_INIT( + "Please check Linux is built w/ CONFIG_CONTEXT_SWITCH_TRACER."); + } else { + err = tracepoint_probe_register( + tp_sched_switch, (void *)record_pebs_process_info, + NULL); + } +#elif LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 35) + err = register_trace_sched_switch(record_pebs_process_info, NULL); +#elif LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 28) + err = register_trace_sched_switch(record_pebs_process_info); +#else + SEP_DRV_LOG_INIT( + "Please use Linux kernel version >= 2.6.28 to use multiple pebs."); + err = -1; +#endif + CONTROL_Invoke_Parallel(capture_sched_switch, NULL); +#endif + + SEP_DRV_LOG_TRACE_OUT("Res: %d.", err); + return err; +} + +/* ------------------------------------------------------------------------- */ +/*! 
+ * @fn VOID LINUXOS_Install_Hooks(void) + * @brief registers the profiling callbacks + * + * @param none + * + * @return none + * + * Special Notes: + * + * None + */ +VOID LINUXOS_Install_Hooks(void) +{ + int err = 0; + int err2 = 0; + + SEP_DRV_LOG_INIT_IN("Installing Linux OS Hooks."); + + if (hooks_installed == 1) { + SEP_DRV_LOG_INIT_OUT("The OS Hooks are already installed."); + return; + } + + linuxos_Map_Kernel_Modules(); + + err = profile_event_register(MY_UNMAP, &linuxos_exec_unmap_nb); + err2 = profile_event_register(MY_TASK, &linuxos_exit_task_nb); + if (err || err2) { + if (err == OS_NO_SYSCALL) { + SEP_DRV_LOG_WARNING( + "This kernel does not implement kernel profiling hooks..."); + SEP_DRV_LOG_WARNING( + "...task termination and image unloads will not be tracked..."); + SEP_DRV_LOG_WARNING("...during sampling session!"); + } + } + + if (multi_pebs_enabled || sched_switch_enabled) { + err = install_sched_switch_callback(); + if (err) { + SEP_DRV_LOG_WARNING( + "Failed to install sched_switch callback for multiple pebs."); + } + } + + hooks_installed = 1; + atomic_set(&hook_state, HOOK_FREE); + + SEP_DRV_LOG_INIT_OUT(""); +} + +/* ------------------------------------------------------------------------- */ +/*! 
+ * @fn int uninstall_sched_switch_callback(void) + * @brief unregisters sched_switch callbacks for PEBS sideband + * + * @param none + * + * @return 0 success else error number + * + * Special Notes: + * + * None + */ +static int uninstall_sched_switch_callback(void) +{ + int err = 0; + + SEP_DRV_LOG_TRACE_IN(""); + SEP_DRV_LOG_INIT("Uninstalling PEBS Linux OS Hooks."); + +#if defined(CONFIG_TRACEPOINTS) +#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 15, 0) + if (!tp_sched_switch) { + err = -EIO; + SEP_DRV_LOG_INIT( + "Please check Linux is built w/ CONFIG_CONTEXT_SWITCH_TRACER."); + } else { + err = tracepoint_probe_unregister( + tp_sched_switch, (void *)record_pebs_process_info, + NULL); + } +#elif LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 35) + err = unregister_trace_sched_switch(record_pebs_process_info, NULL); +#elif LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 28) + err = unregister_trace_sched_switch(record_pebs_process_info); +#else + SEP_DRV_LOG_INIT( + "Please use Linux kernel version >= 2.6.28 to use multiple pebs."); + err = -1; +#endif + CONTROL_Invoke_Parallel(capture_sched_switch, NULL); +#endif + + SEP_DRV_LOG_TRACE_OUT("Res: %d.", err); + return err; +} + +/* ------------------------------------------------------------------------- */ +/*! 
+ * @fn VOID LINUXOS_Uninstall_Hooks(void) + * @brief unregisters the profiling callbacks + * + * @param none + * + * @return + * + * Special Notes: + * + * None + */ +VOID LINUXOS_Uninstall_Hooks(void) +{ + int err = 0; + int value = 0; + int tries = 10; + + SEP_DRV_LOG_INIT_IN("Uninstalling Linux OS Hooks."); + + if (hooks_installed == 0) { + SEP_DRV_LOG_INIT_OUT("Hooks are not installed!"); + return; + } + + hooks_installed = 0; + profile_event_unregister(MY_UNMAP, &linuxos_exec_unmap_nb); + profile_event_unregister(MY_TASK, &linuxos_exit_task_nb); + + if (multi_pebs_enabled || sched_switch_enabled) { + err = uninstall_sched_switch_callback(); + if (err) { + SEP_DRV_LOG_WARNING( + "Failed to uninstall sched_switch callback for multiple pebs."); + } + } + + value = atomic_cmpxchg(&hook_state, HOOK_FREE, HOOK_UNINSTALL); + if ((value == HOOK_FREE) || + (value == HOOK_UNINSTALL)) { // already in free or uninstall state + SEP_DRV_LOG_INIT_OUT( + "Uninstall hook done (already in state %d).", value); + return; + } + atomic_add(HOOK_UNINSTALL, &hook_state); + while (tries) { + SYS_IO_Delay(); + SYS_IO_Delay(); + value = atomic_read(&hook_state); + if (value == HOOK_UNINSTALL) { + break; + } + tries--; + } + + SEP_DRV_LOG_INIT_OUT("Done -- state %d, tries %d.", value, tries); +} + +/* ------------------------------------------------------------------------- */ +/*! 
+ * @fn DRV_BOOL LINUXOS_Check_KVM_Guest_Process() + * + * @brief check the presence of kvm guest process + * + * @param none + * + * @return TRUE if the kvm guest process is running, FALSE if not + */ +DRV_BOOL LINUXOS_Check_KVM_Guest_Process(void) +{ + struct task_struct *p; + + SEP_DRV_LOG_TRACE_IN(""); + + FOR_EACH_TASK(p) + { + // if (p == NULL) { + // continue; + // } + + p->comm[TASK_COMM_LEN - 1] = + 0; // making sure there is a trailing 0 + + if (!strncmp(p->comm, "qemu-kvm", 8)) { + SEP_DRV_LOG_INIT_TRACE_OUT("TRUE (found qemu-kvm!)."); + return TRUE; + } + } + + SEP_DRV_LOG_TRACE_OUT("FALSE"); + return FALSE; +} diff --git a/drivers/platform/x86/sepdk/sep/lwpmudrv.c b/drivers/platform/x86/sepdk/sep/lwpmudrv.c new file mode 100644 index 0000000000000..6f6772433cce1 --- /dev/null +++ b/drivers/platform/x86/sepdk/sep/lwpmudrv.c @@ -0,0 +1,7877 @@ +/* **************************************************************************** + * Copyright(C) 2009-2018 Intel Corporation. All Rights Reserved. + * + * This file is part of SEP Development Kit + * + * SEP Development Kit is free software; you can redistribute it + * and/or modify it under the terms of the GNU General Public License + * version 2 as published by the Free Software Foundation. + * + * SEP Development Kit is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * As a special exception, you may use this file as part of a free software + * library without restriction. Specifically, if other files instantiate + * templates or use macros or inline functions from this file, or you + * compile this file and link it with other files to produce an executable + * this file does not by itself cause the resulting executable to be + * covered by the GNU General Public License. 
This exception does not + * however invalidate any other reasons why the executable file might be + * covered by the GNU General Public License. + * **************************************************************************** + */ + +#include "lwpmudrv_defines.h" +#include "lwpmudrv_version.h" + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 11, 0) +#include +#else +#include +#endif +#include +#include +#include +#include +#include +#if defined(CONFIG_HYPERVISOR_GUEST) +#if LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 34) +#include +#endif +#endif +#if LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 32) +#include +#endif + +#if defined(CONFIG_XEN_HAVE_VPMU) +#include +#include +#include +#endif + +#include "lwpmudrv_types.h" +#include "lwpmudrv_ecb.h" +#include "lwpmudrv_ioctl.h" +#include "lwpmudrv_struct.h" +#include "inc/ecb_iterators.h" +#include "inc/unc_common.h" + +#if defined(BUILD_GFX) +#include "gfx.h" +#endif + +#if defined(BUILD_CHIPSET) +#include "lwpmudrv_chipset.h" +#endif +#include "pci.h" + +#include "apic.h" +#include "cpumon.h" +#include "lwpmudrv.h" +#include "utility.h" +#include "control.h" +#include "core2.h" +#include "pmi.h" + +#include "output.h" +#include "linuxos.h" +#include "sys_info.h" +#include "eventmux.h" +#include "pebs.h" + +MODULE_AUTHOR("Copyright(C) 2007-2018 Intel Corporation"); +MODULE_VERSION(SEP_NAME "_" SEP_VERSION_STR); +MODULE_LICENSE("Dual BSD/GPL"); + +static struct task_struct *abnormal_handler; + +typedef struct LWPMU_DEV_NODE_S LWPMU_DEV_NODE; +typedef LWPMU_DEV_NODE * LWPMU_DEV; + +struct LWPMU_DEV_NODE_S { + long buffer; + struct semaphore sem; + struct cdev cdev; +}; + +#define LWPMU_DEV_buffer(dev) ((dev)->buffer) +#define LWPMU_DEV_sem(dev) ((dev)->sem) +#define LWPMU_DEV_cdev(dev) ((dev)->cdev) + +/* Global variables of the driver */ +static SEP_VERSION_NODE drv_version; +U64 
*read_counter_info; +U64 *prev_counter_data; +static U64 prev_counter_size; +VOID **desc_data; +U64 total_ram; +U32 output_buffer_size = OUTPUT_LARGE_BUFFER; +U32 saved_buffer_size; +static S32 desc_count; +uid_t uid; +DRV_CONFIG drv_cfg; +static DEV_CONFIG cur_pcfg; +volatile pid_t control_pid; +U64 *interrupt_counts; +static LWPMU_DEV lwpmu_control; +static LWPMU_DEV lwmod_control; +static LWPMU_DEV lwsamp_control; +static LWPMU_DEV lwsampunc_control; +static LWPMU_DEV lwsideband_control; +EMON_BUFFER_DRIVER_HELPER emon_buffer_driver_helper; + +/* needed for multiple devices (core/uncore) */ +U32 num_devices; +static U32 num_core_devs; +U32 cur_device; +LWPMU_DEVICE devices; +static U32 uncore_em_factor; +static unsigned long unc_timer_interval; +static struct timer_list *unc_read_timer; +static S32 max_groups_unc; +DRV_BOOL multi_pebs_enabled = FALSE; +DRV_BOOL unc_buf_init = FALSE; +DRV_BOOL NMI_mode = TRUE; +DRV_BOOL KVM_guest_mode = FALSE; +DRV_SETUP_INFO_NODE req_drv_setup_info; + +/* needed for target agent support */ +U32 osid = OS_ID_NATIVE; +DRV_BOOL sched_switch_enabled = FALSE; + +#if defined(DRV_SEP_ACRN_ON) +struct profiling_vm_info_list *vm_info_list; +static struct timer_list *buffer_read_timer; +static unsigned long buffer_timer_interval; +shared_buf_t **samp_buf_per_cpu; +#endif + +#define UNCORE_EM_GROUP_SWAP_FACTOR 100 +#define PMU_DEVICES 2 // pmu, mod + +extern U32 *cpu_built_sysinfo; + +#define DRV_DEVICE_DELIMITER "!" 
+ +#if defined(DRV_USE_UNLOCKED_IOCTL) +static struct mutex ioctl_lock; +#endif + +#if defined(BUILD_CHIPSET) +CHIPSET_CONFIG pma; +CS_DISPATCH cs_dispatch; +#endif +static S8 *cpu_mask_bits; + +/* + * Global data: Buffer control structure + */ +BUFFER_DESC cpu_buf; +BUFFER_DESC unc_buf; +BUFFER_DESC module_buf; +BUFFER_DESC cpu_sideband_buf; + +static dev_t lwpmu_DevNum; /* the major and minor parts for SEP3 base */ +static dev_t lwsamp_DevNum; /* the major and minor parts for SEP3 percpu */ +static dev_t lwsampunc_DevNum; +/* the major and minor parts for SEP3 per package */ +static dev_t lwsideband_DevNum; + +static struct class *pmu_class; + +//extern volatile int config_done; + +CPU_STATE pcb; +static size_t pcb_size; +U32 *core_to_package_map; +U32 *core_to_phys_core_map; +U32 *core_to_thread_map; +U32 *core_to_dev_map; +U32 *threads_per_core; +U32 num_packages; +U64 *pmu_state; +U64 *cpu_tsc; +static U64 *prev_cpu_tsc; +static U64 *diff_cpu_tsc; +U64 *restore_bl_bypass; +U32 **restore_ha_direct2core; +U32 **restore_qpi_direct2core; +U32 *occupied_core_ids; +UNCORE_TOPOLOGY_INFO_NODE uncore_topology; +PLATFORM_TOPOLOGY_PROG_NODE platform_topology_prog_node; +static PLATFORM_TOPOLOGY_PROG_NODE req_platform_topology_prog_node; + +#if !defined(DRV_SEP_ACRN_ON) +static U8 *prev_set_CR4; +#endif + +wait_queue_head_t wait_exit; + +// extern OS_STATUS SOCPERF_Switch_Group3 (void); + +#if !defined(DRV_USE_UNLOCKED_IOCTL) +#define MUTEX_INIT(lock) +#define MUTEX_LOCK(lock) +#define MUTEX_UNLOCK(lock) +#else +#define MUTEX_INIT(lock) mutex_init(&(lock)); +#define MUTEX_LOCK(lock) mutex_lock(&(lock)) +#define MUTEX_UNLOCK(lock) mutex_unlock(&(lock)) +#endif + +#if defined(CONFIG_XEN_HAVE_VPMU) +typedef struct xen_pmu_params xen_pmu_params_t; +typedef struct xen_pmu_data xen_pmu_data_t; + +static DEFINE_PER_CPU(xen_pmu_data_t *, sep_xenpmu_shared); +#endif + +/* ------------------------------------------------------------------------- */ +/*! 
+ * @fn void lwpmudrv_PWR_Info(IOCTL_ARGS arg) + * + * @param arg - Pointer to the IOCTL structure + * + * @return OS_STATUS + * + * @brief Make a copy of the Power control information that has been passed in. + * + * Special Notes + */ +static OS_STATUS lwpmudrv_PWR_Info(IOCTL_ARGS arg) +{ + SEP_DRV_LOG_FLOW_IN(""); + if (DEV_CONFIG_power_capture(cur_pcfg) == FALSE) { + SEP_DRV_LOG_WARNING_FLOW_OUT( + "'Success' (Power capture is disabled!)."); + return OS_SUCCESS; + } + + // make sure size of incoming arg is correct + if ((arg->len_usr_to_drv != sizeof(PWR_NODE)) || + (arg->buf_usr_to_drv == NULL)) { + SEP_DRV_LOG_ERROR_FLOW_OUT( + "OS_FAULT (PWR capture has not been configured!)."); + return OS_FAULT; + } + + // + // First things first: Make a copy of the data for global use. + // + LWPMU_DEVICE_pwr(&devices[cur_device]) = + CONTROL_Allocate_Memory((int)arg->len_usr_to_drv); + if (!LWPMU_DEVICE_pwr(&devices[cur_device])) { + SEP_DRV_LOG_ERROR_FLOW_OUT("Memory allocation failure!"); + return OS_NO_MEM; + } + + if (copy_from_user(LWPMU_DEVICE_pwr(&devices[cur_device]), + (void __user *)arg->buf_usr_to_drv, arg->len_usr_to_drv)) { + SEP_DRV_LOG_ERROR_FLOW_OUT("Memory copy failure!"); + return OS_FAULT; + } + + SEP_DRV_LOG_FLOW_OUT("Success"); + return OS_SUCCESS; +} + +/* + * @fn void lwpmudrv_Allocate_Restore_Buffer + * + * @param + * + * @return OS_STATUS + * + * @brief allocate buffer space to save/restore the data (for JKT, QPILL and HA register) before collection + */ +static OS_STATUS lwpmudrv_Allocate_Restore_Buffer(void) +{ + int i = 0; + SEP_DRV_LOG_TRACE_IN(""); + + if (!restore_ha_direct2core) { + restore_ha_direct2core = CONTROL_Allocate_Memory( + GLOBAL_STATE_num_cpus(driver_state) * sizeof(U32 *)); + if (!restore_ha_direct2core) { + SEP_DRV_LOG_ERROR_TRACE_OUT( + "Memory allocation failure for restore_ha_direct2core!"); + return OS_NO_MEM; + } + for (i = 0; i < GLOBAL_STATE_num_cpus(driver_state); i++) { + restore_ha_direct2core[i] = 
CONTROL_Allocate_Memory( + MAX_BUSNO * sizeof(U32)); + } + } + if (!restore_qpi_direct2core) { + restore_qpi_direct2core = CONTROL_Allocate_Memory( + GLOBAL_STATE_num_cpus(driver_state) * sizeof(U32 *)); + if (!restore_qpi_direct2core) { + SEP_DRV_LOG_ERROR_TRACE_OUT( + "Memory allocation failure for restore_qpi_direct2core!"); + return OS_NO_MEM; + } + for (i = 0; i < GLOBAL_STATE_num_cpus(driver_state); i++) { + restore_qpi_direct2core[i] = CONTROL_Allocate_Memory( + 2 * MAX_BUSNO * sizeof(U32)); + } + } + if (!restore_bl_bypass) { + restore_bl_bypass = CONTROL_Allocate_Memory( + GLOBAL_STATE_num_cpus(driver_state) * sizeof(U64)); + if (!restore_bl_bypass) { + SEP_DRV_LOG_ERROR_TRACE_OUT( + "Memory allocation failure for restore_bl_bypass!"); + return OS_NO_MEM; + } + } + + SEP_DRV_LOG_TRACE_OUT("Success"); + return OS_SUCCESS; +} + +/* + * @fn void lwpmudrv_Allocate_Uncore_Buffer + * + * @param + * + * @return OS_STATUS + * + * @brief allocate buffer space for writing/reading uncore data + */ +static OS_STATUS lwpmudrv_Allocate_Uncore_Buffer(void) +{ + U32 i, j, k, l; + U32 max_entries = 0; + U32 num_entries; + ECB ecb; + + SEP_DRV_LOG_TRACE_IN( + ""); // this function is not checking memory allocations properly + + for (i = num_core_devs; i < num_devices; i++) { + if (!LWPMU_DEVICE_pcfg(&devices[i])) { + continue; + } + LWPMU_DEVICE_acc_value(&devices[i]) = + CONTROL_Allocate_Memory(num_packages * sizeof(U64 **)); + LWPMU_DEVICE_prev_value(&devices[i]) = + CONTROL_Allocate_Memory(num_packages * sizeof(U64 *)); + for (j = 0; j < num_packages; j++) { + // Allocate memory and zero out accumulator array (one per group) + LWPMU_DEVICE_acc_value(&devices[i])[j] = + CONTROL_Allocate_Memory( + LWPMU_DEVICE_em_groups_count( + &devices[i]) * + sizeof(U64 *)); + for (k = 0; + k < LWPMU_DEVICE_em_groups_count(&devices[i]); + k++) { + ecb = LWPMU_DEVICE_PMU_register_data( + &devices[i])[k]; + num_entries = + ECB_num_events(ecb) * + LWPMU_DEVICE_num_units(&devices[i]); + 
LWPMU_DEVICE_acc_value(&devices[i])[j][k] = + CONTROL_Allocate_Memory(num_entries * + sizeof(U64)); + for (l = 0; l < num_entries; l++) { + LWPMU_DEVICE_acc_value( + &devices[i])[j][k][l] = 0LL; + } + if (max_entries < num_entries) { + max_entries = num_entries; + } + } + // Allocate memory and zero out prev_value array (one across groups) + LWPMU_DEVICE_prev_value(&devices[i])[j] = + CONTROL_Allocate_Memory(max_entries * + sizeof(U64)); + for (k = 0; k < max_entries; k++) { + LWPMU_DEVICE_prev_value(&devices[i])[j][k] = + 0LL; + } + } + max_entries = 0; + } + + SEP_DRV_LOG_TRACE_OUT("Success"); + return OS_SUCCESS; +} + +/* + * @fn void lwpmudrv_Free_Uncore_Buffer + * + * @param + * + * @return OS_STATUS + * + * @brief Free uncore data buffers + */ +static OS_STATUS lwpmudrv_Free_Uncore_Buffer(U32 i) +{ + U32 j, k; + + SEP_DRV_LOG_TRACE_IN(""); + + if (LWPMU_DEVICE_prev_value(&devices[i])) { + for (j = 0; j < num_packages; j++) { + LWPMU_DEVICE_prev_value(&devices[i])[j] = + CONTROL_Free_Memory(LWPMU_DEVICE_prev_value( + &devices[i])[j]); + } + LWPMU_DEVICE_prev_value(&devices[i]) = CONTROL_Free_Memory( + LWPMU_DEVICE_prev_value(&devices[i])); + } + if (LWPMU_DEVICE_acc_value(&devices[i])) { + for (j = 0; j < num_packages; j++) { + if (LWPMU_DEVICE_acc_value(&devices[i])[j]) { + for (k = 0; k < LWPMU_DEVICE_em_groups_count( + &devices[i]); + k++) { + LWPMU_DEVICE_acc_value( + &devices[i])[j][k] = + CONTROL_Free_Memory( + LWPMU_DEVICE_acc_value( + &devices[i])[j] + [k]); + } + LWPMU_DEVICE_acc_value(&devices[i])[j] = + CONTROL_Free_Memory( + LWPMU_DEVICE_acc_value( + &devices[i])[j]); + } + } + LWPMU_DEVICE_acc_value(&devices[i]) = CONTROL_Free_Memory( + LWPMU_DEVICE_acc_value(&devices[i])); + } + + SEP_DRV_LOG_TRACE_OUT("Success"); + return OS_SUCCESS; +} + +/* + * @fn void lwpmudrv_Free_Restore_Buffer + * + * @param + * + * @return OS_STATUS + * + * @brief allocate buffer space to save/restore the data (for JKT, QPILL and HA register) before collection + */ 
+static OS_STATUS lwpmudrv_Free_Restore_Buffer(void) +{ + U32 i = 0; + + SEP_DRV_LOG_TRACE_IN(""); + + if (restore_ha_direct2core) { + for (i = 0; i < GLOBAL_STATE_num_cpus(driver_state); i++) { + restore_ha_direct2core[i] = + CONTROL_Free_Memory(restore_ha_direct2core[i]); + } + restore_ha_direct2core = + CONTROL_Free_Memory(restore_ha_direct2core); + } + if (restore_qpi_direct2core) { + for (i = 0; i < GLOBAL_STATE_num_cpus(driver_state); i++) { + restore_qpi_direct2core[i] = + CONTROL_Free_Memory(restore_qpi_direct2core[i]); + } + restore_qpi_direct2core = + CONTROL_Free_Memory(restore_qpi_direct2core); + } + if (restore_bl_bypass) { + restore_bl_bypass = CONTROL_Free_Memory(restore_bl_bypass); + } + + SEP_DRV_LOG_TRACE_OUT("Success"); + return OS_SUCCESS; +} + +/* ------------------------------------------------------------------------- */ +/*! + * @fn static OS_STATUS lwpmudrv_Initialize_State(void) + * + * @param none + * + * @return OS_STATUS + * + * @brief Allocates the memory needed at load time. Initializes all the + * @brief necessary state variables with the default values. + * + * Special Notes + */ +static OS_STATUS lwpmudrv_Initialize_State(void) +{ + S32 i, max_cpu_id = 0; + + SEP_DRV_LOG_INIT_IN(""); + + for_each_possible_cpu (i) { + if (cpu_present(i)) { + if (i > max_cpu_id) { + max_cpu_id = i; + } + } + } + max_cpu_id++; + + /* + * Machine Initializations + * Abstract this information away into a separate entry point + * + * Question: Should we allow for the use of Hot-cpu + * add/subtract functionality while the driver is executing? 
+ */ + if (max_cpu_id > num_present_cpus()) { + GLOBAL_STATE_num_cpus(driver_state) = max_cpu_id; + } else { + GLOBAL_STATE_num_cpus(driver_state) = num_present_cpus(); + } + GLOBAL_STATE_active_cpus(driver_state) = num_online_cpus(); + GLOBAL_STATE_cpu_count(driver_state) = 0; + GLOBAL_STATE_dpc_count(driver_state) = 0; + GLOBAL_STATE_num_em_groups(driver_state) = 0; + CHANGE_DRIVER_STATE(STATE_BIT_ANY, DRV_STATE_UNINITIALIZED); + + SEP_DRV_LOG_INIT_OUT("Success: num_cpus=%d, active_cpus=%d.", + GLOBAL_STATE_num_cpus(driver_state), + GLOBAL_STATE_active_cpus(driver_state)); + return OS_SUCCESS; +} + +#if !defined(CONFIG_PREEMPT_COUNT) && !defined(DRV_SEP_ACRN_ON) +/* ------------------------------------------------------------------------- */ +/*! + * @fn static void lwpmudrv_Fill_TSC_Info (PVOID param) + * + * @param param - pointer the buffer to fill in. + * + * @return none + * + * @brief Read the TSC and write into the correct array slot. + * + * Special Notes + */ +atomic_t read_now; +static wait_queue_head_t read_tsc_now; +static VOID lwpmudrv_Fill_TSC_Info(PVOID param) +{ + U32 this_cpu; + + SEP_DRV_LOG_TRACE_IN(""); + + preempt_disable(); + this_cpu = CONTROL_THIS_CPU(); + preempt_enable(); + // + // Wait until all CPU's are ready to proceed + // This will serve as a synchronization point to compute tsc skews. 
+ // + + if (atomic_read(&read_now) >= 1) { + if (atomic_dec_and_test(&read_now) == FALSE) { + wait_event_interruptible(read_tsc_now, + (atomic_read(&read_now) >= 1)); + } + } else { + wake_up_interruptible_all(&read_tsc_now); + } + UTILITY_Read_TSC(&cpu_tsc[this_cpu]); + SEP_DRV_LOG_TRACE("This cpu %d --- tsc --- 0x%llx.", this_cpu, + cpu_tsc[this_cpu]); + + SEP_DRV_LOG_TRACE_OUT("Success"); +} +#endif + +/********************************************************************* + * Internal Driver functions + * Should be called only from the lwpmudrv_DeviceControl routine + *********************************************************************/ + +/* ------------------------------------------------------------------------- */ +/*! + * @fn static void lwpmudrv_Dump_Tracer(const char *) + * + * @param Name of the tracer + * + * @return void + * + * @brief Function that handles the generation of markers into the ftrace stream + * + * Special Notes + */ +static void lwpmudrv_Dump_Tracer(const char *name, U64 tsc) +{ + SEP_DRV_LOG_TRACE_IN(""); + if (tsc == 0) { + preempt_disable(); + UTILITY_Read_TSC(&tsc); + tsc -= TSC_SKEW(CONTROL_THIS_CPU()); + preempt_enable(); + } + SEP_DRV_LOG_TRACE_OUT("Success"); +} + +/* ------------------------------------------------------------------------- */ +/*! + * @fn static OS_STATUS lwpmudrv_Version(IOCTL_ARGS arg) + * + * @param arg - pointer to the IOCTL_ARGS structure + * + * @return OS_STATUS + * + * @brief Local function that handles the LWPMU_IOCTL_VERSION call. + * @brief Returns the version number of the kernel mode sampling. 
+ * + * Special Notes + */ +static OS_STATUS lwpmudrv_Version(IOCTL_ARGS arg) +{ + OS_STATUS status; + + SEP_DRV_LOG_FLOW_IN(""); + + // Check if enough space is provided for collecting the data + if ((arg->len_drv_to_usr != sizeof(U32)) || + (arg->buf_drv_to_usr == NULL)) { + SEP_DRV_LOG_ERROR_FLOW_OUT("Invalid arguments."); + return OS_FAULT; + } + + status = put_user(SEP_VERSION_NODE_sep_version(&drv_version), + (U32 __user *)arg->buf_drv_to_usr); + + SEP_DRV_LOG_FLOW_OUT("Return value: %d", status); + return status; +} + +/* ------------------------------------------------------------------------- */ +/*! + * @fn static OS_STATUS lwpmudrv_Reserve(IOCTL_ARGS arg) + * + * @param arg - pointer to the IOCTL_ARGS structure + * + * @return OS_STATUS + * + * @brief + * @brief Local function that handles the LWPMU_IOCTL_RESERVE call. + * @brief Sets the state to RESERVED if possible. Returns BUSY if unable + * @brief to reserve the PMU. + * + * Special Notes + */ +static OS_STATUS lwpmudrv_Reserve(IOCTL_ARGS arg) +{ + OS_STATUS status = OS_SUCCESS; + + SEP_DRV_LOG_FLOW_IN(""); + + // Check if enough space is provided for collecting the data + if ((arg->len_drv_to_usr != sizeof(S32)) || + (arg->buf_drv_to_usr == NULL)) { + SEP_DRV_LOG_ERROR_FLOW_OUT("Invalid arguments."); + return OS_FAULT; + } + + status = put_user(!CHANGE_DRIVER_STATE(STATE_BIT_UNINITIALIZED, + DRV_STATE_RESERVED), + (int __user*)arg->buf_drv_to_usr); + + SEP_DRV_LOG_FLOW_OUT("Return value: %d", status); + return status; +} + +#if !defined(DRV_SEP_ACRN_ON) +/* ------------------------------------------------------------------------- */ +/*! 
+ * @fn static OS_STATUS lwpmudrv_Finish_Op(void) + * + * @param - none + * + * @return OS_STATUS + * + * @brief Finalize PMU after collection + * + * Special Notes + */ +static VOID lwpmudrv_Finish_Op(PVOID param) +{ + U32 this_cpu = CONTROL_THIS_CPU(); + U32 dev_idx = core_to_dev_map[this_cpu]; + DISPATCH dispatch = LWPMU_DEVICE_dispatch(&devices[dev_idx]); + + SEP_DRV_LOG_TRACE_IN(""); + + if (dispatch != NULL && dispatch->fini != NULL) { + dispatch->fini(&dev_idx); + } + + SEP_DRV_LOG_TRACE_OUT(""); +} +#endif + +/* ------------------------------------------------------------------------- */ +/*! + * @fn static VOID lwpmudrv_Clean_Up(DRV_BOOL) + * + * @param DRV_BOOL finish - Flag to call finish + * + * @return VOID + * + * @brief Cleans up the memory allocation. + * + * Special Notes + */ +static VOID lwpmudrv_Clean_Up(DRV_BOOL finish) +{ + U32 i; + + SEP_DRV_LOG_FLOW_IN(""); + + if (DRV_CONFIG_use_pcl(drv_cfg) == TRUE) { + drv_cfg = CONTROL_Free_Memory(drv_cfg); + goto signal_end; + } + + if (devices) { + U32 id; + U32 num_groups = 0; + EVENT_CONFIG ec; + DISPATCH dispatch_unc = NULL; + + for (id = 0; id < num_devices; id++) { + if (LWPMU_DEVICE_pcfg(&devices[id])) { + if (LWPMU_DEVICE_device_type(&devices[id]) == + DEVICE_INFO_UNCORE) { + dispatch_unc = LWPMU_DEVICE_dispatch( + &devices[id]); + if (dispatch_unc && + dispatch_unc->fini) { + SEP_DRV_LOG_TRACE( + "LWP: calling UNC Init."); + dispatch_unc->fini( + (PVOID *)&id); + } + lwpmudrv_Free_Uncore_Buffer(id); + } else if (finish) { +#if !defined(DRV_SEP_ACRN_ON) + CONTROL_Invoke_Parallel( + lwpmudrv_Finish_Op, NULL); +#endif + } + } + + if (LWPMU_DEVICE_PMU_register_data(&devices[id])) { + ec = LWPMU_DEVICE_ec(&devices[id]); + if (LWPMU_DEVICE_device_type(&devices[id]) == + DEVICE_INFO_CORE) { + num_groups = + EVENT_CONFIG_num_groups(ec); + } else { + num_groups = + EVENT_CONFIG_num_groups_unc(ec); + } + for (i = 0; i < num_groups; i++) { + LWPMU_DEVICE_PMU_register_data( + &devices[id])[i] = + 
CONTROL_Free_Memory( + LWPMU_DEVICE_PMU_register_data( + &devices[id])[i]); + } + LWPMU_DEVICE_PMU_register_data(&devices[id]) = + CONTROL_Free_Memory( + LWPMU_DEVICE_PMU_register_data( + &devices[id])); + } + LWPMU_DEVICE_pcfg(&devices[id]) = CONTROL_Free_Memory( + LWPMU_DEVICE_pcfg(&devices[id])); + LWPMU_DEVICE_ec(&devices[id]) = CONTROL_Free_Memory( + LWPMU_DEVICE_ec(&devices[id])); + if (LWPMU_DEVICE_lbr(&devices[id])) { + LWPMU_DEVICE_lbr(&devices[id]) = + CONTROL_Free_Memory( + LWPMU_DEVICE_lbr(&devices[id])); + } + if (LWPMU_DEVICE_pwr(&devices[id])) { + LWPMU_DEVICE_pwr(&devices[id]) = + CONTROL_Free_Memory( + LWPMU_DEVICE_pwr(&devices[id])); + } + if (LWPMU_DEVICE_cur_group(&devices[id])) { + LWPMU_DEVICE_cur_group(&devices[id]) = + CONTROL_Free_Memory( + LWPMU_DEVICE_cur_group( + &devices[id])); + } + } + devices = CONTROL_Free_Memory(devices); + } + + if (desc_data) { + for (i = 0; i < GLOBAL_STATE_num_descriptors(driver_state); + i++) { + desc_data[i] = CONTROL_Free_Memory(desc_data[i]); + } + desc_data = CONTROL_Free_Memory(desc_data); + } + + if (restore_bl_bypass) { + restore_bl_bypass = CONTROL_Free_Memory(restore_bl_bypass); + } + + if (restore_qpi_direct2core) { + for (i = 0; i < GLOBAL_STATE_num_cpus(driver_state); i++) { + restore_qpi_direct2core[i] = + CONTROL_Free_Memory(restore_qpi_direct2core[i]); + } + restore_qpi_direct2core = + CONTROL_Free_Memory(restore_qpi_direct2core); + } + + if (restore_ha_direct2core) { + for (i = 0; i < GLOBAL_STATE_num_cpus(driver_state); i++) { + restore_ha_direct2core[i] = + CONTROL_Free_Memory(restore_ha_direct2core[i]); + } + restore_ha_direct2core = + CONTROL_Free_Memory(restore_ha_direct2core); + } + + drv_cfg = CONTROL_Free_Memory(drv_cfg); + pmu_state = CONTROL_Free_Memory(pmu_state); + cpu_mask_bits = CONTROL_Free_Memory(cpu_mask_bits); + core_to_dev_map = CONTROL_Free_Memory(core_to_dev_map); +#if defined(BUILD_CHIPSET) + pma = CONTROL_Free_Memory(pma); +#endif + +signal_end: + 
GLOBAL_STATE_num_em_groups(driver_state) = 0; + GLOBAL_STATE_num_descriptors(driver_state) = 0; + num_devices = 0; + num_core_devs = 0; + max_groups_unc = 0; + control_pid = 0; + unc_buf_init = FALSE; + + OUTPUT_Cleanup(); + memset(pcb, 0, pcb_size); + + SEP_DRV_LOG_FLOW_OUT(""); +} + +/* ------------------------------------------------------------------------- */ +/*! + * @fn static NTSTATUS lwpmudrv_Initialize_Driver (PVOID buf_usr_to_drv, size_t len_usr_to_drv) + * + * @param buf_usr_to_drv - pointer to the input buffer + * @param len_usr_to_drv - size of the input buffer + * + * @return NTSTATUS + * + * @brief Local function that handles the LWPMU_IOCTL_INIT_DRIVER call. + * @brief Sets up the interrupt handler. + * @brief Set up the output buffers/files needed to make the driver + * @brief operational. + * + * Special Notes + */ +static OS_STATUS lwpmudrv_Initialize_Driver(PVOID buf_usr_to_drv, + size_t len_usr_to_drv) +{ + S32 cpu_num; + int status = OS_SUCCESS; + + SEP_DRV_LOG_FLOW_IN(""); + + if (buf_usr_to_drv == NULL) { + SEP_DRV_LOG_ERROR_FLOW_OUT("Invalid arguments."); + return OS_FAULT; + } + + if (!CHANGE_DRIVER_STATE(STATE_BIT_RESERVED, DRV_STATE_IDLE)) { + SEP_DRV_LOG_ERROR_FLOW_OUT("Unexpected driver state!"); + return OS_FAULT; + } + + interrupt_counts = NULL; + pmu_state = NULL; + + drv_cfg = CONTROL_Allocate_Memory(len_usr_to_drv); + if (!drv_cfg) { + status = OS_NO_MEM; + SEP_DRV_LOG_ERROR("Memory allocation failure for drv_cfg!"); + goto clean_return; + } + + if (copy_from_user(drv_cfg, (void __user *)buf_usr_to_drv, + len_usr_to_drv)) { + SEP_DRV_LOG_ERROR("Memory copy failure for drv_cfg!"); + status = OS_FAULT; + goto clean_return; + } + + if (DRV_CONFIG_enable_cp_mode(drv_cfg)) { +#if (defined(DRV_EM64T)) + if (output_buffer_size == OUTPUT_LARGE_BUFFER) { + output_buffer_size = OUTPUT_CP_BUFFER; + } +#endif + interrupt_counts = CONTROL_Allocate_Memory( + GLOBAL_STATE_num_cpus(driver_state) * + DRV_CONFIG_num_events(drv_cfg) * sizeof(U64)); 
+ if (interrupt_counts == NULL) { + SEP_DRV_LOG_ERROR( + "Memory allocation failure for interrupt_counts!"); + status = OS_NO_MEM; + goto clean_return; + } + } else if (output_buffer_size == OUTPUT_CP_BUFFER) { + output_buffer_size = OUTPUT_LARGE_BUFFER; + } + + if (DRV_CONFIG_use_pcl(drv_cfg) == TRUE) { + SEP_DRV_LOG_FLOW_OUT("Success, using PCL."); + return OS_SUCCESS; + } + + pmu_state = CONTROL_Allocate_KMemory( + GLOBAL_STATE_num_cpus(driver_state) * sizeof(U64) * 3); + if (!pmu_state) { + SEP_DRV_LOG_ERROR("Memory allocation failure for pmu_state!"); + status = OS_NO_MEM; + goto clean_return; + } + uncore_em_factor = 0; + for (cpu_num = 0; cpu_num < GLOBAL_STATE_num_cpus(driver_state); + cpu_num++) { + CPU_STATE_accept_interrupt(&pcb[cpu_num]) = 1; + CPU_STATE_initial_mask(&pcb[cpu_num]) = 1; + CPU_STATE_group_swap(&pcb[cpu_num]) = 1; + CPU_STATE_reset_mask(&pcb[cpu_num]) = 0; + CPU_STATE_num_samples(&pcb[cpu_num]) = 0; + CPU_STATE_last_p_state_valid(&pcb[cpu_num]) = FALSE; +#if defined(DRV_CPU_HOTPLUG) + CPU_STATE_offlined(&pcb[cpu_num]) = TRUE; +#else + CPU_STATE_offlined(&pcb[cpu_num]) = FALSE; +#endif + CPU_STATE_nmi_handled(&pcb[cpu_num]) = 0; + } + + DRV_CONFIG_seed_name(drv_cfg) = NULL; + DRV_CONFIG_seed_name_len(drv_cfg) = 0; + + SEP_DRV_LOG_TRACE("Config : size = %d.", DRV_CONFIG_size(drv_cfg)); + SEP_DRV_LOG_TRACE("Config : counting_mode = %d.", + DRV_CONFIG_counting_mode(drv_cfg)); + + control_pid = current->pid; + SEP_DRV_LOG_TRACE("Control PID = %d.", control_pid); + + if (core_to_dev_map == NULL) { + core_to_dev_map = CONTROL_Allocate_Memory( + GLOBAL_STATE_num_cpus(driver_state) * sizeof(U32)); + } + + if (DRV_CONFIG_counting_mode(drv_cfg) == FALSE) { + if (cpu_buf == NULL) { + cpu_buf = CONTROL_Allocate_Memory( + GLOBAL_STATE_num_cpus(driver_state) * + sizeof(BUFFER_DESC_NODE)); + if (!cpu_buf) { + SEP_DRV_LOG_ERROR( + "Memory allocation failure for cpu_buf!"); + status = OS_NO_MEM; + goto clean_return; + } + } + + if (module_buf == NULL) { + 
module_buf = CONTROL_Allocate_Memory( + sizeof(BUFFER_DESC_NODE)); + if (!module_buf) { + status = OS_NO_MEM; + goto clean_return; + } + } + +#if defined(CONFIG_TRACEPOINTS) + multi_pebs_enabled = (DRV_CONFIG_multi_pebs_enabled(drv_cfg) && + (DRV_SETUP_INFO_page_table_isolation( + &req_drv_setup_info) == + DRV_SETUP_INFO_PTI_DISABLED)); +#endif + if (multi_pebs_enabled || sched_switch_enabled) { + if (cpu_sideband_buf == NULL) { + cpu_sideband_buf = CONTROL_Allocate_Memory( + GLOBAL_STATE_num_cpus(driver_state) * + sizeof(BUFFER_DESC_NODE)); + if (!cpu_sideband_buf) { + SEP_DRV_LOG_ERROR( + "Memory allocation failure for cpu_sideband_buf!"); + status = OS_NO_MEM; + goto clean_return; + } + } + } + +#if defined(DRV_SEP_ACRN_ON) + if (samp_buf_per_cpu == NULL) { + samp_buf_per_cpu = + (shared_buf_t **)CONTROL_Allocate_Memory( + GLOBAL_STATE_num_cpus(driver_state) * + sizeof(shared_buf_t *)); + if (!samp_buf_per_cpu) { + SEP_PRINT_ERROR( + "lwpmudrv_Initialize: unable to allocate memory for samp_buf_per_cpu\n"); + goto clean_return; + } + } + + for (cpu_num = 0; cpu_num < GLOBAL_STATE_num_cpus(driver_state); + cpu_num++) { + samp_buf_per_cpu[cpu_num] = sbuf_allocate( + TRACE_ELEMENT_NUM, TRACE_ELEMENT_SIZE); + if (!samp_buf_per_cpu[cpu_num]) { + pr_err("Failed to allocate sampbuf on cpu%d\n", + cpu_num); + goto clean_return; + } + + status = sbuf_share_setup(cpu_num, ACRN_SEP, + samp_buf_per_cpu[cpu_num]); + if (status < 0) { + status = OS_FAULT; + pr_err("Failed to set up sampbuf on cpu%d\n", + cpu_num); + goto clean_return; + } + } +#endif + + /* + * Allocate the output and control buffers for each CPU in the system + * Allocate and set up the temp output files for each CPU in the system + * Allocate and set up the temp outout file for detailing the Modules in the system + */ + status = OUTPUT_Initialize(); + if (status != OS_SUCCESS) { + SEP_DRV_LOG_ERROR("OUTPUT_Initialize failed!"); + goto clean_return; + } + + /* + * Program the APIC and set up the interrupt 
handler + */ +#if !defined(DRV_SEP_ACRN_ON) + CPUMON_Install_Cpuhooks(); +#endif + SEP_DRV_LOG_TRACE("Finished Installing cpu hooks."); +#if defined(DRV_CPU_HOTPLUG) + for (cpu_num = 0; cpu_num < GLOBAL_STATE_num_cpus(driver_state); + cpu_num++) { + if (cpu_built_sysinfo && + cpu_built_sysinfo[cpu_num] == 0) { + cpu_tsc[cpu_num] = cpu_tsc[0]; + CONTROL_Invoke_Cpu(cpu_num, SYS_INFO_Build_Cpu, + NULL); + } + } +#endif + +#if defined(DRV_EM64T) + SYS_Get_GDT_Base((PVOID *)&gdt_desc); +#endif + SEP_DRV_LOG_TRACE("About to install module notification."); + LINUXOS_Install_Hooks(); + } + +clean_return: + if (status != OS_SUCCESS) { + drv_cfg = CONTROL_Free_Memory(drv_cfg); + interrupt_counts = CONTROL_Free_Memory(interrupt_counts); + pmu_state = CONTROL_Free_Memory(pmu_state); + CHANGE_DRIVER_STATE(STATE_BIT_ANY, DRV_STATE_TERMINATING); + } + + SEP_DRV_LOG_FLOW_OUT("Return value: %d.", status); + return status; +} +/* ------------------------------------------------------------------------- */ +/*! + * @fn static OS_STATUS lwpmudrv_Initialize (PVOID buf_usr_to_drv, size_t len_usr_to_drv) + * + * @param buf_usr_to_drv - pointer to the input buffer + * @param len_usr_to_drv - size of the input buffer + * + * @return OS_STATUS + * + * @brief Local function that handles the LWPMU_IOCTL_INIT call. + * @brief Sets up the interrupt handler. + * @brief Set up the output buffers/files needed to make the driver + * @brief operational. + * + * Special Notes + */ +static OS_STATUS lwpmudrv_Initialize(PVOID buf_usr_to_drv, + size_t len_usr_to_drv) +{ + int status = OS_SUCCESS; + S32 cpu_num; + + SEP_DRV_LOG_FLOW_IN(""); + + if (buf_usr_to_drv == NULL) { + SEP_DRV_LOG_ERROR_FLOW_OUT("Invalid arguments."); + return OS_FAULT; + } + + if (cur_device >= num_devices) { + CHANGE_DRIVER_STATE(STATE_BIT_ANY, DRV_STATE_TERMINATING); + SEP_DRV_LOG_ERROR_FLOW_OUT( + "No more devices to allocate! 
Wrong lwpmudrv_Init_Num_Devices."); + return OS_FAULT; + } + + /* + * Program State Initializations + */ + LWPMU_DEVICE_pcfg(&devices[cur_device]) = + CONTROL_Allocate_Memory(len_usr_to_drv); + if (!LWPMU_DEVICE_pcfg(&devices[cur_device])) { + status = OS_NO_MEM; + SEP_DRV_LOG_ERROR("Memory allocation failure for pcfg!"); + goto clean_return; + } + + if (copy_from_user(LWPMU_DEVICE_pcfg(&devices[cur_device]), + (void __user *)buf_usr_to_drv, len_usr_to_drv)) { + SEP_DRV_LOG_ERROR("Memory copy failure for pcfg!"); + status = OS_FAULT; + goto clean_return; + } + cur_pcfg = (DEV_CONFIG)LWPMU_DEVICE_pcfg(&devices[cur_device]); + + if (DRV_CONFIG_use_pcl(drv_cfg) == TRUE) { + SEP_DRV_LOG_FLOW_OUT("Success, using PCL."); + return OS_SUCCESS; + } + + LWPMU_DEVICE_dispatch(&devices[cur_device]) = + UTILITY_Configure_CPU(DEV_CONFIG_dispatch_id(cur_pcfg)); + if (LWPMU_DEVICE_dispatch(&devices[cur_device]) == NULL) { + SEP_DRV_LOG_ERROR_FLOW_OUT("Dispatch pointer is NULL!"); + status = OS_INVALID; + goto clean_return; + } + +#if !defined(DRV_SEP_ACRN_ON) + if (DRV_CONFIG_counting_mode(drv_cfg) == FALSE) { + status = PEBS_Initialize(cur_device); + if (status != OS_SUCCESS) { + SEP_DRV_LOG_ERROR("PEBS_Initialize failed!"); + goto clean_return; + } + } +#endif + + /* Create core to device ID map */ + for (cpu_num = 0; cpu_num < GLOBAL_STATE_num_cpus(driver_state); + cpu_num++) { + if (CPU_STATE_core_type(&pcb[cpu_num]) == + DEV_CONFIG_core_type(cur_pcfg)) { + core_to_dev_map[cpu_num] = cur_device; + } + } + num_core_devs++; //New core device + LWPMU_DEVICE_device_type(&devices[cur_device]) = DEVICE_INFO_CORE; + +clean_return: + if (status != OS_SUCCESS) { + // release all memory allocated in this function: + lwpmudrv_Clean_Up(FALSE); +#if !defined(DRV_SEP_ACRN_ON) + PEBS_Destroy(); +#endif + CHANGE_DRIVER_STATE(STATE_BIT_ANY, DRV_STATE_TERMINATING); + } + + SEP_DRV_LOG_FLOW_OUT("Return value: %d.", status); + return status; +} + +/* 
------------------------------------------------------------------------- */ +/*! + * @fn static OS_STATUS lwpmudrv_Initialize_Num_Devices(IOCTL_ARGS arg) + * + * @param arg - pointer to the IOCTL_ARGS structure + * + * @return OS_STATUS + * + * @brief + * @brief Local function that handles the LWPMU_IOCTL_INIT_NUM_DEV call. + * @brief Init # uncore devices. + * + * Special Notes + */ +static OS_STATUS lwpmudrv_Initialize_Num_Devices(IOCTL_ARGS arg) +{ + SEP_DRV_LOG_FLOW_IN(""); + + // Check if enough space is provided for collecting the data + if ((arg->len_usr_to_drv != sizeof(U32)) || + (arg->buf_usr_to_drv == NULL)) { + SEP_DRV_LOG_ERROR_FLOW_OUT("Invalid arguments."); + return OS_FAULT; + } + + if (copy_from_user(&num_devices, (void __user *)arg->buf_usr_to_drv, + arg->len_usr_to_drv)) { + SEP_DRV_LOG_ERROR_FLOW_OUT("Memory copy failure"); + return OS_FAULT; + } + /* + * Allocate memory for number of devices + */ + if (num_devices != 0) { + devices = CONTROL_Allocate_Memory(num_devices * + sizeof(LWPMU_DEVICE_NODE)); + if (!devices) { + SEP_DRV_LOG_ERROR_FLOW_OUT( + "Unable to allocate memory for devices!"); + return OS_NO_MEM; + } + } + cur_device = 0; + + SEP_DRV_LOG_FLOW_OUT("Success: num_devices=%d, devices=0x%p.", + num_devices, devices); + return OS_SUCCESS; +} + +/* ------------------------------------------------------------------------- */ +/*! + * @fn static OS_STATUS lwpmudrv_Initialize_UNC(PVOID buf_usr_to_drv, U32 len_usr_to_drv) + * + * @param buf_usr_to_drv - pointer to the input buffer + * @param len_usr_to_drv - size of the input buffer + * + * @return OS_STATUS + * + * @brief Local function that handles the LWPMU_IOCTL_INIT call. + * @brief Sets up the interrupt handler. + * @brief Set up the output buffers/files needed to make the driver + * @brief operational. 
+ * + * Special Notes + */ +static OS_STATUS lwpmudrv_Initialize_UNC(PVOID buf_usr_to_drv, + U32 len_usr_to_drv) +{ + DEV_UNC_CONFIG pcfg_unc; + U32 i; + int status = OS_SUCCESS; + + SEP_DRV_LOG_FLOW_IN(""); + + if (GET_DRIVER_STATE() != DRV_STATE_IDLE) { + SEP_DRV_LOG_ERROR_FLOW_OUT( + "Skipped: current state is not IDLE."); + return OS_IN_PROGRESS; + } + + if (!devices) { + CHANGE_DRIVER_STATE(STATE_BIT_ANY, DRV_STATE_TERMINATING); + SEP_DRV_LOG_ERROR_FLOW_OUT("No devices allocated!"); + return OS_INVALID; + } + + /* + * Program State Initializations: + * Foreach device, copy over pcfg and configure dispatch table + */ + if (cur_device >= num_devices) { + CHANGE_DRIVER_STATE(STATE_BIT_ANY, DRV_STATE_TERMINATING); + SEP_DRV_LOG_ERROR_FLOW_OUT( + "No more devices to allocate! Wrong lwpmudrv_Init_Num_Devices."); + return OS_FAULT; + } + if (buf_usr_to_drv == NULL) { + CHANGE_DRIVER_STATE(STATE_BIT_ANY, DRV_STATE_TERMINATING); + SEP_DRV_LOG_ERROR_FLOW_OUT("Invalid arguments."); + return OS_FAULT; + } + if (len_usr_to_drv != sizeof(DEV_UNC_CONFIG_NODE)) { + CHANGE_DRIVER_STATE(STATE_BIT_ANY, DRV_STATE_TERMINATING); + SEP_DRV_LOG_ERROR_FLOW_OUT( + "Got len_usr_to_drv=%d, expecting size=%d", + len_usr_to_drv, (int)sizeof(DEV_UNC_CONFIG_NODE)); + return OS_FAULT; + } + // allocate memory + LWPMU_DEVICE_pcfg(&devices[cur_device]) = + CONTROL_Allocate_Memory(sizeof(DEV_UNC_CONFIG_NODE)); + // copy over pcfg + if (copy_from_user(LWPMU_DEVICE_pcfg(&devices[cur_device]), + (void __user *)buf_usr_to_drv, len_usr_to_drv)) { + CHANGE_DRIVER_STATE(STATE_BIT_ANY, DRV_STATE_TERMINATING); + SEP_DRV_LOG_ERROR_FLOW_OUT("Failed to copy from user!"); + return OS_FAULT; + } + // configure dispatch from dispatch_id + pcfg_unc = (DEV_UNC_CONFIG)LWPMU_DEVICE_pcfg(&devices[cur_device]); + if (!pcfg_unc) { + CHANGE_DRIVER_STATE(STATE_BIT_ANY, DRV_STATE_TERMINATING); + SEP_DRV_LOG_ERROR_FLOW_OUT("Invalid pcfg_unc."); + return OS_INVALID; + } + + LWPMU_DEVICE_dispatch(&devices[cur_device]) = + 
UTILITY_Configure_CPU(DEV_UNC_CONFIG_dispatch_id(pcfg_unc)); + if (LWPMU_DEVICE_dispatch(&devices[cur_device]) == NULL) { + CHANGE_DRIVER_STATE(STATE_BIT_ANY, DRV_STATE_TERMINATING); + SEP_DRV_LOG_ERROR_FLOW_OUT("Unable to configure CPU!"); + return OS_FAULT; + } + + LWPMU_DEVICE_cur_group(&devices[cur_device]) = + CONTROL_Allocate_Memory(num_packages * sizeof(S32)); + if (LWPMU_DEVICE_cur_group(&devices[cur_device]) == NULL) { + CHANGE_DRIVER_STATE(STATE_BIT_ANY, DRV_STATE_TERMINATING); + SEP_DRV_LOG_ERROR_FLOW_OUT( + "Cur_grp allocation failed for device %u!", cur_device); + return OS_NO_MEM; + } + for (i = 0; i < num_packages; i++) { + LWPMU_DEVICE_cur_group(&devices[cur_device])[i] = 0; + } + + LWPMU_DEVICE_em_groups_count(&devices[cur_device]) = 0; + LWPMU_DEVICE_num_units(&devices[cur_device]) = 0; + LWPMU_DEVICE_device_type(&devices[cur_device]) = DEVICE_INFO_UNCORE; + + if (DRV_CONFIG_counting_mode(drv_cfg) == FALSE) { + if (unc_buf == NULL) { + unc_buf = CONTROL_Allocate_Memory( + num_packages * sizeof(BUFFER_DESC_NODE)); + if (!unc_buf) { + CHANGE_DRIVER_STATE(STATE_BIT_ANY, + DRV_STATE_TERMINATING); + SEP_DRV_LOG_ERROR_FLOW_OUT( + "Memory allocation failure."); + return OS_NO_MEM; + } + } + + if (!unc_buf_init) { + status = OUTPUT_Initialize_UNC(); + if (status != OS_SUCCESS) { + CHANGE_DRIVER_STATE(STATE_BIT_ANY, + DRV_STATE_TERMINATING); + SEP_DRV_LOG_ERROR_FLOW_OUT( + "OUTPUT_Initialize failed!"); + return status; + } + unc_buf_init = TRUE; + } + } + + SEP_DRV_LOG_FLOW_OUT("unc dispatch id = %d.", + DEV_UNC_CONFIG_dispatch_id(pcfg_unc)); + + return OS_SUCCESS; +} + +/* ------------------------------------------------------------------------- */ +/*! + * @fn static OS_STATUS lwpmudrv_Terminate(void) + * + * @param none + * + * @return OS_STATUS + * + * @brief Local function that handles the DRV_OPERATION_TERMINATE call. + * @brief Cleans up the interrupt handler and resets the PMU state. 
 *
 * Special Notes
 */
static OS_STATUS lwpmudrv_Terminate(void)
{
	SEP_DRV_LOG_FLOW_IN("");

	/* Nothing to tear down if the driver never left UNINITIALIZED. */
	if (GET_DRIVER_STATE() == DRV_STATE_UNINITIALIZED) {
		SEP_DRV_LOG_FLOW_OUT("Success (already uninitialized).");
		return OS_SUCCESS;
	}

	/* Teardown is only legal from STOPPED or TERMINATING. */
	if (!CHANGE_DRIVER_STATE(STATE_BIT_STOPPED | STATE_BIT_TERMINATING,
				 DRV_STATE_UNINITIALIZED)) {
		SEP_DRV_LOG_ERROR_FLOW_OUT("Unexpected state!");
		return OS_FAULT;
	}

	/* Module-tracking hooks are only installed for non-counting
	 * (sampling) runs, so only uninstall them in that mode.
	 */
	if (drv_cfg && DRV_CONFIG_counting_mode(drv_cfg) == FALSE) {
		LINUXOS_Uninstall_Hooks();
	}

	lwpmudrv_Clean_Up(TRUE);

	SEP_DRV_LOG_FLOW_OUT("Success");
	return OS_SUCCESS;
}

/* ------------------------------------------------------------------------- */
/*!
 * @fn static void lwpmudrv_Switch_To_Next_Group(param)
 *
 * @param none
 *
 * @return none
 *
 * @brief Switch to the next event group for both core and uncore.
 * @brief This function assumes an active collection is frozen
 * @brief or no collection is active.
 *
 * Special Notes
 */
static VOID lwpmudrv_Switch_To_Next_Group(void)
{
	S32 cpuid;
	U32 i, j;
	CPU_STATE pcpu;
	EVENT_CONFIG ec;
	DEV_UNC_CONFIG pcfg_unc;
	DISPATCH dispatch_unc;
	ECB pecb_unc = NULL;
	U32 cur_grp = 0;

	SEP_DRV_LOG_FLOW_IN("");

	/* Advance every core's current event group. */
	for (cpuid = 0; cpuid < GLOBAL_STATE_num_cpus(driver_state); cpuid++) {
		pcpu = &pcb[cpuid];
		ec = (EVENT_CONFIG)LWPMU_DEVICE_ec(
			&devices[core_to_dev_map[cpuid]]);
		CPU_STATE_current_group(pcpu)++;
		// make the event group list circular
		CPU_STATE_current_group(pcpu) %= EVENT_CONFIG_num_groups(ec);
	}

	/* Advance the per-package group cursor of each multi-group uncore
	 * device (EMON mode only).
	 */
	if (num_devices) {
		for (i = num_core_devs; i < num_devices; i++) {
			pcfg_unc = LWPMU_DEVICE_pcfg(&devices[i]);
			dispatch_unc = LWPMU_DEVICE_dispatch(&devices[i]);
			if (LWPMU_DEVICE_em_groups_count(&devices[i]) > 1) {
				if (pcb && pcfg_unc && dispatch_unc &&
				    DRV_CONFIG_emon_mode(drv_cfg)) {
					for (j = 0; j < num_packages; j++) {
						cur_grp = LWPMU_DEVICE_cur_group(
							&devices[i])[j];
						pecb_unc = LWPMU_DEVICE_PMU_register_data(
							&devices[i])[cur_grp];
						LWPMU_DEVICE_cur_group(
							&devices[i])[j]++;
						/* When the core side wrapped
						 * back to group 0, restart the
						 * uncore cursor as well so the
						 * two stay in lockstep.
						 */
						if (CPU_STATE_current_group(
							    &pcb[0]) == 0) {
							LWPMU_DEVICE_cur_group(
								&devices[i])[j] =
								0;
						}
						LWPMU_DEVICE_cur_group(
							&devices[i])[j] %=
							LWPMU_DEVICE_em_groups_count(
								&devices[i]);
					}
					SEP_DRV_LOG_TRACE(
						"Swap Group to %d for device %d.",
						cur_grp, i);
					if (pecb_unc &&
					    ECB_device_type(pecb_unc) ==
						    DEVICE_UNC_SOCPERF) {
						// SOCPERF_Switch_Group3();
					}
				}
			}
		}
	}

	SEP_DRV_LOG_FLOW_OUT("");
}

/* ------------------------------------------------------------------------- */
/*!
 * @fn static OS_STATUS lwpmudrv_Get_Driver_State(IOCTL_ARGS arg)
 *
 * @param arg - pointer to the IOCTL_ARGS structure
 *
 * @return OS_STATUS
 *
 * @brief Local function that handles the LWPMU_IOCTL_GET_Driver_State call.
 * @brief Returns the current driver state.
 *
 * Special Notes
 */
static OS_STATUS lwpmudrv_Get_Driver_State(IOCTL_ARGS arg)
{
	OS_STATUS status = OS_SUCCESS;

	SEP_DRV_LOG_TRACE_IN("");

	// Check if enough space is provided for collecting the data
	if ((arg->len_drv_to_usr != sizeof(U32)) ||
	    (arg->buf_drv_to_usr == NULL)) {
		SEP_DRV_LOG_ERROR_TRACE_OUT("Invalid arguments!");
		return OS_FAULT;
	}

	/* put_user returns 0 on success, -EFAULT on failure. */
	status = put_user(GET_DRIVER_STATE(), (U32 __user*)arg->buf_drv_to_usr);

	SEP_DRV_LOG_TRACE_OUT("Return value: %d.", status);
	return status;
}

/* ------------------------------------------------------------------------- */
/*!
+ * @fn static OS_STATUS lwpmudrv_Pause_Uncore(void) + * + * @param - 1 if switching group, 0 otherwise + * + * @return OS_STATUS + * + * @brief Pause the uncore collection + * + * Special Notes + */ +static VOID lwpmudrv_Pause_Uncore(PVOID param) +{ + U32 i; + U32 switch_grp; + DEV_UNC_CONFIG pcfg_unc = NULL; + DISPATCH dispatch_unc = NULL; + + SEP_DRV_LOG_TRACE_IN(""); + + switch_grp = *((U32 *)param); + + for (i = num_core_devs; i < num_devices; i++) { + pcfg_unc = (DEV_UNC_CONFIG)LWPMU_DEVICE_pcfg(&devices[i]); + dispatch_unc = LWPMU_DEVICE_dispatch(&devices[i]); + + if (pcfg_unc && dispatch_unc && dispatch_unc->freeze) { + SEP_DRV_LOG_TRACE("LWP: calling UNC Pause."); + if (switch_grp) { + if (LWPMU_DEVICE_em_groups_count(&devices[i]) > + 1) { + dispatch_unc->freeze(&i); + } + } else { + dispatch_unc->freeze(&i); + } + } + } + + SEP_DRV_LOG_TRACE_OUT(""); +} + +#if !defined(DRV_SEP_ACRN_ON) +/* ------------------------------------------------------------------------- */ +/*! + * @fn static OS_STATUS lwpmudrv_Pause_Op(void) + * + * @param - none + * + * @return OS_STATUS + * + * @brief Pause the core/uncore collection + * + * Special Notes + */ +static VOID lwpmudrv_Pause_Op(PVOID param) +{ + U32 dev_idx; + DISPATCH dispatch; + U32 switch_grp = 0; + U32 this_cpu = CONTROL_THIS_CPU(); + + dev_idx = core_to_dev_map[this_cpu]; + dispatch = LWPMU_DEVICE_dispatch(&devices[dev_idx]); + + SEP_DRV_LOG_TRACE_IN(""); + + if (dispatch != NULL && dispatch->freeze != NULL && + DRV_CONFIG_use_pcl(drv_cfg) == FALSE) { + dispatch->freeze(param); + } + + lwpmudrv_Pause_Uncore((PVOID)&switch_grp); + + SEP_DRV_LOG_TRACE_OUT(""); +} +#endif + +/* ------------------------------------------------------------------------- */ +/*! 
+ * @fn static OS_STATUS lwpmudrv_Pause(void) + * + * @param - none + * + * @return OS_STATUS + * + * @brief Pause the collection + * + * Special Notes + */ +static OS_STATUS lwpmudrv_Pause(void) +{ + int i; + int done = FALSE; + + SEP_DRV_LOG_FLOW_IN(""); + + if (!pcb || !drv_cfg) { + SEP_DRV_LOG_ERROR_FLOW_OUT("Pcb or drv_cfg pointer is NULL!"); + return OS_INVALID; + } + + if (CHANGE_DRIVER_STATE(STATE_BIT_RUNNING, DRV_STATE_PAUSING)) { + if (DRV_CONFIG_use_pcl(drv_cfg) == FALSE) { + for (i = 0; i < GLOBAL_STATE_num_cpus(driver_state); + i++) { + CPU_STATE_accept_interrupt(&pcb[i]) = 0; + } + while (!done) { + done = TRUE; + for (i = 0; + i < GLOBAL_STATE_num_cpus(driver_state); + i++) { + if (atomic_read(&CPU_STATE_in_interrupt( + &pcb[i]))) { + done = FALSE; + } + } + } + } +#if !defined(DRV_SEP_ACRN_ON) + CONTROL_Invoke_Parallel(lwpmudrv_Pause_Op, NULL); +#endif + /* + * This means that the PAUSE state has been reached. + */ + CHANGE_DRIVER_STATE(STATE_BIT_PAUSING, DRV_STATE_PAUSED); + } + + SEP_DRV_LOG_FLOW_OUT("Success"); + return OS_SUCCESS; +} + +/* ------------------------------------------------------------------------- */ +/*! 
+ * @fn static OS_STATUS lwpmudrv_Resume_Uncore(void) + * + * @param - 1 if switching group, 0 otherwise + * + * @return OS_STATUS + * + * @brief Resume the uncore collection + * + * Special Notes + */ +static VOID lwpmudrv_Resume_Uncore(PVOID param) +{ + U32 i; + U32 switch_grp; + DEV_UNC_CONFIG pcfg_unc = NULL; + DISPATCH dispatch_unc = NULL; + + SEP_DRV_LOG_TRACE_IN(""); + + switch_grp = *((U32 *)param); + + for (i = num_core_devs; i < num_devices; i++) { + pcfg_unc = (DEV_UNC_CONFIG)LWPMU_DEVICE_pcfg(&devices[i]); + dispatch_unc = LWPMU_DEVICE_dispatch(&devices[i]); + + if (pcfg_unc && dispatch_unc && dispatch_unc->restart) { + SEP_DRV_LOG_TRACE("LWP: calling UNC Resume."); + if (switch_grp) { + if (LWPMU_DEVICE_em_groups_count(&devices[i]) > + 1) { + dispatch_unc->restart(&i); + } + } else { + dispatch_unc->restart(&i); + } + } + } + + SEP_DRV_LOG_TRACE_OUT(""); +} + +#if !defined(DRV_SEP_ACRN_ON) +/* ------------------------------------------------------------------------- */ +/*! + * @fn static OS_STATUS lwpmudrv_Resume_Op(void) + * + * @param - none + * + * @return OS_STATUS + * + * @brief Resume the core/uncore collection + * + * Special Notes + */ +static VOID lwpmudrv_Resume_Op(PVOID param) +{ + U32 this_cpu = CONTROL_THIS_CPU(); + U32 dev_idx = core_to_dev_map[this_cpu]; + DISPATCH dispatch = LWPMU_DEVICE_dispatch(&devices[dev_idx]); + U32 switch_grp = 0; + + SEP_DRV_LOG_TRACE_IN(""); + + if (dispatch != NULL && dispatch->restart != NULL && + DRV_CONFIG_use_pcl(drv_cfg) == FALSE) { + dispatch->restart((VOID *)(size_t)0); + } + + lwpmudrv_Resume_Uncore((PVOID)&switch_grp); + + SEP_DRV_LOG_TRACE_OUT(""); +} +#endif + +/* ------------------------------------------------------------------------- */ +/*! 
+ * @fn static OS_STATUS lwpmudrv_Resume(void) + * + * @param - none + * + * @return OS_STATUS + * + * @brief Resume the collection + * + * Special Notes + */ +static OS_STATUS lwpmudrv_Resume(void) +{ + int i; + + SEP_DRV_LOG_FLOW_IN(""); + + if (!pcb || !drv_cfg) { + SEP_DRV_LOG_ERROR_FLOW_OUT("Pcb or drv_cfg pointer is NULL!"); + return OS_INVALID; + } + + /* + * If we are in the process of pausing sampling, wait until the pause has been + * completed. Then start the Resume process. + */ + while (GET_DRIVER_STATE() == DRV_STATE_PAUSING) { + /* + * This delay probably needs to be expanded a little bit more for large systems. + * For now, it is probably sufficient. + */ + SYS_IO_Delay(); + SYS_IO_Delay(); + } + + if (CHANGE_DRIVER_STATE(STATE_BIT_PAUSED, DRV_STATE_RUNNING)) { + for (i = 0; i < GLOBAL_STATE_num_cpus(driver_state); i++) { + if (cpu_mask_bits) { + CPU_STATE_accept_interrupt(&pcb[i]) = + cpu_mask_bits[i] ? 1 : 0; + CPU_STATE_group_swap(&pcb[i]) = 1; + } else { + CPU_STATE_accept_interrupt(&pcb[i]) = 1; + CPU_STATE_group_swap(&pcb[i]) = 1; + } + } +#if !defined(DRV_SEP_ACRN_ON) + CONTROL_Invoke_Parallel(lwpmudrv_Resume_Op, NULL); +#endif + } + + SEP_DRV_LOG_FLOW_OUT("Success"); + return OS_SUCCESS; +} + +/* ------------------------------------------------------------------------- */ +/*! 
 * @fn static OS_STATUS lwpmudrv_Write_Uncore(void)
 *
 * @param - 1 if switching group, 0 otherwise
 *
 * @return OS_STATUS
 *
 * @brief Program the uncore collection
 *
 * Special Notes
 */
static VOID lwpmudrv_Write_Uncore(PVOID param)
{
	U32 i;
	U32 switch_grp;
	DEV_UNC_CONFIG pcfg_unc = NULL;
	DISPATCH dispatch_unc = NULL;

	SEP_DRV_LOG_TRACE_IN("");

	switch_grp = *((U32 *)param);

	for (i = num_core_devs; i < num_devices; i++) {
		pcfg_unc = (DEV_UNC_CONFIG)LWPMU_DEVICE_pcfg(&devices[i]);
		dispatch_unc = LWPMU_DEVICE_dispatch(&devices[i]);

		if (pcfg_unc && dispatch_unc && dispatch_unc->write) {
			SEP_DRV_LOG_TRACE("LWP: calling UNC Write.");
			/* During a group switch only devices with more than
			 * one group need to be reprogrammed.
			 */
			if (switch_grp) {
				if (LWPMU_DEVICE_em_groups_count(&devices[i]) >
				    1) {
					dispatch_unc->write(&i);
				}
			} else {
				dispatch_unc->write(&i);
			}
		}
	}

	SEP_DRV_LOG_TRACE_OUT("");
}

/* ------------------------------------------------------------------------- */
/*!
 * @fn static OS_STATUS lwpmudrv_Write_Op(void)
 *
 * @param - Do operation for Core only
 *
 * @return OS_STATUS
 *
 * @brief Program the core/uncore collection
 *
 * Special Notes
 */
static VOID lwpmudrv_Write_Op(PVOID param)
{
	U32 this_cpu;
	U32 dev_idx;
	DISPATCH dispatch;
	U32 switch_grp = 0;

	SEP_DRV_LOG_TRACE_IN("");

	this_cpu = CONTROL_THIS_CPU();
	dev_idx = core_to_dev_map[this_cpu];
	dispatch = LWPMU_DEVICE_dispatch(&devices[dev_idx]);

	/* Program this CPU's core PMU. */
	if (dispatch != NULL && dispatch->write != NULL) {
		dispatch->write((VOID *)(size_t)0);
	}

	/* NULL param means "also program the uncore devices". */
	if (param == NULL) {
		lwpmudrv_Write_Uncore((PVOID)&switch_grp);
	}

	SEP_DRV_LOG_TRACE_OUT("");
}

/* ------------------------------------------------------------------------- */
/*!
 * @fn static OS_STATUS lwpmudrv_Switch_Group(void)
 *
 * @param none
 *
 * @return OS_STATUS
 *
 * @brief Switch the current group that is being collected.
 *
 * Special Notes
 * This routine is called from the user mode code to handle the multiple group
 * situation. 4 distinct steps are taken:
 * Step 1: Pause the sampling
 * Step 2: Increment the current group count
 * Step 3: Write the new group to the PMU
 * Step 4: Resume sampling
 */
static OS_STATUS lwpmudrv_Switch_Group(void)
{
	S32 idx;
	CPU_STATE pcpu;
	EVENT_CONFIG ec;
	OS_STATUS status = OS_SUCCESS;
	U32 current_state = GET_DRIVER_STATE();

	SEP_DRV_LOG_FLOW_IN("");

	if (!pcb || !drv_cfg) {
		SEP_DRV_LOG_ERROR_FLOW_OUT("Pcb or drv_cfg pointer is NULL!");
		return OS_INVALID;
	}

	if (current_state != DRV_STATE_RUNNING &&
	    current_state != DRV_STATE_PAUSED) {
		SEP_DRV_LOG_ERROR_FLOW_OUT(
			"Return value: %d (invalid driver state!).", status);
		return status;
	}

	status = lwpmudrv_Pause();

	/* Step 2: advance each core's group cursor (circular). */
	for (idx = 0; idx < GLOBAL_STATE_num_cpus(driver_state); idx++) {
		pcpu = &pcb[idx];
		ec = (EVENT_CONFIG)LWPMU_DEVICE_ec(
			&devices[core_to_dev_map[idx]]);
		CPU_STATE_current_group(pcpu)++;
		// make the event group list circular
		CPU_STATE_current_group(pcpu) %= EVENT_CONFIG_num_groups(ec);
	}
	/* Step 3: reprogram the PMUs. The current CPU index is passed as the
	 * param so that only one invocation (param == NULL, i.e. CPU 0) also
	 * writes the uncore devices.
	 * NOTE(review): when the caller runs on CPU 0 this param is
	 * (VOID *)0 == NULL for EVERY CPU's invocation, making all CPUs write
	 * the uncore devices -- confirm this is intended.
	 */
#if !defined(DRV_SEP_ACRN_ON)
	CONTROL_Invoke_Parallel(lwpmudrv_Write_Op,
				(VOID *)(size_t)CONTROL_THIS_CPU());
#else
	lwpmudrv_Write_Op((VOID *)(size_t)CONTROL_THIS_CPU());
#endif
	/* Step 4: resume, unless the collection is configured to start
	 * paused.
	 */
	if (drv_cfg && DRV_CONFIG_start_paused(drv_cfg) == FALSE) {
		lwpmudrv_Resume();
	}

	SEP_DRV_LOG_FLOW_OUT("Return value: %d", status);
	return status;
}

/* ------------------------------------------------------------------------- */
/*!
 * @fn static OS_STATUS lwpmudrv_Trigger_Read_Op(void)
 *
 * @param - none
 *
 * @return OS_STATUS
 *
 * @brief Read uncore data
 *
 * Special Notes
 */
static VOID lwpmudrv_Trigger_Read_Op(PVOID param)
{
	DEV_UNC_CONFIG pcfg_unc = NULL;
	DISPATCH dispatch_unc = NULL;
	U32 this_cpu;
	CPU_STATE pcpu;
	U32 package_num;
	U64 tsc;
	BUFFER_DESC bd;
	EVENT_DESC evt_desc;
	U32 cur_grp;
	ECB pecb;
	U32 sample_size = 0;
	U32 offset = 0;
	PVOID buf;
	UncoreSampleRecordPC *psamp;
	U32 i;

	SEP_DRV_LOG_TRACE_IN("");

	this_cpu = CONTROL_THIS_CPU();
	pcpu = &pcb[this_cpu];
	package_num = core_to_package_map[this_cpu];

	if (!DRIVER_STATE_IN(GET_DRIVER_STATE(),
			     STATE_BIT_RUNNING | STATE_BIT_PAUSED)) {
		SEP_DRV_LOG_ERROR_TRACE_OUT("State is not RUNNING or PAUSED!");
		return;
	}

	/* Only one CPU per socket performs the uncore read. */
	if (!CPU_STATE_socket_master(pcpu)) {
		SEP_DRV_LOG_TRACE_OUT("Not socket master.");
		return;
	}

	UTILITY_Read_TSC(&tsc);
	bd = &unc_buf[package_num];

	/* First pass: total sample size of all uncore devices, so a single
	 * contiguous reservation can hold every record.
	 * NOTE(review): this pass counts every device with a pcfg, while the
	 * emission pass below also requires dispatch->trigger_read; a device
	 * without trigger_read inflates the reservation -- confirm harmless.
	 */
	for (i = num_core_devs; i < num_devices; i++) {
		pcfg_unc = (DEV_UNC_CONFIG)LWPMU_DEVICE_pcfg(&devices[i]);
		if (pcfg_unc) {
			cur_grp = LWPMU_DEVICE_cur_group(
				&devices[i])[package_num];
			pecb = LWPMU_DEVICE_PMU_register_data(
				&devices[i])[cur_grp];
			evt_desc = desc_data[ECB_descriptor_id(pecb)];
			sample_size += EVENT_DESC_sample_size(evt_desc);
		}
	}

	buf = OUTPUT_Reserve_Buffer_Space(bd, sample_size, FALSE,
					  !SEP_IN_NOTIFICATION, -1);

	if (buf) {
		/* Second pass: fill one record per uncore device. */
		for (i = num_core_devs; i < num_devices; i++) {
			pcfg_unc =
				(DEV_UNC_CONFIG)LWPMU_DEVICE_pcfg(&devices[i]);
			dispatch_unc = LWPMU_DEVICE_dispatch(&devices[i]);
			if (pcfg_unc && dispatch_unc &&
			    dispatch_unc->trigger_read) {
				cur_grp = LWPMU_DEVICE_cur_group(
					&devices[i])[package_num];
				pecb = LWPMU_DEVICE_PMU_register_data(
					&devices[i])[cur_grp];
				evt_desc = desc_data[ECB_descriptor_id(pecb)];

				psamp = (UncoreSampleRecordPC *)(((S8 *)buf) +
								 offset);
				UNCORE_SAMPLE_RECORD_descriptor_id(psamp) =
					ECB_descriptor_id(pecb);
				UNCORE_SAMPLE_RECORD_tsc(psamp) = tsc;
				UNCORE_SAMPLE_RECORD_uncore_valid(psamp) = 1;
				UNCORE_SAMPLE_RECORD_cpu_num(psamp) =
					(U16)this_cpu;
				UNCORE_SAMPLE_RECORD_pkg_num(psamp) =
					(U16)package_num;

				dispatch_unc->trigger_read(psamp, i);
				offset += EVENT_DESC_sample_size(evt_desc);
			}
		}
	} else {
		SEP_DRV_LOG_WARNING(
			"Buffer space reservation failed; some samples will be dropped.");
	}

	SEP_DRV_LOG_TRACE_OUT("");
}

/* ------------------------------------------------------------------------- */
/*!
 * @fn static OS_STATUS lwpmudrv_Uncore_Switch_Group(void)
 *
 * @param none
 *
 * @return OS_STATUS
 *
 * @brief Switch the current group that is being collected.
 *
 * Special Notes
 * This routine is called from the user mode code to handle the multiple group
 * situation. 4 distinct steps are taken:
 * Step 1: Pause the sampling
 * Step 2: Increment the current group count
 * Step 3: Write the new group to the PMU
 * Step 4: Resume sampling
 */
static OS_STATUS lwpmudrv_Uncore_Switch_Group(void)
{
	OS_STATUS status = OS_SUCCESS;
	U32 current_state = GET_DRIVER_STATE();
	U32 i = 0;
	U32 j, k;
	DEV_UNC_CONFIG pcfg_unc;
	DISPATCH dispatch_unc;
	ECB ecb_unc;
	U32 cur_grp;
	U32 num_units;
	U32 switch_grp = 1;

	SEP_DRV_LOG_FLOW_IN("");

	if (!devices || !drv_cfg) {
		SEP_DRV_LOG_ERROR_FLOW_OUT(
			"Devices or drv_cfg pointer is NULL!");
		return OS_INVALID;
	}

	if (current_state != DRV_STATE_RUNNING &&
	    current_state != DRV_STATE_PAUSED) {
		SEP_DRV_LOG_FLOW_OUT("Driver state is not RUNNING or PAUSED!");
		return OS_INVALID;
	}

	/* Nothing to switch when every device has a single group. */
	if (max_groups_unc > 1) {
		CONTROL_Invoke_Parallel(lwpmudrv_Pause_Uncore,
					(PVOID)&switch_grp);
		for (i = num_core_devs; i < num_devices; i++) {
			pcfg_unc = LWPMU_DEVICE_pcfg(&devices[i]);
			dispatch_unc = LWPMU_DEVICE_dispatch(&devices[i]);
			num_units = LWPMU_DEVICE_num_units(&devices[i]);
			if (!pcfg_unc || !dispatch_unc) {
				continue;
			}
			if (LWPMU_DEVICE_em_groups_count(&devices[i]) > 1) {
				for (j = 0; j < num_packages; j++) {
					cur_grp = LWPMU_DEVICE_cur_group(
						&devices[i])[j];
					ecb_unc =
						LWPMU_DEVICE_PMU_register_data(
							&devices[i])[cur_grp];
					// Switch group
					LWPMU_DEVICE_cur_group(
						&devices[i])[j]++;
					LWPMU_DEVICE_cur_group(
						&devices[i])[j] %=
						LWPMU_DEVICE_em_groups_count(
							&devices[i]);
					if (ecb_unc &&
					    (ECB_device_type(ecb_unc) ==
					     DEVICE_UNC_SOCPERF) &&
					    (j == 0)) {
						// SOCPERF_Switch_Group3();
					}
					// Post group switch
					cur_grp = LWPMU_DEVICE_cur_group(
						&devices[i])[j];
					ecb_unc =
						LWPMU_DEVICE_PMU_register_data(
							&devices[i])[cur_grp];
					for (k = 0;
					     k < (ECB_num_events(ecb_unc) *
						  num_units);
					     k++) {
						LWPMU_DEVICE_prev_value(
							&devices[i])[j][k] =
							0LL; //zero out prev_value for new collection
					}
				}
			}
		}
		CONTROL_Invoke_Parallel(lwpmudrv_Write_Uncore,
					(PVOID)&switch_grp);
		CONTROL_Invoke_Parallel(lwpmudrv_Resume_Uncore,
					(PVOID)&switch_grp);
	}

	SEP_DRV_LOG_FLOW_OUT("Return value: %d", status);
	return status;
}

/* ------------------------------------------------------------------------- */
/*!
 * @fn static VOID lwpmudrv_Trigger_Read(void)
 *
 * @param - none
 *
 * @return - OS_STATUS
 *
 * @brief Read the Counter Data.
+ * + * Special Notes + */ +static VOID lwpmudrv_Trigger_Read( +#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 15, 0) + struct timer_list *tl +#else + unsigned long arg +#endif +) +{ + SEP_DRV_LOG_TRACE_IN(""); + + if (GET_DRIVER_STATE() != DRV_STATE_RUNNING) { + SEP_DRV_LOG_TRACE_OUT("Success: driver state is not RUNNING"); + return; + } +#if defined(BUILD_CHIPSET) + if (cs_dispatch && cs_dispatch->Trigger_Read) { + cs_dispatch->Trigger_Read(); + } +#endif + + if (drv_cfg && DRV_CONFIG_use_pcl(drv_cfg) == TRUE) { + SEP_DRV_LOG_TRACE_OUT("Success: Using PCL"); + return; + } + + CONTROL_Invoke_Parallel(lwpmudrv_Trigger_Read_Op, NULL); + + uncore_em_factor++; + if (uncore_em_factor == DRV_CONFIG_unc_em_factor(drv_cfg)) { + SEP_DRV_LOG_TRACE("Switching Uncore Group..."); + lwpmudrv_Uncore_Switch_Group(); + uncore_em_factor = 0; + } + +#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 15, 0) + mod_timer(unc_read_timer, jiffies + unc_timer_interval); +#else + unc_read_timer->expires = jiffies + unc_timer_interval; + add_timer(unc_read_timer); +#endif + + SEP_DRV_LOG_TRACE_OUT("Success."); +} + + +#if defined(DRV_SEP_ACRN_ON) +/* ------------------------------------------------------------------------- */ +/*! + * @fn static VOID lwpmudrv_ACRN_Buffer_Read(void) + * + * @param - none + * + * @return - OS_STATUS + * + * @brief Read the ACRN Buffer Data. 
 *
 * Special Notes
 */
static VOID lwpmudrv_ACRN_Buffer_Read(
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 15, 0)
	struct timer_list *tl
#else
	unsigned long arg
#endif
)
{
	S32 i;

	SEP_DRV_LOG_TRACE_IN("");

	if (GET_DRIVER_STATE() != DRV_STATE_RUNNING) {
		SEP_DRV_LOG_TRACE_OUT("Success: driver state is not RUNNING");
		return;
	}

	/* Drain the per-CPU ACRN shared buffers. */
	for (i = 0; i < GLOBAL_STATE_num_cpus(driver_state); i++) {
		PMI_Buffer_Handler(&i);
	}

	/* Re-arm the periodic buffer drain. */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 15, 0)
	mod_timer(buffer_read_timer, jiffies + buffer_timer_interval);
#else
	buffer_read_timer->expires = jiffies + buffer_timer_interval;
	add_timer(buffer_read_timer);
#endif

	SEP_DRV_LOG_TRACE_OUT("Success.");
}
#endif


/* ------------------------------------------------------------------------- */
/*!
 * @fn static void lwpmudrv_Read_Specific_TSC (PVOID param)
 *
 * @param param - pointer to the result
 *
 * @return none
 *
 * @brief Read the tsc value in the current processor and
 * @brief write the result into param.
 *
 * Special Notes
 */
static VOID lwpmudrv_Read_Specific_TSC(PVOID param)
{
	U32 this_cpu;

	SEP_DRV_LOG_TRACE_IN("");

	/* Only CPU 0 writes the result; preemption is disabled so the CPU
	 * identity cannot change between the check and the read.
	 */
	preempt_disable();
	this_cpu = CONTROL_THIS_CPU();
	if (this_cpu == 0) {
		UTILITY_Read_TSC((U64 *)param);
	}
	preempt_enable();

	SEP_DRV_LOG_TRACE_OUT("");
}

/* ------------------------------------------------------------------------- */
/*!
 * @fn VOID lwpmudrv_Uncore_Stop_Timer (void)
 *
 * @brief Stop the uncore read timer
 *
 * @param none
 *
 * @return none
 *
 * Special Notes:
 */
static VOID lwpmudrv_Uncore_Stop_Timer(void)
{
	SEP_DRV_LOG_FLOW_IN("");

	if (unc_read_timer == NULL) {
		return;
	}

	/* Wait for any in-flight callback before freeing the timer. */
	del_timer_sync(unc_read_timer);
	unc_read_timer = CONTROL_Free_Memory(unc_read_timer);

	SEP_DRV_LOG_FLOW_OUT("");
}

/* ------------------------------------------------------------------------- */
/*!
 * @fn OS_STATUS lwpmudrv_Uncore_Start_Timer (void)
 *
 * @brief Start the uncore read timer
 *
 * @param none
 *
 * @return OS_STATUS
 *
 * Special Notes:
 */
static VOID lwpmudrv_Uncore_Start_Timer(void)
{
	SEP_DRV_LOG_FLOW_IN("");

	/* Period comes from the user-supplied collection configuration. */
	unc_timer_interval =
		msecs_to_jiffies(DRV_CONFIG_unc_timer_interval(drv_cfg));
	unc_read_timer = CONTROL_Allocate_Memory(sizeof(struct timer_list));
	if (unc_read_timer == NULL) {
		SEP_DRV_LOG_ERROR_FLOW_OUT(
			"Memory allocation failure for unc_read_timer!");
		return;
	}

	/* timer_setup replaced init_timer in kernel 4.15. */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 15, 0)
	timer_setup(unc_read_timer, lwpmudrv_Trigger_Read, 0);
	mod_timer(unc_read_timer, jiffies + unc_timer_interval);
#else
	init_timer(unc_read_timer);
	unc_read_timer->function = lwpmudrv_Trigger_Read;
	unc_read_timer->expires = jiffies + unc_timer_interval;
	add_timer(unc_read_timer);
#endif

	SEP_DRV_LOG_FLOW_OUT("");
}


#if defined(DRV_SEP_ACRN_ON)
/* ------------------------------------------------------------------------- */
/*!
 * @fn VOID lwpmudrv_ACRN_Flush_Stop_Timer (void)
 *
 * @brief Stop the ACRN buffer read timer
 *
 * @param none
 *
 * @return none
 *
 * Special Notes:
 */
static VOID lwpmudrv_ACRN_Flush_Stop_Timer(void)
{
	SEP_DRV_LOG_FLOW_IN("");

	if (buffer_read_timer == NULL) {
		return;
	}

	/* Wait for any in-flight callback before freeing the timer. */
	del_timer_sync(buffer_read_timer);
	buffer_read_timer = CONTROL_Free_Memory(buffer_read_timer);

	SEP_DRV_LOG_FLOW_OUT("");
}

/* ------------------------------------------------------------------------- */
/*!
+ * @fn OS_STATUS lwpmudrv_ACRN_Flush_Start_Timer (void) + * + * @brief Start the ACRN buffer read timer + * + * @param none + * + * @return OS_STATUS + * + * Special Notes: + */ +static VOID lwpmudrv_ACRN_Flush_Start_Timer(void) +{ + SEP_DRV_LOG_FLOW_IN(""); + + buffer_timer_interval = msecs_to_jiffies(10); + buffer_read_timer = CONTROL_Allocate_Memory(sizeof(struct timer_list)); + if (buffer_read_timer == NULL) { + SEP_DRV_LOG_ERROR_FLOW_OUT( + "Memory allocation failure for buffer_read_timer!"); + return; + } + +#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 15, 0) + timer_setup(buffer_read_timer, lwpmudrv_ACRN_Buffer_Read, 0); + mod_timer(buffer_read_timer, jiffies + buffer_timer_interval); +#else + init_timer(buffer_read_timer); + buffer_read_timer->function = lwpmudrv_ACRN_Buffer_Read; + buffer_read_timer->expires = jiffies + buffer_timer_interval; + add_timer(buffer_read_timer); +#endif + + SEP_DRV_LOG_FLOW_OUT(""); +} +#endif + + +/* ------------------------------------------------------------------------- */ +/*! + * @fn static OS_STATUS lwpmudrv_Init_Op(void) + * + * @param - none + * + * @return OS_STATUS + * + * @brief Initialize PMU before collection + * + * Special Notes + */ +static VOID lwpmudrv_Init_Op(PVOID param) +{ + U32 this_cpu; + U32 dev_idx; + DISPATCH dispatch; + + preempt_disable(); + this_cpu = CONTROL_THIS_CPU(); + preempt_enable(); + dev_idx = core_to_dev_map[this_cpu]; + dispatch = LWPMU_DEVICE_dispatch(&devices[dev_idx]); + + SEP_DRV_LOG_TRACE_IN(""); + + if (dispatch != NULL && dispatch->init != NULL) { + dispatch->init(&dev_idx); + } + + SEP_DRV_LOG_TRACE_OUT(""); +} + +/* ------------------------------------------------------------------------- */ +/*! + * @fn static OS_STATUS lwpmudrv_Init_PMU(void) + * + * @param - none + * + * @return - OS_STATUS + * + * @brief Initialize the PMU and the driver state in preparation for data collection. 
+ * + * Special Notes + */ +static OS_STATUS lwpmudrv_Init_PMU(IOCTL_ARGS args) +{ + DEV_UNC_CONFIG pcfg_unc = NULL; + DISPATCH dispatch_unc = NULL; + EVENT_CONFIG ec; + U32 i; + U32 emon_buffer_size = 0; + OS_STATUS status = OS_SUCCESS; + + SEP_DRV_LOG_FLOW_IN(""); + + if (args->len_usr_to_drv == 0 || args->buf_usr_to_drv == NULL) { + SEP_DRV_LOG_ERROR_FLOW_OUT("Invalid arguments."); + return OS_INVALID; + } + + if (copy_from_user(&emon_buffer_size, (void __user *)args->buf_usr_to_drv, + sizeof(U32))) { + SEP_DRV_LOG_ERROR_FLOW_OUT("Memory copy failure"); + return OS_FAULT; + } + prev_counter_size = emon_buffer_size; + + if (!drv_cfg) { + SEP_DRV_LOG_ERROR_FLOW_OUT("drv_cfg not set!"); + return OS_FAULT; + } + if (DRV_CONFIG_use_pcl(drv_cfg) == TRUE) { + SEP_DRV_LOG_FLOW_OUT("Success: using PCL."); + return OS_SUCCESS; + } + + if (GET_DRIVER_STATE() != DRV_STATE_IDLE) { + SEP_DRV_LOG_ERROR_FLOW_OUT( + "Discarded: driver state is not IDLE!"); + return OS_IN_PROGRESS; + } + + for (i = 0; i < GLOBAL_STATE_num_cpus(driver_state); i++) { + ec = (EVENT_CONFIG)LWPMU_DEVICE_ec( + &devices[core_to_dev_map[i]]); + CPU_STATE_trigger_count(&pcb[i]) = EVENT_CONFIG_em_factor(ec); + CPU_STATE_trigger_event_num(&pcb[i]) = + EVENT_CONFIG_em_event_num(ec); + } + + // set cur_device's total groups to max groups of all devices + max_groups_unc = 0; + for (i = num_core_devs; i < num_devices; i++) { + if (max_groups_unc < + LWPMU_DEVICE_em_groups_count(&devices[i])) { + max_groups_unc = + LWPMU_DEVICE_em_groups_count(&devices[i]); + } + } + // now go back and up total groups for all devices + if (DRV_CONFIG_emon_mode(drv_cfg) == TRUE) { + for (i = num_core_devs; i < num_devices; i++) { + if (LWPMU_DEVICE_em_groups_count(&devices[i]) < + max_groups_unc) { + LWPMU_DEVICE_em_groups_count(&devices[i]) = + max_groups_unc; + } + } + } + + // allocate save/restore space before program the PMU + lwpmudrv_Allocate_Restore_Buffer(); + + // allocate uncore read buffers for SEP + if (unc_buf_init 
&& !DRV_CONFIG_emon_mode(drv_cfg)) { + lwpmudrv_Allocate_Uncore_Buffer(); + } + + // must be done after pcb is created and before PMU is first written to +#if !defined(DRV_SEP_ACRN_ON) + CONTROL_Invoke_Parallel(lwpmudrv_Init_Op, NULL); +#else + lwpmudrv_Init_Op(NULL); +#endif + + for (i = num_core_devs; i < num_devices; i++) { + pcfg_unc = (DEV_UNC_CONFIG)LWPMU_DEVICE_pcfg(&devices[i]); + dispatch_unc = LWPMU_DEVICE_dispatch(&devices[i]); + if (pcfg_unc && dispatch_unc && dispatch_unc->init) { + dispatch_unc->init((VOID *)&i); + } + } + + // Allocate PEBS buffers + if (DRV_CONFIG_counting_mode(drv_cfg) == FALSE) { + PEBS_Allocate(); + } + + // + // Transfer the data into the PMU registers + // +#if !defined(DRV_SEP_ACRN_ON) + CONTROL_Invoke_Parallel(lwpmudrv_Write_Op, NULL); +#else + lwpmudrv_Write_Op(NULL); +#endif + + SEP_DRV_LOG_TRACE("IOCTL_Init_PMU - finished initial Write."); + + if (DRV_CONFIG_counting_mode(drv_cfg) == TRUE || + DRV_CONFIG_emon_mode(drv_cfg) == TRUE) { + if (!read_counter_info) { + read_counter_info = + CONTROL_Allocate_Memory(emon_buffer_size); + if (!read_counter_info) { + SEP_DRV_LOG_ERROR_FLOW_OUT( + "Memory allocation failure!"); + return OS_NO_MEM; + } + } + if (!prev_counter_data) { + prev_counter_data = + CONTROL_Allocate_Memory(emon_buffer_size); + if (!prev_counter_data) { + read_counter_info = + CONTROL_Free_Memory(read_counter_info); + SEP_DRV_LOG_ERROR_FLOW_OUT( + "Memory allocation failure!"); + return OS_NO_MEM; + } + } + if (!emon_buffer_driver_helper) { + // allocate size = size of EMON_BUFFER_DRIVER_HELPER_NODE + the number of entries in core_index_to_thread_offset_map, which is num of cpu + emon_buffer_driver_helper = CONTROL_Allocate_Memory( + sizeof(EMON_BUFFER_DRIVER_HELPER_NODE) + + sizeof(U32) * + GLOBAL_STATE_num_cpus(driver_state)); + if (!emon_buffer_driver_helper) { + SEP_DRV_LOG_ERROR_FLOW_OUT( + "Memory allocation failure!"); + return OS_NO_MEM; + } + } + } + + SEP_DRV_LOG_FLOW_OUT("Return value: %d", status); + 
return status; +} + +/* ------------------------------------------------------------------------- */ +/*! + * @fn static void lwpmudrv_Read_MSR(pvoid param) + * + * @param param - pointer to the buffer to store the MSR counts + * + * @return none + * + * @brief + * @brief Read the U64 value at address in buf_drv_to_usr and + * @brief write the result into buf_usr_to_drv. + * + * Special Notes + */ +static VOID lwpmudrv_Read_MSR(PVOID param) +{ + S32 cpu_idx; + MSR_DATA this_node; +#if !defined(DRV_SEP_ACRN_ON) + S64 reg_num; +#else + struct profiling_msr_ops_list *msr_list; +#endif + + SEP_DRV_LOG_TRACE_IN(""); + + if (param == NULL) { + preempt_disable(); + cpu_idx = (S32)CONTROL_THIS_CPU(); + preempt_enable(); + } else { + cpu_idx = *(S32 *)param; + } +#if !defined(DRV_SEP_ACRN_ON) + this_node = &msr_data[cpu_idx]; + reg_num = MSR_DATA_addr(this_node); + + if (reg_num != 0) { + MSR_DATA_value(this_node) = + (U64)SYS_Read_MSR((U32)MSR_DATA_addr(this_node)); + } +#else + msr_list = (struct profiling_msr_ops_list *)CONTROL_Allocate_Memory( + GLOBAL_STATE_num_cpus(driver_state) * + sizeof(struct profiling_msr_ops_list)); + memset(msr_list, 0, + GLOBAL_STATE_num_cpus(driver_state) * + sizeof(struct profiling_msr_ops_list)); + for (cpu_idx = 0; cpu_idx < GLOBAL_STATE_num_cpus(driver_state); + cpu_idx++) { + this_node = &msr_data[cpu_idx]; + msr_list[cpu_idx].collector_id = COLLECTOR_SEP; + msr_list[cpu_idx].entries[0].msr_id = MSR_DATA_addr(this_node); + msr_list[cpu_idx].entries[0].op_type = MSR_OP_READ; + msr_list[cpu_idx].entries[0].value = 0LL; + msr_list[cpu_idx].num_entries = 1; + msr_list[cpu_idx].msr_op_state = MSR_OP_REQUESTED; + } + + BUG_ON(!virt_addr_valid(msr_list)); + + acrn_hypercall2(HC_PROFILING_OPS, PROFILING_MSR_OPS, + virt_to_phys(msr_list)); + + for (cpu_idx = 0; cpu_idx < GLOBAL_STATE_num_cpus(driver_state); + cpu_idx++) { + this_node = &msr_data[cpu_idx]; + MSR_DATA_value(this_node) = msr_list[cpu_idx].entries[0].value; + } + + msr_list = 
CONTROL_Free_Memory(msr_list); +#endif + + SEP_DRV_LOG_TRACE_OUT(""); +} + +/* ------------------------------------------------------------------------- */ +/*! + * @fn static OS_STATUS lwpmudrv_Read_MSR_All_Cores(IOCTL_ARGS arg) + * + * @param arg - pointer to the IOCTL_ARGS structure + * + * @return OS_STATUS + * + * @brief Read the U64 value at address into buf_drv_to_usr and write + * @brief the result into buf_usr_to_drv. + * @brief Returns OS_SUCCESS if the read across all cores succeed, + * @brief otherwise OS_FAULT. + * + * Special Notes + */ +static OS_STATUS lwpmudrv_Read_MSR_All_Cores(IOCTL_ARGS arg) +{ + U64 *val; + S32 reg_num; + S32 i; + MSR_DATA node; +#if defined(DRV_SEP_ACRN_ON) + S32 this_cpu = 0; +#endif + + SEP_DRV_LOG_FLOW_IN(""); + + if ((arg->len_usr_to_drv != sizeof(U32)) || + (arg->buf_usr_to_drv == NULL) || (arg->buf_drv_to_usr == NULL)) { + SEP_DRV_LOG_ERROR_FLOW_OUT("Invalid arguments!"); + return OS_FAULT; + } + + val = (U64 *)arg->buf_drv_to_usr; + if (val == NULL) { + SEP_DRV_LOG_ERROR_FLOW_OUT("NULL buf_usr_to_drv!"); + return OS_FAULT; + } + + if (copy_from_user(®_num, (void __user *)arg->buf_usr_to_drv, sizeof(U32))) { + SEP_DRV_LOG_ERROR_FLOW_OUT("Memory copy failure!"); + return OS_FAULT; + } + + msr_data = CONTROL_Allocate_Memory(GLOBAL_STATE_num_cpus(driver_state) * + sizeof(MSR_DATA_NODE)); + if (!msr_data) { + SEP_DRV_LOG_ERROR_FLOW_OUT("Memory allocation failure!"); + return OS_NO_MEM; + } + + for (i = 0; i < GLOBAL_STATE_num_cpus(driver_state); i++) { + node = &msr_data[i]; + MSR_DATA_addr(node) = reg_num; + } + +#if !defined(DRV_SEP_ACRN_ON) + CONTROL_Invoke_Parallel(lwpmudrv_Read_MSR, NULL); +#else + lwpmudrv_Read_MSR(&this_cpu); +#endif + + /* copy values to arg array? 
*/ + if (arg->len_drv_to_usr < GLOBAL_STATE_num_cpus(driver_state)) { + msr_data = CONTROL_Free_Memory(msr_data); + SEP_DRV_LOG_ERROR_FLOW_OUT( + "Not enough memory allocated in output buffer!"); + return OS_FAULT; + } + for (i = 0; i < GLOBAL_STATE_num_cpus(driver_state); i++) { + node = &msr_data[i]; + if (copy_to_user((void __user *)&val[i], (U64 *)&MSR_DATA_value(node), + sizeof(U64))) { + SEP_DRV_LOG_ERROR_FLOW_OUT("Memory copy failure!"); + return OS_FAULT; + } + } + + msr_data = CONTROL_Free_Memory(msr_data); + + SEP_DRV_LOG_FLOW_OUT("Success"); + return OS_SUCCESS; +} + +/* ------------------------------------------------------------------------- */ +/*! + * @fn static void lwpmudrv_Write_MSR(pvoid iaram) + * + * @param param - pointer to array containing the MSR address and the value to be written + * + * @return none + * + * @brief + * @brief Read the U64 value at address in buf_drv_to_usr and + * @brief write the result into buf_usr_to_drv. + * + * Special Notes + */ +static VOID lwpmudrv_Write_MSR(PVOID param) +{ + S32 cpu_idx; + MSR_DATA this_node; +#if !defined(DRV_SEP_ACRN_ON) + U32 reg_num; + U64 val; +#else + struct profiling_msr_ops_list *msr_list; +#endif + + SEP_DRV_LOG_TRACE_IN(""); + + if (param == NULL) { + preempt_disable(); + cpu_idx = (S32)CONTROL_THIS_CPU(); + preempt_enable(); + } else { + cpu_idx = *(S32 *)param; + } + +#if !defined(DRV_SEP_ACRN_ON) + this_node = &msr_data[cpu_idx]; + reg_num = (U32)MSR_DATA_addr(this_node); + val = (U64)MSR_DATA_value(this_node); + // don't attempt to write MSR 0 + if (reg_num == 0) { + preempt_enable(); + SEP_DRV_LOG_ERROR_TRACE_OUT("Error: tried to write MSR 0!"); + return; + } + + SYS_Write_MSR(reg_num, val); + preempt_enable(); + +#else + msr_list = (struct profiling_msr_ops_list *)CONTROL_Allocate_Memory( + GLOBAL_STATE_num_cpus(driver_state) * + sizeof(struct profiling_msr_ops_list)); + memset(msr_list, 0, + GLOBAL_STATE_num_cpus(driver_state) * + sizeof(struct profiling_msr_ops_list)); + for 
(cpu_idx = 0; cpu_idx < GLOBAL_STATE_num_cpus(driver_state); + cpu_idx++) { + this_node = &msr_data[cpu_idx]; + msr_list[cpu_idx].collector_id = COLLECTOR_SEP; + msr_list[cpu_idx].entries[0].msr_id = MSR_DATA_addr(this_node); + msr_list[cpu_idx].entries[0].op_type = MSR_OP_WRITE; + msr_list[cpu_idx].entries[0].value = MSR_DATA_value(this_node); + msr_list[cpu_idx].num_entries = 1; + msr_list[cpu_idx].msr_op_state = MSR_OP_REQUESTED; + } + + BUG_ON(!virt_addr_valid(msr_list)); + + acrn_hypercall2(HC_PROFILING_OPS, PROFILING_MSR_OPS, + virt_to_phys(msr_list)); + + msr_list = CONTROL_Free_Memory(msr_list); +#endif + + SEP_DRV_LOG_TRACE_OUT(""); +} + +/* ------------------------------------------------------------------------- */ +/*! + * @fn static OS_STATUS lwpmudrv_Write_MSR_All_Cores(IOCTL_ARGS arg) + * + * @param arg - pointer to the IOCTL_ARGS structure + * + * @return OS_STATUS + * + * @brief Read the U64 value at address into buf_usr_to_drv and write + * @brief the result into buf_usr_to_drv. + * @brief Returns OS_SUCCESS if the write across all cores succeed, + * @brief otherwise OS_FAULT. 
+ * + * Special Notes + */ +static OS_STATUS lwpmudrv_Write_MSR_All_Cores(IOCTL_ARGS arg) +{ + EVENT_REG_NODE buf; + EVENT_REG buf_usr_to_drv = &buf; + U32 reg_num; + U64 val; + S32 i; + MSR_DATA node; +#if defined(DRV_SEP_ACRN_ON) + S32 this_cpu = 0; +#endif + + SEP_DRV_LOG_FLOW_IN(""); + + if (arg->len_usr_to_drv < sizeof(EVENT_REG_NODE) || + arg->buf_usr_to_drv == NULL) { + SEP_DRV_LOG_ERROR_FLOW_OUT("Invalid arguments!"); + return OS_FAULT; + } + + if (copy_from_user(buf_usr_to_drv, (void __user *)arg->buf_usr_to_drv, + sizeof(EVENT_REG_NODE))) { + SEP_DRV_LOG_ERROR_FLOW_OUT("Memory copy failure!"); + return OS_FAULT; + } + reg_num = (U32)EVENT_REG_reg_id(buf_usr_to_drv, 0); + val = (U64)EVENT_REG_reg_value(buf_usr_to_drv, 0); + + msr_data = CONTROL_Allocate_Memory(GLOBAL_STATE_num_cpus(driver_state) * + sizeof(MSR_DATA_NODE)); + if (!msr_data) { + SEP_DRV_LOG_ERROR_FLOW_OUT("Memory allocation failure"); + return OS_NO_MEM; + } + + for (i = 0; i < GLOBAL_STATE_num_cpus(driver_state); i++) { + node = &msr_data[i]; + MSR_DATA_addr(node) = reg_num; + MSR_DATA_value(node) = val; + } + +#if !defined(DRV_SEP_ACRN_ON) + CONTROL_Invoke_Parallel(lwpmudrv_Write_MSR, NULL); +#else + lwpmudrv_Write_MSR(&this_cpu); +#endif + + msr_data = CONTROL_Free_Memory(msr_data); + + SEP_DRV_LOG_FLOW_OUT("Success"); + return OS_SUCCESS; +} + +/* ------------------------------------------------------------------------- */ +/*! 
+ * @fn static void lwpmudrv_Read_Data_Op(PVOID param) + * + * @param param - dummy + * + * @return void + * + * @brief Read all the core/uncore data counters at one shot + * + * Special Notes + */ +static void lwpmudrv_Read_Data_Op(VOID *param) +{ + U32 this_cpu; + DISPATCH dispatch; + U32 dev_idx; + DEV_UNC_CONFIG pcfg_unc; + + preempt_disable(); + this_cpu = CONTROL_THIS_CPU(); + preempt_enable(); + + SEP_DRV_LOG_TRACE_IN(""); + + if (devices == NULL) { + SEP_DRV_LOG_ERROR_TRACE_OUT("Devices is null!"); + return; + } + dev_idx = core_to_dev_map[this_cpu]; + dispatch = LWPMU_DEVICE_dispatch(&devices[dev_idx]); + if (dispatch != NULL && dispatch->read_data != NULL) { + dispatch->read_data(param); + } + for (dev_idx = num_core_devs; dev_idx < num_devices; dev_idx++) { + pcfg_unc = (DEV_UNC_CONFIG)LWPMU_DEVICE_pcfg(&devices[dev_idx]); + if (pcfg_unc == NULL) { + continue; + } + if (!(DRV_CONFIG_emon_mode(drv_cfg) || + DRV_CONFIG_counting_mode(drv_cfg))) { + continue; + } + dispatch = LWPMU_DEVICE_dispatch(&devices[dev_idx]); + if (dispatch == NULL) { + continue; + } + if (dispatch->read_data == NULL) { + continue; + } + dispatch->read_data((VOID *)&dev_idx); + } + + SEP_DRV_LOG_TRACE_OUT(""); +} + +/* ------------------------------------------------------------------------- */ +/*! + * @fn static OS_STATUS lwpmudrv_Read_MSRs(IOCTL_ARG arg) + * + * @param arg - pointer to the IOCTL_ARGS structure + * + * @return OS_STATUS + * + * @brief Read all the programmed data counters and accumulate them + * @brief into a single buffer. 
+ * + * Special Notes + */ +static OS_STATUS lwpmudrv_Read_MSRs(IOCTL_ARGS arg) +{ +#if defined(DRV_SEP_ACRN_ON) + S32 this_cpu = 0; +#endif + + SEP_DRV_LOG_FLOW_IN(""); + + if (arg->len_drv_to_usr == 0 || arg->buf_drv_to_usr == NULL) { + SEP_DRV_LOG_ERROR_FLOW_OUT("Invalid arguments."); + return OS_SUCCESS; + } + // + // Transfer the data in the PMU registers to the output buffer + // + if (!read_counter_info) { + read_counter_info = + CONTROL_Allocate_Memory(arg->len_drv_to_usr); + if (!read_counter_info) { + SEP_DRV_LOG_ERROR_FLOW_OUT("Memory allocation failure"); + return OS_NO_MEM; + } + } + if (!prev_counter_data) { + prev_counter_data = + CONTROL_Allocate_Memory(arg->len_drv_to_usr); + if (!prev_counter_data) { + read_counter_info = + CONTROL_Free_Memory(read_counter_info); + SEP_DRV_LOG_ERROR_FLOW_OUT("Memory allocation failure"); + return OS_NO_MEM; + } + } + memset(read_counter_info, 0, arg->len_drv_to_usr); + +#if !defined(DRV_SEP_ACRN_ON) + CONTROL_Invoke_Parallel(lwpmudrv_Read_Data_Op, NULL); +#else + lwpmudrv_Read_Data_Op(&this_cpu); +#endif + + if (copy_to_user((void __user *)arg->buf_drv_to_usr, read_counter_info, + arg->len_drv_to_usr)) { + SEP_DRV_LOG_ERROR_FLOW_OUT("Memory copy failure"); + return OS_FAULT; + } + + SEP_DRV_LOG_FLOW_OUT("Success"); + return OS_SUCCESS; +} + +#if defined(DRV_SEP_ACRN_ON) +/* ------------------------------------------------------------------------- */ +/*! 
+ * @fn static void lwpmudrv_Read_Metrics_Op(PVOID param) + * + * @param param - dummy + * + * @return void + * + * @brief Read metrics register IA32_PERF_METRICS to collect topdown metrics + * from PMU + * + * Special Notes + */ +static VOID lwpmudrv_Read_Metrics_Op(PVOID param) +{ + U32 this_cpu; + CPU_STATE pcpu; + U32 offset; + U32 dev_idx; + DEV_CONFIG pcfg; + DISPATCH dispatch; + + SEP_DRV_LOG_TRACE_IN("Dummy param: %p.", param); + + this_cpu = CONTROL_THIS_CPU(); + pcpu = &pcb[this_cpu]; + dev_idx = core_to_dev_map[this_cpu]; + pcfg = LWPMU_DEVICE_pcfg(&devices[dev_idx]); + dispatch = LWPMU_DEVICE_dispatch(&devices[dev_idx]); + // The pmu metric will be append after core event at thread level (basically treat them as extra core events). + // Move the pointer to the end of the core event for this cpu index. + offset = EMON_BUFFER_CORE_EVENT_OFFSET( + EMON_BUFFER_DRIVER_HELPER_core_index_to_thread_offset_map( + emon_buffer_driver_helper)[this_cpu], + EMON_BUFFER_DRIVER_HELPER_core_num_events( + emon_buffer_driver_helper)); + + if (!DEV_CONFIG_enable_perf_metrics(pcfg) || + !DEV_CONFIG_emon_perf_metrics_offset(pcfg) || + (CPU_STATE_current_group(pcpu) != 0)) { + return; + } + + if (dispatch != NULL && dispatch->read_metrics != NULL) { + dispatch->read_metrics(read_counter_info + offset); + SEP_DRV_LOG_TRACE("Data read = %llu.", + *(read_counter_info + offset)); + } + + SEP_DRV_LOG_TRACE_OUT(""); +} + +/* ------------------------------------------------------------------------- */ +/*! 
+ * @fn static OS_STATUS lwpmudrv_Read_Metrics(IOCTL_ARGS args) + * + * @param args - pointer to IOCTL_ARGS_NODE structure + * + * @return OS_STATUS + * + * @brief Read metrics register on all cpus and accumulate them into the output + * buffer + * + * Special Notes + */ +static OS_STATUS lwpmudrv_Read_Metrics(IOCTL_ARGS args) +{ + U32 this_cpu; + CPU_STATE pcpu; + U32 offset; + U64 *p_buffer; + DEV_CONFIG pcfg; + U32 idx; + + SEP_DRV_LOG_FLOW_IN("Args: %p.", args); + + this_cpu = CONTROL_THIS_CPU(); + pcpu = &pcb[this_cpu]; + p_buffer = (U64 *)(args->buf_drv_to_usr); + + if (args->len_drv_to_usr == 0 || args->buf_drv_to_usr == NULL) { + SEP_DRV_LOG_FLOW_OUT("Invalid parameters!"); + return OS_SUCCESS; + } + + if (!read_counter_info) { + SEP_DRV_LOG_ERROR_FLOW_OUT("Read_counter_info is NULL!"); + return OS_FAULT; + } + + CONTROL_Invoke_Parallel(lwpmudrv_Read_Metrics_Op, NULL); + for (idx = 0; idx < num_core_devs; idx++) { + pcfg = LWPMU_DEVICE_pcfg(&devices[idx]); + offset = DEV_CONFIG_emon_perf_metrics_offset(pcfg); + if (!DEV_CONFIG_enable_perf_metrics(pcfg) || !offset || + (CPU_STATE_current_group(pcpu) != 0)) { + continue; + } + p_buffer += offset; + if (copy_to_user((void __user *)p_buffer, read_counter_info + offset, + (sizeof(U64) * num_packages * + GLOBAL_STATE_num_cpus(driver_state) * + DEV_CONFIG_num_perf_metrics(pcfg)))) { + SEP_DRV_LOG_ERROR_FLOW_OUT( + "Failed copy_to_user for read_counter_info!"); + return OS_FAULT; + } + } + + SEP_DRV_LOG_FLOW_OUT("Success."); + return OS_SUCCESS; +} + +#endif + +/* ------------------------------------------------------------------------- */ +/*! + * @fn static OS_STATUS lwpmudrv_Read_Counters_And_Switch_Group(IOCTL_ARGS arg) + * + * @param arg - pointer to the IOCTL_ARGS structure + * + * @return OS_STATUS + * + * @brief Read / Store the counters and switch to the next valid group. + * + * Special Notes + * This routine is called from the user mode code to handle the multiple group + * situation. 
10 distinct steps are taken:
+ * Step 1: Save the previous cpu's tsc
+ * Step 2: Read the current cpu's tsc
+ * Step 3: Pause the counting PMUs
+ * Step 4: Calculate the difference between the current and previous cpu's tsc
+ * Step 5: Save original buffer ptr and copy cpu's tsc into the output buffer
+ * Increment the buffer position by number of CPU
+ * Step 6: Read the currently programmed data PMUs and copy the data into the output buffer
+ * Restore the original buffer ptr.
+ * Step 7: Switch to the next group (advance each cpu's current group number)
+ * Step 8: Write the new group to the PMU
+ * Step 9: Read the current cpu's tsc for next collection (so read MSRs time not included in report)
+ * Step 10: Resume the counting PMUs
+ */
+static OS_STATUS lwpmudrv_Read_Counters_And_Switch_Group(IOCTL_ARGS arg)
+{
+	U64 *p_buffer = NULL;
+	char *orig_r_buf_ptr = NULL;
+	U64 orig_r_buf_len = 0;
+	OS_STATUS status = OS_SUCCESS;
+	DRV_BOOL enter_in_pause_state = 0;
+	U32 i = 0;
+#if !defined(CONFIG_PREEMPT_COUNT)
+	U64 *tmp = NULL;
+#endif
+
+	SEP_DRV_LOG_FLOW_IN("");
+
+	if (arg->buf_drv_to_usr == NULL || arg->len_drv_to_usr == 0) {
+		SEP_DRV_LOG_ERROR_FLOW_OUT("Invalid arguments.");
+		return OS_FAULT;
+	}
+
+	if (!DRIVER_STATE_IN(GET_DRIVER_STATE(),
+			     STATE_BIT_RUNNING | STATE_BIT_PAUSED)) {
+		SEP_DRV_LOG_FLOW_OUT(
+			"'Success'/error: driver state is not RUNNING or PAUSED!");
+		return OS_SUCCESS;
+	}
+
+	if (GET_DRIVER_STATE() == DRV_STATE_PAUSED) {
+		enter_in_pause_state = 1;
+	}
+
+	// step 1
+#if !defined(CONFIG_PREEMPT_COUNT)
+	if (DRV_CONFIG_per_cpu_tsc(drv_cfg)) {
+		// swap cpu_tsc and prev_cpu_tsc, so that cpu_tsc is saved in prev_cpu_tsc.
+ tmp = prev_cpu_tsc; + prev_cpu_tsc = cpu_tsc; + cpu_tsc = tmp; + } else +#endif + prev_cpu_tsc[0] = cpu_tsc[0]; + + // step 2 + // if per_cpu_tsc is not defined, read cpu0's tsc and save in var cpu_tsc[0] + // if per_cpu_tsc is defined, read all cpu's tsc and save in var cpu_tsc by lwpmudrv_Fill_TSC_Info +#if !defined(CONFIG_PREEMPT_COUNT) + if (DRV_CONFIG_per_cpu_tsc(drv_cfg)) { + atomic_set(&read_now, GLOBAL_STATE_num_cpus(driver_state)); + init_waitqueue_head(&read_tsc_now); + CONTROL_Invoke_Parallel(lwpmudrv_Fill_TSC_Info, + (PVOID)(size_t)0); + } else +#endif + CONTROL_Invoke_Cpu(0, lwpmudrv_Read_Specific_TSC, &cpu_tsc[0]); + + // step 3 + // Counters should be frozen right after time stamped. + if (!enter_in_pause_state) { + status = lwpmudrv_Pause(); + } + + // step 4 + if (DRV_CONFIG_per_cpu_tsc(drv_cfg)) { + for (i = 0; i < GLOBAL_STATE_num_cpus(driver_state); i++) { +#if !defined(CONFIG_PREEMPT_COUNT) + diff_cpu_tsc[i] = cpu_tsc[i] - prev_cpu_tsc[i]; +#else + // if CONFIG_PREEMPT_COUNT is defined, means lwpmudrv_Fill_TSC_Info can not be run. 
+ // return all cpu's tsc difference with cpu0's tsc difference instead + diff_cpu_tsc[i] = cpu_tsc[0] - prev_cpu_tsc[0]; +#endif + } + } else { + diff_cpu_tsc[0] = cpu_tsc[0] - prev_cpu_tsc[0]; + } + + // step 5 + orig_r_buf_ptr = arg->buf_drv_to_usr; + orig_r_buf_len = arg->len_drv_to_usr; + + if (copy_to_user((void __user *)arg->buf_drv_to_usr, diff_cpu_tsc, + GLOBAL_STATE_num_cpus(driver_state) * sizeof(U64))) { + SEP_DRV_LOG_ERROR_FLOW_OUT("Memory copy failure!"); + return OS_FAULT; + } + + p_buffer = (U64 *)(arg->buf_drv_to_usr); + p_buffer += GLOBAL_STATE_num_cpus(driver_state); + arg->buf_drv_to_usr = (char *)p_buffer; + arg->len_drv_to_usr -= + GLOBAL_STATE_num_cpus(driver_state) * sizeof(U64); + + // step 6 + status = lwpmudrv_Read_MSRs(arg); + +#if defined(DRV_SEP_ACRN_ON) + status = lwpmudrv_Read_Metrics(arg); +#endif + arg->buf_drv_to_usr = orig_r_buf_ptr; + arg->len_drv_to_usr = orig_r_buf_len; + + // step 7 + // for each processor, increment its current group number + lwpmudrv_Switch_To_Next_Group(); + + // step 8 +#if !defined(DRV_SEP_ACRN_ON) + CONTROL_Invoke_Parallel(lwpmudrv_Write_Op, NULL); +#else + lwpmudrv_Write_Op(NULL); +#endif + + // step 9 + // if per_cpu_tsc is defined, read all cpu's tsc and save in cpu_tsc for next run +#if !defined(CONFIG_PREEMPT_COUNT) + if (DRV_CONFIG_per_cpu_tsc(drv_cfg)) { + atomic_set(&read_now, GLOBAL_STATE_num_cpus(driver_state)); + init_waitqueue_head(&read_tsc_now); + CONTROL_Invoke_Parallel(lwpmudrv_Fill_TSC_Info, + (PVOID)(size_t)0); + } else +#endif + CONTROL_Invoke_Cpu(0, lwpmudrv_Read_Specific_TSC, &cpu_tsc[0]); + + // step 10 + if (!enter_in_pause_state) { + status = lwpmudrv_Resume(); + } + + SEP_DRV_LOG_FLOW_OUT("Return value: %d", status); + return status; +} + +/* + * @fn static OS_STATUS lwpmudrv_Read_And_Reset_Counters(IOCTL_ARGS arg) + * + * @param arg - pointer to the IOCTL_ARGS structure + * + * @return OS_STATUS + * + * @brief Read the current value of the counters, and reset them all to 0. 
+ * + * Special Notes + * This routine is called from the user mode code to handle the multiple group + * situation. 9 distinct steps are taken: + * Step 1: Save the previous cpu's tsc + * Step 2: Read the current cpu's tsc + * Step 3: Pause the counting PMUs + * Step 4: Calculate the difference between the current and previous cpu's tsc + * Step 5: Save original buffer ptr and copy cpu's tsc into the output buffer + * Increment the buffer position by number of CPU + * Step 6: Read the currently programmed data PMUs and copy the data into the output buffer + * Restore the original buffer ptr. + * Step 7: Write the new group to the PMU + * Step 8: Read the current cpu's tsc for next collection (so read MSRs time not included in report) + * Step 9: Resume the counting PMUs + */ +static OS_STATUS lwpmudrv_Read_And_Reset_Counters(IOCTL_ARGS arg) +{ + U64 *p_buffer = NULL; + char *orig_r_buf_ptr = NULL; + U64 orig_r_buf_len = 0; + OS_STATUS status = OS_SUCCESS; + DRV_BOOL enter_in_pause_state = 0; + U32 i = 0; +#if !defined(CONFIG_PREEMPT_COUNT) + U64 *tmp = NULL; +#endif + + SEP_DRV_LOG_FLOW_IN(""); + + if (arg->buf_drv_to_usr == NULL || arg->len_drv_to_usr == 0) { + SEP_DRV_LOG_ERROR_FLOW_OUT("Invalid arguments."); + return OS_FAULT; + } + + if (!DRIVER_STATE_IN(GET_DRIVER_STATE(), + STATE_BIT_RUNNING | STATE_BIT_PAUSED)) { + SEP_DRV_LOG_FLOW_OUT( + "'Success'/error: driver state is not RUNNING or PAUSED!"); + return OS_SUCCESS; + } + + if (GET_DRIVER_STATE() == DRV_STATE_PAUSED) { + enter_in_pause_state = 1; + } + + // step 1 +#if !defined(CONFIG_PREEMPT_COUNT) + if (DRV_CONFIG_per_cpu_tsc(drv_cfg)) { + // swap cpu_tsc and prev_cpu_tsc, so that cpu_tsc is saved in prev_cpu_tsc. 
+		tmp = prev_cpu_tsc;
+		prev_cpu_tsc = cpu_tsc;
+		cpu_tsc = tmp;
+	} else
+#endif
+		prev_cpu_tsc[0] = cpu_tsc[0];
+
+	// step 2
+	// if per_cpu_tsc is not defined, read cpu0's tsc into var cpu_tsc[0]
+	// if per_cpu_tsc is defined, read all cpu's tsc into var cpu_tsc by lwpmudrv_Fill_TSC_Info
+#if !defined(CONFIG_PREEMPT_COUNT)
+	if (DRV_CONFIG_per_cpu_tsc(drv_cfg)) {
+		atomic_set(&read_now, GLOBAL_STATE_num_cpus(driver_state));
+		init_waitqueue_head(&read_tsc_now);
+		CONTROL_Invoke_Parallel(lwpmudrv_Fill_TSC_Info,
+					(PVOID)(size_t)0);
+	} else
+#endif
+		CONTROL_Invoke_Cpu(0, lwpmudrv_Read_Specific_TSC, &cpu_tsc[0]);
+
+	// step 3
+	// Counters should be frozen right after time stamped.
+	if (!enter_in_pause_state) {
+		status = lwpmudrv_Pause();
+		// fix: was "status != OS_INVALID", which returned even after a
+		// SUCCESSFUL pause and skipped the entire read/reset sequence;
+		// use the same "status != OS_SUCCESS" check this function
+		// already applies after lwpmudrv_Resume() in step 9.
+		if (status != OS_SUCCESS) {
+			return status;
+		}
+	}
+
+	// step 4
+	if (DRV_CONFIG_per_cpu_tsc(drv_cfg)) {
+		for (i = 0; i < GLOBAL_STATE_num_cpus(driver_state); i++) {
+#if !defined(CONFIG_PREEMPT_COUNT)
+			diff_cpu_tsc[i] = cpu_tsc[i] - prev_cpu_tsc[i];
+#else
+			// if CONFIG_PREEMPT_COUNT is defined, means lwpmudrv_Fill_TSC_Info can not be run.
+ // return all cpu's tsc difference with cpu0's tsc difference instead + diff_cpu_tsc[i] = cpu_tsc[0] - prev_cpu_tsc[0]; +#endif + } + } else { + diff_cpu_tsc[0] = cpu_tsc[0] - prev_cpu_tsc[0]; + } + + // step 5 + orig_r_buf_ptr = arg->buf_drv_to_usr; + orig_r_buf_len = arg->len_drv_to_usr; + + if (copy_to_user((void __user *)arg->buf_drv_to_usr, diff_cpu_tsc, + GLOBAL_STATE_num_cpus(driver_state) * sizeof(U64))) { + return OS_FAULT; + } + + p_buffer = (U64 *)(arg->buf_drv_to_usr); + p_buffer += GLOBAL_STATE_num_cpus(driver_state); + arg->buf_drv_to_usr = (char *)p_buffer; + arg->len_drv_to_usr -= + GLOBAL_STATE_num_cpus(driver_state) * sizeof(U64); + + // step 6 + status = lwpmudrv_Read_MSRs(arg); +#if defined(DRV_SEP_ACRN_ON) + status = lwpmudrv_Read_Metrics(arg); +#endif + arg->buf_drv_to_usr = orig_r_buf_ptr; + arg->len_drv_to_usr = orig_r_buf_len; + + // step 7 +#if !defined(DRV_SEP_ACRN_ON) + CONTROL_Invoke_Parallel(lwpmudrv_Write_Op, NULL); +#else + lwpmudrv_Write_Op(NULL); +#endif + + // step 8 + // if per_cpu_tsc is defined, read all cpu's tsc and save in cpu_tsc for next run +#if !defined(CONFIG_PREEMPT_COUNT) + if (DRV_CONFIG_per_cpu_tsc(drv_cfg)) { + atomic_set(&read_now, GLOBAL_STATE_num_cpus(driver_state)); + init_waitqueue_head(&read_tsc_now); + CONTROL_Invoke_Parallel(lwpmudrv_Fill_TSC_Info, + (PVOID)(size_t)0); + } else +#endif + CONTROL_Invoke_Cpu(0, lwpmudrv_Read_Specific_TSC, &cpu_tsc[0]); + + // step 9 + if (!enter_in_pause_state) { + status = lwpmudrv_Resume(); + if (status != OS_SUCCESS) + return status; + } + + SEP_DRV_LOG_FLOW_OUT("Return value: %d", status); + return status; +} + +/* ------------------------------------------------------------------------- */ +/*! + * @fn static OS_STATUS lwpmudrv_Set_Num_EM_Groups(IOCTL_ARGS arg) + * + * @param arg - pointer to the IOCTL_ARGS structure + * + * @return OS_STATUS + * + * @brief Configure the event multiplexing group. 
+ * + * Special Notes + * None + */ +static OS_STATUS lwpmudrv_Set_EM_Config(IOCTL_ARGS arg) +{ + EVENT_CONFIG ec; + + SEP_DRV_LOG_FLOW_IN(""); + + if (GET_DRIVER_STATE() != DRV_STATE_IDLE) { + SEP_DRV_LOG_ERROR_FLOW_OUT( + "Skipped: Driver state is not IDLE!"); + return OS_IN_PROGRESS; + } + + if (arg->buf_usr_to_drv == NULL || + arg->len_usr_to_drv != sizeof(EVENT_CONFIG_NODE)) { + SEP_DRV_LOG_ERROR_FLOW_OUT("Invalid arguments."); + return OS_INVALID; + } + + LWPMU_DEVICE_ec(&devices[cur_device]) = + CONTROL_Allocate_Memory(sizeof(EVENT_CONFIG_NODE)); + if (!LWPMU_DEVICE_ec(&devices[cur_device])) { + SEP_DRV_LOG_ERROR_FLOW_OUT("Memory allocation failure for ec!"); + return OS_NO_MEM; + } + + if (copy_from_user(LWPMU_DEVICE_ec(&devices[cur_device]), + (void __user *)arg->buf_usr_to_drv, sizeof(EVENT_CONFIG_NODE))) { + SEP_DRV_LOG_ERROR_FLOW_OUT( + "Memory copy failure (event config)!"); + return OS_FAULT; + } + + ec = (EVENT_CONFIG)LWPMU_DEVICE_ec(&devices[cur_device]); + LWPMU_DEVICE_PMU_register_data(&devices[cur_device]) = + CONTROL_Allocate_Memory(EVENT_CONFIG_num_groups(ec) * + sizeof(VOID *)); + if (!LWPMU_DEVICE_PMU_register_data(&devices[cur_device])) { + SEP_DRV_LOG_ERROR_FLOW_OUT( + "Memory allocation failure for PMU_register_data!"); + return OS_NO_MEM; + } + + EVENTMUX_Initialize(); + + SEP_DRV_LOG_FLOW_OUT("OS_SUCCESS."); + return OS_SUCCESS; +} + +/* ------------------------------------------------------------------------- */ +/*! + * @fn static OS_STATUS lwpmudrv_Set_EM_Config_UNC(IOCTL_ARGS arg) + * + * @param arg - pointer to the IOCTL_ARGS structure + * + * @return OS_STATUS + * + * @brief Set the number of em groups in the global state node. + * @brief Also, copy the EVENT_CONFIG struct that has been passed in, + * @brief into a global location for now. 
+ * + * Special Notes + */ +static OS_STATUS lwpmudrv_Set_EM_Config_UNC(IOCTL_ARGS arg) +{ + EVENT_CONFIG ec; + SEP_DRV_LOG_FLOW_IN(""); + + if (GET_DRIVER_STATE() != DRV_STATE_IDLE) { + SEP_DRV_LOG_ERROR_FLOW_OUT( + "Skipped: driver state is not IDLE!"); + return OS_IN_PROGRESS; + } + + // allocate memory + LWPMU_DEVICE_ec(&devices[cur_device]) = + CONTROL_Allocate_Memory(sizeof(EVENT_CONFIG_NODE)); + if (copy_from_user(LWPMU_DEVICE_ec(&devices[cur_device]), + (void __user *)arg->buf_usr_to_drv, arg->len_usr_to_drv)) { + SEP_DRV_LOG_ERROR_FLOW_OUT( + "Memory copy failure for LWPMU_device_ec!"); + return OS_FAULT; + } + // configure num_groups from ec of the specific device + ec = (EVENT_CONFIG)LWPMU_DEVICE_ec(&devices[cur_device]); + SEP_DRV_LOG_TRACE("Num Groups UNCORE: %d.", + EVENT_CONFIG_num_groups_unc(ec)); + LWPMU_DEVICE_PMU_register_data(&devices[cur_device]) = + CONTROL_Allocate_Memory(EVENT_CONFIG_num_groups_unc(ec) * + sizeof(VOID *)); + if (!LWPMU_DEVICE_PMU_register_data(&devices[cur_device])) { + LWPMU_DEVICE_ec(&devices[cur_device]) = CONTROL_Free_Memory( + LWPMU_DEVICE_ec(&devices[cur_device])); + SEP_DRV_LOG_ERROR_FLOW_OUT( + "Memory allocation failure for LWPMU_DEVICE_PMU_register_data"); + return OS_NO_MEM; + } + LWPMU_DEVICE_em_groups_count(&devices[cur_device]) = 0; + + SEP_DRV_LOG_FLOW_OUT("Success"); + return OS_SUCCESS; +} + +/* ------------------------------------------------------------------------- */ +/*! + * @fn static OS_STATUS lwpmudrv_Configure_events(IOCTL_ARGS arg) + * + * @param arg - pointer to the IOCTL_ARGS structure + * + * @return OS_STATUS + * + * @brief Copies one group of events into kernel space at + * @brief PMU_register_data[em_groups_count]. 
+ * + * Special Notes + */ +static OS_STATUS lwpmudrv_Configure_Events(IOCTL_ARGS arg) +{ + U32 group_id; + ECB ecb; + U32 em_groups_count; + EVENT_CONFIG ec; + + SEP_DRV_LOG_FLOW_IN(""); + + if (GET_DRIVER_STATE() != DRV_STATE_IDLE) { + SEP_DRV_LOG_ERROR_FLOW_OUT( + "Skipped: driver state is not IDLE!"); + return OS_IN_PROGRESS; + } + + ec = (EVENT_CONFIG)LWPMU_DEVICE_ec(&devices[cur_device]); + em_groups_count = LWPMU_DEVICE_em_groups_count(&devices[cur_device]); + + if (em_groups_count >= EVENT_CONFIG_num_groups(ec)) { + SEP_DRV_LOG_ERROR_FLOW_OUT( + "Error: EM groups number exceeded initial configuration!"); + return OS_INVALID; + } + if (arg->buf_usr_to_drv == NULL || + arg->len_usr_to_drv < sizeof(ECB_NODE)) { + SEP_DRV_LOG_ERROR_FLOW_OUT("Invalid arguments."); + return OS_INVALID; + } + + ecb = CONTROL_Allocate_Memory(arg->len_usr_to_drv); + if (!ecb) { + SEP_DRV_LOG_ERROR_FLOW_OUT( + "Memory allocation failure for ecb!"); + return OS_NO_MEM; + } + if (copy_from_user(ecb, (void __user *)arg->buf_usr_to_drv, arg->len_usr_to_drv)) { + CONTROL_Free_Memory(ecb); + SEP_DRV_LOG_ERROR_FLOW_OUT("Memory copy failure for ecb data!"); + return OS_FAULT; + } + group_id = ECB_group_id(ecb); + + if (group_id >= EVENT_CONFIG_num_groups(ec)) { + CONTROL_Free_Memory(ecb); + SEP_DRV_LOG_ERROR_FLOW_OUT( + "Group_id is larger than total number of groups!"); + return OS_INVALID; + } + + LWPMU_DEVICE_PMU_register_data(&devices[cur_device])[group_id] = ecb; + LWPMU_DEVICE_em_groups_count(&devices[cur_device]) = group_id + 1; + + SEP_DRV_LOG_FLOW_OUT("Success"); + return OS_SUCCESS; +} + +/* ------------------------------------------------------------------------- */ +/*! 
+ * @fn static OS_STATUS lwpmudrv_Configure_events_UNC(IOCTL_ARGS arg) + * + * @param arg - pointer to the IOCTL_ARGS structure + * + * @return OS_STATUS + * + * @brief Make a copy of the uncore registers that need to be programmed + * @brief for the next event set used for event multiplexing + * + * Special Notes + */ +static OS_STATUS lwpmudrv_Configure_Events_UNC(IOCTL_ARGS arg) +{ + VOID **PMU_register_data_unc; + S32 em_groups_count_unc; + ECB ecb; + EVENT_CONFIG ec_unc; + DEV_UNC_CONFIG pcfg_unc; + U32 group_id = 0; + ECB in_ecb = NULL; + + SEP_DRV_LOG_FLOW_IN(""); + + if (GET_DRIVER_STATE() != DRV_STATE_IDLE) { + SEP_DRV_LOG_ERROR_FLOW_OUT( + "Skipped: driver state is not IDLE!"); + return OS_IN_PROGRESS; + } + + em_groups_count_unc = + LWPMU_DEVICE_em_groups_count(&devices[cur_device]); + PMU_register_data_unc = + LWPMU_DEVICE_PMU_register_data(&devices[cur_device]); + ec_unc = LWPMU_DEVICE_ec(&devices[cur_device]); + pcfg_unc = LWPMU_DEVICE_pcfg(&devices[cur_device]); + + if (pcfg_unc == NULL || ec_unc == NULL) { + SEP_DRV_LOG_ERROR_FLOW_OUT("Pcfg_unc or ec_unc NULL!"); + return OS_INVALID; + } + + if (em_groups_count_unc >= (S32)EVENT_CONFIG_num_groups_unc(ec_unc)) { + SEP_DRV_LOG_ERROR_FLOW_OUT( + "Uncore EM groups number exceeded initial configuration!"); + return OS_INVALID; + } + if (arg->buf_usr_to_drv == NULL || + arg->len_usr_to_drv < sizeof(ECB_NODE)) { + SEP_DRV_LOG_ERROR_FLOW_OUT("Invalid arguments."); + return OS_INVALID; + } + + in_ecb = CONTROL_Allocate_Memory(arg->len_usr_to_drv); + if (!in_ecb) { + SEP_DRV_LOG_ERROR_FLOW_OUT( + "Memory allocation failure for uncore ecb!"); + return OS_NO_MEM; + } + if (copy_from_user(in_ecb, (void __user *)arg->buf_usr_to_drv, arg->len_usr_to_drv)) { + CONTROL_Free_Memory(in_ecb); + SEP_DRV_LOG_ERROR_FLOW_OUT( + "Memory copy failure for uncore ecb data!"); + return OS_FAULT; + } + group_id = ECB_group_id(in_ecb); + + if (group_id >= EVENT_CONFIG_num_groups_unc(ec_unc)) { + CONTROL_Free_Memory(in_ecb); + 
SEP_DRV_LOG_ERROR_FLOW_OUT( + "Group_id is larger than total number of groups!"); + return OS_INVALID; + } + + PMU_register_data_unc[group_id] = in_ecb; + // at this point, we know the number of uncore events for this device, + // so allocate the results buffer per thread for uncore only for SEP event based uncore counting + ecb = PMU_register_data_unc[group_id]; + if (ecb == NULL) { + SEP_DRV_LOG_ERROR_FLOW_OUT("Encountered NULL ECB!"); + return OS_INVALID; + } + LWPMU_DEVICE_num_events(&devices[cur_device]) = ECB_num_events(ecb); + LWPMU_DEVICE_em_groups_count(&devices[cur_device]) = group_id + 1; + + SEP_DRV_LOG_FLOW_OUT("Success"); + return OS_SUCCESS; +} + +/* ------------------------------------------------------------------------- */ +/*! + * @fn static OS_STATUS lwpmudrv_Set_Sample_Descriptors(IOCTL_ARGS arg) + * + * @param arg - pointer to the IOCTL_ARGS structure + * + * @return OS_STATUS + * + * @brief Set the number of descriptor groups in the global state node. + * + * Special Notes + */ +static OS_STATUS lwpmudrv_Set_Sample_Descriptors(IOCTL_ARGS arg) +{ + SEP_DRV_LOG_FLOW_IN(""); + + if (GET_DRIVER_STATE() != DRV_STATE_IDLE) { + SEP_DRV_LOG_ERROR_FLOW_OUT( + "Skipped: driver state is not IDLE!"); + return OS_IN_PROGRESS; + } + if (arg->len_usr_to_drv != sizeof(U32) || arg->buf_usr_to_drv == NULL) { + SEP_DRV_LOG_ERROR_FLOW_OUT( + "Invalid arguments (Unknown size of Sample Descriptors!)."); + return OS_INVALID; + } + + desc_count = 0; + if (copy_from_user(&GLOBAL_STATE_num_descriptors(driver_state), + (void __user *)arg->buf_usr_to_drv, sizeof(U32))) { + SEP_DRV_LOG_ERROR_FLOW_OUT("Memory copy failure"); + return OS_FAULT; + } + + desc_data = CONTROL_Allocate_Memory( + (size_t)GLOBAL_STATE_num_descriptors(driver_state) * + sizeof(VOID *)); + if (desc_data == NULL) { + SEP_DRV_LOG_ERROR_FLOW_OUT( + "Memory allocation failure for desc_data!"); + return OS_NO_MEM; + } + + SEP_DRV_LOG_FLOW_OUT("Success"); + return OS_SUCCESS; +} + +/* 
------------------------------------------------------------------------- */
+/*!
+ * @fn static OS_STATUS lwpmudrv_Configure_Descriptors(IOCTL_ARGS arg)
+ *
+ * @param arg - pointer to the IOCTL_ARGS structure
+ * @return OS_STATUS
+ *
+ * @brief Make a copy of the descriptors that need to be read in order
+ * @brief to configure a sample record.
+ *
+ * Special Notes
+ */
+static OS_STATUS lwpmudrv_Configure_Descriptors(IOCTL_ARGS arg)
+{
+	U32 uncopied;
+
+	SEP_DRV_LOG_FLOW_IN("");
+
+	if (GET_DRIVER_STATE() != DRV_STATE_IDLE) {
+		SEP_DRV_LOG_ERROR_FLOW_OUT(
+			"Skipped: driver state is not IDLE!");
+		return OS_IN_PROGRESS;
+	}
+
+	if (desc_count >= GLOBAL_STATE_num_descriptors(driver_state)) {
+		SEP_DRV_LOG_ERROR_FLOW_OUT(
+			"Descriptor groups number exceeded initial configuration!");
+		return OS_INVALID;
+	}
+
+	if (arg->len_usr_to_drv == 0 || arg->buf_usr_to_drv == NULL) {
+		SEP_DRV_LOG_ERROR_FLOW_OUT("Invalid arg value!");
+		return OS_INVALID;
+	}
+	if (desc_data == NULL) {
+		SEP_DRV_LOG_ERROR_FLOW_OUT("NULL desc_data!");
+		return OS_INVALID;
+	}
+	//
+	// First things first: Make a copy of the data for global use.
+	//
+	desc_data[desc_count] = CONTROL_Allocate_Memory(arg->len_usr_to_drv);
+	// Check the allocation before copy_from_user writes through it.
+	if (desc_data[desc_count] == NULL) {
+		SEP_DRV_LOG_ERROR_FLOW_OUT(
+			"Memory allocation failure for desc_data!");
+		return OS_NO_MEM;
+	}
+	uncopied = copy_from_user(desc_data[desc_count], (void __user *)arg->buf_usr_to_drv,
+				  arg->len_usr_to_drv);
+	if (uncopied > 0) {
+		// Free the partially filled buffer; a copy fault is OS_FAULT
+		// (not OS_NO_MEM), consistent with the sibling handlers.
+		desc_data[desc_count] =
+			CONTROL_Free_Memory(desc_data[desc_count]);
+		SEP_DRV_LOG_ERROR_FLOW_OUT(
+			"Unable to copy desc_data from user!");
+		return OS_FAULT;
+	}
+	SEP_DRV_LOG_TRACE("Added descriptor # %d.", desc_count);
+	desc_count++;
+
+	SEP_DRV_LOG_FLOW_OUT("Success");
+	return OS_SUCCESS;
+}
+
+/* ------------------------------------------------------------------------- */
+/*!
+ * @fn static OS_STATUS lwpmudrv_LBR_Info(IOCTL_ARGS arg)
+ *
+ *
+ * @param arg - pointer to the IOCTL_ARGS structure
+ * @return OS_STATUS
+ *
+ * @brief Make a copy of the LBR information that is passed in.
+ * + * Special Notes + */ +static OS_STATUS lwpmudrv_LBR_Info(IOCTL_ARGS arg) +{ + SEP_DRV_LOG_FLOW_IN(""); + + if (GET_DRIVER_STATE() != DRV_STATE_IDLE) { + SEP_DRV_LOG_ERROR_FLOW_OUT( + "Skipped: driver state is not IDLE!"); + return OS_IN_PROGRESS; + } + + if (cur_pcfg == NULL || DEV_CONFIG_collect_lbrs(cur_pcfg) == FALSE) { + SEP_DRV_LOG_ERROR_FLOW_OUT( + "LBR capture has not been configured!"); + return OS_INVALID; + } + + if (arg->len_usr_to_drv == 0 || arg->buf_usr_to_drv == NULL) { + SEP_DRV_LOG_ERROR_FLOW_OUT("Invalid arguments!"); + return OS_INVALID; + } + + // + // First things first: Make a copy of the data for global use. + // + + LWPMU_DEVICE_lbr(&devices[cur_device]) = + CONTROL_Allocate_Memory((int)arg->len_usr_to_drv); + if (!LWPMU_DEVICE_lbr(&devices[cur_device])) { + SEP_DRV_LOG_ERROR_FLOW_OUT( + "Error: Memory allocation failure for lbr!"); + return OS_NO_MEM; + } + + if (copy_from_user(LWPMU_DEVICE_lbr(&devices[cur_device]), + (void __user *)arg->buf_usr_to_drv, arg->len_usr_to_drv)) { + SEP_DRV_LOG_ERROR_FLOW_OUT( + "Memory copy failure for lbr struct!"); + return OS_FAULT; + } + + SEP_DRV_LOG_FLOW_OUT("Success"); + return OS_SUCCESS; +} + +#if !defined(DRV_SEP_ACRN_ON) +#define CR4_PCE 0x00000100 //Performance-monitoring counter enable RDPMC +/* ------------------------------------------------------------------------- */ +/*! 
+ * @fn static void lwpmudrv_Set_CR4_PCE_Bit(PVOID param) + * + * @param param - dummy parameter + * + * @return NONE + * + * @brief Set CR4's PCE bit on the logical processor + * + * Special Notes + */ +static VOID lwpmudrv_Set_CR4_PCE_Bit(PVOID param) +{ + U32 this_cpu; +#if defined(DRV_IA32) + U32 prev_CR4_value = 0; + + SEP_DRV_LOG_TRACE_IN(""); + + // remember if RDPMC bit previously set + // and then enabled it + __asm__("movl %%cr4, %%eax\n\t" + "movl %%eax, %0\n\t" + "orl %1, %%eax\n\t" + "movl %%eax, %%cr4\n\t" + : "=irg"(prev_CR4_value) + : "irg"(CR4_PCE) + : "eax"); +#endif +#if defined(DRV_EM64T) + U64 prev_CR4_value = 0; + + SEP_DRV_LOG_TRACE_IN(""); + + // remember if RDPMC bit previously set + // and then enabled it + __asm__("movq %%cr4, %%rax\n\t" + "movq %%rax, %0\n\t" + "orq %1, %%rax\n\t" + "movq %%rax, %%cr4" + : "=irg"(prev_CR4_value) + : "irg"(CR4_PCE) + : "rax"); +#endif + preempt_disable(); + this_cpu = CONTROL_THIS_CPU(); + preempt_enable(); + + // if bit RDPMC bit was set before, + // set flag for when we clear it + if (prev_CR4_value & CR4_PCE) { + prev_set_CR4[this_cpu] = 1; + } + + SEP_DRV_LOG_TRACE_OUT(""); +} + +/* ------------------------------------------------------------------------- */ +/*! 
+ * @fn static void lwpmudrv_Clear_CR4_PCE_Bit(PVOID param)
+ *
+ * @param param - dummy parameter
+ *
+ * @return NONE
+ *
+ * @brief Clear CR4's PCE bit on the logical processor
+ *
+ * Special Notes
+ */
+static VOID lwpmudrv_Clear_CR4_PCE_Bit(PVOID param)
+{
+	U32 this_cpu;
+
+	SEP_DRV_LOG_TRACE_IN("");
+
+	preempt_disable();
+	this_cpu = CONTROL_THIS_CPU();
+	preempt_enable();
+
+	// only clear the CR4 bit if it wasn't set
+	// before we started
+	if (prev_set_CR4 && !prev_set_CR4[this_cpu]) {
+#if defined(DRV_IA32)
+		__asm__("movl %%cr4, %%eax\n\t"
+			"andl %0, %%eax\n\t"
+			"movl %%eax, %%cr4\n"
+			:
+			: "irg"(~CR4_PCE)
+			: "eax");
+#endif
+#if defined(DRV_EM64T)
+		__asm__("movq %%cr4, %%rax\n\t"
+			"andq %0, %%rax\n\t"
+			"movq %%rax, %%cr4\n"
+			:
+			: "irg"(~CR4_PCE)
+			: "rax");
+#endif
+	}
+
+	SEP_DRV_LOG_TRACE_OUT("");
+}
+
+#endif
+
+/* ------------------------------------------------------------------------- */
+/*!
+ * @fn static OS_STATUS lwpmudrv_Start(void)
+ *
+ * @param none
+ *
+ * @return OS_STATUS
+ *
+ * @brief Local function that handles the LWPMU_IOCTL_START call.
+ * @brief Set up the OS hooks for process/thread/load notifications.
+ * @brief Write the initial set of MSRs.
+ * + * Special Notes + */ +static OS_STATUS lwpmudrv_Start(void) +{ + OS_STATUS status = OS_SUCCESS; +#if !defined(CONFIG_PREEMPT_COUNT) && !defined(DRV_SEP_ACRN_ON) + U32 cpu_num; +#endif +#if defined(DRV_SEP_ACRN_ON) + struct profiling_control *control = NULL; +#endif + + SEP_DRV_LOG_FLOW_IN(""); + + if (!CHANGE_DRIVER_STATE(STATE_BIT_IDLE, DRV_STATE_RUNNING)) { + SEP_DRV_LOG_ERROR_FLOW_OUT( + "Skipped: driver state is not IDLE!"); + return OS_IN_PROGRESS; + } + + if (drv_cfg == NULL) { + SEP_DRV_LOG_ERROR_FLOW_OUT("NULL drv_cfg!"); + return OS_INVALID; + } + + if (DRV_CONFIG_use_pcl(drv_cfg) == TRUE) { + if (DRV_CONFIG_start_paused(drv_cfg)) { + CHANGE_DRIVER_STATE(STATE_BIT_RUNNING, + DRV_STATE_PAUSED); + } + SEP_DRV_LOG_FLOW_OUT("[PCL enabled] Early return value: %d", + status); + return status; + } + +#if !defined(DRV_SEP_ACRN_ON) + prev_set_CR4 = CONTROL_Allocate_Memory( + GLOBAL_STATE_num_cpus(driver_state) * sizeof(U8)); + CONTROL_Invoke_Parallel(lwpmudrv_Set_CR4_PCE_Bit, (PVOID)(size_t)0); +#endif + +#if !defined(CONFIG_PREEMPT_COUNT) && !defined(DRV_SEP_ACRN_ON) + atomic_set(&read_now, GLOBAL_STATE_num_cpus(driver_state)); + init_waitqueue_head(&read_tsc_now); + CONTROL_Invoke_Parallel(lwpmudrv_Fill_TSC_Info, (PVOID)(size_t)0); +#endif + +#if !defined(CONFIG_PREEMPT_COUNT) && !defined(DRV_SEP_ACRN_ON) + for (cpu_num = 0; cpu_num < GLOBAL_STATE_num_cpus(driver_state); + cpu_num++) { + if (CPU_STATE_offlined(&pcb[cpu_num])) { + cpu_tsc[cpu_num] = cpu_tsc[0]; + } + } +#else + UTILITY_Read_TSC(&cpu_tsc[0]); +#endif + + if (DRV_CONFIG_start_paused(drv_cfg)) { + CHANGE_DRIVER_STATE(STATE_BIT_RUNNING, DRV_STATE_PAUSED); + } else { +#if !defined(DRV_SEP_ACRN_ON) + CONTROL_Invoke_Parallel(lwpmudrv_Resume_Op, NULL); +#else + control = (struct profiling_control *)CONTROL_Allocate_Memory( + sizeof(struct profiling_control)); + if (control == NULL) { + SEP_PRINT_ERROR( + "lwpmudrv_Start: Unable to allocate memory\n"); + return OS_NO_MEM; + } + memset(control, 0, 
sizeof(struct profiling_control)); + + BUG_ON(!virt_addr_valid(control)); + control->collector_id = COLLECTOR_SEP; + + acrn_hypercall2(HC_PROFILING_OPS, PROFILING_GET_CONTROL_SWITCH, + virt_to_phys(control)); + + SEP_DRV_LOG_TRACE("ACRN profiling collection running 0x%llx\n", + control->switches); + + if (DRV_CONFIG_counting_mode(drv_cfg) == FALSE) { + control->switches |= (1 << CORE_PMU_SAMPLING); + if (DEV_CONFIG_collect_lbrs(cur_pcfg)) { + control->switches |= (1 << LBR_PMU_SAMPLING); + } + } else { + control->switches |= (1 << CORE_PMU_COUNTING); + } + + acrn_hypercall2(HC_PROFILING_OPS, PROFILING_SET_CONTROL_SWITCH, + virt_to_phys(control)); + control = CONTROL_Free_Memory(control); + + lwpmudrv_ACRN_Flush_Start_Timer(); +#endif + +#if defined(BUILD_CHIPSET) + if (DRV_CONFIG_enable_chipset(drv_cfg) && cs_dispatch != NULL && + cs_dispatch->start_chipset != NULL) { + cs_dispatch->start_chipset(); + } +#endif + + EVENTMUX_Start(); + lwpmudrv_Dump_Tracer("start", 0); + +#if defined(BUILD_GFX) + SEP_DRV_LOG_TRACE("Enable_gfx=%d.", + (int)DRV_CONFIG_enable_gfx(drv_cfg)); + if (DRV_CONFIG_enable_gfx(drv_cfg)) { + GFX_Start(); + } +#endif + if (unc_buf_init) { + lwpmudrv_Uncore_Start_Timer(); + } + } + + SEP_DRV_LOG_FLOW_OUT("Return value: %d", status); + return status; +} + +/* ------------------------------------------------------------------------- */ +/*! 
+ * @fn static OS_STATUS lwpmudrv_Cleanup_Op(void) + * + * @param - none + * + * @return OS_STATUS + * + * @brief Clean up registers after collection + * + * Special Notes + */ +static VOID lwpmudrv_Cleanup_Op(PVOID param) +{ + U32 this_cpu; + U32 dev_idx; + DISPATCH dispatch; + + preempt_disable(); + this_cpu = CONTROL_THIS_CPU(); + preempt_enable(); + dev_idx = core_to_dev_map[this_cpu]; + dispatch = LWPMU_DEVICE_dispatch(&devices[dev_idx]); + + SEP_DRV_LOG_TRACE_IN(""); + + if (dispatch != NULL && dispatch->cleanup != NULL) { + dispatch->cleanup(&dev_idx); + } + + SEP_DRV_LOG_TRACE_OUT(""); +} + +/* + * @fn lwpmudrv_Prepare_Stop(); + * + * @param NONE + * @return OS_STATUS + * + * @brief Local function that handles the DRV_OPERATION_STOP call. + * @brief Cleans up the interrupt handler. + */ +static OS_STATUS lwpmudrv_Prepare_Stop(void) +{ + S32 i; + S32 done = FALSE; + S32 cpu_num; +#if defined(DRV_SEP_ACRN_ON) + struct profiling_control *control = NULL; +#endif + + SEP_DRV_LOG_FLOW_IN(""); + + if (GET_DRIVER_STATE() != DRV_STATE_TERMINATING) { + if (!CHANGE_DRIVER_STATE(STATE_BIT_RUNNING | STATE_BIT_PAUSED, + DRV_STATE_PREPARE_STOP)) { + SEP_DRV_LOG_ERROR_FLOW_OUT("Unexpected driver state."); + return OS_INVALID; + } + } else { + SEP_DRV_LOG_WARNING("Abnormal termination path."); + } + + if (drv_cfg == NULL) { + SEP_DRV_LOG_ERROR_FLOW_OUT("drv_cfg is NULL!"); + return OS_INVALID; + } + + if (DRV_CONFIG_use_pcl(drv_cfg) == TRUE) { + SEP_DRV_LOG_FLOW_OUT("Success: using PCL"); + return OS_SUCCESS; + } + + for (i = 0; i < GLOBAL_STATE_num_cpus(driver_state); i++) { + CPU_STATE_accept_interrupt(&pcb[i]) = 0; + } + while (!done) { + done = TRUE; + for (i = 0; i < GLOBAL_STATE_num_cpus(driver_state); i++) { + if (atomic_read(&CPU_STATE_in_interrupt(&pcb[i]))) { + done = FALSE; + } + } + } +#if !defined(DRV_SEP_ACRN_ON) + CONTROL_Invoke_Parallel(lwpmudrv_Pause_Op, NULL); +#else + + control = (struct profiling_control *)CONTROL_Allocate_Memory( + sizeof(struct 
profiling_control)); + if (control == NULL) { + SEP_PRINT_ERROR("lwpmudrv_Start: Unable to allocate memory\n"); + return OS_NO_MEM; + } + memset(control, 0, sizeof(struct profiling_control)); + + BUG_ON(!virt_addr_valid(control)); + control->collector_id = COLLECTOR_SEP; + + acrn_hypercall2(HC_PROFILING_OPS, PROFILING_GET_CONTROL_SWITCH, + virt_to_phys(control)); + + SEP_DRV_LOG_TRACE("ACRN profiling collection running 0x%llx\n", + control->switches); + + if (DRV_CONFIG_counting_mode(drv_cfg) == FALSE) { + control->switches &= + ~(1 << CORE_PMU_SAMPLING); + } else { + control->switches &= ~(1 << CORE_PMU_COUNTING); + } + + acrn_hypercall2(HC_PROFILING_OPS, PROFILING_SET_CONTROL_SWITCH, + virt_to_phys(control)); + control = CONTROL_Free_Memory(control); + + lwpmudrv_ACRN_Flush_Stop_Timer(); + SEP_DRV_LOG_TRACE("Calling final PMI_Buffer_Handler\n"); + + for (i = 0; i < GLOBAL_STATE_num_cpus(driver_state); i++) { + PMI_Buffer_Handler(&i); + } +#endif + + SEP_DRV_LOG_TRACE("Outside of all interrupts."); + +#if defined(BUILD_CHIPSET) + if (DRV_CONFIG_enable_chipset(drv_cfg) && cs_dispatch != NULL && + cs_dispatch->stop_chipset != NULL) { + cs_dispatch->stop_chipset(); + } +#endif + +#if defined(BUILD_GFX) + SEP_DRV_LOG_TRACE("Enable_gfx=%d.", + (int)DRV_CONFIG_enable_gfx(drv_cfg)); + if (DRV_CONFIG_enable_gfx(drv_cfg)) { + GFX_Stop(); + } +#endif + + if (unc_buf_init) { + lwpmudrv_Uncore_Stop_Timer(); + } + + if (drv_cfg == NULL) { + SEP_DRV_LOG_ERROR_FLOW_OUT("drv_cfg is NULL!"); + return OS_INVALID; + } + + /* + * Clean up all the control registers + */ +#if !defined(DRV_SEP_ACRN_ON) + CONTROL_Invoke_Parallel(lwpmudrv_Cleanup_Op, (VOID *)NULL); +#else + lwpmudrv_Cleanup_Op(NULL); +#endif + + SEP_DRV_LOG_TRACE("Cleanup finished."); + lwpmudrv_Free_Restore_Buffer(); + +#if !defined(DRV_SEP_ACRN_ON) + if (prev_set_CR4) { + CONTROL_Invoke_Parallel(lwpmudrv_Clear_CR4_PCE_Bit, + (VOID *)(size_t)0); + prev_set_CR4 = CONTROL_Free_Memory(prev_set_CR4); + } +#endif + +#if 
defined(BUILD_CHIPSET) + if (DRV_CONFIG_enable_chipset(drv_cfg) && cs_dispatch && + cs_dispatch->fini_chipset) { + cs_dispatch->fini_chipset(); + } +#endif + + for (cpu_num = 0; cpu_num < GLOBAL_STATE_num_cpus(driver_state); + cpu_num++) { + SEP_DRV_LOG_TRACE( + "# of PMU interrupts via NMI triggered on cpu%d: %u.", + cpu_num, CPU_STATE_nmi_handled(&pcb[cpu_num])); + } + + SEP_DRV_LOG_FLOW_OUT("Success."); + return OS_SUCCESS; +} + +/* + * @fn lwpmudrv_Finish_Stop(); + * + * @param NONE + * @return OS_STATUS + * + * @brief Local function that handles the DRV_OPERATION_STOP call. + * @brief Cleans up the interrupt handler. + */ +static OS_STATUS lwpmudrv_Finish_Stop(void) +{ + OS_STATUS status = OS_SUCCESS; + S32 idx, cpu; + + SEP_DRV_LOG_FLOW_IN(""); + + if (GET_DRIVER_STATE() != DRV_STATE_TERMINATING) { + if (!CHANGE_DRIVER_STATE(STATE_BIT_PREPARE_STOP, + DRV_STATE_STOPPED)) { + SEP_DRV_LOG_ERROR_FLOW_OUT("Unexpected driver state!"); + return OS_FAULT; + } + } else { + SEP_DRV_LOG_WARNING("Abnormal termination path."); + } + + if (drv_cfg == NULL) { + SEP_DRV_LOG_ERROR_FLOW_OUT("drv_cfg is NULL!"); + return OS_INVALID; + } + + if (DRV_CONFIG_counting_mode(drv_cfg) == FALSE) { + if (GET_DRIVER_STATE() != DRV_STATE_TERMINATING) { +#if !defined(DRV_SEP_ACRN_ON) + CONTROL_Invoke_Parallel(PEBS_Flush_Buffer, NULL); +#endif + /* + * Make sure that the module buffers are not deallocated and that the module flush + * thread has not been terminated. 
+ */ + if (GET_DRIVER_STATE() != DRV_STATE_TERMINATING) { + status = LINUXOS_Enum_Process_Modules(TRUE); + } + OUTPUT_Flush(); + } + /* + * Clean up the interrupt handler via the IDT + */ +#if !defined(DRV_SEP_ACRN_ON) + CPUMON_Remove_Cpuhooks(); + PEBS_Destroy(); +#else + for (cpu = 0; cpu < GLOBAL_STATE_num_cpus(driver_state); + cpu++) { + sbuf_share_setup(cpu, ACRN_SEP, NULL); + sbuf_free(samp_buf_per_cpu[cpu]); + } + samp_buf_per_cpu = CONTROL_Free_Memory(samp_buf_per_cpu); +#endif + EVENTMUX_Destroy(); + } + + if (DRV_CONFIG_enable_cp_mode(drv_cfg)) { + if (interrupt_counts) { + for (cpu = 0; cpu < GLOBAL_STATE_num_cpus(driver_state); + cpu++) { + for (idx = 0; + idx < DRV_CONFIG_num_events(drv_cfg); + idx++) { + SEP_DRV_LOG_TRACE( + "Interrupt count: CPU %d, event %d = %lld.", + cpu, idx, + interrupt_counts + [cpu * DRV_CONFIG_num_events( + drv_cfg) + + idx]); + } + } + } + } + + read_counter_info = CONTROL_Free_Memory(read_counter_info); + prev_counter_data = CONTROL_Free_Memory(prev_counter_data); + emon_buffer_driver_helper = + CONTROL_Free_Memory(emon_buffer_driver_helper); + lwpmudrv_Dump_Tracer("stop", 0); + + SEP_DRV_LOG_FLOW_OUT("Return value: %d", status); + return status; +} + +/* ------------------------------------------------------------------------- */ +/*! + * @fn static OS_STATUS lwpmudrv_Get_Normalized_TSC(IOCTL_ARGS arg) + * + * @param arg - Pointer to the IOCTL structure + * + * @return OS_STATUS + * + * @brief Return the current value of the normalized TSC. 
+ *
+ * Special Notes
+ */
+static OS_STATUS lwpmudrv_Get_Normalized_TSC(IOCTL_ARGS arg)
+{
+	U64 tsc = 0;
+	U64 this_cpu = 0;
+	size_t size_to_copy = sizeof(U64);
+
+	SEP_DRV_LOG_TRACE_IN("");
+
+	if (arg->len_drv_to_usr != size_to_copy ||
+	    arg->buf_drv_to_usr == NULL) {
+		SEP_DRV_LOG_ERROR_TRACE_OUT("Invalid arguments!");
+		return OS_INVALID;
+	}
+
+	preempt_disable();
+	UTILITY_Read_TSC(&tsc);
+	this_cpu = CONTROL_THIS_CPU();
+	/* Skew is subtracted exactly once in the non-PCL path below;
+	 * subtracting it here as well double-applied the correction. */
+	preempt_enable();
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 32)
+	if (drv_cfg && DRV_CONFIG_use_pcl(drv_cfg) == TRUE) {
+		preempt_disable();
+		tsc = cpu_clock(this_cpu);
+		preempt_enable();
+	} else {
+#endif
+		tsc -= TSC_SKEW(this_cpu);
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 32)
+	}
+#endif
+	if (copy_to_user((void __user *)arg->buf_drv_to_usr, (VOID *)&tsc, size_to_copy)) {
+		SEP_DRV_LOG_ERROR_TRACE_OUT("Memory copy failure!");
+		return OS_FAULT;
+	}
+	lwpmudrv_Dump_Tracer("marker", tsc);
+
+	SEP_DRV_LOG_TRACE_OUT("Success");
+	return OS_SUCCESS;
+}
+
+/* ------------------------------------------------------------------------- */
+/*!
+ * @fn static OS_STATUS lwpmudrv_Get_Num_Cores(IOCTL_ARGS arg)
+ *
+ * @param arg - Pointer to the IOCTL structure
+ *
+ * @return OS_STATUS
+ *
+ * @brief Quickly return the (total) number of cpus in the system.
+ * + * Special Notes + */ +static OS_STATUS lwpmudrv_Get_Num_Cores(IOCTL_ARGS arg) +{ + OS_STATUS status = OS_SUCCESS; + S32 num = GLOBAL_STATE_num_cpus(driver_state); + + SEP_DRV_LOG_FLOW_IN(""); + + if (arg->len_drv_to_usr != sizeof(S32) || arg->buf_drv_to_usr == NULL) { + SEP_DRV_LOG_ERROR_FLOW_OUT("Error: Invalid arguments."); + return OS_INVALID; + } + + SEP_DRV_LOG_TRACE("Num_Cores is %d, buf_usr_to_drv is 0x%p.", num, + arg->buf_drv_to_usr); + status = put_user(num, (S32 __user*)arg->buf_drv_to_usr); + + SEP_DRV_LOG_FLOW_OUT("Return value: %d", status); + return status; +} + +/* ------------------------------------------------------------------------- */ +/*! + * @fn static OS_STATUS lwpmudrv_Set_CPU_Mask(PVOID buf_usr_to_drv, U32 len_usr_to_drv) + * + * @param buf_usr_to_drv - pointer to the CPU mask buffer + * @param len_usr_to_drv - size of the CPU mask buffer + * + * @return OS_STATUS + * + * @brief process the CPU mask as requested by the user + * + * Special Notes + */ +static OS_STATUS lwpmudrv_Set_CPU_Mask(PVOID buf_usr_to_drv, + size_t len_usr_to_drv) +{ + U32 cpu_count = 0; + + SEP_DRV_LOG_FLOW_IN(""); + + if (GET_DRIVER_STATE() != DRV_STATE_IDLE) { + SEP_DRV_LOG_ERROR_FLOW_OUT( + "Skipped: driver state is not IDLE!"); + return OS_IN_PROGRESS; + } + + if (len_usr_to_drv == 0 || buf_usr_to_drv == NULL) { + SEP_DRV_LOG_ERROR_FLOW_OUT( + "len_usr_to_drv == 0 or buf_usr_to_drv is NULL!"); + return OS_INVALID; + } + + cpu_mask_bits = CONTROL_Allocate_Memory((int)len_usr_to_drv); + if (!cpu_mask_bits) { + SEP_DRV_LOG_ERROR_FLOW_OUT( + "Memory allocation failure for cpu_mask_bits!"); + return OS_NO_MEM; + } + + if (copy_from_user(cpu_mask_bits, (void __user *)buf_usr_to_drv, + (int)len_usr_to_drv)) { + SEP_DRV_LOG_ERROR_FLOW_OUT("Memory copy failure!"); + return OS_FAULT; + } + + for (cpu_count = 0; + cpu_count < (U32)GLOBAL_STATE_num_cpus(driver_state); + cpu_count++) { + CPU_STATE_accept_interrupt(&pcb[cpu_count]) = + cpu_mask_bits[cpu_count] ? 
1 : 0; + CPU_STATE_initial_mask(&pcb[cpu_count]) = + cpu_mask_bits[cpu_count] ? 1 : 0; + } + + SEP_DRV_LOG_FLOW_OUT("Success"); + return OS_SUCCESS; +} + +/* ------------------------------------------------------------------------- */ +/*! + * @fn static OS_STATUS lwpmudrv_Get_KERNEL_CS(IOCTL_ARGS arg) + * + * @param arg - Pointer to the IOCTL structure + * + * @return OS_STATUS + * + * @brief Return the value of the Kernel symbol KERNEL_CS. + * + * Special Notes + */ +static OS_STATUS lwpmudrv_Get_KERNEL_CS(IOCTL_ARGS arg) +{ + OS_STATUS status = OS_SUCCESS; + S32 num = __KERNEL_CS; + + SEP_DRV_LOG_FLOW_IN(""); + + if (arg->len_drv_to_usr != sizeof(S32) || arg->buf_drv_to_usr == NULL) { + SEP_DRV_LOG_ERROR_FLOW_OUT("Error: Invalid arguments."); + return OS_INVALID; + } + + SEP_DRV_LOG_TRACE("__KERNEL_CS is %d, buf_usr_to_drv is 0x%p.", num, + arg->buf_drv_to_usr); + status = put_user(num, (S32 __user *)arg->buf_drv_to_usr); + + SEP_DRV_LOG_FLOW_OUT("Return value: %d.", status); + return status; +} + +/* + * @fn lwpmudrv_Set_UID + * + * @param IN arg - pointer to the output buffer + * @return OS_STATUS + * + * @brief Receive the value of the UID of the collector process. + */ +static OS_STATUS lwpmudrv_Set_UID(IOCTL_ARGS arg) +{ + OS_STATUS status = OS_SUCCESS; + + SEP_DRV_LOG_FLOW_IN(""); + + if (arg->len_usr_to_drv != sizeof(uid_t) || + arg->buf_usr_to_drv == NULL) { + SEP_DRV_LOG_ERROR_FLOW_OUT("Error: Invalid arguments."); + return OS_INVALID; + } + + if (GET_DRIVER_STATE() != DRV_STATE_IDLE) { + SEP_DRV_LOG_ERROR_FLOW_OUT( + "Skipped: driver state is not IDLE!"); + return OS_IN_PROGRESS; + } + + status = get_user(uid, (S32 __user *)arg->buf_usr_to_drv); + SEP_DRV_LOG_TRACE("Uid is %d.", uid); + + SEP_DRV_LOG_FLOW_OUT("Return value: %d.", status); + return status; +} + +/* ------------------------------------------------------------------------- */ +/*! 
+ * @fn static OS_STATUS lwpmudrv_Get_TSC_Skew_Info(IOCTL_ARGS arg) + * + * @param arg - Pointer to the IOCTL structure + * + * @return OS_STATUS + * @brief Return the current value of the TSC skew data + * + * Special Notes + */ +static OS_STATUS lwpmudrv_Get_TSC_Skew_Info(IOCTL_ARGS arg) +{ + S64 *skew_array; + size_t skew_array_len; + S32 i; + + SEP_DRV_LOG_FLOW_IN(""); + + skew_array_len = GLOBAL_STATE_num_cpus(driver_state) * sizeof(U64); + if (arg->len_drv_to_usr < skew_array_len || + arg->buf_drv_to_usr == NULL) { + SEP_DRV_LOG_ERROR_FLOW_OUT("Input buffer too small or NULL!"); + return OS_INVALID; + } + + if (!DRV_CONFIG_enable_cp_mode(drv_cfg) && + GET_DRIVER_STATE() != DRV_STATE_STOPPED) { + SEP_DRV_LOG_ERROR_FLOW_OUT( + "Skipped: cp_mode not enabled and driver is not STOPPED!"); + return OS_IN_PROGRESS; + } + + SEP_DRV_LOG_TRACE("Dispatched with len_drv_to_usr=%lld.", + arg->len_drv_to_usr); + + skew_array = CONTROL_Allocate_Memory(skew_array_len); + if (skew_array == NULL) { + SEP_DRV_LOG_ERROR_FLOW_OUT( + "Memory allocation failure for skew_array!"); + return OS_NO_MEM; + } + + for (i = 0; i < GLOBAL_STATE_num_cpus(driver_state); i++) { + skew_array[i] = TSC_SKEW(i); + } + + if (copy_to_user((void __user *)arg->buf_drv_to_usr, skew_array, skew_array_len)) { + skew_array = CONTROL_Free_Memory(skew_array); + SEP_DRV_LOG_ERROR_FLOW_OUT( + "Memory copy failure for skew_array!"); + return OS_FAULT; + } + + skew_array = CONTROL_Free_Memory(skew_array); + + SEP_DRV_LOG_FLOW_OUT("Success"); + return OS_SUCCESS; +} + +/* ------------------------------------------------------------------------- */ +/*! + * @fn static OS_STATUS lwpmudrv_Collect_Sys_Config(IOCTL_ARGS arg) + * + * @param arg - Pointer to the IOCTL structure + * + * @return OS_STATUS + * + * @brief Local function that handles the COLLECT_SYS_CONFIG call. + * @brief Builds and collects the SYS_INFO data needed. + * @brief Writes the result into the argument. 
+ * + * Special Notes + */ +static OS_STATUS lwpmudrv_Collect_Sys_Config(IOCTL_ARGS arg) +{ + OS_STATUS status = OS_SUCCESS; + U32 num; + + SEP_DRV_LOG_FLOW_IN(""); + + num = SYS_INFO_Build(); + + if (arg->len_drv_to_usr < sizeof(S32) || arg->buf_drv_to_usr == NULL) { + SEP_DRV_LOG_ERROR_FLOW_OUT("Error: Invalid arguments."); + return OS_INVALID; + } + + SEP_DRV_LOG_TRACE("Size of sys info is %d.", num); + status = put_user(num, (S32 __user *)arg->buf_drv_to_usr); + + SEP_DRV_LOG_FLOW_OUT("Return value: %d", status); + return status; +} + +/* ------------------------------------------------------------------------- */ +/*! + * @fn static OS_STATUS lwpmudrv_Sys_Config(IOCTL_ARGS arg) + * + * @param arg - Pointer to the IOCTL structure + * + * @return OS_STATUS + * + * @brief Return the current value of the normalized TSC. + * + * @brief Transfers the VTSA_SYS_INFO data back to the abstraction layer. + * @brief The buf_usr_to_drv should have enough space to handle the transfer. + */ +static OS_STATUS lwpmudrv_Sys_Config(IOCTL_ARGS arg) +{ + SEP_DRV_LOG_FLOW_IN(""); + + if (arg->len_drv_to_usr == 0 || arg->buf_drv_to_usr == NULL) { + SEP_DRV_LOG_ERROR_FLOW_OUT("Error: Invalid arguments."); + return OS_INVALID; + } + + SYS_INFO_Transfer(arg->buf_drv_to_usr, arg->len_drv_to_usr); + + SEP_DRV_LOG_FLOW_OUT("Success"); + return OS_SUCCESS; +} + +/* ------------------------------------------------------------------------- */ +/*! 
+ * @fn static OS_STATUS lwpmudrv_Samp_Read_Num_Of_Core_Counters(IOCTL_ARGS arg) + * + * @param arg - Pointer to the IOCTL structure + * + * @return OS_STATUS + * + * @brief Read memory mapped i/o physical location + * + * Special Notes + */ +static OS_STATUS lwpmudrv_Samp_Read_Num_Of_Core_Counters(IOCTL_ARGS arg) +{ + U64 rax, rbx, rcx, rdx, num_basic_functions; + U32 val = 0; + OS_STATUS status = OS_SUCCESS; + + SEP_DRV_LOG_FLOW_IN(""); + + if (arg->len_drv_to_usr == 0 || arg->buf_drv_to_usr == NULL) { + SEP_DRV_LOG_ERROR_FLOW_OUT("Error: Invalid arguments."); + return OS_INVALID; + } + + UTILITY_Read_Cpuid(0x0, &num_basic_functions, &rbx, &rcx, &rdx); + + if (num_basic_functions >= 0xA) { + UTILITY_Read_Cpuid(0xA, &rax, &rbx, &rcx, &rdx); + val = ((U32)(rax >> 8)) & 0xFF; + } + status = put_user(val, (U32 __user *)arg->buf_drv_to_usr); + SEP_DRV_LOG_TRACE("Num of counter is %d.", val); + + SEP_DRV_LOG_FLOW_OUT("Return value: %d.", status); + return status; +} + +#if defined(BUILD_CHIPSET) + +/* ------------------------------------------------------------------------- */ +/*! + * @fn static DRV_BOOL lwpmudrv_Is_Physical_Address_Free(U32 physical_addrss) + * + * @param physical_address - physical address + * + * @return DRV_BOOL + * + * @brief Check if physical address is available + * + * Special Notes + */ +static DRV_BOOL lwpmudrv_Is_Physical_Address_Free(U32 physical_address) +{ + U32 new_value; + U32 test_value = 0; + U32 value = 0; + + SEP_DRV_LOG_TRACE_IN(""); + + if (GET_DRIVER_STATE() != DRV_STATE_IDLE) { + SEP_DRV_LOG_WARNING_TRACE_OUT( + "FALSE: driver state is not IDLE!"); + return FALSE; + } + if (physical_address == 0) { + SEP_DRV_LOG_WARNING_TRACE_OUT("FALSE: is NULL!"); + return FALSE; + } + + // First attempt read + // + PCI_Read_From_Memory_Address(physical_address, &value); + + // Value must be 0xFFFFFFFFF or there is NO chance + // that this memory location is available. 
+ // + if (value != 0xFFFFFFFF) { + SEP_DRV_LOG_TRACE_OUT("FALSE: value is not 0xFFFFFFFF!"); + return FALSE; + } + + // + // Try to write a bit to a zero (this probably + // isn't too safe, but this is just for testing) + // + new_value = 0xFFFFFFFE; + PCI_Write_To_Memory_Address(physical_address, new_value); + PCI_Read_From_Memory_Address(physical_address, &test_value); + + // Write back original + PCI_Write_To_Memory_Address(physical_address, value); + + if (new_value == test_value) { + // The write appeared to change the + // memory, it must be mapped already + // + SEP_DRV_LOG_TRACE_OUT("FALSE: appears to be mapped already!"); + return FALSE; + } + + if (test_value == 0xFFFFFFFF) { + // The write did not change the bit, so + // apparently, this memory must not be mapped + // to anything. + // + SEP_DRV_LOG_TRACE_OUT("TRUE: appears not to be mapped!"); + return TRUE; + } + + SEP_DRV_LOG_TRACE_OUT("FALSE: Odd case!"); + return FALSE; +} + +/* ------------------------------------------------------------------------- */ +/*! 
+ * @fn static OS_STATUS lwpmudrv_Samp_Find_Physical_Address(IOCTL_ARGS arg) + * + * @param arg - Pointer to the IOCTL structure + * + * @return OS_STATUS + * + * @brief Find a free physical address + * + * Special Notes + */ +static OS_STATUS lwpmudrv_Samp_Find_Physical_Address(IOCTL_ARGS arg) +{ + CHIPSET_PCI_SEARCH_ADDR_NODE user_addr; + CHIPSET_PCI_SEARCH_ADDR search_addr; + U32 addr; + + SEP_DRV_LOG_FLOW_IN(""); + + search_addr = (CHIPSET_PCI_SEARCH_ADDR)arg->buf_usr_to_drv; + + if (GET_DRIVER_STATE() != DRV_STATE_IDLE) { + SEP_DRV_LOG_ERROR_FLOW_OUT( + "Skipped: driver state is not IDLE."); + return OS_IN_PROGRESS; + } + + if (arg->len_drv_to_usr == 0 || arg->buf_drv_to_usr == NULL || + arg->len_usr_to_drv == 0 || arg->buf_usr_to_drv == NULL) { + SEP_DRV_LOG_ERROR_FLOW_OUT("Invalid arguments!"); + return OS_INVALID; + } + + if (!search_addr) { + SEP_DRV_LOG_ERROR_FLOW_OUT("Search_addr is NULL!"); + return OS_FAULT; + } + + if (!access_ok(VERIFY_WRITE, (void __user *)search_addr, + sizeof(CHIPSET_PCI_SEARCH_ADDR_NODE))) { + SEP_DRV_LOG_ERROR_FLOW_OUT("Access not OK!"); + return OS_FAULT; + } + + if (copy_from_user(&user_addr, (void __user *)search_addr, + sizeof(CHIPSET_PCI_SEARCH_ADDR_NODE))) { + SEP_DRV_LOG_ERROR_FLOW_OUT( + "Error: Memory copy failure for search_addr!"); + return OS_FAULT; + } + + if (CHIPSET_PCI_SEARCH_ADDR_start(&user_addr) > + CHIPSET_PCI_SEARCH_ADDR_stop(&user_addr)) { + SEP_DRV_LOG_ERROR_FLOW_OUT( + "SEARCH_ADDR_start > SEARCH_ADDR_stop!"); + return OS_INVALID; + } + + CHIPSET_PCI_SEARCH_ADDR_address(&user_addr) = 0; + + for (addr = CHIPSET_PCI_SEARCH_ADDR_start(&user_addr); + addr <= CHIPSET_PCI_SEARCH_ADDR_stop(&user_addr); + addr += CHIPSET_PCI_SEARCH_ADDR_increment(&user_addr)) { + SEP_DRV_LOG_TRACE("Addr=%x:", addr); + if (lwpmudrv_Is_Physical_Address_Free(addr)) { + CHIPSET_PCI_SEARCH_ADDR_address(&user_addr) = addr; + break; + } + } + + if (copy_to_user((void __user *)arg->buf_drv_to_usr, (VOID *)&user_addr, + 
sizeof(CHIPSET_PCI_SEARCH_ADDR_NODE))) { + SEP_DRV_LOG_ERROR_FLOW_OUT( + "Error: Memory copy failure for user_addr!"); + return OS_FAULT; + } + + SEP_DRV_LOG_FLOW_OUT("Success"); + return OS_SUCCESS; +} + +/* ------------------------------------------------------------------------- */ +/*! + * @fn static OS_STATUS lwpmudrv_Samp_Read_PCI_Config(IOCTL_ARGS arg) + * + * @param arg - Pointer to the IOCTL structure + * + * @return OS_STATUS + * + * @brief Read the PCI Configuration Space + * + * Special Notes + */ +static OS_STATUS lwpmudrv_Samp_Read_PCI_Config(IOCTL_ARGS arg) +{ + CHIPSET_PCI_CONFIG rd_pci = NULL; + + SEP_DRV_LOG_FLOW_IN(""); + + if (arg->len_drv_to_usr == 0 || arg->buf_drv_to_usr == NULL) { + SEP_DRV_LOG_ERROR_FLOW_OUT("Invalid arguments!"); + return OS_FAULT; + } + + rd_pci = CONTROL_Allocate_Memory(arg->len_drv_to_usr); + if (rd_pci == NULL) { + SEP_DRV_LOG_ERROR_FLOW_OUT( + "Memory allocation failure for rd_pci!"); + return OS_NO_MEM; + } + + if (copy_from_user(rd_pci, (void __user *)arg->buf_usr_to_drv, + sizeof(CHIPSET_PCI_CONFIG_NODE))) { + CONTROL_Free_Memory(rd_pci); + SEP_DRV_LOG_ERROR_FLOW_OUT("Memory copy failure for rd_pci!"); + return OS_FAULT; + } + + CHIPSET_PCI_CONFIG_value(rd_pci) = + PCI_Read_U32(CHIPSET_PCI_CONFIG_bus(rd_pci), + CHIPSET_PCI_CONFIG_device(rd_pci), + CHIPSET_PCI_CONFIG_function(rd_pci), + CHIPSET_PCI_CONFIG_offset(rd_pci)); + + if (copy_to_user((void __user *)arg->buf_drv_to_usr, (VOID *)rd_pci, + sizeof(CHIPSET_PCI_CONFIG_NODE))) { + CONTROL_Free_Memory(rd_pci); + SEP_DRV_LOG_ERROR_FLOW_OUT("Memory copy failure for rd_pci!"); + return OS_FAULT; + } + + SEP_DRV_LOG_TRACE("Value at this PCI address:0x%x.", + CHIPSET_PCI_CONFIG_value(rd_pci)); + + CONTROL_Free_Memory(rd_pci); + + SEP_DRV_LOG_FLOW_OUT("Success."); + return OS_SUCCESS; +} + +/* ------------------------------------------------------------------------- */ +/*! 
+ * @fn static OS_STATUS lwpmudrv_Samp_Write_PCI_Config(IOCTL_ARGS arg)
+ *
+ * @param arg - Pointer to the IOCTL structure
+ *
+ * @return OS_STATUS
+ *
+ * @brief Write to the PCI Configuration Space
+ *
+ * Special Notes
+ */
+static OS_STATUS lwpmudrv_Samp_Write_PCI_Config(IOCTL_ARGS arg)
+{
+	CHIPSET_PCI_CONFIG wr_pci = NULL;
+
+	SEP_DRV_LOG_FLOW_IN("");
+
+	// the following allows "sep -el -pc" to work, since the command must access
+	// the driver ioctls before the driver is used for a collection
+
+	if (!DRIVER_STATE_IN(GET_DRIVER_STATE(),
+			     STATE_BIT_UNINITIALIZED | STATE_BIT_IDLE)) {
+		SEP_DRV_LOG_ERROR_FLOW_OUT(
+			"Skipped: Driver state is not IDLE or UNINITIALIZED!");
+		return OS_IN_PROGRESS;
+	}
+
+	if (arg->len_usr_to_drv == 0 || arg->buf_usr_to_drv == NULL) {
+		SEP_DRV_LOG_ERROR_FLOW_OUT("Invalid arguments!");
+		return OS_INVALID;
+	}
+
+	wr_pci = CONTROL_Allocate_Memory(arg->len_usr_to_drv);
+	if (wr_pci == NULL) {
+		SEP_DRV_LOG_ERROR_FLOW_OUT(
+			"Memory allocation failure for wr_pci!");
+		return OS_NO_MEM;
+	}
+	if (copy_from_user(wr_pci, (void __user *)arg->buf_usr_to_drv,
+			   sizeof(CHIPSET_PCI_CONFIG_NODE))) {
+		CONTROL_Free_Memory(wr_pci);
+		SEP_DRV_LOG_ERROR_FLOW_OUT("Memory copy failure for wr_pci!");
+		return OS_FAULT;
+	}
+
+	PCI_Write_U32(CHIPSET_PCI_CONFIG_bus(wr_pci),
+		      CHIPSET_PCI_CONFIG_device(wr_pci),
+		      CHIPSET_PCI_CONFIG_function(wr_pci),
+		      CHIPSET_PCI_CONFIG_offset(wr_pci),
+		      CHIPSET_PCI_CONFIG_value(wr_pci));
+
+	CONTROL_Free_Memory(wr_pci);
+
+	SEP_DRV_LOG_FLOW_OUT("Success");
+	return OS_SUCCESS;
+}
+
+/* ------------------------------------------------------------------------- */
+/*!
+ * @fn static OS_STATUS lwpmudrv_Samp_Chipset_Init(IOCTL_ARGS arg)
+ *
+ * @param arg - Pointer to the IOCTL structure
+ *
+ * @return OS_STATUS
+ *
+ * @brief Initialize the chipset configuration
+ *
+ * Special Notes
+ */
+static OS_STATUS lwpmudrv_Samp_Chipset_Init(IOCTL_ARGS arg)
+{
+	PVOID buf_usr_to_drv;
+	U32 len_usr_to_drv;
+
+	SEP_DRV_LOG_FLOW_IN("");
+
+	buf_usr_to_drv = arg->buf_usr_to_drv;
+	len_usr_to_drv = arg->len_usr_to_drv;
+
+	if (GET_DRIVER_STATE() != DRV_STATE_IDLE) {
+		SEP_DRV_LOG_ERROR_FLOW_OUT(
+			"Skipped: driver state is not IDLE!");
+		return OS_IN_PROGRESS;
+	}
+
+	if (buf_usr_to_drv == NULL || len_usr_to_drv == 0) {
+		SEP_DRV_LOG_ERROR_FLOW_OUT("Error: Invalid arguments!");
+		return OS_INVALID;
+	}
+
+	// First things first: Make a copy of the data for global use.
+	pma = CONTROL_Allocate_Memory(len_usr_to_drv);
+
+	if (pma == NULL) {
+		SEP_DRV_LOG_ERROR_FLOW_OUT(
+			"Memory allocation failure for pma!");
+		return OS_NO_MEM;
+	}
+
+	if (copy_from_user(pma, (void __user *)buf_usr_to_drv, len_usr_to_drv)) {
+		SEP_DRV_LOG_ERROR_FLOW_OUT("Memory copy failure for pma!");
+		return OS_FAULT;
+	}
+
+#if defined(MY_DEBUG)
+
+	SEP_DRV_LOG_TRACE("Chipset Configuration follows...");
+	SEP_DRV_LOG_TRACE("pma->length=%d.", CHIPSET_CONFIG_length(pma));
+	SEP_DRV_LOG_TRACE("pma->version=%d.",
+			  CHIPSET_CONFIG_major_version(pma));
+	SEP_DRV_LOG_TRACE("pma->processor=%d.", CHIPSET_CONFIG_processor(pma));
+	SEP_DRV_LOG_TRACE("pma->mch_chipset=%d.",
+			  CHIPSET_CONFIG_mch_chipset(pma));
+	SEP_DRV_LOG_TRACE("pma->ich_chipset=%d.",
+			  CHIPSET_CONFIG_ich_chipset(pma));
+	SEP_DRV_LOG_TRACE("pma->gmch_chipset=%d.",
+			  CHIPSET_CONFIG_gmch_chipset(pma));
+	SEP_DRV_LOG_TRACE("pma->mother_board_time=%d.",
+			  CHIPSET_CONFIG_motherboard_time(pma));
+	SEP_DRV_LOG_TRACE("pma->host_proc_run=%d.",
+			  CHIPSET_CONFIG_host_proc_run(pma));
+	SEP_DRV_LOG_TRACE("pma->noa_chipset=%d.",
+			  CHIPSET_CONFIG_noa_chipset(pma));
+	SEP_DRV_LOG_TRACE("pma->bnb_chipset=%d.",
CHIPSET_CONFIG_bnb_chipset(pma)); + + if (CHIPSET_CONFIG_mch_chipset(pma)) { + SEP_DRV_LOG_TRACE("pma->mch->phys_add=0x%llx.", + CHIPSET_SEGMENT_physical_address( + &CHIPSET_CONFIG_mch(pma))); + SEP_DRV_LOG_TRACE( + "pma->mch->size=%d.", + CHIPSET_SEGMENT_size(&CHIPSET_CONFIG_mch(pma))); + SEP_DRV_LOG_TRACE( + "pma->mch->num_counters=%d.", + CHIPSET_SEGMENT_num_counters(&CHIPSET_CONFIG_mch(pma))); + SEP_DRV_LOG_TRACE( + "pma->mch->total_events=%d.", + CHIPSET_SEGMENT_total_events(&CHIPSET_CONFIG_mch(pma))); + } + + if (CHIPSET_CONFIG_ich_chipset(pma)) { + SEP_DRV_LOG_TRACE("pma->ich->phys_add=0x%llx.", + CHIPSET_SEGMENT_physical_address( + &CHIPSET_CONFIG_ich(pma))); + SEP_DRV_LOG_TRACE( + "pma->ich->size=%d.", + CHIPSET_SEGMENT_size(&CHIPSET_CONFIG_ich(pma))); + SEP_DRV_LOG_TRACE( + "pma->ich->num_counters=%d.", + CHIPSET_SEGMENT_num_counters(&CHIPSET_CONFIG_ich(pma))); + SEP_DRV_LOG_TRACE( + "pma->ich->total_events=%d.", + CHIPSET_SEGMENT_total_events(&CHIPSET_CONFIG_ich(pma))); + } + + if (CHIPSET_CONFIG_gmch_chipset(pma)) { + SEP_DRV_LOG_TRACE("pma->gmch->phys_add=0x%llx.", + CHIPSET_SEGMENT_physical_address( + &CHIPSET_CONFIG_gmch(pma))); + SEP_DRV_LOG_TRACE( + "pma->gmch->size=%d.", + CHIPSET_SEGMENT_size(&CHIPSET_CONFIG_gmch(pma))); + SEP_DRV_LOG_TRACE("pma->gmch->num_counters=%d.", + CHIPSET_SEGMENT_num_counters( + &CHIPSET_CONFIG_gmch(pma))); + SEP_DRV_LOG_TRACE("pma->gmch->total_events=%d.", + CHIPSET_SEGMENT_total_events( + &CHIPSET_CONFIG_gmch(pma))); + SEP_DRV_LOG_TRACE("pma->gmch->read_register=0x%x.", + CHIPSET_SEGMENT_read_register( + &CHIPSET_CONFIG_gmch(pma))); + SEP_DRV_LOG_TRACE("pma->gmch->write_register=0x%x.", + CHIPSET_SEGMENT_write_register( + &CHIPSET_CONFIG_gmch(pma))); + } + +#endif + + // Set up the global cs_dispatch table + cs_dispatch = UTILITY_Configure_Chipset(); + if (cs_dispatch == NULL) { + SEP_DRV_LOG_ERROR_FLOW_OUT("Unknown chipset family!"); + return OS_INVALID; + } + + // Initialize chipset configuration + if 
(cs_dispatch->init_chipset()) { + SEP_DRV_LOG_ERROR_FLOW_OUT("Failed to initialize the chipset!"); + return OS_INVALID; + } + SEP_DRV_LOG_FLOW_OUT("Success"); + return OS_SUCCESS; +} + +#endif + +/* ------------------------------------------------------------------------- */ +/*! + * @fn static OS_STATUS lwpmudrv_Get_Platform_Info(IOCTL_ARGS arg) + * + * @param arg - Pointer to the IOCTL structure + * + * @return OS_STATUS + * + * @brief Reads the MSR_PLATFORM_INFO register if present + * + * Special Notes + */ +static OS_STATUS lwpmudrv_Get_Platform_Info(IOCTL_ARGS args) +{ + U32 size = sizeof(DRV_PLATFORM_INFO_NODE); + OS_STATUS status = OS_SUCCESS; + DRV_PLATFORM_INFO platform_data = NULL; + U32 *dispatch_ids = NULL; + DISPATCH dispatch_ptr = NULL; + U32 i = 0; + U32 num_entries; // # dispatch ids to process + + SEP_DRV_LOG_FLOW_IN(""); + + num_entries = + args->len_usr_to_drv / sizeof(U32); // # dispatch ids to process + + platform_data = CONTROL_Allocate_Memory(sizeof(DRV_PLATFORM_INFO_NODE)); + if (!platform_data) { + SEP_DRV_LOG_ERROR_FLOW_OUT( + "Memory allocation failure for platform_data!"); + return OS_NO_MEM; + } + + memset(platform_data, 0, sizeof(DRV_PLATFORM_INFO_NODE)); + if (args->len_usr_to_drv > 0 && args->buf_usr_to_drv != NULL) { + dispatch_ids = CONTROL_Allocate_Memory(args->len_usr_to_drv); + if (!dispatch_ids) { + platform_data = CONTROL_Free_Memory(platform_data); + SEP_DRV_LOG_ERROR_FLOW_OUT( + "Memory allocation failure for dispatch_ids!"); + return OS_NO_MEM; + } + + status = copy_from_user(dispatch_ids, (void __user *)args->buf_usr_to_drv, + args->len_usr_to_drv); + if (status) { + platform_data = CONTROL_Free_Memory(platform_data); + dispatch_ids = CONTROL_Free_Memory(dispatch_ids); + SEP_DRV_LOG_ERROR_FLOW_OUT( + "Memory copy failure for dispatch_ids!"); + return status; + } + for (i = 0; i < num_entries; i++) { + if (dispatch_ids[i] > 0) { + dispatch_ptr = + UTILITY_Configure_CPU(dispatch_ids[i]); + if (dispatch_ptr && + 
dispatch_ptr->platform_info) { + dispatch_ptr->platform_info( + (PVOID)platform_data); + } + } + } + dispatch_ids = CONTROL_Free_Memory(dispatch_ids); + } else if (devices) { + dispatch_ptr = LWPMU_DEVICE_dispatch( + &devices[0]); //placeholder, needs to be fixed + if (dispatch_ptr && dispatch_ptr->platform_info) { + dispatch_ptr->platform_info((PVOID)platform_data); + } + } + + if (args->len_drv_to_usr < size || args->buf_drv_to_usr == NULL) { + platform_data = CONTROL_Free_Memory(platform_data); + SEP_DRV_LOG_ERROR_FLOW_OUT("Error: Invalid arguments!"); + return OS_FAULT; + } + + status = copy_to_user((void __user *)args->buf_drv_to_usr, platform_data, size); + platform_data = CONTROL_Free_Memory(platform_data); + + SEP_DRV_LOG_FLOW_OUT("Return value: %d", status); + return status; +} +/* ------------------------------------------------------------------------- */ +/*! + * @fn void lwpmudrv_Setup_Cpu_Topology (value) + * + * @brief Sets up the per CPU state structures + * + * @param IOCTL_ARGS args + * + * @return OS_STATUS + * + * Special Notes: + * This function was added to support abstract dll creation. 
+ */ +static OS_STATUS lwpmudrv_Setup_Cpu_Topology(IOCTL_ARGS args) +{ + S32 cpu_num; + S32 iter; + DRV_TOPOLOGY_INFO drv_topology, dt; + + SEP_DRV_LOG_FLOW_IN(""); + + if (GET_DRIVER_STATE() != DRV_STATE_IDLE) { + SEP_DRV_LOG_ERROR_FLOW_OUT( + "Skipped: driver state is not IDLE!"); + return OS_IN_PROGRESS; + } + if (args->len_usr_to_drv == 0 || args->buf_usr_to_drv == NULL) { + SEP_DRV_LOG_ERROR_FLOW_OUT( + "Topology information has been misconfigured!"); + return OS_INVALID; + } + + drv_topology = CONTROL_Allocate_Memory(args->len_usr_to_drv); + if (drv_topology == NULL) { + SEP_DRV_LOG_ERROR_FLOW_OUT( + "Memory allocation failure for drv_topology!"); + return OS_NO_MEM; + } + + if (copy_from_user(drv_topology, + (void __user *)(args->buf_usr_to_drv), + args->len_usr_to_drv)) { + drv_topology = CONTROL_Free_Memory(drv_topology); + SEP_DRV_LOG_ERROR_FLOW_OUT( + "Memory copy failure for drv_topology!"); + return OS_FAULT; + } + /* + * Topology Initializations + */ + num_packages = 0; + for (iter = 0; iter < GLOBAL_STATE_num_cpus(driver_state); iter++) { + dt = &drv_topology[iter]; + cpu_num = DRV_TOPOLOGY_INFO_cpu_number(dt); + CPU_STATE_socket_master(&pcb[cpu_num]) = + DRV_TOPOLOGY_INFO_socket_master(dt); + num_packages += CPU_STATE_socket_master(&pcb[cpu_num]); + CPU_STATE_core_master(&pcb[cpu_num]) = + DRV_TOPOLOGY_INFO_core_master(dt); + CPU_STATE_thr_master(&pcb[cpu_num]) = + DRV_TOPOLOGY_INFO_thr_master(dt); + CPU_STATE_core_type(&pcb[cpu_num]) = + DRV_TOPOLOGY_INFO_cpu_core_type(dt); + CPU_STATE_cpu_module_num(&pcb[cpu_num]) = + (U16)DRV_TOPOLOGY_INFO_cpu_module_num( + &drv_topology[iter]); + CPU_STATE_cpu_module_master(&pcb[cpu_num]) = + (U16)DRV_TOPOLOGY_INFO_cpu_module_master( + &drv_topology[iter]); + CPU_STATE_system_master(&pcb[cpu_num]) = (iter) ? 
0 : 1; + SEP_DRV_LOG_TRACE("Cpu %d sm = %d cm = %d tm = %d.", cpu_num, + CPU_STATE_socket_master(&pcb[cpu_num]), + CPU_STATE_core_master(&pcb[cpu_num]), + CPU_STATE_thr_master(&pcb[cpu_num])); + } + drv_topology = CONTROL_Free_Memory(drv_topology); + + SEP_DRV_LOG_FLOW_OUT("Success"); + return OS_SUCCESS; +} + +/* ------------------------------------------------------------------------- */ +/*! + * @fn static OS_STATUS lwpmudrv_Get_Num_Samples(IOCTL_ARGS arg) + * + * @param arg - Pointer to the IOCTL structure + * + * @return OS_STATUS + * + * @brief Returns the number of samples collected during the current + * @brief sampling run + * + * Special Notes + */ +static OS_STATUS lwpmudrv_Get_Num_Samples(IOCTL_ARGS args) +{ + S32 cpu_num; + U64 samples = 0; + OS_STATUS status; + + SEP_DRV_LOG_FLOW_IN(""); + + if (pcb == NULL) { + SEP_DRV_LOG_ERROR("PCB was not initialized."); + return OS_FAULT; + } + + if (args->len_drv_to_usr == 0 || args->buf_drv_to_usr == NULL) { + SEP_DRV_LOG_ERROR_FLOW_OUT( + "Topology information has been misconfigured!"); + return OS_INVALID; + } + + for (cpu_num = 0; cpu_num < GLOBAL_STATE_num_cpus(driver_state); + cpu_num++) { + samples += CPU_STATE_num_samples(&pcb[cpu_num]); + + SEP_DRV_LOG_TRACE("Samples for cpu %d = %lld.", cpu_num, + CPU_STATE_num_samples(&pcb[cpu_num])); + } + SEP_DRV_LOG_TRACE("Total number of samples %lld.", samples); + status = put_user(samples, (U64 __user *)args->buf_drv_to_usr); + + SEP_DRV_LOG_FLOW_OUT("Return value: %d", status); + return status; +} + +/* ------------------------------------------------------------------------- */ +/*! 
+ * @fn static OS_STATUS lwpmudrv_Set_Device_Num_Units(IOCTL_ARGS arg) + * + * @param arg - Pointer to the IOCTL structure + * + * @return OS_STATUS + * + * @brief Set the number of devices for the sampling run + * + * Special Notes + */ +static OS_STATUS lwpmudrv_Set_Device_Num_Units(IOCTL_ARGS args) +{ + SEP_DRV_LOG_FLOW_IN(""); + + if (GET_DRIVER_STATE() != DRV_STATE_IDLE) { + SEP_DRV_LOG_FLOW_OUT( + "'Success'/Skipped: driver state is not IDLE!"); + return OS_SUCCESS; + } + + if (args->len_usr_to_drv == 0 || args->buf_usr_to_drv == NULL) { + SEP_DRV_LOG_ERROR_FLOW_OUT("Error: Invalid arguments."); + return OS_INVALID; + } + + if (copy_from_user(&(LWPMU_DEVICE_num_units(&devices[cur_device])), + (void __user *)args->buf_usr_to_drv, sizeof(U32))) { + SEP_DRV_LOG_ERROR_FLOW_OUT( + "Memory copy failure for device num units!"); + return OS_FAULT; + } + SEP_DRV_LOG_TRACE("LWP: num_units = %d cur_device = %d.", + LWPMU_DEVICE_num_units(&devices[cur_device]), + cur_device); + // on to the next device. + cur_device++; + + SEP_DRV_LOG_FLOW_OUT("Success"); + return OS_SUCCESS; +} + +/* ------------------------------------------------------------------------- */ +/*! 
+ * @fn static OS_STATUS lwpmudrv_Get_Interval_Counts(IOCTL_ARGS arg)
+ *
+ * @param arg - Pointer to the IOCTL structure
+ *
+ * @return OS_STATUS
+ *
+ * @brief Returns the interrupt counts collected during the current
+ * @brief continuous profiling (cp mode) run
+ *
+ * Special Notes
+ */
+static OS_STATUS lwpmudrv_Get_Interval_Counts(IOCTL_ARGS args)
+{
+	SEP_DRV_LOG_FLOW_IN("");
+
+	if (!DRV_CONFIG_enable_cp_mode(drv_cfg)) {
+		SEP_DRV_LOG_ERROR_FLOW_OUT("Not in CP mode!");
+		return OS_INVALID;
+	}
+	if (args->len_drv_to_usr == 0 || args->buf_drv_to_usr == NULL) {
+		SEP_DRV_LOG_ERROR_FLOW_OUT(
+			"Interval Counts information has been misconfigured!");
+		return OS_INVALID;
+	}
+	if (!interrupt_counts) {
+		SEP_DRV_LOG_ERROR_FLOW_OUT("Interrupt_counts is NULL!");
+		return OS_INVALID;
+	}
+
+	if (copy_to_user((void __user *)args->buf_drv_to_usr, interrupt_counts,
+			 args->len_drv_to_usr)) {
+		SEP_DRV_LOG_ERROR_FLOW_OUT("Memory copy failure!");
+		return OS_FAULT;
+	}
+
+	SEP_DRV_LOG_FLOW_OUT("Success");
+	return OS_SUCCESS;
+}
+
+/* ------------------------------------------------------------------------- */
+/*!
+ * @fn U64 lwpmudrv_Set_Uncore_Topology_Info_And_Scan
+ *
+ * @brief Sets the uncore topology info and scans (currently a no-op stub)
+ *
+ * @param arg Pointer to the IOCTL structure
+ *
+ * @return status
+ *
+ * Special Notes:
+ *
+ */
+static OS_STATUS lwpmudrv_Set_Uncore_Topology_Info_And_Scan(IOCTL_ARGS args)
+{
+	SEP_DRV_LOG_FLOW_IN("");
+	SEP_DRV_LOG_FLOW_OUT("Success [but did not do anything]");
+	return OS_SUCCESS;
+}
+
+/* ------------------------------------------------------------------------- */
+/*!
+ * @fn U64 lwpmudrv_Get_Uncore_Topology + * + * @brief Reads the MSR_PLATFORM_INFO register if present + * + * @param arg Pointer to the IOCTL structure + * + * @return status + * + * Special Notes: + * + */ +static OS_STATUS lwpmudrv_Get_Uncore_Topology(IOCTL_ARGS args) +{ + U32 dev; + static UNCORE_TOPOLOGY_INFO_NODE req_uncore_topology; + + SEP_DRV_LOG_FLOW_IN(""); + + if (args->buf_usr_to_drv == NULL) { + SEP_DRV_LOG_ERROR_FLOW_OUT( + "Invalid arguments (buf_usr_to_drv is NULL)!"); + return OS_INVALID; + } + if (args->len_usr_to_drv != sizeof(UNCORE_TOPOLOGY_INFO_NODE)) { + SEP_DRV_LOG_ERROR_FLOW_OUT( + "Invalid arguments (unexpected len_usr_to_drv value)!"); + return OS_INVALID; + } + if (args->buf_drv_to_usr == NULL) { + SEP_DRV_LOG_ERROR_FLOW_OUT( + "Invalid arguments (buf_drv_to_usr is NULL)!"); + return OS_INVALID; + } + if (args->len_drv_to_usr != sizeof(UNCORE_TOPOLOGY_INFO_NODE)) { + SEP_DRV_LOG_ERROR_FLOW_OUT( + "Invalid arguments (unexpected len_drv_to_usr value)!"); + return OS_INVALID; + } + + memset((char *)&req_uncore_topology, 0, + sizeof(UNCORE_TOPOLOGY_INFO_NODE)); + if (copy_from_user(&req_uncore_topology, (void __user *)args->buf_usr_to_drv, + args->len_usr_to_drv)) { + SEP_DRV_LOG_ERROR_FLOW_OUT("Memory copy failure!"); + return OS_FAULT; + } + + for (dev = 0; dev < MAX_DEVICES; dev++) { + // skip if user does not require to scan this device + if (!UNCORE_TOPOLOGY_INFO_device_scan(&req_uncore_topology, + dev)) { + continue; + } + // skip if this device has been discovered + if (UNCORE_TOPOLOGY_INFO_device_scan(&uncore_topology, dev)) { + continue; + } + memcpy((U8 *)&(UNCORE_TOPOLOGY_INFO_device(&uncore_topology, + dev)), + (U8 *)&(UNCORE_TOPOLOGY_INFO_device(&req_uncore_topology, + dev)), + sizeof(UNCORE_PCIDEV_NODE)); + UNC_COMMON_PCI_Scan_For_Uncore((VOID *)&dev, dev, NULL); + } + + if (copy_to_user((void __user *)args->buf_drv_to_usr, &uncore_topology, + args->len_drv_to_usr)) { + SEP_DRV_LOG_ERROR_FLOW_OUT("Memory copy failure!"); + 
return OS_FAULT; + } + + SEP_DRV_LOG_FLOW_OUT("Success"); + return OS_SUCCESS; +} + +/* ------------------------------------------------------------------------- */ +/*! + * @fn U64 lwpmudrv_Get_Platform_Topology + * + * @brief Reads the MSR or PCI PLATFORM_INFO register if present + * + * @param arg Pointer to the IOCTL structure + * + * @return status + * + * Special Notes: + * + */ +static OS_STATUS lwpmudrv_Get_Platform_Topology(IOCTL_ARGS args) +{ + U32 dev; + U32 num_topology_devices = 0; + + SEP_DRV_LOG_FLOW_IN(""); + + if (args->buf_usr_to_drv == NULL) { + SEP_DRV_LOG_ERROR_FLOW_OUT( + "Invalid arguments (buf_usr_to_drv is NULL)!"); + return OS_INVALID; + } + if (args->len_usr_to_drv != sizeof(PLATFORM_TOPOLOGY_PROG_NODE)) { + SEP_DRV_LOG_ERROR_FLOW_OUT( + "Invalid arguments (unexpected len_usr_to_drv value)!"); + return OS_INVALID; + } + if (args->buf_drv_to_usr == NULL) { + SEP_DRV_LOG_ERROR_FLOW_OUT( + "Invalid arguments (buf_drv_to_usr is NULL)!"); + return OS_INVALID; + } + if (args->len_drv_to_usr != sizeof(PLATFORM_TOPOLOGY_PROG_NODE)) { + SEP_DRV_LOG_ERROR_FLOW_OUT( + "Invalid arguments (unexpected len_drv_to_usr value)!"); + return OS_INVALID; + } + + memset((char *)&req_platform_topology_prog_node, 0, + sizeof(PLATFORM_TOPOLOGY_PROG_NODE)); + if (copy_from_user(&req_platform_topology_prog_node, + (void __user *)args->buf_usr_to_drv, args->len_usr_to_drv)) { + SEP_DRV_LOG_ERROR_FLOW_OUT( + "Memory copy failure for req_platform_topology_prog_node!"); + return OS_FAULT; + } + + num_topology_devices = PLATFORM_TOPOLOGY_PROG_num_devices( + &req_platform_topology_prog_node); + for (dev = 0; dev < num_topology_devices; dev++) { + //skip if we have populated the register values already + if (PLATFORM_TOPOLOGY_PROG_topology_device_prog_valid( + &platform_topology_prog_node, dev)) { + continue; + } + memcpy((U8 *)&(PLATFORM_TOPOLOGY_PROG_topology_device( + &platform_topology_prog_node, dev)), + (U8 *)&(PLATFORM_TOPOLOGY_PROG_topology_device( + 
&req_platform_topology_prog_node, dev)), + sizeof(PLATFORM_TOPOLOGY_DISCOVERY_NODE)); + UNC_COMMON_Get_Platform_Topology(dev); + } + + if (copy_to_user((void __user *)args->buf_drv_to_usr, &platform_topology_prog_node, + args->len_drv_to_usr)) { + SEP_DRV_LOG_ERROR_FLOW_OUT( + "Memory copy failure for platform_topology_prog_node!"); + return OS_FAULT; + } + + SEP_DRV_LOG_FLOW_OUT("Success"); + return OS_SUCCESS; +} + +/* ------------------------------------------------------------------------- */ +/*! + * @fn OS_STATUS lwpmudrv_Flush(void) + * + * @brief Flushes the current contents of sampling buffers + * + * @param - none + * + * @return status + * + * Special Notes: + */ +static OS_STATUS lwpmudrv_Flush(void) +{ + OS_STATUS status = OS_FAULT; + SEP_DRV_LOG_FLOW_IN(""); + + if (!DRV_CONFIG_enable_cp_mode(drv_cfg)) { + SEP_DRV_LOG_ERROR( + "The flush failed. Continuous profiling, -cp, is not enabled!"); + goto clean_return; + } + + if (!DRIVER_STATE_IN(GET_DRIVER_STATE(), STATE_BIT_PAUSED)) { + SEP_DRV_LOG_ERROR( + "The flush failed. The driver should be paused!"); + goto clean_return; + } + + if (multi_pebs_enabled || sched_switch_enabled) { +#if !defined(DRV_SEP_ACRN_ON) + CONTROL_Invoke_Parallel(PEBS_Flush_Buffer, NULL); +#endif + } + + LINUXOS_Uninstall_Hooks(); + LINUXOS_Enum_Process_Modules(TRUE); + status = OUTPUT_Flush(); + LINUXOS_Install_Hooks(); + +clean_return: + SEP_DRV_LOG_FLOW_OUT("Status: %d.", status); + return status; +} + +/* ------------------------------------------------------------------------- */ +/*! 
+ * @fn U64 lwpmudrv_Get_Driver_log + * + * @brief Dumps the driver log + * + * @param arg Pointer to the IOCTL structure + * + * @return status + * + * Special Notes: + * + */ +static OS_STATUS lwpmudrv_Get_Driver_Log(IOCTL_ARGS args) +{ + SEP_DRV_LOG_FLOW_IN(""); + + if (args->buf_drv_to_usr == NULL) { + SEP_DRV_LOG_ERROR_FLOW_OUT( + "Invalid arguments (buf_drv_to_usr is NULL)!"); + return OS_INVALID; + } + if (args->len_drv_to_usr < sizeof(*DRV_LOG())) { + SEP_DRV_LOG_ERROR_FLOW_OUT( + "Invalid arguments (unexpected len_drv_to_usr value)!"); + return OS_INVALID; + } + + if (copy_to_user((void __user *)args->buf_drv_to_usr, DRV_LOG(), sizeof(*DRV_LOG()))) { + SEP_DRV_LOG_ERROR_FLOW_OUT("Memory copy failure!"); + return OS_FAULT; + } + + SEP_DRV_LOG_DISAMBIGUATE(); // keeps the driver log's footprint unique (has the highest disambiguator field) + + SEP_DRV_LOG_FLOW_OUT("Success"); + return OS_SUCCESS; +} + +/* ------------------------------------------------------------------------- */ +/*! 
+ * @fn U64 lwpmudrv_Control_Driver_log + * + * @brief Sets or/and gets the driver log's configuration + * + * @param arg Pointer to the IOCTL structure + * + * @return status + * + * Special Notes: + * + */ +static OS_STATUS lwpmudrv_Control_Driver_Log(IOCTL_ARGS args) +{ + DRV_LOG_CONTROL_NODE log_control; + U32 i; + + SEP_DRV_LOG_FLOW_IN(""); + + if (args->buf_usr_to_drv == NULL) { + SEP_DRV_LOG_ERROR_FLOW_OUT( + "Invalid arguments (buf_usr_to_drv is NULL)!"); + return OS_INVALID; + } + if (args->len_usr_to_drv < sizeof(log_control)) { + SEP_DRV_LOG_ERROR_FLOW_OUT( + "Invalid arguments (unexpected len_usr_to_drv value)!"); + return OS_INVALID; + } + + if (copy_from_user(&log_control, (void __user *)args->buf_usr_to_drv, + sizeof(log_control))) { + SEP_DRV_LOG_ERROR_FLOW_OUT("Memory copy failure!"); + return OS_FAULT; + } + + if (DRV_LOG_CONTROL_command(&log_control) == + DRV_LOG_CONTROL_COMMAND_ADJUST_VERBOSITY) { + for (i = 0; i < DRV_NB_LOG_CATEGORIES; i++) { + if (DRV_LOG_CONTROL_verbosities(&log_control)[i] == + LOG_VERBOSITY_UNSET) { + SEP_DRV_LOG_TRACE( + "Current verbosity mask for '%s' is 0x%x", + (UTILITY_Log_Category_Strings()[i]), + ((U32)DRV_LOG_VERBOSITY(i))); + DRV_LOG_CONTROL_verbosities(&log_control)[i] = + DRV_LOG_VERBOSITY(i); + } else if (DRV_LOG_CONTROL_verbosities( + &log_control)[i] == + LOG_VERBOSITY_DEFAULT) { + U32 verbosity; + switch (i) { + case DRV_LOG_CATEGORY_LOAD: + verbosity = + DRV_LOG_DEFAULT_LOAD_VERBOSITY; + break; + case DRV_LOG_CATEGORY_INIT: + verbosity = + DRV_LOG_DEFAULT_INIT_VERBOSITY; + break; + case DRV_LOG_CATEGORY_DETECTION: + verbosity = + DRV_LOG_DEFAULT_DETECTION_VERBOSITY; + break; + case DRV_LOG_CATEGORY_ERROR: + verbosity = + DRV_LOG_DEFAULT_ERROR_VERBOSITY; + break; + case DRV_LOG_CATEGORY_STATE_CHANGE: + verbosity = + DRV_LOG_DEFAULT_STATE_CHANGE_VERBOSITY; + break; + case DRV_LOG_CATEGORY_MARK: + verbosity = + DRV_LOG_DEFAULT_MARK_VERBOSITY; + break; + case DRV_LOG_CATEGORY_DEBUG: + verbosity = + 
DRV_LOG_DEFAULT_DEBUG_VERBOSITY; + break; + case DRV_LOG_CATEGORY_FLOW: + verbosity = + DRV_LOG_DEFAULT_FLOW_VERBOSITY; + break; + case DRV_LOG_CATEGORY_ALLOC: + verbosity = + DRV_LOG_DEFAULT_ALLOC_VERBOSITY; + break; + case DRV_LOG_CATEGORY_INTERRUPT: + verbosity = + DRV_LOG_DEFAULT_INTERRUPT_VERBOSITY; + break; + case DRV_LOG_CATEGORY_TRACE: + verbosity = + DRV_LOG_DEFAULT_TRACE_VERBOSITY; + break; + case DRV_LOG_CATEGORY_REGISTER: + verbosity = + DRV_LOG_DEFAULT_REGISTER_VERBOSITY; + break; + case DRV_LOG_CATEGORY_NOTIFICATION: + verbosity = + DRV_LOG_DEFAULT_NOTIFICATION_VERBOSITY; + break; + case DRV_LOG_CATEGORY_WARNING: + verbosity = + DRV_LOG_DEFAULT_WARNING_VERBOSITY; + break; + // default: + // SEP_DRV_LOG_ERROR( + // "Unspecified category '%s' when resetting to default!", + // UTILITY_Log_Category_Strings() + // [i]); + // verbosity = LOG_VERBOSITY_NONE; + // break; + } + SEP_DRV_LOG_INIT( + "Resetting verbosity mask for '%s' from 0x%x to 0x%x.", + UTILITY_Log_Category_Strings()[i], + (U32)DRV_LOG_VERBOSITY(i), verbosity); + DRV_LOG_VERBOSITY(i) = verbosity; + DRV_LOG_CONTROL_verbosities(&log_control)[i] = + verbosity; + } else { + SEP_DRV_LOG_INIT( + "Changing verbosity mask for '%s' from 0x%x to 0x%x.", + UTILITY_Log_Category_Strings()[i], + (U32)DRV_LOG_VERBOSITY(i), + (U32)DRV_LOG_CONTROL_verbosities( + &log_control)[i]); + DRV_LOG_VERBOSITY(i) = + DRV_LOG_CONTROL_verbosities( + &log_control)[i]; + } + } + + for (; i < DRV_MAX_NB_LOG_CATEGORIES; i++) { + DRV_LOG_CONTROL_verbosities(&log_control)[i] = + LOG_VERBOSITY_UNSET; + } + + if (copy_to_user((void __user *)args->buf_drv_to_usr, &log_control, + sizeof(log_control))) { + SEP_DRV_LOG_ERROR_FLOW_OUT("Memory copy failure!"); + return OS_FAULT; + } + } else if (DRV_LOG_CONTROL_command(&log_control) == + DRV_LOG_CONTROL_COMMAND_MARK) { + DRV_LOG_CONTROL_message( + &log_control)[DRV_LOG_CONTROL_MAX_DATA_SIZE - 1] = 0; + SEP_DRV_LOG_MARK("Mark: '%s'.", + DRV_LOG_CONTROL_message(&log_control)); + } else 
if (DRV_LOG_CONTROL_command(&log_control) == + DRV_LOG_CONTROL_COMMAND_QUERY_SIZE) { + DRV_LOG_CONTROL_log_size(&log_control) = sizeof(*DRV_LOG()); + SEP_DRV_LOG_TRACE("Driver log size is %u bytes.", + DRV_LOG_CONTROL_log_size(&log_control)); + if (copy_to_user((void __user *)args->buf_drv_to_usr, &log_control, + sizeof(log_control))) { + SEP_DRV_LOG_ERROR_FLOW_OUT("Memory copy failure!"); + return OS_FAULT; + } + } else if (DRV_LOG_CONTROL_command(&log_control) == + DRV_LOG_CONTROL_COMMAND_BENCHMARK) { + U32 nb_iterations = + *(U32 *)&DRV_LOG_CONTROL_message(&log_control); + + SEP_DRV_LOG_INIT_IN("Starting benchmark (%u iterations)...", + nb_iterations); + for (i = 0; i < nb_iterations; i++) { + (void)i; + } + SEP_DRV_LOG_INIT_OUT("Benchmark complete (%u/%u iterations).", + i, nb_iterations); + } + + SEP_DRV_LOG_FLOW_OUT("Success"); + return OS_SUCCESS; +} + +/* ------------------------------------------------------------------------- */ +/*! + * @fn U64 lwpmudrv_Get_Sample_Drop_Info + * + * @brief Get the information of dropped samples + * + * @param arg Pointer to the IOCTL structure + * + * @return status + * + * Special Notes: + * + */ +static OS_STATUS lwpmudrv_Get_Sample_Drop_Info(IOCTL_ARGS args) +{ + U32 size; + static SAMPLE_DROP_INFO_NODE req_sample_drop_info; +#if defined(DRV_SEP_ACRN_ON) + U32 i; + struct profiling_status *stats = NULL; +#endif + size = 0; + if (args->buf_drv_to_usr == NULL) { + return OS_INVALID; + } + if (args->len_drv_to_usr != sizeof(SAMPLE_DROP_INFO_NODE)) { + return OS_INVALID; + } + + memset((char *)&req_sample_drop_info, 0, sizeof(SAMPLE_DROP_INFO_NODE)); +#if defined(DRV_SEP_ACRN_ON) + stats = (struct profiling_status *)CONTROL_Allocate_Memory( + GLOBAL_STATE_num_cpus(driver_state)*sizeof(struct profiling_status)); + + if (stats == NULL) { + SEP_PRINT_ERROR("lwpmudrv_Start: Unable to allocate memory\n"); + return OS_NO_MEM; + } + memset(stats, 0, GLOBAL_STATE_num_cpus(driver_state)* + sizeof(struct profiling_status)); + + 
acrn_hypercall2(HC_PROFILING_OPS, PROFILING_GET_STATUS, + virt_to_phys(stats)); + + for (i = 0; i < GLOBAL_STATE_num_cpus(driver_state) + && size < MAX_SAMPLE_DROP_NODES; i++) { + if (stats[i].samples_logged || stats[i].samples_dropped) { + SAMPLE_DROP_INFO_drop_info( + &req_sample_drop_info, size).os_id = OS_ID_ACORN; + SAMPLE_DROP_INFO_drop_info( + &req_sample_drop_info, size).cpu_id = i; + SAMPLE_DROP_INFO_drop_info( + &req_sample_drop_info, size).sampled = stats[i].samples_logged; + SAMPLE_DROP_INFO_drop_info( + &req_sample_drop_info, size).dropped = stats[i].samples_dropped; + size++; + } + } + + stats = CONTROL_Free_Memory(stats); +#endif + SAMPLE_DROP_INFO_size(&req_sample_drop_info) = size; + + if (copy_to_user((void __user *)args->buf_drv_to_usr, + &req_sample_drop_info, args->len_drv_to_usr)) { + return OS_FAULT; + } + + return OS_SUCCESS; +} + +/* ------------------------------------------------------------------------- */ +/*! + * @fn U64 lwpmudrv_Get_Drv_Setup_Info + * + * @brief Get numerous information of driver + * + * @param arg Pointer to the IOCTL structure + * + * @return status + * + * Special Notes: + * + */ +static OS_STATUS lwpmudrv_Get_Drv_Setup_Info(IOCTL_ARGS args) +{ +#define VMM_VENDOR_STR_LEN 12 + U32 pebs_unavailable = 0; + U64 rbx, rcx, rdx, num_basic_functions; + S8 vmm_vendor_name[VMM_VENDOR_STR_LEN + 1]; + S8 *vmm_vmware_str = "VMwareVMware"; + S8 *vmm_kvm_str = "KVMKVMKVM\0\0\0"; + S8 *vmm_mshyperv_str = "Microsoft Hv"; + S8 *vmm_acrn_str = "ACRNACRNACRN"; +#if defined(DRV_USE_KAISER) + int *kaiser_enabled_ptr; + int *kaiser_pti_option; +#endif + bool is_hypervisor = FALSE; + + SEP_DRV_LOG_FLOW_IN("Args: %p.", args); + + if (args->buf_drv_to_usr == NULL) { + SEP_DRV_LOG_ERROR_FLOW_OUT( + "Invalid arguments (buf_drv_to_usr is NULL)!"); + return OS_INVALID; + } + if (args->len_drv_to_usr != sizeof(DRV_SETUP_INFO_NODE)) { + SEP_DRV_LOG_ERROR_FLOW_OUT( + "Invalid arguments (unexpected len_drv_to_usr value)!"); + return OS_INVALID; + 
} + + memset((char *)&req_drv_setup_info, 0, sizeof(DRV_SETUP_INFO_NODE)); + + DRV_SETUP_INFO_nmi_mode(&req_drv_setup_info) = 1; + +#if defined(DRV_SEP_ACRN_ON) + is_hypervisor = TRUE; +#endif + if (boot_cpu_has(X86_FEATURE_HYPERVISOR) || is_hypervisor) { + UTILITY_Read_Cpuid(0x40000000, &num_basic_functions, &rbx, &rcx, + &rdx); + memcpy(vmm_vendor_name, &rbx, 4); + memcpy(vmm_vendor_name + 4, &rcx, 4); + memcpy(vmm_vendor_name + 8, &rdx, 4); + memcpy(vmm_vendor_name + 12, "\0", 1); + + if (!strncmp(vmm_vendor_name, vmm_vmware_str, + VMM_VENDOR_STR_LEN)) { + DRV_SETUP_INFO_vmm_mode(&req_drv_setup_info) = 1; + DRV_SETUP_INFO_vmm_vendor(&req_drv_setup_info) = + DRV_VMM_VMWARE; + } else if (!strncmp(vmm_vendor_name, vmm_kvm_str, + VMM_VENDOR_STR_LEN)) { + DRV_SETUP_INFO_vmm_mode(&req_drv_setup_info) = 1; + DRV_SETUP_INFO_vmm_vendor(&req_drv_setup_info) = + DRV_VMM_KVM; + } else if (!strncmp(vmm_vendor_name, vmm_acrn_str, + VMM_VENDOR_STR_LEN)) { + DRV_SETUP_INFO_vmm_mode(&req_drv_setup_info) = 1; + DRV_SETUP_INFO_vmm_vendor(&req_drv_setup_info) = + DRV_VMM_ACRN; + } else if (!strncmp(vmm_vendor_name, vmm_mshyperv_str, + VMM_VENDOR_STR_LEN)) { + DRV_SETUP_INFO_vmm_mode(&req_drv_setup_info) = 1; + DRV_SETUP_INFO_vmm_vendor(&req_drv_setup_info) = + DRV_VMM_HYPERV; + if (num_basic_functions >= 0x40000003) { + UTILITY_Read_Cpuid(0x40000003, + &num_basic_functions, &rbx, + &rcx, &rdx); + if (rbx & 0x1) { + DRV_SETUP_INFO_vmm_guest_vm( + &req_drv_setup_info) = 0; + } else { + DRV_SETUP_INFO_vmm_guest_vm( + &req_drv_setup_info) = 1; + } + } + } + } +#if LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 32) + else if (xen_domain()) { + DRV_SETUP_INFO_vmm_mode(&req_drv_setup_info) = 1; + DRV_SETUP_INFO_vmm_vendor(&req_drv_setup_info) = DRV_VMM_XEN; + + if (xen_initial_domain()) { + DRV_SETUP_INFO_vmm_guest_vm(&req_drv_setup_info) = 0; + } else { + DRV_SETUP_INFO_vmm_guest_vm(&req_drv_setup_info) = 1; + } + } +#endif + else { + if (LINUXOS_Check_KVM_Guest_Process()) { + 
DRV_SETUP_INFO_vmm_mode(&req_drv_setup_info) = 1; + DRV_SETUP_INFO_vmm_vendor(&req_drv_setup_info) = + DRV_VMM_KVM; + } + } + + pebs_unavailable = (SYS_Read_MSR(IA32_MISC_ENABLE) >> 12) & 0x1; + if (!pebs_unavailable) { + if (!wrmsr_safe(IA32_PEBS_ENABLE, 0, 0)) { + DRV_SETUP_INFO_pebs_accessible(&req_drv_setup_info) = 1; + } + } + +#if defined(DRV_USE_KAISER) + kaiser_enabled_ptr = (int *)UTILITY_Find_Symbol("kaiser_enabled"); + if (kaiser_enabled_ptr && *kaiser_enabled_ptr) { + SEP_DRV_LOG_INIT( + "KAISER is enabled! (&kaiser_enable=%p, val: %d).", + kaiser_enabled_ptr, *kaiser_enabled_ptr); + DRV_SETUP_INFO_page_table_isolation(&req_drv_setup_info) = + DRV_SETUP_INFO_PTI_KAISER; + } else { + kaiser_pti_option = (int *)UTILITY_Find_Symbol("pti_option"); + if (kaiser_pti_option) { + SEP_DRV_LOG_INIT( + "KAISER pti_option=%p pti_option val=%d", + kaiser_pti_option, *kaiser_pti_option); +#if defined(X86_FEATURE_PTI) + if (static_cpu_has(X86_FEATURE_PTI)) { + SEP_DRV_LOG_INIT( + "KAISER is Enabled or in Auto Enable!\n"); + DRV_SETUP_INFO_page_table_isolation( + &req_drv_setup_info) = + DRV_SETUP_INFO_PTI_KAISER; + } else { + SEP_DRV_LOG_INIT( + "KAISER is present but disabled!"); + } +#endif + } + } + if (!kaiser_enabled_ptr && !kaiser_pti_option) { + SEP_DRV_LOG_ERROR( + "Could not find KAISER information. 
Assuming no KAISER!"); + } +#elif defined(DRV_USE_PTI) + if (static_cpu_has(X86_FEATURE_PTI)) { + SEP_DRV_LOG_INIT("Kernel Page Table Isolation is enabled!"); + DRV_SETUP_INFO_page_table_isolation(&req_drv_setup_info) = + DRV_SETUP_INFO_PTI_KPTI; + } +#endif + + SEP_DRV_LOG_TRACE("DRV_SETUP_INFO nmi_mode %d.", + DRV_SETUP_INFO_nmi_mode(&req_drv_setup_info)); + SEP_DRV_LOG_TRACE("DRV_SETUP_INFO vmm_mode %d.", + DRV_SETUP_INFO_vmm_mode(&req_drv_setup_info)); + SEP_DRV_LOG_TRACE("DRV_SETUP_INFO vmm_vendor %d.", + DRV_SETUP_INFO_vmm_vendor(&req_drv_setup_info)); + SEP_DRV_LOG_TRACE("DRV_SETUP_INFO vmm_guest_vm %d.", + DRV_SETUP_INFO_vmm_guest_vm(&req_drv_setup_info)); + SEP_DRV_LOG_TRACE("DRV_SETUP_INFO pebs_accessible %d.", + DRV_SETUP_INFO_pebs_accessible(&req_drv_setup_info)); + SEP_DRV_LOG_TRACE( + "DRV_SETUP_INFO page_table_isolation %d.", + DRV_SETUP_INFO_page_table_isolation(&req_drv_setup_info)); + +#if defined(DRV_CPU_HOTPLUG) + DRV_SETUP_INFO_cpu_hotplug_mode(&req_drv_setup_info) = 1; +#endif + + if (copy_to_user((void __user *)args->buf_drv_to_usr, &req_drv_setup_info, + args->len_drv_to_usr)) { + SEP_DRV_LOG_ERROR_FLOW_OUT("Memory allocation failure!"); + return OS_FAULT; + } + + SEP_DRV_LOG_FLOW_OUT("Success."); + return OS_SUCCESS; +} + +/* ------------------------------------------------------------------------- */ +/*! 
+ * @fn U64 lwpmudrv_Set_Emon_Buffer_Driver_Helper + * + * @brief Setup EMON buffer driver helper + * + * @param arg Pointer to the IOCTL structure + * + * @return status + * + * Special Notes: + * + */ +static OS_STATUS lwpmudrv_Set_Emon_Buffer_Driver_Helper(IOCTL_ARGS args) +{ + SEP_DRV_LOG_FLOW_IN(""); + + if (args->len_usr_to_drv == 0 || args->buf_usr_to_drv == NULL) { + SEP_DRV_LOG_ERROR_FLOW_OUT("Error: Invalid arguments."); + return OS_INVALID; + } + + if (!emon_buffer_driver_helper) { + emon_buffer_driver_helper = + CONTROL_Allocate_Memory(args->len_usr_to_drv); + if (emon_buffer_driver_helper == NULL) { + SEP_DRV_LOG_ERROR_FLOW_OUT( + "Memory allocation failure for emon_buffer_driver_helper!"); + return OS_NO_MEM; + } + } + + if (copy_from_user(emon_buffer_driver_helper, (void __user *)args->buf_usr_to_drv, + args->len_usr_to_drv)) { + SEP_DRV_LOG_ERROR_FLOW_OUT( + "Memory copy failure for device num units!"); + return OS_FAULT; + } + + SEP_DRV_LOG_FLOW_OUT("Success"); + return OS_SUCCESS; +} + +/* ------------------------------------------------------------------------- */ +/*! + * @fn U64 lwpmudrv_Set_OSID + * + * @brief Set OSID with specified value + * + * @param arg Pointer to the IOCTL structure + * + * @return status + * + * Special Notes: + * + */ + +static OS_STATUS lwpmudrv_Set_OSID(IOCTL_ARGS args) +{ + OS_STATUS status = OS_SUCCESS; + + if (args->buf_usr_to_drv == NULL) { + SEP_PRINT_ERROR("Invalid arguments (buf_usr_to_drv is NULL)!"); + return OS_INVALID; + } + if (args->len_usr_to_drv != sizeof(U32)) { + SEP_PRINT_ERROR( + "Invalid arguments (unexpected len_usr_to_drv value)!"); + return OS_INVALID; + } + + status = get_user(osid, (U32 __user *)args->buf_usr_to_drv); + return status; +} + +/* ------------------------------------------------------------------------- */ +/*! 
+ * @fn static OS_STATUS lwpmudrv_Get_Agent_Mode(IOCTL_ARGS arg) + * + * @param arg - pointer to the IOCTL_ARGS structure + * + * @return OS_STATUS + * + * @brief Local function that copies agent mode from drv to usr code + * @brief Returns status. + * + * Special Notes + */ +static OS_STATUS lwpmudrv_Get_Agent_Mode(IOCTL_ARGS args) +{ + OS_STATUS status; + + if (args->buf_drv_to_usr == NULL) { + SEP_PRINT_ERROR("Invalid arguments (buf_drv_to_usr is NULL)!"); + return OS_INVALID; + } + if (args->len_drv_to_usr != sizeof(U32)) { + SEP_PRINT_ERROR( + "Invalid arguments (unexpected len_drv_to_usr value)!"); + return OS_INVALID; + } + +#if defined(DRV_SEP_ACRN_ON) + status = put_user(HOST_VM_AGENT, (U32 __user *)args->buf_drv_to_usr); + sched_switch_enabled = TRUE; +#else + status = put_user(-1, (U32 __user *)args->buf_drv_to_usr); + SEP_PRINT_ERROR("Invalid agent mode..!"); + status = OS_INVALID; +#endif + + return status; +} + +/* ------------------------------------------------------------------------- */ +/*! + * @fn static OS_STATUS lwpmudrv_Get_Num_Of_Vms(IOCTL_ARGS arg) + * + * @param arg - pointer to the IOCTL_ARGS structure + * + * @return OS_STATUS + * + * @brief Local function to get number of VMS available + * @brief Returns status. 
+ * + * Special Notes + */ +static OS_STATUS lwpmudrv_Get_Num_Of_Vms(IOCTL_ARGS args) + +{ + VM_OSID_MAP_NODE vm_map; +#if defined(DRV_SEP_ACRN_ON) + U32 i; +#endif + if (args->buf_drv_to_usr == NULL) { + SEP_PRINT_ERROR("Invalid arguments (buf_drv_to_usr is NULL)!"); + return OS_INVALID; + } + + if (args->len_drv_to_usr != sizeof(VM_OSID_MAP_NODE)) { + SEP_PRINT_ERROR( + "Invalid arguments (unexpected len_drv_to_usr value)!"); + return OS_INVALID; + } + + memset(&vm_map, 0, sizeof(VM_OSID_MAP_NODE)); + +#if defined(DRV_SEP_ACRN_ON) + if (vm_info_list == NULL) { + vm_info_list = + CONTROL_Allocate_Memory(sizeof(struct profiling_vm_info_list)); + } + memset(vm_info_list, 0, sizeof(struct profiling_vm_info_list)); + + BUG_ON(!virt_addr_valid(vm_info_list)); + + acrn_hypercall2(HC_PROFILING_OPS, PROFILING_GET_VMINFO, + virt_to_phys(vm_info_list)); + + vm_map.num_vms = 0; + for (i = 0; i < vm_info_list->num_vms; i++) { + if (vm_info_list->vm_list[i].num_vcpus != 0) { + vm_map.osid[i] = (U32)vm_info_list->vm_list[i].vm_id; + vm_map.num_vms++; + } + } + +#endif + if (copy_to_user((void __user *)args->buf_drv_to_usr, + &vm_map, args->len_drv_to_usr)) { + SEP_DRV_LOG_ERROR_FLOW_OUT("Memory copy failure!"); + return OS_FAULT; + } + + return OS_SUCCESS; + +} +/* ------------------------------------------------------------------------- */ +/*! + * @fn static OS_STATUS lwpmudrv_Get_Cpu_Map_Info(IOCTL_ARGS arg) + * + * @param arg - pointer to the IOCTL_ARGS structure + * + * @return OS_STATUS + * + * @brief Local function to get pcpu-vcpu mapping info + * @brief Returns status. 
+ * + * Special Notes + */ +static OS_STATUS lwpmudrv_Get_Cpu_Map_Info(IOCTL_ARGS args) +{ + CPU_MAP_TRACE_LIST cpumap; + DRV_STATUS status = OS_SUCCESS; +#if defined(DRV_SEP_ACRN_ON) + U32 i, j; +#endif + + if ((args->buf_drv_to_usr == NULL) || + (args->len_drv_to_usr != sizeof(CPU_MAP_TRACE_LIST_NODE))) { + SEP_PRINT_ERROR("Invalid drv_to_usr arguments!"); + return OS_INVALID; + } + + if ((args->buf_usr_to_drv == NULL) || + (args->len_usr_to_drv != sizeof(CPU_MAP_TRACE_LIST_NODE))) { + SEP_PRINT_ERROR("Invalid usr_to_drv arguments!"); + return OS_INVALID; + } + + cpumap = (CPU_MAP_TRACE_LIST) + CONTROL_Allocate_Memory(sizeof(CPU_MAP_TRACE_LIST_NODE)); + if (cpumap == NULL) { + SEP_DRV_LOG_ERROR_FLOW_OUT("Memory allocation failure"); + return OS_NO_MEM; + } + + if (copy_from_user(cpumap, (void __user *)args->buf_usr_to_drv, + sizeof(CPU_MAP_TRACE_LIST_NODE))) { + SEP_DRV_LOG_ERROR_FLOW_OUT("Memory copy failure"); + status = OS_FAULT; + goto cleanup; + } + +#if defined(DRV_SEP_ACRN_ON) + if (vm_info_list == NULL) { + SEP_DRV_LOG_ERROR_FLOW_OUT("vm_info_list is NULL!"); + status = OS_INVALID; + goto cleanup; + } + + SEP_DRV_LOG_TRACE("CPU mapping for osid %d ", cpumap->osid); + for (i = 0; i < vm_info_list->num_vms; i++) { + if (vm_info_list->vm_list[i].vm_id == cpumap->osid) { + for (j = 0; + j < vm_info_list->vm_list[i].num_vcpus; j++) { + UTILITY_Read_TSC(&(cpumap->entries[j].tsc)); + cpumap->entries[j].is_static = 1; + cpumap->entries[j].vcpu_id = + vm_info_list->vm_list[i].cpu_map[j].vcpu_id; + cpumap->entries[j].pcpu_id = + vm_info_list->vm_list[i].cpu_map[j].pcpu_id; + cpumap->entries[j].os_id = + vm_info_list->vm_list[i].vm_id; + cpumap->num_entries++; + } + } + } +#endif + if (copy_to_user((void __user *)args->buf_drv_to_usr, + cpumap, args->len_drv_to_usr)) { + SEP_DRV_LOG_ERROR_FLOW_OUT("Memory copy failure!"); + status = OS_FAULT; + goto cleanup; + } + +cleanup: + cpumap = CONTROL_Free_Memory(cpumap); + return status; +} + + 
+/******************************************************************************* + * External Driver functions - Open + * This function is common to all drivers + *******************************************************************************/ + +static int lwpmu_Open(struct inode *inode, struct file *filp) +{ + SEP_DRV_LOG_TRACE_IN("Maj:%d, min:%d", imajor(inode), iminor(inode)); + + filp->private_data = container_of(inode->i_cdev, LWPMU_DEV_NODE, cdev); + + SEP_DRV_LOG_TRACE_OUT(""); + return 0; +} + +/******************************************************************************* + * External Driver functions + * These functions are registered into the file operations table that + * controls this device. + * Open, Close, Read, Write, Release + *******************************************************************************/ + +static ssize_t lwpmu_Read(struct file *filp, char __user *buf, size_t count, + loff_t *f_pos) +{ + unsigned long retval; + + SEP_DRV_LOG_TRACE_IN(""); + + /* Transfering data to user space */ + SEP_DRV_LOG_TRACE("Dispatched with count=%d.", (S32)count); + if (copy_to_user((void __user *)buf, &LWPMU_DEV_buffer(lwpmu_control), 1)) { + retval = OS_FAULT; + SEP_DRV_LOG_ERROR_TRACE_OUT("Memory copy failure!"); + return retval; + } + /* Changing reading position as best suits */ + if (*f_pos == 0) { + *f_pos += 1; + SEP_DRV_LOG_TRACE_OUT("Return value: 1."); + return 1; + } + + SEP_DRV_LOG_TRACE_OUT("Return value: 0."); + return 0; +} + +static ssize_t lwpmu_Write(struct file *filp, const char __user *buf, size_t count, + loff_t *f_pos) +{ + unsigned long retval; + + SEP_DRV_LOG_TRACE_IN(""); + + SEP_DRV_LOG_TRACE("Dispatched with count=%d.", (S32)count); + if (copy_from_user(&LWPMU_DEV_buffer(lwpmu_control), (void __user *)(buf + count - 1), + 1)) { + retval = OS_FAULT; + SEP_DRV_LOG_ERROR_TRACE_OUT("Memory copy failure!"); + return retval; + } + + SEP_DRV_LOG_TRACE_OUT("Return value: 1."); + return 1; +} + +/* 
------------------------------------------------------------------------- */
/*!
 * @fn extern IOCTL_OP_TYPE lwpmu_Service_IOCTL(IOCTL_USE_NODE, filp, cmd, arg)
 *
 * @param IOCTL_USE_INODE - Used for pre 2.6.32 kernels
 * @param struct file *filp - file pointer
 * @param unsigned int cmd - IOCTL command
 * @param unsigned long arg - args to the IOCTL command
 *
 * @return OS_STATUS
 *
 * @brief SEP Worker function that handles IOCTL requests from the user mode.
 *
 * Special Notes
 *     State/log queries are answered before taking ioctl_lock so they stay
 *     responsive; every other command is dispatched under the lock with the
 *     active-ioctl marker set for the duration.
 */
static IOCTL_OP_TYPE lwpmu_Service_IOCTL(IOCTL_USE_INODE struct file *filp,
					 unsigned int cmd,
					 IOCTL_ARGS_NODE local_args)
{
	int status = OS_SUCCESS;

	SEP_DRV_LOG_TRACE_IN("Command: %d.", cmd);

	/* Lock-free fast paths: state and log queries. */
	if (cmd == DRV_OPERATION_GET_DRIVER_STATE) {
		SEP_DRV_LOG_TRACE("DRV_OPERATION_GET_DRIVER_STATE.");
		status = lwpmudrv_Get_Driver_State(&local_args);
		SEP_DRV_LOG_TRACE_OUT("Return value for command %d: %d", cmd,
				      status);
		return status;
	}
	if (cmd == DRV_OPERATION_GET_DRIVER_LOG) {
		SEP_DRV_LOG_TRACE("DRV_OPERATION_GET_DRIVER_LOG.");
		status = lwpmudrv_Get_Driver_Log(&local_args);
		SEP_DRV_LOG_TRACE_OUT("Return value for command %d: %d", cmd,
				      status);
		return status;
	}
	if (cmd == DRV_OPERATION_CONTROL_DRIVER_LOG) {
		SEP_DRV_LOG_TRACE("DRV_OPERATION_CONTROL_DRIVER_LOG.");
		status = lwpmudrv_Control_Driver_Log(&local_args);
		SEP_DRV_LOG_TRACE_OUT("Return value for command %d: %d", cmd,
				      status);
		return status;
	}
	/* While a stop is in progress, new commands are silently accepted
	 * but not executed. */
	if (GET_DRIVER_STATE() == DRV_STATE_PREPARE_STOP) {
		SEP_DRV_LOG_TRACE("skipping ioctl -- processing stop.");
		SEP_DRV_LOG_TRACE_OUT("Return value for command %d: %d", cmd,
				      status);
		return status;
	}

	MUTEX_LOCK(ioctl_lock);
	UTILITY_Driver_Set_Active_Ioctl(cmd);

	switch (cmd) {
	/*
	 * Common IOCTL commands
	 */

	case DRV_OPERATION_VERSION:
		SEP_DRV_LOG_TRACE("DRV_OPERATION_VERSION.");
		status = lwpmudrv_Version(&local_args);
		break;

	case DRV_OPERATION_RESERVE:
		SEP_DRV_LOG_TRACE("DRV_OPERATION_RESERVE.");
		status = lwpmudrv_Reserve(&local_args);
		break;

	case DRV_OPERATION_INIT_DRIVER:
		SEP_DRV_LOG_TRACE("DRV_OPERATION_INIT_DRIVER.");
		status = lwpmudrv_Initialize_Driver(local_args.buf_usr_to_drv,
						    local_args.len_usr_to_drv);
		break;

	case DRV_OPERATION_INIT:
		SEP_DRV_LOG_TRACE("DRV_OPERATION_INIT.");
		status = lwpmudrv_Initialize(local_args.buf_usr_to_drv,
					     local_args.len_usr_to_drv);
		break;

	case DRV_OPERATION_INIT_PMU:
		SEP_DRV_LOG_TRACE("DRV_OPERATION_INIT_PMU.");
		status = lwpmudrv_Init_PMU(&local_args);
		break;

	case DRV_OPERATION_SET_CPU_MASK:
		SEP_DRV_LOG_TRACE("DRV_OPERATION_SET_CPU_MASK.");
		status = lwpmudrv_Set_CPU_Mask(local_args.buf_usr_to_drv,
					       local_args.len_usr_to_drv);
		break;

	case DRV_OPERATION_START:
		SEP_DRV_LOG_TRACE("DRV_OPERATION_START.");
		status = lwpmudrv_Start();
		break;

	case DRV_OPERATION_STOP:
		SEP_DRV_LOG_TRACE("DRV_OPERATION_STOP.");
		status = lwpmudrv_Prepare_Stop();
		/* Drop and reacquire the lock between the two stop phases so
		 * other ioctls (e.g. log queries) are not starved while the
		 * collection winds down. */
		UTILITY_Driver_Set_Active_Ioctl(0);
		MUTEX_UNLOCK(ioctl_lock);

		MUTEX_LOCK(ioctl_lock);
		UTILITY_Driver_Set_Active_Ioctl(cmd);
		if (GET_DRIVER_STATE() == DRV_STATE_PREPARE_STOP) {
			status = lwpmudrv_Finish_Stop();
			if (status == OS_SUCCESS) {
				// if stop was successful, relevant memory should have been freed,
				// so try to compact the memory tracker
				CONTROL_Memory_Tracker_Compaction();
			}
		}
		break;

	case DRV_OPERATION_PAUSE:
		SEP_DRV_LOG_TRACE("DRV_OPERATION_PAUSE.");
		status = lwpmudrv_Pause();
		break;

	case DRV_OPERATION_RESUME:
		SEP_DRV_LOG_TRACE("DRV_OPERATION_RESUME.");
		status = lwpmudrv_Resume();
		break;

	case DRV_OPERATION_EM_GROUPS:
		SEP_DRV_LOG_TRACE("DRV_OPERATION_EM_GROUPS.");
		status = lwpmudrv_Set_EM_Config(&local_args);
		break;

	case DRV_OPERATION_EM_CONFIG_NEXT:
		SEP_DRV_LOG_TRACE("DRV_OPERATION_EM_CONFIG_NEXT.");
		status = lwpmudrv_Configure_Events(&local_args);
		break;

	case DRV_OPERATION_NUM_DESCRIPTOR:
		SEP_DRV_LOG_TRACE("DRV_OPERATION_NUM_DESCRIPTOR.");
		status = lwpmudrv_Set_Sample_Descriptors(&local_args);
		break;

	case DRV_OPERATION_DESC_NEXT:
		SEP_DRV_LOG_TRACE("DRV_OPERATION_DESC_NEXT.");
		status = lwpmudrv_Configure_Descriptors(&local_args);
		break;

	case DRV_OPERATION_GET_NORMALIZED_TSC:
		SEP_DRV_LOG_TRACE("DRV_OPERATION_GET_NORMALIZED_TSC.");
		status = lwpmudrv_Get_Normalized_TSC(&local_args);
		break;

	case DRV_OPERATION_GET_NORMALIZED_TSC_STANDALONE:
		SEP_DRV_LOG_TRACE(
			"DRV_OPERATION_GET_NORMALIZED_TSC_STANDALONE.");
		status = lwpmudrv_Get_Normalized_TSC(&local_args);
		break;

	case DRV_OPERATION_NUM_CORES:
		SEP_DRV_LOG_TRACE("DRV_OPERATION_NUM_CORES.");
		status = lwpmudrv_Get_Num_Cores(&local_args);
		break;

	case DRV_OPERATION_KERNEL_CS:
		SEP_DRV_LOG_TRACE("DRV_OPERATION_KERNEL_CS.");
		status = lwpmudrv_Get_KERNEL_CS(&local_args);
		break;

	case DRV_OPERATION_SET_UID:
		SEP_DRV_LOG_TRACE("DRV_OPERATION_SET_UID.");
		status = lwpmudrv_Set_UID(&local_args);
		break;

	case DRV_OPERATION_TSC_SKEW_INFO:
		SEP_DRV_LOG_TRACE("DRV_OPERATION_TSC_SKEW_INFO.");
		status = lwpmudrv_Get_TSC_Skew_Info(&local_args);
		break;

	case DRV_OPERATION_COLLECT_SYS_CONFIG:
		SEP_DRV_LOG_TRACE("DRV_OPERATION_COLLECT_SYS_CONFIG.");
		status = lwpmudrv_Collect_Sys_Config(&local_args);
		break;

	case DRV_OPERATION_GET_SYS_CONFIG:
		SEP_DRV_LOG_TRACE("DRV_OPERATION_GET_SYS_CONFIG.");
		status = lwpmudrv_Sys_Config(&local_args);
		break;

	case DRV_OPERATION_TERMINATE:
		SEP_DRV_LOG_TRACE("DRV_OPERATION_TERMINATE.");
		status = lwpmudrv_Terminate();
		break;

	case DRV_OPERATION_SET_CPU_TOPOLOGY:
		SEP_DRV_LOG_TRACE("DRV_OPERATION_SET_CPU_TOPOLOGY.");
		status = lwpmudrv_Setup_Cpu_Topology(&local_args);
		break;

	case DRV_OPERATION_GET_NUM_CORE_CTRS:
		SEP_DRV_LOG_TRACE("DRV_OPERATION_GET_NUM_CORE_CTRS.");
		status = lwpmudrv_Samp_Read_Num_Of_Core_Counters(&local_args);
		break;

	case DRV_OPERATION_GET_PLATFORM_INFO:
		SEP_DRV_LOG_TRACE("DRV_OPERATION_GET_PLATFORM_INFO.");
		status = lwpmudrv_Get_Platform_Info(&local_args);
		break;

	case DRV_OPERATION_READ_MSRS:
		SEP_DRV_LOG_TRACE("DRV_OPERATION_READ_MSRs.");
		status = lwpmudrv_Read_MSRs(&local_args);
		break;

	case DRV_OPERATION_SWITCH_GROUP:
		SEP_DRV_LOG_TRACE("DRV_OPERATION_SWITCH_GROUP.");
		status = lwpmudrv_Switch_Group();
		break;

	case DRV_OPERATION_SET_OSID:
		SEP_DRV_LOG_TRACE("DRV_OPERATION_IOCTL_SET_OSID\n");
		status = lwpmudrv_Set_OSID(&local_args);
		break;

	case DRV_OPERATION_GET_AGENT_MODE:
		SEP_DRV_LOG_TRACE("DRV_OPERATION_GET_AGENT_MODE\n");
		status = lwpmudrv_Get_Agent_Mode(&local_args);
		break;

	case DRV_OPERATION_GET_VCPU_MAP:
		SEP_DRV_LOG_TRACE("DRV_OPERATION_GET_CPU_MAP\n");
		status = lwpmudrv_Get_Cpu_Map_Info(&local_args);
		break;

	case DRV_OPERATION_GET_NUM_VM:
		SEP_DRV_LOG_TRACE("DRV_OPERATION_GET_NUM_VM\n");
		status = lwpmudrv_Get_Num_Of_Vms(&local_args);
		break;

	/*
	 * EMON-specific IOCTL commands
	 */
	case DRV_OPERATION_READ_MSR:
		SEP_DRV_LOG_TRACE("DRV_OPERATION_READ_MSR.");
		status = lwpmudrv_Read_MSR_All_Cores(&local_args);
		break;

	case DRV_OPERATION_WRITE_MSR:
		SEP_DRV_LOG_TRACE("DRV_OPERATION_WRITE_MSR.");
		status = lwpmudrv_Write_MSR_All_Cores(&local_args);
		break;

	case DRV_OPERATION_READ_SWITCH_GROUP:
		SEP_DRV_LOG_TRACE("DRV_OPERATION_READ_SWITCH_GROUP.");
		status = lwpmudrv_Read_Counters_And_Switch_Group(&local_args);
		break;

	case DRV_OPERATION_READ_AND_RESET:
		SEP_DRV_LOG_TRACE("DRV_OPERATION_READ_AND_RESET.");
		status = lwpmudrv_Read_And_Reset_Counters(&local_args);
		break;

	/*
	 * Platform-specific IOCTL commands (IA32 and Intel64)
	 */

	case DRV_OPERATION_INIT_UNC:
		SEP_DRV_LOG_TRACE("DRV_OPERATION_INIT_UNC.");
		status = lwpmudrv_Initialize_UNC(local_args.buf_usr_to_drv,
						 local_args.len_usr_to_drv);
		break;

	case DRV_OPERATION_EM_GROUPS_UNC:
		SEP_DRV_LOG_TRACE("DRV_OPERATION_EM_GROUPS_UNC.");
		status = lwpmudrv_Set_EM_Config_UNC(&local_args);
		break;

	case DRV_OPERATION_EM_CONFIG_NEXT_UNC:
		SEP_DRV_LOG_TRACE("DRV_OPERATION_EM_CONFIG_NEXT_UNC.");
		status = lwpmudrv_Configure_Events_UNC(&local_args);
		break;

	case DRV_OPERATION_LBR_INFO:
		SEP_DRV_LOG_TRACE("DRV_OPERATION_LBR_INFO.");
		status = lwpmudrv_LBR_Info(&local_args);
		break;

	case DRV_OPERATION_PWR_INFO:
		SEP_DRV_LOG_TRACE("DRV_OPERATION_PWR_INFO.");
		status = lwpmudrv_PWR_Info(&local_args);
		break;

	case DRV_OPERATION_INIT_NUM_DEV:
		SEP_DRV_LOG_TRACE("DRV_OPERATION_INIT_NUM_DEV.");
		status = lwpmudrv_Initialize_Num_Devices(&local_args);
		break;
	case DRV_OPERATION_GET_NUM_SAMPLES:
		SEP_DRV_LOG_TRACE("DRV_OPERATION_GET_NUM_SAMPLES.");
		status = lwpmudrv_Get_Num_Samples(&local_args);
		break;

	case DRV_OPERATION_SET_DEVICE_NUM_UNITS:
		SEP_DRV_LOG_TRACE("DRV_OPERATION_SET_DEVICE_NUM_UNITS.");
		status = lwpmudrv_Set_Device_Num_Units(&local_args);
		break;

	case DRV_OPERATION_GET_INTERVAL_COUNTS:
		SEP_DRV_LOG_TRACE("DRV_OPERATION_GET_INTERVAL_COUNTS.");
		/* NOTE(review): the helper's result is discarded here, unlike
		 * every other case — confirm whether "status =" was intended. */
		lwpmudrv_Get_Interval_Counts(&local_args);
		break;

	case DRV_OPERATION_SET_SCAN_UNCORE_TOPOLOGY_INFO:
		SEP_DRV_LOG_TRACE(
			"DRV_OPERATION_SET_SCAN_UNCORE_TOPOLOGY_INFO.");
		status =
			lwpmudrv_Set_Uncore_Topology_Info_And_Scan(&local_args);
		break;

	case DRV_OPERATION_GET_UNCORE_TOPOLOGY:
		SEP_DRV_LOG_TRACE("DRV_OPERATION_GET_UNCORE_TOPOLOGY.");
		status = lwpmudrv_Get_Uncore_Topology(&local_args);
		break;

	case DRV_OPERATION_GET_PLATFORM_TOPOLOGY:
		SEP_DRV_LOG_TRACE("DRV_OPERATION_GET_PLATFORM_TOPOLOGY.");
		status = lwpmudrv_Get_Platform_Topology(&local_args);
		break;

	case DRV_OPERATION_FLUSH:
		SEP_DRV_LOG_TRACE("DRV_OPERATION_FLUSH.");
		status = lwpmudrv_Flush();
		break;

	case DRV_OPERATION_GET_SAMPLE_DROP_INFO:
		SEP_PRINT_DEBUG("DRV_OPERATION_IOCTL_GET_SAMPLE_DROP_INFO\n");
		status = lwpmudrv_Get_Sample_Drop_Info(&local_args);
		break;

	case DRV_OPERATION_SET_EMON_BUFFER_DRIVER_HELPER:
		SEP_DRV_LOG_TRACE(
			"DRV_OPERATION_SET_EMON_BUFFER_DRIVER_HELPER.");
		status = lwpmudrv_Set_Emon_Buffer_Driver_Helper(&local_args);
		break;

	/*
	 * Graphics IOCTL commands
	 */

#if defined(BUILD_GFX)
	case DRV_OPERATION_SET_GFX_EVENT:
		SEP_DRV_LOG_TRACE("DRV_OPERATION_SET_GFX_EVENT.");
		SEP_DRV_LOG_TRACE("lwpmudrv_Device_Control: enable_gfx=%d.",
				  (int)DRV_CONFIG_enable_gfx(drv_cfg));
		status = GFX_Set_Event_Code(&local_args);
		break;
#endif

	/*
	 * Chipset IOCTL commands
	 */

#if defined(BUILD_CHIPSET)
	case DRV_OPERATION_PCI_READ: {
		CHIPSET_PCI_ARG_NODE pci_data;

		SEP_DRV_LOG_TRACE("DRV_OPERATION_PCI_READ.");

		if (local_args.buf_usr_to_drv == NULL ||
		    local_args.len_usr_to_drv != sizeof(CHIPSET_PCI_ARG_NODE) ||
		    local_args.buf_drv_to_usr == NULL ||
		    local_args.len_drv_to_usr != sizeof(CHIPSET_PCI_ARG_NODE)) {
			status = OS_FAULT;
			goto cleanup;
		}

		if (copy_from_user(&pci_data,
				   (void __user *)local_args.buf_usr_to_drv,
				   sizeof(CHIPSET_PCI_ARG_NODE))) {
			status = OS_FAULT;
			goto cleanup;
		}

		status = PCI_Read_From_Memory_Address(
			CHIPSET_PCI_ARG_address(&pci_data),
			&CHIPSET_PCI_ARG_value(&pci_data));

		if (copy_to_user((void __user *)local_args.buf_drv_to_usr,
				 &pci_data, sizeof(CHIPSET_PCI_ARG_NODE))) {
			status = OS_FAULT;
			goto cleanup;
		}

		break;
	}

	case DRV_OPERATION_PCI_WRITE: {
		CHIPSET_PCI_ARG_NODE pci_data;

		SEP_DRV_LOG_TRACE("DRV_OPERATION_PCI_WRITE.");

		if (local_args.buf_usr_to_drv == NULL ||
		    local_args.len_usr_to_drv != sizeof(CHIPSET_PCI_ARG_NODE)) {
			status = OS_FAULT;
			goto cleanup;
		}

		if (copy_from_user(&pci_data,
				   (void __user *)local_args.buf_usr_to_drv,
				   sizeof(CHIPSET_PCI_ARG_NODE))) {
			status = OS_FAULT;
			goto cleanup;
		}

		status = PCI_Write_To_Memory_Address(
			CHIPSET_PCI_ARG_address(&pci_data),
			CHIPSET_PCI_ARG_value(&pci_data));
		break;
	}

	case DRV_OPERATION_FD_PHYS:
		SEP_DRV_LOG_TRACE("DRV_OPERATION_FD_PHYS.");
		status = lwpmudrv_Samp_Find_Physical_Address(&local_args);
		break;

	case DRV_OPERATION_READ_PCI_CONFIG:
		SEP_DRV_LOG_TRACE("DRV_OPERATION_READ_PCI_CONFIG.");
		status = lwpmudrv_Samp_Read_PCI_Config(&local_args);
		break;

	case DRV_OPERATION_WRITE_PCI_CONFIG:
		SEP_DRV_LOG_TRACE("DRV_OPERATION_WRITE_PCI_CONFIG.");
		status = lwpmudrv_Samp_Write_PCI_Config(&local_args);
		break;

	case DRV_OPERATION_CHIPSET_INIT:
		SEP_DRV_LOG_TRACE("DRV_OPERATION_CHIPSET_INIT.");
		SEP_DRV_LOG_TRACE("Enable_chipset=%d.",
				  (int)DRV_CONFIG_enable_chipset(drv_cfg));
		status = lwpmudrv_Samp_Chipset_Init(&local_args);
		break;

	case DRV_OPERATION_GET_CHIPSET_DEVICE_ID:
		SEP_DRV_LOG_TRACE("DRV_OPERATION_GET_CHIPSET_DEVICE_ID.");
		status = lwpmudrv_Samp_Read_PCI_Config(&local_args);
		break;
#endif

	case DRV_OPERATION_GET_DRV_SETUP_INFO:
		SEP_DRV_LOG_TRACE("DRV_OPERATION_GET_DRV_SETUP_INFO.");
		status = lwpmudrv_Get_Drv_Setup_Info(&local_args);
		break;

	/*
	 * if none of the above, treat as unknown/illegal IOCTL command
	 */

	default:
		SEP_DRV_LOG_ERROR("Unknown IOCTL number: %d!", cmd);
		status = OS_ILLEGAL_IOCTL;
		break;
	}
#if defined(BUILD_CHIPSET)
cleanup:
#endif
	UTILITY_Driver_Set_Active_Ioctl(0);
	MUTEX_UNLOCK(ioctl_lock);

	SEP_DRV_LOG_TRACE_OUT("Return value for command %d: %d.", cmd, status);
	return status;
}

/*
 * lwpmu_Device_Control: entry point registered in lwpmu_Fops; validates the
 * ioctl magic, copies the IOCTL_ARGS_NODE descriptor from userspace and
 * forwards to lwpmu_Service_IOCTL.
 */
static long lwpmu_Device_Control(IOCTL_USE_INODE struct file *filp,
				 unsigned int cmd, unsigned long arg)
{
	int status = OS_SUCCESS;
	IOCTL_ARGS_NODE local_args;

	SEP_DRV_LOG_TRACE_IN("Cmd type: %d, subcommand: %d.", _IOC_TYPE(cmd),
			     _IOC_NR(cmd));

#if !defined(DRV_USE_UNLOCKED_IOCTL)
	SEP_DRV_LOG_TRACE("Cmd: 0x%x, called on inode maj:%d, min:%d.", cmd,
			  imajor(inode), iminor(inode));
#endif
	SEP_DRV_LOG_TRACE("Type: %d, subcommand: %d.", _IOC_TYPE(cmd),
			  _IOC_NR(cmd));

	if (_IOC_TYPE(cmd) != LWPMU_IOC_MAGIC) {
		SEP_DRV_LOG_ERROR_TRACE_OUT("Unknown IOCTL magic: %d!",
					    _IOC_TYPE(cmd));
		return OS_ILLEGAL_IOCTL;
	}

	memset(&local_args, 0, sizeof(IOCTL_ARGS_NODE));

	if (arg) {
		/* NOTE(review): this copy_from_user result is overwritten by
		 * the service call below, so a faulting arg pointer proceeds
		 * with zeroed local_args — confirm this is intended. */
		status = copy_from_user(&local_args, (void __user *)arg,
					sizeof(IOCTL_ARGS_NODE));
	}

	status = lwpmu_Service_IOCTL(IOCTL_USE_INODE filp, _IOC_NR(cmd),
				     local_args);

	SEP_DRV_LOG_TRACE_OUT("Return value: %d.", status);
	return status;
}

#if defined(CONFIG_COMPAT) && defined(DRV_EM64T)
/*
 * 32-bit compat ioctl path: converts an IOCTL_COMPAT_ARGS_NODE (32-bit
 * pointers) into a native IOCTL_ARGS_NODE via compat_ptr() and forwards
 * to lwpmu_Service_IOCTL.
 */
static long lwpmu_Device_Control_Compat(struct file *filp, unsigned int cmd,
					unsigned long arg)
{
	int status = OS_SUCCESS;
	IOCTL_COMPAT_ARGS_NODE local_args_compat;
	IOCTL_ARGS_NODE local_args;

	SEP_DRV_LOG_TRACE_IN("Compat: type: %d, subcommand: %d.",
			     _IOC_TYPE(cmd), _IOC_NR(cmd));

	memset(&local_args_compat, 0, sizeof(IOCTL_COMPAT_ARGS_NODE));
	SEP_DRV_LOG_TRACE("Compat: type: %d, subcommand: %d.", _IOC_TYPE(cmd),
			  _IOC_NR(cmd));

	if (_IOC_TYPE(cmd) != LWPMU_IOC_MAGIC) {
		SEP_DRV_LOG_ERROR_TRACE_OUT("Unknown IOCTL magic: %d!",
					    _IOC_TYPE(cmd));
		return OS_ILLEGAL_IOCTL;
	}

	if (arg) {
		status = copy_from_user(&local_args_compat,
					(void __user *)arg,
					sizeof(IOCTL_COMPAT_ARGS_NODE));
	} // NB: status defined above is not being used...
	local_args.len_drv_to_usr = local_args_compat.len_drv_to_usr;
	local_args.len_usr_to_drv = local_args_compat.len_usr_to_drv;
	local_args.buf_drv_to_usr =
		(char *)compat_ptr(local_args_compat.buf_drv_to_usr);
	local_args.buf_usr_to_drv =
		(char *)compat_ptr(local_args_compat.buf_usr_to_drv);
	local_args.command = _IOC_NR(cmd);

	status = lwpmu_Service_IOCTL(filp, _IOC_NR(cmd), local_args);

	SEP_DRV_LOG_TRACE_OUT("Return value: %d", status);
	return status;
}
#endif

/*
 * @fn LWPMUDRV_Abnormal_Terminate(void)
 *
 * @brief This routine is called from linuxos_Exit_Task_Notify if the user process has
 *        been killed by an uncatchable signal (example kill -9). The state variable
 *        abnormal_terminate is set to 1 and the clean up routines are called. In this
 *        code path the OS notifier hooks should not be unloaded.
+ * + * @param None + * + * @return OS_STATUS + * + * Special Notes: + * + */ +static int LWPMUDRV_Abnormal_Terminate(void) +{ + int status; + SEP_DRV_LOG_FLOW_IN(""); + + SEP_DRV_LOG_TRACE("Calling lwpmudrv_Prepare_Stop."); + status = lwpmudrv_Prepare_Stop(); + if (status != OS_SUCCESS) + return status; + + SEP_DRV_LOG_TRACE("Calling lwpmudrv_Finish_Stop."); + status = lwpmudrv_Finish_Stop(); + if (status != OS_SUCCESS) + return status; + + SEP_DRV_LOG_TRACE("Calling lwpmudrv_Terminate."); + status = lwpmudrv_Terminate(); + + SEP_DRV_LOG_FLOW_OUT("Return value: %d.", status); + return status; +} + +static int lwpmudrv_Abnormal_Handler(void *data) +{ + SEP_DRV_LOG_FLOW_IN(""); + + while (!kthread_should_stop()) { + if (wait_event_interruptible_timeout( + wait_exit, + GET_DRIVER_STATE() == DRV_STATE_TERMINATING, + msecs_to_jiffies(350))) { + SEP_DRV_LOG_WARNING( + "Processing abnormal termination..."); + MUTEX_LOCK(ioctl_lock); + SEP_DRV_LOG_TRACE("Locked ioctl_lock..."); + LWPMUDRV_Abnormal_Terminate(); + SEP_DRV_LOG_TRACE("Unlocking ioctl_lock..."); + MUTEX_UNLOCK(ioctl_lock); + } + } + + SEP_DRV_LOG_FLOW_OUT("End of thread."); + return 0; +} + +/***************************************************************************************** + * + * Driver Entry / Exit functions that will be called on when the driver is loaded and + * unloaded + * + ****************************************************************************************/ + +/* + * Structure that declares the usual file access functions + * First one is for lwpmu_c, the control functions + */ +static struct file_operations lwpmu_Fops = { + .owner = THIS_MODULE, + IOCTL_OP = lwpmu_Device_Control, +#if defined(CONFIG_COMPAT) && defined(DRV_EM64T) + .compat_ioctl = lwpmu_Device_Control_Compat, +#endif + .read = lwpmu_Read, + .write = lwpmu_Write, + .open = lwpmu_Open, + .release = NULL, + .llseek = NULL, +}; + +/* + * Second one is for lwpmu_m, the module notification functions + */ +static struct 
file_operations lwmod_Fops = { + .owner = THIS_MODULE, + IOCTL_OP = NULL, //None needed + .read = OUTPUT_Module_Read, + .write = NULL, //No writing accepted + .open = lwpmu_Open, + .release = NULL, + .llseek = NULL, +}; + +/* + * Third one is for lwsamp_nn, the sampling functions + */ +static struct file_operations lwsamp_Fops = { + .owner = THIS_MODULE, + IOCTL_OP = NULL, //None needed + .read = OUTPUT_Sample_Read, + .write = NULL, //No writing accepted + .open = lwpmu_Open, + .release = NULL, + .llseek = NULL, +}; + +/* + * Fourth one is for lwsamp_sideband, the pebs process info functions + */ +static struct file_operations lwsideband_Fops = { + .owner = THIS_MODULE, + IOCTL_OP = NULL, //None needed + .read = OUTPUT_SidebandInfo_Read, + .write = NULL, //No writing accepted + .open = lwpmu_Open, + .release = NULL, + .llseek = NULL, +}; + +/* + * Fifth one is for lwsampunc_nn, the uncore sampling functions + */ +static struct file_operations lwsampunc_Fops = { + .owner = THIS_MODULE, + IOCTL_OP = NULL, //None needed + .read = OUTPUT_UncSample_Read, + .write = NULL, //No writing accepted + .open = lwpmu_Open, + .release = NULL, + .llseek = NULL, +}; + +/* ------------------------------------------------------------------------- */ +/*! + * @fn static int lwpmudrv_setup_cdev(dev, fops, dev_number) + * + * @param LWPMU_DEV dev - pointer to the device object + * @param struct file_operations *fops - pointer to the file operations struct + * @param dev_t dev_number - major/monor device number + * + * @return OS_STATUS + * + * @brief Set up the device object. 
+ * + * Special Notes + */ +static int lwpmu_setup_cdev(LWPMU_DEV dev, struct file_operations *fops, + dev_t dev_number) +{ + int res; + SEP_DRV_LOG_TRACE_IN(""); + + cdev_init(&LWPMU_DEV_cdev(dev), fops); + LWPMU_DEV_cdev(dev).owner = THIS_MODULE; + LWPMU_DEV_cdev(dev).ops = fops; + + res = cdev_add(&LWPMU_DEV_cdev(dev), dev_number, 1); + + SEP_DRV_LOG_TRACE_OUT("Return value: %d", res); + return res; +} + +/* ------------------------------------------------------------------------- */ +/*! + * @fn static int lwpmu_Load(void) + * + * @param none + * + * @return STATUS + * + * @brief Load the driver module into the kernel. Set up the driver object. + * @brief Set up the initial state of the driver and allocate the memory + * @brief needed to keep basic state information. + */ +static int lwpmu_Load(void) +{ + int i, j, num_cpus; + dev_t lwmod_DevNum; + OS_STATUS status = OS_INVALID; + char dev_name[MAXNAMELEN]; + struct device *sep_device; +#if defined(CONFIG_XEN_HAVE_VPMU) + xen_pmu_params_t xenpmu_param; + xen_pmu_data_t *xenpmu_data; + unsigned long pfn; +#endif + + SEP_DRV_LOG_LOAD("Driver loading..."); + if (UTILITY_Driver_Log_Init() != + OS_SUCCESS) { // Do not use SEP_DRV_LOG_X (where X != LOAD) before this, or if this fails + SEP_DRV_LOG_LOAD("Error: could not allocate log buffer."); + return OS_NO_MEM; + } + SEP_DRV_LOG_FLOW_IN("Starting internal log monitoring."); + + CONTROL_Memory_Tracker_Init(); + +#if defined(DRV_SEP_ACRN_ON) + SEP_DRV_LOG_FLOW_IN("Starting internal log monitoring."); + vm_info_list = + CONTROL_Allocate_Memory(sizeof(struct profiling_vm_info_list)); + memset(vm_info_list, 0, sizeof(struct profiling_vm_info_list)); + + BUG_ON(!virt_addr_valid(vm_info_list)); + + acrn_hypercall2(HC_PROFILING_OPS, PROFILING_GET_VMINFO, + virt_to_phys(vm_info_list)); +#endif + +#if !defined(CONFIG_XEN_HAVE_VPMU) +#if LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 32) + if (xen_initial_domain()) { + SEP_DRV_LOG_LOAD( + "PMU virtualization is not enabled on XEN 
dom0!"); + } +#endif +#endif + + /* Get one major device number and two minor numbers. */ + /* The result is formatted as major+minor(0) */ + /* One minor number is for control (lwpmu_c), */ + /* the other (lwpmu_m) is for modules */ + SEP_DRV_LOG_INIT("About to register chrdev..."); + + lwpmu_DevNum = MKDEV(0, 0); + status = alloc_chrdev_region(&lwpmu_DevNum, 0, PMU_DEVICES, + SEP_DRIVER_NAME); + SEP_DRV_LOG_INIT("Result of alloc_chrdev_region is %d.", status); + if (status < 0) { + SEP_DRV_LOG_ERROR_FLOW_OUT( + "Error: Failed to alloc chrdev_region (return = %d).", + status); + return status; + } + SEP_DRV_LOG_LOAD("Major number is %d", MAJOR(lwpmu_DevNum)); + status = lwpmudrv_Initialize_State(); + if (status < 0) { + SEP_DRV_LOG_ERROR_FLOW_OUT( + "Failed to initialize state (return = %d)!", status); + return status; + } + num_cpus = GLOBAL_STATE_num_cpus(driver_state); + SEP_DRV_LOG_LOAD("Detected %d total CPUs and %d active CPUs.", num_cpus, + GLOBAL_STATE_active_cpus(driver_state)); + +#if defined(CONFIG_XEN_HAVE_VPMU) + if (xen_initial_domain()) { + xenpmu_param.version.maj = XENPMU_VER_MAJ; + xenpmu_param.version.min = XENPMU_VER_MIN; + + for (i = 0; i < num_cpus; i++) { + xenpmu_data = + (xen_pmu_data_t *)get_zeroed_page(GFP_KERNEL); + ; + if (!xenpmu_data) { + SEP_DRV_LOG_ERROR_FLOW_OUT( + "Memory allocation failure for xenpmu_data!"); + return OS_NO_MEM; + } + pfn = vmalloc_to_pfn((char *)xenpmu_data); + + xenpmu_param.val = pfn_to_mfn(pfn); + xenpmu_param.vcpu = i; + status = HYPERVISOR_xenpmu_op(XENPMU_init, + (PVOID)&xenpmu_param); + + per_cpu(sep_xenpmu_shared, i) = xenpmu_data; + } + SEP_DRV_LOG_LOAD("VPMU is initialized on XEN Dom0."); + } +#endif + + PCI_Initialize(); + + /* Allocate memory for the control structures */ + lwpmu_control = CONTROL_Allocate_Memory(sizeof(LWPMU_DEV_NODE)); + lwmod_control = CONTROL_Allocate_Memory(sizeof(LWPMU_DEV_NODE)); + lwsamp_control = + CONTROL_Allocate_Memory(num_cpus * sizeof(LWPMU_DEV_NODE)); + 
lwsideband_control = + CONTROL_Allocate_Memory(num_cpus * sizeof(LWPMU_DEV_NODE)); + + if (!lwsideband_control || !lwsamp_control || !lwpmu_control || + !lwmod_control) { + CONTROL_Free_Memory(lwpmu_control); + CONTROL_Free_Memory(lwmod_control); + CONTROL_Free_Memory(lwsamp_control); + CONTROL_Free_Memory(lwsideband_control); + + SEP_DRV_LOG_ERROR_FLOW_OUT( + "Memory allocation failure for control structures!"); + return OS_NO_MEM; + } + + /* Register the file operations with the OS */ + + pmu_class = class_create(THIS_MODULE, SEP_DRIVER_NAME); + if (IS_ERR(pmu_class)) { + SEP_DRV_LOG_ERROR("Error registering SEP control class!"); + } + sep_device = device_create(pmu_class, NULL, lwpmu_DevNum, NULL, + SEP_DRIVER_NAME DRV_DEVICE_DELIMITER "c"); + if (IS_ERR(sep_device)) { + SEP_DRV_LOG_ERROR("Error creating SEP PMU device!"); + } + + status = lwpmu_setup_cdev(lwpmu_control, &lwpmu_Fops, lwpmu_DevNum); + if (status) { + SEP_DRV_LOG_ERROR_FLOW_OUT( + "Error %d when adding lwpmu as char device!", status); + unregister_chrdev(MAJOR(lwpmu_DevNum), SEP_DRIVER_NAME); + device_destroy(pmu_class, lwpmu_DevNum); + unregister_chrdev_region(lwpmu_DevNum, PMU_DEVICES); + class_destroy(pmu_class); + return status; + } + /* _c init was fine, now try _m */ + lwmod_DevNum = MKDEV(MAJOR(lwpmu_DevNum), MINOR(lwpmu_DevNum) + 1); + + sep_device = device_create(pmu_class, NULL, lwmod_DevNum, NULL, + SEP_DRIVER_NAME DRV_DEVICE_DELIMITER "m"); + if (IS_ERR(sep_device)) { + SEP_DRV_LOG_ERROR("Error creating SEP PMU device!"); + } + + status = lwpmu_setup_cdev(lwmod_control, &lwmod_Fops, lwmod_DevNum); + if (status) { + cdev_del(&LWPMU_DEV_cdev(lwpmu_control)); + SEP_DRV_LOG_ERROR_FLOW_OUT( + "Error %d when adding lwpmu as char device!", status); + unregister_chrdev(MAJOR(lwpmu_DevNum), SEP_DRIVER_NAME); + device_destroy(pmu_class, lwpmu_DevNum); + device_destroy(pmu_class, lwpmu_DevNum + 1); + cdev_del(&LWPMU_DEV_cdev(lwpmu_control)); + unregister_chrdev_region(lwpmu_DevNum, PMU_DEVICES); 
+ class_destroy(pmu_class); + return status; + } + + /* allocate one sampling device per cpu */ + lwsamp_DevNum = MKDEV(0, 0); + status = alloc_chrdev_region(&lwsamp_DevNum, 0, num_cpus, + SEP_SAMPLES_NAME); + + if (status < 0) { + SEP_DRV_LOG_ERROR_FLOW_OUT( + "Error: Failed to alloc chrdev_region (return = %d).", + status); + unregister_chrdev(MAJOR(lwpmu_DevNum), SEP_DRIVER_NAME); + device_destroy(pmu_class, lwpmu_DevNum); + device_destroy(pmu_class, lwpmu_DevNum + 1); + cdev_del(&LWPMU_DEV_cdev(lwpmu_control)); + cdev_del(&LWPMU_DEV_cdev(lwmod_control)); + unregister_chrdev_region(lwpmu_DevNum, PMU_DEVICES); + class_destroy(pmu_class); + return status; + } + + /* Register the file operations with the OS */ + for (i = 0; i < num_cpus; i++) { + snprintf(dev_name, MAXNAMELEN, "%s%ss%d", SEP_DRIVER_NAME, + DRV_DEVICE_DELIMITER, i); + + sep_device = device_create(pmu_class, NULL, lwsamp_DevNum + i, NULL, + dev_name); + if (IS_ERR(sep_device)) { + SEP_DRV_LOG_ERROR("Error creating SEP PMU device !"); + } + status = lwpmu_setup_cdev(lwsamp_control + i, &lwsamp_Fops, + lwsamp_DevNum + i); + if (status) { + SEP_DRV_LOG_ERROR_FLOW_OUT( + "Error %d when adding lwpmu as char device!", + status); + unregister_chrdev(MAJOR(lwpmu_DevNum), SEP_DRIVER_NAME); + device_destroy(pmu_class, lwpmu_DevNum); + device_destroy(pmu_class, lwpmu_DevNum + 1); + cdev_del(&LWPMU_DEV_cdev(lwpmu_control)); + cdev_del(&LWPMU_DEV_cdev(lwmod_control)); + unregister_chrdev_region(lwpmu_DevNum, PMU_DEVICES); + unregister_chrdev(MAJOR(lwsamp_DevNum), SEP_SAMPLES_NAME); + for (j = i; j > 0; j--) { + device_destroy(pmu_class, lwsamp_DevNum + j); + cdev_del(&LWPMU_DEV_cdev(&lwsamp_control[j])); + } + + class_destroy(pmu_class); + return status; + } else { + SEP_DRV_LOG_INIT("Added sampling device %d.", i); + } + } + + lwsideband_DevNum = MKDEV(0, 0); + status = alloc_chrdev_region(&lwsideband_DevNum, 0, num_cpus, + SEP_SIDEBAND_NAME); + + if (status < 0) { + SEP_DRV_LOG_ERROR_FLOW_OUT( + "Memory 
allocation failure for chrdev_region for sideband!"); + unregister_chrdev(MAJOR(lwpmu_DevNum), SEP_DRIVER_NAME); + device_destroy(pmu_class, lwpmu_DevNum); + device_destroy(pmu_class, lwpmu_DevNum + 1); + cdev_del(&LWPMU_DEV_cdev(lwpmu_control)); + cdev_del(&LWPMU_DEV_cdev(lwmod_control)); + unregister_chrdev_region(lwpmu_DevNum, PMU_DEVICES); + unregister_chrdev(MAJOR(lwsamp_DevNum), SEP_SAMPLES_NAME); + for (j = 0; j < num_cpus; j++) { + device_destroy(pmu_class, lwsamp_DevNum + j); + cdev_del(&LWPMU_DEV_cdev(&lwsamp_control[j])); + } + + class_destroy(pmu_class); + return status; + } + + for (i = 0; i < num_cpus; i++) { + snprintf(dev_name, MAXNAMELEN, "%s%sb%d", SEP_DRIVER_NAME, + DRV_DEVICE_DELIMITER, i); + sep_device = device_create(pmu_class, NULL, lwsideband_DevNum + i, NULL, + dev_name); + if (IS_ERR(sep_device)) { + SEP_DRV_LOG_ERROR("Error creating SEP PMU device!"); + } + status = lwpmu_setup_cdev(lwsideband_control + i, + &lwsideband_Fops, + lwsideband_DevNum + i); + if (status) { + SEP_DRV_LOG_ERROR_FLOW_OUT( + "Error %d when adding lwsideband as char device!", + status); + unregister_chrdev(MAJOR(lwpmu_DevNum), SEP_DRIVER_NAME); + device_destroy(pmu_class, lwpmu_DevNum); + device_destroy(pmu_class, lwpmu_DevNum + 1); + cdev_del(&LWPMU_DEV_cdev(lwpmu_control)); + cdev_del(&LWPMU_DEV_cdev(lwmod_control)); + unregister_chrdev_region(lwpmu_DevNum, PMU_DEVICES); + unregister_chrdev(MAJOR(lwsamp_DevNum), SEP_SAMPLES_NAME); + unregister_chrdev(MAJOR(lwsideband_DevNum), SEP_SIDEBAND_NAME); + for (j = 0; j < num_cpus; j++) { + device_destroy(pmu_class, lwsamp_DevNum + j); + cdev_del(&LWPMU_DEV_cdev(&lwsamp_control[j])); + } + for (j = i; j > 0; j--) { + device_destroy(pmu_class, lwsideband_DevNum + j); + cdev_del(&LWPMU_DEV_cdev(&lwsideband_control[j])); + } + + class_destroy(pmu_class); + return status; + } else { + SEP_DRV_LOG_INIT("Added sampling sideband device %d.", + i); + } + } + + cpu_tsc = (U64 *)CONTROL_Allocate_Memory( + 
GLOBAL_STATE_num_cpus(driver_state) * sizeof(U64)); + prev_cpu_tsc = (U64 *)CONTROL_Allocate_Memory( + GLOBAL_STATE_num_cpus(driver_state) * sizeof(U64)); + diff_cpu_tsc = (U64 *)CONTROL_Allocate_Memory( + GLOBAL_STATE_num_cpus(driver_state) * sizeof(U64)); + +#if !defined(CONFIG_PREEMPT_COUNT) && !defined(DRV_SEP_ACRN_ON) + atomic_set(&read_now, GLOBAL_STATE_num_cpus(driver_state)); + init_waitqueue_head(&read_tsc_now); + CONTROL_Invoke_Parallel(lwpmudrv_Fill_TSC_Info, (PVOID)(size_t)0); +#endif + + pcb_size = GLOBAL_STATE_num_cpus(driver_state) * sizeof(CPU_STATE_NODE); + pcb = CONTROL_Allocate_Memory(pcb_size); + if (!pcb) { + SEP_DRV_LOG_ERROR_FLOW_OUT( + "Memory allocation failure for PCB!"); + return OS_NO_MEM; + } + + core_to_package_map = CONTROL_Allocate_Memory( + GLOBAL_STATE_num_cpus(driver_state) * sizeof(U32)); + if (!core_to_package_map) { + SEP_DRV_LOG_ERROR_FLOW_OUT( + "Memory allocation failure for core_to_package_map!"); + return OS_NO_MEM; + } + + core_to_phys_core_map = CONTROL_Allocate_Memory( + GLOBAL_STATE_num_cpus(driver_state) * sizeof(U32)); + if (!core_to_phys_core_map) { + SEP_DRV_LOG_ERROR_FLOW_OUT( + "Memory allocation failure for core_to_phys_core_map!"); + return OS_NO_MEM; + } + + core_to_thread_map = CONTROL_Allocate_Memory( + GLOBAL_STATE_num_cpus(driver_state) * sizeof(U32)); + if (!core_to_thread_map) { + SEP_DRV_LOG_ERROR_FLOW_OUT( + "Memory allocation failure for core_to_thread_map!"); + return OS_NO_MEM; + } + + threads_per_core = CONTROL_Allocate_Memory( + GLOBAL_STATE_num_cpus(driver_state) * sizeof(U32)); + if (!threads_per_core) { + SEP_DRV_LOG_ERROR_FLOW_OUT( + "Memory allocation failure for threads_per_core!"); + return OS_NO_MEM; + } + + occupied_core_ids = CONTROL_Allocate_Memory( + GLOBAL_STATE_num_cpus(driver_state) * sizeof(U32)); + if (!occupied_core_ids) { + SEP_DRV_LOG_ERROR_FLOW_OUT( + "Memory allocation failure for occupied_core_ids!"); + return OS_NO_MEM; + } + SYS_INFO_Build(); + memset(pcb, 0, pcb_size); + 
+ if (total_ram <= OUTPUT_MEMORY_THRESHOLD) { + output_buffer_size = OUTPUT_SMALL_BUFFER; + } + + MUTEX_INIT(ioctl_lock); + + status = UNC_COMMON_Init(); + if (status) { + SEP_DRV_LOG_ERROR_FLOW_OUT("Error %d when init uncore struct!", + status); + return status; + } + + /* allocate one sampling device per package (for uncore)*/ + lwsampunc_control = + CONTROL_Allocate_Memory(num_packages * sizeof(LWPMU_DEV_NODE)); + if (!lwsampunc_control) { + CONTROL_Free_Memory(lwsampunc_control); + SEP_DRV_LOG_ERROR_FLOW_OUT( + "lwpmu driver failed to alloc space!\n"); + return OS_NO_MEM; + } + + lwsampunc_DevNum = MKDEV(0, 0); + status = alloc_chrdev_region(&lwsampunc_DevNum, 0, num_packages, + SEP_UNCORE_NAME); + + if (status < 0) { + SEP_DRV_LOG_ERROR_FLOW_OUT( + "Error: Failed to alloc chrdev_region (return = %d).", + status); + unregister_chrdev(MAJOR(lwpmu_DevNum), SEP_DRIVER_NAME); + device_destroy(pmu_class, lwpmu_DevNum); + device_destroy(pmu_class, lwpmu_DevNum + 1); + cdev_del(&LWPMU_DEV_cdev(lwpmu_control)); + cdev_del(&LWPMU_DEV_cdev(lwmod_control)); + unregister_chrdev_region(lwpmu_DevNum, PMU_DEVICES); + unregister_chrdev(MAJOR(lwsamp_DevNum), SEP_SAMPLES_NAME); + unregister_chrdev(MAJOR(lwsampunc_DevNum), SEP_UNCORE_NAME); + unregister_chrdev(MAJOR(lwsideband_DevNum), SEP_SIDEBAND_NAME); + for (j = 0; j < num_cpus; j++) { + device_destroy(pmu_class, lwsamp_DevNum + j); + device_destroy(pmu_class, lwsideband_DevNum + j); + cdev_del(&LWPMU_DEV_cdev(&lwsideband_control[j])); + cdev_del(&LWPMU_DEV_cdev(&lwsamp_control[j])); + } + + class_destroy(pmu_class); + unregister_chrdev_region(lwsamp_DevNum, num_cpus); + unregister_chrdev_region(lwsideband_DevNum, num_cpus); + return status; + } + + /* Register the file operations with the OS */ + for (i = 0; i < num_packages; i++) { + snprintf(dev_name, MAXNAMELEN, "%s%su%d", SEP_DRIVER_NAME, + DRV_DEVICE_DELIMITER, i); + sep_device = device_create(pmu_class, NULL, + lwsampunc_DevNum + i, NULL, dev_name); + if 
(IS_ERR(sep_device)) { + SEP_DRV_LOG_ERROR("Error creating SEP PMU device!"); + } + status = lwpmu_setup_cdev(lwsampunc_control + i, + &lwsampunc_Fops, + lwsampunc_DevNum + i); + if (status) { + SEP_DRV_LOG_ERROR_FLOW_OUT( + "Error %d when adding lwpmu as char device!", + status); + unregister_chrdev(MAJOR(lwpmu_DevNum), SEP_DRIVER_NAME); + device_destroy(pmu_class, lwpmu_DevNum); + device_destroy(pmu_class, lwpmu_DevNum + 1); + cdev_del(&LWPMU_DEV_cdev(lwpmu_control)); + cdev_del(&LWPMU_DEV_cdev(lwmod_control)); + unregister_chrdev_region(lwpmu_DevNum, PMU_DEVICES); + unregister_chrdev(MAJOR(lwsamp_DevNum), SEP_SAMPLES_NAME); + unregister_chrdev(MAJOR(lwsampunc_DevNum), SEP_UNCORE_NAME); + unregister_chrdev(MAJOR(lwsideband_DevNum), SEP_SIDEBAND_NAME); + for (j = 0; j < num_cpus; j++) { + device_destroy(pmu_class, lwsamp_DevNum + j); + device_destroy(pmu_class, lwsideband_DevNum + j); + cdev_del(&LWPMU_DEV_cdev(&lwsideband_control[j])); + cdev_del(&LWPMU_DEV_cdev(&lwsamp_control[j])); + } + + for (j = i; j > 0; j--) { + device_destroy(pmu_class, lwsampunc_DevNum + i); + cdev_del(&LWPMU_DEV_cdev(&lwsampunc_control[j])); + } + + class_destroy(pmu_class); + unregister_chrdev_region(lwsamp_DevNum, num_cpus); + unregister_chrdev_region(lwsampunc_DevNum, num_packages); + unregister_chrdev_region(lwsideband_DevNum, num_cpus); + + return status; + } else { + SEP_DRV_LOG_INIT("Added sampling device %d.", i); + } + } + + init_waitqueue_head(&wait_exit); + abnormal_handler = kthread_create(lwpmudrv_Abnormal_Handler, NULL, + "SEPDRV_ABNORMAL_HANDLER"); + if (abnormal_handler) { + wake_up_process(abnormal_handler); + } + +#if defined(DRV_CPU_HOTPLUG) + /* Register CPU hotplug notifier */ + LINUXOS_Register_Hotplug(); +#endif + /* + * Initialize the SEP driver version (done once at driver load time) + */ + SEP_VERSION_NODE_major(&drv_version) = SEP_MAJOR_VERSION; + SEP_VERSION_NODE_minor(&drv_version) = SEP_MINOR_VERSION; + SEP_VERSION_NODE_api(&drv_version) = SEP_API_VERSION; 
	SEP_VERSION_NODE_update(&drv_version) = SEP_UPDATE_VERSION;

	//
	// Display driver version information
	//
	SEP_DRV_LOG_LOAD("PMU collection driver v%d.%d.%d %s has been loaded.",
			 SEP_VERSION_NODE_major(&drv_version),
			 SEP_VERSION_NODE_minor(&drv_version),
			 SEP_VERSION_NODE_api(&drv_version),
			 SEP_RELEASE_STRING);

#if defined(BUILD_CHIPSET)
	SEP_DRV_LOG_LOAD("Chipset support is enabled.");
#endif

#if defined(BUILD_GFX)
	SEP_DRV_LOG_LOAD("Graphics support is enabled.");
#endif

	SEP_DRV_LOG_LOAD("NMI will be used for handling PMU interrupts.");

	SEP_DRV_LOG_FLOW_OUT("Return value: %d.", status);
	return status;
}

/* ------------------------------------------------------------------------- */
/*!
 * @fn static int lwpmu_Unload(void)
 *
 * @param none
 *
 * @return none
 *
 * @brief Remove the driver module from the kernel.
 */
static VOID lwpmu_Unload(void)
{
	int i = 0;
	int num_cpus;
#if defined(CONFIG_XEN_HAVE_VPMU)
	xen_pmu_params_t xenpmu_param;
#endif
	PVOID tmp_pcb;

	SEP_DRV_LOG_FLOW_IN("");

	SEP_DRV_LOG_LOAD("Driver unloading.");

	num_cpus = GLOBAL_STATE_num_cpus(driver_state);

	/* Stop the abnormal-termination watcher thread first so it cannot
	 * race with the teardown below; force the TERMINATING state so its
	 * wait condition becomes true before kthread_stop().
	 */
	if (abnormal_handler) {
		if (GET_DRIVER_STATE() != DRV_STATE_UNINITIALIZED) {
			CHANGE_DRIVER_STATE(STATE_BIT_ANY,
					    DRV_STATE_TERMINATING);
		}
		wake_up_interruptible_all(&wait_exit);
		kthread_stop(abnormal_handler);
		abnormal_handler = NULL;
	}

#if defined(CONFIG_XEN_HAVE_VPMU)
	if (xen_initial_domain()) {
		xenpmu_param.version.maj = XENPMU_VER_MAJ;
		xenpmu_param.version.min = XENPMU_VER_MIN;

		/* Tear down per-vcpu VPMU shared pages set up in lwpmu_Load. */
		for (i = 0; i < num_cpus; i++) {
			xenpmu_param.vcpu = i;
			HYPERVISOR_xenpmu_op(XENPMU_finish, &xenpmu_param);

			vfree(per_cpu(sep_xenpmu_shared, i));
			per_cpu(sep_xenpmu_shared, i) = NULL;
		}
		SEP_DRV_LOG_LOAD("VPMU was disabled on XEN Dom0.");
	}
#endif

	LINUXOS_Uninstall_Hooks();
	SYS_INFO_Destroy();
	OUTPUT_Destroy();
	/* Release all buffers and topology maps allocated at load time;
	 * CONTROL_Free_Memory() returns NULL so each global is reset too.
	 */
	cpu_buf = CONTROL_Free_Memory(cpu_buf);
	unc_buf = CONTROL_Free_Memory(unc_buf);
	cpu_sideband_buf = CONTROL_Free_Memory(cpu_sideband_buf);
	module_buf = CONTROL_Free_Memory(module_buf);
	cpu_tsc = CONTROL_Free_Memory(cpu_tsc);
	prev_cpu_tsc = CONTROL_Free_Memory(prev_cpu_tsc);
	diff_cpu_tsc = CONTROL_Free_Memory(diff_cpu_tsc);
	core_to_package_map = CONTROL_Free_Memory(core_to_package_map);
	core_to_phys_core_map = CONTROL_Free_Memory(core_to_phys_core_map);
	core_to_thread_map = CONTROL_Free_Memory(core_to_thread_map);
	threads_per_core = CONTROL_Free_Memory(threads_per_core);
	occupied_core_ids = CONTROL_Free_Memory(occupied_core_ids);
#if defined(DRV_SEP_ACRN_ON)
	vm_info_list = CONTROL_Free_Memory(vm_info_list);
#endif

	tmp_pcb = pcb;
	// Ensures there is no log message written (ERROR, ALLOC, ...)
	pcb = NULL; // between pcb being freed and pcb being NULL.
	CONTROL_Free_Memory(tmp_pcb);
	pcb_size = 0;

	UNC_COMMON_Clean_Up();

	/* Unwind the char devices in the reverse order of lwpmu_Load(). */
	unregister_chrdev(MAJOR(lwpmu_DevNum), SEP_DRIVER_NAME);
	device_destroy(pmu_class, lwpmu_DevNum);
	device_destroy(pmu_class, lwpmu_DevNum + 1);

	cdev_del(&LWPMU_DEV_cdev(lwpmu_control));
	cdev_del(&LWPMU_DEV_cdev(lwmod_control));
	unregister_chrdev_region(lwpmu_DevNum, PMU_DEVICES);

	unregister_chrdev(MAJOR(lwsamp_DevNum), SEP_SAMPLES_NAME);
	unregister_chrdev(MAJOR(lwsampunc_DevNum), SEP_UNCORE_NAME);
	unregister_chrdev(MAJOR(lwsideband_DevNum), SEP_SIDEBAND_NAME);

	for (i = 0; i < num_cpus; i++) {
		device_destroy(pmu_class, lwsamp_DevNum + i);
		device_destroy(pmu_class, lwsideband_DevNum + i);
		cdev_del(&LWPMU_DEV_cdev(&lwsamp_control[i]));
		cdev_del(&LWPMU_DEV_cdev(&lwsideband_control[i]));
	}

	for (i = 0; i < num_packages; i++) {
		device_destroy(pmu_class, lwsampunc_DevNum + i);
		cdev_del(&LWPMU_DEV_cdev(&lwsampunc_control[i]));
	}

	class_destroy(pmu_class);

	unregister_chrdev_region(lwsamp_DevNum, num_cpus);
	unregister_chrdev_region(lwsampunc_DevNum, num_packages);
	unregister_chrdev_region(lwsideband_DevNum, num_cpus);
	lwpmu_control =
CONTROL_Free_Memory(lwpmu_control); + lwmod_control = CONTROL_Free_Memory(lwmod_control); + lwsamp_control = CONTROL_Free_Memory(lwsamp_control); + lwsampunc_control = CONTROL_Free_Memory(lwsampunc_control); + lwsideband_control = CONTROL_Free_Memory(lwsideband_control); + + CONTROL_Memory_Tracker_Free(); + +#if defined(DRV_CPU_HOTPLUG) + /* Unregister CPU hotplug notifier */ + LINUXOS_Unregister_Hotplug(); +#endif + + SEP_DRV_LOG_FLOW_OUT( + "Log deallocation. Cannot track further in internal log."); + UTILITY_Driver_Log_Free(); // Do not use SEP_DRV_LOG_X (where X != LOAD) after this + + SEP_DRV_LOG_LOAD( + "PMU collection driver v%d.%d.%d %s has been unloaded.", + SEP_VERSION_NODE_major(&drv_version), + SEP_VERSION_NODE_minor(&drv_version), + SEP_VERSION_NODE_api(&drv_version), SEP_RELEASE_STRING); +} + +/* Declaration of the init and exit functions */ +module_init(lwpmu_Load); +module_exit(lwpmu_Unload); diff --git a/drivers/platform/x86/sepdk/sep/output.c b/drivers/platform/x86/sepdk/sep/output.c new file mode 100755 index 0000000000000..5fe2d28b41744 --- /dev/null +++ b/drivers/platform/x86/sepdk/sep/output.c @@ -0,0 +1,1177 @@ +/* **************************************************************************** + * Copyright(C) 2009-2018 Intel Corporation. All Rights Reserved. + * + * This file is part of SEP Development Kit + * + * SEP Development Kit is free software; you can redistribute it + * and/or modify it under the terms of the GNU General Public License + * version 2 as published by the Free Software Foundation. + * + * SEP Development Kit is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * As a special exception, you may use this file as part of a free software + * library without restriction. 
Specifically, if other files instantiate + * templates or use macros or inline functions from this file, or you + * compile this file and link it with other files to produce an executable + * this file does not by itself cause the resulting executable to be + * covered by the GNU General Public License. This exception does not + * however invalidate any other reasons why the executable file might be + * covered by the GNU General Public License. + * **************************************************************************** + */ + +#include "lwpmudrv_defines.h" +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "lwpmudrv_types.h" +#include "lwpmudrv.h" +#include "lwpmudrv_ecb.h" +#include "lwpmudrv_struct.h" + +#include "control.h" +#include "output.h" +#include "utility.h" +#include "inc/linuxos.h" +#define OTHER_C_DEVICES 1 // one for module + +/* + * Global data: Buffer control structure + */ +static wait_queue_head_t flush_queue; +static atomic_t flush_writers; +static volatile int flush; +extern DRV_CONFIG drv_cfg; +extern DRV_BOOL multi_pebs_enabled; +extern DRV_BOOL sched_switch_enabled; +extern DRV_BOOL unc_buf_init; + +static void output_NMI_Sample_Buffer(unsigned long data); + +/* + * @fn output_Free_Buffers(output, size) + * + * @param IN outbuf - The output buffer to manipulate + * + * @brief Deallocate the memory associated with the buffer descriptor + * + */ +static VOID output_Free_Buffers(BUFFER_DESC buffer, size_t size) +{ + int j; + OUTPUT outbuf; + + SEP_DRV_LOG_TRACE_IN("Buffer: %p, size: %u.", buffer, size); + + if (buffer == NULL) { + SEP_DRV_LOG_TRACE_OUT("Early exit (!buffer)."); + return; + } + outbuf = &BUFFER_DESC_outbuf(buffer); + for (j = 0; j < OUTPUT_NUM_BUFFERS; j++) { + CONTROL_Free_Memory(OUTPUT_buffer(outbuf, j)); + OUTPUT_buffer(outbuf, j) = NULL; + } + + SEP_DRV_LOG_TRACE_OUT(""); +} + +/* ------------------------------------------------------------------------- */ +/*! 
+ * @fn int OUTPUT_Reserve_Buffer_Space (OUTPUT outbuf, + * U32 size, + * U8 in_notification) + * + * @param outbuf IN output buffer to manipulate + * @param size IN The size of data to reserve + * @param defer IN wake up directly if FALSE. + * Otherwise, see below. + * @param in_notification IN 1 if in notification, 0 if not + * + * @result outloc - to the location where data is to be written + * + * Reserve space in the output buffers for data. The behavior of this function + * when a buffer is full will vary depending on the 'defer' and 'in_notification' + * parameters, as described in the special notes section. + * + * Special Notes: + * ----------------------------------------------------------------------------------------------------------------------- + * defer | in_notification | description + * ----------------------------------------------------------------------------------------------------------------------- + * FALSE | FALSE/TRUE | directly signals the buffer's consumer with wake_up_interruptible_sync + * ----------------------------------------------------------------------------------------------------------------------- + * TRUE | FALSE | defers the call to wake_up_interruptible_sync using tasklet_schedule [needed because calling + * | | it directly is not safe from an NMI] + * ----------------------------------------------------------------------------------------------------------------------- + * | | do not signal -or explicitly schedule the signaling of- the buffer's consumer [needed because + * TRUE | TRUE | neither operation is safe from the sched_switch tracepoint callback in kernel version 4.13]. + * | | Instead relies on the interrupt handler to do it next time there is an interrupt. 
 * -----------------------------------------------------------------------------------------------------------------------
 */
void *OUTPUT_Reserve_Buffer_Space(BUFFER_DESC bd, U32 size,
				  DRV_BOOL defer, U8 in_notification,
				  S32 cpu_idx)
{
	char *outloc = NULL;
	OUTPUT outbuf = &BUFFER_DESC_outbuf(bd);
	S32 this_cpu;

	SEP_DRV_LOG_NOTIFICATION_TRACE_IN(
		in_notification, "Bd: %p, size: %u, defer: %u, notif: %u.", bd,
		size, defer, in_notification);

	/* In continuous-profiling (CP) mode nothing may be reserved while a
	 * flush is in progress: the reader owns the buffers.
	 */
	if (DRV_CONFIG_enable_cp_mode(drv_cfg) && flush) {
		SEP_DRV_LOG_NOTIFICATION_TRACE_OUT(
			in_notification, "Res: NULL (cp_mode && flush).");
		return NULL;
	}

	/* Fast path: the current sub-buffer still has room. */
	if (OUTPUT_remaining_buffer_size(outbuf) >= size) {
		outloc = (char *)
			(OUTPUT_buffer(outbuf, OUTPUT_current_buffer(outbuf)) +
			 (OUTPUT_total_buffer_size(outbuf) -
			  OUTPUT_remaining_buffer_size(outbuf)));
	} else {
		U32 i, j, start;
		/* Seal the current sub-buffer: record how many bytes of it
		 * are valid so the reader knows how much to copy out.
		 */
		OUTPUT_buffer_full(outbuf, OUTPUT_current_buffer(outbuf)) =
			OUTPUT_total_buffer_size(outbuf) -
			OUTPUT_remaining_buffer_size(outbuf);

		//
		// Massive Naive assumption: Must find a way to fix it.
		// In spite of the loop.
		// The next buffer to fill are monotonically increasing
		// indicies.
		//
		if (!DRV_CONFIG_enable_cp_mode(drv_cfg)) {
			OUTPUT_signal_full(outbuf) = TRUE;
		}

		/* Rotate to the next empty sub-buffer (circularly). */
		start = OUTPUT_current_buffer(outbuf);
		for (i = start + 1; i < start + OUTPUT_NUM_BUFFERS; i++) {
			j = i % OUTPUT_NUM_BUFFERS;

			//don't check if buffer has data when doing CP
			if (!OUTPUT_buffer_full(outbuf, j) ||
			    (DRV_CONFIG_enable_cp_mode(drv_cfg))) {
				OUTPUT_current_buffer(outbuf) = j;
				OUTPUT_remaining_buffer_size(outbuf) =
					OUTPUT_total_buffer_size(outbuf);
				outloc = (char *)OUTPUT_buffer(outbuf, j);
				if (DRV_CONFIG_enable_cp_mode(drv_cfg)) {
					// discarding all the information in the new buffer in CP mode
					OUTPUT_buffer_full(outbuf, j) = 0;
					break;
				}
			}
#if !(defined(CONFIG_PREEMPT_RT) || defined(CONFIG_PREEMPT_RT_FULL))
			else {
				/* All sub-buffers full and caller will signal
				 * the reader synchronously: give up (samples
				 * may be dropped) rather than spin.
				 */
				if (!defer) {
					OUTPUT_signal_full(outbuf) = FALSE;
					SEP_DRV_LOG_NOTIFICATION_WARNING(
						in_notification,
						"Output buffers are full. Might be dropping some samples!");
					break;
				}
			}
#endif
		}
	}

	/* Commit the reservation and hand back zeroed space. */
	if (outloc) {
		OUTPUT_remaining_buffer_size(outbuf) -= size;
		memset(outloc, 0, size);
	}

	/* Signal the consumer per the defer/in_notification contract
	 * described in the table above (direct wakeup is not NMI-safe;
	 * tasklet scheduling is not safe from the sched_switch tracepoint).
	 */
	if (OUTPUT_signal_full(outbuf)) {
		if (!defer) {
#if !(defined(CONFIG_PREEMPT_RT) || defined(CONFIG_PREEMPT_RT_FULL))
			SEP_DRV_LOG_NOTIFICATION_TRACE(
				in_notification,
				"Choosing direct wakeup approach.");
#if !defined(DRV_SEP_ACRN_ON)
			wake_up_interruptible_sync(&BUFFER_DESC_queue(bd));
#endif
			OUTPUT_signal_full(outbuf) = FALSE;
#endif
		} else {
			if (!OUTPUT_tasklet_queued(outbuf)) {
				if (cpu_idx == -1) {
					this_cpu = CONTROL_THIS_CPU();
				} else {
					this_cpu = cpu_idx;
				}
				if (!in_notification) {
					SEP_DRV_LOG_NOTIFICATION_TRACE(
						in_notification,
						"Scheduling the tasklet on cpu %u.",
						this_cpu);
					OUTPUT_tasklet_queued(outbuf) = TRUE;
#if !defined(DRV_SEP_ACRN_ON)
					tasklet_schedule(&CPU_STATE_nmi_tasklet(
						&pcb[this_cpu]));
#endif
				} else {
					/* One-time warning; 'cpt' then counts
					 * every deferred-to-interrupt flush.
					 */
					static U32 cpt;

					if (!cpt) {
						SEP_DRV_LOG_WARNING(
							"Using interrupt-driven sideband buffer flushes for extra safety.");
						SEP_DRV_LOG_WARNING(
							"This may result in fewer context switches being recorded.");
					}
					SEP_DRV_LOG_TRACE(
						"Lost context switch information (for the %uth time).",
						++cpt);
				}
			}
		}
	}

	SEP_DRV_LOG_NOTIFICATION_TRACE_OUT(in_notification, "Res: %p.", outloc);
	return outloc;
}

/* ------------------------------------------------------------------------- */
/*!
 *
 * @fn int OUTPUT_Buffer_Fill (BUFFER_DESC buf,
 *                             PVOID  data,
 *                             U16   size,
 *                             U8    in_notification)
 *
 * @brief Place a record (can be module, marker, etc) in a buffer
 *
 * @param data - pointer to a buffer to copy
 * @param size - size of the buffer to cpu
 * @param in_notification - 1 if in notification, 0 if not
 *
 * @return number of bytes copied into buffer
 *
 * Start by ensuring that output buffer space is available.
 * If so, then copy the input data to the output buffer and make the necessary
 * adjustments to manage the output buffers.
 * If not, signal the read event for this buffer and get another buffer.
 *
 * Special Notes:
 *
 */
static int output_Buffer_Fill(BUFFER_DESC bd, PVOID data, U16 size,
			      U8 in_notification)
{
	char *outloc;

	SEP_DRV_LOG_NOTIFICATION_TRACE_IN(
		in_notification, "Bd: %p, data: %p, size: %u.", bd, data, size);

	/* Reserve (non-deferred) and copy; a NULL reservation means the
	 * record is dropped and 0 is reported to the caller.
	 */
	outloc = (char *)OUTPUT_Reserve_Buffer_Space(bd, size,
						     FALSE, in_notification, -1);
	if (outloc) {
		memcpy(outloc, data, size);
		SEP_DRV_LOG_NOTIFICATION_TRACE_OUT(in_notification,
						   "Res: %d (outloc).", size);
		return size;
	}

	SEP_DRV_LOG_NOTIFICATION_TRACE_OUT(in_notification,
					   "Res: 0 (!outloc).");
	return 0;
}

/* ------------------------------------------------------------------------- */
/*!
 * @fn int OUTPUT_Module_Fill (PVOID data,
 *                             U16 size,
 *                             U8 in_notification)
 *
 * @brief Place a module record in the module output buffer
 *
 * @param data - pointer to a buffer to copy
 * @param size - size of the buffer to copy
 * @param in_notification - 1 if in notification, 0 if not
 *
 * @return number of bytes copied into buffer
 *
 *
 */
int OUTPUT_Module_Fill(PVOID data, U16 size, U8 in_notification)
{
	int ret_size;
	OUTPUT outbuf = &BUFFER_DESC_outbuf(module_buf);

	SEP_DRV_LOG_NOTIFICATION_TRACE_IN(in_notification,
					  "Data: %p, size: %u.", data, size);

	/* The module buffer is shared across CPUs; serialize writers. */
	spin_lock(&OUTPUT_buffer_lock(outbuf));
	ret_size = output_Buffer_Fill(module_buf, data, size, in_notification);
	spin_unlock(&OUTPUT_buffer_lock(outbuf));

	SEP_DRV_LOG_NOTIFICATION_TRACE_OUT(in_notification, "Res: %d.",
					   ret_size);
	return ret_size;
}

/* ------------------------------------------------------------------------- */
/*!
 * @fn ssize_t output_Read(struct file *filp,
 *                         char *buf,
 *                         size_t count,
 *                         loff_t *f_pos,
 *                         BUFFER_DESC kernel_buf)
 *
 * @brief Return a sample buffer to user-mode. If not full or flush, wait
 *
 * @param *filp a file pointer
 * @param *buf a sampling buffer
 * @param count size of the user's buffer
 * @param f_pos file pointer (current offset in bytes)
 * @param kernel_buf the kernel output buffer structure
 *
 * @return number of bytes read. zero indicates end of file. Neg means error
 *
 * Place no more than count bytes into the user's buffer.
 * Block if unavailable on "BUFFER_DESC_queue(buf)"
 *
 * Special Notes:
 *
 */
static ssize_t output_Read(struct file *filp, char __user *buf, size_t count,
			   loff_t *f_pos, BUFFER_DESC kernel_buf)
{
	ssize_t to_copy = 0;
	ssize_t uncopied;
	OUTPUT outbuf = &BUFFER_DESC_outbuf(kernel_buf);
	U32 cur_buf, i;
	/* Buffer is filled by output_fill_modules.
	 */

	SEP_DRV_LOG_TRACE_IN(
		"Filp: %p, buf: %p, count: %u, f_pos: %p, kernel_buf: %p.",
		filp, buf, (U32)count, f_pos, kernel_buf);

	/*
	 * Scan the ring of output buffers (starting after the one currently
	 * being written) for one that is marked full.
	 */
	cur_buf = OUTPUT_current_buffer(outbuf);
	if (!DRV_CONFIG_enable_cp_mode(drv_cfg) || flush) {
		for (i = 0; i < OUTPUT_NUM_BUFFERS; i++) {
			//iterate through all buffers
			cur_buf++;
			if (cur_buf >= OUTPUT_NUM_BUFFERS) {
				cur_buf = 0;
			} //circularly
			to_copy = OUTPUT_buffer_full(outbuf, cur_buf);
			if (to_copy != 0) {
				/*
				 * When flushing in CP mode, the buffer being
				 * drained may be the one currently written:
				 * advance the writer to the next buffer first.
				 */
				if (flush &&
				    DRV_CONFIG_enable_cp_mode(drv_cfg) &&
				    cur_buf == OUTPUT_current_buffer(outbuf)) {
					OUTPUT_current_buffer(outbuf)++;
					if (OUTPUT_current_buffer(outbuf) >=
					    OUTPUT_NUM_BUFFERS) {
						OUTPUT_current_buffer(outbuf) =
							0;
					}
					OUTPUT_remaining_buffer_size(outbuf) =
						OUTPUT_total_buffer_size(
							outbuf);
				}
				break;
			}
		}
	}

	SEP_DRV_LOG_TRACE("buffer %d has %d bytes ready.", (S32)cur_buf,
			  (S32)to_copy);
	if (!flush && to_copy == 0) {
		/* Nothing ready yet: block until data arrives or flush. */
		unsigned long delay = msecs_to_jiffies(1000);

		while (1) {
			/*
			 * NOTE(review): wait_event_interruptible_timeout()
			 * returns a signed long which can be -ERESTARTSYS;
			 * storing it in a U32 and comparing against the
			 * positive ERESTARTSYS constant means a signal
			 * interruption takes the "break" path below rather
			 * than "continue" — confirm this is intended.
			 */
			U32 res = wait_event_interruptible_timeout(
				BUFFER_DESC_queue(kernel_buf),
				flush || (OUTPUT_buffer_full(outbuf, cur_buf) &&
					  !DRV_CONFIG_enable_cp_mode(drv_cfg)),
				delay);

			if (GET_DRIVER_STATE() == DRV_STATE_TERMINATING) {
				SEP_DRV_LOG_INIT(
					"Switched to TERMINATING while waiting for BUFFER_DESC_queue!");
				break;
			}

			if (res == ERESTARTSYS || res == 0) {
				SEP_DRV_LOG_TRACE(
					"Wait_event_interruptible_timeout(BUFFER_DESC_queue): %u.",
					res);
				continue;
			}

			break;
		}

		if (DRV_CONFIG_enable_cp_mode(drv_cfg)) {
			// reset the current buffer index if in CP mode
			cur_buf = OUTPUT_current_buffer(outbuf);
			for (i = 0; i < OUTPUT_NUM_BUFFERS;
			     i++) { //iterate through all buffers
				cur_buf++;
				if (cur_buf >= OUTPUT_NUM_BUFFERS) {
					cur_buf = 0;
				} //circularly
				to_copy = OUTPUT_buffer_full(outbuf, cur_buf);
				if (to_copy != 0) {
					if (flush &&
					    DRV_CONFIG_enable_cp_mode(
						    drv_cfg) &&
					    cur_buf == OUTPUT_current_buffer(
							       outbuf)) {
						OUTPUT_current_buffer(outbuf)++;
						if (OUTPUT_current_buffer(
							    outbuf) >=
						    OUTPUT_NUM_BUFFERS) {
							OUTPUT_current_buffer(
								outbuf) = 0;
						}
						OUTPUT_remaining_buffer_size(
							outbuf) =
							OUTPUT_total_buffer_size(
								outbuf);
					}
					break;
				}
			}
		}
		SEP_DRV_LOG_TRACE("Get to copy %d.", (S32)cur_buf);
		to_copy = OUTPUT_buffer_full(outbuf, cur_buf);
		SEP_DRV_LOG_TRACE(
			"output_Read awakened, buffer %d has %d bytes.",
			cur_buf, (int)to_copy);
	}

	/* Ensure that the user's buffer is large enough */
	if (to_copy > count) {
		SEP_DRV_LOG_ERROR_TRACE_OUT(
			"OS_NO_MEM (user buffer is too small!).");
		return OS_NO_MEM;
	}

	/* Copy data to user space. Note that we use cur_buf as the source */
	if (GET_DRIVER_STATE() != DRV_STATE_TERMINATING) {
		uncopied = copy_to_user(buf, OUTPUT_buffer(outbuf, cur_buf),
					to_copy);
		/* Mark the buffer empty */
		OUTPUT_buffer_full(outbuf, cur_buf) = 0;
		*f_pos += to_copy - uncopied;
		if (uncopied) {
			SEP_DRV_LOG_ERROR_TRACE_OUT(
				"Res: %u (only copied %u of %u bytes!).",
				(U32)(to_copy - uncopied), (U32)to_copy,
				(U32)uncopied);
			return (to_copy - uncopied);
		}
	} else {
		to_copy = 0;
		SEP_DRV_LOG_TRACE("To copy set to 0.");
	}

	// At end-of-file, decrement the count of active buffer writers

	if (to_copy == 0) {
		DRV_BOOL flush_val = atomic_dec_and_test(&flush_writers);
		SEP_DRV_LOG_TRACE("Decremented flush_writers.");
		if (flush_val == TRUE) {
			/* Last writer drained: release OUTPUT_Flush(). */
			wake_up_interruptible_sync(&flush_queue);
		}
	}

	SEP_DRV_LOG_TRACE_OUT("Res: %u.", (U32)to_copy);
	return to_copy;
}

/* ------------------------------------------------------------------------- */
/*!
 * @fn ssize_t OUTPUT_Module_Read(struct file *filp,
 *                                char *buf,
 *                                size_t count,
 *                                loff_t *f_pos)
 *
 * @brief Return a module buffer to user-mode.
If not full or flush, wait
 *
 * @param *filp a file pointer
 * @param *buf a sampling buffer
 * @param count size of the user's buffer
 * @param f_pos file pointer (current offset in bytes)
 * @param buf the kernel output buffer structure
 *
 * @return number of bytes read. zero indicates end of file. Neg means error
 *
 * Place no more than count bytes into the user's buffer.
 * Block on "BUFFER_DESC_queue(kernel_buf)" if buffer isn't full.
 *
 * Special Notes:
 *	Thin wrapper: delegates to output_Read() with the global module
 *	buffer descriptor.
 *
 */
ssize_t OUTPUT_Module_Read(struct file *filp, char __user *buf, size_t count,
			   loff_t *f_pos)
{
	ssize_t res;

	SEP_DRV_LOG_TRACE_IN("");
	SEP_DRV_LOG_TRACE("Read request for modules on minor.");

	res = output_Read(filp, buf, count, f_pos, module_buf);

	SEP_DRV_LOG_TRACE_OUT("Res: %u.", (U32)res);
	return res;
}

/* ------------------------------------------------------------------------- */
/*!
 * @fn ssize_t OUTPUT_Sample_Read(struct file *filp,
 *                                char *buf,
 *                                size_t count,
 *                                loff_t *f_pos)
 *
 * @brief Return a sample buffer to user-mode. If not full or flush, wait
 *
 * @param *filp a file pointer
 * @param *buf a sampling buffer
 * @param count size of the user's buffer
 * @param f_pos file pointer (current offset in bytes)
 * @param buf the kernel output buffer structure
 *
 * @return number of bytes read. zero indicates end of file. Neg means error
 *
 * Place no more than count bytes into the user's buffer.
 * Block on "BUFFER_DESC_queue(kernel_buf)" if buffer isn't full.
 *
 * Special Notes:
 *
 */
ssize_t OUTPUT_Sample_Read(struct file *filp, char __user *buf, size_t count,
			   loff_t *f_pos)
{
	int i;
	ssize_t res;

	SEP_DRV_LOG_TRACE_IN("");

	/* The minor number selects which per-CPU buffer to drain. */
	i = iminor(filp->DRV_F_DENTRY
			   ->d_inode); // kernel pointer - not user pointer
	SEP_DRV_LOG_TRACE("Read request for samples on minor %d.", i);
	res = output_Read(filp, buf, count, f_pos, &(cpu_buf[i]));

	SEP_DRV_LOG_TRACE_OUT("Res: %u.", (U32)res);
	return res;
}

/* ------------------------------------------------------------------------- */
/*!
 * @fn ssize_t OUTPUT_UncSample_Read(struct file *filp,
 *                                   char *buf,
 *                                   size_t count,
 *                                   loff_t *f_pos)
 *
 * @brief Return an uncore sample buffer to user-mode. If not full or flush,
 *        wait
 *
 * @param *filp a file pointer
 * @param *buf a sampling buffer
 * @param count size of the user's buffer
 * @param f_pos file pointer (current offset in bytes)
 * @param buf the kernel output buffer structure
 *
 * @return number of bytes read. zero indicates end of file. Neg means error
 *
 * Place no more than count bytes into the user's buffer.
 * Block on "BUFFER_DESC_queue(kernel_buf)" if buffer isn't full.
 *
 * Special Notes:
 *	(Header previously duplicated OUTPUT_Sample_Read's @fn line; fixed.)
 *
 */
ssize_t OUTPUT_UncSample_Read(struct file *filp, char __user *buf,
			      size_t count, loff_t *f_pos)
{
	int i;
	ssize_t res = 0;

	SEP_DRV_LOG_TRACE_IN("");

	/* The minor number selects which per-package uncore buffer to drain. */
	i = iminor(filp->DRV_F_DENTRY
			   ->d_inode); // kernel pointer - not user pointer
	SEP_DRV_LOG_TRACE("Read request for samples on minor %d.", i);
	if (unc_buf_init) {
		res = output_Read(filp, buf, count, f_pos, &(unc_buf[i]));
	}

	SEP_DRV_LOG_TRACE_OUT("Res: %u.", (U32)res);
	return res;
}

/* ------------------------------------------------------------------------- */
/*!
 * @fn ssize_t OUTPUT_SidebandInfo_Read(struct file *filp,
 *                                      char *buf,
 *                                      size_t count,
 *                                      loff_t *f_pos)
 *
 * @brief Return a sideband info buffer to user-mode.
If not full or flush, wait
 *
 * @param *filp a file pointer
 * @param *buf a sideband info buffer
 * @param count size of the user's buffer
 * @param f_pos file pointer (current offset in bytes)
 * @param buf the kernel output buffer structure
 *
 * @return number of bytes read. zero indicates end of file. Neg means error
 *
 * Place no more than count bytes into the user's buffer.
 * Block on "BUFFER_DESC_queue(kernel_buf)" if buffer isn't full.
 *
 * Special Notes:
 *	Only active when multi-PEBS or context-switch collection is enabled;
 *	otherwise returns 0 immediately.
 *
 */
ssize_t OUTPUT_SidebandInfo_Read(struct file *filp, char __user *buf,
				 size_t count, loff_t *f_pos)
{
	int i;
	ssize_t res = 0;

	SEP_DRV_LOG_TRACE_IN("");

	/* The minor number selects which per-CPU sideband buffer to drain. */
	i = iminor(filp->DRV_F_DENTRY
			   ->d_inode); // kernel pointer - not user pointer
	SEP_DRV_LOG_TRACE("Read request for pebs process info on minor %d.", i);
	if (multi_pebs_enabled || sched_switch_enabled) {
		res = output_Read(filp, buf, count, f_pos,
				  &(cpu_sideband_buf[i]));
	}

	SEP_DRV_LOG_TRACE_OUT("Res: %u.", (U32)res);
	return res;
}

/*
 * @fn output_Initialized_Buffers()
 *
 * @result OUTPUT
 * @param BUFFER_DESC desc - descriptor for the buffer being initialized
 * @param U32 factor - multiplier for OUTPUT_BUFFER_SIZE.
 *                     1 for cpu buffers, 2 for module buffers.
+ * + * @brief Allocate, initialize, and return an output data structure + * + * Special Notes: + * Multiple (OUTPUT_NUM_BUFFERS) buffers will be allocated + * Each buffer is of size (OUTPUT_BUFFER_SIZE) + * Each field in the buffer is initialized + * The event queue for the OUTPUT is initialized + * + */ +static BUFFER_DESC output_Initialized_Buffers(BUFFER_DESC desc, U32 factor) +{ + OUTPUT outbuf; + int j; + + SEP_DRV_LOG_TRACE_IN("Desc: %p, factor: %u.", desc, factor); + + /* + * Allocate the BUFFER_DESC, then allocate its buffers + */ + if (desc == NULL) { + desc = (BUFFER_DESC)CONTROL_Allocate_Memory( + sizeof(BUFFER_DESC_NODE)); + if (desc == NULL) { + SEP_DRV_LOG_ERROR_TRACE_OUT( + "Res: NULL (failed allocation for desc!)."); + return NULL; + } + } + outbuf = &(BUFFER_DESC_outbuf(desc)); + spin_lock_init(&OUTPUT_buffer_lock(outbuf)); + for (j = 0; j < OUTPUT_NUM_BUFFERS; j++) { + if (OUTPUT_buffer(outbuf, j) == NULL) { + OUTPUT_buffer(outbuf, j) = CONTROL_Allocate_Memory( + (size_t)OUTPUT_BUFFER_SIZE * factor); + } + OUTPUT_buffer_full(outbuf, j) = 0; + if (!OUTPUT_buffer(outbuf, j)) { + /*return NULL to tell the caller that allocation failed*/ + SEP_DRV_LOG_ERROR_TRACE_OUT( + "Res: NULL (failed alloc for OUTPUT_buffer(output, %d)!).", + j); + CONTROL_Free_Memory(desc); + return NULL; + } + } + /* + * Initialize the remaining fields in the BUFFER_DESC + */ + OUTPUT_current_buffer(outbuf) = 0; + OUTPUT_signal_full(outbuf) = FALSE; + OUTPUT_remaining_buffer_size(outbuf) = OUTPUT_BUFFER_SIZE * factor; + OUTPUT_total_buffer_size(outbuf) = OUTPUT_BUFFER_SIZE * factor; + OUTPUT_tasklet_queued(outbuf) = FALSE; + init_waitqueue_head(&BUFFER_DESC_queue(desc)); + + SEP_DRV_LOG_TRACE_OUT("Res: %p.", desc); + return desc; +} + +/* ------------------------------------------------------------------------- */ +/*! + * @fn VOID output_NMI_Sample_Buffer ( + * ) + * + * @brief Callback from NMI tasklet. 
The function checks if any buffers
 *        are full, and if full, signals the reader threads.
 *
 * @param none
 *
 * @return NONE
 *
 * Special Notes:
 *	This callback was added to handle out-of-band event delivery
 *	when running in NMI mode
 */
static void output_NMI_Sample_Buffer(unsigned long data)
{
	U32 cpu_id;
	OUTPUT outbuf;

	SEP_DRV_LOG_NOTIFICATION_IN("Data: %u.", (U32)data);

	/* data == -1 means "use the CPU the tasklet runs on". */
	if (data == (unsigned long)-1) {
		cpu_id = CONTROL_THIS_CPU();
	} else {
		cpu_id = data;
	}

	/* Wake the sample-buffer reader if its buffer was flagged full. */
	if (cpu_buf) {
		outbuf = &BUFFER_DESC_outbuf(&cpu_buf[cpu_id]);
		if (outbuf && OUTPUT_signal_full(outbuf)) {
			wake_up_interruptible_sync(
				&BUFFER_DESC_queue(&cpu_buf[cpu_id]));
			OUTPUT_signal_full(outbuf) = FALSE;
			OUTPUT_tasklet_queued(outbuf) = FALSE;
		}
	}

	/* Same for the per-CPU sideband buffer. */
	if (cpu_sideband_buf) {
		outbuf = &BUFFER_DESC_outbuf(&cpu_sideband_buf[cpu_id]);
		if (outbuf && OUTPUT_signal_full(outbuf)) {
			wake_up_interruptible_sync(
				&BUFFER_DESC_queue(&cpu_sideband_buf[cpu_id]));
			OUTPUT_signal_full(outbuf) = FALSE;
			OUTPUT_tasklet_queued(outbuf) = FALSE;
		}
	}

	SEP_DRV_LOG_NOTIFICATION_OUT("");
}

/*
 * @fn extern void OUTPUT_Initialize(void)
 *
 * @returns OS_STATUS
 * @brief Allocate, initialize, and return all output data structure
 *
 * Special Notes:
 *	Initialize the output structures.
 *	For each CPU in the system, allocate the output buffers.
 *	Initialize a module buffer and temp file to hold module information
 *	Initialize the read queues for each sample buffer
 *
 */
OS_STATUS OUTPUT_Initialize(void)
{
	BUFFER_DESC unused;
	S32 i;
	OS_STATUS status = OS_SUCCESS;

	SEP_DRV_LOG_TRACE_IN("");

	flush = 0;
	/* Buffer size changed since last session: release old buffers first. */
	if (saved_buffer_size != OUTPUT_BUFFER_SIZE) {
		if (saved_buffer_size > 0) {
			OUTPUT_Destroy();
		}
		saved_buffer_size = OUTPUT_BUFFER_SIZE;
	}
	for (i = 0; i < GLOBAL_STATE_num_cpus(driver_state); i++) {
		unused = output_Initialized_Buffers(&cpu_buf[i], 1);
		if (!unused) {
			OUTPUT_Destroy();
			SEP_DRV_LOG_ERROR_TRACE_OUT(
				"OS_NO_MEM (failed to allocate cpu output buffers!).");
			return OS_NO_MEM;
		}
	}

	if (multi_pebs_enabled || sched_switch_enabled) {
		for (i = 0; i < GLOBAL_STATE_num_cpus(driver_state); i++) {
			unused = output_Initialized_Buffers(
				&cpu_sideband_buf[i], 1);
			if (!unused) {
				OUTPUT_Destroy();
				SEP_DRV_LOG_ERROR_TRACE_OUT(
					"OS_NO_MEM (failed to allocate pebs process info output buffers!).");
				return OS_NO_MEM;
			}
		}
	}

	/*
	 * Just need one module buffer
	 *
	 * NOTE(review): the returned descriptor is not stored back into
	 * module_buf; if module_buf were NULL here, the freshly allocated
	 * descriptor would leak. Presumably module_buf is always
	 * pre-allocated before this point — confirm against the rest of
	 * the file.
	 */
	unused = output_Initialized_Buffers(module_buf, MODULE_BUFF_SIZE);
	if (!unused) {
		OUTPUT_Destroy();
		SEP_DRV_LOG_ERROR_TRACE_OUT(
			"OS_NO_MEM (failed to create module output buffers!).");
		return OS_NO_MEM;
	}

	SEP_DRV_LOG_TRACE("Set up the tasklet for NMI.");
	for (i = 0; i < GLOBAL_STATE_num_cpus(driver_state); i++) {
#if !defined(DRV_SEP_ACRN_ON)
		tasklet_init(&CPU_STATE_nmi_tasklet(&pcb[i]),
			     output_NMI_Sample_Buffer, (unsigned long)-1);
#else
		tasklet_init(&CPU_STATE_nmi_tasklet(&pcb[i]),
			     output_NMI_Sample_Buffer, (unsigned long)i);
#endif
	}

	SEP_DRV_LOG_TRACE_OUT("Res: %u.", (U32)status);
	return status;
}

#if defined(DRV_USE_TASKLET_WORKAROUND)
static struct tasklet_struct dummy_tasklet;

/*
 * @fn extern void output_tasklet_waker (PVOID ptr)
 *
 * @returns None
 * @brief Schedules a dummy tasklet to wake up the tasklet handler on the
current core
 *
 * Special Notes:
 *	Workaround for a rare situation where some tasklets are scheduled, but the core's TASKLET softirq bit was reset.
 *	[NB: this may be caused by a kernel bug; so far, this issue was only observed on kernel version 3.10.0-123.el7]
 *	Scheduling a (new) tasklet raises a new softirq, and gives 'forgotten' tasklets another chance to be processed.
 *	This workaround is not fool-proof: if this new tasklet gets 'forgotten' too, the driver will get stuck in the
 *	Clean Up routine until it gets processed (thanks to an external event raising the TASKLET softirq on this core),
 *	which might never happen.
 *
 */
static void output_tasklet_waker(PVOID ptr)
{
	SEP_DRV_LOG_TRACE_IN("");
	tasklet_schedule(&dummy_tasklet);
	SEP_DRV_LOG_TRACE_OUT("");
}

/*
 * @fn extern void output_dummy_tasklet_handler (unsigned long dummy)
 *
 * @returns None
 * @brief Dummy tasklet handler.
 *
 * Special Notes:
 *	If this gets executed, the aforementioned workaround was successful.
 *
 */
static void output_dummy_tasklet_handler(unsigned long dummy)
{
	SEP_DRV_LOG_NOTIFICATION_IN("Workaround was successful!");
	SEP_DRV_LOG_NOTIFICATION_OUT("");
}
#endif

/*
 * @fn extern void OUTPUT_Cleanup (void)
 *
 * @returns None
 * @brief Cleans up NMI tasklets if needed
 *
 * Special Notes:
 *	Waits until all NMI tasklets are complete.
 *
 */
void OUTPUT_Cleanup(void)
{
	SEP_DRV_LOG_TRACE_IN("");

	if (!pcb) {
		SEP_DRV_LOG_TRACE_OUT("Early exit (!pcb).");
		return;
	} else {
		int i;
		SEP_DRV_LOG_TRACE("Killing all NMI tasklets...");

		for (i = 0; i < GLOBAL_STATE_num_cpus(driver_state); i++) {
			SEP_DRV_LOG_TRACE("Killing NMI tasklet %d...", i);

			/* Non-zero state means the tasklet is still pending/running. */
			if (CPU_STATE_nmi_tasklet(&pcb[i]).state) {
#if defined(DRV_USE_TASKLET_WORKAROUND)
				SEP_DRV_LOG_ERROR(
					"Tasklet %d is probably stuck! Trying workaround...",
					i);
				tasklet_init(&dummy_tasklet,
					     output_dummy_tasklet_handler, 0);
				CONTROL_Invoke_Cpu(i, output_tasklet_waker,
						   NULL);
				tasklet_kill(&dummy_tasklet);
				SEP_DRV_LOG_ERROR(
					"Workaround was successful for tasklet %d.",
					i);
#else
				SEP_DRV_LOG_ERROR(
					"Tasklet %d may be stuck. Try to set USE_TASKLET_WORKAROUND=YES in the Makefile if you observe unexpected behavior (e.g. cannot terminate a collection or initiate a new one).",
					i);
#endif
			}

			tasklet_kill(&CPU_STATE_nmi_tasklet(&pcb[i]));
		}
	}

	SEP_DRV_LOG_TRACE_OUT("");
}

/*
 * @fn extern void OUTPUT_Initialize_UNC()
 *
 * @returns OS_STATUS
 * @brief Allocate, initialize, and return all output data structure
 *
 * Special Notes:
 *	Initialize the output structures.
 *	For each CPU in the system, allocate the output buffers.
 *	Initialize a module buffer and temp file to hold module information
 *	Initialize the read queues for each sample buffer
 *
 */
OS_STATUS OUTPUT_Initialize_UNC(void)
{
	BUFFER_DESC unused;
	int i;
	OS_STATUS status = OS_SUCCESS;

	SEP_DRV_LOG_TRACE_IN("");

	/* One output buffer per package for uncore sampling. */
	for (i = 0; i < num_packages; i++) {
		unused = output_Initialized_Buffers(&unc_buf[i], 1);
		if (!unused) {
			OUTPUT_Destroy();
			SEP_DRV_LOG_ERROR_TRACE_OUT(
				"Failed to allocate package output buffers!");
			return OS_NO_MEM;
		}
	}

	SEP_DRV_LOG_TRACE_OUT("Res: %u.", (U32)status);
	return status;
}

/*
 * @fn OS_STATUS OUTPUT_Flush()
 *
 * @brief Flush the module buffers and sample buffers
 *
 * @return OS_STATUS
 *
 * For each CPU in the system, set buffer full to the byte count to flush.
 * Flush the modules buffer, as well.
 *
 */
int OUTPUT_Flush(void)
{
	int i;
	int writers = 0;
	OUTPUT outbuf;

	SEP_DRV_LOG_TRACE_IN("");

	/*
	 * Flush all remaining data to files
	 * set up a flush event
	 */
	init_waitqueue_head(&flush_queue);
	SEP_DRV_LOG_TRACE(
		"Waiting for %d writers.",
		(GLOBAL_STATE_num_cpus(driver_state) + OTHER_C_DEVICES));
	/* Count active writers and mark their partial buffers as "full". */
	for (i = 0; i < GLOBAL_STATE_num_cpus(driver_state); i++) {
		if (CPU_STATE_initial_mask(&pcb[i]) == 0) {
			continue;
		}
		outbuf = &(cpu_buf[i].outbuf);
		writers += 1;

		OUTPUT_buffer_full(outbuf, OUTPUT_current_buffer(outbuf)) =
			OUTPUT_total_buffer_size(outbuf) -
			OUTPUT_remaining_buffer_size(outbuf);
	}

	if (unc_buf_init) {
		for (i = 0; i < num_packages; i++) {
			outbuf = &(unc_buf[i].outbuf);
			writers += 1;

			OUTPUT_buffer_full(outbuf,
					   OUTPUT_current_buffer(outbuf)) =
				OUTPUT_total_buffer_size(outbuf) -
				OUTPUT_remaining_buffer_size(outbuf);
		}
	}

	if (multi_pebs_enabled || sched_switch_enabled) {
		for (i = 0; i < GLOBAL_STATE_num_cpus(driver_state); i++) {
			if (CPU_STATE_initial_mask(&pcb[i]) == 0) {
				continue;
			}
			outbuf = &(cpu_sideband_buf[i].outbuf);
			writers += 1;

			OUTPUT_buffer_full(outbuf,
					   OUTPUT_current_buffer(outbuf)) =
				OUTPUT_total_buffer_size(outbuf) -
				OUTPUT_remaining_buffer_size(outbuf);
		}
	}

	atomic_set(&flush_writers, writers + OTHER_C_DEVICES);
	// Flip the switch to terminate the output threads
	// Do not do this earlier, as threads may terminate before all the data is flushed
	flush = 1;
	for (i = 0; i < GLOBAL_STATE_num_cpus(driver_state); i++) {
		if (CPU_STATE_initial_mask(&pcb[i]) == 0) {
			continue;
		}
		outbuf = &BUFFER_DESC_outbuf(&cpu_buf[i]);
		OUTPUT_buffer_full(outbuf, OUTPUT_current_buffer(outbuf)) =
			OUTPUT_total_buffer_size(outbuf) -
			OUTPUT_remaining_buffer_size(outbuf);
		wake_up_interruptible_sync(&BUFFER_DESC_queue(&cpu_buf[i]));
	}

	if (unc_buf_init) {
		for (i = 0; i < num_packages; i++) {
			outbuf = &BUFFER_DESC_outbuf(&unc_buf[i]);
			OUTPUT_buffer_full(outbuf,
					   OUTPUT_current_buffer(outbuf)) =
				OUTPUT_total_buffer_size(outbuf) -
				OUTPUT_remaining_buffer_size(outbuf);
			wake_up_interruptible_sync(
				&BUFFER_DESC_queue(&unc_buf[i]));
		}
	}

	if (multi_pebs_enabled || sched_switch_enabled) {
		for (i = 0; i < GLOBAL_STATE_num_cpus(driver_state); i++) {
			if (CPU_STATE_initial_mask(&pcb[i]) == 0) {
				continue;
			}
			outbuf = &BUFFER_DESC_outbuf(&cpu_sideband_buf[i]);
			OUTPUT_buffer_full(outbuf,
					   OUTPUT_current_buffer(outbuf)) =
				OUTPUT_total_buffer_size(outbuf) -
				OUTPUT_remaining_buffer_size(outbuf);
			wake_up_interruptible_sync(
				&BUFFER_DESC_queue(&cpu_sideband_buf[i]));
		}
	}
	// Flush all data from the module buffers

	outbuf = &BUFFER_DESC_outbuf(module_buf);

	OUTPUT_buffer_full(outbuf, OUTPUT_current_buffer(outbuf)) =
		OUTPUT_total_buffer_size(outbuf) -
		OUTPUT_remaining_buffer_size(outbuf);

	SEP_DRV_LOG_TRACE("Waking up module_queue.");
	wake_up_interruptible_sync(&BUFFER_DESC_queue(module_buf));

	//Wait for buffers to empty
	while (atomic_read(&flush_writers) != 0) {
		unsigned long delay;
		U32 res;
		delay = msecs_to_jiffies(1000);
		/*
		 * NOTE(review): same U32 vs -ERESTARTSYS pattern as in
		 * output_Read() — a signal interruption will not match
		 * "res == ERESTARTSYS"; confirm intended.
		 */
		res = wait_event_interruptible_timeout(
			flush_queue, atomic_read(&flush_writers) == 0, delay);

		if (res == ERESTARTSYS || res == 0) {
			SEP_DRV_LOG_TRACE(
				"Wait_event_interruptible_timeout(flush_queue): %u, %u writers.",
				res, atomic_read(&flush_writers));
			continue;
		}
	}

	SEP_DRV_LOG_TRACE("Awakened from flush_queue.");
	flush = 0;

	SEP_DRV_LOG_TRACE_OUT("Res: 0.");
	return 0;
}

/*
 * @fn extern void OUTPUT_Destroy()
 *
 * @param buffer - seed name of the output file
 * @param len - length of the seed name
 * @returns OS_STATUS
 * @brief Deallocate output structures
 *
 * Special Notes:
 *	Free the module buffers
 *	For each CPU in the system, free the sampling buffers
 */
int OUTPUT_Destroy(void)
{
	int i, n;
	OUTPUT outbuf;

	SEP_DRV_LOG_TRACE_IN("");

	if (module_buf) {
		outbuf = &BUFFER_DESC_outbuf(module_buf);
		output_Free_Buffers(module_buf,
				    OUTPUT_total_buffer_size(outbuf));
	}

	if (cpu_buf != NULL) {
		n = GLOBAL_STATE_num_cpus(driver_state);
		for (i = 0; i < n; i++) {
			outbuf = &BUFFER_DESC_outbuf(&cpu_buf[i]);
			output_Free_Buffers(&cpu_buf[i],
					    OUTPUT_total_buffer_size(outbuf));
		}
	}

	if (unc_buf != NULL) {
		n = num_packages;
		for (i = 0; i < n; i++) {
			outbuf = &BUFFER_DESC_outbuf(&unc_buf[i]);
			output_Free_Buffers(&unc_buf[i],
					    OUTPUT_total_buffer_size(outbuf));
		}
	}

	if (cpu_sideband_buf != NULL) {
		n = GLOBAL_STATE_num_cpus(driver_state);
		for (i = 0; i < n; i++) {
			outbuf = &BUFFER_DESC_outbuf(&cpu_sideband_buf[i]);
			output_Free_Buffers(&cpu_sideband_buf[i],
					    OUTPUT_total_buffer_size(outbuf));
		}
	}

	SEP_DRV_LOG_TRACE_OUT("Res: 0.");
	return 0;
}
diff --git a/drivers/platform/x86/sepdk/sep/pci.c b/drivers/platform/x86/sepdk/sep/pci.c
new file mode 100755
index 0000000000000..12a93804975c3
--- /dev/null
+++ b/drivers/platform/x86/sepdk/sep/pci.c
@@ -0,0 +1,661 @@
/* ****************************************************************************
 * Copyright(C) 2009-2018 Intel Corporation. All Rights Reserved.
 *
 * This file is part of SEP Development Kit
 *
 * SEP Development Kit is free software; you can redistribute it
 * and/or modify it under the terms of the GNU General Public License
 * version 2 as published by the Free Software Foundation.
 *
 * SEP Development Kit is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * As a special exception, you may use this file as part of a free software
 * library without restriction. Specifically, if other files instantiate
 * templates or use macros or inline functions from this file, or you
 * compile this file and link it with other files to produce an executable
 * this file does not by itself cause the resulting executable to be
 * covered by the GNU General Public License. This exception does not
 * however invalidate any other reasons why the executable file might be
 * covered by the GNU General Public License.
 * ****************************************************************************
 */

#include "lwpmudrv_defines.h"
/*
 * NOTE(review): the following angle-bracket header names appear to have been
 * stripped by text extraction (likely <linux/...> headers such as pci.h and
 * io.h) — restore from the upstream file.
 */
#include
#include
#include
#include
#include

#include "lwpmudrv_types.h"
#include "rise_errors.h"
#include "lwpmudrv_ecb.h"

#if defined(BUILD_CHIPSET)
#include "lwpmudrv_chipset.h"
#endif

#include "inc/lwpmudrv.h"
#include "inc/pci.h"
#include "inc/utility.h"

/* Cache of struct pci_bus pointers for bus numbers 0..MAX_BUSNO-1 (domain 0). */
static struct pci_bus *pci_buses[MAX_BUSNO];

/* ------------------------------------------------------------------------- */
/*!
 * @fn extern VOID PCI_Initialize(void)
 *
 * @param none
 *
 * @return none
 *
 * @brief Initializes the pci_buses array.
 *
 */
VOID PCI_Initialize(void)
{
	U32 i;
	U32 num_found_buses = 0;

	SEP_DRV_LOG_INIT_IN("Initializing pci_buses...");

	for (i = 0; i < MAX_BUSNO; i++) {
		pci_buses[i] = pci_find_bus(0, i);
		if (pci_buses[i]) {
			SEP_DRV_LOG_DETECTION("Found PCI bus 0x%x at %p.", i,
					      pci_buses[i]);
			num_found_buses++;
		}
		SEP_DRV_LOG_TRACE("pci_buses[%u]: %p.", i, pci_buses[i]);
	}

	SEP_DRV_LOG_INIT_OUT("Found %u buses.", num_found_buses);
}

/* ------------------------------------------------------------------------- */
/*!
+ * @fn extern U32 PCI_Read_U32(bus, device, function, offset) + * + * @param bus - target bus + * @param device - target device + * @param function - target function + * @param offset - target register offset + * + * @return Value at this location + * + * @brief Reads a U32 from PCI configuration space + * + */ +U32 PCI_Read_U32(U32 bus, U32 device, U32 function, U32 offset) +{ + U32 res = 0; + U32 devfn = (device << 3) | (function & 0x7); + + SEP_DRV_LOG_REGISTER_IN("Will read BDF(%x:%x:%x)[0x%x](4B)...", bus, + device, function, offset); + + if (bus < MAX_BUSNO && pci_buses[bus]) { + pci_buses[bus]->ops->read(pci_buses[bus], devfn, offset, 4, + &res); + } else { + SEP_DRV_LOG_ERROR( + "Could not read BDF(%x:%x:%x)[0x%x]: bus not found!", + bus, device, function, offset); + } + + SEP_DRV_LOG_REGISTER_OUT("Has read BDF(%x:%x:%x)[0x%x](4B): 0x%x.", bus, + device, function, offset, res); + return res; +} + +/* ------------------------------------------------------------------------- */ +/*! 
 * @fn extern U32 PCI_Read_U32_Valid(bus,device,function,offset,invalid_value)
 *
 * @param bus - target bus
 * @param device - target device
 * @param function - target function
 * @param offset - target register offset
 * @param invalid_value - value against which to compare PCI-obtained value
 *
 * @return Value at this location (if value != invalid_value), 0 otherwise
 *
 * @brief Reads a U32 from PCI configuration space
 *
 */
U32 PCI_Read_U32_Valid(U32 bus, U32 device, U32 function, U32 offset,
		       U32 invalid_value)
{
	U32 res = 0;
	U32 devfn = (device << 3) | (function & 0x7);

	SEP_DRV_LOG_REGISTER_IN("Will read BDF(%x:%x:%x)[0x%x](4B)...", bus,
				device, function, offset);

	if (bus < MAX_BUSNO && pci_buses[bus]) {
		pci_buses[bus]->ops->read(pci_buses[bus], devfn, offset, 4,
					  &res);
		/* A read matching the caller-supplied sentinel is reported as 0. */
		if (res == invalid_value) {
			res = 0;
			SEP_DRV_LOG_REGISTER_OUT(
				"Has read BDF(%x:%x:%x)[0x%x](4B): 0x%x(invalid value)",
				bus, device, function, offset, res);
		} else {
			SEP_DRV_LOG_REGISTER_OUT(
				"Has read BDF(%x:%x:%x)[0x%x](4B): 0x%x.", bus,
				device, function, offset, res);
		}
	} else {
		SEP_DRV_LOG_REGISTER_OUT(
			"Could not read BDF(%x:%x:%x)[0x%x]: bus not found!",
			bus, device, function, offset);
	}

	return res;
}

/* ------------------------------------------------------------------------- */
/*!
 * @fn extern U64 PCI_Read_U64(bus, device, function, offset)
 *
 * @param bus - target bus
 * @param device - target device
 * @param function - target function
 * @param offset - target register offset
 *
 * @return Value at this location
 *
 * @brief Reads a U64 from PCI configuration space
 *
 */
U64 PCI_Read_U64(U32 bus, U32 device, U32 function, U32 offset)
{
	U64 res = 0;
	U32 devfn = (device << 3) | (function & 0x7);

	SEP_DRV_LOG_REGISTER_IN("Will read BDF(%x:%x:%x)[0x%x](8B)...", bus,
				device, function, offset);

	if (bus < MAX_BUSNO && pci_buses[bus]) {
		/*
		 * Assemble the 64-bit value from two 32-bit config reads:
		 * low dword first, then high dword. Writing through
		 * (U32 *)&res assumes little-endian layout (x86).
		 */
		pci_buses[bus]->ops->read(pci_buses[bus], devfn, offset, 4,
					  (U32 *)&res);
		pci_buses[bus]->ops->read(pci_buses[bus], devfn, offset + 4, 4,
					  ((U32 *)&res) + 1);
	} else {
		SEP_DRV_LOG_ERROR(
			"Could not read BDF(%x:%x:%x)[0x%x]: bus not found!",
			bus, device, function, offset);
	}

	SEP_DRV_LOG_REGISTER_OUT("Has read BDF(%x:%x:%x)[0x%x](8B): 0x%llx.",
				 bus, device, function, offset, res);
	return res;
}

/* ------------------------------------------------------------------------- */
/*!
 * @fn extern U64 PCI_Read_U64_Valid(bus,device,function,offset,invalid_value)
 *
 * @param bus - target bus
 * @param device - target device
 * @param function - target function
 * @param offset - target register offset
 * @param invalid_value - value against which to compare PCI-obtained value
 *
 * @return Value at this location (if value != invalid_value), 0 otherwise
 *
 * @brief Reads a U64 from PCI configuration space
 *
 */
U64 PCI_Read_U64_Valid(U32 bus, U32 device, U32 function, U32 offset,
		       U64 invalid_value)
{
	U64 res = 0;
	U32 devfn = (device << 3) | (function & 0x7);

	SEP_DRV_LOG_REGISTER_IN("Will read BDF(%x:%x:%x)[0x%x](8B)...", bus,
				device, function, offset);

	if (bus < MAX_BUSNO && pci_buses[bus]) {
		/* Low dword, then high dword; assumes little-endian (x86). */
		pci_buses[bus]->ops->read(pci_buses[bus], devfn, offset, 4,
					  (U32 *)&res);
		pci_buses[bus]->ops->read(pci_buses[bus], devfn, offset + 4, 4,
					  ((U32 *)&res) + 1);

		/* A read matching the caller-supplied sentinel is reported as 0. */
		if (res == invalid_value) {
			res = 0;
			SEP_DRV_LOG_REGISTER_OUT(
				"Has read BDF(%x:%x:%x)[0x%x](8B): 0x%llx(invalid val)",
				bus, device, function, offset, res);
		} else {
			SEP_DRV_LOG_REGISTER_OUT(
				"Has read BDF(%x:%x:%x)[0x%x](8B): 0x%llx.",
				bus, device, function, offset, res);
		}
	} else {
		SEP_DRV_LOG_REGISTER_OUT(
			"Could not read BDF(%x:%x:%x)[0x%x]: bus not found!",
			bus, device, function, offset);
	}

	return res;
}

/* ------------------------------------------------------------------------- */
/*!
+ * @fn extern U32 PCI_Write_U32(bus, device, function, offset, value) + * + * @param bus - target bus + * @param device - target device + * @param function - target function + * @param offset - target register offset + * @param value - value to write + * + * @return 0 in case of success, 1 otherwise + * + * @brief Writes a U32 to PCI configuration space + * + */ +U32 PCI_Write_U32(U32 bus, U32 device, U32 function, U32 offset, + U32 value) +{ + U32 res = 0; + U32 devfn = (device << 3) | (function & 0x7); + + SEP_DRV_LOG_REGISTER_IN("Will write BDF(%x:%x:%x)[0x%x](4B): 0x%x...", + bus, device, function, offset, value); + + if (bus < MAX_BUSNO && pci_buses[bus]) { + pci_buses[bus]->ops->write(pci_buses[bus], devfn, offset, 4, + value); + SEP_DRV_LOG_REGISTER_OUT( + "Has written BDF(%x:%x:%x)[0x%x](4B): 0x%x.", bus, + device, function, offset, value); + } else { + SEP_DRV_LOG_ERROR( + "Could not write BDF(%x:%x:%x)[0x%x]: bus not found!", + bus, device, function, offset); + res = 1; + SEP_DRV_LOG_REGISTER_OUT( + "Failed to write BDF(%x:%x:%x)[0x%x](4B): 0x%x.", bus, + device, function, offset, value); + } + + return res; +} + +/* ------------------------------------------------------------------------- */ +/*! 
+ * @fn extern U32 PCI_Write_U64(bus, device, function, offset, value) + * + * @param bus - target bus + * @param device - target device + * @param function - target function + * @param offset - target register offset + * @param value - value to write + * + * @return 0 in case of success, 1 otherwise + * + * @brief Writes a U64 to PCI configuration space + * + */ +U32 PCI_Write_U64(U32 bus, U32 device, U32 function, U32 offset, + U64 value) +{ + U32 res = 0; + U32 devfn = (device << 3) | (function & 0x7); + + SEP_DRV_LOG_REGISTER_IN("Will write BDF(%x:%x:%x)[0x%x](8B): 0x%llx...", + bus, device, function, offset, value); + + if (bus < MAX_BUSNO && pci_buses[bus]) { + pci_buses[bus]->ops->write(pci_buses[bus], devfn, offset, 4, + (U32)value); + pci_buses[bus]->ops->write(pci_buses[bus], devfn, offset + 4, 4, + (U32)(value >> 32)); + SEP_DRV_LOG_REGISTER_OUT( + "Has written BDF(%x:%x:%x)[0x%x](8B): 0x%llx.", bus, + device, function, offset, value); + } else { + SEP_DRV_LOG_ERROR( + "Could not write BDF(%x:%x:%x)[0x%x]: bus not found!", + bus, device, function, offset); + res = 1; + SEP_DRV_LOG_REGISTER_OUT( + "Failed to write BDF(%x:%x:%x)[0x%x](8B): 0x%llx.", bus, + device, function, offset, value); + } + + return res; +} + +/* ------------------------------------------------------------------------- */ +/*! 
+ * @fn extern int PCI_Read_From_Memory_Address(addr, val) + * + * @param addr - physical address in mmio + * @param *value - value at this address + * + * @return status + * + * @brief Read memory mapped i/o physical location + * + */ +int PCI_Read_From_Memory_Address(U32 addr, U32 *val) +{ + U32 aligned_addr, offset, value; + PVOID base; + + SEP_DRV_LOG_TRACE_IN("Addr: %x, val_pointer: %p.", addr, val); + + if (addr <= 0) { + SEP_DRV_LOG_ERROR_TRACE_OUT("OS_INVALID (addr <= 0!)."); + return OS_INVALID; + } + + SEP_DRV_LOG_TRACE("Preparing to reading physical address: %x.", addr); + offset = addr & ~PAGE_MASK; + aligned_addr = addr & PAGE_MASK; + SEP_DRV_LOG_TRACE("Aligned physical address: %x, offset: %x.", + aligned_addr, offset); + + base = (PVOID)ioremap_nocache(aligned_addr, PAGE_SIZE); + if (base == NULL) { + SEP_DRV_LOG_ERROR_TRACE_OUT("OS_INVALID (mapping failed!)."); + return OS_INVALID; + } + + SEP_DRV_LOG_REGISTER_IN("Will read PCI address %u (mapped at %p).", + addr, base + offset); + value = readl((void __iomem *)(base + offset)); + SEP_DRV_LOG_REGISTER_OUT("Read PCI address %u (mapped at %p): %x.", + addr, base + offset, value); + + *val = value; + + iounmap((void __iomem *)base); + + SEP_DRV_LOG_TRACE_OUT("OS_SUCCESS."); + return OS_SUCCESS; +} + +/* ------------------------------------------------------------------------- */ +/*! 
+ * @fn extern int PCI_Write_To_Memory_Address(addr, val) + * + * @param addr - physical address in mmio + * @param value - value to be written + * + * @return status + * + * @brief Write to memory mapped i/o physical location + * + */ +int PCI_Write_To_Memory_Address(U32 addr, U32 val) +{ + U32 aligned_addr, offset; + PVOID base; + + SEP_DRV_LOG_TRACE_IN("Addr: %x, val: %x.", addr, val); + + if (addr <= 0) { + SEP_DRV_LOG_ERROR_TRACE_OUT("OS_INVALID (addr <= 0!)."); + return OS_INVALID; + } + + SEP_DRV_LOG_TRACE( + "Preparing to writing physical address: %x (val: %x).", addr, + val); + offset = addr & ~PAGE_MASK; + aligned_addr = addr & PAGE_MASK; + SEP_DRV_LOG_TRACE("Aligned physical address: %x, offset: %x (val: %x).", + aligned_addr, offset, val); + + base = (PVOID)ioremap_nocache(aligned_addr, PAGE_SIZE); + if (base == NULL) { + SEP_DRV_LOG_ERROR_TRACE_OUT("OS_INVALID (mapping failed!)."); + return OS_INVALID; + } + + SEP_DRV_LOG_REGISTER_IN("Will write PCI address %u (mapped at %p): %x.", + addr, base + offset, val); + writel(val, (void __iomem *)(base + offset)); + SEP_DRV_LOG_REGISTER_OUT("Wrote PCI address %u (mapped at %p): %x.", + addr, base + offset, val); + + iounmap((void __iomem *)base); + + SEP_DRV_LOG_TRACE_OUT("OS_SUCCESS."); + return OS_SUCCESS; +} + +/* ------------------------------------------------------------------------- */ +/*! 
+ * @fn extern U32 PCI_Map_Memory(SEP_MMIO_NODE *node, U64 phy_address, + * U64 map_size) + * + * @param node - MAP NODE to use + * @param phy_address - Address to be mapped + * @param map_size - Amount of memory to map (has to be a multiple of 4k) + * + * @return OS_SUCCESS or OS_INVALID + * + * @brief Maps a physical address to a virtual address + * + */ +OS_STATUS PCI_Map_Memory(SEP_MMIO_NODE *node, U64 phy_address, + U32 map_size) +{ + U8 *res; + + SEP_DRV_LOG_INIT_IN("Node: %p, phy_address: %llx, map_size: %u.", node, + phy_address, map_size); + + if (!node || !phy_address || !map_size || (phy_address & 4095)) { + SEP_DRV_LOG_ERROR_INIT_OUT("Invalid parameters, aborting!"); + return OS_INVALID; + } + + res = (U8 *)ioremap_nocache(phy_address, map_size); + if (!res) { + SEP_DRV_LOG_ERROR_INIT_OUT("Map operation failed!"); + return OS_INVALID; + } + + SEP_MMIO_NODE_physical_address(node) = (UIOP)phy_address; + SEP_MMIO_NODE_virtual_address(node) = (UIOP)res; + SEP_MMIO_NODE_map_token(node) = (UIOP)res; + SEP_MMIO_NODE_size(node) = map_size; + + SEP_DRV_LOG_INIT_OUT("Addr:0x%llx->0x%llx, tok:0x%llx, sz:%u.", + SEP_MMIO_NODE_physical_address(node), + SEP_MMIO_NODE_virtual_address(node), + SEP_MMIO_NODE_map_token(node), + SEP_MMIO_NODE_size(node)); + return OS_SUCCESS; +} + +/* ------------------------------------------------------------------------- */ +/*! 
+ * @fn extern void PCI_Unmap_Memory(SEP_MMIO_NODE *node) + * + * @param node - memory map node to clean up + * + * @return none + * + * @brief Unmaps previously mapped memory + * + */ +void PCI_Unmap_Memory(SEP_MMIO_NODE *node) +{ + SEP_DRV_LOG_INIT_IN("Unmapping node %p.", node); + + if (node) { + if (SEP_MMIO_NODE_size(node)) { + SEP_DRV_LOG_TRACE( + "Unmapping token 0x%llx (0x%llx->0x%llx)[%uB].", + SEP_MMIO_NODE_map_token(node), + SEP_MMIO_NODE_physical_address(node), + SEP_MMIO_NODE_virtual_address(node), + SEP_MMIO_NODE_size(node)); + iounmap((void __iomem *)(UIOP)SEP_MMIO_NODE_map_token(node)); + SEP_MMIO_NODE_size(node) = 0; + SEP_MMIO_NODE_map_token(node) = 0; + SEP_MMIO_NODE_virtual_address(node) = 0; + SEP_MMIO_NODE_physical_address(node) = 0; + } else { + SEP_DRV_LOG_TRACE("Already unmapped."); + } + } + + SEP_DRV_LOG_INIT_OUT("Unmapped node %p.", node); +} + +/* ------------------------------------------------------------------------- */ +/*! + * @fn U32 PCI_MMIO_Read_U32(virtual_address_base, offset) + * + * @param virtual_address_base - Virtual address base + * @param offset - Register offset + * + * @return U32 read from an MMIO register + * + * @brief Reads U32 value from MMIO + * + */ +U32 PCI_MMIO_Read_U32(U64 virtual_address_base, U32 offset) +{ + U32 temp_u32 = 0LL; + U32 *computed_address; + + computed_address = + (U32 *)(((char *)(UIOP)virtual_address_base) + offset); + + SEP_DRV_LOG_REGISTER_IN("Will read U32(0x%llx + 0x%x = 0x%p).", + virtual_address_base, offset, computed_address); + + if (!virtual_address_base) { + SEP_DRV_LOG_ERROR("Invalid base for U32(0x%llx + 0x%x = 0x%p)!", + virtual_address_base, offset, + computed_address); + temp_u32 = 0; + } else { + temp_u32 = *computed_address; + } + + SEP_DRV_LOG_REGISTER_OUT("Has read U32(0x%llx + 0x%x): 0x%x.", + virtual_address_base, offset, temp_u32); + return temp_u32; +} + +/* ------------------------------------------------------------------------- */ +/*! 
+ * @fn U64 PCI_MMIO_Read_U64(virtual_address_base, offset) + * + * @param virtual_address_base - Virtual address base + * @param offset - Register offset + * + * @return U64 read from an MMIO register + * + * @brief Reads U64 value from MMIO + * + */ +U64 PCI_MMIO_Read_U64(U64 virtual_address_base, U32 offset) +{ + U64 temp_u64 = 0LL; + U64 *computed_address; + + computed_address = + (U64 *)(((char *)(UIOP)virtual_address_base) + offset); + + SEP_DRV_LOG_REGISTER_IN("Will read U64(0x%llx + 0x%x = 0x%p).", + virtual_address_base, offset, computed_address); + + if (!virtual_address_base) { + SEP_DRV_LOG_ERROR("Invalid base for U32(0x%llx + 0x%x = 0x%p)!", + virtual_address_base, offset, + computed_address); + temp_u64 = 0; + } else { + temp_u64 = *computed_address; + } + + SEP_DRV_LOG_REGISTER_OUT("Has read U64(0x%llx + 0x%x): 0x%llx.", + virtual_address_base, offset, temp_u64); + return temp_u64; +} + +/* ------------------------------------------------------------------------- */ +/*! + * @fn void PCI_MMIO_Write_U32(virtual_address_base, offset, value) + * + * @param virtual_address_base - Virtual address base + * @param offset - Register offset + * @param value - Value to write + * + * @return U32 write into an MMIO register + * + * @brief Writes U32 value to MMIO + * + */ +void PCI_MMIO_Write_U32(U64 virtual_address_base, U32 offset, U32 value) +{ + U32 *computed_address; + + computed_address = + (U32 *)(((char *)(UIOP)virtual_address_base) + offset); + + SEP_DRV_LOG_REGISTER_IN("Writing 0x%x to U32(0x%llx + 0x%x = 0x%p).", + value, virtual_address_base, offset, + computed_address); + + if (!virtual_address_base) { + SEP_DRV_LOG_ERROR("Invalid base for U32(0x%llx + 0x%x = 0x%p)!", + virtual_address_base, offset, + computed_address); + } else { + *computed_address = value; + } + + SEP_DRV_LOG_REGISTER_OUT("Has written 0x%x to U32(0x%llx + 0x%x).", + value, virtual_address_base, offset); +} + +/* 
------------------------------------------------------------------------- */ +/*! + * @fn void PCI_MMIO_Write_U64(virtual_address_base, offset, value) + * + * @param virtual_address_base - Virtual address base + * @param offset - Register offset + * @param value - Value to write + * + * @return U64 write into an MMIO register + * + * @brief Writes U64 value to MMIO + * + */ +void PCI_MMIO_Write_U64(U64 virtual_address_base, U32 offset, U64 value) +{ + U64 *computed_address; + + computed_address = + (U64 *)(((char *)(UIOP)virtual_address_base) + offset); + + SEP_DRV_LOG_REGISTER_IN("Writing 0x%llx to U64(0x%llx + 0x%x = 0x%p).", + value, virtual_address_base, offset, + computed_address); + + if (!virtual_address_base) { + SEP_DRV_LOG_ERROR("Invalid base for U32(0x%llx + 0x%x = 0x%p)!", + virtual_address_base, offset, + computed_address); + } else { + *computed_address = value; + } + + SEP_DRV_LOG_REGISTER_OUT("Has written 0x%llx to U64(0x%llx + 0x%x).", + value, virtual_address_base, offset); +} diff --git a/drivers/platform/x86/sepdk/sep/pebs.c b/drivers/platform/x86/sepdk/sep/pebs.c new file mode 100755 index 0000000000000..0a428dc5a7bd7 --- /dev/null +++ b/drivers/platform/x86/sepdk/sep/pebs.c @@ -0,0 +1,1954 @@ +/* **************************************************************************** + * Copyright(C) 2009-2018 Intel Corporation. All Rights Reserved. + * + * This file is part of SEP Development Kit + * + * SEP Development Kit is free software; you can redistribute it + * and/or modify it under the terms of the GNU General Public License + * version 2 as published by the Free Software Foundation. + * + * SEP Development Kit is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * As a special exception, you may use this file as part of a free software + * library without restriction. Specifically, if other files instantiate + * templates or use macros or inline functions from this file, or you + * compile this file and link it with other files to produce an executable + * this file does not by itself cause the resulting executable to be + * covered by the GNU General Public License. This exception does not + * however invalidate any other reasons why the executable file might be + * covered by the GNU General Public License. + * **************************************************************************** + */ + +#include "lwpmudrv_defines.h" +#include +#include +#include +#include +#include +#include + +#include "lwpmudrv_types.h" +#include "lwpmudrv_ecb.h" +#include "lwpmudrv_struct.h" +#include "lwpmudrv.h" +#include "control.h" +#include "core2.h" +#include "utility.h" +#include "output.h" +#include "ecb_iterators.h" +#include "pebs.h" + +#if defined(DRV_USE_KAISER) +#include +#include +int (*local_kaiser_add_mapping)(unsigned long, unsigned long, + unsigned long) = NULL; +void (*local_kaiser_remove_mapping)(unsigned long, unsigned long) = NULL; +#elif defined(DRV_USE_PTI) +#include +#include +#include +#include +#include +static void (*local_cea_set_pte)(void *cea_vaddr, phys_addr_t pa, + pgprot_t flags) = NULL; +static void (*local_do_kernel_range_flush)(void *info) = NULL; +static DEFINE_PER_CPU(PVOID, dts_buffer_cea); +#endif + +static PVOID pebs_global_memory; +static size_t pebs_global_memory_size; + +extern DRV_CONFIG drv_cfg; +extern DRV_SETUP_INFO_NODE req_drv_setup_info; + +#if defined(DRV_USE_PTI) +/* ------------------------------------------------------------------------- */ +/*! 
+ * @fn VOID pebs_Update_CEA (S32) + * + * @brief Flush the TLB entries related to PEBS buffer in cpu entry area + * + * @param this_cpu current cpu + * + * @return NONE + * + * Special Notes: + */ +static VOID pebs_Update_CEA(S32 this_cpu) +{ + unsigned long cea_start_addr; + unsigned long cea_end_addr; + + SEP_DRV_LOG_TRACE_IN("This_cpu: %d.", this_cpu); + + if (per_cpu(dts_buffer_cea, this_cpu) != 0) { + cea_start_addr = + (unsigned long)per_cpu(dts_buffer_cea, this_cpu); + cea_end_addr = cea_start_addr + + (unsigned long)CPU_STATE_dts_buffer_size( + &pcb[this_cpu]); + if (local_do_kernel_range_flush) { + struct flush_tlb_info info; + info.start = cea_start_addr; + info.end = cea_end_addr; + local_do_kernel_range_flush(&info); + } + } + + SEP_DRV_LOG_TRACE_OUT(""); +} +#endif + +/* ------------------------------------------------------------------------- */ +/*! + * @fn VOID pebs_Corei7_Initialize_Threshold + * (dts, LWPMU_DEVICE_pebs_record_size(&devices[dev_idx])) + * + * @brief The nehalem specific initialization + * + * @param dts - dts description + * + * @return NONE + * + * Special Notes: + */ +static VOID pebs_Corei7_Initialize_Threshold(DTS_BUFFER_EXT dts) +{ + U32 this_cpu; + U32 dev_idx; + DEV_CONFIG pcfg; + + SEP_DRV_LOG_TRACE_IN("Dts: %p.", dts); + + this_cpu = CONTROL_THIS_CPU(); + dev_idx = core_to_dev_map[this_cpu]; + pcfg = LWPMU_DEVICE_pcfg(&devices[dev_idx]); + + DTS_BUFFER_EXT_pebs_threshold(dts) = + DTS_BUFFER_EXT_pebs_base(dts) + + (LWPMU_DEVICE_pebs_record_size(&devices[dev_idx]) * + (U64)DEV_CONFIG_pebs_record_num(pcfg)); + + SEP_DRV_LOG_TRACE_OUT(""); +} + +/* ------------------------------------------------------------------------- */ +/*! + * @fn VOID pebs_Corei7_Overflow () + * + * @brief The Nehalem specific overflow check + * + * @param this_cpu - cpu id + * overflow_status - overflow status + * rec_index - record index + * + * @return NONE + * + * Special Notes: + * Check the global overflow field of the buffer descriptor. 
+ * Precise events can be allocated on any of the 4 general purpose + * registers. + */ +static U64 pebs_Corei7_Overflow(S32 this_cpu, U64 overflow_status, + U32 rec_index) +{ + DTS_BUFFER_EXT dtes; + S8 *pebs_base, *pebs_index, *pebs_ptr; + PEBS_REC_EXT pb; + U8 pebs_ptr_check = FALSE; + U32 dev_idx = core_to_dev_map[this_cpu]; + + SEP_DRV_LOG_TRACE_IN( + "This_cpu: %d, overflow_status: %llx, rec_index: %u.", this_cpu, + overflow_status, rec_index); + + dtes = CPU_STATE_dts_buffer(&pcb[this_cpu]); + + SEP_DRV_LOG_TRACE("This_cpu: %d, dtes %p.", this_cpu, dtes); + + if (!dtes) { + return overflow_status; + } + pebs_base = (S8 *)(UIOP)DTS_BUFFER_EXT_pebs_base(dtes); + SEP_DRV_LOG_TRACE("This_cpu: %d, pebs_base %p.", this_cpu, pebs_base); + pebs_index = (S8 *)(UIOP)DTS_BUFFER_EXT_pebs_index(dtes); + pebs_ptr = (S8 *)((UIOP)DTS_BUFFER_EXT_pebs_base(dtes) + + ((UIOP)rec_index * + LWPMU_DEVICE_pebs_record_size(&devices[dev_idx]))); + pebs_ptr_check = + (pebs_ptr && pebs_base != pebs_index && pebs_ptr < pebs_index); + if (pebs_ptr_check) { + pb = (PEBS_REC_EXT)pebs_ptr; + overflow_status |= PEBS_REC_EXT_glob_perf_overflow(pb); + } + + SEP_DRV_LOG_TRACE_OUT("Res: %llx.", overflow_status); + return overflow_status; +} + +/* ------------------------------------------------------------------------- */ +/*! + * @fn VOID pebs_Corei7_Overflow_APEBS () + * + * @brief Overflow check + * + * @param this_cpu - cpu id + * overflow_status - overflow status + * rec_index - record index + * + * @return NONE + * + * Special Notes: + * Check the global overflow field of the buffer descriptor. + * Precise events can be allocated on any of the 8 general purpose + * registers or 4 fixed registers. 
+ */ +static U64 pebs_Corei7_Overflow_APEBS(S32 this_cpu, U64 overflow_status, + U32 rec_index) +{ + S8 *pebs_base, *pebs_index, *pebs_ptr; + ADAPTIVE_PEBS_BASIC_INFO pb; + DTS_BUFFER_EXT1 dtes = CPU_STATE_dts_buffer(&pcb[this_cpu]); + U8 pebs_ptr_check = FALSE; + U32 dev_idx = core_to_dev_map[this_cpu]; + DEV_CONFIG pcfg = LWPMU_DEVICE_pcfg(&devices[dev_idx]); + + if (!dtes) { + return overflow_status; + } + pebs_base = (S8 *)(UIOP)DTS_BUFFER_EXT1_pebs_base(dtes); + pebs_index = (S8 *)(UIOP)DTS_BUFFER_EXT1_pebs_index(dtes); + pebs_ptr = (S8 *)((UIOP)DTS_BUFFER_EXT1_pebs_base(dtes) + + ((UIOP)rec_index * + LWPMU_DEVICE_pebs_record_size(&devices[dev_idx]))); + pebs_ptr_check = + (pebs_ptr && pebs_base != pebs_index && pebs_ptr < pebs_index); + + if (pebs_ptr_check && DEV_CONFIG_enable_adaptive_pebs(pcfg)) { + pb = (ADAPTIVE_PEBS_BASIC_INFO)pebs_ptr; + overflow_status |= + ADAPTIVE_PEBS_BASIC_INFO_applicable_counters(pb); + } + + return overflow_status; +} + +/* ------------------------------------------------------------------------- */ +/*! + * @fn VOID pebs_Core2_Initialize_Threshold + * (dts, LWPMU_DEVICE_pebs_record_size(&devices[dev_idx])) + * + * @brief The Core2 specific initialization + * + * @param dts - dts description + * + * @return NONE + * + * Special Notes: + */ +static VOID pebs_Core2_Initialize_Threshold(DTS_BUFFER_EXT dts) +{ + SEP_DRV_LOG_TRACE_IN("Dts: %p.", dts); + + DTS_BUFFER_EXT_pebs_threshold(dts) = DTS_BUFFER_EXT_pebs_base(dts); + + SEP_DRV_LOG_TRACE_OUT(""); +} + +/* ------------------------------------------------------------------------- */ +/*! 
+ * @fn VOID pebs_Core2_Overflow + * (dts, LWPMU_DEVICE_pebs_record_size(&devices[dev_idx])) + * + * @brief The Core2 specific overflow check + * + * @param this_cpu - cpu id + * overflow_status - overflow status + * rec_index - record index + * + * @return NONE + * + * Special Notes: + * Check the base and the index fields of the circular buffer, if they are + * not the same, then a precise event has overflowed. Precise events are + * allocated only on register#0. + */ +static U64 pebs_Core2_Overflow(S32 this_cpu, U64 overflow_status, U32 rec_index) +{ + DTS_BUFFER_EXT dtes; + U8 status = FALSE; + + SEP_DRV_LOG_TRACE_IN( + "This_cpu: %d, overflow_status: %llx, rec_index: %u.", this_cpu, + overflow_status, rec_index); + + dtes = CPU_STATE_dts_buffer(&pcb[this_cpu]); + + if (!dtes) { + SEP_DRV_LOG_ERROR_TRACE_OUT("Res: %llx (dtes is NULL!).", + overflow_status); + return overflow_status; + } + status = (U8)((dtes) && (DTS_BUFFER_EXT_pebs_index(dtes) != + DTS_BUFFER_EXT_pebs_base(dtes))); + if (status) { + // Merom allows only for GP register 0 to be precise capable + overflow_status |= 0x1; + } + + SEP_DRV_LOG_TRACE_OUT("Res: %llx.", overflow_status); + return overflow_status; +} + +/* ------------------------------------------------------------------------- */ +/*! 
+ * @fn VOID pebs_Modify_IP (sample, is_64bit_addr) + * + * @brief Change the IP field in the sample to that in the PEBS record + * + * @param sample - sample buffer + * @param is_64bit_addr - are we in a 64 bit module + * + * @return NONE + * + * Special Notes: + * + */ +static VOID pebs_Modify_IP(void *sample, DRV_BOOL is_64bit_addr, U32 rec_index) +{ + SampleRecordPC *psamp = sample; + DTS_BUFFER_EXT dtes; + S8 *pebs_base, *pebs_index, *pebs_ptr; + PEBS_REC_EXT pb; + U8 pebs_ptr_check = FALSE; + U32 this_cpu; + U32 dev_idx; + + SEP_DRV_LOG_TRACE_IN("Sample: %p, is_64bit_addr: %u, rec_index: %u.", + sample, is_64bit_addr, rec_index); + + this_cpu = CONTROL_THIS_CPU(); + dev_idx = core_to_dev_map[this_cpu]; + dtes = CPU_STATE_dts_buffer(&pcb[this_cpu]); + + if (!dtes || !psamp) { + return; + } + SEP_DRV_LOG_TRACE("In PEBS Fill Buffer: cpu %d.", CONTROL_THIS_CPU()); + pebs_base = (S8 *)(UIOP)DTS_BUFFER_EXT_pebs_base(dtes); + pebs_index = (S8 *)(UIOP)DTS_BUFFER_EXT_pebs_index(dtes); + pebs_ptr = (S8 *)((UIOP)DTS_BUFFER_EXT_pebs_base(dtes) + + ((UIOP)rec_index * + LWPMU_DEVICE_pebs_record_size(&devices[dev_idx]))); + pebs_ptr_check = + (pebs_ptr && pebs_base != pebs_index && pebs_ptr < pebs_index); + if (pebs_ptr_check) { + pb = (PEBS_REC_EXT)pebs_ptr; + if (is_64bit_addr) { + SAMPLE_RECORD_iip(psamp) = PEBS_REC_EXT_linear_ip(pb); + SAMPLE_RECORD_ipsr(psamp) = PEBS_REC_EXT_r_flags(pb); + } else { + SAMPLE_RECORD_eip(psamp) = + PEBS_REC_EXT_linear_ip(pb) & 0xFFFFFFFF; + SAMPLE_RECORD_eflags(psamp) = + PEBS_REC_EXT_r_flags(pb) & 0xFFFFFFFF; + } + } + SEP_DRV_LOG_TRACE_OUT(""); +} + +/* ------------------------------------------------------------------------- */ +/*! 
+ * @fn VOID pebs_Modify_IP_With_Eventing_IP (sample, is_64bit_addr) + * + * @brief Change the IP field in the sample to that in the PEBS record + * + * @param sample - sample buffer + * @param is_64bit_addr - are we in a 64 bit module + * + * @return NONE + * + * Special Notes: + * + */ +static VOID pebs_Modify_IP_With_Eventing_IP(void *sample, + DRV_BOOL is_64bit_addr, + U32 rec_index) +{ + SampleRecordPC *psamp = sample; + DTS_BUFFER_EXT dtes; + S8 *pebs_ptr, *pebs_base, *pebs_index; + U64 ip = 0, flags = 0; + U8 pebs_ptr_check = FALSE; + U32 this_cpu; + U32 dev_idx; + DEV_CONFIG pcfg; + + SEP_DRV_LOG_TRACE_IN("Sample: %p, is_64bit_addr: %u, rec_index: %u.", + sample, is_64bit_addr, rec_index); + + this_cpu = CONTROL_THIS_CPU(); + dev_idx = core_to_dev_map[this_cpu]; + pcfg = LWPMU_DEVICE_pcfg(&devices[dev_idx]); + dtes = CPU_STATE_dts_buffer(&pcb[this_cpu]); + + if (!dtes || !psamp) { + return; + } + + pebs_base = (S8 *)(UIOP)DTS_BUFFER_EXT_pebs_base(dtes); + pebs_index = (S8 *)(UIOP)DTS_BUFFER_EXT_pebs_index(dtes); + pebs_ptr = (S8 *)((UIOP)DTS_BUFFER_EXT_pebs_base(dtes) + + ((UIOP)rec_index * + LWPMU_DEVICE_pebs_record_size(&devices[dev_idx]))); + pebs_ptr_check = + (pebs_ptr && pebs_base != pebs_index && pebs_ptr < pebs_index); + + if (!pebs_ptr_check) { + return; + } + if (DEV_CONFIG_enable_adaptive_pebs(pcfg)) { + ip = ADAPTIVE_PEBS_BASIC_INFO_eventing_ip( + (ADAPTIVE_PEBS_BASIC_INFO)pebs_ptr); + if (DEV_CONFIG_apebs_collect_gpr(pcfg)) { + flags = ADAPTIVE_PEBS_GPR_INFO_rflags(( + ADAPTIVE_PEBS_GPR_INFO)( + pebs_ptr + LWPMU_DEVICE_apebs_gpr_offset( + &devices[dev_idx]))); + } + } else { + ip = PEBS_REC_EXT1_eventing_ip((PEBS_REC_EXT1)pebs_ptr); + flags = PEBS_REC_EXT1_r_flags((PEBS_REC_EXT1)pebs_ptr); + } + if (is_64bit_addr) { + SAMPLE_RECORD_iip(psamp) = ip; + SAMPLE_RECORD_ipsr(psamp) = flags; + } else { + SAMPLE_RECORD_eip(psamp) = ip & 0xFFFFFFFF; + SAMPLE_RECORD_eflags(psamp) = flags & 0xFFFFFFFF; + } + SEP_DRV_LOG_TRACE_OUT(""); +} + +/* 
------------------------------------------------------------------------- */ +/*! + * @fn VOID pebs_Modify_TSC (sample) + * + * @brief Change the TSC field in the sample to that in the PEBS record + * + * @param sample - sample buffer + * rec_index - record index + * @return NONE + * + * Special Notes: + * + */ +static VOID pebs_Modify_TSC(void *sample, U32 rec_index) +{ + SampleRecordPC *psamp = sample; + DTS_BUFFER_EXT dtes; + S8 *pebs_base, *pebs_index, *pebs_ptr; + U64 tsc; + U8 pebs_ptr_check = FALSE; + U32 this_cpu; + U32 dev_idx; + DEV_CONFIG pcfg; + + SEP_DRV_LOG_TRACE_IN("Sample: %p, rec_index: %u.", sample, rec_index); + + this_cpu = CONTROL_THIS_CPU(); + dev_idx = core_to_dev_map[this_cpu]; + pcfg = LWPMU_DEVICE_pcfg(&devices[dev_idx]); + dtes = CPU_STATE_dts_buffer(&pcb[this_cpu]); + + if (!dtes || !psamp) { + return; + } + pebs_base = (S8 *)(UIOP)DTS_BUFFER_EXT_pebs_base(dtes); + pebs_index = (S8 *)(UIOP)DTS_BUFFER_EXT_pebs_index(dtes); + pebs_ptr = (S8 *)((UIOP)DTS_BUFFER_EXT_pebs_base(dtes) + + ((UIOP)rec_index * + LWPMU_DEVICE_pebs_record_size(&devices[dev_idx]))); + pebs_ptr_check = + (pebs_ptr && pebs_base != pebs_index && pebs_ptr < pebs_index); + if (!pebs_ptr_check) { + return; + } + + if (DEV_CONFIG_enable_adaptive_pebs(pcfg)) { + tsc = ADAPTIVE_PEBS_BASIC_INFO_tsc( + (ADAPTIVE_PEBS_BASIC_INFO)pebs_ptr); + } else { + tsc = PEBS_REC_EXT2_tsc((PEBS_REC_EXT2)pebs_ptr); + } + SAMPLE_RECORD_tsc(psamp) = tsc; + SEP_DRV_LOG_TRACE_OUT(""); +} +/* ------------------------------------------------------------------------- */ +/*! 
+ * @fn U32 pebs_Get_Num_Records_Filled () + * + * @brief get number of PEBS records filled in PEBS buffer + * + * @param NONE + * + * @return NONE + * + * Special Notes: + * + */ +static U32 pebs_Get_Num_Records_Filled(VOID) +{ + U32 num = 0; + DTS_BUFFER_EXT dtes; + S8 *pebs_base, *pebs_index; + U32 this_cpu; + U32 dev_idx; + + SEP_DRV_LOG_TRACE_IN(""); + + this_cpu = CONTROL_THIS_CPU(); + dev_idx = core_to_dev_map[this_cpu]; + dtes = CPU_STATE_dts_buffer(&pcb[this_cpu]); + + if (!dtes) { + return num; + } + pebs_base = (S8 *)(UIOP)DTS_BUFFER_EXT_pebs_base(dtes); + pebs_index = (S8 *)(UIOP)DTS_BUFFER_EXT_pebs_index(dtes); + if (pebs_base != pebs_index) { + num = (U32)(pebs_index - pebs_base) / + LWPMU_DEVICE_pebs_record_size(&devices[dev_idx]); + } + + SEP_DRV_LOG_TRACE_OUT("Res: %u.", num); + return num; +} + +/* + * Initialize the pebs micro dispatch tables + */ +PEBS_DISPATCH_NODE core2_pebs = { + .initialize_threshold = pebs_Core2_Initialize_Threshold, + .overflow = pebs_Core2_Overflow, + .modify_ip = pebs_Modify_IP, + .modify_tsc = NULL, + .get_num_records_filled = pebs_Get_Num_Records_Filled +}; + +PEBS_DISPATCH_NODE core2p_pebs = { + .initialize_threshold = pebs_Corei7_Initialize_Threshold, + .overflow = pebs_Core2_Overflow, + .modify_ip = pebs_Modify_IP, + .modify_tsc = NULL, + .get_num_records_filled = pebs_Get_Num_Records_Filled +}; + +PEBS_DISPATCH_NODE corei7_pebs = { + .initialize_threshold = pebs_Corei7_Initialize_Threshold, + .overflow = pebs_Corei7_Overflow, + .modify_ip = pebs_Modify_IP, + .modify_tsc = NULL, + .get_num_records_filled = pebs_Get_Num_Records_Filled +}; + +PEBS_DISPATCH_NODE haswell_pebs = { + .initialize_threshold = pebs_Corei7_Initialize_Threshold, + .overflow = pebs_Corei7_Overflow, + .modify_ip = pebs_Modify_IP_With_Eventing_IP, + .modify_tsc = NULL, + .get_num_records_filled = pebs_Get_Num_Records_Filled +}; + +PEBS_DISPATCH_NODE perfver4_pebs = { + .initialize_threshold = pebs_Corei7_Initialize_Threshold, + .overflow = 
pebs_Corei7_Overflow, + .modify_ip = pebs_Modify_IP_With_Eventing_IP, + .modify_tsc = pebs_Modify_TSC, + .get_num_records_filled = pebs_Get_Num_Records_Filled +}; + +// adaptive PEBS +PEBS_DISPATCH_NODE perfver4_apebs = { + .initialize_threshold = pebs_Corei7_Initialize_Threshold, + .overflow = pebs_Corei7_Overflow_APEBS, + .modify_ip = pebs_Modify_IP_With_Eventing_IP, + .modify_tsc = pebs_Modify_TSC, + .get_num_records_filled = pebs_Get_Num_Records_Filled +}; + +#define PER_CORE_BUFFER_SIZE(dts_size, record_size, record_num) \ + (dts_size + (record_num + 1) * (record_size) + 64) +/* ------------------------------------------------------------------------- */ +/*! + * @fn VOID* pebs_Alloc_DTS_Buffer (VOID) + * + * @brief Allocate buffers used for latency and pebs sampling + * + * @param NONE + * + * @return NONE + * + * Special Notes: + * Allocate the memory needed to hold the DTS and PEBS records buffer. + * This routine is called by a thread that corresponds to a single core + */ +static VOID *pebs_Alloc_DTS_Buffer(VOID) +{ + UIOP pebs_base; + U32 dts_size; + PVOID dts_buffer = NULL; + DTS_BUFFER_EXT dts; + int this_cpu; + CPU_STATE pcpu; + U32 dev_idx; + DEV_CONFIG pcfg; + PEBS_DISPATCH pebs_dispatch; + + SEP_DRV_LOG_TRACE_IN(""); + + /* + * one PEBS record... 
need 2 records so that + * threshold can be less than absolute max + */ + preempt_disable(); + this_cpu = CONTROL_THIS_CPU(); + preempt_enable(); + dts_size = sizeof(DTS_BUFFER_EXT_NODE); + pcpu = &pcb[this_cpu]; + dev_idx = core_to_dev_map[this_cpu]; + pcfg = LWPMU_DEVICE_pcfg(&devices[dev_idx]); + pebs_dispatch = LWPMU_DEVICE_pebs_dispatch(&devices[dev_idx]); + + if (DEV_CONFIG_enable_adaptive_pebs(pcfg) || + DEV_CONFIG_collect_fixed_counter_pebs(pcfg)) { + dts_size = sizeof(DTS_BUFFER_EXT1_NODE); + } + + /* + * account for extra bytes to align PEBS base to cache line boundary + */ + if (DRV_SETUP_INFO_page_table_isolation(&req_drv_setup_info) == + DRV_SETUP_INFO_PTI_KPTI) { +#if defined(DRV_USE_PTI) && defined(CONFIG_CPU_SUP_INTEL) + struct page *page; + U32 buffer_size; + + SEP_DRV_LOG_INIT("Allocating PEBS buffer using KPTI approach."); + buffer_size = (PER_CORE_BUFFER_SIZE( + dts_size, + LWPMU_DEVICE_pebs_record_size( + &devices[dev_idx]), + DEV_CONFIG_pebs_record_num(pcfg)) / + PAGE_SIZE + + 1) * + PAGE_SIZE; + if (buffer_size > PEBS_BUFFER_SIZE) { + SEP_DRV_LOG_ERROR_TRACE_OUT( + "Can't allocate more buffer than CEA allows!"); + return NULL; + } + + page = __alloc_pages_node(cpu_to_node(this_cpu), + GFP_ATOMIC | __GFP_ZERO, + get_order(buffer_size)); + if (!page) { + SEP_DRV_LOG_ERROR_TRACE_OUT( + "NULL (failed to allocate space for DTS buffer!)."); + return NULL; + } + dts_buffer = page_address(page); + per_cpu(dts_buffer_cea, this_cpu) = + &get_cpu_entry_area(this_cpu) + ->cpu_debug_buffers.pebs_buffer; + if (!per_cpu(dts_buffer_cea, this_cpu)) { + if (dts_buffer) { + free_pages((unsigned long)dts_buffer, + get_order(buffer_size)); + } + SEP_DRV_LOG_ERROR_TRACE_OUT( + "CEA pebs_buffer ptr is NULL!"); + return NULL; + } + + CPU_STATE_dts_buffer(pcpu) = dts_buffer; + CPU_STATE_dts_buffer_size(pcpu) = buffer_size; + + if (local_cea_set_pte) { + size_t idx; + phys_addr_t phys_addr; + PVOID cea_ptr = per_cpu(dts_buffer_cea, this_cpu); + + phys_addr = 
virt_to_phys(dts_buffer); + + preempt_disable(); + for (idx = 0; idx < buffer_size; idx += PAGE_SIZE, + phys_addr += PAGE_SIZE, cea_ptr += PAGE_SIZE) { + local_cea_set_pte(cea_ptr, phys_addr, + PAGE_KERNEL); + } + pebs_Update_CEA(this_cpu); + preempt_enable(); + } + pebs_base = + (UIOP)(per_cpu(dts_buffer_cea, this_cpu)) + dts_size; + SEP_DRV_LOG_TRACE("This_cpu: %d, pebs_base %p.", this_cpu, + pebs_base); + + dts = (DTS_BUFFER_EXT)(per_cpu(dts_buffer_cea, this_cpu)); +#else + SEP_DRV_LOG_ERROR_TRACE_OUT( + "KPTI is enabled without PAGE_TABLE_ISOLATION kernel configuration!"); + return NULL; +#endif + } else { + dts_buffer = (char *)pebs_global_memory + + CPU_STATE_dts_buffer_offset(pcpu); + if (!dts_buffer) { + SEP_DRV_LOG_ERROR_TRACE_OUT( + "NULL (failed to allocate space for DTS buffer!)."); + return NULL; + } + pebs_base = (UIOP)(dts_buffer) + dts_size; + + CPU_STATE_dts_buffer(pcpu) = dts_buffer; + CPU_STATE_dts_buffer_size(pcpu) = PER_CORE_BUFFER_SIZE( + dts_size, + LWPMU_DEVICE_pebs_record_size(&devices[dev_idx]), + DEV_CONFIG_pebs_record_num(pcfg)); + + // Make 32 byte aligned + if ((pebs_base & 0x000001F) != 0x0) { + pebs_base = ALIGN_32(pebs_base); + } + + dts = (DTS_BUFFER_EXT)dts_buffer; + } + + /* + * Program the DTES Buffer for Precise EBS. 
+ * Set PEBS buffer for one PEBS record + */ + DTS_BUFFER_EXT_base(dts) = 0; + DTS_BUFFER_EXT_index(dts) = 0; + DTS_BUFFER_EXT_max(dts) = 0; + DTS_BUFFER_EXT_threshold(dts) = 0; + DTS_BUFFER_EXT_pebs_base(dts) = pebs_base; + DTS_BUFFER_EXT_pebs_index(dts) = pebs_base; + DTS_BUFFER_EXT_pebs_max(dts) = pebs_base + + ((UIOP)DEV_CONFIG_pebs_record_num(pcfg) + 1) * + LWPMU_DEVICE_pebs_record_size(&devices[dev_idx]); + + pebs_dispatch->initialize_threshold(dts); + + SEP_DRV_LOG_TRACE("base --- %llx.", DTS_BUFFER_EXT_pebs_base(dts)); + SEP_DRV_LOG_TRACE("index --- %llu.", DTS_BUFFER_EXT_pebs_index(dts)); + SEP_DRV_LOG_TRACE("max --- %llu.", DTS_BUFFER_EXT_pebs_max(dts)); + SEP_DRV_LOG_TRACE("threahold --- %llu.", + DTS_BUFFER_EXT_pebs_threshold(dts)); + SEP_DRV_LOG_TRACE("DTES buffer allocated for PEBS: %p.", dts_buffer); + + SEP_DRV_LOG_TRACE_OUT("Res: %p.", dts_buffer); + return dts; +} + +/* ------------------------------------------------------------------------- */ +/*! + * @fn VOID* pebs_Allocate_Buffers (VOID *params) + * + * @brief Allocate memory and set up MSRs in preparation for PEBS + * + * @param NONE + * + * @return NONE + * + * Special Notes: + * Set up the DS area and program the DS_AREA msrs in preparation + * for a PEBS run. Save away the old value in the DS_AREA. + * This routine is called via the parallel thread call. 
+ */ +static VOID pebs_Allocate_Buffers(VOID *params) +{ + U64 value; + U32 this_cpu; + CPU_STATE pcpu; + U32 dev_idx; + DEV_CONFIG pcfg; + PVOID dts_ptr = NULL; + + SEP_DRV_LOG_TRACE_IN("Params: %p.", params); + + this_cpu = CONTROL_THIS_CPU(); + pcpu = &pcb[this_cpu]; + dev_idx = core_to_dev_map[this_cpu]; + pcfg = LWPMU_DEVICE_pcfg(&devices[dev_idx]); + + if (!DEV_CONFIG_pebs_mode(pcfg)) { + return; + } + + SYS_Write_MSR(IA32_PEBS_ENABLE, 0LL); + value = SYS_Read_MSR(IA32_MISC_ENABLE); + if ((value & 0x80) && !(value & 0x1000)) { + CPU_STATE_old_dts_buffer(pcpu) = + (PVOID)(UIOP)SYS_Read_MSR(IA32_DS_AREA); + dts_ptr = pebs_Alloc_DTS_Buffer(); + if (!dts_ptr) { + SEP_DRV_LOG_ERROR_TRACE_OUT("dts_ptr is NULL!"); + return; + } + SEP_DRV_LOG_TRACE("Old dts buffer - %p.", + CPU_STATE_old_dts_buffer(pcpu)); + SEP_DRV_LOG_TRACE("New dts buffer - %p.", dts_ptr); + SYS_Write_MSR(IA32_DS_AREA, (U64)(UIOP)dts_ptr); + } + + SEP_DRV_LOG_TRACE_OUT(""); +} + +/* ------------------------------------------------------------------------- */ +/*! + * @fn VOID pebs_Dellocate_Buffers (VOID *params) + * + * @brief Clean up PEBS buffers and restore older values into the DS_AREA + * + * @param NONE + * + * @return NONE + * + * Special Notes: + * Clean up the DS area and all restore state prior to the sampling run + * This routine is called via the parallel thread call. 
+ */ +static VOID pebs_Deallocate_Buffers(VOID *params) +{ + CPU_STATE pcpu; + U32 this_cpu; + U32 dev_idx; + DEV_CONFIG pcfg; + + SEP_DRV_LOG_TRACE_IN("Params: %p.", params); + + this_cpu = CONTROL_THIS_CPU(); + pcpu = &pcb[this_cpu]; + dev_idx = core_to_dev_map[this_cpu]; + pcfg = LWPMU_DEVICE_pcfg(&devices[dev_idx]); + + if (!DEV_CONFIG_pebs_mode(pcfg)) { + return; + } + + SEP_DRV_LOG_TRACE("Entered deallocate buffers."); + SYS_Write_MSR(IA32_DS_AREA, (U64)(UIOP)CPU_STATE_old_dts_buffer(pcpu)); + + if (DRV_SETUP_INFO_page_table_isolation(&req_drv_setup_info) == + DRV_SETUP_INFO_PTI_KPTI) { +#if defined(DRV_USE_PTI) + SEP_DRV_LOG_INIT("Freeing PEBS buffer using KPTI approach."); + + if (local_cea_set_pte) { + size_t idx; + PVOID cea_ptr = per_cpu(dts_buffer_cea, this_cpu); + preempt_disable(); + for (idx = 0; idx < CPU_STATE_dts_buffer_size(pcpu); + idx += PAGE_SIZE, cea_ptr += PAGE_SIZE) { + local_cea_set_pte(cea_ptr, 0, PAGE_KERNEL); + } + pebs_Update_CEA(this_cpu); + preempt_enable(); + } + + if (CPU_STATE_dts_buffer(pcpu)) { + free_pages((unsigned long)CPU_STATE_dts_buffer(pcpu), + get_order(CPU_STATE_dts_buffer_size(pcpu))); + CPU_STATE_dts_buffer(pcpu) = NULL; + } +#endif + } + + SEP_DRV_LOG_TRACE_OUT(""); +} + +/* ------------------------------------------------------------------------- */ +/*! + * @fn U64 PEBS_Overflowed (this_cpu, overflow_status) + * + * @brief Figure out if the PEBS event caused an overflow + * + * @param this_cpu -- the current cpu + * overflow_status -- current value of the global overflow status + * + * @return updated overflow_status + * + * Special Notes: + * Figure out if the PEBS area has data that need to be transferred + * to the output sample. + * Update the overflow_status that is passed and return this value. 
+ * The overflow_status defines the events/status to be read + */ +U64 PEBS_Overflowed(S32 this_cpu, U64 overflow_status, U32 rec_index) +{ + U64 res; + U32 dev_idx; + PEBS_DISPATCH pebs_dispatch; + + SEP_DRV_LOG_TRACE_IN( + "This_cpu: %d, overflow_status: %llx, rec_index: %u.", this_cpu, + overflow_status, rec_index); + + dev_idx = core_to_dev_map[this_cpu]; + pebs_dispatch = LWPMU_DEVICE_pebs_dispatch(&devices[dev_idx]); + + res = pebs_dispatch->overflow(this_cpu, overflow_status, rec_index); + + SEP_DRV_LOG_TRACE_OUT("Res: %llx.", overflow_status); + return res; +} + +/* ------------------------------------------------------------------------- */ +/*! + * @fn VOID PEBS_Reset_Index (this_cpu) + * + * @brief Reset the PEBS index pointer + * + * @param this_cpu -- the current cpu + * + * @return NONE + * + * Special Notes: + * reset index to next PEBS record to base of buffer + */ +VOID PEBS_Reset_Index(S32 this_cpu) +{ + DTS_BUFFER_EXT dtes; + + SEP_DRV_LOG_TRACE_IN("This_cpu: %d.", this_cpu); + + dtes = CPU_STATE_dts_buffer(&pcb[this_cpu]); + + if (!dtes) { + return; + } + SEP_DRV_LOG_TRACE("PEBS Reset Index: %d.", this_cpu); + DTS_BUFFER_EXT_pebs_index(dtes) = DTS_BUFFER_EXT_pebs_base(dtes); + + SEP_DRV_LOG_TRACE_OUT(""); +} + +extern U32 pmi_Get_CSD(U32, U32 *, U32 *); +#define EFLAGS_V86_MASK 0x00020000L + +/* ------------------------------------------------------------------------- */ +/*! 
+ * @fn VOID PEBS_Flush_Buffer (VOID * param) + * + * @brief generate sampling records from PEBS records in PEBS buffer + * + * @param param -- not used + * + * @return NONE + * + * Special Notes: + */ +VOID PEBS_Flush_Buffer(VOID *param) +{ + U32 i, this_cpu, index, desc_id; + U64 pebs_overflow_status = 0; + U64 lbr_tos_from_ip = 0ULL; + DRV_BOOL counter_overflowed = FALSE; + CPU_STATE pcpu; + EVENT_DESC evt_desc; + BUFFER_DESC bd; + SampleRecordPC *psamp_pebs; + U32 is_64bit_addr = FALSE; + U32 u32PebsRecordNumFilled; +#if defined(DRV_IA32) + U32 seg_cs; + U32 csdlo; + U32 csdhi; +#endif + U32 dev_idx; + DEV_CONFIG pcfg; + U32 cur_grp; + DRV_BOOL multi_pebs_enabled; + + SEP_DRV_LOG_TRACE_IN("Param: %p.", param); + + this_cpu = CONTROL_THIS_CPU(); + pcpu = &pcb[this_cpu]; + bd = &cpu_buf[this_cpu]; + dev_idx = core_to_dev_map[this_cpu]; + pcfg = LWPMU_DEVICE_pcfg(&devices[dev_idx]); + cur_grp = CPU_STATE_current_group(pcpu); + multi_pebs_enabled = + (DEV_CONFIG_pebs_mode(pcfg) && + (DEV_CONFIG_pebs_record_num(pcfg) > 1) && + (DRV_SETUP_INFO_page_table_isolation(&req_drv_setup_info) == + DRV_SETUP_INFO_PTI_DISABLED)); + + if (!multi_pebs_enabled) { + SEP_DRV_LOG_TRACE_OUT("PEBS_Flush_Buffer is not supported."); + return; + } + + u32PebsRecordNumFilled = PEBS_Get_Num_Records_Filled(); + for (i = 0; i < u32PebsRecordNumFilled; i++) { + pebs_overflow_status = PEBS_Overflowed(this_cpu, 0, i); + SEP_DRV_LOG_TRACE("Pebs_overflow_status = 0x%llx, i=%d.", + pebs_overflow_status, i); + + FOR_EACH_DATA_REG(pecb, j) + { + if ((!DEV_CONFIG_enable_adaptive_pebs(pcfg) && + !ECB_entries_is_gp_reg_get(pecb, j)) || + !ECB_entries_precise_get(pecb, j)) { + continue; + } + if (ECB_entries_fixed_reg_get(pecb, j)) { + index = ECB_entries_reg_id(pecb, j) - + IA32_FIXED_CTR0; + if (pebs_overflow_status & + ((U64)1 << (32 + index))) { + counter_overflowed = TRUE; + } + } else { + index = ECB_entries_reg_id(pecb, j) - IA32_PMC0; + if (pebs_overflow_status & (U64)1 << index) { + 
counter_overflowed = TRUE; + } + } + if (counter_overflowed) { + desc_id = ECB_entries_event_id_index(pecb, j); + evt_desc = desc_data[desc_id]; + SEP_DRV_LOG_TRACE( + "Event_id_index=%u, desc_id=%u.", + ECB_entries_event_id_index(pecb, j), + desc_id); + psamp_pebs = (SampleRecordPC *) + OUTPUT_Reserve_Buffer_Space( + bd, + EVENT_DESC_sample_size( + evt_desc), + (NMI_mode) ? TRUE : FALSE, + !SEP_IN_NOTIFICATION, + (S32)this_cpu); + if (!psamp_pebs) { + SEP_DRV_LOG_ERROR( + "Could not generate samples from PEBS records."); + continue; + } + + lbr_tos_from_ip = 0ULL; + CPU_STATE_num_samples(&pcb[this_cpu]) += 1; + SAMPLE_RECORD_descriptor_id(psamp_pebs) = + desc_id; + SAMPLE_RECORD_event_index(psamp_pebs) = + ECB_entries_event_id_index(pecb, j); + SAMPLE_RECORD_pid_rec_index(psamp_pebs) = + (U32)-1; + SAMPLE_RECORD_pid_rec_index_raw(psamp_pebs) = 1; + SAMPLE_RECORD_tid(psamp_pebs) = (U32)-1; + SAMPLE_RECORD_cpu_num(psamp_pebs) = + (U16)this_cpu; + SAMPLE_RECORD_osid(psamp_pebs) = 0; + +#if defined(DRV_IA32) + PEBS_Modify_IP((S8 *)psamp_pebs, is_64bit_addr, + i); + SAMPLE_RECORD_cs(psamp_pebs) = __KERNEL_CS; + if (SAMPLE_RECORD_eflags(psamp_pebs) & + EFLAGS_V86_MASK) { + csdlo = 0; + csdhi = 0; + } else { + seg_cs = SAMPLE_RECORD_cs(psamp_pebs); + SYS_Get_CSD(seg_cs, &csdlo, &csdhi); + } + SAMPLE_RECORD_csd(psamp_pebs).u1.lowWord = + csdlo; + SAMPLE_RECORD_csd(psamp_pebs).u2.highWord = + csdhi; +#elif defined(DRV_EM64T) + SAMPLE_RECORD_cs(psamp_pebs) = __KERNEL_CS; + pmi_Get_CSD(SAMPLE_RECORD_cs(psamp_pebs), + &SAMPLE_RECORD_csd(psamp_pebs) + .u1.lowWord, + &SAMPLE_RECORD_csd(psamp_pebs) + .u2.highWord); + is_64bit_addr = + (SAMPLE_RECORD_csd(psamp_pebs) + .u2.s2.reserved_0 == 1); + if (is_64bit_addr) { + SAMPLE_RECORD_ia64_pc(psamp_pebs) = + TRUE; + } else { + SAMPLE_RECORD_ia64_pc(psamp_pebs) = + FALSE; + + SEP_DRV_LOG_TRACE( + "SAMPLE_RECORD_eip(psamp_pebs) 0x%x.", + SAMPLE_RECORD_eip(psamp_pebs)); + SEP_DRV_LOG_TRACE( + "SAMPLE_RECORD_eflags(psamp_pebs) %x.", + 
SAMPLE_RECORD_eflags( + psamp_pebs)); + } +#endif + if (EVENT_DESC_pebs_offset(evt_desc) || + EVENT_DESC_latency_offset_in_sample( + evt_desc)) { + lbr_tos_from_ip = PEBS_Fill_Buffer( + (S8 *)psamp_pebs, evt_desc, i); + } + PEBS_Modify_IP((S8 *)psamp_pebs, is_64bit_addr, + i); + PEBS_Modify_TSC((S8 *)psamp_pebs, i); + if (ECB_entries_branch_evt_get(pecb, j) && + DEV_CONFIG_precise_ip_lbrs(pcfg) && + lbr_tos_from_ip) { + if (is_64bit_addr) { + SAMPLE_RECORD_iip(psamp_pebs) = + lbr_tos_from_ip; + SEP_DRV_LOG_TRACE( + "UPDATED SAMPLE_RECORD_iip(psamp) 0x%llx.", + SAMPLE_RECORD_iip( + psamp_pebs)); + } else { + SAMPLE_RECORD_eip(psamp_pebs) = + (U32)lbr_tos_from_ip; + SEP_DRV_LOG_TRACE( + "UPDATED SAMPLE_RECORD_eip(psamp) 0x%x.", + SAMPLE_RECORD_eip( + psamp_pebs)); + } + } + } + } + END_FOR_EACH_DATA_REG; + } + PEBS_Reset_Index(this_cpu); + + SEP_DRV_LOG_TRACE_OUT(""); +} + +/* ------------------------------------------------------------------------- */ +/*! + * @fn VOID PEBS_Reset_Counter (this_cpu, index, value) + * + * @brief set reset value for PMC after overflow + * + * @param this_cpu -- the current cpu + * index -- PMC register index + * value -- reset value for PMC after overflow + * + * @return NONE + * + * Special Notes: + */ +VOID PEBS_Reset_Counter(S32 this_cpu, U32 index, U64 value) +{ + DTS_BUFFER_EXT dts; + DTS_BUFFER_EXT1 dts_ext = NULL; + U32 dev_idx; + DEV_CONFIG pcfg; + + SEP_DRV_LOG_TRACE_IN("This_cpu: %d, index: %u, value: %llx.", this_cpu, + index, value); + + dev_idx = core_to_dev_map[this_cpu]; + pcfg = LWPMU_DEVICE_pcfg(&devices[dev_idx]); + dts = CPU_STATE_dts_buffer(&pcb[this_cpu]); + + if (!dts) { + return; + } + SEP_DRV_LOG_TRACE( + "PEBS Reset GP Counters[0:4]: cpu %d, index=%u, value=%llx.", + this_cpu, index, value); + switch (index) { + case 0: + DTS_BUFFER_EXT_counter_reset0(dts) = value; + break; + case 1: + DTS_BUFFER_EXT_counter_reset1(dts) = value; + break; + case 2: + DTS_BUFFER_EXT_counter_reset2(dts) = value; + break; + case 3: 
+ DTS_BUFFER_EXT_counter_reset3(dts) = value; + break; + } + + if (DEV_CONFIG_enable_adaptive_pebs(pcfg) || + DEV_CONFIG_collect_fixed_counter_pebs(pcfg)) { + dts_ext = CPU_STATE_dts_buffer(&pcb[this_cpu]); + } + if (!dts_ext) { + return; + } + SEP_DRV_LOG_TRACE("PEBS Reset Fixed Counters and GP Counters[4:7]: \ + cpu %d, index=%u, value=%llx.", + this_cpu, index, value); + switch (index) { + case 4: + DTS_BUFFER_EXT1_counter_reset4(dts_ext) = value; + break; + case 5: + DTS_BUFFER_EXT1_counter_reset5(dts_ext) = value; + break; + case 6: + DTS_BUFFER_EXT1_counter_reset6(dts_ext) = value; + break; + case 7: + DTS_BUFFER_EXT1_counter_reset7(dts_ext) = value; + break; + case 8: + DTS_BUFFER_EXT1_fixed_counter_reset0(dts_ext) = value; + break; + case 9: + DTS_BUFFER_EXT1_fixed_counter_reset1(dts_ext) = value; + break; + case 10: + DTS_BUFFER_EXT1_fixed_counter_reset2(dts_ext) = value; + break; + case 11: + DTS_BUFFER_EXT1_fixed_counter_reset3(dts_ext) = value; + break; + } + + SEP_DRV_LOG_TRACE_OUT(""); +} + +/* ------------------------------------------------------------------------- */ +/*! + * @fn VOID PEBS_Modify_IP (sample, is_64bit_addr) + * + * @brief Change the IP field in the sample to that in the PEBS record + * + * @param sample - sample buffer + * @param is_64bit_addr - are we in a 64 bit module + * + * @return NONE + * + * Special Notes: + * + */ +VOID PEBS_Modify_IP(void *sample, DRV_BOOL is_64bit_addr, U32 rec_index) +{ + U32 this_cpu; + U32 dev_idx; + PEBS_DISPATCH pebs_dispatch; + + SEP_DRV_LOG_TRACE_IN("Sample: %p, is_64bit_addr: %u, rec_index: %u.", + sample, is_64bit_addr, rec_index); + + this_cpu = CONTROL_THIS_CPU(); + dev_idx = core_to_dev_map[this_cpu]; + pebs_dispatch = LWPMU_DEVICE_pebs_dispatch(&devices[dev_idx]); + + pebs_dispatch->modify_ip(sample, is_64bit_addr, rec_index); + + SEP_DRV_LOG_TRACE_OUT(""); +} + +/* ------------------------------------------------------------------------- */ +/*! 
+ * @fn VOID PEBS_Modify_TSC (sample) + * + * @brief Change the TSC field in the sample to that in the PEBS record + * + * @param sample - sample buffer + * + * @return NONE + * + * Special Notes: + * + */ +VOID PEBS_Modify_TSC(void *sample, U32 rec_index) +{ + U32 this_cpu; + U32 dev_idx; + PEBS_DISPATCH pebs_dispatch; + + SEP_DRV_LOG_TRACE_IN("Sample: %p, rec_index: %u.", sample, rec_index); + + this_cpu = CONTROL_THIS_CPU(); + dev_idx = core_to_dev_map[this_cpu]; + pebs_dispatch = LWPMU_DEVICE_pebs_dispatch(&devices[dev_idx]); + + if (pebs_dispatch->modify_tsc != NULL) { + pebs_dispatch->modify_tsc(sample, rec_index); + } + + SEP_DRV_LOG_TRACE_OUT(""); +} + +U32 PEBS_Get_Num_Records_Filled(VOID) +{ + U32 this_cpu; + U32 dev_idx; + PEBS_DISPATCH pebs_dispatch; + U32 num = 0; + + SEP_DRV_LOG_TRACE_IN(""); + + this_cpu = CONTROL_THIS_CPU(); + dev_idx = core_to_dev_map[this_cpu]; + pebs_dispatch = LWPMU_DEVICE_pebs_dispatch(&devices[dev_idx]); + + if (pebs_dispatch->get_num_records_filled != NULL) { + num = pebs_dispatch->get_num_records_filled(); + SEP_DRV_LOG_TRACE("Num=%u.", num); + } + + SEP_DRV_LOG_TRACE_OUT("Res: %u.", num); + return num; +} +/* ------------------------------------------------------------------------- */ +/*! 
+ * @fn VOID PEBS_Fill_Phy_Addr (LATENCY_INFO latency_info) + * + * @brief Fill latency node with phy addr when applicable + * + * @param latency_info - pointer to LATENCY_INFO struct + * + * @return NONE + * + * Special Notes: + * + */ + +static VOID PEBS_Fill_Phy_Addr(LATENCY_INFO latency_info) +{ +#if defined(DRV_EM64T) && LINUX_VERSION_CODE >= KERNEL_VERSION(3, 6, 0) + U64 lin_addr; + U64 offset; + struct page *page = NULL; + + if (!DRV_CONFIG_virt_phys_translation(drv_cfg)) { + return; + } + lin_addr = (U64)LATENCY_INFO_linear_address(latency_info); + if (lin_addr != 0) { + offset = (U64)(lin_addr & 0x0FFF); + if (__virt_addr_valid(lin_addr)) { + LATENCY_INFO_phys_addr(latency_info) = + (U64)__pa(lin_addr); + } else if (lin_addr < __PAGE_OFFSET) { + pagefault_disable(); + if (__get_user_pages_fast(lin_addr, 1, 1, &page)) { + LATENCY_INFO_phys_addr(latency_info) = + (U64)page_to_phys(page) + offset; + put_page(page); + } + pagefault_enable(); + } + } +#endif +} +/* ------------------------------------------------------------------------- */ +/*! 
+ * @fn U64 PEBS_Fill_Buffer (S8 *buffer, EVENT_DESC evt_desc, U32 rec_index) + * + * @brief Fill the buffer with the pebs data + * + * @param buffer - area to write the data into + * event_desc - event descriptor of the pebs event + rec_index - current pebs record index + * + * @return if APEBS return LBR_TOS_FROM_IP else return 0 + * + * Special Notes: + * + */ +U64 PEBS_Fill_Buffer(S8 *buffer, EVENT_DESC evt_desc, U32 rec_index) +{ + DTS_BUFFER_EXT dtes; + LATENCY_INFO_NODE latency_info = { 0 }; + PEBS_REC_EXT1 pebs_base_ext1; + PEBS_REC_EXT2 pebs_base_ext2; + S8 *pebs_base, *pebs_index, *pebs_ptr; + U8 pebs_ptr_check = FALSE; + U64 lbr_tos_from_ip = 0ULL; + U32 this_cpu; + U32 dev_idx; + DEV_CONFIG pcfg; + + SEP_DRV_LOG_TRACE_IN("Buffer: %p, evt_desc: %p, rec_index: %u.", buffer, + evt_desc, rec_index); + + this_cpu = CONTROL_THIS_CPU(); + dev_idx = core_to_dev_map[this_cpu]; + pcfg = LWPMU_DEVICE_pcfg(&devices[dev_idx]); + dtes = CPU_STATE_dts_buffer(&pcb[this_cpu]); + + if (DEV_CONFIG_enable_adaptive_pebs(pcfg)) { + lbr_tos_from_ip = + APEBS_Fill_Buffer(buffer, evt_desc, rec_index); + return lbr_tos_from_ip; + } + + SEP_DRV_LOG_TRACE("In PEBS Fill Buffer: cpu %d.", CONTROL_THIS_CPU()); + + if (!dtes) { + return lbr_tos_from_ip; + } + pebs_base = (S8 *)(UIOP)DTS_BUFFER_EXT_pebs_base(dtes); + pebs_index = (S8 *)(UIOP)DTS_BUFFER_EXT_pebs_index(dtes); + pebs_ptr = (S8 *)((UIOP)DTS_BUFFER_EXT_pebs_base(dtes) + + ((UIOP)rec_index * + LWPMU_DEVICE_pebs_record_size(&devices[dev_idx]))); + pebs_ptr_check = + (pebs_ptr && pebs_base != pebs_index && pebs_ptr < pebs_index); + if (!pebs_ptr_check) { + return lbr_tos_from_ip; + } + pebs_base = pebs_ptr; + if (EVENT_DESC_pebs_offset(evt_desc)) { + SEP_DRV_LOG_TRACE("PEBS buffer has data available."); + memcpy(buffer + EVENT_DESC_pebs_offset(evt_desc), pebs_base, + EVENT_DESC_pebs_size(evt_desc)); + } + if (EVENT_DESC_eventing_ip_offset(evt_desc)) { + pebs_base_ext1 = (PEBS_REC_EXT1)pebs_base; + *(U64 *)(buffer + 
EVENT_DESC_eventing_ip_offset(evt_desc)) = + PEBS_REC_EXT1_eventing_ip(pebs_base_ext1); + } + if (EVENT_DESC_hle_offset(evt_desc)) { + pebs_base_ext1 = (PEBS_REC_EXT1)pebs_base; + *(U64 *)(buffer + EVENT_DESC_hle_offset(evt_desc)) = + PEBS_REC_EXT1_hle_info(pebs_base_ext1); + } + if (EVENT_DESC_latency_offset_in_sample(evt_desc)) { + pebs_base_ext1 = (PEBS_REC_EXT1)pebs_base; + memcpy(&latency_info, + pebs_base + EVENT_DESC_latency_offset_in_pebs_record( + evt_desc), + EVENT_DESC_latency_size_from_pebs_record(evt_desc)); + memcpy(&LATENCY_INFO_stack_pointer(&latency_info), + &PEBS_REC_EXT1_rsp(pebs_base_ext1), sizeof(U64)); + + LATENCY_INFO_phys_addr(&latency_info) = 0; + PEBS_Fill_Phy_Addr(&latency_info); + + memcpy(buffer + EVENT_DESC_latency_offset_in_sample(evt_desc), + &latency_info, sizeof(LATENCY_INFO_NODE)); + } + if (EVENT_DESC_pebs_tsc_offset(evt_desc)) { + pebs_base_ext2 = (PEBS_REC_EXT2)pebs_base; + *(U64 *)(buffer + EVENT_DESC_pebs_tsc_offset(evt_desc)) = + PEBS_REC_EXT2_tsc(pebs_base_ext2); + } + + SEP_DRV_LOG_TRACE_OUT(""); + return lbr_tos_from_ip; +} + +/* ------------------------------------------------------------------------- */ +/*! 
+ * @fn U64 APEBS_Fill_Buffer (S8 *buffer, EVENT_DESC evt_desc, U32 rec_index) + * + * @brief Fill the buffer with the pebs data + * + * @param buffer - area to write the data into + * event_desc - event descriptor of the pebs event + * rec_index - current pebs record index + * + * @return LBR_TOS_FROM_IP + * + * Special Notes: + * + */ +U64 APEBS_Fill_Buffer(S8 *buffer, EVENT_DESC evt_desc, U32 rec_index) +{ + DTS_BUFFER_EXT1 dtes; + LATENCY_INFO_NODE latency_info = { 0 }; + U64 dtes_record_size = 0; + U64 dtes_record_format = 0; + ADAPTIVE_PEBS_MEM_INFO apebs_mem = NULL; + ADAPTIVE_PEBS_GPR_INFO apebs_gpr = NULL; + ADAPTIVE_PEBS_BASIC_INFO apebs_basic = NULL; + S8 *pebs_base, *pebs_index, *pebs_ptr; + U8 pebs_ptr_check = FALSE; + U64 lbr_tos_from_ip = 0ULL; + U32 this_cpu; + U32 dev_idx; + DEV_CONFIG pcfg; + + SEP_DRV_LOG_TRACE_IN("Buffer: %p, evt_desc: %p, rec_index: %u.", buffer, + evt_desc, rec_index); + + this_cpu = CONTROL_THIS_CPU(); + dev_idx = core_to_dev_map[this_cpu]; + pcfg = LWPMU_DEVICE_pcfg(&devices[dev_idx]); + dtes = CPU_STATE_dts_buffer(&pcb[this_cpu]); + + SEP_DRV_LOG_TRACE("In APEBS Fill Buffer: cpu %d.", this_cpu); + + if (!dtes || !DEV_CONFIG_enable_adaptive_pebs(pcfg)) { + return lbr_tos_from_ip; + } + + pebs_base = (S8 *)(UIOP)DTS_BUFFER_EXT1_pebs_base(dtes); + pebs_index = (S8 *)(UIOP)DTS_BUFFER_EXT1_pebs_index(dtes); + pebs_ptr = (S8 *)((UIOP)DTS_BUFFER_EXT1_pebs_base(dtes) + + ((UIOP)rec_index * + LWPMU_DEVICE_pebs_record_size(&devices[dev_idx]))); + pebs_ptr_check = + (pebs_ptr && pebs_base != pebs_index && pebs_ptr < pebs_index); + if (!pebs_ptr_check) { + return lbr_tos_from_ip; + } + + pebs_base = pebs_ptr; + apebs_basic = (ADAPTIVE_PEBS_BASIC_INFO)( + pebs_base + LWPMU_DEVICE_apebs_basic_offset(&devices[dev_idx])); + dtes_record_size = (ADAPTIVE_PEBS_BASIC_INFO_record_info(apebs_basic) & + APEBS_RECORD_SIZE_MASK) >> 48; // [63:48] + dtes_record_format = + (ADAPTIVE_PEBS_BASIC_INFO_record_info(apebs_basic) & + 
(U64)APEBS_RECORD_FORMAT_MASK); // [47:0] + + if (dtes_record_size != + LWPMU_DEVICE_pebs_record_size(&devices[dev_idx])) { + SEP_DRV_LOG_TRACE( + "PEBS record size does not match with ucode\n"); + } + if (EVENT_DESC_pebs_offset(evt_desc)) { + *(U64 *)(buffer + EVENT_DESC_pebs_offset(evt_desc)) = + ADAPTIVE_PEBS_BASIC_INFO_record_info(apebs_basic); + } + if (EVENT_DESC_eventing_ip_offset(evt_desc)) { + *(U64 *)(buffer + EVENT_DESC_eventing_ip_offset(evt_desc)) = + ADAPTIVE_PEBS_BASIC_INFO_eventing_ip(apebs_basic); + } + if (EVENT_DESC_pebs_tsc_offset(evt_desc)) { + *(U64 *)(buffer + EVENT_DESC_pebs_tsc_offset(evt_desc)) = + ADAPTIVE_PEBS_BASIC_INFO_tsc(apebs_basic); + } + if (EVENT_DESC_applicable_counters_offset(evt_desc)) { + *(U64 *)(buffer + + EVENT_DESC_applicable_counters_offset(evt_desc)) = + ADAPTIVE_PEBS_BASIC_INFO_applicable_counters( + apebs_basic); + } + if (DEV_CONFIG_apebs_collect_gpr(pcfg) && + EVENT_DESC_gpr_info_offset(evt_desc)) { + if (!(dtes_record_format & APEBS_GPR_RECORD_FORMAT_MASK)) { + SEP_DRV_LOG_WARNING( + "GPR info not found in DS PEBS record."); + } + memcpy(buffer + EVENT_DESC_gpr_info_offset(evt_desc), + pebs_base + + LWPMU_DEVICE_apebs_gpr_offset(&devices[dev_idx]), + EVENT_DESC_gpr_info_size(evt_desc)); + } + if (DEV_CONFIG_apebs_collect_mem_info(pcfg) && + EVENT_DESC_latency_offset_in_sample(evt_desc)) { + if (!(dtes_record_format & APEBS_MEM_RECORD_FORMAT_MASK)) { + SEP_DRV_LOG_WARNING( + "MEM info not found in DS PEBS record."); + } + apebs_mem = (ADAPTIVE_PEBS_MEM_INFO)( + pebs_base + + LWPMU_DEVICE_apebs_mem_offset(&devices[dev_idx])); + memcpy(&LATENCY_INFO_linear_address(&latency_info), + &ADAPTIVE_PEBS_MEM_INFO_data_linear_address(apebs_mem), + sizeof(U64)); + memcpy(&LATENCY_INFO_data_source(&latency_info), + &ADAPTIVE_PEBS_MEM_INFO_data_source(apebs_mem), + sizeof(U64)); + memcpy(&LATENCY_INFO_latency(&latency_info), + &ADAPTIVE_PEBS_MEM_INFO_latency(apebs_mem), sizeof(U64)); + LATENCY_INFO_stack_pointer(&latency_info) = 
0; + if (DEV_CONFIG_apebs_collect_gpr(pcfg)) { + apebs_gpr = (ADAPTIVE_PEBS_GPR_INFO)( + pebs_base + LWPMU_DEVICE_apebs_gpr_offset( + &devices[dev_idx])); + memcpy(&LATENCY_INFO_stack_pointer(&latency_info), + &ADAPTIVE_PEBS_GPR_INFO_rsp(apebs_gpr), + sizeof(U64)); + } + + LATENCY_INFO_phys_addr(&latency_info) = 0; + PEBS_Fill_Phy_Addr(&latency_info); + memcpy(buffer + EVENT_DESC_latency_offset_in_sample(evt_desc), + &latency_info, sizeof(LATENCY_INFO_NODE)); + } + if (DEV_CONFIG_apebs_collect_mem_info(pcfg) && + EVENT_DESC_hle_offset(evt_desc)) { + *(U64 *)(buffer + EVENT_DESC_hle_offset(evt_desc)) = + ADAPTIVE_PEBS_MEM_INFO_hle_info(( + ADAPTIVE_PEBS_MEM_INFO)( + pebs_base + LWPMU_DEVICE_apebs_mem_offset( + &devices[dev_idx]))); + } + if (DEV_CONFIG_apebs_collect_xmm(pcfg) && + EVENT_DESC_xmm_info_offset(evt_desc)) { + if (!(dtes_record_format & APEBS_XMM_RECORD_FORMAT_MASK)) { + SEP_DRV_LOG_WARNING( + "XMM info not found in DS PEBS record."); + } + memcpy(buffer + EVENT_DESC_xmm_info_offset(evt_desc), + pebs_base + + LWPMU_DEVICE_apebs_xmm_offset(&devices[dev_idx]), + EVENT_DESC_xmm_info_size(evt_desc)); + } + if (DEV_CONFIG_apebs_collect_lbrs(pcfg) && + EVENT_DESC_lbr_offset(evt_desc)) { + if (!(dtes_record_format & APEBS_LBR_RECORD_FORMAT_MASK)) { + SEP_DRV_LOG_WARNING( + "LBR info not found in DS PEBS record\n"); + } + if ((dtes_record_format >> 24) != + (DEV_CONFIG_apebs_num_lbr_entries(pcfg) - 1)) { + SEP_DRV_LOG_WARNING( + "DRV_CONFIG_apebs_num_lbr_entries does not match with PEBS record\n"); + } + *(U64 *)(buffer + EVENT_DESC_lbr_offset(evt_desc)) = + DEV_CONFIG_apebs_num_lbr_entries(pcfg) - 1; + //Top-of-Stack(TOS) pointing to last entry + //Populating lbr callstack as SST_ENTRY_N to SST_ENTRY_0 in + // tb util, hence setting TOS to SST_ENTRY_N + memcpy(buffer + EVENT_DESC_lbr_offset(evt_desc) + sizeof(U64), + pebs_base + + LWPMU_DEVICE_apebs_lbr_offset(&devices[dev_idx]), + EVENT_DESC_lbr_info_size(evt_desc) - sizeof(U64)); + lbr_tos_from_ip = 
ADAPTIVE_PEBS_LBR_INFO_lbr_from( + (ADAPTIVE_PEBS_LBR_INFO)(pebs_base + + LWPMU_DEVICE_apebs_lbr_offset( + &devices[dev_idx]))); + } + return lbr_tos_from_ip; +} + +/* ------------------------------------------------------------------------- */ +/*! + * @fn OS_STATUS PEBS_Initialize (DEV_CONFIG pcfg) + * + * @brief Initialize the pebs buffers + * + * @param dev_idx - Device index + * + * @return status + * + * Special Notes: + * If the user is asking for PEBS information. Allocate the DS area + */ +OS_STATUS PEBS_Initialize(U32 dev_idx) +{ + DEV_CONFIG pcfg = LWPMU_DEVICE_pcfg(&devices[dev_idx]); + + SEP_DRV_LOG_TRACE_IN("Pcfg: %p.", pcfg); + + if (DEV_CONFIG_pebs_mode(pcfg)) { + switch (DEV_CONFIG_pebs_mode(pcfg)) { + case 1: + SEP_DRV_LOG_INIT("Set up the Core2 dispatch table."); + LWPMU_DEVICE_pebs_dispatch(&devices[dev_idx]) = + &core2_pebs; + LWPMU_DEVICE_pebs_record_size(&devices[dev_idx]) = + sizeof(PEBS_REC_NODE); + break; + case 2: + SEP_DRV_LOG_INIT("Set up the Nehalem dispatch."); + LWPMU_DEVICE_pebs_dispatch(&devices[dev_idx]) = + &corei7_pebs; + LWPMU_DEVICE_pebs_record_size(&devices[dev_idx]) = + sizeof(PEBS_REC_EXT_NODE); + break; + case 3: + SEP_DRV_LOG_INIT( + "Set up the Core2 (PNR) dispatch table."); + LWPMU_DEVICE_pebs_dispatch(&devices[dev_idx]) = + &core2p_pebs; + LWPMU_DEVICE_pebs_record_size(&devices[dev_idx]) = + sizeof(PEBS_REC_NODE); + break; + case 4: + SEP_DRV_LOG_INIT("Set up the Haswell dispatch table."); + LWPMU_DEVICE_pebs_dispatch(&devices[dev_idx]) = + &haswell_pebs; + LWPMU_DEVICE_pebs_record_size(&devices[dev_idx]) = + sizeof(PEBS_REC_EXT1_NODE); + break; + case 5: + SEP_DRV_LOG_INIT( + "Set up the Perf version4 dispatch table."); + LWPMU_DEVICE_pebs_dispatch(&devices[dev_idx]) = + &perfver4_pebs; + LWPMU_DEVICE_pebs_record_size(&devices[dev_idx]) = + sizeof(PEBS_REC_EXT2_NODE); + break; + case 6: + if (!DEV_CONFIG_enable_adaptive_pebs(pcfg)) { + SEP_DRV_LOG_TRACE( + "APEBS need to be enabled in perf version4 SNC dispatch 
mode."); + } + LWPMU_DEVICE_pebs_dispatch(&devices[dev_idx]) = + &perfver4_apebs; + LWPMU_DEVICE_pebs_record_size(&devices[dev_idx]) = + sizeof(ADAPTIVE_PEBS_BASIC_INFO_NODE); + if (DEV_CONFIG_apebs_collect_mem_info(pcfg)) { + LWPMU_DEVICE_apebs_mem_offset( + &devices[dev_idx]) = + LWPMU_DEVICE_pebs_record_size( + &devices[dev_idx]); + LWPMU_DEVICE_pebs_record_size( + &devices[dev_idx]) += + sizeof(ADAPTIVE_PEBS_MEM_INFO_NODE); + } + if (DEV_CONFIG_apebs_collect_gpr(pcfg)) { + LWPMU_DEVICE_apebs_gpr_offset( + &devices[dev_idx]) = + LWPMU_DEVICE_pebs_record_size( + &devices[dev_idx]); + LWPMU_DEVICE_pebs_record_size( + &devices[dev_idx]) += + sizeof(ADAPTIVE_PEBS_GPR_INFO_NODE); + } + if (DEV_CONFIG_apebs_collect_xmm(pcfg)) { + LWPMU_DEVICE_apebs_xmm_offset( + &devices[dev_idx]) = + LWPMU_DEVICE_pebs_record_size( + &devices[dev_idx]); + LWPMU_DEVICE_pebs_record_size( + &devices[dev_idx]) += + sizeof(ADAPTIVE_PEBS_XMM_INFO_NODE); + } + if (DEV_CONFIG_apebs_collect_lbrs(pcfg)) { + LWPMU_DEVICE_apebs_lbr_offset( + &devices[dev_idx]) = + LWPMU_DEVICE_pebs_record_size( + &devices[dev_idx]); + LWPMU_DEVICE_pebs_record_size( + &devices[dev_idx]) += + (sizeof(ADAPTIVE_PEBS_LBR_INFO_NODE) * + DEV_CONFIG_apebs_num_lbr_entries( + pcfg)); + } + SEP_DRV_LOG_TRACE("Size of adaptive pebs record - %d.", + LWPMU_DEVICE_pebs_record_size( + &devices[dev_idx])); + break; + default: + SEP_DRV_LOG_INIT( + "Unknown PEBS type. Will not collect PEBS information."); + break; + } + } + if (LWPMU_DEVICE_pebs_dispatch(&devices[dev_idx]) && + !DEV_CONFIG_pebs_record_num(pcfg)) { + DEV_CONFIG_pebs_record_num(pcfg) = 1; + } + + SEP_DRV_LOG_TRACE_OUT("OS_SUCCESS"); + return OS_SUCCESS; +} + +/* ------------------------------------------------------------------------- */ +/*! 
+ * @fn OS_STATUS PEBS_Allocate (void) + * + * @brief Allocate the pebs related buffers + * + * @param NONE + * + * @return NONE + * + * Special Notes: + * Allocated the DS area used for PEBS capture + */ +OS_STATUS PEBS_Allocate(VOID) +{ + S32 cpu_num; + CPU_STATE pcpu; + U32 dev_idx; + U32 dts_size; + DEV_CONFIG pcfg; + + SEP_DRV_LOG_INIT_IN(""); + + for (cpu_num = 0; cpu_num < GLOBAL_STATE_num_cpus(driver_state); + cpu_num++) { + pcpu = &pcb[cpu_num]; + dev_idx = core_to_dev_map[cpu_num]; + pcfg = LWPMU_DEVICE_pcfg(&devices[dev_idx]); + if (LWPMU_DEVICE_pebs_dispatch(&devices[dev_idx])) { + dts_size = sizeof(DTS_BUFFER_EXT_NODE); + if (DEV_CONFIG_enable_adaptive_pebs(pcfg)) { + dts_size = sizeof(DTS_BUFFER_EXT1_NODE); + } + CPU_STATE_dts_buffer_offset(pcpu) = + pebs_global_memory_size; + pebs_global_memory_size += PER_CORE_BUFFER_SIZE( + dts_size, + LWPMU_DEVICE_pebs_record_size( + &devices[dev_idx]), + DEV_CONFIG_pebs_record_num(pcfg)); + } + } + if (pebs_global_memory_size) { + if (DRV_SETUP_INFO_page_table_isolation(&req_drv_setup_info) == + DRV_SETUP_INFO_PTI_DISABLED) { + SEP_DRV_LOG_INIT( + "Allocating global PEBS buffer using regular control routine."); + pebs_global_memory = (PVOID)CONTROL_Allocate_KMemory( + pebs_global_memory_size); + if (!pebs_global_memory) { + SEP_DRV_LOG_ERROR_TRACE_OUT( + "Failed to allocate PEBS buffer!"); + return OS_NO_MEM; + } + memset(pebs_global_memory, 0, pebs_global_memory_size); + } else { +#if defined(DRV_USE_KAISER) + SEP_DRV_LOG_INIT( + "Allocating PEBS buffer using KAISER-compatible approach."); + + if (!local_kaiser_add_mapping) { + local_kaiser_add_mapping = + (PVOID)UTILITY_Find_Symbol( + "kaiser_add_mapping"); + if (!local_kaiser_add_mapping) { + SEP_DRV_LOG_ERROR( + "Could not find 'kaiser_add_mapping'!"); + goto kaiser_error_handling; + } + } + + if (!local_kaiser_remove_mapping) { + local_kaiser_remove_mapping = + (PVOID)UTILITY_Find_Symbol( + "kaiser_remove_mapping"); + if (!local_kaiser_remove_mapping) { + 
SEP_DRV_LOG_ERROR( + "Could not find 'kaiser_remove_mapping'!"); + goto kaiser_error_handling; + } + } + + pebs_global_memory = (PVOID)__get_free_pages( + GFP_KERNEL | __GFP_ZERO, + get_order(pebs_global_memory_size)); + + if (pebs_global_memory) { + SEP_DRV_LOG_TRACE( + "Successful memory allocation for pebs_global_memory."); + + if (local_kaiser_add_mapping( + (unsigned long)pebs_global_memory, + pebs_global_memory_size, + __PAGE_KERNEL) >= 0) { + SEP_DRV_LOG_TRACE( + "Successful kaiser_add_mapping."); + } else { + SEP_DRV_LOG_ERROR( + "KAISER mapping failed!"); + free_pages( + (unsigned long) + pebs_global_memory, + get_order( + pebs_global_memory_size)); + pebs_global_memory = NULL; + goto kaiser_error_handling; + } + } else { + SEP_DRV_LOG_ERROR( + "Failed memory allocation for pebs_global_memory!"); + } + + kaiser_error_handling: + if (!pebs_global_memory) { + SEP_DRV_LOG_ERROR_TRACE_OUT( + "Failed to setup PEBS buffer!"); + return OS_NO_MEM; + } +#elif defined(DRV_USE_PTI) + if (!local_cea_set_pte) { + local_cea_set_pte = (PVOID)UTILITY_Find_Symbol( + "cea_set_pte"); + if (!local_cea_set_pte) { + SEP_DRV_LOG_ERROR_TRACE_OUT( + "Could not find 'cea_set_pte'!"); + return OS_FAULT; + } + } + if (!local_do_kernel_range_flush) { + local_do_kernel_range_flush = + (PVOID)UTILITY_Find_Symbol( + "do_kernel_range_flush"); + if (!local_do_kernel_range_flush) { + SEP_DRV_LOG_ERROR_TRACE_OUT( + "Could not find 'do_kernel_range_flush'!"); + return OS_FAULT; + } + } +#endif // DRV_USE_PTI + } + } + + CONTROL_Invoke_Parallel(pebs_Allocate_Buffers, (VOID *)NULL); + + SEP_DRV_LOG_INIT_OUT(""); + return OS_SUCCESS; +} + +/* ------------------------------------------------------------------------- */ +/*! 
+ * @fn VOID PEBS_Destroy (void) + * + * @brief Clean up the pebs related buffers + * + * @param pcfg - Driver Configuration + * + * @return NONE + * + * Special Notes: + * Deallocated the DS area used for PEBS capture + */ +VOID PEBS_Destroy(VOID) +{ + SEP_DRV_LOG_TRACE_IN(""); + + CONTROL_Invoke_Parallel(pebs_Deallocate_Buffers, (VOID *)(size_t)0); + if (pebs_global_memory) { + if (DRV_SETUP_INFO_page_table_isolation(&req_drv_setup_info) == + DRV_SETUP_INFO_PTI_DISABLED) { + SEP_DRV_LOG_INIT( + "Freeing PEBS buffer using regular control routine."); + pebs_global_memory = + CONTROL_Free_Memory(pebs_global_memory); + } +#if defined(DRV_USE_KAISER) + else if (DRV_SETUP_INFO_page_table_isolation( + &req_drv_setup_info) == + DRV_SETUP_INFO_PTI_KAISER) { + SEP_DRV_LOG_INIT( + "Freeing PEBS buffer using KAISER-compatible approach."); + if (local_kaiser_remove_mapping) { + local_kaiser_remove_mapping( + (unsigned long)pebs_global_memory, + pebs_global_memory_size); + } else { + SEP_DRV_LOG_ERROR( + "Could not call 'kaiser_remove_mapping'!"); + } + free_pages((unsigned long)pebs_global_memory, + get_order(pebs_global_memory_size)); + pebs_global_memory = NULL; + } +#endif // DRV_USE_KAISER + + pebs_global_memory_size = 0; + SEP_DRV_LOG_INIT("PEBS buffer successfully freed."); + } + + SEP_DRV_LOG_TRACE_OUT(""); +} diff --git a/drivers/platform/x86/sepdk/sep/perfver4.c b/drivers/platform/x86/sepdk/sep/perfver4.c new file mode 100755 index 0000000000000..ae8fa717f4bf9 --- /dev/null +++ b/drivers/platform/x86/sepdk/sep/perfver4.c @@ -0,0 +1,1972 @@ +/* **************************************************************************** + * Copyright(C) 2009-2018 Intel Corporation. All Rights Reserved. + * + * This file is part of SEP Development Kit + * + * SEP Development Kit is free software; you can redistribute it + * and/or modify it under the terms of the GNU General Public License + * version 2 as published by the Free Software Foundation. 
+ * + * SEP Development Kit is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * As a special exception, you may use this file as part of a free software + * library without restriction. Specifically, if other files instantiate + * templates or use macros or inline functions from this file, or you + * compile this file and link it with other files to produce an executable + * this file does not by itself cause the resulting executable to be + * covered by the GNU General Public License. This exception does not + * however invalidate any other reasons why the executable file might be + * covered by the GNU General Public License. + * **************************************************************************** + */ + +#include "lwpmudrv_defines.h" +#include +#include +#include + +#include "lwpmudrv_types.h" +#include "lwpmudrv_ecb.h" +#include "lwpmudrv_struct.h" + +#include "lwpmudrv.h" +#include "utility.h" +#include "control.h" +#include "output.h" +#include "perfver4.h" +#include "ecb_iterators.h" +#include "pebs.h" +#include "apic.h" + +extern U64 *read_counter_info; +extern DRV_CONFIG drv_cfg; +extern U64 *interrupt_counts; +extern DRV_SETUP_INFO_NODE req_drv_setup_info; +extern EMON_BUFFER_DRIVER_HELPER emon_buffer_driver_helper; +static U64 perf_metrics_counter_reload_value; + +typedef struct SADDR_S { + S64 addr : PERFVER4_LBR_DATA_BITS; +} SADDR; + +static U32 restore_reg_addr[3]; + +#define SADDR_addr(x) ((x).addr) +#define MSR_ENERGY_MULTIPLIER 0x606 // Energy Multiplier MSR + +#define IS_FIXED_CTR_ENABLED(ia32_perf_global_ctrl_reg_val) \ + ((ia32_perf_global_ctrl_reg_val)&0x700000000ULL) +#define IS_FOUR_FIXED_CTR_ENABLED(ia32_perf_global_ctrl_reg_val) \ + ((ia32_perf_global_ctrl_reg_val)&0xF00000000ULL) +#define IS_PMC_PEBS_ENABLED_GP(ia32_perf_global_ctrl_reg_val, \ + 
ia32_pebs_enable_reg_val) \ + (((ia32_perf_global_ctrl_reg_val)&0xfULL) == \ + ((ia32_pebs_enable_reg_val)&0xfULL)) +#define IS_PMC_PEBS_ENABLED_FP_AND_GP(ia32_perf_global_ctrl_reg_val, \ + ia32_pebs_enable_reg_val) \ + (((ia32_perf_global_ctrl_reg_val)&0xf000000ffULL) == \ + ((ia32_pebs_enable_reg_val)&0xf000000ffULL)) + +#define DISABLE_FRZ_ON_PMI(ia32_debug_ctrl_reg_val) \ + (0xefff & (ia32_debug_ctrl_reg_val)) +/* ------------------------------------------------------------------------- */ +/*! + * @fn void perfver4_Write_PMU(param) + * + * @param param dummy parameter which is not used + * + * @return None No return needed + * + * @brief Initial set up of the PMU registers + * + * Special Notes + * Initial write of PMU registers. + * Walk through the enties and write the value of the register accordingly. + * Assumption: For CCCR registers the enable bit is set to value 0. + * When current_group = 0, then this is the first time this routine is called, + * initialize the locks and set up EM tables. 
+ */ +static VOID perfver4_Write_PMU(VOID *param) +{ + U32 this_cpu; + CPU_STATE pcpu; + ECB pecb; + U32 dev_idx; + U32 cur_grp; + EVENT_CONFIG ec; + DISPATCH dispatch; + DEV_CONFIG pcfg; +#if defined(DRV_SEP_ACRN_ON) + struct profiling_pmi_config *pmi_config; + U32 index; + S32 msr_idx; +#else + U32 counter_index; +#endif + + SEP_DRV_LOG_TRACE_IN("Dummy param: %p.", param); + + if (param == NULL) { + preempt_disable(); + this_cpu = CONTROL_THIS_CPU(); + preempt_enable(); + } else { + this_cpu = *(S32 *)param; + } + + pcpu = &pcb[this_cpu]; + dev_idx = core_to_dev_map[this_cpu]; + cur_grp = CPU_STATE_current_group(pcpu); + pecb = LWPMU_DEVICE_PMU_register_data(&devices[dev_idx])[cur_grp]; + ec = LWPMU_DEVICE_ec(&devices[dev_idx]); + dispatch = LWPMU_DEVICE_dispatch(&devices[dev_idx]); + pcfg = LWPMU_DEVICE_pcfg(&devices[dev_idx]); + + if (!pecb) { + SEP_DRV_LOG_TRACE_OUT("Early exit (!pecb)."); + return; + } + +#if !defined(DRV_SEP_ACRN_ON) + counter_index = 0; + if (CPU_STATE_current_group(pcpu) == 0) { + if (EVENT_CONFIG_mode(ec) != EM_DISABLED) { + U32 index; + U32 st_index; + U32 j; + + /* Save all the initialization values away into an array for Event Multiplexing. */ + for (j = 0; j < EVENT_CONFIG_num_groups(ec); j++) { + CPU_STATE_current_group(pcpu) = j; + st_index = CPU_STATE_current_group(pcpu) * + EVENT_CONFIG_max_gp_events(ec); + FOR_EACH_REG_CORE_OPERATION( + pecb, i, PMU_OPERATION_DATA_GP) + { + index = st_index + i - + ECB_operations_register_start( + pecb, + PMU_OPERATION_DATA_GP); + CPU_STATE_em_tables(pcpu)[index] = + ECB_entries_reg_value(pecb, i); + } + END_FOR_EACH_REG_CORE_OPERATION; + } + /* Reset the current group to the very first one. 
*/ + CPU_STATE_current_group(pcpu) = + this_cpu % EVENT_CONFIG_num_groups(ec); + } + } + + if (dispatch->hw_errata) { + dispatch->hw_errata(); + } + + /* Clear outstanding frozen bits */ + SYS_Write_MSR(IA32_PERF_GLOBAL_OVF_CTRL, PERFVER4_FROZEN_BIT_MASK); + + FOR_EACH_REG_CORE_OPERATION(pecb, i, PMU_OPERATION_ALL_REG) + { + /* + * Writing the GLOBAL Control register enables the PMU to start counting. + * So write 0 into the register to prevent any counting from starting. + */ + if (i == ECB_SECTION_REG_INDEX(pecb, GLOBAL_CTRL_REG_INDEX, + PMU_OPERATION_GLOBAL_REGS)) { + SYS_Write_MSR(ECB_entries_reg_id(pecb, i), 0LL); + continue; + } + /* + * PEBS is enabled for this collection session + */ + if (DRV_SETUP_INFO_pebs_accessible(&req_drv_setup_info) && + i == ECB_SECTION_REG_INDEX(pecb, PEBS_ENABLE_REG_INDEX, + PMU_OPERATION_GLOBAL_REGS) && + ECB_entries_reg_value(pecb, i)) { + SYS_Write_MSR(ECB_entries_reg_id(pecb, i), 0LL); + continue; + } + + if (DEV_CONFIG_pebs_mode(pcfg) && + (ECB_entries_precise_get(pecb, i) == 1)) { + if (ECB_entries_fixed_reg_get(pecb, i)) { + counter_index = (ECB_entries_reg_id(pecb, i) - + IA32_FIXED_CTR0 + 8); + } else { + counter_index = (ECB_entries_reg_id(pecb, i) - + IA32_PMC0); + } + PEBS_Reset_Counter(this_cpu, counter_index, + ECB_entries_reg_value(pecb, i)); + } + + SYS_Write_MSR(ECB_entries_reg_id(pecb, i), + ECB_entries_reg_value(pecb, i)); +#if defined(MYDEBUG) + { + U64 val = SYS_Read_MSR(ECB_entries_reg_id(pecb, i)); + SEP_DRV_LOG_TRACE( + "Write reg 0x%x --- value 0x%llx -- read 0x%llx.", + ECB_entries_reg_id(pecb, i), + ECB_entries_reg_value(pecb, i), val); + } +#endif + } + END_FOR_EACH_REG_CORE_OPERATION; +#else + pmi_config = (struct profiling_pmi_config *)CONTROL_Allocate_Memory( + sizeof(struct profiling_pmi_config)); + if (pmi_config == NULL) { + SEP_PRINT_ERROR("pmi_config memory allocation failed\n"); + return; + } + memset(pmi_config, 0, sizeof(struct profiling_pmi_config)); + + msr_idx = 0; + 
pmi_config->num_groups = 1; + + pmi_config->initial_list[0][msr_idx].msr_id = IA32_PERF_GLOBAL_CTRL; + pmi_config->initial_list[0][msr_idx].op_type = MSR_OP_WRITE; + pmi_config->initial_list[0][msr_idx].reg_type = PMU_MSR_CCCR; + pmi_config->initial_list[0][msr_idx].value = 0x0; + pmi_config->initial_list[0][msr_idx].param = 0x0; + msr_idx++; + + FOR_EACH_CCCR_REG_CPU(pecb, i, this_cpu) + { + if ((ECB_entries_reg_id(pecb, i) == IA32_PERF_GLOBAL_CTRL) || + (ECB_entries_reg_id(pecb, i) == IA32_PEBS_ENABLE)) { + continue; + } + + pmi_config->initial_list[0][msr_idx].msr_id = + ECB_entries_reg_id(pecb, i); + pmi_config->initial_list[0][msr_idx].op_type = MSR_OP_WRITE; + pmi_config->initial_list[0][msr_idx].reg_type = PMU_MSR_CCCR; + pmi_config->initial_list[0][msr_idx].value = + ECB_entries_reg_value(pecb, i); + pmi_config->initial_list[0][msr_idx].param = 0x0; + msr_idx++; + BUG_ON(msr_idx >= MAX_MSR_LIST_NUM); + } + END_FOR_EACH_CCCR_REG_CPU; + + FOR_EACH_ESCR_REG_CPU(pecb, i, this_cpu) + { + pmi_config->initial_list[0][msr_idx].msr_id = + ECB_entries_reg_id(pecb, i); + pmi_config->initial_list[0][msr_idx].op_type = MSR_OP_WRITE; + pmi_config->initial_list[0][msr_idx].reg_type = PMU_MSR_ESCR; + pmi_config->initial_list[0][msr_idx].value = + ECB_entries_reg_value(pecb, i); + pmi_config->initial_list[0][msr_idx].param = 0x0; + msr_idx++; + BUG_ON(msr_idx >= MAX_MSR_LIST_NUM); + } + END_FOR_EACH_ESCR_REG_CPU; + + FOR_EACH_DATA_REG_CPU(pecb, i, this_cpu) + { + if (ECB_entries_fixed_reg_get(pecb, i)) { + index = ECB_entries_reg_id(pecb, i) - IA32_FIXED_CTR0 + + 0x20; + } else if (ECB_entries_is_gp_reg_get(pecb, i)) { + index = ECB_entries_reg_id(pecb, i) - IA32_PMC0; + } else { + continue; + } + pmi_config->initial_list[0][msr_idx].msr_id = + ECB_entries_reg_id(pecb, i); + pmi_config->initial_list[0][msr_idx].op_type = MSR_OP_WRITE; + pmi_config->initial_list[0][msr_idx].reg_type = PMU_MSR_DATA; + pmi_config->initial_list[0][msr_idx].value = + ECB_entries_reg_value(pecb, 
i); + pmi_config->initial_list[0][msr_idx].param = index; + msr_idx++; + BUG_ON(msr_idx >= MAX_MSR_LIST_NUM); + } + END_FOR_EACH_DATA_REG_CPU; + pmi_config->initial_list[0][msr_idx].msr_id = -1; + + FOR_EACH_CCCR_REG_CPU(pecb, i, this_cpu) + { + if (ECB_entries_reg_id(pecb, i) == IA32_PERF_GLOBAL_CTRL) { + pmi_config->start_list[0][0].msr_id = + IA32_PERF_GLOBAL_CTRL; + pmi_config->start_list[0][0].op_type = MSR_OP_WRITE; + pmi_config->start_list[0][0].reg_type = PMU_MSR_CCCR; + pmi_config->start_list[0][0].value = + ECB_entries_reg_value(pecb, i); + pmi_config->start_list[0][0].param = 0x0; + pmi_config->start_list[0][1].msr_id = -1; + break; + } + } + END_FOR_EACH_CCCR_REG_CPU; + + pmi_config->stop_list[0][0].msr_id = IA32_PERF_GLOBAL_CTRL; + pmi_config->stop_list[0][0].op_type = MSR_OP_WRITE; + pmi_config->stop_list[0][0].reg_type = PMU_MSR_CCCR; + pmi_config->stop_list[0][0].value = 0x0; + pmi_config->stop_list[0][0].param = 0x0; + pmi_config->stop_list[0][1].msr_id = -1; + + if (DRV_CONFIG_counting_mode(drv_cfg) == FALSE) { + pmi_config->entry_list[0][0].msr_id = IA32_PERF_GLOBAL_CTRL; + pmi_config->entry_list[0][0].op_type = MSR_OP_WRITE; + pmi_config->entry_list[0][0].reg_type = PMU_MSR_CCCR; + pmi_config->entry_list[0][0].value = 0x0; + pmi_config->entry_list[0][0].param = 0x0; + pmi_config->entry_list[0][1].msr_id = -1; + + msr_idx = 0; + FOR_EACH_CCCR_REG_CPU(pecb, i, this_cpu) + { + if ((ECB_entries_reg_id(pecb, i) == + IA32_PERF_GLOBAL_CTRL) || + (ECB_entries_reg_id(pecb, i) == IA32_PEBS_ENABLE)) { + continue; + } + + pmi_config->exit_list[0][msr_idx].msr_id = + ECB_entries_reg_id(pecb, i); + pmi_config->exit_list[0][msr_idx].op_type = + MSR_OP_WRITE; + pmi_config->exit_list[0][msr_idx].reg_type = + PMU_MSR_CCCR; + pmi_config->exit_list[0][msr_idx].value = + ECB_entries_reg_value(pecb, i); + pmi_config->exit_list[0][msr_idx].param = 0x0; + msr_idx++; + BUG_ON(msr_idx >= MAX_MSR_LIST_NUM); + } + END_FOR_EACH_CCCR_REG_CPU; + + FOR_EACH_ESCR_REG_CPU(pecb, 
i, this_cpu) + { + pmi_config->exit_list[0][msr_idx].msr_id = + ECB_entries_reg_id(pecb, i); + pmi_config->exit_list[0][msr_idx].op_type = + MSR_OP_WRITE; + pmi_config->exit_list[0][msr_idx].reg_type = + PMU_MSR_ESCR; + pmi_config->exit_list[0][msr_idx].value = + ECB_entries_reg_value(pecb, i); + pmi_config->exit_list[0][msr_idx].param = 0x0; + msr_idx++; + BUG_ON(msr_idx >= MAX_MSR_LIST_NUM); + } + END_FOR_EACH_ESCR_REG_CPU; + + FOR_EACH_DATA_REG_CPU(pecb, i, this_cpu) + { + if (ECB_entries_fixed_reg_get(pecb, i)) { + index = ECB_entries_reg_id(pecb, i) - + IA32_FIXED_CTR0 + 0x20; + } else if (ECB_entries_is_gp_reg_get(pecb, i)) { + index = ECB_entries_reg_id(pecb, i) - IA32_PMC0; + } else { + continue; + } + pmi_config->exit_list[0][msr_idx].msr_id = + ECB_entries_reg_id(pecb, i); + pmi_config->exit_list[0][msr_idx].op_type = + MSR_OP_WRITE; + pmi_config->exit_list[0][msr_idx].reg_type = + PMU_MSR_DATA; + pmi_config->exit_list[0][msr_idx].value = + ECB_entries_reg_value(pecb, i); + pmi_config->exit_list[0][msr_idx].param = index; + msr_idx++; + BUG_ON(msr_idx >= MAX_MSR_LIST_NUM); + } + END_FOR_EACH_DATA_REG_CPU; + + FOR_EACH_CCCR_REG_CPU(pecb, i, this_cpu) + { + if (ECB_entries_reg_id(pecb, i) == + IA32_PERF_GLOBAL_CTRL) { + pmi_config->exit_list[0][msr_idx].msr_id = + IA32_PERF_GLOBAL_CTRL; + pmi_config->exit_list[0][msr_idx].op_type = + MSR_OP_WRITE; + pmi_config->exit_list[0][msr_idx].reg_type = + PMU_MSR_CCCR; + pmi_config->exit_list[0][msr_idx].value = + ECB_entries_reg_value(pecb, i); + pmi_config->exit_list[0][msr_idx].param = 0x0; + msr_idx++; + BUG_ON(msr_idx >= MAX_MSR_LIST_NUM); + break; + } + } + END_FOR_EACH_CCCR_REG_CPU; + pmi_config->exit_list[0][msr_idx].msr_id = -1; + } + + BUG_ON(!virt_addr_valid(pmi_config)); + + acrn_hypercall2(HC_PROFILING_OPS, PROFILING_CONFIG_PMI, + virt_to_phys(pmi_config)); + + pmi_config = CONTROL_Free_Memory(pmi_config); +#endif + + SEP_DRV_LOG_TRACE_OUT(""); +} + +/* 
------------------------------------------------------------------------- */ +/*! + * @fn void perfver4_Disable_PMU(param) + * + * @param param dummy parameter which is not used + * + * @return None No return needed + * + * @brief Zero out the global control register. This automatically disables the PMU counters. + * + */ +static VOID perfver4_Disable_PMU(PVOID param) +{ +#if !defined(DRV_SEP_ACRN_ON) + U32 this_cpu; + CPU_STATE pcpu; + ECB pecb; + U32 dev_idx; + U32 cur_grp; + DEV_CONFIG pcfg; + + SEP_DRV_LOG_TRACE_IN("Dummy param: %p.", param); + + this_cpu = CONTROL_THIS_CPU(); + pcpu = &pcb[this_cpu]; + dev_idx = core_to_dev_map[this_cpu]; + cur_grp = CPU_STATE_current_group(pcpu); + pecb = LWPMU_DEVICE_PMU_register_data(&devices[dev_idx])[cur_grp]; + pcfg = LWPMU_DEVICE_pcfg(&devices[dev_idx]); + + if (!pecb) { + // no programming for this device for this group + SEP_DRV_LOG_TRACE_OUT("Early exit (!pecb)."); + return; + } + + if (GET_DRIVER_STATE() != DRV_STATE_RUNNING) { + SEP_DRV_LOG_TRACE("Driver state = %d.", GET_DRIVER_STATE()); + SYS_Write_MSR(ECB_entries_reg_id( + pecb, ECB_SECTION_REG_INDEX( + pecb, GLOBAL_CTRL_REG_INDEX, + PMU_OPERATION_GLOBAL_REGS)), + 0LL); + if (DEV_CONFIG_pebs_mode(pcfg)) { + SYS_Write_MSR( + ECB_entries_reg_id( + pecb, + ECB_SECTION_REG_INDEX( + pecb, PEBS_ENABLE_REG_INDEX, + PMU_OPERATION_GLOBAL_REGS)), + 0LL); + } + } + + SEP_DRV_LOG_TRACE_OUT(""); +#endif +} + +/* ------------------------------------------------------------------------- */ +/*! + * @fn void perfver4_Enable_PMU(param) + * + * @param param dummy parameter which is not used + * + * @return None No return needed + * + * @brief Set the enable bit for all the Control registers + * + */ +static VOID perfver4_Enable_PMU(PVOID param) +{ +#if !defined(DRV_SEP_ACRN_ON) + /* + * Get the value from the event block + * 0 == location of the global control reg for this block. 
+ * Generalize this location awareness when possible + */ + U32 this_cpu; + CPU_STATE pcpu; + ECB pecb; + U32 dev_idx; + U32 cur_grp; + DEV_CONFIG pcfg; + U64 global_control_val; + U64 pebs_enable_val; + DRV_BOOL multi_pebs_enabled; + + SEP_DRV_LOG_TRACE_IN("Dummy param: %p.", param); + + this_cpu = CONTROL_THIS_CPU(); + pcpu = &pcb[this_cpu]; + dev_idx = core_to_dev_map[this_cpu]; + cur_grp = CPU_STATE_current_group(pcpu); + pecb = LWPMU_DEVICE_PMU_register_data(&devices[dev_idx])[cur_grp]; + pcfg = LWPMU_DEVICE_pcfg(&devices[dev_idx]); + + if (!pecb) { + // no programming for this device for this group + SEP_DRV_LOG_TRACE_OUT("Early exit (!pecb)."); + return; + } + + if (KVM_guest_mode) { + SYS_Write_MSR(ECB_entries_reg_id( + pecb, ECB_SECTION_REG_INDEX( + pecb, GLOBAL_CTRL_REG_INDEX, + PMU_OPERATION_GLOBAL_REGS)), + 0LL); + } + if (GET_DRIVER_STATE() == DRV_STATE_RUNNING) { + APIC_Enable_Pmi(); + + /* Clear outstanding frozen bits */ + SYS_Write_MSR(ECB_entries_reg_id( + pecb, + ECB_SECTION_REG_INDEX( + pecb, GLOBAL_OVF_CTRL_REG_INDEX, + PMU_OPERATION_GLOBAL_REGS)), + PERFVER4_FROZEN_BIT_MASK); + + if (CPU_STATE_reset_mask(pcpu)) { + SEP_DRV_LOG_TRACE("Overflow reset mask %llx.", + CPU_STATE_reset_mask(pcpu)); + // Reinitialize the global overflow control register + SYS_Write_MSR( + ECB_entries_reg_id( + pecb, + ECB_SECTION_REG_INDEX( + pecb, GLOBAL_CTRL_REG_INDEX, + PMU_OPERATION_GLOBAL_REGS)), + ECB_entries_reg_value( + pecb, + ECB_SECTION_REG_INDEX( + pecb, GLOBAL_CTRL_REG_INDEX, + PMU_OPERATION_GLOBAL_REGS))); + SYS_Write_MSR( + ECB_entries_reg_id( + pecb, + ECB_SECTION_REG_INDEX( + pecb, DEBUG_CTRL_REG_INDEX, + PMU_OPERATION_GLOBAL_REGS)), + ECB_entries_reg_value( + pecb, + ECB_SECTION_REG_INDEX( + pecb, DEBUG_CTRL_REG_INDEX, + PMU_OPERATION_GLOBAL_REGS))); + CPU_STATE_reset_mask(pcpu) = 0LL; + } + if (CPU_STATE_group_swap(pcpu)) { + CPU_STATE_group_swap(pcpu) = 0; + SYS_Write_MSR( + ECB_entries_reg_id( + pecb, + ECB_SECTION_REG_INDEX( + pecb, 
GLOBAL_CTRL_REG_INDEX, + PMU_OPERATION_GLOBAL_REGS)), + ECB_entries_reg_value( + pecb, + ECB_SECTION_REG_INDEX( + pecb, GLOBAL_CTRL_REG_INDEX, + PMU_OPERATION_GLOBAL_REGS))); + if (DEV_CONFIG_pebs_mode(pcfg) || + DEV_CONFIG_latency_capture(pcfg)) { + SYS_Write_MSR( + ECB_entries_reg_id( + pecb, + ECB_SECTION_REG_INDEX( + pecb, + PEBS_ENABLE_REG_INDEX, + PMU_OPERATION_GLOBAL_REGS)), + ECB_entries_reg_value( + pecb, + ECB_SECTION_REG_INDEX( + pecb, + PEBS_ENABLE_REG_INDEX, + PMU_OPERATION_GLOBAL_REGS))); + } + SYS_Write_MSR( + ECB_entries_reg_id( + pecb, + ECB_SECTION_REG_INDEX( + pecb, DEBUG_CTRL_REG_INDEX, + PMU_OPERATION_GLOBAL_REGS)), + ECB_entries_reg_value( + pecb, + ECB_SECTION_REG_INDEX( + pecb, DEBUG_CTRL_REG_INDEX, + PMU_OPERATION_GLOBAL_REGS))); +#if defined(MYDEBUG) + { + U64 val; + val = SYS_Read_MSR(ECB_entries_reg_id( + pecb, + ECB_SECTION_REG_INDEX( + pecb, GLOBAL_CTRL_REG_INDEX, + PMU_OPERATION_GLOBAL_REGS))); + SEP_DRV_LOG_TRACE( + "Write reg 0x%x--- read 0x%llx.", + ECB_entries_reg_id(pecb, 0), val); + } +#endif + } + + multi_pebs_enabled = (DEV_CONFIG_pebs_mode(pcfg) && + (DEV_CONFIG_pebs_record_num(pcfg) > 1) && + (DRV_SETUP_INFO_page_table_isolation( + &req_drv_setup_info) == + DRV_SETUP_INFO_PTI_DISABLED)); + + // FIXME: workaround for sampling both pebs event and non-pebs event + // with pebs buffer size > 1 + if (multi_pebs_enabled) { + global_control_val = SYS_Read_MSR(ECB_entries_reg_id( + pecb, ECB_SECTION_REG_INDEX( + pecb, GLOBAL_CTRL_REG_INDEX, + PMU_OPERATION_GLOBAL_REGS))); + pebs_enable_val = SYS_Read_MSR(ECB_entries_reg_id( + pecb, ECB_SECTION_REG_INDEX( + pecb, PEBS_ENABLE_REG_INDEX, + PMU_OPERATION_GLOBAL_REGS))); + if (IS_FIXED_CTR_ENABLED(global_control_val) || + !IS_PMC_PEBS_ENABLED_GP(global_control_val, + pebs_enable_val)) { + SEP_DRV_LOG_TRACE( + "Global_control_val = 0x%llx pebs_enable_val = 0x%llx.", + global_control_val, pebs_enable_val); + SYS_Write_MSR( + ECB_entries_reg_id( + pecb, + ECB_SECTION_REG_INDEX( + pecb, + 
DEBUG_CTRL_REG_INDEX, + PMU_OPERATION_GLOBAL_REGS)), + DISABLE_FRZ_ON_PMI(ECB_entries_reg_value( + pecb, + ECB_SECTION_REG_INDEX( + pecb, + DEBUG_CTRL_REG_INDEX, + PMU_OPERATION_GLOBAL_REGS)))); + } + } + } + SEP_DRV_LOG_TRACE("Reenabled PMU with value 0x%llx.", + ECB_entries_reg_value(pecb, 0)); + + SEP_DRV_LOG_TRACE_OUT(""); +#endif +} + +/* ------------------------------------------------------------------------- */ +/*! + * @fn perfver4_Read_PMU_Data(param) + * + * @param param dummy parameter which is not used + * + * @return None No return needed + * + * @brief Read all the data MSR's into a buffer. Called by the interrupt handler. + * + */ +static void perfver4_Read_PMU_Data(PVOID param) +{ + U32 j; + U64 *buffer = read_counter_info; + U32 this_cpu; + CPU_STATE pcpu; + ECB pecb; + U32 dev_idx; + U32 cur_grp; +#if defined(DRV_SEP_ACRN_ON) + S32 start_index, cpu_idx, msr_idx; + struct profiling_msr_ops_list *msr_list; +#endif + + SEP_DRV_LOG_TRACE_IN("Dummy param: %p.", param); + + if (param == NULL) { + preempt_disable(); + this_cpu = CONTROL_THIS_CPU(); + preempt_enable(); + } else { + this_cpu = *(S32 *)param; + } + +#if !defined(DRV_SEP_ACRN_ON) + pcpu = &pcb[this_cpu]; + dev_idx = core_to_dev_map[this_cpu]; + cur_grp = CPU_STATE_current_group(pcpu); + pecb = LWPMU_DEVICE_PMU_register_data(&devices[dev_idx])[cur_grp]; + + if (!pecb) { + SEP_DRV_LOG_TRACE_OUT("Early exit (!pecb)."); + return; + } + + SEP_DRV_LOG_TRACE("PMU control_data 0x%p, buffer 0x%p.", + LWPMU_DEVICE_PMU_register_data(&devices[dev_idx]), + buffer); + FOR_EACH_REG_CORE_OPERATION(pecb, i, PMU_OPERATION_DATA_ALL) + { + j = EMON_BUFFER_CORE_EVENT_OFFSET( + EMON_BUFFER_DRIVER_HELPER_core_index_to_thread_offset_map( + emon_buffer_driver_helper)[this_cpu], + ECB_entries_core_event_id(pecb, i)); + + buffer[j] = SYS_Read_MSR(ECB_entries_reg_id(pecb, i)); + SEP_DRV_LOG_TRACE("j=%u, value=%llu, cpu=%u, event_id=%u", j, + buffer[j], this_cpu, + ECB_entries_core_event_id(pecb, i)); + } + 
END_FOR_EACH_REG_CORE_OPERATION; +#else + if (DRV_CONFIG_counting_mode(drv_cfg) == TRUE) { + msr_list = (struct profiling_msr_ops_list *) + CONTROL_Allocate_Memory( + GLOBAL_STATE_num_cpus(driver_state) * + sizeof(struct profiling_msr_ops_list)); + memset(msr_list, 0, + GLOBAL_STATE_num_cpus(driver_state) * + sizeof(struct profiling_msr_ops_list)); + for (cpu_idx = 0; cpu_idx < GLOBAL_STATE_num_cpus(driver_state); + cpu_idx++) { + pcpu = &pcb[cpu_idx]; + dev_idx = core_to_dev_map[cpu_idx]; + cur_grp = CPU_STATE_current_group(pcpu); + pecb = LWPMU_DEVICE_PMU_register_data( + &devices[dev_idx])[cur_grp]; + + if (!pecb) { + continue; + } + + msr_idx = 0; + FOR_EACH_DATA_REG_CPU(pecb, i, cpu_idx) + { + msr_list[cpu_idx].entries[msr_idx].msr_id = + ECB_entries_reg_id(pecb, i); + msr_list[cpu_idx].entries[msr_idx].op_type = + MSR_OP_READ_CLEAR; + msr_list[cpu_idx].entries[msr_idx].value = 0LL; + msr_idx++; + } + END_FOR_EACH_DATA_REG_CPU; + msr_list[cpu_idx].num_entries = msr_idx; + msr_list[cpu_idx].msr_op_state = MSR_OP_REQUESTED; + } + + BUG_ON(!virt_addr_valid(msr_list)); + + acrn_hypercall2(HC_PROFILING_OPS, PROFILING_MSR_OPS, + virt_to_phys(msr_list)); + + for (cpu_idx = 0; cpu_idx < GLOBAL_STATE_num_cpus(driver_state); + cpu_idx++) { + pcpu = &pcb[cpu_idx]; + dev_idx = core_to_dev_map[cpu_idx]; + cur_grp = CPU_STATE_current_group(pcpu); + pecb = LWPMU_DEVICE_PMU_register_data( + &devices[dev_idx])[cur_grp]; + + if (!pecb) { + continue; + } + + start_index = ECB_num_events(pecb) * cpu_idx; + msr_idx = 0; + FOR_EACH_DATA_REG_CPU(pecb, i, cpu_idx) + { + j = start_index + + ECB_entries_event_id_index(pecb, i); + buffer[j] = + msr_list[cpu_idx].entries[msr_idx].value; + msr_idx++; + } + END_FOR_EACH_DATA_REG_CPU; + } + + msr_list = CONTROL_Free_Memory(msr_list); + } +#endif + + SEP_DRV_LOG_TRACE_OUT(""); +} + +/* ------------------------------------------------------------------------- */ +/*! 
+ * @fn void perfver4_Check_Overflow(masks) + * + * @param masks the mask structure to populate + * + * @return None No return needed + * + * @brief Called by the data processing method to figure out which registers have overflowed. + * + */ +static void perfver4_Check_Overflow(DRV_MASKS masks) +{ + U32 index; + U64 overflow_status = 0; + U32 this_cpu; + BUFFER_DESC bd; + CPU_STATE pcpu; + ECB pecb; + U32 dev_idx; + U32 cur_grp; + DEV_CONFIG pcfg; + DISPATCH dispatch; + U64 overflow_status_clr = 0; + DRV_EVENT_MASK_NODE event_flag; + + SEP_DRV_LOG_TRACE_IN("Masks: %p.", masks); + + this_cpu = CONTROL_THIS_CPU(); + bd = &cpu_buf[this_cpu]; + pcpu = &pcb[this_cpu]; + dev_idx = core_to_dev_map[this_cpu]; + cur_grp = CPU_STATE_current_group(pcpu); + pecb = LWPMU_DEVICE_PMU_register_data(&devices[dev_idx])[cur_grp]; + pcfg = LWPMU_DEVICE_pcfg(&devices[dev_idx]); + dispatch = LWPMU_DEVICE_dispatch(&devices[dev_idx]); + + if (!pecb) { + SEP_DRV_LOG_TRACE_OUT("Early exit (!pecb)."); + return; + } + + // initialize masks + DRV_MASKS_masks_num(masks) = 0; + + overflow_status = SYS_Read_MSR(ECB_entries_reg_id( + pecb, ECB_SECTION_REG_INDEX(pecb, GLOBAL_STATUS_REG_INDEX, + PMU_OPERATION_GLOBAL_STATUS))); + + if (DEV_CONFIG_pebs_mode(pcfg) && + (DEV_CONFIG_pebs_record_num(pcfg) == 1)) { + overflow_status = PEBS_Overflowed(this_cpu, overflow_status, 0); + } + overflow_status_clr = overflow_status; + + if (dispatch->check_overflow_gp_errata) { + overflow_status = dispatch->check_overflow_gp_errata( + pecb, &overflow_status_clr); + } + SEP_DRV_LOG_TRACE("Overflow: cpu: %d, status 0x%llx.", this_cpu, + overflow_status); + index = 0; + BUFFER_DESC_sample_count(bd) = 0; + FOR_EACH_REG_CORE_OPERATION(pecb, i, PMU_OPERATION_DATA_ALL) + { + if (ECB_entries_fixed_reg_get(pecb, i)) { + index = i - + ECB_operations_register_start( + pecb, PMU_OPERATION_DATA_FIXED) + + 0x20; + if (dispatch->check_overflow_errata) { + overflow_status = + dispatch->check_overflow_errata( + pecb, i, 
overflow_status); + } + } else if (ECB_entries_is_gp_reg_get(pecb, i)) { + index = i - ECB_operations_register_start( + pecb, PMU_OPERATION_DATA_GP); + } else { + continue; + } + if (overflow_status & ((U64)1 << index)) { + SEP_DRV_LOG_TRACE("Overflow: cpu: %d, index %d.", + this_cpu, index); + SEP_DRV_LOG_TRACE( + "Register 0x%x --- val 0%llx.", + ECB_entries_reg_id(pecb, i), + SYS_Read_MSR(ECB_entries_reg_id(pecb, i))); + SYS_Write_MSR(ECB_entries_reg_id(pecb, i), + ECB_entries_reg_value(pecb, i)); + + if (DRV_CONFIG_enable_cp_mode(drv_cfg)) { + /* Increment the interrupt count. */ + if (interrupt_counts) { + interrupt_counts + [this_cpu * + DRV_CONFIG_num_events( + drv_cfg) + + ECB_entries_event_id_index( + pecb, i)] += 1; + } + } + + DRV_EVENT_MASK_bitFields1(&event_flag) = (U8)0; + if (ECB_entries_precise_get(pecb, i)) { + DRV_EVENT_MASK_precise(&event_flag) = 1; + } + if (ECB_entries_lbr_value_get(pecb, i)) { + DRV_EVENT_MASK_lbr_capture(&event_flag) = 1; + } + if (ECB_entries_uncore_get(pecb, i)) { + DRV_EVENT_MASK_uncore_capture(&event_flag) = 1; + } + if (ECB_entries_branch_evt_get(pecb, i)) { + DRV_EVENT_MASK_branch(&event_flag) = 1; + } + + if (DRV_MASKS_masks_num(masks) < MAX_OVERFLOW_EVENTS) { + DRV_EVENT_MASK_bitFields1( + DRV_MASKS_eventmasks(masks) + + DRV_MASKS_masks_num(masks)) = + DRV_EVENT_MASK_bitFields1(&event_flag); + DRV_EVENT_MASK_event_idx( + DRV_MASKS_eventmasks(masks) + + DRV_MASKS_masks_num(masks)) = + ECB_entries_event_id_index(pecb, i); + DRV_MASKS_masks_num(masks)++; + } else { + SEP_DRV_LOG_ERROR( + "The array for event masks is full."); + } + + SEP_DRV_LOG_TRACE("Overflow -- 0x%llx, index 0x%llx.", + overflow_status, (U64)1 << index); + SEP_DRV_LOG_TRACE("Slot# %d, reg_id 0x%x, index %d.", i, + ECB_entries_reg_id(pecb, i), index); + if (ECB_entries_event_id_index(pecb, i) == + CPU_STATE_trigger_event_num(pcpu)) { + CPU_STATE_trigger_count(pcpu)--; + } + } + } + END_FOR_EACH_REG_CORE_OPERATION; + + CPU_STATE_reset_mask(pcpu) = 
overflow_status_clr; + /* Clear outstanding overflow bits */ + SYS_Write_MSR(ECB_entries_reg_id( + pecb, ECB_SECTION_REG_INDEX( + pecb, GLOBAL_OVF_CTRL_REG_INDEX, + PMU_OPERATION_GLOBAL_REGS)), + overflow_status_clr & PERFVER4_OVERFLOW_BIT_MASK_HT_ON); + + SEP_DRV_LOG_TRACE("Check overflow completed %d.", this_cpu); + + SEP_DRV_LOG_TRACE_OUT(""); +} + +/* ------------------------------------------------------------------------- */ +/*! + * @fn perfver4_Swap_Group(restart) + * + * @param restart dummy parameter which is not used + * + * @return None No return needed + * + * @brief Perform the mechanics of swapping the event groups for event mux operations + * + * Special Notes + * Swap function for event multiplexing. + * Freeze the counting. + * Swap the groups. + * Enable the counting. + * Reset the event trigger count + * + */ +static VOID perfver4_Swap_Group(DRV_BOOL restart) +{ + U32 index; + U32 next_group; + U32 st_index; + U32 this_cpu; + CPU_STATE pcpu; + U32 dev_idx; + DISPATCH dispatch; + DEV_CONFIG pcfg; + EVENT_CONFIG ec; + U32 counter_index; + + SEP_DRV_LOG_TRACE_IN("Dummy restart: %u.", restart); + + this_cpu = CONTROL_THIS_CPU(); + pcpu = &pcb[this_cpu]; + dev_idx = core_to_dev_map[this_cpu]; + dispatch = LWPMU_DEVICE_dispatch(&devices[dev_idx]); + pcfg = LWPMU_DEVICE_pcfg(&devices[dev_idx]); + ec = LWPMU_DEVICE_ec(&devices[dev_idx]); + counter_index = 0; + + st_index = + CPU_STATE_current_group(pcpu) * EVENT_CONFIG_max_gp_events(ec); + next_group = (CPU_STATE_current_group(pcpu) + 1); + if (next_group >= EVENT_CONFIG_num_groups(ec)) { + next_group = 0; + } + + SEP_DRV_LOG_TRACE("Current group : 0x%x.", + CPU_STATE_current_group(pcpu)); + SEP_DRV_LOG_TRACE("Next group : 0x%x.", next_group); + + // Save the counters for the current group + if (!DRV_CONFIG_event_based_counts(drv_cfg)) { + FOR_EACH_REG_CORE_OPERATION(pecb, i, PMU_OPERATION_DATA_GP) + { + index = st_index + i - + ECB_operations_register_start( + pecb, PMU_OPERATION_DATA_GP); + 
CPU_STATE_em_tables(pcpu)[index] = + SYS_Read_MSR(ECB_entries_reg_id(pecb, i)); + SEP_DRV_LOG_TRACE("Saved value for reg 0x%x : 0x%llx.", + ECB_entries_reg_id(pecb, i), + CPU_STATE_em_tables(pcpu)[index]); + } + END_FOR_EACH_REG_CORE_OPERATION; + } + + CPU_STATE_current_group(pcpu) = next_group; + + if (dispatch->hw_errata) { + dispatch->hw_errata(); + } + + // First write the GP control registers (eventsel) + FOR_EACH_REG_CORE_OPERATION(pecb, i, PMU_OPERATION_CTRL_GP) + { + SYS_Write_MSR(ECB_entries_reg_id(pecb, i), + ECB_entries_reg_value(pecb, i)); + } + END_FOR_EACH_REG_CORE_OPERATION; + + if (DRV_CONFIG_event_based_counts(drv_cfg)) { + // In EBC mode, reset the counts for all events except for trigger event + FOR_EACH_REG_CORE_OPERATION(pecb, i, PMU_OPERATION_DATA_ALL) + { + if (ECB_entries_event_id_index(pecb, i) != + CPU_STATE_trigger_event_num(pcpu)) { + SYS_Write_MSR(ECB_entries_reg_id(pecb, i), 0LL); + } + } + END_FOR_EACH_REG_CORE_OPERATION; + } else { + // Then write the gp count registers + st_index = CPU_STATE_current_group(pcpu) * + EVENT_CONFIG_max_gp_events(ec); + FOR_EACH_REG_CORE_OPERATION(pecb, i, PMU_OPERATION_DATA_GP) + { + index = st_index + i - + ECB_operations_register_start( + pecb, PMU_OPERATION_DATA_GP); + SYS_Write_MSR(ECB_entries_reg_id(pecb, i), + CPU_STATE_em_tables(pcpu)[index]); + SEP_DRV_LOG_TRACE( + "Restore value for reg 0x%x : 0x%llx.", + ECB_entries_reg_id(pecb, i), + CPU_STATE_em_tables(pcpu)[index]); + } + END_FOR_EACH_REG_CORE_OPERATION; + } + + FOR_EACH_REG_CORE_OPERATION(pecb, i, PMU_OPERATION_OCR) + { + SYS_Write_MSR(ECB_entries_reg_id(pecb, i), + ECB_entries_reg_value(pecb, i)); + } + END_FOR_EACH_REG_CORE_OPERATION; + + if (DEV_CONFIG_pebs_record_num(pcfg)) { + FOR_EACH_REG_CORE_OPERATION(pecb, i, PMU_OPERATION_DATA_ALL) + { + if (ECB_entries_precise_get(pecb, i) == 1) { + if (ECB_entries_fixed_reg_get(pecb, i)) { + counter_index = + i - + ECB_operations_register_start( + pecb, + PMU_OPERATION_DATA_FIXED) + + 8; + } 
else { + counter_index = + i - + ECB_operations_register_start( + pecb, + PMU_OPERATION_DATA_GP); + } + PEBS_Reset_Counter(this_cpu, counter_index, + ECB_entries_reg_value(pecb, + i)); + } + } + END_FOR_EACH_REG_CORE_OPERATION; + } + + /* + * reset the em factor when a group is swapped + */ + CPU_STATE_trigger_count(pcpu) = EVENT_CONFIG_em_factor(ec); + + /* + * The enable routine needs to rewrite the control registers + */ + CPU_STATE_reset_mask(pcpu) = 0LL; + CPU_STATE_group_swap(pcpu) = 1; + + SEP_DRV_LOG_TRACE_OUT(""); +} + +/* ------------------------------------------------------------------------- */ +/*! + * @fn perfver4_Initialize(params) + * + * @param params dummy parameter which is not used + * + * @return None No return needed + * + * @brief Initialize the PMU setting up for collection + * + * Special Notes + * Saves the relevant PMU state (minimal set of MSRs required + * to avoid conflicts with other Linux tools, such as Oprofile). + * This function should be called in parallel across all CPUs + * prior to the start of sampling, before PMU state is changed. 
+ * + */ +static VOID perfver4_Initialize(VOID *param) +{ + U32 this_cpu; + CPU_STATE pcpu; + U32 dev_idx; + DEV_CONFIG pcfg; + U32 cur_grp; + ECB pecb = NULL; + + SEP_DRV_LOG_TRACE_IN("Dummy param: %p.", param); + + if (pcb == NULL) { + SEP_DRV_LOG_TRACE_OUT("Early exit (!pcb)."); + return; + } + + this_cpu = CONTROL_THIS_CPU(); + pcpu = &pcb[this_cpu]; + dev_idx = core_to_dev_map[this_cpu]; + pcfg = LWPMU_DEVICE_pcfg(&devices[dev_idx]); + CPU_STATE_pmu_state(pcpu) = pmu_state + (this_cpu * 3); + if (CPU_STATE_pmu_state(pcpu) == NULL) { + SEP_DRV_LOG_WARNING_TRACE_OUT( + "Unable to save PMU state on CPU %d.", this_cpu); + return; + } + + cur_grp = CPU_STATE_current_group(pcpu); + pecb = LWPMU_DEVICE_PMU_register_data(&devices[dev_idx])[cur_grp]; + restore_reg_addr[0] = ECB_entries_reg_id( + pecb, ECB_SECTION_REG_INDEX(pecb, DEBUG_CTRL_REG_INDEX, + PMU_OPERATION_GLOBAL_REGS)); + restore_reg_addr[1] = ECB_entries_reg_id( + pecb, ECB_SECTION_REG_INDEX(pecb, GLOBAL_CTRL_REG_INDEX, + PMU_OPERATION_GLOBAL_REGS)); + restore_reg_addr[2] = ECB_entries_reg_id( + pecb, ECB_SECTION_REG_INDEX(pecb, FIXED_CTRL_REG_INDEX, + PMU_OPERATION_GLOBAL_REGS)); + + // save the original PMU state on this CPU (NOTE: must only be called ONCE per collection) + CPU_STATE_pmu_state(pcpu)[0] = SYS_Read_MSR(restore_reg_addr[0]); + CPU_STATE_pmu_state(pcpu)[1] = SYS_Read_MSR(restore_reg_addr[1]); + CPU_STATE_pmu_state(pcpu)[2] = SYS_Read_MSR(restore_reg_addr[2]); + + if (DRV_CONFIG_ds_area_available(drv_cfg) && + DEV_CONFIG_pebs_mode(pcfg)) { + SYS_Write_MSR(ECB_entries_reg_id( + pecb, ECB_SECTION_REG_INDEX( + pecb, PEBS_ENABLE_REG_INDEX, + PMU_OPERATION_GLOBAL_REGS)), + 0LL); + } + + SEP_DRV_LOG_TRACE("Saving PMU state on CPU %d:", this_cpu); + SEP_DRV_LOG_TRACE(" msr_val(IA32_DEBUG_CTRL)=0x%llx.", + CPU_STATE_pmu_state(pcpu)[0]); + SEP_DRV_LOG_TRACE(" msr_val(IA32_PERF_GLOBAL_CTRL)=0x%llx.", + CPU_STATE_pmu_state(pcpu)[1]); + SEP_DRV_LOG_TRACE(" msr_val(IA32_FIXED_CTRL)=0x%llx.", + 
CPU_STATE_pmu_state(pcpu)[2]); + + SEP_DRV_LOG_TRACE_OUT(""); +} + +/* ------------------------------------------------------------------------- */ +/*! + * @fn perfver4_Destroy(params) + * + * @param params dummy parameter which is not used + * + * @return None No return needed + * + * @brief Reset the PMU setting up after collection + * + * Special Notes + * Restores the previously saved PMU state done in pmv_v4_Initialize. + * This function should be called in parallel across all CPUs + * after sampling collection ends/terminates. + * + */ +static VOID perfver4_Destroy(VOID *param) +{ + U32 this_cpu; + CPU_STATE pcpu; + + SEP_DRV_LOG_TRACE_IN("Dummy param: %p.", param); + + if (pcb == NULL) { + SEP_DRV_LOG_TRACE_OUT("Early exit (!pcb)."); + return; + } + + preempt_disable(); + this_cpu = CONTROL_THIS_CPU(); + preempt_enable(); + pcpu = &pcb[this_cpu]; + + if (CPU_STATE_pmu_state(pcpu) == NULL) { + SEP_DRV_LOG_WARNING_TRACE_OUT( + "Unable to restore PMU state on CPU %d.", this_cpu); + return; + } + + SEP_DRV_LOG_TRACE("Clearing PMU state on CPU %d:", this_cpu); + SEP_DRV_LOG_TRACE(" msr_val(IA32_DEBUG_CTRL)=0x0."); + SEP_DRV_LOG_TRACE(" msr_val(IA32_PERF_GLOBAL_CTRL)=0x0."); + SEP_DRV_LOG_TRACE(" msr_val(IA32_FIXED_CTRL)=0x0."); + + SYS_Write_MSR(restore_reg_addr[0], 0); + SYS_Write_MSR(restore_reg_addr[1], 0); + SYS_Write_MSR(restore_reg_addr[2], 0); + + CPU_STATE_pmu_state(pcpu) = NULL; + + SEP_DRV_LOG_TRACE_OUT(""); +} + +/* + * @fn perfver4_Read_LBRs(buffer) + * + * @param IN buffer - pointer to the buffer to write the data into + * @return Last branch source IP address + * + * @brief Read all the LBR registers into the buffer provided and return + * + */ +static U64 perfver4_Read_LBRs(VOID *buffer, PVOID data) +{ + U32 i, count = 0; + U64 *lbr_buf = NULL; + U64 value = 0; + U64 tos_ip_addr = 0; + U64 tos_ptr = 0; + SADDR saddr; + U32 pairs = 0; + U32 this_cpu; + U32 dev_idx; + LBR lbr; + DEV_CONFIG pcfg; +#if defined(DRV_SEP_ACRN_ON) + struct lbr_pmu_sample 
*lbr_data = NULL; +#endif + + SEP_DRV_LOG_TRACE_IN("Buffer: %p.", buffer); + + preempt_disable(); + this_cpu = CONTROL_THIS_CPU(); + preempt_enable(); + dev_idx = core_to_dev_map[this_cpu]; + pcfg = LWPMU_DEVICE_pcfg(&devices[dev_idx]); + lbr = LWPMU_DEVICE_lbr(&devices[dev_idx]); + + if (lbr == NULL) { + return 0; + } + +#if defined(DRV_SEP_ACRN_ON) + if (data == NULL) { + return 0; + } + lbr_data = (struct lbr_pmu_sample *)data; +#endif + + if (buffer && DEV_CONFIG_store_lbrs(pcfg)) { + lbr_buf = (U64 *)buffer; + } + + if (LBR_num_entries(lbr) > 0) { + pairs = (LBR_num_entries(lbr) - 1) / 3; + } + for (i = 0; i < LBR_num_entries(lbr); i++) { +#if !defined(DRV_SEP_ACRN_ON) + value = SYS_Read_MSR(LBR_entries_reg_id(lbr, i)); +#else + if (i == 0) { + value = lbr_data->lbr_tos; + } else { + if (LBR_entries_etype(lbr, i) == LBR_ENTRY_FROM_IP) { + value = lbr_data->lbr_from_ip[i - 1]; + } else if (LBR_entries_etype(lbr, i) == + LBR_ENTRY_TO_IP) { + value = lbr_data->lbr_to_ip[i - pairs - 1]; + } else { + value = lbr_data->lbr_info[i - 2 * pairs - 1]; + } + } +#endif + if (buffer && DEV_CONFIG_store_lbrs(pcfg)) { + *lbr_buf = value; + } + if (DEV_CONFIG_collect_callstacks(pcfg)) { + if ((LBR_entries_etype(lbr, i) == LBR_ENTRY_FROM_IP && + i > tos_ptr + 1) || + (LBR_entries_etype(lbr, i) == LBR_ENTRY_TO_IP && + i > tos_ptr + pairs + 1) || + (LBR_entries_etype(lbr, i) == LBR_ENTRY_INFO && + i > tos_ptr + 2 * pairs + 1)) { + if (buffer && DEV_CONFIG_store_lbrs(pcfg)) { + *lbr_buf = 0x0ULL; + lbr_buf++; + } + continue; + } + } + SEP_DRV_LOG_TRACE("LBR %u, 0x%llx.", i, value); + if (i == 0) { + tos_ptr = value; + } else { + if (LBR_entries_etype(lbr, i) == + LBR_ENTRY_FROM_IP) { // LBR from register + if (tos_ptr == count) { + SADDR_addr(saddr) = + value & PERFVER4_LBR_BITMASK; + tos_ip_addr = (U64)SADDR_addr( + saddr); // Add signed extension + SEP_DRV_LOG_TRACE( + "Tos_ip_addr %llu, 0x%llx.", + tos_ptr, value); + } + count++; + } + } + if (buffer && 
DEV_CONFIG_store_lbrs(pcfg)) { + lbr_buf++; + } + } + + SEP_DRV_LOG_TRACE_OUT("Res: %llu.", tos_ip_addr); + return tos_ip_addr; +} + +/* + * @fn perfver4_Clean_Up(param) + * + * @param IN param - currently not used + * + * @brief Clean up registers in ECB + * + */ +static VOID perfver4_Clean_Up(VOID *param) +{ + U32 this_cpu; + CPU_STATE pcpu; + ECB pecb = NULL; + U32 dev_idx; + U32 cur_grp; + + SEP_DRV_LOG_TRACE_IN("Dummy param: %p.", param); + + this_cpu = CONTROL_THIS_CPU(); + pcpu = &pcb[this_cpu]; + dev_idx = core_to_dev_map[this_cpu]; + cur_grp = CPU_STATE_current_group(pcpu); + pecb = LWPMU_DEVICE_PMU_register_data(&devices[dev_idx])[cur_grp]; + + if (!pecb) { + SEP_DRV_LOG_TRACE_OUT("Early exit (!pecb)."); + return; + } + + FOR_EACH_REG_CORE_OPERATION(pecb, i, PMU_OPERATION_ALL_REG) + { + if (ECB_entries_clean_up_get(pecb, i)) { + SEP_DRV_LOG_TRACE("Clean up set --- RegId --- %x.", + ECB_entries_reg_id(pecb, i)); + SYS_Write_MSR(ECB_entries_reg_id(pecb, i), 0LL); + } + } + END_FOR_EACH_REG_CORE_OPERATION; + + /* Clear outstanding frozen bits */ + if (pecb) { + SYS_Write_MSR(ECB_entries_reg_id( + pecb, + ECB_SECTION_REG_INDEX( + pecb, GLOBAL_OVF_CTRL_REG_INDEX, + PMU_OPERATION_GLOBAL_REGS)), + PERFVER4_FROZEN_BIT_MASK); + } + + SEP_DRV_LOG_TRACE_OUT(""); + return; +} + +/* ------------------------------------------------------------------------- */ +/*! + * @fn void perfver4_Check_Overflow_Htoff_Mode(masks) + * + * @param masks the mask structure to populate + * + * @return None No return needed + * + * @brief Called by the data processing method to figure out which registers have overflowed. 
+ * + */ +static void perfver4_Check_Overflow_Htoff_Mode(DRV_MASKS masks) +{ + U32 index; + U64 value = 0; + U64 overflow_status = 0; + U32 this_cpu; + BUFFER_DESC bd; + CPU_STATE pcpu; + ECB pecb; + U32 dev_idx; + U32 cur_grp; + DISPATCH dispatch; + DEV_CONFIG pcfg; + U64 overflow_status_clr = 0; + DRV_EVENT_MASK_NODE event_flag; + + SEP_DRV_LOG_TRACE_IN("Masks: %p.", masks); + + this_cpu = CONTROL_THIS_CPU(); + bd = &cpu_buf[this_cpu]; + pcpu = &pcb[this_cpu]; + dev_idx = core_to_dev_map[this_cpu]; + cur_grp = CPU_STATE_current_group(pcpu); + dispatch = LWPMU_DEVICE_dispatch(&devices[dev_idx]); + pcfg = LWPMU_DEVICE_pcfg(&devices[dev_idx]); + pecb = LWPMU_DEVICE_PMU_register_data(&devices[dev_idx])[cur_grp]; + + if (!pecb) { + SEP_DRV_LOG_TRACE_OUT("Early exit (!pecb)."); + return; + } + + // initialize masks + DRV_MASKS_masks_num(masks) = 0; + + overflow_status = SYS_Read_MSR(ECB_entries_reg_id( + pecb, ECB_SECTION_REG_INDEX(pecb, GLOBAL_STATUS_REG_INDEX, + PMU_OPERATION_GLOBAL_STATUS))); + + if (DEV_CONFIG_pebs_mode(pcfg) && + (DEV_CONFIG_pebs_record_num(pcfg) == 1)) { + overflow_status = PEBS_Overflowed(this_cpu, overflow_status, 0); + } + overflow_status_clr = overflow_status; + SEP_DRV_LOG_TRACE("Overflow: cpu: %d, status 0x%llx.", this_cpu, + overflow_status); + index = 0; + BUFFER_DESC_sample_count(bd) = 0; + + if (dispatch->check_overflow_gp_errata) { + overflow_status = dispatch->check_overflow_gp_errata( + pecb, &overflow_status_clr); + } + + FOR_EACH_REG_CORE_OPERATION(pecb, i, PMU_OPERATION_DATA_ALL) + { + if (ECB_entries_fixed_reg_get(pecb, i)) { + index = i - + ECB_operations_register_start( + pecb, PMU_OPERATION_DATA_FIXED) + + 0x20; + } else if (ECB_entries_is_gp_reg_get(pecb, i) && + ECB_entries_reg_value(pecb, i) != 0) { + index = i - ECB_operations_register_start( + pecb, PMU_OPERATION_DATA_GP); + if (index >= 4 && index <= 7) { + value = SYS_Read_MSR( + ECB_entries_reg_id(pecb, i)); + if (value > 0 && value <= 0x100000000LL) { + 
overflow_status |= ((U64)1 << index); + } + } + } else { + continue; + } + if (overflow_status & ((U64)1 << index)) { + SEP_DRV_LOG_TRACE("Overflow: cpu: %d, index %d.", + this_cpu, index); + SEP_DRV_LOG_TRACE( + "Register 0x%x --- val 0%llx.", + ECB_entries_reg_id(pecb, i), + SYS_Read_MSR(ECB_entries_reg_id(pecb, i))); + SYS_Write_MSR(ECB_entries_reg_id(pecb, i), + ECB_entries_reg_value(pecb, i)); + + if (DRV_CONFIG_enable_cp_mode(drv_cfg)) { + /* Increment the interrupt count. */ + if (interrupt_counts) { + interrupt_counts + [this_cpu * + DRV_CONFIG_num_events( + drv_cfg) + + ECB_entries_event_id_index( + pecb, i)] += 1; + } + } + + DRV_EVENT_MASK_bitFields1(&event_flag) = (U8)0; + if (ECB_entries_precise_get(pecb, i)) { + DRV_EVENT_MASK_precise(&event_flag) = 1; + } + if (ECB_entries_lbr_value_get(pecb, i)) { + DRV_EVENT_MASK_lbr_capture(&event_flag) = 1; + } + + if (DRV_MASKS_masks_num(masks) < MAX_OVERFLOW_EVENTS) { + DRV_EVENT_MASK_bitFields1( + DRV_MASKS_eventmasks(masks) + + DRV_MASKS_masks_num(masks)) = + DRV_EVENT_MASK_bitFields1(&event_flag); + DRV_EVENT_MASK_event_idx( + DRV_MASKS_eventmasks(masks) + + DRV_MASKS_masks_num(masks)) = + ECB_entries_event_id_index(pecb, i); + DRV_MASKS_masks_num(masks)++; + } else { + SEP_DRV_LOG_ERROR( + "The array for event masks is full."); + } + + SEP_DRV_LOG_TRACE("Overflow -- 0x%llx, index 0x%llx.", + overflow_status, (U64)1 << index); + SEP_DRV_LOG_TRACE("Slot# %d, reg_id 0x%x, index %d.", i, + ECB_entries_reg_id(pecb, i), index); + if (ECB_entries_event_id_index(pecb, i) == + CPU_STATE_trigger_event_num(pcpu)) { + CPU_STATE_trigger_count(pcpu)--; + } + } + } + END_FOR_EACH_REG_CORE_OPERATION; + + CPU_STATE_reset_mask(pcpu) = overflow_status_clr; + /* Clear outstanding overflow bits */ + SYS_Write_MSR(ECB_entries_reg_id( + pecb, ECB_SECTION_REG_INDEX( + pecb, GLOBAL_OVF_CTRL_REG_INDEX, + PMU_OPERATION_GLOBAL_REGS)), + overflow_status_clr & PERFVER4_OVERFLOW_BIT_MASK_HT_OFF); + + SEP_DRV_LOG_TRACE_OUT(""); +} + 
+#define MAX_COUNTER 0xFFFFFFFFFFFFLLU +#define FIXED_CTR3_BIT_INDEX 35 + +/* ------------------------------------------------------------------------- */ +/*! + * @fn void perfver4_Check_Overflow_Nonht_Mode(masks) + * + * @param masks the mask structure to populate + * + * @return None No return needed + * + * @brief Called by the data processing method to figure out which registers have overflowed. + * + */ +static VOID perfver4_Check_Overflow_Nonht_Mode(DRV_MASKS masks) +{ + U32 index; + U64 overflow_status = 0; + U32 this_cpu = CONTROL_THIS_CPU(); + BUFFER_DESC bd = &cpu_buf[this_cpu]; + CPU_STATE pcpu = &pcb[this_cpu]; + U32 dev_idx = core_to_dev_map[this_cpu]; + U32 cur_grp = CPU_STATE_current_group(pcpu); + DEV_CONFIG pcfg = LWPMU_DEVICE_pcfg(&devices[dev_idx]); + ECB pecb = LWPMU_DEVICE_PMU_register_data(&devices[dev_idx])[cur_grp]; + U64 overflow_status_clr = 0; + DRV_EVENT_MASK_NODE event_flag; + + SEP_DRV_LOG_TRACE_IN(""); + + if (!pecb) { + SEP_DRV_LOG_TRACE_OUT("Early exit (!pecb)."); + return; + } + + // initialize masks + DRV_MASKS_masks_num(masks) = 0; + + overflow_status = SYS_Read_MSR(ECB_entries_reg_id( + pecb, ECB_SECTION_REG_INDEX(pecb, GLOBAL_STATUS_REG_INDEX, + PMU_OPERATION_GLOBAL_STATUS))); + + if (DEV_CONFIG_pebs_mode(pcfg) && + (DEV_CONFIG_pebs_record_num(pcfg) == 1)) { + overflow_status = PEBS_Overflowed(this_cpu, overflow_status, 0); + } + overflow_status_clr = overflow_status; + SEP_DRV_LOG_TRACE("Overflow: cpu: %d, status 0x%llx.", this_cpu, + overflow_status); + index = 0; + BUFFER_DESC_sample_count(bd) = 0; + + FOR_EACH_REG_CORE_OPERATION(pecb, i, PMU_OPERATION_DATA_ALL) + { + if (ECB_entries_fixed_reg_get(pecb, i)) { + index = i - + ECB_operations_register_start( + pecb, PMU_OPERATION_DATA_FIXED) + + 0x20; + } else if (ECB_entries_is_gp_reg_get(pecb, i) && + ECB_entries_reg_value(pecb, i) != 0) { + index = i - ECB_operations_register_start( + pecb, PMU_OPERATION_DATA_GP); + } else { + continue; + } + if (overflow_status & ((U64)1 
<< index)) { + SEP_DRV_LOG_TRACE("Overflow: cpu: %d, index %d.", + this_cpu, index); + SEP_DRV_LOG_TRACE( + "register 0x%x --- val 0%llx.", + ECB_entries_reg_id(pecb, i), + SYS_Read_MSR(ECB_entries_reg_id(pecb, i))); + + DRV_EVENT_MASK_bitFields1(&event_flag) = (U8)0; + if (DEV_CONFIG_enable_perf_metrics(pcfg) && + index == FIXED_CTR3_BIT_INDEX) { + perf_metrics_counter_reload_value = + ECB_entries_reg_value( + pecb, i); // saving reload value + // Writing positive SAV into data register before reading metrics + SYS_Write_MSR( + ECB_entries_reg_id(pecb, i), + ((~(ECB_entries_reg_value(pecb, i)) + + 1) & + MAX_COUNTER)); + DRV_EVENT_MASK_perf_metrics_capture( + &event_flag) = 1; + } else { + SYS_Write_MSR(ECB_entries_reg_id(pecb, i), + ECB_entries_reg_value(pecb, i)); + } + if (DRV_CONFIG_enable_cp_mode(drv_cfg)) { + /* Increment the interrupt count. */ + if (interrupt_counts) { + interrupt_counts + [this_cpu * + DRV_CONFIG_num_events( + drv_cfg) + + ECB_entries_event_id_index( + pecb, i)] += 1; + } + } + + if (ECB_entries_precise_get(pecb, i)) { + DRV_EVENT_MASK_precise(&event_flag) = 1; + } + if (ECB_entries_lbr_value_get(pecb, i)) { + DRV_EVENT_MASK_lbr_capture(&event_flag) = 1; + } + if (ECB_entries_uncore_get(pecb, i)) { + DRV_EVENT_MASK_uncore_capture(&event_flag) = 1; + } + if (ECB_entries_branch_evt_get(pecb, i)) { + DRV_EVENT_MASK_branch(&event_flag) = 1; + } + + if (DRV_MASKS_masks_num(masks) < MAX_OVERFLOW_EVENTS) { + DRV_EVENT_MASK_bitFields1( + DRV_MASKS_eventmasks(masks) + + DRV_MASKS_masks_num(masks)) = + DRV_EVENT_MASK_bitFields1(&event_flag); + DRV_EVENT_MASK_event_idx( + DRV_MASKS_eventmasks(masks) + + DRV_MASKS_masks_num(masks)) = + ECB_entries_event_id_index(pecb, i); + DRV_MASKS_masks_num(masks)++; + } else { + SEP_DRV_LOG_ERROR( + "The array for event masks is full."); + } + + SEP_DRV_LOG_TRACE("Overflow -- 0x%llx, index 0x%llx.", + overflow_status, (U64)1 << index); + SEP_DRV_LOG_TRACE("Slot# %d, reg_id 0x%x, index %d.", i, + 
ECB_entries_reg_id(pecb, i), index); + if (ECB_entries_event_id_index(pecb, i) == + CPU_STATE_trigger_event_num(pcpu)) { + CPU_STATE_trigger_count(pcpu)--; + } + } + } + END_FOR_EACH_REG_CORE_OPERATION; + + CPU_STATE_reset_mask(pcpu) = overflow_status_clr; + /* Clear outstanding overflow bits */ + SYS_Write_MSR(ECB_entries_reg_id( + pecb, ECB_SECTION_REG_INDEX( + pecb, GLOBAL_OVF_CTRL_REG_INDEX, + PMU_OPERATION_GLOBAL_REGS)), + overflow_status_clr & PERFVER4_OVERFLOW_BIT_MASK_NON_HT); + + SEP_DRV_LOG_TRACE("Check Overflow completed %d.", this_cpu); +} +/* ------------------------------------------------------------------------- */ +/*! + * @fn void perfver4_Read_Power(buffer) + * + * @param buffer - pointer to the buffer to write the data into + * + * @return None No return needed + * + * @brief Read all the power MSRs into the buffer provided and return. + * + */ +static VOID perfver4_Read_Power(VOID *buffer) +{ + U32 i; + U64 *pwr_buf = (U64 *)buffer; + U32 this_cpu; + U32 dev_idx; + PWR pwr; + + SEP_DRV_LOG_TRACE_IN("Buffer: %p.", buffer); + + this_cpu = CONTROL_THIS_CPU(); + dev_idx = core_to_dev_map[this_cpu]; + pwr = LWPMU_DEVICE_pwr(&devices[dev_idx]); + + for (i = 0; i < PWR_num_entries(pwr); i++) { + *pwr_buf = SYS_Read_MSR(PWR_entries_reg_id(pwr, i)); + pwr_buf++; + } + + SEP_DRV_LOG_TRACE_OUT(""); +} + +/* ------------------------------------------------------------------------- */ +/*! + * @fn perfver4_Read_Counts(param, id) + * + * @param param The read thread node to process + * @param id The event id for the which the sample is generated + * + * @return None No return needed + * + * @brief Read CPU event based counts data and store into the buffer param; + * For the case of the trigger event, store the SAV value. 
+ */ +static VOID perfver4_Read_Counts(PVOID param, U32 id) +{ + U64 *data; + U32 this_cpu; + CPU_STATE pcpu; + U32 dev_idx; + DEV_CONFIG pcfg; + U32 event_id = 0; + + SEP_DRV_LOG_TRACE_IN("Param: %p, id: %u.", param, id); + + this_cpu = CONTROL_THIS_CPU(); + pcpu = &pcb[this_cpu]; + dev_idx = core_to_dev_map[this_cpu]; + pcfg = LWPMU_DEVICE_pcfg(&devices[dev_idx]); + + if (DEV_CONFIG_ebc_group_id_offset(pcfg)) { + // Write GroupID + data = (U64 *)((S8 *)param + + DEV_CONFIG_ebc_group_id_offset(pcfg)); + *data = CPU_STATE_current_group(pcpu) + 1; + } + + FOR_EACH_REG_CORE_OPERATION(pecb, i, PMU_OPERATION_DATA_ALL) + { + if (ECB_entries_counter_event_offset(pecb, i) == 0) { + continue; + } + data = (U64 *)((S8 *)param + + ECB_entries_counter_event_offset(pecb, i)); + event_id = ECB_entries_event_id_index(pecb, i); + if (event_id == id) { + *data = ~(ECB_entries_reg_value(pecb, i) - 1) & + ECB_entries_max_bits(pecb, i); + ; + } else { + *data = SYS_Read_MSR(ECB_entries_reg_id(pecb, i)); + SYS_Write_MSR(ECB_entries_reg_id(pecb, i), 0LL); + } + } + END_FOR_EACH_REG_CORE_OPERATION; + + SEP_DRV_LOG_TRACE_OUT(""); +} + +/* ------------------------------------------------------------------------- */ +/*! 
+ * @fn perfver4_Read_Metrics(buffer, id) + * + * @param param buffer to write metrics into + * + * @return None No return needed + * + * @brief Read hardware metrics from IA32_PERF_METRICS MSR + */ +static VOID perfver4_Read_Metrics(PVOID buffer) +{ + U64 *data, metrics = 0; + U32 j; + U32 this_cpu = CONTROL_THIS_CPU(); + U32 dev_idx = core_to_dev_map[this_cpu]; + DEV_CONFIG pcfg = LWPMU_DEVICE_pcfg(&devices[dev_idx]); + + data = (U64 *)buffer; + FOR_EACH_NONEVENT_REG(pecb, i) + { + metrics = SYS_Read_MSR(ECB_entries_reg_id(pecb, i)); + for (j = 0; j < DEV_CONFIG_num_perf_metrics(pcfg); j++) { + *data = (metrics & (0xFFULL << 8 * j)) >> 8 * j; + data++; + } + } + END_FOR_EACH_NONEVENT_REG; + + if (DRV_CONFIG_emon_mode(drv_cfg)) { + SYS_Write_MSR(IA32_FIXED_CTR3, 0LL); + } else { + SYS_Write_MSR(IA32_FIXED_CTR3, + perf_metrics_counter_reload_value); + perf_metrics_counter_reload_value = 0; + } + + SYS_Write_MSR(IA32_PERF_METRICS, 0LL); +} +/* ------------------------------------------------------------------------- */ +/*! 
+ * @fn U64 perfver4_Platform_Info + * + * @brief Reads the MSR_PLATFORM_INFO register if present + * + * @param void + * + * @return value read from the register + * + * Special Notes: + * + */ +static VOID perfver4_Platform_Info(PVOID data) +{ + DRV_PLATFORM_INFO platform_data = (DRV_PLATFORM_INFO)data; + U64 value = 0; + U64 energy_multiplier; + + SEP_DRV_LOG_TRACE_IN("Data: %p.", data); + + if (!platform_data) { + SEP_DRV_LOG_TRACE_OUT("Early exit (!platform_data)."); + return; + } + +#define IA32_MSR_PLATFORM_INFO 0xCE + value = SYS_Read_MSR(IA32_MSR_PLATFORM_INFO); + + DRV_PLATFORM_INFO_info(platform_data) = value; + DRV_PLATFORM_INFO_ddr_freq_index(platform_data) = 0; + +#define IA32_MSR_MISC_ENABLE 0x1A4 + DRV_PLATFORM_INFO_misc_valid(platform_data) = 1; + value = SYS_Read_MSR(IA32_MSR_MISC_ENABLE); + DRV_PLATFORM_INFO_misc_info(platform_data) = value; +#undef IA32_MSR_MISC_ENABLE + + energy_multiplier = SYS_Read_MSR(MSR_ENERGY_MULTIPLIER); + SEP_DRV_LOG_TRACE("MSR_ENERGY_MULTIPLIER: %llx.", energy_multiplier); + DRV_PLATFORM_INFO_energy_multiplier(platform_data) = + (U32)(energy_multiplier & 0x00001F00) >> 8; + + SEP_DRV_LOG_TRACE_OUT(""); +} + +/* + * Initialize the dispatch table + */ +DISPATCH_NODE perfver4_dispatch = { .init = perfver4_Initialize, + .fini = perfver4_Destroy, + .write = perfver4_Write_PMU, + .freeze = perfver4_Disable_PMU, + .restart = perfver4_Enable_PMU, + .read_data = perfver4_Read_PMU_Data, + .check_overflow = perfver4_Check_Overflow, + .swap_group = perfver4_Swap_Group, + .read_lbrs = perfver4_Read_LBRs, + .cleanup = perfver4_Clean_Up, + .hw_errata = NULL, + .read_power = perfver4_Read_Power, + .check_overflow_errata = NULL, + .read_counts = perfver4_Read_Counts, + .check_overflow_gp_errata = NULL, + .read_ro = NULL, + .platform_info = perfver4_Platform_Info, + .trigger_read = NULL, + .scan_for_uncore = NULL, + .read_metrics = NULL }; + +DISPATCH_NODE perfver4_dispatch_htoff_mode = { + .init = perfver4_Initialize, + .fini = 
perfver4_Destroy, + .write = perfver4_Write_PMU, + .freeze = perfver4_Disable_PMU, + .restart = perfver4_Enable_PMU, + .read_data = perfver4_Read_PMU_Data, + .check_overflow = perfver4_Check_Overflow_Htoff_Mode, + .swap_group = perfver4_Swap_Group, + .read_lbrs = perfver4_Read_LBRs, + .cleanup = perfver4_Clean_Up, + .hw_errata = NULL, + .read_power = perfver4_Read_Power, + .check_overflow_errata = NULL, + .read_counts = perfver4_Read_Counts, + .check_overflow_gp_errata = NULL, + .read_ro = NULL, + .platform_info = perfver4_Platform_Info, + .trigger_read = NULL, + .scan_for_uncore = NULL, + .read_metrics = NULL +}; + +DISPATCH_NODE perfver4_dispatch_nonht_mode = { + .init = perfver4_Initialize, + .fini = perfver4_Destroy, + .write = perfver4_Write_PMU, + .freeze = perfver4_Disable_PMU, + .restart = perfver4_Enable_PMU, + .read_data = perfver4_Read_PMU_Data, + .check_overflow = perfver4_Check_Overflow_Nonht_Mode, + .swap_group = perfver4_Swap_Group, + .read_lbrs = perfver4_Read_LBRs, + .cleanup = perfver4_Clean_Up, + .hw_errata = NULL, + .read_power = perfver4_Read_Power, + .check_overflow_errata = NULL, + .read_counts = perfver4_Read_Counts, + .check_overflow_gp_errata = NULL, + .read_ro = NULL, + .platform_info = perfver4_Platform_Info, + .trigger_read = NULL, + .scan_for_uncore = NULL, + .read_metrics = perfver4_Read_Metrics +}; diff --git a/drivers/platform/x86/sepdk/sep/pmi.c b/drivers/platform/x86/sepdk/sep/pmi.c new file mode 100755 index 0000000000000..516a7f0027646 --- /dev/null +++ b/drivers/platform/x86/sepdk/sep/pmi.c @@ -0,0 +1,637 @@ +/* **************************************************************************** + * Copyright(C) 2009-2018 Intel Corporation. All Rights Reserved. + * + * This file is part of SEP Development Kit + * + * SEP Development Kit is free software; you can redistribute it + * and/or modify it under the terms of the GNU General Public License + * version 2 as published by the Free Software Foundation. 
+ * + * SEP Development Kit is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * As a special exception, you may use this file as part of a free software + * library without restriction. Specifically, if other files instantiate + * templates or use macros or inline functions from this file, or you + * compile this file and link it with other files to produce an executable + * this file does not by itself cause the resulting executable to be + * covered by the GNU General Public License. This exception does not + * however invalidate any other reasons why the executable file might be + * covered by the GNU General Public License. + * **************************************************************************** + */ + +#include "lwpmudrv_defines.h" +#include +#include +#include +#if defined(DRV_EM64T) +#include +#endif +#include +#include + +#include "lwpmudrv_types.h" +#include "lwpmudrv_ecb.h" +#include "lwpmudrv_struct.h" +#include "apic.h" +#include "lwpmudrv.h" +#include "output.h" +#include "control.h" +#include "pmi.h" +#include "utility.h" +#include "pebs.h" +#include "ecb_iterators.h" +#include "msrdefs.h" + +#if defined(BUILD_CHIPSET) +#include "lwpmudrv_chipset.h" +#endif +#include "sepdrv_p_state.h" + +// Desc id #0 is used for module records +#define COMPUTE_DESC_ID(index) ((index)) + +extern DRV_CONFIG drv_cfg; +extern uid_t uid; +extern DRV_SETUP_INFO_NODE req_drv_setup_info; +#define EFLAGS_V86_MASK 0x00020000L + +/********************************************************************* + * Global Variables / State + *********************************************************************/ + +/********************************************************************* + * Interrupt Handler + *********************************************************************/ + +/* + * 
PMI_Interrupt_Handler + * Arguments + * IntFrame - Pointer to the Interrupt Frame + * + * Returns + * None + * + * Description + * Grab the data that is needed to populate the sample records + */ +#if defined(DRV_EM64T) +#define IS_LDT_BIT 0x4 +#define SEGMENT_SHIFT 3 +IDTGDT_DESC gdt_desc; + +U32 pmi_Get_CSD(U32 seg, U32 *low, U32 *high) +{ + PVOID gdt_max_addr; + struct desc_struct *gdt; + CodeDescriptor *csd; + + SEP_DRV_LOG_TRACE_IN("Seg: %u, low: %p, high: %p.", seg, low, high); + + gdt_max_addr = + (PVOID)(((U64)gdt_desc.idtgdt_base) + gdt_desc.idtgdt_limit); + gdt = gdt_desc.idtgdt_base; + + if (seg & IS_LDT_BIT) { + *low = 0; + *high = 0; + SEP_DRV_LOG_TRACE_OUT("FALSE [%u, %u] (IS_LDT_BIT).", *low, + *high); + return FALSE; + } + + // segment offset is based on dropping the bottom 3 bits... + csd = (CodeDescriptor *)&(gdt[seg >> SEGMENT_SHIFT]); + + if (((PVOID)csd) >= gdt_max_addr) { + SEP_DRV_LOG_WARNING_TRACE_OUT( + "FALSE (segment too big in get_CSD(0x%x)!).", seg); + return FALSE; + } + + *low = csd->u1.lowWord; + *high = csd->u2.highWord; + + SEP_DRV_LOG_TRACE("Seg 0x%x, low %08x, high %08x, reserved_0: %d.", seg, + *low, *high, csd->u2.s2.reserved_0); + SEP_DRV_LOG_TRACE_OUT("TRUE [%u, %u].", *low, *high); + + return TRUE; +} +#endif + +asmlinkage VOID PMI_Interrupt_Handler(struct pt_regs *regs) +{ + SampleRecordPC *psamp; + CPU_STATE pcpu; + BUFFER_DESC bd; +#if defined(DRV_IA32) + U32 csdlo; // low half code seg descriptor + U32 csdhi; // high half code seg descriptor + U32 seg_cs; // code seg selector +#endif + DRV_MASKS_NODE event_mask; + U32 this_cpu; + U32 dev_idx; + DISPATCH dispatch; + DEV_CONFIG pcfg; + U32 i; + U32 is_64bit_addr = FALSE; + U32 pid; + U32 tid; + U64 tsc; + U32 desc_id; + EVENT_DESC evt_desc; + U32 accept_interrupt = 1; +#if defined(SECURE_SEP) + uid_t l_uid; +#endif + U64 lbr_tos_from_ip = 0; + DRV_BOOL multi_pebs_enabled; + + SEP_DRV_LOG_INTERRUPT_IN( + "PID: %d, TID: %d.", current->pid, + GET_CURRENT_TGID()); // needs to 
be before function calls for the tracing to make sense + // may later want to separate the INTERRUPT_IN from the PID/TID logging + + this_cpu = CONTROL_THIS_CPU(); + pcpu = &pcb[this_cpu]; + bd = &cpu_buf[this_cpu]; + dev_idx = core_to_dev_map[this_cpu]; + dispatch = LWPMU_DEVICE_dispatch(&devices[dev_idx]); + pcfg = LWPMU_DEVICE_pcfg(&devices[dev_idx]); + multi_pebs_enabled = + (DEV_CONFIG_pebs_mode(pcfg) && + (DEV_CONFIG_pebs_record_num(pcfg) > 1) && + (DRV_SETUP_INFO_page_table_isolation(&req_drv_setup_info) == + DRV_SETUP_INFO_PTI_DISABLED)); + SYS_Locked_Inc(&CPU_STATE_in_interrupt( + pcpu)); // needs to be before dispatch->freeze to ensure printk is never called from an interrupt + + // Disable the counter control + dispatch->freeze(NULL); + + CPU_STATE_nmi_handled(&pcb[this_cpu])++; + +#if defined(SECURE_SEP) + l_uid = DRV_GET_UID(current); + accept_interrupt = (l_uid == uid); +#endif + dispatch->check_overflow(&event_mask); + if (GET_DRIVER_STATE() != DRV_STATE_RUNNING || + CPU_STATE_accept_interrupt(&pcb[this_cpu]) != 1) { + goto pmi_cleanup; + } + + pid = GET_CURRENT_TGID(); + tid = current->pid; + + if (DRV_CONFIG_target_pid(drv_cfg) > 0 && + pid != DRV_CONFIG_target_pid(drv_cfg)) { + accept_interrupt = 0; + } + + if (accept_interrupt == 0) { + goto pmi_cleanup; + } + UTILITY_Read_TSC(&tsc); + if (multi_pebs_enabled && PEBS_Get_Num_Records_Filled() > 0) { + PEBS_Flush_Buffer(NULL); + } + + SEP_DRV_LOG_TRACE("Nb overflowed events: %d.", event_mask.masks_num); + for (i = 0; i < event_mask.masks_num; i++) { + if (multi_pebs_enabled && + (DRV_EVENT_MASK_precise(&event_mask.eventmasks[i]))) { + continue; + } + if (DRV_CONFIG_event_based_counts(drv_cfg) == 0) { + desc_id = COMPUTE_DESC_ID(DRV_EVENT_MASK_event_idx( + &event_mask.eventmasks[i])); + } else { + desc_id = CPU_STATE_current_group(pcpu); + } + evt_desc = desc_data[desc_id]; + psamp = (SampleRecordPC *)OUTPUT_Reserve_Buffer_Space( + bd, EVENT_DESC_sample_size(evt_desc), + (NMI_mode) ? 
TRUE : FALSE, !SEP_IN_NOTIFICATION, + (S32)this_cpu); + + if (!psamp) { + continue; + } + lbr_tos_from_ip = 0; + CPU_STATE_num_samples(pcpu) += 1; + SAMPLE_RECORD_descriptor_id(psamp) = desc_id; + SAMPLE_RECORD_tsc(psamp) = tsc; + SAMPLE_RECORD_pid_rec_index_raw(psamp) = 1; + SAMPLE_RECORD_pid_rec_index(psamp) = pid; + SAMPLE_RECORD_tid(psamp) = tid; + SAMPLE_RECORD_cpu_num(psamp) = (U16)this_cpu; +#if defined(DRV_IA32) + SAMPLE_RECORD_eip(psamp) = REGS_eip(regs); + SAMPLE_RECORD_eflags(psamp) = REGS_eflags(regs); + SAMPLE_RECORD_cs(psamp) = (U16)REGS_xcs(regs); + + if (SAMPLE_RECORD_eflags(psamp) & EFLAGS_V86_MASK) { + csdlo = 0; + csdhi = 0; + } else { + seg_cs = SAMPLE_RECORD_cs(psamp); + SYS_Get_CSD(seg_cs, &csdlo, &csdhi); + } + SAMPLE_RECORD_csd(psamp).u1.lowWord = csdlo; + SAMPLE_RECORD_csd(psamp).u2.highWord = csdhi; +#elif defined(DRV_EM64T) + SAMPLE_RECORD_cs(psamp) = (U16)REGS_cs(regs); + + pmi_Get_CSD(SAMPLE_RECORD_cs(psamp), + &SAMPLE_RECORD_csd(psamp).u1.lowWord, + &SAMPLE_RECORD_csd(psamp).u2.highWord); +#endif + SEP_DRV_LOG_TRACE("SAMPLE_RECORD_pid_rec_index(psamp) %x.", + SAMPLE_RECORD_pid_rec_index(psamp)); + SEP_DRV_LOG_TRACE("SAMPLE_RECORD_tid(psamp) %x.", + SAMPLE_RECORD_tid(psamp)); +#if defined(DRV_IA32) + SEP_DRV_LOG_TRACE("SAMPLE_RECORD_eip(psamp) %x.", + SAMPLE_RECORD_eip(psamp)); + SEP_DRV_LOG_TRACE("SAMPLE_RECORD_eflags(psamp) %x.", + SAMPLE_RECORD_eflags(psamp)); +#endif + SEP_DRV_LOG_TRACE("SAMPLE_RECORD_cpu_num(psamp) %x.", + SAMPLE_RECORD_cpu_num(psamp)); + SEP_DRV_LOG_TRACE("SAMPLE_RECORD_cs(psamp) %x.", + SAMPLE_RECORD_cs(psamp)); + SEP_DRV_LOG_TRACE("SAMPLE_RECORD_csd(psamp).lowWord %x.", + SAMPLE_RECORD_csd(psamp).u1.lowWord); + SEP_DRV_LOG_TRACE("SAMPLE_RECORD_csd(psamp).highWord %x.", + SAMPLE_RECORD_csd(psamp).u2.highWord); + +#if defined(DRV_EM64T) + is_64bit_addr = + (SAMPLE_RECORD_csd(psamp).u2.s2.reserved_0 == 1); + if (is_64bit_addr) { + SAMPLE_RECORD_iip(psamp) = REGS_rip(regs); + SAMPLE_RECORD_ipsr(psamp) = + 
(REGS_eflags(regs) & 0xffffffff) | + (((U64)SAMPLE_RECORD_csd(psamp).u2.s2.dpl) + << 32); + SAMPLE_RECORD_ia64_pc(psamp) = TRUE; + } else { + SAMPLE_RECORD_eip(psamp) = REGS_rip(regs); + SAMPLE_RECORD_eflags(psamp) = REGS_eflags(regs); + SAMPLE_RECORD_ia64_pc(psamp) = FALSE; + + SEP_DRV_LOG_TRACE("SAMPLE_RECORD_eip(psamp) 0x%x.", + SAMPLE_RECORD_eip(psamp)); + SEP_DRV_LOG_TRACE("SAMPLE_RECORD_eflags(psamp) %x.", + SAMPLE_RECORD_eflags(psamp)); + } +#endif + + SAMPLE_RECORD_event_index(psamp) = + DRV_EVENT_MASK_event_idx(&event_mask.eventmasks[i]); + if (DEV_CONFIG_pebs_mode(pcfg) && + DRV_EVENT_MASK_precise(&event_mask.eventmasks[i])) { + if (EVENT_DESC_pebs_offset(evt_desc) || + EVENT_DESC_latency_offset_in_sample(evt_desc)) { + lbr_tos_from_ip = PEBS_Fill_Buffer((S8 *)psamp, + evt_desc, 0); + } + PEBS_Modify_IP((S8 *)psamp, is_64bit_addr, 0); + PEBS_Modify_TSC((S8 *)psamp, 0); + } + if (DEV_CONFIG_collect_lbrs(pcfg) && + DRV_EVENT_MASK_lbr_capture(&event_mask.eventmasks[i]) && + !DEV_CONFIG_apebs_collect_lbrs(pcfg)) { + lbr_tos_from_ip = dispatch->read_lbrs( + !DEV_CONFIG_store_lbrs(pcfg) ? 
+ NULL : + ((S8 *)(psamp) + + EVENT_DESC_lbr_offset(evt_desc)), + NULL); + } + if (DRV_EVENT_MASK_branch(&event_mask.eventmasks[i]) && + DEV_CONFIG_precise_ip_lbrs(pcfg) && lbr_tos_from_ip) { + if (is_64bit_addr) { + SAMPLE_RECORD_iip(psamp) = lbr_tos_from_ip; + SEP_DRV_LOG_TRACE( + "UPDATED SAMPLE_RECORD_iip(psamp) 0x%llx.", + SAMPLE_RECORD_iip(psamp)); + } else { + SAMPLE_RECORD_eip(psamp) = (U32)lbr_tos_from_ip; + SEP_DRV_LOG_TRACE( + "UPDATED SAMPLE_RECORD_eip(psamp) 0x%x.", + SAMPLE_RECORD_eip(psamp)); + } + } + if (DEV_CONFIG_power_capture(pcfg)) { + dispatch->read_power( + ((S8 *)(psamp) + + EVENT_DESC_power_offset_in_sample(evt_desc))); + } + +#if defined(BUILD_CHIPSET) + if (DRV_CONFIG_enable_chipset(drv_cfg)) { + cs_dispatch->read_counters( + ((S8 *)(psamp) + + DRV_CONFIG_chipset_offset(drv_cfg))); + } +#endif + if (DRV_CONFIG_event_based_counts(drv_cfg)) { + dispatch->read_counts( + (S8 *)psamp, + DRV_EVENT_MASK_event_idx( + &event_mask.eventmasks[i])); + } + if (DEV_CONFIG_enable_perf_metrics(pcfg) && + DRV_EVENT_MASK_perf_metrics_capture( + &event_mask.eventmasks[i])) { + dispatch->read_metrics( + (S8 *)(psamp) + + EVENT_DESC_perfmetrics_offset(evt_desc)); + } + if (DRV_CONFIG_enable_p_state(drv_cfg)) { + if (DRV_CONFIG_read_pstate_msrs(drv_cfg) && + (DRV_CONFIG_p_state_trigger_index(drv_cfg) == -1 || + SAMPLE_RECORD_event_index(psamp) == + DRV_CONFIG_p_state_trigger_index( + drv_cfg))) { + SEPDRV_P_STATE_Read( + (S8 *)(psamp) + + EVENT_DESC_p_state_offset( + evt_desc), + pcpu); + } + if (!DRV_CONFIG_event_based_counts(drv_cfg) && + CPU_STATE_p_state_counting(pcpu)) { + dispatch->read_counts( + (S8 *)psamp, + DRV_EVENT_MASK_event_idx( + &event_mask.eventmasks[i])); + } + } + } + +pmi_cleanup: + if (DEV_CONFIG_pebs_mode(pcfg)) { + if (!multi_pebs_enabled) { + PEBS_Reset_Index(this_cpu); + } else { + if (cpu_sideband_buf) { + OUTPUT outbuf = &BUFFER_DESC_outbuf( + &cpu_sideband_buf[this_cpu]); + if (OUTPUT_signal_full(outbuf) && + 
		    !OUTPUT_tasklet_queued(outbuf)) {
					SEP_DRV_LOG_TRACE(
						"Interrupt-driven sideband buffer flush tasklet scheduling.");
					/* Mark queued before scheduling so the tasklet is not queued twice */
					OUTPUT_tasklet_queued(outbuf) = TRUE;
					tasklet_schedule(&CPU_STATE_nmi_tasklet(
						&pcb[this_cpu]));
				}
			}
		}
	}

	// Reset the data counters
	if (CPU_STATE_trigger_count(&pcb[this_cpu]) == 0) {
		dispatch->swap_group(FALSE);
	}
	// Re-enable the counter control
	dispatch->restart(NULL);
	SYS_Locked_Dec(&CPU_STATE_in_interrupt(
		&pcb[this_cpu])); // do not use SEP_DRV_LOG_X (where X != INTERRUPT) below this

	SEP_DRV_LOG_INTERRUPT_OUT("");
}

#if defined(DRV_SEP_ACRN_ON)
/* ------------------------------------------------------------------------- */
/*!
 * @fn VOID PMI_Buffer_Handler(PVOID data)
 *
 * @param data - Pointer to the cpu id (S32) to service, or NULL to use the
 *               current CPU
 *
 * @return None
 *
 * @brief Drain the per-cpu shared sample buffer (ACRN hypervisor path):
 *        for each buffered record, decode the overflow status, reserve
 *        space in the output buffer and populate a SampleRecordPC.
 *
 * Special Notes
 *        Loops until the buffer is empty or the driver leaves the
 *        RUNNING state.
 */
VOID PMI_Buffer_Handler(PVOID data)
{
	SampleRecordPC *psamp;
	CPU_STATE pcpu;
	BUFFER_DESC bd;
	S32 cpu_id, j;
	U32 desc_id;
	EVENT_DESC evt_desc;
	U64 lbr_tos_from_ip = 0; // NOTE(review): assigned below but never consumed in this function
	ECB pecb;
	U32 dev_idx;
	DISPATCH dispatch;
	DEV_CONFIG pcfg;

	struct data_header header;
	struct pmu_sample psample;
	S32 data_size, payload_size, expected_payload_size, index;
	U64 overflow_status = 0;

	/* Bail out early if core driver state has not been set up */
	if (!pcb || !cpu_buf || !devices) {
		SEP_DRV_LOG_ERROR(
			"Invalid data pointers in PMI_Buffer_Handler!\n");
		return;
	}

	/* data carries the target cpu id when called via tasklet/thread */
	if (data) {
		cpu_id = *(S32 *)data;
		if (cpu_id >= GLOBAL_STATE_num_cpus(driver_state)) {
			SEP_DRV_LOG_ERROR(
				"Invalid cpu_id: %d\n", cpu_id);
			return;
		}
	} else {
		cpu_id = (S32)CONTROL_THIS_CPU();
	}
	pcpu = &pcb[cpu_id];
	bd = &cpu_buf[cpu_id];
	dev_idx = core_to_dev_map[cpu_id];
	dispatch = LWPMU_DEVICE_dispatch(&devices[dev_idx]);
	pcfg = LWPMU_DEVICE_pcfg(&devices[dev_idx]);
	pecb = LWPMU_DEVICE_PMU_register_data(
		&devices[dev_idx])[CPU_STATE_current_group(pcpu)];

	/* Drain the shared buffer one header+payload record at a time */
	while (1) {
		if ((GLOBAL_STATE_current_phase(driver_state) ==
		     DRV_STATE_TERMINATING) ||
		    (GLOBAL_STATE_current_phase(driver_state) ==
		     DRV_STATE_STOPPED)) {
			goto handler_cleanup;
		}

		data_size =
			sbuf_get(samp_buf_per_cpu[cpu_id], (uint8_t *)&header);
		if (data_size <= 0) {
			/* Buffer empty */
			goto handler_cleanup;
		}
		payload_size = 0;
		if ((header.data_type == (1 << CORE_PMU_SAMPLING)) ||
		    (header.data_type == (1 << LBR_PMU_SAMPLING))) {
			if (header.data_type == (1 << CORE_PMU_SAMPLING)) {
				expected_payload_size = CORE_PMU_SAMPLE_SIZE;
			} else if (header.data_type ==
				   (1 << LBR_PMU_SAMPLING)) {
				expected_payload_size = CORE_PMU_SAMPLE_SIZE +
							LBR_PMU_SAMPLE_SIZE;
			} else {
				expected_payload_size = 0;
			}
			/* Pull the payload in TRACE_ELEMENT_SIZE chunks (ceiling division) */
			for (j = 0; j < (expected_payload_size - 1) /
						TRACE_ELEMENT_SIZE + 1; j++) {
				data_size = sbuf_get(
					samp_buf_per_cpu[cpu_id],
					(uint8_t *)&psample +
						j * TRACE_ELEMENT_SIZE);
				if (data_size <= 0) {
					break;
				}
				payload_size += data_size;
			}
			SEP_DRV_LOG_TRACE("payload_size = %x\n", payload_size);
			if (header.payload_size > payload_size) {
				// Mismatch in payload size in header info
				SEP_DRV_LOG_ERROR(
					"Mismatch in data size: header=%llu, payload_size=%d\n",
					header.payload_size, payload_size);
				goto handler_cleanup;
			}
			if (header.cpu_id != cpu_id) {
				// Mismatch in cpu index in header info
				SEP_DRV_LOG_ERROR(
					"Mismatch in cpu idx: header=%u, buffer=%d\n",
					header.cpu_id, cpu_id);
				goto handler_cleanup;
			}

			// Now, handle the sample data in buffer
			overflow_status = psample.csample.overflow_status;
			SEP_DRV_LOG_TRACE("overflow_status cpu%d, value=0x%llx\n",
					  cpu_id, overflow_status);

			FOR_EACH_DATA_REG_CPU(pecb, i, cpu_id)
			{
				/* Map the counter register to its bit in the overflow status:
				 * GP counters at bit 0+, fixed counters at bit 32 (0x20) + */
				if (ECB_entries_is_gp_reg_get(pecb, i)) {
					index = ECB_entries_reg_id(pecb, i) -
						IA32_PMC0;
				} else if (ECB_entries_fixed_reg_get(pecb, i)) {
					index = ECB_entries_reg_id(pecb, i) -
						IA32_FIXED_CTR0 + 0x20;
				} else {
					continue;
				}

				if (overflow_status & ((U64)1 << index)) {
					desc_id = COMPUTE_DESC_ID(
						ECB_entries_event_id_index(pecb,
									   i));
					evt_desc = desc_data[desc_id];
					SEP_DRV_LOG_TRACE(
						"In Interrupt handler: event_id_index=%u, desc_id=%u\n",
						ECB_entries_event_id_index(pecb,
									   i),
						desc_id);

					psamp = (SampleRecordPC *)
						OUTPUT_Reserve_Buffer_Space(
							bd,
							EVENT_DESC_sample_size(
								evt_desc),
							TRUE,
							!SEP_IN_NOTIFICATION,
							cpu_id);
					if (!psamp) {
						/* No space in the output buffer: drop this sample */
						SEP_DRV_LOG_TRACE(
							"In Interrupt handler: psamp is NULL. No output buffer allocated\n");
						continue;
					}

					CPU_STATE_num_samples(pcpu) += 1;
					SAMPLE_RECORD_descriptor_id(psamp) =
						desc_id;
					SAMPLE_RECORD_event_index(psamp) =
						ECB_entries_event_id_index(pecb,
									   i);
					SAMPLE_RECORD_osid(psamp) =
						psample.csample.os_id;
					SAMPLE_RECORD_tsc(psamp) = header.tsc;
					SAMPLE_RECORD_pid_rec_index_raw(psamp) =
						1;
					SAMPLE_RECORD_pid_rec_index(psamp) = 0;
					/* NOTE(review): duplicate of the previous assignment — redundant */
					SAMPLE_RECORD_pid_rec_index(psamp) = 0;
					SAMPLE_RECORD_tid(psamp) = 0;
					SAMPLE_RECORD_cpu_num(psamp) =
						(U16)header.cpu_id;
					SAMPLE_RECORD_cs(psamp) =
						(U16)psample.csample.cs;

					SAMPLE_RECORD_iip(psamp) =
						psample.csample.rip;
					/* Pack rflags (low 32 bits) and the CS descriptor DPL (high bits) */
					SAMPLE_RECORD_ipsr(psamp) =
						(psample.csample.rflags &
						 0xffffffff) |
						(((U64)SAMPLE_RECORD_csd(psamp)
							  .u2.s2.dpl)
						 << 32);
					SAMPLE_RECORD_ia64_pc(psamp) = TRUE;

					if (DEV_CONFIG_collect_lbrs(pcfg) &&

					    !DEV_CONFIG_apebs_collect_lbrs(
						    pcfg) &&
					    header.data_type ==
						    (1 << LBR_PMU_SAMPLING)) {
						lbr_tos_from_ip = dispatch->read_lbrs(
							!DEV_CONFIG_store_lbrs(
								pcfg) ?
							NULL :
							((S8 *)(psamp) +
							 EVENT_DESC_lbr_offset(
								 evt_desc)),
							&psample.lsample);
					}

					SEP_DRV_LOG_TRACE(
						"SAMPLE_RECORD_cpu_num(psamp) %x\n",
						SAMPLE_RECORD_cpu_num(psamp));
					SEP_DRV_LOG_TRACE(
						"SAMPLE_RECORD_iip(psamp) %x\n",
						SAMPLE_RECORD_iip(psamp));
					SEP_DRV_LOG_TRACE(
						"SAMPLE_RECORD_cs(psamp) %x\n",
						SAMPLE_RECORD_cs(psamp));
					SEP_DRV_LOG_TRACE(
						"SAMPLE_RECORD_csd(psamp).lowWord %x\n",
						SAMPLE_RECORD_csd(psamp)
							.u1.lowWord);
					SEP_DRV_LOG_TRACE(
						"SAMPLE_RECORD_csd(psamp).highWord %x\n",
						SAMPLE_RECORD_csd(psamp)
							.u2.highWord);
				}
			}
			END_FOR_EACH_DATA_REG_CPU;
		} else if (header.data_type == (1 << VM_SWITCH_TRACING)) {
			SEP_DRV_LOG_TRACE("Ignoring VM switch trace data\n");
		} else {
			SEP_DRV_LOG_TRACE("Unknown data_type %x\n", header.data_type);
		}
	}

handler_cleanup:
	return;
}
#endif
diff --git a/drivers/platform/x86/sepdk/sep/sepdrv_p_state.c b/drivers/platform/x86/sepdk/sep/sepdrv_p_state.c
new file mode 100755
index 0000000000000..e91b9be4d5824
--- /dev/null
+++ b/drivers/platform/x86/sepdk/sep/sepdrv_p_state.c
@@ -0,0 +1,88 @@
/* ****************************************************************************
 * Copyright(C) 2009-2018 Intel Corporation. All Rights Reserved.
 *
 * This file is part of SEP Development Kit
 *
 * SEP Development Kit is free software; you can redistribute it
 * and/or modify it under the terms of the GNU General Public License
 * version 2 as published by the Free Software Foundation.
 *
 * SEP Development Kit is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * As a special exception, you may use this file as part of a free software
 * library without restriction.
Specifically, if other files instantiate + * templates or use macros or inline functions from this file, or you + * compile this file and link it with other files to produce an executable + * this file does not by itself cause the resulting executable to be + * covered by the GNU General Public License. This exception does not + * however invalidate any other reasons why the executable file might be + * covered by the GNU General Public License. + * **************************************************************************** + */ + +#include "lwpmudrv_defines.h" +#include "lwpmudrv_types.h" +#include "inc/control.h" +#include "inc/utility.h" +#include "inc/sepdrv_p_state.h" + +/*! + * @fn OS_STATUS SEPDRV_P_STATE_Read + * + * @brief Reads the APERF and MPERF counters into the buffer provided for the purpose + * + * @param buffer - buffer to read the counts into + * + * @param pcpu - pcpu struct that contains the previous APERF/MPERF values + * + * @return OS_SUCCESS if read succeeded, otherwise error + * + * @note + */ +OS_STATUS SEPDRV_P_STATE_Read(S8 *buffer, CPU_STATE pcpu) +{ + U64 *samp = (U64 *)buffer; + U64 new_APERF = 0; + U64 new_MPERF = 0; + + SEP_DRV_LOG_TRACE_IN("Buffer: %p, pcpu: %p.", buffer, pcpu); + + if ((samp == NULL) || (pcpu == NULL)) { + SEP_DRV_LOG_ERROR_TRACE_OUT("OS_INVALID (!samp || !pcpu)."); + return OS_INVALID; + } + + new_APERF = SYS_Read_MSR(DRV_APERF_MSR); + new_MPERF = SYS_Read_MSR(DRV_MPERF_MSR); + + if (CPU_STATE_last_p_state_valid(pcpu)) { + // there is a previous APERF/MPERF value + if ((CPU_STATE_last_aperf(pcpu)) > new_APERF) { + // a wrap-around has occurred. + samp[1] = CPU_STATE_last_aperf(pcpu) - new_APERF; + } else { + samp[1] = new_APERF - CPU_STATE_last_aperf(pcpu); + } + + if ((CPU_STATE_last_mperf(pcpu)) > new_MPERF) { + // a wrap-around has occurred. 
+ samp[0] = CPU_STATE_last_mperf(pcpu) - new_MPERF; + } else { + samp[0] = new_MPERF - CPU_STATE_last_mperf(pcpu); + } + } else { + // there is no previous valid APERF/MPERF values, thus no delta calculations + (CPU_STATE_last_p_state_valid(pcpu)) = TRUE; + samp[0] = 0; + samp[1] = 0; + } + + CPU_STATE_last_aperf(pcpu) = new_APERF; + CPU_STATE_last_mperf(pcpu) = new_MPERF; + + SEP_DRV_LOG_TRACE_OUT("OS_SUCCESS."); + return OS_SUCCESS; +} diff --git a/drivers/platform/x86/sepdk/sep/silvermont.c b/drivers/platform/x86/sepdk/sep/silvermont.c new file mode 100755 index 0000000000000..d69930395923b --- /dev/null +++ b/drivers/platform/x86/sepdk/sep/silvermont.c @@ -0,0 +1,1113 @@ +/* **************************************************************************** + * Copyright(C) 2009-2018 Intel Corporation. All Rights Reserved. + * + * This file is part of SEP Development Kit + * + * SEP Development Kit is free software; you can redistribute it + * and/or modify it under the terms of the GNU General Public License + * version 2 as published by the Free Software Foundation. + * + * SEP Development Kit is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * As a special exception, you may use this file as part of a free software + * library without restriction. Specifically, if other files instantiate + * templates or use macros or inline functions from this file, or you + * compile this file and link it with other files to produce an executable + * this file does not by itself cause the resulting executable to be + * covered by the GNU General Public License. This exception does not + * however invalidate any other reasons why the executable file might be + * covered by the GNU General Public License. 
 * ****************************************************************************
 */

#include "lwpmudrv_defines.h"
/* NOTE(review): the original angle-bracket include targets were lost in
 * extraction; restored per sepdk convention — TODO verify against the build. */
#include <linux/version.h>
#include <linux/wait.h>
#include <linux/fs.h>

#include "lwpmudrv_types.h"
#include "lwpmudrv_ecb.h"
#include "lwpmudrv_struct.h"

#include "lwpmudrv.h"
#include "utility.h"
#include "control.h"
#include "output.h"
#include "silvermont.h"
#include "ecb_iterators.h"
#include "pebs.h"
#include "apic.h"

extern U64 *read_counter_info;
extern DRV_CONFIG drv_cfg;
extern U64 *interrupt_counts;
extern DRV_SETUP_INFO_NODE req_drv_setup_info;
extern EMON_BUFFER_DRIVER_HELPER emon_buffer_driver_helper;
/* MSR addresses (debug ctrl, global ctrl, fixed ctrl) saved by
 * silvermont_Initialize and cleared by silvermont_Destroy */
static U32 restore_reg_addr[3];

/* Sign-extension helper: the bitfield width matches the LBR address width,
 * so assigning a raw LBR value sign-extends it to S64 */
typedef struct SADDR_S {
	S64 addr : SILVERMONT_LBR_DATA_BITS;
} SADDR;

#define SADDR_addr(x) ((x).addr)
#define ADD_ERRATA_FIX_FOR_FIXED_CTR0
#define MSR_ENERGY_MULTIPLIER 0x606 // Energy Multiplier MSR

#if defined(DRV_IA32)
#define ENABLE_IA32_PERFEVTSEL0_CTR 0x00400000
#define ENABLE_FIXED_CTR0 0x00000003
#elif defined(DRV_EM64T)
#define ENABLE_IA32_PERFEVTSEL0_CTR 0x0000000000400000
#define ENABLE_FIXED_CTR0 0x0000000000000003
#else
#error "Unexpected Architecture seen"
#endif

/* ------------------------------------------------------------------------- */
/*!
 * @fn void silvermont_Write_PMU(param)
 *
 * @param param dummy parameter which is not used
 *
 * @return None     No return needed
 *
 * @brief Initial set up of the PMU registers
 *
 * Special Notes
 *     Initial write of PMU registers.
 *     Walk through the enties and write the value of the register accordingly.
 *     Assumption:  For CCCR registers the enable bit is set to value 0.
 *     When current_group = 0, then this is the first time this routine is called,
 *     initialize the locks and set up EM tables.
 */
static VOID silvermont_Write_PMU(VOID *param)
{
	U32 this_cpu;
	CPU_STATE pcpu;
	U32 dev_idx;
	DISPATCH dispatch;
	EVENT_CONFIG ec;

	SEP_DRV_LOG_TRACE_IN("Dummy param: %p.", param);

	this_cpu = CONTROL_THIS_CPU();
	pcpu = &pcb[this_cpu];
	dev_idx = core_to_dev_map[this_cpu];
	ec = LWPMU_DEVICE_ec(&devices[dev_idx]);
	dispatch = LWPMU_DEVICE_dispatch(&devices[dev_idx]);

	if (CPU_STATE_current_group(pcpu) == 0) {
		if (EVENT_CONFIG_mode(ec) != EM_DISABLED) {
			U32 index;
			U32 st_index;
			U32 j;

			/* Save all the initialization values away into an array for Event Multiplexing. */
			for (j = 0; j < EVENT_CONFIG_num_groups(ec); j++) {
				/* The iterator below reads the registers of the
				 * group selected here */
				CPU_STATE_current_group(pcpu) = j;
				st_index = CPU_STATE_current_group(pcpu) *
					   EVENT_CONFIG_max_gp_events(ec);
				FOR_EACH_REG_CORE_OPERATION(
					pecb, i, PMU_OPERATION_DATA_GP)
				{
					index = st_index + i -
						ECB_operations_register_start(
							pecb,
							PMU_OPERATION_DATA_GP);
					CPU_STATE_em_tables(pcpu)[index] =
						ECB_entries_reg_value(pecb, i);
				}
				END_FOR_EACH_REG_CORE_OPERATION;
			}
			/* Reset the current group to the very first one. */
			/* (staggered per-cpu so groups rotate out of phase) */
			CPU_STATE_current_group(pcpu) =
				this_cpu % EVENT_CONFIG_num_groups(ec);
		}
	}

	if (dispatch->hw_errata) {
		dispatch->hw_errata();
	}

	FOR_EACH_REG_CORE_OPERATION(pecb, i, PMU_OPERATION_ALL_REG)
	{
		/*
		 * Writing the GLOBAL Control register enables the PMU to start counting.
		 * So write 0 into the register to prevent any counting from starting.
		 */
		if (i == ECB_SECTION_REG_INDEX(pecb, GLOBAL_CTRL_REG_INDEX,
					       PMU_OPERATION_GLOBAL_REGS)) {
			SYS_Write_MSR(ECB_entries_reg_id(pecb, i), 0LL);
			continue;
		}
		/*
		 * PEBS is enabled for this collection session:
		 * defer enabling it (write 0 for now), like the global control.
		 */
		if (DRV_SETUP_INFO_pebs_accessible(&req_drv_setup_info) &&
		    i == ECB_SECTION_REG_INDEX(pecb, PEBS_ENABLE_REG_INDEX,
					       PMU_OPERATION_GLOBAL_REGS) &&
		    ECB_entries_reg_value(pecb, i)) {
			SYS_Write_MSR(ECB_entries_reg_id(pecb, i), 0LL);
			continue;
		}
		SYS_Write_MSR(ECB_entries_reg_id(pecb, i),
			      ECB_entries_reg_value(pecb, i));
#if defined(MYDEBUG)
		{
			U64 val = SYS_Read_MSR(ECB_entries_reg_id(pecb, i));
			SEP_DRV_LOG_TRACE(
				"Write reg 0x%x --- value 0x%llx -- read 0x%llx.",
				ECB_entries_reg_id(pecb, i),
				ECB_entries_reg_value(pecb, i), val);
		}
#endif
	}
	END_FOR_EACH_REG_CORE_OPERATION;

#if defined(ADD_ERRATA_FIX_FOR_FIXED_CTR0)
	{
		/* Errata workaround: if fixed counter 0 is enabled, also set
		 * the workaround bit in IA32_PERFEVTSEL0 */
		U64 fixed_ctr0 = SYS_Read_MSR(IA32_FIXED_CTRL);
		fixed_ctr0 = (fixed_ctr0 & (ENABLE_FIXED_CTR0));
		if (fixed_ctr0 != 0x0) {
			U64 val = SYS_Read_MSR(IA32_PERFEVTSEL0);
			val |= ENABLE_IA32_PERFEVTSEL0_CTR;
			SYS_Write_MSR(IA32_PERFEVTSEL0, val);
		}
	}
#endif

	SEP_DRV_LOG_TRACE_OUT("");
}

/* ------------------------------------------------------------------------- */
/*!
 * @fn void silvermont_Disable_PMU(param)
 *
 * @param param dummy parameter which is not used
 *
 * @return None     No return needed
 *
 * @brief Zero out the global control register.  This automatically disables the PMU counters.
 *
 */
static VOID silvermont_Disable_PMU(PVOID param)
{
	U32 this_cpu;
	CPU_STATE pcpu;
	ECB pecb;
	U32 dev_idx;
	U32 cur_grp;
	DEV_CONFIG pcfg;

	SEP_DRV_LOG_TRACE_IN("Dummy param: %p.", param);

	this_cpu = CONTROL_THIS_CPU();
	pcpu = &pcb[this_cpu];
	dev_idx = core_to_dev_map[this_cpu];
	cur_grp = CPU_STATE_current_group(pcpu);
	pecb = LWPMU_DEVICE_PMU_register_data(&devices[dev_idx])[cur_grp];
	pcfg = LWPMU_DEVICE_pcfg(&devices[dev_idx]);

	if (!pecb) {
		SEP_DRV_LOG_TRACE_OUT(
			"No programming for this device in this group.");
		return;
	}

	/* Only clear global control (and PEBS enable) once collection is no
	 * longer actively running */
	if (GET_DRIVER_STATE() != DRV_STATE_RUNNING) {
		SYS_Write_MSR(ECB_entries_reg_id(
				      pecb, ECB_SECTION_REG_INDEX(
						    pecb, GLOBAL_CTRL_REG_INDEX,
						    PMU_OPERATION_GLOBAL_REGS)),
			      0LL);
		if (DEV_CONFIG_pebs_mode(pcfg)) {
			SYS_Write_MSR(
				ECB_entries_reg_id(
					pecb,
					ECB_SECTION_REG_INDEX(
						pecb, PEBS_ENABLE_REG_INDEX,
						PMU_OPERATION_GLOBAL_REGS)),
				0LL);
		}
	}

	SEP_DRV_LOG_TRACE_OUT("");
}

/* ------------------------------------------------------------------------- */
/*!
 * @fn void silvermont_Enable_PMU(param)
 *
 * @param param dummy parameter which is not used
 *
 * @return None     No return needed
 *
 * @brief Set the enable bit for all the Control registers
 *
 */
static VOID silvermont_Enable_PMU(PVOID param)
{
	/*
	 * Get the value from the event block
	 * 0 == location of the global control reg for this block.
	 * Generalize this location awareness when possible
	 */
	U32 this_cpu;
	CPU_STATE pcpu;
	ECB pecb;
	U32 dev_idx;
	U32 cur_grp;
	DEV_CONFIG pcfg;

	SEP_DRV_LOG_TRACE_IN("Dummy param: %p.", param);

	this_cpu = CONTROL_THIS_CPU();
	pcpu = &pcb[this_cpu];
	dev_idx = core_to_dev_map[this_cpu];
	cur_grp = CPU_STATE_current_group(pcpu);
	pecb = LWPMU_DEVICE_PMU_register_data(&devices[dev_idx])[cur_grp];
	pcfg = LWPMU_DEVICE_pcfg(&devices[dev_idx]);

	if (!pecb) {
		SEP_DRV_LOG_TRACE_OUT("Early exit (!pecb).");
		return;
	}

	/* Under KVM, make sure global control starts from a clean slate */
	if (KVM_guest_mode) {
		SYS_Write_MSR(ECB_entries_reg_id(
				      pecb, ECB_SECTION_REG_INDEX(
						    pecb, GLOBAL_CTRL_REG_INDEX,
						    PMU_OPERATION_GLOBAL_REGS)),
			      0LL);
	}
	if (GET_DRIVER_STATE() == DRV_STATE_RUNNING) {
		APIC_Enable_Pmi();
		/* Path 1: counters overflowed in the last PMI — rearm them */
		if (CPU_STATE_reset_mask(pcpu)) {
			SEP_DRV_LOG_TRACE("Overflow reset mask %llx.",
					  CPU_STATE_reset_mask(pcpu));
			// Reinitialize the global overflow control register
			SYS_Write_MSR(
				ECB_entries_reg_id(
					pecb,
					ECB_SECTION_REG_INDEX(
						pecb, GLOBAL_CTRL_REG_INDEX,
						PMU_OPERATION_GLOBAL_REGS)),
				ECB_entries_reg_value(
					pecb,
					ECB_SECTION_REG_INDEX(
						pecb, GLOBAL_CTRL_REG_INDEX,
						PMU_OPERATION_GLOBAL_REGS)));
			SYS_Write_MSR(
				ECB_entries_reg_id(
					pecb,
					ECB_SECTION_REG_INDEX(
						pecb, DEBUG_CTRL_REG_INDEX,
						PMU_OPERATION_GLOBAL_REGS)),
				ECB_entries_reg_value(
					pecb,
					ECB_SECTION_REG_INDEX(
						pecb, DEBUG_CTRL_REG_INDEX,
						PMU_OPERATION_GLOBAL_REGS)));
			CPU_STATE_reset_mask(pcpu) = 0LL;
		}
		/* Path 2: the event group was just swapped — reprogram the
		 * control registers for the new group */
		if (CPU_STATE_group_swap(pcpu)) {
			CPU_STATE_group_swap(pcpu) = 0;
			SYS_Write_MSR(
				ECB_entries_reg_id(
					pecb,
					ECB_SECTION_REG_INDEX(
						pecb, GLOBAL_CTRL_REG_INDEX,
						PMU_OPERATION_GLOBAL_REGS)),
				ECB_entries_reg_value(
					pecb,
					ECB_SECTION_REG_INDEX(
						pecb, GLOBAL_CTRL_REG_INDEX,
						PMU_OPERATION_GLOBAL_REGS)));
			if (DEV_CONFIG_pebs_mode(pcfg)) {
				SYS_Write_MSR(
					ECB_entries_reg_id(
						pecb,
						ECB_SECTION_REG_INDEX(
							pecb,
							PEBS_ENABLE_REG_INDEX,
							PMU_OPERATION_GLOBAL_REGS)),
					ECB_entries_reg_value(
						pecb,
						ECB_SECTION_REG_INDEX(
							pecb,
							PEBS_ENABLE_REG_INDEX,
							PMU_OPERATION_GLOBAL_REGS)));
			}
			SYS_Write_MSR(
				ECB_entries_reg_id(
					pecb,
					ECB_SECTION_REG_INDEX(
						pecb, DEBUG_CTRL_REG_INDEX,
						PMU_OPERATION_GLOBAL_REGS)),
				ECB_entries_reg_value(
					pecb,
					ECB_SECTION_REG_INDEX(
						pecb, DEBUG_CTRL_REG_INDEX,
						PMU_OPERATION_GLOBAL_REGS)));
#if defined(MYDEBUG)
			{
				U64 val;
				val = SYS_Read_MSR(ECB_entries_reg_id(
					pecb,
					ECB_SECTION_REG_INDEX(
						pecb, GLOBAL_CTRL_REG_INDEX,
						PMU_OPERATION_GLOBAL_REGS)));
				SEP_DRV_LOG_TRACE(
					"Write reg 0x%x--- read 0x%llx.",
					ECB_entries_reg_id(
						pecb,
						ECB_SECTION_REG_INDEX(
							pecb,
							GLOBAL_CTRL_REG_INDEX,
							PMU_OPERATION_GLOBAL_REGS)),
					val);
			}
#endif
		}
	}
	SEP_DRV_LOG_TRACE("Reenabled PMU with value 0x%llx.",
			  ECB_entries_reg_value(pecb, 0));

	SEP_DRV_LOG_TRACE_OUT("");
}

/* ------------------------------------------------------------------------- */
/*!
 * @fn silvermont_Read_PMU_Data(param)
 *
 * @param param dummy parameter which is not used
 *
 * @return None     No return needed
 *
 * @brief Read all the data MSR's into a buffer.  Called by the interrupt handler.
 *
 */
static void silvermont_Read_PMU_Data(PVOID param)
{
	U32 j;
	U64 *buffer = read_counter_info;
	U32 this_cpu;
	CPU_STATE pcpu;
	ECB pecb;
	U32 dev_idx;
	U32 cur_grp;

	SEP_DRV_LOG_TRACE_IN("Dummy param: %p.", param);

	/* Pin briefly to read a consistent cpu id */
	preempt_disable();
	this_cpu = CONTROL_THIS_CPU();
	preempt_enable();
	pcpu = &pcb[this_cpu];
	dev_idx = core_to_dev_map[this_cpu];
	cur_grp = CPU_STATE_current_group(pcpu);
	pecb = LWPMU_DEVICE_PMU_register_data(&devices[dev_idx])[cur_grp];

	if (!pecb) {
		SEP_DRV_LOG_TRACE_OUT("Early exit (!pecb).");
		return;
	}

	SEP_DRV_LOG_TRACE("PMU control_data 0x%p, buffer 0x%p.",
			  LWPMU_DEVICE_PMU_register_data(&devices[dev_idx]),
			  buffer);
	FOR_EACH_REG_CORE_OPERATION(pecb, i, PMU_OPERATION_DATA_ALL)
	{
		/* Destination slot in the EMON buffer for this cpu/event */
		j = EMON_BUFFER_CORE_EVENT_OFFSET(
			EMON_BUFFER_DRIVER_HELPER_core_index_to_thread_offset_map(
				emon_buffer_driver_helper)[this_cpu],
			ECB_entries_core_event_id(pecb, i));

		buffer[j] = SYS_Read_MSR(ECB_entries_reg_id(pecb, i));
		SEP_DRV_LOG_TRACE("j=%u, value=%llu, cpu=%u, event_id=%u", j,
				  buffer[j], this_cpu,
				  ECB_entries_core_event_id(pecb, i));
	}
	END_FOR_EACH_REG_CORE_OPERATION;

	SEP_DRV_LOG_TRACE_OUT("");
}

/* ------------------------------------------------------------------------- */
/*!
 * @fn void silvermont_Check_Overflow(masks)
 *
 * @param masks the mask structure to populate
 *
 * @return None     No return needed
 *
 * @brief Called by the data processing method to figure out which registers have overflowed.
 *
 */
static void silvermont_Check_Overflow(DRV_MASKS masks)
{
	U32 index;
	U64 overflow_status = 0;
	U32 this_cpu;
	BUFFER_DESC bd;
	CPU_STATE pcpu;
	ECB pecb;
	U32 dev_idx;
	U32 cur_grp;
	DEV_CONFIG pcfg;
	DISPATCH dispatch;
	U64 overflow_status_clr = 0;
	DRV_EVENT_MASK_NODE event_flag;

	SEP_DRV_LOG_TRACE_IN("Masks: %p.", masks);

	this_cpu = CONTROL_THIS_CPU();
	bd = &cpu_buf[this_cpu];
	pcpu = &pcb[this_cpu];
	dev_idx = core_to_dev_map[this_cpu];
	cur_grp = CPU_STATE_current_group(pcpu);
	pecb = LWPMU_DEVICE_PMU_register_data(&devices[dev_idx])[cur_grp];
	pcfg = LWPMU_DEVICE_pcfg(&devices[dev_idx]);
	dispatch = LWPMU_DEVICE_dispatch(&devices[dev_idx]);

	if (!pecb) {
		SEP_DRV_LOG_TRACE_OUT("Early exit (!pecb).");
		return;
	}

	// initialize masks
	DRV_MASKS_masks_num(masks) = 0;

	overflow_status = SYS_Read_MSR(ECB_entries_reg_id(
		pecb, ECB_SECTION_REG_INDEX(pecb, GLOBAL_STATUS_REG_INDEX,
					    PMU_OPERATION_GLOBAL_STATUS)));

	/* Let PEBS fold in the overflows it already serviced */
	if (DEV_CONFIG_pebs_mode(pcfg)) {
		overflow_status = PEBS_Overflowed(this_cpu, overflow_status, 0);
	}
	overflow_status_clr = overflow_status;

	if (dispatch->check_overflow_gp_errata) {
		overflow_status = dispatch->check_overflow_gp_errata(
			pecb, &overflow_status_clr);
	}
	SEP_DRV_LOG_TRACE("Overflow:  cpu: %d, status 0x%llx.", this_cpu,
			  overflow_status);
	index = 0;
	BUFFER_DESC_sample_count(bd) = 0;
	FOR_EACH_REG_CORE_OPERATION(pecb, i, PMU_OPERATION_DATA_ALL)
	{
		/* Map counter -> bit in the overflow status word:
		 * fixed counters start at bit 32 (0x20), GP at bit 0 */
		if (ECB_entries_fixed_reg_get(pecb, i)) {
			index = i -
				ECB_operations_register_start(
					pecb, PMU_OPERATION_DATA_FIXED) +
				0x20;
			if (dispatch->check_overflow_errata) {
				overflow_status =
					dispatch->check_overflow_errata(
						pecb, i, overflow_status);
			}
		} else if (ECB_entries_is_gp_reg_get(pecb, i)) {
			index = i - ECB_operations_register_start(
					    pecb, PMU_OPERATION_DATA_GP);
		} else {
			continue;
		}
		if (overflow_status & ((U64)1 << index)) {
			SEP_DRV_LOG_TRACE("Overflow:  cpu: %d, index %d.",
					  this_cpu, index);
			/* NOTE(review): "0%llx" below looks like a typo for "0x%llx"
			 * — runtime string left untouched */
			SEP_DRV_LOG_TRACE(
				"Register 0x%x --- val 0%llx.",
				ECB_entries_reg_id(pecb, i),
				SYS_Read_MSR(ECB_entries_reg_id(pecb, i)));
			/* Rearm the counter with its sample-after value */
			SYS_Write_MSR(ECB_entries_reg_id(pecb, i),
				      ECB_entries_reg_value(pecb, i));

			if (DRV_CONFIG_enable_cp_mode(drv_cfg)) {
				/* Increment the interrupt count. */
				if (interrupt_counts) {
					interrupt_counts
						[this_cpu *
							 DRV_CONFIG_num_events(
								 drv_cfg) +
						 ECB_entries_event_id_index(
							 pecb, i)] += 1;
				}
			}

			DRV_EVENT_MASK_bitFields1(&event_flag) = (U8)0;
			if (ECB_entries_fixed_reg_get(pecb, i)) {
				CPU_STATE_p_state_counting(pcpu) = 1;
			}
			if (ECB_entries_precise_get(pecb, i)) {
				DRV_EVENT_MASK_precise(&event_flag) = 1;
			}
			if (ECB_entries_lbr_value_get(pecb, i)) {
				DRV_EVENT_MASK_lbr_capture(&event_flag) = 1;
			}
			if (ECB_entries_uncore_get(pecb, i)) {
				DRV_EVENT_MASK_uncore_capture(&event_flag) = 1;
			}

			/* Append this event to the caller's overflow mask list */
			if (DRV_MASKS_masks_num(masks) < MAX_OVERFLOW_EVENTS) {
				DRV_EVENT_MASK_bitFields1(
					DRV_MASKS_eventmasks(masks) +
					DRV_MASKS_masks_num(masks)) =
					DRV_EVENT_MASK_bitFields1(&event_flag);
				DRV_EVENT_MASK_event_idx(
					DRV_MASKS_eventmasks(masks) +
					DRV_MASKS_masks_num(masks)) =
					ECB_entries_event_id_index(pecb, i);
				DRV_MASKS_masks_num(masks)++;
			} else {
				SEP_DRV_LOG_ERROR(
					"The array for event masks is full.");
			}

			SEP_DRV_LOG_TRACE("Overflow -- 0x%llx, index 0x%llx.",
					  overflow_status, (U64)1 << index);
			SEP_DRV_LOG_TRACE("Slot# %d, reg_id 0x%x, index %d.", i,
					  ECB_entries_reg_id(pecb, i), index);
			if (ECB_entries_event_id_index(pecb, i) ==
			    CPU_STATE_trigger_event_num(pcpu)) {
				CPU_STATE_trigger_count(pcpu)--;
			}
		}
	}
	END_FOR_EACH_REG_CORE_OPERATION;

	CPU_STATE_reset_mask(pcpu) = overflow_status_clr;
	// Reinitialize the global overflow control register
	SYS_Write_MSR(IA32_PERF_GLOBAL_OVF_CTRL, overflow_status_clr);

	SEP_DRV_LOG_TRACE_OUT("");
}

/* ------------------------------------------------------------------------- */
/*!
 * @fn silvermont_Swap_Group(restart)
 *
 * @param restart    dummy parameter which is not used
 *
 * @return None     No return needed
 *
 * @brief Perform the mechanics of swapping the event groups for event mux operations
 *
 * Special Notes
 *    Swap function for event multiplexing.
 *    Freeze the counting.
 *    Swap the groups.
 *    Enable the counting.
 *    Reset the event trigger count
 *
 */
static VOID silvermont_Swap_Group(DRV_BOOL restart)
{
	U32 index;
	U32 next_group;
	U32 st_index;
	/* NOTE(review): this_cpu/pcpu are initialized here and then assigned
	 * again below — the declarations' initializers are redundant */
	U32 this_cpu = CONTROL_THIS_CPU();
	CPU_STATE pcpu = &pcb[this_cpu];
	U32 dev_idx;
	DISPATCH dispatch;
	EVENT_CONFIG ec;

	SEP_DRV_LOG_TRACE_IN("Dummy restart: %u.", restart);

	this_cpu = CONTROL_THIS_CPU();
	pcpu = &pcb[this_cpu];
	dev_idx = core_to_dev_map[this_cpu];
	dispatch = LWPMU_DEVICE_dispatch(&devices[dev_idx]);
	ec = LWPMU_DEVICE_ec(&devices[dev_idx]);
	st_index =
		CPU_STATE_current_group(pcpu) * EVENT_CONFIG_max_gp_events(ec);
	next_group = (CPU_STATE_current_group(pcpu) + 1);

	if (next_group >= EVENT_CONFIG_num_groups(ec)) {
		next_group = 0;
	}

	SEP_DRV_LOG_TRACE("Current group : 0x%x.",
			  CPU_STATE_current_group(pcpu));
	SEP_DRV_LOG_TRACE("Next group : 0x%x.", next_group);

	// Save the counters for the current group
	if (!DRV_CONFIG_event_based_counts(drv_cfg)) {
		FOR_EACH_REG_CORE_OPERATION(pecb, i, PMU_OPERATION_DATA_GP)
		{
			index = st_index + i -
				ECB_operations_register_start(
					pecb, PMU_OPERATION_DATA_GP);
			CPU_STATE_em_tables(pcpu)[index] =
				SYS_Read_MSR(ECB_entries_reg_id(pecb, i));
			SEP_DRV_LOG_TRACE("Saved value for reg 0x%x : 0x%llx.",
					  ECB_entries_reg_id(pecb, i),
					  CPU_STATE_em_tables(pcpu)[index]);
		}
		END_FOR_EACH_REG_CORE_OPERATION;
	}

	CPU_STATE_current_group(pcpu) = next_group;

	if (dispatch->hw_errata) {
		dispatch->hw_errata();
	}

	// First write the GP control registers (eventsel)
	FOR_EACH_REG_CORE_OPERATION(pecb, i, PMU_OPERATION_CTRL_GP)
	{
		SYS_Write_MSR(ECB_entries_reg_id(pecb, i),
			      ECB_entries_reg_value(pecb, i));
	}
	END_FOR_EACH_REG_CORE_OPERATION;

	if (DRV_CONFIG_event_based_counts(drv_cfg)) {
		// In EBC mode, reset the counts for all events except for trigger event
		FOR_EACH_REG_CORE_OPERATION(pecb, i, PMU_OPERATION_DATA_ALL)
		{
			if (ECB_entries_event_id_index(pecb, i) !=
			    CPU_STATE_trigger_event_num(pcpu)) {
				SYS_Write_MSR(ECB_entries_reg_id(pecb, i), 0LL);
			}
		}
		END_FOR_EACH_REG_CORE_OPERATION;
	} else {
		// Then write the gp count registers
		st_index = CPU_STATE_current_group(pcpu) *
			   EVENT_CONFIG_max_gp_events(ec);
		FOR_EACH_REG_CORE_OPERATION(pecb, i, PMU_OPERATION_DATA_GP)
		{
			index = st_index + i -
				ECB_operations_register_start(
					pecb, PMU_OPERATION_DATA_GP);
			SYS_Write_MSR(ECB_entries_reg_id(pecb, i),
				      CPU_STATE_em_tables(pcpu)[index]);
			SEP_DRV_LOG_TRACE(
				"Restore value for reg 0x%x : 0x%llx.",
				ECB_entries_reg_id(pecb, i),
				CPU_STATE_em_tables(pcpu)[index]);
		}
		END_FOR_EACH_REG_CORE_OPERATION;
	}

	/* Program the off-core response (OCR) registers for the new group */
	FOR_EACH_REG_CORE_OPERATION(pecb, i, PMU_OPERATION_OCR)
	{
		SYS_Write_MSR(ECB_entries_reg_id(pecb, i),
			      ECB_entries_reg_value(pecb, i));
	}
	END_FOR_EACH_REG_CORE_OPERATION;

	/*
	 * reset the em factor when a group is swapped
	 */
	CPU_STATE_trigger_count(pcpu) = EVENT_CONFIG_em_factor(ec);

	/*
	 * The enable routine needs to rewrite the control registers
	 */
	CPU_STATE_reset_mask(pcpu) = 0LL;
	CPU_STATE_group_swap(pcpu) = 1;

	SEP_DRV_LOG_TRACE_OUT("");
}

/* ------------------------------------------------------------------------- */
/*!
 * @fn silvermont_Initialize(params)
 *
 * @param params    dummy parameter which is not used
 *
 * @return None     No return needed
 *
 * @brief Initialize the PMU setting up for collection
 *
 * Special Notes
 *     Saves the relevant PMU state (minimal set of MSRs required
 *     to avoid conflicts with other Linux tools, such as Oprofile).
 *     This function should be called in parallel across all CPUs
 *     prior to the start of sampling, before PMU state is changed.
 *
 */
static VOID silvermont_Initialize(VOID *param)
{
	U32 this_cpu;
	CPU_STATE pcpu;
	ECB pecb;
	U32 dev_idx;
	U32 cur_grp;

	SEP_DRV_LOG_TRACE_IN("Dummy param: %p.", param);

	this_cpu = CONTROL_THIS_CPU();
	dev_idx = core_to_dev_map[this_cpu];

	if (pcb == NULL) {
		SEP_DRV_LOG_TRACE_OUT("Early exit (!pcb).");
		return;
	}

	pcpu = &pcb[this_cpu];
	cur_grp = CPU_STATE_current_group(pcpu);
	pecb = LWPMU_DEVICE_PMU_register_data(&devices[dev_idx])[cur_grp];

	/* Three U64 save slots per cpu in the global pmu_state array */
	CPU_STATE_pmu_state(pcpu) = pmu_state + (this_cpu * 3);
	if (CPU_STATE_pmu_state(pcpu) == NULL) {
		SEP_DRV_LOG_WARNING_TRACE_OUT(
			"Unable to save PMU state on CPU %d!", this_cpu);
		return;
	}

	/* Remember which MSRs to restore in silvermont_Destroy */
	restore_reg_addr[0] = ECB_entries_reg_id(
		pecb, ECB_SECTION_REG_INDEX(pecb, DEBUG_CTRL_REG_INDEX,
					    PMU_OPERATION_GLOBAL_REGS));
	restore_reg_addr[1] = ECB_entries_reg_id(
		pecb, ECB_SECTION_REG_INDEX(pecb, GLOBAL_CTRL_REG_INDEX,
					    PMU_OPERATION_GLOBAL_REGS));
	restore_reg_addr[2] = ECB_entries_reg_id(
		pecb, ECB_SECTION_REG_INDEX(pecb, FIXED_CTRL_REG_INDEX,
					    PMU_OPERATION_GLOBAL_REGS));

	// save the original PMU state on this CPU (NOTE: must only be called ONCE per collection)
	CPU_STATE_pmu_state(pcpu)[0] = SYS_Read_MSR(restore_reg_addr[0]);
	CPU_STATE_pmu_state(pcpu)[1] = SYS_Read_MSR(restore_reg_addr[1]);
	CPU_STATE_pmu_state(pcpu)[2] = SYS_Read_MSR(restore_reg_addr[2]);

	SEP_DRV_LOG_TRACE("Saving PMU state on CPU %d:", this_cpu);
	SEP_DRV_LOG_TRACE("    msr_val(IA32_DEBUG_CTRL)=0x%llx.",
			  CPU_STATE_pmu_state(pcpu)[0]);
	SEP_DRV_LOG_TRACE("    msr_val(IA32_PERF_GLOBAL_CTRL)=0x%llx.",
			  CPU_STATE_pmu_state(pcpu)[1]);
	SEP_DRV_LOG_TRACE("    msr_val(IA32_FIXED_CTRL)=0x%llx.",
			  CPU_STATE_pmu_state(pcpu)[2]);

	SEP_DRV_LOG_TRACE_OUT("");
}

/* ------------------------------------------------------------------------- */
/*!
 * @fn silvermont_Destroy(params)
 *
 * @param params    dummy parameter which is not used
 *
 * @return None     No return needed
 *
 * @brief Reset the PMU setting up after collection
 *
 * Special Notes
 *     Restores the previously saved PMU state done in core2_Initialize.
 *     This function should be called in parallel across all CPUs
 *     after sampling collection ends/terminates.
 *
 */
static VOID silvermont_Destroy(VOID *param)
{
	U32 this_cpu;
	CPU_STATE pcpu;

	SEP_DRV_LOG_TRACE_IN("Dummy param: %p.", param);

	if (pcb == NULL) {
		SEP_DRV_LOG_TRACE_OUT("Early exit (!pcb).");
		return;
	}

	/* Pin briefly to read a consistent cpu id */
	preempt_disable();
	this_cpu = CONTROL_THIS_CPU();
	preempt_enable();
	pcpu = &pcb[this_cpu];

	if (CPU_STATE_pmu_state(pcpu) == NULL) {
		SEP_DRV_LOG_WARNING_TRACE_OUT(
			"Unable to restore PMU state on CPU %d!", this_cpu);
		return;
	}

	/* NOTE(review): the saved values are not written back here — the
	 * control MSRs are cleared to 0 instead */
	SEP_DRV_LOG_TRACE("Clearing PMU state on CPU %d:", this_cpu);
	SEP_DRV_LOG_TRACE("    msr_val(IA32_DEBUG_CTRL)=0x0.");
	SEP_DRV_LOG_TRACE("    msr_val(IA32_PERF_GLOBAL_CTRL)=0x0.");
	SEP_DRV_LOG_TRACE("    msr_val(IA32_FIXED_CTRL)=0.");

	SYS_Write_MSR(restore_reg_addr[0], 0);
	SYS_Write_MSR(restore_reg_addr[1], 0);
	SYS_Write_MSR(restore_reg_addr[2], 0);

	CPU_STATE_pmu_state(pcpu) = NULL;

	SEP_DRV_LOG_TRACE_OUT("");
}

/*
 * @fn silvermont_Read_LBRs(buffer)
 *
 * @param   IN buffer - pointer to the buffer to write the data into
 * @param   IN data   - unused here (presumably a pre-captured LBR sample on
 *                      other paths — TODO confirm against dispatch callers)
 * @return  Top-of-stack LBR "from" address (sign-extended), 0 if not found
 *
 * @brief   Read all the LBR registers into the buffer provided and return
 *
 */
static U64 silvermont_Read_LBRs(VOID *buffer, PVOID data)
{
	U32 i, count = 0;
	U64 *lbr_buf = NULL;
	U64 value;
	U64 tos_ip_addr = 0;
	U64 tos_ptr = 0;
	SADDR saddr;
	U32 this_cpu;
	U32 dev_idx;
	LBR lbr;
	DEV_CONFIG pcfg;

	SEP_DRV_LOG_TRACE_IN("Buffer: %p.", buffer);

	this_cpu = CONTROL_THIS_CPU();
	dev_idx = core_to_dev_map[this_cpu];
	pcfg = LWPMU_DEVICE_pcfg(&devices[dev_idx]);
	lbr = LWPMU_DEVICE_lbr(&devices[dev_idx]);

	if (buffer &&
	    DEV_CONFIG_store_lbrs(pcfg)) {
		lbr_buf = (U64 *)buffer;
	}

	/* Entry 0 is the TOS pointer; subsequent FROM_IP entries are matched
	 * against it to find the most recent branch source */
	for (i = 0; i < LBR_num_entries(lbr); i++) {
		value = SYS_Read_MSR(LBR_entries_reg_id(lbr, i));
		if (buffer && DEV_CONFIG_store_lbrs(pcfg)) {
			*lbr_buf = value;
		}
		SEP_DRV_LOG_TRACE("LBR %u, 0x%llx.", i, value);
		if (i == 0) {
			tos_ptr = value;
		} else {
			if (LBR_entries_etype(lbr, i) == LBR_ENTRY_FROM_IP) {
				if (tos_ptr == count) {
					SADDR_addr(saddr) =
						value & SILVERMONT_LBR_BITMASK;
					tos_ip_addr = (U64)SADDR_addr(
						saddr); // Add signed extension
					SEP_DRV_LOG_TRACE(
						"Tos_ip_addr %llu, 0x%llx.",
						tos_ptr, value);
				}
				count++;
			}
		}
		if (buffer && DEV_CONFIG_store_lbrs(pcfg)) {
			lbr_buf++;
		}
	}

	SEP_DRV_LOG_TRACE_OUT("Res: %llu.", tos_ip_addr);
	return tos_ip_addr;
}

/*
 * @fn silvermont_Clean_Up(param)
 *
 * @param param   dummy parameter which is not used
 * @return None
 *
 * @brief Zero every ECB register flagged for clean-up
 */
static VOID silvermont_Clean_Up(VOID *param)
{
	SEP_DRV_LOG_TRACE_IN("Dummy param: %p.", param);

	FOR_EACH_REG_CORE_OPERATION(pecb, i, PMU_OPERATION_ALL_REG)
	{
		if (ECB_entries_clean_up_get(pecb, i)) {
			SEP_DRV_LOG_TRACE("Clean up set --- RegId --- %x.",
					  ECB_entries_reg_id(pecb, i));
			SYS_Write_MSR(ECB_entries_reg_id(pecb, i), 0LL);
		}
	}
	END_FOR_EACH_REG_CORE_OPERATION;

	SEP_DRV_LOG_TRACE_OUT("");
}

/* ------------------------------------------------------------------------- */
/*!
 * @fn silvermont_Read_Counts(param, id)
 *
 * @param param    The read thread node to process
 * @param id       The event id for the which the sample is generated
 *
 * @return None     No return needed
 *
 * @brief Read CPU event based counts data and store into the buffer param;
 *        For the case of the trigger event, store the SAV value.
+ */ +static VOID silvermont_Read_Counts(PVOID param, U32 id) +{ + U64 *data; + U32 this_cpu; + CPU_STATE pcpu; + U32 dev_idx; + DEV_CONFIG pcfg; + U32 event_id = 0; + + SEP_DRV_LOG_TRACE_IN("Param: %p, id: %u.", param, id); + + this_cpu = CONTROL_THIS_CPU(); + pcpu = &pcb[this_cpu]; + dev_idx = core_to_dev_map[this_cpu]; + pcfg = LWPMU_DEVICE_pcfg(&devices[dev_idx]); + + if (DEV_CONFIG_ebc_group_id_offset(pcfg)) { + // Write GroupID + data = (U64 *)((S8 *)param + + DEV_CONFIG_ebc_group_id_offset(pcfg)); + *data = CPU_STATE_current_group(pcpu) + 1; + } + + FOR_EACH_REG_CORE_OPERATION(pecb, i, PMU_OPERATION_DATA_ALL) + { + if (ECB_entries_counter_event_offset(pecb, i) == 0) { + continue; + } + data = (U64 *)((S8 *)param + + ECB_entries_counter_event_offset(pecb, i)); + event_id = ECB_entries_event_id_index(pecb, i); + if (event_id == id) { + *data = ~(ECB_entries_reg_value(pecb, i) - 1) & + ECB_entries_max_bits(pecb, i); + } else { + *data = SYS_Read_MSR(ECB_entries_reg_id(pecb, i)); + SYS_Write_MSR(ECB_entries_reg_id(pecb, i), 0LL); + } + } + END_FOR_EACH_REG_CORE_OPERATION; + + if (DRV_CONFIG_enable_p_state(drv_cfg)) { + CPU_STATE_p_state_counting(pcpu) = 0; + } + + SEP_DRV_LOG_TRACE_OUT(""); +} + +/* ------------------------------------------------------------------------- */ +/*! 
+ * @fn void silvermont_Platform_Info
+ *
+ * @brief Reads the MSR_PLATFORM_INFO register if present
+ *
+ * @param data - pointer to a DRV_PLATFORM_INFO structure to fill in
+ *
+ * @return None
+ *
+ * Special Notes:
+ *
+ */
+static void silvermont_Platform_Info(PVOID data)
+{
+	U64 index = 0;
+	DRV_PLATFORM_INFO platform_data = (DRV_PLATFORM_INFO)data;
+	U64 value = 0;
+	U64 clock_value = 0;
+	U64 energy_multiplier;
+
+	SEP_DRV_LOG_TRACE_IN("Data: %p.", data);
+
+	if (!platform_data) {
+		SEP_DRV_LOG_TRACE_OUT("Early exit (!platform_data).");
+		return;
+	}
+
+#define IA32_MSR_PLATFORM_INFO 0xCE
+	value = SYS_Read_MSR(IA32_MSR_PLATFORM_INFO);
+
+#define IA32_MSR_PSB_CLOCK_STS 0xCD
+#define FREQ_MASK_BITS 0x03
+
+	clock_value = SYS_Read_MSR(IA32_MSR_PSB_CLOCK_STS);
+	index = clock_value & FREQ_MASK_BITS;
+	DRV_PLATFORM_INFO_info(platform_data) = value;
+	DRV_PLATFORM_INFO_ddr_freq_index(platform_data) = index;
+
+#undef IA32_MSR_PLATFORM_INFO
+#undef IA32_MSR_PSB_CLOCK_STS
+#undef FREQ_MASK_BITS
+	energy_multiplier = SYS_Read_MSR(MSR_ENERGY_MULTIPLIER);
+	SEP_DRV_LOG_TRACE("MSR_ENERGY_MULTIPLIER: %llx.", energy_multiplier);
+	DRV_PLATFORM_INFO_energy_multiplier(platform_data) =
+		(U32)(energy_multiplier & 0x00001F00) >> 8;
+
+	SEP_DRV_LOG_TRACE_OUT("");
+}
+
+/* ------------------------------------------------------------------------- */
+/*!
+ * @fn VOID knights_Platform_Info + * + * @brief Reads the MSR_PLATFORM_INFO register if present + * + * @param void + * + * @return value read from the register + * + * Special Notes: + * + */ +static VOID knights_Platform_Info(PVOID data) +{ + DRV_PLATFORM_INFO platform_data = (DRV_PLATFORM_INFO)data; + U64 value = 0; + U64 energy_multiplier; + + SEP_DRV_LOG_TRACE_IN("Data: %p.", data); + + if (!platform_data) { + SEP_DRV_LOG_TRACE_OUT("Early exit (!platform_data)."); + return; + } + +#define IA32_MSR_PLATFORM_INFO 0xCE + value = SYS_Read_MSR(IA32_MSR_PLATFORM_INFO); + + DRV_PLATFORM_INFO_info(platform_data) = value; + DRV_PLATFORM_INFO_ddr_freq_index(platform_data) = 0; + energy_multiplier = SYS_Read_MSR(MSR_ENERGY_MULTIPLIER); + SEP_DRV_LOG_TRACE("MSR_ENERGY_MULTIPLIER: %llx.", energy_multiplier); + DRV_PLATFORM_INFO_energy_multiplier(platform_data) = + (U32)(energy_multiplier & 0x00001F00) >> 8; +} + +/* + * Initialize the dispatch table + */ +DISPATCH_NODE silvermont_dispatch = { .init = silvermont_Initialize, + .fini = silvermont_Destroy, + .write = silvermont_Write_PMU, + .freeze = silvermont_Disable_PMU, + .restart = silvermont_Enable_PMU, + .read_data = silvermont_Read_PMU_Data, + .check_overflow = + silvermont_Check_Overflow, + .swap_group = silvermont_Swap_Group, + .read_lbrs = silvermont_Read_LBRs, + .cleanup = silvermont_Clean_Up, + .hw_errata = NULL, + .read_power = NULL, + .check_overflow_errata = NULL, + .read_counts = silvermont_Read_Counts, + .check_overflow_gp_errata = NULL, + .read_ro = NULL, + .platform_info = silvermont_Platform_Info, + .trigger_read = NULL, + .scan_for_uncore = NULL, + .read_metrics = NULL }; + +DISPATCH_NODE knights_dispatch = { .init = silvermont_Initialize, + .fini = silvermont_Destroy, + .write = silvermont_Write_PMU, + .freeze = silvermont_Disable_PMU, + .restart = silvermont_Enable_PMU, + .read_data = silvermont_Read_PMU_Data, + .check_overflow = silvermont_Check_Overflow, + .swap_group = silvermont_Swap_Group, + 
.read_lbrs = silvermont_Read_LBRs, + .cleanup = silvermont_Clean_Up, + .hw_errata = NULL, + .read_power = NULL, + .check_overflow_errata = NULL, + .read_counts = silvermont_Read_Counts, + .check_overflow_gp_errata = NULL, + .read_ro = NULL, + .platform_info = knights_Platform_Info, + .trigger_read = NULL, + .scan_for_uncore = NULL, + .read_metrics = NULL }; diff --git a/drivers/platform/x86/sepdk/sep/sys32.S b/drivers/platform/x86/sepdk/sep/sys32.S new file mode 100755 index 0000000000000..eb4c12304cdce --- /dev/null +++ b/drivers/platform/x86/sepdk/sep/sys32.S @@ -0,0 +1,200 @@ +# Copyright(C) 2002-2018 Intel Corporation. All Rights Reserved. +# +# This file is part of SEP Development Kit +# +# SEP Development Kit is free software; you can redistribute it +# and/or modify it under the terms of the GNU General Public License +# version 2 as published by the Free Software Foundation. +# +# SEP Development Kit is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# As a special exception, you may use this file as part of a free software +# library without restriction. Specifically, if other files instantiate +# templates or use macros or inline functions from this file, or you compile +# this file and link it with other files to produce an executable, this +# file does not by itself cause the resulting executable to be covered by +# the GNU General Public License. This exception does not however +# invalidate any other reasons why the executable file might be covered by +# the GNU General Public License. 
+ + +#include +#include + +#if LINUX_VERSION_CODE == KERNEL_VERSION(2, 6, 20) +#define USE_KERNEL_PERCPU_SEGMENT_GS +#endif + +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 21) && LINUX_VERSION_CODE <= KERNEL_VERSION(2, 6, 29) +#define USE_KERNEL_PERCPU_SEGMENT_FS +#endif + +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 30) +#define USE_KERNEL_PERCPU_SEGMENT_FS +#define USE_KERNEL_PERCPU_SEGMENT_GS +#endif + +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 20) +#if !defined(__KERNEL_PERCPU) +#define __KERNEL_PERCPU __KERNEL_PDA +#endif +#endif + +#if defined(USE_KERNEL_PERCPU_SEGMENT_GS) +#if defined(__KERNEL_STACK_CANARY) +#define SEP_GS_SEG_VALUE __KERNEL_STACK_CANARY +#else +#define SEP_GS_SEG_VALUE __KERNEL_PERCPU +#endif +#endif + +#*********************************************************************** +# +# SYS_Get_IDT_Base_HWR +# Get the IDT Desc address +# +# Entry: none +# +# Exit: base address in eax +# +# void SYS_Get_IDT_Base_HWR(U64 *pIdtDesc); +# +#*********************************************************************** + .text + .align 4 + .global SYS_IO_Delay +SYS_IO_Delay: + ret + + .global SYS_Get_IDT_Base_HWR +SYS_Get_IDT_Base_HWR: + subl $8, %esp + sidt 2(%esp) + movl 4(%esp), %eax + addl $8, %esp + ret + .global SYS_Get_cs +SYS_Get_cs: + mov %cs, %ax + andl $0x0000ffff, %eax + ret + + .global SYS_Get_TSC +SYS_Get_TSC: + rdtsc + ret + .text + .align 4 + .global SYS_Perfvec_Handler +SYS_Perfvec_Handler: + # This is the same as KERNEL's + pushl %eax # Filler for Error Code + + cld + pushl %es # SAVE_ALL macro to access pt_regs + pushl %ds # inside our ISR. 
+#if defined(USE_KERNEL_PERCPU_SEGMENT_GS) + pushl %gs +#endif +#if defined(USE_KERNEL_PERCPU_SEGMENT_FS) + pushl %fs +#endif + pushl %eax + pushl %ebp + pushl %edi + pushl %esi + pushl %edx + pushl %ecx + pushl %ebx + + movl $(__KERNEL_DS), %edx # Use KERNEL DS selector + movl %edx, %ds # Make sure we set Kernel + movl %edx, %es # DS into local DS and ES + +#if defined(USE_KERNEL_PERCPU_SEGMENT_GS) + movl $(SEP_GS_SEG_VALUE), %edx # Use kernel percpu segment + movl %edx, %gs # ... and load it into %gs +#endif +#if defined(USE_KERNEL_PERCPU_SEGMENT_FS) + movl $(__KERNEL_PERCPU), %edx # Use kernel percpu segment + movl %edx, %fs # ... and load it into %fs +#endif + + movl %esp, %ebx # get ready to put *pt_regs on stack + + pushl %ebx # put *pt_regs on the stack + call PMI_Interrupt_Handler + addl $0x4, %esp # pop to nowhere... + + pop %ebx # restore register set + pop %ecx + pop %edx + pop %esi + pop %edi + pop %ebp + pop %eax +#if defined(USE_KERNEL_PERCPU_SEGMENT_FS) + pop %fs +#endif +#if defined(USE_KERNEL_PERCPU_SEGMENT_GS) + pop %gs +#endif + pop %ds + pop %es + pop %eax + + iret +# ---------------------------------------------------------------------------- +# name: get_CSD +# +# description: get the CS descriptor +# +# input: code segment selector +# +# output: code segment descriptor +# ---------------------------------------------------------------------------- + .text + .align 4 + .globl SYS_Get_CSD + +SYS_Get_CSD: + pushl %ebp + movl %esp, %ebp + pushal # save regs + + subl $8, %esp + xorl %eax, %eax + movw 8(%ebp), %ax # eax.lo = cs + sgdt (%esp) # store gdt reg + leal (%esp), %ebx # ebx = gdt reg ptr + movl 2(%ebx), %ecx # ecx = gdt base + xorl %edx, %edx + movw %ax, %dx + andl $4, %edx + cmpl $0, %edx # test ti. GDT? + jz .bsr_10 # ..yes + xorl %edx, %edx + sldt %dx # ..no dx=ldtsel + andb $0xf8, %dl # clear ti, rpl + addl 2(%ebx), %edx # add gdt base + movb 7(%edx), %cl # ecx = ldt base + shll $8, %ecx # .. + movb 4(%edx), %cl # .. 
+ shll $16, %ecx # .. + movw 2(%edx), %cx # .. +.bsr_10: + andb $0xf8, %al # clear ti & rpl + addl %eax, %ecx # add to gdt/ldt + movl (%ecx), %eax # copy code seg + movl 12(%ebp), %edx # ..descriptor (csdlo) + movl %eax, (%edx) # ..descriptor (csdlo) + movl 4(%ecx), %eax # ..from gdt or + movl 16(%ebp), %edx # ..ldt to sample (csdhi) + movl %eax, (%edx) # ..ldt to sample (csdhi) + addl $8, %esp + popal # restore regs + leave + ret diff --git a/drivers/platform/x86/sepdk/sep/sys64.S b/drivers/platform/x86/sepdk/sep/sys64.S new file mode 100755 index 0000000000000..1deb8db3cdb77 --- /dev/null +++ b/drivers/platform/x86/sepdk/sep/sys64.S @@ -0,0 +1,140 @@ +# Copyright(C) 2002-2018 Intel Corporation. All Rights Reserved. +# +# This file is part of SEP Development Kit +# +# SEP Development Kit is free software; you can redistribute it +# and/or modify it under the terms of the GNU General Public License +# version 2 as published by the Free Software Foundation. +# +# SEP Development Kit is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# As a special exception, you may use this file as part of a free software +# library without restriction. Specifically, if other files instantiate +# templates or use macros or inline functions from this file, or you compile +# this file and link it with other files to produce an executable, this +# file does not by itself cause the resulting executable to be covered by +# the GNU General Public License. This exception does not however +# invalidate any other reasons why the executable file might be covered by +# the GNU General Public License. 
+ +#include "inc/asm_helper.h" +#include + +.text + +#*********************************************************************** +# +# SYS_Get_IDT_Base +# Get the IDT Desc address +# +# Entry: pointer to location to store idt Desc +# +# Exit: none +# +# void SYS_Get_IDT_Base(U64 *pIdtDesc); +# +#*********************************************************************** + .global SYS_Get_IDT_Base +SYS_Get_IDT_Base: + SIDT (%rdi) + ret + +#*********************************************************************** +# +# SYS_Get_GDT_Base +# Get the GDT Desc address +# +# Entry: pointer to location to store gdt Desc +# +# Exit: none +# +# void SYS_Get_GDT_Base(U64 *pGdtDesc); +# +#*********************************************************************** + .global SYS_Get_GDT_Base +SYS_Get_GDT_Base: + SGDT (%rdi) + ret + +#*********************************************************************** +# +# SYS_Get_TSC +# Get the current TSC +# +# Entry: pointer to location to store gdt Desc +# +# Exit: none +# +# void SYS_Get_TSC(U64 *tsc); +# +#*********************************************************************** +# .global SYS_Get_TSC +#SYS_Get_TSC: +# rdtsc +# ret + +#*********************************************************************** +# +# SYS_IO_Delay +# Add a short delay to the instruction stream +# +# Entry: none +# +# Exit: none +# +# void SYS_IO_Delay(void); +# +#*********************************************************************** + .global SYS_IO_Delay +SYS_IO_Delay: + ret + +# ---------------------------------------------------------------------------- +# name: SYS_PerfVec_Handler +# +# description: ISR entry for local APIC PERF interrupt vector +# +# Input: n/a +# +# Output: n/a +# ---------------------------------------------------------------------------- + + .global SYS_Perfvec_Handler +SYS_Perfvec_Handler: + CFI_STARTPROC + pushq %rax # fake an error code... + cld # cause the kernel likes it this way... + + SAVE_ALL # Save the world! 
+ + movl $MSR_GS_BASE, %ecx # for the moment, do the safe swapgs check + rdmsr + xorl %ebx, %ebx # assume no swapgs (ebx == 0) + testl %edx, %edx + js 1f + swapgs + movl $1, %ebx # ebx == 1 means we did a swapgs +1: movq %rsp, %rdi # pt_regs is the first argument + + # + # ebx is zero if no swap, one if swap + # ebx is preserved in C calling convention... + # + # NOTE: the C code is responsible for ACK'ing the APIC!!! + # + call PMI_Interrupt_Handler + + # + # Don't want an interrupt while we are doing the swapgs stuff + # + cli + testl %ebx, %ebx + jz 2f + swapgs +2: RESTORE_ALL + popq %rax + iretq + CFI_ENDPROC diff --git a/drivers/platform/x86/sepdk/sep/sys_info.c b/drivers/platform/x86/sepdk/sep/sys_info.c new file mode 100755 index 0000000000000..cf5a90c6c5439 --- /dev/null +++ b/drivers/platform/x86/sepdk/sep/sys_info.c @@ -0,0 +1,1108 @@ +/* **************************************************************************** + * Copyright(C) 2009-2018 Intel Corporation. All Rights Reserved. + * + * This file is part of SEP Development Kit + * + * SEP Development Kit is free software; you can redistribute it + * and/or modify it under the terms of the GNU General Public License + * version 2 as published by the Free Software Foundation. + * + * SEP Development Kit is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * As a special exception, you may use this file as part of a free software + * library without restriction. Specifically, if other files instantiate + * templates or use macros or inline functions from this file, or you + * compile this file and link it with other files to produce an executable + * this file does not by itself cause the resulting executable to be + * covered by the GNU General Public License. 
This exception does not + * however invalidate any other reasons why the executable file might be + * covered by the GNU General Public License. + * **************************************************************************** + */ + +#include "lwpmudrv_defines.h" +#include +#include +#include + +#include "lwpmudrv_types.h" +#include "rise_errors.h" +#include "lwpmudrv_ecb.h" +#include "lwpmudrv_struct.h" +#include "lwpmudrv.h" +#include "control.h" +#include "utility.h" +#include "apic.h" +#include "sys_info.h" + +#define VTSA_CPUID VTSA_CPUID_X86 + +extern U64 total_ram; +static IOCTL_SYS_INFO *ioctl_sys_info; +static size_t ioctl_sys_info_size; +static U32 *cpuid_entry_count; +static U32 *cpuid_total_count; +U32 *cpu_built_sysinfo; + +static U32 cpu_threads_per_core; +static VOID *gen_per_cpu_ptr; + +#define VTSA_NA64 ((U64)-1) +#define VTSA_NA32 ((U32)-1) +#define VTSA_NA ((U32)-1) + +#define SYS_INFO_NUM_SETS(rcx) ((rcx) + 1) +#define SYS_INFO_LINE_SIZE(rbx) (((rbx)&0xfff) + 1) +#define SYS_INFO_LINE_PARTITIONS(rbx) ((((rbx) >> 12) & 0x3ff) + 1) +#define SYS_INFO_NUM_WAYS(rbx) ((((rbx) >> 22) & 0x3ff) + 1) + +#define SYS_INFO_CACHE_SIZE(rcx, rbx) \ + (SYS_INFO_NUM_SETS((rcx)) * SYS_INFO_LINE_SIZE((rbx)) * \ + SYS_INFO_LINE_PARTITIONS((rbx)) * SYS_INFO_NUM_WAYS((rbx))) + +#define MSR_FB_PCARD_ID_FUSE 0x17 // platform id fuses MSR + +#define LOW_PART(x) (x & 0xFFFFFFFF) + +/* ------------------------------------------------------------------------- */ +/*! + * @fn static U64 sys_info_nbits(number) + * + * @param number - the number to check + * @return the number of bit. + * + * @brief This routine gets the number of useful bits with the given number. + * It will round the number up to power of 2, and adjust to 0 based number. + * sys_info_nbits(0x3) = 2 + * sys_info_nbits(0x4) = 2 + * + */ +static U64 sys_info_nbits(U64 number) +{ + U64 i; + + SEP_DRV_LOG_TRACE_IN("Number: %llx.", + number); // is %llu portable in the kernel? 
+ + if (number < 2) { + SEP_DRV_LOG_TRACE_OUT("Res: %u. (early exit)", (U32)number); + return number; + } + + // adjust to 0 based number, and round up to power of 2 + number--; + for (i = 0; number > 0; i++) { + number >>= 1; + } + + SEP_DRV_LOG_TRACE_OUT("Res: %u.", (U32)i); + return i; +} + +/* ------------------------------------------------------------------------- */ +/*! + * @fn static U64 sys_info_bitmask(nbits) + * + * @param number - the number of bits + * @return the bit mask for the nbits number + * + * @brief This routine gets the bitmask for the nbits number. + */ +static U64 sys_info_bitmask(U64 nbits) +{ + U64 mask = 0; + + SEP_DRV_LOG_TRACE_IN("Nbits: %u.", (U32)nbits); + + mask = (U64)1 << nbits; + mask--; + + SEP_DRV_LOG_TRACE_OUT("Res: %llx.", mask); + + return mask; +} + +/* ------------------------------------------------------------------------- */ +/*! + * @fn static void sys_info_Get_Num_Cpuid_Funcs(basic_funcs, basic_4_funcs, extended_funcs) + * + * @param basic_functions - pointer to the number of basic functions + * @param basic_4_funcs - pointer to the basic 4 functions + * @param extended_funcs - pointer to the number of extended functions + * @return total number of cpuid functions + * + * @brief This routine gets the number of basic and extended cpuid functions. 
+ * + */ +static U32 sys_info_Get_Num_Cpuid_Funcs(OUT U32 *basic_funcs, + OUT U32 *basic_4_funcs, + OUT U32 *extended_funcs) +{ + U64 num_basic_funcs = 0x0LL; + U64 num_basic_4_funcs = 0x0LL; + U64 num_extended_funcs = 0x0LL; + U64 rax; + U64 rbx; + U64 rcx; + U64 rdx; + U64 i; + U32 res; + + SEP_DRV_LOG_TRACE_IN(""); + + UTILITY_Read_Cpuid(0, &num_basic_funcs, &rbx, &rcx, &rdx); + UTILITY_Read_Cpuid(0x80000000, &num_extended_funcs, &rbx, &rcx, &rdx); + + if (num_extended_funcs & 0x80000000) { + num_extended_funcs -= 0x80000000; + } + + // + // make sure num_extended_funcs is not bogus + // + if (num_extended_funcs > 0x1000) { + num_extended_funcs = 0; + } + + // + // if number of basic funcs is greater than 4, figure out how many + // time we should call CPUID with eax = 0x4. + // + num_basic_4_funcs = 0; + if (num_basic_funcs >= 4) { + for (i = 0, rax = (U64)-1; (rax & 0x1f) != 0; i++) { + rcx = i; + UTILITY_Read_Cpuid(4, &rax, &rbx, &rcx, &rdx); + } + num_basic_4_funcs = i - 1; + } + if (num_basic_funcs >= 0xb) { + i = 0; + do { + rcx = i; + UTILITY_Read_Cpuid(0xb, &rax, &rbx, &rcx, &rdx); + i++; + } while (!(LOW_PART(rax) == 0 && LOW_PART(rbx) == 0)); + num_basic_4_funcs += i; + } + SEP_DRV_LOG_TRACE("Num_basic_4_funcs = %llx.", num_basic_4_funcs); + + // + // adjust number to include 0 and 0x80000000 functions. 
+	//
+	num_basic_funcs++;
+	num_extended_funcs++;
+
+	SEP_DRV_LOG_TRACE("num_basic_funcs: %llx, num_extended_funcs: %llx.",
+			  num_basic_funcs, num_extended_funcs);
+
+	//
+	// fill-in the parameter for the caller
+	//
+	if (basic_funcs != NULL) {
+		*basic_funcs = (U32)num_basic_funcs;
+	}
+	if (basic_4_funcs != NULL) {
+		*basic_4_funcs = (U32)num_basic_4_funcs;
+	}
+	if (extended_funcs != NULL) {
+		*extended_funcs = (U32)num_extended_funcs;
+	}
+
+	res = (U32)(num_basic_funcs + num_basic_4_funcs + num_extended_funcs);
+	SEP_DRV_LOG_TRACE_OUT("Res: %u.", res);
+	return res;
+}
+
+/* ------------------------------------------------------------------------- */
+/*!
+ * @fn static void sys_info_Get_Cpuid_Entry_Count(param)
+ *
+ * @param param - pointer to the cpu number, or NULL to use the current cpu
+ * @return None
+ *
+ * @brief Service Routine to query the CPU for the number of entries needed
+ *
+ */
+static VOID sys_info_Get_Cpuid_Entry_Count(PVOID param)
+{
+	S32 current_processor;
+	U32 *current_cpu_buffer;
+
+	SEP_DRV_LOG_TRACE_IN("Buffer: %p.", param);
+
+	if (param == NULL) {
+		current_processor = CONTROL_THIS_CPU();
+	} else {
+		current_processor = *(S32 *)param;
+	}
+	SEP_DRV_LOG_TRACE("Beginning on CPU %u.", current_processor);
+
+	current_cpu_buffer = (U32 *)((U8 *)cpuid_entry_count +
+				     current_processor * sizeof(U32));
+
+#if defined(ALLOW_ASSERT)
+	ASSERT(((U8 *)current_cpu_buffer + sizeof(U32)) <=
+	       ((U8 *)current_cpu_buffer +
+		GLOBAL_STATE_num_cpus(driver_state) * sizeof(U32)));
+#endif
+	*current_cpu_buffer = sys_info_Get_Num_Cpuid_Funcs(NULL, NULL, NULL);
+
+	SEP_DRV_LOG_TRACE_OUT("");
+}
+
+/* ------------------------------------------------------------------------- */
+/*!
+ * @fn static U32 sys_info_Get_Cpuid_Buffer_Size(cpuid_entries)
+ *
+ * @param cpuid_entries - number of cpuid entries
+ * @return size of buffer needed in bytes
+ *
+ * @brief This routine returns number of bytes needed to hold the CPU_CS_INFO
+ * @brief structure.
+ * + */ +static U32 sys_info_Get_Cpuid_Buffer_Size(U32 cpuid_entries) +{ + U32 cpuid_size; + U32 buffer_size; + + SEP_DRV_LOG_TRACE_IN(""); + + cpuid_size = sizeof(VTSA_CPUID); + + buffer_size = + sizeof(IOCTL_SYS_INFO) + sizeof(VTSA_GEN_ARRAY_HDR) + + sizeof(VTSA_NODE_INFO) + sizeof(VTSA_GEN_ARRAY_HDR) + + GLOBAL_STATE_num_cpus(driver_state) * sizeof(VTSA_GEN_PER_CPU) + + GLOBAL_STATE_num_cpus(driver_state) * + sizeof(VTSA_GEN_ARRAY_HDR) + + cpuid_entries * cpuid_size; + + SEP_DRV_LOG_TRACE_OUT("Res: %u.", buffer_size); + + return buffer_size; +} + +/* ------------------------------------------------------------------------- */ +/*! + * @fn extern void sys_info_Fill_CPUID(...) + * + * @param num_cpuids, + * @param basic_funcs, + * @param extended_funcs, + * @param cpu, + * @param *current_cpuid + * @param *gen_per_cpu, + * @param *local_gpc + * + * @return None + * + * @brief This routine is called to build per cpu information. + * @brief Fills in the cpuid for the processor in the right location in the buffer + * + */ +static void sys_info_Fill_CPUID(U32 num_cpuids, U32 basic_funcs, + U32 extended_funcs, U32 cpu, + VTSA_CPUID *current_cpuid, + VTSA_GEN_PER_CPU *gen_per_cpu, + VTSA_GEN_PER_CPU *local_gpc) +{ + U32 i, index, j; + U64 cpuid_function; + U64 rax, rbx, rcx, rdx; + VTSA_CPUID *cpuid_el; + U32 shift_nbits_core = 0; + U32 shift_nbits_pkg = 0; + U32 model = 0; + DRV_BOOL ht_supported = FALSE; + U32 apic_id = 0; + U32 num_logical_per_physical = 0; + U32 cores_per_die = 1; + U32 thread_id = 0; + U32 core_id = 0; + U32 package_id = 0; + U32 module_id = 0; + U32 cores_sharing_cache = 0; + U32 cache_mask_width = 0; + U32 num_cores = 0; + + SEP_DRV_LOG_TRACE_IN("CPU: %x.", cpu); + + apic_id = CPU_STATE_apic_id(&pcb[cpu]); + SEP_DRV_LOG_TRACE("Cpu %x: apic_id = %d.", cpu, apic_id); + + for (i = 0, index = 0; index < num_cpuids; i++) { + cpuid_function = + (i < basic_funcs) ? 
i : (0x80000000 + i - basic_funcs);
+
+		if (cpuid_function == 0x4) {
+			for (j = 0, rax = (U64)-1; (rax & 0x1f) != 0; j++) {
+				rcx = j;
+				UTILITY_Read_Cpuid(cpuid_function, &rax, &rbx,
+						   &rcx, &rdx);
+				cpuid_el = &current_cpuid[index];
+				index++;
+
+#if defined(ALLOW_ASSERT)
+				ASSERT(((U8 *)cpuid_el + sizeof(VTSA_CPUID)) <=
+				       cpuid_buffer_limit);
+#endif
+
+				VTSA_CPUID_X86_cpuid_eax_input(cpuid_el) =
+					(U32)cpuid_function;
+				VTSA_CPUID_X86_cpuid_eax(cpuid_el) = (U32)rax;
+				VTSA_CPUID_X86_cpuid_ebx(cpuid_el) = (U32)rbx;
+				VTSA_CPUID_X86_cpuid_ecx(cpuid_el) = (U32)rcx;
+				VTSA_CPUID_X86_cpuid_edx(cpuid_el) = (U32)rdx;
+				SEP_DRV_LOG_TRACE("Function: %x.",
+						  (U32)cpuid_function);
+				SEP_DRV_LOG_TRACE(
+					"rax: %x, rbx: %x, rcx: %x, rdx: %x.",
+					(U32)rax, (U32)rbx, (U32)rcx, (U32)rdx);
+
+				if ((rax & 0x1f) != 0) {
+					local_gpc = &gen_per_cpu[cpu];
+					if (((rax >> 5) & 0x3) == 2) {
+						VTSA_GEN_PER_CPU_cpu_cache_L2(
+							local_gpc) =
+							(U32)(SYS_INFO_CACHE_SIZE(
+								      rcx,
+								      rbx) >>
+							      10);
+						SEP_DRV_LOG_TRACE(
+							"L2 Cache: %x.",
+							VTSA_GEN_PER_CPU_cpu_cache_L2(
+								local_gpc));
+						cores_sharing_cache =
+							((U16)(rax >> 14) &
+							 0xfff) +
+							1;
+						SEP_DRV_LOG_TRACE(
+							"CORES_SHARING_CACHE=%d j=%d cpu=%d.",
+							cores_sharing_cache, j,
+							cpu);
+					}
+
+					if (((rax >> 5) & 0x3) == 3) {
+						VTSA_GEN_PER_CPU_cpu_cache_L3(
+							local_gpc) =
+							(U32)(SYS_INFO_CACHE_SIZE(
+								      rcx,
+								      rbx) >>
+							      10);
+						SEP_DRV_LOG_TRACE(
+							"L3 Cache: %x.",
+							VTSA_GEN_PER_CPU_cpu_cache_L3(
+								local_gpc));
+					}
+				}
+				if (j == 0) {
+					cores_per_die =
+						((U16)(rax >> 26) & 0x3f) + 1;
+				}
+			}
+			if (cores_sharing_cache != 0) {
+				cache_mask_width = (U32)sys_info_nbits(
+					cores_sharing_cache);
+				SEP_DRV_LOG_TRACE("CACHE MASK WIDTH=%x.",
+						  cache_mask_width);
+			}
+		} else if (cpuid_function == 0xb) {
+			j = 0;
+			do {
+				rcx = j;
+				UTILITY_Read_Cpuid(cpuid_function, &rax, &rbx,
+						   &rcx, &rdx);
+				cpuid_el = &current_cpuid[index];
+				index++;
+
+#if defined(ALLOW_ASSERT)
+				ASSERT(((U8 *)cpuid_el +
+					sizeof(VTSA_CPUID_X86)) <=
+				       cpuid_buffer_limit);
+#endif
+
+				VTSA_CPUID_X86_cpuid_eax_input(cpuid_el) =
+					(U32)cpuid_function;
+				VTSA_CPUID_X86_cpuid_eax(cpuid_el) = (U32)rax;
+				VTSA_CPUID_X86_cpuid_ebx(cpuid_el) = (U32)rbx;
+				VTSA_CPUID_X86_cpuid_ecx(cpuid_el) = (U32)rcx;
+				VTSA_CPUID_X86_cpuid_edx(cpuid_el) = (U32)rdx;
+				SEP_DRV_LOG_TRACE("Function: %x.",
+						  (U32)cpuid_function);
+				SEP_DRV_LOG_TRACE(
+					"rax: %x, rbx: %x, rcx: %x, rdx: %x.",
+					(U32)rax, (U32)rbx, (U32)rcx, (U32)rdx);
+				if (j == 0) {
+					shift_nbits_core =
+						rax &
+						0x1f; //No. of bits to shift APIC ID to get Core ID
+				}
+				if (j == 1) {
+					shift_nbits_pkg =
+						rax &
+						0x1f; //No. of bits to shift APIC ID to get Pkg ID
+				}
+				j++;
+			} while (!(LOW_PART(rax) == 0 && LOW_PART(rbx) == 0));
+		} else {
+			UTILITY_Read_Cpuid(cpuid_function, &rax, &rbx, &rcx,
+					   &rdx);
+			cpuid_el = &current_cpuid[index];
+			index++;
+
+			SEP_DRV_LOG_TRACE(
+				"Cpu %x: num_cpuids = %x i = %x index = %x.",
+				cpu, num_cpuids, i, index);
+
+#if defined(ALLOW_ASSERT)
+			ASSERT(((U8 *)cpuid_el + sizeof(VTSA_CPUID_X86)) <=
+			       cpuid_buffer_limit);
+
+			ASSERT(((U8 *)cpuid_el + sizeof(VTSA_CPUID_X86)) <=
+			       ((U8 *)current_cpuid +
+				(num_cpuids * sizeof(VTSA_CPUID_X86))));
+#endif
+
+			VTSA_CPUID_X86_cpuid_eax_input(cpuid_el) =
+				(U32)cpuid_function;
+			VTSA_CPUID_X86_cpuid_eax(cpuid_el) = (U32)rax;
+			VTSA_CPUID_X86_cpuid_ebx(cpuid_el) = (U32)rbx;
+			VTSA_CPUID_X86_cpuid_ecx(cpuid_el) = (U32)rcx;
+			VTSA_CPUID_X86_cpuid_edx(cpuid_el) = (U32)rdx;
+			SEP_DRV_LOG_TRACE("Function: %x.", (U32)cpuid_function);
+			SEP_DRV_LOG_TRACE("rax: %x, rbx: %x, rcx: %x, rdx: %x.",
+					  (U32)rax, (U32)rbx, (U32)rcx,
+					  (U32)rdx);
+
+			if (cpuid_function == 0) {
+				if ((U32)rbx == 0x756e6547 &&
+				    (U32)rcx == 0x6c65746e &&
+				    (U32)rdx == 0x49656e69) {
+					VTSA_GEN_PER_CPU_platform_id(
+						local_gpc) =
+						SYS_Read_MSR(
+							MSR_FB_PCARD_ID_FUSE);
+				}
+			} else if (cpuid_function == 1) {
+				/* extended model bits */
+				model = (U32)(rax >> 12 & 0xf0) |
+					(U32)(rax >> 4 & 0x0f);
+				ht_supported = (rdx >> 28) & 1 ?
TRUE : FALSE; + num_logical_per_physical = + (U32)((rbx & 0xff0000) >> 16); + if (num_logical_per_physical == 0) { + num_logical_per_physical = 1; + } + } else if (cpuid_function == 0xa) { + VTSA_GEN_PER_CPU_arch_perfmon_ver(local_gpc) = + (U32)(rax & 0xFF); + VTSA_GEN_PER_CPU_num_gp_counters(local_gpc) = + (U32)((rax >> 8) & 0xFF); + VTSA_GEN_PER_CPU_num_fixed_counters(local_gpc) = + (U32)(rdx & 0x1F); + } + } + } + + // set cpu_cache_L2 if not already set using 0x80000006 function + if (gen_per_cpu[cpu].cpu_cache_L2 == VTSA_NA && extended_funcs >= 6) { + UTILITY_Read_Cpuid(0x80000006, &rax, &rbx, &rcx, &rdx); + VTSA_GEN_PER_CPU_cpu_cache_L2(local_gpc) = (U32)(rcx >> 16); + } + + if (!ht_supported || num_logical_per_physical == cores_per_die) { + threads_per_core[cpu] = 1; + thread_id = 0; + } else { + // each core has 4 threads for MIC system, otherwise, it has 2 threads when ht is enabled + threads_per_core[cpu] = cpu_threads_per_core; + thread_id = (U16)(apic_id & (cpu_threads_per_core - 1)); + } + + core_id = (apic_id >> shift_nbits_core) & + sys_info_bitmask(shift_nbits_pkg - shift_nbits_core); + package_id = apic_id >> shift_nbits_pkg; + + if (cache_mask_width) { + module_id = (U32)(core_id / 2); + } + SEP_DRV_LOG_TRACE("MODULE ID=%d CORE ID=%d for cpu=%d PACKAGE ID=%d.", + module_id, core_id, cpu, package_id); + SEP_DRV_LOG_TRACE("Num_logical_per_physical=%d cores_per_die=%d.", + num_logical_per_physical, cores_per_die); + SEP_DRV_LOG_TRACE("Package_id %d, apic_id %x.", package_id, apic_id); + SEP_DRV_LOG_TRACE( + "Sys_info_nbits[cores_per_die, threads_per_core[%u]]: [%lld, %lld].", + cpu, sys_info_nbits(cores_per_die), + sys_info_nbits(threads_per_core[cpu])); + + VTSA_GEN_PER_CPU_cpu_intel_processor_number(local_gpc) = VTSA_NA32; + VTSA_GEN_PER_CPU_cpu_package_num(local_gpc) = (U16)package_id; + VTSA_GEN_PER_CPU_cpu_core_num(local_gpc) = (U16)core_id; + VTSA_GEN_PER_CPU_cpu_hw_thread_num(local_gpc) = (U16)thread_id; + 
VTSA_GEN_PER_CPU_cpu_threads_per_core(local_gpc) = + (U16)threads_per_core[cpu]; + VTSA_GEN_PER_CPU_cpu_module_num(local_gpc) = (U16)module_id; + num_cores = GLOBAL_STATE_num_cpus(driver_state) / threads_per_core[cpu]; + VTSA_GEN_PER_CPU_cpu_num_modules(local_gpc) = + (U16)(num_cores / 2); // Relavent to Atom processors, Always 2 + VTSA_GEN_PER_CPU_cpu_core_type(local_gpc) = 0; + GLOBAL_STATE_num_modules(driver_state) = + VTSA_GEN_PER_CPU_cpu_num_modules(local_gpc); + SEP_DRV_LOG_TRACE("MODULE COUNT=%d.", + GLOBAL_STATE_num_modules(driver_state)); + + core_to_package_map[cpu] = package_id; + core_to_phys_core_map[cpu] = core_id; + core_to_thread_map[cpu] = thread_id; + occupied_core_ids[core_id] = 1; + + if (num_packages < package_id + 1) { + num_packages = package_id + 1; + } + + SEP_DRV_LOG_TRACE_OUT(""); +} + +#if !defined(DRV_SEP_ACRN_ON) +/* ------------------------------------------------------------------------- */ +/*! +* @fn static void sys_info_Update_Hyperthreading_Info(buffer) +* +* @param buffer - points to the base of GEN_PER_CPU structure +* @return None +* +* @brief This routine is called to update per cpu information based on HT ON/OFF. +* +*/ +static VOID sys_info_Update_Hyperthreading_Info(VOID *buffer) +{ + U32 cpu; + VTSA_GEN_PER_CPU *gen_per_cpu, *local_gpc; + U32 i = 0; + U32 num_cores = 0; + + SEP_DRV_LOG_TRACE_IN(""); + + cpu = CONTROL_THIS_CPU(); + + // get the GEN_PER_CPU entry for the current processor. 
+ gen_per_cpu = (VTSA_GEN_PER_CPU *)buffer; + + // Update GEN_PER_CPU + local_gpc = &(gen_per_cpu[cpu]); + + while (i < (U32)GLOBAL_STATE_num_cpus(driver_state)) { + if (cpu_built_sysinfo[i] == 1) { + i++; + } + } + + for (i = 0; i < (U32)GLOBAL_STATE_num_cpus(driver_state); i++) { + if (occupied_core_ids[i] == 1) { + num_cores++; + } + } + threads_per_core[cpu] = (U32)(GLOBAL_STATE_num_cpus(driver_state) / + (num_cores * num_packages)); + if (VTSA_GEN_PER_CPU_cpu_threads_per_core(local_gpc) != + (U16)threads_per_core[cpu]) { + VTSA_GEN_PER_CPU_cpu_threads_per_core(local_gpc) = + (U16)threads_per_core[cpu]; + VTSA_GEN_PER_CPU_cpu_num_modules(local_gpc) = + (U16)(num_cores / 2); + GLOBAL_STATE_num_modules(driver_state) = + VTSA_GEN_PER_CPU_cpu_num_modules(local_gpc); + } + SEP_DRV_LOG_TRACE_OUT(""); +} +#endif + +/* ------------------------------------------------------------------------- */ +/*! + * @fn static void sys_info_Build_Percpu(buffer) + * + * @param buffer - points to the base of GEN_PER_CPU structure + * @return None + * + * @brief This routine is called to build per cpu information. + * + */ +static VOID sys_info_Build_Percpu(PVOID param) +{ + U32 basic_funcs, basic_4_funcs, extended_funcs; + U32 num_cpuids; + S32 cpu; + VTSA_CPUID *current_cpuid; + VTSA_GEN_ARRAY_HDR *cpuid_gen_array_hdr; + VTSA_GEN_PER_CPU *gen_per_cpu, *local_gpc; + VTSA_FIXED_SIZE_PTR *fsp; + U8 *cpuid_gen_array_hdr_base; +#if defined(ALLOW_ASSERT) + U8 *cpuid_buffer_limit; +#endif + + SEP_DRV_LOG_TRACE_IN("Buffer: %p.", buffer); + + if (param == NULL) { + cpu = CONTROL_THIS_CPU(); + } else { + cpu = *(S32 *)param; + } + num_cpuids = (U32)sys_info_Get_Num_Cpuid_Funcs( + &basic_funcs, &basic_4_funcs, &extended_funcs); + + // get the GEN_PER_CPU entry for the current processor. 
+ gen_per_cpu = (VTSA_GEN_PER_CPU *)gen_per_cpu_ptr; + SEP_DRV_LOG_TRACE("cpu %x: gen_per_cpu = %p.", cpu, gen_per_cpu); + + // get GEN_ARRAY_HDR and cpuid array base + cpuid_gen_array_hdr_base = + (U8 *)gen_per_cpu + + GLOBAL_STATE_num_cpus(driver_state) * sizeof(VTSA_GEN_PER_CPU); + + SEP_DRV_LOG_TRACE("cpuid_gen_array_hdr_base = %p.", + cpuid_gen_array_hdr_base); + SEP_DRV_LOG_TRACE("cpu = %x.", cpu); + SEP_DRV_LOG_TRACE("cpuid_total_count[cpu] = %x.", + cpuid_total_count[cpu]); + SEP_DRV_LOG_TRACE("sizeof(VTSA_CPUID) = %lx.", sizeof(VTSA_CPUID)); + + cpuid_gen_array_hdr =(VTSA_GEN_ARRAY_HDR *) + ((U8 *)cpuid_gen_array_hdr_base + + sizeof(VTSA_GEN_ARRAY_HDR) * cpu + + cpuid_total_count[cpu] * sizeof(VTSA_CPUID)); + + // get current cpuid array base. + current_cpuid = (VTSA_CPUID *)((U8 *)cpuid_gen_array_hdr + + sizeof(VTSA_GEN_ARRAY_HDR)); +#if defined(ALLOW_ASSERT) + // get the absolute buffer limit + cpuid_buffer_limit = + (U8 *)ioctl_sys_info + + GENERIC_IOCTL_size(&IOCTL_SYS_INFO_gen(ioctl_sys_info)); +#endif + + // + // Fill in GEN_PER_CPU + // + local_gpc = &(gen_per_cpu[cpu]); + + if (VTSA_GEN_PER_CPU_cpu_intel_processor_number(local_gpc)) { + SEP_DRV_LOG_TRACE_OUT( + "Early exit (VTSA_GEN_PER_CPU_cpu_intel_processor_number)."); + return; + } + VTSA_GEN_PER_CPU_cpu_number(local_gpc) = cpu; + VTSA_GEN_PER_CPU_cpu_speed_mhz(local_gpc) = VTSA_NA32; + VTSA_GEN_PER_CPU_cpu_fsb_mhz(local_gpc) = VTSA_NA32; + + fsp = &VTSA_GEN_PER_CPU_cpu_cpuid_array(local_gpc); + VTSA_FIXED_SIZE_PTR_is_ptr(fsp) = 0; + VTSA_FIXED_SIZE_PTR_fs_offset(fsp) = + (U64)((U8 *)cpuid_gen_array_hdr - + (U8 *)&IOCTL_SYS_INFO_sys_info(ioctl_sys_info)); + + /* + * Get the time stamp difference between this cpu and cpu 0. + * This value will be used by user mode code to generate standardize + * time needed for sampling over time (SOT) functionality. 
+ */ + VTSA_GEN_PER_CPU_cpu_tsc_offset(local_gpc) = TSC_SKEW(cpu); + + // + // fill GEN_ARRAY_HDR + // + fsp = &VTSA_GEN_ARRAY_HDR_hdr_next_gen_hdr(cpuid_gen_array_hdr); + VTSA_GEN_ARRAY_HDR_hdr_size(cpuid_gen_array_hdr) = + sizeof(VTSA_GEN_ARRAY_HDR); + VTSA_FIXED_SIZE_PTR_is_ptr(fsp) = 0; + VTSA_FIXED_SIZE_PTR_fs_offset(fsp) = 0; + VTSA_GEN_ARRAY_HDR_array_num_entries(cpuid_gen_array_hdr) = num_cpuids; + VTSA_GEN_ARRAY_HDR_array_entry_size(cpuid_gen_array_hdr) = + sizeof(VTSA_CPUID); + VTSA_GEN_ARRAY_HDR_array_type(cpuid_gen_array_hdr) = GT_CPUID; +#if defined(DRV_IA32) + VTSA_GEN_ARRAY_HDR_array_subtype(cpuid_gen_array_hdr) = GST_X86; +#elif defined(DRV_EM64T) + VTSA_GEN_ARRAY_HDR_array_subtype(cpuid_gen_array_hdr) = GST_EM64T; +#endif + + // + // fill out cpu id information + // + sys_info_Fill_CPUID(num_cpuids, basic_funcs, extended_funcs, cpu, + current_cpuid, gen_per_cpu, local_gpc); + /* + * Mark cpu info on this cpu as successfully built + */ + cpu_built_sysinfo[cpu] = 1; + + SEP_DRV_LOG_TRACE_OUT(""); +} + +/* ------------------------------------------------------------------------- */ +/*! + * @fn static void sys_info_Get_Processor_Info(NULL) + * + * @param None + * @return None + * + * @brief This routine is called to get global informaton on the processor in general, + * it include: + * cpu_thread_per_core + * + */ +static VOID sys_info_Get_Processor_Info(VOID *param) +{ + U64 rax; + U64 rbx; + U64 rcx; + U64 rdx; + U32 family; + U32 model; + DRV_BOOL ht_supported = FALSE; + + SEP_DRV_LOG_TRACE_IN(""); + + // read cpuid with function 1 to find family/model + UTILITY_Read_Cpuid(1, &rax, &rbx, &rcx, &rdx); + family = (U32)(rax >> 8 & 0x0f); + model = (U32)(rax >> 12 & 0xf0); /* extended model bits */ + model |= (U32)(rax >> 4 & 0x0f); + if (is_Knights_family(family, model)) { + cpu_threads_per_core = 4; + } else { + ht_supported = (rdx >> 28) & 1 ? 
TRUE : FALSE; + if (ht_supported) { + cpu_threads_per_core = 2; + } else { + cpu_threads_per_core = 1; + } + } + + SEP_DRV_LOG_TRACE_OUT(""); +} + +/* ------------------------------------------------------------------------- */ +/*! + * @fn extern void SYS_Info_Build(void) + * + * @param None + * @return None + * + * @brief This is the driver routine that constructs the VTSA_SYS_INFO + * @brief structure used to report system information into the tb5 file + * + */ +U32 SYS_INFO_Build(void) +{ + VTSA_GEN_ARRAY_HDR *gen_array_hdr; + VTSA_NODE_INFO *node_info; + VTSA_SYS_INFO *sys_info; + VTSA_FIXED_SIZE_PTR *fsp; + U32 buffer_size; + U32 total_cpuid_entries; + S32 i; + struct sysinfo k_sysinfo; + U32 res; + + SEP_DRV_LOG_TRACE_IN(""); + SEP_DRV_LOG_TRACE("Entered."); + + if (ioctl_sys_info) { + /* The sys info has already been computed. Do not redo */ + buffer_size = + GENERIC_IOCTL_size(&IOCTL_SYS_INFO_gen(ioctl_sys_info)); + return buffer_size - sizeof(GENERIC_IOCTL); + } + + si_meminfo(&k_sysinfo); + + buffer_size = GLOBAL_STATE_num_cpus(driver_state) * sizeof(U32); + cpu_built_sysinfo = CONTROL_Allocate_Memory(buffer_size); + if (cpu_built_sysinfo == NULL) { + SEP_DRV_LOG_ERROR_TRACE_OUT( + "Cpu_built_sysinfo memory alloc failed!"); + return 0; + } + + cpuid_entry_count = CONTROL_Allocate_Memory(buffer_size); + if (cpuid_entry_count == NULL) { + cpu_built_sysinfo = CONTROL_Free_Memory(cpu_built_sysinfo); + SEP_DRV_LOG_ERROR_TRACE_OUT( + "Memory alloc failed for cpuid_entry_count!"); + return 0; + } + + cpuid_total_count = CONTROL_Allocate_Memory(buffer_size); + if (cpuid_total_count == NULL) { + cpu_built_sysinfo = CONTROL_Free_Memory(cpu_built_sysinfo); + cpuid_entry_count = CONTROL_Free_Memory(cpuid_entry_count); + SEP_DRV_LOG_ERROR_TRACE_OUT( + "Memory alloc failed for cpuid_total_count!"); + return 0; + } + + // checking on family-model to set threads_per_core as 4: MIC, 2: ht-on; 1: rest + sys_info_Get_Processor_Info(NULL); + +#if defined(DRV_SEP_ACRN_ON) + 
for (i = 0; i < GLOBAL_STATE_num_cpus(driver_state); i++) { + sys_info_Get_Cpuid_Entry_Count(&i); + } +#else + CONTROL_Invoke_Parallel(sys_info_Get_Cpuid_Entry_Count, NULL); +#endif + + total_cpuid_entries = 0; + for (i = 0; i < GLOBAL_STATE_num_cpus(driver_state); i++) { + //if cpu is offline, set its cpuid count same as cpu0 + if (cpuid_entry_count[i] == 0) { + cpuid_entry_count[i] = cpuid_entry_count[0]; + cpu_built_sysinfo[i] = 0; + } + cpuid_total_count[i] = total_cpuid_entries; + total_cpuid_entries += cpuid_entry_count[i]; + } + + ioctl_sys_info_size = + sys_info_Get_Cpuid_Buffer_Size(total_cpuid_entries); + ioctl_sys_info = CONTROL_Allocate_Memory(ioctl_sys_info_size); + if (ioctl_sys_info == NULL) { + cpuid_entry_count = CONTROL_Free_Memory(cpuid_entry_count); + cpuid_total_count = CONTROL_Free_Memory(cpuid_total_count); + + SEP_DRV_LOG_ERROR_TRACE_OUT( + "Memory alloc failed for ioctl_sys_info!"); + // return STATUS_INSUFFICIENT_RESOURCES; + return 0; + } + + // + // fill in ioctl and cpu_cs_info fields. 
+ // + GENERIC_IOCTL_size(&IOCTL_SYS_INFO_gen(ioctl_sys_info)) = + ioctl_sys_info_size; + GENERIC_IOCTL_ret(&IOCTL_SYS_INFO_gen(ioctl_sys_info)) = VT_SUCCESS; + + sys_info = &IOCTL_SYS_INFO_sys_info(ioctl_sys_info); + VTSA_SYS_INFO_min_app_address(sys_info) = VTSA_NA64; + VTSA_SYS_INFO_max_app_address(sys_info) = VTSA_NA64; + VTSA_SYS_INFO_page_size(sys_info) = k_sysinfo.mem_unit; + VTSA_SYS_INFO_allocation_granularity(sys_info) = k_sysinfo.mem_unit; + + // + // offset from ioctl_sys_info + // + VTSA_FIXED_SIZE_PTR_is_ptr(&VTSA_SYS_INFO_node_array(sys_info)) = 0; + VTSA_FIXED_SIZE_PTR_fs_offset(&VTSA_SYS_INFO_node_array(sys_info)) = + sizeof(VTSA_SYS_INFO); + + // + // fill in node_info array header + // + gen_array_hdr = (VTSA_GEN_ARRAY_HDR *)((U8 *)sys_info + + VTSA_FIXED_SIZE_PTR_fs_offset( + &VTSA_SYS_INFO_node_array(sys_info))); + + SEP_DRV_LOG_TRACE("Gen_array_hdr = %p.", gen_array_hdr); + fsp = &VTSA_GEN_ARRAY_HDR_hdr_next_gen_hdr(gen_array_hdr); + VTSA_FIXED_SIZE_PTR_is_ptr(fsp) = 0; + VTSA_FIXED_SIZE_PTR_fs_offset(fsp) = 0; + + VTSA_GEN_ARRAY_HDR_hdr_size(gen_array_hdr) = sizeof(VTSA_GEN_ARRAY_HDR); + VTSA_GEN_ARRAY_HDR_array_num_entries(gen_array_hdr) = 1; + VTSA_GEN_ARRAY_HDR_array_entry_size(gen_array_hdr) = + sizeof(VTSA_NODE_INFO); + VTSA_GEN_ARRAY_HDR_array_type(gen_array_hdr) = GT_NODE; + VTSA_GEN_ARRAY_HDR_array_subtype(gen_array_hdr) = GST_UNK; + + // + // fill in node_info + // + node_info = (VTSA_NODE_INFO *)((U8 *)gen_array_hdr + + sizeof(VTSA_GEN_ARRAY_HDR)); + SEP_DRV_LOG_TRACE("Node_info = %p.", node_info); + + VTSA_NODE_INFO_node_type_from_shell(node_info) = VTSA_NA32; + + VTSA_NODE_INFO_node_id(node_info) = VTSA_NA32; + VTSA_NODE_INFO_node_num_available(node_info) = + GLOBAL_STATE_num_cpus(driver_state); + VTSA_NODE_INFO_node_num_used(node_info) = VTSA_NA32; + total_ram = k_sysinfo.totalram << PAGE_SHIFT; + VTSA_NODE_INFO_node_physical_memory(node_info) = total_ram; + + fsp = &VTSA_NODE_INFO_node_percpu_array(node_info); + 
VTSA_FIXED_SIZE_PTR_is_ptr(fsp) = 0; + VTSA_FIXED_SIZE_PTR_fs_offset(fsp) = sizeof(VTSA_SYS_INFO) + + sizeof(VTSA_GEN_ARRAY_HDR) + + sizeof(VTSA_NODE_INFO); + // + // fill in gen_per_cpu array header + // + gen_array_hdr = + (VTSA_GEN_ARRAY_HDR *)((U8 *)sys_info + + VTSA_FIXED_SIZE_PTR_fs_offset(fsp)); + SEP_DRV_LOG_TRACE("Gen_array_hdr = %p.", gen_array_hdr); + + fsp = &VTSA_GEN_ARRAY_HDR_hdr_next_gen_hdr(gen_array_hdr); + VTSA_FIXED_SIZE_PTR_is_ptr(fsp) = 0; + VTSA_FIXED_SIZE_PTR_fs_offset(fsp) = 0; + + VTSA_GEN_ARRAY_HDR_hdr_size(gen_array_hdr) = sizeof(VTSA_GEN_ARRAY_HDR); + VTSA_GEN_ARRAY_HDR_array_num_entries(gen_array_hdr) = + GLOBAL_STATE_num_cpus(driver_state); + VTSA_GEN_ARRAY_HDR_array_entry_size(gen_array_hdr) = + sizeof(VTSA_GEN_PER_CPU); + VTSA_GEN_ARRAY_HDR_array_type(gen_array_hdr) = GT_PER_CPU; + +#if defined(DRV_IA32) + VTSA_GEN_ARRAY_HDR_array_subtype(gen_array_hdr) = GST_X86; +#elif defined(DRV_EM64T) + VTSA_GEN_ARRAY_HDR_array_subtype(gen_array_hdr) = GST_EM64T; +#endif + + gen_per_cpu_ptr = (U8 *)gen_array_hdr + sizeof(VTSA_GEN_ARRAY_HDR); + +#if defined(DRV_SEP_ACRN_ON) + for (i = 0; i < GLOBAL_STATE_num_cpus(driver_state); i++) { + APIC_Init(&i); + sys_info_Build_Percpu(&i); + } +#else + CONTROL_Invoke_Parallel(APIC_Init, NULL); + CONTROL_Invoke_Parallel(sys_info_Build_Percpu, NULL); + CONTROL_Invoke_Parallel(sys_info_Update_Hyperthreading_Info, + (VOID *)gen_per_cpu_ptr); +#endif + + /* + * Cleanup - deallocate memory that is no longer needed + */ + cpuid_entry_count = CONTROL_Free_Memory(cpuid_entry_count); + + res = ioctl_sys_info_size - sizeof(GENERIC_IOCTL); + + SEP_DRV_LOG_TRACE_OUT("Res: %u.", res); + return res; +} + +/* ------------------------------------------------------------------------- */ +/*! 
+ * @fn extern void SYS_Info_Transfer(buf_usr_to_drv, len_usr_to_drv) + * + * @param buf_usr_to_drv - pointer to the buffer to write the data into + * @param len_usr_to_drv - length of the buffer passed in + * + * @brief Transfer the data collected via the SYS_INFO_Build routine + * @brief back to the caller. + * + */ +VOID SYS_INFO_Transfer(PVOID buf_usr_to_drv, unsigned long len_usr_to_drv) +{ + unsigned long exp_size; + ssize_t unused; + + SEP_DRV_LOG_TRACE_IN("Buffer: %p, buffer_len: %u.", buf_usr_to_drv, + (U32)len_usr_to_drv); + + if (ioctl_sys_info == NULL || len_usr_to_drv == 0) { + SEP_DRV_LOG_ERROR_TRACE_OUT( + "Ioctl_sys_info is NULL or len_usr_to_drv is 0!"); + return; + } + exp_size = GENERIC_IOCTL_size(&IOCTL_SYS_INFO_gen(ioctl_sys_info)) - + sizeof(GENERIC_IOCTL); + if (len_usr_to_drv < exp_size) { + SEP_DRV_LOG_ERROR_TRACE_OUT("Insufficient Space!"); + return; + } + unused = copy_to_user((void __user *)buf_usr_to_drv, + &(IOCTL_SYS_INFO_sys_info(ioctl_sys_info)), + len_usr_to_drv); + if (unused) { + // no-op ... eliminates "variable not used" compiler warning + } + + SEP_DRV_LOG_TRACE_OUT(""); +} + +/* ------------------------------------------------------------------------- */ +/*! + * @fn extern void SYS_Info_Destroy(void) + * + * @param None + * @return None + * + * @brief Free any memory associated with the sys info before unloading the driver + * + */ +VOID SYS_INFO_Destroy(void) +{ + SEP_DRV_LOG_TRACE_IN(""); + + cpuid_total_count = CONTROL_Free_Memory(cpuid_total_count); + cpu_built_sysinfo = CONTROL_Free_Memory(cpu_built_sysinfo); + ioctl_sys_info = CONTROL_Free_Memory(ioctl_sys_info); + ioctl_sys_info_size = 0; + + SEP_DRV_LOG_TRACE_OUT(""); +} + +/* ------------------------------------------------------------------------- */ +/*! 
+ * @fn extern void SYS_INFO_Build_Cpu(PVOID param) + * + * @param PVOID param + * @return None + * + * @brief call routine to populate cpu info + * + */ +VOID SYS_INFO_Build_Cpu(PVOID param) +{ + VTSA_GEN_ARRAY_HDR *gen_array_hdr; + VTSA_NODE_INFO *node_info; + VTSA_SYS_INFO *sys_info; + VTSA_FIXED_SIZE_PTR *fsp; + + SEP_DRV_LOG_TRACE_IN(""); + + if (!ioctl_sys_info) { + SEP_DRV_LOG_ERROR_TRACE_OUT("Ioctl_sys_info is null!"); + return; + } + sys_info = &IOCTL_SYS_INFO_sys_info(ioctl_sys_info); + gen_array_hdr = + (VTSA_GEN_ARRAY_HDR *)((U8 *)sys_info + + VTSA_FIXED_SIZE_PTR_fs_offset( + &VTSA_SYS_INFO_node_array( + sys_info))); + SEP_DRV_LOG_TRACE("Gen_array_hdr = %p.", gen_array_hdr); + + node_info = (VTSA_NODE_INFO *)((U8 *)gen_array_hdr + + sizeof(VTSA_GEN_ARRAY_HDR)); + SEP_DRV_LOG_TRACE("Node_info = %p.", node_info); + fsp = &VTSA_NODE_INFO_node_percpu_array(node_info); + + gen_array_hdr = + (VTSA_GEN_ARRAY_HDR *)((U8 *)sys_info + + VTSA_FIXED_SIZE_PTR_fs_offset(fsp)); + SEP_DRV_LOG_TRACE("Gen_array_hdr = %p.", gen_array_hdr); + gen_per_cpu_ptr = (U8 *)gen_array_hdr + sizeof(VTSA_GEN_ARRAY_HDR); + + sys_info_Build_Percpu(NULL); + +#if !defined(DRV_SEP_ACRN_ON) + sys_info_Update_Hyperthreading_Info((VOID *)gen_per_cpu_ptr); +#endif + + SEP_DRV_LOG_TRACE_OUT(""); +} diff --git a/drivers/platform/x86/sepdk/sep/unc_common.c b/drivers/platform/x86/sepdk/sep/unc_common.c new file mode 100755 index 0000000000000..9ad1632aaafb0 --- /dev/null +++ b/drivers/platform/x86/sepdk/sep/unc_common.c @@ -0,0 +1,385 @@ +/* **************************************************************************** + * Copyright(C) 2009-2018 Intel Corporation. All Rights Reserved. + * + * This file is part of SEP Development Kit + * + * SEP Development Kit is free software; you can redistribute it + * and/or modify it under the terms of the GNU General Public License + * version 2 as published by the Free Software Foundation. 
+ * + * SEP Development Kit is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * As a special exception, you may use this file as part of a free software + * library without restriction. Specifically, if other files instantiate + * templates or use macros or inline functions from this file, or you + * compile this file and link it with other files to produce an executable + * this file does not by itself cause the resulting executable to be + * covered by the GNU General Public License. This exception does not + * however invalidate any other reasons why the executable file might be + * covered by the GNU General Public License. + * **************************************************************************** + */ + +#include "lwpmudrv_defines.h" +#include "lwpmudrv_types.h" +#include "lwpmudrv_ecb.h" +#include "lwpmudrv_struct.h" + +#include "inc/ecb_iterators.h" +#include "inc/control.h" +#include "inc/pci.h" +#include "inc/unc_common.h" +#include "inc/utility.h" + +extern UNCORE_TOPOLOGY_INFO_NODE uncore_topology; +extern PLATFORM_TOPOLOGY_PROG_NODE platform_topology_prog_node; +extern U64 *read_counter_info; + +/* this is the table to keep pci_bus structure for PCI devices + * for both pci config access and mmio access + */ +UNC_PCIDEV_NODE unc_pcidev_map[MAX_DEVICES]; + +#define GET_PACKAGE_NUM(device_type, cpu) \ + (((device_type) == DRV_SINGLE_INSTANCE) ? 
0 : core_to_package_map[cpu])

/************************************************************/
/*
 * unc common Dispatch functions
 *
 ************************************************************/
// Placeholder dispatch-table entry: intentionally does nothing.
void UNC_COMMON_Dummy_Func(PVOID param)
{
	SEP_DRV_LOG_TRACE_IN("Dummy param: %p.", param);
	SEP_DRV_LOG_TRACE_OUT("Empty function.");
}

/************************************************************/
/*
 * UNC common PCI based API
 *
 ************************************************************/

/*!
 * @fn OS_STATUS UNC_COMMON_Add_Bus_Map
 *
 * @brief This code discovers which package's data is read off of which bus.
 *        Records bus_no in dev_node's bus map (allocating the map on first
 *        use), skipping duplicates; returns OS_NO_MEM on allocation failure
 *        or when the map is full, OS_SUCCESS otherwise.
 *
 * @param uncore_did - uncore device id (currently unused in the body)
 * @param dev_node   - index into unc_pcidev_map
 * @param bus_no     - PCI bus number to record
 *
 * @return OS_STATUS
 *
 * Special Notes:
 *     This probably will move to the UBOX once that is programmed.
 */
OS_STATUS
UNC_COMMON_Add_Bus_Map(U32 uncore_did, U32 dev_node, U32 bus_no)
{
	U32 i = 0;
	U32 entries = 0;

	if (!UNC_PCIDEV_busno_list(&(unc_pcidev_map[dev_node]))) {
		// allocate array for holding bus mapping
		// package based device: an entry per package, all units in the same package are in the same bus.
		// system based device: an entry per unit if in different bus
		entries = GET_MAX_PCIDEV_ENTRIES(num_packages);
		UNC_PCIDEV_busno_list(&(unc_pcidev_map[dev_node])) =
			CONTROL_Allocate_Memory(entries * sizeof(S32));
		if (UNC_PCIDEV_busno_list(&(unc_pcidev_map[dev_node])) ==
		    NULL) {
			SEP_DRV_LOG_ERROR("Memory allocation failure!");
			return OS_NO_MEM;
		}
		UNC_PCIDEV_num_entries(&(unc_pcidev_map[dev_node])) = 0;
		UNC_PCIDEV_max_entries(&(unc_pcidev_map[dev_node])) = entries;
		// mark all slots unused
		for (i = 0; i < entries; i++) {
			UNC_PCIDEV_busno_entry(&(unc_pcidev_map[dev_node]), i) =
				INVALID_BUS_NUMBER;
		}
	} else {
		entries = UNC_PCIDEV_max_entries(&(unc_pcidev_map[dev_node]));
	}

	// duplicate check: the same bus may host several units of one device
	for (i = 0; i < UNC_PCIDEV_num_entries(&(unc_pcidev_map[dev_node]));
	     i++) {
		if (UNC_PCIDEV_busno_entry(&(unc_pcidev_map[dev_node]), i) ==
		    (S32)bus_no) {
			SEP_DRV_LOG_TRACE(
				"Already in the map, another unit, no add.");
			return OS_SUCCESS;
		}
	}
	// i is now num_entries: append in the next free slot if any remain
	if (i < entries) {
		UNC_PCIDEV_busno_entry(&(unc_pcidev_map[dev_node]), i) =
			(S32)bus_no;
		UNC_PCIDEV_num_entries(&(unc_pcidev_map[dev_node]))++;
		SEP_DRV_LOG_TRACE("Add numpackages=%d busno=%x devnode=%d.",
				  num_packages, bus_no, dev_node);
		return OS_SUCCESS;
	}
	SEP_DRV_LOG_ERROR_TRACE_OUT(
		"Exceed max map entries, drop this bus map!");
	return OS_NO_MEM;
}

// Reset the bus-map table and the uncore/platform topology globals.
OS_STATUS UNC_COMMON_Init(void)
{
	U32 i = 0;

	for (i = 0; i < MAX_DEVICES; i++) {
		memset(&(unc_pcidev_map[i]), 0, sizeof(UNC_PCIDEV_NODE));
	}

	memset((char *)&uncore_topology, 0, sizeof(UNCORE_TOPOLOGY_INFO_NODE));
	memset((char *)&platform_topology_prog_node, 0,
	       sizeof(PLATFORM_TOPOLOGY_PROG_NODE));

	return OS_SUCCESS;
}

/*!
 * @fn extern VOID UNC_COMMON_Clean_Up(PVOID)
 *
 * @brief clear out our programming: free each device's bus-number list and
 *        mmio map, then zero the map node.
 *
 * @param None
 *
 * @return None
 */
void UNC_COMMON_Clean_Up(void)
{
	U32 i = 0;
	for (i = 0; i < MAX_DEVICES; i++) {
		if (UNC_PCIDEV_busno_list(&(unc_pcidev_map[i]))) {
			UNC_PCIDEV_busno_list(&(unc_pcidev_map[i])) =
				CONTROL_Free_Memory(UNC_PCIDEV_busno_list(
					&(unc_pcidev_map[i])));
		}
		if (UNC_PCIDEV_mmio_map(&(unc_pcidev_map[i]))) {
			UNC_PCIDEV_mmio_map(&(unc_pcidev_map[i])) =
				CONTROL_Free_Memory(UNC_PCIDEV_mmio_map(
					&(unc_pcidev_map[i])));
		}
		memset(&(unc_pcidev_map[i]), 0, sizeof(UNC_PCIDEV_NODE));
	}
}

/*!
 * @fn static VOID UNC_COMMON_PCI_Scan_For_Uncore(VOID*)
 *
 * @brief Scans every PCI bus (0-255) for the device/function numbers marked
 *        valid for dev_node in uncore_topology; any function whose device id
 *        matches one of the node's known device ids has its bus recorded via
 *        UNC_COMMON_Add_Bus_Map and its found-count incremented.
 *
 * @param param    - opaque, unused ("dummy")
 * @param dev_node - uncore device node index
 * @param callback - device callback (not invoked in this body)
 *
 * @return None
 *
 * Special Notes:
 */

VOID UNC_COMMON_PCI_Scan_For_Uncore(PVOID param, U32 dev_node,
				    DEVICE_CALLBACK callback)
{
	U32 device_id;
	U32 value;
	U32 vendor_id;
	U32 busno;
	U32 j, k, l;
	U32 device_found = 0;

	SEP_DRV_LOG_TRACE_IN("Dummy param: %p, dev_node: %u, callback: %p.",
			     param, dev_node, callback);

	for (busno = 0; busno < 256; busno++) {
		for (j = 0; j < MAX_PCI_DEVNO; j++) {
			if (!(UNCORE_TOPOLOGY_INFO_pcidev_valid(
				    &uncore_topology, dev_node, j))) {
				continue;
			}
			for (k = 0; k < MAX_PCI_FUNCNO; k++) {
				if (!(UNCORE_TOPOLOGY_INFO_pcidev_is_devno_funcno_valid(
					    &uncore_topology, dev_node, j,
					    k))) {
					continue;
				}
				device_found = 0;
				// read vendor/device id dword at config offset 0
				value = PCI_Read_U32_Valid(busno, j, k, 0, 0);
				CONTINUE_IF_NOT_GENUINE_INTEL_DEVICE(
					value, vendor_id, device_id);
				SEP_DRV_LOG_TRACE("Uncore device ID = 0x%x.",
						  device_id);

				// match against this node's known device ids
				for (l = 0;
				     l <
				     UNCORE_TOPOLOGY_INFO_num_deviceid_entries(
					     &uncore_topology, dev_node);
				     l++) {
					if (UNCORE_TOPOLOGY_INFO_deviceid(
						    &uncore_topology, dev_node,
						    l) == device_id) {
						device_found = 1;
						break;
					}
				}
				if (device_found) {
					if (UNC_COMMON_Add_Bus_Map(
						    device_id, dev_node,
						    busno) == OS_SUCCESS) {
						UNCORE_TOPOLOGY_INFO_pcidev_num_entries_found(
							&uncore_topology,
							dev_node, j, k)++;
						SEP_DRV_LOG_DETECTION(
							"Found device 0x%x at BDF(%x:%x:%x) [%u unit(s) so far].",
							device_id, busno, j, k,
							UNCORE_TOPOLOGY_INFO_pcidev_num_entries_found(
								&uncore_topology,
								dev_node, j,
								k));
					}
				}
			}
		}
	}

	SEP_DRV_LOG_TRACE_OUT(""); 
}

/*!
 * @fn extern VOID UNC_COMMON_Get_Platform_Topology()
 *
 * @brief This function will walk through the platform registers to retrieve information and calculate the bus no.
 *        Reads appropriate pci_config regs and populates the PLATFORM_TOPOLOGY_PROG_NODE structure with the reg value.
 *        MSR-type registers are read via SYS_Read_MSR and masked; PCI-type
 *        registers are read from the bus mapped for each package.
 *
 * @param U32 dev_node - Device no.
 *
 * @return None
 *
 * Special Notes:
 *     device_num corresponds to Memory controller
 *     func_num corresponds to Channel number
 *     reg_offset corresponds to dimm slot
 */
VOID UNC_COMMON_Get_Platform_Topology(U32 dev_node)
{
	U32 num_registers = 0;
	U32 bus_num = 0;
	U32 i = 0;
	U32 func_num = 0;
	U32 num_pkgs = num_packages;
	U32 device_num = 0;
	U32 reg_offset = 0;
	U32 len = 0;
	U64 reg_value = 0;
	U32 device_value = 0;
	U64 reg_mask = 0;
	U32 vendor_id;
	U32 device_id;
	U32 valid;

	PLATFORM_TOPOLOGY_REG topology_regs = NULL;

	SEP_DRV_LOG_TRACE_IN("Dev_node: %u.", dev_node);
	PLATFORM_TOPOLOGY_PROG_topology_device_prog_valid(
		&platform_topology_prog_node, dev_node) = 1;

	// clamp to the table capacity; extra packages are simply not recorded
	if (num_packages > MAX_PACKAGES) {
		SEP_DRV_LOG_ERROR(
			"Num_packages %d > MAX_PACKAGE, getting for only %d packages.",
			num_packages, MAX_PACKAGES);
		num_pkgs = MAX_PACKAGES;
	}

	num_registers = PLATFORM_TOPOLOGY_PROG_topology_device_num_registers(
		&platform_topology_prog_node, dev_node);
	topology_regs = PLATFORM_TOPOLOGY_PROG_topology_topology_regs(
		&platform_topology_prog_node, dev_node);

	for (i = 0; i < num_pkgs; i++) {
		for (len = 0; len < num_registers; len++) {
			if (PLATFORM_TOPOLOGY_REG_reg_type(
				    topology_regs, len) == PMU_REG_PROG_MSR) {
				// MSR register: read and apply the mask
				reg_value = SYS_Read_MSR(
					PLATFORM_TOPOLOGY_REG_reg_id(
						topology_regs, len));
				reg_mask = PLATFORM_TOPOLOGY_REG_reg_mask(
					topology_regs, len);
				PLATFORM_TOPOLOGY_REG_reg_value(topology_regs,
								len, i) =
					reg_value & reg_mask;
				SEP_DRV_LOG_TRACE(
					"Read UNCORE_MSR_FREQUENCY 0x%x\n",
					PLATFORM_TOPOLOGY_REG_reg_id(
						topology_regs, len));
			} else {
				// PCI register: needs a discovered bus for
				// this package
				if (!IS_BUS_MAP_VALID(dev_node, i)) {
					continue;
				}
				bus_num = GET_BUS_MAP(dev_node, i);
				device_num = PLATFORM_TOPOLOGY_REG_device(
					topology_regs, len);
				func_num = PLATFORM_TOPOLOGY_REG_function(
					topology_regs, len);
				reg_offset = PLATFORM_TOPOLOGY_REG_reg_id(
					topology_regs, len);
				device_value = PCI_Read_U32_Valid(
					bus_num, device_num, func_num, 0, 0);
				CHECK_IF_GENUINE_INTEL_DEVICE(device_value,
							      vendor_id,
							      device_id, valid);
				SEP_DRV_LOG_TRACE("Uncore device ID = 0x%x.",
						  device_id);
				if (!valid) {
					PLATFORM_TOPOLOGY_REG_device_valid(
						topology_regs, len) = 0;
				}
				PLATFORM_TOPOLOGY_REG_reg_value(topology_regs,
								len, i) =
					PCI_Read_U32_Valid(bus_num, device_num,
							   func_num, reg_offset,
							   PCI_INVALID_VALUE);
			}
		}
		// system-scope devices are read once, not per package
		if (PLATFORM_TOPOLOGY_PROG_topology_device_scope(
			    &platform_topology_prog_node, dev_node) ==
		    SYSTEM_EVENT) {
			break;
		}
	}
	SEP_DRV_LOG_TRACE_OUT("");
}

/************************************************************/
/*
 * UNC common MSR based API
 *
 ************************************************************/

/*!
+ * @fn VOID UNC_COMMON_MSR_Clean_Up(PVOID) + * + * @brief clear out out programming + * + * @param None + * + * @return None + */ +VOID UNC_COMMON_MSR_Clean_Up(VOID *param) +{ + U32 dev_idx; + + SEP_DRV_LOG_TRACE_IN("Param: %p.", param); + dev_idx = *((U32 *)param); + FOR_EACH_REG_ENTRY_UNC(pecb, dev_idx, i) + { + if (ECB_entries_clean_up_get(pecb, i)) { + SYS_Write_MSR(ECB_entries_reg_id(pecb, i), 0LL); + } + } + END_FOR_EACH_REG_ENTRY_UNC; + + SEP_DRV_LOG_TRACE_OUT(""); +} diff --git a/drivers/platform/x86/sepdk/sep/unc_gt.c b/drivers/platform/x86/sepdk/sep/unc_gt.c new file mode 100755 index 0000000000000..3d07888dac331 --- /dev/null +++ b/drivers/platform/x86/sepdk/sep/unc_gt.c @@ -0,0 +1,468 @@ +/* **************************************************************************** + * Copyright(C) 2009-2018 Intel Corporation. All Rights Reserved. + * + * This file is part of SEP Development Kit + * + * SEP Development Kit is free software; you can redistribute it + * and/or modify it under the terms of the GNU General Public License + * version 2 as published by the Free Software Foundation. + * + * SEP Development Kit is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * As a special exception, you may use this file as part of a free software + * library without restriction. Specifically, if other files instantiate + * templates or use macros or inline functions from this file, or you + * compile this file and link it with other files to produce an executable + * this file does not by itself cause the resulting executable to be + * covered by the GNU General Public License. This exception does not + * however invalidate any other reasons why the executable file might be + * covered by the GNU General Public License. 
 * ****************************************************************************
 */
#include "lwpmudrv_defines.h"
#include "lwpmudrv_types.h"
#include "lwpmudrv_ecb.h"
#include "lwpmudrv_struct.h"

#include "inc/ecb_iterators.h"
#include "inc/control.h"
#include "inc/unc_common.h"
#include "inc/utility.h"
#include "inc/pci.h"
#include "inc/unc_gt.h"

extern U64 *read_counter_info;
extern EMON_BUFFER_DRIVER_HELPER emon_buffer_driver_helper;

// MMIO mapping of the GT PMU register block (set up in unc_gt_Write_PMU,
// torn down in unc_gt_Disable_PMU).
static U64 unc_gt_virtual_address;
static SEP_MMIO_NODE unc_gt_map;
// Saved register values so RC6/clock-gating settings can be restored later.
static U32 unc_gt_rc6_reg1;
static U32 unc_gt_rc6_reg2;
static U32 unc_gt_clk_gt_reg1;
static U32 unc_gt_clk_gt_reg2;
static U32 unc_gt_clk_gt_reg3;
static U32 unc_gt_clk_gt_reg4;

/*!
 * @fn static VOID unc_gt_Write_PMU(VOID*)
 *
 * @brief Initial write of PMU registers.
 *        Maps the GT MMIO BAR, clears the counters, then walks through the
 *        entries and writes the value of the register accordingly.
 *
 * @param param - pointer to the device index (U32)
 *
 * @return None
 *
 * Special Notes:
 *     Only the system master cpu performs the programming.
 */
static VOID unc_gt_Write_PMU(VOID *param)
{
	U32 dev_idx;
	ECB pecb;
	DRV_PCI_DEVICE_ENTRY_NODE dpden;
	U64 device_id;
	U32 vendor_id;
	U64 bar_lo;
	U32 offset_delta;
	U32 tmp_value;
	U32 this_cpu;
	U32 value;
	CPU_STATE pcpu;

	SEP_DRV_LOG_TRACE_IN("Param: %p.", param);

	dev_idx = *((U32 *)param);
	pecb = LWPMU_DEVICE_PMU_register_data(&devices[dev_idx])[0];
	this_cpu = CONTROL_THIS_CPU();
	pcpu = &pcb[this_cpu];

	if (!CPU_STATE_system_master(pcpu)) {
		SEP_DRV_LOG_TRACE_OUT("Early exit (!system_master).");
		return;
	}

	// identify the GT device from its PCI vendor/device id dword
	dpden = ECB_pcidev_entry_node(pecb);
	value = PCI_Read_U32(DRV_PCI_DEVICE_ENTRY_bus_no(&dpden),
			     DRV_PCI_DEVICE_ENTRY_dev_no(&dpden),
			     DRV_PCI_DEVICE_ENTRY_func_no(&dpden), 0);
	vendor_id = DRV_GET_PCI_VENDOR_ID(value);
	device_id = DRV_GET_PCI_DEVICE_ID(value);

	if (DRV_IS_INTEL_VENDOR_ID(vendor_id) &&
	    DRV_IS_GT_DEVICE_ID(device_id)) {
		SEP_DRV_LOG_TRACE("Found Desktop GT.");
	}

	// map the GT register block through its BAR
	bar_lo = PCI_Read_U32(DRV_PCI_DEVICE_ENTRY_bus_no(&dpden),
			      DRV_PCI_DEVICE_ENTRY_dev_no(&dpden),
			      DRV_PCI_DEVICE_ENTRY_func_no(&dpden),
			      DRV_PCI_DEVICE_ENTRY_bar_offset(&dpden));
	bar_lo &= UNC_GT_BAR_MASK;

	PCI_Map_Memory(&unc_gt_map, bar_lo, GT_MMIO_SIZE);
	unc_gt_virtual_address = SEP_MMIO_NODE_virtual_address(&unc_gt_map);

	// NOTE(review): loop opened with FOR_EACH_PCI_DATA_REG_RAW but closed
	// with END_FOR_EACH_PCI_CCCR_REG_RAW -- confirm these macros pair up.
	FOR_EACH_PCI_DATA_REG_RAW(pecb, i, dev_idx)
	{
		offset_delta = ECB_entries_reg_offset(pecb, i);
		// this is needed for overflow detection of the accumulators.
		if (LWPMU_DEVICE_counter_mask(&devices[dev_idx]) == 0) {
			LWPMU_DEVICE_counter_mask(&devices[dev_idx]) =
				(U64)ECB_entries_max_bits(pecb, i);
		}
	}
	END_FOR_EACH_PCI_CCCR_REG_RAW;

	//enable the global control to clear the counter first
	SYS_Write_MSR(PERF_GLOBAL_CTRL, ECB_entries_reg_value(pecb, 0));
	FOR_EACH_PCI_CCCR_REG_RAW(pecb, i, dev_idx)
	{
		offset_delta = ECB_entries_reg_offset(pecb, i);
		if (offset_delta == PERF_GLOBAL_CTRL) {
			continue;
		}
		PCI_MMIO_Write_U32(unc_gt_virtual_address, offset_delta,
				   GT_CLEAR_COUNTERS);

		SEP_DRV_LOG_TRACE("CCCR offset delta is 0x%x W is clear ctrs.",
				  offset_delta);
	}
	END_FOR_EACH_PCI_CCCR_REG_RAW;

	//disable the counters
	SYS_Write_MSR(PERF_GLOBAL_CTRL, 0LL);

	// program each control register with its configured value
	FOR_EACH_PCI_CCCR_REG_RAW(pecb, i, dev_idx)
	{
		offset_delta = ECB_entries_reg_offset(pecb, i);
		if (offset_delta == PERF_GLOBAL_CTRL) {
			continue;
		}
		PCI_MMIO_Write_U32(unc_gt_virtual_address, offset_delta,
				   ((U32)ECB_entries_reg_value(pecb, i)));
		tmp_value =
			PCI_MMIO_Read_U32(unc_gt_virtual_address, offset_delta);

		// remove compiler warning on unused variables
		if (tmp_value) {
		}

		SEP_DRV_LOG_TRACE(
			"CCCR offset delta is 0x%x R is 0x%x W is 0x%llx.",
			offset_delta, tmp_value,
			ECB_entries_reg_value(pecb, i));
	}
	END_FOR_EACH_PCI_CCCR_REG_RAW;

	SEP_DRV_LOG_TRACE_OUT("");
}

/*!
+ * @fn static VOID unc_gt_Disable_RC6_Clock_Gating(void) + * + * @brief This snippet of code allows GT events to count by + * disabling settings related to clock gating/power + * @param none + * + * @return None + * + * Special Notes: + */ +static VOID unc_gt_Disable_RC6_Clock_Gating(void) +{ + U32 tmp; + + SEP_DRV_LOG_TRACE_IN(""); + + // Disable RC6 + unc_gt_rc6_reg1 = + PCI_MMIO_Read_U32(unc_gt_virtual_address, UNC_GT_RC6_REG1); + tmp = unc_gt_rc6_reg1 | UNC_GT_RC6_REG1_OR_VALUE; + unc_gt_rc6_reg2 = + PCI_MMIO_Read_U32(unc_gt_virtual_address, UNC_GT_RC6_REG2); + + PCI_MMIO_Write_U32(unc_gt_virtual_address, UNC_GT_RC6_REG2, + UNC_GT_RC6_REG2_VALUE); + PCI_MMIO_Write_U32(unc_gt_virtual_address, UNC_GT_RC6_REG1, tmp); + + SEP_DRV_LOG_TRACE("Original value of RC6 rc6_1 = 0x%x, rc6_2 = 0x%x.", + unc_gt_rc6_reg1, unc_gt_rc6_reg2); + + // Disable clock gating + // Save + unc_gt_clk_gt_reg1 = + PCI_MMIO_Read_U32(unc_gt_virtual_address, UNC_GT_GCPUNIT_REG1); + unc_gt_clk_gt_reg2 = + PCI_MMIO_Read_U32(unc_gt_virtual_address, UNC_GT_GCPUNIT_REG2); + unc_gt_clk_gt_reg3 = + PCI_MMIO_Read_U32(unc_gt_virtual_address, UNC_GT_GCPUNIT_REG3); + unc_gt_clk_gt_reg4 = + PCI_MMIO_Read_U32(unc_gt_virtual_address, UNC_GT_GCPUNIT_REG4); + + SEP_DRV_LOG_TRACE("Original value of RC6 ck_1 = 0x%x, ck_2 = 0x%x.", + unc_gt_clk_gt_reg1, unc_gt_clk_gt_reg2); + SEP_DRV_LOG_TRACE("Original value of RC6 ck_3 = 0x%x, ck_4 = 0x%x.", + unc_gt_clk_gt_reg3, unc_gt_clk_gt_reg4); + + // Disable + PCI_MMIO_Write_U32(unc_gt_virtual_address, UNC_GT_GCPUNIT_REG1, + UNC_GT_GCPUNIT_REG1_VALUE); + PCI_MMIO_Write_U32(unc_gt_virtual_address, UNC_GT_GCPUNIT_REG2, + UNC_GT_GCPUNIT_REG2_VALUE); + PCI_MMIO_Write_U32(unc_gt_virtual_address, UNC_GT_GCPUNIT_REG3, + UNC_GT_GCPUNIT_REG3_VALUE); + PCI_MMIO_Write_U32(unc_gt_virtual_address, UNC_GT_GCPUNIT_REG4, + UNC_GT_GCPUNIT_REG4_VALUE); + + SEP_DRV_LOG_TRACE_OUT(""); +} + +/*! 
+ * @fn static VOID unc_gt_Restore_RC6_Clock_Gating(void) + * + * @brief This snippet of code restores the system settings + * for clock gating/power + * @param none + * + * @return None + * + * Special Notes: + */ +static VOID unc_gt_Restore_RC6_Clock_Gating(void) +{ + SEP_DRV_LOG_TRACE_IN(""); + + PCI_MMIO_Write_U32(unc_gt_virtual_address, UNC_GT_RC6_REG2, + unc_gt_rc6_reg2); + PCI_MMIO_Write_U32(unc_gt_virtual_address, UNC_GT_RC6_REG1, + unc_gt_rc6_reg1); + + PCI_MMIO_Write_U32(unc_gt_virtual_address, UNC_GT_GCPUNIT_REG1, + unc_gt_clk_gt_reg1); + PCI_MMIO_Write_U32(unc_gt_virtual_address, UNC_GT_GCPUNIT_REG2, + unc_gt_clk_gt_reg2); + PCI_MMIO_Write_U32(unc_gt_virtual_address, UNC_GT_GCPUNIT_REG3, + unc_gt_clk_gt_reg3); + PCI_MMIO_Write_U32(unc_gt_virtual_address, UNC_GT_GCPUNIT_REG4, + unc_gt_clk_gt_reg4); + + SEP_DRV_LOG_TRACE_OUT(""); +} + +/*! + * @fn static VOID unc_gt_Enable_PMU(PVOID) + * + * @brief Disable the clock gating and Set the global enable + * + * @param device_id + * + * @return None + * + * Special Notes: + */ +static VOID unc_gt_Enable_PMU(PVOID param) +{ + U32 dev_idx; + ECB pecb; + U32 this_cpu; + CPU_STATE pcpu; + + SEP_DRV_LOG_TRACE_IN("Param: %p.", param); + + dev_idx = *((U32 *)param); + pecb = LWPMU_DEVICE_PMU_register_data(&devices[dev_idx])[0]; + this_cpu = CONTROL_THIS_CPU(); + pcpu = &pcb[this_cpu]; + + if (!CPU_STATE_system_master(pcpu)) { + SEP_DRV_LOG_TRACE_OUT("Early exit (!system_master)."); + return; + } + + unc_gt_Disable_RC6_Clock_Gating(); + + if (pecb && GET_DRIVER_STATE() == DRV_STATE_RUNNING) { + SYS_Write_MSR(PERF_GLOBAL_CTRL, ECB_entries_reg_value(pecb, 0)); + SEP_DRV_LOG_TRACE("Enabling GT Global control = 0x%llx.", + ECB_entries_reg_value(pecb, 0)); + } + + SEP_DRV_LOG_TRACE_OUT(""); +} +/*! 
+ * @fn static VOID unc_gt_Disable_PMU(PVOID)
+ *
+ * @brief Unmap the virtual address when sampling/driver stops
+ * and restore system values for clock gating settings
+ *
+ * @param None
+ *
+ * @return None
+ *
+ * Special Notes:
+ */
+static VOID unc_gt_Disable_PMU(PVOID param)
+{
+	U32 this_cpu;
+	CPU_STATE pcpu;
+	U32 cur_driver_state;
+
+	SEP_DRV_LOG_TRACE_IN("Dummy param: %p.", param);
+
+	this_cpu = CONTROL_THIS_CPU();
+	pcpu = &pcb[this_cpu];
+	cur_driver_state = GET_DRIVER_STATE();
+
+	if (!CPU_STATE_system_master(pcpu)) {
+		SEP_DRV_LOG_TRACE_OUT("Early exit (!system_master).");
+		return;
+	}
+	unc_gt_Restore_RC6_Clock_Gating();
+
+	if (unc_gt_virtual_address &&
+	    (cur_driver_state == DRV_STATE_STOPPED ||
+	     cur_driver_state == DRV_STATE_PREPARE_STOP ||
+	     cur_driver_state == DRV_STATE_TERMINATING)) {
+		SYS_Write_MSR(PERF_GLOBAL_CTRL, 0LL);
+		PCI_Unmap_Memory(&unc_gt_map);
+		unc_gt_virtual_address = 0;
+	}
+
+	SEP_DRV_LOG_TRACE_OUT("");
+}
+
+/*!
+ * @fn unc_gt_Read_Counts(param, id)
+ *
+ * @param param The read thread node to process
+ * @param id The id refers to the device index
+ *
+ * @return None No return needed
+ *
+ * @brief Read the Uncore count data and store into the buffer param;
+ *
+ */
+static VOID unc_gt_Read_Counts(PVOID param, U32 id)
+{
+	U64 *data = (U64 *)param;
+	U32 cur_grp;
+	ECB pecb;
+	U32 offset_delta;
+	U32 tmp_value_lo = 0;
+	U32 tmp_value_hi = 0;
+	GT_CTR_NODE gt_ctr_value;
+	U32 this_cpu;
+	U32 package_num;
+
+	SEP_DRV_LOG_TRACE_IN("Param: %p, id: %u.", param, id);
+
+	this_cpu = CONTROL_THIS_CPU();
+	package_num = core_to_package_map[this_cpu];
+	cur_grp = LWPMU_DEVICE_cur_group(&devices[id])[package_num];
+	pecb = LWPMU_DEVICE_PMU_register_data(&devices[id])[cur_grp];
+
+	// Write GroupID
+	data = (U64 *)((S8 *)data + ECB_group_offset(pecb));
+	*data = cur_grp + 1;
+	GT_CTR_NODE_value_reset(gt_ctr_value);
+
+	//Read in the counts into temporary buffer
+	FOR_EACH_PCI_DATA_REG_RAW(pecb, i, id)
+	{
+		offset_delta =
ECB_entries_reg_offset(pecb, i); + tmp_value_lo = + PCI_MMIO_Read_U32(unc_gt_virtual_address, offset_delta); + offset_delta = offset_delta + NEXT_ADDR_OFFSET; + tmp_value_hi = + PCI_MMIO_Read_U32(unc_gt_virtual_address, offset_delta); + data = (U64 *)((S8 *)param + + ECB_entries_counter_event_offset(pecb, i)); + GT_CTR_NODE_low(gt_ctr_value) = tmp_value_lo; + GT_CTR_NODE_high(gt_ctr_value) = tmp_value_hi; + *data = GT_CTR_NODE_value(gt_ctr_value); + SEP_DRV_LOG_TRACE("DATA offset delta is 0x%x R is 0x%llx.", + offset_delta, + GT_CTR_NODE_value(gt_ctr_value)); + } + END_FOR_EACH_PCI_DATA_REG_RAW; + + SEP_DRV_LOG_TRACE_OUT(""); +} + +static VOID unc_gt_Read_PMU_Data(PVOID param) +{ + U32 j; + U64 *buffer = read_counter_info; + U32 dev_idx; + U32 this_cpu; + CPU_STATE pcpu; + U32 offset_delta; + U32 tmp_value_lo = 0; + U32 tmp_value_hi = 0; + GT_CTR_NODE gt_ctr_value; + U32 package_num = 0; + + SEP_DRV_LOG_DEBUG_IN("Param: %p.", param); + + dev_idx = *((U32 *)param); + this_cpu = CONTROL_THIS_CPU(); + pcpu = &pcb[this_cpu]; + + if (!CPU_STATE_system_master(pcpu)) { + SEP_DRV_LOG_DEBUG_OUT("Early exit (!system_master)."); + return; + } + + package_num = core_to_package_map[this_cpu]; + + FOR_EACH_PCI_DATA_REG_RAW(pecb, i, dev_idx) + { + j = EMON_BUFFER_UNCORE_PACKAGE_EVENT_OFFSET( + package_num, + EMON_BUFFER_DRIVER_HELPER_num_entries_per_package( + emon_buffer_driver_helper), + ECB_entries_uncore_buffer_offset_in_package(pecb, i)); + offset_delta = ECB_entries_reg_offset(pecb, i); + tmp_value_lo = + PCI_MMIO_Read_U32(unc_gt_virtual_address, offset_delta); + offset_delta = offset_delta + NEXT_ADDR_OFFSET; + tmp_value_hi = + PCI_MMIO_Read_U32(unc_gt_virtual_address, offset_delta); + GT_CTR_NODE_low(gt_ctr_value) = tmp_value_lo; + GT_CTR_NODE_high(gt_ctr_value) = tmp_value_hi; + buffer[j] = GT_CTR_NODE_value(gt_ctr_value); + SEP_DRV_LOG_TRACE("j=%u, value=%llu, cpu=%u", j, buffer[j], + this_cpu); + } + END_FOR_EACH_PCI_DATA_REG_RAW; + + SEP_DRV_LOG_DEBUG_OUT(""); +} + 
+/* + * Initialize the dispatch table + */ + +DISPATCH_NODE unc_gt_dispatch = { .init = NULL, + .fini = NULL, + .write = unc_gt_Write_PMU, + .freeze = unc_gt_Disable_PMU, + .restart = unc_gt_Enable_PMU, + .read_data = unc_gt_Read_PMU_Data, + .check_overflow = NULL, + .swap_group = NULL, + .read_lbrs = NULL, + .cleanup = NULL, + .hw_errata = NULL, + .read_power = NULL, + .check_overflow_errata = NULL, + .read_counts = unc_gt_Read_Counts, + .check_overflow_gp_errata = NULL, + .read_ro = NULL, + .platform_info = NULL, + .trigger_read = unc_gt_Read_Counts, + .scan_for_uncore = NULL, + .read_metrics = NULL }; diff --git a/drivers/platform/x86/sepdk/sep/unc_mmio.c b/drivers/platform/x86/sepdk/sep/unc_mmio.c new file mode 100755 index 0000000000000..148925dea2637 --- /dev/null +++ b/drivers/platform/x86/sepdk/sep/unc_mmio.c @@ -0,0 +1,1073 @@ +/* **************************************************************************** + * Copyright(C) 2009-2018 Intel Corporation. All Rights Reserved. + * + * This file is part of SEP Development Kit + * + * SEP Development Kit is free software; you can redistribute it + * and/or modify it under the terms of the GNU General Public License + * version 2 as published by the Free Software Foundation. + * + * SEP Development Kit is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * As a special exception, you may use this file as part of a free software + * library without restriction. Specifically, if other files instantiate + * templates or use macros or inline functions from this file, or you + * compile this file and link it with other files to produce an executable + * this file does not by itself cause the resulting executable to be + * covered by the GNU General Public License. 
This exception does not
+ * however invalidate any other reasons why the executable file might be
+ * covered by the GNU General Public License.
+ * ****************************************************************************
+ */
+
+#include
+
+#include "lwpmudrv_defines.h"
+#include
+#include
+#include
+
+#include "lwpmudrv_types.h"
+#include "lwpmudrv_ecb.h"
+#include "lwpmudrv_struct.h"
+
+#include "lwpmudrv.h"
+#include "utility.h"
+#include "control.h"
+#include "unc_common.h"
+#include "ecb_iterators.h"
+#include "pebs.h"
+#include "inc/pci.h"
+
+extern U64 *read_counter_info;
+extern U64 *prev_counter_data;
+extern DRV_CONFIG drv_cfg;
+extern EMON_BUFFER_DRIVER_HELPER emon_buffer_driver_helper;
+
+#define MASK_32BIT 0xffffffff
+#define MASK_64BIT 0xffffffff00000000ULL
+
+#define IS_MASTER(device_type, cpu) \
+	(((device_type) == DRV_SINGLE_INSTANCE) ? \
+		 CPU_STATE_system_master(&pcb[cpu]) : \
+		 CPU_STATE_socket_master(&pcb[(cpu)]))
+#define GET_PACKAGE_NUM(device_type, cpu) \
+	(((device_type) == DRV_SINGLE_INSTANCE) ? 0 : core_to_package_map[cpu])
+#define IS_64BIT(mask) (((mask) >> 32) != 0)
+
+#define EVENT_COUNTER_MAX_TRY 30
+
+struct FPGA_CONTROL_NODE_S {
+	union {
+		struct {
+			U64 rst_ctrs : 1;
+			U64 rsvd1 : 7;
+			U64 frz : 1;
+			U64 rsvd2 : 7;
+			U64 event_select : 4;
+			U64 port_id : 2;
+			U64 rsvd3 : 1;
+			U64 port_enable : 1;
+			U64 rsvd4 : 40;
+		} bits;
+		U64 bit_field;
+	} u;
+};
+
+static struct FPGA_CONTROL_NODE_S control_node;
+
+/*!
+ * @fn static VOID unc_mmio_Write_PMU(VOID*)
+ *
+ * @brief Initial write of PMU registers
+ * Walk through the entries and write the value of the register accordingly.
+ * When current_group = 0, then this is the first time this routine is called, + * + * @param None + * + * @return None + * + * Special Notes: + */ +static VOID unc_mmio_Write_PMU(VOID *param) +{ + U32 dev_idx; + U32 offset_delta = 0; + DEV_UNC_CONFIG pcfg_unc; + U32 event_id = 0; + U64 tmp_value = 0; + U32 this_cpu; + U32 package_num = 0; + U32 cur_grp; + ECB pecb; + U64 virtual_addr = 0; + U32 idx_w = 0; + U32 event_code = 0; + U32 counter = 0; + U32 entry = 0; + U32 dev_node = 0; + + SEP_DRV_LOG_TRACE_IN("Param: %p.", param); + + dev_idx = *((U32 *)param); + this_cpu = CONTROL_THIS_CPU(); + pcfg_unc = (DEV_UNC_CONFIG)LWPMU_DEVICE_pcfg(&devices[dev_idx]); + if (!IS_MASTER(DEV_UNC_CONFIG_device_type(pcfg_unc), this_cpu)) { + SEP_DRV_LOG_TRACE_OUT("Early exit (!is_master)."); + return; + } + + package_num = + GET_PACKAGE_NUM(DEV_UNC_CONFIG_device_type(pcfg_unc), this_cpu); + cur_grp = LWPMU_DEVICE_cur_group(&devices[(dev_idx)])[package_num]; + pecb = LWPMU_DEVICE_PMU_register_data(&devices[(dev_idx)])[(cur_grp)]; + if (!pecb) { + SEP_DRV_LOG_TRACE_OUT("Early exit (!pecb)."); + return; + } + + dev_node = ECB_dev_node(pecb); + entry = package_num; + if (!IS_MMIO_MAP_VALID(dev_node, entry)) { + SEP_DRV_LOG_ERROR_TRACE_OUT("Early exit (!IS_MMIO_MAP_VALID)."); + return; + } + + virtual_addr = virtual_address_table(dev_node, entry); + + FOR_EACH_REG_UNC_OPERATION(pecb, dev_idx, idx, PMU_OPERATION_WRITE) + { + PCI_MMIO_Write_U64(virtual_addr, ECB_entries_reg_id(pecb, idx), + ECB_entries_reg_value(pecb, idx)); + } + END_FOR_EACH_REG_UNC_OPERATION; + + if (DRV_CONFIG_emon_mode(drv_cfg)) { + SEP_DRV_LOG_TRACE_OUT("Early exit (!event_based_counts)."); + return; + } + + idx_w = ECB_operations_register_start(pecb, PMU_OPERATION_WRITE); + FOR_EACH_REG_UNC_OPERATION(pecb, dev_idx, idx, PMU_OPERATION_READ) + { + if (ECB_entries_reg_offset(pecb, idx) > + DRV_PCI_DEVICE_ENTRY_base_offset_for_mmio( + &ECB_pcidev_entry_node(pecb))) { + offset_delta = + ECB_entries_reg_offset(pecb, 
idx) - + DRV_PCI_DEVICE_ENTRY_base_offset_for_mmio( + &ECB_pcidev_entry_node(pecb)); + } else { + offset_delta = ECB_entries_reg_offset(pecb, idx); + } + + if ((DEV_UNC_CONFIG_device_type(pcfg_unc) == + DRV_SINGLE_INSTANCE) && + (GET_NUM_MAP_ENTRIES(dev_node) > 1)) { + // multiple MMIO mapping per device, find virtual_addr per mapping. + entry = ECB_entries_unit_id(pecb, idx); + virtual_addr = virtual_address_table(dev_node, entry); + } + + if ((ECB_entries_counter_type(pecb, idx) == + PROG_FREERUN_COUNTER) && + (ECB_entries_unit_id(pecb, idx) == 0)) { + //Write event code before reading + PCI_MMIO_Write_U64(virtual_addr, + ECB_entries_reg_id(pecb, idx_w), + ECB_entries_reg_value(pecb, idx_w)); + event_code = (U32)control_node.u.bits.event_select; + idx_w++; + } + + // this is needed for overflow detection of the accumulators. + if (IS_64BIT((U64)(ECB_entries_max_bits(pecb, idx)))) { + if (ECB_entries_counter_type(pecb, idx) == + PROG_FREERUN_COUNTER) { + do { + if (counter > EVENT_COUNTER_MAX_TRY) { + break; + } + tmp_value = SYS_MMIO_Read64( + virtual_addr, offset_delta); + counter++; + } while (event_code != (tmp_value >> 60)); + } + tmp_value = SYS_MMIO_Read64(virtual_addr, offset_delta); + } else { + tmp_value = SYS_MMIO_Read32(virtual_addr, offset_delta); + } + tmp_value &= (U64)ECB_entries_max_bits(pecb, idx); + + LWPMU_DEVICE_prev_value( + &devices[dev_idx])[package_num][event_id] = tmp_value; + SEP_DRV_LOG_TRACE( + "unc_mmio_Write_PMU: cpu[%d], device[%d], package[%d], entry %d, event_id %d, value %llu\n", + this_cpu, dev_idx, package_num, entry, event_id, + tmp_value); + event_id++; + + if (LWPMU_DEVICE_counter_mask(&devices[dev_idx]) == 0) { + LWPMU_DEVICE_counter_mask(&devices[dev_idx]) = + (U64)ECB_entries_max_bits(pecb, idx); + } + } + END_FOR_EACH_REG_UNC_OPERATION; + SEP_DRV_LOG_TRACE( + "BAR address is 0x%llx and virt is 0x%llx.", + DRV_PCI_DEVICE_ENTRY_bar_address(&ECB_pcidev_entry_node(pecb)), + virtual_addr); + + SEP_DRV_LOG_TRACE_OUT(""); +} + 
+/*! + * @fn static VOID unc_mmio_Enable_PMU(PVOID) + * + * @brief Capture the previous values to calculate delta later. + * + * @param None + * + * @return None + * + * Special Notes: + */ +static void unc_mmio_Enable_PMU(PVOID param) +{ + U32 j; + U64 *buffer = prev_counter_data; + U32 this_cpu; + U32 dev_idx; + DEV_UNC_CONFIG pcfg_unc; + U32 package_num; + U32 offset_delta; + U32 cur_grp; + ECB pecb; + U64 virtual_addr = 0; + U64 reg_val = 0; + U32 idx_w = 0; + U32 event_code = 0; + U32 counter = 0; + U32 entry = 0; + U32 dev_node = 0; + + SEP_DRV_LOG_TRACE_IN("Param: %p.", param); + + dev_idx = *((U32 *)param); + this_cpu = CONTROL_THIS_CPU(); + pcfg_unc = (DEV_UNC_CONFIG)LWPMU_DEVICE_pcfg(&devices[dev_idx]); + if (!IS_MASTER(DEV_UNC_CONFIG_device_type(pcfg_unc), this_cpu)) { + SEP_DRV_LOG_TRACE_OUT("Early exit (!IS_MASTER)."); + return; + } + + package_num = + GET_PACKAGE_NUM(DEV_UNC_CONFIG_device_type(pcfg_unc), this_cpu); + cur_grp = LWPMU_DEVICE_cur_group(&devices[(dev_idx)])[package_num]; + pecb = LWPMU_DEVICE_PMU_register_data(&devices[(dev_idx)])[(cur_grp)]; + if (!pecb) { + SEP_DRV_LOG_TRACE_OUT("Early exit (!pecb)."); + return; + } + + dev_node = ECB_dev_node(pecb); + entry = package_num; + if (!IS_MMIO_MAP_VALID(dev_node, entry)) { + SEP_DRV_LOG_ERROR_TRACE_OUT("Early exit (!IS_MMIO_MAP_VALID)."); + return; + } + + virtual_addr = virtual_address_table(dev_node, entry); + + // NOTE THAT the enable function currently captures previous values + // for EMON collection to avoid unnecessary memory copy. 
+ if (DRV_CONFIG_emon_mode(drv_cfg)) { + idx_w = ECB_operations_register_start(pecb, + PMU_OPERATION_WRITE); + FOR_EACH_REG_UNC_OPERATION(pecb, dev_idx, idx, + PMU_OPERATION_READ) + { + if (ECB_entries_reg_offset(pecb, idx) > + DRV_PCI_DEVICE_ENTRY_base_offset_for_mmio( + &ECB_pcidev_entry_node(pecb))) { + offset_delta = + ECB_entries_reg_offset(pecb, idx) - + DRV_PCI_DEVICE_ENTRY_base_offset_for_mmio( + &ECB_pcidev_entry_node(pecb)); + } else { + offset_delta = + ECB_entries_reg_offset(pecb, idx); + } + + if ((DEV_UNC_CONFIG_device_type(pcfg_unc) == + DRV_SINGLE_INSTANCE) && + (GET_NUM_MAP_ENTRIES(dev_node) > 1)) { + // multiple MMIO mapping per device, find virtual_addr per mapping. + entry = ECB_entries_unit_id(pecb, idx); + virtual_addr = + virtual_address_table(dev_node, entry); + } + + if ((ECB_entries_counter_type(pecb, idx) == + PROG_FREERUN_COUNTER) && + (ECB_entries_unit_id(pecb, idx) == 0)) { + PCI_MMIO_Write_U64( + virtual_addr, + ECB_entries_reg_id(pecb, idx_w), + ECB_entries_reg_value(pecb, idx_w)); + control_node.u.bit_field = + ECB_entries_reg_value(pecb, idx_w); + event_code = + (U32)control_node.u.bits.event_select; + idx_w++; + } + + if ((ECB_entries_event_scope(pecb, idx) == + PACKAGE_EVENT) || + (ECB_entries_event_scope(pecb, idx) == + SYSTEM_EVENT)) { + if (ECB_entries_event_scope(pecb, idx) == + SYSTEM_EVENT) { + j = ECB_entries_uncore_buffer_offset_in_system( + pecb, idx); + } else { + j = EMON_BUFFER_UNCORE_PACKAGE_EVENT_OFFSET( + package_num, + EMON_BUFFER_DRIVER_HELPER_num_entries_per_package( + emon_buffer_driver_helper), + ECB_entries_uncore_buffer_offset_in_package( + pecb, idx)); + } + + if (IS_64BIT((U64)( + ECB_entries_max_bits(pecb, idx)))) { + if (ECB_entries_counter_type(pecb, + idx) == + PROG_FREERUN_COUNTER) { + do { + if (counter > + EVENT_COUNTER_MAX_TRY) { + break; + } + buffer[j] = SYS_MMIO_Read64( + virtual_addr, + offset_delta); + counter++; + } while (event_code != + (buffer[j] >> 60)); + } + buffer[j] = SYS_MMIO_Read64( 
+ virtual_addr, offset_delta); + } else { + buffer[j] = SYS_MMIO_Read32( + virtual_addr, offset_delta); + } + buffer[j] &= + (U64)ECB_entries_max_bits(pecb, idx); + SEP_DRV_LOG_TRACE( + "j=%u, value=%llu, cpu=%u, MSR=0x%x", j, + buffer[j], this_cpu, + ECB_entries_reg_id(pecb, idx)); + } + } + END_FOR_EACH_REG_UNC_OPERATION; + } + virtual_addr = virtual_address_table(dev_node, entry); + FOR_EACH_REG_UNC_OPERATION(pecb, dev_idx, idx, PMU_OPERATION_ENABLE) + { + if (ECB_entries_reg_rw_type(pecb, idx) == + PMU_REG_RW_READ_WRITE) { + reg_val = PCI_MMIO_Read_U64( + virtual_addr, ECB_entries_reg_id(pecb, idx)); + reg_val &= ECB_entries_reg_value(pecb, idx); + PCI_MMIO_Write_U64(virtual_addr, + ECB_entries_reg_id(pecb, idx), + reg_val); + } + } + END_FOR_EACH_REG_UNC_OPERATION; + + SEP_DRV_LOG_TRACE_OUT(""); +} + +/*! + * @fn static VOID unc_mmio_Disable_PMU(PVOID) + * + * @brief Unmap the virtual address when you stop sampling. + * + * @param None + * + * @return None + * + * Special Notes: + */ +static void unc_mmio_Disable_PMU(PVOID param) +{ + U32 dev_idx; + U32 this_cpu; + U64 virtual_addr = 0; + U64 reg_val = 0; + DEV_UNC_CONFIG pcfg_unc; + U32 package_num; + U32 dev_node = 0; + U32 cur_grp = 0; + ECB pecb; + U32 entry = 0; + + SEP_DRV_LOG_TRACE_IN("Param: %p.", param); + + dev_idx = *((U32 *)param); + this_cpu = CONTROL_THIS_CPU(); + pcfg_unc = (DEV_UNC_CONFIG)LWPMU_DEVICE_pcfg(&devices[dev_idx]); + if (!IS_MASTER(DEV_UNC_CONFIG_device_type(pcfg_unc), this_cpu)) { + SEP_DRV_LOG_TRACE_OUT("Early exit (!IS_MASTER)."); + return; + } + + package_num = + GET_PACKAGE_NUM(DEV_UNC_CONFIG_device_type(pcfg_unc), this_cpu); + cur_grp = LWPMU_DEVICE_cur_group(&devices[dev_idx])[package_num]; + pecb = LWPMU_DEVICE_PMU_register_data(&devices[dev_idx])[(cur_grp)]; + if (!pecb) { + SEP_DRV_LOG_TRACE_OUT("Early exit (!pecb)."); + return; + } + + dev_node = ECB_dev_node(pecb); + entry = package_num; + if (!IS_MMIO_MAP_VALID(dev_node, entry)) { + SEP_DRV_LOG_ERROR_TRACE_OUT("Early 
exit (!IS_MMIO_MAP_VALID)."); + return; + } + + virtual_addr = virtual_address_table(dev_node, entry); + + FOR_EACH_REG_UNC_OPERATION(pecb, dev_idx, idx, PMU_OPERATION_DISABLE) + { + if (ECB_entries_reg_rw_type(pecb, idx) == + PMU_REG_RW_READ_WRITE) { + reg_val = PCI_MMIO_Read_U64( + virtual_addr, ECB_entries_reg_id(pecb, idx)); + reg_val |= ECB_entries_reg_value(pecb, idx); + PCI_MMIO_Write_U64(virtual_addr, + ECB_entries_reg_id(pecb, idx), + reg_val); + } + } + END_FOR_EACH_REG_UNC_OPERATION; + + SEP_DRV_LOG_TRACE_OUT(""); +} + +/* ------------------------------------------------------------------------- */ +/*! + * @fn void unc_mmio_Trigger_Read(id) + * + * @param id Device index + * + * @return None No return needed + * + * @brief Read the Uncore data from counters and store into buffer + */ +static VOID unc_mmio_Trigger_Read(PVOID param, U32 id) +{ + U32 this_cpu; + U32 cur_grp; + ECB pecb; + U32 index = 0; + U64 diff = 0; + U32 offset_delta = 0; + U64 value = 0ULL; + U64 *data; + U64 virtual_addr = 0; + DEV_UNC_CONFIG pcfg_unc; + U32 package_num; + U32 idx_w = 0; + U32 event_code = 0; + U32 counter = 0; + U32 entry = 0; + U32 dev_node = 0; + + SEP_DRV_LOG_TRACE_IN("Param: %p, id: %u.", param, id); + + this_cpu = CONTROL_THIS_CPU(); + pcfg_unc = (DEV_UNC_CONFIG)LWPMU_DEVICE_pcfg(&devices[id]); + if (!IS_MASTER(DEV_UNC_CONFIG_device_type(pcfg_unc), this_cpu)) { + SEP_DRV_LOG_TRACE_OUT("Early exit (!IS_MASTER)."); + return; + } + + package_num = + GET_PACKAGE_NUM(DEV_UNC_CONFIG_device_type(pcfg_unc), this_cpu); + cur_grp = LWPMU_DEVICE_cur_group(&devices[id])[package_num]; + pecb = LWPMU_DEVICE_PMU_register_data(&devices[id])[(cur_grp)]; + if (!pecb) { + SEP_DRV_LOG_TRACE_OUT("Early exit (!pecb)."); + return; + } + + dev_node = ECB_dev_node(pecb); + entry = package_num; + if (!IS_MMIO_MAP_VALID(dev_node, entry)) { + SEP_DRV_LOG_ERROR_TRACE_OUT("Early exit (!IS_MMIO_MAP_VALID)."); + return; + } + + virtual_addr = virtual_address_table(dev_node, entry); + + // 
Write GroupID + data = (U64 *)((S8 *)param + ECB_group_offset(pecb)); + *data = cur_grp + 1; + //Read in the counts into temporary buffer + idx_w = ECB_operations_register_start(pecb, PMU_OPERATION_WRITE); + FOR_EACH_REG_UNC_OPERATION(pecb, id, idx, PMU_OPERATION_READ) + { + if (ECB_entries_reg_offset(pecb, idx) > + DRV_PCI_DEVICE_ENTRY_base_offset_for_mmio( + &ECB_pcidev_entry_node(pecb))) { + offset_delta = + ECB_entries_reg_offset(pecb, idx) - + DRV_PCI_DEVICE_ENTRY_base_offset_for_mmio( + &ECB_pcidev_entry_node(pecb)); + } else { + offset_delta = ECB_entries_reg_offset(pecb, idx); + } + + if ((DEV_UNC_CONFIG_device_type(pcfg_unc) == + DRV_SINGLE_INSTANCE) && + (GET_NUM_MAP_ENTRIES(dev_node) > 1)) { + // multiple MMIO mapping per device + entry = ECB_entries_unit_id(pecb, idx); + virtual_addr = virtual_address_table(dev_node, entry); + } + + if ((ECB_entries_counter_type(pecb, idx) == + PROG_FREERUN_COUNTER) && + (ECB_entries_unit_id(pecb, idx) == 0)) { + PCI_MMIO_Write_U64(virtual_addr, + ECB_entries_reg_id(pecb, idx_w), + ECB_entries_reg_value(pecb, idx_w)); + control_node.u.bit_field = + ECB_entries_reg_value(pecb, idx_w); + event_code = (U32)control_node.u.bits.event_select; + idx_w++; + } + + if (IS_64BIT((U64)(ECB_entries_max_bits(pecb, idx)))) { + if (ECB_entries_counter_type(pecb, idx) == + PROG_FREERUN_COUNTER) { + do { + if (counter > EVENT_COUNTER_MAX_TRY) { + break; + } + value = SYS_MMIO_Read64(virtual_addr, + offset_delta); + counter++; + } while (event_code != (value >> 60)); + } + value = SYS_MMIO_Read64(virtual_addr, offset_delta); + } else { + value = SYS_MMIO_Read32(virtual_addr, offset_delta); + } + value &= (U64)ECB_entries_max_bits(pecb, idx); + + data = (U64 *)((S8 *)param + + ECB_entries_counter_event_offset(pecb, idx)); + //check for overflow if not a static counter + if (ECB_entries_counter_type(pecb, idx) == STATIC_COUNTER) { + *data = value; + } else { + if (value < LWPMU_DEVICE_prev_value( + &devices[id])[package_num][index]) { + 
diff = LWPMU_DEVICE_counter_mask(&devices[id]) - + LWPMU_DEVICE_prev_value( + &devices[id])[package_num][index]; + diff += value; + } else { + diff = value - + LWPMU_DEVICE_prev_value( + &devices[id])[package_num][index]; + } + LWPMU_DEVICE_acc_value( + &devices[id])[package_num][cur_grp][index] += + diff; + LWPMU_DEVICE_prev_value( + &devices[id])[package_num][index] = value; + *data = LWPMU_DEVICE_acc_value( + &devices[id])[package_num][cur_grp][index]; + } + index++; + } + END_FOR_EACH_REG_UNC_OPERATION; + + SEP_DRV_LOG_TRACE_OUT(""); +} + +/* ------------------------------------------------------------------------- */ +/*! + * @fn unc_mmio_Read_PMU_Data(param) + * + * @param param dummy parameter which is not used + * + * @return None No return needed + * + * @brief Read all the data MSR's into a buffer. Called by the interrupt handler. + * + */ +static VOID unc_mmio_Read_PMU_Data(PVOID param) +{ + U32 j; + U64 *buffer = read_counter_info; + U64 *prev_buffer = prev_counter_data; + U32 this_cpu; + U32 dev_idx; + DEV_UNC_CONFIG pcfg_unc; + U32 offset_delta; + U32 cur_grp; + ECB pecb; + U64 tmp_value = 0ULL; + U64 virtual_addr = 0; + U32 idx_w = 0; + U32 event_code = 0; + U32 counter = 0; + U32 package_num; + U32 entry = 0; + U32 dev_node = 0; + + SEP_DRV_LOG_TRACE_IN("Param: %p.", param); + + dev_idx = *((U32 *)param); + this_cpu = CONTROL_THIS_CPU(); + pcfg_unc = (DEV_UNC_CONFIG)LWPMU_DEVICE_pcfg(&devices[dev_idx]); + if (!IS_MASTER(DEV_UNC_CONFIG_device_type(pcfg_unc), this_cpu)) { + SEP_DRV_LOG_TRACE_OUT("Early exit (!IS_MASTER)."); + return; + } + + package_num = + GET_PACKAGE_NUM(DEV_UNC_CONFIG_device_type(pcfg_unc), this_cpu); + cur_grp = LWPMU_DEVICE_cur_group(&devices[(dev_idx)])[package_num]; + pecb = LWPMU_DEVICE_PMU_register_data(&devices[(dev_idx)])[(cur_grp)]; + if (!pecb) { + SEP_DRV_LOG_TRACE_OUT("Early exit (!pecb)."); + return; + } + + dev_node = ECB_dev_node(pecb); + entry = package_num; + if (!IS_MMIO_MAP_VALID(dev_node, entry)) { + 
SEP_DRV_LOG_ERROR_TRACE_OUT("Early exit (!IS_MMIO_MAP_VALID)."); + return; + } + + virtual_addr = virtual_address_table(dev_node, entry); + + idx_w = ECB_operations_register_start(pecb, PMU_OPERATION_WRITE); + + FOR_EACH_REG_UNC_OPERATION(pecb, dev_idx, idx, PMU_OPERATION_READ) + { + if (ECB_entries_reg_offset(pecb, idx) > + DRV_PCI_DEVICE_ENTRY_base_offset_for_mmio( + &ECB_pcidev_entry_node(pecb))) { + offset_delta = + ECB_entries_reg_offset(pecb, idx) - + DRV_PCI_DEVICE_ENTRY_base_offset_for_mmio( + &ECB_pcidev_entry_node(pecb)); + } else { + offset_delta = ECB_entries_reg_offset(pecb, idx); + } + + if ((DEV_UNC_CONFIG_device_type(pcfg_unc) == + DRV_SINGLE_INSTANCE) && + (GET_NUM_MAP_ENTRIES(dev_node) > 1)) { + // multiple MMIO mapping per device, find virtual_addr per mapping. + entry = ECB_entries_unit_id(pecb, idx); + virtual_addr = virtual_address_table(dev_node, entry); + } + + if ((ECB_entries_counter_type(pecb, idx) == + PROG_FREERUN_COUNTER) && + (ECB_entries_unit_id(pecb, idx) == 0)) { + PCI_MMIO_Write_U64(virtual_addr, + ECB_entries_reg_id(pecb, idx_w), + ECB_entries_reg_value(pecb, idx_w)); + control_node.u.bit_field = + ECB_entries_reg_value(pecb, idx_w); + event_code = (U32)control_node.u.bits.event_select; + idx_w++; + } + + if ((ECB_entries_event_scope(pecb, idx) == PACKAGE_EVENT) || + (ECB_entries_event_scope(pecb, idx) == SYSTEM_EVENT)) { + if (ECB_entries_event_scope(pecb, idx) == + SYSTEM_EVENT) { + j = ECB_entries_uncore_buffer_offset_in_system( + pecb, idx); + } else { + j = EMON_BUFFER_UNCORE_PACKAGE_EVENT_OFFSET( + package_num, + EMON_BUFFER_DRIVER_HELPER_num_entries_per_package( + emon_buffer_driver_helper), + ECB_entries_uncore_buffer_offset_in_package( + pecb, idx)); + } + + if (IS_64BIT((U64)(ECB_entries_max_bits(pecb, idx)))) { + if (ECB_entries_counter_type(pecb, idx) == + PROG_FREERUN_COUNTER) { + do { + if (counter > + EVENT_COUNTER_MAX_TRY) { + break; + } + tmp_value = SYS_MMIO_Read64( + virtual_addr, + offset_delta); + counter++; 
+ } while (event_code != + (tmp_value >> 60)); + } + tmp_value = SYS_MMIO_Read64(virtual_addr, + offset_delta); + } else { + tmp_value = SYS_MMIO_Read32(virtual_addr, + offset_delta); + } + tmp_value &= (U64)ECB_entries_max_bits(pecb, idx); + if (ECB_entries_counter_type(pecb, idx) == + STATIC_COUNTER) { + buffer[j] = tmp_value; + } else { + if (tmp_value >= prev_buffer[j]) { + buffer[j] = tmp_value - prev_buffer[j]; + } else { + buffer[j] = tmp_value + + (ECB_entries_max_bits(pecb, + idx) - + prev_buffer[j]); + } + } + SEP_DRV_LOG_TRACE("j=%u, value=%llu, cpu=%u, MSR=0x%x", + j, buffer[j], this_cpu, + ECB_entries_reg_id(pecb, idx)); + } + } + END_FOR_EACH_REG_UNC_OPERATION; + + SEP_DRV_LOG_TRACE_OUT(""); +} + +/* ------------------------------------------------------------------------- */ +/*! + * @fn unc_mmio_Initialize(param) + * + * @param param dummy parameter which is not used + * + * @return None No return needed + * + * @brief Do the mapping of the physical address (to do the invalidates in the TLB) + * NOTE: this should never be done with SMP call + * + */ +static VOID unc_mmio_Initialize(PVOID param) +{ + DRV_PCI_DEVICE_ENTRY_NODE dpden; + + U64 bar; + + U64 physical_address; + U32 dev_idx = 0; + U32 cur_grp = 0; + ECB pecb = NULL; + U32 dev_node; + U32 i = 0; + U32 page_len = 4096; // 4K + + U32 use_default_busno = 0; + U32 entries = 0; + + SEP_DRV_LOG_TRACE_IN("Param: %p.", param); + + dev_idx = *((U32 *)param); + cur_grp = LWPMU_DEVICE_cur_group(&devices[(dev_idx)])[0]; + pecb = LWPMU_DEVICE_PMU_register_data(&devices[dev_idx])[cur_grp]; + + if (!pecb) { + SEP_DRV_LOG_TRACE_OUT("Early exit (!pecb)."); + return; + } + dev_node = ECB_dev_node(pecb); + + if (IS_MMIO_MAP_VALID(dev_node, 0)) { + SEP_DRV_LOG_INIT_TRACE_OUT( + "Early exit (device[%d] node %d already mapped).", + dev_idx, dev_node); + return; + } + + dpden = ECB_pcidev_entry_node(pecb); + + // use busno found from topology scan if available + // otherwise use the default one + entries = 
GET_NUM_MAP_ENTRIES(dev_node);
+	if (entries == 0) {
+		use_default_busno = 1;
+		entries = 1; // this could be the client, which does not go through the scan
+		UNC_PCIDEV_num_entries(&(unc_pcidev_map[dev_node])) = 1;
+		UNC_PCIDEV_max_entries(&(unc_pcidev_map[dev_node])) = 1;
+	}
+	if (!UNC_PCIDEV_mmio_map(&(unc_pcidev_map[dev_node]))) {
+		// it is better to allocate space in the beginning
+		UNC_PCIDEV_mmio_map(&(unc_pcidev_map[dev_node])) =
+			CONTROL_Allocate_Memory(entries *
+						sizeof(SEP_MMIO_NODE));
+		if (UNC_PCIDEV_mmio_map(&(unc_pcidev_map[dev_node])) == NULL) {
+			SEP_DRV_LOG_ERROR_TRACE_OUT("Early exit (No Memory).");
+			return;
+		}
+		memset(UNC_PCIDEV_mmio_map(&(unc_pcidev_map[dev_node])), 0,
+		       entries * sizeof(SEP_MMIO_NODE));
+	}
+	for (i = 0; i < entries; i++) {
+		if (!use_default_busno) {
+			if (IS_BUS_MAP_VALID(dev_node, i)) {
+				DRV_PCI_DEVICE_ENTRY_bus_no(&dpden) =
+					UNC_PCIDEV_busno_entry(
+						&(unc_pcidev_map[dev_node]), i);
+			}
+		}
+
+		bar = PCI_Read_U64(DRV_PCI_DEVICE_ENTRY_bus_no(&dpden),
+				   DRV_PCI_DEVICE_ENTRY_dev_no(&dpden),
+				   DRV_PCI_DEVICE_ENTRY_func_no(&dpden),
+				   DRV_PCI_DEVICE_ENTRY_bar_offset(&dpden));
+
+		bar &= DRV_PCI_DEVICE_ENTRY_bar_mask(&dpden);
+
+		DRV_PCI_DEVICE_ENTRY_bar_address(&ECB_pcidev_entry_node(pecb)) =
+			bar;
+		physical_address = DRV_PCI_DEVICE_ENTRY_bar_address(
+					   &ECB_pcidev_entry_node(pecb)) +
+				   DRV_PCI_DEVICE_ENTRY_base_offset_for_mmio(
+					   &ECB_pcidev_entry_node(pecb));
+
+		PCI_Map_Memory(&UNC_PCIDEV_mmio_map_entry(
+				       &(unc_pcidev_map[dev_node]), i),
+			       physical_address, page_len);
+	}
+	SEP_DRV_LOG_TRACE_OUT("");
+}
+
+/* ------------------------------------------------------------------------- */
+/*!
 * @fn unc_mmio_fpga_Initialize(param)
 *
 * @param param device index (passed as U32*)
 *
 * @return None No return needed
 *
 * @brief Map the FPGA counter MMIO space for each package: read the BAR
 *        from PCI config space, walk the Device Feature Header (DFH) list
 *        on package 0 to locate the feature of interest, then map 8 pages
 *        at (BAR + feature offset) for every package.
 * NOTE: this should never be done with SMP call
 *
 */
static VOID unc_mmio_fpga_Initialize(PVOID param)
{
#if defined(DRV_EM64T)
	U64 phys_addr;
	SEP_MMIO_NODE tmp_map = { 0 };
	U64 virt_addr;
	U64 dfh;
	U32 id;
	U32 offset = 0;
	S32 next_offset = -1; /* sentinel (-1) forces entry into the DFH walk */
	U32 dev_idx;
	U32 cur_grp;
	ECB pecb;
	/* NOTE(review): bus numbers for packages 0/1 are hard-coded here —
	 * confirm they hold for all supported platforms. */
	U32 bus_list[2] = { 0x5e, 0xbe };
	U32 busno;
	U32 page_len = 4096;
	U32 package_num = 0;
	U32 dev_node = 0;
	U32 entries = 0;
	DRV_PCI_DEVICE_ENTRY_NODE dpden;

	SEP_DRV_LOG_TRACE_IN("Param: %p.", param);

	dev_idx = *((U32 *)param);
	cur_grp = LWPMU_DEVICE_cur_group(&devices[(dev_idx)])[0];
	pecb = LWPMU_DEVICE_PMU_register_data(&devices[dev_idx])[cur_grp];

	if (!pecb) {
		SEP_DRV_LOG_TRACE_OUT("Early exit (!pecb).");
		return;
	}

	dev_node = ECB_dev_node(pecb);

	/* Default to one mapping per package when no explicit map exists. */
	entries = GET_NUM_MAP_ENTRIES(dev_node);
	if (entries == 0) {
		entries = num_packages;
	}

	if (!UNC_PCIDEV_mmio_map(&(unc_pcidev_map[dev_node]))) {
		// it is better to allocate space in the beginning
		UNC_PCIDEV_mmio_map(&(unc_pcidev_map[dev_node])) =
			CONTROL_Allocate_Memory(entries *
						sizeof(SEP_MMIO_NODE));
		if (UNC_PCIDEV_mmio_map(&(unc_pcidev_map[dev_node])) == NULL) {
			SEP_DRV_LOG_ERROR_TRACE_OUT("Early exit (No Memory).");
			return;
		}
		memset(UNC_PCIDEV_mmio_map(&(unc_pcidev_map[dev_node])), 0,
		       (entries * sizeof(SEP_MMIO_NODE)));
		UNC_PCIDEV_num_entries(&(unc_pcidev_map[dev_node])) = 0;
		UNC_PCIDEV_max_entries(&(unc_pcidev_map[dev_node])) = entries;
	} else {
		/* Map already allocated: skip if this node was mapped before. */
		if (virtual_address_table(dev_node, 0) != 0) {
			SEP_DRV_LOG_INIT_TRACE_OUT(
				"Early exit (device[%d] node %d already mapped).",
				dev_idx, dev_node);
			return;
		}
	}

	dpden = ECB_pcidev_entry_node(pecb);

	for (package_num = 0; package_num < num_packages;
	     package_num++) {
		if (package_num < 2) {
			busno = bus_list[package_num];
		} else {
			busno = 0;
		}
		/* Read and mask the BAR to get the MMIO physical base. */
		phys_addr =
			PCI_Read_U64(busno, DRV_PCI_DEVICE_ENTRY_dev_no(&dpden),
				     DRV_PCI_DEVICE_ENTRY_func_no(&dpden),
				     DRV_PCI_DEVICE_ENTRY_bar_offset(&dpden));
		phys_addr &= DRV_PCI_DEVICE_ENTRY_bar_mask(&dpden);
		if (package_num == 0) {
			/*
			 * Walk the DFH chain through a temporary mapping:
			 * each header packs the next-header offset in
			 * bits 39:16 and the feature id in bits 11:0.
			 * The resulting offset is reused for all packages.
			 */
			PCI_Map_Memory(&tmp_map, phys_addr, 8 * page_len);
			virt_addr = SEP_MMIO_NODE_virtual_address(&tmp_map);
			while (next_offset != 0) {
				dfh = SYS_MMIO_Read64((U64)virt_addr, offset);
				next_offset = (U32)((dfh >> 16) & 0xffffff);
				id = (U32)(dfh & 0xfff);
				if (offset &&
				    (id ==
				     DRV_PCI_DEVICE_ENTRY_feature_id(&dpden))) {
					break;
				}
				offset += next_offset;
			}
			PCI_Unmap_Memory(&tmp_map);
		}
		phys_addr += offset;
		PCI_Map_Memory(
			&UNC_PCIDEV_mmio_map_entry(&(unc_pcidev_map[dev_node]),
						   package_num),
			phys_addr, 8 * page_len);
		UNC_PCIDEV_num_entries(&(unc_pcidev_map[dev_node]))++;
	}

	SEP_DRV_LOG_TRACE_OUT("");
#endif
}

/* ------------------------------------------------------------------------- */
/*!
 * @fn unc_mmio_Destroy(param)
 *
 * @param param device index (passed as U32*)
 *
 * @return None No return needed
 *
 * @brief Invalidate the entry in TLB of the physical address
 * NOTE: this should never be done with SMP call
 *
 */
static VOID unc_mmio_Destroy(PVOID param)
{
	U32 dev_idx;
	U32 i;
	U64 addr = 0;
	U32 cur_grp = 0;
	U32 dev_node = 0;
	U32 entries = 0;
	ECB pecb;

	SEP_DRV_LOG_TRACE_IN("Param: %p.", param);

	dev_idx = *((U32 *)param);
	cur_grp = LWPMU_DEVICE_cur_group(&devices[(dev_idx)])[0];
	pecb = LWPMU_DEVICE_PMU_register_data(&devices[dev_idx])[cur_grp];

	if (!pecb) {
		SEP_DRV_LOG_TRACE_OUT("Early exit (!pecb).");
		return;
	}
	dev_node = ECB_dev_node(pecb);

	if (!UNC_PCIDEV_mmio_map(&(unc_pcidev_map[dev_node]))) {
		SEP_DRV_LOG_TRACE_OUT("Early exit (no mapping).");
		return;
	}

	entries = GET_NUM_MAP_ENTRIES(dev_node);

	/* Unmap only entries that were actually mapped (non-zero VA). */
	for (i = 0; i < entries; i++) {
		addr = virtual_address_table(dev_node, i);
		if (addr) {
			PCI_Unmap_Memory(&UNC_PCIDEV_mmio_map_entry(
				&(unc_pcidev_map[dev_node]), i));
		}
	}

	SEP_DRV_LOG_TRACE_OUT("");
}

/*
 * Initialize the dispatch table
 */
DISPATCH_NODE unc_mmio_dispatch = { .init = unc_mmio_Initialize,
				    .fini = unc_mmio_Destroy,
				    .write = unc_mmio_Write_PMU,
				    .freeze = unc_mmio_Disable_PMU,
				    .restart = unc_mmio_Enable_PMU,
				    .read_data = unc_mmio_Read_PMU_Data,
				    .check_overflow = NULL,
				    .swap_group = NULL,
				    .read_lbrs = NULL,
				    .cleanup = UNC_COMMON_Dummy_Func,
				    .hw_errata = NULL,
				    .read_power = NULL,
				    .check_overflow_errata = NULL,
				    .read_counts = NULL,
				    .check_overflow_gp_errata = NULL,
				    .read_ro = NULL,
				    .platform_info = NULL,
				    .trigger_read = unc_mmio_Trigger_Read,
				    .scan_for_uncore = NULL,
				    .read_metrics = NULL };

DISPATCH_NODE unc_mmio_fpga_dispatch = { .init = unc_mmio_fpga_Initialize,
					 .fini = unc_mmio_Destroy,
					 .write = unc_mmio_Write_PMU,
					 .freeze = unc_mmio_Disable_PMU,
					 .restart = unc_mmio_Enable_PMU,
					 .read_data =
unc_mmio_Read_PMU_Data, + .check_overflow = NULL, + .swap_group = NULL, + .read_lbrs = NULL, + .cleanup = UNC_COMMON_Dummy_Func, + .hw_errata = NULL, + .read_power = NULL, + .check_overflow_errata = NULL, + .read_counts = NULL, + .check_overflow_gp_errata = NULL, + .read_ro = NULL, + .platform_info = NULL, + .trigger_read = unc_mmio_Trigger_Read, + .scan_for_uncore = NULL, + .read_metrics = NULL }; diff --git a/drivers/platform/x86/sepdk/sep/unc_msr.c b/drivers/platform/x86/sepdk/sep/unc_msr.c new file mode 100755 index 0000000000000..ce144203dc39a --- /dev/null +++ b/drivers/platform/x86/sepdk/sep/unc_msr.c @@ -0,0 +1,347 @@ +/* **************************************************************************** + * Copyright(C) 2009-2018 Intel Corporation. All Rights Reserved. + * + * This file is part of SEP Development Kit + * + * SEP Development Kit is free software; you can redistribute it + * and/or modify it under the terms of the GNU General Public License + * version 2 as published by the Free Software Foundation. + * + * SEP Development Kit is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * As a special exception, you may use this file as part of a free software + * library without restriction. Specifically, if other files instantiate + * templates or use macros or inline functions from this file, or you + * compile this file and link it with other files to produce an executable + * this file does not by itself cause the resulting executable to be + * covered by the GNU General Public License. This exception does not + * however invalidate any other reasons why the executable file might be + * covered by the GNU General Public License. 
 * ****************************************************************************
 */

#include "lwpmudrv_defines.h"
#include "lwpmudrv_types.h"
#include "lwpmudrv_ecb.h"
#include "lwpmudrv_struct.h"

#include "inc/ecb_iterators.h"
#include "inc/control.h"
#include "inc/unc_common.h"
#include "inc/utility.h"

extern U64 *read_counter_info;
extern EMON_BUFFER_DRIVER_HELPER emon_buffer_driver_helper;
extern DRV_CONFIG drv_cfg;

/*!
 * @fn static VOID UNC_MSR_Write_PMU(VOID*)
 *
 * @brief Initial write of PMU registers
 * Walk through the entries and write the value of the register accordingly.
 * When current_group = 0, then this is the first time this routine is called,
 *
 * @param param device index (passed as U32*)
 *
 * @return None
 *
 * Special Notes:
 */
static VOID UNC_MSR_Write_PMU(PVOID param)
{
	U32 dev_idx;
	U32 this_cpu;
	CPU_STATE pcpu;

	SEP_DRV_LOG_TRACE_IN("Param: %p.", param);

	dev_idx = *((U32 *)param);
	this_cpu = CONTROL_THIS_CPU();
	pcpu = &pcb[this_cpu];

	/* Uncore registers are per-socket: only the socket master writes. */
	if (!CPU_STATE_socket_master(pcpu)) {
		SEP_DRV_LOG_TRACE_OUT("Early exit (!CPU_STATE_socket_master).");
		return;
	}

	/* Program every control register with its configured value. */
	FOR_EACH_REG_UNC_OPERATION(pecb, dev_idx, idx, PMU_OPERATION_WRITE)
	{
		SYS_Write_MSR(ECB_entries_reg_id(pecb, idx),
			      ECB_entries_reg_value(pecb, idx));
	}
	END_FOR_EACH_REG_UNC_OPERATION;

	/* Zero the counter registers and latch the counter-width mask once
	 * per device; the mask is used later for overflow correction. */
	FOR_EACH_REG_UNC_OPERATION(pecb, dev_idx, idx, PMU_OPERATION_READ)
	{
		SYS_Write_MSR(ECB_entries_reg_id(pecb, idx), 0ULL);
		if (LWPMU_DEVICE_counter_mask(&devices[dev_idx]) == 0) {
			LWPMU_DEVICE_counter_mask(&devices[dev_idx]) =
				(U64)ECB_entries_max_bits(pecb, idx);
		}
	}
	END_FOR_EACH_REG_UNC_OPERATION;

	SEP_DRV_LOG_TRACE_OUT("");
}

/*!
+ * @fn static VOID UNC_MSR_Enable_PMU(PVOID) + * + * @brief Set the enable bit for all the evsel registers + * + * @param None + * + * @return None + * + * Special Notes: + */ +static VOID UNC_MSR_Enable_PMU(PVOID param) +{ + U32 dev_idx; + U32 this_cpu; + CPU_STATE pcpu; + U64 reg_val = 0; + + SEP_DRV_LOG_TRACE_IN("Param: %p.", param); + + dev_idx = *((U32 *)param); + this_cpu = CONTROL_THIS_CPU(); + pcpu = &pcb[this_cpu]; + + if (!CPU_STATE_socket_master(pcpu)) { + SEP_DRV_LOG_TRACE_OUT("Early exit (!CPU_STATE_socket_master)."); + return; + } + + FOR_EACH_REG_UNC_OPERATION(pecb, dev_idx, idx, PMU_OPERATION_ENABLE) + { + reg_val = ECB_entries_reg_value(pecb, idx); + if (ECB_entries_reg_rw_type(pecb, idx) == + PMU_REG_RW_READ_WRITE) { + reg_val = SYS_Read_MSR(ECB_entries_reg_id(pecb, idx)); + if (ECB_entries_reg_type(pecb, idx) == + PMU_REG_UNIT_CTRL) { + reg_val &= ECB_entries_reg_value(pecb, idx); + } else { + reg_val |= ECB_entries_reg_value(pecb, idx); + } + } + SYS_Write_MSR(ECB_entries_reg_id(pecb, idx), reg_val); + } + END_FOR_EACH_REG_UNC_OPERATION; + + SEP_DRV_LOG_TRACE_OUT(""); +} + +/*! 
+ * @fn static VOID UNC_MSR_Disable_PMU(PVOID) + * + * @brief Set the enable bit for all the evsel registers + * + * @param None + * + * @return None + * + * Special Notes: + */ +static VOID UNC_MSR_Disable_PMU(PVOID param) +{ + U32 dev_idx; + U32 this_cpu; + CPU_STATE pcpu; + U64 reg_val = 0; + + SEP_DRV_LOG_TRACE_IN("Param: %p.", param); + + dev_idx = *((U32 *)param); + this_cpu = CONTROL_THIS_CPU(); + pcpu = &pcb[this_cpu]; + + if (!CPU_STATE_socket_master(pcpu)) { + SEP_DRV_LOG_TRACE_OUT("Early exit (!CPU_STATE_socket_master)."); + return; + } + + FOR_EACH_REG_UNC_OPERATION(pecb, dev_idx, idx, PMU_OPERATION_DISABLE) + { + reg_val = ECB_entries_reg_value(pecb, idx); + if (ECB_entries_reg_rw_type(pecb, idx) == + PMU_REG_RW_READ_WRITE) { + reg_val = SYS_Read_MSR(ECB_entries_reg_id(pecb, idx)); + if (ECB_entries_reg_type(pecb, idx) == + PMU_REG_UNIT_CTRL) { + reg_val |= ECB_entries_reg_value(pecb, idx); + } else { + reg_val &= ECB_entries_reg_value(pecb, idx); + } + } + SYS_Write_MSR(ECB_entries_reg_id(pecb, idx), reg_val); + } + END_FOR_EACH_REG_UNC_OPERATION; + + SEP_DRV_LOG_TRACE_OUT(""); +} + +/*! 
 * @fn static VOID UNC_MSR_Read_PMU_Data(param)
 *
 * @param param The device index (passed as U32*)
 *
 * @return None No return needed
 *
 * @brief Read the Uncore count data and store into the buffer
 * Let us say we have 2 core events in a dual socket JKTN;
 * The start_index will be at 32 as it will 2 events in 16 CPU per socket
 * The position for first event of QPI will be computed based on its event
 *
 */
static VOID UNC_MSR_Read_PMU_Data(PVOID param)
{
	U32 j = 0;
	U32 dev_idx;
	U32 this_cpu;
	U32 package_num = 0;
	U64 *buffer;
	CPU_STATE pcpu;
	U32 cur_grp;
	ECB pecb;

	SEP_DRV_LOG_TRACE_IN("Param: %p.", param);

	dev_idx = *((U32 *)param);
	this_cpu = CONTROL_THIS_CPU();
	buffer = read_counter_info;
	pcpu = &pcb[this_cpu];
	package_num = core_to_package_map[this_cpu];
	cur_grp = LWPMU_DEVICE_cur_group(&devices[(dev_idx)])[package_num];
	pecb = LWPMU_DEVICE_PMU_register_data(&devices[(dev_idx)])[cur_grp];

	// NOTE: the read_data callback is only used for EMON collection.
	if (!DRV_CONFIG_emon_mode(drv_cfg)) {
		SEP_DRV_LOG_TRACE_OUT("Early exit (!emon_mode).");
		return;
	}
	/* Uncore counters are per-socket; only the socket master reads. */
	if (!CPU_STATE_socket_master(pcpu)) {
		SEP_DRV_LOG_TRACE_OUT("Early exit (!CPU_STATE_socket_master).");
		return;
	}
	if (!pecb) {
		SEP_DRV_LOG_TRACE_OUT("Early exit (!pecb).");
		return;
	}

	//Read in the counts into temporary buffer
	FOR_EACH_REG_UNC_OPERATION(pecb, dev_idx, idx, PMU_OPERATION_READ)
	{
		/* Compute the destination slot in the EMON buffer: system
		 * scope events have a global offset; others are placed in
		 * this package's region. */
		if (ECB_entries_event_scope(pecb, idx) == SYSTEM_EVENT) {
			j = ECB_entries_uncore_buffer_offset_in_system(pecb,
								       idx);
		} else {
			j = EMON_BUFFER_UNCORE_PACKAGE_EVENT_OFFSET(
				package_num,
				EMON_BUFFER_DRIVER_HELPER_num_entries_per_package(
					emon_buffer_driver_helper),
				ECB_entries_uncore_buffer_offset_in_package(
					pecb, idx));
		}

		buffer[j] = SYS_Read_MSR(ECB_entries_reg_id(pecb, idx));
		SEP_DRV_LOG_TRACE("j=%u, value=%llu, cpu=%u, event_id=%u", j,
				  buffer[j], this_cpu,
				  ECB_entries_core_event_id(pecb, idx));
	}
	END_FOR_EACH_REG_UNC_OPERATION;

	SEP_DRV_LOG_TRACE_OUT("");
}

/* ------------------------------------------------------------------------- */
/*!
 * @fn static VOID UNC_MSR_Trigger_Read(id)
 *
 * @param param pointer to the sample buffer to fill
 * @param id Device index
 *
 * @return None No return needed
 *
 * @brief Read the Uncore data from counters and store into buffer
 */
static VOID UNC_MSR_Trigger_Read(PVOID param, U32 id)
{
	U32 this_cpu;
	U32 package_num;
	U32 cur_grp;
	ECB pecb;
	U32 index = 0;
	U64 diff = 0;
	U64 value;
	U64 *data;

	SEP_DRV_LOG_TRACE_IN("Param: %p, id: %u.", param, id);

	this_cpu = CONTROL_THIS_CPU();
	package_num = core_to_package_map[this_cpu];
	cur_grp = LWPMU_DEVICE_cur_group(&devices[id])[package_num];
	pecb = LWPMU_DEVICE_PMU_register_data(&devices[id])[cur_grp];

	// Write GroupID
	data = (U64 *)((S8 *)param + ECB_group_offset(pecb));
	*data = cur_grp + 1;
	//Read in the counts into uncore buffer
	FOR_EACH_REG_UNC_OPERATION(pecb, id, idx, PMU_OPERATION_READ)
	{
		value = SYS_Read_MSR(ECB_entries_reg_id(pecb, idx));
		/* Counter went backwards => it wrapped; correct the delta
		 * using the device's counter-width mask. */
		//check for overflow
		if (value <
		    LWPMU_DEVICE_prev_value(&devices[id])[package_num][index]) {
			diff = LWPMU_DEVICE_counter_mask(&devices[id]) -
			       LWPMU_DEVICE_prev_value(
				       &devices[id])[package_num][index];
			diff += value;
		} else {
			diff = value -
			       LWPMU_DEVICE_prev_value(
				       &devices[id])[package_num][index];
		}
		/* Accumulate the delta and publish the running total into
		 * this event's slot in the sample buffer. */
		LWPMU_DEVICE_acc_value(
			&devices[id])[package_num][cur_grp][index] += diff;
		LWPMU_DEVICE_prev_value(&devices[id])[package_num][index] =
			value;
		data = (U64 *)((S8 *)param +
			       ECB_entries_counter_event_offset(pecb, idx));
		*data = LWPMU_DEVICE_acc_value(
			&devices[id])[package_num][cur_grp][index];
		index++;
	}
	END_FOR_EACH_REG_UNC_OPERATION;

	SEP_DRV_LOG_TRACE_OUT("");
}

/*
 * Initialize the dispatch table
 */

DISPATCH_NODE unc_msr_dispatch = { .init = NULL,
				   .fini = NULL,
				   .write = UNC_MSR_Write_PMU,
				   .freeze = UNC_MSR_Disable_PMU,
				   .restart = UNC_MSR_Enable_PMU,
				   .read_data = UNC_MSR_Read_PMU_Data,
				   .check_overflow = NULL,
				   .swap_group = NULL,
				   .read_lbrs = NULL,
				   .cleanup = UNC_COMMON_MSR_Clean_Up,
				   .hw_errata = NULL,
+ .read_power = NULL, + .check_overflow_errata = NULL, + .read_counts = NULL, + .check_overflow_gp_errata = NULL, + .read_ro = NULL, + .platform_info = NULL, + .trigger_read = UNC_MSR_Trigger_Read, + .scan_for_uncore = NULL, + .read_metrics = NULL }; diff --git a/drivers/platform/x86/sepdk/sep/unc_pci.c b/drivers/platform/x86/sepdk/sep/unc_pci.c new file mode 100755 index 0000000000000..e338556f8b34d --- /dev/null +++ b/drivers/platform/x86/sepdk/sep/unc_pci.c @@ -0,0 +1,491 @@ +/* **************************************************************************** + * Copyright(C) 2009-2018 Intel Corporation. All Rights Reserved. + * + * This file is part of SEP Development Kit + * + * SEP Development Kit is free software; you can redistribute it + * and/or modify it under the terms of the GNU General Public License + * version 2 as published by the Free Software Foundation. + * + * SEP Development Kit is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * As a special exception, you may use this file as part of a free software + * library without restriction. Specifically, if other files instantiate + * templates or use macros or inline functions from this file, or you + * compile this file and link it with other files to produce an executable + * this file does not by itself cause the resulting executable to be + * covered by the GNU General Public License. This exception does not + * however invalidate any other reasons why the executable file might be + * covered by the GNU General Public License. 
 * ****************************************************************************
 */

#include "lwpmudrv_defines.h"
#include "lwpmudrv_types.h"
#include "lwpmudrv_ecb.h"
#include "lwpmudrv_struct.h"

#include "inc/ecb_iterators.h"
#include "inc/control.h"
#include "inc/unc_common.h"
#include "inc/utility.h"
#include "inc/pci.h"

extern U64 *read_counter_info;
extern UNCORE_TOPOLOGY_INFO_NODE uncore_topology;
extern EMON_BUFFER_DRIVER_HELPER emon_buffer_driver_helper;
extern DRV_CONFIG drv_cfg;

/*!
 * @fn static VOID unc_pci_Write_PMU(VOID*)
 *
 * @brief Initial write of PMU registers
 * Walk through the entries and write the value of the register accordingly
 * through PCI config space on the bus that maps to the caller's package.
 * When current_group = 0, then this is the first time this routine is called,
 *
 * @param param device index (passed as U32*)
 *
 * @return None
 *
 * Special Notes:
 */
static VOID unc_pci_Write_PMU(PVOID param)
{
	U32 device_id;
	U32 dev_idx;
	U32 value;
	U32 vendor_id;
	U32 this_cpu;
	CPU_STATE pcpu;
	U32 package_num = 0;
	U32 dev_node = 0;
	U32 cur_grp;
	ECB pecb;
	U32 busno;

	SEP_DRV_LOG_TRACE_IN("Param: %p.", param);

	dev_idx = *((U32 *)param);
	this_cpu = CONTROL_THIS_CPU();
	pcpu = &pcb[this_cpu];
	package_num = core_to_package_map[this_cpu];
	cur_grp = LWPMU_DEVICE_cur_group(&devices[(dev_idx)])[package_num];
	pecb = LWPMU_DEVICE_PMU_register_data(&devices[(dev_idx)])[cur_grp];

	if (!CPU_STATE_socket_master(pcpu)) {
		SEP_DRV_LOG_TRACE_OUT("Early exit (!CPU_STATE_socket_master).");
		return;
	}
	if (!pecb) {
		SEP_DRV_LOG_TRACE_OUT("Early exit (!pecb).");
		return;
	}

	// first, figure out which package maps to which bus
	dev_node = ECB_dev_node(pecb);
	if (!IS_BUS_MAP_VALID(dev_node, package_num)) {
		SEP_DRV_LOG_ERROR_TRACE_OUT("No UNC_PCIDEV bus map for %u!",
					    dev_node);
		return;
	}

	busno = GET_BUS_MAP(dev_node, package_num);

	/* Remember the node so enable/disable/read can recover the bus map. */
	LWPMU_DEVICE_pci_dev_node_index(&devices[dev_idx]) = dev_node;

	FOR_EACH_REG_UNC_OPERATION(pecb, dev_idx, idx,
				   PMU_OPERATION_WRITE)
	{
		/* Global control is an MSR, not a PCI config register. */
		if (ECB_entries_reg_type(pecb, idx) == PMU_REG_GLOBAL_CTRL) {
			//Check if we need to zero this MSR out
			SYS_Write_MSR(ECB_entries_reg_id(pecb, idx), 0LL);
			continue;
		}

		// otherwise, we have a valid entry
		// now we just need to find the corresponding bus #
		ECB_entries_bus_no(pecb, idx) = busno;
		value = PCI_Read_U32(busno, ECB_entries_dev_no(pecb, idx),
				     ECB_entries_func_no(pecb, idx), 0);

		/* Skip entries whose config space does not answer with an
		 * Intel vendor id. */
		CONTINUE_IF_NOT_GENUINE_INTEL_DEVICE(value, vendor_id,
						     device_id);
		SEP_DRV_LOG_TRACE("Uncore device ID = 0x%x.",
				  device_id);

		if (ECB_entries_reg_type(pecb, idx) == PMU_REG_UNIT_CTRL) {
			// busno can not be stored in ECB because different sockets have different bus no.
			PCI_Write_U32(busno, ECB_entries_dev_no(pecb, idx),
				      ECB_entries_func_no(pecb, idx),
				      ECB_entries_reg_id(pecb, idx),
				      (U32)ECB_entries_reg_value(pecb, idx));
			continue;
		}

		// now program at the corresponding offset
		PCI_Write_U32(busno, ECB_entries_dev_no(pecb, idx),
			      ECB_entries_func_no(pecb, idx),
			      ECB_entries_reg_id(pecb, idx),
			      (U32)ECB_entries_reg_value(pecb, idx));

		/* Write the upper 32 bits at (reg + NEXT_ADDR_OFFSET) only
		 * when they are non-zero. */
		if ((ECB_entries_reg_value(pecb, idx) >> NEXT_ADDR_SHIFT) !=
		    0) {
			PCI_Write_U32(busno, ECB_entries_dev_no(pecb, idx),
				      ECB_entries_func_no(pecb, idx),
				      ECB_entries_reg_id(pecb, idx) +
					      NEXT_ADDR_OFFSET,
				      (U32)(ECB_entries_reg_value(pecb, idx) >>
					    NEXT_ADDR_SHIFT));
		}
	}
	END_FOR_EACH_REG_UNC_OPERATION;

	/* Zero the counters and latch the counter-width mask once. */
	FOR_EACH_REG_UNC_OPERATION(pecb, dev_idx, idx, PMU_OPERATION_READ)
	{
		PCI_Write_U64(busno, ECB_entries_dev_no(pecb, idx),
			      ECB_entries_func_no(pecb, idx),
			      ECB_entries_reg_id(pecb, idx), 0);

		// this is needed for overflow detection of the accumulators.
		if (LWPMU_DEVICE_counter_mask(&devices[dev_idx]) == 0) {
			LWPMU_DEVICE_counter_mask(&devices[dev_idx]) =
				(U64)ECB_entries_max_bits(pecb, idx);
		}
	}
	END_FOR_EACH_REG_UNC_OPERATION;

	SEP_DRV_LOG_TRACE_OUT("");
}

/*!
+ * @fn static VOID unc_pci_Enable_PMU(PVOID) + * + * @brief Set the enable bit for all the EVSEL registers + * + * @param Device Index of this PMU unit + * + * @return None + * + * Special Notes: + */ +static VOID unc_pci_Enable_PMU(PVOID param) +{ + U32 dev_idx; + U32 this_cpu; + CPU_STATE pcpu; + U32 package_num = 0; + U32 dev_node; + U32 reg_val = 0; + U32 busno; + + SEP_DRV_LOG_TRACE_IN("Param: %p.", param); + + dev_idx = *((U32 *)param); + this_cpu = CONTROL_THIS_CPU(); + pcpu = &pcb[this_cpu]; + dev_node = LWPMU_DEVICE_pci_dev_node_index(&devices[dev_idx]); + + if (!CPU_STATE_socket_master(pcpu)) { + SEP_DRV_LOG_TRACE_OUT("Early exit (!CPU_STATE_socket_master)."); + return; + } + + package_num = core_to_package_map[this_cpu]; + + if (!IS_BUS_MAP_VALID(dev_node, package_num)) { + SEP_DRV_LOG_ERROR_TRACE_OUT("No UNC_PCIDEV bus map for %u!", + dev_node); + return; + } + + busno = GET_BUS_MAP(dev_node, package_num); + + FOR_EACH_REG_UNC_OPERATION(pecb, dev_idx, idx, PMU_OPERATION_ENABLE) + { + if (ECB_entries_reg_type(pecb, idx) == PMU_REG_GLOBAL_CTRL) { + SYS_Write_MSR(ECB_entries_reg_id(pecb, idx), + ECB_entries_reg_value(pecb, idx)); + continue; + } + reg_val = (U32)ECB_entries_reg_value(pecb, idx); + if (ECB_entries_reg_rw_type(pecb, idx) == + PMU_REG_RW_READ_WRITE) { + reg_val = PCI_Read_U32(busno, + ECB_entries_dev_no(pecb, idx), + ECB_entries_func_no(pecb, idx), + ECB_entries_reg_id(pecb, idx)); + reg_val &= ECB_entries_reg_value(pecb, idx); + } + PCI_Write_U32(busno, ECB_entries_dev_no(pecb, idx), + ECB_entries_func_no(pecb, idx), + ECB_entries_reg_id(pecb, idx), reg_val); + } + END_FOR_EACH_REG_UNC_OPERATION; + + SEP_DRV_LOG_TRACE_OUT(""); +} + +/*! + * @fn static VOID unc_pci_Disable_PMU(PVOID) + * + * @brief Disable the per unit global control to stop the PMU counters. 
+ * + * @param Device Index of this PMU unit + * @control_msr Control MSR address + * @enable_val If counter freeze bit does not work, counter enable bit should be cleared + * @disable_val Disable collection + * + * @return None + * + * Special Notes: + */ +static VOID unc_pci_Disable_PMU(PVOID param) +{ + U32 dev_idx; + U32 this_cpu; + CPU_STATE pcpu; + U32 package_num = 0; + U32 dev_node; + U32 reg_val = 0; + U32 busno; + + SEP_DRV_LOG_TRACE_IN("Param: %p.", param); + + dev_idx = *((U32 *)param); + this_cpu = CONTROL_THIS_CPU(); + pcpu = &pcb[this_cpu]; + dev_node = LWPMU_DEVICE_pci_dev_node_index(&devices[dev_idx]); + + if (!CPU_STATE_socket_master(pcpu)) { + SEP_DRV_LOG_TRACE_OUT("Early exit (!CPU_STATE_socket_master)."); + return; + } + + package_num = core_to_package_map[this_cpu]; + + if (!IS_BUS_MAP_VALID(dev_node, package_num)) { + SEP_DRV_LOG_ERROR_TRACE_OUT("No UNC_PCIDEV bus map for %u!", + dev_node); + return; + } + + busno = GET_BUS_MAP(dev_node, package_num); + + FOR_EACH_REG_UNC_OPERATION(pecb, dev_idx, idx, PMU_OPERATION_DISABLE) + { + if (ECB_entries_reg_type(pecb, idx) == PMU_REG_GLOBAL_CTRL) { + SYS_Write_MSR(ECB_entries_reg_id(pecb, idx), + ECB_entries_reg_value(pecb, idx)); + continue; + } + reg_val = (U32)ECB_entries_reg_value(pecb, idx); + if (ECB_entries_reg_rw_type(pecb, idx) == + PMU_REG_RW_READ_WRITE) { + reg_val = PCI_Read_U32(busno, + ECB_entries_dev_no(pecb, idx), + ECB_entries_func_no(pecb, idx), + ECB_entries_reg_id(pecb, idx)); + reg_val |= ECB_entries_reg_value(pecb, idx); + } + PCI_Write_U32(busno, ECB_entries_dev_no(pecb, idx), + ECB_entries_func_no(pecb, idx), + ECB_entries_reg_id(pecb, idx), reg_val); + } + END_FOR_EACH_REG_UNC_OPERATION; + + SEP_DRV_LOG_TRACE_OUT(""); +} + +/* ------------------------------------------------------------------------- */ +/*! 
 * @fn static VOID unc_pci_Trigger_Read(id)
 *
 * @param param pointer to the sample buffer to fill
 * @param id Device index
 *
 * @return None No return needed
 *
 * @brief Read the Uncore data from counters and store into buffer
 */
static VOID unc_pci_Trigger_Read(PVOID param, U32 id)
{
	U32 this_cpu = 0;
	U32 package_num = 0;
	U32 dev_node = 0;
	U32 cur_grp = 0;
	ECB pecb = NULL;
	U32 index = 0;
	U64 value_low = 0;
	U64 value_high = 0;
	U64 diff = 0;
	U64 value;
	U64 *data;
	U32 busno;

	SEP_DRV_LOG_TRACE_IN("Param: %p, id: %u.", param, id);

	this_cpu = CONTROL_THIS_CPU();
	package_num = core_to_package_map[this_cpu];
	dev_node = LWPMU_DEVICE_pci_dev_node_index(&devices[id]);
	cur_grp = LWPMU_DEVICE_cur_group(&devices[id])[package_num];
	pecb = LWPMU_DEVICE_PMU_register_data(&devices[id])[cur_grp];

	if (!IS_BUS_MAP_VALID(dev_node, package_num)) {
		SEP_DRV_LOG_ERROR_TRACE_OUT("No UNC_PCIDEV bus map for %u!",
					    dev_node);
		return;
	}

	busno = GET_BUS_MAP(dev_node, package_num);

	// Write GroupID
	data = (U64 *)((S8 *)param + ECB_group_offset(pecb));
	*data = cur_grp + 1;
	// Read the counts into uncore buffer
	FOR_EACH_REG_UNC_OPERATION(pecb, id, idx, PMU_OPERATION_READ)
	{
		/* Each 64-bit counter is exposed as two 32-bit config reads. */
		// read lower 4 bytes
		value_low = PCI_Read_U32(busno, ECB_entries_dev_no(pecb, idx),
					 ECB_entries_func_no(pecb, idx),
					 ECB_entries_reg_id(pecb, idx));
		value = LOWER_4_BYTES_MASK & value_low;

		// read upper 4 bytes
		value_high = PCI_Read_U32(
			busno, ECB_entries_dev_no(pecb, idx),
			ECB_entries_func_no(pecb, idx),
			(ECB_entries_reg_id(pecb, idx) + NEXT_ADDR_OFFSET));
		value |= value_high << NEXT_ADDR_SHIFT;
		/* Counter went backwards => it wrapped; correct the delta
		 * using the device's counter-width mask. */
		//check for overflow
		if (value <
		    LWPMU_DEVICE_prev_value(&devices[id])[package_num][index]) {
			diff = LWPMU_DEVICE_counter_mask(&devices[id]) -
			       LWPMU_DEVICE_prev_value(
				       &devices[id])[package_num][index];
			diff += value;
		} else {
			diff = value -
			       LWPMU_DEVICE_prev_value(
				       &devices[id])[package_num][index];
		}
		/* Accumulate the delta and publish the running total into
		 * this event's slot in the sample buffer. */
		LWPMU_DEVICE_acc_value(
			&devices[id])[package_num][cur_grp][index] += diff;
		LWPMU_DEVICE_prev_value(&devices[id])[package_num][index] =
			value;
		data = (U64 *)((S8 *)param +
			       ECB_entries_counter_event_offset(pecb, idx));
		*data = LWPMU_DEVICE_acc_value(
			&devices[id])[package_num][cur_grp][index];
		index++;
	}
	END_FOR_EACH_REG_UNC_OPERATION;

	SEP_DRV_LOG_TRACE_OUT("");
}

/*!
 * @fn static unc_pci_Read_PMU_Data(param)
 *
 * @param param The device index (passed as U32*)
 *
 * @return None No return needed
 *
 * @brief Read the Uncore count data and store into the buffer;
 */
static VOID unc_pci_Read_PMU_Data(PVOID param)
{
	U32 j = 0;
	U32 dev_idx;
	U32 this_cpu;
	U64 *buffer = read_counter_info;
	CPU_STATE pcpu;
	U32 cur_grp;
	ECB pecb;
	U32 dev_node;
	U32 package_num = 0;
	U32 busno;

	SEP_DRV_LOG_TRACE_IN("Param: %p.", param);

	dev_idx = *((U32 *)param);
	this_cpu = CONTROL_THIS_CPU();
	pcpu = &pcb[this_cpu];
	package_num = core_to_package_map[this_cpu];
	cur_grp = LWPMU_DEVICE_cur_group(&devices[(dev_idx)])[package_num];
	pecb = LWPMU_DEVICE_PMU_register_data(&devices[(dev_idx)])[cur_grp];
	dev_node = LWPMU_DEVICE_pci_dev_node_index(&devices[dev_idx]);

	// NOTE: the read_data callback is only used for EMON collection.
+ if (!DRV_CONFIG_emon_mode(drv_cfg)) { + SEP_DRV_LOG_TRACE_OUT("Early exit (!emon_mode)."); + return; + } + if (!CPU_STATE_socket_master(pcpu)) { + SEP_DRV_LOG_TRACE_OUT("Early exit (!CPU_STATE_socket_master)."); + return; + } + if (!pecb) { + SEP_DRV_LOG_TRACE_OUT("Early exit (!pecb)."); + return; + } + + if (!IS_BUS_MAP_VALID(dev_node, package_num)) { + SEP_DRV_LOG_ERROR_TRACE_OUT("No UNC_PCIDEV bus map for %u!", + dev_node); + return; + } + + busno = GET_BUS_MAP(dev_node, package_num); + + //Read in the counts into temporary buffer + FOR_EACH_REG_UNC_OPERATION(pecb, dev_idx, idx, PMU_OPERATION_READ) + { + if (ECB_entries_event_scope(pecb, idx) == SYSTEM_EVENT) { + j = ECB_entries_uncore_buffer_offset_in_system(pecb, + idx); + } else { + j = EMON_BUFFER_UNCORE_PACKAGE_EVENT_OFFSET( + package_num, + EMON_BUFFER_DRIVER_HELPER_num_entries_per_package( + emon_buffer_driver_helper), + ECB_entries_uncore_buffer_offset_in_package( + pecb, idx)); + } + + buffer[j] = PCI_Read_U64(busno, ECB_entries_dev_no(pecb, idx), + ECB_entries_func_no(pecb, idx), + ECB_entries_reg_id(pecb, idx)); + + SEP_DRV_LOG_TRACE("j=%u, value=%llu, cpu=%u", j, buffer[j], + this_cpu); + } + END_FOR_EACH_REG_UNC_OPERATION; + + SEP_DRV_LOG_TRACE_OUT(""); +} + +/* + * Initialize the dispatch table + */ + +DISPATCH_NODE unc_pci_dispatch = { .init = NULL, + .fini = NULL, + .write = unc_pci_Write_PMU, + .freeze = unc_pci_Disable_PMU, + .restart = unc_pci_Enable_PMU, + .read_data = unc_pci_Read_PMU_Data, + .check_overflow = NULL, + .swap_group = NULL, + .read_lbrs = NULL, + .cleanup = NULL, + .hw_errata = NULL, + .read_power = NULL, + .check_overflow_errata = NULL, + .read_counts = NULL, + .check_overflow_gp_errata = NULL, + .read_ro = NULL, + .platform_info = NULL, + .trigger_read = unc_pci_Trigger_Read, + .scan_for_uncore = NULL, + .read_metrics = NULL }; diff --git a/drivers/platform/x86/sepdk/sep/unc_power.c b/drivers/platform/x86/sepdk/sep/unc_power.c new file mode 100755 index 
0000000000000..4f7d8ff437446 --- /dev/null +++ b/drivers/platform/x86/sepdk/sep/unc_power.c @@ -0,0 +1,444 @@ +/* **************************************************************************** + * Copyright(C) 2009-2018 Intel Corporation. All Rights Reserved. + * + * This file is part of SEP Development Kit + * + * SEP Development Kit is free software; you can redistribute it + * and/or modify it under the terms of the GNU General Public License + * version 2 as published by the Free Software Foundation. + * + * SEP Development Kit is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * As a special exception, you may use this file as part of a free software + * library without restriction. Specifically, if other files instantiate + * templates or use macros or inline functions from this file, or you + * compile this file and link it with other files to produce an executable + * this file does not by itself cause the resulting executable to be + * covered by the GNU General Public License. This exception does not + * however invalidate any other reasons why the executable file might be + * covered by the GNU General Public License. + * **************************************************************************** + */ + +#include "lwpmudrv_defines.h" +#include "lwpmudrv_types.h" +#include "lwpmudrv_ecb.h" +#include "lwpmudrv_struct.h" + +#include "inc/ecb_iterators.h" +#include "inc/control.h" +#include "inc/unc_common.h" +#include "inc/utility.h" + +extern U64 *read_counter_info; +extern U64 *prev_counter_data; +extern EMON_BUFFER_DRIVER_HELPER emon_buffer_driver_helper; +static U64 **prev_val_per_thread; +static U64 **acc_per_thread; +extern DRV_CONFIG drv_cfg; + +/*! 
 * @fn unc_power_Allocate(param)
 *
 * @param param device index (passed as U32*)
 *
 * @return None No return needed
 *
 * @brief Allocate arrays required for reading counts
 */
static VOID unc_power_Allocate(PVOID param)
{
	U32 id;
	U32 cur_grp;
	ECB pecb;
	U32 i;
	U32 j;

	SEP_DRV_LOG_TRACE_IN("Param: %p.", param);

	id = *((U32 *)param);
	cur_grp = LWPMU_DEVICE_cur_group(&devices[id])[0];
	pecb = LWPMU_DEVICE_PMU_register_data(&devices[id])[cur_grp];

	if (!pecb) {
		SEP_DRV_LOG_TRACE_OUT("Early exit (!pecb).");
		return;
	}

	/* One accumulator and one previous-value slot per (cpu, event). */
	acc_per_thread = CONTROL_Allocate_Memory(
		GLOBAL_STATE_num_cpus(driver_state) * sizeof(U64 *));
	if (acc_per_thread == NULL) {
		SEP_DRV_LOG_ERROR_TRACE_OUT(
			"Unable to allocate memory for acc_per_thread!");
		return;
	}

	prev_val_per_thread = CONTROL_Allocate_Memory(
		GLOBAL_STATE_num_cpus(driver_state) * sizeof(U64 *));
	if (prev_val_per_thread == NULL) {
		SEP_DRV_LOG_ERROR_TRACE_OUT(
			"Unable to allocate memory for prev_val_per_thread!");
		return;
	}

	for (i = 0; i < (U32)GLOBAL_STATE_num_cpus(driver_state); i++) {
		acc_per_thread[i] = CONTROL_Allocate_Memory(
			ECB_num_events(pecb) * sizeof(U64));
		if (acc_per_thread[i] == NULL) {
			/* NOTE(review): on a partial failure the earlier
			 * allocations are left in place — presumably
			 * unc_power_Free reclaims them later; confirm the
			 * free path runs after a failed init. */
			SEP_DRV_LOG_ERROR_TRACE_OUT(
				"Unable to allocate memory for acc_per_thread[%u]!",
				i);
			return;
		}

		prev_val_per_thread[i] = CONTROL_Allocate_Memory(
			ECB_num_events(pecb) * sizeof(U64));
		if (prev_val_per_thread[i] == NULL) {
			SEP_DRV_LOG_ERROR_TRACE_OUT(
				"Unable to allocate memory for prev_val_per_thread[%u]!",
				i);
			return;
		}

		// initialize all values to 0
		for (j = 0; j < ECB_num_events(pecb); j++) {
			acc_per_thread[i][j] = 0LL;
			prev_val_per_thread[i][j] = 0LL;
		}
	}

	SEP_DRV_LOG_TRACE_OUT("");
}

/*!
 * @fn unc_power_Free(param)
 *
 * @param param device index
 *
 * @return None No return needed
 *
 * @brief Free arrays required for reading counts
 */
static VOID unc_power_Free(PVOID param)
{
	U32 i;

	SEP_DRV_LOG_TRACE_IN("Param: %p.", param);

	/* Release the per-cpu arrays set up by unc_power_Allocate. */
	if (acc_per_thread) {
		for (i = 0; i < (U32)GLOBAL_STATE_num_cpus(driver_state); i++) {
			acc_per_thread[i] =
				CONTROL_Free_Memory(acc_per_thread[i]);
		}
		acc_per_thread = CONTROL_Free_Memory(acc_per_thread);
	}

	if (prev_val_per_thread) {
		for (i = 0; i < (U32)GLOBAL_STATE_num_cpus(driver_state); i++) {
			prev_val_per_thread[i] =
				CONTROL_Free_Memory(prev_val_per_thread[i]);
		}
		prev_val_per_thread = CONTROL_Free_Memory(prev_val_per_thread);
	}

	SEP_DRV_LOG_TRACE_OUT("");
}

/*!
 * @fn unc_power_Trigger_Read(param, id)
 *
 * @param param pointer to sample buffer
 * @param id device index
 *
 * @return None No return needed
 *
 * @brief Read the Uncore count data and store into the buffer param
 */
static VOID unc_power_Trigger_Read(PVOID param, U32 id)
{
	U64 *data = (U64 *)param;
	U32 cur_grp;
	ECB pecb;
	U32 this_cpu;
	U32 package_num;
	U32 index = 0;
	U64 diff = 0;
	U64 value;

	SEP_DRV_LOG_TRACE_IN("Param: %p.", param);

	this_cpu = CONTROL_THIS_CPU();
	package_num = core_to_package_map[this_cpu];
	cur_grp = LWPMU_DEVICE_cur_group(&devices[id])[package_num];
	pecb = LWPMU_DEVICE_PMU_register_data(&devices[id])[cur_grp];

	// Write GroupID
	data = (U64 *)((S8 *)data + ECB_group_offset(pecb));
	*data = cur_grp + 1;

	FOR_EACH_REG_UNC_OPERATION(pecb, id, idx, PMU_OPERATION_READ)
	{
		data = (U64 *)((S8 *)param +
			       ECB_entries_counter_event_offset(pecb, idx));
		value = SYS_Read_MSR(ECB_entries_reg_id(pecb, idx));
		if (ECB_entries_max_bits(pecb, idx)) {
			/* Clip to the counter's valid bit width. */
			value &= ECB_entries_max_bits(pecb, idx);
		}
		//check for overflow if not a static counter
		if (ECB_entries_counter_type(pecb, idx) == STATIC_COUNTER) {
			/* Static counters are reported as raw values. */
			*data = value;
		} else {
			/* Counter went backwards => it wrapped; correct the
			 * delta using the counter's bit-width mask. */
			if (value < prev_val_per_thread[this_cpu][index]) {
				diff = ECB_entries_max_bits(pecb, idx) -
				       prev_val_per_thread[this_cpu][index];
				diff += value;
			} else {
				diff = value -
				       prev_val_per_thread[this_cpu][index];
			}
			acc_per_thread[this_cpu][index] += diff;
			prev_val_per_thread[this_cpu][index] = value;
			*data = acc_per_thread[this_cpu][index];
		}
		index++;
	}
	END_FOR_EACH_REG_UNC_OPERATION;

	SEP_DRV_LOG_TRACE_OUT("");
}

/* ------------------------------------------------------------------------- */
/*!
 * @fn unc_power_Enable_PMU(param)
 *
 * @param param device index (passed as U32*)
 *
 * @return None
 *
 * @brief Capture the previous values to calculate delta later.
 */
static VOID unc_power_Enable_PMU(PVOID param)
{
	U32 j;
	U64 *buffer = prev_counter_data;
	U32 dev_idx;
	U32 this_cpu;
	CPU_STATE pcpu;
	U32 package_event_count = 0;
	U32 thread_event_count = 0;
	U32 module_event_count = 0;
	U64 tmp_value = 0;
	U32 package_id = 0;
	U32 core_id = 0;
	U32 thread_id = 0;

	SEP_DRV_LOG_TRACE_IN("Param: %p.", param);

	dev_idx = *((U32 *)param);
	this_cpu = CONTROL_THIS_CPU();
	pcpu = &pcb[this_cpu];
	package_id = core_to_package_map[this_cpu];
	core_id = core_to_phys_core_map[this_cpu];
	thread_id = core_to_thread_map[this_cpu];

	// NOTE: the enable callback currently captures previous values
	// for EMON collection to avoid unnecessary memory copy.
+ if (!DRV_CONFIG_emon_mode(drv_cfg)) { + SEP_DRV_LOG_TRACE_OUT("Early exit (!emon_mode)."); + return; + } + + FOR_EACH_REG_UNC_OPERATION(pecb, dev_idx, idx, PMU_OPERATION_READ) + { + if (ECB_entries_event_scope(pecb, idx) == PACKAGE_EVENT) { + j = EMON_BUFFER_UNCORE_PACKAGE_POWER_EVENT_OFFSET( + package_id, + EMON_BUFFER_DRIVER_HELPER_num_entries_per_package( + emon_buffer_driver_helper), + EMON_BUFFER_DRIVER_HELPER_power_device_offset_in_package( + emon_buffer_driver_helper), + package_event_count); + package_event_count++; + } else if (ECB_entries_event_scope(pecb, idx) == MODULE_EVENT) { + j = EMON_BUFFER_UNCORE_MODULE_POWER_EVENT_OFFSET( + package_id, + EMON_BUFFER_DRIVER_HELPER_num_entries_per_package( + emon_buffer_driver_helper), + EMON_BUFFER_DRIVER_HELPER_power_device_offset_in_package( + emon_buffer_driver_helper), + EMON_BUFFER_DRIVER_HELPER_power_num_package_events( + emon_buffer_driver_helper), + CPU_STATE_cpu_module_master(pcpu), + EMON_BUFFER_DRIVER_HELPER_power_num_module_events( + emon_buffer_driver_helper), + module_event_count); + module_event_count++; + } else { + j = EMON_BUFFER_UNCORE_THREAD_POWER_EVENT_OFFSET( + package_id, + EMON_BUFFER_DRIVER_HELPER_num_entries_per_package( + emon_buffer_driver_helper), + EMON_BUFFER_DRIVER_HELPER_power_device_offset_in_package( + emon_buffer_driver_helper), + EMON_BUFFER_DRIVER_HELPER_power_num_package_events( + emon_buffer_driver_helper), + GLOBAL_STATE_num_modules(driver_state), + EMON_BUFFER_DRIVER_HELPER_power_num_module_events( + emon_buffer_driver_helper), + core_id, threads_per_core[cpu], thread_id, + EMON_BUFFER_DRIVER_HELPER_power_num_thread_events( + emon_buffer_driver_helper), + thread_event_count); + thread_event_count++; + } + + tmp_value = SYS_Read_MSR(ECB_entries_reg_id(pecb, idx)); + if (ECB_entries_max_bits(pecb, idx)) { + tmp_value &= ECB_entries_max_bits(pecb, idx); + } + buffer[j] = tmp_value; + SEP_DRV_LOG_TRACE("j=%u, value=%llu, cpu=%u", j, buffer[j], + this_cpu); + } + 
END_FOR_EACH_REG_UNC_OPERATION;
+
+	SEP_DRV_LOG_TRACE_OUT("");
+}
+
+/* ------------------------------------------------------------------------- */
+/*!
+ * @fn unc_power_Read_PMU_Data(param)
+ *
+ * @param param The read thread node to process
+ *
+ * @return None No return needed
+ *
+ * @brief Read the Uncore count data and store into the buffer param;
+ * Uncore PMU does not support sampling, i.e. ignore the id parameter.
+ */
+static VOID unc_power_Read_PMU_Data(PVOID param)
+{
+	U32 j;
+	U64 *buffer = read_counter_info;
+	U64 *prev_buffer = prev_counter_data;
+	U32 dev_idx;
+	U32 this_cpu;
+	CPU_STATE pcpu;
+	U32 package_event_count = 0;
+	U32 thread_event_count = 0;
+	U32 module_event_count = 0;
+	U64 tmp_value;
+	U32 package_id = 0;
+	U32 core_id = 0;
+	U32 thread_id = 0;
+
+	SEP_DRV_LOG_TRACE_IN("Param: %p.", param);
+
+	dev_idx = *((U32 *)param);
+	this_cpu = CONTROL_THIS_CPU();
+	pcpu = &pcb[this_cpu];
+	package_id = core_to_package_map[this_cpu];
+	core_id = core_to_phys_core_map[this_cpu];
+	thread_id = core_to_thread_map[this_cpu];
+
+	// NOTE: the read_pmu function is only used for EMON collection.
+ if (!DRV_CONFIG_emon_mode(drv_cfg)) { + SEP_DRV_LOG_TRACE_OUT("Early exit (!emon_mode)."); + return; + } + + FOR_EACH_REG_UNC_OPERATION(pecb, dev_idx, idx, PMU_OPERATION_READ) + { + if (ECB_entries_event_scope(pecb, idx) == PACKAGE_EVENT) { + j = EMON_BUFFER_UNCORE_PACKAGE_POWER_EVENT_OFFSET( + package_id, + EMON_BUFFER_DRIVER_HELPER_num_entries_per_package( + emon_buffer_driver_helper), + EMON_BUFFER_DRIVER_HELPER_power_device_offset_in_package( + emon_buffer_driver_helper), + package_event_count); + package_event_count++; + } else if (ECB_entries_event_scope(pecb, idx) == MODULE_EVENT) { + j = EMON_BUFFER_UNCORE_MODULE_POWER_EVENT_OFFSET( + package_id, + EMON_BUFFER_DRIVER_HELPER_num_entries_per_package( + emon_buffer_driver_helper), + EMON_BUFFER_DRIVER_HELPER_power_device_offset_in_package( + emon_buffer_driver_helper), + EMON_BUFFER_DRIVER_HELPER_power_num_package_events( + emon_buffer_driver_helper), + CPU_STATE_cpu_module_master(pcpu), + EMON_BUFFER_DRIVER_HELPER_power_num_module_events( + emon_buffer_driver_helper), + module_event_count); + module_event_count++; + } else { + j = EMON_BUFFER_UNCORE_THREAD_POWER_EVENT_OFFSET( + package_id, + EMON_BUFFER_DRIVER_HELPER_num_entries_per_package( + emon_buffer_driver_helper), + EMON_BUFFER_DRIVER_HELPER_power_device_offset_in_package( + emon_buffer_driver_helper), + EMON_BUFFER_DRIVER_HELPER_power_num_package_events( + emon_buffer_driver_helper), + GLOBAL_STATE_num_modules(driver_state), + EMON_BUFFER_DRIVER_HELPER_power_num_module_events( + emon_buffer_driver_helper), + core_id, threads_per_core[cpu], thread_id, + EMON_BUFFER_DRIVER_HELPER_power_num_thread_events( + emon_buffer_driver_helper), + thread_event_count); + thread_event_count++; + } + + tmp_value = SYS_Read_MSR(ECB_entries_reg_id(pecb, idx)); + if (ECB_entries_max_bits(pecb, idx)) { + tmp_value &= ECB_entries_max_bits(pecb, idx); + } + if (ECB_entries_counter_type(pecb, idx) == STATIC_COUNTER) { + buffer[j] = tmp_value; + } else { + if (tmp_value >= 
prev_buffer[j]) { + buffer[j] = tmp_value - prev_buffer[j]; + } else { + buffer[j] = tmp_value + + (ECB_entries_max_bits(pecb, idx) - + prev_buffer[j]); + } + } + SEP_DRV_LOG_TRACE("j=%u, value=%llu, cpu=%u", j, buffer[j], + this_cpu); + } + END_FOR_EACH_REG_UNC_OPERATION; + + SEP_DRV_LOG_TRACE_OUT(""); +} + +/* + * Initialize the dispatch table + */ + +DISPATCH_NODE unc_power_dispatch = { .init = unc_power_Allocate, + .fini = unc_power_Free, + .write = UNC_COMMON_Dummy_Func, + .freeze = NULL, + .restart = unc_power_Enable_PMU, + .read_data = unc_power_Read_PMU_Data, + .check_overflow = NULL, + .swap_group = NULL, + .read_lbrs = NULL, + .cleanup = NULL, + .hw_errata = NULL, + .read_power = NULL, + .check_overflow_errata = NULL, + .read_counts = NULL, + .check_overflow_gp_errata = NULL, + .read_ro = NULL, + .platform_info = NULL, + .trigger_read = unc_power_Trigger_Read, + .scan_for_uncore = NULL, + .read_metrics = NULL }; diff --git a/drivers/platform/x86/sepdk/sep/unc_sa.c b/drivers/platform/x86/sepdk/sep/unc_sa.c new file mode 100755 index 0000000000000..7345807f9588a --- /dev/null +++ b/drivers/platform/x86/sepdk/sep/unc_sa.c @@ -0,0 +1,173 @@ +/* **************************************************************************** + * Copyright(C) 2009-2018 Intel Corporation. All Rights Reserved. + * + * This file is part of SEP Development Kit + * + * SEP Development Kit is free software; you can redistribute it + * and/or modify it under the terms of the GNU General Public License + * version 2 as published by the Free Software Foundation. + * + * SEP Development Kit is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * As a special exception, you may use this file as part of a free software + * library without restriction. 
Specifically, if other files instantiate + * templates or use macros or inline functions from this file, or you + * compile this file and link it with other files to produce an executable + * this file does not by itself cause the resulting executable to be + * covered by the GNU General Public License. This exception does not + * however invalidate any other reasons why the executable file might be + * covered by the GNU General Public License. + * **************************************************************************** + */ + +#include "lwpmudrv_defines.h" +#include "lwpmudrv_types.h" +#include "lwpmudrv_ecb.h" +#include "lwpmudrv_struct.h" + +#include "inc/ecb_iterators.h" +#include "inc/control.h" +#include "inc/haswellunc_sa.h" +#include "inc/utility.h" + +#if 0 +extern U64 *read_counter_info; +extern DRV_CONFIG drv_cfg; + +extern VOID SOCPERF_Read_Data3(PVOID data_buffer); +#endif + +/*! + * @fn static VOID hswunc_sa_Initialize(PVOID) + * + * @brief Initialize any registers or addresses + * + * @param param + * + * @return None + * + * Special Notes: + */ +static VOID hswunc_sa_Initialize(VOID *param) +{ + SEP_DRV_LOG_TRACE_IN("Param: %p.", param); + SEP_DRV_LOG_TRACE_OUT("Empty function."); +} + +/* ------------------------------------------------------------------------- */ +/*! 
+ * @fn hswunc_sa_Read_Counts(param, id) + * + * @param param The read thread node to process + * @param id The id refers to the device index + * + * @return None No return needed + * + * @brief Read the Uncore count data and store into the buffer param; + * + */ +static VOID hswunc_sa_Trigger_Read(PVOID param, U32 id) +{ +#if 0 + U64 *data = (U64 *)param; + U32 cur_grp; + ECB pecb; + U32 this_cpu; + U32 package_num; + + SEP_DRV_LOG_TRACE_IN("Param: %p, id: %u.", param, id); + + this_cpu = CONTROL_THIS_CPU(); + package_num = core_to_package_map[this_cpu]; + cur_grp = LWPMU_DEVICE_cur_group(&devices[id])[package_num]; + pecb = LWPMU_DEVICE_PMU_register_data(&devices[id])[cur_grp]; + + // group id + data = (U64 *)((S8 *)data + ECB_group_offset(pecb)); + SOCPERF_Read_Data3((void*)data); + + SEP_DRV_LOG_TRACE_OUT(""); +#endif +} + +/* ------------------------------------------------------------------------- */ +/*! + * @fn hswunc_sa_Read_PMU_Data(param) + * + * @param param the device index + * + * @return None No return needed + * + * @brief Read the Uncore count data and store into the buffer param; + * + */ +static VOID hswunc_sa_Read_PMU_Data(PVOID param) +{ +#if 0 + U32 j; + U64 *buffer = read_counter_info; + U32 dev_idx; + U32 this_cpu; + CPU_STATE pcpu; + U32 event_index = 0; + U64 counter_buffer[HSWUNC_SA_MAX_COUNTERS + 1]; + + SEP_DRV_LOG_TRACE_IN("Param: %p.", param); + + dev_idx = *((U32 *)param); + this_cpu = CONTROL_THIS_CPU(); + pcpu = &pcb[this_cpu]; + + // NOTE THAT the read_pmu function on for EMON collection. 
+ if (!DRV_CONFIG_emon_mode(drv_cfg)) { + SEP_DRV_LOG_TRACE_OUT("Early exit (!emon_mode)."); + return; + } + if (!CPU_STATE_system_master(pcpu)) { + SEP_DRV_LOG_TRACE_OUT("Early exit (!system_master)."); + return; + } + + SOCPERF_Read_Data3((void*)counter_buffer); + + FOR_EACH_PCI_DATA_REG_RAW(pecb, i, dev_idx) + { + j = ECB_entries_uncore_buffer_offset_in_system(pecb, i); + buffer[j] = counter_buffer[event_index + 1]; + event_index++; + SEP_DRV_LOG_TRACE("j=%u, value=%llu, cpu=%u", j, buffer[j], + this_cpu); + } + END_FOR_EACH_PCI_DATA_REG_RAW; + + SEP_DRV_LOG_TRACE_OUT(""); +#endif +} + +/* + * Initialize the dispatch table + */ + +DISPATCH_NODE hswunc_sa_dispatch = { .init = hswunc_sa_Initialize, + .fini = NULL, + .write = NULL, + .freeze = NULL, + .restart = NULL, + .read_data = hswunc_sa_Read_PMU_Data, + .check_overflow = NULL, + .swap_group = NULL, + .read_lbrs = NULL, + .cleanup = NULL, + .hw_errata = NULL, + .read_power = NULL, + .check_overflow_errata = NULL, + .read_counts = NULL, + .check_overflow_gp_errata = NULL, + .read_ro = NULL, + .platform_info = NULL, + .trigger_read = hswunc_sa_Trigger_Read, + .scan_for_uncore = NULL, + .read_metrics = NULL }; diff --git a/drivers/platform/x86/sepdk/sep/utility.c b/drivers/platform/x86/sepdk/sep/utility.c new file mode 100755 index 0000000000000..cc4f0cba5e9e7 --- /dev/null +++ b/drivers/platform/x86/sepdk/sep/utility.c @@ -0,0 +1,1157 @@ +/* **************************************************************************** + * Copyright(C) 2009-2018 Intel Corporation. All Rights Reserved. + * + * This file is part of SEP Development Kit + * + * SEP Development Kit is free software; you can redistribute it + * and/or modify it under the terms of the GNU General Public License + * version 2 as published by the Free Software Foundation. 
+ * + * SEP Development Kit is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * As a special exception, you may use this file as part of a free software + * library without restriction. Specifically, if other files instantiate + * templates or use macros or inline functions from this file, or you + * compile this file and link it with other files to produce an executable + * this file does not by itself cause the resulting executable to be + * covered by the GNU General Public License. This exception does not + * however invalidate any other reasons why the executable file might be + * covered by the GNU General Public License. + * **************************************************************************** + */ + +#include "lwpmudrv_defines.h" +#include +#include +#include +#include +#include +#include +#include +#include + +#include "lwpmudrv_defines.h" +#include "lwpmudrv_types.h" +#include "rise_errors.h" +#include "lwpmudrv_ecb.h" +#include "lwpmudrv.h" +#include "control.h" +#include "core2.h" +#include "silvermont.h" +#include "perfver4.h" +#include "valleyview_sochap.h" +#include "unc_gt.h" +#include "haswellunc_sa.h" +#if defined(BUILD_CHIPSET) +#include "chap.h" +#endif +#include "utility.h" +#if defined(BUILD_CHIPSET) +#include "lwpmudrv_chipset.h" +#include "gmch.h" +#endif + +#include "control.h" + +//volatile int config_done; + + +#if defined(BUILD_CHIPSET) +extern CHIPSET_CONFIG pma; +#endif + +VOID UTILITY_down_read_mm(struct mm_struct *mm) +{ + SEP_DRV_LOG_TRACE_IN("Mm: %p.", mm); + + down_read((struct rw_semaphore *)&mm->mmap_sem); + + SEP_DRV_LOG_TRACE_OUT(""); +} + +VOID UTILITY_up_read_mm(struct mm_struct *mm) +{ + SEP_DRV_LOG_TRACE_IN("Mm: %p.", mm); + + up_read((struct rw_semaphore *)&mm->mmap_sem); + + SEP_DRV_LOG_TRACE_OUT(""); +} + +// NOT to be 
instrumented, used inside DRV_LOG! +VOID UTILITY_Read_TSC(U64 *pTsc) +{ + *pTsc = rdtsc_ordered(); +} + +/* ------------------------------------------------------------------------- */ +/*! + * @fn VOID UTILITY_Read_Cpuid + * + * @brief executes the cpuid_function of cpuid and returns values + * + * @param IN cpuid_function + * OUT rax - results of the cpuid instruction in the + * OUT rbx - corresponding registers + * OUT rcx + * OUT rdx + * + * @return none + * + * Special Notes: + * + * + */ +VOID UTILITY_Read_Cpuid(U64 cpuid_function, U64 *rax_value, + U64 *rbx_value, U64 *rcx_value, U64 *rdx_value) +{ + U32 function; + U32 *eax, *ebx, *ecx, *edx; + + SEP_DRV_LOG_TRACE_IN( + "Fn: %llu, rax_p: %p, rbx_p: %p, rcx_p: %p, rdx_p: %p.", + cpuid_function, rax_value, rbx_value, rcx_value, rdx_value); + +#if defined(DRV_SEP_ACRN_ON) + if (cpuid_function != 0x40000000) { + struct profiling_pcpuid pcpuid; + memset(&pcpuid, 0, sizeof(struct profiling_pcpuid)); + pcpuid.leaf = (U32)cpuid_function; + if (rcx_value != NULL) { + pcpuid.subleaf = (U32)*rcx_value; + } + + BUG_ON(!virt_addr_valid(&pcpuid)); + + acrn_hypercall2(HC_PROFILING_OPS, PROFILING_GET_PCPUID, + virt_to_phys(&pcpuid)); + + if (rax_value != NULL) { + *rax_value = pcpuid.eax; + } + if (rbx_value != NULL) { + *rbx_value = pcpuid.ebx; + } + if (rcx_value != NULL) { + *rcx_value = pcpuid.ecx; + } + if (rdx_value != NULL) { + *rdx_value = pcpuid.edx; + } + return; + } +#endif + function = (U32)cpuid_function; + eax = (U32 *)rax_value; + ebx = (U32 *)rbx_value; + ecx = (U32 *)rcx_value; + edx = (U32 *)rdx_value; + + *eax = function; + + __asm__("cpuid" + : "=a"(*eax), "=b"(*ebx), "=c"(*ecx), "=d"(*edx) + : "a"(function), "b"(*ebx), "c"(*ecx), "d"(*edx)); + + SEP_DRV_LOG_TRACE_OUT(""); +} + +/* ------------------------------------------------------------------------- */ +/*! 
+ * @fn VOID UTILITY_Configure_CPU + * + * @brief Reads the CPU information from the hardware + * + * @param param dispatch_id - The id of the dispatch table. + * + * @return Pointer to the correct dispatch table for the CPU architecture + * + * Special Notes: + * + */ +DISPATCH UTILITY_Configure_CPU(U32 dispatch_id) +{ + DISPATCH dispatch = NULL; + + SEP_DRV_LOG_TRACE_IN("Dispatch_id: %u.", dispatch_id); + + switch (dispatch_id) { + case 1: + SEP_DRV_LOG_INIT( + "Set up the Core(TM)2 processor dispatch table."); + dispatch = &core2_dispatch; + break; + case 6: + SEP_DRV_LOG_INIT("Set up the Silvermont dispatch table."); + dispatch = &silvermont_dispatch; + break; + case 7: + SEP_DRV_LOG_INIT( + "Set up the perfver4 HTON dispatch table such as Skylake."); + dispatch = &perfver4_dispatch; + break; + case 8: + SEP_DRV_LOG_INIT( + "Set up the perfver4 HTOFF dispatch table such as Skylake."); + dispatch = &perfver4_dispatch_htoff_mode; + break; + case 11: + SEP_DRV_LOG_INIT( + "Set up the perfver4 NONHT dispatch table such as Icelake."); + dispatch = &perfver4_dispatch_nonht_mode; + break; + case 700: + case 701: + case 1100: + SEP_DRV_LOG_INIT("Set up the Valleyview SA dispatch table."); + dispatch = &valleyview_visa_dispatch; + break; + case 2: + SEP_DRV_LOG_INIT( + "Set up the Core i7(TM) processor dispatch table."); + dispatch = &corei7_dispatch; + break; + case 3: + SEP_DRV_LOG_INIT("Set up the Core i7(TM) dispatch table."); + dispatch = &corei7_dispatch_htoff_mode; + break; + case 4: + SEP_DRV_LOG_INIT( + "Set up the Sandybridge processor dispatch table."); + dispatch = &corei7_dispatch_2; + break; + case 5: + SEP_DRV_LOG_INIT("Set up the Sandybridge dispatch table."); + dispatch = &corei7_dispatch_htoff_mode_2; + break; + case 9: + SEP_DRV_LOG_INIT( + "Set up the Nehalem, Westemere dispatch table."); + dispatch = &corei7_dispatch_nehalem; + break; + case 10: + SEP_DRV_LOG_INIT("Set up the Knights family dispatch table."); + dispatch = &knights_dispatch; + break; 
+ case 100: + SEP_DRV_LOG_INIT("Set up the MSR based uncore dispatch table."); + dispatch = &unc_msr_dispatch; + break; + case 110: + SEP_DRV_LOG_INIT("Set up the PCI Based Uncore dispatch table."); + dispatch = &unc_pci_dispatch; + break; + case 120: + SEP_DRV_LOG_INIT( + "Set up the MMIO based uncore dispatch table."); + dispatch = &unc_mmio_dispatch; + break; + case 121: + SEP_DRV_LOG_INIT( + "Set up the MMIO based uncore dispatch table for FPGA."); + dispatch = &unc_mmio_fpga_dispatch; + break; + case 130: + SEP_DRV_LOG_INIT("Set up the Uncore Power dispatch table."); + dispatch = &unc_power_dispatch; + break; + case 230: + SEP_DRV_LOG_INIT("Set up the Haswell SA dispatch table."); + dispatch = &hswunc_sa_dispatch; + break; + case 400: + SEP_DRV_LOG_INIT("Set up the GT dispatch table."); + dispatch = &unc_gt_dispatch; + break; + default: + dispatch = NULL; + SEP_DRV_LOG_ERROR( + "Architecture not supported (dispatch_id: %d).", + dispatch_id); + break; + } + + SEP_DRV_LOG_TRACE_OUT("Res: %p.", dispatch); + return dispatch; +} + +U64 SYS_MMIO_Read64(U64 baseAddress, U64 offset) +{ + U64 res = 0; +#if defined(DRV_EM64T) + SEP_DRV_LOG_REGISTER_IN("Will read MMIO *(0x%llx + 0x%llx).", + baseAddress, offset); + + if (baseAddress) { + volatile U64 *p = + (U64 *)(baseAddress + offset); // offset is in bytes + res = *p; + } else { + SEP_DRV_LOG_ERROR("BaseAddress is NULL!"); + res = (U64)-1; // typical value for undefined CSR + } + + SEP_DRV_LOG_REGISTER_OUT("Has read MMIO *(0x%llx + 0x%llx): 0x%llx.", + baseAddress, offset, res); +#endif + return res; +} + +U64 SYS_Read_MSR(U32 msr) +{ + U64 val = 0; + +#if defined(DRV_DEBUG_MSR) + int error; + SEP_DRV_LOG_REGISTER_IN("Will safely read MSR 0x%x.", msr); + error = rdmsrl_safe(msr, &val); + if (error) { + SEP_DRV_LOG_ERROR("Failed to read MSR 0x%x.", msr); + } + SEP_DRV_LOG_REGISTER_OUT("Has read MSR 0x%x: 0x%llx (error: %d).", msr, + val, error); +#else + SEP_DRV_LOG_REGISTER_IN("Will read MSR 0x%x.", msr); + 
rdmsrl(msr, val); + SEP_DRV_LOG_REGISTER_OUT("Has read MSR 0x%x: 0x%llx.", msr, val); +#endif + + return val; +} + +void SYS_Write_MSR(U32 msr, U64 val) +{ +#if defined(DRV_DEBUG_MSR) + int error; + SEP_DRV_LOG_REGISTER_IN("Will safely write MSR 0x%x: 0x%llx.", msr, + val); + error = wrmsr_safe(msr, (U32)val, (U32)(val >> 32)); + if (error) { + SEP_DRV_LOG_ERROR("Failed to write MSR 0x%x: 0x%llx.", msr, + val); + } + SEP_DRV_LOG_REGISTER_OUT("Wrote MSR 0x%x: 0x%llx (error: %d).", msr, + val, error); + +#else // !DRV_DEBUG_MSR + SEP_DRV_LOG_REGISTER_IN("Will write MSR 0x%x: 0x%llx.", msr, val); +#if defined(DRV_IA32) + wrmsr(msr, (U32)val, (U32)(val >> 32)); +#endif +#if defined(DRV_EM64T) + wrmsrl(msr, val); +#endif + SEP_DRV_LOG_REGISTER_OUT("Wrote MSR 0x%x: 0x%llx.", msr, val); + +#endif // !DRV_DEBUG_MSR +} + +#if defined(BUILD_CHIPSET) +/* ------------------------------------------------------------------------- */ +/*! + * @fn VOID UTILITY_Configure_Chipset + * + * @brief Configures the chipset information + * + * @param none + * + * @return none + * + * Special Notes: + * + */ +CS_DISPATCH UTILITY_Configure_Chipset(void) +{ + SEP_DRV_LOG_TRACE_IN(""); + + if (CHIPSET_CONFIG_gmch_chipset(pma)) { + cs_dispatch = &gmch_dispatch; + SEP_DRV_LOG_INIT("Using GMCH dispatch table."); + } else if (CHIPSET_CONFIG_mch_chipset(pma) || + CHIPSET_CONFIG_ich_chipset(pma)) { + cs_dispatch = &chap_dispatch; + SEP_DRV_LOG_INIT("Using CHAP dispatch table."); + } else { + SEP_DRV_LOG_ERROR("Unable to map chipset dispatch table!"); + } + + SEP_DRV_LOG_TRACE_OUT("Res: %p.", cs_dispatch); + return cs_dispatch; +} + +#endif + +#if LINUX_VERSION_CODE == KERNEL_VERSION(2, 6, 32) +static unsigned long utility_Compare_Symbol_Names_Return_Value; +/* ------------------------------------------------------------------------- */ +/*! 
+ * @fn static int utility_Compare_Symbol_Names (void* ref_name, + * const char* symbol_name, struct module* dummy, unsigned long symbol_address) + * + * @brief Comparator for kallsyms_on_each_symbol. + * + * @param + * void * ref_name : Symbol we are looking for + * const char * symbol_name : Name of the current symbol being evaluated + * struct module* dummy : Pointer to the module structure. Not needed. + * unsigned long symbol_address : Address of the current symbol being evaluated + * + * @return 1 if ref_name matches symbol_name, 0 otherwise. + * Fills utility_Compare_Symbol_Names_Return_Value with the symbol's address + * on success. + * + * Special Notes: + * Only used as a callback comparator for kallsyms_on_each_symbol. + */ +static int utility_Compare_Symbol_Names(void *ref_name, const char *symbol_name, + struct module *dummy, + unsigned long symbol_address) +{ + int res = 0; + + SEP_DRV_LOG_TRACE_IN( + "Ref_name: %p, symbol_name: %p, dummy: %p, symbol_address: %u.", + ref_name, symbol_name, dummy, symbol_address); + + if (strcmp((char *)ref_name, symbol_name) == 0) { + utility_Compare_Symbol_Names_Return_Value = symbol_address; + res = 1; + } + + SEP_DRV_LOG_TRACE_OUT("Res: %u.", res); + return res; +} +#endif + +/* ------------------------------------------------------------------------- */ +/*! + * @fn extern unsigned long UTILITY_Find_Symbol (const char* name) + * + * @brief Finds the address of the specified kernel symbol. + * + * @param const char* name - name of the symbol to look for + * + * @return Symbol address (0 if could not find) + * + * Special Notes: + * This wrapper is needed due to kallsyms_lookup_name not being exported + * in kernel version 2.6.32.*. + * Careful! This code is *NOT* multithread-safe or reentrant! Should only + * be called from 1 context at a time! 
+ */ +unsigned long UTILITY_Find_Symbol(const char *name) +{ + unsigned long res = 0; + + SEP_DRV_LOG_TRACE_IN("Name: %p.", name); + // Not printing the name to follow the log convention: *must not* + // dereference any pointer in an 'IN' message + +#if LINUX_VERSION_CODE == KERNEL_VERSION(2, 6, 32) + if (kallsyms_on_each_symbol(utility_Compare_Symbol_Names, + (void *)name)) { + res = utility_Compare_Symbol_Names_Return_Value; + } +#else + res = kallsyms_lookup_name(name); +#endif + + SEP_DRV_LOG_INIT("Name: '%s': 0x%llx.", name ? name : "NULL", + (unsigned long long)res); + // Printing here instead. (Paranoia in case of corrupt pointer.) + + SEP_DRV_LOG_TRACE_OUT("Res: 0x%llx.", (unsigned long long)res); + return res; +} + +/* + ************************************ + * DRIVER LOG BUFFER DECLARATIONS * + ************************************ + */ + +volatile U8 active_ioctl; + +DRV_LOG_BUFFER driver_log_buffer; + +static const char *drv_log_categories[DRV_NB_LOG_CATEGORIES] = { + "load", "init", "detection", "error", "state change", + "mark", "debug", "flow", "alloc", "interrupt", + "trace", "register", "notification", "warning" +}; + +#define DRV_LOG_NB_DRIVER_STATES 9 +static const char *drv_log_states[DRV_LOG_NB_DRIVER_STATES] = { + "Uninitialized", "Reserved", "Idle", "Paused", "Stopped", + "Running", "Pausing", "Prepare_Stop", "Terminating" +}; + +/* ------------------------------------------------------------------------- */ +/*! + * @fn static VOID utility_Driver_Log_Kprint_Helper + * (U8 category, char** category_string, + * U8 secondary, char** secondary_string_1, + * char** secondary_string_2, char** secondary_string_3, + * char** secondary_string_4) + * + * @brief Helper function for printing log messages to the system log. 
+ *
+ * @param IN category - message category
+ * IN/OUT category_string - location where to place a pointer
+ * to the category's name
+ * IN secondary - secondary field value for the message
+ * IN/OUT secondary_string_1 - location where to place a pointer to
+ * the 1st part of the secondary info's decoded information
+ * IN/OUT secondary_string_2 - location where to place a pointer to
+ * the 2nd part of the secondary info's decoded information
+ * IN/OUT secondary_string_3 - location where to place a pointer to
+ * the 3rd part of the secondary info's decoded information
+ * IN/OUT secondary_string_4 - location where to place a pointer to
+ * the 4th part of the secondary info's decoded information
+ *
+ * @return none
+ *
+ * Special Notes:
+ * Allows a single format string to be used for all categories (instead of
+ * category-specific format strings) when calling printk, simplifying the
+ * print routine and reducing potential errors. There is a performance cost to
+ * this approach (forcing printk to process empty strings), but it
+ * should be dwarfed by the cost of calling printk in the first place.
+ * NB: none of the input string pointers may be NULL!
+ */
+static VOID utility_Driver_Log_Kprint_Helper(
+	U8 category, char **category_string, U8 secondary,
+	char **secondary_string_1, char **secondary_string_2,
+	char **secondary_string_3, char **secondary_string_4)
+{
+	if (category >= DRV_NB_LOG_CATEGORIES) {
+		*category_string = "Unknown category";
+	} else {
+		*category_string = (char *)drv_log_categories[category];
+	}
+
+	*secondary_string_1 = "";
+	*secondary_string_2 = "";
+	*secondary_string_3 = "";
+	*secondary_string_4 = "";
+
+	switch (category) {
+	case DRV_LOG_CATEGORY_FLOW:
+	case DRV_LOG_CATEGORY_TRACE:
+	case DRV_LOG_CATEGORY_INTERRUPT:
+		// we should *never* be kprinting from an interrupt context...
+ if (secondary != DRV_LOG_NOTHING) { + *secondary_string_1 = ", "; + if (secondary == DRV_LOG_FLOW_IN) { + *secondary_string_2 = "Entering"; + } else if (secondary == DRV_LOG_FLOW_OUT) { + *secondary_string_2 = "Leaving"; + } + } + break; + case DRV_LOG_CATEGORY_STATE_CHANGE: { + U8 orig_state, dest_state; + + orig_state = (secondary & 0xF0) >> 4; + dest_state = secondary & 0x0F; + + *secondary_string_1 = ", "; + + if (orig_state < DRV_LOG_NB_DRIVER_STATES) { + *secondary_string_2 = + (char *)drv_log_states[orig_state]; + } else { + *secondary_string_2 = "Unknown_state"; + } + + *secondary_string_3 = " -> "; + + if (dest_state < DRV_LOG_NB_DRIVER_STATES) { + *secondary_string_4 = + (char *)drv_log_states[dest_state]; + } else { + *secondary_string_4 = "Unknown_state"; + } + } break; + + default: + break; + } +} + +/* ------------------------------------------------------------------------- */ +/*! + * @fn static inline VOID utility_Log_Write ( + * U8 destination, U8 category, U8 secondary, + * const char* function_name, U32 func_name_len, + * U32 line_number, U64 tsc, U8 ioctl, U16 processor_id, + * U8 driver_state, U16 nb_active_interrupts, + * U16 nb_active_notifications, + * const char* format_string, ...) + * + * @brief Checks whether and where the message should be logged, and logs + * it as appropriate. 
+ * + * @param + * U8 destination - whether to write to the primary (0) + * or the auxiliary log buffer (1) + * U8 category - message category + * U8 secondary - secondary information field for the message + * const char* function_name - name of the calling function + * U32 func_name_len - length of the name of the calling function + * (more efficient to pass it as parameter than finding it back at runtime) + * U32 line_number - line number of the call site + * U64 tsc - time stamp value to use + * U8 ioctl - current active ioctl + * U16 processor_id - id of the active core/thread + * U8 driver_state - current driver state + * U16 nb_active_interrupts - number of interrupts currently being + * processed + * U16 nb_active_notifications - number of notifications currently being + * processed + * const char* format_string - classical format string for printf-like funcs + * ... - elements to print + * + * @return none + * + * Special Notes: + * Writes the specified message to the specified log buffer. + * The order of writes (integrity tag at the beginning, overflow tag at + * the very end) matters to ensure the logged information can be detected + * to be only partially written if applicable). Much of the needed information + * (active core, driver state, tsc..) is passed through the stack (instead of + * obtained inside utility_Log_Write) to guarantee entries representing the + * same message (or log call) in different channels use consistent information, + * letting the decoder reliably identify duplicates. 
+ */
+static inline VOID utility_Log_Write(U8 destination, U8 category, U8 secondary,
+				     const char *function_name,
+				     U32 function_name_length, U32 line_number,
+				     U64 tsc, U8 ioctl, U16 processor_id,
+				     U8 driver_state, U16 nb_active_interrupts,
+				     U16 nb_active_notifications,
+				     const char *format_string, va_list args)
+{
+	U32 entry_id;
+	U16 overflow_tag;
+	DRV_LOG_ENTRY entry;
+	char *target_func_buffer;
+	U32 local_func_name_length;
+	U32 i;
+
+	if (destination == 0) { // primary buffer
+		entry_id = __sync_add_and_fetch(
+			&DRV_LOG_BUFFER_pri_entry_index(DRV_LOG()), 1);
+		overflow_tag = (U16)(entry_id / DRV_LOG_MAX_NB_PRI_ENTRIES);
+		entry = DRV_LOG_BUFFER_entries(DRV_LOG()) +
+			entry_id % DRV_LOG_MAX_NB_PRI_ENTRIES;
+	} else { // auxiliary buffer, laid out right after the primary entries
+		entry_id = __sync_add_and_fetch(
+			&DRV_LOG_BUFFER_aux_entry_index(DRV_LOG()), 1);
+		overflow_tag = (U16)(entry_id / DRV_LOG_MAX_NB_AUX_ENTRIES);
+		entry = DRV_LOG_BUFFER_entries(DRV_LOG()) +
+			DRV_LOG_MAX_NB_PRI_ENTRIES +
+			entry_id % DRV_LOG_MAX_NB_AUX_ENTRIES;
+	}
+
+	// integrity tag first (see Special Notes above: write order matters)
+	DRV_LOG_COMPILER_MEM_BARRIER();
+	DRV_LOG_ENTRY_integrity_tag(entry) = overflow_tag;
+	DRV_LOG_COMPILER_MEM_BARRIER();
+
+	if (format_string &&
+	    *format_string) { // setting this one first to try to increase MLP
+		DRV_VSNPRINTF(DRV_LOG_ENTRY_message(entry),
+			      DRV_LOG_MESSAGE_LENGTH, DRV_LOG_MESSAGE_LENGTH,
+			      format_string, args);
+	} else {
+		DRV_LOG_ENTRY_message(entry)[0] = 0;
+	}
+
+	target_func_buffer = DRV_LOG_ENTRY_function_name(entry);
+	local_func_name_length =
+		function_name_length < DRV_LOG_FUNCTION_NAME_LENGTH ?
+			function_name_length :
+			DRV_LOG_FUNCTION_NAME_LENGTH;
+	/*
+	 * NB: the loop condition is 'i + 1 < len' rather than 'i < len - 1':
+	 * with U32 arithmetic, 'len - 1' underflows to 0xFFFFFFFF when the
+	 * caller passes function_name_length == 0, which would overflow the
+	 * destination buffer. 'i + 1 < len' is safe for len == 0 (copies
+	 * nothing and just NUL-terminates).
+	 */
+	for (i = 0; i + 1 < local_func_name_length; i++) {
+		target_func_buffer[i] = function_name[i];
+	}
+	target_func_buffer[i] = 0;
+
+	DRV_LOG_ENTRY_category(entry) = category;
+	DRV_LOG_ENTRY_secondary_info(entry) = secondary;
+	DRV_LOG_ENTRY_line_number(entry) = line_number;
+	DRV_LOG_ENTRY_active_drv_operation(entry) = ioctl;
+	DRV_LOG_ENTRY_processor_id(entry) = processor_id;
+	DRV_LOG_ENTRY_driver_state(entry) = driver_state;
+	DRV_LOG_ENTRY_nb_active_interrupts(entry) = nb_active_interrupts;
+	DRV_LOG_ENTRY_nb_active_notifications(entry) = nb_active_notifications;
+	DRV_LOG_ENTRY_tsc(entry) = tsc;
+
+	// temporal tag last: lets the decoder detect partially-written entries
+	DRV_LOG_COMPILER_MEM_BARRIER();
+	DRV_LOG_ENTRY_temporal_tag(entry) = overflow_tag;
+	DRV_LOG_COMPILER_MEM_BARRIER();
+}
+
+/* ------------------------------------------------------------------------- */
+/*!
+ * @fn extern void UTILITY_Log (U8 category, U8 in_notification, U8 secondary,
+ *                  const char* function_name, U32 func_name_len,
+ *                  U32 line_number, const char* format_string, ...)
+ *
+ * @brief Checks whether and where the message should be logged,
+ * and logs it as appropriate.
+ *
+ * @param
+ * U8 category - message category
+ * U8 in_notification - whether or not we are in a notification/OS
+ * callback context (this information cannot be reliably obtained without
+ * passing it through the stack)
+ * U8 secondary - secondary information field for the message
+ * const char* function_name - name of the calling function
+ * U32 func_name_len - length of the name of the calling function
+ * (more efficient to pass it as parameter than finding it back at runtime)
+ * U32 line_number - line number of the call site
+ * const char* format_string - classical format string for printf-like
+ * ... functions elements to print
+ *
+ * @return none
+ *
+ * Special Notes:
+ * Takes a snapshot of various elements (TSC, driver state, etc.)
to ensure + * a single log call writes consistent information to all applicable channels + * (i.e. favoring consistency over instantaneous accuracy). + * See utility_Log_Write for details. + */ +VOID UTILITY_Log(U8 category, U8 in_notification, U8 secondary, + const char *function_name, U32 func_name_len, + U32 line_number, const char *format_string, ...) +{ + U64 tsc_snapshot; + U8 ioctl_snapshot; + U8 driver_state_snapshot; + U16 processor_id_snapshot; + U16 nb_active_interrupts_snapshot; + U16 nb_active_notifications_snapshot; + U8 category_verbosity; + U8 in_interrupt; + U8 is_enabled; + va_list args; + U32 i; + + category_verbosity = DRV_LOG_VERBOSITY(category); + processor_id_snapshot = raw_smp_processor_id(); + in_interrupt = ((pcb && atomic_read(&CPU_STATE_in_interrupt( + &pcb[processor_id_snapshot]))) + + (category == DRV_LOG_CATEGORY_INTERRUPT)); + is_enabled = + in_interrupt * !!(category_verbosity & LOG_CONTEXT_INTERRUPT) + + in_notification * + !!(category_verbosity & LOG_CONTEXT_NOTIFICATION) + + (!in_interrupt * !in_notification) * + !!(category_verbosity & LOG_CONTEXT_REGULAR); + + if (!is_enabled) { + return; + } + + ioctl_snapshot = active_ioctl; + driver_state_snapshot = GET_DRIVER_STATE(); + nb_active_interrupts_snapshot = + DRV_LOG_BUFFER_nb_active_interrupts(DRV_LOG()); + nb_active_notifications_snapshot = + DRV_LOG_BUFFER_nb_active_notifications(DRV_LOG()); + UTILITY_Read_TSC(&tsc_snapshot); + + va_start(args, format_string); + + for (i = 0; i < 2; i++) { + if (category_verbosity & (1 << i)) { + va_list args_copy; + + va_copy(args_copy, args); + utility_Log_Write( + i, category, secondary, function_name, + func_name_len, line_number, + tsc_snapshot, ioctl_snapshot, + processor_id_snapshot, + driver_state_snapshot, + nb_active_interrupts_snapshot, + nb_active_notifications_snapshot, + format_string, args_copy); + va_end(args_copy); + } + } + if (category_verbosity & LOG_CHANNEL_PRINTK || + category_verbosity & LOG_CHANNEL_TRACEK) { +#define 
DRV_LOG_DEBUG_ARRAY_SIZE 512 + char tmp_array[DRV_LOG_DEBUG_ARRAY_SIZE]; + U32 nb_written_characters; + char *category_s, *sec1_s, *sec2_s, *sec3_s, *sec4_s; + va_list args_copy; + + utility_Driver_Log_Kprint_Helper(category, &category_s, + secondary, &sec1_s, + &sec2_s, &sec3_s, + &sec4_s); + + nb_written_characters = DRV_SNPRINTF( + tmp_array, DRV_LOG_DEBUG_ARRAY_SIZE - 1, + DRV_LOG_DEBUG_ARRAY_SIZE - 1, + SEP_MSG_PREFIX " [%s%s%s%s%s] [%s@%d]: ", + category_s, sec1_s, sec2_s, sec3_s, sec4_s, + function_name, line_number); + + if (nb_written_characters > 0) { + va_copy(args_copy, args); + nb_written_characters += DRV_VSNPRINTF( + tmp_array + nb_written_characters, + DRV_LOG_DEBUG_ARRAY_SIZE - + nb_written_characters - 1, + DRV_LOG_DEBUG_ARRAY_SIZE - + nb_written_characters - 1, + format_string, args_copy); + va_end(args_copy); +#undef DRV_LOG_DEBUG_ARRAY_SIZE + + tmp_array[nb_written_characters++] = '\n'; + tmp_array[nb_written_characters++] = '\0'; + + if ((category_verbosity & LOG_CHANNEL_PRINTK) * + !in_interrupt * !in_notification) { + if (!in_atomic()) { + switch (category) { + case DRV_LOG_CATEGORY_ERROR: + pr_err("%s", tmp_array); + break; + case DRV_LOG_CATEGORY_WARNING: + pr_debug("%s", tmp_array); + break; + default: + pr_info("%s", tmp_array); + break; + } + } + } + + if (category_verbosity & LOG_CHANNEL_TRACEK) { + trace_printk("%s", tmp_array); + } + } + } + + va_end(args); +} + +/* ------------------------------------------------------------------------- */ +/*! + * @fn extern DRV_STATUS UTILITY_Driver_Log_Init (void) + * + * @brief Allocates and initializes the driver log buffer. + * + * @param none + * + * @return OS_SUCCESS on success, OS_NO_MEM on error. + * + * Special Notes: + * Should be (successfully) run before any non-LOAD log calls. + * Allocates memory without going through CONTROL_Allocate (to avoid + * complicating the instrumentation of CONTROL_* functions): calling + * UTILITY_Driver_Log_Free is necessary to free the log structure. 
+ * Falls back to vmalloc when contiguous physical memory cannot be + * allocated. This does not impact runtime behavior, but may impact + * the easiness of retrieving the log from a core dump if the system + * crashes. + */ +DRV_STATUS UTILITY_Driver_Log_Init(void) +{ + struct timespec cur_time; + U32 size = sizeof(*driver_log_buffer); + U8 using_contiguous_physical_memory; + U32 bitness; + + if (size < MAX_KMALLOC_SIZE) { + // allocating outside regular func to restrict area of driver + driver_log_buffer = (PVOID)kmalloc( + size, + GFP_KERNEL); // where the log might not be initialized + } else { + driver_log_buffer = + (PVOID)__get_free_pages(GFP_KERNEL, get_order(size)); + } + + if (driver_log_buffer) { + using_contiguous_physical_memory = 1; + } else { + driver_log_buffer = vmalloc(size); + + if (!driver_log_buffer) { + return OS_NO_MEM; + } + + using_contiguous_physical_memory = 0; + } + + memset(driver_log_buffer, DRV_LOG_FILLER_BYTE, + sizeof(*driver_log_buffer)); + // we don't want zero-filled pages + // (so that the buffer's pages don't get omitted in some crash dumps) + + DRV_LOG_COMPILER_MEM_BARRIER(); + DRV_LOG_BUFFER_header_signature(driver_log_buffer)[0] = + DRV_LOG_SIGNATURE_0; + DRV_LOG_BUFFER_footer_signature(driver_log_buffer)[0] = + DRV_LOG_SIGNATURE_6; + DRV_LOG_BUFFER_header_signature(driver_log_buffer)[3] = + DRV_LOG_SIGNATURE_3; + DRV_LOG_BUFFER_footer_signature(driver_log_buffer)[3] = + DRV_LOG_SIGNATURE_3; + + DRV_LOG_COMPILER_MEM_BARRIER(); + DRV_LOG_BUFFER_header_signature(driver_log_buffer)[2] = + DRV_LOG_SIGNATURE_2; + DRV_LOG_BUFFER_footer_signature(driver_log_buffer)[2] = + DRV_LOG_SIGNATURE_4; + DRV_LOG_BUFFER_header_signature(driver_log_buffer)[1] = + DRV_LOG_SIGNATURE_1; + DRV_LOG_BUFFER_footer_signature(driver_log_buffer)[1] = + DRV_LOG_SIGNATURE_5; + + DRV_LOG_COMPILER_MEM_BARRIER(); + DRV_LOG_BUFFER_header_signature(driver_log_buffer)[7] = + DRV_LOG_SIGNATURE_7; + DRV_LOG_BUFFER_footer_signature(driver_log_buffer)[7] = + 
DRV_LOG_SIGNATURE_7; + DRV_LOG_BUFFER_header_signature(driver_log_buffer)[5] = + DRV_LOG_SIGNATURE_5; + DRV_LOG_BUFFER_footer_signature(driver_log_buffer)[5] = + DRV_LOG_SIGNATURE_1; + + DRV_LOG_COMPILER_MEM_BARRIER(); + DRV_LOG_BUFFER_header_signature(driver_log_buffer)[6] = + DRV_LOG_SIGNATURE_6; + DRV_LOG_BUFFER_footer_signature(driver_log_buffer)[6] = + DRV_LOG_SIGNATURE_0; + DRV_LOG_BUFFER_header_signature(driver_log_buffer)[4] = + DRV_LOG_SIGNATURE_4; + DRV_LOG_BUFFER_footer_signature(driver_log_buffer)[4] = + DRV_LOG_SIGNATURE_2; + + DRV_LOG_BUFFER_log_size(driver_log_buffer) = sizeof(*driver_log_buffer); + DRV_LOG_BUFFER_max_nb_pri_entries(driver_log_buffer) = + DRV_LOG_MAX_NB_PRI_ENTRIES; + DRV_LOG_BUFFER_max_nb_aux_entries(driver_log_buffer) = + DRV_LOG_MAX_NB_AUX_ENTRIES; + getnstimeofday(&cur_time); + DRV_LOG_BUFFER_init_time(driver_log_buffer) = cur_time.tv_sec; + DRV_LOG_BUFFER_disambiguator(driver_log_buffer) = 0; + DRV_LOG_BUFFER_log_version(driver_log_buffer) = DRV_LOG_VERSION; + DRV_LOG_BUFFER_pri_entry_index(driver_log_buffer) = (U32)((S32)-1); + DRV_LOG_BUFFER_aux_entry_index(driver_log_buffer) = (U32)((S32)-1); + +#if defined(DRV_EM64T) + bitness = 64; +#else + bitness = 32; +#endif + + DRV_SNPRINTF(DRV_LOG_BUFFER_driver_version(driver_log_buffer), + DRV_LOG_DRIVER_VERSION_SIZE, DRV_LOG_DRIVER_VERSION_SIZE, + "[%u-bit Linux] SEP v%d.%d (update %d). 
API %d.", bitness, + SEP_MAJOR_VERSION, SEP_MINOR_VERSION, SEP_UPDATE_VERSION, + SEP_API_VERSION); + + DRV_LOG_BUFFER_driver_state(driver_log_buffer) = GET_DRIVER_STATE(); + DRV_LOG_BUFFER_active_drv_operation(driver_log_buffer) = active_ioctl; + DRV_LOG_BUFFER_nb_drv_operations(driver_log_buffer) = 0; + DRV_LOG_BUFFER_nb_interrupts(driver_log_buffer) = 0; + DRV_LOG_BUFFER_nb_active_interrupts(driver_log_buffer) = 0; + DRV_LOG_BUFFER_nb_notifications(driver_log_buffer) = 0; + DRV_LOG_BUFFER_nb_active_notifications(driver_log_buffer) = 0; + DRV_LOG_BUFFER_nb_driver_state_transitions(driver_log_buffer) = 0; + + DRV_LOG_VERBOSITY(DRV_LOG_CATEGORY_LOAD) = + DRV_LOG_DEFAULT_LOAD_VERBOSITY; + DRV_LOG_VERBOSITY(DRV_LOG_CATEGORY_INIT) = + DRV_LOG_DEFAULT_INIT_VERBOSITY; + DRV_LOG_VERBOSITY(DRV_LOG_CATEGORY_DETECTION) = + DRV_LOG_DEFAULT_DETECTION_VERBOSITY; + DRV_LOG_VERBOSITY(DRV_LOG_CATEGORY_ERROR) = + DRV_LOG_DEFAULT_ERROR_VERBOSITY; + DRV_LOG_VERBOSITY(DRV_LOG_CATEGORY_STATE_CHANGE) = + DRV_LOG_DEFAULT_STATE_CHANGE_VERBOSITY; + DRV_LOG_VERBOSITY(DRV_LOG_CATEGORY_MARK) = + DRV_LOG_DEFAULT_MARK_VERBOSITY; + DRV_LOG_VERBOSITY(DRV_LOG_CATEGORY_DEBUG) = + DRV_LOG_DEFAULT_DEBUG_VERBOSITY; + DRV_LOG_VERBOSITY(DRV_LOG_CATEGORY_FLOW) = + DRV_LOG_DEFAULT_FLOW_VERBOSITY; + DRV_LOG_VERBOSITY(DRV_LOG_CATEGORY_ALLOC) = + DRV_LOG_DEFAULT_ALLOC_VERBOSITY; + DRV_LOG_VERBOSITY(DRV_LOG_CATEGORY_INTERRUPT) = + DRV_LOG_DEFAULT_INTERRUPT_VERBOSITY; + DRV_LOG_VERBOSITY(DRV_LOG_CATEGORY_TRACE) = + DRV_LOG_DEFAULT_TRACE_VERBOSITY; + DRV_LOG_VERBOSITY(DRV_LOG_CATEGORY_REGISTER) = + DRV_LOG_DEFAULT_REGISTER_VERBOSITY; + DRV_LOG_VERBOSITY(DRV_LOG_CATEGORY_NOTIFICATION) = + DRV_LOG_DEFAULT_NOTIFICATION_VERBOSITY; + DRV_LOG_VERBOSITY(DRV_LOG_CATEGORY_WARNING) = + DRV_LOG_DEFAULT_WARNING_VERBOSITY; + + DRV_LOG_BUFFER_contiguous_physical_memory(driver_log_buffer) = + using_contiguous_physical_memory; + + SEP_DRV_LOG_LOAD( + "Initialized driver log using %scontiguous physical memory.", + 
DRV_LOG_BUFFER_contiguous_physical_memory(driver_log_buffer) ? + "" : + "non-"); + + return OS_SUCCESS; +} + +/* ------------------------------------------------------------------------- */ +/*! + * @fn extern DRV_STATUS UTILITY_Driver_Log_Free (void) + * + * @brief Frees the driver log buffer. + * + * @param none + * + * @return OS_SUCCESS on success, OS_NO_MEM on error. + * + * Special Notes: + * Should be done before unloading the driver. + * See UTILITY_Driver_Log_Init for details. + */ +void UTILITY_Driver_Log_Free(void) +{ + U32 size = sizeof(*driver_log_buffer); + + if (driver_log_buffer) { + if (DRV_LOG_BUFFER_contiguous_physical_memory( + driver_log_buffer)) { + if (size < MAX_KMALLOC_SIZE) { + kfree(driver_log_buffer); + } else { + free_pages((unsigned long)driver_log_buffer, + get_order(size)); + } + } else { + vfree(driver_log_buffer); + } + + driver_log_buffer = NULL; + } +} + +/* ------------------------------------------------------------------------- */ +/*! + * @fn extern void UTILITY_Driver_Set_Active_Ioctl (U32 ioctl) + * + * @brief Sets the 'active_ioctl' global to the specified value. + * + * @param U32 ioctl - ioctl/drvop code to use + * + * @return none + * + * Special Notes: + * Used to keep track of the IOCTL operation currently being processed. + * This information is saved in the log buffer (globally), as well as + * in every log entry. + * NB: only IOCTLs for which grabbing the ioctl mutex is necessary + * should be kept track of this way. + */ +void UTILITY_Driver_Set_Active_Ioctl(U32 ioctl) +{ + active_ioctl = ioctl; + if (ioctl) { + DRV_LOG_BUFFER_nb_drv_operations(driver_log_buffer)++; + } +} + +/* ------------------------------------------------------------------------- */ +/*! 
+ * @fn extern const char** UTILITY_Log_Category_Strings (void) + * + * @brief Accessor function for the log category string array + * + * @param none + * + * @return none + * + * Special Notes: + * Only needed for cosmetic purposes when adjusting category verbosities. + */ +const char **UTILITY_Log_Category_Strings(void) +{ + return drv_log_categories; +} + +/* ------------------------------------------------------------------------- */ +/*! + * @fn extern U32 UTILITY_Change_Driver_State (U32 allowed_prior_states, + * U32 state, const char* func, U32 line_number) + * + * @brief Updates the driver state (if the transition is legal). + * + * @param U32 allowed_prior_states - the bitmask representing the states + * from which the transition is allowed to occur + * U32 state - the destination state + * const char* func - the callsite's function's name + * U32 line_number - the callsite's line number + * + * @return 1 in case of success, 0 otherwise + * + * Special Notes: + * + */ +U32 UTILITY_Change_Driver_State(U32 allowed_prior_states, U32 state, + const char *func, U32 line_number) +{ + U32 res = 1; + U32 previous_state; + U32 current_state = GET_DRIVER_STATE(); + U32 nb_attempts = 0; + + SEP_DRV_LOG_TRACE_IN( + "Prior states: 0x%x, state: %u, func: %p, line: %u.", + allowed_prior_states, state, func, line_number); + + if (state >= DRV_LOG_NB_DRIVER_STATES) { + SEP_DRV_LOG_ERROR("Illegal destination state %d (%s@%u)!", + state, func, line_number); + res = 0; + goto clean_return; + } + + do { + previous_state = current_state; + nb_attempts++; + SEP_DRV_LOG_TRACE("Attempt #%d to transition to state %s.", + nb_attempts, drv_log_states[state]); + + if (DRIVER_STATE_IN(current_state, allowed_prior_states)) { + current_state = cmpxchg(&GET_DRIVER_STATE(), + previous_state, state); + } else { + SEP_DRV_LOG_ERROR( + "Invalid transition [%s -> %s] (%s@%u)!", + drv_log_states[previous_state], + drv_log_states[state], func, line_number); + res = 0; + goto clean_return; + } + 
+ } while (previous_state != current_state); + + SEP_DRV_LOG_STATE_TRANSITION(previous_state, state, "From %s@%u.", func, + line_number); + +clean_return: + SEP_DRV_LOG_TRACE_OUT("Res: %u.", res); + return res; +} diff --git a/drivers/platform/x86/sepdk/sep/valleyview_sochap.c b/drivers/platform/x86/sepdk/sep/valleyview_sochap.c new file mode 100755 index 0000000000000..7e1e5eb9c65f9 --- /dev/null +++ b/drivers/platform/x86/sepdk/sep/valleyview_sochap.c @@ -0,0 +1,301 @@ +/* **************************************************************************** + * Copyright(C) 2009-2018 Intel Corporation. All Rights Reserved. + * + * This file is part of SEP Development Kit + * + * SEP Development Kit is free software; you can redistribute it + * and/or modify it under the terms of the GNU General Public License + * version 2 as published by the Free Software Foundation. + * + * SEP Development Kit is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * As a special exception, you may use this file as part of a free software + * library without restriction. Specifically, if other files instantiate + * templates or use macros or inline functions from this file, or you + * compile this file and link it with other files to produce an executable + * this file does not by itself cause the resulting executable to be + * covered by the GNU General Public License. This exception does not + * however invalidate any other reasons why the executable file might be + * covered by the GNU General Public License. 
+ * **************************************************************************** + */ + +#include "lwpmudrv_defines.h" +#include "lwpmudrv_types.h" +#include "lwpmudrv_ecb.h" +#include "lwpmudrv_struct.h" + +#include "inc/ecb_iterators.h" +#include "inc/control.h" +#include "inc/utility.h" +#include "inc/valleyview_sochap.h" + +static U64 *uncore_current_data; +static U64 *uncore_to_read_data; +extern DRV_CONFIG drv_cfg; + +#if 0 +extern U64 *read_counter_info; +extern VOID SOCPERF_Read_Data3(PVOID data_buffer); +#endif + +/*! + * @fn static VOID valleyview_VISA_Initialize(PVOID) + * + * @brief Initialize any registers or addresses + * + * @param param + * + * @return None + * + * Special Notes: + */ +static VOID valleyview_VISA_Initialize(VOID *param) +{ + SEP_DRV_LOG_TRACE_IN("Param: %p.", param); + + // Allocate memory for reading GMCH counter values + the group id + if (!uncore_current_data) { + uncore_current_data = CONTROL_Allocate_Memory( + (VLV_CHAP_MAX_COUNTERS + 1) * sizeof(U64)); + if (!uncore_current_data) { + SEP_DRV_LOG_ERROR_TRACE_OUT( + "Early exit (uncore_current_data is NULL!)."); + return; + } + } + if (!uncore_to_read_data) { + uncore_to_read_data = CONTROL_Allocate_Memory( + (VLV_CHAP_MAX_COUNTERS + 1) * sizeof(U64)); + if (!uncore_to_read_data) { + SEP_DRV_LOG_ERROR_TRACE_OUT( + "Early exit (uncore_to_read_data is NULL!)."); + return; + } + } + + SEP_DRV_LOG_TRACE_OUT(""); +} + +/*! 
+ * @fn static VOID valleyview_VISA_Enable_PMU(PVOID) + * + * @brief Start counting + * + * @param param - device index + * + * @return None + * + * Special Notes: + */ +static VOID valleyview_VISA_Enable_PMU(PVOID param) +{ + U32 this_cpu; + CPU_STATE pcpu; + + SEP_DRV_LOG_TRACE_IN("Param: %p.", param); + + this_cpu = CONTROL_THIS_CPU(); + pcpu = &pcb[this_cpu]; + + if (!CPU_STATE_system_master(pcpu)) { + SEP_DRV_LOG_TRACE_OUT("Early exit (!system_master)."); + return; + } + + SEP_DRV_LOG_TRACE("Starting the counters..."); + if (uncore_current_data) { + memset(uncore_current_data, 0, + (VLV_CHAP_MAX_COUNTERS + 1) * sizeof(U64)); + } + if (uncore_to_read_data) { + memset(uncore_to_read_data, 0, + (VLV_CHAP_MAX_COUNTERS + 1) * sizeof(U64)); + } + + SEP_DRV_LOG_TRACE_OUT(""); +} + +/*! + * @fn static VOID valleyview_VISA_Disable_PMU(PVOID) + * + * @brief Unmap the virtual address when sampling/driver stops + * + * @param param - device index + * + * @return None + * + * Special Notes: + */ +static VOID valleyview_VISA_Disable_PMU(PVOID param) +{ + U32 this_cpu; + CPU_STATE pcpu; + U32 cur_driver_state; + + SEP_DRV_LOG_TRACE_IN("Param: %p.", param); + + this_cpu = CONTROL_THIS_CPU(); + pcpu = &pcb[this_cpu]; + cur_driver_state = GET_DRIVER_STATE(); + + if (!CPU_STATE_system_master(pcpu)) { + SEP_DRV_LOG_TRACE_OUT("Early exit (!system_master)."); + return; + } + SEP_DRV_LOG_TRACE("Stopping the counters..."); + if (cur_driver_state == DRV_STATE_PREPARE_STOP || + cur_driver_state == DRV_STATE_STOPPED || + cur_driver_state == DRV_STATE_TERMINATING) { + uncore_current_data = CONTROL_Free_Memory(uncore_current_data); + uncore_to_read_data = CONTROL_Free_Memory(uncore_to_read_data); + } + + SEP_DRV_LOG_TRACE_OUT(""); +} + +/*! 
+ * @fn static VOID valleyview_VISA_Clean_Up(PVOID)
+ *
+ * @brief Reset any registers or addresses
+ *
+ * @param param
+ *
+ * @return None
+ *
+ * Special Notes:
+ * Intentionally empty: this device has no registers to reset; only the
+ * trace-in/trace-out instrumentation runs.
+ */
+static VOID valleyview_VISA_Clean_Up(VOID *param)
+{
+	SEP_DRV_LOG_TRACE_IN("Param: %p.", param);
+	SEP_DRV_LOG_TRACE_OUT("Empty function.");
+}
+
+/* ------------------------------------------------------------------------- */
+/*!
+ * @fn valleyview_VISA_Read_PMU_Data(param)
+ *
+ * @param param The device index
+ *
+ * @return None No return needed
+ *
+ * @brief Read the Uncore count data and store into the buffer param;
+ *
+ */
+static VOID valleyview_VISA_Read_PMU_Data(PVOID param)
+{
+/*
+ * The entire implementation is compiled out (#if 0) in this version;
+ * the function currently does nothing but is kept in the dispatch table
+ * (and the code kept for reference) pending SOCPERF integration.
+ */
+#if 0
+	U32 j;
+	U64 *buffer = read_counter_info;
+	U32 dev_idx;
+	U32 this_cpu;
+	CPU_STATE pcpu;
+	U32 package_num;
+	U32 event_index = 0;
+	U32 cur_grp;
+	ECB pecb;
+	U64 counter_buffer[VLV_CHAP_MAX_COUNTERS + 1];
+
+	SEP_DRV_LOG_TRACE_IN("Param: %p.", param);
+
+	dev_idx = *((U32 *)param);
+	this_cpu = CONTROL_THIS_CPU();
+	pcpu = &pcb[this_cpu];
+	package_num = core_to_package_map[this_cpu];
+	cur_grp = LWPMU_DEVICE_cur_group(&devices[(dev_idx)])[package_num];
+	pecb = LWPMU_DEVICE_PMU_register_data(&devices[(dev_idx)])[cur_grp];
+
+	// NOTE: this read path is only used for EMON collection
+	// (enforced by the emon_mode check below).
+	if (!DRV_CONFIG_emon_mode(drv_cfg)) {
+		SEP_DRV_LOG_TRACE_OUT("Early exit (!emon_mode).");
+		return;
+	}
+	if (!CPU_STATE_socket_master(pcpu)) {
+		SEP_DRV_LOG_TRACE_OUT("Early exit (!socket_master).");
+		return;
+	}
+	if (!pecb) {
+		SEP_DRV_LOG_TRACE_OUT("Early exit (!pecb).");
+		return;
+	}
+
+	SOCPERF_Read_Data3((void*)counter_buffer);
+
+	FOR_EACH_REG_UNC_OPERATION(pecb, dev_idx, idx, PMU_OPERATION_READ)
+	{
+		//the buffer index for this PMU needs to account for each event
+		j = ECB_entries_uncore_buffer_offset_in_system(pecb, idx);
+		buffer[j] = counter_buffer[event_index + 1];
+		event_index++;
+		SEP_DRV_LOG_TRACE("j=%u, value=%llu, cpu=%u", j, buffer[j],
+				  this_cpu);
+	}
+	END_FOR_EACH_REG_UNC_OPERATION;
+
+	SEP_DRV_LOG_TRACE_OUT("");
+#endif
+}
+
+/* ------------------------------------------------------------------------- */
+/*!
+ * @fn valleyview_Trigger_Read()
+ *
+ * @param None
+ *
+ * @return None No return needed
+ *
+ * @brief Read the SoCHAP counters when timer is triggered
+ *
+ */
+static VOID valleyview_Trigger_Read(PVOID param, U32 id)
+{
+/* Compiled out (#if 0) in this version; see valleyview_VISA_Read_PMU_Data. */
+#if 0
+	U64 *data = (U64 *)param;
+	U32 cur_grp;
+	ECB pecb;
+	U32 this_cpu;
+	U32 package_num;
+
+	SEP_DRV_LOG_TRACE_IN("Param: %p, , id: %u.", param, id);
+
+	this_cpu = CONTROL_THIS_CPU();
+	package_num = core_to_package_map[this_cpu];
+	cur_grp = LWPMU_DEVICE_cur_group(&devices[id])[package_num];
+	pecb = LWPMU_DEVICE_PMU_register_data(&devices[id])[cur_grp];
+
+	// group id
+	data = (U64 *)((S8 *)data + ECB_group_offset(pecb));
+	SOCPERF_Read_Data3((void*)data);
+
+	SEP_DRV_LOG_TRACE_OUT("");
+#endif
+}
+
+/*
+ * Initialize the dispatch table
+ * (unimplemented operations are left NULL).
+ */
+DISPATCH_NODE valleyview_visa_dispatch = {
+	.init = valleyview_VISA_Initialize,
+	.fini = NULL,
+	.write = NULL,
+	.freeze = valleyview_VISA_Disable_PMU,
+	.restart = valleyview_VISA_Enable_PMU,
+	.read_data = valleyview_VISA_Read_PMU_Data,
+	.check_overflow = NULL,
+	.swap_group = NULL,
+	.read_lbrs = NULL,
+	.cleanup = valleyview_VISA_Clean_Up,
+
.hw_errata = NULL,
+	.read_power = NULL,
+	.check_overflow_errata = NULL,
+	.read_counts = NULL,
+	.check_overflow_gp_errata = NULL,
+	.read_ro = NULL,
+	.platform_info = NULL,
+	.trigger_read = valleyview_Trigger_Read,
+	.scan_for_uncore = NULL,
+	.read_metrics = NULL
+};
diff --git a/drivers/platform/x86/socwatch/Kconfig b/drivers/platform/x86/socwatch/Kconfig
new file mode 100644
index 0000000000000..87a7ae205f2d6
--- /dev/null
+++ b/drivers/platform/x86/socwatch/Kconfig
@@ -0,0 +1,6 @@
+menuconfig INTEL_SOCWATCH
+	depends on X86
+	tristate "SocWatch Driver Support"
+	default m
+	help
+	  Say Y or M here to enable support for the SocWatch driver.
diff --git a/drivers/platform/x86/socwatch/Makefile b/drivers/platform/x86/socwatch/Makefile
new file mode 100644
index 0000000000000..15ac18fcfdc03
--- /dev/null
+++ b/drivers/platform/x86/socwatch/Makefile
@@ -0,0 +1,22 @@
+#
+# Makefile for the socwatch driver.
+#
+
+DRIVER_BASE=socwatch
+DRIVER_MAJOR=2
+DRIVER_MINOR=6
+# basic name of driver
+DRIVER_NAME=${DRIVER_BASE}${DRIVER_MAJOR}_${DRIVER_MINOR}
+
+DO_DRIVER_PROFILING=0
+
+ccflags-y += -Idrivers/platform/x86/socwatch/inc/ \
+	-DDO_DRIVER_PROFILING=$(DO_DRIVER_PROFILING)
+
+obj-$(CONFIG_INTEL_SOCWATCH) += $(DRIVER_NAME).o
+
+$(DRIVER_NAME)-objs := sw_driver.o sw_hardware_io.o \
+	sw_output_buffer.o sw_tracepoint_handlers.o \
+	sw_mem.o sw_collector.o sw_telem.o \
+	sw_file_ops.o sw_internal.o sw_ops_provider.o \
+	sw_reader.o sw_trace_notifier_provider.o
diff --git a/drivers/platform/x86/socwatch/inc/sw_collector.h b/drivers/platform/x86/socwatch/inc/sw_collector.h
new file mode 100644
index 0000000000000..41430cbeddefb
--- /dev/null
+++ b/drivers/platform/x86/socwatch/inc/sw_collector.h
@@ -0,0 +1,136 @@
+/*
+
+  This file is provided under a dual BSD/GPLv2 license.  When using or
+  redistributing this file, you may do so under either license.
+
+  GPL LICENSE SUMMARY
+
+  Copyright(c) 2014 - 2018 Intel Corporation.
+ + This program is free software; you can redistribute it and/or modify + it under the terms of version 2 of the GNU General Public License as + published by the Free Software Foundation. + + This program is distributed in the hope that it will be useful, but + WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + General Public License for more details. + + Contact Information: + SoC Watch Developer Team + Intel Corporation, + 1300 S Mopac Expwy, + Austin, TX 78746 + + BSD LICENSE + + Copyright(c) 2014 - 2018 Intel Corporation. + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions + are met: + + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in + the documentation and/or other materials provided with the + distribution. + * Neither the name of Intel Corporation nor the names of its + contributors may be used to endorse or promote products derived + from this software without specific prior written permission. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT + OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +*/ +#ifndef __SW_COLLECTOR_H__ + +#include "sw_internal.h" + +/* + * Forward declaration + */ +struct sw_hw_ops; + +/* TODO: convert from 'list_head' to 'hlist_head' */ +/** + * struct - sw_collector_data + * Information about the collector to be invoked at collection time. + * + * The collector_lists array holds linked lists of collectors to + * be exercised at specific points in time during the collection + * (e.g. begin, poll, end, etc.). At a trigger time, the driver walks + * that time's list of nodes, and exercises the collectors on that list. + * + * @list: List/link implementation + * @cpumask: Collect if cpu matches mask + * @info: Ptr to metric info + * @ops: Ptr to collector's operations + * @last_update_jiffies: Indicates when this node was last exercised. 
+ * @per_msg_payload_size: Data size + * @msg: Ptr to collected data + */ +typedef struct sw_collector_data { + SW_LIST_ENTRY(list, sw_collector_data); + struct cpumask cpumask; + struct sw_driver_interface_info *info; + const struct sw_hw_ops **ops; + size_t per_msg_payload_size; + u64 last_update_jiffies; + struct sw_driver_msg *msg; +} sw_collector_data_t; +#define GET_MSG_SLOT_FOR_CPU(msgs, cpu, size) \ + ((struct sw_driver_msg *)&( \ + ((char *)(msgs))[(cpu) * \ + (sizeof(struct sw_driver_msg) + (size))])) + +struct sw_collector_data *sw_alloc_collector_node(void); +void sw_free_collector_node(struct sw_collector_data *node); +int sw_handle_collector_node(struct sw_collector_data *data); +int sw_handle_collector_node_on_cpu(struct sw_collector_data *data, int cpu); +int sw_write_collector_node(struct sw_collector_data *data); + +void sw_init_collector_list(void *list_head); +void sw_destroy_collector_list(void *list_head); +int sw_handle_collector_list(void *list_head, + int (*func)(struct sw_collector_data *data)); +int sw_handle_collector_list_on_cpu(void *list_head, + int (*func)(struct sw_collector_data *data, + int cpu), + int cpu); + +int sw_handle_driver_io_descriptor( + char *dst_vals, int cpu, + const struct sw_driver_io_descriptor *descriptor, + const struct sw_hw_ops *hw_ops); +int sw_init_driver_io_descriptor(struct sw_driver_io_descriptor *descriptor); +int sw_reset_driver_io_descriptor(struct sw_driver_io_descriptor *descriptor); + +int sw_add_driver_info(void *list_head, + const struct sw_driver_interface_info *info); + +void sw_handle_per_cpu_msg(void *info); +void sw_handle_per_cpu_msg_no_sched(void *info); +void sw_handle_per_cpu_msg_on_cpu(int cpu, void *info); + +void sw_set_collector_ops(const struct sw_hw_ops *hw_ops); + +/** + * Process all messages for the given time. + * @param[in] when The time period e.g. 
'BEGIN' or 'END' + * + * @returns 0 on success, non-zero on error + */ +extern int sw_process_snapshot(enum sw_when_type when); +extern int sw_process_snapshot_on_cpu(enum sw_when_type when, int cpu); +#endif /* __SW_COLLECTOR_H__ */ diff --git a/drivers/platform/x86/socwatch/inc/sw_defines.h b/drivers/platform/x86/socwatch/inc/sw_defines.h new file mode 100644 index 0000000000000..ab0f4911332d7 --- /dev/null +++ b/drivers/platform/x86/socwatch/inc/sw_defines.h @@ -0,0 +1,156 @@ +/* + + This file is provided under a dual BSD/GPLv2 license. When using or + redistributing this file, you may do so under either license. + + GPL LICENSE SUMMARY + + Copyright(c) 2014 - 2018 Intel Corporation. + + This program is free software; you can redistribute it and/or modify + it under the terms of version 2 of the GNU General Public License as + published by the Free Software Foundation. + + This program is distributed in the hope that it will be useful, but + WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + General Public License for more details. + + Contact Information: + SoC Watch Developer Team + Intel Corporation, + 1300 S Mopac Expwy, + Austin, TX 78746 + + BSD LICENSE + + Copyright(c) 2014 - 2018 Intel Corporation. + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions + are met: + + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in + the documentation and/or other materials provided with the + distribution. + * Neither the name of Intel Corporation nor the names of its + contributors may be used to endorse or promote products derived + from this software without specific prior written permission. 
+ + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +*/ + +#ifndef _PW_DEFINES_H_ +#define _PW_DEFINES_H_ 1 + +#include "sw_version.h" + +/* *************************************************** + * Common to kernel and userspace. + * *************************************************** + */ +#define PW_SUCCESS 0 +#define PW_ERROR 1 +#define PW_SUCCESS_NO_COLLECT 2 + +/* + * Helper macro to convert 'u64' to 'unsigned long long' to avoid gcc warnings. + */ +#define TO_ULL(x) (unsigned long long)(x) +/* +* Convert an arg to 'long long' +*/ +#define TO_LL(x) (long long)(x) +/* + * Convert an arg to 'unsigned long' + */ +#define TO_UL(x) (unsigned long)(x) +/* + * Helper macro for string representation of a boolean value. + */ +#define GET_BOOL_STRING(b) ((b) ? "TRUE" : "FALSE") + +/* + * Circularly increment 'i' MODULO 'l'. + * ONLY WORKS IF 'l' is (power of 2 - 1) ie. + * l == (2 ^ x) - 1 + */ +#define CIRCULAR_INC(index, mask) (((index) + 1) & (mask)) +#define CIRCULAR_ADD(index, val, mask) (((index) + (val)) & (mask)) +/* + * Circularly decrement 'i'. + */ +#define CIRCULAR_DEC(i, m) \ + ({ \ + int __tmp1 = (i); \ + if (--__tmp1 < 0) \ + __tmp1 = (m); \ + __tmp1; \ + }) +/* + * Retrieve size of an array. 
+ */ +#define SW_ARRAY_SIZE(array) (sizeof(array) / sizeof((array)[0])) +/* + * Should the driver count number of dropped samples? + */ +#define DO_COUNT_DROPPED_SAMPLES 1 +/* + * Extract F/W major, minor versions. + * Assumes version numbers are 8b unsigned ints. + */ +#define SW_GET_SCU_FW_VERSION_MAJOR(ver) (((ver) >> 8) & 0xff) +#define SW_GET_SCU_FW_VERSION_MINOR(ver) ((ver)&0xff) +/* + * Max size of process name retrieved from kernel. + */ +#define SW_MAX_PROC_NAME_SIZE 16 + +/* + * Number of SOCPERF counters. + * Needed by both Ring-0 and Ring-3 + */ +#define SW_NUM_SOCPERF_COUNTERS 9 + +/* + * NOTE(review): duplicate of SW_MAX_PROC_NAME_SIZE defined above (same value); dedupe. + */ +#define SW_MAX_PROC_NAME_SIZE 16 +/* + * Max size of kernel wakelock name. + */ +#define SW_MAX_KERNEL_WAKELOCK_NAME_SIZE 100 + +/* Data value read when a telemetry data read fails. */ +#define SW_TELEM_READ_FAIL_VALUE 0xF00DF00DF00DF00DUL + +#ifdef SWW_MERGE +typedef enum { + SW_STOP_EVENT = 0, + SW_CS_EXIT_EVENT, + SW_COUNTER_RESET_EVENT, + SW_COUNTER_HOTKEY_EVENT, + SW_MAX_COLLECTION_EVENT +} collector_stop_event_t; +#endif /* SWW_MERGE */ + +#define MAX_UNSIGNED_16_BIT_VALUE 0xFFFF +#define MAX_UNSIGNED_24_BIT_VALUE 0xFFFFFF +#define MAX_UNSIGNED_32_BIT_VALUE 0xFFFFFFFF +#define MAX_UNSIGNED_64_BIT_VALUE 0xFFFFFFFFFFFFFFFF + +#endif /* _PW_DEFINES_H_ */ diff --git a/drivers/platform/x86/socwatch/inc/sw_file_ops.h b/drivers/platform/x86/socwatch/inc/sw_file_ops.h new file mode 100644 index 0000000000000..c3a30a17a7b02 --- /dev/null +++ b/drivers/platform/x86/socwatch/inc/sw_file_ops.h @@ -0,0 +1,70 @@ +/* + + This file is provided under a dual BSD/GPLv2 license. When using or + redistributing this file, you may do so under either license. + + GPL LICENSE SUMMARY + + Copyright(c) 2014 - 2018 Intel Corporation. + + This program is free software; you can redistribute it and/or modify + it under the terms of version 2 of the GNU General Public License as + published by the Free Software Foundation. 
+ + This program is distributed in the hope that it will be useful, but + WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + General Public License for more details. + + Contact Information: + SoC Watch Developer Team + Intel Corporation, + 1300 S Mopac Expwy, + Austin, TX 78746 + + BSD LICENSE + + Copyright(c) 2014 - 2018 Intel Corporation. + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions + are met: + + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in + the documentation and/or other materials provided with the + distribution. + * Neither the name of Intel Corporation nor the names of its + contributors may be used to endorse or promote products derived + from this software without specific prior written permission. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +*/ +#ifndef __SW_FILE_OPS_H__ +#define __SW_FILE_OPS_H__ + +enum sw_driver_collection_cmd; +struct sw_file_ops { + long (*ioctl_handler)(unsigned int ioctl_num, void *local_args); + int (*stop_handler)(void); + enum sw_driver_collection_cmd (*get_current_cmd)(void); + bool (*should_flush)(void); +}; + +int sw_register_dev(struct sw_file_ops *ops); +void sw_unregister_dev(void); + +#endif /* __SW_FILE_OPS_H__ */ diff --git a/drivers/platform/x86/socwatch/inc/sw_hardware_io.h b/drivers/platform/x86/socwatch/inc/sw_hardware_io.h new file mode 100644 index 0000000000000..5cc9ebe18cf11 --- /dev/null +++ b/drivers/platform/x86/socwatch/inc/sw_hardware_io.h @@ -0,0 +1,120 @@ +/* + + This file is provided under a dual BSD/GPLv2 license. When using or + redistributing this file, you may do so under either license. + + GPL LICENSE SUMMARY + + Copyright(c) 2014 - 2018 Intel Corporation. + + This program is free software; you can redistribute it and/or modify + it under the terms of version 2 of the GNU General Public License as + published by the Free Software Foundation. + + This program is distributed in the hope that it will be useful, but + WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + General Public License for more details. + + Contact Information: + SoC Watch Developer Team + Intel Corporation, + 1300 S Mopac Expwy, + Austin, TX 78746 + + BSD LICENSE + + Copyright(c) 2014 - 2018 Intel Corporation. + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions + are met: + + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in + the documentation and/or other materials provided with the + distribution. 
+ * Neither the name of Intel Corporation nor the names of its + contributors may be used to endorse or promote products derived + from this software without specific prior written permission. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +*/ +#ifndef __SW_HARDWARE_IO_H__ +#define __SW_HARDWARE_IO_H__ + +#include "sw_structs.h" + +typedef int (*sw_io_desc_init_func_t)( + struct sw_driver_io_descriptor *descriptor); +typedef void (*sw_hardware_op_func_t)( + char *dst_vals, int cpu, + const struct sw_driver_io_descriptor *descriptor, + u16 counter_size_in_bytes); +typedef int (*sw_io_desc_print_func_t)( + const struct sw_driver_io_descriptor *descriptor); +typedef int (*sw_io_desc_reset_func_t)( + const struct sw_driver_io_descriptor *descriptor); +typedef bool (*sw_io_desc_available_func_t)(void); +typedef bool (*sw_hw_op_post_config_func_t)(void); + +/** + * struct sw_hw_ops - Operations for each of the HW collection mechanisms + * in swkernelcollector. + * @name: A descriptive name used to identify this particular + * operation. + * @init: Initialize a metric's collection. + * @read: Read a metric's data. + * @write: Write to the HW for the metric(?). + * @print: Print out the data. 
+ * @reset: Opposite of init--called after we're done collecting. + * @available: Decide whether this H/W op is available on the current + * platform. + * @post_config: Perform any post-configuration steps. + */ +struct sw_hw_ops { + const char *name; + sw_io_desc_init_func_t init; + sw_hardware_op_func_t read; + sw_hardware_op_func_t write; + sw_io_desc_print_func_t print; + sw_io_desc_reset_func_t reset; + sw_io_desc_available_func_t available; + sw_hw_op_post_config_func_t post_config; +}; + +bool sw_is_valid_hw_op_id(int id); +int sw_get_hw_op_id(const struct sw_hw_ops *op); +const struct sw_hw_ops *sw_get_hw_ops_for(int id); +const char *sw_get_hw_op_abstract_name(const struct sw_hw_ops *op); + +int sw_for_each_hw_op(int (*func)(const struct sw_hw_ops *op, void *priv), + void *priv, bool return_on_error); + +/** + * Add an operation to the list of providers. + */ +int sw_register_hw_op(const struct sw_hw_ops *ops); +/** + * Register all H/W operations. + */ +int sw_register_hw_ops(void); +/** + * Unregister previously registered H/W operations. + */ +void sw_free_hw_ops(void); + +#endif /* __SW_HARDWARE_IO_H__ */ diff --git a/drivers/platform/x86/socwatch/inc/sw_internal.h b/drivers/platform/x86/socwatch/inc/sw_internal.h new file mode 100644 index 0000000000000..c8d9da3307566 --- /dev/null +++ b/drivers/platform/x86/socwatch/inc/sw_internal.h @@ -0,0 +1,138 @@ +/* + + This file is provided under a dual BSD/GPLv2 license. When using or + redistributing this file, you may do so under either license. + + GPL LICENSE SUMMARY + + Copyright(c) 2014 - 2018 Intel Corporation. + + This program is free software; you can redistribute it and/or modify + it under the terms of version 2 of the GNU General Public License as + published by the Free Software Foundation. + + This program is distributed in the hope that it will be useful, but + WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU + General Public License for more details. + + Contact Information: + SoC Watch Developer Team + Intel Corporation, + 1300 S Mopac Expwy, + Austin, TX 78746 + + BSD LICENSE + + Copyright(c) 2014 - 2018 Intel Corporation. + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions + are met: + + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in + the documentation and/or other materials provided with the + distribution. + * Neither the name of Intel Corporation nor the names of its + contributors may be used to endorse or promote products derived + from this software without specific prior written permission. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +*/ +#ifndef __SW_DATA_STRUCTS_H__ +#define __SW_DATA_STRUCTS_H__ + +/* + * Taken from 'sw_driver' + * TODO: move to separate file? 
+ */ +#include +#include +#include +#include +#include +#include +#include /* inode */ +#include /* class_create */ +#include /* cdev_alloc */ +#include /* vmalloc */ +#include /* TASK_INTERRUPTIBLE */ +#include /* wait_event_interruptible */ +#include /* pci_get_bus_and_slot */ +#include /* LINUX_VERSION_CODE */ +#include /* For SFI F/W version */ +#include +#include +#include /* local_t */ +#include /* "in_atomic" */ +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 12, 0) +#include /* copy_to_user */ +#else +#include /* copy_to_user */ +#endif /* LINUX_VERSION_CODE */ + +#ifdef CONFIG_X86_WANT_INTEL_MID +#include +#endif /* CONFIG_X86_WANT_INTEL_MID */ +/* + * End taken from sw_driver + */ + +#include "sw_structs.h" +#include "sw_ioctl.h" +#include "sw_list.h" + +/* ****************************************** + * Compile time constants + * ****************************************** + */ +#define GET_POLLED_CPU() (sw_max_num_cpus) + +/* ****************************************** + * Function declarations. + * ****************************************** + */ +/* + * Output to user. + */ +unsigned long sw_copy_to_user(char __user *dst, + char *src, size_t bytes_to_copy); +bool sw_check_output_buffer_params(void __user *buffer, size_t bytes_to_read, + size_t buff_size); +/* + * smp call function. + */ +void sw_schedule_work(const struct cpumask *mask, void (*work)(void *), + void *data); +/* + * Save IRQ flags and retrieve cpu number. + */ +int sw_get_cpu(unsigned long *flags); +/* + * Restore IRQ flags. + */ +void sw_put_cpu(unsigned long flags); +/* + * Set module scope for cpu frequencies. + */ +int sw_set_module_scope_for_cpus(void); +/* + * reset module scope for cpu frequencies. 
+ */ +int sw_reset_module_scope_for_cpus(void); + +#endif /* __SW_DATA_STRUCTS_H__ */ diff --git a/drivers/platform/x86/socwatch/inc/sw_ioctl.h b/drivers/platform/x86/socwatch/inc/sw_ioctl.h new file mode 100644 index 0000000000000..1f8e903a0e1c1 --- /dev/null +++ b/drivers/platform/x86/socwatch/inc/sw_ioctl.h @@ -0,0 +1,303 @@ +/* + + This file is provided under a dual BSD/GPLv2 license. When using or + redistributing this file, you may do so under either license. + + GPL LICENSE SUMMARY + + Copyright(c) 2014 - 2018 Intel Corporation. + + This program is free software; you can redistribute it and/or modify + it under the terms of version 2 of the GNU General Public License as + published by the Free Software Foundation. + + This program is distributed in the hope that it will be useful, but + WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + General Public License for more details. + + Contact Information: + SoC Watch Developer Team + Intel Corporation, + 1300 S Mopac Expwy, + Austin, TX 78746 + + BSD LICENSE + + Copyright(c) 2014 - 2018 Intel Corporation. + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions + are met: + + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in + the documentation and/or other materials provided with the + distribution. + * Neither the name of Intel Corporation nor the names of its + contributors may be used to endorse or promote products derived + from this software without specific prior written permission. 
+ + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +*/ +#ifndef __SW_IOCTL_H__ +#define __SW_IOCTL_H__ 1 + +#if defined(__linux__) || defined(__QNX__) +#if __KERNEL__ +#include +#if defined(CONFIG_COMPAT) && defined(CONFIG_X86_64) +#include +#include +#endif /* COMPAT && x64 */ +#else /* !__KERNEL__ */ +#include +#endif /* __KERNEL__ */ +#endif /* __linux__ */ +/* + * Ensure we pull in definition of 'DO_COUNT_DROPPED_SAMPLES'! + */ +#include "sw_defines.h" + +#ifdef ONECORE +#ifndef __KERNEL__ +#include +#endif /* __KERNEL__ */ +#endif /* ONECORE */ + +/* + * The APWR-specific IOCTL magic + * number -- used to ensure IOCTLs + * are delivered to the correct + * driver. 
+ */ +/* #define APWR_IOCTL_MAGIC_NUM 0xdead */ +#define APWR_IOCTL_MAGIC_NUM 100 + +/* + * The name of the device file + */ +/* #define DEVICE_FILE_NAME "/dev/pw_driver_char_dev" */ +#define PW_DEVICE_FILE_NAME "/dev/apwr_driver_char_dev" +#define PW_DEVICE_NAME "apwr_driver_char_dev" + +enum sw_ioctl_cmd { + sw_ioctl_cmd_none = 0, + sw_ioctl_cmd_config, + sw_ioctl_cmd_cmd, + sw_ioctl_cmd_poll, + sw_ioctl_cmd_immediate_io, + sw_ioctl_cmd_scu_version, + sw_ioctl_cmd_read_immediate, + sw_ioctl_cmd_driver_version, + sw_ioctl_cmd_avail_trace, + sw_ioctl_cmd_avail_notify, + sw_ioctl_cmd_avail_collect, + sw_ioctl_cmd_topology_changes, +}; +/* + * The actual IOCTL commands. + * + * From the kernel documentation: + * "_IOR" ==> Read IOCTL + * "_IOW" ==> Write IOCTL + * "_IOWR" ==> Read/Write IOCTL + * + * Where "Read" and "Write" are from the user's perspective + * (similar to the file "read" and "write" calls). + */ +#ifdef SWW_MERGE /* Windows */ +/* + * Device type -- in the "User Defined" range. + */ +#define POWER_I_CONF_TYPE 40000 + +/* List assigned tracepoint id */ +#define CSIR_TRACEPOINT_ID_MASK 1 +#define DEVICE_STATE_TRACEPOINT_ID_MASK 2 +#define CSIR_SEPARATE_TRACEPOINT_ID_MASK 3 +#define RESET_TRACEPOINT_ID_MASK 4 +#define DISPLAY_ON_TRACEPOINT_ID_MASK 5 + +#ifdef SWW_MERGE +/* + * TELEM BAR CONFIG + */ +#define MAX_TELEM_BAR_CFG 3 +#define TELEM_MCHBAR_CFG 0 +#define TELEM_IPC1BAR_CFG 1 +#define TELEM_SSRAMBAR_CFG 2 +#endif + +/* + * The IOCTL function codes from 0x800 to 0xFFF are for customer use. 
+ */ +#define PW_IOCTL_CONFIG \ + CTL_CODE(POWER_I_CONF_TYPE, 0x900, METHOD_BUFFERED, FILE_ANY_ACCESS) +#define PW_IOCTL_START_COLLECTION \ + CTL_CODE(POWER_I_CONF_TYPE, 0x901, METHOD_BUFFERED, FILE_ANY_ACCESS) +#define PW_IOCTL_STOP_COLLECTION \ + CTL_CODE(POWER_I_CONF_TYPE, 0x902, METHOD_BUFFERED, FILE_ANY_ACCESS) + +/* TODO: pause, resume, cancel not supported yet */ +#define PW_IOCTL_PAUSE_COLLECTION \ + CTL_CODE(POWER_I_CONF_TYPE, 0x903, METHOD_BUFFERED, FILE_ANY_ACCESS) +#define PW_IOCTL_RESUME_COLLECTION \ + CTL_CODE(POWER_I_CONF_TYPE, 0x904, METHOD_BUFFERED, FILE_ANY_ACCESS) +#define PW_IOCTL_CANCEL_COLLECTION \ + CTL_CODE(POWER_I_CONF_TYPE, 0x905, METHOD_BUFFERED, FILE_ANY_ACCESS) + +#define PW_IOCTL_GET_PROCESSOR_GROUP_TOPOLOGY \ + CTL_CODE(POWER_I_CONF_TYPE, 0x906, METHOD_BUFFERED, FILE_ANY_ACCESS) +#define PW_IOCTL_TOPOLOGY \ + CTL_CODE(POWER_I_CONF_TYPE, 0x907, METHOD_BUFFERED, FILE_ANY_ACCESS) +#define PW_IOCTL_GET_AVAILABLE_COLLECTORS \ + CTL_CODE(POWER_I_CONF_TYPE, 0x908, METHOD_BUFFERED, FILE_ANY_ACCESS) +#define PW_IOCTL_IMMEDIATE_IO \ + CTL_CODE(POWER_I_CONF_TYPE, 0x909, METHOD_BUFFERED, FILE_ANY_ACCESS) +#define PW_IOCTL_DRV_CLEANUP \ + CTL_CODE(POWER_I_CONF_TYPE, 0x90A, METHOD_BUFFERED, FILE_ANY_ACCESS) +#define PW_IOCTL_SET_COLLECTION_EVENT \ + CTL_CODE(POWER_I_CONF_TYPE, 0x90B, METHOD_BUFFERED, FILE_ANY_ACCESS) +#define PW_IOCTL_TRY_STOP_EVENT \ + CTL_CODE(POWER_I_CONF_TYPE, 0x90C, METHOD_BUFFERED, FILE_ANY_ACCESS) +#define PW_IOCTL_SET_PCH_ACTIVE_INTERVAL \ + CTL_CODE(POWER_I_CONF_TYPE, 0x90D, METHOD_BUFFERED, FILE_ANY_ACCESS) +#define PW_IOCTL_SET_TELEM_BAR \ + CTL_CODE(POWER_I_CONF_TYPE, 0x90E, METHOD_BUFFERED, FILE_ANY_ACCESS) +#define PW_IOCTL_METADATA \ + CTL_CODE(POWER_I_CONF_TYPE, 0x90F, METHOD_BUFFERED, FILE_ANY_ACCESS) +#define PW_IOCTL_SET_GBE_INTERVAL \ + CTL_CODE(POWER_I_CONF_TYPE, 0x910, METHOD_BUFFERED, FILE_ANY_ACCESS) +#define PW_IOCTL_ENABLE_COLLECTION \ + CTL_CODE(POWER_I_CONF_TYPE, 0x911, METHOD_BUFFERED, FILE_ANY_ACCESS) 
+#define PW_IOCTL_DISABLE_COLLECTION \ + CTL_CODE(POWER_I_CONF_TYPE, 0x912, METHOD_BUFFERED, FILE_ANY_ACCESS) +#define PW_IOCTL_DRIVER_BUILD_DATE \ + CTL_CODE(POWER_I_CONF_TYPE, 0x913, METHOD_BUFFERED, FILE_ANY_ACCESS) + +#elif !defined(__APPLE__) +#define PW_IOCTL_CONFIG \ + _IOW(APWR_IOCTL_MAGIC_NUM, sw_ioctl_cmd_config, \ + struct sw_driver_ioctl_arg *) +#if DO_COUNT_DROPPED_SAMPLES +#define PW_IOCTL_CMD \ + _IOWR(APWR_IOCTL_MAGIC_NUM, sw_ioctl_cmd_cmd, \ + struct sw_driver_ioctl_arg *) +#else +#define PW_IOCTL_CMD \ + _IOW(APWR_IOCTL_MAGIC_NUM, sw_ioctl_cmd_cmd, \ + struct sw_driver_ioctl_arg *) +#endif /* DO_COUNT_DROPPED_SAMPLES */ +#define PW_IOCTL_POLL _IO(APWR_IOCTL_MAGIC_NUM, sw_ioctl_cmd_poll) +#define PW_IOCTL_IMMEDIATE_IO \ + _IOWR(APWR_IOCTL_MAGIC_NUM, sw_ioctl_cmd_immediate_io, \ + struct sw_driver_ioctl_arg *) +#define PW_IOCTL_GET_SCU_FW_VERSION \ + _IOR(APWR_IOCTL_MAGIC_NUM, sw_ioctl_cmd_scu_version, \ + struct sw_driver_ioctl_arg *) +#define PW_IOCTL_READ_IMMEDIATE \ + _IOWR(APWR_IOCTL_MAGIC_NUM, sw_ioctl_cmd_read_immediate, \ + struct sw_driver_ioctl_arg *) +#define PW_IOCTL_GET_DRIVER_VERSION \ + _IOR(APWR_IOCTL_MAGIC_NUM, sw_ioctl_cmd_driver_version, \ + struct sw_driver_ioctl_arg *) +#define PW_IOCTL_GET_AVAILABLE_TRACEPOINTS \ + _IOR(APWR_IOCTL_MAGIC_NUM, sw_ioctl_cmd_avail_trace, \ + struct sw_driver_ioctl_arg *) +#define PW_IOCTL_GET_AVAILABLE_NOTIFIERS \ + _IOR(APWR_IOCTL_MAGIC_NUM, sw_ioctl_cmd_avail_notify, \ + struct sw_driver_ioctl_arg *) +#define PW_IOCTL_GET_AVAILABLE_COLLECTORS \ + _IOR(APWR_IOCTL_MAGIC_NUM, sw_ioctl_cmd_avail_collect, \ + struct sw_driver_ioctl_arg *) +#define PW_IOCTL_GET_TOPOLOGY_CHANGES \ + _IOR(APWR_IOCTL_MAGIC_NUM, sw_ioctl_cmd_topology_changes, \ + struct sw_driver_ioctl_arg *) +#else /* __APPLE__ */ +#define PW_IOCTL_CONFIG \ + _IOW(APWR_IOCTL_MAGIC_NUM, sw_ioctl_cmd_config, \ + struct sw_driver_ioctl_arg) +#if DO_COUNT_DROPPED_SAMPLES +#define PW_IOCTL_CMD \ + _IOWR(APWR_IOCTL_MAGIC_NUM, sw_ioctl_cmd_cmd, 
\ + struct sw_driver_ioctl_arg) +#else +#define PW_IOCTL_CMD \ + _IOW(APWR_IOCTL_MAGIC_NUM, sw_ioctl_cmd_cmd, struct sw_driver_ioctl_arg) +#endif /* DO_COUNT_DROPPED_SAMPLES */ +#define PW_IOCTL_POLL _IO(APWR_IOCTL_MAGIC_NUM, sw_ioctl_cmd_poll) +#define PW_IOCTL_IMMEDIATE_IO \ + _IOWR(APWR_IOCTL_MAGIC_NUM, sw_ioctl_cmd_immediate_io, \ + struct sw_driver_ioctl_arg) +#define PW_IOCTL_GET_SCU_FW_VERSION \ + _IOWR(APWR_IOCTL_MAGIC_NUM, sw_ioctl_cmd_scu_version, \ + struct sw_driver_ioctl_arg) +#define PW_IOCTL_READ_IMMEDIATE \ + _IOWR(APWR_IOCTL_MAGIC_NUM, sw_ioctl_cmd_read_immediate, \ + struct sw_driver_ioctl_arg) +#define PW_IOCTL_GET_DRIVER_VERSION \ + _IOWR(APWR_IOCTL_MAGIC_NUM, sw_ioctl_cmd_driver_version, \ + struct sw_driver_ioctl_arg) +#define PW_IOCTL_GET_AVAILABLE_TRACEPOINTS \ + _IOWR(APWR_IOCTL_MAGIC_NUM, sw_ioctl_cmd_avail_trace, \ + struct sw_driver_ioctl_arg) +#define PW_IOCTL_GET_AVAILABLE_NOTIFIERS \ + _IOWR(APWR_IOCTL_MAGIC_NUM, sw_ioctl_cmd_avail_notify, \ + struct sw_driver_ioctl_arg) +#define PW_IOCTL_GET_AVAILABLE_COLLECTORS \ + _IOWR(APWR_IOCTL_MAGIC_NUM, sw_ioctl_cmd_avail_collect, \ + struct sw_driver_ioctl_arg) +#define PW_IOCTL_GET_TOPOLOGY_CHANGES \ + _IOWR(APWR_IOCTL_MAGIC_NUM, sw_ioctl_cmd_topology_changes, \ + struct sw_driver_ioctl_arg) +#endif /* __APPLE__ */ + +/* + * 32b-compatible version of the above + * IOCTL numbers. Required ONLY for + * 32b compatibility on 64b systems, + * and ONLY by the driver. 
+ */ +#if defined(CONFIG_COMPAT) && defined(CONFIG_X86_64) +#define PW_IOCTL_CONFIG32 \ + _IOW(APWR_IOCTL_MAGIC_NUM, sw_ioctl_cmd_config, compat_uptr_t) +#if DO_COUNT_DROPPED_SAMPLES +#define PW_IOCTL_CMD32 \ + _IOWR(APWR_IOCTL_MAGIC_NUM, sw_ioctl_cmd_cmd, compat_uptr_t) +#else +#define PW_IOCTL_CMD32 \ + _IOW(APWR_IOCTL_MAGIC_NUM, sw_ioctl_cmd_cmd, compat_uptr_t) +#endif /* DO_COUNT_DROPPED_SAMPLES */ +#define PW_IOCTL_POLL32 _IO(APWR_IOCTL_MAGIC_NUM, sw_ioctl_cmd_poll) +#define PW_IOCTL_IMMEDIATE_IO32 \ + _IOWR(APWR_IOCTL_MAGIC_NUM, sw_ioctl_cmd_immediate_io, compat_uptr_t) +#define PW_IOCTL_GET_SCU_FW_VERSION32 \ + _IOR(APWR_IOCTL_MAGIC_NUM, sw_ioctl_cmd_scu_version, compat_uptr_t) +#define PW_IOCTL_READ_IMMEDIATE32 \ + _IOWR(APWR_IOCTL_MAGIC_NUM, sw_ioctl_cmd_read_immediate, compat_uptr_t) +#define PW_IOCTL_GET_DRIVER_VERSION32 \ + _IOR(APWR_IOCTL_MAGIC_NUM, sw_ioctl_cmd_driver_version, compat_uptr_t) +#define PW_IOCTL_GET_AVAILABLE_TRACEPOINTS32 \ + _IOR(APWR_IOCTL_MAGIC_NUM, sw_ioctl_cmd_avail_trace, compat_uptr_t) +#define PW_IOCTL_GET_AVAILABLE_NOTIFIERS32 \ + _IOR(APWR_IOCTL_MAGIC_NUM, sw_ioctl_cmd_avail_notify, compat_uptr_t) +#define PW_IOCTL_GET_AVAILABLE_COLLECTORS32 \ + _IOR(APWR_IOCTL_MAGIC_NUM, sw_ioctl_cmd_avail_collect, compat_uptr_t) +#define PW_IOCTL_GET_TOPOLOGY_CHANGES32 \ + _IOR(APWR_IOCTL_MAGIC_NUM, sw_ioctl_cmd_topology_changes, compat_uptr_t) +#endif /* defined(CONFIG_COMPAT) && defined(CONFIG_X86_64) */ +#endif /* __SW_IOCTL_H__ */ diff --git a/drivers/platform/x86/socwatch/inc/sw_kernel_defines.h b/drivers/platform/x86/socwatch/inc/sw_kernel_defines.h new file mode 100644 index 0000000000000..26328645b232a --- /dev/null +++ b/drivers/platform/x86/socwatch/inc/sw_kernel_defines.h @@ -0,0 +1,164 @@ +/* + + This file is provided under a dual BSD/GPLv2 license. When using or + redistributing this file, you may do so under either license. + + GPL LICENSE SUMMARY + + Copyright(c) 2014 - 2018 Intel Corporation. 
+ + This program is free software; you can redistribute it and/or modify + it under the terms of version 2 of the GNU General Public License as + published by the Free Software Foundation. + + This program is distributed in the hope that it will be useful, but + WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + General Public License for more details. + + Contact Information: + SoC Watch Developer Team + Intel Corporation, + 1300 S Mopac Expwy, + Austin, TX 78746 + + BSD LICENSE + + Copyright(c) 2014 - 2018 Intel Corporation. + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions + are met: + + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in + the documentation and/or other materials provided with the + distribution. + * Neither the name of Intel Corporation nor the names of its + contributors may be used to endorse or promote products derived + from this software without specific prior written permission. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT + OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +*/ +#ifndef _SW_KERNEL_DEFINES_H_ +#define _SW_KERNEL_DEFINES_H_ 1 + +#include "sw_defines.h" + +#if defined(__APPLE__) +#define likely(x) (x) +#define unlikely(x) (x) +#endif /* __APPLE__ */ + +#if !defined(__APPLE__) +#define CPU() (raw_smp_processor_id()) +#define RAW_CPU() (raw_smp_processor_id()) +#else +#define CPU() (cpu_number()) +#define RAW_CPU() (cpu_number()) +#endif /* __APPLE__ */ + +#define TID() (current->pid) +#define PID() (current->tgid) +#define NAME() (current->comm) +#define PKG(c) (cpu_data(c).phys_proc_id) +#define IT_REAL_INCR() (current->signal->it_real_incr.tv64) + +#define ATOMIC_CAS(ptr, old_val, new_val) \ + (cmpxchg((ptr), (old_val), (new_val)) == (old_val)) + +/* + * Should we measure overheads? + * '1' ==> YES + * '0' ==> NO + */ +#define DO_OVERHEAD_MEASUREMENTS 0 +/* + * Should we track memory usage? + * '1' ==> YES + * '0' ==> NO + */ +#define DO_TRACK_MEMORY_USAGE 0 +/* + * Are we compiling with driver profiling support + * turned ON? If YES then force 'DO_OVERHEAD_MEASUREMENTS' + * and 'DO_TRACK_MEMORY_USAGE' to be TRUE. + */ +#if DO_DRIVER_PROFILING +#if !DO_OVERHEAD_MEASUREMENTS +#undef DO_OVERHEAD_MEASUREMENTS +#define DO_OVERHEAD_MEASUREMENTS 1 +#endif /* DO_OVERHEAD_MEASUREMENTS */ +#if !DO_TRACK_MEMORY_USAGE +#undef DO_TRACK_MEMORY_USAGE +#define DO_TRACK_MEMORY_USAGE 1 +#endif /* DO_TRACK_MEMORY_USAGE */ +#endif /* DO_DRIVER_PROFILING */ +/* + * Should we allow debug output. 
+ * Set to: "1" ==> 'OUTPUT' is enabled. + * "0" ==> 'OUTPUT' is disabled. + */ +#define DO_DEBUG_OUTPUT 0 +/* + * Control whether to output driver ERROR messages. + * These are independent of the 'OUTPUT' macro + * (which controls debug messages). + * Set to '1' ==> Print driver error messages (to '/var/log/messages') + * '0' ==> Do NOT print driver error messages + */ +#define DO_PRINT_DRIVER_ERROR_MESSAGES 1 +/* + * Macros to control output printing. + */ +#if !defined(__APPLE__) +#if DO_DEBUG_OUTPUT +#define pw_pr_debug(...) printk(KERN_INFO __VA_ARGS__) +#define pw_pr_warn(...) printk(KERN_WARNING __VA_ARGS__) +#else +#define pw_pr_debug(...) +#define pw_pr_warn(...) +#endif +#define pw_pr_force(...) printk(KERN_INFO __VA_ARGS__) +#else +#if DO_DEBUG_OUTPUT +#define pw_pr_debug(...) IOLog(__VA_ARGS__) +#define pw_pr_warn(...) IOLog(__VA_ARGS__) +#else +#define pw_pr_debug(...) +#define pw_pr_warn(...) +#endif +#define pw_pr_force(...) IOLog(__VA_ARGS__) +#endif /* __APPLE__ */ + +/* + * Macro for driver error messages. + */ +#if !defined(__APPLE__) +#if (DO_PRINT_DRIVER_ERROR_MESSAGES || DO_DEBUG_OUTPUT) +#define pw_pr_error(...) printk(KERN_ERR __VA_ARGS__) +#else +#define pw_pr_error(...) +#endif +#else +#if (DO_PRINT_DRIVER_ERROR_MESSAGES || DO_DEBUG_OUTPUT) +#define pw_pr_error(...) IOLog(__VA_ARGS__) +#else +#define pw_pr_error(...) +#endif +#endif /* __APPLE__ */ + +#endif /* _SW_KERNEL_DEFINES_H_ */ diff --git a/drivers/platform/x86/socwatch/inc/sw_list.h b/drivers/platform/x86/socwatch/inc/sw_list.h new file mode 100644 index 0000000000000..9c17e50ac5bf0 --- /dev/null +++ b/drivers/platform/x86/socwatch/inc/sw_list.h @@ -0,0 +1,76 @@ +/* + + This file is provided under a dual BSD/GPLv2 license. When using or + redistributing this file, you may do so under either license. + + GPL LICENSE SUMMARY + + Copyright(c) 2014 - 2018 Intel Corporation. 
+ + This program is free software; you can redistribute it and/or modify + it under the terms of version 2 of the GNU General Public License as + published by the Free Software Foundation. + + This program is distributed in the hope that it will be useful, but + WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + General Public License for more details. + + Contact Information: + SoC Watch Developer Team + Intel Corporation, + 1300 S Mopac Expwy, + Austin, TX 78746 + + BSD LICENSE + + Copyright(c) 2014 - 2018 Intel Corporation. + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions + are met: + + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in + the documentation and/or other materials provided with the + distribution. + * Neither the name of Intel Corporation nor the names of its + contributors may be used to endorse or promote products derived + from this software without specific prior written permission. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT + OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +*/ +#ifndef __SW_LIST_H__ +#define __SW_LIST_H__ + +#include + +#define SW_DEFINE_LIST_HEAD(name, dummy) struct list_head name +#define SW_DECLARE_LIST_HEAD(name, dummy) extern struct list_head name +#define SW_LIST_ENTRY(name, dummy) struct list_head name +#define SW_LIST_HEAD_VAR(dummy) struct list_head +#define SW_LIST_HEAD_INIT(head) INIT_LIST_HEAD(head) +#define SW_LIST_ENTRY_INIT(node, field) INIT_LIST_HEAD(&node->field) +#define SW_LIST_ADD(head, node, field) list_add_tail(&node->field, head) +#define SW_LIST_GET_HEAD_ENTRY(head, type, field) \ + list_first_entry(head, struct type, field) +#define SW_LIST_UNLINK(node, field) list_del(&node->field) +#define SW_LIST_FOR_EACH_ENTRY(node, head, field) \ + list_for_each_entry(node, head, field) +#define SW_LIST_EMPTY(head) list_empty(head) +#define SW_LIST_HEAD_INITIALIZER(head) LIST_HEAD_INIT(head) + +#endif /* __SW_LIST_H__ */ diff --git a/drivers/platform/x86/socwatch/inc/sw_lock_defs.h b/drivers/platform/x86/socwatch/inc/sw_lock_defs.h new file mode 100644 index 0000000000000..be44bfab01a7c --- /dev/null +++ b/drivers/platform/x86/socwatch/inc/sw_lock_defs.h @@ -0,0 +1,98 @@ +/* + + This file is provided under a dual BSD/GPLv2 license. When using or + redistributing this file, you may do so under either license. + + GPL LICENSE SUMMARY + + Copyright(c) 2014 - 2018 Intel Corporation. 
+ + This program is free software; you can redistribute it and/or modify + it under the terms of version 2 of the GNU General Public License as + published by the Free Software Foundation. + + This program is distributed in the hope that it will be useful, but + WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + General Public License for more details. + + Contact Information: + SoC Watch Developer Team + Intel Corporation, + 1300 S Mopac Expwy, + Austin, TX 78746 + + BSD LICENSE + + Copyright(c) 2014 - 2018 Intel Corporation. + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions + are met: + + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in + the documentation and/or other materials provided with the + distribution. + * Neither the name of Intel Corporation nor the names of its + contributors may be used to endorse or promote products derived + from this software without specific prior written permission. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT + OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +*/ + +/* + * Description: file containing locking routines + * used by the power driver. + */ + +#ifndef __SW_LOCK_DEFS_H__ +#define __SW_LOCK_DEFS_H__ + +#define SW_DEFINE_SPINLOCK(s) DEFINE_SPINLOCK(s) +#define SW_DECLARE_SPINLOCK(s) static spinlock_t s + +#define SW_INIT_SPINLOCK(s) spin_lock_init(&s) +#define SW_DESTROY_SPINLOCK(s) /* NOP */ + +#define LOCK(l) \ + { \ + unsigned long _tmp_l_flags; \ + spin_lock_irqsave(&(l), _tmp_l_flags); + +#define UNLOCK(l) \ + spin_unlock_irqrestore(&(l), _tmp_l_flags); \ + } + +#define READ_LOCK(l) \ + { \ + unsigned long _tmp_l_flags; \ + read_lock_irqsave(&(l), _tmp_l_flags); + +#define READ_UNLOCK(l) \ + read_unlock_irqrestore(&(l), _tmp_l_flags); \ + } + +#define WRITE_LOCK(l) \ + { \ + unsigned long _tmp_l_flags; \ + write_lock_irqsave(&(l), _tmp_l_flags); + +#define WRITE_UNLOCK(l) \ + write_unlock_irqrestore(&(l), _tmp_l_flags); \ + } + +#endif /* __SW_LOCK_DEFS_H__ */ diff --git a/drivers/platform/x86/socwatch/inc/sw_mem.h b/drivers/platform/x86/socwatch/inc/sw_mem.h new file mode 100644 index 0000000000000..0d6de7f3a21ba --- /dev/null +++ b/drivers/platform/x86/socwatch/inc/sw_mem.h @@ -0,0 +1,82 @@ +/* + + This file is provided under a dual BSD/GPLv2 license. When using or + redistributing this file, you may do so under either license. + + GPL LICENSE SUMMARY + + Copyright(c) 2014 - 2018 Intel Corporation. 
+ + This program is free software; you can redistribute it and/or modify + it under the terms of version 2 of the GNU General Public License as + published by the Free Software Foundation. + + This program is distributed in the hope that it will be useful, but + WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + General Public License for more details. + + Contact Information: + SoC Watch Developer Team + Intel Corporation, + 1300 S Mopac Expwy, + Austin, TX 78746 + + BSD LICENSE + + Copyright(c) 2014 - 2018 Intel Corporation. + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions + are met: + + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in + the documentation and/or other materials provided with the + distribution. + * Neither the name of Intel Corporation nor the names of its + contributors may be used to endorse or promote products derived + from this software without specific prior written permission. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT + OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +*/ + +/* + * Description: file containing memory management routines + * used by the power driver. + */ + +#ifndef _SW_MEM_H_ +#define _SW_MEM_H_ 1 + +#include "sw_types.h" + +void *sw_kmalloc(size_t size, gfp_t flags); +void sw_kfree(const void *obj); +/* + * Allocate free pages. + */ +unsigned long sw_allocate_pages(gfp_t flags, + unsigned int alloc_size_in_bytes); +/* + * Free up previously allocated pages. + */ +void sw_release_pages(unsigned long addr, unsigned int alloc_size_in_bytes); + +u64 sw_get_total_bytes_alloced(void); +u64 sw_get_max_bytes_alloced(void); +u64 sw_get_curr_bytes_alloced(void); +#endif /* _SW_MEM_H_ */ diff --git a/drivers/platform/x86/socwatch/inc/sw_ops_provider.h b/drivers/platform/x86/socwatch/inc/sw_ops_provider.h new file mode 100644 index 0000000000000..bb841bf65cb6c --- /dev/null +++ b/drivers/platform/x86/socwatch/inc/sw_ops_provider.h @@ -0,0 +1,62 @@ +/* + + This file is provided under a dual BSD/GPLv2 license. When using or + redistributing this file, you may do so under either license. + + GPL LICENSE SUMMARY + + Copyright(c) 2014 - 2018 Intel Corporation. + + This program is free software; you can redistribute it and/or modify + it under the terms of version 2 of the GNU General Public License as + published by the Free Software Foundation. 
+ + This program is distributed in the hope that it will be useful, but + WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + General Public License for more details. + + Contact Information: + SoC Watch Developer Team + Intel Corporation, + 1300 S Mopac Expwy, + Austin, TX 78746 + + BSD LICENSE + + Copyright(c) 2014 - 2018 Intel Corporation. + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions + are met: + + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in + the documentation and/or other materials provided with the + distribution. + * Neither the name of Intel Corporation nor the names of its + contributors may be used to endorse or promote products derived + from this software without specific prior written permission. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +*/ +#ifndef __SW_OPS_PROVIDER_H__ +#define __SW_OPS_PROVIDER_H__ + +int sw_register_ops_providers(void); +void sw_free_ops_providers(void); + +#endif /* __SW_OPS_PROVIDER_H__ */ diff --git a/drivers/platform/x86/socwatch/inc/sw_output_buffer.h b/drivers/platform/x86/socwatch/inc/sw_output_buffer.h new file mode 100644 index 0000000000000..8d6518222ce38 --- /dev/null +++ b/drivers/platform/x86/socwatch/inc/sw_output_buffer.h @@ -0,0 +1,136 @@ +/* + + This file is provided under a dual BSD/GPLv2 license. When using or + redistributing this file, you may do so under either license. + + GPL LICENSE SUMMARY + + Copyright(c) 2014 - 2018 Intel Corporation. + + This program is free software; you can redistribute it and/or modify + it under the terms of version 2 of the GNU General Public License as + published by the Free Software Foundation. + + This program is distributed in the hope that it will be useful, but + WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + General Public License for more details. + + Contact Information: + SoC Watch Developer Team + Intel Corporation, + 1300 S Mopac Expwy, + Austin, TX 78746 + + BSD LICENSE + + Copyright(c) 2014 - 2018 Intel Corporation. + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions + are met: + + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in + the documentation and/or other materials provided with the + distribution. + * Neither the name of Intel Corporation nor the names of its + contributors may be used to endorse or promote products derived + from this software without specific prior written permission. 
+ + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +*/ + +#ifndef _SW_OUTPUT_BUFFER_H_ +#define _SW_OUTPUT_BUFFER_H_ 1 +/* + * Special mask for the case where all buffers have been flushed. + */ +/* #define sw_ALL_WRITES_DONE_MASK 0xffffffff */ +#define SW_ALL_WRITES_DONE_MASK ((u32)-1) +/* + * Special mask for the case where no data is available to be read. + */ +#define SW_NO_DATA_AVAIL_MASK ((u32)-2) + +/* + * Forward declarations. + */ +struct sw_driver_msg; + +/* + * Data structures. + */ +enum sw_wakeup_action { + SW_WAKEUP_ACTION_DIRECT, + SW_WAKEUP_ACTION_TIMER, + SW_WAKEUP_ACTION_NONE, +}; + +/* + * Variable declarations. + */ +extern u64 sw_num_samples_produced, sw_num_samples_dropped; +extern unsigned long sw_buffer_alloc_size; +extern int sw_max_num_cpus; +extern wait_queue_head_t sw_reader_queue; + +/* + * Public API. 
+ */ +int sw_init_per_cpu_buffers(void); +void sw_destroy_per_cpu_buffers(void); +void sw_reset_per_cpu_buffers(void); + +void sw_count_samples_produced_dropped(void); + +int sw_produce_polled_msg(struct sw_driver_msg *, enum sw_wakeup_action); +int sw_produce_generic_msg(struct sw_driver_msg *, enum sw_wakeup_action); + +bool sw_any_seg_full(u32 *val, bool is_flush_mode); +size_t sw_consume_data(u32 mask, void __user *buffer, size_t bytes_to_read); + +unsigned int sw_get_output_buffer_size(void); + +void sw_wait_once(void); +void sw_wakeup(void); + +void sw_print_output_buffer_overheads(void); + +/* + * Init reader queue. + */ +int sw_init_reader_queue(void); +/* + * Destroy reader queue. + */ +void sw_destroy_reader_queue(void); +/* + * Wakeup client waiting for a full buffer. + */ +void sw_wakeup_reader(enum sw_wakeup_action); +/* + * Wakeup client waiting for a full buffer, and + * cancel any timers initialized by the reader + * subsys. + */ +void sw_cancel_reader(void); +/* + * Print some stats about the reader subsys. + */ +void sw_print_reader_stats(void); + +#endif /* _SW_OUTPUT_BUFFER_H_ */ diff --git a/drivers/platform/x86/socwatch/inc/sw_overhead_measurements.h b/drivers/platform/x86/socwatch/inc/sw_overhead_measurements.h new file mode 100644 index 0000000000000..4052555419a8c --- /dev/null +++ b/drivers/platform/x86/socwatch/inc/sw_overhead_measurements.h @@ -0,0 +1,189 @@ +/* + + This file is provided under a dual BSD/GPLv2 license. When using or + redistributing this file, you may do so under either license. + + GPL LICENSE SUMMARY + + Copyright(c) 2014 - 2018 Intel Corporation. + + This program is free software; you can redistribute it and/or modify + it under the terms of version 2 of the GNU General Public License as + published by the Free Software Foundation. + + This program is distributed in the hope that it will be useful, but + WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU + General Public License for more details. + + Contact Information: + SoC Watch Developer Team + Intel Corporation, + 1300 S Mopac Expwy, + Austin, TX 78746 + + BSD LICENSE + + Copyright(c) 2014 - 2018 Intel Corporation. + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions + are met: + + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in + the documentation and/or other materials provided with the + distribution. + * Neither the name of Intel Corporation nor the names of its + contributors may be used to endorse or promote products derived + from this software without specific prior written permission. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +*/ + +/* + * Description: file containing overhead measurement + * routines used by the power driver. + */ + +#ifndef _PW_OVERHEAD_MEASUREMENTS_H_ +#define _PW_OVERHEAD_MEASUREMENTS_H_ + +/* + * Helper macro to declare variables required + * for conducting overhead measurements. 
+ */ +/* + * For each function that you want to profile, + * do the following (e.g. function 'foo'): + * ************************************************** + * DECLARE_OVERHEAD_VARS(foo); + * ************************************************** + * This will declare the two variables required + * to keep track of overheads incurred in + * calling/servicing 'foo'. Note that the name + * that you declare here *MUST* match the function name! + */ + +#if DO_OVERHEAD_MEASUREMENTS + +#ifndef __get_cpu_var +/* + * Kernels >= 3.19 don't include a definition + * of '__get_cpu_var'. Create one now. + */ +#define __get_cpu_var(var) (*this_cpu_ptr(&var)) +#endif /* __get_cpu_var */ +#ifndef __raw_get_cpu_var +/* + * Kernels >= 3.19 don't include a definition + * of '__raw_get_cpu_var'. Create one now. + */ +#define __raw_get_cpu_var(var) (*raw_cpu_ptr(&var)) +#endif /* __get_cpu_var */ + +extern u64 sw_timestamp(void); + +#define DECLARE_OVERHEAD_VARS(name) \ + static DEFINE_PER_CPU(u64, name##_elapsed_time); \ + static DEFINE_PER_CPU(local_t, name##_num_iters) = LOCAL_INIT(0); \ + \ + static inline u64 get_my_cumulative_elapsed_time_##name(void) \ + { \ + return *(&__get_cpu_var(name##_elapsed_time)); \ + } \ + static inline int get_my_cumulative_num_iters_##name(void) \ + { \ + return local_read(&__get_cpu_var(name##_num_iters)); \ + } \ + \ + static inline u64 name##_get_cumulative_elapsed_time_for(int cpu) \ + { \ + return *(&per_cpu(name##_elapsed_time, cpu)); \ + } \ + \ + static inline int name##_get_cumulative_num_iters_for(int cpu) \ + { \ + return local_read(&per_cpu(name##_num_iters, cpu)); \ + } \ + \ + static inline void name##_get_cumulative_overhead_params(u64 *time, \ + int *iters) \ + { \ + int cpu = 0; \ + *time = 0; \ + *iters = 0; \ + for_each_online_cpu(cpu) { \ + *iters += name##_get_cumulative_num_iters_for(cpu); \ + *time += name##_get_cumulative_elapsed_time_for(cpu); \ + } \ + return; \ + } \ + \ + static inline void 
name##_print_cumulative_overhead_params( \ + const char *str) \ + { \ + int num = 0; \ + u64 time = 0; \ + name##_get_cumulative_overhead_params(&time, &num); \ + printk(KERN_INFO "%s: %d iters took %llu nano seconds!\n", \ + str, num, time); \ + } + +#define DO_PER_CPU_OVERHEAD_FUNC(func, ...) \ + do { \ + u64 *__v = &__raw_get_cpu_var(func##_elapsed_time); \ + u64 tmp_1 = 0, tmp_2 = 0; \ + local_inc(&__raw_get_cpu_var(func##_num_iters)); \ + tmp_1 = sw_timestamp(); \ + { \ + func(__VA_ARGS__); \ + } \ + tmp_2 = sw_timestamp(); \ + *(__v) += (tmp_2 - tmp_1); \ + } while (0) + +#define DO_PER_CPU_OVERHEAD_FUNC_RET(type, func, ...) \ + ({ \ + type __ret; \ + u64 *__v = &__raw_get_cpu_var(func##_elapsed_time); \ + u64 tmp_1 = 0, tmp_2 = 0; \ + local_inc(&__raw_get_cpu_var(func##_num_iters)); \ + tmp_1 = sw_timestamp(); \ + { \ + __ret = func(__VA_ARGS__); \ + } \ + tmp_2 = sw_timestamp(); \ + *(__v) += (tmp_2 - tmp_1); \ + __ret; \ + }) + +#else /* !DO_OVERHEAD_MEASUREMENTS */ +#define DECLARE_OVERHEAD_VARS(name) \ + static inline void name##_print_cumulative_overhead_params( \ + const char *str) \ + { /* NOP */ \ + } + +#define DO_PER_CPU_OVERHEAD_FUNC(func, ...) func(__VA_ARGS__) +#define DO_PER_CPU_OVERHEAD_FUNC_RET(type, func, ...) func(__VA_ARGS__) + +#endif /* DO_OVERHEAD_MEASUREMENTS */ + +#define PRINT_CUMULATIVE_OVERHEAD_PARAMS(name, str) \ + name##_print_cumulative_overhead_params(str) + +#endif /* _PW_OVERHEAD_MEASUREMENTS_H_ */ diff --git a/drivers/platform/x86/socwatch/inc/sw_structs.h b/drivers/platform/x86/socwatch/inc/sw_structs.h new file mode 100644 index 0000000000000..de5ad2b6eb702 --- /dev/null +++ b/drivers/platform/x86/socwatch/inc/sw_structs.h @@ -0,0 +1,528 @@ +/* + + This file is provided under a dual BSD/GPLv2 license. When using or + redistributing this file, you may do so under either license. + + GPL LICENSE SUMMARY + + Copyright(c) 2014 - 2018 Intel Corporation. 
+ + This program is free software; you can redistribute it and/or modify + it under the terms of version 2 of the GNU General Public License as + published by the Free Software Foundation. + + This program is distributed in the hope that it will be useful, but + WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + General Public License for more details. + + Contact Information: + SoC Watch Developer Team + Intel Corporation, + 1300 S Mopac Expwy, + Austin, TX 78746 + + BSD LICENSE + + Copyright(c) 2014 - 2018 Intel Corporation. + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions + are met: + + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in + the documentation and/or other materials provided with the + distribution. + * Neither the name of Intel Corporation nor the names of its + contributors may be used to endorse or promote products derived + from this software without specific prior written permission. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT + OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +*/ +#ifndef __SW_STRUCTS_H__ +#define __SW_STRUCTS_H__ 1 + +#include "sw_types.h" + +/* + * An enumeration of MSR types. + * Required if we want to differentiate + * between different types of MSRs. + */ +enum sw_msr_type { + SW_MSR_TYPE_THREAD, + SW_MSR_TYPE_CORE, + SW_MSR_TYPE_MODULE, + SW_MSR_TYPE_PACKAGE, + SW_MSR_TYPE_SOC, + SW_MSR_TYPE_MAX, +}; + +/* + * Convenience for a 'string' data type. + * Not strictly required. + */ +#pragma pack(push, 1) +typedef struct sw_string_type { + pw_u16_t len; + char data[1]; +} sw_string_type_t; +#pragma pack(pop) +#define SW_STRING_TYPE_HEADER_SIZE() \ + (sizeof(struct sw_string_type) - sizeof(char[1])) + +#pragma pack(push, 1) +struct sw_key_value_payload { + pw_u16_t m_numKeyValuePairs; + char data[1]; +}; +#pragma pack(pop) +#define SW_KEY_VALUE_PAYLOAD_HEADER_SIZE() \ + (sizeof(struct sw_key_value_payload) - sizeof(char[1])) + +typedef enum sw_kernel_wakelock_type { + SW_WAKE_LOCK = 0, /* A kernel wakelock was acquired */ + SW_WAKE_UNLOCK = 1, /* A kernel wakelock was released */ + SW_WAKE_LOCK_TIMEOUT = + 2, /* A kernel wakelock was acquired with a timeout */ + SW_WAKE_LOCK_INITIAL = 3, /* A kernel wakelock was acquired + * before the + * collection started + */ + SW_WAKE_UNLOCK_ALL = 4, /* All previously held kernel wakelocks were + * released -- used in ACPI S3 notifications + */ +} sw_kernel_wakelock_type_t; + +typedef enum sw_when_type { + SW_WHEN_TYPE_BEGIN = 0, /* Start snapshot */ + 
SW_WHEN_TYPE_POLL, + SW_WHEN_TYPE_NOTIFIER, + SW_WHEN_TYPE_TRACEPOINT, + SW_WHEN_TYPE_END, /* Stop snapshot */ + SW_WHEN_TYPE_NONE +} sw_when_type_t; + +/** + * trigger_bits is defined to use type pw_u8_t + * that makes only upto 8 types possible + */ +#define SW_TRIGGER_BEGIN_MASK() (1U << SW_WHEN_TYPE_BEGIN) +#define SW_TRIGGER_END_MASK() (1U << SW_WHEN_TYPE_END) +#define SW_TRIGGER_POLL_MASK() (1U << SW_WHEN_TYPE_POLL) +#define SW_TRIGGER_TRACEPOINT_MASK() (1U << SW_WHEN_TYPE_TRACEPOINT) +#define SW_TRIGGER_NOTIFIER_MASK() (1U << SW_WHEN_TYPE_NOTIFIER) +#define SW_GET_TRIGGER_MASK_VALUE(m) (1U << (m)) +#define SW_TRIGGER_MASK_ALL() (0xFF) + +enum sw_io_cmd { SW_IO_CMD_READ = 0, SW_IO_CMD_WRITE, SW_IO_CMD_MAX }; + +#pragma pack(push, 1) +struct sw_driver_msr_io_descriptor { + pw_u64_t address; + enum sw_msr_type type; +}; +#pragma pack(pop) + +#pragma pack(push, 1) +struct sw_driver_ipc_mmio_io_descriptor { + union { +#ifdef SWW_MERGE +#pragma warning(push) +#pragma warning( \ + disable : 4201) /* disable C4201: nonstandard extension used: + * nameless struct/union + */ +#endif + struct { + pw_u16_t command; + pw_u16_t sub_command; + }; +#ifdef SWW_MERGE +#pragma warning(pop) /* enable C4201 */ +#endif + union { + pw_u32_t ipc_command; /* (sub_command << 12) + * | (command) + */ + pw_u8_t is_gbe; /* Used only for GBE MMIO */ + }; + }; + /* TODO: add a section for 'ctrl_address' and 'ctrl_remapped_address' */ + union { + pw_u64_t data_address; /* Will be "io_remapped" */ + pw_u64_t data_remapped_address; + }; +}; +#pragma pack(pop) + +#pragma pack(push, 1) +struct sw_driver_pci_io_descriptor { + pw_u32_t bus; + pw_u32_t device; + pw_u32_t function; +#ifdef __QNX__ + union { + pw_u32_t offset; + pw_u32_t index; + }; +#else /* __QNX__ */ + pw_u32_t offset; +#endif /* __QNX__ */ +}; +#pragma pack(pop) + +#pragma pack(push, 1) +struct sw_driver_configdb_io_descriptor { + /* pw_u32_t port; */ + /* pw_u32_t offset; */ + pw_u32_t address; +}; +#pragma pack(pop) + 
+#pragma pack(push, 1) +struct sw_driver_trace_args_io_descriptor { + pw_u8_t num_args; /* Number of valid entries in the 'args' array, + * below; 1 <= num_args <= 7 + */ + pw_u8_t args[7]; /* Max of 7 args can be recorded */ +}; +#pragma pack(pop) + +#pragma pack(push, 1) +/** + * struct - sw_driver_telem_io_descriptor - Telemetry Metric descriptor + * + * @id: (Client & Driver) Telemetry ID of the counter to read. + * @idx: (Driver only) index into telem array to read, or the row + * of the telem_indirect table to lookup the telem array index. + * @unit: Unit from which to collect: 0 = PMC, 1 = PUNIT + * Values come from the telemetry_unit enum. + * @scale_op: When there are multiple instances of a telem value (e.g. + * module C-states) the operation to use when scaling the CPU ID + * and adding it to the telemetry data ID. + * @scale_val: Amount to scale an ID (when scaling one.) + * + * Like all hardware mechanism descriptors, the client uses this to pass + * metric hardware properties (unit and ID) to the driver. The driver + * uses it to program the telemetry unit. + * + * Users can specify that IDs should be scaled based on the CPU id, using + * the equation: ID = ID_value + (cpuid ) + * where is one of +, *, /, or %, and scaling_val is an integer + * value. This gives you: + * Operation scale_op scale_val + * Single instance of an ID * 0 + * Sequentially increasing + * CPU-specific values * 1 + * Per module cpu-specific + * values (2 cores/module) / 2 + * Round Robin assignment % cpu_count + * + * Note that scaling_value of 0 implies that no scaling should be + * applied. While (*, 1) is equivalent to (+, 0), the scaling value of 0 + * is reserved/defined to mean "no scaling", and is disallowed. + * + * If you're really tight on space, you could always fold unit and + * scale_op into a single byte without a lot of pain or even effort. 
+ */ +struct sw_driver_telem_io_descriptor { + union { + pw_u16_t id; + pw_u8_t idx; + }; + pw_u8_t unit; + pw_u8_t scale_op; + pw_u16_t scale_val; +}; +#pragma pack(pop) +enum telemetry_unit { TELEM_PUNIT = 0, TELEM_PMC, TELEM_UNIT_NONE }; +#define TELEM_MAX_ID 0xFFFF /* Maximum value of a Telemtry event ID. */ +#define TELEM_MAX_SCALE 0xFFFF /* Maximum ID scaling value. */ +#define TELEM_OP_ADD '+' /* Addition operator */ +#define TELEM_OP_MULT '*' /* Multiplication operator */ +#define TELEM_OP_DIV '/' /* Division operator */ +#define TELEM_OP_MOD '%' /* Modulus operator */ +#define TELEM_OP_NONE 'X' /* No operator--Not a scaled ID */ + +#pragma pack(push, 1) +struct sw_driver_mailbox_io_descriptor { + union { + /* + * Will be "io_remapped" + */ + pw_u64_t interface_address; + pw_u64_t interface_remapped_address; + }; + union { + /* + * Will be "io_remapped" + */ + pw_u64_t data_address; + pw_u64_t data_remapped_address; + }; + pw_u64_t command; + pw_u64_t command_mask; + pw_u16_t run_busy_bit; + pw_u16_t is_msr_type; +}; +#pragma pack(pop) + +#pragma pack(push, 1) +struct sw_driver_pch_mailbox_io_descriptor { + union { + /* + * Will be "io_remapped" + */ + pw_u64_t mtpmc_address; + pw_u64_t mtpmc_remapped_address; + }; + union { + /* + * Will be "io_remapped" + */ + pw_u64_t msg_full_sts_address; + pw_u64_t msg_full_sts_remapped_address; + }; + union { + /* + * Will be "io_remapped" + */ + pw_u64_t mfpmc_address; + pw_u64_t mfpmc_remapped_address; + }; + pw_u32_t data_address; +}; +#pragma pack(pop) + +#pragma pack(push, 1) +typedef struct sw_driver_io_descriptor { + pw_u16_t collection_type; + /* TODO: specify READ/WRITE */ + pw_s16_t collection_command; /* One of 'enum sw_io_cmd' */ + pw_u16_t counter_size_in_bytes; /* The number of bytes to + * READ or WRITE + */ + union { + struct sw_driver_msr_io_descriptor msr_descriptor; + struct sw_driver_ipc_mmio_io_descriptor ipc_descriptor; + struct sw_driver_ipc_mmio_io_descriptor mmio_descriptor; + struct 
sw_driver_pci_io_descriptor pci_descriptor; + struct sw_driver_configdb_io_descriptor configdb_descriptor; + struct sw_driver_trace_args_io_descriptor trace_args_descriptor; + struct sw_driver_telem_io_descriptor telem_descriptor; + struct sw_driver_pch_mailbox_io_descriptor + pch_mailbox_descriptor; + struct sw_driver_mailbox_io_descriptor mailbox_descriptor; + }; + pw_u64_t write_value; /* The value to WRITE */ +} sw_driver_io_descriptor_t; +#pragma pack(pop) + +/** + * sw_driver_interface_info is used to map data collected by kernel-level + * collectors to metrics. The client passes one of these structs to the + * driver for each metric the driver should collect. The driver tags the + * collected data (messages) using info from this struct. When processing + * data from the driver, the client uses its copy of this data to + * identify the plugin, metric, and message IDs of each message. + */ +#pragma pack(push, 1) +struct sw_driver_interface_info { + pw_u64_t tracepoint_id_mask; + pw_u64_t notifier_id_mask; + pw_s16_t cpu_mask; /* On which CPU(s) should the driver + * read the data? + * Currently: -2 ==> read on ALL CPUs, + * -1 ==> read on ANY CPU, + * >= 0 ==> the specific CPU to read on + */ + pw_s16_t plugin_id; /* Metric Plugin SID */ + pw_s16_t metric_id; /* Domain-specific ID assigned by + * each Metric Plugin + */ + pw_s16_t msg_id; /* Msg ID retrieved from the SoC Watch config file */ + pw_u16_t num_io_descriptors; /* Number of descriptors in the array, + * below. + */ + pw_u8_t trigger_bits; /* Mask of 'when bits' to fire this collector. */ + pw_u16_t sampling_interval_msec; /* Sampling interval, in msecs */ + pw_u8_t descriptors[1]; /* Array of sw_driver_io_descriptor structs. 
*/ +}; +#pragma pack(pop) + +#define SW_DRIVER_INTERFACE_INFO_HEADER_SIZE() \ + (sizeof(struct sw_driver_interface_info) - sizeof(pw_u8_t[1])) + +#pragma pack(push, 1) +struct sw_driver_interface_msg { + pw_u16_t num_infos; /* Number of 'sw_driver_interface_info' + * structs contained within the 'infos' variable, + * below + */ + pw_u16_t min_polling_interval_msecs; /* Min time to wait before + * polling; used exclusively + * with the low overhead, + * context-switch based + * polling mode + */ + /* pw_u16_t infos_size_bytes; Size of data inlined + * within the 'infos' variable, below + */ + pw_u8_t infos[1]; +}; +#pragma pack(pop) +#define SW_DRIVER_INTERFACE_MSG_HEADER_SIZE() \ + (sizeof(struct sw_driver_interface_msg) - sizeof(pw_u8_t[1])) + +typedef enum sw_name_id_type { + SW_NAME_TYPE_TRACEPOINT, + SW_NAME_TYPE_NOTIFIER, + SW_NAME_TYPE_COLLECTOR, + SW_NAME_TYPE_MAX, +} sw_name_id_type_t; + +#pragma pack(push, 1) +struct sw_name_id_pair { + pw_u16_t id; + pw_u16_t type; /* One of 'sw_name_id_type' */ + struct sw_string_type name; +}; +#pragma pack(pop) +#define SW_NAME_ID_HEADER_SIZE() \ + (sizeof(struct sw_name_id_pair) - sizeof(struct sw_string_type)) + +#pragma pack(push, 1) +struct sw_name_info_msg { + pw_u16_t num_name_id_pairs; + pw_u16_t payload_len; + pw_u8_t pairs[1]; +}; +#pragma pack(pop) + +/** + * This is the basic data structure for passing data collected by the + * kernel-level collectors up to the client. In addition to the data + * (payload), it contains the minimum metadata required for the client + * to identify the source of that data. 
+ */ +#pragma pack(push, 1) +typedef struct sw_driver_msg { + pw_u64_t tsc; + pw_u16_t cpuidx; + pw_u8_t plugin_id; /* Cannot have more than 256 plugins */ + pw_u8_t metric_id; /* Each plugin cannot handle more than 256 metrics */ + pw_u8_t msg_id; /* Each metric cannot have more than 256 components */ + pw_u16_t payload_len; + /* pw_u64_t p_payload; Ptr to payload */ + union { + pw_u64_t __dummy; /* Ensure size of struct is consistent + * on x86, x64 + */ + char *p_payload; /* Ptr to payload (collected data values). */ + }; +} sw_driver_msg_t; +#pragma pack(pop) +#define SW_DRIVER_MSG_HEADER_SIZE() \ + (sizeof(struct sw_driver_msg) - sizeof(pw_u64_t)) + +typedef enum sw_driver_collection_cmd { + SW_DRIVER_START_COLLECTION = 1, + SW_DRIVER_STOP_COLLECTION = 2, + SW_DRIVER_PAUSE_COLLECTION = 3, + SW_DRIVER_RESUME_COLLECTION = 4, + SW_DRIVER_CANCEL_COLLECTION = 5, +} sw_driver_collection_cmd_t; + +#pragma pack(push, 1) +struct sw_driver_version_info { + pw_u16_t major; + pw_u16_t minor; + pw_u16_t other; +}; +#pragma pack(pop) + +enum cpu_action { + SW_CPU_ACTION_NONE, + SW_CPU_ACTION_OFFLINE, + SW_CPU_ACTION_ONLINE_PREPARE, + SW_CPU_ACTION_ONLINE, + SW_CPU_ACTION_MAX, +}; +#pragma pack(push, 1) +struct sw_driver_topology_change { + pw_u64_t timestamp; /* timestamp */ + enum cpu_action type; /* One of 'enum cpu_action' */ + pw_u16_t cpu; /* logical cpu */ + pw_u16_t core; /* core id */ + pw_u16_t pkg; /* pkg/physical id */ +}; +struct sw_driver_topology_msg { + pw_u16_t num_entries; + pw_u8_t topology_entries[1]; +}; +#pragma pack(pop) + +/** + * An enumeration of possible pm states that + * SoC Watch is interested in + */ +enum sw_pm_action { + SW_PM_ACTION_NONE, + SW_PM_ACTION_SUSPEND_ENTER, + SW_PM_ACTION_SUSPEND_EXIT, + SW_PM_ACTION_HIBERNATE_ENTER, + SW_PM_ACTION_HIBERNATE_EXIT, + SW_PM_ACTION_MAX, +}; + +/** + * An enumeration of possible actions that trigger + * the power notifier + */ +enum sw_pm_mode { + SW_PM_MODE_FIRMWARE, + SW_PM_MODE_NONE, +}; + +#define 
SW_PM_VALUE(mode, action) ((mode) << 16 | (action)) + +/* + * Wrapper for ioctl arguments. + * EVERY ioctl MUST use this struct! + */ +#pragma pack(push, 1) +struct sw_driver_ioctl_arg { + pw_s32_t in_len; + pw_s32_t out_len; + /* pw_u64_t p_in_arg; Pointer to input arg */ + /* pw_u64_t p_out_arg; Pointer to output arg */ + char *in_arg; + char *out_arg; +}; +#pragma pack(pop) + +#pragma pack(push, 1) +typedef struct sw_driver_msg_interval { + pw_u8_t plugin_id; /* Cannot have more than 256 plugins */ + pw_u8_t metric_id; /* Each plugin cannot handle more than 256 metrics */ + pw_u8_t msg_id; /* Each metric cannot have more than 256 components */ + pw_u16_t interval; /* collection interval */ +} sw_driver_msg_interval_t; +#pragma pack(pop) + +#endif /* __SW_STRUCTS_H__ */ diff --git a/drivers/platform/x86/socwatch/inc/sw_telem.h b/drivers/platform/x86/socwatch/inc/sw_telem.h new file mode 100644 index 0000000000000..e324ff681b2e9 --- /dev/null +++ b/drivers/platform/x86/socwatch/inc/sw_telem.h @@ -0,0 +1,74 @@ +/* + + This file is provided under a dual BSD/GPLv2 license. When using or + redistributing this file, you may do so under either license. + + GPL LICENSE SUMMARY + + Copyright(c) 2014 - 2018 Intel Corporation. + + This program is free software; you can redistribute it and/or modify + it under the terms of version 2 of the GNU General Public License as + published by the Free Software Foundation. + + This program is distributed in the hope that it will be useful, but + WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + General Public License for more details. + + Contact Information: + SoC Watch Developer Team + Intel Corporation, + 1300 S Mopac Expwy, + Austin, TX 78746 + + BSD LICENSE + + Copyright(c) 2014 - 2018 Intel Corporation. 
+ + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions + are met: + + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in + the documentation and/or other materials provided with the + distribution. + * Neither the name of Intel Corporation nor the names of its + contributors may be used to endorse or promote products derived + from this software without specific prior written permission. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +*/ + +#ifndef _SW_TELEM_H_ +#define _SW_TELEM_H_ 1 + +#include "sw_structs.h" /* sw_driver_io_descriptor */ +#include "sw_types.h" /* u8 and other types */ + +int sw_telem_init_func(struct sw_driver_io_descriptor *descriptor); +void sw_read_telem_info(char *dst_vals, int cpu, + const struct sw_driver_io_descriptor *descriptor, + u16 counter_size_in_bytes); +void sw_write_telem_info(char *dst_vals, int cpu, + const struct sw_driver_io_descriptor *descriptor, + u16 counter_size_in_bytes); +int sw_reset_telem(const struct sw_driver_io_descriptor *descriptor); +bool sw_telem_available(void); +bool sw_telem_post_config(void); + +#endif /* _SW_TELEM_H_ */ diff --git a/drivers/platform/x86/socwatch/inc/sw_trace_notifier_provider.h b/drivers/platform/x86/socwatch/inc/sw_trace_notifier_provider.h new file mode 100644 index 0000000000000..3ec4930c90102 --- /dev/null +++ b/drivers/platform/x86/socwatch/inc/sw_trace_notifier_provider.h @@ -0,0 +1,82 @@ +/* + + This file is provided under a dual BSD/GPLv2 license. When using or + redistributing this file, you may do so under either license. + + GPL LICENSE SUMMARY + + Copyright(c) 2014 - 2018 Intel Corporation. + + This program is free software; you can redistribute it and/or modify + it under the terms of version 2 of the GNU General Public License as + published by the Free Software Foundation. + + This program is distributed in the hope that it will be useful, but + WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + General Public License for more details. + + Contact Information: + SoC Watch Developer Team + Intel Corporation, + 1300 S Mopac Expwy, + Austin, TX 78746 + + BSD LICENSE + + Copyright(c) 2014 - 2018 Intel Corporation. 
+ + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions + are met: + + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in + the documentation and/or other materials provided with the + distribution. + * Neither the name of Intel Corporation nor the names of its + contributors may be used to endorse or promote products derived + from this software without specific prior written permission. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +*/ +#ifndef __SW_TRACE_NOTIFIER_PROVIDER_H__ +#define __SW_TRACE_NOTIFIER_PROVIDER_H__ + +u64 sw_timestamp(void); +/* + * Some architectures and OS versions require a "discovery" + * phase for tracepoints and/or notifiers. Allow for that here. + */ +int sw_extract_trace_notifier_providers(void); +/* + * Reset trace/notifier providers at the end + * of a collection. + */ +void sw_reset_trace_notifier_providers(void); +/* + * Print statistics on trace/notifier provider overheads. 
+ */ +void sw_print_trace_notifier_provider_overheads(void); +/* + * Add all trace/notifier providers. + */ +int sw_add_trace_notifier_providers(void); +/* + * Remove previously added providers. + */ +void sw_remove_trace_notifier_providers(void); +#endif /* __SW_TRACE_NOTIFIER_PROVIDER_H__ */ diff --git a/drivers/platform/x86/socwatch/inc/sw_tracepoint_handlers.h b/drivers/platform/x86/socwatch/inc/sw_tracepoint_handlers.h new file mode 100644 index 0000000000000..d8a54c099d364 --- /dev/null +++ b/drivers/platform/x86/socwatch/inc/sw_tracepoint_handlers.h @@ -0,0 +1,159 @@ +/* + + This file is provided under a dual BSD/GPLv2 license. When using or + redistributing this file, you may do so under either license. + + GPL LICENSE SUMMARY + + Copyright(c) 2014 - 2018 Intel Corporation. + + This program is free software; you can redistribute it and/or modify + it under the terms of version 2 of the GNU General Public License as + published by the Free Software Foundation. + + This program is distributed in the hope that it will be useful, but + WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + General Public License for more details. + + Contact Information: + SoC Watch Developer Team + Intel Corporation, + 1300 S Mopac Expwy, + Austin, TX 78746 + + BSD LICENSE + + Copyright(c) 2014 - 2018 Intel Corporation. + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions + are met: + + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in + the documentation and/or other materials provided with the + distribution. 
+ * Neither the name of Intel Corporation nor the names of its + contributors may be used to endorse or promote products derived + from this software without specific prior written permission. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +*/ +#ifndef __SW_TRACEPOINT_HANDLERS_H__ +#define __SW_TRACEPOINT_HANDLERS_H__ + +#include "sw_internal.h" + +extern pw_u16_t sw_min_polling_interval_msecs; + +enum sw_trace_data_type { + SW_TRACE_COLLECTOR_TRACEPOINT, + SW_TRACE_COLLECTOR_NOTIFIER +}; + +struct sw_trace_notifier_name { + const char * + kernel_name; /* The tracepoint name; used by the kernel + * to identify tracepoints + */ + const char * + abstract_name; /* An abstract name used by plugins to + * specify tracepoints-of-interest; + * shared with Ring-3 + */ +}; + +typedef struct sw_trace_notifier_data sw_trace_notifier_data_t; +typedef int (*sw_trace_notifier_register_func)( + struct sw_trace_notifier_data *node); +typedef int (*sw_trace_notifier_unregister_func)( + struct sw_trace_notifier_data *node); + +struct sw_trace_notifier_data { + enum sw_trace_data_type type; /* Tracepoint or Notifier */ + const struct sw_trace_notifier_name *name; /* Tracepoint name(s) */ + sw_trace_notifier_register_func probe_register; /* probe register + * 
function + */ + sw_trace_notifier_unregister_func probe_unregister; /* probe unregister + * function + */ + struct tracepoint *tp; + bool always_register; /* Set to TRUE if this tracepoint/notifier + * must ALWAYS be registered, regardless + * of whether the user has specified + * anything to collect + */ + bool was_registered; + SW_DEFINE_LIST_HEAD( + list, + sw_collector_data); /* List of 'sw_collector_data' + * instances for this tracepoint + * or notifier + */ +}; + +struct sw_topology_node { + struct sw_driver_topology_change change; + + SW_LIST_ENTRY(list, sw_topology_node); +}; +SW_DECLARE_LIST_HEAD( + sw_topology_list, + sw_topology_node); /* List of entries tracking + * changes in CPU topology + */ +extern size_t sw_num_topology_entries; /* Size of the 'sw_topology_list' */ + +int sw_extract_tracepoints(void); +int sw_register_trace_notifiers(void); +int sw_unregister_trace_notifiers(void); + +/* + * Register a single TRACE/NOTIFY provider. + */ +int sw_register_trace_notify_provider(struct sw_trace_notifier_data *tnode); +/* + * Add all TRACE/NOTIFY providers. + */ +int sw_add_trace_notify(void); +void sw_remove_trace_notify(void); + +void sw_reset_trace_notifier_lists(void); + +void sw_print_trace_notifier_overheads(void); + +int sw_for_each_tracepoint_node(int (*func)(struct sw_trace_notifier_data *node, + void *priv), + void *priv, bool return_on_error); +int sw_for_each_notifier_node(int (*func)(struct sw_trace_notifier_data *node, + void *priv), + void *priv, bool return_on_error); + +int sw_get_trace_notifier_id(struct sw_trace_notifier_data *node); + +const char * +sw_get_trace_notifier_kernel_name(struct sw_trace_notifier_data *node); +const char * +sw_get_trace_notifier_abstract_name(struct sw_trace_notifier_data *node); + +/* + * Clear out the topology list. 
+ */ +void sw_clear_topology_list(void); + +#endif /* __SW_TRACEPOINT_HANDLERS_H__ */ diff --git a/drivers/platform/x86/socwatch/inc/sw_types.h b/drivers/platform/x86/socwatch/inc/sw_types.h new file mode 100644 index 0000000000000..156c92c8349aa --- /dev/null +++ b/drivers/platform/x86/socwatch/inc/sw_types.h @@ -0,0 +1,152 @@ +/* + + This file is provided under a dual BSD/GPLv2 license. When using or + redistributing this file, you may do so under either license. + + GPL LICENSE SUMMARY + + Copyright(c) 2014 - 2018 Intel Corporation. + + This program is free software; you can redistribute it and/or modify + it under the terms of version 2 of the GNU General Public License as + published by the Free Software Foundation. + + This program is distributed in the hope that it will be useful, but + WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + General Public License for more details. + + Contact Information: + SoC Watch Developer Team + Intel Corporation, + 1300 S Mopac Expwy, + Austin, TX 78746 + + BSD LICENSE + + Copyright(c) 2014 - 2018 Intel Corporation. + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions + are met: + + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in + the documentation and/or other materials provided with the + distribution. + * Neither the name of Intel Corporation nor the names of its + contributors may be used to endorse or promote products derived + from this software without specific prior written permission. 
+ + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +*/ + +#ifndef _PW_TYPES_H_ +#define _PW_TYPES_H_ + +#if defined(__linux__) || defined(__APPLE__) || defined(__QNX__) + +#ifndef __KERNEL__ +/* + * Called from Ring-3. + */ +#include /* Grab 'uint64_t' etc. */ +#include /* Grab 'pid_t' */ +/* + * UNSIGNED types... + */ +typedef uint8_t u8; +typedef uint16_t u16; +typedef uint32_t u32; +typedef uint64_t u64; +/* + * SIGNED types... + */ +typedef int8_t s8; +typedef int16_t s16; +typedef int32_t s32; +typedef int64_t s64; + +#else /* __KERNEL__ */ +#if !defined(__APPLE__) +#include +#else /* __APPLE__ */ +#include +#include /* Grab 'uint64_t' etc. */ + +typedef uint8_t u8; +typedef uint16_t u16; +typedef uint32_t u32; +typedef uint64_t u64; +/* + * SIGNED types... + */ +typedef int8_t s8; +typedef int16_t s16; +typedef int32_t s32; +typedef int64_t s64; +#endif /* __APPLE__ */ +#endif /* __KERNEL__ */ + +#elif defined(_WIN32) +typedef __int32 int32_t; +typedef unsigned __int32 uint32_t; +typedef __int64 int64_t; +typedef unsigned __int64 uint64_t; + +/* + * UNSIGNED types... + */ +typedef unsigned char u8; +typedef unsigned short u16; +typedef unsigned int u32; +typedef unsigned long long u64; + +/* + * SIGNED types... 
+ */ +typedef signed char s8; +typedef signed short s16; +typedef signed int s32; +typedef signed long long s64; +typedef s32 pid_t; +typedef s32 ssize_t; + +#endif /* _WIN32 */ + +/* ************************************ + * Common to both operating systems. + * ************************************ + */ +/* + * UNSIGNED types... + */ +typedef u8 pw_u8_t; +typedef u16 pw_u16_t; +typedef u32 pw_u32_t; +typedef u64 pw_u64_t; + +/* + * SIGNED types... + */ +typedef s8 pw_s8_t; +typedef s16 pw_s16_t; +typedef s32 pw_s32_t; +typedef s64 pw_s64_t; + +typedef pid_t pw_pid_t; + +#endif /* _PW_TYPES_H_ */ diff --git a/drivers/platform/x86/socwatch/inc/sw_version.h b/drivers/platform/x86/socwatch/inc/sw_version.h new file mode 100644 index 0000000000000..5476b0d79ac5c --- /dev/null +++ b/drivers/platform/x86/socwatch/inc/sw_version.h @@ -0,0 +1,74 @@ +/* + + This file is provided under a dual BSD/GPLv2 license. When using or + redistributing this file, you may do so under either license. + + GPL LICENSE SUMMARY + + Copyright(c) 2014 - 2018 Intel Corporation. + + This program is free software; you can redistribute it and/or modify + it under the terms of version 2 of the GNU General Public License as + published by the Free Software Foundation. + + This program is distributed in the hope that it will be useful, but + WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + General Public License for more details. + + Contact Information: + SoC Watch Developer Team + Intel Corporation, + 1300 S Mopac Expwy, + Austin, TX 78746 + + BSD LICENSE + + Copyright(c) 2014 - 2018 Intel Corporation. + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions + are met: + + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. 
+ * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in + the documentation and/or other materials provided with the + distribution. + * Neither the name of Intel Corporation nor the names of its + contributors may be used to endorse or promote products derived + from this software without specific prior written permission. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +*/ + +#ifndef __SW_VERSION_H__ +#define __SW_VERSION_H__ 1 + +/* + * SOCWatch driver version + */ +#define SW_DRIVER_VERSION_MAJOR 2 +#define SW_DRIVER_VERSION_MINOR 6 +#define SW_DRIVER_VERSION_OTHER 2 + +/* + * Every SOC Watch userspace component shares the same version number. + */ +#define SOCWATCH_VERSION_MAJOR 2 +#define SOCWATCH_VERSION_MINOR 8 +#define SOCWATCH_VERSION_OTHER 0 + +#endif /* __SW_VERSION_H__ */ diff --git a/drivers/platform/x86/socwatch/sw_collector.c b/drivers/platform/x86/socwatch/sw_collector.c new file mode 100644 index 0000000000000..db855bab4fd80 --- /dev/null +++ b/drivers/platform/x86/socwatch/sw_collector.c @@ -0,0 +1,707 @@ +/* + + This file is provided under a dual BSD/GPLv2 license. When using or + redistributing this file, you may do so under either license. 
+ + GPL LICENSE SUMMARY + + Copyright(c) 2014 - 2018 Intel Corporation. + + This program is free software; you can redistribute it and/or modify + it under the terms of version 2 of the GNU General Public License as + published by the Free Software Foundation. + + This program is distributed in the hope that it will be useful, but + WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + General Public License for more details. + + Contact Information: + SoC Watch Developer Team + Intel Corporation, + 1300 S Mopac Expwy, + Austin, TX 78746 + + BSD LICENSE + + Copyright(c) 2014 - 2018 Intel Corporation. + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions + are met: + + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in + the documentation and/or other materials provided with the + distribution. + * Neither the name of Intel Corporation nor the names of its + contributors may be used to endorse or promote products derived + from this software without specific prior written permission. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT + OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +*/ +#include "sw_internal.h" +#include "sw_structs.h" +#include "sw_collector.h" +#include "sw_kernel_defines.h" +#include "sw_mem.h" +#include "sw_types.h" +#include "sw_hardware_io.h" +#include "sw_output_buffer.h" + +/* ------------------------------------------------- + * Local function declarations. + * ------------------------------------------------- + */ +void sw_free_driver_interface_info_i(struct sw_driver_interface_info *info); +const struct sw_hw_ops **sw_alloc_ops_i(pw_u16_t num_io_descriptors); +void sw_free_ops_i(const struct sw_hw_ops **ops); +struct sw_driver_interface_info * +sw_copy_driver_interface_info_i(const struct sw_driver_interface_info *info); +int sw_init_driver_interface_info_i(struct sw_driver_interface_info *info); +int sw_reset_driver_interface_info_i(struct sw_driver_interface_info *info); +int sw_init_ops_i(const struct sw_hw_ops **ops, + const struct sw_driver_interface_info *info); +sw_driver_msg_t * +sw_alloc_collector_msg_i(const struct sw_driver_interface_info *info, + size_t per_msg_payload_size); +void sw_free_collector_msg_i(sw_driver_msg_t *msg); +size_t sw_get_payload_size_i(const struct sw_driver_interface_info *info); +void sw_handle_per_cpu_msg_i(void *info, enum sw_wakeup_action action); +/* ------------------------------------------------- + * Variables. 
+ * ------------------------------------------------- + */ +const static struct sw_hw_ops *s_hw_ops; +/* ------------------------------------------------- + * Function definitions. + * ------------------------------------------------- + */ +/* + * Driver interface info functions. + */ + +/** + * sw_add_driver_info() - Add a collector node to the list called at this + * "when type". + * @head: The collector node list to add the new node to. + * @info: Driver information to add to the list. + * + * This function allocates and links in a "collector node" for each + * collector based on the collector info in the info parameter. + * The function allocates the new node, and links it to a local copy + * of the passed-in driver interface info. If the collector has an + * init function among its operations, it iterates through the + * descriptors in info, passing each one to the init function. + * + * Finally, it allocates and initializes the "collector message" which + * buffers a data sample that this collector gathers during the run. + * + * Returns: -PW_ERROR on failure, PW_SUCCESS on success. + */ +int sw_add_driver_info(void *list_head, + const struct sw_driver_interface_info *info) +{ + SW_LIST_HEAD_VAR(sw_collector_data) * head = list_head; + struct sw_collector_data *node = sw_alloc_collector_node(); + + if (!node) { + pw_pr_error("ERROR allocating collector node!\n"); + return -PW_ERROR; + } + + node->info = sw_copy_driver_interface_info_i(info); + if (!node->info) { + pw_pr_error( + "ERROR allocating or copying driver_interface_info!\n"); + sw_free_collector_node(node); + return -PW_ERROR; + } + /* + * Initialize the collectors in the node's descriptors. + */ + if (sw_init_driver_interface_info_i(node->info)) { + pw_pr_error( + "ERROR initializing a driver_interface_info node!\n"); + sw_free_collector_node(node); + return -PW_ERROR; + } + /* + * Allocate the ops array. 
We do this one time as an optimization + * (we could always just repeatedly call 'sw_get_hw_ops_for()' + * during the collection but we want to avoid that overhead) + */ + node->ops = sw_alloc_ops_i(info->num_io_descriptors); + if (!node->ops || sw_init_ops_i(node->ops, info)) { + pw_pr_error("ERROR initializing the ops array!\n"); + sw_free_collector_node(node); + return -PW_ERROR; + } + /* + * Allocate and initialize the "collector message". + */ + node->per_msg_payload_size = sw_get_payload_size_i(info); + pw_pr_debug("Debug: Per msg payload size = %u\n", + (unsigned int)node->per_msg_payload_size); + node->msg = sw_alloc_collector_msg_i(info, node->per_msg_payload_size); + if (!node->msg) { + pw_pr_error("ERROR allocating space for a collector msg!\n"); + sw_free_collector_node(node); + return -PW_ERROR; + } + pw_pr_debug("NODE = %p, NODE->MSG = %p\n", node, node->msg); + cpumask_clear(&node->cpumask); + { + /* + * For now, use following protocol: + * cpu_mask == -2 ==> Collect on ALL CPUs + * cpu_mask == -1 ==> Collect on ANY CPU + * cpu_mask >= 0 ==> Collect on a specific CPU + */ + if (node->info->cpu_mask >= 0) { + /* + * Collect data on 'node->info->cpu_mask' + */ + cpumask_set_cpu(node->info->cpu_mask, &node->cpumask); + pw_pr_debug("OK: set CPU = %d\n", node->info->cpu_mask); + } else if (node->info->cpu_mask == -1) { + /* + * Collect data on ANY CPU. Leave empty as a flag + * to signify user wishes to collect data on 'ANY' cpu. + */ + pw_pr_debug("OK: set ANY CPU\n"); + } else { + /* + * Collect data on ALL cpus. 
+ */ + cpumask_copy(&node->cpumask, cpu_present_mask); + pw_pr_debug("OK: set ALL CPUs\n"); + } + } + SW_LIST_ADD(head, node, list); + return PW_SUCCESS; +} + +const struct sw_hw_ops **sw_alloc_ops_i(pw_u16_t num_io_descriptors) +{ + size_t size = num_io_descriptors * sizeof(struct sw_hw_ops *); + const struct sw_hw_ops **ops = sw_kmalloc(size, GFP_KERNEL); + + if (ops) { + memset(ops, 0, size); + } + return ops; +} + +void sw_free_driver_interface_info_i(struct sw_driver_interface_info *info) +{ + if (info) { + sw_kfree(info); + } +} + +void sw_free_ops_i(const struct sw_hw_ops **ops) +{ + if (ops) { + sw_kfree(ops); + } +} + +/** + * sw_copy_driver_interface_info_i - Allocate and copy the passed-in "info". + * + * @info: Information about the metric and collection properties + * + * Returns: a pointer to the newly allocated sw_driver_interface_info, + * which is a copy of the version passed in via the info pointer. + */ +struct sw_driver_interface_info * +sw_copy_driver_interface_info_i(const struct sw_driver_interface_info *info) +{ + size_t size; + struct sw_driver_interface_info *node = NULL; + + if (!info) { + pw_pr_error("ERROR: NULL sw_driver_interface_info in alloc!\n"); + return node; + } + + size = SW_DRIVER_INTERFACE_INFO_HEADER_SIZE() + + (info->num_io_descriptors * + sizeof(struct sw_driver_io_descriptor)); + node = (struct sw_driver_interface_info *)sw_kmalloc(size, GFP_KERNEL); + if (!node) { + pw_pr_error("ERROR allocating driver interface info!\n"); + return node; + } + memcpy((char *)node, (const char *)info, size); + + /* + * Do debug dump. + */ + pw_pr_debug("DRIVER info has plugin_ID = %d, metric_ID = %d, " + "msg_ID = %d\n", + node->plugin_id, node->metric_id, node->msg_id); + + return node; +} +int sw_init_driver_interface_info_i(struct sw_driver_interface_info *info) +{ + /* + * Do any initialization here. + * For now, only IPC/MMIO descriptors need to be initialized. 
+ */ + int i = 0; + struct sw_driver_io_descriptor *descriptor = NULL; + + if (!info) { + pw_pr_error("ERROR: no info!\n"); + return -PW_ERROR; + } + for (i = 0, + descriptor = (struct sw_driver_io_descriptor *)info->descriptors; + i < info->num_io_descriptors; ++i, ++descriptor) { + if (sw_init_driver_io_descriptor(descriptor)) { + return -PW_ERROR; + } + } + return PW_SUCCESS; +} + +int sw_reset_driver_interface_info_i(struct sw_driver_interface_info *info) +{ + /* + * Do any finalization here. + * For now, only IPC/MMIO descriptors need to be finalized. + */ + int i = 0; + struct sw_driver_io_descriptor *descriptor = NULL; + + if (!info) { + pw_pr_error("ERROR: no info!\n"); + return -PW_ERROR; + } + for (i = 0, + descriptor = (struct sw_driver_io_descriptor *)info->descriptors; + i < info->num_io_descriptors; ++i, ++descriptor) { + if (sw_reset_driver_io_descriptor(descriptor)) { + return -PW_ERROR; + } + } + return PW_SUCCESS; +} +int sw_init_ops_i(const struct sw_hw_ops **ops, + const struct sw_driver_interface_info *info) +{ + int i = 0; + struct sw_driver_io_descriptor *descriptor = NULL; + + if (!ops || !info) { + return -PW_ERROR; + } + for (i = 0, + descriptor = (struct sw_driver_io_descriptor *)info->descriptors; + i < info->num_io_descriptors; ++i, ++descriptor) { + ops[i] = sw_get_hw_ops_for(descriptor->collection_type); + if (ops[i] == NULL) { + return -PW_ERROR; + } + } + return PW_SUCCESS; +} + +/* + * If this descriptor's collector has an init function, call it passing in + * this descriptor. That allows the collector to perform any initialization + * or registration specific to this metric. 
+ */ +int sw_init_driver_io_descriptor(struct sw_driver_io_descriptor *descriptor) +{ + sw_io_desc_init_func_t init_func = NULL; + const struct sw_hw_ops *ops = + sw_get_hw_ops_for(descriptor->collection_type); + if (ops == NULL) { + pw_pr_error("NULL ops found in init_driver_io_desc: type %d\n", + descriptor->collection_type); + return -PW_ERROR; + } + init_func = ops->init; + + if (init_func) { + int retval = (*init_func)(descriptor); + + if (retval) { + pw_pr_error("(*init) return value for type %d: %d\n", + descriptor->collection_type, retval); + } + return retval; + } + return PW_SUCCESS; +} + +/* + * If this descriptor's collector has a finalize function, call it passing in + * this descriptor. This allows the collector to perform any finalization + * specific to this metric. + */ +int sw_reset_driver_io_descriptor(struct sw_driver_io_descriptor *descriptor) +{ + sw_io_desc_reset_func_t reset_func = NULL; + const struct sw_hw_ops *ops = + sw_get_hw_ops_for(descriptor->collection_type); + if (ops == NULL) { + pw_pr_error("NULL ops found in reset_driver_io_desc: type %d\n", + descriptor->collection_type); + return -PW_ERROR; + } + pw_pr_debug("calling reset on descriptor of type %d\n", + descriptor->collection_type); + reset_func = ops->reset; + + if (reset_func) { + int retval = (*reset_func)(descriptor); + + if (retval) { + pw_pr_error("(*reset) return value for type %d: %d\n", + descriptor->collection_type, retval); + } + return retval; + } + return PW_SUCCESS; +} + +int sw_handle_driver_io_descriptor( + char *dst_vals, int cpu, + const struct sw_driver_io_descriptor *descriptor, + const struct sw_hw_ops *hw_ops) +{ + typedef void (*sw_hardware_io_func_t)( + char *, int, const struct sw_driver_io_descriptor *, u16); + sw_hardware_io_func_t hardware_io_func = NULL; + + if (descriptor->collection_command < SW_IO_CMD_READ || + descriptor->collection_command > SW_IO_CMD_WRITE) { + return -PW_ERROR; + } + switch (descriptor->collection_command) { + case 
SW_IO_CMD_READ: + hardware_io_func = hw_ops->read; + break; + case SW_IO_CMD_WRITE: + hardware_io_func = hw_ops->write; + break; + default: + break; + } + if (hardware_io_func) { + (*hardware_io_func)(dst_vals, cpu, descriptor, + descriptor->counter_size_in_bytes); + } else { + pw_pr_debug( + "NO ops to satisfy %u operation for collection type %u!\n", + descriptor->collection_command, + descriptor->collection_type); + } + return PW_SUCCESS; +} + +sw_driver_msg_t * +sw_alloc_collector_msg_i(const struct sw_driver_interface_info *info, + size_t per_msg_payload_size) +{ + size_t per_msg_size = 0, total_size = 0; + sw_driver_msg_t *msg = NULL; + + if (!info) { + return NULL; + } + per_msg_size = sizeof(struct sw_driver_msg) + per_msg_payload_size; + total_size = per_msg_size * num_possible_cpus(); + msg = (sw_driver_msg_t *)sw_kmalloc(total_size, GFP_KERNEL); + if (msg) { + int cpu = -1; + + memset(msg, 0, total_size); + for_each_possible_cpu(cpu) { + sw_driver_msg_t *__msg = GET_MSG_SLOT_FOR_CPU( + msg, cpu, per_msg_payload_size); + char *__payload = + (char *)__msg + sizeof(struct sw_driver_msg); + __msg->cpuidx = (pw_u16_t)cpu; + __msg->plugin_id = (pw_u8_t)info->plugin_id; + __msg->metric_id = (pw_u8_t)info->metric_id; + __msg->msg_id = (pw_u8_t)info->msg_id; + __msg->payload_len = per_msg_payload_size; + __msg->p_payload = __payload; + pw_pr_debug( + "[%d]: per_msg_payload_size = %zx, msg = %p, payload = %p\n", + cpu, per_msg_payload_size, __msg, __payload); + } + } + return msg; +} + +void sw_free_collector_msg_i(sw_driver_msg_t *msg) +{ + if (msg) { + sw_kfree(msg); + } +} + +size_t sw_get_payload_size_i(const struct sw_driver_interface_info *info) +{ + size_t size = 0; + int i = 0; + + if (info) { + for (i = 0; i < info->num_io_descriptors; + size += + ((struct sw_driver_io_descriptor *)info->descriptors)[i] + .counter_size_in_bytes, + ++i) + ; + } + return size; +} + +void sw_handle_per_cpu_msg_i(void *info, enum sw_wakeup_action action) +{ + /* + * Basic 
algo: + * For each descriptor in 'node->info->descriptors'; do: + * 1. Perform H/W read; use 'descriptor->collection_type' + * to determine type of read; use 'descriptor->counter_size_in_bytes' + * for read size. Use msg->p_payload[dst_idx] as dst address + * 2. Increment dst idx by 'descriptor->counter_size_in_bytes' + */ + struct sw_collector_data *node = (struct sw_collector_data *)info; + int cpu = RAW_CPU(); + u16 num_descriptors = node->info->num_io_descriptors, i = 0; + struct sw_driver_io_descriptor *descriptors = + (struct sw_driver_io_descriptor *)node->info->descriptors; + sw_driver_msg_t *msg = GET_MSG_SLOT_FOR_CPU(node->msg, cpu, + node->per_msg_payload_size); + char *dst_vals = msg->p_payload; + const struct sw_hw_ops **ops = node->ops; + bool wasAnyWrite = false; + + /* msg TSC assigned when msg is written to buffer */ + msg->cpuidx = cpu; + + for (i = 0; i < num_descriptors; ++i, + dst_vals += descriptors->counter_size_in_bytes, ++descriptors) { + if (unlikely(ops[i] == NULL)) { + pw_pr_debug("NULL OPS!\n"); + continue; + } + if (descriptors->collection_command == SW_IO_CMD_WRITE) { + wasAnyWrite = true; + } + if (sw_handle_driver_io_descriptor(dst_vals, cpu, descriptors, + ops[i])) { + pw_pr_error("ERROR reading descriptor with type %d\n", + descriptors->collection_type); + } + } + + /* + * We produce messages only on READs. Note that SWA prohibits + * messages that contain both READ and WRITE descriptors, so it + * is enough to check if there was ANY WRITE descriptor in this + * message. + */ + if (likely(wasAnyWrite == false)) { + if (sw_produce_generic_msg(msg, action)) { + pw_pr_warn("WARNING: could NOT produce message!\n"); + } + } + + return; +} + +/* + * Collector list and node functions. 
+ */ +struct sw_collector_data *sw_alloc_collector_node(void) +{ + struct sw_collector_data *node = (struct sw_collector_data *)sw_kmalloc( + sizeof(struct sw_collector_data), GFP_KERNEL); + if (node) { + node->per_msg_payload_size = 0x0; + node->last_update_jiffies = 0x0; + node->info = NULL; + node->ops = NULL; + node->msg = NULL; + SW_LIST_ENTRY_INIT(node, list); + } + return node; +} + +void sw_free_collector_node(struct sw_collector_data *node) +{ + if (!node) { + return; + } + if (node->info) { + sw_reset_driver_interface_info_i(node->info); + sw_free_driver_interface_info_i(node->info); + node->info = NULL; + } + if (node->ops) { + sw_free_ops_i(node->ops); + node->ops = NULL; + } + if (node->msg) { + sw_free_collector_msg_i(node->msg); + node->msg = NULL; + } + sw_kfree(node); + return; +} + +int sw_handle_collector_node(struct sw_collector_data *node) +{ + if (!node || !node->info || !node->ops || !node->msg) { + return -PW_ERROR; + } + pw_pr_debug("Calling SMP_CALL_FUNCTION_MANY!\n"); + sw_schedule_work(&node->cpumask, &sw_handle_per_cpu_msg, node); + return PW_SUCCESS; +} + +int sw_handle_collector_node_on_cpu(struct sw_collector_data *node, int cpu) +{ + if (!node || !node->info || !node->ops || !node->msg) { + return -PW_ERROR; + } + /* + * Check if this node indicates it should be scheduled + * on the given cpu. If so, clear all other CPUs from the + * mask and schedule the node. 
+ */ + if (cpumask_test_cpu(cpu, &node->cpumask)) { + struct cpumask tmp_mask; + + cpumask_clear(&tmp_mask); + cpumask_set_cpu(cpu, &tmp_mask); + pw_pr_debug("Calling SMP_CALL_FUNCTION_MANY!\n"); + sw_schedule_work(&tmp_mask, &sw_handle_per_cpu_msg, node); + } + return PW_SUCCESS; +} + +void sw_init_collector_list(void *list_head) +{ + SW_LIST_HEAD_VAR(sw_collector_data) * head = list_head; + SW_LIST_HEAD_INIT(head); +} + +void sw_destroy_collector_list(void *list_head) +{ + SW_LIST_HEAD_VAR(sw_collector_data) * head = list_head; + while (!SW_LIST_EMPTY(head)) { + struct sw_collector_data *curr = + SW_LIST_GET_HEAD_ENTRY(head, sw_collector_data, list); + BUG_ON(!curr->info); + SW_LIST_UNLINK(curr, list); + sw_free_collector_node(curr); + } +} + +/** + * sw_handle_collector_list - Iterate through the collector list, calling + * func() upon each element. + * @list_head: The collector list head. + * @func: The function to call for each collector. + * + * This function is called when one of the "when types" fires, since the + * passed-in collector node list is the list of collections to do at that time. + * + * Returns: PW_SUCCESS on success, -PW_ERROR on error. 
+ */ +int sw_handle_collector_list(void *list_head, + int (*func)(struct sw_collector_data *data)) +{ + SW_LIST_HEAD_VAR(sw_collector_data) * head = list_head; + int retVal = PW_SUCCESS; + struct sw_collector_data *curr = NULL; + + if (!head || !func) { + return -PW_ERROR; + } + SW_LIST_FOR_EACH_ENTRY(curr, head, list) + { + pw_pr_debug("HANDLING\n"); + if ((*func)(curr)) { + retVal = -PW_ERROR; + } + } + return retVal; +} + +int sw_handle_collector_list_on_cpu(void *list_head, + int (*func)(struct sw_collector_data *data, + int cpu), + int cpu) +{ + SW_LIST_HEAD_VAR(sw_collector_data) * head = list_head; + int retVal = PW_SUCCESS; + struct sw_collector_data *curr = NULL; + if (!head || !func) { + return -PW_ERROR; + } + SW_LIST_FOR_EACH_ENTRY(curr, head, list) + { + pw_pr_debug("HANDLING\n"); + if ((*func)(curr, cpu)) { + retVal = -PW_ERROR; + } + } + return retVal; +} + +void sw_handle_per_cpu_msg(void *info) +{ + sw_handle_per_cpu_msg_i(info, SW_WAKEUP_ACTION_DIRECT); +} + +void sw_handle_per_cpu_msg_no_sched(void *info) +{ + sw_handle_per_cpu_msg_i(info, SW_WAKEUP_ACTION_TIMER); +} + +void sw_handle_per_cpu_msg_on_cpu(int cpu, void *info) +{ + if (unlikely(cpu == RAW_CPU())) { + sw_handle_per_cpu_msg_no_sched(info); + } else { + pw_pr_debug("[%d] is handling for %d\n", RAW_CPU(), cpu); + /* + * No need to disable preemption -- 'smp_call_function_single' + * does that for us. + */ + smp_call_function_single( + cpu, &sw_handle_per_cpu_msg_no_sched, info, + false /* false ==> do NOT wait for function + * completion + */); + } +} + +void sw_set_collector_ops(const struct sw_hw_ops *hw_ops) +{ + s_hw_ops = hw_ops; +} diff --git a/drivers/platform/x86/socwatch/sw_driver.c b/drivers/platform/x86/socwatch/sw_driver.c new file mode 100644 index 0000000000000..661a42555baa8 --- /dev/null +++ b/drivers/platform/x86/socwatch/sw_driver.c @@ -0,0 +1,1474 @@ +/* + + This file is provided under a dual BSD/GPLv2 license. 
When using or + redistributing this file, you may do so under either license. + + GPL LICENSE SUMMARY + + Copyright(c) 2014 - 2018 Intel Corporation. + + This program is free software; you can redistribute it and/or modify + it under the terms of version 2 of the GNU General Public License as + published by the Free Software Foundation. + + This program is distributed in the hope that it will be useful, but + WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + General Public License for more details. + + Contact Information: + SoC Watch Developer Team + Intel Corporation, + 1300 S Mopac Expwy, + Austin, TX 78746 + + BSD LICENSE + + Copyright(c) 2014 - 2018 Intel Corporation. + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions + are met: + + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in + the documentation and/or other materials provided with the + distribution. + * Neither the name of Intel Corporation nor the names of its + contributors may be used to endorse or promote products derived + from this software without specific prior written permission. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT + OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +*/ +#define MOD_AUTHOR "Gautam Upadhyaya " +#define MOD_DESC "SoC Watch kernel module" + +#include "sw_internal.h" +#include "sw_structs.h" +#include "sw_kernel_defines.h" +#include "sw_types.h" +#include "sw_mem.h" +#include "sw_ioctl.h" +#include "sw_output_buffer.h" +#include "sw_hardware_io.h" +#include "sw_overhead_measurements.h" +#include "sw_tracepoint_handlers.h" +#include "sw_collector.h" +#include "sw_file_ops.h" + +/* ------------------------------------------------- + * Compile time constants. + * ------------------------------------------------- + */ +/* + * Number of entries in the 'sw_collector_lists' array + */ +#define NUM_COLLECTOR_MODES (SW_WHEN_TYPE_END - SW_WHEN_TYPE_BEGIN + 1) +#define PW_OUTPUT_BUFFER_SIZE \ + 256 /* Number of output messages in each per-cpu buffer */ +/* + * Check if tracepoint/notifier ID is in (user-supplied) mask + */ +#define IS_TRACE_NOTIFIER_ID_IN_MASK(id, mask) \ + ((id) >= 0 && (((mask) >> (id)) & 0x1)) + +/* ------------------------------------------------- + * Local function declarations. 
+ * ------------------------------------------------- + */ +int sw_load_driver_i(void); +void sw_unload_driver_i(void); +int sw_init_collector_lists_i(void); +void sw_destroy_collector_lists_i(void); +int sw_init_data_structures_i(void); +void sw_destroy_data_structures_i(void); +int sw_get_arch_details_i(void); +void sw_iterate_driver_info_lists_i(void); +void sw_handle_immediate_request_i(void *request); +int sw_print_collector_node_i(struct sw_collector_data *data); +int sw_collection_start_i(void); +int sw_collection_stop_i(void); +int sw_collection_poll_i(void); +size_t sw_get_payload_size_i(const struct sw_driver_interface_info *info); +sw_driver_msg_t * +sw_alloc_collector_msg_i(const struct sw_driver_interface_info *info, + size_t per_msg_payload_size); +static long sw_unlocked_handle_ioctl_i(unsigned int ioctl_num, + void *p_local_args); +static long +sw_set_driver_infos_i(struct sw_driver_interface_msg __user *remote_msg, + int local_len); +static long sw_handle_cmd_i(sw_driver_collection_cmd_t cmd, + u64 __user *remote_out_args); +static void sw_do_extract_scu_fw_version(void); +static long +sw_get_available_name_id_mappings_i(enum sw_name_id_type type, + struct sw_name_info_msg __user *remote_info, + size_t local_len); +static enum sw_driver_collection_cmd sw_get_collection_cmd_i(void); +static bool sw_should_flush_buffer_i(void); + +/* ------------------------------------------------- + * Data structures. + * ------------------------------------------------- + */ +/* + * Structure to hold current CMD state + * of the device driver. Constantly evolving, but + * that's OK -- this is internal to the driver + * and is NOT exported. + */ +struct swa_internal_state { + sw_driver_collection_cmd_t + cmd; /* indicates which command was specified */ + /* last e.g. START, STOP etc. */ + /* + * Should we write to our per-cpu output buffers? + * YES if we're actively collecting. + * NO if we're not. 
+ */ + bool write_to_buffers; + /* + * Should we "drain/flush" the per-cpu output buffers? + * (See "device_read" for an explanation) + */ + bool drain_buffers; + /* Others... */ +}; + +/* ------------------------------------------------- + * Variables. + * ------------------------------------------------- + */ +static bool do_force_module_scope_for_cpu_frequencies; +module_param(do_force_module_scope_for_cpu_frequencies, bool, S_IRUSR); +MODULE_PARM_DESC( + do_force_module_scope_for_cpu_frequencies, + "Toggle module scope for cpu frequencies. Sets \"affected_cpus\" and \"related_cpus\" of cpufreq_policy."); + +static unsigned short sw_buffer_num_pages = 16; +module_param(sw_buffer_num_pages, ushort, S_IRUSR); +MODULE_PARM_DESC( + sw_buffer_num_pages, + "Specify number of 4kB pages to use for each per-cpu buffer. MUST be a power of 2! Default value = 16 (64 kB)"); + +/* TODO: convert from 'list_head' to 'hlist_head' */ +/* + * sw_collector_lists is an array of linked lists of "collector nodes" + * (sw_collector_data structs). It is indexed by the sw_when_type_t's. + * Each list holds the collectors to "execute" at a specific time, + * e.g. the beginning of the run, at a poll interval, tracepoint, etc. + */ +static SW_DEFINE_LIST_HEAD(sw_collector_lists, + sw_collector_data)[NUM_COLLECTOR_MODES]; +static __read_mostly u16 sw_scu_fw_major_minor; + +static struct swa_internal_state s_internal_state; +static struct sw_file_ops s_ops = { + .ioctl_handler = &sw_unlocked_handle_ioctl_i, + .stop_handler = &sw_collection_stop_i, + .get_current_cmd = &sw_get_collection_cmd_i, + .should_flush = &sw_should_flush_buffer_i, +}; + +/* + * For each function that you want to profile, + * do the following (e.g. 
function 'foo'): + * ************************************************** + * DECLARE_OVERHEAD_VARS(foo); + * ************************************************** + * This will declare the two variables required + * to keep track of overheads incurred in + * calling/servicing 'foo'. Note that the name + * that you declare here *MUST* match the function name! + */ + +DECLARE_OVERHEAD_VARS(sw_collection_poll_i); /* for POLL */ +DECLARE_OVERHEAD_VARS(sw_any_seg_full); + +/* + * String representation of the various 'SW_WHEN_TYPE_XYZ' enum values. + * Debugging ONLY! + */ +#if DO_DEBUG_OUTPUT +static const char *s_when_type_names[] = { "BEGIN", "POLL", "NOTIFIER", + "TRACEPOINT", "END" }; +#endif /* DO_DEBUG_OUTPUT */ + +/* ------------------------------------------------- + * Function definitions. + * ------------------------------------------------- + */ +/* + * External functions. + */ +int sw_process_snapshot(enum sw_when_type when) +{ + if (when > SW_WHEN_TYPE_END) { + pw_pr_error("invalid snapshot time %d specified!\n", when); + return -EINVAL; + } + if (sw_handle_collector_list(&sw_collector_lists[when], + &sw_handle_collector_node)) { + pw_pr_error("ERROR: could NOT handle snapshot for time %d!\n", + when); + return -EIO; + } + return 0; +} + +int sw_process_snapshot_on_cpu(enum sw_when_type when, int cpu) +{ + if (when > SW_WHEN_TYPE_END) { + pw_pr_error("invalid snapshot time %d specified!\n", when); + return -EINVAL; + } + if (sw_handle_collector_list_on_cpu(&sw_collector_lists[when], + &sw_handle_collector_node_on_cpu, + cpu)) { + pw_pr_error("ERROR: could NOT handle snapshot for time %d!\n", + when); + return -EIO; + } + return 0; +} + +/* + * Driver interface info and collector list functions. 
+ */ +int sw_print_collector_node_i(struct sw_collector_data *curr) +{ + pw_u16_t num_descriptors = 0; + sw_io_desc_print_func_t print_func = NULL; + struct sw_driver_io_descriptor *descriptor = NULL; + struct sw_driver_interface_info *info = NULL; + + if (!curr) { + return -PW_ERROR; + } + info = curr->info; + descriptor = (struct sw_driver_io_descriptor *)info->descriptors; + pw_pr_debug( + "cpu-mask = %d, Plugin-ID = %d, Metric-ID = %d, MSG-ID = %d\n", + info->cpu_mask, info->plugin_id, info->metric_id, info->msg_id); + for (num_descriptors = info->num_io_descriptors; num_descriptors > 0; + --num_descriptors, ++descriptor) { + const struct sw_hw_ops *ops = + sw_get_hw_ops_for(descriptor->collection_type); + if (ops == NULL) { + return -PW_ERROR; + } + print_func = ops->print; + if (print_func && (*print_func)(descriptor)) { + return -PW_ERROR; + } + } + return PW_SUCCESS; +} + +/* + * Driver interface info and collector list functions. + */ + +/** + * sw_reset_collector_node_i - Call the reset op on all of the descriptors + * in coll that have one. + * @coll: The data structure containing an array of collector descriptors. + * + * Return: PW_SUCCESS if all of the resets succeeded, -PW_ERROR if any failed. 
+ */ +static int sw_reset_collector_node_i(struct sw_collector_data *coll) +{ + struct sw_driver_io_descriptor *descriptor = NULL; + struct sw_driver_interface_info *info = NULL; + int num_descriptors; + int retcode = PW_SUCCESS; + + if (!coll) { + return -PW_ERROR; + } + info = coll->info; + + descriptor = (struct sw_driver_io_descriptor *)info->descriptors; + pw_pr_debug( + "cpu-mask = %d, Plugin-ID = %d, Metric-ID = %d, MSG-ID = %d\n", + info->cpu_mask, info->plugin_id, info->metric_id, info->msg_id); + for (num_descriptors = info->num_io_descriptors; num_descriptors > 0; + --num_descriptors, ++descriptor) { + const struct sw_hw_ops *ops = + sw_get_hw_ops_for(descriptor->collection_type); + if (ops && ops->reset && (*ops->reset)(descriptor)) { + retcode = -PW_ERROR; + } + } + return retcode; +} + +static int sw_iterate_trace_notifier_list_i(struct sw_trace_notifier_data *node, + void *dummy) +{ + return sw_handle_collector_list(&node->list, + &sw_print_collector_node_i); +} + +void sw_iterate_driver_info_lists_i(void) +{ + sw_when_type_t which; + + for (which = SW_WHEN_TYPE_BEGIN; which <= SW_WHEN_TYPE_END; ++which) { + pw_pr_debug("ITERATING list %s\n", s_when_type_names[which]); + if (sw_handle_collector_list( + &sw_collector_lists[which], + &sw_print_collector_node_i)) { + /* Should NEVER happen! 
*/ + pw_pr_error( + "WARNING: error occurred while printing values!\n"); + } + } + + if (sw_for_each_tracepoint_node(&sw_iterate_trace_notifier_list_i, NULL, + false /*return-on-error*/)) { + pw_pr_error( + "WARNING: error occurred while printing tracepoint values!\n"); + } + if (sw_for_each_notifier_node(&sw_iterate_trace_notifier_list_i, NULL, + false /*return-on-error*/)) { + pw_pr_error( + "WARNING: error occurred while printing notifier values!\n"); + } +} + +static void sw_reset_collectors_i(void) +{ + sw_when_type_t which; + + for (which = SW_WHEN_TYPE_BEGIN; which <= SW_WHEN_TYPE_END; ++which) { + pw_pr_debug("ITERATING list %s\n", s_when_type_names[which]); + if (sw_handle_collector_list(&sw_collector_lists[which], + &sw_reset_collector_node_i)) { + pw_pr_error( + "WARNING: error occurred while resetting a collector!\n"); + } + } +} + +int sw_init_data_structures_i(void) +{ + /* + * Find the # CPUs in this system. + * Update: use 'num_possible' instead of 'num_present' in case + * the cpus aren't numbered contiguously + */ + sw_max_num_cpus = num_possible_cpus(); + + /* + * Initialize our trace subsys: MUST be called + * BEFORE 'sw_init_collector_lists_i()! 
+ */ + if (sw_add_trace_notify()) { + sw_destroy_data_structures_i(); + return -PW_ERROR; + } + if (sw_init_collector_lists_i()) { + sw_destroy_data_structures_i(); + return -PW_ERROR; + } + if (sw_init_per_cpu_buffers()) { + sw_destroy_data_structures_i(); + return -PW_ERROR; + } + if (sw_register_hw_ops()) { + sw_destroy_data_structures_i(); + return -PW_ERROR; + } + return PW_SUCCESS; +} + +void sw_destroy_data_structures_i(void) +{ + sw_free_hw_ops(); + sw_destroy_per_cpu_buffers(); + sw_destroy_collector_lists_i(); + sw_remove_trace_notify(); +} + +int sw_get_arch_details_i(void) +{ + /* + * SCU F/W version (if applicable) + */ + sw_do_extract_scu_fw_version(); + return PW_SUCCESS; +} + +#define INIT_FLAG ((void *)0) +#define DESTROY_FLAG ((void *)1) + +static int +sw_init_destroy_trace_notifier_lists_i(struct sw_trace_notifier_data *node, + void *is_init) +{ + if (is_init == INIT_FLAG) { + sw_init_collector_list(&node->list); + } else { + sw_destroy_collector_list(&node->list); + } + node->was_registered = false; + + return PW_SUCCESS; +} + +int sw_init_collector_lists_i(void) +{ + int i = 0; + + for (i = 0; i < NUM_COLLECTOR_MODES; ++i) { + sw_init_collector_list(&sw_collector_lists[i]); + } + sw_for_each_tracepoint_node(&sw_init_destroy_trace_notifier_lists_i, + INIT_FLAG, false /*return-on-error*/); + sw_for_each_notifier_node(&sw_init_destroy_trace_notifier_lists_i, + INIT_FLAG, false /*return-on-error*/); + + return PW_SUCCESS; +} + +void sw_destroy_collector_lists_i(void) +{ + int i = 0; + + for (i = 0; i < NUM_COLLECTOR_MODES; ++i) { + sw_destroy_collector_list(&sw_collector_lists[i]); + } + sw_for_each_tracepoint_node(&sw_init_destroy_trace_notifier_lists_i, + DESTROY_FLAG, false /*return-on-error*/); + sw_for_each_notifier_node(&sw_init_destroy_trace_notifier_lists_i, + DESTROY_FLAG, false /*return-on-error*/); +} + +/* + * Used for {READ,WRITE}_IMMEDIATE requests. 
+ */ +typedef struct sw_immediate_request_info sw_immediate_request_info_t; +struct sw_immediate_request_info { + struct sw_driver_io_descriptor *local_descriptor; + char *dst_vals; + int *retVal; +}; +void sw_handle_immediate_request_i(void *request) +{ + struct sw_immediate_request_info *info = + (struct sw_immediate_request_info *)request; + struct sw_driver_io_descriptor *descriptor = info->local_descriptor; + char *dst_vals = info->dst_vals; + const struct sw_hw_ops *ops = + sw_get_hw_ops_for(descriptor->collection_type); + if (likely(ops != NULL)) { + *(info->retVal) = sw_handle_driver_io_descriptor( + dst_vals, RAW_CPU(), descriptor, ops); + } else { + pw_pr_error( + "No operations found to satisfy collection type %u!\n", + descriptor->collection_type); + } + return; +} + +static int num_times_polled; + +int sw_collection_start_i(void) +{ + /* + * Reset the poll tick counter. + */ + num_times_polled = 0; + /* + * Update the output buffers. + */ + sw_reset_per_cpu_buffers(); + /* + * Ensure clients don't think we're in 'flush' mode. + */ + s_internal_state.drain_buffers = false; + /* + * Set the 'command' + */ + s_internal_state.cmd = SW_DRIVER_START_COLLECTION; + /* + * Clear out the topology list + */ + sw_clear_topology_list(); + /* + * Handle 'START' snapshots, if any. + */ + { + if (sw_handle_collector_list( + &sw_collector_lists[SW_WHEN_TYPE_BEGIN], + &sw_handle_collector_node)) { + pw_pr_error( + "ERROR: could NOT handle START collector list!\n"); + return -PW_ERROR; + } + } + /* + * Register any required tracepoints and notifiers. + */ + { + if (sw_register_trace_notifiers()) { + pw_pr_error("ERROR registering trace_notifiers!\n"); + sw_unregister_trace_notifiers(); + return -PW_ERROR; + } + } + pw_pr_debug("OK, STARTED collection!\n"); + return PW_SUCCESS; +} + +int sw_collection_stop_i(void) +{ + /* + * Unregister any registered tracepoints and notifiers. 
+ */ + if (sw_unregister_trace_notifiers()) { + pw_pr_warn( + "Warning: some trace_notifier probe functions could NOT be unregistered!\n"); + } + /* + * Handle 'STOP' snapshots, if any. + */ + if (sw_handle_collector_list(&sw_collector_lists[SW_WHEN_TYPE_END], + &sw_handle_collector_node)) { + pw_pr_error("ERROR: could NOT handle STOP collector list!\n"); + return -PW_ERROR; + } + /* + * Set the 'command' + */ + s_internal_state.cmd = SW_DRIVER_STOP_COLLECTION; + /* + * Tell consumers to 'flush' all buffers. We need to + * defer this as long as possible because it needs to be + * close to the 'wake_up_interruptible', below. + */ + s_internal_state.drain_buffers = true; + smp_mb(); + /* + * Wakeup any sleeping readers, and cleanup any + * timers in the reader subsys. + */ + sw_cancel_reader(); + /* + * Collect stats on samples produced and dropped. + * TODO: call from 'device_read()' instead? + */ + sw_count_samples_produced_dropped(); +#if DO_OVERHEAD_MEASUREMENTS + pw_pr_force( + "DEBUG: there were %llu samples produced and %llu samples dropped in buffer v5!\n", + sw_num_samples_produced, sw_num_samples_dropped); +#endif /* DO_OVERHEAD_MEASUREMENTS */ + /* + * DEBUG: iterate over collection lists. + */ + sw_iterate_driver_info_lists_i(); + /* + * Shut down any collectors that need shutting down. + */ + sw_reset_collectors_i(); + /* + * Clear out the collector lists. + */ + sw_destroy_collector_lists_i(); + pw_pr_debug("OK, STOPPED collection!\n"); +#if DO_OVERHEAD_MEASUREMENTS + pw_pr_force("There were %d poll ticks!\n", num_times_polled); +#endif /* DO_OVERHEAD_MEASUREMENTS */ + return PW_SUCCESS; +} + +int sw_collection_poll_i(void) +{ + /* + * Handle 'POLL' timer expirations. 
+ */ + if (SW_LIST_EMPTY(&sw_collector_lists[SW_WHEN_TYPE_POLL])) { + pw_pr_debug("DEBUG: EMPTY POLL LIST\n"); + } + ++num_times_polled; + return sw_handle_collector_list(&sw_collector_lists[SW_WHEN_TYPE_POLL], + &sw_handle_collector_node); +} + +/* + * Private data for the 'sw_add_trace_notifier_driver_info_i' function. + */ +struct tn_data { + struct sw_driver_interface_info *info; + u64 mask; +}; + +static int +sw_add_trace_notifier_driver_info_i(struct sw_trace_notifier_data *node, + void *priv) +{ + struct tn_data *data = (struct tn_data *)priv; + struct sw_driver_interface_info *local_info = data->info; + u64 mask = data->mask; + int id = sw_get_trace_notifier_id(node); + + if (IS_TRACE_NOTIFIER_ID_IN_MASK(id, mask)) { + pw_pr_debug("TRACEPOINT ID = %d is IN mask 0x%llx\n", id, mask); + if (sw_add_driver_info(&node->list, local_info)) { + pw_pr_error( + "WARNING: could NOT add driver info to list!\n"); + return -PW_ERROR; + } + } + return PW_SUCCESS; +} + +static int sw_post_config_i(const struct sw_hw_ops *op, void *priv) +{ + if (!op->available || !(*op->available)()) { + /* op not available */ + return 0; + } + if (!op->post_config || (*op->post_config)()) { + return 0; + } + return -EIO; +} + +/** + * sw_set_driver_infos_i - Process the collection config data passed down + * from the client. + * @remote_msg: The user space address of our ioctl data. + * @local_len: The number of bytes of remote_msg we should copy. + * + * This function copies the ioctl data from user space to kernel + * space. That data is an array of sw_driver_interface_info structs, + * which hold information about tracepoints, notifiers, and collector + * configuration info for this collection run.. For each driver_info + * struct, it calls the appropriate "add info" (registration/ + * configuration) function for each of the "when types" (begin, poll, + * notifier, tracepoint, end) which should trigger a collection + * operation for that collector. 
+ *
+ * When this function is done, the data structures corresponding to
+ * collection should be configured and initialized.
+ *
+ *
+ * Returns: PW_SUCCESS on success, or a non-zero on an error.
+ */
+static long
+sw_set_driver_infos_i(struct sw_driver_interface_msg __user *remote_msg,
+		       int local_len)
+{
+	struct sw_driver_interface_info *local_info = NULL;
+	struct sw_driver_interface_msg *local_msg = vmalloc(local_len);
+	pw_u8_t read_triggers = 0x0;
+	pw_u16_t num_infos = 0;
+	sw_when_type_t i = SW_WHEN_TYPE_BEGIN;
+	char *__data = NULL; /* set only after 'local_msg' is NULL-checked */
+	size_t dst_idx = 0;
+
+	if (!local_msg) {
+		pw_pr_error("ERROR allocating space for local message!\n");
+		return -EFAULT;
+	}
+	if (copy_from_user(local_msg, (struct sw_driver_interface_msg __user *)
+			   remote_msg, local_len)) {
+		pw_pr_error("ERROR copying message from user space!\n");
+		vfree(local_msg);
+		return -EFAULT;
+	}
+	__data = (char *)local_msg->infos;
+	/*
+	 * We aren't allowed to config the driver multiple times between
+	 * collections. Clear out any previous config values.
+	 */
+	sw_destroy_collector_lists_i();
+	/*
+	 * Did the user specify a min polling interval?
+ */ + sw_min_polling_interval_msecs = local_msg->min_polling_interval_msecs; + pw_pr_debug("min_polling_interval_msecs = %u\n", + sw_min_polling_interval_msecs); + + num_infos = local_msg->num_infos; + pw_pr_debug("LOCAL NUM INFOS = %u\n", num_infos); + for (; num_infos > 0; --num_infos) { + local_info = + (struct sw_driver_interface_info *)&__data[dst_idx]; + dst_idx += (SW_DRIVER_INTERFACE_INFO_HEADER_SIZE() + + local_info->num_io_descriptors * + sizeof(struct sw_driver_io_descriptor)); + read_triggers = local_info->trigger_bits; + pw_pr_debug( + "read_triggers = %u, # msrs = %u, new dst_idx = %u\n", + (unsigned int)read_triggers, + (unsigned int)local_info->num_io_descriptors, + (unsigned int)dst_idx); + for (i = SW_WHEN_TYPE_BEGIN; i <= SW_WHEN_TYPE_END; + ++i, read_triggers >>= 1) { + if (read_triggers & 0x1) { /* Bit 'i' is set */ + pw_pr_debug("BIT %d is SET!\n", i); + if (i == SW_WHEN_TYPE_TRACEPOINT) { + struct tn_data tn_data = { + local_info, + local_info->tracepoint_id_mask + }; + pw_pr_debug( + "TRACEPOINT, MASK = 0x%llx\n", + local_info->tracepoint_id_mask); + sw_for_each_tracepoint_node( + &sw_add_trace_notifier_driver_info_i, + &tn_data, + false /*return-on-error*/); + } else if (i == SW_WHEN_TYPE_NOTIFIER) { + struct tn_data tn_data = { + local_info, + local_info->notifier_id_mask + }; + pw_pr_debug( + "NOTIFIER, MASK = 0x%llx\n", + local_info->notifier_id_mask); + sw_for_each_notifier_node( + &sw_add_trace_notifier_driver_info_i, + &tn_data, + false /*return-on-error*/); + } else { + if (sw_add_driver_info( + &sw_collector_lists[i], + local_info)) { + pw_pr_error( + "WARNING: could NOT add driver info to list for 'when type' %d!\n", + i); + } + } + } + } + } + if (sw_for_each_hw_op(&sw_post_config_i, NULL, + false /*return-on-error*/)) { + pw_pr_error("POST-CONFIG error!\n"); + } + vfree(local_msg); + memset(&s_internal_state, 0, sizeof(s_internal_state)); + /* + * DEBUG: iterate over collection lists. 
+ */ + sw_iterate_driver_info_lists_i(); + return PW_SUCCESS; +} + +static long sw_handle_cmd_i(sw_driver_collection_cmd_t cmd, + u64 __user *remote_out_args) +{ + /* + * First, handle the command. + */ + if (cmd < SW_DRIVER_START_COLLECTION || + cmd > SW_DRIVER_CANCEL_COLLECTION) { + pw_pr_error("ERROR: invalid cmd = %d\n", cmd); + return -PW_ERROR; + } + switch (cmd) { + case SW_DRIVER_START_COLLECTION: + if (sw_collection_start_i()) { + return -PW_ERROR; + } + break; + case SW_DRIVER_STOP_COLLECTION: + if (sw_collection_stop_i()) { + return -PW_ERROR; + } + break; + default: + pw_pr_error("WARNING: unsupported command %d\n", cmd); + break; + } + /* + * Then retrieve sample stats. + */ +#if DO_COUNT_DROPPED_SAMPLES + if (cmd == SW_DRIVER_STOP_COLLECTION) { + u64 local_args[2] = { sw_num_samples_produced, + sw_num_samples_dropped }; + if (copy_to_user(remote_out_args, local_args, + sizeof(local_args))) { + pw_pr_error( + "couldn't copy collection stats to user space!\n"); + return -PW_ERROR; + } + } +#endif /* DO_COUNT_DROPPED_SAMPLES */ + return PW_SUCCESS; +} + +#ifdef SFI_SIG_OEMB +static int sw_do_parse_sfi_oemb_table(struct sfi_table_header *header) +{ +#ifdef CONFIG_X86_WANT_INTEL_MID + struct sfi_table_oemb *oemb = (struct sfi_table_oemb *) + header; /* 'struct sfi_table_oemb' defined in 'intel-mid.h' */ + if (!oemb) { + pw_pr_error("ERROR: NULL sfi table header!\n"); + return -PW_ERROR; + } + sw_scu_fw_major_minor = (oemb->scu_runtime_major_version << 8) | + (oemb->scu_runtime_minor_version); + pw_pr_debug("DEBUG: major = %u, minor = %u\n", + oemb->scu_runtime_major_version, + oemb->scu_runtime_minor_version); +#endif /* CONFIG_X86_WANT_INTEL_MID */ + return PW_SUCCESS; +} +#endif /* SFI_SIG_OEMB */ + +static void sw_do_extract_scu_fw_version(void) +{ + sw_scu_fw_major_minor = 0x0; +#ifdef SFI_SIG_OEMB + if (sfi_table_parse(SFI_SIG_OEMB, NULL, NULL, + &sw_do_parse_sfi_oemb_table)) { + pw_pr_force("WARNING: NO SFI information!\n"); + } +#endif /* 
SFI_SIG_OEMB */ +} + +static int sw_gather_trace_notifier_i(struct sw_trace_notifier_data *node, + struct sw_name_info_msg *msg, + enum sw_name_id_type type) +{ + pw_u16_t *idx = &msg->payload_len; + char *buffer = (char *)&msg->pairs[*idx]; + struct sw_name_id_pair *pair = (struct sw_name_id_pair *)buffer; + int id = sw_get_trace_notifier_id(node); + struct sw_string_type *str = &pair->name; + const char *abstract_name = sw_get_trace_notifier_abstract_name(node); + + if (likely(abstract_name && id >= 0)) { + ++msg->num_name_id_pairs; + pair->type = type; + pair->id = (u16)id; + /* "+1" for trailing '\0' */ + str->len = strlen(abstract_name) + 1; + memcpy(&str->data[0], abstract_name, str->len); + + pw_pr_debug("TP[%d] = %s (%u)\n", + sw_get_trace_notifier_id(node), abstract_name, + (unsigned int)strlen(abstract_name)); + + *idx += SW_NAME_ID_HEADER_SIZE() + + SW_STRING_TYPE_HEADER_SIZE() + str->len; + } + + return PW_SUCCESS; +} + +static int sw_gather_tracepoint_i(struct sw_trace_notifier_data *node, + void *priv) +{ + return sw_gather_trace_notifier_i(node, (struct sw_name_info_msg *)priv, + SW_NAME_TYPE_TRACEPOINT); +} + +static int sw_gather_notifier_i(struct sw_trace_notifier_data *node, void *priv) +{ + return sw_gather_trace_notifier_i(node, (struct sw_name_info_msg *)priv, + SW_NAME_TYPE_NOTIFIER); +} + +static long +sw_get_available_trace_notifiers_i(enum sw_name_id_type type, + struct sw_name_info_msg *local_info) +{ + long retVal = PW_SUCCESS; + + if (type == SW_NAME_TYPE_TRACEPOINT) { + retVal = sw_for_each_tracepoint_node(&sw_gather_tracepoint_i, + local_info, + false /*return-on-error*/); + } else { + retVal = sw_for_each_notifier_node(&sw_gather_notifier_i, + local_info, + false /*return-on-error*/); + } + pw_pr_debug( + "There are %u extracted traces/notifiers for a total of %u bytes!\n", + local_info->num_name_id_pairs, local_info->payload_len); + return retVal; +} + +static int sw_gather_hw_op_i(const struct sw_hw_ops *op, void *priv) +{ + struct 
sw_name_info_msg *msg = (struct sw_name_info_msg *)priv; + pw_u16_t *idx = &msg->payload_len; + char *buffer = (char *)&msg->pairs[*idx]; + struct sw_name_id_pair *pair = (struct sw_name_id_pair *)buffer; + struct sw_string_type *str = &pair->name; + const char *abstract_name = sw_get_hw_op_abstract_name(op); + int id = sw_get_hw_op_id(op); + + pw_pr_debug("Gather Collector[%d] = %s\n", id, abstract_name); + if (likely(abstract_name && id >= 0)) { + /* + * Final check: is this operation available on the + * target platform? If 'available' function doesn't + * exist then YES. Else call 'available' + * function to decide. + */ + pw_pr_debug("%s has available = %p\n", abstract_name, + op->available); + if (!op->available || (*op->available)()) { + ++msg->num_name_id_pairs; + pair->type = SW_NAME_TYPE_COLLECTOR; + pair->id = (u16)id; + str->len = strlen(abstract_name) + + 1; /* "+1" for trailing '\0' */ + memcpy(&str->data[0], abstract_name, str->len); + + *idx += SW_NAME_ID_HEADER_SIZE() + + SW_STRING_TYPE_HEADER_SIZE() + str->len; + } + } + + return PW_SUCCESS; +} + +static long sw_get_available_collectors_i(struct sw_name_info_msg *local_info) +{ + return sw_for_each_hw_op(&sw_gather_hw_op_i, local_info, + false /*return-on-error*/); +} + +static long +sw_get_available_name_id_mappings_i(enum sw_name_id_type type, + struct sw_name_info_msg __user *remote_info, + size_t local_len) +{ + char *buffer = vmalloc(local_len); + struct sw_name_info_msg *local_info = NULL; + long retVal = PW_SUCCESS; + + if (!buffer) { + pw_pr_error("ERROR: couldn't alloc temp buffer!\n"); + return -PW_ERROR; + } + memset(buffer, 0, local_len); + local_info = (struct sw_name_info_msg *)buffer; + + if (type == SW_NAME_TYPE_COLLECTOR) { + retVal = sw_get_available_collectors_i(local_info); + } else { + retVal = sw_get_available_trace_notifiers_i(type, local_info); + } + if (retVal == PW_SUCCESS) { + retVal = copy_to_user(remote_info, local_info, local_len); + if (retVal) { + pw_pr_error( + 
"ERROR: couldn't copy tracepoint info to user space!\n"); + } + } + vfree(buffer); + return retVal; +} + +static long +sw_get_topology_changes_i(struct sw_driver_topology_msg __user *remote_msg, + size_t local_len) +{ + char *buffer = NULL; + struct sw_driver_topology_msg *local_msg = NULL; + size_t buffer_len = sizeof(struct sw_driver_topology_msg) + + sw_num_topology_entries * + sizeof(struct sw_driver_topology_change); + long retVal = PW_SUCCESS; + struct sw_driver_topology_change *dst = NULL; + size_t dst_idx = 0; + + SW_LIST_HEAD_VAR(sw_topology_node) *head = (void *)&sw_topology_list; + struct sw_topology_node *tnode = NULL; + + if (local_len < buffer_len) { + pw_pr_error( + "ERROR: insufficient buffer space to encode topology changes! Requires %zu, output space = %zu\n", + buffer_len, local_len); + return -EIO; + } + + buffer = vmalloc(buffer_len); + if (!buffer) { + pw_pr_error( + "ERROR: couldn't allocate buffer for topology transfer!\n"); + return -EIO; + } + memset(buffer, 0, buffer_len); + + local_msg = (struct sw_driver_topology_msg *)buffer; + local_msg->num_entries = sw_num_topology_entries; + dst = (struct sw_driver_topology_change *)&local_msg + ->topology_entries[0]; + SW_LIST_FOR_EACH_ENTRY(tnode, head, list) + { + struct sw_driver_topology_change *change = &tnode->change; + + memcpy(&dst[dst_idx++], change, sizeof(*change)); + } + retVal = copy_to_user(remote_msg, local_msg, buffer_len); + if (retVal) { + pw_pr_error( + "ERROR: couldn't copy topology changes to user space!\n"); + } + vfree(buffer); + return retVal; +} + +#if defined(CONFIG_COMPAT) && defined(CONFIG_X86_64) +#define MATCH_IOCTL(num, pred) ((num) == (pred) || (num) == (pred##32)) +#else +#define MATCH_IOCTL(num, pred) ((num) == (pred)) +#endif + +static long sw_unlocked_handle_ioctl_i(unsigned int ioctl_num, + void *p_local_args) +{ + struct sw_driver_ioctl_arg local_args; + int local_in_len, local_out_len; + + if (!p_local_args) { + pw_pr_error("ERROR: NULL p_local_args 
value?!\n"); + return -PW_ERROR; + } + + /* + * (1) Sanity check: + * Before doing anything, double check to + * make sure this IOCTL was really intended + * for us! + */ + if (_IOC_TYPE(ioctl_num) != APWR_IOCTL_MAGIC_NUM) { + pw_pr_error( + "ERROR: requested IOCTL TYPE (%d) != APWR_IOCTL_MAGIC_NUM (%d)\n", + _IOC_TYPE(ioctl_num), APWR_IOCTL_MAGIC_NUM); + return -PW_ERROR; + } + /* + * (2) Extract arg lengths. + */ + local_args = *((struct sw_driver_ioctl_arg *)p_local_args); + + local_in_len = local_args.in_len; + local_out_len = local_args.out_len; + pw_pr_debug("GU: local_in_len = %d, local_out_len = %d\n", local_in_len, + local_out_len); + /* + * (3) Service individual IOCTL requests. + */ + if (MATCH_IOCTL(ioctl_num, PW_IOCTL_CONFIG)) { + pw_pr_debug("PW_IOCTL_CONFIG\n"); + return sw_set_driver_infos_i( + (struct sw_driver_interface_msg __user *) + local_args.in_arg, + local_in_len); + } else if (MATCH_IOCTL(ioctl_num, PW_IOCTL_CMD)) { + sw_driver_collection_cmd_t local_cmd; + + pw_pr_debug("PW_IOCTL_CMD\n"); + if (get_user(local_cmd, (sw_driver_collection_cmd_t __user *) + local_args.in_arg)) { + pw_pr_error("ERROR: could NOT extract cmd value!\n"); + return -PW_ERROR; + } + return sw_handle_cmd_i(local_cmd, + (u64 __user *)local_args.out_arg); + } else if (MATCH_IOCTL(ioctl_num, PW_IOCTL_POLL)) { + pw_pr_debug("PW_IOCTL_POLL\n"); + return DO_PER_CPU_OVERHEAD_FUNC_RET(int, sw_collection_poll_i); + } else if (MATCH_IOCTL(ioctl_num, PW_IOCTL_IMMEDIATE_IO)) { + struct sw_driver_interface_info *local_info; + struct sw_driver_io_descriptor *local_descriptor = NULL; + int retVal = PW_SUCCESS; + char *src_vals = NULL; + char *dst_vals = NULL; + + pw_pr_debug("PW_IOCTL_IMMEDIATE_IO\n"); + pw_pr_debug("local_in_len = %u\n", local_in_len); + + src_vals = vmalloc(local_in_len); + if (!src_vals) { + pw_pr_error( + "ERROR allocating space for immediate IO\n"); + return -PW_ERROR; + } + if (local_out_len) { + dst_vals = vmalloc(local_out_len); + if (!dst_vals) { + 
vfree(src_vals); + pw_pr_error( + "ERROR allocating space for immediate IO\n"); + return -PW_ERROR; + } + } + if (copy_from_user(src_vals, (char __user *)local_args.in_arg, + local_in_len)) { + pw_pr_error( + "ERROR copying in immediate IO descriptor\n"); + retVal = -PW_ERROR; + goto ret_immediate_io; + } + local_info = (struct sw_driver_interface_info *)src_vals; + pw_pr_debug( + "OK, asked to perform immediate IO on cpu(s) %d, # descriptors = %d\n", + local_info->cpu_mask, local_info->num_io_descriptors); + /* + * For now, require only a single descriptor. + */ + if (local_info->num_io_descriptors != 1) { + pw_pr_error( + "ERROR: told to perform immediate IO with %d descriptors -- MAX of 1 descriptor allowed!\n", + local_info->num_io_descriptors); + retVal = -PW_ERROR; + goto ret_immediate_io; + } + local_descriptor = ((struct sw_driver_io_descriptor *) + local_info->descriptors); + pw_pr_debug("Collection type after %d\n", + local_descriptor->collection_type); + /* + * Check cpu mask for correctness here. For now, we do NOT allow + * reading on ALL cpus. 
+ */ + if ((int)local_info->cpu_mask < -1 || + (int)local_info->cpu_mask >= (int)sw_max_num_cpus) { + pw_pr_error( + "ERROR: invalid cpu mask %d specified in immediate IO; valid values are: -1, [0 -- %d]!\n", + local_info->cpu_mask, sw_max_num_cpus - 1); + retVal = -PW_ERROR; + goto ret_immediate_io; + } + /* + * Check collection type for correctness here + */ + pw_pr_debug( + "Asked to perform immediate IO with descriptor with type = %d, on cpu = %d\n", + local_descriptor->collection_type, + local_info->cpu_mask); + if (sw_is_valid_hw_op_id(local_descriptor->collection_type) == + false) { + pw_pr_error( + "ERROR: invalid collection type %d specified for immediate IO\n", + (int)local_descriptor->collection_type); + retVal = -PW_ERROR; + goto ret_immediate_io; + } + /* + * Check collection cmd for correctness here + */ + if (local_descriptor->collection_command < SW_IO_CMD_READ || + local_descriptor->collection_command > SW_IO_CMD_WRITE) { + pw_pr_error( + "ERROR: invalid collection command %d specified for immediate IO\n", + local_descriptor->collection_command); + retVal = -PW_ERROR; + goto ret_immediate_io; + } + /* + * Initialize the descriptor -- 'MMIO' and 'IPC' reads may need + * an "ioremap_nocache" + */ + if (sw_init_driver_io_descriptor(local_descriptor)) { + pw_pr_error( + "ERROR initializing immediate IO descriptor\n"); + retVal = -PW_ERROR; + goto ret_immediate_io; + } + /* + * OK, perform the actual IO. 
+ */ + { + struct sw_immediate_request_info request_info = { + local_descriptor, dst_vals, &retVal + }; + struct cpumask cpumask; + + cpumask_clear(&cpumask); + switch (local_info->cpu_mask) { + case -1: /* IO on ANY CPU (assume current CPU) */ + cpumask_set_cpu(RAW_CPU(), &cpumask); + pw_pr_debug("ANY CPU\n"); + break; + default: /* IO on a particular CPU */ + cpumask_set_cpu(local_info->cpu_mask, &cpumask); + pw_pr_debug("[%d] setting for %d\n", RAW_CPU(), + local_info->cpu_mask); + break; + } + sw_schedule_work(&cpumask, + &sw_handle_immediate_request_i, + &request_info); + } + if (retVal != PW_SUCCESS) { + pw_pr_error( + "ERROR performing immediate IO on one (or more) CPUs!\n"); + goto ret_immediate_io_reset; + } + /* + * OK, all done. + */ + if (local_descriptor->collection_command == SW_IO_CMD_READ) { + if (copy_to_user(local_args.out_arg, dst_vals, + local_out_len)) { + pw_pr_error( + "ERROR copying %u bytes of value to userspace!\n", + local_out_len); + retVal = -PW_ERROR; + goto ret_immediate_io_reset; + } + pw_pr_debug( + "OK, copied %u bytes of value to userspace addr %p!\n", + local_out_len, local_args.out_arg); + } +ret_immediate_io_reset: + /* + * Reset the descriptor -- 'MMIO' and 'IPC' reads may have + * performed an "ioremap_nocache" which now needs to be + * unmapped. 
+ */ + if (sw_reset_driver_io_descriptor(local_descriptor)) { + pw_pr_error( + "ERROR resetting immediate IO descriptor\n"); + retVal = -PW_ERROR; + goto ret_immediate_io; + } +ret_immediate_io: + vfree(src_vals); + if (dst_vals) { + vfree(dst_vals); + } + return retVal; + } else if (MATCH_IOCTL(ioctl_num, PW_IOCTL_GET_SCU_FW_VERSION)) { + u32 local_data = (u32)sw_scu_fw_major_minor; + + if (put_user(local_data, (u32 __user *)local_args.out_arg)) { + pw_pr_error( + "ERROR copying scu fw version to userspace!\n"); + return -PW_ERROR; + } + return PW_SUCCESS; + } else if (MATCH_IOCTL(ioctl_num, PW_IOCTL_GET_DRIVER_VERSION)) { + pw_u64_t local_version = + (pw_u64_t)SW_DRIVER_VERSION_MAJOR << 32 | + (pw_u64_t)SW_DRIVER_VERSION_MINOR << 16 | + (pw_u64_t)SW_DRIVER_VERSION_OTHER; + if (put_user(local_version, (u64 __user *)local_args.out_arg)) { + pw_pr_error( + "ERROR copying driver version to userspace!\n"); + return -PW_ERROR; + } + return PW_SUCCESS; + } else if (MATCH_IOCTL(ioctl_num, PW_IOCTL_GET_AVAILABLE_TRACEPOINTS)) { + pw_pr_debug("DEBUG: AVAIL tracepoints! local_out_len = %u\n", + local_out_len); + return sw_get_available_name_id_mappings_i( + SW_NAME_TYPE_TRACEPOINT, + (struct sw_name_info_msg __user *)local_args.out_arg, + local_out_len); + } else if (MATCH_IOCTL(ioctl_num, PW_IOCTL_GET_AVAILABLE_NOTIFIERS)) { + pw_pr_debug("DEBUG: AVAIL tracepoints! local_out_len = %u\n", + local_out_len); + return sw_get_available_name_id_mappings_i( + SW_NAME_TYPE_NOTIFIER, + (struct sw_name_info_msg __user *)local_args.out_arg, + local_out_len); + } else if (MATCH_IOCTL(ioctl_num, PW_IOCTL_GET_AVAILABLE_COLLECTORS)) { + pw_pr_debug("DEBUG: AVAIL tracepoints! local_out_len = %u\n", + local_out_len); + return sw_get_available_name_id_mappings_i( + SW_NAME_TYPE_COLLECTOR, + (struct sw_name_info_msg __user *)local_args.out_arg, + local_out_len); + } else if (MATCH_IOCTL(ioctl_num, PW_IOCTL_GET_TOPOLOGY_CHANGES)) { + pw_pr_debug("DEBUG: TOPOLOGY changes! 
local_out_len = %u\n", + local_out_len); + return sw_get_topology_changes_i( + (struct sw_driver_topology_msg __user *) + local_args.out_arg, + local_out_len); + } else { + pw_pr_error("ERROR: invalid ioctl num: %u\n", + _IOC_NR(ioctl_num)); + } + return -PW_ERROR; +} + +static enum sw_driver_collection_cmd sw_get_collection_cmd_i(void) +{ + return s_internal_state.cmd; +}; + +static bool sw_should_flush_buffer_i(void) +{ + return s_internal_state.drain_buffers; +}; + +int sw_load_driver_i(void) +{ + /* + * Set per-cpu buffer size. + * First, Perform sanity checking of per-cpu buffer size. + */ + /* + * 1. Num pages MUST be pow-of-2. + */ + { + if (sw_buffer_num_pages & (sw_buffer_num_pages - 1)) { + pw_pr_error( + "Invalid value (%u) for number of pages in each per-cpu buffer; MUST be a power of 2!\n", + sw_buffer_num_pages); + return -PW_ERROR; + } + } + /* + * 2. Num pages MUST be <= 16 (i.e. per-cpu buffer size + * MUST be <= 64 kB) + */ + { + if (sw_buffer_num_pages > 16) { + pw_pr_error( + "Invalid value (%u) for number of pages in each per-cpu buffer; MUST be <= 16!\n", + sw_buffer_num_pages); + return -PW_ERROR; + } + } + sw_buffer_alloc_size = sw_buffer_num_pages * PAGE_SIZE; + /* + * Retrieve any arch details here. + */ + if (sw_get_arch_details_i()) { + pw_pr_error("ERROR retrieving arch details!\n"); + return -PW_ERROR; + } + /* + * Check to see if the user wants us to force + * software coordination of CPU frequencies. 
+ */ + if (do_force_module_scope_for_cpu_frequencies) { + pw_pr_force( + "DEBUG: FORCING MODULE SCOPE FOR CPU FREQUENCIES!\n"); + if (sw_set_module_scope_for_cpus()) { + pw_pr_force("ERROR setting affected cpus\n"); + return -PW_ERROR; + } else { + pw_pr_debug("OK, setting worked\n"); + } + } + if (sw_init_data_structures_i()) { + pw_pr_error("ERROR initializing data structures!\n"); + goto err_ret_init_data; + } + if (sw_register_dev(&s_ops)) { + goto err_ret_register_dev; + } + /* + * Retrieve a list of tracepoint structs to use when + * registering probe functions. + */ + { + if (sw_extract_tracepoints()) { + pw_pr_error( + "ERROR: could NOT retrieve a complete list of valid tracepoint structs!\n"); + goto err_ret_tracepoint; + } + } + pw_pr_force("-----------------------------------------\n"); + pw_pr_force("OK: LOADED SoC Watch Driver\n"); +#ifdef CONFIG_X86_WANT_INTEL_MID + pw_pr_force("SOC Identifier = %u, Stepping = %u\n", + intel_mid_identify_cpu(), intel_mid_soc_stepping()); +#endif /* CONFIG_X86_WANT_INTEL_MID */ + pw_pr_force("-----------------------------------------\n"); + return PW_SUCCESS; + +err_ret_tracepoint: + sw_unregister_dev(); +err_ret_register_dev: + sw_destroy_data_structures_i(); +err_ret_init_data: + if (do_force_module_scope_for_cpu_frequencies) { + if (sw_reset_module_scope_for_cpus()) { + pw_pr_force("ERROR resetting affected cpus\n"); + } else { + pw_pr_debug("OK, resetting worked\n"); + } + } + return -PW_ERROR; +} + +void sw_unload_driver_i(void) +{ + sw_iterate_driver_info_lists_i(); + + sw_unregister_dev(); + + sw_destroy_data_structures_i(); + + if (do_force_module_scope_for_cpu_frequencies) { + if (sw_reset_module_scope_for_cpus()) { + pw_pr_force("ERROR resetting affected cpus\n"); + } else { + pw_pr_debug("OK, resetting worked\n"); + } + } + + pw_pr_force("-----------------------------------------\n"); + pw_pr_force("OK: UNLOADED SoC Watch Driver\n"); + + sw_print_trace_notifier_overheads(); + 
sw_print_output_buffer_overheads(); + PRINT_CUMULATIVE_OVERHEAD_PARAMS(sw_collection_poll_i, "POLL"); + PRINT_CUMULATIVE_OVERHEAD_PARAMS(sw_any_seg_full, "ANY_SEG_FULL"); +#if DO_TRACK_MEMORY_USAGE + { + /* + * Dump memory stats. + */ + pw_pr_force( + "TOTAL # BYTES ALLOCED = %llu, CURR # BYTES ALLOCED = %llu, MAX # BYTES ALLOCED = %llu\n", + sw_get_total_bytes_alloced(), + sw_get_curr_bytes_alloced(), + sw_get_max_bytes_alloced()); + if (unlikely(sw_get_curr_bytes_alloced())) { + pw_pr_force( + "***********************************************************************\n"); + pw_pr_force( + "WARNING: possible memory leak: there are %llu bytes still allocated!\n", + sw_get_curr_bytes_alloced()); + pw_pr_force( + "***********************************************************************\n"); + } + } +#endif /* DO_TRACK_MEMORY_USAGE */ + pw_pr_force("-----------------------------------------\n"); +} + +module_init(sw_load_driver_i); +module_exit(sw_unload_driver_i); + +MODULE_LICENSE("GPL"); +MODULE_AUTHOR(MOD_AUTHOR); +MODULE_DESCRIPTION(MOD_DESC); diff --git a/drivers/platform/x86/socwatch/sw_file_ops.c b/drivers/platform/x86/socwatch/sw_file_ops.c new file mode 100644 index 0000000000000..ea84d252a4d30 --- /dev/null +++ b/drivers/platform/x86/socwatch/sw_file_ops.c @@ -0,0 +1,364 @@ +/* + + This file is provided under a dual BSD/GPLv2 license. When using or + redistributing this file, you may do so under either license. + + GPL LICENSE SUMMARY + + Copyright(c) 2014 - 2018 Intel Corporation. + + This program is free software; you can redistribute it and/or modify + it under the terms of version 2 of the GNU General Public License as + published by the Free Software Foundation. + + This program is distributed in the hope that it will be useful, but + WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + General Public License for more details. 
+ + Contact Information: + SoC Watch Developer Team + Intel Corporation, + 1300 S Mopac Expwy, + Austin, TX 78746 + + BSD LICENSE + + Copyright(c) 2014 - 2018 Intel Corporation. + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions + are met: + + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in + the documentation and/or other materials provided with the + distribution. + * Neither the name of Intel Corporation nor the names of its + contributors may be used to endorse or promote products derived + from this software without specific prior written permission. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +*/ +#include /* try_module_get */ +#include /* inode */ +#include /* class_create */ +#include /* cdev_alloc */ +#include /* LINUX_VERSION_CODE */ +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 12, 0) +#include /* copy_to_user */ +#else +#include /* copy_to_user */ +#endif /* LINUX_VERSION_CODE */ +#include /* wait_event_interruptible */ +#include /* TASK_INTERRUPTIBLE */ + +#include "sw_kernel_defines.h" +#include "sw_types.h" +#include "sw_structs.h" +#include "sw_file_ops.h" +#include "sw_ioctl.h" +#include "sw_output_buffer.h" + +/* ------------------------------------------------- + * Compile time constants. + * ------------------------------------------------- + */ +/* + * Get current command. + */ +#define GET_CMD() ((*s_file_ops->get_current_cmd)()) +/* + * Check if we're currently collecting data. + */ +#define IS_COLLECTING() \ + ({ \ + sw_driver_collection_cmd_t __cmd = GET_CMD(); \ + bool __val = (__cmd == SW_DRIVER_START_COLLECTION || \ + __cmd == SW_DRIVER_RESUME_COLLECTION); \ + __val; \ + }) +/* + * Check if we're currently paused. + */ +#define IS_SLEEPING() \ + ({ \ + sw_driver_collection_cmd_t __cmd = GET_CMD(); \ + bool __val = __cmd == SW_DRIVER_PAUSE_COLLECTION; \ + __val; \ + }) +/* ------------------------------------------------- + * Typedefs + * ------------------------------------------------- + */ +typedef unsigned long sw_bits_t; + +/* ------------------------------------------------- + * Local function declarations. 
+ * ------------------------------------------------- + */ +static int sw_device_open_i(struct inode *inode, struct file *file); +static int sw_device_release_i(struct inode *inode, struct file *file); +static ssize_t sw_device_read_i(struct file *file, char __user *buffer, + size_t length, loff_t *offset); +static long sw_device_unlocked_ioctl_i(struct file *filp, + unsigned int ioctl_num, + unsigned long ioctl_param); +#if defined(CONFIG_COMPAT) && defined(CONFIG_X86_64) +static long sw_device_compat_ioctl_i(struct file *file, unsigned int ioctl_num, + unsigned long ioctl_param); +#endif + +/* + * File operations exported by the driver. + */ +static struct file_operations s_fops = { + .open = &sw_device_open_i, + .read = &sw_device_read_i, + .unlocked_ioctl = &sw_device_unlocked_ioctl_i, +#if defined(CONFIG_COMPAT) && defined(CONFIG_X86_64) + .compat_ioctl = &sw_device_compat_ioctl_i, +#endif /* COMPAT && x64 */ + .release = &sw_device_release_i, +}; +/* + * Character device file MAJOR + * number -- we're now obtaining + * this dynamically. + */ +static int apwr_dev_major_num = -1; +/* + * Variables to create the character device file + */ +static dev_t apwr_dev; +static struct cdev *apwr_cdev; +static struct class *apwr_class; +/* + * Operations exported by the main driver. + */ +static struct sw_file_ops *s_file_ops; +/* + * Is the device open right now? Used to prevent + * concurent access into the same device. + */ +#define DEV_IS_OPEN 0 /* see if device is in use */ +static volatile sw_bits_t dev_status; + +/* + * File operations. + */ +/* + * Service an "open(...)" call from user-space. 
+ */ +static int sw_device_open_i(struct inode *inode, struct file *file) +{ + /* + * We don't want to talk to two processes at the same time + */ + if (test_and_set_bit(DEV_IS_OPEN, &dev_status)) { + /* Device is busy */ + return -EBUSY; + } + + if (!try_module_get(THIS_MODULE)) { + /* No such device */ + return -ENODEV; + } + pw_pr_debug("OK, allowed client open!\n"); + return PW_SUCCESS; +} + +/* + * Service a "close(...)" call from user-space. + */ +static int sw_device_release_i(struct inode *inode, struct file *file) +{ + /* + * Did the client just try to zombie us? + */ + int retVal = PW_SUCCESS; + + if (IS_COLLECTING()) { + pw_pr_error( + "ERROR: Detected ongoing collection on a device release!\n"); + retVal = (*s_file_ops->stop_handler)(); + } + module_put(THIS_MODULE); + /* + * We're now ready for our next caller + */ + clear_bit(DEV_IS_OPEN, &dev_status); + return retVal; +} + +static ssize_t sw_device_read_i(struct file *file, char __user *user_buffer, + size_t length, loff_t *offset) +{ + size_t bytes_read = 0; + u32 val = 0; + + if (!user_buffer) { + pw_pr_error( + "ERROR: \"read\" called with an empty user_buffer?!\n"); + return -PW_ERROR; + } + do { + val = SW_ALL_WRITES_DONE_MASK; + if (wait_event_interruptible( + sw_reader_queue, + (sw_any_seg_full(&val, + (*s_file_ops->should_flush)()) || + (!IS_COLLECTING() && !IS_SLEEPING())))) { + pw_pr_error("wait_event_interruptible error\n"); + return -ERESTARTSYS; + } + pw_pr_debug(KERN_INFO "After wait: val = %u\n", val); + } while (val == SW_NO_DATA_AVAIL_MASK); + /* + * Are we done producing/consuming? + */ + if (val == SW_ALL_WRITES_DONE_MASK) { + return 0; /* "0" ==> EOF */ + } + /* + * Copy the buffer contents into userspace. 
+ */ + bytes_read = sw_consume_data( + val, user_buffer, + length); /* 'read' returns # of bytes actually read */ + if (unlikely(bytes_read == 0)) { + /* Cannot be EOF since that has already been checked above */ + return -EIO; + } + return bytes_read; +} + +/* + * (1) Handle 32b IOCTLs in 32b kernel-space. + * (2) Handle 64b IOCTLs in 64b kernel-space. + */ +static long sw_device_unlocked_ioctl_i(struct file *filp, + unsigned int ioctl_num, + unsigned long ioctl_param) +{ + struct sw_driver_ioctl_arg __user *remote_args = + (struct sw_driver_ioctl_arg __user *)ioctl_param; + struct sw_driver_ioctl_arg local_args; + + if (copy_from_user(&local_args, remote_args, sizeof(local_args))) { + pw_pr_error("ERROR copying ioctl args from userspace\n"); + return -PW_ERROR; + } + return (*s_file_ops->ioctl_handler)(ioctl_num, &local_args); +}; + +#if defined(CONFIG_COMPAT) && defined(CONFIG_X86_64) +#include +/* + * Helper struct for use in translating + * IOCTLs from 32b user programs in 64b + * kernels. + */ +#pragma pack(push, 1) +struct sw_driver_ioctl_arg32 { + pw_s32_t in_len; + pw_s32_t out_len; + compat_caddr_t in_arg; + compat_caddr_t out_arg; +}; +#pragma pack(pop) + +/* + * Handle 32b IOCTLs in 64b kernel-space. 
+ */ +static long sw_device_compat_ioctl_i(struct file *file, unsigned int ioctl_num, + unsigned long ioctl_param) +{ + struct sw_driver_ioctl_arg32 __user *remote_args32 = + compat_ptr(ioctl_param); + struct sw_driver_ioctl_arg local_args; + u32 data; + + if (get_user(local_args.in_len, &remote_args32->in_len)) { + return -PW_ERROR; + } + if (get_user(local_args.out_len, &remote_args32->out_len)) { + return -PW_ERROR; + } + if (get_user(data, &remote_args32->in_arg)) { + return -PW_ERROR; + } + local_args.in_arg = (char *)(unsigned long)data; + if (get_user(data, &remote_args32->out_arg)) { + return -PW_ERROR; + } + local_args.out_arg = (char *)(unsigned long)data; + return (*s_file_ops->ioctl_handler)(ioctl_num, &local_args); +} +#endif + +/* + * Device creation, deletion operations. + */ +int sw_register_dev(struct sw_file_ops *ops) +{ + int ret; + /* + * Ensure we have valid handlers! + */ + if (!ops) { + pw_pr_error("NULL file ops?!\n"); + return -PW_ERROR; + } + + /* + * Create the character device + */ + ret = alloc_chrdev_region(&apwr_dev, 0, 1, PW_DEVICE_NAME); + apwr_dev_major_num = MAJOR(apwr_dev); + apwr_class = class_create(THIS_MODULE, "apwr"); + if (IS_ERR(apwr_class)) { + printk(KERN_ERR "Error registering apwr class\n"); + } + + device_create(apwr_class, NULL, apwr_dev, NULL, PW_DEVICE_NAME); + apwr_cdev = cdev_alloc(); + if (apwr_cdev == NULL) { + printk("Error allocating character device\n"); + return ret; + } + apwr_cdev->owner = THIS_MODULE; + apwr_cdev->ops = &s_fops; + if (cdev_add(apwr_cdev, apwr_dev, 1) < 0) { + printk("Error registering device driver\n"); + return ret; + } + s_file_ops = ops; + + return ret; +} + +void sw_unregister_dev(void) +{ + /* + * Remove the device + */ + unregister_chrdev(apwr_dev_major_num, PW_DEVICE_NAME); + device_destroy(apwr_class, apwr_dev); + class_destroy(apwr_class); + unregister_chrdev_region(apwr_dev, 1); + cdev_del(apwr_cdev); +} diff --git a/drivers/platform/x86/socwatch/sw_hardware_io.c 
b/drivers/platform/x86/socwatch/sw_hardware_io.c new file mode 100644 index 0000000000000..759288ac546ee --- /dev/null +++ b/drivers/platform/x86/socwatch/sw_hardware_io.c @@ -0,0 +1,188 @@ +/* + + This file is provided under a dual BSD/GPLv2 license. When using or + redistributing this file, you may do so under either license. + + GPL LICENSE SUMMARY + + Copyright(c) 2014 - 2018 Intel Corporation. + + This program is free software; you can redistribute it and/or modify + it under the terms of version 2 of the GNU General Public License as + published by the Free Software Foundation. + + This program is distributed in the hope that it will be useful, but + WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + General Public License for more details. + + Contact Information: + SoC Watch Developer Team + Intel Corporation, + 1300 S Mopac Expwy, + Austin, TX 78746 + + BSD LICENSE + + Copyright(c) 2014 - 2018 Intel Corporation. + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions + are met: + + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in + the documentation and/or other materials provided with the + distribution. + * Neither the name of Intel Corporation nor the names of its + contributors may be used to endorse or promote products derived + from this software without specific prior written permission. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT + OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +*/ + +#include "sw_types.h" +#include "sw_kernel_defines.h" +#include "sw_ops_provider.h" +#include "sw_mem.h" +#include "sw_internal.h" +#include "sw_hardware_io.h" + +struct sw_ops_node { + const struct sw_hw_ops *op; + int id; + + SW_LIST_ENTRY(list, sw_ops_node); +}; + +static SW_DEFINE_LIST_HEAD(s_ops, +sw_in sw_ops_node) = SW_LIST_HEAD_INITIALIZER(s_ops); + +static int s_op_idx = -1; + +/* + * Function definitions. + */ +int sw_get_hw_op_id(const struct sw_hw_ops *ops) +{ + if (ops && ops->name) { + struct sw_ops_node *node = NULL; + + SW_LIST_FOR_EACH_ENTRY(node, &s_ops, list) + { + if (node->op->name && + !strcmp(node->op->name, ops->name)) { + return node->id; + } + } + } + return -1; +} + +const struct sw_hw_ops *sw_get_hw_ops_for(int id) +{ + struct sw_ops_node *node = NULL; + + SW_LIST_FOR_EACH_ENTRY(node, &s_ops, list) + { + if (node->id == id) { + return node->op; + } + } + return NULL; +} + +bool sw_is_valid_hw_op_id(int id) +{ + struct sw_ops_node *node = NULL; + + SW_LIST_FOR_EACH_ENTRY(node, &s_ops, list) + { + if (node->id == id) { + return true; + } + } + return false; +} + +const char *sw_get_hw_op_abstract_name(const struct sw_hw_ops *op) +{ + if (op) { + return op->name; + } + return NULL; +} + +int sw_for_each_hw_op(int (*func)(const struct sw_hw_ops *op, void *priv), + void *priv, bool return_on_error) { + int retval = PW_SUCCESS; + struct sw_ops_node *node = NULL; + + if (func) { + SW_LIST_FOR_EACH_ENTRY(node, &s_ops, 
list) + { + if ((*func)(node->op, priv)) { + retval = -EIO; + if (return_on_error) { + break; + } + } + } + } + return retval; +} + +int sw_register_hw_op(const struct sw_hw_ops *op) +{ + struct sw_ops_node *node = NULL; + + if (!op) { + pw_pr_error("NULL input node in \"sw_register_hw_op\""); + return -EIO; + } + node = sw_kmalloc(sizeof(struct sw_ops_node), GFP_KERNEL); + if (!node) { + pw_pr_error("sw_kmalloc error in \"sw_register_hw_op\""); + return -ENOMEM; + } + node->op = op; + node->id = ++s_op_idx; + SW_LIST_ENTRY_INIT(node, list); + SW_LIST_ADD(&s_ops, node, list); + return PW_SUCCESS; +} + +int sw_register_hw_ops(void) +{ + return sw_register_ops_providers(); +} + +void sw_free_hw_ops(void) +{ + /* + * Free all nodes. + */ + while (!SW_LIST_EMPTY(&s_ops)) { + struct sw_ops_node *node = + SW_LIST_GET_HEAD_ENTRY(&s_ops, sw_ops_node, list); + SW_LIST_UNLINK(node, list); + sw_kfree(node); + } + /* + * Call our providers to deallocate resources. + */ + sw_free_ops_providers(); +} diff --git a/drivers/platform/x86/socwatch/sw_internal.c b/drivers/platform/x86/socwatch/sw_internal.c new file mode 100644 index 0000000000000..04544b8fecb34 --- /dev/null +++ b/drivers/platform/x86/socwatch/sw_internal.c @@ -0,0 +1,238 @@ +/* + + This file is provided under a dual BSD/GPLv2 license. When using or + redistributing this file, you may do so under either license. + + GPL LICENSE SUMMARY + + Copyright(c) 2014 - 2018 Intel Corporation. + + This program is free software; you can redistribute it and/or modify + it under the terms of version 2 of the GNU General Public License as + published by the Free Software Foundation. + + This program is distributed in the hope that it will be useful, but + WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + General Public License for more details. 
+ + Contact Information: + SoC Watch Developer Team + Intel Corporation, + 1300 S Mopac Expwy, + Austin, TX 78746 + + BSD LICENSE + + Copyright(c) 2014 - 2018 Intel Corporation. + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions + are met: + + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in + the documentation and/or other materials provided with the + distribution. + * Neither the name of Intel Corporation nor the names of its + contributors may be used to endorse or promote products derived + from this software without specific prior written permission. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +*/ +#include "sw_hardware_io.h" +#include "sw_mem.h" +#include "sw_kernel_defines.h" +#include "sw_internal.h" + +bool sw_check_output_buffer_params(void __user *buffer, size_t bytes_to_read, + size_t buff_size) +{ + if (!buffer) { + pw_pr_error("ERROR: NULL ptr in sw_consume_data!\n"); + return false; + } + if (bytes_to_read != buff_size) { + pw_pr_error("Error: bytes_to_read = %zu, required to be %zu\n", + bytes_to_read, buff_size); + return false; + } + return true; +} + +unsigned long sw_copy_to_user(char __user *dst, char *src, size_t bytes_to_copy) +{ + return copy_to_user(dst, src, bytes_to_copy); +} + +void sw_schedule_work(const struct cpumask *mask, void (*work)(void *), + void *data) +{ + /* + * Did the user ask us to run on 'ANY' CPU? + */ + if (cpumask_empty(mask)) { + (*work)(data); /* Call on current CPU */ + } else { + preempt_disable(); + { + /* + * Did the user ask to run on this CPU? + */ + if (cpumask_test_cpu(RAW_CPU(), mask)) { + (*work)(data); /* Call on current CPU */ + } + /* + * OK, now check other CPUs. 
+ */ + smp_call_function_many( + mask, work, data, + true /* Wait for all funcs to complete */); + } + preempt_enable(); + } +} + +int sw_get_cpu(unsigned long *flags) +{ + local_irq_save(*flags); + return get_cpu(); +} + +void sw_put_cpu(unsigned long flags) +{ + put_cpu(); + local_irq_restore(flags); +} + +#ifndef CONFIG_NR_CPUS_PER_MODULE +#define CONFIG_NR_CPUS_PER_MODULE 2 +#endif /* CONFIG_NR_CPUS_PER_MODULE */ + +static void sw_get_cpu_sibling_mask(int cpu, struct cpumask *sibling_mask) +{ + unsigned int base = + (cpu / CONFIG_NR_CPUS_PER_MODULE) * CONFIG_NR_CPUS_PER_MODULE; + unsigned int i; + + cpumask_clear(sibling_mask); + for (i = base; i < (base + CONFIG_NR_CPUS_PER_MODULE); ++i) { + cpumask_set_cpu(i, sibling_mask); + } +} + +struct pw_cpufreq_node { + int cpu; + struct cpumask cpus, related_cpus; + unsigned int shared_type; + struct list_head list; +}; +static struct list_head pw_cpufreq_policy_lists; + +int sw_set_module_scope_for_cpus(void) +{ + /* + * Warning: no support for cpu hotplugging! + */ + int cpu = 0; + + INIT_LIST_HEAD(&pw_cpufreq_policy_lists); + + for_each_online_cpu(cpu) { + struct cpumask sibling_mask; + struct pw_cpufreq_node *node = NULL; + struct cpufreq_policy *policy = cpufreq_cpu_get(cpu); + + if (!policy) { + continue; + } + /* + * Get siblings for this cpu. + */ + sw_get_cpu_sibling_mask(cpu, &sibling_mask); + /* + * Check if affected_cpus already contains sibling_mask + */ + if (cpumask_subset(&sibling_mask, policy->cpus)) { + /* + * 'sibling_mask' is already a subset of + * affected_cpus -- nothing to do on this CPU. 
+ */ + cpufreq_cpu_put(policy); + continue; + } + + node = sw_kmalloc(sizeof(*node), GFP_ATOMIC); + if (node) { + cpumask_clear(&node->cpus); + cpumask_clear(&node->related_cpus); + + node->cpu = cpu; + cpumask_copy(&node->cpus, policy->cpus); + cpumask_copy(&node->related_cpus, policy->related_cpus); + node->shared_type = policy->shared_type; + } + + policy->shared_type = CPUFREQ_SHARED_TYPE_ALL; + /* + * Set siblings. Don't worry about online/offline, that's + * handled below. + */ + cpumask_copy(policy->cpus, &sibling_mask); + /* + * Ensure 'related_cpus' is a superset of 'cpus' + */ + cpumask_or(policy->related_cpus, policy->related_cpus, + policy->cpus); + /* + * Ensure 'cpus' only contains online cpus. + */ + cpumask_and(policy->cpus, policy->cpus, cpu_online_mask); + + cpufreq_cpu_put(policy); + + if (node) { + INIT_LIST_HEAD(&node->list); + list_add_tail(&node->list, &pw_cpufreq_policy_lists); + } + } + return PW_SUCCESS; +} + +int sw_reset_module_scope_for_cpus(void) +{ + struct list_head *head = &pw_cpufreq_policy_lists; + + while (!list_empty(head)) { + struct pw_cpufreq_node *node = + list_first_entry(head, struct pw_cpufreq_node, list); + int cpu = node->cpu; + struct cpufreq_policy *policy = cpufreq_cpu_get(cpu); + if (!policy) { + continue; + } + policy->shared_type = node->shared_type; + cpumask_copy(policy->related_cpus, &node->related_cpus); + cpumask_copy(policy->cpus, &node->cpus); + + cpufreq_cpu_put(policy); + + pw_pr_debug("OK, reset cpufreq_policy for cpu %d\n", cpu); + list_del(&node->list); + sw_kfree(node); + } + return PW_SUCCESS; +} diff --git a/drivers/platform/x86/socwatch/sw_mem.c b/drivers/platform/x86/socwatch/sw_mem.c new file mode 100644 index 0000000000000..ac7725387c789 --- /dev/null +++ b/drivers/platform/x86/socwatch/sw_mem.c @@ -0,0 +1,331 @@ +/* + + This file is provided under a dual BSD/GPLv2 license. When using or + redistributing this file, you may do so under either license. 
+ + GPL LICENSE SUMMARY + + Copyright(c) 2014 - 2018 Intel Corporation. + + This program is free software; you can redistribute it and/or modify + it under the terms of version 2 of the GNU General Public License as + published by the Free Software Foundation. + + This program is distributed in the hope that it will be useful, but + WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + General Public License for more details. + + Contact Information: + SoC Watch Developer Team + Intel Corporation, + 1300 S Mopac Expwy, + Austin, TX 78746 + + BSD LICENSE + + Copyright(c) 2014 - 2018 Intel Corporation. + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions + are met: + + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in + the documentation and/or other materials provided with the + distribution. + * Neither the name of Intel Corporation nor the names of its + contributors may be used to endorse or promote products derived + from this software without specific prior written permission. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT + OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +*/ +#include + +#include "sw_kernel_defines.h" +#include "sw_lock_defs.h" +#include "sw_mem.h" + +/* + * How do we behave if we ever + * get an allocation error? + * (a) Setting to '1' REFUSES ANY FURTHER + * allocation requests. + * (b) Setting to '0' treats each + * allocation request as separate, and + * handles them on an on-demand basis + */ +#define DO_MEM_PANIC_ON_ALLOC_ERROR 0 + +#if DO_MEM_PANIC_ON_ALLOC_ERROR +/* + * If we ever run into memory allocation errors then + * stop (and drop) everything. + */ +static atomic_t pw_mem_should_panic = ATOMIC_INIT(0); +/* + * Macro to check if PANIC is on. + */ +#define MEM_PANIC() \ + do { \ + atomic_set(&pw_mem_should_panic, 1); \ + smp_mb(); \ + } while (0) +#define SHOULD_TRACE() \ + ({ \ + bool __tmp = false; \ + smp_mb(); \ + __tmp = (atomic_read(&pw_mem_should_panic) == 0); \ + __tmp; \ + }) + +#else /* if !DO_MEM_PANIC_ON_ALLOC_ERROR */ + +#define MEM_PANIC() +#define SHOULD_TRACE() (true) + +#endif + +/* + * Variables to track memory usage. + */ +/* + * TOTAL num bytes allocated. + */ +static u64 total_num_bytes_alloced; +/* + * Num of allocated bytes that have + * not yet been freed. + */ +static u64 curr_num_bytes_alloced; +/* + * Max # of allocated bytes that + * have not been freed at any point + * in time. 
+ */ +static u64 max_num_bytes_alloced; + +u64 sw_get_total_bytes_alloced(void) +{ + return total_num_bytes_alloced; +}; + +u64 sw_get_max_bytes_alloced(void) +{ + return max_num_bytes_alloced; +}; + +u64 sw_get_curr_bytes_alloced(void) +{ + return curr_num_bytes_alloced; +}; + +/* + * Allocate free pages. + * TODO: add memory tracker? + */ +unsigned long sw_allocate_pages(gfp_t flags, + unsigned int alloc_size_in_bytes) +{ + return __get_free_pages(flags, get_order(alloc_size_in_bytes)); +} +/* + * Free up previously allocated pages. + * TODO: add memory tracker? + */ +void sw_release_pages(unsigned long addr, unsigned int alloc_size_in_bytes) +{ + free_pages(addr, get_order(alloc_size_in_bytes)); +} + +#if DO_TRACK_MEMORY_USAGE + +/* + * Lock to guard access to memory + * debugging stats. + */ +static SW_DEFINE_SPINLOCK(sw_kmalloc_lock); + +/* + * Helper macros to print out + * mem debugging stats. + */ +#define TOTAL_NUM_BYTES_ALLOCED() total_num_bytes_alloced +#define CURR_NUM_BYTES_ALLOCED() curr_num_bytes_alloced +#define MAX_NUM_BYTES_ALLOCED() max_num_bytes_alloced + +/* + * MAGIC number based memory tracker. Relies on + * storing (a) a MAGIC marker and (b) the requested + * size WITHIN the allocated block of memory. Standard + * malloc-tracking stuff, really. + * + * Overview: + * (1) ALLOCATION: + * When asked to allocate a block of 'X' bytes, allocate + * 'X' + 8 bytes. Then, in the FIRST 4 bytes, write the + * requested size. In the NEXT 4 bytes, write a special + * (i.e. MAGIC) number to let our deallocator know that + * this block of memory was allocated using this technique. + * Also, keep track of the number of bytes allocated. + * + * (2) DEALLOCATION: + * When given an object to deallocate, we first check + * the MAGIC number by decrementing the pointer by + * 4 bytes and reading the (integer) stored there. 
+ * After ensuring the pointer was, in fact, allocated + * by us, we then read the size of the allocated + * block (again, by decrementing the pointer by 4 + * bytes and reading the integer size). We + * use this size argument to decrement # of bytes + * allocated. + */ +#define PW_MEM_MAGIC 0xdeadbeef + +#define PW_ADD_MAGIC(x) \ + ({ \ + char *__tmp1 = (char *)(x); \ + *((int *)__tmp1) = PW_MEM_MAGIC; \ + __tmp1 += sizeof(int); \ + __tmp1; \ + }) +#define PW_ADD_SIZE(x, s) \ + ({ \ + char *__tmp1 = (char *)(x); \ + *((int *)__tmp1) = (s); \ + __tmp1 += sizeof(int); \ + __tmp1; \ + }) +#define PW_ADD_STAMP(x, s) PW_ADD_MAGIC(PW_ADD_SIZE((x), (s))) + +#define PW_IS_MAGIC(x) \ + ({ \ + int *__tmp1 = (int *)((char *)(x) - sizeof(int)); \ + *__tmp1 == PW_MEM_MAGIC; \ + }) +#define PW_REMOVE_STAMP(x) \ + ({ \ + char *__tmp1 = (char *)(x); \ + __tmp1 -= sizeof(int) * 2; \ + __tmp1; \ + }) +#define PW_GET_SIZE(x) (*((int *)(x))) + +void *sw_kmalloc(size_t size, gfp_t flags) +{ + size_t act_size = 0; + void *retVal = NULL; + /* + * No point in allocating if + * we were unable to allocate + * previously! + */ + { + if (!SHOULD_TRACE()) { + return NULL; + } + } + /* + * (1) Allocate requested block. + */ + act_size = size + sizeof(int) * 2; + retVal = kmalloc(act_size, flags); + if (!retVal) { + /* + * Panic if we couldn't allocate + * requested memory. + */ + printk(KERN_INFO "ERROR: could NOT allocate memory!\n"); + MEM_PANIC(); + return NULL; + } + /* + * (2) Update memory usage stats. + */ + LOCK(sw_kmalloc_lock); + { + total_num_bytes_alloced += size; + curr_num_bytes_alloced += size; + if (curr_num_bytes_alloced > max_num_bytes_alloced) + max_num_bytes_alloced = curr_num_bytes_alloced; + } + UNLOCK(sw_kmalloc_lock); + /* + * (3) And finally, add the 'size' + * and 'magic' stamps. + */ + return PW_ADD_STAMP(retVal, size); +}; + +void sw_kfree(const void *obj) +{ + void *tmp = NULL; + size_t size = 0; + + /* + * (1) Check if this block was allocated + * by us. 
+ */ + if (!PW_IS_MAGIC(obj)) { + printk(KERN_INFO "ERROR: %p is NOT a PW_MAGIC ptr!\n", obj); + return; + } + /* + * (2) Strip the magic num... + */ + tmp = PW_REMOVE_STAMP(obj); + /* + * ...and retrieve size of block. + */ + size = PW_GET_SIZE(tmp); + /* + * (3) Update memory usage stats. + */ + LOCK(sw_kmalloc_lock); + { + curr_num_bytes_alloced -= size; + } + UNLOCK(sw_kmalloc_lock); + /* + * And finally, free the block. + */ + kfree(tmp); +}; + +#else /* !DO_TRACK_MEMORY_USAGE */ + +void *sw_kmalloc(size_t size, gfp_t flags) +{ + void *ret = NULL; + + if (SHOULD_TRACE()) { + if (!(ret = kmalloc(size, flags))) { + /* + * Panic if we couldn't allocate + * requested memory. + */ + MEM_PANIC(); + } + } + return ret; +}; + +void sw_kfree(const void *mem) +{ + kfree(mem); +}; + +#endif /* DO_TRACK_MEMORY_USAGE */ diff --git a/drivers/platform/x86/socwatch/sw_ops_provider.c b/drivers/platform/x86/socwatch/sw_ops_provider.c new file mode 100644 index 0000000000000..6e0c772046571 --- /dev/null +++ b/drivers/platform/x86/socwatch/sw_ops_provider.c @@ -0,0 +1,1227 @@ +/* + + This file is provided under a dual BSD/GPLv2 license. When using or + redistributing this file, you may do so under either license. + + GPL LICENSE SUMMARY + + Copyright(c) 2014 - 2018 Intel Corporation. + + This program is free software; you can redistribute it and/or modify + it under the terms of version 2 of the GNU General Public License as + published by the Free Software Foundation. + + This program is distributed in the hope that it will be useful, but + WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + General Public License for more details. + + Contact Information: + SoC Watch Developer Team + Intel Corporation, + 1300 S Mopac Expwy, + Austin, TX 78746 + + BSD LICENSE + + Copyright(c) 2014 - 2018 Intel Corporation. 
+ + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions + are met: + + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in + the documentation and/or other materials provided with the + distribution. + * Neither the name of Intel Corporation nor the names of its + contributors may be used to endorse or promote products derived + from this software without specific prior written permission. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +*/ +#include +#include +#include /* "pci_get_domain_bus_and_slot" */ +#include /* "udelay" */ +#include +#ifdef CONFIG_RPMSG_IPC +#include +#endif /* CONFIG_RPMSG_IPC */ + +#include "sw_types.h" +#include "sw_kernel_defines.h" +#include "sw_hardware_io.h" +#include "sw_telem.h" +#include "sw_ops_provider.h" + +/* + * Compile time constants. + */ +/* + * Should we be doing 'direct' PCI reads and writes? 
+ * '1' ==> YES, call "pci_{read,write}_config_dword()" directly + * '0' ==> NO, Use the "intel_mid_msgbus_{read32,write32}_raw()" + * API (defined in 'intel_mid_pcihelpers.c') + */ +#define DO_DIRECT_PCI_READ_WRITE 0 +#if !IS_ENABLED(CONFIG_ANDROID) || !defined(CONFIG_X86_WANT_INTEL_MID) +/* + * 'intel_mid_pcihelpers.h' is probably not present -- force + * direct PCI calls in this case. + */ +#undef DO_DIRECT_PCI_READ_WRITE +#define DO_DIRECT_PCI_READ_WRITE 1 +#endif +#if !DO_DIRECT_PCI_READ_WRITE +#include +#endif + +#define SW_PCI_MSG_CTRL_REG 0x000000D0 +#define SW_PCI_MSG_DATA_REG 0x000000D4 + +/* + * NUM_RETRY & USEC_DELAY are used in PCH Mailbox (sw_read_pch_mailbox_info_i). + * Tested on KBL + SPT-LP. May need to revisit. + */ +#define NUM_RETRY 100 +#define USEC_DELAY 100 + +#define EXTCNF_CTRL 0xF00 /* offset for hw semaphore. */ +#define FWSM_CTRL 0x5B54 /* offset for fw semaphore */ +#define GBE_CTRL_OFFSET 0x34 /* GBE LPM offset */ + +#define IS_HW_SEMAPHORE_SET(data) (data & (pw_u64_t)(0x1 << 6)) +#define IS_FW_SEMAPHORE_SET(data) (data & (pw_u64_t)0x1) +/* + * Number of retries for mailbox configuration + */ +#define MAX_MAILBOX_ITERS 100 + +/* + * Local data structures. + */ +/* + * TODO: separate into H/W and S/W IO? + */ +typedef enum sw_io_type { + SW_IO_MSR = 0, + SW_IO_IPC = 1, + SW_IO_MMIO = 2, + SW_IO_PCI = 3, + SW_IO_CONFIGDB = 4, + SW_IO_TRACE_ARGS = 5, + SW_IO_WAKEUP = 6, + SW_IO_SOCPERF = 7, + SW_IO_PROC_NAME = 8, + SW_IO_IRQ_NAME = 9, + SW_IO_WAKELOCK = 10, + SW_IO_TELEM = 11, + SW_IO_PCH_MAILBOX = 12, + SW_IO_MAILBOX = 13, + SW_IO_MAX = 14, +} sw_io_type_t; + +/* + * "io_remapped" values for HW and FW semaphores + */ +static struct { + volatile void __iomem *hw_semaphore; + volatile void __iomem *fw_semaphore; +} s_gbe_semaphore = { NULL, NULL }; + +/* + * Function declarations. + */ +/* + * Exported by the SOCPERF driver. + */ +extern void SOCPERF_Read_Data2(void *data_buffer); + +/* + * Init functions. 
+ */ +int sw_ipc_mmio_descriptor_init_func_i( + struct sw_driver_io_descriptor *descriptor); +int sw_pch_mailbox_descriptor_init_func_i( + struct sw_driver_io_descriptor *descriptor); +int sw_mailbox_descriptor_init_func_i( + struct sw_driver_io_descriptor *descriptor); + +/* + * Read functions. + */ +void sw_read_msr_info_i(char *dst_vals, int cpu, + const struct sw_driver_io_descriptor *descriptor, + u16 counter_size_in_bytes); +void sw_read_ipc_info_i(char *dst_vals, int cpu, + const struct sw_driver_io_descriptor *descriptor, + u16 counter_size_in_bytes); +void sw_read_mmio_info_i(char *dst_vals, int cpu, + const struct sw_driver_io_descriptor *descriptor, + u16 counter_size_in_bytes); +void sw_read_pch_mailbox_info_i(char *dst_vals, int cpu, + const struct sw_driver_io_descriptor *descriptor, + u16 counter_size_in_bytes); +void sw_read_mailbox_info_i(char *dst_vals, int cpu, + const struct sw_driver_io_descriptor *descriptor, + u16 counter_size_in_bytes); +void sw_read_pci_info_i(char *dst_vals, int cpu, + const struct sw_driver_io_descriptor *descriptor, + u16 counter_size_in_bytes); +void sw_read_configdb_info_i(char *dst_vals, int cpu, + const struct sw_driver_io_descriptor *descriptor, + u16 counter_size_in_bytes); +void sw_read_socperf_info_i(char *dst_vals, int cpu, + const struct sw_driver_io_descriptor *descriptor, + u16 counter_size_in_bytes); + +/* + * Write functions. 
+ */ +void sw_write_msr_info_i(char *dst_vals, int cpu, + const struct sw_driver_io_descriptor *descriptor, + u16 counter_size_in_bytes); +void sw_write_ipc_info_i(char *dst_vals, int cpu, + const struct sw_driver_io_descriptor *descriptor, + u16 counter_size_in_bytes); +void sw_write_mmio_info_i(char *dst_vals, int cpu, + const struct sw_driver_io_descriptor *descriptor, + u16 counter_size_in_bytes); +void sw_write_mailbox_info_i(char *dst_vals, int cpu, + const struct sw_driver_io_descriptor *descriptor, + u16 counter_size_in_bytes); +void sw_write_pci_info_i(char *dst_vals, int cpu, + const struct sw_driver_io_descriptor *descriptor, + u16 counter_size_in_bytes); +void sw_write_configdb_info_i(char *dst_vals, int cpu, + const struct sw_driver_io_descriptor *descriptor, + u16 counter_size_in_bytes); +void sw_write_trace_args_info_i(char *dst_vals, int cpu, + const struct sw_driver_io_descriptor *descriptor, + u16 counter_size_in_bytes); +void sw_write_wakeup_info_i(char *dst_vals, int cpu, + const struct sw_driver_io_descriptor *descriptor, + u16 counter_size_in_bytes); +void sw_write_socperf_info_i(char *dst_vals, int cpu, + const struct sw_driver_io_descriptor *descriptor, + u16 counter_size_in_bytes); + +/* + * Print functions. + */ +int sw_print_msr_io_descriptor(const struct sw_driver_io_descriptor + *descriptor); + +/* + * Reset functions -- equal but opposite of init. + */ +int sw_ipc_mmio_descriptor_reset_func_i( + const struct sw_driver_io_descriptor *descriptor); +int sw_pch_mailbox_descriptor_reset_func_i( + const struct sw_driver_io_descriptor *descriptor); +int sw_mailbox_descriptor_reset_func_i( + const struct sw_driver_io_descriptor *descriptor); + +/* + * Available functions. + */ +bool sw_socperf_available_i(void); + +/* + * Helper functions. 
+ */ +u32 sw_platform_configdb_read32(u32 address); +u32 sw_platform_pci_read32(u32 bus, u32 device, u32 function, u32 ctrl_offset, + u32 ctrl_value, u32 data_offset); +u64 sw_platform_pci_read64(u32 bus, u32 device, u32 function, u32 ctrl_offset, + u32 ctrl_value, u32 data_offset); +bool sw_platform_pci_write32(u32 bus, u32 device, u32 function, + u32 write_offset, u32 data_value); + +/* + * Table of collector operations. + */ +static const struct sw_hw_ops s_hw_ops[] = { + [SW_IO_MSR] = { .name = "MSR", + .init = NULL, + .read = &sw_read_msr_info_i, + .write = &sw_write_msr_info_i, + .print = &sw_print_msr_io_descriptor, + .reset = NULL, + .available = NULL }, + [SW_IO_IPC] = { + .name = "IPC", + .init = &sw_ipc_mmio_descriptor_init_func_i, + .read = &sw_read_ipc_info_i, + .reset = &sw_ipc_mmio_descriptor_reset_func_i, + /* Other fields are don't care (will be set to NULL) */ + }, + [SW_IO_MMIO] = { + .name = "MMIO", + .init = &sw_ipc_mmio_descriptor_init_func_i, + .read = &sw_read_mmio_info_i, + .write = &sw_write_mmio_info_i, + .reset = &sw_ipc_mmio_descriptor_reset_func_i, + /* Other fields are don't care (will be set to NULL) */ + }, + [SW_IO_PCI] = { + .name = "PCI", + .read = &sw_read_pci_info_i, + .write = &sw_write_pci_info_i, + /* Other fields are don't care (will be set to NULL) */ + }, + [SW_IO_CONFIGDB] = { + .name = "CONFIGDB", + .read = &sw_read_configdb_info_i, + /* Other fields are don't care (will be set to NULL) */ + }, + [SW_IO_WAKEUP] = { + .name = "WAKEUP", + /* Other fields are don't care (will be set to NULL) */ + }, + [SW_IO_SOCPERF] = { + .name = "SOCPERF", + .read = &sw_read_socperf_info_i, + .available = &sw_socperf_available_i, + /* Other fields are don't care (will be set to NULL) */ + }, + [SW_IO_PROC_NAME] = { + .name = "PROC-NAME", + /* Other fields are don't care (will be set to NULL) */ + }, + [SW_IO_IRQ_NAME] = { + .name = "IRQ-NAME", + /* Other fields are don't care (will be set to NULL) */ + }, + [SW_IO_WAKELOCK] = { + .name = 
"WAKELOCK", + /* Other fields are don't care (will be set to NULL) */ + }, + [SW_IO_TELEM] = { + .name = "TELEM", + .init = &sw_telem_init_func, + .read = &sw_read_telem_info, + .reset = &sw_reset_telem, + .available = &sw_telem_available, + .post_config = &sw_telem_post_config, + /* Other fields are don't care (will be set to NULL) */ + }, + [SW_IO_PCH_MAILBOX] = { + .name = "PCH-MAILBOX", + .init = &sw_pch_mailbox_descriptor_init_func_i, + .read = &sw_read_pch_mailbox_info_i, + .reset = &sw_pch_mailbox_descriptor_reset_func_i, + /* Other fields are don't care (will be set to NULL) */ + }, + [SW_IO_MAILBOX] = { + .name = "MAILBOX", + .init = &sw_mailbox_descriptor_init_func_i, + .read = &sw_read_mailbox_info_i, + .write = &sw_write_mailbox_info_i, + .reset = &sw_mailbox_descriptor_reset_func_i, + /* Other fields are don't care (will be set to NULL) */ + }, + [SW_IO_MAX] = { + .name = NULL, + /* Other fields are don't care (will be set to NULL) */ + } +}; + +/* + * Function definitions. 
+ */ +int sw_ipc_mmio_descriptor_init_func_i( + struct sw_driver_io_descriptor *descriptor) +{ + /* Perform any required 'io_remap' calls here */ + struct sw_driver_ipc_mmio_io_descriptor *__ipc_mmio = NULL; + u64 data_address = 0; + + if (!descriptor) { /* Should NEVER happen */ + return -PW_ERROR; + } + if (descriptor->collection_type == SW_IO_IPC) { + __ipc_mmio = &descriptor->ipc_descriptor; + } else { + __ipc_mmio = &descriptor->mmio_descriptor; + } + pw_pr_debug("cmd = %u, sub-cmd = %u, data_addr = 0x%llx\n", + __ipc_mmio->command, __ipc_mmio->sub_command, + __ipc_mmio->data_address); + data_address = __ipc_mmio->data_address; + /* + * if (__ipc_mmio->command || __ipc_mmio->sub_command) { + * __ipc_mmio->ipc_command = + * ((pw_u32_t)__ipc_mmio->sub_command << 12) + * | (pw_u32_t)__ipc_mmio->command; + * } + */ + if (data_address) { + __ipc_mmio->data_remapped_address = + (pw_u64_t)(unsigned long)ioremap_nocache( + (unsigned long)data_address, + descriptor->counter_size_in_bytes); + if ((void *)(unsigned long)__ipc_mmio->data_remapped_address == + NULL) { + return -EIO; + } + pw_pr_debug("mapped addr 0x%llx\n", + __ipc_mmio->data_remapped_address); + if (__ipc_mmio->is_gbe) { + if (!s_gbe_semaphore.hw_semaphore || + !s_gbe_semaphore.fw_semaphore) { + pw_pr_debug("Initializing GBE semaphore\n"); + if (data_address >= GBE_CTRL_OFFSET) { + u64 hw_addr = (data_address - + GBE_CTRL_OFFSET) + + EXTCNF_CTRL; + u64 fw_addr = (data_address - + GBE_CTRL_OFFSET) + + FWSM_CTRL; + s_gbe_semaphore.hw_semaphore = + ioremap_nocache( + (unsigned long)hw_addr, + descriptor + ->counter_size_in_bytes); + s_gbe_semaphore.fw_semaphore = + ioremap_nocache( + (unsigned long)fw_addr, + descriptor + ->counter_size_in_bytes); + if (s_gbe_semaphore.hw_semaphore == + NULL || + s_gbe_semaphore.fw_semaphore == + NULL) { + pw_pr_error( + "couldn't mmap hw/fw semaphores for GBE MMIO op!\n"); + return -EIO; + } + pw_pr_debug( + "GBE has hw_sem = 0x%llx, fw_sem = 0x%llx, size = %u\n", + 
(unsigned long long) + s_gbe_semaphore + .hw_semaphore, + (unsigned long long) + s_gbe_semaphore + .fw_semaphore, + descriptor + ->counter_size_in_bytes); + } + } + } + } + return PW_SUCCESS; +} + +int sw_pch_mailbox_descriptor_init_func_i( + struct sw_driver_io_descriptor *descriptor) +{ + /* Perform any required 'io_remap' calls here */ + struct sw_driver_pch_mailbox_io_descriptor *__pch_mailbox = NULL; + + if (!descriptor) { /* Should NEVER happen */ + return -PW_ERROR; + } + __pch_mailbox = &descriptor->pch_mailbox_descriptor; + pw_pr_debug("pch_mailbox data_addr = 0x%llx\n", + (unsigned long long)__pch_mailbox->data_address); + if (__pch_mailbox->mtpmc_address) { + __pch_mailbox->mtpmc_remapped_address = + (pw_u64_t)(unsigned long)ioremap_nocache( + (unsigned long)__pch_mailbox->mtpmc_address, + descriptor->counter_size_in_bytes); + if ((void *)(unsigned long) + __pch_mailbox->mtpmc_remapped_address == NULL) { + return -PW_ERROR; + } + pw_pr_debug("mtpmc_mapped addr 0x%llx\n", + __pch_mailbox->mtpmc_remapped_address); + } + if (__pch_mailbox->msg_full_sts_address) { + __pch_mailbox->msg_full_sts_remapped_address = + (pw_u64_t)(unsigned long)ioremap_nocache( + (unsigned long) + __pch_mailbox->msg_full_sts_address, + descriptor->counter_size_in_bytes); + if ((void *)(unsigned long)__pch_mailbox + ->msg_full_sts_remapped_address == NULL) { + return -PW_ERROR; + } + pw_pr_debug("msg_full_sts_mapped addr 0x%llx\n", + __pch_mailbox->msg_full_sts_address); + } + if (__pch_mailbox->mfpmc_address) { + __pch_mailbox->mfpmc_remapped_address = + (pw_u64_t)(unsigned long)ioremap_nocache( + (unsigned long)__pch_mailbox->mfpmc_address, + descriptor->counter_size_in_bytes); + if ((void *)(unsigned long) + __pch_mailbox->mfpmc_remapped_address == NULL) { + return -PW_ERROR; + } + pw_pr_debug("mfpmc_mapped addr 0x%llx\n", + __pch_mailbox->mfpmc_remapped_address); + } + return PW_SUCCESS; +} + +int sw_mailbox_descriptor_init_func_i(struct sw_driver_io_descriptor + *descriptor) 
+{ + /* Perform any required 'io_remap' calls here */ + struct sw_driver_mailbox_io_descriptor *__mailbox = NULL; + + if (!descriptor) { /* Should NEVER happen */ + return -PW_ERROR; + } + __mailbox = &descriptor->mailbox_descriptor; + + pw_pr_debug( + "type = %u, interface_address = 0x%llx, data_address = 0x%llx\n", + __mailbox->is_msr_type, __mailbox->interface_address, + __mailbox->data_address); + + if (!__mailbox->is_msr_type) { + if (__mailbox->interface_address) { + __mailbox->interface_remapped_address = + (pw_u64_t)(unsigned long)ioremap_nocache( + (unsigned long) + __mailbox->interface_address, + descriptor->counter_size_in_bytes); + if ((void *)(unsigned long)__mailbox + ->interface_remapped_address == NULL) { + pw_pr_error( + "Couldn't iomap interface_address = 0x%llx\n", + __mailbox->interface_address); + return -PW_ERROR; + } + } + if (__mailbox->data_address) { + __mailbox->data_remapped_address = + (pw_u64_t)(unsigned long)ioremap_nocache( + (unsigned long)__mailbox->data_address, + descriptor->counter_size_in_bytes); + if ((void *)(unsigned long) + __mailbox->data_remapped_address == NULL) { + pw_pr_error( + "Couldn't iomap data_address = 0x%llx\n", + __mailbox->data_address); + return -PW_ERROR; + } + } + pw_pr_debug("OK, mapped addr 0x%llx, 0x%llx\n", + __mailbox->interface_remapped_address, + __mailbox->data_remapped_address); + } + return PW_SUCCESS; +} + +void sw_read_msr_info_i(char *dst_vals, int cpu, + const struct sw_driver_io_descriptor *descriptors, + u16 counter_size_in_bytes) +{ + u64 address = descriptors->msr_descriptor.address; + u32 l = 0, h = 0; + + if (likely(cpu == RAW_CPU())) { + if (rdmsr_safe((unsigned long)address, &l, &h)) { + pw_pr_warn("Failed to read MSR address = 0x%llx\n", address); + } + } else { + if (rdmsr_safe_on_cpu(cpu, (unsigned long)address, &l, &h)) { + pw_pr_warn("Failed to read MSR address = 0x%llx\n", address); + } + } + switch (counter_size_in_bytes) { + case 4: + *((u32 *)dst_vals) = l; + break; + case 8: 
+ *((u64 *)dst_vals) = ((u64)h << 32) | l; + break; + default: + break; + } + return; +} + +#ifdef CONFIG_RPMSG_IPC +#define SW_DO_IPC(cmd, sub_cmd) rpmsg_send_generic_simple_command(cmd, sub_cmd) +#else +#define SW_DO_IPC(cmd, sub_cmd) (-ENODEV) +#endif /* CONFIG_RPMSG_IPC */ + +void sw_read_ipc_info_i(char *dst_vals, int cpu, + const struct sw_driver_io_descriptor *descriptors, + u16 counter_size_in_bytes) +{ + u16 cmd = descriptors->ipc_descriptor.command, + sub_cmd = descriptors->ipc_descriptor.sub_command; + unsigned long remapped_address = + (unsigned long)descriptors->ipc_descriptor.data_remapped_address; + + if (cmd || sub_cmd) { + pw_pr_debug("EXECUTING IPC Cmd = %u, %u\n", cmd, sub_cmd); + if (SW_DO_IPC(cmd, sub_cmd)) { + pw_pr_error("ERROR running IPC command(s)\n"); + return; + } + } + + if (remapped_address) { + /* memcpy(&value, (void *)remapped_address, counter_size_in_bytes); */ + pw_pr_debug("COPYING MMIO size %u\n", counter_size_in_bytes); + memcpy(dst_vals, (void *)remapped_address, + counter_size_in_bytes); + } + pw_pr_debug("Value = %llu\n", *((u64 *)dst_vals)); +} + +static void +sw_read_gbe_mmio_info_i(char *dst_vals, + const struct sw_driver_io_descriptor *descriptors, + u16 counter_size_in_bytes) +{ + u32 hw_val = 0, fw_val = 0; + unsigned long remapped_address = + (unsigned long) + descriptors->mmio_descriptor.data_remapped_address; + u64 write_value = descriptors->write_value; + + memset(dst_vals, 0, counter_size_in_bytes); + + pw_pr_debug( + "hw_sem = 0x%llx, fw_sem = 0x%llx, addr = 0x%lx, dst_vals = 0x%lx, size = %u\n", + (unsigned long long)s_gbe_semaphore.hw_semaphore, + (unsigned long long)s_gbe_semaphore.fw_semaphore, + remapped_address, (unsigned long)dst_vals, + counter_size_in_bytes); + if (!s_gbe_semaphore.hw_semaphore || !s_gbe_semaphore.fw_semaphore || + !remapped_address) { + return; + } + + memcpy_fromio(&hw_val, s_gbe_semaphore.hw_semaphore, sizeof(hw_val)); + memcpy_fromio(&fw_val, s_gbe_semaphore.fw_semaphore, 
sizeof(fw_val)); + pw_pr_debug("HW_VAL = 0x%lx, FW_VAL = 0x%lx\n", (unsigned long)hw_val, + (unsigned long)fw_val); + if (!IS_HW_SEMAPHORE_SET(hw_val) && !IS_FW_SEMAPHORE_SET(fw_val)) { + memcpy_toio((volatile void __iomem *)remapped_address, + &write_value, + 4 /* counter_size_in_bytes*/); + memcpy_fromio(dst_vals, + (volatile void __iomem *)remapped_address, + counter_size_in_bytes); + } +} +void sw_read_mmio_info_i(char *dst_vals, int cpu, + const struct sw_driver_io_descriptor *descriptors, + u16 counter_size_in_bytes) +{ + unsigned long remapped_address = + (unsigned long) + descriptors->mmio_descriptor.data_remapped_address; + if (descriptors->mmio_descriptor.is_gbe) { + /* MMIO for GBE requires a mailbox-like operation */ + sw_read_gbe_mmio_info_i(dst_vals, descriptors, + counter_size_in_bytes); + } else { + if (remapped_address) { + memcpy_fromio(dst_vals, + (volatile void __iomem *)remapped_address, + counter_size_in_bytes); + } + } + pw_pr_debug("Value = %llu\n", *((u64 *)dst_vals)); +} + +void sw_read_pch_mailbox_info_i(char *dst_vals, int cpu, + const struct sw_driver_io_descriptor + *descriptor, u16 counter_size_in_bytes) +{ + /* + * TODO: spinlock? 
+ */ + const struct sw_driver_pch_mailbox_io_descriptor *pch_mailbox = + &descriptor->pch_mailbox_descriptor; + u32 address = pch_mailbox->data_address; + u64 mtpmc_remapped_address = pch_mailbox->mtpmc_remapped_address; + u64 msg_full_sts_remapped_address = + pch_mailbox->msg_full_sts_remapped_address; + u64 mfpmc_remapped_address = pch_mailbox->mfpmc_remapped_address; + + /* + * write address of desired device counter to request + * from PMC (shift and add 2 to format device offset) + */ + if (mtpmc_remapped_address) { + int iter = 0; + u32 written_val = 0; + u32 write_value = + (address << 16) + + 2; /* shift and add 2 to format device offset */ + memcpy_toio( + (volatile void __iomem *) + (unsigned long)mtpmc_remapped_address, + &write_value, 4 /*counter_size_in_bytes*/); + /* + * Check if address has been written using a while loop in + * order to wait for the PMC to consume that address + * and to introduce sufficient delay so that the message full + * status bit has time to flip. This should ensure + * all is ready when begin the wait loop for it to turn 0, + * which indicates the value is available to be read. + * (This fixes problem where values being read were huge.) + */ + do { + memcpy_fromio(&written_val, + (volatile void __iomem *)(unsigned long) + mtpmc_remapped_address, + 4 /*counter_size_in_bytes*/); + pw_pr_debug( + "DEBUG: written_val = 0x%x, address = 0x%x\n", + written_val, address); + udelay(USEC_DELAY); + } while ((written_val >> 16) != address && ++iter < NUM_RETRY); + } + + /* + * wait for PMC to set status indicating that device counter + * is available for read. 
+ */ + if (msg_full_sts_remapped_address) { + u32 status_wait = 0; + int iter = 0; + + do { + memcpy_fromio(&status_wait, + (volatile void __iomem *)(unsigned long) + msg_full_sts_remapped_address, + 4 /*counter_size_in_bytes*/); + pw_pr_debug("DEBUG: status_wait = 0x%x\n", status_wait); + udelay(USEC_DELAY); + } while ((status_wait & 0x01000000) >> 24 && + ++iter < NUM_RETRY); + } + + /* + * read device counter + */ + if (mfpmc_remapped_address) { + memcpy_fromio( + dst_vals, + (volatile void __iomem *) + (unsigned long)mfpmc_remapped_address, + 4 /*counter_size_in_bytes*/); + pw_pr_debug("DEBUG: read value = 0x%x\n", + *((pw_u32_t *)dst_vals)); + } +} + +void sw_read_mailbox_info_i(char *dst_vals, int cpu, + const struct sw_driver_io_descriptor *descriptor, + u16 counter_size_in_bytes) +{ + /* + * TODO: spinlock? + */ + const struct sw_driver_mailbox_io_descriptor *mailbox = + &descriptor->mailbox_descriptor; + unsigned long interface_address = mailbox->interface_address; + unsigned long interface_remapped_address = + mailbox->interface_remapped_address; + unsigned long data_address = mailbox->data_address; + size_t iter = 0; + + if (mailbox->is_msr_type) { + u64 command = 0; + + if (rdmsrl_safe(interface_address, &command)) { + pw_pr_warn("Failed to read MSR address = 0x%llx\n", + interface_address); + } + command &= mailbox->command_mask; + command |= mailbox->command | (u64)0x1 << mailbox->run_busy_bit; + wrmsrl_safe(interface_address, command); + do { + udelay(1); + if (rdmsrl_safe(interface_address, &command)) { + pw_pr_warn("Failed to read MSR address = 0x%llx\n", + interface_address); + } + } while ((command & ((u64)0x1 << mailbox->run_busy_bit)) && + ++iter < MAX_MAILBOX_ITERS); + if (iter >= MAX_MAILBOX_ITERS) { + pw_pr_error("Couldn't write to BIOS mailbox\n"); + command = 0; + } else { + if (rdmsrl_safe(data_address, &command)) { + pw_pr_warn("Failed to read MSR address = 0x%llx\n", + data_address); + } + } + *((u64 *)dst_vals) = command; + } else { + 
u32 command = 0; + const size_t counter_size = + 4; /* Always use 4 bytes, regardless of + *'counter_size_in_bytes' + */ + memcpy_fromio(&command, + (volatile void __iomem *)(unsigned long) + interface_remapped_address, + sizeof(command)); + command &= mailbox->command_mask; + command |= (u32)mailbox->command | + (u32)0x1 << mailbox->run_busy_bit; + memcpy_toio((volatile void __iomem *)(unsigned long) + interface_remapped_address, + &command, sizeof(command)); + do { + udelay(1); + memcpy_fromio(&command, + (volatile void __iomem *)(unsigned long) + interface_remapped_address, + sizeof(command)); + } while ((command & ((u32)0x1 << mailbox->run_busy_bit)) && + ++iter < MAX_MAILBOX_ITERS); + if (iter >= MAX_MAILBOX_ITERS) { + pw_pr_error("Couldn't write to BIOS mailbox\n"); + command = 0; + } else { + memcpy_fromio(&command, + (volatile void __iomem *)(unsigned long) + mailbox->data_remapped_address, + counter_size); + } + *((u32 *)dst_vals) = command; + } +} + +void sw_read_pci_info_i(char *dst_vals, int cpu, + const struct sw_driver_io_descriptor *descriptors, + u16 counter_size_in_bytes) +{ + u32 bus = descriptors->pci_descriptor.bus, + device = descriptors->pci_descriptor.device; + u32 function = descriptors->pci_descriptor.function, + offset = descriptors->pci_descriptor.offset; + u32 data32 = 0; + u64 data64 = 0; + + switch (counter_size_in_bytes) { + case 4: + data32 = sw_platform_pci_read32(bus, device, function, + 0 /* CTRL-OFFSET */, + 0 /* CTRL-DATA, don't care */, + offset /* DATA-OFFSET */); + *((u32 *)dst_vals) = data32; + break; + case 8: + data64 = sw_platform_pci_read64(bus, device, function, + 0 /* CTRL-OFFSET */, + 0 /* CTRL-DATA, don't care */, + offset /* DATA-OFFSET */); + *((u64 *)dst_vals) = data64; + break; + default: + pw_pr_error("ERROR: invalid read size = %u\n", + counter_size_in_bytes); + return; + } + return; +} +void sw_read_configdb_info_i(char *dst_vals, int cpu, + const struct sw_driver_io_descriptor *descriptors, + u16 
counter_size_in_bytes) +{ + { + pw_u32_t address = descriptors->configdb_descriptor.address; + u32 data = sw_platform_configdb_read32(address); + + pw_pr_debug( + "ADDRESS = 0x%x, CPU = %d, dst_vals = %p, counter size = %u, data = %u\n", + address, cpu, dst_vals, counter_size_in_bytes, data); + /* + * 'counter_size_in_bytes' is ignored, for now. + */ + *((u32 *)dst_vals) = data; + } + return; +} +void sw_read_socperf_info_i(char *dst_vals, int cpu, + const struct sw_driver_io_descriptor *descriptors, + u16 counter_size_in_bytes) +{ +#if IS_ENABLED(CONFIG_INTEL_SOCPERF) + u64 *socperf_buffer = (u64 *)dst_vals; + + memset(socperf_buffer, 0, counter_size_in_bytes); + SOCPERF_Read_Data2(socperf_buffer); +#endif /* IS_ENABLED(CONFIG_INTEL_SOCPERF) */ + return; +} + +/** + * Decide if the socperf interface is available for use + * @returns true if available + */ +bool sw_socperf_available_i(void) +{ + bool retVal = false; +#if IS_ENABLED(CONFIG_INTEL_SOCPERF) + retVal = true; +#endif /* IS_ENABLED(CONFIG_INTEL_SOCPERF) */ + return retVal; +} + +/** + * sw_platform_configdb_read32 - for reading PCI space through config registers + * of the platform. + * @address: An address in the PCI space + * + * Returns: the value read from address. 
+ */ +u32 sw_platform_configdb_read32(u32 address) +{ + u32 read_value = 0; +#if DO_DIRECT_PCI_READ_WRITE + read_value = + sw_platform_pci_read32(0 /*bus*/, + 0 /*device*/, + 0 /*function*/, + SW_PCI_MSG_CTRL_REG /*ctrl-offset*/, + address /*ctrl-value*/, + SW_PCI_MSG_DATA_REG /*data-offset*/); +#else /* !DO_DIRECT_PCI_READ_WRITE */ + read_value = intel_mid_msgbus_read32_raw(address); +#endif /* if DO_DIRECT_PCI_READ_WRITE */ + pw_pr_debug("address = %u, value = %u\n", address, read_value); + return read_value; +} + +u32 sw_platform_pci_read32(u32 bus, u32 device, u32 function, u32 write_offset, + u32 write_value, u32 read_offset) +{ + u32 read_value = 0; + struct pci_dev *pci_root = pci_get_domain_bus_and_slot( + 0, bus, PCI_DEVFN(device, function)); /* 0, PCI_DEVFN(0, 0)); */ + if (!pci_root) { + return 0; /* Application will verify the data */ + } + if (write_offset) { + pci_write_config_dword( + pci_root, write_offset, + write_value); /* SW_PCI_MSG_CTRL_REG, address); */ + } + pci_read_config_dword( + pci_root, read_offset, + &read_value); /* SW_PCI_MSG_DATA_REG, &read_value); */ + return read_value; +} + +u64 sw_platform_pci_read64(u32 bus, u32 device, u32 function, u32 write_offset, + u32 write_value, u32 read_offset) +{ + u32 lo = sw_platform_pci_read32(bus, device, function, + 0 /* CTRL-OFFSET */, + 0 /* CTRL-DATA, don't care */, + read_offset /* DATA-OFFSET */); + u32 hi = sw_platform_pci_read32(bus, device, function, + 0 /* CTRL-OFFSET */, + 0 /* CTRL-DATA, don't care */, + read_offset + 4 /* DATA-OFFSET */); + return ((u64)hi << 32) | lo; +} + +void sw_write_msr_info_i(char *dst_vals, int cpu, + const struct sw_driver_io_descriptor *descriptor, + u16 counter_size_in_bytes) +{ + u64 write_value = descriptor->write_value; + u64 address = descriptor->msr_descriptor.address; + + pw_pr_debug( + "ADDRESS = 0x%llx, CPU = %d, counter size = %u, value = %llu\n", + address, cpu, counter_size_in_bytes, write_value); + if (likely(cpu == RAW_CPU())) { + 
wrmsrl_safe((unsigned long)address, write_value); + } else { + u32 l = write_value & 0xffffffff, + h = (write_value >> 32) & 0xffffffff; + wrmsr_safe_on_cpu(cpu, (u32)address, l, h); + } + return; +}; + +void sw_write_mmio_info_i(char *dst_vals, int cpu, + const struct sw_driver_io_descriptor *descriptor, + u16 counter_size_in_bytes) +{ + unsigned long remapped_address = + (unsigned long) + descriptor->mmio_descriptor.data_remapped_address; + u64 write_value = descriptor->write_value; + + if (remapped_address) { + memcpy_toio((volatile void __iomem *)remapped_address, + &write_value, + counter_size_in_bytes); + } + pw_pr_debug("Value = %llu\n", *((u64 *)dst_vals)); +}; + +void sw_write_mailbox_info_i(char *dst_vals, int cpu, + const struct sw_driver_io_descriptor *descriptor, + u16 counter_size_in_bytes) +{ + /* + * TODO: spinlock? + */ + const struct sw_driver_mailbox_io_descriptor *mailbox = + &descriptor->mailbox_descriptor; + unsigned long interface_address = mailbox->interface_address; + unsigned long interface_remapped_address = + mailbox->interface_remapped_address; + unsigned long data_address = mailbox->data_address; + u64 data = descriptor->write_value; + size_t iter = 0; + + if (mailbox->is_msr_type) { + u64 command = 0; + + if (rdmsrl_safe(interface_address, &command)) { + pw_pr_warn("Failed to read MSR address = 0x%llx\n", + interface_address); + } + command &= mailbox->command_mask; + command |= mailbox->command | (u64)0x1 << mailbox->run_busy_bit; + wrmsrl_safe(data_address, data); + wrmsrl_safe(interface_address, command); + do { + if (rdmsrl_safe(interface_address, &command)) { + pw_pr_warn("Failed to read MSR address = 0x%llx\n", + interface_address); + } + } while ((command & ((u64)0x1 << mailbox->run_busy_bit)) && + ++iter < MAX_MAILBOX_ITERS); + } else { + u32 command = 0; + + memcpy_fromio(&command, + (volatile void __iomem *)(unsigned long) + interface_remapped_address, + sizeof(command)); + command &= mailbox->command_mask; + command |= 
(u32)mailbox->command | + (u32)0x1 << mailbox->run_busy_bit; + memcpy_toio((volatile void __iomem *)(unsigned long) + mailbox->data_remapped_address, + &data, sizeof(data)); + memcpy_toio((volatile void __iomem *)(unsigned long) + interface_remapped_address, + &command, sizeof(command)); + do { + memcpy_fromio(&command, + (volatile void __iomem *)(unsigned long) + interface_remapped_address, + sizeof(command)); + } while ((command & ((u32)0x1 << mailbox->run_busy_bit)) && + ++iter < MAX_MAILBOX_ITERS); + } +} + +void sw_write_pci_info_i(char *dst_vals, int cpu, + const struct sw_driver_io_descriptor *descriptor, + u16 counter_size_in_bytes) +{ + u32 bus = descriptor->pci_descriptor.bus, + device = descriptor->pci_descriptor.device; + u32 function = descriptor->pci_descriptor.function, + offset = descriptor->pci_descriptor.offset; + u32 write_value = (u32)descriptor->write_value; + /* + * 'counter_size_in_bytes' is ignored for now. + */ + if (!sw_platform_pci_write32(bus, device, function, offset, + write_value)) { + pw_pr_error("ERROR writing to PCI B/D/F/O %u/%u/%u/%u\n", bus, + device, function, offset); + } else { + pw_pr_debug( + "OK, successfully wrote to PCI B/D/F/O %u/%u/%u/%u\n", + bus, device, function, offset); + } + return; +}; + +/* + * Write to PCI space via config registers. 
+ */ +bool sw_platform_pci_write32(u32 bus, u32 device, u32 function, + u32 write_offset, u32 data_value) +{ + struct pci_dev *pci_root = pci_get_domain_bus_and_slot( + 0, bus, PCI_DEVFN(device, function)); /* 0, PCI_DEVFN(0, 0)); */ + if (!pci_root) { + return false; + } + + pci_write_config_dword(pci_root, write_offset, data_value); + + return true; +}; + +int sw_print_msr_io_descriptor(const struct sw_driver_io_descriptor *descriptor) +{ + if (!descriptor) { + return -PW_ERROR; + } + pw_pr_debug("MSR address = 0x%llx\n", + descriptor->msr_descriptor.address); + return PW_SUCCESS; +} + +int sw_ipc_mmio_descriptor_reset_func_i( + const struct sw_driver_io_descriptor *descriptor) +{ + /* Unmap previously mapped memory here */ + struct sw_driver_ipc_mmio_io_descriptor *__ipc_mmio = NULL; + + if (!descriptor) { /* Should NEVER happen */ + return -PW_ERROR; + } + if (descriptor->collection_type == SW_IO_IPC) { + __ipc_mmio = + (struct sw_driver_ipc_mmio_io_descriptor *)&descriptor + ->ipc_descriptor; + } else { + __ipc_mmio = + (struct sw_driver_ipc_mmio_io_descriptor *)&descriptor + ->mmio_descriptor; + } + if (__ipc_mmio->data_remapped_address) { + pw_pr_debug("unmapping addr 0x%llx\n", + __ipc_mmio->data_remapped_address); + iounmap((volatile void __iomem *)(unsigned long) + __ipc_mmio->data_remapped_address); + __ipc_mmio->data_remapped_address = 0; + } + /* Uninitialize the GBE, if it wasn't already done */ + if (s_gbe_semaphore.hw_semaphore || s_gbe_semaphore.fw_semaphore) { + pw_pr_debug("Uninitializing gbe!\n"); + if (s_gbe_semaphore.hw_semaphore) { + iounmap(s_gbe_semaphore.hw_semaphore); + } + if (s_gbe_semaphore.fw_semaphore) { + iounmap(s_gbe_semaphore.fw_semaphore); + } + memset(&s_gbe_semaphore, 0, sizeof(s_gbe_semaphore)); + } + return PW_SUCCESS; +} + +int sw_pch_mailbox_descriptor_reset_func_i( + const struct sw_driver_io_descriptor *descriptor) +{ + /* Unmap previously mapped memory here */ + struct sw_driver_pch_mailbox_io_descriptor *__pch_mailbox 
= NULL; + + if (!descriptor) { /* Should NEVER happen */ + return -PW_ERROR; + } + __pch_mailbox = + (struct sw_driver_pch_mailbox_io_descriptor *)&descriptor + ->pch_mailbox_descriptor; + if (__pch_mailbox->mtpmc_remapped_address) { + pw_pr_debug("unmapping addr 0x%llx\n", + __pch_mailbox->mtpmc_remapped_address); + iounmap((volatile void __iomem *)(unsigned long) + __pch_mailbox->mtpmc_remapped_address); + __pch_mailbox->mtpmc_remapped_address = 0; + } + if (__pch_mailbox->msg_full_sts_remapped_address) { + pw_pr_debug("unmapping addr 0x%llx\n", + __pch_mailbox->msg_full_sts_remapped_address); + iounmap((volatile void __iomem *)(unsigned long) + __pch_mailbox->msg_full_sts_remapped_address); + __pch_mailbox->msg_full_sts_remapped_address = 0; + } + if (__pch_mailbox->mfpmc_remapped_address) { + pw_pr_debug("unmapping addr 0x%llx\n", + __pch_mailbox->mfpmc_remapped_address); + iounmap((volatile void __iomem *)(unsigned long) + __pch_mailbox->mfpmc_remapped_address); + __pch_mailbox->mfpmc_remapped_address = 0; + } + return PW_SUCCESS; +} + +int sw_mailbox_descriptor_reset_func_i( + const struct sw_driver_io_descriptor *descriptor) +{ + /* Unmap previously mapped memory here */ + struct sw_driver_mailbox_io_descriptor *__mailbox = NULL; + + if (!descriptor) { /* Should NEVER happen */ + return -PW_ERROR; + } + __mailbox = (struct sw_driver_mailbox_io_descriptor *)&descriptor + ->mailbox_descriptor; + if (!__mailbox->is_msr_type) { + if (__mailbox->interface_remapped_address) { + pw_pr_debug("unmapping addr 0x%llx\n", + __mailbox->interface_remapped_address); + iounmap((volatile void __iomem *)(unsigned long) + __mailbox->interface_remapped_address); + __mailbox->interface_remapped_address = 0; + } + if (__mailbox->data_remapped_address) { + pw_pr_debug("unmapping addr 0x%llx\n", + __mailbox->data_remapped_address); + iounmap((volatile void __iomem *)(unsigned long) + __mailbox->data_remapped_address); + __mailbox->data_remapped_address = 0; + } + } + return 
PW_SUCCESS; +} + +#define NUM_HW_OPS SW_ARRAY_SIZE(s_hw_ops) +#define FOR_EACH_HW_OP(idx, op) \ + for (idx = 0; idx < NUM_HW_OPS && (op = &s_hw_ops[idx]); ++idx) + +int sw_register_ops_providers(void) +{ + size_t idx = 0; + const struct sw_hw_ops *op = NULL; + + FOR_EACH_HW_OP(idx, op) + { + if (op->name && sw_register_hw_op(op)) { + pw_pr_error("ERROR registering provider %s\n", + op->name); + return -EIO; + } + } + return PW_SUCCESS; +} + +void sw_free_ops_providers(void) +{ + /* NOP */ +} diff --git a/drivers/platform/x86/socwatch/sw_output_buffer.c b/drivers/platform/x86/socwatch/sw_output_buffer.c new file mode 100644 index 0000000000000..a0c1c5fedd059 --- /dev/null +++ b/drivers/platform/x86/socwatch/sw_output_buffer.c @@ -0,0 +1,598 @@ +/* + + This file is provided under a dual BSD/GPLv2 license. When using or + redistributing this file, you may do so under either license. + + GPL LICENSE SUMMARY + + Copyright(c) 2014 - 2018 Intel Corporation. + + This program is free software; you can redistribute it and/or modify + it under the terms of version 2 of the GNU General Public License as + published by the Free Software Foundation. + + This program is distributed in the hope that it will be useful, but + WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + General Public License for more details. + + Contact Information: + SoC Watch Developer Team + Intel Corporation, + 1300 S Mopac Expwy, + Austin, TX 78746 + + BSD LICENSE + + Copyright(c) 2014 - 2018 Intel Corporation. + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions + are met: + + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. 
+ * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in + the documentation and/or other materials provided with the + distribution. + * Neither the name of Intel Corporation nor the names of its + contributors may be used to endorse or promote products derived + from this software without specific prior written permission. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +*/ + +#include "sw_internal.h" +#include "sw_output_buffer.h" +#include "sw_kernel_defines.h" +#include "sw_mem.h" +#include "sw_lock_defs.h" +#include "sw_overhead_measurements.h" + +/* ------------------------------------------------- + * Compile time constants and macros. + * ------------------------------------------------- + */ +#define NUM_SEGS_PER_BUFFER 2 /* MUST be pow 2! */ +#define NUM_SEGS_PER_BUFFER_MASK (NUM_SEGS_PER_BUFFER - 1) +/* + * The size of the 'buffer' data array in each segment. + */ +#define SW_SEG_DATA_SIZE (sw_buffer_alloc_size) +/* + * Min size of per-cpu output buffers. + */ +#define SW_MIN_SEG_SIZE_BYTES (1 << 10) /* 1kB */ +#define SW_MIN_OUTPUT_BUFFER_SIZE (SW_MIN_SEG_SIZE_BYTES * NUM_SEGS_PER_BUFFER) +/* + * A symbolic constant for an empty buffer index. 
+ */ +#define EMPTY_SEG (-1) +/* + * How much space is available in a given segment? + */ +#define EMPTY_TSC ((u64)-1) +#define SEG_IS_FULL(seg) \ + ({ \ + bool __full = false; \ + smp_mb(); \ + __full = ((seg)->is_full != EMPTY_TSC); \ + __full; \ + }) +#define SEG_SET_FULL(seg, tsc) \ + do { \ + (seg)->is_full = (tsc); \ + smp_mb(); \ + } while (0) +#define SEG_SET_EMPTY(seg) \ + do { \ + barrier(); \ + (seg)->bytes_written = 0; \ + SEG_SET_FULL(seg, EMPTY_TSC); \ + /*smp_mb(); */ \ + } while (0) +#define SPACE_AVAIL(seg) (SW_SEG_DATA_SIZE - (seg)->bytes_written) +#define SEG_IS_EMPTY(seg) (SPACE_AVAIL(seg) == SW_SEG_DATA_SIZE) + +#define GET_OUTPUT_BUFFER(cpu) (&per_cpu_output_buffers[(cpu)]) +/* + * Convenience macro: iterate over each segment in a per-cpu output buffer. + */ +#define for_each_segment(i) for (i = 0; i < NUM_SEGS_PER_BUFFER; ++i) +#define for_each_seg(buffer, seg) \ + for (int i = 0; \ + i < NUM_SEGS_PER_BUFFER && (seg = (buffer)->segments[i]); ++i) +/* + * How many buffers are we using? + */ +#define GET_NUM_OUTPUT_BUFFERS() (sw_max_num_cpus + 1) +/* + * Convenience macro: iterate over each per-cpu output buffer. + */ +#define for_each_output_buffer(i) for (i = 0; i < GET_NUM_OUTPUT_BUFFERS(); ++i) + +/* ------------------------------------------------- + * Local data structures. 
+ * ------------------------------------------------- + */ +typedef struct sw_data_buffer sw_data_buffer_t; +typedef struct sw_output_buffer sw_output_buffer_t; +struct sw_data_buffer { + u64 is_full; + u32 bytes_written; + char *buffer; +} __attribute__((packed)); +#define SW_SEG_HEADER_SIZE() (sizeof(struct sw_data_buffer) - sizeof(char *)) + +struct sw_output_buffer { + sw_data_buffer_t buffers[NUM_SEGS_PER_BUFFER]; + int buff_index; + u32 produced_samples; + u32 dropped_samples; + int last_seg_read; + unsigned int mem_alloc_size; + unsigned long free_pages; +} ____cacheline_aligned_in_smp; + +/* ------------------------------------------------- + * Function declarations. + * ------------------------------------------------- + */ +extern u64 sw_timestamp(void); + +/* ------------------------------------------------- + * Variable definitions. + * ------------------------------------------------- + */ +u64 sw_num_samples_produced = 0, sw_num_samples_dropped = 0; +int sw_max_num_cpus = -1; + +DECLARE_OVERHEAD_VARS(sw_produce_generic_msg_i); +/* + * Per-cpu output buffers. + */ +static sw_output_buffer_t *per_cpu_output_buffers; +/* + * Variables for book keeping. + */ +static volatile int sw_last_cpu_read = -1; +static volatile s32 sw_last_mask = -1; +/* + * Lock for the polled buffer. + */ +SW_DECLARE_SPINLOCK(sw_polled_lock); +/* + * Buffer allocation size. + */ +unsigned long sw_buffer_alloc_size = (1 << 16); /* 64 KB */ + +/* ------------------------------------------------- + * Function definitions. 
+ * ------------------------------------------------- + */ + +static char *reserve_seg_space_i(size_t size, int cpu, bool *should_wakeup, + u64 *reservation_tsc) +{ + sw_output_buffer_t *buffer = GET_OUTPUT_BUFFER(cpu); + int i = 0; + int buff_index = buffer->buff_index; + char *dst = NULL; + + if (buff_index < 0 || buff_index >= NUM_SEGS_PER_BUFFER) { + goto prod_seg_done; + } + for_each_segment(i) { + sw_data_buffer_t *seg = &buffer->buffers[buff_index]; + + if (SEG_IS_FULL(seg) == false) { + if (SPACE_AVAIL(seg) >= size) { + *reservation_tsc = sw_timestamp(); + dst = &seg->buffer[seg->bytes_written]; + seg->bytes_written += size; + smp_mb(); + buffer->buff_index = buff_index; + buffer->produced_samples++; + goto prod_seg_done; + } + SEG_SET_FULL(seg, sw_timestamp()); + } + buff_index = CIRCULAR_INC(buff_index, NUM_SEGS_PER_BUFFER_MASK); + *should_wakeup = true; + } +prod_seg_done: + if (!dst) { + buffer->dropped_samples++; + } + return dst; +}; + +static int sw_produce_polled_msg_i(struct sw_driver_msg *msg, + enum sw_wakeup_action action) +{ + int cpu = GET_POLLED_CPU(); + bool should_wakeup = false; + int retVal = PW_SUCCESS; + + if (!msg) { + return -PW_ERROR; + } + pw_pr_debug("POLLED! cpu = %d\n", cpu); + LOCK(sw_polled_lock); + { + size_t size = SW_DRIVER_MSG_HEADER_SIZE() + msg->payload_len; + char *dst = reserve_seg_space_i(size, cpu, &should_wakeup, + &msg->tsc); + if (dst) { + /* + * Assign a special CPU number to this CPU. + * This is OK, because messages enqueued in this buffer + * are always CPU agnostic (otherwise they would + * be invoked from within a preempt_disable()d context + * in 'sw_handle_collector_node_i()', which ensures they + * will be enqueued within the + * 'sw_produce_generic_msg_on_cpu()' function). 
+ */ + msg->cpuidx = cpu; + memcpy(dst, msg, SW_DRIVER_MSG_HEADER_SIZE()); + dst += SW_DRIVER_MSG_HEADER_SIZE(); + memcpy(dst, msg->p_payload, msg->payload_len); + } else { + pw_pr_debug("NO space in polled msg!\n"); + retVal = -PW_ERROR; + } + } + UNLOCK(sw_polled_lock); + if (unlikely(should_wakeup)) { + sw_wakeup_reader(action); + } + return retVal; +}; + +static int sw_produce_generic_msg_i(struct sw_driver_msg *msg, + enum sw_wakeup_action action) +{ + int retval = PW_SUCCESS; + bool should_wakeup = false; + int cpu = -1; + unsigned long flags = 0; + + if (!msg) { + pw_pr_error("ERROR: CANNOT produce a NULL msg!\n"); + return -PW_ERROR; + } + +#ifdef CONFIG_PREEMPT_COUNT + if (!in_atomic()) { + return sw_produce_polled_msg(msg, action); + } +#endif + + cpu = sw_get_cpu(&flags); + { + size_t size = msg->payload_len + SW_DRIVER_MSG_HEADER_SIZE(); + char *dst = reserve_seg_space_i(size, cpu, &should_wakeup, + &msg->tsc); + if (likely(dst)) { + memcpy(dst, msg, SW_DRIVER_MSG_HEADER_SIZE()); + dst += SW_DRIVER_MSG_HEADER_SIZE(); + memcpy(dst, msg->p_payload, msg->payload_len); + } else { + retval = -PW_ERROR; + } + } + sw_put_cpu(flags); + + if (unlikely(should_wakeup)) { + sw_wakeup_reader(action); + } + + return retval; +}; + +int sw_produce_polled_msg(struct sw_driver_msg *msg, + enum sw_wakeup_action action) +{ + return DO_PER_CPU_OVERHEAD_FUNC_RET(int, sw_produce_polled_msg_i, msg, + action); +}; + +int sw_produce_generic_msg(struct sw_driver_msg *msg, + enum sw_wakeup_action action) +{ + return DO_PER_CPU_OVERHEAD_FUNC_RET(int, sw_produce_generic_msg_i, msg, + action); +}; + +static int sw_init_per_cpu_buffers_i(unsigned long per_cpu_mem_size) +{ + int cpu = -1; + + per_cpu_output_buffers = (sw_output_buffer_t *)sw_kmalloc( + sizeof(sw_output_buffer_t) * GET_NUM_OUTPUT_BUFFERS(), + GFP_KERNEL | __GFP_ZERO); + if (per_cpu_output_buffers == NULL) { + pw_pr_error( + "ERROR allocating space for per-cpu output buffers!\n"); + sw_destroy_per_cpu_buffers(); + return 
-PW_ERROR; + } + for_each_output_buffer(cpu) { + sw_output_buffer_t *buffer = &per_cpu_output_buffers[cpu]; + char *buff = NULL; + int i = 0; + + buffer->mem_alloc_size = per_cpu_mem_size; + buffer->free_pages = + sw_allocate_pages(GFP_KERNEL | __GFP_ZERO, + (unsigned int)per_cpu_mem_size); + if (buffer->free_pages == 0) { + pw_pr_error("ERROR allocating pages for buffer [%d]!\n", + cpu); + sw_destroy_per_cpu_buffers(); + return -PW_ERROR; + } + buff = (char *)buffer->free_pages; + for_each_segment(i) { + buffer->buffers[i].buffer = (char *)buff; + buff += SW_SEG_DATA_SIZE; + } + } + pw_pr_debug("PER_CPU_MEM_SIZE = %lu, order = %u\n", + (unsigned long)per_cpu_mem_size, + get_order(per_cpu_mem_size)); + return PW_SUCCESS; +}; + +int sw_init_per_cpu_buffers(void) +{ + unsigned int per_cpu_mem_size = sw_get_output_buffer_size(); + + pw_pr_debug("Buffer alloc size = %ld\n", sw_buffer_alloc_size); + + if (GET_NUM_OUTPUT_BUFFERS() <= 0) { + pw_pr_error("ERROR: max # output buffers= %d\n", + GET_NUM_OUTPUT_BUFFERS()); + return -PW_ERROR; + } + + pw_pr_debug("DEBUG: sw_max_num_cpus = %d, num output buffers = %d\n", + sw_max_num_cpus, GET_NUM_OUTPUT_BUFFERS()); + + /* + * Try to allocate per-cpu buffers. If allocation fails, + * decrease buffer size and retry. Stop trying if size + * drops below 2KB (which means 1KB for each buffer). + */ + while (per_cpu_mem_size >= SW_MIN_OUTPUT_BUFFER_SIZE && + sw_init_per_cpu_buffers_i(per_cpu_mem_size)) { + pw_pr_debug( + "WARNING: couldn't allocate per-cpu buffers with size %u -- trying smaller size!\n", + per_cpu_mem_size); + sw_buffer_alloc_size >>= 1; + per_cpu_mem_size = sw_get_output_buffer_size(); + } + + if (unlikely(per_cpu_output_buffers == NULL)) { + pw_pr_error( + "ERROR: couldn't allocate space for per-cpu output buffers!\n"); + return -PW_ERROR; + } + /* + * Initialize our locks. 
+ */ + SW_INIT_SPINLOCK(sw_polled_lock); + + pw_pr_debug("OK, allocated per-cpu buffers with size = %lu\n", + (unsigned long)per_cpu_mem_size); + + if (sw_init_reader_queue()) { + pw_pr_error("ERROR initializing reader subsys\n"); + return -PW_ERROR; + } + + return PW_SUCCESS; +}; + +void sw_destroy_per_cpu_buffers(void) +{ + int cpu = -1; + + /* + * Perform lock finalization. + */ + SW_DESTROY_SPINLOCK(sw_polled_lock); + + if (per_cpu_output_buffers != NULL) { + for_each_output_buffer(cpu) { + sw_output_buffer_t *buffer = + &per_cpu_output_buffers[cpu]; + if (buffer->free_pages != 0) { + sw_release_pages(buffer->free_pages, + buffer->mem_alloc_size); + buffer->free_pages = 0; + } + } + sw_kfree(per_cpu_output_buffers); + per_cpu_output_buffers = NULL; + } +}; + +void sw_reset_per_cpu_buffers(void) +{ + int cpu = 0, i = 0; + + for_each_output_buffer(cpu) { + sw_output_buffer_t *buffer = GET_OUTPUT_BUFFER(cpu); + + buffer->buff_index = buffer->dropped_samples = + buffer->produced_samples = 0; + buffer->last_seg_read = -1; + + for_each_segment(i) { + sw_data_buffer_t *seg = &buffer->buffers[i]; + + memset(seg->buffer, 0, SW_SEG_DATA_SIZE); + SEG_SET_EMPTY(seg); + } + } + sw_last_cpu_read = -1; + sw_last_mask = -1; + pw_pr_debug("OK, reset per-cpu output buffers!\n"); +}; + +bool sw_any_seg_full(u32 *val, bool is_flush_mode) +{ + int num_visited = 0, i = 0; + + if (!val) { + pw_pr_error("ERROR: NULL ptrs in sw_any_seg_full!\n"); + return false; + } + + *val = SW_NO_DATA_AVAIL_MASK; + pw_pr_debug("Checking for full seg: val = %u, flush = %s\n", *val, + GET_BOOL_STRING(is_flush_mode)); + for_each_output_buffer(num_visited) { + int min_seg = EMPTY_SEG, non_empty_seg = EMPTY_SEG; + u64 min_tsc = EMPTY_TSC; + sw_output_buffer_t *buffer = NULL; + + if (++sw_last_cpu_read >= GET_NUM_OUTPUT_BUFFERS()) { + sw_last_cpu_read = 0; + } + buffer = GET_OUTPUT_BUFFER(sw_last_cpu_read); + for_each_segment(i) { + sw_data_buffer_t *seg = &buffer->buffers[i]; + u64 seg_tsc = 
seg->is_full; + + if (SEG_IS_EMPTY(seg)) { + continue; + } + non_empty_seg = i; + if (seg_tsc < min_tsc) { + /* + * Can only happen if seg was full, + * provided 'EMPTY_TSC' is set to "(u64)-1" + */ + min_tsc = seg_tsc; + min_seg = i; + } + } + if (min_seg != EMPTY_SEG) { + *val = (sw_last_cpu_read & 0xffff) << 16 | + (min_seg & 0xffff); + return true; + } else if (is_flush_mode && non_empty_seg != EMPTY_SEG) { + *val = (sw_last_cpu_read & 0xffff) << 16 | + (non_empty_seg & 0xffff); + return true; + } + } + /* + * Reaches here only if there's no data to be read. + */ + if (is_flush_mode) { + /* + * We've drained all buffers and need to tell the userspace + * application there isn't any data. Unfortunately, we can't + * just return a 'zero' value for the mask (because that could + * also indicate that segment # 0 of cpu #0 has data). + */ + *val = SW_ALL_WRITES_DONE_MASK; + return true; + } + return false; +}; + +/* + * Has semantics of 'copy_to_user()' -- returns # of bytes that could + * NOT be copied (On success ==> returns 0). + */ +size_t sw_consume_data(u32 mask, void __user *buffer, size_t bytes_to_read) +{ + int which_cpu = -1, which_seg = -1; + unsigned long bytes_not_copied = 0; + sw_output_buffer_t *buff = NULL; + sw_data_buffer_t *seg = NULL; + size_t bytes_read = 0; + + if (!sw_check_output_buffer_params(buffer, bytes_to_read, + SW_SEG_DATA_SIZE)) { + pw_pr_error("ERROR: invalid params to \"sw_consume_data\"!\n"); + return -PW_ERROR; + } + + which_cpu = mask >> 16; + which_seg = mask & 0xffff; + pw_pr_debug("CONSUME: cpu = %d, seg = %d\n", which_cpu, which_seg); + if (which_seg >= NUM_SEGS_PER_BUFFER) { + pw_pr_error( + "Error: which_seg (%d) >= NUM_SEGS_PER_BUFFER (%d)\n", + which_seg, NUM_SEGS_PER_BUFFER); + return bytes_to_read; + } + /* + * OK to access unlocked; either the segment is FULL, or no collection + * is ongoing. In either case, we're GUARANTEED no producer is touching + * this segment. 
+ */ + buff = GET_OUTPUT_BUFFER(which_cpu); + seg = &buff->buffers[which_seg]; + + bytes_not_copied = sw_copy_to_user(buffer, seg->buffer, + seg->bytes_written); /* dst, src */ + + /* bytes_not_copied = */ + /* copy_to_user(buffer, seg->buffer, seg->bytes_written); dst,src */ + if (likely(bytes_not_copied == 0)) { + bytes_read = seg->bytes_written; + } else { + pw_pr_error("Warning: couldn't copy %lu bytes\n", + bytes_not_copied); + bytes_read = 0; + } + SEG_SET_EMPTY(seg); + return bytes_read; +} + +unsigned int sw_get_output_buffer_size(void) +{ + return (sw_buffer_alloc_size * NUM_SEGS_PER_BUFFER); +}; + +void sw_count_samples_produced_dropped(void) +{ + int cpu = 0; + + sw_num_samples_produced = sw_num_samples_dropped = 0; + + if (per_cpu_output_buffers == NULL) { + return; + } + for_each_output_buffer(cpu) { + sw_output_buffer_t *buff = GET_OUTPUT_BUFFER(cpu); + + sw_num_samples_dropped += buff->dropped_samples; + sw_num_samples_produced += buff->produced_samples; + } +}; + +void sw_print_output_buffer_overheads(void) +{ + PRINT_CUMULATIVE_OVERHEAD_PARAMS(sw_produce_generic_msg_i, + "PRODUCE_GENERIC_MSG"); + sw_print_reader_stats(); +}; diff --git a/drivers/platform/x86/socwatch/sw_reader.c b/drivers/platform/x86/socwatch/sw_reader.c new file mode 100644 index 0000000000000..2e55ae1a54ccb --- /dev/null +++ b/drivers/platform/x86/socwatch/sw_reader.c @@ -0,0 +1,163 @@ +/* + + This file is provided under a dual BSD/GPLv2 license. When using or + redistributing this file, you may do so under either license. + + GPL LICENSE SUMMARY + + Copyright(c) 2014 - 2018 Intel Corporation. + + This program is free software; you can redistribute it and/or modify + it under the terms of version 2 of the GNU General Public License as + published by the Free Software Foundation. + + This program is distributed in the hope that it will be useful, but + WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU + General Public License for more details. + + Contact Information: + SoC Watch Developer Team + Intel Corporation, + 1300 S Mopac Expwy, + Austin, TX 78746 + + BSD LICENSE + + Copyright(c) 2014 - 2018 Intel Corporation. + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions + are met: + + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in + the documentation and/or other materials provided with the + distribution. + * Neither the name of Intel Corporation nor the names of its + contributors may be used to endorse or promote products derived + from this software without specific prior written permission. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +*/ +#include "sw_internal.h" +#include "sw_output_buffer.h" +#include "sw_kernel_defines.h" + +#define SW_BUFFER_CLEANUP_TIMER_DELAY_NSEC \ + 1000000 /* delay buffer cleanup by 10^6 nsec i.e. 1 msec */ + +/* + * The alarm queue. 
+ */ +wait_queue_head_t sw_reader_queue; +/* + * Reader wakeup timer. + */ +static struct hrtimer s_reader_wakeup_timer; +/* + * Variable to track # timer fires. + */ +static int s_num_timer_fires; + +/* + * The alarm callback. + */ +static enum hrtimer_restart sw_wakeup_callback_i(struct hrtimer *timer) +{ + ++s_num_timer_fires; + wake_up_interruptible(&sw_reader_queue); + return HRTIMER_NORESTART; +} + +/* + * Init reader queue. + */ +int sw_init_reader_queue(void) +{ + init_waitqueue_head(&sw_reader_queue); + /* + * Also init wakeup timer (used in low-overhead mode). + */ + hrtimer_init(&s_reader_wakeup_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); + s_reader_wakeup_timer.function = &sw_wakeup_callback_i; + + return PW_SUCCESS; +} +/* + * Destroy reader queue. + */ +void sw_destroy_reader_queue(void) +{ + /* NOP */ +} +/* + * Wakeup client waiting for a full buffer. + */ +void sw_wakeup_reader(enum sw_wakeup_action action) +{ + if (!waitqueue_active(&sw_reader_queue)) { + return; + } + /* + * Direct mode? + */ + switch (action) { + case SW_WAKEUP_ACTION_DIRECT: + wake_up_interruptible(&sw_reader_queue); + break; + case SW_WAKEUP_ACTION_TIMER: + if (!hrtimer_active(&s_reader_wakeup_timer)) { + ktime_t ktime = + ns_to_ktime(SW_BUFFER_CLEANUP_TIMER_DELAY_NSEC); + /* TODO: possible race here -- introduce locks? */ + hrtimer_start(&s_reader_wakeup_timer, ktime, + HRTIMER_MODE_REL); + } + break; + default: + break; + } + return; +} +/* + * Wakeup client waiting for a full buffer, and + * cancel any timers initialized by the reader + * subsys. + */ +void sw_cancel_reader(void) +{ + /* + * Cancel pending wakeup timer (used in low-overhead mode). + */ + if (hrtimer_active(&s_reader_wakeup_timer)) { + hrtimer_cancel(&s_reader_wakeup_timer); + } + /* + * There might be a reader thread blocked on a read: wake + * it up to give it a chance to respond to changed + * conditions. 
+ */ + sw_wakeup_reader(SW_WAKEUP_ACTION_DIRECT); +} + +void sw_print_reader_stats(void) +{ +#if DO_OVERHEAD_MEASUREMENTS + printk(KERN_INFO "# reader queue timer fires = %d\n", + s_num_timer_fires); +#endif /* OVERHEAD */ +} diff --git a/drivers/platform/x86/socwatch/sw_telem.c b/drivers/platform/x86/socwatch/sw_telem.c new file mode 100644 index 0000000000000..eccb37df44d57 --- /dev/null +++ b/drivers/platform/x86/socwatch/sw_telem.c @@ -0,0 +1,498 @@ +/* + + This file is provided under a dual BSD/GPLv2 license. When using or + redistributing this file, you may do so under either license. + + GPL LICENSE SUMMARY + + Copyright(c) 2014 - 2018 Intel Corporation. + + This program is free software; you can redistribute it and/or modify + it under the terms of version 2 of the GNU General Public License as + published by the Free Software Foundation. + + This program is distributed in the hope that it will be useful, but + WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + General Public License for more details. + + Contact Information: + SoC Watch Developer Team + Intel Corporation, + 1300 S Mopac Expwy, + Austin, TX 78746 + + BSD LICENSE + + Copyright(c) 2014 - 2018 Intel Corporation. + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions + are met: + + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in + the documentation and/or other materials provided with the + distribution. + * Neither the name of Intel Corporation nor the names of its + contributors may be used to endorse or promote products derived + from this software without specific prior written permission. 
+ + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +*/ + +#include +#include +#include /* Definition of __weak */ +#include /* LINUX_VERSION_CODE */ +#include "sw_kernel_defines.h" /* pw_pr_debug */ +#include "sw_mem.h" /* sw_kmalloc/free */ +#include "sw_lock_defs.h" /* Various lock-related definitions */ +#include "sw_telem.h" /* Signatures of fn's exported from here. */ + +/* + * These functions and data structures are exported by the Telemetry + * driver. However, that file may not be available in the kernel for + * which this driver is being built, so we re-define many of the same + * things here. + */ +/** + * struct telemetry_evtlog - The "event log" returned by the kernel's + * full-read telemetry driver. + * @telem_evtid: The 16-bit event ID. + * @telem_evtlog: The actual telemetry data. + */ +struct telemetry_evtlog { + u32 telem_evtid; /* Event ID of a data item. 
*/ + u64 telem_evtlog; /* Counter data */ +}; + +struct telemetry_evtconfig { + u32 *evtmap; /* Array of Event-IDs to Enable */ + u8 num_evts; /* Number of Events (<29) in evtmap */ + u8 period; /* Sampling period */ +}; + +#define MAX_TELEM_EVENTS 28 /* Max telem events per unit */ + +/* The enable bit is set when programming events, but is returned + * cleared for queried events requests. + */ +#define TELEM_EVENT_ENABLE 0x8000 /* Enabled when Event ID HIGH bit */ + +/* + * Sampling Period values. + * The sampling period is encoded in an 7-bit value, where + * Period = (Value * 16^Exponent) usec where: + * bits[6:3] -> Value; + * bits [0:2]-> Exponent; + * Here are some of the calculated possible values: + * | Value Val+Exp | Value | Exponent | Period (usec) | Period (msec) | + * |-----------------+-------+----------+---------------+---------------| + * | 0xA = 000 1+010 | 1 | 2 | 256 | 0.256 | + * | 0x12= 001 0+010 | 2 | 2 | 512 | 0.512 | + * | 0x22= 010 0+010 | 4 | 2 | 1024 | 1.024 | + * | 0xB = 000 1+011 | 1 | 3 | 4096 | 4.096 | + * | 0x13= 001 0+011 | 2 | 3 | 8192 | 8.192 | + * | 0x1B= 001 1+011 | 3 | 3 | 12288 | 12.288 | + * | 0x0C= 000 1+100 | 1 | 4 | 65536 | 65.536 | + * | 0x0D= 000 1+101 | 1 | 5 | 1048576 | 1048.576 | + */ +#define TELEM_SAMPLING_1MS 0x22 /* Approximately 1 ms */ +#define TELEM_SAMPLING_1S 0x0D /* Approximately 1 s */ + +/* These functions make up the main APIs of the telemetry driver. We + * define all of them with weak linkage so that we can still compile + * and load into kernels which don't have a telemetry driver. 
+ */ +extern int __weak telemetry_raw_read_eventlog(enum telemetry_unit telem_unit, + struct telemetry_evtlog *evtlog, + int evcount); +extern int __weak telemetry_reset(void); +extern int __weak telemetry_reset_events(void); +extern int __weak telemetry_get_sampling_period(u8 *punit_min, u8 *punit_max, + u8 *pmc_min, u8 *pmc_max); +extern int __weak telemetry_set_sampling_period(u8 punit_period, u8 pmc_period); +extern int __weak telemetry_get_eventconfig( + struct telemetry_evtconfig *punit_config, + struct telemetry_evtconfig *pmc_config, int punit_len, int pmc_len); +extern int __weak telemetry_add_events(u8 num_punit_evts, u8 num_pmc_evts, + u32 *punit_evtmap, u32 *pmc_evtmap); + +extern int __weak +telemetry_update_events(struct telemetry_evtconfig punit_config, + struct telemetry_evtconfig pmc_config); + +/* + * Some telemetry IDs have multiple instances, indexed by cpu ID. We + * implement these by defining two types of IDs: 'regular' and 'scaled'. + * For Telemetry IDs with a single instance (the majority of them), the + * index into the system's telemetry table is stored in the + * sw_driver_io_descriptor.idx. At read time, the driver gets the telemetry + * "slot" from sw_driver_io_descriptor.idx, and reads that data. This case + * is illustrated by telem_desc_A in the illustration below, where idx 2 + * indicates that telem_data[2] contains the telem data for this descriptor. + * + * telem_desc_A telem_data + * scale_op: X |..|[0] + * idx : 2 -------------------- |..|[1] + * \------->|..|[2] + * Scaled_IDs |..|[3] + * telem_desc_B CPU#0 1 2 3 ------>|..|[4] + * scale_op: / [0]|.|.|.|.| / + * idx : 1---->[1]|4|4|5|5| / + * +----------/ + * + * Descriptors with scaled IDs contain a scale operation (scale_op) and + * value. They use a 'scaled_ids' table, which is indexed by descriptor + * number and CPU id, and stores the telem_data index. 
So in the + * illustration above, CPU 0 reading from telem_desc_B would fetch row 1 + * (from telem_desc_B.idx == 1), and column [0] yielding element 4, so + * that's the telemetry ID it looks up in the telemetry data. + * + * The scaled_ids table is populated at telemetry ID initialization time + * + */ +static unsigned char *sw_telem_scaled_ids; /* Allocate on demand */ +static unsigned int sw_telem_rows_alloced; /* Rows currently allocated */ +static unsigned int sw_telem_rows_avail; /* Available rows */ + +extern int sw_max_num_cpus; /* SoC Watch's copy of cpu count. */ + +/* Macro for identifying telemetry IDs with either per-cpu, or per-module + * instances. These IDs need to be 'scaled' as per scale_op and scale_val. + */ +#define IS_SCALED_ID(td) ((td)->scale_op != TELEM_OP_NONE) +/* + * Event map that is populated with user-supplied IDs + */ +static u32 s_event_map[2][MAX_TELEM_EVENTS]; +/* + * Index into event map(s) + */ +static size_t s_unit_idx[2] = { 0, 0 }; +/* + * Used to decide if telemetry values need refreshing + */ +static size_t s_unit_iters[2] = { 0, 0 }; +/* + * Spinlock to guard updates to the 'iters' values. + */ +static SW_DEFINE_SPINLOCK(sw_telem_lock); +/* + * Macro to determine if socwatch telemetry system has been configured + */ +#define SW_TELEM_CONFIGURED() (s_unit_idx[0] > 0 || s_unit_idx[1] > 0) + +/** + * telemetry_available - Determine if telemetry driver is present + * + * Returns: 1 if telemetry driver is present, 0 if not. + */ +static int telemetry_available(void) +{ + int retval = 0; + struct telemetry_evtconfig punit_evtconfig; + struct telemetry_evtconfig pmc_evtconfig; + u32 punit_event_map[MAX_TELEM_EVENTS]; + u32 pmc_event_map[MAX_TELEM_EVENTS]; + + /* The symbol below is weak. We return 1 if we have a definition + * for this telemetry-driver-supplied symbol, or 0 if only the + * weak definition exists. This test will suffice to detect if + * the telemetry driver is loaded. 
+ */ + if (telemetry_get_eventconfig == NULL) { + return 0; + } + /* OK, the telemetry driver is loaded. But it's possible it + * hasn't been configured properly. To check that, retrieve + * the number of events currently configured. This should never + * be zero since the telemetry driver reserves some SSRAM slots + * for its own use + */ + memset(&punit_evtconfig, 0, sizeof(punit_evtconfig)); + memset(&pmc_evtconfig, 0, sizeof(pmc_evtconfig)); + + punit_evtconfig.evtmap = (u32 *)&punit_event_map; + pmc_evtconfig.evtmap = (u32 *)&pmc_event_map; + + retval = telemetry_get_eventconfig(&punit_evtconfig, &pmc_evtconfig, + MAX_TELEM_EVENTS, MAX_TELEM_EVENTS); + return retval == 0 && punit_evtconfig.num_evts > 0 && + pmc_evtconfig.num_evts > 0; +} + +/** + * sw_get_instance_row -- Get the address of a 'row' of instance IDs. + * @rownum: The row number of the Instance ID table, whose address to return. + * Returns: The address of the appropriate row, or NULL if rownum is bad. + */ +static unsigned char *sw_get_instance_row_addr(unsigned int rownum) +{ + if (rownum >= (sw_telem_rows_alloced - sw_telem_rows_avail)) { + pw_pr_error("ERROR: Cannot retrieve row Instance ID row %d\n", + rownum); + return NULL; + } + return &sw_telem_scaled_ids[rownum * sw_max_num_cpus]; +} + +/** + * sw_free_telem_scaled_id_table - Free the allocated slots. + * Returns: Nothing + * + * Admittedly, a more symmetrical function name would be nice. + */ +static void sw_telem_release_scaled_ids(void) +{ + sw_telem_rows_alloced = 0; + sw_telem_rows_avail = 0; + if (sw_telem_scaled_ids) { + sw_kfree(sw_telem_scaled_ids); + } + sw_telem_scaled_ids = NULL; +} + +/** + * sw_telem_init_func - Set up the telemetry unit to retrieve a data item + * (e.g. counter). + * @descriptor: The IO descriptor containing the unit and ID + * of the telemetry info to gather. 
+ * + * Because we don't (currently) control all of the counters, we + * economize by seeing if it's already being collected before allocate + * a slot for it. + * + * Returns: PW_SUCCESS if the telem collector can collect the requested data. + * -PW_ERROR if the the addition of that item fails. + */ +int sw_telem_init_func(struct sw_driver_io_descriptor *descriptor) +{ + struct sw_driver_telem_io_descriptor *td = + &(descriptor->telem_descriptor); + u8 unit = td->unit; /* Telemetry unit to use. */ + u32 id; /* Event ID we want telemetry to track. */ + size_t idx; /* Index into telemetry data array of event ID to gather. */ + const char *unit_str = unit == TELEM_PUNIT ? "PUNIT" : "PMC"; + size_t *unit_idx = &s_unit_idx[unit]; + + if (!telemetry_available()) { + return -ENXIO; + } + + id = (u32)(td->id); + + /* Check if we've already added this ID */ + for (idx = 0; idx < *unit_idx && idx < MAX_TELEM_EVENTS; ++idx) { + if (s_event_map[unit][idx] == id) { + /* Invariant: idx contains the + * index of the new data item. + */ + /* Save the index for later fast lookup. */ + td->idx = (u16)idx; + return 0; + } + } + + if (*unit_idx >= MAX_TELEM_EVENTS) { + pw_pr_error( + "Too many events %s units requested; max of %u available!\n", + unit_str, MAX_TELEM_EVENTS); + return -E2BIG; + } + s_event_map[unit][(*unit_idx)++] = id; + /* Invariant: idx contains the index of the new data item. */ + /* Save the index for later fast lookup. */ + td->idx = (u16)idx; + pw_pr_debug( + "OK, added id = 0x%x to unit %s at entry %zu; retrieved = 0x%x\n", + id, unit_str, *unit_idx - 1, s_event_map[unit][*unit_idx - 1]); + + return 0; +} + +/** + * sw_read_telem_info - Read a metric's data from the telemetry driver. 
+ * @dest: Destination (storage for the read data) + * @cpu: Which CPU to read from (not used) + * @descriptor: The descriptor containing the data ID to read + * @data_size_in_bytes: The # of bytes in the result (always 8) + * + * Returns: Nothing, but stores SW_TELEM_READ_FAIL_VALUE to dest + * if the read fails. + */ +void sw_read_telem_info(char *dest, int cpu, + const sw_driver_io_descriptor_t *descriptor, + u16 data_size_in_bytes) +{ + int len; + u64 *data_dest = (u64 *)dest; + int retry_count; + const struct sw_driver_telem_io_descriptor *td = + &(descriptor->telem_descriptor); + unsigned int idx; + u8 unit = td->unit; + bool needs_refresh = false; + +#define TELEM_PKT_SIZE 16 /* sizeof(struct telemetry_evtlog) + padding */ + static struct telemetry_evtlog events[MAX_TELEM_EVENTS]; + + /* Get the event index */ + if (IS_SCALED_ID(td)) { + unsigned char *scaled_ids; + + scaled_ids = sw_get_instance_row_addr(td->idx); + if (scaled_ids == NULL) { + pw_pr_error( + "Sw_read_telem_info_i: Illegal row index: *%p = %d", + &td->idx, td->idx); + *data_dest = SW_TELEM_READ_FAIL_VALUE; + return; /* Don't set the dest/data buffer. */ + } + idx = scaled_ids[RAW_CPU()]; /* Get per-cpu entry */ + } else { + idx = td->idx; + } + + /* + * Check if we need to refresh the list of values + */ + LOCK(sw_telem_lock); + { + if (s_unit_iters[unit] == 0) { + needs_refresh = true; + } + if (++s_unit_iters[unit] == s_unit_idx[unit]) { + s_unit_iters[unit] = 0; + } + } + UNLOCK(sw_telem_lock); + + /* + * Because of the enormous overhead of reading telemetry data from + * the current kernel driver, failure to read the data is not + * unheard of. As such, 3 times, should the read fail. Once we + * get a higher-performance read routine, we should be able to + * eliminate this retry (or maybe decrease it.) 
+ */ + retry_count = 3; + while (needs_refresh && retry_count--) { + len = telemetry_raw_read_eventlog( + unit, events, sizeof(events) / TELEM_PKT_SIZE); + + if ((len < 0) || (len < idx)) { + pw_pr_error( + "sw_read_telem_info_i: read failed: len=%d\n", + len); + } else { + break; + } + } + + if (retry_count) { + /* TODO: Resolve if we should return something other than + * SW_TELEM_READ_FAIL_VALUE, if the actual data + * happens to be that. + */ + *data_dest = events[idx].telem_evtlog; + } else { + *data_dest = SW_TELEM_READ_FAIL_VALUE; + } +} + +/** + * sw_reset_telem - Stop collecting telemetry info. + * @descriptor: Unused in this function + * + * Stop collecting anything extra, and give the driver back to + * debugfs. Because this driver increases the sampling rate, the + * kernel's telemetry driver can't successfully reset the driver unless + * we first drop the rate back down to a much slower rate. This is a + * temporary measure, since the reset operation will then reset the + * sampling interval to whatever the GMIN driver wants. + * + * Return: PW_SUCCESS. 
+ */ +int sw_reset_telem(const struct sw_driver_io_descriptor *descriptor) +{ + if (telemetry_available() && SW_TELEM_CONFIGURED()) { + telemetry_set_sampling_period(TELEM_SAMPLING_1S, + TELEM_SAMPLING_1S); + telemetry_reset_events(); + sw_telem_release_scaled_ids(); + memset(s_unit_idx, 0, sizeof(s_unit_idx)); + memset(s_unit_iters, 0, sizeof(s_unit_iters)); + } + return PW_SUCCESS; +} + +/** + * sw_available_telem -- Decide if the telemetry subsystem is available for use + */ +bool sw_telem_available(void) +{ + return telemetry_available(); +}; + +bool sw_telem_post_config(void) +{ + bool retval = true; + size_t i = 0; + struct telemetry_evtconfig punit_evtconfig; + struct telemetry_evtconfig pmc_evtconfig; + + if (!SW_TELEM_CONFIGURED()) { + return true; + } + + memset(&punit_evtconfig, 0, sizeof(punit_evtconfig)); + memset(&pmc_evtconfig, 0, sizeof(pmc_evtconfig)); + + telemetry_set_sampling_period(TELEM_SAMPLING_1S, TELEM_SAMPLING_1S); + + punit_evtconfig.period = TELEM_SAMPLING_1S; + pmc_evtconfig.period = TELEM_SAMPLING_1S; + + /* Punit */ + punit_evtconfig.evtmap = (u32 *)&s_event_map[TELEM_PUNIT]; + punit_evtconfig.num_evts = s_unit_idx[TELEM_PUNIT]; + /* PMC */ + pmc_evtconfig.evtmap = (u32 *)&s_event_map[TELEM_PMC]; + pmc_evtconfig.num_evts = s_unit_idx[TELEM_PMC]; + + for (i = 0; i < punit_evtconfig.num_evts; ++i) { + pw_pr_debug("PUNIT[%zu] = 0x%x\n", i, + punit_evtconfig.evtmap[i]); + } + for (i = 0; i < pmc_evtconfig.num_evts; ++i) { + pw_pr_debug("PMC[%zu] = 0x%x\n", i, pmc_evtconfig.evtmap[i]); + } + + /* + * OK, everything done. 
Now update + */ + if (telemetry_update_events(punit_evtconfig, pmc_evtconfig)) { + pw_pr_error("telemetry_update_events error"); + retval = false; + } else { + pw_pr_debug("OK, telemetry_update_events success\n"); + } + + telemetry_set_sampling_period(TELEM_SAMPLING_1MS, TELEM_SAMPLING_1MS); + + return retval; +} diff --git a/drivers/platform/x86/socwatch/sw_trace_notifier_provider.c b/drivers/platform/x86/socwatch/sw_trace_notifier_provider.c new file mode 100644 index 0000000000000..153fa70102958 --- /dev/null +++ b/drivers/platform/x86/socwatch/sw_trace_notifier_provider.c @@ -0,0 +1,2355 @@ +/* + + This file is provided under a dual BSD/GPLv2 license. When using or + redistributing this file, you may do so under either license. + + GPL LICENSE SUMMARY + + Copyright(c) 2014 - 2018 Intel Corporation. + + This program is free software; you can redistribute it and/or modify + it under the terms of version 2 of the GNU General Public License as + published by the Free Software Foundation. + + This program is distributed in the hope that it will be useful, but + WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + General Public License for more details. + + Contact Information: + SoC Watch Developer Team + Intel Corporation, + 1300 S Mopac Expwy, + Austin, TX 78746 + + BSD LICENSE + + Copyright(c) 2014 - 2018 Intel Corporation. + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions + are met: + + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in + the documentation and/or other materials provided with the + distribution. 
+ * Neither the name of Intel Corporation nor the names of its + contributors may be used to endorse or promote products derived + from this software without specific prior written permission. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +*/ +#include /* "LINUX_VERSION_CODE" */ +#include +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 11, 0) +#include +#else +#include +#endif +#include +#include + +#include +#include +#include +#include +#include +#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 14, 0) +#include /* for the various APIC vector tracepoints + * (e.g. "thermal_apic", + * "local_timer" etc.) + */ +#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(3,14,0) */ +struct pool_workqueue; +struct cpu_workqueue_struct; +#include +#include /* for 'pm_notifier' */ +#include /* for "cpufreq_notifier" */ +#include /* for 'CPU_UP_PREPARE' etc */ + +#include "sw_kernel_defines.h" +#include "sw_collector.h" +#include "sw_overhead_measurements.h" +#include "sw_tracepoint_handlers.h" +#include "sw_output_buffer.h" +#include "sw_mem.h" +#include "sw_trace_notifier_provider.h" + +/* ------------------------------------------------- + * Compile time constants and useful macros. 
+ * ------------------------------------------------- + */ +#ifndef __get_cpu_var +/* + * Kernels >= 3.19 don't include a definition + * of '__get_cpu_var'. Create one now. + */ +#define __get_cpu_var(var) (*this_cpu_ptr(&var)) +#endif /* __get_cpu_var */ + +#define BEGIN_LOCAL_IRQ_STATS_READ(p) \ + do { \ + p = &__get_cpu_var(irq_stat); + +#define END_LOCAL_IRQ_STATS_READ(p) \ + } \ + while (0) +/* + * CAS{32,64} + */ +#define CAS32(p, o, n) (cmpxchg((p), (o), (n)) == (o)) +#define CAS64(p, o, n) (cmpxchg64((p), (o), (n)) == (o)) +/* + * Timer start pid accessor macros + */ +#ifdef CONFIG_TIMER_STATS +#define GET_TIMER_THREAD_ID(t) \ + ((t)->start_pid) /* 'start_pid' is actually the thread ID + * of the thread that initialized the timer + */ +#else +#define GET_TIMER_THREAD_ID(t) (-1) +#endif /* CONFIG_TIMER_STATS */ +/* + * Tracepoint probe register/unregister functions and + * helper macros. + */ +#if IS_ENABLED(CONFIG_TRACEPOINTS) +#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 35) +#define DO_REGISTER_SW_TRACEPOINT_PROBE(node, name, probe) \ + WARN_ON(register_trace_##name(probe)) +#define DO_UNREGISTER_SW_TRACEPOINT_PROBE(node, name, probe) \ + unregister_trace_##name(probe) +#elif LINUX_VERSION_CODE < KERNEL_VERSION(3, 15, 0) +#define DO_REGISTER_SW_TRACEPOINT_PROBE(node, name, probe) \ + WARN_ON(register_trace_##name(probe, NULL)) +#define DO_UNREGISTER_SW_TRACEPOINT_PROBE(node, name, probe) \ + unregister_trace_##name(probe, NULL) +#else +#define DO_REGISTER_SW_TRACEPOINT_PROBE(node, name, probe) \ + WARN_ON(tracepoint_probe_register(node->tp, probe, NULL)) +#define DO_UNREGISTER_SW_TRACEPOINT_PROBE(node, name, probe) \ + tracepoint_probe_unregister(node->tp, probe, NULL) +#endif +#else /* CONFIG_TRACEPOINTS */ +#define DO_REGISTER_SW_TRACEPOINT_PROBE(...) /* NOP */ +#define DO_UNREGISTER_SW_TRACEPOINT_PROBE(...) /* NOP */ +#endif /* CONFIG_TRACEPOINTS */ +#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 35) +#define _DEFINE_PROBE_FUNCTION(name, ...) 
static void name(__VA_ARGS__) +#else +#define _DEFINE_PROBE_FUNCTION(name, ...) \ + static void name(void *ignore, __VA_ARGS__) +#endif +#define DEFINE_PROBE_FUNCTION(x) _DEFINE_PROBE_FUNCTION(x) + +/* + * Tracepoint probe function parameters. + * These tracepoint signatures depend on kernel version. + */ +#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 36) +#define PROBE_TPS_PARAMS \ + sw_probe_power_start_i, unsigned int type, unsigned int state +#elif LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 38) +#define PROBE_TPS_PARAMS \ + sw_probe_power_start_i, unsigned int type, unsigned int state, \ + unsigned int cpu_id +#else +#define PROBE_TPS_PARAMS \ + sw_probe_cpu_idle_i, unsigned int state, unsigned int cpu_id +#endif + +#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 38) +#define PROBE_TPF_PARAMS \ + sw_probe_power_frequency_i, unsigned int type, unsigned int state +#else +#define PROBE_TPF_PARAMS \ + sw_probe_cpu_frequency_i, unsigned int new_freq, unsigned int cpu +#endif + +#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 35) +#define PROBE_SCHED_WAKEUP_PARAMS \ + sw_probe_sched_wakeup_i, struct rq *rq, struct task_struct *task, \ + int success +#else +#define PROBE_SCHED_WAKEUP_PARAMS \ + sw_probe_sched_wakeup_i, struct task_struct *task, int success +#endif + +#if IS_ENABLED(CONFIG_ANDROID) +#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 4, 0) +#define PROBE_WAKE_LOCK_PARAMS sw_probe_wake_lock_i, struct wake_lock *lock +#define PROBE_WAKE_UNLOCK_PARAMS \ + sw_probe_wake_unlock_i, struct wake_unlock *unlock +#else +#define PROBE_WAKE_LOCK_PARAMS \ + sw_probe_wakeup_source_activate_i, const char *name, unsigned int state +#define PROBE_WAKE_UNLOCK_PARAMS \ + sw_probe_wakeup_source_deactivate_i, const char *name, \ + unsigned int state +#endif /* version */ +#endif /* CONFIG_ANDROID */ + +#if LINUX_VERSION_CODE <= KERNEL_VERSION(2, 6, 35) +#define PROBE_WORKQUEUE_PARAMS \ + sw_probe_workqueue_execution_i, struct task_struct *wq_thread, \ + struct work_struct *work +#else 
+#define PROBE_WORKQUEUE_PARAMS \ + sw_probe_workqueue_execute_start_i, struct work_struct *work +#endif + +#define PROBE_SCHED_SWITCH_PARAMS \ + sw_probe_sched_switch_i, struct task_struct *prev, \ + struct task_struct *next +/* + * These tracepoint signatures are independent of kernel version. + */ +#define PROBE_IRQ_PARAMS \ + sw_probe_irq_handler_entry_i, int irq, struct irqaction *action +#define PROBE_TIMER_ARGS sw_probe_timer_expire_entry_i, struct timer_list *t +#define PROBE_HRTIMER_PARAMS \ + sw_probe_hrtimer_expire_entry_i, struct hrtimer *hrt, ktime_t *now +#define PROBE_PROCESS_FORK_PARAMS \ + sw_probe_sched_process_fork_i, struct task_struct *parent, \ + struct task_struct *child +#define PROBE_SCHED_PROCESS_EXIT_PARAMS \ + sw_probe_sched_process_exit_i, struct task_struct *task +#define PROBE_THERMAL_APIC_ENTRY_PARAMS \ + sw_probe_thermal_apic_entry_i, int vector +#define PROBE_THERMAL_APIC_EXIT_PARAMS sw_probe_thermal_apic_exit_i, int vector + +#define IS_VALID_WAKEUP_EVENT(cpu) \ + ({ \ + bool *per_cpu_event = \ + &per_cpu(sw_is_valid_wakeup_event, (cpu)); \ + bool old_value = \ + CAS32(per_cpu_event, true, sw_wakeup_event_flag); \ + old_value; \ + }) +#define SHOULD_PRODUCE_WAKEUP_SAMPLE(cpu) (IS_VALID_WAKEUP_EVENT(cpu)) +#define RESET_VALID_WAKEUP_EVENT_COUNTER(cpu) \ + (per_cpu(sw_is_valid_wakeup_event, (cpu)) = true) + +#define NUM_TRACEPOINT_NODES SW_ARRAY_SIZE(s_trace_collector_lists) +#define NUM_VALID_TRACEPOINTS (NUM_TRACEPOINT_NODES - 1) /* "-1" for IPI */ +#define FOR_EACH_TRACEPOINT_NODE(idx, node) \ + for (idx = 0; idx < NUM_TRACEPOINT_NODES && \ + (node = &s_trace_collector_lists[idx]); \ + ++idx) + +#define FOR_EACH_NOTIFIER_NODE(idx, node) \ + for (idx = 0; idx < SW_ARRAY_SIZE(s_notifier_collector_lists) && \ + (node = &s_notifier_collector_lists[idx]); \ + ++idx) +/* + * Use these macros if all tracepoint ID numbers + * ARE contiguous from 0 -- max tracepoint ID # + */ +#if 0 +#define IS_VALID_TRACE_NOTIFIER_ID(id) \ + ((id) >= 0 
&& (id) < SW_ARRAY_SIZE(s_trace_collector_lists)) +#define GET_COLLECTOR_TRACE_NODE(id) (&s_trace_collector_lists[id]) +#define FOR_EACH_trace_notifier_id(idx) \ + for (idx = 0; idx < SW_ARRAY_SIZE(s_trace_collector_lists); ++idx) +#endif /* if 0 */ +/* + * Use these macros if all tracepoint ID numbers + * are NOT contiguous from 0 -- max tracepoint ID # + */ +#define GET_COLLECTOR_TRACE_NODE(idx) \ + ({ \ + int __idx = 0; \ + struct sw_trace_notifier_data *__node = NULL, \ + *__retVal = NULL; \ + FOR_EACH_TRACEPOINT_NODE(__idx, __node) \ + { \ + if ((idx) == GET_TRACE_NOTIFIER_ID(__node)) { \ + __retVal = __node; \ + break; \ + } \ + } \ + __retVal; \ + }) +#define IS_VALID_TRACE_NOTIFIER_ID(idx) (GET_COLLECTOR_TRACE_NODE(idx) != NULL) + +#define GET_COLLECTOR_NOTIFIER_NODE(idx) \ + ({ \ + int __idx = 0; \ + struct sw_trace_notifier_data *__node = NULL, \ + *__retVal = NULL; \ + FOR_EACH_NOTIFIER_NODE(__idx, __node) \ + { \ + if ((idx) == GET_TRACE_NOTIFIER_ID(__node)) { \ + __retVal = __node; \ + break; \ + } \ + } \ + __retVal; \ + }) +#define IS_VALID_NOTIFIER_ID(idx) (GET_COLLECTOR_NOTIFIER_NODE(idx) != NULL) + +/* ------------------------------------------------- + * Local function declarations. + * ------------------------------------------------- + */ +/* + * The tracepoint registration functions. 
+ */ +int sw_register_trace_cpu_idle_i(struct sw_trace_notifier_data *node); +int sw_unregister_trace_cpu_idle_i(struct sw_trace_notifier_data *node); +int sw_register_trace_cpu_frequency_i(struct sw_trace_notifier_data *node); +int sw_unregister_trace_cpu_frequency_i(struct sw_trace_notifier_data *node); +int sw_register_trace_irq_handler_entry_i(struct sw_trace_notifier_data *node); +int sw_unregister_trace_irq_handler_entry_i(struct sw_trace_notifier_data + *node); +int sw_register_trace_timer_expire_entry_i(struct sw_trace_notifier_data *node); +int sw_unregister_trace_timer_expire_entry_i( + struct sw_trace_notifier_data *node); +int sw_register_trace_hrtimer_expire_entry_i( + struct sw_trace_notifier_data *node); +int sw_unregister_trace_hrtimer_expire_entry_i( + struct sw_trace_notifier_data *node); +int sw_register_trace_sched_wakeup_i(struct sw_trace_notifier_data *node); +int sw_unregister_trace_sched_wakeup_i(struct sw_trace_notifier_data *node); +int sw_register_trace_sched_process_fork_i(struct sw_trace_notifier_data *node); +int sw_unregister_trace_sched_process_fork_i( + struct sw_trace_notifier_data *node); +int sw_register_trace_sched_process_exit_i(struct sw_trace_notifier_data *node); +int sw_unregister_trace_sched_process_exit_i( + struct sw_trace_notifier_data *node); +#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 14, 0) +int sw_register_trace_thermal_apic_entry_i(struct sw_trace_notifier_data *node); +int sw_unregister_trace_thermal_apic_entry_i( + struct sw_trace_notifier_data *node); +int sw_register_trace_thermal_apic_exit_i(struct sw_trace_notifier_data *node); +int sw_unregister_trace_thermal_apic_exit_i(struct sw_trace_notifier_data + *node); +#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(3,14,0) */ +#if IS_ENABLED(CONFIG_ANDROID) +#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 4, 0) +int sw_register_trace_wake_lock_i(struct sw_trace_notifier_data *node); +int sw_unregister_trace_wake_lock_i(struct sw_trace_notifier_data *node); +int 
sw_register_trace_wake_unlock_i(struct sw_trace_notifier_data *node); +int sw_unregister_trace_wake_unlock_i(struct sw_trace_notifier_data *node); +#else /* LINUX_VERSION_CODE >= KERNEL_VERSION(3,4,0) */ +int sw_register_trace_wakeup_source_activate_i( + struct sw_trace_notifier_data *node); +int sw_unregister_trace_wakeup_source_activate_i( + struct sw_trace_notifier_data *node); +int sw_register_trace_wakeup_source_deactivate_i( + struct sw_trace_notifier_data *node); +int sw_unregister_trace_wakeup_source_deactivate_i( + struct sw_trace_notifier_data *node); +#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(3,4,0) */ +#endif /* CONFIG_ANDROID */ +int sw_register_trace_workqueue_execution_i(struct sw_trace_notifier_data + *node); +int sw_unregister_trace_workqueue_execution_i( + struct sw_trace_notifier_data *node); +int sw_register_trace_sched_switch_i(struct sw_trace_notifier_data *node); +int sw_unregister_trace_sched_switch_i(struct sw_trace_notifier_data *node); +int sw_register_pm_notifier_i(struct sw_trace_notifier_data *node); +int sw_unregister_pm_notifier_i(struct sw_trace_notifier_data *node); +int sw_register_cpufreq_notifier_i(struct sw_trace_notifier_data *node); +int sw_unregister_cpufreq_notifier_i(struct sw_trace_notifier_data *node); +int sw_register_hotcpu_notifier_i(struct sw_trace_notifier_data *node); +int sw_unregister_hotcpu_notifier_i(struct sw_trace_notifier_data *node); +void sw_handle_sched_wakeup_i(struct sw_collector_data *node, int source_cpu, + int target_cpu); +void sw_handle_timer_wakeup_helper_i(struct sw_collector_data *curr, + struct sw_trace_notifier_data *node, + pid_t tid); +void sw_handle_apic_timer_wakeup_i(struct sw_collector_data *node); +void sw_handle_workqueue_wakeup_helper_i(int cpu, + struct sw_collector_data *node); +void sw_handle_sched_switch_helper_i(void); +void sw_tps_apic_i(int cpu); +void sw_tps_tps_i(int cpu); +void sw_tps_wakeup_i(int cpu); +void sw_tps_i(void); +void sw_tpf_i(int cpu, struct 
sw_trace_notifier_data *node); +void sw_process_fork_exit_helper_i(struct sw_collector_data *node, + struct task_struct *task, bool is_fork); +void sw_produce_wakelock_msg_i(int cpu, struct sw_collector_data *node, + const char *name, int type, u64 timeout, int pid, + int tid, const char *proc_name); +u64 sw_my_local_arch_irq_stats_cpu_i(void); + +/* + * The tracepoint probes. + */ +/* + * The tracepoint handlers. + */ +void sw_handle_trace_notifier_i(struct sw_trace_notifier_data *node); +void sw_handle_trace_notifier_on_cpu_i(int cpu, + struct sw_trace_notifier_data *node); +void sw_handle_reset_messages_i(struct sw_trace_notifier_data *node); + +/* ------------------------------------------------- + * Variable definitions. + * ------------------------------------------------- + */ +/* + * For overhead measurements. + */ +DECLARE_OVERHEAD_VARS( + sw_handle_timer_wakeup_helper_i); /* for the "timer_expire" + * family of probes + */ +DECLARE_OVERHEAD_VARS(sw_handle_irq_wakeup_i); /* for IRQ wakeups */ +DECLARE_OVERHEAD_VARS(sw_handle_sched_wakeup_i); /* for SCHED */ +DECLARE_OVERHEAD_VARS(sw_tps_i); /* for TPS */ +DECLARE_OVERHEAD_VARS(sw_tpf_i); /* for TPF */ +DECLARE_OVERHEAD_VARS(sw_process_fork_exit_helper_i); +#if IS_ENABLED(CONFIG_ANDROID) +DECLARE_OVERHEAD_VARS(sw_handle_wakelock_i); /* for wake lock/unlock */ +#endif /* CONFIG_ANDROID */ +DECLARE_OVERHEAD_VARS(sw_handle_workqueue_wakeup_helper_i); +DECLARE_OVERHEAD_VARS(sw_handle_sched_switch_helper_i); +/* + * Per-cpu wakeup counters. + * Used to decide which wakeup event is the first to occur after a + * core wakes up from a C-state. + * Set to 'true' in TPS probe + */ +static DEFINE_PER_CPU(bool, sw_is_valid_wakeup_event) = { true }; +/* + * Per-cpu counts of the number of times the local APIC fired. 
+ * We need a separate count because some apic timer fires don't seem + * to result in hrtimer/timer expires + */ +static DEFINE_PER_CPU(u64, sw_num_local_apic_timer_inters); +/* + * Flag value to use to decide if the event is a valid wakeup event. + * Set to 'false' in TPS probe. + */ +static bool sw_wakeup_event_flag = true; + +#if IS_ENABLED(CONFIG_TRACEPOINTS) +/* + * Scheduler-based polling emulation. + */ +static DEFINE_PER_CPU(unsigned long, sw_pcpu_polling_jiff); +#endif /* CONFIG_TRACEPOINTS */ + +pw_u16_t sw_min_polling_interval_msecs; + +/* + * IDs for supported tracepoints. + */ +enum sw_trace_id { + SW_TRACE_ID_CPU_IDLE, + SW_TRACE_ID_CPU_FREQUENCY, + SW_TRACE_ID_IRQ_HANDLER_ENTRY, + SW_TRACE_ID_TIMER_EXPIRE_ENTRY, + SW_TRACE_ID_HRTIMER_EXPIRE_ENTRY, + SW_TRACE_ID_SCHED_WAKEUP, + SW_TRACE_ID_IPI, + SW_TRACE_ID_SCHED_PROCESS_FORK, + SW_TRACE_ID_SCHED_PROCESS_EXIT, + SW_TRACE_ID_THERMAL_APIC_ENTRY, + SW_TRACE_ID_THERMAL_APIC_EXIT, + SW_TRACE_ID_WAKE_LOCK, + SW_TRACE_ID_WAKE_UNLOCK, + SW_TRACE_ID_WORKQUEUE_EXECUTE_START, + SW_TRACE_ID_SCHED_SWITCH, +}; + +/* + * IDs for supported notifiers. + */ +enum sw_notifier_id { + SW_NOTIFIER_ID_SUSPEND, /* TODO: change name? */ + SW_NOTIFIER_ID_SUSPEND_ENTER, + SW_NOTIFIER_ID_SUSPEND_EXIT, + SW_NOTIFIER_ID_HIBERNATE, + SW_NOTIFIER_ID_HIBERNATE_ENTER, + SW_NOTIFIER_ID_HIBERNATE_EXIT, + SW_NOTIFIER_ID_COUNTER_RESET, + SW_NOTIFIER_ID_CPUFREQ, + SW_NOTIFIER_ID_HOTCPU, +}; + +/* + * Names for supported tracepoints. A tracepoint + * 'name' consists of two strings: a "kernel" string + * that is used to locate the tracepoint within the kernel + * and an "abstract" string, that is used by Ring-3 to + * specify which tracepoints to use during a collection. 
+ */ +static const struct sw_trace_notifier_name s_trace_names[] = { + [SW_TRACE_ID_CPU_IDLE] = { "cpu_idle", "CPU-IDLE" }, + [SW_TRACE_ID_CPU_FREQUENCY] = { "cpu_frequency", "CPU-FREQUENCY" }, + [SW_TRACE_ID_IRQ_HANDLER_ENTRY] = { "irq_handler_entry", "IRQ-ENTRY" }, + [SW_TRACE_ID_TIMER_EXPIRE_ENTRY] = { "timer_expire_entry", + "TIMER-ENTRY" }, + [SW_TRACE_ID_HRTIMER_EXPIRE_ENTRY] = { "hrtimer_expire_entry", + "HRTIMER-ENTRY" }, + [SW_TRACE_ID_SCHED_WAKEUP] = { "sched_wakeup", "SCHED-WAKEUP" }, + [SW_TRACE_ID_IPI] = { NULL, "IPI" }, + [SW_TRACE_ID_SCHED_PROCESS_FORK] = { "sched_process_fork", + "PROCESS-FORK" }, + [SW_TRACE_ID_SCHED_PROCESS_EXIT] = { "sched_process_exit", + "PROCESS-EXIT" }, +#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 14, 0) + [SW_TRACE_ID_THERMAL_APIC_ENTRY] = { "thermal_apic_entry", + "THERMAL-THROTTLE-ENTRY" }, + [SW_TRACE_ID_THERMAL_APIC_EXIT] = { "thermal_apic_exit", + "THERMAL-THROTTLE-EXIT" }, +#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(3,14,0) */ +#if IS_ENABLED(CONFIG_ANDROID) +#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 4, 0) + [SW_TRACE_ID_WAKE_LOCK] = { "wake_lock", "WAKE-LOCK" }, + [SW_TRACE_ID_WAKE_UNLOCK] = { "wake_unlock", "WAKE-UNLOCK" }, +#else /* LINUX_VERSION_CODE >= KERNEL_VERSION(3,4,0) */ + [SW_TRACE_ID_WAKE_LOCK] = { "wakeup_source_activate", "WAKE-LOCK" }, + [SW_TRACE_ID_WAKE_UNLOCK] = { "wakeup_source_deactivate", + "WAKE-UNLOCK" }, +#endif +#endif + [SW_TRACE_ID_WORKQUEUE_EXECUTE_START] = { "workqueue_execute_start", + "WORKQUEUE-START" }, + [SW_TRACE_ID_SCHED_SWITCH] = { "sched_switch", "CONTEXT-SWITCH" }, +}; + +/* + * Names for supported notifiers. A notifier + * 'name' consists of two strings: an unused "kernel" string + * and an "abstract" string, that is used by Ring-3 to + * specify which notifiers to use during a collection. 
+ */ +static const struct sw_trace_notifier_name s_notifier_names[] = { + [SW_NOTIFIER_ID_SUSPEND] = { "suspend_notifier" /* don't care */, + "SUSPEND-NOTIFIER" }, + [SW_NOTIFIER_ID_SUSPEND_ENTER] = { NULL, "SUSPEND-ENTER" }, + [SW_NOTIFIER_ID_SUSPEND_EXIT] = { NULL, "SUSPEND-EXIT" }, + [SW_NOTIFIER_ID_HIBERNATE] = { "hibernate_notifier" /* don't care */, + "HIBERNATE-NOTIFIER" }, + [SW_NOTIFIER_ID_HIBERNATE_ENTER] = { NULL, "HIBERNATE-ENTER" }, + [SW_NOTIFIER_ID_HIBERNATE_EXIT] = { NULL, "HIBERNATE-EXIT" }, + [SW_NOTIFIER_ID_COUNTER_RESET] = { NULL, "COUNTER-RESET" }, + [SW_NOTIFIER_ID_CPUFREQ] = { "cpufreq_notifier" /* don't care */, + "CPUFREQ-NOTIFIER" }, + [SW_NOTIFIER_ID_HOTCPU] = { "hotcpu_notifier" /* don't care */, + "HOTCPU-NOTIFIER" }, +}; + +#if IS_ENABLED(CONFIG_TRACEPOINTS) +/* + * A list of supported tracepoints. + */ +static struct sw_trace_notifier_data s_trace_collector_lists[] = { + { SW_TRACE_COLLECTOR_TRACEPOINT, &s_trace_names[SW_TRACE_ID_CPU_IDLE], + &sw_register_trace_cpu_idle_i, &sw_unregister_trace_cpu_idle_i, + NULL }, + { SW_TRACE_COLLECTOR_TRACEPOINT, + &s_trace_names[SW_TRACE_ID_CPU_FREQUENCY], + &sw_register_trace_cpu_frequency_i, + &sw_unregister_trace_cpu_frequency_i, NULL }, + { SW_TRACE_COLLECTOR_TRACEPOINT, + &s_trace_names[SW_TRACE_ID_IRQ_HANDLER_ENTRY], + &sw_register_trace_irq_handler_entry_i, + &sw_unregister_trace_irq_handler_entry_i, NULL }, + { SW_TRACE_COLLECTOR_TRACEPOINT, + &s_trace_names[SW_TRACE_ID_TIMER_EXPIRE_ENTRY], + &sw_register_trace_timer_expire_entry_i, + &sw_unregister_trace_timer_expire_entry_i, NULL }, + { SW_TRACE_COLLECTOR_TRACEPOINT, + &s_trace_names[SW_TRACE_ID_HRTIMER_EXPIRE_ENTRY], + &sw_register_trace_hrtimer_expire_entry_i, + &sw_unregister_trace_hrtimer_expire_entry_i, NULL }, + { SW_TRACE_COLLECTOR_TRACEPOINT, + &s_trace_names[SW_TRACE_ID_SCHED_WAKEUP], + &sw_register_trace_sched_wakeup_i, + &sw_unregister_trace_sched_wakeup_i, NULL }, + /* Placeholder for IPI -- no tracepoints associated with it! 
*/ + { SW_TRACE_COLLECTOR_TRACEPOINT, &s_trace_names[SW_TRACE_ID_IPI], NULL, + NULL, NULL }, + { SW_TRACE_COLLECTOR_TRACEPOINT, + &s_trace_names[SW_TRACE_ID_SCHED_PROCESS_FORK], + &sw_register_trace_sched_process_fork_i, + &sw_unregister_trace_sched_process_fork_i, NULL }, + { SW_TRACE_COLLECTOR_TRACEPOINT, + &s_trace_names[SW_TRACE_ID_SCHED_PROCESS_EXIT], + &sw_register_trace_sched_process_exit_i, + &sw_unregister_trace_sched_process_exit_i, NULL }, +#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 14, 0) + /* + * For thermal throttling. + * We probably only need one of either 'entry' or 'exit'. Use + * both, until we decide which one to keep. Note that + * tracepoint IDs for these, and subsequent tracepoints + * (e.g. 'wake_lock') will change once we've picked which + * one to use. + */ + { SW_TRACE_COLLECTOR_TRACEPOINT, + &s_trace_names[SW_TRACE_ID_THERMAL_APIC_ENTRY], + &sw_register_trace_thermal_apic_entry_i, + &sw_unregister_trace_thermal_apic_entry_i, NULL }, + { SW_TRACE_COLLECTOR_TRACEPOINT, + &s_trace_names[SW_TRACE_ID_THERMAL_APIC_EXIT], + &sw_register_trace_thermal_apic_exit_i, + &sw_unregister_trace_thermal_apic_exit_i, NULL }, +#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(3,14,0) */ +/* Wakelocks have multiple tracepoints, depending on kernel version */ +#if IS_ENABLED(CONFIG_ANDROID) +#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 4, 0) + { SW_TRACE_COLLECTOR_TRACEPOINT, &s_trace_names[SW_TRACE_ID_WAKE_LOCK], + &sw_register_trace_wake_lock_i, &sw_unregister_trace_wake_lock_i, + NULL }, + { SW_TRACE_COLLECTOR_TRACEPOINT, + &s_trace_names[SW_TRACE_ID_WAKE_UNLOCK], + &sw_register_trace_wake_unlock_i, &sw_unregister_trace_wake_unlock_i, + NULL }, +#else /* LINUX_VERSION_CODE >= KERNEL_VERSION(3,4,0) */ + { SW_TRACE_COLLECTOR_TRACEPOINT, &s_trace_names[SW_TRACE_ID_WAKE_LOCK], + &sw_register_trace_wakeup_source_activate_i, + &sw_unregister_trace_wakeup_source_activate_i, NULL }, + { SW_TRACE_COLLECTOR_TRACEPOINT, + &s_trace_names[SW_TRACE_ID_WAKE_UNLOCK], + 
&sw_register_trace_wakeup_source_deactivate_i, + &sw_unregister_trace_wakeup_source_deactivate_i, NULL }, +#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(3,14,0) */ +#endif /* CONFIG_ANDROID */ + { SW_TRACE_COLLECTOR_TRACEPOINT, + &s_trace_names[SW_TRACE_ID_WORKQUEUE_EXECUTE_START], + &sw_register_trace_workqueue_execution_i, + &sw_unregister_trace_workqueue_execution_i, NULL }, + { SW_TRACE_COLLECTOR_TRACEPOINT, + &s_trace_names[SW_TRACE_ID_SCHED_SWITCH], + &sw_register_trace_sched_switch_i, + &sw_unregister_trace_sched_switch_i, NULL }, +}; + +/* + * List of supported notifiers. + */ +static struct sw_trace_notifier_data s_notifier_collector_lists[] = { + { SW_TRACE_COLLECTOR_NOTIFIER, + &s_notifier_names[SW_NOTIFIER_ID_SUSPEND], &sw_register_pm_notifier_i, + &sw_unregister_pm_notifier_i, NULL, true /* always register */ }, + /* Placeholder for suspend enter/exit -- these will be called + * from within the pm notifier + */ + { SW_TRACE_COLLECTOR_NOTIFIER, + &s_notifier_names[SW_NOTIFIER_ID_SUSPEND_ENTER], NULL, NULL, NULL }, + { SW_TRACE_COLLECTOR_NOTIFIER, + &s_notifier_names[SW_NOTIFIER_ID_SUSPEND_EXIT], NULL, NULL, NULL }, + /* Placeholder for hibernate enter/exit -- these will be called + * from within the pm notifier + */ + { SW_TRACE_COLLECTOR_NOTIFIER, + &s_notifier_names[SW_NOTIFIER_ID_HIBERNATE], NULL, NULL, NULL }, + { SW_TRACE_COLLECTOR_NOTIFIER, + &s_notifier_names[SW_NOTIFIER_ID_HIBERNATE_ENTER], NULL, NULL, NULL }, + { SW_TRACE_COLLECTOR_NOTIFIER, + &s_notifier_names[SW_NOTIFIER_ID_HIBERNATE_EXIT], NULL, NULL, NULL }, + { SW_TRACE_COLLECTOR_NOTIFIER, + &s_notifier_names[SW_NOTIFIER_ID_COUNTER_RESET], NULL, NULL, NULL }, + { SW_TRACE_COLLECTOR_NOTIFIER, + &s_notifier_names[SW_NOTIFIER_ID_CPUFREQ], + &sw_register_cpufreq_notifier_i, &sw_unregister_cpufreq_notifier_i }, +}; + +/* + * Special entry for CPU notifier (i.e. "hotplug" notifier) + * We don't want these to be visible to the user. 
+ */ +static struct sw_trace_notifier_data s_hotplug_notifier_data = { + SW_TRACE_COLLECTOR_NOTIFIER, + &s_notifier_names[SW_NOTIFIER_ID_HOTCPU], + &sw_register_hotcpu_notifier_i, + &sw_unregister_hotcpu_notifier_i, + NULL, + true /* always register */ +}; +#else /* !CONFIG_TRACEPOINTS */ +/* + * A list of supported tracepoints. + */ +static struct sw_trace_notifier_data s_trace_collector_lists[] = { + /* EMPTY */}; +/* + * List of supported notifiers. + */ +static struct sw_trace_notifier_data s_notifier_collector_lists[] = { + /* EMPTY */ }; + +#endif /* CONFIG_TRACEPOINTS */ + +/* + * Macros to retrieve tracepoint and notifier IDs. + */ +#define GET_TRACE_ID_FROM_NODE(node) ((node)->name - s_trace_names) +#define GET_NOTIFIER_ID_FROM_NODE(node) ((node)->name - s_notifier_names) + +#define GET_TRACE_NOTIFIER_ID(node) \ + (int)(((node)->type == SW_TRACE_COLLECTOR_TRACEPOINT) ? \ + GET_TRACE_ID_FROM_NODE(node) : \ + GET_NOTIFIER_ID_FROM_NODE(node)) + +/* ------------------------------------------------- + * Function definitions. 
+ * ------------------------------------------------- + */ + +/* + * Retrieve a TSC value + */ +static inline u64 sw_tscval(void) +{ + unsigned int low, high; + + asm volatile("rdtsc" : "=a"(low), "=d"(high)); + return low | ((unsigned long long)high) << 32; +}; + +u64 sw_timestamp(void) +{ + struct timespec ts; + + getnstimeofday(&ts); + return (ts.tv_sec * 1000000000ULL + ts.tv_nsec); +} + +/* + * Basically the same as arch/x86/kernel/irq.c --> "arch_irq_stat_cpu(cpu)" + */ +u64 sw_my_local_arch_irq_stats_cpu_i(void) +{ + u64 sum = 0; + irq_cpustat_t *stats; +#ifdef __arm__ + int i = 0; +#endif + BEGIN_LOCAL_IRQ_STATS_READ(stats); + { +#ifndef __arm__ + sum += stats->__nmi_count; +#if IS_ENABLED(CONFIG_X86_LOCAL_APIC) + sum += stats->apic_timer_irqs; + sum += stats->irq_spurious_count; +#endif +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 34) + sum += stats->x86_platform_ipis; +#endif /* 2,6,34 */ + sum += stats->apic_perf_irqs; +#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 5, 0) + sum += stats->apic_irq_work_irqs; +#endif /* 3,5,0 */ +#ifdef CONFIG_SMP + sum += stats->irq_call_count; + sum += stats->irq_resched_count; + sum += stats->irq_tlb_count; +#endif +#ifdef CONFIG_X86_THERMAL_VECTOR + sum += stats->irq_thermal_count; +#endif + +#else + sum += stats->__softirq_pending; +#ifdef CONFIG_SMP + for (i = 0; i < NR_IPI; ++i) { + sum += stats->ipi_irqs[i]; + } +#endif +#ifdef CONFIG_X86_MCE + sum += stats->mce_exception_count; + sum += stats->mce_poll_count; +#endif +#endif + } + END_LOCAL_IRQ_STATS_READ(stats); + return sum; +}; + +/* + * Generic tracepoint/notifier handling function. + */ +void sw_handle_trace_notifier_i(struct sw_trace_notifier_data *node) +{ + struct sw_collector_data *curr = NULL; + + if (!node) { + return; + } + list_for_each_entry(curr, &node->list, list) { + pw_pr_debug("DEBUG: handling message\n"); + sw_handle_per_cpu_msg(curr); + } +}; + +/* + * Generic tracepoint/notifier handling function. 
+ */ +void sw_handle_trace_notifier_on_cpu_i(int cpu, + struct sw_trace_notifier_data *node) +{ + struct sw_collector_data *curr = NULL; + + if (!node) { + return; + } + list_for_each_entry(curr, &node->list, list) { + sw_handle_per_cpu_msg_on_cpu(cpu, curr); + } +}; + +void sw_handle_reset_messages_i(struct sw_trace_notifier_data *node) +{ + struct sw_collector_data *curr = NULL; + + if (!node) { + return; + } + list_for_each_entry(curr, &node->list, list) { + pw_pr_debug("Handling message of unknown cpumask on cpu %d\n", + RAW_CPU()); + sw_schedule_work(&curr->cpumask, &sw_handle_per_cpu_msg, curr); + } +} + +/* + * Tracepoint helpers. + */ + +/* + * TIMER wakeup handling function. + */ +static void sw_handle_timer_wakeup_i(struct sw_collector_data *node, pid_t pid, + pid_t tid) +{ + int cpu = RAW_CPU(); + sw_driver_msg_t *msg = GET_MSG_SLOT_FOR_CPU(node->msg, cpu, + node->per_msg_payload_size); + /* char *dst_vals = (char *)(unsigned long)msg->p_payload; */ + char *dst_vals = msg->p_payload; + + /* msg->tsc = sw_timestamp(); */ + /* msg TSC assigned when msg is written to buffer */ + msg->cpuidx = cpu; + + /* + * TIMER handling ==> only return the pid, tid + */ + *((int *)dst_vals) = pid; + dst_vals += sizeof(pid); + *((int *)dst_vals) = tid; + + if (sw_produce_generic_msg(msg, SW_WAKEUP_ACTION_DIRECT)) { + pw_pr_warn("WARNING: could NOT produce message!\n"); + } + pw_pr_debug("HANDLED timer expire for %d, %d\n", pid, tid); +}; + +/* + * Helper function for {hr}timer expires. Required for overhead tracking. 
+ */ +void sw_handle_timer_wakeup_helper_i(struct sw_collector_data *curr, + struct sw_trace_notifier_data *node, + pid_t tid) +{ + pid_t pid = -1; + + if (tid == 0) { + pid = 0; + } else { + struct task_struct *task = + pid_task(find_pid_ns(tid, &init_pid_ns), PIDTYPE_PID); + if (likely(task)) { + pid = task->tgid; + } + } + list_for_each_entry(curr, &node->list, list) { + sw_handle_timer_wakeup_i(curr, pid, tid); + } +}; + +/* + * SCHED wakeup handling function. + */ +void sw_handle_sched_wakeup_i(struct sw_collector_data *node, int source_cpu, + int target_cpu) +{ + int cpu = source_cpu; + sw_driver_msg_t *msg = GET_MSG_SLOT_FOR_CPU(node->msg, cpu, + node->per_msg_payload_size); + /* char *dst_vals = (char *)(unsigned long)msg->p_payload; */ + char *dst_vals = msg->p_payload; + + /* msg->tsc = sw_timestamp(); */ + /* msg TSC assigned when msg is written to buffer */ + msg->cpuidx = source_cpu; + + /* + * sched handling ==> only return the source, target CPUs + */ + *((int *)dst_vals) = source_cpu; + dst_vals += sizeof(source_cpu); + *((int *)dst_vals) = target_cpu; + + if (sw_produce_generic_msg(msg, SW_WAKEUP_ACTION_NONE)) { + pw_pr_warn("WARNING: could NOT produce message!\n"); + } +}; + +/* + * APIC timer wakeup + */ +void sw_handle_apic_timer_wakeup_i(struct sw_collector_data *node) +{ + /* + * Send an empty message back to Ring-3 + */ + int cpu = RAW_CPU(); + sw_driver_msg_t *msg = GET_MSG_SLOT_FOR_CPU(node->msg, cpu, + node->per_msg_payload_size); + /* char *dst_vals = (char *)(unsigned long)msg->p_payload; */ + + /* msg->tsc = sw_timestamp(); */ + /* msg TSC assigned when msg is written to buffer */ + msg->cpuidx = cpu; + + if (sw_produce_generic_msg(msg, SW_WAKEUP_ACTION_DIRECT)) { + pw_pr_warn("WARNING: could NOT produce message!\n"); + } + pw_pr_debug("HANDLED APIC timer wakeup for cpu = %d\n", cpu); +}; + +/* + * Helper function for workqueue executions. Required for overhead tracking. 
+ */ +void sw_handle_workqueue_wakeup_helper_i(int cpu, + struct sw_collector_data *node) +{ + sw_driver_msg_t *msg = GET_MSG_SLOT_FOR_CPU(node->msg, cpu, + node->per_msg_payload_size); + + /* msg->tsc = sw_timestamp(); */ + /* msg TSC assigned when msg is written to buffer */ + msg->cpuidx = cpu; + + /* + * Workqueue wakeup ==> empty message. + */ + if (sw_produce_generic_msg(msg, SW_WAKEUP_ACTION_DIRECT)) { + pw_pr_error("WARNING: could NOT produce message!\n"); + } +}; + +/* + * Helper function for sched_switch. Required for overhead tracking. + */ +void sw_handle_sched_switch_helper_i(void) +{ + static struct sw_trace_notifier_data *node; + + if (unlikely(node == NULL)) { + node = GET_COLLECTOR_TRACE_NODE(SW_TRACE_ID_SCHED_SWITCH); + pw_pr_debug("SCHED SWITCH NODE = %p\n", node); + } + if (!node) { + return; + } + preempt_disable(); + { + struct sw_collector_data *curr; + + list_for_each_entry(curr, &node->list, list) { + unsigned long curr_jiff = jiffies, + prev_jiff = curr->last_update_jiffies; + unsigned long delta_msecs = + jiffies_to_msecs(curr_jiff) - + jiffies_to_msecs(prev_jiff); + struct cpumask *mask = &curr->cpumask; + u16 timeout = curr->info->sampling_interval_msec; + + if (!timeout) { + timeout = sw_min_polling_interval_msecs; + } + /* Has there been enough time since the last + * collection point? + */ + if (delta_msecs < timeout) { + continue; + } + /* Update timestamp and handle message */ + if (cpumask_test_cpu( + RAW_CPU(), + mask) /* This msg must be handled on + * the current CPU + */ + || + cpumask_empty( + mask) /* This msg may be handled by + * any CPU + */) { + if (!CAS64(&curr->last_update_jiffies, + prev_jiff, curr_jiff)) { + /* + * CAS failure should only be possible + * for messages that can be handled + * on any CPU, in which case it + * indicates a different CPU already + * handled this message. + */ + continue; + } + sw_handle_per_cpu_msg_no_sched(curr); + } + } + } + preempt_enable(); +}; + +/* + * Probe functions. 
+ */ + +/* + * 1. TPS + */ + +/* + * Check IPI wakeups within the cpu_idle tracepoint. + */ +void sw_tps_apic_i(int cpu) +{ + static struct sw_trace_notifier_data *apic_timer_node; + + if (unlikely(apic_timer_node == NULL)) { + apic_timer_node = GET_COLLECTOR_TRACE_NODE(SW_TRACE_ID_IPI); + pw_pr_debug("apic NODE = %p\n", apic_timer_node); + } + if (apic_timer_node) { + bool local_apic_timer_fired = false; + u64 curr_num_local_apic = sw_my_local_arch_irq_stats_cpu_i(); + u64 *old_num_local_apic = + &__get_cpu_var(sw_num_local_apic_timer_inters); + + if (*old_num_local_apic && + (*old_num_local_apic != curr_num_local_apic)) { + local_apic_timer_fired = true; + } + *old_num_local_apic = curr_num_local_apic; + + if (local_apic_timer_fired && + SHOULD_PRODUCE_WAKEUP_SAMPLE(cpu)) { + struct sw_collector_data *curr = NULL; + list_for_each_entry(curr, &apic_timer_node->list, + list) { + sw_handle_apic_timer_wakeup_i(curr); + } + } + } +}; + +/* + * Perform any user-defined tasks within the + * cpu_idle tracepoint. + */ +void sw_tps_tps_i(int cpu) +{ + static struct sw_trace_notifier_data *tps_node; + + if (unlikely(tps_node == NULL)) { + tps_node = GET_COLLECTOR_TRACE_NODE(SW_TRACE_ID_CPU_IDLE); + pw_pr_debug("TPS NODE = %p\n", tps_node); + } + sw_handle_trace_notifier_i(tps_node); +}; + +/* + * Perform any wakeup-related tasks within the + * cpu_idle tracepoint. + */ +void sw_tps_wakeup_i(int cpu) +{ + /* + * For now, assume we will always have to + * do some wakeup book keeping. Later, we'll + * need to detect if the user requested wakeups. + */ + sw_wakeup_event_flag = false; + RESET_VALID_WAKEUP_EVENT_COUNTER(cpu); +}; + +void sw_tps_i(void) +{ + /* + * Update: FIRST handle IPI wakeups + * THEN handle TPS + */ + int cpu = RAW_CPU(); + + sw_tps_apic_i(cpu); + sw_tps_tps_i(cpu); + sw_tps_wakeup_i(cpu); +}; + +/* + * 2. TPF + */ + +/* + * Helper function for overhead measurements. 
+ */ +void sw_tpf_i(int cpu, struct sw_trace_notifier_data *node) +{ + sw_handle_trace_notifier_on_cpu_i((int)cpu, node); +}; + +#if IS_ENABLED(CONFIG_TRACEPOINTS) +DEFINE_PROBE_FUNCTION(PROBE_TPS_PARAMS) +{ +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 38) + if (state == PWR_EVENT_EXIT) { + return; + } +#endif + DO_PER_CPU_OVERHEAD_FUNC(sw_tps_i); +}; + +DEFINE_PROBE_FUNCTION(PROBE_TPF_PARAMS) +{ +#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 38) + int cpu = RAW_CPU(); +#endif /* version < 2.6.38 */ + static struct sw_trace_notifier_data *node; + + if (unlikely(node == NULL)) { + node = GET_COLLECTOR_TRACE_NODE(SW_TRACE_ID_CPU_FREQUENCY); + pw_pr_debug("NODE = %p\n", node); + } + DO_PER_CPU_OVERHEAD_FUNC(sw_tpf_i, (int)cpu, node); +}; + +/* + * IRQ wakeup handling function. + */ +static void sw_handle_irq_wakeup_i(struct sw_collector_data *node, int irq) +{ + int cpu = RAW_CPU(); + sw_driver_msg_t *msg = GET_MSG_SLOT_FOR_CPU(node->msg, cpu, + node->per_msg_payload_size); + /* char *dst_vals = (char *)(unsigned long)msg->p_payload; */ + char *dst_vals = msg->p_payload; + + /* msg->tsc = sw_timestamp(); */ + /* msg TSC assigned when msg is written to buffer */ + msg->cpuidx = cpu; + + /* + * IRQ handling ==> only return the irq number + */ + *((int *)dst_vals) = irq; + + if (sw_produce_generic_msg(msg, SW_WAKEUP_ACTION_DIRECT)) { + pw_pr_warn("WARNING: could NOT produce message!\n"); + } +}; + +/* + * 3. IRQ handler entry + */ +DEFINE_PROBE_FUNCTION(PROBE_IRQ_PARAMS) +{ + int cpu = RAW_CPU(); + static struct sw_trace_notifier_data *node; + + struct sw_collector_data *curr = NULL; + + if (unlikely(node == NULL)) { + node = GET_COLLECTOR_TRACE_NODE(SW_TRACE_ID_IRQ_HANDLER_ENTRY); + pw_pr_debug("NODE = %p\n", node); + } + if (!node || !SHOULD_PRODUCE_WAKEUP_SAMPLE(cpu)) { + return; + } + list_for_each_entry(curr, &node->list, list) { + DO_PER_CPU_OVERHEAD_FUNC(sw_handle_irq_wakeup_i, curr, irq); + } +}; + +/* + * 4. 
TIMER expire + */ +DEFINE_PROBE_FUNCTION(PROBE_TIMER_ARGS) +{ + int cpu = RAW_CPU(); + static struct sw_trace_notifier_data *node; + + struct sw_collector_data *curr = NULL; + pid_t tid = GET_TIMER_THREAD_ID(t); + + if (unlikely(node == NULL)) { + node = GET_COLLECTOR_TRACE_NODE(SW_TRACE_ID_TIMER_EXPIRE_ENTRY); + pw_pr_debug("NODE = %p\n", node); + } + + if (!node || !SHOULD_PRODUCE_WAKEUP_SAMPLE(cpu)) { + return; + } + DO_PER_CPU_OVERHEAD_FUNC(sw_handle_timer_wakeup_helper_i, curr, node, + tid); +}; + +/* + * 5. HRTIMER expire + */ +DEFINE_PROBE_FUNCTION(PROBE_HRTIMER_PARAMS) +{ + int cpu = RAW_CPU(); + static struct sw_trace_notifier_data *node; + struct sw_collector_data *curr = NULL; + pid_t tid = GET_TIMER_THREAD_ID(hrt); + + if (unlikely(node == NULL)) { + node = GET_COLLECTOR_TRACE_NODE( + SW_TRACE_ID_HRTIMER_EXPIRE_ENTRY); + pw_pr_debug("NODE = %p\n", node); + } + + if (!node || !SHOULD_PRODUCE_WAKEUP_SAMPLE(cpu)) { + return; + } + DO_PER_CPU_OVERHEAD_FUNC(sw_handle_timer_wakeup_helper_i, curr, node, + tid); +}; + +/* + * 6. SCHED wakeup + */ +DEFINE_PROBE_FUNCTION(PROBE_SCHED_WAKEUP_PARAMS) +{ + static struct sw_trace_notifier_data *node; + struct sw_collector_data *curr = NULL; + int target_cpu = task_cpu(task), source_cpu = RAW_CPU(); + /* + * "Self-sched" samples are "don't care". + */ + if (target_cpu == source_cpu) { + return; + } + if (unlikely(node == NULL)) { + node = GET_COLLECTOR_TRACE_NODE(SW_TRACE_ID_SCHED_WAKEUP); + pw_pr_debug("NODE = %p\n", node); + } + /* + * Unlike other wakeup sources, we check the per-cpu flag + * of the TARGET cpu to decide if we should produce a sample. + */ + if (!node || !SHOULD_PRODUCE_WAKEUP_SAMPLE(target_cpu)) { + return; + } + list_for_each_entry(curr, &node->list, list) { + /* sw_handle_sched_wakeup_i(curr, source_cpu, target_cpu); */ + DO_PER_CPU_OVERHEAD_FUNC(sw_handle_sched_wakeup_i, curr, + source_cpu, target_cpu); + } +}; + +/* + * 8. 
PROCESS fork + */ + +/* + * Helper for PROCESS fork, PROCESS exit + */ +void sw_process_fork_exit_helper_i(struct sw_collector_data *node, + struct task_struct *task, bool is_fork) +{ + int cpu = RAW_CPU(); + pid_t pid = task->tgid, tid = task->pid; + const char *name = task->comm; + sw_driver_msg_t *msg = GET_MSG_SLOT_FOR_CPU(node->msg, cpu, + node->per_msg_payload_size); + char *dst_vals = msg->p_payload; + + msg->cpuidx = cpu; + + /* + * Fork/Exit ==> return pid, tid + * Fork ==> also return name + */ + *((int *)dst_vals) = pid; + dst_vals += sizeof(pid); + *((int *)dst_vals) = tid; + dst_vals += sizeof(tid); + if (is_fork) { + memcpy(dst_vals, name, SW_MAX_PROC_NAME_SIZE); + } + + if (sw_produce_generic_msg(msg, SW_WAKEUP_ACTION_DIRECT)) { + pw_pr_warn("WARNING: could NOT produce message!\n"); + } + pw_pr_debug( + "HANDLED process %s event for task: pid = %d, tid = %d, name = %s\n", + is_fork ? "FORK" : "EXIT", pid, tid, name); +}; + +DEFINE_PROBE_FUNCTION(PROBE_PROCESS_FORK_PARAMS) +{ + static struct sw_trace_notifier_data *node; + struct sw_collector_data *curr = NULL; + + if (unlikely(node == NULL)) { + node = GET_COLLECTOR_TRACE_NODE(SW_TRACE_ID_SCHED_PROCESS_FORK); + pw_pr_debug("NODE = %p\n", node); + } + if (!node) { + return; + } + list_for_each_entry(curr, &node->list, list) { + DO_PER_CPU_OVERHEAD_FUNC(sw_process_fork_exit_helper_i, curr, + child, true /* true ==> fork */); + } +}; + +/* + * 9. PROCESS exit + */ +DEFINE_PROBE_FUNCTION(PROBE_SCHED_PROCESS_EXIT_PARAMS) +{ + static struct sw_trace_notifier_data *node; + struct sw_collector_data *curr = NULL; + + if (unlikely(node == NULL)) { + node = GET_COLLECTOR_TRACE_NODE(SW_TRACE_ID_SCHED_PROCESS_EXIT); + pw_pr_debug("NODE = %p\n", node); + } + if (!node) { + return; + } + list_for_each_entry(curr, &node->list, list) { + DO_PER_CPU_OVERHEAD_FUNC(sw_process_fork_exit_helper_i, curr, + task, false /* false ==> exit */); + } +}; + +#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 14, 0) +/* + * 10. 
THERMAL_APIC entry + */ +DEFINE_PROBE_FUNCTION(PROBE_THERMAL_APIC_ENTRY_PARAMS) +{ + int cpu = RAW_CPU(); + static struct sw_trace_notifier_data *node; + + if (unlikely(node == NULL)) { + node = GET_COLLECTOR_TRACE_NODE(SW_TRACE_ID_THERMAL_APIC_ENTRY); + pw_pr_debug("NODE = %p\n", node); + } + DO_PER_CPU_OVERHEAD_FUNC(sw_tpf_i, (int)cpu, node); +}; + +/* + * 10. THERMAL_APIC exit + */ +DEFINE_PROBE_FUNCTION(PROBE_THERMAL_APIC_EXIT_PARAMS) +{ + int cpu = RAW_CPU(); + static struct sw_trace_notifier_data *node; + + if (unlikely(node == NULL)) { + node = GET_COLLECTOR_TRACE_NODE(SW_TRACE_ID_THERMAL_APIC_EXIT); + pw_pr_debug("NODE = %p\n", node); + } + DO_PER_CPU_OVERHEAD_FUNC(sw_tpf_i, (int)cpu, node); +}; +#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(3,14,0) */ + +#if IS_ENABLED(CONFIG_ANDROID) +/* + * 11. WAKE lock / WAKEUP source activate. + */ + +/* + * Helper function to produce wake lock/unlock messages. + */ +void sw_produce_wakelock_msg_i(int cpu, struct sw_collector_data *node, + const char *name, int type, u64 timeout, int pid, + int tid, const char *proc_name) +{ + sw_driver_msg_t *msg = GET_MSG_SLOT_FOR_CPU(node->msg, cpu, + node->per_msg_payload_size); + char *dst_vals = msg->p_payload; + + msg->cpuidx = cpu; + + /* + * Protocol: + * wakelock_timeout, wakelock_type, wakelock_name, + * proc_pid, proc_tid, proc_name + */ + *((u64 *)dst_vals) = timeout; + dst_vals += sizeof(timeout); + *((int *)dst_vals) = type; + dst_vals += sizeof(type); + strncpy(dst_vals, name, SW_MAX_KERNEL_WAKELOCK_NAME_SIZE); + dst_vals += SW_MAX_KERNEL_WAKELOCK_NAME_SIZE; + + *((int *)dst_vals) = pid; + dst_vals += sizeof(pid); + *((int *)dst_vals) = tid; + dst_vals += sizeof(tid); + strncpy(dst_vals, proc_name, SW_MAX_PROC_NAME_SIZE); + dst_vals += SW_MAX_PROC_NAME_SIZE; + + if (sw_produce_generic_msg(msg, SW_WAKEUP_ACTION_DIRECT)) { + pw_pr_warn("WARNING: could NOT produce message!\n"); + } +}; + +/* + * Helper function to handle wake lock/unlock callbacks. 
+ */ +void sw_handle_wakelock_i(int cpu, struct sw_trace_notifier_data *node, + const char *name, int type, u64 timeout) +{ + int pid = PID(), tid = TID(); + const char *proc_name = NAME(); + struct sw_collector_data *curr = NULL; + + if (!node) { + return; + } + + list_for_each_entry(curr, &node->list, list) { + sw_produce_wakelock_msg_i(cpu, curr, name, type, timeout, pid, + tid, proc_name); + } +}; + +DEFINE_PROBE_FUNCTION(PROBE_WAKE_LOCK_PARAMS) +{ + int cpu = RAW_CPU(); + static struct sw_trace_notifier_data *node; + enum sw_kernel_wakelock_type type = SW_WAKE_LOCK; + u64 timeout = 0; +#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 4, 0) + const char *name = lock->name; +#endif + + if (unlikely(node == NULL)) { + node = GET_COLLECTOR_TRACE_NODE(SW_TRACE_ID_WAKE_LOCK); + pw_pr_debug("NODE = %p\n", node); + } +#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 4, 0) + /* + * Was this wakelock acquired with a timeout i.e. + * is this an auto expire wakelock? + */ + if (lock->flags & (1U << 10)) { + type = SW_WAKE_LOCK_TIMEOUT; + timeout = jiffies_to_msecs(lock->expires - jiffies); + } +#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(3,4,0) */ + DO_PER_CPU_OVERHEAD_FUNC(sw_handle_wakelock_i, cpu, node, name, + (int)type, timeout); +}; + +/* + * 11. WAKE unlock / WAKEUP source deactivate. + */ +DEFINE_PROBE_FUNCTION(PROBE_WAKE_UNLOCK_PARAMS) +{ + int cpu = RAW_CPU(); + static struct sw_trace_notifier_data *node; + enum sw_kernel_wakelock_type type = SW_WAKE_UNLOCK; +#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 4, 0) + const char *name = lock->name; +#endif + + if (unlikely(node == NULL)) { + node = GET_COLLECTOR_TRACE_NODE(SW_TRACE_ID_WAKE_UNLOCK); + pw_pr_debug("NODE = %p\n", node); + } + DO_PER_CPU_OVERHEAD_FUNC(sw_handle_wakelock_i, cpu, node, name, + (int)type, 0 /*timeout*/); +}; +#endif /* CONFIG_ANDROID */ + +/* + * 12. 
WORKQUEUE + */ +DEFINE_PROBE_FUNCTION(PROBE_WORKQUEUE_PARAMS) +{ + int cpu = RAW_CPU(); + static struct sw_trace_notifier_data *node; + struct sw_collector_data *curr = NULL; + + if (unlikely(node == NULL)) { + node = GET_COLLECTOR_TRACE_NODE( + SW_TRACE_ID_WORKQUEUE_EXECUTE_START); + pw_pr_debug("NODE = %p\n", node); + } + + if (!node || !SHOULD_PRODUCE_WAKEUP_SAMPLE(cpu)) { + return; + } + list_for_each_entry(curr, &node->list, list) { + DO_PER_CPU_OVERHEAD_FUNC(sw_handle_workqueue_wakeup_helper_i, + cpu, curr); + } +}; + +/* + * 13. SCHED switch + */ +DEFINE_PROBE_FUNCTION(PROBE_SCHED_SWITCH_PARAMS) +{ + DO_PER_CPU_OVERHEAD_FUNC(sw_handle_sched_switch_helper_i); +}; + +/* + * 1. SUSPEND notifier + */ +static void sw_send_pm_notification_i(int value) +{ + struct sw_driver_msg *msg = NULL; + size_t buffer_len = sizeof(*msg) + sizeof(value); + char *buffer = vmalloc(buffer_len); + + if (!buffer) { + pw_pr_error( + "couldn't allocate memory when sending suspend notification!\n"); + return; + } + msg = (struct sw_driver_msg *)buffer; + msg->tsc = sw_timestamp(); + msg->cpuidx = RAW_CPU(); + msg->plugin_id = 0; /* "0" indicates a system message */ + msg->metric_id = 1; /* "1" indicates a suspend/resume message (TODO) */ + msg->msg_id = 0; + /* don't care; TODO: use the 'msg_id' to encode the 'value'? */ + msg->payload_len = sizeof(value); + msg->p_payload = buffer + sizeof(*msg); + *((int *)msg->p_payload) = value; + if (sw_produce_generic_msg(msg, SW_WAKEUP_ACTION_DIRECT)) { + pw_pr_error("couldn't produce generic message!\n"); + } + vfree(buffer); +} + +static u64 sw_pm_enter_tsc; +static bool sw_is_reset_i(void) +{ + /* + * TODO: rely on checking the IA32_FIXED_CTR2 instead? + */ + u64 curr_tsc = sw_tscval(); + bool is_reset = sw_pm_enter_tsc > curr_tsc; + + pw_pr_force("DEBUG: curr tsc = %llu, prev tsc = %llu, is reset = %s\n", + curr_tsc, sw_pm_enter_tsc, is_reset ? 
"true" : "false"); + + return is_reset; +} + +static void sw_probe_pm_helper_i(int id, int both_id, bool is_enter, + enum sw_pm_action action, enum sw_pm_mode mode) +{ + struct sw_trace_notifier_data *node = GET_COLLECTOR_NOTIFIER_NODE(id); + struct sw_trace_notifier_data *both_node = + GET_COLLECTOR_NOTIFIER_NODE(both_id); + struct sw_trace_notifier_data *reset_node = + GET_COLLECTOR_NOTIFIER_NODE(SW_NOTIFIER_ID_COUNTER_RESET); + if (is_enter) { + /* + * Entering HIBERNATION/SUSPEND + */ + sw_pm_enter_tsc = sw_tscval(); + } else { + /* + * Exitting HIBERNATION/SUSPEND + */ + if (sw_is_reset_i() && reset_node) { + sw_handle_reset_messages_i(reset_node); + } + } + if (node) { + sw_handle_trace_notifier_i(node); + } + if (both_node) { + sw_handle_trace_notifier_i(both_node); + } + /* Send the suspend-resume notification */ + sw_send_pm_notification_i(SW_PM_VALUE(mode, action)); +} + +static bool sw_is_suspend_via_firmware(void) +{ +#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 4, 0) + /* 'pm_suspend_via_firmware' only available in kernel >= 4.4 */ + return pm_suspend_via_firmware(); +#endif + return true; +} + +static int sw_probe_pm_notifier_i(struct notifier_block *block, + unsigned long state, + void *dummy) +{ + static const struct { + enum sw_pm_action action; + int node_id; + int both_id; + bool is_enter; + } pm_data[PM_POST_RESTORE] = { + [PM_HIBERNATION_PREPARE] = { SW_PM_ACTION_HIBERNATE_ENTER, + SW_NOTIFIER_ID_HIBERNATE_ENTER, + SW_NOTIFIER_ID_HIBERNATE, true }, + [PM_POST_HIBERNATION] = { SW_PM_ACTION_HIBERNATE_EXIT, + SW_NOTIFIER_ID_HIBERNATE_EXIT, + SW_NOTIFIER_ID_HIBERNATE, false }, + [PM_SUSPEND_PREPARE] = { SW_PM_ACTION_SUSPEND_ENTER, + SW_NOTIFIER_ID_SUSPEND_ENTER, + SW_NOTIFIER_ID_SUSPEND, true }, + [PM_POST_SUSPEND] = { SW_PM_ACTION_SUSPEND_EXIT, + SW_NOTIFIER_ID_SUSPEND_EXIT, + SW_NOTIFIER_ID_SUSPEND, false }, + }; + enum sw_pm_action action = pm_data[state].action; + enum sw_pm_mode mode = sw_is_suspend_via_firmware() ? 
+ SW_PM_MODE_FIRMWARE : + SW_PM_MODE_NONE; + if (action != SW_PM_ACTION_NONE) { + int node_id = pm_data[state].node_id, + both_id = pm_data[state].both_id; + bool is_enter = pm_data[state].is_enter; + + sw_probe_pm_helper_i(node_id, both_id, is_enter, action, mode); + } else { + /* Not supported */ + pw_pr_error( + "ERROR: unknown state %lu passed to SWA pm notifier!\n", + state); + } + return NOTIFY_DONE; +} + +static void sw_store_topology_change_i(enum cpu_action type, + int cpu, int core_id, + int pkg_id) +{ + struct sw_topology_node *node = sw_kmalloc(sizeof(*node), GFP_ATOMIC); + + if (!node) { + pw_pr_error( + "couldn't allocate a node for topology change tracking!\n"); + return; + } + node->change.timestamp = sw_timestamp(); + node->change.type = type; + node->change.cpu = cpu; + node->change.core = core_id; + node->change.pkg = pkg_id; + + SW_LIST_ADD(&sw_topology_list, node, list); + ++sw_num_topology_entries; +} + +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 10, 0) +int sw_probe_hotplug_notifier_i(struct notifier_block *block, + unsigned long action, void *pcpu) +{ + unsigned int cpu = (unsigned long)pcpu; + unsigned int pkg_id = topology_physical_package_id(cpu); + unsigned int core_id = topology_core_id(cpu); + + switch (action) { + case CPU_UP_PREPARE: + case CPU_UP_PREPARE_FROZEN: + /* CPU is coming online -- store top change */ + sw_store_topology_change_i(SW_CPU_ACTION_ONLINE_PREPARE, cpu, + core_id, pkg_id); + pw_pr_debug( + "DEBUG: SoC Watch has cpu %d (phys = %d, core = %d) preparing to come online at tsc = %llu! Current cpu = %d\n", + cpu, pkg_id, core_id, sw_timestamp(), RAW_CPU()); + break; + case CPU_ONLINE: + case CPU_ONLINE_FROZEN: + /* CPU is online -- first store top change + * then take BEGIN snapshot + */ + sw_store_topology_change_i(SW_CPU_ACTION_ONLINE, cpu, core_id, + pkg_id); + sw_process_snapshot_on_cpu(SW_WHEN_TYPE_BEGIN, cpu); + pw_pr_debug( + "DEBUG: SoC Watch has cpu %d (phys = %d, core = %d) online at tsc = %llu! 
Current cpu = %d\n", + cpu, pkg_id, core_id, sw_timestamp(), RAW_CPU()); + break; + case CPU_DOWN_PREPARE: + case CPU_DOWN_PREPARE_FROZEN: + /* CPU is going offline -- take END snapshot */ + sw_process_snapshot_on_cpu(SW_WHEN_TYPE_END, cpu); + pw_pr_debug( + "DEBUG: SoC Watch has cpu %d preparing to go offline at tsc = %llu! Current cpu = %d\n", + cpu, sw_timestamp(), RAW_CPU()); + break; + case CPU_DEAD: + case CPU_DEAD_FROZEN: + /* CPU is offline -- store top change */ + sw_store_topology_change_i(SW_CPU_ACTION_OFFLINE, cpu, core_id, + pkg_id); + pw_pr_debug( + "DEBUG: SoC Watch has cpu %d offlined at tsc = %llu! Current cpu = %d\n", + cpu, sw_timestamp(), RAW_CPU()); + break; + default: + break; + } + return NOTIFY_OK; +}; +#else +static void sw_probe_cpuhp_helper_i(unsigned int cpu, enum cpu_action action) +{ + unsigned int pkg_id = topology_physical_package_id(cpu); + unsigned int core_id = topology_core_id(cpu); + + switch (action) { + case SW_CPU_ACTION_ONLINE_PREPARE: + /* CPU is coming online -- store top change */ + sw_store_topology_change_i(action, cpu, core_id, pkg_id); + break; + case SW_CPU_ACTION_ONLINE: + /* CPU is online -- first store top change + * then take BEGIN snapshot + */ + sw_store_topology_change_i(action, cpu, core_id, pkg_id); + sw_process_snapshot_on_cpu(SW_WHEN_TYPE_BEGIN, cpu); + break; + case SW_CPU_ACTION_OFFLINE: + /* CPU is preparing to go offline -- take + * END snapshot then store top change + */ + sw_process_snapshot_on_cpu(SW_WHEN_TYPE_END, cpu); + sw_store_topology_change_i(action, cpu, core_id, pkg_id); + break; + default: + break; + } +} + +static int sw_probe_cpu_offline_i(unsigned int cpu) +{ + printk(KERN_INFO "DEBUG: offline notification for cpu %u at %llu\n", + cpu, sw_tscval()); + sw_probe_cpuhp_helper_i(cpu, SW_CPU_ACTION_OFFLINE); + return 0; +} + +static int sw_probe_cpu_online_i(unsigned int cpu) +{ + printk(KERN_INFO "DEBUG: online notification for cpu %u at %llu\n", cpu, + sw_tscval()); + 
sw_probe_cpuhp_helper_i(cpu, SW_CPU_ACTION_ONLINE_PREPARE); + sw_probe_cpuhp_helper_i(cpu, SW_CPU_ACTION_ONLINE); + return 0; +} +#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(4,10,0) */ + +/* + * 2. CPUFREQ notifier + */ +static int sw_probe_cpufreq_notifier_i(struct notifier_block *block, + unsigned long state, void *data) +{ + struct cpufreq_freqs *freqs = data; + static struct sw_trace_notifier_data *node; + int cpu = freqs->cpu; + + if (state == CPUFREQ_PRECHANGE) { + pw_pr_debug( + "CPU %d reports a CPUFREQ_PRECHANGE for target CPU %d at TSC = %llu\n", + RAW_CPU(), cpu, sw_timestamp()); + if (unlikely(node == NULL)) { + node = GET_COLLECTOR_NOTIFIER_NODE( + SW_NOTIFIER_ID_CPUFREQ); + pw_pr_debug("NODE = %p\n", node); + } + /* Force an atomic context by disabling preemption */ + get_cpu(); + DO_PER_CPU_OVERHEAD_FUNC(sw_tpf_i, cpu, node); + put_cpu(); + } + return NOTIFY_DONE; +} + +/* + * 1. TPS. + */ +int sw_register_trace_cpu_idle_i(struct sw_trace_notifier_data *node) +{ +#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 38) + DO_REGISTER_SW_TRACEPOINT_PROBE(node, power_start, + sw_probe_power_start_i); +#else /* kernel version >= 2.6.38 */ + DO_REGISTER_SW_TRACEPOINT_PROBE(node, cpu_idle, sw_probe_cpu_idle_i); +#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(2,6,38) */ + return PW_SUCCESS; +}; + +int sw_unregister_trace_cpu_idle_i(struct sw_trace_notifier_data *node) +{ +#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 38) + DO_UNREGISTER_SW_TRACEPOINT_PROBE(node, power_start, + sw_probe_power_start_i); +#else /* kernel version >= 2.6.38 */ + DO_UNREGISTER_SW_TRACEPOINT_PROBE(node, cpu_idle, sw_probe_cpu_idle_i); +#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(2,6,38) */ + return PW_SUCCESS; +}; + +/* + * 2. 
TPF + */ +int sw_register_trace_cpu_frequency_i(struct sw_trace_notifier_data *node) +{ +#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 38) + DO_REGISTER_SW_TRACEPOINT_PROBE(node, power_frequency, + sw_probe_power_frequency_i); +#else /* kernel version >= 2.6.38 */ + DO_REGISTER_SW_TRACEPOINT_PROBE(node, cpu_frequency, + sw_probe_cpu_frequency_i); +#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(2,6,38) */ + return PW_SUCCESS; +}; + +int sw_unregister_trace_cpu_frequency_i(struct sw_trace_notifier_data *node) +{ +#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 38) + DO_UNREGISTER_SW_TRACEPOINT_PROBE(node, power_frequency, + sw_probe_power_frequency_i); +#else /* kernel version >= 2.6.38 */ + DO_UNREGISTER_SW_TRACEPOINT_PROBE(node, cpu_frequency, + sw_probe_cpu_frequency_i); +#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(2,6,38) */ + return PW_SUCCESS; +}; + +/* + * 3. IRQ handler entry + */ +int sw_register_trace_irq_handler_entry_i(struct sw_trace_notifier_data *node) +{ + DO_REGISTER_SW_TRACEPOINT_PROBE(node, irq_handler_entry, + sw_probe_irq_handler_entry_i); + return PW_SUCCESS; +}; + +int sw_unregister_trace_irq_handler_entry_i(struct sw_trace_notifier_data *node) +{ + DO_UNREGISTER_SW_TRACEPOINT_PROBE(node, irq_handler_entry, + sw_probe_irq_handler_entry_i); + return PW_SUCCESS; +}; + +/* + * 4. TIMER expire. + */ +int sw_register_trace_timer_expire_entry_i(struct sw_trace_notifier_data *node) +{ + DO_REGISTER_SW_TRACEPOINT_PROBE(node, timer_expire_entry, + sw_probe_timer_expire_entry_i); + return PW_SUCCESS; +}; + +int sw_unregister_trace_timer_expire_entry_i(struct sw_trace_notifier_data + *node) +{ + DO_UNREGISTER_SW_TRACEPOINT_PROBE(node, timer_expire_entry, + sw_probe_timer_expire_entry_i); + return PW_SUCCESS; +}; + +/* + * 5. HRTIMER expire. 
+ */ +int sw_register_trace_hrtimer_expire_entry_i(struct sw_trace_notifier_data + *node) +{ + DO_REGISTER_SW_TRACEPOINT_PROBE(node, hrtimer_expire_entry, + sw_probe_hrtimer_expire_entry_i); + return PW_SUCCESS; +}; + +int sw_unregister_trace_hrtimer_expire_entry_i( + struct sw_trace_notifier_data *node) +{ + DO_UNREGISTER_SW_TRACEPOINT_PROBE(node, hrtimer_expire_entry, + sw_probe_hrtimer_expire_entry_i); + return PW_SUCCESS; +}; + +/* + * 6. SCHED wakeup + */ +int sw_register_trace_sched_wakeup_i(struct sw_trace_notifier_data *node) +{ + DO_REGISTER_SW_TRACEPOINT_PROBE(node, sched_wakeup, + sw_probe_sched_wakeup_i); + return PW_SUCCESS; +}; + +int sw_unregister_trace_sched_wakeup_i(struct sw_trace_notifier_data *node) +{ + DO_UNREGISTER_SW_TRACEPOINT_PROBE(node, sched_wakeup, + sw_probe_sched_wakeup_i); + return PW_SUCCESS; +}; + +/* + * 8. PROCESS fork + */ +int sw_register_trace_sched_process_fork_i(struct sw_trace_notifier_data *node) +{ + DO_REGISTER_SW_TRACEPOINT_PROBE(node, sched_process_fork, + sw_probe_sched_process_fork_i); + return PW_SUCCESS; +}; + +int sw_unregister_trace_sched_process_fork_i(struct sw_trace_notifier_data + *node) +{ + DO_UNREGISTER_SW_TRACEPOINT_PROBE(node, sched_process_fork, + sw_probe_sched_process_fork_i); + return PW_SUCCESS; +}; + +/* + * 9. PROCESS exit + */ +int sw_register_trace_sched_process_exit_i(struct sw_trace_notifier_data *node) +{ + DO_REGISTER_SW_TRACEPOINT_PROBE(node, sched_process_exit, + sw_probe_sched_process_exit_i); + return PW_SUCCESS; +}; + +int sw_unregister_trace_sched_process_exit_i(struct sw_trace_notifier_data + *node) +{ + DO_UNREGISTER_SW_TRACEPOINT_PROBE(node, sched_process_exit, + sw_probe_sched_process_exit_i); + return PW_SUCCESS; +}; + +/* + * 10. 
THERMAL_APIC entry + */ +#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 14, 0) +int sw_register_trace_thermal_apic_entry_i(struct sw_trace_notifier_data *node) +{ + DO_REGISTER_SW_TRACEPOINT_PROBE(node, thermal_apic_entry, + sw_probe_thermal_apic_entry_i); + return PW_SUCCESS; +}; + +int sw_unregister_trace_thermal_apic_entry_i(struct sw_trace_notifier_data + *node) +{ + DO_UNREGISTER_SW_TRACEPOINT_PROBE(node, thermal_apic_entry, + sw_probe_thermal_apic_entry_i); + return PW_SUCCESS; +}; + +/* + * 10. THERMAL_APIC exit + */ +int sw_register_trace_thermal_apic_exit_i(struct sw_trace_notifier_data *node) +{ + DO_REGISTER_SW_TRACEPOINT_PROBE(node, thermal_apic_exit, + sw_probe_thermal_apic_exit_i); + return PW_SUCCESS; +}; + +int sw_unregister_trace_thermal_apic_exit_i(struct sw_trace_notifier_data *node) +{ + DO_UNREGISTER_SW_TRACEPOINT_PROBE(node, thermal_apic_exit, + sw_probe_thermal_apic_exit_i); + return PW_SUCCESS; +}; +#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(3,14,0) */ + +/* + * 11. WAKE lock / WAKEUP source activate. 
+ */ +#if IS_ENABLED(CONFIG_ANDROID) +#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 4, 0) +int sw_register_trace_wake_lock_i(struct sw_trace_notifier_data *node) +{ + DO_REGISTER_SW_TRACEPOINT_PROBE(node, wake_lock, sw_probe_wake_lock_i); + return PW_SUCCESS; +}; + +int sw_unregister_trace_wake_lock_i(struct sw_trace_notifier_data *node) +{ + DO_UNREGISTER_SW_TRACEPOINT_PROBE(node, wake_lock, + sw_probe_wake_lock_i); + return PW_SUCCESS; +}; +#else /* LINUX_VERSION_CODE >= KERNEL_VERSION(3,4,0) */ +int sw_register_trace_wakeup_source_activate_i( + struct sw_trace_notifier_data *node) +{ + DO_REGISTER_SW_TRACEPOINT_PROBE(node, wakeup_source_activate, + sw_probe_wakeup_source_activate_i); + return PW_SUCCESS; +}; + +int sw_unregister_trace_wakeup_source_activate_i( + struct sw_trace_notifier_data *node) +{ + DO_UNREGISTER_SW_TRACEPOINT_PROBE(node, wakeup_source_activate, + sw_probe_wakeup_source_activate_i); + return PW_SUCCESS; +}; +#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(3,4,0) */ + +/* + * 11. WAKE unlock / WAKEUP source deactivate. 
+ */ +#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 4, 0) +int sw_register_trace_wake_unlock_i(struct sw_trace_notifier_data *node) +{ + DO_REGISTER_SW_TRACEPOINT_PROBE(node, wake_unlock, + sw_probe_wake_unlock_i); + return PW_SUCCESS; +}; + +int sw_unregister_trace_wake_unlock_i(struct sw_trace_notifier_data *node) +{ + DO_UNREGISTER_SW_TRACEPOINT_PROBE(node, wake_unlock, + sw_probe_wake_unlock_i); + return PW_SUCCESS; +}; + +#else /* LINUX_VERSION_CODE >= KERNEL_VERSION(3,4,0) */ +int sw_register_trace_wakeup_source_deactivate_i( + struct sw_trace_notifier_data *node) +{ + DO_REGISTER_SW_TRACEPOINT_PROBE(node, wakeup_source_deactivate, + sw_probe_wakeup_source_deactivate_i); + return PW_SUCCESS; +}; + +int sw_unregister_trace_wakeup_source_deactivate_i( + struct sw_trace_notifier_data *node) +{ + DO_UNREGISTER_SW_TRACEPOINT_PROBE(node, wakeup_source_deactivate, + sw_probe_wakeup_source_deactivate_i); + return PW_SUCCESS; +}; +#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(3,4,0) */ +#endif /* CONFIG_ANDROID */ + +/* + * 12. WORKQUEUE execution. + */ +int sw_register_trace_workqueue_execution_i(struct sw_trace_notifier_data *node) +{ +#if LINUX_VERSION_CODE <= KERNEL_VERSION(2, 6, 35) + DO_REGISTER_SW_TRACEPOINT_PROBE(node, workqueue_execution, + sw_probe_workqueue_execution_i); +#else + DO_REGISTER_SW_TRACEPOINT_PROBE(node, workqueue_execute_start, + sw_probe_workqueue_execute_start_i); +#endif + return PW_SUCCESS; +}; + +int sw_unregister_trace_workqueue_execution_i( + struct sw_trace_notifier_data *node) +{ +#if LINUX_VERSION_CODE <= KERNEL_VERSION(2, 6, 35) + DO_UNREGISTER_SW_TRACEPOINT_PROBE(node, workqueue_execution, + sw_probe_workqueue_execution_i); +#else + DO_UNREGISTER_SW_TRACEPOINT_PROBE(node, workqueue_execute_start, + sw_probe_workqueue_execute_start_i); +#endif + return PW_SUCCESS; +}; + +/* + * 13. SCHED switch + */ +int sw_register_trace_sched_switch_i(struct sw_trace_notifier_data *node) +{ + /* + * Set polling tick time, in jiffies. 
+ * Used by the context switch tracepoint to decide + * if enough time has elapsed since the last + * collection point to read resources again. + */ + { + int cpu = 0; + for_each_present_cpu(cpu) { + *(&per_cpu(sw_pcpu_polling_jiff, cpu)) = jiffies; + } + } + DO_REGISTER_SW_TRACEPOINT_PROBE(node, sched_switch, + sw_probe_sched_switch_i); + return PW_SUCCESS; +}; + +int sw_unregister_trace_sched_switch_i(struct sw_trace_notifier_data *node) +{ + DO_UNREGISTER_SW_TRACEPOINT_PROBE(node, sched_switch, + sw_probe_sched_switch_i); + return PW_SUCCESS; +}; + +/* + * Notifier register/unregister functions. + */ + +/* + * 1. SUSPEND notifier. + */ +static struct notifier_block sw_pm_notifier = { + .notifier_call = &sw_probe_pm_notifier_i, +}; + +int sw_register_pm_notifier_i(struct sw_trace_notifier_data *node) +{ + register_pm_notifier(&sw_pm_notifier); + return PW_SUCCESS; +}; + +int sw_unregister_pm_notifier_i(struct sw_trace_notifier_data *node) +{ + unregister_pm_notifier(&sw_pm_notifier); + return PW_SUCCESS; +}; + +/* + * 2. CPUFREQ notifier. + */ +static struct notifier_block sw_cpufreq_notifier = { + .notifier_call = &sw_probe_cpufreq_notifier_i, +}; + +int sw_register_cpufreq_notifier_i(struct sw_trace_notifier_data *node) +{ + cpufreq_register_notifier(&sw_cpufreq_notifier, + CPUFREQ_TRANSITION_NOTIFIER); + return PW_SUCCESS; +}; + +int sw_unregister_cpufreq_notifier_i(struct sw_trace_notifier_data *node) +{ + cpufreq_unregister_notifier(&sw_cpufreq_notifier, + CPUFREQ_TRANSITION_NOTIFIER); + return PW_SUCCESS; +}; + +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 10, 0) +/* + * 3. CPU hot plug notifier. 
+ */ +struct notifier_block sw_cpu_hotplug_notifier = { + .notifier_call = &sw_probe_hotplug_notifier_i, +}; + +int sw_register_hotcpu_notifier_i(struct sw_trace_notifier_data *node) +{ + register_hotcpu_notifier(&sw_cpu_hotplug_notifier); + return PW_SUCCESS; +}; + +int sw_unregister_hotcpu_notifier_i(struct sw_trace_notifier_data *node) +{ + unregister_hotcpu_notifier(&sw_cpu_hotplug_notifier); + return PW_SUCCESS; +}; + +#else /* LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0) */ +static int sw_cpuhp_state = -1; +int sw_register_hotcpu_notifier_i(struct sw_trace_notifier_data *node) +{ + sw_cpuhp_state = cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN, + "socwatch:online", + &sw_probe_cpu_online_i, + &sw_probe_cpu_offline_i); + if (sw_cpuhp_state < 0) { + pw_pr_error("couldn't register socwatch hotplug callbacks!\n"); + return -EIO; + } + return 0; +}; + +int sw_unregister_hotcpu_notifier_i(struct sw_trace_notifier_data *node) +{ + if (sw_cpuhp_state >= 0) { + cpuhp_remove_state_nocalls((enum cpuhp_state)sw_cpuhp_state); + } + return 0; +}; +#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(4,10,0) */ + +/* + * Tracepoint extraction routines. + * Required for newer kernels (>=3.15) + */ +#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 15, 0) +static void sw_extract_tracepoint_callback(struct tracepoint *tp, void *priv) +{ + struct sw_trace_notifier_data *node = NULL; + int i = 0; + int *numStructsFound = (int *)priv; + + if (*numStructsFound == NUM_VALID_TRACEPOINTS) { + /* + * We've found all the tracepoints we need. 
+ */ + return; + } + if (tp) { + FOR_EACH_TRACEPOINT_NODE(i, node) + { + if (node->tp == NULL && node->name) { + const char *name = + sw_get_trace_notifier_kernel_name(node); + if (name && !strcmp(tp->name, name)) { + node->tp = tp; + ++*numStructsFound; + pw_pr_debug("OK, found TP %s\n", + tp->name); + } + } + } + } +}; +#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(3,15,0) */ +#endif /* CONFIG_TRACEPOINTS */ + +/* + * Retrieve the list of tracepoint structs to use + * when registering and unregistering tracepoint handlers. + */ +int sw_extract_trace_notifier_providers(void) +{ +#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 15, 0) && \ + IS_ENABLED(CONFIG_TRACEPOINTS) + int numCallbacks = 0; + + for_each_kernel_tracepoint(&sw_extract_tracepoint_callback, + &numCallbacks); + /* + * Did we get the complete list? + */ + if (numCallbacks != NUM_VALID_TRACEPOINTS) { + printk(KERN_WARNING + "WARNING: Could NOT find tracepoint structs for some tracepoints!\n"); + } +#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(3,15,0) */ + return PW_SUCCESS; +}; + +void sw_reset_trace_notifier_providers(void) +{ + /* + * Reset the wakeup flag. Not strictly required if we aren't probing + * any of the wakeup tracepoints. + */ + { + int cpu = 0; + + for_each_online_cpu(cpu) { + RESET_VALID_WAKEUP_EVENT_COUNTER(cpu); + } + } + /* + * Reset the wakeup event flag. Not strictly required if we + * aren't probing any of the wakeup tracepoints. Will be reset + * in the power_start tracepoint if user requested a c-state + * collection. 
+ */ + sw_wakeup_event_flag = true; +}; + +void sw_print_trace_notifier_provider_overheads(void) +{ + PRINT_CUMULATIVE_OVERHEAD_PARAMS(sw_tps_i, "TPS"); + PRINT_CUMULATIVE_OVERHEAD_PARAMS(sw_tpf_i, "TPF"); + PRINT_CUMULATIVE_OVERHEAD_PARAMS(sw_handle_irq_wakeup_i, "IRQ"); + PRINT_CUMULATIVE_OVERHEAD_PARAMS(sw_handle_timer_wakeup_helper_i, + "TIMER_EXPIRE"); + PRINT_CUMULATIVE_OVERHEAD_PARAMS(sw_handle_sched_wakeup_i, + "SCHED WAKEUP"); + PRINT_CUMULATIVE_OVERHEAD_PARAMS(sw_process_fork_exit_helper_i, + "PROCESS FORK/EXIT"); +#if IS_ENABLED(CONFIG_ANDROID) + PRINT_CUMULATIVE_OVERHEAD_PARAMS(sw_handle_wakelock_i, + "WAKE LOCK/UNLOCK"); +#endif /* CONFIG_ANDROID */ + PRINT_CUMULATIVE_OVERHEAD_PARAMS(sw_handle_workqueue_wakeup_helper_i, + "WORKQUEUE"); + PRINT_CUMULATIVE_OVERHEAD_PARAMS(sw_handle_sched_switch_helper_i, + "SCHED SWITCH"); +}; + +/* + * Add all trace/notifier providers. + */ +int sw_add_trace_notifier_providers(void) +{ + struct sw_trace_notifier_data *node = NULL; + int i = 0; + + FOR_EACH_TRACEPOINT_NODE(i, node) + { + if (sw_register_trace_notify_provider(node)) { + pw_pr_error("ERROR: couldn't add a trace provider!\n"); + return -EIO; + } + } + FOR_EACH_NOTIFIER_NODE(i, node) + { + if (sw_register_trace_notify_provider(node)) { + pw_pr_error( + "ERROR: couldn't add a notifier provider!\n"); + return -EIO; + } + } +#if IS_ENABLED(CONFIG_TRACEPOINTS) + /* + * Add the cpu hot plug notifier. + */ + { + if (sw_register_trace_notify_provider( + &s_hotplug_notifier_data)) { + pw_pr_error( + "ERROR: couldn't add cpu notifier provider!\n"); + return -EIO; + } + } +#endif /* CONFIG_TRACEPOINTS */ + return PW_SUCCESS; +} + +/* + * Remove previously added providers. 
+ */ +void sw_remove_trace_notifier_providers(void) +{ /* NOP */ +} diff --git a/drivers/platform/x86/socwatch/sw_tracepoint_handlers.c b/drivers/platform/x86/socwatch/sw_tracepoint_handlers.c new file mode 100644 index 0000000000000..8154f6b516c8c --- /dev/null +++ b/drivers/platform/x86/socwatch/sw_tracepoint_handlers.c @@ -0,0 +1,402 @@ +/* + + This file is provided under a dual BSD/GPLv2 license. When using or + redistributing this file, you may do so under either license. + + GPL LICENSE SUMMARY + + Copyright(c) 2014 - 2018 Intel Corporation. + + This program is free software; you can redistribute it and/or modify + it under the terms of version 2 of the GNU General Public License as + published by the Free Software Foundation. + + This program is distributed in the hope that it will be useful, but + WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + General Public License for more details. + + Contact Information: + SoC Watch Developer Team + Intel Corporation, + 1300 S Mopac Expwy, + Austin, TX 78746 + + BSD LICENSE + + Copyright(c) 2014 - 2018 Intel Corporation. + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions + are met: + + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in + the documentation and/or other materials provided with the + distribution. + * Neither the name of Intel Corporation nor the names of its + contributors may be used to endorse or promote products derived + from this software without specific prior written permission. 
+ + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +*/ +#include "sw_structs.h" +#include "sw_kernel_defines.h" +#include "sw_types.h" +#include "sw_tracepoint_handlers.h" +#include "sw_trace_notifier_provider.h" +#include "sw_mem.h" + +/* ------------------------------------------------- + * Data structures and variable definitions. + * ------------------------------------------------- + */ +struct sw_trace_list_node { + struct sw_trace_notifier_data *data; + int id; + + SW_LIST_ENTRY(list, sw_trace_list_node); +}; +static SW_DEFINE_LIST_HEAD(s_trace_list, sw_trace_list_node) = + SW_LIST_HEAD_INITIALIZER(s_trace_list); +static SW_DEFINE_LIST_HEAD(s_notifier_list, sw_trace_list_node) = + SW_LIST_HEAD_INITIALIZER(s_notifier_list); +static int s_trace_idx = -1, s_notifier_idx = -1; + +SW_DEFINE_LIST_HEAD(sw_topology_list, sw_topology_node) = + SW_LIST_HEAD_INITIALIZER(sw_topology_list); +size_t sw_num_topology_entries; + +/* ------------------------------------------------- + * Function definitions. 
+ * ------------------------------------------------- + */ +int sw_extract_tracepoints(void) +{ + return sw_extract_trace_notifier_providers(); +} + +void sw_reset_trace_notifier_lists(void) +{ + sw_reset_trace_notifier_providers(); +} + +void sw_print_trace_notifier_overheads(void) +{ + sw_print_trace_notifier_provider_overheads(); +} + +static int sw_for_each_node_i(void *list_head, + int (*func)(struct sw_trace_notifier_data *node, + void *priv), + void *priv, bool return_on_error) +{ + SW_LIST_HEAD_VAR(sw_trace_list_node) * head = list_head; + int retval = PW_SUCCESS; + struct sw_trace_list_node *lnode = NULL; + + SW_LIST_FOR_EACH_ENTRY(lnode, head, list) + { + if ((*func)(lnode->data, priv)) { + retval = -EIO; + if (return_on_error) { + break; + } + } + } + return retval; +} + +int sw_for_each_tracepoint_node(int (*func)(struct sw_trace_notifier_data *node, + void *priv), + void *priv, bool return_on_error) +{ + if (func) { + return sw_for_each_node_i(&s_trace_list, func, priv, + return_on_error); + } + return PW_SUCCESS; +} + +int sw_for_each_notifier_node(int (*func)(struct sw_trace_notifier_data *node, + void *priv), + void *priv, bool return_on_error) +{ + if (func) { + return sw_for_each_node_i(&s_notifier_list, func, priv, + return_on_error); + } + return PW_SUCCESS; +} + +/* + * Retrieve the ID for the corresponding tracepoint/notifier. 
+ */ +int sw_get_trace_notifier_id(struct sw_trace_notifier_data *tnode) +{ + struct sw_trace_list_node *lnode = NULL; + + SW_LIST_HEAD_VAR(sw_trace_list_node) * head = (void *)&s_trace_list; + if (!tnode) { + pw_pr_error( + "ERROR: cannot get ID for NULL trace/notifier data!\n"); + return -EIO; + } + if (!(tnode->type == SW_TRACE_COLLECTOR_TRACEPOINT || + tnode->type == SW_TRACE_COLLECTOR_NOTIFIER)) { + pw_pr_error( + "ERROR: cannot get ID for invalid trace/notifier data!\n"); + return -EIO; + } + if (!tnode->name || !tnode->name->abstract_name) { + pw_pr_error( + "ERROR: cannot get ID for trace/notifier data without valid name!\n"); + return -EIO; + } +#ifdef LINUX_VERSION_CODE +#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 15, 0) && \ + defined(CONFIG_TRACEPOINTS) + if (tnode->type == SW_TRACE_COLLECTOR_TRACEPOINT && + tnode->name->kernel_name && !tnode->tp) { + /* No tracepoint structure found so no ID possible */ + return -EIO; + } +#endif +#endif + if (tnode->type == SW_TRACE_COLLECTOR_NOTIFIER) { + head = (void *)&s_notifier_list; + } + SW_LIST_FOR_EACH_ENTRY(lnode, head, list) + { + struct sw_trace_notifier_data *data = lnode->data; + + if (!strcmp(data->name->abstract_name, + tnode->name->abstract_name)) { + return lnode->id; + } + } + return -1; +} +/* + * Retrieve the "kernel" name for this tracepoint/notifier. + */ +const char * +sw_get_trace_notifier_kernel_name(struct sw_trace_notifier_data *node) +{ + return node->name->kernel_name; +}; +/* + * Retrieve the "abstract" name for this tracepoint/notifier. + */ +const char * +sw_get_trace_notifier_abstract_name(struct sw_trace_notifier_data *node) +{ + return node->name->abstract_name; +}; + +/* + * Add a single TRACE/NOTIFY provider. 
+ */ +int sw_register_trace_notify_provider(struct sw_trace_notifier_data *data) +{ + struct sw_trace_list_node *lnode = NULL; + + if (!data) { + pw_pr_error( + "ERROR: cannot add NULL trace/notifier provider!\n"); + return -EIO; + } + if (!(data->type == SW_TRACE_COLLECTOR_TRACEPOINT || + data->type == SW_TRACE_COLLECTOR_NOTIFIER)) { + pw_pr_error("ERROR: cannot add invalid trace/notifier data!\n"); + return -EIO; + } + /* + * Kernel name is allowed to be NULL, but abstract name MUST be present! + */ + if (!data->name || !data->name->abstract_name) { + pw_pr_error( + "ERROR: cannot add trace/notifier provider without an abstract name!\n"); + pw_pr_error("ERROR: data->name = %p\n", data->name); + return -EIO; + } + lnode = sw_kmalloc(sizeof(*lnode), GFP_KERNEL); + if (!lnode) { + pw_pr_error( + "ERROR: couldn't allocate a list node when adding a trace/notifier provider!\n"); + return -ENOMEM; + } + lnode->data = data; + SW_LIST_ENTRY_INIT(lnode, list); + if (data->type == SW_TRACE_COLLECTOR_TRACEPOINT) { + lnode->id = ++s_trace_idx; + SW_LIST_ADD(&s_trace_list, lnode, list); + } else { + lnode->id = ++s_notifier_idx; + SW_LIST_ADD(&s_notifier_list, lnode, list); + } + return PW_SUCCESS; +} +/* + * Add all TRACE/NOTIFY providers. + */ +int sw_add_trace_notify(void) +{ + return sw_add_trace_notifier_providers(); +} + +static void sw_free_trace_notifier_list_i(void *list_head) +{ + SW_LIST_HEAD_VAR(sw_trace_list_node) * head = list_head; + while (!SW_LIST_EMPTY(head)) { + struct sw_trace_list_node *lnode = + SW_LIST_GET_HEAD_ENTRY(head, sw_trace_list_node, list); + SW_LIST_UNLINK(lnode, list); + sw_kfree(lnode); + } +} +/* + * Remove TRACE/NOTIFY providers. + */ +void sw_remove_trace_notify(void) +{ + /* + * Free all nodes. + */ + sw_free_trace_notifier_list_i(&s_trace_list); + sw_free_trace_notifier_list_i(&s_notifier_list); + /* + * Call our providers to deallocate resources. 
+ */ + sw_remove_trace_notifier_providers(); + /* + * Clear out the topology list + */ + sw_clear_topology_list(); +} + +#define REG_FLAG (void *)1 +#define UNREG_FLAG (void *)2 +static int sw_reg_unreg_node_i(struct sw_trace_notifier_data *node, + void *is_reg) +{ + if (is_reg == REG_FLAG) { + /* + * Do we have anything to collect? + * Update: or were we asked to always register? + */ + if (SW_LIST_EMPTY(&node->list) && !node->always_register) { + return PW_SUCCESS; + } + /* + * Sanity: ensure we have a register AND an + * unregister function before proceeding! + */ + if (node->probe_register == NULL || + node->probe_unregister == NULL) { + pw_pr_debug( + "WARNING: invalid trace/notifier register/unregister function for %s\n", + sw_get_trace_notifier_kernel_name(node)); + /* + * Don't flag this as an error -- + * some socwatch trace providers don't have a + * register/unregister function + */ + return PW_SUCCESS; + } + if ((*node->probe_register)(node)) { + return -EIO; + } + node->was_registered = true; + return PW_SUCCESS; + } else if (is_reg == UNREG_FLAG) { + if (node->was_registered) { + /* + * No need to check for validity of probe + * unregister function -- 'sw_register_notifiers_i()' + * would already have done so! + */ + WARN_ON((*node->probe_unregister)(node)); + node->was_registered = false; + pw_pr_debug("OK, unregistered trace/notifier for %s\n", + sw_get_trace_notifier_kernel_name(node)); + } + return PW_SUCCESS; + } + pw_pr_error("ERROR: invalid reg/unreg flag value 0x%lx\n", + (unsigned long)is_reg); + return -EIO; +} +/* + * Register all required tracepoints and notifiers. + */ +int sw_register_trace_notifiers(void) +{ + /* + * First, the tracepoints. + */ + if (sw_for_each_tracepoint_node(&sw_reg_unreg_node_i, REG_FLAG, + true /* return on error */)) { + pw_pr_error("ERROR registering some tracepoints\n"); + return -EIO; + } + /* + * And then the notifiers. 
+ */ + if (sw_for_each_notifier_node(&sw_reg_unreg_node_i, REG_FLAG, + true /* return on error */)) { + pw_pr_error("ERROR registering some tracepoints\n"); + return -EIO; + } + return PW_SUCCESS; +}; +/* + * Unregister all previously registered tracepoints and notifiers. + */ +int sw_unregister_trace_notifiers(void) +{ + /* + * First, the notifiers. + */ + if (sw_for_each_notifier_node(&sw_reg_unreg_node_i, UNREG_FLAG, + true /* return on error */)) { + pw_pr_error("ERROR registering some tracepoints\n"); + return -EIO; + } + /* + * And then the tracepoints. + */ + if (sw_for_each_tracepoint_node(&sw_reg_unreg_node_i, UNREG_FLAG, + true /* return on error */)) { + pw_pr_error("ERROR registering some tracepoints\n"); + return -EIO; + } + return PW_SUCCESS; +}; + +void sw_clear_topology_list(void) +{ + SW_LIST_HEAD_VAR(sw_topology_node) * head = &sw_topology_list; + while (!SW_LIST_EMPTY(head)) { + struct sw_topology_node *lnode = + SW_LIST_GET_HEAD_ENTRY(head, sw_topology_node, list); + pw_pr_debug("Clearing topology node for cpu %d\n", + lnode->change.cpu); + SW_LIST_UNLINK(lnode, list); + sw_kfree(lnode); + } + sw_num_topology_entries = 0; +} diff --git a/drivers/platform/x86/socwatchhv/Kconfig b/drivers/platform/x86/socwatchhv/Kconfig new file mode 100644 index 0000000000000..3226632de1fc2 --- /dev/null +++ b/drivers/platform/x86/socwatchhv/Kconfig @@ -0,0 +1,6 @@ +menuconfig INTEL_SOCWATCH_HV + depends on X86 && ACRN_VHM && ACRN_SHARED_BUFFER + tristate "SocWatch Hypervisor Driver Support" + default m + help + Say Y here to enable SocWatch hypervisor driver diff --git a/drivers/platform/x86/socwatchhv/Makefile b/drivers/platform/x86/socwatchhv/Makefile new file mode 100644 index 0000000000000..bd4b58a61f06a --- /dev/null +++ b/drivers/platform/x86/socwatchhv/Makefile @@ -0,0 +1,20 @@ +# +# Makefile for the socwatch hv driver. 
+# + +DRIVER_BASE=socwatchhv +DRIVER_MAJOR=2 +DRIVER_MINOR=0 +# basic name of driver +DRIVER_NAME=${DRIVER_BASE}${DRIVER_MAJOR}_${DRIVER_MINOR} + +HYPERVISOR=2 # ACRN + +ccflags-y += -Idrivers/ \ + -Idrivers/platform/x86/socwatchhv/inc/ \ + -DHYPERVISOR=$(HYPERVISOR) + +obj-$(CONFIG_INTEL_SOCWATCH_HV) += $(DRIVER_NAME).o + +$(DRIVER_NAME)-objs := swhv_driver.o \ + swhv_acrn.o diff --git a/drivers/platform/x86/socwatchhv/control.c b/drivers/platform/x86/socwatchhv/control.c new file mode 100644 index 0000000000000..120705e562d95 --- /dev/null +++ b/drivers/platform/x86/socwatchhv/control.c @@ -0,0 +1,142 @@ +/* + + This file is provided under a dual BSD/GPLv2 license. When using or + redistributing this file, you may do so under either license. + + GPL LICENSE SUMMARY + + Copyright(c) 2014 - 2018 Intel Corporation. + + This program is free software; you can redistribute it and/or modify + it under the terms of version 2 of the GNU General Public License as + published by the Free Software Foundation. + + This program is distributed in the hope that it will be useful, but + WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + General Public License for more details. + + Contact Information: + SoC Watch Developer Team + Intel Corporation, + 1300 S Mopac Expwy, + Austin, TX 78746 + + BSD LICENSE + + Copyright(c) 2014 - 2018 Intel Corporation. + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions + are met: + + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in + the documentation and/or other materials provided with the + distribution. 
+ * Neither the name of Intel Corporation nor the names of its + contributors may be used to endorse or promote products derived + from this software without specific prior written permission. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +*/ + +#include + +#include "control.h" +#include + +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27) +#define SMP_CALL_FUNCTION(func, ctx, retry, wait) \ + smp_call_function((func), (ctx), (wait)) +#define SMP_CALL_FUNCTION_SINGLE(cpuid, func, ctx, retry, wait) \ + smp_call_function_single((cpuid), (func), (ctx), (wait)) +#define ON_EACH_CPU(func, ctx, retry, wait) on_each_cpu((func), (ctx), (wait)) +#else +#define SMP_CALL_FUNCTION(func, ctx, retry, wait) \ + smp_call_function((func), (ctx), (retry), (wait)) +#define SMP_CALL_FUNCTION_SINGLE(cpuid, func, ctx, retry, wait) \ + smp_call_function_single((cpuid), (func), (ctx), (retry), (wait)) +#define ON_EACH_CPU(func, ctx, retry, wait) \ + on_each_cpu((func), (ctx), (retry), (wait)) +#endif + +extern int num_CPUs; +/* ------------------------------------------------------------------------- */ +/*! 
+ * @fn VOID CONTROL_Invoke_Cpu (func, ctx, arg) + * + * @brief Set up a DPC call and insert it into the queue + * + * @param IN cpu_idx - the core id to dispatch this function to + * IN func - function to be invoked by the specified core(s) + * IN ctx - pointer to the parameter block for each function + * invocation + * + * @return None + * + * Special Notes: + * + */ +extern void CONTROL_Invoke_Cpu(int cpu_idx, void (*func)(pvoid), pvoid ctx) +{ + SMP_CALL_FUNCTION_SINGLE(cpu_idx, func, ctx, 0, 1); + + return; +} + +/* ------------------------------------------------------------------------- */ +/* + * @fn VOID CONTROL_Invoke_Parallel_Service(func, ctx, blocking, exclude) + * + * @param func - function to be invoked by each core in the system + * @param ctx - pointer to the parameter block for each function + * invocation + * @param blocking - Wait for invoked function to complete + * @param exclude - exclude the current core from executing the code + * + * @returns None + * + * @brief Service routine to handle all kinds of parallel invoke on + * all CPU calls + * + * Special Notes: + * Invoke the function provided in parallel in either a blocking or + * non-blocking mode. The current core may be excluded if desired. + * NOTE - Do not call this function directly from source code. + * Use the aliases CONTROL_Invoke_Parallel(), + * CONTROL_Invoke_Parallel_NB(), or CONTROL_Invoke_Parallel_XS(). 
+ */ +extern void CONTROL_Invoke_Parallel_Service(void (*func)(pvoid), pvoid ctx, + int blocking, int exclude) +{ + if (num_CPUs == 1) { + if (!exclude) { + func(ctx); + } + return; + } + if (!exclude) { + ON_EACH_CPU(func, ctx, 0, blocking); + return; + } + + preempt_disable(); + SMP_CALL_FUNCTION(func, ctx, 0, blocking); + preempt_enable(); + + return; +} diff --git a/drivers/platform/x86/socwatchhv/inc/asm_helper.h b/drivers/platform/x86/socwatchhv/inc/asm_helper.h new file mode 100644 index 0000000000000..10e95190e4f0c --- /dev/null +++ b/drivers/platform/x86/socwatchhv/inc/asm_helper.h @@ -0,0 +1,158 @@ +/* + + This file is provided under a dual BSD/GPLv2 license. When using or + redistributing this file, you may do so under either license. + + GPL LICENSE SUMMARY + + Copyright(c) 2014 - 2018 Intel Corporation. + + This program is free software; you can redistribute it and/or modify + it under the terms of version 2 of the GNU General Public License as + published by the Free Software Foundation. + + This program is distributed in the hope that it will be useful, but + WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + General Public License for more details. + + Contact Information: + SoC Watch Developer Team + Intel Corporation, + 1300 S Mopac Expwy, + Austin, TX 78746 + + BSD LICENSE + + Copyright(c) 2014 - 2018 Intel Corporation. + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions + are met: + + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in + the documentation and/or other materials provided with the + distribution. 
+ * Neither the name of Intel Corporation nor the names of its + contributors may be used to endorse or promote products derived + from this software without specific prior written permission. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +*/ + +#ifndef _ASM_HELPER_H_ +#define _ASM_HELPER_H_ + +#include + +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 1, 0) + +#include +#include + +#else + +#ifdef CONFIG_AS_CFI + +#define CFI_STARTPROC (.cfi_startproc) +#define CFI_ENDPROC (.cfi_endproc) +#define CFI_ADJUST_CFA_OFFSET (.cfi_adjust_cfa_offset) +#define CFI_REL_OFFSET (.cfi_rel_offset) +#define CFI_RESTORE (.cfi_restore) + +#else + +.macro cfi_ignore a = 0, b = 0, c = 0, d = 0.endm + +#define CFI_STARTPROC cfi_ignore +#define CFI_ENDPROC cfi_ignore +#define CFI_ADJUST_CFA_OFFSET cfi_ignore +#define CFI_REL_OFFSET cfi_ignore +#define CFI_RESTORE cfi_ignore +#endif + +#ifdef CONFIG_X86_64 +.macro SAVE_C_REGS_HELPER + offset = 0 rax = 1 rcx = 1 r8910 = 1 r11 = 1.if \r11 movq % r11, + 6 * 8 +\offset(% rsp) CFI_REL_OFFSET r11, \offset.endif.if \r8910 movq + % r10, + 7 * 8 +\offset(% rsp) CFI_REL_OFFSET r10, \offset movq % r9, + 8 * 8 +\offset(% rsp) CFI_REL_OFFSET r9, \offset movq % r8, + 9 * 8 +\offset(% rsp) CFI_REL_OFFSET r8, \offset.endif.if \rax 
movq + % rax, + 10 * 8 +\offset(% rsp) CFI_REL_OFFSET rax, \offset.endif.if \rcx movq + % rcx, + 11 * 8 +\offset(% rsp) CFI_REL_OFFSET rcx, \offset.endif movq % rdx, + 12 * 8 +\offset(% rsp) CFI_REL_OFFSET rdx, \offset movq % rsi, + 13 * 8 +\offset(% rsp) CFI_REL_OFFSET rsi, \offset movq % rdi, + 14 * 8 +\offset(% rsp) CFI_REL_OFFSET rdi, \offset.endm.macro + SAVE_C_REGS offset = + 0 SAVE_C_REGS_HELPER \offset + , + 1, 1, 1, 1.endm.macro SAVE_EXTRA_REGS offset = 0 movq % r15, + 0 * 8 +\offset(% rsp) CFI_REL_OFFSET r15, \offset movq % r14, + 1 * 8 +\offset(% rsp) CFI_REL_OFFSET r14, \offset movq % r13, + 2 * 8 +\offset(% rsp) CFI_REL_OFFSET r13, \offset movq % r12, + 3 * 8 +\offset(% rsp) CFI_REL_OFFSET r12, \offset movq % rbp, + 4 * 8 +\offset(% rsp) CFI_REL_OFFSET rbp, \offset movq % rbx, + 5 * 8 +\offset(% rsp) CFI_REL_OFFSET rbx, \offset.endm + + .macro + RESTORE_EXTRA_REGS offset = + 0 movq 0 * 8 +\offset( + % rsp), + % r15 CFI_RESTORE r15 movq 1 * 8 +\offset(% rsp), + % r14 CFI_RESTORE r14 movq 2 * 8 +\offset(% rsp), + % r13 CFI_RESTORE r13 movq 3 * 8 +\offset(% rsp), + % r12 CFI_RESTORE r12 movq 4 * 8 +\offset(% rsp), + % rbp CFI_RESTORE rbp movq 5 * 8 +\offset(% rsp), + % rbx CFI_RESTORE rbx.endm.macro RESTORE_C_REGS_HELPER rstor_rax = 1, + rstor_rcx = 1, rstor_r11 = 1, + rstor_r8910 = 1, rstor_rdx = 1.if \rstor_r11 movq 6 * 8(% rsp), + % r11 CFI_RESTORE r11.endif.if \rstor_r8910 movq 7 * 8(% rsp), + % r10 CFI_RESTORE r10 movq 8 * 8(% rsp), + % r9 CFI_RESTORE r9 movq 9 * 8(% rsp), + % r8 CFI_RESTORE r8.endif.if \rstor_rax movq 10 * 8(% rsp), + % rax CFI_RESTORE rax.endif.if \rstor_rcx movq 11 * 8(% rsp), + % rcx CFI_RESTORE rcx.endif.if \rstor_rdx movq 12 * 8(% rsp), + % rdx CFI_RESTORE rdx.endif movq 13 * 8(% rsp), + % rsi CFI_RESTORE rsi movq 14 * 8(% rsp), + % rdi CFI_RESTORE rdi.endm.macro RESTORE_C_REGS RESTORE_C_REGS_HELPER 1, + 1, 1, 1, + 1.endm + + .macro ALLOC_PT_GPREGS_ON_STACK addskip = 0 subq $15 * + 8 +\addskip, + % rsp CFI_ADJUST_CFA_OFFSET + 
15 * 8 +\addskip.endm + + .macro REMOVE_PT_GPREGS_FROM_STACK + addskip = 0 addq $15 * 8 +\addskip, + % rsp CFI_ADJUST_CFA_OFFSET - + (15 * 8 +\addskip) + .endm + + .macro SAVE_ALL ALLOC_PT_GPREGS_ON_STACK SAVE_C_REGS + SAVE_EXTRA_REGS + .endm + + .macro RESTORE_ALL RESTORE_EXTRA_REGS RESTORE_C_REGS + REMOVE_PT_GPREGS_FROM_STACK.endm +#endif /*CONFIG_X86_64 */ +#endif + +#endif diff --git a/drivers/platform/x86/socwatchhv/inc/control.h b/drivers/platform/x86/socwatchhv/inc/control.h new file mode 100644 index 0000000000000..7403150dd6796 --- /dev/null +++ b/drivers/platform/x86/socwatchhv/inc/control.h @@ -0,0 +1,194 @@ +/* + + This file is provided under a dual BSD/GPLv2 license. When using or + redistributing this file, you may do so under either license. + + GPL LICENSE SUMMARY + + Copyright(c) 2014 - 2018 Intel Corporation. + + This program is free software; you can redistribute it and/or modify + it under the terms of version 2 of the GNU General Public License as + published by the Free Software Foundation. + + This program is distributed in the hope that it will be useful, but + WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + General Public License for more details. + + Contact Information: + SoC Watch Developer Team + Intel Corporation, + 1300 S Mopac Expwy, + Austin, TX 78746 + + BSD LICENSE + + Copyright(c) 2014 - 2018 Intel Corporation. + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions + are met: + + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in + the documentation and/or other materials provided with the + distribution. 
+ * Neither the name of Intel Corporation nor the names of its + contributors may be used to endorse or promote products derived + from this software without specific prior written permission. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +*/ + +#ifndef _CONTROL_H_ +#define _CONTROL_H_ + +#include +#include +#include +#include + +#include "swhv_driver.h" +/**************************************************************************** + ** Handy Short cuts + ***************************************************************************/ + +typedef void *pvoid; +#define TRUE 1 +#define FALSE 0 +/* + * These routines have macros defined in asm/system.h + */ +#define SYS_Local_Irq_Enable() local_irq_enable() +#define SYS_Local_Irq_Disable() local_irq_disable() +#define SYS_Local_Irq_Save(flags) local_irq_save(flags) +#define SYS_Local_Irq_Restore(flags) local_irq_restore(flags) + +/* + * CONTROL_THIS_CPU() + * Parameters + * None + * Returns + * CPU number of the processor being executed on + * + */ +#define CONTROL_THIS_CPU() smp_processor_id() + +/**************************************************************************** + ** Interface definitions + ***************************************************************************/ + +/* + * Execution 
Control Functions + */ + +extern void CONTROL_Invoke_Cpu(s32 cpuid, void (*func)(pvoid), pvoid ctx); + +/* + * @fn VOID CONTROL_Invoke_Parallel_Service(func, ctx, blocking, exclude) + * + * @param func - function to be invoked by each core in the system + * @param ctx - pointer to the parameter block for each function + * invocation + * @param blocking - Wait for invoked function to complete + * @param exclude - exclude the current core from executing the code + * + * @returns none + * + * @brief Service routine to handle all kinds of parallel invoke on + * all CPU calls + * + * Special Notes: + * Invoke the function provided in parallel in either a + * blocking/non-blocking mode. + * The current core may be excluded if desired. + * NOTE - Do not call this function directly from source code. + * Use the aliases + * CONTROL_Invoke_Parallel(), CONTROL_Invoke_Parallel_NB(), + * CONTROL_Invoke_Parallel_XS(). + * + */ +extern void CONTROL_Invoke_Parallel_Service(void (*func)(pvoid), pvoid ctx, + s32 blocking, s32 exclude); + +/* + * @fn VOID CONTROL_Invoke_Parallel(func, ctx) + * + * @param func - function to be invoked by each core in the system + * @param ctx - pointer to the parameter block for each function + * invocation + * + * @returns none + * + * @brief Invoke the named function in parallel. Wait for all the + * functions to complete. + * + * Special Notes: + * Invoke the function named in parallel, including the CPU + * that the control is being invoked on + * + * Macro built on the service routine + * + */ +#define CONTROL_Invoke_Parallel(a, b) \ + CONTROL_Invoke_Parallel_Service((a), (b), TRUE, FALSE) + +/* + * @fn VOID CONTROL_Invoke_Parallel_NB(func, ctx) + * + * @param func - function to be invoked by each core in the system + * @param ctx - pointer to the parameter block for each function + * invocation + * + * @returns none + * + * @brief Invoke the named function in parallel. DO NOT Wait for all + * the functions to complete. 
+ * + * Special Notes: + * Invoke the function named in parallel, including the CPU + * that the control is being invoked on + * + * Macro built on the service routine + * + */ +#define CONTROL_Invoke_Parallel_NB(a, b) \ + CONTROL_Invoke_Parallel_Service((a), (b), FALSE, FALSE) + +/* + * @fn VOID CONTROL_Invoke_Parallel_XS(func, ctx) + * + * @param func - function to be invoked by each core in the system + * @param ctx - pointer to the parameter block for each function + * invocation + * + * @returns none + * + * @brief Invoke the named function in parallel. Wait for all + * the functions to complete. + * + * Special Notes: + * Invoke the function named in parallel, excluding the CPU + * that the control is being invoked on + * + * Macro built on the service routine + * + */ +#define CONTROL_Invoke_Parallel_XS(a, b) \ + CONTROL_Invoke_Parallel_Service((a), (b), TRUE, TRUE) + +#endif diff --git a/drivers/platform/x86/socwatchhv/inc/pw_types.h b/drivers/platform/x86/socwatchhv/inc/pw_types.h new file mode 100644 index 0000000000000..8b56e5c265dca --- /dev/null +++ b/drivers/platform/x86/socwatchhv/inc/pw_types.h @@ -0,0 +1,132 @@ +/* + + This file is provided under a dual BSD/GPLv2 license. When using or + redistributing this file, you may do so under either license. + + GPL LICENSE SUMMARY + + Copyright(c) 2014 - 2018 Intel Corporation. + + This program is free software; you can redistribute it and/or modify + it under the terms of version 2 of the GNU General Public License as + published by the Free Software Foundation. + + This program is distributed in the hope that it will be useful, but + WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + General Public License for more details. + + Contact Information: + SoC Watch Developer Team + Intel Corporation, + 1300 S Mopac Expwy, + Austin, TX 78746 + + BSD LICENSE + + Copyright(c) 2014 - 2018 Intel Corporation. 
+ + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions + are met: + + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in + the documentation and/or other materials provided with the + distribution. + * Neither the name of Intel Corporation nor the names of its + contributors may be used to endorse or promote products derived + from this software without specific prior written permission. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +*/ + +#ifndef _PW_TYPES_H_ +#define _PW_TYPES_H_ + +#if defined(__linux__) || defined(__QNX__) + +#ifndef __KERNEL__ +/* + * Called from Ring-3. + */ +#include /* Grab 'uint64_t' etc. */ +#include /* Grab 'pid_t' */ +/* + * UNSIGNED types... + */ +typedef uint8_t u8; +typedef uint16_t u16; +typedef uint32_t u32; +typedef uint64_t u64; +/* + * SIGNED types... 
+ */ +typedef int8_t s8; +typedef int16_t s16; +typedef int32_t s32; +typedef int64_t s64; + +#endif /* __KERNEL__ */ + +#elif defined(_WIN32) +/* + * UNSIGNED types... + */ +typedef unsigned char u8; +typedef unsigned short u16; +typedef unsigned int u32; +typedef unsigned long long u64; +/* + * SIGNED types... + */ +typedef signed char s8; +typedef signed short s16; +typedef signed int s32; +typedef signed long long s64; +typedef s32 pid_t; +typedef s32 ssize_t; + +#endif /* _WIN32 */ + +/* ************************************ + * Common to both operating systems. + * ************************************ + */ +/* + * UNSIGNED types... + */ +typedef u8 pw_u8_t; +typedef u16 pw_u16_t; +typedef u32 pw_u32_t; +typedef u64 pw_u64_t; + +/* + * SIGNED types... + */ +typedef s8 pw_s8_t; +typedef s16 pw_s16_t; +typedef s32 pw_s32_t; +typedef s64 pw_s64_t; + +typedef pid_t pw_pid_t; + +typedef void *pvoid; + +#define TRUE 1 +#define FALSE 0 + +#endif /* _PW_TYPES_H_ */ diff --git a/drivers/platform/x86/socwatchhv/inc/pw_version.h b/drivers/platform/x86/socwatchhv/inc/pw_version.h new file mode 100644 index 0000000000000..7f1a40d82d71a --- /dev/null +++ b/drivers/platform/x86/socwatchhv/inc/pw_version.h @@ -0,0 +1,67 @@ +/* + + This file is provided under a dual BSD/GPLv2 license. When using or + redistributing this file, you may do so under either license. + + GPL LICENSE SUMMARY + + Copyright(c) 2014 - 2018 Intel Corporation. + + This program is free software; you can redistribute it and/or modify + it under the terms of version 2 of the GNU General Public License as + published by the Free Software Foundation. + + This program is distributed in the hope that it will be useful, but + WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + General Public License for more details. 
+ + Contact Information: + SoC Watch Developer Team + Intel Corporation, + 1300 S Mopac Expwy, + Austin, TX 78746 + + BSD LICENSE + + Copyright(c) 2014 - 2018 Intel Corporation. + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions + are met: + + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in + the documentation and/or other materials provided with the + distribution. + * Neither the name of Intel Corporation nor the names of its + contributors may be used to endorse or promote products derived + from this software without specific prior written permission. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +*/ + +#ifndef _PW_VERSION_H_ +#define _PW_VERSION_H_ 1 + +/* + * SOCWatch driver version + */ +#define SWHVDRV_VERSION_MAJOR 2 +#define SWHVDRV_VERSION_MINOR 0 +#define SWHVDRV_VERSION_OTHER 0 + +#endif /* _PW_VERSION_H_ */ diff --git a/drivers/platform/x86/socwatchhv/inc/sw_defines.h b/drivers/platform/x86/socwatchhv/inc/sw_defines.h new file mode 100644 index 0000000000000..f0ef6baceb3fc --- /dev/null +++ b/drivers/platform/x86/socwatchhv/inc/sw_defines.h @@ -0,0 +1,156 @@ +/* + + This file is provided under a dual BSD/GPLv2 license. When using or + redistributing this file, you may do so under either license. + + GPL LICENSE SUMMARY + + Copyright(c) 2014 - 2018 Intel Corporation. + + This program is free software; you can redistribute it and/or modify + it under the terms of version 2 of the GNU General Public License as + published by the Free Software Foundation. + + This program is distributed in the hope that it will be useful, but + WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + General Public License for more details. + + Contact Information: + SoC Watch Developer Team + Intel Corporation, + 1300 S Mopac Expwy, + Austin, TX 78746 + + BSD LICENSE + + Copyright(c) 2014 - 2018 Intel Corporation. + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions + are met: + + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in + the documentation and/or other materials provided with the + distribution. + * Neither the name of Intel Corporation nor the names of its + contributors may be used to endorse or promote products derived + from this software without specific prior written permission. 
+ + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +*/ + +#ifndef _PW_DEFINES_H_ +#define _PW_DEFINES_H_ 1 + +#include "sw_version.h" + +/* *************************************************** + * Common to kernel and userspace. + * *************************************************** + */ +#define PW_SUCCESS 0 +#define PW_ERROR 1 +#define PW_SUCCESS_NO_COLLECT 2 + +/* + * Helper macro to convert 'u64' to 'unsigned long long' to avoid gcc warnings. + */ +#define TO_ULL(x) (unsigned long long)(x) +/* + * Convert an arg to 'long long' + */ +#define TO_LL(x) (long long)(x) +/* + * Convert an arg to 'unsigned long' + */ +#define TO_UL(x) (unsigned long)(x) +/* + * Helper macro for string representation of a boolean value. + */ +#define GET_BOOL_STRING(b) ((b) ? "TRUE" : "FALSE") + +/* + * Circularly increment 'i' MODULO 'l'. + * ONLY WORKS IF 'l' is (power of 2 - 1) ie. + * l == (2 ^ x) - 1 + */ +#define CIRCULAR_INC(index, mask) (((index) + 1) & (mask)) +#define CIRCULAR_ADD(index, val, mask) (((index) + (val)) & (mask)) +/* + * Circularly decrement 'i'. + */ +#define CIRCULAR_DEC(i, m) \ + ({ \ + int __tmp1 = (i); \ + if (--__tmp1 < 0) \ + __tmp1 = (m); \ + __tmp1; \ + }) +/* + * Retrieve size of an array. 
+ */ +#define SW_ARRAY_SIZE(array) (sizeof(array) / sizeof((array)[0])) +/* + * Should the driver count number of dropped samples? + */ +#define DO_COUNT_DROPPED_SAMPLES 1 +/* + * Extract F/W major, minor versions. + * Assumes version numbers are 8b unsigned ints. + */ +#define SW_GET_SCU_FW_VERSION_MAJOR(ver) (((ver) >> 8) & 0xff) +#define SW_GET_SCU_FW_VERSION_MINOR(ver) ((ver)&0xff) +/* + * Max size of process name retrieved from kernel. + */ +#define SW_MAX_PROC_NAME_SIZE 16 + +/* + * Number of SOCPERF counters. + * Needed by both Ring-0 and Ring-3 + */ +#define SW_NUM_SOCPERF_COUNTERS 9 + +/* + * Max size of process name retrieved from kernel space. + */ +#define SW_MAX_PROC_NAME_SIZE 16 +/* + * Max size of kernel wakelock name. + */ +#define SW_MAX_KERNEL_WAKELOCK_NAME_SIZE 100 + +/* Data value read when a telemetry data read fails. */ +#define SW_TELEM_READ_FAIL_VALUE 0xF00DF00DF00DF00D + +#ifdef SWW_MERGE +typedef enum { + SW_STOP_EVENT = 0, + SW_CS_EXIT_EVENT, + SW_COUNTER_RESET_EVENT, + SW_COUNTER_HOTKEY_EVENT, + SW_MAX_COLLECTION_EVENT +} collector_stop_event_t; +#endif /* SWW_MERGE */ + +#define MAX_UNSIGNED_16_BIT_VALUE 0xFFFF +#define MAX_UNSIGNED_24_BIT_VALUE 0xFFFFFF +#define MAX_UNSIGNED_32_BIT_VALUE 0xFFFFFFFF +#define MAX_UNSIGNED_64_BIT_VALUE 0xFFFFFFFFFFFFFFFF + +#endif /* _PW_DEFINES_H_ */ diff --git a/drivers/platform/x86/socwatchhv/inc/sw_ioctl.h b/drivers/platform/x86/socwatchhv/inc/sw_ioctl.h new file mode 100644 index 0000000000000..1f8e903a0e1c1 --- /dev/null +++ b/drivers/platform/x86/socwatchhv/inc/sw_ioctl.h @@ -0,0 +1,303 @@ +/* + + This file is provided under a dual BSD/GPLv2 license. When using or + redistributing this file, you may do so under either license. + + GPL LICENSE SUMMARY + + Copyright(c) 2014 - 2018 Intel Corporation. + + This program is free software; you can redistribute it and/or modify + it under the terms of version 2 of the GNU General Public License as + published by the Free Software Foundation. 
+ + This program is distributed in the hope that it will be useful, but + WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + General Public License for more details. + + Contact Information: + SoC Watch Developer Team + Intel Corporation, + 1300 S Mopac Expwy, + Austin, TX 78746 + + BSD LICENSE + + Copyright(c) 2014 - 2018 Intel Corporation. + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions + are met: + + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in + the documentation and/or other materials provided with the + distribution. + * Neither the name of Intel Corporation nor the names of its + contributors may be used to endorse or promote products derived + from this software without specific prior written permission. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+
+*/
+#ifndef __SW_IOCTL_H__
+#define __SW_IOCTL_H__ 1
+
+#if defined(__linux__) || defined(__QNX__)
+#if __KERNEL__
+#include <linux/ioctl.h>
+#if defined(CONFIG_COMPAT) && defined(CONFIG_X86_64)
+#include <linux/compat.h>
+#include <asm/compat.h>
+#endif /* COMPAT && x64 */
+#else /* !__KERNEL__ */
+#include <sys/ioctl.h>
+#endif /* __KERNEL__ */
+#endif /* __linux__ */
+/*
+ * Ensure we pull in definition of 'DO_COUNT_DROPPED_SAMPLES'!
+ */
+#include "sw_defines.h"
+
+#ifdef ONECORE
+#ifndef __KERNEL__
+#include <winioctl.h>
+#endif /* __KERNEL__ */
+#endif /* ONECORE */
+
+/*
+ * The APWR-specific IOCTL magic
+ * number -- used to ensure IOCTLs
+ * are delivered to the correct
+ * driver.
+ */
+/* #define APWR_IOCTL_MAGIC_NUM 0xdead */
+#define APWR_IOCTL_MAGIC_NUM 100
+
+/*
+ * The name of the device file
+ */
+/* #define DEVICE_FILE_NAME "/dev/pw_driver_char_dev" */
+#define PW_DEVICE_FILE_NAME "/dev/apwr_driver_char_dev"
+#define PW_DEVICE_NAME "apwr_driver_char_dev"
+
+enum sw_ioctl_cmd {
+	sw_ioctl_cmd_none = 0,
+	sw_ioctl_cmd_config,
+	sw_ioctl_cmd_cmd,
+	sw_ioctl_cmd_poll,
+	sw_ioctl_cmd_immediate_io,
+	sw_ioctl_cmd_scu_version,
+	sw_ioctl_cmd_read_immediate,
+	sw_ioctl_cmd_driver_version,
+	sw_ioctl_cmd_avail_trace,
+	sw_ioctl_cmd_avail_notify,
+	sw_ioctl_cmd_avail_collect,
+	sw_ioctl_cmd_topology_changes,
+};
+/*
+ * The actual IOCTL commands.
+ *
+ * From the kernel documentation:
+ * "_IOR" ==> Read IOCTL
+ * "_IOW" ==> Write IOCTL
+ * "_IOWR" ==> Read/Write IOCTL
+ *
+ * Where "Read" and "Write" are from the user's perspective
+ * (similar to the file "read" and "write" calls).
+ */
+#ifdef SWW_MERGE /* Windows */
+/*
+ * Device type -- in the "User Defined" range."
+ */ +#define POWER_I_CONF_TYPE 40000 + +/* List assigned tracepoint id */ +#define CSIR_TRACEPOINT_ID_MASK 1 +#define DEVICE_STATE_TRACEPOINT_ID_MASK 2 +#define CSIR_SEPARATE_TRACEPOINT_ID_MASK 3 +#define RESET_TRACEPOINT_ID_MASK 4 +#define DISPLAY_ON_TRACEPOINT_ID_MASK 5 + +#ifdef SWW_MERGE +/* + * TELEM BAR CONFIG + */ +#define MAX_TELEM_BAR_CFG 3 +#define TELEM_MCHBAR_CFG 0 +#define TELEM_IPC1BAR_CFG 1 +#define TELEM_SSRAMBAR_CFG 2 +#endif + +/* + * The IOCTL function codes from 0x800 to 0xFFF are for customer use. + */ +#define PW_IOCTL_CONFIG \ + CTL_CODE(POWER_I_CONF_TYPE, 0x900, METHOD_BUFFERED, FILE_ANY_ACCESS) +#define PW_IOCTL_START_COLLECTION \ + CTL_CODE(POWER_I_CONF_TYPE, 0x901, METHOD_BUFFERED, FILE_ANY_ACCESS) +#define PW_IOCTL_STOP_COLLECTION \ + CTL_CODE(POWER_I_CONF_TYPE, 0x902, METHOD_BUFFERED, FILE_ANY_ACCESS) + +/* TODO: pause, resume, cancel not supported yet */ +#define PW_IOCTL_PAUSE_COLLECTION \ + CTL_CODE(POWER_I_CONF_TYPE, 0x903, METHOD_BUFFERED, FILE_ANY_ACCESS) +#define PW_IOCTL_RESUME_COLLECTION \ + CTL_CODE(POWER_I_CONF_TYPE, 0x904, METHOD_BUFFERED, FILE_ANY_ACCESS) +#define PW_IOCTL_CANCEL_COLLECTION \ + CTL_CODE(POWER_I_CONF_TYPE, 0x905, METHOD_BUFFERED, FILE_ANY_ACCESS) + +#define PW_IOCTL_GET_PROCESSOR_GROUP_TOPOLOGY \ + CTL_CODE(POWER_I_CONF_TYPE, 0x906, METHOD_BUFFERED, FILE_ANY_ACCESS) +#define PW_IOCTL_TOPOLOGY \ + CTL_CODE(POWER_I_CONF_TYPE, 0x907, METHOD_BUFFERED, FILE_ANY_ACCESS) +#define PW_IOCTL_GET_AVAILABLE_COLLECTORS \ + CTL_CODE(POWER_I_CONF_TYPE, 0x908, METHOD_BUFFERED, FILE_ANY_ACCESS) +#define PW_IOCTL_IMMEDIATE_IO \ + CTL_CODE(POWER_I_CONF_TYPE, 0x909, METHOD_BUFFERED, FILE_ANY_ACCESS) +#define PW_IOCTL_DRV_CLEANUP \ + CTL_CODE(POWER_I_CONF_TYPE, 0x90A, METHOD_BUFFERED, FILE_ANY_ACCESS) +#define PW_IOCTL_SET_COLLECTION_EVENT \ + CTL_CODE(POWER_I_CONF_TYPE, 0x90B, METHOD_BUFFERED, FILE_ANY_ACCESS) +#define PW_IOCTL_TRY_STOP_EVENT \ + CTL_CODE(POWER_I_CONF_TYPE, 0x90C, METHOD_BUFFERED, FILE_ANY_ACCESS) +#define 
PW_IOCTL_SET_PCH_ACTIVE_INTERVAL \ + CTL_CODE(POWER_I_CONF_TYPE, 0x90D, METHOD_BUFFERED, FILE_ANY_ACCESS) +#define PW_IOCTL_SET_TELEM_BAR \ + CTL_CODE(POWER_I_CONF_TYPE, 0x90E, METHOD_BUFFERED, FILE_ANY_ACCESS) +#define PW_IOCTL_METADATA \ + CTL_CODE(POWER_I_CONF_TYPE, 0x90F, METHOD_BUFFERED, FILE_ANY_ACCESS) +#define PW_IOCTL_SET_GBE_INTERVAL \ + CTL_CODE(POWER_I_CONF_TYPE, 0x910, METHOD_BUFFERED, FILE_ANY_ACCESS) +#define PW_IOCTL_ENABLE_COLLECTION \ + CTL_CODE(POWER_I_CONF_TYPE, 0x911, METHOD_BUFFERED, FILE_ANY_ACCESS) +#define PW_IOCTL_DISABLE_COLLECTION \ + CTL_CODE(POWER_I_CONF_TYPE, 0x912, METHOD_BUFFERED, FILE_ANY_ACCESS) +#define PW_IOCTL_DRIVER_BUILD_DATE \ + CTL_CODE(POWER_I_CONF_TYPE, 0x913, METHOD_BUFFERED, FILE_ANY_ACCESS) + +#elif !defined(__APPLE__) +#define PW_IOCTL_CONFIG \ + _IOW(APWR_IOCTL_MAGIC_NUM, sw_ioctl_cmd_config, \ + struct sw_driver_ioctl_arg *) +#if DO_COUNT_DROPPED_SAMPLES +#define PW_IOCTL_CMD \ + _IOWR(APWR_IOCTL_MAGIC_NUM, sw_ioctl_cmd_cmd, \ + struct sw_driver_ioctl_arg *) +#else +#define PW_IOCTL_CMD \ + _IOW(APWR_IOCTL_MAGIC_NUM, sw_ioctl_cmd_cmd, \ + struct sw_driver_ioctl_arg *) +#endif /* DO_COUNT_DROPPED_SAMPLES */ +#define PW_IOCTL_POLL _IO(APWR_IOCTL_MAGIC_NUM, sw_ioctl_cmd_poll) +#define PW_IOCTL_IMMEDIATE_IO \ + _IOWR(APWR_IOCTL_MAGIC_NUM, sw_ioctl_cmd_immediate_io, \ + struct sw_driver_ioctl_arg *) +#define PW_IOCTL_GET_SCU_FW_VERSION \ + _IOR(APWR_IOCTL_MAGIC_NUM, sw_ioctl_cmd_scu_version, \ + struct sw_driver_ioctl_arg *) +#define PW_IOCTL_READ_IMMEDIATE \ + _IOWR(APWR_IOCTL_MAGIC_NUM, sw_ioctl_cmd_read_immediate, \ + struct sw_driver_ioctl_arg *) +#define PW_IOCTL_GET_DRIVER_VERSION \ + _IOR(APWR_IOCTL_MAGIC_NUM, sw_ioctl_cmd_driver_version, \ + struct sw_driver_ioctl_arg *) +#define PW_IOCTL_GET_AVAILABLE_TRACEPOINTS \ + _IOR(APWR_IOCTL_MAGIC_NUM, sw_ioctl_cmd_avail_trace, \ + struct sw_driver_ioctl_arg *) +#define PW_IOCTL_GET_AVAILABLE_NOTIFIERS \ + _IOR(APWR_IOCTL_MAGIC_NUM, sw_ioctl_cmd_avail_notify, \ + struct 
sw_driver_ioctl_arg *) +#define PW_IOCTL_GET_AVAILABLE_COLLECTORS \ + _IOR(APWR_IOCTL_MAGIC_NUM, sw_ioctl_cmd_avail_collect, \ + struct sw_driver_ioctl_arg *) +#define PW_IOCTL_GET_TOPOLOGY_CHANGES \ + _IOR(APWR_IOCTL_MAGIC_NUM, sw_ioctl_cmd_topology_changes, \ + struct sw_driver_ioctl_arg *) +#else /* __APPLE__ */ +#define PW_IOCTL_CONFIG \ + _IOW(APWR_IOCTL_MAGIC_NUM, sw_ioctl_cmd_config, \ + struct sw_driver_ioctl_arg) +#if DO_COUNT_DROPPED_SAMPLES +#define PW_IOCTL_CMD \ + _IOWR(APWR_IOCTL_MAGIC_NUM, sw_ioctl_cmd_cmd, \ + struct sw_driver_ioctl_arg) +#else +#define PW_IOCTL_CMD \ + _IOW(APWR_IOCTL_MAGIC_NUM, sw_ioctl_cmd_cmd, struct sw_driver_ioctl_arg) +#endif /* DO_COUNT_DROPPED_SAMPLES */ +#define PW_IOCTL_POLL _IO(APWR_IOCTL_MAGIC_NUM, sw_ioctl_cmd_poll) +#define PW_IOCTL_IMMEDIATE_IO \ + _IOWR(APWR_IOCTL_MAGIC_NUM, sw_ioctl_cmd_immediate_io, \ + struct sw_driver_ioctl_arg) +#define PW_IOCTL_GET_SCU_FW_VERSION \ + _IOWR(APWR_IOCTL_MAGIC_NUM, sw_ioctl_cmd_scu_version, \ + struct sw_driver_ioctl_arg) +#define PW_IOCTL_READ_IMMEDIATE \ + _IOWR(APWR_IOCTL_MAGIC_NUM, sw_ioctl_cmd_read_immediate, \ + struct sw_driver_ioctl_arg) +#define PW_IOCTL_GET_DRIVER_VERSION \ + _IOWR(APWR_IOCTL_MAGIC_NUM, sw_ioctl_cmd_driver_version, \ + struct sw_driver_ioctl_arg) +#define PW_IOCTL_GET_AVAILABLE_TRACEPOINTS \ + _IOWR(APWR_IOCTL_MAGIC_NUM, sw_ioctl_cmd_avail_trace, \ + struct sw_driver_ioctl_arg) +#define PW_IOCTL_GET_AVAILABLE_NOTIFIERS \ + _IOWR(APWR_IOCTL_MAGIC_NUM, sw_ioctl_cmd_avail_notify, \ + struct sw_driver_ioctl_arg) +#define PW_IOCTL_GET_AVAILABLE_COLLECTORS \ + _IOWR(APWR_IOCTL_MAGIC_NUM, sw_ioctl_cmd_avail_collect, \ + struct sw_driver_ioctl_arg) +#define PW_IOCTL_GET_TOPOLOGY_CHANGES \ + _IOWR(APWR_IOCTL_MAGIC_NUM, sw_ioctl_cmd_topology_changes, \ + struct sw_driver_ioctl_arg) +#endif /* __APPLE__ */ + +/* + * 32b-compatible version of the above + * IOCTL numbers. Required ONLY for + * 32b compatibility on 64b systems, + * and ONLY by the driver. 
+ */ +#if defined(CONFIG_COMPAT) && defined(CONFIG_X86_64) +#define PW_IOCTL_CONFIG32 \ + _IOW(APWR_IOCTL_MAGIC_NUM, sw_ioctl_cmd_config, compat_uptr_t) +#if DO_COUNT_DROPPED_SAMPLES +#define PW_IOCTL_CMD32 \ + _IOWR(APWR_IOCTL_MAGIC_NUM, sw_ioctl_cmd_cmd, compat_uptr_t) +#else +#define PW_IOCTL_CMD32 \ + _IOW(APWR_IOCTL_MAGIC_NUM, sw_ioctl_cmd_cmd, compat_uptr_t) +#endif /* DO_COUNT_DROPPED_SAMPLES */ +#define PW_IOCTL_POLL32 _IO(APWR_IOCTL_MAGIC_NUM, sw_ioctl_cmd_poll) +#define PW_IOCTL_IMMEDIATE_IO32 \ + _IOWR(APWR_IOCTL_MAGIC_NUM, sw_ioctl_cmd_immediate_io, compat_uptr_t) +#define PW_IOCTL_GET_SCU_FW_VERSION32 \ + _IOR(APWR_IOCTL_MAGIC_NUM, sw_ioctl_cmd_scu_version, compat_uptr_t) +#define PW_IOCTL_READ_IMMEDIATE32 \ + _IOWR(APWR_IOCTL_MAGIC_NUM, sw_ioctl_cmd_read_immediate, compat_uptr_t) +#define PW_IOCTL_GET_DRIVER_VERSION32 \ + _IOR(APWR_IOCTL_MAGIC_NUM, sw_ioctl_cmd_driver_version, compat_uptr_t) +#define PW_IOCTL_GET_AVAILABLE_TRACEPOINTS32 \ + _IOR(APWR_IOCTL_MAGIC_NUM, sw_ioctl_cmd_avail_trace, compat_uptr_t) +#define PW_IOCTL_GET_AVAILABLE_NOTIFIERS32 \ + _IOR(APWR_IOCTL_MAGIC_NUM, sw_ioctl_cmd_avail_notify, compat_uptr_t) +#define PW_IOCTL_GET_AVAILABLE_COLLECTORS32 \ + _IOR(APWR_IOCTL_MAGIC_NUM, sw_ioctl_cmd_avail_collect, compat_uptr_t) +#define PW_IOCTL_GET_TOPOLOGY_CHANGES32 \ + _IOR(APWR_IOCTL_MAGIC_NUM, sw_ioctl_cmd_topology_changes, compat_uptr_t) +#endif /* defined(CONFIG_COMPAT) && defined(CONFIG_X86_64) */ +#endif /* __SW_IOCTL_H__ */ diff --git a/drivers/platform/x86/socwatchhv/inc/sw_kernel_defines.h b/drivers/platform/x86/socwatchhv/inc/sw_kernel_defines.h new file mode 100644 index 0000000000000..d970236df8c79 --- /dev/null +++ b/drivers/platform/x86/socwatchhv/inc/sw_kernel_defines.h @@ -0,0 +1,164 @@ +/* + + This file is provided under a dual BSD/GPLv2 license. When using or + redistributing this file, you may do so under either license. + + GPL LICENSE SUMMARY + + Copyright(c) 2014 - 2018 Intel Corporation. 
+ + This program is free software; you can redistribute it and/or modify + it under the terms of version 2 of the GNU General Public License as + published by the Free Software Foundation. + + This program is distributed in the hope that it will be useful, but + WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + General Public License for more details. + + Contact Information: + SoC Watch Developer Team + Intel Corporation, + 1300 S Mopac Expwy, + Austin, TX 78746 + + BSD LICENSE + + Copyright(c) 2014 - 2018 Intel Corporation. + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions + are met: + + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in + the documentation and/or other materials provided with the + distribution. + * Neither the name of Intel Corporation nor the names of its + contributors may be used to endorse or promote products derived + from this software without specific prior written permission. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT + OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +*/ +#ifndef _SW_KERNEL_DEFINES_H_ +#define _SW_KERNEL_DEFINES_H_ 1 + +#include "sw_defines.h" + +#if defined(__APPLE__) +#define likely(x) (x) +#define unlikely(x) (x) +#endif /* __APPLE__ */ + +#if !defined(__APPLE__) +#define CPU() (raw_smp_processor_id()) +#define RAW_CPU() (raw_smp_processor_id()) +#else +#define CPU() (cpu_number()) +#define RAW_CPU() (cpu_number()) +#endif /* __APPLE__ */ + +#define TID() (current->pid) +#define PID() (current->tgid) +#define NAME() (current->comm) +#define PKG(c) (cpu_data(c).phys_proc_id) +#define IT_REAL_INCR() (current->signal->it_real_incr.tv64) + +#define ATOMIC_CAS(ptr, old_val, new_val) \ + (cmpxchg((ptr), (old_val), (new_val)) == (old_val)) + +/* + * Should we measure overheads? + * '1' ==> YES + * '0' ==> NO + */ +#define DO_OVERHEAD_MEASUREMENTS 0 +/* + * Should we track memory usage? + * '1' ==> YES + * '0' ==> NO + */ +#define DO_TRACK_MEMORY_USAGE 0 +/* + * Are we compiling with driver profiling support + * turned ON? If YES then force 'DO_OVERHEAD_MEASUREMENTS' + * and 'DO_TRACK_MEMORY_USAGE' to be TRUE. 
+ */ +#if IS_ENABLED(CONFIG_SOCWATCH_DRIVER_PROFILING) +#if !DO_OVERHEAD_MEASUREMENTS +#undef DO_OVERHEAD_MEASUREMENTS +#define DO_OVERHEAD_MEASUREMENTS 1 +#endif /* DO_OVERHEAD_MEASUREMENTS */ +#if !DO_TRACK_MEMORY_USAGE +#undef DO_TRACK_MEMORY_USAGE +#define DO_TRACK_MEMORY_USAGE 1 +#endif /* DO_TRACK_MEMORY_USAGE */ +#endif /* CONFIG_SOCWATCH_DRIVER_PROFILING */ +/* + * Should we allow debug output. + * Set to: "1" ==> 'OUTPUT' is enabled. + * "0" ==> 'OUTPUT' is disabled. + */ +#define DO_DEBUG_OUTPUT 0 +/* + * Control whether to output driver ERROR messages. + * These are independent of the 'OUTPUT' macro + * (which controls debug messages). + * Set to '1' ==> Print driver error messages (to '/var/log/messages') + * '0' ==> Do NOT print driver error messages + */ +#define DO_PRINT_DRIVER_ERROR_MESSAGES 1 +/* + * Macros to control output printing. + */ +#if !defined(__APPLE__) +#if DO_DEBUG_OUTPUT +#define pw_pr_debug(...) printk(KERN_INFO __VA_ARGS__) +#define pw_pr_warn(...) printk(KERN_WARNING __VA_ARGS__) +#else +#define pw_pr_debug(...) +#define pw_pr_warn(...) +#endif +#define pw_pr_force(...) printk(KERN_INFO __VA_ARGS__) +#else +#if DO_DEBUG_OUTPUT +#define pw_pr_debug(...) IOLog(__VA_ARGS__) +#define pw_pr_warn(...) IOLog(__VA_ARGS__) +#else +#define pw_pr_debug(...) +#define pw_pr_warn(...) +#endif +#define pw_pr_force(...) IOLog(__VA_ARGS__) +#endif /* __APPLE__ */ + +/* + * Macro for driver error messages. + */ +#if !defined(__APPLE__) +#if (DO_PRINT_DRIVER_ERROR_MESSAGES || DO_DEBUG_OUTPUT) +#define pw_pr_error(...) printk(KERN_ERR __VA_ARGS__) +#else +#define pw_pr_error(...) +#endif +#else +#if (DO_PRINT_DRIVER_ERROR_MESSAGES || DO_DEBUG_OUTPUT) +#define pw_pr_error(...) IOLog(__VA_ARGS__) +#else +#define pw_pr_error(...) 
+#endif +#endif /* __APPLE__ */ + +#endif /* _SW_KERNEL_DEFINES_H_ */ diff --git a/drivers/platform/x86/socwatchhv/inc/sw_structs.h b/drivers/platform/x86/socwatchhv/inc/sw_structs.h new file mode 100644 index 0000000000000..baac8520e7fd1 --- /dev/null +++ b/drivers/platform/x86/socwatchhv/inc/sw_structs.h @@ -0,0 +1,527 @@ +/* + + This file is provided under a dual BSD/GPLv2 license. When using or + redistributing this file, you may do so under either license. + + GPL LICENSE SUMMARY + + Copyright(c) 2014 - 2018 Intel Corporation. + + This program is free software; you can redistribute it and/or modify + it under the terms of version 2 of the GNU General Public License as + published by the Free Software Foundation. + + This program is distributed in the hope that it will be useful, but + WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + General Public License for more details. + + Contact Information: + SoC Watch Developer Team + Intel Corporation, + 1300 S Mopac Expwy, + Austin, TX 78746 + + BSD LICENSE + + Copyright(c) 2014 - 2018 Intel Corporation. + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions + are met: + + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in + the documentation and/or other materials provided with the + distribution. + * Neither the name of Intel Corporation nor the names of its + contributors may be used to endorse or promote products derived + from this software without specific prior written permission. 
+ + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +*/ +#ifndef __SW_STRUCTS_H__ +#define __SW_STRUCTS_H__ 1 + +#include "sw_types.h" + +/* + * An enumeration of MSR types. + * Required if we want to differentiate + * between different types of MSRs. + */ +enum sw_msr_type { + SW_MSR_TYPE_THREAD, + SW_MSR_TYPE_CORE, + SW_MSR_TYPE_MODULE, + SW_MSR_TYPE_PACKAGE, + SW_MSR_TYPE_SOC, + SW_MSR_TYPE_MAX, +}; + +/* + * Convenience for a 'string' data type. + * Not strictly required. 
+ */ +#pragma pack(push, 1) +typedef struct sw_string_type { + pw_u16_t len; + char data[1]; +} sw_string_type_t; +#pragma pack(pop) +#define SW_STRING_TYPE_HEADER_SIZE() \ + (sizeof(struct sw_string_type) - sizeof(char[1])) + +#pragma pack(push, 1) +struct sw_key_value_payload { + pw_u16_t m_numKeyValuePairs; + char data[1]; +}; +#pragma pack(pop) +#define SW_KEY_VALUE_PAYLOAD_HEADER_SIZE() \ + (sizeof(struct sw_key_value_payload) - sizeof(char[1])) + +typedef enum sw_kernel_wakelock_type { + SW_WAKE_LOCK = 0, /* A kernel wakelock was acquired */ + SW_WAKE_UNLOCK = 1, /* A kernel wakelock was released */ + SW_WAKE_LOCK_TIMEOUT = + 2, /* A kernel wakelock was acquired with a timeout */ + SW_WAKE_LOCK_INITIAL = 3, /* A kernel wakelock was acquired + * before the collection started + */ + SW_WAKE_UNLOCK_ALL = 4, /* All previously held kernel wakelocks were */ + /* released -- used in ACPI S3 notifications */ +} sw_kernel_wakelock_type_t; + +typedef enum sw_when_type { + SW_WHEN_TYPE_BEGIN = 0, /* Start snapshot */ + SW_WHEN_TYPE_POLL, + SW_WHEN_TYPE_NOTIFIER, + SW_WHEN_TYPE_TRACEPOINT, + SW_WHEN_TYPE_END, /* Stop snapshot */ + SW_WHEN_TYPE_NONE +} sw_when_type_t; + +/** + * trigger_bits is defined to use type pw_u8_t that makes only up + * to 8 types possible + */ +#define SW_TRIGGER_BEGIN_MASK() (1U << SW_WHEN_TYPE_BEGIN) +#define SW_TRIGGER_END_MASK() (1U << SW_WHEN_TYPE_END) +#define SW_TRIGGER_POLL_MASK() (1U << SW_WHEN_TYPE_POLL) +#define SW_TRIGGER_TRACEPOINT_MASK() (1U << SW_WHEN_TYPE_TRACEPOINT) +#define SW_TRIGGER_NOTIFIER_MASK() (1U << SW_WHEN_TYPE_NOTIFIER) +#define SW_GET_TRIGGER_MASK_VALUE(m) (1U << (m)) +#define SW_TRIGGER_MASK_ALL() (0xFF) + +enum sw_io_cmd { SW_IO_CMD_READ = 0, SW_IO_CMD_WRITE, SW_IO_CMD_MAX }; + +#pragma pack(push, 1) +struct sw_driver_msr_io_descriptor { + pw_u64_t address; + enum sw_msr_type type; +}; +#pragma pack(pop) + +#pragma pack(push, 1) +struct sw_driver_ipc_mmio_io_descriptor { + union { +#ifdef SWW_MERGE +#pragma 
warning(push) +#pragma warning( \ + disable : 4201) /* disable C4201: nonstandard extension used: + * nameless struct/union + */ +#endif + struct { + pw_u16_t command; + pw_u16_t sub_command; + }; +#ifdef SWW_MERGE +#pragma warning(pop) /* enable C4201 */ +#endif + union { + pw_u32_t ipc_command; /* (sub_command << 12) + * | (command) + */ + pw_u8_t is_gbe; /* Used only for GBE MMIO */ + }; + }; + /* TODO: add a section for 'ctrl_address' and 'ctrl_remapped_address' */ + union { + pw_u64_t data_address; /* Will be "io_remapped" */ + pw_u64_t data_remapped_address; + }; +}; +#pragma pack(pop) + +#pragma pack(push, 1) +struct sw_driver_pci_io_descriptor { + pw_u32_t bus; + pw_u32_t device; + pw_u32_t function; +#ifdef __QNX__ + union { + pw_u32_t offset; + pw_u32_t index; + }; +#else /* __QNX__ */ + pw_u32_t offset; +#endif /* __QNX__ */ +}; +#pragma pack(pop) + +#pragma pack(push, 1) +struct sw_driver_configdb_io_descriptor { + /* pw_u32_t port; */ + /* pw_u32_t offset; */ + pw_u32_t address; +}; +#pragma pack(pop) + +#pragma pack(push, 1) +struct sw_driver_trace_args_io_descriptor { + pw_u8_t num_args; /* Number of valid entries in the 'args' array, + * below; 1 <= num_args <= 7 + */ + pw_u8_t args[7]; /* Max of 7 args can be recorded */ +}; +#pragma pack(pop) + +#pragma pack(push, 1) +/** + * struct - sw_driver_telem_io_descriptor - Telemetry Metric descriptor + * + * @id: (Client & Driver) Telemetry ID of the counter to read. + * @idx: (Driver only) index into telem array to read, or the row + * of the telem_indirect table to lookup the telem array index. + * @unit: Unit from which to collect: 0 = PMC, 1 = PUNIT + * Values come from the telemetry_unit enum. + * @scale_op: When there are multiple instances of a telem value (e.g. + * module C-states) the operation to use when scaling the CPU ID + * and adding it to the telemetry data ID. + * @scale_val: Amount to scale an ID (when scaling one.) 
+ * + * Like all hardware mechanism descriptors, the client uses this to pass + * metric hardware properties (unit and ID) to the driver. The driver + * uses it to program the telemetry unit. + * + * Users can specify that IDs should be scaled based on the CPU id, using + * the equation: ID = ID_value + (cpuid ) + * where is one of +, *, /, or %, and scaling_val is an integer + * value. This gives you: + * Operation scale_op scale_val + * Single instance of an ID * 0 + * Sequentially increasing + * CPU-specific values * 1 + * Per module cpu-specific + * values (2 cores/module) / 2 + * Round Robin assignment % cpu_count + * + * Note that scaling_value of 0 implies that no scaling should be + * applied. While (*, 1) is equivalent to (+, 0), the scaling value of 0 + * is reserved/defined to mean "no scaling", and is disallowed. + * + * If you're really tight on space, you could always fold unit and + * scale_op into a single byte without a lot of pain or even effort. + */ +struct sw_driver_telem_io_descriptor { + union { + pw_u16_t id; + pw_u8_t idx; + }; + pw_u8_t unit; + pw_u8_t scale_op; + pw_u16_t scale_val; +}; +#pragma pack(pop) +enum telemetry_unit { TELEM_PUNIT = 0, TELEM_PMC, TELEM_UNIT_NONE }; +#define TELEM_MAX_ID 0xFFFF /* Maximum value of a Telemtry event ID. */ +#define TELEM_MAX_SCALE 0xFFFF /* Maximum ID scaling value. 
*/ +#define TELEM_OP_ADD '+' /* Addition operator */ +#define TELEM_OP_MULT '*' /* Multiplication operator */ +#define TELEM_OP_DIV '/' /* Division operator */ +#define TELEM_OP_MOD '%' /* Modulus operator */ +#define TELEM_OP_NONE 'X' /* No operator--Not a scaled ID */ + +#pragma pack(push, 1) +struct sw_driver_mailbox_io_descriptor { + union { + /* + * Will be "io_remapped" + */ + pw_u64_t interface_address; + pw_u64_t interface_remapped_address; + }; + union { + /* + * Will be "io_remapped" + */ + pw_u64_t data_address; + pw_u64_t data_remapped_address; + }; + pw_u64_t command; + pw_u64_t command_mask; + pw_u16_t run_busy_bit; + pw_u16_t is_msr_type; +}; +#pragma pack(pop) + +#pragma pack(push, 1) +struct sw_driver_pch_mailbox_io_descriptor { + union { + /* + * Will be "io_remapped" + */ + pw_u64_t mtpmc_address; + pw_u64_t mtpmc_remapped_address; + }; + union { + /* + * Will be "io_remapped" + */ + pw_u64_t msg_full_sts_address; + pw_u64_t msg_full_sts_remapped_address; + }; + union { + /* + * Will be "io_remapped" + */ + pw_u64_t mfpmc_address; + pw_u64_t mfpmc_remapped_address; + }; + pw_u32_t data_address; +}; +#pragma pack(pop) + +#pragma pack(push, 1) +typedef struct sw_driver_io_descriptor { + pw_u16_t collection_type; + /* TODO: specify READ/WRITE */ + pw_s16_t collection_command; /* One of 'enum sw_io_cmd' */ + pw_u16_t counter_size_in_bytes; /* The number of bytes to + * READ or WRITE + */ + union { + struct sw_driver_msr_io_descriptor msr_descriptor; + struct sw_driver_ipc_mmio_io_descriptor ipc_descriptor; + struct sw_driver_ipc_mmio_io_descriptor mmio_descriptor; + struct sw_driver_pci_io_descriptor pci_descriptor; + struct sw_driver_configdb_io_descriptor configdb_descriptor; + struct sw_driver_trace_args_io_descriptor trace_args_descriptor; + struct sw_driver_telem_io_descriptor telem_descriptor; + struct sw_driver_pch_mailbox_io_descriptor + pch_mailbox_descriptor; + struct sw_driver_mailbox_io_descriptor mailbox_descriptor; + }; + pw_u64_t 
write_value; /* The value to WRITE */ +} sw_driver_io_descriptor_t; +#pragma pack(pop) + +/** + * sw_driver_interface_info is used to map data collected by kernel-level + * collectors to metrics. The client passes one of these structs to the + * driver for each metric the driver should collect. The driver tags the + * collected data (messages) using info from this struct. When processing + * data from the driver, the client uses its copy of this data to + * identify the plugin, metric, and message IDs of each message. + */ +#pragma pack(push, 1) +struct sw_driver_interface_info { + pw_u64_t tracepoint_id_mask; + pw_u64_t notifier_id_mask; + pw_s16_t cpu_mask; /* On which CPU(s) should the driver + * read the data? + * Currently: -2 ==> read on ALL CPUs, + * -1 ==> read on ANY CPU, + * >= 0 ==> the specific CPU to read on + */ + pw_s16_t plugin_id; /* Metric Plugin SID */ + pw_s16_t metric_id; /* Domain-specific ID assigned by each + * Metric Plugin + */ + pw_s16_t msg_id; /* Msg ID retrieved from the SoC Watch config file */ + pw_u16_t num_io_descriptors; /* Number of descriptors in the array, + * below. + */ + pw_u8_t trigger_bits; /* Mask of 'when bits' to fire this collector. */ + pw_u16_t sampling_interval_msec; /* Sampling interval, in msecs */ + pw_u8_t descriptors[1]; /* Array of sw_driver_io_descriptor structs. 
*/ +}; +#pragma pack(pop) + +#define SW_DRIVER_INTERFACE_INFO_HEADER_SIZE() \ + (sizeof(struct sw_driver_interface_info) - sizeof(pw_u8_t[1])) + +#pragma pack(push, 1) +struct sw_driver_interface_msg { + pw_u16_t num_infos; /* Number of 'sw_driver_interface_info' + * structs contained within the 'infos' variable, + * below + */ + pw_u16_t min_polling_interval_msecs; /* Min time to wait before + * polling; used exclusively + * with the low overhead, + * context-switch based + * polling mode + */ + /* pw_u16_t infos_size_bytes; + * Size of data inlined within the + * 'infos' variable, below + */ + pw_u8_t infos[1]; +}; +#pragma pack(pop) +#define SW_DRIVER_INTERFACE_MSG_HEADER_SIZE() \ + (sizeof(struct sw_driver_interface_msg) - sizeof(pw_u8_t[1])) + +typedef enum sw_name_id_type { + SW_NAME_TYPE_TRACEPOINT, + SW_NAME_TYPE_NOTIFIER, + SW_NAME_TYPE_COLLECTOR, + SW_NAME_TYPE_MAX, +} sw_name_id_type_t; + +#pragma pack(push, 1) +struct sw_name_id_pair { + pw_u16_t id; + pw_u16_t type; /* One of 'sw_name_id_type' */ + struct sw_string_type name; +}; +#pragma pack(pop) +#define SW_NAME_ID_HEADER_SIZE() \ + (sizeof(struct sw_name_id_pair) - sizeof(struct sw_string_type)) + +#pragma pack(push, 1) +struct sw_name_info_msg { + pw_u16_t num_name_id_pairs; + pw_u16_t payload_len; + pw_u8_t pairs[1]; +}; +#pragma pack(pop) + +/** + * This is the basic data structure for passing data collected by the + * kernel-level collectors up to the client. In addition to the data + * (payload), it contains the minimum metadata required for the client + * to identify the source of that data. 
+ */ +#pragma pack(push, 1) +typedef struct sw_driver_msg { + pw_u64_t tsc; + pw_u16_t cpuidx; + pw_u8_t plugin_id; /* Cannot have more than 256 plugins */ + pw_u8_t metric_id; /* Each plugin cannot handle more than 256 metrics */ + pw_u8_t msg_id; /* Each metric cannot have more than 256 components */ + pw_u16_t payload_len; + /* pw_u64_t p_payload; Ptr to payload */ + union { + pw_u64_t __dummy; /* Ensure size of struct is + * consistent on x86, x64 + */ + char *p_payload; /* Ptr to payload (collected data values). */ + }; +} sw_driver_msg_t; +#pragma pack(pop) +#define SW_DRIVER_MSG_HEADER_SIZE() \ + (sizeof(struct sw_driver_msg) - sizeof(pw_u64_t)) + +typedef enum sw_driver_collection_cmd { + SW_DRIVER_START_COLLECTION = 1, + SW_DRIVER_STOP_COLLECTION = 2, + SW_DRIVER_PAUSE_COLLECTION = 3, + SW_DRIVER_RESUME_COLLECTION = 4, + SW_DRIVER_CANCEL_COLLECTION = 5, +} sw_driver_collection_cmd_t; + +#pragma pack(push, 1) +struct sw_driver_version_info { + pw_u16_t major; + pw_u16_t minor; + pw_u16_t other; +}; +#pragma pack(pop) + +enum cpu_action { + SW_CPU_ACTION_NONE, + SW_CPU_ACTION_OFFLINE, + SW_CPU_ACTION_ONLINE_PREPARE, + SW_CPU_ACTION_ONLINE, + SW_CPU_ACTION_MAX, +}; +#pragma pack(push, 1) +struct sw_driver_topology_change { + pw_u64_t timestamp; /* timestamp */ + enum cpu_action type; /* One of 'enum cpu_action' */ + pw_u16_t cpu; /* logical cpu */ + pw_u16_t core; /* core id */ + pw_u16_t pkg; /* pkg/physical id */ +}; +struct sw_driver_topology_msg { + pw_u16_t num_entries; + pw_u8_t topology_entries[1]; +}; +#pragma pack(pop) + +/** + * An enumeration of possible pm states that + * SoC Watch is interested in + */ +enum sw_pm_action { + SW_PM_ACTION_NONE, + SW_PM_ACTION_SUSPEND_ENTER, + SW_PM_ACTION_SUSPEND_EXIT, + SW_PM_ACTION_HIBERNATE_ENTER, + SW_PM_ACTION_HIBERNATE_EXIT, + SW_PM_ACTION_MAX, +}; + +/** + * An enumeration of possible actions that trigger + * the power notifier + */ +enum sw_pm_mode { + SW_PM_MODE_FIRMWARE, + SW_PM_MODE_NONE, +}; + +#define 
SW_PM_VALUE(mode, action) ((mode) << 16 | (action)) + +/* + * Wrapper for ioctl arguments. + * EVERY ioctl MUST use this struct! + */ +#pragma pack(push, 1) +struct sw_driver_ioctl_arg { + pw_s32_t in_len; + pw_s32_t out_len; + /* pw_u64_t p_in_arg; Pointer to input arg */ + /* pw_u64_t p_out_arg; Pointer to output arg */ + char *in_arg; + char *out_arg; +}; +#pragma pack(pop) + +#pragma pack(push, 1) +typedef struct sw_driver_msg_interval { + pw_u8_t plugin_id; /* Cannot have more than 256 plugins */ + pw_u8_t metric_id; /* Each plugin cannot handle more than 256 metrics */ + pw_u8_t msg_id; /* Each metric cannot have more than 256 components */ + pw_u16_t interval; /* collection interval */ +} sw_driver_msg_interval_t; +#pragma pack(pop) + +#endif /* __SW_STRUCTS_H__ */ diff --git a/drivers/platform/x86/socwatchhv/inc/sw_types.h b/drivers/platform/x86/socwatchhv/inc/sw_types.h new file mode 100644 index 0000000000000..156c92c8349aa --- /dev/null +++ b/drivers/platform/x86/socwatchhv/inc/sw_types.h @@ -0,0 +1,152 @@ +/* + + This file is provided under a dual BSD/GPLv2 license. When using or + redistributing this file, you may do so under either license. + + GPL LICENSE SUMMARY + + Copyright(c) 2014 - 2018 Intel Corporation. + + This program is free software; you can redistribute it and/or modify + it under the terms of version 2 of the GNU General Public License as + published by the Free Software Foundation. + + This program is distributed in the hope that it will be useful, but + WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + General Public License for more details. + + Contact Information: + SoC Watch Developer Team + Intel Corporation, + 1300 S Mopac Expwy, + Austin, TX 78746 + + BSD LICENSE + + Copyright(c) 2014 - 2018 Intel Corporation. 
+ + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions + are met: + + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in + the documentation and/or other materials provided with the + distribution. + * Neither the name of Intel Corporation nor the names of its + contributors may be used to endorse or promote products derived + from this software without specific prior written permission. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +*/ + +#ifndef _PW_TYPES_H_ +#define _PW_TYPES_H_ + +#if defined(__linux__) || defined(__APPLE__) || defined(__QNX__) + +#ifndef __KERNEL__ +/* + * Called from Ring-3. + */ +#include <stdint.h> /* Grab 'uint64_t' etc. */ +#include <sys/types.h> /* Grab 'pid_t' */ +/* + * UNSIGNED types... + */ +typedef uint8_t u8; +typedef uint16_t u16; +typedef uint32_t u32; +typedef uint64_t u64; +/* + * SIGNED types... 
+ */ +typedef int8_t s8; +typedef int16_t s16; +typedef int32_t s32; +typedef int64_t s64; + +#else /* __KERNEL__ */ +#if !defined(__APPLE__) +#include +#else /* __APPLE__ */ +#include +#include /* Grab 'uint64_t' etc. */ + +typedef uint8_t u8; +typedef uint16_t u16; +typedef uint32_t u32; +typedef uint64_t u64; +/* + * SIGNED types... + */ +typedef int8_t s8; +typedef int16_t s16; +typedef int32_t s32; +typedef int64_t s64; +#endif /* __APPLE__ */ +#endif /* __KERNEL__ */ + +#elif defined(_WIN32) +typedef __int32 int32_t; +typedef unsigned __int32 uint32_t; +typedef __int64 int64_t; +typedef unsigned __int64 uint64_t; + +/* + * UNSIGNED types... + */ +typedef unsigned char u8; +typedef unsigned short u16; +typedef unsigned int u32; +typedef unsigned long long u64; + +/* + * SIGNED types... + */ +typedef signed char s8; +typedef signed short s16; +typedef signed int s32; +typedef signed long long s64; +typedef s32 pid_t; +typedef s32 ssize_t; + +#endif /* _WIN32 */ + +/* ************************************ + * Common to both operating systems. + * ************************************ + */ +/* + * UNSIGNED types... + */ +typedef u8 pw_u8_t; +typedef u16 pw_u16_t; +typedef u32 pw_u32_t; +typedef u64 pw_u64_t; + +/* + * SIGNED types... + */ +typedef s8 pw_s8_t; +typedef s16 pw_s16_t; +typedef s32 pw_s32_t; +typedef s64 pw_s64_t; + +typedef pid_t pw_pid_t; + +#endif /* _PW_TYPES_H_ */ diff --git a/drivers/platform/x86/socwatchhv/inc/sw_version.h b/drivers/platform/x86/socwatchhv/inc/sw_version.h new file mode 100644 index 0000000000000..5476b0d79ac5c --- /dev/null +++ b/drivers/platform/x86/socwatchhv/inc/sw_version.h @@ -0,0 +1,74 @@ +/* + + This file is provided under a dual BSD/GPLv2 license. When using or + redistributing this file, you may do so under either license. + + GPL LICENSE SUMMARY + + Copyright(c) 2014 - 2018 Intel Corporation. 
+ + This program is free software; you can redistribute it and/or modify + it under the terms of version 2 of the GNU General Public License as + published by the Free Software Foundation. + + This program is distributed in the hope that it will be useful, but + WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + General Public License for more details. + + Contact Information: + SoC Watch Developer Team + Intel Corporation, + 1300 S Mopac Expwy, + Austin, TX 78746 + + BSD LICENSE + + Copyright(c) 2014 - 2018 Intel Corporation. + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions + are met: + + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in + the documentation and/or other materials provided with the + distribution. + * Neither the name of Intel Corporation nor the names of its + contributors may be used to endorse or promote products derived + from this software without specific prior written permission. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT + OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +*/ + +#ifndef __SW_VERSION_H__ +#define __SW_VERSION_H__ 1 + +/* + * SOCWatch driver version + */ +#define SW_DRIVER_VERSION_MAJOR 2 +#define SW_DRIVER_VERSION_MINOR 6 +#define SW_DRIVER_VERSION_OTHER 2 + +/* + * Every SOC Watch userspace component shares the same version number. + */ +#define SOCWATCH_VERSION_MAJOR 2 +#define SOCWATCH_VERSION_MINOR 8 +#define SOCWATCH_VERSION_OTHER 0 + +#endif /* __SW_VERSION_H__ */ diff --git a/drivers/platform/x86/socwatchhv/inc/swhv_acrn.h b/drivers/platform/x86/socwatchhv/inc/swhv_acrn.h new file mode 100644 index 0000000000000..2bcc97a84bbcd --- /dev/null +++ b/drivers/platform/x86/socwatchhv/inc/swhv_acrn.h @@ -0,0 +1,117 @@ +#ifndef _SWHV_ACRN_H_ +#define _SWHV_ACRN_H_ 1 + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include /* LINUX_VERSION_CODE */ +#include /* for struct list_head */ + +#include "swhv_defines.h" +#include "pw_version.h" + +#define SW_DEFINE_LIST_HEAD(name, dummy) struct list_head name +#define SW_DECLARE_LIST_HEAD(name, dummy) extern struct list_head name +#define SW_LIST_ENTRY(name, dummy) struct list_head name +#define SW_LIST_HEAD_VAR(dummy) struct list_head +#define SW_LIST_HEAD_INIT(head) INIT_LIST_HEAD(head) +#define SW_LIST_ENTRY_INIT(node, field) INIT_LIST_HEAD(&node->field) +#define SW_LIST_ADD(head, node, field) list_add_tail(&node->field, head) +#define SW_LIST_GET_HEAD_ENTRY(head, type, field) \ + 
list_first_entry(head, struct type, field) +#define SW_LIST_UNLINK(node, field) list_del(&node->field) +#define SW_LIST_FOR_EACH_ENTRY(node, head, field) \ + list_for_each_entry(node, head, field) +#define SW_LIST_EMPTY(head) list_empty(head) +#define SW_LIST_HEAD_INITIALIZER(head) LIST_HEAD_INIT(head) + +int device_open_i(struct inode *inode, struct file *file); + +ssize_t device_read_i(struct file *file, /* see include/linux/fs.h */ + char __user *buffer, /* buffer to be filled with data */ + size_t length, /* length of the buffer */ + loff_t *offset); + +long swhv_configure(struct swhv_driver_interface_msg __user *remote_msg, + int local_len); +long swhv_start(void); +long swhv_stop(void); +long swhv_get_cpu_count(u32 __user *remote_args); +long swhv_get_clock(u32 __user *remote_in_args, u64 __user *remote_args); +long swhv_get_topology(u64 __user *remote_args); +long swhv_get_hypervisor_type(u32 __user *remote_args); +int swhv_load_driver_i(void); +void swhv_unload_driver_i(void); +void cleanup_error_i(void); +long swhv_msr_read(u32 __user *remote_in_args, u64 __user *remote_args); +long swhv_collection_poll(void); + +enum MSR_CMD_TYPE { + MSR_OP_NONE = 0, + MSR_OP_READ, + MSR_OP_WRITE, + MSR_OP_READ_CLEAR +}; + +enum MSR_CMD_STATUS { MSR_OP_READY = 0, MSR_OP_REQUESTED, MSR_OP_HANDLED }; + +struct profiling_msr_op { + /* value to write or location to write into */ + uint64_t value; + /* MSR address to read/write; last entry will have value of -1 */ + uint32_t msr_id; + /* parameter; usage depends on operation */ + uint16_t param; + uint8_t msr_op_type; + uint8_t reg_type; +}; + +#define MAX_MSR_LIST_NUM 15 +struct profiling_msr_ops_list { + int32_t collector_id; + uint32_t num_entries; + int32_t msr_op_state; /* enum value from 'MSR_CMD_STATUS' */ + struct profiling_msr_op entries[MAX_MSR_LIST_NUM]; +}; + +#define COLLECTOR_SOCWATCH 1 + +struct profiling_control { + int32_t collector_id; + int32_t reserved; + uint64_t switches; +}; + +/** + * struct - 
sw_collector_data + * Information about the collector to be invoked at collection time. + * + * The collector_lists array holds linked lists of collectors to + * be exercised at specific points in time during the collection + * (e.g. begin, poll, end, etc.). At a trigger time, the driver walks + * that time's list of nodes, and exercises the collectors on that list. + * + * @list: List/link implementation + * @cpumask: Collect if cpu matches mask + * @info: Ptr to metric info + * @ops: Ptr to collector's operations + * @last_update_jiffies: Indicates when this node was last exercised. + * @per_msg_payload_size: Data size + * @msg: Ptr to collected data + */ +typedef struct swhv_acrn_msr_collector_data { + SW_LIST_ENTRY(list, swhv_acrn_msr_collector_data); + pw_s16_t cpu_mask; + pw_s16_t sample_id; + struct profiling_msr_ops_list *msr_ops_list; + size_t per_msg_payload_size; +} swhv_acrn_msr_collector_data_t; +#endif /* _SWHV_ACRN_H_ */ diff --git a/drivers/platform/x86/socwatchhv/inc/swhv_acrn_sbuf.h b/drivers/platform/x86/socwatchhv/inc/swhv_acrn_sbuf.h new file mode 100644 index 0000000000000..5f62c2d43e113 --- /dev/null +++ b/drivers/platform/x86/socwatchhv/inc/swhv_acrn_sbuf.h @@ -0,0 +1,186 @@ +#ifndef _SWHV_ACRN_SBUF_H_ +#define _SWHV_ACRN_SBUF_H_ 1 + +#include + +/* + * Checks if the passed sbuf is empty. + */ +static inline bool sbuf_is_empty(struct shared_buf *sbuf) +{ + return (sbuf->head == sbuf->tail); +} + +static inline uint32_t sbuf_next_ptr(uint32_t pos, uint32_t span, + uint32_t scope) +{ + pos += span; + pos = (pos >= scope) ? (pos - scope) : pos; + return pos; +} + +/* + * This function returns the available free space in the + * passed sbuf. 
+ */ +inline uint32_t sbuf_available_space(struct shared_buf *sbuf) +{ + uint32_t remaining_space; + /* + * if tail isn't wrapped around + * subtract difference of tail and head from size + * otherwise + * difference between head and tail + */ + if (sbuf->tail >= sbuf->head) + remaining_space = sbuf->size - (sbuf->tail - sbuf->head); + else + remaining_space = sbuf->head - sbuf->tail; + + return remaining_space; +} + +/* + * This function retrieves the requested 'size' amount of data from + * the passed buffer. + * This is a much more efficient implementation than the default + * 'sbuf_get()' which retrieves one 'element' size at a time. + */ +int sbuf_get_variable(struct shared_buf *sbuf, void **data, uint32_t size) +{ + /* + * 1. Check if buffer isn't empty and non-zero 'size' + * 2. check if enough ('size' bytes) data to be read is present. + * 3. Continue if buffer has enough data + * 4. Copy data from buffer + * 4a. copy data in 2 parts if there is a wrap-around + * 4b. Otherwise do a simple copy + */ + const void *from; + uint32_t current_data_size, offset = 0, next_head; + + if ((sbuf == NULL) || (*data == NULL)) + return -EINVAL; + + if (sbuf_is_empty(sbuf) || (size == 0)) { + /* no data available */ + return 0; + } + + current_data_size = sbuf->size - sbuf_available_space(sbuf); + + /* + * TODO If requested data size is greater than current buffer size, + * consider at least copying the current buffer size. 
+ */ + if (size > current_data_size) { + pw_pr_warn( + "Requested data size is greater than the current buffer size!"); + /* not enough data to be read */ + return 0; + } + + next_head = sbuf_next_ptr(sbuf->head, size, sbuf->size); + + from = (void *)sbuf + SBUF_HEAD_SIZE + sbuf->head; + + if (next_head < sbuf->head) { /* wrap-around */ + /* copy first part */ + offset = sbuf->size - sbuf->head; + memcpy(*data, from, offset); + + from = (void *)sbuf + SBUF_HEAD_SIZE; + } + memcpy((void *)*data + offset, from, size - offset); + + sbuf->head = next_head; + + return size; +} + +/* + * This API can be used to retrieve complete samples at a time from the + * sbuf. It internally uses the sbuf_get() which retrieves 1 'element' + * at a time and is probably not very efficient for reading large amount + * of data. + * Note: Not used currently. + */ +int sbuf_get_wrapper(struct shared_buf *sbuf, uint8_t **data) +{ + uint8_t *sample; + uint8_t sample_offset; + acrn_msg_header *header; + uint32_t payload_size, sample_size, _size; + + /* + * Assumption: A partial variable sample will not be written + * to the buffer. 
+ * do while buf isn't empty + * Read header from the buffer + * write to data + * get size of payload + * check if the size of 'data' is enough for the + * variable sample to be read to + * Read the payload + * Keep reading ele_size chunks till available and write to data + * if the last chunk is less than ele_size, do a partial copy to + * data + * + * + */ + if ((sbuf == NULL) || (data == NULL)) + return -EINVAL; + + if (sbuf_is_empty(sbuf)) { + /* no data available */ + return 0; + } + + sample_offset = 0; + + header = vmalloc(sizeof(ACRN_MSG_HEADER_SIZE)); + memset(header, 0, sizeof(ACRN_MSG_HEADER_SIZE)); + /*read header */ + sbuf_get(sbuf, (uint8_t *)header); + + payload_size = header->payload_size; + + sample_size = ACRN_MSG_HEADER_SIZE + header->payload_size; + + sample = vmalloc(sample_size); + + /*copy header */ + memcpy((void *)sample, (void *)header, ACRN_MSG_HEADER_SIZE); + + sample_offset += ACRN_MSG_HEADER_SIZE; + + _size = payload_size; + while (_size) { + if (_size >= sbuf->ele_size) { + sbuf_get(sbuf, (uint8_t *)(sample + sample_offset)); + sample_offset += sbuf->ele_size; + _size -= sbuf->ele_size; + } else { + pw_pr_error( + "error: payload has to be multiple of 32\n"); + return 0; + /* + * This code can be enabled when support for variable + * sized samples needs to be added. + */ +#if 0 + chunk = malloc(sbuf->ele_size); + sbuf_get(sbuf, chunk); + memcpys((void *)(sample + sample_offset), _size, chunk); + _size -= _size; + free(chunk); +#endif + } + } + + *data = sample; + + vfree(header); + return sample_size; +} +#endif /* _SWHV_ACRN_SBUF_H_ */ diff --git a/drivers/platform/x86/socwatchhv/inc/swhv_defines.h b/drivers/platform/x86/socwatchhv/inc/swhv_defines.h new file mode 100644 index 0000000000000..2f51a5d760f6a --- /dev/null +++ b/drivers/platform/x86/socwatchhv/inc/swhv_defines.h @@ -0,0 +1,111 @@ +/* + + This file is provided under a dual BSD/GPLv2 license. When using or + redistributing this file, you may do so under either license. 
+ + GPL LICENSE SUMMARY + + Copyright(c) 2014 - 2018 Intel Corporation. + + This program is free software; you can redistribute it and/or modify + it under the terms of version 2 of the GNU General Public License as + published by the Free Software Foundation. + + This program is distributed in the hope that it will be useful, but + WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + General Public License for more details. + + Contact Information: + SoC Watch Developer Team + Intel Corporation, + 1300 S Mopac Expwy, + Austin, TX 78746 + + + BSD LICENSE + + Copyright(c) 2014 - 2018 Intel Corporation. + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions + are met: + + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in + the documentation and/or other materials provided with the + distribution. + * Neither the name of Intel Corporation nor the names of its + contributors may be used to endorse or promote products derived + from this software without specific prior written permission. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT + OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +*/ + +#ifndef _SWHV_DEFINES_H_ +#define _SWHV_DEFINES_H_ + +/* *************************************************** + * Common to kernel and userspace. + * *************************************************** + */ +#define PW_SUCCESS 0 +#define PW_ERROR 1 +#define PW_SUCCESS_NO_COLLECT 2 + +/* + * Start off with none of the OS'es are defined + */ +#undef SWDRV_OS_LINUX +#undef SWDRV_OS_ANDROID +#undef SWDRV_OS_UNIX + +/* + * Make sure none of the architectures is defined here + */ +#undef SWDRV_IA32 +#undef SWDRV_EM64T + +/* + * Make sure one (and only one) of the OS'es gets defined here + * + * Unfortunately entirex defines _WIN32 so we need to check for linux + * first. The definition of these flags is one and only one + * _OS_xxx is allowed to be defined. 
+ */ +#if defined(__ANDROID__) +#define SWDRV_OS_ANDROID +#define SWDRV_OS_UNIX +#elif defined(__linux__) +#define SWDRV_OS_LINUX +#define SWDRV_OS_UNIX +#else +#error "Compiling for an unknown OS" +#endif + +/* + * Make sure one (and only one) architecture is defined here + * as well as one (and only one) pointer size + */ +#if defined(_M_IX86) || defined(__i386__) +#define SWDRV_IA32 +#elif defined(_M_AMD64) || defined(__x86_64__) +#define SWDRV_EM64T +#else +#error "Unknown architecture for compilation" +#endif + +#endif /* _SWHV_DEFINES_H_ */ diff --git a/drivers/platform/x86/socwatchhv/inc/swhv_driver.h b/drivers/platform/x86/socwatchhv/inc/swhv_driver.h new file mode 100644 index 0000000000000..f2f9f662b3112 --- /dev/null +++ b/drivers/platform/x86/socwatchhv/inc/swhv_driver.h @@ -0,0 +1,109 @@ +/* + + This file is provided under a dual BSD/GPLv2 license. When using or + redistributing this file, you may do so under either license. + + GPL LICENSE SUMMARY + + Copyright(c) 2014 - 2018 Intel Corporation. + + This program is free software; you can redistribute it and/or modify + it under the terms of version 2 of the GNU General Public License as + published by the Free Software Foundation. + + This program is distributed in the hope that it will be useful, but + WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + General Public License for more details. + + Contact Information: + SoC Watch Developer Team + Intel Corporation, + 1300 S Mopac Expwy, + Austin, TX 78746 + + BSD LICENSE + + Copyright(c) 2014 - 2018 Intel Corporation. + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions + are met: + + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. 
+ * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in + the documentation and/or other materials provided with the + distribution. + * Neither the name of Intel Corporation nor the names of its + contributors may be used to endorse or promote products derived + from this software without specific prior written permission. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +*/ + +#ifndef _SWHV_DRIVER_H_ +#define _SWHV_DRIVER_H_ 1 + +#include <linux/version.h> /* LINUX_VERSION_CODE */ +#include <linux/vmalloc.h> /* vmalloc */ +#include "swhv_defines.h" +#include "sw_kernel_defines.h" +#include "pw_version.h" + +#define MAX_CORE_COUNT 8 + +#define MOBILEVISOR 1 +#define ACRN 2 + +/* define this flag to have IDT entry programmed for SoCWatch IRQ handler */ +#define SOCWATCH_IDT_IRQ 1 + +extern void SYS_Perfvec_Handler(void); +extern short SYS_Get_cs(void); + +#if defined(SWDRV_IA32) && (SOCWATCH_IDT_IRQ) +extern void *SYS_Get_IDT_Base_HWR(void); /* IDT base from hardware IDTR */ + +#define SYS_Get_IDT_Base SYS_Get_IDT_Base_HWR +#endif /* defined(SWDRV_IA32) && (SOCWATCH_IDT_IRQ) */ + +#if defined(SWDRV_EM64T) && (SOCWATCH_IDT_IRQ) +extern void SYS_Get_IDT_Base(void **); + +#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 25) +typedef struct gate_struct gate_struct_t; +#else +typedef struct gate_struct64 gate_struct_t; +#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(2,6,25) */ +#endif /* defined(SWDRV_EM64T) && (SOCWATCH_IDT_IRQ) */ + +/* miscellaneous defines */ +#define CPU() (raw_smp_processor_id()) +#define GET_BOOL_STRING(b) ((b) ? "TRUE" : "FALSE") + +#define _STRINGIFY(x) #x +#define STRINGIFY(x) _STRINGIFY(x) +#define _STRINGIFY_W(x) (L#x) +#define STRINGIFY_W(x) _STRINGIFY_W(x) + +/* + * 64bit Compare-and-swap. + */ +#define CAS64(p, o, n) (cmpxchg64((p), (o), (n)) == (o)) + +typedef struct PWCollector_msg PWCollector_msg_t; + +#endif /* _SWHV_DRIVER_H_ */ diff --git a/drivers/platform/x86/socwatchhv/inc/swhv_ioctl.h b/drivers/platform/x86/socwatchhv/inc/swhv_ioctl.h new file mode 100644 index 0000000000000..0d2a368c12ca4 --- /dev/null +++ b/drivers/platform/x86/socwatchhv/inc/swhv_ioctl.h @@ -0,0 +1,164 @@ +/* + + This file is provided under a dual BSD/GPLv2 license. When using or + redistributing this file, you may do so under either license. + + GPL LICENSE SUMMARY + + Copyright(c) 2014 - 2018 Intel Corporation. 
+ + This program is free software; you can redistribute it and/or modify + it under the terms of version 2 of the GNU General Public License as + published by the Free Software Foundation. + + This program is distributed in the hope that it will be useful, but + WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + General Public License for more details. + + Contact Information: + SoC Watch Developer Team + Intel Corporation, + 1300 S Mopac Expwy, + Austin, TX 78746 + + BSD LICENSE + + Copyright(c) 2014 - 2018 Intel Corporation. + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions + are met: + + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in + the documentation and/or other materials provided with the + distribution. + * Neither the name of Intel Corporation nor the names of its + contributors may be used to endorse or promote products derived + from this software without specific prior written permission. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT + OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +*/ +#ifndef __SWHV_IOCTL_H__ +#define __SWHV_IOCTL_H__ + +#include "pw_types.h" + +#if defined(__linux__) || defined(__QNX__) +#if __KERNEL__ +#include +#if defined(HAVE_COMPAT_IOCTL) && defined(CONFIG_X86_64) +#include +#endif /* COMPAT && x64 */ +#else /* !__KERNEL__ */ +#include +#endif /* __KERNEL__ */ +#endif /* __linux__ */ +/* + * Path to the Hypervisor driver device file. + */ +#define SWHV_DEVICE_NAME "swhypervdrv" +#define SWHV_DEVICE_PATH "/dev/" SWHV_DEVICE_NAME + +/* + * The SoFIA-specific IOCTL magic + * number -- used to ensure IOCTLs + * are delivered to the correct + * driver. + */ +#define SP_IOC_MAGIC 99 +/* + * CONSTANTS that define the various operations. + * TODO: convert to enum? 
+ */ +#define SWHVDRV_OPERATION_CONFIGURE 1 /* configure a collection */ +#define SWHVDRV_OPERATION_CMD 2 /* control a collection */ +#define SWHVDRV_OPERATION_VERSION 3 /* retrieve driver version info */ +#define SWHVDRV_OPERATION_CLOCK 4 /* retrieve STM clock */ +#define SWHVDRV_OPERATION_TOPOLOGY 5 /* retrieve CPU topology */ +#define SWHVDRV_OPERATION_CPUCOUNT 6 /* retrieve CPU count */ +#define SWHVDRV_OPERATION_HYPERVISOR_TYPE 7 /* retrieve hypervisor type */ +#define SWHVDRV_OPERATION_MSR_READ 8 /* retrieve MSR value */ +#define SWHVDRV_OPERATION_POLL 9 /* Polling tick */ + +enum swhv_ioctl_cmd { + swhv_ioctl_cmd_none = 0, + swhv_ioctl_cmd_config, + swhv_ioctl_cmd_cmd, + swhv_ioctl_cmd_version, + swhv_ioctl_cmd_clock, + swhv_ioctl_cmd_topology, + swhv_ioctl_cmd_cpucount, + swhv_ioctl_cmd_hypervisor_type, +}; +/* + * The actual IOCTL commands. + * + * From the kernel documentation: + * "_IOR" ==> Read IOCTL + * "_IOW" ==> Write IOCTL + * "_IOWR" ==> Read/Write IOCTL + * + * Where "Read" and "Write" are from the user's perspective + * (similar to the file "read" and "write" calls). 
+ */ +#define SWHVDRV_IOCTL_CONFIGURE \ + _IOW(SP_IOC_MAGIC, SWHVDRV_OPERATION_CONFIGURE, \ + struct spdrv_ioctl_arg *) +#define SWHVDRV_IOCTL_CMD \ + _IOW(SP_IOC_MAGIC, SWHVDRV_OPERATION_CMD, struct spdrv_ioctl_arg *) +#define SWHVDRV_IOCTL_VERSION \ + _IOR(SP_IOC_MAGIC, SWHVDRV_OPERATION_VERSION, struct spdrv_ioctl_arg *) +#define SWHVDRV_IOCTL_CLOCK \ + _IOR(SP_IOC_MAGIC, SWHVDRV_OPERATION_CLOCK, struct spdrv_ioctl_arg *) +#define SWHVDRV_IOCTL_TOPOLOGY \ + _IOR(SP_IOC_MAGIC, SWHVDRV_OPERATION_TOPOLOGY, struct spdrv_ioctl_arg *) +#define SWHVDRV_IOCTL_CPUCOUNT \ + _IOR(SP_IOC_MAGIC, SWHVDRV_OPERATION_CPUCOUNT, struct spdrv_ioctl_arg *) +#define SWHVDRV_IOCTL_HYPERVISOR_TYPE \ + _IOR(SP_IOC_MAGIC, SWHVDRV_OPERATION_HYPERVISOR_TYPE, \ + struct spdrv_ioctl_arg *) +#define SWHVDRV_IOCTL_MSR_READ \ + _IOWR(SP_IOC_MAGIC, SWHVDRV_OPERATION_MSR_READ, \ + struct spdrv_ioctl_arg *) +#define SWHVDRV_IOCTL_POLL \ + _IO(SP_IOC_MAGIC, SWHVDRV_OPERATION_POLL, struct spdrv_ioctl_arg *) + +#if defined(HAVE_COMPAT_IOCTL) && defined(CONFIG_X86_64) +#include + +#define SWHVDRV_IOCTL_CONFIGURE32 \ + _IOW(SP_IOC_MAGIC, SWHVDRV_OPERATION_CONFIGURE, compat_uptr_t) +#define SWHVDRV_IOCTL_CMD32 \ + _IOW(SP_IOC_MAGIC, SWHVDRV_OPERATION_CMD, compat_uptr_t) +#define SWHVDRV_IOCTL_VERSION32 \ + _IOR(SP_IOC_MAGIC, SWHVDRV_OPERATION_VERSION, compat_uptr_t) +#define SWHVDRV_IOCTL_CLOCK32 \ + _IOR(SP_IOC_MAGIC, SWHVDRV_OPERATION_CLOCK, compat_uptr_t) +#define SWHVDRV_IOCTL_TOPOLOGY32 \ + _IOR(SP_IOC_MAGIC, SWHVDRV_OPERATION_TOPOLOGY, compat_uptr_t) +#define SWHVDRV_IOCTL_CPUCOUNT32 \ + _IOR(SP_IOC_MAGIC, SWHVDRV_OPERATION_CPUCOUNT, compat_uptr_t) +#define SWHVDRV_IOCTL_HYPERVISOR_TYPE32 \ + _IOR(SP_IOC_MAGIC, SWHVDRV_OPERATION_HYPERVISOR_TYPE, compat_uptr_t) +#define SWHVDRV_IOCTL_MSR_READ32 \ + _IOWR(SP_IOC_MAGIC, SWHVDRV_OPERATION_MSR_READ, compat_uptr_t) +#define SWHVDRV_IOCTL_POLL32 \ + _IO(SP_IOC_MAGIC, SWHVDRV_OPERATION_POLL, compat_uptr_t) +#endif /* COMPAT && x64 */ + +#endif /* 
__SWHV_IOCTL_H__ */ diff --git a/drivers/platform/x86/socwatchhv/inc/swhv_structs.h b/drivers/platform/x86/socwatchhv/inc/swhv_structs.h new file mode 100644 index 0000000000000..0393a95e48755 --- /dev/null +++ b/drivers/platform/x86/socwatchhv/inc/swhv_structs.h @@ -0,0 +1,251 @@ +/* + + This file is provided under a dual BSD/GPLv2 license. When using or + redistributing this file, you may do so under either license. + + GPL LICENSE SUMMARY + + Copyright(c) 2014 - 2018 Intel Corporation. + + This program is free software; you can redistribute it and/or modify + it under the terms of version 2 of the GNU General Public License as + published by the Free Software Foundation. + + This program is distributed in the hope that it will be useful, but + WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + General Public License for more details. + + Contact Information: + SoC Watch Developer Team + Intel Corporation, + 1300 S Mopac Expwy, + Austin, TX 78746 + + BSD LICENSE + + Copyright(c) 2014 - 2018 Intel Corporation. + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions + are met: + + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in + the documentation and/or other materials provided with the + distribution. + * Neither the name of Intel Corporation nor the names of its + contributors may be used to endorse or promote products derived + from this software without specific prior written permission. 
+ + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +*/ + +#ifndef _SWHV_STRUCTS_H_ +#define _SWHV_STRUCTS_H_ 1 + +#include "sw_structs.h" + +enum swhv_hypervisor_type { + swhv_hypervisor_none = 0, + swhv_hypervisor_mobilevisor, + swhv_hypervisor_acrn, +}; + +/* + * Structure to return version information. + */ +#pragma pack(push) +#pragma pack(1) +struct sp_driver_version_info { + pw_s32_t major; + pw_s32_t minor; + pw_s32_t other; +}; + +struct spdrv_ioctl_arg { + pw_s32_t in_len; + pw_s32_t out_len; + char *in_arg; + char *out_arg; +}; +#pragma pack(pop) + +/* + * Various commands to control a collection. 
+ */ +enum swhvdrv_cmd { + SWHVDRV_CMD_START, + SWHVDRV_CMD_STOP, + /* others here when appropriate */ + SVHVDRV_CMD_MAX +}; + +enum swhv_collector_type { + SWHV_COLLECTOR_TYPE_NONE, + SWHV_COLLECTOR_TYPE_SWITCH, + SWHV_COLLECTOR_TYPE_MSR, +}; + +enum swhv_io_cmd { SWHV_IO_CMD_READ = 0, SWHV_IO_CMD_WRITE, SWHV_IO_CMD_MAX }; + +#pragma pack(push, 1) +struct swhv_driver_msr_io_descriptor { + pw_u64_t address; + enum sw_msr_type type; +}; +#pragma pack(pop) + +#pragma pack(push, 1) +struct swhv_driver_switch_io_descriptor { + pw_u32_t switch_bitmask; +}; +#pragma pack(pop) + +#pragma pack(push, 1) +typedef struct swhv_driver_io_descriptor { + pw_u16_t collection_type; /* One of 'enum swhv_collector_type' */ + pw_s16_t collection_command; /* One of 'enum swhv_io_cmd' */ + pw_u16_t counter_size_in_bytes; /* The number of bytes to + * READ or WRITE + */ + union { + struct swhv_driver_msr_io_descriptor msr_descriptor; + struct swhv_driver_switch_io_descriptor switch_descriptor; + }; + pw_u64_t write_value; /* The value to WRITE */ +} swhv_driver_io_descriptor_t; +#pragma pack(pop) + +#pragma pack(push, 1) +struct swhv_driver_interface_info { + pw_s16_t cpu_mask; /* On which CPU(s) should the driver + * read the data? + */ + /* Currently: -2 ==> read on ALL CPUs, + * -1 ==> read on ANY CPU, + * >= 0 ==> the specific CPU to read on + */ + pw_s16_t sample_id; /* Sample ID, used to map it back + * to Metric Plugin, Metric and Msg ID combo + */ + pw_u16_t num_io_descriptors; /* Number of descriptors in the array, + * below. + */ + pw_u8_t descriptors[1]; /* Array of swhv_driver_io_descriptor + * structs. 
+ */ +}; +#pragma pack(pop) +#define SWHV_DRIVER_INTERFACE_INFO_HEADER_SIZE() \ + (sizeof(struct swhv_driver_interface_info) - sizeof(pw_u8_t[1])) + +#pragma pack(push, 1) +struct swhv_driver_interface_msg { + pw_u16_t num_infos; /* Number of 'swhv_driver_interface_info' + * structs contained within the 'infos' variable, + * below + */ + /* pw_u16_t infos_size_bytes; Size of data inlined within + * the 'infos' variable, below + */ + pw_u8_t infos[1]; +}; +#pragma pack(pop) +#define SWHV_DRIVER_INTERFACE_MSG_HEADER_SIZE() \ + (sizeof(struct swhv_driver_interface_msg) - sizeof(pw_u8_t[1])) + +/* + * ACRN specific structs, copied from the ACRN profiling service + * DO NOT modify these below stucts + */ +#define SBUF_HEAD_SIZE 64 /* bytes */ + +typedef enum PROFILING_SOCWATCH_FEATURE { + SOCWATCH_COMMAND = 0, + SOCWATCH_VM_SWITCH_TRACING, + MAX_SOCWATCH_FEATURE_ID +} profiling_socwatch_feature; + +typedef enum PROFILING_SOCWATCH_FEATURE acrn_type; + +/* + * current default ACRN header + */ +struct data_header { + int32_t collector_id; + uint16_t cpu_id; + uint16_t data_type; + uint64_t tsc; + uint64_t payload_size; + uint64_t reserved; +} __attribute__((aligned(32))); +#define ACRN_MSG_HEADER_SIZE ((uint64_t)sizeof(struct data_header)) + +struct vm_switch_trace { + uint64_t vm_enter_tsc; + uint64_t vm_exit_tsc; + uint64_t vm_exit_reason; + uint16_t os_id; + uint16_t reserved; +} __attribute__((aligned(32))); +#define VM_SWITCH_TRACE_SIZE ((uint64_t)sizeof(struct vm_switch_trace)) + +#define CONFIG_MAX_VCPUS_PER_VM 8 +#define CONFIG_MAX_VM_NUM 6 + +struct profiling_vcpu_pcpu_map { + int16_t vcpu_id; + int16_t pcpu_id; + uint32_t apic_id; +} __attribute__((aligned(8))); + +struct profiling_vm_info { + uint16_t vm_id_num; + uint8_t guid[16]; + char vm_name[16]; + uint16_t num_vcpus; + struct profiling_vcpu_pcpu_map cpu_map[CONFIG_MAX_VCPUS_PER_VM]; +} __attribute__((aligned(8))); + +struct profiling_vm_info_list { + uint16_t num_vms; + struct profiling_vm_info 
vm_list[CONFIG_MAX_VM_NUM]; +} __attribute__((aligned(8))); + +/* + * End of ACRN specific structs, copied from the ACRN profiling service + */ +typedef struct data_header acrn_msg_header; +typedef struct vm_switch_trace vmswitch_trace_t; + +/* + * ACRN specific constants shared between the driver and user-mode + */ +/* Per CPU buffer size */ +#define ACRN_BUF_SIZE ((4 * 1024 * 1024) - SBUF_HEAD_SIZE /* 64 bytes */) +/* Size of buffer at which data should be transferred to user-mode */ +#define ACRN_BUF_TRANSFER_SIZE (ACRN_BUF_SIZE / 2) +/* + * The ACRN 'sbuf' buffers consist of fixed size elements. + * This is how they are intended to be used, though SoCWatch only uses it to + * allocate the correct buffer size. + */ +#define ACRN_BUF_ELEMENT_SIZE 32 /* byte */ +#define ACRN_BUF_ELEMENT_NUM (ACRN_BUF_SIZE / ACRN_BUF_ELEMENT_SIZE) +#define ACRN_BUF_FILLED_SIZE(sbuf) (sbuf->size - sbuf_available_space(sbuf)) + +#endif /* _SWHV_STRUCTS_H_ */ diff --git a/drivers/platform/x86/socwatchhv/swhv_acrn.c b/drivers/platform/x86/socwatchhv/swhv_acrn.c new file mode 100644 index 0000000000000..962db47cec452 --- /dev/null +++ b/drivers/platform/x86/socwatchhv/swhv_acrn.c @@ -0,0 +1,734 @@ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +#include "swhv_defines.h" +#include "swhv_driver.h" +#include "swhv_ioctl.h" +#include "swhv_structs.h" +#include "control.h" +#include "swhv_acrn.h" +#include "swhv_acrn_sbuf.h" + +/* ******************************************* + * Compile-time constants + * ******************************************* + */ +#define foreach_cpu(cpu, cpu_num) for ((cpu) = 0; (cpu) < (cpu_num); (cpu)++) + +/* actual physical cpu number, initialized by module init */ +static int pcpu_num; +bool flush_mode; + +/* TODO is this needed? 
+ * module_param(nr_cpus, int, S_IRUSR | S_IWUSR); + */ + +static struct shared_buf **sbuf_per_cpu; + +static pw_u64_t global_collection_switch; +static SW_DEFINE_LIST_HEAD(swhv_msr_collector, swhv_acrn_msr_collector_data); + +/* used by the MSR read IOCTL */ +struct profiling_msr_ops_list *msr_read_ops_list; + +bool buffer_not_ready(int *cpu); + +struct swhv_acrn_msr_collector_data *swhv_alloc_msr_collector_node(void) +{ + struct swhv_acrn_msr_collector_data *node = + (struct swhv_acrn_msr_collector_data *)kmalloc( + sizeof(struct swhv_acrn_msr_collector_data), + GFP_KERNEL); + if (node) { + node->per_msg_payload_size = 0x0; + node->sample_id = 0x0; + node->msr_ops_list = kmalloc( + pcpu_num * sizeof(struct profiling_msr_ops_list), + GFP_KERNEL); + memset(node->msr_ops_list, 0, + pcpu_num * sizeof(struct profiling_msr_ops_list)); + SW_LIST_ENTRY_INIT(node, list); + } + return node; +} +struct swhv_acrn_msr_collector_data * +swhv_add_driver_msr_info(void *list_head, + const struct swhv_driver_interface_info *info) +{ + int cpu; + + SW_LIST_HEAD_VAR(swhv_acrn_msr_collector_data) * head = list_head; + + struct swhv_acrn_msr_collector_data *node = + swhv_alloc_msr_collector_node(); + if (!node) { + pw_pr_error("ERROR allocating MSR collector node!\n"); + return NULL; + } + + node->sample_id = info->sample_id; + node->cpu_mask = info->cpu_mask; + foreach_cpu(cpu, pcpu_num) + { + node->msr_ops_list[cpu].collector_id = COLLECTOR_SOCWATCH; + node->msr_ops_list[cpu].msr_op_state = MSR_OP_REQUESTED; + } + + SW_LIST_ADD(head, node, list); + return node; +} + +int swhv_add_driver_msr_io_desc(struct swhv_acrn_msr_collector_data *node, + struct swhv_driver_io_descriptor *info) +{ + int idx, cpu; + pw_u16_t num_entries; + struct profiling_msr_op *msr_op = NULL; + + /* Confirm this is an MSR IO descriptor */ + if (info->collection_type != SWHV_COLLECTOR_TYPE_MSR) { + pw_pr_error( + "ERROR trying to configure MSR collector with other data!\n"); + return -EINVAL; + } + + 
foreach_cpu(cpu, pcpu_num) + { + num_entries = node->msr_ops_list[cpu].num_entries; + if (num_entries >= MAX_MSR_LIST_NUM) { + pw_pr_error( + "ERROR trying to add too many MSRs to collect!\n"); + return -PW_ERROR; + } + + idx = num_entries; + + msr_op = &(node->msr_ops_list[cpu].entries[idx]); + + msr_op->msr_id = info->msr_descriptor.address; + if (info->collection_command == SWHV_IO_CMD_READ) { + msr_op->msr_op_type = MSR_OP_READ; + } else if (info->collection_command == SWHV_IO_CMD_WRITE) { + msr_op->msr_op_type = MSR_OP_WRITE; + } + + /* + * Use the param field to set sample id. + * This'll be used in the hypervisor to + * set the id in the samples + */ + msr_op->param = (uint16_t)node->sample_id; + + num_entries++; + + if (num_entries < MAX_MSR_LIST_NUM) { + node->msr_ops_list[cpu].entries[num_entries].msr_id = + -1; + } + node->msr_ops_list[cpu].num_entries = num_entries; + } + return PW_SUCCESS; +} + +int swhv_init_per_cpu_buffers(void) +{ + int i, ret, cpu; + + sbuf_per_cpu = vmalloc(pcpu_num * sizeof(struct shared_buf *)); + + foreach_cpu(cpu, pcpu_num) + { + /* allocate shared_buf */ + sbuf_per_cpu[cpu] = sbuf_allocate(ACRN_BUF_ELEMENT_NUM, + ACRN_BUF_ELEMENT_SIZE); + if (!sbuf_per_cpu[cpu]) { + pw_pr_error("Failed to allocate buffer for cpu %d\n", + cpu); + ret = -ENOMEM; + goto out_free; + } + } + + /* TODO understand the use of this API */ + foreach_cpu(cpu, pcpu_num) + { + ret = sbuf_share_setup(cpu, ACRN_SOCWATCH, sbuf_per_cpu[cpu]); + if (ret < 0) { + pw_pr_error("Failed to setup buffer for cpu %d\n", cpu); + goto out_sbuf; + } + } + + return PW_SUCCESS; +out_sbuf: + for (i = --cpu; i >= 0; i--) { + sbuf_share_setup(i, ACRN_SOCWATCH, NULL); + } + cpu = pcpu_num; + +out_free: + for (i = --cpu; i >= 0; i--) { + sbuf_free(sbuf_per_cpu[i]); + } + + vfree(sbuf_per_cpu); + return ret; +} + +void swhv_destroy_per_cpu_buffers(void) +{ + int cpu; + + pw_pr_debug("%s, pcpu_num: %d\n", __func__, pcpu_num); + + foreach_cpu(cpu, pcpu_num) + { + /* TODO anything 
else to de-register? + * deregister devices + */ + + /* set sbuf pointer to NULL in HV */ + sbuf_share_setup(cpu, ACRN_SOCWATCH, NULL); + + /* free sbuf, sbuf_per_cpu[cpu] should be set NULL */ + sbuf_free(sbuf_per_cpu[cpu]); + } + vfree(sbuf_per_cpu); +} + +void swhv_free_msr_collector_node(struct swhv_acrn_msr_collector_data *node) +{ + if (!node) { + return; + } + + kfree(node->msr_ops_list); + kfree(node); + return; +} + +void swhv_init_msr_collector_list(void) +{ + void *list_head = &swhv_msr_collector; + + SW_LIST_HEAD_VAR(swhv_acrn_msr_collector_data) * head = list_head; + SW_LIST_HEAD_INIT(head); +} + +void swhv_destroy_msr_collector_list(void) +{ + void *list_head = &swhv_msr_collector; + + SW_LIST_HEAD_VAR(swhv_acrn_msr_collector_data) * head = list_head; + while (!SW_LIST_EMPTY(head)) { + struct swhv_acrn_msr_collector_data *curr = + SW_LIST_GET_HEAD_ENTRY( + head, swhv_acrn_msr_collector_data, list); + SW_LIST_UNLINK(curr, list); + swhv_free_msr_collector_node(curr); + } +} + +void swhv_handle_hypervisor_collector(uint32_t control_cmd) +{ + struct profiling_control *acrn_profiling_control; + + acrn_profiling_control = + kmalloc(sizeof(struct profiling_control), GFP_KERNEL); + memset(acrn_profiling_control, 0, sizeof(struct profiling_control)); + + acrn_profiling_control->collector_id = COLLECTOR_SOCWATCH; + + if (control_cmd == 1) { /* start collection + send switch bitmask */ + pw_pr_debug("STARTING ACRN PROFILING SERVICE\n"); + global_collection_switch |= + control_cmd; /* first bit controls start/stop + * of collection + */ + } else if (control_cmd == 0) { /* stop collection + * + reset switch bitmask + */ + pw_pr_debug("STOPPING ACRN PROFILING SERVICE\n"); + global_collection_switch = control_cmd; + } + acrn_profiling_control->switches = global_collection_switch; + + /* send collection command + switch bitmask */ + acrn_hypercall2(HC_PROFILING_OPS, PROFILING_SET_CONTROL_SWITCH, + virt_to_phys(acrn_profiling_control)); + 
kfree(acrn_profiling_control); +} + +int swhv_handle_msr_collector_list(void) +{ + void *list_head = &swhv_msr_collector; + + SW_LIST_HEAD_VAR(swhv_acrn_msr_collector_data) * head = list_head; + int retVal = PW_SUCCESS; + struct swhv_acrn_msr_collector_data *curr = NULL; + + if (SW_LIST_EMPTY(&swhv_msr_collector)) { + pw_pr_debug("DEBUG: EMPTY MSR COLLECTOR LIST\n"); + return retVal; + } + + if (!head) { + return -PW_ERROR; + } + SW_LIST_FOR_EACH_ENTRY(curr, head, list) + { + pw_pr_debug("HANDLING MSR NODE\n"); + + /*hypervisor call to do immediate MSR read */ + acrn_hypercall2(HC_PROFILING_OPS, PROFILING_MSR_OPS, + virt_to_phys(curr->msr_ops_list)); + } + return retVal; +} + +long swhv_configure(struct swhv_driver_interface_msg __user *remote_msg, + int local_len) +{ + struct swhv_driver_interface_info *local_info = NULL; + struct swhv_driver_io_descriptor *local_io_desc = NULL; + struct swhv_driver_interface_msg *local_msg = vmalloc(local_len); + pw_u16_t num_infos = 0, num_io_desc = 0; + pw_u32_t local_config_bitmap = 0; + int done = 0; + bool driver_info_added = false; + + char *__data = (char *)local_msg->infos; + size_t dst_idx = 0, desc_idx = 0; + struct swhv_acrn_msr_collector_data *msr_collector_node = NULL; + + if (!local_msg) { + pw_pr_error("ERROR allocating space for local message!\n"); + return -EFAULT; + } + if (copy_from_user(local_msg, remote_msg, local_len)) { + pw_pr_error("ERROR copying message from user space!\n"); + vfree(local_msg); + return -EFAULT; + } + + flush_mode = false; + + pw_pr_debug("local_len: %d\n", local_len); + /* + * We aren't allowed to config the driver multiple times between + * collections. Clear out any previous config values. 
+ */ + swhv_destroy_msr_collector_list(); + + /* clear the collection bitmask */ + global_collection_switch = 0; + + num_infos = local_msg->num_infos; + pw_pr_debug("LOCAL NUM INFOS = %u\n", num_infos); + for (; num_infos > 0 && !done; --num_infos) { + local_info = + (struct swhv_driver_interface_info *)&__data[dst_idx]; + desc_idx = dst_idx + SWHV_DRIVER_INTERFACE_INFO_HEADER_SIZE(); + dst_idx += (SWHV_DRIVER_INTERFACE_INFO_HEADER_SIZE() + + local_info->num_io_descriptors * + sizeof(struct swhv_driver_io_descriptor)); + pw_pr_debug("# msrs = %u\n", + (unsigned int)local_info->num_io_descriptors); + + num_io_desc = local_info->num_io_descriptors; + pw_pr_debug("LOCAL NUM IO DESC = %u\n", num_io_desc); + + driver_info_added = false; + for (; num_io_desc > 0; --num_io_desc) { + local_io_desc = (struct swhv_driver_io_descriptor + *)&__data[desc_idx]; + desc_idx += sizeof(struct swhv_driver_io_descriptor); + if (local_io_desc->collection_type == + SWHV_COLLECTOR_TYPE_MSR) { + if (!driver_info_added) { + msr_collector_node = + swhv_add_driver_msr_info( + &swhv_msr_collector, + local_info); + if (msr_collector_node == NULL) { + return -PW_ERROR; + } + driver_info_added = true; + } + + pw_pr_debug( + "MSR - addr: 0x%llx, type: %u, read/write: %u\n", + local_io_desc->msr_descriptor.address, + local_io_desc->msr_descriptor.type, + local_io_desc->collection_command); + swhv_add_driver_msr_io_desc(msr_collector_node, + local_io_desc); + } else if (local_io_desc->collection_type == + SWHV_COLLECTOR_TYPE_SWITCH) { + local_config_bitmap = + local_io_desc->switch_descriptor + .switch_bitmask; + pw_pr_debug("local bitmask = %u\n", + local_config_bitmap); + + global_collection_switch = local_config_bitmap; + + /* only one set of collection switches are + * expected, we are done configuring + */ + done = 1; + break; + } else { + pw_pr_error( + "WARNING: unknown collector configuration requested, collector id: %u!\n", + local_io_desc->collection_type); + } + } + driver_info_added = 
false; + } + vfree(local_msg); + return PW_SUCCESS; +} + +long swhv_stop(void) +{ + uint32_t control = 0; /* stop collection command */ + + pw_pr_debug("socwatch: stop called\n"); + + /*If MSR ops are present, perform them to get begin snapshot data. */ + swhv_handle_msr_collector_list(); + + /* stop collection + reset switch bitmask */ + swhv_handle_hypervisor_collector(control); + + /* flush partially filled hypervisor buffers */ + flush_mode = true; + + /* + * Clear out the MSR collector list. + */ + swhv_destroy_msr_collector_list(); + + return PW_SUCCESS; +} + +long swhv_start(void) +{ + uint32_t control = 1; /* start collection command */ +#if 0 + struct profiling_vm_info_list *vm_info_list = NULL; + int i; +#endif + pw_pr_debug("socwatch: start called\n"); + + flush_mode = false; + + /* start collection + send switch bitmask */ + swhv_handle_hypervisor_collector(control); + + /* If MSR ops are present, perform them to get begin snapshot data. */ + swhv_handle_msr_collector_list(); + +#if 0 + /* Expand this eventually to retrieve VM-related info + * from the hypervisor. Leaving it here for now. 
+ */ + vm_info_list = kmalloc(sizeof(struct profiling_vm_info_list), + GFP_KERNEL); + memset(vm_info_list, 0, sizeof(struct profiling_vm_info_list)); + + acrn_hypercall2(HC_PROFILING_OPS, PROFILING_GET_VMINFO, + virt_to_phys(vm_info_list)); + + pw_pr_debug("Number of VMs: %d\n", vm_info_list->num_vms); + for (i = 0; i < vm_info_list->num_vms; ++i) { + pw_pr_debug("VM id: %d\n", vm_info_list->vm_list[i].vm_id_num); + pw_pr_debug("VM name: %s\n", vm_info_list->vm_list[i].vm_name); + } +#endif + return PW_SUCCESS; +} + +long swhv_get_cpu_count(u32 __user *remote_args) +{ + uint32_t num_CPUs = pcpu_num; + + return copy_to_user(remote_args, &num_CPUs, sizeof(num_CPUs)); +}; + +int device_open_i(struct inode *inode, struct file *file) +{ + pw_pr_debug("socwatch: device_open_i() called\n"); + return PW_SUCCESS; +} + +long swhv_get_clock(u32 __user *remote_in_args, u64 __user *remote_args) +{ + return -1; +} + +long swhv_get_topology(u64 __user *remote_args) +{ + return -1; +} + +long swhv_get_hypervisor_type(u32 __user *remote_args) +{ + uint32_t hypervisor_type = swhv_hypervisor_acrn; + + return copy_to_user(remote_args, &hypervisor_type, + sizeof(hypervisor_type)); +} + +long swhv_msr_read(u32 __user *remote_in_args, u64 __user *remote_args) +{ + int cpu; + uint64_t msr_addr = 0, value; + int ret = PW_SUCCESS; + + if (get_user(msr_addr, remote_in_args)) { + pw_pr_error( + "ERROR: couldn't copy remote args for read MSR IOCTL!\n"); + return -1; + } + + if (!msr_read_ops_list) { + msr_read_ops_list = kmalloc( + pcpu_num * sizeof(struct profiling_msr_ops_list), + GFP_KERNEL); + if (!msr_read_ops_list) { + pw_pr_error( + "couldn't allocate memory for doing an MSR read!\n"); + return -1; + } + memset(msr_read_ops_list, 0, + pcpu_num * sizeof(struct profiling_msr_ops_list)); + } + + /* + * The hypercall is set in such a way that the MSR read will occur on + * all CPUs and as a result we have to set up structures for each CPU. 
+ */ + foreach_cpu(cpu, pcpu_num) + { + msr_read_ops_list[cpu].collector_id = COLLECTOR_SOCWATCH; + msr_read_ops_list[cpu].msr_op_state = MSR_OP_REQUESTED; + msr_read_ops_list[cpu].num_entries = 1; + msr_read_ops_list[cpu].entries[0].msr_id = msr_addr; + msr_read_ops_list[cpu].entries[0].msr_op_type = MSR_OP_READ; + msr_read_ops_list[cpu].entries[1].msr_id = + -1; /* the next entry is expected to be set to -1 */ + msr_read_ops_list[cpu].entries[1].param = + 0; /* set to 0 to not generate sample in hypervisor */ + } + + /* hypervisor call to do immediate MSR read */ + acrn_hypercall2(HC_PROFILING_OPS, PROFILING_MSR_OPS, + virt_to_phys(msr_read_ops_list)); + + /* copy value to remote args, pick from any CPU */ + value = msr_read_ops_list[0].entries[0].value; + + if (copy_to_user(remote_args, &value, sizeof(value))) { + pw_pr_error("ERROR: unable to copy MSR value to userspace!\n"); + ret = -PW_ERROR; + } + + return ret; +} + +long swhv_collection_poll(void) +{ + int ret = PW_SUCCESS; + /* + * Handle 'POLL' timer expirations. + */ + if (SW_LIST_EMPTY(&swhv_msr_collector)) { + pw_pr_debug("DEBUG: EMPTY MSR COLLECTOR POLL LIST\n"); + } + + if (swhv_handle_msr_collector_list()) { + pw_pr_error("ERROR: unable to copy MSR value to userspace!\n"); + ret = -PW_ERROR; + } + return ret; +} + +ssize_t swhv_transfer_data(void *user_buffer, struct shared_buf *sbuf_to_copy, + size_t bytes_to_read) +{ + unsigned long bytes_not_copied; + ssize_t bytes_read; + ssize_t ret = 0; + void *data_read = NULL; + + if (bytes_to_read == 0) { + pw_pr_debug( + "%s - 0 bytes requested to transfer! 
Returning...\n", + __func__); + + return bytes_to_read; + } + + data_read = vmalloc(bytes_to_read); + if (!data_read) { + pw_pr_error( + "couldn't allocate memory when trying to transfer data to userspace!\n"); + return 0; + } + + pw_pr_debug("%s - bytes to transfer %zu\n", __func__, bytes_to_read); + + if (sbuf_to_copy) { + bytes_read = sbuf_get_variable(sbuf_to_copy, &data_read, + bytes_to_read); + + if (bytes_read != bytes_to_read) { + pw_pr_warn("%s - bytes read (%zu bytes) are not equal to expected bytes (%zu bytes) to be read!", __func__, bytes_read, bytes_to_read); + } + + if (bytes_read < 0) { + pw_pr_error("Error reading this buffer\n"); + ret = -PW_ERROR; + goto ret_free; + } + if (bytes_read) { + /* copy data to device file */ + if (bytes_read > bytes_to_read) { + pw_pr_error("user buffer is too small\n"); + ret = -PW_ERROR; + goto ret_free; + } + + bytes_not_copied = copy_to_user(user_buffer, data_read, + bytes_read); + /* TODO check if this is meaningful enough to have */ + /* *offset += bytes_read - bytes_not_copied; */ + + if (bytes_not_copied) { + pw_pr_error( + "transferring data to user mode failed, bytes %ld\n", + bytes_not_copied); + /* copy_to_user returns an unsigned */ + ret = -EIO; + goto ret_free; + } + ret = bytes_read; + goto ret_free; + } else { + pw_pr_debug( + "Buffer empty! nothing more to read from this buffer\n"); + } + } + +ret_free: + vfree(data_read); + return ret; +} + +bool buffer_not_ready(int *cpu) +{ + /* cycle through and confirm buffers on all CPUs + * are less than ACRN_BUF_TRANSFER_SIZE + * as well as flush mode has not been requested + */ + int i = 0; + bool not_enough_data = true; + + pw_pr_debug( + "checking if a buffer is ready to be copied to the device file\n"); + /* + * It's possible that the buffer from cpu0 may always have + * data to transfer and can potentially prevent buffers from + * other cpus from ever being serviced. + * TODO Consider adding an optimization to check for last cpu read. 
+ */ + for (i = 0; i < pcpu_num; ++i) { + if (ACRN_BUF_FILLED_SIZE(sbuf_per_cpu[i]) >= + ACRN_BUF_TRANSFER_SIZE || + (flush_mode && ACRN_BUF_FILLED_SIZE(sbuf_per_cpu[i]))) { + not_enough_data = false; + *cpu = i; + pw_pr_debug( + "buffer ready (flush_mode=%d) on cpu %d, waking up read queue\n", + flush_mode, *cpu); + break; + } + } + return not_enough_data && !flush_mode; +} + +ssize_t device_read_i(struct file *file, char __user *user_buffer, + size_t length, loff_t *offset) +{ + ssize_t bytes_read = 0; + int cpu = 0; + + pw_pr_debug("%s - usermode attempting to read device file\n", __func__); + if (buffer_not_ready(&cpu)) { + pw_pr_debug("%s - no buffer ready to be read\n", __func__); + return bytes_read; + } + + if (flush_mode) { + pw_pr_debug("flush mode on, ready to flush a buffer\n"); + } + length = ACRN_BUF_FILLED_SIZE(sbuf_per_cpu[cpu]); + pw_pr_debug("on cpu %d, buffer size is %zu bytes\n", cpu, length); + + bytes_read = swhv_transfer_data(user_buffer, sbuf_per_cpu[cpu], length); + + return bytes_read; +} + +void cleanup_error_i(void) +{ + /* NOP for acrn */ +} + +int swhv_load_driver_i(void) +{ + int ret = PW_SUCCESS; + + if (x86_hyper_type != X86_HYPER_ACRN) { + pw_pr_error("Non-ACRN hypervisor not supported!\n"); + return -EINVAL; + } + + /* TODO: we could get the cpu count by querying the hypervisor later */ + pcpu_num = num_present_cpus(); + pw_pr_debug("%s, pcpu_num: %d\n", __func__, pcpu_num); + + ret = swhv_init_per_cpu_buffers(); + if (ret < 0) { + return ret; + } + + swhv_init_msr_collector_list(); + + return ret; +} + +void swhv_unload_driver_i(void) +{ + swhv_destroy_per_cpu_buffers(); + + /* used by the MSR read IOCTL */ + kfree(msr_read_ops_list); +} diff --git a/drivers/platform/x86/socwatchhv/swhv_driver.c b/drivers/platform/x86/socwatchhv/swhv_driver.c new file mode 100644 index 0000000000000..7a4e6c57ab456 --- /dev/null +++ b/drivers/platform/x86/socwatchhv/swhv_driver.c @@ -0,0 +1,376 @@ +/* + + This file is provided under a dual 
BSD/GPLv2 license. When using or + redistributing this file, you may do so under either license. + + GPL LICENSE SUMMARY + + Copyright(c) 2014 - 2018 Intel Corporation. + + This program is free software; you can redistribute it and/or modify + it under the terms of version 2 of the GNU General Public License as + published by the Free Software Foundation. + + This program is distributed in the hope that it will be useful, but + WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + General Public License for more details. + + Contact Information: + SoC Watch Developer Team + Intel Corporation, + 1300 S Mopac Expwy, + Austin, TX 78746 + + BSD LICENSE + + Copyright(c) 2014 - 2018 Intel Corporation. + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions + are met: + + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in + the documentation and/or other materials provided with the + distribution. + * Neither the name of Intel Corporation nor the names of its + contributors may be used to endorse or promote products derived + from this software without specific prior written permission. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT + OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +*/ +#define MOD_AUTHOR "SoCWatch Team" +#define MOD_DESC "SoCWatch kernel module to communicate with hypervisors" + +#include "swhv_defines.h" +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "swhv_driver.h" +#include "swhv_ioctl.h" +#include "swhv_structs.h" +#if HYPERVISOR == MOBILEVISOR +#include "swhv_mobilevisor.h" +#include "swhv_mobilevisor_buffer.h" +#elif HYPERVISOR == ACRN +#include "swhv_acrn.h" +#endif + +/* ******************************************* + * Compile-time constants + * ******************************************* + */ + +/* ******************************************* + * Local data structures. + * ******************************************* + */ +#if defined(HAVE_COMPAT_IOCTL) && defined(CONFIG_X86_64) +#include +/* + * Helper struct used to translate IOCTLs + * from 32b user programs in 64b kernels. + */ +struct spdrv_ioctl_arg32 { + pw_s32_t in_len; + pw_s32_t out_len; + compat_caddr_t in_arg; + compat_caddr_t out_arg; +}; +#endif /* COMPAT && x64 */ + +static int sp_dev_major_num = -1; +static dev_t sp_dev; +static struct cdev *sp_cdev; +static struct class *sp_class; + +/* ******************************************* + * Variables. + * ******************************************* + */ + +/* Per-CPU variable containing the currently running vcpu. 
*/ +/*static DEFINE_PER_CPU(int, curr_vcpu) = 0; */ + +/* ******************************************* + * Function definitions. + * ******************************************* + */ + +static long swhv_handle_cmd(u32 __user *remote_cmd) +{ + u32 local_cmd = 0; + long status = 0; + + if (get_user(local_cmd, remote_cmd)) { + pw_pr_error("ERROR: couldn't copy in remote command!\n"); + return -1; + } + switch (local_cmd) { + case SWHVDRV_CMD_START: + pw_pr_debug("RECEIVED CMD START!\n"); + status = swhv_start(); + break; + case SWHVDRV_CMD_STOP: + pw_pr_debug("RECEIVED CMD STOP!\n"); + status = swhv_stop(); + break; + default: + pw_pr_error( + "ERROR: invalid command %d passed to the SoFIA driver!\n", + local_cmd); + status = -1; + break; + } + return status; +}; + +long swhv_get_version(u64 __user *remote_args) +{ + u64 local_version = (u64)SWHVDRV_VERSION_MAJOR << 32 | + (u64)SWHVDRV_VERSION_MINOR << 16 | + (u64)SWHVDRV_VERSION_OTHER; + + return put_user(local_version, remote_args); +}; + +#if defined(CONFIG_COMPAT) && defined(CONFIG_X86_64) +#define MATCH_IOCTL(num, pred) ((num) == (pred) || (num) == (pred##32)) +#else +#define MATCH_IOCTL(num, pred) ((num) == (pred)) +#endif + +static long handle_ioctl(unsigned int ioctl_num, + struct spdrv_ioctl_arg __user *remote_args) +{ + long status = 0; + struct spdrv_ioctl_arg local_args; + + int local_in_len, local_out_len; + + if (copy_from_user(&local_args, remote_args, sizeof(local_args))) { + pw_pr_error("ERROR: couldn't copy in remote args!\n"); + return -1; + } + pw_pr_debug("Invoking IOCTL!\n"); + + local_in_len = local_args.in_len; + local_out_len = local_args.out_len; + + switch (ioctl_num) { + case SWHVDRV_OPERATION_CMD: + status = swhv_handle_cmd((u32 __user *)local_args.in_arg); + break; + + case SWHVDRV_OPERATION_CONFIGURE: + pw_pr_debug("Trying to configure driver!\n"); + status = swhv_configure( + (struct swhv_driver_interface_msg __user *) + local_args.in_arg, + local_in_len); + break; + + case 
SWHVDRV_OPERATION_VERSION: + pw_pr_debug("Trying to get driver version!\n"); + status = swhv_get_version((u64 __user *)local_args.out_arg); + break; + + case SWHVDRV_OPERATION_CLOCK: + pw_pr_debug("Trying to get hypervisor type!\n"); + status = swhv_get_clock((u32 __user *)local_args.in_arg, + (u64 __user *)local_args.out_arg); + break; + + case SWHVDRV_OPERATION_TOPOLOGY: + pw_pr_debug("Trying to get CPU topology!\n"); + status = swhv_get_topology((u64 __user *)local_args.out_arg); + break; + + case SWHVDRV_OPERATION_CPUCOUNT: + pw_pr_debug("Trying to get CPU count!\n"); + status = swhv_get_cpu_count((u32 __user *)local_args.out_arg); + break; + + case SWHVDRV_OPERATION_HYPERVISOR_TYPE: + pw_pr_debug("Trying to get hypervisor type!\n"); + status = swhv_get_hypervisor_type( + (u32 __user *)local_args.out_arg); + break; + + case SWHVDRV_OPERATION_MSR_READ: + pw_pr_debug("Trying to do MSR read!\n"); + status = swhv_msr_read((u32 __user *)local_args.in_arg, + (u64 __user *)local_args.out_arg); + break; + case SWHVDRV_OPERATION_POLL: + pw_pr_debug("Polling tick!\n"); + status = swhv_collection_poll(); + break; + } + return status; +} + +static long device_unlocked_ioctl(struct file *filep, unsigned int ioctl_num, + unsigned long ioctl_param) +{ + return handle_ioctl(_IOC_NR(ioctl_num), + (struct spdrv_ioctl_arg __user *)ioctl_param); +}; + +#if defined(HAVE_COMPAT_IOCTL) && defined(CONFIG_X86_64) +static long device_compat_ioctl(struct file *file, unsigned int ioctl_num, + unsigned long ioctl_param) +{ + struct spdrv_ioctl_arg32 __user *remote_args32 = + compat_ptr(ioctl_param); + struct spdrv_ioctl_arg __user *remote_args = + compat_alloc_user_space(sizeof(*remote_args)); + int tmp; + u32 data; + + if (!remote_args) { + return -1; + } + if (get_user(tmp, &remote_args32->in_len) || + put_user(tmp, &remote_args->in_len)) { + return -1; + } + if (get_user(tmp, &remote_args32->out_len) || + put_user(tmp, &remote_args->out_len)) { + return -1; + } + if (get_user(data, 
&remote_args32->in_arg) || + put_user(compat_ptr(data), &remote_args->in_arg)) { + return -1; + } + if (get_user(data, &remote_args32->out_arg) || + put_user(compat_ptr(data), &remote_args->out_arg)) { + return -1; + } + return handle_ioctl(_IOC_NR(ioctl_num), remote_args); +}; +#endif /* COMPAT && x64 */ + +static int device_open(struct inode *inode, struct file *file) +{ + return device_open_i(inode, file); +} + +static ssize_t +device_read(struct file *file, /* see include/linux/fs.h */ + char __user *buffer, /* buffer to be filled with data */ + size_t length, /* length of the buffer */ + loff_t *offset) +{ + return device_read_i(file, buffer, length, offset); +} + +static struct file_operations s_fops = { + .open = &device_open, + .read = &device_read, + .unlocked_ioctl = &device_unlocked_ioctl, +#if defined(HAVE_COMPAT_IOCTL) && defined(CONFIG_X86_64) + .compat_ioctl = &device_compat_ioctl, +#endif /* COMPAT && x64 */ +}; + +static void cleanup_error(void) +{ + unregister_chrdev(sp_dev_major_num, SWHV_DEVICE_NAME); + device_destroy(sp_class, sp_dev); + class_destroy(sp_class); + unregister_chrdev_region(sp_dev, 1); + cdev_del(sp_cdev); +} + +int __init swhv_load_driver(void) +{ + int error; + struct device *dev; + + /* create the char device "sp" */ + alloc_chrdev_region(&sp_dev, 0, 1, SWHV_DEVICE_NAME); + sp_dev_major_num = MAJOR(sp_dev); + sp_class = class_create(THIS_MODULE, SWHV_DEVICE_NAME); + if (IS_ERR(sp_class)) { + error = PTR_ERR(sp_class); + pw_pr_error("Error registering sp class\n"); + goto cleanup_return_error; + } + + dev = device_create(sp_class, NULL, sp_dev, NULL, SWHV_DEVICE_NAME); + if (dev == NULL) { + error = PTR_ERR(dev); + pw_pr_error("Error during call to device_create\n"); + goto cleanup_return_error; + } + + sp_cdev = cdev_alloc(); + if (sp_cdev == NULL) { + error = -ENOMEM; + pw_pr_error("Error allocating character device\n"); + goto cleanup_return_error; + } + sp_cdev->owner = THIS_MODULE; + sp_cdev->ops = &s_fops; + if 
(cdev_add(sp_cdev, sp_dev, 1) < 0) { + error = -1; + pw_pr_error("Error registering device driver\n"); + goto cleanup_return_error; + } + + error = swhv_load_driver_i(); + if (error < 0) { + pw_pr_error("Error initializing device driver\n"); + goto cleanup_return_error; + } + + return 0; + +cleanup_return_error: + cleanup_error_i(); + + /* release char device */ + cleanup_error(); + return error; +} + +static void __exit swhv_unload_driver(void) +{ + swhv_unload_driver_i(); + + /* release char device */ + cleanup_error(); +} + +module_init(swhv_load_driver); +module_exit(swhv_unload_driver); + +MODULE_LICENSE("Dual BSD/GPL"); +MODULE_AUTHOR(MOD_AUTHOR); +MODULE_DESCRIPTION(MOD_DESC); diff --git a/drivers/power/supply/olpc_battery.c b/drivers/power/supply/olpc_battery.c index 6da79ae148601..5a97e42a35473 100644 --- a/drivers/power/supply/olpc_battery.c +++ b/drivers/power/supply/olpc_battery.c @@ -428,14 +428,14 @@ static int olpc_bat_get_property(struct power_supply *psy, if (ret) return ret; - val->intval = (s16)be16_to_cpu(ec_word) * 100 / 256; + val->intval = (s16)be16_to_cpu(ec_word) * 10 / 256; break; case POWER_SUPPLY_PROP_TEMP_AMBIENT: ret = olpc_ec_cmd(EC_AMB_TEMP, NULL, 0, (void *)&ec_word, 2); if (ret) return ret; - val->intval = (int)be16_to_cpu(ec_word) * 100 / 256; + val->intval = (int)be16_to_cpu(ec_word) * 10 / 256; break; case POWER_SUPPLY_PROP_CHARGE_COUNTER: ret = olpc_ec_cmd(EC_BAT_ACR, NULL, 0, (void *)&ec_word, 2); diff --git a/drivers/power/supply/twl4030_charger.c b/drivers/power/supply/twl4030_charger.c index bbcaee56db9d7..b6a7d9f74cf30 100644 --- a/drivers/power/supply/twl4030_charger.c +++ b/drivers/power/supply/twl4030_charger.c @@ -996,12 +996,13 @@ static int twl4030_bci_probe(struct platform_device *pdev) if (bci->dev->of_node) { struct device_node *phynode; - phynode = of_find_compatible_node(bci->dev->of_node->parent, - NULL, "ti,twl4030-usb"); + phynode = of_get_compatible_child(bci->dev->of_node->parent, + "ti,twl4030-usb"); if 
(phynode) { bci->usb_nb.notifier_call = twl4030_bci_usb_ncb; bci->transceiver = devm_usb_get_phy_by_node( bci->dev, phynode, &bci->usb_nb); + of_node_put(phynode); if (IS_ERR(bci->transceiver)) { ret = PTR_ERR(bci->transceiver); if (ret == -EPROBE_DEFER) diff --git a/drivers/remoteproc/qcom_q6v5.c b/drivers/remoteproc/qcom_q6v5.c index 61a760ee4aacc..e9ab90c19304f 100644 --- a/drivers/remoteproc/qcom_q6v5.c +++ b/drivers/remoteproc/qcom_q6v5.c @@ -198,6 +198,9 @@ int qcom_q6v5_init(struct qcom_q6v5 *q6v5, struct platform_device *pdev, } q6v5->fatal_irq = platform_get_irq_byname(pdev, "fatal"); + if (q6v5->fatal_irq == -EPROBE_DEFER) + return -EPROBE_DEFER; + ret = devm_request_threaded_irq(&pdev->dev, q6v5->fatal_irq, NULL, q6v5_fatal_interrupt, IRQF_TRIGGER_RISING | IRQF_ONESHOT, @@ -208,6 +211,9 @@ int qcom_q6v5_init(struct qcom_q6v5 *q6v5, struct platform_device *pdev, } q6v5->ready_irq = platform_get_irq_byname(pdev, "ready"); + if (q6v5->ready_irq == -EPROBE_DEFER) + return -EPROBE_DEFER; + ret = devm_request_threaded_irq(&pdev->dev, q6v5->ready_irq, NULL, q6v5_ready_interrupt, IRQF_TRIGGER_RISING | IRQF_ONESHOT, @@ -218,6 +224,9 @@ int qcom_q6v5_init(struct qcom_q6v5 *q6v5, struct platform_device *pdev, } q6v5->handover_irq = platform_get_irq_byname(pdev, "handover"); + if (q6v5->handover_irq == -EPROBE_DEFER) + return -EPROBE_DEFER; + ret = devm_request_threaded_irq(&pdev->dev, q6v5->handover_irq, NULL, q6v5_handover_interrupt, IRQF_TRIGGER_RISING | IRQF_ONESHOT, @@ -229,6 +238,9 @@ int qcom_q6v5_init(struct qcom_q6v5 *q6v5, struct platform_device *pdev, disable_irq(q6v5->handover_irq); q6v5->stop_irq = platform_get_irq_byname(pdev, "stop-ack"); + if (q6v5->stop_irq == -EPROBE_DEFER) + return -EPROBE_DEFER; + ret = devm_request_threaded_irq(&pdev->dev, q6v5->stop_irq, NULL, q6v5_stop_interrupt, IRQF_TRIGGER_RISING | IRQF_ONESHOT, diff --git a/drivers/rpmsg/qcom_smd.c b/drivers/rpmsg/qcom_smd.c index 8da83a4ebadc3..b2e5a6abf7d5c 100644 --- 
a/drivers/rpmsg/qcom_smd.c +++ b/drivers/rpmsg/qcom_smd.c @@ -1122,8 +1122,10 @@ static struct qcom_smd_channel *qcom_smd_create_channel(struct qcom_smd_edge *ed channel->edge = edge; channel->name = kstrdup(name, GFP_KERNEL); - if (!channel->name) - return ERR_PTR(-ENOMEM); + if (!channel->name) { + ret = -ENOMEM; + goto free_channel; + } spin_lock_init(&channel->tx_lock); spin_lock_init(&channel->recv_lock); @@ -1173,6 +1175,7 @@ static struct qcom_smd_channel *qcom_smd_create_channel(struct qcom_smd_edge *ed free_name_and_channel: kfree(channel->name); +free_channel: kfree(channel); return ERR_PTR(ret); diff --git a/drivers/rtc/hctosys.c b/drivers/rtc/hctosys.c index e79f2a181ad24..b9ec4a16db1f6 100644 --- a/drivers/rtc/hctosys.c +++ b/drivers/rtc/hctosys.c @@ -50,8 +50,10 @@ static int __init rtc_hctosys(void) tv64.tv_sec = rtc_tm_to_time64(&tm); #if BITS_PER_LONG == 32 - if (tv64.tv_sec > INT_MAX) + if (tv64.tv_sec > INT_MAX) { + err = -ERANGE; goto err_read; + } #endif err = do_settimeofday64(&tv64); diff --git a/drivers/rtc/rtc-cmos.c b/drivers/rtc/rtc-cmos.c index cd3a2411bc2f5..a5a19ff105354 100644 --- a/drivers/rtc/rtc-cmos.c +++ b/drivers/rtc/rtc-cmos.c @@ -50,6 +50,7 @@ /* this is for "generic access to PC-style RTC" using CMOS_READ/CMOS_WRITE */ #include +#ifdef CONFIG_ACPI /* * Use ACPI SCI to replace HPET interrupt for RTC Alarm event * @@ -61,6 +62,18 @@ static bool use_acpi_alarm; module_param(use_acpi_alarm, bool, 0444); +static inline int cmos_use_acpi_alarm(void) +{ + return use_acpi_alarm; +} +#else /* !CONFIG_ACPI */ + +static inline int cmos_use_acpi_alarm(void) +{ + return 0; +} +#endif + struct cmos_rtc { struct rtc_device *rtc; struct device *dev; @@ -167,9 +180,9 @@ static inline int hpet_unregister_irq_handler(irq_handler_t handler) #endif /* Don't use HPET for RTC Alarm event if ACPI Fixed event is used */ -static int use_hpet_alarm(void) +static inline int use_hpet_alarm(void) { - return is_hpet_enabled() && !use_acpi_alarm; + return 
is_hpet_enabled() && !cmos_use_acpi_alarm(); } /*----------------------------------------------------------------*/ @@ -244,6 +257,7 @@ static int cmos_read_alarm(struct device *dev, struct rtc_wkalrm *t) struct cmos_rtc *cmos = dev_get_drvdata(dev); unsigned char rtc_control; + /* This not only a rtc_op, but also called directly */ if (!is_valid_irq(cmos->irq)) return -EIO; @@ -340,7 +354,7 @@ static void cmos_irq_enable(struct cmos_rtc *cmos, unsigned char mask) if (use_hpet_alarm()) hpet_set_rtc_irq_bit(mask); - if ((mask & RTC_AIE) && use_acpi_alarm) { + if ((mask & RTC_AIE) && cmos_use_acpi_alarm()) { if (cmos->wake_on) cmos->wake_on(cmos->dev); } @@ -358,7 +372,7 @@ static void cmos_irq_disable(struct cmos_rtc *cmos, unsigned char mask) if (use_hpet_alarm()) hpet_mask_rtc_irq_bit(mask); - if ((mask & RTC_AIE) && use_acpi_alarm) { + if ((mask & RTC_AIE) && cmos_use_acpi_alarm()) { if (cmos->wake_off) cmos->wake_off(cmos->dev); } @@ -439,6 +453,7 @@ static int cmos_set_alarm(struct device *dev, struct rtc_wkalrm *t) unsigned char mon, mday, hrs, min, sec, rtc_control; int ret; + /* This not only a rtc_op, but also called directly */ if (!is_valid_irq(cmos->irq)) return -EIO; @@ -503,9 +518,6 @@ static int cmos_alarm_irq_enable(struct device *dev, unsigned int enabled) struct cmos_rtc *cmos = dev_get_drvdata(dev); unsigned long flags; - if (!is_valid_irq(cmos->irq)) - return -EINVAL; - spin_lock_irqsave(&rtc_lock, flags); if (enabled) @@ -566,6 +578,12 @@ static const struct rtc_class_ops cmos_rtc_ops = { .alarm_irq_enable = cmos_alarm_irq_enable, }; +static const struct rtc_class_ops cmos_rtc_ops_no_alarm = { + .read_time = cmos_read_time, + .set_time = cmos_set_time, + .proc = cmos_procfs, +}; + /*----------------------------------------------------------------*/ /* @@ -842,9 +860,12 @@ cmos_do_probe(struct device *dev, struct resource *ports, int rtc_irq) dev_dbg(dev, "IRQ %d is already in use\n", rtc_irq); goto cleanup1; } + + cmos_rtc.rtc->ops = 
&cmos_rtc_ops; + } else { + cmos_rtc.rtc->ops = &cmos_rtc_ops_no_alarm; } - cmos_rtc.rtc->ops = &cmos_rtc_ops; cmos_rtc.rtc->nvram_old_abi = true; retval = rtc_register_device(cmos_rtc.rtc); if (retval) @@ -980,7 +1001,7 @@ static int cmos_suspend(struct device *dev) } spin_unlock_irq(&rtc_lock); - if ((tmp & RTC_AIE) && !use_acpi_alarm) { + if ((tmp & RTC_AIE) && !cmos_use_acpi_alarm()) { cmos->enabled_wake = 1; if (cmos->wake_on) cmos->wake_on(dev); @@ -1031,7 +1052,7 @@ static void cmos_check_wkalrm(struct device *dev) * ACPI RTC wake event is cleared after resume from STR, * ACK the rtc irq here */ - if (t_now >= cmos->alarm_expires && use_acpi_alarm) { + if (t_now >= cmos->alarm_expires && cmos_use_acpi_alarm()) { cmos_interrupt(0, (void *)cmos->rtc); return; } @@ -1053,7 +1074,7 @@ static int __maybe_unused cmos_resume(struct device *dev) struct cmos_rtc *cmos = dev_get_drvdata(dev); unsigned char tmp; - if (cmos->enabled_wake && !use_acpi_alarm) { + if (cmos->enabled_wake && !cmos_use_acpi_alarm()) { if (cmos->wake_off) cmos->wake_off(dev); else @@ -1132,7 +1153,7 @@ static u32 rtc_handler(void *context) * Or else, ACPI SCI is enabled during suspend/resume only, * update rtc irq in that case. */ - if (use_acpi_alarm) + if (cmos_use_acpi_alarm()) cmos_interrupt(0, (void *)cmos->rtc); else { /* Fix me: can we use cmos_interrupt() here as well? 
*/ diff --git a/drivers/rtc/rtc-ds1307.c b/drivers/rtc/rtc-ds1307.c index 4b2b4627daebf..71396b62dc52b 100644 --- a/drivers/rtc/rtc-ds1307.c +++ b/drivers/rtc/rtc-ds1307.c @@ -1384,7 +1384,6 @@ static void ds1307_clks_register(struct ds1307 *ds1307) static const struct regmap_config regmap_config = { .reg_bits = 8, .val_bits = 8, - .max_register = 0x9, }; static int ds1307_probe(struct i2c_client *client, diff --git a/drivers/rtc/rtc-hid-sensor-time.c b/drivers/rtc/rtc-hid-sensor-time.c index 2751dba850c61..3e1abb4554721 100644 --- a/drivers/rtc/rtc-hid-sensor-time.c +++ b/drivers/rtc/rtc-hid-sensor-time.c @@ -213,7 +213,7 @@ static int hid_rtc_read_time(struct device *dev, struct rtc_time *tm) /* get a report with all values through requesting one value */ sensor_hub_input_attr_get_raw_value(time_state->common_attributes.hsdev, HID_USAGE_SENSOR_TIME, hid_time_addresses[0], - time_state->info[0].report_id, SENSOR_HUB_SYNC); + time_state->info[0].report_id, SENSOR_HUB_SYNC, false); /* wait for all values (event) */ ret = wait_for_completion_killable_timeout( &time_state->comp_last_time, HZ*6); diff --git a/drivers/rtc/rtc-m41t80.c b/drivers/rtc/rtc-m41t80.c index ad03e2f12f5d3..5808a1e4c2e9f 100644 --- a/drivers/rtc/rtc-m41t80.c +++ b/drivers/rtc/rtc-m41t80.c @@ -393,7 +393,7 @@ static int m41t80_read_alarm(struct device *dev, struct rtc_wkalrm *alrm) alrm->time.tm_min = bcd2bin(alarmvals[3] & 0x7f); alrm->time.tm_hour = bcd2bin(alarmvals[2] & 0x3f); alrm->time.tm_mday = bcd2bin(alarmvals[1] & 0x3f); - alrm->time.tm_mon = bcd2bin(alarmvals[0] & 0x3f); + alrm->time.tm_mon = bcd2bin(alarmvals[0] & 0x3f) - 1; alrm->enabled = !!(alarmvals[0] & M41T80_ALMON_AFE); alrm->pending = (flags & M41T80_FLAGS_AF) && alrm->enabled; diff --git a/drivers/rtc/rtc-pcf2127.c b/drivers/rtc/rtc-pcf2127.c index 9f99a0966550b..7cb786d76e3c1 100644 --- a/drivers/rtc/rtc-pcf2127.c +++ b/drivers/rtc/rtc-pcf2127.c @@ -303,6 +303,9 @@ static int pcf2127_i2c_gather_write(void *context, 
memcpy(buf + 1, val, val_size); ret = i2c_master_send(client, buf, val_size + 1); + + kfree(buf); + if (ret != val_size + 1) return ret < 0 ? ret : -EIO; diff --git a/drivers/s390/char/sclp_config.c b/drivers/s390/char/sclp_config.c index 194ffd5c85804..039b2074db7e5 100644 --- a/drivers/s390/char/sclp_config.c +++ b/drivers/s390/char/sclp_config.c @@ -60,7 +60,9 @@ static void sclp_cpu_capability_notify(struct work_struct *work) static void __ref sclp_cpu_change_notify(struct work_struct *work) { + lock_device_hotplug(); smp_rescan_cpus(); + unlock_device_hotplug(); } static void sclp_conf_receiver_fn(struct evbuf_header *evbuf) diff --git a/drivers/s390/cio/vfio_ccw_cp.c b/drivers/s390/cio/vfio_ccw_cp.c index fd77e46eb3b21..70a006ba4d050 100644 --- a/drivers/s390/cio/vfio_ccw_cp.c +++ b/drivers/s390/cio/vfio_ccw_cp.c @@ -387,8 +387,10 @@ static int ccwchain_calc_length(u64 iova, struct channel_program *cp) * orb specified one of the unsupported formats, we defer * checking for IDAWs in unsupported formats to here. */ - if ((!cp->orb.cmd.c64 || cp->orb.cmd.i2k) && ccw_is_idal(ccw)) + if ((!cp->orb.cmd.c64 || cp->orb.cmd.i2k) && ccw_is_idal(ccw)) { + kfree(p); return -EOPNOTSUPP; + } if ((!ccw_is_chain(ccw)) && (!ccw_is_tic(ccw))) break; @@ -528,7 +530,7 @@ static int ccwchain_fetch_direct(struct ccwchain *chain, ret = pfn_array_alloc_pin(pat->pat_pa, cp->mdev, ccw->cda, ccw->count); if (ret < 0) - goto out_init; + goto out_unpin; /* Translate this direct ccw to a idal ccw. 
*/ idaws = kcalloc(ret, sizeof(*idaws), GFP_DMA | GFP_KERNEL); diff --git a/drivers/s390/net/ism_drv.c b/drivers/s390/net/ism_drv.c index c0631895154e6..8684bcec8ff46 100644 --- a/drivers/s390/net/ism_drv.c +++ b/drivers/s390/net/ism_drv.c @@ -415,9 +415,9 @@ static irqreturn_t ism_handle_irq(int irq, void *data) break; clear_bit_inv(bit, bv); + ism->sba->dmbe_mask[bit + ISM_DMB_BIT_OFFSET] = 0; barrier(); smcd_handle_irq(ism->smcd, bit + ISM_DMB_BIT_OFFSET); - ism->sba->dmbe_mask[bit + ISM_DMB_BIT_OFFSET] = 0; } if (ism->sba->e) { diff --git a/drivers/s390/net/qeth_core.h b/drivers/s390/net/qeth_core.h index 34e0d476c5c61..970654fcc48d2 100644 --- a/drivers/s390/net/qeth_core.h +++ b/drivers/s390/net/qeth_core.h @@ -826,6 +826,11 @@ struct qeth_trap_id { /*some helper functions*/ #define QETH_CARD_IFNAME(card) (((card)->dev)? (card)->dev->name : "") +static inline bool qeth_netdev_is_registered(struct net_device *dev) +{ + return dev->netdev_ops != NULL; +} + static inline void qeth_scrub_qdio_buffer(struct qdio_buffer *buf, unsigned int elements) { diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c index ffce6f39828aa..b03515d43745d 100644 --- a/drivers/s390/net/qeth_core_main.c +++ b/drivers/s390/net/qeth_core_main.c @@ -4524,8 +4524,8 @@ static int qeth_snmp_command_cb(struct qeth_card *card, { struct qeth_ipa_cmd *cmd; struct qeth_arp_query_info *qinfo; - struct qeth_snmp_cmd *snmp; unsigned char *data; + void *snmp_data; __u16 data_len; QETH_CARD_TEXT(card, 3, "snpcmdcb"); @@ -4533,7 +4533,6 @@ static int qeth_snmp_command_cb(struct qeth_card *card, cmd = (struct qeth_ipa_cmd *) sdata; data = (unsigned char *)((char *)cmd - reply->offset); qinfo = (struct qeth_arp_query_info *) reply->param; - snmp = &cmd->data.setadapterparms.data.snmp; if (cmd->hdr.return_code) { QETH_CARD_TEXT_(card, 4, "scer1%x", cmd->hdr.return_code); @@ -4546,10 +4545,15 @@ static int qeth_snmp_command_cb(struct qeth_card *card, return 0; } data_len = 
*((__u16 *)QETH_IPA_PDU_LEN_PDU1(data)); - if (cmd->data.setadapterparms.hdr.seq_no == 1) - data_len -= (__u16)((char *)&snmp->data - (char *)cmd); - else - data_len -= (__u16)((char *)&snmp->request - (char *)cmd); + if (cmd->data.setadapterparms.hdr.seq_no == 1) { + snmp_data = &cmd->data.setadapterparms.data.snmp; + data_len -= offsetof(struct qeth_ipa_cmd, + data.setadapterparms.data.snmp); + } else { + snmp_data = &cmd->data.setadapterparms.data.snmp.request; + data_len -= offsetof(struct qeth_ipa_cmd, + data.setadapterparms.data.snmp.request); + } /* check if there is enough room in userspace */ if ((qinfo->udata_len - qinfo->udata_offset) < data_len) { @@ -4562,16 +4566,9 @@ static int qeth_snmp_command_cb(struct qeth_card *card, QETH_CARD_TEXT_(card, 4, "sseqn%i", cmd->data.setadapterparms.hdr.seq_no); /*copy entries to user buffer*/ - if (cmd->data.setadapterparms.hdr.seq_no == 1) { - memcpy(qinfo->udata + qinfo->udata_offset, - (char *)snmp, - data_len + offsetof(struct qeth_snmp_cmd, data)); - qinfo->udata_offset += offsetof(struct qeth_snmp_cmd, data); - } else { - memcpy(qinfo->udata + qinfo->udata_offset, - (char *)&snmp->request, data_len); - } + memcpy(qinfo->udata + qinfo->udata_offset, snmp_data, data_len); qinfo->udata_offset += data_len; + /* check if all replies received ... 
*/ QETH_CARD_TEXT_(card, 4, "srtot%i", cmd->data.setadapterparms.hdr.used_total); diff --git a/drivers/s390/net/qeth_l2_main.c b/drivers/s390/net/qeth_l2_main.c index b5e38531733f2..76b2fba5fba22 100644 --- a/drivers/s390/net/qeth_l2_main.c +++ b/drivers/s390/net/qeth_l2_main.c @@ -854,7 +854,8 @@ static void qeth_l2_remove_device(struct ccwgroup_device *cgdev) if (cgdev->state == CCWGROUP_ONLINE) qeth_l2_set_offline(cgdev); - unregister_netdev(card->dev); + if (qeth_netdev_is_registered(card->dev)) + unregister_netdev(card->dev); } static const struct ethtool_ops qeth_l2_ethtool_ops = { @@ -894,7 +895,7 @@ static int qeth_l2_setup_netdev(struct qeth_card *card) { int rc; - if (card->dev->netdev_ops) + if (qeth_netdev_is_registered(card->dev)) return 0; card->dev->priv_flags |= IFF_UNICAST_FLT; diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c index ada258c01a08e..b7f6a8384543c 100644 --- a/drivers/s390/net/qeth_l3_main.c +++ b/drivers/s390/net/qeth_l3_main.c @@ -279,9 +279,6 @@ static void qeth_l3_clear_ip_htable(struct qeth_card *card, int recover) QETH_CARD_TEXT(card, 4, "clearip"); - if (recover && card->options.sniffer) - return; - spin_lock_bh(&card->ip_lock); hash_for_each_safe(card->ip_htable, i, tmp, addr, hnode) { @@ -664,6 +661,8 @@ static int qeth_l3_register_addr_entry(struct qeth_card *card, int rc = 0; int cnt = 3; + if (card->options.sniffer) + return 0; if (addr->proto == QETH_PROT_IPV4) { QETH_CARD_TEXT(card, 2, "setaddr4"); @@ -698,6 +697,9 @@ static int qeth_l3_deregister_addr_entry(struct qeth_card *card, { int rc = 0; + if (card->options.sniffer) + return 0; + if (addr->proto == QETH_PROT_IPV4) { QETH_CARD_TEXT(card, 2, "deladdr4"); QETH_CARD_HEX(card, 3, &addr->u.a4.addr, sizeof(int)); @@ -2512,7 +2514,7 @@ static int qeth_l3_setup_netdev(struct qeth_card *card) { int rc; - if (card->dev->netdev_ops) + if (qeth_netdev_is_registered(card->dev)) return 0; if (card->info.type == QETH_CARD_TYPE_OSD || @@ -2609,7 
+2611,8 @@ static void qeth_l3_remove_device(struct ccwgroup_device *cgdev) if (cgdev->state == CCWGROUP_ONLINE) qeth_l3_set_offline(cgdev); - unregister_netdev(card->dev); + if (qeth_netdev_is_registered(card->dev)) + unregister_netdev(card->dev); qeth_l3_clear_ip_htable(card, 0); qeth_l3_clear_ipato_list(card); } diff --git a/drivers/s390/scsi/zfcp_aux.c b/drivers/s390/scsi/zfcp_aux.c index 94f4d8fe85e0e..d1b531fe9ada1 100644 --- a/drivers/s390/scsi/zfcp_aux.c +++ b/drivers/s390/scsi/zfcp_aux.c @@ -275,16 +275,16 @@ static void zfcp_free_low_mem_buffers(struct zfcp_adapter *adapter) */ int zfcp_status_read_refill(struct zfcp_adapter *adapter) { - while (atomic_read(&adapter->stat_miss) > 0) + while (atomic_add_unless(&adapter->stat_miss, -1, 0)) if (zfcp_fsf_status_read(adapter->qdio)) { + atomic_inc(&adapter->stat_miss); /* undo add -1 */ if (atomic_read(&adapter->stat_miss) >= adapter->stat_read_buf_num) { zfcp_erp_adapter_reopen(adapter, 0, "axsref1"); return 1; } break; - } else - atomic_dec(&adapter->stat_miss); + } return 0; } diff --git a/drivers/s390/virtio/virtio_ccw.c b/drivers/s390/virtio/virtio_ccw.c index 8f5c1d7f751ae..b67dc4974f239 100644 --- a/drivers/s390/virtio/virtio_ccw.c +++ b/drivers/s390/virtio/virtio_ccw.c @@ -56,6 +56,7 @@ struct virtio_ccw_device { unsigned int revision; /* Transport revision */ wait_queue_head_t wait_q; spinlock_t lock; + struct mutex io_lock; /* Serializes I/O requests */ struct list_head virtqueues; unsigned long indicators; unsigned long indicators2; @@ -296,6 +297,7 @@ static int ccw_io_helper(struct virtio_ccw_device *vcdev, unsigned long flags; int flag = intparm & VIRTIO_CCW_INTPARM_MASK; + mutex_lock(&vcdev->io_lock); do { spin_lock_irqsave(get_ccwdev_lock(vcdev->cdev), flags); ret = ccw_device_start(vcdev->cdev, ccw, intparm, 0, 0); @@ -308,7 +310,9 @@ static int ccw_io_helper(struct virtio_ccw_device *vcdev, cpu_relax(); } while (ret == -EBUSY); wait_event(vcdev->wait_q, doing_io(vcdev, flag) == 0); - return 
ret ? ret : vcdev->err; + ret = ret ? ret : vcdev->err; + mutex_unlock(&vcdev->io_lock); + return ret; } static void virtio_ccw_drop_indicator(struct virtio_ccw_device *vcdev, @@ -828,6 +832,7 @@ static void virtio_ccw_get_config(struct virtio_device *vdev, int ret; struct ccw1 *ccw; void *config_area; + unsigned long flags; ccw = kzalloc(sizeof(*ccw), GFP_DMA | GFP_KERNEL); if (!ccw) @@ -846,11 +851,13 @@ static void virtio_ccw_get_config(struct virtio_device *vdev, if (ret) goto out_free; + spin_lock_irqsave(&vcdev->lock, flags); memcpy(vcdev->config, config_area, offset + len); - if (buf) - memcpy(buf, &vcdev->config[offset], len); if (vcdev->config_ready < offset + len) vcdev->config_ready = offset + len; + spin_unlock_irqrestore(&vcdev->lock, flags); + if (buf) + memcpy(buf, config_area + offset, len); out_free: kfree(config_area); @@ -864,6 +871,7 @@ static void virtio_ccw_set_config(struct virtio_device *vdev, struct virtio_ccw_device *vcdev = to_vc_device(vdev); struct ccw1 *ccw; void *config_area; + unsigned long flags; ccw = kzalloc(sizeof(*ccw), GFP_DMA | GFP_KERNEL); if (!ccw) @@ -876,9 +884,11 @@ static void virtio_ccw_set_config(struct virtio_device *vdev, /* Make sure we don't overwrite fields. */ if (vcdev->config_ready < offset) virtio_ccw_get_config(vdev, 0, NULL, offset); + spin_lock_irqsave(&vcdev->lock, flags); memcpy(&vcdev->config[offset], buf, len); /* Write the config area to the host. 
*/ memcpy(config_area, vcdev->config, sizeof(vcdev->config)); + spin_unlock_irqrestore(&vcdev->lock, flags); ccw->cmd_code = CCW_CMD_WRITE_CONF; ccw->flags = 0; ccw->count = offset + len; @@ -1247,6 +1257,7 @@ static int virtio_ccw_online(struct ccw_device *cdev) init_waitqueue_head(&vcdev->wait_q); INIT_LIST_HEAD(&vcdev->virtqueues); spin_lock_init(&vcdev->lock); + mutex_init(&vcdev->io_lock); spin_lock_irqsave(get_ccwdev_lock(cdev), flags); dev_set_drvdata(&cdev->dev, vcdev); diff --git a/drivers/sbus/char/display7seg.c b/drivers/sbus/char/display7seg.c index 5c8ed7350a04a..a36e4cf1841d9 100644 --- a/drivers/sbus/char/display7seg.c +++ b/drivers/sbus/char/display7seg.c @@ -220,6 +220,7 @@ static int d7s_probe(struct platform_device *op) dev_set_drvdata(&op->dev, p); d7s_device = p; err = 0; + of_node_put(opts); out: return err; diff --git a/drivers/sbus/char/envctrl.c b/drivers/sbus/char/envctrl.c index 56e962a014939..b8481927bfe40 100644 --- a/drivers/sbus/char/envctrl.c +++ b/drivers/sbus/char/envctrl.c @@ -910,8 +910,10 @@ static void envctrl_init_i2c_child(struct device_node *dp, for (len = 0; len < PCF8584_MAX_CHANNELS; ++len) { pchild->mon_type[len] = ENVCTRL_NOMON; } + of_node_put(root_node); return; } + of_node_put(root_node); } /* Get the monitor channels. 
*/ diff --git a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c index f000458133789..3f97ec4aac4bb 100644 --- a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c +++ b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c @@ -2371,7 +2371,7 @@ static int _bnx2fc_create(struct net_device *netdev, if (!interface) { printk(KERN_ERR PFX "bnx2fc_interface_create failed\n"); rc = -ENOMEM; - goto ifput_err; + goto netdev_err; } if (is_vlan_dev(netdev)) { diff --git a/drivers/scsi/esp_scsi.c b/drivers/scsi/esp_scsi.c index c3fc34b9964df..9e5d3f7d29ae7 100644 --- a/drivers/scsi/esp_scsi.c +++ b/drivers/scsi/esp_scsi.c @@ -1338,6 +1338,7 @@ static int esp_data_bytes_sent(struct esp *esp, struct esp_cmd_entry *ent, bytes_sent = esp->data_dma_len; bytes_sent -= ecount; + bytes_sent -= esp->send_cmd_residual; /* * The am53c974 has a DMA 'pecularity'. The doc states: diff --git a/drivers/scsi/esp_scsi.h b/drivers/scsi/esp_scsi.h index 8163dca2071bf..a77772777a309 100644 --- a/drivers/scsi/esp_scsi.h +++ b/drivers/scsi/esp_scsi.h @@ -540,6 +540,8 @@ struct esp { void *dma; int dmarev; + + u32 send_cmd_residual; }; /* A front-end driver for the ESP chip should do the following in diff --git a/drivers/scsi/hisi_sas/hisi_sas_v1_hw.c b/drivers/scsi/hisi_sas/hisi_sas_v1_hw.c index 8f60f0e045996..410eccf0bc5eb 100644 --- a/drivers/scsi/hisi_sas/hisi_sas_v1_hw.c +++ b/drivers/scsi/hisi_sas/hisi_sas_v1_hw.c @@ -904,11 +904,9 @@ static void start_delivery_v1_hw(struct hisi_sas_dq *dq) { struct hisi_hba *hisi_hba = dq->hisi_hba; struct hisi_sas_slot *s, *s1, *s2 = NULL; - struct list_head *dq_list; int dlvry_queue = dq->id; int wp; - dq_list = &dq->list; list_for_each_entry_safe(s, s1, &dq->list, delivery) { if (!s->ready) break; diff --git a/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c b/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c index 9c5c5a601332e..1c4ea58da1ae1 100644 --- a/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c +++ b/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c @@ -1666,11 +1666,9 @@ static void 
start_delivery_v2_hw(struct hisi_sas_dq *dq) { struct hisi_hba *hisi_hba = dq->hisi_hba; struct hisi_sas_slot *s, *s1, *s2 = NULL; - struct list_head *dq_list; int dlvry_queue = dq->id; int wp; - dq_list = &dq->list; list_for_each_entry_safe(s, s1, &dq->list, delivery) { if (!s->ready) break; diff --git a/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c b/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c index 08b503e274b81..687ff61bba9fd 100644 --- a/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c +++ b/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c @@ -883,11 +883,9 @@ static void start_delivery_v3_hw(struct hisi_sas_dq *dq) { struct hisi_hba *hisi_hba = dq->hisi_hba; struct hisi_sas_slot *s, *s1, *s2 = NULL; - struct list_head *dq_list; int dlvry_queue = dq->id; int wp; - dq_list = &dq->list; list_for_each_entry_safe(s, s1, &dq->list, delivery) { if (!s->ready) break; diff --git a/drivers/scsi/libiscsi.c b/drivers/scsi/libiscsi.c index 93c66ebad907e..f78d2e5c1471d 100644 --- a/drivers/scsi/libiscsi.c +++ b/drivers/scsi/libiscsi.c @@ -2416,8 +2416,8 @@ int iscsi_eh_session_reset(struct scsi_cmnd *sc) failed: ISCSI_DBG_EH(session, "failing session reset: Could not log back into " - "%s, %s [age %d]\n", session->targetname, - conn->persistent_address, session->age); + "%s [age %d]\n", session->targetname, + session->age); spin_unlock_bh(&session->frwd_lock); mutex_unlock(&session->eh_mutex); return FAILED; diff --git a/drivers/scsi/lpfc/lpfc_debugfs.c b/drivers/scsi/lpfc/lpfc_debugfs.c index aec5b10a8c855..ca6c3982548db 100644 --- a/drivers/scsi/lpfc/lpfc_debugfs.c +++ b/drivers/scsi/lpfc/lpfc_debugfs.c @@ -700,6 +700,8 @@ lpfc_debugfs_nodelist_data(struct lpfc_vport *vport, char *buf, int size) rport = lpfc_ndlp_get_nrport(ndlp); if (rport) nrport = rport->remoteport; + else + nrport = NULL; spin_unlock(&phba->hbalock); if (!nrport) continue; diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c index f3cae733ae2df..9acb5b44ce4c1 100644 --- a/drivers/scsi/lpfc/lpfc_init.c +++ 
b/drivers/scsi/lpfc/lpfc_init.c @@ -167,7 +167,11 @@ lpfc_config_port_prep(struct lpfc_hba *phba) sizeof(phba->wwpn)); } - phba->sli3_options = 0x0; + /* + * Clear all option bits except LPFC_SLI3_BG_ENABLED, + * which was already set in lpfc_get_cfgparam() + */ + phba->sli3_options &= (uint32_t)LPFC_SLI3_BG_ENABLED; /* Setup and issue mailbox READ REV command */ lpfc_read_rev(phba, pmb); diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c index 5c7858e735c9e..200b5bca1f5f4 100644 --- a/drivers/scsi/lpfc/lpfc_scsi.c +++ b/drivers/scsi/lpfc/lpfc_scsi.c @@ -4158,9 +4158,17 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn, } lpfc_scsi_unprep_dma_buf(phba, lpfc_cmd); - spin_lock_irqsave(&phba->hbalock, flags); - lpfc_cmd->pCmd = NULL; - spin_unlock_irqrestore(&phba->hbalock, flags); + /* If pCmd was set to NULL from abort path, do not call scsi_done */ + if (xchg(&lpfc_cmd->pCmd, NULL) == NULL) { + lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP, + "0711 FCP cmd already NULL, sid: 0x%06x, " + "did: 0x%06x, oxid: 0x%04x\n", + vport->fc_myDID, + (pnode) ? pnode->nlp_DID : 0, + phba->sli_rev == LPFC_SLI_REV4 ? + lpfc_cmd->cur_iocbq.sli4_xritag : 0xffff); + return; + } /* The sdev is not guaranteed to be valid post scsi_done upcall. 
*/ cmd->scsi_done(cmd); diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c index 9830bdb6e0726..a490e63c94b67 100644 --- a/drivers/scsi/lpfc/lpfc_sli.c +++ b/drivers/scsi/lpfc/lpfc_sli.c @@ -3797,6 +3797,7 @@ lpfc_sli_handle_slow_ring_event_s4(struct lpfc_hba *phba, struct hbq_dmabuf *dmabuf; struct lpfc_cq_event *cq_event; unsigned long iflag; + int count = 0; spin_lock_irqsave(&phba->hbalock, iflag); phba->hba_flag &= ~HBA_SP_QUEUE_EVT; @@ -3818,16 +3819,22 @@ lpfc_sli_handle_slow_ring_event_s4(struct lpfc_hba *phba, if (irspiocbq) lpfc_sli_sp_handle_rspiocb(phba, pring, irspiocbq); + count++; break; case CQE_CODE_RECEIVE: case CQE_CODE_RECEIVE_V1: dmabuf = container_of(cq_event, struct hbq_dmabuf, cq_event); lpfc_sli4_handle_received_buffer(phba, dmabuf); + count++; break; default: break; } + + /* Limit the number of events to 64 to avoid soft lockups */ + if (count == 64) + break; } } @@ -4962,7 +4969,6 @@ lpfc_sli_config_port(struct lpfc_hba *phba, int sli_mode) phba->sli3_options &= ~(LPFC_SLI3_NPIV_ENABLED | LPFC_SLI3_HBQ_ENABLED | LPFC_SLI3_CRP_ENABLED | - LPFC_SLI3_BG_ENABLED | LPFC_SLI3_DSS_ENABLED); if (rc != MBX_SUCCESS) { lpfc_printf_log(phba, KERN_ERR, LOG_INIT, @@ -14215,7 +14221,8 @@ lpfc_sli4_queue_alloc(struct lpfc_hba *phba, uint32_t page_size, hw_page_size))/hw_page_size; /* If needed, Adjust page count to match the max the adapter supports */ - if (queue->page_count > phba->sli4_hba.pc_sli4_params.wqpcnt) + if (phba->sli4_hba.pc_sli4_params.wqpcnt && + (queue->page_count > phba->sli4_hba.pc_sli4_params.wqpcnt)) queue->page_count = phba->sli4_hba.pc_sli4_params.wqpcnt; INIT_LIST_HEAD(&queue->list); diff --git a/drivers/scsi/mac_esp.c b/drivers/scsi/mac_esp.c index eb551f3cc471d..71879f2207e0e 100644 --- a/drivers/scsi/mac_esp.c +++ b/drivers/scsi/mac_esp.c @@ -427,6 +427,8 @@ static void mac_esp_send_pio_cmd(struct esp *esp, u32 addr, u32 esp_count, scsi_esp_cmd(esp, ESP_CMD_TI); } } + + esp->send_cmd_residual = esp_count; 
} static int mac_esp_irq_pending(struct esp *esp) diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c index 9aa9590c53739..f6de7526ded56 100644 --- a/drivers/scsi/megaraid/megaraid_sas_base.c +++ b/drivers/scsi/megaraid/megaraid_sas_base.c @@ -7523,6 +7523,9 @@ static int megasas_mgmt_compat_ioctl_fw(struct file *file, unsigned long arg) get_user(user_sense_off, &cioc->sense_off)) return -EFAULT; + if (local_sense_off != user_sense_off) + return -EINVAL; + if (local_sense_len) { void __user **sense_ioc_ptr = (void __user **)((u8 *)((unsigned long)&ioc->frame.raw) + local_sense_off); diff --git a/drivers/scsi/megaraid/megaraid_sas_fp.c b/drivers/scsi/megaraid/megaraid_sas_fp.c index 59ecbb3b53b52..a336285504254 100644 --- a/drivers/scsi/megaraid/megaraid_sas_fp.c +++ b/drivers/scsi/megaraid/megaraid_sas_fp.c @@ -1266,7 +1266,7 @@ void mr_update_load_balance_params(struct MR_DRV_RAID_MAP_ALL *drv_map, for (ldCount = 0; ldCount < MAX_LOGICAL_DRIVES_EXT; ldCount++) { ld = MR_TargetIdToLdGet(ldCount, drv_map); - if (ld >= MAX_LOGICAL_DRIVES_EXT) { + if (ld >= MAX_LOGICAL_DRIVES_EXT - 1) { lbInfo[ldCount].loadBalanceFlag = 0; continue; } diff --git a/drivers/scsi/megaraid/megaraid_sas_fusion.c b/drivers/scsi/megaraid/megaraid_sas_fusion.c index c7f95bace353a..f45c54f02bfa5 100644 --- a/drivers/scsi/megaraid/megaraid_sas_fusion.c +++ b/drivers/scsi/megaraid/megaraid_sas_fusion.c @@ -2832,7 +2832,7 @@ static void megasas_build_ld_nonrw_fusion(struct megasas_instance *instance, device_id < instance->fw_supported_vd_count)) { ld = MR_TargetIdToLdGet(device_id, local_map_ptr); - if (ld >= instance->fw_supported_vd_count) + if (ld >= instance->fw_supported_vd_count - 1) fp_possible = 0; else { raid = MR_LdRaidGet(ld, local_map_ptr); diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.c b/drivers/scsi/mpt3sas/mpt3sas_base.c index 59d7844ee0222..b59bba3e65162 100644 --- a/drivers/scsi/mpt3sas/mpt3sas_base.c +++ 
b/drivers/scsi/mpt3sas/mpt3sas_base.c @@ -3344,8 +3344,9 @@ _base_mpi_ep_writeq(__u64 b, volatile void __iomem *addr, static inline void _base_writeq(__u64 b, volatile void __iomem *addr, spinlock_t *writeq_lock) { + wmb(); __raw_writeq(b, addr); - mmiowb(); + barrier(); } #else static inline void diff --git a/drivers/scsi/qedi/qedi_main.c b/drivers/scsi/qedi/qedi_main.c index e5bd035ebad0f..4de740da547b8 100644 --- a/drivers/scsi/qedi/qedi_main.c +++ b/drivers/scsi/qedi/qedi_main.c @@ -952,6 +952,9 @@ static int qedi_find_boot_info(struct qedi_ctx *qedi, cls_sess = iscsi_conn_to_session(cls_conn); sess = cls_sess->dd_data; + if (!iscsi_is_session_online(cls_sess)) + continue; + if (pri_ctrl_flags) { if (!strcmp(pri_tgt->iscsi_name, sess->targetname) && !strcmp(pri_tgt->ip_addr, ep_ip_addr)) { diff --git a/drivers/scsi/qla2xxx/qla_bsg.c b/drivers/scsi/qla2xxx/qla_bsg.c index c11a89be292c8..4a9fd8d944d60 100644 --- a/drivers/scsi/qla2xxx/qla_bsg.c +++ b/drivers/scsi/qla2xxx/qla_bsg.c @@ -2487,7 +2487,7 @@ qla24xx_bsg_request(struct bsg_job *bsg_job) vha = shost_priv(host); } - if (qla2x00_reset_active(vha)) { + if (qla2x00_chip_is_down(vha)) { ql_dbg(ql_dbg_user, vha, 0x709f, "BSG: ISP abort active/needed -- cmd=%d.\n", bsg_request->msgcode); diff --git a/drivers/scsi/qla2xxx/qla_gs.c b/drivers/scsi/qla2xxx/qla_gs.c index a0038d879b9df..de3f2a097451d 100644 --- a/drivers/scsi/qla2xxx/qla_gs.c +++ b/drivers/scsi/qla2xxx/qla_gs.c @@ -3261,6 +3261,9 @@ static void qla24xx_async_gpsc_sp_done(void *s, int res) "Async done-%s res %x, WWPN %8phC \n", sp->name, res, fcport->port_name); + if (res == QLA_FUNCTION_TIMEOUT) + return; + if (res == (DID_ERROR << 16)) { /* entry status error */ goto done; @@ -4444,9 +4447,9 @@ int qla24xx_async_gpnft(scsi_qla_host_t *vha, u8 fc4_type, srb_t *sp) sp->u.iocb_cmd.timeout = qla2x00_async_iocb_timeout; qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2); - rspsz = sizeof(struct ct_sns_gpnft_rsp) + - ((vha->hw->max_fibre_devices 
- 1) * - sizeof(struct ct_sns_gpn_ft_data)); + rspsz = sp->u.iocb_cmd.u.ctarg.rsp_size; + memset(sp->u.iocb_cmd.u.ctarg.rsp, 0, sp->u.iocb_cmd.u.ctarg.rsp_size); + memset(sp->u.iocb_cmd.u.ctarg.req, 0, sp->u.iocb_cmd.u.ctarg.req_size); ct_sns = (struct ct_sns_pkt *)sp->u.iocb_cmd.u.ctarg.req; /* CT_IU preamble */ diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c index b934977c5c260..5352c9bbcaf76 100644 --- a/drivers/scsi/qla2xxx/qla_init.c +++ b/drivers/scsi/qla2xxx/qla_init.c @@ -52,12 +52,14 @@ qla2x00_sp_timeout(struct timer_list *t) struct srb_iocb *iocb; struct req_que *req; unsigned long flags; + struct qla_hw_data *ha = sp->vha->hw; - spin_lock_irqsave(sp->qpair->qp_lock_ptr, flags); + WARN_ON(irqs_disabled()); + spin_lock_irqsave(&ha->hardware_lock, flags); req = sp->qpair->req; req->outstanding_cmds[sp->handle] = NULL; iocb = &sp->u.iocb_cmd; - spin_unlock_irqrestore(sp->qpair->qp_lock_ptr, flags); + spin_unlock_irqrestore(&ha->hardware_lock, flags); iocb->timeout(sp); } @@ -972,6 +974,15 @@ void qla24xx_async_gpdb_sp_done(void *s, int res) fcport->flags &= ~(FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE); + if (res == QLA_FUNCTION_TIMEOUT) + return; + + if (res == QLA_FUNCTION_TIMEOUT) { + dma_pool_free(sp->vha->hw->s_dma_pool, sp->u.iocb_cmd.u.mbx.in, + sp->u.iocb_cmd.u.mbx.in_dma); + return; + } + memset(&ea, 0, sizeof(ea)); ea.event = FCME_GPDB_DONE; ea.fcport = fcport; @@ -1788,6 +1799,8 @@ qla24xx_async_abort_cmd(srb_t *cmd_sp, bool wait) wait_for_completion(&abt_iocb->u.abt.comp); rval = abt_iocb->u.abt.comp_status == CS_COMPLETE ? 
QLA_SUCCESS : QLA_FUNCTION_FAILED; + } else { + goto done; } done_free_sp: @@ -1952,25 +1965,15 @@ qla24xx_handle_plogi_done_event(struct scsi_qla_host *vha, struct event_arg *ea) cid.b.rsvd_1 = 0; ql_dbg(ql_dbg_disc, vha, 0x20ec, - "%s %d %8phC LoopID 0x%x in use post gnl\n", + "%s %d %8phC lid %#x in use with pid %06x post gnl\n", __func__, __LINE__, ea->fcport->port_name, - ea->fcport->loop_id); + ea->fcport->loop_id, cid.b24); - if (IS_SW_RESV_ADDR(cid)) { - set_bit(ea->fcport->loop_id, vha->hw->loop_id_map); - ea->fcport->loop_id = FC_NO_LOOP_ID; - } else { - qla2x00_clear_loop_id(ea->fcport); - } + set_bit(ea->fcport->loop_id, vha->hw->loop_id_map); + ea->fcport->loop_id = FC_NO_LOOP_ID; qla24xx_post_gnl_work(vha, ea->fcport); break; case MBS_PORT_ID_USED: - ql_dbg(ql_dbg_disc, vha, 0x20ed, - "%s %d %8phC NPortId %02x%02x%02x inuse post gidpn\n", - __func__, __LINE__, ea->fcport->port_name, - ea->fcport->d_id.b.domain, ea->fcport->d_id.b.area, - ea->fcport->d_id.b.al_pa); - lid = ea->iop[1] & 0xffff; qlt_find_sess_invalidate_other(vha, wwn_to_u64(ea->fcport->port_name), @@ -4711,6 +4714,7 @@ qla2x00_alloc_fcport(scsi_qla_host_t *vha, gfp_t flags) fcport->loop_id = FC_NO_LOOP_ID; qla2x00_set_fcport_state(fcport, FCS_UNCONFIGURED); fcport->supported_classes = FC_COS_UNSPECIFIED; + fcport->fp_speed = PORT_SPEED_UNKNOWN; fcport->ct_desc.ct_sns = dma_alloc_coherent(&vha->hw->pdev->dev, sizeof(struct ct_sns_pkt), &fcport->ct_desc.ct_sns_dma, @@ -6682,7 +6686,7 @@ qla2x00_abort_isp(scsi_qla_host_t *vha) * The next call disables the board * completely. 
*/ - ha->isp_ops->reset_adapter(vha); + qla2x00_abort_isp_cleanup(vha); vha->flags.online = 0; clear_bit(ISP_ABORT_RETRY, &vha->dpc_flags); @@ -7142,7 +7146,6 @@ qla24xx_nvram_config(scsi_qla_host_t *vha) } icb->firmware_options_2 &= cpu_to_le32( ~(BIT_3 | BIT_2 | BIT_1 | BIT_0)); - vha->flags.process_response_queue = 0; if (ha->zio_mode != QLA_ZIO_DISABLED) { ha->zio_mode = QLA_ZIO_MODE_6; @@ -7153,7 +7156,6 @@ qla24xx_nvram_config(scsi_qla_host_t *vha) icb->firmware_options_2 |= cpu_to_le32( (uint32_t)ha->zio_mode); icb->interrupt_delay_timer = cpu_to_le16(ha->zio_timer); - vha->flags.process_response_queue = 1; } if (rval) { diff --git a/drivers/scsi/qla2xxx/qla_iocb.c b/drivers/scsi/qla2xxx/qla_iocb.c index 42ac8e097419c..119927220299e 100644 --- a/drivers/scsi/qla2xxx/qla_iocb.c +++ b/drivers/scsi/qla2xxx/qla_iocb.c @@ -1526,12 +1526,6 @@ qla24xx_start_scsi(srb_t *sp) /* Set chip new ring index. */ WRT_REG_DWORD(req->req_q_in, req->ring_index); - RD_REG_DWORD_RELAXED(&ha->iobase->isp24.hccr); - - /* Manage unprocessed RIO/ZIO commands in response queue. */ - if (vha->flags.process_response_queue && - rsp->ring_ptr->signature != RESPONSE_PROCESSED) - qla24xx_process_response_queue(vha, rsp); spin_unlock_irqrestore(&ha->hardware_lock, flags); return QLA_SUCCESS; @@ -1725,12 +1719,6 @@ qla24xx_dif_start_scsi(srb_t *sp) /* Set chip new ring index. */ WRT_REG_DWORD(req->req_q_in, req->ring_index); - RD_REG_DWORD_RELAXED(&ha->iobase->isp24.hccr); - - /* Manage unprocessed RIO/ZIO commands in response queue. */ - if (vha->flags.process_response_queue && - rsp->ring_ptr->signature != RESPONSE_PROCESSED) - qla24xx_process_response_queue(vha, rsp); spin_unlock_irqrestore(&ha->hardware_lock, flags); @@ -1880,11 +1868,6 @@ qla2xxx_start_scsi_mq(srb_t *sp) /* Set chip new ring index. */ WRT_REG_DWORD(req->req_q_in, req->ring_index); - /* Manage unprocessed RIO/ZIO commands in response queue. 
*/ - if (vha->flags.process_response_queue && - rsp->ring_ptr->signature != RESPONSE_PROCESSED) - qla24xx_process_response_queue(vha, rsp); - spin_unlock_irqrestore(&qpair->qp_lock, flags); return QLA_SUCCESS; diff --git a/drivers/scsi/qla2xxx/qla_mbx.c b/drivers/scsi/qla2xxx/qla_mbx.c index 2c6c2cd5a0d07..84f57f075455e 100644 --- a/drivers/scsi/qla2xxx/qla_mbx.c +++ b/drivers/scsi/qla2xxx/qla_mbx.c @@ -493,7 +493,7 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp) set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); qla2xxx_wake_dpc(vha); } - } else if (!abort_active) { + } else if (current == ha->dpc_thread) { /* call abort directly since we are in the DPC thread */ ql_dbg(ql_dbg_mbx, vha, 0x101d, "Timeout, calling abort_isp.\n"); @@ -3762,10 +3762,7 @@ qla2x00_set_idma_speed(scsi_qla_host_t *vha, uint16_t loop_id, mcp->mb[0] = MBC_PORT_PARAMS; mcp->mb[1] = loop_id; mcp->mb[2] = BIT_0; - if (IS_CNA_CAPABLE(vha->hw)) - mcp->mb[3] = port_speed & (BIT_5|BIT_4|BIT_3|BIT_2|BIT_1|BIT_0); - else - mcp->mb[3] = port_speed & (BIT_2|BIT_1|BIT_0); + mcp->mb[3] = port_speed & (BIT_5|BIT_4|BIT_3|BIT_2|BIT_1|BIT_0); mcp->mb[9] = vha->vp_idx; mcp->out_mb = MBX_9|MBX_3|MBX_2|MBX_1|MBX_0; mcp->in_mb = MBX_3|MBX_1|MBX_0; diff --git a/drivers/scsi/qla2xxx/qla_nvme.c b/drivers/scsi/qla2xxx/qla_nvme.c index 20d9dc39f0fbe..e6545cb9a2c19 100644 --- a/drivers/scsi/qla2xxx/qla_nvme.c +++ b/drivers/scsi/qla2xxx/qla_nvme.c @@ -607,7 +607,7 @@ void qla_nvme_abort(struct qla_hw_data *ha, struct srb *sp, int res) { int rval; - if (!test_bit(ABORT_ISP_ACTIVE, &sp->vha->dpc_flags)) { + if (ha->flags.fw_started) { rval = ha->isp_ops->abort_command(sp); if (!rval && !qla_nvme_wait_on_command(sp)) ql_log(ql_log_warn, NULL, 0x2112, @@ -660,9 +660,6 @@ void qla_nvme_delete(struct scsi_qla_host *vha) __func__, fcport); nvme_fc_set_remoteport_devloss(fcport->nvme_remote_port, 0); - init_completion(&fcport->nvme_del_done); - nvme_fc_unregister_remoteport(fcport->nvme_remote_port); - 
wait_for_completion(&fcport->nvme_del_done); } if (vha->nvme_local_port) { diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c index 8c811b251d428..d2888b30a8a3b 100644 --- a/drivers/scsi/qla2xxx/qla_target.c +++ b/drivers/scsi/qla2xxx/qla_target.c @@ -1261,7 +1261,8 @@ void qlt_schedule_sess_for_deletion(struct fc_port *sess) qla24xx_chk_fcp_state(sess); ql_dbg(ql_dbg_tgt, sess->vha, 0xe001, - "Scheduling sess %p for deletion\n", sess); + "Scheduling sess %p for deletion %8phC\n", + sess, sess->port_name); INIT_WORK(&sess->del_work, qla24xx_delete_sess_fn); WARN_ON(!queue_work(sess->vha->hw->wq, &sess->del_work)); diff --git a/drivers/scsi/qla2xxx/tcm_qla2xxx.c b/drivers/scsi/qla2xxx/tcm_qla2xxx.c index e03d12a5f986c..64e2d859f6332 100644 --- a/drivers/scsi/qla2xxx/tcm_qla2xxx.c +++ b/drivers/scsi/qla2xxx/tcm_qla2xxx.c @@ -718,10 +718,6 @@ static int tcm_qla2xxx_queue_status(struct se_cmd *se_cmd) cmd->sg_cnt = 0; cmd->offset = 0; cmd->dma_data_direction = target_reverse_dma_direction(se_cmd); - if (cmd->trc_flags & TRC_XMIT_STATUS) { - pr_crit("Multiple calls for status = %p.\n", cmd); - dump_stack(); - } cmd->trc_flags |= TRC_XMIT_STATUS; if (se_cmd->data_direction == DMA_FROM_DEVICE) { diff --git a/drivers/scsi/raid_class.c b/drivers/scsi/raid_class.c index ea88906d2cc52..5c3d6e1e0145e 100644 --- a/drivers/scsi/raid_class.c +++ b/drivers/scsi/raid_class.c @@ -63,8 +63,7 @@ static int raid_match(struct attribute_container *cont, struct device *dev) * emulated RAID devices, so start with SCSI */ struct raid_internal *i = ac_to_raid_internal(cont); -#if defined(CONFIG_SCSI) || defined(CONFIG_SCSI_MODULE) - if (scsi_is_sdev_device(dev)) { + if (IS_ENABLED(CONFIG_SCSI) && scsi_is_sdev_device(dev)) { struct scsi_device *sdev = to_scsi_device(dev); if (i->f->cookie != sdev->host->hostt) @@ -72,7 +71,6 @@ static int raid_match(struct attribute_container *cont, struct device *dev) return i->f->is_raid(dev); } -#endif /* FIXME: look at 
other subsystems too */ return 0; } diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c index eb97d2dd36516..b5f638286037a 100644 --- a/drivers/scsi/scsi_lib.c +++ b/drivers/scsi/scsi_lib.c @@ -697,6 +697,12 @@ static bool scsi_end_request(struct request *req, blk_status_t error, */ scsi_mq_uninit_cmd(cmd); + /* + * queue is still alive, so grab the ref for preventing it + * from being cleaned up during running queue. + */ + percpu_ref_get(&q->q_usage_counter); + __blk_mq_end_request(req, error); if (scsi_target(sdev)->single_lun || @@ -704,6 +710,8 @@ static bool scsi_end_request(struct request *req, blk_status_t error, kblockd_schedule_work(&sdev->requeue_work); else blk_mq_run_hw_queues(q, true); + + percpu_ref_put(&q->q_usage_counter); } else { unsigned long flags; diff --git a/drivers/scsi/scsi_pm.c b/drivers/scsi/scsi_pm.c index b44c1bb687a2e..ebc193f7f7ddb 100644 --- a/drivers/scsi/scsi_pm.c +++ b/drivers/scsi/scsi_pm.c @@ -79,8 +79,22 @@ static int scsi_dev_type_resume(struct device *dev, if (err == 0) { pm_runtime_disable(dev); - pm_runtime_set_active(dev); + err = pm_runtime_set_active(dev); pm_runtime_enable(dev); + + /* + * Forcibly set runtime PM status of request queue to "active" + * to make sure we can again get requests from the queue + * (see also blk_pm_peek_request()). + * + * The resume hook will correct runtime PM status of the disk. + */ + if (!err && scsi_is_sdev_device(dev)) { + struct scsi_device *sdev = to_scsi_device(dev); + + if (sdev->request_queue->dev) + blk_set_runtime_active(sdev->request_queue); + } } return err; @@ -139,16 +153,6 @@ static int scsi_bus_resume_common(struct device *dev, else fn = NULL; - /* - * Forcibly set runtime PM status of request queue to "active" to - * make sure we can again get requests from the queue (see also - * blk_pm_peek_request()). - * - * The resume hook will correct runtime PM status of the disk. 
- */ - if (scsi_is_sdev_device(dev) && pm_runtime_suspended(dev)) - blk_set_runtime_active(to_scsi_device(dev)->request_queue); - if (fn) { async_schedule_domain(fn, dev, &scsi_sd_pm_domain); diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c index 4a57ffecc7e61..0a27917263aa2 100644 --- a/drivers/scsi/sd.c +++ b/drivers/scsi/sd.c @@ -132,6 +132,7 @@ static DEFINE_MUTEX(sd_ref_mutex); static struct kmem_cache *sd_cdb_cache; static mempool_t *sd_cdb_pool; +static mempool_t *sd_page_pool; static const char *sd_cache_types[] = { "write through", "none", "write back", @@ -204,6 +205,12 @@ cache_type_store(struct device *dev, struct device_attribute *attr, sp = buffer_data[0] & 0x80 ? 1 : 0; buffer_data[0] &= ~0x80; + /* + * Ensure WP, DPOFUA, and RESERVED fields are cleared in + * received mode parameter buffer before doing MODE SELECT. + */ + data.device_specific = 0; + if (scsi_mode_select(sdp, 1, sp, 8, buffer_data, len, SD_TIMEOUT, SD_MAX_RETRIES, &data, &sshdr)) { if (scsi_sense_valid(&sshdr)) @@ -758,9 +765,10 @@ static int sd_setup_unmap_cmnd(struct scsi_cmnd *cmd) unsigned int data_len = 24; char *buf; - rq->special_vec.bv_page = alloc_page(GFP_ATOMIC | __GFP_ZERO); + rq->special_vec.bv_page = mempool_alloc(sd_page_pool, GFP_ATOMIC); if (!rq->special_vec.bv_page) return BLKPREP_DEFER; + clear_highpage(rq->special_vec.bv_page); rq->special_vec.bv_offset = 0; rq->special_vec.bv_len = data_len; rq->rq_flags |= RQF_SPECIAL_PAYLOAD; @@ -791,9 +799,10 @@ static int sd_setup_write_same16_cmnd(struct scsi_cmnd *cmd, bool unmap) u32 nr_sectors = blk_rq_sectors(rq) >> (ilog2(sdp->sector_size) - 9); u32 data_len = sdp->sector_size; - rq->special_vec.bv_page = alloc_page(GFP_ATOMIC | __GFP_ZERO); + rq->special_vec.bv_page = mempool_alloc(sd_page_pool, GFP_ATOMIC); if (!rq->special_vec.bv_page) return BLKPREP_DEFER; + clear_highpage(rq->special_vec.bv_page); rq->special_vec.bv_offset = 0; rq->special_vec.bv_len = data_len; rq->rq_flags |= RQF_SPECIAL_PAYLOAD; @@ -821,9 
+830,10 @@ static int sd_setup_write_same10_cmnd(struct scsi_cmnd *cmd, bool unmap) u32 nr_sectors = blk_rq_sectors(rq) >> (ilog2(sdp->sector_size) - 9); u32 data_len = sdp->sector_size; - rq->special_vec.bv_page = alloc_page(GFP_ATOMIC | __GFP_ZERO); + rq->special_vec.bv_page = mempool_alloc(sd_page_pool, GFP_ATOMIC); if (!rq->special_vec.bv_page) return BLKPREP_DEFER; + clear_highpage(rq->special_vec.bv_page); rq->special_vec.bv_offset = 0; rq->special_vec.bv_len = data_len; rq->rq_flags |= RQF_SPECIAL_PAYLOAD; @@ -1287,7 +1297,7 @@ static void sd_uninit_command(struct scsi_cmnd *SCpnt) u8 *cmnd; if (rq->rq_flags & RQF_SPECIAL_PAYLOAD) - __free_page(rq->special_vec.bv_page); + mempool_free(rq->special_vec.bv_page, sd_page_pool); if (SCpnt->cmnd != scsi_req(rq)->cmd) { cmnd = SCpnt->cmnd; @@ -3635,6 +3645,13 @@ static int __init init_sd(void) goto err_out_cache; } + sd_page_pool = mempool_create_page_pool(SD_MEMPOOL_SIZE, 0); + if (!sd_page_pool) { + printk(KERN_ERR "sd: can't init discard page pool\n"); + err = -ENOMEM; + goto err_out_ppool; + } + err = scsi_register_driver(&sd_template.gendrv); if (err) goto err_out_driver; @@ -3642,6 +3659,9 @@ static int __init init_sd(void) return 0; err_out_driver: + mempool_destroy(sd_page_pool); + +err_out_ppool: mempool_destroy(sd_cdb_pool); err_out_cache: @@ -3668,6 +3688,7 @@ static void __exit exit_sd(void) scsi_unregister_driver(&sd_template.gendrv); mempool_destroy(sd_cdb_pool); + mempool_destroy(sd_page_pool); kmem_cache_destroy(sd_cdb_cache); class_unregister(&sd_disk_class); diff --git a/drivers/scsi/smartpqi/smartpqi_init.c b/drivers/scsi/smartpqi/smartpqi_init.c index 2112ea6723c60..8c1a232ac6bfb 100644 --- a/drivers/scsi/smartpqi/smartpqi_init.c +++ b/drivers/scsi/smartpqi/smartpqi_init.c @@ -2720,6 +2720,9 @@ static unsigned int pqi_process_io_intr(struct pqi_ctrl_info *ctrl_info, switch (response->header.iu_type) { case PQI_RESPONSE_IU_RAID_PATH_IO_SUCCESS: case PQI_RESPONSE_IU_AIO_PATH_IO_SUCCESS: + if 
(io_request->scmd) + io_request->scmd->result = 0; + /* fall through */ case PQI_RESPONSE_IU_GENERAL_MANAGEMENT: break; case PQI_RESPONSE_IU_TASK_MANAGEMENT: @@ -6686,6 +6689,7 @@ static void pqi_shutdown(struct pci_dev *pci_dev) * storage. */ rc = pqi_flush_cache(ctrl_info, SHUTDOWN); + pqi_free_interrupts(ctrl_info); pqi_reset(ctrl_info); if (rc == 0) return; diff --git a/drivers/scsi/ufs/Kconfig b/drivers/scsi/ufs/Kconfig index e09fe6ab35723..a3c1982b213a4 100644 --- a/drivers/scsi/ufs/Kconfig +++ b/drivers/scsi/ufs/Kconfig @@ -38,6 +38,7 @@ config SCSI_UFSHCD select PM_DEVFREQ select DEVFREQ_GOV_SIMPLE_ONDEMAND select NLS + select RPMB ---help--- This selects the support for UFS devices in Linux, say Y and make sure that you know the name of your UFS host adapter (the card diff --git a/drivers/scsi/ufs/ufs-hisi.c b/drivers/scsi/ufs/ufs-hisi.c index 46df707e6f2c0..452e19f8fb470 100644 --- a/drivers/scsi/ufs/ufs-hisi.c +++ b/drivers/scsi/ufs/ufs-hisi.c @@ -20,6 +20,7 @@ #include "unipro.h" #include "ufs-hisi.h" #include "ufshci.h" +#include "ufs_quirks.h" static int ufs_hisi_check_hibern8(struct ufs_hba *hba) { @@ -390,6 +391,14 @@ static void ufs_hisi_set_dev_cap(struct ufs_hisi_dev_params *hisi_param) static void ufs_hisi_pwr_change_pre_change(struct ufs_hba *hba) { + if (hba->dev_quirks & UFS_DEVICE_QUIRK_HOST_VS_DEBUGSAVECONFIGTIME) { + pr_info("ufs flash device must set VS_DebugSaveConfigTime 0x10\n"); + /* VS_DebugSaveConfigTime */ + ufshcd_dme_set(hba, UIC_ARG_MIB(0xD0A0), 0x10); + /* sync length */ + ufshcd_dme_set(hba, UIC_ARG_MIB(0x1556), 0x48); + } + /* update */ ufshcd_dme_set(hba, UIC_ARG_MIB(0x15A8), 0x1); /* PA_TxSkip */ diff --git a/drivers/scsi/ufs/ufs-sysfs.c b/drivers/scsi/ufs/ufs-sysfs.c index 8d9332bb7d0c3..0b221c5a244c6 100644 --- a/drivers/scsi/ufs/ufs-sysfs.c +++ b/drivers/scsi/ufs/ufs-sysfs.c @@ -570,10 +570,11 @@ static ssize_t _name##_show(struct device *dev, \ struct ufs_hba *hba = dev_get_drvdata(dev); \ int ret; \ int desc_len = 
QUERY_DESC_MAX_SIZE; \ - u8 *desc_buf; \ + char *desc_buf; \ + \ desc_buf = kzalloc(QUERY_DESC_MAX_SIZE, GFP_ATOMIC); \ - if (!desc_buf) \ - return -ENOMEM; \ + if (!desc_buf) \ + return -ENOMEM; \ ret = ufshcd_query_descriptor_retry(hba, \ UPIU_QUERY_OPCODE_READ_DESC, QUERY_DESC_IDN_DEVICE, \ 0, 0, desc_buf, &desc_len); \ @@ -582,14 +583,13 @@ static ssize_t _name##_show(struct device *dev, \ goto out; \ } \ index = desc_buf[DEVICE_DESC_PARAM##_pname]; \ - memset(desc_buf, 0, QUERY_DESC_MAX_SIZE); \ - if (ufshcd_read_string_desc(hba, index, desc_buf, \ - QUERY_DESC_MAX_SIZE, true)) { \ - ret = -EINVAL; \ + kfree(desc_buf); \ + desc_buf = NULL; \ + ret = ufshcd_read_string_desc(hba, index, &desc_buf, \ + SD_ASCII_STD); \ + if (ret < 0) \ goto out; \ - } \ - ret = snprintf(buf, PAGE_SIZE, "%s\n", \ - desc_buf + QUERY_DESC_HDR_SIZE); \ + ret = snprintf(buf, PAGE_SIZE, "%s\n", desc_buf); \ out: \ kfree(desc_buf); \ return ret; \ diff --git a/drivers/scsi/ufs/ufs.h b/drivers/scsi/ufs/ufs.h index 14e5bf7af0bb1..b60dfcb5f0090 100644 --- a/drivers/scsi/ufs/ufs.h +++ b/drivers/scsi/ufs/ufs.h @@ -614,10 +614,14 @@ struct ufs_dev_info { * * @wmanufacturerid: card details * @model: card model + * @serial_no: serial number + * @serial_no_len: serial number string length */ struct ufs_dev_desc { u16 wmanufacturerid; - char model[MAX_MODEL_LEN + 1]; + char *model; + char *serial_no; + size_t serial_no_len; }; /** diff --git a/drivers/scsi/ufs/ufs_quirks.h b/drivers/scsi/ufs/ufs_quirks.h index 71f73d1d1ad1f..5d2dfdb41a6ff 100644 --- a/drivers/scsi/ufs/ufs_quirks.h +++ b/drivers/scsi/ufs/ufs_quirks.h @@ -131,4 +131,10 @@ struct ufs_dev_fix { */ #define UFS_DEVICE_QUIRK_HOST_PA_SAVECONFIGTIME (1 << 8) +/* + * Some UFS devices require VS_DebugSaveConfigTime is 0x10, + * enabling this quirk ensure this. 
+ */ +#define UFS_DEVICE_QUIRK_HOST_VS_DEBUGSAVECONFIGTIME (1 << 9) + #endif /* UFS_QUIRKS_H_ */ diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c index c55f38ec391ca..e922559f9e237 100644 --- a/drivers/scsi/ufs/ufshcd.c +++ b/drivers/scsi/ufs/ufshcd.c @@ -37,11 +37,15 @@ * license terms, and distributes only under these terms. */ +#include #include #include #include #include #include +#include +#include + #include "ufshcd.h" #include "ufs_quirks.h" #include "unipro.h" @@ -109,13 +113,19 @@ int ufshcd_dump_regs(struct ufs_hba *hba, size_t offset, size_t len, const char *prefix) { - u8 *regs; + u32 *regs; + size_t pos; + + if (offset % 4 != 0 || len % 4 != 0) /* keep readl happy */ + return -EINVAL; regs = kzalloc(len, GFP_KERNEL); if (!regs) return -ENOMEM; - memcpy_fromio(regs, hba->mmio_base + offset, len); + for (pos = 0; pos < len; pos += 4) + regs[pos / 4] = ufshcd_readl(hba, offset + pos); + ufshcd_hex_dump(prefix, regs, len); kfree(regs); @@ -230,6 +240,8 @@ static struct ufs_dev_fix ufs_fixups[] = { UFS_FIX(UFS_VENDOR_SKHYNIX, UFS_ANY_MODEL, UFS_DEVICE_NO_VCCQ), UFS_FIX(UFS_VENDOR_SKHYNIX, UFS_ANY_MODEL, UFS_DEVICE_QUIRK_HOST_PA_SAVECONFIGTIME), + UFS_FIX(UFS_VENDOR_SKHYNIX, "hB8aL1" /*H28U62301AMR*/, + UFS_DEVICE_QUIRK_HOST_VS_DEBUGSAVECONFIGTIME), END_FIX }; @@ -297,16 +309,6 @@ static void ufshcd_scsi_block_requests(struct ufs_hba *hba) scsi_block_requests(hba->host); } -/* replace non-printable or non-ASCII characters with spaces */ -static inline void ufshcd_remove_non_printable(char *val) -{ - if (!val) - return; - - if (*val < 0x20 || *val > 0x7e) - *val = ' '; -} - static void ufshcd_add_cmd_upiu_trace(struct ufs_hba *hba, unsigned int tag, const char *str) { @@ -1691,8 +1693,9 @@ static void __ufshcd_release(struct ufs_hba *hba) hba->clk_gating.state = REQ_CLKS_OFF; trace_ufshcd_clk_gating(dev_name(hba->dev), hba->clk_gating.state); - schedule_delayed_work(&hba->clk_gating.gate_work, - msecs_to_jiffies(hba->clk_gating.delay_ms)); + 
queue_delayed_work(hba->clk_gating.clk_gating_workq, + &hba->clk_gating.gate_work, + msecs_to_jiffies(hba->clk_gating.delay_ms)); } void ufshcd_release(struct ufs_hba *hba) @@ -3120,7 +3123,7 @@ int ufshcd_read_desc_param(struct ufs_hba *hba, enum desc_idn desc_id, int desc_index, u8 param_offset, - u8 *param_read_buf, + void *param_read_buf, u8 param_size) { int ret; @@ -3188,7 +3191,7 @@ int ufshcd_read_desc_param(struct ufs_hba *hba, static inline int ufshcd_read_desc(struct ufs_hba *hba, enum desc_idn desc_id, int desc_index, - u8 *buf, + void *buf, u32 size) { return ufshcd_read_desc_param(hba, desc_id, desc_index, 0, buf, size); @@ -3206,49 +3209,77 @@ static int ufshcd_read_device_desc(struct ufs_hba *hba, u8 *buf, u32 size) return ufshcd_read_desc(hba, QUERY_DESC_IDN_DEVICE, 0, buf, size); } +/** + * struct uc_string_id - unicode string + * + * @len: size of this descriptor inclusive + * @type: descriptor type + * @uc: unicode string character + */ +struct uc_string_id { + u8 len; + u8 type; + wchar_t uc[0]; +} __packed; + +/* replace non-printable or non-ASCII characters with spaces */ +static inline char blank_non_printable(char ch) +{ + return (ch >= 0x20 && ch <= 0x7e) ? ch : ' '; +} + /** * ufshcd_read_string_desc - read string descriptor * @hba: pointer to adapter instance * @desc_index: descriptor index - * @buf: pointer to buffer where descriptor would be read - * @size: size of buf + * @buf: pointer to buffer where descriptor would be read, + * the caller should free the memory. * @ascii: if true convert from unicode to ascii characters + * null terminated string. * - * Return 0 in case of success, non-zero otherwise + * Return: string size on success. 
+ * -ENOMEM: on allocation failure + * -EINVAL: on a wrong parameter */ -int ufshcd_read_string_desc(struct ufs_hba *hba, int desc_index, - u8 *buf, u32 size, bool ascii) +int ufshcd_read_string_desc(struct ufs_hba *hba, u8 desc_index, + char **buf, bool ascii) { - int err = 0; + struct uc_string_id *uc_str; + char *str; + int ret; - err = ufshcd_read_desc(hba, - QUERY_DESC_IDN_STRING, desc_index, buf, size); + if (!buf) + return -EINVAL; - if (err) { - dev_err(hba->dev, "%s: reading String Desc failed after %d retries. err = %d\n", - __func__, QUERY_REQ_RETRIES, err); + uc_str = kzalloc(QUERY_DESC_MAX_SIZE, GFP_KERNEL); + if (!uc_str) + return -ENOMEM; + + ret = ufshcd_read_desc(hba, QUERY_DESC_IDN_STRING, + desc_index, uc_str, + QUERY_DESC_MAX_SIZE); + if (ret < 0) { + dev_err(hba->dev, "Reading String Desc failed after %d retries. err = %d\n", + QUERY_REQ_RETRIES, ret); + str = NULL; + goto out; + } + + if (uc_str->len <= QUERY_DESC_HDR_SIZE) { + dev_dbg(hba->dev, "String Desc is of zero length\n"); + str = NULL; + ret = 0; goto out; } if (ascii) { - int desc_len; - int ascii_len; + ssize_t ascii_len; int i; - char *buff_ascii; - - desc_len = buf[0]; /* remove header and divide by 2 to move from UTF16 to UTF8 */ - ascii_len = (desc_len - QUERY_DESC_HDR_SIZE) / 2 + 1; - if (size < ascii_len + QUERY_DESC_HDR_SIZE) { - dev_err(hba->dev, "%s: buffer allocated size is too small\n", - __func__); - err = -ENOMEM; - goto out; - } - - buff_ascii = kmalloc(ascii_len, GFP_KERNEL); - if (!buff_ascii) { - err = -ENOMEM; + ascii_len = (uc_str->len - QUERY_DESC_HDR_SIZE) / 2 + 1; + str = kzalloc(ascii_len, GFP_KERNEL); + if (!str) { + ret = -ENOMEM; goto out; } @@ -3256,22 +3287,29 @@ int ufshcd_read_string_desc(struct ufs_hba *hba, int desc_index, * the descriptor contains string in UTF16 format * we need to convert to utf-8 so it can be displayed */ - utf16s_to_utf8s((wchar_t *)&buf[QUERY_DESC_HDR_SIZE], - desc_len - QUERY_DESC_HDR_SIZE, - UTF16_BIG_ENDIAN, buff_ascii, 
ascii_len); + ret = utf16s_to_utf8s(uc_str->uc, + uc_str->len - QUERY_DESC_HDR_SIZE, + UTF16_BIG_ENDIAN, str, ascii_len); /* replace non-printable or non-ASCII characters with spaces */ - for (i = 0; i < ascii_len; i++) - ufshcd_remove_non_printable(&buff_ascii[i]); + for (i = 0; i < ret; i++) + str[i] = blank_non_printable(str[i]); - memset(buf + QUERY_DESC_HDR_SIZE, 0, - size - QUERY_DESC_HDR_SIZE); - memcpy(buf + QUERY_DESC_HDR_SIZE, buff_ascii, ascii_len); - buf[QUERY_DESC_LENGTH_OFFSET] = ascii_len + QUERY_DESC_HDR_SIZE; - kfree(buff_ascii); + str[ret++] = '\0'; + + } else { + str = kzalloc(uc_str->len, GFP_KERNEL); + if (!str) { + ret = -ENOMEM; + goto out; + } + memcpy(str, uc_str, uc_str->len); + ret = uc_str->len; } out: - return err; + *buf = str; + kfree(uc_str); + return ret; } /** @@ -6179,6 +6217,227 @@ static void ufshcd_init_icc_levels(struct ufs_hba *hba) kfree(desc_buf); } +#define SEC_PROTOCOL_UFS 0xEC +#define SEC_SPECIFIC_UFS_RPMB 0x001 + +#define SEC_PROTOCOL_CMD_SIZE 12 +#define SEC_PROTOCOL_RETRIES 3 +#define SEC_PROTOCOL_RETRIES_ON_RESET 10 +#define SEC_PROTOCOL_TIMEOUT msecs_to_jiffies(1000) + +static int +ufshcd_rpmb_security_out(struct scsi_device *sdev, u8 region, + void *frames, u32 trans_len) +{ + struct scsi_sense_hdr sshdr; + int reset_retries = SEC_PROTOCOL_RETRIES_ON_RESET; + int ret; + u8 cmd[SEC_PROTOCOL_CMD_SIZE]; + + memset(cmd, 0, SEC_PROTOCOL_CMD_SIZE); + cmd[0] = SECURITY_PROTOCOL_OUT; + cmd[1] = SEC_PROTOCOL_UFS; + cmd[2] = region; + cmd[3] = SEC_SPECIFIC_UFS_RPMB; + cmd[4] = 0; /* inc_512 bit 7 set to 0 */ + put_unaligned_be32(trans_len, cmd + 6); /* transfer length */ + +retry: + ret = scsi_execute_req(sdev, cmd, DMA_TO_DEVICE, + frames, trans_len, &sshdr, + SEC_PROTOCOL_TIMEOUT, SEC_PROTOCOL_RETRIES, + NULL); + + if (ret && scsi_sense_valid(&sshdr) && + sshdr.sense_key == UNIT_ATTENTION && + sshdr.asc == 0x29 && sshdr.ascq == 0x00) + /* + * Device reset might occur several times, + * give it one more chance + */ + if 
(--reset_retries > 0) + goto retry; + + if (ret) + dev_err(&sdev->sdev_gendev, "%s: failed with err %0x\n", + __func__, ret); + + if (driver_byte(ret) & DRIVER_SENSE) + scsi_print_sense_hdr(sdev, "rpmb: security out", &sshdr); + + return ret; +} + +static int +ufshcd_rpmb_security_in(struct scsi_device *sdev, u8 region, + void *frames, u32 alloc_len) +{ + struct scsi_sense_hdr sshdr; + int reset_retries = SEC_PROTOCOL_RETRIES_ON_RESET; + int ret; + u8 cmd[SEC_PROTOCOL_CMD_SIZE]; + + memset(cmd, 0, SEC_PROTOCOL_CMD_SIZE); + cmd[0] = SECURITY_PROTOCOL_IN; + cmd[1] = SEC_PROTOCOL_UFS; + cmd[2] = region; + cmd[3] = SEC_SPECIFIC_UFS_RPMB; + cmd[4] = 0; /* inc_512 bit 7 set to 0 */ + put_unaligned_be32(alloc_len, cmd + 6); /* allocation length */ + +retry: + ret = scsi_execute_req(sdev, cmd, DMA_FROM_DEVICE, + frames, alloc_len, &sshdr, + SEC_PROTOCOL_TIMEOUT, SEC_PROTOCOL_RETRIES, + NULL); + + if (ret && scsi_sense_valid(&sshdr) && + sshdr.sense_key == UNIT_ATTENTION && + sshdr.asc == 0x29 && sshdr.ascq == 0x00) + /* + * Device reset might occur several times, + * give it one more chance + */ + if (--reset_retries > 0) + goto retry; + + if (ret) + dev_err(&sdev->sdev_gendev, "%s: failed with err %0x\n", + __func__, ret); + + if (driver_byte(ret) & DRIVER_SENSE) + scsi_print_sense_hdr(sdev, "rpmb: security in", &sshdr); + + return ret; +} + +static int ufshcd_rpmb_cmd_seq(struct device *dev, u8 target, + struct rpmb_cmd *cmds, u32 ncmds) +{ + unsigned long flags; + struct ufs_hba *hba = dev_get_drvdata(dev); + struct scsi_device *sdev; + struct rpmb_cmd *cmd; + u32 len; + u32 i; + int ret; + + spin_lock_irqsave(hba->host->host_lock, flags); + sdev = hba->sdev_ufs_rpmb; + if (sdev) { + ret = scsi_device_get(sdev); + if (!ret && !scsi_device_online(sdev)) { + ret = -ENODEV; + scsi_device_put(sdev); + } + } else { + ret = -ENODEV; + } + spin_unlock_irqrestore(hba->host->host_lock, flags); + if (ret) + return ret; + + for (ret = 0, i = 0; i < ncmds && !ret; i++) { + cmd = 
&cmds[i]; + len = rpmb_ioc_frames_len_jdec(cmd->nframes); + if (cmd->flags & RPMB_F_WRITE) + ret = ufshcd_rpmb_security_out(sdev, target, + cmd->frames, len); + else + ret = ufshcd_rpmb_security_in(sdev, target, + cmd->frames, len); + } + scsi_device_put(sdev); + return ret; +} + +static int ufshcd_rpmb_get_capacity(struct device *dev, u8 target) +{ + struct ufs_hba *hba = dev_get_drvdata(dev); + __be64 block_count; + int ret; + + ret = ufshcd_read_unit_desc_param(hba, + UFS_UPIU_RPMB_WLUN, + UNIT_DESC_PARAM_LOGICAL_BLK_COUNT, + (u8 *)&block_count, + sizeof(block_count)); + if (ret) + return ret; + + return be64_to_cpu(block_count) * SZ_512 / SZ_128K; +} + +static struct rpmb_ops ufshcd_rpmb_dev_ops = { + .cmd_seq = ufshcd_rpmb_cmd_seq, + .get_capacity = ufshcd_rpmb_get_capacity, + .type = RPMB_TYPE_UFS, + .auth_method = RPMB_HMAC_ALGO_SHA_256, + +}; + +static inline void ufshcd_rpmb_add(struct ufs_hba *hba, + struct ufs_dev_desc *dev_desc) +{ + struct rpmb_dev *rdev; + u8 rpmb_rw_size = 1; + int ret; + + ufshcd_rpmb_dev_ops.dev_id = kmemdup(dev_desc->serial_no, + dev_desc->serial_no_len, + GFP_KERNEL); + if (ufshcd_rpmb_dev_ops.dev_id) + ufshcd_rpmb_dev_ops.dev_id_len = dev_desc->serial_no_len; + + ret = scsi_device_get(hba->sdev_ufs_rpmb); + if (ret) + goto out_put_dev; + + if (hba->ufs_version >= UFSHCI_VERSION_21) { + ret = ufshcd_read_desc_param(hba, QUERY_DESC_IDN_GEOMETRY, 0, + GEOMETRY_DESC_PARAM_RPMB_RW_SIZE, + &rpmb_rw_size, + sizeof(rpmb_rw_size)); + if (ret) + goto out_put_dev; + } + + ufshcd_rpmb_dev_ops.rd_cnt_max = rpmb_rw_size; + ufshcd_rpmb_dev_ops.wr_cnt_max = rpmb_rw_size; + + rdev = rpmb_dev_register(hba->dev, 0, &ufshcd_rpmb_dev_ops); + if (IS_ERR(rdev)) { + dev_warn(hba->dev, "%s: cannot register to rpmb %ld\n", + dev_name(hba->dev), PTR_ERR(rdev)); + goto out_put_dev; + } + + return; + +out_put_dev: + scsi_device_put(hba->sdev_ufs_rpmb); + hba->sdev_ufs_rpmb = NULL; +} + +static inline void ufshcd_rpmb_remove(struct ufs_hba *hba) +{ + 
unsigned long flags; + + if (!hba->sdev_ufs_rpmb) + return; + + spin_lock_irqsave(hba->host->host_lock, flags); + + rpmb_dev_unregister_by_device(hba->dev, 0); + scsi_device_put(hba->sdev_ufs_rpmb); + hba->sdev_ufs_rpmb = NULL; + + kfree(ufshcd_rpmb_dev_ops.dev_id); + ufshcd_rpmb_dev_ops.dev_id = NULL; + + spin_unlock_irqrestore(hba->host->host_lock, flags); +} + /** * ufshcd_scsi_add_wlus - Adds required W-LUs * @hba: per-adapter instance @@ -6226,6 +6485,8 @@ static int ufshcd_scsi_add_wlus(struct ufs_hba *hba) ret = PTR_ERR(sdev_rpmb); goto remove_sdev_ufs_device; } + hba->sdev_ufs_rpmb = sdev_rpmb; + scsi_device_put(sdev_rpmb); sdev_boot = __scsi_add_device(hba->host, 0, 0, @@ -6247,9 +6508,12 @@ static int ufs_get_device_desc(struct ufs_hba *hba, { int err; size_t buff_len; - u8 model_index; + u8 index; u8 *desc_buf; + if (!dev_desc) + return -EINVAL; + buff_len = max_t(size_t, hba->desc_size.dev_desc, QUERY_DESC_MAX_SIZE + 1); desc_buf = kmalloc(buff_len, GFP_KERNEL); @@ -6272,32 +6536,43 @@ static int ufs_get_device_desc(struct ufs_hba *hba, dev_desc->wmanufacturerid = desc_buf[DEVICE_DESC_PARAM_MANF_ID] << 8 | desc_buf[DEVICE_DESC_PARAM_MANF_ID + 1]; - model_index = desc_buf[DEVICE_DESC_PARAM_PRDCT_NAME]; - - /* Zero-pad entire buffer for string termination. */ - memset(desc_buf, 0, buff_len); - - err = ufshcd_read_string_desc(hba, model_index, desc_buf, - QUERY_DESC_MAX_SIZE, true/*ASCII*/); - if (err) { + index = desc_buf[DEVICE_DESC_PARAM_PRDCT_NAME]; + err = ufshcd_read_string_desc(hba, index, + &dev_desc->model, SD_ASCII_STD); + if (err < 0) { dev_err(hba->dev, "%s: Failed reading Product Name. 
err = %d\n", __func__, err); goto out; } - desc_buf[QUERY_DESC_MAX_SIZE] = '\0'; - strlcpy(dev_desc->model, (desc_buf + QUERY_DESC_HDR_SIZE), - min_t(u8, desc_buf[QUERY_DESC_LENGTH_OFFSET], - MAX_MODEL_LEN)); + index = desc_buf[DEVICE_DESC_PARAM_SN]; + err = ufshcd_read_string_desc(hba, index, &dev_desc->serial_no, SD_RAW); + if (err < 0) { + dev_err(hba->dev, "%s: Failed reading Serial No. err = %d\n", + __func__, err); + goto out; + } - /* Null terminate the model string */ - dev_desc->model[MAX_MODEL_LEN] = '\0'; + /* + * ufshcd_read_string_desc returns size of the string + * reset the error value + */ + err = 0; out: kfree(desc_buf); return err; } +static void ufs_put_device_desc(struct ufs_dev_desc *dev_desc) +{ + kfree(dev_desc->model); + dev_desc->model = NULL; + + kfree(dev_desc->serial_no); + dev_desc->serial_no = NULL; +} + static void ufs_fixup_device_setup(struct ufs_hba *hba, struct ufs_dev_desc *dev_desc) { @@ -6306,8 +6581,9 @@ static void ufs_fixup_device_setup(struct ufs_hba *hba, for (f = ufs_fixups; f->quirk; f++) { if ((f->card.wmanufacturerid == dev_desc->wmanufacturerid || f->card.wmanufacturerid == UFS_ANY_VENDOR) && - (STR_PRFX_EQUAL(f->card.model, dev_desc->model) || - !strcmp(f->card.model, UFS_ANY_MODEL))) + ((dev_desc->model && + STR_PRFX_EQUAL(f->card.model, dev_desc->model)) || + !strcmp(f->card.model, UFS_ANY_MODEL))) hba->dev_quirks |= f->quirk; } } @@ -6590,6 +6866,7 @@ static int ufshcd_probe_hba(struct ufs_hba *hba) } ufs_fixup_device_setup(hba, &card); + ufshcd_tune_unipro_params(hba); ret = ufshcd_set_vccq_rail_unused(hba, @@ -6638,6 +6915,8 @@ static int ufshcd_probe_hba(struct ufs_hba *hba) if (ufshcd_scsi_add_wlus(hba)) goto out; + ufshcd_rpmb_add(hba, &card); + /* Initialize devfreq after UFS device is detected */ if (ufshcd_is_clkscaling_supported(hba)) { memcpy(&hba->clk_scaling.saved_pwr_info.info, @@ -6660,6 +6939,8 @@ static int ufshcd_probe_hba(struct ufs_hba *hba) hba->is_init_prefetch = true; out: + + 
ufs_put_device_desc(&card); /* * If we failed to initialize the device or the device is not * present, turn off the power/clocks etc. @@ -7859,6 +8140,8 @@ int ufshcd_shutdown(struct ufs_hba *hba) goto out; } + ufshcd_rpmb_remove(hba); + ret = ufshcd_suspend(hba, UFS_SHUTDOWN_PM); out: if (ret) @@ -7875,7 +8158,10 @@ EXPORT_SYMBOL(ufshcd_shutdown); */ void ufshcd_remove(struct ufs_hba *hba) { + ufshcd_rpmb_remove(hba); + ufs_sysfs_remove_nodes(hba->dev); + scsi_remove_host(hba->host); /* disable interrupts */ ufshcd_disable_intr(hba, hba->intr_mask); diff --git a/drivers/scsi/ufs/ufshcd.h b/drivers/scsi/ufs/ufshcd.h index 33fdd3f281ae8..82b5e7d317f5e 100644 --- a/drivers/scsi/ufs/ufshcd.h +++ b/drivers/scsi/ufs/ufshcd.h @@ -458,6 +458,7 @@ struct ufs_stats { * @utmrdl_dma_addr: UTMRDL DMA address * @host: Scsi_Host instance of the driver * @dev: device handle + * @sdev_ufs_rpmb: reference to RPMB device W-LU * @lrb: local reference block * @lrb_in_use: lrb in use * @outstanding_tasks: Bits representing outstanding task requests @@ -522,6 +523,7 @@ struct ufs_hba { * "UFS device" W-LU. 
*/ struct scsi_device *sdev_ufs_device; + struct scsi_device *sdev_ufs_rpmb; enum ufs_dev_pwr_mode curr_dev_pwr_mode; enum uic_link_state uic_link_state; @@ -875,14 +877,17 @@ int ufshcd_read_desc_param(struct ufs_hba *hba, enum desc_idn desc_id, int desc_index, u8 param_offset, - u8 *param_read_buf, + void *param_read_buf, u8 param_size); int ufshcd_query_attr(struct ufs_hba *hba, enum query_opcode opcode, enum attr_idn idn, u8 index, u8 selector, u32 *attr_val); int ufshcd_query_flag(struct ufs_hba *hba, enum query_opcode opcode, enum flag_idn idn, bool *flag_res); -int ufshcd_read_string_desc(struct ufs_hba *hba, int desc_index, - u8 *buf, u32 size, bool ascii); + +#define SD_ASCII_STD true +#define SD_RAW false +int ufshcd_read_string_desc(struct ufs_hba *hba, u8 desc_index, + char **buf, bool ascii); int ufshcd_hold(struct ufs_hba *hba, bool async); void ufshcd_release(struct ufs_hba *hba); diff --git a/drivers/scsi/vmw_pvscsi.c b/drivers/scsi/vmw_pvscsi.c index 0cd947f78b5bf..890b8aaf95e10 100644 --- a/drivers/scsi/vmw_pvscsi.c +++ b/drivers/scsi/vmw_pvscsi.c @@ -1202,8 +1202,6 @@ static void pvscsi_shutdown_intr(struct pvscsi_adapter *adapter) static void pvscsi_release_resources(struct pvscsi_adapter *adapter) { - pvscsi_shutdown_intr(adapter); - if (adapter->workqueue) destroy_workqueue(adapter->workqueue); @@ -1535,6 +1533,7 @@ static int pvscsi_probe(struct pci_dev *pdev, const struct pci_device_id *id) out_reset_adapter: ll_adapter_reset(adapter); out_release_resources: + pvscsi_shutdown_intr(adapter); pvscsi_release_resources(adapter); scsi_host_put(host); out_disable_device: @@ -1543,6 +1542,7 @@ static int pvscsi_probe(struct pci_dev *pdev, const struct pci_device_id *id) return error; out_release_resources_and_disable: + pvscsi_shutdown_intr(adapter); pvscsi_release_resources(adapter); goto out_disable_device; } diff --git a/drivers/sdw/Kconfig b/drivers/sdw/Kconfig new file mode 100644 index 0000000000000..7e5a57f1f6d26 --- /dev/null +++ 
b/drivers/sdw/Kconfig @@ -0,0 +1,19 @@ +menuconfig SDW + tristate "SoundWire bus support" + select CRC8 + depends on X86 + help + SoundWire interface is typically used for transporting data + related to audio functions. +menuconfig SDW_CNL + tristate "Intel SoundWire master controller support" + depends on SDW && X86 + help + Intel SoundWire master controller driver +menuconfig SDW_MAXIM_SLAVE + bool "SoundWire Slave for the Intel CNL FPGA" + depends on SDW && X86 + help + SoundWire Slave on FPGA platform for Intel CNL IP + Mostly N for all the cases other than CNL Slave FPGA + diff --git a/drivers/sdw/Makefile b/drivers/sdw/Makefile new file mode 100644 index 0000000000000..e2ba440f4ef27 --- /dev/null +++ b/drivers/sdw/Makefile @@ -0,0 +1,5 @@ +sdw_bus-objs := sdw.o sdw_bwcalc.o sdw_utils.o + +obj-$(CONFIG_SDW) += sdw_bus.o +obj-$(CONFIG_SDW_CNL) += sdw_cnl.o +obj-$(CONFIG_SDW_MAXIM_SLAVE) += sdw_maxim.o diff --git a/drivers/sdw/sdw.c b/drivers/sdw/sdw.c new file mode 100644 index 0000000000000..aefd25d4e3937 --- /dev/null +++ b/drivers/sdw/sdw.c @@ -0,0 +1,3459 @@ +/* + * sdw.c - SoundWire Bus driver implementation + * + * Copyright (C) 2015-2016 Intel Corp + * Author: Hardik T Shah + * + * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; version 2 of the License. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. 
+ * + * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + * + */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "sdw_priv.h" + +#define sdw_slave_attr_gr NULL +#define sdw_mstr_attr_gr NULL + +#define CREATE_TRACE_POINTS +#include + +/* Global instance handling all the SoundWire buses */ +struct sdw_core sdw_core; + +static void sdw_slave_release(struct device *dev) +{ + kfree(to_sdw_slave(dev)); +} + +static void sdw_mstr_release(struct device *dev) +{ + struct sdw_master *mstr = to_sdw_master(dev); + + complete(&mstr->slv_released); +} + +static struct device_type sdw_slv_type = { + .groups = sdw_slave_attr_gr, + .release = sdw_slave_release, +}; + +static struct device_type sdw_mstr_type = { + .groups = sdw_mstr_attr_gr, + .release = sdw_mstr_release, +}; +/** + * sdw_slave_verify - return parameter as sdw_slv, or NULL + * @dev: device, probably from some driver model iterator + * + * When traversing the driver model tree, perhaps using driver model + * iterators like @device_for_each_child(), you can't assume very much + * about the nodes you find. Use this function to avoid oopses caused + * by wrongly treating some non-SDW device as an sdw_slv. + */ +struct sdw_slv *sdw_slave_verify(struct device *dev) +{ + return (dev->type == &sdw_slv_type) + ? to_sdw_slave(dev) + : NULL; +} + +/** + * sdw_mstr_verify - return parameter as sdw_master, or NULL + * @dev: device, probably from some driver model iterator + * + * When traversing the driver model tree, perhaps using driver model + * iterators like @device_for_each_child(), you can't assume very much + * about the nodes you find. Use this function to avoid oopses caused + * by wrongly treating some non-SDW device as an sdw_slv. + */ +struct sdw_master *sdw_mstr_verify(struct device *dev) +{ + return (dev->type == &sdw_mstr_type) + ? 
to_sdw_master(dev) + : NULL; +} + +static const struct sdw_slv_id *sdw_match_slave(const struct sdw_slv_id *id, + const struct sdw_slv *sdw_slv) +{ + while (id->name[0]) { + if (strncmp(sdw_slv->name, id->name, SOUNDWIRE_NAME_SIZE) == 0) + return id; + id++; + } + return NULL; +} + +static const struct sdw_master_id *sdw_match_master( + const struct sdw_master_id *id, + const struct sdw_master *sdw_mstr) +{ + if (!id) + return NULL; + while (id->name[0]) { + if (strncmp(sdw_mstr->name, id->name, SOUNDWIRE_NAME_SIZE) == 0) + return id; + id++; + } + return NULL; +} + +static int sdw_slv_match(struct device *dev, struct device_driver *driver) +{ + struct sdw_slv *sdw_slv; + struct sdw_slave_driver *drv = to_sdw_slave_driver(driver); + int ret = 0; + + /* Check if driver is slave type or not, both master and slave + * driver has first field as driver_type, so if driver is not + * of slave type return + */ + if (drv->driver_type != SDW_DRIVER_TYPE_SLAVE) + return ret; + + sdw_slv = to_sdw_slave(dev); + + if (drv->id_table) + ret = (sdw_match_slave(drv->id_table, sdw_slv) != NULL); + + if (driver->name && !ret) + ret = (strncmp(sdw_slv->name, driver->name, SOUNDWIRE_NAME_SIZE) + == 0); + if (ret) + sdw_slv->driver = drv; + return ret; +} +static int sdw_mstr_match(struct device *dev, struct device_driver *driver) +{ + struct sdw_master *sdw_mstr; + struct sdw_mstr_driver *drv = to_sdw_mstr_driver(driver); + int ret = 0; + + /* Check if driver is slave type or not, both master and slave + * driver has first field as driver_type, so if driver is not + * of slave type return + */ + if (drv->driver_type != SDW_DRIVER_TYPE_MASTER) + return ret; + + sdw_mstr = to_sdw_master(dev); + + if (drv->id_table) + ret = (sdw_match_master(drv->id_table, sdw_mstr) != NULL); + + if (driver->name) + ret = (strncmp(sdw_mstr->name, driver->name, + SOUNDWIRE_NAME_SIZE) == 0); + if (ret) + sdw_mstr->driver = drv; + + return ret; +} + +static int sdw_mstr_probe(struct device *dev) +{ + const 
struct sdw_mstr_driver *sdrv = to_sdw_mstr_driver(dev->driver); + struct sdw_master *mstr = to_sdw_master(dev); + int ret = 0; + + if (!sdrv->probe) + return -ENODEV; + ret = dev_pm_domain_attach(dev, true); + if (ret != -EPROBE_DEFER) { + ret = sdrv->probe(mstr, sdw_match_master(sdrv->id_table, mstr)); + if (ret) + dev_pm_domain_detach(dev, true); + } + return ret; +} + +static int sdw_slv_probe(struct device *dev) +{ + const struct sdw_slave_driver *sdrv = to_sdw_slave_driver(dev->driver); + struct sdw_slv *sdwslv = to_sdw_slave(dev); + int ret = 0; + + if (!sdrv->probe) + return -ENODEV; + ret = dev_pm_domain_attach(dev, true); + if (ret != -EPROBE_DEFER) { + ret = sdrv->probe(sdwslv, sdw_match_slave(sdrv->id_table, + sdwslv)); + return 0; + if (ret) + dev_pm_domain_detach(dev, true); + } + return ret; +} + + +int sdw_slave_get_bus_params(struct sdw_slv *sdw_slv, + struct sdw_bus_params *params) +{ + struct sdw_bus *bus; + struct sdw_master *mstr = sdw_slv->mstr; + + list_for_each_entry(bus, &sdw_core.bus_list, bus_node) { + if (bus->mstr == mstr) + break; + } + if (!bus) + return -EFAULT; + + params->num_rows = bus->row; + params->num_cols = bus->col; + params->bus_clk_freq = bus->clk_freq >> 1; + params->bank = bus->active_bank; + + return 0; +} +EXPORT_SYMBOL(sdw_slave_get_bus_params); + +static int sdw_mstr_remove(struct device *dev) +{ + const struct sdw_mstr_driver *sdrv = to_sdw_mstr_driver(dev->driver); + int ret = 0; + + if (sdrv->remove) + ret = sdrv->remove(to_sdw_master(dev)); + else + return -ENODEV; + + dev_pm_domain_detach(dev, true); + return ret; + +} + +static int sdw_slv_remove(struct device *dev) +{ + const struct sdw_slave_driver *sdrv = to_sdw_slave_driver(dev->driver); + int ret = 0; + + if (sdrv->remove) + ret = sdrv->remove(to_sdw_slave(dev)); + else + return -ENODEV; + + dev_pm_domain_detach(dev, true); + return ret; +} + +static void sdw_slv_shutdown(struct device *dev) +{ + const struct sdw_slave_driver *sdrv = 
to_sdw_slave_driver(dev->driver); + + if (sdrv->shutdown) + sdrv->shutdown(to_sdw_slave(dev)); +} + +static void sdw_mstr_shutdown(struct device *dev) +{ + const struct sdw_mstr_driver *sdrv = to_sdw_mstr_driver(dev->driver); + struct sdw_master *mstr = to_sdw_master(dev); + + if (sdrv->shutdown) + sdrv->shutdown(mstr); +} + +static void sdw_shutdown(struct device *dev) +{ + struct sdw_slv *sdw_slv; + struct sdw_master *sdw_mstr; + + sdw_slv = sdw_slave_verify(dev); + sdw_mstr = sdw_mstr_verify(dev); + if (sdw_slv) + sdw_slv_shutdown(dev); + else if (sdw_mstr) + sdw_mstr_shutdown(dev); +} + +static int sdw_remove(struct device *dev) +{ + struct sdw_slv *sdw_slv; + struct sdw_master *sdw_mstr; + + sdw_slv = sdw_slave_verify(dev); + sdw_mstr = sdw_mstr_verify(dev); + if (sdw_slv) + return sdw_slv_remove(dev); + else if (sdw_mstr) + return sdw_mstr_remove(dev); + + return 0; +} + +static int sdw_probe(struct device *dev) +{ + + struct sdw_slv *sdw_slv; + struct sdw_master *sdw_mstr; + + sdw_slv = sdw_slave_verify(dev); + sdw_mstr = sdw_mstr_verify(dev); + if (sdw_slv) + return sdw_slv_probe(dev); + else if (sdw_mstr) + return sdw_mstr_probe(dev); + + return -ENODEV; + +} + +static int sdw_match(struct device *dev, struct device_driver *driver) +{ + struct sdw_slv *sdw_slv; + struct sdw_master *sdw_mstr; + + sdw_slv = sdw_slave_verify(dev); + sdw_mstr = sdw_mstr_verify(dev); + if (sdw_slv) + return sdw_slv_match(dev, driver); + else if (sdw_mstr) + return sdw_mstr_match(dev, driver); + return 0; + +} + +#ifdef CONFIG_PM_SLEEP +static int sdw_legacy_suspend(struct device *dev, pm_message_t mesg) +{ + struct sdw_slv *sdw_slv = NULL; + struct sdw_slave_driver *driver; + + if (dev->type == &sdw_slv_type) + sdw_slv = to_sdw_slave(dev); + + if (!sdw_slv || !dev->driver) + return 0; + + driver = to_sdw_slave_driver(dev->driver); + if (!driver->suspend) + return 0; + + return driver->suspend(sdw_slv, mesg); +} + +static int sdw_legacy_resume(struct device *dev) +{ + struct 
sdw_slv *sdw_slv = NULL; + struct sdw_slave_driver *driver; + + if (dev->type == &sdw_slv_type) + sdw_slv = to_sdw_slave(dev); + + if (!sdw_slv || !dev->driver) + return 0; + + driver = to_sdw_slave_driver(dev->driver); + if (!driver->resume) + return 0; + + return driver->resume(sdw_slv); +} + +static int sdw_pm_suspend(struct device *dev) +{ + const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL; + + if (pm) + return pm_generic_suspend(dev); + else + return sdw_legacy_suspend(dev, PMSG_SUSPEND); +} + +static int sdw_pm_resume(struct device *dev) +{ + const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL; + + if (pm) + return pm_generic_resume(dev); + else + return sdw_legacy_resume(dev); +} + +#else +#define sdw_pm_suspend NULL +#define sdw_pm_resume NULL +#endif /* CONFIG_PM_SLEEP */ + +static const struct dev_pm_ops soundwire_pm = { + .suspend = sdw_pm_suspend, + .resume = sdw_pm_resume, +#ifdef CONFIG_PM + .runtime_suspend = pm_generic_runtime_suspend, + .runtime_resume = pm_generic_runtime_resume, +#endif +}; + +struct bus_type sdwint_bus_type = { + .name = "soundwire", + .match = sdw_match, + .probe = sdw_probe, + .remove = sdw_remove, + .shutdown = sdw_shutdown, + .pm = &soundwire_pm, +}; +EXPORT_SYMBOL_GPL(sdwint_bus_type); + +struct device sdw_slv = { + .init_name = "soundwire", +}; + +static struct static_key sdw_trace_msg = STATIC_KEY_INIT_FALSE; + +int sdw_transfer_trace_reg(void) +{ + static_key_slow_inc(&sdw_trace_msg); + + return 0; +} + +void sdw_transfer_trace_unreg(void) +{ + static_key_slow_dec(&sdw_trace_msg); +} + +/** + * sdw_lock_mstr - Get exclusive access to an SDW bus segment + * @mstr: Target SDW bus segment + */ +void sdw_lock_mstr(struct sdw_master *mstr) +{ + rt_mutex_lock(&mstr->bus_lock); +} + +/** + * sdw_trylock_mstr - Try to get exclusive access to an SDW bus segment + * @mstr: Target SDW bus segment + */ +int sdw_trylock_mstr(struct sdw_master *mstr) +{ + return rt_mutex_trylock(&mstr->bus_lock); +} + 
+ +/** + * sdw_unlock_mstr - Release exclusive access to an SDW bus segment + * @mstr: Target SDW bus segment + */ +void sdw_unlock_mstr(struct sdw_master *mstr) +{ + rt_mutex_unlock(&mstr->bus_lock); +} + + +static int sdw_assign_slv_number(struct sdw_master *mstr, + struct sdw_msg *msg) +{ + int i, j, ret = -1; + + sdw_lock_mstr(mstr); + for (i = 1; i <= SOUNDWIRE_MAX_DEVICES; i++) { + if (mstr->sdw_addr[i].assigned == true) + continue; + mstr->sdw_addr[i].assigned = true; + for (j = 0; j < 6; j++) + mstr->sdw_addr[i].dev_id[j] = msg->buf[j]; + ret = i; + break; + } + sdw_unlock_mstr(mstr); + return ret; +} + +static int sdw_program_slv_address(struct sdw_master *mstr, + u8 slave_addr) +{ + struct sdw_msg msg; + u8 buf[1] = {0}; + int ret; + + buf[0] = slave_addr; + msg.ssp_tag = 0; + msg.flag = SDW_MSG_FLAG_WRITE; + msg.addr = SDW_SCP_DEVNUMBER; + msg.len = 1; + msg.buf = buf; + msg.slave_addr = 0x0; + msg.addr_page1 = 0x0; + msg.addr_page2 = 0x0; + + ret = sdw_slave_transfer(mstr, &msg, 1); + if (ret != 1) { + dev_err(&mstr->dev, "Program Slave address change\n"); + return ret; + } + return 0; +} + +static int sdw_find_slave(struct sdw_master *mstr, struct sdw_msg + *msg, bool *found) +{ + struct sdw_slv_addr *sdw_addr; + int ret = 0, i, comparison; + *found = false; + + sdw_lock_mstr(mstr); + sdw_addr = mstr->sdw_addr; + for (i = 1; i <= SOUNDWIRE_MAX_DEVICES; i++) { + comparison = memcmp(sdw_addr[i].dev_id, msg->buf, + SDW_NUM_DEV_ID_REGISTERS); + if ((!comparison) && (sdw_addr[i].assigned == true)) { + *found = true; + break; + } + } + sdw_unlock_mstr(mstr); + if (*found == true) + ret = sdw_program_slv_address(mstr, sdw_addr[i].slv_number); + return ret; +} + +static void sdw_free_slv_number(struct sdw_master *mstr, + int slv_number) +{ + int i; + + sdw_lock_mstr(mstr); + for (i = 0; i <= SOUNDWIRE_MAX_DEVICES; i++) { + if (slv_number == mstr->sdw_addr[i].slv_number) { + mstr->sdw_addr[slv_number].assigned = false; + 
memset(&mstr->sdw_addr[slv_number].dev_id[0], 0x0, 6); + } + } + sdw_unlock_mstr(mstr); +} + + +int count; +static int sdw_register_slave(struct sdw_master *mstr) +{ + int ret = 0, i, ports; + struct sdw_msg msg; + u8 buf[6] = {0}; + struct sdw_slv *sdw_slv; + int slv_number = -1; + bool found = false; + + + msg.ssp_tag = 0; + msg.flag = SDW_MSG_FLAG_READ; + msg.addr = SDW_SCP_DEVID_0; + msg.len = 6; + msg.buf = buf; + msg.slave_addr = 0x0; + msg.addr_page1 = 0x0; + msg.addr_page2 = 0x0; + + while ((ret = (sdw_slave_transfer(mstr, &msg, 1)) == 1)) { + ret = sdw_find_slave(mstr, &msg, &found); + if (found && !ret) { + dev_info(&mstr->dev, "Slave already registered\n"); + continue; + /* Even if slave registering fails we continue for other + * slave status, but we flag error + */ + } else if (ret) { + dev_err(&mstr->dev, "Re-registering slave failed"); + continue; + } + slv_number = sdw_assign_slv_number(mstr, &msg); + if (slv_number <= 0) { + dev_err(&mstr->dev, "Failed to assign slv_number\n"); + ret = -EINVAL; + goto slv_number_assign_fail; + } + sdw_slv = kzalloc(sizeof(struct sdw_slv), GFP_KERNEL); + if (!sdw_slv) { + ret = -ENOMEM; + goto mem_alloc_failed; + } + sdw_slv->mstr = mstr; + sdw_slv->dev.parent = &sdw_slv->mstr->dev; + sdw_slv->dev.bus = &sdwint_bus_type; + sdw_slv->dev.type = &sdw_slv_type; + sdw_slv->slv_addr = &mstr->sdw_addr[slv_number]; + sdw_slv->slv_addr->slave = sdw_slv; + /* We have assigned new slave number, so its not present + * till it again attaches to bus with this new + * slave address + */ + sdw_slv->slv_addr->status = SDW_SLAVE_STAT_NOT_PRESENT; + for (i = 0; i < 6; i++) + sdw_slv->dev_id[i] = msg.buf[i]; + dev_dbg(&mstr->dev, "SDW slave slave id found with values\n"); + dev_dbg(&mstr->dev, "dev_id0 to dev_id5: %x:%x:%x:%x:%x:%x\n", + msg.buf[0], msg.buf[1], msg.buf[2], + msg.buf[3], msg.buf[4], msg.buf[5]); + dev_dbg(&mstr->dev, "Slave number assigned is %x\n", slv_number); + /* TODO: Fill the sdw_slv structre from ACPI */ + ports 
= sdw_slv->sdw_slv_cap.num_of_sdw_ports; + /* Add 1 for port 0 for simplicity */ + ports++; + sdw_slv->port_ready = + kzalloc((sizeof(struct completion) * ports), + GFP_KERNEL); + if (!sdw_slv->port_ready) { + ret = -ENOMEM; + goto port_alloc_mem_failed; + } + for (i = 0; i < ports; i++) + init_completion(&sdw_slv->port_ready[i]); + + dev_set_name(&sdw_slv->dev, "sdw-slave%d-%02x:%02x:%02x:%02x:%02x:%02x", + sdw_master_id(mstr), + sdw_slv->dev_id[0], + sdw_slv->dev_id[1], + sdw_slv->dev_id[2], + sdw_slv->dev_id[3], + sdw_slv->dev_id[4], + sdw_slv->dev_id[5] + mstr->nr); + /* Set name based on dev_id. This will be + * compared to load driver + */ + sprintf(sdw_slv->name, "%02x:%02x:%02x:%02x:%02x:%02x", + sdw_slv->dev_id[0], + sdw_slv->dev_id[1], + sdw_slv->dev_id[2], + sdw_slv->dev_id[3], + sdw_slv->dev_id[4], + sdw_slv->dev_id[5] + mstr->nr); + ret = device_register(&sdw_slv->dev); + if (ret) { + dev_err(&mstr->dev, "Register slave failed\n"); + goto reg_slv_failed; + } + ret = sdw_program_slv_address(mstr, slv_number); + if (ret) { + dev_err(&mstr->dev, "Programming slave address failed\n"); + goto program_slv_failed; + } + dev_dbg(&mstr->dev, "Slave registered with bus id %s\n", + dev_name(&sdw_slv->dev)); + sdw_slv->slv_number = slv_number; + mstr->num_slv++; + sdw_lock_mstr(mstr); + list_add_tail(&sdw_slv->node, &mstr->slv_list); + sdw_unlock_mstr(mstr); + + } + count++; + return 0; +program_slv_failed: + device_unregister(&sdw_slv->dev); +port_alloc_mem_failed: +reg_slv_failed: + kfree(sdw_slv); +mem_alloc_failed: + sdw_free_slv_number(mstr, slv_number); +slv_number_assign_fail: + return ret; + +} + +/** + * __sdw_transfer - unlocked flavor of sdw_slave_transfer + * @mstr: Handle to SDW bus + * @msg: One or more messages to execute before STOP is issued to + * terminate the operation; each message begins with a START. + * @num: Number of messages to be executed. + * + * Returns negative errno, else the number of messages executed. 
+ * + * Adapter lock must be held when calling this function. No debug logging + * takes place. mstr->algo->master_xfer existence isn't checked. + */ +int __sdw_transfer(struct sdw_master *mstr, struct sdw_msg *msg, int num, + struct sdw_async_xfer_data *async_data) +{ + unsigned long orig_jiffies; + int ret = 0, try, i; + struct sdw_slv_capabilities *slv_cap; + int program_scp_addr_page; + int addr = msg->slave_addr; + + /* sdw_trace_msg gets enabled when tracepoint sdw_slave_transfer gets + * enabled. This is an efficient way of keeping the for-loop from + * being executed when not needed. + */ + if (static_key_false(&sdw_trace_msg)) { + int i; + + for (i = 0; i < num; i++) + if (msg[i].flag & SDW_MSG_FLAG_READ) + trace_sdw_read(mstr, &msg[i], i); + else + trace_sdw_write(mstr, &msg[i], i); + } + orig_jiffies = jiffies; + for (i = 0; i < num; i++) { + for (ret = 0, try = 0; try <= mstr->retries; try++) { + if (msg->slave_addr == 0) + /* If we are enumerating slave address 0, + * we dont program scp, it should be set + * default to 0 + */ + program_scp_addr_page = 0; + else if (msg->slave_addr == 15) + /* If we are broadcasting, we need to program + * the SCP address as some slaves will be + * supporting it while some wont be. 
+ * So it should be programmed + */ + program_scp_addr_page = 1; + + else { + slv_cap = + &mstr->sdw_addr[addr].slave->sdw_slv_cap; + program_scp_addr_page = + slv_cap->paging_supported; + } + /* Call async or sync handler based on call */ + if (!async_data) + ret = mstr->driver->mstr_ops->xfer_msg(mstr, + msg, program_scp_addr_page); + /* Async transfer is not mandatory to support + * It requires only if stream is split across the + * masters, where bus driver need to send the commands + * for bank switch individually and wait for them + * to complete out side of the master context + */ + else if (mstr->driver->mstr_ops->xfer_msg_async && + async_data) + ret = mstr->driver->mstr_ops->xfer_msg_async( + mstr, msg, + program_scp_addr_page, + async_data); + else + return -ENOTSUPP; + if (ret != -EAGAIN) + break; + if (time_after(jiffies, + orig_jiffies + mstr->timeout)) + break; + } + } + + if (static_key_false(&sdw_trace_msg)) { + int i; + + for (i = 0; i < msg->len; i++) + if (msg[i].flag & SDW_MSG_FLAG_READ) + trace_sdw_reply(mstr, &msg[i], i); + trace_sdw_result(mstr, i, ret); + } + if (!ret) + return i; + return ret; +} +EXPORT_SYMBOL_GPL(__sdw_transfer); + +/* NO PM version of slave transfer. Called from power management APIs + * to avoid dead locks. 
+ */ +static int sdw_slave_transfer_nopm(struct sdw_master *mstr, struct sdw_msg *msg, + int num) +{ + int ret; + + if (mstr->driver->mstr_ops->xfer_msg) { + ret = __sdw_transfer(mstr, msg, num, NULL); + return ret; + } + dev_dbg(&mstr->dev, "SDW level transfers not supported\n"); + return -EOPNOTSUPP; +} + +int sdw_slave_transfer_async(struct sdw_master *mstr, struct sdw_msg *msg, + int num, + struct sdw_async_xfer_data *async_data) +{ + int ret; + /* Currently we support only message asynchronously, This is mainly + * used to do bank switch for multiple controllers + */ + if (num != 1) + return -EINVAL; + if (!(mstr->driver->mstr_ops->xfer_msg)) { + dev_dbg(&mstr->dev, "SDW level transfers not supported\n"); + return -EOPNOTSUPP; + } + pm_runtime_get_sync(&mstr->dev); + ret = __sdw_transfer(mstr, msg, num, async_data); + pm_runtime_mark_last_busy(&mstr->dev); + pm_runtime_put_sync_autosuspend(&mstr->dev); + return ret; +} + +/** + * sdw_slave_transfer: Transfer message between slave and mstr on the bus. + * @mstr: mstr master which will transfer the message + * @msg: Array of messages to be transferred. + * @num: Number of messages to be transferred, messages include read and write + * messages, but not the ping messages. + */ +int sdw_slave_transfer(struct sdw_master *mstr, struct sdw_msg *msg, int num) +{ + int ret; + + /* REVISIT the fault reporting model here is weak: + * + * - When we get an error after receiving N bytes from a slave, + * there is no way to report "N". + * + * - When we get a NAK after transmitting N bytes to a slave, + * there is no way to report "N" ... or to let the mstr + * continue executing the rest of this combined message, if + * that's the appropriate response. 
+ * + * - When for example "num" is two and we successfully complete + * the first message but get an error part way through the + * second, it's unclear whether that should be reported as + * one (discarding status on the second message) or errno + * (discarding status on the first one). + */ + if (!(mstr->driver->mstr_ops->xfer_msg)) { + dev_dbg(&mstr->dev, "SDW level transfers not supported\n"); + return -EOPNOTSUPP; + } + pm_runtime_get_sync(&mstr->dev); + if (in_atomic() || irqs_disabled()) { + ret = sdw_trylock_mstr(mstr); + if (!ret) { + /* SDW activity is ongoing. */ + ret = -EAGAIN; + goto out; + } + } else { + sdw_lock_mstr(mstr); + } + ret = __sdw_transfer(mstr, msg, num, NULL); + sdw_unlock_mstr(mstr); +out: + pm_runtime_mark_last_busy(&mstr->dev); + pm_runtime_put_sync_autosuspend(&mstr->dev); + return ret; +} +EXPORT_SYMBOL_GPL(sdw_slave_transfer); + +static int sdw_handle_dp0_interrupts(struct sdw_master *mstr, + struct sdw_slv *sdw_slv, u8 *status) +{ + int ret = 0; + struct sdw_msg rd_msg, wr_msg; + int impl_def_mask = 0; + u8 rbuf[1] = {0}, wbuf[1] = {0}; + + /* Create message for clearing the interrupts */ + wr_msg.ssp_tag = 0; + wr_msg.flag = SDW_MSG_FLAG_WRITE; + wr_msg.addr = SDW_DP0_INTCLEAR; + wr_msg.len = 1; + wr_msg.buf = wbuf; + wr_msg.slave_addr = sdw_slv->slv_number; + wr_msg.addr_page1 = 0x0; + wr_msg.addr_page2 = 0x0; + + /* Create message for reading the interrupts for DP0 interrupts*/ + rd_msg.ssp_tag = 0; + rd_msg.flag = SDW_MSG_FLAG_READ; + rd_msg.addr = SDW_DP0_INTSTAT; + rd_msg.len = 1; + rd_msg.buf = rbuf; + rd_msg.slave_addr = sdw_slv->slv_number; + rd_msg.addr_page1 = 0x0; + rd_msg.addr_page2 = 0x0; + ret = sdw_slave_transfer(mstr, &rd_msg, 1); + if (ret != 1) { + ret = -EINVAL; + dev_err(&mstr->dev, "Interrupt status read failed for slave %x\n", sdw_slv->slv_number); + goto out; + } + if (rd_msg.buf[0] & SDW_DP0_INTSTAT_TEST_FAIL_MASK) { + dev_err(&mstr->dev, "Test fail for slave %d port 0\n", + sdw_slv->slv_number); + 
wr_msg.buf[0] |= SDW_DP0_INTCLEAR_TEST_FAIL_MASK; + } + if (rd_msg.buf[0] & SDW_DP0_INTSTAT_PORT_READY_MASK) { + complete(&sdw_slv->port_ready[0]); + wr_msg.buf[0] |= SDW_DP0_INTCLEAR_PORT_READY_MASK; + } + if (rd_msg.buf[0] & SDW_DP0_INTMASK_BRA_FAILURE_MASK) { + /* TODO: Handle BRA failure */ + dev_err(&mstr->dev, "BRA failed for slave %d\n", + sdw_slv->slv_number); + wr_msg.buf[0] |= SDW_DP0_INTCLEAR_BRA_FAILURE_MASK; + } + impl_def_mask = SDW_DP0_INTSTAT_IMPDEF1_MASK | + SDW_DP0_INTSTAT_IMPDEF2_MASK | + SDW_DP0_INTSTAT_IMPDEF3_MASK; + if (rd_msg.buf[0] & impl_def_mask) { + wr_msg.buf[0] |= impl_def_mask; + *status = wr_msg.buf[0]; + } + ret = sdw_slave_transfer(mstr, &wr_msg, 1); + if (ret != 1) { + ret = -EINVAL; + dev_err(&mstr->dev, "Register transfer failed\n"); + goto out; + } +out: + return ret; + +} + +static int sdw_handle_port_interrupt(struct sdw_master *mstr, + struct sdw_slv *sdw_slv, int port_num, + u8 *status) +{ + int ret = 0; + struct sdw_msg rd_msg, wr_msg; + u8 rbuf[1], wbuf[1]; + int impl_def_mask = 0; + +/* + * Handle the Data port0 interrupt separately since the interrupt + * mask and stat register is different than other DPn registers + */ + if (port_num == 0 && sdw_slv->sdw_slv_cap.sdw_dp0_supported) + return sdw_handle_dp0_interrupts(mstr, sdw_slv, status); + + /* Create message for reading the port interrupts */ + wr_msg.ssp_tag = 0; + wr_msg.flag = SDW_MSG_FLAG_WRITE; + wr_msg.addr = SDW_DPN_INTCLEAR + + (SDW_NUM_DATA_PORT_REGISTERS * port_num); + wr_msg.len = 1; + wr_msg.buf = wbuf; + wr_msg.slave_addr = sdw_slv->slv_number; + wr_msg.addr_page1 = 0x0; + wr_msg.addr_page2 = 0x0; + + rd_msg.ssp_tag = 0; + rd_msg.flag = SDW_MSG_FLAG_READ; + rd_msg.addr = SDW_DPN_INTSTAT + + (SDW_NUM_DATA_PORT_REGISTERS * port_num); + rd_msg.len = 1; + rd_msg.buf = rbuf; + rd_msg.slave_addr = sdw_slv->slv_number; + rd_msg.addr_page1 = 0x0; + rd_msg.addr_page2 = 0x0; + ret = sdw_slave_transfer(mstr, &rd_msg, 1); + if (ret != 1) { + ret = -EINVAL; + 
dev_err(&mstr->dev, "Port Status read failed for slv %x port %x\n", + sdw_slv->slv_number, port_num); + goto out; + } + if (rd_msg.buf[0] & SDW_DPN_INTSTAT_TEST_FAIL_MASK) { + dev_err(&mstr->dev, "Test fail for slave %x port %x\n", + sdw_slv->slv_number, port_num); + wr_msg.buf[0] |= SDW_DPN_INTCLEAR_TEST_FAIL_MASK; + } + if (rd_msg.buf[0] & SDW_DPN_INTSTAT_PORT_READY_MASK) { + complete(&sdw_slv->port_ready[port_num]); + wr_msg.buf[0] |= SDW_DPN_INTCLEAR_PORT_READY_MASK; + } + impl_def_mask = SDW_DPN_INTSTAT_IMPDEF1_MASK | + SDW_DPN_INTSTAT_IMPDEF2_MASK | + SDW_DPN_INTSTAT_IMPDEF3_MASK; + if (rd_msg.buf[0] & impl_def_mask) { + /* TODO: Handle implementation defined mask ready */ + wr_msg.buf[0] |= impl_def_mask; + *status = wr_msg.buf[0]; + } + /* Clear and Ack the interrupt */ + ret = sdw_slave_transfer(mstr, &wr_msg, 1); + if (ret != 1) { + ret = -EINVAL; + dev_err(&mstr->dev, "Register transfer failed\n"); + goto out; + } +out: + return ret; + +} +static int sdw_handle_slave_alerts(struct sdw_master *mstr, + struct sdw_slv *sdw_slv) +{ + struct sdw_msg rd_msg[3], wr_msg; + u8 rbuf[3], wbuf[1]; + int i, ret = 0; + int cs_port_mask, cs_port_register, cs_port_start, cs_ports; + struct sdw_impl_def_intr_stat *intr_status; + struct sdw_portn_intr_stat *portn_stat; + u8 port_status[15] = {0}; + u8 control_port_stat = 0; + + + /* Read Instat 1, Instat 2 and Instat 3 registers */ + rd_msg[0].ssp_tag = 0x0; + rd_msg[0].flag = SDW_MSG_FLAG_READ; + rd_msg[0].addr = SDW_SCP_INTSTAT_1; + rd_msg[0].len = 1; + rd_msg[0].buf = &rbuf[0]; + rd_msg[0].slave_addr = sdw_slv->slv_number; + rd_msg[0].addr_page1 = 0x0; + rd_msg[0].addr_page2 = 0x0; + + rd_msg[1].ssp_tag = 0x0; + rd_msg[1].flag = SDW_MSG_FLAG_READ; + rd_msg[1].addr = SDW_SCP_INTSTAT2; + rd_msg[1].len = 1; + rd_msg[1].buf = &rbuf[1]; + rd_msg[1].slave_addr = sdw_slv->slv_number; + rd_msg[1].addr_page1 = 0x0; + rd_msg[1].addr_page2 = 0x0; + + rd_msg[2].ssp_tag = 0x0; + rd_msg[2].flag = SDW_MSG_FLAG_READ; + rd_msg[2].addr 
= SDW_SCP_INTSTAT3; + rd_msg[2].len = 1; + rd_msg[2].buf = &rbuf[2]; + rd_msg[2].slave_addr = sdw_slv->slv_number; + rd_msg[2].addr_page1 = 0x0; + rd_msg[2].addr_page2 = 0x0; + + wr_msg.ssp_tag = 0x0; + wr_msg.flag = SDW_MSG_FLAG_WRITE; + wr_msg.addr = SDW_SCP_INTCLEAR1; + wr_msg.len = 1; + wr_msg.buf = &wbuf[0]; + wr_msg.slave_addr = sdw_slv->slv_number; + wr_msg.addr_page1 = 0x0; + wr_msg.addr_page2 = 0x0; + + ret = sdw_slave_transfer(mstr, rd_msg, 3); + if (ret != 3) { + ret = -EINVAL; + dev_err(&mstr->dev, "Reading of register failed\n"); + goto out; + } + /* First handle parity and bus clash interrupts */ + if (rd_msg[0].buf[0] & SDW_SCP_INTSTAT1_PARITY_MASK) { + dev_err(&mstr->dev, "Parity error detected\n"); + wr_msg.buf[0] |= SDW_SCP_INTCLEAR1_PARITY_MASK; + } + /* Handle bus errors */ + if (rd_msg[0].buf[0] & SDW_SCP_INTSTAT1_BUS_CLASH_MASK) { + dev_err(&mstr->dev, "Bus clash error detected\n"); + wr_msg.buf[0] |= SDW_SCP_INTCLEAR1_BUS_CLASH_MASK; + } + /* Handle implementation defined mask */ + if (rd_msg[0].buf[0] & SDW_SCP_INTSTAT1_IMPL_DEF_MASK) { + wr_msg.buf[0] |= SDW_SCP_INTCLEAR1_IMPL_DEF_MASK; + control_port_stat = (rd_msg[0].buf[0] & + SDW_SCP_INTSTAT1_IMPL_DEF_MASK); + } + + /* Handle Cascaded Port interrupts from Instat_1 registers */ + + /* Number of port status bits in this register */ + cs_ports = 4; + /* Port number starts at in this register */ + cs_port_start = 0; + /* Bit mask for the starting port intr status */ + cs_port_mask = 0x08; + /* Bit mask for the starting port intr status */ + cs_port_register = 0; + + /* Look for cascaded port interrupts, if found handle port + * interrupts. Do this for all the Int_stat registers. 
+ */ + for (i = cs_port_start; i < cs_port_start + cs_ports && + i <= sdw_slv->sdw_slv_cap.num_of_sdw_ports; i++) { + if (rd_msg[cs_port_register].buf[0] & cs_port_mask) { + ret += sdw_handle_port_interrupt(mstr, + sdw_slv, i, &port_status[i]); + } + cs_port_mask = cs_port_mask << 1; + } + + /* + * Handle cascaded interrupts from instat_2 register, + * if no cascaded interrupt from SCP2 cascade move to SCP3 + */ + if (!(rd_msg[0].buf[0] & SDW_SCP_INTSTAT1_SCP2_CASCADE_MASK)) + goto handle_instat_3_register; + + + cs_ports = 7; + cs_port_start = 4; + cs_port_mask = 0x1; + cs_port_register = 1; + for (i = cs_port_start; i < cs_port_start + cs_ports && + i <= sdw_slv->sdw_slv_cap.num_of_sdw_ports; i++) { + + if (rd_msg[cs_port_register].buf[0] & cs_port_mask) { + + ret += sdw_handle_port_interrupt(mstr, + sdw_slv, i, &port_status[i]); + } + cs_port_mask = cs_port_mask << 1; + } + + /* + * Handle cascaded interrupts from instat_2 register, + * if no cascaded interrupt from SCP2 cascade move to impl_def intrs + */ +handle_instat_3_register: + if (!(rd_msg[1].buf[0] & SDW_SCP_INTSTAT2_SCP3_CASCADE_MASK)) + goto handle_impl_def_interrupts; + + cs_ports = 4; + cs_port_start = 11; + cs_port_mask = 0x1; + cs_port_register = 2; + + for (i = cs_port_start; i < cs_port_start + cs_ports && + i <= sdw_slv->sdw_slv_cap.num_of_sdw_ports; i++) { + + if (rd_msg[cs_port_register].buf[0] & cs_port_mask) { + + ret += sdw_handle_port_interrupt(mstr, + sdw_slv, i, &port_status[i]); + } + cs_port_mask = cs_port_mask << 1; + } + +handle_impl_def_interrupts: + + /* + * If slave has not registered for implementation defined + * interrupts, dont read it. 
+ */ + if (!sdw_slv->driver->handle_impl_def_interrupts) + goto ack_interrupts; + + intr_status = kzalloc(sizeof(*intr_status), GFP_KERNEL); + if (!intr_status) + return -ENOMEM; + + portn_stat = kzalloc((sizeof(*portn_stat)) * + sdw_slv->sdw_slv_cap.num_of_sdw_ports, + GFP_KERNEL); + if (!portn_stat) + return -ENOMEM; + + intr_status->portn_stat = portn_stat; + intr_status->control_port_stat = control_port_stat; + + /* Update the implementation defined status to Slave */ + for (i = 1; i < sdw_slv->sdw_slv_cap.num_of_sdw_ports; i++) { + + intr_status->portn_stat[i].status = port_status[i]; + intr_status->portn_stat[i].num = i; + } + + intr_status->port0_stat = port_status[0]; + intr_status->control_port_stat = wr_msg.buf[0]; + + ret = sdw_slv->driver->handle_impl_def_interrupts(sdw_slv, + intr_status); + if (ret) + dev_err(&mstr->dev, "Implementation defined interrupt handling failed\n"); + + kfree(portn_stat); + kfree(intr_status); + +ack_interrupts: + /* Ack the interrupts */ + ret = sdw_slave_transfer(mstr, &wr_msg, 1); + if (ret != 1) { + ret = -EINVAL; + dev_err(&mstr->dev, "Register transfer failed\n"); + } +out: + return 0; +} + +int sdw_en_intr(struct sdw_slv *sdw_slv, int port_num, int mask) +{ + + struct sdw_msg rd_msg, wr_msg; + u8 buf; + int ret; + struct sdw_master *mstr = sdw_slv->mstr; + + rd_msg.addr = wr_msg.addr = SDW_DPN_INTMASK + + (SDW_NUM_DATA_PORT_REGISTERS * port_num); + + /* Create message for enabling the interrupts */ + wr_msg.ssp_tag = 0; + wr_msg.flag = SDW_MSG_FLAG_WRITE; + wr_msg.len = 1; + wr_msg.buf = &buf; + wr_msg.slave_addr = sdw_slv->slv_number; + wr_msg.addr_page1 = 0x0; + wr_msg.addr_page2 = 0x0; + + /* Create message for reading the interrupts for DP0 interrupts*/ + rd_msg.ssp_tag = 0; + rd_msg.flag = SDW_MSG_FLAG_READ; + rd_msg.len = 1; + rd_msg.buf = &buf; + rd_msg.slave_addr = sdw_slv->slv_number; + rd_msg.addr_page1 = 0x0; + rd_msg.addr_page2 = 0x0; + ret = sdw_slave_transfer(mstr, &rd_msg, 1); + if (ret != 1) { + 
dev_err(&mstr->dev, "DPn Intr mask read failed for slave %x\n", + sdw_slv->slv_number); + return -EINVAL; + } + + buf |= mask; + + /* Set the port ready and Test fail interrupt mask as well */ + buf |= SDW_DPN_INTSTAT_TEST_FAIL_MASK; + buf |= SDW_DPN_INTSTAT_PORT_READY_MASK; + ret = sdw_slave_transfer(mstr, &wr_msg, 1); + if (ret != 1) { + dev_err(&mstr->dev, "DPn Intr mask write failed for slave %x\n", + sdw_slv->slv_number); + return -EINVAL; + } + return 0; +} + +static int sdw_en_scp_intr(struct sdw_slv *sdw_slv, int mask) +{ + struct sdw_msg rd_msg, wr_msg; + u8 buf = 0; + int ret; + struct sdw_master *mstr = sdw_slv->mstr; + u16 reg_addr; + + reg_addr = SDW_SCP_INTMASK1; + + rd_msg.addr = wr_msg.addr = reg_addr; + + /* Create message for reading the interrupt mask */ + rd_msg.ssp_tag = 0; + rd_msg.flag = SDW_MSG_FLAG_READ; + rd_msg.len = 1; + rd_msg.buf = &buf; + rd_msg.slave_addr = sdw_slv->slv_number; + rd_msg.addr_page1 = 0x0; + rd_msg.addr_page2 = 0x0; + ret = sdw_slave_transfer(mstr, &rd_msg, 1); + if (ret != 1) { + dev_err(&mstr->dev, "SCP Intr mask read failed for slave %x\n", + sdw_slv->slv_number); + return -EINVAL; + } + + /* Enable the Slave defined interrupts. 
*/ + buf |= mask; + + /* Set the port ready and Test fail interrupt mask as well */ + buf |= SDW_SCP_INTMASK1_BUS_CLASH_MASK; + buf |= SDW_SCP_INTMASK1_PARITY_MASK; + + /* Create message for enabling the interrupts */ + wr_msg.ssp_tag = 0; + wr_msg.flag = SDW_MSG_FLAG_WRITE; + wr_msg.len = 1; + wr_msg.buf = &buf; + wr_msg.slave_addr = sdw_slv->slv_number; + wr_msg.addr_page1 = 0x0; + wr_msg.addr_page2 = 0x0; + ret = sdw_slave_transfer(mstr, &wr_msg, 1); + if (ret != 1) { + dev_err(&mstr->dev, "SCP Intr mask write failed for slave %x\n", + sdw_slv->slv_number); + return -EINVAL; + } + + /* Return if DP0 is not present */ + if (!sdw_slv->sdw_slv_cap.sdw_dp0_supported) + return 0; + + + reg_addr = SDW_DP0_INTMASK; + rd_msg.addr = wr_msg.addr = reg_addr; + mask = sdw_slv->sdw_slv_cap.sdw_dp0_cap->imp_def_intr_mask; + buf = 0; + + /* Create message for reading the interrupt mask */ + /* Create message for reading the interrupt mask */ + rd_msg.ssp_tag = 0; + rd_msg.flag = SDW_MSG_FLAG_READ; + rd_msg.len = 1; + rd_msg.buf = &buf; + rd_msg.slave_addr = sdw_slv->slv_number; + rd_msg.addr_page1 = 0x0; + rd_msg.addr_page2 = 0x0; + ret = sdw_slave_transfer(mstr, &rd_msg, 1); + if (ret != 1) { + dev_err(&mstr->dev, "DP0 Intr mask read failed for slave %x\n", + sdw_slv->slv_number); + return -EINVAL; + } + + /* Enable the Slave defined interrupts. 
*/ + buf |= mask; + + /* Set the port ready and Test fail interrupt mask as well */ + buf |= SDW_DP0_INTSTAT_TEST_FAIL_MASK; + buf |= SDW_DP0_INTSTAT_PORT_READY_MASK; + buf |= SDW_DP0_INTSTAT_BRA_FAILURE_MASK; + + wr_msg.ssp_tag = 0; + wr_msg.flag = SDW_MSG_FLAG_WRITE; + wr_msg.len = 1; + wr_msg.buf = &buf; + wr_msg.slave_addr = sdw_slv->slv_number; + wr_msg.addr_page1 = 0x0; + wr_msg.addr_page2 = 0x0; + + ret = sdw_slave_transfer(mstr, &wr_msg, 1); + if (ret != 1) { + dev_err(&mstr->dev, "DP0 Intr mask write failed for slave %x\n", + sdw_slv->slv_number); + return -EINVAL; + } + return 0; +} + +static int sdw_prog_slv(struct sdw_slv *sdw_slv) +{ + + struct sdw_slv_capabilities *cap; + int ret, i; + struct sdw_slv_dpn_capabilities *dpn_cap; + struct sdw_master *mstr = sdw_slv->mstr; + + if (!sdw_slv->slave_cap_updated) + return 0; + cap = &sdw_slv->sdw_slv_cap; + + /* Enable DP0 and SCP interrupts */ + ret = sdw_en_scp_intr(sdw_slv, cap->scp_impl_def_intr_mask); + + /* Failure should never happen, even if it happens we continue */ + if (ret) + dev_err(&mstr->dev, "SCP program failed\n"); + + for (i = 0; i < cap->num_of_sdw_ports; i++) { + dpn_cap = &cap->sdw_dpn_cap[i]; + ret = sdw_en_intr(sdw_slv, (i + 1), + dpn_cap->imp_def_intr_mask); + + if (ret) + break; + } + return ret; +} + + +static void sdw_send_slave_status(struct sdw_slv *slave, + enum sdw_slave_status *status) +{ + struct sdw_slave_driver *slv_drv = slave->driver; + + if (slv_drv && slv_drv->update_slv_status) + slv_drv->update_slv_status(slave, status); +} + +static int sdw_wait_for_deprepare(struct sdw_slv *slave) +{ + int ret; + struct sdw_msg msg; + u8 buf[1] = {0}; + int timeout = 0; + struct sdw_master *mstr = slave->mstr; + + /* Create message to read clock stop status, its broadcast message. 
*/ + buf[0] = 0xFF; + + msg.ssp_tag = 0; + msg.flag = SDW_MSG_FLAG_READ; + msg.len = 1; + msg.buf = &buf[0]; + msg.slave_addr = slave->slv_number; + msg.addr_page1 = 0x0; + msg.addr_page2 = 0x0; + msg.addr = SDW_SCP_STAT; + /* + * Read the ClockStopNotFinished bit from the SCP_Stat register + * of particular Slave to make sure that clock stop prepare is done + */ + do { + /* + * Ideally this should not fail, but even if it fails + * in exceptional situation, we go ahead for clock stop + */ + ret = sdw_slave_transfer_nopm(mstr, &msg, 1); + + if (ret != 1) { + WARN_ONCE(1, "Clock stop status read failed\n"); + break; + } + + if (!(buf[0] & SDW_SCP_STAT_CLK_STP_NF_MASK)) + break; + + /* + * TODO: Need to find from spec what is requirement. + * Since we are in suspend we should not sleep for more + * Ideally Slave should be ready to stop clock in less than + * few ms. + * So sleep less and increase loop time. This is not + * harmful, since if Slave is ready loop will terminate. + * + */ + msleep(2); + timeout++; + + } while (timeout != 500); + + if (!(buf[0] & SDW_SCP_STAT_CLK_STP_NF_MASK)) + + dev_info(&mstr->dev, "Clock stop prepare done\n"); + else + WARN_ONCE(1, "Clk stp deprepare failed for slave %d\n", + slave->slv_number); + + return -EINVAL; +} + +static void sdw_prep_slave_for_clk_stp(struct sdw_master *mstr, + struct sdw_slv *slave, + enum sdw_clk_stop_mode clock_stop_mode, + bool prep) +{ + bool wake_en; + struct sdw_slv_capabilities *cap; + u8 buf[1] = {0}; + struct sdw_msg msg; + int ret; + + cap = &slave->sdw_slv_cap; + + /* Set the wakeup enable based on Slave capability */ + wake_en = !cap->wake_up_unavailable; + + if (prep) { + /* Even if its simplified clock stop prepare, + * setting prepare bit wont harm + */ + buf[0] |= (1 << SDW_SCP_SYSTEMCTRL_CLK_STP_PREP_SHIFT); + buf[0] |= clock_stop_mode << + SDW_SCP_SYSTEMCTRL_CLK_STP_MODE_SHIFT; + buf[0] |= wake_en << SDW_SCP_SYSTEMCTRL_WAKE_UP_EN_SHIFT; + } else + buf[0] = 0; + + msg.ssp_tag = 0; + msg.flag 
= SDW_MSG_FLAG_WRITE; + msg.len = 1; + msg.buf = &buf[0]; + msg.slave_addr = slave->slv_number; + msg.addr_page1 = 0x0; + msg.addr_page2 = 0x0; + msg.addr = SDW_SCP_SYSTEMCTRL; + + /* + * We are calling NOPM version of the transfer API, because + * Master controllers calls this from the suspend handler, + * so if we call the normal transfer API, it tries to resume + * controller, which result in deadlock + */ + + ret = sdw_slave_transfer_nopm(mstr, &msg, 1); + /* We should continue even if it fails for some Slave */ + if (ret != 1) + WARN_ONCE(1, "Clock Stop prepare failed for slave %d\n", + slave->slv_number); +} + +static int sdw_check_for_prep_bit(struct sdw_slv *slave) +{ + u8 buf[1] = {0}; + struct sdw_msg msg; + int ret; + struct sdw_master *mstr = slave->mstr; + + msg.ssp_tag = 0; + msg.flag = SDW_MSG_FLAG_READ; + msg.len = 1; + msg.buf = &buf[0]; + msg.slave_addr = slave->slv_number; + msg.addr_page1 = 0x0; + msg.addr_page2 = 0x0; + msg.addr = SDW_SCP_SYSTEMCTRL; + + ret = sdw_slave_transfer_nopm(mstr, &msg, 1); + /* We should continue even if it fails for some Slave */ + if (ret != 1) { + dev_err(&mstr->dev, "SCP_SystemCtrl read failed for Slave %d\n", + slave->slv_number); + return -EINVAL; + + } + return (buf[0] & SDW_SCP_SYSTEMCTRL_CLK_STP_PREP_MASK); + +} + +static int sdw_slv_deprepare_clk_stp1(struct sdw_slv *slave) +{ + struct sdw_slv_capabilities *cap; + int ret; + struct sdw_master *mstr = slave->mstr; + + cap = &slave->sdw_slv_cap; + + /* + * Slave might have enumerated 1st time or from clock stop mode 1 + * return if Slave doesn't require deprepare + */ + if (!cap->clk_stp1_deprep_required) + return 0; + + /* + * If Slave requires de-prepare after exiting from Clock Stop + * mode 1, than check for ClockStopPrepare bit in SystemCtrl register + * if its 1, de-prepare Slave from clock stop prepare, else + * return + */ + ret = sdw_check_for_prep_bit(slave); + /* If prepare bit is not set, return without error */ + if (!ret) + return 0; + + /* If 
error in reading register, return with error */ + if (ret < 0) + return ret; + + /* + * Call the pre clock stop prepare, if Slave requires. + */ + if (slave->driver && slave->driver->pre_clk_stop_prep) { + ret = slave->driver->pre_clk_stop_prep(slave, + cap->clock_stop1_mode_supported, false); + if (ret) { + dev_warn(&mstr->dev, "Pre de-prepare failed for Slave %d\n", + slave->slv_number); + return ret; + } + } + + sdw_prep_slave_for_clk_stp(slave->mstr, slave, + cap->clock_stop1_mode_supported, false); + + /* Make sure NF = 0 for deprepare to complete */ + ret = sdw_wait_for_deprepare(slave); + + /* Return in de-prepare unsuccessful */ + if (ret) + return ret; + + if (slave->driver && slave->driver->post_clk_stop_prep) { + ret = slave->driver->post_clk_stop_prep(slave, + cap->clock_stop1_mode_supported, false); + + if (ret) + dev_err(&mstr->dev, "Post de-prepare failed for Slave %d\n", + slave->slv_number); + } + + return ret; +} + +static void handle_slave_status(struct kthread_work *work) +{ + int i, ret = 0; + struct sdw_slv_status *status, *__status__; + struct sdw_bus *bus = + container_of(work, struct sdw_bus, kwork); + struct sdw_master *mstr = bus->mstr; + unsigned long flags; + bool slave_present = 0; + + /* Handle the new attached slaves to the bus. Register new slave + * to the bus. + */ + list_for_each_entry_safe(status, __status__, &bus->status_list, node) { + if (status->status[0] == SDW_SLAVE_STAT_ATTACHED_OK) { + ret += sdw_register_slave(mstr); + if (ret) + /* Even if adding new slave fails, we will + * continue. 
+ */ + dev_err(&mstr->dev, "Registering new slave failed\n"); + } + for (i = 1; i <= SOUNDWIRE_MAX_DEVICES; i++) { + slave_present = false; + if (status->status[i] == SDW_SLAVE_STAT_NOT_PRESENT && + mstr->sdw_addr[i].assigned == true) { + /* Logical address was assigned to slave, but + * now its down, so mark it as not present + */ + mstr->sdw_addr[i].status = + SDW_SLAVE_STAT_NOT_PRESENT; + slave_present = true; + } + + else if (status->status[i] == SDW_SLAVE_STAT_ALERT && + mstr->sdw_addr[i].assigned == true) { + ret = 0; + /* Handle slave alerts */ + mstr->sdw_addr[i].status = SDW_SLAVE_STAT_ALERT; + ret = sdw_handle_slave_alerts(mstr, + mstr->sdw_addr[i].slave); + if (ret) + dev_err(&mstr->dev, "Handle slave alert failed for Slave %d\n", i); + + slave_present = true; + + + } else if (status->status[i] == + SDW_SLAVE_STAT_ATTACHED_OK && + mstr->sdw_addr[i].assigned == true) { + + sdw_prog_slv(mstr->sdw_addr[i].slave); + + mstr->sdw_addr[i].status = + SDW_SLAVE_STAT_ATTACHED_OK; + ret = sdw_slv_deprepare_clk_stp1( + mstr->sdw_addr[i].slave); + + /* + * If depreparing Slave fails, no need to + * reprogram Slave, this should never happen + * in ideal case. 
+ */ + if (ret) + continue; + slave_present = true; + } + + if (!slave_present) + continue; + + sdw_send_slave_status(mstr->sdw_addr[i].slave, + &mstr->sdw_addr[i].status); + } + spin_lock_irqsave(&bus->spinlock, flags); + list_del(&status->node); + spin_unlock_irqrestore(&bus->spinlock, flags); + kfree(status); + } +} + +static int sdw_register_master(struct sdw_master *mstr) +{ + int ret = 0; + int i; + struct sdw_bus *sdw_bus; + + /* Can't register until after driver model init */ + if (unlikely(WARN_ON(!sdwint_bus_type.p))) { + ret = -EAGAIN; + goto bus_init_not_done; + } + /* Sanity checks */ + if (unlikely(mstr->name[0] == '\0')) { + pr_err("sdw-core: Attempt to register an master with no name!\n"); + ret = -EINVAL; + goto mstr_no_name; + } + for (i = 0; i <= SOUNDWIRE_MAX_DEVICES; i++) + mstr->sdw_addr[i].slv_number = i; + + rt_mutex_init(&mstr->bus_lock); + INIT_LIST_HEAD(&mstr->slv_list); + INIT_LIST_HEAD(&mstr->mstr_rt_list); + + sdw_bus = kzalloc(sizeof(struct sdw_bus), GFP_KERNEL); + if (!sdw_bus) + goto bus_alloc_failed; + sdw_bus->mstr = mstr; + init_completion(&sdw_bus->async_data.xfer_complete); + + mutex_lock(&sdw_core.core_lock); + list_add_tail(&sdw_bus->bus_node, &sdw_core.bus_list); + mutex_unlock(&sdw_core.core_lock); + + dev_set_name(&mstr->dev, "sdw-%d", mstr->nr); + mstr->dev.bus = &sdwint_bus_type; + mstr->dev.type = &sdw_mstr_type; + + ret = device_register(&mstr->dev); + if (ret) + goto out_list; + kthread_init_worker(&sdw_bus->kworker); + sdw_bus->status_thread = kthread_run(kthread_worker_fn, + &sdw_bus->kworker, "%s", + dev_name(&mstr->dev)); + if (IS_ERR(sdw_bus->status_thread)) { + dev_err(&mstr->dev, "error: failed to create status message task\n"); + ret = PTR_ERR(sdw_bus->status_thread); + goto task_failed; + } + kthread_init_work(&sdw_bus->kwork, handle_slave_status); + INIT_LIST_HEAD(&sdw_bus->status_list); + spin_lock_init(&sdw_bus->spinlock); + ret = sdw_mstr_bw_init(sdw_bus); + if (ret) { + dev_err(&mstr->dev, "error: Failed 
to init mstr bw\n"); + goto mstr_bw_init_failed; + } + dev_dbg(&mstr->dev, "master [%s] registered\n", mstr->name); + + return 0; + +mstr_bw_init_failed: +task_failed: + device_unregister(&mstr->dev); +out_list: + mutex_lock(&sdw_core.core_lock); + list_del(&sdw_bus->bus_node); + mutex_unlock(&sdw_core.core_lock); + kfree(sdw_bus); +bus_alloc_failed: +mstr_no_name: +bus_init_not_done: + mutex_lock(&sdw_core.core_lock); + idr_remove(&sdw_core.idr, mstr->nr); + mutex_unlock(&sdw_core.core_lock); + return ret; +} + +/** + * sdw_master_update_slv_status: Report the status of slave to the bus driver. + * master calls this function based on the + * interrupt it gets once the slave changes its + * state. + * @mstr: Master handle for which status is reported. + * @status: Array of status of each slave. + */ +int sdw_master_update_slv_status(struct sdw_master *mstr, + struct sdw_status *status) +{ + struct sdw_bus *bus = NULL; + struct sdw_slv_status *slv_status; + unsigned long flags; + + list_for_each_entry(bus, &sdw_core.bus_list, bus_node) { + if (bus->mstr == mstr) + break; + } + /* This is master is not registered with bus driver */ + if (!bus) { + dev_info(&mstr->dev, "Master not registered with bus\n"); + return 0; + } + slv_status = kzalloc(sizeof(struct sdw_slv_status), GFP_ATOMIC); + memcpy(slv_status->status, status, sizeof(struct sdw_status)); + + spin_lock_irqsave(&bus->spinlock, flags); + list_add_tail(&slv_status->node, &bus->status_list); + spin_unlock_irqrestore(&bus->spinlock, flags); + + kthread_queue_work(&bus->kworker, &bus->kwork); + return 0; +} +EXPORT_SYMBOL_GPL(sdw_master_update_slv_status); + +/** + * sdw_add_master_controller - declare sdw master, use dynamic bus number + * @master: the master to add + * Context: can sleep + * + * This routine is used to declare an sdw master when its bus number + * doesn't matter or when its bus number is specified by an dt alias. 
+ * Examples of bases when the bus number doesn't matter: sdw masters + * dynamically added by USB links or PCI plugin cards. + * + * When this returns zero, a new bus number was allocated and stored + * in mstr->nr, and the specified master became available for slaves. + * Otherwise, a negative errno value is returned. + */ +int sdw_add_master_controller(struct sdw_master *mstr) +{ + int id; + + mutex_lock(&sdw_core.core_lock); + + id = idr_alloc(&sdw_core.idr, mstr, + sdw_core.first_dynamic_bus_num, 0, GFP_KERNEL); + mutex_unlock(&sdw_core.core_lock); + if (id < 0) + return id; + + mstr->nr = id; + + return sdw_register_master(mstr); +} +EXPORT_SYMBOL_GPL(sdw_add_master_controller); + +static void sdw_unregister_slave(struct sdw_slv *sdw_slv) +{ + + struct sdw_master *mstr; + + mstr = sdw_slv->mstr; + sdw_lock_mstr(mstr); + list_del(&sdw_slv->node); + sdw_unlock_mstr(mstr); + mstr->sdw_addr[sdw_slv->slv_number].assigned = false; + memset(mstr->sdw_addr[sdw_slv->slv_number].dev_id, 0x0, 6); + device_unregister(&sdw_slv->dev); + kfree(sdw_slv); +} + +static int __unregister_slave(struct device *dev, void *dummy) +{ + struct sdw_slv *slave = sdw_slave_verify(dev); + + if (slave && strcmp(slave->name, "dummy")) + sdw_unregister_slave(slave); + return 0; +} + +/** + * sdw_del_master_controller - unregister SDW master + * @mstr: the master being unregistered + * Context: can sleep + * + * This unregisters an SDW master which was previously registered + * by @sdw_add_master_controller or @sdw_add_master_controller. + */ +void sdw_del_master_controller(struct sdw_master *mstr) +{ + struct sdw_master *found; + + /* First make sure that this master was ever added */ + mutex_lock(&sdw_core.core_lock); + found = idr_find(&sdw_core.idr, mstr->nr); + mutex_unlock(&sdw_core.core_lock); + + if (found != mstr) { + pr_debug("sdw-core: attempting to delete unregistered master [%s]\n", mstr->name); + return; + } + /* Detach any active slaves. 
This can't fail, thus we do not + * check the returned value. + */ + device_for_each_child(&mstr->dev, NULL, __unregister_slave); + + /* device name is gone after device_unregister */ + dev_dbg(&mstr->dev, "mstrter [%s] unregistered\n", mstr->name); + + /* wait until all references to the device are gone + * + * FIXME: This is old code and should ideally be replaced by an + * alternative which results in decoupling the lifetime of the struct + * device from the sdw_master, like spi or netdev do. Any solution + * should be thoroughly tested with DEBUG_KOBJECT_RELEASE enabled! + */ + init_completion(&mstr->slv_released); + device_unregister(&mstr->dev); + wait_for_completion(&mstr->slv_released); + + /* free bus id */ + mutex_lock(&sdw_core.core_lock); + idr_remove(&sdw_core.idr, mstr->nr); + mutex_unlock(&sdw_core.core_lock); + + /* Clear the device structure in case this mstrter is ever going to be + added again */ + memset(&mstr->dev, 0, sizeof(mstr->dev)); +} +EXPORT_SYMBOL_GPL(sdw_del_master_controller); + +/** + * sdw_slave_xfer_bra_block: Transfer the data block using the BTP/BRA + * protocol. + * @mstr: SoundWire Master Master + * @block: Data block to be transferred. + */ +int sdw_slave_xfer_bra_block(struct sdw_master *mstr, + struct sdw_bra_block *block) +{ + struct sdw_bus *sdw_mstr_bs = NULL; + struct sdw_mstr_driver *ops = NULL; + int ret; + + /* + * This API will be called by slave/codec + * when it needs to xfer firmware to + * its memory or perform bulk read/writes of registers. + */ + + /* + * Acquire core lock + * TODO: Acquire Master lock inside core lock + * similar way done in upstream. currently + * keeping it as core lock + */ + mutex_lock(&sdw_core.core_lock); + + /* Get master data structure */ + list_for_each_entry(sdw_mstr_bs, &sdw_core.bus_list, bus_node) { + /* Match master structure pointer */ + if (sdw_mstr_bs->mstr != mstr) + continue; + + break; + } + + /* + * Here assumption is made that complete SDW bandwidth is used + * by BRA. 
So bus will return -EBUSY if any active stream + * is running on given master. + * TODO: In final implementation extra bandwidth will be always + * allocated for BRA. In that case all the computation of clock, + * frame shape, transport parameters for DP0 will be done + * considering BRA feature. + */ + if (!list_empty(&mstr->mstr_rt_list)) { + + /* + * Currently not allowing BRA when any + * active stream on master, returning -EBUSY + */ + + /* Release lock */ + mutex_unlock(&sdw_core.core_lock); + return -EBUSY; + } + + /* Get master driver ops */ + ops = sdw_mstr_bs->mstr->driver; + + /* + * Check whether Master is supporting bulk transfer. If not, then + * bus will use alternate method of performing BRA request using + * normal register read/write API. + * TODO: Currently if Master is not supporting BRA transfers, bus + * returns error. Bus driver to extend support for normal register + * read/write as alternate method. + */ + if (!ops->mstr_ops->xfer_bulk) + return -EINVAL; + + /* Data port Programming (ON) */ + ret = sdw_bus_bra_xport_config(sdw_mstr_bs, block, true); + if (ret < 0) { + dev_err(&mstr->dev, "BRA: Xport parameter config failed ret=%d\n", ret); + goto error; + } + + /* Bulk Setup */ + ret = ops->mstr_ops->xfer_bulk(mstr, block); + if (ret < 0) { + dev_err(&mstr->dev, "BRA: Transfer failed ret=%d\n", ret); + goto error; + } + + /* Data port Programming (OFF) */ + ret = sdw_bus_bra_xport_config(sdw_mstr_bs, block, false); + if (ret < 0) { + dev_err(&mstr->dev, "BRA: Xport parameter de-config failed ret=%d\n", ret); + goto error; + } + +error: + /* Release lock */ + mutex_unlock(&sdw_core.core_lock); + + return ret; +} +EXPORT_SYMBOL_GPL(sdw_slave_xfer_bra_block); + +/* + * An sdw_driver is used with one or more sdw_slv (slave) nodes to access + * sdw slave chips, on a bus instance associated with some sdw_master. 
+ */ +int __sdw_mstr_driver_register(struct module *owner, + struct sdw_mstr_driver *driver) +{ + int res; + + /* Can't register until after driver model init */ + if (unlikely(WARN_ON(!sdwint_bus_type.p))) + return -EAGAIN; + + /* add the driver to the list of sdw drivers in the driver core */ + driver->driver.owner = owner; + driver->driver.bus = &sdwint_bus_type; + + /* When registration returns, the driver core + * will have called probe() for all matching-but-unbound slaves. + */ + res = driver_register(&driver->driver); + if (res) + return res; + + pr_debug("sdw-core: driver [%s] registered\n", driver->driver.name); + + return 0; +} +EXPORT_SYMBOL_GPL(__sdw_mstr_driver_register); + +void sdw_mstr_driver_unregister(struct sdw_mstr_driver *driver) +{ + driver_unregister(&driver->driver); +} +EXPORT_SYMBOL_GPL(sdw_mstr_driver_unregister); + +void sdw_slave_driver_unregister(struct sdw_slave_driver *driver) +{ + driver_unregister(&driver->driver); +} +EXPORT_SYMBOL_GPL(sdw_slave_driver_unregister); + +/* + * An sdw_driver is used with one or more sdw_slv (slave) nodes to access + * sdw slave chips, on a bus instance associated with some sdw_master. + */ +int __sdw_slave_driver_register(struct module *owner, + struct sdw_slave_driver *driver) +{ + int res; + /* Can't register until after driver model init */ + if (unlikely(WARN_ON(!sdwint_bus_type.p))) + return -EAGAIN; + + /* add the driver to the list of sdw drivers in the driver core */ + driver->driver.owner = owner; + driver->driver.bus = &sdwint_bus_type; + + /* When registration returns, the driver core + * will have called probe() for all matching-but-unbound slaves. 
+ */ + res = driver_register(&driver->driver); + if (res) + return res; + pr_debug("sdw-core: driver [%s] registered\n", driver->driver.name); + + return 0; +} +EXPORT_SYMBOL_GPL(__sdw_slave_driver_register); + +int sdw_register_slave_capabilities(struct sdw_slv *sdw, + struct sdw_slv_capabilities *cap) +{ + struct sdw_slv_capabilities *slv_cap; + struct sdw_slv_dpn_capabilities *slv_dpn_cap, *dpn_cap; + struct port_audio_mode_properties *prop, *slv_prop; + int i, j; + int ret = 0; + + slv_cap = &sdw->sdw_slv_cap; + + slv_cap->wake_up_unavailable = cap->wake_up_unavailable; + slv_cap->wake_up_unavailable = cap->wake_up_unavailable; + slv_cap->test_mode_supported = cap->test_mode_supported; + slv_cap->clock_stop1_mode_supported = cap->clock_stop1_mode_supported; + slv_cap->simplified_clock_stop_prepare = + cap->simplified_clock_stop_prepare; + slv_cap->scp_impl_def_intr_mask = cap->scp_impl_def_intr_mask; + + slv_cap->highphy_capable = cap->highphy_capable; + slv_cap->paging_supported = cap->paging_supported; + slv_cap->bank_delay_support = cap->bank_delay_support; + slv_cap->port_15_read_behavior = cap->port_15_read_behavior; + slv_cap->sdw_dp0_supported = cap->sdw_dp0_supported; + slv_cap->num_of_sdw_ports = cap->num_of_sdw_ports; + slv_cap->sdw_dpn_cap = devm_kzalloc(&sdw->dev, + ((sizeof(struct sdw_slv_dpn_capabilities)) * + cap->num_of_sdw_ports), GFP_KERNEL); + if (!slv_cap->sdw_dpn_cap) + return -ENOMEM; + + for (i = 0; i < cap->num_of_sdw_ports; i++) { + dpn_cap = &cap->sdw_dpn_cap[i]; + slv_dpn_cap = &slv_cap->sdw_dpn_cap[i]; + slv_dpn_cap->port_direction = dpn_cap->port_direction; + slv_dpn_cap->port_number = dpn_cap->port_number; + slv_dpn_cap->max_word_length = dpn_cap->max_word_length; + slv_dpn_cap->min_word_length = dpn_cap->min_word_length; + slv_dpn_cap->num_word_length = dpn_cap->num_word_length; + if (NULL == dpn_cap->word_length_buffer) + slv_dpn_cap->word_length_buffer = + dpn_cap->word_length_buffer; + else { + slv_dpn_cap->word_length_buffer = 
+ devm_kzalloc(&sdw->dev, + dpn_cap->num_word_length * + (sizeof(unsigned int)), GFP_KERNEL); + if (!slv_dpn_cap->word_length_buffer) + return -ENOMEM; + memcpy(slv_dpn_cap->word_length_buffer, + dpn_cap->word_length_buffer, + dpn_cap->num_word_length * + (sizeof(unsigned int))); + } + slv_dpn_cap->dpn_type = dpn_cap->dpn_type; + slv_dpn_cap->dpn_grouping = dpn_cap->dpn_grouping; + slv_dpn_cap->prepare_ch = dpn_cap->prepare_ch; + slv_dpn_cap->imp_def_intr_mask = dpn_cap->imp_def_intr_mask; + slv_dpn_cap->min_ch_num = dpn_cap->min_ch_num; + slv_dpn_cap->max_ch_num = dpn_cap->max_ch_num; + slv_dpn_cap->num_ch_supported = dpn_cap->num_ch_supported; + if (NULL == slv_dpn_cap->ch_supported) + slv_dpn_cap->ch_supported = dpn_cap->ch_supported; + else { + slv_dpn_cap->ch_supported = + devm_kzalloc(&sdw->dev, + dpn_cap->num_ch_supported * + (sizeof(unsigned int)), GFP_KERNEL); + if (!slv_dpn_cap->ch_supported) + return -ENOMEM; + memcpy(slv_dpn_cap->ch_supported, + dpn_cap->ch_supported, + dpn_cap->num_ch_supported * + (sizeof(unsigned int))); + } + slv_dpn_cap->port_flow_mode_mask = + dpn_cap->port_flow_mode_mask; + slv_dpn_cap->block_packing_mode_mask = + dpn_cap->block_packing_mode_mask; + slv_dpn_cap->port_encoding_type_mask = + dpn_cap->port_encoding_type_mask; + slv_dpn_cap->num_audio_modes = dpn_cap->num_audio_modes; + + slv_dpn_cap->mode_properties = devm_kzalloc(&sdw->dev, + ((sizeof(struct port_audio_mode_properties)) * + dpn_cap->num_audio_modes), GFP_KERNEL); + if (!slv_dpn_cap->mode_properties) + return -ENOMEM; + + for (j = 0; j < dpn_cap->num_audio_modes; j++) { + prop = &dpn_cap->mode_properties[j]; + slv_prop = &slv_dpn_cap->mode_properties[j]; + slv_prop->max_frequency = prop->max_frequency; + slv_prop->min_frequency = prop->min_frequency; + slv_prop->num_freq_configs = prop->num_freq_configs; + if (NULL == slv_prop->freq_supported) + slv_prop->freq_supported = + prop->freq_supported; + else { + slv_prop->freq_supported = + devm_kzalloc(&sdw->dev, + 
prop->num_freq_configs * + (sizeof(unsigned int)), GFP_KERNEL); + if (!slv_prop->freq_supported) + return -ENOMEM; + memcpy(slv_prop->freq_supported, + prop->freq_supported, + prop->num_freq_configs * + (sizeof(unsigned int))); + } + slv_prop->glitchless_transitions_mask + = prop->glitchless_transitions_mask; + slv_prop->max_sampling_frequency = + prop->max_sampling_frequency; + slv_prop->min_sampling_frequency = + prop->min_sampling_frequency; + slv_prop->num_sampling_freq_configs = + prop->num_sampling_freq_configs; + if (NULL == prop->sampling_freq_config) + slv_prop->sampling_freq_config = + prop->sampling_freq_config; + else { + slv_prop->sampling_freq_config = + devm_kzalloc(&sdw->dev, + prop->num_sampling_freq_configs * + (sizeof(unsigned int)), GFP_KERNEL); + if (!slv_prop->sampling_freq_config) + return -ENOMEM; + memcpy(slv_prop->sampling_freq_config, + prop->sampling_freq_config, + prop->num_sampling_freq_configs * + (sizeof(unsigned int))); + } + + slv_prop->ch_prepare_behavior = + prop->ch_prepare_behavior; + } + } + ret = sdw_prog_slv(sdw); + if (ret) + return ret; + sdw->slave_cap_updated = true; + return 0; +} +EXPORT_SYMBOL_GPL(sdw_register_slave_capabilities); + +static int sdw_get_stream_tag(char *key, int *stream_tag) +{ + int i; + int ret = -EINVAL; + struct sdw_runtime *sdw_rt; + struct sdw_stream_tag *stream_tags = sdw_core.stream_tags; + + /* If stream tag is already allocated return that after incrementing + * reference count. This is only possible if key is provided. 
+ */ + mutex_lock(&sdw_core.core_lock); + if (!key) + goto key_check_not_required; + for (i = 0; i < SDW_NUM_STREAM_TAGS; i++) { + if (!(strcmp(stream_tags[i].key, key))) { + stream_tags[i].ref_count++; + *stream_tag = stream_tags[i].stream_tag; + mutex_unlock(&sdw_core.core_lock); + return 0; + } + } +key_check_not_required: + for (i = 0; i < SDW_NUM_STREAM_TAGS; i++) { + if (!stream_tags[i].ref_count) { + *stream_tag = stream_tags[i].stream_tag; + mutex_init(&stream_tags[i].stream_lock); + sdw_rt = kzalloc(sizeof(struct sdw_runtime), + GFP_KERNEL); + if (!sdw_rt) { + ret = -ENOMEM; + mutex_unlock(&sdw_core.core_lock); + goto out; + } + stream_tags[i].ref_count++; + INIT_LIST_HEAD(&sdw_rt->slv_rt_list); + INIT_LIST_HEAD(&sdw_rt->mstr_rt_list); + sdw_rt->stream_state = SDW_STATE_INIT_STREAM_TAG; + stream_tags[i].sdw_rt = sdw_rt; + if (key) + strlcpy(stream_tags[i].key, key, + SDW_MAX_STREAM_TAG_KEY_SIZE); + mutex_unlock(&sdw_core.core_lock); + return 0; + } + } + mutex_unlock(&sdw_core.core_lock); +out: + return ret; +} + +void sdw_release_stream_tag(int stream_tag) +{ + int i; + struct sdw_stream_tag *stream_tags = sdw_core.stream_tags; + + mutex_lock(&sdw_core.core_lock); + for (i = 0; i < SDW_NUM_STREAM_TAGS; i++) { + if (stream_tag == stream_tags[i].stream_tag) { + stream_tags[i].ref_count--; + if (stream_tags[i].ref_count == 0) { + kfree(stream_tags[i].sdw_rt); + memset(stream_tags[i].key, 0x0, + SDW_MAX_STREAM_TAG_KEY_SIZE); + } + } + } + mutex_unlock(&sdw_core.core_lock); +} +EXPORT_SYMBOL_GPL(sdw_release_stream_tag); + +/** + * sdw_alloc_stream_tag: Assign the stream tag for the unique streams + * between master and slave device. + * Normally master master will request for the + * stream tag for the stream between master + * and slave device. It programs the same stream + * tag to the slave device. Stream tag is unique + * for all the streams between masters and slave + * across SoCs. + * @guid: Group of the device port. 
All the ports of the device with + * part of same stream will have same guid. + * + * @stream:tag: Stream tag returned by bus driver. + */ +int sdw_alloc_stream_tag(char *guid, int *stream_tag) +{ + int ret = 0; + + ret = sdw_get_stream_tag(guid, stream_tag); + if (ret) { + pr_err("Stream tag assignment failed\n"); + goto out; + } + +out: + return ret; +} +EXPORT_SYMBOL_GPL(sdw_alloc_stream_tag); + +static struct sdw_mstr_runtime *sdw_get_mstr_rt(struct sdw_runtime *sdw_rt, + struct sdw_master *mstr) { + + struct sdw_mstr_runtime *mstr_rt; + int ret = 0; + + list_for_each_entry(mstr_rt, &sdw_rt->mstr_rt_list, mstr_sdw_node) { + if (mstr_rt->mstr == mstr) + return mstr_rt; + } + + /* Allocate sdw_mstr_runtime structure */ + mstr_rt = kzalloc(sizeof(struct sdw_mstr_runtime), GFP_KERNEL); + if (!mstr_rt) { + ret = -ENOMEM; + goto out; + } + + /* Initialize sdw_mstr_runtime structure */ + INIT_LIST_HEAD(&mstr_rt->port_rt_list); + INIT_LIST_HEAD(&mstr_rt->slv_rt_list); + list_add_tail(&mstr_rt->mstr_sdw_node, &sdw_rt->mstr_rt_list); + list_add_tail(&mstr_rt->mstr_node, &mstr->mstr_rt_list); + mstr_rt->rt_state = SDW_STATE_INIT_RT; + mstr_rt->mstr = mstr; +out: + return mstr_rt; +} + +static struct sdw_slave_runtime *sdw_config_slave_stream( + struct sdw_slv *slave, + struct sdw_stream_config *stream_config, + struct sdw_runtime *sdw_rt) +{ + struct sdw_slave_runtime *slv_rt; + int ret = 0; + struct sdw_stream_params *str_p; + + slv_rt = kzalloc(sizeof(struct sdw_slave_runtime), GFP_KERNEL); + if (!slv_rt) { + ret = -ENOMEM; + goto out; + } + slv_rt->slave = slave; + str_p = &slv_rt->stream_params; + slv_rt->direction = stream_config->direction; + slv_rt->rt_state = SDW_STATE_CONFIG_RT; + str_p->rate = stream_config->frame_rate; + str_p->channel_count = stream_config->channel_count; + str_p->bps = stream_config->bps; + INIT_LIST_HEAD(&slv_rt->port_rt_list); +out: + return slv_rt; +} + +static void sdw_release_mstr_stream(struct sdw_master *mstr, + struct sdw_runtime 
*sdw_rt) +{ + struct sdw_mstr_runtime *mstr_rt, *__mstr_rt; + struct sdw_port_runtime *port_rt, *__port_rt, *first_port_rt = NULL; + + list_for_each_entry_safe(mstr_rt, __mstr_rt, &sdw_rt->mstr_rt_list, + mstr_sdw_node) { + if (mstr_rt->mstr == mstr) { + + /* Get first runtime node from port list */ + first_port_rt = list_first_entry(&mstr_rt->port_rt_list, + struct sdw_port_runtime, + port_node); + + /* Release Master port resources */ + list_for_each_entry_safe(port_rt, __port_rt, + &mstr_rt->port_rt_list, port_node) + list_del(&port_rt->port_node); + + kfree(first_port_rt); + list_del(&mstr_rt->mstr_sdw_node); + if (mstr_rt->direction == SDW_DATA_DIR_OUT) + sdw_rt->tx_ref_count--; + else + sdw_rt->rx_ref_count--; + list_del(&mstr_rt->mstr_node); + pm_runtime_mark_last_busy(&mstr->dev); + pm_runtime_put_sync_autosuspend(&mstr->dev); + kfree(mstr_rt); + } + } +} + +static void sdw_release_slave_stream(struct sdw_slv *slave, + struct sdw_runtime *sdw_rt) +{ + struct sdw_slave_runtime *slv_rt, *__slv_rt; + struct sdw_port_runtime *port_rt, *__port_rt, *first_port_rt = NULL; + + list_for_each_entry_safe(slv_rt, __slv_rt, &sdw_rt->slv_rt_list, + slave_sdw_node) { + if (slv_rt->slave == slave) { + + /* Get first runtime node from port list */ + first_port_rt = list_first_entry(&slv_rt->port_rt_list, + struct sdw_port_runtime, + port_node); + + /* Release Slave port resources */ + list_for_each_entry_safe(port_rt, __port_rt, + &slv_rt->port_rt_list, port_node) + list_del(&port_rt->port_node); + + kfree(first_port_rt); + list_del(&slv_rt->slave_sdw_node); + if (slv_rt->direction == SDW_DATA_DIR_OUT) + sdw_rt->tx_ref_count--; + else + sdw_rt->rx_ref_count--; + pm_runtime_mark_last_busy(&slave->dev); + pm_runtime_put_sync_autosuspend(&slave->dev); + kfree(slv_rt); + } + } +} + +/** + * sdw_release_stream: De-allocates the bandwidth allocated to the + * the stream. This is reference counted, + * so for the last stream count, BW will be de-allocated + * for the stream. 
Normally this will be called + * as part of hw_free. + * + * @mstr: Master handle + * @slave: SoundWire slave handle. + * @stream_config: Stream configuration for the soundwire audio stream. + * @stream_tag: Unique stream tag identifier across SoC for all soundwire + * busses. + * for each audio stream between slaves. This stream tag + * will be allocated by master driver for every + * stream getting open. + */ +int sdw_release_stream(struct sdw_master *mstr, + struct sdw_slv *slave, + unsigned int stream_tag) +{ + int i; + struct sdw_runtime *sdw_rt = NULL; + struct sdw_stream_tag *stream_tags = sdw_core.stream_tags; + + for (i = 0; i < SDW_NUM_STREAM_TAGS; i++) { + if (stream_tags[i].stream_tag == stream_tag) { + sdw_rt = stream_tags[i].sdw_rt; + break; + } + } + if (!sdw_rt) { + dev_err(&mstr->dev, "Invalid stream tag\n"); + return -EINVAL; + } + if (!slave) + sdw_release_mstr_stream(mstr, sdw_rt); + else + sdw_release_slave_stream(slave, sdw_rt); + return 0; +} +EXPORT_SYMBOL_GPL(sdw_release_stream); + +/** + * sdw_configure_stream: Allocates the B/W onto the soundwire bus + * for transferring the data between slave and master. + * This is configuring the single stream of data. + * This will be called by slave, Slave stream + * configuration should match the master stream + * configuration. Normally slave would call this + * as a part of hw_params. + * + * @mstr: Master handle + * @sdw_slv: SoundWire slave handle. + * @stream_config: Stream configuration for the soundwire audio stream. + * @stream_tag: Unique stream tag identifier across the soundwire bus + * for each audio stream between slaves and master. + * This is something like stream_tag in HDA protocol, but + * here its virtual rather than being embedded into protocol. + * Further same stream tag is valid across masters also + * if some ports of the master is participating in + * stream aggregation. This is input parameters to the + * function. 
+ */ +int sdw_config_stream(struct sdw_master *mstr, + struct sdw_slv *slave, + struct sdw_stream_config *stream_config, + unsigned int stream_tag) +{ + int i; + int ret = 0; + struct sdw_runtime *sdw_rt = NULL; + struct sdw_mstr_runtime *mstr_rt = NULL; + struct sdw_slave_runtime *slv_rt = NULL; + struct sdw_stream_tag *stream_tags = sdw_core.stream_tags; + struct sdw_stream_tag *stream = NULL; + + for (i = 0; i < SDW_NUM_STREAM_TAGS; i++) { + if (stream_tags[i].stream_tag == stream_tag) { + sdw_rt = stream_tags[i].sdw_rt; + stream = &stream_tags[i]; + break; + } + } + if (!sdw_rt) { + dev_err(&mstr->dev, "Valid stream tag not found\n"); + ret = -EINVAL; + goto out; + } + if (static_key_false(&sdw_trace_msg)) + trace_sdw_config_stream(mstr, slave, stream_config, + stream_tag); + + mutex_lock(&stream->stream_lock); + + mstr_rt = sdw_get_mstr_rt(sdw_rt, mstr); + if (!mstr_rt) { + dev_err(&mstr->dev, "master runtime configuration failed\n"); + ret = -EINVAL; + goto out; + } + + if (!slave) { + mstr_rt->direction = stream_config->direction; + mstr_rt->rt_state = SDW_STATE_CONFIG_RT; + sdw_rt->xport_state = SDW_STATE_ONLY_XPORT_STREAM; + + mstr_rt->stream_params.rate = stream_config->frame_rate; + mstr_rt->stream_params.channel_count = + stream_config->channel_count; + mstr_rt->stream_params.bps = stream_config->bps; + + } else + slv_rt = sdw_config_slave_stream(slave, + stream_config, sdw_rt); + /* Stream params will be stored based on Tx only, since there can + * be only one Tx and muliple Rx, There can be muliple Tx if + * there is aggregation on Tx. 
That is handled by adding the channels + * to stream_params for each aggregated Tx slaves + */ + if (!sdw_rt->tx_ref_count && stream_config->direction == + SDW_DATA_DIR_OUT) { + sdw_rt->stream_params.rate = stream_config->frame_rate; + sdw_rt->stream_params.channel_count = + stream_config->channel_count; + sdw_rt->stream_params.bps = stream_config->bps; + sdw_rt->tx_ref_count++; + } + + + /* Normally there will be only one Tx in system, multiple Tx + * can only be there if we support aggregation. In that case + * there may be multiple slave or masters handing different + * channels of same Tx stream. + */ + else if (sdw_rt->tx_ref_count && stream_config->direction == + SDW_DATA_DIR_OUT) { + if (sdw_rt->stream_params.rate != + stream_config->frame_rate) { + dev_err(&mstr->dev, "Frame rate for aggregated devices not matching\n"); + ret = -EINVAL; + goto free_mem; + } + if (sdw_rt->stream_params.bps != stream_config->bps) { + dev_err(&mstr->dev, "bps for aggregated devices not matching\n"); + ret = -EINVAL; + goto free_mem; + } + /* Number of channels gets added, since both devices will + * be supporting different channels. Like one Codec + * supporting L and other supporting R channel. + */ + sdw_rt->stream_params.channel_count += + stream_config->channel_count; + sdw_rt->tx_ref_count++; + } else + sdw_rt->rx_ref_count++; + + sdw_rt->type = stream_config->type; + sdw_rt->stream_state = SDW_STATE_CONFIG_STREAM; + + /* Slaves are added to two list, This is because BW is calculated + * for two masters individually, while Ports are enabled of all + * the aggregated masters and slaves part of the same stream tag + * simultaneously. 
+ */ + if (slave) { + list_add_tail(&slv_rt->slave_sdw_node, &sdw_rt->slv_rt_list); + list_add_tail(&slv_rt->slave_node, &mstr_rt->slv_rt_list); + } + mutex_unlock(&stream->stream_lock); + if (slave) + pm_runtime_get_sync(&slave->dev); + else + pm_runtime_get_sync(&mstr->dev); + return ret; + +free_mem: + mutex_unlock(&stream->stream_lock); + kfree(mstr_rt); + kfree(slv_rt); +out: + return ret; + +} +EXPORT_SYMBOL_GPL(sdw_config_stream); + +/** + * sdw_chk_slv_dpn_caps - Return success + * -EINVAL - In case of error + * + * This function checks all slave port capabilities + * for given stream parameters. If any of parameters + * is not supported in port capabilities, it returns + * error. + */ +int sdw_chk_slv_dpn_caps(struct sdw_slv_dpn_capabilities *dpn_cap, + struct sdw_stream_params *strm_prms) +{ + struct port_audio_mode_properties *mode_prop = + dpn_cap->mode_properties; + int ret = 0, i, value; + + /* Check Sampling frequency */ + if (mode_prop->num_sampling_freq_configs) { + for (i = 0; i < mode_prop->num_sampling_freq_configs; i++) { + + value = mode_prop->sampling_freq_config[i]; + if (strm_prms->rate == value) + break; + } + + if (i == mode_prop->num_sampling_freq_configs) + return -EINVAL; + + } else { + + if ((strm_prms->rate < mode_prop->min_sampling_frequency) + || (strm_prms->rate > + mode_prop->max_sampling_frequency)) + return -EINVAL; + } + + /* check for bit rate */ + if (dpn_cap->num_word_length) { + for (i = 0; i < dpn_cap->num_word_length; i++) { + + value = dpn_cap->word_length_buffer[i]; + if (strm_prms->bps == value) + break; + } + + if (i == dpn_cap->num_word_length) + return -EINVAL; + + } else { + + if ((strm_prms->bps < dpn_cap->min_word_length) + || (strm_prms->bps > dpn_cap->max_word_length)) + return -EINVAL; + } + + /* check for number of channels */ + if (dpn_cap->num_ch_supported) { + for (i = 0; i < dpn_cap->num_ch_supported; i++) { + + value = dpn_cap->ch_supported[i]; + if (strm_prms->bps == value) + break; + } + + if (i == 
dpn_cap->num_ch_supported) + return -EINVAL; + + } else { + + if ((strm_prms->channel_count < dpn_cap->min_ch_num) + || (strm_prms->channel_count > dpn_cap->max_ch_num)) + return -EINVAL; + } + + return ret; +} + +/** + * sdw_chk_mstr_dpn_caps - Return success + * -EINVAL - In case of error + * + * This function checks all master port capabilities + * for given stream parameters. If any of parameters + * is not supported in port capabilities, it returns + * error. + */ +int sdw_chk_mstr_dpn_caps(struct sdw_mstr_dpn_capabilities *dpn_cap, + struct sdw_stream_params *strm_prms) +{ + + int ret = 0, i, value; + + /* check for bit rate */ + if (dpn_cap->num_word_length) { + for (i = 0; i < dpn_cap->num_word_length; i++) { + + value = dpn_cap->word_length_buffer[i]; + if (strm_prms->bps == value) + break; + } + + if (i == dpn_cap->num_word_length) + return -EINVAL; + + } else { + + if ((strm_prms->bps < dpn_cap->min_word_length) + || (strm_prms->bps > dpn_cap->max_word_length)) { + return -EINVAL; + } + + + } + + /* check for number of channels */ + if (dpn_cap->num_ch_supported) { + for (i = 0; i < dpn_cap->num_ch_supported; i++) { + + value = dpn_cap->ch_supported[i]; + if (strm_prms->bps == value) + break; + } + + if (i == dpn_cap->num_ch_supported) + return -EINVAL; + + } else { + + if ((strm_prms->channel_count < dpn_cap->min_ch_num) + || (strm_prms->channel_count > dpn_cap->max_ch_num)) + return -EINVAL; + } + + return ret; +} + +static int sdw_mstr_port_configuration(struct sdw_master *mstr, + struct sdw_runtime *sdw_rt, + struct sdw_port_config *port_config) +{ + struct sdw_mstr_runtime *mstr_rt; + struct sdw_port_runtime *port_rt; + int found = 0; + int i; + int ret = 0, pn = 0; + struct sdw_mstr_dpn_capabilities *dpn_cap = + mstr->mstr_capabilities.sdw_dpn_cap; + + list_for_each_entry(mstr_rt, &sdw_rt->mstr_rt_list, mstr_sdw_node) { + if (mstr_rt->mstr == mstr) { + found = 1; + break; + } + } + if (!found) { + dev_err(&mstr->dev, "Master not found for this 
port\n"); + return -EINVAL; + } + + port_rt = kzalloc((sizeof(struct sdw_port_runtime)) * + port_config->num_ports, GFP_KERNEL); + if (!port_rt) + return -EINVAL; + + if (!dpn_cap) + return -EINVAL; + /* + * Note: Here the assumption the configuration is not + * received for 0th port. + */ + for (i = 0; i < port_config->num_ports; i++) { + port_rt[i].channel_mask = port_config->port_cfg[i].ch_mask; + port_rt[i].port_num = pn = port_config->port_cfg[i].port_num; + + /* Perform capability check for master port */ + ret = sdw_chk_mstr_dpn_caps(&dpn_cap[pn], + &mstr_rt->stream_params); + if (ret < 0) { + dev_err(&mstr->dev, + "Master capabilities check failed\n"); + return -EINVAL; + } + + list_add_tail(&port_rt[i].port_node, &mstr_rt->port_rt_list); + } + + return ret; +} + +static int sdw_slv_port_configuration(struct sdw_slv *slave, + struct sdw_runtime *sdw_rt, + struct sdw_port_config *port_config) +{ + struct sdw_slave_runtime *slv_rt; + struct sdw_port_runtime *port_rt; + struct sdw_slv_dpn_capabilities *dpn_cap = + slave->sdw_slv_cap.sdw_dpn_cap; + int found = 0, ret = 0; + int i, pn; + + list_for_each_entry(slv_rt, &sdw_rt->slv_rt_list, slave_sdw_node) { + if (slv_rt->slave == slave) { + found = 1; + break; + } + } + if (!found) { + dev_err(&slave->mstr->dev, "Slave not found for this port\n"); + return -EINVAL; + } + + if (!slave->slave_cap_updated) { + dev_err(&slave->mstr->dev, "Slave capabilities not updated\n"); + return -EINVAL; + } + + port_rt = kzalloc((sizeof(struct sdw_port_runtime)) * + port_config->num_ports, GFP_KERNEL); + if (!port_rt) + return -EINVAL; + + for (i = 0; i < port_config->num_ports; i++) { + port_rt[i].channel_mask = port_config->port_cfg[i].ch_mask; + port_rt[i].port_num = pn = port_config->port_cfg[i].port_num; + + /* Perform capability check for master port */ + ret = sdw_chk_slv_dpn_caps(&dpn_cap[pn], + &slv_rt->stream_params); + if (ret < 0) { + dev_err(&slave->mstr->dev, + "Slave capabilities check failed\n"); + return 
-EINVAL; + } + + list_add_tail(&port_rt[i].port_node, &slv_rt->port_rt_list); + } + + return ret; +} + +/** + * sdw_config_port: Port configuration for the SoundWire. Multiple + * soundWire ports may form single stream. Like two + * ports each transferring/receiving mono channels + * forms single stream with stereo channels. + * There will be single ASoC DAI representing + * the both ports. So stream configuration will be + * stereo, but both of the ports will be configured + * for mono channels, each with different channel + * mask. This is used to program port w.r.t to stream. + * params. So no need to de-configure, since these + * are automatically destroyed once stream gets + * destroyed. + * @mstr: Master handle where the slave is connected. + * @slave: Slave handle. + * @port_config: Port configuration for each port of soundwire slave. + * @stream_tag: Stream tag, where this port is connected. + * + */ +int sdw_config_port(struct sdw_master *mstr, + struct sdw_slv *slave, + struct sdw_port_config *port_config, + unsigned int stream_tag) +{ + int ret = 0; + int i; + struct sdw_stream_tag *stream_tags = sdw_core.stream_tags; + struct sdw_runtime *sdw_rt = NULL; + struct sdw_stream_tag *stream = NULL; + + for (i = 0; i < SDW_NUM_STREAM_TAGS; i++) { + if (stream_tags[i].stream_tag == stream_tag) { + sdw_rt = stream_tags[i].sdw_rt; + stream = &stream_tags[i]; + break; + } + } + + if (!sdw_rt) { + dev_err(&mstr->dev, "Invalid stream tag\n"); + return -EINVAL; + } + + if (static_key_false(&sdw_trace_msg)) { + int i; + + for (i = 0; i < port_config->num_ports; i++) { + trace_sdw_config_port(mstr, slave, + &port_config->port_cfg[i], stream_tag); + } + } + + mutex_lock(&stream->stream_lock); + + if (!slave) + ret = sdw_mstr_port_configuration(mstr, sdw_rt, port_config); + else + ret = sdw_slv_port_configuration(slave, sdw_rt, port_config); + + mutex_unlock(&stream->stream_lock); + + return ret; +} +EXPORT_SYMBOL_GPL(sdw_config_port); + +int sdw_prepare_and_enable(int 
stream_tag, bool enable) +{ + + int i, ret = 0; + struct sdw_stream_tag *stream_tags = sdw_core.stream_tags; + struct sdw_stream_tag *stream = NULL; + + mutex_lock(&sdw_core.core_lock); + + for (i = 0; i < SDW_NUM_STREAM_TAGS; i++) { + if (stream_tag == stream_tags[i].stream_tag) { + stream = &stream_tags[i]; + break; + } + } + if (stream == NULL) { + mutex_unlock(&sdw_core.core_lock); + WARN_ON(1); /* Return from here after unlocking core*/ + return -EINVAL; + } + mutex_lock(&stream->stream_lock); + ret = sdw_bus_calc_bw(&stream_tags[i], enable); + if (ret) + pr_err("Bandwidth allocation failed\n"); + + mutex_unlock(&stream->stream_lock); + mutex_unlock(&sdw_core.core_lock); + return ret; +} +EXPORT_SYMBOL_GPL(sdw_prepare_and_enable); + +int sdw_disable_and_unprepare(int stream_tag, bool unprepare) +{ + int i, ret = 0; + struct sdw_stream_tag *stream_tags = sdw_core.stream_tags; + struct sdw_stream_tag *stream = NULL; + + mutex_lock(&sdw_core.core_lock); + + for (i = 0; i < SDW_NUM_STREAM_TAGS; i++) { + if (stream_tag == stream_tags[i].stream_tag) { + stream = &stream_tags[i]; + break; + } + } + if (stream == NULL) { + mutex_unlock(&sdw_core.core_lock); + WARN_ON(1); /* Return from here after unlocking core*/ + return -EINVAL; + } + mutex_lock(&stream->stream_lock); + ret = sdw_bus_calc_bw_dis(&stream_tags[i], unprepare); + if (ret) + pr_err("Bandwidth de-allocation failed\n"); + + mutex_unlock(&stream->stream_lock); + + mutex_unlock(&sdw_core.core_lock); + return ret; +} +EXPORT_SYMBOL_GPL(sdw_disable_and_unprepare); + +int sdw_stop_clock(struct sdw_master *mstr, enum sdw_clk_stop_mode mode) +{ + int ret = 0, i; + struct sdw_msg msg; + u8 buf[1] = {0}; + int slave_present = 0; + + for (i = 1; i <= SOUNDWIRE_MAX_DEVICES; i++) { + if (mstr->sdw_addr[i].assigned && + mstr->sdw_addr[i].status != + SDW_SLAVE_STAT_NOT_PRESENT) + slave_present = 1; + } + + /* Send Broadcast message to the SCP_ctrl register with + * clock stop now + */ + msg.ssp_tag = 1; + msg.flag = 
SDW_MSG_FLAG_WRITE; + msg.addr = SDW_SCP_CTRL; + msg.len = 1; + buf[0] |= 0x1 << SDW_SCP_CTRL_CLK_STP_NOW_SHIFT; + msg.buf = buf; + msg.slave_addr = 15; + msg.addr_page1 = 0x0; + msg.addr_page2 = 0x0; + ret = sdw_slave_transfer_nopm(mstr, &msg, 1); + if (ret != 1 && slave_present) { + dev_err(&mstr->dev, "Failed to stop clk\n"); + return -EBUSY; + } + /* If we are entering clock stop mode1, mark all the slaves un-attached. + */ + if (mode == SDW_CLOCK_STOP_MODE_1) { + for (i = 1; i <= SOUNDWIRE_MAX_DEVICES; i++) { + if (mstr->sdw_addr[i].assigned) + mstr->sdw_addr[i].status = + SDW_SLAVE_STAT_NOT_PRESENT; + } + } + return 0; +} +EXPORT_SYMBOL_GPL(sdw_stop_clock); + +int sdw_wait_for_slave_enumeration(struct sdw_master *mstr, + struct sdw_slv *slave) +{ + int timeout = 0; + + /* Wait till device gets enumerated. Wait for 2Secs before + * giving up + */ + do { + msleep(100); + timeout++; + } while ((slave->slv_addr->status == SDW_SLAVE_STAT_NOT_PRESENT) && + timeout < 20); + + if (slave->slv_addr->status == SDW_SLAVE_STAT_NOT_PRESENT) + return -EBUSY; + return 0; +} +EXPORT_SYMBOL_GPL(sdw_wait_for_slave_enumeration); + +static enum sdw_clk_stop_mode sdw_get_clk_stp_mode(struct sdw_slv *slave) +{ + enum sdw_clk_stop_mode clock_stop_mode = SDW_CLOCK_STOP_MODE_0; + struct sdw_slv_capabilities *cap = &slave->sdw_slv_cap; + + if (!slave->driver) + return clock_stop_mode; + /* + * Get the dynamic value of clock stop from Slave driver + * if supported, else use the static value from + * capabilities register. Update the capabilities also + * if we have new dynamic value. + */ + if (slave->driver->get_dyn_clk_stp_mod) { + clock_stop_mode = slave->driver->get_dyn_clk_stp_mod(slave); + + if (clock_stop_mode == SDW_CLOCK_STOP_MODE_1) + cap->clock_stop1_mode_supported = true; + else + cap->clock_stop1_mode_supported = false; + } else + clock_stop_mode = cap->clock_stop1_mode_supported; + + return clock_stop_mode; +} + +/** + * sdw_master_stop_clock: Stop the clock. 
This function broadcasts the SCP_CTRL + * register with clock_stop_now bit set. + * + * @mstr: Master handle for which clock has to be stopped. + * + * Returns 0 on success, appropriate error code on failure. + */ +int sdw_master_stop_clock(struct sdw_master *mstr) +{ + int ret = 0, i; + struct sdw_msg msg; + u8 buf[1] = {0}; + enum sdw_clk_stop_mode mode; + + /* Send Broadcast message to the SCP_ctrl register with + * clock stop now. If none of the Slaves are attached, then there + * may not be ACK, flag the error about ACK not recevied but + * clock will be still stopped. + */ + msg.ssp_tag = 0; + msg.flag = SDW_MSG_FLAG_WRITE; + msg.len = 1; + msg.buf = &buf[0]; + msg.slave_addr = SDW_SLAVE_BDCAST_ADDR; + msg.addr_page1 = 0x0; + msg.addr_page2 = 0x0; + msg.addr = SDW_SCP_CTRL; + buf[0] |= 0x1 << SDW_SCP_CTRL_CLK_STP_NOW_SHIFT; + ret = sdw_slave_transfer_nopm(mstr, &msg, 1); + + /* Even if broadcast fails, we stop the clock and flag error */ + if (ret != 1) + dev_err(&mstr->dev, "ClockStopNow Broadcast message failed\n"); + + /* + * Mark all Slaves as un-attached which are entering clock stop + * mode1 + */ + for (i = 1; i <= SOUNDWIRE_MAX_DEVICES; i++) { + + if (!mstr->sdw_addr[i].assigned) + continue; + + /* Get clock stop mode for all Slaves */ + mode = sdw_get_clk_stp_mode(mstr->sdw_addr[i].slave); + if (mode == SDW_CLOCK_STOP_MODE_0) + continue; + + /* If clock stop mode 1, mark Slave as not present */ + mstr->sdw_addr[i].status = SDW_SLAVE_STAT_NOT_PRESENT; + } + return 0; +} +EXPORT_SYMBOL_GPL(sdw_master_stop_clock); + +static struct sdw_slv *get_slave_for_prep_deprep(struct sdw_master *mstr, + int *slave_index) +{ + int i; + + for (i = *slave_index; i <= SOUNDWIRE_MAX_DEVICES; i++) { + if (mstr->sdw_addr[i].assigned != true) + continue; + + if (mstr->sdw_addr[i].status == SDW_SLAVE_STAT_NOT_PRESENT) + continue; + + *slave_index = i + 1; + return mstr->sdw_addr[i].slave; + } + return NULL; +} + +/* + * Wait till clock stop prepare/deprepare is finished. 
Prepare for all + * mode, De-prepare only for the Slaves resuming from clock stop mode 0 + */ +static void sdw_wait_for_clk_prep(struct sdw_master *mstr) +{ + int ret; + struct sdw_msg msg; + u8 buf[1] = {0}; + int timeout = 0; + + /* Create message to read clock stop status, its broadcast message. */ + msg.ssp_tag = 0; + msg.flag = SDW_MSG_FLAG_READ; + msg.len = 1; + msg.buf = &buf[0]; + msg.slave_addr = SDW_SLAVE_BDCAST_ADDR; + msg.addr_page1 = 0x0; + msg.addr_page2 = 0x0; + msg.addr = SDW_SCP_STAT; + buf[0] = 0xFF; + /* + * Once all the Slaves are written with prepare bit, + * we go ahead and broadcast the read message for the + * SCP_STAT register to read the ClockStopNotFinished bit + * Read till we get this a 0. Currently we have timeout of 1sec + * before giving up. Even if its not read as 0 after timeout, + * controller can stop the clock after warning. + */ + do { + /* + * Ideally this should not fail, but even if it fails + * in exceptional situation, we go ahead for clock stop + */ + ret = sdw_slave_transfer_nopm(mstr, &msg, 1); + + if (ret != 1) { + WARN_ONCE(1, "Clock stop status read failed\n"); + break; + } + + if (!(buf[0] & SDW_SCP_STAT_CLK_STP_NF_MASK)) + break; + + /* + * TODO: Need to find from spec what is requirement. + * Since we are in suspend we should not sleep for more + * Ideally Slave should be ready to stop clock in less than + * few ms. + * So sleep less and increase loop time. This is not + * harmful, since if Slave is ready loop will terminate. + * + */ + msleep(2); + timeout++; + + } while (timeout != 500); + + if (!(buf[0] & SDW_SCP_STAT_CLK_STP_NF_MASK)) + + dev_info(&mstr->dev, "Clock stop prepare done\n"); + else + WARN_ONCE(1, "Some Slaves prepare un-successful\n"); +} + +/** + * sdw_master_prep_for_clk_stop: Prepare all the Slaves for clock stop. + * Iterate through each of the enumerated Slave. + * Prepare each Slave according to the clock stop + * mode supported by Slave. 
Use dynamic value from + * Slave callback if registered, else use static values + * from Slave capabilities registered. + * 1. Get clock stop mode for each Slave. + * 2. Call pre_prepare callback of each Slave if + * registered. + * 3. Prepare each Slave for clock stop + * 4. Broadcast the Read message to make sure + * all Slaves are prepared for clock stop. + * 5. Call post_prepare callback of each Slave if + * registered. + * + * @mstr: Master handle for which clock state has to be changed. + * + * Returns 0 + */ +int sdw_master_prep_for_clk_stop(struct sdw_master *mstr) +{ + struct sdw_slv_capabilities *cap; + enum sdw_clk_stop_mode clock_stop_mode; + int ret = 0; + struct sdw_slv *slave = NULL; + int slv_index = 1; + + /* + * Get all the Slaves registered to the master driver for preparing + * for clock stop. Start from Slave with logical address as 1. + */ + while ((slave = get_slave_for_prep_deprep(mstr, &slv_index)) != NULL) { + + cap = &slave->sdw_slv_cap; + + clock_stop_mode = sdw_get_clk_stp_mode(slave); + + /* + * Call the pre clock stop prepare, if Slave requires. + */ + if (slave->driver && slave->driver->pre_clk_stop_prep) { + ret = slave->driver->pre_clk_stop_prep(slave, + clock_stop_mode, true); + + /* If it fails we still continue */ + if (ret) + dev_warn(&mstr->dev, "Pre prepare failed for Slave %d\n", + slave->slv_number); + } + + sdw_prep_slave_for_clk_stp(mstr, slave, clock_stop_mode, true); + } + + /* Wait till prepare for all Slaves is finished */ + /* + * We should continue even if the prepare fails. Clock stop + * prepare failure on Slaves, should not impact the broadcasting + * of ClockStopNow. 
+ */ + sdw_wait_for_clk_prep(mstr); + + slv_index = 1; + while ((slave = get_slave_for_prep_deprep(mstr, &slv_index)) != NULL) { + + cap = &slave->sdw_slv_cap; + + clock_stop_mode = sdw_get_clk_stp_mode(slave); + + if (slave->driver && slave->driver->post_clk_stop_prep) { + ret = slave->driver->post_clk_stop_prep(slave, + clock_stop_mode, + true); + /* + * Even if Slave fails we continue with other + * Slaves. This should never happen ideally. + */ + if (ret) + dev_err(&mstr->dev, "Post prepare failed for Slave %d\n", + slave->slv_number); + } + } + + return 0; +} +EXPORT_SYMBOL_GPL(sdw_master_prep_for_clk_stop); + +/** + * sdw_mstr_deprep_after_clk_start: De-prepare all the Slaves + * exiting clock stop mode 0 after clock resumes. Clock + * is already resumed before this. De-prepare all the Slaves + * which were earlier in ClockStop mode0. De-prepare for the + * Slaves which were there in ClockStop mode1 is done after + * they enumerated back. Its not done here as part of master + * getting resumed. + * 1. Get clock stop mode for each Slave its exiting from + * 2. Call pre_prepare callback of each Slave exiting from + * clock stop mode 0. + * 3. De-Prepare each Slave exiting from Clock Stop mode0 + * 4. Broadcast the Read message to make sure + * all Slaves are de-prepared for clock stop. + * 5. 
Call post_prepare callback of each Slave exiting from + * clock stop mode0 + * + * + * @mstr: Master handle + * + * Returns 0 + */ +int sdw_mstr_deprep_after_clk_start(struct sdw_master *mstr) +{ + struct sdw_slv_capabilities *cap; + enum sdw_clk_stop_mode clock_stop_mode; + int ret = 0; + struct sdw_slv *slave = NULL; + /* We are preparing for stop */ + bool stop = false; + int slv_index = 1; + + while ((slave = get_slave_for_prep_deprep(mstr, &slv_index)) != NULL) { + + cap = &slave->sdw_slv_cap; + + /* Get the clock stop mode from which Slave is exiting */ + clock_stop_mode = sdw_get_clk_stp_mode(slave); + + /* + * Slave is exiting from Clock stop mode 1, De-prepare + * is optional based on capability, and it has to be done + * after Slave is enumerated. So nothing to be done + * here. + */ + if (clock_stop_mode == SDW_CLOCK_STOP_MODE_1) + continue; + /* + * Call the pre clock stop prepare, if Slave requires. + */ + if (slave->driver && slave->driver->pre_clk_stop_prep) + ret = slave->driver->pre_clk_stop_prep(slave, + clock_stop_mode, false); + + /* If it fails we still continue */ + if (ret) + dev_warn(&mstr->dev, "Pre de-prepare failed for Slave %d\n", + slave->slv_number); + + sdw_prep_slave_for_clk_stp(mstr, slave, clock_stop_mode, false); + } + + /* + * Wait till prepare is finished for all the Slaves. + */ + sdw_wait_for_clk_prep(mstr); + + slv_index = 1; + while ((slave = get_slave_for_prep_deprep(mstr, &slv_index)) != NULL) { + + cap = &slave->sdw_slv_cap; + + clock_stop_mode = sdw_get_clk_stp_mode(slave); + + /* + * Slave is exiting from Clock stop mode 1, De-prepare + * is optional based on capability, and it has to be done + * after Slave is enumerated. + */ + if (clock_stop_mode == SDW_CLOCK_STOP_MODE_1) + continue; + + if (slave->driver && slave->driver->post_clk_stop_prep) { + ret = slave->driver->post_clk_stop_prep(slave, + clock_stop_mode, + stop); + /* + * Even if Slave fails we continue with other + * Slaves. This should never happen ideally. 
+ */ + if (ret) + dev_err(&mstr->dev, "Post de-prepare failed for Slave %d\n", + slave->slv_number); + } + } + return 0; +} +EXPORT_SYMBOL_GPL(sdw_mstr_deprep_after_clk_start); + + +struct sdw_master *sdw_get_master(int nr) +{ + struct sdw_master *master; + + mutex_lock(&sdw_core.core_lock); + master = idr_find(&sdw_core.idr, nr); + if (master && !try_module_get(master->owner)) + master = NULL; + mutex_unlock(&sdw_core.core_lock); + + return master; +} +EXPORT_SYMBOL_GPL(sdw_get_master); + +void sdw_put_master(struct sdw_master *mstr) +{ + if (mstr) + module_put(mstr->owner); +} +EXPORT_SYMBOL_GPL(sdw_put_master); + +static void sdw_exit(void) +{ + device_unregister(&sdw_slv); + bus_unregister(&sdwint_bus_type); +} + +static int sdw_init(void) +{ + int retval; + int i; + + for (i = 0; i < SDW_NUM_STREAM_TAGS; i++) + sdw_core.stream_tags[i].stream_tag = i; + mutex_init(&sdw_core.core_lock); + INIT_LIST_HEAD(&sdw_core.bus_list); + idr_init(&sdw_core.idr); + retval = bus_register(&sdwint_bus_type); + + if (!retval) + retval = device_register(&sdw_slv); + + + if (retval) + bus_unregister(&sdwint_bus_type); + + retval = sdw_bus_bw_init(); + if (retval) { + device_unregister(&sdw_slv); + bus_unregister(&sdwint_bus_type); + } + + return retval; +} +postcore_initcall(sdw_init); +module_exit(sdw_exit); + +MODULE_AUTHOR("Hardik Shah "); +MODULE_LICENSE("GPL v2"); +MODULE_VERSION("0.1"); +MODULE_DESCRIPTION("SoundWire bus driver"); +MODULE_ALIAS("platform:soundwire"); diff --git a/drivers/sdw/sdw_bwcalc.c b/drivers/sdw/sdw_bwcalc.c new file mode 100644 index 0000000000000..7ebb26756f596 --- /dev/null +++ b/drivers/sdw/sdw_bwcalc.c @@ -0,0 +1,3097 @@ +/* + * sdw_bwcalc.c - SoundWire Bus BW calculation & CHN Enabling implementation + * + * Copyright (C) 2015-2016 Intel Corp + * Author: Sanyog Kale + * + * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + * + * This program is free software; you can redistribute it and/or modify + * it under the terms 
of the GNU General Public License as published by + * the Free Software Foundation; version 2 of the License. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + */ +#include +#include +#include +#include "sdw_priv.h" +#include +#include + + + +#ifndef CONFIG_SND_SOC_SVFPGA /* Original */ +#if IS_ENABLED(CONFIG_SND_SOC_INTEL_CNL_FPGA) +int rows[MAX_NUM_ROWS] = {48, 50, 60, 64, 72, 75, 80, 90, + 96, 125, 144, 147, 100, 120, 128, 150, + 160, 180, 192, 200, 240, 250, 256}; +#define SDW_DEFAULT_SSP 50 +#else +int rows[MAX_NUM_ROWS] = {125, 64, 48, 50, 60, 72, 75, 80, 90, + 96, 144, 147, 100, 120, 128, 150, + 160, 180, 192, 200, 240, 250, 256}; +#define SDW_DEFAULT_SSP 24 +#endif /* IS_ENABLED(CONFIG_SND_SOC_INTEL_CNL_FPGA) */ + +int cols[MAX_NUM_COLS] = {2, 4, 6, 8, 10, 12, 14, 16}; + +#else +/* For PDM Capture, frameshape used is 50x10 */ +int rows[MAX_NUM_ROWS] = {50, 100, 48, 60, 64, 72, 75, 80, 90, + 96, 125, 144, 147, 120, 128, 150, + 160, 180, 192, 200, 240, 250, 256}; + +int cols[MAX_NUM_COLS] = {10, 2, 4, 6, 8, 12, 14, 16}; +#define SDW_DEFAULT_SSP 50 +#endif + +/* + * TBD: Get supported clock frequency from ACPI and store + * it in master data structure. 
+ */ +#define MAXCLOCKDIVS 1 +int clock_div[MAXCLOCKDIVS] = {1}; + +struct sdw_num_to_col sdw_num_col_mapping[MAX_NUM_COLS] = { + {0, 2}, {1, 4}, {2, 6}, {3, 8}, {4, 10}, {5, 12}, {6, 14}, {7, 16}, +}; + +struct sdw_num_to_row sdw_num_row_mapping[MAX_NUM_ROWS] = { + {0, 48}, {1, 50}, {2, 60}, {3, 64}, {4, 75}, {5, 80}, {6, 125}, + {7, 147}, {8, 96}, {9, 100}, {10, 120}, {11, 128}, {12, 150}, + {13, 160}, {14, 250}, {16, 192}, {17, 200}, {18, 240}, {19, 256}, + {20, 72}, {21, 144}, {22, 90}, {23, 180}, +}; + +/** + * sdw_bus_bw_init - returns Success + * + * + * This function is called from sdw_init function when bus driver + * gets intitalized. This function performs all the generic + * intializations required for BW control. + */ +int sdw_bus_bw_init(void) +{ + int r, c, rowcolcount = 0; + int control_bits = 48; + + for (c = 0; c < MAX_NUM_COLS; c++) { + + for (r = 0; r < MAX_NUM_ROWS; r++) { + sdw_core.rowcolcomb[rowcolcount].col = cols[c]; + sdw_core.rowcolcomb[rowcolcount].row = rows[r]; + sdw_core.rowcolcomb[rowcolcount].control_bits = + control_bits; + sdw_core.rowcolcomb[rowcolcount].data_bits = + (cols[c] * rows[r]) - control_bits; + rowcolcount++; + } + } + + return 0; +} +EXPORT_SYMBOL_GPL(sdw_bus_bw_init); + + +/** + * sdw_mstr_bw_init - returns Success + * + * + * This function is called from sdw_register_master function + * for each master controller gets register. This function performs + * all the intializations per master controller required for BW control. 
+ */ +int sdw_mstr_bw_init(struct sdw_bus *sdw_bs) +{ + struct sdw_master_capabilities *sdw_mstr_cap = NULL; + + /* Initialize required parameters in bus structure */ + sdw_bs->bandwidth = 0; + sdw_bs->system_interval = 0; + sdw_bs->frame_freq = 0; + sdw_bs->clk_state = SDW_CLK_STATE_ON; + sdw_mstr_cap = &sdw_bs->mstr->mstr_capabilities; + sdw_bs->clk_freq = (sdw_mstr_cap->base_clk_freq * 2); + + return 0; +} +EXPORT_SYMBOL_GPL(sdw_mstr_bw_init); + + +/** + * sdw_get_col_to_num + * + * Returns column number from the mapping. + */ +int sdw_get_col_to_num(int col) +{ + int i; + + for (i = 0; i < MAX_NUM_COLS; i++) { + if (sdw_num_col_mapping[i].col == col) + return sdw_num_col_mapping[i].num; + } + + return 0; /* Lowest Column number = 2 */ +} + + +/** + * sdw_get_row_to_num + * + * Returns row number from the mapping. + */ +int sdw_get_row_to_num(int row) +{ + int i; + + for (i = 0; i < MAX_NUM_ROWS; i++) { + if (sdw_num_row_mapping[i].row == row) + return sdw_num_row_mapping[i].num; + } + + return 0; /* Lowest Row number = 48 */ +} + +/* + * sdw_lcm - returns LCM of two numbers + * + * + * This function is called BW calculation function to find LCM + * of two numbers. + */ +int sdw_lcm(int num1, int num2) +{ + int max; + + /* maximum value is stored in variable max */ + max = (num1 > num2) ? num1 : num2; + + while (1) { + if (max%num1 == 0 && max%num2 == 0) + break; + ++max; + } + + return max; +} + + +/* + * sdw_cfg_slv_params - returns Success + * -EINVAL - In case of error. + * + * + * This function configures slave registers for + * transport and port parameters. 
+ */ +int sdw_cfg_slv_params(struct sdw_bus *mstr_bs, + struct sdw_transport_params *t_slv_params, + struct sdw_port_params *p_slv_params, int slv_number) +{ + struct sdw_msg wr_msg, wr_msg1, rd_msg; + int ret = 0; + int banktouse; + u8 wbuf[8] = {0, 0, 0, 0, 0, 0, 0, 0}; + u8 wbuf1[2] = {0, 0}; + u8 rbuf[1] = {0}; + + +#ifdef CONFIG_SND_SOC_SVFPGA + /* + * The below hardcoding is required + * for running PDM capture with SV conora card + * because the transport params of card is not + * same as master parameters. Also not all + * standard registers are valid. + */ + t_slv_params->blockgroupcontrol_valid = false; + t_slv_params->sample_interval = 50; + t_slv_params->offset1 = 0; + t_slv_params->offset2 = 0; + t_slv_params->hstart = 1; + t_slv_params->hstop = 6; + p_slv_params->word_length = 30; +#endif + + /* Program slave alternate bank with all transport parameters */ + /* DPN_BlockCtrl2 */ + wbuf[0] = t_slv_params->blockgroupcontrol; + /* DPN_SampleCtrl1 */ + wbuf[1] = (t_slv_params->sample_interval - 1) & + SDW_DPN_SAMPLECTRL1_LOW_MASK; + wbuf[2] = ((t_slv_params->sample_interval - 1) >> 8) & + SDW_DPN_SAMPLECTRL1_LOW_MASK; /* DPN_SampleCtrl2 */ + wbuf[3] = t_slv_params->offset1; /* DPN_OffsetCtrl1 */ + wbuf[4] = t_slv_params->offset2; /* DPN_OffsetCtrl2 */ + /* DPN_HCtrl */ + wbuf[5] = (t_slv_params->hstop | (t_slv_params->hstart << 4)); + wbuf[6] = t_slv_params->blockpackingmode; /* DPN_BlockCtrl3 */ + wbuf[7] = t_slv_params->lanecontrol; /* DPN_LaneCtrl */ + + /* Get current bank in use from bus structure*/ + banktouse = mstr_bs->active_bank; + banktouse = !banktouse; + /* Program slave alternate bank with all port parameters */ + rd_msg.addr = SDW_DPN_PORTCTRL + + (SDW_NUM_DATA_PORT_REGISTERS * t_slv_params->num); + rd_msg.ssp_tag = 0x0; + rd_msg.flag = SDW_MSG_FLAG_READ; + rd_msg.len = 1; + rd_msg.slave_addr = slv_number; + + rd_msg.buf = rbuf; + rd_msg.addr_page1 = 0x0; + rd_msg.addr_page2 = 0x0; + + ret = sdw_slave_transfer(mstr_bs->mstr, &rd_msg, 1); + 
if (ret != 1) { + ret = -EINVAL; + dev_err(&mstr_bs->mstr->dev, "Register transfer failed\n"); + goto out; + } + + wbuf1[0] = (p_slv_params->port_flow_mode | + (p_slv_params->port_data_mode << + SDW_DPN_PORTCTRL_PORTDATAMODE_SHIFT) | + (rbuf[0])); + + wbuf1[1] = (p_slv_params->word_length - 1); + + /* Check whether address computed is correct for both cases */ + wr_msg.addr = ((SDW_DPN_BLOCKCTRL2 + + (1 * (!t_slv_params->blockgroupcontrol_valid)) + + (SDW_BANK1_REGISTER_OFFSET * banktouse)) + + (SDW_NUM_DATA_PORT_REGISTERS * t_slv_params->num)); + + wr_msg1.addr = SDW_DPN_PORTCTRL + + (SDW_NUM_DATA_PORT_REGISTERS * t_slv_params->num); + + wr_msg.ssp_tag = 0x0; + wr_msg.flag = SDW_MSG_FLAG_WRITE; +#ifdef CONFIG_SND_SOC_SVFPGA + wr_msg.len = (5 + (1 * (t_slv_params->blockgroupcontrol_valid))); +#else + wr_msg.len = (7 + (1 * (t_slv_params->blockgroupcontrol_valid))); +#endif + + wr_msg.slave_addr = slv_number; + wr_msg.buf = &wbuf[0 + (1 * (!t_slv_params->blockgroupcontrol_valid))]; + wr_msg.addr_page1 = 0x0; + wr_msg.addr_page2 = 0x0; + + wr_msg1.ssp_tag = 0x0; + wr_msg1.flag = SDW_MSG_FLAG_WRITE; + wr_msg1.len = 2; + + wr_msg1.slave_addr = slv_number; + wr_msg1.buf = &wbuf1[0]; + wr_msg1.addr_page1 = 0x0; + wr_msg1.addr_page2 = 0x0; + + ret = sdw_slave_transfer(mstr_bs->mstr, &wr_msg, 1); + if (ret != 1) { + ret = -EINVAL; + dev_err(&mstr_bs->mstr->dev, "Register transfer failed\n"); + goto out; + } + + + ret = sdw_slave_transfer(mstr_bs->mstr, &wr_msg1, 1); + if (ret != 1) { + ret = -EINVAL; + dev_err(&mstr_bs->mstr->dev, "Register transfer failed\n"); + goto out; + } +out: + + return ret; +} + + +/* + * sdw_cfg_mstr_params - returns Success + * -EINVAL - In case of error. + * + * + * This function configures master registers for + * transport and port parameters. 
+ */ +int sdw_cfg_mstr_params(struct sdw_bus *mstr_bs, + struct sdw_transport_params *t_mstr_params, + struct sdw_port_params *p_mstr_params) +{ + struct sdw_mstr_driver *ops = mstr_bs->mstr->driver; + int banktouse, ret = 0; + + /* 1. Get current bank in use from bus structure*/ + banktouse = mstr_bs->active_bank; + banktouse = !banktouse; + + /* 2. Set Master Xport Params */ + if (ops->mstr_port_ops->dpn_set_port_transport_params) { + ret = ops->mstr_port_ops->dpn_set_port_transport_params + (mstr_bs->mstr, t_mstr_params, banktouse); + if (ret < 0) + return ret; + } + + /* 3. Set Master Port Params */ + if (ops->mstr_port_ops->dpn_set_port_params) { + ret = ops->mstr_port_ops->dpn_set_port_params + (mstr_bs->mstr, p_mstr_params, banktouse); + if (ret < 0) + return ret; + } + + return 0; +} + +/* + * sdw_cfg_params_mstr_slv - returns Success + * + * This function copies/configure master/slave transport & + * port params. + * + */ +int sdw_cfg_params_mstr_slv(struct sdw_bus *sdw_mstr_bs, + struct sdw_mstr_runtime *sdw_mstr_bs_rt, + bool state_check) +{ + struct sdw_slave_runtime *slv_rt = NULL; + struct sdw_port_runtime *port_rt, *port_slv_rt; + struct sdw_transport_params *t_params, *t_slv_params; + struct sdw_port_params *p_params, *p_slv_params; + int ret = 0; + + list_for_each_entry(slv_rt, + &sdw_mstr_bs_rt->slv_rt_list, slave_node) { + + if (slv_rt->slave == NULL) + break; + + /* configure transport params based on state */ + if ((state_check) && + (slv_rt->rt_state == SDW_STATE_UNPREPARE_RT)) + continue; + + list_for_each_entry(port_slv_rt, + &slv_rt->port_rt_list, port_node) { + + /* Fill in port params here */ + port_slv_rt->port_params.num = port_slv_rt->port_num; + port_slv_rt->port_params.word_length = + slv_rt->stream_params.bps; + /* Normal/Isochronous Mode */ + port_slv_rt->port_params.port_flow_mode = 0x0; + /* Normal Mode */ + port_slv_rt->port_params.port_data_mode = 0x0; + t_slv_params = &port_slv_rt->transport_params; + p_slv_params = 
&port_slv_rt->port_params; + + /* Configure xport & port params for slave */ + ret = sdw_cfg_slv_params(sdw_mstr_bs, t_slv_params, + p_slv_params, slv_rt->slave->slv_number); + if (ret < 0) + return ret; + + } + } + + if ((state_check) && + (sdw_mstr_bs_rt->rt_state == SDW_STATE_UNPREPARE_RT)) + return 0; + + list_for_each_entry(port_rt, + &sdw_mstr_bs_rt->port_rt_list, port_node) { + + /* Transport and port parameters */ + t_params = &port_rt->transport_params; + p_params = &port_rt->port_params; + + + p_params->num = port_rt->port_num; + p_params->word_length = sdw_mstr_bs_rt->stream_params.bps; + p_params->port_flow_mode = 0x0; /* Normal/Isochronous Mode */ + p_params->port_data_mode = 0x0; /* Normal Mode */ + + /* Configure xport params and port params for master */ + ret = sdw_cfg_mstr_params(sdw_mstr_bs, t_params, p_params); + if (ret < 0) + return ret; + + } + + return 0; +} + + +/* + * sdw_cfg_slv_enable_disable - returns Success + * -EINVAL - In case of error. + * + * + * This function enable/disable slave port channels. 
+ */ +int sdw_cfg_slv_enable_disable(struct sdw_bus *mstr_bs, + struct sdw_slave_runtime *slv_rt_strm, + struct sdw_port_runtime *port_slv_strm, + struct port_chn_en_state *chn_en) +{ + struct sdw_msg wr_msg, rd_msg; + int ret = 0; + int banktouse; + u8 wbuf[1] = {0}; + u8 rbuf[1] = {0}; + + /* Get current bank in use from bus structure*/ + banktouse = mstr_bs->active_bank; + if ((chn_en->is_activate) || (chn_en->is_bank_sw)) + banktouse = !banktouse; + + rd_msg.addr = wr_msg.addr = ((SDW_DPN_CHANNELEN + + (SDW_BANK1_REGISTER_OFFSET * banktouse)) + + (SDW_NUM_DATA_PORT_REGISTERS * + port_slv_strm->port_num)); + + rd_msg.ssp_tag = 0x0; + rd_msg.flag = SDW_MSG_FLAG_READ; + rd_msg.len = 1; + rd_msg.slave_addr = slv_rt_strm->slave->slv_number; + rd_msg.buf = rbuf; + rd_msg.addr_page1 = 0x0; + rd_msg.addr_page2 = 0x0; + + wr_msg.ssp_tag = 0x0; + wr_msg.flag = SDW_MSG_FLAG_WRITE; + wr_msg.len = 1; + wr_msg.slave_addr = slv_rt_strm->slave->slv_number; + wr_msg.buf = wbuf; + wr_msg.addr_page1 = 0x0; + wr_msg.addr_page2 = 0x0; + + + if (chn_en->is_activate) { + + /* + * 1. slave port enable_ch_pre + * --> callback + * --> no callback available + */ + + /* 2. slave port enable */ + ret = sdw_slave_transfer(mstr_bs->mstr, &rd_msg, 1); + if (ret != 1) { + ret = -EINVAL; + dev_err(&mstr_bs->mstr->dev, + "Register transfer failed\n"); + goto out; + } + + wbuf[0] = (rbuf[0] | port_slv_strm->channel_mask); + + ret = sdw_slave_transfer(mstr_bs->mstr, &wr_msg, 1); + if (ret != 1) { + ret = -EINVAL; + dev_err(&mstr_bs->mstr->dev, + "Register transfer failed\n"); + goto out; + } + + rbuf[0] = 0; + ret = sdw_slave_transfer(mstr_bs->mstr, &rd_msg, 1); + if (ret != 1) { + ret = -EINVAL; + dev_err(&mstr_bs->mstr->dev, + "Register transfer failed\n"); + goto out; + } + /* + * 3. slave port enable post pre + * --> callback + * --> no callback available + */ + slv_rt_strm->rt_state = SDW_STATE_ENABLE_RT; + + } else { + + /* + * 1. 
slave port enable_ch_unpre + * --> callback + * --> no callback available + */ + + /* 2. slave port disable */ + ret = sdw_slave_transfer(mstr_bs->mstr, &rd_msg, 1); + if (ret != 1) { + ret = -EINVAL; + dev_err(&mstr_bs->mstr->dev, + "Register transfer failed\n"); + goto out; + } + + wbuf[0] = (rbuf[0] & ~(port_slv_strm->channel_mask)); + + ret = sdw_slave_transfer(mstr_bs->mstr, &wr_msg, 1); + if (ret != 1) { + ret = -EINVAL; + dev_err(&mstr_bs->mstr->dev, + "Register transfer failed\n"); + goto out; + } + + rbuf[0] = 0; + ret = sdw_slave_transfer(mstr_bs->mstr, &rd_msg, 1); + if (ret != 1) { + ret = -EINVAL; + dev_err(&mstr_bs->mstr->dev, + "Register transfer failed\n"); + goto out; + } + + /* + * 3. slave port enable post unpre + * --> callback + * --> no callback available + */ + if (!chn_en->is_bank_sw) + slv_rt_strm->rt_state = SDW_STATE_DISABLE_RT; + + } +out: + return ret; + +} + + +/* + * sdw_cfg_mstr_activate_disable - returns Success + * -EINVAL - In case of error. + * + * + * This function enable/disable master port channels. + */ +int sdw_cfg_mstr_activate_disable(struct sdw_bus *mstr_bs, + struct sdw_mstr_runtime *mstr_rt_strm, + struct sdw_port_runtime *port_mstr_strm, + struct port_chn_en_state *chn_en) +{ + struct sdw_mstr_driver *ops = mstr_bs->mstr->driver; + struct sdw_activate_ch activate_ch; + int banktouse, ret = 0; + + activate_ch.num = port_mstr_strm->port_num; + activate_ch.ch_mask = port_mstr_strm->channel_mask; + activate_ch.activate = chn_en->is_activate; /* Enable/Disable */ + + /* Get current bank in use from bus structure*/ + banktouse = mstr_bs->active_bank; + if ((chn_en->is_activate) || (chn_en->is_bank_sw)) + banktouse = !banktouse; + + /* 2. 
Master port enable */ + if (ops->mstr_port_ops->dpn_port_activate_ch) { + ret = ops->mstr_port_ops->dpn_port_activate_ch(mstr_bs->mstr, + &activate_ch, banktouse); + if (ret < 0) + return ret; + } + + if (chn_en->is_activate) + mstr_rt_strm->rt_state = SDW_STATE_ENABLE_RT; + else if (!chn_en->is_bank_sw) + mstr_rt_strm->rt_state = SDW_STATE_DISABLE_RT; + + return 0; +} + + +/* + * sdw_en_dis_mstr_slv - returns Success + * -EINVAL - In case of error. + * + * + * This function call master/slave enable/disable + * channel API's. + */ +int sdw_en_dis_mstr_slv(struct sdw_bus *sdw_mstr_bs, + struct sdw_runtime *sdw_rt, bool is_act) +{ + struct sdw_slave_runtime *slv_rt_strm = NULL; + struct sdw_port_runtime *port_slv_strm, *port_mstr_strm; + struct sdw_mstr_runtime *mstr_rt_strm = NULL; + struct port_chn_en_state chn_en; + int ret = 0; + + if (is_act) + chn_en.is_bank_sw = true; + else + chn_en.is_bank_sw = false; + + chn_en.is_activate = is_act; + + list_for_each_entry(slv_rt_strm, &sdw_rt->slv_rt_list, slave_sdw_node) { + + if (slv_rt_strm->slave == NULL) + break; + + list_for_each_entry(port_slv_strm, + &slv_rt_strm->port_rt_list, port_node) { + + ret = sdw_cfg_slv_enable_disable + (sdw_mstr_bs, slv_rt_strm, + port_slv_strm, &chn_en); + if (ret < 0) + return ret; + + } + + break; + + } + + list_for_each_entry(mstr_rt_strm, + &sdw_rt->mstr_rt_list, mstr_sdw_node) { + + if (mstr_rt_strm->mstr == NULL) + break; + + list_for_each_entry(port_mstr_strm, + &mstr_rt_strm->port_rt_list, port_node) { + + ret = sdw_cfg_mstr_activate_disable + (sdw_mstr_bs, mstr_rt_strm, + port_mstr_strm, &chn_en); + if (ret < 0) + return ret; + + } + + } + + return 0; +} + + +/* + * sdw_en_dis_mstr_slv_state - returns Success + * -EINVAL - In case of error. + * + * + * This function call master/slave enable/disable + * channel API's based on runtime state. 
+ */ +int sdw_en_dis_mstr_slv_state(struct sdw_bus *sdw_mstr_bs, + struct sdw_mstr_runtime *sdw_mstr_bs_rt, + struct port_chn_en_state *chn_en) +{ + struct sdw_slave_runtime *slv_rt = NULL; + struct sdw_port_runtime *port_slv_rt, *port_rt; + int ret = 0; + + list_for_each_entry(slv_rt, &sdw_mstr_bs_rt->slv_rt_list, slave_node) { + + if (slv_rt->slave == NULL) + break; + + if (slv_rt->rt_state == SDW_STATE_ENABLE_RT) { + + list_for_each_entry(port_slv_rt, + &slv_rt->port_rt_list, port_node) { + + ret = sdw_cfg_slv_enable_disable + (sdw_mstr_bs, slv_rt, + port_slv_rt, chn_en); + if (ret < 0) + return ret; + + } + } + } + + if (sdw_mstr_bs_rt->rt_state == SDW_STATE_ENABLE_RT) { + + list_for_each_entry(port_rt, + &sdw_mstr_bs_rt->port_rt_list, port_node) { + + ret = sdw_cfg_mstr_activate_disable + (sdw_mstr_bs, sdw_mstr_bs_rt, port_rt, chn_en); + if (ret < 0) + return ret; + + } + } + + return 0; +} + + +/* + * sdw_get_clock_frmshp - returns Success + * -EINVAL - In case of error. + * + * + * This function computes clock and frame shape based on + * clock frequency. 
+ */ +int sdw_get_clock_frmshp(struct sdw_bus *sdw_mstr_bs, int *frame_int, + struct sdw_mstr_runtime *sdw_mstr_rt) +{ + struct sdw_master_capabilities *sdw_mstr_cap = NULL; + struct sdw_slv_dpn_capabilities *sdw_slv_dpn_cap = NULL; + struct port_audio_mode_properties *mode_prop = NULL; + struct sdw_slave_runtime *slv_rt = NULL; + struct sdw_port_runtime *port_slv_rt = NULL; + int i, j, rc; + int clock_reqd = 0, frame_interval = 0, frame_frequency = 0; + int sel_row = 0, sel_col = 0, pn = 0; + int value; + bool clock_ok = false; + + sdw_mstr_cap = &sdw_mstr_bs->mstr->mstr_capabilities; + + /* + * Find nearest clock frequency needed by master for + * given bandwidth + */ + for (i = 0; i < MAXCLOCKDIVS; i++) { + + /* TBD: Check why 3000 */ + if ((((sdw_mstr_cap->base_clk_freq * 2) / clock_div[i]) <= + sdw_mstr_bs->bandwidth) || + ((((sdw_mstr_cap->base_clk_freq * 2) / clock_div[i]) + % 3000) != 0)) + continue; + + clock_reqd = ((sdw_mstr_cap->base_clk_freq * 2) / clock_div[i]); + + /* + * Check all the slave device capabilities + * here and find whether given frequency is + * supported by all slaves + */ + list_for_each_entry(slv_rt, &sdw_mstr_rt->slv_rt_list, + slave_node) { + + /* check for valid slave */ + if (slv_rt->slave == NULL) + break; + + /* check clock req for each port */ + list_for_each_entry(port_slv_rt, + &slv_rt->port_rt_list, port_node) { + + pn = port_slv_rt->port_num; + + + sdw_slv_dpn_cap = + &slv_rt->slave->sdw_slv_cap.sdw_dpn_cap[pn]; + mode_prop = sdw_slv_dpn_cap->mode_properties; + + /* + * TBD: Indentation to be fixed, + * code refactoring to be considered. 
+ */ + if (mode_prop->num_freq_configs) { + for (j = 0; j < + mode_prop->num_freq_configs; j++) { + value = + mode_prop->freq_supported[j]; + if (clock_reqd == value) { + clock_ok = true; + break; + } + if (j == + mode_prop->num_freq_configs) { + clock_ok = false; + break; + } + + } + + } else { + if ((clock_reqd < + mode_prop->min_frequency) || + (clock_reqd > + mode_prop->max_frequency)) { + clock_ok = false; + } else + clock_ok = true; + } + + /* Go for next clock frequency */ + if (!clock_ok) + break; + } + + /* + * Dont check next slave, go for next clock + * frequency + */ + if (!clock_ok) + break; + } + + /* check for next clock divider */ + if (!clock_ok) + continue; + + /* Find frame shape based on bandwidth per controller */ + for (rc = 0; rc < MAX_NUM_ROW_COLS; rc++) { + frame_interval = + sdw_core.rowcolcomb[rc].row * + sdw_core.rowcolcomb[rc].col; + frame_frequency = clock_reqd/frame_interval; + + if ((clock_reqd - + (frame_frequency * + sdw_core.rowcolcomb[rc]. + control_bits)) < + sdw_mstr_bs->bandwidth) + continue; + + break; + } + + /* Valid frameshape not found, check for next clock freq */ + if (rc == MAX_NUM_ROW_COLS) + continue; + + sel_row = sdw_core.rowcolcomb[rc].row; + sel_col = sdw_core.rowcolcomb[rc].col; + sdw_mstr_bs->frame_freq = frame_frequency; + sdw_mstr_bs->clk_freq = clock_reqd; + sdw_mstr_bs->clk_div = clock_div[i]; + clock_ok = false; + *frame_int = frame_interval; + sdw_mstr_bs->col = sel_col; + sdw_mstr_bs->row = sel_row; + + return 0; + } + + /* None of clock frequency matches, return error */ + if (i == MAXCLOCKDIVS) + return -EINVAL; + + return 0; +} + +/* + * sdw_compute_sys_interval - returns Success + * -EINVAL - In case of error. + * + * + * This function computes system interval. 
 */
int sdw_compute_sys_interval(struct sdw_bus *sdw_mstr_bs,
		struct sdw_master_capabilities *sdw_mstr_cap,
		int frame_interval)
{
	struct sdw_master *sdw_mstr = sdw_mstr_bs->mstr;
	struct sdw_mstr_runtime *sdw_mstr_rt = NULL;
	struct sdw_slave_runtime *slv_rt = NULL;
	struct sdw_transport_params *t_params = NULL, *t_slv_params = NULL;
	struct sdw_port_runtime *port_rt, *port_slv_rt;
	int lcmnum1 = 0, lcmnum2 = 0, div = 0, lcm = 0;
	int sample_interval;

	/*
	 * once you got bandwidth frame shape for bus,
	 * run a loop for all the active streams running
	 * on bus and compute stream interval & sample_interval.
	 */
	list_for_each_entry(sdw_mstr_rt,
			&sdw_mstr->mstr_rt_list, mstr_node) {

		if (sdw_mstr_rt->mstr == NULL)
			break;

		/*
		 * Calculate sample interval for stream
		 * running on given master.
		 */
		if (sdw_mstr_rt->stream_params.rate)
			sample_interval = (sdw_mstr_bs->clk_freq /
					sdw_mstr_rt->stream_params.rate);
		else
			return -EINVAL;

		/* Run port loop to assign sample interval per port */
		list_for_each_entry(port_rt,
				&sdw_mstr_rt->port_rt_list, port_node) {

			t_params = &port_rt->transport_params;

			/*
			 * Assign sample interval each port transport
			 * properties. Assumption is that sample interval
			 * per port for given master will be same.
			 */
			t_params->sample_interval = sample_interval;
		}

		/* Calculate LCM (running LCM over all stream intervals) */
		lcmnum2 = sample_interval;
		if (!lcmnum1)
			/*
			 * NOTE(review): sdw_lcm(x, x) == x, so this merely
			 * seeds the running LCM with the first interval —
			 * presumably a typo for plain "lcmnum2"; confirm.
			 */
			lcmnum1 = sdw_lcm(lcmnum2, lcmnum2);
		else
			lcmnum1 = sdw_lcm(lcmnum1, lcmnum2);

		/* Run loop for slave per master runtime */
		list_for_each_entry(slv_rt,
				&sdw_mstr_rt->slv_rt_list, slave_node) {

			if (slv_rt->slave == NULL)
				break;

			/* Assign sample interval for each port of slave */
			list_for_each_entry(port_slv_rt,
					&slv_rt->port_rt_list, port_node) {

				t_slv_params = &port_slv_rt->transport_params;

				/* Assign sample interval each port */
				t_slv_params->sample_interval = sample_interval;
			}
		}
	}

	/*
	 * If system interval already calculated
	 * In pause/resume, underrun scenario
	 */
	if (sdw_mstr_bs->system_interval)
		return 0;

	/* Assign frame stream interval */
	sdw_mstr_bs->stream_interval = lcmnum1;

	/* 6. compute system_interval */
	if ((sdw_mstr_cap) && (sdw_mstr_bs->clk_freq)) {

		div = ((sdw_mstr_cap->base_clk_freq * 2) /
				sdw_mstr_bs->clk_freq);

		if ((lcmnum1) && (frame_interval))
			lcm = sdw_lcm(lcmnum1, frame_interval);
		else
			return -EINVAL;

		sdw_mstr_bs->system_interval = (div * lcm);

	}

	/*
	 * Something went wrong, may be sdw_lcm value may be 0,
	 * return error accordingly
	 */
	if (!sdw_mstr_bs->system_interval)
		return -EINVAL;


	return 0;
}

/**
 * sdw_chk_first_node - returns True or false
 *
 * This function returns true in case of first node
 * else returns false.
 */
bool sdw_chk_first_node(struct sdw_mstr_runtime *sdw_mstr_rt,
		struct sdw_master *sdw_mstr)
{
	struct sdw_mstr_runtime *first_rt = NULL;

	first_rt = list_first_entry(&sdw_mstr->mstr_rt_list,
			struct sdw_mstr_runtime, mstr_node);
	if (sdw_mstr_rt == first_rt)
		return true;
	else
		return false;

}

/*
 * sdw_compute_hstart_hstop - returns Success
 * -EINVAL - In case of error.
+ * + * + * This function computes hstart and hstop for running + * streams per master & slaves. + */ +int sdw_compute_hstart_hstop(struct sdw_bus *sdw_mstr_bs) +{ + struct sdw_master *sdw_mstr = sdw_mstr_bs->mstr; + struct sdw_mstr_runtime *sdw_mstr_rt; + struct sdw_transport_params *t_params = NULL, *t_slv_params = NULL; + struct sdw_slave_runtime *slv_rt = NULL; + struct sdw_port_runtime *port_rt, *port_slv_rt; + int hstart = 0, hstop = 0; + int column_needed = 0; + int sel_col = sdw_mstr_bs->col; + int group_count = 0, no_of_channels = 0; + struct temp_elements *temp, *element; + int rates[10]; + int num, ch_mask, block_offset, i, port_block_offset; + + /* Run loop for all master runtimes for given master */ + list_for_each_entry(sdw_mstr_rt, + &sdw_mstr->mstr_rt_list, mstr_node) { + + if (sdw_mstr_rt->mstr == NULL) + break; + + /* should not compute any transport params */ + if (sdw_mstr_rt->rt_state == SDW_STATE_UNPREPARE_RT) + continue; + + /* Perform grouping of streams based on stream rate */ + if (sdw_mstr_rt == list_first_entry(&sdw_mstr->mstr_rt_list, + struct sdw_mstr_runtime, mstr_node)) + rates[group_count++] = sdw_mstr_rt->stream_params.rate; + else { + num = group_count; + for (i = 0; i < num; i++) { + if (sdw_mstr_rt->stream_params.rate == rates[i]) + break; + + if (i == num) + rates[group_count++] = + sdw_mstr_rt->stream_params.rate; + } + } + } + + /* check for number of streams and number of group count */ + if (group_count == 0) + return 0; + + /* Allocate temporary memory holding temp variables */ + temp = kzalloc((sizeof(struct temp_elements) * group_count), + GFP_KERNEL); + if (!temp) + return -ENOMEM; + + /* Calculate full bandwidth per group */ + for (i = 0; i < group_count; i++) { + element = &temp[i]; + element->rate = rates[i]; + element->full_bw = sdw_mstr_bs->clk_freq/element->rate; + } + + /* Calculate payload bandwidth per group */ + list_for_each_entry(sdw_mstr_rt, + &sdw_mstr->mstr_rt_list, mstr_node) { + + if (sdw_mstr_rt->mstr 
== NULL) + break; + + /* should not compute any transport params */ + if (sdw_mstr_rt->rt_state == SDW_STATE_UNPREPARE_RT) + continue; + + for (i = 0; i < group_count; i++) { + element = &temp[i]; + if (sdw_mstr_rt->stream_params.rate == element->rate) { + element->payload_bw += + sdw_mstr_rt->stream_params.bps * + sdw_mstr_rt->stream_params.channel_count; + } + + /* Any of stream rate should match */ + if (i == group_count) + return -EINVAL; + } + } + + /* Calculate hwidth per group and total column needed per master */ + for (i = 0; i < group_count; i++) { + element = &temp[i]; + element->hwidth = + (sel_col * element->payload_bw + + element->full_bw - 1)/element->full_bw; + column_needed += element->hwidth; + } + + /* Check column required should not be greater than selected columns*/ + if (column_needed > sel_col - 1) + return -EINVAL; + + /* Compute hstop */ + hstop = sel_col - 1; + + /* Run loop for all groups to compute transport parameters */ + for (i = 0; i < group_count; i++) { + port_block_offset = block_offset = 1; + element = &temp[i]; + + /* Find streams associated with each group */ + list_for_each_entry(sdw_mstr_rt, + &sdw_mstr->mstr_rt_list, mstr_node) { + + if (sdw_mstr_rt->mstr == NULL) + break; + + /* should not compute any transport params */ + if (sdw_mstr_rt->rt_state == SDW_STATE_UNPREPARE_RT) + continue; + + if (sdw_mstr_rt->stream_params.rate != element->rate) + continue; + + /* Compute hstart */ + sdw_mstr_rt->hstart = hstart = + hstop - element->hwidth + 1; + sdw_mstr_rt->hstop = hstop; + + /* Assign hstart, hstop, block offset for each port */ + list_for_each_entry(port_rt, + &sdw_mstr_rt->port_rt_list, port_node) { + + t_params = &port_rt->transport_params; + t_params->num = port_rt->port_num; + t_params->hstart = hstart; + t_params->hstop = hstop; + t_params->offset1 = port_block_offset; + t_params->offset2 = port_block_offset >> 8; + + /* Only BlockPerPort supported */ + t_params->blockgroupcontrol_valid = true; + 
t_params->blockgroupcontrol = 0x0; + t_params->lanecontrol = 0x0; + /* Copy parameters if first node */ + if (port_rt == list_first_entry + (&sdw_mstr_rt->port_rt_list, + struct sdw_port_runtime, port_node)) { + + sdw_mstr_rt->hstart = hstart; + sdw_mstr_rt->hstop = hstop; + + sdw_mstr_rt->block_offset = + port_block_offset; + + } + + /* Get no. of channels running on curr. port */ + ch_mask = port_rt->channel_mask; + no_of_channels = (((ch_mask >> 3) & 1) + + ((ch_mask >> 2) & 1) + + ((ch_mask >> 1) & 1) + + (ch_mask & 1)); + + + port_block_offset += + sdw_mstr_rt->stream_params.bps * + no_of_channels; + } + + /* Compute block offset */ + block_offset += sdw_mstr_rt->stream_params.bps * + sdw_mstr_rt->stream_params.channel_count; + + /* + * Re-assign port_block_offset for next stream + * under same group + */ + port_block_offset = block_offset; + } + + /* Compute hstop for next group */ + hstop = hstop - element->hwidth; + } + + /* Compute transport params for slave */ + + /* Run loop for master runtime streams running on master */ + list_for_each_entry(sdw_mstr_rt, + &sdw_mstr->mstr_rt_list, mstr_node) { + + /* Get block offset from master runtime */ + port_block_offset = sdw_mstr_rt->block_offset; + + /* Run loop for slave per master runtime */ + list_for_each_entry(slv_rt, + &sdw_mstr_rt->slv_rt_list, slave_node) { + + if (slv_rt->slave == NULL) + break; + + if (slv_rt->rt_state == SDW_STATE_UNPREPARE_RT) + continue; + + /* Run loop for each port of slave */ + list_for_each_entry(port_slv_rt, + &slv_rt->port_rt_list, port_node) { + + t_slv_params = &port_slv_rt->transport_params; + t_slv_params->num = port_slv_rt->port_num; + + /* Assign transport parameters */ + t_slv_params->hstart = sdw_mstr_rt->hstart; + t_slv_params->hstop = sdw_mstr_rt->hstop; + t_slv_params->offset1 = port_block_offset; + t_slv_params->offset2 = port_block_offset >> 8; + + /* Only BlockPerPort supported */ + t_slv_params->blockgroupcontrol_valid = true; + t_slv_params->blockgroupcontrol 
= 0x0; + t_slv_params->lanecontrol = 0x0; + + /* Get no. of channels running on curr. port */ + ch_mask = port_slv_rt->channel_mask; + no_of_channels = (((ch_mask >> 3) & 1) + + ((ch_mask >> 2) & 1) + + ((ch_mask >> 1) & 1) + + (ch_mask & 1)); + + /* Increment block offset for next port/slave */ + port_block_offset += slv_rt->stream_params.bps * + no_of_channels; + } + } + } + + kfree(temp); + + return 0; +} + +/* + * sdw_cfg_frmshp_bnkswtch - returns Success + * -EINVAL - In case of error. + * -ENOMEM - In case of memory alloc failure. + * -EAGAIN - In case of activity ongoing. + * + * + * This function broadcast frameshape on framectrl + * register and performs bank switch. + */ +int sdw_cfg_frmshp_bnkswtch(struct sdw_bus *mstr_bs, bool is_wait) +{ + struct sdw_msg *wr_msg; + int ret = 0; + int banktouse, numcol, numrow; + u8 *wbuf; + + wr_msg = kzalloc(sizeof(struct sdw_msg), GFP_KERNEL); + if (!wr_msg) + return -ENOMEM; + + mstr_bs->async_data.msg = wr_msg; + + wbuf = kzalloc(sizeof(*wbuf), GFP_KERNEL); + if (!wbuf) + return -ENOMEM; + + numcol = sdw_get_col_to_num(mstr_bs->col); + numrow = sdw_get_row_to_num(mstr_bs->row); + + wbuf[0] = numcol | (numrow << 3); + /* Get current bank in use from bus structure*/ + banktouse = mstr_bs->active_bank; + banktouse = !banktouse; + + if (banktouse) { + wr_msg->addr = (SDW_SCP_FRAMECTRL + SDW_BANK1_REGISTER_OFFSET) + + (SDW_NUM_DATA_PORT_REGISTERS * 0); /* Data port 0 */ + } else { + + wr_msg->addr = SDW_SCP_FRAMECTRL + + (SDW_NUM_DATA_PORT_REGISTERS * 0); /* Data port 0 */ + } + + wr_msg->ssp_tag = 0x1; + wr_msg->flag = SDW_MSG_FLAG_WRITE; + wr_msg->len = 1; + wr_msg->slave_addr = 0xF; /* Broadcast address*/ + wr_msg->buf = wbuf; + wr_msg->addr_page1 = 0x0; + wr_msg->addr_page2 = 0x0; + + if (is_wait) { + + if (in_atomic() || irqs_disabled()) { + ret = sdw_trylock_mstr(mstr_bs->mstr); + if (!ret) { + /* SDW activity is ongoing. 
*/ + ret = -EAGAIN; + goto out; + } + } else + sdw_lock_mstr(mstr_bs->mstr); + + ret = sdw_slave_transfer_async(mstr_bs->mstr, wr_msg, + 1, &mstr_bs->async_data); + if (ret != 1) { + ret = -EINVAL; + dev_err(&mstr_bs->mstr->dev, "Register transfer failed\n"); + goto out; + } + + } else { + ret = sdw_slave_transfer(mstr_bs->mstr, wr_msg, 1); + if (ret != 1) { + ret = -EINVAL; + dev_err(&mstr_bs->mstr->dev, "Register transfer failed\n"); + goto out; + } + + } + + msleep(100); /* TBD: Remove this */ + + /* + * TBD: check whether we need to poll on + * mcp active bank bit to switch bank + */ + mstr_bs->active_bank = banktouse; + + if (!is_wait) { + kfree(mstr_bs->async_data.msg->buf); + kfree(mstr_bs->async_data.msg); + } + + +out: + + return ret; +} + +/* + * sdw_cfg_frmshp_bnkswtch_wait - returns Success + * -ETIMEDOUT - In case of timeout + * + * This function waits on completion of + * bank switch. + */ +int sdw_cfg_frmshp_bnkswtch_wait(struct sdw_bus *mstr_bs) +{ + unsigned long time_left; + struct sdw_master *mstr = mstr_bs->mstr; + + time_left = wait_for_completion_timeout( + &mstr_bs->async_data.xfer_complete, + 3000); + if (!time_left) { + dev_err(&mstr->dev, "Controller Timed out\n"); + sdw_unlock_mstr(mstr); + return -ETIMEDOUT; + } + kfree(mstr_bs->async_data.msg->buf); + kfree(mstr_bs->async_data.msg); + sdw_unlock_mstr(mstr); + return 0; +} + +/* + * sdw_config_bs_prms - returns Success + * -EINVAL - In case of error. + * + * + * This function performs master/slave transport + * params config, set SSP interval, set Clock + * frequency, enable channel. This API is called + * from sdw_bus_calc_bw & sdw_bus_calc_bw_dis API. 
 *
 */
int sdw_config_bs_prms(struct sdw_bus *sdw_mstr_bs, bool state_check)
{
	struct port_chn_en_state chn_en;
	struct sdw_master *sdw_mstr = sdw_mstr_bs->mstr;
	struct sdw_mstr_runtime *sdw_mstr_bs_rt = NULL;
	struct sdw_mstr_driver *ops;
	int banktouse, ret = 0;

	/* Walk every master runtime and program the inactive bank. */
	list_for_each_entry(sdw_mstr_bs_rt,
			&sdw_mstr->mstr_rt_list, mstr_node) {

		if (sdw_mstr_bs_rt->mstr == NULL)
			continue;

		/*
		 * Configure transport and port params
		 * for master and slave ports.
		 */
		ret = sdw_cfg_params_mstr_slv(sdw_mstr_bs,
				sdw_mstr_bs_rt, state_check);
		if (ret < 0) {
			/* TBD: Undo all the computation */
			dev_err(&sdw_mstr_bs->mstr->dev,
					"slave/master config params failed\n");
			return ret;
		}

		/* Get master driver ops */
		ops = sdw_mstr_bs->mstr->driver;

		/* Configure SSP on the bank about to become active */
		banktouse = sdw_mstr_bs->active_bank;
		banktouse = !banktouse;

		/*
		 * TBD: Currently harcoded SSP interval,
		 * computed value to be taken from system_interval in
		 * bus data structure.
		 * Add error check.
		 */
		if (ops->mstr_ops->set_ssp_interval)
			ops->mstr_ops->set_ssp_interval(sdw_mstr_bs->mstr,
					SDW_DEFAULT_SSP, banktouse);

		/*
		 * Configure Clock
		 * TBD: Add error check
		 */
		if (ops->mstr_ops->set_clock_freq)
			ops->mstr_ops->set_clock_freq(sdw_mstr_bs->mstr,
					sdw_mstr_bs->clk_div, banktouse);

		/* Enable channel on alternate bank for running streams */
		chn_en.is_activate = true;
		chn_en.is_bank_sw = true;
		ret = sdw_en_dis_mstr_slv_state
			(sdw_mstr_bs, sdw_mstr_bs_rt, &chn_en);
		if (ret < 0) {
			/* TBD: Undo all the computation */
			dev_err(&sdw_mstr_bs->mstr->dev,
					"Channel enable failed\n");
			return ret;
		}

	}

	return 0;
}

/*
 * sdw_dis_chan - returns Success
 * -EINVAL - In case of error.
 *
 *
 * This function disables channel on alternate
 * bank. This API is called from sdw_bus_calc_bw
 * & sdw_bus_calc_bw_dis when channel on current
 * bank is enabled.
 *
 */
int sdw_dis_chan(struct sdw_bus *sdw_mstr_bs)
{
	struct sdw_master *sdw_mstr = sdw_mstr_bs->mstr;
	struct sdw_mstr_runtime *sdw_mstr_bs_rt = NULL;
	struct port_chn_en_state chn_en;
	int ret = 0;

	/* Disable (bank-switch flavour) every enabled runtime's channels. */
	list_for_each_entry(sdw_mstr_bs_rt,
			&sdw_mstr->mstr_rt_list, mstr_node) {

		if (sdw_mstr_bs_rt->mstr == NULL)
			continue;

		chn_en.is_activate = false;
		chn_en.is_bank_sw = true;
		ret = sdw_en_dis_mstr_slv_state(sdw_mstr_bs,
				sdw_mstr_bs_rt, &chn_en);
		if (ret < 0)
			return ret;
	}

	return 0;
}


/*
 * sdw_cfg_slv_prep_unprep - returns Success
 * -EINVAL - In case of error.
 *
 *
 * This function prepare/unprepare slave ports.
 *
 * Prepare: pre-prepare callback, read/modify/write of
 * DPN_PrepareCtrl (non-simplified channel-prepare slaves only),
 * wait for port ready, post-prepare callback, state -> PREPARE.
 * Unprepare mirrors the same sequence with the mask cleared.
 */
int sdw_cfg_slv_prep_unprep(struct sdw_bus *mstr_bs,
		struct sdw_slave_runtime *slv_rt_strm,
		struct sdw_port_runtime *port_slv_strm,
		bool prep)
{
	struct sdw_slave_driver *slv_ops = slv_rt_strm->slave->driver;
	struct sdw_slv_capabilities *slv_cap =
		&slv_rt_strm->slave->sdw_slv_cap;
	struct sdw_slv_dpn_capabilities *sdw_slv_dpn_cap =
		slv_cap->sdw_dpn_cap;

	struct sdw_msg wr_msg, rd_msg, rd_msg1;
	int ret = 0;
	int banktouse;
	u8 wbuf[1] = {0};
	u8 rbuf[1] = {0};
	u8 rbuf1[1] = {0};

	/* Get current bank in use from bus structure*/
	banktouse = mstr_bs->active_bank;
	banktouse = !banktouse;

	/* Read SDW_DPN_PREPARECTRL register */
	rd_msg.addr = wr_msg.addr = SDW_DPN_PREPARECTRL +
		(SDW_NUM_DATA_PORT_REGISTERS * port_slv_strm->port_num);

	rd_msg.ssp_tag = 0x0;
	rd_msg.flag = SDW_MSG_FLAG_READ;
	rd_msg.len = 1;
	rd_msg.slave_addr = slv_rt_strm->slave->slv_number;
	rd_msg.buf = rbuf;
	rd_msg.addr_page1 = 0x0;
	rd_msg.addr_page2 = 0x0;

	/* rd_msg1 targets the prepare-status register of the same port */
	rd_msg1.ssp_tag = 0x0;
	rd_msg1.flag = SDW_MSG_FLAG_READ;
	rd_msg1.len = 1;
	rd_msg1.slave_addr = slv_rt_strm->slave->slv_number;
	rd_msg1.buf = rbuf1;
	rd_msg1.addr_page1 = 0x0;
	rd_msg1.addr_page2 = 0x0;


	rd_msg1.addr = SDW_DPN_PREPARESTATUS +
		(SDW_NUM_DATA_PORT_REGISTERS * port_slv_strm->port_num);

	wr_msg.ssp_tag = 0x0;
	wr_msg.flag = SDW_MSG_FLAG_WRITE;
	wr_msg.len = 1;
	wr_msg.slave_addr = slv_rt_strm->slave->slv_number;
	wr_msg.buf = wbuf;
	wr_msg.addr_page1 = 0x0;
	wr_msg.addr_page2 = 0x0;

	if (prep) { /* PREPARE */

		/*
		 * 1. slave port prepare_ch_pre
		 * --> callback
		 * --> handle_pre_port_prepare
		 */
		if (slv_ops->handle_pre_port_prepare) {
			slv_ops->handle_pre_port_prepare(slv_rt_strm->slave,
					port_slv_strm->port_num,
					port_slv_strm->channel_mask,
					banktouse);
		}

		/* 2. slave port prepare --> to write */
		if (sdw_slv_dpn_cap->prepare_ch) {

			/* NON SIMPLIFIED CM, prepare required */
			ret = sdw_slave_transfer(mstr_bs->mstr, &rd_msg, 1);
			if (ret != 1) {
				ret = -EINVAL;
				dev_err(&mstr_bs->mstr->dev,
						"Register transfer failed\n");
				goto out;
			}

			ret = sdw_slave_transfer(mstr_bs->mstr, &rd_msg1, 1);
			if (ret != 1) {
				ret = -EINVAL;
				dev_err(&mstr_bs->mstr->dev,
						"Register transfer failed\n");
				goto out;
			}

			/* Set the channel bits on top of the current value */
			wbuf[0] = (rbuf[0] | port_slv_strm->channel_mask);

			/*
			 * TBD: poll for prepare interrupt bit
			 * before calling post_prepare
			 * 2. check capabilities if simplified
			 * CM no need to prepare
			 */
			ret = sdw_slave_transfer(mstr_bs->mstr, &wr_msg, 1);
			if (ret != 1) {
				ret = -EINVAL;
				dev_err(&mstr_bs->mstr->dev,
						"Register transfer failed\n");
				goto out;
			}

			/*
			 * TBD: check on port ready,
			 * ideally we should check on prepare
			 * status for port_ready
			 */

			/* wait for completion on port ready*/
			msleep(100); /* TBD: Remove this */

			ret = sdw_slave_transfer(mstr_bs->mstr, &rd_msg1, 1);
			if (ret != 1) {
				ret = -EINVAL;
				dev_err(&mstr_bs->mstr->dev,
						"Register transfer failed\n");
				goto out;
			}
		}

		/*
		 * 3. slave port post pre
		 * --> callback
		 * --> handle_post_port_prepare
		 */
		if (slv_ops->handle_post_port_prepare) {
			slv_ops->handle_post_port_prepare
				(slv_rt_strm->slave,
				port_slv_strm->port_num,
				port_slv_strm->channel_mask, banktouse);
		}

		slv_rt_strm->rt_state = SDW_STATE_PREPARE_RT;

	} else {
		/* UNPREPARE */
		/*
		 * 1. slave port unprepare_ch_pre
		 * --> callback
		 * --> handle_pre_port_prepare
		 */
		if (slv_ops->handle_pre_port_unprepare) {
			slv_ops->handle_pre_port_unprepare(slv_rt_strm->slave,
					port_slv_strm->port_num,
					port_slv_strm->channel_mask,
					banktouse);
		}

		/* 2. slave port unprepare --> to write */
		if (sdw_slv_dpn_cap->prepare_ch) {

			/* NON SIMPLIFIED CM, unprepare required */

			/* Read SDW_DPN_PREPARECTRL register */
			ret = sdw_slave_transfer(mstr_bs->mstr, &rd_msg, 1);
			if (ret != 1) {
				ret = -EINVAL;
				dev_err(&mstr_bs->mstr->dev,
						"Register transfer failed\n");
				goto out;
			}

			/* Clear this stream's channel bits, keep others */
			wbuf[0] = (rbuf[0] & ~(port_slv_strm->channel_mask));

			/*
			 * TBD: poll for prepare interrupt bit before
			 * calling post_prepare
			 * Does it apply for unprepare aswell?
			 * 2. check capabilities if simplified CM
			 * no need to unprepare
			 */
			ret = sdw_slave_transfer(mstr_bs->mstr, &wr_msg, 1);
			if (ret != 1) {
				ret = -EINVAL;
				dev_err(&mstr_bs->mstr->dev,
						"Register transfer failed\n");
				goto out;
			}
		}

		/*
		 * 3. slave port post unpre
		 * --> callback
		 * --> handle_post_port_unprepare
		 */
		if (slv_ops->handle_post_port_unprepare) {
			slv_ops->handle_post_port_unprepare(slv_rt_strm->slave,
					port_slv_strm->port_num,
					port_slv_strm->channel_mask,
					banktouse);
		}

		slv_rt_strm->rt_state = SDW_STATE_UNPREPARE_RT;
	}
out:
	return ret;

}


/*
 * sdw_cfg_mstr_prep_unprep - returns Success
 * -EINVAL - In case of error.
 *
 *
 * This function prepare/unprepare master ports.
 */
int sdw_cfg_mstr_prep_unprep(struct sdw_bus *mstr_bs,
		struct sdw_mstr_runtime *mstr_rt_strm,
		struct sdw_port_runtime *port_mstr_strm,
		bool prep)
{
	struct sdw_mstr_driver *ops = mstr_bs->mstr->driver;
	struct sdw_prepare_ch prep_ch;
	int ret = 0;

	prep_ch.num = port_mstr_strm->port_num;
	prep_ch.ch_mask = port_mstr_strm->channel_mask;
	prep_ch.prepare = prep; /* Prepare/Unprepare */

	/* TBD: Bank configuration */

	/* 1. Master port prepare_ch_pre */
	if (ops->mstr_port_ops->dpn_port_prepare_ch_pre) {
		ret = ops->mstr_port_ops->dpn_port_prepare_ch_pre
			(mstr_bs->mstr, &prep_ch);
		if (ret < 0)
			return ret;
	}

	/* 2. Master port prepare */
	if (ops->mstr_port_ops->dpn_port_prepare_ch) {
		ret = ops->mstr_port_ops->dpn_port_prepare_ch
			(mstr_bs->mstr, &prep_ch);
		if (ret < 0)
			return ret;
	}

	/* 3. Master port prepare_ch_post */
	if (ops->mstr_port_ops->dpn_port_prepare_ch_post) {
		ret = ops->mstr_port_ops->dpn_port_prepare_ch_post
			(mstr_bs->mstr, &prep_ch);
		if (ret < 0)
			return ret;
	}

	/* Record the resulting runtime state on the master runtime */
	if (prep)
		mstr_rt_strm->rt_state = SDW_STATE_PREPARE_RT;
	else
		mstr_rt_strm->rt_state = SDW_STATE_UNPREPARE_RT;

	return 0;
}


/*
 * sdw_prep_unprep_mstr_slv - returns Success
 * -EINVAL - In case of error.
 *
 *
 * This function call master/slave prepare/unprepare
 * port configuration API's, called from sdw_bus_calc_bw
 * & sdw_bus_calc_bw_dis API's.
 */
int sdw_prep_unprep_mstr_slv(struct sdw_bus *sdw_mstr_bs,
		struct sdw_runtime *sdw_rt, bool is_prep)
{
	struct sdw_slave_runtime *slv_rt_strm = NULL;
	struct sdw_port_runtime *port_slv_strm, *port_mstr_strm;
	struct sdw_mstr_runtime *mstr_rt_strm = NULL;
	int ret = 0;

	/* Prepare/unprepare every port of every slave runtime first. */
	list_for_each_entry(slv_rt_strm,
			&sdw_rt->slv_rt_list, slave_sdw_node) {

		if (slv_rt_strm->slave == NULL)
			break;

		list_for_each_entry(port_slv_strm,
				&slv_rt_strm->port_rt_list, port_node) {

			ret = sdw_cfg_slv_prep_unprep(sdw_mstr_bs,
					slv_rt_strm, port_slv_strm, is_prep);
			if (ret < 0)
				return ret;
		}

	}

	/* Then do the same for the master runtime ports. */
	list_for_each_entry(mstr_rt_strm,
			&sdw_rt->mstr_rt_list, mstr_sdw_node) {

		if (mstr_rt_strm->mstr == NULL)
			break;

		list_for_each_entry(port_mstr_strm,
				&mstr_rt_strm->port_rt_list, port_node) {

			ret = sdw_cfg_mstr_prep_unprep(sdw_mstr_bs,
					mstr_rt_strm, port_mstr_strm, is_prep);
			if (ret < 0)
				return ret;
		}
	}

	return 0;
}

/*
 * master_to_bus - look up the sdw_bus that owns @mstr on the global
 * bus list. Should never fail for a registered master; WARNs and
 * returns NULL if no bus matches.
 */
struct sdw_bus *master_to_bus(struct sdw_master *mstr)
{
	struct sdw_bus *sdw_mstr_bs = NULL;

	list_for_each_entry(sdw_mstr_bs, &sdw_core.bus_list, bus_node) {
		/* Match master structure pointer */
		if (sdw_mstr_bs->mstr != mstr)
			continue;
		return sdw_mstr_bs;
	}
	/* This should never happen, added to suppress warning */
	WARN_ON(1);

	return NULL;
}

/*
 * sdw_chk_strm_prms - returns Success
 * -EINVAL - In case of error.
 *
 *
 * This function performs all the required
 * check such as isynchronous mode support,
 * stream rates etc. This API is called
 * from sdw_bus_calc_bw API.
 *
 */
int sdw_chk_strm_prms(struct sdw_master_capabilities *sdw_mstr_cap,
		struct sdw_stream_params *mstr_params,
		struct sdw_stream_params *stream_params)
{
	/* Asynchronous mode not supported, return Error */
	if (((sdw_mstr_cap->base_clk_freq * 2) % mstr_params->rate) != 0)
		return -EINVAL;

	/* Check for sampling frequency */
	if (stream_params->rate != mstr_params->rate)
		return -EINVAL;

	return 0;
}

/*
 * sdw_compute_bs_prms - returns Success
 * -EINVAL - In case of error.
 *
 *
 * This function performs master/slave transport
 * params computation. This API is called
 * from sdw_bus_calc_bw & sdw_bus_calc_bw_dis API.
 *
 */
int sdw_compute_bs_prms(struct sdw_bus *sdw_mstr_bs,
		struct sdw_mstr_runtime *sdw_mstr_rt)
{

	struct sdw_master_capabilities *sdw_mstr_cap = NULL;
	struct sdw_master *sdw_mstr = sdw_mstr_bs->mstr;
	int ret = 0, frame_interval = 0;

	sdw_mstr_cap = &sdw_mstr->mstr_capabilities;

	/* Pick clock frequency and frame shape for the new bandwidth */
	ret = sdw_get_clock_frmshp(sdw_mstr_bs, &frame_interval,
			sdw_mstr_rt);
	if (ret < 0) {
		/* TBD: Undo all the computation */
		dev_err(&sdw_mstr->dev, "clock/frameshape config failed\n");
		return ret;
	}

	/*
	 * TBD: find right place to run sorting on
	 * master rt_list. Below sorting is done based on
	 * bps from low to high, that means PDM streams
	 * will be placed before PCM.
	 */

	/*
	 * TBD Should we also perform sorting based on rate
	 * for PCM stream check. if yes then how??
	 * creating two different list.
	 */

	/* Compute system interval */
	ret = sdw_compute_sys_interval(sdw_mstr_bs, sdw_mstr_cap,
			frame_interval);
	if (ret < 0) {
		/* TBD: Undo all the computation */
		dev_err(&sdw_mstr->dev, "compute system interval failed\n");
		return ret;
	}

	/* Compute hstart/hstop */
	ret = sdw_compute_hstart_hstop(sdw_mstr_bs);
	if (ret < 0) {
		/* TBD: Undo all the computation */
		dev_err(&sdw_mstr->dev, "compute hstart/hstop failed\n");
		return ret;
	}

	return 0;
}

/*
 * sdw_bs_pre_bnkswtch_post - returns Success
 * -EINVAL or ret value - In case of error.
 *
 * This API performs on of the following operation
 * based on bs_state value:
 * pre-activate port
 * bank switch operation
 * post-activate port
 * bankswitch wait operation
 * disable channel operation
 */
int sdw_bs_pre_bnkswtch_post(struct sdw_runtime *sdw_rt, int bs_state)
{
	struct sdw_mstr_runtime *mstr_rt_act = NULL;
	struct sdw_bus *mstr_bs_act = NULL;
	struct sdw_master_port_ops *ops;
	int ret = 0;

	/* Apply the requested phase to every master in the stream. */
	list_for_each_entry(mstr_rt_act, &sdw_rt->mstr_rt_list,
			mstr_sdw_node) {

		if (mstr_rt_act->mstr == NULL)
			break;

		/* Get bus structure for master */
		mstr_bs_act = master_to_bus(mstr_rt_act->mstr);
		if (!mstr_bs_act)
			return -EINVAL;

		ops = mstr_bs_act->mstr->driver->mstr_port_ops;

		/*
		 * Note that current all the operations
		 * of pre->bankswitch->post->wait->disable
		 * are performed sequentially.The switch case
		 * is kept in order for code to scale where
		 * pre->bankswitch->post->wait->disable are
		 * not sequential and called from different
		 * instances.
		 */
		switch (bs_state) {

		case SDW_UPDATE_BS_PRE:
			/* Pre activate ports */
			if (ops->dpn_port_activate_ch_pre) {
				ret = ops->dpn_port_activate_ch_pre
					(mstr_bs_act->mstr, NULL, 0);
				if (ret < 0)
					return ret;
			}
			break;
		case SDW_UPDATE_BS_BNKSWTCH:
			/* Configure Frame Shape/Switch Bank */
			ret = sdw_cfg_frmshp_bnkswtch(mstr_bs_act, true);
			if (ret < 0)
				return ret;
			break;
		case SDW_UPDATE_BS_POST:
			/* Post activate ports */
			if (ops->dpn_port_activate_ch_post) {
				ret = ops->dpn_port_activate_ch_post
					(mstr_bs_act->mstr, NULL, 0);
				if (ret < 0)
					return ret;
			}
			break;
		case SDW_UPDATE_BS_BNKSWTCH_WAIT:
			/* Post Bankswitch wait operation */
			ret = sdw_cfg_frmshp_bnkswtch_wait(mstr_bs_act);
			if (ret < 0)
				return ret;
			break;
		case SDW_UPDATE_BS_DIS_CHN:
			/* Disable channel on previous bank */
			ret = sdw_dis_chan(mstr_bs_act);
			if (ret < 0)
				return ret;
			break;
		default:
			return -EINVAL;
			break; /* unreachable after return; kept as-is */
		}
	}

	return ret;

}

/*
 * sdw_update_bs_prms - returns Success
 * -EINVAL - In case of error.
 *
 * Once all the parameters are configured
 * for ports, this function performs bankswitch
 * where all the new configured parameters
 * gets in effect. This function is called
 * from sdw_bus_calc_bw & sdw_bus_calc_bw_dis API.
 * This function also disables all the channels
 * enabled on previous bank after bankswitch.
 */
int sdw_update_bs_prms(struct sdw_bus *sdw_mstr_bs,
		struct sdw_runtime *sdw_rt,
		int last_node)
{

	struct sdw_master *sdw_mstr = sdw_mstr_bs->mstr;
	int ret = 0;

	/*
	 * Optimization scope.
	 * Check whether we can assign function pointer
	 * link sync value is 1, and call that function
	 * if its not NULL.
	 */
	if ((last_node) && (sdw_mstr->link_sync_mask)) {

		/* Perform pre-activate ports */
		ret = sdw_bs_pre_bnkswtch_post(sdw_rt, SDW_UPDATE_BS_PRE);
		if (ret < 0) {
			dev_err(&sdw_mstr->dev, "Pre-activate port failed\n");
			return ret;
		}

		/* Perform bankswitch operation*/
		ret = sdw_bs_pre_bnkswtch_post(sdw_rt, SDW_UPDATE_BS_BNKSWTCH);
		if (ret < 0) {
			dev_err(&sdw_mstr->dev, "Bank Switch operation failed\n");
			return ret;
		}

		/* Perform post-activate ports */
		ret = sdw_bs_pre_bnkswtch_post(sdw_rt, SDW_UPDATE_BS_POST);
		if (ret < 0) {
			dev_err(&sdw_mstr->dev, "Pre-activate port failed\n");
			return ret;
		}

		/* Perform bankswitch post wait opearation */
		ret = sdw_bs_pre_bnkswtch_post(sdw_rt,
				SDW_UPDATE_BS_BNKSWTCH_WAIT);
		if (ret < 0) {
			dev_err(&sdw_mstr->dev, "BnkSwtch wait op failed\n");
			return ret;
		}

		/* Disable channels on previous bank */
		ret = sdw_bs_pre_bnkswtch_post(sdw_rt, SDW_UPDATE_BS_DIS_CHN);
		if (ret < 0) {
			dev_err(&sdw_mstr->dev, "Channel disabled failed\n");
			return ret;
		}

	}

	/* Non-aggregated (no link sync) masters take the simple sync path */
	if (!sdw_mstr->link_sync_mask) {

		/* Configure Frame Shape/Switch Bank */
		ret = sdw_cfg_frmshp_bnkswtch(sdw_mstr_bs, false);
		if (ret < 0) {
			/* TBD: Undo all the computation */
			dev_err(&sdw_mstr->dev, "bank switch failed\n");
			return ret;
		}

		/* Disable all channels enabled on previous bank */
		ret = sdw_dis_chan(sdw_mstr_bs);
		if (ret < 0) {
			/* TBD: Undo all the computation */
			dev_err(&sdw_mstr->dev, "Channel disabled failed\n");
			return ret;
		}
	}

	return ret;
}

/**
 * sdw_chk_last_node - returns True or false
 *
 * This function returns true in case of last node
 * else returns false.
+ */ +bool sdw_chk_last_node(struct sdw_mstr_runtime *sdw_mstr_rt, + struct sdw_runtime *sdw_rt) +{ + struct sdw_mstr_runtime *last_rt = NULL; + + last_rt = list_last_entry(&sdw_rt->mstr_rt_list, + struct sdw_mstr_runtime, mstr_sdw_node); + if (sdw_mstr_rt == last_rt) + return true; + else + return false; + +} + +/** + * sdw_unprepare_op - returns Success + * -EINVAL - In case of error. + * + * This function perform all operations required + * to unprepare ports and does recomputation of + * bus parameters. + */ +int sdw_unprepare_op(struct sdw_bus *sdw_mstr_bs, + struct sdw_mstr_runtime *sdw_mstr_rt, + struct sdw_runtime *sdw_rt) +{ + + struct sdw_master *sdw_mstr = sdw_mstr_bs->mstr; + struct sdw_stream_params *mstr_params; + bool last_node = false; + int ret = 0; + + last_node = sdw_chk_last_node(sdw_mstr_rt, sdw_rt); + mstr_params = &sdw_mstr_rt->stream_params; + + /* 1. Un-prepare master and slave port */ + ret = sdw_prep_unprep_mstr_slv(sdw_mstr_bs, + sdw_rt, false); + if (ret < 0) { + /* TBD: Undo all the computation */ + dev_err(&sdw_mstr->dev, "Ch unprep failed\n"); + return ret; + } + + /* change stream state to unprepare */ + if (last_node) + sdw_rt->stream_state = + SDW_STATE_UNPREPARE_STREAM; + + /* + * Calculate new bandwidth, frame size + * and total BW required for master controller + */ + sdw_mstr_rt->stream_bw = mstr_params->rate * + mstr_params->channel_count * mstr_params->bps; + sdw_mstr_bs->bandwidth -= sdw_mstr_rt->stream_bw; + + /* Something went wrong in bandwidth calculation */ + if (sdw_mstr_bs->bandwidth < 0) { + dev_err(&sdw_mstr->dev, "BW calculation failed\n"); + return -EINVAL; + } + + if (!sdw_mstr_bs->bandwidth) { + /* + * Last stream on master should + * return successfully + */ + sdw_mstr_bs->system_interval = 0; + sdw_mstr_bs->stream_interval = 0; + sdw_mstr_bs->frame_freq = 0; + sdw_mstr_bs->row = 0; + sdw_mstr_bs->col = 0; + return 0; + } + + /* Compute transport params */ + ret = sdw_compute_bs_prms(sdw_mstr_bs, sdw_mstr_rt);
+ if (ret < 0) { + /* TBD: Undo all the computation */ + dev_err(&sdw_mstr->dev, "Params computation failed\n"); + return -EINVAL; + } + + /* Configure bus params */ + ret = sdw_config_bs_prms(sdw_mstr_bs, true); + if (ret < 0) { + /* TBD: Undo all the computation */ + dev_err(&sdw_mstr->dev, "xport params config failed\n"); + return ret; + } + + /* + * Perform SDW bus update + * For Aggregation flow: + * Pre-> Bankswitch -> Post -> Disable channel + * For normal flow: + * Bankswitch -> Disable channel + */ + ret = sdw_update_bs_prms(sdw_mstr_bs, sdw_rt, last_node); + + return ret; +} + +/** + * sdw_disable_op - returns Success + * -EINVAL - In case of error. + * + * This function perform all operations required + * to disable ports. + */ +int sdw_disable_op(struct sdw_bus *sdw_mstr_bs, + struct sdw_mstr_runtime *sdw_mstr_rt, + struct sdw_runtime *sdw_rt) +{ + + struct sdw_master *sdw_mstr = sdw_mstr_bs->mstr; + struct sdw_master_capabilities *sdw_mstr_cap = NULL; + struct sdw_stream_params *mstr_params; + bool last_node = false; + int ret = 0; + + + last_node = sdw_chk_last_node(sdw_mstr_rt, sdw_rt); + sdw_mstr_cap = &sdw_mstr_bs->mstr->mstr_capabilities; + mstr_params = &sdw_mstr_rt->stream_params; + + /* Lets do disabling of port for stream to be freed */ + ret = sdw_en_dis_mstr_slv(sdw_mstr_bs, sdw_rt, false); + if (ret < 0) { + /* TBD: Undo all the computation */ + dev_err(&sdw_mstr->dev, "Ch dis failed\n"); + return ret; + } + + /* Change stream state to disable */ + if (last_node) + sdw_rt->stream_state = SDW_STATE_DISABLE_STREAM; + + ret = sdw_config_bs_prms(sdw_mstr_bs, false); + if (ret < 0) { + /* TBD: Undo all the computation */ + dev_err(&sdw_mstr->dev, "xport params config failed\n"); + return ret; + } + + /* + * Perform SDW bus update + * For Aggregation flow: + * Pre-> Bankswitch -> Post -> Disable channel + * For normal flow: + * Bankswitch -> Disable channel + */ + ret = sdw_update_bs_prms(sdw_mstr_bs, sdw_rt, last_node); + + return ret; +} + +/** 
+ * sdw_enable_op - returns Success + * -EINVAL - In case of error. + * + * This function perform all operations required + * to enable ports. + */ +int sdw_enable_op(struct sdw_bus *sdw_mstr_bs, + struct sdw_mstr_runtime *sdw_mstr_rt, + struct sdw_runtime *sdw_rt) +{ + + struct sdw_master *sdw_mstr = sdw_mstr_bs->mstr; + bool last_node = false; + int ret = 0; + + last_node = sdw_chk_last_node(sdw_mstr_rt, sdw_rt); + + ret = sdw_config_bs_prms(sdw_mstr_bs, false); + if (ret < 0) { + /* TBD: Undo all the computation */ + dev_err(&sdw_mstr->dev, "xport params config failed\n"); + return ret; + } + + /* Enable new port for master and slave */ + ret = sdw_en_dis_mstr_slv(sdw_mstr_bs, sdw_rt, true); + if (ret < 0) { + /* TBD: Undo all the computation */ + dev_err(&sdw_mstr->dev, "Channel enable failed\n"); + return ret; + } + + /* change stream state to enable */ + if (last_node) + sdw_rt->stream_state = SDW_STATE_ENABLE_STREAM; + /* + * Perform SDW bus update + * For Aggregation flow: + * Pre-> Bankswitch -> Post -> Disable channel + * For normal flow: + * Bankswitch -> Disable channel + */ + ret = sdw_update_bs_prms(sdw_mstr_bs, sdw_rt, last_node); + + return ret; +} + +/** + * sdw_prepare_op - returns Success + * -EINVAL - In case of error. + * + * This function perform all operations required + * to prepare ports and does computation of + * bus parameters. 
+ */ +int sdw_prepare_op(struct sdw_bus *sdw_mstr_bs, + struct sdw_mstr_runtime *sdw_mstr_rt, + struct sdw_runtime *sdw_rt) +{ + struct sdw_stream_params *stream_params = &sdw_rt->stream_params; + struct sdw_master *sdw_mstr = sdw_mstr_bs->mstr; + struct sdw_master_capabilities *sdw_mstr_cap = NULL; + struct sdw_stream_params *mstr_params; + + bool last_node = false; + int ret = 0; + + last_node = sdw_chk_last_node(sdw_mstr_rt, sdw_rt); + sdw_mstr_cap = &sdw_mstr_bs->mstr->mstr_capabilities; + mstr_params = &sdw_mstr_rt->stream_params; + + /* + * check all the stream parameters received + * Check for isochronous mode, sample rate etc + */ + ret = sdw_chk_strm_prms(sdw_mstr_cap, mstr_params, + stream_params); + if (ret < 0) { + dev_err(&sdw_mstr->dev, "Stream param check failed\n"); + return -EINVAL; + } + + /* + * Calculate stream bandwidth, frame size and + * total BW required for master controller + */ + sdw_mstr_rt->stream_bw = mstr_params->rate * + mstr_params->channel_count * mstr_params->bps; + sdw_mstr_bs->bandwidth += sdw_mstr_rt->stream_bw; + + /* Compute transport params */ + ret = sdw_compute_bs_prms(sdw_mstr_bs, sdw_mstr_rt); + if (ret < 0) { + /* TBD: Undo all the computation */ + dev_err(&sdw_mstr->dev, "Params computation failed\n"); + return -EINVAL; + } + + /* Configure bus parameters */ + ret = sdw_config_bs_prms(sdw_mstr_bs, true); + if (ret < 0) { + /* TBD: Undo all the computation */ + dev_err(&sdw_mstr->dev, "xport param config failed\n"); + return ret; + } + + /* + * Perform SDW bus update + * For Aggregation flow: + * Pre-> Bankswitch -> Post -> Disable channel + * For normal flow: + * Bankswitch -> Disable channel + */ + ret = sdw_update_bs_prms(sdw_mstr_bs, sdw_rt, last_node); + + /* Prepare new port for master and slave */ + ret = sdw_prep_unprep_mstr_slv(sdw_mstr_bs, sdw_rt, true); + if (ret < 0) { + /* TBD: Undo all the computation */ + dev_err(&sdw_mstr->dev, "Channel prepare failed\n"); + return ret; + } + + /* change stream state to 
prepare */ + if (last_node) + sdw_rt->stream_state = SDW_STATE_PREPARE_STREAM; + + + return ret; +} + +/** + * sdw_pre_en_dis_unprep_op - returns Success + * -EINVAL - In case of error. + * + * This function is called by sdw_bus_calc_bw + * and sdw_bus_calc_bw_dis to prepare, enable, + * unprepare and disable ports. Based on state + * value, individual APIs are called. + */ +int sdw_pre_en_dis_unprep_op(struct sdw_mstr_runtime *sdw_mstr_rt, + struct sdw_runtime *sdw_rt, int state) +{ + struct sdw_master *sdw_mstr = NULL; + struct sdw_bus *sdw_mstr_bs = NULL; + int ret = 0; + + /* Get bus structure for master */ + sdw_mstr_bs = master_to_bus(sdw_mstr_rt->mstr); + if (!sdw_mstr_bs) + return -EINVAL; + + sdw_mstr = sdw_mstr_bs->mstr; + + /* + * All data structures required available, + * lets calculate BW for master controller + */ + + switch (state) { + + case SDW_STATE_PREPARE_STREAM: /* Prepare */ + ret = sdw_prepare_op(sdw_mstr_bs, sdw_mstr_rt, sdw_rt); + break; + case SDW_STATE_ENABLE_STREAM: /* Enable */ + ret = sdw_enable_op(sdw_mstr_bs, sdw_mstr_rt, sdw_rt); + break; + case SDW_STATE_DISABLE_STREAM: /* Disable */ + ret = sdw_disable_op(sdw_mstr_bs, sdw_mstr_rt, sdw_rt); + break; + case SDW_STATE_UNPREPARE_STREAM: /* UnPrepare */ + ret = sdw_unprepare_op(sdw_mstr_bs, sdw_mstr_rt, sdw_rt); + break; + default: + ret = -EINVAL; + break; + + } + + return ret; +} + +/** + * sdw_bus_calc_bw - returns Success + * -EINVAL - In case of error. + * + * + * This function is called from sdw_prepare_and_enable + * whenever new stream is processed. The function based + * on the stream associated with controller calculates + * required bandwidth, clock, frameshape, computes + * all transport params for a given port, enable channel + * & perform bankswitch. 
+ */ +int sdw_bus_calc_bw(struct sdw_stream_tag *stream_tag, bool enable) +{ + + struct sdw_runtime *sdw_rt = stream_tag->sdw_rt; + struct sdw_mstr_runtime *sdw_mstr_rt = NULL; + struct sdw_bus *sdw_mstr_bs = NULL; + struct sdw_master *sdw_mstr = NULL; + int ret = 0; + + + /* + * TBD: check for mstr_rt is in configured state or not + * If yes, then configure masters as well + * If no, then do not configure/enable master related parameters + */ + + /* BW calulation for active master controller for given stream tag */ + list_for_each_entry(sdw_mstr_rt, &sdw_rt->mstr_rt_list, + mstr_sdw_node) { + + if (sdw_mstr_rt->mstr == NULL) + break; + + if ((sdw_rt->stream_state != SDW_STATE_CONFIG_STREAM) && + (sdw_rt->stream_state != SDW_STATE_UNPREPARE_STREAM)) + goto enable_stream; + + /* Get bus structure for master */ + sdw_mstr_bs = master_to_bus(sdw_mstr_rt->mstr); + if (!sdw_mstr_bs) + return -EINVAL; + + sdw_mstr = sdw_mstr_bs->mstr; + ret = sdw_pre_en_dis_unprep_op(sdw_mstr_rt, sdw_rt, + SDW_STATE_PREPARE_STREAM); + if (ret < 0) { + dev_err(&sdw_mstr->dev, "Prepare Operation failed\n"); + return -EINVAL; + } + } + +enable_stream: + + list_for_each_entry(sdw_mstr_rt, &sdw_rt->mstr_rt_list, mstr_sdw_node) { + + + if (sdw_mstr_rt->mstr == NULL) + break; + + if ((!enable) || + (sdw_rt->stream_state != SDW_STATE_PREPARE_STREAM)) + return 0; + sdw_mstr_bs = master_to_bus(sdw_mstr_rt->mstr); + if (!sdw_mstr_bs) + return -EINVAL; + + sdw_mstr = sdw_mstr_bs->mstr; + + ret = sdw_pre_en_dis_unprep_op(sdw_mstr_rt, sdw_rt, + SDW_STATE_ENABLE_STREAM); + if (ret < 0) { + dev_err(&sdw_mstr->dev, "Enable Operation failed\n"); + return -EINVAL; + } + } + + return 0; +} +EXPORT_SYMBOL_GPL(sdw_bus_calc_bw); + +/** + * sdw_bus_calc_bw_dis - returns Success + * -EINVAL - In case of error. + * + * + * This function is called from sdw_disable_and_unprepare + * whenever stream is ended. 
The function based disables/ + * unprepare port/channel of associated stream and computes + * required bandwidth, clock, frameshape, computes + * all transport params for a given port, enable channel + * & perform bankswitch for remaining streams on given + * controller. + */ +int sdw_bus_calc_bw_dis(struct sdw_stream_tag *stream_tag, bool unprepare) +{ + struct sdw_runtime *sdw_rt = stream_tag->sdw_rt; + struct sdw_mstr_runtime *sdw_mstr_rt = NULL; + struct sdw_bus *sdw_mstr_bs = NULL; + struct sdw_master *sdw_mstr = NULL; + int ret = 0; + + + /* BW calulation for active master controller for given stream tag */ + list_for_each_entry(sdw_mstr_rt, + &sdw_rt->mstr_rt_list, mstr_sdw_node) { + + + if (sdw_mstr_rt->mstr == NULL) + break; + + if (sdw_rt->stream_state != SDW_STATE_ENABLE_STREAM) + goto unprepare_stream; + + /* Get bus structure for master */ + sdw_mstr_bs = master_to_bus(sdw_mstr_rt->mstr); + if (!sdw_mstr_bs) + return -EINVAL; + + sdw_mstr = sdw_mstr_bs->mstr; + ret = sdw_pre_en_dis_unprep_op(sdw_mstr_rt, sdw_rt, + SDW_STATE_DISABLE_STREAM); + if (ret < 0) { + dev_err(&sdw_mstr->dev, "Disable Operation failed\n"); + return -EINVAL; + } + } + +unprepare_stream: + list_for_each_entry(sdw_mstr_rt, + &sdw_rt->mstr_rt_list, mstr_sdw_node) { + if (sdw_mstr_rt->mstr == NULL) + break; + + if ((!unprepare) || + (sdw_rt->stream_state != SDW_STATE_DISABLE_STREAM)) + return 0; + + sdw_mstr_bs = master_to_bus(sdw_mstr_rt->mstr); + if (!sdw_mstr_bs) + return -EINVAL; + + sdw_mstr = sdw_mstr_bs->mstr; + ret = sdw_pre_en_dis_unprep_op(sdw_mstr_rt, sdw_rt, + SDW_STATE_UNPREPARE_STREAM); + if (ret < 0) { + dev_err(&sdw_mstr->dev, "Unprepare Operation failed\n"); + return -EINVAL; + } + } + + return 0; +} +EXPORT_SYMBOL_GPL(sdw_bus_calc_bw_dis); + +/* + * sdw_slv_dp0_en_dis - returns Success + * -EINVAL - In case of error. + * + * + * This function enable/disable Slave DP0 channels. 
+ */ +int sdw_slv_dp0_en_dis(struct sdw_bus *mstr_bs, + bool is_enable, u8 slv_number) +{ + struct sdw_msg wr_msg, rd_msg; + int ret = 0; + int banktouse; + u8 wbuf[1] = {0}; + u8 rbuf[1] = {0}; + + /* Get current bank in use from bus structure*/ + banktouse = mstr_bs->active_bank; + banktouse = !banktouse; + + rd_msg.addr = wr_msg.addr = ((SDW_DPN_CHANNELEN + + (SDW_BANK1_REGISTER_OFFSET * banktouse)) + + (SDW_NUM_DATA_PORT_REGISTERS * + 0x0)); + rd_msg.ssp_tag = 0x0; + rd_msg.flag = SDW_MSG_FLAG_READ; + rd_msg.len = 1; + rd_msg.slave_addr = slv_number; + rd_msg.buf = rbuf; + rd_msg.addr_page1 = 0x0; + rd_msg.addr_page2 = 0x0; + + wr_msg.ssp_tag = 0x0; + wr_msg.flag = SDW_MSG_FLAG_WRITE; + wr_msg.len = 1; + wr_msg.slave_addr = slv_number; + wr_msg.buf = wbuf; + wr_msg.addr_page1 = 0x0; + wr_msg.addr_page2 = 0x0; + + ret = sdw_slave_transfer(mstr_bs->mstr, &rd_msg, 1); + if (ret != 1) { + ret = -EINVAL; + dev_err(&mstr_bs->mstr->dev, + "Register transfer failed\n"); + goto out; + } + + if (is_enable) + wbuf[0] = (rbuf[0] | 0x1); + else + wbuf[0] = (rbuf[0] & ~(0x1)); + + ret = sdw_slave_transfer(mstr_bs->mstr, &wr_msg, 1); + if (ret != 1) { + ret = -EINVAL; + dev_err(&mstr_bs->mstr->dev, + "Register transfer failed\n"); + goto out; + } + + rbuf[0] = 0; + /* This is just status read, can be removed later */ + ret = sdw_slave_transfer(mstr_bs->mstr, &rd_msg, 1); + if (ret != 1) { + ret = -EINVAL; + dev_err(&mstr_bs->mstr->dev, + "Register transfer failed\n"); + goto out; + } + +out: + return ret; + +} + + +/* + * sdw_mstr_dp0_act_dis - returns Success + * -EINVAL - In case of error. + * + * + * This function enable/disable Master DP0 channels. 
+ */ +int sdw_mstr_dp0_act_dis(struct sdw_bus *mstr_bs, bool is_enable) +{ + struct sdw_mstr_driver *ops = mstr_bs->mstr->driver; + struct sdw_activate_ch activate_ch; + int banktouse, ret = 0; + + activate_ch.num = 0; + activate_ch.ch_mask = 0x1; + activate_ch.activate = is_enable; /* Enable/Disable */ + + /* Get current bank in use from bus structure*/ + banktouse = mstr_bs->active_bank; + banktouse = !banktouse; + + /* 1. Master port enable_ch_pre */ + if (ops->mstr_port_ops->dpn_port_activate_ch_pre) { + ret = ops->mstr_port_ops->dpn_port_activate_ch_pre + (mstr_bs->mstr, &activate_ch, banktouse); + if (ret < 0) + return ret; + } + + /* 2. Master port enable */ + if (ops->mstr_port_ops->dpn_port_activate_ch) { + ret = ops->mstr_port_ops->dpn_port_activate_ch(mstr_bs->mstr, + &activate_ch, banktouse); + if (ret < 0) + return ret; + } + + /* 3. Master port enable_ch_post */ + if (ops->mstr_port_ops->dpn_port_activate_ch_post) { + ret = ops->mstr_port_ops->dpn_port_activate_ch_post + (mstr_bs->mstr, &activate_ch, banktouse); + if (ret < 0) + return ret; + } + + return 0; +} + +/* + * sdw_slv_dp0_prep_unprep - returns Success + * -EINVAL - In case of error. + * + * + * This function prepare/unprepare Slave DP0. 
+ */ +int sdw_slv_dp0_prep_unprep(struct sdw_bus *mstr_bs, + u8 slv_number, bool prepare) +{ + struct sdw_msg wr_msg, rd_msg; + int ret = 0; + int banktouse; + u8 wbuf[1] = {0}; + u8 rbuf[1] = {0}; + + /* Get current bank in use from bus structure*/ + banktouse = mstr_bs->active_bank; + banktouse = !banktouse; + + /* Read SDW_DPN_PREPARECTRL register */ + rd_msg.addr = wr_msg.addr = SDW_DPN_PREPARECTRL + + (SDW_NUM_DATA_PORT_REGISTERS * 0x0); + rd_msg.ssp_tag = 0x0; + rd_msg.flag = SDW_MSG_FLAG_READ; + rd_msg.len = 1; + rd_msg.slave_addr = slv_number; + rd_msg.buf = rbuf; + rd_msg.addr_page1 = 0x0; + rd_msg.addr_page2 = 0x0; + + wr_msg.ssp_tag = 0x0; + wr_msg.flag = SDW_MSG_FLAG_WRITE; + wr_msg.len = 1; + wr_msg.slave_addr = slv_number; + wr_msg.buf = wbuf; + wr_msg.addr_page1 = 0x0; + wr_msg.addr_page2 = 0x0; + + ret = sdw_slave_transfer(mstr_bs->mstr, &rd_msg, 1); + if (ret != 1) { + ret = -EINVAL; + dev_err(&mstr_bs->mstr->dev, + "Register transfer failed\n"); + goto out; + } + + if (prepare) + wbuf[0] = (rbuf[0] | 0x1); + else + wbuf[0] = (rbuf[0] & ~(0x1)); + + /* + * TBD: poll for prepare interrupt bit + * before calling post_prepare + * 2. check capabilities if simplified + * CM no need to prepare + */ + ret = sdw_slave_transfer(mstr_bs->mstr, &wr_msg, 1); + if (ret != 1) { + ret = -EINVAL; + dev_err(&mstr_bs->mstr->dev, + "Register transfer failed\n"); + goto out; + } + + /* + * Sleep for 100ms. + * TODO: check on prepare status for port_ready + */ + msleep(100); + +out: + return ret; + +} + +/* + * sdw_mstr_dp0_prep_unprep - returns Success + * -EINVAL - In case of error. + * + * + * This function prepare/unprepare Master DP0. + */ +int sdw_mstr_dp0_prep_unprep(struct sdw_bus *mstr_bs, + bool prep) +{ + struct sdw_mstr_driver *ops = mstr_bs->mstr->driver; + struct sdw_prepare_ch prep_ch; + int ret = 0; + + prep_ch.num = 0x0; + prep_ch.ch_mask = 0x1; + prep_ch.prepare = prep; /* Prepare/Unprepare */ + + /* 1.
Master port prepare_ch_pre */ + if (ops->mstr_port_ops->dpn_port_prepare_ch_pre) { + ret = ops->mstr_port_ops->dpn_port_prepare_ch_pre + (mstr_bs->mstr, &prep_ch); + if (ret < 0) + return ret; + } + + /* 2. Master port prepare */ + if (ops->mstr_port_ops->dpn_port_prepare_ch) { + ret = ops->mstr_port_ops->dpn_port_prepare_ch + (mstr_bs->mstr, &prep_ch); + if (ret < 0) + return ret; + } + + /* 3. Master port prepare_ch_post */ + if (ops->mstr_port_ops->dpn_port_prepare_ch_post) { + ret = ops->mstr_port_ops->dpn_port_prepare_ch_post + (mstr_bs->mstr, &prep_ch); + if (ret < 0) + return ret; + } + + return 0; +} + +static int sdw_bra_config_ops(struct sdw_bus *sdw_mstr_bs, + struct sdw_bra_block *block, + struct sdw_transport_params *t_params, + struct sdw_port_params *p_params) +{ + struct sdw_mstr_driver *ops; + int ret, banktouse; + + /* configure Master transport params */ + ret = sdw_cfg_mstr_params(sdw_mstr_bs, t_params, p_params); + if (ret < 0) { + dev_err(&sdw_mstr_bs->mstr->dev, "BRA: Master xport params config failed\n"); + return ret; + } + + /* configure Slave transport params */ + ret = sdw_cfg_slv_params(sdw_mstr_bs, t_params, + p_params, block->slave_addr); + if (ret < 0) { + dev_err(&sdw_mstr_bs->mstr->dev, "BRA: Slave xport params config failed\n"); + return ret; + } + + /* Get master driver ops */ + ops = sdw_mstr_bs->mstr->driver; + + /* Configure SSP */ + banktouse = sdw_mstr_bs->active_bank; + banktouse = !banktouse; + + if (ops->mstr_ops->set_ssp_interval) { + ret = ops->mstr_ops->set_ssp_interval(sdw_mstr_bs->mstr, + 24, banktouse); + if (ret < 0) { + dev_err(&sdw_mstr_bs->mstr->dev, "BRA: SSP interval config failed\n"); + return ret; + } + } + + /* Configure Clock */ + if (ops->mstr_ops->set_clock_freq) { + ret = ops->mstr_ops->set_clock_freq(sdw_mstr_bs->mstr, + sdw_mstr_bs->clk_div, banktouse); + if (ret < 0) { + dev_err(&sdw_mstr_bs->mstr->dev, "BRA: Clock config failed\n"); + return ret; + } + } + + return 0; +} + +static int 
sdw_bra_xport_config_enable(struct sdw_bus *sdw_mstr_bs, + struct sdw_bra_block *block, + struct sdw_transport_params *t_params, + struct sdw_port_params *p_params) +{ + int ret; + + /* Prepare sequence */ + ret = sdw_bra_config_ops(sdw_mstr_bs, block, t_params, p_params); + if (ret < 0) { + dev_err(&sdw_mstr_bs->mstr->dev, "BRA: config operation failed\n"); + return ret; + } + + /* Bank Switch */ + ret = sdw_cfg_frmshp_bnkswtch(sdw_mstr_bs, false); + if (ret < 0) { + dev_err(&sdw_mstr_bs->mstr->dev, "BRA: bank switch failed\n"); + return ret; + } + + /* + * TODO: There may be some slave which doesn't support + * prepare for DP0. We have two options here. + * 1. Just call prepare and ignore error from those + * codec who doesn't support prepare for DP0. + * 2. Get slave capabilities and based on prepare DP0 + * support, Program Slave prepare register. + * Currently going with approach 1, not checking return + * value. + * 3. Try to use existing prep_unprep API both for master + * and slave. 
+ */ + sdw_slv_dp0_prep_unprep(sdw_mstr_bs, block->slave_addr, true); + + /* Prepare Master port */ + ret = sdw_mstr_dp0_prep_unprep(sdw_mstr_bs, true); + if (ret < 0) { + dev_err(&sdw_mstr_bs->mstr->dev, "BRA: Master prepare failed\n"); + return ret; + } + + /* Enable sequence */ + ret = sdw_bra_config_ops(sdw_mstr_bs, block, t_params, p_params); + if (ret < 0) { + dev_err(&sdw_mstr_bs->mstr->dev, "BRA: config operation failed\n"); + return ret; + } + + /* Enable DP0 channel (Slave) */ + ret = sdw_slv_dp0_en_dis(sdw_mstr_bs, true, block->slave_addr); + if (ret < 0) { + dev_err(&sdw_mstr_bs->mstr->dev, "BRA: Slave DP0 enable failed\n"); + return ret; + } + + /* Enable DP0 channel (Master) */ + ret = sdw_mstr_dp0_act_dis(sdw_mstr_bs, true); + if (ret < 0) { + dev_err(&sdw_mstr_bs->mstr->dev, "BRA: Master DP0 enable failed\n"); + return ret; + } + + /* Bank Switch */ + ret = sdw_cfg_frmshp_bnkswtch(sdw_mstr_bs, false); + if (ret < 0) { + dev_err(&sdw_mstr_bs->mstr->dev, "BRA: bank switch failed\n"); + return ret; + } + + return 0; +} + +static int sdw_bra_xport_config_disable(struct sdw_bus *sdw_mstr_bs, + struct sdw_bra_block *block) +{ + int ret; + + /* Disable DP0 channel (Slave) */ + ret = sdw_slv_dp0_en_dis(sdw_mstr_bs, false, block->slave_addr); + if (ret < 0) { + dev_err(&sdw_mstr_bs->mstr->dev, "BRA: Slave DP0 disable failed\n"); + return ret; + } + + /* Disable DP0 channel (Master) */ + ret = sdw_mstr_dp0_act_dis(sdw_mstr_bs, false); + if (ret < 0) { + dev_err(&sdw_mstr_bs->mstr->dev, "BRA: Master DP0 disable failed\n"); + return ret; + } + + /* Bank Switch */ + ret = sdw_cfg_frmshp_bnkswtch(sdw_mstr_bs, false); + if (ret < 0) { + dev_err(&sdw_mstr_bs->mstr->dev, "BRA: bank switch failed\n"); + return ret; + } + + /* + * TODO: There may be some slave which doesn't support + * de-prepare for DP0. We have two options here. + * 1. Just call prepare and ignore error from those + * codec who doesn't support de-prepare for DP0. + * 2. 
Get slave capabilities and based on prepare DP0 + * support, Program Slave prepare register. + * Currently going with approach 1, not checking return + * value. + */ + sdw_slv_dp0_prep_unprep(sdw_mstr_bs, block->slave_addr, false); + + /* De-prepare Master port */ + ret = sdw_mstr_dp0_prep_unprep(sdw_mstr_bs, false); + if (ret < 0) { + dev_err(&sdw_mstr_bs->mstr->dev, "BRA: Master de-prepare failed\n"); + return ret; + } + + return 0; +} + +int sdw_bus_bra_xport_config(struct sdw_bus *sdw_mstr_bs, + struct sdw_bra_block *block, bool enable) +{ + struct sdw_transport_params t_params; + struct sdw_port_params p_params; + int ret; + + /* TODO: + * compute transport parameters based on current clock and + * frameshape. need to check how algorithm should be designed + * for BRA for computing clock, frameshape, SSP and transport params. + */ + + /* Transport Parameters */ + t_params.num = 0x0; /* DP 0 */ + t_params.blockpackingmode = 0x0; + t_params.blockgroupcontrol_valid = false; + t_params.blockgroupcontrol = 0x0; + t_params.lanecontrol = 0; + t_params.sample_interval = 10; + + t_params.hstart = 7; + t_params.hstop = 9; + t_params.offset1 = 0; + t_params.offset2 = 0; + + /* Port Parameters */ + p_params.num = 0x0; /* DP 0 */ + + /* Isochronous Mode */ + p_params.port_flow_mode = 0x0; + + /* Normal Mode */ + p_params.port_data_mode = 0x0; + + /* Word length */ + p_params.word_length = 3; + + /* Frameshape and clock params */ + sdw_mstr_bs->clk_div = 1; + sdw_mstr_bs->col = 10; + sdw_mstr_bs->row = 80; + +#if IS_ENABLED(CONFIG_SND_SOC_INTEL_CNL_FPGA) + sdw_mstr_bs->bandwidth = 9.6 * 1000 * 1000; +#else + sdw_mstr_bs->bandwidth = 12 * 1000 * 1000; +#endif + + if (enable) { + ret = sdw_bra_xport_config_enable(sdw_mstr_bs, block, + &t_params, &p_params); + if (ret < 0) { + dev_err(&sdw_mstr_bs->mstr->dev, "BRA: Xport params config failed\n"); + return ret; + } + + } else { + ret = sdw_bra_xport_config_disable(sdw_mstr_bs, block); + if (ret < 0) { + 
dev_err(&sdw_mstr_bs->mstr->dev, "BRA: Xport params de-config failed\n"); + return ret; + } + } + + return 0; +} diff --git a/drivers/sdw/sdw_cnl.c b/drivers/sdw/sdw_cnl.c new file mode 100644 index 0000000000000..c754edbe65640 --- /dev/null +++ b/drivers/sdw/sdw_cnl.c @@ -0,0 +1,2535 @@ +/* + * sdw_cnl.c - Intel SoundWire master controller driver implementation. + * + * Copyright (C) 2015-2016 Intel Corp + * Author: Hardik T Shah + * + * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; version 2 of the License. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + * + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "sdw_cnl_priv.h" + +static inline int cnl_sdw_reg_readl(void __iomem *base, int offset) +{ + int value; + + value = readl(base + offset); + return value; +} + +static inline void cnl_sdw_reg_writel(void __iomem *base, int offset, int value) +{ + writel(value, base + offset); +} + +static inline u16 cnl_sdw_reg_readw(void __iomem *base, int offset) +{ + int value; + + value = readw(base + offset); + return value; +} + +static inline void cnl_sdw_reg_writew(void __iomem *base, int offset, u16 value) +{ + writew(value, base + offset); +} + +static inline int cnl_sdw_port_reg_readl(void __iomem *base, int offset, + int port_num) +{ + return cnl_sdw_reg_readl(base, offset + port_num * 128); +} + +static inline 
void cnl_sdw_port_reg_writel(u32 __iomem *base, int offset, + int port_num, int value) +{ + return cnl_sdw_reg_writel(base, offset + port_num * 128, value); +} + +struct cnl_sdw_async_msg { + struct completion *async_xfer_complete; + struct sdw_msg *msg; + int length; +}; + +struct cnl_sdw { + struct cnl_sdw_data data; + struct sdw_master *mstr; + irqreturn_t (*thread)(int irq, void *context); + void *thread_context; + struct completion tx_complete; + struct cnl_sdw_port port[CNL_SDW_MAX_PORTS]; + int num_pcm_streams; + struct cnl_sdw_pdi_stream *pcm_streams; + int num_in_pcm_streams; + struct cnl_sdw_pdi_stream *in_pcm_streams; + int num_out_pcm_streams; + struct cnl_sdw_pdi_stream *out_pcm_streams; + int num_pdm_streams; + struct cnl_sdw_pdi_stream *pdm_streams; + int num_in_pdm_streams; + struct cnl_sdw_pdi_stream *in_pdm_streams; + int num_out_pdm_streams; + struct cnl_sdw_pdi_stream *out_pdm_streams; + struct mutex stream_lock; + spinlock_t ctrl_lock; + struct cnl_sdw_async_msg async_msg; + u32 response_buf[0x80]; + bool sdw_link_status; + +}; + +static int sdw_power_up_link(struct cnl_sdw *sdw) +{ + volatile int link_control; + struct sdw_master *mstr = sdw->mstr; + struct cnl_sdw_data *data = &sdw->data; + /* Try 10 times before timing out */ + int timeout = 10; + int spa_mask, cpa_mask; + + link_control = cnl_sdw_reg_readl(data->sdw_shim, SDW_CNL_LCTL); + spa_mask = (CNL_LCTL_SPA_MASK << (data->inst_id + CNL_LCTL_SPA_SHIFT)); + cpa_mask = (CNL_LCTL_CPA_MASK << (data->inst_id + CNL_LCTL_CPA_SHIFT)); + link_control |= spa_mask; + cnl_sdw_reg_writel(data->sdw_shim, SDW_CNL_LCTL, link_control); + do { + link_control = cnl_sdw_reg_readl(data->sdw_shim, SDW_CNL_LCTL); + if (link_control & cpa_mask) + break; + timeout--; + /* Wait 20ms before each time */ + msleep(20); + } while (timeout != 0); + /* Read once again to confirm */ + link_control = cnl_sdw_reg_readl(data->sdw_shim, SDW_CNL_LCTL); + if (link_control & cpa_mask) { + dev_info(&mstr->dev, "SoundWire ctrl 
%d Powered Up\n", + data->inst_id); + sdw->sdw_link_status = 1; + return 0; + } + dev_err(&mstr->dev, "Failed to Power Up the SDW ctrl %d\n", + data->inst_id); + return -EIO; +} + +static void sdw_power_down_link(struct cnl_sdw *sdw) +{ + volatile int link_control; + struct sdw_master *mstr = sdw->mstr; + struct cnl_sdw_data *data = &sdw->data; + /* Retry 10 times before giving up */ + int timeout = 10; + int spa_mask, cpa_mask; + + link_control = cnl_sdw_reg_readl(data->sdw_shim, SDW_CNL_LCTL); + spa_mask = ~(CNL_LCTL_SPA_MASK << (data->inst_id + CNL_LCTL_SPA_SHIFT)); + cpa_mask = (CNL_LCTL_CPA_MASK << (data->inst_id + CNL_LCTL_CPA_SHIFT)); + link_control &= spa_mask; + cnl_sdw_reg_writel(data->sdw_shim, SDW_CNL_LCTL, link_control); + do { + link_control = cnl_sdw_reg_readl(data->sdw_shim, SDW_CNL_LCTL); + if (!(link_control & cpa_mask)) + break; + timeout--; + /* Wait for 20ms before each retry */ + msleep(20); + } while (timeout != 0); + /* Read once again to confirm */ + link_control = cnl_sdw_reg_readl(data->sdw_shim, SDW_CNL_LCTL); + if (!(link_control & cpa_mask)) { + dev_info(&mstr->dev, "SoundWire ctrl %d Powered Down\n", + data->inst_id); + sdw->sdw_link_status = 0; + return; + } + dev_err(&mstr->dev, "Failed to Power Down the SDW ctrl %d\n", + data->inst_id); +} + +static void sdw_init_phyctrl(struct cnl_sdw *sdw) +{ + /* TODO: Initialize based on hardware requirement */ + +} + +static void sdw_switch_to_mip(struct cnl_sdw *sdw) +{ + u16 ioctl; + u16 act = 0; + struct cnl_sdw_data *data = &sdw->data; + int ioctl_offset = SDW_CNL_IOCTL + (data->inst_id * + SDW_CNL_IOCTL_REG_OFFSET); + int act_offset = SDW_CNL_CTMCTL + (data->inst_id * + SDW_CNL_CTMCTL_REG_OFFSET); + + ioctl = cnl_sdw_reg_readw(data->sdw_shim, ioctl_offset); + + ioctl &= ~(CNL_IOCTL_DOE_MASK << CNL_IOCTL_DOE_SHIFT); + cnl_sdw_reg_writew(data->sdw_shim, ioctl_offset, ioctl); + + ioctl &= ~(CNL_IOCTL_DO_MASK << CNL_IOCTL_DO_SHIFT); + cnl_sdw_reg_writew(data->sdw_shim, ioctl_offset, ioctl); + 
+ ioctl |= CNL_IOCTL_MIF_MASK << CNL_IOCTL_MIF_SHIFT; + cnl_sdw_reg_writew(data->sdw_shim, ioctl_offset, ioctl); + + ioctl &= ~(CNL_IOCTL_BKE_MASK << CNL_IOCTL_BKE_SHIFT); + ioctl &= ~(CNL_IOCTL_COE_MASK << CNL_IOCTL_COE_SHIFT); + + cnl_sdw_reg_writew(data->sdw_shim, ioctl_offset, ioctl); + + act |= 0x1 << CNL_CTMCTL_DOAIS_SHIFT; + act |= CNL_CTMCTL_DACTQE_MASK << CNL_CTMCTL_DACTQE_SHIFT; + act |= CNL_CTMCTL_DODS_MASK << CNL_CTMCTL_DODS_SHIFT; + cnl_sdw_reg_writew(data->sdw_shim, act_offset, act); +} + +static void sdw_switch_to_glue(struct cnl_sdw *sdw) +{ + u16 ioctl; + struct cnl_sdw_data *data = &sdw->data; + int ioctl_offset = SDW_CNL_IOCTL + (data->inst_id * + SDW_CNL_IOCTL_REG_OFFSET); + + ioctl = cnl_sdw_reg_readw(data->sdw_shim, ioctl_offset); + ioctl |= CNL_IOCTL_BKE_MASK << CNL_IOCTL_BKE_SHIFT; + ioctl |= CNL_IOCTL_COE_MASK << CNL_IOCTL_COE_SHIFT; + cnl_sdw_reg_writew(data->sdw_shim, ioctl_offset, ioctl); + + ioctl &= ~(CNL_IOCTL_MIF_MASK << CNL_IOCTL_MIF_SHIFT); + cnl_sdw_reg_writew(data->sdw_shim, ioctl_offset, ioctl); +} + +static void sdw_init_shim(struct cnl_sdw *sdw) +{ + u16 ioctl = 0; + struct cnl_sdw_data *data = &sdw->data; + int ioctl_offset = SDW_CNL_IOCTL + (data->inst_id * + SDW_CNL_IOCTL_REG_OFFSET); + + + ioctl |= CNL_IOCTL_BKE_MASK << CNL_IOCTL_BKE_SHIFT; + cnl_sdw_reg_writew(data->sdw_shim, ioctl_offset, ioctl); + + ioctl |= CNL_IOCTL_WPDD_MASK << CNL_IOCTL_WPDD_SHIFT; + cnl_sdw_reg_writew(data->sdw_shim, ioctl_offset, ioctl); + + ioctl |= CNL_IOCTL_DO_MASK << CNL_IOCTL_DO_SHIFT; + cnl_sdw_reg_writew(data->sdw_shim, ioctl_offset, ioctl); + + ioctl |= CNL_IOCTL_DOE_MASK << CNL_IOCTL_DOE_SHIFT; + cnl_sdw_reg_writew(data->sdw_shim, ioctl_offset, ioctl); +} + +static int sdw_config_update(struct cnl_sdw *sdw) +{ + struct cnl_sdw_data *data = &sdw->data; + struct sdw_master *mstr = sdw->mstr; + int sync_reg, syncgo_mask; + volatile int config_update = 0; + volatile int sync_update = 0; + /* Try 10 times before giving up on configuration 
update */ + int timeout = 10; + int config_updated = 0; + + config_update |= MCP_CONFIGUPDATE_CONFIGUPDATE_MASK << + MCP_CONFIGUPDATE_CONFIGUPDATE_SHIFT; + /* Bit is self-cleared when configuration gets updated. */ + cnl_sdw_reg_writel(data->sdw_regs, SDW_CNL_MCP_CONFIGUPDATE, + config_update); + + /* + * Set SYNCGO bit for Master(s) running in aggregated mode + * (MMModeEN = 1). This action causes all gSyncs of all Master IPs + * to be unmasked and asserted at the currently active gSync rate. + * The initialization-pending Master IP SoundWire bus clock will + * start up synchronizing to gSync, leading to bus reset entry, + * subsequent exit, and 1st Frame generation aligning to gSync. + * Note that this is done in order to overcome hardware bug related + * to mis-alignment of gSync and frame. + */ + if (mstr->link_sync_mask) { + sync_reg = cnl_sdw_reg_readl(data->sdw_shim, SDW_CNL_SYNC); + sync_reg |= (CNL_SYNC_SYNCGO_MASK << CNL_SYNC_SYNCGO_SHIFT); + cnl_sdw_reg_writel(data->sdw_shim, SDW_CNL_SYNC, sync_reg); + syncgo_mask = (CNL_SYNC_SYNCGO_MASK << CNL_SYNC_SYNCGO_SHIFT); + + do { + sync_update = cnl_sdw_reg_readl(data->sdw_shim, + SDW_CNL_SYNC); + if ((sync_update & syncgo_mask) == 0) + break; + + msleep(20); + timeout--; + + } while (timeout); + + if ((sync_update & syncgo_mask) != 0) { + dev_err(&mstr->dev, "Failed to set sync go\n"); + return -EIO; + } + + /* Reset timeout */ + timeout = 10; + } + + /* Wait for config update bit to be self cleared */ + do { + config_update = cnl_sdw_reg_readl(data->sdw_regs, + SDW_CNL_MCP_CONFIGUPDATE); + if ((config_update & + MCP_CONFIGUPDATE_CONFIGUPDATE_MASK) == 0) { + config_updated = 1; + break; + } + timeout--; + /* Wait for 20ms between each try */ + msleep(20); + + } while (timeout != 0); + if (!config_updated) { + dev_err(&mstr->dev, "SoundWire update failed\n"); + return -EIO; + } + return 0; +} + +static void sdw_enable_interrupt(struct cnl_sdw *sdw) +{ + struct cnl_sdw_data *data = &sdw->data; + int int_mask = 
0; + + cnl_sdw_reg_writel(data->sdw_regs, SDW_CNL_MCP_SLAVEINTMASK0, + MCP_SLAVEINTMASK0_MASK); + cnl_sdw_reg_writel(data->sdw_regs, SDW_CNL_MCP_SLAVEINTMASK1, + MCP_SLAVEINTMASK1_MASK); + /* Enable slave interrupt mask */ + int_mask |= MCP_INTMASK_SLAVERESERVED_MASK << + MCP_INTMASK_SLAVERESERVED_SHIFT; + int_mask |= MCP_INTMASK_SLAVEALERT_MASK << + MCP_INTMASK_SLAVEALERT_SHIFT; + int_mask |= MCP_INTMASK_SLAVEATTACHED_MASK << + MCP_INTMASK_SLAVEATTACHED_SHIFT; + int_mask |= MCP_INTMASK_SLAVENOTATTACHED_MASK << + MCP_INTMASK_SLAVENOTATTACHED_SHIFT; + int_mask |= MCP_INTMASK_CONTROLBUSCLASH_MASK << + MCP_INTMASK_CONTROLBUSCLASH_SHIFT; + int_mask |= MCP_INTMASK_DATABUSCLASH_MASK << + MCP_INTMASK_DATABUSCLASH_SHIFT; + int_mask |= MCP_INTMASK_RXWL_MASK << + MCP_INTMASK_RXWL_SHIFT; + int_mask |= MCP_INTMASK_IRQEN_MASK << + MCP_INTMASK_IRQEN_SHIFT; + int_mask |= MCP_INTMASK_DPPDIINT_MASK << + MCP_INTMASK_DPPDIINT_SHIFT; + cnl_sdw_reg_writel(data->sdw_regs, SDW_CNL_MCP_INTMASK, int_mask); +} + +static int sdw_pcm_pdi_init(struct cnl_sdw *sdw) +{ + struct sdw_master *mstr = sdw->mstr; + struct cnl_sdw_data *data = &sdw->data; + int pcm_cap; + int pcm_cap_offset = SDW_CNL_PCMSCAP + (data->inst_id * + SDW_CNL_PCMSCAP_REG_OFFSET); + int ch_cnt_offset; + int i; + + pcm_cap = cnl_sdw_reg_readw(data->sdw_shim, pcm_cap_offset); + sdw->num_pcm_streams = (pcm_cap >> CNL_PCMSCAP_BSS_SHIFT) & + CNL_PCMSCAP_BSS_MASK; + dev_info(&mstr->dev, "Number of Bidirectional PCM stream = %d\n", + sdw->num_pcm_streams); + sdw->pcm_streams = devm_kzalloc(&mstr->dev, + sdw->num_pcm_streams * sizeof(struct cnl_sdw_pdi_stream), + GFP_KERNEL); + if (!sdw->pcm_streams) + return -ENOMEM; + /* Two of the PCM streams are reserved for bulk transfers */ + sdw->pcm_streams -= SDW_CNL_PCM_PDI_NUM_OFFSET; + for (i = SDW_CNL_PCM_PDI_NUM_OFFSET; i < sdw->num_pcm_streams; i++) { + ch_cnt_offset = SDW_CNL_PCMSCHC + + (data->inst_id * SDW_CNL_PCMSCHC_REG_OFFSET) + + ((i + SDW_CNL_PCM_PDI_NUM_OFFSET) * 0x2); + + 
sdw->pcm_streams[i].ch_cnt = cnl_sdw_reg_readw(data->sdw_shim, + ch_cnt_offset); + /* Zero based value in register */ + sdw->pcm_streams[i].ch_cnt++; + sdw->pcm_streams[i].pdi_num = i; + sdw->pcm_streams[i].allocated = false; + dev_info(&mstr->dev, "CH Count for stream %d is %d\n", + i, sdw->pcm_streams[i].ch_cnt); + } + return 0; +} + +static int sdw_pdm_pdi_init(struct cnl_sdw *sdw) +{ + int i; + struct sdw_master *mstr = sdw->mstr; + struct cnl_sdw_data *data = &sdw->data; + int pdm_cap, pdm_ch_count, total_pdm_streams; + int pdm_cap_offset = SDW_CNL_PDMSCAP + + (data->inst_id * SDW_CNL_PDMSCAP_REG_OFFSET); + pdm_cap = cnl_sdw_reg_readw(data->sdw_shim, pdm_cap_offset); + sdw->num_pdm_streams = (pdm_cap >> CNL_PDMSCAP_BSS_SHIFT) & + CNL_PDMSCAP_BSS_MASK; + + sdw->pdm_streams = devm_kzalloc(&mstr->dev, + sdw->num_pdm_streams * sizeof(struct cnl_sdw_pdi_stream), + GFP_KERNEL); + if (!sdw->pdm_streams) + return -ENOMEM; + + sdw->num_in_pdm_streams = (pdm_cap >> CNL_PDMSCAP_ISS_SHIFT) & + CNL_PDMSCAP_ISS_MASK; + + sdw->in_pdm_streams = devm_kzalloc(&mstr->dev, + sdw->num_in_pdm_streams * sizeof(struct cnl_sdw_pdi_stream), + GFP_KERNEL); + + if (!sdw->in_pdm_streams) + return -ENOMEM; + + sdw->num_out_pdm_streams = (pdm_cap >> CNL_PDMSCAP_OSS_SHIFT) & + CNL_PDMSCAP_OSS_MASK; + /* Zero based value in register */ + sdw->out_pdm_streams = devm_kzalloc(&mstr->dev, + sdw->num_out_pdm_streams * sizeof(struct cnl_sdw_pdi_stream), + GFP_KERNEL); + if (!sdw->out_pdm_streams) + return -ENOMEM; + + total_pdm_streams = sdw->num_pdm_streams + + sdw->num_in_pdm_streams + + sdw->num_out_pdm_streams; + + pdm_ch_count = (pdm_cap >> CNL_PDMSCAP_CPSS_SHIFT) & + CNL_PDMSCAP_CPSS_MASK; + for (i = 0; i < sdw->num_pdm_streams; i++) { + sdw->pdm_streams[i].ch_cnt = pdm_ch_count; + sdw->pdm_streams[i].pdi_num = i + SDW_CNL_PDM_PDI_NUM_OFFSET; + sdw->pdm_streams[i].allocated = false; + } + for (i = 0; i < sdw->num_in_pdm_streams; i++) { + sdw->in_pdm_streams[i].ch_cnt = pdm_ch_count; + 
sdw->in_pdm_streams[i].pdi_num = i + SDW_CNL_PDM_PDI_NUM_OFFSET; + sdw->in_pdm_streams[i].allocated = false; + } + for (i = 0; i < sdw->num_out_pdm_streams; i++) { + sdw->out_pdm_streams[i].ch_cnt = pdm_ch_count; + sdw->out_pdm_streams[i].pdi_num = + i + SDW_CNL_PDM_PDI_NUM_OFFSET; + sdw->out_pdm_streams[i].allocated = false; + } + return 0; +} + +static int sdw_port_pdi_init(struct cnl_sdw *sdw) +{ + int i, ret = 0; + + for (i = 0; i < CNL_SDW_MAX_PORTS; i++) { + sdw->port[i].port_num = i; + sdw->port[i].allocated = false; + } + ret = sdw_pcm_pdi_init(sdw); + if (ret) + return ret; + ret = sdw_pdm_pdi_init(sdw); + + return ret; +} + +static int sdw_init(struct cnl_sdw *sdw, bool is_first_init) +{ + struct sdw_master *mstr = sdw->mstr; + struct cnl_sdw_data *data = &sdw->data; + int mcp_config, mcp_control, sync_reg, mcp_clockctrl; + volatile int sync_update = 0; + int timeout = 10; /* Try 10 times before timing out */ + int ret = 0, mask; + + /* Power up the link controller */ + ret = sdw_power_up_link(sdw); + if (ret) + return ret; + + /* Initialize the IO control registers */ + sdw_init_shim(sdw); + + /* Switch the ownership to Master IP from glue logic */ + sdw_switch_to_mip(sdw); + + /* write to MCP Control register to enable block wakeup */ + mcp_control = cnl_sdw_reg_readl(data->sdw_regs, SDW_CNL_MCP_CONTROL); + mask = (MCP_CONTROL_BLOCKWAKEUP_MASK << + MCP_CONTROL_BLOCKWAKEUP_SHIFT); + mcp_control &= ~mask; + cnl_sdw_reg_writel(data->sdw_regs, SDW_CNL_MCP_CONTROL, mcp_control); + do { + mcp_control = cnl_sdw_reg_readl(data->sdw_regs, + SDW_CNL_MCP_CONTROL); + if (!(mcp_control & mask)) + break; + + timeout--; + /* Wait 20ms before each time */ + msleep(20); + } while (timeout != 0); + + /* Write the MCP Control register to exit from clock stop */ + mcp_control = cnl_sdw_reg_readl(data->sdw_regs, SDW_CNL_MCP_CONTROL); + mask = (MCP_CONTROL_CLOCKSTOPCLEAR_MASK << + MCP_CONTROL_CLOCKSTOPCLEAR_SHIFT); + mcp_control |= mask; + cnl_sdw_reg_writel(data->sdw_regs, 
SDW_CNL_MCP_CONTROL, mcp_control); + + /* Reset timeout */ + timeout = 10; + + /* Wait for clock stop exit bit to be self cleared */ + do { + mcp_control = cnl_sdw_reg_readl(data->sdw_regs, + SDW_CNL_MCP_CONTROL); + if (!(mcp_control & mask)) + break; + timeout--; + /* Wait 20ms before each time */ + msleep(20); + } while (timeout != 0); + + /* Read once again to confirm */ + mcp_control = cnl_sdw_reg_readl(data->sdw_regs, SDW_CNL_MCP_CONTROL); + if (!(mcp_control & mask)) { + dev_dbg(&sdw->mstr->dev, "SDW ctrl %d exit clock stop success\n", + data->inst_id); + } else { + dev_err(&sdw->mstr->dev, + "Failed exit from clock stop SDW ctrl %d\n", + data->inst_id); + return -EIO; + } + + /* Set SyncPRD period */ + sync_reg = cnl_sdw_reg_readl(data->sdw_shim, SDW_CNL_SYNC); + sync_reg |= (SDW_CNL_DEFAULT_SYNC_PERIOD << CNL_SYNC_SYNCPRD_SHIFT); + + /* Set SyncPU bit */ + sync_reg |= (0x1 << CNL_SYNC_SYNCCPU_SHIFT); + cnl_sdw_reg_writel(data->sdw_shim, SDW_CNL_SYNC, sync_reg); + + /* Reset timeout */ + timeout = 10; + + do { + sync_update = cnl_sdw_reg_readl(data->sdw_shim, SDW_CNL_SYNC); + if ((sync_update & CNL_SYNC_SYNCCPU_MASK) == 0) + break; + timeout--; + /* Wait 20ms before each time */ + msleep(20); + } while (timeout != 0); + if ((sync_update & CNL_SYNC_SYNCCPU_MASK) != 0) { + dev_err(&mstr->dev, "Fail to set sync period\n"); + return -EINVAL; + } + + /* + * Set CMDSYNC bit based on Master ID + * Note that this bit is set only for the Master which will be + * running in aggregated mode (MMModeEN = 1). By doing + * this the gSync to Master IP to be masked inactive. + * Note that this is done in order to overcome hardware bug related + * to mis-alignment of gSync and frame. 
+ */ + if (mstr->link_sync_mask) { + + sync_reg = cnl_sdw_reg_readl(data->sdw_shim, SDW_CNL_SYNC); + sync_reg |= (1 << (data->inst_id + CNL_SYNC_CMDSYNC_SHIFT)); + cnl_sdw_reg_writel(data->sdw_shim, SDW_CNL_SYNC, sync_reg); + } + + /* Set clock divider to default value in default bank */ + mcp_clockctrl = cnl_sdw_reg_readl(data->sdw_regs, + SDW_CNL_MCP_CLOCKCTRL0); + mcp_clockctrl |= SDW_CNL_DEFAULT_CLK_DIVIDER; + cnl_sdw_reg_writel(data->sdw_regs, SDW_CNL_MCP_CLOCKCTRL0, + mcp_clockctrl); + + /* Set the Frame shape init to default value */ + cnl_sdw_reg_writel(data->sdw_regs, SDW_CNL_MCP_FRAMESHAPEINIT, + SDW_CNL_DEFAULT_FRAME_SHAPE); + + + /* Set the SSP interval to default value for both banks */ + cnl_sdw_reg_writel(data->sdw_regs, SDW_CNL_MCP_SSPCTRL0, + SDW_CNL_DEFAULT_SSP_INTERVAL); + cnl_sdw_reg_writel(data->sdw_regs, SDW_CNL_MCP_SSPCTRL1, + SDW_CNL_DEFAULT_SSP_INTERVAL); + + /* Set command acceptance mode. This is required because when + * Master broadcasts the clock_stop command to slaves, slaves + * might be already suspended, so this return NO ACK, in that + * case also master should go to clock stop mode. + */ + mcp_control = cnl_sdw_reg_readl(data->sdw_regs, + SDW_CNL_MCP_CONTROL); + mcp_control |= (MCP_CONTROL_CMDACCEPTMODE_MASK << + MCP_CONTROL_CMDACCEPTMODE_SHIFT); + cnl_sdw_reg_writel(data->sdw_regs, SDW_CNL_MCP_CONTROL, mcp_control); + + + mcp_config = cnl_sdw_reg_readl(data->sdw_regs, SDW_CNL_MCP_CONFIG); + /* Set Max cmd retry to 15 times */ + mcp_config |= (CNL_SDW_MAX_CMD_RETRIES << + MCP_CONFIG_MAXCMDRETRY_SHIFT); + + /* Set Ping request to ping delay to 15 frames. 
+ * Spec supports 32 max frames + */ + mcp_config |= (CNL_SDW_MAX_PREQ_DELAY << + MCP_CONFIG_MAXPREQDELAY_SHIFT); + + /* If master is synchronized to some other master set Multimode */ + if (mstr->link_sync_mask) { + mcp_config |= (MCP_CONFIG_MMMODEEN_MASK << + MCP_CONFIG_MMMODEEN_SHIFT); + mcp_config |= (MCP_CONFIG_SSPMODE_MASK << + MCP_CONFIG_SSPMODE_SHIFT); + } else { + mcp_config &= ~(MCP_CONFIG_MMMODEEN_MASK << + MCP_CONFIG_MMMODEEN_SHIFT); + mcp_config &= ~(MCP_CONFIG_SSPMODE_MASK << + MCP_CONFIG_SSPMODE_SHIFT); + } + + /* Disable automatic bus release */ + mcp_config &= ~(MCP_CONFIG_BRELENABLE_MASK << + MCP_CONFIG_BRELENABLE_SHIFT); + + /* Disable sniffer mode now */ + mcp_config &= ~(MCP_CONFIG_SNIFFEREN_MASK << + MCP_CONFIG_SNIFFEREN_SHIFT); + + /* Set the command mode for Tx and Rx command */ + mcp_config &= ~(MCP_CONFIG_CMDMODE_MASK << + MCP_CONFIG_CMDMODE_SHIFT); + + /* Set operation mode to normal */ + mcp_config &= ~(MCP_CONFIG_OPERATIONMODE_MASK << + MCP_CONFIG_OPERATIONMODE_SHIFT); + mcp_config |= ((MCP_CONFIG_OPERATIONMODE_NORMAL & + MCP_CONFIG_OPERATIONMODE_MASK) << + MCP_CONFIG_OPERATIONMODE_SHIFT); + + cnl_sdw_reg_writel(data->sdw_regs, SDW_CNL_MCP_CONFIG, mcp_config); + + /* Initialize the phy control registers. 
*/ + sdw_init_phyctrl(sdw); + + if (is_first_init) { + /* Initlaize the ports */ + ret = sdw_port_pdi_init(sdw); + if (ret) { + dev_err(&mstr->dev, "SoundWire controller init failed %d\n", + data->inst_id); + sdw_power_down_link(sdw); + return ret; + } + } + + /* Lastly enable interrupts */ + sdw_enable_interrupt(sdw); + + /* Update soundwire configuration */ + return sdw_config_update(sdw); +} + +static int sdw_alloc_pcm_stream(struct cnl_sdw *sdw, + struct cnl_sdw_port *port, int ch_cnt, + enum sdw_data_direction direction) +{ + int num_pcm_streams, pdi_ch_map = 0, stream_id; + struct cnl_sdw_pdi_stream *stream, *pdi_stream; + unsigned int i; + unsigned int ch_map_offset, port_ctrl_offset, pdi_config_offset; + struct sdw_master *mstr = sdw->mstr; + unsigned int port_ctrl = 0, pdi_config = 0, channel_mask; + unsigned int stream_config; + + /* Currently PCM supports only bi-directional streams only */ + num_pcm_streams = sdw->num_pcm_streams; + stream = sdw->pcm_streams; + + mutex_lock(&sdw->stream_lock); + for (i = SDW_CNL_PCM_PDI_NUM_OFFSET; i < num_pcm_streams; i++) { + if (stream[i].allocated == false) { + stream[i].allocated = true; + stream[i].port_num = port->port_num; + port->pdi_stream = &stream[i]; + break; + } + } + mutex_unlock(&sdw->stream_lock); + if (!port->pdi_stream) { + dev_err(&mstr->dev, "Unable to allocate stream for PCM\n"); + return -EINVAL; + } + pdi_stream = port->pdi_stream; + /* We didnt get enough PDI streams, so free the allocated + * PDI streams. 
Free the port as well and return with error + */ + pdi_stream->l_ch_num = 0; + pdi_stream->h_ch_num = ch_cnt - 1; + ch_map_offset = SDW_CNL_PCMSCHM + + (SDW_CNL_PCMSCHM_REG_OFFSET * mstr->nr) + + (SDW_PCM_STRM_START_INDEX * pdi_stream->pdi_num); + if (port->direction == SDW_DATA_DIR_IN) + pdi_ch_map |= (CNL_PCMSYCM_DIR_MASK << CNL_PCMSYCM_DIR_SHIFT); + else + pdi_ch_map &= ~(CNL_PCMSYCM_DIR_MASK << CNL_PCMSYCM_DIR_SHIFT); + /* TODO: Remove this hardcoding */ + stream_id = mstr->nr * 16 + pdi_stream->pdi_num + 5; + pdi_stream->sdw_pdi_num = stream_id; + pdi_ch_map |= (stream_id & CNL_PCMSYCM_STREAM_MASK) << + CNL_PCMSYCM_STREAM_SHIFT; + pdi_ch_map |= (pdi_stream->l_ch_num & + CNL_PCMSYCM_LCHAN_MASK) << + CNL_PCMSYCM_LCHAN_SHIFT; + pdi_ch_map |= (0xF & CNL_PCMSYCM_HCHAN_MASK) << + CNL_PCMSYCM_HCHAN_SHIFT; + cnl_sdw_reg_writew(sdw->data.sdw_shim, ch_map_offset, + pdi_ch_map); + /* If direction is input, port is sink port*/ + if (direction == SDW_DATA_DIR_IN) + port_ctrl |= (PORTCTRL_PORT_DIRECTION_MASK << + PORTCTRL_PORT_DIRECTION_SHIFT); + else + port_ctrl &= ~(PORTCTRL_PORT_DIRECTION_MASK << + PORTCTRL_PORT_DIRECTION_SHIFT); + + port_ctrl_offset = SDW_CNL_PORTCTRL + (port->port_num * + SDW_CNL_PORT_REG_OFFSET); + cnl_sdw_reg_writel(sdw->data.sdw_regs, port_ctrl_offset, port_ctrl); + + pdi_config |= ((port->port_num & PDINCONFIG_PORT_NUMBER_MASK) << + PDINCONFIG_PORT_NUMBER_SHIFT); + + channel_mask = (1 << ch_cnt) - 1; + pdi_config |= (channel_mask << PDINCONFIG_CHANNEL_MASK_SHIFT); + /* TODO: Remove below hardcodings */ + pdi_config_offset = (SDW_CNL_PDINCONFIG0 + + (pdi_stream->pdi_num * 16)); + cnl_sdw_reg_writel(sdw->data.sdw_regs, pdi_config_offset, pdi_config); + + stream_config = cnl_sdw_reg_readl(sdw->data.alh_base, + (pdi_stream->sdw_pdi_num * ALH_CNL_STRMZCFG_OFFSET)); + stream_config |= (CNL_STRMZCFG_DMAT_VAL & CNL_STRMZCFG_DMAT_MASK) << + CNL_STRMZCFG_DMAT_SHIFT; + stream_config |= ((ch_cnt - 1) & CNL_STRMZCFG_CHAN_MASK) << + CNL_STRMZCFG_CHAN_SHIFT; + 
cnl_sdw_reg_writel(sdw->data.alh_base, + (pdi_stream->sdw_pdi_num * ALH_CNL_STRMZCFG_OFFSET), + stream_config); + return 0; +} + +static int sdw_alloc_pdm_stream(struct cnl_sdw *sdw, + struct cnl_sdw_port *port, int ch_cnt, int direction) +{ + int num_pdm_streams; + struct cnl_sdw_pdi_stream *stream; + int i; + unsigned int port_ctrl_offset, pdi_config_offset; + unsigned int port_ctrl = 0, pdi_config = 0, channel_mask; + + /* Currently PDM supports either Input or Output Streams */ + if (direction == SDW_DATA_DIR_IN) { + num_pdm_streams = sdw->num_in_pdm_streams; + stream = sdw->in_pdm_streams; + } else { + num_pdm_streams = sdw->num_out_pdm_streams; + stream = sdw->out_pdm_streams; + } + mutex_lock(&sdw->stream_lock); + for (i = 0; i < num_pdm_streams; i++) { + if (stream[i].allocated == false) { + stream[i].allocated = true; + stream[i].port_num = port->port_num; + port->pdi_stream = &stream[i]; + break; + } + } + mutex_unlock(&sdw->stream_lock); + if (!port->pdi_stream) + return -EINVAL; + /* If direction is input, port is sink port*/ + if (direction == SDW_DATA_DIR_IN) + port_ctrl |= (PORTCTRL_PORT_DIRECTION_MASK << + PORTCTRL_PORT_DIRECTION_SHIFT); + else + port_ctrl &= ~(PORTCTRL_PORT_DIRECTION_MASK << + PORTCTRL_PORT_DIRECTION_SHIFT); + + port_ctrl_offset = SDW_CNL_PORTCTRL + (port->port_num * + SDW_CNL_PORT_REG_OFFSET); + cnl_sdw_reg_writel(sdw->data.sdw_regs, port_ctrl_offset, port_ctrl); + + pdi_config |= ((port->port_num & PDINCONFIG_PORT_NUMBER_MASK) << + PDINCONFIG_PORT_NUMBER_SHIFT); + + channel_mask = (1 << ch_cnt) - 1; + pdi_config |= (channel_mask << PDINCONFIG_CHANNEL_MASK_SHIFT); + /* TODO: Remove below hardcodings */ + pdi_config_offset = (SDW_CNL_PDINCONFIG0 + (stream[i].pdi_num * 16)); + cnl_sdw_reg_writel(sdw->data.sdw_regs, pdi_config_offset, pdi_config); + + return 0; +} + +struct cnl_sdw_port *cnl_sdw_alloc_port(struct sdw_master *mstr, int ch_count, + enum sdw_data_direction direction, + enum cnl_sdw_pdi_stream_type stream_type) +{ + 
struct cnl_sdw *sdw; + struct cnl_sdw_port *port = NULL; + int i, ret = 0; + struct num_pdi_streams; /* NOTE(review): unused forward declaration of a struct tag — almost certainly a typo for 'int num_pdi_streams;' and dead code either way; remove */ + + sdw = sdw_master_get_drvdata(mstr); + + mutex_lock(&sdw->stream_lock); + /* Reserve the first free port; the scan starts at 1, so port 0 is never handed out here */ for (i = 1; i < CNL_SDW_MAX_PORTS; i++) { + if (sdw->port[i].allocated == false) { + port = &sdw->port[i]; + port->allocated = true; + port->direction = direction; + port->ch_cnt = ch_count; + break; + } + } + mutex_unlock(&sdw->stream_lock); + if (!port) { + dev_err(&mstr->dev, "Unable to allocate port\n"); + return NULL; + } + port->pdi_stream = NULL; + if (stream_type == CNL_SDW_PDI_TYPE_PDM) + ret = sdw_alloc_pdm_stream(sdw, port, ch_count, direction); + else + ret = sdw_alloc_pcm_stream(sdw, port, ch_count, direction); + if (!ret) + return port; + + /* Stream allocation failed: roll back the port reservation before returning */ dev_err(&mstr->dev, "Unable to allocate stream\n"); + mutex_lock(&sdw->stream_lock); + port->allocated = false; + mutex_unlock(&sdw->stream_lock); + return NULL; +} +EXPORT_SYMBOL_GPL(cnl_sdw_alloc_port); + +/* Release a port previously handed out by cnl_sdw_alloc_port(), freeing its PDI stream. */ void cnl_sdw_free_port(struct sdw_master *mstr, int port_num) +{ + int i; + struct cnl_sdw *sdw; + struct cnl_sdw_port *port = NULL; + + sdw = sdw_master_get_drvdata(mstr); + for (i = 1; i < CNL_SDW_MAX_PORTS; i++) { + if (sdw->port[i].port_num == port_num) { + port = &sdw->port[i]; + break; + } + } + if (!port) + return; + mutex_lock(&sdw->stream_lock); + /* NOTE(review): port->pdi_stream is dereferenced without a NULL check — calling this for a matching port whose stream allocation failed (or was never made) would oops; confirm callers */ port->pdi_stream->allocated = false; + port->pdi_stream = NULL; + port->allocated = false; + mutex_unlock(&sdw->stream_lock); +} +EXPORT_SYMBOL_GPL(cnl_sdw_free_port); + +/* Merge the two 32-bit slave interrupt status registers into one 64-bit word, decode a per-slave status code from each slave's bit field, and report the result to the SoundWire core. */ static int cnl_sdw_update_slave_status(struct cnl_sdw *sdw, int slave_intstat0, + int slave_intstat1) +{ + int i; + struct sdw_status slave_status; + u64 slaves_stat, slave_stat; + int ret = 0; + + memset(&slave_status, 0x0, sizeof(slave_status)); + /* NOTE(review): slave_intstat1 is a signed int — the (u64) cast sign-extends when bit 31 is set, smearing 1s into the upper-word merge; cast through (u32) first */ slaves_stat = (u64) slave_intstat1 << + SDW_CNL_SLAVES_STAT_UPPER_DWORD_SHIFT; + slaves_stat |= slave_intstat0; + for (i = 0; i <= SOUNDWIRE_MAX_DEVICES; i++) { + slave_stat = slaves_stat >> (i * SDW_CNL_SLAVE_STATUS_BITS); + if (slave_stat & 
MCP_SLAVEINTSTAT_NOT_PRESENT_MASK) + slave_status.status[i] = SDW_SLAVE_STAT_NOT_PRESENT; + else if (slave_stat & MCP_SLAVEINTSTAT_ATTACHED_MASK) + slave_status.status[i] = SDW_SLAVE_STAT_ATTACHED_OK; + else if (slave_stat & MCP_SLAVEINTSTAT_ALERT_MASK) + slave_status.status[i] = SDW_SLAVE_STAT_ALERT; + else if (slave_stat & MCP_SLAVEINTSTAT_RESERVED_MASK) + slave_status.status[i] = SDW_SLAVE_STAT_RESERVED; + } + ret = sdw_master_update_slv_status(sdw->mstr, &slave_status); + return ret; +} + +/* Drain every response word currently reported available by the RX FIFO status register into sdw->response_buf; responses are read from the same base address as the command FIFO. */ static void cnl_sdw_read_response(struct cnl_sdw *sdw) +{ + struct cnl_sdw_data *data = &sdw->data; + int num_res = 0, i; + u32 cmd_base = SDW_CNL_MCP_COMMAND_BASE; + + num_res = cnl_sdw_reg_readl(data->sdw_regs, SDW_CNL_MCP_FIFOSTAT); + num_res &= MCP_RX_FIFO_AVAIL_MASK; + for (i = 0; i < num_res; i++) { + sdw->response_buf[i] = cnl_sdw_reg_readl(data->sdw_regs, + cmd_base); + cmd_base += SDW_CNL_CMD_WORD_LEN; + } +} + +/* Decode 'count' response words for msg: check ACK/NACK, and for reads copy the response data bytes into msg->buf starting at 'offset'. Returns 0 on success, -EREMOTEIO on NACK/no-ACK. */ static enum sdw_command_response sdw_fill_message_response( + struct sdw_master *mstr, + struct sdw_msg *msg, + int count, int offset) +{ + int i, j; + int no_ack = 0, nack = 0; + struct cnl_sdw *sdw = sdw_master_get_drvdata(mstr); + + for (i = 0; i < count; i++) { + if (!(MCP_RESPONSE_ACK_MASK & sdw->response_buf[i])) { + no_ack = 1; + dev_err(&mstr->dev, "Ack not recevied\n"); + if ((MCP_RESPONSE_NACK_MASK & + sdw->response_buf[i])) { + nack = 1; + dev_err(&mstr->dev, "NACK recevied\n"); + } + } + /* NOTE(review): this break is unconditional, so only response_buf[0] is ever checked for ACK/NACK despite the 'count' loop bound — it probably belongs inside the if above, or should be removed to scan all responses; also note the 'recevied' typos in the two runtime log strings above (fix in a code change, not here) */ break; + } + if (nack) { + dev_err(&mstr->dev, "Nack detected for slave %d\n", msg->slave_addr); + msg->len = 0; + return -EREMOTEIO; /* NOTE(review): negative errno returned through 'enum sdw_command_response' — relies on implicit conversion; this pattern repeats across the driver */ + } else if (no_ack) { + dev_err(&mstr->dev, "Command ignored for slave %d\n", msg->slave_addr); + msg->len = 0; + return -EREMOTEIO; + } + if (msg->flag == SDW_MSG_FLAG_WRITE) + return 0; + /* Response and Command has same base address */ + for (j = 0; j < count; j++) + msg->buf[j + offset] = + (sdw->response_buf[j] >> MCP_RESPONSE_RDATA_SHIFT); + return 0; +} + + +irqreturn_t cnl_sdw_irq_handler(int irq, void *context) +{ + struct 
cnl_sdw *sdw = context; + volatile int int_status, status, wake_sts; + + struct cnl_sdw_data *data = &sdw->data; + volatile int slave_intstat0 = 0, slave_intstat1 = 0; + struct sdw_master *mstr = sdw->mstr; + + /* + * Return if IP is in power down state. Interrupt can still come + * since its shared irq. + */ + if (!sdw->sdw_link_status) + return IRQ_NONE; + + int_status = cnl_sdw_reg_readl(data->sdw_regs, SDW_CNL_MCP_INTSTAT); + status = cnl_sdw_reg_readl(data->sdw_regs, SDW_CNL_MCP_STAT); + slave_intstat0 = cnl_sdw_reg_readl(data->sdw_regs, + SDW_CNL_MCP_SLAVEINTSTAT0); + slave_intstat1 = cnl_sdw_reg_readl(data->sdw_regs, + SDW_CNL_MCP_SLAVEINTSTAT1); + wake_sts = cnl_sdw_reg_readw(data->sdw_shim, + SDW_CNL_SNDWWAKESTS_REG_OFFSET); + cnl_sdw_reg_writew(data->sdw_shim, SDW_CNL_SNDWWAKESTS_REG_OFFSET, + wake_sts); + + if (!(int_status & (MCP_INTSTAT_IRQ_MASK << MCP_INTSTAT_IRQ_SHIFT))) + return IRQ_NONE; + + if (int_status & (MCP_INTSTAT_RXWL_MASK << MCP_INTSTAT_RXWL_SHIFT)) { + cnl_sdw_read_response(sdw); + if (sdw->async_msg.async_xfer_complete) { + sdw_fill_message_response(mstr, sdw->async_msg.msg, + sdw->async_msg.length, 0); + complete(sdw->async_msg.async_xfer_complete); + sdw->async_msg.async_xfer_complete = NULL; + sdw->async_msg.msg = NULL; + } else + complete(&sdw->tx_complete); + } + if (int_status & (MCP_INTSTAT_CONTROLBUSCLASH_MASK << + MCP_INTSTAT_CONTROLBUSCLASH_SHIFT)) { + /* Some slave is behaving badly, where its driving + * data line during control word bits. + */ + dev_err_ratelimited(&mstr->dev, "Bus clash detected for control word\n"); + WARN_ONCE(1, "Bus clash detected for control word\n"); + } + if (int_status & (MCP_INTSTAT_DATABUSCLASH_MASK << + MCP_INTSTAT_DATABUSCLASH_SHIFT)) { + /* More than 1 slave is trying to drive bus. There is + * some problem with ownership of bus data bits, + * or either of the + * slave is behaving badly. 
+ */ /* NOTE(review): this is the DATA bus-clash branch, but the dev_err text below says "control word" — copy/paste from the control-word branch above; it should say "data word" as the WARN_ONCE does (runtime string, fix in a code change) */ + dev_err_ratelimited(&mstr->dev, "Bus clash detected for control word\n"); + WARN_ONCE(1, "Bus clash detected for data word\n"); + } + + if (int_status & (MCP_INTSTAT_SLAVE_STATUS_CHANGED_MASK << + MCP_INTSTAT_SLAVE_STATUS_CHANGED_SHIFT)) { + dev_info(&mstr->dev, "Slave status change\n"); + cnl_sdw_update_slave_status(sdw, slave_intstat0, + slave_intstat1); + } + /* Write the observed status values back — presumably write-1-to-clear acknowledge; confirm against the register spec */ cnl_sdw_reg_writel(data->sdw_regs, SDW_CNL_MCP_SLAVEINTSTAT0, + slave_intstat0); + cnl_sdw_reg_writel(data->sdw_regs, SDW_CNL_MCP_SLAVEINTSTAT1, + slave_intstat1); + cnl_sdw_reg_writel(data->sdw_regs, SDW_CNL_MCP_INTSTAT, int_status); + return IRQ_HANDLED; +} + +/* Program the slave's SCP AddrPage1/AddrPage2 registers with two back-to-back write commands so that the following transfer addresses the intended register page. */ static enum sdw_command_response cnl_program_scp_addr(struct sdw_master *mstr, + struct sdw_msg *msg) +{ + struct cnl_sdw *sdw = sdw_master_get_drvdata(mstr); + struct cnl_sdw_data *data = &sdw->data; + u32 cmd_base = SDW_CNL_MCP_COMMAND_BASE; + u32 cmd_data[2] = {0, 0}; + unsigned long time_left; + int no_ack = 0, nack = 0; + int i; + + /* Since we are programming 2 commands, program the + * RX watermark level at 2 + */ + cnl_sdw_reg_writel(data->sdw_regs, SDW_CNL_MCP_FIFOLEVEL, 2); + /* Program device address */ + cmd_data[0] |= (msg->slave_addr & MCP_COMMAND_DEV_ADDR_MASK) << + MCP_COMMAND_DEV_ADDR_SHIFT; + /* Write command to program the scp_addr1 register */ + cmd_data[0] |= (0x3 << MCP_COMMAND_COMMAND_SHIFT); + cmd_data[1] = cmd_data[0]; + /* scp_addr1 register address */ + cmd_data[0] |= (SDW_SCP_ADDRPAGE1 << MCP_COMMAND_REG_ADDR_L_SHIFT); + cmd_data[1] |= (SDW_SCP_ADDRPAGE2 << MCP_COMMAND_REG_ADDR_L_SHIFT); + cmd_data[0] |= msg->addr_page1; + cmd_data[1] |= msg->addr_page2; + + cnl_sdw_reg_writel(data->sdw_regs, cmd_base, cmd_data[0]); + cmd_base += SDW_CNL_CMD_WORD_LEN; + cnl_sdw_reg_writel(data->sdw_regs, cmd_base, cmd_data[1]); + + time_left = wait_for_completion_timeout(&sdw->tx_complete, + 3000); /* NOTE(review): raw jiffies literal — 3000 jiffies is HZ-dependent (3 s only when HZ=1000); the sibling sdw_xfer_msg uses 3 * HZ; use msecs_to_jiffies() to make the duration explicit */ + if (!time_left) { + dev_err(&mstr->dev, "Controller Timed out\n"); + msg->len = 0; + return -ETIMEDOUT; + } + + for (i = 0; i < 
CNL_SDW_SCP_ADDR_REGS; i++) { + if (!(MCP_RESPONSE_ACK_MASK & sdw->response_buf[i])) { + no_ack = 1; + dev_err(&mstr->dev, "Ack not recevied\n"); + if ((MCP_RESPONSE_NACK_MASK & sdw->response_buf[i])) { + nack = 1; + dev_err(&mstr->dev, "NACK recevied\n"); + } + } + } + /* We dont return error if NACK or No ACK detected for broadcast addr + * because some slave might support SCP addr, while some slaves may not + * support it. This is not correct, since we wont be able to find out + * if NACK is detected because of slave not supporting SCP_addrpage or + * its a genuine NACK because of bus errors. We are not sure what slaves + * will report, NACK or No ACK for the scp_addrpage programming if they + * dont support it. Spec is not clear about this. + * This needs to be thought through + */ + /* NOTE(review): bitwise '&' should be logical '&&' (the no_ack branch below uses '&&') — it only happens to work because both operands are 0/1; also 15 is the broadcast device address and deserves a named constant */ if (nack & (msg->slave_addr != 15)) { + dev_err(&mstr->dev, "SCP_addrpage write NACKed for slave %d\n", msg->slave_addr); + return -EREMOTEIO; + } else if (no_ack && (msg->slave_addr != 15)) { + dev_err(&mstr->dev, "SCP_addrpage write ignored for slave %d\n", msg->slave_addr); + return -EREMOTEIO; + } else + return 0; + +} + +/* Queue 'count' read/write commands for msg into the command FIFO (data taken from / returned to msg->buf at 'offset'); when 'async' is false, wait for the ISR to signal completion and decode the responses. */ static enum sdw_command_response sdw_xfer_msg(struct sdw_master *mstr, + struct sdw_msg *msg, int cmd, int offset, int count, bool async) +{ + struct cnl_sdw *sdw = sdw_master_get_drvdata(mstr); + struct cnl_sdw_data *data = &sdw->data; + int j; + u32 cmd_base = SDW_CNL_MCP_COMMAND_BASE; + u32 cmd_data = 0; + unsigned long time_left; + u16 addr = msg->addr; /* NOTE(review): 'addr' restarts at msg->addr even when 'offset' > 0 — for the chunked calls from cnl_sdw_xfer_msg every chunk re-addresses the same registers; looks like it should be msg->addr + offset — confirm */ + + /* Program the watermark level upto number of count */ + cnl_sdw_reg_writel(data->sdw_regs, SDW_CNL_MCP_FIFOLEVEL, count); + + cmd_base = SDW_CNL_MCP_COMMAND_BASE; + for (j = 0; j < count; j++) { + /* Program device address */ + cmd_data = 0; + cmd_data |= (msg->slave_addr & + MCP_COMMAND_DEV_ADDR_MASK) << + MCP_COMMAND_DEV_ADDR_SHIFT; + /* Program read/write command */ + cmd_data |= (cmd << MCP_COMMAND_COMMAND_SHIFT); + /* program incrementing address register */ + cmd_data |= (addr++ << 
MCP_COMMAND_REG_ADDR_L_SHIFT); + /* Program the data if write command */ + if (msg->flag == SDW_MSG_FLAG_WRITE) + cmd_data |= + msg->buf[j + offset]; + + cmd_data |= ((msg->ssp_tag & + MCP_COMMAND_SSP_TAG_MASK) << + MCP_COMMAND_SSP_TAG_SHIFT); + cnl_sdw_reg_writel(data->sdw_regs, + cmd_base, cmd_data); + cmd_base += SDW_CNL_CMD_WORD_LEN; + } + + /* If Async dont wait for completion */ + if (async) + return 0; + /* Wait for 3 second for timeout */ + time_left = wait_for_completion_timeout(&sdw->tx_complete, 3 * HZ); + if (!time_left) { + dev_err(&mstr->dev, "Controller timedout\n"); + msg->len = 0; + return -ETIMEDOUT; + } + return sdw_fill_message_response(mstr, msg, count, offset); +} + +static enum sdw_command_response cnl_sdw_xfer_msg_async(struct sdw_master *mstr, + struct sdw_msg *msg, bool program_scp_addr_page, + struct sdw_async_xfer_data *data) +{ + int ret = 0, cmd; + struct cnl_sdw *sdw = sdw_master_get_drvdata(mstr); + + /* Only 1 message can be handled in Async fashion. This is used + * only for Bank switching where during aggregation it is required + * to synchronously switch the bank on more than 1 controller + */ + if (msg->len > 1) { + ret = -EINVAL; + goto error; + } + /* If scp addr programming fails goto error */ + if (program_scp_addr_page) + ret = cnl_program_scp_addr(mstr, msg); + if (ret) + goto error; + + switch (msg->flag) { + case SDW_MSG_FLAG_READ: + cmd = 0x2; + break; + case SDW_MSG_FLAG_WRITE: + cmd = 0x3; + break; + default: + dev_err(&mstr->dev, "Command not supported\n"); + return -EINVAL; + } + sdw->async_msg.async_xfer_complete = &data->xfer_complete; + sdw->async_msg.msg = msg; + sdw->async_msg.length = msg->len; + /* Dont wait for reply, calling function will wait for reply. 
*/ + ret = sdw_xfer_msg(mstr, msg, cmd, 0, msg->len, true); + return ret; +error: + msg->len = 0; + complete(&data->xfer_complete); + return -EINVAL; + +} + +static enum sdw_command_response cnl_sdw_xfer_msg(struct sdw_master *mstr, + struct sdw_msg *msg, bool program_scp_addr_page) +{ + int i, ret = 0, cmd; + + if (program_scp_addr_page) + ret = cnl_program_scp_addr(mstr, msg); + + if (ret) { + msg->len = 0; + return ret; + } + + switch (msg->flag) { + case SDW_MSG_FLAG_READ: + cmd = 0x2; + break; + case SDW_MSG_FLAG_WRITE: + cmd = 0x3; + break; + default: + dev_err(&mstr->dev, "Command not supported\n"); + return -EINVAL; + } + for (i = 0; i < msg->len / SDW_CNL_MCP_COMMAND_LENGTH; i++) { + ret = sdw_xfer_msg(mstr, msg, + cmd, i * SDW_CNL_MCP_COMMAND_LENGTH, + SDW_CNL_MCP_COMMAND_LENGTH, false); + if (ret < 0) + break; + } + if (!(msg->len % SDW_CNL_MCP_COMMAND_LENGTH)) + return ret; + ret = sdw_xfer_msg(mstr, msg, cmd, i * SDW_CNL_MCP_COMMAND_LENGTH, + msg->len % SDW_CNL_MCP_COMMAND_LENGTH, false); + if (ret < 0) + return -EINVAL; + return ret; +} + +static void cnl_sdw_bra_prep_crc(u8 *txdata_buf, + struct sdw_bra_block *block, int data_offset, int addr_offset) +{ + + int addr = addr_offset; + + txdata_buf[addr++] = sdw_bus_compute_crc8((block->values + data_offset), + block->num_bytes); + txdata_buf[addr++] = 0x0; + txdata_buf[addr++] = 0x0; + txdata_buf[addr] |= ((0x2 & SDW_BRA_SOP_EOP_PDI_MASK) + << SDW_BRA_SOP_EOP_PDI_SHIFT); +} + +static void cnl_sdw_bra_prep_data(u8 *txdata_buf, + struct sdw_bra_block *block, int data_offset, int addr_offset) +{ + + int i; + int addr = addr_offset; + + for (i = 0; i < block->num_bytes; i += 2) { + + txdata_buf[addr++] = block->values[i + data_offset]; + if ((block->num_bytes - 1) - i) + txdata_buf[addr++] = block->values[i + data_offset + 1]; + else + txdata_buf[addr++] = 0; + + txdata_buf[addr++] = 0; + txdata_buf[addr++] = 0; + } +} + +static void cnl_sdw_bra_prep_hdr(u8 *txdata_buf, + struct sdw_bra_block *block, int 
rolling_id, int offset) +{ + + u8 tmp_hdr[6] = {0, 0, 0, 0, 0, 0}; + u8 temp = 0x0; + + /* + * 6 bytes header + * 1st byte: b11001010 + * b11: Header is active + * b0010: Device number 2 is selected + * b1: Write operation + * b0: MSB of BRA_NumBytes is 0 + * 2nd byte: LSB of number of bytes + * 3rd byte to 6th byte: Slave register offset + */ + temp |= (SDW_BRA_HDR_ACTIVE & SDW_BRA_HDR_ACTIVE_MASK) << + SDW_BRA_HDR_ACTIVE_SHIFT; + temp |= (block->slave_addr & SDW_BRA_HDR_SLV_ADDR_MASK) << + SDW_BRA_HDR_SLV_ADDR_SHIFT; + temp |= (block->cmd & SDW_BRA_HDR_RD_WR_MASK) << + SDW_BRA_HDR_RD_WR_SHIFT; + + if (block->num_bytes > SDW_BRA_HDR_MSB_BYTE_CHK) + temp |= (SDW_BRA_HDR_MSB_BYTE_SET & SDW_BRA_HDR_MSB_BYTE_MASK); + else + temp |= (SDW_BRA_HDR_MSB_BYTE_UNSET & + SDW_BRA_HDR_MSB_BYTE_MASK); + + txdata_buf[offset + 0] = tmp_hdr[0] = temp; + txdata_buf[offset + 1] = tmp_hdr[1] = block->num_bytes; + txdata_buf[offset + 3] |= ((SDW_BRA_SOP_EOP_PDI_STRT_VALUE & + SDW_BRA_SOP_EOP_PDI_MASK) << + SDW_BRA_SOP_EOP_PDI_SHIFT); + + txdata_buf[offset + 3] |= ((rolling_id & SDW_BRA_ROLLINGID_PDI_MASK) + << SDW_BRA_ROLLINGID_PDI_SHIFT); + + txdata_buf[offset + 4] = tmp_hdr[2] = ((block->reg_offset & + SDW_BRA_HDR_SLV_REG_OFF_MASK24) + >> SDW_BRA_HDR_SLV_REG_OFF_SHIFT24); + + txdata_buf[offset + 5] = tmp_hdr[3] = ((block->reg_offset & + SDW_BRA_HDR_SLV_REG_OFF_MASK16) + >> SDW_BRA_HDR_SLV_REG_OFF_SHIFT16); + + txdata_buf[offset + 8] = tmp_hdr[4] = ((block->reg_offset & + SDW_BRA_HDR_SLV_REG_OFF_MASK8) + >> SDW_BRA_HDR_SLV_REG_OFF_SHIFT8); + + txdata_buf[offset + 9] = tmp_hdr[5] = (block->reg_offset & + SDW_BRA_HDR_SLV_REG_OFF_MASK0); + + /* CRC check */ + txdata_buf[offset + 0xc] = sdw_bus_compute_crc8(tmp_hdr, + SDW_BRA_HEADER_SIZE); + + if (!block->cmd) + txdata_buf[offset + 0xf] = ((SDW_BRA_SOP_EOP_PDI_END_VALUE & + SDW_BRA_SOP_EOP_PDI_MASK) << + SDW_BRA_SOP_EOP_PDI_SHIFT); +} + +static void cnl_sdw_bra_pdi_tx_config(struct sdw_master *mstr, + struct cnl_sdw *sdw, bool enable) +{ 
+ struct cnl_sdw_pdi_stream tx_pdi_stream; + unsigned int tx_ch_map_offset, port_ctrl_offset, tx_pdi_config_offset; + unsigned int port_ctrl = 0, tx_pdi_config = 0, tx_stream_config; + int tx_pdi_ch_map = 0; + + if (enable) { + /* DP0 PORT CTRL REG */ + port_ctrl_offset = SDW_CNL_PORTCTRL + (SDW_BRA_PORT_ID * + SDW_CNL_PORT_REG_OFFSET); + + port_ctrl &= ~(PORTCTRL_PORT_DIRECTION_MASK << + PORTCTRL_PORT_DIRECTION_SHIFT); + + port_ctrl |= ((SDW_BRA_BULK_ENABLE & SDW_BRA_BLK_EN_MASK) << + SDW_BRA_BLK_EN_SHIFT); + + port_ctrl |= ((SDW_BRA_BPT_PAYLOAD_TYPE & + SDW_BRA_BPT_PYLD_TY_MASK) << + SDW_BRA_BPT_PYLD_TY_SHIFT); + + cnl_sdw_reg_writel(sdw->data.sdw_regs, port_ctrl_offset, + port_ctrl); + + /* PDI0 Programming */ + tx_pdi_stream.l_ch_num = 0; + tx_pdi_stream.h_ch_num = 0xF; + tx_pdi_stream.pdi_num = SDW_BRA_PDI_TX_ID; + /* TODO: Remove hardcoding */ + tx_pdi_stream.sdw_pdi_num = mstr->nr * 16 + + tx_pdi_stream.pdi_num + 3; + + /* SNDWxPCMS2CM SHIM REG */ + tx_ch_map_offset = SDW_CNL_CTLS2CM + + (SDW_CNL_PCMSCHM_REG_OFFSET * mstr->nr); + + tx_pdi_ch_map |= (tx_pdi_stream.sdw_pdi_num & + CNL_PCMSYCM_STREAM_MASK) << + CNL_PCMSYCM_STREAM_SHIFT; + + tx_pdi_ch_map |= (tx_pdi_stream.l_ch_num & + CNL_PCMSYCM_LCHAN_MASK) << + CNL_PCMSYCM_LCHAN_SHIFT; + + tx_pdi_ch_map |= (tx_pdi_stream.h_ch_num & + CNL_PCMSYCM_HCHAN_MASK) << + CNL_PCMSYCM_HCHAN_SHIFT; + + cnl_sdw_reg_writew(sdw->data.sdw_shim, tx_ch_map_offset, + tx_pdi_ch_map); + + /* TX PDI0 CONFIG REG BANK 0 */ + tx_pdi_config_offset = (SDW_CNL_PDINCONFIG0 + + (tx_pdi_stream.pdi_num * 16)); + + tx_pdi_config |= ((SDW_BRA_PORT_ID & + PDINCONFIG_PORT_NUMBER_MASK) << + PDINCONFIG_PORT_NUMBER_SHIFT); + + tx_pdi_config |= (SDW_BRA_CHN_MASK << + PDINCONFIG_CHANNEL_MASK_SHIFT); + + tx_pdi_config |= (SDW_BRA_SOFT_RESET << + PDINCONFIG_PORT_SOFT_RESET_SHIFT); + + cnl_sdw_reg_writel(sdw->data.sdw_regs, + tx_pdi_config_offset, tx_pdi_config); + + /* ALH STRMzCFG REG */ + tx_stream_config = cnl_sdw_reg_readl(sdw->data.alh_base, + 
(tx_pdi_stream.sdw_pdi_num * + ALH_CNL_STRMZCFG_OFFSET)); + + tx_stream_config |= (CNL_STRMZCFG_DMAT_VAL & + CNL_STRMZCFG_DMAT_MASK) << + CNL_STRMZCFG_DMAT_SHIFT; + + tx_stream_config |= (0x0 & CNL_STRMZCFG_CHAN_MASK) << + CNL_STRMZCFG_CHAN_SHIFT; + + cnl_sdw_reg_writel(sdw->data.alh_base, + (tx_pdi_stream.sdw_pdi_num * + ALH_CNL_STRMZCFG_OFFSET), + tx_stream_config); + + + } else { + + /* + * TODO: There is official workaround which needs to be + * performed for PDI config register. The workaround + * is to perform SoftRst twice in order to clear + * PDI fifo contents. + */ + + } +} + +static void cnl_sdw_bra_pdi_rx_config(struct sdw_master *mstr, + struct cnl_sdw *sdw, bool enable) +{ + + struct cnl_sdw_pdi_stream rx_pdi_stream; + unsigned int rx_ch_map_offset, rx_pdi_config_offset, rx_stream_config; + unsigned int rx_pdi_config = 0; + int rx_pdi_ch_map = 0; + + if (enable) { + + /* RX PDI1 Configuration */ + rx_pdi_stream.l_ch_num = 0; + rx_pdi_stream.h_ch_num = 0xF; + rx_pdi_stream.pdi_num = SDW_BRA_PDI_RX_ID; + rx_pdi_stream.sdw_pdi_num = mstr->nr * 16 + + rx_pdi_stream.pdi_num + 3; + + /* SNDWxPCMS3CM SHIM REG */ + rx_ch_map_offset = SDW_CNL_CTLS3CM + + (SDW_CNL_PCMSCHM_REG_OFFSET * mstr->nr); + + rx_pdi_ch_map |= (rx_pdi_stream.sdw_pdi_num & + CNL_PCMSYCM_STREAM_MASK) << + CNL_PCMSYCM_STREAM_SHIFT; + + rx_pdi_ch_map |= (rx_pdi_stream.l_ch_num & + CNL_PCMSYCM_LCHAN_MASK) << + CNL_PCMSYCM_LCHAN_SHIFT; + + rx_pdi_ch_map |= (rx_pdi_stream.h_ch_num & + CNL_PCMSYCM_HCHAN_MASK) << + CNL_PCMSYCM_HCHAN_SHIFT; + + cnl_sdw_reg_writew(sdw->data.sdw_shim, rx_ch_map_offset, + rx_pdi_ch_map); + + /* RX PDI1 CONFIG REG */ + rx_pdi_config_offset = (SDW_CNL_PDINCONFIG0 + + (rx_pdi_stream.pdi_num * 16)); + + rx_pdi_config |= ((SDW_BRA_PORT_ID & + PDINCONFIG_PORT_NUMBER_MASK) << + PDINCONFIG_PORT_NUMBER_SHIFT); + + rx_pdi_config |= (SDW_BRA_CHN_MASK << + PDINCONFIG_CHANNEL_MASK_SHIFT); + + rx_pdi_config |= (SDW_BRA_SOFT_RESET << + PDINCONFIG_PORT_SOFT_RESET_SHIFT); + + 
cnl_sdw_reg_writel(sdw->data.sdw_regs, + rx_pdi_config_offset, rx_pdi_config); + + + /* ALH STRMzCFG REG */ + rx_stream_config = cnl_sdw_reg_readl(sdw->data.alh_base, + (rx_pdi_stream.sdw_pdi_num * + ALH_CNL_STRMZCFG_OFFSET)); + + rx_stream_config |= (CNL_STRMZCFG_DMAT_VAL & + CNL_STRMZCFG_DMAT_MASK) << + CNL_STRMZCFG_DMAT_SHIFT; + + rx_stream_config |= (0 & CNL_STRMZCFG_CHAN_MASK) << + CNL_STRMZCFG_CHAN_SHIFT; + + cnl_sdw_reg_writel(sdw->data.alh_base, + (rx_pdi_stream.sdw_pdi_num * + ALH_CNL_STRMZCFG_OFFSET), + rx_stream_config); + + } else { + + /* + * TODO: There is official workaround which needs to be + * performed for PDI config register. The workaround + * is to perform SoftRst twice in order to clear + * PDI fifo contents. + */ + + } +} + +static void cnl_sdw_bra_pdi_config(struct sdw_master *mstr, bool enable) +{ + struct cnl_sdw *sdw; + + /* Get driver data for master */ + sdw = sdw_master_get_drvdata(mstr); + + /* PDI0 configuration */ + cnl_sdw_bra_pdi_tx_config(mstr, sdw, enable); + + /* PDI1 configuration */ + cnl_sdw_bra_pdi_rx_config(mstr, sdw, enable); +} + +static int cnl_sdw_bra_verify_footer(u8 *rx_buf, int offset) +{ + int ret = 0; + u8 ftr_response; + u8 ack_nack = 0; + u8 ftr_result = 0; + + ftr_response = rx_buf[offset]; + + /* + * ACK/NACK check + * NACK+ACK value from target: + * 00 -> Ignored + * 01 -> OK + * 10 -> Failed (Header CRC check failed) + * 11 -> Reserved + * NACK+ACK values at Target or initiator + * 00 -> Ignored + * 01 -> OK + * 10 -> Abort (Header cannot be trusted) + * 11 -> Abort (Header cannot be trusted) + */ + ack_nack = ((ftr_response >> SDW_BRA_FTR_RESP_ACK_SHIFT) & + SDW_BRA_FTR_RESP_ACK_MASK); + if (ack_nack == SDW_BRA_ACK_NAK_IGNORED) { + pr_info("BRA Packet Ignored\n"); + ret = -EINVAL; + } else if (ack_nack == SDW_BRA_ACK_NAK_OK) + pr_info("BRA: Packet OK\n"); + else if (ack_nack == SDW_BRA_ACK_NAK_FAILED_ABORT) { + pr_info("BRA: Packet Failed/Reserved\n"); + return -EINVAL; + } else if (ack_nack == 
SDW_BRA_ACK_NAK_RSVD_ABORT) { + pr_info("BRA: Packet Reserved/Abort\n"); + return -EINVAL; + } + + /* + * BRA footer result check + * Writes: + * 0 -> Good. Target accepted write payload + * 1 -> Bad. Target did not accept write payload + * Reads: + * 0 -> Good. Target completed read operation successfully + * 1 -> Bad. Target failed to complete read operation successfully + */ + ftr_result = (ftr_response >> SDW_BRA_FTR_RESP_RES_SHIFT) & + SDW_BRA_FTR_RESP_RES_MASK; + if (ftr_result == SDW_BRA_FTR_RESULT_BAD) { + pr_info("BRA: Read/Write operation failed on target side\n"); + /* Error scenario */ + return -EINVAL; + } + + pr_info("BRA: Read/Write operation complete on target side\n"); + + return ret; +} + +static int cnl_sdw_bra_verify_hdr(u8 *rx_buf, int offset, bool *chk_footer, + int roll_id) +{ + int ret = 0; + u8 hdr_response, rolling_id; + u8 ack_nack = 0; + u8 not_ready = 0; + + /* Match rolling ID */ + hdr_response = rx_buf[offset]; + rolling_id = rx_buf[offset + SDW_BRA_ROLLINGID_PDI_INDX]; + + rolling_id = (rolling_id & SDW_BRA_ROLLINGID_PDI_MASK); + if (roll_id != rolling_id) { + pr_info("BRA: Rolling ID doesn't match, returning error\n"); + return -EINVAL; + } + + /* + * ACK/NACK check + * NACK+ACK value from target: + * 00 -> Ignored + * 01 -> OK + * 10 -> Failed (Header CRC check failed) + * 11 -> Reserved + * NACK+ACK values at Target or initiator + * 00 -> Ignored + * 01 -> OK + * 10 -> Abort (Header cannot be trusted) + * 11 -> Abort (Header cannot be trusted) + */ + ack_nack = ((hdr_response >> SDW_BRA_HDR_RESP_ACK_SHIFT) & + SDW_BRA_HDR_RESP_ACK_MASK); + if (ack_nack == SDW_BRA_ACK_NAK_IGNORED) { + pr_info("BRA: Packet Ignored rolling_id:%d\n", rolling_id); + ret = -EINVAL; + } else if (ack_nack == SDW_BRA_ACK_NAK_OK) + pr_info("BRA: Packet OK rolling_id:%d\n", rolling_id); + else if (ack_nack == SDW_BRA_ACK_NAK_FAILED_ABORT) { + pr_info("BRA: Packet Failed/Abort rolling_id:%d\n", rolling_id); + return -EINVAL; + } else if (ack_nack == 
SDW_BRA_ACK_NAK_RSVD_ABORT) { + pr_info("BRA: Packet Reserved/Abort rolling_id:%d\n", rolling_id); + return -EINVAL; + } + + /* BRA not ready check */ + not_ready = (hdr_response >> SDW_BRA_HDR_RESP_NRDY_SHIFT) & + SDW_BRA_HDR_RESP_NRDY_MASK; + if (not_ready == SDW_BRA_TARGET_NOT_READY) { + pr_info("BRA: Target not ready for read/write operation rolling_id:%d\n", + rolling_id); + chk_footer = false; + return -EBUSY; + } + + pr_info("BRA: Target ready for read/write operation rolling_id:%d\n", rolling_id); + return ret; +} + +static void cnl_sdw_bra_remove_data_padding(u8 *src_buf, u8 *dst_buf, + u8 size) { + + int i; + + for (i = 0; i < size/2; i++) { + + *dst_buf++ = *src_buf++; + *dst_buf++ = *src_buf++; + src_buf++; + src_buf++; + } +} + + +static int cnl_sdw_bra_check_data(struct sdw_master *mstr, + struct sdw_bra_block *block, struct bra_info *info) { + + int offset = 0, rolling_id = 0, tmp_offset = 0; + int rx_crc_comp = 0, rx_crc_rvd = 0; + int i, ret; + bool chk_footer = true; + int rx_buf_size = info->rx_block_size; + u8 *rx_buf = info->rx_ptr; + u8 *tmp_buf = NULL; + + /* TODO: Remove below hex dump print */ + print_hex_dump(KERN_DEBUG, "BRA RX DATA:", DUMP_PREFIX_OFFSET, 8, 4, + rx_buf, rx_buf_size, false); + + /* Allocate temporary buffer in case of read request */ + if (!block->cmd) { + tmp_buf = kzalloc(block->num_bytes, GFP_KERNEL); + if (!tmp_buf) { + ret = -ENOMEM; + goto error; + } + } + + /* + * TODO: From the response header and footer there is no mention of + * read or write packet so controller needs to keep transmit packet + * information in order to verify rx packet. Also the current + * approach used for error mechanism is any of the packet response + * is not success, just report the whole transfer failed to Slave. + */ + + /* + * Verification of response packet for one known + * hardcoded configuration. This needs to be extended + * once we have dynamic algorithm integrated. 
+ */ + + /* 2 valid read response */ + for (i = 0; i < info->valid_packets; i++) { + + + pr_info("BRA: Verifying packet number:%d with rolling id:%d\n", + info->packet_info[i].packet_num, + rolling_id); + chk_footer = true; + ret = cnl_sdw_bra_verify_hdr(rx_buf, offset, &chk_footer, + rolling_id); + if (ret < 0) { + dev_err(&mstr->dev, "BRA: Header verification failed for packet number:%d\n", + info->packet_info[i].packet_num); + goto error; + } + + /* Increment offset for header response */ + offset = offset + SDW_BRA_HEADER_RESP_SIZE_PDI; + + if (!block->cmd) { + + /* Remove PDI padding for data */ + cnl_sdw_bra_remove_data_padding(&rx_buf[offset], + &tmp_buf[tmp_offset], + info->packet_info[i].num_data_bytes); + + /* Increment offset for consumed data */ + offset = offset + + (info->packet_info[i].num_data_bytes * 2); + + rx_crc_comp = sdw_bus_compute_crc8(&tmp_buf[tmp_offset], + info->packet_info[i].num_data_bytes); + + /* Match Data CRC */ + rx_crc_rvd = rx_buf[offset]; + if (rx_crc_comp != rx_crc_rvd) { + ret = -EINVAL; + dev_err(&mstr->dev, "BRA: Data CRC doesn't match for packet number:%d\n", + info->packet_info[i].packet_num); + goto error; + } + + /* Increment destination buffer with copied data */ + tmp_offset = tmp_offset + + info->packet_info[i].num_data_bytes; + + /* Increment offset for CRC */ + offset = offset + SDW_BRA_DATA_CRC_SIZE_PDI; + } + + if (chk_footer) { + ret = cnl_sdw_bra_verify_footer(rx_buf, offset); + if (ret < 0) { + ret = -EINVAL; + dev_err(&mstr->dev, "BRA: Footer verification failed for packet number:%d\n", + info->packet_info[i].packet_num); + goto error; + } + + } + + /* Increment offset for footer response */ + offset = offset + SDW_BRA_HEADER_RESP_SIZE_PDI; + + /* Increment rolling id for next packet */ + rolling_id++; + if (rolling_id > 0xF) + rolling_id = 0; + } + + /* + * No need to check for dummy responses from codec + * Assumption made here is that dummy packets are + * added in 1ms buffer only after valid packets. 
+ */ + + /* Copy data to codec buffer in case of read request */ + if (!block->cmd) + memcpy(block->values, tmp_buf, block->num_bytes); + +error: + /* Free up temp buffer allocated in case of read request */ + if (!block->cmd) + kfree(tmp_buf); + + /* Free up buffer allocated in cnl_sdw_bra_data_ops */ + kfree(info->tx_ptr); + kfree(info->rx_ptr); + kfree(info->packet_info); + + return ret; +} + +static int cnl_sdw_bra_data_ops(struct sdw_master *mstr, + struct sdw_bra_block *block, struct bra_info *info) +{ + + struct sdw_bra_block tmp_block; + int i; + int tx_buf_size = 384, rx_buf_size = 1152; + u8 *tx_buf = NULL, *rx_buf = NULL; + int rolling_id = 0, total_bytes = 0, offset = 0, reg_offset = 0; + int dummy_read = 0x0000; + int ret; + + /* + * TODO: Run an algorithm here to identify the buffer size + * for TX and RX buffers + number of dummy packets (read + * or write) to be added for to align buffers. + */ + + info->tx_block_size = tx_buf_size; + info->tx_ptr = tx_buf = kzalloc(tx_buf_size, GFP_KERNEL); + if (!tx_buf) { + ret = -ENOMEM; + goto error; + } + + info->rx_block_size = rx_buf_size; + info->rx_ptr = rx_buf = kzalloc(rx_buf_size, GFP_KERNEL); + if (!rx_buf) { + ret = -ENOMEM; + goto error; + } + + /* Fill valid packets transferred per millisecond buffer */ + info->valid_packets = 2; + info->packet_info = kcalloc(info->valid_packets, + sizeof(*info->packet_info), + GFP_KERNEL); + if (!info->packet_info) { + ret = -ENOMEM; + goto error; + } + + /* + * Below code performs packet preparation for one known + * configuration. + * 1. 2 Valid Read request with 18 bytes each. + * 2. 22 dummy read packets with 18 bytes each. 
+ */ + for (i = 0; i < info->valid_packets; i++) { + tmp_block.slave_addr = block->slave_addr; + tmp_block.cmd = block->cmd; /* Read Request */ + tmp_block.num_bytes = 18; + tmp_block.reg_offset = block->reg_offset + reg_offset; + tmp_block.values = NULL; + reg_offset += tmp_block.num_bytes; + + cnl_sdw_bra_prep_hdr(tx_buf, &tmp_block, rolling_id, offset); + /* Total Header size: Header + Header CRC size on PDI */ + offset += SDW_BRA_HEADER_TOTAL_SZ_PDI; + + if (block->cmd) { + /* + * PDI data preparation in case of write request + * Assumption made here is data size from codec will + * be always an even number. + */ + cnl_sdw_bra_prep_data(tx_buf, &tmp_block, + total_bytes, offset); + offset += tmp_block.num_bytes * 2; + + /* Data CRC */ + cnl_sdw_bra_prep_crc(tx_buf, &tmp_block, + total_bytes, offset); + offset += SDW_BRA_DATA_CRC_SIZE_PDI; + } + + total_bytes += tmp_block.num_bytes; + rolling_id++; + + /* Fill packet info data structure */ + info->packet_info[i].packet_num = i + 1; + info->packet_info[i].num_data_bytes = tmp_block.num_bytes; + } + + /* Prepare dummy packets */ + for (i = 0; i < 22; i++) { + tmp_block.slave_addr = block->slave_addr; + tmp_block.cmd = 0; /* Read request */ + tmp_block.num_bytes = 18; + tmp_block.reg_offset = dummy_read++; + tmp_block.values = NULL; + + cnl_sdw_bra_prep_hdr(tx_buf, &tmp_block, rolling_id, offset); + + /* Total Header size: RD header + RD header CRC size on PDI */ + offset += SDW_BRA_HEADER_TOTAL_SZ_PDI; + + total_bytes += tmp_block.num_bytes; + rolling_id++; + } + + /* TODO: Remove below hex dump print */ + print_hex_dump(KERN_DEBUG, "BRA PDI VALID TX DATA:", + DUMP_PREFIX_OFFSET, 8, 4, tx_buf, tx_buf_size, false); + + return 0; + +error: + kfree(info->tx_ptr); + kfree(info->rx_ptr); + kfree(info->packet_info); + + return ret; +} + +static int cnl_sdw_xfer_bulk(struct sdw_master *mstr, + struct sdw_bra_block *block) +{ + struct cnl_sdw *sdw = sdw_master_get_platdata(mstr); + struct cnl_sdw_data *data = &sdw->data; 
+ struct cnl_bra_operation *ops = data->bra_data->bra_ops; + struct bra_info info; + int ret; + + /* + * 1. PDI Configuration + * 2. Prepare BRA packets including CRC calculation. + * 3. Configure TX and RX DMA in one shot mode. + * 4. Configure TX and RX Pipeline. + * 5. Run TX and RX DMA. + * 6. Run TX and RX pipelines. + * 7. Wait on completion for RX buffer. + * 8. Match TX and RX buffer packets and check for errors. + */ + + /* Memset bra_info data structure */ + memset(&info, 0x0, sizeof(info)); + + /* Fill master number in bra info data structure */ + info.mstr_num = mstr->nr; + + /* Prepare TX buffer */ + ret = cnl_sdw_bra_data_ops(mstr, block, &info); + if (ret < 0) { + dev_err(&mstr->dev, "BRA: Request packet(s) creation failed\n"); + goto out; + } + + /* Pipeline Setup (ON) */ + ret = ops->bra_platform_setup(data->bra_data->drv_data, true, &info); + if (ret < 0) { + dev_err(&mstr->dev, "BRA: Pipeline setup failed\n"); + goto out; + } + + /* PDI Configuration (ON) */ + cnl_sdw_bra_pdi_config(mstr, true); + + /* Trigger START host DMA and pipeline */ + ret = ops->bra_platform_xfer(data->bra_data->drv_data, true, &info); + if (ret < 0) { + dev_err(&mstr->dev, "BRA: Pipeline start failed\n"); + goto out; + } + + /* Trigger STOP host DMA and pipeline */ + ret = ops->bra_platform_xfer(data->bra_data->drv_data, false, &info); + if (ret < 0) { + dev_err(&mstr->dev, "BRA: Pipeline stop failed\n"); + goto out; + } + + /* Pipeline Setup (OFF) */ + ret = ops->bra_platform_setup(data->bra_data->drv_data, false, &info); + if (ret < 0) { + dev_err(&mstr->dev, "BRA: Pipeline de-setup failed\n"); + goto out; + } + + /* Verify RX buffer */ + ret = cnl_sdw_bra_check_data(mstr, block, &info); + if (ret < 0) { + dev_err(&mstr->dev, "BRA: Response packet(s) incorrect\n"); + goto out; + } + + /* PDI Configuration (OFF) */ + cnl_sdw_bra_pdi_config(mstr, false); + +out: + return ret; +} + +static int cnl_sdw_mon_handover(struct sdw_master *mstr, + bool enable) +{ + int 
mcp_config; + struct cnl_sdw *sdw = sdw_master_get_drvdata(mstr); + struct cnl_sdw_data *data = &sdw->data; + + mcp_config = cnl_sdw_reg_readl(data->sdw_regs, SDW_CNL_MCP_CONFIG); + if (enable) + mcp_config |= MCP_CONFIG_BRELENABLE_MASK << + MCP_CONFIG_BRELENABLE_SHIFT; + else + mcp_config &= ~(MCP_CONFIG_BRELENABLE_MASK << + MCP_CONFIG_BRELENABLE_SHIFT); + + cnl_sdw_reg_writel(data->sdw_regs, SDW_CNL_MCP_CONFIG, mcp_config); + return 0; +} + +static int cnl_sdw_set_ssp_interval(struct sdw_master *mstr, + int ssp_interval, int bank) +{ + struct cnl_sdw *sdw = sdw_master_get_drvdata(mstr); + struct cnl_sdw_data *data = &sdw->data; + int sspctrl_offset, check; + + if (bank) + sspctrl_offset = SDW_CNL_MCP_SSPCTRL1; + else + sspctrl_offset = SDW_CNL_MCP_SSPCTRL0; + + cnl_sdw_reg_writel(data->sdw_regs, sspctrl_offset, ssp_interval); + + check = cnl_sdw_reg_readl(data->sdw_regs, sspctrl_offset); + + return 0; +} + +static int cnl_sdw_set_clock_freq(struct sdw_master *mstr, + int cur_clk_div, int bank) +{ + struct cnl_sdw *sdw = sdw_master_get_drvdata(mstr); + struct cnl_sdw_data *data = &sdw->data; + int mcp_clockctrl_offset, mcp_clockctrl; + + + /* TODO: Retrieve divider value or get value directly from calling + * function + */ + int divider = (cur_clk_div - 1); + + if (bank) { + mcp_clockctrl_offset = SDW_CNL_MCP_CLOCKCTRL1; + mcp_clockctrl = cnl_sdw_reg_readl(data->sdw_regs, + SDW_CNL_MCP_CLOCKCTRL1); + + } else { + mcp_clockctrl_offset = SDW_CNL_MCP_CLOCKCTRL0; + mcp_clockctrl = cnl_sdw_reg_readl(data->sdw_regs, + SDW_CNL_MCP_CLOCKCTRL0); + } + + mcp_clockctrl |= divider; + + /* Write value here */ + cnl_sdw_reg_writel(data->sdw_regs, mcp_clockctrl_offset, + mcp_clockctrl); + + mcp_clockctrl = cnl_sdw_reg_readl(data->sdw_regs, + mcp_clockctrl_offset); + return 0; +} + +static int cnl_sdw_set_port_params(struct sdw_master *mstr, + struct sdw_port_params *params, int bank) +{ + struct cnl_sdw *sdw = sdw_master_get_drvdata(mstr); + struct cnl_sdw_data *data = 
&sdw->data; + int dpn_config = 0, dpn_config_offset; + + if (bank) + dpn_config_offset = SDW_CNL_DPN_CONFIG1; + else + dpn_config_offset = SDW_CNL_DPN_CONFIG0; + + dpn_config = cnl_sdw_port_reg_readl(data->sdw_regs, + dpn_config_offset, params->num); + + dpn_config |= (((params->word_length - 1) & DPN_CONFIG_WL_MASK) << + DPN_CONFIG_WL_SHIFT); + dpn_config |= ((params->port_flow_mode & DPN_CONFIG_PF_MODE_MASK) << + DPN_CONFIG_PF_MODE_SHIFT); + dpn_config |= ((params->port_data_mode & DPN_CONFIG_PD_MODE_MASK) << + DPN_CONFIG_PD_MODE_SHIFT); + cnl_sdw_port_reg_writel(data->sdw_regs, + dpn_config_offset, params->num, dpn_config); + + cnl_sdw_port_reg_readl(data->sdw_regs, + dpn_config_offset, params->num); + return 0; +} + +static int cnl_sdw_set_port_transport_params(struct sdw_master *mstr, + struct sdw_transport_params *params, int bank) +{ +struct cnl_sdw *sdw = sdw_master_get_drvdata(mstr); + struct cnl_sdw_data *data = &sdw->data; + + int dpn_config = 0, dpn_config_offset; + int dpn_samplectrl_offset; + int dpn_offsetctrl = 0, dpn_offsetctrl_offset; + int dpn_hctrl = 0, dpn_hctrl_offset; + + if (bank) { + dpn_config_offset = SDW_CNL_DPN_CONFIG1; + dpn_samplectrl_offset = SDW_CNL_DPN_SAMPLECTRL1; + dpn_hctrl_offset = SDW_CNL_DPN_HCTRL1; + dpn_offsetctrl_offset = SDW_CNL_DPN_OFFSETCTRL1; + } else { + dpn_config_offset = SDW_CNL_DPN_CONFIG0; + dpn_samplectrl_offset = SDW_CNL_DPN_SAMPLECTRL0; + dpn_hctrl_offset = SDW_CNL_DPN_HCTRL0; + dpn_offsetctrl_offset = SDW_CNL_DPN_OFFSETCTRL0; + } + dpn_config = cnl_sdw_port_reg_readl(data->sdw_regs, + dpn_config_offset, params->num); + dpn_config |= ((params->blockgroupcontrol & DPN_CONFIG_BGC_MASK) << + DPN_CONFIG_BGC_SHIFT); + dpn_config |= ((params->blockpackingmode & DPN_CONFIG_BPM_MASK) << + DPN_CONFIG_BPM_SHIFT); + + cnl_sdw_port_reg_writel(data->sdw_regs, + dpn_config_offset, params->num, dpn_config); + + cnl_sdw_port_reg_readl(data->sdw_regs, + dpn_config_offset, params->num); + + dpn_offsetctrl |= ((params->offset1 & 
DPN_OFFSETCTRL0_OF1_MASK) << + DPN_OFFSETCTRL0_OF1_SHIFT); + + dpn_offsetctrl |= ((params->offset2 & DPN_OFFSETCTRL0_OF2_MASK) << + DPN_OFFSETCTRL0_OF2_SHIFT); + + cnl_sdw_port_reg_writel(data->sdw_regs, + dpn_offsetctrl_offset, params->num, dpn_offsetctrl); + + + dpn_hctrl |= ((params->hstart & DPN_HCTRL_HSTART_MASK) << + DPN_HCTRL_HSTART_SHIFT); + dpn_hctrl |= ((params->hstop & DPN_HCTRL_HSTOP_MASK) << + DPN_HCTRL_HSTOP_SHIFT); + dpn_hctrl |= ((params->lanecontrol & DPN_HCTRL_LCONTROL_MASK) << + DPN_HCTRL_LCONTROL_SHIFT); + + cnl_sdw_port_reg_writel(data->sdw_regs, + dpn_hctrl_offset, params->num, dpn_hctrl); + + cnl_sdw_port_reg_writel(data->sdw_regs, + dpn_samplectrl_offset, params->num, + (params->sample_interval - 1)); + + cnl_sdw_port_reg_readl(data->sdw_regs, + dpn_hctrl_offset, params->num); + + cnl_sdw_port_reg_readl(data->sdw_regs, + dpn_samplectrl_offset, params->num); + + return 0; +} + +static int cnl_sdw_port_activate_ch(struct sdw_master *mstr, + struct sdw_activate_ch *activate_ch, int bank) +{ + struct cnl_sdw *sdw = sdw_master_get_drvdata(mstr); + struct cnl_sdw_data *data = &sdw->data; + int dpn_channelen_offset; + int ch_mask; + + if (bank) + dpn_channelen_offset = SDW_CNL_DPN_CHANNELEN1; + else + dpn_channelen_offset = SDW_CNL_DPN_CHANNELEN0; + + if (activate_ch->activate) + ch_mask = activate_ch->ch_mask; + else + ch_mask = 0; + + cnl_sdw_port_reg_writel(data->sdw_regs, + dpn_channelen_offset, activate_ch->num, + ch_mask); + + return 0; +} + +static int cnl_sdw_port_activate_ch_pre(struct sdw_master *mstr, + struct sdw_activate_ch *activate_ch, int bank) +{ + int sync_reg; + struct cnl_sdw *sdw = sdw_master_get_drvdata(mstr); + struct cnl_sdw_data *data = &sdw->data; + + if (mstr->link_sync_mask) { + /* Check if this link is synchronized with some other link */ + sync_reg = cnl_sdw_reg_readl(data->sdw_shim, SDW_CNL_SYNC); + /* If link is synchronized with other link than + * Need to make sure that command doesnt go till + * ssync is applied + 
*/ + sync_reg |= (1 << (data->inst_id + CNL_SYNC_CMDSYNC_SHIFT)); + cnl_sdw_reg_writel(data->sdw_shim, SDW_CNL_SYNC, sync_reg); + } + + return 0; +} +static int cnl_sdw_port_activate_ch_post(struct sdw_master *mstr, + struct sdw_activate_ch *activate_ch, int bank) +{ + int sync_reg; + struct cnl_sdw *sdw = sdw_master_get_drvdata(mstr); + struct cnl_sdw_data *data = &sdw->data; + volatile int sync_update = 0; + int timeout = 10; + + + sync_reg = cnl_sdw_reg_readl(data->sdw_shim, SDW_CNL_SYNC); + /* If waiting for synchronization set the go bit, else return */ + if (!(sync_reg & SDW_CMDSYNC_SET_MASK)) + return 0; + sync_reg |= (CNL_SYNC_SYNCGO_MASK << CNL_SYNC_SYNCGO_SHIFT); + cnl_sdw_reg_writel(data->sdw_shim, SDW_CNL_SYNC, sync_reg); + + do { + sync_update = cnl_sdw_reg_readl(data->sdw_shim, SDW_CNL_SYNC); + if ((sync_update & + (CNL_SYNC_SYNCGO_MASK << CNL_SYNC_SYNCGO_SHIFT)) == 0) + break; + msleep(20); + timeout--; + + } while (timeout); + + if ((sync_update & + (CNL_SYNC_SYNCGO_MASK << CNL_SYNC_SYNCGO_SHIFT)) != 0) { + dev_err(&mstr->dev, "Failed to set sync go\n"); + return -EIO; + } + return 0; +} + +static int cnl_sdw_probe(struct sdw_master *mstr, + const struct sdw_master_id *sdw_id) +{ + struct cnl_sdw *sdw; + int ret = 0; + struct cnl_sdw_data *data = mstr->dev.platform_data; + + sdw = devm_kzalloc(&mstr->dev, sizeof(*sdw), GFP_KERNEL); + if (!sdw) { + ret = -ENOMEM; + return ret; + } + dev_info(&mstr->dev, + "Controller Resources ctrl_base = %p shim=%p irq=%d inst_id=%d\n", + data->sdw_regs, data->sdw_shim, data->irq, data->inst_id); + sdw->data.sdw_regs = data->sdw_regs; + sdw->data.sdw_shim = data->sdw_shim; + sdw->data.irq = data->irq; + sdw->data.inst_id = data->inst_id; + sdw->data.alh_base = data->alh_base; + sdw->mstr = mstr; + spin_lock_init(&sdw->ctrl_lock); + sdw_master_set_drvdata(mstr, sdw); + init_completion(&sdw->tx_complete); + mutex_init(&sdw->stream_lock); + ret = sdw_init(sdw, true); + if (ret) { + dev_err(&mstr->dev, "SoundWire 
controller init failed %d\n", + data->inst_id); + return ret; + } + ret = devm_request_irq(&mstr->dev, + sdw->data.irq, cnl_sdw_irq_handler, IRQF_SHARED, "SDW", sdw); + if (ret) { + dev_err(&mstr->dev, "unable to grab IRQ %d, disabling device\n", + sdw->data.irq); + sdw_power_down_link(sdw); + return ret; + } + pm_runtime_set_autosuspend_delay(&mstr->dev, 3000); + pm_runtime_use_autosuspend(&mstr->dev); + pm_runtime_enable(&mstr->dev); + pm_runtime_get_sync(&mstr->dev); + /* Resuming the device, since its already ON, function will simply + * return doing nothing + */ + pm_runtime_mark_last_busy(&mstr->dev); + /* Suspending the device after 3 secs, by the time + * all the slave would have enumerated. Initial + * clock freq is 9.6MHz and frame shape is 48X2, so + * there are 200000 frames in second, total there are + * minimum 600000 frames before device suspends. Soundwire + * spec says slave should get attached to bus in 4096 + * error free frames after reset. So this should be + * enough to make sure device gets attached to bus. 
+ */ + pm_runtime_put_sync_autosuspend(&mstr->dev); + return ret; +} + +static int cnl_sdw_remove(struct sdw_master *mstr) +{ + struct cnl_sdw *sdw = sdw_master_get_drvdata(mstr); + + sdw_power_down_link(sdw); + + return 0; +} + +#ifdef CONFIG_PM +static int cnl_sdw_runtime_suspend(struct device *dev) +{ + int volatile mcp_stat; + int mcp_control; + int timeout = 0; + int ret = 0; + + struct cnl_sdw *sdw = dev_get_drvdata(dev); + struct cnl_sdw_data *data = &sdw->data; + + /* If its suspended return */ + mcp_stat = cnl_sdw_reg_readl(data->sdw_regs, + SDW_CNL_MCP_STAT); + if (mcp_stat & (MCP_STAT_CLOCKSTOPPED_MASK << + MCP_STAT_CLOCKSTOPPED_SHIFT)) { + dev_info(dev, "Clock is already stopped\n"); + return 0; + } + + /* Write the MCP Control register to prevent block wakeup */ + mcp_control = cnl_sdw_reg_readl(data->sdw_regs, + SDW_CNL_MCP_CONTROL); + mcp_control |= (MCP_CONTROL_BLOCKWAKEUP_MASK << + MCP_CONTROL_BLOCKWAKEUP_SHIFT); + cnl_sdw_reg_writel(data->sdw_regs, SDW_CNL_MCP_CONTROL, mcp_control); + + /* Prepare all the slaves for clock stop */ + ret = sdw_master_prep_for_clk_stop(sdw->mstr); + if (ret) + return ret; + + /* Call bus function to broadcast the clock stop now */ + ret = sdw_master_stop_clock(sdw->mstr); + if (ret) + return ret; + /* Wait for clock to be stopped, we are waiting at max 1sec now */ + while (timeout != 10) { + mcp_stat = cnl_sdw_reg_readl(data->sdw_regs, + SDW_CNL_MCP_STAT); + if (mcp_stat & (MCP_STAT_CLOCKSTOPPED_MASK << + MCP_STAT_CLOCKSTOPPED_SHIFT)) + break; + msleep(100); + timeout++; + } + mcp_stat = cnl_sdw_reg_readl(data->sdw_regs, + SDW_CNL_MCP_STAT); + if (!(mcp_stat & (MCP_STAT_CLOCKSTOPPED_MASK << + MCP_STAT_CLOCKSTOPPED_SHIFT))) { + dev_err(dev, "Clock Stop failed\n"); + ret = -EBUSY; + goto out; + } + /* Switch control from master IP to glue */ + sdw_switch_to_glue(sdw); + + sdw_power_down_link(sdw); + + /* Enable the wakeup */ + cnl_sdw_reg_writew(data->sdw_shim, + SDW_CNL_SNDWWAKEEN_REG_OFFSET, + (0x1 << 
data->inst_id)); +out: + return ret; +} + +static int cnl_sdw_clock_stop_exit(struct cnl_sdw *sdw) +{ + u16 wake_en, wake_sts; + int ret; + struct cnl_sdw_data *data = &sdw->data; + + /* Disable the wake up interrupt */ + wake_en = cnl_sdw_reg_readw(data->sdw_shim, + SDW_CNL_SNDWWAKEEN_REG_OFFSET); + wake_en &= ~(0x1 << data->inst_id); + cnl_sdw_reg_writew(data->sdw_shim, SDW_CNL_SNDWWAKEEN_REG_OFFSET, + wake_en); + + /* Clear wake status. This may be set if Slave requested wakeup has + * happened, or may not be if it master requested. But in any case + * this wont make any harm + */ + wake_sts = cnl_sdw_reg_readw(data->sdw_shim, + SDW_CNL_SNDWWAKESTS_REG_OFFSET); + wake_sts |= (0x1 << data->inst_id); + cnl_sdw_reg_writew(data->sdw_shim, SDW_CNL_SNDWWAKESTS_REG_OFFSET, + wake_sts); + ret = sdw_init(sdw, false); + if (ret < 0) { + pr_err("sdw_init fail: %d\n", ret); + return ret; + } + + dev_info(&sdw->mstr->dev, "Exit from clock stop successful\n"); + return 0; + +} + +static int cnl_sdw_runtime_resume(struct device *dev) +{ + struct cnl_sdw *sdw = dev_get_drvdata(dev); + struct cnl_sdw_data *data = &sdw->data; + int volatile mcp_stat; + struct sdw_master *mstr; + int ret = 0; + + mstr = sdw->mstr; + /* + * If already resumed, do nothing. This can happen because of + * wakeup enable. 
+ */ + mcp_stat = cnl_sdw_reg_readl(data->sdw_regs, + SDW_CNL_MCP_STAT); + if (!(mcp_stat & (MCP_STAT_CLOCKSTOPPED_MASK << + MCP_STAT_CLOCKSTOPPED_SHIFT))) { + dev_info(dev, "Clock is already running\n"); + return 0; + } + dev_info(dev, "%s %d Clock is stopped\n", __func__, __LINE__); + + ret = cnl_sdw_clock_stop_exit(sdw); + if (ret) + return ret; + dev_info(&mstr->dev, "Exit from clock stop successful\n"); + + /* Prepare all the slaves to comeout of clock stop */ + ret = sdw_mstr_deprep_after_clk_start(sdw->mstr); + if (ret) + return ret; + + return 0; +} + +#ifdef CONFIG_PM_SLEEP +static int cnl_sdw_sleep_resume(struct device *dev) +{ + return cnl_sdw_runtime_resume(dev); +} +static int cnl_sdw_sleep_suspend(struct device *dev) +{ + return cnl_sdw_runtime_suspend(dev); +} +#else +#define cnl_sdw_sleep_suspend NULL +#define cnl_sdw_sleep_resume NULL +#endif /* CONFIG_PM_SLEEP */ +#else +#define cnl_sdw_runtime_suspend NULL +#define cnl_sdw_runtime_resume NULL +#endif /* CONFIG_PM */ + + +static const struct dev_pm_ops cnl_sdw_pm_ops = { + SET_SYSTEM_SLEEP_PM_OPS(cnl_sdw_sleep_suspend, cnl_sdw_sleep_resume) + SET_RUNTIME_PM_OPS(cnl_sdw_runtime_suspend, + cnl_sdw_runtime_resume, NULL) +}; + +static struct sdw_master_ops cnl_sdw_master_ops = { + .xfer_msg_async = cnl_sdw_xfer_msg_async, + .xfer_msg = cnl_sdw_xfer_msg, + .xfer_bulk = cnl_sdw_xfer_bulk, + .monitor_handover = cnl_sdw_mon_handover, + .set_ssp_interval = cnl_sdw_set_ssp_interval, + .set_clock_freq = cnl_sdw_set_clock_freq, + .set_frame_shape = NULL, +}; + +static struct sdw_master_port_ops cnl_sdw_master_port_ops = { + .dpn_set_port_params = cnl_sdw_set_port_params, + .dpn_set_port_transport_params = cnl_sdw_set_port_transport_params, + .dpn_port_activate_ch = cnl_sdw_port_activate_ch, + .dpn_port_activate_ch_pre = cnl_sdw_port_activate_ch_pre, + .dpn_port_activate_ch_post = cnl_sdw_port_activate_ch_post, + .dpn_port_prepare_ch = NULL, + .dpn_port_prepare_ch_pre = NULL, + .dpn_port_prepare_ch_post = 
NULL, + +}; + +static struct sdw_mstr_driver cnl_sdw_mstr_driver = { + .driver_type = SDW_DRIVER_TYPE_MASTER, + .driver = { + .name = "cnl_sdw_mstr", + .pm = &cnl_sdw_pm_ops, + }, + .probe = cnl_sdw_probe, + .remove = cnl_sdw_remove, + .mstr_ops = &cnl_sdw_master_ops, + .mstr_port_ops = &cnl_sdw_master_port_ops, +}; + +static int __init cnl_sdw_init(void) +{ + return sdw_mstr_driver_register(&cnl_sdw_mstr_driver); +} +module_init(cnl_sdw_init); + +static void cnl_sdw_exit(void) +{ + sdw_mstr_driver_unregister(&cnl_sdw_mstr_driver); +} +module_exit(cnl_sdw_exit); + +MODULE_DESCRIPTION("Intel SoundWire Master Controller Driver"); +MODULE_LICENSE("GPL v2"); +MODULE_AUTHOR("Hardik Shah "); diff --git a/drivers/sdw/sdw_cnl_priv.h b/drivers/sdw/sdw_cnl_priv.h new file mode 100644 index 0000000000000..b7f44e1f9d6f5 --- /dev/null +++ b/drivers/sdw/sdw_cnl_priv.h @@ -0,0 +1,385 @@ +/* + * sdw_cnl_priv.h - Private definition for intel master controller driver. + * + * Copyright (C) 2014-2015 Intel Corp + * Author: Hardik Shah + * + * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; version 2 of the License. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. 
+ * + * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + * + */ + +#ifndef _LINUX_SDW_CNL_PRIV_H +#define _LINUX_SDW_CNL_PRIV_H + +#define SDW_CNL_PM_TIMEOUT 3000 /* ms */ +#define SDW_CNL_SLAVES_STAT_UPPER_DWORD_SHIFT 32 +#define SDW_CNL_SLAVE_STATUS_BITS 4 +#define SDW_CNL_CMD_WORD_LEN 4 +#define SDW_CNL_DEFAULT_SSP_INTERVAL 0x18 +#define SDW_CNL_DEFAULT_CLK_DIVIDER 0 + +#if IS_ENABLED(CONFIG_SND_SOC_INTEL_CNL_FPGA) +#define SDW_CNL_DEFAULT_SYNC_PERIOD 0x257F +#define SDW_CNL_DEFAULT_FRAME_SHAPE 0x48 +#else +#define SDW_CNL_DEFAULT_SYNC_PERIOD 0x176F +#define SDW_CNL_DEFAULT_FRAME_SHAPE 0x30 +#endif + +#define SDW_CNL_PORT_REG_OFFSET 0x80 +#define CNL_SDW_SCP_ADDR_REGS 0x2 +#define SDW_CNL_PCM_PDI_NUM_OFFSET 0x2 +#define SDW_CNL_PDM_PDI_NUM_OFFSET 0x6 + +#define SDW_CNL_CTMCTL_REG_OFFSET 0x60 +#define SDW_CNL_IOCTL_REG_OFFSET 0x60 +#define SDW_CNL_PCMSCAP_REG_OFFSET 0x60 +#define SDW_CNL_PCMSCHC_REG_OFFSET 0x60 +#define SDW_CNL_PDMSCAP_REG_OFFSET 0x60 +#define SDW_CNL_PCMSCHM_REG_OFFSET 0x60 +#define SDW_CNL_SNDWWAKEEN_REG_OFFSET 0x190 +#define SDW_CNL_SNDWWAKESTS_REG_OFFSET 0x192 + + +#define SDW_CNL_MCP_CONFIG 0x0 +#define MCP_CONFIG_BRELENABLE_MASK 0x1 +#define MCP_CONFIG_BRELENABLE_SHIFT 0x6 +#define MCP_CONFIG_MAXCMDRETRY_SHIFT 24 +#define MCP_CONFIG_MAXCMDRETRY_MASK 0xF +#define MCP_CONFIG_MAXPREQDELAY_SHIFT 16 +#define MCP_CONFIG_MAXPREQDELAY_MASK 0x1F +#define MCP_CONFIG_MMMODEEN_SHIFT 0x7 +#define MCP_CONFIG_MMMODEEN_MASK 0x1 +#define MCP_CONFIG_SNIFFEREN_SHIFT 0x5 +#define MCP_CONFIG_SNIFFEREN_MASK 0x1 +#define MCP_CONFIG_SSPMODE_SHIFT 0x4 +#define MCP_CONFIG_SSPMODE_MASK 0x1 +#define MCP_CONFIG_CMDMODE_SHIFT 0x3 +#define MCP_CONFIG_CMDMODE_MASK 0x1 + +#define MCP_CONFIG_OPERATIONMODE_MASK 0x7 +#define MCP_CONFIG_OPERATIONMODE_SHIFT 0x0 +#define MCP_CONFIG_OPERATIONMODE_NORMAL 0x0 + +#define SDW_CNL_MCP_CONTROL 0x4 +#define MCP_CONTROL_RESETDELAY_SHIFT 0x8 +#define MCP_CONTROL_CMDRST_SHIFT 0x7 +#define MCP_CONTROL_CMDRST_MASK 
0x1 +#define MCP_CONTROL_SOFTRST_SHIFT 0x6 +#define MCP_CONTROL_SOFTCTRLBUSRST_SHIFT 0x5 +#define MCP_CONTROL_HARDCTRLBUSRST_MASK 0x1 +#define MCP_CONTROL_HARDCTRLBUSRST_SHIFT 0x4 +#define MCP_CONTROL_CLOCKPAUSEREQ_SHIFT 0x3 +#define MCP_CONTROL_CLOCKSTOPCLEAR_SHIFT 0x2 +#define MCP_CONTROL_CLOCKSTOPCLEAR_MASK 0x1 +#define MCP_CONTROL_CMDACCEPTMODE_MASK 0x1 +#define MCP_CONTROL_CMDACCEPTMODE_SHIFT 0x1 +#define MCP_CONTROL_BLOCKWAKEUP_SHIFT 0x0 +#define MCP_CONTROL_BLOCKWAKEUP_MASK 0x1 + + +#define MCP_SLAVEINTMASK0_MASK 0xFFFFFFFF +#define MCP_SLAVEINTMASK1_MASK 0x0000FFFF + +#define SDW_CNL_MCP_CMDCTRL 0x8 +#define SDW_CNL_MCP_SSPSTAT 0xC +#define SDW_CNL_MCP_FRAMESHAPE 0x10 +#define SDW_CNL_MCP_FRAMESHAPEINIT 0x14 +#define SDW_CNL_MCP_CONFIGUPDATE 0x18 +#define MCP_CONFIGUPDATE_CONFIGUPDATE_SHIFT 0x0 +#define MCP_CONFIGUPDATE_CONFIGUPDATE_MASK 0x1 + +#define SDW_CNL_MCP_PHYCTRL 0x1C +#define SDW_CNL_MCP_SSPCTRL0 0x20 +#define SDW_CNL_MCP_SSPCTRL1 0x28 +#define SDW_CNL_MCP_CLOCKCTRL0 0x30 +#define SDW_CNL_MCP_CLOCKCTRL1 0x38 +#define SDW_CNL_MCP_STAT 0x40 +#define SDW_CNL_MCP_INTSTAT 0x44 +#define MCP_INTSTAT_IRQ_SHIFT 31 +#define MCP_INTSTAT_IRQ_MASK 1 +#define MCP_INTSTAT_WAKEUP_SHIFT 16 +#define MCP_INTSTAT_SLAVE_STATUS_CHANGED_SHIFT 12 +#define MCP_INTSTAT_SLAVE_STATUS_CHANGED_MASK 0xF +#define MCP_INTSTAT_SLAVENOTATTACHED_SHIFT 12 +#define MCP_INTSTAT_SLAVEATTACHED_SHIFT 13 +#define MCP_INTSTAT_SLAVEALERT_SHIFT 14 +#define MCP_INTSTAT_SLAVERESERVED_SHIFT 15 + +#define MCP_INTSTAT_DPPDIINT_SHIFT 11 +#define MCP_INTSTAT_DPPDIINTMASK 0x1 +#define MCP_INTSTAT_CONTROLBUSCLASH_SHIFT 10 +#define MCP_INTSTAT_CONTROLBUSCLASH_MASK 0x1 +#define MCP_INTSTAT_DATABUSCLASH_SHIFT 9 +#define MCP_INTSTAT_DATABUSCLASH_MASK 0x1 +#define MCP_INTSTAT_CMDERR_SHIFT 7 +#define MCP_INTSTAT_CMDERR_MASK 0x1 +#define MCP_INTSTAT_TXE_SHIFT 1 +#define MCP_INTSTAT_TXE_MASK 0x1 +#define MCP_INTSTAT_RXWL_SHIFT 2 +#define MCP_INTSTAT_RXWL_MASK 1 + +#define SDW_CNL_MCP_INTMASK 0x48 +#define 
MCP_INTMASK_IRQEN_SHIFT 31 +#define MCP_INTMASK_IRQEN_MASK 0x1 +#define MCP_INTMASK_WAKEUP_SHIFT 16 +#define MCP_INTMASK_WAKEUP_MASK 0x1 +#define MCP_INTMASK_SLAVERESERVED_SHIFT 15 +#define MCP_INTMASK_SLAVERESERVED_MASK 0x1 +#define MCP_INTMASK_SLAVEALERT_SHIFT 14 +#define MCP_INTMASK_SLAVEALERT_MASK 0x1 +#define MCP_INTMASK_SLAVEATTACHED_SHIFT 13 +#define MCP_INTMASK_SLAVEATTACHED_MASK 0x1 +#define MCP_INTMASK_SLAVENOTATTACHED_SHIFT 12 +#define MCP_INTMASK_SLAVENOTATTACHED_MASK 0x1 +#define MCP_INTMASK_DPPDIINT_SHIFT 11 +#define MCP_INTMASK_DPPDIINT_MASK 0x1 +#define MCP_INTMASK_CONTROLBUSCLASH_SHIFT 10 +#define MCP_INTMASK_CONTROLBUSCLASH_MASK 1 +#define MCP_INTMASK_DATABUSCLASH_SHIFT 9 +#define MCP_INTMASK_DATABUSCLASH_MASK 1 +#define MCP_INTMASK_CMDERR_SHIFT 7 +#define MCP_INTMASK_CMDERR_MASK 0x1 +#define MCP_INTMASK_TXE_SHIFT 1 +#define MCP_INTMASK_TXE_MASK 0x1 +#define MCP_INTMASK_RXWL_SHIFT 2 +#define MCP_INTMASK_RXWL_MASK 0x1 + +#define SDW_CNL_MCP_INTSET 0x4C +#define SDW_CNL_MCP_STAT 0x40 +#define MCP_STAT_ACTIVE_BANK_MASK 0x1 +#define MCP_STAT_ACTIVE_BANK_SHIT 20 +#define MCP_STAT_CLOCKSTOPPED_MASK 0x1 +#define MCP_STAT_CLOCKSTOPPED_SHIFT 16 + +#define SDW_CNL_MCP_SLAVESTAT 0x50 +#define MCP_SLAVESTAT_MASK 0x3 + +#define SDW_CNL_MCP_SLAVEINTSTAT0 0x54 +#define MCP_SLAVEINTSTAT_NOT_PRESENT_MASK 0x1 +#define MCP_SLAVEINTSTAT_ATTACHED_MASK 0x2 +#define MCP_SLAVEINTSTAT_ALERT_MASK 0x4 +#define MCP_SLAVEINTSTAT_RESERVED_MASK 0x8 + +#define SDW_CNL_MCP_SLAVEINTSTAT1 0x58 +#define SDW_CNL_MCP_SLAVEINTMASK0 0x5C +#define SDW_CNL_MCP_SLAVEINTMASK1 0x60 +#define SDW_CNL_MCP_PORTINTSTAT 0x64 +#define SDW_CNL_MCP_PDISTAT 0x6C + +#define SDW_CNL_MCP_FIFOLEVEL 0x78 +#define SDW_CNL_MCP_FIFOSTAT 0x7C +#define MCP_RX_FIFO_AVAIL_MASK 0x3F +#define SDW_CNL_MCP_COMMAND_BASE 0x80 +#define SDW_CNL_MCP_RESPONSE_BASE 0x80 +#define SDW_CNL_MCP_COMMAND_LENGTH 0x20 + +#define MCP_COMMAND_SSP_TAG_MASK 0x1 +#define MCP_COMMAND_SSP_TAG_SHIFT 31 +#define MCP_COMMAND_COMMAND_MASK 0x7 
+#define MCP_COMMAND_COMMAND_SHIFT 28 +#define MCP_COMMAND_DEV_ADDR_MASK 0xF +#define MCP_COMMAND_DEV_ADDR_SHIFT 24 +#define MCP_COMMAND_REG_ADDR_H_MASK 0x7 +#define MCP_COMMAND_REG_ADDR_H_SHIFT 16 +#define MCP_COMMAND_REG_ADDR_L_MASK 0xFF +#define MCP_COMMAND_REG_ADDR_L_SHIFT 8 +#define MCP_COMMAND_REG_DATA_MASK 0xFF +#define MCP_COMMAND_REG_DATA_SHIFT 0x0 + +#define MCP_RESPONSE_RDATA_MASK 0xFF +#define MCP_RESPONSE_RDATA_SHIFT 8 +#define MCP_RESPONSE_ACK_MASK 0x1 +#define MCP_RESPONSE_ACK_SHIFT 0 +#define MCP_RESPONSE_NACK_MASK 0x2 + +#define SDW_CNL_DPN_CONFIG0 0x100 +#define SDW_CNL_DPN_CHANNELEN0 0x104 +#define SDW_CNL_DPN_SAMPLECTRL0 0x108 +#define SDW_CNL_DPN_OFFSETCTRL0 0x10C +#define SDW_CNL_DPN_HCTRL0 0x110 +#define SDW_CNL_DPN_ASYNCCTRL0 0x114 + +#define SDW_CNL_DPN_CONFIG1 0x118 +#define SDW_CNL_DPN_CHANNELEN1 0x11C +#define SDW_CNL_DPN_SAMPLECTRL1 0x120 +#define SDW_CNL_DPN_OFFSETCTRL1 0x124 +#define SDW_CNL_DPN_HCTRL1 0x128 + +#define SDW_CNL_PORTCTRL 0x130 +#define PORTCTRL_PORT_DIRECTION_SHIFT 0x7 +#define PORTCTRL_PORT_DIRECTION_MASK 0x1 +#define PORTCTRL_BANK_INVERT_SHIFT 0x8 +#define PORTCTRL_BANK_INVERT_MASK 0x1 + +#define SDW_CNL_PDINCONFIG0 0x1100 +#define SDW_CNL_PDINCONFIG1 0x1108 +#define PDINCONFIG_CHANNEL_MASK_SHIFT 0x8 +#define PDINCONFIG_CHANNEL_MASK_MASK 0xFF +#define PDINCONFIG_PORT_NUMBER_SHIFT 0x0 +#define PDINCONFIG_PORT_NUMBER_MASK 0x1F +#define PDINCONFIG_PORT_SOFT_RESET_SHIFT 0x18 +#define PDINCONFIG_PORT_SOFT_RESET 0x1F + +#define DPN_CONFIG_WL_SHIFT 0x8 +#define DPN_CONFIG_WL_MASK 0x1F +#define DPN_CONFIG_PF_MODE_SHIFT 0x0 +#define DPN_CONFIG_PF_MODE_MASK 0x3 +#define DPN_CONFIG_PD_MODE_SHIFT 0x2 +#define DPN_CONFIG_PD_MODE_MASK 0x3 +#define DPN_CONFIG_BPM_MASK 0x1 +#define DPN_CONFIG_BPM_SHIFT 0x12 +#define DPN_CONFIG_BGC_MASK 0x3 +#define DPN_CONFIG_BGC_SHIFT 0x10 + +#define DPN_SAMPLECTRL_SI_MASK 0xFFFF +#define DPN_SAMPLECTRL_SI_SHIFT 0x0 + +#define DPN_OFFSETCTRL0_OF1_MASK 0xFF +#define DPN_OFFSETCTRL0_OF1_SHIFT 0x0 
+#define DPN_OFFSETCTRL0_OF2_MASK 0xFF +#define DPN_OFFSETCTRL0_OF2_SHIFT 0x8 + +#define DPN_HCTRL_HSTOP_MASK 0xF +#define DPN_HCTRL_HSTOP_SHIFT 0x0 +#define DPN_HCTRL_HSTART_MASK 0xF +#define DPN_HCTRL_HSTART_SHIFT 0x4 +#define DPN_HCTRL_LCONTROL_MASK 0x7 +#define DPN_HCTRL_LCONTROL_SHIFT 0x8 + +/* SoundWire Shim registers */ +#define SDW_CNL_LCAP 0x0 +#define SDW_CNL_LCTL 0x4 +#define CNL_LCTL_CPA_SHIFT 8 +#define CNL_LCTL_SPA_SHIFT 0 +#define CNL_LCTL_CPA_MASK 0x1 +#define CNL_LCTL_SPA_MASK 0x1 + +#define SDW_CMDSYNC_SET_MASK 0xF0000 +#define SDW_CNL_IPPTR 0x8 +#define SDW_CNL_SYNC 0xC +#define CNL_SYNC_CMDSYNC_MASK 0x1 +#define CNL_SYNC_CMDSYNC_SHIFT 16 +#define CNL_SYNC_SYNCGO_MASK 0x1 +#define CNL_SYNC_SYNCGO_SHIFT 0x18 +#define CNL_SYNC_SYNCPRD_MASK 0x7FFF +#define CNL_SYNC_SYNCPRD_SHIFT 0x0 +#define CNL_SYNC_SYNCCPU_MASK 0x8000 +#define CNL_SYNC_SYNCCPU_SHIFT 0xF + +#define SDW_CNL_CTLSCAP 0x10 +#define SDW_CNL_CTLS0CM 0x12 +#define SDW_CNL_CTLS1CM 0x14 +#define SDW_CNL_CTLS2CM 0x16 +#define SDW_CNL_CTLS3CM 0x18 + +#define SDW_CNL_PCMSCAP 0x20 +#define CNL_PCMSCAP_BSS_SHIFT 8 +#define CNL_PCMSCAP_BSS_MASK 0x1F +#define CNL_PCMSCAP_OSS_SHIFT 4 +#define CNL_PCMSCAP_OSS_MASK 0xF +#define CNL_PCMSCAP_ISS_SHIFT 0 +#define CNL_PCMSCAP_ISS_MASK 0xF + +#define SDW_CNL_PCMSCHM 0x22 +#define CNL_PCMSYCM_DIR_SHIFT 15 +#define CNL_PCMSYCM_DIR_MASK 0x1 +#define CNL_PCMSYCM_STREAM_SHIFT 8 +#define CNL_PCMSYCM_STREAM_MASK 0x3F +#define CNL_PCMSYCM_HCHAN_SHIFT 4 +#define CNL_PCMSYCM_HCHAN_MASK 0xF +#define CNL_PCMSYCM_LCHAN_SHIFT 0 +#define CNL_PCMSYCM_LCHAN_MASK 0xF + +#define SDW_CNL_PCMSCHC 0x42 + +#define SDW_CNL_PDMSCAP 0x62 +#define CNL_PDMSCAP_BSS_SHIFT 8 +#define CNL_PDMSCAP_BSS_MASK 0x1F +#define CNL_PDMSCAP_OSS_SHIFT 4 +#define CNL_PDMSCAP_OSS_MASK 0xF +#define CNL_PDMSCAP_ISS_SHIFT 0 +#define CNL_PDMSCAP_ISS_MASK 0xF +#define CNL_PDMSCAP_CPSS_SHIFT 13 +#define CNL_PDMSCAP_CPSS_MASK 0x7 +#define SDW_CNL_PDMSCM + +#define SDW_CNL_IOCTL 0x6C +#define 
CNL_IOCTL_MIF_SHIFT 0x0 +#define CNL_IOCTL_MIF_MASK 0x1 +#define CNL_IOCTL_CO_SHIFT 0x1 +#define CNL_IOCTL_CO_MASK 0x1 +#define CNL_IOCTL_COE_SHIFT 0x2 +#define CNL_IOCTL_COE_MASK 0x1 +#define CNL_IOCTL_DO_SHIFT 0x3 +#define CNL_IOCTL_DO_MASK 0x1 +#define CNL_IOCTL_DOE_SHIFT 0x4 +#define CNL_IOCTL_DOE_MASK 0x1 +#define CNL_IOCTL_BKE_SHIFT 0x5 +#define CNL_IOCTL_BKE_MASK 0x1 +#define CNL_IOCTL_WPDD_SHIFT 0x6 +#define CNL_IOCTL_WPDD_MASK 0x1 +#define CNL_IOCTL_CIBD_SHIFT 0x8 +#define CNL_IOCTL_CIBD_MASK 0x1 +#define CNL_IOCTL_DIBD_SHIFT 0x9 +#define CNL_IOCTL_DIBD_MASK 0x1 + +#define SDW_CNL_CTMCTL_OFFSET 0x60 +#define SDW_CNL_CTMCTL 0x6E +#define CNL_CTMCTL_DACTQE_SHIFT 0x0 +#define CNL_CTMCTL_DACTQE_MASK 0x1 +#define CNL_CTMCTL_DODS_SHIFT 0x1 +#define CNL_CTMCTL_DODS_MASK 0x1 +#define CNL_CTMCTL_DOAIS_SHIFT 0x3 +#define CNL_CTMCTL_DOAIS_MASK 0x3 + +#define ALH_CNL_STRMZCFG_BASE 0x4 +#define ALH_CNL_STRMZCFG_OFFSET 0x4 +#define CNL_STRMZCFG_DMAT_SHIFT 0x0 +#define CNL_STRMZCFG_DMAT_MASK 0xFF +#define CNL_STRMZCFG_DMAT_VAL 0x3 +#define CNL_STRMZCFG_CHAN_SHIFT 16 +#define CNL_STRMZCFG_CHAN_MASK 0xF + +#define SDW_BRA_HEADER_SIZE_PDI 12 /* In bytes */ +#define SDW_BRA_HEADER_CRC_SIZE_PDI 4 /* In bytes */ +#define SDW_BRA_DATA_CRC_SIZE_PDI 4 /* In bytes */ +#define SDW_BRA_HEADER_RESP_SIZE_PDI 4 /* In bytes */ +#define SDW_BRA_FOOTER_RESP_SIZE_PDI 4 /* In bytes */ +#define SDW_BRA_PADDING_SZ_PDI 4 /* In bytes */ +#define SDW_BRA_HEADER_TOTAL_SZ_PDI 16 /* In bytes */ + +#define SDW_BRA_SOP_EOP_PDI_STRT_VALUE 0x4 +#define SDW_BRA_SOP_EOP_PDI_END_VALUE 0x2 +#define SDW_BRA_SOP_EOP_PDI_MASK 0x1F +#define SDW_BRA_SOP_EOP_PDI_SHIFT 5 + +#define SDW_BRA_STRM_ID_BLK_OUT 3 +#define SDW_BRA_STRM_ID_BLK_IN 4 + +#define SDW_BRA_PDI_TX_ID 0 +#define SDW_BRA_PDI_RX_ID 1 + +#define SDW_BRA_SOFT_RESET 0x1 +#define SDW_BRA_BULK_ENABLE 1 +#define SDW_BRA_BLK_EN_MASK 0xFFFEFFFF +#define SDW_BRA_BLK_EN_SHIFT 16 + +#define SDW_BRA_ROLLINGID_PDI_INDX 3 +#define SDW_BRA_ROLLINGID_PDI_MASK 0xF 
+#define SDW_BRA_ROLLINGID_PDI_SHIFT 0 + +#define SDW_PCM_STRM_START_INDEX 0x2 + +#endif /* _LINUX_SDW_CNL_H */ diff --git a/drivers/sdw/sdw_maxim.c b/drivers/sdw/sdw_maxim.c new file mode 100644 index 0000000000000..0081c5c004972 --- /dev/null +++ b/drivers/sdw/sdw_maxim.c @@ -0,0 +1,146 @@ +/* + * sdw_maxim.c -- Maxim SoundWire slave device driver. Dummy driver + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + */ + +#include +#include +#include + + +static int maxim_register_sdw_capabilties(struct sdw_slv *sdw, + const struct sdw_slv_id *sdw_id) +{ + struct sdw_slv_capabilities cap; + struct sdw_slv_dpn_capabilities *dpn_cap = NULL; + struct port_audio_mode_properties *prop = NULL; + int i, j; + + cap.wake_up_unavailable = true; + cap.test_mode_supported = false; + cap.clock_stop1_mode_supported = false; + cap.simplified_clock_stop_prepare = false; + cap.highphy_capable = true; + cap.paging_supported = false; + cap.bank_delay_support = false; + cap.port_15_read_behavior = 0; + cap.sdw_dp0_supported = false; + cap.num_of_sdw_ports = 3; + cap.sdw_dpn_cap = devm_kzalloc(&sdw->dev, + ((sizeof(struct sdw_slv_dpn_capabilities)) * + cap.num_of_sdw_ports), GFP_KERNEL); + for (i = 0; i < cap.num_of_sdw_ports; i++) { + dpn_cap = &cap.sdw_dpn_cap[i]; + if (i == 0 || i == 2) + dpn_cap->port_direction = SDW_PORT_SOURCE; + else + dpn_cap->port_direction = SDW_PORT_SINK; + + dpn_cap->port_number = i+1; + dpn_cap->max_word_length = 24; + dpn_cap->min_word_length = 16; + dpn_cap->num_word_length = 0; + dpn_cap->word_length_buffer = NULL; + dpn_cap->dpn_type = SDW_FULL_DP; + dpn_cap->dpn_grouping = SDW_BLOCKGROUPCOUNT_1; + dpn_cap->prepare_ch = SDW_CP_SM; + dpn_cap->imp_def_intr_mask = 0x0; + dpn_cap->min_ch_num = 1; + dpn_cap->max_ch_num = 2; + dpn_cap->num_ch_supported = 0; + dpn_cap->ch_supported = NULL; + 
dpn_cap->port_flow_mode_mask = SDW_PORT_FLOW_MODE_ISOCHRONOUS; + dpn_cap->block_packing_mode_mask = + SDW_PORT_BLK_PKG_MODE_BLK_PER_PORT_MASK | + SDW_PORT_BLK_PKG_MODE_BLK_PER_CH_MASK; + dpn_cap->port_encoding_type_mask = + SDW_PORT_ENCODING_TYPE_TWOS_CMPLMNT | + SDW_PORT_ENCODING_TYPE_SIGN_MAGNITUDE | + SDW_PORT_ENCODING_TYPE_IEEE_32_FLOAT; + dpn_cap->num_audio_modes = 1; + + dpn_cap->mode_properties = devm_kzalloc(&sdw->dev, + ((sizeof(struct port_audio_mode_properties)) * + dpn_cap->num_audio_modes), GFP_KERNEL); + for (j = 0; j < dpn_cap->num_audio_modes; j++) { + prop = &dpn_cap->mode_properties[j]; + prop->max_frequency = 16000000; + prop->min_frequency = 1000000; + prop->num_freq_configs = 0; + prop->freq_supported = NULL; + prop->glitchless_transitions_mask = 0x1; + prop->max_sampling_frequency = 192000; + prop->min_sampling_frequency = 8000; + prop->num_sampling_freq_configs = 0; + prop->sampling_freq_config = NULL; + prop->ch_prepare_behavior = SDW_CH_PREP_ANY_TIME; + } + } + return sdw_register_slave_capabilities(sdw, &cap); + +} +static int maxim_sdw_probe(struct sdw_slv *sdw, + const struct sdw_slv_id *sdw_id) +{ + dev_info(&sdw->dev, "Maxim SoundWire Slave Registered %lx\n", sdw_id->driver_data); + return maxim_register_sdw_capabilties(sdw, sdw_id); +} + +static int maxim_sdw_remove(struct sdw_slv *sdw) +{ + dev_info(&sdw->dev, "Maxim SoundWire Slave un-Registered\n"); + return 0; +} + +static const struct sdw_slv_id maxim_id[] = { + {"03:01:9f:79:00:00", 0}, + {"09:01:9f:79:00:00", 1}, + {"04:01:9f:79:00:00", 2}, + {"0a:01:9f:79:00:00", 3}, + {"04:01:9f:79:00:00", 4}, + {"0a:01:9f:79:00:00", 5}, + {"05:01:9f:79:00:00", 6}, + {"06:01:9f:79:00:00", 7}, + {"05:01:9f:79:00:00", 8}, + {"00:01:9f:79:00:00", 9}, + {"06:01:9f:79:00:00", 10}, + {"07:01:9f:79:00:00", 11}, + {"00:01:9f:79:00:00", 12}, + {"06:01:9f:79:00:00", 13}, + {"01:01:9f:79:00:00", 14}, + {"07:01:9f:79:00:00", 15}, + {"08:01:9f:79:00:00", 16}, + {"01:01:9f:79:00:00", 17}, + 
{"07:01:9f:79:00:00", 18}, + {"02:01:9f:79:00:00", 19}, + {"08:01:9f:79:00:00", 20}, + {"09:01:9f:79:00:00", 21}, + {"02:01:9f:79:00:00", 22}, + {"08:01:9f:79:00:00", 23}, + {"03:01:9f:79:00:00", 24}, + {"09:01:9f:79:00:00", 25}, + {"0a:01:9f:79:00:00", 26}, + {}, +}; + +MODULE_DEVICE_TABLE(sdwint, maxim_id); + +static struct sdw_slave_driver maxim_sdw_driver = { + .driver_type = SDW_DRIVER_TYPE_SLAVE, + .driver = { + .name = "maxim", + }, + .probe = maxim_sdw_probe, + .remove = maxim_sdw_remove, + .id_table = maxim_id, +}; + +module_sdw_slave_driver(maxim_sdw_driver); + +MODULE_DESCRIPTION("SoundWire Maxim Slave Driver"); +MODULE_AUTHOR("Hardik Shah, "); +MODULE_LICENSE("GPL"); diff --git a/drivers/sdw/sdw_priv.h b/drivers/sdw/sdw_priv.h new file mode 100644 index 0000000000000..fd060bfa74c4a --- /dev/null +++ b/drivers/sdw/sdw_priv.h @@ -0,0 +1,280 @@ +/* + * sdw_priv.h - Private definition for sdw bus interface. + * + * Copyright (C) 2014-2015 Intel Corp + * Author: Hardik Shah + * + * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; version 2 of the License. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. 
+ * + * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + * + */ + +#ifndef _LINUX_SDW_PRIV_H +#define _LINUX_SDW_PRIV_H + +#include /* For kthread */ +#include + +#define SDW_MAX_STREAM_TAG_KEY_SIZE 80 +#define SDW_NUM_STREAM_TAGS 100 +#define MAX_NUM_ROWS 23 /* As per SDW Spec */ +#define MAX_NUM_COLS 8/* As per SDW Spec */ +#define MAX_NUM_ROW_COLS (MAX_NUM_ROWS * MAX_NUM_COLS) + +#define SDW_STATE_INIT_STREAM_TAG 0x1 +#define SDW_STATE_ALLOC_STREAM 0x2 +#define SDW_STATE_CONFIG_STREAM 0x3 +#define SDW_STATE_PREPARE_STREAM 0x4 +#define SDW_STATE_ENABLE_STREAM 0x5 +#define SDW_STATE_DISABLE_STREAM 0x6 +#define SDW_STATE_UNPREPARE_STREAM 0x7 +#define SDW_STATE_RELEASE_STREAM 0x8 +#define SDW_STATE_FREE_STREAM 0x9 +#define SDW_STATE_FREE_STREAM_TAG 0xA +#define SDW_STATE_ONLY_XPORT_STREAM 0xB + +#define SDW_STATE_INIT_RT 0x1 +#define SDW_STATE_CONFIG_RT 0x2 +#define SDW_STATE_PREPARE_RT 0x3 +#define SDW_STATE_ENABLE_RT 0x4 +#define SDW_STATE_DISABLE_RT 0x5 +#define SDW_STATE_UNPREPARE_RT 0x6 +#define SDW_STATE_RELEASE_RT 0x7 + +#define SDW_SLAVE_BDCAST_ADDR 15 + +struct sdw_runtime; +/* Defined in sdw.c, used by multiple files of module */ +extern struct sdw_core sdw_core; + +enum sdw_port_state { + SDW_PORT_STATE_CH_READY, + SDW_PORT_STATE_CH_STOPPED, + SDW_PORT_STATE_CH_PREPARING, + SDW_PORT_STATE_CH_DEPREPARING, +}; + +enum sdw_stream_state { + SDW_STREAM_ALLOCATED, + SDW_STREAM_FREE, + SDW_STREAM_ACTIVE, + SDW_STREAM_INACTIVE, +}; + +enum sdw_clk_state { + SDW_CLK_STATE_OFF = 0, + SDW_CLK_STATE_ON = 1, +}; + +enum sdw_update_bs_state { + SDW_UPDATE_BS_PRE, + SDW_UPDATE_BS_BNKSWTCH, + SDW_UPDATE_BS_POST, + SDW_UPDATE_BS_BNKSWTCH_WAIT, + SDW_UPDATE_BS_DIS_CHN, +}; + +enum sdw_port_en_state { + SDW_PORT_STATE_PREPARE, + SDW_PORT_STATE_ENABLE, + SDW_PORT_STATE_DISABLE, + SDW_PORT_STATE_UNPREPARE, +}; + +struct port_chn_en_state { + bool is_activate; + bool is_bank_sw; +}; + +struct temp_elements { + int rate; + int full_bw; + int 
payload_bw; + int hwidth; +}; + +struct sdw_stream_tag { + int stream_tag; + struct mutex stream_lock; + int ref_count; + enum sdw_stream_state stream_state; + char key[SDW_MAX_STREAM_TAG_KEY_SIZE]; + struct sdw_runtime *sdw_rt; +}; + +struct sdw_stream_params { + unsigned int rate; + unsigned int channel_count; + unsigned int bps; +}; + +struct sdw_port_runtime { + int port_num; + enum sdw_port_state port_state; + int channel_mask; + /* Frame params and stream params are per port based + * Single stream of audio may be split + * into mutliple port each handling + * subset of channels, channels should + * be contiguous in subset + */ + struct sdw_transport_params transport_params; + struct sdw_port_params port_params; + struct list_head port_node; +}; + +struct sdw_slave_runtime { + /* Simplified port or full port, there cannot be both types of + * data port for single stream, so data structure is kept per + * slave runtime, not per port + */ + enum sdw_dpn_type type; + struct sdw_slv *slave; + int direction; + /* Stream may be split into multiple slaves, so this is for + * this particular slave + */ + struct sdw_stream_params stream_params; + struct list_head port_rt_list; + struct list_head slave_sdw_node; + struct list_head slave_node; + int rt_state; /* State of runtime structure */ + +}; + + +struct sdw_mstr_runtime { + struct sdw_master *mstr; + int direction; + /* Stream may be split between multiple masters so this + * is for invidual master, if stream is split into multiple + * streams. For calculating the bandwidth on the particular bus + * stream params of master is taken into account. 
+ */ + struct sdw_stream_params stream_params; + struct list_head port_rt_list; + /* Two nodes are required because BW calculation is based on master + * while stream enabling is based on stream_tag, where multiple + * masters may be involved + */ + struct list_head mstr_sdw_node; /* This is to add mstr_rt in sdw_rt */ + struct list_head mstr_node; /* This is to add mstr_rt in mstr */ + + struct list_head slv_rt_list; + /* Individual stream bandwidth on given master */ + unsigned int stream_bw; + /* State of runtime structure */ + int rt_state; + int hstart; + int hstop; + int block_offset; + int sub_block_offset; +}; + +struct sdw_runtime { + int tx_ref_count; + int rx_ref_count; + /* This is stream params for whole stream + * but stream may be split between two + * masters, or two slaves. + */ + struct sdw_stream_params stream_params; + struct list_head slv_rt_list; + struct list_head mstr_rt_list; + enum sdw_stream_type type; + int stream_state; + int xport_state; + +}; + +struct sdw_slv_status { + struct list_head node; + enum sdw_slave_status status[SOUNDWIRE_MAX_DEVICES+1]; +}; + +/** Bus structure which handles bus related information */ +struct sdw_bus { + struct list_head bus_node; + struct sdw_master *mstr; + unsigned int port_grp_mask[2]; + unsigned int slave_grp_mask[2]; + unsigned int clk_state; + unsigned int active_bank; + unsigned int clk_freq; + unsigned int clk_div; + /* Bus total Bandwidth. 
Initialize and reset to zero */ + unsigned int bandwidth; + unsigned int stream_interval; /* Stream Interval */ + unsigned int system_interval; /* Bus System Interval */ + unsigned int frame_freq; + unsigned int col; + unsigned int row; + struct task_struct *status_thread; + struct kthread_worker kworker; + struct kthread_work kwork; + struct list_head status_list; + spinlock_t spinlock; + struct sdw_async_xfer_data async_data; +}; + +/** Holds supported Row-Column combination related information */ +struct sdw_rowcol { + int row; + int col; + int control_bits; + int data_bits; +}; + +/** + * Global soundwire structure. It handles all the streams spawned + * across masters and has list of bus structure per every master + * registered + */ +struct sdw_core { + struct sdw_stream_tag stream_tags[SDW_NUM_STREAM_TAGS]; + struct sdw_rowcol rowcolcomb[MAX_NUM_ROW_COLS]; + struct list_head bus_list; + struct mutex core_lock; + struct idr idr; + int first_dynamic_bus_num; +}; + +/* Structure holding mapping of numbers to cols */ +struct sdw_num_to_col { + int num; + int col; +}; + +/* Structure holding mapping of numbers to rows */ +struct sdw_num_to_row { + int num; + int row; +}; + +int sdw_slave_port_config_port_params(struct sdw_slave_runtime *slv_rt); +int sdw_slave_port_prepare(struct sdw_slave_runtime, bool prepare); +int sdw_bus_bw_init(void); +int sdw_mstr_bw_init(struct sdw_bus *sdw_bs); +int sdw_bus_calc_bw(struct sdw_stream_tag *stream_tag, bool enable); +int sdw_bus_calc_bw_dis(struct sdw_stream_tag *stream_tag, bool unprepare); +int sdw_bus_bra_xport_config(struct sdw_bus *sdw_mstr_bs, + struct sdw_bra_block *block, bool enable); +int sdw_chn_enable(void); +void sdw_unlock_mstr(struct sdw_master *mstr); +int sdw_trylock_mstr(struct sdw_master *mstr); +void sdw_lock_mstr(struct sdw_master *mstr); +int sdw_slave_transfer_async(struct sdw_master *mstr, struct sdw_msg *msg, + int num, + struct sdw_async_xfer_data *async_data); + +#endif /* _LINUX_SDW_PRIV_H */ 
diff --git a/drivers/sdw/sdw_utils.c b/drivers/sdw/sdw_utils.c new file mode 100644 index 0000000000000..724323d01993e --- /dev/null +++ b/drivers/sdw/sdw_utils.c @@ -0,0 +1,49 @@ +/* + * sdw_bwcalc.c - SoundWire Bus BW calculation & CHN Enabling implementation + * + * Copyright (C) 2015-2016 Intel Corp + * Author: Sanyog Kale + * + * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; version 2 of the License. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + */ + +#include +#include +#include +#include + + + +/** + * sdw_bus_compute_crc8: SoundWire bus helper function to compute crc8. + * This API uses crc8 helper functions internally. + * + * @values: Data buffer. + * @num_bytes: Number of bytes. 
+ */ +u8 sdw_bus_compute_crc8(u8 *values, u8 num_bytes) +{ + u8 table[256]; + u8 poly = 0x4D; /* polynomial = x^8 + x^6 + x^3 + x^2 + 1 */ + u8 crc = CRC8_INIT_VALUE; /* Initialize 8 bit to 11111111 */ + + /* Populate MSB */ + crc8_populate_msb(table, poly); + + /* CRC computation */ + crc = crc8(table, values, num_bytes, crc); + + return crc; +} +EXPORT_SYMBOL(sdw_bus_compute_crc8); diff --git a/drivers/slimbus/qcom-ngd-ctrl.c b/drivers/slimbus/qcom-ngd-ctrl.c index 8be4d6786c610..14a9d18306cbf 100644 --- a/drivers/slimbus/qcom-ngd-ctrl.c +++ b/drivers/slimbus/qcom-ngd-ctrl.c @@ -1467,7 +1467,7 @@ static int qcom_slim_ngd_remove(struct platform_device *pdev) return 0; } -static int qcom_slim_ngd_runtime_idle(struct device *dev) +static int __maybe_unused qcom_slim_ngd_runtime_idle(struct device *dev) { struct qcom_slim_ngd_ctrl *ctrl = dev_get_drvdata(dev); @@ -1477,8 +1477,7 @@ static int qcom_slim_ngd_runtime_idle(struct device *dev) return -EAGAIN; } -#ifdef CONFIG_PM -static int qcom_slim_ngd_runtime_suspend(struct device *dev) +static int __maybe_unused qcom_slim_ngd_runtime_suspend(struct device *dev) { struct qcom_slim_ngd_ctrl *ctrl = dev_get_drvdata(dev); int ret = 0; @@ -1491,7 +1490,6 @@ static int qcom_slim_ngd_runtime_suspend(struct device *dev) return ret; } -#endif static const struct dev_pm_ops qcom_slim_ngd_dev_pm_ops = { SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend, diff --git a/drivers/soc/qcom/rmtfs_mem.c b/drivers/soc/qcom/rmtfs_mem.c index 8a3678c2e83cf..97bb5989aa211 100644 --- a/drivers/soc/qcom/rmtfs_mem.c +++ b/drivers/soc/qcom/rmtfs_mem.c @@ -212,6 +212,11 @@ static int qcom_rmtfs_mem_probe(struct platform_device *pdev) dev_err(&pdev->dev, "failed to parse qcom,vmid\n"); goto remove_cdev; } else if (!ret) { + if (!qcom_scm_is_available()) { + ret = -EPROBE_DEFER; + goto remove_cdev; + } + perms[0].vmid = QCOM_SCM_VMID_HLOS; perms[0].perm = QCOM_SCM_PERM_RW; perms[1].vmid = vmid; diff --git a/drivers/soc/tegra/pmc.c 
b/drivers/soc/tegra/pmc.c index 2d6f3fcf32110..ed71a4c9c8b29 100644 --- a/drivers/soc/tegra/pmc.c +++ b/drivers/soc/tegra/pmc.c @@ -1288,7 +1288,7 @@ static void tegra_pmc_init_tsense_reset(struct tegra_pmc *pmc) if (!pmc->soc->has_tsense_reset) return; - np = of_find_node_by_name(pmc->dev->of_node, "i2c-thermtrip"); + np = of_get_child_by_name(pmc->dev->of_node, "i2c-thermtrip"); if (!np) { dev_warn(dev, "i2c-thermtrip node not found, %s.\n", disabled); return; diff --git a/drivers/soc/ti/knav_qmss.h b/drivers/soc/ti/knav_qmss.h index 3efc47e82973b..bd040c29c4bf8 100644 --- a/drivers/soc/ti/knav_qmss.h +++ b/drivers/soc/ti/knav_qmss.h @@ -329,8 +329,8 @@ struct knav_range_ops { }; struct knav_irq_info { - int irq; - u32 cpu_map; + int irq; + struct cpumask *cpu_mask; }; struct knav_range_info { diff --git a/drivers/soc/ti/knav_qmss_acc.c b/drivers/soc/ti/knav_qmss_acc.c index 316e82e46f6cb..2f7fb2dcc1d66 100644 --- a/drivers/soc/ti/knav_qmss_acc.c +++ b/drivers/soc/ti/knav_qmss_acc.c @@ -205,18 +205,18 @@ static int knav_range_setup_acc_irq(struct knav_range_info *range, { struct knav_device *kdev = range->kdev; struct knav_acc_channel *acc; - unsigned long cpu_map; + struct cpumask *cpu_mask; int ret = 0, irq; u32 old, new; if (range->flags & RANGE_MULTI_QUEUE) { acc = range->acc; irq = range->irqs[0].irq; - cpu_map = range->irqs[0].cpu_map; + cpu_mask = range->irqs[0].cpu_mask; } else { acc = range->acc + queue; irq = range->irqs[queue].irq; - cpu_map = range->irqs[queue].cpu_map; + cpu_mask = range->irqs[queue].cpu_mask; } old = acc->open_mask; @@ -239,8 +239,8 @@ static int knav_range_setup_acc_irq(struct knav_range_info *range, acc->name, acc->name); ret = request_irq(irq, knav_acc_int_handler, 0, acc->name, range); - if (!ret && cpu_map) { - ret = irq_set_affinity_hint(irq, to_cpumask(&cpu_map)); + if (!ret && cpu_mask) { + ret = irq_set_affinity_hint(irq, cpu_mask); if (ret) { dev_warn(range->kdev->dev, "Failed to set IRQ affinity\n"); diff --git 
a/drivers/soc/ti/knav_qmss_queue.c b/drivers/soc/ti/knav_qmss_queue.c index 6755f2af56195..ef36acc0e7088 100644 --- a/drivers/soc/ti/knav_qmss_queue.c +++ b/drivers/soc/ti/knav_qmss_queue.c @@ -118,19 +118,17 @@ static int knav_queue_setup_irq(struct knav_range_info *range, struct knav_queue_inst *inst) { unsigned queue = inst->id - range->queue_base; - unsigned long cpu_map; int ret = 0, irq; if (range->flags & RANGE_HAS_IRQ) { irq = range->irqs[queue].irq; - cpu_map = range->irqs[queue].cpu_map; ret = request_irq(irq, knav_queue_int_handler, 0, inst->irq_name, inst); if (ret) return ret; disable_irq(irq); - if (cpu_map) { - ret = irq_set_affinity_hint(irq, to_cpumask(&cpu_map)); + if (range->irqs[queue].cpu_mask) { + ret = irq_set_affinity_hint(irq, range->irqs[queue].cpu_mask); if (ret) { dev_warn(range->kdev->dev, "Failed to set IRQ affinity\n"); @@ -1262,9 +1260,19 @@ static int knav_setup_queue_range(struct knav_device *kdev, range->num_irqs++; - if (IS_ENABLED(CONFIG_SMP) && oirq.args_count == 3) - range->irqs[i].cpu_map = - (oirq.args[2] & 0x0000ff00) >> 8; + if (IS_ENABLED(CONFIG_SMP) && oirq.args_count == 3) { + unsigned long mask; + int bit; + + range->irqs[i].cpu_mask = devm_kzalloc(dev, + cpumask_size(), GFP_KERNEL); + if (!range->irqs[i].cpu_mask) + return -ENOMEM; + + mask = (oirq.args[2] & 0x0000ff00) >> 8; + for_each_set_bit(bit, &mask, BITS_PER_LONG) + cpumask_set_cpu(bit, range->irqs[i].cpu_mask); + } } range->num_irqs = min(range->num_irqs, range->num_queues); diff --git a/drivers/soundwire/Kconfig b/drivers/soundwire/Kconfig index 19c8efb9a5ee7..a4b03e8cd694a 100644 --- a/drivers/soundwire/Kconfig +++ b/drivers/soundwire/Kconfig @@ -4,6 +4,7 @@ menuconfig SOUNDWIRE bool "SoundWire support" + depends on !SDW ---help--- SoundWire is a 2-Pin interface with data and clock line ratified by the MIPI Alliance. 
SoundWire is used for transporting data diff --git a/drivers/spi/spi-bcm-qspi.c b/drivers/spi/spi-bcm-qspi.c index 8612525fa4e34..584bcb018a62d 100644 --- a/drivers/spi/spi-bcm-qspi.c +++ b/drivers/spi/spi-bcm-qspi.c @@ -89,7 +89,7 @@ #define BSPI_BPP_MODE_SELECT_MASK BIT(8) #define BSPI_BPP_ADDR_SELECT_MASK BIT(16) -#define BSPI_READ_LENGTH 512 +#define BSPI_READ_LENGTH 256 /* MSPI register offsets */ #define MSPI_SPCR0_LSB 0x000 @@ -355,7 +355,7 @@ static int bcm_qspi_bspi_set_flex_mode(struct bcm_qspi *qspi, int bpc = 0, bpp = 0; u8 command = op->cmd.opcode; int width = op->cmd.buswidth ? op->cmd.buswidth : SPI_NBITS_SINGLE; - int addrlen = op->addr.nbytes * 8; + int addrlen = op->addr.nbytes; int flex_mode = 1; dev_dbg(&qspi->pdev->dev, "set flex mode w %x addrlen %x hp %d\n", diff --git a/drivers/spi/spi-bcm2835.c b/drivers/spi/spi-bcm2835.c index f35cc10772f66..25abf2d1732a0 100644 --- a/drivers/spi/spi-bcm2835.c +++ b/drivers/spi/spi-bcm2835.c @@ -88,7 +88,7 @@ struct bcm2835_spi { u8 *rx_buf; int tx_len; int rx_len; - bool dma_pending; + unsigned int dma_pending; }; static inline u32 bcm2835_rd(struct bcm2835_spi *bs, unsigned reg) @@ -155,8 +155,7 @@ static irqreturn_t bcm2835_spi_interrupt(int irq, void *dev_id) /* Write as many bytes as possible to FIFO */ bcm2835_wr_fifo(bs); - /* based on flags decide if we can finish the transfer */ - if (bcm2835_rd(bs, BCM2835_SPI_CS) & BCM2835_SPI_CS_DONE) { + if (!bs->rx_len) { /* Transfer complete - reset SPI HW */ bcm2835_spi_reset_hw(master); /* wake up the framework */ @@ -233,10 +232,9 @@ static void bcm2835_spi_dma_done(void *data) * is called the tx-dma must have finished - can't get to this * situation otherwise... 
*/ - dmaengine_terminate_all(master->dma_tx); - - /* mark as no longer pending */ - bs->dma_pending = 0; + if (cmpxchg(&bs->dma_pending, true, false)) { + dmaengine_terminate_all(master->dma_tx); + } /* and mark as completed */; complete(&master->xfer_completion); @@ -342,6 +340,7 @@ static int bcm2835_spi_transfer_one_dma(struct spi_master *master, if (ret) { /* need to reset on errors */ dmaengine_terminate_all(master->dma_tx); + bs->dma_pending = false; bcm2835_spi_reset_hw(master); return ret; } @@ -617,10 +616,9 @@ static void bcm2835_spi_handle_err(struct spi_master *master, struct bcm2835_spi *bs = spi_master_get_devdata(master); /* if an error occurred and we have an active dma, then terminate */ - if (bs->dma_pending) { + if (cmpxchg(&bs->dma_pending, true, false)) { dmaengine_terminate_all(master->dma_tx); dmaengine_terminate_all(master->dma_rx); - bs->dma_pending = 0; } /* and reset */ bcm2835_spi_reset_hw(master); diff --git a/drivers/spi/spi-ep93xx.c b/drivers/spi/spi-ep93xx.c index f1526757aaf6d..79fc3940245a4 100644 --- a/drivers/spi/spi-ep93xx.c +++ b/drivers/spi/spi-ep93xx.c @@ -246,6 +246,19 @@ static int ep93xx_spi_read_write(struct spi_master *master) return -EINPROGRESS; } +static enum dma_transfer_direction +ep93xx_dma_data_to_trans_dir(enum dma_data_direction dir) +{ + switch (dir) { + case DMA_TO_DEVICE: + return DMA_MEM_TO_DEV; + case DMA_FROM_DEVICE: + return DMA_DEV_TO_MEM; + default: + return DMA_TRANS_NONE; + } +} + /** * ep93xx_spi_dma_prepare() - prepares a DMA transfer * @master: SPI master @@ -257,7 +270,7 @@ static int ep93xx_spi_read_write(struct spi_master *master) */ static struct dma_async_tx_descriptor * ep93xx_spi_dma_prepare(struct spi_master *master, - enum dma_transfer_direction dir) + enum dma_data_direction dir) { struct ep93xx_spi *espi = spi_master_get_devdata(master); struct spi_transfer *xfer = master->cur_msg->state; @@ -277,9 +290,9 @@ ep93xx_spi_dma_prepare(struct spi_master *master, buswidth = 
DMA_SLAVE_BUSWIDTH_1_BYTE; memset(&conf, 0, sizeof(conf)); - conf.direction = dir; + conf.direction = ep93xx_dma_data_to_trans_dir(dir); - if (dir == DMA_DEV_TO_MEM) { + if (dir == DMA_FROM_DEVICE) { chan = espi->dma_rx; buf = xfer->rx_buf; sgt = &espi->rx_sgt; @@ -343,7 +356,8 @@ ep93xx_spi_dma_prepare(struct spi_master *master, if (!nents) return ERR_PTR(-ENOMEM); - txd = dmaengine_prep_slave_sg(chan, sgt->sgl, nents, dir, DMA_CTRL_ACK); + txd = dmaengine_prep_slave_sg(chan, sgt->sgl, nents, conf.direction, + DMA_CTRL_ACK); if (!txd) { dma_unmap_sg(chan->device->dev, sgt->sgl, sgt->nents, dir); return ERR_PTR(-ENOMEM); @@ -360,13 +374,13 @@ ep93xx_spi_dma_prepare(struct spi_master *master, * unmapped. */ static void ep93xx_spi_dma_finish(struct spi_master *master, - enum dma_transfer_direction dir) + enum dma_data_direction dir) { struct ep93xx_spi *espi = spi_master_get_devdata(master); struct dma_chan *chan; struct sg_table *sgt; - if (dir == DMA_DEV_TO_MEM) { + if (dir == DMA_FROM_DEVICE) { chan = espi->dma_rx; sgt = &espi->rx_sgt; } else { @@ -381,8 +395,8 @@ static void ep93xx_spi_dma_callback(void *callback_param) { struct spi_master *master = callback_param; - ep93xx_spi_dma_finish(master, DMA_MEM_TO_DEV); - ep93xx_spi_dma_finish(master, DMA_DEV_TO_MEM); + ep93xx_spi_dma_finish(master, DMA_TO_DEVICE); + ep93xx_spi_dma_finish(master, DMA_FROM_DEVICE); spi_finalize_current_transfer(master); } @@ -392,15 +406,15 @@ static int ep93xx_spi_dma_transfer(struct spi_master *master) struct ep93xx_spi *espi = spi_master_get_devdata(master); struct dma_async_tx_descriptor *rxd, *txd; - rxd = ep93xx_spi_dma_prepare(master, DMA_DEV_TO_MEM); + rxd = ep93xx_spi_dma_prepare(master, DMA_FROM_DEVICE); if (IS_ERR(rxd)) { dev_err(&master->dev, "DMA RX failed: %ld\n", PTR_ERR(rxd)); return PTR_ERR(rxd); } - txd = ep93xx_spi_dma_prepare(master, DMA_MEM_TO_DEV); + txd = ep93xx_spi_dma_prepare(master, DMA_TO_DEVICE); if (IS_ERR(txd)) { - ep93xx_spi_dma_finish(master, 
DMA_DEV_TO_MEM); + ep93xx_spi_dma_finish(master, DMA_FROM_DEVICE); dev_err(&master->dev, "DMA TX failed: %ld\n", PTR_ERR(txd)); return PTR_ERR(txd); } diff --git a/drivers/spi/spi-gpio.c b/drivers/spi/spi-gpio.c index 421bfc7dda674..088772ebef9bd 100644 --- a/drivers/spi/spi-gpio.c +++ b/drivers/spi/spi-gpio.c @@ -295,9 +295,11 @@ static int spi_gpio_request(struct device *dev, spi_gpio->miso = devm_gpiod_get_optional(dev, "miso", GPIOD_IN); if (IS_ERR(spi_gpio->miso)) return PTR_ERR(spi_gpio->miso); - if (!spi_gpio->miso) - /* HW configuration without MISO pin */ - *mflags |= SPI_MASTER_NO_RX; + /* + * No setting SPI_MASTER_NO_RX here - if there is only a MOSI + * pin connected the host can still do RX by changing the + * direction of the line. + */ spi_gpio->sck = devm_gpiod_get(dev, "sck", GPIOD_OUT_LOW); if (IS_ERR(spi_gpio->sck)) @@ -423,7 +425,7 @@ static int spi_gpio_probe(struct platform_device *pdev) spi_gpio->bitbang.chipselect = spi_gpio_chipselect; spi_gpio->bitbang.set_line_direction = spi_gpio_set_direction; - if ((master_flags & (SPI_MASTER_NO_TX | SPI_MASTER_NO_RX)) == 0) { + if ((master_flags & SPI_MASTER_NO_TX) == 0) { spi_gpio->bitbang.txrx_word[SPI_MODE_0] = spi_gpio_txrx_word_mode0; spi_gpio->bitbang.txrx_word[SPI_MODE_1] = spi_gpio_txrx_word_mode1; spi_gpio->bitbang.txrx_word[SPI_MODE_2] = spi_gpio_txrx_word_mode2; diff --git a/drivers/spi/spi-mem.c b/drivers/spi/spi-mem.c index e43842c7a31a9..eb72dba71d832 100644 --- a/drivers/spi/spi-mem.c +++ b/drivers/spi/spi-mem.c @@ -346,10 +346,25 @@ EXPORT_SYMBOL_GPL(spi_mem_get_name); int spi_mem_adjust_op_size(struct spi_mem *mem, struct spi_mem_op *op) { struct spi_controller *ctlr = mem->spi->controller; + size_t len; + + len = sizeof(op->cmd.opcode) + op->addr.nbytes + op->dummy.nbytes; if (ctlr->mem_ops && ctlr->mem_ops->adjust_op_size) return ctlr->mem_ops->adjust_op_size(mem, op); + if (!ctlr->mem_ops || !ctlr->mem_ops->exec_op) { + if (len > spi_max_transfer_size(mem->spi)) + return -EINVAL; + 
+ op->data.nbytes = min3((size_t)op->data.nbytes, + spi_max_transfer_size(mem->spi), + spi_max_message_size(mem->spi) - + len); + if (!op->data.nbytes) + return -EINVAL; + } + return 0; } EXPORT_SYMBOL_GPL(spi_mem_adjust_op_size); diff --git a/drivers/spi/spi-omap2-mcspi.c b/drivers/spi/spi-omap2-mcspi.c index 508c61c669e7d..e2be7da743438 100644 --- a/drivers/spi/spi-omap2-mcspi.c +++ b/drivers/spi/spi-omap2-mcspi.c @@ -1455,13 +1455,26 @@ static int omap2_mcspi_remove(struct platform_device *pdev) /* work with hotplug and coldplug */ MODULE_ALIAS("platform:omap2_mcspi"); -#ifdef CONFIG_SUSPEND -static int omap2_mcspi_suspend_noirq(struct device *dev) +static int __maybe_unused omap2_mcspi_suspend(struct device *dev) { - return pinctrl_pm_select_sleep_state(dev); + struct spi_master *master = dev_get_drvdata(dev); + struct omap2_mcspi *mcspi = spi_master_get_devdata(master); + int error; + + error = pinctrl_pm_select_sleep_state(dev); + if (error) + dev_warn(mcspi->dev, "%s: failed to set pins: %i\n", + __func__, error); + + error = spi_master_suspend(master); + if (error) + dev_warn(mcspi->dev, "%s: master suspend failed: %i\n", + __func__, error); + + return pm_runtime_force_suspend(dev); } -static int omap2_mcspi_resume_noirq(struct device *dev) +static int __maybe_unused omap2_mcspi_resume(struct device *dev) { struct spi_master *master = dev_get_drvdata(dev); struct omap2_mcspi *mcspi = spi_master_get_devdata(master); @@ -1472,17 +1485,17 @@ static int omap2_mcspi_resume_noirq(struct device *dev) dev_warn(mcspi->dev, "%s: failed to set pins: %i\n", __func__, error); - return 0; -} + error = spi_master_resume(master); + if (error) + dev_warn(mcspi->dev, "%s: master resume failed: %i\n", + __func__, error); -#else -#define omap2_mcspi_suspend_noirq NULL -#define omap2_mcspi_resume_noirq NULL -#endif + return pm_runtime_force_resume(dev); +} static const struct dev_pm_ops omap2_mcspi_pm_ops = { - .suspend_noirq = omap2_mcspi_suspend_noirq, - .resume_noirq = 
omap2_mcspi_resume_noirq, + SET_SYSTEM_SLEEP_PM_OPS(omap2_mcspi_suspend, + omap2_mcspi_resume) .runtime_resume = omap_mcspi_runtime_resume, }; diff --git a/drivers/staging/Kconfig b/drivers/staging/Kconfig index 1abf76be2aa8c..89735a5fd9e12 100644 --- a/drivers/staging/Kconfig +++ b/drivers/staging/Kconfig @@ -126,4 +126,6 @@ source "drivers/staging/axis-fifo/Kconfig" source "drivers/staging/erofs/Kconfig" +source "drivers/staging/igb_avb/Kconfig" + endif # STAGING diff --git a/drivers/staging/Makefile b/drivers/staging/Makefile index ab0cbe8815b1c..f7d9b0acf3615 100644 --- a/drivers/staging/Makefile +++ b/drivers/staging/Makefile @@ -53,3 +53,4 @@ obj-$(CONFIG_SOC_MT7621) += mt7621-dts/ obj-$(CONFIG_STAGING_GASKET_FRAMEWORK) += gasket/ obj-$(CONFIG_XIL_AXIS_FIFO) += axis-fifo/ obj-$(CONFIG_EROFS_FS) += erofs/ +obj-$(CONFIG_IGB_AVB) += igb_avb/ diff --git a/drivers/staging/erofs/unzip_vle.c b/drivers/staging/erofs/unzip_vle.c index 8721f0a41d157..0346630b67c8c 100644 --- a/drivers/staging/erofs/unzip_vle.c +++ b/drivers/staging/erofs/unzip_vle.c @@ -724,13 +724,18 @@ static void z_erofs_vle_unzip_kickoff(void *ptr, int bios) struct z_erofs_vle_unzip_io *io = tagptr_unfold_ptr(t); bool background = tagptr_unfold_tags(t); - if (atomic_add_return(bios, &io->pending_bios)) + if (!background) { + unsigned long flags; + + spin_lock_irqsave(&io->u.wait.lock, flags); + if (!atomic_add_return(bios, &io->pending_bios)) + wake_up_locked(&io->u.wait); + spin_unlock_irqrestore(&io->u.wait.lock, flags); return; + } - if (background) + if (!atomic_add_return(bios, &io->pending_bios)) queue_work(z_erofs_workqueue, &io->u.work); - else - wake_up(&io->u.wait); } static inline void z_erofs_vle_read_endio(struct bio *bio) @@ -1490,6 +1495,7 @@ static erofs_off_t vle_get_logical_extent_head( unsigned long long ofs; const unsigned int clusterbits = EROFS_SB(inode->i_sb)->clusterbits; const unsigned int clustersize = 1 << clusterbits; + unsigned int delta0; if (page->index != blkaddr) { 
kunmap_atomic(*kaddr_iter); @@ -1504,12 +1510,13 @@ static erofs_off_t vle_get_logical_extent_head( di = *kaddr_iter + vle_extent_blkoff(inode, lcn); switch (vle_cluster_type(di)) { case Z_EROFS_VLE_CLUSTER_TYPE_NONHEAD: - BUG_ON(!di->di_u.delta[0]); - BUG_ON(lcn < di->di_u.delta[0]); + delta0 = le16_to_cpu(di->di_u.delta[0]); + DBG_BUGON(!delta0); + DBG_BUGON(lcn < delta0); ofs = vle_get_logical_extent_head(inode, page_iter, kaddr_iter, - lcn - di->di_u.delta[0], pcn, flags); + lcn - delta0, pcn, flags); break; case Z_EROFS_VLE_CLUSTER_TYPE_PLAIN: *flags ^= EROFS_MAP_ZIPPED; diff --git a/drivers/staging/greybus/tools/Android.mk b/drivers/staging/greybus/tools/Android.mk deleted file mode 100644 index fdadbf611757d..0000000000000 --- a/drivers/staging/greybus/tools/Android.mk +++ /dev/null @@ -1,10 +0,0 @@ -LOCAL_PATH:= $(call my-dir) - -include $(CLEAR_VARS) - -LOCAL_SRC_FILES:= loopback_test.c -LOCAL_MODULE_TAGS := optional -LOCAL_MODULE := gb_loopback_test - -include $(BUILD_EXECUTABLE) - diff --git a/drivers/staging/igb_avb/COPYING b/drivers/staging/igb_avb/COPYING new file mode 100644 index 0000000000000..d159169d10508 --- /dev/null +++ b/drivers/staging/igb_avb/COPYING @@ -0,0 +1,339 @@ + GNU GENERAL PUBLIC LICENSE + Version 2, June 1991 + + Copyright (C) 1989, 1991 Free Software Foundation, Inc., + 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + Everyone is permitted to copy and distribute verbatim copies + of this license document, but changing it is not allowed. + + Preamble + + The licenses for most software are designed to take away your +freedom to share and change it. By contrast, the GNU General Public +License is intended to guarantee your freedom to share and change free +software--to make sure the software is free for all its users. This +General Public License applies to most of the Free Software +Foundation's software and to any other program whose authors commit to +using it. 
(Some other Free Software Foundation software is covered by +the GNU Lesser General Public License instead.) You can apply it to +your programs, too. + + When we speak of free software, we are referring to freedom, not +price. Our General Public Licenses are designed to make sure that you +have the freedom to distribute copies of free software (and charge for +this service if you wish), that you receive source code or can get it +if you want it, that you can change the software or use pieces of it +in new free programs; and that you know you can do these things. + + To protect your rights, we need to make restrictions that forbid +anyone to deny you these rights or to ask you to surrender the rights. +These restrictions translate to certain responsibilities for you if you +distribute copies of the software, or if you modify it. + + For example, if you distribute copies of such a program, whether +gratis or for a fee, you must give the recipients all the rights that +you have. You must make sure that they, too, receive or can get the +source code. And you must show them these terms so they know their +rights. + + We protect your rights with two steps: (1) copyright the software, and +(2) offer you this license which gives you legal permission to copy, +distribute and/or modify the software. + + Also, for each author's protection and ours, we want to make certain +that everyone understands that there is no warranty for this free +software. If the software is modified by someone else and passed on, we +want its recipients to know that what they have is not the original, so +that any problems introduced by others will not reflect on the original +authors' reputations. + + Finally, any free program is threatened constantly by software +patents. We wish to avoid the danger that redistributors of a free +program will individually obtain patent licenses, in effect making the +program proprietary. 
To prevent this, we have made it clear that any +patent must be licensed for everyone's free use or not licensed at all. + + The precise terms and conditions for copying, distribution and +modification follow. + + GNU GENERAL PUBLIC LICENSE + TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION + + 0. This License applies to any program or other work which contains +a notice placed by the copyright holder saying it may be distributed +under the terms of this General Public License. The "Program", below, +refers to any such program or work, and a "work based on the Program" +means either the Program or any derivative work under copyright law: +that is to say, a work containing the Program or a portion of it, +either verbatim or with modifications and/or translated into another +language. (Hereinafter, translation is included without limitation in +the term "modification".) Each licensee is addressed as "you". + +Activities other than copying, distribution and modification are not +covered by this License; they are outside its scope. The act of +running the Program is not restricted, and the output from the Program +is covered only if its contents constitute a work based on the +Program (independent of having been made by running the Program). +Whether that is true depends on what the Program does. + + 1. You may copy and distribute verbatim copies of the Program's +source code as you receive it, in any medium, provided that you +conspicuously and appropriately publish on each copy an appropriate +copyright notice and disclaimer of warranty; keep intact all the +notices that refer to this License and to the absence of any warranty; +and give any other recipients of the Program a copy of this License +along with the Program. + +You may charge a fee for the physical act of transferring a copy, and +you may at your option offer warranty protection in exchange for a fee. + + 2. 
You may modify your copy or copies of the Program or any portion +of it, thus forming a work based on the Program, and copy and +distribute such modifications or work under the terms of Section 1 +above, provided that you also meet all of these conditions: + + a) You must cause the modified files to carry prominent notices + stating that you changed the files and the date of any change. + + b) You must cause any work that you distribute or publish, that in + whole or in part contains or is derived from the Program or any + part thereof, to be licensed as a whole at no charge to all third + parties under the terms of this License. + + c) If the modified program normally reads commands interactively + when run, you must cause it, when started running for such + interactive use in the most ordinary way, to print or display an + announcement including an appropriate copyright notice and a + notice that there is no warranty (or else, saying that you provide + a warranty) and that users may redistribute the program under + these conditions, and telling the user how to view a copy of this + License. (Exception: if the Program itself is interactive but + does not normally print such an announcement, your work based on + the Program is not required to print an announcement.) + +These requirements apply to the modified work as a whole. If +identifiable sections of that work are not derived from the Program, +and can be reasonably considered independent and separate works in +themselves, then this License, and its terms, do not apply to those +sections when you distribute them as separate works. But when you +distribute the same sections as part of a whole which is a work based +on the Program, the distribution of the whole must be on the terms of +this License, whose permissions for other licensees extend to the +entire whole, and thus to each and every part regardless of who wrote it. 
+ +Thus, it is not the intent of this section to claim rights or contest +your rights to work written entirely by you; rather, the intent is to +exercise the right to control the distribution of derivative or +collective works based on the Program. + +In addition, mere aggregation of another work not based on the Program +with the Program (or with a work based on the Program) on a volume of +a storage or distribution medium does not bring the other work under +the scope of this License. + + 3. You may copy and distribute the Program (or a work based on it, +under Section 2) in object code or executable form under the terms of +Sections 1 and 2 above provided that you also do one of the following: + + a) Accompany it with the complete corresponding machine-readable + source code, which must be distributed under the terms of Sections + 1 and 2 above on a medium customarily used for software interchange; or, + + b) Accompany it with a written offer, valid for at least three + years, to give any third party, for a charge no more than your + cost of physically performing source distribution, a complete + machine-readable copy of the corresponding source code, to be + distributed under the terms of Sections 1 and 2 above on a medium + customarily used for software interchange; or, + + c) Accompany it with the information you received as to the offer + to distribute corresponding source code. (This alternative is + allowed only for noncommercial distribution and only if you + received the program in object code or executable form with such + an offer, in accord with Subsection b above.) + +The source code for a work means the preferred form of the work for +making modifications to it. For an executable work, complete source +code means all the source code for all modules it contains, plus any +associated interface definition files, plus the scripts used to +control compilation and installation of the executable. 
However, as a +special exception, the source code distributed need not include +anything that is normally distributed (in either source or binary +form) with the major components (compiler, kernel, and so on) of the +operating system on which the executable runs, unless that component +itself accompanies the executable. + +If distribution of executable or object code is made by offering +access to copy from a designated place, then offering equivalent +access to copy the source code from the same place counts as +distribution of the source code, even though third parties are not +compelled to copy the source along with the object code. + + 4. You may not copy, modify, sublicense, or distribute the Program +except as expressly provided under this License. Any attempt +otherwise to copy, modify, sublicense or distribute the Program is +void, and will automatically terminate your rights under this License. +However, parties who have received copies, or rights, from you under +this License will not have their licenses terminated so long as such +parties remain in full compliance. + + 5. You are not required to accept this License, since you have not +signed it. However, nothing else grants you permission to modify or +distribute the Program or its derivative works. These actions are +prohibited by law if you do not accept this License. Therefore, by +modifying or distributing the Program (or any work based on the +Program), you indicate your acceptance of this License to do so, and +all its terms and conditions for copying, distributing or modifying +the Program or works based on it. + + 6. Each time you redistribute the Program (or any work based on the +Program), the recipient automatically receives a license from the +original licensor to copy, distribute or modify the Program subject to +these terms and conditions. You may not impose any further +restrictions on the recipients' exercise of the rights granted herein. 
+You are not responsible for enforcing compliance by third parties to +this License. + + 7. If, as a consequence of a court judgment or allegation of patent +infringement or for any other reason (not limited to patent issues), +conditions are imposed on you (whether by court order, agreement or +otherwise) that contradict the conditions of this License, they do not +excuse you from the conditions of this License. If you cannot +distribute so as to satisfy simultaneously your obligations under this +License and any other pertinent obligations, then as a consequence you +may not distribute the Program at all. For example, if a patent +license would not permit royalty-free redistribution of the Program by +all those who receive copies directly or indirectly through you, then +the only way you could satisfy both it and this License would be to +refrain entirely from distribution of the Program. + +If any portion of this section is held invalid or unenforceable under +any particular circumstance, the balance of the section is intended to +apply and the section as a whole is intended to apply in other +circumstances. + +It is not the purpose of this section to induce you to infringe any +patents or other property right claims or to contest validity of any +such claims; this section has the sole purpose of protecting the +integrity of the free software distribution system, which is +implemented by public license practices. Many people have made +generous contributions to the wide range of software distributed +through that system in reliance on consistent application of that +system; it is up to the author/donor to decide if he or she is willing +to distribute software through any other system and a licensee cannot +impose that choice. + +This section is intended to make thoroughly clear what is believed to +be a consequence of the rest of this License. + + 8. 
If the distribution and/or use of the Program is restricted in +certain countries either by patents or by copyrighted interfaces, the +original copyright holder who places the Program under this License +may add an explicit geographical distribution limitation excluding +those countries, so that distribution is permitted only in or among +countries not thus excluded. In such case, this License incorporates +the limitation as if written in the body of this License. + + 9. The Free Software Foundation may publish revised and/or new versions +of the General Public License from time to time. Such new versions will +be similar in spirit to the present version, but may differ in detail to +address new problems or concerns. + +Each version is given a distinguishing version number. If the Program +specifies a version number of this License which applies to it and "any +later version", you have the option of following the terms and conditions +either of that version or of any later version published by the Free +Software Foundation. If the Program does not specify a version number of +this License, you may choose any version ever published by the Free Software +Foundation. + + 10. If you wish to incorporate parts of the Program into other free +programs whose distribution conditions are different, write to the author +to ask for permission. For software which is copyrighted by the Free +Software Foundation, write to the Free Software Foundation; we sometimes +make exceptions for this. Our decision will be guided by the two goals +of preserving the free status of all derivatives of our free software and +of promoting the sharing and reuse of software generally. + + NO WARRANTY + + 11. BECAUSE THE PROGRAM IS LICENSED FREE OF CHARGE, THERE IS NO WARRANTY +FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW. 
EXCEPT WHEN +OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES +PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED +OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF +MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS +TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU. SHOULD THE +PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING, +REPAIR OR CORRECTION. + + 12. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING +WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY AND/OR +REDISTRIBUTE THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, +INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING +OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED +TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY +YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER +PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE +POSSIBILITY OF SUCH DAMAGES. + + END OF TERMS AND CONDITIONS + + How to Apply These Terms to Your New Programs + + If you develop a new program, and you want it to be of the greatest +possible use to the public, the best way to achieve this is to make it +free software which everyone can redistribute and change under these terms. + + To do so, attach the following notices to the program. It is safest +to attach them to the start of each source file to most effectively +convey the exclusion of warranty; and each file should have at least +the "copyright" line and a pointer to where the full notice is found. + + + Copyright (C) + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. 
+ + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License along + with this program; if not, write to the Free Software Foundation, Inc., + 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. + +Also add information on how to contact you by electronic and paper mail. + +If the program is interactive, make it output a short notice like this +when it starts in an interactive mode: + + Gnomovision version 69, Copyright (C) year name of author + Gnomovision comes with ABSOLUTELY NO WARRANTY; for details type `show w'. + This is free software, and you are welcome to redistribute it + under certain conditions; type `show c' for details. + +The hypothetical commands `show w' and `show c' should show the appropriate +parts of the General Public License. Of course, the commands you use may +be called something other than `show w' and `show c'; they could even be +mouse-clicks or menu items--whatever suits your program. + +You should also get your employer (if you work as a programmer) or your +school, if any, to sign a "copyright disclaimer" for the program, if +necessary. Here is a sample; alter the names: + + Yoyodyne, Inc., hereby disclaims all copyright interest in the program + `Gnomovision' (which makes passes at compilers) written by James Hacker. + + , 1 April 1989 + Ty Coon, President of Vice + +This General Public License does not permit incorporating your program into +proprietary programs. If your program is a subroutine library, you may +consider it more useful to permit linking proprietary applications with the +library. If this is what you want to do, use the GNU Lesser General +Public License instead of this License. 
diff --git a/drivers/staging/igb_avb/Kconfig b/drivers/staging/igb_avb/Kconfig new file mode 100644 index 0000000000000..b432034906ced --- /dev/null +++ b/drivers/staging/igb_avb/Kconfig @@ -0,0 +1,17 @@ +config IGB_AVB + tristate "Avnu IGB AVB driver" + depends on IGB=n && PCI + select DCA + default n + ---help--- + This is the Intel I210 Ethernet driver that lives + at https://github.com/AVnu/OpenAvnu/tree/master/ + kmod/igb. Note that this is different from drivers/ + net/ethernet/intel/igb. It can be used for developing + Audio/Video Bridging applications, Industrial Ethernet + applications which require precise timing control over + frame transmission, or test harnesses for measuring system + latencies and sampling events. It is exclusive with the + in-tree IGB driver, so only one of them can be enabled + at any point in time. There are also coexistance issues with + the e1000 and e1000e. diff --git a/drivers/staging/igb_avb/LICENSE b/drivers/staging/igb_avb/LICENSE new file mode 100644 index 0000000000000..b84d7002e5c4d --- /dev/null +++ b/drivers/staging/igb_avb/LICENSE @@ -0,0 +1,24 @@ + + Intel(R) Gigabit Ethernet Linux driver + Copyright(c) 2007-2012 Intel Corporation. + + This program is free software; you can redistribute it and/or modify it + under the terms and conditions of the GNU General Public License, + version 2, as published by the Free Software Foundation. + + This program is distributed in the hope it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. + + You should have received a copy of the GNU General Public License along with + this program; if not, write to the Free Software Foundation, Inc., + 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + + The full GNU General Public License is included in this distribution in + the file called "COPYING". 
+ + Contact Information: + e1000-devel Mailing List + Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + diff --git a/drivers/staging/igb_avb/Makefile b/drivers/staging/igb_avb/Makefile new file mode 100644 index 0000000000000..eaae47e157f31 --- /dev/null +++ b/drivers/staging/igb_avb/Makefile @@ -0,0 +1,18 @@ +obj-$(CONFIG_IGB_AVB) += igb_avb.o + +igb_avb-y := igb_main.o \ + e1000_82575.o \ + e1000_i210.o \ + e1000_mac.o \ + e1000_nvm.o e1000_phy.o \ + e1000_manage.o \ + igb_param.o \ + igb_ethtool.o \ + kcompat.o \ + e1000_api.o \ + e1000_mbx.o \ + igb_vmdq.o \ + igb_procfs.o \ + igb_hwmon.o \ + igb_debugfs.o \ + igb_ptp.o diff --git a/drivers/staging/igb_avb/README b/drivers/staging/igb_avb/README new file mode 100644 index 0000000000000..c08ff5d4d18d5 --- /dev/null +++ b/drivers/staging/igb_avb/README @@ -0,0 +1,65 @@ +INTRODUCTION + +This component demonstrates various features of the Intel I210 Ethernet +controller. These features can be used for developing Audio/Video Bridging +applications, Industrial Ethernet applications which require precise timing +control over frame transmission, or test harnesses for measuring system +latencies and sampling events. + +This component - igb_avb - is limited to the Intel I210 Ethernet controller. +The kernel module can be loaded in parallel to existing in-kernel igb modules +which may be used on other supported Intel LAN controllers. Modifications are +required to the in-kernel drivers if the existing in-kernel igb driver has +support for the Intel I210. + +BUILDING + +The kernel igb module should be built which supports the latest Linux kernel +3.x PTP clock support. Unlike the standard igb driver, this version enables +PTP by default (and will fail to build without kernel PTP support enabled). + +RUNNING + +To install the kernel mode driver, you must have root permissions. 
Typically, +the driver is loaded by removing the currently running igb and running igb_avb: + sudo rmmod igb + sudo modprobe i2c_algo_bit + sudo modprobe dca + sudo modprobe ptp + sudo insmod ./igb_avb.ko + +Another option is to install the igb_avb driver in the "updates" directory +which will override igb for the other drivers claiming the same device ID. This +will allow the coexistence of igb and igb_avb. Copy igb_avb.ko to: + sudo cp igb_avb.ko /lib/modules/`uname -r`/updates/ + sudo depmod -a + modprobe igb_avb + +As the AVB Transmit queues (0,1) are mapped to a user-space application, +typical LAN traffic must be steered away from these queues. The driver +implements one method registering an ndo_select_queue handler to map traffic to +queue[3]. Another possibly faster method uses the the transmit packet steering +(XPS) functionality available since 2.6.35. An example script is below + +#!/bin/bash + +INTERFACE=p2p1 +export INTERFACE + +rmmod igb +rmmod igb_avb +insmod ./igb_avb.ko +sleep 1 +ifconfig $INTERFACE down +echo 0 > /sys/class/net/$INTERFACE/queues/tx-0/xps_cpus +echo 0 > /sys/class/net/$INTERFACE/queues/tx-1/xps_cpus +echo f > /sys/class/net/$INTERFACE/queues/tx-2/xps_cpus +echo f > /sys/class/net/$INTERFACE/queues/tx-3/xps_cpus +ifconfig $INTERFACE up + +You map also want to disable the network manager from 'managing' your +interface. The easiest way is to find the interface configuration scripts on +your distribution. On Fedora 18, these are located at +/etc/sysconfig/network-scripts/ifcfg-. Edit the file to set +'BOOTPROTO=none'. This eliminates DHCP trying to configure the interface while +you may be doing user-space application configuration. 
diff --git a/drivers/staging/igb_avb/e1000_82575.c b/drivers/staging/igb_avb/e1000_82575.c new file mode 100644 index 0000000000000..2fcc3bf3af396 --- /dev/null +++ b/drivers/staging/igb_avb/e1000_82575.c @@ -0,0 +1,3809 @@ +/******************************************************************************* + + Intel(R) Gigabit Ethernet Linux driver + Copyright(c) 2007-2015 Intel Corporation. + + This program is free software; you can redistribute it and/or modify it + under the terms and conditions of the GNU General Public License, + version 2, as published by the Free Software Foundation. + + This program is distributed in the hope it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. + + The full GNU General Public License is included in this distribution in + the file called "COPYING". + + Contact Information: + Linux NICS + e1000-devel Mailing List + Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 + +*******************************************************************************/ + +/* + * 82575EB Gigabit Network Connection + * 82575EB Gigabit Backplane Connection + * 82575GB Gigabit Network Connection + * 82576 Gigabit Network Connection + * 82576 Quad Port Gigabit Mezzanine Adapter + * 82580 Gigabit Network Connection + * I350 Gigabit Network Connection + */ + +#include "e1000_api.h" +#include "e1000_i210.h" + +static s32 e1000_init_phy_params_82575(struct e1000_hw *hw); +static s32 e1000_init_mac_params_82575(struct e1000_hw *hw); +static s32 e1000_acquire_phy_82575(struct e1000_hw *hw); +static void e1000_release_phy_82575(struct e1000_hw *hw); +static s32 e1000_acquire_nvm_82575(struct e1000_hw *hw); +static void e1000_release_nvm_82575(struct e1000_hw *hw); +static s32 e1000_check_for_link_82575(struct e1000_hw *hw); +static s32 e1000_check_for_link_media_swap(struct e1000_hw *hw); +static s32 e1000_get_cfg_done_82575(struct e1000_hw *hw); +static s32 e1000_get_link_up_info_82575(struct e1000_hw *hw, u16 *speed, + u16 *duplex); +static s32 e1000_phy_hw_reset_sgmii_82575(struct e1000_hw *hw); +static s32 e1000_read_phy_reg_sgmii_82575(struct e1000_hw *hw, u32 offset, + u16 *data); +static s32 e1000_reset_hw_82575(struct e1000_hw *hw); +static s32 e1000_reset_hw_82580(struct e1000_hw *hw); +static s32 e1000_read_phy_reg_82580(struct e1000_hw *hw, + u32 offset, u16 *data); +static s32 e1000_write_phy_reg_82580(struct e1000_hw *hw, + u32 offset, u16 data); +static s32 e1000_set_d0_lplu_state_82580(struct e1000_hw *hw, + bool active); +static s32 e1000_set_d3_lplu_state_82580(struct e1000_hw *hw, + bool active); +static s32 e1000_set_d0_lplu_state_82575(struct e1000_hw *hw, + bool active); +static s32 e1000_setup_copper_link_82575(struct e1000_hw *hw); +static s32 e1000_setup_serdes_link_82575(struct e1000_hw *hw); +static s32 e1000_get_media_type_82575(struct e1000_hw *hw); +#ifdef I2C_ENABLED +static s32 
e1000_set_sfp_media_type_82575(struct e1000_hw *hw); +#endif +static s32 e1000_valid_led_default_82575(struct e1000_hw *hw, u16 *data); +static s32 e1000_write_phy_reg_sgmii_82575(struct e1000_hw *hw, + u32 offset, u16 data); +static void e1000_clear_hw_cntrs_82575(struct e1000_hw *hw); +static s32 e1000_acquire_swfw_sync_82575(struct e1000_hw *hw, u16 mask); +static s32 e1000_get_pcs_speed_and_duplex_82575(struct e1000_hw *hw, + u16 *speed, u16 *duplex); +static s32 e1000_get_phy_id_82575(struct e1000_hw *hw); +static void e1000_release_swfw_sync_82575(struct e1000_hw *hw, u16 mask); +static bool e1000_sgmii_active_82575(struct e1000_hw *hw); +static s32 e1000_reset_init_script_82575(struct e1000_hw *hw); +static s32 e1000_read_mac_addr_82575(struct e1000_hw *hw); +static void e1000_config_collision_dist_82575(struct e1000_hw *hw); +static void e1000_power_down_phy_copper_82575(struct e1000_hw *hw); +static void e1000_shutdown_serdes_link_82575(struct e1000_hw *hw); +static void e1000_power_up_serdes_link_82575(struct e1000_hw *hw); +static s32 e1000_set_pcie_completion_timeout(struct e1000_hw *hw); +static s32 e1000_reset_mdicnfg_82580(struct e1000_hw *hw); +static s32 e1000_validate_nvm_checksum_82580(struct e1000_hw *hw); +static s32 e1000_update_nvm_checksum_82580(struct e1000_hw *hw); +static s32 e1000_update_nvm_checksum_with_offset(struct e1000_hw *hw, + u16 offset); +static s32 e1000_validate_nvm_checksum_with_offset(struct e1000_hw *hw, + u16 offset); +static s32 e1000_validate_nvm_checksum_i350(struct e1000_hw *hw); +static s32 e1000_update_nvm_checksum_i350(struct e1000_hw *hw); +static void e1000_write_vfta_i350(struct e1000_hw *hw, u32 offset, u32 value); +static void e1000_clear_vfta_i350(struct e1000_hw *hw); + +static void e1000_i2c_start(struct e1000_hw *hw); +static void e1000_i2c_stop(struct e1000_hw *hw); +static void e1000_clock_in_i2c_byte(struct e1000_hw *hw, u8 *data); +static s32 e1000_clock_out_i2c_byte(struct e1000_hw *hw, u8 data); 
+static s32 e1000_get_i2c_ack(struct e1000_hw *hw); +static void e1000_clock_in_i2c_bit(struct e1000_hw *hw, bool *data); +static s32 e1000_clock_out_i2c_bit(struct e1000_hw *hw, bool data); +static void e1000_raise_i2c_clk(struct e1000_hw *hw, u32 *i2cctl); +static void e1000_lower_i2c_clk(struct e1000_hw *hw, u32 *i2cctl); +static s32 e1000_set_i2c_data(struct e1000_hw *hw, u32 *i2cctl, bool data); +static bool e1000_get_i2c_data(u32 *i2cctl); + +static const u16 e1000_82580_rxpbs_table[] = { + 36, 72, 144, 1, 2, 4, 8, 16, 35, 70, 140 }; +#define E1000_82580_RXPBS_TABLE_SIZE \ + (sizeof(e1000_82580_rxpbs_table) / \ + sizeof(e1000_82580_rxpbs_table[0])) + +/** + * e1000_sgmii_uses_mdio_82575 - Determine if I2C pins are for external MDIO + * @hw: pointer to the HW structure + * + * Called to determine if the I2C pins are being used for I2C or as an + * external MDIO interface since the two options are mutually exclusive. + **/ +static bool e1000_sgmii_uses_mdio_82575(struct e1000_hw *hw) +{ + u32 reg = 0; + bool ext_mdio = false; + + DEBUGFUNC("e1000_sgmii_uses_mdio_82575"); + + switch (hw->mac.type) { + case e1000_82575: + case e1000_82576: + reg = E1000_READ_REG(hw, E1000_MDIC); + ext_mdio = !!(reg & E1000_MDIC_DEST); + break; + case e1000_82580: + case e1000_i350: + case e1000_i354: + case e1000_i210: + case e1000_i211: + reg = E1000_READ_REG(hw, E1000_MDICNFG); + ext_mdio = !!(reg & E1000_MDICNFG_EXT_MDIO); + break; + default: + break; + } + return ext_mdio; +} + +/** + * e1000_init_phy_params_82575 - Init PHY func ptrs. 
+ * @hw: pointer to the HW structure + **/ +static s32 e1000_init_phy_params_82575(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val = E1000_SUCCESS; + u32 ctrl_ext; + + DEBUGFUNC("e1000_init_phy_params_82575"); + + phy->ops.read_i2c_byte = e1000_read_i2c_byte_generic; + phy->ops.write_i2c_byte = e1000_write_i2c_byte_generic; + + if (hw->phy.media_type != e1000_media_type_copper) { + phy->type = e1000_phy_none; + goto out; + } + + phy->ops.power_up = e1000_power_up_phy_copper; + phy->ops.power_down = e1000_power_down_phy_copper_82575; + + phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT; + phy->reset_delay_us = 100; + + phy->ops.acquire = e1000_acquire_phy_82575; + phy->ops.check_reset_block = e1000_check_reset_block_generic; + phy->ops.commit = e1000_phy_sw_reset_generic; + phy->ops.get_cfg_done = e1000_get_cfg_done_82575; + phy->ops.release = e1000_release_phy_82575; + + ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT); + + if (e1000_sgmii_active_82575(hw)) { + phy->ops.reset = e1000_phy_hw_reset_sgmii_82575; + ctrl_ext |= E1000_CTRL_I2C_ENA; + } else { + phy->ops.reset = e1000_phy_hw_reset_generic; + ctrl_ext &= ~E1000_CTRL_I2C_ENA; + } + + E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext); + e1000_reset_mdicnfg_82580(hw); + + if (e1000_sgmii_active_82575(hw) && !e1000_sgmii_uses_mdio_82575(hw)) { + phy->ops.read_reg = e1000_read_phy_reg_sgmii_82575; + phy->ops.write_reg = e1000_write_phy_reg_sgmii_82575; + } else { + switch (hw->mac.type) { + case e1000_82580: + case e1000_i350: + case e1000_i354: + phy->ops.read_reg = e1000_read_phy_reg_82580; + phy->ops.write_reg = e1000_write_phy_reg_82580; + break; + case e1000_i210: + case e1000_i211: + phy->ops.read_reg = e1000_read_phy_reg_gs40g; + phy->ops.write_reg = e1000_write_phy_reg_gs40g; + break; + default: + phy->ops.read_reg = e1000_read_phy_reg_igp; + phy->ops.write_reg = e1000_write_phy_reg_igp; + } + } + + /* Set phy->phy_addr and phy->id. 
*/ + ret_val = e1000_get_phy_id_82575(hw); + + /* Verify phy id and set remaining function pointers */ + switch (phy->id) { + case M88E1543_E_PHY_ID: + case M88E1512_E_PHY_ID: + case I347AT4_E_PHY_ID: + case M88E1112_E_PHY_ID: + case M88E1340M_E_PHY_ID: + case M88E1111_I_PHY_ID: + phy->type = e1000_phy_m88; + phy->ops.check_polarity = e1000_check_polarity_m88; + phy->ops.get_info = e1000_get_phy_info_m88; + if (phy->id == I347AT4_E_PHY_ID || + phy->id == M88E1112_E_PHY_ID || + phy->id == M88E1340M_E_PHY_ID) + phy->ops.get_cable_length = + e1000_get_cable_length_m88_gen2; + else if (phy->id == M88E1543_E_PHY_ID || + phy->id == M88E1512_E_PHY_ID) + phy->ops.get_cable_length = + e1000_get_cable_length_m88_gen2; + else + phy->ops.get_cable_length = e1000_get_cable_length_m88; + phy->ops.force_speed_duplex = e1000_phy_force_speed_duplex_m88; + /* Check if this PHY is confgured for media swap. */ + if (phy->id == M88E1112_E_PHY_ID) { + u16 data; + + ret_val = phy->ops.write_reg(hw, + E1000_M88E1112_PAGE_ADDR, + 2); + if (ret_val) + goto out; + + ret_val = phy->ops.read_reg(hw, + E1000_M88E1112_MAC_CTRL_1, + &data); + if (ret_val) + goto out; + + data = (data & E1000_M88E1112_MAC_CTRL_1_MODE_MASK) >> + E1000_M88E1112_MAC_CTRL_1_MODE_SHIFT; + if (data == E1000_M88E1112_AUTO_COPPER_SGMII || + data == E1000_M88E1112_AUTO_COPPER_BASEX) + hw->mac.ops.check_for_link = + e1000_check_for_link_media_swap; + } + if (phy->id == M88E1512_E_PHY_ID) { + ret_val = e1000_initialize_M88E1512_phy(hw); + if (ret_val) + goto out; + } + break; + case IGP03E1000_E_PHY_ID: + case IGP04E1000_E_PHY_ID: + phy->type = e1000_phy_igp_3; + phy->ops.check_polarity = e1000_check_polarity_igp; + phy->ops.get_info = e1000_get_phy_info_igp; + phy->ops.get_cable_length = e1000_get_cable_length_igp_2; + phy->ops.force_speed_duplex = e1000_phy_force_speed_duplex_igp; + phy->ops.set_d0_lplu_state = e1000_set_d0_lplu_state_82575; + phy->ops.set_d3_lplu_state = e1000_set_d3_lplu_state_generic; + break; + case 
I82580_I_PHY_ID: + case I350_I_PHY_ID: + phy->type = e1000_phy_82580; + phy->ops.check_polarity = e1000_check_polarity_82577; + phy->ops.force_speed_duplex = + e1000_phy_force_speed_duplex_82577; + phy->ops.get_cable_length = e1000_get_cable_length_82577; + phy->ops.get_info = e1000_get_phy_info_82577; + phy->ops.set_d0_lplu_state = e1000_set_d0_lplu_state_82580; + phy->ops.set_d3_lplu_state = e1000_set_d3_lplu_state_82580; + break; + case I210_I_PHY_ID: + phy->type = e1000_phy_i210; + phy->ops.check_polarity = e1000_check_polarity_m88; + phy->ops.get_info = e1000_get_phy_info_m88; + phy->ops.get_cable_length = e1000_get_cable_length_m88_gen2; + phy->ops.set_d0_lplu_state = e1000_set_d0_lplu_state_82580; + phy->ops.set_d3_lplu_state = e1000_set_d3_lplu_state_82580; + phy->ops.force_speed_duplex = e1000_phy_force_speed_duplex_m88; + break; + default: + ret_val = -E1000_ERR_PHY; + goto out; + } + +out: + return ret_val; +} + +/** + * e1000_init_nvm_params_82575 - Init NVM func ptrs. + * @hw: pointer to the HW structure + **/ +s32 e1000_init_nvm_params_82575(struct e1000_hw *hw) +{ + struct e1000_nvm_info *nvm = &hw->nvm; + u32 eecd = E1000_READ_REG(hw, E1000_EECD); + u16 size; + + DEBUGFUNC("e1000_init_nvm_params_82575"); + + size = (u16)((eecd & E1000_EECD_SIZE_EX_MASK) >> + E1000_EECD_SIZE_EX_SHIFT); + /* + * Added to a constant, "size" becomes the left-shift value + * for setting word_size. + */ + size += NVM_WORD_SIZE_BASE_SHIFT; + + /* Just in case size is out of range, cap it to the largest + * EEPROM size supported + */ + if (size > 15) + size = 15; + + nvm->word_size = 1 << size; + if (hw->mac.type < e1000_i210) { + nvm->opcode_bits = 8; + nvm->delay_usec = 1; + + switch (nvm->override) { + case e1000_nvm_override_spi_large: + nvm->page_size = 32; + nvm->address_bits = 16; + break; + case e1000_nvm_override_spi_small: + nvm->page_size = 8; + nvm->address_bits = 8; + break; + default: + nvm->page_size = eecd & E1000_EECD_ADDR_BITS ? 
32 : 8; + nvm->address_bits = eecd & E1000_EECD_ADDR_BITS ? + 16 : 8; + break; + } + if (nvm->word_size == (1 << 15)) + nvm->page_size = 128; + + nvm->type = e1000_nvm_eeprom_spi; + } else { + nvm->type = e1000_nvm_flash_hw; + } + + /* Function Pointers */ + nvm->ops.acquire = e1000_acquire_nvm_82575; + nvm->ops.release = e1000_release_nvm_82575; + if (nvm->word_size < (1 << 15)) + nvm->ops.read = e1000_read_nvm_eerd; + else + nvm->ops.read = e1000_read_nvm_spi; + + nvm->ops.write = e1000_write_nvm_spi; + nvm->ops.validate = e1000_validate_nvm_checksum_generic; + nvm->ops.update = e1000_update_nvm_checksum_generic; + nvm->ops.valid_led_default = e1000_valid_led_default_82575; + + /* override generic family function pointers for specific descendants */ + switch (hw->mac.type) { + case e1000_82580: + nvm->ops.validate = e1000_validate_nvm_checksum_82580; + nvm->ops.update = e1000_update_nvm_checksum_82580; + break; + case e1000_i350: + case e1000_i354: + nvm->ops.validate = e1000_validate_nvm_checksum_i350; + nvm->ops.update = e1000_update_nvm_checksum_i350; + break; + default: + break; + } + + return E1000_SUCCESS; +} + +/** + * e1000_init_mac_params_82575 - Init MAC func ptrs. + * @hw: pointer to the HW structure + **/ +static s32 e1000_init_mac_params_82575(struct e1000_hw *hw) +{ + struct e1000_mac_info *mac = &hw->mac; + struct e1000_dev_spec_82575 *dev_spec = &hw->dev_spec._82575; + + DEBUGFUNC("e1000_init_mac_params_82575"); + + /* Derives media type */ + e1000_get_media_type_82575(hw); + /* Set mta register count */ + mac->mta_reg_count = 128; + /* Set uta register count */ + mac->uta_reg_count = (hw->mac.type == e1000_82575) ? 
0 : 128; + /* Set rar entry count */ + mac->rar_entry_count = E1000_RAR_ENTRIES_82575; + if (mac->type == e1000_82576) + mac->rar_entry_count = E1000_RAR_ENTRIES_82576; + if (mac->type == e1000_82580) + mac->rar_entry_count = E1000_RAR_ENTRIES_82580; + if (mac->type == e1000_i350 || mac->type == e1000_i354) + mac->rar_entry_count = E1000_RAR_ENTRIES_I350; + + /* Enable EEE default settings for EEE supported devices */ + if (mac->type >= e1000_i350) + dev_spec->eee_disable = false; + + /* Allow a single clear of the SW semaphore on I210 and newer */ + if (mac->type >= e1000_i210) + dev_spec->clear_semaphore_once = true; + + /* Set if part includes ASF firmware */ + mac->asf_firmware_present = true; + /* FWSM register */ + mac->has_fwsm = true; + /* ARC supported; valid only if manageability features are enabled. */ + mac->arc_subsystem_valid = + !!(E1000_READ_REG(hw, E1000_FWSM) & E1000_FWSM_MODE_MASK); + + /* Function pointers */ + + /* bus type/speed/width */ + mac->ops.get_bus_info = e1000_get_bus_info_pcie_generic; + /* reset */ + if (mac->type >= e1000_82580) + mac->ops.reset_hw = e1000_reset_hw_82580; + else + mac->ops.reset_hw = e1000_reset_hw_82575; + /* hw initialization */ + if ((mac->type == e1000_i210) || (mac->type == e1000_i211)) + mac->ops.init_hw = e1000_init_hw_i210; + else + mac->ops.init_hw = e1000_init_hw_82575; + /* link setup */ + mac->ops.setup_link = e1000_setup_link_generic; + /* physical interface link setup */ + mac->ops.setup_physical_interface = + (hw->phy.media_type == e1000_media_type_copper) + ? 
e1000_setup_copper_link_82575 : e1000_setup_serdes_link_82575; + /* physical interface shutdown */ + mac->ops.shutdown_serdes = e1000_shutdown_serdes_link_82575; + /* physical interface power up */ + mac->ops.power_up_serdes = e1000_power_up_serdes_link_82575; + /* check for link */ + mac->ops.check_for_link = e1000_check_for_link_82575; + /* read mac address */ + mac->ops.read_mac_addr = e1000_read_mac_addr_82575; + /* configure collision distance */ + mac->ops.config_collision_dist = e1000_config_collision_dist_82575; + /* multicast address update */ + mac->ops.update_mc_addr_list = e1000_update_mc_addr_list_generic; + if (hw->mac.type == e1000_i350 || mac->type == e1000_i354) { + /* writing VFTA */ + mac->ops.write_vfta = e1000_write_vfta_i350; + /* clearing VFTA */ + mac->ops.clear_vfta = e1000_clear_vfta_i350; + } else { + /* writing VFTA */ + mac->ops.write_vfta = e1000_write_vfta_generic; + /* clearing VFTA */ + mac->ops.clear_vfta = e1000_clear_vfta_generic; + } + if (hw->mac.type >= e1000_82580) + mac->ops.validate_mdi_setting = + e1000_validate_mdi_setting_crossover_generic; + /* ID LED init */ + mac->ops.id_led_init = e1000_id_led_init_generic; + /* blink LED */ + mac->ops.blink_led = e1000_blink_led_generic; + /* setup LED */ + mac->ops.setup_led = e1000_setup_led_generic; + /* cleanup LED */ + mac->ops.cleanup_led = e1000_cleanup_led_generic; + /* turn on/off LED */ + mac->ops.led_on = e1000_led_on_generic; + mac->ops.led_off = e1000_led_off_generic; + /* clear hardware counters */ + mac->ops.clear_hw_cntrs = e1000_clear_hw_cntrs_82575; + /* link info */ + mac->ops.get_link_up_info = e1000_get_link_up_info_82575; + /* get thermal sensor data */ + mac->ops.get_thermal_sensor_data = + e1000_get_thermal_sensor_data_generic; + mac->ops.init_thermal_sensor_thresh = + e1000_init_thermal_sensor_thresh_generic; + /* acquire SW_FW sync */ + mac->ops.acquire_swfw_sync = e1000_acquire_swfw_sync_82575; + mac->ops.release_swfw_sync = e1000_release_swfw_sync_82575; 
+ if (mac->type >= e1000_i210) { + mac->ops.acquire_swfw_sync = e1000_acquire_swfw_sync_i210; + mac->ops.release_swfw_sync = e1000_release_swfw_sync_i210; + } + + /* set lan id for port to determine which phy lock to use */ + hw->mac.ops.set_lan_id(hw); + + return E1000_SUCCESS; +} + +/** + * e1000_init_function_pointers_82575 - Init func ptrs. + * @hw: pointer to the HW structure + * + * Called to initialize all function pointers and parameters. + **/ +void e1000_init_function_pointers_82575(struct e1000_hw *hw) +{ + DEBUGFUNC("e1000_init_function_pointers_82575"); + + hw->mac.ops.init_params = e1000_init_mac_params_82575; + hw->nvm.ops.init_params = e1000_init_nvm_params_82575; + hw->phy.ops.init_params = e1000_init_phy_params_82575; + hw->mbx.ops.init_params = e1000_init_mbx_params_pf; +} + +/** + * e1000_acquire_phy_82575 - Acquire rights to access PHY + * @hw: pointer to the HW structure + * + * Acquire access rights to the correct PHY. + **/ +static s32 e1000_acquire_phy_82575(struct e1000_hw *hw) +{ + u16 mask = E1000_SWFW_PHY0_SM; + + DEBUGFUNC("e1000_acquire_phy_82575"); + + if (hw->bus.func == E1000_FUNC_1) + mask = E1000_SWFW_PHY1_SM; + else if (hw->bus.func == E1000_FUNC_2) + mask = E1000_SWFW_PHY2_SM; + else if (hw->bus.func == E1000_FUNC_3) + mask = E1000_SWFW_PHY3_SM; + + return hw->mac.ops.acquire_swfw_sync(hw, mask); +} + +/** + * e1000_release_phy_82575 - Release rights to access PHY + * @hw: pointer to the HW structure + * + * A wrapper to release access rights to the correct PHY. 
+ **/ +static void e1000_release_phy_82575(struct e1000_hw *hw) +{ + u16 mask = E1000_SWFW_PHY0_SM; + + DEBUGFUNC("e1000_release_phy_82575"); + + if (hw->bus.func == E1000_FUNC_1) + mask = E1000_SWFW_PHY1_SM; + else if (hw->bus.func == E1000_FUNC_2) + mask = E1000_SWFW_PHY2_SM; + else if (hw->bus.func == E1000_FUNC_3) + mask = E1000_SWFW_PHY3_SM; + + hw->mac.ops.release_swfw_sync(hw, mask); +} + +/** + * e1000_read_phy_reg_sgmii_82575 - Read PHY register using sgmii + * @hw: pointer to the HW structure + * @offset: register offset to be read + * @data: pointer to the read data + * + * Reads the PHY register at offset using the serial gigabit media independent + * interface and stores the retrieved information in data. + **/ +static s32 e1000_read_phy_reg_sgmii_82575(struct e1000_hw *hw, u32 offset, + u16 *data) +{ + s32 ret_val = -E1000_ERR_PARAM; + + DEBUGFUNC("e1000_read_phy_reg_sgmii_82575"); + + if (offset > E1000_MAX_SGMII_PHY_REG_ADDR) { + DEBUGOUT1("PHY Address %u is out of range\n", offset); + goto out; + } + + ret_val = hw->phy.ops.acquire(hw); + if (ret_val) + goto out; + + ret_val = e1000_read_phy_reg_i2c(hw, offset, data); + + hw->phy.ops.release(hw); + +out: + return ret_val; +} + +/** + * e1000_write_phy_reg_sgmii_82575 - Write PHY register using sgmii + * @hw: pointer to the HW structure + * @offset: register offset to write to + * @data: data to write at register offset + * + * Writes the data to PHY register at the offset using the serial gigabit + * media independent interface. 
+ **/ +static s32 e1000_write_phy_reg_sgmii_82575(struct e1000_hw *hw, u32 offset, + u16 data) +{ + s32 ret_val = -E1000_ERR_PARAM; + + DEBUGFUNC("e1000_write_phy_reg_sgmii_82575"); + + if (offset > E1000_MAX_SGMII_PHY_REG_ADDR) { + DEBUGOUT1("PHY Address %d is out of range\n", offset); + goto out; + } + + ret_val = hw->phy.ops.acquire(hw); + if (ret_val) + goto out; + + ret_val = e1000_write_phy_reg_i2c(hw, offset, data); + + hw->phy.ops.release(hw); + +out: + return ret_val; +} + +/** + * e1000_get_phy_id_82575 - Retrieve PHY addr and id + * @hw: pointer to the HW structure + * + * Retrieves the PHY address and ID for both PHY's which do and do not use + * sgmi interface. + **/ +static s32 e1000_get_phy_id_82575(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val = E1000_SUCCESS; + u16 phy_id; + u32 ctrl_ext; + u32 mdic; + + DEBUGFUNC("e1000_get_phy_id_82575"); + + /* some i354 devices need an extra read for phy id */ + if (hw->mac.type == e1000_i354) + e1000_get_phy_id(hw); + + /* + * For SGMII PHYs, we try the list of possible addresses until + * we find one that works. For non-SGMII PHYs + * (e.g. integrated copper PHYs), an address of 1 should + * work. The result of this function should mean phy->phy_addr + * and phy->id are set correctly. 
+ */ + if (!e1000_sgmii_active_82575(hw)) { + phy->addr = 1; + ret_val = e1000_get_phy_id(hw); + goto out; + } + + if (e1000_sgmii_uses_mdio_82575(hw)) { + switch (hw->mac.type) { + case e1000_82575: + case e1000_82576: + mdic = E1000_READ_REG(hw, E1000_MDIC); + mdic &= E1000_MDIC_PHY_MASK; + phy->addr = mdic >> E1000_MDIC_PHY_SHIFT; + break; + case e1000_82580: + case e1000_i350: + case e1000_i354: + case e1000_i210: + case e1000_i211: + mdic = E1000_READ_REG(hw, E1000_MDICNFG); + mdic &= E1000_MDICNFG_PHY_MASK; + phy->addr = mdic >> E1000_MDICNFG_PHY_SHIFT; + break; + default: + ret_val = -E1000_ERR_PHY; + goto out; + break; + } + ret_val = e1000_get_phy_id(hw); + goto out; + } + + /* Power on sgmii phy if it is disabled */ + ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT); + E1000_WRITE_REG(hw, E1000_CTRL_EXT, + ctrl_ext & ~E1000_CTRL_EXT_SDP3_DATA); + E1000_WRITE_FLUSH(hw); + msec_delay(300); + + /* + * The address field in the I2CCMD register is 3 bits and 0 is invalid. + * Therefore, we need to test 1-7 + */ + for (phy->addr = 1; phy->addr < 8; phy->addr++) { + ret_val = e1000_read_phy_reg_sgmii_82575(hw, PHY_ID1, &phy_id); + if (ret_val == E1000_SUCCESS) { + DEBUGOUT2("Vendor ID 0x%08X read at address %u\n", + phy_id, phy->addr); + /* + * At the time of this writing, The M88 part is + * the only supported SGMII PHY product. + */ + if (phy_id == M88_VENDOR) + break; + } else { + DEBUGOUT1("PHY address %u was unreadable\n", + phy->addr); + } + } + + /* A valid PHY type couldn't be found. */ + if (phy->addr == 8) { + phy->addr = 0; + ret_val = -E1000_ERR_PHY; + } else { + ret_val = e1000_get_phy_id(hw); + } + + /* restore previous sfp cage power state */ + E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext); + +out: + return ret_val; +} + +/** + * e1000_phy_hw_reset_sgmii_82575 - Performs a PHY reset + * @hw: pointer to the HW structure + * + * Resets the PHY using the serial gigabit media independent interface. 
+ **/ +static s32 e1000_phy_hw_reset_sgmii_82575(struct e1000_hw *hw) +{ + s32 ret_val = E1000_SUCCESS; + struct e1000_phy_info *phy = &hw->phy; + + DEBUGFUNC("e1000_phy_hw_reset_sgmii_82575"); + + /* + * This isn't a true "hard" reset, but is the only reset + * available to us at this time. + */ + + DEBUGOUT("Soft resetting SGMII attached PHY...\n"); + + if (!(hw->phy.ops.write_reg)) + goto out; + + /* + * SFP documentation requires the following to configure the SPF module + * to work on SGMII. No further documentation is given. + */ + ret_val = hw->phy.ops.write_reg(hw, 0x1B, 0x8084); + if (ret_val) + goto out; + + ret_val = hw->phy.ops.commit(hw); + if (ret_val) + goto out; + + if (phy->id == M88E1512_E_PHY_ID) + ret_val = e1000_initialize_M88E1512_phy(hw); +out: + return ret_val; +} + +/** + * e1000_set_d0_lplu_state_82575 - Set Low Power Linkup D0 state + * @hw: pointer to the HW structure + * @active: true to enable LPLU, false to disable + * + * Sets the LPLU D0 state according to the active flag. When + * activating LPLU this function also disables smart speed + * and vice versa. LPLU will not be activated unless the + * device autonegotiation advertisement meets standards of + * either 10 or 10/100 or 10/100/1000 at all duplexes. + * This is a function pointer entry point only called by + * PHY setup routines. 
+ **/ +static s32 e1000_set_d0_lplu_state_82575(struct e1000_hw *hw, bool active) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val = E1000_SUCCESS; + u16 data; + + DEBUGFUNC("e1000_set_d0_lplu_state_82575"); + + if (!(hw->phy.ops.read_reg)) + goto out; + + ret_val = phy->ops.read_reg(hw, IGP02E1000_PHY_POWER_MGMT, &data); + if (ret_val) + goto out; + + if (active) { + data |= IGP02E1000_PM_D0_LPLU; + ret_val = phy->ops.write_reg(hw, IGP02E1000_PHY_POWER_MGMT, + data); + if (ret_val) + goto out; + + /* When LPLU is enabled, we should disable SmartSpeed */ + ret_val = phy->ops.read_reg(hw, IGP01E1000_PHY_PORT_CONFIG, + &data); + data &= ~IGP01E1000_PSCFR_SMART_SPEED; + ret_val = phy->ops.write_reg(hw, IGP01E1000_PHY_PORT_CONFIG, + data); + if (ret_val) + goto out; + } else { + data &= ~IGP02E1000_PM_D0_LPLU; + ret_val = phy->ops.write_reg(hw, IGP02E1000_PHY_POWER_MGMT, + data); + /* + * LPLU and SmartSpeed are mutually exclusive. LPLU is used + * during Dx states where the power conservation is most + * important. During driver activity we should enable + * SmartSpeed, so performance is maintained. + */ + if (phy->smart_speed == e1000_smart_speed_on) { + ret_val = phy->ops.read_reg(hw, + IGP01E1000_PHY_PORT_CONFIG, + &data); + if (ret_val) + goto out; + + data |= IGP01E1000_PSCFR_SMART_SPEED; + ret_val = phy->ops.write_reg(hw, + IGP01E1000_PHY_PORT_CONFIG, + data); + if (ret_val) + goto out; + } else if (phy->smart_speed == e1000_smart_speed_off) { + ret_val = phy->ops.read_reg(hw, + IGP01E1000_PHY_PORT_CONFIG, + &data); + if (ret_val) + goto out; + + data &= ~IGP01E1000_PSCFR_SMART_SPEED; + ret_val = phy->ops.write_reg(hw, + IGP01E1000_PHY_PORT_CONFIG, + data); + if (ret_val) + goto out; + } + } + +out: + return ret_val; +} + +/** + * e1000_set_d0_lplu_state_82580 - Set Low Power Linkup D0 state + * @hw: pointer to the HW structure + * @active: true to enable LPLU, false to disable + * + * Sets the LPLU D0 state according to the active flag. 
When + * activating LPLU this function also disables smart speed + * and vice versa. LPLU will not be activated unless the + * device autonegotiation advertisement meets standards of + * either 10 or 10/100 or 10/100/1000 at all duplexes. + * This is a function pointer entry point only called by + * PHY setup routines. + **/ +static s32 e1000_set_d0_lplu_state_82580(struct e1000_hw *hw, bool active) +{ + struct e1000_phy_info *phy = &hw->phy; + u32 data; + + DEBUGFUNC("e1000_set_d0_lplu_state_82580"); + + data = E1000_READ_REG(hw, E1000_82580_PHY_POWER_MGMT); + + if (active) { + data |= E1000_82580_PM_D0_LPLU; + + /* When LPLU is enabled, we should disable SmartSpeed */ + data &= ~E1000_82580_PM_SPD; + } else { + data &= ~E1000_82580_PM_D0_LPLU; + + /* + * LPLU and SmartSpeed are mutually exclusive. LPLU is used + * during Dx states where the power conservation is most + * important. During driver activity we should enable + * SmartSpeed, so performance is maintained. + */ + if (phy->smart_speed == e1000_smart_speed_on) + data |= E1000_82580_PM_SPD; + else if (phy->smart_speed == e1000_smart_speed_off) + data &= ~E1000_82580_PM_SPD; + } + + E1000_WRITE_REG(hw, E1000_82580_PHY_POWER_MGMT, data); + return E1000_SUCCESS; +} + +/** + * e1000_set_d3_lplu_state_82580 - Sets low power link up state for D3 + * @hw: pointer to the HW structure + * @active: boolean used to enable/disable lplu + * + * Success returns 0, Failure returns 1 + * + * The low power link up (lplu) state is set to the power management level D3 + * and SmartSpeed is disabled when active is true, else clear lplu for D3 + * and enable Smartspeed. LPLU and Smartspeed are mutually exclusive. LPLU + * is used during Dx states where the power conservation is most important. + * During driver activity, SmartSpeed should be enabled so performance is + * maintained. 
+ **/ +s32 e1000_set_d3_lplu_state_82580(struct e1000_hw *hw, bool active) +{ + struct e1000_phy_info *phy = &hw->phy; + u32 data; + + DEBUGFUNC("e1000_set_d3_lplu_state_82580"); + + data = E1000_READ_REG(hw, E1000_82580_PHY_POWER_MGMT); + + if (!active) { + data &= ~E1000_82580_PM_D3_LPLU; + /* + * LPLU and SmartSpeed are mutually exclusive. LPLU is used + * during Dx states where the power conservation is most + * important. During driver activity we should enable + * SmartSpeed, so performance is maintained. + */ + if (phy->smart_speed == e1000_smart_speed_on) + data |= E1000_82580_PM_SPD; + else if (phy->smart_speed == e1000_smart_speed_off) + data &= ~E1000_82580_PM_SPD; + } else if ((phy->autoneg_advertised == E1000_ALL_SPEED_DUPLEX) || + (phy->autoneg_advertised == E1000_ALL_NOT_GIG) || + (phy->autoneg_advertised == E1000_ALL_10_SPEED)) { + data |= E1000_82580_PM_D3_LPLU; + /* When LPLU is enabled, we should disable SmartSpeed */ + data &= ~E1000_82580_PM_SPD; + } + + E1000_WRITE_REG(hw, E1000_82580_PHY_POWER_MGMT, data); + return E1000_SUCCESS; +} + +/** + * e1000_acquire_nvm_82575 - Request for access to EEPROM + * @hw: pointer to the HW structure + * + * Acquire the necessary semaphores for exclusive access to the EEPROM. + * Set the EEPROM access request bit and wait for EEPROM access grant bit. + * Return successful if access grant bit set, else clear the request for + * EEPROM access and return -E1000_ERR_NVM (-1). 
+ **/ +static s32 e1000_acquire_nvm_82575(struct e1000_hw *hw) +{ + s32 ret_val = E1000_SUCCESS; + + DEBUGFUNC("e1000_acquire_nvm_82575"); + + ret_val = e1000_acquire_swfw_sync_82575(hw, E1000_SWFW_EEP_SM); + if (ret_val) + goto out; + + /* + * Check if there is some access + * error this access may hook on + */ + if (hw->mac.type == e1000_i350) { + u32 eecd = E1000_READ_REG(hw, E1000_EECD); + if (eecd & (E1000_EECD_BLOCKED | E1000_EECD_ABORT | + E1000_EECD_TIMEOUT)) { + /* Clear all access error flags */ + E1000_WRITE_REG(hw, E1000_EECD, eecd | + E1000_EECD_ERROR_CLR); + DEBUGOUT("Nvm bit banging access error detected and cleared.\n"); + } + } + + if (hw->mac.type == e1000_82580) { + u32 eecd = E1000_READ_REG(hw, E1000_EECD); + if (eecd & E1000_EECD_BLOCKED) { + /* Clear access error flag */ + E1000_WRITE_REG(hw, E1000_EECD, eecd | + E1000_EECD_BLOCKED); + DEBUGOUT("Nvm bit banging access error detected and cleared.\n"); + } + } + + ret_val = e1000_acquire_nvm_generic(hw); + if (ret_val) + e1000_release_swfw_sync_82575(hw, E1000_SWFW_EEP_SM); + +out: + return ret_val; +} + +/** + * e1000_release_nvm_82575 - Release exclusive access to EEPROM + * @hw: pointer to the HW structure + * + * Stop any current commands to the EEPROM and clear the EEPROM request bit, + * then release the semaphores acquired. + **/ +static void e1000_release_nvm_82575(struct e1000_hw *hw) +{ + DEBUGFUNC("e1000_release_nvm_82575"); + + e1000_release_nvm_generic(hw); + + e1000_release_swfw_sync_82575(hw, E1000_SWFW_EEP_SM); +} + +/** + * e1000_acquire_swfw_sync_82575 - Acquire SW/FW semaphore + * @hw: pointer to the HW structure + * @mask: specifies which semaphore to acquire + * + * Acquire the SW/FW semaphore to access the PHY or NVM. The mask + * will also specify which port we're acquiring the lock for. 
+ **/ +static s32 e1000_acquire_swfw_sync_82575(struct e1000_hw *hw, u16 mask) +{ + u32 swfw_sync; + u32 swmask = mask; + u32 fwmask = mask << 16; + s32 ret_val = E1000_SUCCESS; + s32 i = 0, timeout = 200; + + DEBUGFUNC("e1000_acquire_swfw_sync_82575"); + + while (i < timeout) { + if (e1000_get_hw_semaphore_generic(hw)) { + ret_val = -E1000_ERR_SWFW_SYNC; + goto out; + } + + swfw_sync = E1000_READ_REG(hw, E1000_SW_FW_SYNC); + if (!(swfw_sync & (fwmask | swmask))) + break; + + /* + * Firmware currently using resource (fwmask) + * or other software thread using resource (swmask) + */ + e1000_put_hw_semaphore_generic(hw); + msec_delay_irq(5); + i++; + } + + if (i == timeout) { + DEBUGOUT("Driver can't access resource, SW_FW_SYNC timeout.\n"); + ret_val = -E1000_ERR_SWFW_SYNC; + goto out; + } + + swfw_sync |= swmask; + E1000_WRITE_REG(hw, E1000_SW_FW_SYNC, swfw_sync); + + e1000_put_hw_semaphore_generic(hw); + +out: + return ret_val; +} + +/** + * e1000_release_swfw_sync_82575 - Release SW/FW semaphore + * @hw: pointer to the HW structure + * @mask: specifies which semaphore to acquire + * + * Release the SW/FW semaphore used to access the PHY or NVM. The mask + * will also specify which port we're releasing the lock for. + **/ +static void e1000_release_swfw_sync_82575(struct e1000_hw *hw, u16 mask) +{ + u32 swfw_sync; + + DEBUGFUNC("e1000_release_swfw_sync_82575"); + + while (e1000_get_hw_semaphore_generic(hw) != E1000_SUCCESS) + ; /* Empty */ + + swfw_sync = E1000_READ_REG(hw, E1000_SW_FW_SYNC); + swfw_sync &= ~mask; + E1000_WRITE_REG(hw, E1000_SW_FW_SYNC, swfw_sync); + + e1000_put_hw_semaphore_generic(hw); +} + +/** + * e1000_get_cfg_done_82575 - Read config done bit + * @hw: pointer to the HW structure + * + * Read the management control register for the config done bit for + * completion status. NOTE: silicon which is EEPROM-less will fail trying + * to read the config done bit, so an error is *ONLY* logged and returns + * E1000_SUCCESS. 
If we were to return with error, EEPROM-less silicon + * would not be able to be reset or change link. + **/ +static s32 e1000_get_cfg_done_82575(struct e1000_hw *hw) +{ + s32 timeout = PHY_CFG_TIMEOUT; + u32 mask = E1000_NVM_CFG_DONE_PORT_0; + + DEBUGFUNC("e1000_get_cfg_done_82575"); + + if (hw->bus.func == E1000_FUNC_1) + mask = E1000_NVM_CFG_DONE_PORT_1; + else if (hw->bus.func == E1000_FUNC_2) + mask = E1000_NVM_CFG_DONE_PORT_2; + else if (hw->bus.func == E1000_FUNC_3) + mask = E1000_NVM_CFG_DONE_PORT_3; + while (timeout) { + if (E1000_READ_REG(hw, E1000_EEMNGCTL) & mask) + break; + msec_delay(1); + timeout--; + } + if (!timeout) + DEBUGOUT("MNG configuration cycle has not completed.\n"); + + /* If EEPROM is not marked present, init the PHY manually */ + if (!(E1000_READ_REG(hw, E1000_EECD) & E1000_EECD_PRES) && + (hw->phy.type == e1000_phy_igp_3)) + e1000_phy_init_script_igp3(hw); + + return E1000_SUCCESS; +} + +/** + * e1000_get_link_up_info_82575 - Get link speed/duplex info + * @hw: pointer to the HW structure + * @speed: stores the current speed + * @duplex: stores the current duplex + * + * This is a wrapper function, if using the serial gigabit media independent + * interface, use PCS to retrieve the link speed and duplex information. + * Otherwise, use the generic function to get the link speed and duplex info. + **/ +static s32 e1000_get_link_up_info_82575(struct e1000_hw *hw, u16 *speed, + u16 *duplex) +{ + s32 ret_val; + + DEBUGFUNC("e1000_get_link_up_info_82575"); + + if (hw->phy.media_type != e1000_media_type_copper) + ret_val = e1000_get_pcs_speed_and_duplex_82575(hw, speed, + duplex); + else + ret_val = e1000_get_speed_and_duplex_copper_generic(hw, speed, + duplex); + + return ret_val; +} + +/** + * e1000_check_for_link_82575 - Check for link + * @hw: pointer to the HW structure + * + * If sgmii is enabled, then use the pcs register to determine link, otherwise + * use the generic interface for determining link. 
+ **/ +static s32 e1000_check_for_link_82575(struct e1000_hw *hw) +{ + s32 ret_val; + u16 speed, duplex; + + DEBUGFUNC("e1000_check_for_link_82575"); + + if (hw->phy.media_type != e1000_media_type_copper) { + ret_val = e1000_get_pcs_speed_and_duplex_82575(hw, &speed, + &duplex); + /* + * Use this flag to determine if link needs to be checked or + * not. If we have link clear the flag so that we do not + * continue to check for link. + */ + hw->mac.get_link_status = !hw->mac.serdes_has_link; + + /* + * Configure Flow Control now that Auto-Neg has completed. + * First, we need to restore the desired flow control + * settings because we may have had to re-autoneg with a + * different link partner. + */ + ret_val = e1000_config_fc_after_link_up_generic(hw); + if (ret_val) + DEBUGOUT("Error configuring flow control\n"); + } else { + ret_val = e1000_check_for_copper_link_generic(hw); + } + + return ret_val; +} + +/** + * e1000_check_for_link_media_swap - Check which M88E1112 interface linked + * @hw: pointer to the HW structure + * + * Poll the M88E1112 interfaces to see which interface achieved link. + */ +static s32 e1000_check_for_link_media_swap(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + u16 data; + u8 port = 0; + + DEBUGFUNC("e1000_check_for_link_media_swap"); + + /* Check for copper. */ + ret_val = phy->ops.write_reg(hw, E1000_M88E1112_PAGE_ADDR, 0); + if (ret_val) + return ret_val; + + ret_val = phy->ops.read_reg(hw, E1000_M88E1112_STATUS, &data); + if (ret_val) + return ret_val; + + if (data & E1000_M88E1112_STATUS_LINK) + port = E1000_MEDIA_PORT_COPPER; + + /* Check for other. */ + ret_val = phy->ops.write_reg(hw, E1000_M88E1112_PAGE_ADDR, 1); + if (ret_val) + return ret_val; + + ret_val = phy->ops.read_reg(hw, E1000_M88E1112_STATUS, &data); + if (ret_val) + return ret_val; + + if (data & E1000_M88E1112_STATUS_LINK) + port = E1000_MEDIA_PORT_OTHER; + + /* Determine if a swap needs to happen. 
*/ + if (port && (hw->dev_spec._82575.media_port != port)) { + hw->dev_spec._82575.media_port = port; + hw->dev_spec._82575.media_changed = true; + } + + if (port == E1000_MEDIA_PORT_COPPER) { + /* reset page to 0 */ + ret_val = phy->ops.write_reg(hw, E1000_M88E1112_PAGE_ADDR, 0); + if (ret_val) + return ret_val; + e1000_check_for_link_82575(hw); + } else { + e1000_check_for_link_82575(hw); + /* reset page to 0 */ + ret_val = phy->ops.write_reg(hw, E1000_M88E1112_PAGE_ADDR, 0); + if (ret_val) + return ret_val; + } + + return E1000_SUCCESS; +} + +/** + * e1000_power_up_serdes_link_82575 - Power up the serdes link after shutdown + * @hw: pointer to the HW structure + **/ +static void e1000_power_up_serdes_link_82575(struct e1000_hw *hw) +{ + u32 reg; + + DEBUGFUNC("e1000_power_up_serdes_link_82575"); + + if ((hw->phy.media_type != e1000_media_type_internal_serdes) && + !e1000_sgmii_active_82575(hw)) + return; + + /* Enable PCS to turn on link */ + reg = E1000_READ_REG(hw, E1000_PCS_CFG0); + reg |= E1000_PCS_CFG_PCS_EN; + E1000_WRITE_REG(hw, E1000_PCS_CFG0, reg); + + /* Power up the laser */ + reg = E1000_READ_REG(hw, E1000_CTRL_EXT); + reg &= ~E1000_CTRL_EXT_SDP3_DATA; + E1000_WRITE_REG(hw, E1000_CTRL_EXT, reg); + + /* flush the write to verify completion */ + E1000_WRITE_FLUSH(hw); + msec_delay(1); +} + +/** + * e1000_get_pcs_speed_and_duplex_82575 - Retrieve current speed/duplex + * @hw: pointer to the HW structure + * @speed: stores the current speed + * @duplex: stores the current duplex + * + * Using the physical coding sub-layer (PCS), retrieve the current speed and + * duplex, then store the values in the pointers provided. + **/ +static s32 e1000_get_pcs_speed_and_duplex_82575(struct e1000_hw *hw, + u16 *speed, u16 *duplex) +{ + struct e1000_mac_info *mac = &hw->mac; + u32 pcs; + u32 status; + + DEBUGFUNC("e1000_get_pcs_speed_and_duplex_82575"); + + /* + * Read the PCS Status register for link state. 
For non-copper mode, + * the status register is not accurate. The PCS status register is + * used instead. + */ + pcs = E1000_READ_REG(hw, E1000_PCS_LSTAT); + + /* + * The link up bit determines when link is up on autoneg. + */ + if (pcs & E1000_PCS_LSTS_LINK_OK) { + mac->serdes_has_link = true; + + /* Detect and store PCS speed */ + if (pcs & E1000_PCS_LSTS_SPEED_1000) + *speed = SPEED_1000; + else if (pcs & E1000_PCS_LSTS_SPEED_100) + *speed = SPEED_100; + else + *speed = SPEED_10; + + /* Detect and store PCS duplex */ + if (pcs & E1000_PCS_LSTS_DUPLEX_FULL) + *duplex = FULL_DUPLEX; + else + *duplex = HALF_DUPLEX; + + /* Check if it is an I354 2.5Gb backplane connection. */ + if (mac->type == e1000_i354) { + status = E1000_READ_REG(hw, E1000_STATUS); + if ((status & E1000_STATUS_2P5_SKU) && + !(status & E1000_STATUS_2P5_SKU_OVER)) { + *speed = SPEED_2500; + *duplex = FULL_DUPLEX; + DEBUGOUT("2500 Mbs, "); + DEBUGOUT("Full Duplex\n"); + } + } + + } else { + mac->serdes_has_link = false; + *speed = 0; + *duplex = 0; + } + + return E1000_SUCCESS; +} + +/** + * e1000_shutdown_serdes_link_82575 - Remove link during power down + * @hw: pointer to the HW structure + * + * In the case of serdes shut down sfp and PCS on driver unload + * when management pass thru is not enabled. 
+ **/ +void e1000_shutdown_serdes_link_82575(struct e1000_hw *hw) +{ + u32 reg; + + DEBUGFUNC("e1000_shutdown_serdes_link_82575"); + + if ((hw->phy.media_type != e1000_media_type_internal_serdes) && + !e1000_sgmii_active_82575(hw)) + return; + + if (!e1000_enable_mng_pass_thru(hw)) { + /* Disable PCS to turn off link */ + reg = E1000_READ_REG(hw, E1000_PCS_CFG0); + reg &= ~E1000_PCS_CFG_PCS_EN; + E1000_WRITE_REG(hw, E1000_PCS_CFG0, reg); + + /* shutdown the laser */ + reg = E1000_READ_REG(hw, E1000_CTRL_EXT); + reg |= E1000_CTRL_EXT_SDP3_DATA; + E1000_WRITE_REG(hw, E1000_CTRL_EXT, reg); + + /* flush the write to verify completion */ + E1000_WRITE_FLUSH(hw); + msec_delay(1); + } + + return; +} + +/** + * e1000_reset_hw_82575 - Reset hardware + * @hw: pointer to the HW structure + * + * This resets the hardware into a known state. + **/ +static s32 e1000_reset_hw_82575(struct e1000_hw *hw) +{ + u32 ctrl; + s32 ret_val; + + DEBUGFUNC("e1000_reset_hw_82575"); + + /* + * Prevent the PCI-E bus from sticking if there is no TLP connection + * on the last TLP read/write transaction when MAC is reset. + */ + ret_val = e1000_disable_pcie_master_generic(hw); + if (ret_val) + DEBUGOUT("PCI-E Master disable polling has failed.\n"); + + /* set the completion timeout for interface */ + ret_val = e1000_set_pcie_completion_timeout(hw); + if (ret_val) + DEBUGOUT("PCI-E Set completion timeout has failed.\n"); + + DEBUGOUT("Masking off all interrupts\n"); + E1000_WRITE_REG(hw, E1000_IMC, 0xffffffff); + + E1000_WRITE_REG(hw, E1000_RCTL, 0); + E1000_WRITE_REG(hw, E1000_TCTL, E1000_TCTL_PSP); + E1000_WRITE_FLUSH(hw); + + msec_delay(10); + + ctrl = E1000_READ_REG(hw, E1000_CTRL); + + DEBUGOUT("Issuing a global reset to MAC\n"); + E1000_WRITE_REG(hw, E1000_CTRL, ctrl | E1000_CTRL_RST); + + ret_val = e1000_get_auto_rd_done_generic(hw); + if (ret_val) { + /* + * When auto config read does not complete, do not + * return with an error. 
This can happen in situations + * where there is no eeprom and prevents getting link. + */ + DEBUGOUT("Auto Read Done did not complete\n"); + } + + /* If EEPROM is not present, run manual init scripts */ + if (!(E1000_READ_REG(hw, E1000_EECD) & E1000_EECD_PRES)) + e1000_reset_init_script_82575(hw); + + /* Clear any pending interrupt events. */ + E1000_WRITE_REG(hw, E1000_IMC, 0xffffffff); + E1000_READ_REG(hw, E1000_ICR); + + /* Install any alternate MAC address into RAR0 */ + ret_val = e1000_check_alt_mac_addr_generic(hw); + + return ret_val; +} + +/** + * e1000_init_hw_82575 - Initialize hardware + * @hw: pointer to the HW structure + * + * This inits the hardware readying it for operation. + **/ +s32 e1000_init_hw_82575(struct e1000_hw *hw) +{ + struct e1000_mac_info *mac = &hw->mac; + s32 ret_val; + u16 i, rar_count = mac->rar_entry_count; + + DEBUGFUNC("e1000_init_hw_82575"); + + /* Initialize identification LED */ + ret_val = mac->ops.id_led_init(hw); + if (ret_val) { + DEBUGOUT("Error initializing identification LED\n"); + /* This is not fatal and we should not stop init due to this */ + } + + /* Disabling VLAN filtering */ + DEBUGOUT("Initializing the IEEE VLAN\n"); + mac->ops.clear_vfta(hw); + + /* Setup the receive address */ + e1000_init_rx_addrs_generic(hw, rar_count); + + /* Zero out the Multicast HASH table */ + DEBUGOUT("Zeroing the MTA\n"); + for (i = 0; i < mac->mta_reg_count; i++) + E1000_WRITE_REG_ARRAY(hw, E1000_MTA, i, 0); + + /* Zero out the Unicast HASH table */ + DEBUGOUT("Zeroing the UTA\n"); + for (i = 0; i < mac->uta_reg_count; i++) + E1000_WRITE_REG_ARRAY(hw, E1000_UTA, i, 0); + + /* Setup link and flow control */ + ret_val = mac->ops.setup_link(hw); + + /* Set the default MTU size */ + hw->dev_spec._82575.mtu = 1500; + + /* + * Clear all of the statistics registers (clear on read). It is + * important that we do this after we have tried to establish link + * because the symbol error count will increment wildly if there + * is no link. 
+ */ + e1000_clear_hw_cntrs_82575(hw); + + return ret_val; +} + +/** + * e1000_setup_copper_link_82575 - Configure copper link settings + * @hw: pointer to the HW structure + * + * Configures the link for auto-neg or forced speed and duplex. Then we check + * for link, once link is established calls to configure collision distance + * and flow control are called. + **/ +static s32 e1000_setup_copper_link_82575(struct e1000_hw *hw) +{ + u32 ctrl; + s32 ret_val; + u32 phpm_reg; + + DEBUGFUNC("e1000_setup_copper_link_82575"); + + ctrl = E1000_READ_REG(hw, E1000_CTRL); + ctrl |= E1000_CTRL_SLU; + ctrl &= ~(E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX); + E1000_WRITE_REG(hw, E1000_CTRL, ctrl); + + /* Clear Go Link Disconnect bit on supported devices */ + switch (hw->mac.type) { + case e1000_82580: + case e1000_i350: + case e1000_i210: + case e1000_i211: + phpm_reg = E1000_READ_REG(hw, E1000_82580_PHY_POWER_MGMT); + phpm_reg &= ~E1000_82580_PM_GO_LINKD; + E1000_WRITE_REG(hw, E1000_82580_PHY_POWER_MGMT, phpm_reg); + break; + default: + break; + } + + ret_val = e1000_setup_serdes_link_82575(hw); + if (ret_val) + goto out; + + if (e1000_sgmii_active_82575(hw) && !hw->phy.reset_disable) { + /* allow time for SFP cage time to power up phy */ + msec_delay(300); + + ret_val = hw->phy.ops.reset(hw); + if (ret_val) { + DEBUGOUT("Error resetting the PHY.\n"); + goto out; + } + } + switch (hw->phy.type) { + case e1000_phy_i210: + case e1000_phy_m88: + switch (hw->phy.id) { + case I347AT4_E_PHY_ID: + case M88E1112_E_PHY_ID: + case M88E1340M_E_PHY_ID: + case M88E1543_E_PHY_ID: + case M88E1512_E_PHY_ID: + case I210_I_PHY_ID: + ret_val = e1000_copper_link_setup_m88_gen2(hw); + break; + default: + ret_val = e1000_copper_link_setup_m88(hw); + break; + } + break; + case e1000_phy_igp_3: + ret_val = e1000_copper_link_setup_igp(hw); + break; + case e1000_phy_82580: + ret_val = e1000_copper_link_setup_82577(hw); + break; + default: + ret_val = -E1000_ERR_PHY; + break; + } + + if (ret_val) + goto 
out; + + ret_val = e1000_setup_copper_link_generic(hw); +out: + return ret_val; +} + +/** + * e1000_setup_serdes_link_82575 - Setup link for serdes + * @hw: pointer to the HW structure + * + * Configure the physical coding sub-layer (PCS) link. The PCS link is + * used on copper connections where the serialized gigabit media independent + * interface (sgmii), or serdes fiber is being used. Configures the link + * for auto-negotiation or forces speed/duplex. + **/ +static s32 e1000_setup_serdes_link_82575(struct e1000_hw *hw) +{ + u32 ctrl_ext, ctrl_reg, reg, anadv_reg; + bool pcs_autoneg; + s32 ret_val = E1000_SUCCESS; + u16 data; + + DEBUGFUNC("e1000_setup_serdes_link_82575"); + + if ((hw->phy.media_type != e1000_media_type_internal_serdes) && + !e1000_sgmii_active_82575(hw)) + return ret_val; + + /* + * On the 82575, SerDes loopback mode persists until it is + * explicitly turned off or a power cycle is performed. A read to + * the register does not indicate its status. Therefore, we ensure + * loopback mode is disabled during initialization. 
+ */ + E1000_WRITE_REG(hw, E1000_SCTL, E1000_SCTL_DISABLE_SERDES_LOOPBACK); + + /* power on the sfp cage if present */ + ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT); + ctrl_ext &= ~E1000_CTRL_EXT_SDP3_DATA; + E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext); + + ctrl_reg = E1000_READ_REG(hw, E1000_CTRL); + ctrl_reg |= E1000_CTRL_SLU; + + /* set both sw defined pins on 82575/82576*/ + if (hw->mac.type == e1000_82575 || hw->mac.type == e1000_82576) + ctrl_reg |= E1000_CTRL_SWDPIN0 | E1000_CTRL_SWDPIN1; + + reg = E1000_READ_REG(hw, E1000_PCS_LCTL); + + /* default pcs_autoneg to the same setting as mac autoneg */ + pcs_autoneg = hw->mac.autoneg; + + switch (ctrl_ext & E1000_CTRL_EXT_LINK_MODE_MASK) { + case E1000_CTRL_EXT_LINK_MODE_SGMII: + /* sgmii mode lets the phy handle forcing speed/duplex */ + pcs_autoneg = true; + /* autoneg time out should be disabled for SGMII mode */ + reg &= ~(E1000_PCS_LCTL_AN_TIMEOUT); + break; + case E1000_CTRL_EXT_LINK_MODE_1000BASE_KX: + /* disable PCS autoneg and support parallel detect only */ + pcs_autoneg = false; + /* fall through to default case */ + default: + if (hw->mac.type == e1000_82575 || + hw->mac.type == e1000_82576) { + ret_val = hw->nvm.ops.read(hw, NVM_COMPAT, 1, &data); + if (ret_val) { + DEBUGOUT("NVM Read Error\n"); + return ret_val; + } + + if (data & E1000_EEPROM_PCS_AUTONEG_DISABLE_BIT) + pcs_autoneg = false; + } + + /* + * non-SGMII modes only supports a speed of 1000/Full for the + * link so it is best to just force the MAC and let the pcs + * link either autoneg or be forced to 1000/Full + */ + ctrl_reg |= E1000_CTRL_SPD_1000 | E1000_CTRL_FRCSPD | + E1000_CTRL_FD | E1000_CTRL_FRCDPX; + + /* set speed of 1000/Full if speed/duplex is forced */ + reg |= E1000_PCS_LCTL_FSV_1000 | E1000_PCS_LCTL_FDV_FULL; + break; + } + + E1000_WRITE_REG(hw, E1000_CTRL, ctrl_reg); + + /* + * New SerDes mode allows for forcing speed or autonegotiating speed + * at 1gb. Autoneg should be default set by most drivers. 
This is the + * mode that will be compatible with older link partners and switches. + * However, both are supported by the hardware and some drivers/tools. + */ + reg &= ~(E1000_PCS_LCTL_AN_ENABLE | E1000_PCS_LCTL_FLV_LINK_UP | + E1000_PCS_LCTL_FSD | E1000_PCS_LCTL_FORCE_LINK); + + if (pcs_autoneg) { + /* Set PCS register for autoneg */ + reg |= E1000_PCS_LCTL_AN_ENABLE | /* Enable Autoneg */ + E1000_PCS_LCTL_AN_RESTART; /* Restart autoneg */ + + /* Disable force flow control for autoneg */ + reg &= ~E1000_PCS_LCTL_FORCE_FCTRL; + + /* Configure flow control advertisement for autoneg */ + anadv_reg = E1000_READ_REG(hw, E1000_PCS_ANADV); + anadv_reg &= ~(E1000_TXCW_ASM_DIR | E1000_TXCW_PAUSE); + + switch (hw->fc.requested_mode) { + case e1000_fc_full: + case e1000_fc_rx_pause: + anadv_reg |= E1000_TXCW_ASM_DIR; + anadv_reg |= E1000_TXCW_PAUSE; + break; + case e1000_fc_tx_pause: + anadv_reg |= E1000_TXCW_ASM_DIR; + break; + default: + break; + } + + E1000_WRITE_REG(hw, E1000_PCS_ANADV, anadv_reg); + + DEBUGOUT1("Configuring Autoneg:PCS_LCTL=0x%08X\n", reg); + } else { + /* Set PCS register for forced link */ + reg |= E1000_PCS_LCTL_FSD; /* Force Speed */ + + /* Force flow control for forced link */ + reg |= E1000_PCS_LCTL_FORCE_FCTRL; + + DEBUGOUT1("Configuring Forced Link:PCS_LCTL=0x%08X\n", reg); + } + + E1000_WRITE_REG(hw, E1000_PCS_LCTL, reg); + + if (!pcs_autoneg && !e1000_sgmii_active_82575(hw)) + e1000_force_mac_fc_generic(hw); + + return ret_val; +} + +/** + * e1000_get_media_type_82575 - derives current media type. + * @hw: pointer to the HW structure + * + * The media type is chosen reflecting few settings. + * The following are taken into account: + * - link mode set in the current port Init Control Word #3 + * - current link mode settings in CSR register + * - MDIO vs. 
I2C PHY control interface chosen + * - SFP module media type + **/ +static s32 e1000_get_media_type_82575(struct e1000_hw *hw) +{ + struct e1000_dev_spec_82575 *dev_spec = &hw->dev_spec._82575; + s32 ret_val = E1000_SUCCESS; + u32 ctrl_ext = 0; + u32 link_mode = 0; + + /* Set internal phy as default */ + dev_spec->sgmii_active = false; + dev_spec->module_plugged = false; + + /* Get CSR setting */ + ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT); + + /* extract link mode setting */ + link_mode = ctrl_ext & E1000_CTRL_EXT_LINK_MODE_MASK; + + switch (link_mode) { + case E1000_CTRL_EXT_LINK_MODE_1000BASE_KX: + hw->phy.media_type = e1000_media_type_internal_serdes; + break; + case E1000_CTRL_EXT_LINK_MODE_GMII: + hw->phy.media_type = e1000_media_type_copper; + break; + case E1000_CTRL_EXT_LINK_MODE_SGMII: + /* Get phy control interface type set (MDIO vs. I2C)*/ + if (e1000_sgmii_uses_mdio_82575(hw)) { + hw->phy.media_type = e1000_media_type_copper; + dev_spec->sgmii_active = true; + break; + } + /* fall through for I2C based SGMII */ + case E1000_CTRL_EXT_LINK_MODE_PCIE_SERDES: + /* read media type from SFP EEPROM */ +#ifdef I2C_ENABLED + printk(KERN_INFO "igb_avb I2C enabled - set_sfp_media_type_82575() called"); + ret_val = e1000_set_sfp_media_type_82575(hw); +#else + printk(KERN_INFO "igb_avb I2C disabled - set_sfp_media_type_82575() not necessary"); + hw->phy.media_type = e1000_media_type_unknown; +#endif + if ((ret_val != E1000_SUCCESS) || + (hw->phy.media_type == e1000_media_type_unknown)) { + /* + * If media type was not identified then return media + * type defined by the CTRL_EXT settings. 
+ */ + hw->phy.media_type = e1000_media_type_internal_serdes; + + if (link_mode == E1000_CTRL_EXT_LINK_MODE_SGMII) { + hw->phy.media_type = e1000_media_type_copper; + dev_spec->sgmii_active = true; + } + + break; + } + + /* do not change link mode for 100BaseFX */ + if (dev_spec->eth_flags.e100_base_fx) + break; + + /* change current link mode setting */ + ctrl_ext &= ~E1000_CTRL_EXT_LINK_MODE_MASK; + + if (hw->phy.media_type == e1000_media_type_copper) + ctrl_ext |= E1000_CTRL_EXT_LINK_MODE_SGMII; + else + ctrl_ext |= E1000_CTRL_EXT_LINK_MODE_PCIE_SERDES; + + E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext); + + break; + } + + return ret_val; +} + +/** + * e1000_set_sfp_media_type_82575 - derives SFP module media type. + * @hw: pointer to the HW structure + * + * The media type is chosen based on SFP module. + * compatibility flags retrieved from SFP ID EEPROM. + **/ +#ifdef I2C_ENABLED +static s32 e1000_set_sfp_media_type_82575(struct e1000_hw *hw) +{ + s32 ret_val = E1000_ERR_CONFIG; + u32 ctrl_ext = 0; + struct e1000_dev_spec_82575 *dev_spec = &hw->dev_spec._82575; + struct sfp_e1000_flags *eth_flags = &dev_spec->eth_flags; + u8 tranceiver_type = 0; + s32 timeout = 3; + + /* Turn I2C interface ON and power on sfp cage */ + ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT); + ctrl_ext &= ~E1000_CTRL_EXT_SDP3_DATA; + E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext | E1000_CTRL_I2C_ENA); + + E1000_WRITE_FLUSH(hw); + + /* Read SFP module data */ + while (timeout) { + ret_val = e1000_read_sfp_data_byte(hw, + E1000_I2CCMD_SFP_DATA_ADDR(E1000_SFF_IDENTIFIER_OFFSET), + &tranceiver_type); + if (ret_val == E1000_SUCCESS) + break; + msec_delay(100); + timeout--; + } + if (ret_val != E1000_SUCCESS) + goto out; + + ret_val = e1000_read_sfp_data_byte(hw, + E1000_I2CCMD_SFP_DATA_ADDR(E1000_SFF_ETH_FLAGS_OFFSET), + (u8 *)eth_flags); + if (ret_val != E1000_SUCCESS) + goto out; + + /* Check if there is some SFP module plugged and powered */ + if ((tranceiver_type == 
E1000_SFF_IDENTIFIER_SFP) || + (tranceiver_type == E1000_SFF_IDENTIFIER_SFF)) { + dev_spec->module_plugged = true; + if (eth_flags->e1000_base_lx || eth_flags->e1000_base_sx) { + hw->phy.media_type = e1000_media_type_internal_serdes; + } else if (eth_flags->e100_base_fx) { + dev_spec->sgmii_active = true; + hw->phy.media_type = e1000_media_type_internal_serdes; + } else if (eth_flags->e1000_base_t) { + dev_spec->sgmii_active = true; + hw->phy.media_type = e1000_media_type_copper; + } else { + hw->phy.media_type = e1000_media_type_unknown; + DEBUGOUT("PHY module has not been recognized\n"); + goto out; + } + } else { + hw->phy.media_type = e1000_media_type_unknown; + } + ret_val = E1000_SUCCESS; +out: + /* Restore I2C interface setting */ + E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext); + return ret_val; +} +#endif +/** + * e1000_valid_led_default_82575 - Verify a valid default LED config + * @hw: pointer to the HW structure + * @data: pointer to the NVM (EEPROM) + * + * Read the EEPROM for the current default LED configuration. If the + * LED configuration is not valid, set to a valid LED configuration. + **/ +static s32 e1000_valid_led_default_82575(struct e1000_hw *hw, u16 *data) +{ + s32 ret_val; + + DEBUGFUNC("e1000_valid_led_default_82575"); + + ret_val = hw->nvm.ops.read(hw, NVM_ID_LED_SETTINGS, 1, data); + if (ret_val) { + DEBUGOUT("NVM Read Error\n"); + goto out; + } + + if (*data == ID_LED_RESERVED_0000 || *data == ID_LED_RESERVED_FFFF) { + switch (hw->phy.media_type) { + case e1000_media_type_internal_serdes: + *data = ID_LED_DEFAULT_82575_SERDES; + break; + case e1000_media_type_copper: + default: + *data = ID_LED_DEFAULT; + break; + } + } +out: + return ret_val; +} + +/** + * e1000_sgmii_active_82575 - Return sgmii state + * @hw: pointer to the HW structure + * + * 82575 silicon has a serialized gigabit media independent interface (sgmii) + * which can be enabled for use in the embedded applications. 
Simply + * return the current state of the sgmii interface. + **/ +static bool e1000_sgmii_active_82575(struct e1000_hw *hw) +{ + struct e1000_dev_spec_82575 *dev_spec = &hw->dev_spec._82575; + return dev_spec->sgmii_active; +} + +/** + * e1000_reset_init_script_82575 - Inits HW defaults after reset + * @hw: pointer to the HW structure + * + * Inits recommended HW defaults after a reset when there is no EEPROM + * detected. This is only for the 82575. + **/ +static s32 e1000_reset_init_script_82575(struct e1000_hw *hw) +{ + DEBUGFUNC("e1000_reset_init_script_82575"); + + if (hw->mac.type == e1000_82575) { + DEBUGOUT("Running reset init script for 82575\n"); + /* SerDes configuration via SERDESCTRL */ + e1000_write_8bit_ctrl_reg_generic(hw, E1000_SCTL, 0x00, 0x0C); + e1000_write_8bit_ctrl_reg_generic(hw, E1000_SCTL, 0x01, 0x78); + e1000_write_8bit_ctrl_reg_generic(hw, E1000_SCTL, 0x1B, 0x23); + e1000_write_8bit_ctrl_reg_generic(hw, E1000_SCTL, 0x23, 0x15); + + /* CCM configuration via CCMCTL register */ + e1000_write_8bit_ctrl_reg_generic(hw, E1000_CCMCTL, 0x14, 0x00); + e1000_write_8bit_ctrl_reg_generic(hw, E1000_CCMCTL, 0x10, 0x00); + + /* PCIe lanes configuration */ + e1000_write_8bit_ctrl_reg_generic(hw, E1000_GIOCTL, 0x00, 0xEC); + e1000_write_8bit_ctrl_reg_generic(hw, E1000_GIOCTL, 0x61, 0xDF); + e1000_write_8bit_ctrl_reg_generic(hw, E1000_GIOCTL, 0x34, 0x05); + e1000_write_8bit_ctrl_reg_generic(hw, E1000_GIOCTL, 0x2F, 0x81); + + /* PCIe PLL Configuration */ + e1000_write_8bit_ctrl_reg_generic(hw, E1000_SCCTL, 0x02, 0x47); + e1000_write_8bit_ctrl_reg_generic(hw, E1000_SCCTL, 0x14, 0x00); + e1000_write_8bit_ctrl_reg_generic(hw, E1000_SCCTL, 0x10, 0x00); + } + + return E1000_SUCCESS; +} + +/** + * e1000_read_mac_addr_82575 - Read device MAC address + * @hw: pointer to the HW structure + **/ +static s32 e1000_read_mac_addr_82575(struct e1000_hw *hw) +{ + s32 ret_val; + + DEBUGFUNC("e1000_read_mac_addr_82575"); + + /* + * If there's an alternate MAC address place 
it in RAR0 + * so that it will override the Si installed default perm + * address. + */ + ret_val = e1000_check_alt_mac_addr_generic(hw); + if (ret_val) + goto out; + + ret_val = e1000_read_mac_addr_generic(hw); + +out: + return ret_val; +} + +/** + * e1000_config_collision_dist_82575 - Configure collision distance + * @hw: pointer to the HW structure + * + * Configures the collision distance to the default value and is used + * during link setup. + **/ +static void e1000_config_collision_dist_82575(struct e1000_hw *hw) +{ + u32 tctl_ext; + + DEBUGFUNC("e1000_config_collision_dist_82575"); + + tctl_ext = E1000_READ_REG(hw, E1000_TCTL_EXT); + + tctl_ext &= ~E1000_TCTL_EXT_COLD; + tctl_ext |= E1000_COLLISION_DISTANCE << E1000_TCTL_EXT_COLD_SHIFT; + + E1000_WRITE_REG(hw, E1000_TCTL_EXT, tctl_ext); + E1000_WRITE_FLUSH(hw); +} + +/** + * e1000_power_down_phy_copper_82575 - Remove link during PHY power down + * @hw: pointer to the HW structure + * + * In the case of a PHY power down to save power, or to turn off link during a + * driver unload, or wake on lan is not enabled, remove the link. + **/ +static void e1000_power_down_phy_copper_82575(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + + if (!(phy->ops.check_reset_block)) + return; + + /* If the management interface is not enabled, then power down */ + if (!(e1000_enable_mng_pass_thru(hw) || phy->ops.check_reset_block(hw))) + e1000_power_down_phy_copper(hw); + + return; +} + +/** + * e1000_clear_hw_cntrs_82575 - Clear device specific hardware counters + * @hw: pointer to the HW structure + * + * Clears the hardware counters by reading the counter registers. 
+ **/ +static void e1000_clear_hw_cntrs_82575(struct e1000_hw *hw) +{ + DEBUGFUNC("e1000_clear_hw_cntrs_82575"); + + e1000_clear_hw_cntrs_base_generic(hw); + + E1000_READ_REG(hw, E1000_PRC64); + E1000_READ_REG(hw, E1000_PRC127); + E1000_READ_REG(hw, E1000_PRC255); + E1000_READ_REG(hw, E1000_PRC511); + E1000_READ_REG(hw, E1000_PRC1023); + E1000_READ_REG(hw, E1000_PRC1522); + E1000_READ_REG(hw, E1000_PTC64); + E1000_READ_REG(hw, E1000_PTC127); + E1000_READ_REG(hw, E1000_PTC255); + E1000_READ_REG(hw, E1000_PTC511); + E1000_READ_REG(hw, E1000_PTC1023); + E1000_READ_REG(hw, E1000_PTC1522); + + E1000_READ_REG(hw, E1000_ALGNERRC); + E1000_READ_REG(hw, E1000_RXERRC); + E1000_READ_REG(hw, E1000_TNCRS); + E1000_READ_REG(hw, E1000_CEXTERR); + E1000_READ_REG(hw, E1000_TSCTC); + E1000_READ_REG(hw, E1000_TSCTFC); + + E1000_READ_REG(hw, E1000_MGTPRC); + E1000_READ_REG(hw, E1000_MGTPDC); + E1000_READ_REG(hw, E1000_MGTPTC); + + E1000_READ_REG(hw, E1000_IAC); + E1000_READ_REG(hw, E1000_ICRXOC); + + E1000_READ_REG(hw, E1000_ICRXPTC); + E1000_READ_REG(hw, E1000_ICRXATC); + E1000_READ_REG(hw, E1000_ICTXPTC); + E1000_READ_REG(hw, E1000_ICTXATC); + E1000_READ_REG(hw, E1000_ICTXQEC); + E1000_READ_REG(hw, E1000_ICTXQMTC); + E1000_READ_REG(hw, E1000_ICRXDMTC); + + E1000_READ_REG(hw, E1000_CBTMPC); + E1000_READ_REG(hw, E1000_HTDPMC); + E1000_READ_REG(hw, E1000_CBRMPC); + E1000_READ_REG(hw, E1000_RPTHC); + E1000_READ_REG(hw, E1000_HGPTC); + E1000_READ_REG(hw, E1000_HTCBDPC); + E1000_READ_REG(hw, E1000_HGORCL); + E1000_READ_REG(hw, E1000_HGORCH); + E1000_READ_REG(hw, E1000_HGOTCL); + E1000_READ_REG(hw, E1000_HGOTCH); + E1000_READ_REG(hw, E1000_LENERRS); + + /* This register should not be read in copper configurations */ + if ((hw->phy.media_type == e1000_media_type_internal_serdes) || + e1000_sgmii_active_82575(hw)) + E1000_READ_REG(hw, E1000_SCVPC); +} + +/** + * e1000_rx_fifo_flush_82575 - Clean rx fifo after Rx enable + * @hw: pointer to the HW structure + * + * After Rx enable, if 
manageability is enabled then there is likely some + * bad data at the start of the fifo and possibly in the DMA fifo. This + * function clears the fifos and flushes any packets that came in as rx was + * being enabled. + **/ +void e1000_rx_fifo_flush_82575(struct e1000_hw *hw) +{ + u32 rctl, rlpml, rxdctl[4], rfctl, temp_rctl, rx_enabled; + int i, ms_wait; + + DEBUGFUNC("e1000_rx_fifo_flush_82575"); + + /* disable IPv6 options as per hardware errata */ + rfctl = E1000_READ_REG(hw, E1000_RFCTL); + rfctl |= E1000_RFCTL_IPV6_EX_DIS; + E1000_WRITE_REG(hw, E1000_RFCTL, rfctl); + + if (hw->mac.type != e1000_82575 || + !(E1000_READ_REG(hw, E1000_MANC) & E1000_MANC_RCV_TCO_EN)) + return; + + /* Disable all Rx queues */ + for (i = 0; i < 4; i++) { + rxdctl[i] = E1000_READ_REG(hw, E1000_RXDCTL(i)); + E1000_WRITE_REG(hw, E1000_RXDCTL(i), + rxdctl[i] & ~E1000_RXDCTL_QUEUE_ENABLE); + } + /* Poll all queues to verify they have shut down */ + for (ms_wait = 0; ms_wait < 10; ms_wait++) { + msec_delay(1); + rx_enabled = 0; + for (i = 0; i < 4; i++) + rx_enabled |= E1000_READ_REG(hw, E1000_RXDCTL(i)); + if (!(rx_enabled & E1000_RXDCTL_QUEUE_ENABLE)) + break; + } + + if (ms_wait == 10) + DEBUGOUT("Queue disable timed out after 10ms\n"); + + /* Clear RLPML, RCTL.SBP, RFCTL.LEF, and set RCTL.LPE so that all + * incoming packets are rejected. 
Set enable and wait 2ms so that + * any packet that was coming in as RCTL.EN was set is flushed + */ + E1000_WRITE_REG(hw, E1000_RFCTL, rfctl & ~E1000_RFCTL_LEF); + + rlpml = E1000_READ_REG(hw, E1000_RLPML); + E1000_WRITE_REG(hw, E1000_RLPML, 0); + + rctl = E1000_READ_REG(hw, E1000_RCTL); + temp_rctl = rctl & ~(E1000_RCTL_EN | E1000_RCTL_SBP); + temp_rctl |= E1000_RCTL_LPE; + + E1000_WRITE_REG(hw, E1000_RCTL, temp_rctl); + E1000_WRITE_REG(hw, E1000_RCTL, temp_rctl | E1000_RCTL_EN); + E1000_WRITE_FLUSH(hw); + msec_delay(2); + + /* Enable Rx queues that were previously enabled and restore our + * previous state + */ + for (i = 0; i < 4; i++) + E1000_WRITE_REG(hw, E1000_RXDCTL(i), rxdctl[i]); + E1000_WRITE_REG(hw, E1000_RCTL, rctl); + E1000_WRITE_FLUSH(hw); + + E1000_WRITE_REG(hw, E1000_RLPML, rlpml); + E1000_WRITE_REG(hw, E1000_RFCTL, rfctl); + + /* Flush receive errors generated by workaround */ + E1000_READ_REG(hw, E1000_ROC); + E1000_READ_REG(hw, E1000_RNBC); + E1000_READ_REG(hw, E1000_MPC); +} + +/** + * e1000_set_pcie_completion_timeout - set pci-e completion timeout + * @hw: pointer to the HW structure + * + * The defaults for 82575 and 82576 should be in the range of 50us to 50ms, + * however the hardware default for these parts is 500us to 1ms which is less + * than the 10ms recommended by the pci-e spec. To address this we need to + * increase the value to either 10ms to 200ms for capability version 1 config, + * or 16ms to 55ms for version 2. 
+ **/ +static s32 e1000_set_pcie_completion_timeout(struct e1000_hw *hw) +{ + u32 gcr = E1000_READ_REG(hw, E1000_GCR); + s32 ret_val = E1000_SUCCESS; + u16 pcie_devctl2; + + /* only take action if timeout value is defaulted to 0 */ + if (gcr & E1000_GCR_CMPL_TMOUT_MASK) + goto out; + + /* + * if capabilities version is type 1 we can write the + * timeout of 10ms to 200ms through the GCR register + */ + if (!(gcr & E1000_GCR_CAP_VER2)) { + gcr |= E1000_GCR_CMPL_TMOUT_10ms; + goto out; + } + + /* + * for version 2 capabilities we need to write the config space + * directly in order to set the completion timeout value for + * 16ms to 55ms + */ + ret_val = e1000_read_pcie_cap_reg(hw, PCIE_DEVICE_CONTROL2, + &pcie_devctl2); + if (ret_val) + goto out; + + pcie_devctl2 |= PCIE_DEVICE_CONTROL2_16ms; + + ret_val = e1000_write_pcie_cap_reg(hw, PCIE_DEVICE_CONTROL2, + &pcie_devctl2); +out: + /* disable completion timeout resend */ + gcr &= ~E1000_GCR_CMPL_TMOUT_RESEND; + + E1000_WRITE_REG(hw, E1000_GCR, gcr); + return ret_val; +} + +/** + * e1000_vmdq_set_anti_spoofing_pf - enable or disable anti-spoofing + * @hw: pointer to the hardware struct + * @enable: state to enter, either enabled or disabled + * @pf: Physical Function pool - do not set anti-spoofing for the PF + * + * enables/disables L2 switch anti-spoofing functionality. 
+ **/ +void e1000_vmdq_set_anti_spoofing_pf(struct e1000_hw *hw, bool enable, int pf) +{ + u32 reg_val, reg_offset; + + switch (hw->mac.type) { + case e1000_82576: + reg_offset = E1000_DTXSWC; + break; + case e1000_i350: + case e1000_i354: + reg_offset = E1000_TXSWC; + break; + default: + return; + } + + reg_val = E1000_READ_REG(hw, reg_offset); + if (enable) { + reg_val |= (E1000_DTXSWC_MAC_SPOOF_MASK | + E1000_DTXSWC_VLAN_SPOOF_MASK); + /* The PF can spoof - it has to in order to + * support emulation mode NICs + */ + reg_val ^= (1 << pf | 1 << (pf + MAX_NUM_VFS)); + } else { + reg_val &= ~(E1000_DTXSWC_MAC_SPOOF_MASK | + E1000_DTXSWC_VLAN_SPOOF_MASK); + } + E1000_WRITE_REG(hw, reg_offset, reg_val); +} + +/** + * e1000_vmdq_set_loopback_pf - enable or disable vmdq loopback + * @hw: pointer to the hardware struct + * @enable: state to enter, either enabled or disabled + * + * enables/disables L2 switch loopback functionality. + **/ +void e1000_vmdq_set_loopback_pf(struct e1000_hw *hw, bool enable) +{ + u32 dtxswc; + + switch (hw->mac.type) { + case e1000_82576: + dtxswc = E1000_READ_REG(hw, E1000_DTXSWC); + if (enable) + dtxswc |= E1000_DTXSWC_VMDQ_LOOPBACK_EN; + else + dtxswc &= ~E1000_DTXSWC_VMDQ_LOOPBACK_EN; + E1000_WRITE_REG(hw, E1000_DTXSWC, dtxswc); + break; + case e1000_i350: + case e1000_i354: + dtxswc = E1000_READ_REG(hw, E1000_TXSWC); + if (enable) + dtxswc |= E1000_DTXSWC_VMDQ_LOOPBACK_EN; + else + dtxswc &= ~E1000_DTXSWC_VMDQ_LOOPBACK_EN; + E1000_WRITE_REG(hw, E1000_TXSWC, dtxswc); + break; + default: + /* Currently no other hardware supports loopback */ + break; + } + + +} + +/** + * e1000_vmdq_set_replication_pf - enable or disable vmdq replication + * @hw: pointer to the hardware struct + * @enable: state to enter, either enabled or disabled + * + * enables/disables replication of packets across multiple pools. 
+ **/ +void e1000_vmdq_set_replication_pf(struct e1000_hw *hw, bool enable) +{ + u32 vt_ctl = E1000_READ_REG(hw, E1000_VT_CTL); + + if (enable) + vt_ctl |= E1000_VT_CTL_VM_REPL_EN; + else + vt_ctl &= ~E1000_VT_CTL_VM_REPL_EN; + + E1000_WRITE_REG(hw, E1000_VT_CTL, vt_ctl); +} + +/** + * e1000_read_phy_reg_82580 - Read 82580 MDI control register + * @hw: pointer to the HW structure + * @offset: register offset to be read + * @data: pointer to the read data + * + * Reads the MDI control register in the PHY at offset and stores the + * information read to data. + **/ +static s32 e1000_read_phy_reg_82580(struct e1000_hw *hw, u32 offset, u16 *data) +{ + s32 ret_val; + + DEBUGFUNC("e1000_read_phy_reg_82580"); + + ret_val = hw->phy.ops.acquire(hw); + if (ret_val) + goto out; + + ret_val = e1000_read_phy_reg_mdic(hw, offset, data); + + hw->phy.ops.release(hw); + +out: + return ret_val; +} + +/** + * e1000_write_phy_reg_82580 - Write 82580 MDI control register + * @hw: pointer to the HW structure + * @offset: register offset to write to + * @data: data to write to register at offset + * + * Writes data to MDI control register in the PHY at offset. + **/ +static s32 e1000_write_phy_reg_82580(struct e1000_hw *hw, u32 offset, u16 data) +{ + s32 ret_val; + + DEBUGFUNC("e1000_write_phy_reg_82580"); + + ret_val = hw->phy.ops.acquire(hw); + if (ret_val) + goto out; + + ret_val = e1000_write_phy_reg_mdic(hw, offset, data); + + hw->phy.ops.release(hw); + +out: + return ret_val; +} + +/** + * e1000_reset_mdicnfg_82580 - Reset MDICNFG destination and com_mdio bits + * @hw: pointer to the HW structure + * + * This resets the MDICNFG.Destination and MDICNFG.Com_MDIO bits based on + * the values found in the EEPROM. This addresses an issue in which these + * bits are not restored from EEPROM after reset. 
+ **/ +static s32 e1000_reset_mdicnfg_82580(struct e1000_hw *hw) +{ + s32 ret_val = E1000_SUCCESS; + u32 mdicnfg; + u16 nvm_data = 0; + + DEBUGFUNC("e1000_reset_mdicnfg_82580"); + + if (hw->mac.type != e1000_82580) + goto out; + if (!e1000_sgmii_active_82575(hw)) + goto out; + + ret_val = hw->nvm.ops.read(hw, NVM_INIT_CONTROL3_PORT_A + + NVM_82580_LAN_FUNC_OFFSET(hw->bus.func), 1, + &nvm_data); + if (ret_val) { + DEBUGOUT("NVM Read Error\n"); + goto out; + } + + mdicnfg = E1000_READ_REG(hw, E1000_MDICNFG); + if (nvm_data & NVM_WORD24_EXT_MDIO) + mdicnfg |= E1000_MDICNFG_EXT_MDIO; + if (nvm_data & NVM_WORD24_COM_MDIO) + mdicnfg |= E1000_MDICNFG_COM_MDIO; + E1000_WRITE_REG(hw, E1000_MDICNFG, mdicnfg); +out: + return ret_val; +} + +/** + * e1000_reset_hw_82580 - Reset hardware + * @hw: pointer to the HW structure + * + * This resets function or entire device (all ports, etc.) + * to a known state. + **/ +static s32 e1000_reset_hw_82580(struct e1000_hw *hw) +{ + s32 ret_val = E1000_SUCCESS; + /* BH SW mailbox bit in SW_FW_SYNC */ + u16 swmbsw_mask = E1000_SW_SYNCH_MB; + u32 ctrl; + bool global_device_reset = hw->dev_spec._82575.global_device_reset; + + DEBUGFUNC("e1000_reset_hw_82580"); + + hw->dev_spec._82575.global_device_reset = false; + + /* 82580 does not reliably do global_device_reset due to hw errata */ + if (hw->mac.type == e1000_82580) + global_device_reset = false; + + /* Get current control state. */ + ctrl = E1000_READ_REG(hw, E1000_CTRL); + + /* + * Prevent the PCI-E bus from sticking if there is no TLP connection + * on the last TLP read/write transaction when MAC is reset. 
+ */ + ret_val = e1000_disable_pcie_master_generic(hw); + if (ret_val) + DEBUGOUT("PCI-E Master disable polling has failed.\n"); + + DEBUGOUT("Masking off all interrupts\n"); + E1000_WRITE_REG(hw, E1000_IMC, 0xffffffff); + E1000_WRITE_REG(hw, E1000_RCTL, 0); + E1000_WRITE_REG(hw, E1000_TCTL, E1000_TCTL_PSP); + E1000_WRITE_FLUSH(hw); + + msec_delay(10); + + /* Determine whether or not a global dev reset is requested */ + if (global_device_reset && hw->mac.ops.acquire_swfw_sync(hw, + swmbsw_mask)) + global_device_reset = false; + + if (global_device_reset && !(E1000_READ_REG(hw, E1000_STATUS) & + E1000_STAT_DEV_RST_SET)) + ctrl |= E1000_CTRL_DEV_RST; + else + ctrl |= E1000_CTRL_RST; + + E1000_WRITE_REG(hw, E1000_CTRL, ctrl); + + switch (hw->device_id) { + case E1000_DEV_ID_DH89XXCC_SGMII: + break; + default: + E1000_WRITE_FLUSH(hw); + break; + } + + /* Add delay to insure DEV_RST or RST has time to complete */ + msec_delay(5); + + ret_val = e1000_get_auto_rd_done_generic(hw); + if (ret_val) { + /* + * When auto config read does not complete, do not + * return with an error. This can happen in situations + * where there is no eeprom and prevents getting link. + */ + DEBUGOUT("Auto Read Done did not complete\n"); + } + + /* clear global device reset status bit */ + E1000_WRITE_REG(hw, E1000_STATUS, E1000_STAT_DEV_RST_SET); + + /* Clear any pending interrupt events. 
*/ + E1000_WRITE_REG(hw, E1000_IMC, 0xffffffff); + E1000_READ_REG(hw, E1000_ICR); + + ret_val = e1000_reset_mdicnfg_82580(hw); + if (ret_val) + DEBUGOUT("Could not reset MDICNFG based on EEPROM\n"); + + /* Install any alternate MAC address into RAR0 */ + ret_val = e1000_check_alt_mac_addr_generic(hw); + + /* Release semaphore */ + if (global_device_reset) + hw->mac.ops.release_swfw_sync(hw, swmbsw_mask); + + return ret_val; +} + +/** + * e1000_rxpbs_adjust_82580 - adjust RXPBS value to reflect actual Rx PBA size + * @data: data received by reading RXPBS register + * + * The 82580 uses a table based approach for packet buffer allocation sizes. + * This function converts the retrieved value into the correct table value + * 0x0 0x1 0x2 0x3 0x4 0x5 0x6 0x7 + * 0x0 36 72 144 1 2 4 8 16 + * 0x8 35 70 140 rsv rsv rsv rsv rsv + */ +u16 e1000_rxpbs_adjust_82580(u32 data) +{ + u16 ret_val = 0; + + if (data < E1000_82580_RXPBS_TABLE_SIZE) + ret_val = e1000_82580_rxpbs_table[data]; + + return ret_val; +} + +/** + * e1000_validate_nvm_checksum_with_offset - Validate EEPROM + * checksum + * @hw: pointer to the HW structure + * @offset: offset in words of the checksum protected region + * + * Calculates the EEPROM checksum by reading/adding each word of the EEPROM + * and then verifies that the sum of the EEPROM is equal to 0xBABA. 
+ **/ +s32 e1000_validate_nvm_checksum_with_offset(struct e1000_hw *hw, u16 offset) +{ + s32 ret_val = E1000_SUCCESS; + u16 checksum = 0; + u16 i, nvm_data; + + DEBUGFUNC("e1000_validate_nvm_checksum_with_offset"); + + for (i = offset; i < ((NVM_CHECKSUM_REG + offset) + 1); i++) { + ret_val = hw->nvm.ops.read(hw, i, 1, &nvm_data); + if (ret_val) { + DEBUGOUT("NVM Read Error\n"); + goto out; + } + checksum += nvm_data; + } + + if (checksum != (u16) NVM_SUM) { + DEBUGOUT("NVM Checksum Invalid\n"); + ret_val = -E1000_ERR_NVM; + goto out; + } + +out: + return ret_val; +} + +/** + * e1000_update_nvm_checksum_with_offset - Update EEPROM + * checksum + * @hw: pointer to the HW structure + * @offset: offset in words of the checksum protected region + * + * Updates the EEPROM checksum by reading/adding each word of the EEPROM + * up to the checksum. Then calculates the EEPROM checksum and writes the + * value to the EEPROM. + **/ +s32 e1000_update_nvm_checksum_with_offset(struct e1000_hw *hw, u16 offset) +{ + s32 ret_val; + u16 checksum = 0; + u16 i, nvm_data; + + DEBUGFUNC("e1000_update_nvm_checksum_with_offset"); + + for (i = offset; i < (NVM_CHECKSUM_REG + offset); i++) { + ret_val = hw->nvm.ops.read(hw, i, 1, &nvm_data); + if (ret_val) { + DEBUGOUT("NVM Read Error while updating checksum.\n"); + goto out; + } + checksum += nvm_data; + } + checksum = (u16) NVM_SUM - checksum; + ret_val = hw->nvm.ops.write(hw, (NVM_CHECKSUM_REG + offset), 1, + &checksum); + if (ret_val) + DEBUGOUT("NVM Write Error while updating checksum.\n"); + +out: + return ret_val; +} + +/** + * e1000_validate_nvm_checksum_82580 - Validate EEPROM checksum + * @hw: pointer to the HW structure + * + * Calculates the EEPROM section checksum by reading/adding each word of + * the EEPROM and then verifies that the sum of the EEPROM is + * equal to 0xBABA. 
+ **/ +static s32 e1000_validate_nvm_checksum_82580(struct e1000_hw *hw) +{ + s32 ret_val; + u16 eeprom_regions_count = 1; + u16 j, nvm_data; + u16 nvm_offset; + + DEBUGFUNC("e1000_validate_nvm_checksum_82580"); + + ret_val = hw->nvm.ops.read(hw, NVM_COMPATIBILITY_REG_3, 1, &nvm_data); + if (ret_val) { + DEBUGOUT("NVM Read Error\n"); + goto out; + } + + if (nvm_data & NVM_COMPATIBILITY_BIT_MASK) { + /* if checksums compatibility bit is set validate checksums + * for all 4 ports. */ + eeprom_regions_count = 4; + } + + for (j = 0; j < eeprom_regions_count; j++) { + nvm_offset = NVM_82580_LAN_FUNC_OFFSET(j); + ret_val = e1000_validate_nvm_checksum_with_offset(hw, + nvm_offset); + if (ret_val != E1000_SUCCESS) + goto out; + } + +out: + return ret_val; +} + +/** + * e1000_update_nvm_checksum_82580 - Update EEPROM checksum + * @hw: pointer to the HW structure + * + * Updates the EEPROM section checksums for all 4 ports by reading/adding + * each word of the EEPROM up to the checksum. Then calculates the EEPROM + * checksum and writes the value to the EEPROM. 
+ **/ +static s32 e1000_update_nvm_checksum_82580(struct e1000_hw *hw) +{ + s32 ret_val; + u16 j, nvm_data; + u16 nvm_offset; + + DEBUGFUNC("e1000_update_nvm_checksum_82580"); + + ret_val = hw->nvm.ops.read(hw, NVM_COMPATIBILITY_REG_3, 1, &nvm_data); + if (ret_val) { + DEBUGOUT("NVM Read Error while updating checksum compatibility bit.\n"); + goto out; + } + + if (!(nvm_data & NVM_COMPATIBILITY_BIT_MASK)) { + /* set compatibility bit to validate checksums appropriately */ + nvm_data = nvm_data | NVM_COMPATIBILITY_BIT_MASK; + ret_val = hw->nvm.ops.write(hw, NVM_COMPATIBILITY_REG_3, 1, + &nvm_data); + if (ret_val) { + DEBUGOUT("NVM Write Error while updating checksum compatibility bit.\n"); + goto out; + } + } + + for (j = 0; j < 4; j++) { + nvm_offset = NVM_82580_LAN_FUNC_OFFSET(j); + ret_val = e1000_update_nvm_checksum_with_offset(hw, nvm_offset); + if (ret_val) + goto out; + } + +out: + return ret_val; +} + +/** + * e1000_validate_nvm_checksum_i350 - Validate EEPROM checksum + * @hw: pointer to the HW structure + * + * Calculates the EEPROM section checksum by reading/adding each word of + * the EEPROM and then verifies that the sum of the EEPROM is + * equal to 0xBABA. + **/ +static s32 e1000_validate_nvm_checksum_i350(struct e1000_hw *hw) +{ + s32 ret_val = E1000_SUCCESS; + u16 j; + u16 nvm_offset; + + DEBUGFUNC("e1000_validate_nvm_checksum_i350"); + + for (j = 0; j < 4; j++) { + nvm_offset = NVM_82580_LAN_FUNC_OFFSET(j); + ret_val = e1000_validate_nvm_checksum_with_offset(hw, + nvm_offset); + if (ret_val != E1000_SUCCESS) + goto out; + } + +out: + return ret_val; +} + +/** + * e1000_update_nvm_checksum_i350 - Update EEPROM checksum + * @hw: pointer to the HW structure + * + * Updates the EEPROM section checksums for all 4 ports by reading/adding + * each word of the EEPROM up to the checksum. Then calculates the EEPROM + * checksum and writes the value to the EEPROM. 
+ **/ +static s32 e1000_update_nvm_checksum_i350(struct e1000_hw *hw) +{ + s32 ret_val = E1000_SUCCESS; + u16 j; + u16 nvm_offset; + + DEBUGFUNC("e1000_update_nvm_checksum_i350"); + + for (j = 0; j < 4; j++) { + nvm_offset = NVM_82580_LAN_FUNC_OFFSET(j); + ret_val = e1000_update_nvm_checksum_with_offset(hw, nvm_offset); + if (ret_val != E1000_SUCCESS) + goto out; + } + +out: + return ret_val; +} + +/** + * __e1000_access_emi_reg - Read/write EMI register + * @hw: pointer to the HW structure + * @addr: EMI address to program + * @data: pointer to value to read/write from/to the EMI address + * @read: boolean flag to indicate read or write + **/ +static s32 __e1000_access_emi_reg(struct e1000_hw *hw, u16 address, + u16 *data, bool read) +{ + s32 ret_val; + + DEBUGFUNC("__e1000_access_emi_reg"); + + ret_val = hw->phy.ops.write_reg(hw, E1000_EMIADD, address); + if (ret_val) + return ret_val; + + if (read) + ret_val = hw->phy.ops.read_reg(hw, E1000_EMIDATA, data); + else + ret_val = hw->phy.ops.write_reg(hw, E1000_EMIDATA, *data); + + return ret_val; +} + +/** + * e1000_read_emi_reg - Read Extended Management Interface register + * @hw: pointer to the HW structure + * @addr: EMI address to program + * @data: value to be read from the EMI address + **/ +s32 e1000_read_emi_reg(struct e1000_hw *hw, u16 addr, u16 *data) +{ + DEBUGFUNC("e1000_read_emi_reg"); + + return __e1000_access_emi_reg(hw, addr, data, true); +} + +/** + * e1000_initialize_M88E1512_phy - Initialize M88E1512 PHY + * @hw: pointer to the HW structure + * + * Initialize Marverl 1512 to work correctly with Avoton. + **/ +s32 e1000_initialize_M88E1512_phy(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val = E1000_SUCCESS; + + DEBUGFUNC("e1000_initialize_M88E1512_phy"); + + /* Check if this is correct PHY. */ + if (phy->id != M88E1512_E_PHY_ID) + goto out; + + /* Switch to PHY page 0xFF. 
*/ + ret_val = phy->ops.write_reg(hw, E1000_M88E1543_PAGE_ADDR, 0x00FF); + if (ret_val) + goto out; + + ret_val = phy->ops.write_reg(hw, E1000_M88E1512_CFG_REG_2, 0x214B); + if (ret_val) + goto out; + + ret_val = phy->ops.write_reg(hw, E1000_M88E1512_CFG_REG_1, 0x2144); + if (ret_val) + goto out; + + ret_val = phy->ops.write_reg(hw, E1000_M88E1512_CFG_REG_2, 0x0C28); + if (ret_val) + goto out; + + ret_val = phy->ops.write_reg(hw, E1000_M88E1512_CFG_REG_1, 0x2146); + if (ret_val) + goto out; + + ret_val = phy->ops.write_reg(hw, E1000_M88E1512_CFG_REG_2, 0xB233); + if (ret_val) + goto out; + + ret_val = phy->ops.write_reg(hw, E1000_M88E1512_CFG_REG_1, 0x214D); + if (ret_val) + goto out; + + ret_val = phy->ops.write_reg(hw, E1000_M88E1512_CFG_REG_2, 0xCC0C); + if (ret_val) + goto out; + + ret_val = phy->ops.write_reg(hw, E1000_M88E1512_CFG_REG_1, 0x2159); + if (ret_val) + goto out; + + /* Switch to PHY page 0xFB. */ + ret_val = phy->ops.write_reg(hw, E1000_M88E1543_PAGE_ADDR, 0x00FB); + if (ret_val) + goto out; + + ret_val = phy->ops.write_reg(hw, E1000_M88E1512_CFG_REG_3, 0x000D); + if (ret_val) + goto out; + + /* Switch to PHY page 0x12. */ + ret_val = phy->ops.write_reg(hw, E1000_M88E1543_PAGE_ADDR, 0x12); + if (ret_val) + goto out; + + /* Change mode to SGMII-to-Copper */ + ret_val = phy->ops.write_reg(hw, E1000_M88E1512_MODE, 0x8001); + if (ret_val) + goto out; + + /* Return the PHY to page 0. */ + ret_val = phy->ops.write_reg(hw, E1000_M88E1543_PAGE_ADDR, 0); + if (ret_val) + goto out; + + ret_val = phy->ops.commit(hw); + if (ret_val) { + DEBUGOUT("Error committing the PHY changes\n"); + return ret_val; + } + + msec_delay(1000); +out: + return ret_val; +} + +/** + * e1000_set_eee_i350 - Enable/disable EEE support + * @hw: pointer to the HW structure + * @adv1g: boolean flag enabling 1G EEE advertisement + * @adv100m: boolean flag enabling 100M EEE advertisement + * + * Enable/disable EEE based on setting in dev_spec structure. 
+ * + **/ +s32 e1000_set_eee_i350(struct e1000_hw *hw, bool adv1G, bool adv100M) +{ + u32 ipcnfg, eeer; + + DEBUGFUNC("e1000_set_eee_i350"); + + if ((hw->mac.type < e1000_i350) || + (hw->phy.media_type != e1000_media_type_copper)) + goto out; + ipcnfg = E1000_READ_REG(hw, E1000_IPCNFG); + eeer = E1000_READ_REG(hw, E1000_EEER); + + /* enable or disable per user setting */ + if (!(hw->dev_spec._82575.eee_disable)) { + u32 eee_su = E1000_READ_REG(hw, E1000_EEE_SU); + + if (adv100M) + ipcnfg |= E1000_IPCNFG_EEE_100M_AN; + else + ipcnfg &= ~E1000_IPCNFG_EEE_100M_AN; + + if (adv1G) + ipcnfg |= E1000_IPCNFG_EEE_1G_AN; + else + ipcnfg &= ~E1000_IPCNFG_EEE_1G_AN; + + eeer |= (E1000_EEER_TX_LPI_EN | E1000_EEER_RX_LPI_EN | + E1000_EEER_LPI_FC); + + /* This bit should not be set in normal operation. */ + if (eee_su & E1000_EEE_SU_LPI_CLK_STP) + DEBUGOUT("LPI Clock Stop Bit should not be set!\n"); + } else { + ipcnfg &= ~(E1000_IPCNFG_EEE_1G_AN | E1000_IPCNFG_EEE_100M_AN); + eeer &= ~(E1000_EEER_TX_LPI_EN | E1000_EEER_RX_LPI_EN | + E1000_EEER_LPI_FC); + } + E1000_WRITE_REG(hw, E1000_IPCNFG, ipcnfg); + E1000_WRITE_REG(hw, E1000_EEER, eeer); + E1000_READ_REG(hw, E1000_IPCNFG); + E1000_READ_REG(hw, E1000_EEER); +out: + + return E1000_SUCCESS; +} + +/** + * e1000_set_eee_i354 - Enable/disable EEE support + * @hw: pointer to the HW structure + * @adv1g: boolean flag enabling 1G EEE advertisement + * @adv100m: boolean flag enabling 100M EEE advertisement + * + * Enable/disable EEE legacy mode based on setting in dev_spec structure. + * + **/ +s32 e1000_set_eee_i354(struct e1000_hw *hw, bool adv1G, bool adv100M) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val = E1000_SUCCESS; + u16 phy_data; + + DEBUGFUNC("e1000_set_eee_i354"); + + if ((hw->phy.media_type != e1000_media_type_copper) || + ((phy->id != M88E1543_E_PHY_ID) && + (phy->id != M88E1512_E_PHY_ID))) + goto out; + + if (!hw->dev_spec._82575.eee_disable) { + /* Switch to PHY page 18. 
*/ + ret_val = phy->ops.write_reg(hw, E1000_M88E1543_PAGE_ADDR, 18); + if (ret_val) + goto out; + + ret_val = phy->ops.read_reg(hw, E1000_M88E1543_EEE_CTRL_1, + &phy_data); + if (ret_val) + goto out; + + phy_data |= E1000_M88E1543_EEE_CTRL_1_MS; + ret_val = phy->ops.write_reg(hw, E1000_M88E1543_EEE_CTRL_1, + phy_data); + if (ret_val) + goto out; + + /* Return the PHY to page 0. */ + ret_val = phy->ops.write_reg(hw, E1000_M88E1543_PAGE_ADDR, 0); + if (ret_val) + goto out; + + /* Turn on EEE advertisement. */ + ret_val = e1000_read_xmdio_reg(hw, E1000_EEE_ADV_ADDR_I354, + E1000_EEE_ADV_DEV_I354, + &phy_data); + if (ret_val) + goto out; + + if (adv100M) + phy_data |= E1000_EEE_ADV_100_SUPPORTED; + else + phy_data &= ~E1000_EEE_ADV_100_SUPPORTED; + + if (adv1G) + phy_data |= E1000_EEE_ADV_1000_SUPPORTED; + else + phy_data &= ~E1000_EEE_ADV_1000_SUPPORTED; + + ret_val = e1000_write_xmdio_reg(hw, E1000_EEE_ADV_ADDR_I354, + E1000_EEE_ADV_DEV_I354, + phy_data); + } else { + /* Turn off EEE advertisement. */ + ret_val = e1000_read_xmdio_reg(hw, E1000_EEE_ADV_ADDR_I354, + E1000_EEE_ADV_DEV_I354, + &phy_data); + if (ret_val) + goto out; + + phy_data &= ~(E1000_EEE_ADV_100_SUPPORTED | + E1000_EEE_ADV_1000_SUPPORTED); + ret_val = e1000_write_xmdio_reg(hw, E1000_EEE_ADV_ADDR_I354, + E1000_EEE_ADV_DEV_I354, + phy_data); + } + +out: + return ret_val; +} + +/** + * e1000_get_eee_status_i354 - Get EEE status + * @hw: pointer to the HW structure + * @status: EEE status + * + * Get EEE status by guessing based on whether Tx or Rx LPI indications have + * been received. + **/ +s32 e1000_get_eee_status_i354(struct e1000_hw *hw, bool *status) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val = E1000_SUCCESS; + u16 phy_data; + + DEBUGFUNC("e1000_get_eee_status_i354"); + + /* Check if EEE is supported on this device. 
*/ + if ((hw->phy.media_type != e1000_media_type_copper) || + ((phy->id != M88E1543_E_PHY_ID) && + (phy->id != M88E1512_E_PHY_ID))) + goto out; + + ret_val = e1000_read_xmdio_reg(hw, E1000_PCS_STATUS_ADDR_I354, + E1000_PCS_STATUS_DEV_I354, + &phy_data); + if (ret_val) + goto out; + + *status = phy_data & (E1000_PCS_STATUS_TX_LPI_RCVD | + E1000_PCS_STATUS_RX_LPI_RCVD) ? true : false; + +out: + return ret_val; +} + +/* Due to a hw errata, if the host tries to configure the VFTA register + * while performing queries from the BMC or DMA, then the VFTA in some + * cases won't be written. + */ + +/** + * e1000_clear_vfta_i350 - Clear VLAN filter table + * @hw: pointer to the HW structure + * + * Clears the register array which contains the VLAN filter table by + * setting all the values to 0. + **/ +void e1000_clear_vfta_i350(struct e1000_hw *hw) +{ + u32 offset; + int i; + + DEBUGFUNC("e1000_clear_vfta_350"); + + for (offset = 0; offset < E1000_VLAN_FILTER_TBL_SIZE; offset++) { + for (i = 0; i < 10; i++) + E1000_WRITE_REG_ARRAY(hw, E1000_VFTA, offset, 0); + + E1000_WRITE_FLUSH(hw); + } +} + +/** + * e1000_write_vfta_i350 - Write value to VLAN filter table + * @hw: pointer to the HW structure + * @offset: register offset in VLAN filter table + * @value: register value written to VLAN filter table + * + * Writes value at the given offset in the register array which stores + * the VLAN filter table. 
+ **/ +void e1000_write_vfta_i350(struct e1000_hw *hw, u32 offset, u32 value) +{ + int i; + + DEBUGFUNC("e1000_write_vfta_350"); + + for (i = 0; i < 10; i++) + E1000_WRITE_REG_ARRAY(hw, E1000_VFTA, offset, value); + + E1000_WRITE_FLUSH(hw); +} + +/** + * e1000_set_i2c_bb - Enable I2C bit-bang + * @hw: pointer to the HW structure + * + * Enable I2C bit-bang interface + * + **/ +s32 e1000_set_i2c_bb(struct e1000_hw *hw) +{ + s32 ret_val = E1000_SUCCESS; + u32 ctrl_ext, i2cparams; + + DEBUGFUNC("e1000_set_i2c_bb"); + + ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT); + ctrl_ext |= E1000_CTRL_I2C_ENA; + E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext); + E1000_WRITE_FLUSH(hw); + + i2cparams = E1000_READ_REG(hw, E1000_I2CPARAMS); + i2cparams |= E1000_I2CBB_EN; + i2cparams |= E1000_I2C_DATA_OE_N; + i2cparams |= E1000_I2C_CLK_OE_N; + E1000_WRITE_REG(hw, E1000_I2CPARAMS, i2cparams); + E1000_WRITE_FLUSH(hw); + + return ret_val; +} + +/** + * e1000_read_i2c_byte_generic - Reads 8 bit word over I2C + * @hw: pointer to hardware structure + * @byte_offset: byte offset to read + * @dev_addr: device address + * @data: value read + * + * Performs byte read operation over I2C interface at + * a specified device address. 
+ **/ +s32 e1000_read_i2c_byte_generic(struct e1000_hw *hw, u8 byte_offset, + u8 dev_addr, u8 *data) +{ + s32 status = E1000_SUCCESS; + u32 max_retry = 10; + u32 retry = 1; + u16 swfw_mask = 0; + + bool nack = true; + + DEBUGFUNC("e1000_read_i2c_byte_generic"); + + swfw_mask = E1000_SWFW_PHY0_SM; + + do { + if (hw->mac.ops.acquire_swfw_sync(hw, swfw_mask) + != E1000_SUCCESS) { + status = E1000_ERR_SWFW_SYNC; + goto read_byte_out; + } + + e1000_i2c_start(hw); + + /* Device Address and write indication */ + status = e1000_clock_out_i2c_byte(hw, dev_addr); + if (status != E1000_SUCCESS) + goto fail; + + status = e1000_get_i2c_ack(hw); + if (status != E1000_SUCCESS) + goto fail; + + status = e1000_clock_out_i2c_byte(hw, byte_offset); + if (status != E1000_SUCCESS) + goto fail; + + status = e1000_get_i2c_ack(hw); + if (status != E1000_SUCCESS) + goto fail; + + e1000_i2c_start(hw); + + /* Device Address and read indication */ + status = e1000_clock_out_i2c_byte(hw, (dev_addr | 0x1)); + if (status != E1000_SUCCESS) + goto fail; + + status = e1000_get_i2c_ack(hw); + if (status != E1000_SUCCESS) + goto fail; + + e1000_clock_in_i2c_byte(hw, data); + + status = e1000_clock_out_i2c_bit(hw, nack); + if (status != E1000_SUCCESS) + goto fail; + + e1000_i2c_stop(hw); + break; + +fail: + hw->mac.ops.release_swfw_sync(hw, swfw_mask); + msec_delay(100); + e1000_i2c_bus_clear(hw); + retry++; + if (retry < max_retry) + DEBUGOUT("I2C byte read error - Retrying.\n"); + else + DEBUGOUT("I2C byte read error.\n"); + + } while (retry < max_retry); + + hw->mac.ops.release_swfw_sync(hw, swfw_mask); + +read_byte_out: + + return status; +} + +/** + * e1000_write_i2c_byte_generic - Writes 8 bit word over I2C + * @hw: pointer to hardware structure + * @byte_offset: byte offset to write + * @dev_addr: device address + * @data: value to write + * + * Performs byte write operation over I2C interface at + * a specified device address. 
+ **/ +s32 e1000_write_i2c_byte_generic(struct e1000_hw *hw, u8 byte_offset, + u8 dev_addr, u8 data) +{ + s32 status = E1000_SUCCESS; + u32 max_retry = 1; + u32 retry = 0; + u16 swfw_mask = 0; + + DEBUGFUNC("e1000_write_i2c_byte_generic"); + + swfw_mask = E1000_SWFW_PHY0_SM; + + if (hw->mac.ops.acquire_swfw_sync(hw, swfw_mask) != E1000_SUCCESS) { + status = E1000_ERR_SWFW_SYNC; + goto write_byte_out; + } + + do { + e1000_i2c_start(hw); + + status = e1000_clock_out_i2c_byte(hw, dev_addr); + if (status != E1000_SUCCESS) + goto fail; + + status = e1000_get_i2c_ack(hw); + if (status != E1000_SUCCESS) + goto fail; + + status = e1000_clock_out_i2c_byte(hw, byte_offset); + if (status != E1000_SUCCESS) + goto fail; + + status = e1000_get_i2c_ack(hw); + if (status != E1000_SUCCESS) + goto fail; + + status = e1000_clock_out_i2c_byte(hw, data); + if (status != E1000_SUCCESS) + goto fail; + + status = e1000_get_i2c_ack(hw); + if (status != E1000_SUCCESS) + goto fail; + + e1000_i2c_stop(hw); + break; + +fail: + e1000_i2c_bus_clear(hw); + retry++; + if (retry < max_retry) + DEBUGOUT("I2C byte write error - Retrying.\n"); + else + DEBUGOUT("I2C byte write error.\n"); + } while (retry < max_retry); + + hw->mac.ops.release_swfw_sync(hw, swfw_mask); + +write_byte_out: + + return status; +} + +/** + * e1000_i2c_start - Sets I2C start condition + * @hw: pointer to hardware structure + * + * Sets I2C start condition (High -> Low on SDA while SCL is High) + **/ +static void e1000_i2c_start(struct e1000_hw *hw) +{ + u32 i2cctl = E1000_READ_REG(hw, E1000_I2CPARAMS); + + DEBUGFUNC("e1000_i2c_start"); + + /* Start condition must begin with data and clock high */ + e1000_set_i2c_data(hw, &i2cctl, 1); + e1000_raise_i2c_clk(hw, &i2cctl); + + /* Setup time for start condition (4.7us) */ + usec_delay(E1000_I2C_T_SU_STA); + + e1000_set_i2c_data(hw, &i2cctl, 0); + + /* Hold time for start condition (4us) */ + usec_delay(E1000_I2C_T_HD_STA); + + e1000_lower_i2c_clk(hw, &i2cctl); + + /* Minimum low 
period of clock is 4.7 us */ + usec_delay(E1000_I2C_T_LOW); + +} + +/** + * e1000_i2c_stop - Sets I2C stop condition + * @hw: pointer to hardware structure + * + * Sets I2C stop condition (Low -> High on SDA while SCL is High) + **/ +static void e1000_i2c_stop(struct e1000_hw *hw) +{ + u32 i2cctl = E1000_READ_REG(hw, E1000_I2CPARAMS); + + DEBUGFUNC("e1000_i2c_stop"); + + /* Stop condition must begin with data low and clock high */ + e1000_set_i2c_data(hw, &i2cctl, 0); + e1000_raise_i2c_clk(hw, &i2cctl); + + /* Setup time for stop condition (4us) */ + usec_delay(E1000_I2C_T_SU_STO); + + e1000_set_i2c_data(hw, &i2cctl, 1); + + /* bus free time between stop and start (4.7us)*/ + usec_delay(E1000_I2C_T_BUF); +} + +/** + * e1000_clock_in_i2c_byte - Clocks in one byte via I2C + * @hw: pointer to hardware structure + * @data: data byte to clock in + * + * Clocks in one byte data via I2C data/clock + **/ +static void e1000_clock_in_i2c_byte(struct e1000_hw *hw, u8 *data) +{ + s32 i; + bool bit = 0; + + DEBUGFUNC("e1000_clock_in_i2c_byte"); + + *data = 0; + for (i = 7; i >= 0; i--) { + e1000_clock_in_i2c_bit(hw, &bit); + *data |= bit << i; + } + +} + +/** + * e1000_clock_out_i2c_byte - Clocks out one byte via I2C + * @hw: pointer to hardware structure + * @data: data byte clocked out + * + * Clocks out one byte data via I2C data/clock + **/ +static s32 e1000_clock_out_i2c_byte(struct e1000_hw *hw, u8 data) +{ + s32 status = E1000_SUCCESS; + s32 i; + u32 i2cctl; + bool bit = 0; + + DEBUGFUNC("e1000_clock_out_i2c_byte"); + + for (i = 7; i >= 0; i--) { + bit = (data >> i) & 0x1; + status = e1000_clock_out_i2c_bit(hw, bit); + + if (status != E1000_SUCCESS) + break; + } + + /* Release SDA line (set high) */ + i2cctl = E1000_READ_REG(hw, E1000_I2CPARAMS); + + i2cctl |= E1000_I2C_DATA_OE_N; + E1000_WRITE_REG(hw, E1000_I2CPARAMS, i2cctl); + E1000_WRITE_FLUSH(hw); + + return status; +} + +/** + * e1000_get_i2c_ack - Polls for I2C ACK + * @hw: pointer to hardware structure + * + * 
Clocks in/out one bit via I2C data/clock + **/ +static s32 e1000_get_i2c_ack(struct e1000_hw *hw) +{ + s32 status = E1000_SUCCESS; + u32 i = 0; + u32 i2cctl = E1000_READ_REG(hw, E1000_I2CPARAMS); + u32 timeout = 10; + bool ack = true; + + DEBUGFUNC("e1000_get_i2c_ack"); + + e1000_raise_i2c_clk(hw, &i2cctl); + + /* Minimum high period of clock is 4us */ + usec_delay(E1000_I2C_T_HIGH); + + /* Wait until SCL returns high */ + for (i = 0; i < timeout; i++) { + usec_delay(1); + i2cctl = E1000_READ_REG(hw, E1000_I2CPARAMS); + if (i2cctl & E1000_I2C_CLK_IN) + break; + } + if (!(i2cctl & E1000_I2C_CLK_IN)) + return E1000_ERR_I2C; + + ack = e1000_get_i2c_data(&i2cctl); + if (ack) { + DEBUGOUT("I2C ack was not received.\n"); + status = E1000_ERR_I2C; + } + + e1000_lower_i2c_clk(hw, &i2cctl); + + /* Minimum low period of clock is 4.7 us */ + usec_delay(E1000_I2C_T_LOW); + + return status; +} + +/** + * e1000_clock_in_i2c_bit - Clocks in one bit via I2C data/clock + * @hw: pointer to hardware structure + * @data: read data value + * + * Clocks in one bit via I2C data/clock + **/ +static void e1000_clock_in_i2c_bit(struct e1000_hw *hw, bool *data) +{ + u32 i2cctl = E1000_READ_REG(hw, E1000_I2CPARAMS); + + DEBUGFUNC("e1000_clock_in_i2c_bit"); + + e1000_raise_i2c_clk(hw, &i2cctl); + + /* Minimum high period of clock is 4us */ + usec_delay(E1000_I2C_T_HIGH); + + i2cctl = E1000_READ_REG(hw, E1000_I2CPARAMS); + *data = e1000_get_i2c_data(&i2cctl); + + e1000_lower_i2c_clk(hw, &i2cctl); + + /* Minimum low period of clock is 4.7 us */ + usec_delay(E1000_I2C_T_LOW); + +} + +/** + * e1000_clock_out_i2c_bit - Clocks in/out one bit via I2C data/clock + * @hw: pointer to hardware structure + * @data: data value to write + * + * Clocks out one bit via I2C data/clock + **/ +static s32 e1000_clock_out_i2c_bit(struct e1000_hw *hw, bool data) +{ + s32 status; + u32 i2cctl = E1000_READ_REG(hw, E1000_I2CPARAMS); + + DEBUGFUNC("e1000_clock_out_i2c_bit"); + + status = e1000_set_i2c_data(hw, &i2cctl, 
data); + if (status == E1000_SUCCESS) { + e1000_raise_i2c_clk(hw, &i2cctl); + + /* Minimum high period of clock is 4us */ + usec_delay(E1000_I2C_T_HIGH); + + e1000_lower_i2c_clk(hw, &i2cctl); + + /* Minimum low period of clock is 4.7 us. + * This also takes care of the data hold time. + */ + usec_delay(E1000_I2C_T_LOW); + } else { + status = E1000_ERR_I2C; + DEBUGOUT1("I2C data was not set to %X\n", data); + } + + return status; +} +/** + * e1000_raise_i2c_clk - Raises the I2C SCL clock + * @hw: pointer to hardware structure + * @i2cctl: Current value of I2CCTL register + * + * Raises the I2C clock line '0'->'1' + **/ +static void e1000_raise_i2c_clk(struct e1000_hw *hw, u32 *i2cctl) +{ + DEBUGFUNC("e1000_raise_i2c_clk"); + + *i2cctl |= E1000_I2C_CLK_OUT; + *i2cctl &= ~E1000_I2C_CLK_OE_N; + E1000_WRITE_REG(hw, E1000_I2CPARAMS, *i2cctl); + E1000_WRITE_FLUSH(hw); + + /* SCL rise time (1000ns) */ + usec_delay(E1000_I2C_T_RISE); +} + +/** + * e1000_lower_i2c_clk - Lowers the I2C SCL clock + * @hw: pointer to hardware structure + * @i2cctl: Current value of I2CCTL register + * + * Lowers the I2C clock line '1'->'0' + **/ +static void e1000_lower_i2c_clk(struct e1000_hw *hw, u32 *i2cctl) +{ + + DEBUGFUNC("e1000_lower_i2c_clk"); + + *i2cctl &= ~E1000_I2C_CLK_OUT; + *i2cctl &= ~E1000_I2C_CLK_OE_N; + E1000_WRITE_REG(hw, E1000_I2CPARAMS, *i2cctl); + E1000_WRITE_FLUSH(hw); + + /* SCL fall time (300ns) */ + usec_delay(E1000_I2C_T_FALL); +} + +/** + * e1000_set_i2c_data - Sets the I2C data bit + * @hw: pointer to hardware structure + * @i2cctl: Current value of I2CCTL register + * @data: I2C data value (0 or 1) to set + * + * Sets the I2C data bit + **/ +static s32 e1000_set_i2c_data(struct e1000_hw *hw, u32 *i2cctl, bool data) +{ + s32 status = E1000_SUCCESS; + + DEBUGFUNC("e1000_set_i2c_data"); + + if (data) + *i2cctl |= E1000_I2C_DATA_OUT; + else + *i2cctl &= ~E1000_I2C_DATA_OUT; + + *i2cctl &= ~E1000_I2C_DATA_OE_N; + *i2cctl |= E1000_I2C_CLK_OE_N; + E1000_WRITE_REG(hw, 
E1000_I2CPARAMS, *i2cctl); + E1000_WRITE_FLUSH(hw); + + /* Data rise/fall (1000ns/300ns) and set-up time (250ns) */ + usec_delay(E1000_I2C_T_RISE + E1000_I2C_T_FALL + E1000_I2C_T_SU_DATA); + + *i2cctl = E1000_READ_REG(hw, E1000_I2CPARAMS); + if (data != e1000_get_i2c_data(i2cctl)) { + status = E1000_ERR_I2C; + DEBUGOUT1("Error - I2C data was not set to %X.\n", data); + } + + return status; +} + +/** + * e1000_get_i2c_data - Reads the I2C SDA data bit + * @hw: pointer to hardware structure + * @i2cctl: Current value of I2CCTL register + * + * Returns the I2C data bit value + **/ +static bool e1000_get_i2c_data(u32 *i2cctl) +{ + bool data; + + DEBUGFUNC("e1000_get_i2c_data"); + + if (*i2cctl & E1000_I2C_DATA_IN) + data = 1; + else + data = 0; + + return data; +} + +/** + * e1000_i2c_bus_clear - Clears the I2C bus + * @hw: pointer to hardware structure + * + * Clears the I2C bus by sending nine clock pulses. + * Used when data line is stuck low. + **/ +void e1000_i2c_bus_clear(struct e1000_hw *hw) +{ + u32 i2cctl = E1000_READ_REG(hw, E1000_I2CPARAMS); + u32 i; + + DEBUGFUNC("e1000_i2c_bus_clear"); + + e1000_i2c_start(hw); + + e1000_set_i2c_data(hw, &i2cctl, 1); + + for (i = 0; i < 9; i++) { + e1000_raise_i2c_clk(hw, &i2cctl); + + /* Min high period of clock is 4us */ + usec_delay(E1000_I2C_T_HIGH); + + e1000_lower_i2c_clk(hw, &i2cctl); + + /* Min low period of clock is 4.7us*/ + usec_delay(E1000_I2C_T_LOW); + } + + e1000_i2c_start(hw); + + /* Put the i2c bus back to default state */ + e1000_i2c_stop(hw); +} + +static const u8 e1000_emc_temp_data[4] = { + E1000_EMC_INTERNAL_DATA, + E1000_EMC_DIODE1_DATA, + E1000_EMC_DIODE2_DATA, + E1000_EMC_DIODE3_DATA +}; +static const u8 e1000_emc_therm_limit[4] = { + E1000_EMC_INTERNAL_THERM_LIMIT, + E1000_EMC_DIODE1_THERM_LIMIT, + E1000_EMC_DIODE2_THERM_LIMIT, + E1000_EMC_DIODE3_THERM_LIMIT +}; + +/** + * e1000_get_thermal_sensor_data_generic - Gathers thermal sensor data + * @hw: pointer to hardware structure + * + * Updates the 
temperatures in mac.thermal_sensor_data + **/ +s32 e1000_get_thermal_sensor_data_generic(struct e1000_hw *hw) +{ + u16 ets_offset; + u16 ets_cfg; + u16 ets_sensor; + u8 num_sensors; + u8 sensor_index; + u8 sensor_location; + u8 i; + struct e1000_thermal_sensor_data *data = &hw->mac.thermal_sensor_data; + + DEBUGFUNC("e1000_get_thermal_sensor_data_generic"); + + if ((hw->mac.type != e1000_i350) || (hw->bus.func != 0)) + return E1000_NOT_IMPLEMENTED; + + data->sensor[0].temp = (E1000_READ_REG(hw, E1000_THMJT) & 0xFF); + + /* Return the internal sensor only if ETS is unsupported */ + e1000_read_nvm(hw, NVM_ETS_CFG, 1, &ets_offset); + if ((ets_offset == 0x0000) || (ets_offset == 0xFFFF)) + return E1000_SUCCESS; + + e1000_read_nvm(hw, ets_offset, 1, &ets_cfg); + if (((ets_cfg & NVM_ETS_TYPE_MASK) >> NVM_ETS_TYPE_SHIFT) + != NVM_ETS_TYPE_EMC) + return E1000_NOT_IMPLEMENTED; + + num_sensors = (ets_cfg & NVM_ETS_NUM_SENSORS_MASK); + if (num_sensors > E1000_MAX_SENSORS) + num_sensors = E1000_MAX_SENSORS; + + for (i = 1; i < num_sensors; i++) { + e1000_read_nvm(hw, (ets_offset + i), 1, &ets_sensor); + sensor_index = ((ets_sensor & NVM_ETS_DATA_INDEX_MASK) >> + NVM_ETS_DATA_INDEX_SHIFT); + sensor_location = ((ets_sensor & NVM_ETS_DATA_LOC_MASK) >> + NVM_ETS_DATA_LOC_SHIFT); + + if (sensor_location != 0) + hw->phy.ops.read_i2c_byte(hw, + e1000_emc_temp_data[sensor_index], + E1000_I2C_THERMAL_SENSOR_ADDR, + &data->sensor[i].temp); + } + return E1000_SUCCESS; +} + +/** + * e1000_init_thermal_sensor_thresh_generic - Sets thermal sensor thresholds + * @hw: pointer to hardware structure + * + * Sets the thermal sensor thresholds according to the NVM map + * and save off the threshold and location values into mac.thermal_sensor_data + **/ +s32 e1000_init_thermal_sensor_thresh_generic(struct e1000_hw *hw) +{ + u16 ets_offset; + u16 ets_cfg; + u16 ets_sensor; + u8 low_thresh_delta; + u8 num_sensors; + u8 sensor_index; + u8 sensor_location; + u8 therm_limit; + u8 i; + struct 
e1000_thermal_sensor_data *data = &hw->mac.thermal_sensor_data; + + DEBUGFUNC("e1000_init_thermal_sensor_thresh_generic"); + + if ((hw->mac.type != e1000_i350) || (hw->bus.func != 0)) + return E1000_NOT_IMPLEMENTED; + + memset(data, 0, sizeof(struct e1000_thermal_sensor_data)); + + data->sensor[0].location = 0x1; + data->sensor[0].caution_thresh = + (E1000_READ_REG(hw, E1000_THHIGHTC) & 0xFF); + data->sensor[0].max_op_thresh = + (E1000_READ_REG(hw, E1000_THLOWTC) & 0xFF); + + /* Return the internal sensor only if ETS is unsupported */ + e1000_read_nvm(hw, NVM_ETS_CFG, 1, &ets_offset); + if ((ets_offset == 0x0000) || (ets_offset == 0xFFFF)) + return E1000_SUCCESS; + + e1000_read_nvm(hw, ets_offset, 1, &ets_cfg); + if (((ets_cfg & NVM_ETS_TYPE_MASK) >> NVM_ETS_TYPE_SHIFT) + != NVM_ETS_TYPE_EMC) + return E1000_NOT_IMPLEMENTED; + + low_thresh_delta = ((ets_cfg & NVM_ETS_LTHRES_DELTA_MASK) >> + NVM_ETS_LTHRES_DELTA_SHIFT); + num_sensors = (ets_cfg & NVM_ETS_NUM_SENSORS_MASK); + + for (i = 1; i <= num_sensors; i++) { + e1000_read_nvm(hw, (ets_offset + i), 1, &ets_sensor); + sensor_index = ((ets_sensor & NVM_ETS_DATA_INDEX_MASK) >> + NVM_ETS_DATA_INDEX_SHIFT); + sensor_location = ((ets_sensor & NVM_ETS_DATA_LOC_MASK) >> + NVM_ETS_DATA_LOC_SHIFT); + therm_limit = ets_sensor & NVM_ETS_DATA_HTHRESH_MASK; + + hw->phy.ops.write_i2c_byte(hw, + e1000_emc_therm_limit[sensor_index], + E1000_I2C_THERMAL_SENSOR_ADDR, + therm_limit); + + if ((i < E1000_MAX_SENSORS) && (sensor_location != 0)) { + data->sensor[i].location = sensor_location; + data->sensor[i].caution_thresh = therm_limit; + data->sensor[i].max_op_thresh = therm_limit - + low_thresh_delta; + } + } + return E1000_SUCCESS; +} diff --git a/drivers/staging/igb_avb/e1000_82575.h b/drivers/staging/igb_avb/e1000_82575.h new file mode 100644 index 0000000000000..c6b61f71e3535 --- /dev/null +++ b/drivers/staging/igb_avb/e1000_82575.h @@ -0,0 +1,510 @@ 
+/******************************************************************************* + + Intel(R) Gigabit Ethernet Linux driver + Copyright(c) 2007-2015 Intel Corporation. + + This program is free software; you can redistribute it and/or modify it + under the terms and conditions of the GNU General Public License, + version 2, as published by the Free Software Foundation. + + This program is distributed in the hope it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. + + The full GNU General Public License is included in this distribution in + the file called "COPYING". + + Contact Information: + Linux NICS + e1000-devel Mailing List + Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + +*******************************************************************************/ + +#ifndef _E1000_82575_H_ +#define _E1000_82575_H_ + +#define ID_LED_DEFAULT_82575_SERDES ((ID_LED_DEF1_DEF2 << 12) | \ + (ID_LED_DEF1_DEF2 << 8) | \ + (ID_LED_DEF1_DEF2 << 4) | \ + (ID_LED_OFF1_ON2)) +/* + * Receive Address Register Count + * Number of high/low register pairs in the RAR. The RAR (Receive Address + * Registers) holds the directed and multicast addresses that we monitor. + * These entries are also used for MAC-based filtering. + */ +/* + * For 82576, there are an additional set of RARs that begin at an offset + * separate from the first set of RARs. 
+ */ +#define E1000_RAR_ENTRIES_82575 16 +#define E1000_RAR_ENTRIES_82576 24 +#define E1000_RAR_ENTRIES_82580 24 +#define E1000_RAR_ENTRIES_I350 32 +#define E1000_SW_SYNCH_MB 0x00000100 +#define E1000_STAT_DEV_RST_SET 0x00100000 +#define E1000_CTRL_DEV_RST 0x20000000 + +struct e1000_adv_data_desc { + __le64 buffer_addr; /* Address of the descriptor's data buffer */ + union { + u32 data; + struct { + u32 datalen:16; /* Data buffer length */ + u32 rsvd:4; + u32 dtyp:4; /* Descriptor type */ + u32 dcmd:8; /* Descriptor command */ + } config; + } lower; + union { + u32 data; + struct { + u32 status:4; /* Descriptor status */ + u32 idx:4; + u32 popts:6; /* Packet Options */ + u32 paylen:18; /* Payload length */ + } options; + } upper; +}; + +#define E1000_TXD_DTYP_ADV_C 0x2 /* Advanced Context Descriptor */ +#define E1000_TXD_DTYP_ADV_D 0x3 /* Advanced Data Descriptor */ +#define E1000_ADV_TXD_CMD_DEXT 0x20 /* Descriptor extension (0 = legacy) */ +#define E1000_ADV_TUCMD_IPV4 0x2 /* IP Packet Type: 1=IPv4 */ +#define E1000_ADV_TUCMD_IPV6 0x0 /* IP Packet Type: 0=IPv6 */ +#define E1000_ADV_TUCMD_L4T_UDP 0x0 /* L4 Packet TYPE of UDP */ +#define E1000_ADV_TUCMD_L4T_TCP 0x4 /* L4 Packet TYPE of TCP */ +#define E1000_ADV_TUCMD_MKRREQ 0x10 /* Indicates markers are required */ +#define E1000_ADV_DCMD_EOP 0x1 /* End of Packet */ +#define E1000_ADV_DCMD_IFCS 0x2 /* Insert FCS (Ethernet CRC) */ +#define E1000_ADV_DCMD_RS 0x8 /* Report Status */ +#define E1000_ADV_DCMD_VLE 0x40 /* Add VLAN tag */ +#define E1000_ADV_DCMD_TSE 0x80 /* TCP Seg enable */ +/* Extended Device Control */ +#define E1000_CTRL_EXT_NSICR 0x00000001 /* Disable Intr Clear all on read */ + +struct e1000_adv_context_desc { + union { + u32 ip_config; + struct { + u32 iplen:9; + u32 maclen:7; + u32 vlan_tag:16; + } fields; + } ip_setup; + u32 seq_num; + union { + u64 l4_config; + struct { + u32 mkrloc:9; + u32 tucmd:11; + u32 dtyp:4; + u32 adv:8; + u32 rsvd:4; + u32 idx:4; + u32 l4len:8; + u32 mss:16; + } fields; + 
} l4_setup; +}; + +/* SRRCTL bit definitions */ +#define E1000_SRRCTL_BSIZEPKT_SHIFT 10 /* Shift _right_ */ +#define E1000_SRRCTL_BSIZEHDRSIZE_MASK 0x00000F00 +#define E1000_SRRCTL_BSIZEHDRSIZE_SHIFT 2 /* Shift _left_ */ +#define E1000_SRRCTL_DESCTYPE_LEGACY 0x00000000 +#define E1000_SRRCTL_DESCTYPE_ADV_ONEBUF 0x02000000 +#define E1000_SRRCTL_DESCTYPE_HDR_SPLIT 0x04000000 +#define E1000_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS 0x0A000000 +#define E1000_SRRCTL_DESCTYPE_HDR_REPLICATION 0x06000000 +#define E1000_SRRCTL_DESCTYPE_HDR_REPLICATION_LARGE_PKT 0x08000000 +#define E1000_SRRCTL_DESCTYPE_MASK 0x0E000000 +#define E1000_SRRCTL_TIMESTAMP 0x40000000 +#define E1000_SRRCTL_DROP_EN 0x80000000 + +#define E1000_SRRCTL_BSIZEPKT_MASK 0x0000007F +#define E1000_SRRCTL_BSIZEHDR_MASK 0x00003F00 + +#define E1000_TX_HEAD_WB_ENABLE 0x1 +#define E1000_TX_SEQNUM_WB_ENABLE 0x2 + +#define E1000_MRQC_ENABLE_RSS_4Q 0x00000002 +#define E1000_MRQC_ENABLE_VMDQ 0x00000003 +#define E1000_MRQC_ENABLE_VMDQ_RSS_2Q 0x00000005 +#define E1000_MRQC_RSS_FIELD_IPV4_UDP 0x00400000 +#define E1000_MRQC_RSS_FIELD_IPV6_UDP 0x00800000 +#define E1000_MRQC_RSS_FIELD_IPV6_UDP_EX 0x01000000 +#define E1000_MRQC_ENABLE_RSS_8Q 0x00000002 + +#define E1000_VMRCTL_MIRROR_PORT_SHIFT 8 +#define E1000_VMRCTL_MIRROR_DSTPORT_MASK (7 << \ + E1000_VMRCTL_MIRROR_PORT_SHIFT) +#define E1000_VMRCTL_POOL_MIRROR_ENABLE (1 << 0) +#define E1000_VMRCTL_UPLINK_MIRROR_ENABLE (1 << 1) +#define E1000_VMRCTL_DOWNLINK_MIRROR_ENABLE (1 << 2) + +#define E1000_EICR_TX_QUEUE ( \ + E1000_EICR_TX_QUEUE0 | \ + E1000_EICR_TX_QUEUE1 | \ + E1000_EICR_TX_QUEUE2 | \ + E1000_EICR_TX_QUEUE3) + +#define E1000_EICR_RX_QUEUE ( \ + E1000_EICR_RX_QUEUE0 | \ + E1000_EICR_RX_QUEUE1 | \ + E1000_EICR_RX_QUEUE2 | \ + E1000_EICR_RX_QUEUE3) + +#define E1000_EIMS_RX_QUEUE E1000_EICR_RX_QUEUE +#define E1000_EIMS_TX_QUEUE E1000_EICR_TX_QUEUE + +#define EIMS_ENABLE_MASK ( \ + E1000_EIMS_RX_QUEUE | \ + E1000_EIMS_TX_QUEUE | \ + E1000_EIMS_TCP_TIMER | \ + E1000_EIMS_OTHER) + 
+/* Immediate Interrupt Rx (A.K.A. Low Latency Interrupt) */ +#define E1000_IMIR_PORT_IM_EN 0x00010000 /* TCP port enable */ +#define E1000_IMIR_PORT_BP 0x00020000 /* TCP port check bypass */ +#define E1000_IMIREXT_SIZE_BP 0x00001000 /* Packet size bypass */ +#define E1000_IMIREXT_CTRL_URG 0x00002000 /* Check URG bit in header */ +#define E1000_IMIREXT_CTRL_ACK 0x00004000 /* Check ACK bit in header */ +#define E1000_IMIREXT_CTRL_PSH 0x00008000 /* Check PSH bit in header */ +#define E1000_IMIREXT_CTRL_RST 0x00010000 /* Check RST bit in header */ +#define E1000_IMIREXT_CTRL_SYN 0x00020000 /* Check SYN bit in header */ +#define E1000_IMIREXT_CTRL_FIN 0x00040000 /* Check FIN bit in header */ +#define E1000_IMIREXT_CTRL_BP 0x00080000 /* Bypass check of ctrl bits */ + +/* Receive Descriptor - Advanced */ +union e1000_adv_rx_desc { + struct { + __le64 pkt_addr; /* Packet buffer address */ + __le64 hdr_addr; /* Header buffer address */ + } read; + struct { + struct { + union { + __le32 data; + struct { + __le16 pkt_info; /*RSS type, Pkt type*/ + /* Split Header, header buffer len */ + __le16 hdr_info; + } hs_rss; + } lo_dword; + union { + __le32 rss; /* RSS Hash */ + struct { + __le16 ip_id; /* IP id */ + __le16 csum; /* Packet Checksum */ + } csum_ip; + } hi_dword; + } lower; + struct { + __le32 status_error; /* ext status/error */ + __le16 length; /* Packet length */ + __le16 vlan; /* VLAN tag */ + } upper; + } wb; /* writeback */ +}; + +#define E1000_RXDADV_RSSTYPE_MASK 0x0000000F +#define E1000_RXDADV_RSSTYPE_SHIFT 12 +#define E1000_RXDADV_HDRBUFLEN_MASK 0x7FE0 +#define E1000_RXDADV_HDRBUFLEN_SHIFT 5 +#define E1000_RXDADV_SPLITHEADER_EN 0x00001000 +#define E1000_RXDADV_SPH 0x8000 +#define E1000_RXDADV_STAT_TS 0x10000 /* Pkt was time stamped */ +#define E1000_RXDADV_STAT_TSIP 0x08000 /* timestamp in packet */ +#define E1000_RXDADV_ERR_HBO 0x00800000 + +/* RSS Hash results */ +#define E1000_RXDADV_RSSTYPE_NONE 0x00000000 +#define E1000_RXDADV_RSSTYPE_IPV4_TCP 0x00000001 
+#define E1000_RXDADV_RSSTYPE_IPV4 0x00000002 +#define E1000_RXDADV_RSSTYPE_IPV6_TCP 0x00000003 +#define E1000_RXDADV_RSSTYPE_IPV6_EX 0x00000004 +#define E1000_RXDADV_RSSTYPE_IPV6 0x00000005 +#define E1000_RXDADV_RSSTYPE_IPV6_TCP_EX 0x00000006 +#define E1000_RXDADV_RSSTYPE_IPV4_UDP 0x00000007 +#define E1000_RXDADV_RSSTYPE_IPV6_UDP 0x00000008 +#define E1000_RXDADV_RSSTYPE_IPV6_UDP_EX 0x00000009 + +/* RSS Packet Types as indicated in the receive descriptor */ +#define E1000_RXDADV_PKTTYPE_ILMASK 0x000000F0 +#define E1000_RXDADV_PKTTYPE_TLMASK 0x00000F00 +#define E1000_RXDADV_PKTTYPE_NONE 0x00000000 +#define E1000_RXDADV_PKTTYPE_IPV4 0x00000010 /* IPV4 hdr present */ +#define E1000_RXDADV_PKTTYPE_IPV4_EX 0x00000020 /* IPV4 hdr + extensions */ +#define E1000_RXDADV_PKTTYPE_IPV6 0x00000040 /* IPV6 hdr present */ +#define E1000_RXDADV_PKTTYPE_IPV6_EX 0x00000080 /* IPV6 hdr + extensions */ +#define E1000_RXDADV_PKTTYPE_TCP 0x00000100 /* TCP hdr present */ +#define E1000_RXDADV_PKTTYPE_UDP 0x00000200 /* UDP hdr present */ +#define E1000_RXDADV_PKTTYPE_SCTP 0x00000400 /* SCTP hdr present */ +#define E1000_RXDADV_PKTTYPE_NFS 0x00000800 /* NFS hdr present */ + +#define E1000_RXDADV_PKTTYPE_IPSEC_ESP 0x00001000 /* IPSec ESP */ +#define E1000_RXDADV_PKTTYPE_IPSEC_AH 0x00002000 /* IPSec AH */ +#define E1000_RXDADV_PKTTYPE_LINKSEC 0x00004000 /* LinkSec Encap */ +#define E1000_RXDADV_PKTTYPE_ETQF 0x00008000 /* PKTTYPE is ETQF index */ +#define E1000_RXDADV_PKTTYPE_ETQF_MASK 0x00000070 /* ETQF has 8 indices */ +#define E1000_RXDADV_PKTTYPE_ETQF_SHIFT 4 /* Right-shift 4 bits */ + +/* LinkSec results */ +/* Security Processing bit Indication */ +#define E1000_RXDADV_LNKSEC_STATUS_SECP 0x00020000 +#define E1000_RXDADV_LNKSEC_ERROR_BIT_MASK 0x18000000 +#define E1000_RXDADV_LNKSEC_ERROR_NO_SA_MATCH 0x08000000 +#define E1000_RXDADV_LNKSEC_ERROR_REPLAY_ERROR 0x10000000 +#define E1000_RXDADV_LNKSEC_ERROR_BAD_SIG 0x18000000 + +#define E1000_RXDADV_IPSEC_STATUS_SECP 0x00020000 +#define 
E1000_RXDADV_IPSEC_ERROR_BIT_MASK 0x18000000 +#define E1000_RXDADV_IPSEC_ERROR_INVALID_PROTOCOL 0x08000000 +#define E1000_RXDADV_IPSEC_ERROR_INVALID_LENGTH 0x10000000 +#define E1000_RXDADV_IPSEC_ERROR_AUTHENTICATION_FAILED 0x18000000 + +/* Transmit Descriptor - Advanced */ +union e1000_adv_tx_desc { + struct { + __le64 buffer_addr; /* Address of descriptor's data buf */ + __le32 cmd_type_len; + __le32 olinfo_status; + } read; + struct { + __le64 rsvd; /* Reserved */ + __le32 nxtseq_seed; + __le32 status; + } wb; +}; + +/* Adv Transmit Descriptor Config Masks */ +#define E1000_ADVTXD_DTYP_CTXT 0x00200000 /* Advanced Context Descriptor */ +#define E1000_ADVTXD_DTYP_DATA 0x00300000 /* Advanced Data Descriptor */ +#define E1000_ADVTXD_DCMD_EOP 0x01000000 /* End of Packet */ +#define E1000_ADVTXD_DCMD_IFCS 0x02000000 /* Insert FCS (Ethernet CRC) */ +#define E1000_ADVTXD_DCMD_RS 0x08000000 /* Report Status */ +#define E1000_ADVTXD_DCMD_DDTYP_ISCSI 0x10000000 /* DDP hdr type or iSCSI */ +#define E1000_ADVTXD_DCMD_DEXT 0x20000000 /* Descriptor extension (1=Adv) */ +#define E1000_ADVTXD_DCMD_VLE 0x40000000 /* VLAN pkt enable */ +#define E1000_ADVTXD_DCMD_TSE 0x80000000 /* TCP Seg enable */ +#define E1000_ADVTXD_MAC_LINKSEC 0x00040000 /* Apply LinkSec on pkt */ +#define E1000_ADVTXD_MAC_TSTAMP 0x00080000 /* IEEE1588 Timestamp pkt */ +#define E1000_ADVTXD_STAT_SN_CRC 0x00000002 /* NXTSEQ/SEED prsnt in WB */ +#define E1000_ADVTXD_IDX_SHIFT 4 /* Adv desc Index shift */ +#define E1000_ADVTXD_POPTS_ISCO_1ST 0x00000000 /* 1st TSO of iSCSI PDU */ +#define E1000_ADVTXD_POPTS_ISCO_MDL 0x00000800 /* Middle TSO of iSCSI PDU */ +#define E1000_ADVTXD_POPTS_ISCO_LAST 0x00001000 /* Last TSO of iSCSI PDU */ +/* 1st & Last TSO-full iSCSI PDU*/ +#define E1000_ADVTXD_POPTS_ISCO_FULL 0x00001800 +#define E1000_ADVTXD_POPTS_IPSEC 0x00000400 /* IPSec offload request */ +#define E1000_ADVTXD_PAYLEN_SHIFT 14 /* Adv desc PAYLEN shift */ + +/* Context descriptors */ +struct e1000_adv_tx_context_desc { 
+ __le32 vlan_macip_lens; + __le32 seqnum_seed; + __le32 type_tucmd_mlhl; + __le32 mss_l4len_idx; +}; + +#define E1000_ADVTXD_MACLEN_SHIFT 9 /* Adv ctxt desc mac len shift */ +#define E1000_ADVTXD_VLAN_SHIFT 16 /* Adv ctxt vlan tag shift */ +#define E1000_ADVTXD_TUCMD_IPV4 0x00000400 /* IP Packet Type: 1=IPv4 */ +#define E1000_ADVTXD_TUCMD_IPV6 0x00000000 /* IP Packet Type: 0=IPv6 */ +#define E1000_ADVTXD_TUCMD_L4T_UDP 0x00000000 /* L4 Packet TYPE of UDP */ +#define E1000_ADVTXD_TUCMD_L4T_TCP 0x00000800 /* L4 Packet TYPE of TCP */ +#define E1000_ADVTXD_TUCMD_L4T_SCTP 0x00001000 /* L4 Packet TYPE of SCTP */ +#define E1000_ADVTXD_TUCMD_IPSEC_TYPE_ESP 0x00002000 /* IPSec Type ESP */ +/* IPSec Encrypt Enable for ESP */ +#define E1000_ADVTXD_TUCMD_IPSEC_ENCRYPT_EN 0x00004000 +/* Req requires Markers and CRC */ +#define E1000_ADVTXD_TUCMD_MKRREQ 0x00002000 +#define E1000_ADVTXD_L4LEN_SHIFT 8 /* Adv ctxt L4LEN shift */ +#define E1000_ADVTXD_MSS_SHIFT 16 /* Adv ctxt MSS shift */ +/* Adv ctxt IPSec SA IDX mask */ +#define E1000_ADVTXD_IPSEC_SA_INDEX_MASK 0x000000FF +/* Adv ctxt IPSec ESP len mask */ +#define E1000_ADVTXD_IPSEC_ESP_LEN_MASK 0x000000FF + +/* Additional Transmit Descriptor Control definitions */ +#define E1000_TXDCTL_QUEUE_ENABLE 0x02000000 /* Ena specific Tx Queue */ +#define E1000_TXDCTL_SWFLSH 0x04000000 /* Tx Desc. wbk flushing */ +/* Tx Queue Arbitration Priority 0=low, 1=high */ +#define E1000_TXDCTL_PRIORITY 0x08000000 + +/* Additional Receive Descriptor Control definitions */ +#define E1000_RXDCTL_QUEUE_ENABLE 0x02000000 /* Ena specific Rx Queue */ +#define E1000_RXDCTL_SWFLSH 0x04000000 /* Rx Desc. 
wbk flushing */ + +/* Direct Cache Access (DCA) definitions */ +#define E1000_DCA_CTRL_DCA_ENABLE 0x00000000 /* DCA Enable */ +#define E1000_DCA_CTRL_DCA_DISABLE 0x00000001 /* DCA Disable */ + +#define E1000_DCA_CTRL_DCA_MODE_CB1 0x00 /* DCA Mode CB1 */ +#define E1000_DCA_CTRL_DCA_MODE_CB2 0x02 /* DCA Mode CB2 */ + +#define E1000_DCA_RXCTRL_CPUID_MASK 0x0000001F /* Rx CPUID Mask */ +#define E1000_DCA_RXCTRL_DESC_DCA_EN (1 << 5) /* DCA Rx Desc enable */ +#define E1000_DCA_RXCTRL_HEAD_DCA_EN (1 << 6) /* DCA Rx Desc header ena */ +#define E1000_DCA_RXCTRL_DATA_DCA_EN (1 << 7) /* DCA Rx Desc payload ena */ +#define E1000_DCA_RXCTRL_DESC_RRO_EN (1 << 9) /* DCA Rx Desc Relax Order */ + +#define E1000_DCA_TXCTRL_CPUID_MASK 0x0000001F /* Tx CPUID Mask */ +#define E1000_DCA_TXCTRL_DESC_DCA_EN (1 << 5) /* DCA Tx Desc enable */ +#define E1000_DCA_TXCTRL_DESC_RRO_EN (1 << 9) /* Tx rd Desc Relax Order */ +#define E1000_DCA_TXCTRL_TX_WB_RO_EN (1 << 11) /* Tx Desc writeback RO bit */ +#define E1000_DCA_TXCTRL_DATA_RRO_EN (1 << 13) /* Tx rd data Relax Order */ + +#define E1000_DCA_TXCTRL_CPUID_MASK_82576 0xFF000000 /* Tx CPUID Mask */ +#define E1000_DCA_RXCTRL_CPUID_MASK_82576 0xFF000000 /* Rx CPUID Mask */ +#define E1000_DCA_TXCTRL_CPUID_SHIFT_82576 24 /* Tx CPUID */ +#define E1000_DCA_RXCTRL_CPUID_SHIFT_82576 24 /* Rx CPUID */ + +/* Additional interrupt register bit definitions */ +#define E1000_ICR_LSECPNS 0x00000020 /* PN threshold - server */ +#define E1000_IMS_LSECPNS E1000_ICR_LSECPNS /* PN threshold - server */ +#define E1000_ICS_LSECPNS E1000_ICR_LSECPNS /* PN threshold - server */ + +/* ETQF register bit definitions */ +#define E1000_ETQF_FILTER_ENABLE (1 << 26) +#define E1000_ETQF_IMM_INT (1 << 29) +#define E1000_ETQF_1588 (1 << 30) +#define E1000_ETQF_QUEUE_ENABLE (1 << 31) +/* + * ETQF filter list: one static filter per filter consumer. This is + * to avoid filter collisions later. Add new filters + * here!! 
+ * + * Current filters: + * EAPOL 802.1x (0x888e): Filter 0 + */ +#define E1000_ETQF_FILTER_EAPOL 0 + +#define E1000_FTQF_VF_BP 0x00008000 +#define E1000_FTQF_1588_TIME_STAMP 0x08000000 +#define E1000_FTQF_MASK 0xF0000000 +#define E1000_FTQF_MASK_PROTO_BP 0x10000000 +#define E1000_FTQF_MASK_SOURCE_ADDR_BP 0x20000000 +#define E1000_FTQF_MASK_DEST_ADDR_BP 0x40000000 +#define E1000_FTQF_MASK_SOURCE_PORT_BP 0x80000000 + +#define E1000_NVM_APME_82575 0x0400 +#define MAX_NUM_VFS 7 + +#define E1000_DTXSWC_MAC_SPOOF_MASK 0x000000FF /* Per VF MAC spoof cntrl */ +#define E1000_DTXSWC_VLAN_SPOOF_MASK 0x0000FF00 /* Per VF VLAN spoof cntrl */ +#define E1000_DTXSWC_LLE_MASK 0x00FF0000 /* Per VF Local LB enables */ +#define E1000_DTXSWC_VLAN_SPOOF_SHIFT 8 +#define E1000_DTXSWC_LLE_SHIFT 16 +#define E1000_DTXSWC_VMDQ_LOOPBACK_EN (1 << 31) /* global VF LB enable */ + +/* Easy defines for setting default pool, would normally be left a zero */ +#define E1000_VT_CTL_DEFAULT_POOL_SHIFT 7 +#define E1000_VT_CTL_DEFAULT_POOL_MASK (0x7 << E1000_VT_CTL_DEFAULT_POOL_SHIFT) + +/* Other useful VMD_CTL register defines */ +#define E1000_VT_CTL_IGNORE_MAC (1 << 28) +#define E1000_VT_CTL_DISABLE_DEF_POOL (1 << 29) +#define E1000_VT_CTL_VM_REPL_EN (1 << 30) + +/* Per VM Offload register setup */ +#define E1000_VMOLR_RLPML_MASK 0x00003FFF /* Long Packet Maximum Length mask */ +#define E1000_VMOLR_LPE 0x00010000 /* Accept Long packet */ +#define E1000_VMOLR_RSSE 0x00020000 /* Enable RSS */ +#define E1000_VMOLR_AUPE 0x01000000 /* Accept untagged packets */ +#define E1000_VMOLR_ROMPE 0x02000000 /* Accept overflow multicast */ +#define E1000_VMOLR_ROPE 0x04000000 /* Accept overflow unicast */ +#define E1000_VMOLR_BAM 0x08000000 /* Accept Broadcast packets */ +#define E1000_VMOLR_MPME 0x10000000 /* Multicast promiscuous mode */ +#define E1000_VMOLR_STRVLAN 0x40000000 /* Vlan stripping enable */ +#define E1000_VMOLR_STRCRC 0x80000000 /* CRC stripping enable */ + +#define E1000_VMOLR_VPE 0x00800000 /* 
VLAN promiscuous enable */ +#define E1000_VMOLR_UPE 0x20000000 /* Unicast promiscuous enable */ +#define E1000_DVMOLR_HIDVLAN 0x20000000 /* Vlan hiding enable */ +#define E1000_DVMOLR_STRVLAN 0x40000000 /* Vlan stripping enable */ +#define E1000_DVMOLR_STRCRC 0x80000000 /* CRC stripping enable */ + +#define E1000_PBRWAC_WALPB 0x00000007 /* Wrap around event on LAN Rx PB */ +#define E1000_PBRWAC_PBE 0x00000008 /* Rx packet buffer empty */ + +#define E1000_VLVF_ARRAY_SIZE 32 +#define E1000_VLVF_VLANID_MASK 0x00000FFF +#define E1000_VLVF_POOLSEL_SHIFT 12 +#define E1000_VLVF_POOLSEL_MASK (0xFF << E1000_VLVF_POOLSEL_SHIFT) +#define E1000_VLVF_LVLAN 0x00100000 +#define E1000_VLVF_VLANID_ENABLE 0x80000000 + +#define E1000_VMVIR_VLANA_DEFAULT 0x40000000 /* Always use default VLAN */ +#define E1000_VMVIR_VLANA_NEVER 0x80000000 /* Never insert VLAN tag */ + +#define E1000_VF_INIT_TIMEOUT 200 /* Number of retries to clear RSTI */ + +#define E1000_IOVCTL 0x05BBC +#define E1000_IOVCTL_REUSE_VFQ 0x00000001 + +#define E1000_RPLOLR_STRVLAN 0x40000000 +#define E1000_RPLOLR_STRCRC 0x80000000 + +#define E1000_TCTL_EXT_COLD 0x000FFC00 +#define E1000_TCTL_EXT_COLD_SHIFT 10 + +#define E1000_DTXCTL_8023LL 0x0004 +#define E1000_DTXCTL_VLAN_ADDED 0x0008 +#define E1000_DTXCTL_OOS_ENABLE 0x0010 +#define E1000_DTXCTL_MDP_EN 0x0020 +#define E1000_DTXCTL_SPOOF_INT 0x0040 + +#define E1000_EEPROM_PCS_AUTONEG_DISABLE_BIT (1 << 14) + +#define ALL_QUEUES 0xFFFF + +/* Rx packet buffer size defines */ +#define E1000_RXPBS_SIZE_MASK_82576 0x0000007F +void e1000_vmdq_set_loopback_pf(struct e1000_hw *hw, bool enable); +void e1000_vmdq_set_anti_spoofing_pf(struct e1000_hw *hw, bool enable, int pf); +void e1000_vmdq_set_replication_pf(struct e1000_hw *hw, bool enable); +s32 e1000_init_nvm_params_82575(struct e1000_hw *hw); +s32 e1000_init_hw_82575(struct e1000_hw *hw); + +u16 e1000_rxpbs_adjust_82580(u32 data); +s32 e1000_read_emi_reg(struct e1000_hw *hw, u16 addr, u16 *data); +s32 e1000_set_eee_i350(struct 
e1000_hw *hw, bool adv1G, bool adv100M); +s32 e1000_set_eee_i354(struct e1000_hw *hw, bool adv1G, bool adv100M); +s32 e1000_get_eee_status_i354(struct e1000_hw *, bool *); +s32 e1000_initialize_M88E1512_phy(struct e1000_hw *hw); +#define E1000_I2C_THERMAL_SENSOR_ADDR 0xF8 +#define E1000_EMC_INTERNAL_DATA 0x00 +#define E1000_EMC_INTERNAL_THERM_LIMIT 0x20 +#define E1000_EMC_DIODE1_DATA 0x01 +#define E1000_EMC_DIODE1_THERM_LIMIT 0x19 +#define E1000_EMC_DIODE2_DATA 0x23 +#define E1000_EMC_DIODE2_THERM_LIMIT 0x1A +#define E1000_EMC_DIODE3_DATA 0x2A +#define E1000_EMC_DIODE3_THERM_LIMIT 0x30 + +s32 e1000_get_thermal_sensor_data_generic(struct e1000_hw *hw); +s32 e1000_init_thermal_sensor_thresh_generic(struct e1000_hw *hw); + +/* I2C SDA and SCL timing parameters for standard mode */ +#define E1000_I2C_T_HD_STA 4 +#define E1000_I2C_T_LOW 5 +#define E1000_I2C_T_HIGH 4 +#define E1000_I2C_T_SU_STA 5 +#define E1000_I2C_T_HD_DATA 5 +#define E1000_I2C_T_SU_DATA 1 +#define E1000_I2C_T_RISE 1 +#define E1000_I2C_T_FALL 1 +#define E1000_I2C_T_SU_STO 4 +#define E1000_I2C_T_BUF 5 + +s32 e1000_set_i2c_bb(struct e1000_hw *hw); +s32 e1000_read_i2c_byte_generic(struct e1000_hw *hw, u8 byte_offset, + u8 dev_addr, u8 *data); +s32 e1000_write_i2c_byte_generic(struct e1000_hw *hw, u8 byte_offset, + u8 dev_addr, u8 data); +void e1000_i2c_bus_clear(struct e1000_hw *hw); +#endif /* _E1000_82575_H_ */ diff --git a/drivers/staging/igb_avb/e1000_api.c b/drivers/staging/igb_avb/e1000_api.c new file mode 100644 index 0000000000000..87bccbd19fc66 --- /dev/null +++ b/drivers/staging/igb_avb/e1000_api.c @@ -0,0 +1,1160 @@ +/******************************************************************************* + + Intel(R) Gigabit Ethernet Linux driver + Copyright(c) 2007-2015 Intel Corporation. + + This program is free software; you can redistribute it and/or modify it + under the terms and conditions of the GNU General Public License, + version 2, as published by the Free Software Foundation. 
+ + This program is distributed in the hope it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. + + The full GNU General Public License is included in this distribution in + the file called "COPYING". + + Contact Information: + Linux NICS + e1000-devel Mailing List + Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + +*******************************************************************************/ + +#include "e1000_api.h" + +/** + * e1000_init_mac_params - Initialize MAC function pointers + * @hw: pointer to the HW structure + * + * This function initializes the function pointers for the MAC + * set of functions. Called by drivers or by e1000_setup_init_funcs. + **/ +s32 e1000_init_mac_params(struct e1000_hw *hw) +{ + s32 ret_val = E1000_SUCCESS; + + if (hw->mac.ops.init_params) { + ret_val = hw->mac.ops.init_params(hw); + if (ret_val) { + DEBUGOUT("MAC Initialization Error\n"); + goto out; + } + } else { + DEBUGOUT("mac.init_mac_params was NULL\n"); + ret_val = -E1000_ERR_CONFIG; + } + +out: + return ret_val; +} + +/** + * e1000_init_nvm_params - Initialize NVM function pointers + * @hw: pointer to the HW structure + * + * This function initializes the function pointers for the NVM + * set of functions. Called by drivers or by e1000_setup_init_funcs. + **/ +s32 e1000_init_nvm_params(struct e1000_hw *hw) +{ + s32 ret_val = E1000_SUCCESS; + + if (hw->nvm.ops.init_params) { + ret_val = hw->nvm.ops.init_params(hw); + if (ret_val) { + DEBUGOUT("NVM Initialization Error\n"); + goto out; + } + } else { + DEBUGOUT("nvm.init_nvm_params was NULL\n"); + ret_val = -E1000_ERR_CONFIG; + } + +out: + return ret_val; +} + +/** + * e1000_init_phy_params - Initialize PHY function pointers + * @hw: pointer to the HW structure + * + * This function initializes the function pointers for the PHY + * set of functions. 
Called by drivers or by e1000_setup_init_funcs. + **/ +s32 e1000_init_phy_params(struct e1000_hw *hw) +{ + s32 ret_val = E1000_SUCCESS; + + if (hw->phy.ops.init_params) { + ret_val = hw->phy.ops.init_params(hw); + if (ret_val) { + DEBUGOUT("PHY Initialization Error\n"); + goto out; + } + } else { + DEBUGOUT("phy.init_phy_params was NULL\n"); + ret_val = -E1000_ERR_CONFIG; + } + +out: + return ret_val; +} + +/** + * e1000_init_mbx_params - Initialize mailbox function pointers + * @hw: pointer to the HW structure + * + * This function initializes the function pointers for the mailbox + * set of functions. Called by drivers or by e1000_setup_init_funcs. + **/ +s32 e1000_init_mbx_params(struct e1000_hw *hw) +{ + s32 ret_val = E1000_SUCCESS; + + if (hw->mbx.ops.init_params) { + ret_val = hw->mbx.ops.init_params(hw); + if (ret_val) { + DEBUGOUT("Mailbox Initialization Error\n"); + goto out; + } + } else { + DEBUGOUT("mbx.init_mbx_params was NULL\n"); + ret_val = -E1000_ERR_CONFIG; + } + +out: + return ret_val; +} + +/** + * e1000_set_mac_type - Sets MAC type + * @hw: pointer to the HW structure + * + * This function sets the mac type of the adapter based on the + * device ID stored in the hw structure. + * MUST BE FIRST FUNCTION CALLED (explicitly or through + * e1000_setup_init_funcs()). 
+ **/ +s32 e1000_set_mac_type(struct e1000_hw *hw) +{ + struct e1000_mac_info *mac = &hw->mac; + s32 ret_val = E1000_SUCCESS; + + DEBUGFUNC("e1000_set_mac_type"); + + switch (hw->device_id) { + case E1000_DEV_ID_82575EB_COPPER: + case E1000_DEV_ID_82575EB_FIBER_SERDES: + case E1000_DEV_ID_82575GB_QUAD_COPPER: + mac->type = e1000_82575; + break; + case E1000_DEV_ID_82576: + case E1000_DEV_ID_82576_FIBER: + case E1000_DEV_ID_82576_SERDES: + case E1000_DEV_ID_82576_QUAD_COPPER: + case E1000_DEV_ID_82576_QUAD_COPPER_ET2: + case E1000_DEV_ID_82576_NS: + case E1000_DEV_ID_82576_NS_SERDES: + case E1000_DEV_ID_82576_SERDES_QUAD: + mac->type = e1000_82576; + break; + case E1000_DEV_ID_82580_COPPER: + case E1000_DEV_ID_82580_FIBER: + case E1000_DEV_ID_82580_SERDES: + case E1000_DEV_ID_82580_SGMII: + case E1000_DEV_ID_82580_COPPER_DUAL: + case E1000_DEV_ID_82580_QUAD_FIBER: + case E1000_DEV_ID_DH89XXCC_SGMII: + case E1000_DEV_ID_DH89XXCC_SERDES: + case E1000_DEV_ID_DH89XXCC_BACKPLANE: + case E1000_DEV_ID_DH89XXCC_SFP: + mac->type = e1000_82580; + break; + case E1000_DEV_ID_I350_COPPER: + case E1000_DEV_ID_I350_FIBER: + case E1000_DEV_ID_I350_SERDES: + case E1000_DEV_ID_I350_SGMII: + case E1000_DEV_ID_I350_DA4: + mac->type = e1000_i350; + break; + case E1000_DEV_ID_I210_COPPER_FLASHLESS: + case E1000_DEV_ID_I210_SERDES_FLASHLESS: + case E1000_DEV_ID_I210_COPPER: + case E1000_DEV_ID_I210_COPPER_OEM1: + case E1000_DEV_ID_I210_COPPER_IT: + case E1000_DEV_ID_I210_FIBER: + case E1000_DEV_ID_I210_SERDES: + case E1000_DEV_ID_I210_SGMII: + case E1000_DEV_ID_I210_AUTOMOTIVE: + mac->type = e1000_i210; + break; + case E1000_DEV_ID_I211_COPPER: + mac->type = e1000_i211; + break; + + case E1000_DEV_ID_I354_BACKPLANE_1GBPS: + case E1000_DEV_ID_I354_SGMII: + case E1000_DEV_ID_I354_BACKPLANE_2_5GBPS: + mac->type = e1000_i354; + break; + default: + /* Should never have loaded on this device */ + ret_val = -E1000_ERR_MAC_INIT; + break; + } + + return ret_val; +} + +/** + * 
e1000_setup_init_funcs - Initializes function pointers + * @hw: pointer to the HW structure + * @init_device: true will initialize the rest of the function pointers + * getting the device ready for use. false will only set + * MAC type and the function pointers for the other init + * functions. Passing false will not generate any hardware + * reads or writes. + * + * This function must be called by a driver in order to use the rest + * of the 'shared' code files. Called by drivers only. + **/ +s32 e1000_setup_init_funcs(struct e1000_hw *hw, bool init_device) +{ + s32 ret_val; + + /* Can't do much good without knowing the MAC type. */ + ret_val = e1000_set_mac_type(hw); + if (ret_val) { + DEBUGOUT("ERROR: MAC type could not be set properly.\n"); + goto out; + } + + if (!hw->hw_addr) { + DEBUGOUT("ERROR: Registers not mapped\n"); + ret_val = -E1000_ERR_CONFIG; + goto out; + } + + /* + * Init function pointers to generic implementations. We do this first + * allowing a driver module to override it afterward. + */ + e1000_init_mac_ops_generic(hw); + e1000_init_phy_ops_generic(hw); + e1000_init_nvm_ops_generic(hw); + e1000_init_mbx_ops_generic(hw); + + /* + * Set up the init function pointers. These are functions within the + * adapter family file that sets up function pointers for the rest of + * the functions in that family. + */ + switch (hw->mac.type) { + case e1000_82575: + case e1000_82576: + case e1000_82580: + case e1000_i350: + case e1000_i354: + e1000_init_function_pointers_82575(hw); + break; + case e1000_i210: + case e1000_i211: + e1000_init_function_pointers_i210(hw); + break; + default: + DEBUGOUT("Hardware not supported\n"); + ret_val = -E1000_ERR_CONFIG; + break; + } + + /* + * Initialize the rest of the function pointers. These require some + * register reads/writes in some cases. 
+ */ + if (!(ret_val) && init_device) { + ret_val = e1000_init_mac_params(hw); + if (ret_val) + goto out; + + ret_val = e1000_init_nvm_params(hw); + if (ret_val) + goto out; + + ret_val = e1000_init_phy_params(hw); + if (ret_val) + goto out; + + ret_val = e1000_init_mbx_params(hw); + if (ret_val) + goto out; + } + +out: + return ret_val; +} + +/** + * e1000_get_bus_info - Obtain bus information for adapter + * @hw: pointer to the HW structure + * + * This will obtain information about the HW bus for which the + * adapter is attached and stores it in the hw structure. This is a + * function pointer entry point called by drivers. + **/ +s32 e1000_get_bus_info(struct e1000_hw *hw) +{ + if (hw->mac.ops.get_bus_info) + return hw->mac.ops.get_bus_info(hw); + + return E1000_SUCCESS; +} + +/** + * e1000_clear_vfta - Clear VLAN filter table + * @hw: pointer to the HW structure + * + * This clears the VLAN filter table on the adapter. This is a function + * pointer entry point called by drivers. + **/ +void e1000_clear_vfta(struct e1000_hw *hw) +{ + if (hw->mac.ops.clear_vfta) + hw->mac.ops.clear_vfta(hw); +} + +/** + * e1000_write_vfta - Write value to VLAN filter table + * @hw: pointer to the HW structure + * @offset: the 32-bit offset in which to write the value to. + * @value: the 32-bit value to write at location offset. + * + * This writes a 32-bit value to a 32-bit offset in the VLAN filter + * table. This is a function pointer entry point called by drivers. + **/ +void e1000_write_vfta(struct e1000_hw *hw, u32 offset, u32 value) +{ + if (hw->mac.ops.write_vfta) + hw->mac.ops.write_vfta(hw, offset, value); +} + +/** + * e1000_update_mc_addr_list - Update Multicast addresses + * @hw: pointer to the HW structure + * @mc_addr_list: array of multicast addresses to program + * @mc_addr_count: number of multicast addresses to program + * + * Updates the Multicast Table Array. + * The caller must have a packed mc_addr_list of multicast addresses. 
+ **/ +void e1000_update_mc_addr_list(struct e1000_hw *hw, u8 *mc_addr_list, + u32 mc_addr_count) +{ + if (hw->mac.ops.update_mc_addr_list) + hw->mac.ops.update_mc_addr_list(hw, mc_addr_list, + mc_addr_count); +} + +/** + * e1000_force_mac_fc - Force MAC flow control + * @hw: pointer to the HW structure + * + * Force the MAC's flow control settings. Currently no func pointer exists + * and all implementations are handled in the generic version of this + * function. + **/ +s32 e1000_force_mac_fc(struct e1000_hw *hw) +{ + return e1000_force_mac_fc_generic(hw); +} + +/** + * e1000_check_for_link - Check/Store link connection + * @hw: pointer to the HW structure + * + * This checks the link condition of the adapter and stores the + * results in the hw->mac structure. This is a function pointer entry + * point called by drivers. + **/ +s32 e1000_check_for_link(struct e1000_hw *hw) +{ + if (hw->mac.ops.check_for_link) + return hw->mac.ops.check_for_link(hw); + + return -E1000_ERR_CONFIG; +} + +/** + * e1000_check_mng_mode - Check management mode + * @hw: pointer to the HW structure + * + * This checks if the adapter has manageability enabled. + * This is a function pointer entry point called by drivers. + **/ +bool e1000_check_mng_mode(struct e1000_hw *hw) +{ + if (hw->mac.ops.check_mng_mode) + return hw->mac.ops.check_mng_mode(hw); + + return false; +} + +/** + * e1000_mng_write_dhcp_info - Writes DHCP info to host interface + * @hw: pointer to the HW structure + * @buffer: pointer to the host interface + * @length: size of the buffer + * + * Writes the DHCP information to the host interface. + **/ +s32 e1000_mng_write_dhcp_info(struct e1000_hw *hw, u8 *buffer, u16 length) +{ + return e1000_mng_write_dhcp_info_generic(hw, buffer, length); +} + +/** + * e1000_reset_hw - Reset hardware + * @hw: pointer to the HW structure + * + * This resets the hardware into a known state. This is a function pointer + * entry point called by drivers. 
+ **/ +s32 e1000_reset_hw(struct e1000_hw *hw) +{ + if (hw->mac.ops.reset_hw) + return hw->mac.ops.reset_hw(hw); + + return -E1000_ERR_CONFIG; +} + +/** + * e1000_init_hw - Initialize hardware + * @hw: pointer to the HW structure + * + * This inits the hardware readying it for operation. This is a function + * pointer entry point called by drivers. + **/ +s32 e1000_init_hw(struct e1000_hw *hw) +{ + if (hw->mac.ops.init_hw) + return hw->mac.ops.init_hw(hw); + + return -E1000_ERR_CONFIG; +} + +/** + * e1000_setup_link - Configures link and flow control + * @hw: pointer to the HW structure + * + * This configures link and flow control settings for the adapter. This + * is a function pointer entry point called by drivers. While modules can + * also call this, they probably call their own version of this function. + **/ +s32 e1000_setup_link(struct e1000_hw *hw) +{ + if (hw->mac.ops.setup_link) + return hw->mac.ops.setup_link(hw); + + return -E1000_ERR_CONFIG; +} + +/** + * e1000_get_speed_and_duplex - Returns current speed and duplex + * @hw: pointer to the HW structure + * @speed: pointer to a 16-bit value to store the speed + * @duplex: pointer to a 16-bit value to store the duplex. + * + * This returns the speed and duplex of the adapter in the two 'out' + * variables passed in. This is a function pointer entry point called + * by drivers. + **/ +s32 e1000_get_speed_and_duplex(struct e1000_hw *hw, u16 *speed, u16 *duplex) +{ + if (hw->mac.ops.get_link_up_info) + return hw->mac.ops.get_link_up_info(hw, speed, duplex); + + return -E1000_ERR_CONFIG; +} + +/** + * e1000_setup_led - Configures SW controllable LED + * @hw: pointer to the HW structure + * + * This prepares the SW controllable LED for use and saves the current state + * of the LED so it can be later restored. This is a function pointer entry + * point called by drivers. 
+ **/ +s32 e1000_setup_led(struct e1000_hw *hw) +{ + if (hw->mac.ops.setup_led) + return hw->mac.ops.setup_led(hw); + + return E1000_SUCCESS; +} + +/** + * e1000_cleanup_led - Restores SW controllable LED + * @hw: pointer to the HW structure + * + * This restores the SW controllable LED to the value saved off by + * e1000_setup_led. This is a function pointer entry point called by drivers. + **/ +s32 e1000_cleanup_led(struct e1000_hw *hw) +{ + if (hw->mac.ops.cleanup_led) + return hw->mac.ops.cleanup_led(hw); + + return E1000_SUCCESS; +} + +/** + * e1000_blink_led - Blink SW controllable LED + * @hw: pointer to the HW structure + * + * This starts the adapter LED blinking. Request the LED to be setup first + * and cleaned up after. This is a function pointer entry point called by + * drivers. + **/ +s32 e1000_blink_led(struct e1000_hw *hw) +{ + if (hw->mac.ops.blink_led) + return hw->mac.ops.blink_led(hw); + + return E1000_SUCCESS; +} + +/** + * e1000_id_led_init - store LED configurations in SW + * @hw: pointer to the HW structure + * + * Initializes the LED config in SW. This is a function pointer entry point + * called by drivers. + **/ +s32 e1000_id_led_init(struct e1000_hw *hw) +{ + if (hw->mac.ops.id_led_init) + return hw->mac.ops.id_led_init(hw); + + return E1000_SUCCESS; +} + +/** + * e1000_led_on - Turn on SW controllable LED + * @hw: pointer to the HW structure + * + * Turns the SW defined LED on. This is a function pointer entry point + * called by drivers. + **/ +s32 e1000_led_on(struct e1000_hw *hw) +{ + if (hw->mac.ops.led_on) + return hw->mac.ops.led_on(hw); + + return E1000_SUCCESS; +} + +/** + * e1000_led_off - Turn off SW controllable LED + * @hw: pointer to the HW structure + * + * Turns the SW defined LED off. This is a function pointer entry point + * called by drivers. 
+ **/ +s32 e1000_led_off(struct e1000_hw *hw) +{ + if (hw->mac.ops.led_off) + return hw->mac.ops.led_off(hw); + + return E1000_SUCCESS; +} + +/** + * e1000_reset_adaptive - Reset adaptive IFS + * @hw: pointer to the HW structure + * + * Resets the adaptive IFS. Currently no func pointer exists and all + * implementations are handled in the generic version of this function. + **/ +void e1000_reset_adaptive(struct e1000_hw *hw) +{ + e1000_reset_adaptive_generic(hw); +} + +/** + * e1000_update_adaptive - Update adaptive IFS + * @hw: pointer to the HW structure + * + * Updates adapter IFS. Currently no func pointer exists and all + * implementations are handled in the generic version of this function. + **/ +void e1000_update_adaptive(struct e1000_hw *hw) +{ + e1000_update_adaptive_generic(hw); +} + +/** + * e1000_disable_pcie_master - Disable PCI-Express master access + * @hw: pointer to the HW structure + * + * Disables PCI-Express master access and verifies there are no pending + * requests. Currently no func pointer exists and all implementations are + * handled in the generic version of this function. + **/ +s32 e1000_disable_pcie_master(struct e1000_hw *hw) +{ + return e1000_disable_pcie_master_generic(hw); +} + +/** + * e1000_config_collision_dist - Configure collision distance + * @hw: pointer to the HW structure + * + * Configures the collision distance to the default value and is used + * during link setup. + **/ +void e1000_config_collision_dist(struct e1000_hw *hw) +{ + if (hw->mac.ops.config_collision_dist) + hw->mac.ops.config_collision_dist(hw); +} + +/** + * e1000_rar_set - Sets a receive address register + * @hw: pointer to the HW structure + * @addr: address to set the RAR to + * @index: the RAR to set + * + * Sets a Receive Address Register (RAR) to the specified address. 
+ **/ +int e1000_rar_set(struct e1000_hw *hw, u8 *addr, u32 index) +{ + if (hw->mac.ops.rar_set) + return hw->mac.ops.rar_set(hw, addr, index); + + return E1000_SUCCESS; +} + +/** + * e1000_validate_mdi_setting - Ensures valid MDI/MDIX SW state + * @hw: pointer to the HW structure + * + * Ensures that the MDI/MDIX SW state is valid. + **/ +s32 e1000_validate_mdi_setting(struct e1000_hw *hw) +{ + if (hw->mac.ops.validate_mdi_setting) + return hw->mac.ops.validate_mdi_setting(hw); + + return E1000_SUCCESS; +} + +/** + * e1000_hash_mc_addr - Determines address location in multicast table + * @hw: pointer to the HW structure + * @mc_addr: Multicast address to hash. + * + * This hashes an address to determine its location in the multicast + * table. Currently no func pointer exists and all implementations + * are handled in the generic version of this function. + **/ +u32 e1000_hash_mc_addr(struct e1000_hw *hw, u8 *mc_addr) +{ + return e1000_hash_mc_addr_generic(hw, mc_addr); +} + +/** + * e1000_enable_tx_pkt_filtering - Enable packet filtering on TX + * @hw: pointer to the HW structure + * + * Enables packet filtering on transmit packets if manageability is enabled + * and host interface is enabled. + * Currently no func pointer exists and all implementations are handled in the + * generic version of this function. + **/ +bool e1000_enable_tx_pkt_filtering(struct e1000_hw *hw) +{ + return e1000_enable_tx_pkt_filtering_generic(hw); +} + +/** + * e1000_mng_host_if_write - Writes to the manageability host interface + * @hw: pointer to the HW structure + * @buffer: pointer to the host interface buffer + * @length: size of the buffer + * @offset: location in the buffer to write to + * @sum: sum of the data (not checksum) + * + * This function writes the buffer content at the offset given on the host if. + * It also does alignment considerations to do the writes in most efficient + * way. Also fills up the sum of the buffer in *buffer parameter. 
+ **/ +s32 e1000_mng_host_if_write(struct e1000_hw *hw, u8 *buffer, u16 length, + u16 offset, u8 *sum) +{ + return e1000_mng_host_if_write_generic(hw, buffer, length, offset, sum); +} + +/** + * e1000_mng_write_cmd_header - Writes manageability command header + * @hw: pointer to the HW structure + * @hdr: pointer to the host interface command header + * + * Writes the command header after doing the checksum calculation. + **/ +s32 e1000_mng_write_cmd_header(struct e1000_hw *hw, + struct e1000_host_mng_command_header *hdr) +{ + return e1000_mng_write_cmd_header_generic(hw, hdr); +} + +/** + * e1000_mng_enable_host_if - Checks host interface is enabled + * @hw: pointer to the HW structure + * + * Returns E1000_SUCCESS upon success, else E1000_ERR_HOST_INTERFACE_COMMAND + * + * This function checks whether the HOST IF is enabled for command operation + * and also checks whether the previous command is completed. It busy waits + * in case the previous command is not completed. + **/ +s32 e1000_mng_enable_host_if(struct e1000_hw *hw) +{ + return e1000_mng_enable_host_if_generic(hw); +} + +/** + * e1000_check_reset_block - Verifies PHY can be reset + * @hw: pointer to the HW structure + * + * Checks if the PHY is in a state that can be reset or if manageability + * has it tied up. This is a function pointer entry point called by drivers. + **/ +s32 e1000_check_reset_block(struct e1000_hw *hw) +{ + if (hw->phy.ops.check_reset_block) + return hw->phy.ops.check_reset_block(hw); + + return E1000_SUCCESS; +} + +/** + * e1000_read_phy_reg - Reads PHY register + * @hw: pointer to the HW structure + * @offset: the register to read + * @data: the buffer to store the 16-bit read. + * + * Reads the PHY register and returns the value in data. + * This is a function pointer entry point called by drivers. 
+ **/ +s32 e1000_read_phy_reg(struct e1000_hw *hw, u32 offset, u16 *data) +{ + if (hw->phy.ops.read_reg) + return hw->phy.ops.read_reg(hw, offset, data); + + return E1000_SUCCESS; +} + +/** + * e1000_write_phy_reg - Writes PHY register + * @hw: pointer to the HW structure + * @offset: the register to write + * @data: the value to write. + * + * Writes the PHY register at offset with the value in data. + * This is a function pointer entry point called by drivers. + **/ +s32 e1000_write_phy_reg(struct e1000_hw *hw, u32 offset, u16 data) +{ + if (hw->phy.ops.write_reg) + return hw->phy.ops.write_reg(hw, offset, data); + + return E1000_SUCCESS; +} + +/** + * e1000_release_phy - Generic release PHY + * @hw: pointer to the HW structure + * + * Return if silicon family does not require a semaphore when accessing the + * PHY. + **/ +void e1000_release_phy(struct e1000_hw *hw) +{ + if (hw->phy.ops.release) + hw->phy.ops.release(hw); +} + +/** + * e1000_acquire_phy - Generic acquire PHY + * @hw: pointer to the HW structure + * + * Return success if silicon family does not require a semaphore when + * accessing the PHY. + **/ +s32 e1000_acquire_phy(struct e1000_hw *hw) +{ + if (hw->phy.ops.acquire) + return hw->phy.ops.acquire(hw); + + return E1000_SUCCESS; +} + +/** + * e1000_read_kmrn_reg - Reads register using Kumeran interface + * @hw: pointer to the HW structure + * @offset: the register to read + * @data: the location to store the 16-bit value read. + * + * Reads a register out of the Kumeran interface. Currently no func pointer + * exists and all implementations are handled in the generic version of + * this function. + **/ +s32 e1000_read_kmrn_reg(struct e1000_hw *hw, u32 offset, u16 *data) +{ + return e1000_read_kmrn_reg_generic(hw, offset, data); +} + +/** + * e1000_write_kmrn_reg - Writes register using Kumeran interface + * @hw: pointer to the HW structure + * @offset: the register to write + * @data: the value to write. 
+ * + * Writes a register to the Kumeran interface. Currently no func pointer + * exists and all implementations are handled in the generic version of + * this function. + **/ +s32 e1000_write_kmrn_reg(struct e1000_hw *hw, u32 offset, u16 data) +{ + return e1000_write_kmrn_reg_generic(hw, offset, data); +} + +/** + * e1000_get_cable_length - Retrieves cable length estimation + * @hw: pointer to the HW structure + * + * This function estimates the cable length and stores them in + * hw->phy.min_length and hw->phy.max_length. This is a function pointer + * entry point called by drivers. + **/ +s32 e1000_get_cable_length(struct e1000_hw *hw) +{ + if (hw->phy.ops.get_cable_length) + return hw->phy.ops.get_cable_length(hw); + + return E1000_SUCCESS; +} + +/** + * e1000_get_phy_info - Retrieves PHY information from registers + * @hw: pointer to the HW structure + * + * This function gets some information from various PHY registers and + * populates hw->phy values with it. This is a function pointer entry + * point called by drivers. + **/ +s32 e1000_get_phy_info(struct e1000_hw *hw) +{ + if (hw->phy.ops.get_info) + return hw->phy.ops.get_info(hw); + + return E1000_SUCCESS; +} + +/** + * e1000_phy_hw_reset - Hard PHY reset + * @hw: pointer to the HW structure + * + * Performs a hard PHY reset. This is a function pointer entry point called + * by drivers. + **/ +s32 e1000_phy_hw_reset(struct e1000_hw *hw) +{ + if (hw->phy.ops.reset) + return hw->phy.ops.reset(hw); + + return E1000_SUCCESS; +} + +/** + * e1000_phy_commit - Soft PHY reset + * @hw: pointer to the HW structure + * + * Performs a soft PHY reset on those that apply. This is a function pointer + * entry point called by drivers. 
+ **/ +s32 e1000_phy_commit(struct e1000_hw *hw) +{ + if (hw->phy.ops.commit) + return hw->phy.ops.commit(hw); + + return E1000_SUCCESS; +} + +/** + * e1000_set_d0_lplu_state - Sets low power link up state for D0 + * @hw: pointer to the HW structure + * @active: boolean used to enable/disable lplu + * + * Success returns 0, Failure returns 1 + * + * The low power link up (lplu) state is set to the power management level D0 + * and SmartSpeed is disabled when active is true, else clear lplu for D0 + * and enable Smartspeed. LPLU and Smartspeed are mutually exclusive. LPLU + * is used during Dx states where the power conservation is most important. + * During driver activity, SmartSpeed should be enabled so performance is + * maintained. This is a function pointer entry point called by drivers. + **/ +s32 e1000_set_d0_lplu_state(struct e1000_hw *hw, bool active) +{ + if (hw->phy.ops.set_d0_lplu_state) + return hw->phy.ops.set_d0_lplu_state(hw, active); + + return E1000_SUCCESS; +} + +/** + * e1000_set_d3_lplu_state - Sets low power link up state for D3 + * @hw: pointer to the HW structure + * @active: boolean used to enable/disable lplu + * + * Success returns 0, Failure returns 1 + * + * The low power link up (lplu) state is set to the power management level D3 + * and SmartSpeed is disabled when active is true, else clear lplu for D3 + * and enable Smartspeed. LPLU and Smartspeed are mutually exclusive. LPLU + * is used during Dx states where the power conservation is most important. + * During driver activity, SmartSpeed should be enabled so performance is + * maintained. This is a function pointer entry point called by drivers. 
+ **/ +s32 e1000_set_d3_lplu_state(struct e1000_hw *hw, bool active) +{ + if (hw->phy.ops.set_d3_lplu_state) + return hw->phy.ops.set_d3_lplu_state(hw, active); + + return E1000_SUCCESS; +} + +/** + * e1000_read_mac_addr - Reads MAC address + * @hw: pointer to the HW structure + * + * Reads the MAC address out of the adapter and stores it in the HW structure. + * Currently no func pointer exists and all implementations are handled in the + * generic version of this function. + **/ +s32 e1000_read_mac_addr(struct e1000_hw *hw) +{ + if (hw->mac.ops.read_mac_addr) + return hw->mac.ops.read_mac_addr(hw); + + return e1000_read_mac_addr_generic(hw); +} + +/** + * e1000_read_pba_string - Read device part number string + * @hw: pointer to the HW structure + * @pba_num: pointer to device part number + * @pba_num_size: size of part number buffer + * + * Reads the product board assembly (PBA) number from the EEPROM and stores + * the value in pba_num. + * Currently no func pointer exists and all implementations are handled in the + * generic version of this function. + **/ +s32 e1000_read_pba_string(struct e1000_hw *hw, u8 *pba_num, u32 pba_num_size) +{ + return e1000_read_pba_string_generic(hw, pba_num, pba_num_size); +} + +/** + * e1000_read_pba_length - Read device part number string length + * @hw: pointer to the HW structure + * @pba_num_size: size of part number buffer + * + * Reads the product board assembly (PBA) number length from the EEPROM and + * stores the value in pba_num. + * Currently no func pointer exists and all implementations are handled in the + * generic version of this function. + **/ +s32 e1000_read_pba_length(struct e1000_hw *hw, u32 *pba_num_size) +{ + return e1000_read_pba_length_generic(hw, pba_num_size); +} + +/** + * e1000_validate_nvm_checksum - Verifies NVM (EEPROM) checksum + * @hw: pointer to the HW structure + * + * Validates the NVM checksum is correct. This is a function pointer entry + * point called by drivers. 
+ **/ +s32 e1000_validate_nvm_checksum(struct e1000_hw *hw) +{ + if (hw->nvm.ops.validate) + return hw->nvm.ops.validate(hw); + + return -E1000_ERR_CONFIG; +} + +/** + * e1000_update_nvm_checksum - Updates NVM (EEPROM) checksum + * @hw: pointer to the HW structure + * + * Updates the NVM checksum. Currently no func pointer exists and all + * implementations are handled in the generic version of this function. + **/ +s32 e1000_update_nvm_checksum(struct e1000_hw *hw) +{ + if (hw->nvm.ops.update) + return hw->nvm.ops.update(hw); + + return -E1000_ERR_CONFIG; +} + +/** + * e1000_reload_nvm - Reloads EEPROM + * @hw: pointer to the HW structure + * + * Reloads the EEPROM by setting the "Reinitialize from EEPROM" bit in the + * extended control register. + **/ +void e1000_reload_nvm(struct e1000_hw *hw) +{ + if (hw->nvm.ops.reload) + hw->nvm.ops.reload(hw); +} + +/** + * e1000_read_nvm - Reads NVM (EEPROM) + * @hw: pointer to the HW structure + * @offset: the word offset to read + * @words: number of 16-bit words to read + * @data: pointer to the properly sized buffer for the data. + * + * Reads 16-bit chunks of data from the NVM (EEPROM). This is a function + * pointer entry point called by drivers. + **/ +s32 e1000_read_nvm(struct e1000_hw *hw, u16 offset, u16 words, u16 *data) +{ + if (hw->nvm.ops.read) + return hw->nvm.ops.read(hw, offset, words, data); + + return -E1000_ERR_CONFIG; +} + +/** + * e1000_write_nvm - Writes to NVM (EEPROM) + * @hw: pointer to the HW structure + * @offset: the word offset to read + * @words: number of 16-bit words to write + * @data: pointer to the properly sized buffer for the data. + * + * Writes 16-bit chunks of data to the NVM (EEPROM). This is a function + * pointer entry point called by drivers. 
+ **/ +s32 e1000_write_nvm(struct e1000_hw *hw, u16 offset, u16 words, u16 *data) +{ + if (hw->nvm.ops.write) + return hw->nvm.ops.write(hw, offset, words, data); + + return E1000_SUCCESS; +} + +/** + * e1000_write_8bit_ctrl_reg - Writes 8bit Control register + * @hw: pointer to the HW structure + * @reg: 32bit register offset + * @offset: the register to write + * @data: the value to write. + * + * Writes the PHY register at offset with the value in data. + * This is a function pointer entry point called by drivers. + **/ +s32 e1000_write_8bit_ctrl_reg(struct e1000_hw *hw, u32 reg, u32 offset, + u8 data) +{ + return e1000_write_8bit_ctrl_reg_generic(hw, reg, offset, data); +} + +/** + * e1000_power_up_phy - Restores link in case of PHY power down + * @hw: pointer to the HW structure + * + * The phy may be powered down to save power, to turn off link when the + * driver is unloaded, or wake on lan is not enabled (among others). + **/ +void e1000_power_up_phy(struct e1000_hw *hw) +{ + if (hw->phy.ops.power_up) + hw->phy.ops.power_up(hw); + + e1000_setup_link(hw); +} + +/** + * e1000_power_down_phy - Power down PHY + * @hw: pointer to the HW structure + * + * The phy may be powered down to save power, to turn off link when the + * driver is unloaded, or wake on lan is not enabled (among others). + **/ +void e1000_power_down_phy(struct e1000_hw *hw) +{ + if (hw->phy.ops.power_down) + hw->phy.ops.power_down(hw); +} + +/** + * e1000_power_up_fiber_serdes_link - Power up serdes link + * @hw: pointer to the HW structure + * + * Power on the optics and PCS. + **/ +void e1000_power_up_fiber_serdes_link(struct e1000_hw *hw) +{ + if (hw->mac.ops.power_up_serdes) + hw->mac.ops.power_up_serdes(hw); +} + +/** + * e1000_shutdown_fiber_serdes_link - Remove link during power down + * @hw: pointer to the HW structure + * + * Shutdown the optics and PCS on driver unload. 
+ **/ +void e1000_shutdown_fiber_serdes_link(struct e1000_hw *hw) +{ + if (hw->mac.ops.shutdown_serdes) + hw->mac.ops.shutdown_serdes(hw); +} + +/** + * e1000_get_thermal_sensor_data - Gathers thermal sensor data + * @hw: pointer to hardware structure + * + * Updates the temperatures in mac.thermal_sensor_data + **/ +s32 e1000_get_thermal_sensor_data(struct e1000_hw *hw) +{ + if (hw->mac.ops.get_thermal_sensor_data) + return hw->mac.ops.get_thermal_sensor_data(hw); + + return E1000_SUCCESS; +} + +/** + * e1000_init_thermal_sensor_thresh - Sets thermal sensor thresholds + * @hw: pointer to hardware structure + * + * Sets the thermal sensor thresholds according to the NVM map + **/ +s32 e1000_init_thermal_sensor_thresh(struct e1000_hw *hw) +{ + if (hw->mac.ops.init_thermal_sensor_thresh) + return hw->mac.ops.init_thermal_sensor_thresh(hw); + + return E1000_SUCCESS; +} + diff --git a/drivers/staging/igb_avb/e1000_api.h b/drivers/staging/igb_avb/e1000_api.h new file mode 100644 index 0000000000000..32fce254685ae --- /dev/null +++ b/drivers/staging/igb_avb/e1000_api.h @@ -0,0 +1,152 @@ +/******************************************************************************* + + Intel(R) Gigabit Ethernet Linux driver + Copyright(c) 2007-2015 Intel Corporation. + + This program is free software; you can redistribute it and/or modify it + under the terms and conditions of the GNU General Public License, + version 2, as published by the Free Software Foundation. + + This program is distributed in the hope it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. + + The full GNU General Public License is included in this distribution in + the file called "COPYING". + + Contact Information: + Linux NICS + e1000-devel Mailing List + Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 + +*******************************************************************************/ + +#ifndef _E1000_API_H_ +#define _E1000_API_H_ + +#include "e1000_hw.h" + +extern void e1000_init_function_pointers_82575(struct e1000_hw *hw); +extern void e1000_rx_fifo_flush_82575(struct e1000_hw *hw); +extern void e1000_init_function_pointers_vf(struct e1000_hw *hw); +extern void e1000_power_up_fiber_serdes_link(struct e1000_hw *hw); +extern void e1000_shutdown_fiber_serdes_link(struct e1000_hw *hw); +extern void e1000_init_function_pointers_i210(struct e1000_hw *hw); + +s32 e1000_set_obff_timer(struct e1000_hw *hw, u32 itr); +s32 e1000_set_mac_type(struct e1000_hw *hw); +s32 e1000_setup_init_funcs(struct e1000_hw *hw, bool init_device); +s32 e1000_init_mac_params(struct e1000_hw *hw); +s32 e1000_init_nvm_params(struct e1000_hw *hw); +s32 e1000_init_phy_params(struct e1000_hw *hw); +s32 e1000_init_mbx_params(struct e1000_hw *hw); +s32 e1000_get_bus_info(struct e1000_hw *hw); +void e1000_clear_vfta(struct e1000_hw *hw); +void e1000_write_vfta(struct e1000_hw *hw, u32 offset, u32 value); +s32 e1000_force_mac_fc(struct e1000_hw *hw); +s32 e1000_check_for_link(struct e1000_hw *hw); +s32 e1000_reset_hw(struct e1000_hw *hw); +s32 e1000_init_hw(struct e1000_hw *hw); +s32 e1000_setup_link(struct e1000_hw *hw); +s32 e1000_get_speed_and_duplex(struct e1000_hw *hw, u16 *speed, u16 *duplex); +s32 e1000_disable_pcie_master(struct e1000_hw *hw); +void e1000_config_collision_dist(struct e1000_hw *hw); +int e1000_rar_set(struct e1000_hw *hw, u8 *addr, u32 index); +u32 e1000_hash_mc_addr(struct e1000_hw *hw, u8 *mc_addr); +void e1000_update_mc_addr_list(struct e1000_hw *hw, u8 *mc_addr_list, + u32 mc_addr_count); +s32 e1000_setup_led(struct e1000_hw *hw); +s32 e1000_cleanup_led(struct e1000_hw *hw); +s32 e1000_check_reset_block(struct e1000_hw *hw); +s32 e1000_blink_led(struct e1000_hw *hw); +s32 e1000_led_on(struct e1000_hw *hw); +s32 
e1000_led_off(struct e1000_hw *hw); +s32 e1000_id_led_init(struct e1000_hw *hw); +void e1000_reset_adaptive(struct e1000_hw *hw); +void e1000_update_adaptive(struct e1000_hw *hw); +s32 e1000_get_cable_length(struct e1000_hw *hw); +s32 e1000_validate_mdi_setting(struct e1000_hw *hw); +s32 e1000_read_phy_reg(struct e1000_hw *hw, u32 offset, u16 *data); +s32 e1000_write_phy_reg(struct e1000_hw *hw, u32 offset, u16 data); +s32 e1000_write_8bit_ctrl_reg(struct e1000_hw *hw, u32 reg, u32 offset, + u8 data); +s32 e1000_get_phy_info(struct e1000_hw *hw); +void e1000_release_phy(struct e1000_hw *hw); +s32 e1000_acquire_phy(struct e1000_hw *hw); +s32 e1000_phy_hw_reset(struct e1000_hw *hw); +s32 e1000_phy_commit(struct e1000_hw *hw); +void e1000_power_up_phy(struct e1000_hw *hw); +void e1000_power_down_phy(struct e1000_hw *hw); +s32 e1000_read_mac_addr(struct e1000_hw *hw); +s32 e1000_read_pba_string(struct e1000_hw *hw, u8 *pba_num, u32 pba_num_size); +s32 e1000_read_pba_length(struct e1000_hw *hw, u32 *pba_num_size); +void e1000_reload_nvm(struct e1000_hw *hw); +s32 e1000_update_nvm_checksum(struct e1000_hw *hw); +s32 e1000_validate_nvm_checksum(struct e1000_hw *hw); +s32 e1000_read_nvm(struct e1000_hw *hw, u16 offset, u16 words, u16 *data); +s32 e1000_read_kmrn_reg(struct e1000_hw *hw, u32 offset, u16 *data); +s32 e1000_write_kmrn_reg(struct e1000_hw *hw, u32 offset, u16 data); +s32 e1000_write_nvm(struct e1000_hw *hw, u16 offset, u16 words, u16 *data); +s32 e1000_set_d3_lplu_state(struct e1000_hw *hw, bool active); +s32 e1000_set_d0_lplu_state(struct e1000_hw *hw, bool active); +bool e1000_check_mng_mode(struct e1000_hw *hw); +bool e1000_enable_tx_pkt_filtering(struct e1000_hw *hw); +s32 e1000_mng_enable_host_if(struct e1000_hw *hw); +s32 e1000_mng_host_if_write(struct e1000_hw *hw, u8 *buffer, u16 length, + u16 offset, u8 *sum); +s32 e1000_mng_write_cmd_header(struct e1000_hw *hw, + struct e1000_host_mng_command_header *hdr); +s32 e1000_mng_write_dhcp_info(struct 
e1000_hw *hw, u8 *buffer, u16 length); +s32 e1000_get_thermal_sensor_data(struct e1000_hw *hw); +s32 e1000_init_thermal_sensor_thresh(struct e1000_hw *hw); + +/* + * TBI_ACCEPT macro definition: + * + * This macro requires: + * a = a pointer to struct e1000_hw + * status = the 8 bit status field of the Rx descriptor with EOP set + * errors = the 8 bit error field of the Rx descriptor with EOP set + * length = the sum of all the length fields of the Rx descriptors that + * make up the current frame + * last_byte = the last byte of the frame DMAed by the hardware + * min_frame_size = the minimum frame length we want to accept. + * max_frame_size = the maximum frame length we want to accept. + * + * This macro is a conditional that should be used in the interrupt + * handler's Rx processing routine when RxErrors have been detected. + * + * Typical use: + * ... + * if (TBI_ACCEPT) { + * accept_frame = true; + * e1000_tbi_adjust_stats(adapter, MacAddress); + * frame_length--; + * } else { + * accept_frame = false; + * } + * ... + */ + +/* The carrier extension symbol, as received by the NIC. */ +#define CARRIER_EXTENSION 0x0F + +#define TBI_ACCEPT(a, status, errors, length, last_byte, \ + min_frame_size, max_frame_size) \ + (e1000_tbi_sbp_enabled_82543(a) && \ + (((errors) & E1000_RXD_ERR_FRAME_ERR_MASK) == E1000_RXD_ERR_CE) && \ + ((last_byte) == CARRIER_EXTENSION) && \ + (((status) & E1000_RXD_STAT_VP) ? \ + (((length) > ((min_frame_size) - VLAN_TAG_SIZE)) && \ + ((length) <= ((max_frame_size) + 1))) : \ + (((length) > (min_frame_size)) && \ + ((length) <= ((max_frame_size) + VLAN_TAG_SIZE + 1))))) + +#ifndef E1000_MAX +#define E1000_MAX(a, b) ((a) > (b) ? 
(a) : (b)) +#endif +#ifndef E1000_DIVIDE_ROUND_UP +#define E1000_DIVIDE_ROUND_UP(a, b) (((a) + (b) - 1) / (b)) /* ceil(a/b) */ +#endif +#endif /* _E1000_API_H_ */ diff --git a/drivers/staging/igb_avb/e1000_defines.h b/drivers/staging/igb_avb/e1000_defines.h new file mode 100644 index 0000000000000..4022e22be7754 --- /dev/null +++ b/drivers/staging/igb_avb/e1000_defines.h @@ -0,0 +1,1486 @@ +/******************************************************************************* + + Intel(R) Gigabit Ethernet Linux driver + Copyright(c) 2007-2015 Intel Corporation. + + This program is free software; you can redistribute it and/or modify it + under the terms and conditions of the GNU General Public License, + version 2, as published by the Free Software Foundation. + + This program is distributed in the hope it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. + + The full GNU General Public License is included in this distribution in + the file called "COPYING". + + Contact Information: + Linux NICS + e1000-devel Mailing List + Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 + +*******************************************************************************/ + +#ifndef _E1000_DEFINES_H_ +#define _E1000_DEFINES_H_ + +/* Number of Transmit and Receive Descriptors must be a multiple of 8 */ +#define REQ_TX_DESCRIPTOR_MULTIPLE 8 +#define REQ_RX_DESCRIPTOR_MULTIPLE 8 + +/* Definitions for power management and wakeup registers */ +/* Wake Up Control */ +#define E1000_WUC_APME 0x00000001 /* APM Enable */ +#define E1000_WUC_PME_EN 0x00000002 /* PME Enable */ +#define E1000_WUC_PME_STATUS 0x00000004 /* PME Status */ +#define E1000_WUC_APMPME 0x00000008 /* Assert PME on APM Wakeup */ +#define E1000_WUC_PHY_WAKE 0x00000100 /* if PHY supports wakeup */ + +/* Wake Up Filter Control */ +#define E1000_WUFC_LNKC 0x00000001 /* Link Status Change Wakeup Enable */ +#define E1000_WUFC_MAG 0x00000002 /* Magic Packet Wakeup Enable */ +#define E1000_WUFC_EX 0x00000004 /* Directed Exact Wakeup Enable */ +#define E1000_WUFC_MC 0x00000008 /* Directed Multicast Wakeup Enable */ +#define E1000_WUFC_BC 0x00000010 /* Broadcast Wakeup Enable */ +#define E1000_WUFC_ARP 0x00000020 /* ARP Request Packet Wakeup Enable */ +#define E1000_WUFC_IPV4 0x00000040 /* Directed IPv4 Packet Wakeup Enable */ +#define E1000_WUFC_FLX0 0x00010000 /* Flexible Filter 0 Enable */ + +/* Wake Up Status */ +#define E1000_WUS_LNKC E1000_WUFC_LNKC +#define E1000_WUS_MAG E1000_WUFC_MAG +#define E1000_WUS_EX E1000_WUFC_EX +#define E1000_WUS_MC E1000_WUFC_MC +#define E1000_WUS_BC E1000_WUFC_BC + +/* Extended Device Control */ +#define E1000_CTRL_EXT_LPCD 0x00000004 /* LCD Power Cycle Done */ +#define E1000_CTRL_EXT_SDP4_DATA 0x00000010 /* SW Definable Pin 4 data */ +#define E1000_CTRL_EXT_SDP6_DATA 0x00000040 /* SW Definable Pin 6 data */ +#define E1000_CTRL_EXT_SDP2_DATA 0x00000040 /* SW Definable Pin 2 data */ +#define E1000_CTRL_EXT_SDP3_DATA 0x00000080 /* SW Definable Pin 3 data */ +#define E1000_CTRL_EXT_SDP6_DIR 0x00000400 /* Direction of SDP6 
0=in 1=out */ +#define E1000_CTRL_EXT_SDP2_DIR 0x00000400 /* Direction of SDP2 0=in 1=out */ +#define E1000_CTRL_EXT_SDP3_DIR 0x00000800 /* Direction of SDP3 0=in 1=out */ +#define E1000_CTRL_EXT_FORCE_SMBUS 0x00000800 /* Force SMBus mode */ +#define E1000_CTRL_EXT_EE_RST 0x00002000 /* Reinitialize from EEPROM */ +/* Physical Func Reset Done Indication */ +#define E1000_CTRL_EXT_PFRSTD 0x00004000 +#define E1000_CTRL_EXT_SDLPE 0X00040000 /* SerDes Low Power Enable */ +#define E1000_CTRL_EXT_SPD_BYPS 0x00008000 /* Speed Select Bypass */ +#define E1000_CTRL_EXT_RO_DIS 0x00020000 /* Relaxed Ordering disable */ +#define E1000_CTRL_EXT_DMA_DYN_CLK_EN 0x00080000 /* DMA Dynamic Clk Gating */ +#define E1000_CTRL_EXT_LINK_MODE_MASK 0x00C00000 +/* Offset of the link mode field in Ctrl Ext register */ +#define E1000_CTRL_EXT_LINK_MODE_OFFSET 22 +#define E1000_CTRL_EXT_LINK_MODE_1000BASE_KX 0x00400000 +#define E1000_CTRL_EXT_LINK_MODE_GMII 0x00000000 +#define E1000_CTRL_EXT_LINK_MODE_PCIE_SERDES 0x00C00000 +#define E1000_CTRL_EXT_LINK_MODE_SGMII 0x00800000 +#define E1000_CTRL_EXT_EIAME 0x01000000 +#define E1000_CTRL_EXT_IRCA 0x00000001 +#define E1000_CTRL_EXT_DRV_LOAD 0x10000000 /* Drv loaded bit for FW */ +#define E1000_CTRL_EXT_IAME 0x08000000 /* Int ACK Auto-mask */ +#define E1000_CTRL_EXT_PBA_CLR 0x80000000 /* PBA Clear */ +#define E1000_CTRL_EXT_PHYPDEN 0x00100000 +#define E1000_I2CCMD_REG_ADDR_SHIFT 16 +#define E1000_I2CCMD_PHY_ADDR_SHIFT 24 +#define E1000_I2CCMD_OPCODE_READ 0x08000000 +#define E1000_I2CCMD_OPCODE_WRITE 0x00000000 +#define E1000_I2CCMD_READY 0x20000000 +#define E1000_I2CCMD_ERROR 0x80000000 +#define E1000_I2CCMD_SFP_DATA_ADDR(a) (0x0000 + (a)) +#define E1000_I2CCMD_SFP_DIAG_ADDR(a) (0x0100 + (a)) +#define E1000_MAX_SGMII_PHY_REG_ADDR 255 +#define E1000_I2CCMD_PHY_TIMEOUT 200 +#define E1000_IVAR_VALID 0x80 +#define E1000_GPIE_NSICR 0x00000001 +#define E1000_GPIE_MSIX_MODE 0x00000010 +#define E1000_GPIE_EIAME 0x40000000 +#define E1000_GPIE_PBA 0x80000000 + 
+/* Receive Descriptor bit definitions */ +#define E1000_RXD_STAT_DD 0x01 /* Descriptor Done */ +#define E1000_RXD_STAT_EOP 0x02 /* End of Packet */ +#define E1000_RXD_STAT_IXSM 0x04 /* Ignore checksum */ +#define E1000_RXD_STAT_VP 0x08 /* IEEE VLAN Packet */ +#define E1000_RXD_STAT_UDPCS 0x10 /* UDP xsum calculated */ +#define E1000_RXD_STAT_TCPCS 0x20 /* TCP xsum calculated */ +#define E1000_RXD_STAT_IPCS 0x40 /* IP xsum calculated */ +#define E1000_RXD_STAT_PIF 0x80 /* passed in-exact filter */ +#define E1000_RXD_STAT_IPIDV 0x200 /* IP identification valid */ +#define E1000_RXD_STAT_UDPV 0x400 /* Valid UDP checksum */ +#define E1000_RXD_STAT_DYNINT 0x800 /* Pkt caused INT via DYNINT */ +#define E1000_RXD_ERR_CE 0x01 /* CRC Error */ +#define E1000_RXD_ERR_SE 0x02 /* Symbol Error */ +#define E1000_RXD_ERR_SEQ 0x04 /* Sequence Error */ +#define E1000_RXD_ERR_CXE 0x10 /* Carrier Extension Error */ +#define E1000_RXD_ERR_TCPE 0x20 /* TCP/UDP Checksum Error */ +#define E1000_RXD_ERR_IPE 0x40 /* IP Checksum Error */ +#define E1000_RXD_ERR_RXE 0x80 /* Rx Data Error */ +#define E1000_RXD_SPC_VLAN_MASK 0x0FFF /* VLAN ID is in lower 12 bits */ + +#define E1000_RXDEXT_STATERR_TST 0x00000100 /* Time Stamp taken */ +#define E1000_RXDEXT_STATERR_LB 0x00040000 +#define E1000_RXDEXT_STATERR_CE 0x01000000 +#define E1000_RXDEXT_STATERR_SE 0x02000000 +#define E1000_RXDEXT_STATERR_SEQ 0x04000000 +#define E1000_RXDEXT_STATERR_CXE 0x10000000 +#define E1000_RXDEXT_STATERR_TCPE 0x20000000 +#define E1000_RXDEXT_STATERR_IPE 0x40000000 +#define E1000_RXDEXT_STATERR_RXE 0x80000000 + +/* mask to determine if packets should be dropped due to frame errors */ +#define E1000_RXD_ERR_FRAME_ERR_MASK ( \ + E1000_RXD_ERR_CE | \ + E1000_RXD_ERR_SE | \ + E1000_RXD_ERR_SEQ | \ + E1000_RXD_ERR_CXE | \ + E1000_RXD_ERR_RXE) + +/* Same mask, but for extended and packet split descriptors */ +#define E1000_RXDEXT_ERR_FRAME_ERR_MASK ( \ + E1000_RXDEXT_STATERR_CE | \ + E1000_RXDEXT_STATERR_SE | \ + 
E1000_RXDEXT_STATERR_SEQ | \ + E1000_RXDEXT_STATERR_CXE | \ + E1000_RXDEXT_STATERR_RXE) + +#define E1000_MRQC_RSS_FIELD_MASK 0xFFFF0000 +#define E1000_MRQC_RSS_FIELD_IPV4_TCP 0x00010000 +#define E1000_MRQC_RSS_FIELD_IPV4 0x00020000 +#define E1000_MRQC_RSS_FIELD_IPV6_TCP_EX 0x00040000 +#define E1000_MRQC_RSS_FIELD_IPV6 0x00100000 +#define E1000_MRQC_RSS_FIELD_IPV6_TCP 0x00200000 + +#define E1000_RXDPS_HDRSTAT_HDRSP 0x00008000 + +/* Management Control */ +#define E1000_MANC_SMBUS_EN 0x00000001 /* SMBus Enabled - RO */ +#define E1000_MANC_ASF_EN 0x00000002 /* ASF Enabled - RO */ +#define E1000_MANC_ARP_EN 0x00002000 /* Enable ARP Request Filtering */ +#define E1000_MANC_RCV_TCO_EN 0x00020000 /* Receive TCO Packets Enabled */ +#define E1000_MANC_BLK_PHY_RST_ON_IDE 0x00040000 /* Block phy resets */ +/* Enable MAC address filtering */ +#define E1000_MANC_EN_MAC_ADDR_FILTER 0x00100000 +/* Enable MNG packets to host memory */ +#define E1000_MANC_EN_MNG2HOST 0x00200000 + +#define E1000_MANC2H_PORT_623 0x00000020 /* Port 0x26f */ +#define E1000_MANC2H_PORT_664 0x00000040 /* Port 0x298 */ +#define E1000_MDEF_PORT_623 0x00000800 /* Port 0x26f */ +#define E1000_MDEF_PORT_664 0x00000400 /* Port 0x298 */ + +/* Receive Control */ +#define E1000_RCTL_RST 0x00000001 /* Software reset */ +#define E1000_RCTL_EN 0x00000002 /* enable */ +#define E1000_RCTL_SBP 0x00000004 /* store bad packet */ +#define E1000_RCTL_UPE 0x00000008 /* unicast promisc enable */ +#define E1000_RCTL_MPE 0x00000010 /* multicast promisc enable */ +#define E1000_RCTL_LPE 0x00000020 /* long packet enable */ +#define E1000_RCTL_LBM_NO 0x00000000 /* no loopback mode */ +#define E1000_RCTL_LBM_MAC 0x00000040 /* MAC loopback mode */ +#define E1000_RCTL_LBM_TCVR 0x000000C0 /* tcvr loopback mode */ +#define E1000_RCTL_DTYP_PS 0x00000400 /* Packet Split descriptor */ +#define E1000_RCTL_RDMTS_HALF 0x00000000 /* Rx desc min thresh size */ +#define E1000_RCTL_RDMTS_HEX 0x00010000 +#define E1000_RCTL_MO_SHIFT 12 /* 
multicast offset shift */ +#define E1000_RCTL_MO_3 0x00003000 /* multicast offset 15:4 */ +#define E1000_RCTL_BAM 0x00008000 /* broadcast enable */ +/* these buffer sizes are valid if E1000_RCTL_BSEX is 0 */ +#define E1000_RCTL_SZ_2048 0x00000000 /* Rx buffer size 2048 */ +#define E1000_RCTL_SZ_1024 0x00010000 /* Rx buffer size 1024 */ +#define E1000_RCTL_SZ_512 0x00020000 /* Rx buffer size 512 */ +#define E1000_RCTL_SZ_256 0x00030000 /* Rx buffer size 256 */ +/* these buffer sizes are valid if E1000_RCTL_BSEX is 1 */ +#define E1000_RCTL_SZ_16384 0x00010000 /* Rx buffer size 16384 */ +#define E1000_RCTL_SZ_8192 0x00020000 /* Rx buffer size 8192 */ +#define E1000_RCTL_SZ_4096 0x00030000 /* Rx buffer size 4096 */ +#define E1000_RCTL_VFE 0x00040000 /* vlan filter enable */ +#define E1000_RCTL_CFIEN 0x00080000 /* canonical form enable */ +#define E1000_RCTL_CFI 0x00100000 /* canonical form indicator */ +#define E1000_RCTL_DPF 0x00400000 /* discard pause frames */ +#define E1000_RCTL_PMCF 0x00800000 /* pass MAC control frames */ +#define E1000_RCTL_BSEX 0x02000000 /* Buffer size extension */ +#define E1000_RCTL_SECRC 0x04000000 /* Strip Ethernet CRC */ + +/* Use byte values for the following shift parameters + * Usage: + * psrctl |= (((ROUNDUP(value0, 128) >> E1000_PSRCTL_BSIZE0_SHIFT) & + * E1000_PSRCTL_BSIZE0_MASK) | + * ((ROUNDUP(value1, 1024) >> E1000_PSRCTL_BSIZE1_SHIFT) & + * E1000_PSRCTL_BSIZE1_MASK) | + * ((ROUNDUP(value2, 1024) << E1000_PSRCTL_BSIZE2_SHIFT) & + * E1000_PSRCTL_BSIZE2_MASK) | + * ((ROUNDUP(value3, 1024) << E1000_PSRCTL_BSIZE3_SHIFT) & + * E1000_PSRCTL_BSIZE3_MASK)) + * where value0 = [128..16256], default=256 + * value1 = [1024..64512], default=4096 + * value2 = [0..64512], default=4096 + * value3 = [0..64512], default=0 + */ + +#define E1000_PSRCTL_BSIZE0_MASK 0x0000007F +#define E1000_PSRCTL_BSIZE1_MASK 0x00003F00 +#define E1000_PSRCTL_BSIZE2_MASK 0x003F0000 +#define E1000_PSRCTL_BSIZE3_MASK 0x3F000000 + +#define E1000_PSRCTL_BSIZE0_SHIFT 7 /* 
Shift _right_ 7 */ +#define E1000_PSRCTL_BSIZE1_SHIFT 2 /* Shift _right_ 2 */ +#define E1000_PSRCTL_BSIZE2_SHIFT 6 /* Shift _left_ 6 */ +#define E1000_PSRCTL_BSIZE3_SHIFT 14 /* Shift _left_ 14 */ + +/* SWFW_SYNC Definitions */ +#define E1000_SWFW_EEP_SM 0x01 +#define E1000_SWFW_PHY0_SM 0x02 +#define E1000_SWFW_PHY1_SM 0x04 +#define E1000_SWFW_CSR_SM 0x08 +#define E1000_SWFW_PHY2_SM 0x20 +#define E1000_SWFW_PHY3_SM 0x40 +#define E1000_SWFW_SW_MNG_SM 0x400 + +/* Device Control */ +#define E1000_CTRL_FD 0x00000001 /* Full duplex.0=half; 1=full */ +#define E1000_CTRL_PRIOR 0x00000004 /* Priority on PCI. 0=rx,1=fair */ +#define E1000_CTRL_GIO_MASTER_DISABLE 0x00000004 /*Blocks new Master reqs */ +#define E1000_CTRL_LRST 0x00000008 /* Link reset. 0=normal,1=reset */ +#define E1000_CTRL_ASDE 0x00000020 /* Auto-speed detect enable */ +#define E1000_CTRL_SLU 0x00000040 /* Set link up (Force Link) */ +#define E1000_CTRL_ILOS 0x00000080 /* Invert Loss-Of Signal */ +#define E1000_CTRL_SPD_SEL 0x00000300 /* Speed Select Mask */ +#define E1000_CTRL_SPD_10 0x00000000 /* Force 10Mb */ +#define E1000_CTRL_SPD_100 0x00000100 /* Force 100Mb */ +#define E1000_CTRL_SPD_1000 0x00000200 /* Force 1Gb */ +#define E1000_CTRL_FRCSPD 0x00000800 /* Force Speed */ +#define E1000_CTRL_FRCDPX 0x00001000 /* Force Duplex */ +#define E1000_CTRL_SWDPIN0 0x00040000 /* SWDPIN 0 value */ +#define E1000_CTRL_SWDPIN1 0x00080000 /* SWDPIN 1 value */ +#define E1000_CTRL_SWDPIN2 0x00100000 /* SWDPIN 2 value */ +#define E1000_CTRL_ADVD3WUC 0x00100000 /* D3 WUC */ +#define E1000_CTRL_SWDPIN3 0x00200000 /* SWDPIN 3 value */ +#define E1000_CTRL_SWDPIO0 0x00400000 /* SWDPIN 0 Input or output */ +#define E1000_CTRL_SDP0_DIR 0x00400000 /* SDP0 Data direction */ +#define E1000_CTRL_SDP1_DIR 0x00800000 /* SDP1 Data direction */ +#define E1000_CTRL_RST 0x04000000 /* Global reset */ +#define E1000_CTRL_RFCE 0x08000000 /* Receive Flow Control enable */ +#define E1000_CTRL_TFCE 0x10000000 /* Transmit flow control enable 
*/ +#define E1000_CTRL_VME 0x40000000 /* IEEE VLAN mode enable */ +#define E1000_CTRL_PHY_RST 0x80000000 /* PHY Reset */ +#define E1000_CTRL_I2C_ENA 0x02000000 /* I2C enable */ + +#define E1000_CONNSW_ENRGSRC 0x4 +#define E1000_CONNSW_PHYSD 0x400 +#define E1000_CONNSW_PHY_PDN 0x800 +#define E1000_CONNSW_SERDESD 0x200 +#define E1000_CONNSW_AUTOSENSE_CONF 0x2 +#define E1000_CONNSW_AUTOSENSE_EN 0x1 +#define E1000_PCS_CFG_PCS_EN 8 +#define E1000_PCS_LCTL_FLV_LINK_UP 1 +#define E1000_PCS_LCTL_FSV_10 0 +#define E1000_PCS_LCTL_FSV_100 2 +#define E1000_PCS_LCTL_FSV_1000 4 +#define E1000_PCS_LCTL_FDV_FULL 8 +#define E1000_PCS_LCTL_FSD 0x10 +#define E1000_PCS_LCTL_FORCE_LINK 0x20 +#define E1000_PCS_LCTL_FORCE_FCTRL 0x80 +#define E1000_PCS_LCTL_AN_ENABLE 0x10000 +#define E1000_PCS_LCTL_AN_RESTART 0x20000 +#define E1000_PCS_LCTL_AN_TIMEOUT 0x40000 +#define E1000_ENABLE_SERDES_LOOPBACK 0x0410 + +#define E1000_PCS_LSTS_LINK_OK 1 +#define E1000_PCS_LSTS_SPEED_100 2 +#define E1000_PCS_LSTS_SPEED_1000 4 +#define E1000_PCS_LSTS_DUPLEX_FULL 8 +#define E1000_PCS_LSTS_SYNK_OK 0x10 +#define E1000_PCS_LSTS_AN_COMPLETE 0x10000 + +/* Device Status */ +#define E1000_STATUS_FD 0x00000001 /* Duplex 0=half 1=full */ +#define E1000_STATUS_LU 0x00000002 /* Link up.0=no,1=link */ +#define E1000_STATUS_FUNC_MASK 0x0000000C /* PCI Function Mask */ +#define E1000_STATUS_FUNC_SHIFT 2 +#define E1000_STATUS_FUNC_1 0x00000004 /* Function 1 */ +#define E1000_STATUS_TXOFF 0x00000010 /* transmission paused */ +#define E1000_STATUS_SPEED_MASK 0x000000C0 +#define E1000_STATUS_SPEED_10 0x00000000 /* Speed 10Mb/s */ +#define E1000_STATUS_SPEED_100 0x00000040 /* Speed 100Mb/s */ +#define E1000_STATUS_SPEED_1000 0x00000080 /* Speed 1000Mb/s */ +#define E1000_STATUS_LAN_INIT_DONE 0x00000200 /* Lan Init Compltn by NVM */ +#define E1000_STATUS_PHYRA 0x00000400 /* PHY Reset Asserted */ +#define E1000_STATUS_GIO_MASTER_ENABLE 0x00080000 /* Master request status */ +#define E1000_STATUS_2P5_SKU 0x00001000 /* Val of 
2.5GBE SKU strap */ +#define E1000_STATUS_2P5_SKU_OVER 0x00002000 /* Val of 2.5GBE SKU Over */ + +#define SPEED_10 10 +#define SPEED_100 100 +#define SPEED_1000 1000 +#define SPEED_2500 2500 +#define HALF_DUPLEX 1 +#define FULL_DUPLEX 2 + +#define ADVERTISE_10_HALF 0x0001 +#define ADVERTISE_10_FULL 0x0002 +#define ADVERTISE_100_HALF 0x0004 +#define ADVERTISE_100_FULL 0x0008 +#define ADVERTISE_1000_HALF 0x0010 /* Not used, just FYI */ +#define ADVERTISE_1000_FULL 0x0020 + +/* 1000/H is not supported, nor spec-compliant. */ +#define E1000_ALL_SPEED_DUPLEX ( \ + ADVERTISE_10_HALF | ADVERTISE_10_FULL | ADVERTISE_100_HALF | \ + ADVERTISE_100_FULL | ADVERTISE_1000_FULL) +#define E1000_ALL_NOT_GIG ( \ + ADVERTISE_10_HALF | ADVERTISE_10_FULL | ADVERTISE_100_HALF | \ + ADVERTISE_100_FULL) +#define E1000_ALL_100_SPEED (ADVERTISE_100_HALF | ADVERTISE_100_FULL) +#define E1000_ALL_10_SPEED (ADVERTISE_10_HALF | ADVERTISE_10_FULL) +#define E1000_ALL_HALF_DUPLEX (ADVERTISE_10_HALF | ADVERTISE_100_HALF) + +#define AUTONEG_ADVERTISE_SPEED_DEFAULT E1000_ALL_SPEED_DUPLEX + +/* LED Control */ +#define E1000_LEDCTL_LED0_MODE_MASK 0x0000000F +#define E1000_LEDCTL_LED0_MODE_SHIFT 0 +#define E1000_LEDCTL_LED0_IVRT 0x00000040 +#define E1000_LEDCTL_LED0_BLINK 0x00000080 + +#define E1000_LEDCTL_MODE_LED_ON 0xE +#define E1000_LEDCTL_MODE_LED_OFF 0xF + +/* Transmit Descriptor bit definitions */ +#define E1000_TXD_DTYP_D 0x00100000 /* Data Descriptor */ +#define E1000_TXD_DTYP_C 0x00000000 /* Context Descriptor */ +#define E1000_TXD_POPTS_IXSM 0x01 /* Insert IP checksum */ +#define E1000_TXD_POPTS_TXSM 0x02 /* Insert TCP/UDP checksum */ +#define E1000_TXD_CMD_EOP 0x01000000 /* End of Packet */ +#define E1000_TXD_CMD_IFCS 0x02000000 /* Insert FCS (Ethernet CRC) */ +#define E1000_TXD_CMD_IC 0x04000000 /* Insert Checksum */ +#define E1000_TXD_CMD_RS 0x08000000 /* Report Status */ +#define E1000_TXD_CMD_RPS 0x10000000 /* Report Packet Sent */ +#define E1000_TXD_CMD_DEXT 0x20000000 /* Desc extension 
(0 = legacy) */ +#define E1000_TXD_CMD_VLE 0x40000000 /* Add VLAN tag */ +#define E1000_TXD_CMD_IDE 0x80000000 /* Enable Tidv register */ +#define E1000_TXD_STAT_DD 0x00000001 /* Descriptor Done */ +#define E1000_TXD_STAT_EC 0x00000002 /* Excess Collisions */ +#define E1000_TXD_STAT_LC 0x00000004 /* Late Collisions */ +#define E1000_TXD_STAT_TU 0x00000008 /* Transmit underrun */ +#define E1000_TXD_CMD_TCP 0x01000000 /* TCP packet */ +#define E1000_TXD_CMD_IP 0x02000000 /* IP packet */ +#define E1000_TXD_CMD_TSE 0x04000000 /* TCP Seg enable */ +#define E1000_TXD_STAT_TC 0x00000004 /* Tx Underrun */ +#define E1000_TXD_EXTCMD_TSTAMP 0x00000010 /* IEEE1588 Timestamp packet */ + +/* Transmit Control */ +#define E1000_TCTL_EN 0x00000002 /* enable Tx */ +#define E1000_TCTL_PSP 0x00000008 /* pad short packets */ +#define E1000_TCTL_CT 0x00000ff0 /* collision threshold */ +#define E1000_TCTL_COLD 0x003ff000 /* collision distance */ +#define E1000_TCTL_RTLC 0x01000000 /* Re-transmit on late collision */ +#define E1000_TCTL_MULR 0x10000000 /* Multiple request support */ + +/* Transmit Arbitration Count */ +#define E1000_TARC0_ENABLE 0x00000400 /* Enable Tx Queue 0 */ + +/* SerDes Control */ +#define E1000_SCTL_DISABLE_SERDES_LOOPBACK 0x0400 +#define E1000_SCTL_ENABLE_SERDES_LOOPBACK 0x0410 + +/* Receive Checksum Control */ +#define E1000_RXCSUM_IPOFL 0x00000100 /* IPv4 checksum offload */ +#define E1000_RXCSUM_TUOFL 0x00000200 /* TCP / UDP checksum offload */ +#define E1000_RXCSUM_CRCOFL 0x00000800 /* CRC32 offload enable */ +#define E1000_RXCSUM_IPPCSE 0x00001000 /* IP payload checksum enable */ +#define E1000_RXCSUM_PCSD 0x00002000 /* packet checksum disabled */ + +/* Header split receive */ +#define E1000_RFCTL_NFSW_DIS 0x00000040 +#define E1000_RFCTL_NFSR_DIS 0x00000080 +#define E1000_RFCTL_ACK_DIS 0x00001000 +#define E1000_RFCTL_EXTEN 0x00008000 +#define E1000_RFCTL_IPV6_EX_DIS 0x00010000 +#define E1000_RFCTL_NEW_IPV6_EXT_DIS 0x00020000 +#define E1000_RFCTL_LEF 
0x00040000 + +/* Collision related configuration parameters */ +#define E1000_COLLISION_THRESHOLD 15 +#define E1000_CT_SHIFT 4 +#define E1000_COLLISION_DISTANCE 63 +#define E1000_COLD_SHIFT 12 + +/* Default values for the transmit IPG register */ +#define DEFAULT_82543_TIPG_IPGT_FIBER 9 +#define DEFAULT_82543_TIPG_IPGT_COPPER 8 + +#define E1000_TIPG_IPGT_MASK 0x000003FF + +#define DEFAULT_82543_TIPG_IPGR1 8 +#define E1000_TIPG_IPGR1_SHIFT 10 + +#define DEFAULT_82543_TIPG_IPGR2 6 +#define DEFAULT_80003ES2LAN_TIPG_IPGR2 7 +#define E1000_TIPG_IPGR2_SHIFT 20 + +/* Ethertype field values */ +#define ETHERNET_IEEE_VLAN_TYPE 0x8100 /* 802.3ac packet */ + +#define ETHERNET_FCS_SIZE 4 +#define MAX_JUMBO_FRAME_SIZE 0x3F00 +#define E1000_TX_PTR_GAP 0x1F + +/* Extended Configuration Control and Size */ +#define E1000_EXTCNF_CTRL_MDIO_SW_OWNERSHIP 0x00000020 +#define E1000_EXTCNF_CTRL_LCD_WRITE_ENABLE 0x00000001 +#define E1000_EXTCNF_CTRL_OEM_WRITE_ENABLE 0x00000008 +#define E1000_EXTCNF_CTRL_SWFLAG 0x00000020 +#define E1000_EXTCNF_CTRL_GATE_PHY_CFG 0x00000080 +#define E1000_EXTCNF_SIZE_EXT_PCIE_LENGTH_MASK 0x00FF0000 +#define E1000_EXTCNF_SIZE_EXT_PCIE_LENGTH_SHIFT 16 +#define E1000_EXTCNF_CTRL_EXT_CNF_POINTER_MASK 0x0FFF0000 +#define E1000_EXTCNF_CTRL_EXT_CNF_POINTER_SHIFT 16 + +#define E1000_PHY_CTRL_D0A_LPLU 0x00000002 +#define E1000_PHY_CTRL_NOND0A_LPLU 0x00000004 +#define E1000_PHY_CTRL_NOND0A_GBE_DISABLE 0x00000008 +#define E1000_PHY_CTRL_GBE_DISABLE 0x00000040 + +#define E1000_KABGTXD_BGSQLBIAS 0x00050000 + +/* PBA constants */ +#define E1000_PBA_8K 0x0008 /* 8KB */ +#define E1000_PBA_10K 0x000A /* 10KB */ +#define E1000_PBA_12K 0x000C /* 12KB */ +#define E1000_PBA_14K 0x000E /* 14KB */ +#define E1000_PBA_16K 0x0010 /* 16KB */ +#define E1000_PBA_18K 0x0012 +#define E1000_PBA_20K 0x0014 +#define E1000_PBA_22K 0x0016 +#define E1000_PBA_24K 0x0018 +#define E1000_PBA_26K 0x001A +#define E1000_PBA_30K 0x001E +#define E1000_PBA_32K 0x0020 +#define E1000_PBA_34K 0x0022 
+#define E1000_PBA_35K 0x0023 +#define E1000_PBA_38K 0x0026 +#define E1000_PBA_40K 0x0028 +#define E1000_PBA_48K 0x0030 /* 48KB */ +#define E1000_PBA_64K 0x0040 /* 64KB */ + +#define E1000_PBA_RXA_MASK 0xFFFF + +#define E1000_PBS_16K E1000_PBA_16K + +/* Uncorrectable/correctable ECC Error counts and enable bits */ +#define E1000_PBECCSTS_CORR_ERR_CNT_MASK 0x000000FF +#define E1000_PBECCSTS_UNCORR_ERR_CNT_MASK 0x0000FF00 +#define E1000_PBECCSTS_UNCORR_ERR_CNT_SHIFT 8 +#define E1000_PBECCSTS_ECC_ENABLE 0x00010000 + +#define IFS_MAX 80 +#define IFS_MIN 40 +#define IFS_RATIO 4 +#define IFS_STEP 10 +#define MIN_NUM_XMITS 1000 + +/* SW Semaphore Register */ +#define E1000_SWSM_SMBI 0x00000001 /* Driver Semaphore bit */ +#define E1000_SWSM_SWESMBI 0x00000002 /* FW Semaphore bit */ +#define E1000_SWSM_DRV_LOAD 0x00000008 /* Driver Loaded Bit */ + +#define E1000_SWSM2_LOCK 0x00000002 /* Secondary driver semaphore bit */ + +/* Interrupt Cause Read */ +#define E1000_ICR_TXDW 0x00000001 /* Transmit desc written back */ +#define E1000_ICR_TXQE 0x00000002 /* Transmit Queue empty */ +#define E1000_ICR_LSC 0x00000004 /* Link Status Change */ +#define E1000_ICR_RXSEQ 0x00000008 /* Rx sequence error */ +#define E1000_ICR_RXDMT0 0x00000010 /* Rx desc min. 
threshold (0) */ +#define E1000_ICR_RXO 0x00000040 /* Rx overrun */ +#define E1000_ICR_RXT0 0x00000080 /* Rx timer intr (ring 0) */ +#define E1000_ICR_VMMB 0x00000100 /* VM MB event */ +#define E1000_ICR_RXCFG 0x00000400 /* Rx /c/ ordered set */ +#define E1000_ICR_GPI_EN0 0x00000800 /* GP Int 0 */ +#define E1000_ICR_GPI_EN1 0x00001000 /* GP Int 1 */ +#define E1000_ICR_GPI_EN2 0x00002000 /* GP Int 2 */ +#define E1000_ICR_GPI_EN3 0x00004000 /* GP Int 3 */ +#define E1000_ICR_TXD_LOW 0x00008000 +#define E1000_ICR_MNG 0x00040000 /* Manageability event */ +#define E1000_ICR_ECCER 0x00400000 /* Uncorrectable ECC Error */ +#define E1000_ICR_TS 0x00080000 /* Time Sync Interrupt */ +#define E1000_ICR_DRSTA 0x40000000 /* Device Reset Asserted */ +/* If this bit asserted, the driver should claim the interrupt */ +#define E1000_ICR_INT_ASSERTED 0x80000000 +#define E1000_ICR_DOUTSYNC 0x10000000 /* NIC DMA out of sync */ +#define E1000_ICR_FER 0x00400000 /* Fatal Error */ + +#define E1000_ICR_THS 0x00800000 /* ICR.THS: Thermal Sensor Event*/ +#define E1000_ICR_MDDET 0x10000000 /* Malicious Driver Detect */ + +/* Extended Interrupt Cause Read */ +#define E1000_EICR_RX_QUEUE0 0x00000001 /* Rx Queue 0 Interrupt */ +#define E1000_EICR_RX_QUEUE1 0x00000002 /* Rx Queue 1 Interrupt */ +#define E1000_EICR_RX_QUEUE2 0x00000004 /* Rx Queue 2 Interrupt */ +#define E1000_EICR_RX_QUEUE3 0x00000008 /* Rx Queue 3 Interrupt */ +#define E1000_EICR_TX_QUEUE0 0x00000100 /* Tx Queue 0 Interrupt */ +#define E1000_EICR_TX_QUEUE1 0x00000200 /* Tx Queue 1 Interrupt */ +#define E1000_EICR_TX_QUEUE2 0x00000400 /* Tx Queue 2 Interrupt */ +#define E1000_EICR_TX_QUEUE3 0x00000800 /* Tx Queue 3 Interrupt */ +#define E1000_EICR_TCP_TIMER 0x40000000 /* TCP Timer */ +#define E1000_EICR_OTHER 0x80000000 /* Interrupt Cause Active */ +/* TCP Timer */ +#define E1000_TCPTIMER_KS 0x00000100 /* KickStart */ +#define E1000_TCPTIMER_COUNT_ENABLE 0x00000200 /* Count Enable */ +#define E1000_TCPTIMER_COUNT_FINISH 
0x00000400 /* Count finish */ +#define E1000_TCPTIMER_LOOP 0x00000800 /* Loop */ + +/* This defines the bits that are set in the Interrupt Mask + * Set/Read Register. Each bit is documented below: + * o RXT0 = Receiver Timer Interrupt (ring 0) + * o TXDW = Transmit Descriptor Written Back + * o RXDMT0 = Receive Descriptor Minimum Threshold hit (ring 0) + * o RXSEQ = Receive Sequence Error + * o LSC = Link Status Change + */ +#define IMS_ENABLE_MASK ( \ + E1000_IMS_RXT0 | \ + E1000_IMS_TXDW | \ + E1000_IMS_RXDMT0 | \ + E1000_IMS_RXSEQ | \ + E1000_IMS_LSC) + +/* Interrupt Mask Set */ +#define E1000_IMS_TXDW E1000_ICR_TXDW /* Tx desc written back */ +#define E1000_IMS_TXQE E1000_ICR_TXQE /* Transmit Queue empty */ +#define E1000_IMS_LSC E1000_ICR_LSC /* Link Status Change */ +#define E1000_IMS_VMMB E1000_ICR_VMMB /* Mail box activity */ +#define E1000_IMS_RXSEQ E1000_ICR_RXSEQ /* Rx sequence error */ +#define E1000_IMS_RXDMT0 E1000_ICR_RXDMT0 /* Rx desc min. threshold */ +#define E1000_IMS_RXO E1000_ICR_RXO /* Rx overrun */ +#define E1000_IMS_RXT0 E1000_ICR_RXT0 /* Rx timer intr */ +#define E1000_IMS_TXD_LOW E1000_ICR_TXD_LOW +#define E1000_IMS_ECCER E1000_ICR_ECCER /* Uncorrectable ECC Error */ +#define E1000_IMS_TS E1000_ICR_TS /* Time Sync Interrupt */ +#define E1000_IMS_DRSTA E1000_ICR_DRSTA /* Device Reset Asserted */ +#define E1000_IMS_DOUTSYNC E1000_ICR_DOUTSYNC /* NIC DMA out of sync */ +#define E1000_IMS_FER E1000_ICR_FER /* Fatal Error */ + +#define E1000_IMS_THS E1000_ICR_THS /* ICR.TS: Thermal Sensor Event*/ +#define E1000_IMS_MDDET E1000_ICR_MDDET /* Malicious Driver Detect */ +/* Extended Interrupt Mask Set */ +#define E1000_EIMS_RX_QUEUE0 E1000_EICR_RX_QUEUE0 /* Rx Queue 0 Interrupt */ +#define E1000_EIMS_RX_QUEUE1 E1000_EICR_RX_QUEUE1 /* Rx Queue 1 Interrupt */ +#define E1000_EIMS_RX_QUEUE2 E1000_EICR_RX_QUEUE2 /* Rx Queue 2 Interrupt */ +#define E1000_EIMS_RX_QUEUE3 E1000_EICR_RX_QUEUE3 /* Rx Queue 3 Interrupt */ +#define E1000_EIMS_TX_QUEUE0 
E1000_EICR_TX_QUEUE0 /* Tx Queue 0 Interrupt */ +#define E1000_EIMS_TX_QUEUE1 E1000_EICR_TX_QUEUE1 /* Tx Queue 1 Interrupt */ +#define E1000_EIMS_TX_QUEUE2 E1000_EICR_TX_QUEUE2 /* Tx Queue 2 Interrupt */ +#define E1000_EIMS_TX_QUEUE3 E1000_EICR_TX_QUEUE3 /* Tx Queue 3 Interrupt */ +#define E1000_EIMS_TCP_TIMER E1000_EICR_TCP_TIMER /* TCP Timer */ +#define E1000_EIMS_OTHER E1000_EICR_OTHER /* Interrupt Cause Active */ + +/* Interrupt Cause Set */ +#define E1000_ICS_LSC E1000_ICR_LSC /* Link Status Change */ +#define E1000_ICS_RXSEQ E1000_ICR_RXSEQ /* Rx sequence error */ +#define E1000_ICS_RXDMT0 E1000_ICR_RXDMT0 /* Rx desc min. threshold */ + +/* Extended Interrupt Cause Set */ +#define E1000_EICS_RX_QUEUE0 E1000_EICR_RX_QUEUE0 /* Rx Queue 0 Interrupt */ +#define E1000_EICS_RX_QUEUE1 E1000_EICR_RX_QUEUE1 /* Rx Queue 1 Interrupt */ +#define E1000_EICS_RX_QUEUE2 E1000_EICR_RX_QUEUE2 /* Rx Queue 2 Interrupt */ +#define E1000_EICS_RX_QUEUE3 E1000_EICR_RX_QUEUE3 /* Rx Queue 3 Interrupt */ +#define E1000_EICS_TX_QUEUE0 E1000_EICR_TX_QUEUE0 /* Tx Queue 0 Interrupt */ +#define E1000_EICS_TX_QUEUE1 E1000_EICR_TX_QUEUE1 /* Tx Queue 1 Interrupt */ +#define E1000_EICS_TX_QUEUE2 E1000_EICR_TX_QUEUE2 /* Tx Queue 2 Interrupt */ +#define E1000_EICS_TX_QUEUE3 E1000_EICR_TX_QUEUE3 /* Tx Queue 3 Interrupt */ +#define E1000_EICS_TCP_TIMER E1000_EICR_TCP_TIMER /* TCP Timer */ +#define E1000_EICS_OTHER E1000_EICR_OTHER /* Interrupt Cause Active */ + +#define E1000_EITR_ITR_INT_MASK 0x0000FFFF +/* E1000_EITR_CNT_IGNR is only for 82576 and newer */ +#define E1000_EITR_CNT_IGNR 0x80000000 /* Don't reset counters on write */ +#define E1000_EITR_INTERVAL 0x00007FFC + +/* Transmit Descriptor Control */ +#define E1000_TXDCTL_PTHRESH 0x0000003F /* TXDCTL Prefetch Threshold */ +#define E1000_TXDCTL_HTHRESH 0x00003F00 /* TXDCTL Host Threshold */ +#define E1000_TXDCTL_WTHRESH 0x003F0000 /* TXDCTL Writeback Threshold */ +#define E1000_TXDCTL_GRAN 0x01000000 /* TXDCTL Granularity */ +#define 
E1000_TXDCTL_FULL_TX_DESC_WB 0x01010000 /* GRAN=1, WTHRESH=1 */ +#define E1000_TXDCTL_MAX_TX_DESC_PREFETCH 0x0100001F /* GRAN=1, PTHRESH=31 */ +/* Enable the counting of descriptors still to be processed. */ +#define E1000_TXDCTL_COUNT_DESC 0x00400000 + +/* Flow Control Constants */ +#define FLOW_CONTROL_ADDRESS_LOW 0x00C28001 +#define FLOW_CONTROL_ADDRESS_HIGH 0x00000100 +#define FLOW_CONTROL_TYPE 0x8808 + +/* 802.1q VLAN Packet Size */ +#define VLAN_TAG_SIZE 4 /* 802.3ac tag (not DMA'd) */ +#define E1000_VLAN_FILTER_TBL_SIZE 128 /* VLAN Filter Table (4096 bits) */ + +/* Receive Address + * Number of high/low register pairs in the RAR. The RAR (Receive Address + * Registers) holds the directed and multicast addresses that we monitor. + * Technically, we have 16 spots. However, we reserve one of these spots + * (RAR[15]) for our directed address used by controllers with + * manageability enabled, allowing us room for 15 multicast addresses. + */ +#define E1000_RAR_ENTRIES 15 +#define E1000_RAH_AV 0x80000000 /* Receive descriptor valid */ +#define E1000_RAL_MAC_ADDR_LEN 4 +#define E1000_RAH_MAC_ADDR_LEN 2 +#define E1000_RAH_QUEUE_MASK_82575 0x000C0000 +#define E1000_RAH_POOL_1 0x00040000 + +/* Error Codes */ +#define E1000_SUCCESS 0 +#define E1000_ERR_NVM 1 +#define E1000_ERR_PHY 2 +#define E1000_ERR_CONFIG 3 +#define E1000_ERR_PARAM 4 +#define E1000_ERR_MAC_INIT 5 +#define E1000_ERR_PHY_TYPE 6 +#define E1000_ERR_RESET 9 +#define E1000_ERR_MASTER_REQUESTS_PENDING 10 +#define E1000_ERR_HOST_INTERFACE_COMMAND 11 +#define E1000_BLK_PHY_RESET 12 +#define E1000_ERR_SWFW_SYNC 13 +#define E1000_NOT_IMPLEMENTED 14 +#define E1000_ERR_MBX 15 +#define E1000_ERR_INVALID_ARGUMENT 16 +#define E1000_ERR_NO_SPACE 17 +#define E1000_ERR_NVM_PBA_SECTION 18 +#define E1000_ERR_I2C 19 +#define E1000_ERR_INVM_VALUE_NOT_FOUND 20 + +/* Loop limit on how long we wait for auto-negotiation to complete */ +#define FIBER_LINK_UP_LIMIT 50 +#define COPPER_LINK_UP_LIMIT 10 +#define 
PHY_AUTO_NEG_LIMIT 45 +#define PHY_FORCE_LIMIT 20 +/* Number of 100 microseconds we wait for PCI Express master disable */ +#define MASTER_DISABLE_TIMEOUT 800 +/* Number of milliseconds we wait for PHY configuration done after MAC reset */ +#define PHY_CFG_TIMEOUT 100 +/* Number of 2 milliseconds we wait for acquiring MDIO ownership. */ +#define MDIO_OWNERSHIP_TIMEOUT 10 +/* Number of milliseconds for NVM auto read done after MAC reset. */ +#define AUTO_READ_DONE_TIMEOUT 10 + +/* Flow Control */ +#define E1000_FCRTH_RTH 0x0000FFF8 /* Mask Bits[15:3] for RTH */ +#define E1000_FCRTL_RTL 0x0000FFF8 /* Mask Bits[15:3] for RTL */ +#define E1000_FCRTL_XONE 0x80000000 /* Enable XON frame transmission */ + +/* Transmit Configuration Word */ +#define E1000_TXCW_FD 0x00000020 /* TXCW full duplex */ +#define E1000_TXCW_PAUSE 0x00000080 /* TXCW sym pause request */ +#define E1000_TXCW_ASM_DIR 0x00000100 /* TXCW astm pause direction */ +#define E1000_TXCW_PAUSE_MASK 0x00000180 /* TXCW pause request mask */ +#define E1000_TXCW_ANE 0x80000000 /* Auto-neg enable */ + +/* Receive Configuration Word */ +#define E1000_RXCW_CW 0x0000ffff /* RxConfigWord mask */ +#define E1000_RXCW_IV 0x08000000 /* Receive config invalid */ +#define E1000_RXCW_C 0x20000000 /* Receive config */ +#define E1000_RXCW_SYNCH 0x40000000 /* Receive config synch */ + +#define E1000_TSYNCTXCTL_VALID 0x00000001 /* Tx timestamp valid */ +#define E1000_TSYNCTXCTL_ENABLED 0x00000010 /* enable Tx timestamping */ + +#define E1000_TSYNCRXCTL_VALID 0x00000001 /* Rx timestamp valid */ +#define E1000_TSYNCRXCTL_TYPE_MASK 0x0000000E /* Rx type mask */ +#define E1000_TSYNCRXCTL_TYPE_L2_V2 0x00 +#define E1000_TSYNCRXCTL_TYPE_L4_V1 0x02 +#define E1000_TSYNCRXCTL_TYPE_L2_L4_V2 0x04 +#define E1000_TSYNCRXCTL_TYPE_ALL 0x08 +#define E1000_TSYNCRXCTL_TYPE_EVENT_V2 0x0A +#define E1000_TSYNCRXCTL_ENABLED 0x00000010 /* enable Rx timestamping */ +#define E1000_TSYNCRXCTL_SYSCFI 0x00000020 /* Sys clock frequency */ + +#define 
E1000_TSYNCRXCFG_PTP_V1_CTRLT_MASK 0x000000FF +#define E1000_TSYNCRXCFG_PTP_V1_SYNC_MESSAGE 0x00 +#define E1000_TSYNCRXCFG_PTP_V1_DELAY_REQ_MESSAGE 0x01 +#define E1000_TSYNCRXCFG_PTP_V1_FOLLOWUP_MESSAGE 0x02 +#define E1000_TSYNCRXCFG_PTP_V1_DELAY_RESP_MESSAGE 0x03 +#define E1000_TSYNCRXCFG_PTP_V1_MANAGEMENT_MESSAGE 0x04 + +#define E1000_TSYNCRXCFG_PTP_V2_MSGID_MASK 0x00000F00 +#define E1000_TSYNCRXCFG_PTP_V2_SYNC_MESSAGE 0x0000 +#define E1000_TSYNCRXCFG_PTP_V2_DELAY_REQ_MESSAGE 0x0100 +#define E1000_TSYNCRXCFG_PTP_V2_PATH_DELAY_REQ_MESSAGE 0x0200 +#define E1000_TSYNCRXCFG_PTP_V2_PATH_DELAY_RESP_MESSAGE 0x0300 +#define E1000_TSYNCRXCFG_PTP_V2_FOLLOWUP_MESSAGE 0x0800 +#define E1000_TSYNCRXCFG_PTP_V2_DELAY_RESP_MESSAGE 0x0900 +#define E1000_TSYNCRXCFG_PTP_V2_PATH_DELAY_FOLLOWUP_MESSAGE 0x0A00 +#define E1000_TSYNCRXCFG_PTP_V2_ANNOUNCE_MESSAGE 0x0B00 +#define E1000_TSYNCRXCFG_PTP_V2_SIGNALLING_MESSAGE 0x0C00 +#define E1000_TSYNCRXCFG_PTP_V2_MANAGEMENT_MESSAGE 0x0D00 + +#define E1000_TIMINCA_16NS_SHIFT 24 +#define E1000_TIMINCA_INCPERIOD_SHIFT 24 +#define E1000_TIMINCA_INCVALUE_MASK 0x00FFFFFF + +/* Time Sync Interrupt Cause/Mask Register Bits */ + +#define TSINTR_SYS_WRAP (1 << 0) /* SYSTIM Wrap around. */ +#define TSINTR_TXTS (1 << 1) /* Transmit Timestamp. */ +#define TSINTR_RXTS (1 << 2) /* Receive Timestamp. */ +#define TSINTR_TT0 (1 << 3) /* Target Time 0 Trigger. */ +#define TSINTR_TT1 (1 << 4) /* Target Time 1 Trigger. */ +#define TSINTR_AUTT0 (1 << 5) /* Auxiliary Timestamp 0 Taken. */ +#define TSINTR_AUTT1 (1 << 6) /* Auxiliary Timestamp 1 Taken. */ +#define TSINTR_TADJ (1 << 7) /* Time Adjust Done. */ + +#define TSYNC_INTERRUPTS TSINTR_TXTS +#define E1000_TSICR_TXTS TSINTR_TXTS + +/* TSAUXC Configuration Bits */ +#define TSAUXC_EN_TT0 (1 << 0) /* Enable target time 0. */ +#define TSAUXC_EN_TT1 (1 << 1) /* Enable target time 1. */ +#define TSAUXC_EN_CLK0 (1 << 2) /* Enable Configurable Frequency Clock 0. 
*/ +#define TSAUXC_SAMP_AUT0 (1 << 3) /* Latch SYSTIML/H into AUXSTMPL/0. */ +#define TSAUXC_ST0 (1 << 4) /* Start Clock 0 Toggle on Target Time 0. */ +#define TSAUXC_EN_CLK1 (1 << 5) /* Enable Configurable Frequency Clock 1. */ +#define TSAUXC_SAMP_AUT1 (1 << 6) /* Latch SYSTIML/H into AUXSTMPL/1. */ +#define TSAUXC_ST1 (1 << 7) /* Start Clock 1 Toggle on Target Time 1. */ +#define TSAUXC_EN_TS0 (1 << 8) /* Enable hardware timestamp 0. */ +#define TSAUXC_AUTT0 (1 << 9) /* Auxiliary Timestamp Taken. */ +#define TSAUXC_EN_TS1 (1 << 10) /* Enable hardware timestamp 0. */ +#define TSAUXC_AUTT1 (1 << 11) /* Auxiliary Timestamp Taken. */ +#define TSAUXC_PLSG (1 << 17) /* Generate a pulse. */ +#define TSAUXC_DISABLE (1 << 31) /* Disable SYSTIM Count Operation. */ + +/* SDP Configuration Bits */ +#define AUX0_SEL_SDP0 (0 << 0) /* Assign SDP0 to auxiliary time stamp 0. */ +#define AUX0_SEL_SDP1 (1 << 0) /* Assign SDP1 to auxiliary time stamp 0. */ +#define AUX0_SEL_SDP2 (2 << 0) /* Assign SDP2 to auxiliary time stamp 0. */ +#define AUX0_SEL_SDP3 (3 << 0) /* Assign SDP3 to auxiliary time stamp 0. */ +#define AUX0_TS_SDP_EN (1 << 2) /* Enable auxiliary time stamp trigger 0. */ +#define AUX1_SEL_SDP0 (0 << 3) /* Assign SDP0 to auxiliary time stamp 1. */ +#define AUX1_SEL_SDP1 (1 << 3) /* Assign SDP1 to auxiliary time stamp 1. */ +#define AUX1_SEL_SDP2 (2 << 3) /* Assign SDP2 to auxiliary time stamp 1. */ +#define AUX1_SEL_SDP3 (3 << 3) /* Assign SDP3 to auxiliary time stamp 1. */ +#define AUX1_TS_SDP_EN (1 << 5) /* Enable auxiliary time stamp trigger 1. */ +#define TS_SDP0_SEL_TT0 (0 << 6) /* Target time 0 is output on SDP0. */ +#define TS_SDP0_SEL_TT1 (1 << 6) /* Target time 1 is output on SDP0. */ +#define TS_SDP0_SEL_FC0 (2 << 6) /* Freq clock 0 is output on SDP0. */ +#define TS_SDP0_SEL_FC1 (3 << 6) /* Freq clock 1 is output on SDP0. */ +#define TS_SDP0_EN (1 << 8) /* SDP0 is assigned to Tsync. */ +#define TS_SDP1_SEL_TT0 (0 << 9) /* Target time 0 is output on SDP1. 
*/ +#define TS_SDP1_SEL_TT1 (1 << 9) /* Target time 1 is output on SDP1. */ +#define TS_SDP1_SEL_FC0 (2 << 9) /* Freq clock 0 is output on SDP1. */ +#define TS_SDP1_SEL_FC1 (3 << 9) /* Freq clock 1 is output on SDP1. */ +#define TS_SDP1_EN (1 << 11) /* SDP1 is assigned to Tsync. */ +#define TS_SDP2_SEL_TT0 (0 << 12) /* Target time 0 is output on SDP2. */ +#define TS_SDP2_SEL_TT1 (1 << 12) /* Target time 1 is output on SDP2. */ +#define TS_SDP2_SEL_FC0 (2 << 12) /* Freq clock 0 is output on SDP2. */ +#define TS_SDP2_SEL_FC1 (3 << 12) /* Freq clock 1 is output on SDP2. */ +#define TS_SDP2_EN (1 << 14) /* SDP2 is assigned to Tsync. */ +#define TS_SDP3_SEL_TT0 (0 << 15) /* Target time 0 is output on SDP3. */ +#define TS_SDP3_SEL_TT1 (1 << 15) /* Target time 1 is output on SDP3. */ +#define TS_SDP3_SEL_FC0 (2 << 15) /* Freq clock 0 is output on SDP3. */ +#define TS_SDP3_SEL_FC1 (3 << 15) /* Freq clock 1 is output on SDP3. */ +#define TS_SDP3_EN (1 << 17) /* SDP3 is assigned to Tsync. */ +/* TUPLE Filtering Configuration */ +#define E1000_TTQF_DISABLE_MASK 0xF0008000 /* TTQF Disable Mask */ +#define E1000_TTQF_QUEUE_ENABLE 0x100 /* TTQF Queue Enable Bit */ +#define E1000_TTQF_PROTOCOL_MASK 0xFF /* TTQF Protocol Mask */ +/* TTQF TCP Bit, shift with E1000_TTQF_PROTOCOL SHIFT */ +#define E1000_TTQF_PROTOCOL_TCP 0x0 +/* TTQF UDP Bit, shift with E1000_TTQF_PROTOCOL_SHIFT */ +#define E1000_TTQF_PROTOCOL_UDP 0x1 +/* TTQF SCTP Bit, shift with E1000_TTQF_PROTOCOL_SHIFT */ +#define E1000_TTQF_PROTOCOL_SCTP 0x2 +#define E1000_TTQF_PROTOCOL_SHIFT 5 /* TTQF Protocol Shift */ +#define E1000_TTQF_QUEUE_SHIFT 16 /* TTQF Queue Shfit */ +#define E1000_TTQF_RX_QUEUE_MASK 0x70000 /* TTQF Queue Mask */ +#define E1000_TTQF_MASK_ENABLE 0x10000000 /* TTQF Mask Enable Bit */ +#define E1000_IMIR_CLEAR_MASK 0xF001FFFF /* IMIR Reg Clear Mask */ +#define E1000_IMIR_PORT_BYPASS 0x20000 /* IMIR Port Bypass Bit */ +#define E1000_IMIR_PRIORITY_SHIFT 29 /* IMIR Priority Shift */ +#define 
E1000_IMIREXT_CLEAR_MASK 0x7FFFF /* IMIREXT Reg Clear Mask */ + +#define E1000_MDICNFG_EXT_MDIO 0x80000000 /* MDI ext/int destination */ +#define E1000_MDICNFG_COM_MDIO 0x40000000 /* MDI shared w/ lan 0 */ +#define E1000_MDICNFG_PHY_MASK 0x03E00000 +#define E1000_MDICNFG_PHY_SHIFT 21 + +#define E1000_MEDIA_PORT_COPPER 1 +#define E1000_MEDIA_PORT_OTHER 2 +#define E1000_M88E1112_AUTO_COPPER_SGMII 0x2 +#define E1000_M88E1112_AUTO_COPPER_BASEX 0x3 +#define E1000_M88E1112_STATUS_LINK 0x0004 /* Interface Link Bit */ +#define E1000_M88E1112_MAC_CTRL_1 0x10 +#define E1000_M88E1112_MAC_CTRL_1_MODE_MASK 0x0380 /* Mode Select */ +#define E1000_M88E1112_MAC_CTRL_1_MODE_SHIFT 7 +#define E1000_M88E1112_PAGE_ADDR 0x16 +#define E1000_M88E1112_STATUS 0x01 + +#define E1000_THSTAT_LOW_EVENT 0x20000000 /* Low thermal threshold */ +#define E1000_THSTAT_MID_EVENT 0x00200000 /* Mid thermal threshold */ +#define E1000_THSTAT_HIGH_EVENT 0x00002000 /* High thermal threshold */ +#define E1000_THSTAT_PWR_DOWN 0x00000001 /* Power Down Event */ +#define E1000_THSTAT_LINK_THROTTLE 0x00000002 /* Link Spd Throttle Event */ + +/* I350 EEE defines */ +#define E1000_IPCNFG_EEE_1G_AN 0x00000008 /* IPCNFG EEE Ena 1G AN */ +#define E1000_IPCNFG_EEE_100M_AN 0x00000004 /* IPCNFG EEE Ena 100M AN */ +#define E1000_EEER_TX_LPI_EN 0x00010000 /* EEER Tx LPI Enable */ +#define E1000_EEER_RX_LPI_EN 0x00020000 /* EEER Rx LPI Enable */ +#define E1000_EEER_LPI_FC 0x00040000 /* EEER Ena on Flow Cntrl */ +/* EEE status */ +#define E1000_EEER_EEE_NEG 0x20000000 /* EEE capability nego */ +#define E1000_EEER_RX_LPI_STATUS 0x40000000 /* Rx in LPI state */ +#define E1000_EEER_TX_LPI_STATUS 0x80000000 /* Tx in LPI state */ +#define E1000_EEE_LP_ADV_ADDR_I350 0x040F /* EEE LP Advertisement */ +#define E1000_M88E1543_PAGE_ADDR 0x16 /* Page Offset Register */ +#define E1000_M88E1543_EEE_CTRL_1 0x0 +#define E1000_M88E1543_EEE_CTRL_1_MS 0x0001 /* EEE Master/Slave */ +#define E1000_EEE_ADV_DEV_I354 7 +#define 
E1000_EEE_ADV_ADDR_I354 60 +#define E1000_EEE_ADV_100_SUPPORTED (1 << 1) /* 100BaseTx EEE Supported */ +#define E1000_EEE_ADV_1000_SUPPORTED (1 << 2) /* 1000BaseT EEE Supported */ +#define E1000_PCS_STATUS_DEV_I354 3 +#define E1000_PCS_STATUS_ADDR_I354 1 +#define E1000_PCS_STATUS_RX_LPI_RCVD 0x0400 +#define E1000_PCS_STATUS_TX_LPI_RCVD 0x0800 +#define E1000_M88E1512_CFG_REG_1 0x0010 +#define E1000_M88E1512_CFG_REG_2 0x0011 +#define E1000_M88E1512_CFG_REG_3 0x0007 +#define E1000_M88E1512_MODE 0x0014 +#define E1000_EEE_SU_LPI_CLK_STP 0x00800000 /* EEE LPI Clock Stop */ +#define E1000_EEE_LP_ADV_DEV_I210 7 /* EEE LP Adv Device */ +#define E1000_EEE_LP_ADV_ADDR_I210 61 /* EEE LP Adv Register */ +/* PCI Express Control */ +#define E1000_GCR_RXD_NO_SNOOP 0x00000001 +#define E1000_GCR_RXDSCW_NO_SNOOP 0x00000002 +#define E1000_GCR_RXDSCR_NO_SNOOP 0x00000004 +#define E1000_GCR_TXD_NO_SNOOP 0x00000008 +#define E1000_GCR_TXDSCW_NO_SNOOP 0x00000010 +#define E1000_GCR_TXDSCR_NO_SNOOP 0x00000020 +#define E1000_GCR_CMPL_TMOUT_MASK 0x0000F000 +#define E1000_GCR_CMPL_TMOUT_10ms 0x00001000 +#define E1000_GCR_CMPL_TMOUT_RESEND 0x00010000 +#define E1000_GCR_CAP_VER2 0x00040000 + +#define PCIE_NO_SNOOP_ALL (E1000_GCR_RXD_NO_SNOOP | \ + E1000_GCR_RXDSCW_NO_SNOOP | \ + E1000_GCR_RXDSCR_NO_SNOOP | \ + E1000_GCR_TXD_NO_SNOOP | \ + E1000_GCR_TXDSCW_NO_SNOOP | \ + E1000_GCR_TXDSCR_NO_SNOOP) + +#define E1000_MMDAC_FUNC_DATA 0x4000 /* Data, no post increment */ + +/* mPHY address control and data registers */ +#define E1000_MPHY_ADDR_CTL 0x0024 /* Address Control Reg */ +#define E1000_MPHY_ADDR_CTL_OFFSET_MASK 0xFFFF0000 +#define E1000_MPHY_DATA 0x0E10 /* Data Register */ + +/* AFE CSR Offset for PCS CLK */ +#define E1000_MPHY_PCS_CLK_REG_OFFSET 0x0004 +/* Override for near end digital loopback. 
*/ +#define E1000_MPHY_PCS_CLK_REG_DIGINELBEN 0x10 + +/* PHY Control Register */ +#define MII_CR_SPEED_SELECT_MSB 0x0040 /* bits 6,13: 10=1000, 01=100, 00=10 */ +#define MII_CR_COLL_TEST_ENABLE 0x0080 /* Collision test enable */ +#define MII_CR_FULL_DUPLEX 0x0100 /* FDX =1, half duplex =0 */ +#define MII_CR_RESTART_AUTO_NEG 0x0200 /* Restart auto negotiation */ +#define MII_CR_ISOLATE 0x0400 /* Isolate PHY from MII */ +#define MII_CR_POWER_DOWN 0x0800 /* Power down */ +#define MII_CR_AUTO_NEG_EN 0x1000 /* Auto Neg Enable */ +#define MII_CR_SPEED_SELECT_LSB 0x2000 /* bits 6,13: 10=1000, 01=100, 00=10 */ +#define MII_CR_LOOPBACK 0x4000 /* 0 = normal, 1 = loopback */ +#define MII_CR_RESET 0x8000 /* 0 = normal, 1 = PHY reset */ +#define MII_CR_SPEED_1000 0x0040 +#define MII_CR_SPEED_100 0x2000 +#define MII_CR_SPEED_10 0x0000 + +/* PHY Status Register */ +#define MII_SR_EXTENDED_CAPS 0x0001 /* Extended register capabilities */ +#define MII_SR_JABBER_DETECT 0x0002 /* Jabber Detected */ +#define MII_SR_LINK_STATUS 0x0004 /* Link Status 1 = link */ +#define MII_SR_AUTONEG_CAPS 0x0008 /* Auto Neg Capable */ +#define MII_SR_REMOTE_FAULT 0x0010 /* Remote Fault Detect */ +#define MII_SR_AUTONEG_COMPLETE 0x0020 /* Auto Neg Complete */ +#define MII_SR_PREAMBLE_SUPPRESS 0x0040 /* Preamble may be suppressed */ +#define MII_SR_EXTENDED_STATUS 0x0100 /* Ext. 
status info in Reg 0x0F */ +#define MII_SR_100T2_HD_CAPS 0x0200 /* 100T2 Half Duplex Capable */ +#define MII_SR_100T2_FD_CAPS 0x0400 /* 100T2 Full Duplex Capable */ +#define MII_SR_10T_HD_CAPS 0x0800 /* 10T Half Duplex Capable */ +#define MII_SR_10T_FD_CAPS 0x1000 /* 10T Full Duplex Capable */ +#define MII_SR_100X_HD_CAPS 0x2000 /* 100X Half Duplex Capable */ +#define MII_SR_100X_FD_CAPS 0x4000 /* 100X Full Duplex Capable */ +#define MII_SR_100T4_CAPS 0x8000 /* 100T4 Capable */ + +/* Autoneg Advertisement Register */ +#define NWAY_AR_SELECTOR_FIELD 0x0001 /* indicates IEEE 802.3 CSMA/CD */ +#define NWAY_AR_10T_HD_CAPS 0x0020 /* 10T Half Duplex Capable */ +#define NWAY_AR_10T_FD_CAPS 0x0040 /* 10T Full Duplex Capable */ +#define NWAY_AR_100TX_HD_CAPS 0x0080 /* 100TX Half Duplex Capable */ +#define NWAY_AR_100TX_FD_CAPS 0x0100 /* 100TX Full Duplex Capable */ +#define NWAY_AR_100T4_CAPS 0x0200 /* 100T4 Capable */ +#define NWAY_AR_PAUSE 0x0400 /* Pause operation desired */ +#define NWAY_AR_ASM_DIR 0x0800 /* Asymmetric Pause Direction bit */ +#define NWAY_AR_REMOTE_FAULT 0x2000 /* Remote Fault detected */ +#define NWAY_AR_NEXT_PAGE 0x8000 /* Next Page ability supported */ + +/* Link Partner Ability Register (Base Page) */ +#define NWAY_LPAR_SELECTOR_FIELD 0x0000 /* LP protocol selector field */ +#define NWAY_LPAR_10T_HD_CAPS 0x0020 /* LP 10T Half Dplx Capable */ +#define NWAY_LPAR_10T_FD_CAPS 0x0040 /* LP 10T Full Dplx Capable */ +#define NWAY_LPAR_100TX_HD_CAPS 0x0080 /* LP 100TX Half Dplx Capable */ +#define NWAY_LPAR_100TX_FD_CAPS 0x0100 /* LP 100TX Full Dplx Capable */ +#define NWAY_LPAR_100T4_CAPS 0x0200 /* LP is 100T4 Capable */ +#define NWAY_LPAR_PAUSE 0x0400 /* LP Pause operation desired */ +#define NWAY_LPAR_ASM_DIR 0x0800 /* LP Asym Pause Direction bit */ +#define NWAY_LPAR_REMOTE_FAULT 0x2000 /* LP detected Remote Fault */ +#define NWAY_LPAR_ACKNOWLEDGE 0x4000 /* LP rx'd link code word */ +#define NWAY_LPAR_NEXT_PAGE 0x8000 /* Next Page ability supported */ + 
+/* Autoneg Expansion Register */ +#define NWAY_ER_LP_NWAY_CAPS 0x0001 /* LP has Auto Neg Capability */ +#define NWAY_ER_PAGE_RXD 0x0002 /* LP 10T Half Dplx Capable */ +#define NWAY_ER_NEXT_PAGE_CAPS 0x0004 /* LP 10T Full Dplx Capable */ +#define NWAY_ER_LP_NEXT_PAGE_CAPS 0x0008 /* LP 100TX Half Dplx Capable */ +#define NWAY_ER_PAR_DETECT_FAULT 0x0010 /* LP 100TX Full Dplx Capable */ + +/* 1000BASE-T Control Register */ +#define CR_1000T_ASYM_PAUSE 0x0080 /* Advertise asymmetric pause bit */ +#define CR_1000T_HD_CAPS 0x0100 /* Advertise 1000T HD capability */ +#define CR_1000T_FD_CAPS 0x0200 /* Advertise 1000T FD capability */ +/* 1=Repeater/switch device port 0=DTE device */ +#define CR_1000T_REPEATER_DTE 0x0400 +/* 1=Configure PHY as Master 0=Configure PHY as Slave */ +#define CR_1000T_MS_VALUE 0x0800 +/* 1=Master/Slave manual config value 0=Automatic Master/Slave config */ +#define CR_1000T_MS_ENABLE 0x1000 +#define CR_1000T_TEST_MODE_NORMAL 0x0000 /* Normal Operation */ +#define CR_1000T_TEST_MODE_1 0x2000 /* Transmit Waveform test */ +#define CR_1000T_TEST_MODE_2 0x4000 /* Master Transmit Jitter test */ +#define CR_1000T_TEST_MODE_3 0x6000 /* Slave Transmit Jitter test */ +#define CR_1000T_TEST_MODE_4 0x8000 /* Transmitter Distortion test */ + +/* 1000BASE-T Status Register */ +#define SR_1000T_IDLE_ERROR_CNT 0x00FF /* Num idle err since last rd */ +#define SR_1000T_ASYM_PAUSE_DIR 0x0100 /* LP asym pause direction bit */ +#define SR_1000T_LP_HD_CAPS 0x0400 /* LP is 1000T HD capable */ +#define SR_1000T_LP_FD_CAPS 0x0800 /* LP is 1000T FD capable */ +#define SR_1000T_REMOTE_RX_STATUS 0x1000 /* Remote receiver OK */ +#define SR_1000T_LOCAL_RX_STATUS 0x2000 /* Local receiver OK */ +#define SR_1000T_MS_CONFIG_RES 0x4000 /* 1=Local Tx Master, 0=Slave */ +#define SR_1000T_MS_CONFIG_FAULT 0x8000 /* Master/Slave config fault */ + +#define SR_1000T_PHY_EXCESSIVE_IDLE_ERR_COUNT 5 + +/* PHY 1000 MII Register/Bit Definitions */ +/* PHY Registers defined by IEEE */ 
+#define PHY_CONTROL 0x00 /* Control Register */ +#define PHY_STATUS 0x01 /* Status Register */ +#define PHY_ID1 0x02 /* Phy Id Reg (word 1) */ +#define PHY_ID2 0x03 /* Phy Id Reg (word 2) */ +#define PHY_AUTONEG_ADV 0x04 /* Autoneg Advertisement */ +#define PHY_LP_ABILITY 0x05 /* Link Partner Ability (Base Page) */ +#define PHY_AUTONEG_EXP 0x06 /* Autoneg Expansion Reg */ +#define PHY_NEXT_PAGE_TX 0x07 /* Next Page Tx */ +#define PHY_LP_NEXT_PAGE 0x08 /* Link Partner Next Page */ +#define PHY_1000T_CTRL 0x09 /* 1000Base-T Control Reg */ +#define PHY_1000T_STATUS 0x0A /* 1000Base-T Status Reg */ +#define PHY_EXT_STATUS 0x0F /* Extended Status Reg */ + +#define PHY_CONTROL_LB 0x4000 /* PHY Loopback bit */ + +/* NVM Control */ +#define E1000_EECD_SK 0x00000001 /* NVM Clock */ +#define E1000_EECD_CS 0x00000002 /* NVM Chip Select */ +#define E1000_EECD_DI 0x00000004 /* NVM Data In */ +#define E1000_EECD_DO 0x00000008 /* NVM Data Out */ +#define E1000_EECD_REQ 0x00000040 /* NVM Access Request */ +#define E1000_EECD_GNT 0x00000080 /* NVM Access Grant */ +#define E1000_EECD_PRES 0x00000100 /* NVM Present */ +#define E1000_EECD_SIZE 0x00000200 /* NVM Size (0=64 word 1=256 word) */ +#define E1000_EECD_BLOCKED 0x00008000 /* Bit banging access blocked flag */ +#define E1000_EECD_ABORT 0x00010000 /* NVM operation aborted flag */ +#define E1000_EECD_TIMEOUT 0x00020000 /* NVM read operation timeout flag */ +#define E1000_EECD_ERROR_CLR 0x00040000 /* NVM error status clear bit */ +/* NVM Addressing bits based on type 0=small, 1=large */ +#define E1000_EECD_ADDR_BITS 0x00000400 +#define E1000_NVM_GRANT_ATTEMPTS 1000 /* NVM # attempts to gain grant */ +#define E1000_EECD_AUTO_RD 0x00000200 /* NVM Auto Read done */ +#define E1000_EECD_SIZE_EX_MASK 0x00007800 /* NVM Size */ +#define E1000_EECD_SIZE_EX_SHIFT 11 +#define E1000_EECD_FLUPD 0x00080000 /* Update FLASH */ +#define E1000_EECD_AUPDEN 0x00100000 /* Ena Auto FLASH update */ +#define E1000_EECD_SEC1VAL 0x00400000 /* Sector One 
Valid */ +#define E1000_EECD_SEC1VAL_VALID_MASK (E1000_EECD_AUTO_RD | E1000_EECD_PRES) +#define E1000_EECD_FLUPD_I210 0x00800000 /* Update FLASH */ +#define E1000_EECD_FLUDONE_I210 0x04000000 /* Update FLASH done */ +#define E1000_EECD_FLASH_DETECTED_I210 0x00080000 /* FLASH detected */ +#define E1000_EECD_SEC1VAL_I210 0x02000000 /* Sector One Valid */ +#define E1000_FLUDONE_ATTEMPTS 20000 +#define E1000_EERD_EEWR_MAX_COUNT 512 /* buffered EEPROM words rw */ +#define E1000_I210_FIFO_SEL_RX 0x00 +#define E1000_I210_FIFO_SEL_TX_QAV(_i) (0x02 + (_i)) +#define E1000_I210_FIFO_SEL_TX_LEGACY E1000_I210_FIFO_SEL_TX_QAV(0) +#define E1000_I210_FIFO_SEL_BMC2OS_TX 0x06 +#define E1000_I210_FIFO_SEL_BMC2OS_RX 0x01 + +#define E1000_I210_FLASH_SECTOR_SIZE 0x1000 /* 4KB FLASH sector unit size */ +/* Secure FLASH mode requires removing MSb */ +#define E1000_I210_FW_PTR_MASK 0x7FFF +/* Firmware code revision field word offset*/ +#define E1000_I210_FW_VER_OFFSET 328 + +#define E1000_NVM_RW_REG_DATA 16 /* Offset to data in NVM read/write regs */ +#define E1000_NVM_RW_REG_DONE 2 /* Offset to READ/WRITE done bit */ +#define E1000_NVM_RW_REG_START 1 /* Start operation */ +#define E1000_NVM_RW_ADDR_SHIFT 2 /* Shift to the address bits */ +#define E1000_NVM_POLL_WRITE 1 /* Flag for polling for write complete */ +#define E1000_NVM_POLL_READ 0 /* Flag for polling for read complete */ +#define E1000_FLASH_UPDATES 2000 + +/* NVM Word Offsets */ +#define NVM_COMPAT 0x0003 +#define NVM_ID_LED_SETTINGS 0x0004 +#define NVM_VERSION 0x0005 +#define E1000_I210_NVM_FW_MODULE_PTR 0x0010 +#define E1000_I350_NVM_FW_MODULE_PTR 0x0051 +#define NVM_FUTURE_INIT_WORD1 0x0019 +#define NVM_ETRACK_WORD 0x0042 +#define NVM_ETRACK_HIWORD 0x0043 +#define NVM_COMB_VER_OFF 0x0083 +#define NVM_COMB_VER_PTR 0x003d + +/* NVM version defines */ +#define NVM_MAJOR_MASK 0xF000 +#define NVM_MINOR_MASK 0x0FF0 +#define NVM_IMAGE_ID_MASK 0x000F +#define NVM_COMB_VER_MASK 0x00FF +#define NVM_MAJOR_SHIFT 12 +#define 
NVM_MINOR_SHIFT 4 +#define NVM_COMB_VER_SHFT 8 +#define NVM_VER_INVALID 0xFFFF +#define NVM_ETRACK_SHIFT 16 +#define NVM_ETRACK_VALID 0x8000 +#define NVM_NEW_DEC_MASK 0x0F00 +#define NVM_HEX_CONV 16 +#define NVM_HEX_TENS 10 + +/* FW version defines */ +/* Offset of "Loader patch ptr" in Firmware Header */ +#define E1000_I350_NVM_FW_LOADER_PATCH_PTR_OFFSET 0x01 +/* Patch generation hour & minutes */ +#define E1000_I350_NVM_FW_VER_WORD1_OFFSET 0x04 +/* Patch generation month & day */ +#define E1000_I350_NVM_FW_VER_WORD2_OFFSET 0x05 +/* Patch generation year */ +#define E1000_I350_NVM_FW_VER_WORD3_OFFSET 0x06 +/* Patch major & minor numbers */ +#define E1000_I350_NVM_FW_VER_WORD4_OFFSET 0x07 + +#define NVM_MAC_ADDR 0x0000 +#define NVM_SUB_DEV_ID 0x000B +#define NVM_SUB_VEN_ID 0x000C +#define NVM_DEV_ID 0x000D +#define NVM_VEN_ID 0x000E +#define NVM_INIT_CTRL_2 0x000F +#define NVM_INIT_CTRL_4 0x0013 +#define NVM_LED_1_CFG 0x001C +#define NVM_LED_0_2_CFG 0x001F + +#define NVM_COMPAT_VALID_CSUM 0x0001 +#define NVM_FUTURE_INIT_WORD1_VALID_CSUM 0x0040 + +#define NVM_ETS_CFG 0x003E +#define NVM_ETS_LTHRES_DELTA_MASK 0x07C0 +#define NVM_ETS_LTHRES_DELTA_SHIFT 6 +#define NVM_ETS_TYPE_MASK 0x0038 +#define NVM_ETS_TYPE_SHIFT 3 +#define NVM_ETS_TYPE_EMC 0x000 +#define NVM_ETS_NUM_SENSORS_MASK 0x0007 +#define NVM_ETS_DATA_LOC_MASK 0x3C00 +#define NVM_ETS_DATA_LOC_SHIFT 10 +#define NVM_ETS_DATA_INDEX_MASK 0x0300 +#define NVM_ETS_DATA_INDEX_SHIFT 8 +#define NVM_ETS_DATA_HTHRESH_MASK 0x00FF +#define NVM_INIT_CONTROL2_REG 0x000F +#define NVM_INIT_CONTROL3_PORT_B 0x0014 +#define NVM_INIT_3GIO_3 0x001A +#define NVM_SWDEF_PINS_CTRL_PORT_0 0x0020 +#define NVM_INIT_CONTROL3_PORT_A 0x0024 +#define NVM_CFG 0x0012 +#define NVM_ALT_MAC_ADDR_PTR 0x0037 +#define NVM_CHECKSUM_REG 0x003F +#define NVM_COMPATIBILITY_REG_3 0x0003 +#define NVM_COMPATIBILITY_BIT_MASK 0x8000 + +#define E1000_NVM_CFG_DONE_PORT_0 0x040000 /* MNG config cycle done */ +#define E1000_NVM_CFG_DONE_PORT_1 0x080000 /* ...for 
second port */ +#define E1000_NVM_CFG_DONE_PORT_2 0x100000 /* ...for third port */ +#define E1000_NVM_CFG_DONE_PORT_3 0x200000 /* ...for fourth port */ + +#define NVM_82580_LAN_FUNC_OFFSET(a) ((a) ? (0x40 + (0x40 * (a))) : 0) + +/* Mask bits for fields in Word 0x24 of the NVM */ +#define NVM_WORD24_COM_MDIO 0x0008 /* MDIO interface shared */ +#define NVM_WORD24_EXT_MDIO 0x0004 /* MDIO accesses routed extrnl */ +/* Offset of Link Mode bits for 82575/82576 */ +#define NVM_WORD24_LNK_MODE_OFFSET 8 +/* Offset of Link Mode bits for 82580 up */ +#define NVM_WORD24_82580_LNK_MODE_OFFSET 4 + +/* Mask bits for fields in Word 0x0f of the NVM */ +#define NVM_WORD0F_PAUSE_MASK 0x3000 +#define NVM_WORD0F_PAUSE 0x1000 +#define NVM_WORD0F_ASM_DIR 0x2000 + +/* Mask bits for fields in Word 0x1a of the NVM */ +#define NVM_WORD1A_ASPM_MASK 0x000C + +/* Mask bits for fields in Word 0x03 of the EEPROM */ +#define NVM_COMPAT_LOM 0x0800 + +/* length of string needed to store PBA number */ +#define E1000_PBANUM_LENGTH 11 + +/* For checksumming, the sum of all words in the NVM should equal 0xBABA. 
*/ +#define NVM_SUM 0xBABA + +/* PBA (printed board assembly) number words */ +#define NVM_PBA_OFFSET_0 8 +#define NVM_PBA_OFFSET_1 9 +#define NVM_PBA_PTR_GUARD 0xFAFA +#define NVM_RESERVED_WORD 0xFFFF +#define NVM_WORD_SIZE_BASE_SHIFT 6 + +/* NVM Commands - SPI */ +#define NVM_MAX_RETRY_SPI 5000 /* Max wait of 5ms, for RDY signal */ +#define NVM_READ_OPCODE_SPI 0x03 /* NVM read opcode */ +#define NVM_WRITE_OPCODE_SPI 0x02 /* NVM write opcode */ +#define NVM_A8_OPCODE_SPI 0x08 /* opcode bit-3 = address bit-8 */ +#define NVM_WREN_OPCODE_SPI 0x06 /* NVM set Write Enable latch */ +#define NVM_RDSR_OPCODE_SPI 0x05 /* NVM read Status register */ + +/* SPI NVM Status Register */ +#define NVM_STATUS_RDY_SPI 0x01 + +/* Word definitions for ID LED Settings */ +#define ID_LED_RESERVED_0000 0x0000 +#define ID_LED_RESERVED_FFFF 0xFFFF +#define ID_LED_DEFAULT ((ID_LED_OFF1_ON2 << 12) | \ + (ID_LED_OFF1_OFF2 << 8) | \ + (ID_LED_DEF1_DEF2 << 4) | \ + (ID_LED_DEF1_DEF2)) +#define ID_LED_DEF1_DEF2 0x1 +#define ID_LED_DEF1_ON2 0x2 +#define ID_LED_DEF1_OFF2 0x3 +#define ID_LED_ON1_DEF2 0x4 +#define ID_LED_ON1_ON2 0x5 +#define ID_LED_ON1_OFF2 0x6 +#define ID_LED_OFF1_DEF2 0x7 +#define ID_LED_OFF1_ON2 0x8 +#define ID_LED_OFF1_OFF2 0x9 + +#define IGP_ACTIVITY_LED_MASK 0xFFFFF0FF +#define IGP_ACTIVITY_LED_ENABLE 0x0300 +#define IGP_LED3_MODE 0x07000000 + +/* PCI/PCI-X/PCI-EX Config space */ +#define PCIX_COMMAND_REGISTER 0xE6 +#define PCIX_STATUS_REGISTER_LO 0xE8 +#define PCIX_STATUS_REGISTER_HI 0xEA +#define PCI_HEADER_TYPE_REGISTER 0x0E +#define PCIE_LINK_STATUS 0x12 +#define PCIE_DEVICE_CONTROL2 0x28 + +#define PCIX_COMMAND_MMRBC_MASK 0x000C +#define PCIX_COMMAND_MMRBC_SHIFT 0x2 +#define PCIX_STATUS_HI_MMRBC_MASK 0x0060 +#define PCIX_STATUS_HI_MMRBC_SHIFT 0x5 +#define PCIX_STATUS_HI_MMRBC_4K 0x3 +#define PCIX_STATUS_HI_MMRBC_2K 0x2 +#define PCIX_STATUS_LO_FUNC_MASK 0x7 +#define PCI_HEADER_TYPE_MULTIFUNC 0x80 +#define PCIE_LINK_WIDTH_MASK 0x3F0 +#define PCIE_LINK_WIDTH_SHIFT 4 +#define 
PCIE_LINK_SPEED_MASK 0x0F +#define PCIE_LINK_SPEED_2500 0x01 +#define PCIE_LINK_SPEED_5000 0x02 +#define PCIE_DEVICE_CONTROL2_16ms 0x0005 + +#ifndef ETH_ADDR_LEN +#define ETH_ADDR_LEN 6 +#endif + +#define PHY_REVISION_MASK 0xFFFFFFF0 +#define MAX_PHY_REG_ADDRESS 0x1F /* 5 bit address bus (0-0x1F) */ +#define MAX_PHY_MULTI_PAGE_REG 0xF + +/* Bit definitions for valid PHY IDs. + * I = Integrated + * E = External + */ +#define M88E1000_E_PHY_ID 0x01410C50 +#define M88E1000_I_PHY_ID 0x01410C30 +#define M88E1011_I_PHY_ID 0x01410C20 +#define IGP01E1000_I_PHY_ID 0x02A80380 +#define M88E1111_I_PHY_ID 0x01410CC0 +#define M88E1543_E_PHY_ID 0x01410EA0 +#define M88E1512_E_PHY_ID 0x01410DD0 +#define M88E1112_E_PHY_ID 0x01410C90 +#define I347AT4_E_PHY_ID 0x01410DC0 +#define M88E1340M_E_PHY_ID 0x01410DF0 +#define GG82563_E_PHY_ID 0x01410CA0 +#define IGP03E1000_E_PHY_ID 0x02A80390 +#define IFE_E_PHY_ID 0x02A80330 +#define IFE_PLUS_E_PHY_ID 0x02A80320 +#define IFE_C_E_PHY_ID 0x02A80310 +#define I82580_I_PHY_ID 0x015403A0 +#define I350_I_PHY_ID 0x015403B0 +#define I210_I_PHY_ID 0x01410C00 +#define IGP04E1000_E_PHY_ID 0x02A80391 +#define M88_VENDOR 0x0141 + +/* M88E1000 Specific Registers */ +#define M88E1000_PHY_SPEC_CTRL 0x10 /* PHY Specific Control Reg */ +#define M88E1000_PHY_SPEC_STATUS 0x11 /* PHY Specific Status Reg */ +#define M88E1000_EXT_PHY_SPEC_CTRL 0x14 /* Extended PHY Specific Cntrl */ +#define M88E1000_RX_ERR_CNTR 0x15 /* Receive Error Counter */ + +#define M88E1000_PHY_PAGE_SELECT 0x1D /* Reg 29 for pg number setting */ +#define M88E1000_PHY_GEN_CONTROL 0x1E /* meaning depends on reg 29 */ + +/* M88E1000 PHY Specific Control Register */ +#define M88E1000_PSCR_POLARITY_REVERSAL 0x0002 /* 1=Polarity Reverse enabled */ +/* MDI Crossover Mode bits 6:5 Manual MDI configuration */ +#define M88E1000_PSCR_MDI_MANUAL_MODE 0x0000 +#define M88E1000_PSCR_MDIX_MANUAL_MODE 0x0020 /* Manual MDIX configuration */ +/* 1000BASE-T: Auto crossover, 100BASE-TX/10BASE-T: MDI Mode */ 
+#define M88E1000_PSCR_AUTO_X_1000T 0x0040 +/* Auto crossover enabled all speeds */ +#define M88E1000_PSCR_AUTO_X_MODE 0x0060 +#define M88E1000_PSCR_ASSERT_CRS_ON_TX 0x0800 /* 1=Assert CRS on Tx */ + +/* M88E1000 PHY Specific Status Register */ +#define M88E1000_PSSR_REV_POLARITY 0x0002 /* 1=Polarity reversed */ +#define M88E1000_PSSR_DOWNSHIFT 0x0020 /* 1=Downshifted */ +#define M88E1000_PSSR_MDIX 0x0040 /* 1=MDIX; 0=MDI */ +/* 0 = <50M + * 1 = 50-80M + * 2 = 80-110M + * 3 = 110-140M + * 4 = >140M + */ +#define M88E1000_PSSR_CABLE_LENGTH 0x0380 +#define M88E1000_PSSR_LINK 0x0400 /* 1=Link up, 0=Link down */ +#define M88E1000_PSSR_SPD_DPLX_RESOLVED 0x0800 /* 1=Speed & Duplex resolved */ +#define M88E1000_PSSR_SPEED 0xC000 /* Speed, bits 14:15 */ +#define M88E1000_PSSR_1000MBS 0x8000 /* 10=1000Mbs */ + +#define M88E1000_PSSR_CABLE_LENGTH_SHIFT 7 + +/* Number of times we will attempt to autonegotiate before downshifting if we + * are the master + */ +#define M88E1000_EPSCR_MASTER_DOWNSHIFT_MASK 0x0C00 +#define M88E1000_EPSCR_MASTER_DOWNSHIFT_1X 0x0000 +/* Number of times we will attempt to autonegotiate before downshifting if we + * are the slave + */ +#define M88E1000_EPSCR_SLAVE_DOWNSHIFT_MASK 0x0300 +#define M88E1000_EPSCR_SLAVE_DOWNSHIFT_1X 0x0100 +#define M88E1000_EPSCR_TX_CLK_25 0x0070 /* 25 MHz TX_CLK */ + +/* Intel I347AT4 Registers */ +#define I347AT4_PCDL 0x10 /* PHY Cable Diagnostics Length */ +#define I347AT4_PCDC 0x15 /* PHY Cable Diagnostics Control */ +#define I347AT4_PAGE_SELECT 0x16 + +/* I347AT4 Extended PHY Specific Control Register */ + +/* Number of times we will attempt to autonegotiate before downshifting if we + * are the master + */ +#define I347AT4_PSCR_DOWNSHIFT_ENABLE 0x0800 +#define I347AT4_PSCR_DOWNSHIFT_MASK 0x7000 +#define I347AT4_PSCR_DOWNSHIFT_1X 0x0000 +#define I347AT4_PSCR_DOWNSHIFT_2X 0x1000 +#define I347AT4_PSCR_DOWNSHIFT_3X 0x2000 +#define I347AT4_PSCR_DOWNSHIFT_4X 0x3000 +#define I347AT4_PSCR_DOWNSHIFT_5X 0x4000 +#define 
I347AT4_PSCR_DOWNSHIFT_6X 0x5000 +#define I347AT4_PSCR_DOWNSHIFT_7X 0x6000 +#define I347AT4_PSCR_DOWNSHIFT_8X 0x7000 + +/* I347AT4 PHY Cable Diagnostics Control */ +#define I347AT4_PCDC_CABLE_LENGTH_UNIT 0x0400 /* 0=cm 1=meters */ + +/* M88E1112 only registers */ +#define M88E1112_VCT_DSP_DISTANCE 0x001A + +/* M88EC018 Rev 2 specific DownShift settings */ +#define M88EC018_EPSCR_DOWNSHIFT_COUNTER_MASK 0x0E00 +#define M88EC018_EPSCR_DOWNSHIFT_COUNTER_5X 0x0800 + +/* Bits... + * 15-5: page + * 4-0: register offset + */ +#define GG82563_PAGE_SHIFT 5 +#define GG82563_REG(page, reg) \ + (((page) << GG82563_PAGE_SHIFT) | ((reg) & MAX_PHY_REG_ADDRESS)) +#define GG82563_MIN_ALT_REG 30 + +/* GG82563 Specific Registers */ +#define GG82563_PHY_SPEC_CTRL GG82563_REG(0, 16) /* PHY Spec Cntrl */ +#define GG82563_PHY_PAGE_SELECT GG82563_REG(0, 22) /* Page Select */ +#define GG82563_PHY_SPEC_CTRL_2 GG82563_REG(0, 26) /* PHY Spec Cntrl2 */ +#define GG82563_PHY_PAGE_SELECT_ALT GG82563_REG(0, 29) /* Alt Page Select */ + +/* MAC Specific Control Register */ +#define GG82563_PHY_MAC_SPEC_CTRL GG82563_REG(2, 21) + +#define GG82563_PHY_DSP_DISTANCE GG82563_REG(5, 26) /* DSP Distance */ + +/* Page 193 - Port Control Registers */ +/* Kumeran Mode Control */ +#define GG82563_PHY_KMRN_MODE_CTRL GG82563_REG(193, 16) +#define GG82563_PHY_PWR_MGMT_CTRL GG82563_REG(193, 20) /* Pwr Mgt Ctrl */ + +/* Page 194 - KMRN Registers */ +#define GG82563_PHY_INBAND_CTRL GG82563_REG(194, 18) /* Inband Ctrl */ + +/* MDI Control */ +#define E1000_MDIC_REG_MASK 0x001F0000 +#define E1000_MDIC_REG_SHIFT 16 +#define E1000_MDIC_PHY_MASK 0x03E00000 +#define E1000_MDIC_PHY_SHIFT 21 +#define E1000_MDIC_OP_WRITE 0x04000000 +#define E1000_MDIC_OP_READ 0x08000000 +#define E1000_MDIC_READY 0x10000000 +#define E1000_MDIC_ERROR 0x40000000 +#define E1000_MDIC_DEST 0x80000000 + +/* SerDes Control */ +#define E1000_GEN_CTL_READY 0x80000000 +#define E1000_GEN_CTL_ADDRESS_SHIFT 8 +#define E1000_GEN_POLL_TIMEOUT 640 + +/* 
LinkSec register fields */ +#define E1000_LSECTXCAP_SUM_MASK 0x00FF0000 +#define E1000_LSECTXCAP_SUM_SHIFT 16 +#define E1000_LSECRXCAP_SUM_MASK 0x00FF0000 +#define E1000_LSECRXCAP_SUM_SHIFT 16 + +#define E1000_LSECTXCTRL_EN_MASK 0x00000003 +#define E1000_LSECTXCTRL_DISABLE 0x0 +#define E1000_LSECTXCTRL_AUTH 0x1 +#define E1000_LSECTXCTRL_AUTH_ENCRYPT 0x2 +#define E1000_LSECTXCTRL_AISCI 0x00000020 +#define E1000_LSECTXCTRL_PNTHRSH_MASK 0xFFFFFF00 +#define E1000_LSECTXCTRL_RSV_MASK 0x000000D8 + +#define E1000_LSECRXCTRL_EN_MASK 0x0000000C +#define E1000_LSECRXCTRL_EN_SHIFT 2 +#define E1000_LSECRXCTRL_DISABLE 0x0 +#define E1000_LSECRXCTRL_CHECK 0x1 +#define E1000_LSECRXCTRL_STRICT 0x2 +#define E1000_LSECRXCTRL_DROP 0x3 +#define E1000_LSECRXCTRL_PLSH 0x00000040 +#define E1000_LSECRXCTRL_RP 0x00000080 +#define E1000_LSECRXCTRL_RSV_MASK 0xFFFFFF33 + +/* Tx Rate-Scheduler Config fields */ +#define E1000_RTTBCNRC_RS_ENA 0x80000000 +#define E1000_RTTBCNRC_RF_DEC_MASK 0x00003FFF +#define E1000_RTTBCNRC_RF_INT_SHIFT 14 +#define E1000_RTTBCNRC_RF_INT_MASK \ + (E1000_RTTBCNRC_RF_DEC_MASK << E1000_RTTBCNRC_RF_INT_SHIFT) + +/* DMA Coalescing register fields */ +/* DMA Coalescing Watchdog Timer */ +#define E1000_DMACR_DMACWT_MASK 0x00003FFF +/* DMA Coalescing Rx Threshold */ +#define E1000_DMACR_DMACTHR_MASK 0x00FF0000 +#define E1000_DMACR_DMACTHR_SHIFT 16 +/* Lx when no PCIe transactions */ +#define E1000_DMACR_DMAC_LX_MASK 0x30000000 +#define E1000_DMACR_DMAC_LX_SHIFT 28 +#define E1000_DMACR_DMAC_EN 0x80000000 /* Enable DMA Coalescing */ +/* DMA Coalescing BMC-to-OS Watchdog Enable */ +#define E1000_DMACR_DC_BMC2OSW_EN 0x00008000 + +/* DMA Coalescing Transmit Threshold */ +#define E1000_DMCTXTH_DMCTTHR_MASK 0x00000FFF + +#define E1000_DMCTLX_TTLX_MASK 0x00000FFF /* Time to LX request */ + +/* Rx Traffic Rate Threshold */ +#define E1000_DMCRTRH_UTRESH_MASK 0x0007FFFF +/* Rx packet rate in current window */ +#define E1000_DMCRTRH_LRPRCW 0x80000000 + +/* DMA Coal Rx Traffic Current 
Count */ +#define E1000_DMCCNT_CCOUNT_MASK 0x01FFFFFF + +/* Flow ctrl Rx Threshold High val */ +#define E1000_FCRTC_RTH_COAL_MASK 0x0003FFF0 +#define E1000_FCRTC_RTH_COAL_SHIFT 4 +/* Lx power decision based on DMA coal */ +#define E1000_PCIEMISC_LX_DECISION 0x00000080 + +#define E1000_RXPBS_CFG_TS_EN 0x80000000 /* Timestamp in Rx buffer */ +#define E1000_RXPBS_SIZE_I210_MASK 0x0000003F /* Rx packet buffer size */ +#define E1000_TXPB0S_SIZE_I210_MASK 0x0000003F /* Tx packet buffer 0 size */ +#define I210_RXPBSIZE_DEFAULT 0x000000A2 /* RXPBSIZE default */ +#define I210_TXPBSIZE_DEFAULT 0x04000014 /* TXPBSIZE default */ + +/* Proxy Filter Control */ +#define E1000_PROXYFC_D0 0x00000001 /* Enable offload in D0 */ +#define E1000_PROXYFC_EX 0x00000004 /* Directed exact proxy */ +#define E1000_PROXYFC_MC 0x00000008 /* Directed MC Proxy */ +#define E1000_PROXYFC_BC 0x00000010 /* Broadcast Proxy Enable */ +#define E1000_PROXYFC_ARP_DIRECTED 0x00000020 /* Directed ARP Proxy Ena */ +#define E1000_PROXYFC_IPV4 0x00000040 /* Directed IPv4 Enable */ +#define E1000_PROXYFC_IPV6 0x00000080 /* Directed IPv6 Enable */ +#define E1000_PROXYFC_NS 0x00000200 /* IPv6 Neighbor Solicitation */ +#define E1000_PROXYFC_ARP 0x00000800 /* ARP Request Proxy Ena */ +/* Proxy Status */ +#define E1000_PROXYS_CLEAR 0xFFFFFFFF /* Clear */ + +/* Firmware Status */ +#define E1000_FWSTS_FWRI 0x80000000 /* FW Reset Indication */ +/* VF Control */ +#define E1000_VTCTRL_RST 0x04000000 /* Reset VF */ + +#define E1000_STATUS_LAN_ID_MASK 0x00000000C /* Mask for Lan ID field */ +/* Lan ID bit field offset in status register */ +#define E1000_STATUS_LAN_ID_OFFSET 2 +#define E1000_VFTA_ENTRIES 128 +#define E1000_TQAVCC_QUEUEMODE 0x80000000 /* queue mode, 0=strict, 1=SR mode */ +#define E1000_TQAVCTRL_TXMODE 0x00000001 /* Transmit mode, 0=legacy, 1=QAV */ +#define E1000_TQAVCTRL_1588_STAT_EN 0x00000004 /* report DMA time of tx packets */ +#define E1000_TQAVCTRL_DATA_FETCH_ARB 0x00000010 /* data fetch arbitration 
*/ +#define E1000_TQAVCTRL_DATA_TRAN_ARB 0x00000100 /* data tx arbitration */ +#define E1000_TQAVCTRL_DATA_TRAN_TIM 0x00000200 /* data launch time valid */ +#define E1000_TQAVCTRL_SP_WAIT_SR 0x00000400 /* stall SP to guarantee SR */ +#define E1000_TQAVCTRL_FETCH_TM_SHIFT (16) /* ... and associated shift value */ + +/* Tx packet buffer fields */ +#define E1000_TXPBSIZE_PBSZ_MASK 0x3F +#define E1000_TXPBSIZE_TX0PB_SHIFT 0 +#define E1000_TXPBSIZE_TX1PB_SHIFT 6 +#define E1000_TXPBSIZE_TX2PB_SHIFT 12 +#define E1000_TXPBSIZE_TX3PB_SHIFT 18 +#ifndef E1000_UNUSEDARG +#define E1000_UNUSEDARG +#endif /* E1000_UNUSEDARG */ +#ifndef ERROR_REPORT +#define ERROR_REPORT(fmt) do { } while (0) +#endif /* ERROR_REPORT */ +#define E1000_TSAUXC_SAMP_AUTO 0x00000008 /* sample current ts */ +#endif /* _E1000_DEFINES_H_ */ diff --git a/drivers/staging/igb_avb/e1000_hw.h b/drivers/staging/igb_avb/e1000_hw.h new file mode 100644 index 0000000000000..74cb22ee8ead0 --- /dev/null +++ b/drivers/staging/igb_avb/e1000_hw.h @@ -0,0 +1,792 @@ +/******************************************************************************* + + Intel(R) Gigabit Ethernet Linux driver + Copyright(c) 2007-2015 Intel Corporation. + + This program is free software; you can redistribute it and/or modify it + under the terms and conditions of the GNU General Public License, + version 2, as published by the Free Software Foundation. + + This program is distributed in the hope it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. + + The full GNU General Public License is included in this distribution in + the file called "COPYING". + + Contact Information: + Linux NICS + e1000-devel Mailing List + Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 + +*******************************************************************************/ + +#ifndef _E1000_HW_H_ +#define _E1000_HW_H_ + +#include "e1000_osdep.h" +#include "e1000_regs.h" +#include "e1000_defines.h" + +struct e1000_hw; + +#define E1000_DEV_ID_82576 0x10C9 +#define E1000_DEV_ID_82576_FIBER 0x10E6 +#define E1000_DEV_ID_82576_SERDES 0x10E7 +#define E1000_DEV_ID_82576_QUAD_COPPER 0x10E8 +#define E1000_DEV_ID_82576_QUAD_COPPER_ET2 0x1526 +#define E1000_DEV_ID_82576_NS 0x150A +#define E1000_DEV_ID_82576_NS_SERDES 0x1518 +#define E1000_DEV_ID_82576_SERDES_QUAD 0x150D +#define E1000_DEV_ID_82575EB_COPPER 0x10A7 +#define E1000_DEV_ID_82575EB_FIBER_SERDES 0x10A9 +#define E1000_DEV_ID_82575GB_QUAD_COPPER 0x10D6 +#define E1000_DEV_ID_82580_COPPER 0x150E +#define E1000_DEV_ID_82580_FIBER 0x150F +#define E1000_DEV_ID_82580_SERDES 0x1510 +#define E1000_DEV_ID_82580_SGMII 0x1511 +#define E1000_DEV_ID_82580_COPPER_DUAL 0x1516 +#define E1000_DEV_ID_82580_QUAD_FIBER 0x1527 +#define E1000_DEV_ID_I350_COPPER 0x1521 +#define E1000_DEV_ID_I350_FIBER 0x1522 +#define E1000_DEV_ID_I350_SERDES 0x1523 +#define E1000_DEV_ID_I350_SGMII 0x1524 +#define E1000_DEV_ID_I350_DA4 0x1546 +#define E1000_DEV_ID_I210_COPPER 0x1533 +#define E1000_DEV_ID_I210_COPPER_OEM1 0x1534 +#define E1000_DEV_ID_I210_COPPER_IT 0x1535 +#define E1000_DEV_ID_I210_FIBER 0x1536 +#define E1000_DEV_ID_I210_SERDES 0x1537 +#define E1000_DEV_ID_I210_SGMII 0x1538 +#define E1000_DEV_ID_I210_AUTOMOTIVE 0x15F6 +#define E1000_DEV_ID_I210_COPPER_FLASHLESS 0x157B +#define E1000_DEV_ID_I210_SERDES_FLASHLESS 0x157C +#define E1000_DEV_ID_I211_COPPER 0x1539 +#define E1000_DEV_ID_I354_BACKPLANE_1GBPS 0x1F40 +#define E1000_DEV_ID_I354_SGMII 0x1F41 +#define E1000_DEV_ID_I354_BACKPLANE_2_5GBPS 0x1F45 +#define E1000_DEV_ID_DH89XXCC_SGMII 0x0438 +#define E1000_DEV_ID_DH89XXCC_SERDES 0x043A +#define E1000_DEV_ID_DH89XXCC_BACKPLANE 0x043C +#define E1000_DEV_ID_DH89XXCC_SFP 0x0440 + +#define 
E1000_REVISION_0 0 +#define E1000_REVISION_1 1 +#define E1000_REVISION_2 2 +#define E1000_REVISION_3 3 +#define E1000_REVISION_4 4 + +#define E1000_FUNC_0 0 +#define E1000_FUNC_1 1 +#define E1000_FUNC_2 2 +#define E1000_FUNC_3 3 + +#define E1000_ALT_MAC_ADDRESS_OFFSET_LAN0 0 +#define E1000_ALT_MAC_ADDRESS_OFFSET_LAN1 3 +#define E1000_ALT_MAC_ADDRESS_OFFSET_LAN2 6 +#define E1000_ALT_MAC_ADDRESS_OFFSET_LAN3 9 + +enum e1000_mac_type { + e1000_undefined = 0, + e1000_82575, + e1000_82576, + e1000_82580, + e1000_i350, + e1000_i354, + e1000_i210, + e1000_i211, + e1000_num_macs /* List is 1-based, so subtract 1 for true count. */ +}; + +enum e1000_media_type { + e1000_media_type_unknown = 0, + e1000_media_type_copper = 1, + e1000_media_type_fiber = 2, + e1000_media_type_internal_serdes = 3, + e1000_num_media_types +}; + +enum e1000_nvm_type { + e1000_nvm_unknown = 0, + e1000_nvm_none, + e1000_nvm_eeprom_spi, + e1000_nvm_flash_hw, + e1000_nvm_invm, + e1000_nvm_flash_sw +}; + +enum e1000_nvm_override { + e1000_nvm_override_none = 0, + e1000_nvm_override_spi_small, + e1000_nvm_override_spi_large, +}; + +enum e1000_phy_type { + e1000_phy_unknown = 0, + e1000_phy_none, + e1000_phy_m88, + e1000_phy_igp, + e1000_phy_igp_2, + e1000_phy_gg82563, + e1000_phy_igp_3, + e1000_phy_ife, + e1000_phy_82580, + e1000_phy_vf, + e1000_phy_i210, +}; + +enum e1000_bus_type { + e1000_bus_type_unknown = 0, + e1000_bus_type_pci, + e1000_bus_type_pcix, + e1000_bus_type_pci_express, + e1000_bus_type_reserved +}; + +enum e1000_bus_speed { + e1000_bus_speed_unknown = 0, + e1000_bus_speed_33, + e1000_bus_speed_66, + e1000_bus_speed_100, + e1000_bus_speed_120, + e1000_bus_speed_133, + e1000_bus_speed_2500, + e1000_bus_speed_5000, + e1000_bus_speed_reserved +}; + +enum e1000_bus_width { + e1000_bus_width_unknown = 0, + e1000_bus_width_pcie_x1, + e1000_bus_width_pcie_x2, + e1000_bus_width_pcie_x4 = 4, + e1000_bus_width_pcie_x8 = 8, + e1000_bus_width_32, + e1000_bus_width_64, + e1000_bus_width_reserved +}; 
+ +enum e1000_1000t_rx_status { + e1000_1000t_rx_status_not_ok = 0, + e1000_1000t_rx_status_ok, + e1000_1000t_rx_status_undefined = 0xFF +}; + +enum e1000_rev_polarity { + e1000_rev_polarity_normal = 0, + e1000_rev_polarity_reversed, + e1000_rev_polarity_undefined = 0xFF +}; + +enum e1000_fc_mode { + e1000_fc_none = 0, + e1000_fc_rx_pause, + e1000_fc_tx_pause, + e1000_fc_full, + e1000_fc_default = 0xFF +}; + +enum e1000_ms_type { + e1000_ms_hw_default = 0, + e1000_ms_force_master, + e1000_ms_force_slave, + e1000_ms_auto +}; + +enum e1000_smart_speed { + e1000_smart_speed_default = 0, + e1000_smart_speed_on, + e1000_smart_speed_off +}; + +enum e1000_serdes_link_state { + e1000_serdes_link_down = 0, + e1000_serdes_link_autoneg_progress, + e1000_serdes_link_autoneg_complete, + e1000_serdes_link_forced_up +}; + +#ifndef __le16 +#define __le16 u16 +#endif +#ifndef __le32 +#define __le32 u32 +#endif +#ifndef __le64 +#define __le64 u64 +#endif +/* Receive Descriptor */ +struct e1000_rx_desc { + __le64 buffer_addr; /* Address of the descriptor's data buffer */ + __le16 length; /* Length of data DMAed into data buffer */ + __le16 csum; /* Packet checksum */ + u8 status; /* Descriptor status */ + u8 errors; /* Descriptor Errors */ + __le16 special; +}; + +/* Receive Descriptor - Extended */ +union e1000_rx_desc_extended { + struct { + __le64 buffer_addr; + __le64 reserved; + } read; + struct { + struct { + __le32 mrq; /* Multiple Rx Queues */ + union { + __le32 rss; /* RSS Hash */ + struct { + __le16 ip_id; /* IP id */ + __le16 csum; /* Packet Checksum */ + } csum_ip; + } hi_dword; + } lower; + struct { + __le32 status_error; /* ext status/error */ + __le16 length; + __le16 vlan; /* VLAN tag */ + } upper; + } wb; /* writeback */ +}; + +#define MAX_PS_BUFFERS 4 + +/* Number of packet split data buffers (not including the header buffer) */ +#define PS_PAGE_BUFFERS (MAX_PS_BUFFERS - 1) + +/* Receive Descriptor - Packet Split */ +union e1000_rx_desc_packet_split { + struct { + 
/* one buffer for protocol header(s), three data buffers */ + __le64 buffer_addr[MAX_PS_BUFFERS]; + } read; + struct { + struct { + __le32 mrq; /* Multiple Rx Queues */ + union { + __le32 rss; /* RSS Hash */ + struct { + __le16 ip_id; /* IP id */ + __le16 csum; /* Packet Checksum */ + } csum_ip; + } hi_dword; + } lower; + struct { + __le32 status_error; /* ext status/error */ + __le16 length0; /* length of buffer 0 */ + __le16 vlan; /* VLAN tag */ + } middle; + struct { + __le16 header_status; + /* length of buffers 1-3 */ + __le16 length[PS_PAGE_BUFFERS]; + } upper; + __le64 reserved; + } wb; /* writeback */ +}; + +/* Transmit Descriptor */ +struct e1000_tx_desc { + __le64 buffer_addr; /* Address of the descriptor's data buffer */ + union { + __le32 data; + struct { + __le16 length; /* Data buffer length */ + u8 cso; /* Checksum offset */ + u8 cmd; /* Descriptor control */ + } flags; + } lower; + union { + __le32 data; + struct { + u8 status; /* Descriptor status */ + u8 css; /* Checksum start */ + __le16 special; + } fields; + } upper; +}; + +/* Offload Context Descriptor */ +struct e1000_context_desc { + union { + __le32 ip_config; + struct { + u8 ipcss; /* IP checksum start */ + u8 ipcso; /* IP checksum offset */ + __le16 ipcse; /* IP checksum end */ + } ip_fields; + } lower_setup; + union { + __le32 tcp_config; + struct { + u8 tucss; /* TCP checksum start */ + u8 tucso; /* TCP checksum offset */ + __le16 tucse; /* TCP checksum end */ + } tcp_fields; + } upper_setup; + __le32 cmd_and_length; + union { + __le32 data; + struct { + u8 status; /* Descriptor status */ + u8 hdr_len; /* Header length */ + __le16 mss; /* Maximum segment size */ + } fields; + } tcp_seg_setup; +}; + +/* Offload data descriptor */ +struct e1000_data_desc { + __le64 buffer_addr; /* Address of the descriptor's buffer address */ + union { + __le32 data; + struct { + __le16 length; /* Data buffer length */ + u8 typ_len_ext; + u8 cmd; + } flags; + } lower; + union { + __le32 data; + struct { + 
u8 status; /* Descriptor status */ + u8 popts; /* Packet Options */ + __le16 special; + } fields; + } upper; +}; + +/* Statistics counters collected by the MAC */ +struct e1000_hw_stats { + u64 crcerrs; + u64 algnerrc; + u64 symerrs; + u64 rxerrc; + u64 mpc; + u64 scc; + u64 ecol; + u64 mcc; + u64 latecol; + u64 colc; + u64 dc; + u64 tncrs; + u64 sec; + u64 cexterr; + u64 rlec; + u64 xonrxc; + u64 xontxc; + u64 xoffrxc; + u64 xofftxc; + u64 fcruc; + u64 prc64; + u64 prc127; + u64 prc255; + u64 prc511; + u64 prc1023; + u64 prc1522; + u64 gprc; + u64 bprc; + u64 mprc; + u64 gptc; + u64 gorc; + u64 gotc; + u64 rnbc; + u64 ruc; + u64 rfc; + u64 roc; + u64 rjc; + u64 mgprc; + u64 mgpdc; + u64 mgptc; + u64 tor; + u64 tot; + u64 tpr; + u64 tpt; + u64 ptc64; + u64 ptc127; + u64 ptc255; + u64 ptc511; + u64 ptc1023; + u64 ptc1522; + u64 mptc; + u64 bptc; + u64 tsctc; + u64 tsctfc; + u64 iac; + u64 icrxptc; + u64 icrxatc; + u64 ictxptc; + u64 ictxatc; + u64 ictxqec; + u64 ictxqmtc; + u64 icrxdmtc; + u64 icrxoc; + u64 cbtmpc; + u64 htdpmc; + u64 cbrdpc; + u64 cbrmpc; + u64 rpthc; + u64 hgptc; + u64 htcbdpc; + u64 hgorc; + u64 hgotc; + u64 lenerrs; + u64 scvpc; + u64 hrmpc; + u64 doosync; + u64 o2bgptc; + u64 o2bspc; + u64 b2ospc; + u64 b2ogprc; +}; + +struct e1000_phy_stats { + u32 idle_errors; + u32 receive_errors; +}; + +struct e1000_host_mng_dhcp_cookie { + u32 signature; + u8 status; + u8 reserved0; + u16 vlan_id; + u32 reserved1; + u16 reserved2; + u8 reserved3; + u8 checksum; +}; + +/* Host Interface "Rev 1" */ +struct e1000_host_command_header { + u8 command_id; + u8 command_length; + u8 command_options; + u8 checksum; +}; + +#define E1000_HI_MAX_DATA_LENGTH 252 +struct e1000_host_command_info { + struct e1000_host_command_header command_header; + u8 command_data[E1000_HI_MAX_DATA_LENGTH]; +}; + +/* Host Interface "Rev 2" */ +struct e1000_host_mng_command_header { + u8 command_id; + u8 checksum; + u16 reserved1; + u16 reserved2; + u16 command_length; +}; + +#define 
E1000_HI_MAX_MNG_DATA_LENGTH 0x6F8 +struct e1000_host_mng_command_info { + struct e1000_host_mng_command_header command_header; + u8 command_data[E1000_HI_MAX_MNG_DATA_LENGTH]; +}; + +#include "e1000_mac.h" +#include "e1000_phy.h" +#include "e1000_nvm.h" +#include "e1000_manage.h" +#include "e1000_mbx.h" + +/* Function pointers for the MAC. */ +struct e1000_mac_operations { + s32 (*init_params)(struct e1000_hw *); + s32 (*id_led_init)(struct e1000_hw *); + s32 (*blink_led)(struct e1000_hw *); + bool (*check_mng_mode)(struct e1000_hw *); + s32 (*check_for_link)(struct e1000_hw *); + s32 (*cleanup_led)(struct e1000_hw *); + void (*clear_hw_cntrs)(struct e1000_hw *); + void (*clear_vfta)(struct e1000_hw *); + s32 (*get_bus_info)(struct e1000_hw *); + void (*set_lan_id)(struct e1000_hw *); + s32 (*get_link_up_info)(struct e1000_hw *, u16 *, u16 *); + s32 (*led_on)(struct e1000_hw *); + s32 (*led_off)(struct e1000_hw *); + void (*update_mc_addr_list)(struct e1000_hw *, u8 *, u32); + s32 (*reset_hw)(struct e1000_hw *); + s32 (*init_hw)(struct e1000_hw *); + void (*shutdown_serdes)(struct e1000_hw *); + void (*power_up_serdes)(struct e1000_hw *); + s32 (*setup_link)(struct e1000_hw *); + s32 (*setup_physical_interface)(struct e1000_hw *); + s32 (*setup_led)(struct e1000_hw *); + void (*write_vfta)(struct e1000_hw *, u32, u32); + void (*config_collision_dist)(struct e1000_hw *); + int (*rar_set)(struct e1000_hw *, u8*, u32); + s32 (*read_mac_addr)(struct e1000_hw *); + s32 (*validate_mdi_setting)(struct e1000_hw *); + s32 (*get_thermal_sensor_data)(struct e1000_hw *); + s32 (*init_thermal_sensor_thresh)(struct e1000_hw *); + s32 (*acquire_swfw_sync)(struct e1000_hw *, u16); + void (*release_swfw_sync)(struct e1000_hw *, u16); +}; + +/* When to use various PHY register access functions: + * + * Func Caller + * Function Does Does When to use + * ~~~~~~~~~~~~ ~~~~~ ~~~~~~ ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + * X_reg L,P,A n/a for simple PHY reg accesses + * X_reg_locked P,A L for 
multiple accesses of different regs + * on different pages + * X_reg_page A L,P for multiple accesses of different regs + * on the same page + * + * Where X=[read|write], L=locking, P=sets page, A=register access + * + */ +struct e1000_phy_operations { + s32 (*init_params)(struct e1000_hw *); + s32 (*acquire)(struct e1000_hw *); + s32 (*check_polarity)(struct e1000_hw *); + s32 (*check_reset_block)(struct e1000_hw *); + s32 (*commit)(struct e1000_hw *); + s32 (*force_speed_duplex)(struct e1000_hw *); + s32 (*get_cfg_done)(struct e1000_hw *hw); + s32 (*get_cable_length)(struct e1000_hw *); + s32 (*get_info)(struct e1000_hw *); + s32 (*set_page)(struct e1000_hw *, u16); + s32 (*read_reg)(struct e1000_hw *, u32, u16 *); + s32 (*read_reg_locked)(struct e1000_hw *, u32, u16 *); + s32 (*read_reg_page)(struct e1000_hw *, u32, u16 *); + void (*release)(struct e1000_hw *); + s32 (*reset)(struct e1000_hw *); + s32 (*set_d0_lplu_state)(struct e1000_hw *, bool); + s32 (*set_d3_lplu_state)(struct e1000_hw *, bool); + s32 (*write_reg)(struct e1000_hw *, u32, u16); + s32 (*write_reg_locked)(struct e1000_hw *, u32, u16); + s32 (*write_reg_page)(struct e1000_hw *, u32, u16); + void (*power_up)(struct e1000_hw *); + void (*power_down)(struct e1000_hw *); + s32 (*read_i2c_byte)(struct e1000_hw *, u8, u8, u8 *); + s32 (*write_i2c_byte)(struct e1000_hw *, u8, u8, u8); +}; + +/* Function pointers for the NVM. 
*/ +struct e1000_nvm_operations { + s32 (*init_params)(struct e1000_hw *); + s32 (*acquire)(struct e1000_hw *); + s32 (*read)(struct e1000_hw *, u16, u16, u16 *); + void (*release)(struct e1000_hw *); + void (*reload)(struct e1000_hw *); + s32 (*update)(struct e1000_hw *); + s32 (*valid_led_default)(struct e1000_hw *, u16 *); + s32 (*validate)(struct e1000_hw *); + s32 (*write)(struct e1000_hw *, u16, u16, u16 *); +}; + +#define E1000_MAX_SENSORS 3 + +struct e1000_thermal_diode_data { + u8 location; + u8 temp; + u8 caution_thresh; + u8 max_op_thresh; +}; + +struct e1000_thermal_sensor_data { + struct e1000_thermal_diode_data sensor[E1000_MAX_SENSORS]; +}; + +struct e1000_mac_info { + struct e1000_mac_operations ops; + u8 addr[ETH_ADDR_LEN]; + u8 perm_addr[ETH_ADDR_LEN]; + + enum e1000_mac_type type; + + u32 collision_delta; + u32 ledctl_default; + u32 ledctl_mode1; + u32 ledctl_mode2; + u32 mc_filter_type; + u32 tx_packet_delta; + u32 txcw; + + u16 current_ifs_val; + u16 ifs_max_val; + u16 ifs_min_val; + u16 ifs_ratio; + u16 ifs_step_size; + u16 mta_reg_count; + u16 uta_reg_count; + + /* Maximum size of the MTA register table in all supported adapters */ +#define MAX_MTA_REG 128 + u32 mta_shadow[MAX_MTA_REG]; + u16 rar_entry_count; + + u8 forced_speed_duplex; + + bool adaptive_ifs; + bool has_fwsm; + bool arc_subsystem_valid; + bool asf_firmware_present; + bool autoneg; + bool autoneg_failed; + bool get_link_status; + bool in_ifs_mode; + enum e1000_serdes_link_state serdes_link_state; + bool serdes_has_link; + bool tx_pkt_filtering; + struct e1000_thermal_sensor_data thermal_sensor_data; +}; + +struct e1000_phy_info { + struct e1000_phy_operations ops; + enum e1000_phy_type type; + + enum e1000_1000t_rx_status local_rx; + enum e1000_1000t_rx_status remote_rx; + enum e1000_ms_type ms_type; + enum e1000_ms_type original_ms_type; + enum e1000_rev_polarity cable_polarity; + enum e1000_smart_speed smart_speed; + + u32 addr; + u32 id; + u32 reset_delay_us; /* in usec */ 
+ u32 revision; + + enum e1000_media_type media_type; + + u16 autoneg_advertised; + u16 autoneg_mask; + u16 cable_length; + u16 max_cable_length; + u16 min_cable_length; + + u8 mdix; + + bool disable_polarity_correction; + bool is_mdix; + bool polarity_correction; + bool reset_disable; + bool speed_downgraded; + bool autoneg_wait_to_complete; +}; + +struct e1000_nvm_info { + struct e1000_nvm_operations ops; + enum e1000_nvm_type type; + enum e1000_nvm_override override; + + u32 flash_bank_size; + u32 flash_base_addr; + + u16 word_size; + u16 delay_usec; + u16 address_bits; + u16 opcode_bits; + u16 page_size; +}; + +struct e1000_bus_info { + enum e1000_bus_type type; + enum e1000_bus_speed speed; + enum e1000_bus_width width; + + u16 func; + u16 pci_cmd_word; +}; + +struct e1000_fc_info { + u32 high_water; /* Flow control high-water mark */ + u32 low_water; /* Flow control low-water mark */ + u16 pause_time; /* Flow control pause timer */ + u16 refresh_time; /* Flow control refresh timer */ + bool send_xon; /* Flow control send XON */ + bool strict_ieee; /* Strict IEEE mode */ + enum e1000_fc_mode current_mode; /* FC mode in effect */ + enum e1000_fc_mode requested_mode; /* FC mode requested by caller */ +}; + +struct e1000_mbx_operations { + s32 (*init_params)(struct e1000_hw *hw); + s32 (*read)(struct e1000_hw *, u32 *, u16, u16); + s32 (*write)(struct e1000_hw *, u32 *, u16, u16); + s32 (*read_posted)(struct e1000_hw *, u32 *, u16, u16); + s32 (*write_posted)(struct e1000_hw *, u32 *, u16, u16); + s32 (*check_for_msg)(struct e1000_hw *, u16); + s32 (*check_for_ack)(struct e1000_hw *, u16); + s32 (*check_for_rst)(struct e1000_hw *, u16); +}; + +struct e1000_mbx_stats { + u32 msgs_tx; + u32 msgs_rx; + + u32 acks; + u32 reqs; + u32 rsts; +}; + +struct e1000_mbx_info { + struct e1000_mbx_operations ops; + struct e1000_mbx_stats stats; + u32 timeout; + u32 usec_delay; + u16 size; +}; + +struct e1000_dev_spec_82575 { + bool sgmii_active; + bool global_device_reset; + 
bool eee_disable; + bool module_plugged; + bool clear_semaphore_once; + u32 mtu; + struct sfp_e1000_flags eth_flags; + u8 media_port; + bool media_changed; +}; + +struct e1000_dev_spec_vf { + u32 vf_number; + u32 v2p_mailbox; +}; + +struct e1000_hw { + void *back; + + u8 __iomem *hw_addr; + u8 __iomem *flash_address; + unsigned long io_base; + + struct e1000_mac_info mac; + struct e1000_fc_info fc; + struct e1000_phy_info phy; + struct e1000_nvm_info nvm; + struct e1000_bus_info bus; + struct e1000_mbx_info mbx; + struct e1000_host_mng_dhcp_cookie mng_cookie; + + union { + struct e1000_dev_spec_82575 _82575; + struct e1000_dev_spec_vf vf; + } dev_spec; + + u16 device_id; + u16 subsystem_vendor_id; + u16 subsystem_device_id; + u16 vendor_id; + + u8 revision_id; +}; + +#include "e1000_82575.h" +#include "e1000_i210.h" + +/* These functions must be implemented by drivers */ +s32 e1000_read_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value); +s32 e1000_write_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value); +void e1000_read_pci_cfg(struct e1000_hw *hw, u32 reg, u16 *value); +void e1000_write_pci_cfg(struct e1000_hw *hw, u32 reg, u16 *value); + +#endif diff --git a/drivers/staging/igb_avb/e1000_i210.c b/drivers/staging/igb_avb/e1000_i210.c new file mode 100644 index 0000000000000..7e32fd112f332 --- /dev/null +++ b/drivers/staging/igb_avb/e1000_i210.c @@ -0,0 +1,993 @@ +/******************************************************************************* + + Intel(R) Gigabit Ethernet Linux driver + Copyright(c) 2007-2015 Intel Corporation. + + This program is free software; you can redistribute it and/or modify it + under the terms and conditions of the GNU General Public License, + version 2, as published by the Free Software Foundation. + + This program is distributed in the hope it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU General Public License for + more details. + + The full GNU General Public License is included in this distribution in + the file called "COPYING". + + Contact Information: + Linux NICS + e1000-devel Mailing List + Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + +*******************************************************************************/ + +#include "e1000_api.h" + + +static s32 e1000_acquire_nvm_i210(struct e1000_hw *hw); +static void e1000_release_nvm_i210(struct e1000_hw *hw); +static s32 e1000_get_hw_semaphore_i210(struct e1000_hw *hw); +static s32 e1000_write_nvm_srwr(struct e1000_hw *hw, u16 offset, u16 words, + u16 *data); +static s32 e1000_pool_flash_update_done_i210(struct e1000_hw *hw); +static s32 e1000_valid_led_default_i210(struct e1000_hw *hw, u16 *data); + +/** + * e1000_acquire_nvm_i210 - Request for access to EEPROM + * @hw: pointer to the HW structure + * + * Acquire the necessary semaphores for exclusive access to the EEPROM. + * Set the EEPROM access request bit and wait for EEPROM access grant bit. + * Return successful if access grant bit set, else clear the request for + * EEPROM access and return -E1000_ERR_NVM (-1). + **/ +static s32 e1000_acquire_nvm_i210(struct e1000_hw *hw) +{ + s32 ret_val; + + DEBUGFUNC("e1000_acquire_nvm_i210"); + + ret_val = e1000_acquire_swfw_sync_i210(hw, E1000_SWFW_EEP_SM); + + return ret_val; +} + +/** + * e1000_release_nvm_i210 - Release exclusive access to EEPROM + * @hw: pointer to the HW structure + * + * Stop any current commands to the EEPROM and clear the EEPROM request bit, + * then release the semaphores acquired. 
+ **/ +static void e1000_release_nvm_i210(struct e1000_hw *hw) +{ + DEBUGFUNC("e1000_release_nvm_i210"); + + e1000_release_swfw_sync_i210(hw, E1000_SWFW_EEP_SM); +} + +/** + * e1000_acquire_swfw_sync_i210 - Acquire SW/FW semaphore + * @hw: pointer to the HW structure + * @mask: specifies which semaphore to acquire + * + * Acquire the SW/FW semaphore to access the PHY or NVM. The mask + * will also specify which port we're acquiring the lock for. + **/ +s32 e1000_acquire_swfw_sync_i210(struct e1000_hw *hw, u16 mask) +{ + u32 swfw_sync; + u32 swmask = mask; + u32 fwmask = mask << 16; + s32 ret_val = E1000_SUCCESS; + s32 i = 0, timeout = 200; /* FIXME: find real value to use here */ + + DEBUGFUNC("e1000_acquire_swfw_sync_i210"); + + while (i < timeout) { + if (e1000_get_hw_semaphore_i210(hw)) { + ret_val = -E1000_ERR_SWFW_SYNC; + goto out; + } + + swfw_sync = E1000_READ_REG(hw, E1000_SW_FW_SYNC); + if (!(swfw_sync & (fwmask | swmask))) + break; + + /* + * Firmware currently using resource (fwmask) + * or other software thread using resource (swmask) + */ + e1000_put_hw_semaphore_generic(hw); + msec_delay_irq(5); + i++; + } + + if (i == timeout) { + DEBUGOUT("Driver can't access resource, SW_FW_SYNC timeout.\n"); + ret_val = -E1000_ERR_SWFW_SYNC; + goto out; + } + + swfw_sync |= swmask; + E1000_WRITE_REG(hw, E1000_SW_FW_SYNC, swfw_sync); + + e1000_put_hw_semaphore_generic(hw); + +out: + return ret_val; +} + +/** + * e1000_release_swfw_sync_i210 - Release SW/FW semaphore + * @hw: pointer to the HW structure + * @mask: specifies which semaphore to acquire + * + * Release the SW/FW semaphore used to access the PHY or NVM. The mask + * will also specify which port we're releasing the lock for. 
+ **/ +void e1000_release_swfw_sync_i210(struct e1000_hw *hw, u16 mask) +{ + u32 swfw_sync; + + DEBUGFUNC("e1000_release_swfw_sync_i210"); + + while (e1000_get_hw_semaphore_i210(hw) != E1000_SUCCESS) + ; /* Empty */ + + swfw_sync = E1000_READ_REG(hw, E1000_SW_FW_SYNC); + swfw_sync &= ~mask; + E1000_WRITE_REG(hw, E1000_SW_FW_SYNC, swfw_sync); + + e1000_put_hw_semaphore_generic(hw); +} + +/** + * e1000_get_hw_semaphore_i210 - Acquire hardware semaphore + * @hw: pointer to the HW structure + * + * Acquire the HW semaphore to access the PHY or NVM + **/ +static s32 e1000_get_hw_semaphore_i210(struct e1000_hw *hw) +{ + u32 swsm; + s32 timeout = hw->nvm.word_size + 1; + s32 i = 0; + + DEBUGFUNC("e1000_get_hw_semaphore_i210"); + + /* Get the SW semaphore */ + while (i < timeout) { + swsm = E1000_READ_REG(hw, E1000_SWSM); + if (!(swsm & E1000_SWSM_SMBI)) + break; + + usec_delay(50); + i++; + } + + if (i == timeout) { + /* In rare circumstances, the SW semaphore may already be held + * unintentionally. Clear the semaphore once before giving up. + */ + if (hw->dev_spec._82575.clear_semaphore_once) { + hw->dev_spec._82575.clear_semaphore_once = false; + e1000_put_hw_semaphore_generic(hw); + for (i = 0; i < timeout; i++) { + swsm = E1000_READ_REG(hw, E1000_SWSM); + if (!(swsm & E1000_SWSM_SMBI)) + break; + + usec_delay(50); + } + } + + /* If we do not have the semaphore here, we have to give up. */ + if (i == timeout) { + DEBUGOUT("Driver can't access device - SMBI bit is set.\n"); + return -E1000_ERR_NVM; + } + } + + /* Get the FW semaphore. 
*/ + for (i = 0; i < timeout; i++) { + swsm = E1000_READ_REG(hw, E1000_SWSM); + E1000_WRITE_REG(hw, E1000_SWSM, swsm | E1000_SWSM_SWESMBI); + + /* Semaphore acquired if bit latched */ + if (E1000_READ_REG(hw, E1000_SWSM) & E1000_SWSM_SWESMBI) + break; + + usec_delay(50); + } + + if (i == timeout) { + /* Release semaphores */ + e1000_put_hw_semaphore_generic(hw); + DEBUGOUT("Driver can't access the NVM\n"); + return -E1000_ERR_NVM; + } + + return E1000_SUCCESS; +} + +/** + * e1000_read_nvm_srrd_i210 - Reads Shadow Ram using EERD register + * @hw: pointer to the HW structure + * @offset: offset of word in the Shadow Ram to read + * @words: number of words to read + * @data: word read from the Shadow Ram + * + * Reads a 16 bit word from the Shadow Ram using the EERD register. + * Uses necessary synchronization semaphores. + **/ +s32 e1000_read_nvm_srrd_i210(struct e1000_hw *hw, u16 offset, u16 words, + u16 *data) +{ + s32 status = E1000_SUCCESS; + u16 i, count; + + DEBUGFUNC("e1000_read_nvm_srrd_i210"); + + /* We cannot hold synchronization semaphores for too long, + * because of forceful takeover procedure. However it is more efficient + * to read in bursts than synchronizing access for each word. */ + for (i = 0; i < words; i += E1000_EERD_EEWR_MAX_COUNT) { + count = (words - i) / E1000_EERD_EEWR_MAX_COUNT > 0 ? + E1000_EERD_EEWR_MAX_COUNT : (words - i); + if (hw->nvm.ops.acquire(hw) == E1000_SUCCESS) { + status = e1000_read_nvm_eerd(hw, offset, count, + data + i); + hw->nvm.ops.release(hw); + } else { + status = E1000_ERR_SWFW_SYNC; + } + + if (status != E1000_SUCCESS) + break; + } + + return status; +} + +/** + * e1000_write_nvm_srwr_i210 - Write to Shadow RAM using EEWR + * @hw: pointer to the HW structure + * @offset: offset within the Shadow RAM to be written to + * @words: number of words to write + * @data: 16 bit word(s) to be written to the Shadow RAM + * + * Writes data to Shadow RAM at offset using EEWR register. 
+ * + * If e1000_update_nvm_checksum is not called after this function , the + * data will not be committed to FLASH and also Shadow RAM will most likely + * contain an invalid checksum. + * + * If error code is returned, data and Shadow RAM may be inconsistent - buffer + * partially written. + **/ +s32 e1000_write_nvm_srwr_i210(struct e1000_hw *hw, u16 offset, u16 words, + u16 *data) +{ + s32 status = E1000_SUCCESS; + u16 i, count; + + DEBUGFUNC("e1000_write_nvm_srwr_i210"); + + /* We cannot hold synchronization semaphores for too long, + * because of forceful takeover procedure. However it is more efficient + * to write in bursts than synchronizing access for each word. */ + for (i = 0; i < words; i += E1000_EERD_EEWR_MAX_COUNT) { + count = (words - i) / E1000_EERD_EEWR_MAX_COUNT > 0 ? + E1000_EERD_EEWR_MAX_COUNT : (words - i); + if (hw->nvm.ops.acquire(hw) == E1000_SUCCESS) { + status = e1000_write_nvm_srwr(hw, offset, count, + data + i); + hw->nvm.ops.release(hw); + } else { + status = E1000_ERR_SWFW_SYNC; + } + + if (status != E1000_SUCCESS) + break; + } + + return status; +} + +/** + * e1000_write_nvm_srwr - Write to Shadow Ram using EEWR + * @hw: pointer to the HW structure + * @offset: offset within the Shadow Ram to be written to + * @words: number of words to write + * @data: 16 bit word(s) to be written to the Shadow Ram + * + * Writes data to Shadow Ram at offset using EEWR register. + * + * If e1000_update_nvm_checksum is not called after this function , the + * Shadow Ram will most likely contain an invalid checksum. + **/ +static s32 e1000_write_nvm_srwr(struct e1000_hw *hw, u16 offset, u16 words, + u16 *data) +{ + struct e1000_nvm_info *nvm = &hw->nvm; + u32 i, k, eewr = 0; + u32 attempts = 100000; + s32 ret_val = E1000_SUCCESS; + + DEBUGFUNC("e1000_write_nvm_srwr"); + + /* + * A check for invalid values: offset too large, too many words, + * too many words for the offset, and not enough words. 
+ */ + if ((offset >= nvm->word_size) || (words > (nvm->word_size - offset)) || + (words == 0)) { + DEBUGOUT("nvm parameter(s) out of bounds\n"); + ret_val = -E1000_ERR_NVM; + goto out; + } + + for (i = 0; i < words; i++) { + eewr = ((offset+i) << E1000_NVM_RW_ADDR_SHIFT) | + (data[i] << E1000_NVM_RW_REG_DATA) | + E1000_NVM_RW_REG_START; + + E1000_WRITE_REG(hw, E1000_SRWR, eewr); + + for (k = 0; k < attempts; k++) { + if (E1000_NVM_RW_REG_DONE & + E1000_READ_REG(hw, E1000_SRWR)) { + ret_val = E1000_SUCCESS; + break; + } + usec_delay(5); + } + + if (ret_val != E1000_SUCCESS) { + DEBUGOUT("Shadow RAM write EEWR timed out\n"); + break; + } + } + +out: + return ret_val; +} + +/** e1000_read_invm_word_i210 - Reads OTP + * @hw: pointer to the HW structure + * @address: the word address (aka eeprom offset) to read + * @data: pointer to the data read + * + * Reads 16-bit words from the OTP. Return error when the word is not + * stored in OTP. + **/ +static s32 e1000_read_invm_word_i210(struct e1000_hw *hw, u8 address, u16 *data) +{ + s32 status = -E1000_ERR_INVM_VALUE_NOT_FOUND; + u32 invm_dword; + u16 i; + u8 record_type, word_address; + + DEBUGFUNC("e1000_read_invm_word_i210"); + + for (i = 0; i < E1000_INVM_SIZE; i++) { + invm_dword = E1000_READ_REG(hw, E1000_INVM_DATA_REG(i)); + /* Get record type */ + record_type = INVM_DWORD_TO_RECORD_TYPE(invm_dword); + if (record_type == E1000_INVM_UNINITIALIZED_STRUCTURE) + break; + if (record_type == E1000_INVM_CSR_AUTOLOAD_STRUCTURE) + i += E1000_INVM_CSR_AUTOLOAD_DATA_SIZE_IN_DWORDS; + if (record_type == E1000_INVM_RSA_KEY_SHA256_STRUCTURE) + i += E1000_INVM_RSA_KEY_SHA256_DATA_SIZE_IN_DWORDS; + if (record_type == E1000_INVM_WORD_AUTOLOAD_STRUCTURE) { + word_address = INVM_DWORD_TO_WORD_ADDRESS(invm_dword); + if (word_address == address) { + *data = INVM_DWORD_TO_WORD_DATA(invm_dword); + DEBUGOUT2("Read INVM Word 0x%02x = %x", + address, *data); + status = E1000_SUCCESS; + break; + } + } + } + if (status != E1000_SUCCESS) + 
DEBUGOUT1("Requested word 0x%02x not found in OTP\n", address); + return status; +} + +/** e1000_read_invm_i210 - Read invm wrapper function for I210/I211 + * @hw: pointer to the HW structure + * @address: the word address (aka eeprom offset) to read + * @data: pointer to the data read + * + * Wrapper function to return data formerly found in the NVM. + **/ +static s32 e1000_read_invm_i210(struct e1000_hw *hw, u16 offset, + u16 E1000_UNUSEDARG words, u16 *data) +{ + s32 ret_val = E1000_SUCCESS; + + DEBUGFUNC("e1000_read_invm_i210"); + + /* Only the MAC addr is required to be present in the iNVM */ + switch (offset) { + case NVM_MAC_ADDR: + ret_val = e1000_read_invm_word_i210(hw, (u8)offset, &data[0]); + ret_val |= e1000_read_invm_word_i210(hw, (u8)offset+1, + &data[1]); + ret_val |= e1000_read_invm_word_i210(hw, (u8)offset+2, + &data[2]); + if (ret_val != E1000_SUCCESS) + DEBUGOUT("MAC Addr not found in iNVM\n"); + break; + case NVM_INIT_CTRL_2: + ret_val = e1000_read_invm_word_i210(hw, (u8)offset, data); + if (ret_val != E1000_SUCCESS) { + *data = NVM_INIT_CTRL_2_DEFAULT_I211; + ret_val = E1000_SUCCESS; + } + break; + case NVM_INIT_CTRL_4: + ret_val = e1000_read_invm_word_i210(hw, (u8)offset, data); + if (ret_val != E1000_SUCCESS) { + *data = NVM_INIT_CTRL_4_DEFAULT_I211; + ret_val = E1000_SUCCESS; + } + break; + case NVM_LED_1_CFG: + ret_val = e1000_read_invm_word_i210(hw, (u8)offset, data); + if (ret_val != E1000_SUCCESS) { + *data = NVM_LED_1_CFG_DEFAULT_I211; + ret_val = E1000_SUCCESS; + } + break; + case NVM_LED_0_2_CFG: + ret_val = e1000_read_invm_word_i210(hw, (u8)offset, data); + if (ret_val != E1000_SUCCESS) { + *data = NVM_LED_0_2_CFG_DEFAULT_I211; + ret_val = E1000_SUCCESS; + } + break; + case NVM_ID_LED_SETTINGS: + ret_val = e1000_read_invm_word_i210(hw, (u8)offset, data); + if (ret_val != E1000_SUCCESS) { + *data = ID_LED_RESERVED_FFFF; + ret_val = E1000_SUCCESS; + } + break; + case NVM_SUB_DEV_ID: + *data = hw->subsystem_device_id; + break; + case 
NVM_SUB_VEN_ID: + *data = hw->subsystem_vendor_id; + break; + case NVM_DEV_ID: + *data = hw->device_id; + break; + case NVM_VEN_ID: + *data = hw->vendor_id; + break; + default: + DEBUGOUT1("NVM word 0x%02x is not mapped.\n", offset); + *data = NVM_RESERVED_WORD; + break; + } + return ret_val; +} + +/** + * e1000_read_invm_version - Reads iNVM version and image type + * @hw: pointer to the HW structure + * @invm_ver: version structure for the version read + * + * Reads iNVM version and image type. + **/ +s32 e1000_read_invm_version(struct e1000_hw *hw, + struct e1000_fw_version *invm_ver) +{ + u32 *record = NULL; + u32 *next_record = NULL; + u32 i = 0; + u32 invm_dword = 0; + u32 invm_blocks = E1000_INVM_SIZE - (E1000_INVM_ULT_BYTES_SIZE / + E1000_INVM_RECORD_SIZE_IN_BYTES); + u32 buffer[E1000_INVM_SIZE]; + s32 status = -E1000_ERR_INVM_VALUE_NOT_FOUND; + u16 version = 0; + + DEBUGFUNC("e1000_read_invm_version"); + + /* Read iNVM memory */ + for (i = 0; i < E1000_INVM_SIZE; i++) { + invm_dword = E1000_READ_REG(hw, E1000_INVM_DATA_REG(i)); + buffer[i] = invm_dword; + } + + /* Read version number */ + for (i = 1; i < invm_blocks; i++) { + record = &buffer[invm_blocks - i]; + next_record = &buffer[invm_blocks - i + 1]; + + /* Check if we have first version location used */ + if ((i == 1) && ((*record & E1000_INVM_VER_FIELD_ONE) == 0)) { + version = 0; + status = E1000_SUCCESS; + break; + } + /* Check if we have second version location used */ + else if ((i == 1) && + ((*record & E1000_INVM_VER_FIELD_TWO) == 0)) { + version = (*record & E1000_INVM_VER_FIELD_ONE) >> 3; + status = E1000_SUCCESS; + break; + } + /* + * Check if we have odd version location + * used and it is the last one used + */ + else if ((((*record & E1000_INVM_VER_FIELD_ONE) == 0) && + ((*record & 0x3) == 0)) || (((*record & 0x3) != 0) && + (i != 1))) { + version = (*next_record & E1000_INVM_VER_FIELD_TWO) + >> 13; + status = E1000_SUCCESS; + break; + } + /* + * Check if we have even version location + 
* used and it is the last one used + */ + else if (((*record & E1000_INVM_VER_FIELD_TWO) == 0) && + ((*record & 0x3) == 0)) { + version = (*record & E1000_INVM_VER_FIELD_ONE) >> 3; + status = E1000_SUCCESS; + break; + } + } + + if (status == E1000_SUCCESS) { + invm_ver->invm_major = (version & E1000_INVM_MAJOR_MASK) + >> E1000_INVM_MAJOR_SHIFT; + invm_ver->invm_minor = version & E1000_INVM_MINOR_MASK; + } + /* Read Image Type */ + for (i = 1; i < invm_blocks; i++) { + record = &buffer[invm_blocks - i]; + next_record = &buffer[invm_blocks - i + 1]; + + /* Check if we have image type in first location used */ + if ((i == 1) && ((*record & E1000_INVM_IMGTYPE_FIELD) == 0)) { + invm_ver->invm_img_type = 0; + status = E1000_SUCCESS; + break; + } + /* Check if we have image type in first location used */ + else if ((((*record & 0x3) == 0) && + ((*record & E1000_INVM_IMGTYPE_FIELD) == 0)) || + ((((*record & 0x3) != 0) && (i != 1)))) { + invm_ver->invm_img_type = + (*next_record & E1000_INVM_IMGTYPE_FIELD) >> 23; + status = E1000_SUCCESS; + break; + } + } + return status; +} + +/** + * e1000_validate_nvm_checksum_i210 - Validate EEPROM checksum + * @hw: pointer to the HW structure + * + * Calculates the EEPROM checksum by reading/adding each word of the EEPROM + * and then verifies that the sum of the EEPROM is equal to 0xBABA. + **/ +s32 e1000_validate_nvm_checksum_i210(struct e1000_hw *hw) +{ + s32 status = E1000_SUCCESS; + s32 (*read_op_ptr)(struct e1000_hw *, u16, u16, u16 *); + + DEBUGFUNC("e1000_validate_nvm_checksum_i210"); + + if (hw->nvm.ops.acquire(hw) == E1000_SUCCESS) { + + /* + * Replace the read function with semaphore grabbing with + * the one that skips this for a while. + * We have semaphore taken already here. + */ + read_op_ptr = hw->nvm.ops.read; + hw->nvm.ops.read = e1000_read_nvm_eerd; + + status = e1000_validate_nvm_checksum_generic(hw); + + /* Revert original read operation. 
*/ + hw->nvm.ops.read = read_op_ptr; + + hw->nvm.ops.release(hw); + } else { + status = E1000_ERR_SWFW_SYNC; + } + + return status; +} + + +/** + * e1000_update_nvm_checksum_i210 - Update EEPROM checksum + * @hw: pointer to the HW structure + * + * Updates the EEPROM checksum by reading/adding each word of the EEPROM + * up to the checksum. Then calculates the EEPROM checksum and writes the + * value to the EEPROM. Next commit EEPROM data onto the Flash. + **/ +s32 e1000_update_nvm_checksum_i210(struct e1000_hw *hw) +{ + s32 ret_val; + u16 checksum = 0; + u16 i, nvm_data; + + DEBUGFUNC("e1000_update_nvm_checksum_i210"); + + /* + * Read the first word from the EEPROM. If this times out or fails, do + * not continue or we could be in for a very long wait while every + * EEPROM read fails + */ + ret_val = e1000_read_nvm_eerd(hw, 0, 1, &nvm_data); + if (ret_val != E1000_SUCCESS) { + DEBUGOUT("EEPROM read failed\n"); + goto out; + } + + if (hw->nvm.ops.acquire(hw) == E1000_SUCCESS) { + /* + * Do not use hw->nvm.ops.write, hw->nvm.ops.read + * because we do not want to take the synchronization + * semaphores twice here. + */ + + for (i = 0; i < NVM_CHECKSUM_REG; i++) { + ret_val = e1000_read_nvm_eerd(hw, i, 1, &nvm_data); + if (ret_val) { + hw->nvm.ops.release(hw); + DEBUGOUT("NVM Read Error while updating checksum.\n"); + goto out; + } + checksum += nvm_data; + } + checksum = (u16) NVM_SUM - checksum; + ret_val = e1000_write_nvm_srwr(hw, NVM_CHECKSUM_REG, 1, + &checksum); + if (ret_val != E1000_SUCCESS) { + hw->nvm.ops.release(hw); + DEBUGOUT("NVM Write Error while updating checksum.\n"); + goto out; + } + + hw->nvm.ops.release(hw); + + ret_val = e1000_update_flash_i210(hw); + } else { + ret_val = E1000_ERR_SWFW_SYNC; + } +out: + return ret_val; +} + +/** + * e1000_get_flash_presence_i210 - Check if flash device is detected. 
+ * @hw: pointer to the HW structure + * + **/ +bool e1000_get_flash_presence_i210(struct e1000_hw *hw) +{ + u32 eec = 0; + bool ret_val = false; + + DEBUGFUNC("e1000_get_flash_presence_i210"); + + eec = E1000_READ_REG(hw, E1000_EECD); + + if (eec & E1000_EECD_FLASH_DETECTED_I210) + ret_val = true; + + return ret_val; +} + +/** + * e1000_update_flash_i210 - Commit EEPROM to the flash + * @hw: pointer to the HW structure + * + **/ +s32 e1000_update_flash_i210(struct e1000_hw *hw) +{ + s32 ret_val; + u32 flup; + + DEBUGFUNC("e1000_update_flash_i210"); + + ret_val = e1000_pool_flash_update_done_i210(hw); + if (ret_val == -E1000_ERR_NVM) { + DEBUGOUT("Flash update time out\n"); + goto out; + } + + flup = E1000_READ_REG(hw, E1000_EECD) | E1000_EECD_FLUPD_I210; + E1000_WRITE_REG(hw, E1000_EECD, flup); + + ret_val = e1000_pool_flash_update_done_i210(hw); + if (ret_val == E1000_SUCCESS) + DEBUGOUT("Flash update complete\n"); + else + DEBUGOUT("Flash update time out\n"); + +out: + return ret_val; +} + +/** + * e1000_pool_flash_update_done_i210 - Pool FLUDONE status. + * @hw: pointer to the HW structure + * + **/ +s32 e1000_pool_flash_update_done_i210(struct e1000_hw *hw) +{ + s32 ret_val = -E1000_ERR_NVM; + u32 i, reg; + + DEBUGFUNC("e1000_pool_flash_update_done_i210"); + + for (i = 0; i < E1000_FLUDONE_ATTEMPTS; i++) { + reg = E1000_READ_REG(hw, E1000_EECD); + if (reg & E1000_EECD_FLUDONE_I210) { + ret_val = E1000_SUCCESS; + break; + } + usec_delay(5); + } + + return ret_val; +} + +/** + * e1000_init_nvm_params_i210 - Initialize i210 NVM function pointers + * @hw: pointer to the HW structure + * + * Initialize the i210/i211 NVM parameters and function pointers. 
+ **/ +static s32 e1000_init_nvm_params_i210(struct e1000_hw *hw) +{ + s32 ret_val; + struct e1000_nvm_info *nvm = &hw->nvm; + + DEBUGFUNC("e1000_init_nvm_params_i210"); + + ret_val = e1000_init_nvm_params_82575(hw); + nvm->ops.acquire = e1000_acquire_nvm_i210; + nvm->ops.release = e1000_release_nvm_i210; + nvm->ops.valid_led_default = e1000_valid_led_default_i210; + if (e1000_get_flash_presence_i210(hw)) { + hw->nvm.type = e1000_nvm_flash_hw; + nvm->ops.read = e1000_read_nvm_srrd_i210; + nvm->ops.write = e1000_write_nvm_srwr_i210; + nvm->ops.validate = e1000_validate_nvm_checksum_i210; + nvm->ops.update = e1000_update_nvm_checksum_i210; + } else { + hw->nvm.type = e1000_nvm_invm; + nvm->ops.read = e1000_read_invm_i210; + nvm->ops.write = e1000_null_write_nvm; + nvm->ops.validate = e1000_null_ops_generic; + nvm->ops.update = e1000_null_ops_generic; + } + return ret_val; +} + +/** + * e1000_init_function_pointers_i210 - Init func ptrs. + * @hw: pointer to the HW structure + * + * Called to initialize all function pointers and parameters. + **/ +void e1000_init_function_pointers_i210(struct e1000_hw *hw) +{ + e1000_init_function_pointers_82575(hw); + hw->nvm.ops.init_params = e1000_init_nvm_params_i210; + + return; +} + +/** + * e1000_valid_led_default_i210 - Verify a valid default LED config + * @hw: pointer to the HW structure + * @data: pointer to the NVM (EEPROM) + * + * Read the EEPROM for the current default LED configuration. If the + * LED configuration is not valid, set to a valid LED configuration. 
+ **/ +static s32 e1000_valid_led_default_i210(struct e1000_hw *hw, u16 *data) +{ + s32 ret_val; + + DEBUGFUNC("e1000_valid_led_default_i210"); + + ret_val = hw->nvm.ops.read(hw, NVM_ID_LED_SETTINGS, 1, data); + if (ret_val) { + DEBUGOUT("NVM Read Error\n"); + goto out; + } + + if (*data == ID_LED_RESERVED_0000 || *data == ID_LED_RESERVED_FFFF) { + switch (hw->phy.media_type) { + case e1000_media_type_internal_serdes: + *data = ID_LED_DEFAULT_I210_SERDES; + break; + case e1000_media_type_copper: + default: + *data = ID_LED_DEFAULT_I210; + break; + } + } +out: + return ret_val; +} + +/** + * __e1000_access_xmdio_reg - Read/write XMDIO register + * @hw: pointer to the HW structure + * @address: XMDIO address to program + * @dev_addr: device address to program + * @data: pointer to value to read/write from/to the XMDIO address + * @read: boolean flag to indicate read or write + **/ +static s32 __e1000_access_xmdio_reg(struct e1000_hw *hw, u16 address, + u8 dev_addr, u16 *data, bool read) +{ + s32 ret_val; + + DEBUGFUNC("__e1000_access_xmdio_reg"); + + ret_val = hw->phy.ops.write_reg(hw, E1000_MMDAC, dev_addr); + if (ret_val) + return ret_val; + + ret_val = hw->phy.ops.write_reg(hw, E1000_MMDAAD, address); + if (ret_val) + return ret_val; + + ret_val = hw->phy.ops.write_reg(hw, E1000_MMDAC, E1000_MMDAC_FUNC_DATA | + dev_addr); + if (ret_val) + return ret_val; + + if (read) + ret_val = hw->phy.ops.read_reg(hw, E1000_MMDAAD, data); + else + ret_val = hw->phy.ops.write_reg(hw, E1000_MMDAAD, *data); + if (ret_val) + return ret_val; + + /* Recalibrate the device back to 0 */ + ret_val = hw->phy.ops.write_reg(hw, E1000_MMDAC, 0); + if (ret_val) + return ret_val; + + return ret_val; +} + +/** + * e1000_read_xmdio_reg - Read XMDIO register + * @hw: pointer to the HW structure + * @addr: XMDIO address to program + * @dev_addr: device address to program + * @data: value to be read from the EMI address + **/ +s32 e1000_read_xmdio_reg(struct e1000_hw *hw, u16 addr, u8 dev_addr, 
u16 *data) +{ + DEBUGFUNC("e1000_read_xmdio_reg"); + + return __e1000_access_xmdio_reg(hw, addr, dev_addr, data, true); +} + +/** + * e1000_write_xmdio_reg - Write XMDIO register + * @hw: pointer to the HW structure + * @addr: XMDIO address to program + * @dev_addr: device address to program + * @data: value to be written to the XMDIO address + **/ +s32 e1000_write_xmdio_reg(struct e1000_hw *hw, u16 addr, u8 dev_addr, u16 data) +{ + DEBUGFUNC("e1000_read_xmdio_reg"); + + return __e1000_access_xmdio_reg(hw, addr, dev_addr, &data, false); +} + +/** + * e1000_pll_workaround_i210 + * @hw: pointer to the HW structure + * + * Works around an errata in the PLL circuit where it occasionally + * provides the wrong clock frequency after power up. + **/ +static s32 e1000_pll_workaround_i210(struct e1000_hw *hw) +{ + s32 ret_val; + u32 wuc, mdicnfg, ctrl, ctrl_ext, reg_val; + u16 nvm_word, phy_word, pci_word, tmp_nvm; + int i; + + /* Get and set needed register values */ + wuc = E1000_READ_REG(hw, E1000_WUC); + mdicnfg = E1000_READ_REG(hw, E1000_MDICNFG); + reg_val = mdicnfg & ~E1000_MDICNFG_EXT_MDIO; + E1000_WRITE_REG(hw, E1000_MDICNFG, reg_val); + + /* Get data from NVM, or set default */ + ret_val = e1000_read_invm_word_i210(hw, E1000_INVM_AUTOLOAD, + &nvm_word); + if (ret_val != E1000_SUCCESS) + nvm_word = E1000_INVM_DEFAULT_AL; + tmp_nvm = nvm_word | E1000_INVM_PLL_WO_VAL; + for (i = 0; i < E1000_MAX_PLL_TRIES; i++) { + /* check current state directly from internal PHY */ + e1000_read_phy_reg_gs40g(hw, (E1000_PHY_PLL_FREQ_PAGE | + E1000_PHY_PLL_FREQ_REG), &phy_word); + if ((phy_word & E1000_PHY_PLL_UNCONF) + != E1000_PHY_PLL_UNCONF) { + ret_val = E1000_SUCCESS; + break; + } else { + ret_val = -E1000_ERR_PHY; + } + /* directly reset the internal PHY */ + ctrl = E1000_READ_REG(hw, E1000_CTRL); + E1000_WRITE_REG(hw, E1000_CTRL, ctrl|E1000_CTRL_PHY_RST); + + ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT); + ctrl_ext |= (E1000_CTRL_EXT_PHYPDEN | E1000_CTRL_EXT_SDLPE); + 
E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext); + + E1000_WRITE_REG(hw, E1000_WUC, 0); + reg_val = (E1000_INVM_AUTOLOAD << 4) | (tmp_nvm << 16); + E1000_WRITE_REG(hw, E1000_EEARBC_I210, reg_val); + + e1000_read_pci_cfg(hw, E1000_PCI_PMCSR, &pci_word); + pci_word |= E1000_PCI_PMCSR_D3; + e1000_write_pci_cfg(hw, E1000_PCI_PMCSR, &pci_word); + msec_delay(1); + pci_word &= ~E1000_PCI_PMCSR_D3; + e1000_write_pci_cfg(hw, E1000_PCI_PMCSR, &pci_word); + reg_val = (E1000_INVM_AUTOLOAD << 4) | (nvm_word << 16); + E1000_WRITE_REG(hw, E1000_EEARBC_I210, reg_val); + + /* restore WUC register */ + E1000_WRITE_REG(hw, E1000_WUC, wuc); + } + /* restore MDICNFG setting */ + E1000_WRITE_REG(hw, E1000_MDICNFG, mdicnfg); + return ret_val; +} + +/** + * e1000_init_hw_i210 - Init hw for I210/I211 + * @hw: pointer to the HW structure + * + * Called to initialize hw for i210 hw family. + **/ +s32 e1000_init_hw_i210(struct e1000_hw *hw) +{ + s32 ret_val; + + DEBUGFUNC("e1000_init_hw_i210"); + if ((hw->mac.type >= e1000_i210) && + !(e1000_get_flash_presence_i210(hw))) { + ret_val = e1000_pll_workaround_i210(hw); + if (ret_val != E1000_SUCCESS) + return ret_val; + } + ret_val = e1000_init_hw_82575(hw); + return ret_val; +} diff --git a/drivers/staging/igb_avb/e1000_i210.h b/drivers/staging/igb_avb/e1000_i210.h new file mode 100644 index 0000000000000..a14e897d26a06 --- /dev/null +++ b/drivers/staging/igb_avb/e1000_i210.h @@ -0,0 +1,101 @@ +/******************************************************************************* + + Intel(R) Gigabit Ethernet Linux driver + Copyright(c) 2007-2015 Intel Corporation. + + This program is free software; you can redistribute it and/or modify it + under the terms and conditions of the GNU General Public License, + version 2, as published by the Free Software Foundation. + + This program is distributed in the hope it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU General Public License for + more details. + + The full GNU General Public License is included in this distribution in + the file called "COPYING". + + Contact Information: + Linux NICS + e1000-devel Mailing List + Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + +*******************************************************************************/ + +#ifndef _E1000_I210_H_ +#define _E1000_I210_H_ + +bool e1000_get_flash_presence_i210(struct e1000_hw *hw); +s32 e1000_update_flash_i210(struct e1000_hw *hw); +s32 e1000_update_nvm_checksum_i210(struct e1000_hw *hw); +s32 e1000_validate_nvm_checksum_i210(struct e1000_hw *hw); +s32 e1000_write_nvm_srwr_i210(struct e1000_hw *hw, u16 offset, + u16 words, u16 *data); +s32 e1000_read_nvm_srrd_i210(struct e1000_hw *hw, u16 offset, + u16 words, u16 *data); +s32 e1000_read_invm_version(struct e1000_hw *hw, + struct e1000_fw_version *invm_ver); +s32 e1000_acquire_swfw_sync_i210(struct e1000_hw *hw, u16 mask); +void e1000_release_swfw_sync_i210(struct e1000_hw *hw, u16 mask); +s32 e1000_read_xmdio_reg(struct e1000_hw *hw, u16 addr, u8 dev_addr, + u16 *data); +s32 e1000_write_xmdio_reg(struct e1000_hw *hw, u16 addr, u8 dev_addr, + u16 data); +s32 e1000_init_hw_i210(struct e1000_hw *hw); + +#define E1000_STM_OPCODE 0xDB00 +#define E1000_EEPROM_FLASH_SIZE_WORD 0x11 + +#define INVM_DWORD_TO_RECORD_TYPE(invm_dword) \ + (u8)((invm_dword) & 0x7) +#define INVM_DWORD_TO_WORD_ADDRESS(invm_dword) \ + (u8)(((invm_dword) & 0x0000FE00) >> 9) +#define INVM_DWORD_TO_WORD_DATA(invm_dword) \ + (u16)(((invm_dword) & 0xFFFF0000) >> 16) + +enum E1000_INVM_STRUCTURE_TYPE { + E1000_INVM_UNINITIALIZED_STRUCTURE = 0x00, + E1000_INVM_WORD_AUTOLOAD_STRUCTURE = 0x01, + E1000_INVM_CSR_AUTOLOAD_STRUCTURE = 0x02, + E1000_INVM_PHY_REGISTER_AUTOLOAD_STRUCTURE = 0x03, + E1000_INVM_RSA_KEY_SHA256_STRUCTURE = 0x04, + E1000_INVM_INVALIDATED_STRUCTURE = 0x0F, +}; + +#define E1000_INVM_RSA_KEY_SHA256_DATA_SIZE_IN_DWORDS 8 +#define 
E1000_INVM_CSR_AUTOLOAD_DATA_SIZE_IN_DWORDS 1 +#define E1000_INVM_ULT_BYTES_SIZE 8 +#define E1000_INVM_RECORD_SIZE_IN_BYTES 4 +#define E1000_INVM_VER_FIELD_ONE 0x1FF8 +#define E1000_INVM_VER_FIELD_TWO 0x7FE000 +#define E1000_INVM_IMGTYPE_FIELD 0x1F800000 + +#define E1000_INVM_MAJOR_MASK 0x3F0 +#define E1000_INVM_MINOR_MASK 0xF +#define E1000_INVM_MAJOR_SHIFT 4 + +#define ID_LED_DEFAULT_I210 ((ID_LED_OFF1_ON2 << 8) | \ + (ID_LED_DEF1_DEF2 << 4) | \ + (ID_LED_OFF1_OFF2)) +#define ID_LED_DEFAULT_I210_SERDES ((ID_LED_DEF1_DEF2 << 8) | \ + (ID_LED_DEF1_DEF2 << 4) | \ + (ID_LED_OFF1_ON2)) + +/* NVM offset defaults for I211 devices */ +#define NVM_INIT_CTRL_2_DEFAULT_I211 0X7243 +#define NVM_INIT_CTRL_4_DEFAULT_I211 0x00C1 +#define NVM_LED_1_CFG_DEFAULT_I211 0x0184 +#define NVM_LED_0_2_CFG_DEFAULT_I211 0x200C + +/* PLL Defines */ +#define E1000_PCI_PMCSR 0x44 +#define E1000_PCI_PMCSR_D3 0x03 +#define E1000_MAX_PLL_TRIES 5 +#define E1000_PHY_PLL_UNCONF 0xFF +#define E1000_PHY_PLL_FREQ_PAGE 0xFC0000 +#define E1000_PHY_PLL_FREQ_REG 0x000E +#define E1000_INVM_DEFAULT_AL 0x202F +#define E1000_INVM_AUTOLOAD 0x0A +#define E1000_INVM_PLL_WO_VAL 0x0010 + +#endif diff --git a/drivers/staging/igb_avb/e1000_mac.c b/drivers/staging/igb_avb/e1000_mac.c new file mode 100644 index 0000000000000..f848b995c9320 --- /dev/null +++ b/drivers/staging/igb_avb/e1000_mac.c @@ -0,0 +1,2149 @@ +/******************************************************************************* + + Intel(R) Gigabit Ethernet Linux driver + Copyright(c) 2007-2015 Intel Corporation. + + This program is free software; you can redistribute it and/or modify it + under the terms and conditions of the GNU General Public License, + version 2, as published by the Free Software Foundation. + + This program is distributed in the hope it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. 
+ + The full GNU General Public License is included in this distribution in + the file called "COPYING". + + Contact Information: + Linux NICS + e1000-devel Mailing List + Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + +*******************************************************************************/ + +#include "e1000_api.h" + +static s32 e1000_validate_mdi_setting_generic(struct e1000_hw *hw); +static void e1000_set_lan_id_multi_port_pcie(struct e1000_hw *hw); +static void e1000_config_collision_dist_generic(struct e1000_hw *hw); +static int e1000_rar_set_generic(struct e1000_hw *hw, u8 *addr, u32 index); + +/** + * e1000_init_mac_ops_generic - Initialize MAC function pointers + * @hw: pointer to the HW structure + * + * Setups up the function pointers to no-op functions + **/ +void e1000_init_mac_ops_generic(struct e1000_hw *hw) +{ + struct e1000_mac_info *mac = &hw->mac; + DEBUGFUNC("e1000_init_mac_ops_generic"); + + /* General Setup */ + mac->ops.init_params = e1000_null_ops_generic; + mac->ops.init_hw = e1000_null_ops_generic; + mac->ops.reset_hw = e1000_null_ops_generic; + mac->ops.setup_physical_interface = e1000_null_ops_generic; + mac->ops.get_bus_info = e1000_null_ops_generic; + mac->ops.set_lan_id = e1000_set_lan_id_multi_port_pcie; + mac->ops.read_mac_addr = e1000_read_mac_addr_generic; + mac->ops.config_collision_dist = e1000_config_collision_dist_generic; + mac->ops.clear_hw_cntrs = e1000_null_mac_generic; + /* LED */ + mac->ops.cleanup_led = e1000_null_ops_generic; + mac->ops.setup_led = e1000_null_ops_generic; + mac->ops.blink_led = e1000_null_ops_generic; + mac->ops.led_on = e1000_null_ops_generic; + mac->ops.led_off = e1000_null_ops_generic; + /* LINK */ + mac->ops.setup_link = e1000_null_ops_generic; + mac->ops.get_link_up_info = e1000_null_link_info; + mac->ops.check_for_link = e1000_null_ops_generic; + /* Management */ + mac->ops.check_mng_mode = e1000_null_mng_mode; + /* VLAN, MC, etc. 
*/ + mac->ops.update_mc_addr_list = e1000_null_update_mc; + mac->ops.clear_vfta = e1000_null_mac_generic; + mac->ops.write_vfta = e1000_null_write_vfta; + mac->ops.rar_set = e1000_rar_set_generic; + mac->ops.validate_mdi_setting = e1000_validate_mdi_setting_generic; +} + +/** + * e1000_null_ops_generic - No-op function, returns 0 + * @hw: pointer to the HW structure + **/ +s32 e1000_null_ops_generic(struct e1000_hw E1000_UNUSEDARG *hw) +{ + DEBUGFUNC("e1000_null_ops_generic"); + return E1000_SUCCESS; +} + +/** + * e1000_null_mac_generic - No-op function, return void + * @hw: pointer to the HW structure + **/ +void e1000_null_mac_generic(struct e1000_hw E1000_UNUSEDARG *hw) +{ + DEBUGFUNC("e1000_null_mac_generic"); + return; +} + +/** + * e1000_null_link_info - No-op function, return 0 + * @hw: pointer to the HW structure + **/ +s32 e1000_null_link_info(struct e1000_hw E1000_UNUSEDARG *hw, + u16 E1000_UNUSEDARG *s, u16 E1000_UNUSEDARG *d) +{ + DEBUGFUNC("e1000_null_link_info"); + return E1000_SUCCESS; +} + +/** + * e1000_null_mng_mode - No-op function, return false + * @hw: pointer to the HW structure + **/ +bool e1000_null_mng_mode(struct e1000_hw E1000_UNUSEDARG *hw) +{ + DEBUGFUNC("e1000_null_mng_mode"); + return false; +} + +/** + * e1000_null_update_mc - No-op function, return void + * @hw: pointer to the HW structure + **/ +void e1000_null_update_mc(struct e1000_hw E1000_UNUSEDARG *hw, + u8 E1000_UNUSEDARG *h, u32 E1000_UNUSEDARG a) +{ + DEBUGFUNC("e1000_null_update_mc"); + return; +} + +/** + * e1000_null_write_vfta - No-op function, return void + * @hw: pointer to the HW structure + **/ +void e1000_null_write_vfta(struct e1000_hw E1000_UNUSEDARG *hw, + u32 E1000_UNUSEDARG a, u32 E1000_UNUSEDARG b) +{ + DEBUGFUNC("e1000_null_write_vfta"); + return; +} + +/** + * e1000_null_rar_set - No-op function, return 0 + * @hw: pointer to the HW structure + **/ +int e1000_null_rar_set(struct e1000_hw E1000_UNUSEDARG *hw, + u8 E1000_UNUSEDARG *h, u32 E1000_UNUSEDARG a) +{ 
+ DEBUGFUNC("e1000_null_rar_set"); + return E1000_SUCCESS; +} + +/** + * e1000_get_bus_info_pcie_generic - Get PCIe bus information + * @hw: pointer to the HW structure + * + * Determines and stores the system bus information for a particular + * network interface. The following bus information is determined and stored: + * bus speed, bus width, type (PCIe), and PCIe function. + **/ +s32 e1000_get_bus_info_pcie_generic(struct e1000_hw *hw) +{ + struct e1000_mac_info *mac = &hw->mac; + struct e1000_bus_info *bus = &hw->bus; + s32 ret_val; + u16 pcie_link_status; + + DEBUGFUNC("e1000_get_bus_info_pcie_generic"); + + bus->type = e1000_bus_type_pci_express; + + ret_val = e1000_read_pcie_cap_reg(hw, PCIE_LINK_STATUS, + &pcie_link_status); + if (ret_val) { + bus->width = e1000_bus_width_unknown; + bus->speed = e1000_bus_speed_unknown; + } else { + switch (pcie_link_status & PCIE_LINK_SPEED_MASK) { + case PCIE_LINK_SPEED_2500: + bus->speed = e1000_bus_speed_2500; + break; + case PCIE_LINK_SPEED_5000: + bus->speed = e1000_bus_speed_5000; + break; + default: + bus->speed = e1000_bus_speed_unknown; + break; + } + + bus->width = (enum e1000_bus_width)((pcie_link_status & + PCIE_LINK_WIDTH_MASK) >> PCIE_LINK_WIDTH_SHIFT); + } + + mac->ops.set_lan_id(hw); + + return E1000_SUCCESS; +} + +/** + * e1000_set_lan_id_multi_port_pcie - Set LAN id for PCIe multiple port devices + * + * @hw: pointer to the HW structure + * + * Determines the LAN function id by reading memory-mapped registers + * and swaps the port value if requested. + **/ +static void e1000_set_lan_id_multi_port_pcie(struct e1000_hw *hw) +{ + struct e1000_bus_info *bus = &hw->bus; + u32 reg; + + /* The status register reports the correct function number + * for the device regardless of function swap state. 
+ */ + reg = E1000_READ_REG(hw, E1000_STATUS); + bus->func = (reg & E1000_STATUS_FUNC_MASK) >> E1000_STATUS_FUNC_SHIFT; +} + +/** + * e1000_set_lan_id_single_port - Set LAN id for a single port device + * @hw: pointer to the HW structure + * + * Sets the LAN function id to zero for a single port device. + **/ +void e1000_set_lan_id_single_port(struct e1000_hw *hw) +{ + struct e1000_bus_info *bus = &hw->bus; + + bus->func = 0; +} + +/** + * e1000_clear_vfta_generic - Clear VLAN filter table + * @hw: pointer to the HW structure + * + * Clears the register array which contains the VLAN filter table by + * setting all the values to 0. + **/ +void e1000_clear_vfta_generic(struct e1000_hw *hw) +{ + u32 offset; + + DEBUGFUNC("e1000_clear_vfta_generic"); + + for (offset = 0; offset < E1000_VLAN_FILTER_TBL_SIZE; offset++) { + E1000_WRITE_REG_ARRAY(hw, E1000_VFTA, offset, 0); + E1000_WRITE_FLUSH(hw); + } +} + +/** + * e1000_write_vfta_generic - Write value to VLAN filter table + * @hw: pointer to the HW structure + * @offset: register offset in VLAN filter table + * @value: register value written to VLAN filter table + * + * Writes value at the given offset in the register array which stores + * the VLAN filter table. + **/ +void e1000_write_vfta_generic(struct e1000_hw *hw, u32 offset, u32 value) +{ + DEBUGFUNC("e1000_write_vfta_generic"); + + E1000_WRITE_REG_ARRAY(hw, E1000_VFTA, offset, value); + E1000_WRITE_FLUSH(hw); +} + +/** + * e1000_init_rx_addrs_generic - Initialize receive address's + * @hw: pointer to the HW structure + * @rar_count: receive address registers + * + * Setup the receive address registers by setting the base receive address + * register to the devices MAC address and clearing all the other receive + * address registers to 0. 
+ **/ +void e1000_init_rx_addrs_generic(struct e1000_hw *hw, u16 rar_count) +{ + u32 i; + u8 mac_addr[ETH_ADDR_LEN] = {0}; + + DEBUGFUNC("e1000_init_rx_addrs_generic"); + + /* Setup the receive address */ + DEBUGOUT("Programming MAC Address into RAR[0]\n"); + + hw->mac.ops.rar_set(hw, hw->mac.addr, 0); + + /* Zero out the other (rar_entry_count - 1) receive addresses */ + DEBUGOUT1("Clearing RAR[1-%u]\n", rar_count-1); + for (i = 1; i < rar_count; i++) + hw->mac.ops.rar_set(hw, mac_addr, i); +} + +/** + * e1000_check_alt_mac_addr_generic - Check for alternate MAC addr + * @hw: pointer to the HW structure + * + * Checks the nvm for an alternate MAC address. An alternate MAC address + * can be setup by pre-boot software and must be treated like a permanent + * address and must override the actual permanent MAC address. If an + * alternate MAC address is found it is programmed into RAR0, replacing + * the permanent address that was installed into RAR0 by the Si on reset. + * This function will return SUCCESS unless it encounters an error while + * reading the EEPROM. + **/ +s32 e1000_check_alt_mac_addr_generic(struct e1000_hw *hw) +{ + u32 i; + s32 ret_val; + u16 offset, nvm_alt_mac_addr_offset, nvm_data; + u8 alt_mac_addr[ETH_ADDR_LEN]; + + DEBUGFUNC("e1000_check_alt_mac_addr_generic"); + + ret_val = hw->nvm.ops.read(hw, NVM_COMPAT, 1, &nvm_data); + if (ret_val) + return ret_val; + + /* Alternate MAC address is handled by the option ROM for 82580 + * and newer. SW support not required. 
+ */ + if (hw->mac.type >= e1000_82580) + return E1000_SUCCESS; + + ret_val = hw->nvm.ops.read(hw, NVM_ALT_MAC_ADDR_PTR, 1, + &nvm_alt_mac_addr_offset); + if (ret_val) { + DEBUGOUT("NVM Read Error\n"); + return ret_val; + } + + if ((nvm_alt_mac_addr_offset == 0xFFFF) || + (nvm_alt_mac_addr_offset == 0x0000)) + /* There is no Alternate MAC Address */ + return E1000_SUCCESS; + + if (hw->bus.func == E1000_FUNC_1) + nvm_alt_mac_addr_offset += E1000_ALT_MAC_ADDRESS_OFFSET_LAN1; + if (hw->bus.func == E1000_FUNC_2) + nvm_alt_mac_addr_offset += E1000_ALT_MAC_ADDRESS_OFFSET_LAN2; + + if (hw->bus.func == E1000_FUNC_3) + nvm_alt_mac_addr_offset += E1000_ALT_MAC_ADDRESS_OFFSET_LAN3; + for (i = 0; i < ETH_ADDR_LEN; i += 2) { + offset = nvm_alt_mac_addr_offset + (i >> 1); + ret_val = hw->nvm.ops.read(hw, offset, 1, &nvm_data); + if (ret_val) { + DEBUGOUT("NVM Read Error\n"); + return ret_val; + } + + alt_mac_addr[i] = (u8)(nvm_data & 0xFF); + alt_mac_addr[i + 1] = (u8)(nvm_data >> 8); + } + + /* if multicast bit is set, the alternate address will not be used */ + if (alt_mac_addr[0] & 0x01) { + DEBUGOUT("Ignoring Alternate Mac Address with MC bit set\n"); + return E1000_SUCCESS; + } + + /* We have a valid alternate MAC address, and we want to treat it the + * same as the normal permanent MAC address stored by the HW into the + * RAR. Do this by mapping this address into RAR0. + */ + hw->mac.ops.rar_set(hw, alt_mac_addr, 0); + + return E1000_SUCCESS; +} + +/** + * e1000_rar_set_generic - Set receive address register + * @hw: pointer to the HW structure + * @addr: pointer to the receive address + * @index: receive address array register + * + * Sets the receive address array register at index to the address passed + * in by addr. 
+ **/ +static int e1000_rar_set_generic(struct e1000_hw *hw, u8 *addr, u32 index) +{ + u32 rar_low, rar_high; + + DEBUGFUNC("e1000_rar_set_generic"); + + /* HW expects these in little endian so we reverse the byte order + * from network order (big endian) to little endian + */ + rar_low = ((u32) addr[0] | ((u32) addr[1] << 8) | + ((u32) addr[2] << 16) | ((u32) addr[3] << 24)); + + rar_high = ((u32) addr[4] | ((u32) addr[5] << 8)); + + /* If MAC address zero, no need to set the AV bit */ + if (rar_low || rar_high) + rar_high |= E1000_RAH_AV; + + /* Some bridges will combine consecutive 32-bit writes into + * a single burst write, which will malfunction on some parts. + * The flushes avoid this. + */ + E1000_WRITE_REG(hw, E1000_RAL(index), rar_low); + E1000_WRITE_FLUSH(hw); + E1000_WRITE_REG(hw, E1000_RAH(index), rar_high); + E1000_WRITE_FLUSH(hw); + + return E1000_SUCCESS; +} + +/** + * e1000_hash_mc_addr_generic - Generate a multicast hash value + * @hw: pointer to the HW structure + * @mc_addr: pointer to a multicast address + * + * Generates a multicast address hash value which is used to determine + * the multicast filter table array address and new table value. + **/ +u32 e1000_hash_mc_addr_generic(struct e1000_hw *hw, u8 *mc_addr) +{ + u32 hash_value, hash_mask; + u8 bit_shift = 0; + + DEBUGFUNC("e1000_hash_mc_addr_generic"); + + /* Register count multiplied by bits per register */ + hash_mask = (hw->mac.mta_reg_count * 32) - 1; + + /* For a mc_filter_type of 0, bit_shift is the number of left-shifts + * where 0xFF would still fall within the hash mask. + */ + while (hash_mask >> bit_shift != 0xFF) + bit_shift++; + + /* The portion of the address that is used for the hash table + * is determined by the mc_filter_type setting. + * The algorithm is such that there is a total of 8 bits of shifting. + * The bit_shift for a mc_filter_type of 0 represents the number of + * left-shifts where the MSB of mc_addr[5] would still fall within + * the hash_mask. 
Case 0 does this exactly. Since there are a total + * of 8 bits of shifting, then mc_addr[4] will shift right the + * remaining number of bits. Thus 8 - bit_shift. The rest of the + * cases are a variation of this algorithm...essentially raising the + * number of bits to shift mc_addr[5] left, while still keeping the + * 8-bit shifting total. + * + * For example, given the following Destination MAC Address and an + * mta register count of 128 (thus a 4096-bit vector and 0xFFF mask), + * we can see that the bit_shift for case 0 is 4. These are the hash + * values resulting from each mc_filter_type... + * [0] [1] [2] [3] [4] [5] + * 01 AA 00 12 34 56 + * LSB MSB + * + * case 0: hash_value = ((0x34 >> 4) | (0x56 << 4)) & 0xFFF = 0x563 + * case 1: hash_value = ((0x34 >> 3) | (0x56 << 5)) & 0xFFF = 0xAC6 + * case 2: hash_value = ((0x34 >> 2) | (0x56 << 6)) & 0xFFF = 0x163 + * case 3: hash_value = ((0x34 >> 0) | (0x56 << 8)) & 0xFFF = 0x634 + */ + switch (hw->mac.mc_filter_type) { + default: + case 0: + break; + case 1: + bit_shift += 1; + break; + case 2: + bit_shift += 2; + break; + case 3: + bit_shift += 4; + break; + } + + hash_value = hash_mask & (((mc_addr[4] >> (8 - bit_shift)) | + (((u16) mc_addr[5]) << bit_shift))); + + return hash_value; +} + +/** + * e1000_update_mc_addr_list_generic - Update Multicast addresses + * @hw: pointer to the HW structure + * @mc_addr_list: array of multicast addresses to program + * @mc_addr_count: number of multicast addresses to program + * + * Updates entire Multicast Table Array. + * The caller must have a packed mc_addr_list of multicast addresses. 
+ **/ +void e1000_update_mc_addr_list_generic(struct e1000_hw *hw, + u8 *mc_addr_list, u32 mc_addr_count) +{ + u32 hash_value, hash_bit, hash_reg; + int i; + + DEBUGFUNC("e1000_update_mc_addr_list_generic"); + + /* clear mta_shadow */ + memset(&hw->mac.mta_shadow, 0, sizeof(hw->mac.mta_shadow)); + + /* update mta_shadow from mc_addr_list */ + for (i = 0; (u32) i < mc_addr_count; i++) { + hash_value = e1000_hash_mc_addr_generic(hw, mc_addr_list); + + hash_reg = (hash_value >> 5) & (hw->mac.mta_reg_count - 1); + hash_bit = hash_value & 0x1F; + + hw->mac.mta_shadow[hash_reg] |= (1 << hash_bit); + mc_addr_list += (ETH_ADDR_LEN); + } + + /* replace the entire MTA table */ + for (i = hw->mac.mta_reg_count - 1; i >= 0; i--) + E1000_WRITE_REG_ARRAY(hw, E1000_MTA, i, hw->mac.mta_shadow[i]); + E1000_WRITE_FLUSH(hw); +} + +/** + * e1000_pcix_mmrbc_workaround_generic - Fix incorrect MMRBC value + * @hw: pointer to the HW structure + * + * In certain situations, a system BIOS may report that the PCIx maximum + * memory read byte count (MMRBC) value is higher than than the actual + * value. We check the PCIx command register with the current PCIx status + * register. 
+ **/ +void e1000_pcix_mmrbc_workaround_generic(struct e1000_hw *hw) +{ + u16 cmd_mmrbc; + u16 pcix_cmd; + u16 pcix_stat_hi_word; + u16 stat_mmrbc; + + DEBUGFUNC("e1000_pcix_mmrbc_workaround_generic"); + + /* Workaround for PCI-X issue when BIOS sets MMRBC incorrectly */ + if (hw->bus.type != e1000_bus_type_pcix) + return; + + e1000_read_pci_cfg(hw, PCIX_COMMAND_REGISTER, &pcix_cmd); + e1000_read_pci_cfg(hw, PCIX_STATUS_REGISTER_HI, &pcix_stat_hi_word); + cmd_mmrbc = (pcix_cmd & PCIX_COMMAND_MMRBC_MASK) >> + PCIX_COMMAND_MMRBC_SHIFT; + stat_mmrbc = (pcix_stat_hi_word & PCIX_STATUS_HI_MMRBC_MASK) >> + PCIX_STATUS_HI_MMRBC_SHIFT; + if (stat_mmrbc == PCIX_STATUS_HI_MMRBC_4K) + stat_mmrbc = PCIX_STATUS_HI_MMRBC_2K; + if (cmd_mmrbc > stat_mmrbc) { + pcix_cmd &= ~PCIX_COMMAND_MMRBC_MASK; + pcix_cmd |= stat_mmrbc << PCIX_COMMAND_MMRBC_SHIFT; + e1000_write_pci_cfg(hw, PCIX_COMMAND_REGISTER, &pcix_cmd); + } +} + +/** + * e1000_clear_hw_cntrs_base_generic - Clear base hardware counters + * @hw: pointer to the HW structure + * + * Clears the base hardware counters by reading the counter registers. 
+ **/ +void e1000_clear_hw_cntrs_base_generic(struct e1000_hw *hw) +{ + DEBUGFUNC("e1000_clear_hw_cntrs_base_generic"); + + E1000_READ_REG(hw, E1000_CRCERRS); + E1000_READ_REG(hw, E1000_SYMERRS); + E1000_READ_REG(hw, E1000_MPC); + E1000_READ_REG(hw, E1000_SCC); + E1000_READ_REG(hw, E1000_ECOL); + E1000_READ_REG(hw, E1000_MCC); + E1000_READ_REG(hw, E1000_LATECOL); + E1000_READ_REG(hw, E1000_COLC); + E1000_READ_REG(hw, E1000_DC); + E1000_READ_REG(hw, E1000_SEC); + E1000_READ_REG(hw, E1000_RLEC); + E1000_READ_REG(hw, E1000_XONRXC); + E1000_READ_REG(hw, E1000_XONTXC); + E1000_READ_REG(hw, E1000_XOFFRXC); + E1000_READ_REG(hw, E1000_XOFFTXC); + E1000_READ_REG(hw, E1000_FCRUC); + E1000_READ_REG(hw, E1000_GPRC); + E1000_READ_REG(hw, E1000_BPRC); + E1000_READ_REG(hw, E1000_MPRC); + E1000_READ_REG(hw, E1000_GPTC); + E1000_READ_REG(hw, E1000_GORCL); + E1000_READ_REG(hw, E1000_GORCH); + E1000_READ_REG(hw, E1000_GOTCL); + E1000_READ_REG(hw, E1000_GOTCH); + E1000_READ_REG(hw, E1000_RNBC); + E1000_READ_REG(hw, E1000_RUC); + E1000_READ_REG(hw, E1000_RFC); + E1000_READ_REG(hw, E1000_ROC); + E1000_READ_REG(hw, E1000_RJC); + E1000_READ_REG(hw, E1000_TORL); + E1000_READ_REG(hw, E1000_TORH); + E1000_READ_REG(hw, E1000_TOTL); + E1000_READ_REG(hw, E1000_TOTH); + E1000_READ_REG(hw, E1000_TPR); + E1000_READ_REG(hw, E1000_TPT); + E1000_READ_REG(hw, E1000_MPTC); + E1000_READ_REG(hw, E1000_BPTC); +} + +/** + * e1000_check_for_copper_link_generic - Check for link (Copper) + * @hw: pointer to the HW structure + * + * Checks to see of the link status of the hardware has changed. If a + * change in link status has been detected, then we read the PHY registers + * to get the current speed/duplex if link exists. 
+ **/ +s32 e1000_check_for_copper_link_generic(struct e1000_hw *hw) +{ + struct e1000_mac_info *mac = &hw->mac; + s32 ret_val; + bool link; + + DEBUGFUNC("e1000_check_for_copper_link"); + + /* We only want to go out to the PHY registers to see if Auto-Neg + * has completed and/or if our link status has changed. The + * get_link_status flag is set upon receiving a Link Status + * Change or Rx Sequence Error interrupt. + */ + if (!mac->get_link_status) + return E1000_SUCCESS; + + /* First we want to see if the MII Status Register reports + * link. If so, then we want to get the current speed/duplex + * of the PHY. + */ + ret_val = e1000_phy_has_link_generic(hw, 1, 0, &link); + if (ret_val) + return ret_val; + + if (!link) + return E1000_SUCCESS; /* No link detected */ + + mac->get_link_status = false; + + /* Check if there was DownShift, must be checked + * immediately after link-up + */ + e1000_check_downshift_generic(hw); + + /* If we are forcing speed/duplex, then we simply return since + * we have already determined whether we have link or not. + */ + if (!mac->autoneg) + return -E1000_ERR_CONFIG; + + /* Auto-Neg is enabled. Auto Speed Detection takes care + * of MAC speed/duplex configuration. So we only need to + * configure Collision Distance in the MAC. + */ + mac->ops.config_collision_dist(hw); + + /* Configure Flow Control now that Auto-Neg has completed. + * First, we need to restore the desired flow control + * settings because we may have had to re-autoneg with a + * different link partner. + */ + ret_val = e1000_config_fc_after_link_up_generic(hw); + if (ret_val) + DEBUGOUT("Error configuring flow control\n"); + + return ret_val; +} + +/** + * e1000_check_for_fiber_link_generic - Check for link (Fiber) + * @hw: pointer to the HW structure + * + * Checks for link up on the hardware. If link is not up and we have + * a signal, then we need to force link up. 
+ **/ +s32 e1000_check_for_fiber_link_generic(struct e1000_hw *hw) +{ + struct e1000_mac_info *mac = &hw->mac; + u32 rxcw; + u32 ctrl; + u32 status; + s32 ret_val; + + DEBUGFUNC("e1000_check_for_fiber_link_generic"); + + ctrl = E1000_READ_REG(hw, E1000_CTRL); + status = E1000_READ_REG(hw, E1000_STATUS); + rxcw = E1000_READ_REG(hw, E1000_RXCW); + + /* If we don't have link (auto-negotiation failed or link partner + * cannot auto-negotiate), the cable is plugged in (we have signal), + * and our link partner is not trying to auto-negotiate with us (we + * are receiving idles or data), we need to force link up. We also + * need to give auto-negotiation time to complete, in case the cable + * was just plugged in. The autoneg_failed flag does this. + */ + /* (ctrl & E1000_CTRL_SWDPIN1) == 1 == have signal */ + if ((ctrl & E1000_CTRL_SWDPIN1) && !(status & E1000_STATUS_LU) && + !(rxcw & E1000_RXCW_C)) { + if (!mac->autoneg_failed) { + mac->autoneg_failed = true; + return E1000_SUCCESS; + } + DEBUGOUT("NOT Rx'ing /C/, disable AutoNeg and force link.\n"); + + /* Disable auto-negotiation in the TXCW register */ + E1000_WRITE_REG(hw, E1000_TXCW, (mac->txcw & ~E1000_TXCW_ANE)); + + /* Force link-up and also force full-duplex. */ + ctrl = E1000_READ_REG(hw, E1000_CTRL); + ctrl |= (E1000_CTRL_SLU | E1000_CTRL_FD); + E1000_WRITE_REG(hw, E1000_CTRL, ctrl); + + /* Configure Flow Control after forcing link up. */ + ret_val = e1000_config_fc_after_link_up_generic(hw); + if (ret_val) { + DEBUGOUT("Error configuring flow control\n"); + return ret_val; + } + } else if ((ctrl & E1000_CTRL_SLU) && (rxcw & E1000_RXCW_C)) { + /* If we are forcing link and we are receiving /C/ ordered + * sets, re-enable auto-negotiation in the TXCW register + * and disable forced link in the Device Control register + * in an attempt to auto-negotiate with our link partner. 
+ */ + DEBUGOUT("Rx'ing /C/, enable AutoNeg and stop forcing link.\n"); + E1000_WRITE_REG(hw, E1000_TXCW, mac->txcw); + E1000_WRITE_REG(hw, E1000_CTRL, (ctrl & ~E1000_CTRL_SLU)); + + mac->serdes_has_link = true; + } + + return E1000_SUCCESS; +} + +/** + * e1000_check_for_serdes_link_generic - Check for link (Serdes) + * @hw: pointer to the HW structure + * + * Checks for link up on the hardware. If link is not up and we have + * a signal, then we need to force link up. + **/ +s32 e1000_check_for_serdes_link_generic(struct e1000_hw *hw) +{ + struct e1000_mac_info *mac = &hw->mac; + u32 rxcw; + u32 ctrl; + u32 status; + s32 ret_val; + + DEBUGFUNC("e1000_check_for_serdes_link_generic"); + + ctrl = E1000_READ_REG(hw, E1000_CTRL); + status = E1000_READ_REG(hw, E1000_STATUS); + rxcw = E1000_READ_REG(hw, E1000_RXCW); + + /* If we don't have link (auto-negotiation failed or link partner + * cannot auto-negotiate), and our link partner is not trying to + * auto-negotiate with us (we are receiving idles or data), + * we need to force link up. We also need to give auto-negotiation + * time to complete. + */ + /* (ctrl & E1000_CTRL_SWDPIN1) == 1 == have signal */ + if (!(status & E1000_STATUS_LU) && !(rxcw & E1000_RXCW_C)) { + if (!mac->autoneg_failed) { + mac->autoneg_failed = true; + return E1000_SUCCESS; + } + DEBUGOUT("NOT Rx'ing /C/, disable AutoNeg and force link.\n"); + + /* Disable auto-negotiation in the TXCW register */ + E1000_WRITE_REG(hw, E1000_TXCW, (mac->txcw & ~E1000_TXCW_ANE)); + + /* Force link-up and also force full-duplex. */ + ctrl = E1000_READ_REG(hw, E1000_CTRL); + ctrl |= (E1000_CTRL_SLU | E1000_CTRL_FD); + E1000_WRITE_REG(hw, E1000_CTRL, ctrl); + + /* Configure Flow Control after forcing link up. 
*/ + ret_val = e1000_config_fc_after_link_up_generic(hw); + if (ret_val) { + DEBUGOUT("Error configuring flow control\n"); + return ret_val; + } + } else if ((ctrl & E1000_CTRL_SLU) && (rxcw & E1000_RXCW_C)) { + /* If we are forcing link and we are receiving /C/ ordered + * sets, re-enable auto-negotiation in the TXCW register + * and disable forced link in the Device Control register + * in an attempt to auto-negotiate with our link partner. + */ + DEBUGOUT("Rx'ing /C/, enable AutoNeg and stop forcing link.\n"); + E1000_WRITE_REG(hw, E1000_TXCW, mac->txcw); + E1000_WRITE_REG(hw, E1000_CTRL, (ctrl & ~E1000_CTRL_SLU)); + + mac->serdes_has_link = true; + } else if (!(E1000_TXCW_ANE & E1000_READ_REG(hw, E1000_TXCW))) { + /* If we force link for non-auto-negotiation switch, check + * link status based on MAC synchronization for internal + * serdes media type. + */ + /* SYNCH bit and IV bit are sticky. */ + usec_delay(10); + rxcw = E1000_READ_REG(hw, E1000_RXCW); + if (rxcw & E1000_RXCW_SYNCH) { + if (!(rxcw & E1000_RXCW_IV)) { + mac->serdes_has_link = true; + DEBUGOUT("SERDES: Link up - forced.\n"); + } + } else { + mac->serdes_has_link = false; + DEBUGOUT("SERDES: Link down - force failed.\n"); + } + } + + if (E1000_TXCW_ANE & E1000_READ_REG(hw, E1000_TXCW)) { + status = E1000_READ_REG(hw, E1000_STATUS); + if (status & E1000_STATUS_LU) { + /* SYNCH bit and IV bit are sticky, so reread rxcw. 
*/ + usec_delay(10); + rxcw = E1000_READ_REG(hw, E1000_RXCW); + if (rxcw & E1000_RXCW_SYNCH) { + if (!(rxcw & E1000_RXCW_IV)) { + mac->serdes_has_link = true; + DEBUGOUT("SERDES: Link up - autoneg completed successfully.\n"); + } else { + mac->serdes_has_link = false; + DEBUGOUT("SERDES: Link down - invalid codewords detected in autoneg.\n"); + } + } else { + mac->serdes_has_link = false; + DEBUGOUT("SERDES: Link down - no sync.\n"); + } + } else { + mac->serdes_has_link = false; + DEBUGOUT("SERDES: Link down - autoneg failed\n"); + } + } + + return E1000_SUCCESS; +} + +/** + * e1000_set_default_fc_generic - Set flow control default values + * @hw: pointer to the HW structure + * + * Read the EEPROM for the default values for flow control and store the + * values. + **/ +static s32 e1000_set_default_fc_generic(struct e1000_hw *hw) +{ + s32 ret_val; + u16 nvm_data; + u16 nvm_offset = 0; + + DEBUGFUNC("e1000_set_default_fc_generic"); + + /* Read and store word 0x0F of the EEPROM. This word contains bits + * that determine the hardware's default PAUSE (flow control) mode, + * a bit that determines whether the HW defaults to enabling or + * disabling auto-negotiation, and the direction of the + * SW defined pins. If there is no SW over-ride of the flow + * control setting, then the variable hw->fc will + * be initialized based on a value in the EEPROM. 
+ */ + if (hw->mac.type == e1000_i350) { + nvm_offset = NVM_82580_LAN_FUNC_OFFSET(hw->bus.func); + ret_val = hw->nvm.ops.read(hw, + NVM_INIT_CONTROL2_REG + + nvm_offset, + 1, &nvm_data); + } else { + ret_val = hw->nvm.ops.read(hw, + NVM_INIT_CONTROL2_REG, + 1, &nvm_data); + } + + if (ret_val) { + DEBUGOUT("NVM Read Error\n"); + return ret_val; + } + + if (!(nvm_data & NVM_WORD0F_PAUSE_MASK)) + hw->fc.requested_mode = e1000_fc_none; + else if ((nvm_data & NVM_WORD0F_PAUSE_MASK) == + NVM_WORD0F_ASM_DIR) + hw->fc.requested_mode = e1000_fc_tx_pause; + else + hw->fc.requested_mode = e1000_fc_full; + + return E1000_SUCCESS; +} + +/** + * e1000_setup_link_generic - Setup flow control and link settings + * @hw: pointer to the HW structure + * + * Determines which flow control settings to use, then configures flow + * control. Calls the appropriate media-specific link configuration + * function. Assuming the adapter has a valid link partner, a valid link + * should be established. Assumes the hardware has previously been reset + * and the transmitter and receiver are not enabled. + **/ +s32 e1000_setup_link_generic(struct e1000_hw *hw) +{ + s32 ret_val; + + DEBUGFUNC("e1000_setup_link_generic"); + + /* In the case of the phy reset being blocked, we already have a link. + * We do not need to set it up again. + */ + if (hw->phy.ops.check_reset_block && hw->phy.ops.check_reset_block(hw)) + return E1000_SUCCESS; + + /* If requested flow control is set to default, set flow control + * based on the EEPROM flow control settings. + */ + if (hw->fc.requested_mode == e1000_fc_default) { + ret_val = e1000_set_default_fc_generic(hw); + if (ret_val) + return ret_val; + } + + /* Save off the requested flow control mode for use later. Depending + * on the link partner's capabilities, we may or may not use this mode. 
+ */ + hw->fc.current_mode = hw->fc.requested_mode; + + DEBUGOUT1("After fix-ups FlowControl is now = %x\n", + hw->fc.current_mode); + + /* Call the necessary media_type subroutine to configure the link. */ + ret_val = hw->mac.ops.setup_physical_interface(hw); + if (ret_val) + return ret_val; + + /* Initialize the flow control address, type, and PAUSE timer + * registers to their default values. This is done even if flow + * control is disabled, because it does not hurt anything to + * initialize these registers. + */ + DEBUGOUT("Initializing the Flow Control address, type and timer regs\n"); + E1000_WRITE_REG(hw, E1000_FCT, FLOW_CONTROL_TYPE); + E1000_WRITE_REG(hw, E1000_FCAH, FLOW_CONTROL_ADDRESS_HIGH); + E1000_WRITE_REG(hw, E1000_FCAL, FLOW_CONTROL_ADDRESS_LOW); + + E1000_WRITE_REG(hw, E1000_FCTTV, hw->fc.pause_time); + + return e1000_set_fc_watermarks_generic(hw); +} + +/** + * e1000_commit_fc_settings_generic - Configure flow control + * @hw: pointer to the HW structure + * + * Write the flow control settings to the Transmit Config Word Register (TXCW) + * base on the flow control settings in e1000_mac_info. + **/ +static s32 e1000_commit_fc_settings_generic(struct e1000_hw *hw) +{ + struct e1000_mac_info *mac = &hw->mac; + u32 txcw; + + DEBUGFUNC("e1000_commit_fc_settings_generic"); + + /* Check for a software override of the flow control settings, and + * setup the device accordingly. If auto-negotiation is enabled, then + * software will have to set the "PAUSE" bits to the correct value in + * the Transmit Config Word Register (TXCW) and re-start auto- + * negotiation. However, if auto-negotiation is disabled, then + * software will have to manually configure the two flow control enable + * bits in the CTRL register. + * + * The possible values of the "fc" parameter are: + * 0: Flow control is completely disabled + * 1: Rx flow control is enabled (we can receive pause frames, + * but not send pause frames). 
+ * 2: Tx flow control is enabled (we can send pause frames but we + * do not support receiving pause frames). + * 3: Both Rx and Tx flow control (symmetric) are enabled. + */ + switch (hw->fc.current_mode) { + case e1000_fc_none: + /* Flow control completely disabled by a software over-ride. */ + txcw = (E1000_TXCW_ANE | E1000_TXCW_FD); + break; + case e1000_fc_rx_pause: + /* Rx Flow control is enabled and Tx Flow control is disabled + * by a software over-ride. Since there really isn't a way to + * advertise that we are capable of Rx Pause ONLY, we will + * advertise that we support both symmetric and asymmetric Rx + * PAUSE. Later, we will disable the adapter's ability to send + * PAUSE frames. + */ + txcw = (E1000_TXCW_ANE | E1000_TXCW_FD | E1000_TXCW_PAUSE_MASK); + break; + case e1000_fc_tx_pause: + /* Tx Flow control is enabled, and Rx Flow control is disabled, + * by a software over-ride. + */ + txcw = (E1000_TXCW_ANE | E1000_TXCW_FD | E1000_TXCW_ASM_DIR); + break; + case e1000_fc_full: + /* Flow control (both Rx and Tx) is enabled by a software + * over-ride. + */ + txcw = (E1000_TXCW_ANE | E1000_TXCW_FD | E1000_TXCW_PAUSE_MASK); + break; + default: + DEBUGOUT("Flow control param set incorrectly\n"); + return -E1000_ERR_CONFIG; + break; + } + + E1000_WRITE_REG(hw, E1000_TXCW, txcw); + mac->txcw = txcw; + + return E1000_SUCCESS; +} + +/** + * e1000_poll_fiber_serdes_link_generic - Poll for link up + * @hw: pointer to the HW structure + * + * Polls for link up by reading the status register, if link fails to come + * up with auto-negotiation, then the link is forced if a signal is detected. + **/ +static s32 e1000_poll_fiber_serdes_link_generic(struct e1000_hw *hw) +{ + struct e1000_mac_info *mac = &hw->mac; + u32 i, status; + s32 ret_val; + + DEBUGFUNC("e1000_poll_fiber_serdes_link_generic"); + + /* If we have a signal (the cable is plugged in, or assumed true for + * serdes media) then poll for a "Link-Up" indication in the Device + * Status Register. 
Time-out if a link isn't seen in 500 milliseconds + * seconds (Auto-negotiation should complete in less than 500 + * milliseconds even if the other end is doing it in SW). + */ + for (i = 0; i < FIBER_LINK_UP_LIMIT; i++) { + msec_delay(10); + status = E1000_READ_REG(hw, E1000_STATUS); + if (status & E1000_STATUS_LU) + break; + } + if (i == FIBER_LINK_UP_LIMIT) { + DEBUGOUT("Never got a valid link from auto-neg!!!\n"); + mac->autoneg_failed = true; + /* AutoNeg failed to achieve a link, so we'll call + * mac->check_for_link. This routine will force the + * link up if we detect a signal. This will allow us to + * communicate with non-autonegotiating link partners. + */ + ret_val = mac->ops.check_for_link(hw); + if (ret_val) { + DEBUGOUT("Error while checking for link\n"); + return ret_val; + } + mac->autoneg_failed = false; + } else { + mac->autoneg_failed = false; + DEBUGOUT("Valid Link Found\n"); + } + + return E1000_SUCCESS; +} + +/** + * e1000_setup_fiber_serdes_link_generic - Setup link for fiber/serdes + * @hw: pointer to the HW structure + * + * Configures collision distance and flow control for fiber and serdes + * links. Upon successful setup, poll for link. + **/ +s32 e1000_setup_fiber_serdes_link_generic(struct e1000_hw *hw) +{ + u32 ctrl; + s32 ret_val; + + DEBUGFUNC("e1000_setup_fiber_serdes_link_generic"); + + ctrl = E1000_READ_REG(hw, E1000_CTRL); + + /* Take the link out of reset */ + ctrl &= ~E1000_CTRL_LRST; + + hw->mac.ops.config_collision_dist(hw); + + ret_val = e1000_commit_fc_settings_generic(hw); + if (ret_val) + return ret_val; + + /* Since auto-negotiation is enabled, take the link out of reset (the + * link will be in reset, because we previously reset the chip). This + * will restart auto-negotiation. If auto-negotiation is successful + * then the link-up status bit will be set and the flow control enable + * bits (RFCE and TFCE) will be set according to their negotiated value. 
	 */
	DEBUGOUT("Auto-negotiation enabled\n");

	E1000_WRITE_REG(hw, E1000_CTRL, ctrl);
	E1000_WRITE_FLUSH(hw);
	msec_delay(1);

	/* For these adapters, the SW definable pin 1 is set when the optics
	 * detect a signal.  If we have a signal, then poll for a "Link-Up"
	 * indication.
	 */
	if (hw->phy.media_type == e1000_media_type_internal_serdes ||
	    (E1000_READ_REG(hw, E1000_CTRL) & E1000_CTRL_SWDPIN1)) {
		ret_val = e1000_poll_fiber_serdes_link_generic(hw);
	} else {
		DEBUGOUT("No signal detected\n");
	}

	/* When no signal is detected, ret_val still holds the E1000_SUCCESS
	 * returned by e1000_commit_fc_settings_generic() above.
	 */
	return ret_val;
}

/**
 * e1000_config_collision_dist_generic - Configure collision distance
 * @hw: pointer to the HW structure
 *
 * Configures the collision distance to the default value and is used
 * during link setup.
 **/
static void e1000_config_collision_dist_generic(struct e1000_hw *hw)
{
	u32 tctl;

	DEBUGFUNC("e1000_config_collision_dist_generic");

	tctl = E1000_READ_REG(hw, E1000_TCTL);

	/* Replace the COLD field of TCTL with the default collision distance. */
	tctl &= ~E1000_TCTL_COLD;
	tctl |= E1000_COLLISION_DISTANCE << E1000_COLD_SHIFT;

	E1000_WRITE_REG(hw, E1000_TCTL, tctl);
	E1000_WRITE_FLUSH(hw);
}

/**
 * e1000_set_fc_watermarks_generic - Set flow control high/low watermarks
 * @hw: pointer to the HW structure
 *
 * Sets the flow control high/low threshold (watermark) registers.  If
 * flow control XON frame transmission is enabled, then set XON frame
 * transmission as well.
 **/
s32 e1000_set_fc_watermarks_generic(struct e1000_hw *hw)
{
	u32 fcrtl = 0, fcrth = 0;

	DEBUGFUNC("e1000_set_fc_watermarks_generic");

	/* Set the flow control receive threshold registers.  Normally,
	 * these registers will be set to a default threshold that may be
	 * adjusted later by the driver's runtime code.  However, if the
	 * ability to transmit pause frames is not enabled, then these
	 * registers will be set to 0.
	 *
	 * NOTE(review): this is a bitwise test, so it presumably also matches
	 * e1000_fc_full (Tx bit set) — confirm against the enum encoding.
	 */
	if (hw->fc.current_mode & e1000_fc_tx_pause) {
		/* We need to set up the Receive Threshold high and low water
		 * marks as well as (optionally) enabling the transmission of
		 * XON frames.
		 */
		fcrtl = hw->fc.low_water;
		if (hw->fc.send_xon)
			fcrtl |= E1000_FCRTL_XONE;

		fcrth = hw->fc.high_water;
	}
	E1000_WRITE_REG(hw, E1000_FCRTL, fcrtl);
	E1000_WRITE_REG(hw, E1000_FCRTH, fcrth);

	return E1000_SUCCESS;
}

/**
 * e1000_force_mac_fc_generic - Force the MAC's flow control settings
 * @hw: pointer to the HW structure
 *
 * Force the MAC's flow control settings.  Sets the TFCE and RFCE bits in the
 * device control register to reflect the adapter settings.  TFCE and RFCE
 * need to be explicitly set by software when a copper PHY is used because
 * autonegotiation is managed by the PHY rather than the MAC.  Software must
 * also configure these bits when link is forced on a fiber connection.
 **/
s32 e1000_force_mac_fc_generic(struct e1000_hw *hw)
{
	u32 ctrl;

	DEBUGFUNC("e1000_force_mac_fc_generic");

	ctrl = E1000_READ_REG(hw, E1000_CTRL);

	/* Because we didn't get link via the internal auto-negotiation
	 * mechanism (we either forced link or we got link via PHY
	 * auto-neg), we have to manually enable/disable transmit and
	 * receive flow control.
	 *
	 * The "Case" statement below enables/disable flow control
	 * according to the "hw->fc.current_mode" parameter.
	 *
	 * The possible values of the "fc" parameter are:
	 *      0:  Flow control is completely disabled
	 *      1:  Rx flow control is enabled (we can receive pause
	 *          frames but not send pause frames).
	 *      2:  Tx flow control is enabled (we can send pause frames
	 *          but we do not receive pause frames).
	 *      3:  Both Rx and Tx flow control (symmetric) is enabled.
	 *  other:  No other values should be possible at this point.
	 */
	DEBUGOUT1("hw->fc.current_mode = %u\n", hw->fc.current_mode);

	switch (hw->fc.current_mode) {
	case e1000_fc_none:
		ctrl &= (~(E1000_CTRL_TFCE | E1000_CTRL_RFCE));
		break;
	case e1000_fc_rx_pause:
		ctrl &= (~E1000_CTRL_TFCE);
		ctrl |= E1000_CTRL_RFCE;
		break;
	case e1000_fc_tx_pause:
		ctrl &= (~E1000_CTRL_RFCE);
		ctrl |= E1000_CTRL_TFCE;
		break;
	case e1000_fc_full:
		ctrl |= (E1000_CTRL_TFCE | E1000_CTRL_RFCE);
		break;
	default:
		DEBUGOUT("Flow control param set incorrectly\n");
		return -E1000_ERR_CONFIG;
	}

	E1000_WRITE_REG(hw, E1000_CTRL, ctrl);

	return E1000_SUCCESS;
}

/**
 * e1000_config_fc_after_link_up_generic - Configures flow control after link
 * @hw: pointer to the HW structure
 *
 * Checks the status of auto-negotiation after link up to ensure that the
 * speed and duplex were not forced.  If the link needed to be forced, then
 * flow control needs to be forced also.  If auto-negotiation is enabled
 * and did not fail, then we configure flow control based on our link
 * partner.
 **/
s32 e1000_config_fc_after_link_up_generic(struct e1000_hw *hw)
{
	struct e1000_mac_info *mac = &hw->mac;
	s32 ret_val = E1000_SUCCESS;
	u32 pcs_status_reg, pcs_adv_reg, pcs_lp_ability_reg, pcs_ctrl_reg;
	u16 mii_status_reg, mii_nway_adv_reg, mii_nway_lp_ability_reg;
	u16 speed, duplex;

	DEBUGFUNC("e1000_config_fc_after_link_up_generic");

	/* Check for the case where we have fiber media and auto-neg failed
	 * so we had to force link.  In this case, we need to force the
	 * configuration of the MAC to match the "fc" parameter.
	 */
	if (mac->autoneg_failed) {
		if (hw->phy.media_type == e1000_media_type_fiber ||
		    hw->phy.media_type == e1000_media_type_internal_serdes)
			ret_val = e1000_force_mac_fc_generic(hw);
	} else {
		if (hw->phy.media_type == e1000_media_type_copper)
			ret_val = e1000_force_mac_fc_generic(hw);
	}

	if (ret_val) {
		DEBUGOUT("Error forcing flow control settings\n");
		return ret_val;
	}

	/* Check for the case where we have copper media and auto-neg is
	 * enabled.  In this case, we need to check and see if Auto-Neg
	 * has completed, and if so, how the PHY and link partner has
	 * flow control configured.
	 */
	if ((hw->phy.media_type == e1000_media_type_copper) && mac->autoneg) {
		/* Read the MII Status Register and check to see if AutoNeg
		 * has completed.  We read this twice because this reg has
		 * some "sticky" (latched) bits.
		 */
		ret_val = hw->phy.ops.read_reg(hw, PHY_STATUS, &mii_status_reg);
		if (ret_val)
			return ret_val;
		ret_val = hw->phy.ops.read_reg(hw, PHY_STATUS, &mii_status_reg);
		if (ret_val)
			return ret_val;

		/* Not an error: autoneg simply has not finished yet, so
		 * ret_val is still E1000_SUCCESS here.
		 */
		if (!(mii_status_reg & MII_SR_AUTONEG_COMPLETE)) {
			DEBUGOUT("Copper PHY and Auto Neg has not completed.\n");
			return ret_val;
		}

		/* The AutoNeg process has completed, so we now need to
		 * read both the Auto Negotiation Advertisement
		 * Register (Address 4) and the Auto_Negotiation Base
		 * Page Ability Register (Address 5) to determine how
		 * flow control was negotiated.
		 */
		ret_val = hw->phy.ops.read_reg(hw, PHY_AUTONEG_ADV,
					       &mii_nway_adv_reg);
		if (ret_val)
			return ret_val;
		ret_val = hw->phy.ops.read_reg(hw, PHY_LP_ABILITY,
					       &mii_nway_lp_ability_reg);
		if (ret_val)
			return ret_val;

		/* Two bits in the Auto Negotiation Advertisement Register
		 * (Address 4) and two bits in the Auto Negotiation Base
		 * Page Ability Register (Address 5) determine flow control
		 * for both the PHY and the link partner.  The following
		 * table, taken out of the IEEE 802.3ab/D6.0 dated March 25,
		 * 1999, describes these PAUSE resolution bits and how flow
		 * control is determined based upon these settings.
		 * NOTE:  DC = Don't Care
		 *
		 *   LOCAL DEVICE  |   LINK PARTNER
		 * PAUSE | ASM_DIR | PAUSE | ASM_DIR | NIC Resolution
		 *-------|---------|-------|---------|--------------------
		 *   0   |    0    |  DC   |   DC    | e1000_fc_none
		 *   0   |    1    |   0   |   DC    | e1000_fc_none
		 *   0   |    1    |   1   |    0    | e1000_fc_none
		 *   0   |    1    |   1   |    1    | e1000_fc_tx_pause
		 *   1   |    0    |   0   |   DC    | e1000_fc_none
		 *   1   |   DC    |   1   |   DC    | e1000_fc_full
		 *   1   |    1    |   0   |    0    | e1000_fc_none
		 *   1   |    1    |   0   |    1    | e1000_fc_rx_pause
		 *
		 * Are both PAUSE bits set to 1?  If so, this implies
		 * Symmetric Flow Control is enabled at both ends.  The
		 * ASM_DIR bits are irrelevant per the spec.
		 *
		 * For Symmetric Flow Control:
		 *
		 *   LOCAL DEVICE  |   LINK PARTNER
		 * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result
		 *-------|---------|-------|---------|--------------------
		 *   1   |   DC    |   1   |   DC    | e1000_fc_full
		 *
		 */
		if ((mii_nway_adv_reg & NWAY_AR_PAUSE) &&
		    (mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE)) {
			/* Now we need to check if the user selected Rx ONLY
			 * of pause frames.  In this case, we had to advertise
			 * FULL flow control because we could not advertise Rx
			 * ONLY. Hence, we must now check to see if we need to
			 * turn OFF the TRANSMISSION of PAUSE frames.
			 */
			if (hw->fc.requested_mode == e1000_fc_full) {
				hw->fc.current_mode = e1000_fc_full;
				DEBUGOUT("Flow Control = FULL.\n");
			} else {
				hw->fc.current_mode = e1000_fc_rx_pause;
				DEBUGOUT("Flow Control = Rx PAUSE frames only.\n");
			}
		}
		/* For receiving PAUSE frames ONLY.
		 *
		 *   LOCAL DEVICE  |   LINK PARTNER
		 * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result
		 *-------|---------|-------|---------|--------------------
		 *   0   |    1    |   1   |    1    | e1000_fc_tx_pause
		 */
		else if (!(mii_nway_adv_reg & NWAY_AR_PAUSE) &&
			 (mii_nway_adv_reg & NWAY_AR_ASM_DIR) &&
			 (mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE) &&
			 (mii_nway_lp_ability_reg & NWAY_LPAR_ASM_DIR)) {
			hw->fc.current_mode = e1000_fc_tx_pause;
			DEBUGOUT("Flow Control = Tx PAUSE frames only.\n");
		}
		/* For transmitting PAUSE frames ONLY.
		 *
		 *   LOCAL DEVICE  |   LINK PARTNER
		 * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result
		 *-------|---------|-------|---------|--------------------
		 *   1   |    1    |   0   |    1    | e1000_fc_rx_pause
		 */
		else if ((mii_nway_adv_reg & NWAY_AR_PAUSE) &&
			 (mii_nway_adv_reg & NWAY_AR_ASM_DIR) &&
			 !(mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE) &&
			 (mii_nway_lp_ability_reg & NWAY_LPAR_ASM_DIR)) {
			hw->fc.current_mode = e1000_fc_rx_pause;
			DEBUGOUT("Flow Control = Rx PAUSE frames only.\n");
		} else {
			/* Per the IEEE spec, at this point flow control
			 * should be disabled.
			 */
			hw->fc.current_mode = e1000_fc_none;
			DEBUGOUT("Flow Control = NONE.\n");
		}

		/* Now we need to do one last check...  If we auto-
		 * negotiated to HALF DUPLEX, flow control should not be
		 * enabled per IEEE 802.3 spec.
		 */
		ret_val = mac->ops.get_link_up_info(hw, &speed, &duplex);
		if (ret_val) {
			DEBUGOUT("Error getting link speed and duplex\n");
			return ret_val;
		}

		if (duplex == HALF_DUPLEX)
			hw->fc.current_mode = e1000_fc_none;

		/* Now we call a subroutine to actually force the MAC
		 * controller to use the correct flow control settings.
		 */
		ret_val = e1000_force_mac_fc_generic(hw);
		if (ret_val) {
			DEBUGOUT("Error forcing flow control settings\n");
			return ret_val;
		}
	}

	/* Check for the case where we have SerDes media and auto-neg is
	 * enabled.  In this case, we need to check and see if Auto-Neg
	 * has completed, and if so, how the PHY and link partner has
	 * flow control configured.
	 */
	if ((hw->phy.media_type == e1000_media_type_internal_serdes) &&
	    mac->autoneg) {
		/* Read the PCS_LSTS and check to see if AutoNeg
		 * has completed.
		 */
		pcs_status_reg = E1000_READ_REG(hw, E1000_PCS_LSTAT);

		if (!(pcs_status_reg & E1000_PCS_LSTS_AN_COMPLETE)) {
			DEBUGOUT("PCS Auto Neg has not completed.\n");
			return ret_val;
		}

		/* The AutoNeg process has completed, so we now need to
		 * read both the Auto Negotiation Advertisement
		 * Register (PCS_ANADV) and the Auto_Negotiation Base
		 * Page Ability Register (PCS_LPAB) to determine how
		 * flow control was negotiated.
		 */
		pcs_adv_reg = E1000_READ_REG(hw, E1000_PCS_ANADV);
		pcs_lp_ability_reg = E1000_READ_REG(hw, E1000_PCS_LPAB);

		/* Two bits in the Auto Negotiation Advertisement Register
		 * (PCS_ANADV) and two bits in the Auto Negotiation Base
		 * Page Ability Register (PCS_LPAB) determine flow control
		 * for both the PHY and the link partner.  The following
		 * table, taken out of the IEEE 802.3ab/D6.0 dated March 25,
		 * 1999, describes these PAUSE resolution bits and how flow
		 * control is determined based upon these settings.
		 * NOTE:  DC = Don't Care
		 *
		 *   LOCAL DEVICE  |   LINK PARTNER
		 * PAUSE | ASM_DIR | PAUSE | ASM_DIR | NIC Resolution
		 *-------|---------|-------|---------|--------------------
		 *   0   |    0    |  DC   |   DC    | e1000_fc_none
		 *   0   |    1    |   0   |   DC    | e1000_fc_none
		 *   0   |    1    |   1   |    0    | e1000_fc_none
		 *   0   |    1    |   1   |    1    | e1000_fc_tx_pause
		 *   1   |    0    |   0   |   DC    | e1000_fc_none
		 *   1   |   DC    |   1   |   DC    | e1000_fc_full
		 *   1   |    1    |   0   |    0    | e1000_fc_none
		 *   1   |    1    |   0   |    1    | e1000_fc_rx_pause
		 *
		 * Are both PAUSE bits set to 1?  If so, this implies
		 * Symmetric Flow Control is enabled at both ends.  The
		 * ASM_DIR bits are irrelevant per the spec.
		 *
		 * For Symmetric Flow Control:
		 *
		 *   LOCAL DEVICE  |   LINK PARTNER
		 * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result
		 *-------|---------|-------|---------|--------------------
		 *   1   |   DC    |   1   |   DC    | e1000_fc_full
		 *
		 */
		if ((pcs_adv_reg & E1000_TXCW_PAUSE) &&
		    (pcs_lp_ability_reg & E1000_TXCW_PAUSE)) {
			/* Now we need to check if the user selected Rx ONLY
			 * of pause frames.  In this case, we had to advertise
			 * FULL flow control because we could not advertise Rx
			 * ONLY. Hence, we must now check to see if we need to
			 * turn OFF the TRANSMISSION of PAUSE frames.
			 */
			if (hw->fc.requested_mode == e1000_fc_full) {
				hw->fc.current_mode = e1000_fc_full;
				DEBUGOUT("Flow Control = FULL.\n");
			} else {
				hw->fc.current_mode = e1000_fc_rx_pause;
				DEBUGOUT("Flow Control = Rx PAUSE frames only.\n");
			}
		}
		/* For receiving PAUSE frames ONLY.
		 *
		 *   LOCAL DEVICE  |   LINK PARTNER
		 * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result
		 *-------|---------|-------|---------|--------------------
		 *   0   |    1    |   1   |    1    | e1000_fc_tx_pause
		 */
		else if (!(pcs_adv_reg & E1000_TXCW_PAUSE) &&
			 (pcs_adv_reg & E1000_TXCW_ASM_DIR) &&
			 (pcs_lp_ability_reg & E1000_TXCW_PAUSE) &&
			 (pcs_lp_ability_reg & E1000_TXCW_ASM_DIR)) {
			hw->fc.current_mode = e1000_fc_tx_pause;
			DEBUGOUT("Flow Control = Tx PAUSE frames only.\n");
		}
		/* For transmitting PAUSE frames ONLY.
		 *
		 *   LOCAL DEVICE  |   LINK PARTNER
		 * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result
		 *-------|---------|-------|---------|--------------------
		 *   1   |    1    |   0   |    1    | e1000_fc_rx_pause
		 */
		else if ((pcs_adv_reg & E1000_TXCW_PAUSE) &&
			 (pcs_adv_reg & E1000_TXCW_ASM_DIR) &&
			 !(pcs_lp_ability_reg & E1000_TXCW_PAUSE) &&
			 (pcs_lp_ability_reg & E1000_TXCW_ASM_DIR)) {
			hw->fc.current_mode = e1000_fc_rx_pause;
			DEBUGOUT("Flow Control = Rx PAUSE frames only.\n");
		} else {
			/* Per the IEEE spec, at this point flow control
			 * should be disabled.
			 */
			hw->fc.current_mode = e1000_fc_none;
			DEBUGOUT("Flow Control = NONE.\n");
		}

		/* Now we call a subroutine to actually force the MAC
		 * controller to use the correct flow control settings.
		 */
		pcs_ctrl_reg = E1000_READ_REG(hw, E1000_PCS_LCTL);
		pcs_ctrl_reg |= E1000_PCS_LCTL_FORCE_FCTRL;
		E1000_WRITE_REG(hw, E1000_PCS_LCTL, pcs_ctrl_reg);

		ret_val = e1000_force_mac_fc_generic(hw);
		if (ret_val) {
			DEBUGOUT("Error forcing flow control settings\n");
			return ret_val;
		}
	}

	return E1000_SUCCESS;
}

/**
 * e1000_get_speed_and_duplex_copper_generic - Retrieve current speed/duplex
 * @hw: pointer to the HW structure
 * @speed: stores the current speed
 * @duplex: stores the current duplex
 *
 * Read the status register for the current speed/duplex and store the current
 * speed and duplex for copper connections.
 **/
s32 e1000_get_speed_and_duplex_copper_generic(struct e1000_hw *hw, u16 *speed,
					      u16 *duplex)
{
	u32 status;

	DEBUGFUNC("e1000_get_speed_and_duplex_copper_generic");

	status = E1000_READ_REG(hw, E1000_STATUS);
	if (status & E1000_STATUS_SPEED_1000) {
		*speed = SPEED_1000;
		DEBUGOUT("1000 Mbs, ");
	} else if (status & E1000_STATUS_SPEED_100) {
		*speed = SPEED_100;
		DEBUGOUT("100 Mbs, ");
	} else {
		*speed = SPEED_10;
		DEBUGOUT("10 Mbs, ");
	}

	if (status & E1000_STATUS_FD) {
		*duplex = FULL_DUPLEX;
		DEBUGOUT("Full Duplex\n");
	} else {
		*duplex = HALF_DUPLEX;
		DEBUGOUT("Half Duplex\n");
	}

	return E1000_SUCCESS;
}

/**
 * e1000_get_speed_and_duplex_fiber_serdes_generic - Retrieve current speed/duplex
 * @hw: pointer to the HW structure
 * @speed: stores the current speed
 * @duplex: stores the current duplex
 *
 * Sets the speed and duplex to gigabit full duplex (the only possible option)
 * for fiber/serdes links.
 **/
s32 e1000_get_speed_and_duplex_fiber_serdes_generic(struct e1000_hw E1000_UNUSEDARG *hw,
						    u16 *speed, u16 *duplex)
{
	DEBUGFUNC("e1000_get_speed_and_duplex_fiber_serdes_generic");

	/* Fiber/serdes links only run at gigabit full duplex. */
	*speed = SPEED_1000;
	*duplex = FULL_DUPLEX;

	return E1000_SUCCESS;
}

/**
 * e1000_get_hw_semaphore_generic - Acquire hardware semaphore
 * @hw: pointer to the HW structure
 *
 * Acquire the HW semaphore to access the PHY or NVM
 **/
s32 e1000_get_hw_semaphore_generic(struct e1000_hw *hw)
{
	u32 swsm;
	/* One 50 usec attempt per NVM word, plus one. */
	s32 timeout = hw->nvm.word_size + 1;
	s32 i = 0;

	DEBUGFUNC("e1000_get_hw_semaphore_generic");

	/* Get the SW semaphore */
	while (i < timeout) {
		swsm = E1000_READ_REG(hw, E1000_SWSM);
		if (!(swsm & E1000_SWSM_SMBI))
			break;

		usec_delay(50);
		i++;
	}

	if (i == timeout) {
		DEBUGOUT("Driver can't access device - SMBI bit is set.\n");
		return -E1000_ERR_NVM;
	}

	/* Get the FW semaphore. */
	for (i = 0; i < timeout; i++) {
		swsm = E1000_READ_REG(hw, E1000_SWSM);
		E1000_WRITE_REG(hw, E1000_SWSM, swsm | E1000_SWSM_SWESMBI);

		/* Semaphore acquired if bit latched */
		if (E1000_READ_REG(hw, E1000_SWSM) & E1000_SWSM_SWESMBI)
			break;

		usec_delay(50);
	}

	if (i == timeout) {
		/* Release semaphores */
		e1000_put_hw_semaphore_generic(hw);
		DEBUGOUT("Driver can't access the NVM\n");
		return -E1000_ERR_NVM;
	}

	return E1000_SUCCESS;
}

/**
 * e1000_put_hw_semaphore_generic - Release hardware semaphore
 * @hw: pointer to the HW structure
 *
 * Release hardware semaphore used to access the PHY or NVM
 **/
void e1000_put_hw_semaphore_generic(struct e1000_hw *hw)
{
	u32 swsm;

	DEBUGFUNC("e1000_put_hw_semaphore_generic");

	swsm = E1000_READ_REG(hw, E1000_SWSM);

	/* Clear both the SW (SMBI) and FW (SWESMBI) semaphore bits. */
	swsm &= ~(E1000_SWSM_SMBI | E1000_SWSM_SWESMBI);

	E1000_WRITE_REG(hw, E1000_SWSM, swsm);
}

/**
 * e1000_get_auto_rd_done_generic - Check for auto read completion
 * @hw: pointer to the HW structure
 *
 * Check EEPROM for Auto Read done bit.
 **/
s32 e1000_get_auto_rd_done_generic(struct e1000_hw *hw)
{
	s32 i = 0;

	DEBUGFUNC("e1000_get_auto_rd_done_generic");

	while (i < AUTO_READ_DONE_TIMEOUT) {
		if (E1000_READ_REG(hw, E1000_EECD) & E1000_EECD_AUTO_RD)
			break;
		msec_delay(1);
		i++;
	}

	if (i == AUTO_READ_DONE_TIMEOUT) {
		DEBUGOUT("Auto read by HW from NVM has not completed.\n");
		return -E1000_ERR_RESET;
	}

	return E1000_SUCCESS;
}

/**
 * e1000_valid_led_default_generic - Verify a valid default LED config
 * @hw: pointer to the HW structure
 * @data: pointer to the NVM (EEPROM)
 *
 * Read the EEPROM for the current default LED configuration.  If the
 * LED configuration is not valid, set to a valid LED configuration.
 **/
s32 e1000_valid_led_default_generic(struct e1000_hw *hw, u16 *data)
{
	s32 ret_val;

	DEBUGFUNC("e1000_valid_led_default_generic");

	ret_val = hw->nvm.ops.read(hw, NVM_ID_LED_SETTINGS, 1, data);
	if (ret_val) {
		DEBUGOUT("NVM Read Error\n");
		return ret_val;
	}

	if (*data == ID_LED_RESERVED_0000 || *data == ID_LED_RESERVED_FFFF)
		*data = ID_LED_DEFAULT;

	return E1000_SUCCESS;
}

/**
 * e1000_id_led_init_generic - Derive LEDCTL values from the NVM ID LED settings
 * @hw: pointer to the HW structure
 *
 * Saves the default LEDCTL register value and computes the LEDCTL values to
 * use for the LED "on" (ledctl_mode1) and "off" (ledctl_mode2) states from
 * the ID LED configuration read out of the NVM.
 **/
s32 e1000_id_led_init_generic(struct e1000_hw *hw)
{
	struct e1000_mac_info *mac = &hw->mac;
	s32 ret_val;
	const u32 ledctl_mask = 0x000000FF;
	const u32 ledctl_on = E1000_LEDCTL_MODE_LED_ON;
	const u32 ledctl_off = E1000_LEDCTL_MODE_LED_OFF;
	u16 data, i, temp;
	const u16 led_mask = 0x0F;

	DEBUGFUNC("e1000_id_led_init_generic");

	ret_val = hw->nvm.ops.valid_led_default(hw, &data);
	if (ret_val)
		return ret_val;

	mac->ledctl_default = E1000_READ_REG(hw, E1000_LEDCTL);
	mac->ledctl_mode1 = mac->ledctl_default;
	mac->ledctl_mode2 = mac->ledctl_default;

	/* Each nibble of 'data' describes one of four LEDs; each LED owns
	 * one byte of LEDCTL.
	 */
	for (i = 0; i < 4; i++) {
		temp = (data >> (i << 2)) & led_mask;
		switch (temp) {
		case ID_LED_ON1_DEF2:
		case ID_LED_ON1_ON2:
		case ID_LED_ON1_OFF2:
			mac->ledctl_mode1 &= ~(ledctl_mask << (i
							       << 3));
			mac->ledctl_mode1 |= ledctl_on << (i << 3);
			break;
		case ID_LED_OFF1_DEF2:
		case ID_LED_OFF1_ON2:
		case ID_LED_OFF1_OFF2:
			mac->ledctl_mode1 &= ~(ledctl_mask << (i << 3));
			mac->ledctl_mode1 |= ledctl_off << (i << 3);
			break;
		default:
			/* Do nothing */
			break;
		}
		switch (temp) {
		case ID_LED_DEF1_ON2:
		case ID_LED_ON1_ON2:
		case ID_LED_OFF1_ON2:
			mac->ledctl_mode2 &= ~(ledctl_mask << (i << 3));
			mac->ledctl_mode2 |= ledctl_on << (i << 3);
			break;
		case ID_LED_DEF1_OFF2:
		case ID_LED_ON1_OFF2:
		case ID_LED_OFF1_OFF2:
			mac->ledctl_mode2 &= ~(ledctl_mask << (i << 3));
			mac->ledctl_mode2 |= ledctl_off << (i << 3);
			break;
		default:
			/* Do nothing */
			break;
		}
	}

	return E1000_SUCCESS;
}

/**
 * e1000_setup_led_generic - Configures SW controllable LED
 * @hw: pointer to the HW structure
 *
 * This prepares the SW controllable LED for use and saves the current state
 * of the LED so it can be later restored.
 **/
s32 e1000_setup_led_generic(struct e1000_hw *hw)
{
	u32 ledctl;

	DEBUGFUNC("e1000_setup_led_generic");

	/* Only run when this generic routine is the installed setup_led op;
	 * a device-specific override must not reach this code.
	 */
	if (hw->mac.ops.setup_led != e1000_setup_led_generic)
		return -E1000_ERR_CONFIG;

	if (hw->phy.media_type == e1000_media_type_fiber) {
		ledctl = E1000_READ_REG(hw, E1000_LEDCTL);
		hw->mac.ledctl_default = ledctl;
		/* Turn off LED0 */
		ledctl &= ~(E1000_LEDCTL_LED0_IVRT | E1000_LEDCTL_LED0_BLINK |
			    E1000_LEDCTL_LED0_MODE_MASK);
		ledctl |= (E1000_LEDCTL_MODE_LED_OFF <<
			   E1000_LEDCTL_LED0_MODE_SHIFT);
		E1000_WRITE_REG(hw, E1000_LEDCTL, ledctl);
	} else if (hw->phy.media_type == e1000_media_type_copper) {
		E1000_WRITE_REG(hw, E1000_LEDCTL, hw->mac.ledctl_mode1);
	}

	return E1000_SUCCESS;
}

/**
 * e1000_cleanup_led_generic - Set LED config to default operation
 * @hw: pointer to the HW structure
 *
 * Remove the current LED configuration and set the LED configuration
 * to the default value, saved from the EEPROM.
 **/
s32 e1000_cleanup_led_generic(struct e1000_hw *hw)
{
	DEBUGFUNC("e1000_cleanup_led_generic");

	E1000_WRITE_REG(hw, E1000_LEDCTL, hw->mac.ledctl_default);
	return E1000_SUCCESS;
}

/**
 * e1000_blink_led_generic - Blink LED
 * @hw: pointer to the HW structure
 *
 * Blink the LEDs which are set to be on.
 **/
s32 e1000_blink_led_generic(struct e1000_hw *hw)
{
	u32 ledctl_blink = 0;
	u32 i;

	DEBUGFUNC("e1000_blink_led_generic");

	if (hw->phy.media_type == e1000_media_type_fiber) {
		/* always blink LED0 for PCI-E fiber */
		ledctl_blink = E1000_LEDCTL_LED0_BLINK |
		     (E1000_LEDCTL_MODE_LED_ON << E1000_LEDCTL_LED0_MODE_SHIFT);
	} else {
		/* Set the blink bit for each LED that's "on" (0x0E)
		 * (or "off" if inverted) in ledctl_mode2.  The blink
		 * logic in hardware only works when mode is set to "on"
		 * so it must be changed accordingly when the mode is
		 * "off" and inverted.
		 */
		ledctl_blink = hw->mac.ledctl_mode2;
		for (i = 0; i < 32; i += 8) {
			u32 mode = (hw->mac.ledctl_mode2 >> i) &
			    E1000_LEDCTL_LED0_MODE_MASK;
			u32 led_default = hw->mac.ledctl_default >> i;

			if ((!(led_default & E1000_LEDCTL_LED0_IVRT) &&
			     (mode == E1000_LEDCTL_MODE_LED_ON)) ||
			    ((led_default & E1000_LEDCTL_LED0_IVRT) &&
			     (mode == E1000_LEDCTL_MODE_LED_OFF))) {
				ledctl_blink &=
				    ~(E1000_LEDCTL_LED0_MODE_MASK << i);
				ledctl_blink |= (E1000_LEDCTL_LED0_BLINK |
						 E1000_LEDCTL_MODE_LED_ON) << i;
			}
		}
	}

	E1000_WRITE_REG(hw, E1000_LEDCTL, ledctl_blink);

	return E1000_SUCCESS;
}

/**
 * e1000_led_on_generic - Turn LED on
 * @hw: pointer to the HW structure
 *
 * Turn LED on.
 **/
s32 e1000_led_on_generic(struct e1000_hw *hw)
{
	u32 ctrl;

	DEBUGFUNC("e1000_led_on_generic");

	switch (hw->phy.media_type) {
	case e1000_media_type_fiber:
		ctrl = E1000_READ_REG(hw, E1000_CTRL);
		ctrl &= ~E1000_CTRL_SWDPIN0;
		ctrl |= E1000_CTRL_SWDPIO0;
		E1000_WRITE_REG(hw, E1000_CTRL, ctrl);
		break;
	case e1000_media_type_copper:
		E1000_WRITE_REG(hw, E1000_LEDCTL, hw->mac.ledctl_mode2);
		break;
	default:
		break;
	}

	return E1000_SUCCESS;
}

/**
 * e1000_led_off_generic - Turn LED off
 * @hw: pointer to the HW structure
 *
 * Turn LED off.
 **/
s32 e1000_led_off_generic(struct e1000_hw *hw)
{
	u32 ctrl;

	DEBUGFUNC("e1000_led_off_generic");

	switch (hw->phy.media_type) {
	case e1000_media_type_fiber:
		ctrl = E1000_READ_REG(hw, E1000_CTRL);
		ctrl |= E1000_CTRL_SWDPIN0;
		ctrl |= E1000_CTRL_SWDPIO0;
		E1000_WRITE_REG(hw, E1000_CTRL, ctrl);
		break;
	case e1000_media_type_copper:
		E1000_WRITE_REG(hw, E1000_LEDCTL, hw->mac.ledctl_mode1);
		break;
	default:
		break;
	}

	return E1000_SUCCESS;
}

/**
 * e1000_set_pcie_no_snoop_generic - Set PCI-express capabilities
 * @hw: pointer to the HW structure
 * @no_snoop: bitmap of snoop events
 *
 * Set the PCI-express register to snoop for events enabled in 'no_snoop'.
 **/
void e1000_set_pcie_no_snoop_generic(struct e1000_hw *hw, u32 no_snoop)
{
	u32 gcr;

	DEBUGFUNC("e1000_set_pcie_no_snoop_generic");

	if (hw->bus.type != e1000_bus_type_pci_express)
		return;

	if (no_snoop) {
		gcr = E1000_READ_REG(hw, E1000_GCR);
		gcr &= ~(PCIE_NO_SNOOP_ALL);
		gcr |= no_snoop;
		E1000_WRITE_REG(hw, E1000_GCR, gcr);
	}
}

/**
 * e1000_disable_pcie_master_generic - Disables PCI-express master access
 * @hw: pointer to the HW structure
 *
 * Returns E1000_SUCCESS if successful, else returns -10
 * (-E1000_ERR_MASTER_REQUESTS_PENDING) if master disable bit has not caused
 * the master requests to be disabled.
 *
 * Disables PCI-Express master access and verifies there are no pending
 * requests.
 **/
s32 e1000_disable_pcie_master_generic(struct e1000_hw *hw)
{
	u32 ctrl;
	s32 timeout = MASTER_DISABLE_TIMEOUT;

	DEBUGFUNC("e1000_disable_pcie_master_generic");

	if (hw->bus.type != e1000_bus_type_pci_express)
		return E1000_SUCCESS;

	ctrl = E1000_READ_REG(hw, E1000_CTRL);
	ctrl |= E1000_CTRL_GIO_MASTER_DISABLE;
	E1000_WRITE_REG(hw, E1000_CTRL, ctrl);

	/* Also bail out early if the device has been surprise-removed
	 * (E1000_REMOVED), so we don't spin on reads of a missing device.
	 */
	while (timeout) {
		if (!(E1000_READ_REG(hw, E1000_STATUS) &
		      E1000_STATUS_GIO_MASTER_ENABLE) ||
		      E1000_REMOVED(hw->hw_addr))
			break;
		usec_delay(100);
		timeout--;
	}

	if (!timeout) {
		DEBUGOUT("Master requests are pending.\n");
		return -E1000_ERR_MASTER_REQUESTS_PENDING;
	}

	return E1000_SUCCESS;
}

/**
 * e1000_reset_adaptive_generic - Reset Adaptive Interframe Spacing
 * @hw: pointer to the HW structure
 *
 * Reset the Adaptive Interframe Spacing throttle to default values.
 **/
void e1000_reset_adaptive_generic(struct e1000_hw *hw)
{
	struct e1000_mac_info *mac = &hw->mac;

	DEBUGFUNC("e1000_reset_adaptive_generic");

	if (!mac->adaptive_ifs) {
		DEBUGOUT("Not in Adaptive IFS mode!\n");
		return;
	}

	mac->current_ifs_val = 0;
	mac->ifs_min_val = IFS_MIN;
	mac->ifs_max_val = IFS_MAX;
	mac->ifs_step_size = IFS_STEP;
	mac->ifs_ratio = IFS_RATIO;

	mac->in_ifs_mode = false;
	E1000_WRITE_REG(hw, E1000_AIT, 0);
}

/**
 * e1000_update_adaptive_generic - Update Adaptive Interframe Spacing
 * @hw: pointer to the HW structure
 *
 * Update the Adaptive Interframe Spacing Throttle value based on the
 * time between transmitted packets and time between collisions.
 **/
void e1000_update_adaptive_generic(struct e1000_hw *hw)
{
	struct e1000_mac_info *mac = &hw->mac;

	DEBUGFUNC("e1000_update_adaptive_generic");

	if (!mac->adaptive_ifs) {
		DEBUGOUT("Not in Adaptive IFS mode!\n");
		return;
	}

	if ((mac->collision_delta * mac->ifs_ratio) > mac->tx_packet_delta) {
		if (mac->tx_packet_delta > MIN_NUM_XMITS) {
			mac->in_ifs_mode = true;
			if (mac->current_ifs_val < mac->ifs_max_val) {
				/* Start at the minimum IFS, then step up. */
				if (!mac->current_ifs_val)
					mac->current_ifs_val = mac->ifs_min_val;
				else
					mac->current_ifs_val +=
						mac->ifs_step_size;
				E1000_WRITE_REG(hw, E1000_AIT,
						mac->current_ifs_val);
			}
		}
	} else {
		if (mac->in_ifs_mode &&
		    (mac->tx_packet_delta <= MIN_NUM_XMITS)) {
			mac->current_ifs_val = 0;
			mac->in_ifs_mode = false;
			E1000_WRITE_REG(hw, E1000_AIT, 0);
		}
	}
}

/**
 * e1000_validate_mdi_setting_generic - Verify MDI/MDIx settings
 * @hw: pointer to the HW structure
 *
 * Verify that when not using auto-negotiation that MDI/MDIx is correctly
 * set, which is forced to MDI mode only.
 **/
static s32 e1000_validate_mdi_setting_generic(struct e1000_hw *hw)
{
	DEBUGFUNC("e1000_validate_mdi_setting_generic");

	/* With autoneg off, only forced MDI (1) is allowed; reset an invalid
	 * setting to MDI and report the configuration error.
	 */
	if (!hw->mac.autoneg && (hw->phy.mdix == 0 || hw->phy.mdix == 3)) {
		DEBUGOUT("Invalid MDI setting detected\n");
		hw->phy.mdix = 1;
		return -E1000_ERR_CONFIG;
	}

	return E1000_SUCCESS;
}

/**
 * e1000_validate_mdi_setting_crossover_generic - Verify MDI/MDIx settings
 * @hw: pointer to the HW structure
 *
 * Validate the MDI/MDIx setting, allowing for auto-crossover during forced
 * operation.
 **/
s32 e1000_validate_mdi_setting_crossover_generic(struct e1000_hw E1000_UNUSEDARG *hw)
{
	DEBUGFUNC("e1000_validate_mdi_setting_crossover_generic");

	return E1000_SUCCESS;
}

/**
 * e1000_write_8bit_ctrl_reg_generic - Write a 8bit CTRL register
 * @hw: pointer to the HW structure
 * @reg: 32bit register offset such as E1000_SCTL
 * @offset: register offset to write to
 * @data: data to write at register offset
 *
 * Writes an address/data control type register.  There are several of these
 * and they all have the format address << 8 | data and bit 31 is polled for
 * completion.
 **/
s32 e1000_write_8bit_ctrl_reg_generic(struct e1000_hw *hw, u32 reg,
				      u32 offset, u8 data)
{
	u32 i, regvalue = 0;

	DEBUGFUNC("e1000_write_8bit_ctrl_reg_generic");

	/* Set up the address and data */
	regvalue = ((u32)data) | (offset << E1000_GEN_CTL_ADDRESS_SHIFT);
	E1000_WRITE_REG(hw, reg, regvalue);

	/* Poll the ready bit to see if the operation completed */
	for (i = 0; i < E1000_GEN_POLL_TIMEOUT; i++) {
		usec_delay(5);
		regvalue = E1000_READ_REG(hw, reg);
		if (regvalue & E1000_GEN_CTL_READY)
			break;
	}
	if (!(regvalue & E1000_GEN_CTL_READY)) {
		DEBUGOUT1("Reg %08x did not indicate ready\n", reg);
		return -E1000_ERR_PHY;
	}

	return E1000_SUCCESS;
}
diff --git a/drivers/staging/igb_avb/e1000_mac.h b/drivers/staging/igb_avb/e1000_mac.h
new file mode 100644
index 0000000000000..a3878361095e7
--- /dev/null
+++ b/drivers/staging/igb_avb/e1000_mac.h
@@ -0,0 +1,81 @@
/*******************************************************************************

  Intel(R) Gigabit Ethernet Linux driver
  Copyright(c) 2007-2015 Intel Corporation.

  This program is free software; you can redistribute it and/or modify it
  under the terms and conditions of the GNU General Public License,
  version 2, as published by the Free Software Foundation.
+ + This program is distributed in the hope it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. + + The full GNU General Public License is included in this distribution in + the file called "COPYING". + + Contact Information: + Linux NICS + e1000-devel Mailing List + Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + +*******************************************************************************/ + +#ifndef _E1000_MAC_H_ +#define _E1000_MAC_H_ + +void e1000_init_mac_ops_generic(struct e1000_hw *hw); +#ifndef E1000_REMOVED +#define E1000_REMOVED(a) (0) +#endif /* E1000_REMOVED */ +void e1000_null_mac_generic(struct e1000_hw *hw); +s32 e1000_null_ops_generic(struct e1000_hw *hw); +s32 e1000_null_link_info(struct e1000_hw *hw, u16 *s, u16 *d); +bool e1000_null_mng_mode(struct e1000_hw *hw); +void e1000_null_update_mc(struct e1000_hw *hw, u8 *h, u32 a); +void e1000_null_write_vfta(struct e1000_hw *hw, u32 a, u32 b); +int e1000_null_rar_set(struct e1000_hw *hw, u8 *h, u32 a); +s32 e1000_blink_led_generic(struct e1000_hw *hw); +s32 e1000_check_for_copper_link_generic(struct e1000_hw *hw); +s32 e1000_check_for_fiber_link_generic(struct e1000_hw *hw); +s32 e1000_check_for_serdes_link_generic(struct e1000_hw *hw); +s32 e1000_cleanup_led_generic(struct e1000_hw *hw); +s32 e1000_config_fc_after_link_up_generic(struct e1000_hw *hw); +s32 e1000_disable_pcie_master_generic(struct e1000_hw *hw); +s32 e1000_force_mac_fc_generic(struct e1000_hw *hw); +s32 e1000_get_auto_rd_done_generic(struct e1000_hw *hw); +s32 e1000_get_bus_info_pcie_generic(struct e1000_hw *hw); +void e1000_set_lan_id_single_port(struct e1000_hw *hw); +s32 e1000_get_hw_semaphore_generic(struct e1000_hw *hw); +s32 e1000_get_speed_and_duplex_copper_generic(struct e1000_hw *hw, u16 *speed, + u16 *duplex); +s32 
e1000_get_speed_and_duplex_fiber_serdes_generic(struct e1000_hw *hw, + u16 *speed, u16 *duplex); +s32 e1000_id_led_init_generic(struct e1000_hw *hw); +s32 e1000_led_on_generic(struct e1000_hw *hw); +s32 e1000_led_off_generic(struct e1000_hw *hw); +void e1000_update_mc_addr_list_generic(struct e1000_hw *hw, + u8 *mc_addr_list, u32 mc_addr_count); +s32 e1000_set_fc_watermarks_generic(struct e1000_hw *hw); +s32 e1000_setup_fiber_serdes_link_generic(struct e1000_hw *hw); +s32 e1000_setup_led_generic(struct e1000_hw *hw); +s32 e1000_setup_link_generic(struct e1000_hw *hw); +s32 e1000_validate_mdi_setting_crossover_generic(struct e1000_hw *hw); +s32 e1000_write_8bit_ctrl_reg_generic(struct e1000_hw *hw, u32 reg, + u32 offset, u8 data); + +u32 e1000_hash_mc_addr_generic(struct e1000_hw *hw, u8 *mc_addr); + +void e1000_clear_hw_cntrs_base_generic(struct e1000_hw *hw); +void e1000_clear_vfta_generic(struct e1000_hw *hw); +void e1000_init_rx_addrs_generic(struct e1000_hw *hw, u16 rar_count); +void e1000_pcix_mmrbc_workaround_generic(struct e1000_hw *hw); +void e1000_put_hw_semaphore_generic(struct e1000_hw *hw); +s32 e1000_check_alt_mac_addr_generic(struct e1000_hw *hw); +void e1000_reset_adaptive_generic(struct e1000_hw *hw); +void e1000_set_pcie_no_snoop_generic(struct e1000_hw *hw, u32 no_snoop); +void e1000_update_adaptive_generic(struct e1000_hw *hw); +void e1000_write_vfta_generic(struct e1000_hw *hw, u32 offset, u32 value); + +#endif diff --git a/drivers/staging/igb_avb/e1000_manage.c b/drivers/staging/igb_avb/e1000_manage.c new file mode 100644 index 0000000000000..36671fbdab136 --- /dev/null +++ b/drivers/staging/igb_avb/e1000_manage.c @@ -0,0 +1,552 @@ +/******************************************************************************* + + Intel(R) Gigabit Ethernet Linux driver + Copyright(c) 2007-2015 Intel Corporation. 
+ + This program is free software; you can redistribute it and/or modify it + under the terms and conditions of the GNU General Public License, + version 2, as published by the Free Software Foundation. + + This program is distributed in the hope it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. + + The full GNU General Public License is included in this distribution in + the file called "COPYING". + + Contact Information: + Linux NICS + e1000-devel Mailing List + Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + +*******************************************************************************/ + +#include "e1000_api.h" + +/** + * e1000_calculate_checksum - Calculate checksum for buffer + * @buffer: pointer to EEPROM + * @length: size of EEPROM to calculate a checksum for + * + * Calculates the checksum for some buffer on a specified length. The + * checksum calculated is returned. + **/ +u8 e1000_calculate_checksum(u8 *buffer, u32 length) +{ + u32 i; + u8 sum = 0; + + DEBUGFUNC("e1000_calculate_checksum"); + + if (!buffer) + return 0; + + for (i = 0; i < length; i++) + sum += buffer[i]; + + return (u8) (0 - sum); +} + +/** + * e1000_mng_enable_host_if_generic - Checks host interface is enabled + * @hw: pointer to the HW structure + * + * Returns E1000_success upon success, else E1000_ERR_HOST_INTERFACE_COMMAND + * + * This function checks whether the HOST IF is enabled for command operation + * and also checks whether the previous command is completed. It busy waits + * in case of previous command is not completed. 
+ **/ +s32 e1000_mng_enable_host_if_generic(struct e1000_hw *hw) +{ + u32 hicr; + u8 i; + + DEBUGFUNC("e1000_mng_enable_host_if_generic"); + + if (!hw->mac.arc_subsystem_valid) { + DEBUGOUT("ARC subsystem not valid.\n"); + return -E1000_ERR_HOST_INTERFACE_COMMAND; + } + + /* Check that the host interface is enabled. */ + hicr = E1000_READ_REG(hw, E1000_HICR); + if (!(hicr & E1000_HICR_EN)) { + DEBUGOUT("E1000_HOST_EN bit disabled.\n"); + return -E1000_ERR_HOST_INTERFACE_COMMAND; + } + /* check the previous command is completed */ + for (i = 0; i < E1000_MNG_DHCP_COMMAND_TIMEOUT; i++) { + hicr = E1000_READ_REG(hw, E1000_HICR); + if (!(hicr & E1000_HICR_C)) + break; + msec_delay_irq(1); + } + + if (i == E1000_MNG_DHCP_COMMAND_TIMEOUT) { + DEBUGOUT("Previous command timeout failed .\n"); + return -E1000_ERR_HOST_INTERFACE_COMMAND; + } + + return E1000_SUCCESS; +} + +/** + * e1000_check_mng_mode_generic - Generic check management mode + * @hw: pointer to the HW structure + * + * Reads the firmware semaphore register and returns true (>0) if + * manageability is enabled, else false (0). + **/ +bool e1000_check_mng_mode_generic(struct e1000_hw *hw) +{ + u32 fwsm = E1000_READ_REG(hw, E1000_FWSM); + + DEBUGFUNC("e1000_check_mng_mode_generic"); + + + return (fwsm & E1000_FWSM_MODE_MASK) == + (E1000_MNG_IAMT_MODE << E1000_FWSM_MODE_SHIFT); +} + +/** + * e1000_enable_tx_pkt_filtering_generic - Enable packet filtering on Tx + * @hw: pointer to the HW structure + * + * Enables packet filtering on transmit packets if manageability is enabled + * and host interface is enabled. 
+ **/ +bool e1000_enable_tx_pkt_filtering_generic(struct e1000_hw *hw) +{ + struct e1000_host_mng_dhcp_cookie *hdr = &hw->mng_cookie; + u32 *buffer = (u32 *)&hw->mng_cookie; + u32 offset; + s32 ret_val, hdr_csum, csum; + u8 i, len; + + DEBUGFUNC("e1000_enable_tx_pkt_filtering_generic"); + + hw->mac.tx_pkt_filtering = true; + + /* No manageability, no filtering */ + if (!hw->mac.ops.check_mng_mode(hw)) { + hw->mac.tx_pkt_filtering = false; + return hw->mac.tx_pkt_filtering; + } + + /* If we can't read from the host interface for whatever + * reason, disable filtering. + */ + ret_val = e1000_mng_enable_host_if_generic(hw); + if (ret_val != E1000_SUCCESS) { + hw->mac.tx_pkt_filtering = false; + return hw->mac.tx_pkt_filtering; + } + + /* Read in the header. Length and offset are in dwords. */ + len = E1000_MNG_DHCP_COOKIE_LENGTH >> 2; + offset = E1000_MNG_DHCP_COOKIE_OFFSET >> 2; + for (i = 0; i < len; i++) + *(buffer + i) = E1000_READ_REG_ARRAY_DWORD(hw, E1000_HOST_IF, + offset + i); + hdr_csum = hdr->checksum; + hdr->checksum = 0; + csum = e1000_calculate_checksum((u8 *)hdr, + E1000_MNG_DHCP_COOKIE_LENGTH); + /* If either the checksums or signature don't match, then + * the cookie area isn't considered valid, in which case we + * take the safe route of assuming Tx filtering is enabled. + */ + if ((hdr_csum != csum) || (hdr->signature != E1000_IAMT_SIGNATURE)) { + hw->mac.tx_pkt_filtering = true; + return hw->mac.tx_pkt_filtering; + } + + /* Cookie area is valid, make the final check for filtering. */ + if (!(hdr->status & E1000_MNG_DHCP_COOKIE_STATUS_PARSING)) + hw->mac.tx_pkt_filtering = false; + + return hw->mac.tx_pkt_filtering; +} + +/** + * e1000_mng_write_cmd_header_generic - Writes manageability command header + * @hw: pointer to the HW structure + * @hdr: pointer to the host interface command header + * + * Writes the command header after does the checksum calculation. 
+ **/ +s32 e1000_mng_write_cmd_header_generic(struct e1000_hw *hw, + struct e1000_host_mng_command_header *hdr) +{ + u16 i, length = sizeof(struct e1000_host_mng_command_header); + + DEBUGFUNC("e1000_mng_write_cmd_header_generic"); + + /* Write the whole command header structure with new checksum. */ + + hdr->checksum = e1000_calculate_checksum((u8 *)hdr, length); + + length >>= 2; + /* Write the relevant command block into the ram area. */ + for (i = 0; i < length; i++) { + E1000_WRITE_REG_ARRAY_DWORD(hw, E1000_HOST_IF, i, + *((u32 *) hdr + i)); + E1000_WRITE_FLUSH(hw); + } + + return E1000_SUCCESS; +} + +/** + * e1000_mng_host_if_write_generic - Write to the manageability host interface + * @hw: pointer to the HW structure + * @buffer: pointer to the host interface buffer + * @length: size of the buffer + * @offset: location in the buffer to write to + * @sum: sum of the data (not checksum) + * + * This function writes the buffer content at the offset given on the host if. + * It also does alignment considerations to do the writes in most efficient + * way. Also fills up the sum of the buffer in *buffer parameter. 
+ **/ +s32 e1000_mng_host_if_write_generic(struct e1000_hw *hw, u8 *buffer, + u16 length, u16 offset, u8 *sum) +{ + u8 *tmp; + u8 *bufptr = buffer; + u32 data = 0; + u16 remaining, i, j, prev_bytes; + + DEBUGFUNC("e1000_mng_host_if_write_generic"); + + /* sum = only sum of the data and it is not checksum */ + + if (length == 0 || offset + length > E1000_HI_MAX_MNG_DATA_LENGTH) + return -E1000_ERR_PARAM; + + tmp = (u8 *)&data; + prev_bytes = offset & 0x3; + offset >>= 2; + + if (prev_bytes) { + data = E1000_READ_REG_ARRAY_DWORD(hw, E1000_HOST_IF, offset); + for (j = prev_bytes; j < sizeof(u32); j++) { + *(tmp + j) = *bufptr++; + *sum += *(tmp + j); + } + E1000_WRITE_REG_ARRAY_DWORD(hw, E1000_HOST_IF, offset, data); + length -= j - prev_bytes; + offset++; + } + + remaining = length & 0x3; + length -= remaining; + + /* Calculate length in DWORDs */ + length >>= 2; + + /* The device driver writes the relevant command block into the + * ram area. + */ + for (i = 0; i < length; i++) { + for (j = 0; j < sizeof(u32); j++) { + *(tmp + j) = *bufptr++; + *sum += *(tmp + j); + } + + E1000_WRITE_REG_ARRAY_DWORD(hw, E1000_HOST_IF, offset + i, + data); + } + if (remaining) { + for (j = 0; j < sizeof(u32); j++) { + if (j < remaining) + *(tmp + j) = *bufptr++; + else + *(tmp + j) = 0; + + *sum += *(tmp + j); + } + E1000_WRITE_REG_ARRAY_DWORD(hw, E1000_HOST_IF, offset + i, + data); + } + + return E1000_SUCCESS; +} + +/** + * e1000_mng_write_dhcp_info_generic - Writes DHCP info to host interface + * @hw: pointer to the HW structure + * @buffer: pointer to the host interface + * @length: size of the buffer + * + * Writes the DHCP information to the host interface. 
+ **/ +s32 e1000_mng_write_dhcp_info_generic(struct e1000_hw *hw, u8 *buffer, + u16 length) +{ + struct e1000_host_mng_command_header hdr; + s32 ret_val; + u32 hicr; + + DEBUGFUNC("e1000_mng_write_dhcp_info_generic"); + + hdr.command_id = E1000_MNG_DHCP_TX_PAYLOAD_CMD; + hdr.command_length = length; + hdr.reserved1 = 0; + hdr.reserved2 = 0; + hdr.checksum = 0; + + /* Enable the host interface */ + ret_val = e1000_mng_enable_host_if_generic(hw); + if (ret_val) + return ret_val; + + /* Populate the host interface with the contents of "buffer". */ + ret_val = e1000_mng_host_if_write_generic(hw, buffer, length, + sizeof(hdr), &(hdr.checksum)); + if (ret_val) + return ret_val; + + /* Write the manageability command header */ + ret_val = e1000_mng_write_cmd_header_generic(hw, &hdr); + if (ret_val) + return ret_val; + + /* Tell the ARC a new command is pending. */ + hicr = E1000_READ_REG(hw, E1000_HICR); + E1000_WRITE_REG(hw, E1000_HICR, hicr | E1000_HICR_C); + + return E1000_SUCCESS; +} + +/** + * e1000_enable_mng_pass_thru - Check if management passthrough is needed + * @hw: pointer to the HW structure + * + * Verifies the hardware needs to leave interface enabled so that frames can + * be directed to and from the management interface. 
+ **/ +bool e1000_enable_mng_pass_thru(struct e1000_hw *hw) +{ + u32 manc; + u32 fwsm, factps; + + DEBUGFUNC("e1000_enable_mng_pass_thru"); + + if (!hw->mac.asf_firmware_present) + return false; + + manc = E1000_READ_REG(hw, E1000_MANC); + + if (!(manc & E1000_MANC_RCV_TCO_EN)) + return false; + + if (hw->mac.has_fwsm) { + fwsm = E1000_READ_REG(hw, E1000_FWSM); + factps = E1000_READ_REG(hw, E1000_FACTPS); + + if (!(factps & E1000_FACTPS_MNGCG) && + ((fwsm & E1000_FWSM_MODE_MASK) == + (e1000_mng_mode_pt << E1000_FWSM_MODE_SHIFT))) + return true; + } else if ((manc & E1000_MANC_SMBUS_EN) && + !(manc & E1000_MANC_ASF_EN)) { + return true; + } + + return false; +} + +/** + * e1000_host_interface_command - Writes buffer to host interface + * @hw: pointer to the HW structure + * @buffer: contains a command to write + * @length: the byte length of the buffer, must be multiple of 4 bytes + * + * Writes a buffer to the Host Interface. Upon success, returns E1000_SUCCESS + * else returns E1000_ERR_HOST_INTERFACE_COMMAND. + **/ +s32 e1000_host_interface_command(struct e1000_hw *hw, u8 *buffer, u32 length) +{ + u32 hicr, i; + + DEBUGFUNC("e1000_host_interface_command"); + + if (!(hw->mac.arc_subsystem_valid)) { + DEBUGOUT("Hardware doesn't support host interface command.\n"); + return E1000_SUCCESS; + } + + if (!hw->mac.asf_firmware_present) { + DEBUGOUT("Firmware is not present.\n"); + return E1000_SUCCESS; + } + + if (length == 0 || length & 0x3 || + length > E1000_HI_MAX_BLOCK_BYTE_LENGTH) { + DEBUGOUT("Buffer length failure.\n"); + return -E1000_ERR_HOST_INTERFACE_COMMAND; + } + + /* Check that the host interface is enabled. */ + hicr = E1000_READ_REG(hw, E1000_HICR); + if (!(hicr & E1000_HICR_EN)) { + DEBUGOUT("E1000_HOST_EN bit disabled.\n"); + return -E1000_ERR_HOST_INTERFACE_COMMAND; + } + + /* Calculate length in DWORDs */ + length >>= 2; + + /* The device driver writes the relevant command block + * into the ram area. 
+ */ + for (i = 0; i < length; i++) + E1000_WRITE_REG_ARRAY_DWORD(hw, E1000_HOST_IF, i, + *((u32 *)buffer + i)); + + /* Setting this bit tells the ARC that a new command is pending. */ + E1000_WRITE_REG(hw, E1000_HICR, hicr | E1000_HICR_C); + + for (i = 0; i < E1000_HI_COMMAND_TIMEOUT; i++) { + hicr = E1000_READ_REG(hw, E1000_HICR); + if (!(hicr & E1000_HICR_C)) + break; + msec_delay(1); + } + + /* Check command successful completion. */ + if (i == E1000_HI_COMMAND_TIMEOUT || + (!(E1000_READ_REG(hw, E1000_HICR) & E1000_HICR_SV))) { + DEBUGOUT("Command has failed with no status valid.\n"); + return -E1000_ERR_HOST_INTERFACE_COMMAND; + } + + for (i = 0; i < length; i++) + *((u32 *)buffer + i) = E1000_READ_REG_ARRAY_DWORD(hw, + E1000_HOST_IF, + i); + + return E1000_SUCCESS; +} +/** + * e1000_load_firmware - Writes proxy FW code buffer to host interface + * and execute. + * @hw: pointer to the HW structure + * @buffer: contains a firmware to write + * @length: the byte length of the buffer, must be multiple of 4 bytes + * + * Upon success returns E1000_SUCCESS, returns E1000_ERR_CONFIG if not enabled + * in HW else returns E1000_ERR_HOST_INTERFACE_COMMAND. + **/ +s32 e1000_load_firmware(struct e1000_hw *hw, u8 *buffer, u32 length) +{ + u32 hicr, hibba, fwsm, icr, i; + + DEBUGFUNC("e1000_load_firmware"); + + if (hw->mac.type < e1000_i210) { + DEBUGOUT("Hardware doesn't support loading FW by the driver\n"); + return -E1000_ERR_CONFIG; + } + + /* Check that the host interface is enabled. 
*/
+	hicr = E1000_READ_REG(hw, E1000_HICR);
+	if (!(hicr & E1000_HICR_EN)) {
+		DEBUGOUT("E1000_HOST_EN bit disabled.\n");
+		return -E1000_ERR_CONFIG;
+	}
+	if (!(hicr & E1000_HICR_MEMORY_BASE_EN)) {
+		DEBUGOUT("E1000_HICR_MEMORY_BASE_EN bit disabled.\n");
+		return -E1000_ERR_CONFIG;
+	}
+
+	if (length == 0 || length & 0x3 || length > E1000_HI_FW_MAX_LENGTH) {
+		DEBUGOUT("Buffer length failure.\n");
+		return -E1000_ERR_INVALID_ARGUMENT;
+	}
+
+	/* Clear notification from ROM-FW by reading ICR register */
+	icr = E1000_READ_REG(hw, E1000_ICR_V2);
+
+	/* Reset ROM-FW */
+	hicr = E1000_READ_REG(hw, E1000_HICR);
+	hicr |= E1000_HICR_FW_RESET_ENABLE;
+	E1000_WRITE_REG(hw, E1000_HICR, hicr);
+	hicr |= E1000_HICR_FW_RESET;
+	E1000_WRITE_REG(hw, E1000_HICR, hicr);
+	E1000_WRITE_FLUSH(hw);
+
+	/* Wait till MAC notifies about its readiness after ROM-FW reset */
+	for (i = 0; i < (E1000_HI_COMMAND_TIMEOUT * 2); i++) {
+		icr = E1000_READ_REG(hw, E1000_ICR_V2);
+		if (icr & E1000_ICR_MNG)
+			break;
+		msec_delay(1);
+	}
+
+	/* Check for timeout: the poll above runs to 2x the command timeout */
+	if (i == (E1000_HI_COMMAND_TIMEOUT * 2)) {
+		DEBUGOUT("FW reset failed.\n");
+		return -E1000_ERR_HOST_INTERFACE_COMMAND;
+	}
+
+	/* Wait till MAC is ready to accept new FW code */
+	for (i = 0; i < E1000_HI_COMMAND_TIMEOUT; i++) {
+		fwsm = E1000_READ_REG(hw, E1000_FWSM);
+		if ((fwsm & E1000_FWSM_FW_VALID) &&
+		    ((fwsm & E1000_FWSM_MODE_MASK) >> E1000_FWSM_MODE_SHIFT ==
+		    E1000_FWSM_HI_EN_ONLY_MODE))
+			break;
+		msec_delay(1);
+	}
+
+	/* Check for timeout */
+	if (i == E1000_HI_COMMAND_TIMEOUT) {
+		DEBUGOUT("FW reset failed.\n");
+		return -E1000_ERR_HOST_INTERFACE_COMMAND;
+	}
+
+	/* Calculate length in DWORDs */
+	length >>= 2;
+
+	/* The device driver writes the relevant FW code block
+	 * into the ram area in DWORDs via 1kB ram addressing window.
+ */ + for (i = 0; i < length; i++) { + if (!(i % E1000_HI_FW_BLOCK_DWORD_LENGTH)) { + /* Point to correct 1kB ram window */ + hibba = E1000_HI_FW_BASE_ADDRESS + + ((E1000_HI_FW_BLOCK_DWORD_LENGTH << 2) * + (i / E1000_HI_FW_BLOCK_DWORD_LENGTH)); + + E1000_WRITE_REG(hw, E1000_HIBBA, hibba); + } + + E1000_WRITE_REG_ARRAY_DWORD(hw, E1000_HOST_IF, + i % E1000_HI_FW_BLOCK_DWORD_LENGTH, + *((u32 *)buffer + i)); + } + + /* Setting this bit tells the ARC that a new FW is ready to execute. */ + hicr = E1000_READ_REG(hw, E1000_HICR); + E1000_WRITE_REG(hw, E1000_HICR, hicr | E1000_HICR_C); + + for (i = 0; i < E1000_HI_COMMAND_TIMEOUT; i++) { + hicr = E1000_READ_REG(hw, E1000_HICR); + if (!(hicr & E1000_HICR_C)) + break; + msec_delay(1); + } + + /* Check for successful FW start. */ + if (i == E1000_HI_COMMAND_TIMEOUT) { + DEBUGOUT("New FW did not start within timeout period.\n"); + return -E1000_ERR_HOST_INTERFACE_COMMAND; + } + + return E1000_SUCCESS; +} + diff --git a/drivers/staging/igb_avb/e1000_manage.h b/drivers/staging/igb_avb/e1000_manage.h new file mode 100644 index 0000000000000..09afc1aed4970 --- /dev/null +++ b/drivers/staging/igb_avb/e1000_manage.h @@ -0,0 +1,86 @@ +/******************************************************************************* + + Intel(R) Gigabit Ethernet Linux driver + Copyright(c) 2007-2015 Intel Corporation. + + This program is free software; you can redistribute it and/or modify it + under the terms and conditions of the GNU General Public License, + version 2, as published by the Free Software Foundation. + + This program is distributed in the hope it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. + + The full GNU General Public License is included in this distribution in + the file called "COPYING". + + Contact Information: + Linux NICS + e1000-devel Mailing List + Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 + +*******************************************************************************/ + +#ifndef _E1000_MANAGE_H_ +#define _E1000_MANAGE_H_ + +bool e1000_check_mng_mode_generic(struct e1000_hw *hw); +bool e1000_enable_tx_pkt_filtering_generic(struct e1000_hw *hw); +s32 e1000_mng_enable_host_if_generic(struct e1000_hw *hw); +s32 e1000_mng_host_if_write_generic(struct e1000_hw *hw, u8 *buffer, + u16 length, u16 offset, u8 *sum); +s32 e1000_mng_write_cmd_header_generic(struct e1000_hw *hw, + struct e1000_host_mng_command_header *hdr); +s32 e1000_mng_write_dhcp_info_generic(struct e1000_hw *hw, + u8 *buffer, u16 length); +bool e1000_enable_mng_pass_thru(struct e1000_hw *hw); +u8 e1000_calculate_checksum(u8 *buffer, u32 length); +s32 e1000_host_interface_command(struct e1000_hw *hw, u8 *buffer, u32 length); +s32 e1000_load_firmware(struct e1000_hw *hw, u8 *buffer, u32 length); + +enum e1000_mng_mode { + e1000_mng_mode_none = 0, + e1000_mng_mode_asf, + e1000_mng_mode_pt, + e1000_mng_mode_ipmi, + e1000_mng_mode_host_if_only +}; + +#define E1000_FACTPS_MNGCG 0x20000000 + +#define E1000_FWSM_MODE_MASK 0xE +#define E1000_FWSM_MODE_SHIFT 1 +#define E1000_FWSM_FW_VALID 0x00008000 +#define E1000_FWSM_HI_EN_ONLY_MODE 0x4 + +#define E1000_MNG_IAMT_MODE 0x3 +#define E1000_MNG_DHCP_COOKIE_LENGTH 0x10 +#define E1000_MNG_DHCP_COOKIE_OFFSET 0x6F0 +#define E1000_MNG_DHCP_COMMAND_TIMEOUT 10 +#define E1000_MNG_DHCP_TX_PAYLOAD_CMD 64 +#define E1000_MNG_DHCP_COOKIE_STATUS_PARSING 0x1 +#define E1000_MNG_DHCP_COOKIE_STATUS_VLAN 0x2 + +#define E1000_VFTA_ENTRY_SHIFT 5 +#define E1000_VFTA_ENTRY_MASK 0x7F +#define E1000_VFTA_ENTRY_BIT_SHIFT_MASK 0x1F + +#define E1000_HI_MAX_BLOCK_BYTE_LENGTH 1792 /* Num of bytes in range */ +#define E1000_HI_MAX_BLOCK_DWORD_LENGTH 448 /* Num of dwords in range */ +#define E1000_HI_COMMAND_TIMEOUT 500 /* Process HI cmd limit */ +#define E1000_HI_FW_BASE_ADDRESS 0x10000 +#define E1000_HI_FW_MAX_LENGTH (64 * 1024) /* Num 
of bytes */ +#define E1000_HI_FW_BLOCK_DWORD_LENGTH 256 /* Num of DWORDs per page */ +#define E1000_HICR_MEMORY_BASE_EN 0x200 /* MB Enable bit - RO */ +#define E1000_HICR_EN 0x01 /* Enable bit - RO */ +/* Driver sets this bit when done to put command in RAM */ +#define E1000_HICR_C 0x02 +#define E1000_HICR_SV 0x04 /* Status Validity */ +#define E1000_HICR_FW_RESET_ENABLE 0x40 +#define E1000_HICR_FW_RESET 0x80 + +/* Intel(R) Active Management Technology signature */ +#define E1000_IAMT_SIGNATURE 0x544D4149 + +#endif diff --git a/drivers/staging/igb_avb/e1000_mbx.c b/drivers/staging/igb_avb/e1000_mbx.c new file mode 100644 index 0000000000000..f2998f470ce53 --- /dev/null +++ b/drivers/staging/igb_avb/e1000_mbx.c @@ -0,0 +1,523 @@ +/******************************************************************************* + + Intel(R) Gigabit Ethernet Linux driver + Copyright(c) 2007-2015 Intel Corporation. + + This program is free software; you can redistribute it and/or modify it + under the terms and conditions of the GNU General Public License, + version 2, as published by the Free Software Foundation. + + This program is distributed in the hope it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. + + The full GNU General Public License is included in this distribution in + the file called "COPYING". + + Contact Information: + Linux NICS + e1000-devel Mailing List + Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 + +*******************************************************************************/ + +#include "e1000_mbx.h" + +/** + * e1000_null_mbx_check_for_flag - No-op function, return 0 + * @hw: pointer to the HW structure + **/ +static s32 e1000_null_mbx_check_for_flag(struct e1000_hw E1000_UNUSEDARG *hw, + u16 E1000_UNUSEDARG mbx_id) +{ + DEBUGFUNC("e1000_null_mbx_check_flag"); + + return E1000_SUCCESS; +} + +/** + * e1000_null_mbx_transact - No-op function, return 0 + * @hw: pointer to the HW structure + **/ +static s32 e1000_null_mbx_transact(struct e1000_hw E1000_UNUSEDARG *hw, + u32 E1000_UNUSEDARG *msg, + u16 E1000_UNUSEDARG size, + u16 E1000_UNUSEDARG mbx_id) +{ + DEBUGFUNC("e1000_null_mbx_rw_msg"); + + return E1000_SUCCESS; +} + +/** + * e1000_read_mbx - Reads a message from the mailbox + * @hw: pointer to the HW structure + * @msg: The message buffer + * @size: Length of buffer + * @mbx_id: id of mailbox to read + * + * returns SUCCESS if it successfuly read message from buffer + **/ +s32 e1000_read_mbx(struct e1000_hw *hw, u32 *msg, u16 size, u16 mbx_id) +{ + struct e1000_mbx_info *mbx = &hw->mbx; + s32 ret_val = -E1000_ERR_MBX; + + DEBUGFUNC("e1000_read_mbx"); + + /* limit read to size of mailbox */ + if (size > mbx->size) + size = mbx->size; + + if (mbx->ops.read) + ret_val = mbx->ops.read(hw, msg, size, mbx_id); + + return ret_val; +} + +/** + * e1000_write_mbx - Write a message to the mailbox + * @hw: pointer to the HW structure + * @msg: The message buffer + * @size: Length of buffer + * @mbx_id: id of mailbox to write + * + * returns SUCCESS if it successfully copied message into the buffer + **/ +s32 e1000_write_mbx(struct e1000_hw *hw, u32 *msg, u16 size, u16 mbx_id) +{ + struct e1000_mbx_info *mbx = &hw->mbx; + s32 ret_val = E1000_SUCCESS; + + DEBUGFUNC("e1000_write_mbx"); + + if (size > mbx->size) + ret_val = -E1000_ERR_MBX; + + else if (mbx->ops.write) + ret_val = mbx->ops.write(hw, msg, size, mbx_id); + + 
return ret_val; +} + +/** + * e1000_check_for_msg - checks to see if someone sent us mail + * @hw: pointer to the HW structure + * @mbx_id: id of mailbox to check + * + * returns SUCCESS if the Status bit was found or else ERR_MBX + **/ +s32 e1000_check_for_msg(struct e1000_hw *hw, u16 mbx_id) +{ + struct e1000_mbx_info *mbx = &hw->mbx; + s32 ret_val = -E1000_ERR_MBX; + + DEBUGFUNC("e1000_check_for_msg"); + + if (mbx->ops.check_for_msg) + ret_val = mbx->ops.check_for_msg(hw, mbx_id); + + return ret_val; +} + +/** + * e1000_check_for_ack - checks to see if someone sent us ACK + * @hw: pointer to the HW structure + * @mbx_id: id of mailbox to check + * + * returns SUCCESS if the Status bit was found or else ERR_MBX + **/ +s32 e1000_check_for_ack(struct e1000_hw *hw, u16 mbx_id) +{ + struct e1000_mbx_info *mbx = &hw->mbx; + s32 ret_val = -E1000_ERR_MBX; + + DEBUGFUNC("e1000_check_for_ack"); + + if (mbx->ops.check_for_ack) + ret_val = mbx->ops.check_for_ack(hw, mbx_id); + + return ret_val; +} + +/** + * e1000_check_for_rst - checks to see if other side has reset + * @hw: pointer to the HW structure + * @mbx_id: id of mailbox to check + * + * returns SUCCESS if the Status bit was found or else ERR_MBX + **/ +s32 e1000_check_for_rst(struct e1000_hw *hw, u16 mbx_id) +{ + struct e1000_mbx_info *mbx = &hw->mbx; + s32 ret_val = -E1000_ERR_MBX; + + DEBUGFUNC("e1000_check_for_rst"); + + if (mbx->ops.check_for_rst) + ret_val = mbx->ops.check_for_rst(hw, mbx_id); + + return ret_val; +} + +/** + * e1000_poll_for_msg - Wait for message notification + * @hw: pointer to the HW structure + * @mbx_id: id of mailbox to write + * + * returns SUCCESS if it successfully received a message notification + **/ +static s32 e1000_poll_for_msg(struct e1000_hw *hw, u16 mbx_id) +{ + struct e1000_mbx_info *mbx = &hw->mbx; + int countdown = mbx->timeout; + + DEBUGFUNC("e1000_poll_for_msg"); + + if (!countdown || !mbx->ops.check_for_msg) + goto out; + + while (countdown && mbx->ops.check_for_msg(hw, 
mbx_id)) { + countdown--; + if (!countdown) + break; + usec_delay(mbx->usec_delay); + } + + /* if we failed, all future posted messages fail until reset */ + if (!countdown) + mbx->timeout = 0; +out: + return countdown ? E1000_SUCCESS : -E1000_ERR_MBX; +} + +/** + * e1000_poll_for_ack - Wait for message acknowledgement + * @hw: pointer to the HW structure + * @mbx_id: id of mailbox to write + * + * returns SUCCESS if it successfully received a message acknowledgement + **/ +static s32 e1000_poll_for_ack(struct e1000_hw *hw, u16 mbx_id) +{ + struct e1000_mbx_info *mbx = &hw->mbx; + int countdown = mbx->timeout; + + DEBUGFUNC("e1000_poll_for_ack"); + + if (!countdown || !mbx->ops.check_for_ack) + goto out; + + while (countdown && mbx->ops.check_for_ack(hw, mbx_id)) { + countdown--; + if (!countdown) + break; + usec_delay(mbx->usec_delay); + } + + /* if we failed, all future posted messages fail until reset */ + if (!countdown) + mbx->timeout = 0; +out: + return countdown ? E1000_SUCCESS : -E1000_ERR_MBX; +} + +/** + * e1000_read_posted_mbx - Wait for message notification and receive message + * @hw: pointer to the HW structure + * @msg: The message buffer + * @size: Length of buffer + * @mbx_id: id of mailbox to write + * + * returns SUCCESS if it successfully received a message notification and + * copied it into the receive buffer. 
+ **/ +s32 e1000_read_posted_mbx(struct e1000_hw *hw, u32 *msg, u16 size, u16 mbx_id) +{ + struct e1000_mbx_info *mbx = &hw->mbx; + s32 ret_val = -E1000_ERR_MBX; + + DEBUGFUNC("e1000_read_posted_mbx"); + + if (!mbx->ops.read) + goto out; + + ret_val = e1000_poll_for_msg(hw, mbx_id); + + /* if ack received read message, otherwise we timed out */ + if (!ret_val) + ret_val = mbx->ops.read(hw, msg, size, mbx_id); +out: + return ret_val; +} + +/** + * e1000_write_posted_mbx - Write a message to the mailbox, wait for ack + * @hw: pointer to the HW structure + * @msg: The message buffer + * @size: Length of buffer + * @mbx_id: id of mailbox to write + * + * returns SUCCESS if it successfully copied message into the buffer and + * received an ack to that message within delay * timeout period + **/ +s32 e1000_write_posted_mbx(struct e1000_hw *hw, u32 *msg, u16 size, u16 mbx_id) +{ + struct e1000_mbx_info *mbx = &hw->mbx; + s32 ret_val = -E1000_ERR_MBX; + + DEBUGFUNC("e1000_write_posted_mbx"); + + /* exit if either we can't write or there isn't a defined timeout */ + if (!mbx->ops.write || !mbx->timeout) + goto out; + + /* send msg */ + ret_val = mbx->ops.write(hw, msg, size, mbx_id); + + /* if msg sent wait until we receive an ack */ + if (!ret_val) + ret_val = e1000_poll_for_ack(hw, mbx_id); +out: + return ret_val; +} + +/** + * e1000_init_mbx_ops_generic - Initialize mbx function pointers + * @hw: pointer to the HW structure + * + * Sets the function pointers to no-op functions + **/ +void e1000_init_mbx_ops_generic(struct e1000_hw *hw) +{ + struct e1000_mbx_info *mbx = &hw->mbx; + mbx->ops.init_params = e1000_null_ops_generic; + mbx->ops.read = e1000_null_mbx_transact; + mbx->ops.write = e1000_null_mbx_transact; + mbx->ops.check_for_msg = e1000_null_mbx_check_for_flag; + mbx->ops.check_for_ack = e1000_null_mbx_check_for_flag; + mbx->ops.check_for_rst = e1000_null_mbx_check_for_flag; + mbx->ops.read_posted = e1000_read_posted_mbx; + mbx->ops.write_posted = 
e1000_write_posted_mbx; +} + +static s32 e1000_check_for_bit_pf(struct e1000_hw *hw, u32 mask) +{ + u32 mbvficr = E1000_READ_REG(hw, E1000_MBVFICR); + s32 ret_val = -E1000_ERR_MBX; + + if (mbvficr & mask) { + ret_val = E1000_SUCCESS; + E1000_WRITE_REG(hw, E1000_MBVFICR, mask); + } + + return ret_val; +} + +/** + * e1000_check_for_msg_pf - checks to see if the VF has sent mail + * @hw: pointer to the HW structure + * @vf_number: the VF index + * + * returns SUCCESS if the VF has set the Status bit or else ERR_MBX + **/ +static s32 e1000_check_for_msg_pf(struct e1000_hw *hw, u16 vf_number) +{ + s32 ret_val = -E1000_ERR_MBX; + + DEBUGFUNC("e1000_check_for_msg_pf"); + + if (!e1000_check_for_bit_pf(hw, E1000_MBVFICR_VFREQ_VF1 << vf_number)) { + ret_val = E1000_SUCCESS; + hw->mbx.stats.reqs++; + } + + return ret_val; +} + +/** + * e1000_check_for_ack_pf - checks to see if the VF has ACKed + * @hw: pointer to the HW structure + * @vf_number: the VF index + * + * returns SUCCESS if the VF has set the Status bit or else ERR_MBX + **/ +static s32 e1000_check_for_ack_pf(struct e1000_hw *hw, u16 vf_number) +{ + s32 ret_val = -E1000_ERR_MBX; + + DEBUGFUNC("e1000_check_for_ack_pf"); + + if (!e1000_check_for_bit_pf(hw, E1000_MBVFICR_VFACK_VF1 << vf_number)) { + ret_val = E1000_SUCCESS; + hw->mbx.stats.acks++; + } + + return ret_val; +} + +/** + * e1000_check_for_rst_pf - checks to see if the VF has reset + * @hw: pointer to the HW structure + * @vf_number: the VF index + * + * returns SUCCESS if the VF has set the Status bit or else ERR_MBX + **/ +static s32 e1000_check_for_rst_pf(struct e1000_hw *hw, u16 vf_number) +{ + u32 vflre = E1000_READ_REG(hw, E1000_VFLRE); + s32 ret_val = -E1000_ERR_MBX; + + DEBUGFUNC("e1000_check_for_rst_pf"); + + if (vflre & (1 << vf_number)) { + ret_val = E1000_SUCCESS; + E1000_WRITE_REG(hw, E1000_VFLRE, (1 << vf_number)); + hw->mbx.stats.rsts++; + } + + return ret_val; +} + +/** + * e1000_obtain_mbx_lock_pf - obtain mailbox lock + * @hw: pointer to 
the HW structure + * @vf_number: the VF index + * + * return SUCCESS if we obtained the mailbox lock + **/ +static s32 e1000_obtain_mbx_lock_pf(struct e1000_hw *hw, u16 vf_number) +{ + s32 ret_val = -E1000_ERR_MBX; + u32 p2v_mailbox; + + DEBUGFUNC("e1000_obtain_mbx_lock_pf"); + + /* Take ownership of the buffer */ + E1000_WRITE_REG(hw, E1000_P2VMAILBOX(vf_number), E1000_P2VMAILBOX_PFU); + + /* reserve mailbox for vf use */ + p2v_mailbox = E1000_READ_REG(hw, E1000_P2VMAILBOX(vf_number)); + if (p2v_mailbox & E1000_P2VMAILBOX_PFU) + ret_val = E1000_SUCCESS; + + return ret_val; +} + +/** + * e1000_write_mbx_pf - Places a message in the mailbox + * @hw: pointer to the HW structure + * @msg: The message buffer + * @size: Length of buffer + * @vf_number: the VF index + * + * returns SUCCESS if it successfully copied message into the buffer + **/ +static s32 e1000_write_mbx_pf(struct e1000_hw *hw, u32 *msg, u16 size, + u16 vf_number) +{ + s32 ret_val; + u16 i; + + DEBUGFUNC("e1000_write_mbx_pf"); + + /* lock the mailbox to prevent pf/vf race condition */ + ret_val = e1000_obtain_mbx_lock_pf(hw, vf_number); + if (ret_val) + goto out_no_write; + + /* flush msg and acks as we are overwriting the message buffer */ + e1000_check_for_msg_pf(hw, vf_number); + e1000_check_for_ack_pf(hw, vf_number); + + /* copy the caller specified message to the mailbox memory buffer */ + for (i = 0; i < size; i++) + E1000_WRITE_REG_ARRAY(hw, E1000_VMBMEM(vf_number), i, msg[i]); + + /* Interrupt VF to tell it a message has been sent and release buffer*/ + E1000_WRITE_REG(hw, E1000_P2VMAILBOX(vf_number), E1000_P2VMAILBOX_STS); + + /* update stats */ + hw->mbx.stats.msgs_tx++; + +out_no_write: + return ret_val; + +} + +/** + * e1000_read_mbx_pf - Read a message from the mailbox + * @hw: pointer to the HW structure + * @msg: The message buffer + * @size: Length of buffer + * @vf_number: the VF index + * + * This function copies a message from the mailbox buffer to the caller's + * memory buffer. 
The presumption is that the caller knows that there was + * a message due to a VF request so no polling for message is needed. + **/ +static s32 e1000_read_mbx_pf(struct e1000_hw *hw, u32 *msg, u16 size, + u16 vf_number) +{ + s32 ret_val; + u16 i; + + DEBUGFUNC("e1000_read_mbx_pf"); + + /* lock the mailbox to prevent pf/vf race condition */ + ret_val = e1000_obtain_mbx_lock_pf(hw, vf_number); + if (ret_val) + goto out_no_read; + + /* copy the message to the mailbox memory buffer */ + for (i = 0; i < size; i++) + msg[i] = E1000_READ_REG_ARRAY(hw, E1000_VMBMEM(vf_number), i); + + /* Acknowledge the message and release buffer */ + E1000_WRITE_REG(hw, E1000_P2VMAILBOX(vf_number), E1000_P2VMAILBOX_ACK); + + /* update stats */ + hw->mbx.stats.msgs_rx++; + +out_no_read: + return ret_val; +} + +/** + * e1000_init_mbx_params_pf - set initial values for pf mailbox + * @hw: pointer to the HW structure + * + * Initializes the hw->mbx struct to correct values for pf mailbox + */ +s32 e1000_init_mbx_params_pf(struct e1000_hw *hw) +{ + struct e1000_mbx_info *mbx = &hw->mbx; + + switch (hw->mac.type) { + case e1000_82576: + case e1000_i350: + case e1000_i354: + mbx->timeout = 0; + mbx->usec_delay = 0; + + mbx->size = E1000_VFMAILBOX_SIZE; + + mbx->ops.read = e1000_read_mbx_pf; + mbx->ops.write = e1000_write_mbx_pf; + mbx->ops.read_posted = e1000_read_posted_mbx; + mbx->ops.write_posted = e1000_write_posted_mbx; + mbx->ops.check_for_msg = e1000_check_for_msg_pf; + mbx->ops.check_for_ack = e1000_check_for_ack_pf; + mbx->ops.check_for_rst = e1000_check_for_rst_pf; + + mbx->stats.msgs_tx = 0; + mbx->stats.msgs_rx = 0; + mbx->stats.reqs = 0; + mbx->stats.acks = 0; + mbx->stats.rsts = 0; + default: + return E1000_SUCCESS; + } +} + diff --git a/drivers/staging/igb_avb/e1000_mbx.h b/drivers/staging/igb_avb/e1000_mbx.h new file mode 100644 index 0000000000000..28900216ac250 --- /dev/null +++ b/drivers/staging/igb_avb/e1000_mbx.h @@ -0,0 +1,84 @@ 
+/******************************************************************************* + + Intel(R) Gigabit Ethernet Linux driver + Copyright(c) 2007-2015 Intel Corporation. + + This program is free software; you can redistribute it and/or modify it + under the terms and conditions of the GNU General Public License, + version 2, as published by the Free Software Foundation. + + This program is distributed in the hope it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. + + The full GNU General Public License is included in this distribution in + the file called "COPYING". + + Contact Information: + Linux NICS + e1000-devel Mailing List + Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + +*******************************************************************************/ + +#ifndef _E1000_MBX_H_ +#define _E1000_MBX_H_ + +#include "e1000_api.h" + +#define E1000_P2VMAILBOX_STS 0x00000001 /* Initiate message send to VF */ +#define E1000_P2VMAILBOX_ACK 0x00000002 /* Ack message recv'd from VF */ +#define E1000_P2VMAILBOX_VFU 0x00000004 /* VF owns the mailbox buffer */ +#define E1000_P2VMAILBOX_PFU 0x00000008 /* PF owns the mailbox buffer */ +#define E1000_P2VMAILBOX_RVFU 0x00000010 /* Reset VFU - used when VF stuck */ + +#define E1000_MBVFICR_VFREQ_MASK 0x000000FF /* bits for VF messages */ +#define E1000_MBVFICR_VFREQ_VF1 0x00000001 /* bit for VF 1 message */ +#define E1000_MBVFICR_VFACK_MASK 0x00FF0000 /* bits for VF acks */ +#define E1000_MBVFICR_VFACK_VF1 0x00010000 /* bit for VF 1 ack */ + +#define E1000_VFMAILBOX_SIZE 16 /* 16 32 bit words - 64 bytes */ + +/* If it's a E1000_VF_* msg then it originates in the VF and is sent to the + * PF. The reverse is true if it is E1000_PF_*. 
+ * Message ACK's are the value or'd with 0xF0000000 + */ +/* Msgs below or'd with this are the ACK */ +#define E1000_VT_MSGTYPE_ACK 0x80000000 +/* Msgs below or'd with this are the NACK */ +#define E1000_VT_MSGTYPE_NACK 0x40000000 +/* Indicates that VF is still clear to send requests */ +#define E1000_VT_MSGTYPE_CTS 0x20000000 +#define E1000_VT_MSGINFO_SHIFT 16 +/* bits 23:16 are used for extra info for certain messages */ +#define E1000_VT_MSGINFO_MASK (0xFF << E1000_VT_MSGINFO_SHIFT) + +#define E1000_VF_RESET 0x01 /* VF requests reset */ +#define E1000_VF_SET_MAC_ADDR 0x02 /* VF requests to set MAC addr */ +#define E1000_VF_SET_MULTICAST 0x03 /* VF requests to set MC addr */ +#define E1000_VF_SET_MULTICAST_COUNT_MASK (0x1F << E1000_VT_MSGINFO_SHIFT) +#define E1000_VF_SET_MULTICAST_OVERFLOW (0x80 << E1000_VT_MSGINFO_SHIFT) +#define E1000_VF_SET_VLAN 0x04 /* VF requests to set VLAN */ +#define E1000_VF_SET_VLAN_ADD (0x01 << E1000_VT_MSGINFO_SHIFT) +#define E1000_VF_SET_LPE 0x05 /* reqs to set VMOLR.LPE */ +#define E1000_VF_SET_PROMISC 0x06 /* reqs to clear VMOLR.ROPE/MPME*/ +#define E1000_VF_SET_PROMISC_UNICAST (0x01 << E1000_VT_MSGINFO_SHIFT) +#define E1000_VF_SET_PROMISC_MULTICAST (0x02 << E1000_VT_MSGINFO_SHIFT) + +#define E1000_PF_CONTROL_MSG 0x0100 /* PF control message */ + +#define E1000_VF_MBX_INIT_TIMEOUT 2000 /* number of retries on mailbox */ +#define E1000_VF_MBX_INIT_DELAY 500 /* microseconds between retries */ + +s32 e1000_read_mbx(struct e1000_hw *, u32 *, u16, u16); +s32 e1000_write_mbx(struct e1000_hw *, u32 *, u16, u16); +s32 e1000_read_posted_mbx(struct e1000_hw *, u32 *, u16, u16); +s32 e1000_write_posted_mbx(struct e1000_hw *, u32 *, u16, u16); +s32 e1000_check_for_msg(struct e1000_hw *, u16); +s32 e1000_check_for_ack(struct e1000_hw *, u16); +s32 e1000_check_for_rst(struct e1000_hw *, u16); +void e1000_init_mbx_ops_generic(struct e1000_hw *hw); +s32 e1000_init_mbx_params_pf(struct e1000_hw *); + +#endif /* _E1000_MBX_H_ */ diff --git 
a/drivers/staging/igb_avb/e1000_nvm.c b/drivers/staging/igb_avb/e1000_nvm.c new file mode 100644 index 0000000000000..c328f40dfa203 --- /dev/null +++ b/drivers/staging/igb_avb/e1000_nvm.c @@ -0,0 +1,973 @@ +/******************************************************************************* + + Intel(R) Gigabit Ethernet Linux driver + Copyright(c) 2007-2015 Intel Corporation. + + This program is free software; you can redistribute it and/or modify it + under the terms and conditions of the GNU General Public License, + version 2, as published by the Free Software Foundation. + + This program is distributed in the hope it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. + + The full GNU General Public License is included in this distribution in + the file called "COPYING". + + Contact Information: + Linux NICS + e1000-devel Mailing List + Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 + +*******************************************************************************/ + +#include "e1000_api.h" + +static void e1000_reload_nvm_generic(struct e1000_hw *hw); + +/** + * e1000_init_nvm_ops_generic - Initialize NVM function pointers + * @hw: pointer to the HW structure + * + * Sets up the function pointers to no-op functions + **/ +void e1000_init_nvm_ops_generic(struct e1000_hw *hw) +{ + struct e1000_nvm_info *nvm = &hw->nvm; + DEBUGFUNC("e1000_init_nvm_ops_generic"); + + /* Initialize function pointers */ + nvm->ops.init_params = e1000_null_ops_generic; + nvm->ops.acquire = e1000_null_ops_generic; + nvm->ops.read = e1000_null_read_nvm; + nvm->ops.release = e1000_null_nvm_generic; + nvm->ops.reload = e1000_reload_nvm_generic; + nvm->ops.update = e1000_null_ops_generic; + nvm->ops.valid_led_default = e1000_null_led_default; + nvm->ops.validate = e1000_null_ops_generic; + nvm->ops.write = e1000_null_write_nvm; +} + +/** + * e1000_null_read_nvm - No-op function, return 0 + * @hw: pointer to the HW structure + **/ +s32 e1000_null_read_nvm(struct e1000_hw E1000_UNUSEDARG *hw, + u16 E1000_UNUSEDARG a, u16 E1000_UNUSEDARG b, + u16 E1000_UNUSEDARG *c) +{ + DEBUGFUNC("e1000_null_read_nvm"); + return E1000_SUCCESS; +} + +/** + * e1000_null_nvm_generic - No-op function, return void + * @hw: pointer to the HW structure + **/ +void e1000_null_nvm_generic(struct e1000_hw E1000_UNUSEDARG *hw) +{ + DEBUGFUNC("e1000_null_nvm_generic"); + return; +} + +/** + * e1000_null_led_default - No-op function, return 0 + * @hw: pointer to the HW structure + **/ +s32 e1000_null_led_default(struct e1000_hw E1000_UNUSEDARG *hw, + u16 E1000_UNUSEDARG *data) +{ + DEBUGFUNC("e1000_null_led_default"); + return E1000_SUCCESS; +} + +/** + * e1000_null_write_nvm - No-op function, return 0 + * @hw: pointer to the HW structure + **/ +s32 e1000_null_write_nvm(struct e1000_hw E1000_UNUSEDARG *hw, + u16 E1000_UNUSEDARG a, u16 E1000_UNUSEDARG b, + 
u16 E1000_UNUSEDARG *c) +{ + DEBUGFUNC("e1000_null_write_nvm"); + return E1000_SUCCESS; +} + +/** + * e1000_raise_eec_clk - Raise EEPROM clock + * @hw: pointer to the HW structure + * @eecd: pointer to the EEPROM + * + * Enable/Raise the EEPROM clock bit. + **/ +static void e1000_raise_eec_clk(struct e1000_hw *hw, u32 *eecd) +{ + *eecd = *eecd | E1000_EECD_SK; + E1000_WRITE_REG(hw, E1000_EECD, *eecd); + E1000_WRITE_FLUSH(hw); + usec_delay(hw->nvm.delay_usec); +} + +/** + * e1000_lower_eec_clk - Lower EEPROM clock + * @hw: pointer to the HW structure + * @eecd: pointer to the EEPROM + * + * Clear/Lower the EEPROM clock bit. + **/ +static void e1000_lower_eec_clk(struct e1000_hw *hw, u32 *eecd) +{ + *eecd = *eecd & ~E1000_EECD_SK; + E1000_WRITE_REG(hw, E1000_EECD, *eecd); + E1000_WRITE_FLUSH(hw); + usec_delay(hw->nvm.delay_usec); +} + +/** + * e1000_shift_out_eec_bits - Shift data bits out to the EEPROM + * @hw: pointer to the HW structure + * @data: data to send to the EEPROM + * @count: number of bits to shift out + * + * We need to shift 'count' bits out to the EEPROM. So, the value in the + * "data" parameter will be shifted out to the EEPROM one bit at a time. + * In order to do this, "data" must be broken down into bits. 
+ **/ +static void e1000_shift_out_eec_bits(struct e1000_hw *hw, u16 data, u16 count) +{ + struct e1000_nvm_info *nvm = &hw->nvm; + u32 eecd = E1000_READ_REG(hw, E1000_EECD); + u32 mask; + + DEBUGFUNC("e1000_shift_out_eec_bits"); + + mask = 0x01 << (count - 1); + if (nvm->type == e1000_nvm_eeprom_spi) + eecd |= E1000_EECD_DO; + + do { + eecd &= ~E1000_EECD_DI; + + if (data & mask) + eecd |= E1000_EECD_DI; + + E1000_WRITE_REG(hw, E1000_EECD, eecd); + E1000_WRITE_FLUSH(hw); + + usec_delay(nvm->delay_usec); + + e1000_raise_eec_clk(hw, &eecd); + e1000_lower_eec_clk(hw, &eecd); + + mask >>= 1; + } while (mask); + + eecd &= ~E1000_EECD_DI; + E1000_WRITE_REG(hw, E1000_EECD, eecd); +} + +/** + * e1000_shift_in_eec_bits - Shift data bits in from the EEPROM + * @hw: pointer to the HW structure + * @count: number of bits to shift in + * + * In order to read a register from the EEPROM, we need to shift 'count' bits + * in from the EEPROM. Bits are "shifted in" by raising the clock input to + * the EEPROM (setting the SK bit), and then reading the value of the data out + * "DO" bit. During this "shifting in" process the data in "DI" bit should + * always be clear. + **/ +static u16 e1000_shift_in_eec_bits(struct e1000_hw *hw, u16 count) +{ + u32 eecd; + u32 i; + u16 data; + + DEBUGFUNC("e1000_shift_in_eec_bits"); + + eecd = E1000_READ_REG(hw, E1000_EECD); + + eecd &= ~(E1000_EECD_DO | E1000_EECD_DI); + data = 0; + + for (i = 0; i < count; i++) { + data <<= 1; + e1000_raise_eec_clk(hw, &eecd); + + eecd = E1000_READ_REG(hw, E1000_EECD); + + eecd &= ~E1000_EECD_DI; + if (eecd & E1000_EECD_DO) + data |= 1; + + e1000_lower_eec_clk(hw, &eecd); + } + + return data; +} + +/** + * e1000_poll_eerd_eewr_done - Poll for EEPROM read/write completion + * @hw: pointer to the HW structure + * @ee_reg: EEPROM flag for polling + * + * Polls the EEPROM status bit for either read or write completion based + * upon the value of 'ee_reg'. 
+ **/ +s32 e1000_poll_eerd_eewr_done(struct e1000_hw *hw, int ee_reg) +{ + u32 attempts = 100000; + u32 i, reg = 0; + + DEBUGFUNC("e1000_poll_eerd_eewr_done"); + + for (i = 0; i < attempts; i++) { + if (ee_reg == E1000_NVM_POLL_READ) + reg = E1000_READ_REG(hw, E1000_EERD); + else + reg = E1000_READ_REG(hw, E1000_EEWR); + + if (reg & E1000_NVM_RW_REG_DONE) + return E1000_SUCCESS; + + usec_delay(5); + } + + return -E1000_ERR_NVM; +} + +/** + * e1000_acquire_nvm_generic - Generic request for access to EEPROM + * @hw: pointer to the HW structure + * + * Set the EEPROM access request bit and wait for EEPROM access grant bit. + * Return successful if access grant bit set, else clear the request for + * EEPROM access and return -E1000_ERR_NVM (-1). + **/ +s32 e1000_acquire_nvm_generic(struct e1000_hw *hw) +{ + u32 eecd = E1000_READ_REG(hw, E1000_EECD); + s32 timeout = E1000_NVM_GRANT_ATTEMPTS; + + DEBUGFUNC("e1000_acquire_nvm_generic"); + + E1000_WRITE_REG(hw, E1000_EECD, eecd | E1000_EECD_REQ); + eecd = E1000_READ_REG(hw, E1000_EECD); + + while (timeout) { + if (eecd & E1000_EECD_GNT) + break; + usec_delay(5); + eecd = E1000_READ_REG(hw, E1000_EECD); + timeout--; + } + + if (!timeout) { + eecd &= ~E1000_EECD_REQ; + E1000_WRITE_REG(hw, E1000_EECD, eecd); + DEBUGOUT("Could not acquire NVM grant\n"); + return -E1000_ERR_NVM; + } + + return E1000_SUCCESS; +} + +/** + * e1000_standby_nvm - Return EEPROM to standby state + * @hw: pointer to the HW structure + * + * Return the EEPROM to a standby state. 
+ **/ +static void e1000_standby_nvm(struct e1000_hw *hw) +{ + struct e1000_nvm_info *nvm = &hw->nvm; + u32 eecd = E1000_READ_REG(hw, E1000_EECD); + + DEBUGFUNC("e1000_standby_nvm"); + + if (nvm->type == e1000_nvm_eeprom_spi) { + /* Toggle CS to flush commands */ + eecd |= E1000_EECD_CS; + E1000_WRITE_REG(hw, E1000_EECD, eecd); + E1000_WRITE_FLUSH(hw); + usec_delay(nvm->delay_usec); + eecd &= ~E1000_EECD_CS; + E1000_WRITE_REG(hw, E1000_EECD, eecd); + E1000_WRITE_FLUSH(hw); + usec_delay(nvm->delay_usec); + } +} + +/** + * e1000_stop_nvm - Terminate EEPROM command + * @hw: pointer to the HW structure + * + * Terminates the current command by inverting the EEPROM's chip select pin. + **/ +static void e1000_stop_nvm(struct e1000_hw *hw) +{ + u32 eecd; + + DEBUGFUNC("e1000_stop_nvm"); + + eecd = E1000_READ_REG(hw, E1000_EECD); + if (hw->nvm.type == e1000_nvm_eeprom_spi) { + /* Pull CS high */ + eecd |= E1000_EECD_CS; + e1000_lower_eec_clk(hw, &eecd); + } +} + +/** + * e1000_release_nvm_generic - Release exclusive access to EEPROM + * @hw: pointer to the HW structure + * + * Stop any current commands to the EEPROM and clear the EEPROM request bit. + **/ +void e1000_release_nvm_generic(struct e1000_hw *hw) +{ + u32 eecd; + + DEBUGFUNC("e1000_release_nvm_generic"); + + e1000_stop_nvm(hw); + + eecd = E1000_READ_REG(hw, E1000_EECD); + eecd &= ~E1000_EECD_REQ; + E1000_WRITE_REG(hw, E1000_EECD, eecd); +} + +/** + * e1000_ready_nvm_eeprom - Prepares EEPROM for read/write + * @hw: pointer to the HW structure + * + * Setups the EEPROM for reading and writing. 
+ **/ +static s32 e1000_ready_nvm_eeprom(struct e1000_hw *hw) +{ + struct e1000_nvm_info *nvm = &hw->nvm; + u32 eecd = E1000_READ_REG(hw, E1000_EECD); + u8 spi_stat_reg; + + DEBUGFUNC("e1000_ready_nvm_eeprom"); + + if (nvm->type == e1000_nvm_eeprom_spi) { + u16 timeout = NVM_MAX_RETRY_SPI; + + /* Clear SK and CS */ + eecd &= ~(E1000_EECD_CS | E1000_EECD_SK); + E1000_WRITE_REG(hw, E1000_EECD, eecd); + E1000_WRITE_FLUSH(hw); + usec_delay(1); + + /* Read "Status Register" repeatedly until the LSB is cleared. + * The EEPROM will signal that the command has been completed + * by clearing bit 0 of the internal status register. If it's + * not cleared within 'timeout', then error out. + */ + while (timeout) { + e1000_shift_out_eec_bits(hw, NVM_RDSR_OPCODE_SPI, + hw->nvm.opcode_bits); + spi_stat_reg = (u8)e1000_shift_in_eec_bits(hw, 8); + if (!(spi_stat_reg & NVM_STATUS_RDY_SPI)) + break; + + usec_delay(5); + e1000_standby_nvm(hw); + timeout--; + } + + if (!timeout) { + DEBUGOUT("SPI NVM Status error\n"); + return -E1000_ERR_NVM; + } + } + + return E1000_SUCCESS; +} + +/** + * e1000_read_nvm_spi - Read EEPROM's using SPI + * @hw: pointer to the HW structure + * @offset: offset of word in the EEPROM to read + * @words: number of words to read + * @data: word read from the EEPROM + * + * Reads a 16 bit word from the EEPROM. + **/ +s32 e1000_read_nvm_spi(struct e1000_hw *hw, u16 offset, u16 words, u16 *data) +{ + struct e1000_nvm_info *nvm = &hw->nvm; + u32 i = 0; + s32 ret_val; + u16 word_in; + u8 read_opcode = NVM_READ_OPCODE_SPI; + + DEBUGFUNC("e1000_read_nvm_spi"); + + /* A check for invalid values: offset too large, too many words, + * and not enough words. 
+ */ + if ((offset >= nvm->word_size) || (words > (nvm->word_size - offset)) || + (words == 0)) { + DEBUGOUT("nvm parameter(s) out of bounds\n"); + return -E1000_ERR_NVM; + } + + ret_val = nvm->ops.acquire(hw); + if (ret_val) + return ret_val; + + ret_val = e1000_ready_nvm_eeprom(hw); + if (ret_val) + goto release; + + e1000_standby_nvm(hw); + + if ((nvm->address_bits == 8) && (offset >= 128)) + read_opcode |= NVM_A8_OPCODE_SPI; + + /* Send the READ command (opcode + addr) */ + e1000_shift_out_eec_bits(hw, read_opcode, nvm->opcode_bits); + e1000_shift_out_eec_bits(hw, (u16)(offset*2), nvm->address_bits); + + /* Read the data. SPI NVMs increment the address with each byte + * read and will roll over if reading beyond the end. This allows + * us to read the whole NVM from any offset + */ + for (i = 0; i < words; i++) { + word_in = e1000_shift_in_eec_bits(hw, 16); + data[i] = (word_in >> 8) | (word_in << 8); + } + +release: + nvm->ops.release(hw); + + return ret_val; +} + +/** + * e1000_read_nvm_eerd - Reads EEPROM using EERD register + * @hw: pointer to the HW structure + * @offset: offset of word in the EEPROM to read + * @words: number of words to read + * @data: word read from the EEPROM + * + * Reads a 16 bit word from the EEPROM using the EERD register. + **/ +s32 e1000_read_nvm_eerd(struct e1000_hw *hw, u16 offset, u16 words, u16 *data) +{ + struct e1000_nvm_info *nvm = &hw->nvm; + u32 i, eerd = 0; + s32 ret_val = E1000_SUCCESS; + + DEBUGFUNC("e1000_read_nvm_eerd"); + + /* A check for invalid values: offset too large, too many words, + * too many words for the offset, and not enough words. 
+ */ + if ((offset >= nvm->word_size) || (words > (nvm->word_size - offset)) || + (words == 0)) { + DEBUGOUT("nvm parameter(s) out of bounds\n"); + return -E1000_ERR_NVM; + } + + for (i = 0; i < words; i++) { + eerd = ((offset+i) << E1000_NVM_RW_ADDR_SHIFT) + + E1000_NVM_RW_REG_START; + + E1000_WRITE_REG(hw, E1000_EERD, eerd); + ret_val = e1000_poll_eerd_eewr_done(hw, E1000_NVM_POLL_READ); + if (ret_val) + break; + + data[i] = (E1000_READ_REG(hw, E1000_EERD) >> + E1000_NVM_RW_REG_DATA); + } + + if (ret_val) + DEBUGOUT1("NVM read error: %d\n", ret_val); + + return ret_val; +} + +/** + * e1000_write_nvm_spi - Write to EEPROM using SPI + * @hw: pointer to the HW structure + * @offset: offset within the EEPROM to be written to + * @words: number of words to write + * @data: 16 bit word(s) to be written to the EEPROM + * + * Writes data to EEPROM at offset using SPI interface. + * + * If e1000_update_nvm_checksum is not called after this function , the + * EEPROM will most likely contain an invalid checksum. + **/ +s32 e1000_write_nvm_spi(struct e1000_hw *hw, u16 offset, u16 words, u16 *data) +{ + struct e1000_nvm_info *nvm = &hw->nvm; + s32 ret_val = -E1000_ERR_NVM; + u16 widx = 0; + + DEBUGFUNC("e1000_write_nvm_spi"); + + /* A check for invalid values: offset too large, too many words, + * and not enough words. 
+ */ + if ((offset >= nvm->word_size) || (words > (nvm->word_size - offset)) || + (words == 0)) { + DEBUGOUT("nvm parameter(s) out of bounds\n"); + return -E1000_ERR_NVM; + } + + while (widx < words) { + u8 write_opcode = NVM_WRITE_OPCODE_SPI; + + ret_val = nvm->ops.acquire(hw); + if (ret_val) + return ret_val; + + ret_val = e1000_ready_nvm_eeprom(hw); + if (ret_val) { + nvm->ops.release(hw); + return ret_val; + } + + e1000_standby_nvm(hw); + + /* Send the WRITE ENABLE command (8 bit opcode) */ + e1000_shift_out_eec_bits(hw, NVM_WREN_OPCODE_SPI, + nvm->opcode_bits); + + e1000_standby_nvm(hw); + + /* Some SPI eeproms use the 8th address bit embedded in the + * opcode + */ + if ((nvm->address_bits == 8) && (offset >= 128)) + write_opcode |= NVM_A8_OPCODE_SPI; + + /* Send the Write command (8-bit opcode + addr) */ + e1000_shift_out_eec_bits(hw, write_opcode, nvm->opcode_bits); + e1000_shift_out_eec_bits(hw, (u16)((offset + widx) * 2), + nvm->address_bits); + + /* Loop to allow for up to whole page write of eeprom */ + while (widx < words) { + u16 word_out = data[widx]; + word_out = (word_out >> 8) | (word_out << 8); + e1000_shift_out_eec_bits(hw, word_out, 16); + widx++; + + if ((((offset + widx) * 2) % nvm->page_size) == 0) { + e1000_standby_nvm(hw); + break; + } + } + msec_delay(10); + nvm->ops.release(hw); + } + + return ret_val; +} + +/** + * e1000_read_pba_string_generic - Read device part number + * @hw: pointer to the HW structure + * @pba_num: pointer to device part number + * @pba_num_size: size of part number buffer + * + * Reads the product board assembly (PBA) number from the EEPROM and stores + * the value in pba_num. 
+ **/ +s32 e1000_read_pba_string_generic(struct e1000_hw *hw, u8 *pba_num, + u32 pba_num_size) +{ + s32 ret_val; + u16 nvm_data; + u16 pba_ptr; + u16 offset; + u16 length; + + DEBUGFUNC("e1000_read_pba_string_generic"); + + if ((hw->mac.type >= e1000_i210) && + !e1000_get_flash_presence_i210(hw)) { + DEBUGOUT("Flashless no PBA string\n"); + return -E1000_ERR_NVM_PBA_SECTION; + } + + if (pba_num == NULL) { + DEBUGOUT("PBA string buffer was null\n"); + return -E1000_ERR_INVALID_ARGUMENT; + } + + ret_val = hw->nvm.ops.read(hw, NVM_PBA_OFFSET_0, 1, &nvm_data); + if (ret_val) { + DEBUGOUT("NVM Read Error\n"); + return ret_val; + } + + ret_val = hw->nvm.ops.read(hw, NVM_PBA_OFFSET_1, 1, &pba_ptr); + if (ret_val) { + DEBUGOUT("NVM Read Error\n"); + return ret_val; + } + + /* if nvm_data is not ptr guard the PBA must be in legacy format which + * means pba_ptr is actually our second data word for the PBA number + * and we can decode it into an ascii string + */ + if (nvm_data != NVM_PBA_PTR_GUARD) { + DEBUGOUT("NVM PBA number is not stored as string\n"); + + /* make sure callers buffer is big enough to store the PBA */ + if (pba_num_size < E1000_PBANUM_LENGTH) { + DEBUGOUT("PBA string buffer too small\n"); + return -E1000_ERR_NO_SPACE; + } + + /* extract hex string from data and pba_ptr */ + pba_num[0] = (nvm_data >> 12) & 0xF; + pba_num[1] = (nvm_data >> 8) & 0xF; + pba_num[2] = (nvm_data >> 4) & 0xF; + pba_num[3] = nvm_data & 0xF; + pba_num[4] = (pba_ptr >> 12) & 0xF; + pba_num[5] = (pba_ptr >> 8) & 0xF; + pba_num[6] = '-'; + pba_num[7] = 0; + pba_num[8] = (pba_ptr >> 4) & 0xF; + pba_num[9] = pba_ptr & 0xF; + + /* put a null character on the end of our string */ + pba_num[10] = '\0'; + + /* switch all the data but the '-' to hex char */ + for (offset = 0; offset < 10; offset++) { + if (pba_num[offset] < 0xA) + pba_num[offset] += '0'; + else if (pba_num[offset] < 0x10) + pba_num[offset] += 'A' - 0xA; + } + + return E1000_SUCCESS; + } + + ret_val = hw->nvm.ops.read(hw, 
pba_ptr, 1, &length); + if (ret_val) { + DEBUGOUT("NVM Read Error\n"); + return ret_val; + } + + if (length == 0xFFFF || length == 0) { + DEBUGOUT("NVM PBA number section invalid length\n"); + return -E1000_ERR_NVM_PBA_SECTION; + } + /* check if pba_num buffer is big enough */ + if (pba_num_size < (((u32)length * 2) - 1)) { + DEBUGOUT("PBA string buffer too small\n"); + return -E1000_ERR_NO_SPACE; + } + + /* trim pba length from start of string */ + pba_ptr++; + length--; + + for (offset = 0; offset < length; offset++) { + ret_val = hw->nvm.ops.read(hw, pba_ptr + offset, 1, &nvm_data); + if (ret_val) { + DEBUGOUT("NVM Read Error\n"); + return ret_val; + } + pba_num[offset * 2] = (u8)(nvm_data >> 8); + pba_num[(offset * 2) + 1] = (u8)(nvm_data & 0xFF); + } + pba_num[offset * 2] = '\0'; + + return E1000_SUCCESS; +} + +/** + * e1000_read_pba_length_generic - Read device part number length + * @hw: pointer to the HW structure + * @pba_num_size: size of part number buffer + * + * Reads the product board assembly (PBA) number length from the EEPROM and + * stores the value in pba_num_size. 
+ **/ +s32 e1000_read_pba_length_generic(struct e1000_hw *hw, u32 *pba_num_size) +{ + s32 ret_val; + u16 nvm_data; + u16 pba_ptr; + u16 length; + + DEBUGFUNC("e1000_read_pba_length_generic"); + + if (pba_num_size == NULL) { + DEBUGOUT("PBA buffer size was null\n"); + return -E1000_ERR_INVALID_ARGUMENT; + } + + ret_val = hw->nvm.ops.read(hw, NVM_PBA_OFFSET_0, 1, &nvm_data); + if (ret_val) { + DEBUGOUT("NVM Read Error\n"); + return ret_val; + } + + ret_val = hw->nvm.ops.read(hw, NVM_PBA_OFFSET_1, 1, &pba_ptr); + if (ret_val) { + DEBUGOUT("NVM Read Error\n"); + return ret_val; + } + + /* if data is not ptr guard the PBA must be in legacy format */ + if (nvm_data != NVM_PBA_PTR_GUARD) { + *pba_num_size = E1000_PBANUM_LENGTH; + return E1000_SUCCESS; + } + + ret_val = hw->nvm.ops.read(hw, pba_ptr, 1, &length); + if (ret_val) { + DEBUGOUT("NVM Read Error\n"); + return ret_val; + } + + if (length == 0xFFFF || length == 0) { + DEBUGOUT("NVM PBA number section invalid length\n"); + return -E1000_ERR_NVM_PBA_SECTION; + } + + /* Convert from length in u16 values to u8 chars, add 1 for NULL, + * and subtract 2 because length field is included in length. + */ + *pba_num_size = ((u32)length * 2) - 1; + + return E1000_SUCCESS; +} + +/** + * e1000_read_mac_addr_generic - Read device MAC address + * @hw: pointer to the HW structure + * + * Reads the device MAC address from the EEPROM and stores the value. + * Since devices with two ports use the same EEPROM, we increment the + * last bit in the MAC address for the second port. 
+ **/ +s32 e1000_read_mac_addr_generic(struct e1000_hw *hw) +{ + u32 rar_high; + u32 rar_low; + u16 i; + + rar_high = E1000_READ_REG(hw, E1000_RAH(0)); + rar_low = E1000_READ_REG(hw, E1000_RAL(0)); + + for (i = 0; i < E1000_RAL_MAC_ADDR_LEN; i++) + hw->mac.perm_addr[i] = (u8)(rar_low >> (i*8)); + + for (i = 0; i < E1000_RAH_MAC_ADDR_LEN; i++) + hw->mac.perm_addr[i+4] = (u8)(rar_high >> (i*8)); + + for (i = 0; i < ETH_ADDR_LEN; i++) + hw->mac.addr[i] = hw->mac.perm_addr[i]; + + return E1000_SUCCESS; +} + +/** + * e1000_validate_nvm_checksum_generic - Validate EEPROM checksum + * @hw: pointer to the HW structure + * + * Calculates the EEPROM checksum by reading/adding each word of the EEPROM + * and then verifies that the sum of the EEPROM is equal to 0xBABA. + **/ +s32 e1000_validate_nvm_checksum_generic(struct e1000_hw *hw) +{ + s32 ret_val; + u16 checksum = 0; + u16 i, nvm_data; + + DEBUGFUNC("e1000_validate_nvm_checksum_generic"); + + for (i = 0; i < (NVM_CHECKSUM_REG + 1); i++) { + ret_val = hw->nvm.ops.read(hw, i, 1, &nvm_data); + if (ret_val) { + DEBUGOUT("NVM Read Error\n"); + return ret_val; + } + checksum += nvm_data; + } + + if (checksum != (u16) NVM_SUM) { + DEBUGOUT("NVM Checksum Invalid\n"); + return -E1000_ERR_NVM; + } + + return E1000_SUCCESS; +} + +/** + * e1000_update_nvm_checksum_generic - Update EEPROM checksum + * @hw: pointer to the HW structure + * + * Updates the EEPROM checksum by reading/adding each word of the EEPROM + * up to the checksum. Then calculates the EEPROM checksum and writes the + * value to the EEPROM. 
+ **/ +s32 e1000_update_nvm_checksum_generic(struct e1000_hw *hw) +{ + s32 ret_val; + u16 checksum = 0; + u16 i, nvm_data; + + DEBUGFUNC("e1000_update_nvm_checksum"); + + for (i = 0; i < NVM_CHECKSUM_REG; i++) { + ret_val = hw->nvm.ops.read(hw, i, 1, &nvm_data); + if (ret_val) { + DEBUGOUT("NVM Read Error while updating checksum.\n"); + return ret_val; + } + checksum += nvm_data; + } + checksum = (u16) NVM_SUM - checksum; + ret_val = hw->nvm.ops.write(hw, NVM_CHECKSUM_REG, 1, &checksum); + if (ret_val) + DEBUGOUT("NVM Write Error while updating checksum.\n"); + + return ret_val; +} + +/** + * e1000_reload_nvm_generic - Reloads EEPROM + * @hw: pointer to the HW structure + * + * Reloads the EEPROM by setting the "Reinitialize from EEPROM" bit in the + * extended control register. + **/ +static void e1000_reload_nvm_generic(struct e1000_hw *hw) +{ + u32 ctrl_ext; + + DEBUGFUNC("e1000_reload_nvm_generic"); + + usec_delay(10); + ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT); + ctrl_ext |= E1000_CTRL_EXT_EE_RST; + E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext); + E1000_WRITE_FLUSH(hw); +} + +/** + * e1000_get_fw_version - Get firmware version information + * @hw: pointer to the HW structure + * @fw_vers: pointer to output version structure + * + * unsupported/not present features return 0 in version structure + **/ +void e1000_get_fw_version(struct e1000_hw *hw, struct e1000_fw_version *fw_vers) +{ + u16 eeprom_verh, eeprom_verl, etrack_test, fw_version; + u8 q, hval, rem, result; + u16 comb_verh, comb_verl, comb_offset; + + memset(fw_vers, 0, sizeof(struct e1000_fw_version)); + + /* basic eeprom version numbers, bits used vary by part and by tool + * used to create the nvm images */ + /* Check which data format we have */ + switch (hw->mac.type) { + case e1000_i211: + e1000_read_invm_version(hw, fw_vers); + return; + case e1000_82575: + case e1000_82576: + case e1000_82580: + hw->nvm.ops.read(hw, NVM_ETRACK_HIWORD, 1, &etrack_test); + /* Use this format, unless EETRACK 
ID exists, + * then use alternate format + */ + if ((etrack_test & NVM_MAJOR_MASK) != NVM_ETRACK_VALID) { + hw->nvm.ops.read(hw, NVM_VERSION, 1, &fw_version); + fw_vers->eep_major = (fw_version & NVM_MAJOR_MASK) + >> NVM_MAJOR_SHIFT; + fw_vers->eep_minor = (fw_version & NVM_MINOR_MASK) + >> NVM_MINOR_SHIFT; + fw_vers->eep_build = (fw_version & NVM_IMAGE_ID_MASK); + goto etrack_id; + } + break; + case e1000_i210: + if (!(e1000_get_flash_presence_i210(hw))) { + e1000_read_invm_version(hw, fw_vers); + return; + } + /* fall through */ + case e1000_i350: + hw->nvm.ops.read(hw, NVM_ETRACK_HIWORD, 1, &etrack_test); + /* find combo image version */ + hw->nvm.ops.read(hw, NVM_COMB_VER_PTR, 1, &comb_offset); + if ((comb_offset != 0x0) && + (comb_offset != NVM_VER_INVALID)) { + + hw->nvm.ops.read(hw, (NVM_COMB_VER_OFF + comb_offset + + 1), 1, &comb_verh); + hw->nvm.ops.read(hw, (NVM_COMB_VER_OFF + comb_offset), + 1, &comb_verl); + + /* get Option Rom version if it exists and is valid */ + if ((comb_verh && comb_verl) && + ((comb_verh != NVM_VER_INVALID) && + (comb_verl != NVM_VER_INVALID))) { + + fw_vers->or_valid = true; + fw_vers->or_major = + comb_verl >> NVM_COMB_VER_SHFT; + fw_vers->or_build = + (comb_verl << NVM_COMB_VER_SHFT) + | (comb_verh >> NVM_COMB_VER_SHFT); + fw_vers->or_patch = + comb_verh & NVM_COMB_VER_MASK; + } + } + break; + default: + hw->nvm.ops.read(hw, NVM_ETRACK_HIWORD, 1, &etrack_test); + return; + } + hw->nvm.ops.read(hw, NVM_VERSION, 1, &fw_version); + fw_vers->eep_major = (fw_version & NVM_MAJOR_MASK) + >> NVM_MAJOR_SHIFT; + + /* check for old style version format in newer images*/ + if ((fw_version & NVM_NEW_DEC_MASK) == 0x0) { + eeprom_verl = (fw_version & NVM_COMB_VER_MASK); + } else { + eeprom_verl = (fw_version & NVM_MINOR_MASK) + >> NVM_MINOR_SHIFT; + } + /* Convert minor value to hex before assigning to output struct + * Val to be converted will not be higher than 99, per tool output + */ + q = eeprom_verl / NVM_HEX_CONV; + hval = q * 
NVM_HEX_TENS; + rem = eeprom_verl % NVM_HEX_CONV; + result = hval + rem; + fw_vers->eep_minor = result; + +etrack_id: + if ((etrack_test & NVM_MAJOR_MASK) == NVM_ETRACK_VALID) { + hw->nvm.ops.read(hw, NVM_ETRACK_WORD, 1, &eeprom_verl); + hw->nvm.ops.read(hw, (NVM_ETRACK_WORD + 1), 1, &eeprom_verh); + fw_vers->etrack_id = (eeprom_verh << NVM_ETRACK_SHIFT) + | eeprom_verl; + } else if ((etrack_test & NVM_ETRACK_VALID) == 0) { + hw->nvm.ops.read(hw, NVM_ETRACK_WORD, 1, &eeprom_verh); + hw->nvm.ops.read(hw, (NVM_ETRACK_WORD + 1), 1, &eeprom_verl); + fw_vers->etrack_id = (eeprom_verh << NVM_ETRACK_SHIFT) | + eeprom_verl; + } +} + diff --git a/drivers/staging/igb_avb/e1000_nvm.h b/drivers/staging/igb_avb/e1000_nvm.h new file mode 100644 index 0000000000000..a4263113d72d8 --- /dev/null +++ b/drivers/staging/igb_avb/e1000_nvm.h @@ -0,0 +1,70 @@ +/******************************************************************************* + + Intel(R) Gigabit Ethernet Linux driver + Copyright(c) 2007-2015 Intel Corporation. + + This program is free software; you can redistribute it and/or modify it + under the terms and conditions of the GNU General Public License, + version 2, as published by the Free Software Foundation. + + This program is distributed in the hope it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. + + The full GNU General Public License is included in this distribution in + the file called "COPYING". + + Contact Information: + Linux NICS + e1000-devel Mailing List + Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 + +*******************************************************************************/ + +#ifndef _E1000_NVM_H_ +#define _E1000_NVM_H_ + +struct e1000_fw_version { + u32 etrack_id; + u16 eep_major; + u16 eep_minor; + u16 eep_build; + + u8 invm_major; + u8 invm_minor; + u8 invm_img_type; + + bool or_valid; + u16 or_major; + u16 or_build; + u16 or_patch; +}; + +void e1000_init_nvm_ops_generic(struct e1000_hw *hw); +s32 e1000_null_read_nvm(struct e1000_hw *hw, u16 a, u16 b, u16 *c); +void e1000_null_nvm_generic(struct e1000_hw *hw); +s32 e1000_null_led_default(struct e1000_hw *hw, u16 *data); +s32 e1000_null_write_nvm(struct e1000_hw *hw, u16 a, u16 b, u16 *c); +s32 e1000_acquire_nvm_generic(struct e1000_hw *hw); + +s32 e1000_poll_eerd_eewr_done(struct e1000_hw *hw, int ee_reg); +s32 e1000_read_mac_addr_generic(struct e1000_hw *hw); +s32 e1000_read_pba_string_generic(struct e1000_hw *hw, u8 *pba_num, + u32 pba_num_size); +s32 e1000_read_pba_length_generic(struct e1000_hw *hw, u32 *pba_num_size); +s32 e1000_read_nvm_spi(struct e1000_hw *hw, u16 offset, u16 words, u16 *data); +s32 e1000_read_nvm_eerd(struct e1000_hw *hw, u16 offset, u16 words, + u16 *data); +s32 e1000_valid_led_default_generic(struct e1000_hw *hw, u16 *data); +s32 e1000_validate_nvm_checksum_generic(struct e1000_hw *hw); +s32 e1000_write_nvm_spi(struct e1000_hw *hw, u16 offset, u16 words, + u16 *data); +s32 e1000_update_nvm_checksum_generic(struct e1000_hw *hw); +void e1000_release_nvm_generic(struct e1000_hw *hw); +void e1000_get_fw_version(struct e1000_hw *hw, + struct e1000_fw_version *fw_vers); + +#define E1000_STM_OPCODE 0xDB00 + +#endif diff --git a/drivers/staging/igb_avb/e1000_osdep.h b/drivers/staging/igb_avb/e1000_osdep.h new file mode 100644 index 0000000000000..3c6b79586cf81 --- /dev/null +++ b/drivers/staging/igb_avb/e1000_osdep.h @@ -0,0 +1,141 @@ +/******************************************************************************* + + Intel(R) Gigabit 
Ethernet Linux driver + Copyright(c) 2007-2015 Intel Corporation. + + This program is free software; you can redistribute it and/or modify it + under the terms and conditions of the GNU General Public License, + version 2, as published by the Free Software Foundation. + + This program is distributed in the hope it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. + + The full GNU General Public License is included in this distribution in + the file called "COPYING". + + Contact Information: + Linux NICS + e1000-devel Mailing List + Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + +*******************************************************************************/ + +/* glue for the OS independent part of e1000 + * includes register access macros + */ + +#ifndef _E1000_OSDEP_H_ +#define _E1000_OSDEP_H_ + +#include +#include +#include +#include +#include +#include "kcompat.h" + +#define usec_delay(x) udelay(x) +#define usec_delay_irq(x) udelay(x) +#ifndef msec_delay +#define msec_delay(x) do { \ + /* Don't mdelay in interrupt context! */ \ + if (in_interrupt()) \ + BUG(); \ + else \ + msleep(x); \ +} while (0) + +/* Some workarounds require millisecond delays and are run during interrupt + * context. Most notably, when establishing link, the phy may need tweaking + * but cannot process phy register reads/writes faster than millisecond + * intervals...and we establish link due to a "link status change" interrupt. + */ +#define msec_delay_irq(x) mdelay(x) + +#define E1000_READ_REG(x, y) e1000_read_reg(x, y) +#endif + +#define PCI_COMMAND_REGISTER PCI_COMMAND +#define CMD_MEM_WRT_INVALIDATE PCI_COMMAND_INVALIDATE +#define ETH_ADDR_LEN ETH_ALEN + +#ifdef __BIG_ENDIAN +#define E1000_BIG_ENDIAN __BIG_ENDIAN +#endif + +#ifdef DEBUG +#define DEBUGOUT(S) pr_debug(S) +#define DEBUGOUT1(S, A...) 
pr_debug(S, ## A) +#else +#define DEBUGOUT(S) +#define DEBUGOUT1(S, A...) +#endif + +#ifdef DEBUG_FUNC +#define DEBUGFUNC(F) DEBUGOUT(F "\n") +#else +#define DEBUGFUNC(F) +#endif +#define DEBUGOUT2 DEBUGOUT1 +#define DEBUGOUT3 DEBUGOUT2 +#define DEBUGOUT7 DEBUGOUT3 + +#define E1000_REGISTER(a, reg) reg + +/* forward declaration */ +struct e1000_hw; + +/* write operations, indexed using DWORDS */ +#define E1000_WRITE_REG(hw, reg, val) \ +do { \ + u8 __iomem *hw_addr = READ_ONCE((hw)->hw_addr); \ + if (!E1000_REMOVED(hw_addr)) \ + writel((val), &hw_addr[(reg)]); \ +} while (0) + +u32 e1000_read_reg(struct e1000_hw *hw, u32 reg); + +#define E1000_WRITE_REG_ARRAY(hw, reg, idx, val) \ + E1000_WRITE_REG((hw), (reg) + ((idx) << 2), (val)) + +#define E1000_READ_REG_ARRAY(hw, reg, idx) ( \ + e1000_read_reg((hw), (reg) + ((idx) << 2))) + +#define E1000_READ_REG_ARRAY_DWORD E1000_READ_REG_ARRAY +#define E1000_WRITE_REG_ARRAY_DWORD E1000_WRITE_REG_ARRAY + +#define E1000_WRITE_REG_ARRAY_WORD(a, reg, offset, value) ( \ + writew((value), ((a)->hw_addr + E1000_REGISTER(a, reg) + \ + ((offset) << 1)))) + +#define E1000_READ_REG_ARRAY_WORD(a, reg, offset) ( \ + readw((a)->hw_addr + E1000_REGISTER(a, reg) + ((offset) << 1))) + +#define E1000_WRITE_REG_ARRAY_BYTE(a, reg, offset, value) ( \ + writeb((value), ((a)->hw_addr + E1000_REGISTER(a, reg) + (offset)))) + +#define E1000_READ_REG_ARRAY_BYTE(a, reg, offset) ( \ + readb((a)->hw_addr + E1000_REGISTER(a, reg) + (offset))) + +#define E1000_WRITE_REG_IO(a, reg, offset) do { \ + outl(reg, ((a)->io_base)); \ + outl(offset, ((a)->io_base + 4)); \ + } while (0) + +#define E1000_WRITE_FLUSH(a) E1000_READ_REG(a, E1000_STATUS) + +#define E1000_WRITE_FLASH_REG(a, reg, value) ( \ + writel((value), ((a)->flash_address + reg))) + +#define E1000_WRITE_FLASH_REG16(a, reg, value) ( \ + writew((value), ((a)->flash_address + reg))) + +#define E1000_READ_FLASH_REG(a, reg) (readl((a)->flash_address + reg)) + +#define E1000_READ_FLASH_REG16(a, reg) 
(readw((a)->flash_address + reg)) + +#define E1000_REMOVED(h) unlikely(!(h)) + +#endif /* _E1000_OSDEP_H_ */ diff --git a/drivers/staging/igb_avb/e1000_phy.c b/drivers/staging/igb_avb/e1000_phy.c new file mode 100644 index 0000000000000..46ab8d5ae2ab4 --- /dev/null +++ b/drivers/staging/igb_avb/e1000_phy.c @@ -0,0 +1,3398 @@ +/******************************************************************************* + + Intel(R) Gigabit Ethernet Linux driver + Copyright(c) 2007-2015 Intel Corporation. + + This program is free software; you can redistribute it and/or modify it + under the terms and conditions of the GNU General Public License, + version 2, as published by the Free Software Foundation. + + This program is distributed in the hope it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. + + The full GNU General Public License is included in this distribution in + the file called "COPYING". + + Contact Information: + Linux NICS + e1000-devel Mailing List + Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 + +*******************************************************************************/ + +#include "e1000_api.h" + +static s32 e1000_wait_autoneg(struct e1000_hw *hw); +/* Cable length tables */ +static const u16 e1000_m88_cable_length_table[] = { + 0, 50, 80, 110, 140, 140, E1000_CABLE_LENGTH_UNDEFINED }; +#define M88E1000_CABLE_LENGTH_TABLE_SIZE \ + (sizeof(e1000_m88_cable_length_table) / \ + sizeof(e1000_m88_cable_length_table[0])) + +static const u16 e1000_igp_2_cable_length_table[] = { + 0, 0, 0, 0, 0, 0, 0, 0, 3, 5, 8, 11, 13, 16, 18, 21, 0, 0, 0, 3, + 6, 10, 13, 16, 19, 23, 26, 29, 32, 35, 38, 41, 6, 10, 14, 18, 22, + 26, 30, 33, 37, 41, 44, 48, 51, 54, 58, 61, 21, 26, 31, 35, 40, + 44, 49, 53, 57, 61, 65, 68, 72, 75, 79, 82, 40, 45, 51, 56, 61, + 66, 70, 75, 79, 83, 87, 91, 94, 98, 101, 104, 60, 66, 72, 77, 82, + 87, 92, 96, 100, 104, 108, 111, 114, 117, 119, 121, 83, 89, 95, + 100, 105, 109, 113, 116, 119, 122, 124, 104, 109, 114, 118, 121, + 124}; +#define IGP02E1000_CABLE_LENGTH_TABLE_SIZE \ + (sizeof(e1000_igp_2_cable_length_table) / \ + sizeof(e1000_igp_2_cable_length_table[0])) + +/** + * e1000_init_phy_ops_generic - Initialize PHY function pointers + * @hw: pointer to the HW structure + * + * Setups up the function pointers to no-op functions + **/ +void e1000_init_phy_ops_generic(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + DEBUGFUNC("e1000_init_phy_ops_generic"); + + /* Initialize function pointers */ + phy->ops.init_params = e1000_null_ops_generic; + phy->ops.acquire = e1000_null_ops_generic; + phy->ops.check_polarity = e1000_null_ops_generic; + phy->ops.check_reset_block = e1000_null_ops_generic; + phy->ops.commit = e1000_null_ops_generic; + phy->ops.force_speed_duplex = e1000_null_ops_generic; + phy->ops.get_cfg_done = e1000_null_ops_generic; + phy->ops.get_cable_length = e1000_null_ops_generic; + phy->ops.get_info = e1000_null_ops_generic; + phy->ops.set_page = e1000_null_set_page; 
+ phy->ops.read_reg = e1000_null_read_reg; + phy->ops.read_reg_locked = e1000_null_read_reg; + phy->ops.read_reg_page = e1000_null_read_reg; + phy->ops.release = e1000_null_phy_generic; + phy->ops.reset = e1000_null_ops_generic; + phy->ops.set_d0_lplu_state = e1000_null_lplu_state; + phy->ops.set_d3_lplu_state = e1000_null_lplu_state; + phy->ops.write_reg = e1000_null_write_reg; + phy->ops.write_reg_locked = e1000_null_write_reg; + phy->ops.write_reg_page = e1000_null_write_reg; + phy->ops.power_up = e1000_null_phy_generic; + phy->ops.power_down = e1000_null_phy_generic; + phy->ops.read_i2c_byte = e1000_read_i2c_byte_null; + phy->ops.write_i2c_byte = e1000_write_i2c_byte_null; +} + +/** + * e1000_null_set_page - No-op function, return 0 + * @hw: pointer to the HW structure + **/ +s32 e1000_null_set_page(struct e1000_hw E1000_UNUSEDARG *hw, + u16 E1000_UNUSEDARG data) +{ + DEBUGFUNC("e1000_null_set_page"); + return E1000_SUCCESS; +} + +/** + * e1000_null_read_reg - No-op function, return 0 + * @hw: pointer to the HW structure + **/ +s32 e1000_null_read_reg(struct e1000_hw E1000_UNUSEDARG *hw, + u32 E1000_UNUSEDARG offset, u16 E1000_UNUSEDARG *data) +{ + DEBUGFUNC("e1000_null_read_reg"); + return E1000_SUCCESS; +} + +/** + * e1000_null_phy_generic - No-op function, return void + * @hw: pointer to the HW structure + **/ +void e1000_null_phy_generic(struct e1000_hw E1000_UNUSEDARG *hw) +{ + DEBUGFUNC("e1000_null_phy_generic"); + return; +} + +/** + * e1000_null_lplu_state - No-op function, return 0 + * @hw: pointer to the HW structure + **/ +s32 e1000_null_lplu_state(struct e1000_hw E1000_UNUSEDARG *hw, + bool E1000_UNUSEDARG active) +{ + DEBUGFUNC("e1000_null_lplu_state"); + return E1000_SUCCESS; +} + +/** + * e1000_null_write_reg - No-op function, return 0 + * @hw: pointer to the HW structure + **/ +s32 e1000_null_write_reg(struct e1000_hw E1000_UNUSEDARG *hw, + u32 E1000_UNUSEDARG offset, u16 E1000_UNUSEDARG data) +{ + DEBUGFUNC("e1000_null_write_reg"); + return 
E1000_SUCCESS; +} + +/** + * e1000_read_i2c_byte_null - No-op function, return 0 + * @hw: pointer to hardware structure + * @byte_offset: byte offset to write + * @dev_addr: device address + * @data: data value read + * + **/ +s32 e1000_read_i2c_byte_null(struct e1000_hw E1000_UNUSEDARG *hw, + u8 E1000_UNUSEDARG byte_offset, + u8 E1000_UNUSEDARG dev_addr, + u8 E1000_UNUSEDARG *data) +{ + DEBUGFUNC("e1000_read_i2c_byte_null"); + return E1000_SUCCESS; +} + +/** + * e1000_write_i2c_byte_null - No-op function, return 0 + * @hw: pointer to hardware structure + * @byte_offset: byte offset to write + * @dev_addr: device address + * @data: data value to write + * + **/ +s32 e1000_write_i2c_byte_null(struct e1000_hw E1000_UNUSEDARG *hw, + u8 E1000_UNUSEDARG byte_offset, + u8 E1000_UNUSEDARG dev_addr, + u8 E1000_UNUSEDARG data) +{ + DEBUGFUNC("e1000_write_i2c_byte_null"); + return E1000_SUCCESS; +} + +/** + * e1000_check_reset_block_generic - Check if PHY reset is blocked + * @hw: pointer to the HW structure + * + * Read the PHY management control register and check whether a PHY reset + * is blocked. If a reset is not blocked return E1000_SUCCESS, otherwise + * return E1000_BLK_PHY_RESET (12). + **/ +s32 e1000_check_reset_block_generic(struct e1000_hw *hw) +{ + u32 manc; + + DEBUGFUNC("e1000_check_reset_block"); + + manc = E1000_READ_REG(hw, E1000_MANC); + + return (manc & E1000_MANC_BLK_PHY_RST_ON_IDE) ? + E1000_BLK_PHY_RESET : E1000_SUCCESS; +} + +/** + * e1000_get_phy_id - Retrieve the PHY ID and revision + * @hw: pointer to the HW structure + * + * Reads the PHY registers and stores the PHY ID and possibly the PHY + * revision in the hardware structure. 
+ **/ +s32 e1000_get_phy_id(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val = E1000_SUCCESS; + u16 phy_id; + + DEBUGFUNC("e1000_get_phy_id"); + + if (!phy->ops.read_reg) + return E1000_SUCCESS; + + ret_val = phy->ops.read_reg(hw, PHY_ID1, &phy_id); + if (ret_val) + return ret_val; + + phy->id = (u32)(phy_id << 16); + usec_delay(20); + ret_val = phy->ops.read_reg(hw, PHY_ID2, &phy_id); + if (ret_val) + return ret_val; + + phy->id |= (u32)(phy_id & PHY_REVISION_MASK); + phy->revision = (u32)(phy_id & ~PHY_REVISION_MASK); + + return E1000_SUCCESS; +} + +/** + * e1000_phy_reset_dsp_generic - Reset PHY DSP + * @hw: pointer to the HW structure + * + * Reset the digital signal processor. + **/ +s32 e1000_phy_reset_dsp_generic(struct e1000_hw *hw) +{ + s32 ret_val; + + DEBUGFUNC("e1000_phy_reset_dsp_generic"); + + if (!hw->phy.ops.write_reg) + return E1000_SUCCESS; + + ret_val = hw->phy.ops.write_reg(hw, M88E1000_PHY_GEN_CONTROL, 0xC1); + if (ret_val) + return ret_val; + + return hw->phy.ops.write_reg(hw, M88E1000_PHY_GEN_CONTROL, 0); +} + +/** + * e1000_read_phy_reg_mdic - Read MDI control register + * @hw: pointer to the HW structure + * @offset: register offset to be read + * @data: pointer to the read data + * + * Reads the MDI control register in the PHY at offset and stores the + * information read to data. + **/ +s32 e1000_read_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 *data) +{ + struct e1000_phy_info *phy = &hw->phy; + u32 i, mdic = 0; + + DEBUGFUNC("e1000_read_phy_reg_mdic"); + + if (offset > MAX_PHY_REG_ADDRESS) { + DEBUGOUT1("PHY Address %d is out of range\n", offset); + return -E1000_ERR_PARAM; + } + + /* Set up Op-code, Phy Address, and register offset in the MDI + * Control register. The MAC will take care of interfacing with the + * PHY to retrieve the desired data. 
+ */ + mdic = ((offset << E1000_MDIC_REG_SHIFT) | + (phy->addr << E1000_MDIC_PHY_SHIFT) | + (E1000_MDIC_OP_READ)); + + E1000_WRITE_REG(hw, E1000_MDIC, mdic); + + /* Poll the ready bit to see if the MDI read completed + * Increasing the time out as testing showed failures with + * the lower time out + */ + for (i = 0; i < (E1000_GEN_POLL_TIMEOUT * 3); i++) { + usec_delay_irq(50); + mdic = E1000_READ_REG(hw, E1000_MDIC); + if (mdic & E1000_MDIC_READY) + break; + } + if (!(mdic & E1000_MDIC_READY)) { + DEBUGOUT("MDI Read did not complete\n"); + return -E1000_ERR_PHY; + } + if (mdic & E1000_MDIC_ERROR) { + DEBUGOUT("MDI Error\n"); + return -E1000_ERR_PHY; + } + if (((mdic & E1000_MDIC_REG_MASK) >> E1000_MDIC_REG_SHIFT) != offset) { + DEBUGOUT2("MDI Read offset error - requested %d, returned %d\n", + offset, + (mdic & E1000_MDIC_REG_MASK) >> E1000_MDIC_REG_SHIFT); + return -E1000_ERR_PHY; + } + *data = (u16) mdic; + + return E1000_SUCCESS; +} + +/** + * e1000_write_phy_reg_mdic - Write MDI control register + * @hw: pointer to the HW structure + * @offset: register offset to write to + * @data: data to write to register at offset + * + * Writes data to MDI control register in the PHY at offset. + **/ +s32 e1000_write_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 data) +{ + struct e1000_phy_info *phy = &hw->phy; + u32 i, mdic = 0; + + DEBUGFUNC("e1000_write_phy_reg_mdic"); + + if (offset > MAX_PHY_REG_ADDRESS) { + DEBUGOUT1("PHY Address %d is out of range\n", offset); + return -E1000_ERR_PARAM; + } + + /* Set up Op-code, Phy Address, and register offset in the MDI + * Control register. The MAC will take care of interfacing with the + * PHY to retrieve the desired data. 
+ */ + mdic = (((u32)data) | + (offset << E1000_MDIC_REG_SHIFT) | + (phy->addr << E1000_MDIC_PHY_SHIFT) | + (E1000_MDIC_OP_WRITE)); + + E1000_WRITE_REG(hw, E1000_MDIC, mdic); + + /* Poll the ready bit to see if the MDI read completed + * Increasing the time out as testing showed failures with + * the lower time out + */ + for (i = 0; i < (E1000_GEN_POLL_TIMEOUT * 3); i++) { + usec_delay_irq(50); + mdic = E1000_READ_REG(hw, E1000_MDIC); + if (mdic & E1000_MDIC_READY) + break; + } + if (!(mdic & E1000_MDIC_READY)) { + DEBUGOUT("MDI Write did not complete\n"); + return -E1000_ERR_PHY; + } + if (mdic & E1000_MDIC_ERROR) { + DEBUGOUT("MDI Error\n"); + return -E1000_ERR_PHY; + } + if (((mdic & E1000_MDIC_REG_MASK) >> E1000_MDIC_REG_SHIFT) != offset) { + DEBUGOUT2("MDI Write offset error - requested %d, returned %d\n", + offset, + (mdic & E1000_MDIC_REG_MASK) >> E1000_MDIC_REG_SHIFT); + return -E1000_ERR_PHY; + } + + return E1000_SUCCESS; +} + +/** + * e1000_read_phy_reg_i2c - Read PHY register using i2c + * @hw: pointer to the HW structure + * @offset: register offset to be read + * @data: pointer to the read data + * + * Reads the PHY register at offset using the i2c interface and stores the + * retrieved information in data. + **/ +s32 e1000_read_phy_reg_i2c(struct e1000_hw *hw, u32 offset, u16 *data) +{ + struct e1000_phy_info *phy = &hw->phy; + u32 i, i2ccmd = 0; + + DEBUGFUNC("e1000_read_phy_reg_i2c"); + + /* Set up Op-code, Phy Address, and register address in the I2CCMD + * register. The MAC will take care of interfacing with the + * PHY to retrieve the desired data. 
+ */ + i2ccmd = ((offset << E1000_I2CCMD_REG_ADDR_SHIFT) | + (phy->addr << E1000_I2CCMD_PHY_ADDR_SHIFT) | + (E1000_I2CCMD_OPCODE_READ)); + + E1000_WRITE_REG(hw, E1000_I2CCMD, i2ccmd); + + /* Poll the ready bit to see if the I2C read completed */ + for (i = 0; i < E1000_I2CCMD_PHY_TIMEOUT; i++) { + usec_delay(50); + i2ccmd = E1000_READ_REG(hw, E1000_I2CCMD); + if (i2ccmd & E1000_I2CCMD_READY) + break; + } + if (!(i2ccmd & E1000_I2CCMD_READY)) { + DEBUGOUT("I2CCMD Read did not complete\n"); + return -E1000_ERR_PHY; + } + if (i2ccmd & E1000_I2CCMD_ERROR) { + DEBUGOUT("I2CCMD Error bit set\n"); + return -E1000_ERR_PHY; + } + + /* Need to byte-swap the 16-bit value. */ + *data = ((i2ccmd >> 8) & 0x00FF) | ((i2ccmd << 8) & 0xFF00); + + return E1000_SUCCESS; +} + +/** + * e1000_write_phy_reg_i2c - Write PHY register using i2c + * @hw: pointer to the HW structure + * @offset: register offset to write to + * @data: data to write at register offset + * + * Writes the data to PHY register at the offset using the i2c interface. + **/ +s32 e1000_write_phy_reg_i2c(struct e1000_hw *hw, u32 offset, u16 data) +{ + struct e1000_phy_info *phy = &hw->phy; + u32 i, i2ccmd = 0; + u16 phy_data_swapped; + + DEBUGFUNC("e1000_write_phy_reg_i2c"); + + /* Prevent overwriting SFP I2C EEPROM which is at A0 address. */ + if ((hw->phy.addr == 0) || (hw->phy.addr > 7)) { + DEBUGOUT1("PHY I2C Address %d is out of range.\n", + hw->phy.addr); + return -E1000_ERR_CONFIG; + } + + /* Swap the data bytes for the I2C interface */ + phy_data_swapped = ((data >> 8) & 0x00FF) | ((data << 8) & 0xFF00); + + /* Set up Op-code, Phy Address, and register address in the I2CCMD + * register. The MAC will take care of interfacing with the + * PHY to retrieve the desired data.
+ */ + i2ccmd = ((offset << E1000_I2CCMD_REG_ADDR_SHIFT) | + (phy->addr << E1000_I2CCMD_PHY_ADDR_SHIFT) | + E1000_I2CCMD_OPCODE_WRITE | + phy_data_swapped); + + E1000_WRITE_REG(hw, E1000_I2CCMD, i2ccmd); + + /* Poll the ready bit to see if the I2C read completed */ + for (i = 0; i < E1000_I2CCMD_PHY_TIMEOUT; i++) { + usec_delay(50); + i2ccmd = E1000_READ_REG(hw, E1000_I2CCMD); + if (i2ccmd & E1000_I2CCMD_READY) + break; + } + if (!(i2ccmd & E1000_I2CCMD_READY)) { + DEBUGOUT("I2CCMD Write did not complete\n"); + return -E1000_ERR_PHY; + } + if (i2ccmd & E1000_I2CCMD_ERROR) { + DEBUGOUT("I2CCMD Error bit set\n"); + return -E1000_ERR_PHY; + } + + return E1000_SUCCESS; +} + +/** + * e1000_read_sfp_data_byte - Reads SFP module data. + * @hw: pointer to the HW structure + * @offset: byte location offset to be read + * @data: read data buffer pointer + * + * Reads one byte from SFP module data stored + * in SFP resided EEPROM memory or SFP diagnostic area. + * Function should be called with + * E1000_I2CCMD_SFP_DATA_ADDR() for SFP module database access + * E1000_I2CCMD_SFP_DIAG_ADDR() for SFP diagnostics parameters + * access + **/ +s32 e1000_read_sfp_data_byte(struct e1000_hw *hw, u16 offset, u8 *data) +{ + u32 i = 0; + u32 i2ccmd = 0; + u32 data_local = 0; + + DEBUGFUNC("e1000_read_sfp_data_byte"); + + if (offset > E1000_I2CCMD_SFP_DIAG_ADDR(255)) { + DEBUGOUT("I2CCMD command address exceeds upper limit\n"); + return -E1000_ERR_PHY; + } + + /* Set up Op-code, EEPROM Address,in the I2CCMD + * register. The MAC will take care of interfacing with the + * EEPROM to retrieve the desired data. 
+ */ + i2ccmd = ((offset << E1000_I2CCMD_REG_ADDR_SHIFT) | + E1000_I2CCMD_OPCODE_READ); + + E1000_WRITE_REG(hw, E1000_I2CCMD, i2ccmd); + + /* Poll the ready bit to see if the I2C read completed */ + for (i = 0; i < E1000_I2CCMD_PHY_TIMEOUT; i++) { + usec_delay(50); + data_local = E1000_READ_REG(hw, E1000_I2CCMD); + if (data_local & E1000_I2CCMD_READY) + break; + } + if (!(data_local & E1000_I2CCMD_READY)) { + DEBUGOUT("I2CCMD Read did not complete\n"); + return -E1000_ERR_PHY; + } + if (data_local & E1000_I2CCMD_ERROR) { + DEBUGOUT("I2CCMD Error bit set\n"); + return -E1000_ERR_PHY; + } + *data = (u8) data_local & 0xFF; + + return E1000_SUCCESS; +} + +/** + * e1000_write_sfp_data_byte - Writes SFP module data. + * @hw: pointer to the HW structure + * @offset: byte location offset to write to + * @data: data to write + * + * Writes one byte to SFP module data stored + * in SFP resided EEPROM memory or SFP diagnostic area. + * Function should be called with + * E1000_I2CCMD_SFP_DATA_ADDR() for SFP module database access + * E1000_I2CCMD_SFP_DIAG_ADDR() for SFP diagnostics parameters + * access + **/ +s32 e1000_write_sfp_data_byte(struct e1000_hw *hw, u16 offset, u8 data) +{ + u32 i = 0; + u32 i2ccmd = 0; + u32 data_local = 0; + + DEBUGFUNC("e1000_write_sfp_data_byte"); + + if (offset > E1000_I2CCMD_SFP_DIAG_ADDR(255)) { + DEBUGOUT("I2CCMD command address exceeds upper limit\n"); + return -E1000_ERR_PHY; + } + /* The programming interface is 16 bits wide + * so we need to read the whole word first + * then update appropriate byte lane and write + * the updated word back. + */ + /* Set up Op-code, EEPROM Address,in the I2CCMD + * register. The MAC will take care of interfacing + * with an EEPROM to write the data given. 
+ */ + i2ccmd = ((offset << E1000_I2CCMD_REG_ADDR_SHIFT) | + E1000_I2CCMD_OPCODE_READ); + /* Set a command to read single word */ + E1000_WRITE_REG(hw, E1000_I2CCMD, i2ccmd); + for (i = 0; i < E1000_I2CCMD_PHY_TIMEOUT; i++) { + usec_delay(50); + /* Poll the ready bit to see if lastly + * launched I2C operation completed + */ + i2ccmd = E1000_READ_REG(hw, E1000_I2CCMD); + if (i2ccmd & E1000_I2CCMD_READY) { + /* Check if this is READ or WRITE phase */ + if ((i2ccmd & E1000_I2CCMD_OPCODE_READ) == + E1000_I2CCMD_OPCODE_READ) { + /* Write the selected byte + * lane and update whole word + */ + data_local = i2ccmd & 0xFF00; + data_local |= data; + i2ccmd = ((offset << + E1000_I2CCMD_REG_ADDR_SHIFT) | + E1000_I2CCMD_OPCODE_WRITE | data_local); + E1000_WRITE_REG(hw, E1000_I2CCMD, i2ccmd); + } else { + break; + } + } + } + if (!(i2ccmd & E1000_I2CCMD_READY)) { + DEBUGOUT("I2CCMD Write did not complete\n"); + return -E1000_ERR_PHY; + } + if (i2ccmd & E1000_I2CCMD_ERROR) { + DEBUGOUT("I2CCMD Error bit set\n"); + return -E1000_ERR_PHY; + } + return E1000_SUCCESS; +} + +/** + * e1000_read_phy_reg_m88 - Read m88 PHY register + * @hw: pointer to the HW structure + * @offset: register offset to be read + * @data: pointer to the read data + * + * Acquires semaphore, if necessary, then reads the PHY register at offset + * and storing the retrieved information in data. Release any acquired + * semaphores before exiting. 
+ **/ +s32 e1000_read_phy_reg_m88(struct e1000_hw *hw, u32 offset, u16 *data) +{ + s32 ret_val; + + DEBUGFUNC("e1000_read_phy_reg_m88"); + + if (!hw->phy.ops.acquire) + return E1000_SUCCESS; + + ret_val = hw->phy.ops.acquire(hw); + if (ret_val) + return ret_val; + + ret_val = e1000_read_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & offset, + data); + + hw->phy.ops.release(hw); + + return ret_val; +} + +/** + * e1000_write_phy_reg_m88 - Write m88 PHY register + * @hw: pointer to the HW structure + * @offset: register offset to write to + * @data: data to write at register offset + * + * Acquires semaphore, if necessary, then writes the data to PHY register + * at the offset. Release any acquired semaphores before exiting. + **/ +s32 e1000_write_phy_reg_m88(struct e1000_hw *hw, u32 offset, u16 data) +{ + s32 ret_val; + + DEBUGFUNC("e1000_write_phy_reg_m88"); + + if (!hw->phy.ops.acquire) + return E1000_SUCCESS; + + ret_val = hw->phy.ops.acquire(hw); + if (ret_val) + return ret_val; + + ret_val = e1000_write_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & offset, + data); + + hw->phy.ops.release(hw); + + return ret_val; +} + +/** + * e1000_set_page_igp - Set page as on IGP-like PHY(s) + * @hw: pointer to the HW structure + * @page: page to set (shifted left when necessary) + * + * Sets PHY page required for PHY register access. Assumes semaphore is + * already acquired. Note, this function sets phy.addr to 1 so the caller + * must set it appropriately (if necessary) after this function returns. 
+ **/ +s32 e1000_set_page_igp(struct e1000_hw *hw, u16 page) +{ + DEBUGFUNC("e1000_set_page_igp"); + + DEBUGOUT1("Setting page 0x%x\n", page); + + hw->phy.addr = 1; + + return e1000_write_phy_reg_mdic(hw, IGP01E1000_PHY_PAGE_SELECT, page); +} + +/** + * __e1000_read_phy_reg_igp - Read igp PHY register + * @hw: pointer to the HW structure + * @offset: register offset to be read + * @data: pointer to the read data + * @locked: semaphore has already been acquired or not + * + * Acquires semaphore, if necessary, then reads the PHY register at offset + * and stores the retrieved information in data. Release any acquired + * semaphores before exiting. + **/ +static s32 __e1000_read_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 *data, + bool locked) +{ + s32 ret_val = E1000_SUCCESS; + + DEBUGFUNC("__e1000_read_phy_reg_igp"); + + if (!locked) { + if (!hw->phy.ops.acquire) + return E1000_SUCCESS; + + ret_val = hw->phy.ops.acquire(hw); + if (ret_val) + return ret_val; + } + + if (offset > MAX_PHY_MULTI_PAGE_REG) + ret_val = e1000_write_phy_reg_mdic(hw, + IGP01E1000_PHY_PAGE_SELECT, + (u16)offset); + if (!ret_val) + ret_val = e1000_read_phy_reg_mdic(hw, + MAX_PHY_REG_ADDRESS & offset, + data); + if (!locked) + hw->phy.ops.release(hw); + + return ret_val; +} + +/** + * e1000_read_phy_reg_igp - Read igp PHY register + * @hw: pointer to the HW structure + * @offset: register offset to be read + * @data: pointer to the read data + * + * Acquires semaphore then reads the PHY register at offset and stores the + * retrieved information in data. + * Release the acquired semaphore before exiting. 
+ **/ +s32 e1000_read_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 *data) +{ + return __e1000_read_phy_reg_igp(hw, offset, data, false); +} + +/** + * e1000_read_phy_reg_igp_locked - Read igp PHY register + * @hw: pointer to the HW structure + * @offset: register offset to be read + * @data: pointer to the read data + * + * Reads the PHY register at offset and stores the retrieved information + * in data. Assumes semaphore already acquired. + **/ +s32 e1000_read_phy_reg_igp_locked(struct e1000_hw *hw, u32 offset, u16 *data) +{ + return __e1000_read_phy_reg_igp(hw, offset, data, true); +} + +/** + * __e1000_write_phy_reg_igp - Write igp PHY register + * @hw: pointer to the HW structure + * @offset: register offset to write to + * @data: data to write at register offset + * @locked: semaphore has already been acquired or not + * + * Acquires semaphore, if necessary, then writes the data to PHY register + * at the offset. Release any acquired semaphores before exiting. + **/ +static s32 __e1000_write_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 data, + bool locked) +{ + s32 ret_val = E1000_SUCCESS; + + DEBUGFUNC("__e1000_write_phy_reg_igp"); + + if (!locked) { + if (!hw->phy.ops.acquire) + return E1000_SUCCESS; + + ret_val = hw->phy.ops.acquire(hw); + if (ret_val) + return ret_val; + } + + if (offset > MAX_PHY_MULTI_PAGE_REG) + ret_val = e1000_write_phy_reg_mdic(hw, + IGP01E1000_PHY_PAGE_SELECT, + (u16)offset); + if (!ret_val) + ret_val = e1000_write_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & + offset, + data); + if (!locked) + hw->phy.ops.release(hw); + + return ret_val; +} + +/** + * e1000_write_phy_reg_igp - Write igp PHY register + * @hw: pointer to the HW structure + * @offset: register offset to write to + * @data: data to write at register offset + * + * Acquires semaphore then writes the data to PHY register + * at the offset. Release any acquired semaphores before exiting.
+ **/ +s32 e1000_write_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 data) +{ + return __e1000_write_phy_reg_igp(hw, offset, data, false); +} + +/** + * e1000_write_phy_reg_igp_locked - Write igp PHY register + * @hw: pointer to the HW structure + * @offset: register offset to write to + * @data: data to write at register offset + * + * Writes the data to PHY register at the offset. + * Assumes semaphore already acquired. + **/ +s32 e1000_write_phy_reg_igp_locked(struct e1000_hw *hw, u32 offset, u16 data) +{ + return __e1000_write_phy_reg_igp(hw, offset, data, true); +} + +/** + * __e1000_read_kmrn_reg - Read kumeran register + * @hw: pointer to the HW structure + * @offset: register offset to be read + * @data: pointer to the read data + * @locked: semaphore has already been acquired or not + * + * Acquires semaphore, if necessary. Then reads the PHY register at offset + * using the kumeran interface. The information retrieved is stored in data. + * Release any acquired semaphores before exiting. 
+ **/ +static s32 __e1000_read_kmrn_reg(struct e1000_hw *hw, u32 offset, u16 *data, + bool locked) +{ + u32 kmrnctrlsta; + + DEBUGFUNC("__e1000_read_kmrn_reg"); + + if (!locked) { + s32 ret_val = E1000_SUCCESS; + + if (!hw->phy.ops.acquire) + return E1000_SUCCESS; + + ret_val = hw->phy.ops.acquire(hw); + if (ret_val) + return ret_val; + } + + kmrnctrlsta = ((offset << E1000_KMRNCTRLSTA_OFFSET_SHIFT) & + E1000_KMRNCTRLSTA_OFFSET) | E1000_KMRNCTRLSTA_REN; + E1000_WRITE_REG(hw, E1000_KMRNCTRLSTA, kmrnctrlsta); + E1000_WRITE_FLUSH(hw); + + usec_delay(2); + + kmrnctrlsta = E1000_READ_REG(hw, E1000_KMRNCTRLSTA); + *data = (u16)kmrnctrlsta; + + if (!locked) + hw->phy.ops.release(hw); + + return E1000_SUCCESS; +} + +/** + * e1000_read_kmrn_reg_generic - Read kumeran register + * @hw: pointer to the HW structure + * @offset: register offset to be read + * @data: pointer to the read data + * + * Acquires semaphore then reads the PHY register at offset using the + * kumeran interface. The information retrieved is stored in data. + * Release the acquired semaphore before exiting. + **/ +s32 e1000_read_kmrn_reg_generic(struct e1000_hw *hw, u32 offset, u16 *data) +{ + return __e1000_read_kmrn_reg(hw, offset, data, false); +} + +/** + * e1000_read_kmrn_reg_locked - Read kumeran register + * @hw: pointer to the HW structure + * @offset: register offset to be read + * @data: pointer to the read data + * + * Reads the PHY register at offset using the kumeran interface. The + * information retrieved is stored in data. + * Assumes semaphore already acquired. + **/ +s32 e1000_read_kmrn_reg_locked(struct e1000_hw *hw, u32 offset, u16 *data) +{ + return __e1000_read_kmrn_reg(hw, offset, data, true); +} + +/** + * __e1000_write_kmrn_reg - Write kumeran register + * @hw: pointer to the HW structure + * @offset: register offset to write to + * @data: data to write at register offset + * @locked: semaphore has already been acquired or not + * + * Acquires semaphore, if necessary. 
Then write the data to PHY register + * at the offset using the kumeran interface. Release any acquired semaphores + * before exiting. + **/ +static s32 __e1000_write_kmrn_reg(struct e1000_hw *hw, u32 offset, u16 data, + bool locked) +{ + u32 kmrnctrlsta; + + DEBUGFUNC("e1000_write_kmrn_reg_generic"); + + if (!locked) { + s32 ret_val = E1000_SUCCESS; + + if (!hw->phy.ops.acquire) + return E1000_SUCCESS; + + ret_val = hw->phy.ops.acquire(hw); + if (ret_val) + return ret_val; + } + + kmrnctrlsta = ((offset << E1000_KMRNCTRLSTA_OFFSET_SHIFT) & + E1000_KMRNCTRLSTA_OFFSET) | data; + E1000_WRITE_REG(hw, E1000_KMRNCTRLSTA, kmrnctrlsta); + E1000_WRITE_FLUSH(hw); + + usec_delay(2); + + if (!locked) + hw->phy.ops.release(hw); + + return E1000_SUCCESS; +} + +/** + * e1000_write_kmrn_reg_generic - Write kumeran register + * @hw: pointer to the HW structure + * @offset: register offset to write to + * @data: data to write at register offset + * + * Acquires semaphore then writes the data to the PHY register at the offset + * using the kumeran interface. Release the acquired semaphore before exiting. + **/ +s32 e1000_write_kmrn_reg_generic(struct e1000_hw *hw, u32 offset, u16 data) +{ + return __e1000_write_kmrn_reg(hw, offset, data, false); +} + +/** + * e1000_write_kmrn_reg_locked - Write kumeran register + * @hw: pointer to the HW structure + * @offset: register offset to write to + * @data: data to write at register offset + * + * Write the data to PHY register at the offset using the kumeran interface. + * Assumes semaphore already acquired. 
+ **/ +s32 e1000_write_kmrn_reg_locked(struct e1000_hw *hw, u32 offset, u16 data) +{ + return __e1000_write_kmrn_reg(hw, offset, data, true); +} + +/** + * e1000_set_master_slave_mode - Setup PHY for Master/slave mode + * @hw: pointer to the HW structure + * + * Sets up Master/slave mode + **/ +static s32 e1000_set_master_slave_mode(struct e1000_hw *hw) +{ + s32 ret_val; + u16 phy_data; + + /* Resolve Master/Slave mode */ + ret_val = hw->phy.ops.read_reg(hw, PHY_1000T_CTRL, &phy_data); + if (ret_val) + return ret_val; + + /* load defaults for future use */ + hw->phy.original_ms_type = (phy_data & CR_1000T_MS_ENABLE) ? + ((phy_data & CR_1000T_MS_VALUE) ? + e1000_ms_force_master : + e1000_ms_force_slave) : e1000_ms_auto; + + switch (hw->phy.ms_type) { + case e1000_ms_force_master: + phy_data |= (CR_1000T_MS_ENABLE | CR_1000T_MS_VALUE); + break; + case e1000_ms_force_slave: + phy_data |= CR_1000T_MS_ENABLE; + phy_data &= ~(CR_1000T_MS_VALUE); + break; + case e1000_ms_auto: + phy_data &= ~CR_1000T_MS_ENABLE; + /* fall-through */ + default: + break; + } + + return hw->phy.ops.write_reg(hw, PHY_1000T_CTRL, phy_data); +} + +/** + * e1000_copper_link_setup_82577 - Setup 82577 PHY for copper link + * @hw: pointer to the HW structure + * + * Sets up Carrier-sense on Transmit and downshift values. + **/ +s32 e1000_copper_link_setup_82577(struct e1000_hw *hw) +{ + s32 ret_val; + u16 phy_data; + + DEBUGFUNC("e1000_copper_link_setup_82577"); + + if (hw->phy.reset_disable) + return E1000_SUCCESS; + + if (hw->phy.type == e1000_phy_82580) { + ret_val = hw->phy.ops.reset(hw); + if (ret_val) { + DEBUGOUT("Error resetting the PHY.\n"); + return ret_val; + } + } + + /* Enable CRS on Tx. This must be set for half-duplex operation. 
*/ + ret_val = hw->phy.ops.read_reg(hw, I82577_CFG_REG, &phy_data); + if (ret_val) + return ret_val; + + phy_data |= I82577_CFG_ASSERT_CRS_ON_TX; + + /* Enable downshift */ + phy_data |= I82577_CFG_ENABLE_DOWNSHIFT; + + ret_val = hw->phy.ops.write_reg(hw, I82577_CFG_REG, phy_data); + if (ret_val) + return ret_val; + + /* Set MDI/MDIX mode */ + ret_val = hw->phy.ops.read_reg(hw, I82577_PHY_CTRL_2, &phy_data); + if (ret_val) + return ret_val; + phy_data &= ~I82577_PHY_CTRL2_MDIX_CFG_MASK; + /* Options: + * 0 - Auto (default) + * 1 - MDI mode + * 2 - MDI-X mode + */ + switch (hw->phy.mdix) { + case 1: + break; + case 2: + phy_data |= I82577_PHY_CTRL2_MANUAL_MDIX; + break; + case 0: + default: + phy_data |= I82577_PHY_CTRL2_AUTO_MDI_MDIX; + break; + } + ret_val = hw->phy.ops.write_reg(hw, I82577_PHY_CTRL_2, phy_data); + if (ret_val) + return ret_val; + + return e1000_set_master_slave_mode(hw); +} + +/** + * e1000_copper_link_setup_m88 - Setup m88 PHY's for copper link + * @hw: pointer to the HW structure + * + * Sets up MDI/MDI-X and polarity for m88 PHY's. If necessary, transmit clock + * and downshift values are set also. + **/ +s32 e1000_copper_link_setup_m88(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + u16 phy_data; + + DEBUGFUNC("e1000_copper_link_setup_m88"); + + if (phy->reset_disable) + return E1000_SUCCESS; + + /* Enable CRS on Tx. This must be set for half-duplex operation. 
*/ + ret_val = phy->ops.read_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_data); + if (ret_val) + return ret_val; + + phy_data |= M88E1000_PSCR_ASSERT_CRS_ON_TX; + + /* Options: + * MDI/MDI-X = 0 (default) + * 0 - Auto for all speeds + * 1 - MDI mode + * 2 - MDI-X mode + * 3 - Auto for 1000Base-T only (MDI-X for 10/100Base-T modes) + */ + phy_data &= ~M88E1000_PSCR_AUTO_X_MODE; + + switch (phy->mdix) { + case 1: + phy_data |= M88E1000_PSCR_MDI_MANUAL_MODE; + break; + case 2: + phy_data |= M88E1000_PSCR_MDIX_MANUAL_MODE; + break; + case 3: + phy_data |= M88E1000_PSCR_AUTO_X_1000T; + break; + case 0: + default: + phy_data |= M88E1000_PSCR_AUTO_X_MODE; + break; + } + + /* Options: + * disable_polarity_correction = 0 (default) + * Automatic Correction for Reversed Cable Polarity + * 0 - Disabled + * 1 - Enabled + */ + phy_data &= ~M88E1000_PSCR_POLARITY_REVERSAL; + if (phy->disable_polarity_correction) + phy_data |= M88E1000_PSCR_POLARITY_REVERSAL; + + ret_val = phy->ops.write_reg(hw, M88E1000_PHY_SPEC_CTRL, phy_data); + if (ret_val) + return ret_val; + + if (phy->revision < E1000_REVISION_4) { + /* Force TX_CLK in the Extended PHY Specific Control Register + * to 25MHz clock. + */ + ret_val = phy->ops.read_reg(hw, M88E1000_EXT_PHY_SPEC_CTRL, + &phy_data); + if (ret_val) + return ret_val; + + phy_data |= M88E1000_EPSCR_TX_CLK_25; + + if ((phy->revision == E1000_REVISION_2) && + (phy->id == M88E1111_I_PHY_ID)) { + /* 82573L PHY - set the downshift counter to 5x. */ + phy_data &= ~M88EC018_EPSCR_DOWNSHIFT_COUNTER_MASK; + phy_data |= M88EC018_EPSCR_DOWNSHIFT_COUNTER_5X; + } else { + /* Configure Master and Slave downshift values */ + phy_data &= ~(M88E1000_EPSCR_MASTER_DOWNSHIFT_MASK | + M88E1000_EPSCR_SLAVE_DOWNSHIFT_MASK); + phy_data |= (M88E1000_EPSCR_MASTER_DOWNSHIFT_1X | + M88E1000_EPSCR_SLAVE_DOWNSHIFT_1X); + } + ret_val = phy->ops.write_reg(hw, M88E1000_EXT_PHY_SPEC_CTRL, + phy_data); + if (ret_val) + return ret_val; + } + + /* Commit the changes. 
*/ + ret_val = phy->ops.commit(hw); + if (ret_val) { + DEBUGOUT("Error committing the PHY changes\n"); + return ret_val; + } + + return E1000_SUCCESS; +} + +/** + * e1000_copper_link_setup_m88_gen2 - Setup m88 PHY's for copper link + * @hw: pointer to the HW structure + * + * Sets up MDI/MDI-X and polarity for i347-AT4, m88e1322 and m88e1112 PHY's. + * Also enables and sets the downshift parameters. + **/ +s32 e1000_copper_link_setup_m88_gen2(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + u16 phy_data; + + DEBUGFUNC("e1000_copper_link_setup_m88_gen2"); + + if (phy->reset_disable) + return E1000_SUCCESS; + + /* Enable CRS on Tx. This must be set for half-duplex operation. */ + ret_val = phy->ops.read_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_data); + if (ret_val) + return ret_val; + + /* Options: + * MDI/MDI-X = 0 (default) + * 0 - Auto for all speeds + * 1 - MDI mode + * 2 - MDI-X mode + * 3 - Auto for 1000Base-T only (MDI-X for 10/100Base-T modes) + */ + phy_data &= ~M88E1000_PSCR_AUTO_X_MODE; + + switch (phy->mdix) { + case 1: + phy_data |= M88E1000_PSCR_MDI_MANUAL_MODE; + break; + case 2: + phy_data |= M88E1000_PSCR_MDIX_MANUAL_MODE; + break; + case 3: + /* M88E1112 does not support this mode) */ + if (phy->id != M88E1112_E_PHY_ID) { + phy_data |= M88E1000_PSCR_AUTO_X_1000T; + break; + } + case 0: + default: + phy_data |= M88E1000_PSCR_AUTO_X_MODE; + break; + } + + /* Options: + * disable_polarity_correction = 0 (default) + * Automatic Correction for Reversed Cable Polarity + * 0 - Disabled + * 1 - Enabled + */ + phy_data &= ~M88E1000_PSCR_POLARITY_REVERSAL; + if (phy->disable_polarity_correction) + phy_data |= M88E1000_PSCR_POLARITY_REVERSAL; + + /* Enable downshift and setting it to X6 */ + if (phy->id == M88E1543_E_PHY_ID) { + phy_data &= ~I347AT4_PSCR_DOWNSHIFT_ENABLE; + ret_val = + phy->ops.write_reg(hw, M88E1000_PHY_SPEC_CTRL, phy_data); + if (ret_val) + return ret_val; + + ret_val = phy->ops.commit(hw); + if (ret_val) { + 
DEBUGOUT("Error committing the PHY changes\n"); + return ret_val; + } + } + + phy_data &= ~I347AT4_PSCR_DOWNSHIFT_MASK; + phy_data |= I347AT4_PSCR_DOWNSHIFT_6X; + phy_data |= I347AT4_PSCR_DOWNSHIFT_ENABLE; + + ret_val = phy->ops.write_reg(hw, M88E1000_PHY_SPEC_CTRL, phy_data); + if (ret_val) + return ret_val; + + /* Commit the changes. */ + ret_val = phy->ops.commit(hw); + if (ret_val) { + DEBUGOUT("Error committing the PHY changes\n"); + return ret_val; + } + + ret_val = e1000_set_master_slave_mode(hw); + if (ret_val) + return ret_val; + + return E1000_SUCCESS; +} + +/** + * e1000_copper_link_setup_igp - Setup igp PHY's for copper link + * @hw: pointer to the HW structure + * + * Sets up LPLU, MDI/MDI-X, polarity, Smartspeed and Master/Slave config for + * igp PHY's. + **/ +s32 e1000_copper_link_setup_igp(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + u16 data; + + DEBUGFUNC("e1000_copper_link_setup_igp"); + + if (phy->reset_disable) + return E1000_SUCCESS; + + ret_val = hw->phy.ops.reset(hw); + if (ret_val) { + DEBUGOUT("Error resetting the PHY.\n"); + return ret_val; + } + + /* Wait 100ms for MAC to configure PHY from NVM settings, to avoid + * timeout issues when LFS is enabled. 
+ */ + msec_delay(100); + + /* disable lplu d0 during driver init */ + if (hw->phy.ops.set_d0_lplu_state) { + ret_val = hw->phy.ops.set_d0_lplu_state(hw, false); + if (ret_val) { + DEBUGOUT("Error Disabling LPLU D0\n"); + return ret_val; + } + } + /* Configure mdi-mdix settings */ + ret_val = phy->ops.read_reg(hw, IGP01E1000_PHY_PORT_CTRL, &data); + if (ret_val) + return ret_val; + + data &= ~IGP01E1000_PSCR_AUTO_MDIX; + + switch (phy->mdix) { + case 1: + data &= ~IGP01E1000_PSCR_FORCE_MDI_MDIX; + break; + case 2: + data |= IGP01E1000_PSCR_FORCE_MDI_MDIX; + break; + case 0: + default: + data |= IGP01E1000_PSCR_AUTO_MDIX; + break; + } + ret_val = phy->ops.write_reg(hw, IGP01E1000_PHY_PORT_CTRL, data); + if (ret_val) + return ret_val; + + /* set auto-master slave resolution settings */ + if (hw->mac.autoneg) { + /* when autonegotiation advertisement is only 1000Mbps then we + * should disable SmartSpeed and enable Auto MasterSlave + * resolution as hardware default. + */ + if (phy->autoneg_advertised == ADVERTISE_1000_FULL) { + /* Disable SmartSpeed */ + ret_val = phy->ops.read_reg(hw, + IGP01E1000_PHY_PORT_CONFIG, + &data); + if (ret_val) + return ret_val; + + data &= ~IGP01E1000_PSCFR_SMART_SPEED; + ret_val = phy->ops.write_reg(hw, + IGP01E1000_PHY_PORT_CONFIG, + data); + if (ret_val) + return ret_val; + + /* Set auto Master/Slave resolution process */ + ret_val = phy->ops.read_reg(hw, PHY_1000T_CTRL, &data); + if (ret_val) + return ret_val; + + data &= ~CR_1000T_MS_ENABLE; + ret_val = phy->ops.write_reg(hw, PHY_1000T_CTRL, data); + if (ret_val) + return ret_val; + } + + ret_val = e1000_set_master_slave_mode(hw); + } + + return ret_val; +} + +/** + * e1000_phy_setup_autoneg - Configure PHY for auto-negotiation + * @hw: pointer to the HW structure + * + * Reads the MII auto-neg advertisement register and/or the 1000T control + * register and if the PHY is already setup for auto-negotiation, then + * return successful. 
Otherwise, setup advertisement and flow control to + * the appropriate values for the wanted auto-negotiation. + **/ +static s32 e1000_phy_setup_autoneg(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + u16 mii_autoneg_adv_reg; + u16 mii_1000t_ctrl_reg = 0; + + DEBUGFUNC("e1000_phy_setup_autoneg"); + + phy->autoneg_advertised &= phy->autoneg_mask; + + /* Read the MII Auto-Neg Advertisement Register (Address 4). */ + ret_val = phy->ops.read_reg(hw, PHY_AUTONEG_ADV, &mii_autoneg_adv_reg); + if (ret_val) + return ret_val; + + if (phy->autoneg_mask & ADVERTISE_1000_FULL) { + /* Read the MII 1000Base-T Control Register (Address 9). */ + ret_val = phy->ops.read_reg(hw, PHY_1000T_CTRL, + &mii_1000t_ctrl_reg); + if (ret_val) + return ret_val; + } + + /* Need to parse both autoneg_advertised and fc and set up + * the appropriate PHY registers. First we will parse for + * autoneg_advertised software override. Since we can advertise + * a plethora of combinations, we need to check each bit + * individually. + */ + + /* First we clear all the 10/100 mb speed bits in the Auto-Neg + * Advertisement Register (Address 4) and the 1000 mb speed bits in + * the 1000Base-T Control Register (Address 9). + */ + mii_autoneg_adv_reg &= ~(NWAY_AR_100TX_FD_CAPS | + NWAY_AR_100TX_HD_CAPS | + NWAY_AR_10T_FD_CAPS | + NWAY_AR_10T_HD_CAPS); + mii_1000t_ctrl_reg &= ~(CR_1000T_HD_CAPS | CR_1000T_FD_CAPS); + + DEBUGOUT1("autoneg_advertised %x\n", phy->autoneg_advertised); + + /* Do we want to advertise 10 Mb Half Duplex? */ + if (phy->autoneg_advertised & ADVERTISE_10_HALF) { + DEBUGOUT("Advertise 10mb Half duplex\n"); + mii_autoneg_adv_reg |= NWAY_AR_10T_HD_CAPS; + } + + /* Do we want to advertise 10 Mb Full Duplex? */ + if (phy->autoneg_advertised & ADVERTISE_10_FULL) { + DEBUGOUT("Advertise 10mb Full duplex\n"); + mii_autoneg_adv_reg |= NWAY_AR_10T_FD_CAPS; + } + + /* Do we want to advertise 100 Mb Half Duplex? 
*/ + if (phy->autoneg_advertised & ADVERTISE_100_HALF) { + DEBUGOUT("Advertise 100mb Half duplex\n"); + mii_autoneg_adv_reg |= NWAY_AR_100TX_HD_CAPS; + } + + /* Do we want to advertise 100 Mb Full Duplex? */ + if (phy->autoneg_advertised & ADVERTISE_100_FULL) { + DEBUGOUT("Advertise 100mb Full duplex\n"); + mii_autoneg_adv_reg |= NWAY_AR_100TX_FD_CAPS; + } + + /* We do not allow the Phy to advertise 1000 Mb Half Duplex */ + if (phy->autoneg_advertised & ADVERTISE_1000_HALF) + DEBUGOUT("Advertise 1000mb Half duplex request denied!\n"); + + /* Do we want to advertise 1000 Mb Full Duplex? */ + if (phy->autoneg_advertised & ADVERTISE_1000_FULL) { + DEBUGOUT("Advertise 1000mb Full duplex\n"); + mii_1000t_ctrl_reg |= CR_1000T_FD_CAPS; + } + + /* Check for a software override of the flow control settings, and + * setup the PHY advertisement registers accordingly. If + * auto-negotiation is enabled, then software will have to set the + * "PAUSE" bits to the correct value in the Auto-Negotiation + * Advertisement Register (PHY_AUTONEG_ADV) and re-start auto- + * negotiation. + * + * The possible values of the "fc" parameter are: + * 0: Flow control is completely disabled + * 1: Rx flow control is enabled (we can receive pause frames + * but not send pause frames). + * 2: Tx flow control is enabled (we can send pause frames + * but we do not support receiving pause frames). + * 3: Both Rx and Tx flow control (symmetric) are enabled. + * other: No software override. The flow control configuration + * in the EEPROM is used. + */ + switch (hw->fc.current_mode) { + case e1000_fc_none: + /* Flow control (Rx & Tx) is completely disabled by a + * software over-ride. + */ + mii_autoneg_adv_reg &= ~(NWAY_AR_ASM_DIR | NWAY_AR_PAUSE); + break; + case e1000_fc_rx_pause: + /* Rx Flow control is enabled, and Tx Flow control is + * disabled, by a software over-ride. 
+ * + * Since there really isn't a way to advertise that we are + * capable of Rx Pause ONLY, we will advertise that we + * support both symmetric and asymmetric Rx PAUSE. Later + * (in e1000_config_fc_after_link_up) we will disable the + * hw's ability to send PAUSE frames. + */ + mii_autoneg_adv_reg |= (NWAY_AR_ASM_DIR | NWAY_AR_PAUSE); + break; + case e1000_fc_tx_pause: + /* Tx Flow control is enabled, and Rx Flow control is + * disabled, by a software over-ride. + */ + mii_autoneg_adv_reg |= NWAY_AR_ASM_DIR; + mii_autoneg_adv_reg &= ~NWAY_AR_PAUSE; + break; + case e1000_fc_full: + /* Flow control (both Rx and Tx) is enabled by a software + * over-ride. + */ + mii_autoneg_adv_reg |= (NWAY_AR_ASM_DIR | NWAY_AR_PAUSE); + break; + default: + DEBUGOUT("Flow control param set incorrectly\n"); + return -E1000_ERR_CONFIG; + } + + ret_val = phy->ops.write_reg(hw, PHY_AUTONEG_ADV, mii_autoneg_adv_reg); + if (ret_val) + return ret_val; + + DEBUGOUT1("Auto-Neg Advertising %x\n", mii_autoneg_adv_reg); + + if (phy->autoneg_mask & ADVERTISE_1000_FULL) + ret_val = phy->ops.write_reg(hw, PHY_1000T_CTRL, + mii_1000t_ctrl_reg); + + return ret_val; +} + +/** + * e1000_copper_link_autoneg - Setup/Enable autoneg for copper link + * @hw: pointer to the HW structure + * + * Performs initial bounds checking on autoneg advertisement parameter, then + * configure to advertise the full capability. Setup the PHY to autoneg + * and restart the negotiation process between the link partner. If + * autoneg_wait_to_complete, then wait for autoneg to complete before exiting. + **/ +static s32 e1000_copper_link_autoneg(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + u16 phy_ctrl; + + DEBUGFUNC("e1000_copper_link_autoneg"); + + /* Perform some bounds checking on the autoneg advertisement + * parameter. 
+ */ + phy->autoneg_advertised &= phy->autoneg_mask; + + /* If autoneg_advertised is zero, we assume it was not defaulted + * by the calling code so we set to advertise full capability. + */ + if (!phy->autoneg_advertised) + phy->autoneg_advertised = phy->autoneg_mask; + + DEBUGOUT("Reconfiguring auto-neg advertisement params\n"); + ret_val = e1000_phy_setup_autoneg(hw); + if (ret_val) { + DEBUGOUT("Error Setting up Auto-Negotiation\n"); + return ret_val; + } + DEBUGOUT("Restarting Auto-Neg\n"); + + /* Restart auto-negotiation by setting the Auto Neg Enable bit and + * the Auto Neg Restart bit in the PHY control register. + */ + ret_val = phy->ops.read_reg(hw, PHY_CONTROL, &phy_ctrl); + if (ret_val) + return ret_val; + + phy_ctrl |= (MII_CR_AUTO_NEG_EN | MII_CR_RESTART_AUTO_NEG); + ret_val = phy->ops.write_reg(hw, PHY_CONTROL, phy_ctrl); + if (ret_val) + return ret_val; + + /* Does the user want to wait for Auto-Neg to complete here, or + * check at a later time (for example, callback routine). + */ + if (phy->autoneg_wait_to_complete) { + ret_val = e1000_wait_autoneg(hw); + if (ret_val) { + DEBUGOUT("Error while waiting for autoneg to complete\n"); + return ret_val; + } + } + + hw->mac.get_link_status = true; + + return ret_val; +} + +/** + * e1000_setup_copper_link_generic - Configure copper link settings + * @hw: pointer to the HW structure + * + * Calls the appropriate function to configure the link for auto-neg or forced + * speed and duplex. Then we check for link, once link is established calls + * to configure collision distance and flow control are called. If link is + * not established, we return -E1000_ERR_PHY (-2). + **/ +s32 e1000_setup_copper_link_generic(struct e1000_hw *hw) +{ + s32 ret_val; + bool link; + + DEBUGFUNC("e1000_setup_copper_link_generic"); + + if (hw->mac.autoneg) { + /* Setup autoneg and flow control advertisement and perform + * autonegotiation. 
+ */ + ret_val = e1000_copper_link_autoneg(hw); + if (ret_val) + return ret_val; + } else { + /* PHY will be set to 10H, 10F, 100H or 100F + * depending on user settings. + */ + DEBUGOUT("Forcing Speed and Duplex\n"); + ret_val = hw->phy.ops.force_speed_duplex(hw); + if (ret_val) { + DEBUGOUT("Error Forcing Speed and Duplex\n"); + return ret_val; + } + } + + /* Check link status. Wait up to 100 microseconds for link to become + * valid. + */ + ret_val = e1000_phy_has_link_generic(hw, COPPER_LINK_UP_LIMIT, 10, + &link); + if (ret_val) + return ret_val; + + if (link) { + DEBUGOUT("Valid link established!!!\n"); + hw->mac.ops.config_collision_dist(hw); + ret_val = e1000_config_fc_after_link_up_generic(hw); + } else { + DEBUGOUT("Unable to establish link!!!\n"); + } + + return ret_val; +} + +/** + * e1000_phy_force_speed_duplex_igp - Force speed/duplex for igp PHY + * @hw: pointer to the HW structure + * + * Calls the PHY setup function to force speed and duplex. Clears the + * auto-crossover to force MDI manually. Waits for link and returns + * successful if link up is successful, else -E1000_ERR_PHY (-2). + **/ +s32 e1000_phy_force_speed_duplex_igp(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + u16 phy_data; + bool link; + + DEBUGFUNC("e1000_phy_force_speed_duplex_igp"); + + ret_val = phy->ops.read_reg(hw, PHY_CONTROL, &phy_data); + if (ret_val) + return ret_val; + + e1000_phy_force_speed_duplex_setup(hw, &phy_data); + + ret_val = phy->ops.write_reg(hw, PHY_CONTROL, phy_data); + if (ret_val) + return ret_val; + + /* Clear Auto-Crossover to force MDI manually. IGP requires MDI + * forced whenever speed and duplex are forced. 
+ */ + ret_val = phy->ops.read_reg(hw, IGP01E1000_PHY_PORT_CTRL, &phy_data); + if (ret_val) + return ret_val; + + phy_data &= ~IGP01E1000_PSCR_AUTO_MDIX; + phy_data &= ~IGP01E1000_PSCR_FORCE_MDI_MDIX; + + ret_val = phy->ops.write_reg(hw, IGP01E1000_PHY_PORT_CTRL, phy_data); + if (ret_val) + return ret_val; + + DEBUGOUT1("IGP PSCR: %X\n", phy_data); + + usec_delay(1); + + if (phy->autoneg_wait_to_complete) { + DEBUGOUT("Waiting for forced speed/duplex link on IGP phy.\n"); + + ret_val = e1000_phy_has_link_generic(hw, PHY_FORCE_LIMIT, + 100000, &link); + if (ret_val) + return ret_val; + + if (!link) + DEBUGOUT("Link taking longer than expected.\n"); + + /* Try once more */ + ret_val = e1000_phy_has_link_generic(hw, PHY_FORCE_LIMIT, + 100000, &link); + } + + return ret_val; +} + +/** + * e1000_phy_force_speed_duplex_m88 - Force speed/duplex for m88 PHY + * @hw: pointer to the HW structure + * + * Calls the PHY setup function to force speed and duplex. Clears the + * auto-crossover to force MDI manually. Resets the PHY to commit the + * changes. If time expires while waiting for link up, we reset the DSP. + * After reset, TX_CLK and CRS on Tx must be set. Return successful upon + * successful completion, else return corresponding error code. + **/ +s32 e1000_phy_force_speed_duplex_m88(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + u16 phy_data; + bool link; + + DEBUGFUNC("e1000_phy_force_speed_duplex_m88"); + + /* I210 and I211 devices support Auto-Crossover in forced operation. */ + if (phy->type != e1000_phy_i210) { + /* Clear Auto-Crossover to force MDI manually. M88E1000 + * requires MDI forced whenever speed and duplex are forced. 
+ */ + ret_val = phy->ops.read_reg(hw, M88E1000_PHY_SPEC_CTRL, + &phy_data); + if (ret_val) + return ret_val; + + phy_data &= ~M88E1000_PSCR_AUTO_X_MODE; + ret_val = phy->ops.write_reg(hw, M88E1000_PHY_SPEC_CTRL, + phy_data); + if (ret_val) + return ret_val; + } + + DEBUGOUT1("M88E1000 PSCR: %X\n", phy_data); + + ret_val = phy->ops.read_reg(hw, PHY_CONTROL, &phy_data); + if (ret_val) + return ret_val; + + e1000_phy_force_speed_duplex_setup(hw, &phy_data); + + ret_val = phy->ops.write_reg(hw, PHY_CONTROL, phy_data); + if (ret_val) + return ret_val; + + /* Reset the phy to commit changes. */ + ret_val = hw->phy.ops.commit(hw); + if (ret_val) + return ret_val; + + if (phy->autoneg_wait_to_complete) { + DEBUGOUT("Waiting for forced speed/duplex link on M88 phy.\n"); + + ret_val = e1000_phy_has_link_generic(hw, PHY_FORCE_LIMIT, + 100000, &link); + if (ret_val) + return ret_val; + + if (!link) { + bool reset_dsp = true; + + switch (hw->phy.id) { + case I347AT4_E_PHY_ID: + case M88E1340M_E_PHY_ID: + case M88E1112_E_PHY_ID: + case M88E1543_E_PHY_ID: + case M88E1512_E_PHY_ID: + case I210_I_PHY_ID: + reset_dsp = false; + break; + default: + if (hw->phy.type != e1000_phy_m88) + reset_dsp = false; + break; + } + + if (!reset_dsp) { + DEBUGOUT("Link taking longer than expected.\n"); + } else { + /* We didn't get link. + * Reset the DSP and cross our fingers. 
+ */ + ret_val = phy->ops.write_reg(hw, + M88E1000_PHY_PAGE_SELECT, + 0x001d); + if (ret_val) + return ret_val; + ret_val = e1000_phy_reset_dsp_generic(hw); + if (ret_val) + return ret_val; + } + } + + /* Try once more */ + ret_val = e1000_phy_has_link_generic(hw, PHY_FORCE_LIMIT, + 100000, &link); + if (ret_val) + return ret_val; + } + + if (hw->phy.type != e1000_phy_m88) + return E1000_SUCCESS; + + if (hw->phy.id == I347AT4_E_PHY_ID || + hw->phy.id == M88E1340M_E_PHY_ID || + hw->phy.id == M88E1112_E_PHY_ID) + return E1000_SUCCESS; + if (hw->phy.id == I210_I_PHY_ID) + return E1000_SUCCESS; + if ((hw->phy.id == M88E1543_E_PHY_ID) || + (hw->phy.id == M88E1512_E_PHY_ID)) + return E1000_SUCCESS; + ret_val = phy->ops.read_reg(hw, M88E1000_EXT_PHY_SPEC_CTRL, &phy_data); + if (ret_val) + return ret_val; + + /* Resetting the phy means we need to re-force TX_CLK in the + * Extended PHY Specific Control Register to 25MHz clock from + * the reset value of 2.5MHz. + */ + phy_data |= M88E1000_EPSCR_TX_CLK_25; + ret_val = phy->ops.write_reg(hw, M88E1000_EXT_PHY_SPEC_CTRL, phy_data); + if (ret_val) + return ret_val; + + /* In addition, we must re-enable CRS on Tx for both half and full + * duplex. + */ + ret_val = phy->ops.read_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_data); + if (ret_val) + return ret_val; + + phy_data |= M88E1000_PSCR_ASSERT_CRS_ON_TX; + ret_val = phy->ops.write_reg(hw, M88E1000_PHY_SPEC_CTRL, phy_data); + + return ret_val; +} + +/** + * e1000_phy_force_speed_duplex_ife - Force PHY speed & duplex + * @hw: pointer to the HW structure + * + * Forces the speed and duplex settings of the PHY. + * This is a function pointer entry point only called by + * PHY setup routines. 
+ **/ +s32 e1000_phy_force_speed_duplex_ife(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + u16 data; + bool link; + + DEBUGFUNC("e1000_phy_force_speed_duplex_ife"); + + ret_val = phy->ops.read_reg(hw, PHY_CONTROL, &data); + if (ret_val) + return ret_val; + + e1000_phy_force_speed_duplex_setup(hw, &data); + + ret_val = phy->ops.write_reg(hw, PHY_CONTROL, data); + if (ret_val) + return ret_val; + + /* Disable MDI-X support for 10/100 */ + ret_val = phy->ops.read_reg(hw, IFE_PHY_MDIX_CONTROL, &data); + if (ret_val) + return ret_val; + + data &= ~IFE_PMC_AUTO_MDIX; + data &= ~IFE_PMC_FORCE_MDIX; + + ret_val = phy->ops.write_reg(hw, IFE_PHY_MDIX_CONTROL, data); + if (ret_val) + return ret_val; + + DEBUGOUT1("IFE PMC: %X\n", data); + + usec_delay(1); + + if (phy->autoneg_wait_to_complete) { + DEBUGOUT("Waiting for forced speed/duplex link on IFE phy.\n"); + + ret_val = e1000_phy_has_link_generic(hw, PHY_FORCE_LIMIT, + 100000, &link); + if (ret_val) + return ret_val; + + if (!link) + DEBUGOUT("Link taking longer than expected.\n"); + + /* Try once more */ + ret_val = e1000_phy_has_link_generic(hw, PHY_FORCE_LIMIT, + 100000, &link); + if (ret_val) + return ret_val; + } + + return E1000_SUCCESS; +} + +/** + * e1000_phy_force_speed_duplex_setup - Configure forced PHY speed/duplex + * @hw: pointer to the HW structure + * @phy_ctrl: pointer to current value of PHY_CONTROL + * + * Forces speed and duplex on the PHY by doing the following: disable flow + * control, force speed/duplex on the MAC, disable auto speed detection, + * disable auto-negotiation, configure duplex, configure speed, configure + * the collision distance, write configuration to CTRL register. The + * caller must write to the PHY_CONTROL register for these settings to + * take affect. 
+ **/ +void e1000_phy_force_speed_duplex_setup(struct e1000_hw *hw, u16 *phy_ctrl) +{ + struct e1000_mac_info *mac = &hw->mac; + u32 ctrl; + + DEBUGFUNC("e1000_phy_force_speed_duplex_setup"); + + /* Turn off flow control when forcing speed/duplex */ + hw->fc.current_mode = e1000_fc_none; + + /* Force speed/duplex on the mac */ + ctrl = E1000_READ_REG(hw, E1000_CTRL); + ctrl |= (E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX); + ctrl &= ~E1000_CTRL_SPD_SEL; + + /* Disable Auto Speed Detection */ + ctrl &= ~E1000_CTRL_ASDE; + + /* Disable autoneg on the phy */ + *phy_ctrl &= ~MII_CR_AUTO_NEG_EN; + + /* Forcing Full or Half Duplex? */ + if (mac->forced_speed_duplex & E1000_ALL_HALF_DUPLEX) { + ctrl &= ~E1000_CTRL_FD; + *phy_ctrl &= ~MII_CR_FULL_DUPLEX; + DEBUGOUT("Half Duplex\n"); + } else { + ctrl |= E1000_CTRL_FD; + *phy_ctrl |= MII_CR_FULL_DUPLEX; + DEBUGOUT("Full Duplex\n"); + } + + /* Forcing 10mb or 100mb? */ + if (mac->forced_speed_duplex & E1000_ALL_100_SPEED) { + ctrl |= E1000_CTRL_SPD_100; + *phy_ctrl |= MII_CR_SPEED_100; + *phy_ctrl &= ~MII_CR_SPEED_1000; + DEBUGOUT("Forcing 100mb\n"); + } else { + ctrl &= ~(E1000_CTRL_SPD_1000 | E1000_CTRL_SPD_100); + *phy_ctrl &= ~(MII_CR_SPEED_1000 | MII_CR_SPEED_100); + DEBUGOUT("Forcing 10mb\n"); + } + + hw->mac.ops.config_collision_dist(hw); + + E1000_WRITE_REG(hw, E1000_CTRL, ctrl); +} + +/** + * e1000_set_d3_lplu_state_generic - Sets low power link up state for D3 + * @hw: pointer to the HW structure + * @active: boolean used to enable/disable lplu + * + * Success returns 0, Failure returns 1 + * + * The low power link up (lplu) state is set to the power management level D3 + * and SmartSpeed is disabled when active is true, else clear lplu for D3 + * and enable Smartspeed. LPLU and Smartspeed are mutually exclusive. LPLU + * is used during Dx states where the power conservation is most important. + * During driver activity, SmartSpeed should be enabled so performance is + * maintained. 
+ **/ +s32 e1000_set_d3_lplu_state_generic(struct e1000_hw *hw, bool active) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + u16 data; + + DEBUGFUNC("e1000_set_d3_lplu_state_generic"); + + if (!hw->phy.ops.read_reg) + return E1000_SUCCESS; + + ret_val = phy->ops.read_reg(hw, IGP02E1000_PHY_POWER_MGMT, &data); + if (ret_val) + return ret_val; + + if (!active) { + data &= ~IGP02E1000_PM_D3_LPLU; + ret_val = phy->ops.write_reg(hw, IGP02E1000_PHY_POWER_MGMT, + data); + if (ret_val) + return ret_val; + /* LPLU and SmartSpeed are mutually exclusive. LPLU is used + * during Dx states where the power conservation is most + * important. During driver activity we should enable + * SmartSpeed, so performance is maintained. + */ + if (phy->smart_speed == e1000_smart_speed_on) { + ret_val = phy->ops.read_reg(hw, + IGP01E1000_PHY_PORT_CONFIG, + &data); + if (ret_val) + return ret_val; + + data |= IGP01E1000_PSCFR_SMART_SPEED; + ret_val = phy->ops.write_reg(hw, + IGP01E1000_PHY_PORT_CONFIG, + data); + if (ret_val) + return ret_val; + } else if (phy->smart_speed == e1000_smart_speed_off) { + ret_val = phy->ops.read_reg(hw, + IGP01E1000_PHY_PORT_CONFIG, + &data); + if (ret_val) + return ret_val; + + data &= ~IGP01E1000_PSCFR_SMART_SPEED; + ret_val = phy->ops.write_reg(hw, + IGP01E1000_PHY_PORT_CONFIG, + data); + if (ret_val) + return ret_val; + } + } else if ((phy->autoneg_advertised == E1000_ALL_SPEED_DUPLEX) || + (phy->autoneg_advertised == E1000_ALL_NOT_GIG) || + (phy->autoneg_advertised == E1000_ALL_10_SPEED)) { + data |= IGP02E1000_PM_D3_LPLU; + ret_val = phy->ops.write_reg(hw, IGP02E1000_PHY_POWER_MGMT, + data); + if (ret_val) + return ret_val; + + /* When LPLU is enabled, we should disable SmartSpeed */ + ret_val = phy->ops.read_reg(hw, IGP01E1000_PHY_PORT_CONFIG, + &data); + if (ret_val) + return ret_val; + + data &= ~IGP01E1000_PSCFR_SMART_SPEED; + ret_val = phy->ops.write_reg(hw, IGP01E1000_PHY_PORT_CONFIG, + data); + } + + return ret_val; +} + +/** + * 
e1000_check_downshift_generic - Checks whether a downshift in speed occurred
 * @hw: pointer to the HW structure
 *
 * Success returns 0, Failure returns 1
 *
 * A downshift is detected by querying the PHY link health.
 **/
s32 e1000_check_downshift_generic(struct e1000_hw *hw)
{
	struct e1000_phy_info *phy = &hw->phy;
	s32 ret_val;
	u16 phy_data, offset, mask;

	DEBUGFUNC("e1000_check_downshift_generic");

	/* The downshift status register and bit differ per PHY family. */
	switch (phy->type) {
	case e1000_phy_i210:
	case e1000_phy_m88:
	case e1000_phy_gg82563:
		offset = M88E1000_PHY_SPEC_STATUS;
		mask = M88E1000_PSSR_DOWNSHIFT;
		break;
	case e1000_phy_igp_2:
	case e1000_phy_igp_3:
		offset = IGP01E1000_PHY_LINK_HEALTH;
		mask = IGP01E1000_PLHR_SS_DOWNGRADE;
		break;
	default:
		/* speed downshift not supported */
		phy->speed_downgraded = false;
		return E1000_SUCCESS;
	}

	ret_val = phy->ops.read_reg(hw, offset, &phy_data);

	/* Only update the cached flag when the read succeeded. */
	if (!ret_val)
		phy->speed_downgraded = !!(phy_data & mask);

	return ret_val;
}

/**
 * e1000_check_polarity_m88 - Checks the polarity.
 * @hw: pointer to the HW structure
 *
 * Success returns 0, Failure returns -E1000_ERR_PHY (-2)
 *
 * Polarity is determined based on the PHY specific status register.
 **/
s32 e1000_check_polarity_m88(struct e1000_hw *hw)
{
	struct e1000_phy_info *phy = &hw->phy;
	s32 ret_val;
	u16 data;

	DEBUGFUNC("e1000_check_polarity_m88");

	ret_val = phy->ops.read_reg(hw, M88E1000_PHY_SPEC_STATUS, &data);

	/* Cache polarity only on a successful read. */
	if (!ret_val)
		phy->cable_polarity = ((data & M88E1000_PSSR_REV_POLARITY)
				       ? e1000_rev_polarity_reversed
				       : e1000_rev_polarity_normal);

	return ret_val;
}

/**
 * e1000_check_polarity_igp - Checks the polarity.
 * @hw: pointer to the HW structure
 *
 * Success returns 0, Failure returns -E1000_ERR_PHY (-2)
 *
 * Polarity is determined based on the PHY port status register, and the
 * current speed (since there is no polarity at 100Mbps).
+ **/ +s32 e1000_check_polarity_igp(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + u16 data, offset, mask; + + DEBUGFUNC("e1000_check_polarity_igp"); + + /* Polarity is determined based on the speed of + * our connection. + */ + ret_val = phy->ops.read_reg(hw, IGP01E1000_PHY_PORT_STATUS, &data); + if (ret_val) + return ret_val; + + if ((data & IGP01E1000_PSSR_SPEED_MASK) == + IGP01E1000_PSSR_SPEED_1000MBPS) { + offset = IGP01E1000_PHY_PCS_INIT_REG; + mask = IGP01E1000_PHY_POLARITY_MASK; + } else { + /* This really only applies to 10Mbps since + * there is no polarity for 100Mbps (always 0). + */ + offset = IGP01E1000_PHY_PORT_STATUS; + mask = IGP01E1000_PSSR_POLARITY_REVERSED; + } + + ret_val = phy->ops.read_reg(hw, offset, &data); + + if (!ret_val) + phy->cable_polarity = ((data & mask) + ? e1000_rev_polarity_reversed + : e1000_rev_polarity_normal); + + return ret_val; +} + +/** + * e1000_check_polarity_ife - Check cable polarity for IFE PHY + * @hw: pointer to the HW structure + * + * Polarity is determined on the polarity reversal feature being enabled. + **/ +s32 e1000_check_polarity_ife(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + u16 phy_data, offset, mask; + + DEBUGFUNC("e1000_check_polarity_ife"); + + /* Polarity is determined based on the reversal feature being enabled. + */ + if (phy->polarity_correction) { + offset = IFE_PHY_EXTENDED_STATUS_CONTROL; + mask = IFE_PESC_POLARITY_REVERSED; + } else { + offset = IFE_PHY_SPECIAL_CONTROL; + mask = IFE_PSC_FORCE_POLARITY; + } + + ret_val = phy->ops.read_reg(hw, offset, &phy_data); + + if (!ret_val) + phy->cable_polarity = ((phy_data & mask) + ? 
e1000_rev_polarity_reversed
				       : e1000_rev_polarity_normal);

	return ret_val;
}

/**
 * e1000_wait_autoneg - Wait for auto-neg completion
 * @hw: pointer to the HW structure
 *
 * Waits for auto-negotiation to complete or for the auto-negotiation time
 * limit to expire, which ever happens first.
 **/
static s32 e1000_wait_autoneg(struct e1000_hw *hw)
{
	s32 ret_val = E1000_SUCCESS;
	u16 i, phy_status;

	DEBUGFUNC("e1000_wait_autoneg");

	/* Nothing to poll when there is no MDIC register accessor. */
	if (!hw->phy.ops.read_reg)
		return E1000_SUCCESS;

	/* Break after autoneg completes or PHY_AUTO_NEG_LIMIT expires. */
	for (i = PHY_AUTO_NEG_LIMIT; i > 0; i--) {
		/* PHY_STATUS is read twice so the second read reflects the
		 * current state (same idiom as the sticky-bit handling in
		 * e1000_phy_has_link_generic below).
		 */
		ret_val = hw->phy.ops.read_reg(hw, PHY_STATUS, &phy_status);
		if (ret_val)
			break;
		ret_val = hw->phy.ops.read_reg(hw, PHY_STATUS, &phy_status);
		if (ret_val)
			break;
		if (phy_status & MII_SR_AUTONEG_COMPLETE)
			break;
		msec_delay(100);
	}

	/* PHY_AUTO_NEG_TIME expiration doesn't guarantee auto-negotiation
	 * has completed.
	 */
	return ret_val;
}

/**
 * e1000_phy_has_link_generic - Polls PHY for link
 * @hw: pointer to the HW structure
 * @iterations: number of times to poll for link
 * @usec_interval: delay between polling attempts
 * @success: pointer to whether polling was successful or not
 *
 * Polls the PHY status register for link, 'iterations' number of times.
 **/
s32 e1000_phy_has_link_generic(struct e1000_hw *hw, u32 iterations,
			       u32 usec_interval, bool *success)
{
	s32 ret_val = E1000_SUCCESS;
	u16 i, phy_status;

	DEBUGFUNC("e1000_phy_has_link_generic");

	if (!hw->phy.ops.read_reg)
		return E1000_SUCCESS;

	for (i = 0; i < iterations; i++) {
		/* Some PHYs require the PHY_STATUS register to be read
		 * twice due to the link bit being sticky.  No harm doing
		 * it across the board.
		 */
		ret_val = hw->phy.ops.read_reg(hw, PHY_STATUS, &phy_status);
		if (ret_val) {
			/* If the first read fails, another entity may have
			 * ownership of the resources, wait and try again to
			 * see if they have relinquished the resources yet.
			 */
			if (usec_interval >= 1000)
				msec_delay(usec_interval/1000);
			else
				usec_delay(usec_interval);
		}
		ret_val = hw->phy.ops.read_reg(hw, PHY_STATUS, &phy_status);
		if (ret_val)
			break;
		if (phy_status & MII_SR_LINK_STATUS)
			break;
		if (usec_interval >= 1000)
			msec_delay(usec_interval/1000);
		else
			usec_delay(usec_interval);
	}

	/* Link was seen iff the loop exited before exhausting iterations. */
	*success = (i < iterations);

	return ret_val;
}

/**
 * e1000_get_cable_length_m88 - Determine cable length for m88 PHY
 * @hw: pointer to the HW structure
 *
 * Reads the PHY specific status register to retrieve the cable length
 * information.  The cable length is determined by averaging the minimum and
 * maximum values to get the "average" cable length.  The m88 PHY has five
 * possible cable length values, which are:
 *	Register Value		Cable Length
 *	0			< 50 meters
 *	1			50 - 80 meters
 *	2			80 - 110 meters
 *	3			110 - 140 meters
 *	4			> 140 meters
 **/
s32 e1000_get_cable_length_m88(struct e1000_hw *hw)
{
	struct e1000_phy_info *phy = &hw->phy;
	s32 ret_val;
	u16 phy_data, index;

	DEBUGFUNC("e1000_get_cable_length_m88");

	ret_val = phy->ops.read_reg(hw, M88E1000_PHY_SPEC_STATUS, &phy_data);
	if (ret_val)
		return ret_val;

	index = ((phy_data & M88E1000_PSSR_CABLE_LENGTH) >>
		 M88E1000_PSSR_CABLE_LENGTH_SHIFT);

	/* index + 1 is dereferenced below, hence the "- 1" bound. */
	if (index >= M88E1000_CABLE_LENGTH_TABLE_SIZE - 1)
		return -E1000_ERR_PHY;

	phy->min_cable_length = e1000_m88_cable_length_table[index];
	phy->max_cable_length = e1000_m88_cable_length_table[index + 1];

	phy->cable_length = (phy->min_cable_length + phy->max_cable_length) / 2;

	return E1000_SUCCESS;
}

/**
 * e1000_get_cable_length_m88_gen2 - Determine cable length, newer m88 PHYs
 * @hw: pointer to the HW structure
 *
 * Reads the PHY-id-specific cable diagnostic registers to populate the
 * min/max/nominal cable length fields.
 **/
s32 e1000_get_cable_length_m88_gen2(struct e1000_hw *hw)
{
	struct e1000_phy_info *phy = &hw->phy;
	s32 ret_val;
	u16
phy_data, phy_data2, is_cm; + u16 index, default_page; + + DEBUGFUNC("e1000_get_cable_length_m88_gen2"); + + switch (hw->phy.id) { + case I210_I_PHY_ID: + /* Get cable length from PHY Cable Diagnostics Control Reg */ + ret_val = phy->ops.read_reg(hw, (0x7 << GS40G_PAGE_SHIFT) + + (I347AT4_PCDL + phy->addr), + &phy_data); + if (ret_val) + return ret_val; + + /* Check if the unit of cable length is meters or cm */ + ret_val = phy->ops.read_reg(hw, (0x7 << GS40G_PAGE_SHIFT) + + I347AT4_PCDC, &phy_data2); + if (ret_val) + return ret_val; + + is_cm = !(phy_data2 & I347AT4_PCDC_CABLE_LENGTH_UNIT); + + /* Populate the phy structure with cable length in meters */ + phy->min_cable_length = phy_data / (is_cm ? 100 : 1); + phy->max_cable_length = phy_data / (is_cm ? 100 : 1); + phy->cable_length = phy_data / (is_cm ? 100 : 1); + break; + case M88E1543_E_PHY_ID: + case M88E1512_E_PHY_ID: + case M88E1340M_E_PHY_ID: + case I347AT4_E_PHY_ID: + /* Remember the original page select and set it to 7 */ + ret_val = phy->ops.read_reg(hw, I347AT4_PAGE_SELECT, + &default_page); + if (ret_val) + return ret_val; + + ret_val = phy->ops.write_reg(hw, I347AT4_PAGE_SELECT, 0x07); + if (ret_val) + return ret_val; + + /* Get cable length from PHY Cable Diagnostics Control Reg */ + ret_val = phy->ops.read_reg(hw, (I347AT4_PCDL + phy->addr), + &phy_data); + if (ret_val) + return ret_val; + + /* Check if the unit of cable length is meters or cm */ + ret_val = phy->ops.read_reg(hw, I347AT4_PCDC, &phy_data2); + if (ret_val) + return ret_val; + + is_cm = !(phy_data2 & I347AT4_PCDC_CABLE_LENGTH_UNIT); + + /* Populate the phy structure with cable length in meters */ + phy->min_cable_length = phy_data / (is_cm ? 100 : 1); + phy->max_cable_length = phy_data / (is_cm ? 100 : 1); + phy->cable_length = phy_data / (is_cm ? 
100 : 1); + + /* Reset the page select to its original value */ + ret_val = phy->ops.write_reg(hw, I347AT4_PAGE_SELECT, + default_page); + if (ret_val) + return ret_val; + break; + + case M88E1112_E_PHY_ID: + /* Remember the original page select and set it to 5 */ + ret_val = phy->ops.read_reg(hw, I347AT4_PAGE_SELECT, + &default_page); + if (ret_val) + return ret_val; + + ret_val = phy->ops.write_reg(hw, I347AT4_PAGE_SELECT, 0x05); + if (ret_val) + return ret_val; + + ret_val = phy->ops.read_reg(hw, M88E1112_VCT_DSP_DISTANCE, + &phy_data); + if (ret_val) + return ret_val; + + index = (phy_data & M88E1000_PSSR_CABLE_LENGTH) >> + M88E1000_PSSR_CABLE_LENGTH_SHIFT; + + if (index >= M88E1000_CABLE_LENGTH_TABLE_SIZE - 1) + return -E1000_ERR_PHY; + + phy->min_cable_length = e1000_m88_cable_length_table[index]; + phy->max_cable_length = e1000_m88_cable_length_table[index + 1]; + + phy->cable_length = (phy->min_cable_length + + phy->max_cable_length) / 2; + + /* Reset the page select to its original value */ + ret_val = phy->ops.write_reg(hw, I347AT4_PAGE_SELECT, + default_page); + if (ret_val) + return ret_val; + + break; + default: + return -E1000_ERR_PHY; + } + + return ret_val; +} + +/** + * e1000_get_cable_length_igp_2 - Determine cable length for igp2 PHY + * @hw: pointer to the HW structure + * + * The automatic gain control (agc) normalizes the amplitude of the + * received signal, adjusting for the attenuation produced by the + * cable. By reading the AGC registers, which represent the + * combination of coarse and fine gain value, the value can be put + * into a lookup table to obtain the approximate cable length + * for each channel. 
+ **/ +s32 e1000_get_cable_length_igp_2(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + u16 phy_data, i, agc_value = 0; + u16 cur_agc_index, max_agc_index = 0; + u16 min_agc_index = IGP02E1000_CABLE_LENGTH_TABLE_SIZE - 1; + static const u16 agc_reg_array[IGP02E1000_PHY_CHANNEL_NUM] = { + IGP02E1000_PHY_AGC_A, + IGP02E1000_PHY_AGC_B, + IGP02E1000_PHY_AGC_C, + IGP02E1000_PHY_AGC_D + }; + + DEBUGFUNC("e1000_get_cable_length_igp_2"); + + /* Read the AGC registers for all channels */ + for (i = 0; i < IGP02E1000_PHY_CHANNEL_NUM; i++) { + ret_val = phy->ops.read_reg(hw, agc_reg_array[i], &phy_data); + if (ret_val) + return ret_val; + + /* Getting bits 15:9, which represent the combination of + * coarse and fine gain values. The result is a number + * that can be put into the lookup table to obtain the + * approximate cable length. + */ + cur_agc_index = ((phy_data >> IGP02E1000_AGC_LENGTH_SHIFT) & + IGP02E1000_AGC_LENGTH_MASK); + + /* Array index bound check. */ + if ((cur_agc_index >= IGP02E1000_CABLE_LENGTH_TABLE_SIZE) || + (cur_agc_index == 0)) + return -E1000_ERR_PHY; + + /* Remove min & max AGC values from calculation. */ + if (e1000_igp_2_cable_length_table[min_agc_index] > + e1000_igp_2_cable_length_table[cur_agc_index]) + min_agc_index = cur_agc_index; + if (e1000_igp_2_cable_length_table[max_agc_index] < + e1000_igp_2_cable_length_table[cur_agc_index]) + max_agc_index = cur_agc_index; + + agc_value += e1000_igp_2_cable_length_table[cur_agc_index]; + } + + agc_value -= (e1000_igp_2_cable_length_table[min_agc_index] + + e1000_igp_2_cable_length_table[max_agc_index]); + agc_value /= (IGP02E1000_PHY_CHANNEL_NUM - 2); + + /* Calculate cable length with the error range of +/- 10 meters. */ + phy->min_cable_length = (((agc_value - IGP02E1000_AGC_RANGE) > 0) ? 
+ (agc_value - IGP02E1000_AGC_RANGE) : 0); + phy->max_cable_length = agc_value + IGP02E1000_AGC_RANGE; + + phy->cable_length = (phy->min_cable_length + phy->max_cable_length) / 2; + + return E1000_SUCCESS; +} + +/** + * e1000_get_phy_info_m88 - Retrieve PHY information + * @hw: pointer to the HW structure + * + * Valid for only copper links. Read the PHY status register (sticky read) + * to verify that link is up. Read the PHY special control register to + * determine the polarity and 10base-T extended distance. Read the PHY + * special status register to determine MDI/MDIx and current speed. If + * speed is 1000, then determine cable length, local and remote receiver. + **/ +s32 e1000_get_phy_info_m88(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + u16 phy_data; + bool link; + + DEBUGFUNC("e1000_get_phy_info_m88"); + + if (phy->media_type != e1000_media_type_copper) { + DEBUGOUT("Phy info is only valid for copper media\n"); + return -E1000_ERR_CONFIG; + } + + ret_val = e1000_phy_has_link_generic(hw, 1, 0, &link); + if (ret_val) + return ret_val; + + if (!link) { + DEBUGOUT("Phy info is only valid if link is up\n"); + return -E1000_ERR_CONFIG; + } + + ret_val = phy->ops.read_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_data); + if (ret_val) + return ret_val; + + phy->polarity_correction = !!(phy_data & + M88E1000_PSCR_POLARITY_REVERSAL); + + ret_val = e1000_check_polarity_m88(hw); + if (ret_val) + return ret_val; + + ret_val = phy->ops.read_reg(hw, M88E1000_PHY_SPEC_STATUS, &phy_data); + if (ret_val) + return ret_val; + + phy->is_mdix = !!(phy_data & M88E1000_PSSR_MDIX); + + if ((phy_data & M88E1000_PSSR_SPEED) == M88E1000_PSSR_1000MBS) { + ret_val = hw->phy.ops.get_cable_length(hw); + if (ret_val) + return ret_val; + + ret_val = phy->ops.read_reg(hw, PHY_1000T_STATUS, &phy_data); + if (ret_val) + return ret_val; + + phy->local_rx = (phy_data & SR_1000T_LOCAL_RX_STATUS) + ? 
e1000_1000t_rx_status_ok + : e1000_1000t_rx_status_not_ok; + + phy->remote_rx = (phy_data & SR_1000T_REMOTE_RX_STATUS) + ? e1000_1000t_rx_status_ok + : e1000_1000t_rx_status_not_ok; + } else { + /* Set values to "undefined" */ + phy->cable_length = E1000_CABLE_LENGTH_UNDEFINED; + phy->local_rx = e1000_1000t_rx_status_undefined; + phy->remote_rx = e1000_1000t_rx_status_undefined; + } + + return ret_val; +} + +/** + * e1000_get_phy_info_igp - Retrieve igp PHY information + * @hw: pointer to the HW structure + * + * Read PHY status to determine if link is up. If link is up, then + * set/determine 10base-T extended distance and polarity correction. Read + * PHY port status to determine MDI/MDIx and speed. Based on the speed, + * determine on the cable length, local and remote receiver. + **/ +s32 e1000_get_phy_info_igp(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + u16 data; + bool link; + + DEBUGFUNC("e1000_get_phy_info_igp"); + + ret_val = e1000_phy_has_link_generic(hw, 1, 0, &link); + if (ret_val) + return ret_val; + + if (!link) { + DEBUGOUT("Phy info is only valid if link is up\n"); + return -E1000_ERR_CONFIG; + } + + phy->polarity_correction = true; + + ret_val = e1000_check_polarity_igp(hw); + if (ret_val) + return ret_val; + + ret_val = phy->ops.read_reg(hw, IGP01E1000_PHY_PORT_STATUS, &data); + if (ret_val) + return ret_val; + + phy->is_mdix = !!(data & IGP01E1000_PSSR_MDIX); + + if ((data & IGP01E1000_PSSR_SPEED_MASK) == + IGP01E1000_PSSR_SPEED_1000MBPS) { + ret_val = phy->ops.get_cable_length(hw); + if (ret_val) + return ret_val; + + ret_val = phy->ops.read_reg(hw, PHY_1000T_STATUS, &data); + if (ret_val) + return ret_val; + + phy->local_rx = (data & SR_1000T_LOCAL_RX_STATUS) + ? e1000_1000t_rx_status_ok + : e1000_1000t_rx_status_not_ok; + + phy->remote_rx = (data & SR_1000T_REMOTE_RX_STATUS) + ? 
e1000_1000t_rx_status_ok + : e1000_1000t_rx_status_not_ok; + } else { + phy->cable_length = E1000_CABLE_LENGTH_UNDEFINED; + phy->local_rx = e1000_1000t_rx_status_undefined; + phy->remote_rx = e1000_1000t_rx_status_undefined; + } + + return ret_val; +} + +/** + * e1000_get_phy_info_ife - Retrieves various IFE PHY states + * @hw: pointer to the HW structure + * + * Populates "phy" structure with various feature states. + **/ +s32 e1000_get_phy_info_ife(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + u16 data; + bool link; + + DEBUGFUNC("e1000_get_phy_info_ife"); + + ret_val = e1000_phy_has_link_generic(hw, 1, 0, &link); + if (ret_val) + return ret_val; + + if (!link) { + DEBUGOUT("Phy info is only valid if link is up\n"); + return -E1000_ERR_CONFIG; + } + + ret_val = phy->ops.read_reg(hw, IFE_PHY_SPECIAL_CONTROL, &data); + if (ret_val) + return ret_val; + phy->polarity_correction = !(data & IFE_PSC_AUTO_POLARITY_DISABLE); + + if (phy->polarity_correction) { + ret_val = e1000_check_polarity_ife(hw); + if (ret_val) + return ret_val; + } else { + /* Polarity is forced */ + phy->cable_polarity = ((data & IFE_PSC_FORCE_POLARITY) + ? e1000_rev_polarity_reversed + : e1000_rev_polarity_normal); + } + + ret_val = phy->ops.read_reg(hw, IFE_PHY_MDIX_CONTROL, &data); + if (ret_val) + return ret_val; + + phy->is_mdix = !!(data & IFE_PMC_MDIX_STATUS); + + /* The following parameters are undefined for 10/100 operation. */ + phy->cable_length = E1000_CABLE_LENGTH_UNDEFINED; + phy->local_rx = e1000_1000t_rx_status_undefined; + phy->remote_rx = e1000_1000t_rx_status_undefined; + + return E1000_SUCCESS; +} + +/** + * e1000_phy_sw_reset_generic - PHY software reset + * @hw: pointer to the HW structure + * + * Does a software reset of the PHY by reading the PHY control register and + * setting/write the control register reset bit to the PHY. 
 **/
s32 e1000_phy_sw_reset_generic(struct e1000_hw *hw)
{
	s32 ret_val;
	u16 phy_ctrl;

	DEBUGFUNC("e1000_phy_sw_reset_generic");

	/* Nothing to reset when there is no MDIC register accessor. */
	if (!hw->phy.ops.read_reg)
		return E1000_SUCCESS;

	ret_val = hw->phy.ops.read_reg(hw, PHY_CONTROL, &phy_ctrl);
	if (ret_val)
		return ret_val;

	/* Setting MII_CR_RESET starts the reset (self-clearing per the
	 * IEEE 802.3 BMCR definition).
	 */
	phy_ctrl |= MII_CR_RESET;
	ret_val = hw->phy.ops.write_reg(hw, PHY_CONTROL, phy_ctrl);
	if (ret_val)
		return ret_val;

	usec_delay(1);

	return ret_val;
}

/**
 * e1000_phy_hw_reset_generic - PHY hardware reset
 * @hw: pointer to the HW structure
 *
 * Verify the reset block is not blocking us from resetting.  Acquire
 * semaphore (if necessary) and read/set/write the device control reset
 * bit in the PHY.  Wait the appropriate delay time for the device to
 * reset and release the semaphore (if necessary).
 **/
s32 e1000_phy_hw_reset_generic(struct e1000_hw *hw)
{
	struct e1000_phy_info *phy = &hw->phy;
	s32 ret_val;
	u32 ctrl;

	DEBUGFUNC("e1000_phy_hw_reset_generic");

	/* If a reset-block check exists and reports the PHY is blocked,
	 * return success without resetting.
	 */
	if (phy->ops.check_reset_block) {
		ret_val = phy->ops.check_reset_block(hw);
		if (ret_val)
			return E1000_SUCCESS;
	}

	ret_val = phy->ops.acquire(hw);
	if (ret_val)
		return ret_val;

	/* Pulse PHY_RST: assert, hold for reset_delay_us, then restore
	 * the original CTRL value.
	 */
	ctrl = E1000_READ_REG(hw, E1000_CTRL);
	E1000_WRITE_REG(hw, E1000_CTRL, ctrl | E1000_CTRL_PHY_RST);
	E1000_WRITE_FLUSH(hw);

	usec_delay(phy->reset_delay_us);

	E1000_WRITE_REG(hw, E1000_CTRL, ctrl);
	E1000_WRITE_FLUSH(hw);

	usec_delay(150);

	phy->ops.release(hw);

	return phy->ops.get_cfg_done(hw);
}

/**
 * e1000_get_cfg_done_generic - Generic configuration done
 * @hw: pointer to the HW structure
 *
 * Generic function to wait 10 milli-seconds for configuration to complete
 * and return success.
 **/
s32 e1000_get_cfg_done_generic(struct e1000_hw E1000_UNUSEDARG *hw)
{
	DEBUGFUNC("e1000_get_cfg_done_generic");

	msec_delay_irq(10);

	return E1000_SUCCESS;
}

/**
 * e1000_phy_init_script_igp3 - Inits the IGP3 PHY
 * @hw: pointer to the HW structure
 *
 * Initializes a Intel Gigabit PHY3 when an EEPROM is not present.
 *
 * NOTE(review): the individual write_reg return values are intentionally
 * ignored throughout this best-effort init sequence.
 **/
s32 e1000_phy_init_script_igp3(struct e1000_hw *hw)
{
	DEBUGOUT("Running IGP 3 PHY init script\n");

	/* PHY init IGP 3 */
	/* Enable rise/fall, 10-mode work in class-A */
	hw->phy.ops.write_reg(hw, 0x2F5B, 0x9018);
	/* Remove all caps from Replica path filter */
	hw->phy.ops.write_reg(hw, 0x2F52, 0x0000);
	/* Bias trimming for ADC, AFE and Driver (Default) */
	hw->phy.ops.write_reg(hw, 0x2FB1, 0x8B24);
	/* Increase Hybrid poly bias */
	hw->phy.ops.write_reg(hw, 0x2FB2, 0xF8F0);
	/* Add 4% to Tx amplitude in Gig mode */
	hw->phy.ops.write_reg(hw, 0x2010, 0x10B0);
	/* Disable trimming (TTT) */
	hw->phy.ops.write_reg(hw, 0x2011, 0x0000);
	/* Poly DC correction to 94.6% + 2% for all channels */
	hw->phy.ops.write_reg(hw, 0x20DD, 0x249A);
	/* ABS DC correction to 95.9% */
	hw->phy.ops.write_reg(hw, 0x20DE, 0x00D3);
	/* BG temp curve trim */
	hw->phy.ops.write_reg(hw, 0x28B4, 0x04CE);
	/* Increasing ADC OPAMP stage 1 currents to max */
	hw->phy.ops.write_reg(hw, 0x2F70, 0x29E4);
	/* Force 1000 ( required for enabling PHY regs configuration) */
	hw->phy.ops.write_reg(hw, 0x0000, 0x0140);
	/* Set upd_freq to 6 */
	hw->phy.ops.write_reg(hw, 0x1F30, 0x1606);
	/* Disable NPDFE */
	hw->phy.ops.write_reg(hw, 0x1F31, 0xB814);
	/* Disable adaptive fixed FFE (Default) */
	hw->phy.ops.write_reg(hw, 0x1F35, 0x002A);
	/* Enable FFE hysteresis */
	hw->phy.ops.write_reg(hw, 0x1F3E, 0x0067);
	/* Fixed FFE for short cable lengths */
	hw->phy.ops.write_reg(hw, 0x1F54, 0x0065);
	/* Fixed FFE for medium cable lengths */
	hw->phy.ops.write_reg(hw, 0x1F55, 0x002A);
	/* Fixed FFE for long cable lengths */
	hw->phy.ops.write_reg(hw, 0x1F56, 0x002A);
	/* Enable Adaptive Clip Threshold */
	hw->phy.ops.write_reg(hw, 0x1F72, 0x3FB0);
	/* AHT reset limit to 1 */
	hw->phy.ops.write_reg(hw, 0x1F76, 0xC0FF);
	/* Set AHT master delay to 127 msec */
	hw->phy.ops.write_reg(hw, 0x1F77, 0x1DEC);
	/* Set scan bits for AHT */
	hw->phy.ops.write_reg(hw, 0x1F78, 0xF9EF);
	/* Set AHT Preset bits */
	hw->phy.ops.write_reg(hw, 0x1F79, 0x0210);
	/* Change integ_factor of channel A to 3 */
	hw->phy.ops.write_reg(hw, 0x1895, 0x0003);
	/* Change prop_factor of channels BCD to 8 */
	hw->phy.ops.write_reg(hw, 0x1796, 0x0008);
	/* Change cg_icount + enable integbp for channels BCD */
	hw->phy.ops.write_reg(hw, 0x1798, 0xD008);
	/* Change cg_icount + enable integbp + change prop_factor_master
	 * to 8 for channel A
	 */
	hw->phy.ops.write_reg(hw, 0x1898, 0xD918);
	/* Disable AHT in Slave mode on channel A */
	hw->phy.ops.write_reg(hw, 0x187A, 0x0800);
	/* Enable LPLU and disable AN to 1000 in non-D0a states,
	 * Enable SPD+B2B
	 */
	hw->phy.ops.write_reg(hw, 0x0019, 0x008D);
	/* Enable restart AN on an1000_dis change */
	hw->phy.ops.write_reg(hw, 0x001B, 0x2080);
	/* Enable wh_fifo read clock in 10/100 modes */
	hw->phy.ops.write_reg(hw, 0x0014, 0x0045);
	/* Restart AN, Speed selection is 1000 */
	hw->phy.ops.write_reg(hw, 0x0000, 0x1340);

	return E1000_SUCCESS;
}

/**
 * e1000_get_phy_type_from_id - Get PHY type from id
 * @phy_id: phy_id read from the phy
 *
 * Returns the phy type from the id.
 **/
enum e1000_phy_type e1000_get_phy_type_from_id(u32 phy_id)
{
	enum e1000_phy_type phy_type = e1000_phy_unknown;

	/* Map the hardware PHY id register value to the driver's PHY
	 * family enum; unrecognized ids map to e1000_phy_unknown.
	 */
	switch (phy_id) {
	case M88E1000_I_PHY_ID:
	case M88E1000_E_PHY_ID:
	case M88E1111_I_PHY_ID:
	case M88E1011_I_PHY_ID:
	case M88E1543_E_PHY_ID:
	case M88E1512_E_PHY_ID:
	case I347AT4_E_PHY_ID:
	case M88E1112_E_PHY_ID:
	case M88E1340M_E_PHY_ID:
		phy_type = e1000_phy_m88;
		break;
	case IGP01E1000_I_PHY_ID: /* IGP 1 & 2 share this */
		phy_type = e1000_phy_igp_2;
		break;
	case GG82563_E_PHY_ID:
		phy_type = e1000_phy_gg82563;
		break;
	case IGP03E1000_E_PHY_ID:
		phy_type = e1000_phy_igp_3;
		break;
	case IFE_E_PHY_ID:
	case IFE_PLUS_E_PHY_ID:
	case IFE_C_E_PHY_ID:
		phy_type = e1000_phy_ife;
		break;
	case I82580_I_PHY_ID:
		phy_type = e1000_phy_82580;
		break;
	case I210_I_PHY_ID:
		phy_type = e1000_phy_i210;
		break;
	default:
		phy_type = e1000_phy_unknown;
		break;
	}
	return phy_type;
}

/**
 * e1000_determine_phy_address - Determines PHY address.
 * @hw: pointer to the HW structure
 *
 * This uses a trial and error method to loop through possible PHY
 * addresses. It tests each by reading the PHY ID registers and
 * checking for a match.
 **/
s32 e1000_determine_phy_address(struct e1000_hw *hw)
{
	u32 phy_addr = 0;
	u32 i;
	enum e1000_phy_type phy_type = e1000_phy_unknown;

	/* Invalidate the cached id so a stale value cannot match. */
	hw->phy.id = phy_type;

	for (phy_addr = 0; phy_addr < E1000_MAX_PHY_ADDR; phy_addr++) {
		hw->phy.addr = phy_addr;
		i = 0;

		/* Retry each address up to 10 times, 1 ms apart. */
		do {
			e1000_get_phy_id(hw);
			phy_type = e1000_get_phy_type_from_id(hw->phy.id);

			/* If phy_type is valid, break - we found our
			 * PHY address
			 */
			if (phy_type != e1000_phy_unknown)
				return E1000_SUCCESS;

			msec_delay(1);
			i++;
		} while (i < 10);
	}

	/* NOTE: hw->phy.addr is left at the last probed address. */
	return -E1000_ERR_PHY_TYPE;
}

/**
 * e1000_power_up_phy_copper - Restore copper link in case of PHY power down
 * @hw: pointer to the HW structure
 *
 * In the case of a PHY power down to save power, or to turn off link during a
 * driver unload, or wake on lan is not enabled, restore the link to previous
 * settings.
 **/
void e1000_power_up_phy_copper(struct e1000_hw *hw)
{
	u16 mii_reg = 0;

	/* The PHY will retain its settings across a power down/up cycle */
	hw->phy.ops.read_reg(hw, PHY_CONTROL, &mii_reg);
	mii_reg &= ~MII_CR_POWER_DOWN;
	hw->phy.ops.write_reg(hw, PHY_CONTROL, mii_reg);
}

/**
 * e1000_power_down_phy_copper - Power down copper PHY
 * @hw: pointer to the HW structure
 *
 * Power down the PHY (sets MII_CR_POWER_DOWN) to save power, or to turn
 * off link during a driver unload, or when wake on lan is not enabled.
 **/
void e1000_power_down_phy_copper(struct e1000_hw *hw)
{
	u16 mii_reg = 0;

	/* The PHY will retain its settings across a power down/up cycle */
	hw->phy.ops.read_reg(hw, PHY_CONTROL, &mii_reg);
	mii_reg |= MII_CR_POWER_DOWN;
	hw->phy.ops.write_reg(hw, PHY_CONTROL, mii_reg);
	msec_delay(1);
}

/**
 * e1000_check_polarity_82577 - Checks the polarity.
+ * @hw: pointer to the HW structure + * + * Success returns 0, Failure returns -E1000_ERR_PHY (-2) + * + * Polarity is determined based on the PHY specific status register. + **/ +s32 e1000_check_polarity_82577(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + u16 data; + + DEBUGFUNC("e1000_check_polarity_82577"); + + ret_val = phy->ops.read_reg(hw, I82577_PHY_STATUS_2, &data); + + if (!ret_val) + phy->cable_polarity = ((data & I82577_PHY_STATUS2_REV_POLARITY) + ? e1000_rev_polarity_reversed + : e1000_rev_polarity_normal); + + return ret_val; +} + +/** + * e1000_phy_force_speed_duplex_82577 - Force speed/duplex for I82577 PHY + * @hw: pointer to the HW structure + * + * Calls the PHY setup function to force speed and duplex. + **/ +s32 e1000_phy_force_speed_duplex_82577(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + u16 phy_data; + bool link; + + DEBUGFUNC("e1000_phy_force_speed_duplex_82577"); + + ret_val = phy->ops.read_reg(hw, PHY_CONTROL, &phy_data); + if (ret_val) + return ret_val; + + e1000_phy_force_speed_duplex_setup(hw, &phy_data); + + ret_val = phy->ops.write_reg(hw, PHY_CONTROL, phy_data); + if (ret_val) + return ret_val; + + usec_delay(1); + + if (phy->autoneg_wait_to_complete) { + DEBUGOUT("Waiting for forced speed/duplex link on 82577 phy\n"); + + ret_val = e1000_phy_has_link_generic(hw, PHY_FORCE_LIMIT, + 100000, &link); + if (ret_val) + return ret_val; + + if (!link) + DEBUGOUT("Link taking longer than expected.\n"); + + /* Try once more */ + ret_val = e1000_phy_has_link_generic(hw, PHY_FORCE_LIMIT, + 100000, &link); + } + + return ret_val; +} + +/** + * e1000_get_phy_info_82577 - Retrieve I82577 PHY information + * @hw: pointer to the HW structure + * + * Read PHY status to determine if link is up. If link is up, then + * set/determine 10base-T extended distance and polarity correction. Read + * PHY port status to determine MDI/MDIx and speed. 
Based on the speed, + * determine on the cable length, local and remote receiver. + **/ +s32 e1000_get_phy_info_82577(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + u16 data; + bool link; + + DEBUGFUNC("e1000_get_phy_info_82577"); + + ret_val = e1000_phy_has_link_generic(hw, 1, 0, &link); + if (ret_val) + return ret_val; + + if (!link) { + DEBUGOUT("Phy info is only valid if link is up\n"); + return -E1000_ERR_CONFIG; + } + + phy->polarity_correction = true; + + ret_val = e1000_check_polarity_82577(hw); + if (ret_val) + return ret_val; + + ret_val = phy->ops.read_reg(hw, I82577_PHY_STATUS_2, &data); + if (ret_val) + return ret_val; + + phy->is_mdix = !!(data & I82577_PHY_STATUS2_MDIX); + + if ((data & I82577_PHY_STATUS2_SPEED_MASK) == + I82577_PHY_STATUS2_SPEED_1000MBPS) { + ret_val = hw->phy.ops.get_cable_length(hw); + if (ret_val) + return ret_val; + + ret_val = phy->ops.read_reg(hw, PHY_1000T_STATUS, &data); + if (ret_val) + return ret_val; + + phy->local_rx = (data & SR_1000T_LOCAL_RX_STATUS) + ? e1000_1000t_rx_status_ok + : e1000_1000t_rx_status_not_ok; + + phy->remote_rx = (data & SR_1000T_REMOTE_RX_STATUS) + ? e1000_1000t_rx_status_ok + : e1000_1000t_rx_status_not_ok; + } else { + phy->cable_length = E1000_CABLE_LENGTH_UNDEFINED; + phy->local_rx = e1000_1000t_rx_status_undefined; + phy->remote_rx = e1000_1000t_rx_status_undefined; + } + + return E1000_SUCCESS; +} + +/** + * e1000_get_cable_length_82577 - Determine cable length for 82577 PHY + * @hw: pointer to the HW structure + * + * Reads the diagnostic status register and verifies result is valid before + * placing it in the phy_cable_length field. 
 **/
s32 e1000_get_cable_length_82577(struct e1000_hw *hw)
{
	struct e1000_phy_info *phy = &hw->phy;
	s32 ret_val;
	u16 phy_data, length;

	DEBUGFUNC("e1000_get_cable_length_82577");

	ret_val = phy->ops.read_reg(hw, I82577_PHY_DIAG_STATUS, &phy_data);
	if (ret_val)
		return ret_val;

	length = ((phy_data & I82577_DSTATUS_CABLE_LENGTH) >>
		  I82577_DSTATUS_CABLE_LENGTH_SHIFT);

	/* The all-ones "undefined" sentinel means the diag result is not
	 * valid yet; report a PHY error instead of caching it.
	 */
	if (length == E1000_CABLE_LENGTH_UNDEFINED)
		return -E1000_ERR_PHY;

	phy->cable_length = length;

	return E1000_SUCCESS;
}

/**
 * e1000_write_phy_reg_gs40g - Write GS40G PHY register
 * @hw: pointer to the HW structure
 * @offset: register offset to write to; the page is encoded in the
 *	upper bits (see GS40G_PAGE_SHIFT)
 * @data: data to write at register offset
 *
 * Acquires semaphore, if necessary, then writes the data to PHY register
 * at the offset.  Release any acquired semaphores before exiting.
 **/
s32 e1000_write_phy_reg_gs40g(struct e1000_hw *hw, u32 offset, u16 data)
{
	s32 ret_val;
	u16 page = offset >> GS40G_PAGE_SHIFT;

	DEBUGFUNC("e1000_write_phy_reg_gs40g");

	/* Split the combined value into page and in-page offset. */
	offset = offset & GS40G_OFFSET_MASK;
	ret_val = hw->phy.ops.acquire(hw);
	if (ret_val)
		return ret_val;

	/* Select the page first, then write the register within it. */
	ret_val = e1000_write_phy_reg_mdic(hw, GS40G_PAGE_SELECT, page);
	if (ret_val)
		goto release;
	ret_val = e1000_write_phy_reg_mdic(hw, offset, data);

release:
	hw->phy.ops.release(hw);
	return ret_val;
}

/**
 * e1000_read_phy_reg_gs40g - Read GS40G PHY register
 * @hw: pointer to the HW structure
 * @offset: lower half is register offset to read to
 *	upper half is page to use.
 * @data: data to read at register offset
 *
 * Acquires semaphore, if necessary, then reads the data in the PHY register
 * at the offset.  Release any acquired semaphores before exiting.
+ **/ +s32 e1000_read_phy_reg_gs40g(struct e1000_hw *hw, u32 offset, u16 *data) +{ + s32 ret_val; + u16 page = offset >> GS40G_PAGE_SHIFT; + + DEBUGFUNC("e1000_read_phy_reg_gs40g"); + + offset = offset & GS40G_OFFSET_MASK; + ret_val = hw->phy.ops.acquire(hw); + if (ret_val) + return ret_val; + + ret_val = e1000_write_phy_reg_mdic(hw, GS40G_PAGE_SELECT, page); + if (ret_val) + goto release; + ret_val = e1000_read_phy_reg_mdic(hw, offset, data); + +release: + hw->phy.ops.release(hw); + return ret_val; +} + +/** + * e1000_read_phy_reg_mphy - Read mPHY control register + * @hw: pointer to the HW structure + * @address: address to be read + * @data: pointer to the read data + * + * Reads the mPHY control register in the PHY at offset and stores the + * information read to data. + **/ +s32 e1000_read_phy_reg_mphy(struct e1000_hw *hw, u32 address, u32 *data) +{ + u32 mphy_ctrl = 0; + bool locked = false; + bool ready; + + DEBUGFUNC("e1000_read_phy_reg_mphy"); + + /* Check if mPHY is ready to read/write operations */ + ready = e1000_is_mphy_ready(hw); + if (!ready) + return -E1000_ERR_PHY; + + /* Check if mPHY access is disabled and enable it if so */ + mphy_ctrl = E1000_READ_REG(hw, E1000_MPHY_ADDR_CTRL); + if (mphy_ctrl & E1000_MPHY_DIS_ACCESS) { + locked = true; + ready = e1000_is_mphy_ready(hw); + if (!ready) + return -E1000_ERR_PHY; + mphy_ctrl |= E1000_MPHY_ENA_ACCESS; + E1000_WRITE_REG(hw, E1000_MPHY_ADDR_CTRL, mphy_ctrl); + } + + /* Set the address that we want to read */ + ready = e1000_is_mphy_ready(hw); + if (!ready) + return -E1000_ERR_PHY; + + /* We mask address, because we want to use only current lane */ + mphy_ctrl = (mphy_ctrl & ~E1000_MPHY_ADDRESS_MASK & + ~E1000_MPHY_ADDRESS_FNC_OVERRIDE) | + (address & E1000_MPHY_ADDRESS_MASK); + E1000_WRITE_REG(hw, E1000_MPHY_ADDR_CTRL, mphy_ctrl); + + /* Read data from the address */ + ready = e1000_is_mphy_ready(hw); + if (!ready) + return -E1000_ERR_PHY; + *data = E1000_READ_REG(hw, E1000_MPHY_DATA); + + /* Disable 
access to mPHY if it was originally disabled */ + if (locked){ + ready = e1000_is_mphy_ready(hw); + if (!ready) + return -E1000_ERR_PHY; + E1000_WRITE_REG(hw, E1000_MPHY_ADDR_CTRL, + E1000_MPHY_DIS_ACCESS); + } + + return E1000_SUCCESS; +} + +/** + * e1000_write_phy_reg_mphy - Write mPHY control register + * @hw: pointer to the HW structure + * @address: address to write to + * @data: data to write to register at offset + * @line_override: used when we want to use different line than default one + * + * Writes data to mPHY control register. + **/ +s32 e1000_write_phy_reg_mphy(struct e1000_hw *hw, u32 address, u32 data, + bool line_override) +{ + u32 mphy_ctrl = 0; + bool locked = false; + bool ready; + + DEBUGFUNC("e1000_write_phy_reg_mphy"); + + /* Check if mPHY is ready to read/write operations */ + ready = e1000_is_mphy_ready(hw); + if (!ready) + return -E1000_ERR_PHY; + + /* Check if mPHY access is disabled and enable it if so */ + mphy_ctrl = E1000_READ_REG(hw, E1000_MPHY_ADDR_CTRL); + if (mphy_ctrl & E1000_MPHY_DIS_ACCESS) { + locked = true; + ready = e1000_is_mphy_ready(hw); + if (!ready) + return -E1000_ERR_PHY; + mphy_ctrl |= E1000_MPHY_ENA_ACCESS; + E1000_WRITE_REG(hw, E1000_MPHY_ADDR_CTRL, mphy_ctrl); + } + + /* Set the address that we want to read */ + ready = e1000_is_mphy_ready(hw); + if (!ready) + return -E1000_ERR_PHY; + + /* We mask address, because we want to use only current lane */ + if (line_override) + mphy_ctrl |= E1000_MPHY_ADDRESS_FNC_OVERRIDE; + else + mphy_ctrl &= ~E1000_MPHY_ADDRESS_FNC_OVERRIDE; + mphy_ctrl = (mphy_ctrl & ~E1000_MPHY_ADDRESS_MASK) | + (address & E1000_MPHY_ADDRESS_MASK); + E1000_WRITE_REG(hw, E1000_MPHY_ADDR_CTRL, mphy_ctrl); + + /* Read data from the address */ + ready = e1000_is_mphy_ready(hw); + if (!ready) + return -E1000_ERR_PHY; + E1000_WRITE_REG(hw, E1000_MPHY_DATA, data); + + /* Disable access to mPHY if it was originally disabled */ + if (locked) { + ready = e1000_is_mphy_ready(hw); + if (!ready) + return 
-E1000_ERR_PHY; + E1000_WRITE_REG(hw, E1000_MPHY_ADDR_CTRL, + E1000_MPHY_DIS_ACCESS); + } + + return E1000_SUCCESS; +} + +/** + * e1000_is_mphy_ready - Check if mPHY control register is not busy + * @hw: pointer to the HW structure + * + * Returns mPHY control register status. + **/ +bool e1000_is_mphy_ready(struct e1000_hw *hw) +{ + u16 retry_count = 0; + u32 mphy_ctrl = 0; + bool ready = false; + + while (retry_count < 2) { + mphy_ctrl = E1000_READ_REG(hw, E1000_MPHY_ADDR_CTRL); + if (mphy_ctrl & E1000_MPHY_BUSY) { + usec_delay(20); + retry_count++; + continue; + } + ready = true; + break; + } + + if (!ready) + DEBUGOUT("ERROR READING mPHY control register, phy is busy.\n"); + + return ready; +} diff --git a/drivers/staging/igb_avb/e1000_phy.h b/drivers/staging/igb_avb/e1000_phy.h new file mode 100644 index 0000000000000..a109c914ce8d2 --- /dev/null +++ b/drivers/staging/igb_avb/e1000_phy.h @@ -0,0 +1,252 @@ +/******************************************************************************* + + Intel(R) Gigabit Ethernet Linux driver + Copyright(c) 2007-2015 Intel Corporation. + + This program is free software; you can redistribute it and/or modify it + under the terms and conditions of the GNU General Public License, + version 2, as published by the Free Software Foundation. + + This program is distributed in the hope it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. + + The full GNU General Public License is included in this distribution in + the file called "COPYING". + + Contact Information: + Linux NICS + e1000-devel Mailing List + Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 + +*******************************************************************************/ + +#ifndef _E1000_PHY_H_ +#define _E1000_PHY_H_ + +void e1000_init_phy_ops_generic(struct e1000_hw *hw); +s32 e1000_null_read_reg(struct e1000_hw *hw, u32 offset, u16 *data); +void e1000_null_phy_generic(struct e1000_hw *hw); +s32 e1000_null_lplu_state(struct e1000_hw *hw, bool active); +s32 e1000_null_write_reg(struct e1000_hw *hw, u32 offset, u16 data); +s32 e1000_null_set_page(struct e1000_hw *hw, u16 data); +s32 e1000_read_i2c_byte_null(struct e1000_hw *hw, u8 byte_offset, + u8 dev_addr, u8 *data); +s32 e1000_write_i2c_byte_null(struct e1000_hw *hw, u8 byte_offset, + u8 dev_addr, u8 data); +s32 e1000_check_downshift_generic(struct e1000_hw *hw); +s32 e1000_check_polarity_m88(struct e1000_hw *hw); +s32 e1000_check_polarity_igp(struct e1000_hw *hw); +s32 e1000_check_polarity_ife(struct e1000_hw *hw); +s32 e1000_check_reset_block_generic(struct e1000_hw *hw); +s32 e1000_copper_link_setup_igp(struct e1000_hw *hw); +s32 e1000_copper_link_setup_m88(struct e1000_hw *hw); +s32 e1000_copper_link_setup_m88_gen2(struct e1000_hw *hw); +s32 e1000_phy_force_speed_duplex_igp(struct e1000_hw *hw); +s32 e1000_phy_force_speed_duplex_m88(struct e1000_hw *hw); +s32 e1000_phy_force_speed_duplex_ife(struct e1000_hw *hw); +s32 e1000_get_cable_length_m88(struct e1000_hw *hw); +s32 e1000_get_cable_length_m88_gen2(struct e1000_hw *hw); +s32 e1000_get_cable_length_igp_2(struct e1000_hw *hw); +s32 e1000_get_cfg_done_generic(struct e1000_hw *hw); +s32 e1000_get_phy_id(struct e1000_hw *hw); +s32 e1000_get_phy_info_igp(struct e1000_hw *hw); +s32 e1000_get_phy_info_m88(struct e1000_hw *hw); +s32 e1000_get_phy_info_ife(struct e1000_hw *hw); +s32 e1000_phy_sw_reset_generic(struct e1000_hw *hw); +void e1000_phy_force_speed_duplex_setup(struct e1000_hw *hw, u16 *phy_ctrl); +s32 e1000_phy_hw_reset_generic(struct e1000_hw *hw); +s32 e1000_phy_reset_dsp_generic(struct 
e1000_hw *hw); +s32 e1000_read_kmrn_reg_generic(struct e1000_hw *hw, u32 offset, u16 *data); +s32 e1000_read_kmrn_reg_locked(struct e1000_hw *hw, u32 offset, u16 *data); +s32 e1000_set_page_igp(struct e1000_hw *hw, u16 page); +s32 e1000_read_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 *data); +s32 e1000_read_phy_reg_igp_locked(struct e1000_hw *hw, u32 offset, u16 *data); +s32 e1000_read_phy_reg_m88(struct e1000_hw *hw, u32 offset, u16 *data); +s32 e1000_set_d3_lplu_state_generic(struct e1000_hw *hw, bool active); +s32 e1000_setup_copper_link_generic(struct e1000_hw *hw); +s32 e1000_write_kmrn_reg_generic(struct e1000_hw *hw, u32 offset, u16 data); +s32 e1000_write_kmrn_reg_locked(struct e1000_hw *hw, u32 offset, u16 data); +s32 e1000_write_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 data); +s32 e1000_write_phy_reg_igp_locked(struct e1000_hw *hw, u32 offset, u16 data); +s32 e1000_write_phy_reg_m88(struct e1000_hw *hw, u32 offset, u16 data); +s32 e1000_phy_has_link_generic(struct e1000_hw *hw, u32 iterations, + u32 usec_interval, bool *success); +s32 e1000_phy_init_script_igp3(struct e1000_hw *hw); +enum e1000_phy_type e1000_get_phy_type_from_id(u32 phy_id); +s32 e1000_determine_phy_address(struct e1000_hw *hw); +s32 e1000_enable_phy_wakeup_reg_access_bm(struct e1000_hw *hw, u16 *phy_reg); +s32 e1000_disable_phy_wakeup_reg_access_bm(struct e1000_hw *hw, u16 *phy_reg); +void e1000_power_up_phy_copper(struct e1000_hw *hw); +void e1000_power_down_phy_copper(struct e1000_hw *hw); +s32 e1000_read_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 *data); +s32 e1000_write_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 data); +s32 e1000_read_phy_reg_i2c(struct e1000_hw *hw, u32 offset, u16 *data); +s32 e1000_write_phy_reg_i2c(struct e1000_hw *hw, u32 offset, u16 data); +s32 e1000_read_sfp_data_byte(struct e1000_hw *hw, u16 offset, u8 *data); +s32 e1000_write_sfp_data_byte(struct e1000_hw *hw, u16 offset, u8 data); +s32 e1000_copper_link_setup_82577(struct 
e1000_hw *hw); +s32 e1000_check_polarity_82577(struct e1000_hw *hw); +s32 e1000_get_phy_info_82577(struct e1000_hw *hw); +s32 e1000_phy_force_speed_duplex_82577(struct e1000_hw *hw); +s32 e1000_get_cable_length_82577(struct e1000_hw *hw); +s32 e1000_write_phy_reg_gs40g(struct e1000_hw *hw, u32 offset, u16 data); +s32 e1000_read_phy_reg_gs40g(struct e1000_hw *hw, u32 offset, u16 *data); +s32 e1000_read_phy_reg_mphy(struct e1000_hw *hw, u32 address, u32 *data); +s32 e1000_write_phy_reg_mphy(struct e1000_hw *hw, u32 address, u32 data, + bool line_override); +bool e1000_is_mphy_ready(struct e1000_hw *hw); + +#define E1000_MAX_PHY_ADDR 8 + +/* IGP01E1000 Specific Registers */ +#define IGP01E1000_PHY_PORT_CONFIG 0x10 /* Port Config */ +#define IGP01E1000_PHY_PORT_STATUS 0x11 /* Status */ +#define IGP01E1000_PHY_PORT_CTRL 0x12 /* Control */ +#define IGP01E1000_PHY_LINK_HEALTH 0x13 /* PHY Link Health */ +#define IGP02E1000_PHY_POWER_MGMT 0x19 /* Power Management */ +#define IGP01E1000_PHY_PAGE_SELECT 0x1F /* Page Select */ +#define BM_PHY_PAGE_SELECT 22 /* Page Select for BM */ +#define IGP_PAGE_SHIFT 5 +#define PHY_REG_MASK 0x1F + +/* GS40G - I210 PHY defines */ +#define GS40G_PAGE_SELECT 0x16 +#define GS40G_PAGE_SHIFT 16 +#define GS40G_OFFSET_MASK 0xFFFF +#define GS40G_PAGE_2 0x20000 +#define GS40G_MAC_REG2 0x15 +#define GS40G_MAC_LB 0x4140 +#define GS40G_MAC_SPEED_1G 0X0006 +#define GS40G_COPPER_SPEC 0x0010 + +#define HV_INTC_FC_PAGE_START 768 +#define I82578_ADDR_REG 29 +#define I82577_ADDR_REG 16 +#define I82577_CFG_REG 22 +#define I82577_CFG_ASSERT_CRS_ON_TX (1 << 15) +#define I82577_CFG_ENABLE_DOWNSHIFT (3 << 10) /* auto downshift */ +#define I82577_CTRL_REG 23 + +/* 82577 specific PHY registers */ +#define I82577_PHY_CTRL_2 18 +#define I82577_PHY_LBK_CTRL 19 +#define I82577_PHY_STATUS_2 26 +#define I82577_PHY_DIAG_STATUS 31 + +/* I82577 PHY Status 2 */ +#define I82577_PHY_STATUS2_REV_POLARITY 0x0400 +#define I82577_PHY_STATUS2_MDIX 0x0800 +#define 
I82577_PHY_STATUS2_SPEED_MASK 0x0300 +#define I82577_PHY_STATUS2_SPEED_1000MBPS 0x0200 + +/* I82577 PHY Control 2 */ +#define I82577_PHY_CTRL2_MANUAL_MDIX 0x0200 +#define I82577_PHY_CTRL2_AUTO_MDI_MDIX 0x0400 +#define I82577_PHY_CTRL2_MDIX_CFG_MASK 0x0600 + +/* I82577 PHY Diagnostics Status */ +#define I82577_DSTATUS_CABLE_LENGTH 0x03FC +#define I82577_DSTATUS_CABLE_LENGTH_SHIFT 2 + +/* 82580 PHY Power Management */ +#define E1000_82580_PHY_POWER_MGMT 0xE14 +#define E1000_82580_PM_SPD 0x0001 /* Smart Power Down */ +#define E1000_82580_PM_D0_LPLU 0x0002 /* For D0a states */ +#define E1000_82580_PM_D3_LPLU 0x0004 /* For all other states */ +#define E1000_82580_PM_GO_LINKD 0x0020 /* Go Link Disconnect */ + +#define E1000_MPHY_DIS_ACCESS 0x80000000 /* disable_access bit */ +#define E1000_MPHY_ENA_ACCESS 0x40000000 /* enable_access bit */ +#define E1000_MPHY_BUSY 0x00010000 /* busy bit */ +#define E1000_MPHY_ADDRESS_FNC_OVERRIDE 0x20000000 /* fnc_override bit */ +#define E1000_MPHY_ADDRESS_MASK 0x0000FFFF /* address mask */ + +#define IGP01E1000_PHY_PCS_INIT_REG 0x00B4 +#define IGP01E1000_PHY_POLARITY_MASK 0x0078 + +#define IGP01E1000_PSCR_AUTO_MDIX 0x1000 +#define IGP01E1000_PSCR_FORCE_MDI_MDIX 0x2000 /* 0=MDI, 1=MDIX */ + +#define IGP01E1000_PSCFR_SMART_SPEED 0x0080 + +#define IGP02E1000_PM_SPD 0x0001 /* Smart Power Down */ +#define IGP02E1000_PM_D0_LPLU 0x0002 /* For D0a states */ +#define IGP02E1000_PM_D3_LPLU 0x0004 /* For all other states */ + +#define IGP01E1000_PLHR_SS_DOWNGRADE 0x8000 + +#define IGP01E1000_PSSR_POLARITY_REVERSED 0x0002 +#define IGP01E1000_PSSR_MDIX 0x0800 +#define IGP01E1000_PSSR_SPEED_MASK 0xC000 +#define IGP01E1000_PSSR_SPEED_1000MBPS 0xC000 + +#define IGP02E1000_PHY_CHANNEL_NUM 4 +#define IGP02E1000_PHY_AGC_A 0x11B1 +#define IGP02E1000_PHY_AGC_B 0x12B1 +#define IGP02E1000_PHY_AGC_C 0x14B1 +#define IGP02E1000_PHY_AGC_D 0x18B1 + +#define IGP02E1000_AGC_LENGTH_SHIFT 9 /* Course=15:13, Fine=12:9 */ +#define IGP02E1000_AGC_LENGTH_MASK 0x7F 
+#define IGP02E1000_AGC_RANGE 15 + +#define E1000_CABLE_LENGTH_UNDEFINED 0xFF + +#define E1000_KMRNCTRLSTA_OFFSET 0x001F0000 +#define E1000_KMRNCTRLSTA_OFFSET_SHIFT 16 +#define E1000_KMRNCTRLSTA_REN 0x00200000 +#define E1000_KMRNCTRLSTA_DIAG_OFFSET 0x3 /* Kumeran Diagnostic */ +#define E1000_KMRNCTRLSTA_TIMEOUTS 0x4 /* Kumeran Timeouts */ +#define E1000_KMRNCTRLSTA_INBAND_PARAM 0x9 /* Kumeran InBand Parameters */ +#define E1000_KMRNCTRLSTA_IBIST_DISABLE 0x0200 /* Kumeran IBIST Disable */ +#define E1000_KMRNCTRLSTA_DIAG_NELPBK 0x1000 /* Nearend Loopback mode */ + +#define IFE_PHY_EXTENDED_STATUS_CONTROL 0x10 +#define IFE_PHY_SPECIAL_CONTROL 0x11 /* 100BaseTx PHY Special Ctrl */ +#define IFE_PHY_SPECIAL_CONTROL_LED 0x1B /* PHY Special and LED Ctrl */ +#define IFE_PHY_MDIX_CONTROL 0x1C /* MDI/MDI-X Control */ + +/* IFE PHY Extended Status Control */ +#define IFE_PESC_POLARITY_REVERSED 0x0100 + +/* IFE PHY Special Control */ +#define IFE_PSC_AUTO_POLARITY_DISABLE 0x0010 +#define IFE_PSC_FORCE_POLARITY 0x0020 + +/* IFE PHY Special Control and LED Control */ +#define IFE_PSCL_PROBE_MODE 0x0020 +#define IFE_PSCL_PROBE_LEDS_OFF 0x0006 /* Force LEDs 0 and 2 off */ +#define IFE_PSCL_PROBE_LEDS_ON 0x0007 /* Force LEDs 0 and 2 on */ + +/* IFE PHY MDIX Control */ +#define IFE_PMC_MDIX_STATUS 0x0020 /* 1=MDI-X, 0=MDI */ +#define IFE_PMC_FORCE_MDIX 0x0040 /* 1=force MDI-X, 0=force MDI */ +#define IFE_PMC_AUTO_MDIX 0x0080 /* 1=enable auto, 0=disable */ + +/* SFP modules ID memory locations */ +#define E1000_SFF_IDENTIFIER_OFFSET 0x00 +#define E1000_SFF_IDENTIFIER_SFF 0x02 +#define E1000_SFF_IDENTIFIER_SFP 0x03 + +#define E1000_SFF_ETH_FLAGS_OFFSET 0x06 +/* Flags for SFP modules compatible with ETH up to 1Gb */ +struct sfp_e1000_flags { + u8 e1000_base_sx:1; + u8 e1000_base_lx:1; + u8 e1000_base_cx:1; + u8 e1000_base_t:1; + u8 e100_base_lx:1; + u8 e100_base_fx:1; + u8 e10_base_bx10:1; + u8 e10_base_px:1; +}; + +/* Vendor OUIs: format of OUI is 0x[byte0][byte1][byte2][00] */ 
+#define E1000_SFF_VENDOR_OUI_TYCO 0x00407600 +#define E1000_SFF_VENDOR_OUI_FTL 0x00906500 +#define E1000_SFF_VENDOR_OUI_AVAGO 0x00176A00 +#define E1000_SFF_VENDOR_OUI_INTEL 0x001B2100 + +#endif diff --git a/drivers/staging/igb_avb/e1000_regs.h b/drivers/staging/igb_avb/e1000_regs.h new file mode 100644 index 0000000000000..caf1d04dee87c --- /dev/null +++ b/drivers/staging/igb_avb/e1000_regs.h @@ -0,0 +1,633 @@ +/******************************************************************************* + + Intel(R) Gigabit Ethernet Linux driver + Copyright(c) 2007-2015 Intel Corporation. + + This program is free software; you can redistribute it and/or modify it + under the terms and conditions of the GNU General Public License, + version 2, as published by the Free Software Foundation. + + This program is distributed in the hope it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. + + The full GNU General Public License is included in this distribution in + the file called "COPYING". + + Contact Information: + Linux NICS + e1000-devel Mailing List + Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 + +*******************************************************************************/ + +#ifndef _E1000_REGS_H_ +#define _E1000_REGS_H_ + +#define E1000_CTRL 0x00000 /* Device Control - RW */ +#define E1000_STATUS 0x00008 /* Device Status - RO */ +#define E1000_EECD 0x00010 /* EEPROM/Flash Control - RW */ +#define E1000_EERD 0x00014 /* EEPROM Read - RW */ +#define E1000_CTRL_EXT 0x00018 /* Extended Device Control - RW */ +#define E1000_FLA 0x0001C /* Flash Access - RW */ +#define E1000_MDIC 0x00020 /* MDI Control - RW */ +#define E1000_MDICNFG 0x00E04 /* MDI Config - RW */ +#define E1000_REGISTER_SET_SIZE 0x20000 /* CSR Size */ +#define E1000_EEPROM_INIT_CTRL_WORD_2 0x0F /* EEPROM Init Ctrl Word 2 */ +#define E1000_EEPROM_PCIE_CTRL_WORD_2 0x28 /* EEPROM PCIe Ctrl Word 2 */ +#define E1000_BARCTRL 0x5BBC /* BAR ctrl reg */ +#define E1000_BARCTRL_FLSIZE 0x0700 /* BAR ctrl Flsize */ +#define E1000_BARCTRL_CSRSIZE 0x2000 /* BAR ctrl CSR size */ +#define E1000_MPHY_ADDR_CTRL 0x0024 /* GbE MPHY Address Control */ +#define E1000_MPHY_DATA 0x0E10 /* GBE MPHY Data */ +#define E1000_MPHY_STAT 0x0E0C /* GBE MPHY Statistics */ +#define E1000_PPHY_CTRL 0x5b48 /* PCIe PHY Control */ +#define E1000_I350_BARCTRL 0x5BFC /* BAR ctrl reg */ +#define E1000_I350_DTXMXPKTSZ 0x355C /* Maximum sent packet size reg*/ +#define E1000_SCTL 0x00024 /* SerDes Control - RW */ +#define E1000_FCAL 0x00028 /* Flow Control Address Low - RW */ +#define E1000_FCAH 0x0002C /* Flow Control Address High -RW */ +#define E1000_FCT 0x00030 /* Flow Control Type - RW */ +#define E1000_CONNSW 0x00034 /* Copper/Fiber switch control - RW */ +#define E1000_VET 0x00038 /* VLAN Ether Type - RW */ +#define E1000_TSSDP 0x0003C /* Time Sync SDP Configuration Register - RW */ +#define E1000_ICR 0x000C0 /* Interrupt Cause Read - R/clr */ +#define E1000_ITR 0x000C4 /* Interrupt Throttling Rate - RW */ +#define E1000_ICS 0x000C8 /* Interrupt Cause Set - WO */ +#define E1000_IMS 
0x000D0 /* Interrupt Mask Set - RW */ +#define E1000_IMC 0x000D8 /* Interrupt Mask Clear - WO */ +#define E1000_IAM 0x000E0 /* Interrupt Acknowledge Auto Mask */ +#define E1000_RCTL 0x00100 /* Rx Control - RW */ +#define E1000_FCTTV 0x00170 /* Flow Control Transmit Timer Value - RW */ +#define E1000_TXCW 0x00178 /* Tx Configuration Word - RW */ +#define E1000_RXCW 0x00180 /* Rx Configuration Word - RO */ +#define E1000_EICR 0x01580 /* Ext. Interrupt Cause Read - R/clr */ +#define E1000_EITR(_n) (0x01680 + (0x4 * (_n))) +#define E1000_EICS 0x01520 /* Ext. Interrupt Cause Set - W0 */ +#define E1000_EIMS 0x01524 /* Ext. Interrupt Mask Set/Read - RW */ +#define E1000_EIMC 0x01528 /* Ext. Interrupt Mask Clear - WO */ +#define E1000_EIAC 0x0152C /* Ext. Interrupt Auto Clear - RW */ +#define E1000_EIAM 0x01530 /* Ext. Interrupt Ack Auto Clear Mask - RW */ +#define E1000_GPIE 0x01514 /* General Purpose Interrupt Enable - RW */ +#define E1000_IVAR0 0x01700 /* Interrupt Vector Allocation (array) - RW */ +#define E1000_IVAR_MISC 0x01740 /* IVAR for "other" causes - RW */ +#define E1000_TCTL 0x00400 /* Tx Control - RW */ +#define E1000_TCTL_EXT 0x00404 /* Extended Tx Control - RW */ +#define E1000_TIPG 0x00410 /* Tx Inter-packet gap -RW */ +#define E1000_AIT 0x00458 /* Adaptive Interframe Spacing Throttle - RW */ +#define E1000_LEDCTL 0x00E00 /* LED Control - RW */ +#define E1000_LEDMUX 0x08130 /* LED MUX Control */ +#define E1000_EXTCNF_CTRL 0x00F00 /* Extended Configuration Control */ +#define E1000_EXTCNF_SIZE 0x00F08 /* Extended Configuration Size */ +#define E1000_PHY_CTRL 0x00F10 /* PHY Control Register in CSR */ +#define E1000_PBA 0x01000 /* Packet Buffer Allocation - RW */ +#define E1000_PBS 0x01008 /* Packet Buffer Size */ +#define E1000_EEMNGCTL 0x01010 /* MNG EEprom Control */ +#define E1000_EEARBC 0x01024 /* EEPROM Auto Read Bus Control */ +#define E1000_EEARBC_I210 0x12024 /* EEPROM Auto Read Bus Control */ +#define E1000_EEWR 0x0102C /* EEPROM Write Register - RW 
*/ +#define E1000_FLOP 0x0103C /* FLASH Opcode Register */ +#define E1000_I2CCMD 0x01028 /* SFPI2C Command Register - RW */ +#define E1000_I2CPARAMS 0x0102C /* SFPI2C Parameters Register - RW */ +#define E1000_I2CBB_EN 0x00000100 /* I2C - Bit Bang Enable */ +#define E1000_I2C_CLK_OUT 0x00000200 /* I2C- Clock */ +#define E1000_I2C_DATA_OUT 0x00000400 /* I2C- Data Out */ +#define E1000_I2C_DATA_OE_N 0x00000800 /* I2C- Data Output Enable */ +#define E1000_I2C_DATA_IN 0x00001000 /* I2C- Data In */ +#define E1000_I2C_CLK_OE_N 0x00002000 /* I2C- Clock Output Enable */ +#define E1000_I2C_CLK_IN 0x00004000 /* I2C- Clock In */ +#define E1000_I2C_CLK_STRETCH_DIS 0x00008000 /* I2C- Dis Clk Stretching */ +#define E1000_WDSTP 0x01040 /* Watchdog Setup - RW */ +#define E1000_SWDSTS 0x01044 /* SW Device Status - RW */ +#define E1000_FRTIMER 0x01048 /* Free Running Timer - RW */ +#define E1000_TCPTIMER 0x0104C /* TCP Timer - RW */ +#define E1000_VPDDIAG 0x01060 /* VPD Diagnostic - RO */ +#define E1000_ICR_V2 0x01500 /* Intr Cause - new location - RC */ +#define E1000_ICS_V2 0x01504 /* Intr Cause Set - new location - WO */ +#define E1000_IMS_V2 0x01508 /* Intr Mask Set/Read - new location - RW */ +#define E1000_IMC_V2 0x0150C /* Intr Mask Clear - new location - WO */ +#define E1000_IAM_V2 0x01510 /* Intr Ack Auto Mask - new location - RW */ +#define E1000_ERT 0x02008 /* Early Rx Threshold - RW */ +#define E1000_FCRTL 0x02160 /* Flow Control Receive Threshold Low - RW */ +#define E1000_FCRTH 0x02168 /* Flow Control Receive Threshold High - RW */ +#define E1000_PSRCTL 0x02170 /* Packet Split Receive Control - RW */ +#define E1000_RDFH 0x02410 /* Rx Data FIFO Head - RW */ +#define E1000_RDFT 0x02418 /* Rx Data FIFO Tail - RW */ +#define E1000_RDFHS 0x02420 /* Rx Data FIFO Head Saved - RW */ +#define E1000_RDFTS 0x02428 /* Rx Data FIFO Tail Saved - RW */ +#define E1000_RDFPC 0x02430 /* Rx Data FIFO Packet Count - RW */ +#define E1000_PBRTH 0x02458 /* PB Rx Arbitration Threshold - RW */ 
+#define E1000_FCRTV 0x02460 /* Flow Control Refresh Timer Value - RW */ +/* Split and Replication Rx Control - RW */ +#define E1000_RDPUMB 0x025CC /* DMA Rx Descriptor uC Mailbox - RW */ +#define E1000_RDPUAD 0x025D0 /* DMA Rx Descriptor uC Addr Command - RW */ +#define E1000_RDPUWD 0x025D4 /* DMA Rx Descriptor uC Data Write - RW */ +#define E1000_RDPURD 0x025D8 /* DMA Rx Descriptor uC Data Read - RW */ +#define E1000_RDPUCTL 0x025DC /* DMA Rx Descriptor uC Control - RW */ +#define E1000_PBDIAG 0x02458 /* Packet Buffer Diagnostic - RW */ +#define E1000_RXPBS 0x02404 /* Rx Packet Buffer Size - RW */ +#define E1000_IRPBS 0x02404 /* Same as RXPBS, renamed for newer Si - RW */ +#define E1000_PBRWAC 0x024E8 /* Rx packet buffer wrap around counter - RO */ +#define E1000_RDTR 0x02820 /* Rx Delay Timer - RW */ +#define E1000_RADV 0x0282C /* Rx Interrupt Absolute Delay Timer - RW */ +#define E1000_EMIADD 0x10 /* Extended Memory Indirect Address */ +#define E1000_EMIDATA 0x11 /* Extended Memory Indirect Data */ +#define E1000_SRWR 0x12018 /* Shadow Ram Write Register - RW */ +#define E1000_I210_FLMNGCTL 0x12038 +#define E1000_I210_FLMNGDATA 0x1203C +#define E1000_I210_FLMNGCNT 0x12040 + +#define E1000_I210_FLSWCTL 0x12048 +#define E1000_I210_FLSWDATA 0x1204C +#define E1000_I210_FLSWCNT 0x12050 + +#define E1000_I210_FLA 0x1201C + +#define E1000_INVM_DATA_REG(_n) (0x12120 + 4*(_n)) +#define E1000_INVM_SIZE 64 /* Number of INVM Data Registers */ + +/* QAV Tx mode control register */ +#define E1000_I210_TQAVCTRL 0x3570 +#define E1000_DTXMXPKTSZ 0x0355C + +/* High credit registers where _n can be 0 or 1. */ +#define E1000_I210_TQAVHC(_n) (0x300C + 0x40 * (_n)) + +/* Queues fetch arbitration priority control register */ +#define E1000_I210_TQAVARBCTRL 0x3574 +/* Queues priority masks where _n and _p can be 0-3. */ +#define E1000_TQAVARBCTRL_QUEUE_PRI(_n, _p) ((_p) << (2 * (_n))) +/* QAV Tx mode control registers where _n can be 0 or 1. 
*/ +#define E1000_I210_TQAVCC(_n) (0x3004 + 0x40 * (_n)) + +/* QAV Tx mode control register bitfields masks */ +#define E1000_TQAVCC_IDLE_SLOPE 0xFFFF /* Idle slope */ +#define E1000_TQAVCC_KEEP_CREDITS (1 << 30) /* Keep credits opt enable */ +#define E1000_TQAVCC_QUEUE_MODE (1 << 31) /* SP vs. SR Tx mode */ + +/* Good transmitted packets counter registers */ +#define E1000_PQGPTC(_n) (0x010014 + (0x100 * (_n))) + +/* Queues packet buffer size masks where _n can be 0-3 and _s 0-63 [kB] */ +#define E1000_I210_TXPBS_SIZE(_n, _s) ((_s) << (6 * (_n))) + +#define E1000_MMDAC 13 /* MMD Access Control */ +#define E1000_MMDAAD 14 /* MMD Access Address/Data */ + +/* Convenience macros + * + * Note: "_n" is the queue number of the register to be written to. + * + * Example usage: + * E1000_RDBAL_REG(current_rx_queue) + */ +#define E1000_RDBAL(_n) ((_n) < 4 ? (0x02800 + ((_n) * 0x100)) : \ + (0x0C000 + ((_n) * 0x40))) +#define E1000_RDBAH(_n) ((_n) < 4 ? (0x02804 + ((_n) * 0x100)) : \ + (0x0C004 + ((_n) * 0x40))) +#define E1000_RDLEN(_n) ((_n) < 4 ? (0x02808 + ((_n) * 0x100)) : \ + (0x0C008 + ((_n) * 0x40))) +#define E1000_SRRCTL(_n) ((_n) < 4 ? (0x0280C + ((_n) * 0x100)) : \ + (0x0C00C + ((_n) * 0x40))) +#define E1000_RDH(_n) ((_n) < 4 ? (0x02810 + ((_n) * 0x100)) : \ + (0x0C010 + ((_n) * 0x40))) +#define E1000_RXCTL(_n) ((_n) < 4 ? (0x02814 + ((_n) * 0x100)) : \ + (0x0C014 + ((_n) * 0x40))) +#define E1000_DCA_RXCTRL(_n) E1000_RXCTL(_n) +#define E1000_RDT(_n) ((_n) < 4 ? (0x02818 + ((_n) * 0x100)) : \ + (0x0C018 + ((_n) * 0x40))) +#define E1000_RXDCTL(_n) ((_n) < 4 ? (0x02828 + ((_n) * 0x100)) : \ + (0x0C028 + ((_n) * 0x40))) +#define E1000_RQDPC(_n) ((_n) < 4 ? (0x02830 + ((_n) * 0x100)) : \ + (0x0C030 + ((_n) * 0x40))) +#define E1000_TDBAL(_n) ((_n) < 4 ? (0x03800 + ((_n) * 0x100)) : \ + (0x0E000 + ((_n) * 0x40))) +#define E1000_TDBAH(_n) ((_n) < 4 ? (0x03804 + ((_n) * 0x100)) : \ + (0x0E004 + ((_n) * 0x40))) +#define E1000_TDLEN(_n) ((_n) < 4 ? 
(0x03808 + ((_n) * 0x100)) : \ + (0x0E008 + ((_n) * 0x40))) +#define E1000_TDH(_n) ((_n) < 4 ? (0x03810 + ((_n) * 0x100)) : \ + (0x0E010 + ((_n) * 0x40))) +#define E1000_TXCTL(_n) ((_n) < 4 ? (0x03814 + ((_n) * 0x100)) : \ + (0x0E014 + ((_n) * 0x40))) +#define E1000_DCA_TXCTRL(_n) E1000_TXCTL(_n) +#define E1000_TDT(_n) ((_n) < 4 ? (0x03818 + ((_n) * 0x100)) : \ + (0x0E018 + ((_n) * 0x40))) +#define E1000_TXDCTL(_n) ((_n) < 4 ? (0x03828 + ((_n) * 0x100)) : \ + (0x0E028 + ((_n) * 0x40))) +#define E1000_TDWBAL(_n) ((_n) < 4 ? (0x03838 + ((_n) * 0x100)) : \ + (0x0E038 + ((_n) * 0x40))) +#define E1000_TDWBAH(_n) ((_n) < 4 ? (0x0383C + ((_n) * 0x100)) : \ + (0x0E03C + ((_n) * 0x40))) +#define E1000_TARC(_n) (0x03840 + ((_n) * 0x100)) +#define E1000_RSRPD 0x02C00 /* Rx Small Packet Detect - RW */ +#define E1000_RAID 0x02C08 /* Receive Ack Interrupt Delay - RW */ +#define E1000_KABGTXD 0x03004 /* AFE Band Gap Transmit Ref Data */ +#define E1000_PSRTYPE(_i) (0x05480 + ((_i) * 4)) +#define E1000_RAL(_i) (((_i) <= 15) ? (0x05400 + ((_i) * 8)) : \ + (0x054E0 + ((_i - 16) * 8))) +#define E1000_RAH(_i) (((_i) <= 15) ? 
(0x05404 + ((_i) * 8)) : \ + (0x054E4 + ((_i - 16) * 8))) +#define E1000_SHRAL(_i) (0x05438 + ((_i) * 8)) +#define E1000_SHRAH(_i) (0x0543C + ((_i) * 8)) +#define E1000_IP4AT_REG(_i) (0x05840 + ((_i) * 8)) +#define E1000_IP6AT_REG(_i) (0x05880 + ((_i) * 4)) +#define E1000_WUPM_REG(_i) (0x05A00 + ((_i) * 4)) +#define E1000_FFMT_REG(_i) (0x09000 + ((_i) * 8)) +#define E1000_FFVT_REG(_i) (0x09800 + ((_i) * 8)) +#define E1000_FFLT_REG(_i) (0x05F00 + ((_i) * 8)) +#define E1000_PBSLAC 0x03100 /* Pkt Buffer Slave Access Control */ +#define E1000_PBSLAD(_n) (0x03110 + (0x4 * (_n))) /* Pkt Buffer DWORD */ +#define E1000_TXPBS 0x03404 /* Tx Packet Buffer Size - RW */ +/* Same as TXPBS, renamed for newer Si - RW */ +#define E1000_ITPBS 0x03404 +#define E1000_TDFH 0x03410 /* Tx Data FIFO Head - RW */ +#define E1000_TDFT 0x03418 /* Tx Data FIFO Tail - RW */ +#define E1000_TDFHS 0x03420 /* Tx Data FIFO Head Saved - RW */ +#define E1000_TDFTS 0x03428 /* Tx Data FIFO Tail Saved - RW */ +#define E1000_TDFPC 0x03430 /* Tx Data FIFO Packet Count - RW */ +#define E1000_TDPUMB 0x0357C /* DMA Tx Desc uC Mail Box - RW */ +#define E1000_TDPUAD 0x03580 /* DMA Tx Desc uC Addr Command - RW */ +#define E1000_TDPUWD 0x03584 /* DMA Tx Desc uC Data Write - RW */ +#define E1000_TDPURD 0x03588 /* DMA Tx Desc uC Data Read - RW */ +#define E1000_TDPUCTL 0x0358C /* DMA Tx Desc uC Control - RW */ +#define E1000_DTXCTL 0x03590 /* DMA Tx Control - RW */ +#define E1000_DTXTCPFLGL 0x0359C /* DMA Tx Control flag low - RW */ +#define E1000_DTXTCPFLGH 0x035A0 /* DMA Tx Control flag high - RW */ +/* DMA Tx Max Total Allow Size Reqs - RW */ +#define E1000_DTXMXSZRQ 0x03540 +#define E1000_TIDV 0x03820 /* Tx Interrupt Delay Value - RW */ +#define E1000_TADV 0x0382C /* Tx Interrupt Absolute Delay Val - RW */ +#define E1000_CRCERRS 0x04000 /* CRC Error Count - R/clr */ +#define E1000_ALGNERRC 0x04004 /* Alignment Error Count - R/clr */ +#define E1000_SYMERRS 0x04008 /* Symbol Error Count - R/clr */ +#define 
E1000_RXERRC 0x0400C /* Receive Error Count - R/clr */ +#define E1000_MPC 0x04010 /* Missed Packet Count - R/clr */ +#define E1000_SCC 0x04014 /* Single Collision Count - R/clr */ +#define E1000_ECOL 0x04018 /* Excessive Collision Count - R/clr */ +#define E1000_MCC 0x0401C /* Multiple Collision Count - R/clr */ +#define E1000_LATECOL 0x04020 /* Late Collision Count - R/clr */ +#define E1000_COLC 0x04028 /* Collision Count - R/clr */ +#define E1000_DC 0x04030 /* Defer Count - R/clr */ +#define E1000_TNCRS 0x04034 /* Tx-No CRS - R/clr */ +#define E1000_SEC 0x04038 /* Sequence Error Count - R/clr */ +#define E1000_CEXTERR 0x0403C /* Carrier Extension Error Count - R/clr */ +#define E1000_RLEC 0x04040 /* Receive Length Error Count - R/clr */ +#define E1000_XONRXC 0x04048 /* XON Rx Count - R/clr */ +#define E1000_XONTXC 0x0404C /* XON Tx Count - R/clr */ +#define E1000_XOFFRXC 0x04050 /* XOFF Rx Count - R/clr */ +#define E1000_XOFFTXC 0x04054 /* XOFF Tx Count - R/clr */ +#define E1000_FCRUC 0x04058 /* Flow Control Rx Unsupported Count- R/clr */ +#define E1000_PRC64 0x0405C /* Packets Rx (64 bytes) - R/clr */ +#define E1000_PRC127 0x04060 /* Packets Rx (65-127 bytes) - R/clr */ +#define E1000_PRC255 0x04064 /* Packets Rx (128-255 bytes) - R/clr */ +#define E1000_PRC511 0x04068 /* Packets Rx (255-511 bytes) - R/clr */ +#define E1000_PRC1023 0x0406C /* Packets Rx (512-1023 bytes) - R/clr */ +#define E1000_PRC1522 0x04070 /* Packets Rx (1024-1522 bytes) - R/clr */ +#define E1000_GPRC 0x04074 /* Good Packets Rx Count - R/clr */ +#define E1000_BPRC 0x04078 /* Broadcast Packets Rx Count - R/clr */ +#define E1000_MPRC 0x0407C /* Multicast Packets Rx Count - R/clr */ +#define E1000_GPTC 0x04080 /* Good Packets Tx Count - R/clr */ +#define E1000_GORCL 0x04088 /* Good Octets Rx Count Low - R/clr */ +#define E1000_GORCH 0x0408C /* Good Octets Rx Count High - R/clr */ +#define E1000_GOTCL 0x04090 /* Good Octets Tx Count Low - R/clr */ +#define E1000_GOTCH 0x04094 /* Good Octets Tx 
Count High - R/clr */ +#define E1000_RNBC 0x040A0 /* Rx No Buffers Count - R/clr */ +#define E1000_RUC 0x040A4 /* Rx Undersize Count - R/clr */ +#define E1000_RFC 0x040A8 /* Rx Fragment Count - R/clr */ +#define E1000_ROC 0x040AC /* Rx Oversize Count - R/clr */ +#define E1000_RJC 0x040B0 /* Rx Jabber Count - R/clr */ +#define E1000_MGTPRC 0x040B4 /* Management Packets Rx Count - R/clr */ +#define E1000_MGTPDC 0x040B8 /* Management Packets Dropped Count - R/clr */ +#define E1000_MGTPTC 0x040BC /* Management Packets Tx Count - R/clr */ +#define E1000_TORL 0x040C0 /* Total Octets Rx Low - R/clr */ +#define E1000_TORH 0x040C4 /* Total Octets Rx High - R/clr */ +#define E1000_TOTL 0x040C8 /* Total Octets Tx Low - R/clr */ +#define E1000_TOTH 0x040CC /* Total Octets Tx High - R/clr */ +#define E1000_TPR 0x040D0 /* Total Packets Rx - R/clr */ +#define E1000_TPT 0x040D4 /* Total Packets Tx - R/clr */ +#define E1000_PTC64 0x040D8 /* Packets Tx (64 bytes) - R/clr */ +#define E1000_PTC127 0x040DC /* Packets Tx (65-127 bytes) - R/clr */ +#define E1000_PTC255 0x040E0 /* Packets Tx (128-255 bytes) - R/clr */ +#define E1000_PTC511 0x040E4 /* Packets Tx (256-511 bytes) - R/clr */ +#define E1000_PTC1023 0x040E8 /* Packets Tx (512-1023 bytes) - R/clr */ +#define E1000_PTC1522 0x040EC /* Packets Tx (1024-1522 Bytes) - R/clr */ +#define E1000_MPTC 0x040F0 /* Multicast Packets Tx Count - R/clr */ +#define E1000_BPTC 0x040F4 /* Broadcast Packets Tx Count - R/clr */ +#define E1000_TSCTC 0x040F8 /* TCP Segmentation Context Tx - R/clr */ +#define E1000_TSCTFC 0x040FC /* TCP Segmentation Context Tx Fail - R/clr */ +#define E1000_IAC 0x04100 /* Interrupt Assertion Count */ +#define E1000_ICRXPTC 0x04104 /* Interrupt Cause Rx Pkt Timer Expire Count */ +#define E1000_ICRXATC 0x04108 /* Interrupt Cause Rx Abs Timer Expire Count */ +#define E1000_ICTXPTC 0x0410C /* Interrupt Cause Tx Pkt Timer Expire Count */ +#define E1000_ICTXATC 0x04110 /* Interrupt Cause Tx Abs Timer Expire Count */ +#define 
E1000_ICTXQEC 0x04118 /* Interrupt Cause Tx Queue Empty Count */ +#define E1000_ICTXQMTC 0x0411C /* Interrupt Cause Tx Queue Min Thresh Count */ +#define E1000_ICRXDMTC 0x04120 /* Interrupt Cause Rx Desc Min Thresh Count */ +#define E1000_ICRXOC 0x04124 /* Interrupt Cause Receiver Overrun Count */ + +/* Virtualization statistical counters */ +#define E1000_PFVFGPRC(_n) (0x010010 + (0x100 * (_n))) +#define E1000_PFVFGPTC(_n) (0x010014 + (0x100 * (_n))) +#define E1000_PFVFGORC(_n) (0x010018 + (0x100 * (_n))) +#define E1000_PFVFGOTC(_n) (0x010034 + (0x100 * (_n))) +#define E1000_PFVFMPRC(_n) (0x010038 + (0x100 * (_n))) +#define E1000_PFVFGPRLBC(_n) (0x010040 + (0x100 * (_n))) +#define E1000_PFVFGPTLBC(_n) (0x010044 + (0x100 * (_n))) +#define E1000_PFVFGORLBC(_n) (0x010048 + (0x100 * (_n))) +#define E1000_PFVFGOTLBC(_n) (0x010050 + (0x100 * (_n))) + +/* LinkSec */ +#define E1000_LSECTXUT 0x04300 /* Tx Untagged Pkt Cnt */ +#define E1000_LSECTXPKTE 0x04304 /* Encrypted Tx Pkts Cnt */ +#define E1000_LSECTXPKTP 0x04308 /* Protected Tx Pkt Cnt */ +#define E1000_LSECTXOCTE 0x0430C /* Encrypted Tx Octets Cnt */ +#define E1000_LSECTXOCTP 0x04310 /* Protected Tx Octets Cnt */ +#define E1000_LSECRXUT 0x04314 /* Untagged non-Strict Rx Pkt Cnt */ +#define E1000_LSECRXOCTD 0x0431C /* Rx Octets Decrypted Count */ +#define E1000_LSECRXOCTV 0x04320 /* Rx Octets Validated */ +#define E1000_LSECRXBAD 0x04324 /* Rx Bad Tag */ +#define E1000_LSECRXNOSCI 0x04328 /* Rx Packet No SCI Count */ +#define E1000_LSECRXUNSCI 0x0432C /* Rx Packet Unknown SCI Count */ +#define E1000_LSECRXUNCH 0x04330 /* Rx Unchecked Packets Count */ +#define E1000_LSECRXDELAY 0x04340 /* Rx Delayed Packet Count */ +#define E1000_LSECRXLATE 0x04350 /* Rx Late Packets Count */ +#define E1000_LSECRXOK(_n) (0x04360 + (0x04 * (_n))) /* Rx Pkt OK Cnt */ +#define E1000_LSECRXINV(_n) (0x04380 + (0x04 * (_n))) /* Rx Invalid Cnt */ +#define E1000_LSECRXNV(_n) (0x043A0 + (0x04 * (_n))) /* Rx Not Valid Cnt */ +#define 
E1000_LSECRXUNSA 0x043C0 /* Rx Unused SA Count */ +#define E1000_LSECRXNUSA 0x043D0 /* Rx Not Using SA Count */ +#define E1000_LSECTXCAP 0x0B000 /* Tx Capabilities Register - RO */ +#define E1000_LSECRXCAP 0x0B300 /* Rx Capabilities Register - RO */ +#define E1000_LSECTXCTRL 0x0B004 /* Tx Control - RW */ +#define E1000_LSECRXCTRL 0x0B304 /* Rx Control - RW */ +#define E1000_LSECTXSCL 0x0B008 /* Tx SCI Low - RW */ +#define E1000_LSECTXSCH 0x0B00C /* Tx SCI High - RW */ +#define E1000_LSECTXSA 0x0B010 /* Tx SA0 - RW */ +#define E1000_LSECTXPN0 0x0B018 /* Tx SA PN 0 - RW */ +#define E1000_LSECTXPN1 0x0B01C /* Tx SA PN 1 - RW */ +#define E1000_LSECRXSCL 0x0B3D0 /* Rx SCI Low - RW */ +#define E1000_LSECRXSCH 0x0B3E0 /* Rx SCI High - RW */ +/* LinkSec Tx 128-bit Key 0 - WO */ +#define E1000_LSECTXKEY0(_n) (0x0B020 + (0x04 * (_n))) +/* LinkSec Tx 128-bit Key 1 - WO */ +#define E1000_LSECTXKEY1(_n) (0x0B030 + (0x04 * (_n))) +#define E1000_LSECRXSA(_n) (0x0B310 + (0x04 * (_n))) /* Rx SAs - RW */ +#define E1000_LSECRXPN(_n) (0x0B330 + (0x04 * (_n))) /* Rx SAs - RW */ +/* LinkSec Rx Keys - where _n is the SA no. and _m the 4 dwords of the 128 bit + * key - RW. 
+ */ +#define E1000_LSECRXKEY(_n, _m) (0x0B350 + (0x10 * (_n)) + (0x04 * (_m))) + +#define E1000_SSVPC 0x041A0 /* Switch Security Violation Pkt Cnt */ +#define E1000_IPSCTRL 0xB430 /* IpSec Control Register */ +#define E1000_IPSRXCMD 0x0B408 /* IPSec Rx Command Register - RW */ +#define E1000_IPSRXIDX 0x0B400 /* IPSec Rx Index - RW */ +/* IPSec Rx IPv4/v6 Address - RW */ +#define E1000_IPSRXIPADDR(_n) (0x0B420 + (0x04 * (_n))) +/* IPSec Rx 128-bit Key - RW */ +#define E1000_IPSRXKEY(_n) (0x0B410 + (0x04 * (_n))) +#define E1000_IPSRXSALT 0x0B404 /* IPSec Rx Salt - RW */ +#define E1000_IPSRXSPI 0x0B40C /* IPSec Rx SPI - RW */ +/* IPSec Tx 128-bit Key - RW */ +#define E1000_IPSTXKEY(_n) (0x0B460 + (0x04 * (_n))) +#define E1000_IPSTXSALT 0x0B454 /* IPSec Tx Salt - RW */ +#define E1000_IPSTXIDX 0x0B450 /* IPSec Tx SA IDX - RW */ +#define E1000_PCS_CFG0 0x04200 /* PCS Configuration 0 - RW */ +#define E1000_PCS_LCTL 0x04208 /* PCS Link Control - RW */ +#define E1000_PCS_LSTAT 0x0420C /* PCS Link Status - RO */ +#define E1000_CBTMPC 0x0402C /* Circuit Breaker Tx Packet Count */ +#define E1000_HTDPMC 0x0403C /* Host Transmit Discarded Packets */ +#define E1000_CBRDPC 0x04044 /* Circuit Breaker Rx Dropped Count */ +#define E1000_CBRMPC 0x040FC /* Circuit Breaker Rx Packet Count */ +#define E1000_RPTHC 0x04104 /* Rx Packets To Host */ +#define E1000_HGPTC 0x04118 /* Host Good Packets Tx Count */ +#define E1000_HTCBDPC 0x04124 /* Host Tx Circuit Breaker Dropped Count */ +#define E1000_HGORCL 0x04128 /* Host Good Octets Received Count Low */ +#define E1000_HGORCH 0x0412C /* Host Good Octets Received Count High */ +#define E1000_HGOTCL 0x04130 /* Host Good Octets Transmit Count Low */ +#define E1000_HGOTCH 0x04134 /* Host Good Octets Transmit Count High */ +#define E1000_LENERRS 0x04138 /* Length Errors Count */ +#define E1000_SCVPC 0x04228 /* SerDes/SGMII Code Violation Pkt Count */ +#define E1000_HRMPC 0x0A018 /* Header Redirection Missed Packet Count */ +#define 
E1000_PCS_ANADV 0x04218 /* AN advertisement - RW */ +#define E1000_PCS_LPAB 0x0421C /* Link Partner Ability - RW */ +#define E1000_PCS_NPTX 0x04220 /* AN Next Page Transmit - RW */ +#define E1000_PCS_LPABNP 0x04224 /* Link Partner Ability Next Pg - RW */ +#define E1000_RXCSUM 0x05000 /* Rx Checksum Control - RW */ +#define E1000_RLPML 0x05004 /* Rx Long Packet Max Length */ +#define E1000_RFCTL 0x05008 /* Receive Filter Control*/ +#define E1000_MTA 0x05200 /* Multicast Table Array - RW Array */ +#define E1000_RA 0x05400 /* Receive Address - RW Array */ +#define E1000_RA2 0x054E0 /* 2nd half of Rx address array - RW Array */ +#define E1000_VFTA 0x05600 /* VLAN Filter Table Array - RW Array */ +#define E1000_VT_CTL 0x0581C /* VMDq Control - RW */ +#define E1000_CIAA 0x05B88 /* Config Indirect Access Address - RW */ +#define E1000_CIAD 0x05B8C /* Config Indirect Access Data - RW */ +#define E1000_VFQA0 0x0B000 /* VLAN Filter Queue Array 0 - RW Array */ +#define E1000_VFQA1 0x0B200 /* VLAN Filter Queue Array 1 - RW Array */ +#define E1000_WUC 0x05800 /* Wakeup Control - RW */ +#define E1000_WUFC 0x05808 /* Wakeup Filter Control - RW */ +#define E1000_WUS 0x05810 /* Wakeup Status - RO */ +#define E1000_MANC 0x05820 /* Management Control - RW */ +#define E1000_IPAV 0x05838 /* IP Address Valid - RW */ +#define E1000_IP4AT 0x05840 /* IPv4 Address Table - RW Array */ +#define E1000_IP6AT 0x05880 /* IPv6 Address Table - RW Array */ +#define E1000_WUPL 0x05900 /* Wakeup Packet Length - RW */ +#define E1000_WUPM 0x05A00 /* Wakeup Packet Memory - RO A */ +#define E1000_PBACL 0x05B68 /* MSIx PBA Clear - Read/Write 1's to clear */ +#define E1000_FFLT 0x05F00 /* Flexible Filter Length Table - RW Array */ +#define E1000_HOST_IF 0x08800 /* Host Interface */ +#define E1000_HIBBA 0x8F40 /* Host Interface Buffer Base Address */ +/* Flexible Host Filter Table */ +#define E1000_FHFT(_n) (0x09000 + ((_n) * 0x100)) +/* Ext Flexible Host Filter Table */ +#define E1000_FHFT_EXT(_n) (0x09A00 
+ ((_n) * 0x100)) + +#define E1000_KMRNCTRLSTA 0x00034 /* MAC-PHY interface - RW */ +#define E1000_MANC2H 0x05860 /* Management Control To Host - RW */ +/* Management Decision Filters */ +#define E1000_MDEF(_n) (0x05890 + (4 * (_n))) +#define E1000_SW_FW_SYNC 0x05B5C /* SW-FW Synchronization - RW */ +#define E1000_CCMCTL 0x05B48 /* CCM Control Register */ +#define E1000_GIOCTL 0x05B44 /* GIO Analog Control Register */ +#define E1000_SCCTL 0x05B4C /* PCIc PLL Configuration Register */ +#define E1000_GCR 0x05B00 /* PCI-Ex Control */ +#define E1000_GCR2 0x05B64 /* PCI-Ex Control #2 */ +#define E1000_GSCL_1 0x05B10 /* PCI-Ex Statistic Control #1 */ +#define E1000_GSCL_2 0x05B14 /* PCI-Ex Statistic Control #2 */ +#define E1000_GSCL_3 0x05B18 /* PCI-Ex Statistic Control #3 */ +#define E1000_GSCL_4 0x05B1C /* PCI-Ex Statistic Control #4 */ +#define E1000_FACTPS 0x05B30 /* Function Active and Power State to MNG */ +#define E1000_SWSM 0x05B50 /* SW Semaphore */ +#define E1000_FWSM 0x05B54 /* FW Semaphore */ +/* Driver-only SW semaphore (not used by BOOT agents) */ +#define E1000_SWSM2 0x05B58 +#define E1000_DCA_ID 0x05B70 /* DCA Requester ID Information - RO */ +#define E1000_DCA_CTRL 0x05B74 /* DCA Control - RW */ +#define E1000_UFUSE 0x05B78 /* UFUSE - RO */ +#define E1000_FFLT_DBG 0x05F04 /* Debug Register */ +#define E1000_HICR 0x08F00 /* Host Interface Control */ +#define E1000_FWSTS 0x08F0C /* FW Status */ + +/* RSS registers */ +#define E1000_CPUVEC 0x02C10 /* CPU Vector Register - RW */ +#define E1000_MRQC 0x05818 /* Multiple Receive Control - RW */ +#define E1000_IMIR(_i) (0x05A80 + ((_i) * 4)) /* Immediate Interrupt */ +#define E1000_IMIREXT(_i) (0x05AA0 + ((_i) * 4)) /* Immediate INTR Ext*/ +#define E1000_IMIRVP 0x05AC0 /* Immediate INT Rx VLAN Priority -RW */ +#define E1000_MSIXBM(_i) (0x01600 + ((_i) * 4)) /* MSI-X Alloc Reg -RW */ +#define E1000_RETA(_i) (0x05C00 + ((_i) * 4)) /* Redirection Table - RW */ +#define E1000_RSSRK(_i) (0x05C80 + ((_i) * 4)) /* RSS 
Random Key - RW */ +#define E1000_RSSIM 0x05864 /* RSS Interrupt Mask */ +#define E1000_RSSIR 0x05868 /* RSS Interrupt Request */ +/* VT Registers */ +#define E1000_SWPBS 0x03004 /* Switch Packet Buffer Size - RW */ +#define E1000_MBVFICR 0x00C80 /* Mailbox VF Cause - RWC */ +#define E1000_MBVFIMR 0x00C84 /* Mailbox VF int Mask - RW */ +#define E1000_VFLRE 0x00C88 /* VF Register Events - RWC */ +#define E1000_VFRE 0x00C8C /* VF Receive Enables */ +#define E1000_VFTE 0x00C90 /* VF Transmit Enables */ +#define E1000_QDE 0x02408 /* Queue Drop Enable - RW */ +#define E1000_DTXSWC 0x03500 /* DMA Tx Switch Control - RW */ +#define E1000_WVBR 0x03554 /* VM Wrong Behavior - RWS */ +#define E1000_RPLOLR 0x05AF0 /* Replication Offload - RW */ +#define E1000_UTA 0x0A000 /* Unicast Table Array - RW */ +#define E1000_IOVTCL 0x05BBC /* IOV Control Register */ +#define E1000_VMRCTL 0X05D80 /* Virtual Mirror Rule Control */ +#define E1000_VMRVLAN 0x05D90 /* Virtual Mirror Rule VLAN */ +#define E1000_VMRVM 0x05DA0 /* Virtual Mirror Rule VM */ +#define E1000_MDFB 0x03558 /* Malicious Driver free block */ +#define E1000_LVMMC 0x03548 /* Last VM Misbehavior cause */ +#define E1000_TXSWC 0x05ACC /* Tx Switch Control */ +#define E1000_SCCRL 0x05DB0 /* Storm Control Control */ +#define E1000_BSCTRH 0x05DB8 /* Broadcast Storm Control Threshold */ +#define E1000_MSCTRH 0x05DBC /* Multicast Storm Control Threshold */ +/* These act per VF so an array friendly macro is used */ +#define E1000_V2PMAILBOX(_n) (0x00C40 + (4 * (_n))) +#define E1000_P2VMAILBOX(_n) (0x00C00 + (4 * (_n))) +#define E1000_VMBMEM(_n) (0x00800 + (64 * (_n))) +#define E1000_VFVMBMEM(_n) (0x00800 + (_n)) +#define E1000_VMOLR(_n) (0x05AD0 + (4 * (_n))) +/* VLAN Virtual Machine Filter - RW */ +#define E1000_VLVF(_n) (0x05D00 + (4 * (_n))) +#define E1000_VMVIR(_n) (0x03700 + (4 * (_n))) +#define E1000_DVMOLR(_n) (0x0C038 + (0x40 * (_n))) /* DMA VM offload */ +#define E1000_VTCTRL(_n) (0x10000 + (0x100 * (_n))) /* VT Control 
*/ +#define E1000_TSYNCRXCTL 0x0B620 /* Rx Time Sync Control register - RW */ +#define E1000_TSYNCTXCTL 0x0B614 /* Tx Time Sync Control register - RW */ +#define E1000_TSYNCRXCFG 0x05F50 /* Time Sync Rx Configuration - RW */ +#define E1000_RXSTMPL 0x0B624 /* Rx timestamp Low - RO */ +#define E1000_RXSTMPH 0x0B628 /* Rx timestamp High - RO */ +#define E1000_RXSATRL 0x0B62C /* Rx timestamp attribute low - RO */ +#define E1000_RXSATRH 0x0B630 /* Rx timestamp attribute high - RO */ +#define E1000_TXSTMPL 0x0B618 /* Tx timestamp value Low - RO */ +#define E1000_TXSTMPH 0x0B61C /* Tx timestamp value High - RO */ +#define E1000_SYSTIML 0x0B600 /* System time register Low - RO */ +#define E1000_SYSTIMH 0x0B604 /* System time register High - RO */ +#define E1000_TIMINCA 0x0B608 /* Increment attributes register - RW */ +#define E1000_TIMADJL 0x0B60C /* Time sync time adjustment offset Low - RW */ +#define E1000_TIMADJH 0x0B610 /* Time sync time adjustment offset High - RW */ +#define E1000_TSAUXC 0x0B640 /* Timesync Auxiliary Control register */ +#define E1000_TRGTTIML0 0x0B644 /* Target Time Register 0 Low - RW */ +#define E1000_TRGTTIMH0 0x0B648 /* Target Time Register 0 High - RW */ +#define E1000_TRGTTIML1 0x0B64C /* Target Time Register 1 Low - RW */ +#define E1000_TRGTTIMH1 0x0B650 /* Target Time Register 1 High - RW */ +#define E1000_FREQOUT0 0x0B654 /* Frequency Out 0 Control Register - RW */ +#define E1000_FREQOUT1 0x0B658 /* Frequency Out 1 Control Register - RW */ +#define E1000_AUXSTMPL0 0x0B65C /* Auxiliary Time Stamp 0 Register Low - RO */ +#define E1000_AUXSTMPH0 0x0B660 /* Auxiliary Time Stamp 0 Register High - RO */ +#define E1000_AUXSTMPL1 0x0B664 /* Auxiliary Time Stamp 1 Register Low - RO */ +#define E1000_AUXSTMPH1 0x0B668 /* Auxiliary Time Stamp 1 Register High - RO */ +#define E1000_SYSTIMR 0x0B6F8 /* System time register Residue */ +#define E1000_TSICR 0x0B66C /* Interrupt Cause Register */ +#define E1000_TSIM 0x0B674 /* Interrupt Mask Register */ + 
+/* Filtering Registers */ +#define E1000_SAQF(_n) (0x05980 + (4 * (_n))) /* Source Address Queue Fltr */ +#define E1000_DAQF(_n) (0x059A0 + (4 * (_n))) /* Dest Address Queue Fltr */ +#define E1000_SPQF(_n) (0x059C0 + (4 * (_n))) /* Source Port Queue Fltr */ +#define E1000_FTQF(_n) (0x059E0 + (4 * (_n))) /* 5-tuple Queue Fltr */ +#define E1000_TTQF(_n) (0x059E0 + (4 * (_n))) /* 2-tuple Queue Fltr */ +#define E1000_SYNQF(_n) (0x055FC + (4 * (_n))) /* SYN Packet Queue Fltr */ +#define E1000_ETQF(_n) (0x05CB0 + (4 * (_n))) /* EType Queue Fltr */ + +#define E1000_RTTDCS 0x3600 /* Reedtown Tx Desc plane control and status */ +#define E1000_RTTPCS 0x3474 /* Reedtown Tx Packet Plane control and status */ +#define E1000_RTRPCS 0x2474 /* Rx packet plane control and status */ +#define E1000_RTRUP2TC 0x05AC4 /* Rx User Priority to Traffic Class */ +#define E1000_RTTUP2TC 0x0418 /* Transmit User Priority to Traffic Class */ +/* Tx Desc plane TC Rate-scheduler config */ +#define E1000_RTTDTCRC(_n) (0x3610 + ((_n) * 4)) +/* Tx Packet plane TC Rate-Scheduler Config */ +#define E1000_RTTPTCRC(_n) (0x3480 + ((_n) * 4)) +/* Rx Packet plane TC Rate-Scheduler Config */ +#define E1000_RTRPTCRC(_n) (0x2480 + ((_n) * 4)) +/* Tx Desc Plane TC Rate-Scheduler Status */ +#define E1000_RTTDTCRS(_n) (0x3630 + ((_n) * 4)) +/* Tx Desc Plane TC Rate-Scheduler MMW */ +#define E1000_RTTDTCRM(_n) (0x3650 + ((_n) * 4)) +/* Tx Packet plane TC Rate-Scheduler Status */ +#define E1000_RTTPTCRS(_n) (0x34A0 + ((_n) * 4)) +/* Tx Packet plane TC Rate-scheduler MMW */ +#define E1000_RTTPTCRM(_n) (0x34C0 + ((_n) * 4)) +/* Rx Packet plane TC Rate-Scheduler Status */ +#define E1000_RTRPTCRS(_n) (0x24A0 + ((_n) * 4)) +/* Rx Packet plane TC Rate-Scheduler MMW */ +#define E1000_RTRPTCRM(_n) (0x24C0 + ((_n) * 4)) +/* Tx Desc plane VM Rate-Scheduler MMW*/ +#define E1000_RTTDVMRM(_n) (0x3670 + ((_n) * 4)) +/* Tx BCN Rate-Scheduler MMW */ +#define E1000_RTTBCNRM(_n) (0x3690 + ((_n) * 4)) +#define E1000_RTTDQSEL 0x3604 
/* Tx Desc Plane Queue Select */ +#define E1000_RTTDVMRC 0x3608 /* Tx Desc Plane VM Rate-Scheduler Config */ +#define E1000_RTTDVMRS 0x360C /* Tx Desc Plane VM Rate-Scheduler Status */ +#define E1000_RTTBCNRC 0x36B0 /* Tx BCN Rate-Scheduler Config */ +#define E1000_RTTBCNRS 0x36B4 /* Tx BCN Rate-Scheduler Status */ +#define E1000_RTTBCNCR 0xB200 /* Tx BCN Control Register */ +#define E1000_RTTBCNTG 0x35A4 /* Tx BCN Tagging */ +#define E1000_RTTBCNCP 0xB208 /* Tx BCN Congestion point */ +#define E1000_RTRBCNCR 0xB20C /* Rx BCN Control Register */ +#define E1000_RTTBCNRD 0x36B8 /* Tx BCN Rate Drift */ +#define E1000_PFCTOP 0x1080 /* Priority Flow Control Type and Opcode */ +#define E1000_RTTBCNIDX 0xB204 /* Tx BCN Congestion Point */ +#define E1000_RTTBCNACH 0x0B214 /* Tx BCN Control High */ +#define E1000_RTTBCNACL 0x0B210 /* Tx BCN Control Low */ + +/* DMA Coalescing registers */ +#define E1000_DMACR 0x02508 /* Control Register */ +#define E1000_DMCTXTH 0x03550 /* Transmit Threshold */ +#define E1000_DMCTLX 0x02514 /* Time to Lx Request */ +#define E1000_DMCRTRH 0x05DD0 /* Receive Packet Rate Threshold */ +#define E1000_DMCCNT 0x05DD4 /* Current Rx Count */ +#define E1000_FCRTC 0x02170 /* Flow Control Rx high watermark */ +#define E1000_PCIEMISC 0x05BB8 /* PCIE misc config register */ + +/* PCIe Parity Status Register */ +#define E1000_PCIEERRSTS 0x05BA8 + +#define E1000_PROXYS 0x5F64 /* Proxying Status */ +#define E1000_PROXYFC 0x5F60 /* Proxying Filter Control */ +/* Thermal sensor configuration and status registers */ +#define E1000_THMJT 0x08100 /* Junction Temperature */ +#define E1000_THLOWTC 0x08104 /* Low Threshold Control */ +#define E1000_THMIDTC 0x08108 /* Mid Threshold Control */ +#define E1000_THHIGHTC 0x0810C /* High Threshold Control */ +#define E1000_THSTAT 0x08110 /* Thermal Sensor Status */ + +/* Energy Efficient Ethernet "EEE" registers */ +#define E1000_IPCNFG 0x0E38 /* Internal PHY Configuration */ +#define E1000_LTRC 0x01A0 /* Latency 
Tolerance Reporting Control */ +#define E1000_EEER 0x0E30 /* Energy Efficient Ethernet "EEE"*/ +#define E1000_EEE_SU 0x0E34 /* EEE Setup */ +#define E1000_TLPIC 0x4148 /* EEE Tx LPI Count - TLPIC */ +#define E1000_RLPIC 0x414C /* EEE Rx LPI Count - RLPIC */ + +/* OS2BMC Registers */ +#define E1000_B2OSPC 0x08FE0 /* BMC2OS packets sent by BMC */ +#define E1000_B2OGPRC 0x04158 /* BMC2OS packets received by host */ +#define E1000_O2BGPTC 0x08FE4 /* OS2BMC packets received by BMC */ +#define E1000_O2BSPC 0x0415C /* OS2BMC packets transmitted by host */ + +#endif diff --git a/drivers/staging/igb_avb/igb.h b/drivers/staging/igb_avb/igb.h new file mode 100644 index 0000000000000..4482f74079558 --- /dev/null +++ b/drivers/staging/igb_avb/igb.h @@ -0,0 +1,939 @@ +/******************************************************************************* + + Intel(R) Gigabit Ethernet Linux driver + Copyright(c) 2007-2016 Intel Corporation. + + This program is free software; you can redistribute it and/or modify it + under the terms and conditions of the GNU General Public License, + version 2, as published by the Free Software Foundation. + + This program is distributed in the hope it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. + + The full GNU General Public License is included in this distribution in + the file called "COPYING". + + Contact Information: + Linux NICS + e1000-devel Mailing List + Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 + +*******************************************************************************/ + +/* Linux PRO/1000 Ethernet Driver main header file */ + +#ifndef _IGB_H_ +#define _IGB_H_ + +#include + +#ifndef IGB_NO_LRO +#include +#endif + +#include +#include +#include + +#ifdef SIOCETHTOOL +#include +#endif + +struct igb_adapter; + +struct igb_user_page; + +struct igb_user_page { + struct igb_user_page *prev; + struct igb_user_page *next; + struct page *page; + dma_addr_t page_dma; +}; +#if defined(CONFIG_DCA) || defined(CONFIG_DCA_MODULE) +#define IGB_DCA +#endif +#ifdef IGB_DCA +#include +#endif + +#include "kcompat.h" + +#ifdef HAVE_SCTP +#include +#endif + +#include "e1000_api.h" +#include "e1000_82575.h" +#include "e1000_manage.h" +#include "e1000_mbx.h" + +#define IGB_ERR(args...) pr_err(KERN_ERR "igb: " args) + +#define PFX "igb: " +#define DPRINTK(nlevel, klevel, fmt, args...) \ + (void)((NETIF_MSG_##nlevel & adapter->msg_enable) && \ + printk(KERN_##klevel PFX "%s: %s: " fmt, adapter->netdev->name, \ + __func__ , ## args)) + +#ifdef HAVE_PTP_1588_CLOCK +#ifdef HAVE_INCLUDE_LINUX_TIMECOUNTER_H +#include +#else +#include +#endif /* HAVE_INCLUDE_TIMECOUNTER_H */ +#include +#include +#endif /* HAVE_PTP_1588_CLOCK */ + +#ifdef HAVE_I2C_SUPPORT +#include +#include +#endif /* HAVE_I2C_SUPPORT */ + +#include +typedef u64 cycle_t; + +/* Interrupt defines */ +#define IGB_START_ITR 648 /* ~6000 ints/sec */ +#define IGB_4K_ITR 980 +#define IGB_20K_ITR 196 +#define IGB_70K_ITR 56 + +/* Interrupt modes, as used by the IntMode paramter */ +#define IGB_INT_MODE_LEGACY 0 +#define IGB_INT_MODE_MSI 1 +#define IGB_INT_MODE_MSIX 2 + +/* TX/RX descriptor defines */ +#define IGB_DEFAULT_TXD 256 +#define IGB_DEFAULT_TX_WORK 128 +#define IGB_MIN_TXD 80 +#define IGB_MAX_TXD 4096 + +#define IGB_DEFAULT_RXD 256 +#define IGB_MIN_RXD 80 +#define IGB_MAX_RXD 4096 + +#define IGB_MIN_ITR_USECS 10 /* 100k irq/sec */ +#define IGB_MAX_ITR_USECS 8191 /* 
120 irq/sec */ + +#define NON_Q_VECTORS 1 +#define MAX_Q_VECTORS 10 + +/* Transmit and receive queues */ +#define IGB_MAX_RX_QUEUES 16 +#define IGB_MAX_RX_QUEUES_82575 4 +#define IGB_MAX_RX_QUEUES_I211 2 +#define IGB_MAX_TX_QUEUES 16 + +#define IGB_MAX_VF_MC_ENTRIES 30 +#define IGB_MAX_VF_FUNCTIONS 8 +#define IGB_82576_VF_DEV_ID 0x10CA +#define IGB_I350_VF_DEV_ID 0x1520 +#define IGB_MAX_UTA_ENTRIES 128 +#define MAX_EMULATION_MAC_ADDRS 16 +#define OUI_LEN 3 +#define IGB_MAX_VMDQ_QUEUES 8 + +struct vf_data_storage { + unsigned char vf_mac_addresses[ETH_ALEN]; + u16 vf_mc_hashes[IGB_MAX_VF_MC_ENTRIES]; + u16 num_vf_mc_hashes; + u16 default_vf_vlan_id; + u16 vlans_enabled; + unsigned char em_mac_addresses[MAX_EMULATION_MAC_ADDRS * ETH_ALEN]; + u32 uta_table_copy[IGB_MAX_UTA_ENTRIES]; + u32 flags; + unsigned long last_nack; +#ifdef IFLA_VF_MAX + u16 pf_vlan; /* When set, guest VLAN config not allowed. */ + u16 pf_qos; + u16 tx_rate; +#ifdef HAVE_VF_SPOOFCHK_CONFIGURE + bool spoofchk_enabled; +#endif +#endif +}; + +#define IGB_VF_FLAG_CTS 0x00000001 /* VF is clear to send data */ +#define IGB_VF_FLAG_UNI_PROMISC 0x00000002 /* VF has unicast promisc */ +#define IGB_VF_FLAG_MULTI_PROMISC 0x00000004 /* VF has multicast promisc */ +#define IGB_VF_FLAG_PF_SET_MAC 0x00000008 /* PF has set MAC address */ + +/* RX descriptor control thresholds. + * PTHRESH - MAC will consider prefetch if it has fewer than this number of + * descriptors available in its onboard memory. + * Setting this to 0 disables RX descriptor prefetch. + * HTHRESH - MAC will only prefetch if there are at least this many descriptors + * available in host memory. + * If PTHRESH is 0, this should also be 0. + * WTHRESH - RX descriptor writeback threshold - MAC will delay writing back + * descriptors until either it has this many to write back, or the + * ITR timer expires. + */ +#define IGB_RX_PTHRESH ((hw->mac.type == e1000_i354) ? 
12 : 8) +#define IGB_RX_HTHRESH 8 +#define IGB_TX_PTHRESH ((hw->mac.type == e1000_i354) ? 20 : 8) +#define IGB_TX_HTHRESH 1 +#define IGB_RX_WTHRESH ((hw->mac.type == e1000_82576 && \ + adapter->msix_entries) ? 1 : 4) + +/* this is the size past which hardware will drop packets when setting LPE=0 */ +#define MAXIMUM_ETHERNET_VLAN_SIZE 1522 + +/* NOTE: netdev_alloc_skb reserves 16 bytes, NET_IP_ALIGN means we + * reserve 2 more, and skb_shared_info adds an additional 384 more, + * this adds roughly 448 bytes of extra data meaning the smallest + * allocation we could have is 1K. + * i.e. RXBUFFER_512 --> size-1024 slab + */ +/* Supported Rx Buffer Sizes */ +#define IGB_RXBUFFER_256 256 +#define IGB_RXBUFFER_2048 2048 +#define IGB_RXBUFFER_16384 16384 +#define IGB_RX_HDR_LEN IGB_RXBUFFER_256 +#if MAX_SKB_FRAGS < 8 +#define IGB_RX_BUFSZ ALIGN(MAX_JUMBO_FRAME_SIZE / MAX_SKB_FRAGS, 1024) +#else +#define IGB_RX_BUFSZ IGB_RXBUFFER_2048 +#endif + + +/* Packet Buffer allocations */ +#define IGB_PBA_BYTES_SHIFT 0xA +#define IGB_TX_HEAD_ADDR_SHIFT 7 +#define IGB_PBA_TX_MASK 0xFFFF0000 + +#define IGB_FC_PAUSE_TIME 0x0680 /* 858 usec */ + +/* How many Rx Buffers do we bundle into one write to the hardware ? */ +#define IGB_RX_BUFFER_WRITE 16 /* Must be power of 2 */ + +#define IGB_EEPROM_APME 0x0400 +#define AUTO_ALL_MODES 0 + +#ifndef IGB_MASTER_SLAVE +/* Switch to override PHY master/slave setting */ +#define IGB_MASTER_SLAVE e1000_ms_hw_default +#endif + +#define IGB_MNG_VLAN_NONE -1 + +#ifndef IGB_NO_LRO +#define IGB_LRO_MAX 32 /*Maximum number of LRO descriptors*/ +struct igb_lro_stats { + u32 flushed; + u32 coal; +}; + +/* + * igb_lro_header - header format to be aggregated by LRO + * @iph: IP header without options + * @tcp: TCP header + * @ts: Optional TCP timestamp data in TCP options + * + * This structure relies on the check above that verifies that the header + * is IPv4 and does not contain any options. 
+ */ +struct igb_lrohdr { + struct iphdr iph; + struct tcphdr th; + __be32 ts[0]; +}; + +struct igb_lro_list { + struct sk_buff_head active; + struct igb_lro_stats stats; +}; + +#endif /* IGB_NO_LRO */ +struct igb_cb { +#ifndef IGB_NO_LRO +#ifdef CONFIG_IGB_DISABLE_PACKET_SPLIT + union { /* Union defining head/tail partner */ + struct sk_buff *head; + struct sk_buff *tail; + }; +#endif + __be32 tsecr; /* timestamp echo response */ + u32 tsval; /* timestamp value in host order */ + u32 next_seq; /* next expected sequence number */ + u16 free; /* 65521 minus total size */ + u16 mss; /* size of data portion of packet */ + u16 append_cnt; /* number of skb's appended */ +#endif /* IGB_NO_LRO */ +#ifdef HAVE_VLAN_RX_REGISTER + u16 vid; /* VLAN tag */ +#endif +}; +#define IGB_CB(skb) ((struct igb_cb *)(skb)->cb) + +enum igb_tx_flags { + /* cmd_type flags */ + IGB_TX_FLAGS_VLAN = 0x01, + IGB_TX_FLAGS_TSO = 0x02, + IGB_TX_FLAGS_TSTAMP = 0x04, + + /* olinfo flags */ + IGB_TX_FLAGS_IPV4 = 0x10, + IGB_TX_FLAGS_CSUM = 0x20, +}; + +/* VLAN info */ +#define IGB_TX_FLAGS_VLAN_MASK 0xffff0000 +#define IGB_TX_FLAGS_VLAN_SHIFT 16 + +/* + * The largest size we can write to the descriptor is 65535. In order to + * maintain a power of two alignment we have to limit ourselves to 32K. 
+ */ +#define IGB_MAX_TXD_PWR 15 +#define IGB_MAX_DATA_PER_TXD (1 << IGB_MAX_TXD_PWR) + +/* Tx Descriptors needed, worst case */ +#define TXD_USE_COUNT(S) DIV_ROUND_UP((S), IGB_MAX_DATA_PER_TXD) +#ifndef MAX_SKB_FRAGS +#define DESC_NEEDED 4 +#elif (MAX_SKB_FRAGS < 16) +#define DESC_NEEDED ((MAX_SKB_FRAGS * TXD_USE_COUNT(PAGE_SIZE)) + 4) +#else +#define DESC_NEEDED (MAX_SKB_FRAGS + 4) +#endif + +/* wrapper around a pointer to a socket buffer, + * so a DMA handle can be stored along with the buffer */ +struct igb_tx_buffer { + union e1000_adv_tx_desc *next_to_watch; + unsigned long time_stamp; + struct sk_buff *skb; + unsigned int bytecount; + u16 gso_segs; + __be16 protocol; + + DEFINE_DMA_UNMAP_ADDR(dma); + DEFINE_DMA_UNMAP_LEN(len); + u32 tx_flags; +}; + +struct igb_rx_buffer { + dma_addr_t dma; +#ifdef CONFIG_IGB_DISABLE_PACKET_SPLIT + struct sk_buff *skb; +#else + struct page *page; + u32 page_offset; +#endif +}; + +struct igb_tx_queue_stats { + u64 packets; + u64 bytes; + u64 restart_queue; +}; + +struct igb_rx_queue_stats { + u64 packets; + u64 bytes; + u64 drops; + u64 csum_err; + u64 alloc_failed; +}; + +struct igb_rx_packet_stats { + u64 ipv4_packets; /* IPv4 headers processed */ + u64 ipv4e_packets; /* IPv4E headers with extensions processed */ + u64 ipv6_packets; /* IPv6 headers processed */ + u64 ipv6e_packets; /* IPv6E headers with extensions processed */ + u64 tcp_packets; /* TCP headers processed */ + u64 udp_packets; /* UDP headers processed */ + u64 sctp_packets; /* SCTP headers processed */ + u64 nfs_packets; /* NFS headers processe */ + u64 other_packets; +}; + +struct igb_ring_container { + struct igb_ring *ring; /* pointer to linked list of rings */ + unsigned int total_bytes; /* total bytes processed this int */ + unsigned int total_packets; /* total packets processed this int */ + u16 work_limit; /* total work allowed per interrupt */ + u8 count; /* total number of rings in vector */ + u8 itr; /* current ITR setting for ring */ +}; + +struct 
igb_ring { + struct igb_q_vector *q_vector; /* backlink to q_vector */ + struct net_device *netdev; /* back pointer to net_device */ + struct device *dev; /* device for dma mapping */ + union { /* array of buffer info structs */ + struct igb_tx_buffer *tx_buffer_info; + struct igb_rx_buffer *rx_buffer_info; + }; + void *desc; /* descriptor ring memory */ + unsigned long flags; /* ring specific flags */ + void __iomem *tail; /* pointer to ring tail register */ + dma_addr_t dma; /* phys address of the ring */ + unsigned int size; /* length of desc. ring in bytes */ + + u16 count; /* number of desc. in the ring */ + u8 queue_index; /* logical index of the ring*/ + u8 reg_idx; /* physical index of the ring */ + + /* everything past this point are written often */ + u16 next_to_clean; + u16 next_to_use; + u16 next_to_alloc; + + union { + /* TX */ + struct { + struct igb_tx_queue_stats tx_stats; + }; + /* RX */ + struct { + struct igb_rx_queue_stats rx_stats; + struct igb_rx_packet_stats pkt_stats; +#ifdef CONFIG_IGB_DISABLE_PACKET_SPLIT + u16 rx_buffer_len; +#else + struct sk_buff *skb; +#endif + }; + }; +#ifdef CONFIG_IGB_VMDQ_NETDEV + struct net_device *vmdq_netdev; + int vqueue_index; /* queue index for virtual netdev */ +#endif +} ____cacheline_internodealigned_in_smp; + +struct igb_q_vector { + struct igb_adapter *adapter; /* backlink */ + int cpu; /* CPU for DCA */ + u32 eims_value; /* EIMS mask value */ + + u16 itr_val; + u8 set_itr; + void __iomem *itr_register; + + struct igb_ring_container rx, tx; + + struct napi_struct napi; +#ifndef IGB_NO_LRO + struct igb_lro_list lrolist; /* LRO list for queue vector*/ +#endif + struct rcu_head rcu; /* to avoid race with update stats on free */ + char name[IFNAMSIZ + 9]; +#ifndef HAVE_NETDEV_NAPI_LIST + struct net_device poll_dev; +#endif + + /* for dynamic allocation of rings associated with this q_vector */ + struct igb_ring ring[0] ____cacheline_internodealigned_in_smp; +}; + +enum e1000_ring_flags_t { +#ifndef 
HAVE_NDO_SET_FEATURES + IGB_RING_FLAG_RX_CSUM, +#endif + IGB_RING_FLAG_RX_SCTP_CSUM, + IGB_RING_FLAG_RX_LB_VLAN_BSWAP, + IGB_RING_FLAG_TX_CTX_IDX, + IGB_RING_FLAG_TX_DETECT_HANG, +}; + +struct igb_mac_addr { + u8 addr[ETH_ALEN]; + u16 queue; + u16 state; /* bitmask */ +}; +#define IGB_MAC_STATE_DEFAULT 0x1 +#define IGB_MAC_STATE_MODIFIED 0x2 +#define IGB_MAC_STATE_IN_USE 0x4 + +#define IGB_TXD_DCMD (E1000_ADVTXD_DCMD_EOP | E1000_ADVTXD_DCMD_RS) + +#define IGB_RX_DESC(R, i) \ + (&(((union e1000_adv_rx_desc *)((R)->desc))[i])) +#define IGB_TX_DESC(R, i) \ + (&(((union e1000_adv_tx_desc *)((R)->desc))[i])) +#define IGB_TX_CTXTDESC(R, i) \ + (&(((struct e1000_adv_tx_context_desc *)((R)->desc))[i])) + +#ifdef CONFIG_IGB_VMDQ_NETDEV +#define netdev_ring(ring) \ + ((ring->vmdq_netdev ? ring->vmdq_netdev : ring->netdev)) +#define ring_queue_index(ring) \ + ((ring->vmdq_netdev ? ring->vqueue_index : ring->queue_index)) +#else +#define netdev_ring(ring) (ring->netdev) +#define ring_queue_index(ring) (ring->queue_index) +#endif /* CONFIG_IGB_VMDQ_NETDEV */ + +/* igb_test_staterr - tests bits within Rx descriptor status and error fields */ +static inline __le32 igb_test_staterr(union e1000_adv_rx_desc *rx_desc, + const u32 stat_err_bits) +{ + return rx_desc->wb.upper.status_error & cpu_to_le32(stat_err_bits); +} + +/* igb_desc_unused - calculate if we have unused descriptors */ +static inline u16 igb_desc_unused(const struct igb_ring *ring) +{ + u16 ntc = ring->next_to_clean; + u16 ntu = ring->next_to_use; + + return ((ntc > ntu) ? 
0 : ring->count) + ntc - ntu - 1; +} + +#ifdef CONFIG_BQL +static inline struct netdev_queue *txring_txq(const struct igb_ring *tx_ring) +{ + return netdev_get_tx_queue(tx_ring->netdev, tx_ring->queue_index); +} +#endif /* CONFIG_BQL */ + +struct igb_therm_proc_data { + struct e1000_hw *hw; + struct e1000_thermal_diode_data *sensor_data; +}; + +#ifdef IGB_HWMON +#define IGB_HWMON_TYPE_LOC 0 +#define IGB_HWMON_TYPE_TEMP 1 +#define IGB_HWMON_TYPE_CAUTION 2 +#define IGB_HWMON_TYPE_MAX 3 + +struct hwmon_attr { + struct device_attribute dev_attr; + struct e1000_hw *hw; + struct e1000_thermal_diode_data *sensor; + char name[12]; + }; + +struct hwmon_buff { + struct device *device; + struct hwmon_attr *hwmon_list; + unsigned int n_hwmon; + }; +#endif /* IGB_HWMON */ +#define IGB_N_EXTTS 2 +#define IGB_N_PEROUT 2 +#define IGB_N_SDP 4 +#ifdef ETHTOOL_GRXFHINDIR +#define IGB_RETA_SIZE 128 +#endif /* ETHTOOL_GRXFHINDIR */ + +/* board specific private data structure */ +struct igb_adapter { +#ifdef HAVE_VLAN_RX_REGISTER + /* vlgrp must be first member of structure */ + struct vlan_group *vlgrp; +#else + unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)]; +#endif + struct net_device *netdev; + + unsigned long state; + unsigned int flags; + + unsigned int num_q_vectors; + struct msix_entry *msix_entries; + + + /* TX */ + u16 tx_work_limit; + u32 tx_timeout_count; + int num_tx_queues; + struct igb_ring *tx_ring[IGB_MAX_TX_QUEUES]; + + /* RX */ + int num_rx_queues; + struct igb_ring *rx_ring[IGB_MAX_RX_QUEUES]; + + struct timer_list watchdog_timer; + struct timer_list dma_err_timer; + struct timer_list phy_info_timer; + u16 mng_vlan_id; + u32 bd_number; + u32 wol; + u32 en_mng_pt; + u16 link_speed; + u16 link_duplex; + u8 port_num; + + u8 __iomem *io_addr; /* Mainly for iounmap use */ + + /* Interrupt Throttle Rate */ + u32 rx_itr_setting; + u32 tx_itr_setting; + + struct work_struct reset_task; + struct work_struct watchdog_task; + struct work_struct dma_err_task; + bool 
fc_autoneg; + u8 tx_timeout_factor; + +#ifdef DEBUG + bool tx_hang_detected; + bool disable_hw_reset; +#endif + u32 max_frame_size; + + /* OS defined structs */ + struct pci_dev *pdev; + /* user-dma specific variables */ + u32 uring_tx_init; + u32 uring_rx_init; +#ifndef HAVE_NETDEV_STATS_IN_NETDEV + struct net_device_stats net_stats; +#endif +#ifndef IGB_NO_LRO + struct igb_lro_stats lro_stats; +#endif + + /* structs defined in e1000_hw.h */ + struct e1000_hw hw; + struct e1000_hw_stats stats; + struct e1000_phy_info phy_info; + struct e1000_phy_stats phy_stats; + +#ifdef ETHTOOL_TEST + u32 test_icr; + struct igb_ring test_tx_ring; + struct igb_ring test_rx_ring; +#endif + + int msg_enable; + + struct igb_q_vector *q_vector[MAX_Q_VECTORS]; + u32 eims_enable_mask; + u32 eims_other; + + /* to not mess up cache alignment, always add to the bottom */ + u32 *config_space; + u16 tx_ring_count; + u16 rx_ring_count; + struct vf_data_storage *vf_data; +#ifdef IFLA_VF_MAX + int vf_rate_link_speed; +#endif + u32 lli_port; + u32 lli_size; + unsigned int vfs_allocated_count; + /* Malicious Driver Detection flag. 
Valid only when SR-IOV is enabled */ + bool mdd; + int int_mode; + u32 rss_queues; + u32 tss_queues; + u32 vmdq_pools; + char fw_version[32]; + u32 wvbr; + struct igb_mac_addr *mac_table; +#ifdef CONFIG_IGB_VMDQ_NETDEV + struct net_device *vmdq_netdev[IGB_MAX_VMDQ_QUEUES]; +#endif + int vferr_refcount; + int dmac; + u32 *shadow_vfta; + + /* External Thermal Sensor support flag */ + bool ets; +#ifdef IGB_HWMON + struct hwmon_buff igb_hwmon_buff; +#else /* IGB_HWMON */ +#ifdef IGB_PROCFS + struct proc_dir_entry *eth_dir; + struct proc_dir_entry *info_dir; + struct proc_dir_entry *therm_dir[E1000_MAX_SENSORS]; + struct igb_therm_proc_data therm_data[E1000_MAX_SENSORS]; + bool old_lsc; +#endif /* IGB_PROCFS */ +#endif /* IGB_HWMON */ + u32 etrack_id; + +#ifdef HAVE_PTP_1588_CLOCK + struct ptp_clock *ptp_clock; + struct ptp_clock_info ptp_caps; + struct delayed_work ptp_overflow_work; + struct work_struct ptp_tx_work; + struct sk_buff *ptp_tx_skb; + struct hwtstamp_config tstamp_config; + unsigned long ptp_tx_start; + unsigned long last_rx_ptp_check; + unsigned long last_rx_timestamp; + spinlock_t tmreg_lock; + struct cyclecounter cc; + struct timecounter tc; + u32 tx_hwtstamp_timeouts; + u32 rx_hwtstamp_cleared; + +#ifdef HAVE_PTP_1588_CLOCK_PINS + struct ptp_pin_desc sdp_config[IGB_N_SDP]; +#endif /* HAVE_PTP_1588_CLOCK_PINS */ + struct { + struct timespec64 start; + struct timespec64 period; + } perout[IGB_N_PEROUT]; +#endif /* HAVE_PTP_1588_CLOCK */ + +#ifdef HAVE_I2C_SUPPORT + struct i2c_algo_bit_data i2c_algo; + struct i2c_adapter i2c_adap; + struct i2c_client *i2c_client; +#endif /* HAVE_I2C_SUPPORT */ + unsigned long link_check_timeout; + + int devrc; + + int copper_tries; + u16 eee_advert; +#ifdef ETHTOOL_GRXFHINDIR + u32 rss_indir_tbl_init; + u8 rss_indir_tbl[IGB_RETA_SIZE]; +#endif + struct mutex lock; +}; + +#ifdef CONFIG_IGB_VMDQ_NETDEV +struct igb_vmdq_adapter { +#ifdef HAVE_VLAN_RX_REGISTER + /* vlgrp must be first member of structure */ + struct 
vlan_group *vlgrp; +#else + unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)]; +#endif + struct igb_adapter *real_adapter; + struct net_device *vnetdev; + struct net_device_stats net_stats; + struct igb_ring *tx_ring; + struct igb_ring *rx_ring; +}; +#endif + +#define IGB_FLAG_HAS_MSI (1 << 0) +#define IGB_FLAG_DCA_ENABLED (1 << 1) +#define IGB_FLAG_LLI_PUSH (1 << 2) +#define IGB_FLAG_QUAD_PORT_A (1 << 3) +#define IGB_FLAG_QUEUE_PAIRS (1 << 4) +#define IGB_FLAG_EEE (1 << 5) +#define IGB_FLAG_DMAC (1 << 6) +#define IGB_FLAG_DETECT_BAD_DMA (1 << 7) +#define IGB_FLAG_PTP (1 << 8) +#define IGB_FLAG_RSS_FIELD_IPV4_UDP (1 << 9) +#define IGB_FLAG_RSS_FIELD_IPV6_UDP (1 << 10) +#define IGB_FLAG_WOL_SUPPORTED (1 << 11) +#define IGB_FLAG_NEED_LINK_UPDATE (1 << 12) +#define IGB_FLAG_LOOPBACK_ENABLE (1 << 13) +#define IGB_FLAG_MEDIA_RESET (1 << 14) +#define IGB_FLAG_MAS_ENABLE (1 << 15) + +/* Media Auto Sense */ +#define IGB_MAS_ENABLE_0 0X0001 +#define IGB_MAS_ENABLE_1 0X0002 +#define IGB_MAS_ENABLE_2 0X0004 +#define IGB_MAS_ENABLE_3 0X0008 + +#define IGB_MIN_TXPBSIZE 20408 +#define IGB_TX_BUF_4096 4096 + +#define IGB_DMCTLX_DCFLUSH_DIS 0x80000000 /* Disable DMA Coal Flush */ + +/* DMA Coalescing defines */ +#define IGB_DMAC_DISABLE 0 +#define IGB_DMAC_MIN 250 +#define IGB_DMAC_500 500 +#define IGB_DMAC_EN_DEFAULT 1000 +#define IGB_DMAC_2000 2000 +#define IGB_DMAC_3000 3000 +#define IGB_DMAC_4000 4000 +#define IGB_DMAC_5000 5000 +#define IGB_DMAC_6000 6000 +#define IGB_DMAC_7000 7000 +#define IGB_DMAC_8000 8000 +#define IGB_DMAC_9000 9000 +#define IGB_DMAC_MAX 10000 + +#define IGB_82576_TSYNC_SHIFT 19 +#define IGB_82580_TSYNC_SHIFT 24 +#define IGB_TS_HDR_LEN 16 + +/* CEM Support */ +#define FW_HDR_LEN 0x4 +#define FW_CMD_DRV_INFO 0xDD +#define FW_CMD_DRV_INFO_LEN 0x5 +#define FW_CMD_RESERVED 0X0 +#define FW_RESP_SUCCESS 0x1 +#define FW_UNUSED_VER 0x0 +#define FW_MAX_RETRIES 3 +#define FW_STATUS_SUCCESS 0x1 +#define FW_FAMILY_DRV_VER 0Xffffffff + +#define IGB_MAX_LINK_TRIES 
20 + +struct e1000_fw_hdr { + u8 cmd; + u8 buf_len; + union { + u8 cmd_resv; + u8 ret_status; + } cmd_or_resp; + u8 checksum; +}; + +#pragma pack(push, 1) +struct e1000_fw_drv_info { + struct e1000_fw_hdr hdr; + u8 port_num; + u32 drv_version; + u16 pad; /* end spacing to ensure length is mult. of dword */ + u8 pad2; /* end spacing to ensure length is mult. of dword2 */ +}; +#pragma pack(pop) + +enum e1000_state_t { + __IGB_TESTING, + __IGB_RESETTING, + __IGB_DOWN, + __IGB_PTP_TX_IN_PROGRESS, +}; + +extern char igb_driver_name[]; +extern char igb_driver_version[]; + +extern void igb_up(struct igb_adapter *); +extern void igb_down(struct igb_adapter *); +extern void igb_reinit_locked(struct igb_adapter *); +extern void igb_reset(struct igb_adapter *); +#ifdef ETHTOOL_SRXFHINDIR +extern void igb_write_rss_indir_tbl(struct igb_adapter *); +#endif +extern int igb_set_spd_dplx(struct igb_adapter *, u16); +extern int igb_setup_tx_resources(struct igb_ring *); +extern int igb_setup_rx_resources(struct igb_ring *); +extern void igb_free_tx_resources(struct igb_ring *); +extern void igb_free_rx_resources(struct igb_ring *); +extern void igb_configure_tx_ring(struct igb_adapter *, struct igb_ring *); +extern void igb_configure_rx_ring(struct igb_adapter *, struct igb_ring *); +extern void igb_setup_tctl(struct igb_adapter *); +extern void igb_setup_rctl(struct igb_adapter *); +extern netdev_tx_t igb_xmit_frame_ring(struct sk_buff *, struct igb_ring *); +extern void igb_unmap_and_free_tx_resource(struct igb_ring *, + struct igb_tx_buffer *); +extern void igb_alloc_rx_buffers(struct igb_ring *, u16); +extern void igb_clean_rx_ring(struct igb_ring *); +extern int igb_setup_queues(struct igb_adapter *adapter); +extern void igb_update_stats(struct igb_adapter *); +extern bool igb_has_link(struct igb_adapter *adapter); +extern void igb_set_ethtool_ops(struct net_device *); +extern void igb_check_options(struct igb_adapter *); +extern void igb_power_up_link(struct igb_adapter *); 
+#ifdef HAVE_PTP_1588_CLOCK +extern void igb_ptp_init(struct igb_adapter *adapter); +extern void igb_ptp_stop(struct igb_adapter *adapter); +extern void igb_ptp_reset(struct igb_adapter *adapter); +extern void igb_ptp_tx_work(struct work_struct *work); +extern void igb_ptp_rx_hang(struct igb_adapter *adapter); +extern void igb_ptp_tx_hwtstamp(struct igb_adapter *adapter); +extern void igb_ptp_rx_rgtstamp(struct igb_q_vector *q_vector, + struct sk_buff *skb); +extern void igb_ptp_rx_pktstamp(struct igb_q_vector *q_vector, + unsigned char *va, + struct sk_buff *skb); +extern int igb_ptp_hwtstamp_ioctl(struct net_device *netdev, + struct ifreq *ifr, int cmd); +#endif /* HAVE_PTP_1588_CLOCK */ +#ifdef ETHTOOL_OPS_COMPAT +extern int ethtool_ioctl(struct ifreq *); +#endif +extern int igb_write_mc_addr_list(struct net_device *netdev); +extern int igb_add_mac_filter(struct igb_adapter *adapter, u8 *addr, u16 queue); +extern int igb_del_mac_filter(struct igb_adapter *adapter, u8 *addr, u16 queue); +extern int igb_available_rars(struct igb_adapter *adapter); +extern s32 igb_vlvf_set(struct igb_adapter *, u32, bool, u32); +extern void igb_configure_vt_default_pool(struct igb_adapter *adapter); +extern void igb_enable_vlan_tags(struct igb_adapter *adapter); +#ifndef HAVE_VLAN_RX_REGISTER +extern void igb_vlan_mode(struct net_device *, u32); +#endif + +#define E1000_PCS_CFG_IGN_SD 1 + +int igb_ptp_set_ts_config(struct net_device *netdev, struct ifreq *ifr); +int igb_ptp_get_ts_config(struct net_device *netdev, struct ifreq *ifr); +#ifdef IGB_HWMON +void igb_sysfs_exit(struct igb_adapter *adapter); +int igb_sysfs_init(struct igb_adapter *adapter); +#else +#ifdef IGB_PROCFS +int igb_procfs_init(struct igb_adapter *adapter); +void igb_procfs_exit(struct igb_adapter *adapter); +int igb_procfs_topdir_init(void); +void igb_procfs_topdir_exit(void); +#endif /* IGB_PROCFS */ +#endif /* IGB_HWMON */ + +#define IGB_BIND _IOW('E', 200, int) +#define IGB_UNBIND _IOW('E', 201, int) +#define 
IGB_MAPRING _IOW('E', 202, int) +#define IGB_MAP_TX_RING IGB_MAPRING +#define IGB_UNMAPRING _IOW('E', 203, int) +#define IGB_UNMAP_TX_RING IGB_UNMAPRING +#define IGB_MAPBUF _IOW('E', 204, int) +#define IGB_UNMAPBUF _IOW('E', 205, int) +#define IGB_LINKSPEED _IOW('E', 206, int) +#define IGB_MAP_RX_RING _IOW('E', 207, int) +#define IGB_UNMAP_RX_RING _IOW('E', 208, int) + +/*set of newly defined ioctl calls - new libigb compatibility + each of them is an equivalent of the old ioctl + changed numbering convention: new_ioctl = old_ioctl + 100*/ + +#define IGB_IOCTL_MAPRING _IOW('E', 302, int) +#define IGB_IOCTL_MAP_TX_RING IGB_IOCTL_MAPRING +#define IGB_IOCTL_UNMAPRING _IOW('E', 303, int) +#define IGB_IOCTL_UNMAP_TX_RING IGB_IOCTL_UNMAPRING +#define IGB_IOCTL_MAPBUF _IOW('E', 304, int) +#define IGB_IOCTL_UNMAPBUF _IOW('E', 305, int) +#define IGB_IOCTL_MAP_RX_RING _IOW('E', 307, int) +#define IGB_IOCTL_UNMAP_RX_RING _IOW('E', 308, int) + + +/*END*/ + +#define IGB_BIND_NAMESZ 24 + +struct igb_bind_cmd { + char iface[IGB_BIND_NAMESZ]; + u32 mmap_size; +}; + +struct igb_pci_lookup { + struct igb_adapter *adapter; + char *pci_info; +}; + +/* used with both map/unmap ring & buf ioctls */ +struct igb_buf_cmd { + u64 physaddr; + u32 queue; + u32 mmap_size; + u64 pa; +}; + +struct igb_link_cmd { + u32 up; + u32 speed; + u32 duplex; +}; + +struct igb_private_data { + struct igb_adapter *adapter; + /* user-dma specific variable for buffer */ + struct igb_user_page *userpages; + /* user-dma specific variable for TX and RX */ + u32 uring_tx_init; + u32 uring_rx_init; +}; + +#endif /* _IGB_H_ */ diff --git a/drivers/staging/igb_avb/igb_avb.7 b/drivers/staging/igb_avb/igb_avb.7 new file mode 100755 index 0000000000000..d17b3de3c61e6 --- /dev/null +++ b/drivers/staging/igb_avb/igb_avb.7 @@ -0,0 +1,253 @@ +.\" LICENSE +.\" +.\" This software program is released under the terms of a license agreement between you ('Licensee') and Intel. 
Do not use or load this software or any associated materials (collectively, the 'Software') until you have carefully read the full terms and conditions of the LICENSE located in this software package. By loading or using the Software, you agree to the terms of this Agreement. If you do not agree with the terms of this Agreement, do not install or use the Software. +.\" +.\" * Other names and brands may be claimed as the property of others. +.\" +.TH igb 1 "January 5, 2012" + +.SH NAME +igb \- This file describes the Linux* Base Driver for the Gigabit Family of Adapters. +.SH SYNOPSIS +.PD 0.4v +modprobe igb [